-rw-r--r--  clang-2690385/AndroidVersion.txt  1
-rw-r--r--  clang-2690385/MODULE_LICENSE_BSD_LIKE  0
-rw-r--r--  clang-2690385/MODULE_LICENSE_MIT  0
-rw-r--r--  clang-2690385/NOTICE  457
-rwxr-xr-x  clang-2690385/bin/FileCheck  bin 0 -> 552728 bytes
-rw-r--r--  clang-2690385/bin/arm64-v8a/analyzer  7
-rw-r--r--  clang-2690385/bin/arm64-v8a/analyzer++  7
-rw-r--r--  clang-2690385/bin/armeabi-v7a-hard/analyzer  7
-rw-r--r--  clang-2690385/bin/armeabi-v7a-hard/analyzer++  7
-rw-r--r--  clang-2690385/bin/armeabi-v7a/analyzer  7
-rw-r--r--  clang-2690385/bin/armeabi-v7a/analyzer++  7
-rw-r--r--  clang-2690385/bin/armeabi/analyzer  7
-rw-r--r--  clang-2690385/bin/armeabi/analyzer++  7
-rwxr-xr-x  clang-2690385/bin/asan_device_setup  434
-rwxr-xr-x  clang-2690385/bin/clang  bin 0 -> 45503704 bytes
-rwxr-xr-x  clang-2690385/bin/clang++  bin 0 -> 45503704 bytes
-rwxr-xr-x  clang-2690385/bin/llvm-as  bin 0 -> 3301896 bytes
-rwxr-xr-x  clang-2690385/bin/llvm-dis  bin 0 -> 2670620 bytes
-rwxr-xr-x  clang-2690385/bin/llvm-link  bin 0 -> 4206096 bytes
-rw-r--r--  clang-2690385/bin/mips/analyzer  7
-rw-r--r--  clang-2690385/bin/mips/analyzer++  7
-rw-r--r--  clang-2690385/bin/mips64/analyzer  7
-rw-r--r--  clang-2690385/bin/mips64/analyzer++  7
-rw-r--r--  clang-2690385/bin/x86/analyzer  7
-rw-r--r--  clang-2690385/bin/x86/analyzer++  7
-rw-r--r--  clang-2690385/bin/x86_64/analyzer  7
-rw-r--r--  clang-2690385/bin/x86_64/analyzer++  7
-rwxr-xr-x  clang-2690385/lib64/LLVMgold.dylib  bin 0 -> 187428 bytes
l---------  clang-2690385/lib64/clang/3.8.256229  1
-rw-r--r--  clang-2690385/lib64/clang/3.8/include/Intrin.h  958
-rw-r--r--  clang-2690385/lib64/clang/3.8/include/__clang_cuda_runtime_wrapper.h  216
-rw-r--r--  clang-2690385/lib64/clang/3.8/include/__stddef_max_align_t.h  43
-rw-r--r--  clang-2690385/lib64/clang/3.8/include/__wmmintrin_aes.h  66
-rw-r--r--  clang-2690385/lib64/clang/3.8/include/__wmmintrin_pclmul.h  30
-rw-r--r--  clang-2690385/lib64/clang/3.8/include/adxintrin.h  86
-rw-r--r--  clang-2690385/lib64/clang/3.8/include/altivec.h  13919
-rw-r--r--  clang-2690385/lib64/clang/3.8/include/ammintrin.h  209
-rw-r--r--  clang-2690385/lib64/clang/3.8/include/arm_acle.h  308
-rw-r--r--  clang-2690385/lib64/clang/3.8/include/arm_neon.h  69237
-rw-r--r--  clang-2690385/lib64/clang/3.8/include/avx2intrin.h  1215
-rw-r--r--  clang-2690385/lib64/clang/3.8/include/avx512bwintrin.h  1542
-rw-r--r--  clang-2690385/lib64/clang/3.8/include/avx512cdintrin.h  131
-rw-r--r--  clang-2690385/lib64/clang/3.8/include/avx512dqintrin.h  778
-rw-r--r--  clang-2690385/lib64/clang/3.8/include/avx512erintrin.h  285
-rw-r--r--  clang-2690385/lib64/clang/3.8/include/avx512fintrin.h  3074
-rw-r--r--  clang-2690385/lib64/clang/3.8/include/avx512vlbwintrin.h  2336
-rw-r--r--  clang-2690385/lib64/clang/3.8/include/avx512vldqintrin.h  953
-rw-r--r--  clang-2690385/lib64/clang/3.8/include/avx512vlintrin.h  4606
-rw-r--r--  clang-2690385/lib64/clang/3.8/include/avxintrin.h  1318
-rw-r--r--  clang-2690385/lib64/clang/3.8/include/bmi2intrin.h  95
-rw-r--r--  clang-2690385/lib64/clang/3.8/include/bmiintrin.h  155
-rw-r--r--  clang-2690385/lib64/clang/3.8/include/cpuid.h  209
-rw-r--r--  clang-2690385/lib64/clang/3.8/include/cuda_builtin_vars.h  110
-rw-r--r--  clang-2690385/lib64/clang/3.8/include/emmintrin.h  1491
-rw-r--r--  clang-2690385/lib64/clang/3.8/include/f16cintrin.h  45
-rw-r--r--  clang-2690385/lib64/clang/3.8/include/float.h  124
-rw-r--r--  clang-2690385/lib64/clang/3.8/include/fma4intrin.h  230
-rw-r--r--  clang-2690385/lib64/clang/3.8/include/fmaintrin.h  228
-rw-r--r--  clang-2690385/lib64/clang/3.8/include/fxsrintrin.h  55
-rw-r--r--  clang-2690385/lib64/clang/3.8/include/htmintrin.h  226
-rw-r--r--  clang-2690385/lib64/clang/3.8/include/htmxlintrin.h  363
-rw-r--r--  clang-2690385/lib64/clang/3.8/include/ia32intrin.h  101
-rw-r--r--  clang-2690385/lib64/clang/3.8/include/immintrin.h  172
-rw-r--r--  clang-2690385/lib64/clang/3.8/include/inttypes.h  102
-rw-r--r--  clang-2690385/lib64/clang/3.8/include/iso646.h  43
-rw-r--r--  clang-2690385/lib64/clang/3.8/include/limits.h  118
-rw-r--r--  clang-2690385/lib64/clang/3.8/include/lzcntintrin.h  68
-rw-r--r--  clang-2690385/lib64/clang/3.8/include/mm3dnow.h  171
-rw-r--r--  clang-2690385/lib64/clang/3.8/include/mm_malloc.h  75
-rw-r--r--  clang-2690385/lib64/clang/3.8/include/mmintrin.h  503
-rw-r--r--  clang-2690385/lib64/clang/3.8/include/module.modulemap  171
-rw-r--r--  clang-2690385/lib64/clang/3.8/include/nmmintrin.h  30
-rw-r--r--  clang-2690385/lib64/clang/3.8/include/pmmintrin.h  116
-rw-r--r--  clang-2690385/lib64/clang/3.8/include/popcntintrin.h  58
-rw-r--r--  clang-2690385/lib64/clang/3.8/include/prfchwintrin.h  45
-rw-r--r--  clang-2690385/lib64/clang/3.8/include/rdseedintrin.h  56
-rw-r--r--  clang-2690385/lib64/clang/3.8/include/rtmintrin.h  59
-rw-r--r--  clang-2690385/lib64/clang/3.8/include/s390intrin.h  39
-rw-r--r--  clang-2690385/lib64/clang/3.8/include/sanitizer/allocator_interface.h  66
-rw-r--r--  clang-2690385/lib64/clang/3.8/include/sanitizer/asan_interface.h  151
-rw-r--r--  clang-2690385/lib64/clang/3.8/include/sanitizer/common_interface_defs.h  135
-rw-r--r--  clang-2690385/lib64/clang/3.8/include/sanitizer/coverage_interface.h  65
-rw-r--r--  clang-2690385/lib64/clang/3.8/include/sanitizer/dfsan_interface.h  116
-rw-r--r--  clang-2690385/lib64/clang/3.8/include/sanitizer/linux_syscall_hooks.h  3070
-rw-r--r--  clang-2690385/lib64/clang/3.8/include/sanitizer/lsan_interface.h  84
-rw-r--r--  clang-2690385/lib64/clang/3.8/include/sanitizer/msan_interface.h  111
-rw-r--r--  clang-2690385/lib64/clang/3.8/include/sanitizer/tsan_interface_atomic.h  222
-rw-r--r--  clang-2690385/lib64/clang/3.8/include/shaintrin.h  75
-rw-r--r--  clang-2690385/lib64/clang/3.8/include/smmintrin.h  508
-rw-r--r--  clang-2690385/lib64/clang/3.8/include/stdalign.h  35
-rw-r--r--  clang-2690385/lib64/clang/3.8/include/stdarg.h  52
-rw-r--r--  clang-2690385/lib64/clang/3.8/include/stdatomic.h  582
-rw-r--r--  clang-2690385/lib64/clang/3.8/include/stdbool.h  44
-rw-r--r--  clang-2690385/lib64/clang/3.8/include/stddef.h  137
-rw-r--r--  clang-2690385/lib64/clang/3.8/include/stdint.h  707
-rw-r--r--  clang-2690385/lib64/clang/3.8/include/stdnoreturn.h  30
-rw-r--r--  clang-2690385/lib64/clang/3.8/include/tbmintrin.h  154
-rw-r--r--  clang-2690385/lib64/clang/3.8/include/tgmath.h  1374
-rw-r--r--  clang-2690385/lib64/clang/3.8/include/tmmintrin.h  221
-rw-r--r--  clang-2690385/lib64/clang/3.8/include/unwind.h  282
-rw-r--r--  clang-2690385/lib64/clang/3.8/include/vadefs.h  65
-rw-r--r--  clang-2690385/lib64/clang/3.8/include/varargs.h  26
-rw-r--r--  clang-2690385/lib64/clang/3.8/include/vecintrin.h  8946
-rw-r--r--  clang-2690385/lib64/clang/3.8/include/wmmintrin.h  33
-rw-r--r--  clang-2690385/lib64/clang/3.8/include/x86intrin.h  57
-rw-r--r--  clang-2690385/lib64/clang/3.8/include/xmmintrin.h  1010
-rw-r--r--  clang-2690385/lib64/clang/3.8/include/xopintrin.h  782
-rw-r--r--  clang-2690385/lib64/clang/3.8/include/xsavecintrin.h  48
-rw-r--r--  clang-2690385/lib64/clang/3.8/include/xsaveintrin.h  58
-rw-r--r--  clang-2690385/lib64/clang/3.8/include/xsaveoptintrin.h  48
-rw-r--r--  clang-2690385/lib64/clang/3.8/include/xsavesintrin.h  58
-rw-r--r--  clang-2690385/lib64/clang/3.8/include/xtestintrin.h  41
-rwxr-xr-x  clang-2690385/lib64/clang/3.8/lib/linux/libclang_rt.asan-aarch64-android.so  bin 0 -> 675416 bytes
-rwxr-xr-x  clang-2690385/lib64/clang/3.8/lib/linux/libclang_rt.asan-arm-android.so  bin 0 -> 695264 bytes
-rwxr-xr-x  clang-2690385/lib64/clang/3.8/lib/linux/libclang_rt.asan-i686-android.so  bin 0 -> 789504 bytes
-rw-r--r--  clang-2690385/lib64/clang/3.8/lib/linux/libclang_rt.profile-aarch64-android.a  bin 0 -> 147994 bytes
-rw-r--r--  clang-2690385/lib64/clang/3.8/lib/linux/libclang_rt.profile-arm-android.a  bin 0 -> 84090 bytes
-rw-r--r--  clang-2690385/lib64/clang/3.8/lib/linux/libclang_rt.profile-i686-android.a  bin 0 -> 88978 bytes
-rw-r--r--  clang-2690385/lib64/clang/3.8/lib/linux/libclang_rt.profile-mips64el-android.a  bin 0 -> 222274 bytes
-rw-r--r--  clang-2690385/lib64/clang/3.8/lib/linux/libclang_rt.profile-mipsel-android.a  bin 0 -> 95910 bytes
-rw-r--r--  clang-2690385/lib64/clang/3.8/lib/linux/libclang_rt.profile-x86_64-android.a  bin 0 -> 139738 bytes
-rwxr-xr-x  clang-2690385/lib64/libLLVM.dylib  bin 0 -> 37257336 bytes
-rwxr-xr-x  clang-2690385/lib64/libc++.dylib  bin 0 -> 1553360 bytes
-rw-r--r--  clang-2690385/repo.prop  40
-rw-r--r--  clang-2690385/tools/scan-build/CMakeLists.txt  82
-rw-r--r--  clang-2690385/tools/scan-build/Makefile  53
-rwxr-xr-x  clang-2690385/tools/scan-build/bin/scan-build  1832
-rw-r--r--  clang-2690385/tools/scan-build/bin/scan-build.bat  1
-rwxr-xr-x  clang-2690385/tools/scan-build/bin/set-xcode-analyzer  114
-rwxr-xr-x  clang-2690385/tools/scan-build/libexec/c++-analyzer  8
-rw-r--r--  clang-2690385/tools/scan-build/libexec/c++-analyzer.bat  1
-rwxr-xr-x  clang-2690385/tools/scan-build/libexec/ccc-analyzer  777
-rw-r--r--  clang-2690385/tools/scan-build/libexec/ccc-analyzer.bat  1
-rw-r--r--  clang-2690385/tools/scan-build/man/scan-build.1  349
-rw-r--r--  clang-2690385/tools/scan-build/share/scan-build/scanview.css  62
-rw-r--r--  clang-2690385/tools/scan-build/share/scan-build/sorttable.js  492
-rw-r--r--  clang-2690385/tools/scan-view/CMakeLists.txt  41
-rw-r--r--  clang-2690385/tools/scan-view/Makefile  37
-rwxr-xr-x  clang-2690385/tools/scan-view/bin/scan-view  143
-rw-r--r--  clang-2690385/tools/scan-view/share/FileRadar.scpt  bin 0 -> 18418 bytes
-rw-r--r--  clang-2690385/tools/scan-view/share/GetRadarVersion.scpt  0
-rw-r--r--  clang-2690385/tools/scan-view/share/Reporter.py  248
-rw-r--r--  clang-2690385/tools/scan-view/share/ScanView.py  767
-rw-r--r--  clang-2690385/tools/scan-view/share/bugcatcher.ico  bin 0 -> 318 bytes
-rw-r--r--  clang-2690385/tools/scan-view/share/startfile.py  203
145 files changed, 132217 insertions, 0 deletions
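
This commit imports the clang-2690385 prebuilt toolchain in one piece: the clang/clang++ drivers and a few LLVM tools, the per-ABI static-analyzer wrapper scripts, asan_device_setup plus the Android ASan and profile runtimes, the builtin headers, and the scan-build/scan-view tooling. A quick post-sync sanity check (paths from the file list above; the expected version string is taken from the AndroidVersion.txt hunk below):

    $ cat clang-2690385/AndroidVersion.txt
    3.8.256229
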
diff --git a/clang-2690385/AndroidVersion.txt b/clang-2690385/AndroidVersion.txt
new file mode 100644
index 00000000..7026a5fb
--- /dev/null
+++ b/clang-2690385/AndroidVersion.txt
@@ -0,0 +1 @@
+3.8.256229
diff --git a/clang-2690385/MODULE_LICENSE_BSD_LIKE b/clang-2690385/MODULE_LICENSE_BSD_LIKE
new file mode 100644
index 00000000..e69de29b
--- /dev/null
+++ b/clang-2690385/MODULE_LICENSE_BSD_LIKE
diff --git a/clang-2690385/MODULE_LICENSE_MIT b/clang-2690385/MODULE_LICENSE_MIT
new file mode 100644
index 00000000..e69de29b
--- /dev/null
+++ b/clang-2690385/MODULE_LICENSE_MIT
diff --git a/clang-2690385/NOTICE b/clang-2690385/NOTICE
new file mode 100644
index 00000000..83addef2
--- /dev/null
+++ b/clang-2690385/NOTICE
@@ -0,0 +1,457 @@
+==============================================================================
+LLVM Release License
+==============================================================================
+University of Illinois/NCSA
+Open Source License
+
+Copyright (c) 2007-2014 University of Illinois at Urbana-Champaign.
+All rights reserved.
+
+Developed by:
+
+ LLVM Team
+
+ University of Illinois at Urbana-Champaign
+
+ http://llvm.org
+
+Permission is hereby granted, free of charge, to any person obtaining a copy of
+this software and associated documentation files (the "Software"), to deal with
+the Software without restriction, including without limitation the rights to
+use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies
+of the Software, and to permit persons to whom the Software is furnished to do
+so, subject to the following conditions:
+
+ * Redistributions of source code must retain the above copyright notice,
+ this list of conditions and the following disclaimers.
+
+ * Redistributions in binary form must reproduce the above copyright notice,
+ this list of conditions and the following disclaimers in the
+ documentation and/or other materials provided with the distribution.
+
+ * Neither the names of the LLVM Team, University of Illinois at
+ Urbana-Champaign, nor the names of its contributors may be used to
+ endorse or promote products derived from this Software without specific
+ prior written permission.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS
+FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+CONTRIBUTORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS WITH THE
+SOFTWARE.
+
+==============================================================================
+The LLVM software contains code written by third parties. Such software will
+have its own individual LICENSE.TXT file in the directory in which it appears.
+This file will describe the copyrights, license, and restrictions which apply
+to that code.
+
+The disclaimer of warranty in the University of Illinois Open Source License
+applies to all code in the LLVM Distribution, and nothing in any of the
+other licenses gives permission to use the names of the LLVM Team or the
+University of Illinois to endorse or promote products derived from this
+Software.
+
+The following pieces of software have additional or alternate copyrights,
+licenses, and/or restrictions:
+
+Program Directory
+------- ---------
+<none yet>
+
+
+==============================================================================
+compiler_rt License
+==============================================================================
+
+The compiler_rt library is dual licensed under both the University of Illinois
+"BSD-Like" license and the MIT license. As a user of this code you may choose
+to use it under either license. As a contributor, you agree to allow your code
+to be used under both.
+
+Full text of the relevant licenses is included below.
+
+==============================================================================
+
+University of Illinois/NCSA
+Open Source License
+
+Copyright (c) 2009-2014 by the contributors listed in CREDITS.TXT
+
+All rights reserved.
+
+Developed by:
+
+ LLVM Team
+
+ University of Illinois at Urbana-Champaign
+
+ http://llvm.org
+
+Permission is hereby granted, free of charge, to any person obtaining a copy of
+this software and associated documentation files (the "Software"), to deal with
+the Software without restriction, including without limitation the rights to
+use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies
+of the Software, and to permit persons to whom the Software is furnished to do
+so, subject to the following conditions:
+
+ * Redistributions of source code must retain the above copyright notice,
+ this list of conditions and the following disclaimers.
+
+ * Redistributions in binary form must reproduce the above copyright notice,
+ this list of conditions and the following disclaimers in the
+ documentation and/or other materials provided with the distribution.
+
+ * Neither the names of the LLVM Team, University of Illinois at
+ Urbana-Champaign, nor the names of its contributors may be used to
+ endorse or promote products derived from this Software without specific
+ prior written permission.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS
+FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+CONTRIBUTORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS WITH THE
+SOFTWARE.
+
+==============================================================================
+
+Copyright (c) 2009-2014 by the contributors listed in CREDITS.TXT
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+THE SOFTWARE.
+
+==============================================================================
+Copyrights and Licenses for Third Party Software Distributed with LLVM:
+==============================================================================
+The LLVM software contains code written by third parties. Such software will
+have its own individual LICENSE.TXT file in the directory in which it appears.
+This file will describe the copyrights, license, and restrictions which apply
+to that code.
+
+The disclaimer of warranty in the University of Illinois Open Source License
+applies to all code in the LLVM Distribution, and nothing in any of the
+other licenses gives permission to use the names of the LLVM Team or the
+University of Illinois to endorse or promote products derived from this
+Software.
+
+
+==============================================================================
+libc++ License
+==============================================================================
+
+The libc++ library is dual licensed under both the University of Illinois
+"BSD-Like" license and the MIT license. As a user of this code you may choose
+to use it under either license. As a contributor, you agree to allow your code
+to be used under both.
+
+Full text of the relevant licenses is included below.
+
+==============================================================================
+
+University of Illinois/NCSA
+Open Source License
+
+Copyright (c) 2009-2014 by the contributors listed in CREDITS.TXT
+
+All rights reserved.
+
+Developed by:
+
+ LLVM Team
+
+ University of Illinois at Urbana-Champaign
+
+ http://llvm.org
+
+Permission is hereby granted, free of charge, to any person obtaining a copy of
+this software and associated documentation files (the "Software"), to deal with
+the Software without restriction, including without limitation the rights to
+use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies
+of the Software, and to permit persons to whom the Software is furnished to do
+so, subject to the following conditions:
+
+ * Redistributions of source code must retain the above copyright notice,
+ this list of conditions and the following disclaimers.
+
+ * Redistributions in binary form must reproduce the above copyright notice,
+ this list of conditions and the following disclaimers in the
+ documentation and/or other materials provided with the distribution.
+
+ * Neither the names of the LLVM Team, University of Illinois at
+ Urbana-Champaign, nor the names of its contributors may be used to
+ endorse or promote products derived from this Software without specific
+ prior written permission.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS
+FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+CONTRIBUTORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS WITH THE
+SOFTWARE.
+
+==============================================================================
+
+Copyright (c) 2009-2014 by the contributors listed in CREDITS.TXT
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+THE SOFTWARE.
+
+==============================================================================
+libc++abi License
+==============================================================================
+
+The libc++abi library is dual licensed under both the University of Illinois
+"BSD-Like" license and the MIT license. As a user of this code you may choose
+to use it under either license. As a contributor, you agree to allow your code
+to be used under both.
+
+Full text of the relevant licenses is included below.
+
+==============================================================================
+
+University of Illinois/NCSA
+Open Source License
+
+Copyright (c) 2009-2014 by the contributors listed in CREDITS.TXT
+
+All rights reserved.
+
+Developed by:
+
+ LLVM Team
+
+ University of Illinois at Urbana-Champaign
+
+ http://llvm.org
+
+Permission is hereby granted, free of charge, to any person obtaining a copy of
+this software and associated documentation files (the "Software"), to deal with
+the Software without restriction, including without limitation the rights to
+use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies
+of the Software, and to permit persons to whom the Software is furnished to do
+so, subject to the following conditions:
+
+ * Redistributions of source code must retain the above copyright notice,
+ this list of conditions and the following disclaimers.
+
+ * Redistributions in binary form must reproduce the above copyright notice,
+ this list of conditions and the following disclaimers in the
+ documentation and/or other materials provided with the distribution.
+
+ * Neither the names of the LLVM Team, University of Illinois at
+ Urbana-Champaign, nor the names of its contributors may be used to
+ endorse or promote products derived from this Software without specific
+ prior written permission.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS
+FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+CONTRIBUTORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS WITH THE
+SOFTWARE.
+
+==============================================================================
+
+Copyright (c) 2009-2014 by the contributors listed in CREDITS.TXT
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+THE SOFTWARE.
+
+==============================================================================
+libc++abi License
+==============================================================================
+
+The libc++abi library is dual licensed under both the University of Illinois
+"BSD-Like" license and the MIT license. As a user of this code you may choose
+to use it under either license. As a contributor, you agree to allow your code
+to be used under both.
+
+Full text of the relevant licenses is included below.
+
+==============================================================================
+
+University of Illinois/NCSA
+Open Source License
+
+Copyright (c) 2009-2015 by the contributors listed in CREDITS.TXT
+
+All rights reserved.
+
+Developed by:
+
+ LLVM Team
+
+ University of Illinois at Urbana-Champaign
+
+ http://llvm.org
+
+Permission is hereby granted, free of charge, to any person obtaining a copy of
+this software and associated documentation files (the "Software"), to deal with
+the Software without restriction, including without limitation the rights to
+use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies
+of the Software, and to permit persons to whom the Software is furnished to do
+so, subject to the following conditions:
+
+ * Redistributions of source code must retain the above copyright notice,
+ this list of conditions and the following disclaimers.
+
+ * Redistributions in binary form must reproduce the above copyright notice,
+ this list of conditions and the following disclaimers in the
+ documentation and/or other materials provided with the distribution.
+
+ * Neither the names of the LLVM Team, University of Illinois at
+ Urbana-Champaign, nor the names of its contributors may be used to
+ endorse or promote products derived from this Software without specific
+ prior written permission.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS
+FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+CONTRIBUTORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS WITH THE
+SOFTWARE.
+
+==============================================================================
+
+Copyright (c) 2009-2014 by the contributors listed in CREDITS.TXT
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+THE SOFTWARE.
+
+==============================================================================
+LLVM Release License
+==============================================================================
+University of Illinois/NCSA
+Open Source License
+
+Copyright (c) 2003-2014 University of Illinois at Urbana-Champaign.
+All rights reserved.
+
+Developed by:
+
+ LLVM Team
+
+ University of Illinois at Urbana-Champaign
+
+ http://llvm.org
+
+Permission is hereby granted, free of charge, to any person obtaining a copy of
+this software and associated documentation files (the "Software"), to deal with
+the Software without restriction, including without limitation the rights to
+use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies
+of the Software, and to permit persons to whom the Software is furnished to do
+so, subject to the following conditions:
+
+ * Redistributions of source code must retain the above copyright notice,
+ this list of conditions and the following disclaimers.
+
+ * Redistributions in binary form must reproduce the above copyright notice,
+ this list of conditions and the following disclaimers in the
+ documentation and/or other materials provided with the distribution.
+
+ * Neither the names of the LLVM Team, University of Illinois at
+ Urbana-Champaign, nor the names of its contributors may be used to
+ endorse or promote products derived from this Software without specific
+ prior written permission.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS
+FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+CONTRIBUTORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS WITH THE
+SOFTWARE.
+
+==============================================================================
+Copyrights and Licenses for Third Party Software Distributed with LLVM:
+==============================================================================
+The LLVM software contains code written by third parties. Such software will
+have its own individual LICENSE.TXT file in the directory in which it appears.
+This file will describe the copyrights, license, and restrictions which apply
+to that code.
+
+The disclaimer of warranty in the University of Illinois Open Source License
+applies to all code in the LLVM Distribution, and nothing in any of the
+other licenses gives permission to use the names of the LLVM Team or the
+University of Illinois to endorse or promote products derived from this
+Software.
+
+The following pieces of software have additional or alternate copyrights,
+licenses, and/or restrictions:
+
+Program Directory
+------- ---------
+Autoconf llvm/autoconf
+ llvm/projects/ModuleMaker/autoconf
+Google Test llvm/utils/unittest/googletest
+OpenBSD regex llvm/lib/Support/{reg*, COPYRIGHT.regex}
+pyyaml tests llvm/test/YAMLParser/{*.data, LICENSE.TXT}
+ARM contributions llvm/lib/Target/ARM/LICENSE.TXT
+md5 contributions llvm/lib/Support/MD5.cpp llvm/include/llvm/Support/MD5.h
diff --git a/clang-2690385/bin/FileCheck b/clang-2690385/bin/FileCheck
new file mode 100755
index 00000000..711fa983
--- /dev/null
+++ b/clang-2690385/bin/FileCheck
Binary files differ
diff --git a/clang-2690385/bin/arm64-v8a/analyzer b/clang-2690385/bin/arm64-v8a/analyzer
new file mode 100644
index 00000000..3320e6b5
--- /dev/null
+++ b/clang-2690385/bin/arm64-v8a/analyzer
@@ -0,0 +1,7 @@
+#!/bin/bash
+if [ "$1" != "-cc1" ]; then
+ `dirname $0`/../clang -target aarch64-none-linux-android "$@"
+else
+ # target/triple already spelled out.
+ `dirname $0`/../clang "$@"
+fi
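
Each per-ABI analyzer is the same seven-line dispatch script: unless the caller is already driving the bare front end with -cc1 (in which case the triple must be spelled out in the arguments anyway), it injects the ABI's default target triple and runs the sibling clang driver one directory up. An illustrative invocation (foo.c is a placeholder):

    $ clang-2690385/bin/arm64-v8a/analyzer -c foo.c
    # equivalent to: clang-2690385/bin/clang -target aarch64-none-linux-android -c foo.c
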
diff --git a/clang-2690385/bin/arm64-v8a/analyzer++ b/clang-2690385/bin/arm64-v8a/analyzer++
new file mode 100644
index 00000000..88a2e205
--- /dev/null
+++ b/clang-2690385/bin/arm64-v8a/analyzer++
@@ -0,0 +1,7 @@
+#!/bin/bash
+if [ "$1" != "-cc1" ]; then
+ `dirname $0`/../clang++ -target aarch64-none-linux-android "$@"
+else
+ # target/triple already spelled out.
+ `dirname $0`/../clang++ "$@"
+fi
diff --git a/clang-2690385/bin/armeabi-v7a-hard/analyzer b/clang-2690385/bin/armeabi-v7a-hard/analyzer
new file mode 100644
index 00000000..5a708a7d
--- /dev/null
+++ b/clang-2690385/bin/armeabi-v7a-hard/analyzer
@@ -0,0 +1,7 @@
+#!/bin/bash
+if [ "$1" != "-cc1" ]; then
+ `dirname $0`/../clang -target armv7-none-linux-androideabi "$@"
+else
+ # target/triple already spelled out.
+ `dirname $0`/../clang "$@"
+fi
diff --git a/clang-2690385/bin/armeabi-v7a-hard/analyzer++ b/clang-2690385/bin/armeabi-v7a-hard/analyzer++
new file mode 100644
index 00000000..3541679f
--- /dev/null
+++ b/clang-2690385/bin/armeabi-v7a-hard/analyzer++
@@ -0,0 +1,7 @@
+#!/bin/bash
+if [ "$1" != "-cc1" ]; then
+ `dirname $0`/../clang++ -target armv7-none-linux-androideabi "$@"
+else
+ # target/triple already spelled out.
+ `dirname $0`/../clang++ "$@"
+fi
diff --git a/clang-2690385/bin/armeabi-v7a/analyzer b/clang-2690385/bin/armeabi-v7a/analyzer
new file mode 100644
index 00000000..5a708a7d
--- /dev/null
+++ b/clang-2690385/bin/armeabi-v7a/analyzer
@@ -0,0 +1,7 @@
+#!/bin/bash
+if [ "$1" != "-cc1" ]; then
+ `dirname $0`/../clang -target armv7-none-linux-androideabi "$@"
+else
+ # target/triple already spelled out.
+ `dirname $0`/../clang "$@"
+fi
diff --git a/clang-2690385/bin/armeabi-v7a/analyzer++ b/clang-2690385/bin/armeabi-v7a/analyzer++
new file mode 100644
index 00000000..3541679f
--- /dev/null
+++ b/clang-2690385/bin/armeabi-v7a/analyzer++
@@ -0,0 +1,7 @@
+#!/bin/bash
+if [ "$1" != "-cc1" ]; then
+ `dirname $0`/../clang++ -target armv7-none-linux-androideabi "$@"
+else
+ # target/triple already spelled out.
+ `dirname $0`/../clang++ "$@"
+fi
diff --git a/clang-2690385/bin/armeabi/analyzer b/clang-2690385/bin/armeabi/analyzer
new file mode 100644
index 00000000..150aa83e
--- /dev/null
+++ b/clang-2690385/bin/armeabi/analyzer
@@ -0,0 +1,7 @@
+#!/bin/bash
+if [ "$1" != "-cc1" ]; then
+ `dirname $0`/../clang -target armv5te-none-linux-androideabi "$@"
+else
+ # target/triple already spelled out.
+ `dirname $0`/../clang "$@"
+fi
diff --git a/clang-2690385/bin/armeabi/analyzer++ b/clang-2690385/bin/armeabi/analyzer++
new file mode 100644
index 00000000..13f37bdb
--- /dev/null
+++ b/clang-2690385/bin/armeabi/analyzer++
@@ -0,0 +1,7 @@
+#!/bin/bash
+if [ "$1" != "-cc1" ]; then
+ `dirname $0`/../clang++ -target armv5te-none-linux-androideabi "$@"
+else
+ # target/triple already spelled out.
+ `dirname $0`/../clang++ "$@"
+fi
diff --git a/clang-2690385/bin/asan_device_setup b/clang-2690385/bin/asan_device_setup
new file mode 100755
index 00000000..6cb7b94c
--- /dev/null
+++ b/clang-2690385/bin/asan_device_setup
@@ -0,0 +1,434 @@
+#!/bin/bash
+#===- lib/asan/scripts/asan_device_setup -----------------------------------===#
+#
+# The LLVM Compiler Infrastructure
+#
+# This file is distributed under the University of Illinois Open Source
+# License. See LICENSE.TXT for details.
+#
+# Prepare Android device to run ASan applications.
+#
+#===------------------------------------------------------------------------===#
+
+set -e
+
+HERE="$(cd "$(dirname "$0")" && pwd)"
+
+revert=no
+extra_options=
+device=
+lib=
+use_su=0
+
+function usage {
+ echo "usage: $0 [--revert] [--device device-id] [--lib path] [--extra-options options]"
+ echo " --revert: Uninstall ASan from the device."
+ echo " --lib: Path to ASan runtime library."
+ echo " --extra-options: Extra ASAN_OPTIONS."
+ echo " --device: Install to the given device. Use 'adb devices' to find"
+ echo " device-id."
+ echo " --use-su: Use 'su -c' prefix for every adb command instead of using"
+ echo " 'adb root' once."
+ echo
+ exit 1
+}
+
+function adb_push {
+ if [ $use_su -eq 0 ]; then
+ $ADB push "$1" "$2"
+ else
+ local FILENAME=$(basename $1)
+ $ADB push "$1" "/data/local/tmp/$FILENAME"
+ $ADB shell su -c "rm \\\"$2/$FILENAME\\\"" >&/dev/null
+ $ADB shell su -c "cat \\\"/data/local/tmp/$FILENAME\\\" > \\\"$2/$FILENAME\\\""
+ $ADB shell su -c "rm \\\"/data/local/tmp/$FILENAME\\\""
+ fi
+}
+
+function adb_remount {
+ if [ $use_su -eq 0 ]; then
+ $ADB remount
+ else
+ local STORAGE=`$ADB shell mount | grep /system | cut -d ' ' -f1`
+ if [ "$STORAGE" != "" ]; then
+ echo Remounting $STORAGE at /system
+ $ADB shell su -c "mount -o remount,rw $STORAGE /system"
+ else
+ echo Failed to get storage device name for "/system" mount point
+ fi
+ fi
+}
+
+function adb_shell {
+ if [ $use_su -eq 0 ]; then
+ $ADB shell $@
+ else
+ $ADB shell su -c "$*"
+ fi
+}
+
+function adb_root {
+ if [ $use_su -eq 0 ]; then
+ $ADB root
+ fi
+}
+
+function adb_wait_for_device {
+ $ADB wait-for-device
+}
+
+function adb_pull {
+ if [ $use_su -eq 0 ]; then
+ $ADB pull "$1" "$2"
+ else
+ local FILENAME=$(basename $1)
+ $ADB shell rm "/data/local/tmp/$FILENAME" >&/dev/null
+ $ADB shell su -c "[ -f \\\"$1\\\" ] && cat \\\"$1\\\" > \\\"/data/local/tmp/$FILENAME\\\" && chown root.shell \\\"/data/local/tmp/$FILENAME\\\" && chmod 755 \\\"/data/local/tmp/$FILENAME\\\"" &&
+ $ADB pull "/data/local/tmp/$FILENAME" "$2" >&/dev/null && $ADB shell "rm \"/data/local/tmp/$FILENAME\""
+ fi
+}
+
+function get_device_arch { # OUT OUT64
+ local _outvar=$1
+ local _outvar64=$2
+ local _ABI=$(adb_shell getprop ro.product.cpu.abi)
+ local _ARCH=
+ local _ARCH64=
+ if [[ $_ABI == x86* ]]; then
+ _ARCH=i686
+ elif [[ $_ABI == armeabi* ]]; then
+ _ARCH=arm
+ elif [[ $_ABI == arm64-v8a* ]]; then
+ _ARCH=arm
+ _ARCH64=aarch64
+ else
+ echo "Unrecognized device ABI: $_ABI"
+ exit 1
+ fi
+ eval $_outvar=\$_ARCH
+ eval $_outvar64=\$_ARCH64
+}
+
+while [[ $# > 0 ]]; do
+ case $1 in
+ --revert)
+ revert=yes
+ ;;
+ --extra-options)
+ shift
+ if [[ $# == 0 ]]; then
+ echo "--extra-options requires an argument."
+ exit 1
+ fi
+ extra_options="$1"
+ ;;
+ --lib)
+ shift
+ if [[ $# == 0 ]]; then
+ echo "--lib requires an argument."
+ exit 1
+ fi
+ lib="$1"
+ ;;
+ --device)
+ shift
+ if [[ $# == 0 ]]; then
+ echo "--device requires an argument."
+ exit 1
+ fi
+ device="$1"
+ ;;
+ --use-su)
+ use_su=1
+ ;;
+ *)
+ usage
+ ;;
+ esac
+ shift
+done
+
+ADB=${ADB:-adb}
+if [[ x$device != x ]]; then
+ ADB="$ADB -s $device"
+fi
+
+if [ $use_su -eq 1 ]; then
+ # Test if 'su' is present on the device
+ SU_TEST_OUT=`$ADB shell su -c "echo foo" 2>&1 | sed 's/\r$//'`
+ if [ $? != 0 -o "$SU_TEST_OUT" != "foo" ]; then
+ echo "ERROR: Cannot use 'su -c':"
+ echo "$ adb shell su -c \"echo foo\""
+ echo $SU_TEST_OUT
+ echo "Check that 'su' binary is correctly installed on the device or omit"
+ echo " --use-su flag"
+ exit 1
+ fi
+fi
+
+echo '>> Remounting /system rw'
+adb_wait_for_device
+adb_root
+adb_wait_for_device
+adb_remount
+adb_wait_for_device
+
+get_device_arch ARCH ARCH64
+echo "Target architecture: $ARCH"
+ASAN_RT="libclang_rt.asan-$ARCH-android.so"
+if [[ -n $ARCH64 ]]; then
+ echo "Target architecture: $ARCH64"
+ ASAN_RT64="libclang_rt.asan-$ARCH64-android.so"
+fi
+
+if [[ x$revert == xyes ]]; then
+ echo '>> Uninstalling ASan'
+
+ if ! adb_shell ls -l /system/bin/app_process | grep -o '\->.*app_process' >&/dev/null; then
+ echo '>> Pre-L device detected.'
+ adb_shell mv /system/bin/app_process.real /system/bin/app_process
+ adb_shell rm /system/bin/asanwrapper
+ elif ! adb_shell ls -l /system/bin/app_process64.real | grep -o 'No such file or directory' >&/dev/null; then
+ # 64-bit installation.
+ adb_shell mv /system/bin/app_process32.real /system/bin/app_process32
+ adb_shell mv /system/bin/app_process64.real /system/bin/app_process64
+ adb_shell rm /system/bin/asanwrapper
+ adb_shell rm /system/bin/asanwrapper64
+ else
+ # 32-bit installation.
+ adb_shell rm /system/bin/app_process.wrap
+ adb_shell rm /system/bin/asanwrapper
+ adb_shell rm /system/bin/app_process
+ adb_shell ln -s /system/bin/app_process32 /system/bin/app_process
+ fi
+
+ echo '>> Restarting shell'
+ adb_shell stop
+ adb_shell start
+
+ # Remove the library on the last step to give a chance to the 'su' binary to
+ # be executed without problem.
+ adb_shell rm /system/lib/$ASAN_RT
+
+ echo '>> Done'
+ exit 0
+fi
+
+if [[ -d "$lib" ]]; then
+ ASAN_RT_PATH="$lib"
+elif [[ -f "$lib" && "$lib" == *"$ASAN_RT" ]]; then
+ ASAN_RT_PATH=$(dirname "$lib")
+elif [[ -f "$HERE/$ASAN_RT" ]]; then
+ ASAN_RT_PATH="$HERE"
+elif [[ $(basename "$HERE") == "bin" ]]; then
+ # We could be in the toolchain's base directory.
+ # Consider ../lib, ../lib/asan, ../lib/linux,
+ # ../lib/clang/$VERSION/lib/linux, and ../lib64/clang/$VERSION/lib/linux.
+ P=$(ls "$HERE"/../lib/"$ASAN_RT" \
+ "$HERE"/../lib/asan/"$ASAN_RT" \
+ "$HERE"/../lib/linux/"$ASAN_RT" \
+ "$HERE"/../lib/clang/*/lib/linux/"$ASAN_RT" \
+ "$HERE"/../lib64/clang/*/lib/linux/"$ASAN_RT" 2>/dev/null | sort | tail -1)
+ if [[ -n "$P" ]]; then
+ ASAN_RT_PATH="$(dirname "$P")"
+ fi
+fi
+
+if [[ -z "$ASAN_RT_PATH" || ! -f "$ASAN_RT_PATH/$ASAN_RT" ]]; then
+ echo ">> ASan runtime library not found"
+ exit 1
+fi
+
+if [[ -n "$ASAN_RT64" ]]; then
+ if [[ -z "$ASAN_RT_PATH" || ! -f "$ASAN_RT_PATH/$ASAN_RT64" ]]; then
+ echo ">> ASan runtime library not found"
+ exit 1
+ fi
+fi
+
+TMPDIRBASE=$(mktemp -d)
+TMPDIROLD="$TMPDIRBASE/old"
+TMPDIR="$TMPDIRBASE/new"
+mkdir "$TMPDIROLD"
+
+RELEASE=$(adb_shell getprop ro.build.version.release)
+PRE_L=0
+if echo "$RELEASE" | grep '^4\.' >&/dev/null; then
+ PRE_L=1
+fi
+
+if ! adb_shell ls -l /system/bin/app_process | grep -o '\->.*app_process' >&/dev/null; then
+
+ if adb_pull /system/bin/app_process.real /dev/null >&/dev/null; then
+ echo '>> Old-style ASan installation detected. Reverting.'
+ adb_shell mv /system/bin/app_process.real /system/bin/app_process
+ fi
+
+ echo '>> Pre-L device detected. Setting up app_process symlink.'
+ adb_shell mv /system/bin/app_process /system/bin/app_process32
+ adb_shell ln -s /system/bin/app_process32 /system/bin/app_process
+fi
+
+echo '>> Copying files from the device'
+if [[ -n "$ASAN_RT64" ]]; then
+ adb_pull /system/lib/"$ASAN_RT" "$TMPDIROLD" || true
+ adb_pull /system/lib64/"$ASAN_RT64" "$TMPDIROLD" || true
+ adb_pull /system/bin/app_process32 "$TMPDIROLD" || true
+ adb_pull /system/bin/app_process32.real "$TMPDIROLD" || true
+ adb_pull /system/bin/app_process64 "$TMPDIROLD" || true
+ adb_pull /system/bin/app_process64.real "$TMPDIROLD" || true
+ adb_pull /system/bin/asanwrapper "$TMPDIROLD" || true
+ adb_pull /system/bin/asanwrapper64 "$TMPDIROLD" || true
+else
+ adb_pull /system/lib/"$ASAN_RT" "$TMPDIROLD" || true
+ adb_pull /system/bin/app_process32 "$TMPDIROLD" || true
+ adb_pull /system/bin/app_process.wrap "$TMPDIROLD" || true
+ adb_pull /system/bin/asanwrapper "$TMPDIROLD" || true
+fi
+cp -r "$TMPDIROLD" "$TMPDIR"
+
+if [[ -f "$TMPDIR/app_process.wrap" || -f "$TMPDIR/app_process64.real" ]]; then
+ echo ">> Previous installation detected"
+else
+ echo ">> New installation"
+fi
+
+echo '>> Generating wrappers'
+
+cp "$ASAN_RT_PATH/$ASAN_RT" "$TMPDIR/"
+if [[ -n "$ASAN_RT64" ]]; then
+ cp "$ASAN_RT_PATH/$ASAN_RT64" "$TMPDIR/"
+fi
+
+# FIXME: alloc_dealloc_mismatch=0 prevents a failure in libdvm startup,
+# which may or may not be a real bug (probably not).
+ASAN_OPTIONS=start_deactivated=1,alloc_dealloc_mismatch=0,malloc_context_size=0
+
+function generate_zygote_wrapper { # from, to, asan_rt
+ local _from=$1
+ local _to=$2
+ local _asan_rt=$3
+ cat <<EOF >"$TMPDIR/$_from"
+#!/system/bin/sh-from-zygote
+ASAN_OPTIONS=$ASAN_OPTIONS \\
+ASAN_ACTIVATION_OPTIONS=include_if_exists=/data/local/tmp/asan.options.%b \\
+LD_PRELOAD=\$LD_PRELOAD:$_asan_rt \\
+exec $_to \$@
+
+EOF
+}
+
+# On Android-L not allowing user segv handler breaks some applications.
+if [[ PRE_L -eq 0 ]]; then
+ ASAN_OPTIONS="$ASAN_OPTIONS,allow_user_segv_handler=1"
+fi
+
+if [[ x$extra_options != x ]] ; then
+ ASAN_OPTIONS="$ASAN_OPTIONS,$extra_options"
+fi
+
+# Zygote wrapper.
+if [[ -f "$TMPDIR/app_process64" ]]; then
+ # A 64-bit device.
+ if [[ ! -f "$TMPDIR/app_process64.real" ]]; then
+ # New installation.
+ mv "$TMPDIR/app_process32" "$TMPDIR/app_process32.real"
+ mv "$TMPDIR/app_process64" "$TMPDIR/app_process64.real"
+ fi
+ generate_zygote_wrapper "app_process32" "/system/bin/app_process32.real" "$ASAN_RT"
+ generate_zygote_wrapper "app_process64" "/system/bin/app_process64.real" "$ASAN_RT64"
+else
+ # A 32-bit device.
+ generate_zygote_wrapper "app_process.wrap" "/system/bin/app_process32" "$ASAN_RT"
+fi
+
+# General command-line tool wrapper (use for anything that's not started as
+# zygote).
+cat <<EOF >"$TMPDIR/asanwrapper"
+#!/system/bin/sh
+LD_PRELOAD=$ASAN_RT \\
+exec \$@
+
+EOF
+
+if [[ -n "$ASAN_RT64" ]]; then
+ cat <<EOF >"$TMPDIR/asanwrapper64"
+#!/system/bin/sh
+LD_PRELOAD=$ASAN_RT64 \\
+exec \$@
+
+EOF
+fi
+
+function install { # from, to, chmod, chcon
+ local _from=$1
+ local _to=$2
+ local _mode=$3
+ local _context=$4
+ local _basename="$(basename "$_from")"
+ echo "Installing $_to/$_basename $_mode $_context"
+ adb_push "$_from" "$_to/$_basename"
+ adb_shell chown root.shell "$_to/$_basename"
+ if [[ -n "$_mode" ]]; then
+ adb_shell chmod "$_mode" "$_to/$_basename"
+ fi
+ if [[ -n "$_context" ]]; then
+ adb_shell chcon "$_context" "$_to/$_basename"
+ fi
+}
+
+if ! ( cd "$TMPDIRBASE" && diff -qr old/ new/ ) ; then
+ # Make SELinux happy by keeping app_process wrapper and the shell
+ # it runs on in zygote domain.
+ ENFORCING=0
+ if adb_shell getenforce | grep Enforcing >/dev/null; then
+ # Sometimes shell is not allowed to change file contexts.
+ # Temporarily switch to permissive.
+ ENFORCING=1
+ adb_shell setenforce 0
+ fi
+
+ if [[ PRE_L -eq 1 ]]; then
+ CTX=u:object_r:system_file:s0
+ else
+ CTX=u:object_r:zygote_exec:s0
+ fi
+
+ echo '>> Pushing files to the device'
+
+ if [[ -n "$ASAN_RT64" ]]; then
+ install "$TMPDIR/$ASAN_RT" /system/lib 644
+ install "$TMPDIR/$ASAN_RT64" /system/lib64 644
+ install "$TMPDIR/app_process32" /system/bin 755 $CTX
+ install "$TMPDIR/app_process32.real" /system/bin 755 $CTX
+ install "$TMPDIR/app_process64" /system/bin 755 $CTX
+ install "$TMPDIR/app_process64.real" /system/bin 755 $CTX
+ install "$TMPDIR/asanwrapper" /system/bin 755
+ install "$TMPDIR/asanwrapper64" /system/bin 755
+ else
+ install "$TMPDIR/$ASAN_RT" /system/lib 644
+ install "$TMPDIR/app_process32" /system/bin 755 $CTX
+ install "$TMPDIR/app_process.wrap" /system/bin 755 $CTX
+ install "$TMPDIR/asanwrapper" /system/bin 755 $CTX
+
+ adb_shell rm /system/bin/app_process
+ adb_shell ln -s /system/bin/app_process.wrap /system/bin/app_process
+ fi
+
+ adb_shell cp /system/bin/sh /system/bin/sh-from-zygote
+ adb_shell chcon $CTX /system/bin/sh-from-zygote
+
+ if [ $ENFORCING == 1 ]; then
+ adb_shell setenforce 1
+ fi
+
+ echo '>> Restarting shell (asynchronous)'
+ adb_shell stop
+ adb_shell start
+
+ echo '>> Please wait until the device restarts'
+else
+ echo '>> Device is up to date'
+fi
+
+rm -r "$TMPDIRBASE"
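
asan_device_setup wraps the device's zygote (app_process) so every app starts with the ASan runtime preloaded, handling pre-L and 64-bit layouts separately as the script above shows. Typical invocations, following its own usage text (the device id is a placeholder):

    # Install on the default device; the runtime is located next to the script
    # or under ../lib64/clang/*/lib/linux.
    $ clang-2690385/bin/asan_device_setup

    # Install on a specific device where 'adb root' is unavailable but 'su' works.
    $ clang-2690385/bin/asan_device_setup --device emulator-5554 --use-su

    # Undo the installation.
    $ clang-2690385/bin/asan_device_setup --revert
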
diff --git a/clang-2690385/bin/clang b/clang-2690385/bin/clang
new file mode 100755
index 00000000..475eb2c8
--- /dev/null
+++ b/clang-2690385/bin/clang
Binary files differ
diff --git a/clang-2690385/bin/clang++ b/clang-2690385/bin/clang++
new file mode 100755
index 00000000..475eb2c8
--- /dev/null
+++ b/clang-2690385/bin/clang++
Binary files differ
diff --git a/clang-2690385/bin/llvm-as b/clang-2690385/bin/llvm-as
new file mode 100755
index 00000000..4dfce3de
--- /dev/null
+++ b/clang-2690385/bin/llvm-as
Binary files differ
diff --git a/clang-2690385/bin/llvm-dis b/clang-2690385/bin/llvm-dis
new file mode 100755
index 00000000..083dc197
--- /dev/null
+++ b/clang-2690385/bin/llvm-dis
Binary files differ
diff --git a/clang-2690385/bin/llvm-link b/clang-2690385/bin/llvm-link
new file mode 100755
index 00000000..522650f0
--- /dev/null
+++ b/clang-2690385/bin/llvm-link
Binary files differ
diff --git a/clang-2690385/bin/mips/analyzer b/clang-2690385/bin/mips/analyzer
new file mode 100644
index 00000000..c13dbc72
--- /dev/null
+++ b/clang-2690385/bin/mips/analyzer
@@ -0,0 +1,7 @@
+#!/bin/bash
+if [ "$1" != "-cc1" ]; then
+ `dirname $0`/../clang -target mipsel-none-linux-android "$@"
+else
+ # target/triple already spelled out.
+ `dirname $0`/../clang "$@"
+fi
diff --git a/clang-2690385/bin/mips/analyzer++ b/clang-2690385/bin/mips/analyzer++
new file mode 100644
index 00000000..61a59ca6
--- /dev/null
+++ b/clang-2690385/bin/mips/analyzer++
@@ -0,0 +1,7 @@
+#!/bin/bash
+if [ "$1" != "-cc1" ]; then
+ `dirname $0`/../clang++ -target mipsel-none-linux-android "$@"
+else
+ # target/triple already spelled out.
+ `dirname $0`/../clang++ "$@"
+fi
diff --git a/clang-2690385/bin/mips64/analyzer b/clang-2690385/bin/mips64/analyzer
new file mode 100644
index 00000000..3e276cee
--- /dev/null
+++ b/clang-2690385/bin/mips64/analyzer
@@ -0,0 +1,7 @@
+#!/bin/bash
+if [ "$1" != "-cc1" ]; then
+ `dirname $0`/../clang -target mips64el-none-linux-android "$@"
+else
+ # target/triple already spelled out.
+ `dirname $0`/../clang "$@"
+fi
diff --git a/clang-2690385/bin/mips64/analyzer++ b/clang-2690385/bin/mips64/analyzer++
new file mode 100644
index 00000000..b08deeb3
--- /dev/null
+++ b/clang-2690385/bin/mips64/analyzer++
@@ -0,0 +1,7 @@
+#!/bin/bash
+if [ "$1" != "-cc1" ]; then
+ `dirname $0`/../clang++ -target mips64el-none-linux-android "$@"
+else
+ # target/triple already spelled out.
+ `dirname $0`/../clang++ "$@"
+fi
diff --git a/clang-2690385/bin/x86/analyzer b/clang-2690385/bin/x86/analyzer
new file mode 100644
index 00000000..bfcbfec1
--- /dev/null
+++ b/clang-2690385/bin/x86/analyzer
@@ -0,0 +1,7 @@
+#!/bin/bash
+if [ "$1" != "-cc1" ]; then
+ `dirname $0`/../clang -target i686-none-linux-android "$@"
+else
+ # target/triple already spelled out.
+ `dirname $0`/../clang "$@"
+fi
diff --git a/clang-2690385/bin/x86/analyzer++ b/clang-2690385/bin/x86/analyzer++
new file mode 100644
index 00000000..fca62dfd
--- /dev/null
+++ b/clang-2690385/bin/x86/analyzer++
@@ -0,0 +1,7 @@
+#!/bin/bash
+if [ "$1" != "-cc1" ]; then
+ `dirname $0`/../clang++ -target i686-none-linux-android "$@"
+else
+ # target/triple already spelled out.
+ `dirname $0`/../clang++ "$@"
+fi
diff --git a/clang-2690385/bin/x86_64/analyzer b/clang-2690385/bin/x86_64/analyzer
new file mode 100644
index 00000000..e7db4439
--- /dev/null
+++ b/clang-2690385/bin/x86_64/analyzer
@@ -0,0 +1,7 @@
+#!/bin/bash
+if [ "$1" != "-cc1" ]; then
+ `dirname $0`/../clang -target x86_64-none-linux-android "$@"
+else
+ # target/triple already spelled out.
+ `dirname $0`/../clang "$@"
+fi
diff --git a/clang-2690385/bin/x86_64/analyzer++ b/clang-2690385/bin/x86_64/analyzer++
new file mode 100644
index 00000000..ed8d15ba
--- /dev/null
+++ b/clang-2690385/bin/x86_64/analyzer++
@@ -0,0 +1,7 @@
+#!/bin/bash
+if [ "$1" != "-cc1" ]; then
+ `dirname $0`/../clang++ -target x86_64-none-linux-android "$@"
+else
+ # target/triple already spelled out.
+ `dirname $0`/../clang++ "$@"
+fi
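
That is the last of the eight wrapper pairs; only the -target triple distinguishes them. The full mapping, collected from the scripts above into a small stand-alone sketch (this helper is not part of the commit):

    #!/bin/bash
    # Print the default -target triple each per-ABI analyzer wrapper injects.
    for abi in arm64-v8a armeabi-v7a-hard armeabi-v7a armeabi mips mips64 x86 x86_64; do
      case "$abi" in
        arm64-v8a)                    triple=aarch64-none-linux-android ;;
        armeabi-v7a-hard|armeabi-v7a) triple=armv7-none-linux-androideabi ;;
        armeabi)                      triple=armv5te-none-linux-androideabi ;;
        mips)                         triple=mipsel-none-linux-android ;;
        mips64)                       triple=mips64el-none-linux-android ;;
        x86)                          triple=i686-none-linux-android ;;
        x86_64)                       triple=x86_64-none-linux-android ;;
      esac
      printf '%-18s %s\n' "$abi" "$triple"
    done
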
diff --git a/clang-2690385/lib64/LLVMgold.dylib b/clang-2690385/lib64/LLVMgold.dylib
new file mode 100755
index 00000000..53cde2d0
--- /dev/null
+++ b/clang-2690385/lib64/LLVMgold.dylib
Binary files differ
diff --git a/clang-2690385/lib64/clang/3.8.256229 b/clang-2690385/lib64/clang/3.8.256229
new file mode 120000
index 00000000..98fccd6d
--- /dev/null
+++ b/clang-2690385/lib64/clang/3.8.256229
@@ -0,0 +1 @@
+3.8
\ No newline at end of file
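
The full version string is a symlink to the shared 3.8 resource directory (the link target has no trailing newline, hence the marker above):

    $ readlink clang-2690385/lib64/clang/3.8.256229
    3.8
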
diff --git a/clang-2690385/lib64/clang/3.8/include/Intrin.h b/clang-2690385/lib64/clang/3.8/include/Intrin.h
new file mode 100644
index 00000000..6c1d0d16
--- /dev/null
+++ b/clang-2690385/lib64/clang/3.8/include/Intrin.h
@@ -0,0 +1,958 @@
+/* ===-------- Intrin.h ---------------------------------------------------===
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+ * THE SOFTWARE.
+ *
+ *===-----------------------------------------------------------------------===
+ */
+
+/* Only include this if we're compiling for the windows platform. */
+#ifndef _MSC_VER
+#include_next <Intrin.h>
+#else
+
+#ifndef __INTRIN_H
+#define __INTRIN_H
+
+/* First include the standard intrinsics. */
+#if defined(__i386__) || defined(__x86_64__)
+#include <x86intrin.h>
+#endif
+
+/* For the definition of jmp_buf. */
+#if __STDC_HOSTED__
+#include <setjmp.h>
+#endif
+
+/* Define the default attributes for the functions in this file. */
+#define __DEFAULT_FN_ATTRS __attribute__((__always_inline__, __nodebug__))
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#if defined(__MMX__)
+/* And the random ones that aren't in those files. */
+__m64 _m_from_float(float);
+float _m_to_float(__m64);
+#endif
+
+/* Other assorted instruction intrinsics. */
+void __addfsbyte(unsigned long, unsigned char);
+void __addfsdword(unsigned long, unsigned long);
+void __addfsword(unsigned long, unsigned short);
+void __code_seg(const char *);
+static __inline__
+void __cpuid(int[4], int);
+static __inline__
+void __cpuidex(int[4], int, int);
+void __debugbreak(void);
+__int64 __emul(int, int);
+unsigned __int64 __emulu(unsigned int, unsigned int);
+void __cdecl __fastfail(unsigned int);
+unsigned int __getcallerseflags(void);
+static __inline__
+void __halt(void);
+unsigned char __inbyte(unsigned short);
+void __inbytestring(unsigned short, unsigned char *, unsigned long);
+void __incfsbyte(unsigned long);
+void __incfsdword(unsigned long);
+void __incfsword(unsigned long);
+unsigned long __indword(unsigned short);
+void __indwordstring(unsigned short, unsigned long *, unsigned long);
+void __int2c(void);
+void __invlpg(void *);
+unsigned short __inword(unsigned short);
+void __inwordstring(unsigned short, unsigned short *, unsigned long);
+void __lidt(void *);
+unsigned __int64 __ll_lshift(unsigned __int64, int);
+__int64 __ll_rshift(__int64, int);
+void __llwpcb(void *);
+unsigned char __lwpins32(unsigned int, unsigned int, unsigned int);
+void __lwpval32(unsigned int, unsigned int, unsigned int);
+unsigned int __lzcnt(unsigned int);
+unsigned short __lzcnt16(unsigned short);
+static __inline__
+void __movsb(unsigned char *, unsigned char const *, size_t);
+static __inline__
+void __movsd(unsigned long *, unsigned long const *, size_t);
+static __inline__
+void __movsw(unsigned short *, unsigned short const *, size_t);
+void __nop(void);
+void __nvreg_restore_fence(void);
+void __nvreg_save_fence(void);
+void __outbyte(unsigned short, unsigned char);
+void __outbytestring(unsigned short, unsigned char *, unsigned long);
+void __outdword(unsigned short, unsigned long);
+void __outdwordstring(unsigned short, unsigned long *, unsigned long);
+void __outword(unsigned short, unsigned short);
+void __outwordstring(unsigned short, unsigned short *, unsigned long);
+static __inline__
+unsigned int __popcnt(unsigned int);
+static __inline__
+unsigned short __popcnt16(unsigned short);
+unsigned long __readcr0(void);
+unsigned long __readcr2(void);
+static __inline__
+unsigned long __readcr3(void);
+unsigned long __readcr4(void);
+unsigned long __readcr8(void);
+unsigned int __readdr(unsigned int);
+#ifdef __i386__
+static __inline__
+unsigned char __readfsbyte(unsigned long);
+static __inline__
+unsigned long __readfsdword(unsigned long);
+static __inline__
+unsigned __int64 __readfsqword(unsigned long);
+static __inline__
+unsigned short __readfsword(unsigned long);
+#endif
+static __inline__
+unsigned __int64 __readmsr(unsigned long);
+unsigned __int64 __readpmc(unsigned long);
+unsigned long __segmentlimit(unsigned long);
+void __sidt(void *);
+void *__slwpcb(void);
+static __inline__
+void __stosb(unsigned char *, unsigned char, size_t);
+static __inline__
+void __stosd(unsigned long *, unsigned long, size_t);
+static __inline__
+void __stosw(unsigned short *, unsigned short, size_t);
+void __svm_clgi(void);
+void __svm_invlpga(void *, int);
+void __svm_skinit(int);
+void __svm_stgi(void);
+void __svm_vmload(size_t);
+void __svm_vmrun(size_t);
+void __svm_vmsave(size_t);
+void __ud2(void);
+unsigned __int64 __ull_rshift(unsigned __int64, int);
+void __vmx_off(void);
+void __vmx_vmptrst(unsigned __int64 *);
+void __wbinvd(void);
+void __writecr0(unsigned int);
+static __inline__
+void __writecr3(unsigned int);
+void __writecr4(unsigned int);
+void __writecr8(unsigned int);
+void __writedr(unsigned int, unsigned int);
+void __writefsbyte(unsigned long, unsigned char);
+void __writefsdword(unsigned long, unsigned long);
+void __writefsqword(unsigned long, unsigned __int64);
+void __writefsword(unsigned long, unsigned short);
+void __writemsr(unsigned long, unsigned __int64);
+static __inline__
+void *_AddressOfReturnAddress(void);
+static __inline__
+unsigned char _BitScanForward(unsigned long *_Index, unsigned long _Mask);
+static __inline__
+unsigned char _BitScanReverse(unsigned long *_Index, unsigned long _Mask);
+static __inline__
+unsigned char _bittest(long const *, long);
+static __inline__
+unsigned char _bittestandcomplement(long *, long);
+static __inline__
+unsigned char _bittestandreset(long *, long);
+static __inline__
+unsigned char _bittestandset(long *, long);
+unsigned __int64 __cdecl _byteswap_uint64(unsigned __int64);
+unsigned long __cdecl _byteswap_ulong(unsigned long);
+unsigned short __cdecl _byteswap_ushort(unsigned short);
+void __cdecl _disable(void);
+void __cdecl _enable(void);
+long _InterlockedAddLargeStatistic(__int64 volatile *_Addend, long _Value);
+static __inline__
+long _InterlockedAnd(long volatile *_Value, long _Mask);
+static __inline__
+short _InterlockedAnd16(short volatile *_Value, short _Mask);
+static __inline__
+char _InterlockedAnd8(char volatile *_Value, char _Mask);
+unsigned char _interlockedbittestandreset(long volatile *, long);
+static __inline__
+unsigned char _interlockedbittestandset(long volatile *, long);
+static __inline__
+long __cdecl _InterlockedCompareExchange(long volatile *_Destination,
+ long _Exchange, long _Comparand);
+long _InterlockedCompareExchange_HLEAcquire(long volatile *, long, long);
+long _InterlockedCompareExchange_HLERelease(long volatile *, long, long);
+static __inline__
+short _InterlockedCompareExchange16(short volatile *_Destination,
+ short _Exchange, short _Comparand);
+static __inline__
+__int64 _InterlockedCompareExchange64(__int64 volatile *_Destination,
+ __int64 _Exchange, __int64 _Comparand);
+__int64 _InterlockedCompareExchange64_HLEAcquire(__int64 volatile *, __int64,
+ __int64);
+__int64 _InterlockedCompareExchange64_HLERelease(__int64 volatile *, __int64,
+ __int64);
+static __inline__
+char _InterlockedCompareExchange8(char volatile *_Destination, char _Exchange,
+ char _Comparand);
+void *_InterlockedCompareExchangePointer_HLEAcquire(void *volatile *, void *,
+ void *);
+void *_InterlockedCompareExchangePointer_HLERelease(void *volatile *, void *,
+ void *);
+static __inline__
+long __cdecl _InterlockedDecrement(long volatile *_Addend);
+static __inline__
+short _InterlockedDecrement16(short volatile *_Addend);
+long _InterlockedExchange(long volatile *_Target, long _Value);
+static __inline__
+short _InterlockedExchange16(short volatile *_Target, short _Value);
+static __inline__
+char _InterlockedExchange8(char volatile *_Target, char _Value);
+static __inline__
+long __cdecl _InterlockedExchangeAdd(long volatile *_Addend, long _Value);
+long _InterlockedExchangeAdd_HLEAcquire(long volatile *, long);
+long _InterlockedExchangeAdd_HLERelease(long volatile *, long);
+static __inline__
+short _InterlockedExchangeAdd16(short volatile *_Addend, short _Value);
+__int64 _InterlockedExchangeAdd64_HLEAcquire(__int64 volatile *, __int64);
+__int64 _InterlockedExchangeAdd64_HLERelease(__int64 volatile *, __int64);
+static __inline__
+char _InterlockedExchangeAdd8(char volatile *_Addend, char _Value);
+static __inline__
+long __cdecl _InterlockedIncrement(long volatile *_Addend);
+static __inline__
+short _InterlockedIncrement16(short volatile *_Addend);
+static __inline__
+long _InterlockedOr(long volatile *_Value, long _Mask);
+static __inline__
+short _InterlockedOr16(short volatile *_Value, short _Mask);
+static __inline__
+char _InterlockedOr8(char volatile *_Value, char _Mask);
+static __inline__
+long _InterlockedXor(long volatile *_Value, long _Mask);
+static __inline__
+short _InterlockedXor16(short volatile *_Value, short _Mask);
+static __inline__
+char _InterlockedXor8(char volatile *_Value, char _Mask);
+void __cdecl _invpcid(unsigned int, void *);
+static __inline__
+unsigned long __cdecl _lrotl(unsigned long, int);
+static __inline__
+unsigned long __cdecl _lrotr(unsigned long, int);
+static __inline__
+void _ReadBarrier(void);
+static __inline__
+void _ReadWriteBarrier(void);
+static __inline__
+void *_ReturnAddress(void);
+unsigned int _rorx_u32(unsigned int, const unsigned int);
+static __inline__
+unsigned int __cdecl _rotl(unsigned int _Value, int _Shift);
+static __inline__
+unsigned short _rotl16(unsigned short _Value, unsigned char _Shift);
+static __inline__
+unsigned __int64 __cdecl _rotl64(unsigned __int64 _Value, int _Shift);
+static __inline__
+unsigned char _rotl8(unsigned char _Value, unsigned char _Shift);
+static __inline__
+unsigned int __cdecl _rotr(unsigned int _Value, int _Shift);
+static __inline__
+unsigned short _rotr16(unsigned short _Value, unsigned char _Shift);
+static __inline__
+unsigned __int64 __cdecl _rotr64(unsigned __int64 _Value, int _Shift);
+static __inline__
+unsigned char _rotr8(unsigned char _Value, unsigned char _Shift);
+int _sarx_i32(int, unsigned int);
+#if __STDC_HOSTED__
+int __cdecl _setjmp(jmp_buf);
+#endif
+unsigned int _shlx_u32(unsigned int, unsigned int);
+unsigned int _shrx_u32(unsigned int, unsigned int);
+void _Store_HLERelease(long volatile *, long);
+void _Store64_HLERelease(__int64 volatile *, __int64);
+void _StorePointer_HLERelease(void *volatile *, void *);
+static __inline__
+void _WriteBarrier(void);
+unsigned __int32 _xbegin(void);
+void _xend(void);
+#define _XCR_XFEATURE_ENABLED_MASK 0
+static __inline__
+unsigned __int64 __cdecl _xgetbv(unsigned int);
+void __cdecl _xsetbv(unsigned int, unsigned __int64);
+
+/* These additional intrinsics are turned on in x64/amd64/x86_64 mode. */
+#ifdef __x86_64__
+void __addgsbyte(unsigned long, unsigned char);
+void __addgsdword(unsigned long, unsigned long);
+void __addgsqword(unsigned long, unsigned __int64);
+void __addgsword(unsigned long, unsigned short);
+static __inline__
+void __faststorefence(void);
+void __incgsbyte(unsigned long);
+void __incgsdword(unsigned long);
+void __incgsqword(unsigned long);
+void __incgsword(unsigned long);
+unsigned char __lwpins64(unsigned __int64, unsigned int, unsigned int);
+void __lwpval64(unsigned __int64, unsigned int, unsigned int);
+unsigned __int64 __lzcnt64(unsigned __int64);
+static __inline__
+void __movsq(unsigned long long *, unsigned long long const *, size_t);
+__int64 __mulh(__int64, __int64);
+static __inline__
+unsigned __int64 __popcnt64(unsigned __int64);
+static __inline__
+unsigned char __readgsbyte(unsigned long);
+static __inline__
+unsigned long __readgsdword(unsigned long);
+static __inline__
+unsigned __int64 __readgsqword(unsigned long);
+unsigned short __readgsword(unsigned long);
+unsigned __int64 __shiftleft128(unsigned __int64 _LowPart,
+ unsigned __int64 _HighPart,
+ unsigned char _Shift);
+unsigned __int64 __shiftright128(unsigned __int64 _LowPart,
+ unsigned __int64 _HighPart,
+ unsigned char _Shift);
+static __inline__
+void __stosq(unsigned __int64 *, unsigned __int64, size_t);
+unsigned char __vmx_on(unsigned __int64 *);
+unsigned char __vmx_vmclear(unsigned __int64 *);
+unsigned char __vmx_vmlaunch(void);
+unsigned char __vmx_vmptrld(unsigned __int64 *);
+unsigned char __vmx_vmread(size_t, size_t *);
+unsigned char __vmx_vmresume(void);
+unsigned char __vmx_vmwrite(size_t, size_t);
+void __writegsbyte(unsigned long, unsigned char);
+void __writegsdword(unsigned long, unsigned long);
+void __writegsqword(unsigned long, unsigned __int64);
+void __writegsword(unsigned long, unsigned short);
+static __inline__
+unsigned char _BitScanForward64(unsigned long *_Index, unsigned __int64 _Mask);
+static __inline__
+unsigned char _BitScanReverse64(unsigned long *_Index, unsigned __int64 _Mask);
+static __inline__
+unsigned char _bittest64(__int64 const *, __int64);
+static __inline__
+unsigned char _bittestandcomplement64(__int64 *, __int64);
+static __inline__
+unsigned char _bittestandreset64(__int64 *, __int64);
+static __inline__
+unsigned char _bittestandset64(__int64 *, __int64);
+unsigned __int64 __cdecl _byteswap_uint64(unsigned __int64);
+long _InterlockedAnd_np(long volatile *_Value, long _Mask);
+short _InterlockedAnd16_np(short volatile *_Value, short _Mask);
+__int64 _InterlockedAnd64_np(__int64 volatile *_Value, __int64 _Mask);
+char _InterlockedAnd8_np(char volatile *_Value, char _Mask);
+unsigned char _interlockedbittestandreset64(__int64 volatile *, __int64);
+static __inline__
+unsigned char _interlockedbittestandset64(__int64 volatile *, __int64);
+long _InterlockedCompareExchange_np(long volatile *_Destination, long _Exchange,
+ long _Comparand);
+unsigned char _InterlockedCompareExchange128(__int64 volatile *_Destination,
+ __int64 _ExchangeHigh,
+ __int64 _ExchangeLow,
+                                             __int64 *_ComparandResult);
+unsigned char _InterlockedCompareExchange128_np(__int64 volatile *_Destination,
+ __int64 _ExchangeHigh,
+ __int64 _ExchangeLow,
+ __int64 *_ComparandResult);
+short _InterlockedCompareExchange16_np(short volatile *_Destination,
+ short _Exchange, short _Comparand);
+__int64 _InterlockedCompareExchange64_HLEAcquire(__int64 volatile *, __int64,
+ __int64);
+__int64 _InterlockedCompareExchange64_HLERelease(__int64 volatile *, __int64,
+ __int64);
+__int64 _InterlockedCompareExchange64_np(__int64 volatile *_Destination,
+ __int64 _Exchange, __int64 _Comparand);
+void *_InterlockedCompareExchangePointer(void *volatile *_Destination,
+ void *_Exchange, void *_Comparand);
+void *_InterlockedCompareExchangePointer_np(void *volatile *_Destination,
+ void *_Exchange, void *_Comparand);
+static __inline__
+__int64 _InterlockedDecrement64(__int64 volatile *_Addend);
+static __inline__
+__int64 _InterlockedExchange64(__int64 volatile *_Target, __int64 _Value);
+static __inline__
+__int64 _InterlockedExchangeAdd64(__int64 volatile *_Addend, __int64 _Value);
+void *_InterlockedExchangePointer(void *volatile *_Target, void *_Value);
+static __inline__
+__int64 _InterlockedIncrement64(__int64 volatile *_Addend);
+long _InterlockedOr_np(long volatile *_Value, long _Mask);
+short _InterlockedOr16_np(short volatile *_Value, short _Mask);
+static __inline__
+__int64 _InterlockedOr64(__int64 volatile *_Value, __int64 _Mask);
+__int64 _InterlockedOr64_np(__int64 volatile *_Value, __int64 _Mask);
+char _InterlockedOr8_np(char volatile *_Value, char _Mask);
+long _InterlockedXor_np(long volatile *_Value, long _Mask);
+short _InterlockedXor16_np(short volatile *_Value, short _Mask);
+static __inline__
+__int64 _InterlockedXor64(__int64 volatile *_Value, __int64 _Mask);
+__int64 _InterlockedXor64_np(__int64 volatile *_Value, __int64 _Mask);
+char _InterlockedXor8_np(char volatile *_Value, char _Mask);
+static __inline__
+__int64 _mul128(__int64 _Multiplier, __int64 _Multiplicand,
+ __int64 *_HighProduct);
+unsigned __int64 _rorx_u64(unsigned __int64, const unsigned int);
+__int64 _sarx_i64(__int64, unsigned int);
+#if __STDC_HOSTED__
+int __cdecl _setjmpex(jmp_buf);
+#endif
+unsigned __int64 _shlx_u64(unsigned __int64, unsigned int);
+unsigned __int64 _shrx_u64(unsigned __int64, unsigned int);
+/*
+ * Multiply two 64-bit integers and obtain a 128-bit result.
+ * The low half is returned directly and the high half is stored through an
+ * out parameter.
+ */
+static __inline__ unsigned __int64 __DEFAULT_FN_ATTRS
+_umul128(unsigned __int64 _Multiplier, unsigned __int64 _Multiplicand,
+ unsigned __int64 *_HighProduct) {
+ unsigned __int128 _FullProduct =
+ (unsigned __int128)_Multiplier * (unsigned __int128)_Multiplicand;
+ *_HighProduct = _FullProduct >> 64;
+ return _FullProduct;
+}
+static __inline__ unsigned __int64 __DEFAULT_FN_ATTRS
+__umulh(unsigned __int64 _Multiplier, unsigned __int64 _Multiplicand) {
+ unsigned __int128 _FullProduct =
+ (unsigned __int128)_Multiplier * (unsigned __int128)_Multiplicand;
+ return _FullProduct >> 64;
+}
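+/*
+ * A minimal usage sketch (illustrative only, for some unsigned __int64
+ * operands __a and __b): splitting the full 128-bit product into halves.
+ *
+ *   unsigned __int64 __hi;
+ *   unsigned __int64 __lo = _umul128(__a, __b, &__hi);
+ *   // __lo holds bits 0..63 of the product, __hi holds bits 64..127;
+ *   // __umulh(__a, __b) returns the same value as __hi alone.
+ */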
+
+#endif /* __x86_64__ */
+
+/*----------------------------------------------------------------------------*\
+|* Multiplication
+\*----------------------------------------------------------------------------*/
+static __inline__ __int64 __DEFAULT_FN_ATTRS
+__emul(int __in1, int __in2) {
+ return (__int64)__in1 * (__int64)__in2;
+}
+static __inline__ unsigned __int64 __DEFAULT_FN_ATTRS
+__emulu(unsigned int __in1, unsigned int __in2) {
+ return (unsigned __int64)__in1 * (unsigned __int64)__in2;
+}
+/*----------------------------------------------------------------------------*\
+|* Bit Twiddling
+\*----------------------------------------------------------------------------*/
+static __inline__ unsigned char __DEFAULT_FN_ATTRS
+_rotl8(unsigned char _Value, unsigned char _Shift) {
+ _Shift &= 0x7;
+ return _Shift ? (_Value << _Shift) | (_Value >> (8 - _Shift)) : _Value;
+}
+static __inline__ unsigned char __DEFAULT_FN_ATTRS
+_rotr8(unsigned char _Value, unsigned char _Shift) {
+ _Shift &= 0x7;
+ return _Shift ? (_Value >> _Shift) | (_Value << (8 - _Shift)) : _Value;
+}
+static __inline__ unsigned short __DEFAULT_FN_ATTRS
+_rotl16(unsigned short _Value, unsigned char _Shift) {
+ _Shift &= 0xf;
+ return _Shift ? (_Value << _Shift) | (_Value >> (16 - _Shift)) : _Value;
+}
+static __inline__ unsigned short __DEFAULT_FN_ATTRS
+_rotr16(unsigned short _Value, unsigned char _Shift) {
+ _Shift &= 0xf;
+ return _Shift ? (_Value >> _Shift) | (_Value << (16 - _Shift)) : _Value;
+}
+static __inline__ unsigned int __DEFAULT_FN_ATTRS
+_rotl(unsigned int _Value, int _Shift) {
+ _Shift &= 0x1f;
+ return _Shift ? (_Value << _Shift) | (_Value >> (32 - _Shift)) : _Value;
+}
+static __inline__ unsigned int __DEFAULT_FN_ATTRS
+_rotr(unsigned int _Value, int _Shift) {
+ _Shift &= 0x1f;
+ return _Shift ? (_Value >> _Shift) | (_Value << (32 - _Shift)) : _Value;
+}
+static __inline__ unsigned long __DEFAULT_FN_ATTRS
+_lrotl(unsigned long _Value, int _Shift) {
+ _Shift &= 0x1f;
+ return _Shift ? (_Value << _Shift) | (_Value >> (32 - _Shift)) : _Value;
+}
+static __inline__ unsigned long __DEFAULT_FN_ATTRS
+_lrotr(unsigned long _Value, int _Shift) {
+ _Shift &= 0x1f;
+ return _Shift ? (_Value >> _Shift) | (_Value << (32 - _Shift)) : _Value;
+}
+static __inline__ unsigned __int64 __DEFAULT_FN_ATTRS
+_rotl64(unsigned __int64 _Value, int _Shift) {
+ _Shift &= 0x3f;
+ return _Shift ? (_Value << _Shift) | (_Value >> (64 - _Shift)) : _Value;
+}
+static __inline__ unsigned __int64 __DEFAULT_FN_ATTRS
+_rotr64(unsigned __int64 _Value, int _Shift) {
+ _Shift &= 0x3f;
+ return _Shift ? (_Value >> _Shift) | (_Value << (64 - _Shift)) : _Value;
+}
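+/*
+ * A few worked values (illustrative only). The shift count is masked to the
+ * operand width first, so rotating by the full width is an identity:
+ *
+ *   _rotl8(0x96, 4)        == 0x69        // 1001 0110 -> 0110 1001
+ *   _rotr(0x80000001u, 1)  == 0xC0000000u // bit 0 wraps around to bit 31
+ *   _rotl64(__x, 64)       == __x         // 64 & 0x3f == 0: no rotation
+ */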
+/*----------------------------------------------------------------------------*\
+|* Bit Counting and Testing
+\*----------------------------------------------------------------------------*/
+static __inline__ unsigned char __DEFAULT_FN_ATTRS
+_BitScanForward(unsigned long *_Index, unsigned long _Mask) {
+ if (!_Mask)
+ return 0;
+ *_Index = __builtin_ctzl(_Mask);
+ return 1;
+}
+static __inline__ unsigned char __DEFAULT_FN_ATTRS
+_BitScanReverse(unsigned long *_Index, unsigned long _Mask) {
+ if (!_Mask)
+ return 0;
+ *_Index = 31 - __builtin_clzl(_Mask);
+ return 1;
+}
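+/*
+ * A brief usage sketch (illustrative only): both scans report success via
+ * the return value, so the index must not be read when the mask is zero.
+ *
+ *   unsigned long __idx;
+ *   if (_BitScanForward(&__idx, 0x50UL)) { } // __idx == 4, lowest set bit
+ *   if (_BitScanReverse(&__idx, 0x50UL)) { } // __idx == 6, highest set bit
+ */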
+static __inline__ unsigned short __DEFAULT_FN_ATTRS
+__popcnt16(unsigned short _Value) {
+ return __builtin_popcount((int)_Value);
+}
+static __inline__ unsigned int __DEFAULT_FN_ATTRS
+__popcnt(unsigned int _Value) {
+ return __builtin_popcount(_Value);
+}
+static __inline__ unsigned char __DEFAULT_FN_ATTRS
+_bittest(long const *_BitBase, long _BitPos) {
+ return (*_BitBase >> _BitPos) & 1;
+}
+static __inline__ unsigned char __DEFAULT_FN_ATTRS
+_bittestandcomplement(long *_BitBase, long _BitPos) {
+ unsigned char _Res = (*_BitBase >> _BitPos) & 1;
+ *_BitBase = *_BitBase ^ (1 << _BitPos);
+ return _Res;
+}
+static __inline__ unsigned char __DEFAULT_FN_ATTRS
+_bittestandreset(long *_BitBase, long _BitPos) {
+ unsigned char _Res = (*_BitBase >> _BitPos) & 1;
+ *_BitBase = *_BitBase & ~(1 << _BitPos);
+ return _Res;
+}
+static __inline__ unsigned char __DEFAULT_FN_ATTRS
+_bittestandset(long *_BitBase, long _BitPos) {
+ unsigned char _Res = (*_BitBase >> _BitPos) & 1;
+ *_BitBase = *_BitBase | (1 << _BitPos);
+ return _Res;
+}
+static __inline__ unsigned char __DEFAULT_FN_ATTRS
+_interlockedbittestandset(long volatile *_BitBase, long _BitPos) {
+ long _PrevVal = __atomic_fetch_or(_BitBase, 1l << _BitPos, __ATOMIC_SEQ_CST);
+ return (_PrevVal >> _BitPos) & 1;
+}
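+/*
+ * A minimal sketch (illustrative only) of why the atomic variant matters:
+ * the fetch-or makes test-and-set one indivisible step, usable as a crude
+ * spinlock on a hypothetical flag word.
+ *
+ *   static long __flag;
+ *   while (_interlockedbittestandset(&__flag, 0))
+ *     ;                            // bit already set: another owner, spin
+ *   // ... critical section ...
+ *   _InterlockedAnd(&__flag, ~1L); // release: clear bit 0 atomically
+ */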
+#ifdef __x86_64__
+static __inline__ unsigned char __DEFAULT_FN_ATTRS
+_BitScanForward64(unsigned long *_Index, unsigned __int64 _Mask) {
+ if (!_Mask)
+ return 0;
+ *_Index = __builtin_ctzll(_Mask);
+ return 1;
+}
+static __inline__ unsigned char __DEFAULT_FN_ATTRS
+_BitScanReverse64(unsigned long *_Index, unsigned __int64 _Mask) {
+ if (!_Mask)
+ return 0;
+ *_Index = 63 - __builtin_clzll(_Mask);
+ return 1;
+}
+static __inline__ unsigned __int64 __DEFAULT_FN_ATTRS
+__popcnt64(unsigned __int64 _Value) {
+ return __builtin_popcountll(_Value);
+}
+static __inline__ unsigned char __DEFAULT_FN_ATTRS
+_bittest64(__int64 const *_BitBase, __int64 _BitPos) {
+ return (*_BitBase >> _BitPos) & 1;
+}
+static __inline__ unsigned char __DEFAULT_FN_ATTRS
+_bittestandcomplement64(__int64 *_BitBase, __int64 _BitPos) {
+ unsigned char _Res = (*_BitBase >> _BitPos) & 1;
+ *_BitBase = *_BitBase ^ (1ll << _BitPos);
+ return _Res;
+}
+static __inline__ unsigned char __DEFAULT_FN_ATTRS
+_bittestandreset64(__int64 *_BitBase, __int64 _BitPos) {
+ unsigned char _Res = (*_BitBase >> _BitPos) & 1;
+ *_BitBase = *_BitBase & ~(1ll << _BitPos);
+ return _Res;
+}
+static __inline__ unsigned char __DEFAULT_FN_ATTRS
+_bittestandset64(__int64 *_BitBase, __int64 _BitPos) {
+ unsigned char _Res = (*_BitBase >> _BitPos) & 1;
+ *_BitBase = *_BitBase | (1ll << _BitPos);
+ return _Res;
+}
+static __inline__ unsigned char __DEFAULT_FN_ATTRS
+_interlockedbittestandset64(__int64 volatile *_BitBase, __int64 _BitPos) {
+ long long _PrevVal =
+ __atomic_fetch_or(_BitBase, 1ll << _BitPos, __ATOMIC_SEQ_CST);
+ return (_PrevVal >> _BitPos) & 1;
+}
+#endif
+/*----------------------------------------------------------------------------*\
+|* Interlocked Exchange Add
+\*----------------------------------------------------------------------------*/
+static __inline__ char __DEFAULT_FN_ATTRS
+_InterlockedExchangeAdd8(char volatile *_Addend, char _Value) {
+ return __atomic_fetch_add(_Addend, _Value, __ATOMIC_SEQ_CST);
+}
+static __inline__ short __DEFAULT_FN_ATTRS
+_InterlockedExchangeAdd16(short volatile *_Addend, short _Value) {
+ return __atomic_fetch_add(_Addend, _Value, __ATOMIC_SEQ_CST);
+}
+#ifdef __x86_64__
+static __inline__ __int64 __DEFAULT_FN_ATTRS
+_InterlockedExchangeAdd64(__int64 volatile *_Addend, __int64 _Value) {
+ return __atomic_fetch_add(_Addend, _Value, __ATOMIC_SEQ_CST);
+}
+#endif
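+/*
+ * Note the fetch-add semantics (illustrative sketch only): the value
+ * returned is the one *before* the addition, not after.
+ *
+ *   short volatile __ctr = 10;
+ *   short __old = _InterlockedExchangeAdd16(&__ctr, 5);
+ *   // __old == 10, __ctr == 15
+ */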
+/*----------------------------------------------------------------------------*\
+|* Interlocked Exchange Sub
+\*----------------------------------------------------------------------------*/
+static __inline__ char __DEFAULT_FN_ATTRS
+_InterlockedExchangeSub8(char volatile *_Subend, char _Value) {
+ return __atomic_fetch_sub(_Subend, _Value, __ATOMIC_SEQ_CST);
+}
+static __inline__ short __DEFAULT_FN_ATTRS
+_InterlockedExchangeSub16(short volatile *_Subend, short _Value) {
+ return __atomic_fetch_sub(_Subend, _Value, __ATOMIC_SEQ_CST);
+}
+static __inline__ long __DEFAULT_FN_ATTRS
+_InterlockedExchangeSub(long volatile *_Subend, long _Value) {
+ return __atomic_fetch_sub(_Subend, _Value, __ATOMIC_SEQ_CST);
+}
+#ifdef __x86_64__
+static __inline__ __int64 __DEFAULT_FN_ATTRS
+_InterlockedExchangeSub64(__int64 volatile *_Subend, __int64 _Value) {
+ return __atomic_fetch_sub(_Subend, _Value, __ATOMIC_SEQ_CST);
+}
+#endif
+/*----------------------------------------------------------------------------*\
+|* Interlocked Increment
+\*----------------------------------------------------------------------------*/
+static __inline__ short __DEFAULT_FN_ATTRS
+_InterlockedIncrement16(short volatile *_Value) {
+ return __atomic_add_fetch(_Value, 1, __ATOMIC_SEQ_CST);
+}
+#ifdef __x86_64__
+static __inline__ __int64 __DEFAULT_FN_ATTRS
+_InterlockedIncrement64(__int64 volatile *_Value) {
+ return __atomic_add_fetch(_Value, 1, __ATOMIC_SEQ_CST);
+}
+#endif
+/*----------------------------------------------------------------------------*\
+|* Interlocked Decrement
+\*----------------------------------------------------------------------------*/
+static __inline__ short __DEFAULT_FN_ATTRS
+_InterlockedDecrement16(short volatile *_Value) {
+ return __atomic_sub_fetch(_Value, 1, __ATOMIC_SEQ_CST);
+}
+#ifdef __x86_64__
+static __inline__ __int64 __DEFAULT_FN_ATTRS
+_InterlockedDecrement64(__int64 volatile *_Value) {
+ return __atomic_sub_fetch(_Value, 1, __ATOMIC_SEQ_CST);
+}
+#endif
+/*----------------------------------------------------------------------------*\
+|* Interlocked And
+\*----------------------------------------------------------------------------*/
+static __inline__ char __DEFAULT_FN_ATTRS
+_InterlockedAnd8(char volatile *_Value, char _Mask) {
+ return __atomic_and_fetch(_Value, _Mask, __ATOMIC_SEQ_CST);
+}
+static __inline__ short __DEFAULT_FN_ATTRS
+_InterlockedAnd16(short volatile *_Value, short _Mask) {
+ return __atomic_and_fetch(_Value, _Mask, __ATOMIC_SEQ_CST);
+}
+static __inline__ long __DEFAULT_FN_ATTRS
+_InterlockedAnd(long volatile *_Value, long _Mask) {
+ return __atomic_and_fetch(_Value, _Mask, __ATOMIC_SEQ_CST);
+}
+#ifdef __x86_64__
+static __inline__ __int64 __DEFAULT_FN_ATTRS
+_InterlockedAnd64(__int64 volatile *_Value, __int64 _Mask) {
+ return __atomic_and_fetch(_Value, _Mask, __ATOMIC_SEQ_CST);
+}
+#endif
+/*----------------------------------------------------------------------------*\
+|* Interlocked Or
+\*----------------------------------------------------------------------------*/
+static __inline__ char __DEFAULT_FN_ATTRS
+_InterlockedOr8(char volatile *_Value, char _Mask) {
+ return __atomic_or_fetch(_Value, _Mask, __ATOMIC_SEQ_CST);
+}
+static __inline__ short __DEFAULT_FN_ATTRS
+_InterlockedOr16(short volatile *_Value, short _Mask) {
+ return __atomic_or_fetch(_Value, _Mask, __ATOMIC_SEQ_CST);
+}
+static __inline__ long __DEFAULT_FN_ATTRS
+_InterlockedOr(long volatile *_Value, long _Mask) {
+ return __atomic_or_fetch(_Value, _Mask, __ATOMIC_SEQ_CST);
+}
+#ifdef __x86_64__
+static __inline__ __int64 __DEFAULT_FN_ATTRS
+_InterlockedOr64(__int64 volatile *_Value, __int64 _Mask) {
+ return __atomic_or_fetch(_Value, _Mask, __ATOMIC_SEQ_CST);
+}
+#endif
+/*----------------------------------------------------------------------------*\
+|* Interlocked Xor
+\*----------------------------------------------------------------------------*/
+static __inline__ char __DEFAULT_FN_ATTRS
+_InterlockedXor8(char volatile *_Value, char _Mask) {
+ return __atomic_xor_fetch(_Value, _Mask, __ATOMIC_SEQ_CST);
+}
+static __inline__ short __DEFAULT_FN_ATTRS
+_InterlockedXor16(short volatile *_Value, short _Mask) {
+ return __atomic_xor_fetch(_Value, _Mask, __ATOMIC_SEQ_CST);
+}
+static __inline__ long __DEFAULT_FN_ATTRS
+_InterlockedXor(long volatile *_Value, long _Mask) {
+ return __atomic_xor_fetch(_Value, _Mask, __ATOMIC_SEQ_CST);
+}
+#ifdef __x86_64__
+static __inline__ __int64 __DEFAULT_FN_ATTRS
+_InterlockedXor64(__int64 volatile *_Value, __int64 _Mask) {
+ return __atomic_xor_fetch(_Value, _Mask, __ATOMIC_SEQ_CST);
+}
+#endif
+/*----------------------------------------------------------------------------*\
+|* Interlocked Exchange
+\*----------------------------------------------------------------------------*/
+static __inline__ char __DEFAULT_FN_ATTRS
+_InterlockedExchange8(char volatile *_Target, char _Value) {
+ __atomic_exchange(_Target, &_Value, &_Value, __ATOMIC_SEQ_CST);
+ return _Value;
+}
+static __inline__ short __DEFAULT_FN_ATTRS
+_InterlockedExchange16(short volatile *_Target, short _Value) {
+ __atomic_exchange(_Target, &_Value, &_Value, __ATOMIC_SEQ_CST);
+ return _Value;
+}
+#ifdef __x86_64__
+static __inline__ __int64 __DEFAULT_FN_ATTRS
+_InterlockedExchange64(__int64 volatile *_Target, __int64 _Value) {
+ __atomic_exchange(_Target, &_Value, &_Value, __ATOMIC_SEQ_CST);
+ return _Value;
+}
+#endif
+/*----------------------------------------------------------------------------*\
+|* Interlocked Compare Exchange
+\*----------------------------------------------------------------------------*/
+static __inline__ char __DEFAULT_FN_ATTRS
+_InterlockedCompareExchange8(char volatile *_Destination,
+ char _Exchange, char _Comparand) {
+ __atomic_compare_exchange(_Destination, &_Comparand, &_Exchange, 0,
+ __ATOMIC_SEQ_CST, __ATOMIC_SEQ_CST);
+ return _Comparand;
+}
+static __inline__ short __DEFAULT_FN_ATTRS
+_InterlockedCompareExchange16(short volatile *_Destination,
+ short _Exchange, short _Comparand) {
+ __atomic_compare_exchange(_Destination, &_Comparand, &_Exchange, 0,
+ __ATOMIC_SEQ_CST, __ATOMIC_SEQ_CST);
+ return _Comparand;
+}
+static __inline__ __int64 __DEFAULT_FN_ATTRS
+_InterlockedCompareExchange64(__int64 volatile *_Destination,
+ __int64 _Exchange, __int64 _Comparand) {
+ __atomic_compare_exchange(_Destination, &_Comparand, &_Exchange, 0,
+ __ATOMIC_SEQ_CST, __ATOMIC_SEQ_CST);
+ return _Comparand;
+}
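+/*
+ * A typical compare-and-swap loop built on these (illustrative sketch only,
+ * given some __int64 volatile *__p): retry until no other thread raced us.
+ *
+ *   __int64 __seen = *__p;
+ *   for (;;) {
+ *     __int64 __prev = _InterlockedCompareExchange64(__p, __seen * 2, __seen);
+ *     if (__prev == __seen)
+ *       break;         // exchange succeeded
+ *     __seen = __prev; // lost the race; retry against the fresh value
+ *   }
+ */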
+/*----------------------------------------------------------------------------*\
+|* Barriers
+\*----------------------------------------------------------------------------*/
+static __inline__ void __DEFAULT_FN_ATTRS
+__attribute__((__deprecated__("use other intrinsics or C++11 atomics instead")))
+_ReadWriteBarrier(void) {
+ __atomic_signal_fence(__ATOMIC_SEQ_CST);
+}
+static __inline__ void __DEFAULT_FN_ATTRS
+__attribute__((__deprecated__("use other intrinsics or C++11 atomics instead")))
+_ReadBarrier(void) {
+ __atomic_signal_fence(__ATOMIC_SEQ_CST);
+}
+static __inline__ void __DEFAULT_FN_ATTRS
+__attribute__((__deprecated__("use other intrinsics or C++11 atomics instead")))
+_WriteBarrier(void) {
+ __atomic_signal_fence(__ATOMIC_SEQ_CST);
+}
+#ifdef __x86_64__
+static __inline__ void __DEFAULT_FN_ATTRS
+__faststorefence(void) {
+ __atomic_thread_fence(__ATOMIC_SEQ_CST);
+}
+#endif
+/*----------------------------------------------------------------------------*\
+|* readfs, readgs
+|* (Pointers in address space #256 and #257 are relative to the GS and FS
+|* segment registers, respectively.)
+\*----------------------------------------------------------------------------*/
+#define __ptr_to_addr_space(__addr_space_nbr, __type, __offset) \
+ ((volatile __type __attribute__((__address_space__(__addr_space_nbr)))*) \
+ (__offset))
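+/*
+ * For illustration, __ptr_to_addr_space(257, unsigned char, 0x18) expands to
+ *
+ *   ((volatile unsigned char __attribute__((__address_space__(257)))*)(0x18))
+ *
+ * i.e. a pointer whose dereferences the backend lowers to %fs-relative loads.
+ */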
+
+#ifdef __i386__
+static __inline__ unsigned char __DEFAULT_FN_ATTRS
+__readfsbyte(unsigned long __offset) {
+ return *__ptr_to_addr_space(257, unsigned char, __offset);
+}
+static __inline__ unsigned __int64 __DEFAULT_FN_ATTRS
+__readfsqword(unsigned long __offset) {
+ return *__ptr_to_addr_space(257, unsigned __int64, __offset);
+}
+static __inline__ unsigned short __DEFAULT_FN_ATTRS
+__readfsword(unsigned long __offset) {
+ return *__ptr_to_addr_space(257, unsigned short, __offset);
+}
+#endif
+#ifdef __x86_64__
+static __inline__ unsigned char __DEFAULT_FN_ATTRS
+__readgsbyte(unsigned long __offset) {
+ return *__ptr_to_addr_space(256, unsigned char, __offset);
+}
+static __inline__ unsigned long __DEFAULT_FN_ATTRS
+__readgsdword(unsigned long __offset) {
+ return *__ptr_to_addr_space(256, unsigned long, __offset);
+}
+static __inline__ unsigned __int64 __DEFAULT_FN_ATTRS
+__readgsqword(unsigned long __offset) {
+ return *__ptr_to_addr_space(256, unsigned __int64, __offset);
+}
+static __inline__ unsigned short __DEFAULT_FN_ATTRS
+__readgsword(unsigned long __offset) {
+ return *__ptr_to_addr_space(256, unsigned short, __offset);
+}
+#endif
+#undef __ptr_to_addr_space
+/*----------------------------------------------------------------------------*\
+|* movs, stos
+\*----------------------------------------------------------------------------*/
+#if defined(__i386__) || defined(__x86_64__)
+static __inline__ void __DEFAULT_FN_ATTRS
+__movsb(unsigned char *__dst, unsigned char const *__src, size_t __n) {
+ __asm__("rep movsb" : : "D"(__dst), "S"(__src), "c"(__n)
+ : "%edi", "%esi", "%ecx");
+}
+static __inline__ void __DEFAULT_FN_ATTRS
+__movsd(unsigned long *__dst, unsigned long const *__src, size_t __n) {
+ __asm__("rep movsl" : : "D"(__dst), "S"(__src), "c"(__n)
+ : "%edi", "%esi", "%ecx");
+}
+static __inline__ void __DEFAULT_FN_ATTRS
+__movsw(unsigned short *__dst, unsigned short const *__src, size_t __n) {
+ __asm__("rep movsw" : : "D"(__dst), "S"(__src), "c"(__n)
+ : "%edi", "%esi", "%ecx");
+}
+static __inline__ void __DEFAULT_FN_ATTRS
+__stosb(unsigned char *__dst, unsigned char __x, size_t __n) {
+ __asm__("rep stosb" : : "D"(__dst), "a"(__x), "c"(__n)
+ : "%edi", "%ecx");
+}
+static __inline__ void __DEFAULT_FN_ATTRS
+__stosd(unsigned long *__dst, unsigned long __x, size_t __n) {
+ __asm__("rep stosl" : : "D"(__dst), "a"(__x), "c"(__n)
+ : "%edi", "%ecx");
+}
+static __inline__ void __DEFAULT_FN_ATTRS
+__stosw(unsigned short *__dst, unsigned short __x, size_t __n) {
+ __asm__("rep stosw" : : "D"(__dst), "a"(__x), "c"(__n)
+ : "%edi", "%ecx");
+}
+#endif
+#ifdef __x86_64__
+static __inline__ void __DEFAULT_FN_ATTRS
+__movsq(unsigned long long *__dst, unsigned long long const *__src, size_t __n) {
+ __asm__("rep movsq" : : "D"(__dst), "S"(__src), "c"(__n)
+ : "%edi", "%esi", "%ecx");
+}
+static __inline__ void __DEFAULT_FN_ATTRS
+__stosq(unsigned __int64 *__dst, unsigned __int64 __x, size_t __n) {
+ __asm__("rep stosq" : : "D"(__dst), "a"(__x), "c"(__n)
+ : "%edi", "%ecx");
+}
+#endif
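+/*
+ * A short usage sketch (illustrative only): the rep-string forms take an
+ * element count, not a byte count.
+ *
+ *   unsigned long __src[8] = {1, 2, 3, 4, 5, 6, 7, 8}, __dst[8];
+ *   __movsd(__dst, __src, 8);                  // rep movsl: 8 doublewords
+ *   __stosb((unsigned char *)__dst, 0, sizeof(__dst)); // then zero as bytes
+ */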
+
+/*----------------------------------------------------------------------------*\
+|* Misc
+\*----------------------------------------------------------------------------*/
+static __inline__ void * __DEFAULT_FN_ATTRS
+_AddressOfReturnAddress(void) {
+ return (void*)((char*)__builtin_frame_address(0) + sizeof(void*));
+}
+static __inline__ void * __DEFAULT_FN_ATTRS
+_ReturnAddress(void) {
+ return __builtin_return_address(0);
+}
+#if defined(__i386__) || defined(__x86_64__)
+static __inline__ void __DEFAULT_FN_ATTRS
+__cpuid(int __info[4], int __level) {
+ __asm__ ("cpuid" : "=a"(__info[0]), "=b" (__info[1]), "=c"(__info[2]), "=d"(__info[3])
+ : "a"(__level));
+}
+static __inline__ void __DEFAULT_FN_ATTRS
+__cpuidex(int __info[4], int __level, int __ecx) {
+ __asm__ ("cpuid" : "=a"(__info[0]), "=b" (__info[1]), "=c"(__info[2]), "=d"(__info[3])
+ : "a"(__level), "c"(__ecx));
+}
+static __inline__ unsigned __int64 __cdecl __DEFAULT_FN_ATTRS
+_xgetbv(unsigned int __xcr_no) {
+ unsigned int __eax, __edx;
+ __asm__ ("xgetbv" : "=a" (__eax), "=d" (__edx) : "c" (__xcr_no));
+ return ((unsigned __int64)__edx << 32) | __eax;
+}
+static __inline__ void __DEFAULT_FN_ATTRS
+__halt(void) {
+ __asm__ volatile ("hlt");
+}
+#endif
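+/*
+ * A minimal usage sketch (illustrative only): leaf 0 returns the maximum
+ * supported leaf in EAX and the vendor string spread across EBX, EDX, ECX.
+ *
+ *   int __info[4];
+ *   __cpuid(__info, 0);
+ *   // e.g. "GenuineIntel": __info[1] = "Genu", __info[3] = "ineI",
+ *   //                      __info[2] = "ntel"
+ */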
+
+/*----------------------------------------------------------------------------*\
+|* Privileged intrinsics
+\*----------------------------------------------------------------------------*/
+#if defined(__i386__) || defined(__x86_64__)
+static __inline__ unsigned __int64 __DEFAULT_FN_ATTRS
+__readmsr(unsigned long __register) {
+ // Loads the contents of a 64-bit model specific register (MSR) specified in
+ // the ECX register into registers EDX:EAX. The EDX register is loaded with
+ // the high-order 32 bits of the MSR and the EAX register is loaded with the
+ // low-order 32 bits. If less than 64 bits are implemented in the MSR being
+ // read, the values returned to EDX:EAX in unimplemented bit locations are
+ // undefined.
+ unsigned long __edx;
+ unsigned long __eax;
+ __asm__ ("rdmsr" : "=d"(__edx), "=a"(__eax) : "c"(__register));
+ return (((unsigned __int64)__edx) << 32) | (unsigned __int64)__eax;
+}
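+/*
+ * A hedged usage sketch (illustrative only): RDMSR faults outside ring 0,
+ * so this is meaningful only in kernel-mode code. 0x10 is the architectural
+ * IA32_TIME_STAMP_COUNTER index.
+ *
+ *   unsigned __int64 __tsc = __readmsr(0x10);
+ */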
+
+static __inline__ unsigned long __DEFAULT_FN_ATTRS
+__readcr3(void) {
+ unsigned long __cr3_val;
+ __asm__ __volatile__ ("mov %%cr3, %0" : "=q"(__cr3_val) : : "memory");
+ return __cr3_val;
+}
+
+static __inline__ void __DEFAULT_FN_ATTRS
+__writecr3(unsigned int __cr3_val) {
+ __asm__ ("mov %0, %%cr3" : : "q"(__cr3_val) : "memory");
+}
+#endif
+
+#ifdef __cplusplus
+}
+#endif
+
+#undef __DEFAULT_FN_ATTRS
+
+#endif /* __INTRIN_H */
+#endif /* _MSC_VER */
diff --git a/clang-2690385/lib64/clang/3.8/include/__clang_cuda_runtime_wrapper.h b/clang-2690385/lib64/clang/3.8/include/__clang_cuda_runtime_wrapper.h
new file mode 100644
index 00000000..8e5f0331
--- /dev/null
+++ b/clang-2690385/lib64/clang/3.8/include/__clang_cuda_runtime_wrapper.h
@@ -0,0 +1,216 @@
+/*===---- __clang_cuda_runtime_wrapper.h - CUDA runtime support -------------===
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+ * THE SOFTWARE.
+ *
+ *===-----------------------------------------------------------------------===
+ */
+
+/*
+ * WARNING: This header is intended to be directly -include'd by
+ * the compiler and is not supposed to be included by users.
+ *
+ * CUDA headers are implemented in a way that currently makes it
+ * impossible for user code to #include them directly when compiling with
+ * Clang. They present a different view of CUDA-supplied functions
+ * depending on where in NVCC's compilation pipeline the headers are
+ * included. Neither of these modes provides function definitions with
+ * correct attributes, so we use the preprocessor to force the headers
+ * into a form that Clang can use.
+ *
+ * Similarly to NVCC which -include's cuda_runtime.h, Clang -include's
+ * this file during every CUDA compilation.
+ */
+
+#ifndef __CLANG_CUDA_RUNTIME_WRAPPER_H__
+#define __CLANG_CUDA_RUNTIME_WRAPPER_H__
+
+#if defined(__CUDA__) && defined(__clang__)
+
+// Include some standard headers to avoid CUDA headers including them
+// while some required macros (like __THROW) are in a weird state.
+#include <stdlib.h>
+#include <cmath>
+
+// Preserve common macros that will be changed below by us or by CUDA
+// headers.
+#pragma push_macro("__THROW")
+#pragma push_macro("__CUDA_ARCH__")
+
+// WARNING: Preprocessor hacks below are based on specific details of
+// CUDA-7.x headers and are not expected to work with any other
+// version of CUDA headers.
+#include "cuda.h"
+#if !defined(CUDA_VERSION)
+#error "cuda.h did not define CUDA_VERSION"
+#elif CUDA_VERSION < 7000 || CUDA_VERSION > 7050
+#error "Unsupported CUDA version!"
+#endif
+
+// Make the largest subset of device functions available during host
+// compilation -- SM_35 for the time being.
+#ifndef __CUDA_ARCH__
+#define __CUDA_ARCH__ 350
+#endif
+
+#include "cuda_builtin_vars.h"
+
+// No need for device_launch_parameters.h as cuda_builtin_vars.h above
+// has taken care of builtin variables declared in the file.
+#define __DEVICE_LAUNCH_PARAMETERS_H__
+
+// {math,device}_functions.h only have declarations of the
+// functions. We don't need them as we're going to pull in their
+// definitions from .hpp files.
+#define __DEVICE_FUNCTIONS_H__
+#define __MATH_FUNCTIONS_H__
+
+#undef __CUDACC__
+#define __CUDABE__
+// Disables definitions of device-side runtime support stubs in
+// cuda_device_runtime_api.h
+#define __CUDADEVRT_INTERNAL__
+#include "host_config.h"
+#include "host_defines.h"
+#include "driver_types.h"
+#include "common_functions.h"
+#undef __CUDADEVRT_INTERNAL__
+
+#undef __CUDABE__
+#define __CUDACC__
+#include "cuda_runtime.h"
+
+#undef __CUDACC__
+#define __CUDABE__
+
+// CUDA headers use __nvvm_memcpy and __nvvm_memset which Clang does
+// not have at the moment. Emulate them with a builtin memcpy/memset.
+#define __nvvm_memcpy(s,d,n,a) __builtin_memcpy(s,d,n)
+#define __nvvm_memset(d,c,n,a) __builtin_memset(d,c,n)
+
+#include "crt/host_runtime.h"
+#include "crt/device_runtime.h"
+// device_runtime.h defines __cxa_* macros that will conflict with
+// cxxabi.h.
+// FIXME: redefine these as __device__ functions.
+#undef __cxa_vec_ctor
+#undef __cxa_vec_cctor
+#undef __cxa_vec_dtor
+#undef __cxa_vec_new2
+#undef __cxa_vec_new3
+#undef __cxa_vec_delete2
+#undef __cxa_vec_delete
+#undef __cxa_vec_delete3
+#undef __cxa_pure_virtual
+
+// We need decls for functions in CUDA's libdevice with the __device__
+// attribute only. Alas, they come either as __host__ __device__ or
+// with no attributes at all. To work around that, define __CUDACC_RTC__,
+// which produces the HD variants, and define __host__ away, which gives
+// us the desired decls with the __device__ attribute.
+#pragma push_macro("__host__")
+#define __host__
+#define __CUDACC_RTC__
+#include "device_functions_decls.h"
+#undef __CUDACC_RTC__
+
+// Temporarily poison __host__ macro to ensure it's not used by any of
+// the headers we're about to include.
+#define __host__ UNEXPECTED_HOST_ATTRIBUTE
+
+// device_functions.hpp and math_functions*.hpp use 'static
+// __forceinline__' (with no __device__) for definitions of device
+// functions. Temporarily redefine __forceinline__ to include
+// __device__.
+#pragma push_macro("__forceinline__")
+#define __forceinline__ __device__ __inline__ __attribute__((always_inline))
+#include "device_functions.hpp"
+#include "math_functions.hpp"
+#include "math_functions_dbl_ptx3.hpp"
+#pragma pop_macro("__forceinline__")
+
+// Pull in host-only functions that are only available when neither
+// __CUDACC__ nor __CUDABE__ is defined.
+#undef __MATH_FUNCTIONS_HPP__
+#undef __CUDABE__
+#include "math_functions.hpp"
+// Alas, additional overloads for these functions are hard to get to.
+// Considering that we only need these overloads for a few functions,
+// we can provide them here.
+static inline float rsqrt(float a) { return rsqrtf(a); }
+static inline float rcbrt(float a) { return rcbrtf(a); }
+static inline float sinpi(float a) { return sinpif(a); }
+static inline float cospi(float a) { return cospif(a); }
+static inline void sincospi(float a, float *b, float *c) {
+  return sincospif(a, b, c);
+}
+static inline float erfcinv(float a) { return erfcinvf(a); }
+static inline float normcdfinv(float a) { return normcdfinvf(a); }
+static inline float normcdf(float a) { return normcdff(a); }
+static inline float erfcx(float a) { return erfcxf(a); }
+
+// For some reason the single-argument variant is not always declared by
+// the CUDA headers. Alas, device_functions.hpp included below needs it.
+static inline __device__ void __brkpt(int c) { __brkpt(); }
+
+// Now include *.hpp with definitions of various GPU functions. Alas,
+// a lot of things get declared/defined with the __host__ attribute, which
+// we don't want, so we have to define it out. We also have to include
+// {device,math}_functions.hpp again in order to extract the other
+// branch of the #if/#else inside.
+
+#define __host__
+#undef __CUDABE__
+#define __CUDACC__
+#undef __DEVICE_FUNCTIONS_HPP__
+#include "device_functions.hpp"
+#include "device_atomic_functions.hpp"
+#include "sm_20_atomic_functions.hpp"
+#include "sm_32_atomic_functions.hpp"
+#include "sm_20_intrinsics.hpp"
+// sm_30_intrinsics.h has declarations that use default arguments, so we
+// have to include it; it will in turn include the corresponding .hpp.
+#include "sm_30_intrinsics.h"
+#include "sm_32_intrinsics.hpp"
+#undef __MATH_FUNCTIONS_HPP__
+#include "math_functions.hpp"
+#pragma pop_macro("__host__")
+
+#include "texture_indirect_functions.h"
+
+// Restore state of __CUDA_ARCH__ and __THROW we had on entry.
+#pragma pop_macro("__CUDA_ARCH__")
+#pragma pop_macro("__THROW")
+
+// Set up compiler macros expected to be seen during compilation.
+#undef __CUDABE__
+#define __CUDACC__
+#define __NVCC__
+
+#if defined(__CUDA_ARCH__)
+// We need to emit an IR declaration for the non-existent __nvvm_reflect()
+// to let the backend know that it should be treated as a const nothrow
+// function, which is what the NVVMReflect pass expects to see.
+extern "C" __device__ __attribute__((const)) int __nvvm_reflect(const void *);
+static __device__ __attribute__((used)) int __nvvm_reflect_anchor() {
+ return __nvvm_reflect("NONE");
+}
+#endif
+
+#endif // __CUDA__
+#endif // __CLANG_CUDA_RUNTIME_WRAPPER_H__
diff --git a/clang-2690385/lib64/clang/3.8/include/__stddef_max_align_t.h b/clang-2690385/lib64/clang/3.8/include/__stddef_max_align_t.h
new file mode 100644
index 00000000..1e10ca98
--- /dev/null
+++ b/clang-2690385/lib64/clang/3.8/include/__stddef_max_align_t.h
@@ -0,0 +1,43 @@
+/*===---- __stddef_max_align_t.h - Definition of max_align_t for modules ---===
+ *
+ * Copyright (c) 2014 Chandler Carruth
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+ * THE SOFTWARE.
+ *
+ *===-----------------------------------------------------------------------===
+ */
+
+#ifndef __CLANG_MAX_ALIGN_T_DEFINED
+#define __CLANG_MAX_ALIGN_T_DEFINED
+
+#if defined(_MSC_VER)
+typedef double max_align_t;
+#elif defined(__APPLE__)
+typedef long double max_align_t;
+#else
+// Define 'max_align_t' to match the GCC definition.
+typedef struct {
+ long long __clang_max_align_nonce1
+ __attribute__((__aligned__(__alignof__(long long))));
+ long double __clang_max_align_nonce2
+ __attribute__((__aligned__(__alignof__(long double))));
+} max_align_t;
+#endif
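+
+/* A quick sanity sketch (illustrative only): whichever branch was taken,
+ * code can check the guarantee it relies on, e.g.
+ *
+ *   _Static_assert(_Alignof(max_align_t) >= _Alignof(long long),
+ *                  "max_align_t must be at least as aligned as long long");
+ */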
+
+#endif
diff --git a/clang-2690385/lib64/clang/3.8/include/__wmmintrin_aes.h b/clang-2690385/lib64/clang/3.8/include/__wmmintrin_aes.h
new file mode 100644
index 00000000..100799eb
--- /dev/null
+++ b/clang-2690385/lib64/clang/3.8/include/__wmmintrin_aes.h
@@ -0,0 +1,66 @@
+/*===---- __wmmintrin_aes.h - AES intrinsics -------------------------------===
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+ * THE SOFTWARE.
+ *
+ *===-----------------------------------------------------------------------===
+ */
+#ifndef _WMMINTRIN_AES_H
+#define _WMMINTRIN_AES_H
+
+#include <emmintrin.h>
+
+/* Define the default attributes for the functions in this file. */
+#define __DEFAULT_FN_ATTRS __attribute__((__always_inline__, __nodebug__, __target__("aes")))
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_aesenc_si128(__m128i __V, __m128i __R)
+{
+ return (__m128i)__builtin_ia32_aesenc128(__V, __R);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_aesenclast_si128(__m128i __V, __m128i __R)
+{
+ return (__m128i)__builtin_ia32_aesenclast128(__V, __R);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_aesdec_si128(__m128i __V, __m128i __R)
+{
+ return (__m128i)__builtin_ia32_aesdec128(__V, __R);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_aesdeclast_si128(__m128i __V, __m128i __R)
+{
+ return (__m128i)__builtin_ia32_aesdeclast128(__V, __R);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_aesimc_si128(__m128i __V)
+{
+ return (__m128i)__builtin_ia32_aesimc128(__V);
+}
+
+#define _mm_aeskeygenassist_si128(C, R) \
+ (__m128i)__builtin_ia32_aeskeygenassist128((__v2di)(__m128i)(C), (int)(R))
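+
+/*
+ * A compact usage sketch (illustrative only), assuming round keys __k[0..10]
+ * have already been expanded for AES-128:
+ *
+ *   __m128i __s = _mm_xor_si128(__block, __k[0]);  // initial AddRoundKey
+ *   for (int __i = 1; __i < 10; ++__i)
+ *     __s = _mm_aesenc_si128(__s, __k[__i]);       // nine full rounds
+ *   __s = _mm_aesenclast_si128(__s, __k[10]);      // last round, no MixColumns
+ */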
+
+#undef __DEFAULT_FN_ATTRS
+
+#endif /* _WMMINTRIN_AES_H */
diff --git a/clang-2690385/lib64/clang/3.8/include/__wmmintrin_pclmul.h b/clang-2690385/lib64/clang/3.8/include/__wmmintrin_pclmul.h
new file mode 100644
index 00000000..68e944e9
--- /dev/null
+++ b/clang-2690385/lib64/clang/3.8/include/__wmmintrin_pclmul.h
@@ -0,0 +1,30 @@
+/*===---- __wmmintrin_pclmul.h - PCLMUL intrinsics --------------------------===
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+ * THE SOFTWARE.
+ *
+ *===-----------------------------------------------------------------------===
+ */
+#ifndef _WMMINTRIN_PCLMUL_H
+#define _WMMINTRIN_PCLMUL_H
+
+#define _mm_clmulepi64_si128(__X, __Y, __I) \
+ ((__m128i)__builtin_ia32_pclmulqdq128((__v2di)(__m128i)(__X), \
+ (__v2di)(__m128i)(__Y), (char)(__I)))
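+
+/* A worked example (illustrative only): bit 0 of __I selects the 64-bit half
+ * of __X and bit 4 the half of __Y, so
+ *
+ *   _mm_clmulepi64_si128(__X, __Y, 0x00)  // X[63:0]   clmul Y[63:0]
+ *   _mm_clmulepi64_si128(__X, __Y, 0x11)  // X[127:64] clmul Y[127:64]
+ *
+ * each yield the 128-bit carry-less product of the selected halves. */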
+
+#endif /* _WMMINTRIN_PCLMUL_H */
diff --git a/clang-2690385/lib64/clang/3.8/include/adxintrin.h b/clang-2690385/lib64/clang/3.8/include/adxintrin.h
new file mode 100644
index 00000000..ee347284
--- /dev/null
+++ b/clang-2690385/lib64/clang/3.8/include/adxintrin.h
@@ -0,0 +1,86 @@
+/*===---- adxintrin.h - ADX intrinsics -------------------------------------===
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+ * THE SOFTWARE.
+ *
+ *===-----------------------------------------------------------------------===
+ */
+
+#ifndef __IMMINTRIN_H
+#error "Never use <adxintrin.h> directly; include <immintrin.h> instead."
+#endif
+
+#ifndef __ADXINTRIN_H
+#define __ADXINTRIN_H
+
+/* Define the default attributes for the functions in this file. */
+#define __DEFAULT_FN_ATTRS __attribute__((__always_inline__, __nodebug__))
+
+/* Intrinsics that are available only if __ADX__ is defined */
+static __inline unsigned char __attribute__((__always_inline__, __nodebug__, __target__("adx")))
+_addcarryx_u32(unsigned char __cf, unsigned int __x, unsigned int __y,
+ unsigned int *__p)
+{
+ return __builtin_ia32_addcarryx_u32(__cf, __x, __y, __p);
+}
+
+#ifdef __x86_64__
+static __inline unsigned char __attribute__((__always_inline__, __nodebug__, __target__("adx")))
+_addcarryx_u64(unsigned char __cf, unsigned long long __x,
+ unsigned long long __y, unsigned long long *__p)
+{
+ return __builtin_ia32_addcarryx_u64(__cf, __x, __y, __p);
+}
+#endif
+
+/* Intrinsics that are also available even if __ADX__ is undefined */
+static __inline unsigned char __DEFAULT_FN_ATTRS
+_addcarry_u32(unsigned char __cf, unsigned int __x, unsigned int __y,
+ unsigned int *__p)
+{
+ return __builtin_ia32_addcarry_u32(__cf, __x, __y, __p);
+}
+
+#ifdef __x86_64__
+static __inline unsigned char __DEFAULT_FN_ATTRS
+_addcarry_u64(unsigned char __cf, unsigned long long __x,
+ unsigned long long __y, unsigned long long *__p)
+{
+ return __builtin_ia32_addcarry_u64(__cf, __x, __y, __p);
+}
+#endif
+
+static __inline unsigned char __DEFAULT_FN_ATTRS
+_subborrow_u32(unsigned char __cf, unsigned int __x, unsigned int __y,
+ unsigned int *__p)
+{
+ return __builtin_ia32_subborrow_u32(__cf, __x, __y, __p);
+}
+
+#ifdef __x86_64__
+static __inline unsigned char __DEFAULT_FN_ATTRS
+_subborrow_u64(unsigned char __cf, unsigned long long __x,
+ unsigned long long __y, unsigned long long *__p)
+{
+ return __builtin_ia32_subborrow_u64(__cf, __x, __y, __p);
+}
+#endif
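+
+/*
+ * A minimal usage sketch (illustrative only): chaining the returned carry
+ * through successive calls implements multi-word addition.
+ *
+ *   unsigned int __a[2] = {0xFFFFFFFFu, 0}, __b[2] = {1, 0}, __r[2];
+ *   unsigned char __c = _addcarry_u32(0, __a[0], __b[0], &__r[0]);
+ *   __c = _addcarry_u32(__c, __a[1], __b[1], &__r[1]);
+ *   // __r == {0, 1}: the low-word overflow carried into the high word
+ */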
+
+#undef __DEFAULT_FN_ATTRS
+
+#endif /* __ADXINTRIN_H */
diff --git a/clang-2690385/lib64/clang/3.8/include/altivec.h b/clang-2690385/lib64/clang/3.8/include/altivec.h
new file mode 100644
index 00000000..c39156ef
--- /dev/null
+++ b/clang-2690385/lib64/clang/3.8/include/altivec.h
@@ -0,0 +1,13919 @@
+/*===---- altivec.h - AltiVec vector programming interface -----------------===*\
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+ * THE SOFTWARE.
+ *
+\*===----------------------------------------------------------------------===*/
+
+#ifndef __ALTIVEC_H
+#define __ALTIVEC_H
+
+#ifndef __ALTIVEC__
+#error "AltiVec support not enabled"
+#endif
+
+/* constants for mapping CR6 bits to predicate result. */
+
+#define __CR6_EQ 0
+#define __CR6_EQ_REV 1
+#define __CR6_LT 2
+#define __CR6_LT_REV 3
+
+#define __ATTRS_o_ai __attribute__((__overloadable__, __always_inline__))
+
+static vector signed char __ATTRS_o_ai vec_perm(vector signed char __a,
+ vector signed char __b,
+ vector unsigned char __c);
+
+static vector unsigned char __ATTRS_o_ai vec_perm(vector unsigned char __a,
+ vector unsigned char __b,
+ vector unsigned char __c);
+
+static vector bool char __ATTRS_o_ai vec_perm(vector bool char __a,
+ vector bool char __b,
+ vector unsigned char __c);
+
+static vector short __ATTRS_o_ai vec_perm(vector signed short __a,
+ vector signed short __b,
+ vector unsigned char __c);
+
+static vector unsigned short __ATTRS_o_ai vec_perm(vector unsigned short __a,
+ vector unsigned short __b,
+ vector unsigned char __c);
+
+static vector bool short __ATTRS_o_ai vec_perm(vector bool short __a,
+ vector bool short __b,
+ vector unsigned char __c);
+
+static vector pixel __ATTRS_o_ai vec_perm(vector pixel __a, vector pixel __b,
+ vector unsigned char __c);
+
+static vector int __ATTRS_o_ai vec_perm(vector signed int __a,
+ vector signed int __b,
+ vector unsigned char __c);
+
+static vector unsigned int __ATTRS_o_ai vec_perm(vector unsigned int __a,
+ vector unsigned int __b,
+ vector unsigned char __c);
+
+static vector bool int __ATTRS_o_ai vec_perm(vector bool int __a,
+ vector bool int __b,
+ vector unsigned char __c);
+
+static vector float __ATTRS_o_ai vec_perm(vector float __a, vector float __b,
+ vector unsigned char __c);
+
+#ifdef __VSX__
+static vector long long __ATTRS_o_ai vec_perm(vector signed long long __a,
+ vector signed long long __b,
+ vector unsigned char __c);
+
+static vector unsigned long long __ATTRS_o_ai
+vec_perm(vector unsigned long long __a, vector unsigned long long __b,
+ vector unsigned char __c);
+
+static vector bool long long __ATTRS_o_ai
+vec_perm(vector bool long long __a, vector bool long long __b,
+ vector unsigned char __c);
+
+static vector double __ATTRS_o_ai vec_perm(vector double __a, vector double __b,
+ vector unsigned char __c);
+#endif
+
+static vector unsigned char __ATTRS_o_ai vec_xor(vector unsigned char __a,
+ vector unsigned char __b);
+
+/* vec_abs */
+
+#define __builtin_altivec_abs_v16qi vec_abs
+#define __builtin_altivec_abs_v8hi vec_abs
+#define __builtin_altivec_abs_v4si vec_abs
+
+static vector signed char __ATTRS_o_ai vec_abs(vector signed char __a) {
+ return __builtin_altivec_vmaxsb(__a, -__a);
+}
+
+static vector signed short __ATTRS_o_ai vec_abs(vector signed short __a) {
+ return __builtin_altivec_vmaxsh(__a, -__a);
+}
+
+static vector signed int __ATTRS_o_ai vec_abs(vector signed int __a) {
+ return __builtin_altivec_vmaxsw(__a, -__a);
+}
+
+#if defined(__POWER8_VECTOR__) && defined(__powerpc64__)
+static vector signed long long __ATTRS_o_ai
+vec_abs(vector signed long long __a) {
+ return __builtin_altivec_vmaxsd(__a, -__a);
+}
+#endif
+
+static vector float __ATTRS_o_ai vec_abs(vector float __a) {
+ vector unsigned int __res =
+ (vector unsigned int)__a & (vector unsigned int)(0x7FFFFFFF);
+ return (vector float)__res;
+}
+
+#if defined(__POWER8_VECTOR__) && defined(__powerpc64__)
+static vector double __ATTRS_o_ai vec_abs(vector double __a) {
+ vector unsigned long long __res = { 0x7FFFFFFFFFFFFFFF, 0x7FFFFFFFFFFFFFFF };
+  __res &= (vector unsigned long long)__a;
+ return (vector double)__res;
+}
+#endif
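+
+/*
+ * A short illustration (sketch only) of the sign-mask trick above: clearing
+ * the top bit of each IEEE-754 element yields |x| without branches.
+ *
+ *   vector float __v = {-1.5f, 2.0f, -0.0f, 3.0f};
+ *   vector float __r = vec_abs(__v);  // {1.5f, 2.0f, 0.0f, 3.0f}
+ */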
+
+/* vec_abss */
+#define __builtin_altivec_abss_v16qi vec_abss
+#define __builtin_altivec_abss_v8hi vec_abss
+#define __builtin_altivec_abss_v4si vec_abss
+
+static vector signed char __ATTRS_o_ai vec_abss(vector signed char __a) {
+ return __builtin_altivec_vmaxsb(
+ __a, __builtin_altivec_vsubsbs((vector signed char)(0), __a));
+}
+
+static vector signed short __ATTRS_o_ai vec_abss(vector signed short __a) {
+ return __builtin_altivec_vmaxsh(
+ __a, __builtin_altivec_vsubshs((vector signed short)(0), __a));
+}
+
+static vector signed int __ATTRS_o_ai vec_abss(vector signed int __a) {
+ return __builtin_altivec_vmaxsw(
+ __a, __builtin_altivec_vsubsws((vector signed int)(0), __a));
+}
+
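+/* Illustrative note (not part of the upstream header): vec_abss negates
+   with a saturating subtract from zero, so the most negative element
+   clamps instead of wrapping:
+
+     vec_abss((vector signed char)(-128));  // every element is 127
+     // vec_abs of the same input would wrap and stay at -128
+*/
+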
+/* vec_add */
+
+static vector signed char __ATTRS_o_ai vec_add(vector signed char __a,
+ vector signed char __b) {
+ return __a + __b;
+}
+
+static vector signed char __ATTRS_o_ai vec_add(vector bool char __a,
+ vector signed char __b) {
+ return (vector signed char)__a + __b;
+}
+
+static vector signed char __ATTRS_o_ai vec_add(vector signed char __a,
+ vector bool char __b) {
+ return __a + (vector signed char)__b;
+}
+
+static vector unsigned char __ATTRS_o_ai vec_add(vector unsigned char __a,
+ vector unsigned char __b) {
+ return __a + __b;
+}
+
+static vector unsigned char __ATTRS_o_ai vec_add(vector bool char __a,
+ vector unsigned char __b) {
+ return (vector unsigned char)__a + __b;
+}
+
+static vector unsigned char __ATTRS_o_ai vec_add(vector unsigned char __a,
+ vector bool char __b) {
+ return __a + (vector unsigned char)__b;
+}
+
+static vector short __ATTRS_o_ai vec_add(vector short __a, vector short __b) {
+ return __a + __b;
+}
+
+static vector short __ATTRS_o_ai vec_add(vector bool short __a,
+ vector short __b) {
+ return (vector short)__a + __b;
+}
+
+static vector short __ATTRS_o_ai vec_add(vector short __a,
+ vector bool short __b) {
+ return __a + (vector short)__b;
+}
+
+static vector unsigned short __ATTRS_o_ai vec_add(vector unsigned short __a,
+ vector unsigned short __b) {
+ return __a + __b;
+}
+
+static vector unsigned short __ATTRS_o_ai vec_add(vector bool short __a,
+ vector unsigned short __b) {
+ return (vector unsigned short)__a + __b;
+}
+
+static vector unsigned short __ATTRS_o_ai vec_add(vector unsigned short __a,
+ vector bool short __b) {
+ return __a + (vector unsigned short)__b;
+}
+
+static vector int __ATTRS_o_ai vec_add(vector int __a, vector int __b) {
+ return __a + __b;
+}
+
+static vector int __ATTRS_o_ai vec_add(vector bool int __a, vector int __b) {
+ return (vector int)__a + __b;
+}
+
+static vector int __ATTRS_o_ai vec_add(vector int __a, vector bool int __b) {
+ return __a + (vector int)__b;
+}
+
+static vector unsigned int __ATTRS_o_ai vec_add(vector unsigned int __a,
+ vector unsigned int __b) {
+ return __a + __b;
+}
+
+static vector unsigned int __ATTRS_o_ai vec_add(vector bool int __a,
+ vector unsigned int __b) {
+ return (vector unsigned int)__a + __b;
+}
+
+static vector unsigned int __ATTRS_o_ai vec_add(vector unsigned int __a,
+ vector bool int __b) {
+ return __a + (vector unsigned int)__b;
+}
+
+#if defined(__POWER8_VECTOR__) && defined(__powerpc64__)
+static vector signed long long __ATTRS_o_ai
+vec_add(vector signed long long __a, vector signed long long __b) {
+ return __a + __b;
+}
+
+static vector unsigned long long __ATTRS_o_ai
+vec_add(vector unsigned long long __a, vector unsigned long long __b) {
+ return __a + __b;
+}
+
+static vector signed __int128 __ATTRS_o_ai vec_add(vector signed __int128 __a,
+ vector signed __int128 __b) {
+ return __a + __b;
+}
+
+static vector unsigned __int128 __ATTRS_o_ai
+vec_add(vector unsigned __int128 __a, vector unsigned __int128 __b) {
+ return __a + __b;
+}
+#endif // defined(__POWER8_VECTOR__) && defined(__powerpc64__)
+
+static vector float __ATTRS_o_ai vec_add(vector float __a, vector float __b) {
+ return __a + __b;
+}
+
+#ifdef __VSX__
+static vector double __ATTRS_o_ai
+vec_add(vector double __a, vector double __b) {
+ return __a + __b;
+}
+#endif // __VSX__
+
+/* vec_adde */
+
+#if defined(__POWER8_VECTOR__) && defined(__powerpc64__)
+static vector signed __int128 __ATTRS_o_ai
+vec_adde(vector signed __int128 __a, vector signed __int128 __b,
+ vector signed __int128 __c) {
+ return __builtin_altivec_vaddeuqm(__a, __b, __c);
+}
+
+static vector unsigned __int128 __ATTRS_o_ai
+vec_adde(vector unsigned __int128 __a, vector unsigned __int128 __b,
+ vector unsigned __int128 __c) {
+ return __builtin_altivec_vaddeuqm(__a, __b, __c);
+}
+#endif
+
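+/* Illustrative note (not part of the upstream header): vaddeuqm forms
+   __a + __b + (__c & 1), a 128-bit add with carry-in taken from the low
+   bit of __c; vec_addec below returns only the carry-out of that sum. */
+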
+/* vec_addec */
+
+#if defined(__POWER8_VECTOR__) && defined(__powerpc64__)
+static vector signed __int128 __ATTRS_o_ai
+vec_addec(vector signed __int128 __a, vector signed __int128 __b,
+ vector signed __int128 __c) {
+ return __builtin_altivec_vaddecuq(__a, __b, __c);
+}
+
+static vector unsigned __int128 __ATTRS_o_ai
+vec_addec(vector unsigned __int128 __a, vector unsigned __int128 __b,
+ vector unsigned __int128 __c) {
+ return __builtin_altivec_vaddecuq(__a, __b, __c);
+}
+#endif
+
+/* vec_vaddubm */
+
+#define __builtin_altivec_vaddubm vec_vaddubm
+
+static vector signed char __ATTRS_o_ai vec_vaddubm(vector signed char __a,
+ vector signed char __b) {
+ return __a + __b;
+}
+
+static vector signed char __ATTRS_o_ai vec_vaddubm(vector bool char __a,
+ vector signed char __b) {
+ return (vector signed char)__a + __b;
+}
+
+static vector signed char __ATTRS_o_ai vec_vaddubm(vector signed char __a,
+ vector bool char __b) {
+ return __a + (vector signed char)__b;
+}
+
+static vector unsigned char __ATTRS_o_ai vec_vaddubm(vector unsigned char __a,
+ vector unsigned char __b) {
+ return __a + __b;
+}
+
+static vector unsigned char __ATTRS_o_ai vec_vaddubm(vector bool char __a,
+ vector unsigned char __b) {
+ return (vector unsigned char)__a + __b;
+}
+
+static vector unsigned char __ATTRS_o_ai vec_vaddubm(vector unsigned char __a,
+ vector bool char __b) {
+ return __a + (vector unsigned char)__b;
+}
+
+/* vec_vadduhm */
+
+#define __builtin_altivec_vadduhm vec_vadduhm
+
+static vector short __ATTRS_o_ai vec_vadduhm(vector short __a,
+ vector short __b) {
+ return __a + __b;
+}
+
+static vector short __ATTRS_o_ai vec_vadduhm(vector bool short __a,
+ vector short __b) {
+ return (vector short)__a + __b;
+}
+
+static vector short __ATTRS_o_ai vec_vadduhm(vector short __a,
+ vector bool short __b) {
+ return __a + (vector short)__b;
+}
+
+static vector unsigned short __ATTRS_o_ai
+vec_vadduhm(vector unsigned short __a, vector unsigned short __b) {
+ return __a + __b;
+}
+
+static vector unsigned short __ATTRS_o_ai
+vec_vadduhm(vector bool short __a, vector unsigned short __b) {
+ return (vector unsigned short)__a + __b;
+}
+
+static vector unsigned short __ATTRS_o_ai vec_vadduhm(vector unsigned short __a,
+ vector bool short __b) {
+ return __a + (vector unsigned short)__b;
+}
+
+/* vec_vadduwm */
+
+#define __builtin_altivec_vadduwm vec_vadduwm
+
+static vector int __ATTRS_o_ai vec_vadduwm(vector int __a, vector int __b) {
+ return __a + __b;
+}
+
+static vector int __ATTRS_o_ai vec_vadduwm(vector bool int __a,
+ vector int __b) {
+ return (vector int)__a + __b;
+}
+
+static vector int __ATTRS_o_ai vec_vadduwm(vector int __a,
+ vector bool int __b) {
+ return __a + (vector int)__b;
+}
+
+static vector unsigned int __ATTRS_o_ai vec_vadduwm(vector unsigned int __a,
+ vector unsigned int __b) {
+ return __a + __b;
+}
+
+static vector unsigned int __ATTRS_o_ai vec_vadduwm(vector bool int __a,
+ vector unsigned int __b) {
+ return (vector unsigned int)__a + __b;
+}
+
+static vector unsigned int __ATTRS_o_ai vec_vadduwm(vector unsigned int __a,
+ vector bool int __b) {
+ return __a + (vector unsigned int)__b;
+}
+
+/* vec_vaddfp */
+
+#define __builtin_altivec_vaddfp vec_vaddfp
+
+static vector float __attribute__((__always_inline__))
+vec_vaddfp(vector float __a, vector float __b) {
+ return __a + __b;
+}
+
+/* vec_addc */
+
+static vector signed int __ATTRS_o_ai vec_addc(vector signed int __a,
+ vector signed int __b) {
+ return (vector signed int)__builtin_altivec_vaddcuw((vector unsigned int)__a,
+ (vector unsigned int)__b);
+}
+
+static vector unsigned int __ATTRS_o_ai vec_addc(vector unsigned int __a,
+ vector unsigned int __b) {
+ return __builtin_altivec_vaddcuw(__a, __b);
+}
+
+#if defined(__POWER8_VECTOR__) && defined(__powerpc64__)
+static vector signed __int128 __ATTRS_o_ai
+vec_addc(vector signed __int128 __a, vector signed __int128 __b) {
+ return (vector signed __int128)__builtin_altivec_vaddcuq(
+ (vector unsigned __int128)__a,
+ (vector unsigned __int128)__b);
+}
+
+static vector unsigned __int128 __ATTRS_o_ai
+vec_addc(vector unsigned __int128 __a, vector unsigned __int128 __b) {
+ return __builtin_altivec_vaddcuq(__a, __b);
+}
+#endif // defined(__POWER8_VECTOR__) && defined(__powerpc64__)
+
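+/* Illustrative note (not part of the upstream header): vec_addc returns
+   only the carry-out of each unsigned element-wise add (1 where the sum
+   wrapped, 0 otherwise), e.g.
+
+     vec_addc((vector unsigned int)(0xFFFFFFFF),
+              (vector unsigned int)(1));   // every element is 1
+*/
+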
+/* vec_vaddcuw */
+
+static vector unsigned int __attribute__((__always_inline__))
+vec_vaddcuw(vector unsigned int __a, vector unsigned int __b) {
+ return __builtin_altivec_vaddcuw(__a, __b);
+}
+
+/* vec_adds */
+
+static vector signed char __ATTRS_o_ai vec_adds(vector signed char __a,
+ vector signed char __b) {
+ return __builtin_altivec_vaddsbs(__a, __b);
+}
+
+static vector signed char __ATTRS_o_ai vec_adds(vector bool char __a,
+ vector signed char __b) {
+ return __builtin_altivec_vaddsbs((vector signed char)__a, __b);
+}
+
+static vector signed char __ATTRS_o_ai vec_adds(vector signed char __a,
+ vector bool char __b) {
+ return __builtin_altivec_vaddsbs(__a, (vector signed char)__b);
+}
+
+static vector unsigned char __ATTRS_o_ai vec_adds(vector unsigned char __a,
+ vector unsigned char __b) {
+ return __builtin_altivec_vaddubs(__a, __b);
+}
+
+static vector unsigned char __ATTRS_o_ai vec_adds(vector bool char __a,
+ vector unsigned char __b) {
+ return __builtin_altivec_vaddubs((vector unsigned char)__a, __b);
+}
+
+static vector unsigned char __ATTRS_o_ai vec_adds(vector unsigned char __a,
+ vector bool char __b) {
+ return __builtin_altivec_vaddubs(__a, (vector unsigned char)__b);
+}
+
+static vector short __ATTRS_o_ai vec_adds(vector short __a, vector short __b) {
+ return __builtin_altivec_vaddshs(__a, __b);
+}
+
+static vector short __ATTRS_o_ai vec_adds(vector bool short __a,
+ vector short __b) {
+ return __builtin_altivec_vaddshs((vector short)__a, __b);
+}
+
+static vector short __ATTRS_o_ai vec_adds(vector short __a,
+ vector bool short __b) {
+ return __builtin_altivec_vaddshs(__a, (vector short)__b);
+}
+
+static vector unsigned short __ATTRS_o_ai vec_adds(vector unsigned short __a,
+ vector unsigned short __b) {
+ return __builtin_altivec_vadduhs(__a, __b);
+}
+
+static vector unsigned short __ATTRS_o_ai vec_adds(vector bool short __a,
+ vector unsigned short __b) {
+ return __builtin_altivec_vadduhs((vector unsigned short)__a, __b);
+}
+
+static vector unsigned short __ATTRS_o_ai vec_adds(vector unsigned short __a,
+ vector bool short __b) {
+ return __builtin_altivec_vadduhs(__a, (vector unsigned short)__b);
+}
+
+static vector int __ATTRS_o_ai vec_adds(vector int __a, vector int __b) {
+ return __builtin_altivec_vaddsws(__a, __b);
+}
+
+static vector int __ATTRS_o_ai vec_adds(vector bool int __a, vector int __b) {
+ return __builtin_altivec_vaddsws((vector int)__a, __b);
+}
+
+static vector int __ATTRS_o_ai vec_adds(vector int __a, vector bool int __b) {
+ return __builtin_altivec_vaddsws(__a, (vector int)__b);
+}
+
+static vector unsigned int __ATTRS_o_ai vec_adds(vector unsigned int __a,
+ vector unsigned int __b) {
+ return __builtin_altivec_vadduws(__a, __b);
+}
+
+static vector unsigned int __ATTRS_o_ai vec_adds(vector bool int __a,
+ vector unsigned int __b) {
+ return __builtin_altivec_vadduws((vector unsigned int)__a, __b);
+}
+
+static vector unsigned int __ATTRS_o_ai vec_adds(vector unsigned int __a,
+ vector bool int __b) {
+ return __builtin_altivec_vadduws(__a, (vector unsigned int)__b);
+}
+
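+/* Illustrative note (not part of the upstream header): unlike vec_add,
+   vec_adds clamps to the limits of the element type instead of
+   wrapping:
+
+     vec_adds((vector signed char)(127), (vector signed char)(1));
+     // every element stays at 127; vec_add would wrap to -128
+*/
+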
+/* vec_vaddsbs */
+
+static vector signed char __ATTRS_o_ai vec_vaddsbs(vector signed char __a,
+ vector signed char __b) {
+ return __builtin_altivec_vaddsbs(__a, __b);
+}
+
+static vector signed char __ATTRS_o_ai vec_vaddsbs(vector bool char __a,
+ vector signed char __b) {
+ return __builtin_altivec_vaddsbs((vector signed char)__a, __b);
+}
+
+static vector signed char __ATTRS_o_ai vec_vaddsbs(vector signed char __a,
+ vector bool char __b) {
+ return __builtin_altivec_vaddsbs(__a, (vector signed char)__b);
+}
+
+/* vec_vaddubs */
+
+static vector unsigned char __ATTRS_o_ai vec_vaddubs(vector unsigned char __a,
+ vector unsigned char __b) {
+ return __builtin_altivec_vaddubs(__a, __b);
+}
+
+static vector unsigned char __ATTRS_o_ai vec_vaddubs(vector bool char __a,
+ vector unsigned char __b) {
+ return __builtin_altivec_vaddubs((vector unsigned char)__a, __b);
+}
+
+static vector unsigned char __ATTRS_o_ai vec_vaddubs(vector unsigned char __a,
+ vector bool char __b) {
+ return __builtin_altivec_vaddubs(__a, (vector unsigned char)__b);
+}
+
+/* vec_vaddshs */
+
+static vector short __ATTRS_o_ai vec_vaddshs(vector short __a,
+ vector short __b) {
+ return __builtin_altivec_vaddshs(__a, __b);
+}
+
+static vector short __ATTRS_o_ai vec_vaddshs(vector bool short __a,
+ vector short __b) {
+ return __builtin_altivec_vaddshs((vector short)__a, __b);
+}
+
+static vector short __ATTRS_o_ai vec_vaddshs(vector short __a,
+ vector bool short __b) {
+ return __builtin_altivec_vaddshs(__a, (vector short)__b);
+}
+
+/* vec_vadduhs */
+
+static vector unsigned short __ATTRS_o_ai
+vec_vadduhs(vector unsigned short __a, vector unsigned short __b) {
+ return __builtin_altivec_vadduhs(__a, __b);
+}
+
+static vector unsigned short __ATTRS_o_ai
+vec_vadduhs(vector bool short __a, vector unsigned short __b) {
+ return __builtin_altivec_vadduhs((vector unsigned short)__a, __b);
+}
+
+static vector unsigned short __ATTRS_o_ai vec_vadduhs(vector unsigned short __a,
+ vector bool short __b) {
+ return __builtin_altivec_vadduhs(__a, (vector unsigned short)__b);
+}
+
+/* vec_vaddsws */
+
+static vector int __ATTRS_o_ai vec_vaddsws(vector int __a, vector int __b) {
+ return __builtin_altivec_vaddsws(__a, __b);
+}
+
+static vector int __ATTRS_o_ai vec_vaddsws(vector bool int __a,
+ vector int __b) {
+ return __builtin_altivec_vaddsws((vector int)__a, __b);
+}
+
+static vector int __ATTRS_o_ai vec_vaddsws(vector int __a,
+ vector bool int __b) {
+ return __builtin_altivec_vaddsws(__a, (vector int)__b);
+}
+
+/* vec_vadduws */
+
+static vector unsigned int __ATTRS_o_ai vec_vadduws(vector unsigned int __a,
+ vector unsigned int __b) {
+ return __builtin_altivec_vadduws(__a, __b);
+}
+
+static vector unsigned int __ATTRS_o_ai vec_vadduws(vector bool int __a,
+ vector unsigned int __b) {
+ return __builtin_altivec_vadduws((vector unsigned int)__a, __b);
+}
+
+static vector unsigned int __ATTRS_o_ai vec_vadduws(vector unsigned int __a,
+ vector bool int __b) {
+ return __builtin_altivec_vadduws(__a, (vector unsigned int)__b);
+}
+
+#if defined(__POWER8_VECTOR__) && defined(__powerpc64__)
+/* vec_vadduqm */
+
+static vector signed __int128 __ATTRS_o_ai
+vec_vadduqm(vector signed __int128 __a, vector signed __int128 __b) {
+ return __a + __b;
+}
+
+static vector unsigned __int128 __ATTRS_o_ai
+vec_vadduqm(vector unsigned __int128 __a, vector unsigned __int128 __b) {
+ return __a + __b;
+}
+
+/* vec_vaddeuqm */
+
+static vector signed __int128 __ATTRS_o_ai
+vec_vaddeuqm(vector signed __int128 __a, vector signed __int128 __b,
+ vector signed __int128 __c) {
+ return __builtin_altivec_vaddeuqm(__a, __b, __c);
+}
+
+static vector unsigned __int128 __ATTRS_o_ai
+vec_vaddeuqm(vector unsigned __int128 __a, vector unsigned __int128 __b,
+ vector unsigned __int128 __c) {
+ return __builtin_altivec_vaddeuqm(__a, __b, __c);
+}
+
+/* vec_vaddcuq */
+
+static vector signed __int128 __ATTRS_o_ai
+vec_vaddcuq(vector signed __int128 __a, vector signed __int128 __b) {
+ return __builtin_altivec_vaddcuq(__a, __b);
+}
+
+static vector unsigned __int128 __ATTRS_o_ai
+vec_vaddcuq(vector unsigned __int128 __a, vector unsigned __int128 __b) {
+ return __builtin_altivec_vaddcuq(__a, __b);
+}
+
+/* vec_vaddecuq */
+
+static vector signed __int128 __ATTRS_o_ai
+vec_vaddecuq(vector signed __int128 __a, vector signed __int128 __b,
+ vector signed __int128 __c) {
+ return __builtin_altivec_vaddecuq(__a, __b, __c);
+}
+
+static vector unsigned __int128 __ATTRS_o_ai
+vec_vaddecuq(vector unsigned __int128 __a, vector unsigned __int128 __b,
+ vector unsigned __int128 __c) {
+ return __builtin_altivec_vaddecuq(__a, __b, __c);
+}
+#endif // defined(__POWER8_VECTOR__) && defined(__powerpc64__)
+
+/* vec_and */
+
+#define __builtin_altivec_vand vec_and
+
+static vector signed char __ATTRS_o_ai vec_and(vector signed char __a,
+ vector signed char __b) {
+ return __a & __b;
+}
+
+static vector signed char __ATTRS_o_ai vec_and(vector bool char __a,
+ vector signed char __b) {
+ return (vector signed char)__a & __b;
+}
+
+static vector signed char __ATTRS_o_ai vec_and(vector signed char __a,
+ vector bool char __b) {
+ return __a & (vector signed char)__b;
+}
+
+static vector unsigned char __ATTRS_o_ai vec_and(vector unsigned char __a,
+ vector unsigned char __b) {
+ return __a & __b;
+}
+
+static vector unsigned char __ATTRS_o_ai vec_and(vector bool char __a,
+ vector unsigned char __b) {
+ return (vector unsigned char)__a & __b;
+}
+
+static vector unsigned char __ATTRS_o_ai vec_and(vector unsigned char __a,
+ vector bool char __b) {
+ return __a & (vector unsigned char)__b;
+}
+
+static vector bool char __ATTRS_o_ai vec_and(vector bool char __a,
+ vector bool char __b) {
+ return __a & __b;
+}
+
+static vector short __ATTRS_o_ai vec_and(vector short __a, vector short __b) {
+ return __a & __b;
+}
+
+static vector short __ATTRS_o_ai vec_and(vector bool short __a,
+ vector short __b) {
+ return (vector short)__a & __b;
+}
+
+static vector short __ATTRS_o_ai vec_and(vector short __a,
+ vector bool short __b) {
+ return __a & (vector short)__b;
+}
+
+static vector unsigned short __ATTRS_o_ai vec_and(vector unsigned short __a,
+ vector unsigned short __b) {
+ return __a & __b;
+}
+
+static vector unsigned short __ATTRS_o_ai vec_and(vector bool short __a,
+ vector unsigned short __b) {
+ return (vector unsigned short)__a & __b;
+}
+
+static vector unsigned short __ATTRS_o_ai vec_and(vector unsigned short __a,
+ vector bool short __b) {
+ return __a & (vector unsigned short)__b;
+}
+
+static vector bool short __ATTRS_o_ai vec_and(vector bool short __a,
+ vector bool short __b) {
+ return __a & __b;
+}
+
+static vector int __ATTRS_o_ai vec_and(vector int __a, vector int __b) {
+ return __a & __b;
+}
+
+static vector int __ATTRS_o_ai vec_and(vector bool int __a, vector int __b) {
+ return (vector int)__a & __b;
+}
+
+static vector int __ATTRS_o_ai vec_and(vector int __a, vector bool int __b) {
+ return __a & (vector int)__b;
+}
+
+static vector unsigned int __ATTRS_o_ai vec_and(vector unsigned int __a,
+ vector unsigned int __b) {
+ return __a & __b;
+}
+
+static vector unsigned int __ATTRS_o_ai vec_and(vector bool int __a,
+ vector unsigned int __b) {
+ return (vector unsigned int)__a & __b;
+}
+
+static vector unsigned int __ATTRS_o_ai vec_and(vector unsigned int __a,
+ vector bool int __b) {
+ return __a & (vector unsigned int)__b;
+}
+
+static vector bool int __ATTRS_o_ai vec_and(vector bool int __a,
+ vector bool int __b) {
+ return __a & __b;
+}
+
+static vector float __ATTRS_o_ai vec_and(vector float __a, vector float __b) {
+ vector unsigned int __res =
+ (vector unsigned int)__a & (vector unsigned int)__b;
+ return (vector float)__res;
+}
+
+static vector float __ATTRS_o_ai vec_and(vector bool int __a,
+ vector float __b) {
+ vector unsigned int __res =
+ (vector unsigned int)__a & (vector unsigned int)__b;
+ return (vector float)__res;
+}
+
+static vector float __ATTRS_o_ai vec_and(vector float __a,
+ vector bool int __b) {
+ vector unsigned int __res =
+ (vector unsigned int)__a & (vector unsigned int)__b;
+ return (vector float)__res;
+}
+
+#ifdef __VSX__
+static vector double __ATTRS_o_ai
+vec_and(vector bool long long __a, vector double __b) {
+ vector unsigned long long __res =
+ (vector unsigned long long)__a & (vector unsigned long long)__b;
+ return (vector double)__res;
+}
+
+static vector double __ATTRS_o_ai
+vec_and(vector double __a, vector bool long long __b) {
+ vector unsigned long long __res =
+ (vector unsigned long long)__a & (vector unsigned long long)__b;
+ return (vector double)__res;
+}
+
+static vector double __ATTRS_o_ai
+vec_and(vector double __a, vector double __b) {
+ vector unsigned long long __res =
+ (vector unsigned long long)__a & (vector unsigned long long)__b;
+ return (vector double)__res;
+}
+
+static vector signed long long __ATTRS_o_ai
+vec_and(vector signed long long __a, vector signed long long __b) {
+ return __a & __b;
+}
+
+static vector signed long long __ATTRS_o_ai
+vec_and(vector bool long long __a, vector signed long long __b) {
+ return (vector signed long long)__a & __b;
+}
+
+static vector signed long long __ATTRS_o_ai vec_and(vector signed long long __a,
+ vector bool long long __b) {
+ return __a & (vector signed long long)__b;
+}
+
+static vector unsigned long long __ATTRS_o_ai
+vec_and(vector unsigned long long __a, vector unsigned long long __b) {
+ return __a & __b;
+}
+
+static vector unsigned long long __ATTRS_o_ai
+vec_and(vector bool long long __a, vector unsigned long long __b) {
+ return (vector unsigned long long)__a & __b;
+}
+
+static vector unsigned long long __ATTRS_o_ai
+vec_and(vector unsigned long long __a, vector bool long long __b) {
+ return __a & (vector unsigned long long)__b;
+}
+
+static vector bool long long __ATTRS_o_ai vec_and(vector bool long long __a,
+ vector bool long long __b) {
+ return __a & __b;
+}
+#endif
+
+/* vec_vand */
+
+static vector signed char __ATTRS_o_ai vec_vand(vector signed char __a,
+ vector signed char __b) {
+ return __a & __b;
+}
+
+static vector signed char __ATTRS_o_ai vec_vand(vector bool char __a,
+ vector signed char __b) {
+ return (vector signed char)__a & __b;
+}
+
+static vector signed char __ATTRS_o_ai vec_vand(vector signed char __a,
+ vector bool char __b) {
+ return __a & (vector signed char)__b;
+}
+
+static vector unsigned char __ATTRS_o_ai vec_vand(vector unsigned char __a,
+ vector unsigned char __b) {
+ return __a & __b;
+}
+
+static vector unsigned char __ATTRS_o_ai vec_vand(vector bool char __a,
+ vector unsigned char __b) {
+ return (vector unsigned char)__a & __b;
+}
+
+static vector unsigned char __ATTRS_o_ai vec_vand(vector unsigned char __a,
+ vector bool char __b) {
+ return __a & (vector unsigned char)__b;
+}
+
+static vector bool char __ATTRS_o_ai vec_vand(vector bool char __a,
+ vector bool char __b) {
+ return __a & __b;
+}
+
+static vector short __ATTRS_o_ai vec_vand(vector short __a, vector short __b) {
+ return __a & __b;
+}
+
+static vector short __ATTRS_o_ai vec_vand(vector bool short __a,
+ vector short __b) {
+ return (vector short)__a & __b;
+}
+
+static vector short __ATTRS_o_ai vec_vand(vector short __a,
+ vector bool short __b) {
+ return __a & (vector short)__b;
+}
+
+static vector unsigned short __ATTRS_o_ai vec_vand(vector unsigned short __a,
+ vector unsigned short __b) {
+ return __a & __b;
+}
+
+static vector unsigned short __ATTRS_o_ai vec_vand(vector bool short __a,
+ vector unsigned short __b) {
+ return (vector unsigned short)__a & __b;
+}
+
+static vector unsigned short __ATTRS_o_ai vec_vand(vector unsigned short __a,
+ vector bool short __b) {
+ return __a & (vector unsigned short)__b;
+}
+
+static vector bool short __ATTRS_o_ai vec_vand(vector bool short __a,
+ vector bool short __b) {
+ return __a & __b;
+}
+
+static vector int __ATTRS_o_ai vec_vand(vector int __a, vector int __b) {
+ return __a & __b;
+}
+
+static vector int __ATTRS_o_ai vec_vand(vector bool int __a, vector int __b) {
+ return (vector int)__a & __b;
+}
+
+static vector int __ATTRS_o_ai vec_vand(vector int __a, vector bool int __b) {
+ return __a & (vector int)__b;
+}
+
+static vector unsigned int __ATTRS_o_ai vec_vand(vector unsigned int __a,
+ vector unsigned int __b) {
+ return __a & __b;
+}
+
+static vector unsigned int __ATTRS_o_ai vec_vand(vector bool int __a,
+ vector unsigned int __b) {
+ return (vector unsigned int)__a & __b;
+}
+
+static vector unsigned int __ATTRS_o_ai vec_vand(vector unsigned int __a,
+ vector bool int __b) {
+ return __a & (vector unsigned int)__b;
+}
+
+static vector bool int __ATTRS_o_ai vec_vand(vector bool int __a,
+ vector bool int __b) {
+ return __a & __b;
+}
+
+static vector float __ATTRS_o_ai vec_vand(vector float __a, vector float __b) {
+ vector unsigned int __res =
+ (vector unsigned int)__a & (vector unsigned int)__b;
+ return (vector float)__res;
+}
+
+static vector float __ATTRS_o_ai vec_vand(vector bool int __a,
+ vector float __b) {
+ vector unsigned int __res =
+ (vector unsigned int)__a & (vector unsigned int)__b;
+ return (vector float)__res;
+}
+
+static vector float __ATTRS_o_ai vec_vand(vector float __a,
+ vector bool int __b) {
+ vector unsigned int __res =
+ (vector unsigned int)__a & (vector unsigned int)__b;
+ return (vector float)__res;
+}
+
+#ifdef __VSX__
+static vector signed long long __ATTRS_o_ai
+vec_vand(vector signed long long __a, vector signed long long __b) {
+ return __a & __b;
+}
+
+static vector signed long long __ATTRS_o_ai
+vec_vand(vector bool long long __a, vector signed long long __b) {
+ return (vector signed long long)__a & __b;
+}
+
+static vector signed long long __ATTRS_o_ai
+vec_vand(vector signed long long __a, vector bool long long __b) {
+ return __a & (vector signed long long)__b;
+}
+
+static vector unsigned long long __ATTRS_o_ai
+vec_vand(vector unsigned long long __a, vector unsigned long long __b) {
+ return __a & __b;
+}
+
+static vector unsigned long long __ATTRS_o_ai
+vec_vand(vector bool long long __a, vector unsigned long long __b) {
+ return (vector unsigned long long)__a & __b;
+}
+
+static vector unsigned long long __ATTRS_o_ai
+vec_vand(vector unsigned long long __a, vector bool long long __b) {
+ return __a & (vector unsigned long long)__b;
+}
+
+static vector bool long long __ATTRS_o_ai vec_vand(vector bool long long __a,
+ vector bool long long __b) {
+ return __a & __b;
+}
+#endif
+
+/* vec_andc */
+
+#define __builtin_altivec_vandc vec_andc
+
+static vector signed char __ATTRS_o_ai vec_andc(vector signed char __a,
+ vector signed char __b) {
+ return __a & ~__b;
+}
+
+static vector signed char __ATTRS_o_ai vec_andc(vector bool char __a,
+ vector signed char __b) {
+ return (vector signed char)__a & ~__b;
+}
+
+static vector signed char __ATTRS_o_ai vec_andc(vector signed char __a,
+ vector bool char __b) {
+ return __a & ~(vector signed char)__b;
+}
+
+static vector unsigned char __ATTRS_o_ai vec_andc(vector unsigned char __a,
+ vector unsigned char __b) {
+ return __a & ~__b;
+}
+
+static vector unsigned char __ATTRS_o_ai vec_andc(vector bool char __a,
+ vector unsigned char __b) {
+ return (vector unsigned char)__a & ~__b;
+}
+
+static vector unsigned char __ATTRS_o_ai vec_andc(vector unsigned char __a,
+ vector bool char __b) {
+ return __a & ~(vector unsigned char)__b;
+}
+
+static vector bool char __ATTRS_o_ai vec_andc(vector bool char __a,
+ vector bool char __b) {
+ return __a & ~__b;
+}
+
+static vector short __ATTRS_o_ai vec_andc(vector short __a, vector short __b) {
+ return __a & ~__b;
+}
+
+static vector short __ATTRS_o_ai vec_andc(vector bool short __a,
+ vector short __b) {
+ return (vector short)__a & ~__b;
+}
+
+static vector short __ATTRS_o_ai vec_andc(vector short __a,
+ vector bool short __b) {
+ return __a & ~(vector short)__b;
+}
+
+static vector unsigned short __ATTRS_o_ai vec_andc(vector unsigned short __a,
+ vector unsigned short __b) {
+ return __a & ~__b;
+}
+
+static vector unsigned short __ATTRS_o_ai vec_andc(vector bool short __a,
+ vector unsigned short __b) {
+ return (vector unsigned short)__a & ~__b;
+}
+
+static vector unsigned short __ATTRS_o_ai vec_andc(vector unsigned short __a,
+ vector bool short __b) {
+ return __a & ~(vector unsigned short)__b;
+}
+
+static vector bool short __ATTRS_o_ai vec_andc(vector bool short __a,
+ vector bool short __b) {
+ return __a & ~__b;
+}
+
+static vector int __ATTRS_o_ai vec_andc(vector int __a, vector int __b) {
+ return __a & ~__b;
+}
+
+static vector int __ATTRS_o_ai vec_andc(vector bool int __a, vector int __b) {
+ return (vector int)__a & ~__b;
+}
+
+static vector int __ATTRS_o_ai vec_andc(vector int __a, vector bool int __b) {
+ return __a & ~(vector int)__b;
+}
+
+static vector unsigned int __ATTRS_o_ai vec_andc(vector unsigned int __a,
+ vector unsigned int __b) {
+ return __a & ~__b;
+}
+
+static vector unsigned int __ATTRS_o_ai vec_andc(vector bool int __a,
+ vector unsigned int __b) {
+ return (vector unsigned int)__a & ~__b;
+}
+
+static vector unsigned int __ATTRS_o_ai vec_andc(vector unsigned int __a,
+ vector bool int __b) {
+ return __a & ~(vector unsigned int)__b;
+}
+
+static vector bool int __ATTRS_o_ai vec_andc(vector bool int __a,
+ vector bool int __b) {
+ return __a & ~__b;
+}
+
+static vector float __ATTRS_o_ai vec_andc(vector float __a, vector float __b) {
+ vector unsigned int __res =
+ (vector unsigned int)__a & ~(vector unsigned int)__b;
+ return (vector float)__res;
+}
+
+static vector float __ATTRS_o_ai vec_andc(vector bool int __a,
+ vector float __b) {
+ vector unsigned int __res =
+ (vector unsigned int)__a & ~(vector unsigned int)__b;
+ return (vector float)__res;
+}
+
+static vector float __ATTRS_o_ai vec_andc(vector float __a,
+ vector bool int __b) {
+ vector unsigned int __res =
+ (vector unsigned int)__a & ~(vector unsigned int)__b;
+ return (vector float)__res;
+}
+
+#ifdef __VSX__
+static vector double __ATTRS_o_ai
+vec_andc(vector bool long long __a, vector double __b) {
+ vector unsigned long long __res =
+ (vector unsigned long long)__a & ~(vector unsigned long long)__b;
+ return (vector double)__res;
+}
+
+static vector double __ATTRS_o_ai
+vec_andc(vector double __a, vector bool long long __b) {
+ vector unsigned long long __res =
+ (vector unsigned long long)__a & ~(vector unsigned long long)__b;
+ return (vector double)__res;
+}
+
+static vector double __ATTRS_o_ai
+vec_andc(vector double __a, vector double __b) {
+ vector unsigned long long __res =
+ (vector unsigned long long)__a & ~(vector unsigned long long)__b;
+ return (vector double)__res;
+}
+
+static vector signed long long __ATTRS_o_ai
+vec_andc(vector signed long long __a, vector signed long long __b) {
+ return __a & ~__b;
+}
+
+static vector signed long long __ATTRS_o_ai
+vec_andc(vector bool long long __a, vector signed long long __b) {
+ return (vector signed long long)__a & ~__b;
+}
+
+static vector signed long long __ATTRS_o_ai
+vec_andc(vector signed long long __a, vector bool long long __b) {
+ return __a & ~(vector signed long long)__b;
+}
+
+static vector unsigned long long __ATTRS_o_ai
+vec_andc(vector unsigned long long __a, vector unsigned long long __b) {
+ return __a & ~__b;
+}
+
+static vector unsigned long long __ATTRS_o_ai
+vec_andc(vector bool long long __a, vector unsigned long long __b) {
+ return (vector unsigned long long)__a & ~__b;
+}
+
+static vector unsigned long long __ATTRS_o_ai
+vec_andc(vector unsigned long long __a, vector bool long long __b) {
+ return __a & ~(vector unsigned long long)__b;
+}
+
+static vector bool long long __ATTRS_o_ai vec_andc(vector bool long long __a,
+ vector bool long long __b) {
+ return __a & ~__b;
+}
+#endif
+
+/* vec_vandc */
+
+static vector signed char __ATTRS_o_ai vec_vandc(vector signed char __a,
+ vector signed char __b) {
+ return __a & ~__b;
+}
+
+static vector signed char __ATTRS_o_ai vec_vandc(vector bool char __a,
+ vector signed char __b) {
+ return (vector signed char)__a & ~__b;
+}
+
+static vector signed char __ATTRS_o_ai vec_vandc(vector signed char __a,
+ vector bool char __b) {
+ return __a & ~(vector signed char)__b;
+}
+
+static vector unsigned char __ATTRS_o_ai vec_vandc(vector unsigned char __a,
+ vector unsigned char __b) {
+ return __a & ~__b;
+}
+
+static vector unsigned char __ATTRS_o_ai vec_vandc(vector bool char __a,
+ vector unsigned char __b) {
+ return (vector unsigned char)__a & ~__b;
+}
+
+static vector unsigned char __ATTRS_o_ai vec_vandc(vector unsigned char __a,
+ vector bool char __b) {
+ return __a & ~(vector unsigned char)__b;
+}
+
+static vector bool char __ATTRS_o_ai vec_vandc(vector bool char __a,
+ vector bool char __b) {
+ return __a & ~__b;
+}
+
+static vector short __ATTRS_o_ai vec_vandc(vector short __a, vector short __b) {
+ return __a & ~__b;
+}
+
+static vector short __ATTRS_o_ai vec_vandc(vector bool short __a,
+ vector short __b) {
+ return (vector short)__a & ~__b;
+}
+
+static vector short __ATTRS_o_ai vec_vandc(vector short __a,
+ vector bool short __b) {
+ return __a & ~(vector short)__b;
+}
+
+static vector unsigned short __ATTRS_o_ai vec_vandc(vector unsigned short __a,
+ vector unsigned short __b) {
+ return __a & ~__b;
+}
+
+static vector unsigned short __ATTRS_o_ai vec_vandc(vector bool short __a,
+ vector unsigned short __b) {
+ return (vector unsigned short)__a & ~__b;
+}
+
+static vector unsigned short __ATTRS_o_ai vec_vandc(vector unsigned short __a,
+ vector bool short __b) {
+ return __a & ~(vector unsigned short)__b;
+}
+
+static vector bool short __ATTRS_o_ai vec_vandc(vector bool short __a,
+ vector bool short __b) {
+ return __a & ~__b;
+}
+
+static vector int __ATTRS_o_ai vec_vandc(vector int __a, vector int __b) {
+ return __a & ~__b;
+}
+
+static vector int __ATTRS_o_ai vec_vandc(vector bool int __a, vector int __b) {
+ return (vector int)__a & ~__b;
+}
+
+static vector int __ATTRS_o_ai vec_vandc(vector int __a, vector bool int __b) {
+ return __a & ~(vector int)__b;
+}
+
+static vector unsigned int __ATTRS_o_ai vec_vandc(vector unsigned int __a,
+ vector unsigned int __b) {
+ return __a & ~__b;
+}
+
+static vector unsigned int __ATTRS_o_ai vec_vandc(vector bool int __a,
+ vector unsigned int __b) {
+ return (vector unsigned int)__a & ~__b;
+}
+
+static vector unsigned int __ATTRS_o_ai vec_vandc(vector unsigned int __a,
+ vector bool int __b) {
+ return __a & ~(vector unsigned int)__b;
+}
+
+static vector bool int __ATTRS_o_ai vec_vandc(vector bool int __a,
+ vector bool int __b) {
+ return __a & ~__b;
+}
+
+static vector float __ATTRS_o_ai vec_vandc(vector float __a, vector float __b) {
+ vector unsigned int __res =
+ (vector unsigned int)__a & ~(vector unsigned int)__b;
+ return (vector float)__res;
+}
+
+static vector float __ATTRS_o_ai vec_vandc(vector bool int __a,
+ vector float __b) {
+ vector unsigned int __res =
+ (vector unsigned int)__a & ~(vector unsigned int)__b;
+ return (vector float)__res;
+}
+
+static vector float __ATTRS_o_ai vec_vandc(vector float __a,
+ vector bool int __b) {
+ vector unsigned int __res =
+ (vector unsigned int)__a & ~(vector unsigned int)__b;
+ return (vector float)__res;
+}
+
+#ifdef __VSX__
+static vector signed long long __ATTRS_o_ai
+vec_vandc(vector signed long long __a, vector signed long long __b) {
+ return __a & ~__b;
+}
+
+static vector signed long long __ATTRS_o_ai
+vec_vandc(vector bool long long __a, vector signed long long __b) {
+ return (vector signed long long)__a & ~__b;
+}
+
+static vector signed long long __ATTRS_o_ai
+vec_vandc(vector signed long long __a, vector bool long long __b) {
+ return __a & ~(vector signed long long)__b;
+}
+
+static vector unsigned long long __ATTRS_o_ai
+vec_vandc(vector unsigned long long __a, vector unsigned long long __b) {
+ return __a & ~__b;
+}
+
+static vector unsigned long long __ATTRS_o_ai
+vec_vandc(vector bool long long __a, vector unsigned long long __b) {
+ return (vector unsigned long long)__a & ~__b;
+}
+
+static vector unsigned long long __ATTRS_o_ai
+vec_vandc(vector unsigned long long __a, vector bool long long __b) {
+ return __a & ~(vector unsigned long long)__b;
+}
+
+static vector bool long long __ATTRS_o_ai vec_vandc(vector bool long long __a,
+ vector bool long long __b) {
+ return __a & ~__b;
+}
+#endif
+
+/* vec_avg */
+
+static vector signed char __ATTRS_o_ai vec_avg(vector signed char __a,
+ vector signed char __b) {
+ return __builtin_altivec_vavgsb(__a, __b);
+}
+
+static vector unsigned char __ATTRS_o_ai vec_avg(vector unsigned char __a,
+ vector unsigned char __b) {
+ return __builtin_altivec_vavgub(__a, __b);
+}
+
+static vector short __ATTRS_o_ai vec_avg(vector short __a, vector short __b) {
+ return __builtin_altivec_vavgsh(__a, __b);
+}
+
+static vector unsigned short __ATTRS_o_ai vec_avg(vector unsigned short __a,
+ vector unsigned short __b) {
+ return __builtin_altivec_vavguh(__a, __b);
+}
+
+static vector int __ATTRS_o_ai vec_avg(vector int __a, vector int __b) {
+ return __builtin_altivec_vavgsw(__a, __b);
+}
+
+static vector unsigned int __ATTRS_o_ai vec_avg(vector unsigned int __a,
+ vector unsigned int __b) {
+ return __builtin_altivec_vavguw(__a, __b);
+}
+
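+/* Illustrative note (not part of the upstream header): the vavg family
+   computes a rounded average, (__a + __b + 1) >> 1, in a widened
+   intermediate so the sum cannot overflow:
+
+     vec_avg((vector unsigned int)(1), (vector unsigned int)(2));
+     // every element is 2, not 1
+*/
+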
+/* vec_vavgsb */
+
+static vector signed char __attribute__((__always_inline__))
+vec_vavgsb(vector signed char __a, vector signed char __b) {
+ return __builtin_altivec_vavgsb(__a, __b);
+}
+
+/* vec_vavgub */
+
+static vector unsigned char __attribute__((__always_inline__))
+vec_vavgub(vector unsigned char __a, vector unsigned char __b) {
+ return __builtin_altivec_vavgub(__a, __b);
+}
+
+/* vec_vavgsh */
+
+static vector short __attribute__((__always_inline__))
+vec_vavgsh(vector short __a, vector short __b) {
+ return __builtin_altivec_vavgsh(__a, __b);
+}
+
+/* vec_vavguh */
+
+static vector unsigned short __attribute__((__always_inline__))
+vec_vavguh(vector unsigned short __a, vector unsigned short __b) {
+ return __builtin_altivec_vavguh(__a, __b);
+}
+
+/* vec_vavgsw */
+
+static vector int __attribute__((__always_inline__))
+vec_vavgsw(vector int __a, vector int __b) {
+ return __builtin_altivec_vavgsw(__a, __b);
+}
+
+/* vec_vavguw */
+
+static vector unsigned int __attribute__((__always_inline__))
+vec_vavguw(vector unsigned int __a, vector unsigned int __b) {
+ return __builtin_altivec_vavguw(__a, __b);
+}
+
+/* vec_ceil */
+
+static vector float __ATTRS_o_ai vec_ceil(vector float __a) {
+#ifdef __VSX__
+ return __builtin_vsx_xvrspip(__a);
+#else
+ return __builtin_altivec_vrfip(__a);
+#endif
+}
+
+#ifdef __VSX__
+static vector double __ATTRS_o_ai vec_ceil(vector double __a) {
+ return __builtin_vsx_xvrdpip(__a);
+}
+#endif
+
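+/* Illustrative note (not part of the upstream header): vec_ceil rounds
+   each element to an integral value toward +infinity, e.g.
+   vec_ceil((vector float)(1.25f)) yields 2.0f in every lane; vec_floor
+   further below is the toward-minus-infinity counterpart. */
+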
+/* vec_vrfip */
+
+static vector float __attribute__((__always_inline__))
+vec_vrfip(vector float __a) {
+ return __builtin_altivec_vrfip(__a);
+}
+
+/* vec_cmpb */
+
+static vector int __attribute__((__always_inline__))
+vec_cmpb(vector float __a, vector float __b) {
+ return __builtin_altivec_vcmpbfp(__a, __b);
+}
+
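+/* Illustrative note (not part of the upstream header): vcmpbfp is a
+   bounds test; a result element is zero exactly when
+   -__b[i] <= __a[i] <= __b[i], and has one or both of its top two bits
+   set when __a[i] falls outside that range. */
+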
+/* vec_vcmpbfp */
+
+static vector int __attribute__((__always_inline__))
+vec_vcmpbfp(vector float __a, vector float __b) {
+ return __builtin_altivec_vcmpbfp(__a, __b);
+}
+
+/* vec_cmpeq */
+
+static vector bool char __ATTRS_o_ai vec_cmpeq(vector signed char __a,
+ vector signed char __b) {
+ return (vector bool char)__builtin_altivec_vcmpequb((vector char)__a,
+ (vector char)__b);
+}
+
+static vector bool char __ATTRS_o_ai vec_cmpeq(vector unsigned char __a,
+ vector unsigned char __b) {
+ return (vector bool char)__builtin_altivec_vcmpequb((vector char)__a,
+ (vector char)__b);
+}
+
+static vector bool short __ATTRS_o_ai vec_cmpeq(vector short __a,
+ vector short __b) {
+ return (vector bool short)__builtin_altivec_vcmpequh(__a, __b);
+}
+
+static vector bool short __ATTRS_o_ai vec_cmpeq(vector unsigned short __a,
+ vector unsigned short __b) {
+ return (vector bool short)__builtin_altivec_vcmpequh((vector short)__a,
+ (vector short)__b);
+}
+
+static vector bool int __ATTRS_o_ai vec_cmpeq(vector int __a, vector int __b) {
+ return (vector bool int)__builtin_altivec_vcmpequw(__a, __b);
+}
+
+static vector bool int __ATTRS_o_ai vec_cmpeq(vector unsigned int __a,
+ vector unsigned int __b) {
+ return (vector bool int)__builtin_altivec_vcmpequw((vector int)__a,
+ (vector int)__b);
+}
+
+#ifdef __POWER8_VECTOR__
+static vector bool long long __ATTRS_o_ai
+vec_cmpeq(vector signed long long __a, vector signed long long __b) {
+ return (vector bool long long)__builtin_altivec_vcmpequd(__a, __b);
+}
+
+static vector bool long long __ATTRS_o_ai
+vec_cmpeq(vector unsigned long long __a, vector unsigned long long __b) {
+ return (vector bool long long)__builtin_altivec_vcmpequd(
+ (vector long long)__a, (vector long long)__b);
+}
+#endif
+
+static vector bool int __ATTRS_o_ai vec_cmpeq(vector float __a,
+ vector float __b) {
+#ifdef __VSX__
+ return (vector bool int)__builtin_vsx_xvcmpeqsp(__a, __b);
+#else
+ return (vector bool int)__builtin_altivec_vcmpeqfp(__a, __b);
+#endif
+}
+
+#ifdef __VSX__
+static vector bool long long __ATTRS_o_ai
+vec_cmpeq(vector double __a, vector double __b) {
+ return (vector bool long long)__builtin_vsx_xvcmpeqdp(__a, __b);
+}
+#endif
+
+/* vec_cmpgt */
+
+static vector bool char __ATTRS_o_ai vec_cmpgt(vector signed char __a,
+ vector signed char __b) {
+ return (vector bool char)__builtin_altivec_vcmpgtsb(__a, __b);
+}
+
+static vector bool char __ATTRS_o_ai vec_cmpgt(vector unsigned char __a,
+ vector unsigned char __b) {
+ return (vector bool char)__builtin_altivec_vcmpgtub(__a, __b);
+}
+
+static vector bool short __ATTRS_o_ai vec_cmpgt(vector short __a,
+ vector short __b) {
+ return (vector bool short)__builtin_altivec_vcmpgtsh(__a, __b);
+}
+
+static vector bool short __ATTRS_o_ai vec_cmpgt(vector unsigned short __a,
+ vector unsigned short __b) {
+ return (vector bool short)__builtin_altivec_vcmpgtuh(__a, __b);
+}
+
+static vector bool int __ATTRS_o_ai vec_cmpgt(vector int __a, vector int __b) {
+ return (vector bool int)__builtin_altivec_vcmpgtsw(__a, __b);
+}
+
+static vector bool int __ATTRS_o_ai vec_cmpgt(vector unsigned int __a,
+ vector unsigned int __b) {
+ return (vector bool int)__builtin_altivec_vcmpgtuw(__a, __b);
+}
+
+#ifdef __POWER8_VECTOR__
+static vector bool long long __ATTRS_o_ai
+vec_cmpgt(vector signed long long __a, vector signed long long __b) {
+ return (vector bool long long)__builtin_altivec_vcmpgtsd(__a, __b);
+}
+
+static vector bool long long __ATTRS_o_ai
+vec_cmpgt(vector unsigned long long __a, vector unsigned long long __b) {
+ return (vector bool long long)__builtin_altivec_vcmpgtud(__a, __b);
+}
+#endif
+
+static vector bool int __ATTRS_o_ai vec_cmpgt(vector float __a,
+ vector float __b) {
+#ifdef __VSX__
+ return (vector bool int)__builtin_vsx_xvcmpgtsp(__a, __b);
+#else
+ return (vector bool int)__builtin_altivec_vcmpgtfp(__a, __b);
+#endif
+}
+
+#ifdef __VSX__
+static vector bool long long __ATTRS_o_ai
+vec_cmpgt(vector double __a, vector double __b) {
+ return (vector bool long long)__builtin_vsx_xvcmpgtdp(__a, __b);
+}
+#endif
+
+/* vec_cmpge */
+
+static vector bool char __ATTRS_o_ai
+vec_cmpge(vector signed char __a, vector signed char __b) {
+ return ~(vec_cmpgt(__b, __a));
+}
+
+static vector bool char __ATTRS_o_ai
+vec_cmpge(vector unsigned char __a, vector unsigned char __b) {
+ return ~(vec_cmpgt(__b, __a));
+}
+
+static vector bool short __ATTRS_o_ai
+vec_cmpge(vector signed short __a, vector signed short __b) {
+ return ~(vec_cmpgt(__b, __a));
+}
+
+static vector bool short __ATTRS_o_ai
+vec_cmpge(vector unsigned short __a, vector unsigned short __b) {
+ return ~(vec_cmpgt(__b, __a));
+}
+
+static vector bool int __ATTRS_o_ai
+vec_cmpge(vector signed int __a, vector signed int __b) {
+ return ~(vec_cmpgt(__b, __a));
+}
+
+static vector bool int __ATTRS_o_ai
+vec_cmpge(vector unsigned int __a, vector unsigned int __b) {
+ return ~(vec_cmpgt(__b, __a));
+}
+
+static vector bool int __ATTRS_o_ai
+vec_cmpge(vector float __a, vector float __b) {
+#ifdef __VSX__
+ return (vector bool int)__builtin_vsx_xvcmpgesp(__a, __b);
+#else
+ return (vector bool int)__builtin_altivec_vcmpgefp(__a, __b);
+#endif
+}
+
+#ifdef __VSX__
+static vector bool long long __ATTRS_o_ai
+vec_cmpge(vector double __a, vector double __b) {
+ return (vector bool long long)__builtin_vsx_xvcmpgedp(__a, __b);
+}
+#endif
+
+#ifdef __POWER8_VECTOR__
+static vector bool long long __ATTRS_o_ai
+vec_cmpge(vector signed long long __a, vector signed long long __b) {
+ return ~(vec_cmpgt(__b, __a));
+}
+
+static vector bool long long __ATTRS_o_ai
+vec_cmpge(vector unsigned long long __a, vector unsigned long long __b) {
+ return ~(vec_cmpgt(__b, __a));
+}
+#endif
+
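+/* Illustrative note (not part of the upstream header): for the integer
+   overloads, __a >= __b is exactly !(__b > __a), so vec_cmpge is the
+   bitwise complement of vec_cmpgt with swapped operands; the float and
+   double overloads use a native compare instead, presumably because
+   that identity breaks down when an element is NaN. */
+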
+/* vec_vcmpgefp */
+
+static vector bool int __attribute__((__always_inline__))
+vec_vcmpgefp(vector float __a, vector float __b) {
+ return (vector bool int)__builtin_altivec_vcmpgefp(__a, __b);
+}
+
+/* vec_vcmpgtsb */
+
+static vector bool char __attribute__((__always_inline__))
+vec_vcmpgtsb(vector signed char __a, vector signed char __b) {
+ return (vector bool char)__builtin_altivec_vcmpgtsb(__a, __b);
+}
+
+/* vec_vcmpgtub */
+
+static vector bool char __attribute__((__always_inline__))
+vec_vcmpgtub(vector unsigned char __a, vector unsigned char __b) {
+ return (vector bool char)__builtin_altivec_vcmpgtub(__a, __b);
+}
+
+/* vec_vcmpgtsh */
+
+static vector bool short __attribute__((__always_inline__))
+vec_vcmpgtsh(vector short __a, vector short __b) {
+ return (vector bool short)__builtin_altivec_vcmpgtsh(__a, __b);
+}
+
+/* vec_vcmpgtuh */
+
+static vector bool short __attribute__((__always_inline__))
+vec_vcmpgtuh(vector unsigned short __a, vector unsigned short __b) {
+ return (vector bool short)__builtin_altivec_vcmpgtuh(__a, __b);
+}
+
+/* vec_vcmpgtsw */
+
+static vector bool int __attribute__((__always_inline__))
+vec_vcmpgtsw(vector int __a, vector int __b) {
+ return (vector bool int)__builtin_altivec_vcmpgtsw(__a, __b);
+}
+
+/* vec_vcmpgtuw */
+
+static vector bool int __attribute__((__always_inline__))
+vec_vcmpgtuw(vector unsigned int __a, vector unsigned int __b) {
+ return (vector bool int)__builtin_altivec_vcmpgtuw(__a, __b);
+}
+
+/* vec_vcmpgtfp */
+
+static vector bool int __attribute__((__always_inline__))
+vec_vcmpgtfp(vector float __a, vector float __b) {
+ return (vector bool int)__builtin_altivec_vcmpgtfp(__a, __b);
+}
+
+/* vec_cmple */
+
+static vector bool char __ATTRS_o_ai
+vec_cmple(vector signed char __a, vector signed char __b) {
+ return vec_cmpge(__b, __a);
+}
+
+static vector bool char __ATTRS_o_ai
+vec_cmple(vector unsigned char __a, vector unsigned char __b) {
+ return vec_cmpge(__b, __a);
+}
+
+static vector bool short __ATTRS_o_ai
+vec_cmple(vector signed short __a, vector signed short __b) {
+ return vec_cmpge(__b, __a);
+}
+
+static vector bool short __ATTRS_o_ai
+vec_cmple(vector unsigned short __a, vector unsigned short __b) {
+ return vec_cmpge(__b, __a);
+}
+
+static vector bool int __ATTRS_o_ai
+vec_cmple(vector signed int __a, vector signed int __b) {
+ return vec_cmpge(__b, __a);
+}
+
+static vector bool int __ATTRS_o_ai
+vec_cmple(vector unsigned int __a, vector unsigned int __b) {
+ return vec_cmpge(__b, __a);
+}
+
+static vector bool int __ATTRS_o_ai
+vec_cmple(vector float __a, vector float __b) {
+ return vec_cmpge(__b, __a);
+}
+
+#ifdef __VSX__
+static vector bool long long __ATTRS_o_ai
+vec_cmple(vector double __a, vector double __b) {
+ return vec_cmpge(__b, __a);
+}
+#endif
+
+#ifdef __POWER8_VECTOR__
+static vector bool long long __ATTRS_o_ai
+vec_cmple(vector signed long long __a, vector signed long long __b) {
+ return vec_cmpge(__b, __a);
+}
+
+static vector bool long long __ATTRS_o_ai
+vec_cmple(vector unsigned long long __a, vector unsigned long long __b) {
+ return vec_cmpge(__b, __a);
+}
+#endif
+
+/* vec_cmplt */
+
+static vector bool char __ATTRS_o_ai vec_cmplt(vector signed char __a,
+ vector signed char __b) {
+ return vec_cmpgt(__b, __a);
+}
+
+static vector bool char __ATTRS_o_ai vec_cmplt(vector unsigned char __a,
+ vector unsigned char __b) {
+ return vec_cmpgt(__b, __a);
+}
+
+static vector bool short __ATTRS_o_ai vec_cmplt(vector short __a,
+ vector short __b) {
+ return vec_cmpgt(__b, __a);
+}
+
+static vector bool short __ATTRS_o_ai vec_cmplt(vector unsigned short __a,
+ vector unsigned short __b) {
+ return vec_cmpgt(__b, __a);
+}
+
+static vector bool int __ATTRS_o_ai vec_cmplt(vector int __a, vector int __b) {
+ return vec_cmpgt(__b, __a);
+}
+
+static vector bool int __ATTRS_o_ai vec_cmplt(vector unsigned int __a,
+ vector unsigned int __b) {
+ return vec_cmpgt(__b, __a);
+}
+
+static vector bool int __ATTRS_o_ai vec_cmplt(vector float __a,
+ vector float __b) {
+ return vec_cmpgt(__b, __a);
+}
+
+#ifdef __VSX__
+static vector bool long long __ATTRS_o_ai
+vec_cmplt(vector double __a, vector double __b) {
+ return vec_cmpgt(__b, __a);
+}
+#endif
+
+#ifdef __POWER8_VECTOR__
+static vector bool long long __ATTRS_o_ai
+vec_cmplt(vector signed long long __a, vector signed long long __b) {
+ return vec_cmpgt(__b, __a);
+}
+
+static vector bool long long __ATTRS_o_ai
+vec_cmplt(vector unsigned long long __a, vector unsigned long long __b) {
+ return vec_cmpgt(__b, __a);
+}
+
+/* vec_cntlz */
+
+static vector signed char __ATTRS_o_ai vec_cntlz(vector signed char __a) {
+ return __builtin_altivec_vclzb(__a);
+}
+static vector unsigned char __ATTRS_o_ai vec_cntlz(vector unsigned char __a) {
+ return __builtin_altivec_vclzb(__a);
+}
+static vector signed short __ATTRS_o_ai vec_cntlz(vector signed short __a) {
+ return __builtin_altivec_vclzh(__a);
+}
+static vector unsigned short __ATTRS_o_ai vec_cntlz(vector unsigned short __a) {
+ return __builtin_altivec_vclzh(__a);
+}
+static vector signed int __ATTRS_o_ai vec_cntlz(vector signed int __a) {
+ return __builtin_altivec_vclzw(__a);
+}
+static vector unsigned int __ATTRS_o_ai vec_cntlz(vector unsigned int __a) {
+ return __builtin_altivec_vclzw(__a);
+}
+static vector signed long long __ATTRS_o_ai
+vec_cntlz(vector signed long long __a) {
+ return __builtin_altivec_vclzd(__a);
+}
+static vector unsigned long long __ATTRS_o_ai
+vec_cntlz(vector unsigned long long __a) {
+ return __builtin_altivec_vclzd(__a);
+}
+#endif
+
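+/* Illustrative note (not part of the upstream header): vec_cntlz counts
+   leading zero bits per element, e.g.
+
+     vec_cntlz((vector unsigned int)(1));   // every element is 31
+     vec_cntlz((vector unsigned int)(0));   // every element is 32
+*/
+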
+/* vec_cpsgn */
+
+#ifdef __VSX__
+static vector float __ATTRS_o_ai vec_cpsgn(vector float __a, vector float __b) {
+ return __builtin_vsx_xvcpsgnsp(__a, __b);
+}
+
+static vector double __ATTRS_o_ai vec_cpsgn(vector double __a,
+ vector double __b) {
+ return __builtin_vsx_xvcpsgndp(__a, __b);
+}
+#endif
+
+/* vec_ctf */
+
+static vector float __ATTRS_o_ai vec_ctf(vector int __a, int __b) {
+ return __builtin_altivec_vcfsx(__a, __b);
+}
+
+static vector float __ATTRS_o_ai vec_ctf(vector unsigned int __a, int __b) {
+ return __builtin_altivec_vcfux((vector int)__a, __b);
+}
+
+/* vec_vcfsx */
+
+static vector float __attribute__((__always_inline__))
+vec_vcfsx(vector int __a, int __b) {
+ return __builtin_altivec_vcfsx(__a, __b);
+}
+
+/* vec_vcfux */
+
+static vector float __attribute__((__always_inline__))
+vec_vcfux(vector unsigned int __a, int __b) {
+ return __builtin_altivec_vcfux((vector int)__a, __b);
+}
+
+/* vec_cts */
+
+static vector int __attribute__((__always_inline__))
+vec_cts(vector float __a, int __b) {
+ return __builtin_altivec_vctsxs(__a, __b);
+}
+
+/* vec_vctsxs */
+
+static vector int __attribute__((__always_inline__))
+vec_vctsxs(vector float __a, int __b) {
+ return __builtin_altivec_vctsxs(__a, __b);
+}
+
+/* vec_ctu */
+
+static vector unsigned int __attribute__((__always_inline__))
+vec_ctu(vector float __a, int __b) {
+ return __builtin_altivec_vctuxs(__a, __b);
+}
+
+/* vec_vctuxs */
+
+static vector unsigned int __attribute__((__always_inline__))
+vec_vctuxs(vector float __a, int __b) {
+ return __builtin_altivec_vctuxs(__a, __b);
+}
+
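+/* Illustrative note (not part of the upstream header): the conversions
+   above are fixed-point. vec_ctf(__a, __b) yields __a / 2**__b as
+   floats; vec_cts and vec_ctu scale by 2**__b before truncating, with
+   saturation. E.g.
+
+     vec_ctf((vector int)(96), 5);       // every element is 3.0f
+     vec_cts((vector float)(3.0f), 5);   // every element is 96
+*/
+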
+/* vec_double */
+
+#ifdef __VSX__
+static vector double __ATTRS_o_ai vec_double(vector signed long long __a) {
+ vector double __ret = { __a[0], __a[1] };
+ return __ret;
+}
+
+static vector double __ATTRS_o_ai vec_double(vector unsigned long long __a) {
+ vector double __ret = { __a[0], __a[1] };
+ return __ret;
+}
+#endif
+
+/* vec_div */
+
+/* Integer vector division: the vectors are scalarized, the elements
+   divided one by one, and the results reassembled into a vector. */
+static vector signed char __ATTRS_o_ai vec_div(vector signed char __a,
+ vector signed char __b) {
+ return __a / __b;
+}
+
+static vector unsigned char __ATTRS_o_ai vec_div(vector unsigned char __a,
+ vector unsigned char __b) {
+ return __a / __b;
+}
+
+static vector signed short __ATTRS_o_ai vec_div(vector signed short __a,
+ vector signed short __b) {
+ return __a / __b;
+}
+
+static vector unsigned short __ATTRS_o_ai vec_div(vector unsigned short __a,
+ vector unsigned short __b) {
+ return __a / __b;
+}
+
+static vector signed int __ATTRS_o_ai vec_div(vector signed int __a,
+ vector signed int __b) {
+ return __a / __b;
+}
+
+static vector unsigned int __ATTRS_o_ai vec_div(vector unsigned int __a,
+ vector unsigned int __b) {
+ return __a / __b;
+}
+
+#ifdef __VSX__
+static vector signed long long __ATTRS_o_ai
+vec_div(vector signed long long __a, vector signed long long __b) {
+ return __a / __b;
+}
+
+static vector unsigned long long __ATTRS_o_ai
+vec_div(vector unsigned long long __a, vector unsigned long long __b) {
+ return __a / __b;
+}
+
+static vector float __ATTRS_o_ai vec_div(vector float __a, vector float __b) {
+ return __a / __b;
+}
+
+static vector double __ATTRS_o_ai vec_div(vector double __a,
+ vector double __b) {
+ return __a / __b;
+}
+#endif
+
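+/* Illustrative example (not part of the upstream header) of the
+   scalarized integer divide described above:
+
+     vec_div((vector signed int)(10), (vector signed int)(3));
+     // every element is 3 (truncating C division)
+*/
+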
+/* vec_dss */
+
+static void __attribute__((__always_inline__)) vec_dss(int __a) {
+ __builtin_altivec_dss(__a);
+}
+
+/* vec_dssall */
+
+static void __attribute__((__always_inline__)) vec_dssall(void) {
+ __builtin_altivec_dssall();
+}
+
+/* vec_dst */
+
+static void __attribute__((__always_inline__))
+vec_dst(const void *__a, int __b, int __c) {
+ __builtin_altivec_dst(__a, __b, __c);
+}
+
+/* vec_dstst */
+
+static void __attribute__((__always_inline__))
+vec_dstst(const void *__a, int __b, int __c) {
+ __builtin_altivec_dstst(__a, __b, __c);
+}
+
+/* vec_dststt */
+
+static void __attribute__((__always_inline__))
+vec_dststt(const void *__a, int __b, int __c) {
+ __builtin_altivec_dststt(__a, __b, __c);
+}
+
+/* vec_dstt */
+
+static void __attribute__((__always_inline__))
+vec_dstt(const void *__a, int __b, int __c) {
+ __builtin_altivec_dstt(__a, __b, __c);
+}
+
+/* vec_eqv */
+
+#ifdef __POWER8_VECTOR__
+static vector signed char __ATTRS_o_ai vec_eqv(vector signed char __a,
+ vector signed char __b) {
+ return (vector signed char)__builtin_vsx_xxleqv((vector unsigned int)__a,
+ (vector unsigned int)__b);
+}
+
+static vector unsigned char __ATTRS_o_ai vec_eqv(vector unsigned char __a,
+ vector unsigned char __b) {
+ return (vector unsigned char)__builtin_vsx_xxleqv((vector unsigned int)__a,
+ (vector unsigned int)__b);
+}
+
+static vector bool char __ATTRS_o_ai vec_eqv(vector bool char __a,
+ vector bool char __b) {
+ return (vector bool char)__builtin_vsx_xxleqv((vector unsigned int)__a,
+ (vector unsigned int)__b);
+}
+
+static vector signed short __ATTRS_o_ai vec_eqv(vector signed short __a,
+ vector signed short __b) {
+ return (vector signed short)__builtin_vsx_xxleqv((vector unsigned int)__a,
+ (vector unsigned int)__b);
+}
+
+static vector unsigned short __ATTRS_o_ai vec_eqv(vector unsigned short __a,
+ vector unsigned short __b) {
+ return (vector unsigned short)__builtin_vsx_xxleqv((vector unsigned int)__a,
+ (vector unsigned int)__b);
+}
+
+static vector bool short __ATTRS_o_ai vec_eqv(vector bool short __a,
+ vector bool short __b) {
+ return (vector bool short)__builtin_vsx_xxleqv((vector unsigned int)__a,
+ (vector unsigned int)__b);
+}
+
+static vector signed int __ATTRS_o_ai vec_eqv(vector signed int __a,
+ vector signed int __b) {
+ return (vector signed int)__builtin_vsx_xxleqv((vector unsigned int)__a,
+ (vector unsigned int)__b);
+}
+
+static vector unsigned int __ATTRS_o_ai vec_eqv(vector unsigned int __a,
+ vector unsigned int __b) {
+ return __builtin_vsx_xxleqv(__a, __b);
+}
+
+static vector bool int __ATTRS_o_ai vec_eqv(vector bool int __a,
+ vector bool int __b) {
+ return (vector bool int)__builtin_vsx_xxleqv((vector unsigned int)__a,
+ (vector unsigned int)__b);
+}
+
+static vector signed long long __ATTRS_o_ai
+vec_eqv(vector signed long long __a, vector signed long long __b) {
+ return (vector signed long long)
+ __builtin_vsx_xxleqv((vector unsigned int)__a, (vector unsigned int)__b);
+}
+
+static vector unsigned long long __ATTRS_o_ai
+vec_eqv(vector unsigned long long __a, vector unsigned long long __b) {
+ return (vector unsigned long long)
+ __builtin_vsx_xxleqv((vector unsigned int)__a, (vector unsigned int)__b);
+}
+
+static vector bool long long __ATTRS_o_ai
+vec_eqv(vector bool long long __a, vector bool long long __b) {
+ return (vector bool long long)
+ __builtin_vsx_xxleqv((vector unsigned int)__a, (vector unsigned int)__b);
+}
+
+static vector float __ATTRS_o_ai vec_eqv(vector float __a, vector float __b) {
+ return (vector float)__builtin_vsx_xxleqv((vector unsigned int)__a,
+ (vector unsigned int)__b);
+}
+
+static vector double __ATTRS_o_ai vec_eqv(vector double __a,
+ vector double __b) {
+ return (vector double)__builtin_vsx_xxleqv((vector unsigned int)__a,
+ (vector unsigned int)__b);
+}
+#endif
+
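+/* Illustrative note (not part of the upstream header): xxleqv computes
+   bitwise equivalence, i.e. vec_eqv(__a, __b) == ~(__a ^ __b), so equal
+   inputs produce all-ones elements. */
+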
+/* vec_expte */
+
+static vector float __attribute__((__always_inline__))
+vec_expte(vector float __a) {
+ return __builtin_altivec_vexptefp(__a);
+}
+
+/* vec_vexptefp */
+
+static vector float __attribute__((__always_inline__))
+vec_vexptefp(vector float __a) {
+ return __builtin_altivec_vexptefp(__a);
+}
+
+/* vec_floor */
+
+static vector float __ATTRS_o_ai vec_floor(vector float __a) {
+#ifdef __VSX__
+ return __builtin_vsx_xvrspim(__a);
+#else
+ return __builtin_altivec_vrfim(__a);
+#endif
+}
+
+#ifdef __VSX__
+static vector double __ATTRS_o_ai vec_floor(vector double __a) {
+ return __builtin_vsx_xvrdpim(__a);
+}
+#endif
+
+/* vec_vrfim */
+
+static vector float __attribute__((__always_inline__))
+vec_vrfim(vector float __a) {
+ return __builtin_altivec_vrfim(__a);
+}
+
+/* vec_ld */
+
+static vector signed char __ATTRS_o_ai vec_ld(int __a,
+ const vector signed char *__b) {
+ return (vector signed char)__builtin_altivec_lvx(__a, __b);
+}
+
+static vector signed char __ATTRS_o_ai vec_ld(int __a, const signed char *__b) {
+ return (vector signed char)__builtin_altivec_lvx(__a, __b);
+}
+
+static vector unsigned char __ATTRS_o_ai
+vec_ld(int __a, const vector unsigned char *__b) {
+ return (vector unsigned char)__builtin_altivec_lvx(__a, __b);
+}
+
+static vector unsigned char __ATTRS_o_ai vec_ld(int __a,
+ const unsigned char *__b) {
+ return (vector unsigned char)__builtin_altivec_lvx(__a, __b);
+}
+
+static vector bool char __ATTRS_o_ai vec_ld(int __a,
+ const vector bool char *__b) {
+ return (vector bool char)__builtin_altivec_lvx(__a, __b);
+}
+
+static vector short __ATTRS_o_ai vec_ld(int __a, const vector short *__b) {
+ return (vector short)__builtin_altivec_lvx(__a, __b);
+}
+
+static vector short __ATTRS_o_ai vec_ld(int __a, const short *__b) {
+ return (vector short)__builtin_altivec_lvx(__a, __b);
+}
+
+static vector unsigned short __ATTRS_o_ai
+vec_ld(int __a, const vector unsigned short *__b) {
+ return (vector unsigned short)__builtin_altivec_lvx(__a, __b);
+}
+
+static vector unsigned short __ATTRS_o_ai vec_ld(int __a,
+ const unsigned short *__b) {
+ return (vector unsigned short)__builtin_altivec_lvx(__a, __b);
+}
+
+static vector bool short __ATTRS_o_ai vec_ld(int __a,
+ const vector bool short *__b) {
+ return (vector bool short)__builtin_altivec_lvx(__a, __b);
+}
+
+static vector pixel __ATTRS_o_ai vec_ld(int __a, const vector pixel *__b) {
+ return (vector pixel)__builtin_altivec_lvx(__a, __b);
+}
+
+static vector int __ATTRS_o_ai vec_ld(int __a, const vector int *__b) {
+ return (vector int)__builtin_altivec_lvx(__a, __b);
+}
+
+static vector int __ATTRS_o_ai vec_ld(int __a, const int *__b) {
+ return (vector int)__builtin_altivec_lvx(__a, __b);
+}
+
+static vector unsigned int __ATTRS_o_ai vec_ld(int __a,
+ const vector unsigned int *__b) {
+ return (vector unsigned int)__builtin_altivec_lvx(__a, __b);
+}
+
+static vector unsigned int __ATTRS_o_ai vec_ld(int __a,
+ const unsigned int *__b) {
+ return (vector unsigned int)__builtin_altivec_lvx(__a, __b);
+}
+
+static vector bool int __ATTRS_o_ai vec_ld(int __a,
+ const vector bool int *__b) {
+ return (vector bool int)__builtin_altivec_lvx(__a, __b);
+}
+
+static vector float __ATTRS_o_ai vec_ld(int __a, const vector float *__b) {
+ return (vector float)__builtin_altivec_lvx(__a, __b);
+}
+
+static vector float __ATTRS_o_ai vec_ld(int __a, const float *__b) {
+ return (vector float)__builtin_altivec_lvx(__a, __b);
+}
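+
+/* Usage sketch (illustrative): vec_ld is a 16-byte aligned load; the low
+   four bits of the effective address (__b + __a) are ignored, so it cannot
+   by itself read from a misaligned pointer:
+
+     float buf[8] __attribute__((aligned(16)));
+     vector float v = vec_ld(0, buf);   // loads buf[0..3]
+     vector float w = vec_ld(16, buf);  // loads buf[4..7]
+*/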
+
+/* vec_lvx */
+
+static vector signed char __ATTRS_o_ai vec_lvx(int __a,
+ const vector signed char *__b) {
+ return (vector signed char)__builtin_altivec_lvx(__a, __b);
+}
+
+static vector signed char __ATTRS_o_ai vec_lvx(int __a,
+ const signed char *__b) {
+ return (vector signed char)__builtin_altivec_lvx(__a, __b);
+}
+
+static vector unsigned char __ATTRS_o_ai
+vec_lvx(int __a, const vector unsigned char *__b) {
+ return (vector unsigned char)__builtin_altivec_lvx(__a, __b);
+}
+
+static vector unsigned char __ATTRS_o_ai vec_lvx(int __a,
+ const unsigned char *__b) {
+ return (vector unsigned char)__builtin_altivec_lvx(__a, __b);
+}
+
+static vector bool char __ATTRS_o_ai vec_lvx(int __a,
+ const vector bool char *__b) {
+ return (vector bool char)__builtin_altivec_lvx(__a, __b);
+}
+
+static vector short __ATTRS_o_ai vec_lvx(int __a, const vector short *__b) {
+ return (vector short)__builtin_altivec_lvx(__a, __b);
+}
+
+static vector short __ATTRS_o_ai vec_lvx(int __a, const short *__b) {
+ return (vector short)__builtin_altivec_lvx(__a, __b);
+}
+
+static vector unsigned short __ATTRS_o_ai
+vec_lvx(int __a, const vector unsigned short *__b) {
+ return (vector unsigned short)__builtin_altivec_lvx(__a, __b);
+}
+
+static vector unsigned short __ATTRS_o_ai vec_lvx(int __a,
+ const unsigned short *__b) {
+ return (vector unsigned short)__builtin_altivec_lvx(__a, __b);
+}
+
+static vector bool short __ATTRS_o_ai vec_lvx(int __a,
+ const vector bool short *__b) {
+ return (vector bool short)__builtin_altivec_lvx(__a, __b);
+}
+
+static vector pixel __ATTRS_o_ai vec_lvx(int __a, const vector pixel *__b) {
+ return (vector pixel)__builtin_altivec_lvx(__a, __b);
+}
+
+static vector int __ATTRS_o_ai vec_lvx(int __a, const vector int *__b) {
+ return (vector int)__builtin_altivec_lvx(__a, __b);
+}
+
+static vector int __ATTRS_o_ai vec_lvx(int __a, const int *__b) {
+ return (vector int)__builtin_altivec_lvx(__a, __b);
+}
+
+static vector unsigned int __ATTRS_o_ai
+vec_lvx(int __a, const vector unsigned int *__b) {
+ return (vector unsigned int)__builtin_altivec_lvx(__a, __b);
+}
+
+static vector unsigned int __ATTRS_o_ai vec_lvx(int __a,
+ const unsigned int *__b) {
+ return (vector unsigned int)__builtin_altivec_lvx(__a, __b);
+}
+
+static vector bool int __ATTRS_o_ai vec_lvx(int __a,
+ const vector bool int *__b) {
+ return (vector bool int)__builtin_altivec_lvx(__a, __b);
+}
+
+static vector float __ATTRS_o_ai vec_lvx(int __a, const vector float *__b) {
+ return (vector float)__builtin_altivec_lvx(__a, __b);
+}
+
+static vector float __ATTRS_o_ai vec_lvx(int __a, const float *__b) {
+ return (vector float)__builtin_altivec_lvx(__a, __b);
+}
+
+/* vec_lde */
+
+static vector signed char __ATTRS_o_ai vec_lde(int __a,
+ const signed char *__b) {
+ return (vector signed char)__builtin_altivec_lvebx(__a, __b);
+}
+
+static vector unsigned char __ATTRS_o_ai vec_lde(int __a,
+ const unsigned char *__b) {
+ return (vector unsigned char)__builtin_altivec_lvebx(__a, __b);
+}
+
+static vector short __ATTRS_o_ai vec_lde(int __a, const short *__b) {
+ return (vector short)__builtin_altivec_lvehx(__a, __b);
+}
+
+static vector unsigned short __ATTRS_o_ai vec_lde(int __a,
+ const unsigned short *__b) {
+ return (vector unsigned short)__builtin_altivec_lvehx(__a, __b);
+}
+
+static vector int __ATTRS_o_ai vec_lde(int __a, const int *__b) {
+ return (vector int)__builtin_altivec_lvewx(__a, __b);
+}
+
+static vector unsigned int __ATTRS_o_ai vec_lde(int __a,
+ const unsigned int *__b) {
+ return (vector unsigned int)__builtin_altivec_lvewx(__a, __b);
+}
+
+static vector float __ATTRS_o_ai vec_lde(int __a, const float *__b) {
+ return (vector float)__builtin_altivec_lvewx(__a, __b);
+}
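+
+/* Usage sketch (illustrative): vec_lde loads one element into the vector
+   slot that matches the address's offset within its 16-byte block; all
+   other elements are left undefined:
+
+     int buf[4] __attribute__((aligned(16))) = {10, 20, 30, 40};
+     vector int v = vec_lde(8, buf);  // element 2 of v holds buf[2]
+*/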
+
+/* vec_lvebx */
+
+static vector signed char __ATTRS_o_ai vec_lvebx(int __a,
+ const signed char *__b) {
+ return (vector signed char)__builtin_altivec_lvebx(__a, __b);
+}
+
+static vector unsigned char __ATTRS_o_ai vec_lvebx(int __a,
+ const unsigned char *__b) {
+ return (vector unsigned char)__builtin_altivec_lvebx(__a, __b);
+}
+
+/* vec_lvehx */
+
+static vector short __ATTRS_o_ai vec_lvehx(int __a, const short *__b) {
+ return (vector short)__builtin_altivec_lvehx(__a, __b);
+}
+
+static vector unsigned short __ATTRS_o_ai vec_lvehx(int __a,
+ const unsigned short *__b) {
+ return (vector unsigned short)__builtin_altivec_lvehx(__a, __b);
+}
+
+/* vec_lvewx */
+
+static vector int __ATTRS_o_ai vec_lvewx(int __a, const int *__b) {
+ return (vector int)__builtin_altivec_lvewx(__a, __b);
+}
+
+static vector unsigned int __ATTRS_o_ai vec_lvewx(int __a,
+ const unsigned int *__b) {
+ return (vector unsigned int)__builtin_altivec_lvewx(__a, __b);
+}
+
+static vector float __ATTRS_o_ai vec_lvewx(int __a, const float *__b) {
+ return (vector float)__builtin_altivec_lvewx(__a, __b);
+}
+
+/* vec_ldl */
+
+static vector signed char __ATTRS_o_ai vec_ldl(int __a,
+ const vector signed char *__b) {
+ return (vector signed char)__builtin_altivec_lvxl(__a, __b);
+}
+
+static vector signed char __ATTRS_o_ai vec_ldl(int __a,
+ const signed char *__b) {
+ return (vector signed char)__builtin_altivec_lvxl(__a, __b);
+}
+
+static vector unsigned char __ATTRS_o_ai
+vec_ldl(int __a, const vector unsigned char *__b) {
+ return (vector unsigned char)__builtin_altivec_lvxl(__a, __b);
+}
+
+static vector unsigned char __ATTRS_o_ai vec_ldl(int __a,
+ const unsigned char *__b) {
+ return (vector unsigned char)__builtin_altivec_lvxl(__a, __b);
+}
+
+static vector bool char __ATTRS_o_ai vec_ldl(int __a,
+ const vector bool char *__b) {
+ return (vector bool char)__builtin_altivec_lvxl(__a, __b);
+}
+
+static vector short __ATTRS_o_ai vec_ldl(int __a, const vector short *__b) {
+ return (vector short)__builtin_altivec_lvxl(__a, __b);
+}
+
+static vector short __ATTRS_o_ai vec_ldl(int __a, const short *__b) {
+ return (vector short)__builtin_altivec_lvxl(__a, __b);
+}
+
+static vector unsigned short __ATTRS_o_ai
+vec_ldl(int __a, const vector unsigned short *__b) {
+ return (vector unsigned short)__builtin_altivec_lvxl(__a, __b);
+}
+
+static vector unsigned short __ATTRS_o_ai vec_ldl(int __a,
+ const unsigned short *__b) {
+ return (vector unsigned short)__builtin_altivec_lvxl(__a, __b);
+}
+
+static vector bool short __ATTRS_o_ai vec_ldl(int __a,
+ const vector bool short *__b) {
+ return (vector bool short)__builtin_altivec_lvxl(__a, __b);
+}
+
+static vector pixel __ATTRS_o_ai vec_ldl(int __a, const vector pixel *__b) {
+ return (vector pixel)__builtin_altivec_lvxl(__a, __b);
+}
+
+static vector int __ATTRS_o_ai vec_ldl(int __a, const vector int *__b) {
+ return (vector int)__builtin_altivec_lvxl(__a, __b);
+}
+
+static vector int __ATTRS_o_ai vec_ldl(int __a, const int *__b) {
+ return (vector int)__builtin_altivec_lvxl(__a, __b);
+}
+
+static vector unsigned int __ATTRS_o_ai
+vec_ldl(int __a, const vector unsigned int *__b) {
+ return (vector unsigned int)__builtin_altivec_lvxl(__a, __b);
+}
+
+static vector unsigned int __ATTRS_o_ai vec_ldl(int __a,
+ const unsigned int *__b) {
+ return (vector unsigned int)__builtin_altivec_lvxl(__a, __b);
+}
+
+static vector bool int __ATTRS_o_ai vec_ldl(int __a,
+ const vector bool int *__b) {
+ return (vector bool int)__builtin_altivec_lvxl(__a, __b);
+}
+
+static vector float __ATTRS_o_ai vec_ldl(int __a, const vector float *__b) {
+ return (vector float)__builtin_altivec_lvxl(__a, __b);
+}
+
+static vector float __ATTRS_o_ai vec_ldl(int __a, const float *__b) {
+ return (vector float)__builtin_altivec_lvxl(__a, __b);
+}
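+
+/* Note (illustrative): vec_ldl loads like vec_ld but marks the touched
+   cache line least-recently-used, hinting that the data is transient:
+
+     vector float v = vec_ldl(0, buf);  // buf assumed 16-byte aligned
+*/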
+
+/* vec_lvxl */
+
+static vector signed char __ATTRS_o_ai vec_lvxl(int __a,
+ const vector signed char *__b) {
+ return (vector signed char)__builtin_altivec_lvxl(__a, __b);
+}
+
+static vector signed char __ATTRS_o_ai vec_lvxl(int __a,
+ const signed char *__b) {
+ return (vector signed char)__builtin_altivec_lvxl(__a, __b);
+}
+
+static vector unsigned char __ATTRS_o_ai
+vec_lvxl(int __a, const vector unsigned char *__b) {
+ return (vector unsigned char)__builtin_altivec_lvxl(__a, __b);
+}
+
+static vector unsigned char __ATTRS_o_ai vec_lvxl(int __a,
+ const unsigned char *__b) {
+ return (vector unsigned char)__builtin_altivec_lvxl(__a, __b);
+}
+
+static vector bool char __ATTRS_o_ai vec_lvxl(int __a,
+ const vector bool char *__b) {
+ return (vector bool char)__builtin_altivec_lvxl(__a, __b);
+}
+
+static vector short __ATTRS_o_ai vec_lvxl(int __a, const vector short *__b) {
+ return (vector short)__builtin_altivec_lvxl(__a, __b);
+}
+
+static vector short __ATTRS_o_ai vec_lvxl(int __a, const short *__b) {
+ return (vector short)__builtin_altivec_lvxl(__a, __b);
+}
+
+static vector unsigned short __ATTRS_o_ai
+vec_lvxl(int __a, const vector unsigned short *__b) {
+ return (vector unsigned short)__builtin_altivec_lvxl(__a, __b);
+}
+
+static vector unsigned short __ATTRS_o_ai vec_lvxl(int __a,
+ const unsigned short *__b) {
+ return (vector unsigned short)__builtin_altivec_lvxl(__a, __b);
+}
+
+static vector bool short __ATTRS_o_ai vec_lvxl(int __a,
+ const vector bool short *__b) {
+ return (vector bool short)__builtin_altivec_lvxl(__a, __b);
+}
+
+static vector pixel __ATTRS_o_ai vec_lvxl(int __a, const vector pixel *__b) {
+ return (vector pixel)__builtin_altivec_lvxl(__a, __b);
+}
+
+static vector int __ATTRS_o_ai vec_lvxl(int __a, const vector int *__b) {
+ return (vector int)__builtin_altivec_lvxl(__a, __b);
+}
+
+static vector int __ATTRS_o_ai vec_lvxl(int __a, const int *__b) {
+ return (vector int)__builtin_altivec_lvxl(__a, __b);
+}
+
+static vector unsigned int __ATTRS_o_ai
+vec_lvxl(int __a, const vector unsigned int *__b) {
+ return (vector unsigned int)__builtin_altivec_lvxl(__a, __b);
+}
+
+static vector unsigned int __ATTRS_o_ai vec_lvxl(int __a,
+ const unsigned int *__b) {
+ return (vector unsigned int)__builtin_altivec_lvxl(__a, __b);
+}
+
+static vector bool int __ATTRS_o_ai vec_lvxl(int __a,
+ const vector bool int *__b) {
+ return (vector bool int)__builtin_altivec_lvxl(__a, __b);
+}
+
+static vector float __ATTRS_o_ai vec_lvxl(int __a, const vector float *__b) {
+ return (vector float)__builtin_altivec_lvxl(__a, __b);
+}
+
+static vector float __ATTRS_o_ai vec_lvxl(int __a, const float *__b) {
+ return (vector float)__builtin_altivec_lvxl(__a, __b);
+}
+
+/* vec_loge */
+
+static vector float __attribute__((__always_inline__))
+vec_loge(vector float __a) {
+ return __builtin_altivec_vlogefp(__a);
+}
+
+/* vec_vlogefp */
+
+static vector float __attribute__((__always_inline__))
+vec_vlogefp(vector float __a) {
+ return __builtin_altivec_vlogefp(__a);
+}
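+
+/* Usage sketch (illustrative): vec_expte and vec_loge are base-2 estimate
+   operations (vexptefp ~ 2^x, vlogefp ~ log2(x)), so a rough elementwise
+   powf can be composed as 2^(y * log2(x)):
+
+     static inline vector float approx_pow(vector float x, vector float y) {
+       return vec_expte(y * vec_loge(x));  // low-precision estimate only
+     }
+*/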
+
+/* vec_lvsl */
+
+#ifdef __LITTLE_ENDIAN__
+static vector unsigned char __ATTRS_o_ai
+ __attribute__((__deprecated__("use assignment for unaligned little endian \
+loads/stores"))) vec_lvsl(int __a, const signed char *__b) {
+ vector unsigned char mask =
+ (vector unsigned char)__builtin_altivec_lvsl(__a, __b);
+ vector unsigned char reverse = {15, 14, 13, 12, 11, 10, 9, 8,
+ 7, 6, 5, 4, 3, 2, 1, 0};
+ return vec_perm(mask, mask, reverse);
+}
+#else
+static vector unsigned char __ATTRS_o_ai vec_lvsl(int __a,
+ const signed char *__b) {
+ return (vector unsigned char)__builtin_altivec_lvsl(__a, __b);
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+static vector unsigned char __ATTRS_o_ai
+ __attribute__((__deprecated__("use assignment for unaligned little endian \
+loads/stores"))) vec_lvsl(int __a, const unsigned char *__b) {
+ vector unsigned char mask =
+ (vector unsigned char)__builtin_altivec_lvsl(__a, __b);
+ vector unsigned char reverse = {15, 14, 13, 12, 11, 10, 9, 8,
+ 7, 6, 5, 4, 3, 2, 1, 0};
+ return vec_perm(mask, mask, reverse);
+}
+#else
+static vector unsigned char __ATTRS_o_ai vec_lvsl(int __a,
+ const unsigned char *__b) {
+ return (vector unsigned char)__builtin_altivec_lvsl(__a, __b);
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+static vector unsigned char __ATTRS_o_ai
+ __attribute__((__deprecated__("use assignment for unaligned little endian \
+loads/stores"))) vec_lvsl(int __a, const short *__b) {
+ vector unsigned char mask =
+ (vector unsigned char)__builtin_altivec_lvsl(__a, __b);
+ vector unsigned char reverse = {15, 14, 13, 12, 11, 10, 9, 8,
+ 7, 6, 5, 4, 3, 2, 1, 0};
+ return vec_perm(mask, mask, reverse);
+}
+#else
+static vector unsigned char __ATTRS_o_ai vec_lvsl(int __a, const short *__b) {
+ return (vector unsigned char)__builtin_altivec_lvsl(__a, __b);
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+static vector unsigned char __ATTRS_o_ai
+ __attribute__((__deprecated__("use assignment for unaligned little endian \
+loads/stores"))) vec_lvsl(int __a, const unsigned short *__b) {
+ vector unsigned char mask =
+ (vector unsigned char)__builtin_altivec_lvsl(__a, __b);
+ vector unsigned char reverse = {15, 14, 13, 12, 11, 10, 9, 8,
+ 7, 6, 5, 4, 3, 2, 1, 0};
+ return vec_perm(mask, mask, reverse);
+}
+#else
+static vector unsigned char __ATTRS_o_ai vec_lvsl(int __a,
+ const unsigned short *__b) {
+ return (vector unsigned char)__builtin_altivec_lvsl(__a, __b);
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+static vector unsigned char __ATTRS_o_ai
+ __attribute__((__deprecated__("use assignment for unaligned little endian \
+loads/stores"))) vec_lvsl(int __a, const int *__b) {
+ vector unsigned char mask =
+ (vector unsigned char)__builtin_altivec_lvsl(__a, __b);
+ vector unsigned char reverse = {15, 14, 13, 12, 11, 10, 9, 8,
+ 7, 6, 5, 4, 3, 2, 1, 0};
+ return vec_perm(mask, mask, reverse);
+}
+#else
+static vector unsigned char __ATTRS_o_ai vec_lvsl(int __a, const int *__b) {
+ return (vector unsigned char)__builtin_altivec_lvsl(__a, __b);
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+static vector unsigned char __ATTRS_o_ai
+ __attribute__((__deprecated__("use assignment for unaligned little endian \
+loads/stores"))) vec_lvsl(int __a, const unsigned int *__b) {
+ vector unsigned char mask =
+ (vector unsigned char)__builtin_altivec_lvsl(__a, __b);
+ vector unsigned char reverse = {15, 14, 13, 12, 11, 10, 9, 8,
+ 7, 6, 5, 4, 3, 2, 1, 0};
+ return vec_perm(mask, mask, reverse);
+}
+#else
+static vector unsigned char __ATTRS_o_ai vec_lvsl(int __a,
+ const unsigned int *__b) {
+ return (vector unsigned char)__builtin_altivec_lvsl(__a, __b);
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+static vector unsigned char __ATTRS_o_ai
+ __attribute__((__deprecated__("use assignment for unaligned little endian \
+loads/stores"))) vec_lvsl(int __a, const float *__b) {
+ vector unsigned char mask =
+ (vector unsigned char)__builtin_altivec_lvsl(__a, __b);
+ vector unsigned char reverse = {15, 14, 13, 12, 11, 10, 9, 8,
+ 7, 6, 5, 4, 3, 2, 1, 0};
+ return vec_perm(mask, mask, reverse);
+}
+#else
+static vector unsigned char __ATTRS_o_ai vec_lvsl(int __a, const float *__b) {
+ return (vector unsigned char)__builtin_altivec_lvsl(__a, __b);
+}
+#endif
+
+/* vec_lvsr */
+
+#ifdef __LITTLE_ENDIAN__
+static vector unsigned char __ATTRS_o_ai
+ __attribute__((__deprecated__("use assignment for unaligned little endian \
+loads/stores"))) vec_lvsr(int __a, const signed char *__b) {
+ vector unsigned char mask =
+ (vector unsigned char)__builtin_altivec_lvsr(__a, __b);
+ vector unsigned char reverse = {15, 14, 13, 12, 11, 10, 9, 8,
+ 7, 6, 5, 4, 3, 2, 1, 0};
+ return vec_perm(mask, mask, reverse);
+}
+#else
+static vector unsigned char __ATTRS_o_ai vec_lvsr(int __a,
+ const signed char *__b) {
+ return (vector unsigned char)__builtin_altivec_lvsr(__a, __b);
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+static vector unsigned char __ATTRS_o_ai
+ __attribute__((__deprecated__("use assignment for unaligned little endian \
+loads/stores"))) vec_lvsr(int __a, const unsigned char *__b) {
+ vector unsigned char mask =
+ (vector unsigned char)__builtin_altivec_lvsr(__a, __b);
+ vector unsigned char reverse = {15, 14, 13, 12, 11, 10, 9, 8,
+ 7, 6, 5, 4, 3, 2, 1, 0};
+ return vec_perm(mask, mask, reverse);
+}
+#else
+static vector unsigned char __ATTRS_o_ai vec_lvsr(int __a,
+ const unsigned char *__b) {
+ return (vector unsigned char)__builtin_altivec_lvsr(__a, __b);
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+static vector unsigned char __ATTRS_o_ai
+ __attribute__((__deprecated__("use assignment for unaligned little endian \
+loads/stores"))) vec_lvsr(int __a, const short *__b) {
+ vector unsigned char mask =
+ (vector unsigned char)__builtin_altivec_lvsr(__a, __b);
+ vector unsigned char reverse = {15, 14, 13, 12, 11, 10, 9, 8,
+ 7, 6, 5, 4, 3, 2, 1, 0};
+ return vec_perm(mask, mask, reverse);
+}
+#else
+static vector unsigned char __ATTRS_o_ai vec_lvsr(int __a, const short *__b) {
+ return (vector unsigned char)__builtin_altivec_lvsr(__a, __b);
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+static vector unsigned char __ATTRS_o_ai
+ __attribute__((__deprecated__("use assignment for unaligned little endian \
+loads/stores"))) vec_lvsr(int __a, const unsigned short *__b) {
+ vector unsigned char mask =
+ (vector unsigned char)__builtin_altivec_lvsr(__a, __b);
+ vector unsigned char reverse = {15, 14, 13, 12, 11, 10, 9, 8,
+ 7, 6, 5, 4, 3, 2, 1, 0};
+ return vec_perm(mask, mask, reverse);
+}
+#else
+static vector unsigned char __ATTRS_o_ai vec_lvsr(int __a,
+ const unsigned short *__b) {
+ return (vector unsigned char)__builtin_altivec_lvsr(__a, __b);
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+static vector unsigned char __ATTRS_o_ai
+ __attribute__((__deprecated__("use assignment for unaligned little endian \
+loads/stores"))) vec_lvsr(int __a, const int *__b) {
+ vector unsigned char mask =
+ (vector unsigned char)__builtin_altivec_lvsr(__a, __b);
+ vector unsigned char reverse = {15, 14, 13, 12, 11, 10, 9, 8,
+ 7, 6, 5, 4, 3, 2, 1, 0};
+ return vec_perm(mask, mask, reverse);
+}
+#else
+static vector unsigned char __ATTRS_o_ai vec_lvsr(int __a, const int *__b) {
+ return (vector unsigned char)__builtin_altivec_lvsr(__a, __b);
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+static vector unsigned char __ATTRS_o_ai
+ __attribute__((__deprecated__("use assignment for unaligned little endian \
+loads/stores"))) vec_lvsr(int __a, const unsigned int *__b) {
+ vector unsigned char mask =
+ (vector unsigned char)__builtin_altivec_lvsr(__a, __b);
+ vector unsigned char reverse = {15, 14, 13, 12, 11, 10, 9, 8,
+ 7, 6, 5, 4, 3, 2, 1, 0};
+ return vec_perm(mask, mask, reverse);
+}
+#else
+static vector unsigned char __ATTRS_o_ai vec_lvsr(int __a,
+ const unsigned int *__b) {
+ return (vector unsigned char)__builtin_altivec_lvsr(__a, __b);
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+static vector unsigned char __ATTRS_o_ai
+ __attribute__((__deprecated__("use assignment for unaligned little endian \
+loads/stores"))) vec_lvsr(int __a, const float *__b) {
+ vector unsigned char mask =
+ (vector unsigned char)__builtin_altivec_lvsr(__a, __b);
+ vector unsigned char reverse = {15, 14, 13, 12, 11, 10, 9, 8,
+ 7, 6, 5, 4, 3, 2, 1, 0};
+ return vec_perm(mask, mask, reverse);
+}
+#else
+static vector unsigned char __ATTRS_o_ai vec_lvsr(int __a, const float *__b) {
+ return (vector unsigned char)__builtin_altivec_lvsr(__a, __b);
+}
+#endif
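+
+/* Usage sketch (illustrative): on big-endian targets vec_lvsl feeds the
+   classic permute-based unaligned load idiom:
+
+     vector unsigned char lo = vec_ld(0, p);   // p may be misaligned
+     vector unsigned char hi = vec_ld(16, p);
+     vector unsigned char v = vec_perm(lo, hi, vec_lvsl(0, p));
+
+   On little endian the idiom is deprecated, as the attributes above note;
+   a plain assignment through a vector pointer is preferred instead.
+*/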
+
+/* vec_madd */
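+
+/* Forward declarations for vec_mladd; the integer vec_madd overloads below
+   are implemented in terms of it. */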
+static vector signed short __ATTRS_o_ai
+vec_mladd(vector signed short, vector signed short, vector signed short);
+static vector signed short __ATTRS_o_ai
+vec_mladd(vector signed short, vector unsigned short, vector unsigned short);
+static vector signed short __ATTRS_o_ai
+vec_mladd(vector unsigned short, vector signed short, vector signed short);
+static vector unsigned short __ATTRS_o_ai
+vec_mladd(vector unsigned short, vector unsigned short, vector unsigned short);
+
+static vector signed short __ATTRS_o_ai
+vec_madd(vector signed short __a, vector signed short __b,
+ vector signed short __c) {
+ return vec_mladd(__a, __b, __c);
+}
+
+static vector signed short __ATTRS_o_ai
+vec_madd(vector signed short __a, vector unsigned short __b,
+ vector unsigned short __c) {
+ return vec_mladd(__a, __b, __c);
+}
+
+static vector signed short __ATTRS_o_ai
+vec_madd(vector unsigned short __a, vector signed short __b,
+ vector signed short __c) {
+ return vec_mladd(__a, __b, __c);
+}
+
+static vector unsigned short __ATTRS_o_ai
+vec_madd(vector unsigned short __a, vector unsigned short __b,
+ vector unsigned short __c) {
+ return vec_mladd(__a, __b, __c);
+}
+
+static vector float __ATTRS_o_ai
+vec_madd(vector float __a, vector float __b, vector float __c) {
+#ifdef __VSX__
+ return __builtin_vsx_xvmaddasp(__a, __b, __c);
+#else
+ return __builtin_altivec_vmaddfp(__a, __b, __c);
+#endif
+}
+
+#ifdef __VSX__
+static vector double __ATTRS_o_ai
+vec_madd(vector double __a, vector double __b, vector double __c) {
+ return __builtin_vsx_xvmaddadp(__a, __b, __c);
+}
+#endif
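+
+/* Usage sketch (illustrative): the floating-point vec_madd overloads are
+   fused multiply-adds, computing a * b + c per element:
+
+     vector float acc = vec_madd(x, w, bias);  // acc[i] = x[i]*w[i] + bias[i]
+*/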
+
+/* vec_vmaddfp */
+
+static vector float __attribute__((__always_inline__))
+vec_vmaddfp(vector float __a, vector float __b, vector float __c) {
+ return __builtin_altivec_vmaddfp(__a, __b, __c);
+}
+
+/* vec_madds */
+
+static vector signed short __attribute__((__always_inline__))
+vec_madds(vector signed short __a, vector signed short __b,
+ vector signed short __c) {
+ return __builtin_altivec_vmhaddshs(__a, __b, __c);
+}
+
+/* vec_vmhaddshs */
+
+static vector signed short __attribute__((__always_inline__))
+vec_vmhaddshs(vector signed short __a, vector signed short __b,
+ vector signed short __c) {
+ return __builtin_altivec_vmhaddshs(__a, __b, __c);
+}
+
+/* vec_msub */
+
+#ifdef __VSX__
+static vector float __ATTRS_o_ai
+vec_msub(vector float __a, vector float __b, vector float __c) {
+ return __builtin_vsx_xvmsubasp(__a, __b, __c);
+}
+
+static vector double __ATTRS_o_ai
+vec_msub(vector double __a, vector double __b, vector double __c) {
+ return __builtin_vsx_xvmsubadp(__a, __b, __c);
+}
+#endif
+
+/* vec_max */
+
+static vector signed char __ATTRS_o_ai vec_max(vector signed char __a,
+ vector signed char __b) {
+ return __builtin_altivec_vmaxsb(__a, __b);
+}
+
+static vector signed char __ATTRS_o_ai vec_max(vector bool char __a,
+ vector signed char __b) {
+ return __builtin_altivec_vmaxsb((vector signed char)__a, __b);
+}
+
+static vector signed char __ATTRS_o_ai vec_max(vector signed char __a,
+ vector bool char __b) {
+ return __builtin_altivec_vmaxsb(__a, (vector signed char)__b);
+}
+
+static vector unsigned char __ATTRS_o_ai vec_max(vector unsigned char __a,
+ vector unsigned char __b) {
+ return __builtin_altivec_vmaxub(__a, __b);
+}
+
+static vector unsigned char __ATTRS_o_ai vec_max(vector bool char __a,
+ vector unsigned char __b) {
+ return __builtin_altivec_vmaxub((vector unsigned char)__a, __b);
+}
+
+static vector unsigned char __ATTRS_o_ai vec_max(vector unsigned char __a,
+ vector bool char __b) {
+ return __builtin_altivec_vmaxub(__a, (vector unsigned char)__b);
+}
+
+static vector short __ATTRS_o_ai vec_max(vector short __a, vector short __b) {
+ return __builtin_altivec_vmaxsh(__a, __b);
+}
+
+static vector short __ATTRS_o_ai vec_max(vector bool short __a,
+ vector short __b) {
+ return __builtin_altivec_vmaxsh((vector short)__a, __b);
+}
+
+static vector short __ATTRS_o_ai vec_max(vector short __a,
+ vector bool short __b) {
+ return __builtin_altivec_vmaxsh(__a, (vector short)__b);
+}
+
+static vector unsigned short __ATTRS_o_ai vec_max(vector unsigned short __a,
+ vector unsigned short __b) {
+ return __builtin_altivec_vmaxuh(__a, __b);
+}
+
+static vector unsigned short __ATTRS_o_ai vec_max(vector bool short __a,
+ vector unsigned short __b) {
+ return __builtin_altivec_vmaxuh((vector unsigned short)__a, __b);
+}
+
+static vector unsigned short __ATTRS_o_ai vec_max(vector unsigned short __a,
+ vector bool short __b) {
+ return __builtin_altivec_vmaxuh(__a, (vector unsigned short)__b);
+}
+
+static vector int __ATTRS_o_ai vec_max(vector int __a, vector int __b) {
+ return __builtin_altivec_vmaxsw(__a, __b);
+}
+
+static vector int __ATTRS_o_ai vec_max(vector bool int __a, vector int __b) {
+ return __builtin_altivec_vmaxsw((vector int)__a, __b);
+}
+
+static vector int __ATTRS_o_ai vec_max(vector int __a, vector bool int __b) {
+ return __builtin_altivec_vmaxsw(__a, (vector int)__b);
+}
+
+static vector unsigned int __ATTRS_o_ai vec_max(vector unsigned int __a,
+ vector unsigned int __b) {
+ return __builtin_altivec_vmaxuw(__a, __b);
+}
+
+static vector unsigned int __ATTRS_o_ai vec_max(vector bool int __a,
+ vector unsigned int __b) {
+ return __builtin_altivec_vmaxuw((vector unsigned int)__a, __b);
+}
+
+static vector unsigned int __ATTRS_o_ai vec_max(vector unsigned int __a,
+ vector bool int __b) {
+ return __builtin_altivec_vmaxuw(__a, (vector unsigned int)__b);
+}
+
+#ifdef __POWER8_VECTOR__
+static vector signed long long __ATTRS_o_ai
+vec_max(vector signed long long __a, vector signed long long __b) {
+ return __builtin_altivec_vmaxsd(__a, __b);
+}
+
+static vector signed long long __ATTRS_o_ai
+vec_max(vector bool long long __a, vector signed long long __b) {
+ return __builtin_altivec_vmaxsd((vector signed long long)__a, __b);
+}
+
+static vector signed long long __ATTRS_o_ai vec_max(vector signed long long __a,
+ vector bool long long __b) {
+ return __builtin_altivec_vmaxsd(__a, (vector signed long long)__b);
+}
+
+static vector unsigned long long __ATTRS_o_ai
+vec_max(vector unsigned long long __a, vector unsigned long long __b) {
+ return __builtin_altivec_vmaxud(__a, __b);
+}
+
+static vector unsigned long long __ATTRS_o_ai
+vec_max(vector bool long long __a, vector unsigned long long __b) {
+ return __builtin_altivec_vmaxud((vector unsigned long long)__a, __b);
+}
+
+static vector unsigned long long __ATTRS_o_ai
+vec_max(vector unsigned long long __a, vector bool long long __b) {
+ return __builtin_altivec_vmaxud(__a, (vector unsigned long long)__b);
+}
+#endif
+
+static vector float __ATTRS_o_ai vec_max(vector float __a, vector float __b) {
+#ifdef __VSX__
+ return __builtin_vsx_xvmaxsp(__a, __b);
+#else
+ return __builtin_altivec_vmaxfp(__a, __b);
+#endif
+}
+
+#ifdef __VSX__
+static vector double __ATTRS_o_ai vec_max(vector double __a,
+ vector double __b) {
+ return __builtin_vsx_xvmaxdp(__a, __b);
+}
+#endif
+
+/* vec_vmaxsb */
+
+static vector signed char __ATTRS_o_ai vec_vmaxsb(vector signed char __a,
+ vector signed char __b) {
+ return __builtin_altivec_vmaxsb(__a, __b);
+}
+
+static vector signed char __ATTRS_o_ai vec_vmaxsb(vector bool char __a,
+ vector signed char __b) {
+ return __builtin_altivec_vmaxsb((vector signed char)__a, __b);
+}
+
+static vector signed char __ATTRS_o_ai vec_vmaxsb(vector signed char __a,
+ vector bool char __b) {
+ return __builtin_altivec_vmaxsb(__a, (vector signed char)__b);
+}
+
+/* vec_vmaxub */
+
+static vector unsigned char __ATTRS_o_ai vec_vmaxub(vector unsigned char __a,
+ vector unsigned char __b) {
+ return __builtin_altivec_vmaxub(__a, __b);
+}
+
+static vector unsigned char __ATTRS_o_ai vec_vmaxub(vector bool char __a,
+ vector unsigned char __b) {
+ return __builtin_altivec_vmaxub((vector unsigned char)__a, __b);
+}
+
+static vector unsigned char __ATTRS_o_ai vec_vmaxub(vector unsigned char __a,
+ vector bool char __b) {
+ return __builtin_altivec_vmaxub(__a, (vector unsigned char)__b);
+}
+
+/* vec_vmaxsh */
+
+static vector short __ATTRS_o_ai vec_vmaxsh(vector short __a,
+ vector short __b) {
+ return __builtin_altivec_vmaxsh(__a, __b);
+}
+
+static vector short __ATTRS_o_ai vec_vmaxsh(vector bool short __a,
+ vector short __b) {
+ return __builtin_altivec_vmaxsh((vector short)__a, __b);
+}
+
+static vector short __ATTRS_o_ai vec_vmaxsh(vector short __a,
+ vector bool short __b) {
+ return __builtin_altivec_vmaxsh(__a, (vector short)__b);
+}
+
+/* vec_vmaxuh */
+
+static vector unsigned short __ATTRS_o_ai
+vec_vmaxuh(vector unsigned short __a, vector unsigned short __b) {
+ return __builtin_altivec_vmaxuh(__a, __b);
+}
+
+static vector unsigned short __ATTRS_o_ai
+vec_vmaxuh(vector bool short __a, vector unsigned short __b) {
+ return __builtin_altivec_vmaxuh((vector unsigned short)__a, __b);
+}
+
+static vector unsigned short __ATTRS_o_ai vec_vmaxuh(vector unsigned short __a,
+ vector bool short __b) {
+ return __builtin_altivec_vmaxuh(__a, (vector unsigned short)__b);
+}
+
+/* vec_vmaxsw */
+
+static vector int __ATTRS_o_ai vec_vmaxsw(vector int __a, vector int __b) {
+ return __builtin_altivec_vmaxsw(__a, __b);
+}
+
+static vector int __ATTRS_o_ai vec_vmaxsw(vector bool int __a, vector int __b) {
+ return __builtin_altivec_vmaxsw((vector int)__a, __b);
+}
+
+static vector int __ATTRS_o_ai vec_vmaxsw(vector int __a, vector bool int __b) {
+ return __builtin_altivec_vmaxsw(__a, (vector int)__b);
+}
+
+/* vec_vmaxuw */
+
+static vector unsigned int __ATTRS_o_ai vec_vmaxuw(vector unsigned int __a,
+ vector unsigned int __b) {
+ return __builtin_altivec_vmaxuw(__a, __b);
+}
+
+static vector unsigned int __ATTRS_o_ai vec_vmaxuw(vector bool int __a,
+ vector unsigned int __b) {
+ return __builtin_altivec_vmaxuw((vector unsigned int)__a, __b);
+}
+
+static vector unsigned int __ATTRS_o_ai vec_vmaxuw(vector unsigned int __a,
+ vector bool int __b) {
+ return __builtin_altivec_vmaxuw(__a, (vector unsigned int)__b);
+}
+
+/* vec_vmaxfp */
+
+static vector float __attribute__((__always_inline__))
+vec_vmaxfp(vector float __a, vector float __b) {
+#ifdef __VSX__
+ return __builtin_vsx_xvmaxsp(__a, __b);
+#else
+ return __builtin_altivec_vmaxfp(__a, __b);
+#endif
+}
+
+/* vec_mergeh */
+
+static vector signed char __ATTRS_o_ai vec_mergeh(vector signed char __a,
+ vector signed char __b) {
+ return vec_perm(__a, __b,
+ (vector unsigned char)(0x00, 0x10, 0x01, 0x11, 0x02, 0x12,
+ 0x03, 0x13, 0x04, 0x14, 0x05, 0x15,
+ 0x06, 0x16, 0x07, 0x17));
+}
+
+static vector unsigned char __ATTRS_o_ai vec_mergeh(vector unsigned char __a,
+ vector unsigned char __b) {
+ return vec_perm(__a, __b,
+ (vector unsigned char)(0x00, 0x10, 0x01, 0x11, 0x02, 0x12,
+ 0x03, 0x13, 0x04, 0x14, 0x05, 0x15,
+ 0x06, 0x16, 0x07, 0x17));
+}
+
+static vector bool char __ATTRS_o_ai vec_mergeh(vector bool char __a,
+ vector bool char __b) {
+ return vec_perm(__a, __b,
+ (vector unsigned char)(0x00, 0x10, 0x01, 0x11, 0x02, 0x12,
+ 0x03, 0x13, 0x04, 0x14, 0x05, 0x15,
+ 0x06, 0x16, 0x07, 0x17));
+}
+
+static vector short __ATTRS_o_ai vec_mergeh(vector short __a,
+ vector short __b) {
+ return vec_perm(__a, __b,
+ (vector unsigned char)(0x00, 0x01, 0x10, 0x11, 0x02, 0x03,
+ 0x12, 0x13, 0x04, 0x05, 0x14, 0x15,
+ 0x06, 0x07, 0x16, 0x17));
+}
+
+static vector unsigned short __ATTRS_o_ai
+vec_mergeh(vector unsigned short __a, vector unsigned short __b) {
+ return vec_perm(__a, __b,
+ (vector unsigned char)(0x00, 0x01, 0x10, 0x11, 0x02, 0x03,
+ 0x12, 0x13, 0x04, 0x05, 0x14, 0x15,
+ 0x06, 0x07, 0x16, 0x17));
+}
+
+static vector bool short __ATTRS_o_ai vec_mergeh(vector bool short __a,
+ vector bool short __b) {
+ return vec_perm(__a, __b,
+ (vector unsigned char)(0x00, 0x01, 0x10, 0x11, 0x02, 0x03,
+ 0x12, 0x13, 0x04, 0x05, 0x14, 0x15,
+ 0x06, 0x07, 0x16, 0x17));
+}
+
+static vector pixel __ATTRS_o_ai vec_mergeh(vector pixel __a,
+ vector pixel __b) {
+ return vec_perm(__a, __b,
+ (vector unsigned char)(0x00, 0x01, 0x10, 0x11, 0x02, 0x03,
+ 0x12, 0x13, 0x04, 0x05, 0x14, 0x15,
+ 0x06, 0x07, 0x16, 0x17));
+}
+
+static vector int __ATTRS_o_ai vec_mergeh(vector int __a, vector int __b) {
+ return vec_perm(__a, __b,
+ (vector unsigned char)(0x00, 0x01, 0x02, 0x03, 0x10, 0x11,
+ 0x12, 0x13, 0x04, 0x05, 0x06, 0x07,
+ 0x14, 0x15, 0x16, 0x17));
+}
+
+static vector unsigned int __ATTRS_o_ai vec_mergeh(vector unsigned int __a,
+ vector unsigned int __b) {
+ return vec_perm(__a, __b,
+ (vector unsigned char)(0x00, 0x01, 0x02, 0x03, 0x10, 0x11,
+ 0x12, 0x13, 0x04, 0x05, 0x06, 0x07,
+ 0x14, 0x15, 0x16, 0x17));
+}
+
+static vector bool int __ATTRS_o_ai vec_mergeh(vector bool int __a,
+ vector bool int __b) {
+ return vec_perm(__a, __b,
+ (vector unsigned char)(0x00, 0x01, 0x02, 0x03, 0x10, 0x11,
+ 0x12, 0x13, 0x04, 0x05, 0x06, 0x07,
+ 0x14, 0x15, 0x16, 0x17));
+}
+
+static vector float __ATTRS_o_ai vec_mergeh(vector float __a,
+ vector float __b) {
+ return vec_perm(__a, __b,
+ (vector unsigned char)(0x00, 0x01, 0x02, 0x03, 0x10, 0x11,
+ 0x12, 0x13, 0x04, 0x05, 0x06, 0x07,
+ 0x14, 0x15, 0x16, 0x17));
+}
+
+#ifdef __VSX__
+static vector signed long long __ATTRS_o_ai
+vec_mergeh(vector signed long long __a, vector signed long long __b) {
+ return vec_perm(__a, __b,
+ (vector unsigned char)(0x00, 0x01, 0x02, 0x03,
+ 0x04, 0x05, 0x06, 0x07,
+ 0x10, 0x11, 0x12, 0x13,
+ 0x14, 0x15, 0x16, 0x17));
+}
+
+static vector signed long long __ATTRS_o_ai
+vec_mergeh(vector signed long long __a, vector bool long long __b) {
+ return vec_perm(__a, (vector signed long long)__b,
+ (vector unsigned char)(0x00, 0x01, 0x02, 0x03,
+ 0x04, 0x05, 0x06, 0x07,
+ 0x10, 0x11, 0x12, 0x13,
+ 0x14, 0x15, 0x16, 0x17));
+}
+
+static vector signed long long __ATTRS_o_ai
+vec_mergeh(vector bool long long __a, vector signed long long __b) {
+ return vec_perm((vector signed long long)__a, __b,
+ (vector unsigned char)(0x00, 0x01, 0x02, 0x03,
+ 0x04, 0x05, 0x06, 0x07,
+ 0x10, 0x11, 0x12, 0x13,
+ 0x14, 0x15, 0x16, 0x17));
+}
+
+static vector unsigned long long __ATTRS_o_ai
+vec_mergeh(vector unsigned long long __a, vector unsigned long long __b) {
+ return vec_perm(__a, __b,
+ (vector unsigned char)(0x00, 0x01, 0x02, 0x03,
+ 0x04, 0x05, 0x06, 0x07,
+ 0x10, 0x11, 0x12, 0x13,
+ 0x14, 0x15, 0x16, 0x17));
+}
+
+static vector unsigned long long __ATTRS_o_ai
+vec_mergeh(vector unsigned long long __a, vector bool long long __b) {
+ return vec_perm(__a, (vector unsigned long long)__b,
+ (vector unsigned char)(0x00, 0x01, 0x02, 0x03,
+ 0x04, 0x05, 0x06, 0x07,
+ 0x10, 0x11, 0x12, 0x13,
+ 0x14, 0x15, 0x16, 0x17));
+}
+
+static vector unsigned long long __ATTRS_o_ai
+vec_mergeh(vector bool long long __a, vector unsigned long long __b) {
+ return vec_perm((vector unsigned long long)__a, __b,
+ (vector unsigned char)(0x00, 0x01, 0x02, 0x03,
+ 0x04, 0x05, 0x06, 0x07,
+ 0x10, 0x11, 0x12, 0x13,
+ 0x14, 0x15, 0x16, 0x17));
+}
+
+static vector bool long long __ATTRS_o_ai
+vec_mergeh(vector bool long long __a, vector bool long long __b) {
+ return vec_perm(__a, __b,
+ (vector unsigned char)(0x00, 0x01, 0x02, 0x03,
+ 0x04, 0x05, 0x06, 0x07,
+ 0x10, 0x11, 0x12, 0x13,
+ 0x14, 0x15, 0x16, 0x17));
+}
+
+static vector double __ATTRS_o_ai vec_mergeh(vector double __a,
+ vector double __b) {
+ return vec_perm(__a, __b,
+ (vector unsigned char)(0x00, 0x01, 0x02, 0x03,
+ 0x04, 0x05, 0x06, 0x07,
+ 0x10, 0x11, 0x12, 0x13,
+ 0x14, 0x15, 0x16, 0x17));
+}
+
+static vector double __ATTRS_o_ai vec_mergeh(vector double __a,
+ vector bool long long __b) {
+ return vec_perm(__a, (vector double)__b,
+ (vector unsigned char)(0x00, 0x01, 0x02, 0x03,
+ 0x04, 0x05, 0x06, 0x07,
+ 0x10, 0x11, 0x12, 0x13,
+ 0x14, 0x15, 0x16, 0x17));
+}
+
+static vector double __ATTRS_o_ai vec_mergeh(vector bool long long __a,
+ vector double __b) {
+ return vec_perm((vector double)__a, __b,
+ (vector unsigned char)(0x00, 0x01, 0x02, 0x03,
+ 0x04, 0x05, 0x06, 0x07,
+ 0x10, 0x11, 0x12, 0x13,
+ 0x14, 0x15, 0x16, 0x17));
+}
+#endif
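+
+/* Usage sketch (illustrative): vec_mergeh interleaves the high halves of
+   its operands; for chars the result is a0,b0,a1,b1,...,a7,b7. On a
+   big-endian target this zero-extends the first eight bytes to halfwords:
+
+     vector unsigned char zero = vec_splat_u8(0);
+     vector unsigned short lo16 =
+         (vector unsigned short)vec_mergeh(zero, bytes);
+*/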
+
+/* vec_vmrghb */
+
+#define __builtin_altivec_vmrghb vec_vmrghb
+
+static vector signed char __ATTRS_o_ai vec_vmrghb(vector signed char __a,
+ vector signed char __b) {
+ return vec_perm(__a, __b,
+ (vector unsigned char)(0x00, 0x10, 0x01, 0x11, 0x02, 0x12,
+ 0x03, 0x13, 0x04, 0x14, 0x05, 0x15,
+ 0x06, 0x16, 0x07, 0x17));
+}
+
+static vector unsigned char __ATTRS_o_ai vec_vmrghb(vector unsigned char __a,
+ vector unsigned char __b) {
+ return vec_perm(__a, __b,
+ (vector unsigned char)(0x00, 0x10, 0x01, 0x11, 0x02, 0x12,
+ 0x03, 0x13, 0x04, 0x14, 0x05, 0x15,
+ 0x06, 0x16, 0x07, 0x17));
+}
+
+static vector bool char __ATTRS_o_ai vec_vmrghb(vector bool char __a,
+ vector bool char __b) {
+ return vec_perm(__a, __b,
+ (vector unsigned char)(0x00, 0x10, 0x01, 0x11, 0x02, 0x12,
+ 0x03, 0x13, 0x04, 0x14, 0x05, 0x15,
+ 0x06, 0x16, 0x07, 0x17));
+}
+
+/* vec_vmrghh */
+
+#define __builtin_altivec_vmrghh vec_vmrghh
+
+static vector short __ATTRS_o_ai vec_vmrghh(vector short __a,
+ vector short __b) {
+ return vec_perm(__a, __b,
+ (vector unsigned char)(0x00, 0x01, 0x10, 0x11, 0x02, 0x03,
+ 0x12, 0x13, 0x04, 0x05, 0x14, 0x15,
+ 0x06, 0x07, 0x16, 0x17));
+}
+
+static vector unsigned short __ATTRS_o_ai
+vec_vmrghh(vector unsigned short __a, vector unsigned short __b) {
+ return vec_perm(__a, __b,
+ (vector unsigned char)(0x00, 0x01, 0x10, 0x11, 0x02, 0x03,
+ 0x12, 0x13, 0x04, 0x05, 0x14, 0x15,
+ 0x06, 0x07, 0x16, 0x17));
+}
+
+static vector bool short __ATTRS_o_ai vec_vmrghh(vector bool short __a,
+ vector bool short __b) {
+ return vec_perm(__a, __b,
+ (vector unsigned char)(0x00, 0x01, 0x10, 0x11, 0x02, 0x03,
+ 0x12, 0x13, 0x04, 0x05, 0x14, 0x15,
+ 0x06, 0x07, 0x16, 0x17));
+}
+
+static vector pixel __ATTRS_o_ai vec_vmrghh(vector pixel __a,
+ vector pixel __b) {
+ return vec_perm(__a, __b,
+ (vector unsigned char)(0x00, 0x01, 0x10, 0x11, 0x02, 0x03,
+ 0x12, 0x13, 0x04, 0x05, 0x14, 0x15,
+ 0x06, 0x07, 0x16, 0x17));
+}
+
+/* vec_vmrghw */
+
+#define __builtin_altivec_vmrghw vec_vmrghw
+
+static vector int __ATTRS_o_ai vec_vmrghw(vector int __a, vector int __b) {
+ return vec_perm(__a, __b,
+ (vector unsigned char)(0x00, 0x01, 0x02, 0x03, 0x10, 0x11,
+ 0x12, 0x13, 0x04, 0x05, 0x06, 0x07,
+ 0x14, 0x15, 0x16, 0x17));
+}
+
+static vector unsigned int __ATTRS_o_ai vec_vmrghw(vector unsigned int __a,
+ vector unsigned int __b) {
+ return vec_perm(__a, __b,
+ (vector unsigned char)(0x00, 0x01, 0x02, 0x03, 0x10, 0x11,
+ 0x12, 0x13, 0x04, 0x05, 0x06, 0x07,
+ 0x14, 0x15, 0x16, 0x17));
+}
+
+static vector bool int __ATTRS_o_ai vec_vmrghw(vector bool int __a,
+ vector bool int __b) {
+ return vec_perm(__a, __b,
+ (vector unsigned char)(0x00, 0x01, 0x02, 0x03, 0x10, 0x11,
+ 0x12, 0x13, 0x04, 0x05, 0x06, 0x07,
+ 0x14, 0x15, 0x16, 0x17));
+}
+
+static vector float __ATTRS_o_ai vec_vmrghw(vector float __a,
+ vector float __b) {
+ return vec_perm(__a, __b,
+ (vector unsigned char)(0x00, 0x01, 0x02, 0x03, 0x10, 0x11,
+ 0x12, 0x13, 0x04, 0x05, 0x06, 0x07,
+ 0x14, 0x15, 0x16, 0x17));
+}
+
+/* vec_mergel */
+
+static vector signed char __ATTRS_o_ai vec_mergel(vector signed char __a,
+ vector signed char __b) {
+ return vec_perm(__a, __b,
+ (vector unsigned char)(0x08, 0x18, 0x09, 0x19, 0x0A, 0x1A,
+ 0x0B, 0x1B, 0x0C, 0x1C, 0x0D, 0x1D,
+ 0x0E, 0x1E, 0x0F, 0x1F));
+}
+
+static vector unsigned char __ATTRS_o_ai vec_mergel(vector unsigned char __a,
+ vector unsigned char __b) {
+ return vec_perm(__a, __b,
+ (vector unsigned char)(0x08, 0x18, 0x09, 0x19, 0x0A, 0x1A,
+ 0x0B, 0x1B, 0x0C, 0x1C, 0x0D, 0x1D,
+ 0x0E, 0x1E, 0x0F, 0x1F));
+}
+
+static vector bool char __ATTRS_o_ai vec_mergel(vector bool char __a,
+ vector bool char __b) {
+ return vec_perm(__a, __b,
+ (vector unsigned char)(0x08, 0x18, 0x09, 0x19, 0x0A, 0x1A,
+ 0x0B, 0x1B, 0x0C, 0x1C, 0x0D, 0x1D,
+ 0x0E, 0x1E, 0x0F, 0x1F));
+}
+
+static vector short __ATTRS_o_ai vec_mergel(vector short __a,
+ vector short __b) {
+ return vec_perm(__a, __b,
+ (vector unsigned char)(0x08, 0x09, 0x18, 0x19, 0x0A, 0x0B,
+ 0x1A, 0x1B, 0x0C, 0x0D, 0x1C, 0x1D,
+ 0x0E, 0x0F, 0x1E, 0x1F));
+}
+
+static vector unsigned short __ATTRS_o_ai
+vec_mergel(vector unsigned short __a, vector unsigned short __b) {
+ return vec_perm(__a, __b,
+ (vector unsigned char)(0x08, 0x09, 0x18, 0x19, 0x0A, 0x0B,
+ 0x1A, 0x1B, 0x0C, 0x0D, 0x1C, 0x1D,
+ 0x0E, 0x0F, 0x1E, 0x1F));
+}
+
+static vector bool short __ATTRS_o_ai vec_mergel(vector bool short __a,
+ vector bool short __b) {
+ return vec_perm(__a, __b,
+ (vector unsigned char)(0x08, 0x09, 0x18, 0x19, 0x0A, 0x0B,
+ 0x1A, 0x1B, 0x0C, 0x0D, 0x1C, 0x1D,
+ 0x0E, 0x0F, 0x1E, 0x1F));
+}
+
+static vector pixel __ATTRS_o_ai vec_mergel(vector pixel __a,
+ vector pixel __b) {
+ return vec_perm(__a, __b,
+ (vector unsigned char)(0x08, 0x09, 0x18, 0x19, 0x0A, 0x0B,
+ 0x1A, 0x1B, 0x0C, 0x0D, 0x1C, 0x1D,
+ 0x0E, 0x0F, 0x1E, 0x1F));
+}
+
+static vector int __ATTRS_o_ai vec_mergel(vector int __a, vector int __b) {
+ return vec_perm(__a, __b,
+ (vector unsigned char)(0x08, 0x09, 0x0A, 0x0B, 0x18, 0x19,
+ 0x1A, 0x1B, 0x0C, 0x0D, 0x0E, 0x0F,
+ 0x1C, 0x1D, 0x1E, 0x1F));
+}
+
+static vector unsigned int __ATTRS_o_ai vec_mergel(vector unsigned int __a,
+ vector unsigned int __b) {
+ return vec_perm(__a, __b,
+ (vector unsigned char)(0x08, 0x09, 0x0A, 0x0B, 0x18, 0x19,
+ 0x1A, 0x1B, 0x0C, 0x0D, 0x0E, 0x0F,
+ 0x1C, 0x1D, 0x1E, 0x1F));
+}
+
+static vector bool int __ATTRS_o_ai vec_mergel(vector bool int __a,
+ vector bool int __b) {
+ return vec_perm(__a, __b,
+ (vector unsigned char)(0x08, 0x09, 0x0A, 0x0B, 0x18, 0x19,
+ 0x1A, 0x1B, 0x0C, 0x0D, 0x0E, 0x0F,
+ 0x1C, 0x1D, 0x1E, 0x1F));
+}
+
+static vector float __ATTRS_o_ai vec_mergel(vector float __a,
+ vector float __b) {
+ return vec_perm(__a, __b,
+ (vector unsigned char)(0x08, 0x09, 0x0A, 0x0B, 0x18, 0x19,
+ 0x1A, 0x1B, 0x0C, 0x0D, 0x0E, 0x0F,
+ 0x1C, 0x1D, 0x1E, 0x1F));
+}
+
+#ifdef __VSX__
+static vector signed long long __ATTRS_o_ai
+vec_mergel(vector signed long long __a, vector signed long long __b) {
+ return vec_perm(__a, __b,
+ (vector unsigned char)(0x08, 0x09, 0x0A, 0x0B,
+ 0x0C, 0x0D, 0x0E, 0x0F,
+ 0x18, 0x19, 0x1A, 0x1B,
+ 0x1C, 0x1D, 0x1E, 0x1F));
+}
+
+static vector signed long long __ATTRS_o_ai
+vec_mergel(vector signed long long __a, vector bool long long __b) {
+ return vec_perm(__a, (vector signed long long)__b,
+ (vector unsigned char)(0x08, 0x09, 0x0A, 0x0B,
+ 0x0C, 0x0D, 0x0E, 0x0F,
+ 0x18, 0x19, 0x1A, 0x1B,
+ 0x1C, 0x1D, 0x1E, 0x1F));
+}
+
+static vector signed long long __ATTRS_o_ai
+vec_mergel(vector bool long long __a, vector signed long long __b) {
+ return vec_perm((vector signed long long)__a, __b,
+ (vector unsigned char)(0x08, 0x09, 0x0A, 0x0B,
+ 0x0C, 0x0D, 0x0E, 0x0F,
+ 0x18, 0x19, 0x1A, 0x1B,
+ 0x1C, 0x1D, 0x1E, 0x1F));
+}
+
+static vector unsigned long long __ATTRS_o_ai
+vec_mergel(vector unsigned long long __a, vector unsigned long long __b) {
+ return vec_perm(__a, __b,
+ (vector unsigned char)(0x08, 0x09, 0x0A, 0x0B,
+ 0x0C, 0x0D, 0x0E, 0x0F,
+ 0x18, 0x19, 0x1A, 0x1B,
+ 0x1C, 0x1D, 0x1E, 0x1F));
+}
+
+static vector unsigned long long __ATTRS_o_ai
+vec_mergel(vector unsigned long long __a, vector bool long long __b) {
+ return vec_perm(__a, (vector unsigned long long)__b,
+ (vector unsigned char)(0x08, 0x09, 0x0A, 0x0B,
+ 0x0C, 0x0D, 0x0E, 0x0F,
+ 0x18, 0x19, 0x1A, 0x1B,
+ 0x1C, 0x1D, 0x1E, 0x1F));
+}
+
+static vector unsigned long long __ATTRS_o_ai
+vec_mergel(vector bool long long __a, vector unsigned long long __b) {
+ return vec_perm((vector unsigned long long)__a, __b,
+ (vector unsigned char)(0x08, 0x09, 0x0A, 0x0B,
+ 0x0C, 0x0D, 0x0E, 0x0F,
+ 0x18, 0x19, 0x1A, 0x1B,
+ 0x1C, 0x1D, 0x1E, 0x1F));
+}
+
+static vector bool long long __ATTRS_o_ai
+vec_mergel(vector bool long long __a, vector bool long long __b) {
+ return vec_perm(__a, __b,
+ (vector unsigned char)(0x08, 0x09, 0x0A, 0x0B,
+ 0x0C, 0x0D, 0x0E, 0x0F,
+ 0x18, 0x19, 0x1A, 0x1B,
+ 0x1C, 0x1D, 0x1E, 0x1F));
+}
+
+static vector double __ATTRS_o_ai
+vec_mergel(vector double __a, vector double __b) {
+ return vec_perm(__a, __b,
+ (vector unsigned char)(0x08, 0x09, 0x0A, 0x0B,
+ 0x0C, 0x0D, 0x0E, 0x0F,
+ 0x18, 0x19, 0x1A, 0x1B,
+ 0x1C, 0x1D, 0x1E, 0x1F));
+}
+
+static vector double __ATTRS_o_ai
+vec_mergel(vector double __a, vector bool long long __b) {
+ return vec_perm(__a, (vector double)__b,
+ (vector unsigned char)(0x08, 0x09, 0x0A, 0x0B,
+ 0x0C, 0x0D, 0x0E, 0x0F,
+ 0x18, 0x19, 0x1A, 0x1B,
+ 0x1C, 0x1D, 0x1E, 0x1F));
+}
+
+static vector double __ATTRS_o_ai
+vec_mergel(vector bool long long __a, vector double __b) {
+ return vec_perm((vector double)__a, __b,
+ (vector unsigned char)(0x08, 0x09, 0x0A, 0x0B,
+ 0x0C, 0x0D, 0x0E, 0x0F,
+ 0x18, 0x19, 0x1A, 0x1B,
+ 0x1C, 0x1D, 0x1E, 0x1F));
+}
+#endif
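+
+/* Usage sketch (illustrative): vec_mergel is the complement of vec_mergeh,
+   interleaving the low halves (elements 8..15 for chars); continuing the
+   vec_mergeh sketch above, it widens the remaining bytes:
+
+     vector unsigned short hi16 =
+         (vector unsigned short)vec_mergel(zero, bytes);
+*/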
+
+/* vec_vmrglb */
+
+#define __builtin_altivec_vmrglb vec_vmrglb
+
+static vector signed char __ATTRS_o_ai vec_vmrglb(vector signed char __a,
+ vector signed char __b) {
+ return vec_perm(__a, __b,
+ (vector unsigned char)(0x08, 0x18, 0x09, 0x19, 0x0A, 0x1A,
+ 0x0B, 0x1B, 0x0C, 0x1C, 0x0D, 0x1D,
+ 0x0E, 0x1E, 0x0F, 0x1F));
+}
+
+static vector unsigned char __ATTRS_o_ai vec_vmrglb(vector unsigned char __a,
+ vector unsigned char __b) {
+ return vec_perm(__a, __b,
+ (vector unsigned char)(0x08, 0x18, 0x09, 0x19, 0x0A, 0x1A,
+ 0x0B, 0x1B, 0x0C, 0x1C, 0x0D, 0x1D,
+ 0x0E, 0x1E, 0x0F, 0x1F));
+}
+
+static vector bool char __ATTRS_o_ai vec_vmrglb(vector bool char __a,
+ vector bool char __b) {
+ return vec_perm(__a, __b,
+ (vector unsigned char)(0x08, 0x18, 0x09, 0x19, 0x0A, 0x1A,
+ 0x0B, 0x1B, 0x0C, 0x1C, 0x0D, 0x1D,
+ 0x0E, 0x1E, 0x0F, 0x1F));
+}
+
+/* vec_vmrglh */
+
+#define __builtin_altivec_vmrglh vec_vmrglh
+
+static vector short __ATTRS_o_ai vec_vmrglh(vector short __a,
+ vector short __b) {
+ return vec_perm(__a, __b,
+ (vector unsigned char)(0x08, 0x09, 0x18, 0x19, 0x0A, 0x0B,
+ 0x1A, 0x1B, 0x0C, 0x0D, 0x1C, 0x1D,
+ 0x0E, 0x0F, 0x1E, 0x1F));
+}
+
+static vector unsigned short __ATTRS_o_ai
+vec_vmrglh(vector unsigned short __a, vector unsigned short __b) {
+ return vec_perm(__a, __b,
+ (vector unsigned char)(0x08, 0x09, 0x18, 0x19, 0x0A, 0x0B,
+ 0x1A, 0x1B, 0x0C, 0x0D, 0x1C, 0x1D,
+ 0x0E, 0x0F, 0x1E, 0x1F));
+}
+
+static vector bool short __ATTRS_o_ai vec_vmrglh(vector bool short __a,
+ vector bool short __b) {
+ return vec_perm(__a, __b,
+ (vector unsigned char)(0x08, 0x09, 0x18, 0x19, 0x0A, 0x0B,
+ 0x1A, 0x1B, 0x0C, 0x0D, 0x1C, 0x1D,
+ 0x0E, 0x0F, 0x1E, 0x1F));
+}
+
+static vector pixel __ATTRS_o_ai vec_vmrglh(vector pixel __a,
+ vector pixel __b) {
+ return vec_perm(__a, __b,
+ (vector unsigned char)(0x08, 0x09, 0x18, 0x19, 0x0A, 0x0B,
+ 0x1A, 0x1B, 0x0C, 0x0D, 0x1C, 0x1D,
+ 0x0E, 0x0F, 0x1E, 0x1F));
+}
+
+/* vec_vmrglw */
+
+#define __builtin_altivec_vmrglw vec_vmrglw
+
+static vector int __ATTRS_o_ai vec_vmrglw(vector int __a, vector int __b) {
+ return vec_perm(__a, __b,
+ (vector unsigned char)(0x08, 0x09, 0x0A, 0x0B, 0x18, 0x19,
+ 0x1A, 0x1B, 0x0C, 0x0D, 0x0E, 0x0F,
+ 0x1C, 0x1D, 0x1E, 0x1F));
+}
+
+static vector unsigned int __ATTRS_o_ai vec_vmrglw(vector unsigned int __a,
+ vector unsigned int __b) {
+ return vec_perm(__a, __b,
+ (vector unsigned char)(0x08, 0x09, 0x0A, 0x0B, 0x18, 0x19,
+ 0x1A, 0x1B, 0x0C, 0x0D, 0x0E, 0x0F,
+ 0x1C, 0x1D, 0x1E, 0x1F));
+}
+
+static vector bool int __ATTRS_o_ai vec_vmrglw(vector bool int __a,
+ vector bool int __b) {
+ return vec_perm(__a, __b,
+ (vector unsigned char)(0x08, 0x09, 0x0A, 0x0B, 0x18, 0x19,
+ 0x1A, 0x1B, 0x0C, 0x0D, 0x0E, 0x0F,
+ 0x1C, 0x1D, 0x1E, 0x1F));
+}
+
+static vector float __ATTRS_o_ai vec_vmrglw(vector float __a,
+ vector float __b) {
+ return vec_perm(__a, __b,
+ (vector unsigned char)(0x08, 0x09, 0x0A, 0x0B, 0x18, 0x19,
+ 0x1A, 0x1B, 0x0C, 0x0D, 0x0E, 0x0F,
+ 0x1C, 0x1D, 0x1E, 0x1F));
+}
+
+#ifdef __POWER8_VECTOR__
+/* vec_mergee */
+
+static vector bool int __ATTRS_o_ai
+vec_mergee(vector bool int __a, vector bool int __b) {
+ return vec_perm(__a, __b, (vector unsigned char)
+ (0x00, 0x01, 0x02, 0x03, 0x10, 0x11, 0x12, 0x13,
+ 0x08, 0x09, 0x0A, 0x0B, 0x18, 0x19, 0x1A, 0x1B));
+}
+
+static vector signed int __ATTRS_o_ai
+vec_mergee(vector signed int __a, vector signed int __b) {
+ return vec_perm(__a, __b, (vector unsigned char)
+ (0x00, 0x01, 0x02, 0x03, 0x10, 0x11, 0x12, 0x13,
+ 0x08, 0x09, 0x0A, 0x0B, 0x18, 0x19, 0x1A, 0x1B));
+}
+
+static vector unsigned int __ATTRS_o_ai
+vec_mergee(vector unsigned int __a, vector unsigned int __b) {
+ return vec_perm(__a, __b, (vector unsigned char)
+ (0x00, 0x01, 0x02, 0x03, 0x10, 0x11, 0x12, 0x13,
+ 0x08, 0x09, 0x0A, 0x0B, 0x18, 0x19, 0x1A, 0x1B));
+}
+
+/* vec_mergeo */
+
+static vector bool int __ATTRS_o_ai
+vec_mergeo(vector bool int __a, vector bool int __b) {
+ return vec_perm(__a, __b, (vector unsigned char)
+ (0x04, 0x05, 0x06, 0x07, 0x14, 0x15, 0x16, 0x17,
+ 0x0C, 0x0D, 0x0E, 0x0F, 0x1C, 0x1D, 0x1E, 0x1F));
+}
+
+static vector signed int __ATTRS_o_ai
+vec_mergeo(vector signed int __a, vector signed int __b) {
+ return vec_perm(__a, __b, (vector unsigned char)
+ (0x04, 0x05, 0x06, 0x07, 0x14, 0x15, 0x16, 0x17,
+ 0x0C, 0x0D, 0x0E, 0x0F, 0x1C, 0x1D, 0x1E, 0x1F));
+}
+
+static vector unsigned int __ATTRS_o_ai
+vec_mergeo(vector unsigned int __a, vector unsigned int __b) {
+ return vec_perm(__a, __b, (vector unsigned char)
+ (0x04, 0x05, 0x06, 0x07, 0x14, 0x15, 0x16, 0x17,
+ 0x0C, 0x0D, 0x0E, 0x0F, 0x1C, 0x1D, 0x1E, 0x1F));
+}
+
+#endif
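+
+/* Usage sketch (illustrative): vec_mergee/vec_mergeo gather the even- and
+   odd-indexed words of both operands, e.g. to deinterleave paired data:
+
+     vector signed int even = vec_mergee(v0, v1);  // v0[0], v1[0], v0[2], v1[2]
+     vector signed int odd = vec_mergeo(v0, v1);   // v0[1], v1[1], v0[3], v1[3]
+*/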
+
+/* vec_mfvscr */
+
+static vector unsigned short __attribute__((__always_inline__))
+vec_mfvscr(void) {
+ return __builtin_altivec_mfvscr();
+}
+
+/* vec_min */
+
+static vector signed char __ATTRS_o_ai vec_min(vector signed char __a,
+ vector signed char __b) {
+ return __builtin_altivec_vminsb(__a, __b);
+}
+
+static vector signed char __ATTRS_o_ai vec_min(vector bool char __a,
+ vector signed char __b) {
+ return __builtin_altivec_vminsb((vector signed char)__a, __b);
+}
+
+static vector signed char __ATTRS_o_ai vec_min(vector signed char __a,
+ vector bool char __b) {
+ return __builtin_altivec_vminsb(__a, (vector signed char)__b);
+}
+
+static vector unsigned char __ATTRS_o_ai vec_min(vector unsigned char __a,
+ vector unsigned char __b) {
+ return __builtin_altivec_vminub(__a, __b);
+}
+
+static vector unsigned char __ATTRS_o_ai vec_min(vector bool char __a,
+ vector unsigned char __b) {
+ return __builtin_altivec_vminub((vector unsigned char)__a, __b);
+}
+
+static vector unsigned char __ATTRS_o_ai vec_min(vector unsigned char __a,
+ vector bool char __b) {
+ return __builtin_altivec_vminub(__a, (vector unsigned char)__b);
+}
+
+static vector short __ATTRS_o_ai vec_min(vector short __a, vector short __b) {
+ return __builtin_altivec_vminsh(__a, __b);
+}
+
+static vector short __ATTRS_o_ai vec_min(vector bool short __a,
+ vector short __b) {
+ return __builtin_altivec_vminsh((vector short)__a, __b);
+}
+
+static vector short __ATTRS_o_ai vec_min(vector short __a,
+ vector bool short __b) {
+ return __builtin_altivec_vminsh(__a, (vector short)__b);
+}
+
+static vector unsigned short __ATTRS_o_ai vec_min(vector unsigned short __a,
+ vector unsigned short __b) {
+ return __builtin_altivec_vminuh(__a, __b);
+}
+
+static vector unsigned short __ATTRS_o_ai vec_min(vector bool short __a,
+ vector unsigned short __b) {
+ return __builtin_altivec_vminuh((vector unsigned short)__a, __b);
+}
+
+static vector unsigned short __ATTRS_o_ai vec_min(vector unsigned short __a,
+ vector bool short __b) {
+ return __builtin_altivec_vminuh(__a, (vector unsigned short)__b);
+}
+
+static vector int __ATTRS_o_ai vec_min(vector int __a, vector int __b) {
+ return __builtin_altivec_vminsw(__a, __b);
+}
+
+static vector int __ATTRS_o_ai vec_min(vector bool int __a, vector int __b) {
+ return __builtin_altivec_vminsw((vector int)__a, __b);
+}
+
+static vector int __ATTRS_o_ai vec_min(vector int __a, vector bool int __b) {
+ return __builtin_altivec_vminsw(__a, (vector int)__b);
+}
+
+static vector unsigned int __ATTRS_o_ai vec_min(vector unsigned int __a,
+ vector unsigned int __b) {
+ return __builtin_altivec_vminuw(__a, __b);
+}
+
+static vector unsigned int __ATTRS_o_ai vec_min(vector bool int __a,
+ vector unsigned int __b) {
+ return __builtin_altivec_vminuw((vector unsigned int)__a, __b);
+}
+
+static vector unsigned int __ATTRS_o_ai vec_min(vector unsigned int __a,
+ vector bool int __b) {
+ return __builtin_altivec_vminuw(__a, (vector unsigned int)__b);
+}
+
+#ifdef __POWER8_VECTOR__
+static vector signed long long __ATTRS_o_ai
+vec_min(vector signed long long __a, vector signed long long __b) {
+ return __builtin_altivec_vminsd(__a, __b);
+}
+
+static vector signed long long __ATTRS_o_ai
+vec_min(vector bool long long __a, vector signed long long __b) {
+ return __builtin_altivec_vminsd((vector signed long long)__a, __b);
+}
+
+static vector signed long long __ATTRS_o_ai vec_min(vector signed long long __a,
+ vector bool long long __b) {
+ return __builtin_altivec_vminsd(__a, (vector signed long long)__b);
+}
+
+static vector unsigned long long __ATTRS_o_ai
+vec_min(vector unsigned long long __a, vector unsigned long long __b) {
+ return __builtin_altivec_vminud(__a, __b);
+}
+
+static vector unsigned long long __ATTRS_o_ai
+vec_min(vector bool long long __a, vector unsigned long long __b) {
+ return __builtin_altivec_vminud((vector unsigned long long)__a, __b);
+}
+
+static vector unsigned long long __ATTRS_o_ai
+vec_min(vector unsigned long long __a, vector bool long long __b) {
+ return __builtin_altivec_vminud(__a, (vector unsigned long long)__b);
+}
+#endif
+
+static vector float __ATTRS_o_ai vec_min(vector float __a, vector float __b) {
+#ifdef __VSX__
+ return __builtin_vsx_xvminsp(__a, __b);
+#else
+ return __builtin_altivec_vminfp(__a, __b);
+#endif
+}
+
+#ifdef __VSX__
+static vector double __ATTRS_o_ai vec_min(vector double __a,
+ vector double __b) {
+ return __builtin_vsx_xvmindp(__a, __b);
+}
+#endif
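+
+/* Usage sketch (illustrative): vec_min and vec_max compose into an
+   elementwise clamp:
+
+     static inline vector float clampf(vector float x, vector float lo,
+                                       vector float hi) {
+       return vec_min(vec_max(x, lo), hi);
+     }
+*/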
+
+/* vec_vminsb */
+
+static vector signed char __ATTRS_o_ai vec_vminsb(vector signed char __a,
+ vector signed char __b) {
+ return __builtin_altivec_vminsb(__a, __b);
+}
+
+static vector signed char __ATTRS_o_ai vec_vminsb(vector bool char __a,
+ vector signed char __b) {
+ return __builtin_altivec_vminsb((vector signed char)__a, __b);
+}
+
+static vector signed char __ATTRS_o_ai vec_vminsb(vector signed char __a,
+ vector bool char __b) {
+ return __builtin_altivec_vminsb(__a, (vector signed char)__b);
+}
+
+/* vec_vminub */
+
+static vector unsigned char __ATTRS_o_ai vec_vminub(vector unsigned char __a,
+ vector unsigned char __b) {
+ return __builtin_altivec_vminub(__a, __b);
+}
+
+static vector unsigned char __ATTRS_o_ai vec_vminub(vector bool char __a,
+ vector unsigned char __b) {
+ return __builtin_altivec_vminub((vector unsigned char)__a, __b);
+}
+
+static vector unsigned char __ATTRS_o_ai vec_vminub(vector unsigned char __a,
+ vector bool char __b) {
+ return __builtin_altivec_vminub(__a, (vector unsigned char)__b);
+}
+
+/* vec_vminsh */
+
+static vector short __ATTRS_o_ai vec_vminsh(vector short __a,
+ vector short __b) {
+ return __builtin_altivec_vminsh(__a, __b);
+}
+
+static vector short __ATTRS_o_ai vec_vminsh(vector bool short __a,
+ vector short __b) {
+ return __builtin_altivec_vminsh((vector short)__a, __b);
+}
+
+static vector short __ATTRS_o_ai vec_vminsh(vector short __a,
+ vector bool short __b) {
+ return __builtin_altivec_vminsh(__a, (vector short)__b);
+}
+
+/* vec_vminuh */
+
+static vector unsigned short __ATTRS_o_ai
+vec_vminuh(vector unsigned short __a, vector unsigned short __b) {
+ return __builtin_altivec_vminuh(__a, __b);
+}
+
+static vector unsigned short __ATTRS_o_ai
+vec_vminuh(vector bool short __a, vector unsigned short __b) {
+ return __builtin_altivec_vminuh((vector unsigned short)__a, __b);
+}
+
+static vector unsigned short __ATTRS_o_ai vec_vminuh(vector unsigned short __a,
+ vector bool short __b) {
+ return __builtin_altivec_vminuh(__a, (vector unsigned short)__b);
+}
+
+/* vec_vminsw */
+
+static vector int __ATTRS_o_ai vec_vminsw(vector int __a, vector int __b) {
+ return __builtin_altivec_vminsw(__a, __b);
+}
+
+static vector int __ATTRS_o_ai vec_vminsw(vector bool int __a, vector int __b) {
+ return __builtin_altivec_vminsw((vector int)__a, __b);
+}
+
+static vector int __ATTRS_o_ai vec_vminsw(vector int __a, vector bool int __b) {
+ return __builtin_altivec_vminsw(__a, (vector int)__b);
+}
+
+/* vec_vminuw */
+
+static vector unsigned int __ATTRS_o_ai vec_vminuw(vector unsigned int __a,
+ vector unsigned int __b) {
+ return __builtin_altivec_vminuw(__a, __b);
+}
+
+static vector unsigned int __ATTRS_o_ai vec_vminuw(vector bool int __a,
+ vector unsigned int __b) {
+ return __builtin_altivec_vminuw((vector unsigned int)__a, __b);
+}
+
+static vector unsigned int __ATTRS_o_ai vec_vminuw(vector unsigned int __a,
+ vector bool int __b) {
+ return __builtin_altivec_vminuw(__a, (vector unsigned int)__b);
+}
+
+/* vec_vminfp */
+
+static vector float __attribute__((__always_inline__))
+vec_vminfp(vector float __a, vector float __b) {
+#ifdef __VSX__
+ return __builtin_vsx_xvminsp(__a, __b);
+#else
+ return __builtin_altivec_vminfp(__a, __b);
+#endif
+}
+
+/* vec_mladd */
+
+#define __builtin_altivec_vmladduhm vec_mladd
+
+static vector short __ATTRS_o_ai vec_mladd(vector short __a, vector short __b,
+ vector short __c) {
+ return __a * __b + __c;
+}
+
+static vector short __ATTRS_o_ai vec_mladd(vector short __a,
+ vector unsigned short __b,
+ vector unsigned short __c) {
+ return __a * (vector short)__b + (vector short)__c;
+}
+
+static vector short __ATTRS_o_ai vec_mladd(vector unsigned short __a,
+ vector short __b, vector short __c) {
+ return (vector short)__a * __b + __c;
+}
+
+static vector unsigned short __ATTRS_o_ai vec_mladd(vector unsigned short __a,
+ vector unsigned short __b,
+ vector unsigned short __c) {
+ return __a * __b + __c;
+}
+
+/* vec_vmladduhm */
+
+static vector short __ATTRS_o_ai vec_vmladduhm(vector short __a,
+ vector short __b,
+ vector short __c) {
+ return __a * __b + __c;
+}
+
+static vector short __ATTRS_o_ai vec_vmladduhm(vector short __a,
+ vector unsigned short __b,
+ vector unsigned short __c) {
+ return __a * (vector short)__b + (vector short)__c;
+}
+
+static vector short __ATTRS_o_ai vec_vmladduhm(vector unsigned short __a,
+ vector short __b,
+ vector short __c) {
+ return (vector short)__a * __b + __c;
+}
+
+static vector unsigned short __ATTRS_o_ai
+vec_vmladduhm(vector unsigned short __a, vector unsigned short __b,
+ vector unsigned short __c) {
+ return __a * __b + __c;
+}
+
+/* vec_mradds */
+
+static vector short __attribute__((__always_inline__))
+vec_mradds(vector short __a, vector short __b, vector short __c) {
+ return __builtin_altivec_vmhraddshs(__a, __b, __c);
+}
+
+/* vec_vmhraddshs */
+
+static vector short __attribute__((__always_inline__))
+vec_vmhraddshs(vector short __a, vector short __b, vector short __c) {
+ return __builtin_altivec_vmhraddshs(__a, __b, __c);
+}
+
+/* vec_msum */
+
+static vector int __ATTRS_o_ai vec_msum(vector signed char __a,
+ vector unsigned char __b,
+ vector int __c) {
+ return __builtin_altivec_vmsummbm(__a, __b, __c);
+}
+
+static vector unsigned int __ATTRS_o_ai vec_msum(vector unsigned char __a,
+ vector unsigned char __b,
+ vector unsigned int __c) {
+ return __builtin_altivec_vmsumubm(__a, __b, __c);
+}
+
+static vector int __ATTRS_o_ai vec_msum(vector short __a, vector short __b,
+ vector int __c) {
+ return __builtin_altivec_vmsumshm(__a, __b, __c);
+}
+
+static vector unsigned int __ATTRS_o_ai vec_msum(vector unsigned short __a,
+ vector unsigned short __b,
+ vector unsigned int __c) {
+ return __builtin_altivec_vmsumuhm(__a, __b, __c);
+}
+
+/* vec_vmsummbm */
+
+static vector int __attribute__((__always_inline__))
+vec_vmsummbm(vector signed char __a, vector unsigned char __b, vector int __c) {
+ return __builtin_altivec_vmsummbm(__a, __b, __c);
+}
+
+/* vec_vmsumubm */
+
+static vector unsigned int __attribute__((__always_inline__))
+vec_vmsumubm(vector unsigned char __a, vector unsigned char __b,
+ vector unsigned int __c) {
+ return __builtin_altivec_vmsumubm(__a, __b, __c);
+}
+
+/* vec_vmsumshm */
+
+static vector int __attribute__((__always_inline__))
+vec_vmsumshm(vector short __a, vector short __b, vector int __c) {
+ return __builtin_altivec_vmsumshm(__a, __b, __c);
+}
+
+/* vec_vmsumuhm */
+
+static vector unsigned int __attribute__((__always_inline__))
+vec_vmsumuhm(vector unsigned short __a, vector unsigned short __b,
+ vector unsigned int __c) {
+ return __builtin_altivec_vmsumuhm(__a, __b, __c);
+}
+
+/* vec_msums */
+
+static vector int __ATTRS_o_ai vec_msums(vector short __a, vector short __b,
+ vector int __c) {
+ return __builtin_altivec_vmsumshs(__a, __b, __c);
+}
+
+static vector unsigned int __ATTRS_o_ai vec_msums(vector unsigned short __a,
+ vector unsigned short __b,
+ vector unsigned int __c) {
+ return __builtin_altivec_vmsumuhs(__a, __b, __c);
+}
+
+/* vec_vmsumshs */
+
+static vector int __attribute__((__always_inline__))
+vec_vmsumshs(vector short __a, vector short __b, vector int __c) {
+ return __builtin_altivec_vmsumshs(__a, __b, __c);
+}
+
+/* vec_vmsumuhs */
+
+static vector unsigned int __attribute__((__always_inline__))
+vec_vmsumuhs(vector unsigned short __a, vector unsigned short __b,
+ vector unsigned int __c) {
+ return __builtin_altivec_vmsumuhs(__a, __b, __c);
+}
+
+/* vec_mtvscr */
+
+static void __ATTRS_o_ai vec_mtvscr(vector signed char __a) {
+ __builtin_altivec_mtvscr((vector int)__a);
+}
+
+static void __ATTRS_o_ai vec_mtvscr(vector unsigned char __a) {
+ __builtin_altivec_mtvscr((vector int)__a);
+}
+
+static void __ATTRS_o_ai vec_mtvscr(vector bool char __a) {
+ __builtin_altivec_mtvscr((vector int)__a);
+}
+
+static void __ATTRS_o_ai vec_mtvscr(vector short __a) {
+ __builtin_altivec_mtvscr((vector int)__a);
+}
+
+static void __ATTRS_o_ai vec_mtvscr(vector unsigned short __a) {
+ __builtin_altivec_mtvscr((vector int)__a);
+}
+
+static void __ATTRS_o_ai vec_mtvscr(vector bool short __a) {
+ __builtin_altivec_mtvscr((vector int)__a);
+}
+
+static void __ATTRS_o_ai vec_mtvscr(vector pixel __a) {
+ __builtin_altivec_mtvscr((vector int)__a);
+}
+
+static void __ATTRS_o_ai vec_mtvscr(vector int __a) {
+ __builtin_altivec_mtvscr((vector int)__a);
+}
+
+static void __ATTRS_o_ai vec_mtvscr(vector unsigned int __a) {
+ __builtin_altivec_mtvscr((vector int)__a);
+}
+
+static void __ATTRS_o_ai vec_mtvscr(vector bool int __a) {
+ __builtin_altivec_mtvscr((vector int)__a);
+}
+
+static void __ATTRS_o_ai vec_mtvscr(vector float __a) {
+ __builtin_altivec_mtvscr((vector int)__a);
+}
+
+/* vec_mul */
+
+/* Integer vector multiplication is performed by multiplying the odd and
+   even elements separately with widening multiplies, truncating each
+   product, and moving the results into the result vector. */
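+/* For illustration only (a hypothetical sketch, not part of this header):
+   because the high half of each product is discarded, vec_mul is a
+   modular multiply. With every element of x equal to 300,
+     vector unsigned short p = vec_mul(x, x);
+   leaves 90000 mod 65536 = 24464 in every element of p. */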
+static vector signed char __ATTRS_o_ai vec_mul(vector signed char __a,
+ vector signed char __b) {
+ return __a * __b;
+}
+
+static vector unsigned char __ATTRS_o_ai vec_mul(vector unsigned char __a,
+ vector unsigned char __b) {
+ return __a * __b;
+}
+
+static vector signed short __ATTRS_o_ai vec_mul(vector signed short __a,
+ vector signed short __b) {
+ return __a * __b;
+}
+
+static vector unsigned short __ATTRS_o_ai vec_mul(vector unsigned short __a,
+ vector unsigned short __b) {
+ return __a * __b;
+}
+
+static vector signed int __ATTRS_o_ai vec_mul(vector signed int __a,
+ vector signed int __b) {
+ return __a * __b;
+}
+
+static vector unsigned int __ATTRS_o_ai vec_mul(vector unsigned int __a,
+ vector unsigned int __b) {
+ return __a * __b;
+}
+
+#ifdef __VSX__
+static vector signed long long __ATTRS_o_ai
+vec_mul(vector signed long long __a, vector signed long long __b) {
+ return __a * __b;
+}
+
+static vector unsigned long long __ATTRS_o_ai
+vec_mul(vector unsigned long long __a, vector unsigned long long __b) {
+ return __a * __b;
+}
+#endif
+
+static vector float __ATTRS_o_ai vec_mul(vector float __a, vector float __b) {
+ return __a * __b;
+}
+
+#ifdef __VSX__
+static vector double __ATTRS_o_ai
+vec_mul(vector double __a, vector double __b) {
+ return __a * __b;
+}
+#endif
+
+/* The vmulos* and vmules* instructions have a big-endian bias, so
+   we must reverse the meaning of "even" and "odd" for little endian. */
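+/* For illustration only (hypothetical values, not part of this header):
+   given vector short a = {0, 1, 2, 3, 4, 5, 6, 7} and
+   b = {8, 9, 10, 11, 12, 13, 14, 15}, vec_mule(a, b) multiplies and
+   widens elements 0, 2, 4 and 6, yielding the vector int
+   {0, 20, 48, 84}, while vec_mulo(a, b) uses elements 1, 3, 5 and 7,
+   yielding {9, 33, 65, 105}. The #ifdefs below select the builtin that
+   preserves this numbering on either endianness. */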
+
+/* vec_mule */
+
+static vector short __ATTRS_o_ai vec_mule(vector signed char __a,
+ vector signed char __b) {
+#ifdef __LITTLE_ENDIAN__
+ return __builtin_altivec_vmulosb(__a, __b);
+#else
+ return __builtin_altivec_vmulesb(__a, __b);
+#endif
+}
+
+static vector unsigned short __ATTRS_o_ai vec_mule(vector unsigned char __a,
+ vector unsigned char __b) {
+#ifdef __LITTLE_ENDIAN__
+ return __builtin_altivec_vmuloub(__a, __b);
+#else
+ return __builtin_altivec_vmuleub(__a, __b);
+#endif
+}
+
+static vector int __ATTRS_o_ai vec_mule(vector short __a, vector short __b) {
+#ifdef __LITTLE_ENDIAN__
+ return __builtin_altivec_vmulosh(__a, __b);
+#else
+ return __builtin_altivec_vmulesh(__a, __b);
+#endif
+}
+
+static vector unsigned int __ATTRS_o_ai vec_mule(vector unsigned short __a,
+ vector unsigned short __b) {
+#ifdef __LITTLE_ENDIAN__
+ return __builtin_altivec_vmulouh(__a, __b);
+#else
+ return __builtin_altivec_vmuleuh(__a, __b);
+#endif
+}
+
+#ifdef __POWER8_VECTOR__
+static vector signed long long __ATTRS_o_ai vec_mule(vector signed int __a,
+ vector signed int __b) {
+#ifdef __LITTLE_ENDIAN__
+ return __builtin_altivec_vmulosw(__a, __b);
+#else
+ return __builtin_altivec_vmulesw(__a, __b);
+#endif
+}
+
+static vector unsigned long long __ATTRS_o_ai
+vec_mule(vector unsigned int __a, vector unsigned int __b) {
+#ifdef __LITTLE_ENDIAN__
+ return __builtin_altivec_vmulouw(__a, __b);
+#else
+ return __builtin_altivec_vmuleuw(__a, __b);
+#endif
+}
+#endif
+
+/* vec_vmulesb */
+
+static vector short __attribute__((__always_inline__))
+vec_vmulesb(vector signed char __a, vector signed char __b) {
+#ifdef __LITTLE_ENDIAN__
+ return __builtin_altivec_vmulosb(__a, __b);
+#else
+ return __builtin_altivec_vmulesb(__a, __b);
+#endif
+}
+
+/* vec_vmuleub */
+
+static vector unsigned short __attribute__((__always_inline__))
+vec_vmuleub(vector unsigned char __a, vector unsigned char __b) {
+#ifdef __LITTLE_ENDIAN__
+ return __builtin_altivec_vmuloub(__a, __b);
+#else
+ return __builtin_altivec_vmuleub(__a, __b);
+#endif
+}
+
+/* vec_vmulesh */
+
+static vector int __attribute__((__always_inline__))
+vec_vmulesh(vector short __a, vector short __b) {
+#ifdef __LITTLE_ENDIAN__
+ return __builtin_altivec_vmulosh(__a, __b);
+#else
+ return __builtin_altivec_vmulesh(__a, __b);
+#endif
+}
+
+/* vec_vmuleuh */
+
+static vector unsigned int __attribute__((__always_inline__))
+vec_vmuleuh(vector unsigned short __a, vector unsigned short __b) {
+#ifdef __LITTLE_ENDIAN__
+ return __builtin_altivec_vmulouh(__a, __b);
+#else
+ return __builtin_altivec_vmuleuh(__a, __b);
+#endif
+}
+
+/* vec_mulo */
+
+static vector short __ATTRS_o_ai vec_mulo(vector signed char __a,
+ vector signed char __b) {
+#ifdef __LITTLE_ENDIAN__
+ return __builtin_altivec_vmulesb(__a, __b);
+#else
+ return __builtin_altivec_vmulosb(__a, __b);
+#endif
+}
+
+static vector unsigned short __ATTRS_o_ai vec_mulo(vector unsigned char __a,
+ vector unsigned char __b) {
+#ifdef __LITTLE_ENDIAN__
+ return __builtin_altivec_vmuleub(__a, __b);
+#else
+ return __builtin_altivec_vmuloub(__a, __b);
+#endif
+}
+
+static vector int __ATTRS_o_ai vec_mulo(vector short __a, vector short __b) {
+#ifdef __LITTLE_ENDIAN__
+ return __builtin_altivec_vmulesh(__a, __b);
+#else
+ return __builtin_altivec_vmulosh(__a, __b);
+#endif
+}
+
+static vector unsigned int __ATTRS_o_ai vec_mulo(vector unsigned short __a,
+ vector unsigned short __b) {
+#ifdef __LITTLE_ENDIAN__
+ return __builtin_altivec_vmuleuh(__a, __b);
+#else
+ return __builtin_altivec_vmulouh(__a, __b);
+#endif
+}
+
+#ifdef __POWER8_VECTOR__
+static vector signed long long __ATTRS_o_ai vec_mulo(vector signed int __a,
+ vector signed int __b) {
+#ifdef __LITTLE_ENDIAN__
+ return __builtin_altivec_vmulesw(__a, __b);
+#else
+ return __builtin_altivec_vmulosw(__a, __b);
+#endif
+}
+
+static vector unsigned long long __ATTRS_o_ai
+vec_mulo(vector unsigned int __a, vector unsigned int __b) {
+#ifdef __LITTLE_ENDIAN__
+ return __builtin_altivec_vmuleuw(__a, __b);
+#else
+ return __builtin_altivec_vmulouw(__a, __b);
+#endif
+}
+#endif
+
+/* vec_vmulosb */
+
+static vector short __attribute__((__always_inline__))
+vec_vmulosb(vector signed char __a, vector signed char __b) {
+#ifdef __LITTLE_ENDIAN__
+ return __builtin_altivec_vmulesb(__a, __b);
+#else
+ return __builtin_altivec_vmulosb(__a, __b);
+#endif
+}
+
+/* vec_vmuloub */
+
+static vector unsigned short __attribute__((__always_inline__))
+vec_vmuloub(vector unsigned char __a, vector unsigned char __b) {
+#ifdef __LITTLE_ENDIAN__
+ return __builtin_altivec_vmuleub(__a, __b);
+#else
+ return __builtin_altivec_vmuloub(__a, __b);
+#endif
+}
+
+/* vec_vmulosh */
+
+static vector int __attribute__((__always_inline__))
+vec_vmulosh(vector short __a, vector short __b) {
+#ifdef __LITTLE_ENDIAN__
+ return __builtin_altivec_vmulesh(__a, __b);
+#else
+ return __builtin_altivec_vmulosh(__a, __b);
+#endif
+}
+
+/* vec_vmulouh */
+
+static vector unsigned int __attribute__((__always_inline__))
+vec_vmulouh(vector unsigned short __a, vector unsigned short __b) {
+#ifdef __LITTLE_ENDIAN__
+ return __builtin_altivec_vmuleuh(__a, __b);
+#else
+ return __builtin_altivec_vmulouh(__a, __b);
+#endif
+}
+
+/* vec_nand */
+
+#ifdef __POWER8_VECTOR__
+static vector signed char __ATTRS_o_ai vec_nand(vector signed char __a,
+ vector signed char __b) {
+ return ~(__a & __b);
+}
+
+static vector signed char __ATTRS_o_ai vec_nand(vector signed char __a,
+ vector bool char __b) {
+ return ~(__a & __b);
+}
+
+static vector signed char __ATTRS_o_ai vec_nand(vector bool char __a,
+ vector signed char __b) {
+ return ~(__a & __b);
+}
+
+static vector unsigned char __ATTRS_o_ai vec_nand(vector unsigned char __a,
+ vector unsigned char __b) {
+ return ~(__a & __b);
+}
+
+static vector unsigned char __ATTRS_o_ai vec_nand(vector unsigned char __a,
+ vector bool char __b) {
+ return ~(__a & __b);
+
+}
+
+static vector unsigned char __ATTRS_o_ai vec_nand(vector bool char __a,
+ vector unsigned char __b) {
+ return ~(__a & __b);
+}
+
+static vector bool char __ATTRS_o_ai vec_nand(vector bool char __a,
+ vector bool char __b) {
+ return ~(__a & __b);
+}
+
+static vector signed short __ATTRS_o_ai vec_nand(vector signed short __a,
+ vector signed short __b) {
+ return ~(__a & __b);
+}
+
+static vector signed short __ATTRS_o_ai vec_nand(vector signed short __a,
+ vector bool short __b) {
+ return ~(__a & __b);
+}
+
+static vector signed short __ATTRS_o_ai vec_nand(vector bool short __a,
+ vector signed short __b) {
+ return ~(__a & __b);
+}
+
+static vector unsigned short __ATTRS_o_ai vec_nand(vector unsigned short __a,
+ vector unsigned short __b) {
+ return ~(__a & __b);
+}
+
+static vector unsigned short __ATTRS_o_ai vec_nand(vector unsigned short __a,
+                                                   vector bool short __b) {
+  return ~(__a & __b);
+}
+
+static vector unsigned short __ATTRS_o_ai
+vec_nand(vector bool short __a, vector unsigned short __b) {
+  return ~(__a & __b);
+}
+
+static vector bool short __ATTRS_o_ai vec_nand(vector bool short __a,
+ vector bool short __b) {
+ return ~(__a & __b);
+
+}
+
+static vector signed int __ATTRS_o_ai vec_nand(vector signed int __a,
+ vector signed int __b) {
+ return ~(__a & __b);
+}
+
+static vector signed int __ATTRS_o_ai vec_nand(vector signed int __a,
+ vector bool int __b) {
+ return ~(__a & __b);
+}
+
+static vector signed int __ATTRS_o_ai vec_nand(vector bool int __a,
+ vector signed int __b) {
+ return ~(__a & __b);
+}
+
+static vector unsigned int __ATTRS_o_ai vec_nand(vector unsigned int __a,
+ vector unsigned int __b) {
+ return ~(__a & __b);
+}
+
+static vector unsigned int __ATTRS_o_ai vec_nand(vector unsigned int __a,
+ vector bool int __b) {
+ return ~(__a & __b);
+}
+
+static vector unsigned int __ATTRS_o_ai vec_nand(vector bool int __a,
+ vector unsigned int __b) {
+ return ~(__a & __b);
+}
+
+static vector bool int __ATTRS_o_ai vec_nand(vector bool int __a,
+ vector bool int __b) {
+ return ~(__a & __b);
+}
+
+static vector signed long long __ATTRS_o_ai
+vec_nand(vector signed long long __a, vector signed long long __b) {
+ return ~(__a & __b);
+}
+
+static vector signed long long __ATTRS_o_ai
+vec_nand(vector signed long long __a, vector bool long long __b) {
+ return ~(__a & __b);
+}
+
+static vector signed long long __ATTRS_o_ai
+vec_nand(vector bool long long __a, vector signed long long __b) {
+ return ~(__a & __b);
+}
+
+static vector unsigned long long __ATTRS_o_ai
+vec_nand(vector unsigned long long __a, vector unsigned long long __b) {
+ return ~(__a & __b);
+}
+
+static vector unsigned long long __ATTRS_o_ai
+vec_nand(vector unsigned long long __a, vector bool long long __b) {
+ return ~(__a & __b);
+}
+
+static vector unsigned long long __ATTRS_o_ai
+vec_nand(vector bool long long __a, vector unsigned long long __b) {
+ return ~(__a & __b);
+}
+
+static vector bool long long __ATTRS_o_ai
+vec_nand(vector bool long long __a, vector bool long long __b) {
+ return ~(__a & __b);
+}
+
+#endif
+
+/* vec_nmadd */
+
+#ifdef __VSX__
+static vector float __ATTRS_o_ai
+vec_nmadd(vector float __a, vector float __b, vector float __c) {
+ return __builtin_vsx_xvnmaddasp(__a, __b, __c);
+}
+
+static vector double __ATTRS_o_ai
+vec_nmadd(vector double __a, vector double __b, vector double __c) {
+ return __builtin_vsx_xvnmaddadp(__a, __b, __c);
+}
+#endif
+
+/* vec_nmsub */
+
+static vector float __ATTRS_o_ai
+vec_nmsub(vector float __a, vector float __b, vector float __c) {
+#ifdef __VSX__
+ return __builtin_vsx_xvnmsubasp(__a, __b, __c);
+#else
+ return __builtin_altivec_vnmsubfp(__a, __b, __c);
+#endif
+}
+
+#ifdef __VSX__
+static vector double __ATTRS_o_ai
+vec_nmsub(vector double __a, vector double __b, vector double __c) {
+ return __builtin_vsx_xvnmsubadp(__a, __b, __c);
+}
+#endif
+
+/* vec_vnmsubfp */
+
+static vector float __attribute__((__always_inline__))
+vec_vnmsubfp(vector float __a, vector float __b, vector float __c) {
+ return __builtin_altivec_vnmsubfp(__a, __b, __c);
+}
+
+/* vec_nor */
+
+#define __builtin_altivec_vnor vec_nor
+
+static vector signed char __ATTRS_o_ai vec_nor(vector signed char __a,
+ vector signed char __b) {
+ return ~(__a | __b);
+}
+
+static vector unsigned char __ATTRS_o_ai vec_nor(vector unsigned char __a,
+ vector unsigned char __b) {
+ return ~(__a | __b);
+}
+
+static vector bool char __ATTRS_o_ai vec_nor(vector bool char __a,
+ vector bool char __b) {
+ return ~(__a | __b);
+}
+
+static vector short __ATTRS_o_ai vec_nor(vector short __a, vector short __b) {
+ return ~(__a | __b);
+}
+
+static vector unsigned short __ATTRS_o_ai vec_nor(vector unsigned short __a,
+ vector unsigned short __b) {
+ return ~(__a | __b);
+}
+
+static vector bool short __ATTRS_o_ai vec_nor(vector bool short __a,
+ vector bool short __b) {
+ return ~(__a | __b);
+}
+
+static vector int __ATTRS_o_ai vec_nor(vector int __a, vector int __b) {
+ return ~(__a | __b);
+}
+
+static vector unsigned int __ATTRS_o_ai vec_nor(vector unsigned int __a,
+ vector unsigned int __b) {
+ return ~(__a | __b);
+}
+
+static vector bool int __ATTRS_o_ai vec_nor(vector bool int __a,
+ vector bool int __b) {
+ return ~(__a | __b);
+}
+
+static vector float __ATTRS_o_ai vec_nor(vector float __a, vector float __b) {
+ vector unsigned int __res =
+ ~((vector unsigned int)__a | (vector unsigned int)__b);
+ return (vector float)__res;
+}
+
+#ifdef __VSX__
+static vector double __ATTRS_o_ai
+vec_nor(vector double __a, vector double __b) {
+ vector unsigned long long __res =
+ ~((vector unsigned long long)__a | (vector unsigned long long)__b);
+ return (vector double)__res;
+}
+#endif
+
+/* vec_vnor */
+
+static vector signed char __ATTRS_o_ai vec_vnor(vector signed char __a,
+ vector signed char __b) {
+ return ~(__a | __b);
+}
+
+static vector unsigned char __ATTRS_o_ai vec_vnor(vector unsigned char __a,
+ vector unsigned char __b) {
+ return ~(__a | __b);
+}
+
+static vector bool char __ATTRS_o_ai vec_vnor(vector bool char __a,
+ vector bool char __b) {
+ return ~(__a | __b);
+}
+
+static vector short __ATTRS_o_ai vec_vnor(vector short __a, vector short __b) {
+ return ~(__a | __b);
+}
+
+static vector unsigned short __ATTRS_o_ai vec_vnor(vector unsigned short __a,
+ vector unsigned short __b) {
+ return ~(__a | __b);
+}
+
+static vector bool short __ATTRS_o_ai vec_vnor(vector bool short __a,
+ vector bool short __b) {
+ return ~(__a | __b);
+}
+
+static vector int __ATTRS_o_ai vec_vnor(vector int __a, vector int __b) {
+ return ~(__a | __b);
+}
+
+static vector unsigned int __ATTRS_o_ai vec_vnor(vector unsigned int __a,
+ vector unsigned int __b) {
+ return ~(__a | __b);
+}
+
+static vector bool int __ATTRS_o_ai vec_vnor(vector bool int __a,
+ vector bool int __b) {
+ return ~(__a | __b);
+}
+
+static vector float __ATTRS_o_ai vec_vnor(vector float __a, vector float __b) {
+ vector unsigned int __res =
+ ~((vector unsigned int)__a | (vector unsigned int)__b);
+ return (vector float)__res;
+}
+
+#ifdef __VSX__
+static vector signed long long __ATTRS_o_ai
+vec_nor(vector signed long long __a, vector signed long long __b) {
+ return ~(__a | __b);
+}
+
+static vector unsigned long long __ATTRS_o_ai
+vec_nor(vector unsigned long long __a, vector unsigned long long __b) {
+ return ~(__a | __b);
+}
+
+static vector bool long long __ATTRS_o_ai vec_nor(vector bool long long __a,
+ vector bool long long __b) {
+ return ~(__a | __b);
+}
+#endif
+
+/* vec_or */
+
+#define __builtin_altivec_vor vec_or
+
+static vector signed char __ATTRS_o_ai vec_or(vector signed char __a,
+ vector signed char __b) {
+ return __a | __b;
+}
+
+static vector signed char __ATTRS_o_ai vec_or(vector bool char __a,
+ vector signed char __b) {
+ return (vector signed char)__a | __b;
+}
+
+static vector signed char __ATTRS_o_ai vec_or(vector signed char __a,
+ vector bool char __b) {
+ return __a | (vector signed char)__b;
+}
+
+static vector unsigned char __ATTRS_o_ai vec_or(vector unsigned char __a,
+ vector unsigned char __b) {
+ return __a | __b;
+}
+
+static vector unsigned char __ATTRS_o_ai vec_or(vector bool char __a,
+ vector unsigned char __b) {
+ return (vector unsigned char)__a | __b;
+}
+
+static vector unsigned char __ATTRS_o_ai vec_or(vector unsigned char __a,
+ vector bool char __b) {
+ return __a | (vector unsigned char)__b;
+}
+
+static vector bool char __ATTRS_o_ai vec_or(vector bool char __a,
+ vector bool char __b) {
+ return __a | __b;
+}
+
+static vector short __ATTRS_o_ai vec_or(vector short __a, vector short __b) {
+ return __a | __b;
+}
+
+static vector short __ATTRS_o_ai vec_or(vector bool short __a,
+ vector short __b) {
+ return (vector short)__a | __b;
+}
+
+static vector short __ATTRS_o_ai vec_or(vector short __a,
+ vector bool short __b) {
+ return __a | (vector short)__b;
+}
+
+static vector unsigned short __ATTRS_o_ai vec_or(vector unsigned short __a,
+ vector unsigned short __b) {
+ return __a | __b;
+}
+
+static vector unsigned short __ATTRS_o_ai vec_or(vector bool short __a,
+ vector unsigned short __b) {
+ return (vector unsigned short)__a | __b;
+}
+
+static vector unsigned short __ATTRS_o_ai vec_or(vector unsigned short __a,
+ vector bool short __b) {
+ return __a | (vector unsigned short)__b;
+}
+
+static vector bool short __ATTRS_o_ai vec_or(vector bool short __a,
+ vector bool short __b) {
+ return __a | __b;
+}
+
+static vector int __ATTRS_o_ai vec_or(vector int __a, vector int __b) {
+ return __a | __b;
+}
+
+static vector int __ATTRS_o_ai vec_or(vector bool int __a, vector int __b) {
+ return (vector int)__a | __b;
+}
+
+static vector int __ATTRS_o_ai vec_or(vector int __a, vector bool int __b) {
+ return __a | (vector int)__b;
+}
+
+static vector unsigned int __ATTRS_o_ai vec_or(vector unsigned int __a,
+ vector unsigned int __b) {
+ return __a | __b;
+}
+
+static vector unsigned int __ATTRS_o_ai vec_or(vector bool int __a,
+ vector unsigned int __b) {
+ return (vector unsigned int)__a | __b;
+}
+
+static vector unsigned int __ATTRS_o_ai vec_or(vector unsigned int __a,
+ vector bool int __b) {
+ return __a | (vector unsigned int)__b;
+}
+
+static vector bool int __ATTRS_o_ai vec_or(vector bool int __a,
+ vector bool int __b) {
+ return __a | __b;
+}
+
+static vector float __ATTRS_o_ai vec_or(vector float __a, vector float __b) {
+ vector unsigned int __res =
+ (vector unsigned int)__a | (vector unsigned int)__b;
+ return (vector float)__res;
+}
+
+static vector float __ATTRS_o_ai vec_or(vector bool int __a, vector float __b) {
+ vector unsigned int __res =
+ (vector unsigned int)__a | (vector unsigned int)__b;
+ return (vector float)__res;
+}
+
+static vector float __ATTRS_o_ai vec_or(vector float __a, vector bool int __b) {
+ vector unsigned int __res =
+ (vector unsigned int)__a | (vector unsigned int)__b;
+ return (vector float)__res;
+}
+
+#ifdef __VSX__
+static vector double __ATTRS_o_ai vec_or(vector bool long long __a,
+                                         vector double __b) {
+  return (vector double)((vector unsigned long long)__a |
+                         (vector unsigned long long)__b);
+}
+
+static vector double __ATTRS_o_ai vec_or(vector double __a,
+                                         vector bool long long __b) {
+  return (vector double)((vector unsigned long long)__a |
+                         (vector unsigned long long)__b);
+}
+
+static vector double __ATTRS_o_ai vec_or(vector double __a, vector double __b) {
+ vector unsigned long long __res =
+ (vector unsigned long long)__a | (vector unsigned long long)__b;
+ return (vector double)__res;
+}
+
+static vector signed long long __ATTRS_o_ai
+vec_or(vector signed long long __a, vector signed long long __b) {
+ return __a | __b;
+}
+
+static vector signed long long __ATTRS_o_ai
+vec_or(vector bool long long __a, vector signed long long __b) {
+ return (vector signed long long)__a | __b;
+}
+
+static vector signed long long __ATTRS_o_ai vec_or(vector signed long long __a,
+ vector bool long long __b) {
+ return __a | (vector signed long long)__b;
+}
+
+static vector unsigned long long __ATTRS_o_ai
+vec_or(vector unsigned long long __a, vector unsigned long long __b) {
+ return __a | __b;
+}
+
+static vector unsigned long long __ATTRS_o_ai
+vec_or(vector bool long long __a, vector unsigned long long __b) {
+ return (vector unsigned long long)__a | __b;
+}
+
+static vector unsigned long long __ATTRS_o_ai
+vec_or(vector unsigned long long __a, vector bool long long __b) {
+ return __a | (vector unsigned long long)__b;
+}
+
+static vector bool long long __ATTRS_o_ai vec_or(vector bool long long __a,
+ vector bool long long __b) {
+ return __a | __b;
+}
+#endif
+
+#ifdef __POWER8_VECTOR__
+static vector signed char __ATTRS_o_ai vec_orc(vector signed char __a,
+ vector signed char __b) {
+ return __a | ~__b;
+}
+
+static vector signed char __ATTRS_o_ai vec_orc(vector signed char __a,
+ vector bool char __b) {
+ return __a | ~__b;
+}
+
+static vector signed char __ATTRS_o_ai vec_orc(vector bool char __a,
+ vector signed char __b) {
+ return __a | ~__b;
+}
+
+static vector unsigned char __ATTRS_o_ai vec_orc(vector unsigned char __a,
+ vector unsigned char __b) {
+ return __a | ~__b;
+}
+
+static vector unsigned char __ATTRS_o_ai vec_orc(vector unsigned char __a,
+ vector bool char __b) {
+ return __a | ~__b;
+}
+
+static vector unsigned char __ATTRS_o_ai vec_orc(vector bool char __a,
+ vector unsigned char __b) {
+ return __a | ~__b;
+}
+
+static vector bool char __ATTRS_o_ai vec_orc(vector bool char __a,
+ vector bool char __b) {
+ return __a | ~__b;
+}
+
+static vector signed short __ATTRS_o_ai vec_orc(vector signed short __a,
+ vector signed short __b) {
+ return __a | ~__b;
+}
+
+static vector signed short __ATTRS_o_ai vec_orc(vector signed short __a,
+ vector bool short __b) {
+ return __a | ~__b;
+}
+
+static vector signed short __ATTRS_o_ai vec_orc(vector bool short __a,
+ vector signed short __b) {
+ return __a | ~__b;
+}
+
+static vector unsigned short __ATTRS_o_ai vec_orc(vector unsigned short __a,
+ vector unsigned short __b) {
+ return __a | ~__b;
+}
+
+static vector unsigned short __ATTRS_o_ai vec_orc(vector unsigned short __a,
+ vector bool short __b) {
+ return __a | ~__b;
+}
+
+static vector unsigned short __ATTRS_o_ai
+vec_orc(vector bool short __a, vector unsigned short __b) {
+ return __a | ~__b;
+}
+
+static vector bool short __ATTRS_o_ai vec_orc(vector bool short __a,
+ vector bool short __b) {
+ return __a | ~__b;
+}
+
+static vector signed int __ATTRS_o_ai vec_orc(vector signed int __a,
+ vector signed int __b) {
+ return __a | ~__b;
+}
+
+static vector signed int __ATTRS_o_ai vec_orc(vector signed int __a,
+ vector bool int __b) {
+ return __a | ~__b;
+}
+
+static vector signed int __ATTRS_o_ai vec_orc(vector bool int __a,
+ vector signed int __b) {
+ return __a | ~__b;
+}
+
+static vector unsigned int __ATTRS_o_ai vec_orc(vector unsigned int __a,
+ vector unsigned int __b) {
+ return __a | ~__b;
+}
+
+static vector unsigned int __ATTRS_o_ai vec_orc(vector unsigned int __a,
+ vector bool int __b) {
+ return __a | ~__b;
+}
+
+static vector unsigned int __ATTRS_o_ai vec_orc(vector bool int __a,
+ vector unsigned int __b) {
+ return __a | ~__b;
+}
+
+static vector bool int __ATTRS_o_ai vec_orc(vector bool int __a,
+ vector bool int __b) {
+ return __a | ~__b;
+}
+
+static vector signed long long __ATTRS_o_ai
+vec_orc(vector signed long long __a, vector signed long long __b) {
+ return __a | ~__b;
+}
+
+static vector signed long long __ATTRS_o_ai vec_orc(vector signed long long __a,
+ vector bool long long __b) {
+ return __a | ~__b;
+}
+
+static vector signed long long __ATTRS_o_ai
+vec_orc(vector bool long long __a, vector signed long long __b) {
+ return __a | ~__b;
+}
+
+static vector unsigned long long __ATTRS_o_ai
+vec_orc(vector unsigned long long __a, vector unsigned long long __b) {
+ return __a | ~__b;
+}
+
+static vector unsigned long long __ATTRS_o_ai
+vec_orc(vector unsigned long long __a, vector bool long long __b) {
+ return __a | ~__b;
+}
+
+static vector unsigned long long __ATTRS_o_ai
+vec_orc(vector bool long long __a, vector unsigned long long __b) {
+ return __a | ~__b;
+}
+
+static vector bool long long __ATTRS_o_ai
+vec_orc(vector bool long long __a, vector bool long long __b) {
+ return __a | ~__b;
+}
+#endif
+
+/* vec_vor */
+
+static vector signed char __ATTRS_o_ai vec_vor(vector signed char __a,
+ vector signed char __b) {
+ return __a | __b;
+}
+
+static vector signed char __ATTRS_o_ai vec_vor(vector bool char __a,
+ vector signed char __b) {
+ return (vector signed char)__a | __b;
+}
+
+static vector signed char __ATTRS_o_ai vec_vor(vector signed char __a,
+ vector bool char __b) {
+ return __a | (vector signed char)__b;
+}
+
+static vector unsigned char __ATTRS_o_ai vec_vor(vector unsigned char __a,
+ vector unsigned char __b) {
+ return __a | __b;
+}
+
+static vector unsigned char __ATTRS_o_ai vec_vor(vector bool char __a,
+ vector unsigned char __b) {
+ return (vector unsigned char)__a | __b;
+}
+
+static vector unsigned char __ATTRS_o_ai vec_vor(vector unsigned char __a,
+ vector bool char __b) {
+ return __a | (vector unsigned char)__b;
+}
+
+static vector bool char __ATTRS_o_ai vec_vor(vector bool char __a,
+ vector bool char __b) {
+ return __a | __b;
+}
+
+static vector short __ATTRS_o_ai vec_vor(vector short __a, vector short __b) {
+ return __a | __b;
+}
+
+static vector short __ATTRS_o_ai vec_vor(vector bool short __a,
+ vector short __b) {
+ return (vector short)__a | __b;
+}
+
+static vector short __ATTRS_o_ai vec_vor(vector short __a,
+ vector bool short __b) {
+ return __a | (vector short)__b;
+}
+
+static vector unsigned short __ATTRS_o_ai vec_vor(vector unsigned short __a,
+ vector unsigned short __b) {
+ return __a | __b;
+}
+
+static vector unsigned short __ATTRS_o_ai vec_vor(vector bool short __a,
+ vector unsigned short __b) {
+ return (vector unsigned short)__a | __b;
+}
+
+static vector unsigned short __ATTRS_o_ai vec_vor(vector unsigned short __a,
+ vector bool short __b) {
+ return __a | (vector unsigned short)__b;
+}
+
+static vector bool short __ATTRS_o_ai vec_vor(vector bool short __a,
+ vector bool short __b) {
+ return __a | __b;
+}
+
+static vector int __ATTRS_o_ai vec_vor(vector int __a, vector int __b) {
+ return __a | __b;
+}
+
+static vector int __ATTRS_o_ai vec_vor(vector bool int __a, vector int __b) {
+ return (vector int)__a | __b;
+}
+
+static vector int __ATTRS_o_ai vec_vor(vector int __a, vector bool int __b) {
+ return __a | (vector int)__b;
+}
+
+static vector unsigned int __ATTRS_o_ai vec_vor(vector unsigned int __a,
+ vector unsigned int __b) {
+ return __a | __b;
+}
+
+static vector unsigned int __ATTRS_o_ai vec_vor(vector bool int __a,
+ vector unsigned int __b) {
+ return (vector unsigned int)__a | __b;
+}
+
+static vector unsigned int __ATTRS_o_ai vec_vor(vector unsigned int __a,
+ vector bool int __b) {
+ return __a | (vector unsigned int)__b;
+}
+
+static vector bool int __ATTRS_o_ai vec_vor(vector bool int __a,
+ vector bool int __b) {
+ return __a | __b;
+}
+
+static vector float __ATTRS_o_ai vec_vor(vector float __a, vector float __b) {
+ vector unsigned int __res =
+ (vector unsigned int)__a | (vector unsigned int)__b;
+ return (vector float)__res;
+}
+
+static vector float __ATTRS_o_ai vec_vor(vector bool int __a,
+ vector float __b) {
+ vector unsigned int __res =
+ (vector unsigned int)__a | (vector unsigned int)__b;
+ return (vector float)__res;
+}
+
+static vector float __ATTRS_o_ai vec_vor(vector float __a,
+ vector bool int __b) {
+ vector unsigned int __res =
+ (vector unsigned int)__a | (vector unsigned int)__b;
+ return (vector float)__res;
+}
+
+#ifdef __VSX__
+static vector signed long long __ATTRS_o_ai
+vec_vor(vector signed long long __a, vector signed long long __b) {
+ return __a | __b;
+}
+
+static vector signed long long __ATTRS_o_ai
+vec_vor(vector bool long long __a, vector signed long long __b) {
+ return (vector signed long long)__a | __b;
+}
+
+static vector signed long long __ATTRS_o_ai vec_vor(vector signed long long __a,
+ vector bool long long __b) {
+ return __a | (vector signed long long)__b;
+}
+
+static vector unsigned long long __ATTRS_o_ai
+vec_vor(vector unsigned long long __a, vector unsigned long long __b) {
+ return __a | __b;
+}
+
+static vector unsigned long long __ATTRS_o_ai
+vec_vor(vector bool long long __a, vector unsigned long long __b) {
+ return (vector unsigned long long)__a | __b;
+}
+
+static vector unsigned long long __ATTRS_o_ai
+vec_vor(vector unsigned long long __a, vector bool long long __b) {
+ return __a | (vector unsigned long long)__b;
+}
+
+static vector bool long long __ATTRS_o_ai vec_vor(vector bool long long __a,
+ vector bool long long __b) {
+ return __a | __b;
+}
+#endif
+
+/* vec_pack */
+
+/* The various vector pack instructions have a big-endian bias, so for
+ little endian we must handle reversed element numbering. */
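+/* For illustration only (hypothetical values, not part of this header):
+   vec_pack keeps the low-order half of each element, with the results
+   from __a preceding those from __b. Given
+     vector unsigned int a = {0x11112222, 0x33334444, 0x55556666, 0x77778888};
+     vector unsigned int b = {0x9999AAAA, 0xBBBBCCCC, 0xDDDDEEEE, 0xFF110022};
+   vec_pack(a, b) is the vector unsigned short
+   {0x2222, 0x4444, 0x6666, 0x8888, 0xAAAA, 0xCCCC, 0xEEEE, 0x0022}
+   on either endianness, thanks to the permutes below. */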
+
+static vector signed char __ATTRS_o_ai vec_pack(vector signed short __a,
+ vector signed short __b) {
+#ifdef __LITTLE_ENDIAN__
+ return (vector signed char)vec_perm(
+ __a, __b,
+ (vector unsigned char)(0x00, 0x02, 0x04, 0x06, 0x08, 0x0A, 0x0C, 0x0E,
+ 0x10, 0x12, 0x14, 0x16, 0x18, 0x1A, 0x1C, 0x1E));
+#else
+ return (vector signed char)vec_perm(
+ __a, __b,
+ (vector unsigned char)(0x01, 0x03, 0x05, 0x07, 0x09, 0x0B, 0x0D, 0x0F,
+ 0x11, 0x13, 0x15, 0x17, 0x19, 0x1B, 0x1D, 0x1F));
+#endif
+}
+
+static vector unsigned char __ATTRS_o_ai vec_pack(vector unsigned short __a,
+ vector unsigned short __b) {
+#ifdef __LITTLE_ENDIAN__
+ return (vector unsigned char)vec_perm(
+ __a, __b,
+ (vector unsigned char)(0x00, 0x02, 0x04, 0x06, 0x08, 0x0A, 0x0C, 0x0E,
+ 0x10, 0x12, 0x14, 0x16, 0x18, 0x1A, 0x1C, 0x1E));
+#else
+ return (vector unsigned char)vec_perm(
+ __a, __b,
+ (vector unsigned char)(0x01, 0x03, 0x05, 0x07, 0x09, 0x0B, 0x0D, 0x0F,
+ 0x11, 0x13, 0x15, 0x17, 0x19, 0x1B, 0x1D, 0x1F));
+#endif
+}
+
+static vector bool char __ATTRS_o_ai vec_pack(vector bool short __a,
+ vector bool short __b) {
+#ifdef __LITTLE_ENDIAN__
+ return (vector bool char)vec_perm(
+ __a, __b,
+ (vector unsigned char)(0x00, 0x02, 0x04, 0x06, 0x08, 0x0A, 0x0C, 0x0E,
+ 0x10, 0x12, 0x14, 0x16, 0x18, 0x1A, 0x1C, 0x1E));
+#else
+ return (vector bool char)vec_perm(
+ __a, __b,
+ (vector unsigned char)(0x01, 0x03, 0x05, 0x07, 0x09, 0x0B, 0x0D, 0x0F,
+ 0x11, 0x13, 0x15, 0x17, 0x19, 0x1B, 0x1D, 0x1F));
+#endif
+}
+
+static vector short __ATTRS_o_ai vec_pack(vector int __a, vector int __b) {
+#ifdef __LITTLE_ENDIAN__
+ return (vector short)vec_perm(
+ __a, __b,
+ (vector unsigned char)(0x00, 0x01, 0x04, 0x05, 0x08, 0x09, 0x0C, 0x0D,
+ 0x10, 0x11, 0x14, 0x15, 0x18, 0x19, 0x1C, 0x1D));
+#else
+ return (vector short)vec_perm(
+ __a, __b,
+ (vector unsigned char)(0x02, 0x03, 0x06, 0x07, 0x0A, 0x0B, 0x0E, 0x0F,
+ 0x12, 0x13, 0x16, 0x17, 0x1A, 0x1B, 0x1E, 0x1F));
+#endif
+}
+
+static vector unsigned short __ATTRS_o_ai vec_pack(vector unsigned int __a,
+ vector unsigned int __b) {
+#ifdef __LITTLE_ENDIAN__
+ return (vector unsigned short)vec_perm(
+ __a, __b,
+ (vector unsigned char)(0x00, 0x01, 0x04, 0x05, 0x08, 0x09, 0x0C, 0x0D,
+ 0x10, 0x11, 0x14, 0x15, 0x18, 0x19, 0x1C, 0x1D));
+#else
+ return (vector unsigned short)vec_perm(
+ __a, __b,
+ (vector unsigned char)(0x02, 0x03, 0x06, 0x07, 0x0A, 0x0B, 0x0E, 0x0F,
+ 0x12, 0x13, 0x16, 0x17, 0x1A, 0x1B, 0x1E, 0x1F));
+#endif
+}
+
+static vector bool short __ATTRS_o_ai vec_pack(vector bool int __a,
+ vector bool int __b) {
+#ifdef __LITTLE_ENDIAN__
+ return (vector bool short)vec_perm(
+ __a, __b,
+ (vector unsigned char)(0x00, 0x01, 0x04, 0x05, 0x08, 0x09, 0x0C, 0x0D,
+ 0x10, 0x11, 0x14, 0x15, 0x18, 0x19, 0x1C, 0x1D));
+#else
+ return (vector bool short)vec_perm(
+ __a, __b,
+ (vector unsigned char)(0x02, 0x03, 0x06, 0x07, 0x0A, 0x0B, 0x0E, 0x0F,
+ 0x12, 0x13, 0x16, 0x17, 0x1A, 0x1B, 0x1E, 0x1F));
+#endif
+}
+
+#ifdef __VSX__
+static vector signed int __ATTRS_o_ai vec_pack(vector signed long long __a,
+ vector signed long long __b) {
+#ifdef __LITTLE_ENDIAN__
+ return (vector signed int)vec_perm(
+ __a, __b,
+ (vector unsigned char)(0x00, 0x01, 0x02, 0x03, 0x08, 0x09, 0x0A, 0x0B,
+ 0x10, 0x11, 0x12, 0x13, 0x18, 0x19, 0x1A, 0x1B));
+#else
+ return (vector signed int)vec_perm(
+ __a, __b,
+ (vector unsigned char)(0x04, 0x05, 0x06, 0x07, 0x0C, 0x0D, 0x0E, 0x0F,
+ 0x14, 0x15, 0x16, 0x17, 0x1C, 0x1D, 0x1E, 0x1F));
+#endif
+}
+
+static vector unsigned int __ATTRS_o_ai
+vec_pack(vector unsigned long long __a, vector unsigned long long __b) {
+#ifdef __LITTLE_ENDIAN__
+ return (vector unsigned int)vec_perm(
+ __a, __b,
+ (vector unsigned char)(0x00, 0x01, 0x02, 0x03, 0x08, 0x09, 0x0A, 0x0B,
+ 0x10, 0x11, 0x12, 0x13, 0x18, 0x19, 0x1A, 0x1B));
+#else
+ return (vector unsigned int)vec_perm(
+ __a, __b,
+ (vector unsigned char)(0x04, 0x05, 0x06, 0x07, 0x0C, 0x0D, 0x0E, 0x0F,
+ 0x14, 0x15, 0x16, 0x17, 0x1C, 0x1D, 0x1E, 0x1F));
+#endif
+}
+
+static vector bool int __ATTRS_o_ai vec_pack(vector bool long long __a,
+ vector bool long long __b) {
+#ifdef __LITTLE_ENDIAN__
+ return (vector bool int)vec_perm(
+ __a, __b,
+ (vector unsigned char)(0x00, 0x01, 0x02, 0x03, 0x08, 0x09, 0x0A, 0x0B,
+ 0x10, 0x11, 0x12, 0x13, 0x18, 0x19, 0x1A, 0x1B));
+#else
+ return (vector bool int)vec_perm(
+ __a, __b,
+ (vector unsigned char)(0x04, 0x05, 0x06, 0x07, 0x0C, 0x0D, 0x0E, 0x0F,
+ 0x14, 0x15, 0x16, 0x17, 0x1C, 0x1D, 0x1E, 0x1F));
+#endif
+}
+
+#endif
+
+/* vec_vpkuhum */
+
+#define __builtin_altivec_vpkuhum vec_vpkuhum
+
+static vector signed char __ATTRS_o_ai vec_vpkuhum(vector signed short __a,
+ vector signed short __b) {
+#ifdef __LITTLE_ENDIAN__
+ return (vector signed char)vec_perm(
+ __a, __b,
+ (vector unsigned char)(0x00, 0x02, 0x04, 0x06, 0x08, 0x0A, 0x0C, 0x0E,
+ 0x10, 0x12, 0x14, 0x16, 0x18, 0x1A, 0x1C, 0x1E));
+#else
+ return (vector signed char)vec_perm(
+ __a, __b,
+ (vector unsigned char)(0x01, 0x03, 0x05, 0x07, 0x09, 0x0B, 0x0D, 0x0F,
+ 0x11, 0x13, 0x15, 0x17, 0x19, 0x1B, 0x1D, 0x1F));
+#endif
+}
+
+static vector unsigned char __ATTRS_o_ai
+vec_vpkuhum(vector unsigned short __a, vector unsigned short __b) {
+#ifdef __LITTLE_ENDIAN__
+ return (vector unsigned char)vec_perm(
+ __a, __b,
+ (vector unsigned char)(0x00, 0x02, 0x04, 0x06, 0x08, 0x0A, 0x0C, 0x0E,
+ 0x10, 0x12, 0x14, 0x16, 0x18, 0x1A, 0x1C, 0x1E));
+#else
+ return (vector unsigned char)vec_perm(
+ __a, __b,
+ (vector unsigned char)(0x01, 0x03, 0x05, 0x07, 0x09, 0x0B, 0x0D, 0x0F,
+ 0x11, 0x13, 0x15, 0x17, 0x19, 0x1B, 0x1D, 0x1F));
+#endif
+}
+
+static vector bool char __ATTRS_o_ai vec_vpkuhum(vector bool short __a,
+ vector bool short __b) {
+#ifdef __LITTLE_ENDIAN__
+ return (vector bool char)vec_perm(
+ __a, __b,
+ (vector unsigned char)(0x00, 0x02, 0x04, 0x06, 0x08, 0x0A, 0x0C, 0x0E,
+ 0x10, 0x12, 0x14, 0x16, 0x18, 0x1A, 0x1C, 0x1E));
+#else
+ return (vector bool char)vec_perm(
+ __a, __b,
+ (vector unsigned char)(0x01, 0x03, 0x05, 0x07, 0x09, 0x0B, 0x0D, 0x0F,
+ 0x11, 0x13, 0x15, 0x17, 0x19, 0x1B, 0x1D, 0x1F));
+#endif
+}
+
+/* vec_vpkuwum */
+
+#define __builtin_altivec_vpkuwum vec_vpkuwum
+
+static vector short __ATTRS_o_ai vec_vpkuwum(vector int __a, vector int __b) {
+#ifdef __LITTLE_ENDIAN__
+ return (vector short)vec_perm(
+ __a, __b,
+ (vector unsigned char)(0x00, 0x01, 0x04, 0x05, 0x08, 0x09, 0x0C, 0x0D,
+ 0x10, 0x11, 0x14, 0x15, 0x18, 0x19, 0x1C, 0x1D));
+#else
+ return (vector short)vec_perm(
+ __a, __b,
+ (vector unsigned char)(0x02, 0x03, 0x06, 0x07, 0x0A, 0x0B, 0x0E, 0x0F,
+ 0x12, 0x13, 0x16, 0x17, 0x1A, 0x1B, 0x1E, 0x1F));
+#endif
+}
+
+static vector unsigned short __ATTRS_o_ai vec_vpkuwum(vector unsigned int __a,
+ vector unsigned int __b) {
+#ifdef __LITTLE_ENDIAN__
+ return (vector unsigned short)vec_perm(
+ __a, __b,
+ (vector unsigned char)(0x00, 0x01, 0x04, 0x05, 0x08, 0x09, 0x0C, 0x0D,
+ 0x10, 0x11, 0x14, 0x15, 0x18, 0x19, 0x1C, 0x1D));
+#else
+ return (vector unsigned short)vec_perm(
+ __a, __b,
+ (vector unsigned char)(0x02, 0x03, 0x06, 0x07, 0x0A, 0x0B, 0x0E, 0x0F,
+ 0x12, 0x13, 0x16, 0x17, 0x1A, 0x1B, 0x1E, 0x1F));
+#endif
+}
+
+static vector bool short __ATTRS_o_ai vec_vpkuwum(vector bool int __a,
+ vector bool int __b) {
+#ifdef __LITTLE_ENDIAN__
+ return (vector bool short)vec_perm(
+ __a, __b,
+ (vector unsigned char)(0x00, 0x01, 0x04, 0x05, 0x08, 0x09, 0x0C, 0x0D,
+ 0x10, 0x11, 0x14, 0x15, 0x18, 0x19, 0x1C, 0x1D));
+#else
+ return (vector bool short)vec_perm(
+ __a, __b,
+ (vector unsigned char)(0x02, 0x03, 0x06, 0x07, 0x0A, 0x0B, 0x0E, 0x0F,
+ 0x12, 0x13, 0x16, 0x17, 0x1A, 0x1B, 0x1E, 0x1F));
+#endif
+}
+
+/* vec_vpkudum */
+
+#ifdef __POWER8_VECTOR__
+#define __builtin_altivec_vpkudum vec_vpkudum
+
+static vector int __ATTRS_o_ai vec_vpkudum(vector long long __a,
+ vector long long __b) {
+#ifdef __LITTLE_ENDIAN__
+ return (vector int)vec_perm(
+ __a, __b,
+ (vector unsigned char)(0x00, 0x01, 0x02, 0x03, 0x08, 0x09, 0x0A, 0x0B,
+ 0x10, 0x11, 0x12, 0x13, 0x18, 0x19, 0x1A, 0x1B));
+#else
+ return (vector int)vec_perm(
+ __a, __b,
+ (vector unsigned char)(0x04, 0x05, 0x06, 0x07, 0x0C, 0x0D, 0x0E, 0x0F,
+ 0x14, 0x15, 0x16, 0x17, 0x1C, 0x1D, 0x1E, 0x1F));
+#endif
+}
+
+static vector unsigned int __ATTRS_o_ai
+vec_vpkudum(vector unsigned long long __a, vector unsigned long long __b) {
+#ifdef __LITTLE_ENDIAN__
+ return (vector unsigned int)vec_perm(
+ __a, __b,
+ (vector unsigned char)(0x00, 0x01, 0x02, 0x03, 0x08, 0x09, 0x0A, 0x0B,
+ 0x10, 0x11, 0x12, 0x13, 0x18, 0x19, 0x1A, 0x1B));
+#else
+ return (vector unsigned int)vec_perm(
+ __a, __b,
+ (vector unsigned char)(0x04, 0x05, 0x06, 0x07, 0x0C, 0x0D, 0x0E, 0x0F,
+ 0x14, 0x15, 0x16, 0x17, 0x1C, 0x1D, 0x1E, 0x1F));
+#endif
+}
+
+static vector bool int __ATTRS_o_ai vec_vpkudum(vector bool long long __a,
+ vector bool long long __b) {
+#ifdef __LITTLE_ENDIAN__
+ return (vector bool int)vec_perm(
+ (vector long long)__a, (vector long long)__b,
+ (vector unsigned char)(0x00, 0x01, 0x02, 0x03, 0x08, 0x09, 0x0A, 0x0B,
+ 0x10, 0x11, 0x12, 0x13, 0x18, 0x19, 0x1A, 0x1B));
+#else
+ return (vector bool int)vec_perm(
+ (vector long long)__a, (vector long long)__b,
+ (vector unsigned char)(0x04, 0x05, 0x06, 0x07, 0x0C, 0x0D, 0x0E, 0x0F,
+ 0x14, 0x15, 0x16, 0x17, 0x1C, 0x1D, 0x1E, 0x1F));
+#endif
+}
+#endif
+
+/* vec_packpx */
+
+static vector pixel __attribute__((__always_inline__))
+vec_packpx(vector unsigned int __a, vector unsigned int __b) {
+#ifdef __LITTLE_ENDIAN__
+ return (vector pixel)__builtin_altivec_vpkpx(__b, __a);
+#else
+ return (vector pixel)__builtin_altivec_vpkpx(__a, __b);
+#endif
+}
+
+/* vec_vpkpx */
+
+static vector pixel __attribute__((__always_inline__))
+vec_vpkpx(vector unsigned int __a, vector unsigned int __b) {
+#ifdef __LITTLE_ENDIAN__
+ return (vector pixel)__builtin_altivec_vpkpx(__b, __a);
+#else
+ return (vector pixel)__builtin_altivec_vpkpx(__a, __b);
+#endif
+}
+
+/* vec_packs */
+
+static vector signed char __ATTRS_o_ai vec_packs(vector short __a,
+ vector short __b) {
+#ifdef __LITTLE_ENDIAN__
+ return __builtin_altivec_vpkshss(__b, __a);
+#else
+ return __builtin_altivec_vpkshss(__a, __b);
+#endif
+}
+
+static vector unsigned char __ATTRS_o_ai vec_packs(vector unsigned short __a,
+ vector unsigned short __b) {
+#ifdef __LITTLE_ENDIAN__
+ return __builtin_altivec_vpkuhus(__b, __a);
+#else
+ return __builtin_altivec_vpkuhus(__a, __b);
+#endif
+}
+
+static vector signed short __ATTRS_o_ai vec_packs(vector int __a,
+ vector int __b) {
+#ifdef __LITTLE_ENDIAN__
+ return __builtin_altivec_vpkswss(__b, __a);
+#else
+ return __builtin_altivec_vpkswss(__a, __b);
+#endif
+}
+
+static vector unsigned short __ATTRS_o_ai vec_packs(vector unsigned int __a,
+ vector unsigned int __b) {
+#ifdef __LITTLE_ENDIAN__
+ return __builtin_altivec_vpkuwus(__b, __a);
+#else
+ return __builtin_altivec_vpkuwus(__a, __b);
+#endif
+}
+
+#ifdef __POWER8_VECTOR__
+static vector int __ATTRS_o_ai vec_packs(vector long long __a,
+ vector long long __b) {
+#ifdef __LITTLE_ENDIAN__
+ return __builtin_altivec_vpksdss(__b, __a);
+#else
+ return __builtin_altivec_vpksdss(__a, __b);
+#endif
+}
+
+static vector unsigned int __ATTRS_o_ai
+vec_packs(vector unsigned long long __a, vector unsigned long long __b) {
+#ifdef __LITTLE_ENDIAN__
+ return __builtin_altivec_vpkudus(__b, __a);
+#else
+ return __builtin_altivec_vpkudus(__a, __b);
+#endif
+}
+#endif
+
+/* vec_vpkshss */
+
+static vector signed char __attribute__((__always_inline__))
+vec_vpkshss(vector short __a, vector short __b) {
+#ifdef __LITTLE_ENDIAN__
+ return __builtin_altivec_vpkshss(__b, __a);
+#else
+ return __builtin_altivec_vpkshss(__a, __b);
+#endif
+}
+
+/* vec_vpksdss */
+
+#ifdef __POWER8_VECTOR__
+static vector int __ATTRS_o_ai vec_vpksdss(vector long long __a,
+ vector long long __b) {
+#ifdef __LITTLE_ENDIAN__
+ return __builtin_altivec_vpksdss(__b, __a);
+#else
+ return __builtin_altivec_vpksdss(__a, __b);
+#endif
+}
+#endif
+
+/* vec_vpkuhus */
+
+static vector unsigned char __attribute__((__always_inline__))
+vec_vpkuhus(vector unsigned short __a, vector unsigned short __b) {
+#ifdef __LITTLE_ENDIAN__
+ return __builtin_altivec_vpkuhus(__b, __a);
+#else
+ return __builtin_altivec_vpkuhus(__a, __b);
+#endif
+}
+
+/* vec_vpkudus */
+
+#ifdef __POWER8_VECTOR__
+static vector unsigned int __attribute__((__always_inline__))
+vec_vpkudus(vector unsigned long long __a, vector unsigned long long __b) {
+#ifdef __LITTLE_ENDIAN__
+ return __builtin_altivec_vpkudus(__b, __a);
+#else
+ return __builtin_altivec_vpkudus(__a, __b);
+#endif
+}
+#endif
+
+/* vec_vpkswss */
+
+static vector signed short __attribute__((__always_inline__))
+vec_vpkswss(vector int __a, vector int __b) {
+#ifdef __LITTLE_ENDIAN__
+ return __builtin_altivec_vpkswss(__b, __a);
+#else
+ return __builtin_altivec_vpkswss(__a, __b);
+#endif
+}
+
+/* vec_vpkuwus */
+
+static vector unsigned short __attribute__((__always_inline__))
+vec_vpkuwus(vector unsigned int __a, vector unsigned int __b) {
+#ifdef __LITTLE_ENDIAN__
+ return __builtin_altivec_vpkuwus(__b, __a);
+#else
+ return __builtin_altivec_vpkuwus(__a, __b);
+#endif
+}
+
+/* vec_packsu */
+
+static vector unsigned char __ATTRS_o_ai vec_packsu(vector short __a,
+ vector short __b) {
+#ifdef __LITTLE_ENDIAN__
+ return __builtin_altivec_vpkshus(__b, __a);
+#else
+ return __builtin_altivec_vpkshus(__a, __b);
+#endif
+}
+
+static vector unsigned char __ATTRS_o_ai vec_packsu(vector unsigned short __a,
+ vector unsigned short __b) {
+#ifdef __LITTLE_ENDIAN__
+ return __builtin_altivec_vpkuhus(__b, __a);
+#else
+ return __builtin_altivec_vpkuhus(__a, __b);
+#endif
+}
+
+static vector unsigned short __ATTRS_o_ai vec_packsu(vector int __a,
+ vector int __b) {
+#ifdef __LITTLE_ENDIAN__
+ return __builtin_altivec_vpkswus(__b, __a);
+#else
+ return __builtin_altivec_vpkswus(__a, __b);
+#endif
+}
+
+static vector unsigned short __ATTRS_o_ai vec_packsu(vector unsigned int __a,
+ vector unsigned int __b) {
+#ifdef __LITTLE_ENDIAN__
+ return __builtin_altivec_vpkuwus(__b, __a);
+#else
+ return __builtin_altivec_vpkuwus(__a, __b);
+#endif
+}
+
+#ifdef __POWER8_VECTOR__
+static vector unsigned int __ATTRS_o_ai vec_packsu(vector long long __a,
+ vector long long __b) {
+#ifdef __LITTLE_ENDIAN__
+ return __builtin_altivec_vpksdus(__b, __a);
+#else
+ return __builtin_altivec_vpksdus(__a, __b);
+#endif
+}
+
+static vector unsigned int __ATTRS_o_ai
+vec_packsu(vector unsigned long long __a, vector unsigned long long __b) {
+#ifdef __LITTLE_ENDIAN__
+ return __builtin_altivec_vpkudus(__b, __a);
+#else
+ return __builtin_altivec_vpkudus(__a, __b);
+#endif
+}
+#endif
+
+/* vec_vpkshus */
+
+static vector unsigned char __ATTRS_o_ai vec_vpkshus(vector short __a,
+ vector short __b) {
+#ifdef __LITTLE_ENDIAN__
+ return __builtin_altivec_vpkshus(__b, __a);
+#else
+ return __builtin_altivec_vpkshus(__a, __b);
+#endif
+}
+
+static vector unsigned char __ATTRS_o_ai
+vec_vpkshus(vector unsigned short __a, vector unsigned short __b) {
+#ifdef __LITTLE_ENDIAN__
+ return __builtin_altivec_vpkuhus(__b, __a);
+#else
+ return __builtin_altivec_vpkuhus(__a, __b);
+#endif
+}
+
+/* vec_vpkswus */
+
+static vector unsigned short __ATTRS_o_ai vec_vpkswus(vector int __a,
+ vector int __b) {
+#ifdef __LITTLE_ENDIAN__
+ return __builtin_altivec_vpkswus(__b, __a);
+#else
+ return __builtin_altivec_vpkswus(__a, __b);
+#endif
+}
+
+static vector unsigned short __ATTRS_o_ai vec_vpkswus(vector unsigned int __a,
+ vector unsigned int __b) {
+#ifdef __LITTLE_ENDIAN__
+ return __builtin_altivec_vpkuwus(__b, __a);
+#else
+ return __builtin_altivec_vpkuwus(__a, __b);
+#endif
+}
+
+/* vec_vpksdus */
+
+#ifdef __POWER8_VECTOR__
+static vector unsigned int __ATTRS_o_ai vec_vpksdus(vector long long __a,
+ vector long long __b) {
+#ifdef __LITTLE_ENDIAN__
+ return __builtin_altivec_vpksdus(__b, __a);
+#else
+ return __builtin_altivec_vpksdus(__a, __b);
+#endif
+}
+#endif
+
+/* vec_perm */
+
+// The vperm instruction is defined architecturally with a big-endian bias.
+// For little endian, we swap the input operands and invert the permute
+// control vector. Only the rightmost 5 bits matter, so we could use
+// a vector of all 31s instead of all 255s to perform the inversion.
+// However, when the PCV is not a constant, using 255 has an advantage
+// in that the vec_xor can be recognized as a vec_nor (and for P8 and
+// later, possibly a vec_nand).
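+// For illustration (a reasoning sketch, not additional API): only the low
+// 5 bits of each control byte select among the 32 concatenated input
+// bytes, and (255 ^ s) has the same low 5 bits as 31 - (s & 31). Byte k
+// of the concatenation (__a, __b), counted from one end, is byte 31 - k
+// of (__b, __a) counted from the other, so complementing the control
+// vector and swapping the operands lets the big-endian-biased vperm
+// produce the result expected under little-endian element numbering.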
+
+static vector signed char __ATTRS_o_ai vec_perm(vector signed char __a,
+ vector signed char __b,
+ vector unsigned char __c) {
+#ifdef __LITTLE_ENDIAN__
+ vector unsigned char __d = {255, 255, 255, 255, 255, 255, 255, 255,
+ 255, 255, 255, 255, 255, 255, 255, 255};
+ __d = vec_xor(__c, __d);
+ return (vector signed char)__builtin_altivec_vperm_4si((vector int)__b,
+ (vector int)__a, __d);
+#else
+ return (vector signed char)__builtin_altivec_vperm_4si((vector int)__a,
+ (vector int)__b, __c);
+#endif
+}
+
+static vector unsigned char __ATTRS_o_ai vec_perm(vector unsigned char __a,
+ vector unsigned char __b,
+ vector unsigned char __c) {
+#ifdef __LITTLE_ENDIAN__
+ vector unsigned char __d = {255, 255, 255, 255, 255, 255, 255, 255,
+ 255, 255, 255, 255, 255, 255, 255, 255};
+ __d = vec_xor(__c, __d);
+ return (vector unsigned char)__builtin_altivec_vperm_4si(
+ (vector int)__b, (vector int)__a, __d);
+#else
+ return (vector unsigned char)__builtin_altivec_vperm_4si(
+ (vector int)__a, (vector int)__b, __c);
+#endif
+}
+
+static vector bool char __ATTRS_o_ai vec_perm(vector bool char __a,
+ vector bool char __b,
+ vector unsigned char __c) {
+#ifdef __LITTLE_ENDIAN__
+ vector unsigned char __d = {255, 255, 255, 255, 255, 255, 255, 255,
+ 255, 255, 255, 255, 255, 255, 255, 255};
+ __d = vec_xor(__c, __d);
+ return (vector bool char)__builtin_altivec_vperm_4si((vector int)__b,
+ (vector int)__a, __d);
+#else
+ return (vector bool char)__builtin_altivec_vperm_4si((vector int)__a,
+ (vector int)__b, __c);
+#endif
+}
+
+static vector short __ATTRS_o_ai vec_perm(vector signed short __a,
+ vector signed short __b,
+ vector unsigned char __c) {
+#ifdef __LITTLE_ENDIAN__
+ vector unsigned char __d = {255, 255, 255, 255, 255, 255, 255, 255,
+ 255, 255, 255, 255, 255, 255, 255, 255};
+ __d = vec_xor(__c, __d);
+ return (vector signed short)__builtin_altivec_vperm_4si((vector int)__b,
+ (vector int)__a, __d);
+#else
+ return (vector signed short)__builtin_altivec_vperm_4si((vector int)__a,
+ (vector int)__b, __c);
+#endif
+}
+
+static vector unsigned short __ATTRS_o_ai vec_perm(vector unsigned short __a,
+ vector unsigned short __b,
+ vector unsigned char __c) {
+#ifdef __LITTLE_ENDIAN__
+ vector unsigned char __d = {255, 255, 255, 255, 255, 255, 255, 255,
+ 255, 255, 255, 255, 255, 255, 255, 255};
+ __d = vec_xor(__c, __d);
+ return (vector unsigned short)__builtin_altivec_vperm_4si(
+ (vector int)__b, (vector int)__a, __d);
+#else
+ return (vector unsigned short)__builtin_altivec_vperm_4si(
+ (vector int)__a, (vector int)__b, __c);
+#endif
+}
+
+static vector bool short __ATTRS_o_ai vec_perm(vector bool short __a,
+ vector bool short __b,
+ vector unsigned char __c) {
+#ifdef __LITTLE_ENDIAN__
+ vector unsigned char __d = {255, 255, 255, 255, 255, 255, 255, 255,
+ 255, 255, 255, 255, 255, 255, 255, 255};
+ __d = vec_xor(__c, __d);
+ return (vector bool short)__builtin_altivec_vperm_4si((vector int)__b,
+ (vector int)__a, __d);
+#else
+ return (vector bool short)__builtin_altivec_vperm_4si((vector int)__a,
+ (vector int)__b, __c);
+#endif
+}
+
+static vector pixel __ATTRS_o_ai vec_perm(vector pixel __a, vector pixel __b,
+ vector unsigned char __c) {
+#ifdef __LITTLE_ENDIAN__
+ vector unsigned char __d = {255, 255, 255, 255, 255, 255, 255, 255,
+ 255, 255, 255, 255, 255, 255, 255, 255};
+ __d = vec_xor(__c, __d);
+ return (vector pixel)__builtin_altivec_vperm_4si((vector int)__b,
+ (vector int)__a, __d);
+#else
+ return (vector pixel)__builtin_altivec_vperm_4si((vector int)__a,
+ (vector int)__b, __c);
+#endif
+}
+
+static vector int __ATTRS_o_ai vec_perm(vector signed int __a,
+ vector signed int __b,
+ vector unsigned char __c) {
+#ifdef __LITTLE_ENDIAN__
+ vector unsigned char __d = {255, 255, 255, 255, 255, 255, 255, 255,
+ 255, 255, 255, 255, 255, 255, 255, 255};
+ __d = vec_xor(__c, __d);
+ return (vector signed int)__builtin_altivec_vperm_4si(__b, __a, __d);
+#else
+ return (vector signed int)__builtin_altivec_vperm_4si(__a, __b, __c);
+#endif
+}
+
+static vector unsigned int __ATTRS_o_ai vec_perm(vector unsigned int __a,
+ vector unsigned int __b,
+ vector unsigned char __c) {
+#ifdef __LITTLE_ENDIAN__
+ vector unsigned char __d = {255, 255, 255, 255, 255, 255, 255, 255,
+ 255, 255, 255, 255, 255, 255, 255, 255};
+ __d = vec_xor(__c, __d);
+ return (vector unsigned int)__builtin_altivec_vperm_4si((vector int)__b,
+ (vector int)__a, __d);
+#else
+ return (vector unsigned int)__builtin_altivec_vperm_4si((vector int)__a,
+ (vector int)__b, __c);
+#endif
+}
+
+static vector bool int __ATTRS_o_ai vec_perm(vector bool int __a,
+ vector bool int __b,
+ vector unsigned char __c) {
+#ifdef __LITTLE_ENDIAN__
+ vector unsigned char __d = {255, 255, 255, 255, 255, 255, 255, 255,
+ 255, 255, 255, 255, 255, 255, 255, 255};
+ __d = vec_xor(__c, __d);
+ return (vector bool int)__builtin_altivec_vperm_4si((vector int)__b,
+ (vector int)__a, __d);
+#else
+ return (vector bool int)__builtin_altivec_vperm_4si((vector int)__a,
+ (vector int)__b, __c);
+#endif
+}
+
+static vector float __ATTRS_o_ai vec_perm(vector float __a, vector float __b,
+ vector unsigned char __c) {
+#ifdef __LITTLE_ENDIAN__
+ vector unsigned char __d = {255, 255, 255, 255, 255, 255, 255, 255,
+ 255, 255, 255, 255, 255, 255, 255, 255};
+ __d = vec_xor(__c, __d);
+ return (vector float)__builtin_altivec_vperm_4si((vector int)__b,
+ (vector int)__a, __d);
+#else
+ return (vector float)__builtin_altivec_vperm_4si((vector int)__a,
+ (vector int)__b, __c);
+#endif
+}
+
+#ifdef __VSX__
+static vector long long __ATTRS_o_ai vec_perm(vector signed long long __a,
+ vector signed long long __b,
+ vector unsigned char __c) {
+#ifdef __LITTLE_ENDIAN__
+ vector unsigned char __d = {255, 255, 255, 255, 255, 255, 255, 255,
+ 255, 255, 255, 255, 255, 255, 255, 255};
+ __d = vec_xor(__c, __d);
+ return (vector signed long long)__builtin_altivec_vperm_4si(
+ (vector int)__b, (vector int)__a, __d);
+#else
+ return (vector signed long long)__builtin_altivec_vperm_4si(
+ (vector int)__a, (vector int)__b, __c);
+#endif
+}
+
+static vector unsigned long long __ATTRS_o_ai
+vec_perm(vector unsigned long long __a, vector unsigned long long __b,
+ vector unsigned char __c) {
+#ifdef __LITTLE_ENDIAN__
+ vector unsigned char __d = {255, 255, 255, 255, 255, 255, 255, 255,
+ 255, 255, 255, 255, 255, 255, 255, 255};
+ __d = vec_xor(__c, __d);
+ return (vector unsigned long long)__builtin_altivec_vperm_4si(
+ (vector int)__b, (vector int)__a, __d);
+#else
+ return (vector unsigned long long)__builtin_altivec_vperm_4si(
+ (vector int)__a, (vector int)__b, __c);
+#endif
+}
+
+static vector bool long long __ATTRS_o_ai
+vec_perm(vector bool long long __a, vector bool long long __b,
+ vector unsigned char __c) {
+#ifdef __LITTLE_ENDIAN__
+ vector unsigned char __d = {255, 255, 255, 255, 255, 255, 255, 255,
+ 255, 255, 255, 255, 255, 255, 255, 255};
+ __d = vec_xor(__c, __d);
+ return (vector bool long long)__builtin_altivec_vperm_4si(
+ (vector int)__b, (vector int)__a, __d);
+#else
+ return (vector bool long long)__builtin_altivec_vperm_4si(
+ (vector int)__a, (vector int)__b, __c);
+#endif
+}
+
+static vector double __ATTRS_o_ai vec_perm(vector double __a, vector double __b,
+ vector unsigned char __c) {
+#ifdef __LITTLE_ENDIAN__
+ vector unsigned char __d = {255, 255, 255, 255, 255, 255, 255, 255,
+ 255, 255, 255, 255, 255, 255, 255, 255};
+ __d = vec_xor(__c, __d);
+ return (vector double)__builtin_altivec_vperm_4si((vector int)__b,
+ (vector int)__a, __d);
+#else
+ return (vector double)__builtin_altivec_vperm_4si((vector int)__a,
+ (vector int)__b, __c);
+#endif
+}
+#endif
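+
+/* Example (a sketch): reversing the bytes of a vector with vec_perm:
+ *   vector unsigned char __rev = {15, 14, 13, 12, 11, 10, 9, 8,
+ *                                 7,  6,  5,  4,  3,  2,  1, 0};
+ *   __r = vec_perm(__v, __v, __rev);   // __r[i] = __v[15 - i]
+ */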
+
+/* vec_vperm */
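+
+/* These are the legacy, instruction-specific names; each simply
+   forwards to the matching vec_perm overload above. */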
+
+static vector signed char __ATTRS_o_ai vec_vperm(vector signed char __a,
+ vector signed char __b,
+ vector unsigned char __c) {
+ return vec_perm(__a, __b, __c);
+}
+
+static vector unsigned char __ATTRS_o_ai vec_vperm(vector unsigned char __a,
+ vector unsigned char __b,
+ vector unsigned char __c) {
+ return vec_perm(__a, __b, __c);
+}
+
+static vector bool char __ATTRS_o_ai vec_vperm(vector bool char __a,
+ vector bool char __b,
+ vector unsigned char __c) {
+ return vec_perm(__a, __b, __c);
+}
+
+static vector short __ATTRS_o_ai vec_vperm(vector short __a, vector short __b,
+ vector unsigned char __c) {
+ return vec_perm(__a, __b, __c);
+}
+
+static vector unsigned short __ATTRS_o_ai vec_vperm(vector unsigned short __a,
+ vector unsigned short __b,
+ vector unsigned char __c) {
+ return vec_perm(__a, __b, __c);
+}
+
+static vector bool short __ATTRS_o_ai vec_vperm(vector bool short __a,
+ vector bool short __b,
+ vector unsigned char __c) {
+ return vec_perm(__a, __b, __c);
+}
+
+static vector pixel __ATTRS_o_ai vec_vperm(vector pixel __a, vector pixel __b,
+ vector unsigned char __c) {
+ return vec_perm(__a, __b, __c);
+}
+
+static vector int __ATTRS_o_ai vec_vperm(vector int __a, vector int __b,
+ vector unsigned char __c) {
+ return vec_perm(__a, __b, __c);
+}
+
+static vector unsigned int __ATTRS_o_ai vec_vperm(vector unsigned int __a,
+ vector unsigned int __b,
+ vector unsigned char __c) {
+ return vec_perm(__a, __b, __c);
+}
+
+static vector bool int __ATTRS_o_ai vec_vperm(vector bool int __a,
+ vector bool int __b,
+ vector unsigned char __c) {
+ return vec_perm(__a, __b, __c);
+}
+
+static vector float __ATTRS_o_ai vec_vperm(vector float __a, vector float __b,
+ vector unsigned char __c) {
+ return vec_perm(__a, __b, __c);
+}
+
+#ifdef __VSX__
+static vector long long __ATTRS_o_ai vec_vperm(vector long long __a,
+ vector long long __b,
+ vector unsigned char __c) {
+ return vec_perm(__a, __b, __c);
+}
+
+static vector unsigned long long __ATTRS_o_ai
+vec_vperm(vector unsigned long long __a, vector unsigned long long __b,
+ vector unsigned char __c) {
+ return vec_perm(__a, __b, __c);
+}
+
+static vector double __ATTRS_o_ai vec_vperm(vector double __a,
+ vector double __b,
+ vector unsigned char __c) {
+ return vec_perm(__a, __b, __c);
+}
+#endif
+
+/* vec_re */
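+
+/* Returns a per-element estimate of 1.0 / __a.  The estimate is low
+   precision (about 12 bits on classic AltiVec); refine with
+   Newton-Raphson where full precision is needed. */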
+
+static vector float __ATTRS_o_ai
+vec_re(vector float __a) {
+#ifdef __VSX__
+ return __builtin_vsx_xvresp(__a);
+#else
+ return __builtin_altivec_vrefp(__a);
+#endif
+}
+
+#ifdef __VSX__
+static vector double __ATTRS_o_ai vec_re(vector double __a) {
+ return __builtin_vsx_xvredp(__a);
+}
+#endif
+
+/* vec_vrefp */
+
+static vector float __attribute__((__always_inline__))
+vec_vrefp(vector float __a) {
+ return __builtin_altivec_vrefp(__a);
+}
+
+/* vec_rl */
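+
+/* Each element of __a is rotated left by the corresponding element of
+   __b; rotate counts are taken modulo the element width. */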
+
+static vector signed char __ATTRS_o_ai vec_rl(vector signed char __a,
+ vector unsigned char __b) {
+ return (vector signed char)__builtin_altivec_vrlb((vector char)__a, __b);
+}
+
+static vector unsigned char __ATTRS_o_ai vec_rl(vector unsigned char __a,
+ vector unsigned char __b) {
+ return (vector unsigned char)__builtin_altivec_vrlb((vector char)__a, __b);
+}
+
+static vector short __ATTRS_o_ai vec_rl(vector short __a,
+ vector unsigned short __b) {
+ return __builtin_altivec_vrlh(__a, __b);
+}
+
+static vector unsigned short __ATTRS_o_ai vec_rl(vector unsigned short __a,
+ vector unsigned short __b) {
+ return (vector unsigned short)__builtin_altivec_vrlh((vector short)__a, __b);
+}
+
+static vector int __ATTRS_o_ai vec_rl(vector int __a, vector unsigned int __b) {
+ return __builtin_altivec_vrlw(__a, __b);
+}
+
+static vector unsigned int __ATTRS_o_ai vec_rl(vector unsigned int __a,
+ vector unsigned int __b) {
+ return (vector unsigned int)__builtin_altivec_vrlw((vector int)__a, __b);
+}
+
+#ifdef __POWER8_VECTOR__
+static vector signed long long __ATTRS_o_ai
+vec_rl(vector signed long long __a, vector unsigned long long __b) {
+ return __builtin_altivec_vrld(__a, __b);
+}
+
+static vector unsigned long long __ATTRS_o_ai
+vec_rl(vector unsigned long long __a, vector unsigned long long __b) {
+ return __builtin_altivec_vrld(__a, __b);
+}
+#endif
+
+/* vec_vrlb */
+
+static vector signed char __ATTRS_o_ai vec_vrlb(vector signed char __a,
+ vector unsigned char __b) {
+ return (vector signed char)__builtin_altivec_vrlb((vector char)__a, __b);
+}
+
+static vector unsigned char __ATTRS_o_ai vec_vrlb(vector unsigned char __a,
+ vector unsigned char __b) {
+ return (vector unsigned char)__builtin_altivec_vrlb((vector char)__a, __b);
+}
+
+/* vec_vrlh */
+
+static vector short __ATTRS_o_ai vec_vrlh(vector short __a,
+ vector unsigned short __b) {
+ return __builtin_altivec_vrlh(__a, __b);
+}
+
+static vector unsigned short __ATTRS_o_ai vec_vrlh(vector unsigned short __a,
+ vector unsigned short __b) {
+ return (vector unsigned short)__builtin_altivec_vrlh((vector short)__a, __b);
+}
+
+/* vec_vrlw */
+
+static vector int __ATTRS_o_ai vec_vrlw(vector int __a,
+ vector unsigned int __b) {
+ return __builtin_altivec_vrlw(__a, __b);
+}
+
+static vector unsigned int __ATTRS_o_ai vec_vrlw(vector unsigned int __a,
+ vector unsigned int __b) {
+ return (vector unsigned int)__builtin_altivec_vrlw((vector int)__a, __b);
+}
+
+/* vec_round */
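+
+/* Rounds each element to the nearest integral value, kept in
+   floating-point format. */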
+
+static vector float __ATTRS_o_ai vec_round(vector float __a) {
+#ifdef __VSX__
+ return __builtin_vsx_xvrspi(__a);
+#else
+ return __builtin_altivec_vrfin(__a);
+#endif
+}
+
+#ifdef __VSX__
+static vector double __ATTRS_o_ai vec_round(vector double __a) {
+ return __builtin_vsx_xvrdpi(__a);
+}
+
+/* vec_rint */
+
+static vector float __ATTRS_o_ai
+vec_rint(vector float __a) {
+ return __builtin_vsx_xvrspic(__a);
+}
+
+static vector double __ATTRS_o_ai
+vec_rint(vector double __a) {
+ return __builtin_vsx_xvrdpic(__a);
+}
+
+/* vec_nearbyint */
+
+static vector float __ATTRS_o_ai vec_nearbyint(vector float __a) {
+ return __builtin_vsx_xvrspi(__a);
+}
+
+static vector double __ATTRS_o_ai vec_nearbyint(vector double __a) {
+ return __builtin_vsx_xvrdpi(__a);
+}
+#endif
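+
+/* Note: vec_rint rounds using the current floating-point rounding mode
+   (the xvrspic/xvrdpic forms), while vec_nearbyint maps to the same
+   instructions as vec_round above. */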
+
+/* vec_vrfin */
+
+static vector float __attribute__((__always_inline__))
+vec_vrfin(vector float __a) {
+ return __builtin_altivec_vrfin(__a);
+}
+
+/* vec_sqrt */
+
+#ifdef __VSX__
+static vector float __ATTRS_o_ai vec_sqrt(vector float __a) {
+ return __builtin_vsx_xvsqrtsp(__a);
+}
+
+static vector double __ATTRS_o_ai vec_sqrt(vector double __a) {
+ return __builtin_vsx_xvsqrtdp(__a);
+}
+#endif
+
+/* vec_rsqrte */
+
+static vector float __ATTRS_o_ai
+vec_rsqrte(vector float __a) {
+#ifdef __VSX__
+ return __builtin_vsx_xvrsqrtesp(__a);
+#else
+ return __builtin_altivec_vrsqrtefp(__a);
+#endif
+}
+
+#ifdef __VSX__
+static vector double __ATTRS_o_ai vec_rsqrte(vector double __a) {
+ return __builtin_vsx_xvrsqrtedp(__a);
+}
+#endif
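+
+/* vec_rsqrte is only an estimate.  A sketch of the usual one-step
+ * Newton-Raphson refinement, built from intrinsics defined in this
+ * header (not itself part of the API):
+ *
+ *   static vector float __rsqrt_nr(vector float __x) {
+ *     const vector float __zero = (vector float)vec_splat_u32(0);
+ *     const vector float __half = vec_ctf(vec_splat_u32(1), 1);
+ *     const vector float __one  = vec_ctf(vec_splat_u32(1), 0);
+ *     vector float __est = vec_rsqrte(__x);
+ *     vector float __est2 = vec_madd(__est, __est, __zero);
+ *     vector float __halfest = vec_madd(__est, __half, __zero);
+ *     // __est * (1.5 - 0.5*x*est^2), via (1 - x*est^2)*(est/2) + est
+ *     return vec_madd(vec_nmsub(__x, __est2, __one), __halfest, __est);
+ *   }
+ */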
+
+/* vec_vrsqrtefp */
+
+static vector float __attribute__((__always_inline__))
+vec_vrsqrtefp(vector float __a) {
+ return __builtin_altivec_vrsqrtefp(__a);
+}
+
+/* vec_sel */
+
+#define __builtin_altivec_vsel_4si vec_sel
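+
+/* Bitwise select: each result bit is taken from __b where the
+   corresponding bit of __c is 1, and from __a where it is 0. */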
+
+static vector signed char __ATTRS_o_ai vec_sel(vector signed char __a,
+ vector signed char __b,
+ vector unsigned char __c) {
+ return (__a & ~(vector signed char)__c) | (__b & (vector signed char)__c);
+}
+
+static vector signed char __ATTRS_o_ai vec_sel(vector signed char __a,
+ vector signed char __b,
+ vector bool char __c) {
+ return (__a & ~(vector signed char)__c) | (__b & (vector signed char)__c);
+}
+
+static vector unsigned char __ATTRS_o_ai vec_sel(vector unsigned char __a,
+ vector unsigned char __b,
+ vector unsigned char __c) {
+ return (__a & ~__c) | (__b & __c);
+}
+
+static vector unsigned char __ATTRS_o_ai vec_sel(vector unsigned char __a,
+ vector unsigned char __b,
+ vector bool char __c) {
+ return (__a & ~(vector unsigned char)__c) | (__b & (vector unsigned char)__c);
+}
+
+static vector bool char __ATTRS_o_ai vec_sel(vector bool char __a,
+ vector bool char __b,
+ vector unsigned char __c) {
+ return (__a & ~(vector bool char)__c) | (__b & (vector bool char)__c);
+}
+
+static vector bool char __ATTRS_o_ai vec_sel(vector bool char __a,
+ vector bool char __b,
+ vector bool char __c) {
+ return (__a & ~__c) | (__b & __c);
+}
+
+static vector short __ATTRS_o_ai vec_sel(vector short __a, vector short __b,
+ vector unsigned short __c) {
+ return (__a & ~(vector short)__c) | (__b & (vector short)__c);
+}
+
+static vector short __ATTRS_o_ai vec_sel(vector short __a, vector short __b,
+ vector bool short __c) {
+ return (__a & ~(vector short)__c) | (__b & (vector short)__c);
+}
+
+static vector unsigned short __ATTRS_o_ai vec_sel(vector unsigned short __a,
+ vector unsigned short __b,
+ vector unsigned short __c) {
+ return (__a & ~__c) | (__b & __c);
+}
+
+static vector unsigned short __ATTRS_o_ai vec_sel(vector unsigned short __a,
+ vector unsigned short __b,
+ vector bool short __c) {
+ return (__a & ~(vector unsigned short)__c) |
+ (__b & (vector unsigned short)__c);
+}
+
+static vector bool short __ATTRS_o_ai vec_sel(vector bool short __a,
+ vector bool short __b,
+ vector unsigned short __c) {
+ return (__a & ~(vector bool short)__c) | (__b & (vector bool short)__c);
+}
+
+static vector bool short __ATTRS_o_ai vec_sel(vector bool short __a,
+ vector bool short __b,
+ vector bool short __c) {
+ return (__a & ~__c) | (__b & __c);
+}
+
+static vector int __ATTRS_o_ai vec_sel(vector int __a, vector int __b,
+ vector unsigned int __c) {
+ return (__a & ~(vector int)__c) | (__b & (vector int)__c);
+}
+
+static vector int __ATTRS_o_ai vec_sel(vector int __a, vector int __b,
+ vector bool int __c) {
+ return (__a & ~(vector int)__c) | (__b & (vector int)__c);
+}
+
+static vector unsigned int __ATTRS_o_ai vec_sel(vector unsigned int __a,
+ vector unsigned int __b,
+ vector unsigned int __c) {
+ return (__a & ~__c) | (__b & __c);
+}
+
+static vector unsigned int __ATTRS_o_ai vec_sel(vector unsigned int __a,
+ vector unsigned int __b,
+ vector bool int __c) {
+ return (__a & ~(vector unsigned int)__c) | (__b & (vector unsigned int)__c);
+}
+
+static vector bool int __ATTRS_o_ai vec_sel(vector bool int __a,
+ vector bool int __b,
+ vector unsigned int __c) {
+ return (__a & ~(vector bool int)__c) | (__b & (vector bool int)__c);
+}
+
+static vector bool int __ATTRS_o_ai vec_sel(vector bool int __a,
+ vector bool int __b,
+ vector bool int __c) {
+ return (__a & ~__c) | (__b & __c);
+}
+
+static vector float __ATTRS_o_ai vec_sel(vector float __a, vector float __b,
+ vector unsigned int __c) {
+ vector int __res = ((vector int)__a & ~(vector int)__c) |
+ ((vector int)__b & (vector int)__c);
+ return (vector float)__res;
+}
+
+static vector float __ATTRS_o_ai vec_sel(vector float __a, vector float __b,
+ vector bool int __c) {
+ vector int __res = ((vector int)__a & ~(vector int)__c) |
+ ((vector int)__b & (vector int)__c);
+ return (vector float)__res;
+}
+
+#ifdef __VSX__
+static vector double __ATTRS_o_ai vec_sel(vector double __a, vector double __b,
+ vector bool long long __c) {
+ vector long long __res = ((vector long long)__a & ~(vector long long)__c) |
+ ((vector long long)__b & (vector long long)__c);
+ return (vector double)__res;
+}
+
+static vector double __ATTRS_o_ai vec_sel(vector double __a, vector double __b,
+ vector unsigned long long __c) {
+ vector long long __res = ((vector long long)__a & ~(vector long long)__c) |
+ ((vector long long)__b & (vector long long)__c);
+ return (vector double)__res;
+}
+#endif
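+
+/* Example (a sketch): a compare mask feeding vec_sel gives branchless
+ * per-element selection -- e.g. a maximum (vec_max also exists; this
+ * just illustrates the idiom):
+ *   vector float __m = vec_sel(__b, __a, vec_cmpgt(__a, __b));
+ */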
+
+/* vec_vsel */
+
+static vector signed char __ATTRS_o_ai vec_vsel(vector signed char __a,
+ vector signed char __b,
+ vector unsigned char __c) {
+ return (__a & ~(vector signed char)__c) | (__b & (vector signed char)__c);
+}
+
+static vector signed char __ATTRS_o_ai vec_vsel(vector signed char __a,
+ vector signed char __b,
+ vector bool char __c) {
+ return (__a & ~(vector signed char)__c) | (__b & (vector signed char)__c);
+}
+
+static vector unsigned char __ATTRS_o_ai vec_vsel(vector unsigned char __a,
+ vector unsigned char __b,
+ vector unsigned char __c) {
+ return (__a & ~__c) | (__b & __c);
+}
+
+static vector unsigned char __ATTRS_o_ai vec_vsel(vector unsigned char __a,
+ vector unsigned char __b,
+ vector bool char __c) {
+ return (__a & ~(vector unsigned char)__c) | (__b & (vector unsigned char)__c);
+}
+
+static vector bool char __ATTRS_o_ai vec_vsel(vector bool char __a,
+ vector bool char __b,
+ vector unsigned char __c) {
+ return (__a & ~(vector bool char)__c) | (__b & (vector bool char)__c);
+}
+
+static vector bool char __ATTRS_o_ai vec_vsel(vector bool char __a,
+ vector bool char __b,
+ vector bool char __c) {
+ return (__a & ~__c) | (__b & __c);
+}
+
+static vector short __ATTRS_o_ai vec_vsel(vector short __a, vector short __b,
+ vector unsigned short __c) {
+ return (__a & ~(vector short)__c) | (__b & (vector short)__c);
+}
+
+static vector short __ATTRS_o_ai vec_vsel(vector short __a, vector short __b,
+ vector bool short __c) {
+ return (__a & ~(vector short)__c) | (__b & (vector short)__c);
+}
+
+static vector unsigned short __ATTRS_o_ai vec_vsel(vector unsigned short __a,
+ vector unsigned short __b,
+ vector unsigned short __c) {
+ return (__a & ~__c) | (__b & __c);
+}
+
+static vector unsigned short __ATTRS_o_ai vec_vsel(vector unsigned short __a,
+ vector unsigned short __b,
+ vector bool short __c) {
+ return (__a & ~(vector unsigned short)__c) |
+ (__b & (vector unsigned short)__c);
+}
+
+static vector bool short __ATTRS_o_ai vec_vsel(vector bool short __a,
+ vector bool short __b,
+ vector unsigned short __c) {
+ return (__a & ~(vector bool short)__c) | (__b & (vector bool short)__c);
+}
+
+static vector bool short __ATTRS_o_ai vec_vsel(vector bool short __a,
+ vector bool short __b,
+ vector bool short __c) {
+ return (__a & ~__c) | (__b & __c);
+}
+
+static vector int __ATTRS_o_ai vec_vsel(vector int __a, vector int __b,
+ vector unsigned int __c) {
+ return (__a & ~(vector int)__c) | (__b & (vector int)__c);
+}
+
+static vector int __ATTRS_o_ai vec_vsel(vector int __a, vector int __b,
+ vector bool int __c) {
+ return (__a & ~(vector int)__c) | (__b & (vector int)__c);
+}
+
+static vector unsigned int __ATTRS_o_ai vec_vsel(vector unsigned int __a,
+ vector unsigned int __b,
+ vector unsigned int __c) {
+ return (__a & ~__c) | (__b & __c);
+}
+
+static vector unsigned int __ATTRS_o_ai vec_vsel(vector unsigned int __a,
+ vector unsigned int __b,
+ vector bool int __c) {
+ return (__a & ~(vector unsigned int)__c) | (__b & (vector unsigned int)__c);
+}
+
+static vector bool int __ATTRS_o_ai vec_vsel(vector bool int __a,
+ vector bool int __b,
+ vector unsigned int __c) {
+ return (__a & ~(vector bool int)__c) | (__b & (vector bool int)__c);
+}
+
+static vector bool int __ATTRS_o_ai vec_vsel(vector bool int __a,
+ vector bool int __b,
+ vector bool int __c) {
+ return (__a & ~__c) | (__b & __c);
+}
+
+static vector float __ATTRS_o_ai vec_vsel(vector float __a, vector float __b,
+ vector unsigned int __c) {
+ vector int __res = ((vector int)__a & ~(vector int)__c) |
+ ((vector int)__b & (vector int)__c);
+ return (vector float)__res;
+}
+
+static vector float __ATTRS_o_ai vec_vsel(vector float __a, vector float __b,
+ vector bool int __c) {
+ vector int __res = ((vector int)__a & ~(vector int)__c) |
+ ((vector int)__b & (vector int)__c);
+ return (vector float)__res;
+}
+
+/* vec_sl */
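+
+/* Each element of __a is shifted left by the corresponding element of
+   __b; per the AltiVec semantics, shift counts are interpreted modulo
+   the element width. */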
+
+static vector signed char __ATTRS_o_ai vec_sl(vector signed char __a,
+ vector unsigned char __b) {
+ return __a << (vector signed char)__b;
+}
+
+static vector unsigned char __ATTRS_o_ai vec_sl(vector unsigned char __a,
+ vector unsigned char __b) {
+ return __a << __b;
+}
+
+static vector short __ATTRS_o_ai vec_sl(vector short __a,
+ vector unsigned short __b) {
+ return __a << (vector short)__b;
+}
+
+static vector unsigned short __ATTRS_o_ai vec_sl(vector unsigned short __a,
+ vector unsigned short __b) {
+ return __a << __b;
+}
+
+static vector int __ATTRS_o_ai vec_sl(vector int __a, vector unsigned int __b) {
+ return __a << (vector int)__b;
+}
+
+static vector unsigned int __ATTRS_o_ai vec_sl(vector unsigned int __a,
+ vector unsigned int __b) {
+ return __a << __b;
+}
+
+#ifdef __POWER8_VECTOR__
+static vector signed long long __ATTRS_o_ai
+vec_sl(vector signed long long __a, vector unsigned long long __b) {
+ return __a << (vector long long)__b;
+}
+
+static vector unsigned long long __ATTRS_o_ai
+vec_sl(vector unsigned long long __a, vector unsigned long long __b) {
+ return __a << __b;
+}
+#endif
+
+/* vec_vslb */
+
+#define __builtin_altivec_vslb vec_vslb
+
+static vector signed char __ATTRS_o_ai vec_vslb(vector signed char __a,
+ vector unsigned char __b) {
+ return vec_sl(__a, __b);
+}
+
+static vector unsigned char __ATTRS_o_ai vec_vslb(vector unsigned char __a,
+ vector unsigned char __b) {
+ return vec_sl(__a, __b);
+}
+
+/* vec_vslh */
+
+#define __builtin_altivec_vslh vec_vslh
+
+static vector short __ATTRS_o_ai vec_vslh(vector short __a,
+ vector unsigned short __b) {
+ return vec_sl(__a, __b);
+}
+
+static vector unsigned short __ATTRS_o_ai vec_vslh(vector unsigned short __a,
+ vector unsigned short __b) {
+ return vec_sl(__a, __b);
+}
+
+/* vec_vslw */
+
+#define __builtin_altivec_vslw vec_vslw
+
+static vector int __ATTRS_o_ai vec_vslw(vector int __a,
+ vector unsigned int __b) {
+ return vec_sl(__a, __b);
+}
+
+static vector unsigned int __ATTRS_o_ai vec_vslw(vector unsigned int __a,
+ vector unsigned int __b) {
+ return vec_sl(__a, __b);
+}
+
+/* vec_sld */
+
+#define __builtin_altivec_vsldoi_4si vec_sld
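+
+/* Shift left double: returns the 16 bytes starting at byte offset __c
+   (masked to 0-15) of the 32-byte concatenation of __a and __b. */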
+
+static vector signed char __ATTRS_o_ai vec_sld(vector signed char __a,
+ vector signed char __b,
+ unsigned const int __c) {
+ unsigned char __d = __c & 0x0F;
+#ifdef __LITTLE_ENDIAN__
+ return vec_perm(
+ __b, __a,
+ (vector unsigned char)(16 - __d, 17 - __d, 18 - __d, 19 - __d, 20 - __d,
+ 21 - __d, 22 - __d, 23 - __d, 24 - __d, 25 - __d,
+ 26 - __d, 27 - __d, 28 - __d, 29 - __d, 30 - __d,
+ 31 - __d));
+#else
+ return vec_perm(
+ __a, __b,
+ (vector unsigned char)(__d, __d + 1, __d + 2, __d + 3, __d + 4, __d + 5,
+ __d + 6, __d + 7, __d + 8, __d + 9, __d + 10,
+ __d + 11, __d + 12, __d + 13, __d + 14, __d + 15));
+#endif
+}
+
+static vector unsigned char __ATTRS_o_ai vec_sld(vector unsigned char __a,
+ vector unsigned char __b,
+ unsigned const int __c) {
+ unsigned char __d = __c & 0x0F;
+#ifdef __LITTLE_ENDIAN__
+ return vec_perm(
+ __b, __a,
+ (vector unsigned char)(16 - __d, 17 - __d, 18 - __d, 19 - __d, 20 - __d,
+ 21 - __d, 22 - __d, 23 - __d, 24 - __d, 25 - __d,
+ 26 - __d, 27 - __d, 28 - __d, 29 - __d, 30 - __d,
+ 31 - __d));
+#else
+ return vec_perm(
+ __a, __b,
+ (vector unsigned char)(__d, __d + 1, __d + 2, __d + 3, __d + 4, __d + 5,
+ __d + 6, __d + 7, __d + 8, __d + 9, __d + 10,
+ __d + 11, __d + 12, __d + 13, __d + 14, __d + 15));
+#endif
+}
+
+static vector bool char __ATTRS_o_ai vec_sld(vector bool char __a,
+ vector bool char __b,
+ unsigned const int __c) {
+ unsigned char __d = __c & 0x0F;
+#ifdef __LITTLE_ENDIAN__
+ return vec_perm(
+ __b, __a,
+ (vector unsigned char)(16 - __d, 17 - __d, 18 - __d, 19 - __d, 20 - __d,
+ 21 - __d, 22 - __d, 23 - __d, 24 - __d, 25 - __d,
+ 26 - __d, 27 - __d, 28 - __d, 29 - __d, 30 - __d,
+ 31 - __d));
+#else
+ return vec_perm(
+ __a, __b,
+ (vector unsigned char)(__d, __d + 1, __d + 2, __d + 3, __d + 4, __d + 5,
+ __d + 6, __d + 7, __d + 8, __d + 9, __d + 10,
+ __d + 11, __d + 12, __d + 13, __d + 14, __d + 15));
+#endif
+}
+
+static vector signed short __ATTRS_o_ai vec_sld(vector signed short __a,
+ vector signed short __b,
+ unsigned const int __c) {
+ unsigned char __d = __c & 0x0F;
+#ifdef __LITTLE_ENDIAN__
+ return vec_perm(
+ __b, __a,
+ (vector unsigned char)(16 - __d, 17 - __d, 18 - __d, 19 - __d, 20 - __d,
+ 21 - __d, 22 - __d, 23 - __d, 24 - __d, 25 - __d,
+ 26 - __d, 27 - __d, 28 - __d, 29 - __d, 30 - __d,
+ 31 - __d));
+#else
+ return vec_perm(
+ __a, __b,
+ (vector unsigned char)(__d, __d + 1, __d + 2, __d + 3, __d + 4, __d + 5,
+ __d + 6, __d + 7, __d + 8, __d + 9, __d + 10,
+ __d + 11, __d + 12, __d + 13, __d + 14, __d + 15));
+#endif
+}
+
+static vector unsigned short __ATTRS_o_ai vec_sld(vector unsigned short __a,
+ vector unsigned short __b,
+ unsigned const int __c) {
+ unsigned char __d = __c & 0x0F;
+#ifdef __LITTLE_ENDIAN__
+ return vec_perm(
+ __b, __a,
+ (vector unsigned char)(16 - __d, 17 - __d, 18 - __d, 19 - __d, 20 - __d,
+ 21 - __d, 22 - __d, 23 - __d, 24 - __d, 25 - __d,
+ 26 - __d, 27 - __d, 28 - __d, 29 - __d, 30 - __d,
+ 31 - __d));
+#else
+ return vec_perm(
+ __a, __b,
+ (vector unsigned char)(__d, __d + 1, __d + 2, __d + 3, __d + 4, __d + 5,
+ __d + 6, __d + 7, __d + 8, __d + 9, __d + 10,
+ __d + 11, __d + 12, __d + 13, __d + 14, __d + 15));
+#endif
+}
+
+static vector bool short __ATTRS_o_ai vec_sld(vector bool short __a,
+ vector bool short __b,
+ unsigned const int __c) {
+ unsigned char __d = __c & 0x0F;
+#ifdef __LITTLE_ENDIAN__
+ return vec_perm(
+ __b, __a,
+ (vector unsigned char)(16 - __d, 17 - __d, 18 - __d, 19 - __d, 20 - __d,
+ 21 - __d, 22 - __d, 23 - __d, 24 - __d, 25 - __d,
+ 26 - __d, 27 - __d, 28 - __d, 29 - __d, 30 - __d,
+ 31 - __d));
+#else
+ return vec_perm(
+ __a, __b,
+ (vector unsigned char)(__d, __d + 1, __d + 2, __d + 3, __d + 4, __d + 5,
+ __d + 6, __d + 7, __d + 8, __d + 9, __d + 10,
+ __d + 11, __d + 12, __d + 13, __d + 14, __d + 15));
+#endif
+}
+
+static vector pixel __ATTRS_o_ai vec_sld(vector pixel __a, vector pixel __b,
+ unsigned const int __c) {
+ unsigned char __d = __c & 0x0F;
+#ifdef __LITTLE_ENDIAN__
+ return vec_perm(
+ __b, __a,
+ (vector unsigned char)(16 - __d, 17 - __d, 18 - __d, 19 - __d, 20 - __d,
+ 21 - __d, 22 - __d, 23 - __d, 24 - __d, 25 - __d,
+ 26 - __d, 27 - __d, 28 - __d, 29 - __d, 30 - __d,
+ 31 - __d));
+#else
+ return vec_perm(
+ __a, __b,
+ (vector unsigned char)(__d, __d + 1, __d + 2, __d + 3, __d + 4, __d + 5,
+ __d + 6, __d + 7, __d + 8, __d + 9, __d + 10,
+ __d + 11, __d + 12, __d + 13, __d + 14, __d + 15));
+#endif
+}
+
+static vector signed int __ATTRS_o_ai vec_sld(vector signed int __a,
+ vector signed int __b,
+ unsigned const int __c) {
+ unsigned char __d = __c & 0x0F;
+#ifdef __LITTLE_ENDIAN__
+ return vec_perm(
+ __b, __a,
+ (vector unsigned char)(16 - __d, 17 - __d, 18 - __d, 19 - __d, 20 - __d,
+ 21 - __d, 22 - __d, 23 - __d, 24 - __d, 25 - __d,
+ 26 - __d, 27 - __d, 28 - __d, 29 - __d, 30 - __d,
+ 31 - __d));
+#else
+ return vec_perm(
+ __a, __b,
+ (vector unsigned char)(__d, __d + 1, __d + 2, __d + 3, __d + 4, __d + 5,
+ __d + 6, __d + 7, __d + 8, __d + 9, __d + 10,
+ __d + 11, __d + 12, __d + 13, __d + 14, __d + 15));
+#endif
+}
+
+static vector unsigned int __ATTRS_o_ai vec_sld(vector unsigned int __a,
+ vector unsigned int __b,
+ unsigned const int __c) {
+ unsigned char __d = __c & 0x0F;
+#ifdef __LITTLE_ENDIAN__
+ return vec_perm(
+ __b, __a,
+ (vector unsigned char)(16 - __d, 17 - __d, 18 - __d, 19 - __d, 20 - __d,
+ 21 - __d, 22 - __d, 23 - __d, 24 - __d, 25 - __d,
+ 26 - __d, 27 - __d, 28 - __d, 29 - __d, 30 - __d,
+ 31 - __d));
+#else
+ return vec_perm(
+ __a, __b,
+ (vector unsigned char)(__d, __d + 1, __d + 2, __d + 3, __d + 4, __d + 5,
+ __d + 6, __d + 7, __d + 8, __d + 9, __d + 10,
+ __d + 11, __d + 12, __d + 13, __d + 14, __d + 15));
+#endif
+}
+
+static vector bool int __ATTRS_o_ai vec_sld(vector bool int __a,
+ vector bool int __b,
+ unsigned const int __c) {
+ unsigned char __d = __c & 0x0F;
+#ifdef __LITTLE_ENDIAN__
+ return vec_perm(
+ __b, __a,
+ (vector unsigned char)(16 - __d, 17 - __d, 18 - __d, 19 - __d, 20 - __d,
+ 21 - __d, 22 - __d, 23 - __d, 24 - __d, 25 - __d,
+ 26 - __d, 27 - __d, 28 - __d, 29 - __d, 30 - __d,
+ 31 - __d));
+#else
+ return vec_perm(
+ __a, __b,
+ (vector unsigned char)(__d, __d + 1, __d + 2, __d + 3, __d + 4, __d + 5,
+ __d + 6, __d + 7, __d + 8, __d + 9, __d + 10,
+ __d + 11, __d + 12, __d + 13, __d + 14, __d + 15));
+#endif
+}
+
+static vector float __ATTRS_o_ai vec_sld(vector float __a, vector float __b,
+ unsigned const int __c) {
+ unsigned char __d = __c & 0x0F;
+#ifdef __LITTLE_ENDIAN__
+ return vec_perm(
+ __b, __a,
+ (vector unsigned char)(16 - __d, 17 - __d, 18 - __d, 19 - __d, 20 - __d,
+ 21 - __d, 22 - __d, 23 - __d, 24 - __d, 25 - __d,
+ 26 - __d, 27 - __d, 28 - __d, 29 - __d, 30 - __d,
+ 31 - __d));
+#else
+ return vec_perm(
+ __a, __b,
+ (vector unsigned char)(__d, __d + 1, __d + 2, __d + 3, __d + 4, __d + 5,
+ __d + 6, __d + 7, __d + 8, __d + 9, __d + 10,
+ __d + 11, __d + 12, __d + 13, __d + 14, __d + 15));
+#endif
+}
+
+/* vec_vsldoi */
+
+static vector signed char __ATTRS_o_ai vec_vsldoi(vector signed char __a,
+ vector signed char __b,
+ unsigned char __c) {
+ unsigned char __d = __c & 0x0F;
+#ifdef __LITTLE_ENDIAN__
+ return vec_perm(
+ __b, __a,
+ (vector unsigned char)(16 - __d, 17 - __d, 18 - __d, 19 - __d, 20 - __d,
+ 21 - __d, 22 - __d, 23 - __d, 24 - __d, 25 - __d,
+ 26 - __d, 27 - __d, 28 - __d, 29 - __d, 30 - __d,
+ 31 - __d));
+#else
+ return vec_perm(
+ __a, __b,
+ (vector unsigned char)(__d, __d + 1, __d + 2, __d + 3, __d + 4, __d + 5,
+ __d + 6, __d + 7, __d + 8, __d + 9, __d + 10,
+ __d + 11, __d + 12, __d + 13, __d + 14, __d + 15));
+#endif
+}
+
+static vector unsigned char __ATTRS_o_ai vec_vsldoi(vector unsigned char __a,
+ vector unsigned char __b,
+ unsigned char __c) {
+ unsigned char __d = __c & 0x0F;
+#ifdef __LITTLE_ENDIAN__
+ return vec_perm(
+ __b, __a,
+ (vector unsigned char)(16 - __d, 17 - __d, 18 - __d, 19 - __d, 20 - __d,
+ 21 - __d, 22 - __d, 23 - __d, 24 - __d, 25 - __d,
+ 26 - __d, 27 - __d, 28 - __d, 29 - __d, 30 - __d,
+ 31 - __d));
+#else
+ return vec_perm(
+ __a, __b,
+ (vector unsigned char)(__d, __d + 1, __d + 2, __d + 3, __d + 4, __d + 5,
+ __d + 6, __d + 7, __d + 8, __d + 9, __d + 10,
+ __d + 11, __d + 12, __d + 13, __d + 14, __d + 15));
+#endif
+}
+
+static vector short __ATTRS_o_ai vec_vsldoi(vector short __a, vector short __b,
+ unsigned char __c) {
+ unsigned char __d = __c & 0x0F;
+#ifdef __LITTLE_ENDIAN__
+ return vec_perm(
+ __b, __a,
+ (vector unsigned char)(16 - __d, 17 - __d, 18 - __d, 19 - __d, 20 - __d,
+ 21 - __d, 22 - __d, 23 - __d, 24 - __d, 25 - __d,
+ 26 - __d, 27 - __d, 28 - __d, 29 - __d, 30 - __d,
+ 31 - __d));
+#else
+ return vec_perm(
+ __a, __b,
+ (vector unsigned char)(__d, __d + 1, __d + 2, __d + 3, __d + 4, __d + 5,
+ __d + 6, __d + 7, __d + 8, __d + 9, __d + 10,
+ __d + 11, __d + 12, __d + 13, __d + 14, __d + 15));
+#endif
+}
+
+static vector unsigned short __ATTRS_o_ai vec_vsldoi(vector unsigned short __a,
+ vector unsigned short __b,
+ unsigned char __c) {
+ unsigned char __d = __c & 0x0F;
+#ifdef __LITTLE_ENDIAN__
+ return vec_perm(
+ __b, __a,
+ (vector unsigned char)(16 - __d, 17 - __d, 18 - __d, 19 - __d, 20 - __d,
+ 21 - __d, 22 - __d, 23 - __d, 24 - __d, 25 - __d,
+ 26 - __d, 27 - __d, 28 - __d, 29 - __d, 30 - __d,
+ 31 - __d));
+#else
+ return vec_perm(
+ __a, __b,
+ (vector unsigned char)(__d, __d + 1, __d + 2, __d + 3, __d + 4, __d + 5,
+ __d + 6, __d + 7, __d + 8, __d + 9, __d + 10,
+ __d + 11, __d + 12, __d + 13, __d + 14, __d + 15));
+#endif
+}
+
+static vector pixel __ATTRS_o_ai vec_vsldoi(vector pixel __a, vector pixel __b,
+ unsigned char __c) {
+ unsigned char __d = __c & 0x0F;
+#ifdef __LITTLE_ENDIAN__
+ return vec_perm(
+ __b, __a,
+ (vector unsigned char)(16 - __d, 17 - __d, 18 - __d, 19 - __d, 20 - __d,
+ 21 - __d, 22 - __d, 23 - __d, 24 - __d, 25 - __d,
+ 26 - __d, 27 - __d, 28 - __d, 29 - __d, 30 - __d,
+ 31 - __d));
+#else
+ return vec_perm(
+ __a, __b,
+ (vector unsigned char)(__d, __d + 1, __d + 2, __d + 3, __d + 4, __d + 5,
+ __d + 6, __d + 7, __d + 8, __d + 9, __d + 10,
+ __d + 11, __d + 12, __d + 13, __d + 14, __d + 15));
+#endif
+}
+
+static vector int __ATTRS_o_ai vec_vsldoi(vector int __a, vector int __b,
+ unsigned char __c) {
+ unsigned char __d = __c & 0x0F;
+#ifdef __LITTLE_ENDIAN__
+ return vec_perm(
+ __b, __a,
+ (vector unsigned char)(16 - __d, 17 - __d, 18 - __d, 19 - __d, 20 - __d,
+ 21 - __d, 22 - __d, 23 - __d, 24 - __d, 25 - __d,
+ 26 - __d, 27 - __d, 28 - __d, 29 - __d, 30 - __d,
+ 31 - __d));
+#else
+ return vec_perm(
+ __a, __b,
+ (vector unsigned char)(__d, __d + 1, __d + 2, __d + 3, __d + 4, __d + 5,
+ __d + 6, __d + 7, __d + 8, __d + 9, __d + 10,
+ __d + 11, __d + 12, __d + 13, __d + 14, __d + 15));
+#endif
+}
+
+static vector unsigned int __ATTRS_o_ai vec_vsldoi(vector unsigned int __a,
+ vector unsigned int __b,
+ unsigned char __c) {
+ unsigned char __d = __c & 0x0F;
+#ifdef __LITTLE_ENDIAN__
+ return vec_perm(
+ __b, __a,
+ (vector unsigned char)(16 - __d, 17 - __d, 18 - __d, 19 - __d, 20 - __d,
+ 21 - __d, 22 - __d, 23 - __d, 24 - __d, 25 - __d,
+ 26 - __d, 27 - __d, 28 - __d, 29 - __d, 30 - __d,
+ 31 - __d));
+#else
+ return vec_perm(
+ __a, __b,
+ (vector unsigned char)(__d, __d + 1, __d + 2, __d + 3, __d + 4, __d + 5,
+ __d + 6, __d + 7, __d + 8, __d + 9, __d + 10,
+ __d + 11, __d + 12, __d + 13, __d + 14, __d + 15));
+#endif
+}
+
+static vector float __ATTRS_o_ai vec_vsldoi(vector float __a, vector float __b,
+ unsigned char __c) {
+ unsigned char __d = __c & 0x0F;
+#ifdef __LITTLE_ENDIAN__
+ return vec_perm(
+ __b, __a,
+ (vector unsigned char)(16 - __d, 17 - __d, 18 - __d, 19 - __d, 20 - __d,
+ 21 - __d, 22 - __d, 23 - __d, 24 - __d, 25 - __d,
+ 26 - __d, 27 - __d, 28 - __d, 29 - __d, 30 - __d,
+ 31 - __d));
+#else
+ return vec_perm(
+ __a, __b,
+ (vector unsigned char)(__d, __d + 1, __d + 2, __d + 3, __d + 4, __d + 5,
+ __d + 6, __d + 7, __d + 8, __d + 9, __d + 10,
+ __d + 11, __d + 12, __d + 13, __d + 14, __d + 15));
+#endif
+}
+
+/* vec_sll */
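+
+/* Shift left long: the entire 128-bit vector is shifted left by 0-7
+   bits.  The count comes from the low three bits of each byte of __b,
+   which the architecture expects to be identical across the vector;
+   pair with vec_slo for shifts of eight bits or more. */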
+
+static vector signed char __ATTRS_o_ai vec_sll(vector signed char __a,
+ vector unsigned char __b) {
+ return (vector signed char)__builtin_altivec_vsl((vector int)__a,
+ (vector int)__b);
+}
+
+static vector signed char __ATTRS_o_ai vec_sll(vector signed char __a,
+ vector unsigned short __b) {
+ return (vector signed char)__builtin_altivec_vsl((vector int)__a,
+ (vector int)__b);
+}
+
+static vector signed char __ATTRS_o_ai vec_sll(vector signed char __a,
+ vector unsigned int __b) {
+ return (vector signed char)__builtin_altivec_vsl((vector int)__a,
+ (vector int)__b);
+}
+
+static vector unsigned char __ATTRS_o_ai vec_sll(vector unsigned char __a,
+ vector unsigned char __b) {
+ return (vector unsigned char)__builtin_altivec_vsl((vector int)__a,
+ (vector int)__b);
+}
+
+static vector unsigned char __ATTRS_o_ai vec_sll(vector unsigned char __a,
+ vector unsigned short __b) {
+ return (vector unsigned char)__builtin_altivec_vsl((vector int)__a,
+ (vector int)__b);
+}
+
+static vector unsigned char __ATTRS_o_ai vec_sll(vector unsigned char __a,
+ vector unsigned int __b) {
+ return (vector unsigned char)__builtin_altivec_vsl((vector int)__a,
+ (vector int)__b);
+}
+
+static vector bool char __ATTRS_o_ai vec_sll(vector bool char __a,
+ vector unsigned char __b) {
+ return (vector bool char)__builtin_altivec_vsl((vector int)__a,
+ (vector int)__b);
+}
+
+static vector bool char __ATTRS_o_ai vec_sll(vector bool char __a,
+ vector unsigned short __b) {
+ return (vector bool char)__builtin_altivec_vsl((vector int)__a,
+ (vector int)__b);
+}
+
+static vector bool char __ATTRS_o_ai vec_sll(vector bool char __a,
+ vector unsigned int __b) {
+ return (vector bool char)__builtin_altivec_vsl((vector int)__a,
+ (vector int)__b);
+}
+
+static vector short __ATTRS_o_ai vec_sll(vector short __a,
+ vector unsigned char __b) {
+ return (vector short)__builtin_altivec_vsl((vector int)__a, (vector int)__b);
+}
+
+static vector short __ATTRS_o_ai vec_sll(vector short __a,
+ vector unsigned short __b) {
+ return (vector short)__builtin_altivec_vsl((vector int)__a, (vector int)__b);
+}
+
+static vector short __ATTRS_o_ai vec_sll(vector short __a,
+ vector unsigned int __b) {
+ return (vector short)__builtin_altivec_vsl((vector int)__a, (vector int)__b);
+}
+
+static vector unsigned short __ATTRS_o_ai vec_sll(vector unsigned short __a,
+ vector unsigned char __b) {
+ return (vector unsigned short)__builtin_altivec_vsl((vector int)__a,
+ (vector int)__b);
+}
+
+static vector unsigned short __ATTRS_o_ai vec_sll(vector unsigned short __a,
+ vector unsigned short __b) {
+ return (vector unsigned short)__builtin_altivec_vsl((vector int)__a,
+ (vector int)__b);
+}
+
+static vector unsigned short __ATTRS_o_ai vec_sll(vector unsigned short __a,
+ vector unsigned int __b) {
+ return (vector unsigned short)__builtin_altivec_vsl((vector int)__a,
+ (vector int)__b);
+}
+
+static vector bool short __ATTRS_o_ai vec_sll(vector bool short __a,
+ vector unsigned char __b) {
+ return (vector bool short)__builtin_altivec_vsl((vector int)__a,
+ (vector int)__b);
+}
+
+static vector bool short __ATTRS_o_ai vec_sll(vector bool short __a,
+ vector unsigned short __b) {
+ return (vector bool short)__builtin_altivec_vsl((vector int)__a,
+ (vector int)__b);
+}
+
+static vector bool short __ATTRS_o_ai vec_sll(vector bool short __a,
+ vector unsigned int __b) {
+ return (vector bool short)__builtin_altivec_vsl((vector int)__a,
+ (vector int)__b);
+}
+
+static vector pixel __ATTRS_o_ai vec_sll(vector pixel __a,
+ vector unsigned char __b) {
+ return (vector pixel)__builtin_altivec_vsl((vector int)__a, (vector int)__b);
+}
+
+static vector pixel __ATTRS_o_ai vec_sll(vector pixel __a,
+ vector unsigned short __b) {
+ return (vector pixel)__builtin_altivec_vsl((vector int)__a, (vector int)__b);
+}
+
+static vector pixel __ATTRS_o_ai vec_sll(vector pixel __a,
+ vector unsigned int __b) {
+ return (vector pixel)__builtin_altivec_vsl((vector int)__a, (vector int)__b);
+}
+
+static vector int __ATTRS_o_ai vec_sll(vector int __a,
+ vector unsigned char __b) {
+ return (vector int)__builtin_altivec_vsl(__a, (vector int)__b);
+}
+
+static vector int __ATTRS_o_ai vec_sll(vector int __a,
+ vector unsigned short __b) {
+ return (vector int)__builtin_altivec_vsl(__a, (vector int)__b);
+}
+
+static vector int __ATTRS_o_ai vec_sll(vector int __a,
+ vector unsigned int __b) {
+ return (vector int)__builtin_altivec_vsl(__a, (vector int)__b);
+}
+
+static vector unsigned int __ATTRS_o_ai vec_sll(vector unsigned int __a,
+ vector unsigned char __b) {
+ return (vector unsigned int)__builtin_altivec_vsl((vector int)__a,
+ (vector int)__b);
+}
+
+static vector unsigned int __ATTRS_o_ai vec_sll(vector unsigned int __a,
+ vector unsigned short __b) {
+ return (vector unsigned int)__builtin_altivec_vsl((vector int)__a,
+ (vector int)__b);
+}
+
+static vector unsigned int __ATTRS_o_ai vec_sll(vector unsigned int __a,
+ vector unsigned int __b) {
+ return (vector unsigned int)__builtin_altivec_vsl((vector int)__a,
+ (vector int)__b);
+}
+
+static vector bool int __ATTRS_o_ai vec_sll(vector bool int __a,
+ vector unsigned char __b) {
+ return (vector bool int)__builtin_altivec_vsl((vector int)__a,
+ (vector int)__b);
+}
+
+static vector bool int __ATTRS_o_ai vec_sll(vector bool int __a,
+ vector unsigned short __b) {
+ return (vector bool int)__builtin_altivec_vsl((vector int)__a,
+ (vector int)__b);
+}
+
+static vector bool int __ATTRS_o_ai vec_sll(vector bool int __a,
+ vector unsigned int __b) {
+ return (vector bool int)__builtin_altivec_vsl((vector int)__a,
+ (vector int)__b);
+}
+
+/* vec_vsl */
+
+static vector signed char __ATTRS_o_ai vec_vsl(vector signed char __a,
+ vector unsigned char __b) {
+ return (vector signed char)__builtin_altivec_vsl((vector int)__a,
+ (vector int)__b);
+}
+
+static vector signed char __ATTRS_o_ai vec_vsl(vector signed char __a,
+ vector unsigned short __b) {
+ return (vector signed char)__builtin_altivec_vsl((vector int)__a,
+ (vector int)__b);
+}
+
+static vector signed char __ATTRS_o_ai vec_vsl(vector signed char __a,
+ vector unsigned int __b) {
+ return (vector signed char)__builtin_altivec_vsl((vector int)__a,
+ (vector int)__b);
+}
+
+static vector unsigned char __ATTRS_o_ai vec_vsl(vector unsigned char __a,
+ vector unsigned char __b) {
+ return (vector unsigned char)__builtin_altivec_vsl((vector int)__a,
+ (vector int)__b);
+}
+
+static vector unsigned char __ATTRS_o_ai vec_vsl(vector unsigned char __a,
+ vector unsigned short __b) {
+ return (vector unsigned char)__builtin_altivec_vsl((vector int)__a,
+ (vector int)__b);
+}
+
+static vector unsigned char __ATTRS_o_ai vec_vsl(vector unsigned char __a,
+ vector unsigned int __b) {
+ return (vector unsigned char)__builtin_altivec_vsl((vector int)__a,
+ (vector int)__b);
+}
+
+static vector bool char __ATTRS_o_ai vec_vsl(vector bool char __a,
+ vector unsigned char __b) {
+ return (vector bool char)__builtin_altivec_vsl((vector int)__a,
+ (vector int)__b);
+}
+
+static vector bool char __ATTRS_o_ai vec_vsl(vector bool char __a,
+ vector unsigned short __b) {
+ return (vector bool char)__builtin_altivec_vsl((vector int)__a,
+ (vector int)__b);
+}
+
+static vector bool char __ATTRS_o_ai vec_vsl(vector bool char __a,
+ vector unsigned int __b) {
+ return (vector bool char)__builtin_altivec_vsl((vector int)__a,
+ (vector int)__b);
+}
+
+static vector short __ATTRS_o_ai vec_vsl(vector short __a,
+ vector unsigned char __b) {
+ return (vector short)__builtin_altivec_vsl((vector int)__a, (vector int)__b);
+}
+
+static vector short __ATTRS_o_ai vec_vsl(vector short __a,
+ vector unsigned short __b) {
+ return (vector short)__builtin_altivec_vsl((vector int)__a, (vector int)__b);
+}
+
+static vector short __ATTRS_o_ai vec_vsl(vector short __a,
+ vector unsigned int __b) {
+ return (vector short)__builtin_altivec_vsl((vector int)__a, (vector int)__b);
+}
+
+static vector unsigned short __ATTRS_o_ai vec_vsl(vector unsigned short __a,
+ vector unsigned char __b) {
+ return (vector unsigned short)__builtin_altivec_vsl((vector int)__a,
+ (vector int)__b);
+}
+
+static vector unsigned short __ATTRS_o_ai vec_vsl(vector unsigned short __a,
+ vector unsigned short __b) {
+ return (vector unsigned short)__builtin_altivec_vsl((vector int)__a,
+ (vector int)__b);
+}
+
+static vector unsigned short __ATTRS_o_ai vec_vsl(vector unsigned short __a,
+ vector unsigned int __b) {
+ return (vector unsigned short)__builtin_altivec_vsl((vector int)__a,
+ (vector int)__b);
+}
+
+static vector bool short __ATTRS_o_ai vec_vsl(vector bool short __a,
+ vector unsigned char __b) {
+ return (vector bool short)__builtin_altivec_vsl((vector int)__a,
+ (vector int)__b);
+}
+
+static vector bool short __ATTRS_o_ai vec_vsl(vector bool short __a,
+ vector unsigned short __b) {
+ return (vector bool short)__builtin_altivec_vsl((vector int)__a,
+ (vector int)__b);
+}
+
+static vector bool short __ATTRS_o_ai vec_vsl(vector bool short __a,
+ vector unsigned int __b) {
+ return (vector bool short)__builtin_altivec_vsl((vector int)__a,
+ (vector int)__b);
+}
+
+static vector pixel __ATTRS_o_ai vec_vsl(vector pixel __a,
+ vector unsigned char __b) {
+ return (vector pixel)__builtin_altivec_vsl((vector int)__a, (vector int)__b);
+}
+
+static vector pixel __ATTRS_o_ai vec_vsl(vector pixel __a,
+ vector unsigned short __b) {
+ return (vector pixel)__builtin_altivec_vsl((vector int)__a, (vector int)__b);
+}
+
+static vector pixel __ATTRS_o_ai vec_vsl(vector pixel __a,
+ vector unsigned int __b) {
+ return (vector pixel)__builtin_altivec_vsl((vector int)__a, (vector int)__b);
+}
+
+static vector int __ATTRS_o_ai vec_vsl(vector int __a,
+ vector unsigned char __b) {
+ return (vector int)__builtin_altivec_vsl(__a, (vector int)__b);
+}
+
+static vector int __ATTRS_o_ai vec_vsl(vector int __a,
+ vector unsigned short __b) {
+ return (vector int)__builtin_altivec_vsl(__a, (vector int)__b);
+}
+
+static vector int __ATTRS_o_ai vec_vsl(vector int __a,
+ vector unsigned int __b) {
+ return (vector int)__builtin_altivec_vsl(__a, (vector int)__b);
+}
+
+static vector unsigned int __ATTRS_o_ai vec_vsl(vector unsigned int __a,
+ vector unsigned char __b) {
+ return (vector unsigned int)__builtin_altivec_vsl((vector int)__a,
+ (vector int)__b);
+}
+
+static vector unsigned int __ATTRS_o_ai vec_vsl(vector unsigned int __a,
+ vector unsigned short __b) {
+ return (vector unsigned int)__builtin_altivec_vsl((vector int)__a,
+ (vector int)__b);
+}
+
+static vector unsigned int __ATTRS_o_ai vec_vsl(vector unsigned int __a,
+ vector unsigned int __b) {
+ return (vector unsigned int)__builtin_altivec_vsl((vector int)__a,
+ (vector int)__b);
+}
+
+static vector bool int __ATTRS_o_ai vec_vsl(vector bool int __a,
+ vector unsigned char __b) {
+ return (vector bool int)__builtin_altivec_vsl((vector int)__a,
+ (vector int)__b);
+}
+
+static vector bool int __ATTRS_o_ai vec_vsl(vector bool int __a,
+ vector unsigned short __b) {
+ return (vector bool int)__builtin_altivec_vsl((vector int)__a,
+ (vector int)__b);
+}
+
+static vector bool int __ATTRS_o_ai vec_vsl(vector bool int __a,
+ vector unsigned int __b) {
+ return (vector bool int)__builtin_altivec_vsl((vector int)__a,
+ (vector int)__b);
+}
+
+/* vec_slo */
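+
+/* Shift left by octets: the entire vector is shifted left by 0-15
+   bytes; the count is taken from bits 121:124 of __b (bits 3:6 of its
+   low-order byte).  Typically combined with vec_sll to build arbitrary
+   0-127 bit shifts. */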
+
+static vector signed char __ATTRS_o_ai vec_slo(vector signed char __a,
+ vector signed char __b) {
+ return (vector signed char)__builtin_altivec_vslo((vector int)__a,
+ (vector int)__b);
+}
+
+static vector signed char __ATTRS_o_ai vec_slo(vector signed char __a,
+ vector unsigned char __b) {
+ return (vector signed char)__builtin_altivec_vslo((vector int)__a,
+ (vector int)__b);
+}
+
+static vector unsigned char __ATTRS_o_ai vec_slo(vector unsigned char __a,
+ vector signed char __b) {
+ return (vector unsigned char)__builtin_altivec_vslo((vector int)__a,
+ (vector int)__b);
+}
+
+static vector unsigned char __ATTRS_o_ai vec_slo(vector unsigned char __a,
+ vector unsigned char __b) {
+ return (vector unsigned char)__builtin_altivec_vslo((vector int)__a,
+ (vector int)__b);
+}
+
+static vector short __ATTRS_o_ai vec_slo(vector short __a,
+ vector signed char __b) {
+ return (vector short)__builtin_altivec_vslo((vector int)__a, (vector int)__b);
+}
+
+static vector short __ATTRS_o_ai vec_slo(vector short __a,
+ vector unsigned char __b) {
+ return (vector short)__builtin_altivec_vslo((vector int)__a, (vector int)__b);
+}
+
+static vector unsigned short __ATTRS_o_ai vec_slo(vector unsigned short __a,
+ vector signed char __b) {
+ return (vector unsigned short)__builtin_altivec_vslo((vector int)__a,
+ (vector int)__b);
+}
+
+static vector unsigned short __ATTRS_o_ai vec_slo(vector unsigned short __a,
+ vector unsigned char __b) {
+ return (vector unsigned short)__builtin_altivec_vslo((vector int)__a,
+ (vector int)__b);
+}
+
+static vector pixel __ATTRS_o_ai vec_slo(vector pixel __a,
+ vector signed char __b) {
+ return (vector pixel)__builtin_altivec_vslo((vector int)__a, (vector int)__b);
+}
+
+static vector pixel __ATTRS_o_ai vec_slo(vector pixel __a,
+ vector unsigned char __b) {
+ return (vector pixel)__builtin_altivec_vslo((vector int)__a, (vector int)__b);
+}
+
+static vector int __ATTRS_o_ai vec_slo(vector int __a, vector signed char __b) {
+ return (vector int)__builtin_altivec_vslo(__a, (vector int)__b);
+}
+
+static vector int __ATTRS_o_ai vec_slo(vector int __a,
+ vector unsigned char __b) {
+ return (vector int)__builtin_altivec_vslo(__a, (vector int)__b);
+}
+
+static vector unsigned int __ATTRS_o_ai vec_slo(vector unsigned int __a,
+ vector signed char __b) {
+ return (vector unsigned int)__builtin_altivec_vslo((vector int)__a,
+ (vector int)__b);
+}
+
+static vector unsigned int __ATTRS_o_ai vec_slo(vector unsigned int __a,
+ vector unsigned char __b) {
+ return (vector unsigned int)__builtin_altivec_vslo((vector int)__a,
+ (vector int)__b);
+}
+
+static vector float __ATTRS_o_ai vec_slo(vector float __a,
+ vector signed char __b) {
+ return (vector float)__builtin_altivec_vslo((vector int)__a, (vector int)__b);
+}
+
+static vector float __ATTRS_o_ai vec_slo(vector float __a,
+ vector unsigned char __b) {
+ return (vector float)__builtin_altivec_vslo((vector int)__a, (vector int)__b);
+}
+
+/* vec_vslo */
+
+static vector signed char __ATTRS_o_ai vec_vslo(vector signed char __a,
+ vector signed char __b) {
+ return (vector signed char)__builtin_altivec_vslo((vector int)__a,
+ (vector int)__b);
+}
+
+static vector signed char __ATTRS_o_ai vec_vslo(vector signed char __a,
+ vector unsigned char __b) {
+ return (vector signed char)__builtin_altivec_vslo((vector int)__a,
+ (vector int)__b);
+}
+
+static vector unsigned char __ATTRS_o_ai vec_vslo(vector unsigned char __a,
+ vector signed char __b) {
+ return (vector unsigned char)__builtin_altivec_vslo((vector int)__a,
+ (vector int)__b);
+}
+
+static vector unsigned char __ATTRS_o_ai vec_vslo(vector unsigned char __a,
+ vector unsigned char __b) {
+ return (vector unsigned char)__builtin_altivec_vslo((vector int)__a,
+ (vector int)__b);
+}
+
+static vector short __ATTRS_o_ai vec_vslo(vector short __a,
+ vector signed char __b) {
+ return (vector short)__builtin_altivec_vslo((vector int)__a, (vector int)__b);
+}
+
+static vector short __ATTRS_o_ai vec_vslo(vector short __a,
+ vector unsigned char __b) {
+ return (vector short)__builtin_altivec_vslo((vector int)__a, (vector int)__b);
+}
+
+static vector unsigned short __ATTRS_o_ai vec_vslo(vector unsigned short __a,
+ vector signed char __b) {
+ return (vector unsigned short)__builtin_altivec_vslo((vector int)__a,
+ (vector int)__b);
+}
+
+static vector unsigned short __ATTRS_o_ai vec_vslo(vector unsigned short __a,
+ vector unsigned char __b) {
+ return (vector unsigned short)__builtin_altivec_vslo((vector int)__a,
+ (vector int)__b);
+}
+
+static vector pixel __ATTRS_o_ai vec_vslo(vector pixel __a,
+ vector signed char __b) {
+ return (vector pixel)__builtin_altivec_vslo((vector int)__a, (vector int)__b);
+}
+
+static vector pixel __ATTRS_o_ai vec_vslo(vector pixel __a,
+ vector unsigned char __b) {
+ return (vector pixel)__builtin_altivec_vslo((vector int)__a, (vector int)__b);
+}
+
+static vector int __ATTRS_o_ai vec_vslo(vector int __a,
+ vector signed char __b) {
+ return (vector int)__builtin_altivec_vslo(__a, (vector int)__b);
+}
+
+static vector int __ATTRS_o_ai vec_vslo(vector int __a,
+ vector unsigned char __b) {
+ return (vector int)__builtin_altivec_vslo(__a, (vector int)__b);
+}
+
+static vector unsigned int __ATTRS_o_ai vec_vslo(vector unsigned int __a,
+ vector signed char __b) {
+ return (vector unsigned int)__builtin_altivec_vslo((vector int)__a,
+ (vector int)__b);
+}
+
+static vector unsigned int __ATTRS_o_ai vec_vslo(vector unsigned int __a,
+ vector unsigned char __b) {
+ return (vector unsigned int)__builtin_altivec_vslo((vector int)__a,
+ (vector int)__b);
+}
+
+static vector float __ATTRS_o_ai vec_vslo(vector float __a,
+ vector signed char __b) {
+ return (vector float)__builtin_altivec_vslo((vector int)__a, (vector int)__b);
+}
+
+static vector float __ATTRS_o_ai vec_vslo(vector float __a,
+ vector unsigned char __b) {
+ return (vector float)__builtin_altivec_vslo((vector int)__a, (vector int)__b);
+}
+
+/* vec_splat */
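+
+/* Broadcasts element __b of __a (masked to a valid index) to every
+   element of the result, e.g. vec_splat(__v, 0) replicates element 0. */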
+
+static vector signed char __ATTRS_o_ai vec_splat(vector signed char __a,
+ unsigned const int __b) {
+ return vec_perm(__a, __a, (vector unsigned char)(__b & 0x0F));
+}
+
+static vector unsigned char __ATTRS_o_ai vec_splat(vector unsigned char __a,
+ unsigned const int __b) {
+ return vec_perm(__a, __a, (vector unsigned char)(__b & 0x0F));
+}
+
+static vector bool char __ATTRS_o_ai vec_splat(vector bool char __a,
+ unsigned const int __b) {
+ return vec_perm(__a, __a, (vector unsigned char)(__b & 0x0F));
+}
+
+static vector signed short __ATTRS_o_ai vec_splat(vector signed short __a,
+ unsigned const int __b) {
+ unsigned char b0 = (__b & 0x07) * 2;
+ unsigned char b1 = b0 + 1;
+ return vec_perm(__a, __a,
+ (vector unsigned char)(b0, b1, b0, b1, b0, b1, b0, b1,
+ b0, b1, b0, b1, b0, b1, b0, b1));
+}
+
+static vector unsigned short __ATTRS_o_ai vec_splat(vector unsigned short __a,
+ unsigned const int __b) {
+ unsigned char b0 = (__b & 0x07) * 2;
+ unsigned char b1 = b0 + 1;
+ return vec_perm(__a, __a,
+ (vector unsigned char)(b0, b1, b0, b1, b0, b1, b0, b1,
+ b0, b1, b0, b1, b0, b1, b0, b1));
+}
+
+static vector bool short __ATTRS_o_ai vec_splat(vector bool short __a,
+ unsigned const int __b) {
+ unsigned char b0 = (__b & 0x07) * 2;
+ unsigned char b1 = b0 + 1;
+ return vec_perm(__a, __a,
+ (vector unsigned char)(b0, b1, b0, b1, b0, b1, b0, b1,
+ b0, b1, b0, b1, b0, b1, b0, b1));
+}
+
+static vector pixel __ATTRS_o_ai vec_splat(vector pixel __a,
+ unsigned const int __b) {
+ unsigned char b0 = (__b & 0x07) * 2;
+ unsigned char b1 = b0 + 1;
+ return vec_perm(__a, __a,
+ (vector unsigned char)(b0, b1, b0, b1, b0, b1, b0, b1,
+ b0, b1, b0, b1, b0, b1, b0, b1));
+}
+
+static vector signed int __ATTRS_o_ai vec_splat(vector signed int __a,
+ unsigned const int __b) {
+ unsigned char b0 = (__b & 0x03) * 4;
+ unsigned char b1 = b0 + 1, b2 = b0 + 2, b3 = b0 + 3;
+ return vec_perm(__a, __a,
+ (vector unsigned char)(b0, b1, b2, b3, b0, b1, b2, b3, b0,
+ b1, b2, b3, b0, b1, b2, b3));
+}
+
+static vector unsigned int __ATTRS_o_ai vec_splat(vector unsigned int __a,
+ unsigned const int __b) {
+ unsigned char b0 = (__b & 0x03) * 4;
+ unsigned char b1 = b0 + 1, b2 = b0 + 2, b3 = b0 + 3;
+ return vec_perm(__a, __a,
+ (vector unsigned char)(b0, b1, b2, b3, b0, b1, b2, b3, b0,
+ b1, b2, b3, b0, b1, b2, b3));
+}
+
+static vector bool int __ATTRS_o_ai vec_splat(vector bool int __a,
+ unsigned const int __b) {
+ unsigned char b0 = (__b & 0x03) * 4;
+ unsigned char b1 = b0 + 1, b2 = b0 + 2, b3 = b0 + 3;
+ return vec_perm(__a, __a,
+ (vector unsigned char)(b0, b1, b2, b3, b0, b1, b2, b3, b0,
+ b1, b2, b3, b0, b1, b2, b3));
+}
+
+static vector float __ATTRS_o_ai vec_splat(vector float __a,
+ unsigned const int __b) {
+ unsigned char b0 = (__b & 0x03) * 4;
+ unsigned char b1 = b0 + 1, b2 = b0 + 2, b3 = b0 + 3;
+ return vec_perm(__a, __a,
+ (vector unsigned char)(b0, b1, b2, b3, b0, b1, b2, b3, b0,
+ b1, b2, b3, b0, b1, b2, b3));
+}
+
+#ifdef __VSX__
+static vector double __ATTRS_o_ai vec_splat(vector double __a,
+ unsigned const int __b) {
+ unsigned char b0 = (__b & 0x01) * 8;
+ unsigned char b1 = b0 + 1, b2 = b0 + 2, b3 = b0 + 3, b4 = b0 + 4,
+ b5 = b0 + 5, b6 = b0 + 6, b7 = b0 + 7;
+ return vec_perm(__a, __a,
+ (vector unsigned char)(b0, b1, b2, b3, b4, b5, b6, b7,
+ b0, b1, b2, b3, b4, b5, b6, b7));
+}
+static vector bool long long __ATTRS_o_ai vec_splat(vector bool long long __a,
+ unsigned const int __b) {
+ unsigned char b0 = (__b & 0x01) * 8;
+ unsigned char b1 = b0 + 1, b2 = b0 + 2, b3 = b0 + 3, b4 = b0 + 4,
+ b5 = b0 + 5, b6 = b0 + 6, b7 = b0 + 7;
+ return vec_perm(__a, __a,
+ (vector unsigned char)(b0, b1, b2, b3, b4, b5, b6, b7,
+ b0, b1, b2, b3, b4, b5, b6, b7));
+}
+static vector signed long long __ATTRS_o_ai
+vec_splat(vector signed long long __a, unsigned const int __b) {
+ unsigned char b0 = (__b & 0x01) * 8;
+ unsigned char b1 = b0 + 1, b2 = b0 + 2, b3 = b0 + 3, b4 = b0 + 4,
+ b5 = b0 + 5, b6 = b0 + 6, b7 = b0 + 7;
+ return vec_perm(__a, __a,
+ (vector unsigned char)(b0, b1, b2, b3, b4, b5, b6, b7,
+ b0, b1, b2, b3, b4, b5, b6, b7));
+}
+static vector unsigned long long __ATTRS_o_ai
+vec_splat(vector unsigned long long __a, unsigned const int __b) {
+ unsigned char b0 = (__b & 0x01) * 8;
+ unsigned char b1 = b0 + 1, b2 = b0 + 2, b3 = b0 + 3, b4 = b0 + 4,
+ b5 = b0 + 5, b6 = b0 + 6, b7 = b0 + 7;
+ return vec_perm(__a, __a,
+ (vector unsigned char)(b0, b1, b2, b3, b4, b5, b6, b7,
+ b0, b1, b2, b3, b4, b5, b6, b7));
+}
+#endif
+
+/* vec_vspltb */
+
+#define __builtin_altivec_vspltb vec_vspltb
+
+static vector signed char __ATTRS_o_ai vec_vspltb(vector signed char __a,
+ unsigned char __b) {
+ return vec_perm(__a, __a, (vector unsigned char)(__b));
+}
+
+static vector unsigned char __ATTRS_o_ai vec_vspltb(vector unsigned char __a,
+ unsigned char __b) {
+ return vec_perm(__a, __a, (vector unsigned char)(__b));
+}
+
+static vector bool char __ATTRS_o_ai vec_vspltb(vector bool char __a,
+ unsigned char __b) {
+ return vec_perm(__a, __a, (vector unsigned char)(__b));
+}
+
+/* vec_vsplth */
+
+#define __builtin_altivec_vsplth vec_vsplth
+
+static vector short __ATTRS_o_ai vec_vsplth(vector short __a,
+ unsigned char __b) {
+ __b *= 2;
+ unsigned char b1 = __b + 1;
+ return vec_perm(__a, __a,
+ (vector unsigned char)(__b, b1, __b, b1, __b, b1, __b, b1,
+ __b, b1, __b, b1, __b, b1, __b, b1));
+}
+
+static vector unsigned short __ATTRS_o_ai vec_vsplth(vector unsigned short __a,
+ unsigned char __b) {
+ __b *= 2;
+ unsigned char b1 = __b + 1;
+ return vec_perm(__a, __a,
+ (vector unsigned char)(__b, b1, __b, b1, __b, b1, __b, b1,
+ __b, b1, __b, b1, __b, b1, __b, b1));
+}
+
+static vector bool short __ATTRS_o_ai vec_vsplth(vector bool short __a,
+ unsigned char __b) {
+ __b *= 2;
+ unsigned char b1 = __b + 1;
+ return vec_perm(__a, __a,
+ (vector unsigned char)(__b, b1, __b, b1, __b, b1, __b, b1,
+ __b, b1, __b, b1, __b, b1, __b, b1));
+}
+
+static vector pixel __ATTRS_o_ai vec_vsplth(vector pixel __a,
+ unsigned char __b) {
+ __b *= 2;
+ unsigned char b1 = __b + 1;
+ return vec_perm(__a, __a,
+ (vector unsigned char)(__b, b1, __b, b1, __b, b1, __b, b1,
+ __b, b1, __b, b1, __b, b1, __b, b1));
+}
+
+/* vec_vspltw */
+
+#define __builtin_altivec_vspltw vec_vspltw
+
+static vector int __ATTRS_o_ai vec_vspltw(vector int __a, unsigned char __b) {
+ __b *= 4;
+ unsigned char b1 = __b + 1, b2 = __b + 2, b3 = __b + 3;
+ return vec_perm(__a, __a,
+ (vector unsigned char)(__b, b1, b2, b3, __b, b1, b2, b3, __b,
+ b1, b2, b3, __b, b1, b2, b3));
+}
+
+static vector unsigned int __ATTRS_o_ai vec_vspltw(vector unsigned int __a,
+ unsigned char __b) {
+ __b *= 4;
+ unsigned char b1 = __b + 1, b2 = __b + 2, b3 = __b + 3;
+ return vec_perm(__a, __a,
+ (vector unsigned char)(__b, b1, b2, b3, __b, b1, b2, b3, __b,
+ b1, b2, b3, __b, b1, b2, b3));
+}
+
+static vector bool int __ATTRS_o_ai vec_vspltw(vector bool int __a,
+ unsigned char __b) {
+ __b *= 4;
+ unsigned char b1 = __b + 1, b2 = __b + 2, b3 = __b + 3;
+ return vec_perm(__a, __a,
+ (vector unsigned char)(__b, b1, b2, b3, __b, b1, b2, b3, __b,
+ b1, b2, b3, __b, b1, b2, b3));
+}
+
+static vector float __ATTRS_o_ai vec_vspltw(vector float __a,
+ unsigned char __b) {
+ __b *= 4;
+ unsigned char b1 = __b + 1, b2 = __b + 2, b3 = __b + 3;
+ return vec_perm(__a, __a,
+ (vector unsigned char)(__b, b1, b2, b3, __b, b1, b2, b3, __b,
+ b1, b2, b3, __b, b1, b2, b3));
+}
+
+/* vec_splat_s8 */
+
+#define __builtin_altivec_vspltisb vec_splat_s8
+
+// FIXME: parameter should be treated as 5-bit signed literal
+static vector signed char __ATTRS_o_ai vec_splat_s8(signed char __a) {
+ return (vector signed char)(__a);
+}
+
+/* vec_vspltisb */
+
+// FIXME: parameter should be treated as 5-bit signed literal
+static vector signed char __ATTRS_o_ai vec_vspltisb(signed char __a) {
+ return (vector signed char)(__a);
+}
+
+/* vec_splat_s16 */
+
+#define __builtin_altivec_vspltish vec_splat_s16
+
+// FIXME: parameter should be treated as 5-bit signed literal
+static vector short __ATTRS_o_ai vec_splat_s16(signed char __a) {
+ return (vector short)(__a);
+}
+
+/* vec_vspltish */
+
+// FIXME: parameter should be treated as 5-bit signed literal
+static vector short __ATTRS_o_ai vec_vspltish(signed char __a) {
+ return (vector short)(__a);
+}
+
+/* vec_splat_s32 */
+
+#define __builtin_altivec_vspltisw vec_splat_s32
+
+// FIXME: parameter should be treated as 5-bit signed literal
+static vector int __ATTRS_o_ai vec_splat_s32(signed char __a) {
+ return (vector int)(__a);
+}
+
+/* vec_vspltisw */
+
+// FIXME: parameter should be treated as 5-bit signed literal
+static vector int __ATTRS_o_ai vec_vspltisw(signed char __a) {
+ return (vector int)(__a);
+}
+
+/* vec_splat_u8 */
+
+// FIXME: parameter should be treated as 5-bit signed literal
+static vector unsigned char __ATTRS_o_ai vec_splat_u8(unsigned char __a) {
+ return (vector unsigned char)(__a);
+}
+
+/* vec_splat_u16 */
+
+// FIXME: parameter should be treated as 5-bit signed literal
+static vector unsigned short __ATTRS_o_ai vec_splat_u16(signed char __a) {
+ return (vector unsigned short)(__a);
+}
+
+/* vec_splat_u32 */
+
+// FIXME: parameter should be treated as 5-bit signed literal
+static vector unsigned int __ATTRS_o_ai vec_splat_u32(signed char __a) {
+ return (vector unsigned int)(__a);
+}
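+
+/* Editor's illustrative sketch, not part of the original header: the
+   vspltis* encodings hold a 5-bit signed immediate, so only splat values
+   in [-16, 15] are representable; per the FIXMEs above, the headers do
+   not yet enforce that range. */
+#if 0 /* usage example only */
+static vector signed int example_splat_imm(void) {
+  return vec_splat_s32(-16); /* all four words become -16 */
+}
+#endif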
+
+/* vec_sr */
+
+static vector signed char __ATTRS_o_ai vec_sr(vector signed char __a,
+ vector unsigned char __b) {
+ vector unsigned char __res = (vector unsigned char)__a >> __b;
+ return (vector signed char)__res;
+}
+
+static vector unsigned char __ATTRS_o_ai vec_sr(vector unsigned char __a,
+ vector unsigned char __b) {
+ return __a >> __b;
+}
+
+static vector signed short __ATTRS_o_ai vec_sr(vector signed short __a,
+ vector unsigned short __b) {
+ vector unsigned short __res = (vector unsigned short)__a >> __b;
+ return (vector signed short)__res;
+}
+
+static vector unsigned short __ATTRS_o_ai vec_sr(vector unsigned short __a,
+ vector unsigned short __b) {
+ return __a >> __b;
+}
+
+static vector signed int __ATTRS_o_ai vec_sr(vector signed int __a,
+ vector unsigned int __b) {
+ vector unsigned int __res = (vector unsigned int)__a >> __b;
+ return (vector signed int)__res;
+}
+
+static vector unsigned int __ATTRS_o_ai vec_sr(vector unsigned int __a,
+ vector unsigned int __b) {
+ return __a >> __b;
+}
+
+#ifdef __POWER8_VECTOR__
+static vector signed long long __ATTRS_o_ai
+vec_sr(vector signed long long __a, vector unsigned long long __b) {
+ vector unsigned long long __res = (vector unsigned long long)__a >> __b;
+ return (vector signed long long)__res;
+}
+
+static vector unsigned long long __ATTRS_o_ai
+vec_sr(vector unsigned long long __a, vector unsigned long long __b) {
+ return __a >> __b;
+}
+#endif
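+
+/* Editor's illustrative sketch, not part of the original header: vec_sr is
+   an element-wise logical right shift, each lane of __a shifted by the
+   count in the matching lane of __b. */
+#if 0 /* usage example only */
+static vector unsigned int example_sr(void) {
+  vector unsigned int v = {0x80, 0x80, 0x80, 0x80};
+  vector unsigned int n = {1, 2, 3, 4};
+  return vec_sr(v, n); /* {0x40, 0x20, 0x10, 0x08} */
+}
+#endif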
+
+/* vec_vsrb */
+
+#define __builtin_altivec_vsrb vec_vsrb
+
+static vector signed char __ATTRS_o_ai vec_vsrb(vector signed char __a,
+ vector unsigned char __b) {
+ return __a >> (vector signed char)__b;
+}
+
+static vector unsigned char __ATTRS_o_ai vec_vsrb(vector unsigned char __a,
+ vector unsigned char __b) {
+ return __a >> __b;
+}
+
+/* vec_vsrh */
+
+#define __builtin_altivec_vsrh vec_vsrh
+
+static vector short __ATTRS_o_ai vec_vsrh(vector short __a,
+ vector unsigned short __b) {
+ return __a >> (vector short)__b;
+}
+
+static vector unsigned short __ATTRS_o_ai vec_vsrh(vector unsigned short __a,
+ vector unsigned short __b) {
+ return __a >> __b;
+}
+
+/* vec_vsrw */
+
+#define __builtin_altivec_vsrw vec_vsrw
+
+static vector int __ATTRS_o_ai vec_vsrw(vector int __a,
+ vector unsigned int __b) {
+ return __a >> (vector int)__b;
+}
+
+static vector unsigned int __ATTRS_o_ai vec_vsrw(vector unsigned int __a,
+ vector unsigned int __b) {
+ return __a >> __b;
+}
+
+/* vec_sra */
+
+static vector signed char __ATTRS_o_ai vec_sra(vector signed char __a,
+ vector unsigned char __b) {
+ return (vector signed char)__builtin_altivec_vsrab((vector char)__a, __b);
+}
+
+static vector unsigned char __ATTRS_o_ai vec_sra(vector unsigned char __a,
+ vector unsigned char __b) {
+ return (vector unsigned char)__builtin_altivec_vsrab((vector char)__a, __b);
+}
+
+static vector short __ATTRS_o_ai vec_sra(vector short __a,
+ vector unsigned short __b) {
+ return __builtin_altivec_vsrah(__a, (vector unsigned short)__b);
+}
+
+static vector unsigned short __ATTRS_o_ai vec_sra(vector unsigned short __a,
+ vector unsigned short __b) {
+ return (vector unsigned short)__builtin_altivec_vsrah((vector short)__a, __b);
+}
+
+static vector int __ATTRS_o_ai vec_sra(vector int __a,
+ vector unsigned int __b) {
+ return __builtin_altivec_vsraw(__a, __b);
+}
+
+static vector unsigned int __ATTRS_o_ai vec_sra(vector unsigned int __a,
+ vector unsigned int __b) {
+ return (vector unsigned int)__builtin_altivec_vsraw((vector int)__a, __b);
+}
+
+#ifdef __POWER8_VECTOR__
+static vector signed long long __ATTRS_o_ai
+vec_sra(vector signed long long __a, vector unsigned long long __b) {
+ return __a >> __b;
+}
+
+static vector unsigned long long __ATTRS_o_ai
+vec_sra(vector unsigned long long __a, vector unsigned long long __b) {
+ return (vector unsigned long long)((vector signed long long)__a >> __b);
+}
+#endif
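+
+/* Editor's illustrative sketch, not part of the original header: vec_sra
+   is the arithmetic counterpart of vec_sr; the sign bit is replicated
+   into the vacated positions. */
+#if 0 /* usage example only */
+static vector signed int example_sra(void) {
+  vector signed int v = {-8, -8, 8, 8};
+  vector unsigned int n = {1, 2, 1, 2};
+  return vec_sra(v, n); /* {-4, -2, 4, 2} */
+}
+#endif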
+
+/* vec_vsrab */
+
+static vector signed char __ATTRS_o_ai vec_vsrab(vector signed char __a,
+ vector unsigned char __b) {
+ return (vector signed char)__builtin_altivec_vsrab((vector char)__a, __b);
+}
+
+static vector unsigned char __ATTRS_o_ai vec_vsrab(vector unsigned char __a,
+ vector unsigned char __b) {
+ return (vector unsigned char)__builtin_altivec_vsrab((vector char)__a, __b);
+}
+
+/* vec_vsrah */
+
+static vector short __ATTRS_o_ai vec_vsrah(vector short __a,
+ vector unsigned short __b) {
+ return __builtin_altivec_vsrah(__a, (vector unsigned short)__b);
+}
+
+static vector unsigned short __ATTRS_o_ai vec_vsrah(vector unsigned short __a,
+ vector unsigned short __b) {
+ return (vector unsigned short)__builtin_altivec_vsrah((vector short)__a, __b);
+}
+
+/* vec_vsraw */
+
+static vector int __ATTRS_o_ai vec_vsraw(vector int __a,
+ vector unsigned int __b) {
+ return __builtin_altivec_vsraw(__a, __b);
+}
+
+static vector unsigned int __ATTRS_o_ai vec_vsraw(vector unsigned int __a,
+ vector unsigned int __b) {
+ return (vector unsigned int)__builtin_altivec_vsraw((vector int)__a, __b);
+}
+
+/* vec_srl */
+
+static vector signed char __ATTRS_o_ai vec_srl(vector signed char __a,
+ vector unsigned char __b) {
+ return (vector signed char)__builtin_altivec_vsr((vector int)__a,
+ (vector int)__b);
+}
+
+static vector signed char __ATTRS_o_ai vec_srl(vector signed char __a,
+ vector unsigned short __b) {
+ return (vector signed char)__builtin_altivec_vsr((vector int)__a,
+ (vector int)__b);
+}
+
+static vector signed char __ATTRS_o_ai vec_srl(vector signed char __a,
+ vector unsigned int __b) {
+ return (vector signed char)__builtin_altivec_vsr((vector int)__a,
+ (vector int)__b);
+}
+
+static vector unsigned char __ATTRS_o_ai vec_srl(vector unsigned char __a,
+ vector unsigned char __b) {
+ return (vector unsigned char)__builtin_altivec_vsr((vector int)__a,
+ (vector int)__b);
+}
+
+static vector unsigned char __ATTRS_o_ai vec_srl(vector unsigned char __a,
+ vector unsigned short __b) {
+ return (vector unsigned char)__builtin_altivec_vsr((vector int)__a,
+ (vector int)__b);
+}
+
+static vector unsigned char __ATTRS_o_ai vec_srl(vector unsigned char __a,
+ vector unsigned int __b) {
+ return (vector unsigned char)__builtin_altivec_vsr((vector int)__a,
+ (vector int)__b);
+}
+
+static vector bool char __ATTRS_o_ai vec_srl(vector bool char __a,
+ vector unsigned char __b) {
+ return (vector bool char)__builtin_altivec_vsr((vector int)__a,
+ (vector int)__b);
+}
+
+static vector bool char __ATTRS_o_ai vec_srl(vector bool char __a,
+ vector unsigned short __b) {
+ return (vector bool char)__builtin_altivec_vsr((vector int)__a,
+ (vector int)__b);
+}
+
+static vector bool char __ATTRS_o_ai vec_srl(vector bool char __a,
+ vector unsigned int __b) {
+ return (vector bool char)__builtin_altivec_vsr((vector int)__a,
+ (vector int)__b);
+}
+
+static vector short __ATTRS_o_ai vec_srl(vector short __a,
+ vector unsigned char __b) {
+ return (vector short)__builtin_altivec_vsr((vector int)__a, (vector int)__b);
+}
+
+static vector short __ATTRS_o_ai vec_srl(vector short __a,
+ vector unsigned short __b) {
+ return (vector short)__builtin_altivec_vsr((vector int)__a, (vector int)__b);
+}
+
+static vector short __ATTRS_o_ai vec_srl(vector short __a,
+ vector unsigned int __b) {
+ return (vector short)__builtin_altivec_vsr((vector int)__a, (vector int)__b);
+}
+
+static vector unsigned short __ATTRS_o_ai vec_srl(vector unsigned short __a,
+ vector unsigned char __b) {
+ return (vector unsigned short)__builtin_altivec_vsr((vector int)__a,
+ (vector int)__b);
+}
+
+static vector unsigned short __ATTRS_o_ai vec_srl(vector unsigned short __a,
+ vector unsigned short __b) {
+ return (vector unsigned short)__builtin_altivec_vsr((vector int)__a,
+ (vector int)__b);
+}
+
+static vector unsigned short __ATTRS_o_ai vec_srl(vector unsigned short __a,
+ vector unsigned int __b) {
+ return (vector unsigned short)__builtin_altivec_vsr((vector int)__a,
+ (vector int)__b);
+}
+
+static vector bool short __ATTRS_o_ai vec_srl(vector bool short __a,
+ vector unsigned char __b) {
+ return (vector bool short)__builtin_altivec_vsr((vector int)__a,
+ (vector int)__b);
+}
+
+static vector bool short __ATTRS_o_ai vec_srl(vector bool short __a,
+ vector unsigned short __b) {
+ return (vector bool short)__builtin_altivec_vsr((vector int)__a,
+ (vector int)__b);
+}
+
+static vector bool short __ATTRS_o_ai vec_srl(vector bool short __a,
+ vector unsigned int __b) {
+ return (vector bool short)__builtin_altivec_vsr((vector int)__a,
+ (vector int)__b);
+}
+
+static vector pixel __ATTRS_o_ai vec_srl(vector pixel __a,
+ vector unsigned char __b) {
+ return (vector pixel)__builtin_altivec_vsr((vector int)__a, (vector int)__b);
+}
+
+static vector pixel __ATTRS_o_ai vec_srl(vector pixel __a,
+ vector unsigned short __b) {
+ return (vector pixel)__builtin_altivec_vsr((vector int)__a, (vector int)__b);
+}
+
+static vector pixel __ATTRS_o_ai vec_srl(vector pixel __a,
+ vector unsigned int __b) {
+ return (vector pixel)__builtin_altivec_vsr((vector int)__a, (vector int)__b);
+}
+
+static vector int __ATTRS_o_ai vec_srl(vector int __a,
+ vector unsigned char __b) {
+ return (vector int)__builtin_altivec_vsr(__a, (vector int)__b);
+}
+
+static vector int __ATTRS_o_ai vec_srl(vector int __a,
+ vector unsigned short __b) {
+ return (vector int)__builtin_altivec_vsr(__a, (vector int)__b);
+}
+
+static vector int __ATTRS_o_ai vec_srl(vector int __a,
+ vector unsigned int __b) {
+ return (vector int)__builtin_altivec_vsr(__a, (vector int)__b);
+}
+
+static vector unsigned int __ATTRS_o_ai vec_srl(vector unsigned int __a,
+ vector unsigned char __b) {
+ return (vector unsigned int)__builtin_altivec_vsr((vector int)__a,
+ (vector int)__b);
+}
+
+static vector unsigned int __ATTRS_o_ai vec_srl(vector unsigned int __a,
+ vector unsigned short __b) {
+ return (vector unsigned int)__builtin_altivec_vsr((vector int)__a,
+ (vector int)__b);
+}
+
+static vector unsigned int __ATTRS_o_ai vec_srl(vector unsigned int __a,
+ vector unsigned int __b) {
+ return (vector unsigned int)__builtin_altivec_vsr((vector int)__a,
+ (vector int)__b);
+}
+
+static vector bool int __ATTRS_o_ai vec_srl(vector bool int __a,
+ vector unsigned char __b) {
+ return (vector bool int)__builtin_altivec_vsr((vector int)__a,
+ (vector int)__b);
+}
+
+static vector bool int __ATTRS_o_ai vec_srl(vector bool int __a,
+ vector unsigned short __b) {
+ return (vector bool int)__builtin_altivec_vsr((vector int)__a,
+ (vector int)__b);
+}
+
+static vector bool int __ATTRS_o_ai vec_srl(vector bool int __a,
+ vector unsigned int __b) {
+ return (vector bool int)__builtin_altivec_vsr((vector int)__a,
+ (vector int)__b);
+}
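+
+/* Editor's illustrative sketch, not part of the original header: unlike
+   vec_sr, vec_srl shifts the whole 128-bit register right by 0-7 bits.
+   The count comes from the low three bits of the shift vector, and the
+   ISA requires those bits to be identical in every byte of __b
+   (vec_splat_u8 builds such a vector cheaply). */
+#if 0 /* usage example only */
+static vector unsigned char example_srl(vector unsigned char v) {
+  return vec_srl(v, vec_splat_u8(3)); /* entire vector >> 3 bits */
+}
+#endif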
+
+/* vec_vsr */
+
+static vector signed char __ATTRS_o_ai vec_vsr(vector signed char __a,
+ vector unsigned char __b) {
+ return (vector signed char)__builtin_altivec_vsr((vector int)__a,
+ (vector int)__b);
+}
+
+static vector signed char __ATTRS_o_ai vec_vsr(vector signed char __a,
+ vector unsigned short __b) {
+ return (vector signed char)__builtin_altivec_vsr((vector int)__a,
+ (vector int)__b);
+}
+
+static vector signed char __ATTRS_o_ai vec_vsr(vector signed char __a,
+ vector unsigned int __b) {
+ return (vector signed char)__builtin_altivec_vsr((vector int)__a,
+ (vector int)__b);
+}
+
+static vector unsigned char __ATTRS_o_ai vec_vsr(vector unsigned char __a,
+ vector unsigned char __b) {
+ return (vector unsigned char)__builtin_altivec_vsr((vector int)__a,
+ (vector int)__b);
+}
+
+static vector unsigned char __ATTRS_o_ai vec_vsr(vector unsigned char __a,
+ vector unsigned short __b) {
+ return (vector unsigned char)__builtin_altivec_vsr((vector int)__a,
+ (vector int)__b);
+}
+
+static vector unsigned char __ATTRS_o_ai vec_vsr(vector unsigned char __a,
+ vector unsigned int __b) {
+ return (vector unsigned char)__builtin_altivec_vsr((vector int)__a,
+ (vector int)__b);
+}
+
+static vector bool char __ATTRS_o_ai vec_vsr(vector bool char __a,
+ vector unsigned char __b) {
+ return (vector bool char)__builtin_altivec_vsr((vector int)__a,
+ (vector int)__b);
+}
+
+static vector bool char __ATTRS_o_ai vec_vsr(vector bool char __a,
+ vector unsigned short __b) {
+ return (vector bool char)__builtin_altivec_vsr((vector int)__a,
+ (vector int)__b);
+}
+
+static vector bool char __ATTRS_o_ai vec_vsr(vector bool char __a,
+ vector unsigned int __b) {
+ return (vector bool char)__builtin_altivec_vsr((vector int)__a,
+ (vector int)__b);
+}
+
+static vector short __ATTRS_o_ai vec_vsr(vector short __a,
+ vector unsigned char __b) {
+ return (vector short)__builtin_altivec_vsr((vector int)__a, (vector int)__b);
+}
+
+static vector short __ATTRS_o_ai vec_vsr(vector short __a,
+ vector unsigned short __b) {
+ return (vector short)__builtin_altivec_vsr((vector int)__a, (vector int)__b);
+}
+
+static vector short __ATTRS_o_ai vec_vsr(vector short __a,
+ vector unsigned int __b) {
+ return (vector short)__builtin_altivec_vsr((vector int)__a, (vector int)__b);
+}
+
+static vector unsigned short __ATTRS_o_ai vec_vsr(vector unsigned short __a,
+ vector unsigned char __b) {
+ return (vector unsigned short)__builtin_altivec_vsr((vector int)__a,
+ (vector int)__b);
+}
+
+static vector unsigned short __ATTRS_o_ai vec_vsr(vector unsigned short __a,
+ vector unsigned short __b) {
+ return (vector unsigned short)__builtin_altivec_vsr((vector int)__a,
+ (vector int)__b);
+}
+
+static vector unsigned short __ATTRS_o_ai vec_vsr(vector unsigned short __a,
+ vector unsigned int __b) {
+ return (vector unsigned short)__builtin_altivec_vsr((vector int)__a,
+ (vector int)__b);
+}
+
+static vector bool short __ATTRS_o_ai vec_vsr(vector bool short __a,
+ vector unsigned char __b) {
+ return (vector bool short)__builtin_altivec_vsr((vector int)__a,
+ (vector int)__b);
+}
+
+static vector bool short __ATTRS_o_ai vec_vsr(vector bool short __a,
+ vector unsigned short __b) {
+ return (vector bool short)__builtin_altivec_vsr((vector int)__a,
+ (vector int)__b);
+}
+
+static vector bool short __ATTRS_o_ai vec_vsr(vector bool short __a,
+ vector unsigned int __b) {
+ return (vector bool short)__builtin_altivec_vsr((vector int)__a,
+ (vector int)__b);
+}
+
+static vector pixel __ATTRS_o_ai vec_vsr(vector pixel __a,
+ vector unsigned char __b) {
+ return (vector pixel)__builtin_altivec_vsr((vector int)__a, (vector int)__b);
+}
+
+static vector pixel __ATTRS_o_ai vec_vsr(vector pixel __a,
+ vector unsigned short __b) {
+ return (vector pixel)__builtin_altivec_vsr((vector int)__a, (vector int)__b);
+}
+
+static vector pixel __ATTRS_o_ai vec_vsr(vector pixel __a,
+ vector unsigned int __b) {
+ return (vector pixel)__builtin_altivec_vsr((vector int)__a, (vector int)__b);
+}
+
+static vector int __ATTRS_o_ai vec_vsr(vector int __a,
+ vector unsigned char __b) {
+ return (vector int)__builtin_altivec_vsr(__a, (vector int)__b);
+}
+
+static vector int __ATTRS_o_ai vec_vsr(vector int __a,
+ vector unsigned short __b) {
+ return (vector int)__builtin_altivec_vsr(__a, (vector int)__b);
+}
+
+static vector int __ATTRS_o_ai vec_vsr(vector int __a,
+ vector unsigned int __b) {
+ return (vector int)__builtin_altivec_vsr(__a, (vector int)__b);
+}
+
+static vector unsigned int __ATTRS_o_ai vec_vsr(vector unsigned int __a,
+ vector unsigned char __b) {
+ return (vector unsigned int)__builtin_altivec_vsr((vector int)__a,
+ (vector int)__b);
+}
+
+static vector unsigned int __ATTRS_o_ai vec_vsr(vector unsigned int __a,
+ vector unsigned short __b) {
+ return (vector unsigned int)__builtin_altivec_vsr((vector int)__a,
+ (vector int)__b);
+}
+
+static vector unsigned int __ATTRS_o_ai vec_vsr(vector unsigned int __a,
+ vector unsigned int __b) {
+ return (vector unsigned int)__builtin_altivec_vsr((vector int)__a,
+ (vector int)__b);
+}
+
+static vector bool int __ATTRS_o_ai vec_vsr(vector bool int __a,
+ vector unsigned char __b) {
+ return (vector bool int)__builtin_altivec_vsr((vector int)__a,
+ (vector int)__b);
+}
+
+static vector bool int __ATTRS_o_ai vec_vsr(vector bool int __a,
+ vector unsigned short __b) {
+ return (vector bool int)__builtin_altivec_vsr((vector int)__a,
+ (vector int)__b);
+}
+
+static vector bool int __ATTRS_o_ai vec_vsr(vector bool int __a,
+ vector unsigned int __b) {
+ return (vector bool int)__builtin_altivec_vsr((vector int)__a,
+ (vector int)__b);
+}
+
+/* vec_sro */
+
+static vector signed char __ATTRS_o_ai vec_sro(vector signed char __a,
+ vector signed char __b) {
+ return (vector signed char)__builtin_altivec_vsro((vector int)__a,
+ (vector int)__b);
+}
+
+static vector signed char __ATTRS_o_ai vec_sro(vector signed char __a,
+ vector unsigned char __b) {
+ return (vector signed char)__builtin_altivec_vsro((vector int)__a,
+ (vector int)__b);
+}
+
+static vector unsigned char __ATTRS_o_ai vec_sro(vector unsigned char __a,
+ vector signed char __b) {
+ return (vector unsigned char)__builtin_altivec_vsro((vector int)__a,
+ (vector int)__b);
+}
+
+static vector unsigned char __ATTRS_o_ai vec_sro(vector unsigned char __a,
+ vector unsigned char __b) {
+ return (vector unsigned char)__builtin_altivec_vsro((vector int)__a,
+ (vector int)__b);
+}
+
+static vector short __ATTRS_o_ai vec_sro(vector short __a,
+ vector signed char __b) {
+ return (vector short)__builtin_altivec_vsro((vector int)__a, (vector int)__b);
+}
+
+static vector short __ATTRS_o_ai vec_sro(vector short __a,
+ vector unsigned char __b) {
+ return (vector short)__builtin_altivec_vsro((vector int)__a, (vector int)__b);
+}
+
+static vector unsigned short __ATTRS_o_ai vec_sro(vector unsigned short __a,
+ vector signed char __b) {
+ return (vector unsigned short)__builtin_altivec_vsro((vector int)__a,
+ (vector int)__b);
+}
+
+static vector unsigned short __ATTRS_o_ai vec_sro(vector unsigned short __a,
+ vector unsigned char __b) {
+ return (vector unsigned short)__builtin_altivec_vsro((vector int)__a,
+ (vector int)__b);
+}
+
+static vector pixel __ATTRS_o_ai vec_sro(vector pixel __a,
+ vector signed char __b) {
+ return (vector pixel)__builtin_altivec_vsro((vector int)__a, (vector int)__b);
+}
+
+static vector pixel __ATTRS_o_ai vec_sro(vector pixel __a,
+ vector unsigned char __b) {
+ return (vector pixel)__builtin_altivec_vsro((vector int)__a, (vector int)__b);
+}
+
+static vector int __ATTRS_o_ai vec_sro(vector int __a, vector signed char __b) {
+ return (vector int)__builtin_altivec_vsro(__a, (vector int)__b);
+}
+
+static vector int __ATTRS_o_ai vec_sro(vector int __a,
+ vector unsigned char __b) {
+ return (vector int)__builtin_altivec_vsro(__a, (vector int)__b);
+}
+
+static vector unsigned int __ATTRS_o_ai vec_sro(vector unsigned int __a,
+ vector signed char __b) {
+ return (vector unsigned int)__builtin_altivec_vsro((vector int)__a,
+ (vector int)__b);
+}
+
+static vector unsigned int __ATTRS_o_ai vec_sro(vector unsigned int __a,
+ vector unsigned char __b) {
+ return (vector unsigned int)__builtin_altivec_vsro((vector int)__a,
+ (vector int)__b);
+}
+
+static vector float __ATTRS_o_ai vec_sro(vector float __a,
+ vector signed char __b) {
+ return (vector float)__builtin_altivec_vsro((vector int)__a, (vector int)__b);
+}
+
+static vector float __ATTRS_o_ai vec_sro(vector float __a,
+ vector unsigned char __b) {
+ return (vector float)__builtin_altivec_vsro((vector int)__a, (vector int)__b);
+}
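+
+/* Editor's illustrative sketch, not part of the original header: vec_sro
+   shifts the whole vector right by 0-15 octets, with the count taken from
+   bits 121:124 of __b; splatting the count into every byte keeps the
+   encoding endian-neutral. Combined with vec_srl it gives a full
+   0-127 bit right shift. */
+#if 0 /* usage example only */
+static vector unsigned char example_sro(vector unsigned char v) {
+  /* every byte equal to 8 puts 1 in bits 121:124: a one-octet shift */
+  return vec_sro(v, vec_splat_u8(8));
+}
+#endif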
+
+/* vec_vsro */
+
+static vector signed char __ATTRS_o_ai vec_vsro(vector signed char __a,
+ vector signed char __b) {
+ return (vector signed char)__builtin_altivec_vsro((vector int)__a,
+ (vector int)__b);
+}
+
+static vector signed char __ATTRS_o_ai vec_vsro(vector signed char __a,
+ vector unsigned char __b) {
+ return (vector signed char)__builtin_altivec_vsro((vector int)__a,
+ (vector int)__b);
+}
+
+static vector unsigned char __ATTRS_o_ai vec_vsro(vector unsigned char __a,
+ vector signed char __b) {
+ return (vector unsigned char)__builtin_altivec_vsro((vector int)__a,
+ (vector int)__b);
+}
+
+static vector unsigned char __ATTRS_o_ai vec_vsro(vector unsigned char __a,
+ vector unsigned char __b) {
+ return (vector unsigned char)__builtin_altivec_vsro((vector int)__a,
+ (vector int)__b);
+}
+
+static vector short __ATTRS_o_ai vec_vsro(vector short __a,
+ vector signed char __b) {
+ return (vector short)__builtin_altivec_vsro((vector int)__a, (vector int)__b);
+}
+
+static vector short __ATTRS_o_ai vec_vsro(vector short __a,
+ vector unsigned char __b) {
+ return (vector short)__builtin_altivec_vsro((vector int)__a, (vector int)__b);
+}
+
+static vector unsigned short __ATTRS_o_ai vec_vsro(vector unsigned short __a,
+ vector signed char __b) {
+ return (vector unsigned short)__builtin_altivec_vsro((vector int)__a,
+ (vector int)__b);
+}
+
+static vector unsigned short __ATTRS_o_ai vec_vsro(vector unsigned short __a,
+ vector unsigned char __b) {
+ return (vector unsigned short)__builtin_altivec_vsro((vector int)__a,
+ (vector int)__b);
+}
+
+static vector pixel __ATTRS_o_ai vec_vsro(vector pixel __a,
+ vector signed char __b) {
+ return (vector pixel)__builtin_altivec_vsro((vector int)__a, (vector int)__b);
+}
+
+static vector pixel __ATTRS_o_ai vec_vsro(vector pixel __a,
+ vector unsigned char __b) {
+ return (vector pixel)__builtin_altivec_vsro((vector int)__a, (vector int)__b);
+}
+
+static vector int __ATTRS_o_ai vec_vsro(vector int __a,
+ vector signed char __b) {
+ return (vector int)__builtin_altivec_vsro(__a, (vector int)__b);
+}
+
+static vector int __ATTRS_o_ai vec_vsro(vector int __a,
+ vector unsigned char __b) {
+ return (vector int)__builtin_altivec_vsro(__a, (vector int)__b);
+}
+
+static vector unsigned int __ATTRS_o_ai vec_vsro(vector unsigned int __a,
+ vector signed char __b) {
+ return (vector unsigned int)__builtin_altivec_vsro((vector int)__a,
+ (vector int)__b);
+}
+
+static vector unsigned int __ATTRS_o_ai vec_vsro(vector unsigned int __a,
+ vector unsigned char __b) {
+ return (vector unsigned int)__builtin_altivec_vsro((vector int)__a,
+ (vector int)__b);
+}
+
+static vector float __ATTRS_o_ai vec_vsro(vector float __a,
+ vector signed char __b) {
+ return (vector float)__builtin_altivec_vsro((vector int)__a, (vector int)__b);
+}
+
+static vector float __ATTRS_o_ai vec_vsro(vector float __a,
+ vector unsigned char __b) {
+ return (vector float)__builtin_altivec_vsro((vector int)__a, (vector int)__b);
+}
+
+/* vec_st */
+
+static void __ATTRS_o_ai vec_st(vector signed char __a, int __b,
+ vector signed char *__c) {
+ __builtin_altivec_stvx((vector int)__a, __b, __c);
+}
+
+static void __ATTRS_o_ai vec_st(vector signed char __a, int __b,
+ signed char *__c) {
+ __builtin_altivec_stvx((vector int)__a, __b, __c);
+}
+
+static void __ATTRS_o_ai vec_st(vector unsigned char __a, int __b,
+ vector unsigned char *__c) {
+ __builtin_altivec_stvx((vector int)__a, __b, __c);
+}
+
+static void __ATTRS_o_ai vec_st(vector unsigned char __a, int __b,
+ unsigned char *__c) {
+ __builtin_altivec_stvx((vector int)__a, __b, __c);
+}
+
+static void __ATTRS_o_ai vec_st(vector bool char __a, int __b,
+ signed char *__c) {
+ __builtin_altivec_stvx((vector int)__a, __b, __c);
+}
+
+static void __ATTRS_o_ai vec_st(vector bool char __a, int __b,
+ unsigned char *__c) {
+ __builtin_altivec_stvx((vector int)__a, __b, __c);
+}
+
+static void __ATTRS_o_ai vec_st(vector bool char __a, int __b,
+ vector bool char *__c) {
+ __builtin_altivec_stvx((vector int)__a, __b, __c);
+}
+
+static void __ATTRS_o_ai vec_st(vector short __a, int __b, vector short *__c) {
+ __builtin_altivec_stvx((vector int)__a, __b, __c);
+}
+
+static void __ATTRS_o_ai vec_st(vector short __a, int __b, short *__c) {
+ __builtin_altivec_stvx((vector int)__a, __b, __c);
+}
+
+static void __ATTRS_o_ai vec_st(vector unsigned short __a, int __b,
+ vector unsigned short *__c) {
+ __builtin_altivec_stvx((vector int)__a, __b, __c);
+}
+
+static void __ATTRS_o_ai vec_st(vector unsigned short __a, int __b,
+ unsigned short *__c) {
+ __builtin_altivec_stvx((vector int)__a, __b, __c);
+}
+
+static void __ATTRS_o_ai vec_st(vector bool short __a, int __b, short *__c) {
+ __builtin_altivec_stvx((vector int)__a, __b, __c);
+}
+
+static void __ATTRS_o_ai vec_st(vector bool short __a, int __b,
+ unsigned short *__c) {
+ __builtin_altivec_stvx((vector int)__a, __b, __c);
+}
+
+static void __ATTRS_o_ai vec_st(vector bool short __a, int __b,
+ vector bool short *__c) {
+ __builtin_altivec_stvx((vector int)__a, __b, __c);
+}
+
+static void __ATTRS_o_ai vec_st(vector pixel __a, int __b, short *__c) {
+ __builtin_altivec_stvx((vector int)__a, __b, __c);
+}
+
+static void __ATTRS_o_ai vec_st(vector pixel __a, int __b,
+ unsigned short *__c) {
+ __builtin_altivec_stvx((vector int)__a, __b, __c);
+}
+
+static void __ATTRS_o_ai vec_st(vector pixel __a, int __b, vector pixel *__c) {
+ __builtin_altivec_stvx((vector int)__a, __b, __c);
+}
+
+static void __ATTRS_o_ai vec_st(vector int __a, int __b, vector int *__c) {
+ __builtin_altivec_stvx(__a, __b, __c);
+}
+
+static void __ATTRS_o_ai vec_st(vector int __a, int __b, int *__c) {
+ __builtin_altivec_stvx(__a, __b, __c);
+}
+
+static void __ATTRS_o_ai vec_st(vector unsigned int __a, int __b,
+ vector unsigned int *__c) {
+ __builtin_altivec_stvx((vector int)__a, __b, __c);
+}
+
+static void __ATTRS_o_ai vec_st(vector unsigned int __a, int __b,
+ unsigned int *__c) {
+ __builtin_altivec_stvx((vector int)__a, __b, __c);
+}
+
+static void __ATTRS_o_ai vec_st(vector bool int __a, int __b, int *__c) {
+ __builtin_altivec_stvx((vector int)__a, __b, __c);
+}
+
+static void __ATTRS_o_ai vec_st(vector bool int __a, int __b,
+ unsigned int *__c) {
+ __builtin_altivec_stvx((vector int)__a, __b, __c);
+}
+
+static void __ATTRS_o_ai vec_st(vector bool int __a, int __b,
+ vector bool int *__c) {
+ __builtin_altivec_stvx((vector int)__a, __b, __c);
+}
+
+static void __ATTRS_o_ai vec_st(vector float __a, int __b, vector float *__c) {
+ __builtin_altivec_stvx((vector int)__a, __b, __c);
+}
+
+static void __ATTRS_o_ai vec_st(vector float __a, int __b, float *__c) {
+ __builtin_altivec_stvx((vector int)__a, __b, __c);
+}
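+
+/* Editor's illustrative sketch, not part of the original header: vec_st
+   is a 16-byte store whose effective address (__b + __c) has its low four
+   bits cleared, so the pointer/offset pair must be 16-byte aligned to
+   store where expected. */
+#if 0 /* usage example only */
+static void example_st(vector float v, float *p) { /* p 16-byte aligned */
+  vec_st(v, 0, p);  /* store at p */
+  vec_st(v, 16, p); /* store at p + 4 floats */
+}
+#endif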
+
+/* vec_stvx */
+
+static void __ATTRS_o_ai vec_stvx(vector signed char __a, int __b,
+ vector signed char *__c) {
+ __builtin_altivec_stvx((vector int)__a, __b, __c);
+}
+
+static void __ATTRS_o_ai vec_stvx(vector signed char __a, int __b,
+ signed char *__c) {
+ __builtin_altivec_stvx((vector int)__a, __b, __c);
+}
+
+static void __ATTRS_o_ai vec_stvx(vector unsigned char __a, int __b,
+ vector unsigned char *__c) {
+ __builtin_altivec_stvx((vector int)__a, __b, __c);
+}
+
+static void __ATTRS_o_ai vec_stvx(vector unsigned char __a, int __b,
+ unsigned char *__c) {
+ __builtin_altivec_stvx((vector int)__a, __b, __c);
+}
+
+static void __ATTRS_o_ai vec_stvx(vector bool char __a, int __b,
+ signed char *__c) {
+ __builtin_altivec_stvx((vector int)__a, __b, __c);
+}
+
+static void __ATTRS_o_ai vec_stvx(vector bool char __a, int __b,
+ unsigned char *__c) {
+ __builtin_altivec_stvx((vector int)__a, __b, __c);
+}
+
+static void __ATTRS_o_ai vec_stvx(vector bool char __a, int __b,
+ vector bool char *__c) {
+ __builtin_altivec_stvx((vector int)__a, __b, __c);
+}
+
+static void __ATTRS_o_ai vec_stvx(vector short __a, int __b,
+ vector short *__c) {
+ __builtin_altivec_stvx((vector int)__a, __b, __c);
+}
+
+static void __ATTRS_o_ai vec_stvx(vector short __a, int __b, short *__c) {
+ __builtin_altivec_stvx((vector int)__a, __b, __c);
+}
+
+static void __ATTRS_o_ai vec_stvx(vector unsigned short __a, int __b,
+ vector unsigned short *__c) {
+ __builtin_altivec_stvx((vector int)__a, __b, __c);
+}
+
+static void __ATTRS_o_ai vec_stvx(vector unsigned short __a, int __b,
+ unsigned short *__c) {
+ __builtin_altivec_stvx((vector int)__a, __b, __c);
+}
+
+static void __ATTRS_o_ai vec_stvx(vector bool short __a, int __b, short *__c) {
+ __builtin_altivec_stvx((vector int)__a, __b, __c);
+}
+
+static void __ATTRS_o_ai vec_stvx(vector bool short __a, int __b,
+ unsigned short *__c) {
+ __builtin_altivec_stvx((vector int)__a, __b, __c);
+}
+
+static void __ATTRS_o_ai vec_stvx(vector bool short __a, int __b,
+ vector bool short *__c) {
+ __builtin_altivec_stvx((vector int)__a, __b, __c);
+}
+
+static void __ATTRS_o_ai vec_stvx(vector pixel __a, int __b, short *__c) {
+ __builtin_altivec_stvx((vector int)__a, __b, __c);
+}
+
+static void __ATTRS_o_ai vec_stvx(vector pixel __a, int __b,
+ unsigned short *__c) {
+ __builtin_altivec_stvx((vector int)__a, __b, __c);
+}
+
+static void __ATTRS_o_ai vec_stvx(vector pixel __a, int __b,
+ vector pixel *__c) {
+ __builtin_altivec_stvx((vector int)__a, __b, __c);
+}
+
+static void __ATTRS_o_ai vec_stvx(vector int __a, int __b, vector int *__c) {
+ __builtin_altivec_stvx(__a, __b, __c);
+}
+
+static void __ATTRS_o_ai vec_stvx(vector int __a, int __b, int *__c) {
+ __builtin_altivec_stvx(__a, __b, __c);
+}
+
+static void __ATTRS_o_ai vec_stvx(vector unsigned int __a, int __b,
+ vector unsigned int *__c) {
+ __builtin_altivec_stvx((vector int)__a, __b, __c);
+}
+
+static void __ATTRS_o_ai vec_stvx(vector unsigned int __a, int __b,
+ unsigned int *__c) {
+ __builtin_altivec_stvx((vector int)__a, __b, __c);
+}
+
+static void __ATTRS_o_ai vec_stvx(vector bool int __a, int __b, int *__c) {
+ __builtin_altivec_stvx((vector int)__a, __b, __c);
+}
+
+static void __ATTRS_o_ai vec_stvx(vector bool int __a, int __b,
+ unsigned int *__c) {
+ __builtin_altivec_stvx((vector int)__a, __b, __c);
+}
+
+static void __ATTRS_o_ai vec_stvx(vector bool int __a, int __b,
+ vector bool int *__c) {
+ __builtin_altivec_stvx((vector int)__a, __b, __c);
+}
+
+static void __ATTRS_o_ai vec_stvx(vector float __a, int __b,
+ vector float *__c) {
+ __builtin_altivec_stvx((vector int)__a, __b, __c);
+}
+
+static void __ATTRS_o_ai vec_stvx(vector float __a, int __b, float *__c) {
+ __builtin_altivec_stvx((vector int)__a, __b, __c);
+}
+
+/* vec_ste */
+
+static void __ATTRS_o_ai vec_ste(vector signed char __a, int __b,
+ signed char *__c) {
+ __builtin_altivec_stvebx((vector char)__a, __b, __c);
+}
+
+static void __ATTRS_o_ai vec_ste(vector unsigned char __a, int __b,
+ unsigned char *__c) {
+ __builtin_altivec_stvebx((vector char)__a, __b, __c);
+}
+
+static void __ATTRS_o_ai vec_ste(vector bool char __a, int __b,
+ signed char *__c) {
+ __builtin_altivec_stvebx((vector char)__a, __b, __c);
+}
+
+static void __ATTRS_o_ai vec_ste(vector bool char __a, int __b,
+ unsigned char *__c) {
+ __builtin_altivec_stvebx((vector char)__a, __b, __c);
+}
+
+static void __ATTRS_o_ai vec_ste(vector short __a, int __b, short *__c) {
+ __builtin_altivec_stvehx(__a, __b, __c);
+}
+
+static void __ATTRS_o_ai vec_ste(vector unsigned short __a, int __b,
+ unsigned short *__c) {
+ __builtin_altivec_stvehx((vector short)__a, __b, __c);
+}
+
+static void __ATTRS_o_ai vec_ste(vector bool short __a, int __b, short *__c) {
+ __builtin_altivec_stvehx((vector short)__a, __b, __c);
+}
+
+static void __ATTRS_o_ai vec_ste(vector bool short __a, int __b,
+ unsigned short *__c) {
+ __builtin_altivec_stvehx((vector short)__a, __b, __c);
+}
+
+static void __ATTRS_o_ai vec_ste(vector pixel __a, int __b, short *__c) {
+ __builtin_altivec_stvehx((vector short)__a, __b, __c);
+}
+
+static void __ATTRS_o_ai vec_ste(vector pixel __a, int __b,
+ unsigned short *__c) {
+ __builtin_altivec_stvehx((vector short)__a, __b, __c);
+}
+
+static void __ATTRS_o_ai vec_ste(vector int __a, int __b, int *__c) {
+ __builtin_altivec_stvewx(__a, __b, __c);
+}
+
+static void __ATTRS_o_ai vec_ste(vector unsigned int __a, int __b,
+ unsigned int *__c) {
+ __builtin_altivec_stvewx((vector int)__a, __b, __c);
+}
+
+static void __ATTRS_o_ai vec_ste(vector bool int __a, int __b, int *__c) {
+ __builtin_altivec_stvewx((vector int)__a, __b, __c);
+}
+
+static void __ATTRS_o_ai vec_ste(vector bool int __a, int __b,
+ unsigned int *__c) {
+ __builtin_altivec_stvewx((vector int)__a, __b, __c);
+}
+
+static void __ATTRS_o_ai vec_ste(vector float __a, int __b, float *__c) {
+ __builtin_altivec_stvewx((vector int)__a, __b, __c);
+}
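+
+/* Editor's illustrative sketch, not part of the original header: vec_ste
+   stores a single element rather than the whole vector; which element is
+   written depends on the effective address modulo 16. */
+#if 0 /* usage example only */
+static void example_ste(vector int v, int *p) {
+  vec_ste(v, 4, p); /* stores exactly one word of v at (char *)p + 4 */
+}
+#endif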
+
+/* vec_stvebx */
+
+static void __ATTRS_o_ai vec_stvebx(vector signed char __a, int __b,
+ signed char *__c) {
+ __builtin_altivec_stvebx((vector char)__a, __b, __c);
+}
+
+static void __ATTRS_o_ai vec_stvebx(vector unsigned char __a, int __b,
+ unsigned char *__c) {
+ __builtin_altivec_stvebx((vector char)__a, __b, __c);
+}
+
+static void __ATTRS_o_ai vec_stvebx(vector bool char __a, int __b,
+ signed char *__c) {
+ __builtin_altivec_stvebx((vector char)__a, __b, __c);
+}
+
+static void __ATTRS_o_ai vec_stvebx(vector bool char __a, int __b,
+ unsigned char *__c) {
+ __builtin_altivec_stvebx((vector char)__a, __b, __c);
+}
+
+/* vec_stvehx */
+
+static void __ATTRS_o_ai vec_stvehx(vector short __a, int __b, short *__c) {
+ __builtin_altivec_stvehx(__a, __b, __c);
+}
+
+static void __ATTRS_o_ai vec_stvehx(vector unsigned short __a, int __b,
+ unsigned short *__c) {
+ __builtin_altivec_stvehx((vector short)__a, __b, __c);
+}
+
+static void __ATTRS_o_ai vec_stvehx(vector bool short __a, int __b,
+ short *__c) {
+ __builtin_altivec_stvehx((vector short)__a, __b, __c);
+}
+
+static void __ATTRS_o_ai vec_stvehx(vector bool short __a, int __b,
+ unsigned short *__c) {
+ __builtin_altivec_stvehx((vector short)__a, __b, __c);
+}
+
+static void __ATTRS_o_ai vec_stvehx(vector pixel __a, int __b, short *__c) {
+ __builtin_altivec_stvehx((vector short)__a, __b, __c);
+}
+
+static void __ATTRS_o_ai vec_stvehx(vector pixel __a, int __b,
+ unsigned short *__c) {
+ __builtin_altivec_stvehx((vector short)__a, __b, __c);
+}
+
+/* vec_stvewx */
+
+static void __ATTRS_o_ai vec_stvewx(vector int __a, int __b, int *__c) {
+ __builtin_altivec_stvewx(__a, __b, __c);
+}
+
+static void __ATTRS_o_ai vec_stvewx(vector unsigned int __a, int __b,
+ unsigned int *__c) {
+ __builtin_altivec_stvewx((vector int)__a, __b, __c);
+}
+
+static void __ATTRS_o_ai vec_stvewx(vector bool int __a, int __b, int *__c) {
+ __builtin_altivec_stvewx((vector int)__a, __b, __c);
+}
+
+static void __ATTRS_o_ai vec_stvewx(vector bool int __a, int __b,
+ unsigned int *__c) {
+ __builtin_altivec_stvewx((vector int)__a, __b, __c);
+}
+
+static void __ATTRS_o_ai vec_stvewx(vector float __a, int __b, float *__c) {
+ __builtin_altivec_stvewx((vector int)__a, __b, __c);
+}
+
+/* vec_stl */
+
+static void __ATTRS_o_ai vec_stl(vector signed char __a, int __b,
+ vector signed char *__c) {
+ __builtin_altivec_stvxl((vector int)__a, __b, __c);
+}
+
+static void __ATTRS_o_ai vec_stl(vector signed char __a, int __b,
+ signed char *__c) {
+ __builtin_altivec_stvxl((vector int)__a, __b, __c);
+}
+
+static void __ATTRS_o_ai vec_stl(vector unsigned char __a, int __b,
+ vector unsigned char *__c) {
+ __builtin_altivec_stvxl((vector int)__a, __b, __c);
+}
+
+static void __ATTRS_o_ai vec_stl(vector unsigned char __a, int __b,
+ unsigned char *__c) {
+ __builtin_altivec_stvxl((vector int)__a, __b, __c);
+}
+
+static void __ATTRS_o_ai vec_stl(vector bool char __a, int __b,
+ signed char *__c) {
+ __builtin_altivec_stvxl((vector int)__a, __b, __c);
+}
+
+static void __ATTRS_o_ai vec_stl(vector bool char __a, int __b,
+ unsigned char *__c) {
+ __builtin_altivec_stvxl((vector int)__a, __b, __c);
+}
+
+static void __ATTRS_o_ai vec_stl(vector bool char __a, int __b,
+ vector bool char *__c) {
+ __builtin_altivec_stvxl((vector int)__a, __b, __c);
+}
+
+static void __ATTRS_o_ai vec_stl(vector short __a, int __b, vector short *__c) {
+ __builtin_altivec_stvxl((vector int)__a, __b, __c);
+}
+
+static void __ATTRS_o_ai vec_stl(vector short __a, int __b, short *__c) {
+ __builtin_altivec_stvxl((vector int)__a, __b, __c);
+}
+
+static void __ATTRS_o_ai vec_stl(vector unsigned short __a, int __b,
+ vector unsigned short *__c) {
+ __builtin_altivec_stvxl((vector int)__a, __b, __c);
+}
+
+static void __ATTRS_o_ai vec_stl(vector unsigned short __a, int __b,
+ unsigned short *__c) {
+ __builtin_altivec_stvxl((vector int)__a, __b, __c);
+}
+
+static void __ATTRS_o_ai vec_stl(vector bool short __a, int __b, short *__c) {
+ __builtin_altivec_stvxl((vector int)__a, __b, __c);
+}
+
+static void __ATTRS_o_ai vec_stl(vector bool short __a, int __b,
+ unsigned short *__c) {
+ __builtin_altivec_stvxl((vector int)__a, __b, __c);
+}
+
+static void __ATTRS_o_ai vec_stl(vector bool short __a, int __b,
+ vector bool short *__c) {
+ __builtin_altivec_stvxl((vector int)__a, __b, __c);
+}
+
+static void __ATTRS_o_ai vec_stl(vector pixel __a, int __b, short *__c) {
+ __builtin_altivec_stvxl((vector int)__a, __b, __c);
+}
+
+static void __ATTRS_o_ai vec_stl(vector pixel __a, int __b,
+ unsigned short *__c) {
+ __builtin_altivec_stvxl((vector int)__a, __b, __c);
+}
+
+static void __ATTRS_o_ai vec_stl(vector pixel __a, int __b, vector pixel *__c) {
+ __builtin_altivec_stvxl((vector int)__a, __b, __c);
+}
+
+static void __ATTRS_o_ai vec_stl(vector int __a, int __b, vector int *__c) {
+ __builtin_altivec_stvxl(__a, __b, __c);
+}
+
+static void __ATTRS_o_ai vec_stl(vector int __a, int __b, int *__c) {
+ __builtin_altivec_stvxl(__a, __b, __c);
+}
+
+static void __ATTRS_o_ai vec_stl(vector unsigned int __a, int __b,
+ vector unsigned int *__c) {
+ __builtin_altivec_stvxl((vector int)__a, __b, __c);
+}
+
+static void __ATTRS_o_ai vec_stl(vector unsigned int __a, int __b,
+ unsigned int *__c) {
+ __builtin_altivec_stvxl((vector int)__a, __b, __c);
+}
+
+static void __ATTRS_o_ai vec_stl(vector bool int __a, int __b, int *__c) {
+ __builtin_altivec_stvxl((vector int)__a, __b, __c);
+}
+
+static void __ATTRS_o_ai vec_stl(vector bool int __a, int __b,
+ unsigned int *__c) {
+ __builtin_altivec_stvxl((vector int)__a, __b, __c);
+}
+
+static void __ATTRS_o_ai vec_stl(vector bool int __a, int __b,
+ vector bool int *__c) {
+ __builtin_altivec_stvxl((vector int)__a, __b, __c);
+}
+
+static void __ATTRS_o_ai vec_stl(vector float __a, int __b, vector float *__c) {
+ __builtin_altivec_stvxl((vector int)__a, __b, __c);
+}
+
+static void __ATTRS_o_ai vec_stl(vector float __a, int __b, float *__c) {
+ __builtin_altivec_stvxl((vector int)__a, __b, __c);
+}
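+
+/* Editor's illustrative note, not part of the original header: vec_stl is
+   the same aligned 16-byte store as vec_st, but stvxl marks the cache
+   line least-recently-used, a hint for data that will not be re-read
+   soon. */
+#if 0 /* usage example only */
+static void example_stl(vector int v, int *p) { /* p 16-byte aligned */
+  vec_stl(v, 0, p); /* streaming-style store of v at p */
+}
+#endif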
+
+/* vec_stvxl */
+
+static void __ATTRS_o_ai vec_stvxl(vector signed char __a, int __b,
+ vector signed char *__c) {
+ __builtin_altivec_stvxl((vector int)__a, __b, __c);
+}
+
+static void __ATTRS_o_ai vec_stvxl(vector signed char __a, int __b,
+ signed char *__c) {
+ __builtin_altivec_stvxl((vector int)__a, __b, __c);
+}
+
+static void __ATTRS_o_ai vec_stvxl(vector unsigned char __a, int __b,
+ vector unsigned char *__c) {
+ __builtin_altivec_stvxl((vector int)__a, __b, __c);
+}
+
+static void __ATTRS_o_ai vec_stvxl(vector unsigned char __a, int __b,
+ unsigned char *__c) {
+ __builtin_altivec_stvxl((vector int)__a, __b, __c);
+}
+
+static void __ATTRS_o_ai vec_stvxl(vector bool char __a, int __b,
+ signed char *__c) {
+ __builtin_altivec_stvxl((vector int)__a, __b, __c);
+}
+
+static void __ATTRS_o_ai vec_stvxl(vector bool char __a, int __b,
+ unsigned char *__c) {
+ __builtin_altivec_stvxl((vector int)__a, __b, __c);
+}
+
+static void __ATTRS_o_ai vec_stvxl(vector bool char __a, int __b,
+ vector bool char *__c) {
+ __builtin_altivec_stvxl((vector int)__a, __b, __c);
+}
+
+static void __ATTRS_o_ai vec_stvxl(vector short __a, int __b,
+ vector short *__c) {
+ __builtin_altivec_stvxl((vector int)__a, __b, __c);
+}
+
+static void __ATTRS_o_ai vec_stvxl(vector short __a, int __b, short *__c) {
+ __builtin_altivec_stvxl((vector int)__a, __b, __c);
+}
+
+static void __ATTRS_o_ai vec_stvxl(vector unsigned short __a, int __b,
+ vector unsigned short *__c) {
+ __builtin_altivec_stvxl((vector int)__a, __b, __c);
+}
+
+static void __ATTRS_o_ai vec_stvxl(vector unsigned short __a, int __b,
+ unsigned short *__c) {
+ __builtin_altivec_stvxl((vector int)__a, __b, __c);
+}
+
+static void __ATTRS_o_ai vec_stvxl(vector bool short __a, int __b, short *__c) {
+ __builtin_altivec_stvxl((vector int)__a, __b, __c);
+}
+
+static void __ATTRS_o_ai vec_stvxl(vector bool short __a, int __b,
+ unsigned short *__c) {
+ __builtin_altivec_stvxl((vector int)__a, __b, __c);
+}
+
+static void __ATTRS_o_ai vec_stvxl(vector bool short __a, int __b,
+ vector bool short *__c) {
+ __builtin_altivec_stvxl((vector int)__a, __b, __c);
+}
+
+static void __ATTRS_o_ai vec_stvxl(vector pixel __a, int __b, short *__c) {
+ __builtin_altivec_stvxl((vector int)__a, __b, __c);
+}
+
+static void __ATTRS_o_ai vec_stvxl(vector pixel __a, int __b,
+ unsigned short *__c) {
+ __builtin_altivec_stvxl((vector int)__a, __b, __c);
+}
+
+static void __ATTRS_o_ai vec_stvxl(vector pixel __a, int __b,
+ vector pixel *__c) {
+ __builtin_altivec_stvxl((vector int)__a, __b, __c);
+}
+
+static void __ATTRS_o_ai vec_stvxl(vector int __a, int __b, vector int *__c) {
+ __builtin_altivec_stvxl(__a, __b, __c);
+}
+
+static void __ATTRS_o_ai vec_stvxl(vector int __a, int __b, int *__c) {
+ __builtin_altivec_stvxl(__a, __b, __c);
+}
+
+static void __ATTRS_o_ai vec_stvxl(vector unsigned int __a, int __b,
+ vector unsigned int *__c) {
+ __builtin_altivec_stvxl((vector int)__a, __b, __c);
+}
+
+static void __ATTRS_o_ai vec_stvxl(vector unsigned int __a, int __b,
+ unsigned int *__c) {
+ __builtin_altivec_stvxl((vector int)__a, __b, __c);
+}
+
+static void __ATTRS_o_ai vec_stvxl(vector bool int __a, int __b, int *__c) {
+ __builtin_altivec_stvxl((vector int)__a, __b, __c);
+}
+
+static void __ATTRS_o_ai vec_stvxl(vector bool int __a, int __b,
+ unsigned int *__c) {
+ __builtin_altivec_stvxl((vector int)__a, __b, __c);
+}
+
+static void __ATTRS_o_ai vec_stvxl(vector bool int __a, int __b,
+ vector bool int *__c) {
+ __builtin_altivec_stvxl((vector int)__a, __b, __c);
+}
+
+static void __ATTRS_o_ai vec_stvxl(vector float __a, int __b,
+ vector float *__c) {
+ __builtin_altivec_stvxl((vector int)__a, __b, __c);
+}
+
+static void __ATTRS_o_ai vec_stvxl(vector float __a, int __b, float *__c) {
+ __builtin_altivec_stvxl((vector int)__a, __b, __c);
+}
+
+/* vec_sub */
+
+static vector signed char __ATTRS_o_ai vec_sub(vector signed char __a,
+ vector signed char __b) {
+ return __a - __b;
+}
+
+static vector signed char __ATTRS_o_ai vec_sub(vector bool char __a,
+ vector signed char __b) {
+ return (vector signed char)__a - __b;
+}
+
+static vector signed char __ATTRS_o_ai vec_sub(vector signed char __a,
+ vector bool char __b) {
+ return __a - (vector signed char)__b;
+}
+
+static vector unsigned char __ATTRS_o_ai vec_sub(vector unsigned char __a,
+ vector unsigned char __b) {
+ return __a - __b;
+}
+
+static vector unsigned char __ATTRS_o_ai vec_sub(vector bool char __a,
+ vector unsigned char __b) {
+ return (vector unsigned char)__a - __b;
+}
+
+static vector unsigned char __ATTRS_o_ai vec_sub(vector unsigned char __a,
+ vector bool char __b) {
+ return __a - (vector unsigned char)__b;
+}
+
+static vector short __ATTRS_o_ai vec_sub(vector short __a, vector short __b) {
+ return __a - __b;
+}
+
+static vector short __ATTRS_o_ai vec_sub(vector bool short __a,
+ vector short __b) {
+ return (vector short)__a - __b;
+}
+
+static vector short __ATTRS_o_ai vec_sub(vector short __a,
+ vector bool short __b) {
+ return __a - (vector short)__b;
+}
+
+static vector unsigned short __ATTRS_o_ai vec_sub(vector unsigned short __a,
+ vector unsigned short __b) {
+ return __a - __b;
+}
+
+static vector unsigned short __ATTRS_o_ai vec_sub(vector bool short __a,
+ vector unsigned short __b) {
+ return (vector unsigned short)__a - __b;
+}
+
+static vector unsigned short __ATTRS_o_ai vec_sub(vector unsigned short __a,
+ vector bool short __b) {
+ return __a - (vector unsigned short)__b;
+}
+
+static vector int __ATTRS_o_ai vec_sub(vector int __a, vector int __b) {
+ return __a - __b;
+}
+
+static vector int __ATTRS_o_ai vec_sub(vector bool int __a, vector int __b) {
+ return (vector int)__a - __b;
+}
+
+static vector int __ATTRS_o_ai vec_sub(vector int __a, vector bool int __b) {
+ return __a - (vector int)__b;
+}
+
+static vector unsigned int __ATTRS_o_ai vec_sub(vector unsigned int __a,
+ vector unsigned int __b) {
+ return __a - __b;
+}
+
+static vector unsigned int __ATTRS_o_ai vec_sub(vector bool int __a,
+ vector unsigned int __b) {
+ return (vector unsigned int)__a - __b;
+}
+
+static vector unsigned int __ATTRS_o_ai vec_sub(vector unsigned int __a,
+ vector bool int __b) {
+ return __a - (vector unsigned int)__b;
+}
+
+#if defined(__POWER8_VECTOR__) && defined(__powerpc64__)
+static vector signed __int128 __ATTRS_o_ai vec_sub(vector signed __int128 __a,
+ vector signed __int128 __b) {
+ return __a - __b;
+}
+
+static vector unsigned __int128 __ATTRS_o_ai
+vec_sub(vector unsigned __int128 __a, vector unsigned __int128 __b) {
+ return __a - __b;
+}
+#endif // defined(__POWER8_VECTOR__) && defined(__powerpc64__)
+
+#ifdef __VSX__
+static vector signed long long __ATTRS_o_ai
+vec_sub(vector signed long long __a, vector signed long long __b) {
+ return __a - __b;
+}
+
+static vector unsigned long long __ATTRS_o_ai
+vec_sub(vector unsigned long long __a, vector unsigned long long __b) {
+ return __a - __b;
+}
+
+static vector double __ATTRS_o_ai
+vec_sub(vector double __a, vector double __b) {
+ return __a - __b;
+}
+#endif
+
+static vector float __ATTRS_o_ai vec_sub(vector float __a, vector float __b) {
+ return __a - __b;
+}
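+
+/* Editor's illustrative sketch, not part of the original header: vec_sub
+   is modular, so lanes wrap on underflow; contrast with the saturating
+   vec_subs further below. */
+#if 0 /* usage example only */
+static vector unsigned char example_sub_wrap(void) {
+  return vec_sub(vec_splat_u8(0), vec_splat_u8(1)); /* every lane 0xFF */
+}
+#endif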
+
+/* vec_vsububm */
+
+#define __builtin_altivec_vsububm vec_vsububm
+
+static vector signed char __ATTRS_o_ai vec_vsububm(vector signed char __a,
+ vector signed char __b) {
+ return __a - __b;
+}
+
+static vector signed char __ATTRS_o_ai vec_vsububm(vector bool char __a,
+ vector signed char __b) {
+ return (vector signed char)__a - __b;
+}
+
+static vector signed char __ATTRS_o_ai vec_vsububm(vector signed char __a,
+ vector bool char __b) {
+ return __a - (vector signed char)__b;
+}
+
+static vector unsigned char __ATTRS_o_ai vec_vsububm(vector unsigned char __a,
+ vector unsigned char __b) {
+ return __a - __b;
+}
+
+static vector unsigned char __ATTRS_o_ai vec_vsububm(vector bool char __a,
+ vector unsigned char __b) {
+ return (vector unsigned char)__a - __b;
+}
+
+static vector unsigned char __ATTRS_o_ai vec_vsububm(vector unsigned char __a,
+ vector bool char __b) {
+ return __a - (vector unsigned char)__b;
+}
+
+/* vec_vsubuhm */
+
+#define __builtin_altivec_vsubuhm vec_vsubuhm
+
+static vector short __ATTRS_o_ai vec_vsubuhm(vector short __a,
+ vector short __b) {
+ return __a - __b;
+}
+
+static vector short __ATTRS_o_ai vec_vsubuhm(vector bool short __a,
+ vector short __b) {
+ return (vector short)__a - __b;
+}
+
+static vector short __ATTRS_o_ai vec_vsubuhm(vector short __a,
+ vector bool short __b) {
+ return __a - (vector short)__b;
+}
+
+static vector unsigned short __ATTRS_o_ai
+vec_vsubuhm(vector unsigned short __a, vector unsigned short __b) {
+ return __a - __b;
+}
+
+static vector unsigned short __ATTRS_o_ai
+vec_vsubuhm(vector bool short __a, vector unsigned short __b) {
+ return (vector unsigned short)__a - __b;
+}
+
+static vector unsigned short __ATTRS_o_ai vec_vsubuhm(vector unsigned short __a,
+ vector bool short __b) {
+ return __a - (vector unsigned short)__b;
+}
+
+/* vec_vsubuwm */
+
+#define __builtin_altivec_vsubuwm vec_vsubuwm
+
+static vector int __ATTRS_o_ai vec_vsubuwm(vector int __a, vector int __b) {
+ return __a - __b;
+}
+
+static vector int __ATTRS_o_ai vec_vsubuwm(vector bool int __a,
+ vector int __b) {
+ return (vector int)__a - __b;
+}
+
+static vector int __ATTRS_o_ai vec_vsubuwm(vector int __a,
+ vector bool int __b) {
+ return __a - (vector int)__b;
+}
+
+static vector unsigned int __ATTRS_o_ai vec_vsubuwm(vector unsigned int __a,
+ vector unsigned int __b) {
+ return __a - __b;
+}
+
+static vector unsigned int __ATTRS_o_ai vec_vsubuwm(vector bool int __a,
+ vector unsigned int __b) {
+ return (vector unsigned int)__a - __b;
+}
+
+static vector unsigned int __ATTRS_o_ai vec_vsubuwm(vector unsigned int __a,
+ vector bool int __b) {
+ return __a - (vector unsigned int)__b;
+}
+
+/* vec_vsubfp */
+
+#define __builtin_altivec_vsubfp vec_vsubfp
+
+static vector float __attribute__((__always_inline__))
+vec_vsubfp(vector float __a, vector float __b) {
+ return __a - __b;
+}
+
+/* vec_subc */
+
+static vector unsigned int __ATTRS_o_ai vec_subc(vector unsigned int __a,
+ vector unsigned int __b) {
+ return __builtin_altivec_vsubcuw(__a, __b);
+}
+
+#if defined(__POWER8_VECTOR__) && defined(__powerpc64__)
+static vector unsigned __int128 __ATTRS_o_ai
+vec_subc(vector unsigned __int128 __a, vector unsigned __int128 __b) {
+ return __builtin_altivec_vsubcuq(__a, __b);
+}
+
+static vector signed __int128 __ATTRS_o_ai
+vec_subc(vector signed __int128 __a, vector signed __int128 __b) {
+ return __builtin_altivec_vsubcuq(__a, __b);
+}
+#endif // defined(__POWER8_VECTOR__) && defined(__powerpc64__)
+
+/* vec_vsubcuw */
+
+static vector unsigned int __attribute__((__always_inline__))
+vec_vsubcuw(vector unsigned int __a, vector unsigned int __b) {
+ return __builtin_altivec_vsubcuw(__a, __b);
+}
+
+/* vec_subs */
+
+static vector signed char __ATTRS_o_ai vec_subs(vector signed char __a,
+ vector signed char __b) {
+ return __builtin_altivec_vsubsbs(__a, __b);
+}
+
+static vector signed char __ATTRS_o_ai vec_subs(vector bool char __a,
+ vector signed char __b) {
+ return __builtin_altivec_vsubsbs((vector signed char)__a, __b);
+}
+
+static vector signed char __ATTRS_o_ai vec_subs(vector signed char __a,
+ vector bool char __b) {
+ return __builtin_altivec_vsubsbs(__a, (vector signed char)__b);
+}
+
+static vector unsigned char __ATTRS_o_ai vec_subs(vector unsigned char __a,
+ vector unsigned char __b) {
+ return __builtin_altivec_vsububs(__a, __b);
+}
+
+static vector unsigned char __ATTRS_o_ai vec_subs(vector bool char __a,
+ vector unsigned char __b) {
+ return __builtin_altivec_vsububs((vector unsigned char)__a, __b);
+}
+
+static vector unsigned char __ATTRS_o_ai vec_subs(vector unsigned char __a,
+ vector bool char __b) {
+ return __builtin_altivec_vsububs(__a, (vector unsigned char)__b);
+}
+
+static vector short __ATTRS_o_ai vec_subs(vector short __a, vector short __b) {
+ return __builtin_altivec_vsubshs(__a, __b);
+}
+
+static vector short __ATTRS_o_ai vec_subs(vector bool short __a,
+ vector short __b) {
+ return __builtin_altivec_vsubshs((vector short)__a, __b);
+}
+
+static vector short __ATTRS_o_ai vec_subs(vector short __a,
+ vector bool short __b) {
+ return __builtin_altivec_vsubshs(__a, (vector short)__b);
+}
+
+static vector unsigned short __ATTRS_o_ai vec_subs(vector unsigned short __a,
+ vector unsigned short __b) {
+ return __builtin_altivec_vsubuhs(__a, __b);
+}
+
+static vector unsigned short __ATTRS_o_ai vec_subs(vector bool short __a,
+ vector unsigned short __b) {
+ return __builtin_altivec_vsubuhs((vector unsigned short)__a, __b);
+}
+
+static vector unsigned short __ATTRS_o_ai vec_subs(vector unsigned short __a,
+ vector bool short __b) {
+ return __builtin_altivec_vsubuhs(__a, (vector unsigned short)__b);
+}
+
+static vector int __ATTRS_o_ai vec_subs(vector int __a, vector int __b) {
+ return __builtin_altivec_vsubsws(__a, __b);
+}
+
+static vector int __ATTRS_o_ai vec_subs(vector bool int __a, vector int __b) {
+ return __builtin_altivec_vsubsws((vector int)__a, __b);
+}
+
+static vector int __ATTRS_o_ai vec_subs(vector int __a, vector bool int __b) {
+ return __builtin_altivec_vsubsws(__a, (vector int)__b);
+}
+
+static vector unsigned int __ATTRS_o_ai vec_subs(vector unsigned int __a,
+ vector unsigned int __b) {
+ return __builtin_altivec_vsubuws(__a, __b);
+}
+
+static vector unsigned int __ATTRS_o_ai vec_subs(vector bool int __a,
+ vector unsigned int __b) {
+ return __builtin_altivec_vsubuws((vector unsigned int)__a, __b);
+}
+
+static vector unsigned int __ATTRS_o_ai vec_subs(vector unsigned int __a,
+ vector bool int __b) {
+ return __builtin_altivec_vsubuws(__a, (vector unsigned int)__b);
+}
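+
+/* Editor's illustrative sketch, not part of the original header: vec_subs
+   clamps instead of wrapping; unsigned results stop at 0 and signed
+   results at the type's limits. */
+#if 0 /* usage example only */
+static vector unsigned char example_subs_sat(void) {
+  return vec_subs(vec_splat_u8(0), vec_splat_u8(1)); /* every lane 0x00 */
+}
+#endif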
+
+/* vec_vsubsbs */
+
+static vector signed char __ATTRS_o_ai vec_vsubsbs(vector signed char __a,
+ vector signed char __b) {
+ return __builtin_altivec_vsubsbs(__a, __b);
+}
+
+static vector signed char __ATTRS_o_ai vec_vsubsbs(vector bool char __a,
+ vector signed char __b) {
+ return __builtin_altivec_vsubsbs((vector signed char)__a, __b);
+}
+
+static vector signed char __ATTRS_o_ai vec_vsubsbs(vector signed char __a,
+ vector bool char __b) {
+ return __builtin_altivec_vsubsbs(__a, (vector signed char)__b);
+}
+
+/* vec_vsububs */
+
+static vector unsigned char __ATTRS_o_ai vec_vsububs(vector unsigned char __a,
+ vector unsigned char __b) {
+ return __builtin_altivec_vsububs(__a, __b);
+}
+
+static vector unsigned char __ATTRS_o_ai vec_vsububs(vector bool char __a,
+ vector unsigned char __b) {
+ return __builtin_altivec_vsububs((vector unsigned char)__a, __b);
+}
+
+static vector unsigned char __ATTRS_o_ai vec_vsububs(vector unsigned char __a,
+ vector bool char __b) {
+ return __builtin_altivec_vsububs(__a, (vector unsigned char)__b);
+}
+
+/* vec_vsubshs */
+
+static vector short __ATTRS_o_ai vec_vsubshs(vector short __a,
+ vector short __b) {
+ return __builtin_altivec_vsubshs(__a, __b);
+}
+
+static vector short __ATTRS_o_ai vec_vsubshs(vector bool short __a,
+ vector short __b) {
+ return __builtin_altivec_vsubshs((vector short)__a, __b);
+}
+
+static vector short __ATTRS_o_ai vec_vsubshs(vector short __a,
+ vector bool short __b) {
+ return __builtin_altivec_vsubshs(__a, (vector short)__b);
+}
+
+/* vec_vsubuhs */
+
+static vector unsigned short __ATTRS_o_ai
+vec_vsubuhs(vector unsigned short __a, vector unsigned short __b) {
+ return __builtin_altivec_vsubuhs(__a, __b);
+}
+
+static vector unsigned short __ATTRS_o_ai
+vec_vsubuhs(vector bool short __a, vector unsigned short __b) {
+ return __builtin_altivec_vsubuhs((vector unsigned short)__a, __b);
+}
+
+static vector unsigned short __ATTRS_o_ai vec_vsubuhs(vector unsigned short __a,
+ vector bool short __b) {
+ return __builtin_altivec_vsubuhs(__a, (vector unsigned short)__b);
+}
+
+/* vec_vsubsws */
+
+static vector int __ATTRS_o_ai vec_vsubsws(vector int __a, vector int __b) {
+ return __builtin_altivec_vsubsws(__a, __b);
+}
+
+static vector int __ATTRS_o_ai vec_vsubsws(vector bool int __a,
+ vector int __b) {
+ return __builtin_altivec_vsubsws((vector int)__a, __b);
+}
+
+static vector int __ATTRS_o_ai vec_vsubsws(vector int __a,
+ vector bool int __b) {
+ return __builtin_altivec_vsubsws(__a, (vector int)__b);
+}
+
+/* vec_vsubuws */
+
+static vector unsigned int __ATTRS_o_ai vec_vsubuws(vector unsigned int __a,
+ vector unsigned int __b) {
+ return __builtin_altivec_vsubuws(__a, __b);
+}
+
+static vector unsigned int __ATTRS_o_ai vec_vsubuws(vector bool int __a,
+ vector unsigned int __b) {
+ return __builtin_altivec_vsubuws((vector unsigned int)__a, __b);
+}
+
+static vector unsigned int __ATTRS_o_ai vec_vsubuws(vector unsigned int __a,
+ vector bool int __b) {
+ return __builtin_altivec_vsubuws(__a, (vector unsigned int)__b);
+}
+
+#if defined(__POWER8_VECTOR__) && defined(__powerpc64__)
+/* vec_vsubuqm */
+
+static vector signed __int128 __ATTRS_o_ai
+vec_vsubuqm(vector signed __int128 __a, vector signed __int128 __b) {
+ return __a - __b;
+}
+
+static vector unsigned __int128 __ATTRS_o_ai
+vec_vsubuqm(vector unsigned __int128 __a, vector unsigned __int128 __b) {
+ return __a - __b;
+}
+
+/* vec_vsubeuqm */
+
+static vector signed __int128 __ATTRS_o_ai
+vec_vsubeuqm(vector signed __int128 __a, vector signed __int128 __b,
+ vector signed __int128 __c) {
+ return __builtin_altivec_vsubeuqm(__a, __b, __c);
+}
+
+static vector unsigned __int128 __ATTRS_o_ai
+vec_vsubeuqm(vector unsigned __int128 __a, vector unsigned __int128 __b,
+ vector unsigned __int128 __c) {
+ return __builtin_altivec_vsubeuqm(__a, __b, __c);
+}
+
+/* vec_vsubcuq */
+
+static vector signed __int128 __ATTRS_o_ai
+vec_vsubcuq(vector signed __int128 __a, vector signed __int128 __b) {
+ return __builtin_altivec_vsubcuq(__a, __b);
+}
+
+static vector unsigned __int128 __ATTRS_o_ai
+vec_vsubcuq(vector unsigned __int128 __a, vector unsigned __int128 __b) {
+ return __builtin_altivec_vsubcuq(__a, __b);
+}
+
+/* vec_vsubecuq */
+
+static vector signed __int128 __ATTRS_o_ai
+vec_vsubecuq(vector signed __int128 __a, vector signed __int128 __b,
+ vector signed __int128 __c) {
+ return __builtin_altivec_vsubecuq(__a, __b, __c);
+}
+
+static vector unsigned __int128 __ATTRS_o_ai
+vec_vsubecuq(vector unsigned __int128 __a, vector unsigned __int128 __b,
+ vector unsigned __int128 __c) {
+ return __builtin_altivec_vsubecuq(__a, __b, __c);
+}
+#endif // defined(__POWER8_VECTOR__) && defined(__powerpc64__)
+
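+/* Usage sketch (not part of the original header): per the PowerISA 2.07
+   semantics, vec_vsubcuq produces the carry-out of the subtraction (1
+   when no borrow occurs) and vec_vsubeuqm consumes it, so a 256-bit
+   subtract (__ahi:__alo) - (__bhi:__blo) can be assembled as
+
+     vector unsigned __int128 __lo = vec_vsubuqm(__alo, __blo);
+     vector unsigned __int128 __c = vec_vsubcuq(__alo, __blo);
+     vector unsigned __int128 __hi = vec_vsubeuqm(__ahi, __bhi, __c);
+
+   where __ahi, __alo, __bhi, and __blo are hypothetical operands. */
+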
+/* vec_sum4s */
+
+static vector int __ATTRS_o_ai vec_sum4s(vector signed char __a,
+ vector int __b) {
+ return __builtin_altivec_vsum4sbs(__a, __b);
+}
+
+static vector unsigned int __ATTRS_o_ai vec_sum4s(vector unsigned char __a,
+ vector unsigned int __b) {
+ return __builtin_altivec_vsum4ubs(__a, __b);
+}
+
+static vector int __ATTRS_o_ai vec_sum4s(vector signed short __a,
+ vector int __b) {
+ return __builtin_altivec_vsum4shs(__a, __b);
+}
+
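+/* Usage sketch (not part of the original header): vec_sum4s accumulates
+   groups of adjacent elements of __a (four bytes or two halfwords per
+   word) into the corresponding word of __b, with saturation. With every
+   byte of __a equal to 1 and __b all zero, vec_sum4s(__a, __b) yields
+   (4, 4, 4, 4). */
+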
+/* vec_vsum4sbs */
+
+static vector int __attribute__((__always_inline__))
+vec_vsum4sbs(vector signed char __a, vector int __b) {
+ return __builtin_altivec_vsum4sbs(__a, __b);
+}
+
+/* vec_vsum4ubs */
+
+static vector unsigned int __attribute__((__always_inline__))
+vec_vsum4ubs(vector unsigned char __a, vector unsigned int __b) {
+ return __builtin_altivec_vsum4ubs(__a, __b);
+}
+
+/* vec_vsum4shs */
+
+static vector int __attribute__((__always_inline__))
+vec_vsum4shs(vector signed short __a, vector int __b) {
+ return __builtin_altivec_vsum4shs(__a, __b);
+}
+
+/* vec_sum2s */
+
+/* The vsum2sws instruction has a big-endian bias, so that the second
+   input vector and the result always reference big-endian elements
+   1 and 3 (little-endian elements 0 and 2). For ease of porting, the
+   programmer wants elements 1 and 3 in both cases, so for little
+   endian we must perform some permutes. */
+
+static vector signed int __attribute__((__always_inline__))
+vec_sum2s(vector int __a, vector int __b) {
+#ifdef __LITTLE_ENDIAN__
+ vector int __c = (vector signed int)vec_perm(
+ __b, __b, (vector unsigned char)(4, 5, 6, 7, 0, 1, 2, 3, 12, 13, 14, 15,
+ 8, 9, 10, 11));
+ __c = __builtin_altivec_vsum2sws(__a, __c);
+ return (vector signed int)vec_perm(
+ __c, __c, (vector unsigned char)(4, 5, 6, 7, 0, 1, 2, 3, 12, 13, 14, 15,
+ 8, 9, 10, 11));
+#else
+ return __builtin_altivec_vsum2sws(__a, __b);
+#endif
+}
+
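+/* Usage sketch (not part of the original header): with
+   __a = (1, 2, 3, 4) and __b = (0, 10, 0, 20), vec_sum2s(__a, __b)
+   yields (0, 1 + 2 + 10, 0, 3 + 4 + 20) = (0, 13, 0, 27), with the
+   sums landing in elements 1 and 3 on both endiannesses thanks to the
+   permutes above. */
+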
+/* vec_vsum2sws */
+
+static vector signed int __attribute__((__always_inline__))
+vec_vsum2sws(vector int __a, vector int __b) {
+#ifdef __LITTLE_ENDIAN__
+ vector int __c = (vector signed int)vec_perm(
+ __b, __b, (vector unsigned char)(4, 5, 6, 7, 0, 1, 2, 3, 12, 13, 14, 15,
+ 8, 9, 10, 11));
+ __c = __builtin_altivec_vsum2sws(__a, __c);
+ return (vector signed int)vec_perm(
+ __c, __c, (vector unsigned char)(4, 5, 6, 7, 0, 1, 2, 3, 12, 13, 14, 15,
+ 8, 9, 10, 11));
+#else
+ return __builtin_altivec_vsum2sws(__a, __b);
+#endif
+}
+
+/* vec_sums */
+
+/* The vsumsws instruction has a big-endian bias, so that the second
+   input vector and the result always reference big-endian element 3
+   (little-endian element 0). For ease of porting, the programmer
+   wants element 3 in both cases, so for little endian we must perform
+   some permutes. */
+
+static vector signed int __attribute__((__always_inline__))
+vec_sums(vector signed int __a, vector signed int __b) {
+#ifdef __LITTLE_ENDIAN__
+ __b = (vector signed int)vec_splat(__b, 3);
+ __b = __builtin_altivec_vsumsws(__a, __b);
+ return (vector signed int)(0, 0, 0, __b[0]);
+#else
+ return __builtin_altivec_vsumsws(__a, __b);
+#endif
+}
+
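+/* Usage sketch (not part of the original header): with
+   __a = (1, 2, 3, 4) and __b = (0, 0, 0, 10), vec_sums(__a, __b)
+   yields (0, 0, 0, 20): the saturated sum of every element of __a plus
+   element 3 of __b, placed in element 3 on both endiannesses. */
+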
+/* vec_vsumsws */
+
+static vector signed int __attribute__((__always_inline__))
+vec_vsumsws(vector signed int __a, vector signed int __b) {
+#ifdef __LITTLE_ENDIAN__
+ __b = (vector signed int)vec_splat(__b, 3);
+ __b = __builtin_altivec_vsumsws(__a, __b);
+ return (vector signed int)(0, 0, 0, __b[0]);
+#else
+ return __builtin_altivec_vsumsws(__a, __b);
+#endif
+}
+
+/* vec_trunc */
+
+static vector float __ATTRS_o_ai vec_trunc(vector float __a) {
+#ifdef __VSX__
+ return __builtin_vsx_xvrspiz(__a);
+#else
+ return __builtin_altivec_vrfiz(__a);
+#endif
+}
+
+#ifdef __VSX__
+static vector double __ATTRS_o_ai vec_trunc(vector double __a) {
+ return __builtin_vsx_xvrdpiz(__a);
+}
+#endif
+
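+/* Usage sketch (not part of the original header): vec_trunc rounds each
+   element toward zero, so (1.9f, -1.9f, 0.5f, -0.5f) becomes
+   (1.0f, -1.0f, 0.0f, -0.0f). */
+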
+/* vec_vrfiz */
+
+static vector float __attribute__((__always_inline__))
+vec_vrfiz(vector float __a) {
+ return __builtin_altivec_vrfiz(__a);
+}
+
+/* vec_unpackh */
+
+/* The vector unpack instructions all have a big-endian bias, so for
+ little endian we must reverse the meanings of "high" and "low." */
+
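+/* Usage sketch (not part of the original header): given
+   vector signed char __s = (s0, s1, ..., s15) in element order,
+   vec_unpackh(__s) yields the sign-extended vector short
+   (s0, s1, ..., s7) on both endiannesses, and vec_unpackl below yields
+   (s8, s9, ..., s15). */
+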
+static vector short __ATTRS_o_ai vec_unpackh(vector signed char __a) {
+#ifdef __LITTLE_ENDIAN__
+ return __builtin_altivec_vupklsb((vector char)__a);
+#else
+ return __builtin_altivec_vupkhsb((vector char)__a);
+#endif
+}
+
+static vector bool short __ATTRS_o_ai vec_unpackh(vector bool char __a) {
+#ifdef __LITTLE_ENDIAN__
+ return (vector bool short)__builtin_altivec_vupklsb((vector char)__a);
+#else
+ return (vector bool short)__builtin_altivec_vupkhsb((vector char)__a);
+#endif
+}
+
+static vector int __ATTRS_o_ai vec_unpackh(vector short __a) {
+#ifdef __LITTLE_ENDIAN__
+ return __builtin_altivec_vupklsh(__a);
+#else
+ return __builtin_altivec_vupkhsh(__a);
+#endif
+}
+
+static vector bool int __ATTRS_o_ai vec_unpackh(vector bool short __a) {
+#ifdef __LITTLE_ENDIAN__
+ return (vector bool int)__builtin_altivec_vupklsh((vector short)__a);
+#else
+ return (vector bool int)__builtin_altivec_vupkhsh((vector short)__a);
+#endif
+}
+
+static vector unsigned int __ATTRS_o_ai vec_unpackh(vector pixel __a) {
+#ifdef __LITTLE_ENDIAN__
+ return (vector unsigned int)__builtin_altivec_vupklpx((vector short)__a);
+#else
+ return (vector unsigned int)__builtin_altivec_vupkhpx((vector short)__a);
+#endif
+}
+
+#ifdef __POWER8_VECTOR__
+static vector long long __ATTRS_o_ai vec_unpackh(vector int __a) {
+#ifdef __LITTLE_ENDIAN__
+ return __builtin_altivec_vupklsw(__a);
+#else
+ return __builtin_altivec_vupkhsw(__a);
+#endif
+}
+
+static vector bool long long __ATTRS_o_ai vec_unpackh(vector bool int __a) {
+#ifdef __LITTLE_ENDIAN__
+ return (vector bool long long)__builtin_altivec_vupklsw((vector int)__a);
+#else
+ return (vector bool long long)__builtin_altivec_vupkhsw((vector int)__a);
+#endif
+}
+#endif
+
+/* vec_vupkhsb */
+
+static vector short __ATTRS_o_ai vec_vupkhsb(vector signed char __a) {
+#ifdef __LITTLE_ENDIAN__
+ return __builtin_altivec_vupklsb((vector char)__a);
+#else
+ return __builtin_altivec_vupkhsb((vector char)__a);
+#endif
+}
+
+static vector bool short __ATTRS_o_ai vec_vupkhsb(vector bool char __a) {
+#ifdef __LITTLE_ENDIAN__
+ return (vector bool short)__builtin_altivec_vupklsb((vector char)__a);
+#else
+ return (vector bool short)__builtin_altivec_vupkhsb((vector char)__a);
+#endif
+}
+
+/* vec_vupkhsh */
+
+static vector int __ATTRS_o_ai vec_vupkhsh(vector short __a) {
+#ifdef __LITTLE_ENDIAN__
+ return __builtin_altivec_vupklsh(__a);
+#else
+ return __builtin_altivec_vupkhsh(__a);
+#endif
+}
+
+static vector bool int __ATTRS_o_ai vec_vupkhsh(vector bool short __a) {
+#ifdef __LITTLE_ENDIAN__
+ return (vector bool int)__builtin_altivec_vupklsh((vector short)__a);
+#else
+ return (vector bool int)__builtin_altivec_vupkhsh((vector short)__a);
+#endif
+}
+
+static vector unsigned int __ATTRS_o_ai vec_vupkhsh(vector pixel __a) {
+#ifdef __LITTLE_ENDIAN__
+ return (vector unsigned int)__builtin_altivec_vupklpx((vector short)__a);
+#else
+ return (vector unsigned int)__builtin_altivec_vupkhpx((vector short)__a);
+#endif
+}
+
+/* vec_vupkhsw */
+
+#ifdef __POWER8_VECTOR__
+static vector long long __ATTRS_o_ai vec_vupkhsw(vector int __a) {
+#ifdef __LITTLE_ENDIAN__
+ return __builtin_altivec_vupklsw(__a);
+#else
+ return __builtin_altivec_vupkhsw(__a);
+#endif
+}
+
+static vector bool long long __ATTRS_o_ai vec_vupkhsw(vector bool int __a) {
+#ifdef __LITTLE_ENDIAN__
+ return (vector bool long long)__builtin_altivec_vupklsw((vector int)__a);
+#else
+ return (vector bool long long)__builtin_altivec_vupkhsw((vector int)__a);
+#endif
+}
+#endif
+
+/* vec_unpackl */
+
+static vector short __ATTRS_o_ai vec_unpackl(vector signed char __a) {
+#ifdef __LITTLE_ENDIAN__
+ return __builtin_altivec_vupkhsb((vector char)__a);
+#else
+ return __builtin_altivec_vupklsb((vector char)__a);
+#endif
+}
+
+static vector bool short __ATTRS_o_ai vec_unpackl(vector bool char __a) {
+#ifdef __LITTLE_ENDIAN__
+ return (vector bool short)__builtin_altivec_vupkhsb((vector char)__a);
+#else
+ return (vector bool short)__builtin_altivec_vupklsb((vector char)__a);
+#endif
+}
+
+static vector int __ATTRS_o_ai vec_unpackl(vector short __a) {
+#ifdef __LITTLE_ENDIAN__
+ return __builtin_altivec_vupkhsh(__a);
+#else
+ return __builtin_altivec_vupklsh(__a);
+#endif
+}
+
+static vector bool int __ATTRS_o_ai vec_unpackl(vector bool short __a) {
+#ifdef __LITTLE_ENDIAN__
+ return (vector bool int)__builtin_altivec_vupkhsh((vector short)__a);
+#else
+ return (vector bool int)__builtin_altivec_vupklsh((vector short)__a);
+#endif
+}
+
+static vector unsigned int __ATTRS_o_ai vec_unpackl(vector pixel __a) {
+#ifdef __LITTLE_ENDIAN__
+ return (vector unsigned int)__builtin_altivec_vupkhpx((vector short)__a);
+#else
+ return (vector unsigned int)__builtin_altivec_vupklpx((vector short)__a);
+#endif
+}
+
+#ifdef __POWER8_VECTOR__
+static vector long long __ATTRS_o_ai vec_unpackl(vector int __a) {
+#ifdef __LITTLE_ENDIAN__
+ return __builtin_altivec_vupkhsw(__a);
+#else
+ return __builtin_altivec_vupklsw(__a);
+#endif
+}
+
+static vector bool long long __ATTRS_o_ai vec_unpackl(vector bool int __a) {
+#ifdef __LITTLE_ENDIAN__
+ return (vector bool long long)__builtin_altivec_vupkhsw((vector int)__a);
+#else
+ return (vector bool long long)__builtin_altivec_vupklsw((vector int)__a);
+#endif
+}
+#endif
+
+/* vec_vupklsb */
+
+static vector short __ATTRS_o_ai vec_vupklsb(vector signed char __a) {
+#ifdef __LITTLE_ENDIAN__
+ return __builtin_altivec_vupkhsb((vector char)__a);
+#else
+ return __builtin_altivec_vupklsb((vector char)__a);
+#endif
+}
+
+static vector bool short __ATTRS_o_ai vec_vupklsb(vector bool char __a) {
+#ifdef __LITTLE_ENDIAN__
+ return (vector bool short)__builtin_altivec_vupkhsb((vector char)__a);
+#else
+ return (vector bool short)__builtin_altivec_vupklsb((vector char)__a);
+#endif
+}
+
+/* vec_vupklsh */
+
+static vector int __ATTRS_o_ai vec_vupklsh(vector short __a) {
+#ifdef __LITTLE_ENDIAN__
+ return __builtin_altivec_vupkhsh(__a);
+#else
+ return __builtin_altivec_vupklsh(__a);
+#endif
+}
+
+static vector bool int __ATTRS_o_ai vec_vupklsh(vector bool short __a) {
+#ifdef __LITTLE_ENDIAN__
+ return (vector bool int)__builtin_altivec_vupkhsh((vector short)__a);
+#else
+ return (vector bool int)__builtin_altivec_vupklsh((vector short)__a);
+#endif
+}
+
+static vector unsigned int __ATTRS_o_ai vec_vupklsh(vector pixel __a) {
+#ifdef __LITTLE_ENDIAN__
+ return (vector unsigned int)__builtin_altivec_vupkhpx((vector short)__a);
+#else
+ return (vector unsigned int)__builtin_altivec_vupklpx((vector short)__a);
+#endif
+}
+
+/* vec_vupklsw */
+
+#ifdef __POWER8_VECTOR__
+static vector long long __ATTRS_o_ai vec_vupklsw(vector int __a) {
+#ifdef __LITTLE_ENDIAN__
+ return __builtin_altivec_vupkhsw(__a);
+#else
+ return __builtin_altivec_vupklsw(__a);
+#endif
+}
+
+static vector bool long long __ATTRS_o_ai vec_vupklsw(vector bool int __a) {
+#ifdef __LITTLE_ENDIAN__
+ return (vector bool long long)__builtin_altivec_vupkhsw((vector int)__a);
+#else
+ return (vector bool long long)__builtin_altivec_vupklsw((vector int)__a);
+#endif
+}
+#endif
+
+/* vec_vsx_ld */
+
+#ifdef __VSX__
+
+static vector signed int __ATTRS_o_ai vec_vsx_ld(int __a,
+ const vector signed int *__b) {
+ return (vector signed int)__builtin_vsx_lxvw4x(__a, __b);
+}
+
+static vector unsigned int __ATTRS_o_ai
+vec_vsx_ld(int __a, const vector unsigned int *__b) {
+ return (vector unsigned int)__builtin_vsx_lxvw4x(__a, __b);
+}
+
+static vector float __ATTRS_o_ai vec_vsx_ld(int __a, const vector float *__b) {
+ return (vector float)__builtin_vsx_lxvw4x(__a, __b);
+}
+
+static vector signed long long __ATTRS_o_ai
+vec_vsx_ld(int __a, const vector signed long long *__b) {
+ return (vector signed long long)__builtin_vsx_lxvd2x(__a, __b);
+}
+
+static vector unsigned long long __ATTRS_o_ai
+vec_vsx_ld(int __a, const vector unsigned long long *__b) {
+ return (vector unsigned long long)__builtin_vsx_lxvd2x(__a, __b);
+}
+
+static vector double __ATTRS_o_ai vec_vsx_ld(int __a,
+ const vector double *__b) {
+ return (vector double)__builtin_vsx_lxvd2x(__a, __b);
+}
+
+#endif
+
+/* vec_vsx_st */
+
+#ifdef __VSX__
+
+static void __ATTRS_o_ai vec_vsx_st(vector signed int __a, int __b,
+ vector signed int *__c) {
+ __builtin_vsx_stxvw4x((vector int)__a, __b, __c);
+}
+
+static void __ATTRS_o_ai vec_vsx_st(vector unsigned int __a, int __b,
+ vector unsigned int *__c) {
+ __builtin_vsx_stxvw4x((vector int)__a, __b, __c);
+}
+
+static void __ATTRS_o_ai vec_vsx_st(vector float __a, int __b,
+ vector float *__c) {
+ __builtin_vsx_stxvw4x((vector int)__a, __b, __c);
+}
+
+static void __ATTRS_o_ai vec_vsx_st(vector signed long long __a, int __b,
+ vector signed long long *__c) {
+ __builtin_vsx_stxvd2x((vector double)__a, __b, __c);
+}
+
+static void __ATTRS_o_ai vec_vsx_st(vector unsigned long long __a, int __b,
+ vector unsigned long long *__c) {
+ __builtin_vsx_stxvd2x((vector double)__a, __b, __c);
+}
+
+static void __ATTRS_o_ai vec_vsx_st(vector double __a, int __b,
+ vector double *__c) {
+ __builtin_vsx_stxvd2x((vector double)__a, __b, __c);
+}
+
+#endif
+
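+/* Usage sketch (not part of the original header): unlike vec_ld, which
+   ignores the low four bits of the effective address, the VSX loads and
+   stores above carry no alignment requirement, so a misaligned access
+   such as
+
+     float __buf[5] = {0.0f, 1.0f, 2.0f, 3.0f, 4.0f};
+     vector float __v = vec_vsx_ld(0, (const vector float *)(__buf + 1));
+     vec_vsx_st(__v, 0, (vector float *)(__buf + 1));
+
+   loads and stores the four floats starting at __buf + 1. */
+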
+/* vec_xor */
+
+#define __builtin_altivec_vxor vec_xor
+
+static vector signed char __ATTRS_o_ai vec_xor(vector signed char __a,
+ vector signed char __b) {
+ return __a ^ __b;
+}
+
+static vector signed char __ATTRS_o_ai vec_xor(vector bool char __a,
+ vector signed char __b) {
+ return (vector signed char)__a ^ __b;
+}
+
+static vector signed char __ATTRS_o_ai vec_xor(vector signed char __a,
+ vector bool char __b) {
+ return __a ^ (vector signed char)__b;
+}
+
+static vector unsigned char __ATTRS_o_ai vec_xor(vector unsigned char __a,
+ vector unsigned char __b) {
+ return __a ^ __b;
+}
+
+static vector unsigned char __ATTRS_o_ai vec_xor(vector bool char __a,
+ vector unsigned char __b) {
+ return (vector unsigned char)__a ^ __b;
+}
+
+static vector unsigned char __ATTRS_o_ai vec_xor(vector unsigned char __a,
+ vector bool char __b) {
+ return __a ^ (vector unsigned char)__b;
+}
+
+static vector bool char __ATTRS_o_ai vec_xor(vector bool char __a,
+ vector bool char __b) {
+ return __a ^ __b;
+}
+
+static vector short __ATTRS_o_ai vec_xor(vector short __a, vector short __b) {
+ return __a ^ __b;
+}
+
+static vector short __ATTRS_o_ai vec_xor(vector bool short __a,
+ vector short __b) {
+ return (vector short)__a ^ __b;
+}
+
+static vector short __ATTRS_o_ai vec_xor(vector short __a,
+ vector bool short __b) {
+ return __a ^ (vector short)__b;
+}
+
+static vector unsigned short __ATTRS_o_ai vec_xor(vector unsigned short __a,
+ vector unsigned short __b) {
+ return __a ^ __b;
+}
+
+static vector unsigned short __ATTRS_o_ai vec_xor(vector bool short __a,
+ vector unsigned short __b) {
+ return (vector unsigned short)__a ^ __b;
+}
+
+static vector unsigned short __ATTRS_o_ai vec_xor(vector unsigned short __a,
+ vector bool short __b) {
+ return __a ^ (vector unsigned short)__b;
+}
+
+static vector bool short __ATTRS_o_ai vec_xor(vector bool short __a,
+ vector bool short __b) {
+ return __a ^ __b;
+}
+
+static vector int __ATTRS_o_ai vec_xor(vector int __a, vector int __b) {
+ return __a ^ __b;
+}
+
+static vector int __ATTRS_o_ai vec_xor(vector bool int __a, vector int __b) {
+ return (vector int)__a ^ __b;
+}
+
+static vector int __ATTRS_o_ai vec_xor(vector int __a, vector bool int __b) {
+ return __a ^ (vector int)__b;
+}
+
+static vector unsigned int __ATTRS_o_ai vec_xor(vector unsigned int __a,
+ vector unsigned int __b) {
+ return __a ^ __b;
+}
+
+static vector unsigned int __ATTRS_o_ai vec_xor(vector bool int __a,
+ vector unsigned int __b) {
+ return (vector unsigned int)__a ^ __b;
+}
+
+static vector unsigned int __ATTRS_o_ai vec_xor(vector unsigned int __a,
+ vector bool int __b) {
+ return __a ^ (vector unsigned int)__b;
+}
+
+static vector bool int __ATTRS_o_ai vec_xor(vector bool int __a,
+ vector bool int __b) {
+ return __a ^ __b;
+}
+
+static vector float __ATTRS_o_ai vec_xor(vector float __a, vector float __b) {
+ vector unsigned int __res =
+ (vector unsigned int)__a ^ (vector unsigned int)__b;
+ return (vector float)__res;
+}
+
+static vector float __ATTRS_o_ai vec_xor(vector bool int __a,
+ vector float __b) {
+ vector unsigned int __res =
+ (vector unsigned int)__a ^ (vector unsigned int)__b;
+ return (vector float)__res;
+}
+
+static vector float __ATTRS_o_ai vec_xor(vector float __a,
+ vector bool int __b) {
+ vector unsigned int __res =
+ (vector unsigned int)__a ^ (vector unsigned int)__b;
+ return (vector float)__res;
+}
+
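+/* Usage sketch (not part of the original header): because vec_xor on
+   float operates on the raw bits, flipping the sign bit negates every
+   element:
+
+     vector float __neg =
+         vec_xor(__v, (vector float)(vector unsigned int)(0x80000000));
+
+   where __v is a hypothetical vector float. */
+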
+#ifdef __VSX__
+static vector signed long long __ATTRS_o_ai
+vec_xor(vector signed long long __a, vector signed long long __b) {
+ return __a ^ __b;
+}
+
+static vector signed long long __ATTRS_o_ai
+vec_xor(vector bool long long __a, vector signed long long __b) {
+ return (vector signed long long)__a ^ __b;
+}
+
+static vector signed long long __ATTRS_o_ai vec_xor(vector signed long long __a,
+ vector bool long long __b) {
+ return __a ^ (vector signed long long)__b;
+}
+
+static vector unsigned long long __ATTRS_o_ai
+vec_xor(vector unsigned long long __a, vector unsigned long long __b) {
+ return __a ^ __b;
+}
+
+static vector unsigned long long __ATTRS_o_ai
+vec_xor(vector bool long long __a, vector unsigned long long __b) {
+ return (vector unsigned long long)__a ^ __b;
+}
+
+static vector unsigned long long __ATTRS_o_ai
+vec_xor(vector unsigned long long __a, vector bool long long __b) {
+ return __a ^ (vector unsigned long long)__b;
+}
+
+static vector bool long long __ATTRS_o_ai vec_xor(vector bool long long __a,
+ vector bool long long __b) {
+ return __a ^ __b;
+}
+
+static vector double __ATTRS_o_ai
+vec_xor(vector double __a, vector double __b) {
+ return (vector double)((vector unsigned long long)__a ^
+ (vector unsigned long long)__b);
+}
+
+static vector double __ATTRS_o_ai
+vec_xor(vector double __a, vector bool long long __b) {
+ return (vector double)((vector unsigned long long)__a ^
+                         (vector unsigned long long)__b);
+}
+
+static vector double __ATTRS_o_ai
+vec_xor(vector bool long long __a, vector double __b) {
+ return (vector double)((vector unsigned long long)__a ^
+ (vector unsigned long long)__b);
+}
+#endif
+
+/* vec_vxor */
+
+static vector signed char __ATTRS_o_ai vec_vxor(vector signed char __a,
+ vector signed char __b) {
+ return __a ^ __b;
+}
+
+static vector signed char __ATTRS_o_ai vec_vxor(vector bool char __a,
+ vector signed char __b) {
+ return (vector signed char)__a ^ __b;
+}
+
+static vector signed char __ATTRS_o_ai vec_vxor(vector signed char __a,
+ vector bool char __b) {
+ return __a ^ (vector signed char)__b;
+}
+
+static vector unsigned char __ATTRS_o_ai vec_vxor(vector unsigned char __a,
+ vector unsigned char __b) {
+ return __a ^ __b;
+}
+
+static vector unsigned char __ATTRS_o_ai vec_vxor(vector bool char __a,
+ vector unsigned char __b) {
+ return (vector unsigned char)__a ^ __b;
+}
+
+static vector unsigned char __ATTRS_o_ai vec_vxor(vector unsigned char __a,
+ vector bool char __b) {
+ return __a ^ (vector unsigned char)__b;
+}
+
+static vector bool char __ATTRS_o_ai vec_vxor(vector bool char __a,
+ vector bool char __b) {
+ return __a ^ __b;
+}
+
+static vector short __ATTRS_o_ai vec_vxor(vector short __a, vector short __b) {
+ return __a ^ __b;
+}
+
+static vector short __ATTRS_o_ai vec_vxor(vector bool short __a,
+ vector short __b) {
+ return (vector short)__a ^ __b;
+}
+
+static vector short __ATTRS_o_ai vec_vxor(vector short __a,
+ vector bool short __b) {
+ return __a ^ (vector short)__b;
+}
+
+static vector unsigned short __ATTRS_o_ai vec_vxor(vector unsigned short __a,
+ vector unsigned short __b) {
+ return __a ^ __b;
+}
+
+static vector unsigned short __ATTRS_o_ai vec_vxor(vector bool short __a,
+ vector unsigned short __b) {
+ return (vector unsigned short)__a ^ __b;
+}
+
+static vector unsigned short __ATTRS_o_ai vec_vxor(vector unsigned short __a,
+ vector bool short __b) {
+ return __a ^ (vector unsigned short)__b;
+}
+
+static vector bool short __ATTRS_o_ai vec_vxor(vector bool short __a,
+ vector bool short __b) {
+ return __a ^ __b;
+}
+
+static vector int __ATTRS_o_ai vec_vxor(vector int __a, vector int __b) {
+ return __a ^ __b;
+}
+
+static vector int __ATTRS_o_ai vec_vxor(vector bool int __a, vector int __b) {
+ return (vector int)__a ^ __b;
+}
+
+static vector int __ATTRS_o_ai vec_vxor(vector int __a, vector bool int __b) {
+ return __a ^ (vector int)__b;
+}
+
+static vector unsigned int __ATTRS_o_ai vec_vxor(vector unsigned int __a,
+ vector unsigned int __b) {
+ return __a ^ __b;
+}
+
+static vector unsigned int __ATTRS_o_ai vec_vxor(vector bool int __a,
+ vector unsigned int __b) {
+ return (vector unsigned int)__a ^ __b;
+}
+
+static vector unsigned int __ATTRS_o_ai vec_vxor(vector unsigned int __a,
+ vector bool int __b) {
+ return __a ^ (vector unsigned int)__b;
+}
+
+static vector bool int __ATTRS_o_ai vec_vxor(vector bool int __a,
+ vector bool int __b) {
+ return __a ^ __b;
+}
+
+static vector float __ATTRS_o_ai vec_vxor(vector float __a, vector float __b) {
+ vector unsigned int __res =
+ (vector unsigned int)__a ^ (vector unsigned int)__b;
+ return (vector float)__res;
+}
+
+static vector float __ATTRS_o_ai vec_vxor(vector bool int __a,
+ vector float __b) {
+ vector unsigned int __res =
+ (vector unsigned int)__a ^ (vector unsigned int)__b;
+ return (vector float)__res;
+}
+
+static vector float __ATTRS_o_ai vec_vxor(vector float __a,
+ vector bool int __b) {
+ vector unsigned int __res =
+ (vector unsigned int)__a ^ (vector unsigned int)__b;
+ return (vector float)__res;
+}
+
+#ifdef __VSX__
+static vector signed long long __ATTRS_o_ai
+vec_vxor(vector signed long long __a, vector signed long long __b) {
+ return __a ^ __b;
+}
+
+static vector signed long long __ATTRS_o_ai
+vec_vxor(vector bool long long __a, vector signed long long __b) {
+ return (vector signed long long)__a ^ __b;
+}
+
+static vector signed long long __ATTRS_o_ai
+vec_vxor(vector signed long long __a, vector bool long long __b) {
+ return __a ^ (vector signed long long)__b;
+}
+
+static vector unsigned long long __ATTRS_o_ai
+vec_vxor(vector unsigned long long __a, vector unsigned long long __b) {
+ return __a ^ __b;
+}
+
+static vector unsigned long long __ATTRS_o_ai
+vec_vxor(vector bool long long __a, vector unsigned long long __b) {
+ return (vector unsigned long long)__a ^ __b;
+}
+
+static vector unsigned long long __ATTRS_o_ai
+vec_vxor(vector unsigned long long __a, vector bool long long __b) {
+ return __a ^ (vector unsigned long long)__b;
+}
+
+static vector bool long long __ATTRS_o_ai vec_vxor(vector bool long long __a,
+ vector bool long long __b) {
+ return __a ^ __b;
+}
+#endif
+
+/* ------------------------ extensions for CBEA ----------------------------- */
+
+/* vec_extract */
+
+static signed char __ATTRS_o_ai vec_extract(vector signed char __a, int __b) {
+ return __a[__b];
+}
+
+static unsigned char __ATTRS_o_ai vec_extract(vector unsigned char __a,
+ int __b) {
+ return __a[__b];
+}
+
+static unsigned char __ATTRS_o_ai vec_extract(vector bool char __a,
+ int __b) {
+ return __a[__b];
+}
+
+static signed short __ATTRS_o_ai vec_extract(vector signed short __a, int __b) {
+ return __a[__b];
+}
+
+static unsigned short __ATTRS_o_ai vec_extract(vector unsigned short __a,
+ int __b) {
+ return __a[__b];
+}
+
+static unsigned short __ATTRS_o_ai vec_extract(vector bool short __a,
+ int __b) {
+ return __a[__b];
+}
+
+static signed int __ATTRS_o_ai vec_extract(vector signed int __a, int __b) {
+ return __a[__b];
+}
+
+static unsigned int __ATTRS_o_ai vec_extract(vector unsigned int __a, int __b) {
+ return __a[__b];
+}
+
+static unsigned int __ATTRS_o_ai vec_extract(vector bool int __a, int __b) {
+ return __a[__b];
+}
+
+#ifdef __VSX__
+static signed long long __ATTRS_o_ai vec_extract(vector signed long long __a,
+ int __b) {
+ return __a[__b];
+}
+
+static unsigned long long __ATTRS_o_ai
+vec_extract(vector unsigned long long __a, int __b) {
+ return __a[__b];
+}
+
+static unsigned long long __ATTRS_o_ai vec_extract(vector bool long long __a,
+ int __b) {
+ return __a[__b];
+}
+
+static double __ATTRS_o_ai vec_extract(vector double __a, int __b) {
+ return __a[__b];
+}
+#endif
+
+static float __ATTRS_o_ai vec_extract(vector float __a, int __b) {
+ return __a[__b];
+}
+
+/* vec_insert */
+
+static vector signed char __ATTRS_o_ai vec_insert(signed char __a,
+ vector signed char __b,
+ int __c) {
+ __b[__c] = __a;
+ return __b;
+}
+
+static vector unsigned char __ATTRS_o_ai vec_insert(unsigned char __a,
+ vector unsigned char __b,
+ int __c) {
+ __b[__c] = __a;
+ return __b;
+}
+
+static vector bool char __ATTRS_o_ai vec_insert(unsigned char __a,
+ vector bool char __b,
+ int __c) {
+ __b[__c] = __a;
+ return __b;
+}
+
+static vector signed short __ATTRS_o_ai vec_insert(signed short __a,
+ vector signed short __b,
+ int __c) {
+ __b[__c] = __a;
+ return __b;
+}
+
+static vector unsigned short __ATTRS_o_ai vec_insert(unsigned short __a,
+ vector unsigned short __b,
+ int __c) {
+ __b[__c] = __a;
+ return __b;
+}
+
+static vector bool short __ATTRS_o_ai vec_insert(unsigned short __a,
+ vector bool short __b,
+ int __c) {
+ __b[__c] = __a;
+ return __b;
+}
+
+static vector signed int __ATTRS_o_ai vec_insert(signed int __a,
+ vector signed int __b,
+ int __c) {
+ __b[__c] = __a;
+ return __b;
+}
+
+static vector unsigned int __ATTRS_o_ai vec_insert(unsigned int __a,
+ vector unsigned int __b,
+ int __c) {
+ __b[__c] = __a;
+ return __b;
+}
+
+static vector bool int __ATTRS_o_ai vec_insert(unsigned int __a,
+ vector bool int __b,
+ int __c) {
+ __b[__c] = __a;
+ return __b;
+}
+
+#ifdef __VSX__
+static vector signed long long __ATTRS_o_ai
+vec_insert(signed long long __a, vector signed long long __b, int __c) {
+ __b[__c] = __a;
+ return __b;
+}
+
+static vector unsigned long long __ATTRS_o_ai
+vec_insert(unsigned long long __a, vector unsigned long long __b, int __c) {
+ __b[__c] = __a;
+ return __b;
+}
+
+static vector bool long long __ATTRS_o_ai
+vec_insert(unsigned long long __a, vector bool long long __b, int __c) {
+ __b[__c] = __a;
+ return __b;
+}
+static vector double __ATTRS_o_ai vec_insert(double __a, vector double __b,
+ int __c) {
+ __b[__c] = __a;
+ return __b;
+}
+#endif
+
+static vector float __ATTRS_o_ai vec_insert(float __a, vector float __b,
+ int __c) {
+ __b[__c] = __a;
+ return __b;
+}
+
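+/* Usage sketch (not part of the original header):
+
+     vector int __v = (vector int)(10, 20, 30, 40);
+     __v = vec_insert(99, __v, 2);    now __v is (10, 20, 99, 40)
+     int __x = vec_extract(__v, 2);   now __x is 99
+
+   Out-of-range indices are not checked by these functions. */
+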
+/* vec_lvlx */
+
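+/* A common idiom (sketch, not part of the original header): vec_lvlx
+   yields the bytes from the given address to the end of its enclosing
+   quadword, left-aligned and zero-filled, while vec_lvrx yields the
+   bytes before the address, right-aligned and zero-filled. ORing the
+   two halves performs an unaligned 16-byte load:
+
+     vector unsigned char __v =
+         vec_or(vec_lvlx(0, __p), vec_lvrx(16, __p));
+
+   where __p is a hypothetical const unsigned char pointer. */
+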
+static vector signed char __ATTRS_o_ai vec_lvlx(int __a,
+ const signed char *__b) {
+ return vec_perm(vec_ld(__a, __b), (vector signed char)(0),
+ vec_lvsl(__a, __b));
+}
+
+static vector signed char __ATTRS_o_ai vec_lvlx(int __a,
+ const vector signed char *__b) {
+ return vec_perm(vec_ld(__a, __b), (vector signed char)(0),
+ vec_lvsl(__a, (unsigned char *)__b));
+}
+
+static vector unsigned char __ATTRS_o_ai vec_lvlx(int __a,
+ const unsigned char *__b) {
+ return vec_perm(vec_ld(__a, __b), (vector unsigned char)(0),
+ vec_lvsl(__a, __b));
+}
+
+static vector unsigned char __ATTRS_o_ai
+vec_lvlx(int __a, const vector unsigned char *__b) {
+ return vec_perm(vec_ld(__a, __b), (vector unsigned char)(0),
+ vec_lvsl(__a, (unsigned char *)__b));
+}
+
+static vector bool char __ATTRS_o_ai vec_lvlx(int __a,
+ const vector bool char *__b) {
+ return vec_perm(vec_ld(__a, __b), (vector bool char)(0),
+ vec_lvsl(__a, (unsigned char *)__b));
+}
+
+static vector short __ATTRS_o_ai vec_lvlx(int __a, const short *__b) {
+ return vec_perm(vec_ld(__a, __b), (vector short)(0), vec_lvsl(__a, __b));
+}
+
+static vector short __ATTRS_o_ai vec_lvlx(int __a, const vector short *__b) {
+ return vec_perm(vec_ld(__a, __b), (vector short)(0),
+ vec_lvsl(__a, (unsigned char *)__b));
+}
+
+static vector unsigned short __ATTRS_o_ai vec_lvlx(int __a,
+ const unsigned short *__b) {
+ return vec_perm(vec_ld(__a, __b), (vector unsigned short)(0),
+ vec_lvsl(__a, __b));
+}
+
+static vector unsigned short __ATTRS_o_ai
+vec_lvlx(int __a, const vector unsigned short *__b) {
+ return vec_perm(vec_ld(__a, __b), (vector unsigned short)(0),
+ vec_lvsl(__a, (unsigned char *)__b));
+}
+
+static vector bool short __ATTRS_o_ai vec_lvlx(int __a,
+ const vector bool short *__b) {
+ return vec_perm(vec_ld(__a, __b), (vector bool short)(0),
+ vec_lvsl(__a, (unsigned char *)__b));
+}
+
+static vector pixel __ATTRS_o_ai vec_lvlx(int __a, const vector pixel *__b) {
+ return vec_perm(vec_ld(__a, __b), (vector pixel)(0),
+ vec_lvsl(__a, (unsigned char *)__b));
+}
+
+static vector int __ATTRS_o_ai vec_lvlx(int __a, const int *__b) {
+ return vec_perm(vec_ld(__a, __b), (vector int)(0), vec_lvsl(__a, __b));
+}
+
+static vector int __ATTRS_o_ai vec_lvlx(int __a, const vector int *__b) {
+ return vec_perm(vec_ld(__a, __b), (vector int)(0),
+ vec_lvsl(__a, (unsigned char *)__b));
+}
+
+static vector unsigned int __ATTRS_o_ai vec_lvlx(int __a,
+ const unsigned int *__b) {
+ return vec_perm(vec_ld(__a, __b), (vector unsigned int)(0),
+ vec_lvsl(__a, __b));
+}
+
+static vector unsigned int __ATTRS_o_ai
+vec_lvlx(int __a, const vector unsigned int *__b) {
+ return vec_perm(vec_ld(__a, __b), (vector unsigned int)(0),
+ vec_lvsl(__a, (unsigned char *)__b));
+}
+
+static vector bool int __ATTRS_o_ai vec_lvlx(int __a,
+ const vector bool int *__b) {
+ return vec_perm(vec_ld(__a, __b), (vector bool int)(0),
+ vec_lvsl(__a, (unsigned char *)__b));
+}
+
+static vector float __ATTRS_o_ai vec_lvlx(int __a, const float *__b) {
+ return vec_perm(vec_ld(__a, __b), (vector float)(0), vec_lvsl(__a, __b));
+}
+
+static vector float __ATTRS_o_ai vec_lvlx(int __a, const vector float *__b) {
+ return vec_perm(vec_ld(__a, __b), (vector float)(0),
+ vec_lvsl(__a, (unsigned char *)__b));
+}
+
+/* vec_lvlxl */
+
+static vector signed char __ATTRS_o_ai vec_lvlxl(int __a,
+ const signed char *__b) {
+ return vec_perm(vec_ldl(__a, __b), (vector signed char)(0),
+ vec_lvsl(__a, __b));
+}
+
+static vector signed char __ATTRS_o_ai
+vec_lvlxl(int __a, const vector signed char *__b) {
+ return vec_perm(vec_ldl(__a, __b), (vector signed char)(0),
+ vec_lvsl(__a, (unsigned char *)__b));
+}
+
+static vector unsigned char __ATTRS_o_ai vec_lvlxl(int __a,
+ const unsigned char *__b) {
+ return vec_perm(vec_ldl(__a, __b), (vector unsigned char)(0),
+ vec_lvsl(__a, __b));
+}
+
+static vector unsigned char __ATTRS_o_ai
+vec_lvlxl(int __a, const vector unsigned char *__b) {
+ return vec_perm(vec_ldl(__a, __b), (vector unsigned char)(0),
+ vec_lvsl(__a, (unsigned char *)__b));
+}
+
+static vector bool char __ATTRS_o_ai vec_lvlxl(int __a,
+ const vector bool char *__b) {
+ return vec_perm(vec_ldl(__a, __b), (vector bool char)(0),
+ vec_lvsl(__a, (unsigned char *)__b));
+}
+
+static vector short __ATTRS_o_ai vec_lvlxl(int __a, const short *__b) {
+ return vec_perm(vec_ldl(__a, __b), (vector short)(0), vec_lvsl(__a, __b));
+}
+
+static vector short __ATTRS_o_ai vec_lvlxl(int __a, const vector short *__b) {
+ return vec_perm(vec_ldl(__a, __b), (vector short)(0),
+ vec_lvsl(__a, (unsigned char *)__b));
+}
+
+static vector unsigned short __ATTRS_o_ai vec_lvlxl(int __a,
+ const unsigned short *__b) {
+ return vec_perm(vec_ldl(__a, __b), (vector unsigned short)(0),
+ vec_lvsl(__a, __b));
+}
+
+static vector unsigned short __ATTRS_o_ai
+vec_lvlxl(int __a, const vector unsigned short *__b) {
+ return vec_perm(vec_ldl(__a, __b), (vector unsigned short)(0),
+ vec_lvsl(__a, (unsigned char *)__b));
+}
+
+static vector bool short __ATTRS_o_ai vec_lvlxl(int __a,
+ const vector bool short *__b) {
+ return vec_perm(vec_ldl(__a, __b), (vector bool short)(0),
+ vec_lvsl(__a, (unsigned char *)__b));
+}
+
+static vector pixel __ATTRS_o_ai vec_lvlxl(int __a, const vector pixel *__b) {
+ return vec_perm(vec_ldl(__a, __b), (vector pixel)(0),
+ vec_lvsl(__a, (unsigned char *)__b));
+}
+
+static vector int __ATTRS_o_ai vec_lvlxl(int __a, const int *__b) {
+ return vec_perm(vec_ldl(__a, __b), (vector int)(0), vec_lvsl(__a, __b));
+}
+
+static vector int __ATTRS_o_ai vec_lvlxl(int __a, const vector int *__b) {
+ return vec_perm(vec_ldl(__a, __b), (vector int)(0),
+ vec_lvsl(__a, (unsigned char *)__b));
+}
+
+static vector unsigned int __ATTRS_o_ai vec_lvlxl(int __a,
+ const unsigned int *__b) {
+ return vec_perm(vec_ldl(__a, __b), (vector unsigned int)(0),
+ vec_lvsl(__a, __b));
+}
+
+static vector unsigned int __ATTRS_o_ai
+vec_lvlxl(int __a, const vector unsigned int *__b) {
+ return vec_perm(vec_ldl(__a, __b), (vector unsigned int)(0),
+ vec_lvsl(__a, (unsigned char *)__b));
+}
+
+static vector bool int __ATTRS_o_ai vec_lvlxl(int __a,
+ const vector bool int *__b) {
+ return vec_perm(vec_ldl(__a, __b), (vector bool int)(0),
+ vec_lvsl(__a, (unsigned char *)__b));
+}
+
+static vector float __ATTRS_o_ai vec_lvlxl(int __a, const float *__b) {
+ return vec_perm(vec_ldl(__a, __b), (vector float)(0), vec_lvsl(__a, __b));
+}
+
+static vector float __ATTRS_o_ai vec_lvlxl(int __a, const vector float *__b) {
+ return vec_perm(vec_ldl(__a, __b), (vector float)(0),
+ vec_lvsl(__a, (unsigned char *)__b));
+}
+
+/* vec_lvrx */
+
+static vector signed char __ATTRS_o_ai vec_lvrx(int __a,
+ const signed char *__b) {
+ return vec_perm((vector signed char)(0), vec_ld(__a, __b),
+ vec_lvsl(__a, __b));
+}
+
+static vector signed char __ATTRS_o_ai vec_lvrx(int __a,
+ const vector signed char *__b) {
+ return vec_perm((vector signed char)(0), vec_ld(__a, __b),
+ vec_lvsl(__a, (unsigned char *)__b));
+}
+
+static vector unsigned char __ATTRS_o_ai vec_lvrx(int __a,
+ const unsigned char *__b) {
+ return vec_perm((vector unsigned char)(0), vec_ld(__a, __b),
+ vec_lvsl(__a, __b));
+}
+
+static vector unsigned char __ATTRS_o_ai
+vec_lvrx(int __a, const vector unsigned char *__b) {
+ return vec_perm((vector unsigned char)(0), vec_ld(__a, __b),
+ vec_lvsl(__a, (unsigned char *)__b));
+}
+
+static vector bool char __ATTRS_o_ai vec_lvrx(int __a,
+ const vector bool char *__b) {
+ return vec_perm((vector bool char)(0), vec_ld(__a, __b),
+ vec_lvsl(__a, (unsigned char *)__b));
+}
+
+static vector short __ATTRS_o_ai vec_lvrx(int __a, const short *__b) {
+ return vec_perm((vector short)(0), vec_ld(__a, __b), vec_lvsl(__a, __b));
+}
+
+static vector short __ATTRS_o_ai vec_lvrx(int __a, const vector short *__b) {
+ return vec_perm((vector short)(0), vec_ld(__a, __b),
+ vec_lvsl(__a, (unsigned char *)__b));
+}
+
+static vector unsigned short __ATTRS_o_ai vec_lvrx(int __a,
+ const unsigned short *__b) {
+ return vec_perm((vector unsigned short)(0), vec_ld(__a, __b),
+ vec_lvsl(__a, __b));
+}
+
+static vector unsigned short __ATTRS_o_ai
+vec_lvrx(int __a, const vector unsigned short *__b) {
+ return vec_perm((vector unsigned short)(0), vec_ld(__a, __b),
+ vec_lvsl(__a, (unsigned char *)__b));
+}
+
+static vector bool short __ATTRS_o_ai vec_lvrx(int __a,
+ const vector bool short *__b) {
+ return vec_perm((vector bool short)(0), vec_ld(__a, __b),
+ vec_lvsl(__a, (unsigned char *)__b));
+}
+
+static vector pixel __ATTRS_o_ai vec_lvrx(int __a, const vector pixel *__b) {
+ return vec_perm((vector pixel)(0), vec_ld(__a, __b),
+ vec_lvsl(__a, (unsigned char *)__b));
+}
+
+static vector int __ATTRS_o_ai vec_lvrx(int __a, const int *__b) {
+ return vec_perm((vector int)(0), vec_ld(__a, __b), vec_lvsl(__a, __b));
+}
+
+static vector int __ATTRS_o_ai vec_lvrx(int __a, const vector int *__b) {
+ return vec_perm((vector int)(0), vec_ld(__a, __b),
+ vec_lvsl(__a, (unsigned char *)__b));
+}
+
+static vector unsigned int __ATTRS_o_ai vec_lvrx(int __a,
+ const unsigned int *__b) {
+ return vec_perm((vector unsigned int)(0), vec_ld(__a, __b),
+ vec_lvsl(__a, __b));
+}
+
+static vector unsigned int __ATTRS_o_ai
+vec_lvrx(int __a, const vector unsigned int *__b) {
+ return vec_perm((vector unsigned int)(0), vec_ld(__a, __b),
+ vec_lvsl(__a, (unsigned char *)__b));
+}
+
+static vector bool int __ATTRS_o_ai vec_lvrx(int __a,
+ const vector bool int *__b) {
+ return vec_perm((vector bool int)(0), vec_ld(__a, __b),
+ vec_lvsl(__a, (unsigned char *)__b));
+}
+
+static vector float __ATTRS_o_ai vec_lvrx(int __a, const float *__b) {
+ return vec_perm((vector float)(0), vec_ld(__a, __b), vec_lvsl(__a, __b));
+}
+
+static vector float __ATTRS_o_ai vec_lvrx(int __a, const vector float *__b) {
+ return vec_perm((vector float)(0), vec_ld(__a, __b),
+ vec_lvsl(__a, (unsigned char *)__b));
+}
+
+/* vec_lvrxl */
+
+static vector signed char __ATTRS_o_ai vec_lvrxl(int __a,
+ const signed char *__b) {
+ return vec_perm((vector signed char)(0), vec_ldl(__a, __b),
+ vec_lvsl(__a, __b));
+}
+
+static vector signed char __ATTRS_o_ai
+vec_lvrxl(int __a, const vector signed char *__b) {
+ return vec_perm((vector signed char)(0), vec_ldl(__a, __b),
+ vec_lvsl(__a, (unsigned char *)__b));
+}
+
+static vector unsigned char __ATTRS_o_ai vec_lvrxl(int __a,
+ const unsigned char *__b) {
+ return vec_perm((vector unsigned char)(0), vec_ldl(__a, __b),
+ vec_lvsl(__a, __b));
+}
+
+static vector unsigned char __ATTRS_o_ai
+vec_lvrxl(int __a, const vector unsigned char *__b) {
+ return vec_perm((vector unsigned char)(0), vec_ldl(__a, __b),
+ vec_lvsl(__a, (unsigned char *)__b));
+}
+
+static vector bool char __ATTRS_o_ai vec_lvrxl(int __a,
+ const vector bool char *__b) {
+ return vec_perm((vector bool char)(0), vec_ldl(__a, __b),
+ vec_lvsl(__a, (unsigned char *)__b));
+}
+
+static vector short __ATTRS_o_ai vec_lvrxl(int __a, const short *__b) {
+ return vec_perm((vector short)(0), vec_ldl(__a, __b), vec_lvsl(__a, __b));
+}
+
+static vector short __ATTRS_o_ai vec_lvrxl(int __a, const vector short *__b) {
+ return vec_perm((vector short)(0), vec_ldl(__a, __b),
+ vec_lvsl(__a, (unsigned char *)__b));
+}
+
+static vector unsigned short __ATTRS_o_ai vec_lvrxl(int __a,
+ const unsigned short *__b) {
+ return vec_perm((vector unsigned short)(0), vec_ldl(__a, __b),
+ vec_lvsl(__a, __b));
+}
+
+static vector unsigned short __ATTRS_o_ai
+vec_lvrxl(int __a, const vector unsigned short *__b) {
+ return vec_perm((vector unsigned short)(0), vec_ldl(__a, __b),
+ vec_lvsl(__a, (unsigned char *)__b));
+}
+
+static vector bool short __ATTRS_o_ai vec_lvrxl(int __a,
+ const vector bool short *__b) {
+ return vec_perm((vector bool short)(0), vec_ldl(__a, __b),
+ vec_lvsl(__a, (unsigned char *)__b));
+}
+
+static vector pixel __ATTRS_o_ai vec_lvrxl(int __a, const vector pixel *__b) {
+ return vec_perm((vector pixel)(0), vec_ldl(__a, __b),
+ vec_lvsl(__a, (unsigned char *)__b));
+}
+
+static vector int __ATTRS_o_ai vec_lvrxl(int __a, const int *__b) {
+ return vec_perm((vector int)(0), vec_ldl(__a, __b), vec_lvsl(__a, __b));
+}
+
+static vector int __ATTRS_o_ai vec_lvrxl(int __a, const vector int *__b) {
+ return vec_perm((vector int)(0), vec_ldl(__a, __b),
+ vec_lvsl(__a, (unsigned char *)__b));
+}
+
+static vector unsigned int __ATTRS_o_ai vec_lvrxl(int __a,
+ const unsigned int *__b) {
+ return vec_perm((vector unsigned int)(0), vec_ldl(__a, __b),
+ vec_lvsl(__a, __b));
+}
+
+static vector unsigned int __ATTRS_o_ai
+vec_lvrxl(int __a, const vector unsigned int *__b) {
+ return vec_perm((vector unsigned int)(0), vec_ldl(__a, __b),
+ vec_lvsl(__a, (unsigned char *)__b));
+}
+
+static vector bool int __ATTRS_o_ai vec_lvrxl(int __a,
+ const vector bool int *__b) {
+ return vec_perm((vector bool int)(0), vec_ldl(__a, __b),
+ vec_lvsl(__a, (unsigned char *)__b));
+}
+
+static vector float __ATTRS_o_ai vec_lvrxl(int __a, const float *__b) {
+ return vec_perm((vector float)(0), vec_ldl(__a, __b), vec_lvsl(__a, __b));
+}
+
+static vector float __ATTRS_o_ai vec_lvrxl(int __a, const vector float *__b) {
+ return vec_perm((vector float)(0), vec_ldl(__a, __b),
+ vec_lvsl(__a, (unsigned char *)__b));
+}
+
+/* vec_stvlx */
+
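+/* A common idiom (sketch, not part of the original header): the
+   matching unaligned 16-byte store writes the two halves with a pair of
+   read-modify-write stores:
+
+     vec_stvlx(__v, 0, __p);
+     vec_stvrx(__v, 16, __p);
+
+   where __v and __p are hypothetical; note the sequence is not atomic. */
+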
+static void __ATTRS_o_ai vec_stvlx(vector signed char __a, int __b,
+ signed char *__c) {
+ return vec_st(vec_perm(vec_lvrx(__b, __c), __a, vec_lvsr(__b, __c)), __b,
+ __c);
+}
+
+static void __ATTRS_o_ai vec_stvlx(vector signed char __a, int __b,
+ vector signed char *__c) {
+ return vec_st(
+ vec_perm(vec_lvrx(__b, __c), __a, vec_lvsr(__b, (unsigned char *)__c)),
+ __b, __c);
+}
+
+static void __ATTRS_o_ai vec_stvlx(vector unsigned char __a, int __b,
+ unsigned char *__c) {
+ return vec_st(vec_perm(vec_lvrx(__b, __c), __a, vec_lvsr(__b, __c)), __b,
+ __c);
+}
+
+static void __ATTRS_o_ai vec_stvlx(vector unsigned char __a, int __b,
+ vector unsigned char *__c) {
+ return vec_st(
+ vec_perm(vec_lvrx(__b, __c), __a, vec_lvsr(__b, (unsigned char *)__c)),
+ __b, __c);
+}
+
+static void __ATTRS_o_ai vec_stvlx(vector bool char __a, int __b,
+ vector bool char *__c) {
+ return vec_st(
+ vec_perm(vec_lvrx(__b, __c), __a, vec_lvsr(__b, (unsigned char *)__c)),
+ __b, __c);
+}
+
+static void __ATTRS_o_ai vec_stvlx(vector short __a, int __b, short *__c) {
+ return vec_st(vec_perm(vec_lvrx(__b, __c), __a, vec_lvsr(__b, __c)), __b,
+ __c);
+}
+
+static void __ATTRS_o_ai vec_stvlx(vector short __a, int __b,
+ vector short *__c) {
+ return vec_st(
+ vec_perm(vec_lvrx(__b, __c), __a, vec_lvsr(__b, (unsigned char *)__c)),
+ __b, __c);
+}
+
+static void __ATTRS_o_ai vec_stvlx(vector unsigned short __a, int __b,
+ unsigned short *__c) {
+ return vec_st(vec_perm(vec_lvrx(__b, __c), __a, vec_lvsr(__b, __c)), __b,
+ __c);
+}
+
+static void __ATTRS_o_ai vec_stvlx(vector unsigned short __a, int __b,
+ vector unsigned short *__c) {
+ return vec_st(
+ vec_perm(vec_lvrx(__b, __c), __a, vec_lvsr(__b, (unsigned char *)__c)),
+ __b, __c);
+}
+
+static void __ATTRS_o_ai vec_stvlx(vector bool short __a, int __b,
+ vector bool short *__c) {
+ return vec_st(
+ vec_perm(vec_lvrx(__b, __c), __a, vec_lvsr(__b, (unsigned char *)__c)),
+ __b, __c);
+}
+
+static void __ATTRS_o_ai vec_stvlx(vector pixel __a, int __b,
+ vector pixel *__c) {
+ return vec_st(
+ vec_perm(vec_lvrx(__b, __c), __a, vec_lvsr(__b, (unsigned char *)__c)),
+ __b, __c);
+}
+
+static void __ATTRS_o_ai vec_stvlx(vector int __a, int __b, int *__c) {
+ return vec_st(vec_perm(vec_lvrx(__b, __c), __a, vec_lvsr(__b, __c)), __b,
+ __c);
+}
+
+static void __ATTRS_o_ai vec_stvlx(vector int __a, int __b, vector int *__c) {
+ return vec_st(
+ vec_perm(vec_lvrx(__b, __c), __a, vec_lvsr(__b, (unsigned char *)__c)),
+ __b, __c);
+}
+
+static void __ATTRS_o_ai vec_stvlx(vector unsigned int __a, int __b,
+ unsigned int *__c) {
+ return vec_st(vec_perm(vec_lvrx(__b, __c), __a, vec_lvsr(__b, __c)), __b,
+ __c);
+}
+
+static void __ATTRS_o_ai vec_stvlx(vector unsigned int __a, int __b,
+ vector unsigned int *__c) {
+ return vec_st(
+ vec_perm(vec_lvrx(__b, __c), __a, vec_lvsr(__b, (unsigned char *)__c)),
+ __b, __c);
+}
+
+static void __ATTRS_o_ai vec_stvlx(vector bool int __a, int __b,
+ vector bool int *__c) {
+ return vec_st(
+ vec_perm(vec_lvrx(__b, __c), __a, vec_lvsr(__b, (unsigned char *)__c)),
+ __b, __c);
+}
+
+static void __ATTRS_o_ai vec_stvlx(vector float __a, int __b,
+ vector float *__c) {
+ return vec_st(
+ vec_perm(vec_lvrx(__b, __c), __a, vec_lvsr(__b, (unsigned char *)__c)),
+ __b, __c);
+}
+
+/* vec_stvlxl */
+
+static void __ATTRS_o_ai vec_stvlxl(vector signed char __a, int __b,
+ signed char *__c) {
+ return vec_stl(vec_perm(vec_lvrx(__b, __c), __a, vec_lvsr(__b, __c)), __b,
+ __c);
+}
+
+static void __ATTRS_o_ai vec_stvlxl(vector signed char __a, int __b,
+ vector signed char *__c) {
+ return vec_stl(
+ vec_perm(vec_lvrx(__b, __c), __a, vec_lvsr(__b, (unsigned char *)__c)),
+ __b, __c);
+}
+
+static void __ATTRS_o_ai vec_stvlxl(vector unsigned char __a, int __b,
+ unsigned char *__c) {
+ return vec_stl(vec_perm(vec_lvrx(__b, __c), __a, vec_lvsr(__b, __c)), __b,
+ __c);
+}
+
+static void __ATTRS_o_ai vec_stvlxl(vector unsigned char __a, int __b,
+ vector unsigned char *__c) {
+ return vec_stl(
+ vec_perm(vec_lvrx(__b, __c), __a, vec_lvsr(__b, (unsigned char *)__c)),
+ __b, __c);
+}
+
+static void __ATTRS_o_ai vec_stvlxl(vector bool char __a, int __b,
+ vector bool char *__c) {
+ return vec_stl(
+ vec_perm(vec_lvrx(__b, __c), __a, vec_lvsr(__b, (unsigned char *)__c)),
+ __b, __c);
+}
+
+static void __ATTRS_o_ai vec_stvlxl(vector short __a, int __b, short *__c) {
+ return vec_stl(vec_perm(vec_lvrx(__b, __c), __a, vec_lvsr(__b, __c)), __b,
+ __c);
+}
+
+static void __ATTRS_o_ai vec_stvlxl(vector short __a, int __b,
+ vector short *__c) {
+ return vec_stl(
+ vec_perm(vec_lvrx(__b, __c), __a, vec_lvsr(__b, (unsigned char *)__c)),
+ __b, __c);
+}
+
+static void __ATTRS_o_ai vec_stvlxl(vector unsigned short __a, int __b,
+ unsigned short *__c) {
+ return vec_stl(vec_perm(vec_lvrx(__b, __c), __a, vec_lvsr(__b, __c)), __b,
+ __c);
+}
+
+static void __ATTRS_o_ai vec_stvlxl(vector unsigned short __a, int __b,
+ vector unsigned short *__c) {
+ return vec_stl(
+ vec_perm(vec_lvrx(__b, __c), __a, vec_lvsr(__b, (unsigned char *)__c)),
+ __b, __c);
+}
+
+static void __ATTRS_o_ai vec_stvlxl(vector bool short __a, int __b,
+ vector bool short *__c) {
+ return vec_stl(
+ vec_perm(vec_lvrx(__b, __c), __a, vec_lvsr(__b, (unsigned char *)__c)),
+ __b, __c);
+}
+
+static void __ATTRS_o_ai vec_stvlxl(vector pixel __a, int __b,
+ vector pixel *__c) {
+ return vec_stl(
+ vec_perm(vec_lvrx(__b, __c), __a, vec_lvsr(__b, (unsigned char *)__c)),
+ __b, __c);
+}
+
+static void __ATTRS_o_ai vec_stvlxl(vector int __a, int __b, int *__c) {
+ return vec_stl(vec_perm(vec_lvrx(__b, __c), __a, vec_lvsr(__b, __c)), __b,
+ __c);
+}
+
+static void __ATTRS_o_ai vec_stvlxl(vector int __a, int __b, vector int *__c) {
+ return vec_stl(
+ vec_perm(vec_lvrx(__b, __c), __a, vec_lvsr(__b, (unsigned char *)__c)),
+ __b, __c);
+}
+
+static void __ATTRS_o_ai vec_stvlxl(vector unsigned int __a, int __b,
+ unsigned int *__c) {
+ return vec_stl(vec_perm(vec_lvrx(__b, __c), __a, vec_lvsr(__b, __c)), __b,
+ __c);
+}
+
+static void __ATTRS_o_ai vec_stvlxl(vector unsigned int __a, int __b,
+ vector unsigned int *__c) {
+ return vec_stl(
+ vec_perm(vec_lvrx(__b, __c), __a, vec_lvsr(__b, (unsigned char *)__c)),
+ __b, __c);
+}
+
+static void __ATTRS_o_ai vec_stvlxl(vector bool int __a, int __b,
+ vector bool int *__c) {
+ return vec_stl(
+ vec_perm(vec_lvrx(__b, __c), __a, vec_lvsr(__b, (unsigned char *)__c)),
+ __b, __c);
+}
+
+static void __ATTRS_o_ai vec_stvlxl(vector float __a, int __b,
+ vector float *__c) {
+ return vec_stl(
+ vec_perm(vec_lvrx(__b, __c), __a, vec_lvsr(__b, (unsigned char *)__c)),
+ __b, __c);
+}
+
+/* vec_stvrx */
+
+static void __ATTRS_o_ai vec_stvrx(vector signed char __a, int __b,
+ signed char *__c) {
+ return vec_st(vec_perm(__a, vec_lvlx(__b, __c), vec_lvsr(__b, __c)), __b,
+ __c);
+}
+
+static void __ATTRS_o_ai vec_stvrx(vector signed char __a, int __b,
+ vector signed char *__c) {
+ return vec_st(
+ vec_perm(__a, vec_lvlx(__b, __c), vec_lvsr(__b, (unsigned char *)__c)),
+ __b, __c);
+}
+
+static void __ATTRS_o_ai vec_stvrx(vector unsigned char __a, int __b,
+ unsigned char *__c) {
+ return vec_st(vec_perm(__a, vec_lvlx(__b, __c), vec_lvsr(__b, __c)), __b,
+ __c);
+}
+
+static void __ATTRS_o_ai vec_stvrx(vector unsigned char __a, int __b,
+ vector unsigned char *__c) {
+ return vec_st(
+ vec_perm(__a, vec_lvlx(__b, __c), vec_lvsr(__b, (unsigned char *)__c)),
+ __b, __c);
+}
+
+static void __ATTRS_o_ai vec_stvrx(vector bool char __a, int __b,
+ vector bool char *__c) {
+ return vec_st(
+ vec_perm(__a, vec_lvlx(__b, __c), vec_lvsr(__b, (unsigned char *)__c)),
+ __b, __c);
+}
+
+static void __ATTRS_o_ai vec_stvrx(vector short __a, int __b, short *__c) {
+ return vec_st(vec_perm(__a, vec_lvlx(__b, __c), vec_lvsr(__b, __c)), __b,
+ __c);
+}
+
+static void __ATTRS_o_ai vec_stvrx(vector short __a, int __b,
+ vector short *__c) {
+ return vec_st(
+ vec_perm(__a, vec_lvlx(__b, __c), vec_lvsr(__b, (unsigned char *)__c)),
+ __b, __c);
+}
+
+static void __ATTRS_o_ai vec_stvrx(vector unsigned short __a, int __b,
+ unsigned short *__c) {
+ return vec_st(vec_perm(__a, vec_lvlx(__b, __c), vec_lvsr(__b, __c)), __b,
+ __c);
+}
+
+static void __ATTRS_o_ai vec_stvrx(vector unsigned short __a, int __b,
+ vector unsigned short *__c) {
+ return vec_st(
+ vec_perm(__a, vec_lvlx(__b, __c), vec_lvsr(__b, (unsigned char *)__c)),
+ __b, __c);
+}
+
+static void __ATTRS_o_ai vec_stvrx(vector bool short __a, int __b,
+ vector bool short *__c) {
+ return vec_st(
+ vec_perm(__a, vec_lvlx(__b, __c), vec_lvsr(__b, (unsigned char *)__c)),
+ __b, __c);
+}
+
+static void __ATTRS_o_ai vec_stvrx(vector pixel __a, int __b,
+ vector pixel *__c) {
+ return vec_st(
+ vec_perm(__a, vec_lvlx(__b, __c), vec_lvsr(__b, (unsigned char *)__c)),
+ __b, __c);
+}
+
+static void __ATTRS_o_ai vec_stvrx(vector int __a, int __b, int *__c) {
+ return vec_st(vec_perm(__a, vec_lvlx(__b, __c), vec_lvsr(__b, __c)), __b,
+ __c);
+}
+
+static void __ATTRS_o_ai vec_stvrx(vector int __a, int __b, vector int *__c) {
+ return vec_st(
+ vec_perm(__a, vec_lvlx(__b, __c), vec_lvsr(__b, (unsigned char *)__c)),
+ __b, __c);
+}
+
+static void __ATTRS_o_ai vec_stvrx(vector unsigned int __a, int __b,
+ unsigned int *__c) {
+ return vec_st(vec_perm(__a, vec_lvlx(__b, __c), vec_lvsr(__b, __c)), __b,
+ __c);
+}
+
+static void __ATTRS_o_ai vec_stvrx(vector unsigned int __a, int __b,
+ vector unsigned int *__c) {
+ return vec_st(
+ vec_perm(__a, vec_lvlx(__b, __c), vec_lvsr(__b, (unsigned char *)__c)),
+ __b, __c);
+}
+
+static void __ATTRS_o_ai vec_stvrx(vector bool int __a, int __b,
+ vector bool int *__c) {
+ return vec_st(
+ vec_perm(__a, vec_lvlx(__b, __c), vec_lvsr(__b, (unsigned char *)__c)),
+ __b, __c);
+}
+
+static void __ATTRS_o_ai vec_stvrx(vector float __a, int __b,
+ vector float *__c) {
+ return vec_st(
+ vec_perm(__a, vec_lvlx(__b, __c), vec_lvsr(__b, (unsigned char *)__c)),
+ __b, __c);
+}
+
+/* vec_stvrxl */
+
+static void __ATTRS_o_ai vec_stvrxl(vector signed char __a, int __b,
+ signed char *__c) {
+ return vec_stl(vec_perm(__a, vec_lvlx(__b, __c), vec_lvsr(__b, __c)), __b,
+ __c);
+}
+
+static void __ATTRS_o_ai vec_stvrxl(vector signed char __a, int __b,
+ vector signed char *__c) {
+ return vec_stl(
+ vec_perm(__a, vec_lvlx(__b, __c), vec_lvsr(__b, (unsigned char *)__c)),
+ __b, __c);
+}
+
+static void __ATTRS_o_ai vec_stvrxl(vector unsigned char __a, int __b,
+ unsigned char *__c) {
+ return vec_stl(vec_perm(__a, vec_lvlx(__b, __c), vec_lvsr(__b, __c)), __b,
+ __c);
+}
+
+static void __ATTRS_o_ai vec_stvrxl(vector unsigned char __a, int __b,
+ vector unsigned char *__c) {
+ return vec_stl(
+ vec_perm(__a, vec_lvlx(__b, __c), vec_lvsr(__b, (unsigned char *)__c)),
+ __b, __c);
+}
+
+static void __ATTRS_o_ai vec_stvrxl(vector bool char __a, int __b,
+ vector bool char *__c) {
+ return vec_stl(
+ vec_perm(__a, vec_lvlx(__b, __c), vec_lvsr(__b, (unsigned char *)__c)),
+ __b, __c);
+}
+
+static void __ATTRS_o_ai vec_stvrxl(vector short __a, int __b, short *__c) {
+ return vec_stl(vec_perm(__a, vec_lvlx(__b, __c), vec_lvsr(__b, __c)), __b,
+ __c);
+}
+
+static void __ATTRS_o_ai vec_stvrxl(vector short __a, int __b,
+ vector short *__c) {
+ return vec_stl(
+ vec_perm(__a, vec_lvlx(__b, __c), vec_lvsr(__b, (unsigned char *)__c)),
+ __b, __c);
+}
+
+static void __ATTRS_o_ai vec_stvrxl(vector unsigned short __a, int __b,
+ unsigned short *__c) {
+ return vec_stl(vec_perm(__a, vec_lvlx(__b, __c), vec_lvsr(__b, __c)), __b,
+ __c);
+}
+
+static void __ATTRS_o_ai vec_stvrxl(vector unsigned short __a, int __b,
+ vector unsigned short *__c) {
+ return vec_stl(
+ vec_perm(__a, vec_lvlx(__b, __c), vec_lvsr(__b, (unsigned char *)__c)),
+ __b, __c);
+}
+
+static void __ATTRS_o_ai vec_stvrxl(vector bool short __a, int __b,
+ vector bool short *__c) {
+ return vec_stl(
+ vec_perm(__a, vec_lvlx(__b, __c), vec_lvsr(__b, (unsigned char *)__c)),
+ __b, __c);
+}
+
+static void __ATTRS_o_ai vec_stvrxl(vector pixel __a, int __b,
+ vector pixel *__c) {
+ return vec_stl(
+ vec_perm(__a, vec_lvlx(__b, __c), vec_lvsr(__b, (unsigned char *)__c)),
+ __b, __c);
+}
+
+static void __ATTRS_o_ai vec_stvrxl(vector int __a, int __b, int *__c) {
+ return vec_stl(vec_perm(__a, vec_lvlx(__b, __c), vec_lvsr(__b, __c)), __b,
+ __c);
+}
+
+static void __ATTRS_o_ai vec_stvrxl(vector int __a, int __b, vector int *__c) {
+ return vec_stl(
+ vec_perm(__a, vec_lvlx(__b, __c), vec_lvsr(__b, (unsigned char *)__c)),
+ __b, __c);
+}
+
+static void __ATTRS_o_ai vec_stvrxl(vector unsigned int __a, int __b,
+ unsigned int *__c) {
+ return vec_stl(vec_perm(__a, vec_lvlx(__b, __c), vec_lvsr(__b, __c)), __b,
+ __c);
+}
+
+static void __ATTRS_o_ai vec_stvrxl(vector unsigned int __a, int __b,
+ vector unsigned int *__c) {
+ return vec_stl(
+ vec_perm(__a, vec_lvlx(__b, __c), vec_lvsr(__b, (unsigned char *)__c)),
+ __b, __c);
+}
+
+static void __ATTRS_o_ai vec_stvrxl(vector bool int __a, int __b,
+ vector bool int *__c) {
+ return vec_stl(
+ vec_perm(__a, vec_lvlx(__b, __c), vec_lvsr(__b, (unsigned char *)__c)),
+ __b, __c);
+}
+
+static void __ATTRS_o_ai vec_stvrxl(vector float __a, int __b,
+ vector float *__c) {
+ return vec_stl(
+ vec_perm(__a, vec_lvlx(__b, __c), vec_lvsr(__b, (unsigned char *)__c)),
+ __b, __c);
+}
+
+/* vec_promote */
+
+static vector signed char __ATTRS_o_ai vec_promote(signed char __a, int __b) {
+ vector signed char __res = (vector signed char)(0);
+ __res[__b] = __a;
+ return __res;
+}
+
+static vector unsigned char __ATTRS_o_ai vec_promote(unsigned char __a,
+ int __b) {
+ vector unsigned char __res = (vector unsigned char)(0);
+ __res[__b] = __a;
+ return __res;
+}
+
+static vector short __ATTRS_o_ai vec_promote(short __a, int __b) {
+ vector short __res = (vector short)(0);
+ __res[__b] = __a;
+ return __res;
+}
+
+static vector unsigned short __ATTRS_o_ai vec_promote(unsigned short __a,
+ int __b) {
+ vector unsigned short __res = (vector unsigned short)(0);
+ __res[__b] = __a;
+ return __res;
+}
+
+static vector int __ATTRS_o_ai vec_promote(int __a, int __b) {
+ vector int __res = (vector int)(0);
+ __res[__b] = __a;
+ return __res;
+}
+
+static vector unsigned int __ATTRS_o_ai vec_promote(unsigned int __a, int __b) {
+ vector unsigned int __res = (vector unsigned int)(0);
+ __res[__b] = __a;
+ return __res;
+}
+
+static vector float __ATTRS_o_ai vec_promote(float __a, int __b) {
+ vector float __res = (vector float)(0);
+ __res[__b] = __a;
+ return __res;
+}
+
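+/* Usage sketch (not part of the original header): the CBEA leaves the
+   remaining elements of vec_promote undefined (this implementation
+   zeroes them), so a portable scalar splat combines it with vec_splat:
+
+     vector float __v = vec_splat(vec_promote(__x, 0), 0);
+
+   where __x is a hypothetical float. */
+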
+/* vec_splats */
+
+static vector signed char __ATTRS_o_ai vec_splats(signed char __a) {
+ return (vector signed char)(__a);
+}
+
+static vector unsigned char __ATTRS_o_ai vec_splats(unsigned char __a) {
+ return (vector unsigned char)(__a);
+}
+
+static vector short __ATTRS_o_ai vec_splats(short __a) {
+ return (vector short)(__a);
+}
+
+static vector unsigned short __ATTRS_o_ai vec_splats(unsigned short __a) {
+ return (vector unsigned short)(__a);
+}
+
+static vector int __ATTRS_o_ai vec_splats(int __a) { return (vector int)(__a); }
+
+static vector unsigned int __ATTRS_o_ai vec_splats(unsigned int __a) {
+ return (vector unsigned int)(__a);
+}
+
+#ifdef __VSX__
+static vector signed long long __ATTRS_o_ai vec_splats(signed long long __a) {
+ return (vector signed long long)(__a);
+}
+
+static vector unsigned long long __ATTRS_o_ai
+vec_splats(unsigned long long __a) {
+ return (vector unsigned long long)(__a);
+}
+
+#if defined(__POWER8_VECTOR__) && defined(__powerpc64__)
+static vector signed __int128 __ATTRS_o_ai vec_splats(signed __int128 __a) {
+ return (vector signed __int128)(__a);
+}
+
+static vector unsigned __int128 __ATTRS_o_ai
+vec_splats(unsigned __int128 __a) {
+ return (vector unsigned __int128)(__a);
+}
+
+#endif
+
+static vector double __ATTRS_o_ai vec_splats(double __a) {
+ return (vector double)(__a);
+}
+#endif
+
+static vector float __ATTRS_o_ai vec_splats(float __a) {
+ return (vector float)(__a);
+}
+
+/* ----------------------------- predicates --------------------------------- */
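+
+/* All predicates below wrap an AltiVec/VSX "compare and record" builtin
+   (the _p forms), which set condition-register field CR6 and return the
+   requested bit of it:
+
+     __CR6_LT      1 when the compare held for ALL elements
+     __CR6_EQ      1 when the compare held for NO element
+     __CR6_LT_REV  1 when it failed for at least one element
+     __CR6_EQ_REV  1 when it held for at least one element
+
+   Everything else is choosing a compare and, where needed, swapping its
+   operands. */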
+
+/* vec_all_eq */
+
+static int __ATTRS_o_ai vec_all_eq(vector signed char __a,
+ vector signed char __b) {
+ return __builtin_altivec_vcmpequb_p(__CR6_LT, (vector char)__a,
+ (vector char)__b);
+}
+
+static int __ATTRS_o_ai vec_all_eq(vector signed char __a,
+ vector bool char __b) {
+ return __builtin_altivec_vcmpequb_p(__CR6_LT, (vector char)__a,
+ (vector char)__b);
+}
+
+static int __ATTRS_o_ai vec_all_eq(vector unsigned char __a,
+ vector unsigned char __b) {
+ return __builtin_altivec_vcmpequb_p(__CR6_LT, (vector char)__a,
+ (vector char)__b);
+}
+
+static int __ATTRS_o_ai vec_all_eq(vector unsigned char __a,
+ vector bool char __b) {
+ return __builtin_altivec_vcmpequb_p(__CR6_LT, (vector char)__a,
+ (vector char)__b);
+}
+
+static int __ATTRS_o_ai vec_all_eq(vector bool char __a,
+ vector signed char __b) {
+ return __builtin_altivec_vcmpequb_p(__CR6_LT, (vector char)__a,
+ (vector char)__b);
+}
+
+static int __ATTRS_o_ai vec_all_eq(vector bool char __a,
+ vector unsigned char __b) {
+ return __builtin_altivec_vcmpequb_p(__CR6_LT, (vector char)__a,
+ (vector char)__b);
+}
+
+static int __ATTRS_o_ai vec_all_eq(vector bool char __a, vector bool char __b) {
+ return __builtin_altivec_vcmpequb_p(__CR6_LT, (vector char)__a,
+ (vector char)__b);
+}
+
+static int __ATTRS_o_ai vec_all_eq(vector short __a, vector short __b) {
+ return __builtin_altivec_vcmpequh_p(__CR6_LT, __a, __b);
+}
+
+static int __ATTRS_o_ai vec_all_eq(vector short __a, vector bool short __b) {
+ return __builtin_altivec_vcmpequh_p(__CR6_LT, __a, (vector short)__b);
+}
+
+static int __ATTRS_o_ai vec_all_eq(vector unsigned short __a,
+ vector unsigned short __b) {
+ return __builtin_altivec_vcmpequh_p(__CR6_LT, (vector short)__a,
+ (vector short)__b);
+}
+
+static int __ATTRS_o_ai vec_all_eq(vector unsigned short __a,
+ vector bool short __b) {
+ return __builtin_altivec_vcmpequh_p(__CR6_LT, (vector short)__a,
+ (vector short)__b);
+}
+
+static int __ATTRS_o_ai vec_all_eq(vector bool short __a, vector short __b) {
+ return __builtin_altivec_vcmpequh_p(__CR6_LT, (vector short)__a,
+ (vector short)__b);
+}
+
+static int __ATTRS_o_ai vec_all_eq(vector bool short __a,
+ vector unsigned short __b) {
+ return __builtin_altivec_vcmpequh_p(__CR6_LT, (vector short)__a,
+ (vector short)__b);
+}
+
+static int __ATTRS_o_ai vec_all_eq(vector bool short __a,
+ vector bool short __b) {
+ return __builtin_altivec_vcmpequh_p(__CR6_LT, (vector short)__a,
+ (vector short)__b);
+}
+
+static int __ATTRS_o_ai vec_all_eq(vector pixel __a, vector pixel __b) {
+ return __builtin_altivec_vcmpequh_p(__CR6_LT, (vector short)__a,
+ (vector short)__b);
+}
+
+static int __ATTRS_o_ai vec_all_eq(vector int __a, vector int __b) {
+ return __builtin_altivec_vcmpequw_p(__CR6_LT, __a, __b);
+}
+
+static int __ATTRS_o_ai vec_all_eq(vector int __a, vector bool int __b) {
+ return __builtin_altivec_vcmpequw_p(__CR6_LT, __a, (vector int)__b);
+}
+
+static int __ATTRS_o_ai vec_all_eq(vector unsigned int __a,
+ vector unsigned int __b) {
+ return __builtin_altivec_vcmpequw_p(__CR6_LT, (vector int)__a,
+ (vector int)__b);
+}
+
+static int __ATTRS_o_ai vec_all_eq(vector unsigned int __a,
+ vector bool int __b) {
+ return __builtin_altivec_vcmpequw_p(__CR6_LT, (vector int)__a,
+ (vector int)__b);
+}
+
+static int __ATTRS_o_ai vec_all_eq(vector bool int __a, vector int __b) {
+ return __builtin_altivec_vcmpequw_p(__CR6_LT, (vector int)__a,
+ (vector int)__b);
+}
+
+static int __ATTRS_o_ai vec_all_eq(vector bool int __a,
+ vector unsigned int __b) {
+ return __builtin_altivec_vcmpequw_p(__CR6_LT, (vector int)__a,
+ (vector int)__b);
+}
+
+static int __ATTRS_o_ai vec_all_eq(vector bool int __a, vector bool int __b) {
+ return __builtin_altivec_vcmpequw_p(__CR6_LT, (vector int)__a,
+ (vector int)__b);
+}
+
+#ifdef __POWER8_VECTOR__
+static int __ATTRS_o_ai vec_all_eq(vector signed long long __a,
+ vector signed long long __b) {
+ return __builtin_altivec_vcmpequd_p(__CR6_LT, __a, __b);
+}
+
+static int __ATTRS_o_ai vec_all_eq(vector signed long long __a,
+                                   vector bool long long __b) {
+ return __builtin_altivec_vcmpequd_p(__CR6_LT, __a, (vector long long)__b);
+}
+
+static int __ATTRS_o_ai vec_all_eq(vector unsigned long long __a,
+ vector unsigned long long __b) {
+ return __builtin_altivec_vcmpequd_p(__CR6_LT, (vector long long)__a,
+ (vector long long)__b);
+}
+
+static int __ATTRS_o_ai vec_all_eq(vector unsigned long long __a,
+ vector bool long long __b) {
+ return __builtin_altivec_vcmpequd_p(__CR6_LT, (vector long long)__a,
+ (vector long long)__b);
+}
+
+static int __ATTRS_o_ai vec_all_eq(vector bool long long __a,
+                                   vector signed long long __b) {
+ return __builtin_altivec_vcmpequd_p(__CR6_LT, (vector long long)__a,
+ (vector long long)__b);
+}
+
+static int __ATTRS_o_ai vec_all_eq(vector bool long long __a,
+ vector unsigned long long __b) {
+ return __builtin_altivec_vcmpequd_p(__CR6_LT, (vector long long)__a,
+ (vector long long)__b);
+}
+
+static int __ATTRS_o_ai vec_all_eq(vector bool long long __a,
+ vector bool long long __b) {
+ return __builtin_altivec_vcmpequd_p(__CR6_LT, (vector long long)__a,
+ (vector long long)__b);
+}
+#endif
+
+static int __ATTRS_o_ai vec_all_eq(vector float __a, vector float __b) {
+#ifdef __VSX__
+ return __builtin_vsx_xvcmpeqsp_p(__CR6_LT, __a, __b);
+#else
+ return __builtin_altivec_vcmpeqfp_p(__CR6_LT, __a, __b);
+#endif
+}
+
+#ifdef __VSX__
+static int __ATTRS_o_ai vec_all_eq(vector double __a, vector double __b) {
+ return __builtin_vsx_xvcmpeqdp_p(__CR6_LT, __a, __b);
+}
+#endif
+
+/* vec_all_ge */
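+/* There is no integer >= compare instruction, so all(a >= b) is encoded
+   as "b > a held for no element": the operands are swapped and the result
+   read from __CR6_EQ. In scalar terms, roughly:
+
+     int all_ge = 1;
+     for (int i = 0; i < n; ++i)
+       if (b[i] > a[i]) all_ge = 0;
+
+   The float/double overloads instead use the native >= compares
+   (vcmpgefp / xvcmpge*) with __CR6_LT. */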
+
+static int __ATTRS_o_ai vec_all_ge(vector signed char __a,
+ vector signed char __b) {
+ return __builtin_altivec_vcmpgtsb_p(__CR6_EQ, __b, __a);
+}
+
+static int __ATTRS_o_ai vec_all_ge(vector signed char __a,
+ vector bool char __b) {
+ return __builtin_altivec_vcmpgtsb_p(__CR6_EQ, (vector signed char)__b, __a);
+}
+
+static int __ATTRS_o_ai vec_all_ge(vector unsigned char __a,
+ vector unsigned char __b) {
+ return __builtin_altivec_vcmpgtub_p(__CR6_EQ, __b, __a);
+}
+
+static int __ATTRS_o_ai vec_all_ge(vector unsigned char __a,
+ vector bool char __b) {
+ return __builtin_altivec_vcmpgtub_p(__CR6_EQ, (vector unsigned char)__b, __a);
+}
+
+static int __ATTRS_o_ai vec_all_ge(vector bool char __a,
+ vector signed char __b) {
+ return __builtin_altivec_vcmpgtub_p(__CR6_EQ, (vector unsigned char)__b,
+ (vector unsigned char)__a);
+}
+
+static int __ATTRS_o_ai vec_all_ge(vector bool char __a,
+ vector unsigned char __b) {
+ return __builtin_altivec_vcmpgtub_p(__CR6_EQ, __b, (vector unsigned char)__a);
+}
+
+static int __ATTRS_o_ai vec_all_ge(vector bool char __a, vector bool char __b) {
+ return __builtin_altivec_vcmpgtub_p(__CR6_EQ, (vector unsigned char)__b,
+ (vector unsigned char)__a);
+}
+
+static int __ATTRS_o_ai vec_all_ge(vector short __a, vector short __b) {
+ return __builtin_altivec_vcmpgtsh_p(__CR6_EQ, __b, __a);
+}
+
+static int __ATTRS_o_ai vec_all_ge(vector short __a, vector bool short __b) {
+ return __builtin_altivec_vcmpgtsh_p(__CR6_EQ, (vector short)__b, __a);
+}
+
+static int __ATTRS_o_ai vec_all_ge(vector unsigned short __a,
+ vector unsigned short __b) {
+ return __builtin_altivec_vcmpgtuh_p(__CR6_EQ, __b, __a);
+}
+
+static int __ATTRS_o_ai vec_all_ge(vector unsigned short __a,
+ vector bool short __b) {
+ return __builtin_altivec_vcmpgtuh_p(__CR6_EQ, (vector unsigned short)__b,
+ __a);
+}
+
+static int __ATTRS_o_ai vec_all_ge(vector bool short __a, vector short __b) {
+ return __builtin_altivec_vcmpgtuh_p(__CR6_EQ, (vector unsigned short)__b,
+ (vector unsigned short)__a);
+}
+
+static int __ATTRS_o_ai vec_all_ge(vector bool short __a,
+ vector unsigned short __b) {
+ return __builtin_altivec_vcmpgtuh_p(__CR6_EQ, __b,
+ (vector unsigned short)__a);
+}
+
+static int __ATTRS_o_ai vec_all_ge(vector bool short __a,
+ vector bool short __b) {
+ return __builtin_altivec_vcmpgtuh_p(__CR6_EQ, (vector unsigned short)__b,
+ (vector unsigned short)__a);
+}
+
+static int __ATTRS_o_ai vec_all_ge(vector int __a, vector int __b) {
+ return __builtin_altivec_vcmpgtsw_p(__CR6_EQ, __b, __a);
+}
+
+static int __ATTRS_o_ai vec_all_ge(vector int __a, vector bool int __b) {
+ return __builtin_altivec_vcmpgtsw_p(__CR6_EQ, (vector int)__b, __a);
+}
+
+static int __ATTRS_o_ai vec_all_ge(vector unsigned int __a,
+ vector unsigned int __b) {
+ return __builtin_altivec_vcmpgtuw_p(__CR6_EQ, __b, __a);
+}
+
+static int __ATTRS_o_ai vec_all_ge(vector unsigned int __a,
+ vector bool int __b) {
+ return __builtin_altivec_vcmpgtuw_p(__CR6_EQ, (vector unsigned int)__b, __a);
+}
+
+static int __ATTRS_o_ai vec_all_ge(vector bool int __a, vector int __b) {
+ return __builtin_altivec_vcmpgtuw_p(__CR6_EQ, (vector unsigned int)__b,
+ (vector unsigned int)__a);
+}
+
+static int __ATTRS_o_ai vec_all_ge(vector bool int __a,
+ vector unsigned int __b) {
+ return __builtin_altivec_vcmpgtuw_p(__CR6_EQ, __b, (vector unsigned int)__a);
+}
+
+static int __ATTRS_o_ai vec_all_ge(vector bool int __a, vector bool int __b) {
+ return __builtin_altivec_vcmpgtuw_p(__CR6_EQ, (vector unsigned int)__b,
+ (vector unsigned int)__a);
+}
+
+#ifdef __POWER8_VECTOR__
+static int __ATTRS_o_ai vec_all_ge(vector signed long long __a,
+ vector signed long long __b) {
+ return __builtin_altivec_vcmpgtsd_p(__CR6_EQ, __b, __a);
+}
+static int __ATTRS_o_ai vec_all_ge(vector signed long long __a,
+ vector bool long long __b) {
+ return __builtin_altivec_vcmpgtsd_p(__CR6_EQ, (vector signed long long)__b,
+ __a);
+}
+
+static int __ATTRS_o_ai vec_all_ge(vector unsigned long long __a,
+ vector unsigned long long __b) {
+ return __builtin_altivec_vcmpgtud_p(__CR6_EQ, __b, __a);
+}
+
+static int __ATTRS_o_ai vec_all_ge(vector unsigned long long __a,
+ vector bool long long __b) {
+ return __builtin_altivec_vcmpgtud_p(__CR6_EQ, (vector unsigned long long)__b,
+ __a);
+}
+
+static int __ATTRS_o_ai vec_all_ge(vector bool long long __a,
+ vector signed long long __b) {
+ return __builtin_altivec_vcmpgtud_p(__CR6_EQ, (vector unsigned long long)__b,
+ (vector unsigned long long)__a);
+}
+
+static int __ATTRS_o_ai vec_all_ge(vector bool long long __a,
+ vector unsigned long long __b) {
+ return __builtin_altivec_vcmpgtud_p(__CR6_EQ, __b,
+ (vector unsigned long long)__a);
+}
+
+static int __ATTRS_o_ai vec_all_ge(vector bool long long __a,
+ vector bool long long __b) {
+ return __builtin_altivec_vcmpgtud_p(__CR6_EQ, (vector unsigned long long)__b,
+ (vector unsigned long long)__a);
+}
+#endif
+
+static int __ATTRS_o_ai vec_all_ge(vector float __a, vector float __b) {
+#ifdef __VSX__
+ return __builtin_vsx_xvcmpgesp_p(__CR6_LT, __a, __b);
+#else
+ return __builtin_altivec_vcmpgefp_p(__CR6_LT, __a, __b);
+#endif
+}
+
+#ifdef __VSX__
+static int __ATTRS_o_ai vec_all_ge(vector double __a, vector double __b) {
+ return __builtin_vsx_xvcmpgedp_p(__CR6_LT, __a, __b);
+}
+#endif
+
+/* vec_all_gt */
+
+static int __ATTRS_o_ai vec_all_gt(vector signed char __a,
+ vector signed char __b) {
+ return __builtin_altivec_vcmpgtsb_p(__CR6_LT, __a, __b);
+}
+
+static int __ATTRS_o_ai vec_all_gt(vector signed char __a,
+ vector bool char __b) {
+ return __builtin_altivec_vcmpgtsb_p(__CR6_LT, __a, (vector signed char)__b);
+}
+
+static int __ATTRS_o_ai vec_all_gt(vector unsigned char __a,
+ vector unsigned char __b) {
+ return __builtin_altivec_vcmpgtub_p(__CR6_LT, __a, __b);
+}
+
+static int __ATTRS_o_ai vec_all_gt(vector unsigned char __a,
+ vector bool char __b) {
+ return __builtin_altivec_vcmpgtub_p(__CR6_LT, __a, (vector unsigned char)__b);
+}
+
+static int __ATTRS_o_ai vec_all_gt(vector bool char __a,
+ vector signed char __b) {
+ return __builtin_altivec_vcmpgtub_p(__CR6_LT, (vector unsigned char)__a,
+ (vector unsigned char)__b);
+}
+
+static int __ATTRS_o_ai vec_all_gt(vector bool char __a,
+ vector unsigned char __b) {
+ return __builtin_altivec_vcmpgtub_p(__CR6_LT, (vector unsigned char)__a, __b);
+}
+
+static int __ATTRS_o_ai vec_all_gt(vector bool char __a, vector bool char __b) {
+ return __builtin_altivec_vcmpgtub_p(__CR6_LT, (vector unsigned char)__a,
+ (vector unsigned char)__b);
+}
+
+static int __ATTRS_o_ai vec_all_gt(vector short __a, vector short __b) {
+ return __builtin_altivec_vcmpgtsh_p(__CR6_LT, __a, __b);
+}
+
+static int __ATTRS_o_ai vec_all_gt(vector short __a, vector bool short __b) {
+ return __builtin_altivec_vcmpgtsh_p(__CR6_LT, __a, (vector short)__b);
+}
+
+static int __ATTRS_o_ai vec_all_gt(vector unsigned short __a,
+ vector unsigned short __b) {
+ return __builtin_altivec_vcmpgtuh_p(__CR6_LT, __a, __b);
+}
+
+static int __ATTRS_o_ai vec_all_gt(vector unsigned short __a,
+ vector bool short __b) {
+ return __builtin_altivec_vcmpgtuh_p(__CR6_LT, __a,
+ (vector unsigned short)__b);
+}
+
+static int __ATTRS_o_ai vec_all_gt(vector bool short __a, vector short __b) {
+ return __builtin_altivec_vcmpgtuh_p(__CR6_LT, (vector unsigned short)__a,
+ (vector unsigned short)__b);
+}
+
+static int __ATTRS_o_ai vec_all_gt(vector bool short __a,
+ vector unsigned short __b) {
+ return __builtin_altivec_vcmpgtuh_p(__CR6_LT, (vector unsigned short)__a,
+ __b);
+}
+
+static int __ATTRS_o_ai vec_all_gt(vector bool short __a,
+ vector bool short __b) {
+ return __builtin_altivec_vcmpgtuh_p(__CR6_LT, (vector unsigned short)__a,
+ (vector unsigned short)__b);
+}
+
+static int __ATTRS_o_ai vec_all_gt(vector int __a, vector int __b) {
+ return __builtin_altivec_vcmpgtsw_p(__CR6_LT, __a, __b);
+}
+
+static int __ATTRS_o_ai vec_all_gt(vector int __a, vector bool int __b) {
+ return __builtin_altivec_vcmpgtsw_p(__CR6_LT, __a, (vector int)__b);
+}
+
+static int __ATTRS_o_ai vec_all_gt(vector unsigned int __a,
+ vector unsigned int __b) {
+ return __builtin_altivec_vcmpgtuw_p(__CR6_LT, __a, __b);
+}
+
+static int __ATTRS_o_ai vec_all_gt(vector unsigned int __a,
+ vector bool int __b) {
+ return __builtin_altivec_vcmpgtuw_p(__CR6_LT, __a, (vector unsigned int)__b);
+}
+
+static int __ATTRS_o_ai vec_all_gt(vector bool int __a, vector int __b) {
+ return __builtin_altivec_vcmpgtuw_p(__CR6_LT, (vector unsigned int)__a,
+ (vector unsigned int)__b);
+}
+
+static int __ATTRS_o_ai vec_all_gt(vector bool int __a,
+ vector unsigned int __b) {
+ return __builtin_altivec_vcmpgtuw_p(__CR6_LT, (vector unsigned int)__a, __b);
+}
+
+static int __ATTRS_o_ai vec_all_gt(vector bool int __a, vector bool int __b) {
+ return __builtin_altivec_vcmpgtuw_p(__CR6_LT, (vector unsigned int)__a,
+ (vector unsigned int)__b);
+}
+
+#ifdef __POWER8_VECTOR__
+static int __ATTRS_o_ai vec_all_gt(vector signed long long __a,
+ vector signed long long __b) {
+ return __builtin_altivec_vcmpgtsd_p(__CR6_LT, __a, __b);
+}
+static int __ATTRS_o_ai vec_all_gt(vector signed long long __a,
+ vector bool long long __b) {
+ return __builtin_altivec_vcmpgtsd_p(__CR6_LT, __a,
+ (vector signed long long)__b);
+}
+
+static int __ATTRS_o_ai vec_all_gt(vector unsigned long long __a,
+ vector unsigned long long __b) {
+ return __builtin_altivec_vcmpgtud_p(__CR6_LT, __a, __b);
+}
+
+static int __ATTRS_o_ai vec_all_gt(vector unsigned long long __a,
+ vector bool long long __b) {
+ return __builtin_altivec_vcmpgtud_p(__CR6_LT, __a,
+ (vector unsigned long long)__b);
+}
+
+static int __ATTRS_o_ai vec_all_gt(vector bool long long __a,
+ vector signed long long __b) {
+ return __builtin_altivec_vcmpgtud_p(__CR6_LT, (vector unsigned long long)__a,
+ (vector unsigned long long)__b);
+}
+
+static int __ATTRS_o_ai vec_all_gt(vector bool long long __a,
+ vector unsigned long long __b) {
+ return __builtin_altivec_vcmpgtud_p(__CR6_LT, (vector unsigned long long)__a,
+ __b);
+}
+
+static int __ATTRS_o_ai vec_all_gt(vector bool long long __a,
+ vector bool long long __b) {
+ return __builtin_altivec_vcmpgtud_p(__CR6_LT, (vector unsigned long long)__a,
+ (vector unsigned long long)__b);
+}
+#endif
+
+static int __ATTRS_o_ai vec_all_gt(vector float __a, vector float __b) {
+#ifdef __VSX__
+ return __builtin_vsx_xvcmpgtsp_p(__CR6_LT, __a, __b);
+#else
+ return __builtin_altivec_vcmpgtfp_p(__CR6_LT, __a, __b);
+#endif
+}
+
+#ifdef __VSX__
+static int __ATTRS_o_ai vec_all_gt(vector double __a, vector double __b) {
+ return __builtin_vsx_xvcmpgtdp_p(__CR6_LT, __a, __b);
+}
+#endif
+
+/* vec_all_in */
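+/* vec_all_in answers a bounds check: 1 when every element of __a lies in
+   [-b, b] for the corresponding element of __b. vcmpbfp flags any element
+   with a > b or a < -b, so "no element flagged" (__CR6_EQ) means all are
+   within bounds. */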
+
+static int __attribute__((__always_inline__))
+vec_all_in(vector float __a, vector float __b) {
+ return __builtin_altivec_vcmpbfp_p(__CR6_EQ, __a, __b);
+}
+
+/* vec_all_le */
+
+static int __ATTRS_o_ai vec_all_le(vector signed char __a,
+ vector signed char __b) {
+ return __builtin_altivec_vcmpgtsb_p(__CR6_EQ, __a, __b);
+}
+
+static int __ATTRS_o_ai vec_all_le(vector signed char __a,
+ vector bool char __b) {
+ return __builtin_altivec_vcmpgtsb_p(__CR6_EQ, __a, (vector signed char)__b);
+}
+
+static int __ATTRS_o_ai vec_all_le(vector unsigned char __a,
+ vector unsigned char __b) {
+ return __builtin_altivec_vcmpgtub_p(__CR6_EQ, __a, __b);
+}
+
+static int __ATTRS_o_ai vec_all_le(vector unsigned char __a,
+ vector bool char __b) {
+ return __builtin_altivec_vcmpgtub_p(__CR6_EQ, __a, (vector unsigned char)__b);
+}
+
+static int __ATTRS_o_ai vec_all_le(vector bool char __a,
+ vector signed char __b) {
+ return __builtin_altivec_vcmpgtub_p(__CR6_EQ, (vector unsigned char)__a,
+ (vector unsigned char)__b);
+}
+
+static int __ATTRS_o_ai vec_all_le(vector bool char __a,
+ vector unsigned char __b) {
+ return __builtin_altivec_vcmpgtub_p(__CR6_EQ, (vector unsigned char)__a, __b);
+}
+
+static int __ATTRS_o_ai vec_all_le(vector bool char __a, vector bool char __b) {
+ return __builtin_altivec_vcmpgtub_p(__CR6_EQ, (vector unsigned char)__a,
+ (vector unsigned char)__b);
+}
+
+static int __ATTRS_o_ai vec_all_le(vector short __a, vector short __b) {
+ return __builtin_altivec_vcmpgtsh_p(__CR6_EQ, __a, __b);
+}
+
+static int __ATTRS_o_ai vec_all_le(vector short __a, vector bool short __b) {
+ return __builtin_altivec_vcmpgtsh_p(__CR6_EQ, __a, (vector short)__b);
+}
+
+static int __ATTRS_o_ai vec_all_le(vector unsigned short __a,
+ vector unsigned short __b) {
+ return __builtin_altivec_vcmpgtuh_p(__CR6_EQ, __a, __b);
+}
+
+static int __ATTRS_o_ai vec_all_le(vector unsigned short __a,
+ vector bool short __b) {
+ return __builtin_altivec_vcmpgtuh_p(__CR6_EQ, __a,
+ (vector unsigned short)__b);
+}
+
+static int __ATTRS_o_ai vec_all_le(vector bool short __a, vector short __b) {
+ return __builtin_altivec_vcmpgtuh_p(__CR6_EQ, (vector unsigned short)__a,
+ (vector unsigned short)__b);
+}
+
+static int __ATTRS_o_ai vec_all_le(vector bool short __a,
+ vector unsigned short __b) {
+ return __builtin_altivec_vcmpgtuh_p(__CR6_EQ, (vector unsigned short)__a,
+ __b);
+}
+
+static int __ATTRS_o_ai vec_all_le(vector bool short __a,
+ vector bool short __b) {
+ return __builtin_altivec_vcmpgtuh_p(__CR6_EQ, (vector unsigned short)__a,
+ (vector unsigned short)__b);
+}
+
+static int __ATTRS_o_ai vec_all_le(vector int __a, vector int __b) {
+ return __builtin_altivec_vcmpgtsw_p(__CR6_EQ, __a, __b);
+}
+
+static int __ATTRS_o_ai vec_all_le(vector int __a, vector bool int __b) {
+ return __builtin_altivec_vcmpgtsw_p(__CR6_EQ, __a, (vector int)__b);
+}
+
+static int __ATTRS_o_ai vec_all_le(vector unsigned int __a,
+ vector unsigned int __b) {
+ return __builtin_altivec_vcmpgtuw_p(__CR6_EQ, __a, __b);
+}
+
+static int __ATTRS_o_ai vec_all_le(vector unsigned int __a,
+ vector bool int __b) {
+ return __builtin_altivec_vcmpgtuw_p(__CR6_EQ, __a, (vector unsigned int)__b);
+}
+
+static int __ATTRS_o_ai vec_all_le(vector bool int __a, vector int __b) {
+ return __builtin_altivec_vcmpgtuw_p(__CR6_EQ, (vector unsigned int)__a,
+ (vector unsigned int)__b);
+}
+
+static int __ATTRS_o_ai vec_all_le(vector bool int __a,
+ vector unsigned int __b) {
+ return __builtin_altivec_vcmpgtuw_p(__CR6_EQ, (vector unsigned int)__a, __b);
+}
+
+static int __ATTRS_o_ai vec_all_le(vector bool int __a, vector bool int __b) {
+ return __builtin_altivec_vcmpgtuw_p(__CR6_EQ, (vector unsigned int)__a,
+ (vector unsigned int)__b);
+}
+
+#ifdef __POWER8_VECTOR__
+static int __ATTRS_o_ai vec_all_le(vector signed long long __a,
+ vector signed long long __b) {
+ return __builtin_altivec_vcmpgtsd_p(__CR6_EQ, __a, __b);
+}
+
+static int __ATTRS_o_ai vec_all_le(vector unsigned long long __a,
+ vector unsigned long long __b) {
+ return __builtin_altivec_vcmpgtud_p(__CR6_EQ, __a, __b);
+}
+
+static int __ATTRS_o_ai vec_all_le(vector signed long long __a,
+ vector bool long long __b) {
+ return __builtin_altivec_vcmpgtsd_p(__CR6_EQ, __a,
+ (vector signed long long)__b);
+}
+
+static int __ATTRS_o_ai vec_all_le(vector unsigned long long __a,
+ vector bool long long __b) {
+ return __builtin_altivec_vcmpgtud_p(__CR6_EQ, __a,
+ (vector unsigned long long)__b);
+}
+
+static int __ATTRS_o_ai vec_all_le(vector bool long long __a,
+ vector signed long long __b) {
+ return __builtin_altivec_vcmpgtud_p(__CR6_EQ, (vector unsigned long long)__a,
+ (vector unsigned long long)__b);
+}
+
+static int __ATTRS_o_ai vec_all_le(vector bool long long __a,
+ vector unsigned long long __b) {
+ return __builtin_altivec_vcmpgtud_p(__CR6_EQ, (vector unsigned long long)__a,
+ __b);
+}
+
+static int __ATTRS_o_ai vec_all_le(vector bool long long __a,
+ vector bool long long __b) {
+ return __builtin_altivec_vcmpgtud_p(__CR6_EQ, (vector unsigned long long)__a,
+ (vector unsigned long long)__b);
+}
+#endif
+
+static int __ATTRS_o_ai vec_all_le(vector float __a, vector float __b) {
+#ifdef __VSX__
+ return __builtin_vsx_xvcmpgesp_p(__CR6_LT, __b, __a);
+#else
+ return __builtin_altivec_vcmpgefp_p(__CR6_LT, __b, __a);
+#endif
+}
+
+#ifdef __VSX__
+static int __ATTRS_o_ai vec_all_le(vector double __a, vector double __b) {
+ return __builtin_vsx_xvcmpgedp_p(__CR6_LT, __b, __a);
+}
+#endif
+
+/* vec_all_lt */
+
+static int __ATTRS_o_ai vec_all_lt(vector signed char __a,
+ vector signed char __b) {
+ return __builtin_altivec_vcmpgtsb_p(__CR6_LT, __b, __a);
+}
+
+static int __ATTRS_o_ai vec_all_lt(vector signed char __a,
+ vector bool char __b) {
+ return __builtin_altivec_vcmpgtsb_p(__CR6_LT, (vector signed char)__b, __a);
+}
+
+static int __ATTRS_o_ai vec_all_lt(vector unsigned char __a,
+ vector unsigned char __b) {
+ return __builtin_altivec_vcmpgtub_p(__CR6_LT, __b, __a);
+}
+
+static int __ATTRS_o_ai vec_all_lt(vector unsigned char __a,
+ vector bool char __b) {
+ return __builtin_altivec_vcmpgtub_p(__CR6_LT, (vector unsigned char)__b, __a);
+}
+
+static int __ATTRS_o_ai vec_all_lt(vector bool char __a,
+ vector signed char __b) {
+ return __builtin_altivec_vcmpgtub_p(__CR6_LT, (vector unsigned char)__b,
+ (vector unsigned char)__a);
+}
+
+static int __ATTRS_o_ai vec_all_lt(vector bool char __a,
+ vector unsigned char __b) {
+ return __builtin_altivec_vcmpgtub_p(__CR6_LT, __b, (vector unsigned char)__a);
+}
+
+static int __ATTRS_o_ai vec_all_lt(vector bool char __a, vector bool char __b) {
+ return __builtin_altivec_vcmpgtub_p(__CR6_LT, (vector unsigned char)__b,
+ (vector unsigned char)__a);
+}
+
+static int __ATTRS_o_ai vec_all_lt(vector short __a, vector short __b) {
+ return __builtin_altivec_vcmpgtsh_p(__CR6_LT, __b, __a);
+}
+
+static int __ATTRS_o_ai vec_all_lt(vector short __a, vector bool short __b) {
+ return __builtin_altivec_vcmpgtsh_p(__CR6_LT, (vector short)__b, __a);
+}
+
+static int __ATTRS_o_ai vec_all_lt(vector unsigned short __a,
+ vector unsigned short __b) {
+ return __builtin_altivec_vcmpgtuh_p(__CR6_LT, __b, __a);
+}
+
+static int __ATTRS_o_ai vec_all_lt(vector unsigned short __a,
+ vector bool short __b) {
+ return __builtin_altivec_vcmpgtuh_p(__CR6_LT, (vector unsigned short)__b,
+ __a);
+}
+
+static int __ATTRS_o_ai vec_all_lt(vector bool short __a, vector short __b) {
+ return __builtin_altivec_vcmpgtuh_p(__CR6_LT, (vector unsigned short)__b,
+ (vector unsigned short)__a);
+}
+
+static int __ATTRS_o_ai vec_all_lt(vector bool short __a,
+ vector unsigned short __b) {
+ return __builtin_altivec_vcmpgtuh_p(__CR6_LT, __b,
+ (vector unsigned short)__a);
+}
+
+static int __ATTRS_o_ai vec_all_lt(vector bool short __a,
+ vector bool short __b) {
+ return __builtin_altivec_vcmpgtuh_p(__CR6_LT, (vector unsigned short)__b,
+ (vector unsigned short)__a);
+}
+
+static int __ATTRS_o_ai vec_all_lt(vector int __a, vector int __b) {
+ return __builtin_altivec_vcmpgtsw_p(__CR6_LT, __b, __a);
+}
+
+static int __ATTRS_o_ai vec_all_lt(vector int __a, vector bool int __b) {
+ return __builtin_altivec_vcmpgtsw_p(__CR6_LT, (vector int)__b, __a);
+}
+
+static int __ATTRS_o_ai vec_all_lt(vector unsigned int __a,
+ vector unsigned int __b) {
+ return __builtin_altivec_vcmpgtuw_p(__CR6_LT, __b, __a);
+}
+
+static int __ATTRS_o_ai vec_all_lt(vector unsigned int __a,
+ vector bool int __b) {
+ return __builtin_altivec_vcmpgtuw_p(__CR6_LT, (vector unsigned int)__b, __a);
+}
+
+static int __ATTRS_o_ai vec_all_lt(vector bool int __a, vector int __b) {
+ return __builtin_altivec_vcmpgtuw_p(__CR6_LT, (vector unsigned int)__b,
+ (vector unsigned int)__a);
+}
+
+static int __ATTRS_o_ai vec_all_lt(vector bool int __a,
+ vector unsigned int __b) {
+ return __builtin_altivec_vcmpgtuw_p(__CR6_LT, __b, (vector unsigned int)__a);
+}
+
+static int __ATTRS_o_ai vec_all_lt(vector bool int __a, vector bool int __b) {
+ return __builtin_altivec_vcmpgtuw_p(__CR6_LT, (vector unsigned int)__b,
+ (vector unsigned int)__a);
+}
+
+#ifdef __POWER8_VECTOR__
+static int __ATTRS_o_ai vec_all_lt(vector signed long long __a,
+ vector signed long long __b) {
+ return __builtin_altivec_vcmpgtsd_p(__CR6_LT, __b, __a);
+}
+
+static int __ATTRS_o_ai vec_all_lt(vector unsigned long long __a,
+ vector unsigned long long __b) {
+ return __builtin_altivec_vcmpgtud_p(__CR6_LT, __b, __a);
+}
+
+static int __ATTRS_o_ai vec_all_lt(vector signed long long __a,
+ vector bool long long __b) {
+ return __builtin_altivec_vcmpgtsd_p(__CR6_LT, (vector signed long long)__b,
+ __a);
+}
+
+static int __ATTRS_o_ai vec_all_lt(vector unsigned long long __a,
+ vector bool long long __b) {
+ return __builtin_altivec_vcmpgtud_p(__CR6_LT, (vector unsigned long long)__b,
+ __a);
+}
+
+static int __ATTRS_o_ai vec_all_lt(vector bool long long __a,
+ vector signed long long __b) {
+ return __builtin_altivec_vcmpgtud_p(__CR6_LT, (vector unsigned long long)__b,
+ (vector unsigned long long)__a);
+}
+
+static int __ATTRS_o_ai vec_all_lt(vector bool long long __a,
+ vector unsigned long long __b) {
+ return __builtin_altivec_vcmpgtud_p(__CR6_LT, __b,
+ (vector unsigned long long)__a);
+}
+
+static int __ATTRS_o_ai vec_all_lt(vector bool long long __a,
+ vector bool long long __b) {
+ return __builtin_altivec_vcmpgtud_p(__CR6_LT, (vector unsigned long long)__b,
+ (vector unsigned long long)__a);
+}
+#endif
+
+static int __ATTRS_o_ai vec_all_lt(vector float __a, vector float __b) {
+#ifdef __VSX__
+ return __builtin_vsx_xvcmpgtsp_p(__CR6_LT, __b, __a);
+#else
+ return __builtin_altivec_vcmpgtfp_p(__CR6_LT, __b, __a);
+#endif
+}
+
+#ifdef __VSX__
+static int __ATTRS_o_ai vec_all_lt(vector double __a, vector double __b) {
+ return __builtin_vsx_xvcmpgtdp_p(__CR6_LT, __b, __a);
+}
+#endif
+
+/* vec_all_nan */
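+/* Relies on the IEEE 754 identity that x == x is false only for NaN:
+   comparing __a against itself and asking for "held for no element"
+   (__CR6_EQ) answers whether every element is a NaN. */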
+
+static int __ATTRS_o_ai vec_all_nan(vector float __a) {
+#ifdef __VSX__
+ return __builtin_vsx_xvcmpeqsp_p(__CR6_EQ, __a, __a);
+#else
+ return __builtin_altivec_vcmpeqfp_p(__CR6_EQ, __a, __a);
+#endif
+}
+
+#ifdef __VSX__
+static int __ATTRS_o_ai vec_all_nan(vector double __a) {
+ return __builtin_vsx_xvcmpeqdp_p(__CR6_EQ, __a, __a);
+}
+#endif
+
+/* vec_all_ne */
+
+static int __ATTRS_o_ai vec_all_ne(vector signed char __a,
+ vector signed char __b) {
+ return __builtin_altivec_vcmpequb_p(__CR6_EQ, (vector char)__a,
+ (vector char)__b);
+}
+
+static int __ATTRS_o_ai vec_all_ne(vector signed char __a,
+ vector bool char __b) {
+ return __builtin_altivec_vcmpequb_p(__CR6_EQ, (vector char)__a,
+ (vector char)__b);
+}
+
+static int __ATTRS_o_ai vec_all_ne(vector unsigned char __a,
+ vector unsigned char __b) {
+ return __builtin_altivec_vcmpequb_p(__CR6_EQ, (vector char)__a,
+ (vector char)__b);
+}
+
+static int __ATTRS_o_ai vec_all_ne(vector unsigned char __a,
+ vector bool char __b) {
+ return __builtin_altivec_vcmpequb_p(__CR6_EQ, (vector char)__a,
+ (vector char)__b);
+}
+
+static int __ATTRS_o_ai vec_all_ne(vector bool char __a,
+ vector signed char __b) {
+ return __builtin_altivec_vcmpequb_p(__CR6_EQ, (vector char)__a,
+ (vector char)__b);
+}
+
+static int __ATTRS_o_ai vec_all_ne(vector bool char __a,
+ vector unsigned char __b) {
+ return __builtin_altivec_vcmpequb_p(__CR6_EQ, (vector char)__a,
+ (vector char)__b);
+}
+
+static int __ATTRS_o_ai vec_all_ne(vector bool char __a, vector bool char __b) {
+ return __builtin_altivec_vcmpequb_p(__CR6_EQ, (vector char)__a,
+ (vector char)__b);
+}
+
+static int __ATTRS_o_ai vec_all_ne(vector short __a, vector short __b) {
+ return __builtin_altivec_vcmpequh_p(__CR6_EQ, __a, __b);
+}
+
+static int __ATTRS_o_ai vec_all_ne(vector short __a, vector bool short __b) {
+ return __builtin_altivec_vcmpequh_p(__CR6_EQ, __a, (vector short)__b);
+}
+
+static int __ATTRS_o_ai vec_all_ne(vector unsigned short __a,
+ vector unsigned short __b) {
+ return __builtin_altivec_vcmpequh_p(__CR6_EQ, (vector short)__a,
+ (vector short)__b);
+}
+
+static int __ATTRS_o_ai vec_all_ne(vector unsigned short __a,
+ vector bool short __b) {
+ return __builtin_altivec_vcmpequh_p(__CR6_EQ, (vector short)__a,
+ (vector short)__b);
+}
+
+static int __ATTRS_o_ai vec_all_ne(vector bool short __a, vector short __b) {
+ return __builtin_altivec_vcmpequh_p(__CR6_EQ, (vector short)__a,
+ (vector short)__b);
+}
+
+static int __ATTRS_o_ai vec_all_ne(vector bool short __a,
+ vector unsigned short __b) {
+ return __builtin_altivec_vcmpequh_p(__CR6_EQ, (vector short)__a,
+ (vector short)__b);
+}
+
+static int __ATTRS_o_ai vec_all_ne(vector bool short __a,
+ vector bool short __b) {
+ return __builtin_altivec_vcmpequh_p(__CR6_EQ, (vector short)__a,
+ (vector short)__b);
+}
+
+static int __ATTRS_o_ai vec_all_ne(vector pixel __a, vector pixel __b) {
+ return __builtin_altivec_vcmpequh_p(__CR6_EQ, (vector short)__a,
+ (vector short)__b);
+}
+
+static int __ATTRS_o_ai vec_all_ne(vector int __a, vector int __b) {
+ return __builtin_altivec_vcmpequw_p(__CR6_EQ, __a, __b);
+}
+
+static int __ATTRS_o_ai vec_all_ne(vector int __a, vector bool int __b) {
+ return __builtin_altivec_vcmpequw_p(__CR6_EQ, __a, (vector int)__b);
+}
+
+static int __ATTRS_o_ai vec_all_ne(vector unsigned int __a,
+ vector unsigned int __b) {
+ return __builtin_altivec_vcmpequw_p(__CR6_EQ, (vector int)__a,
+ (vector int)__b);
+}
+
+static int __ATTRS_o_ai vec_all_ne(vector unsigned int __a,
+ vector bool int __b) {
+ return __builtin_altivec_vcmpequw_p(__CR6_EQ, (vector int)__a,
+ (vector int)__b);
+}
+
+static int __ATTRS_o_ai vec_all_ne(vector bool int __a, vector int __b) {
+ return __builtin_altivec_vcmpequw_p(__CR6_EQ, (vector int)__a,
+ (vector int)__b);
+}
+
+static int __ATTRS_o_ai vec_all_ne(vector bool int __a,
+ vector unsigned int __b) {
+ return __builtin_altivec_vcmpequw_p(__CR6_EQ, (vector int)__a,
+ (vector int)__b);
+}
+
+static int __ATTRS_o_ai vec_all_ne(vector bool int __a, vector bool int __b) {
+ return __builtin_altivec_vcmpequw_p(__CR6_EQ, (vector int)__a,
+ (vector int)__b);
+}
+
+#ifdef __POWER8_VECTOR__
+static int __ATTRS_o_ai vec_all_ne(vector signed long long __a,
+ vector signed long long __b) {
+ return __builtin_altivec_vcmpequd_p(__CR6_EQ, __a, __b);
+}
+
+static int __ATTRS_o_ai vec_all_ne(vector unsigned long long __a,
+ vector unsigned long long __b) {
+ return __builtin_altivec_vcmpequd_p(__CR6_EQ, (vector long long)__a,
+ (vector long long)__b);
+}
+
+static int __ATTRS_o_ai vec_all_ne(vector signed long long __a,
+ vector bool long long __b) {
+ return __builtin_altivec_vcmpequd_p(__CR6_EQ, __a,
+ (vector signed long long)__b);
+}
+
+static int __ATTRS_o_ai vec_all_ne(vector unsigned long long __a,
+ vector bool long long __b) {
+ return __builtin_altivec_vcmpequd_p(__CR6_EQ, (vector signed long long)__a,
+ (vector signed long long)__b);
+}
+
+static int __ATTRS_o_ai vec_all_ne(vector bool long long __a,
+ vector signed long long __b) {
+ return __builtin_altivec_vcmpequd_p(__CR6_EQ, (vector signed long long)__a,
+ (vector signed long long)__b);
+}
+
+static int __ATTRS_o_ai vec_all_ne(vector bool long long __a,
+ vector unsigned long long __b) {
+ return __builtin_altivec_vcmpequd_p(__CR6_EQ, (vector signed long long)__a,
+ (vector signed long long)__b);
+}
+
+static int __ATTRS_o_ai vec_all_ne(vector bool long long __a,
+ vector bool long long __b) {
+ return __builtin_altivec_vcmpequd_p(__CR6_EQ, (vector signed long long)__a,
+ (vector signed long long)__b);
+}
+#endif
+
+static int __ATTRS_o_ai vec_all_ne(vector float __a, vector float __b) {
+#ifdef __VSX__
+  /* single-precision elements: use the xvcmpeqsp predicate, not the
+     double-precision xvcmpeqdp */
+  return __builtin_vsx_xvcmpeqsp_p(__CR6_EQ, __a, __b);
+#else
+ return __builtin_altivec_vcmpeqfp_p(__CR6_EQ, __a, __b);
+#endif
+}
+
+#ifdef __VSX__
+static int __ATTRS_o_ai vec_all_ne(vector double __a, vector double __b) {
+ return __builtin_vsx_xvcmpeqdp_p(__CR6_EQ, __a, __b);
+}
+#endif
+
+/* vec_all_nge */
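+/* The n-prefixed predicates (nge, ngt, nle, nlt) are not mere complements
+   of ge/gt/le/lt for floating point: "not greater-or-equal" is also true
+   when either operand is NaN, while "less-than" is not. Hence vec_all_nge
+   tests the >= compare for "held for no element" (__CR6_EQ) instead of
+   reusing vec_all_lt. */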
+
+static int __ATTRS_o_ai vec_all_nge(vector float __a, vector float __b) {
+#ifdef __VSX__
+ return __builtin_vsx_xvcmpgesp_p(__CR6_EQ, __a, __b);
+#else
+ return __builtin_altivec_vcmpgefp_p(__CR6_EQ, __a, __b);
+#endif
+}
+
+#ifdef __VSX__
+static int __ATTRS_o_ai vec_all_nge(vector double __a, vector double __b) {
+ return __builtin_vsx_xvcmpgedp_p(__CR6_EQ, __a, __b);
+}
+#endif
+
+/* vec_all_ngt */
+
+static int __ATTRS_o_ai vec_all_ngt(vector float __a, vector float __b) {
+#ifdef __VSX__
+ return __builtin_vsx_xvcmpgtsp_p(__CR6_EQ, __a, __b);
+#else
+ return __builtin_altivec_vcmpgtfp_p(__CR6_EQ, __a, __b);
+#endif
+}
+
+#ifdef __VSX__
+static int __ATTRS_o_ai vec_all_ngt(vector double __a, vector double __b) {
+ return __builtin_vsx_xvcmpgtdp_p(__CR6_EQ, __a, __b);
+}
+#endif
+
+/* vec_all_nle */
+
+static int __attribute__((__always_inline__))
+vec_all_nle(vector float __a, vector float __b) {
+ return __builtin_altivec_vcmpgefp_p(__CR6_EQ, __b, __a);
+}
+
+/* vec_all_nlt */
+
+static int __attribute__((__always_inline__))
+vec_all_nlt(vector float __a, vector float __b) {
+ return __builtin_altivec_vcmpgtfp_p(__CR6_EQ, __b, __a);
+}
+
+/* vec_all_numeric */
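+/* The dual of vec_all_nan: __a == __a holds for every element (__CR6_LT)
+   exactly when no element is a NaN, i.e. all elements are numeric. */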
+
+static int __attribute__((__always_inline__))
+vec_all_numeric(vector float __a) {
+ return __builtin_altivec_vcmpeqfp_p(__CR6_LT, __a, __a);
+}
+
+/* vec_any_eq */
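+/* The vec_any_* family reuses the vec_all_* compares but reads the
+   complemented CR6 bits (__CR6_EQ_REV: true somewhere; __CR6_LT_REV:
+   false somewhere). A usage sketch:
+
+     vector int a = vec_splats(1);
+     vector int b = (vector int)(1, 2, 3, 4);
+     vec_any_eq(a, b);   // 1: the first elements match
+     vec_all_eq(a, b);   // 0: the rest differ
+*/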
+
+static int __ATTRS_o_ai vec_any_eq(vector signed char __a,
+ vector signed char __b) {
+ return __builtin_altivec_vcmpequb_p(__CR6_EQ_REV, (vector char)__a,
+ (vector char)__b);
+}
+
+static int __ATTRS_o_ai vec_any_eq(vector signed char __a,
+ vector bool char __b) {
+ return __builtin_altivec_vcmpequb_p(__CR6_EQ_REV, (vector char)__a,
+ (vector char)__b);
+}
+
+static int __ATTRS_o_ai vec_any_eq(vector unsigned char __a,
+ vector unsigned char __b) {
+ return __builtin_altivec_vcmpequb_p(__CR6_EQ_REV, (vector char)__a,
+ (vector char)__b);
+}
+
+static int __ATTRS_o_ai vec_any_eq(vector unsigned char __a,
+ vector bool char __b) {
+ return __builtin_altivec_vcmpequb_p(__CR6_EQ_REV, (vector char)__a,
+ (vector char)__b);
+}
+
+static int __ATTRS_o_ai vec_any_eq(vector bool char __a,
+ vector signed char __b) {
+ return __builtin_altivec_vcmpequb_p(__CR6_EQ_REV, (vector char)__a,
+ (vector char)__b);
+}
+
+static int __ATTRS_o_ai vec_any_eq(vector bool char __a,
+ vector unsigned char __b) {
+ return __builtin_altivec_vcmpequb_p(__CR6_EQ_REV, (vector char)__a,
+ (vector char)__b);
+}
+
+static int __ATTRS_o_ai vec_any_eq(vector bool char __a, vector bool char __b) {
+ return __builtin_altivec_vcmpequb_p(__CR6_EQ_REV, (vector char)__a,
+ (vector char)__b);
+}
+
+static int __ATTRS_o_ai vec_any_eq(vector short __a, vector short __b) {
+ return __builtin_altivec_vcmpequh_p(__CR6_EQ_REV, __a, __b);
+}
+
+static int __ATTRS_o_ai vec_any_eq(vector short __a, vector bool short __b) {
+ return __builtin_altivec_vcmpequh_p(__CR6_EQ_REV, __a, (vector short)__b);
+}
+
+static int __ATTRS_o_ai vec_any_eq(vector unsigned short __a,
+ vector unsigned short __b) {
+ return __builtin_altivec_vcmpequh_p(__CR6_EQ_REV, (vector short)__a,
+ (vector short)__b);
+}
+
+static int __ATTRS_o_ai vec_any_eq(vector unsigned short __a,
+ vector bool short __b) {
+ return __builtin_altivec_vcmpequh_p(__CR6_EQ_REV, (vector short)__a,
+ (vector short)__b);
+}
+
+static int __ATTRS_o_ai vec_any_eq(vector bool short __a, vector short __b) {
+ return __builtin_altivec_vcmpequh_p(__CR6_EQ_REV, (vector short)__a,
+ (vector short)__b);
+}
+
+static int __ATTRS_o_ai vec_any_eq(vector bool short __a,
+ vector unsigned short __b) {
+ return __builtin_altivec_vcmpequh_p(__CR6_EQ_REV, (vector short)__a,
+ (vector short)__b);
+}
+
+static int __ATTRS_o_ai vec_any_eq(vector bool short __a,
+ vector bool short __b) {
+ return __builtin_altivec_vcmpequh_p(__CR6_EQ_REV, (vector short)__a,
+ (vector short)__b);
+}
+
+static int __ATTRS_o_ai vec_any_eq(vector pixel __a, vector pixel __b) {
+ return __builtin_altivec_vcmpequh_p(__CR6_EQ_REV, (vector short)__a,
+ (vector short)__b);
+}
+
+static int __ATTRS_o_ai vec_any_eq(vector int __a, vector int __b) {
+ return __builtin_altivec_vcmpequw_p(__CR6_EQ_REV, __a, __b);
+}
+
+static int __ATTRS_o_ai vec_any_eq(vector int __a, vector bool int __b) {
+ return __builtin_altivec_vcmpequw_p(__CR6_EQ_REV, __a, (vector int)__b);
+}
+
+static int __ATTRS_o_ai vec_any_eq(vector unsigned int __a,
+ vector unsigned int __b) {
+ return __builtin_altivec_vcmpequw_p(__CR6_EQ_REV, (vector int)__a,
+ (vector int)__b);
+}
+
+static int __ATTRS_o_ai vec_any_eq(vector unsigned int __a,
+ vector bool int __b) {
+ return __builtin_altivec_vcmpequw_p(__CR6_EQ_REV, (vector int)__a,
+ (vector int)__b);
+}
+
+static int __ATTRS_o_ai vec_any_eq(vector bool int __a, vector int __b) {
+ return __builtin_altivec_vcmpequw_p(__CR6_EQ_REV, (vector int)__a,
+ (vector int)__b);
+}
+
+static int __ATTRS_o_ai vec_any_eq(vector bool int __a,
+ vector unsigned int __b) {
+ return __builtin_altivec_vcmpequw_p(__CR6_EQ_REV, (vector int)__a,
+ (vector int)__b);
+}
+
+static int __ATTRS_o_ai vec_any_eq(vector bool int __a, vector bool int __b) {
+ return __builtin_altivec_vcmpequw_p(__CR6_EQ_REV, (vector int)__a,
+ (vector int)__b);
+}
+
+#ifdef __POWER8_VECTOR__
+static int __ATTRS_o_ai vec_any_eq(vector signed long long __a,
+ vector signed long long __b) {
+ return __builtin_altivec_vcmpequd_p(__CR6_EQ_REV, __a, __b);
+}
+
+static int __ATTRS_o_ai vec_any_eq(vector unsigned long long __a,
+ vector unsigned long long __b) {
+ return __builtin_altivec_vcmpequd_p(__CR6_EQ_REV, (vector long long)__a,
+ (vector long long)__b);
+}
+
+static int __ATTRS_o_ai vec_any_eq(vector signed long long __a,
+ vector bool long long __b) {
+ return __builtin_altivec_vcmpequd_p(__CR6_EQ_REV, __a,
+ (vector signed long long)__b);
+}
+
+static int __ATTRS_o_ai vec_any_eq(vector unsigned long long __a,
+ vector bool long long __b) {
+ return __builtin_altivec_vcmpequd_p(
+ __CR6_EQ_REV, (vector signed long long)__a, (vector signed long long)__b);
+}
+
+static int __ATTRS_o_ai vec_any_eq(vector bool long long __a,
+ vector signed long long __b) {
+ return __builtin_altivec_vcmpequd_p(
+ __CR6_EQ_REV, (vector signed long long)__a, (vector signed long long)__b);
+}
+
+static int __ATTRS_o_ai vec_any_eq(vector bool long long __a,
+ vector unsigned long long __b) {
+ return __builtin_altivec_vcmpequd_p(
+ __CR6_EQ_REV, (vector signed long long)__a, (vector signed long long)__b);
+}
+
+static int __ATTRS_o_ai vec_any_eq(vector bool long long __a,
+ vector bool long long __b) {
+ return __builtin_altivec_vcmpequd_p(
+ __CR6_EQ_REV, (vector signed long long)__a, (vector signed long long)__b);
+}
+#endif
+
+static int __ATTRS_o_ai vec_any_eq(vector float __a, vector float __b) {
+#ifdef __VSX__
+ return __builtin_vsx_xvcmpeqsp_p(__CR6_EQ_REV, __a, __b);
+#else
+ return __builtin_altivec_vcmpeqfp_p(__CR6_EQ_REV, __a, __b);
+#endif
+}
+
+#ifdef __VSX__
+static int __ATTRS_o_ai vec_any_eq(vector double __a, vector double __b) {
+ return __builtin_vsx_xvcmpeqdp_p(__CR6_EQ_REV, __a, __b);
+}
+#endif
+
+/* vec_any_ge */
+
+static int __ATTRS_o_ai vec_any_ge(vector signed char __a,
+ vector signed char __b) {
+ return __builtin_altivec_vcmpgtsb_p(__CR6_LT_REV, __b, __a);
+}
+
+static int __ATTRS_o_ai vec_any_ge(vector signed char __a,
+ vector bool char __b) {
+ return __builtin_altivec_vcmpgtsb_p(__CR6_LT_REV, (vector signed char)__b,
+ __a);
+}
+
+static int __ATTRS_o_ai vec_any_ge(vector unsigned char __a,
+ vector unsigned char __b) {
+ return __builtin_altivec_vcmpgtub_p(__CR6_LT_REV, __b, __a);
+}
+
+static int __ATTRS_o_ai vec_any_ge(vector unsigned char __a,
+ vector bool char __b) {
+ return __builtin_altivec_vcmpgtub_p(__CR6_LT_REV, (vector unsigned char)__b,
+ __a);
+}
+
+static int __ATTRS_o_ai vec_any_ge(vector bool char __a,
+ vector signed char __b) {
+ return __builtin_altivec_vcmpgtub_p(__CR6_LT_REV, (vector unsigned char)__b,
+ (vector unsigned char)__a);
+}
+
+static int __ATTRS_o_ai vec_any_ge(vector bool char __a,
+ vector unsigned char __b) {
+ return __builtin_altivec_vcmpgtub_p(__CR6_LT_REV, __b,
+ (vector unsigned char)__a);
+}
+
+static int __ATTRS_o_ai vec_any_ge(vector bool char __a, vector bool char __b) {
+ return __builtin_altivec_vcmpgtub_p(__CR6_LT_REV, (vector unsigned char)__b,
+ (vector unsigned char)__a);
+}
+
+static int __ATTRS_o_ai vec_any_ge(vector short __a, vector short __b) {
+ return __builtin_altivec_vcmpgtsh_p(__CR6_LT_REV, __b, __a);
+}
+
+static int __ATTRS_o_ai vec_any_ge(vector short __a, vector bool short __b) {
+ return __builtin_altivec_vcmpgtsh_p(__CR6_LT_REV, (vector short)__b, __a);
+}
+
+static int __ATTRS_o_ai vec_any_ge(vector unsigned short __a,
+ vector unsigned short __b) {
+ return __builtin_altivec_vcmpgtuh_p(__CR6_LT_REV, __b, __a);
+}
+
+static int __ATTRS_o_ai vec_any_ge(vector unsigned short __a,
+ vector bool short __b) {
+ return __builtin_altivec_vcmpgtuh_p(__CR6_LT_REV, (vector unsigned short)__b,
+ __a);
+}
+
+static int __ATTRS_o_ai vec_any_ge(vector bool short __a, vector short __b) {
+ return __builtin_altivec_vcmpgtuh_p(__CR6_LT_REV, (vector unsigned short)__b,
+ (vector unsigned short)__a);
+}
+
+static int __ATTRS_o_ai vec_any_ge(vector bool short __a,
+ vector unsigned short __b) {
+ return __builtin_altivec_vcmpgtuh_p(__CR6_LT_REV, __b,
+ (vector unsigned short)__a);
+}
+
+static int __ATTRS_o_ai vec_any_ge(vector bool short __a,
+ vector bool short __b) {
+ return __builtin_altivec_vcmpgtuh_p(__CR6_LT_REV, (vector unsigned short)__b,
+ (vector unsigned short)__a);
+}
+
+static int __ATTRS_o_ai vec_any_ge(vector int __a, vector int __b) {
+ return __builtin_altivec_vcmpgtsw_p(__CR6_LT_REV, __b, __a);
+}
+
+static int __ATTRS_o_ai vec_any_ge(vector int __a, vector bool int __b) {
+ return __builtin_altivec_vcmpgtsw_p(__CR6_LT_REV, (vector int)__b, __a);
+}
+
+static int __ATTRS_o_ai vec_any_ge(vector unsigned int __a,
+ vector unsigned int __b) {
+ return __builtin_altivec_vcmpgtuw_p(__CR6_LT_REV, __b, __a);
+}
+
+static int __ATTRS_o_ai vec_any_ge(vector unsigned int __a,
+ vector bool int __b) {
+ return __builtin_altivec_vcmpgtuw_p(__CR6_LT_REV, (vector unsigned int)__b,
+ __a);
+}
+
+static int __ATTRS_o_ai vec_any_ge(vector bool int __a, vector int __b) {
+ return __builtin_altivec_vcmpgtuw_p(__CR6_LT_REV, (vector unsigned int)__b,
+ (vector unsigned int)__a);
+}
+
+static int __ATTRS_o_ai vec_any_ge(vector bool int __a,
+ vector unsigned int __b) {
+ return __builtin_altivec_vcmpgtuw_p(__CR6_LT_REV, __b,
+ (vector unsigned int)__a);
+}
+
+static int __ATTRS_o_ai vec_any_ge(vector bool int __a, vector bool int __b) {
+ return __builtin_altivec_vcmpgtuw_p(__CR6_LT_REV, (vector unsigned int)__b,
+ (vector unsigned int)__a);
+}
+
+#ifdef __POWER8_VECTOR__
+static int __ATTRS_o_ai vec_any_ge(vector signed long long __a,
+ vector signed long long __b) {
+ return __builtin_altivec_vcmpgtsd_p(__CR6_LT_REV, __b, __a);
+}
+
+static int __ATTRS_o_ai vec_any_ge(vector unsigned long long __a,
+ vector unsigned long long __b) {
+ return __builtin_altivec_vcmpgtud_p(__CR6_LT_REV, __b, __a);
+}
+
+static int __ATTRS_o_ai vec_any_ge(vector signed long long __a,
+ vector bool long long __b) {
+ return __builtin_altivec_vcmpgtsd_p(__CR6_LT_REV,
+ (vector signed long long)__b, __a);
+}
+
+static int __ATTRS_o_ai vec_any_ge(vector unsigned long long __a,
+ vector bool long long __b) {
+ return __builtin_altivec_vcmpgtud_p(__CR6_LT_REV,
+ (vector unsigned long long)__b, __a);
+}
+
+static int __ATTRS_o_ai vec_any_ge(vector bool long long __a,
+ vector signed long long __b) {
+ return __builtin_altivec_vcmpgtud_p(__CR6_LT_REV,
+ (vector unsigned long long)__b,
+ (vector unsigned long long)__a);
+}
+
+static int __ATTRS_o_ai vec_any_ge(vector bool long long __a,
+ vector unsigned long long __b) {
+ return __builtin_altivec_vcmpgtud_p(__CR6_LT_REV, __b,
+ (vector unsigned long long)__a);
+}
+
+static int __ATTRS_o_ai vec_any_ge(vector bool long long __a,
+ vector bool long long __b) {
+ return __builtin_altivec_vcmpgtud_p(__CR6_LT_REV,
+ (vector unsigned long long)__b,
+ (vector unsigned long long)__a);
+}
+#endif
+
+static int __ATTRS_o_ai vec_any_ge(vector float __a, vector float __b) {
+#ifdef __VSX__
+ return __builtin_vsx_xvcmpgesp_p(__CR6_EQ_REV, __a, __b);
+#else
+ return __builtin_altivec_vcmpgefp_p(__CR6_EQ_REV, __a, __b);
+#endif
+}
+
+#ifdef __VSX__
+static int __ATTRS_o_ai vec_any_ge(vector double __a, vector double __b) {
+ return __builtin_vsx_xvcmpgedp_p(__CR6_EQ_REV, __a, __b);
+}
+#endif
+
+/* vec_any_gt */
+
+static int __ATTRS_o_ai vec_any_gt(vector signed char __a,
+ vector signed char __b) {
+ return __builtin_altivec_vcmpgtsb_p(__CR6_EQ_REV, __a, __b);
+}
+
+static int __ATTRS_o_ai vec_any_gt(vector signed char __a,
+ vector bool char __b) {
+ return __builtin_altivec_vcmpgtsb_p(__CR6_EQ_REV, __a,
+ (vector signed char)__b);
+}
+
+static int __ATTRS_o_ai vec_any_gt(vector unsigned char __a,
+ vector unsigned char __b) {
+ return __builtin_altivec_vcmpgtub_p(__CR6_EQ_REV, __a, __b);
+}
+
+static int __ATTRS_o_ai vec_any_gt(vector unsigned char __a,
+ vector bool char __b) {
+ return __builtin_altivec_vcmpgtub_p(__CR6_EQ_REV, __a,
+ (vector unsigned char)__b);
+}
+
+static int __ATTRS_o_ai vec_any_gt(vector bool char __a,
+ vector signed char __b) {
+ return __builtin_altivec_vcmpgtub_p(__CR6_EQ_REV, (vector unsigned char)__a,
+ (vector unsigned char)__b);
+}
+
+static int __ATTRS_o_ai vec_any_gt(vector bool char __a,
+ vector unsigned char __b) {
+ return __builtin_altivec_vcmpgtub_p(__CR6_EQ_REV, (vector unsigned char)__a,
+ __b);
+}
+
+static int __ATTRS_o_ai vec_any_gt(vector bool char __a, vector bool char __b) {
+ return __builtin_altivec_vcmpgtub_p(__CR6_EQ_REV, (vector unsigned char)__a,
+ (vector unsigned char)__b);
+}
+
+static int __ATTRS_o_ai vec_any_gt(vector short __a, vector short __b) {
+ return __builtin_altivec_vcmpgtsh_p(__CR6_EQ_REV, __a, __b);
+}
+
+static int __ATTRS_o_ai vec_any_gt(vector short __a, vector bool short __b) {
+ return __builtin_altivec_vcmpgtsh_p(__CR6_EQ_REV, __a, (vector short)__b);
+}
+
+static int __ATTRS_o_ai vec_any_gt(vector unsigned short __a,
+ vector unsigned short __b) {
+ return __builtin_altivec_vcmpgtuh_p(__CR6_EQ_REV, __a, __b);
+}
+
+static int __ATTRS_o_ai vec_any_gt(vector unsigned short __a,
+ vector bool short __b) {
+ return __builtin_altivec_vcmpgtuh_p(__CR6_EQ_REV, __a,
+ (vector unsigned short)__b);
+}
+
+static int __ATTRS_o_ai vec_any_gt(vector bool short __a, vector short __b) {
+ return __builtin_altivec_vcmpgtuh_p(__CR6_EQ_REV, (vector unsigned short)__a,
+ (vector unsigned short)__b);
+}
+
+static int __ATTRS_o_ai vec_any_gt(vector bool short __a,
+ vector unsigned short __b) {
+ return __builtin_altivec_vcmpgtuh_p(__CR6_EQ_REV, (vector unsigned short)__a,
+ __b);
+}
+
+static int __ATTRS_o_ai vec_any_gt(vector bool short __a,
+ vector bool short __b) {
+ return __builtin_altivec_vcmpgtuh_p(__CR6_EQ_REV, (vector unsigned short)__a,
+ (vector unsigned short)__b);
+}
+
+static int __ATTRS_o_ai vec_any_gt(vector int __a, vector int __b) {
+ return __builtin_altivec_vcmpgtsw_p(__CR6_EQ_REV, __a, __b);
+}
+
+static int __ATTRS_o_ai vec_any_gt(vector int __a, vector bool int __b) {
+ return __builtin_altivec_vcmpgtsw_p(__CR6_EQ_REV, __a, (vector int)__b);
+}
+
+static int __ATTRS_o_ai vec_any_gt(vector unsigned int __a,
+ vector unsigned int __b) {
+ return __builtin_altivec_vcmpgtuw_p(__CR6_EQ_REV, __a, __b);
+}
+
+static int __ATTRS_o_ai vec_any_gt(vector unsigned int __a,
+ vector bool int __b) {
+ return __builtin_altivec_vcmpgtuw_p(__CR6_EQ_REV, __a,
+ (vector unsigned int)__b);
+}
+
+static int __ATTRS_o_ai vec_any_gt(vector bool int __a, vector int __b) {
+ return __builtin_altivec_vcmpgtuw_p(__CR6_EQ_REV, (vector unsigned int)__a,
+ (vector unsigned int)__b);
+}
+
+static int __ATTRS_o_ai vec_any_gt(vector bool int __a,
+ vector unsigned int __b) {
+ return __builtin_altivec_vcmpgtuw_p(__CR6_EQ_REV, (vector unsigned int)__a,
+ __b);
+}
+
+static int __ATTRS_o_ai vec_any_gt(vector bool int __a, vector bool int __b) {
+ return __builtin_altivec_vcmpgtuw_p(__CR6_EQ_REV, (vector unsigned int)__a,
+ (vector unsigned int)__b);
+}
+
+#ifdef __POWER8_VECTOR__
+static int __ATTRS_o_ai vec_any_gt(vector signed long long __a,
+ vector signed long long __b) {
+ return __builtin_altivec_vcmpgtsd_p(__CR6_EQ_REV, __a, __b);
+}
+
+static int __ATTRS_o_ai vec_any_gt(vector unsigned long long __a,
+ vector unsigned long long __b) {
+ return __builtin_altivec_vcmpgtud_p(__CR6_EQ_REV, __a, __b);
+}
+
+static int __ATTRS_o_ai vec_any_gt(vector signed long long __a,
+ vector bool long long __b) {
+ return __builtin_altivec_vcmpgtsd_p(__CR6_EQ_REV, __a,
+ (vector signed long long)__b);
+}
+
+static int __ATTRS_o_ai vec_any_gt(vector unsigned long long __a,
+ vector bool long long __b) {
+ return __builtin_altivec_vcmpgtud_p(__CR6_EQ_REV, __a,
+ (vector unsigned long long)__b);
+}
+
+static int __ATTRS_o_ai vec_any_gt(vector bool long long __a,
+ vector signed long long __b) {
+ return __builtin_altivec_vcmpgtud_p(__CR6_EQ_REV,
+ (vector unsigned long long)__a,
+ (vector unsigned long long)__b);
+}
+
+static int __ATTRS_o_ai vec_any_gt(vector bool long long __a,
+ vector unsigned long long __b) {
+ return __builtin_altivec_vcmpgtud_p(__CR6_EQ_REV,
+ (vector unsigned long long)__a, __b);
+}
+
+static int __ATTRS_o_ai vec_any_gt(vector bool long long __a,
+ vector bool long long __b) {
+ return __builtin_altivec_vcmpgtud_p(__CR6_EQ_REV,
+ (vector unsigned long long)__a,
+ (vector unsigned long long)__b);
+}
+#endif
+
+static int __ATTRS_o_ai vec_any_gt(vector float __a, vector float __b) {
+#ifdef __VSX__
+ return __builtin_vsx_xvcmpgtsp_p(__CR6_EQ_REV, __a, __b);
+#else
+ return __builtin_altivec_vcmpgtfp_p(__CR6_EQ_REV, __a, __b);
+#endif
+}
+
+#ifdef __VSX__
+static int __ATTRS_o_ai vec_any_gt(vector double __a, vector double __b) {
+ return __builtin_vsx_xvcmpgtdp_p(__CR6_EQ_REV, __a, __b);
+}
+#endif
+
+/* vec_any_le */
+
+static int __ATTRS_o_ai vec_any_le(vector signed char __a,
+ vector signed char __b) {
+ return __builtin_altivec_vcmpgtsb_p(__CR6_LT_REV, __a, __b);
+}
+
+static int __ATTRS_o_ai vec_any_le(vector signed char __a,
+ vector bool char __b) {
+ return __builtin_altivec_vcmpgtsb_p(__CR6_LT_REV, __a,
+ (vector signed char)__b);
+}
+
+static int __ATTRS_o_ai vec_any_le(vector unsigned char __a,
+ vector unsigned char __b) {
+ return __builtin_altivec_vcmpgtub_p(__CR6_LT_REV, __a, __b);
+}
+
+static int __ATTRS_o_ai vec_any_le(vector unsigned char __a,
+ vector bool char __b) {
+ return __builtin_altivec_vcmpgtub_p(__CR6_LT_REV, __a,
+ (vector unsigned char)__b);
+}
+
+static int __ATTRS_o_ai vec_any_le(vector bool char __a,
+ vector signed char __b) {
+ return __builtin_altivec_vcmpgtub_p(__CR6_LT_REV, (vector unsigned char)__a,
+ (vector unsigned char)__b);
+}
+
+static int __ATTRS_o_ai vec_any_le(vector bool char __a,
+ vector unsigned char __b) {
+ return __builtin_altivec_vcmpgtub_p(__CR6_LT_REV, (vector unsigned char)__a,
+ __b);
+}
+
+static int __ATTRS_o_ai vec_any_le(vector bool char __a, vector bool char __b) {
+ return __builtin_altivec_vcmpgtub_p(__CR6_LT_REV, (vector unsigned char)__a,
+ (vector unsigned char)__b);
+}
+
+static int __ATTRS_o_ai vec_any_le(vector short __a, vector short __b) {
+ return __builtin_altivec_vcmpgtsh_p(__CR6_LT_REV, __a, __b);
+}
+
+static int __ATTRS_o_ai vec_any_le(vector short __a, vector bool short __b) {
+ return __builtin_altivec_vcmpgtsh_p(__CR6_LT_REV, __a, (vector short)__b);
+}
+
+static int __ATTRS_o_ai vec_any_le(vector unsigned short __a,
+ vector unsigned short __b) {
+ return __builtin_altivec_vcmpgtuh_p(__CR6_LT_REV, __a, __b);
+}
+
+static int __ATTRS_o_ai vec_any_le(vector unsigned short __a,
+ vector bool short __b) {
+ return __builtin_altivec_vcmpgtuh_p(__CR6_LT_REV, __a,
+ (vector unsigned short)__b);
+}
+
+static int __ATTRS_o_ai vec_any_le(vector bool short __a, vector short __b) {
+ return __builtin_altivec_vcmpgtuh_p(__CR6_LT_REV, (vector unsigned short)__a,
+ (vector unsigned short)__b);
+}
+
+static int __ATTRS_o_ai vec_any_le(vector bool short __a,
+ vector unsigned short __b) {
+ return __builtin_altivec_vcmpgtuh_p(__CR6_LT_REV, (vector unsigned short)__a,
+ __b);
+}
+
+static int __ATTRS_o_ai vec_any_le(vector bool short __a,
+ vector bool short __b) {
+ return __builtin_altivec_vcmpgtuh_p(__CR6_LT_REV, (vector unsigned short)__a,
+ (vector unsigned short)__b);
+}
+
+static int __ATTRS_o_ai vec_any_le(vector int __a, vector int __b) {
+ return __builtin_altivec_vcmpgtsw_p(__CR6_LT_REV, __a, __b);
+}
+
+static int __ATTRS_o_ai vec_any_le(vector int __a, vector bool int __b) {
+ return __builtin_altivec_vcmpgtsw_p(__CR6_LT_REV, __a, (vector int)__b);
+}
+
+static int __ATTRS_o_ai vec_any_le(vector unsigned int __a,
+ vector unsigned int __b) {
+ return __builtin_altivec_vcmpgtuw_p(__CR6_LT_REV, __a, __b);
+}
+
+static int __ATTRS_o_ai vec_any_le(vector unsigned int __a,
+ vector bool int __b) {
+ return __builtin_altivec_vcmpgtuw_p(__CR6_LT_REV, __a,
+ (vector unsigned int)__b);
+}
+
+static int __ATTRS_o_ai vec_any_le(vector bool int __a, vector int __b) {
+ return __builtin_altivec_vcmpgtuw_p(__CR6_LT_REV, (vector unsigned int)__a,
+ (vector unsigned int)__b);
+}
+
+static int __ATTRS_o_ai vec_any_le(vector bool int __a,
+ vector unsigned int __b) {
+ return __builtin_altivec_vcmpgtuw_p(__CR6_LT_REV, (vector unsigned int)__a,
+ __b);
+}
+
+static int __ATTRS_o_ai vec_any_le(vector bool int __a, vector bool int __b) {
+ return __builtin_altivec_vcmpgtuw_p(__CR6_LT_REV, (vector unsigned int)__a,
+ (vector unsigned int)__b);
+}
+
+#ifdef __POWER8_VECTOR__
+static int __ATTRS_o_ai vec_any_le(vector signed long long __a,
+ vector signed long long __b) {
+ return __builtin_altivec_vcmpgtsd_p(__CR6_LT_REV, __a, __b);
+}
+
+static int __ATTRS_o_ai vec_any_le(vector unsigned long long __a,
+ vector unsigned long long __b) {
+ return __builtin_altivec_vcmpgtud_p(__CR6_LT_REV, __a, __b);
+}
+
+static int __ATTRS_o_ai vec_any_le(vector signed long long __a,
+ vector bool long long __b) {
+ return __builtin_altivec_vcmpgtsd_p(__CR6_LT_REV, __a,
+ (vector signed long long)__b);
+}
+
+static int __ATTRS_o_ai vec_any_le(vector unsigned long long __a,
+ vector bool long long __b) {
+ return __builtin_altivec_vcmpgtud_p(__CR6_LT_REV, __a,
+ (vector unsigned long long)__b);
+}
+
+static int __ATTRS_o_ai vec_any_le(vector bool long long __a,
+ vector signed long long __b) {
+ return __builtin_altivec_vcmpgtud_p(__CR6_LT_REV,
+ (vector unsigned long long)__a,
+ (vector unsigned long long)__b);
+}
+
+static int __ATTRS_o_ai vec_any_le(vector bool long long __a,
+ vector unsigned long long __b) {
+ return __builtin_altivec_vcmpgtud_p(__CR6_LT_REV,
+ (vector unsigned long long)__a, __b);
+}
+
+static int __ATTRS_o_ai vec_any_le(vector bool long long __a,
+ vector bool long long __b) {
+ return __builtin_altivec_vcmpgtud_p(__CR6_LT_REV,
+ (vector unsigned long long)__a,
+ (vector unsigned long long)__b);
+}
+#endif
+
+static int __ATTRS_o_ai vec_any_le(vector float __a, vector float __b) {
+#ifdef __VSX__
+ return __builtin_vsx_xvcmpgesp_p(__CR6_EQ_REV, __b, __a);
+#else
+ return __builtin_altivec_vcmpgefp_p(__CR6_EQ_REV, __b, __a);
+#endif
+}
+
+#ifdef __VSX__
+static int __ATTRS_o_ai vec_any_le(vector double __a, vector double __b) {
+ return __builtin_vsx_xvcmpgedp_p(__CR6_EQ_REV, __b, __a);
+}
+#endif
+
+/* vec_any_lt */
+
+static int __ATTRS_o_ai vec_any_lt(vector signed char __a,
+ vector signed char __b) {
+ return __builtin_altivec_vcmpgtsb_p(__CR6_EQ_REV, __b, __a);
+}
+
+static int __ATTRS_o_ai vec_any_lt(vector signed char __a,
+ vector bool char __b) {
+ return __builtin_altivec_vcmpgtsb_p(__CR6_EQ_REV, (vector signed char)__b,
+ __a);
+}
+
+static int __ATTRS_o_ai vec_any_lt(vector unsigned char __a,
+ vector unsigned char __b) {
+ return __builtin_altivec_vcmpgtub_p(__CR6_EQ_REV, __b, __a);
+}
+
+static int __ATTRS_o_ai vec_any_lt(vector unsigned char __a,
+ vector bool char __b) {
+ return __builtin_altivec_vcmpgtub_p(__CR6_EQ_REV, (vector unsigned char)__b,
+ __a);
+}
+
+static int __ATTRS_o_ai vec_any_lt(vector bool char __a,
+ vector signed char __b) {
+ return __builtin_altivec_vcmpgtub_p(__CR6_EQ_REV, (vector unsigned char)__b,
+ (vector unsigned char)__a);
+}
+
+static int __ATTRS_o_ai vec_any_lt(vector bool char __a,
+ vector unsigned char __b) {
+ return __builtin_altivec_vcmpgtub_p(__CR6_EQ_REV, __b,
+ (vector unsigned char)__a);
+}
+
+static int __ATTRS_o_ai vec_any_lt(vector bool char __a, vector bool char __b) {
+ return __builtin_altivec_vcmpgtub_p(__CR6_EQ_REV, (vector unsigned char)__b,
+ (vector unsigned char)__a);
+}
+
+static int __ATTRS_o_ai vec_any_lt(vector short __a, vector short __b) {
+ return __builtin_altivec_vcmpgtsh_p(__CR6_EQ_REV, __b, __a);
+}
+
+static int __ATTRS_o_ai vec_any_lt(vector short __a, vector bool short __b) {
+ return __builtin_altivec_vcmpgtsh_p(__CR6_EQ_REV, (vector short)__b, __a);
+}
+
+static int __ATTRS_o_ai vec_any_lt(vector unsigned short __a,
+ vector unsigned short __b) {
+ return __builtin_altivec_vcmpgtuh_p(__CR6_EQ_REV, __b, __a);
+}
+
+static int __ATTRS_o_ai vec_any_lt(vector unsigned short __a,
+ vector bool short __b) {
+ return __builtin_altivec_vcmpgtuh_p(__CR6_EQ_REV, (vector unsigned short)__b,
+ __a);
+}
+
+static int __ATTRS_o_ai vec_any_lt(vector bool short __a, vector short __b) {
+ return __builtin_altivec_vcmpgtuh_p(__CR6_EQ_REV, (vector unsigned short)__b,
+ (vector unsigned short)__a);
+}
+
+static int __ATTRS_o_ai vec_any_lt(vector bool short __a,
+ vector unsigned short __b) {
+ return __builtin_altivec_vcmpgtuh_p(__CR6_EQ_REV, __b,
+ (vector unsigned short)__a);
+}
+
+static int __ATTRS_o_ai vec_any_lt(vector bool short __a,
+ vector bool short __b) {
+ return __builtin_altivec_vcmpgtuh_p(__CR6_EQ_REV, (vector unsigned short)__b,
+ (vector unsigned short)__a);
+}
+
+static int __ATTRS_o_ai vec_any_lt(vector int __a, vector int __b) {
+ return __builtin_altivec_vcmpgtsw_p(__CR6_EQ_REV, __b, __a);
+}
+
+static int __ATTRS_o_ai vec_any_lt(vector int __a, vector bool int __b) {
+ return __builtin_altivec_vcmpgtsw_p(__CR6_EQ_REV, (vector int)__b, __a);
+}
+
+static int __ATTRS_o_ai vec_any_lt(vector unsigned int __a,
+ vector unsigned int __b) {
+ return __builtin_altivec_vcmpgtuw_p(__CR6_EQ_REV, __b, __a);
+}
+
+static int __ATTRS_o_ai vec_any_lt(vector unsigned int __a,
+ vector bool int __b) {
+ return __builtin_altivec_vcmpgtuw_p(__CR6_EQ_REV, (vector unsigned int)__b,
+ __a);
+}
+
+static int __ATTRS_o_ai vec_any_lt(vector bool int __a, vector int __b) {
+ return __builtin_altivec_vcmpgtuw_p(__CR6_EQ_REV, (vector unsigned int)__b,
+ (vector unsigned int)__a);
+}
+
+static int __ATTRS_o_ai vec_any_lt(vector bool int __a,
+ vector unsigned int __b) {
+ return __builtin_altivec_vcmpgtuw_p(__CR6_EQ_REV, __b,
+ (vector unsigned int)__a);
+}
+
+static int __ATTRS_o_ai vec_any_lt(vector bool int __a, vector bool int __b) {
+ return __builtin_altivec_vcmpgtuw_p(__CR6_EQ_REV, (vector unsigned int)__b,
+ (vector unsigned int)__a);
+}
+
+#ifdef __POWER8_VECTOR__
+static int __ATTRS_o_ai vec_any_lt(vector signed long long __a,
+ vector signed long long __b) {
+ return __builtin_altivec_vcmpgtsd_p(__CR6_EQ_REV, __b, __a);
+}
+
+static int __ATTRS_o_ai vec_any_lt(vector unsigned long long __a,
+ vector unsigned long long __b) {
+ return __builtin_altivec_vcmpgtud_p(__CR6_EQ_REV, __b, __a);
+}
+
+static int __ATTRS_o_ai vec_any_lt(vector signed long long __a,
+ vector bool long long __b) {
+ return __builtin_altivec_vcmpgtsd_p(__CR6_EQ_REV,
+ (vector signed long long)__b, __a);
+}
+
+static int __ATTRS_o_ai vec_any_lt(vector unsigned long long __a,
+ vector bool long long __b) {
+ return __builtin_altivec_vcmpgtud_p(__CR6_EQ_REV,
+ (vector unsigned long long)__b, __a);
+}
+
+static int __ATTRS_o_ai vec_any_lt(vector bool long long __a,
+ vector signed long long __b) {
+ return __builtin_altivec_vcmpgtud_p(__CR6_EQ_REV,
+ (vector unsigned long long)__b,
+ (vector unsigned long long)__a);
+}
+
+static int __ATTRS_o_ai vec_any_lt(vector bool long long __a,
+ vector unsigned long long __b) {
+ return __builtin_altivec_vcmpgtud_p(__CR6_EQ_REV, __b,
+ (vector unsigned long long)__a);
+}
+
+static int __ATTRS_o_ai vec_any_lt(vector bool long long __a,
+ vector bool long long __b) {
+ return __builtin_altivec_vcmpgtud_p(__CR6_EQ_REV,
+ (vector unsigned long long)__b,
+ (vector unsigned long long)__a);
+}
+#endif
+
+static int __ATTRS_o_ai vec_any_lt(vector float __a, vector float __b) {
+#ifdef __VSX__
+ return __builtin_vsx_xvcmpgtsp_p(__CR6_EQ_REV, __b, __a);
+#else
+ return __builtin_altivec_vcmpgtfp_p(__CR6_EQ_REV, __b, __a);
+#endif
+}
+
+#ifdef __VSX__
+static int __ATTRS_o_ai vec_any_lt(vector double __a, vector double __b) {
+ return __builtin_vsx_xvcmpgtdp_p(__CR6_EQ_REV, __b, __a);
+}
+#endif
+
+/* vec_any_nan */
+
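+/* A NaN is the only value that compares unequal to itself, so asking (via
+   the __CR6_LT_REV "not all true" predicate) whether any element of __a
+   fails vcmpeqfp against itself detects the presence of a NaN element. */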
+static int __attribute__((__always_inline__)) vec_any_nan(vector float __a) {
+ return __builtin_altivec_vcmpeqfp_p(__CR6_LT_REV, __a, __a);
+}
+
+/* vec_any_ne */
+
+static int __ATTRS_o_ai vec_any_ne(vector signed char __a,
+ vector signed char __b) {
+ return __builtin_altivec_vcmpequb_p(__CR6_LT_REV, (vector char)__a,
+ (vector char)__b);
+}
+
+static int __ATTRS_o_ai vec_any_ne(vector signed char __a,
+ vector bool char __b) {
+ return __builtin_altivec_vcmpequb_p(__CR6_LT_REV, (vector char)__a,
+ (vector char)__b);
+}
+
+static int __ATTRS_o_ai vec_any_ne(vector unsigned char __a,
+ vector unsigned char __b) {
+ return __builtin_altivec_vcmpequb_p(__CR6_LT_REV, (vector char)__a,
+ (vector char)__b);
+}
+
+static int __ATTRS_o_ai vec_any_ne(vector unsigned char __a,
+ vector bool char __b) {
+ return __builtin_altivec_vcmpequb_p(__CR6_LT_REV, (vector char)__a,
+ (vector char)__b);
+}
+
+static int __ATTRS_o_ai vec_any_ne(vector bool char __a,
+ vector signed char __b) {
+ return __builtin_altivec_vcmpequb_p(__CR6_LT_REV, (vector char)__a,
+ (vector char)__b);
+}
+
+static int __ATTRS_o_ai vec_any_ne(vector bool char __a,
+ vector unsigned char __b) {
+ return __builtin_altivec_vcmpequb_p(__CR6_LT_REV, (vector char)__a,
+ (vector char)__b);
+}
+
+static int __ATTRS_o_ai vec_any_ne(vector bool char __a, vector bool char __b) {
+ return __builtin_altivec_vcmpequb_p(__CR6_LT_REV, (vector char)__a,
+ (vector char)__b);
+}
+
+static int __ATTRS_o_ai vec_any_ne(vector short __a, vector short __b) {
+ return __builtin_altivec_vcmpequh_p(__CR6_LT_REV, __a, __b);
+}
+
+static int __ATTRS_o_ai vec_any_ne(vector short __a, vector bool short __b) {
+ return __builtin_altivec_vcmpequh_p(__CR6_LT_REV, __a, (vector short)__b);
+}
+
+static int __ATTRS_o_ai vec_any_ne(vector unsigned short __a,
+ vector unsigned short __b) {
+ return __builtin_altivec_vcmpequh_p(__CR6_LT_REV, (vector short)__a,
+ (vector short)__b);
+}
+
+static int __ATTRS_o_ai vec_any_ne(vector unsigned short __a,
+ vector bool short __b) {
+ return __builtin_altivec_vcmpequh_p(__CR6_LT_REV, (vector short)__a,
+ (vector short)__b);
+}
+
+static int __ATTRS_o_ai vec_any_ne(vector bool short __a, vector short __b) {
+ return __builtin_altivec_vcmpequh_p(__CR6_LT_REV, (vector short)__a,
+ (vector short)__b);
+}
+
+static int __ATTRS_o_ai vec_any_ne(vector bool short __a,
+ vector unsigned short __b) {
+ return __builtin_altivec_vcmpequh_p(__CR6_LT_REV, (vector short)__a,
+ (vector short)__b);
+}
+
+static int __ATTRS_o_ai vec_any_ne(vector bool short __a,
+ vector bool short __b) {
+ return __builtin_altivec_vcmpequh_p(__CR6_LT_REV, (vector short)__a,
+ (vector short)__b);
+}
+
+static int __ATTRS_o_ai vec_any_ne(vector pixel __a, vector pixel __b) {
+ return __builtin_altivec_vcmpequh_p(__CR6_LT_REV, (vector short)__a,
+ (vector short)__b);
+}
+
+static int __ATTRS_o_ai vec_any_ne(vector int __a, vector int __b) {
+ return __builtin_altivec_vcmpequw_p(__CR6_LT_REV, __a, __b);
+}
+
+static int __ATTRS_o_ai vec_any_ne(vector int __a, vector bool int __b) {
+ return __builtin_altivec_vcmpequw_p(__CR6_LT_REV, __a, (vector int)__b);
+}
+
+static int __ATTRS_o_ai vec_any_ne(vector unsigned int __a,
+ vector unsigned int __b) {
+ return __builtin_altivec_vcmpequw_p(__CR6_LT_REV, (vector int)__a,
+ (vector int)__b);
+}
+
+static int __ATTRS_o_ai vec_any_ne(vector unsigned int __a,
+ vector bool int __b) {
+ return __builtin_altivec_vcmpequw_p(__CR6_LT_REV, (vector int)__a,
+ (vector int)__b);
+}
+
+static int __ATTRS_o_ai vec_any_ne(vector bool int __a, vector int __b) {
+ return __builtin_altivec_vcmpequw_p(__CR6_LT_REV, (vector int)__a,
+ (vector int)__b);
+}
+
+static int __ATTRS_o_ai vec_any_ne(vector bool int __a,
+ vector unsigned int __b) {
+ return __builtin_altivec_vcmpequw_p(__CR6_LT_REV, (vector int)__a,
+ (vector int)__b);
+}
+
+static int __ATTRS_o_ai vec_any_ne(vector bool int __a, vector bool int __b) {
+ return __builtin_altivec_vcmpequw_p(__CR6_LT_REV, (vector int)__a,
+ (vector int)__b);
+}
+
+#ifdef __POWER8_VECTOR__
+static int __ATTRS_o_ai vec_any_ne(vector signed long long __a,
+ vector signed long long __b) {
+ return __builtin_altivec_vcmpequd_p(__CR6_LT_REV, __a, __b);
+}
+
+static int __ATTRS_o_ai vec_any_ne(vector unsigned long long __a,
+ vector unsigned long long __b) {
+ return __builtin_altivec_vcmpequd_p(__CR6_LT_REV, (vector long long)__a,
+ (vector long long)__b);
+}
+
+static int __ATTRS_o_ai vec_any_ne(vector signed long long __a,
+ vector bool long long __b) {
+ return __builtin_altivec_vcmpequd_p(__CR6_LT_REV, __a,
+ (vector signed long long)__b);
+}
+
+static int __ATTRS_o_ai vec_any_ne(vector unsigned long long __a,
+ vector bool long long __b) {
+ return __builtin_altivec_vcmpequd_p(
+ __CR6_LT_REV, (vector signed long long)__a, (vector signed long long)__b);
+}
+
+static int __ATTRS_o_ai vec_any_ne(vector bool long long __a,
+ vector signed long long __b) {
+ return __builtin_altivec_vcmpequd_p(
+ __CR6_LT_REV, (vector signed long long)__a, (vector signed long long)__b);
+}
+
+static int __ATTRS_o_ai vec_any_ne(vector bool long long __a,
+ vector unsigned long long __b) {
+ return __builtin_altivec_vcmpequd_p(
+ __CR6_LT_REV, (vector signed long long)__a, (vector signed long long)__b);
+}
+
+static int __ATTRS_o_ai vec_any_ne(vector bool long long __a,
+ vector bool long long __b) {
+ return __builtin_altivec_vcmpequd_p(
+ __CR6_LT_REV, (vector signed long long)__a, (vector signed long long)__b);
+}
+#endif
+
+static int __ATTRS_o_ai vec_any_ne(vector float __a, vector float __b) {
+#ifdef __VSX__
+ return __builtin_vsx_xvcmpeqsp_p(__CR6_LT_REV, __a, __b);
+#else
+ return __builtin_altivec_vcmpeqfp_p(__CR6_LT_REV, __a, __b);
+#endif
+}
+
+#ifdef __VSX__
+static int __ATTRS_o_ai vec_any_ne(vector double __a, vector double __b) {
+ return __builtin_vsx_xvcmpeqdp_p(__CR6_LT_REV, __a, __b);
+}
+#endif
+
+/* vec_any_nge */
+
+static int __attribute__((__always_inline__))
+vec_any_nge(vector float __a, vector float __b) {
+ return __builtin_altivec_vcmpgefp_p(__CR6_LT_REV, __a, __b);
+}
+
+/* vec_any_ngt */
+
+static int __attribute__((__always_inline__))
+vec_any_ngt(vector float __a, vector float __b) {
+ return __builtin_altivec_vcmpgtfp_p(__CR6_LT_REV, __a, __b);
+}
+
+/* vec_any_nle */
+
+static int __attribute__((__always_inline__))
+vec_any_nle(vector float __a, vector float __b) {
+ return __builtin_altivec_vcmpgefp_p(__CR6_LT_REV, __b, __a);
+}
+
+/* vec_any_nlt */
+
+static int __attribute__((__always_inline__))
+vec_any_nlt(vector float __a, vector float __b) {
+ return __builtin_altivec_vcmpgtfp_p(__CR6_LT_REV, __b, __a);
+}
+
+/* vec_any_numeric */
+
+static int __attribute__((__always_inline__))
+vec_any_numeric(vector float __a) {
+ return __builtin_altivec_vcmpeqfp_p(__CR6_EQ_REV, __a, __a);
+}
+
+/* vec_any_out */
+
+static int __attribute__((__always_inline__))
+vec_any_out(vector float __a, vector float __b) {
+ return __builtin_altivec_vcmpbfp_p(__CR6_EQ_REV, __a, __b);
+}
+
+/* Power 8 Crypto functions
+Note: We diverge from the current GCC implementation with regard
+to cryptography and related functions as follows:
+- Only the SHA and AES instructions and builtins are disabled by -mno-crypto
+- The remaining ones are only available on Power8 and up so
+ require -mpower8-vector
+The justification for this is that export regulations require
+Category:Vector.Crypto to be optional (i.e. compliant hardware may not provide
+support). As a result, we need to be able to turn off support for those.
+The remaining ones (currently controlled by -mcrypto for GCC) still
+need to be provided on compliant hardware even if Vector.Crypto is not
+provided.
+*/
+#ifdef __CRYPTO__
+#define vec_sbox_be __builtin_altivec_crypto_vsbox
+#define vec_cipher_be __builtin_altivec_crypto_vcipher
+#define vec_cipherlast_be __builtin_altivec_crypto_vcipherlast
+#define vec_ncipher_be __builtin_altivec_crypto_vncipher
+#define vec_ncipherlast_be __builtin_altivec_crypto_vncipherlast
+
+static vector unsigned long long __attribute__((__always_inline__))
+__builtin_crypto_vsbox(vector unsigned long long __a) {
+ return __builtin_altivec_crypto_vsbox(__a);
+}
+
+static vector unsigned long long __attribute__((__always_inline__))
+__builtin_crypto_vcipher(vector unsigned long long __a,
+ vector unsigned long long __b) {
+ return __builtin_altivec_crypto_vcipher(__a, __b);
+}
+
+static vector unsigned long long __attribute__((__always_inline__))
+__builtin_crypto_vcipherlast(vector unsigned long long __a,
+ vector unsigned long long __b) {
+ return __builtin_altivec_crypto_vcipherlast(__a, __b);
+}
+
+static vector unsigned long long __attribute__((__always_inline__))
+__builtin_crypto_vncipher(vector unsigned long long __a,
+ vector unsigned long long __b) {
+ return __builtin_altivec_crypto_vncipher(__a, __b);
+}
+
+static vector unsigned long long __attribute__((__always_inline__))
+__builtin_crypto_vncipherlast(vector unsigned long long __a,
+ vector unsigned long long __b) {
+ return __builtin_altivec_crypto_vncipherlast(__a, __b);
+}
+
+#define __builtin_crypto_vshasigmad __builtin_altivec_crypto_vshasigmad
+#define __builtin_crypto_vshasigmaw __builtin_altivec_crypto_vshasigmaw
+
+#define vec_shasigma_be(X, Y, Z) \
+ _Generic((X), vector unsigned int: __builtin_crypto_vshasigmaw, \
+ vector unsigned long long: __builtin_crypto_vshasigmad) \
+((X), (Y), (Z))
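+
+/* Illustrative sketch (not part of the upstream header): the _Generic
+   selection above dispatches on the type of X, so the single spelling
+   vec_shasigma_be covers both the word (SHA-256) and doubleword (SHA-512)
+   builtins. Given suitably initialized vectors __w and __d, a caller
+   might write:
+
+     __w = vec_shasigma_be(__w, 1, 0xf);  // vector unsigned int
+                                          //   -> __builtin_crypto_vshasigmaw
+     __d = vec_shasigma_be(__d, 1, 0xf);  // vector unsigned long long
+                                          //   -> __builtin_crypto_vshasigmad
+*/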
+#endif
+
+#ifdef __POWER8_VECTOR__
+static vector unsigned char __ATTRS_o_ai
+__builtin_crypto_vpermxor(vector unsigned char __a, vector unsigned char __b,
+ vector unsigned char __c) {
+ return __builtin_altivec_crypto_vpermxor(__a, __b, __c);
+}
+
+static vector unsigned short __ATTRS_o_ai
+__builtin_crypto_vpermxor(vector unsigned short __a, vector unsigned short __b,
+ vector unsigned short __c) {
+ return (vector unsigned short)__builtin_altivec_crypto_vpermxor(
+ (vector unsigned char)__a, (vector unsigned char)__b,
+ (vector unsigned char)__c);
+}
+
+static vector unsigned int __ATTRS_o_ai __builtin_crypto_vpermxor(
+ vector unsigned int __a, vector unsigned int __b, vector unsigned int __c) {
+ return (vector unsigned int)__builtin_altivec_crypto_vpermxor(
+ (vector unsigned char)__a, (vector unsigned char)__b,
+ (vector unsigned char)__c);
+}
+
+static vector unsigned long long __ATTRS_o_ai __builtin_crypto_vpermxor(
+ vector unsigned long long __a, vector unsigned long long __b,
+ vector unsigned long long __c) {
+ return (vector unsigned long long)__builtin_altivec_crypto_vpermxor(
+ (vector unsigned char)__a, (vector unsigned char)__b,
+ (vector unsigned char)__c);
+}
+
+static vector unsigned char __ATTRS_o_ai
+__builtin_crypto_vpmsumb(vector unsigned char __a, vector unsigned char __b) {
+ return __builtin_altivec_crypto_vpmsumb(__a, __b);
+}
+
+static vector unsigned short __ATTRS_o_ai
+__builtin_crypto_vpmsumb(vector unsigned short __a, vector unsigned short __b) {
+ return __builtin_altivec_crypto_vpmsumh(__a, __b);
+}
+
+static vector unsigned int __ATTRS_o_ai
+__builtin_crypto_vpmsumb(vector unsigned int __a, vector unsigned int __b) {
+ return __builtin_altivec_crypto_vpmsumw(__a, __b);
+}
+
+static vector unsigned long long __ATTRS_o_ai
+__builtin_crypto_vpmsumb(vector unsigned long long __a,
+ vector unsigned long long __b) {
+ return __builtin_altivec_crypto_vpmsumd(__a, __b);
+}
+
+static vector signed char __ATTRS_o_ai vec_vgbbd(vector signed char __a) {
+  return __builtin_altivec_vgbbd((vector unsigned char)__a);
+}
+
+static vector unsigned char __ATTRS_o_ai vec_vgbbd(vector unsigned char __a) {
+  return __builtin_altivec_vgbbd(__a);
+}
+
+#define vec_pmsum_be __builtin_crypto_vpmsumb
+#define vec_gb __builtin_altivec_vgbbd
+
+static vector long long __ATTRS_o_ai vec_vbpermq(vector signed char __a,
+                                                 vector signed char __b) {
+  return __builtin_altivec_vbpermq((vector unsigned char)__a,
+                                   (vector unsigned char)__b);
+}
+
+static vector long long __ATTRS_o_ai vec_vbpermq(vector unsigned char __a,
+                                                 vector unsigned char __b) {
+  return __builtin_altivec_vbpermq(__a, __b);
+}
+
+#ifdef __powerpc64__
+static vector unsigned long long __attribute__((__always_inline__))
+vec_bperm(vector unsigned __int128 __a, vector unsigned char __b) {
+  return __builtin_altivec_vbpermq((vector unsigned char)__a,
+                                   (vector unsigned char)__b);
+}
+#endif
+#endif
+
+#undef __ATTRS_o_ai
+
+#endif /* __ALTIVEC_H */
diff --git a/clang-2690385/lib64/clang/3.8/include/ammintrin.h b/clang-2690385/lib64/clang/3.8/include/ammintrin.h
new file mode 100644
index 00000000..4880fd7e
--- /dev/null
+++ b/clang-2690385/lib64/clang/3.8/include/ammintrin.h
@@ -0,0 +1,209 @@
+/*===---- ammintrin.h - SSE4a intrinsics -----------------------------------===
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+ * THE SOFTWARE.
+ *
+ *===-----------------------------------------------------------------------===
+ */
+
+#ifndef __AMMINTRIN_H
+#define __AMMINTRIN_H
+
+#include <pmmintrin.h>
+
+/* Define the default attributes for the functions in this file. */
+#define __DEFAULT_FN_ATTRS __attribute__((__always_inline__, __nodebug__, __target__("sse4a")))
+
+/// \brief Extracts the specified bits from the lower 64 bits of the 128-bit
+/// integer vector operand at the index idx and of the length len.
+///
+/// \headerfile <x86intrin.h>
+///
+/// \code
+/// __m128i _mm_extracti_si64(__m128i x, const int len, const int idx);
+/// \endcode
+///
+/// This intrinsic corresponds to the \c EXTRQ instruction.
+///
+/// \param x
+/// The value from which bits are extracted.
+/// \param len
+/// Bits [5:0] specify the length; the other bits are ignored. If bits [5:0]
+/// are zero, the length is interpreted as 64.
+/// \param idx
+/// Bits [5:0] specify the index of the least significant bit; the other
+/// bits are ignored. If the sum of the index and length is greater than
+/// 64, the result is undefined. If the length and index are both zero,
+/// bits [63:0] of parameter x are extracted. If the length is zero
+/// but the index is non-zero, the result is undefined.
+/// \returns A 128-bit integer vector whose lower 64 bits contain the bits
+/// extracted from the source operand.
+#define _mm_extracti_si64(x, len, idx) \
+ ((__m128i)__builtin_ia32_extrqi((__v2di)(__m128i)(x), \
+ (char)(len), (char)(idx)))
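+
+/* Worked example (a sketch, not part of the original header): with len = 8
+   and idx = 4 the macro extracts bits [11:4] of the low quadword of x into
+   bits [7:0] of the result:
+
+     __m128i __x = _mm_set_epi64x(0, 0xABCLL);
+     __m128i __r = _mm_extracti_si64(__x, 8, 4);  // bits [7:0] == 0xAB
+*/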
+
+/// \brief Extracts the specified bits from the lower 64 bits of the 128-bit
+/// integer vector operand at the index and of the length specified by __y.
+///
+/// \headerfile <x86intrin.h>
+///
+/// This intrinsic corresponds to the \c EXTRQ instruction.
+///
+/// \param __x
+/// The value from which bits are extracted.
+/// \param __y
+/// Specifies the index of the least significant bit at [13:8]
+/// and the length at [5:0]; all other bits are ignored.
+/// If bits [5:0] are zero, the length is interpreted as 64.
+/// If the sum of the index and length is greater than 64, the result is
+/// undefined. If the length and index are both zero, bits [63:0] of
+/// parameter __x are extracted. If the length is zero but the index is
+/// non-zero, the result is undefined.
+/// \returns A 128-bit vector whose lower 64 bits contain the bits extracted
+/// from the source operand.
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_extract_si64(__m128i __x, __m128i __y)
+{
+ return (__m128i)__builtin_ia32_extrq((__v2di)__x, (__v16qi)__y);
+}
+
+/// \brief Inserts bits of a specified length from the source integer vector
+/// y into the lower 64 bits of the destination integer vector x at the
+/// index idx and of the length len.
+///
+/// \headerfile <x86intrin.h>
+///
+/// \code
+/// __m128i _mm_inserti_si64(__m128i x, __m128i y, const int len,
+/// const int idx);
+/// \endcode
+///
+/// This intrinsic corresponds to the \c INSERTQ instruction.
+///
+/// \param x
+/// The destination operand where bits will be inserted. The inserted bits
+/// are defined by the length len and by the index idx specifying the least
+/// significant bit.
+/// \param y
+/// The source operand containing the bits to be extracted. The extracted
+/// bits are the least significant bits of operand y of length len.
+/// \param len
+/// Bits [5:0] specify the length; the other bits are ignored. If bits [5:0]
+/// are zero, the length is interpreted as 64.
+/// \param idx
+/// Bits [5:0] specify the index of the least significant bit; the other
+/// bits are ignored. If the sum of the index and length is greater than
+/// 64, the result is undefined. If the length and index are both zero,
+/// bits [63:0] of parameter y are inserted into parameter x. If the
+/// length is zero but the index is non-zero, the result is undefined.
+/// \returns A 128-bit integer vector containing the original lower 64-bits
+/// of destination operand x with the specified bitfields replaced by the
+/// lower bits of source operand y. The upper 64 bits of the return value
+/// are undefined.
+#define _mm_inserti_si64(x, y, len, idx) \
+ ((__m128i)__builtin_ia32_insertqi((__v2di)(__m128i)(x), \
+ (__v2di)(__m128i)(y), \
+ (char)(len), (char)(idx)))
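+
+/* Worked example (a sketch, not part of the original header): with len = 8
+   and idx = 4 the macro replaces bits [11:4] of the low quadword of x with
+   bits [7:0] of the low quadword of y, leaving the surrounding bits intact:
+
+     __m128i __x = _mm_set_epi64x(0, 0xFFFFLL);
+     __m128i __y = _mm_set_epi64x(0, 0x12LL);
+     __m128i __r = _mm_inserti_si64(__x, __y, 8, 4);  // low bits == 0xF12F
+*/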
+
+/// \brief Inserts bits of a specified length from the source integer vector
+/// __y into the lower 64 bits of the destination integer vector __x at
+/// the index and of the length specified by __y.
+///
+/// \headerfile <x86intrin.h>
+///
+/// This intrinsic corresponds to the \c INSERTQ instruction.
+///
+/// \param __x
+/// The destination operand where bits will be inserted. The inserted bits
+/// are defined by the length and by the index of the least significant bit
+/// specified by operand __y.
+/// \param __y
+/// The source operand containing the bits to be extracted. The extracted
+/// bits are the least significant bits of operand __y with length specified
+/// by bits [69:64]. These are inserted into the destination at the index
+/// specified by bits [77:72]; all other bits are ignored.
+/// If bits [69:64] are zero, the length is interpreted as 64.
+/// If the sum of the index and length is greater than 64, the result is
+/// undefined. If the length and index are both zero, bits [63:0] of
+/// parameter __y are inserted into parameter __x. If the length
+/// is zero but the index is non-zero, the result is undefined.
+/// \returns A 128-bit integer vector containing the original lower 64-bits
+/// of destination operand __x with the specified bitfields replaced by the
+/// lower bits of source operand __y. The upper 64 bits of the return value
+/// are undefined.
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_insert_si64(__m128i __x, __m128i __y)
+{
+ return (__m128i)__builtin_ia32_insertq((__v2di)__x, (__v2di)__y);
+}
+
+/// \brief Stores a 64-bit double-precision value in a 64-bit memory location.
+/// To minimize caching, the data is flagged as non-temporal (unlikely to be
+/// used again soon).
+///
+/// \headerfile <x86intrin.h>
+///
+/// This intrinsic corresponds to the \c MOVNTSD instruction.
+///
+/// \param __p
+/// The 64-bit memory location used to store the register value.
+/// \param __a
+/// The 64-bit double-precision floating-point register value to
+/// be stored.
+static __inline__ void __DEFAULT_FN_ATTRS
+_mm_stream_sd(double *__p, __m128d __a)
+{
+ __builtin_ia32_movntsd(__p, (__v2df)__a);
+}
+
+/// \brief Stores a 32-bit single-precision floating-point value in a 32-bit
+/// memory location. To minimize caching, the data is flagged as
+/// non-temporal (unlikely to be used again soon).
+///
+/// \headerfile <x86intrin.h>
+///
+/// This intrinsic corresponds to the \c MOVNTSS instruction.
+///
+/// \param __p
+/// The 32-bit memory location used to store the register value.
+/// \param __a
+/// The 32-bit single-precision floating-point register value to
+/// be stored.
+static __inline__ void __DEFAULT_FN_ATTRS
+_mm_stream_ss(float *__p, __m128 __a)
+{
+ __builtin_ia32_movntss(__p, (__v4sf)__a);
+}
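+
+/* Usage sketch (not part of the original header): the two streaming stores
+   above write with a non-temporal hint, so they suit large write-only
+   buffers that will not be re-read soon. The helper below is hypothetical:
+
+     static void __fill_floats(float *__dst, __m128 __v, int __n) {
+       for (int __i = 0; __i < __n; ++__i)
+         _mm_stream_ss(__dst + __i, __v);  // stores the low element of __v
+     }
+*/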
+
+#undef __DEFAULT_FN_ATTRS
+
+#endif /* __AMMINTRIN_H */
diff --git a/clang-2690385/lib64/clang/3.8/include/arm_acle.h b/clang-2690385/lib64/clang/3.8/include/arm_acle.h
new file mode 100644
index 00000000..4be1d097
--- /dev/null
+++ b/clang-2690385/lib64/clang/3.8/include/arm_acle.h
@@ -0,0 +1,308 @@
+/*===---- arm_acle.h - ARM Non-Neon intrinsics -----------------------------===
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+ * THE SOFTWARE.
+ *
+ *===-----------------------------------------------------------------------===
+ */
+
+#ifndef __ARM_ACLE_H
+#define __ARM_ACLE_H
+
+#ifndef __ARM_ACLE
+#error "ACLE intrinsics support not enabled."
+#endif
+
+#include <stdint.h>
+
+#if defined(__cplusplus)
+extern "C" {
+#endif
+
+/* 8 SYNCHRONIZATION, BARRIER AND HINT INTRINSICS */
+/* 8.3 Memory barriers */
+#if !defined(_MSC_VER)
+#define __dmb(i) __builtin_arm_dmb(i)
+#define __dsb(i) __builtin_arm_dsb(i)
+#define __isb(i) __builtin_arm_isb(i)
+#endif
+
+/* 8.4 Hints */
+
+#if !defined(_MSC_VER)
+static __inline__ void __attribute__((__always_inline__, __nodebug__)) __wfi(void) {
+ __builtin_arm_wfi();
+}
+
+static __inline__ void __attribute__((__always_inline__, __nodebug__)) __wfe(void) {
+ __builtin_arm_wfe();
+}
+
+static __inline__ void __attribute__((__always_inline__, __nodebug__)) __sev(void) {
+ __builtin_arm_sev();
+}
+
+static __inline__ void __attribute__((__always_inline__, __nodebug__)) __sevl(void) {
+ __builtin_arm_sevl();
+}
+
+static __inline__ void __attribute__((__always_inline__, __nodebug__)) __yield(void) {
+ __builtin_arm_yield();
+}
+#endif
+
+#if __ARM_32BIT_STATE
+#define __dbg(t) __builtin_arm_dbg(t)
+#endif
+
+/* 8.5 Swap */
+static __inline__ uint32_t __attribute__((__always_inline__, __nodebug__))
+ __swp(uint32_t x, volatile uint32_t *p) {
+ uint32_t v;
+ do v = __builtin_arm_ldrex(p); while (__builtin_arm_strex(x, p));
+ return v;
+}
+
+/* 8.6 Memory prefetch intrinsics */
+/* 8.6.1 Data prefetch */
+#define __pld(addr) __pldx(0, 0, 0, addr)
+
+#if __ARM_32BIT_STATE
+#define __pldx(access_kind, cache_level, retention_policy, addr) \
+ __builtin_arm_prefetch(addr, access_kind, 1)
+#else
+#define __pldx(access_kind, cache_level, retention_policy, addr) \
+ __builtin_arm_prefetch(addr, access_kind, cache_level, retention_policy, 1)
+#endif
+
+/* 8.6.2 Instruction prefetch */
+#define __pli(addr) __plix(0, 0, addr)
+
+#if __ARM_32BIT_STATE
+#define __plix(cache_level, retention_policy, addr) \
+ __builtin_arm_prefetch(addr, 0, 0)
+#else
+#define __plix(cache_level, retention_policy, addr) \
+ __builtin_arm_prefetch(addr, 0, cache_level, retention_policy, 0)
+#endif
+
+/* 8.7 NOP */
+static __inline__ void __attribute__((__always_inline__, __nodebug__)) __nop(void) {
+ __builtin_arm_nop();
+}
+
+/* 9 DATA-PROCESSING INTRINSICS */
+/* 9.2 Miscellaneous data-processing intrinsics */
+/* ROR */
+static __inline__ uint32_t __attribute__((__always_inline__, __nodebug__))
+ __ror(uint32_t x, uint32_t y) {
+ y %= 32;
+ if (y == 0) return x;
+ return (x >> y) | (x << (32 - y));
+}
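+
+/* Worked example: rotating right by 8 moves the low byte to the top,
+   e.g. __ror(0x12345678U, 8) == 0x78123456. */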
+
+static __inline__ uint64_t __attribute__((__always_inline__, __nodebug__))
+ __rorll(uint64_t x, uint32_t y) {
+ y %= 64;
+ if (y == 0) return x;
+ return (x >> y) | (x << (64 - y));
+}
+
+static __inline__ unsigned long __attribute__((__always_inline__, __nodebug__))
+ __rorl(unsigned long x, uint32_t y) {
+#if __SIZEOF_LONG__ == 4
+ return __ror(x, y);
+#else
+ return __rorll(x, y);
+#endif
+}
+
+/* CLZ */
+static __inline__ uint32_t __attribute__((__always_inline__, __nodebug__))
+ __clz(uint32_t t) {
+ return __builtin_clz(t);
+}
+
+static __inline__ unsigned long __attribute__((__always_inline__, __nodebug__))
+ __clzl(unsigned long t) {
+ return __builtin_clzl(t);
+}
+
+static __inline__ uint64_t __attribute__((__always_inline__, __nodebug__))
+ __clzll(uint64_t t) {
+ return __builtin_clzll(t);
+}
+
+/* REV */
+static __inline__ uint32_t __attribute__((__always_inline__, __nodebug__))
+ __rev(uint32_t t) {
+ return __builtin_bswap32(t);
+}
+
+static __inline__ unsigned long __attribute__((__always_inline__, __nodebug__))
+ __revl(unsigned long t) {
+#if __SIZEOF_LONG__ == 4
+ return __builtin_bswap32(t);
+#else
+ return __builtin_bswap64(t);
+#endif
+}
+
+static __inline__ uint64_t __attribute__((__always_inline__, __nodebug__))
+ __revll(uint64_t t) {
+ return __builtin_bswap64(t);
+}
+
+/* REV16 */
+static __inline__ uint32_t __attribute__((__always_inline__, __nodebug__))
+ __rev16(uint32_t t) {
+ return __ror(__rev(t), 16);
+}
+
+static __inline__ uint64_t __attribute__((__always_inline__, __nodebug__))
+ __rev16ll(uint64_t t) {
+ return (((uint64_t)__rev16(t >> 32)) << 32) | __rev16(t);
+}
+
+static __inline__ unsigned long __attribute__((__always_inline__, __nodebug__))
+ __rev16l(unsigned long t) {
+#if __SIZEOF_LONG__ == 4
+ return __rev16(t);
+#else
+ return __rev16ll(t);
+#endif
+}
+
+/* REVSH */
+static __inline__ int16_t __attribute__((__always_inline__, __nodebug__))
+ __revsh(int16_t t) {
+ return __builtin_bswap16(t);
+}
+
+/* RBIT */
+static __inline__ uint32_t __attribute__((__always_inline__, __nodebug__))
+ __rbit(uint32_t t) {
+ return __builtin_arm_rbit(t);
+}
+
+static __inline__ uint64_t __attribute__((__always_inline__, __nodebug__))
+ __rbitll(uint64_t t) {
+#if __ARM_32BIT_STATE
+ return (((uint64_t) __builtin_arm_rbit(t)) << 32) |
+ __builtin_arm_rbit(t >> 32);
+#else
+ return __builtin_arm_rbit64(t);
+#endif
+}
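+
+/* On 32-bit ARM there is no 64-bit RBIT instruction, so the reversal above
+   is composed from two 32-bit RBITs: each half is bit-reversed and the
+   halves are swapped, e.g. __rbitll(1) == 0x8000000000000000ULL. */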
+
+static __inline__ unsigned long __attribute__((__always_inline__, __nodebug__))
+ __rbitl(unsigned long t) {
+#if __SIZEOF_LONG__ == 4
+ return __rbit(t);
+#else
+ return __rbitll(t);
+#endif
+}
+
+/*
+ * 9.4 Saturating intrinsics
+ *
+ * FIXME: Change the guard to the corresponding __ARM_FEATURE flag once the
+ * Q-flag intrinsics are implemented and the flag is enabled.
+ */
+/* 9.4.1 Width-specified saturation intrinsics */
+#if __ARM_32BIT_STATE
+#define __ssat(x, y) __builtin_arm_ssat(x, y)
+#define __usat(x, y) __builtin_arm_usat(x, y)
+#endif
+
+/* 9.4.2 Saturating addition and subtraction intrinsics */
+#if __ARM_32BIT_STATE
+static __inline__ int32_t __attribute__((__always_inline__, __nodebug__))
+ __qadd(int32_t t, int32_t v) {
+ return __builtin_arm_qadd(t, v);
+}
+
+static __inline__ int32_t __attribute__((__always_inline__, __nodebug__))
+ __qsub(int32_t t, int32_t v) {
+ return __builtin_arm_qsub(t, v);
+}
+
+static __inline__ int32_t __attribute__((__always_inline__, __nodebug__))
+__qdbl(int32_t t) {
+ return __builtin_arm_qadd(t, t);
+}
+#endif
+
+/* 9.7 CRC32 intrinsics */
+#if __ARM_FEATURE_CRC32
+static __inline__ uint32_t __attribute__((__always_inline__, __nodebug__))
+ __crc32b(uint32_t a, uint8_t b) {
+ return __builtin_arm_crc32b(a, b);
+}
+
+static __inline__ uint32_t __attribute__((__always_inline__, __nodebug__))
+ __crc32h(uint32_t a, uint16_t b) {
+ return __builtin_arm_crc32h(a, b);
+}
+
+static __inline__ uint32_t __attribute__((__always_inline__, __nodebug__))
+ __crc32w(uint32_t a, uint32_t b) {
+ return __builtin_arm_crc32w(a, b);
+}
+
+static __inline__ uint32_t __attribute__((__always_inline__, __nodebug__))
+ __crc32d(uint32_t a, uint64_t b) {
+ return __builtin_arm_crc32d(a, b);
+}
+
+static __inline__ uint32_t __attribute__((__always_inline__, __nodebug__))
+ __crc32cb(uint32_t a, uint8_t b) {
+ return __builtin_arm_crc32cb(a, b);
+}
+
+static __inline__ uint32_t __attribute__((__always_inline__, __nodebug__))
+ __crc32ch(uint32_t a, uint16_t b) {
+ return __builtin_arm_crc32ch(a, b);
+}
+
+static __inline__ uint32_t __attribute__((__always_inline__, __nodebug__))
+ __crc32cw(uint32_t a, uint32_t b) {
+ return __builtin_arm_crc32cw(a, b);
+}
+
+static __inline__ uint32_t __attribute__((__always_inline__, __nodebug__))
+ __crc32cd(uint32_t a, uint64_t b) {
+ return __builtin_arm_crc32cd(a, b);
+}
+#endif
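+
+/* Usage sketch (assumes __ARM_FEATURE_CRC32; not part of the original
+   header): each intrinsic folds one input unit into a running CRC, so a
+   buffer is processed by chaining calls. The helper is hypothetical:
+
+     static uint32_t __crc32_buf(uint32_t __crc, const uint8_t *__p, int __n) {
+       while (__n--)
+         __crc = __crc32b(__crc, *__p++);  // one byte per call
+       return __crc;
+     }
+*/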
+
+/* 10.1 Special register intrinsics */
+#define __arm_rsr(sysreg) __builtin_arm_rsr(sysreg)
+#define __arm_rsr64(sysreg) __builtin_arm_rsr64(sysreg)
+#define __arm_rsrp(sysreg) __builtin_arm_rsrp(sysreg)
+#define __arm_wsr(sysreg, v) __builtin_arm_wsr(sysreg, v)
+#define __arm_wsr64(sysreg, v) __builtin_arm_wsr64(sysreg, v)
+#define __arm_wsrp(sysreg, v) __builtin_arm_wsrp(sysreg, v)
+
+#if defined(__cplusplus)
+}
+#endif
+
+#endif /* __ARM_ACLE_H */
diff --git a/clang-2690385/lib64/clang/3.8/include/arm_neon.h b/clang-2690385/lib64/clang/3.8/include/arm_neon.h
new file mode 100644
index 00000000..df94cd0e
--- /dev/null
+++ b/clang-2690385/lib64/clang/3.8/include/arm_neon.h
@@ -0,0 +1,69237 @@
+/*===---- arm_neon.h - ARM Neon intrinsics ---------------------------------===
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+ * THE SOFTWARE.
+ *
+ *===-----------------------------------------------------------------------===
+ */
+
+#ifndef __ARM_NEON_H
+#define __ARM_NEON_H
+
+#if !defined(__ARM_NEON)
+#error "NEON support not enabled"
+#endif
+
+#include <stdint.h>
+
+typedef float float32_t;
+typedef __fp16 float16_t;
+#ifdef __aarch64__
+typedef double float64_t;
+#endif
+
+#ifdef __aarch64__
+typedef uint8_t poly8_t;
+typedef uint16_t poly16_t;
+typedef uint64_t poly64_t;
+typedef __uint128_t poly128_t;
+#else
+typedef int8_t poly8_t;
+typedef int16_t poly16_t;
+#endif
+typedef __attribute__((neon_vector_type(8))) int8_t int8x8_t;
+typedef __attribute__((neon_vector_type(16))) int8_t int8x16_t;
+typedef __attribute__((neon_vector_type(4))) int16_t int16x4_t;
+typedef __attribute__((neon_vector_type(8))) int16_t int16x8_t;
+typedef __attribute__((neon_vector_type(2))) int32_t int32x2_t;
+typedef __attribute__((neon_vector_type(4))) int32_t int32x4_t;
+typedef __attribute__((neon_vector_type(1))) int64_t int64x1_t;
+typedef __attribute__((neon_vector_type(2))) int64_t int64x2_t;
+typedef __attribute__((neon_vector_type(8))) uint8_t uint8x8_t;
+typedef __attribute__((neon_vector_type(16))) uint8_t uint8x16_t;
+typedef __attribute__((neon_vector_type(4))) uint16_t uint16x4_t;
+typedef __attribute__((neon_vector_type(8))) uint16_t uint16x8_t;
+typedef __attribute__((neon_vector_type(2))) uint32_t uint32x2_t;
+typedef __attribute__((neon_vector_type(4))) uint32_t uint32x4_t;
+typedef __attribute__((neon_vector_type(1))) uint64_t uint64x1_t;
+typedef __attribute__((neon_vector_type(2))) uint64_t uint64x2_t;
+typedef __attribute__((neon_vector_type(4))) float16_t float16x4_t;
+typedef __attribute__((neon_vector_type(8))) float16_t float16x8_t;
+typedef __attribute__((neon_vector_type(2))) float32_t float32x2_t;
+typedef __attribute__((neon_vector_type(4))) float32_t float32x4_t;
+#ifdef __aarch64__
+typedef __attribute__((neon_vector_type(1))) float64_t float64x1_t;
+typedef __attribute__((neon_vector_type(2))) float64_t float64x2_t;
+#endif
+typedef __attribute__((neon_polyvector_type(8))) poly8_t poly8x8_t;
+typedef __attribute__((neon_polyvector_type(16))) poly8_t poly8x16_t;
+typedef __attribute__((neon_polyvector_type(4))) poly16_t poly16x4_t;
+typedef __attribute__((neon_polyvector_type(8))) poly16_t poly16x8_t;
+#ifdef __aarch64__
+typedef __attribute__((neon_polyvector_type(1))) poly64_t poly64x1_t;
+typedef __attribute__((neon_polyvector_type(2))) poly64_t poly64x2_t;
+#endif
+
+typedef struct int8x8x2_t {
+ int8x8_t val[2];
+} int8x8x2_t;
+
+typedef struct int8x16x2_t {
+ int8x16_t val[2];
+} int8x16x2_t;
+
+typedef struct int16x4x2_t {
+ int16x4_t val[2];
+} int16x4x2_t;
+
+typedef struct int16x8x2_t {
+ int16x8_t val[2];
+} int16x8x2_t;
+
+typedef struct int32x2x2_t {
+ int32x2_t val[2];
+} int32x2x2_t;
+
+typedef struct int32x4x2_t {
+ int32x4_t val[2];
+} int32x4x2_t;
+
+typedef struct int64x1x2_t {
+ int64x1_t val[2];
+} int64x1x2_t;
+
+typedef struct int64x2x2_t {
+ int64x2_t val[2];
+} int64x2x2_t;
+
+typedef struct uint8x8x2_t {
+ uint8x8_t val[2];
+} uint8x8x2_t;
+
+typedef struct uint8x16x2_t {
+ uint8x16_t val[2];
+} uint8x16x2_t;
+
+typedef struct uint16x4x2_t {
+ uint16x4_t val[2];
+} uint16x4x2_t;
+
+typedef struct uint16x8x2_t {
+ uint16x8_t val[2];
+} uint16x8x2_t;
+
+typedef struct uint32x2x2_t {
+ uint32x2_t val[2];
+} uint32x2x2_t;
+
+typedef struct uint32x4x2_t {
+ uint32x4_t val[2];
+} uint32x4x2_t;
+
+typedef struct uint64x1x2_t {
+ uint64x1_t val[2];
+} uint64x1x2_t;
+
+typedef struct uint64x2x2_t {
+ uint64x2_t val[2];
+} uint64x2x2_t;
+
+typedef struct float16x4x2_t {
+ float16x4_t val[2];
+} float16x4x2_t;
+
+typedef struct float16x8x2_t {
+ float16x8_t val[2];
+} float16x8x2_t;
+
+typedef struct float32x2x2_t {
+ float32x2_t val[2];
+} float32x2x2_t;
+
+typedef struct float32x4x2_t {
+ float32x4_t val[2];
+} float32x4x2_t;
+
+#ifdef __aarch64__
+typedef struct float64x1x2_t {
+ float64x1_t val[2];
+} float64x1x2_t;
+
+typedef struct float64x2x2_t {
+ float64x2_t val[2];
+} float64x2x2_t;
+
+#endif
+typedef struct poly8x8x2_t {
+ poly8x8_t val[2];
+} poly8x8x2_t;
+
+typedef struct poly8x16x2_t {
+ poly8x16_t val[2];
+} poly8x16x2_t;
+
+typedef struct poly16x4x2_t {
+ poly16x4_t val[2];
+} poly16x4x2_t;
+
+typedef struct poly16x8x2_t {
+ poly16x8_t val[2];
+} poly16x8x2_t;
+
+#ifdef __aarch64__
+typedef struct poly64x1x2_t {
+ poly64x1_t val[2];
+} poly64x1x2_t;
+
+typedef struct poly64x2x2_t {
+ poly64x2_t val[2];
+} poly64x2x2_t;
+
+#endif
+typedef struct int8x8x3_t {
+ int8x8_t val[3];
+} int8x8x3_t;
+
+typedef struct int8x16x3_t {
+ int8x16_t val[3];
+} int8x16x3_t;
+
+typedef struct int16x4x3_t {
+ int16x4_t val[3];
+} int16x4x3_t;
+
+typedef struct int16x8x3_t {
+ int16x8_t val[3];
+} int16x8x3_t;
+
+typedef struct int32x2x3_t {
+ int32x2_t val[3];
+} int32x2x3_t;
+
+typedef struct int32x4x3_t {
+ int32x4_t val[3];
+} int32x4x3_t;
+
+typedef struct int64x1x3_t {
+ int64x1_t val[3];
+} int64x1x3_t;
+
+typedef struct int64x2x3_t {
+ int64x2_t val[3];
+} int64x2x3_t;
+
+typedef struct uint8x8x3_t {
+ uint8x8_t val[3];
+} uint8x8x3_t;
+
+typedef struct uint8x16x3_t {
+ uint8x16_t val[3];
+} uint8x16x3_t;
+
+typedef struct uint16x4x3_t {
+ uint16x4_t val[3];
+} uint16x4x3_t;
+
+typedef struct uint16x8x3_t {
+ uint16x8_t val[3];
+} uint16x8x3_t;
+
+typedef struct uint32x2x3_t {
+ uint32x2_t val[3];
+} uint32x2x3_t;
+
+typedef struct uint32x4x3_t {
+ uint32x4_t val[3];
+} uint32x4x3_t;
+
+typedef struct uint64x1x3_t {
+ uint64x1_t val[3];
+} uint64x1x3_t;
+
+typedef struct uint64x2x3_t {
+ uint64x2_t val[3];
+} uint64x2x3_t;
+
+typedef struct float16x4x3_t {
+ float16x4_t val[3];
+} float16x4x3_t;
+
+typedef struct float16x8x3_t {
+ float16x8_t val[3];
+} float16x8x3_t;
+
+typedef struct float32x2x3_t {
+ float32x2_t val[3];
+} float32x2x3_t;
+
+typedef struct float32x4x3_t {
+ float32x4_t val[3];
+} float32x4x3_t;
+
+#ifdef __aarch64__
+typedef struct float64x1x3_t {
+ float64x1_t val[3];
+} float64x1x3_t;
+
+typedef struct float64x2x3_t {
+ float64x2_t val[3];
+} float64x2x3_t;
+
+#endif
+typedef struct poly8x8x3_t {
+ poly8x8_t val[3];
+} poly8x8x3_t;
+
+typedef struct poly8x16x3_t {
+ poly8x16_t val[3];
+} poly8x16x3_t;
+
+typedef struct poly16x4x3_t {
+ poly16x4_t val[3];
+} poly16x4x3_t;
+
+typedef struct poly16x8x3_t {
+ poly16x8_t val[3];
+} poly16x8x3_t;
+
+#ifdef __aarch64__
+typedef struct poly64x1x3_t {
+ poly64x1_t val[3];
+} poly64x1x3_t;
+
+typedef struct poly64x2x3_t {
+ poly64x2_t val[3];
+} poly64x2x3_t;
+
+#endif
+typedef struct int8x8x4_t {
+ int8x8_t val[4];
+} int8x8x4_t;
+
+typedef struct int8x16x4_t {
+ int8x16_t val[4];
+} int8x16x4_t;
+
+typedef struct int16x4x4_t {
+ int16x4_t val[4];
+} int16x4x4_t;
+
+typedef struct int16x8x4_t {
+ int16x8_t val[4];
+} int16x8x4_t;
+
+typedef struct int32x2x4_t {
+ int32x2_t val[4];
+} int32x2x4_t;
+
+typedef struct int32x4x4_t {
+ int32x4_t val[4];
+} int32x4x4_t;
+
+typedef struct int64x1x4_t {
+ int64x1_t val[4];
+} int64x1x4_t;
+
+typedef struct int64x2x4_t {
+ int64x2_t val[4];
+} int64x2x4_t;
+
+typedef struct uint8x8x4_t {
+ uint8x8_t val[4];
+} uint8x8x4_t;
+
+typedef struct uint8x16x4_t {
+ uint8x16_t val[4];
+} uint8x16x4_t;
+
+typedef struct uint16x4x4_t {
+ uint16x4_t val[4];
+} uint16x4x4_t;
+
+typedef struct uint16x8x4_t {
+ uint16x8_t val[4];
+} uint16x8x4_t;
+
+typedef struct uint32x2x4_t {
+ uint32x2_t val[4];
+} uint32x2x4_t;
+
+typedef struct uint32x4x4_t {
+ uint32x4_t val[4];
+} uint32x4x4_t;
+
+typedef struct uint64x1x4_t {
+ uint64x1_t val[4];
+} uint64x1x4_t;
+
+typedef struct uint64x2x4_t {
+ uint64x2_t val[4];
+} uint64x2x4_t;
+
+typedef struct float16x4x4_t {
+ float16x4_t val[4];
+} float16x4x4_t;
+
+typedef struct float16x8x4_t {
+ float16x8_t val[4];
+} float16x8x4_t;
+
+typedef struct float32x2x4_t {
+ float32x2_t val[4];
+} float32x2x4_t;
+
+typedef struct float32x4x4_t {
+ float32x4_t val[4];
+} float32x4x4_t;
+
+#ifdef __aarch64__
+typedef struct float64x1x4_t {
+ float64x1_t val[4];
+} float64x1x4_t;
+
+typedef struct float64x2x4_t {
+ float64x2_t val[4];
+} float64x2x4_t;
+
+#endif
+typedef struct poly8x8x4_t {
+ poly8x8_t val[4];
+} poly8x8x4_t;
+
+typedef struct poly8x16x4_t {
+ poly8x16_t val[4];
+} poly8x16x4_t;
+
+typedef struct poly16x4x4_t {
+ poly16x4_t val[4];
+} poly16x4x4_t;
+
+typedef struct poly16x8x4_t {
+ poly16x8_t val[4];
+} poly16x8x4_t;
+
+#ifdef __aarch64__
+typedef struct poly64x1x4_t {
+ poly64x1_t val[4];
+} poly64x1x4_t;
+
+typedef struct poly64x2x4_t {
+ poly64x2_t val[4];
+} poly64x2x4_t;
+
+#endif
+
+#define __ai static inline __attribute__((__always_inline__, __nodebug__))
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint8x16_t vabdq_u8(uint8x16_t __p0, uint8x16_t __p1) {
+ uint8x16_t __ret;
+ __ret = (uint8x16_t) __builtin_neon_vabdq_v((int8x16_t)__p0, (int8x16_t)__p1, 48);
+ return __ret;
+}
+#else
+__ai uint8x16_t vabdq_u8(uint8x16_t __p0, uint8x16_t __p1) {
+ uint8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
+ uint8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
+ uint8x16_t __ret;
+ __ret = (uint8x16_t) __builtin_neon_vabdq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 48);
+ __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
+ return __ret;
+}
+__ai uint8x16_t __noswap_vabdq_u8(uint8x16_t __p0, uint8x16_t __p1) {
+ uint8x16_t __ret;
+ __ret = (uint8x16_t) __builtin_neon_vabdq_v((int8x16_t)__p0, (int8x16_t)__p1, 48);
+ return __ret;
+}
+#endif
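+
+/* Note on the pattern above (repeated throughout this header): the NEON
+   builtins expect lanes in little-endian order, so on big-endian targets
+   each wrapper reverses the input lanes with __builtin_shufflevector,
+   invokes the builtin, and reverses the result back. The __noswap_*
+   variants are used internally where operands are already in builtin
+   lane order. */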
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint32x4_t vabdq_u32(uint32x4_t __p0, uint32x4_t __p1) {
+ uint32x4_t __ret;
+ __ret = (uint32x4_t) __builtin_neon_vabdq_v((int8x16_t)__p0, (int8x16_t)__p1, 50);
+ return __ret;
+}
+#else
+__ai uint32x4_t vabdq_u32(uint32x4_t __p0, uint32x4_t __p1) {
+ uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
+ uint32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
+ uint32x4_t __ret;
+ __ret = (uint32x4_t) __builtin_neon_vabdq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 50);
+ __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
+ return __ret;
+}
+__ai uint32x4_t __noswap_vabdq_u32(uint32x4_t __p0, uint32x4_t __p1) {
+ uint32x4_t __ret;
+ __ret = (uint32x4_t) __builtin_neon_vabdq_v((int8x16_t)__p0, (int8x16_t)__p1, 50);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint16x8_t vabdq_u16(uint16x8_t __p0, uint16x8_t __p1) {
+ uint16x8_t __ret;
+ __ret = (uint16x8_t) __builtin_neon_vabdq_v((int8x16_t)__p0, (int8x16_t)__p1, 49);
+ return __ret;
+}
+#else
+__ai uint16x8_t vabdq_u16(uint16x8_t __p0, uint16x8_t __p1) {
+ uint16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
+ uint16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
+ uint16x8_t __ret;
+ __ret = (uint16x8_t) __builtin_neon_vabdq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 49);
+ __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
+ return __ret;
+}
+__ai uint16x8_t __noswap_vabdq_u16(uint16x8_t __p0, uint16x8_t __p1) {
+ uint16x8_t __ret;
+ __ret = (uint16x8_t) __builtin_neon_vabdq_v((int8x16_t)__p0, (int8x16_t)__p1, 49);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai int8x16_t vabdq_s8(int8x16_t __p0, int8x16_t __p1) {
+ int8x16_t __ret;
+ __ret = (int8x16_t) __builtin_neon_vabdq_v((int8x16_t)__p0, (int8x16_t)__p1, 32);
+ return __ret;
+}
+#else
+__ai int8x16_t vabdq_s8(int8x16_t __p0, int8x16_t __p1) {
+ int8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
+ int8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
+ int8x16_t __ret;
+ __ret = (int8x16_t) __builtin_neon_vabdq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 32);
+ __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
+ return __ret;
+}
+__ai int8x16_t __noswap_vabdq_s8(int8x16_t __p0, int8x16_t __p1) {
+ int8x16_t __ret;
+ __ret = (int8x16_t) __builtin_neon_vabdq_v((int8x16_t)__p0, (int8x16_t)__p1, 32);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai float32x4_t vabdq_f32(float32x4_t __p0, float32x4_t __p1) {
+ float32x4_t __ret;
+ __ret = (float32x4_t) __builtin_neon_vabdq_v((int8x16_t)__p0, (int8x16_t)__p1, 41);
+ return __ret;
+}
+#else
+__ai float32x4_t vabdq_f32(float32x4_t __p0, float32x4_t __p1) {
+ float32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
+ float32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
+ float32x4_t __ret;
+ __ret = (float32x4_t) __builtin_neon_vabdq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 41);
+ __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai int32x4_t vabdq_s32(int32x4_t __p0, int32x4_t __p1) {
+ int32x4_t __ret;
+ __ret = (int32x4_t) __builtin_neon_vabdq_v((int8x16_t)__p0, (int8x16_t)__p1, 34);
+ return __ret;
+}
+#else
+__ai int32x4_t vabdq_s32(int32x4_t __p0, int32x4_t __p1) {
+ int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
+ int32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
+ int32x4_t __ret;
+ __ret = (int32x4_t) __builtin_neon_vabdq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 34);
+ __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
+ return __ret;
+}
+__ai int32x4_t __noswap_vabdq_s32(int32x4_t __p0, int32x4_t __p1) {
+ int32x4_t __ret;
+ __ret = (int32x4_t) __builtin_neon_vabdq_v((int8x16_t)__p0, (int8x16_t)__p1, 34);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai int16x8_t vabdq_s16(int16x8_t __p0, int16x8_t __p1) {
+ int16x8_t __ret;
+ __ret = (int16x8_t) __builtin_neon_vabdq_v((int8x16_t)__p0, (int8x16_t)__p1, 33);
+ return __ret;
+}
+#else
+__ai int16x8_t vabdq_s16(int16x8_t __p0, int16x8_t __p1) {
+ int16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
+ int16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
+ int16x8_t __ret;
+ __ret = (int16x8_t) __builtin_neon_vabdq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 33);
+ __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
+ return __ret;
+}
+__ai int16x8_t __noswap_vabdq_s16(int16x8_t __p0, int16x8_t __p1) {
+ int16x8_t __ret;
+ __ret = (int16x8_t) __builtin_neon_vabdq_v((int8x16_t)__p0, (int8x16_t)__p1, 33);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint8x8_t vabd_u8(uint8x8_t __p0, uint8x8_t __p1) {
+ uint8x8_t __ret;
+ __ret = (uint8x8_t) __builtin_neon_vabd_v((int8x8_t)__p0, (int8x8_t)__p1, 16);
+ return __ret;
+}
+#else
+__ai uint8x8_t vabd_u8(uint8x8_t __p0, uint8x8_t __p1) {
+ uint8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
+ uint8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
+ uint8x8_t __ret;
+ __ret = (uint8x8_t) __builtin_neon_vabd_v((int8x8_t)__rev0, (int8x8_t)__rev1, 16);
+ __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
+ return __ret;
+}
+__ai uint8x8_t __noswap_vabd_u8(uint8x8_t __p0, uint8x8_t __p1) {
+ uint8x8_t __ret;
+ __ret = (uint8x8_t) __builtin_neon_vabd_v((int8x8_t)__p0, (int8x8_t)__p1, 16);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint32x2_t vabd_u32(uint32x2_t __p0, uint32x2_t __p1) {
+ uint32x2_t __ret;
+ __ret = (uint32x2_t) __builtin_neon_vabd_v((int8x8_t)__p0, (int8x8_t)__p1, 18);
+ return __ret;
+}
+#else
+__ai uint32x2_t vabd_u32(uint32x2_t __p0, uint32x2_t __p1) {
+ uint32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
+ uint32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
+ uint32x2_t __ret;
+ __ret = (uint32x2_t) __builtin_neon_vabd_v((int8x8_t)__rev0, (int8x8_t)__rev1, 18);
+ __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
+ return __ret;
+}
+__ai uint32x2_t __noswap_vabd_u32(uint32x2_t __p0, uint32x2_t __p1) {
+ uint32x2_t __ret;
+ __ret = (uint32x2_t) __builtin_neon_vabd_v((int8x8_t)__p0, (int8x8_t)__p1, 18);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint16x4_t vabd_u16(uint16x4_t __p0, uint16x4_t __p1) {
+ uint16x4_t __ret;
+ __ret = (uint16x4_t) __builtin_neon_vabd_v((int8x8_t)__p0, (int8x8_t)__p1, 17);
+ return __ret;
+}
+#else
+__ai uint16x4_t vabd_u16(uint16x4_t __p0, uint16x4_t __p1) {
+ uint16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
+ uint16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
+ uint16x4_t __ret;
+ __ret = (uint16x4_t) __builtin_neon_vabd_v((int8x8_t)__rev0, (int8x8_t)__rev1, 17);
+ __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
+ return __ret;
+}
+__ai uint16x4_t __noswap_vabd_u16(uint16x4_t __p0, uint16x4_t __p1) {
+ uint16x4_t __ret;
+ __ret = (uint16x4_t) __builtin_neon_vabd_v((int8x8_t)__p0, (int8x8_t)__p1, 17);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai int8x8_t vabd_s8(int8x8_t __p0, int8x8_t __p1) {
+ int8x8_t __ret;
+ __ret = (int8x8_t) __builtin_neon_vabd_v((int8x8_t)__p0, (int8x8_t)__p1, 0);
+ return __ret;
+}
+#else
+__ai int8x8_t vabd_s8(int8x8_t __p0, int8x8_t __p1) {
+ int8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
+ int8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
+ int8x8_t __ret;
+ __ret = (int8x8_t) __builtin_neon_vabd_v((int8x8_t)__rev0, (int8x8_t)__rev1, 0);
+ __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
+ return __ret;
+}
+__ai int8x8_t __noswap_vabd_s8(int8x8_t __p0, int8x8_t __p1) {
+ int8x8_t __ret;
+ __ret = (int8x8_t) __builtin_neon_vabd_v((int8x8_t)__p0, (int8x8_t)__p1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai float32x2_t vabd_f32(float32x2_t __p0, float32x2_t __p1) {
+ float32x2_t __ret;
+ __ret = (float32x2_t) __builtin_neon_vabd_v((int8x8_t)__p0, (int8x8_t)__p1, 9);
+ return __ret;
+}
+#else
+__ai float32x2_t vabd_f32(float32x2_t __p0, float32x2_t __p1) {
+ float32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
+ float32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
+ float32x2_t __ret;
+ __ret = (float32x2_t) __builtin_neon_vabd_v((int8x8_t)__rev0, (int8x8_t)__rev1, 9);
+ __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai int32x2_t vabd_s32(int32x2_t __p0, int32x2_t __p1) {
+ int32x2_t __ret;
+ __ret = (int32x2_t) __builtin_neon_vabd_v((int8x8_t)__p0, (int8x8_t)__p1, 2);
+ return __ret;
+}
+#else
+__ai int32x2_t vabd_s32(int32x2_t __p0, int32x2_t __p1) {
+ int32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
+ int32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
+ int32x2_t __ret;
+ __ret = (int32x2_t) __builtin_neon_vabd_v((int8x8_t)__rev0, (int8x8_t)__rev1, 2);
+ __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
+ return __ret;
+}
+__ai int32x2_t __noswap_vabd_s32(int32x2_t __p0, int32x2_t __p1) {
+ int32x2_t __ret;
+ __ret = (int32x2_t) __builtin_neon_vabd_v((int8x8_t)__p0, (int8x8_t)__p1, 2);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai int16x4_t vabd_s16(int16x4_t __p0, int16x4_t __p1) {
+ int16x4_t __ret;
+ __ret = (int16x4_t) __builtin_neon_vabd_v((int8x8_t)__p0, (int8x8_t)__p1, 1);
+ return __ret;
+}
+#else
+__ai int16x4_t vabd_s16(int16x4_t __p0, int16x4_t __p1) {
+ int16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
+ int16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
+ int16x4_t __ret;
+ __ret = (int16x4_t) __builtin_neon_vabd_v((int8x8_t)__rev0, (int8x8_t)__rev1, 1);
+ __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
+ return __ret;
+}
+__ai int16x4_t __noswap_vabd_s16(int16x4_t __p0, int16x4_t __p1) {
+ int16x4_t __ret;
+ __ret = (int16x4_t) __builtin_neon_vabd_v((int8x8_t)__p0, (int8x8_t)__p1, 1);
+ return __ret;
+}
+#endif
+
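+/* vabs/vabsq: lane-wise absolute value for signed integer and float vectors. */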
+#ifdef __LITTLE_ENDIAN__
+__ai int8x16_t vabsq_s8(int8x16_t __p0) {
+ int8x16_t __ret;
+ __ret = (int8x16_t) __builtin_neon_vabsq_v((int8x16_t)__p0, 32);
+ return __ret;
+}
+#else
+__ai int8x16_t vabsq_s8(int8x16_t __p0) {
+ int8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
+ int8x16_t __ret;
+ __ret = (int8x16_t) __builtin_neon_vabsq_v((int8x16_t)__rev0, 32);
+ __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai float32x4_t vabsq_f32(float32x4_t __p0) {
+ float32x4_t __ret;
+ __ret = (float32x4_t) __builtin_neon_vabsq_v((int8x16_t)__p0, 41);
+ return __ret;
+}
+#else
+__ai float32x4_t vabsq_f32(float32x4_t __p0) {
+ float32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
+ float32x4_t __ret;
+ __ret = (float32x4_t) __builtin_neon_vabsq_v((int8x16_t)__rev0, 41);
+ __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai int32x4_t vabsq_s32(int32x4_t __p0) {
+ int32x4_t __ret;
+ __ret = (int32x4_t) __builtin_neon_vabsq_v((int8x16_t)__p0, 34);
+ return __ret;
+}
+#else
+__ai int32x4_t vabsq_s32(int32x4_t __p0) {
+ int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
+ int32x4_t __ret;
+ __ret = (int32x4_t) __builtin_neon_vabsq_v((int8x16_t)__rev0, 34);
+ __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai int16x8_t vabsq_s16(int16x8_t __p0) {
+ int16x8_t __ret;
+ __ret = (int16x8_t) __builtin_neon_vabsq_v((int8x16_t)__p0, 33);
+ return __ret;
+}
+#else
+__ai int16x8_t vabsq_s16(int16x8_t __p0) {
+ int16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
+ int16x8_t __ret;
+ __ret = (int16x8_t) __builtin_neon_vabsq_v((int8x16_t)__rev0, 33);
+ __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai int8x8_t vabs_s8(int8x8_t __p0) {
+ int8x8_t __ret;
+ __ret = (int8x8_t) __builtin_neon_vabs_v((int8x8_t)__p0, 0);
+ return __ret;
+}
+#else
+__ai int8x8_t vabs_s8(int8x8_t __p0) {
+ int8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
+ int8x8_t __ret;
+ __ret = (int8x8_t) __builtin_neon_vabs_v((int8x8_t)__rev0, 0);
+ __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai float32x2_t vabs_f32(float32x2_t __p0) {
+ float32x2_t __ret;
+ __ret = (float32x2_t) __builtin_neon_vabs_v((int8x8_t)__p0, 9);
+ return __ret;
+}
+#else
+__ai float32x2_t vabs_f32(float32x2_t __p0) {
+ float32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
+ float32x2_t __ret;
+ __ret = (float32x2_t) __builtin_neon_vabs_v((int8x8_t)__rev0, 9);
+ __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai int32x2_t vabs_s32(int32x2_t __p0) {
+ int32x2_t __ret;
+ __ret = (int32x2_t) __builtin_neon_vabs_v((int8x8_t)__p0, 2);
+ return __ret;
+}
+#else
+__ai int32x2_t vabs_s32(int32x2_t __p0) {
+ int32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
+ int32x2_t __ret;
+ __ret = (int32x2_t) __builtin_neon_vabs_v((int8x8_t)__rev0, 2);
+ __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai int16x4_t vabs_s16(int16x4_t __p0) {
+ int16x4_t __ret;
+ __ret = (int16x4_t) __builtin_neon_vabs_v((int8x8_t)__p0, 1);
+ return __ret;
+}
+#else
+__ai int16x4_t vabs_s16(int16x4_t __p0) {
+ int16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
+ int16x4_t __ret;
+ __ret = (int16x4_t) __builtin_neon_vabs_v((int8x8_t)__rev0, 1);
+ __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
+ return __ret;
+}
+#endif
+
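+/* vadd/vaddq: lane-wise addition, lowered to plain vector + on the (possibly
+   reversed) operands; integer lanes wrap modulo 2^n on overflow.
+   Usage sketch, assuming the usual arm_neon.h entry points:
+     int32x4_t sum = vaddq_s32(vdupq_n_s32(1), vdupq_n_s32(2));  // {3,3,3,3}
+*/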
+#ifdef __LITTLE_ENDIAN__
+__ai uint8x16_t vaddq_u8(uint8x16_t __p0, uint8x16_t __p1) {
+ uint8x16_t __ret;
+ __ret = __p0 + __p1;
+ return __ret;
+}
+#else
+__ai uint8x16_t vaddq_u8(uint8x16_t __p0, uint8x16_t __p1) {
+ uint8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
+ uint8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
+ uint8x16_t __ret;
+ __ret = __rev0 + __rev1;
+ __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint32x4_t vaddq_u32(uint32x4_t __p0, uint32x4_t __p1) {
+ uint32x4_t __ret;
+ __ret = __p0 + __p1;
+ return __ret;
+}
+#else
+__ai uint32x4_t vaddq_u32(uint32x4_t __p0, uint32x4_t __p1) {
+ uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
+ uint32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
+ uint32x4_t __ret;
+ __ret = __rev0 + __rev1;
+ __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint64x2_t vaddq_u64(uint64x2_t __p0, uint64x2_t __p1) {
+ uint64x2_t __ret;
+ __ret = __p0 + __p1;
+ return __ret;
+}
+#else
+__ai uint64x2_t vaddq_u64(uint64x2_t __p0, uint64x2_t __p1) {
+ uint64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
+ uint64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
+ uint64x2_t __ret;
+ __ret = __rev0 + __rev1;
+ __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint16x8_t vaddq_u16(uint16x8_t __p0, uint16x8_t __p1) {
+ uint16x8_t __ret;
+ __ret = __p0 + __p1;
+ return __ret;
+}
+#else
+__ai uint16x8_t vaddq_u16(uint16x8_t __p0, uint16x8_t __p1) {
+ uint16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
+ uint16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
+ uint16x8_t __ret;
+ __ret = __rev0 + __rev1;
+ __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai int8x16_t vaddq_s8(int8x16_t __p0, int8x16_t __p1) {
+ int8x16_t __ret;
+ __ret = __p0 + __p1;
+ return __ret;
+}
+#else
+__ai int8x16_t vaddq_s8(int8x16_t __p0, int8x16_t __p1) {
+ int8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
+ int8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
+ int8x16_t __ret;
+ __ret = __rev0 + __rev1;
+ __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai float32x4_t vaddq_f32(float32x4_t __p0, float32x4_t __p1) {
+ float32x4_t __ret;
+ __ret = __p0 + __p1;
+ return __ret;
+}
+#else
+__ai float32x4_t vaddq_f32(float32x4_t __p0, float32x4_t __p1) {
+ float32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
+ float32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
+ float32x4_t __ret;
+ __ret = __rev0 + __rev1;
+ __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai int32x4_t vaddq_s32(int32x4_t __p0, int32x4_t __p1) {
+ int32x4_t __ret;
+ __ret = __p0 + __p1;
+ return __ret;
+}
+#else
+__ai int32x4_t vaddq_s32(int32x4_t __p0, int32x4_t __p1) {
+ int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
+ int32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
+ int32x4_t __ret;
+ __ret = __rev0 + __rev1;
+ __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai int64x2_t vaddq_s64(int64x2_t __p0, int64x2_t __p1) {
+ int64x2_t __ret;
+ __ret = __p0 + __p1;
+ return __ret;
+}
+#else
+__ai int64x2_t vaddq_s64(int64x2_t __p0, int64x2_t __p1) {
+ int64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
+ int64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
+ int64x2_t __ret;
+ __ret = __rev0 + __rev1;
+ __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai int16x8_t vaddq_s16(int16x8_t __p0, int16x8_t __p1) {
+ int16x8_t __ret;
+ __ret = __p0 + __p1;
+ return __ret;
+}
+#else
+__ai int16x8_t vaddq_s16(int16x8_t __p0, int16x8_t __p1) {
+ int16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
+ int16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
+ int16x8_t __ret;
+ __ret = __rev0 + __rev1;
+ __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint8x8_t vadd_u8(uint8x8_t __p0, uint8x8_t __p1) {
+ uint8x8_t __ret;
+ __ret = __p0 + __p1;
+ return __ret;
+}
+#else
+__ai uint8x8_t vadd_u8(uint8x8_t __p0, uint8x8_t __p1) {
+ uint8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
+ uint8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
+ uint8x8_t __ret;
+ __ret = __rev0 + __rev1;
+ __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint32x2_t vadd_u32(uint32x2_t __p0, uint32x2_t __p1) {
+ uint32x2_t __ret;
+ __ret = __p0 + __p1;
+ return __ret;
+}
+#else
+__ai uint32x2_t vadd_u32(uint32x2_t __p0, uint32x2_t __p1) {
+ uint32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
+ uint32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
+ uint32x2_t __ret;
+ __ret = __rev0 + __rev1;
+ __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint64x1_t vadd_u64(uint64x1_t __p0, uint64x1_t __p1) {
+ uint64x1_t __ret;
+ __ret = __p0 + __p1;
+ return __ret;
+}
+#else
+__ai uint64x1_t vadd_u64(uint64x1_t __p0, uint64x1_t __p1) {
+ uint64x1_t __ret;
+ __ret = __p0 + __p1;
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint16x4_t vadd_u16(uint16x4_t __p0, uint16x4_t __p1) {
+ uint16x4_t __ret;
+ __ret = __p0 + __p1;
+ return __ret;
+}
+#else
+__ai uint16x4_t vadd_u16(uint16x4_t __p0, uint16x4_t __p1) {
+ uint16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
+ uint16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
+ uint16x4_t __ret;
+ __ret = __rev0 + __rev1;
+ __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai int8x8_t vadd_s8(int8x8_t __p0, int8x8_t __p1) {
+ int8x8_t __ret;
+ __ret = __p0 + __p1;
+ return __ret;
+}
+#else
+__ai int8x8_t vadd_s8(int8x8_t __p0, int8x8_t __p1) {
+ int8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
+ int8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
+ int8x8_t __ret;
+ __ret = __rev0 + __rev1;
+ __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai float32x2_t vadd_f32(float32x2_t __p0, float32x2_t __p1) {
+ float32x2_t __ret;
+ __ret = __p0 + __p1;
+ return __ret;
+}
+#else
+__ai float32x2_t vadd_f32(float32x2_t __p0, float32x2_t __p1) {
+ float32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
+ float32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
+ float32x2_t __ret;
+ __ret = __rev0 + __rev1;
+ __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai int32x2_t vadd_s32(int32x2_t __p0, int32x2_t __p1) {
+ int32x2_t __ret;
+ __ret = __p0 + __p1;
+ return __ret;
+}
+#else
+__ai int32x2_t vadd_s32(int32x2_t __p0, int32x2_t __p1) {
+ int32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
+ int32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
+ int32x2_t __ret;
+ __ret = __rev0 + __rev1;
+ __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai int64x1_t vadd_s64(int64x1_t __p0, int64x1_t __p1) {
+ int64x1_t __ret;
+ __ret = __p0 + __p1;
+ return __ret;
+}
+#else
+__ai int64x1_t vadd_s64(int64x1_t __p0, int64x1_t __p1) {
+ int64x1_t __ret;
+ __ret = __p0 + __p1;
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai int16x4_t vadd_s16(int16x4_t __p0, int16x4_t __p1) {
+ int16x4_t __ret;
+ __ret = __p0 + __p1;
+ return __ret;
+}
+#else
+__ai int16x4_t vadd_s16(int16x4_t __p0, int16x4_t __p1) {
+ int16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
+ int16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
+ int16x4_t __ret;
+ __ret = __rev0 + __rev1;
+ __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
+ return __ret;
+}
+#endif
+
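+/* vaddhn: add and narrow, keeping the high half of each lane sum; e.g.
+   vaddhn_s32 sums two int32x4_t inputs and returns bits [31:16] of each
+   32-bit sum as an int16x4_t. */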
+#ifdef __LITTLE_ENDIAN__
+__ai uint16x4_t vaddhn_u32(uint32x4_t __p0, uint32x4_t __p1) {
+ uint16x4_t __ret;
+ __ret = (uint16x4_t) __builtin_neon_vaddhn_v((int8x16_t)__p0, (int8x16_t)__p1, 17);
+ return __ret;
+}
+#else
+__ai uint16x4_t vaddhn_u32(uint32x4_t __p0, uint32x4_t __p1) {
+ uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
+ uint32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
+ uint16x4_t __ret;
+ __ret = (uint16x4_t) __builtin_neon_vaddhn_v((int8x16_t)__rev0, (int8x16_t)__rev1, 17);
+ __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
+ return __ret;
+}
+__ai uint16x4_t __noswap_vaddhn_u32(uint32x4_t __p0, uint32x4_t __p1) {
+ uint16x4_t __ret;
+ __ret = (uint16x4_t) __builtin_neon_vaddhn_v((int8x16_t)__p0, (int8x16_t)__p1, 17);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint32x2_t vaddhn_u64(uint64x2_t __p0, uint64x2_t __p1) {
+ uint32x2_t __ret;
+ __ret = (uint32x2_t) __builtin_neon_vaddhn_v((int8x16_t)__p0, (int8x16_t)__p1, 18);
+ return __ret;
+}
+#else
+__ai uint32x2_t vaddhn_u64(uint64x2_t __p0, uint64x2_t __p1) {
+ uint64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
+ uint64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
+ uint32x2_t __ret;
+ __ret = (uint32x2_t) __builtin_neon_vaddhn_v((int8x16_t)__rev0, (int8x16_t)__rev1, 18);
+ __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
+ return __ret;
+}
+__ai uint32x2_t __noswap_vaddhn_u64(uint64x2_t __p0, uint64x2_t __p1) {
+ uint32x2_t __ret;
+ __ret = (uint32x2_t) __builtin_neon_vaddhn_v((int8x16_t)__p0, (int8x16_t)__p1, 18);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint8x8_t vaddhn_u16(uint16x8_t __p0, uint16x8_t __p1) {
+ uint8x8_t __ret;
+ __ret = (uint8x8_t) __builtin_neon_vaddhn_v((int8x16_t)__p0, (int8x16_t)__p1, 16);
+ return __ret;
+}
+#else
+__ai uint8x8_t vaddhn_u16(uint16x8_t __p0, uint16x8_t __p1) {
+ uint16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
+ uint16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
+ uint8x8_t __ret;
+ __ret = (uint8x8_t) __builtin_neon_vaddhn_v((int8x16_t)__rev0, (int8x16_t)__rev1, 16);
+ __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
+ return __ret;
+}
+__ai uint8x8_t __noswap_vaddhn_u16(uint16x8_t __p0, uint16x8_t __p1) {
+ uint8x8_t __ret;
+ __ret = (uint8x8_t) __builtin_neon_vaddhn_v((int8x16_t)__p0, (int8x16_t)__p1, 16);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai int16x4_t vaddhn_s32(int32x4_t __p0, int32x4_t __p1) {
+ int16x4_t __ret;
+ __ret = (int16x4_t) __builtin_neon_vaddhn_v((int8x16_t)__p0, (int8x16_t)__p1, 1);
+ return __ret;
+}
+#else
+__ai int16x4_t vaddhn_s32(int32x4_t __p0, int32x4_t __p1) {
+ int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
+ int32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
+ int16x4_t __ret;
+ __ret = (int16x4_t) __builtin_neon_vaddhn_v((int8x16_t)__rev0, (int8x16_t)__rev1, 1);
+ __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
+ return __ret;
+}
+__ai int16x4_t __noswap_vaddhn_s32(int32x4_t __p0, int32x4_t __p1) {
+ int16x4_t __ret;
+ __ret = (int16x4_t) __builtin_neon_vaddhn_v((int8x16_t)__p0, (int8x16_t)__p1, 1);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai int32x2_t vaddhn_s64(int64x2_t __p0, int64x2_t __p1) {
+ int32x2_t __ret;
+ __ret = (int32x2_t) __builtin_neon_vaddhn_v((int8x16_t)__p0, (int8x16_t)__p1, 2);
+ return __ret;
+}
+#else
+__ai int32x2_t vaddhn_s64(int64x2_t __p0, int64x2_t __p1) {
+ int64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
+ int64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
+ int32x2_t __ret;
+ __ret = (int32x2_t) __builtin_neon_vaddhn_v((int8x16_t)__rev0, (int8x16_t)__rev1, 2);
+ __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
+ return __ret;
+}
+__ai int32x2_t __noswap_vaddhn_s64(int64x2_t __p0, int64x2_t __p1) {
+ int32x2_t __ret;
+ __ret = (int32x2_t) __builtin_neon_vaddhn_v((int8x16_t)__p0, (int8x16_t)__p1, 2);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai int8x8_t vaddhn_s16(int16x8_t __p0, int16x8_t __p1) {
+ int8x8_t __ret;
+ __ret = (int8x8_t) __builtin_neon_vaddhn_v((int8x16_t)__p0, (int8x16_t)__p1, 0);
+ return __ret;
+}
+#else
+__ai int8x8_t vaddhn_s16(int16x8_t __p0, int16x8_t __p1) {
+ int16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
+ int16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
+ int8x8_t __ret;
+ __ret = (int8x8_t) __builtin_neon_vaddhn_v((int8x16_t)__rev0, (int8x16_t)__rev1, 0);
+ __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
+ return __ret;
+}
+__ai int8x8_t __noswap_vaddhn_s16(int16x8_t __p0, int16x8_t __p1) {
+ int8x8_t __ret;
+ __ret = (int8x8_t) __builtin_neon_vaddhn_v((int8x16_t)__p0, (int8x16_t)__p1, 0);
+ return __ret;
+}
+#endif
+
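+/* vand/vandq: lane-wise bitwise AND, implemented with the vector & operator. */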
+#ifdef __LITTLE_ENDIAN__
+__ai uint8x16_t vandq_u8(uint8x16_t __p0, uint8x16_t __p1) {
+ uint8x16_t __ret;
+ __ret = __p0 & __p1;
+ return __ret;
+}
+#else
+__ai uint8x16_t vandq_u8(uint8x16_t __p0, uint8x16_t __p1) {
+ uint8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
+ uint8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
+ uint8x16_t __ret;
+ __ret = __rev0 & __rev1;
+ __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint32x4_t vandq_u32(uint32x4_t __p0, uint32x4_t __p1) {
+ uint32x4_t __ret;
+ __ret = __p0 & __p1;
+ return __ret;
+}
+#else
+__ai uint32x4_t vandq_u32(uint32x4_t __p0, uint32x4_t __p1) {
+ uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
+ uint32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
+ uint32x4_t __ret;
+ __ret = __rev0 & __rev1;
+ __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint64x2_t vandq_u64(uint64x2_t __p0, uint64x2_t __p1) {
+ uint64x2_t __ret;
+ __ret = __p0 & __p1;
+ return __ret;
+}
+#else
+__ai uint64x2_t vandq_u64(uint64x2_t __p0, uint64x2_t __p1) {
+ uint64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
+ uint64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
+ uint64x2_t __ret;
+ __ret = __rev0 & __rev1;
+ __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint16x8_t vandq_u16(uint16x8_t __p0, uint16x8_t __p1) {
+ uint16x8_t __ret;
+ __ret = __p0 & __p1;
+ return __ret;
+}
+#else
+__ai uint16x8_t vandq_u16(uint16x8_t __p0, uint16x8_t __p1) {
+ uint16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
+ uint16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
+ uint16x8_t __ret;
+ __ret = __rev0 & __rev1;
+ __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai int8x16_t vandq_s8(int8x16_t __p0, int8x16_t __p1) {
+ int8x16_t __ret;
+ __ret = __p0 & __p1;
+ return __ret;
+}
+#else
+__ai int8x16_t vandq_s8(int8x16_t __p0, int8x16_t __p1) {
+ int8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
+ int8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
+ int8x16_t __ret;
+ __ret = __rev0 & __rev1;
+ __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai int32x4_t vandq_s32(int32x4_t __p0, int32x4_t __p1) {
+ int32x4_t __ret;
+ __ret = __p0 & __p1;
+ return __ret;
+}
+#else
+__ai int32x4_t vandq_s32(int32x4_t __p0, int32x4_t __p1) {
+ int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
+ int32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
+ int32x4_t __ret;
+ __ret = __rev0 & __rev1;
+ __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai int64x2_t vandq_s64(int64x2_t __p0, int64x2_t __p1) {
+ int64x2_t __ret;
+ __ret = __p0 & __p1;
+ return __ret;
+}
+#else
+__ai int64x2_t vandq_s64(int64x2_t __p0, int64x2_t __p1) {
+ int64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
+ int64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
+ int64x2_t __ret;
+ __ret = __rev0 & __rev1;
+ __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai int16x8_t vandq_s16(int16x8_t __p0, int16x8_t __p1) {
+ int16x8_t __ret;
+ __ret = __p0 & __p1;
+ return __ret;
+}
+#else
+__ai int16x8_t vandq_s16(int16x8_t __p0, int16x8_t __p1) {
+ int16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
+ int16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
+ int16x8_t __ret;
+ __ret = __rev0 & __rev1;
+ __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint8x8_t vand_u8(uint8x8_t __p0, uint8x8_t __p1) {
+ uint8x8_t __ret;
+ __ret = __p0 & __p1;
+ return __ret;
+}
+#else
+__ai uint8x8_t vand_u8(uint8x8_t __p0, uint8x8_t __p1) {
+ uint8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
+ uint8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
+ uint8x8_t __ret;
+ __ret = __rev0 & __rev1;
+ __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint32x2_t vand_u32(uint32x2_t __p0, uint32x2_t __p1) {
+ uint32x2_t __ret;
+ __ret = __p0 & __p1;
+ return __ret;
+}
+#else
+__ai uint32x2_t vand_u32(uint32x2_t __p0, uint32x2_t __p1) {
+ uint32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
+ uint32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
+ uint32x2_t __ret;
+ __ret = __rev0 & __rev1;
+ __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint64x1_t vand_u64(uint64x1_t __p0, uint64x1_t __p1) {
+ uint64x1_t __ret;
+ __ret = __p0 & __p1;
+ return __ret;
+}
+#else
+__ai uint64x1_t vand_u64(uint64x1_t __p0, uint64x1_t __p1) {
+ uint64x1_t __ret;
+ __ret = __p0 & __p1;
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint16x4_t vand_u16(uint16x4_t __p0, uint16x4_t __p1) {
+ uint16x4_t __ret;
+ __ret = __p0 & __p1;
+ return __ret;
+}
+#else
+__ai uint16x4_t vand_u16(uint16x4_t __p0, uint16x4_t __p1) {
+ uint16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
+ uint16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
+ uint16x4_t __ret;
+ __ret = __rev0 & __rev1;
+ __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai int8x8_t vand_s8(int8x8_t __p0, int8x8_t __p1) {
+ int8x8_t __ret;
+ __ret = __p0 & __p1;
+ return __ret;
+}
+#else
+__ai int8x8_t vand_s8(int8x8_t __p0, int8x8_t __p1) {
+ int8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
+ int8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
+ int8x8_t __ret;
+ __ret = __rev0 & __rev1;
+ __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai int32x2_t vand_s32(int32x2_t __p0, int32x2_t __p1) {
+ int32x2_t __ret;
+ __ret = __p0 & __p1;
+ return __ret;
+}
+#else
+__ai int32x2_t vand_s32(int32x2_t __p0, int32x2_t __p1) {
+ int32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
+ int32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
+ int32x2_t __ret;
+ __ret = __rev0 & __rev1;
+ __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai int64x1_t vand_s64(int64x1_t __p0, int64x1_t __p1) {
+ int64x1_t __ret;
+ __ret = __p0 & __p1;
+ return __ret;
+}
+#else
+__ai int64x1_t vand_s64(int64x1_t __p0, int64x1_t __p1) {
+ int64x1_t __ret;
+ __ret = __p0 & __p1;
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai int16x4_t vand_s16(int16x4_t __p0, int16x4_t __p1) {
+ int16x4_t __ret;
+ __ret = __p0 & __p1;
+ return __ret;
+}
+#else
+__ai int16x4_t vand_s16(int16x4_t __p0, int16x4_t __p1) {
+ int16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
+ int16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
+ int16x4_t __ret;
+ __ret = __rev0 & __rev1;
+ __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
+ return __ret;
+}
+#endif
+
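+/* vbic/vbicq: bit clear, computing __p0 & ~__p1 (keep the bits of the first
+   operand that are clear in the second). */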
+#ifdef __LITTLE_ENDIAN__
+__ai uint8x16_t vbicq_u8(uint8x16_t __p0, uint8x16_t __p1) {
+ uint8x16_t __ret;
+ __ret = __p0 & ~__p1;
+ return __ret;
+}
+#else
+__ai uint8x16_t vbicq_u8(uint8x16_t __p0, uint8x16_t __p1) {
+ uint8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
+ uint8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
+ uint8x16_t __ret;
+ __ret = __rev0 & ~__rev1;
+ __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint32x4_t vbicq_u32(uint32x4_t __p0, uint32x4_t __p1) {
+ uint32x4_t __ret;
+ __ret = __p0 & ~__p1;
+ return __ret;
+}
+#else
+__ai uint32x4_t vbicq_u32(uint32x4_t __p0, uint32x4_t __p1) {
+ uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
+ uint32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
+ uint32x4_t __ret;
+ __ret = __rev0 & ~__rev1;
+ __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint64x2_t vbicq_u64(uint64x2_t __p0, uint64x2_t __p1) {
+ uint64x2_t __ret;
+ __ret = __p0 & ~__p1;
+ return __ret;
+}
+#else
+__ai uint64x2_t vbicq_u64(uint64x2_t __p0, uint64x2_t __p1) {
+ uint64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
+ uint64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
+ uint64x2_t __ret;
+ __ret = __rev0 & ~__rev1;
+ __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint16x8_t vbicq_u16(uint16x8_t __p0, uint16x8_t __p1) {
+ uint16x8_t __ret;
+ __ret = __p0 & ~__p1;
+ return __ret;
+}
+#else
+__ai uint16x8_t vbicq_u16(uint16x8_t __p0, uint16x8_t __p1) {
+ uint16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
+ uint16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
+ uint16x8_t __ret;
+ __ret = __rev0 & ~__rev1;
+ __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai int8x16_t vbicq_s8(int8x16_t __p0, int8x16_t __p1) {
+ int8x16_t __ret;
+ __ret = __p0 & ~__p1;
+ return __ret;
+}
+#else
+__ai int8x16_t vbicq_s8(int8x16_t __p0, int8x16_t __p1) {
+ int8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
+ int8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
+ int8x16_t __ret;
+ __ret = __rev0 & ~__rev1;
+ __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai int32x4_t vbicq_s32(int32x4_t __p0, int32x4_t __p1) {
+ int32x4_t __ret;
+ __ret = __p0 & ~__p1;
+ return __ret;
+}
+#else
+__ai int32x4_t vbicq_s32(int32x4_t __p0, int32x4_t __p1) {
+ int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
+ int32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
+ int32x4_t __ret;
+ __ret = __rev0 & ~__rev1;
+ __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai int64x2_t vbicq_s64(int64x2_t __p0, int64x2_t __p1) {
+ int64x2_t __ret;
+ __ret = __p0 & ~__p1;
+ return __ret;
+}
+#else
+__ai int64x2_t vbicq_s64(int64x2_t __p0, int64x2_t __p1) {
+ int64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
+ int64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
+ int64x2_t __ret;
+ __ret = __rev0 & ~__rev1;
+ __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai int16x8_t vbicq_s16(int16x8_t __p0, int16x8_t __p1) {
+ int16x8_t __ret;
+ __ret = __p0 & ~__p1;
+ return __ret;
+}
+#else
+__ai int16x8_t vbicq_s16(int16x8_t __p0, int16x8_t __p1) {
+ int16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
+ int16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
+ int16x8_t __ret;
+ __ret = __rev0 & ~__rev1;
+ __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint8x8_t vbic_u8(uint8x8_t __p0, uint8x8_t __p1) {
+ uint8x8_t __ret;
+ __ret = __p0 & ~__p1;
+ return __ret;
+}
+#else
+__ai uint8x8_t vbic_u8(uint8x8_t __p0, uint8x8_t __p1) {
+ uint8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
+ uint8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
+ uint8x8_t __ret;
+ __ret = __rev0 & ~__rev1;
+ __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint32x2_t vbic_u32(uint32x2_t __p0, uint32x2_t __p1) {
+ uint32x2_t __ret;
+ __ret = __p0 & ~__p1;
+ return __ret;
+}
+#else
+__ai uint32x2_t vbic_u32(uint32x2_t __p0, uint32x2_t __p1) {
+ uint32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
+ uint32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
+ uint32x2_t __ret;
+ __ret = __rev0 & ~__rev1;
+ __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint64x1_t vbic_u64(uint64x1_t __p0, uint64x1_t __p1) {
+ uint64x1_t __ret;
+ __ret = __p0 & ~__p1;
+ return __ret;
+}
+#else
+__ai uint64x1_t vbic_u64(uint64x1_t __p0, uint64x1_t __p1) {
+ uint64x1_t __ret;
+ __ret = __p0 & ~__p1;
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint16x4_t vbic_u16(uint16x4_t __p0, uint16x4_t __p1) {
+ uint16x4_t __ret;
+ __ret = __p0 & ~__p1;
+ return __ret;
+}
+#else
+__ai uint16x4_t vbic_u16(uint16x4_t __p0, uint16x4_t __p1) {
+ uint16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
+ uint16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
+ uint16x4_t __ret;
+ __ret = __rev0 & ~__rev1;
+ __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai int8x8_t vbic_s8(int8x8_t __p0, int8x8_t __p1) {
+ int8x8_t __ret;
+ __ret = __p0 & ~__p1;
+ return __ret;
+}
+#else
+__ai int8x8_t vbic_s8(int8x8_t __p0, int8x8_t __p1) {
+ int8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
+ int8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
+ int8x8_t __ret;
+ __ret = __rev0 & ~__rev1;
+ __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai int32x2_t vbic_s32(int32x2_t __p0, int32x2_t __p1) {
+ int32x2_t __ret;
+ __ret = __p0 & ~__p1;
+ return __ret;
+}
+#else
+__ai int32x2_t vbic_s32(int32x2_t __p0, int32x2_t __p1) {
+ int32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
+ int32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
+ int32x2_t __ret;
+ __ret = __rev0 & ~__rev1;
+ __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai int64x1_t vbic_s64(int64x1_t __p0, int64x1_t __p1) {
+ int64x1_t __ret;
+ __ret = __p0 & ~__p1;
+ return __ret;
+}
+#else
+__ai int64x1_t vbic_s64(int64x1_t __p0, int64x1_t __p1) {
+ int64x1_t __ret;
+ __ret = __p0 & ~__p1;
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai int16x4_t vbic_s16(int16x4_t __p0, int16x4_t __p1) {
+ int16x4_t __ret;
+ __ret = __p0 & ~__p1;
+ return __ret;
+}
+#else
+__ai int16x4_t vbic_s16(int16x4_t __p0, int16x4_t __p1) {
+ int16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
+ int16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
+ int16x4_t __ret;
+ __ret = __rev0 & ~__rev1;
+ __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
+ return __ret;
+}
+#endif
+
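+/* vbsl/vbslq: bitwise select. Each result bit comes from the second operand
+   where the corresponding mask bit in the first operand is 1, and from the
+   third operand where it is 0. A sketch of a common use, a lane-wise maximum
+   built from a comparison mask (vcgt_f32 is the standard greater-than
+   compare from this header):
+     uint32x2_t m  = vcgt_f32(a, b);     // all-ones lanes where a > b
+     float32x2_t r = vbsl_f32(m, a, b);  // a where mask set, else b
+*/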
+#ifdef __LITTLE_ENDIAN__
+__ai poly8x8_t vbsl_p8(uint8x8_t __p0, poly8x8_t __p1, poly8x8_t __p2) {
+ poly8x8_t __ret;
+ __ret = (poly8x8_t) __builtin_neon_vbsl_v((int8x8_t)__p0, (int8x8_t)__p1, (int8x8_t)__p2, 4);
+ return __ret;
+}
+#else
+__ai poly8x8_t vbsl_p8(uint8x8_t __p0, poly8x8_t __p1, poly8x8_t __p2) {
+ uint8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
+ poly8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
+ poly8x8_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0);
+ poly8x8_t __ret;
+ __ret = (poly8x8_t) __builtin_neon_vbsl_v((int8x8_t)__rev0, (int8x8_t)__rev1, (int8x8_t)__rev2, 4);
+ __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai poly16x4_t vbsl_p16(uint16x4_t __p0, poly16x4_t __p1, poly16x4_t __p2) {
+ poly16x4_t __ret;
+ __ret = (poly16x4_t) __builtin_neon_vbsl_v((int8x8_t)__p0, (int8x8_t)__p1, (int8x8_t)__p2, 5);
+ return __ret;
+}
+#else
+__ai poly16x4_t vbsl_p16(uint16x4_t __p0, poly16x4_t __p1, poly16x4_t __p2) {
+ uint16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
+ poly16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
+ poly16x4_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 3, 2, 1, 0);
+ poly16x4_t __ret;
+ __ret = (poly16x4_t) __builtin_neon_vbsl_v((int8x8_t)__rev0, (int8x8_t)__rev1, (int8x8_t)__rev2, 5);
+ __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai poly8x16_t vbslq_p8(uint8x16_t __p0, poly8x16_t __p1, poly8x16_t __p2) {
+ poly8x16_t __ret;
+ __ret = (poly8x16_t) __builtin_neon_vbslq_v((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 36);
+ return __ret;
+}
+#else
+__ai poly8x16_t vbslq_p8(uint8x16_t __p0, poly8x16_t __p1, poly8x16_t __p2) {
+ uint8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
+ poly8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
+ poly8x16_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
+ poly8x16_t __ret;
+ __ret = (poly8x16_t) __builtin_neon_vbslq_v((int8x16_t)__rev0, (int8x16_t)__rev1, (int8x16_t)__rev2, 36);
+ __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai poly16x8_t vbslq_p16(uint16x8_t __p0, poly16x8_t __p1, poly16x8_t __p2) {
+ poly16x8_t __ret;
+ __ret = (poly16x8_t) __builtin_neon_vbslq_v((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 37);
+ return __ret;
+}
+#else
+__ai poly16x8_t vbslq_p16(uint16x8_t __p0, poly16x8_t __p1, poly16x8_t __p2) {
+ uint16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
+ poly16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
+ poly16x8_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0);
+ poly16x8_t __ret;
+ __ret = (poly16x8_t) __builtin_neon_vbslq_v((int8x16_t)__rev0, (int8x16_t)__rev1, (int8x16_t)__rev2, 37);
+ __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint8x16_t vbslq_u8(uint8x16_t __p0, uint8x16_t __p1, uint8x16_t __p2) {
+ uint8x16_t __ret;
+ __ret = (uint8x16_t) __builtin_neon_vbslq_v((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 48);
+ return __ret;
+}
+#else
+__ai uint8x16_t vbslq_u8(uint8x16_t __p0, uint8x16_t __p1, uint8x16_t __p2) {
+ uint8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
+ uint8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
+ uint8x16_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
+ uint8x16_t __ret;
+ __ret = (uint8x16_t) __builtin_neon_vbslq_v((int8x16_t)__rev0, (int8x16_t)__rev1, (int8x16_t)__rev2, 48);
+ __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint32x4_t vbslq_u32(uint32x4_t __p0, uint32x4_t __p1, uint32x4_t __p2) {
+ uint32x4_t __ret;
+ __ret = (uint32x4_t) __builtin_neon_vbslq_v((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 50);
+ return __ret;
+}
+#else
+__ai uint32x4_t vbslq_u32(uint32x4_t __p0, uint32x4_t __p1, uint32x4_t __p2) {
+ uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
+ uint32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
+ uint32x4_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 3, 2, 1, 0);
+ uint32x4_t __ret;
+ __ret = (uint32x4_t) __builtin_neon_vbslq_v((int8x16_t)__rev0, (int8x16_t)__rev1, (int8x16_t)__rev2, 50);
+ __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint64x2_t vbslq_u64(uint64x2_t __p0, uint64x2_t __p1, uint64x2_t __p2) {
+ uint64x2_t __ret;
+ __ret = (uint64x2_t) __builtin_neon_vbslq_v((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 51);
+ return __ret;
+}
+#else
+__ai uint64x2_t vbslq_u64(uint64x2_t __p0, uint64x2_t __p1, uint64x2_t __p2) {
+ uint64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
+ uint64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
+ uint64x2_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 1, 0);
+ uint64x2_t __ret;
+ __ret = (uint64x2_t) __builtin_neon_vbslq_v((int8x16_t)__rev0, (int8x16_t)__rev1, (int8x16_t)__rev2, 51);
+ __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint16x8_t vbslq_u16(uint16x8_t __p0, uint16x8_t __p1, uint16x8_t __p2) {
+ uint16x8_t __ret;
+ __ret = (uint16x8_t) __builtin_neon_vbslq_v((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 49);
+ return __ret;
+}
+#else
+__ai uint16x8_t vbslq_u16(uint16x8_t __p0, uint16x8_t __p1, uint16x8_t __p2) {
+ uint16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
+ uint16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
+ uint16x8_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0);
+ uint16x8_t __ret;
+ __ret = (uint16x8_t) __builtin_neon_vbslq_v((int8x16_t)__rev0, (int8x16_t)__rev1, (int8x16_t)__rev2, 49);
+ __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai int8x16_t vbslq_s8(uint8x16_t __p0, int8x16_t __p1, int8x16_t __p2) {
+ int8x16_t __ret;
+ __ret = (int8x16_t) __builtin_neon_vbslq_v((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 32);
+ return __ret;
+}
+#else
+__ai int8x16_t vbslq_s8(uint8x16_t __p0, int8x16_t __p1, int8x16_t __p2) {
+ uint8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
+ int8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
+ int8x16_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
+ int8x16_t __ret;
+ __ret = (int8x16_t) __builtin_neon_vbslq_v((int8x16_t)__rev0, (int8x16_t)__rev1, (int8x16_t)__rev2, 32);
+ __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai float32x4_t vbslq_f32(uint32x4_t __p0, float32x4_t __p1, float32x4_t __p2) {
+ float32x4_t __ret;
+ __ret = (float32x4_t) __builtin_neon_vbslq_v((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 41);
+ return __ret;
+}
+#else
+__ai float32x4_t vbslq_f32(uint32x4_t __p0, float32x4_t __p1, float32x4_t __p2) {
+ uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
+ float32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
+ float32x4_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 3, 2, 1, 0);
+ float32x4_t __ret;
+ __ret = (float32x4_t) __builtin_neon_vbslq_v((int8x16_t)__rev0, (int8x16_t)__rev1, (int8x16_t)__rev2, 41);
+ __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai int32x4_t vbslq_s32(uint32x4_t __p0, int32x4_t __p1, int32x4_t __p2) {
+ int32x4_t __ret;
+ __ret = (int32x4_t) __builtin_neon_vbslq_v((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 34);
+ return __ret;
+}
+#else
+__ai int32x4_t vbslq_s32(uint32x4_t __p0, int32x4_t __p1, int32x4_t __p2) {
+ uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
+ int32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
+ int32x4_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 3, 2, 1, 0);
+ int32x4_t __ret;
+ __ret = (int32x4_t) __builtin_neon_vbslq_v((int8x16_t)__rev0, (int8x16_t)__rev1, (int8x16_t)__rev2, 34);
+ __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai int64x2_t vbslq_s64(uint64x2_t __p0, int64x2_t __p1, int64x2_t __p2) {
+ int64x2_t __ret;
+ __ret = (int64x2_t) __builtin_neon_vbslq_v((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 35);
+ return __ret;
+}
+#else
+__ai int64x2_t vbslq_s64(uint64x2_t __p0, int64x2_t __p1, int64x2_t __p2) {
+ uint64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
+ int64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
+ int64x2_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 1, 0);
+ int64x2_t __ret;
+ __ret = (int64x2_t) __builtin_neon_vbslq_v((int8x16_t)__rev0, (int8x16_t)__rev1, (int8x16_t)__rev2, 35);
+ __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai int16x8_t vbslq_s16(uint16x8_t __p0, int16x8_t __p1, int16x8_t __p2) {
+ int16x8_t __ret;
+ __ret = (int16x8_t) __builtin_neon_vbslq_v((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 33);
+ return __ret;
+}
+#else
+__ai int16x8_t vbslq_s16(uint16x8_t __p0, int16x8_t __p1, int16x8_t __p2) {
+ uint16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
+ int16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
+ int16x8_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0);
+ int16x8_t __ret;
+ __ret = (int16x8_t) __builtin_neon_vbslq_v((int8x16_t)__rev0, (int8x16_t)__rev1, (int8x16_t)__rev2, 33);
+ __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint8x8_t vbsl_u8(uint8x8_t __p0, uint8x8_t __p1, uint8x8_t __p2) {
+ uint8x8_t __ret;
+ __ret = (uint8x8_t) __builtin_neon_vbsl_v((int8x8_t)__p0, (int8x8_t)__p1, (int8x8_t)__p2, 16);
+ return __ret;
+}
+#else
+__ai uint8x8_t vbsl_u8(uint8x8_t __p0, uint8x8_t __p1, uint8x8_t __p2) {
+ uint8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
+ uint8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
+ uint8x8_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0);
+ uint8x8_t __ret;
+ __ret = (uint8x8_t) __builtin_neon_vbsl_v((int8x8_t)__rev0, (int8x8_t)__rev1, (int8x8_t)__rev2, 16);
+ __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint32x2_t vbsl_u32(uint32x2_t __p0, uint32x2_t __p1, uint32x2_t __p2) {
+ uint32x2_t __ret;
+ __ret = (uint32x2_t) __builtin_neon_vbsl_v((int8x8_t)__p0, (int8x8_t)__p1, (int8x8_t)__p2, 18);
+ return __ret;
+}
+#else
+__ai uint32x2_t vbsl_u32(uint32x2_t __p0, uint32x2_t __p1, uint32x2_t __p2) {
+ uint32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
+ uint32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
+ uint32x2_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 1, 0);
+ uint32x2_t __ret;
+ __ret = (uint32x2_t) __builtin_neon_vbsl_v((int8x8_t)__rev0, (int8x8_t)__rev1, (int8x8_t)__rev2, 18);
+ __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint64x1_t vbsl_u64(uint64x1_t __p0, uint64x1_t __p1, uint64x1_t __p2) {
+ uint64x1_t __ret;
+ __ret = (uint64x1_t) __builtin_neon_vbsl_v((int8x8_t)__p0, (int8x8_t)__p1, (int8x8_t)__p2, 19);
+ return __ret;
+}
+#else
+__ai uint64x1_t vbsl_u64(uint64x1_t __p0, uint64x1_t __p1, uint64x1_t __p2) {
+ uint64x1_t __ret;
+ __ret = (uint64x1_t) __builtin_neon_vbsl_v((int8x8_t)__p0, (int8x8_t)__p1, (int8x8_t)__p2, 19);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint16x4_t vbsl_u16(uint16x4_t __p0, uint16x4_t __p1, uint16x4_t __p2) {
+ uint16x4_t __ret;
+ __ret = (uint16x4_t) __builtin_neon_vbsl_v((int8x8_t)__p0, (int8x8_t)__p1, (int8x8_t)__p2, 17);
+ return __ret;
+}
+#else
+__ai uint16x4_t vbsl_u16(uint16x4_t __p0, uint16x4_t __p1, uint16x4_t __p2) {
+ uint16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
+ uint16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
+ uint16x4_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 3, 2, 1, 0);
+ uint16x4_t __ret;
+ __ret = (uint16x4_t) __builtin_neon_vbsl_v((int8x8_t)__rev0, (int8x8_t)__rev1, (int8x8_t)__rev2, 17);
+ __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai int8x8_t vbsl_s8(uint8x8_t __p0, int8x8_t __p1, int8x8_t __p2) {
+ int8x8_t __ret;
+ __ret = (int8x8_t) __builtin_neon_vbsl_v((int8x8_t)__p0, (int8x8_t)__p1, (int8x8_t)__p2, 0);
+ return __ret;
+}
+#else
+__ai int8x8_t vbsl_s8(uint8x8_t __p0, int8x8_t __p1, int8x8_t __p2) {
+ uint8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
+ int8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
+ int8x8_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0);
+ int8x8_t __ret;
+ __ret = (int8x8_t) __builtin_neon_vbsl_v((int8x8_t)__rev0, (int8x8_t)__rev1, (int8x8_t)__rev2, 0);
+ __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai float32x2_t vbsl_f32(uint32x2_t __p0, float32x2_t __p1, float32x2_t __p2) {
+ float32x2_t __ret;
+ __ret = (float32x2_t) __builtin_neon_vbsl_v((int8x8_t)__p0, (int8x8_t)__p1, (int8x8_t)__p2, 9);
+ return __ret;
+}
+#else
+__ai float32x2_t vbsl_f32(uint32x2_t __p0, float32x2_t __p1, float32x2_t __p2) {
+ uint32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
+ float32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
+ float32x2_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 1, 0);
+ float32x2_t __ret;
+ __ret = (float32x2_t) __builtin_neon_vbsl_v((int8x8_t)__rev0, (int8x8_t)__rev1, (int8x8_t)__rev2, 9);
+ __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai int32x2_t vbsl_s32(uint32x2_t __p0, int32x2_t __p1, int32x2_t __p2) {
+ int32x2_t __ret;
+ __ret = (int32x2_t) __builtin_neon_vbsl_v((int8x8_t)__p0, (int8x8_t)__p1, (int8x8_t)__p2, 2);
+ return __ret;
+}
+#else
+__ai int32x2_t vbsl_s32(uint32x2_t __p0, int32x2_t __p1, int32x2_t __p2) {
+ uint32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
+ int32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
+ int32x2_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 1, 0);
+ int32x2_t __ret;
+ __ret = (int32x2_t) __builtin_neon_vbsl_v((int8x8_t)__rev0, (int8x8_t)__rev1, (int8x8_t)__rev2, 2);
+ __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai int64x1_t vbsl_s64(uint64x1_t __p0, int64x1_t __p1, int64x1_t __p2) {
+ int64x1_t __ret;
+ __ret = (int64x1_t) __builtin_neon_vbsl_v((int8x8_t)__p0, (int8x8_t)__p1, (int8x8_t)__p2, 3);
+ return __ret;
+}
+#else
+__ai int64x1_t vbsl_s64(uint64x1_t __p0, int64x1_t __p1, int64x1_t __p2) {
+ int64x1_t __ret;
+ __ret = (int64x1_t) __builtin_neon_vbsl_v((int8x8_t)__p0, (int8x8_t)__p1, (int8x8_t)__p2, 3);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai int16x4_t vbsl_s16(uint16x4_t __p0, int16x4_t __p1, int16x4_t __p2) {
+ int16x4_t __ret;
+ __ret = (int16x4_t) __builtin_neon_vbsl_v((int8x8_t)__p0, (int8x8_t)__p1, (int8x8_t)__p2, 1);
+ return __ret;
+}
+#else
+__ai int16x4_t vbsl_s16(uint16x4_t __p0, int16x4_t __p1, int16x4_t __p2) {
+ uint16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
+ int16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
+ int16x4_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 3, 2, 1, 0);
+ int16x4_t __ret;
+ __ret = (int16x4_t) __builtin_neon_vbsl_v((int8x8_t)__rev0, (int8x8_t)__rev1, (int8x8_t)__rev2, 1);
+ __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
+ return __ret;
+}
+#endif
+
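+/* vcage/vcagt/vcale/vcalt: absolute compares for floats. Each lane compares
+   |__p0| against |__p1| (>=, >, <=, <) and returns all-ones for true and
+   zero for false, as an unsigned vector. */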
+#ifdef __LITTLE_ENDIAN__
+__ai uint32x4_t vcageq_f32(float32x4_t __p0, float32x4_t __p1) {
+ uint32x4_t __ret;
+ __ret = (uint32x4_t) __builtin_neon_vcageq_v((int8x16_t)__p0, (int8x16_t)__p1, 50);
+ return __ret;
+}
+#else
+__ai uint32x4_t vcageq_f32(float32x4_t __p0, float32x4_t __p1) {
+ float32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
+ float32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
+ uint32x4_t __ret;
+ __ret = (uint32x4_t) __builtin_neon_vcageq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 50);
+ __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint32x2_t vcage_f32(float32x2_t __p0, float32x2_t __p1) {
+ uint32x2_t __ret;
+ __ret = (uint32x2_t) __builtin_neon_vcage_v((int8x8_t)__p0, (int8x8_t)__p1, 18);
+ return __ret;
+}
+#else
+__ai uint32x2_t vcage_f32(float32x2_t __p0, float32x2_t __p1) {
+ float32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
+ float32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
+ uint32x2_t __ret;
+ __ret = (uint32x2_t) __builtin_neon_vcage_v((int8x8_t)__rev0, (int8x8_t)__rev1, 18);
+ __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint32x4_t vcagtq_f32(float32x4_t __p0, float32x4_t __p1) {
+ uint32x4_t __ret;
+ __ret = (uint32x4_t) __builtin_neon_vcagtq_v((int8x16_t)__p0, (int8x16_t)__p1, 50);
+ return __ret;
+}
+#else
+__ai uint32x4_t vcagtq_f32(float32x4_t __p0, float32x4_t __p1) {
+ float32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
+ float32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
+ uint32x4_t __ret;
+ __ret = (uint32x4_t) __builtin_neon_vcagtq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 50);
+ __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint32x2_t vcagt_f32(float32x2_t __p0, float32x2_t __p1) {
+ uint32x2_t __ret;
+ __ret = (uint32x2_t) __builtin_neon_vcagt_v((int8x8_t)__p0, (int8x8_t)__p1, 18);
+ return __ret;
+}
+#else
+__ai uint32x2_t vcagt_f32(float32x2_t __p0, float32x2_t __p1) {
+ float32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
+ float32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
+ uint32x2_t __ret;
+ __ret = (uint32x2_t) __builtin_neon_vcagt_v((int8x8_t)__rev0, (int8x8_t)__rev1, 18);
+ __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint32x4_t vcaleq_f32(float32x4_t __p0, float32x4_t __p1) {
+ uint32x4_t __ret;
+ __ret = (uint32x4_t) __builtin_neon_vcaleq_v((int8x16_t)__p0, (int8x16_t)__p1, 50);
+ return __ret;
+}
+#else
+__ai uint32x4_t vcaleq_f32(float32x4_t __p0, float32x4_t __p1) {
+ float32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
+ float32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
+ uint32x4_t __ret;
+ __ret = (uint32x4_t) __builtin_neon_vcaleq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 50);
+ __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint32x2_t vcale_f32(float32x2_t __p0, float32x2_t __p1) {
+ uint32x2_t __ret;
+ __ret = (uint32x2_t) __builtin_neon_vcale_v((int8x8_t)__p0, (int8x8_t)__p1, 18);
+ return __ret;
+}
+#else
+__ai uint32x2_t vcale_f32(float32x2_t __p0, float32x2_t __p1) {
+ float32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
+ float32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
+ uint32x2_t __ret;
+ __ret = (uint32x2_t) __builtin_neon_vcale_v((int8x8_t)__rev0, (int8x8_t)__rev1, 18);
+ __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint32x4_t vcaltq_f32(float32x4_t __p0, float32x4_t __p1) {
+ uint32x4_t __ret;
+ __ret = (uint32x4_t) __builtin_neon_vcaltq_v((int8x16_t)__p0, (int8x16_t)__p1, 50);
+ return __ret;
+}
+#else
+__ai uint32x4_t vcaltq_f32(float32x4_t __p0, float32x4_t __p1) {
+ float32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
+ float32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
+ uint32x4_t __ret;
+ __ret = (uint32x4_t) __builtin_neon_vcaltq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 50);
+ __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint32x2_t vcalt_f32(float32x2_t __p0, float32x2_t __p1) {
+ uint32x2_t __ret;
+ __ret = (uint32x2_t) __builtin_neon_vcalt_v((int8x8_t)__p0, (int8x8_t)__p1, 18);
+ return __ret;
+}
+#else
+__ai uint32x2_t vcalt_f32(float32x2_t __p0, float32x2_t __p1) {
+ float32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
+ float32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
+ uint32x2_t __ret;
+ __ret = (uint32x2_t) __builtin_neon_vcalt_v((int8x8_t)__rev0, (int8x8_t)__rev1, 18);
+ __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
+ return __ret;
+}
+#endif
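+/* The vcagt/vcale/vcalt intrinsics above are absolute comparisons: each lane
+ * compares |__p0| against |__p1| (e.g. vcagt tests |__p0| > |__p1|) and
+ * yields the usual all-ones / all-zeros mask. The trailing integer passed to
+ * each __builtin_neon_* call (50 for uint32x4_t, 18 for uint32x2_t, and so
+ * on) encodes the NEON result type for the builtin. */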
+
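+/* vceq_*, vceqq_*: lane-wise equality (NEON VCEQ). Each result lane is all
+ * ones when the corresponding input lanes compare equal and all zeros
+ * otherwise. Note the pattern used throughout this header: the builtins and
+ * vector operators assume little-endian lane numbering, so the big-endian
+ * (#else) variants reverse the input lanes with __builtin_shufflevector,
+ * perform the operation, and reverse the result back.
+ *
+ * A minimal, hypothetical usage sketch (illustrative only, not part of this
+ * header; a, b, x, y are assumed uint8x16_t values):
+ *
+ *   uint8x16_t mask = vceqq_u8(a, b);        // 0xFF where a[i] == b[i]
+ *   uint8x16_t sel  = vbslq_u8(mask, x, y);  // pick x where equal, else y
+ */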
+#ifdef __LITTLE_ENDIAN__
+__ai uint8x8_t vceq_p8(poly8x8_t __p0, poly8x8_t __p1) {
+ uint8x8_t __ret;
+ __ret = (uint8x8_t)(__p0 == __p1);
+ return __ret;
+}
+#else
+__ai uint8x8_t vceq_p8(poly8x8_t __p0, poly8x8_t __p1) {
+ poly8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
+ poly8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
+ uint8x8_t __ret;
+ __ret = (uint8x8_t)(__rev0 == __rev1);
+ __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint8x16_t vceqq_p8(poly8x16_t __p0, poly8x16_t __p1) {
+ uint8x16_t __ret;
+ __ret = (uint8x16_t)(__p0 == __p1);
+ return __ret;
+}
+#else
+__ai uint8x16_t vceqq_p8(poly8x16_t __p0, poly8x16_t __p1) {
+ poly8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
+ poly8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
+ uint8x16_t __ret;
+ __ret = (uint8x16_t)(__rev0 == __rev1);
+ __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint8x16_t vceqq_u8(uint8x16_t __p0, uint8x16_t __p1) {
+ uint8x16_t __ret;
+ __ret = (uint8x16_t)(__p0 == __p1);
+ return __ret;
+}
+#else
+__ai uint8x16_t vceqq_u8(uint8x16_t __p0, uint8x16_t __p1) {
+ uint8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
+ uint8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
+ uint8x16_t __ret;
+ __ret = (uint8x16_t)(__rev0 == __rev1);
+ __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint32x4_t vceqq_u32(uint32x4_t __p0, uint32x4_t __p1) {
+ uint32x4_t __ret;
+ __ret = (uint32x4_t)(__p0 == __p1);
+ return __ret;
+}
+#else
+__ai uint32x4_t vceqq_u32(uint32x4_t __p0, uint32x4_t __p1) {
+ uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
+ uint32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
+ uint32x4_t __ret;
+ __ret = (uint32x4_t)(__rev0 == __rev1);
+ __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint16x8_t vceqq_u16(uint16x8_t __p0, uint16x8_t __p1) {
+ uint16x8_t __ret;
+ __ret = (uint16x8_t)(__p0 == __p1);
+ return __ret;
+}
+#else
+__ai uint16x8_t vceqq_u16(uint16x8_t __p0, uint16x8_t __p1) {
+ uint16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
+ uint16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
+ uint16x8_t __ret;
+ __ret = (uint16x8_t)(__rev0 == __rev1);
+ __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint8x16_t vceqq_s8(int8x16_t __p0, int8x16_t __p1) {
+ uint8x16_t __ret;
+ __ret = (uint8x16_t)(__p0 == __p1);
+ return __ret;
+}
+#else
+__ai uint8x16_t vceqq_s8(int8x16_t __p0, int8x16_t __p1) {
+ int8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
+ int8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
+ uint8x16_t __ret;
+ __ret = (uint8x16_t)(__rev0 == __rev1);
+ __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint32x4_t vceqq_f32(float32x4_t __p0, float32x4_t __p1) {
+ uint32x4_t __ret;
+ __ret = (uint32x4_t)(__p0 == __p1);
+ return __ret;
+}
+#else
+__ai uint32x4_t vceqq_f32(float32x4_t __p0, float32x4_t __p1) {
+ float32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
+ float32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
+ uint32x4_t __ret;
+ __ret = (uint32x4_t)(__rev0 == __rev1);
+ __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint32x4_t vceqq_s32(int32x4_t __p0, int32x4_t __p1) {
+ uint32x4_t __ret;
+ __ret = (uint32x4_t)(__p0 == __p1);
+ return __ret;
+}
+#else
+__ai uint32x4_t vceqq_s32(int32x4_t __p0, int32x4_t __p1) {
+ int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
+ int32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
+ uint32x4_t __ret;
+ __ret = (uint32x4_t)(__rev0 == __rev1);
+ __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint16x8_t vceqq_s16(int16x8_t __p0, int16x8_t __p1) {
+ uint16x8_t __ret;
+ __ret = (uint16x8_t)(__p0 == __p1);
+ return __ret;
+}
+#else
+__ai uint16x8_t vceqq_s16(int16x8_t __p0, int16x8_t __p1) {
+ int16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
+ int16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
+ uint16x8_t __ret;
+ __ret = (uint16x8_t)(__rev0 == __rev1);
+ __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint8x8_t vceq_u8(uint8x8_t __p0, uint8x8_t __p1) {
+ uint8x8_t __ret;
+ __ret = (uint8x8_t)(__p0 == __p1);
+ return __ret;
+}
+#else
+__ai uint8x8_t vceq_u8(uint8x8_t __p0, uint8x8_t __p1) {
+ uint8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
+ uint8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
+ uint8x8_t __ret;
+ __ret = (uint8x8_t)(__rev0 == __rev1);
+ __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint32x2_t vceq_u32(uint32x2_t __p0, uint32x2_t __p1) {
+ uint32x2_t __ret;
+ __ret = (uint32x2_t)(__p0 == __p1);
+ return __ret;
+}
+#else
+__ai uint32x2_t vceq_u32(uint32x2_t __p0, uint32x2_t __p1) {
+ uint32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
+ uint32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
+ uint32x2_t __ret;
+ __ret = (uint32x2_t)(__rev0 == __rev1);
+ __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint16x4_t vceq_u16(uint16x4_t __p0, uint16x4_t __p1) {
+ uint16x4_t __ret;
+ __ret = (uint16x4_t)(__p0 == __p1);
+ return __ret;
+}
+#else
+__ai uint16x4_t vceq_u16(uint16x4_t __p0, uint16x4_t __p1) {
+ uint16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
+ uint16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
+ uint16x4_t __ret;
+ __ret = (uint16x4_t)(__rev0 == __rev1);
+ __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint8x8_t vceq_s8(int8x8_t __p0, int8x8_t __p1) {
+ uint8x8_t __ret;
+ __ret = (uint8x8_t)(__p0 == __p1);
+ return __ret;
+}
+#else
+__ai uint8x8_t vceq_s8(int8x8_t __p0, int8x8_t __p1) {
+ int8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
+ int8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
+ uint8x8_t __ret;
+ __ret = (uint8x8_t)(__rev0 == __rev1);
+ __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint32x2_t vceq_f32(float32x2_t __p0, float32x2_t __p1) {
+ uint32x2_t __ret;
+ __ret = (uint32x2_t)(__p0 == __p1);
+ return __ret;
+}
+#else
+__ai uint32x2_t vceq_f32(float32x2_t __p0, float32x2_t __p1) {
+ float32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
+ float32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
+ uint32x2_t __ret;
+ __ret = (uint32x2_t)(__rev0 == __rev1);
+ __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint32x2_t vceq_s32(int32x2_t __p0, int32x2_t __p1) {
+ uint32x2_t __ret;
+ __ret = (uint32x2_t)(__p0 == __p1);
+ return __ret;
+}
+#else
+__ai uint32x2_t vceq_s32(int32x2_t __p0, int32x2_t __p1) {
+ int32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
+ int32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
+ uint32x2_t __ret;
+ __ret = (uint32x2_t)(__rev0 == __rev1);
+ __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint16x4_t vceq_s16(int16x4_t __p0, int16x4_t __p1) {
+ uint16x4_t __ret;
+ __ret = (uint16x4_t)(__p0 == __p1);
+ return __ret;
+}
+#else
+__ai uint16x4_t vceq_s16(int16x4_t __p0, int16x4_t __p1) {
+ int16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
+ int16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
+ uint16x4_t __ret;
+ __ret = (uint16x4_t)(__rev0 == __rev1);
+ __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
+ return __ret;
+}
+#endif
+
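+/* vcge_*, vcgeq_*: lane-wise greater-than-or-equal (NEON VCGE), producing
+ * the same all-ones / all-zeros mask convention as vceq. */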
+#ifdef __LITTLE_ENDIAN__
+__ai uint8x16_t vcgeq_u8(uint8x16_t __p0, uint8x16_t __p1) {
+ uint8x16_t __ret;
+ __ret = (uint8x16_t)(__p0 >= __p1);
+ return __ret;
+}
+#else
+__ai uint8x16_t vcgeq_u8(uint8x16_t __p0, uint8x16_t __p1) {
+ uint8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
+ uint8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
+ uint8x16_t __ret;
+ __ret = (uint8x16_t)(__rev0 >= __rev1);
+ __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint32x4_t vcgeq_u32(uint32x4_t __p0, uint32x4_t __p1) {
+ uint32x4_t __ret;
+ __ret = (uint32x4_t)(__p0 >= __p1);
+ return __ret;
+}
+#else
+__ai uint32x4_t vcgeq_u32(uint32x4_t __p0, uint32x4_t __p1) {
+ uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
+ uint32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
+ uint32x4_t __ret;
+ __ret = (uint32x4_t)(__rev0 >= __rev1);
+ __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint16x8_t vcgeq_u16(uint16x8_t __p0, uint16x8_t __p1) {
+ uint16x8_t __ret;
+ __ret = (uint16x8_t)(__p0 >= __p1);
+ return __ret;
+}
+#else
+__ai uint16x8_t vcgeq_u16(uint16x8_t __p0, uint16x8_t __p1) {
+ uint16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
+ uint16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
+ uint16x8_t __ret;
+ __ret = (uint16x8_t)(__rev0 >= __rev1);
+ __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint8x16_t vcgeq_s8(int8x16_t __p0, int8x16_t __p1) {
+ uint8x16_t __ret;
+ __ret = (uint8x16_t)(__p0 >= __p1);
+ return __ret;
+}
+#else
+__ai uint8x16_t vcgeq_s8(int8x16_t __p0, int8x16_t __p1) {
+ int8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
+ int8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
+ uint8x16_t __ret;
+ __ret = (uint8x16_t)(__rev0 >= __rev1);
+ __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint32x4_t vcgeq_f32(float32x4_t __p0, float32x4_t __p1) {
+ uint32x4_t __ret;
+ __ret = (uint32x4_t)(__p0 >= __p1);
+ return __ret;
+}
+#else
+__ai uint32x4_t vcgeq_f32(float32x4_t __p0, float32x4_t __p1) {
+ float32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
+ float32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
+ uint32x4_t __ret;
+ __ret = (uint32x4_t)(__rev0 >= __rev1);
+ __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint32x4_t vcgeq_s32(int32x4_t __p0, int32x4_t __p1) {
+ uint32x4_t __ret;
+ __ret = (uint32x4_t)(__p0 >= __p1);
+ return __ret;
+}
+#else
+__ai uint32x4_t vcgeq_s32(int32x4_t __p0, int32x4_t __p1) {
+ int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
+ int32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
+ uint32x4_t __ret;
+ __ret = (uint32x4_t)(__rev0 >= __rev1);
+ __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint16x8_t vcgeq_s16(int16x8_t __p0, int16x8_t __p1) {
+ uint16x8_t __ret;
+ __ret = (uint16x8_t)(__p0 >= __p1);
+ return __ret;
+}
+#else
+__ai uint16x8_t vcgeq_s16(int16x8_t __p0, int16x8_t __p1) {
+ int16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
+ int16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
+ uint16x8_t __ret;
+ __ret = (uint16x8_t)(__rev0 >= __rev1);
+ __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint8x8_t vcge_u8(uint8x8_t __p0, uint8x8_t __p1) {
+ uint8x8_t __ret;
+ __ret = (uint8x8_t)(__p0 >= __p1);
+ return __ret;
+}
+#else
+__ai uint8x8_t vcge_u8(uint8x8_t __p0, uint8x8_t __p1) {
+ uint8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
+ uint8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
+ uint8x8_t __ret;
+ __ret = (uint8x8_t)(__rev0 >= __rev1);
+ __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint32x2_t vcge_u32(uint32x2_t __p0, uint32x2_t __p1) {
+ uint32x2_t __ret;
+ __ret = (uint32x2_t)(__p0 >= __p1);
+ return __ret;
+}
+#else
+__ai uint32x2_t vcge_u32(uint32x2_t __p0, uint32x2_t __p1) {
+ uint32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
+ uint32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
+ uint32x2_t __ret;
+ __ret = (uint32x2_t)(__rev0 >= __rev1);
+ __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint16x4_t vcge_u16(uint16x4_t __p0, uint16x4_t __p1) {
+ uint16x4_t __ret;
+ __ret = (uint16x4_t)(__p0 >= __p1);
+ return __ret;
+}
+#else
+__ai uint16x4_t vcge_u16(uint16x4_t __p0, uint16x4_t __p1) {
+ uint16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
+ uint16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
+ uint16x4_t __ret;
+ __ret = (uint16x4_t)(__rev0 >= __rev1);
+ __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint8x8_t vcge_s8(int8x8_t __p0, int8x8_t __p1) {
+ uint8x8_t __ret;
+ __ret = (uint8x8_t)(__p0 >= __p1);
+ return __ret;
+}
+#else
+__ai uint8x8_t vcge_s8(int8x8_t __p0, int8x8_t __p1) {
+ int8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
+ int8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
+ uint8x8_t __ret;
+ __ret = (uint8x8_t)(__rev0 >= __rev1);
+ __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint32x2_t vcge_f32(float32x2_t __p0, float32x2_t __p1) {
+ uint32x2_t __ret;
+ __ret = (uint32x2_t)(__p0 >= __p1);
+ return __ret;
+}
+#else
+__ai uint32x2_t vcge_f32(float32x2_t __p0, float32x2_t __p1) {
+ float32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
+ float32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
+ uint32x2_t __ret;
+ __ret = (uint32x2_t)(__rev0 >= __rev1);
+ __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint32x2_t vcge_s32(int32x2_t __p0, int32x2_t __p1) {
+ uint32x2_t __ret;
+ __ret = (uint32x2_t)(__p0 >= __p1);
+ return __ret;
+}
+#else
+__ai uint32x2_t vcge_s32(int32x2_t __p0, int32x2_t __p1) {
+ int32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
+ int32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
+ uint32x2_t __ret;
+ __ret = (uint32x2_t)(__rev0 >= __rev1);
+ __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint16x4_t vcge_s16(int16x4_t __p0, int16x4_t __p1) {
+ uint16x4_t __ret;
+ __ret = (uint16x4_t)(__p0 >= __p1);
+ return __ret;
+}
+#else
+__ai uint16x4_t vcge_s16(int16x4_t __p0, int16x4_t __p1) {
+ int16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
+ int16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
+ uint16x4_t __ret;
+ __ret = (uint16x4_t)(__rev0 >= __rev1);
+ __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
+ return __ret;
+}
+#endif
+
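+/* vcgt_*, vcgtq_*: lane-wise greater-than (NEON VCGT). */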
+#ifdef __LITTLE_ENDIAN__
+__ai uint8x16_t vcgtq_u8(uint8x16_t __p0, uint8x16_t __p1) {
+ uint8x16_t __ret;
+ __ret = (uint8x16_t)(__p0 > __p1);
+ return __ret;
+}
+#else
+__ai uint8x16_t vcgtq_u8(uint8x16_t __p0, uint8x16_t __p1) {
+ uint8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
+ uint8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
+ uint8x16_t __ret;
+ __ret = (uint8x16_t)(__rev0 > __rev1);
+ __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint32x4_t vcgtq_u32(uint32x4_t __p0, uint32x4_t __p1) {
+ uint32x4_t __ret;
+ __ret = (uint32x4_t)(__p0 > __p1);
+ return __ret;
+}
+#else
+__ai uint32x4_t vcgtq_u32(uint32x4_t __p0, uint32x4_t __p1) {
+ uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
+ uint32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
+ uint32x4_t __ret;
+ __ret = (uint32x4_t)(__rev0 > __rev1);
+ __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint16x8_t vcgtq_u16(uint16x8_t __p0, uint16x8_t __p1) {
+ uint16x8_t __ret;
+ __ret = (uint16x8_t)(__p0 > __p1);
+ return __ret;
+}
+#else
+__ai uint16x8_t vcgtq_u16(uint16x8_t __p0, uint16x8_t __p1) {
+ uint16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
+ uint16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
+ uint16x8_t __ret;
+ __ret = (uint16x8_t)(__rev0 > __rev1);
+ __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint8x16_t vcgtq_s8(int8x16_t __p0, int8x16_t __p1) {
+ uint8x16_t __ret;
+ __ret = (uint8x16_t)(__p0 > __p1);
+ return __ret;
+}
+#else
+__ai uint8x16_t vcgtq_s8(int8x16_t __p0, int8x16_t __p1) {
+ int8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
+ int8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
+ uint8x16_t __ret;
+ __ret = (uint8x16_t)(__rev0 > __rev1);
+ __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint32x4_t vcgtq_f32(float32x4_t __p0, float32x4_t __p1) {
+ uint32x4_t __ret;
+ __ret = (uint32x4_t)(__p0 > __p1);
+ return __ret;
+}
+#else
+__ai uint32x4_t vcgtq_f32(float32x4_t __p0, float32x4_t __p1) {
+ float32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
+ float32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
+ uint32x4_t __ret;
+ __ret = (uint32x4_t)(__rev0 > __rev1);
+ __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint32x4_t vcgtq_s32(int32x4_t __p0, int32x4_t __p1) {
+ uint32x4_t __ret;
+ __ret = (uint32x4_t)(__p0 > __p1);
+ return __ret;
+}
+#else
+__ai uint32x4_t vcgtq_s32(int32x4_t __p0, int32x4_t __p1) {
+ int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
+ int32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
+ uint32x4_t __ret;
+ __ret = (uint32x4_t)(__rev0 > __rev1);
+ __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint16x8_t vcgtq_s16(int16x8_t __p0, int16x8_t __p1) {
+ uint16x8_t __ret;
+ __ret = (uint16x8_t)(__p0 > __p1);
+ return __ret;
+}
+#else
+__ai uint16x8_t vcgtq_s16(int16x8_t __p0, int16x8_t __p1) {
+ int16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
+ int16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
+ uint16x8_t __ret;
+ __ret = (uint16x8_t)(__rev0 > __rev1);
+ __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint8x8_t vcgt_u8(uint8x8_t __p0, uint8x8_t __p1) {
+ uint8x8_t __ret;
+ __ret = (uint8x8_t)(__p0 > __p1);
+ return __ret;
+}
+#else
+__ai uint8x8_t vcgt_u8(uint8x8_t __p0, uint8x8_t __p1) {
+ uint8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
+ uint8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
+ uint8x8_t __ret;
+ __ret = (uint8x8_t)(__rev0 > __rev1);
+ __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint32x2_t vcgt_u32(uint32x2_t __p0, uint32x2_t __p1) {
+ uint32x2_t __ret;
+ __ret = (uint32x2_t)(__p0 > __p1);
+ return __ret;
+}
+#else
+__ai uint32x2_t vcgt_u32(uint32x2_t __p0, uint32x2_t __p1) {
+ uint32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
+ uint32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
+ uint32x2_t __ret;
+ __ret = (uint32x2_t)(__rev0 > __rev1);
+ __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint16x4_t vcgt_u16(uint16x4_t __p0, uint16x4_t __p1) {
+ uint16x4_t __ret;
+ __ret = (uint16x4_t)(__p0 > __p1);
+ return __ret;
+}
+#else
+__ai uint16x4_t vcgt_u16(uint16x4_t __p0, uint16x4_t __p1) {
+ uint16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
+ uint16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
+ uint16x4_t __ret;
+ __ret = (uint16x4_t)(__rev0 > __rev1);
+ __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint8x8_t vcgt_s8(int8x8_t __p0, int8x8_t __p1) {
+ uint8x8_t __ret;
+ __ret = (uint8x8_t)(__p0 > __p1);
+ return __ret;
+}
+#else
+__ai uint8x8_t vcgt_s8(int8x8_t __p0, int8x8_t __p1) {
+ int8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
+ int8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
+ uint8x8_t __ret;
+ __ret = (uint8x8_t)(__rev0 > __rev1);
+ __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint32x2_t vcgt_f32(float32x2_t __p0, float32x2_t __p1) {
+ uint32x2_t __ret;
+ __ret = (uint32x2_t)(__p0 > __p1);
+ return __ret;
+}
+#else
+__ai uint32x2_t vcgt_f32(float32x2_t __p0, float32x2_t __p1) {
+ float32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
+ float32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
+ uint32x2_t __ret;
+ __ret = (uint32x2_t)(__rev0 > __rev1);
+ __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint32x2_t vcgt_s32(int32x2_t __p0, int32x2_t __p1) {
+ uint32x2_t __ret;
+ __ret = (uint32x2_t)(__p0 > __p1);
+ return __ret;
+}
+#else
+__ai uint32x2_t vcgt_s32(int32x2_t __p0, int32x2_t __p1) {
+ int32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
+ int32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
+ uint32x2_t __ret;
+ __ret = (uint32x2_t)(__rev0 > __rev1);
+ __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint16x4_t vcgt_s16(int16x4_t __p0, int16x4_t __p1) {
+ uint16x4_t __ret;
+ __ret = (uint16x4_t)(__p0 > __p1);
+ return __ret;
+}
+#else
+__ai uint16x4_t vcgt_s16(int16x4_t __p0, int16x4_t __p1) {
+ int16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
+ int16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
+ uint16x4_t __ret;
+ __ret = (uint16x4_t)(__rev0 > __rev1);
+ __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
+ return __ret;
+}
+#endif
+
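+/* vcle_*, vcleq_*: lane-wise less-than-or-equal. These are written with the
+ * plain C <= operator; the register-register VCLE form is assembled as VCGE
+ * with the operands swapped, so the compiler is free to lower it that way. */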
+#ifdef __LITTLE_ENDIAN__
+__ai uint8x16_t vcleq_u8(uint8x16_t __p0, uint8x16_t __p1) {
+ uint8x16_t __ret;
+ __ret = (uint8x16_t)(__p0 <= __p1);
+ return __ret;
+}
+#else
+__ai uint8x16_t vcleq_u8(uint8x16_t __p0, uint8x16_t __p1) {
+ uint8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
+ uint8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
+ uint8x16_t __ret;
+ __ret = (uint8x16_t)(__rev0 <= __rev1);
+ __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint32x4_t vcleq_u32(uint32x4_t __p0, uint32x4_t __p1) {
+ uint32x4_t __ret;
+ __ret = (uint32x4_t)(__p0 <= __p1);
+ return __ret;
+}
+#else
+__ai uint32x4_t vcleq_u32(uint32x4_t __p0, uint32x4_t __p1) {
+ uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
+ uint32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
+ uint32x4_t __ret;
+ __ret = (uint32x4_t)(__rev0 <= __rev1);
+ __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint16x8_t vcleq_u16(uint16x8_t __p0, uint16x8_t __p1) {
+ uint16x8_t __ret;
+ __ret = (uint16x8_t)(__p0 <= __p1);
+ return __ret;
+}
+#else
+__ai uint16x8_t vcleq_u16(uint16x8_t __p0, uint16x8_t __p1) {
+ uint16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
+ uint16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
+ uint16x8_t __ret;
+ __ret = (uint16x8_t)(__rev0 <= __rev1);
+ __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint8x16_t vcleq_s8(int8x16_t __p0, int8x16_t __p1) {
+ uint8x16_t __ret;
+ __ret = (uint8x16_t)(__p0 <= __p1);
+ return __ret;
+}
+#else
+__ai uint8x16_t vcleq_s8(int8x16_t __p0, int8x16_t __p1) {
+ int8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
+ int8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
+ uint8x16_t __ret;
+ __ret = (uint8x16_t)(__rev0 <= __rev1);
+ __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint32x4_t vcleq_f32(float32x4_t __p0, float32x4_t __p1) {
+ uint32x4_t __ret;
+ __ret = (uint32x4_t)(__p0 <= __p1);
+ return __ret;
+}
+#else
+__ai uint32x4_t vcleq_f32(float32x4_t __p0, float32x4_t __p1) {
+ float32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
+ float32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
+ uint32x4_t __ret;
+ __ret = (uint32x4_t)(__rev0 <= __rev1);
+ __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint32x4_t vcleq_s32(int32x4_t __p0, int32x4_t __p1) {
+ uint32x4_t __ret;
+ __ret = (uint32x4_t)(__p0 <= __p1);
+ return __ret;
+}
+#else
+__ai uint32x4_t vcleq_s32(int32x4_t __p0, int32x4_t __p1) {
+ int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
+ int32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
+ uint32x4_t __ret;
+ __ret = (uint32x4_t)(__rev0 <= __rev1);
+ __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint16x8_t vcleq_s16(int16x8_t __p0, int16x8_t __p1) {
+ uint16x8_t __ret;
+ __ret = (uint16x8_t)(__p0 <= __p1);
+ return __ret;
+}
+#else
+__ai uint16x8_t vcleq_s16(int16x8_t __p0, int16x8_t __p1) {
+ int16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
+ int16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
+ uint16x8_t __ret;
+ __ret = (uint16x8_t)(__rev0 <= __rev1);
+ __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint8x8_t vcle_u8(uint8x8_t __p0, uint8x8_t __p1) {
+ uint8x8_t __ret;
+ __ret = (uint8x8_t)(__p0 <= __p1);
+ return __ret;
+}
+#else
+__ai uint8x8_t vcle_u8(uint8x8_t __p0, uint8x8_t __p1) {
+ uint8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
+ uint8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
+ uint8x8_t __ret;
+ __ret = (uint8x8_t)(__rev0 <= __rev1);
+ __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint32x2_t vcle_u32(uint32x2_t __p0, uint32x2_t __p1) {
+ uint32x2_t __ret;
+ __ret = (uint32x2_t)(__p0 <= __p1);
+ return __ret;
+}
+#else
+__ai uint32x2_t vcle_u32(uint32x2_t __p0, uint32x2_t __p1) {
+ uint32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
+ uint32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
+ uint32x2_t __ret;
+ __ret = (uint32x2_t)(__rev0 <= __rev1);
+ __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint16x4_t vcle_u16(uint16x4_t __p0, uint16x4_t __p1) {
+ uint16x4_t __ret;
+ __ret = (uint16x4_t)(__p0 <= __p1);
+ return __ret;
+}
+#else
+__ai uint16x4_t vcle_u16(uint16x4_t __p0, uint16x4_t __p1) {
+ uint16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
+ uint16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
+ uint16x4_t __ret;
+ __ret = (uint16x4_t)(__rev0 <= __rev1);
+ __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint8x8_t vcle_s8(int8x8_t __p0, int8x8_t __p1) {
+ uint8x8_t __ret;
+ __ret = (uint8x8_t)(__p0 <= __p1);
+ return __ret;
+}
+#else
+__ai uint8x8_t vcle_s8(int8x8_t __p0, int8x8_t __p1) {
+ int8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
+ int8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
+ uint8x8_t __ret;
+ __ret = (uint8x8_t)(__rev0 <= __rev1);
+ __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint32x2_t vcle_f32(float32x2_t __p0, float32x2_t __p1) {
+ uint32x2_t __ret;
+ __ret = (uint32x2_t)(__p0 <= __p1);
+ return __ret;
+}
+#else
+__ai uint32x2_t vcle_f32(float32x2_t __p0, float32x2_t __p1) {
+ float32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
+ float32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
+ uint32x2_t __ret;
+ __ret = (uint32x2_t)(__rev0 <= __rev1);
+ __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint32x2_t vcle_s32(int32x2_t __p0, int32x2_t __p1) {
+ uint32x2_t __ret;
+ __ret = (uint32x2_t)(__p0 <= __p1);
+ return __ret;
+}
+#else
+__ai uint32x2_t vcle_s32(int32x2_t __p0, int32x2_t __p1) {
+ int32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
+ int32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
+ uint32x2_t __ret;
+ __ret = (uint32x2_t)(__rev0 <= __rev1);
+ __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint16x4_t vcle_s16(int16x4_t __p0, int16x4_t __p1) {
+ uint16x4_t __ret;
+ __ret = (uint16x4_t)(__p0 <= __p1);
+ return __ret;
+}
+#else
+__ai uint16x4_t vcle_s16(int16x4_t __p0, int16x4_t __p1) {
+ int16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
+ int16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
+ uint16x4_t __ret;
+ __ret = (uint16x4_t)(__rev0 <= __rev1);
+ __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
+ return __ret;
+}
+#endif
+
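+/* vcls_*, vclsq_*: count leading sign bits (NEON VCLS). Each result lane
+ * holds the number of consecutive bits, starting just below the sign bit,
+ * that match the sign bit; only signed element types are provided. */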
+#ifdef __LITTLE_ENDIAN__
+__ai int8x16_t vclsq_s8(int8x16_t __p0) {
+ int8x16_t __ret;
+ __ret = (int8x16_t) __builtin_neon_vclsq_v((int8x16_t)__p0, 32);
+ return __ret;
+}
+#else
+__ai int8x16_t vclsq_s8(int8x16_t __p0) {
+ int8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
+ int8x16_t __ret;
+ __ret = (int8x16_t) __builtin_neon_vclsq_v((int8x16_t)__rev0, 32);
+ __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai int32x4_t vclsq_s32(int32x4_t __p0) {
+ int32x4_t __ret;
+ __ret = (int32x4_t) __builtin_neon_vclsq_v((int8x16_t)__p0, 34);
+ return __ret;
+}
+#else
+__ai int32x4_t vclsq_s32(int32x4_t __p0) {
+ int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
+ int32x4_t __ret;
+ __ret = (int32x4_t) __builtin_neon_vclsq_v((int8x16_t)__rev0, 34);
+ __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai int16x8_t vclsq_s16(int16x8_t __p0) {
+ int16x8_t __ret;
+ __ret = (int16x8_t) __builtin_neon_vclsq_v((int8x16_t)__p0, 33);
+ return __ret;
+}
+#else
+__ai int16x8_t vclsq_s16(int16x8_t __p0) {
+ int16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
+ int16x8_t __ret;
+ __ret = (int16x8_t) __builtin_neon_vclsq_v((int8x16_t)__rev0, 33);
+ __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai int8x8_t vcls_s8(int8x8_t __p0) {
+ int8x8_t __ret;
+ __ret = (int8x8_t) __builtin_neon_vcls_v((int8x8_t)__p0, 0);
+ return __ret;
+}
+#else
+__ai int8x8_t vcls_s8(int8x8_t __p0) {
+ int8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
+ int8x8_t __ret;
+ __ret = (int8x8_t) __builtin_neon_vcls_v((int8x8_t)__rev0, 0);
+ __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai int32x2_t vcls_s32(int32x2_t __p0) {
+ int32x2_t __ret;
+ __ret = (int32x2_t) __builtin_neon_vcls_v((int8x8_t)__p0, 2);
+ return __ret;
+}
+#else
+__ai int32x2_t vcls_s32(int32x2_t __p0) {
+ int32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
+ int32x2_t __ret;
+ __ret = (int32x2_t) __builtin_neon_vcls_v((int8x8_t)__rev0, 2);
+ __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai int16x4_t vcls_s16(int16x4_t __p0) {
+ int16x4_t __ret;
+ __ret = (int16x4_t) __builtin_neon_vcls_v((int8x8_t)__p0, 1);
+ return __ret;
+}
+#else
+__ai int16x4_t vcls_s16(int16x4_t __p0) {
+ int16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
+ int16x4_t __ret;
+ __ret = (int16x4_t) __builtin_neon_vcls_v((int8x8_t)__rev0, 1);
+ __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
+ return __ret;
+}
+#endif
+
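+/* vclt_*, vcltq_*: lane-wise less-than, the mirror of vcgt. */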
+#ifdef __LITTLE_ENDIAN__
+__ai uint8x16_t vcltq_u8(uint8x16_t __p0, uint8x16_t __p1) {
+ uint8x16_t __ret;
+ __ret = (uint8x16_t)(__p0 < __p1);
+ return __ret;
+}
+#else
+__ai uint8x16_t vcltq_u8(uint8x16_t __p0, uint8x16_t __p1) {
+ uint8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
+ uint8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
+ uint8x16_t __ret;
+ __ret = (uint8x16_t)(__rev0 < __rev1);
+ __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint32x4_t vcltq_u32(uint32x4_t __p0, uint32x4_t __p1) {
+ uint32x4_t __ret;
+ __ret = (uint32x4_t)(__p0 < __p1);
+ return __ret;
+}
+#else
+__ai uint32x4_t vcltq_u32(uint32x4_t __p0, uint32x4_t __p1) {
+ uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
+ uint32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
+ uint32x4_t __ret;
+ __ret = (uint32x4_t)(__rev0 < __rev1);
+ __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint16x8_t vcltq_u16(uint16x8_t __p0, uint16x8_t __p1) {
+ uint16x8_t __ret;
+ __ret = (uint16x8_t)(__p0 < __p1);
+ return __ret;
+}
+#else
+__ai uint16x8_t vcltq_u16(uint16x8_t __p0, uint16x8_t __p1) {
+ uint16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
+ uint16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
+ uint16x8_t __ret;
+ __ret = (uint16x8_t)(__rev0 < __rev1);
+ __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint8x16_t vcltq_s8(int8x16_t __p0, int8x16_t __p1) {
+ uint8x16_t __ret;
+ __ret = (uint8x16_t)(__p0 < __p1);
+ return __ret;
+}
+#else
+__ai uint8x16_t vcltq_s8(int8x16_t __p0, int8x16_t __p1) {
+ int8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
+ int8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
+ uint8x16_t __ret;
+ __ret = (uint8x16_t)(__rev0 < __rev1);
+ __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint32x4_t vcltq_f32(float32x4_t __p0, float32x4_t __p1) {
+ uint32x4_t __ret;
+ __ret = (uint32x4_t)(__p0 < __p1);
+ return __ret;
+}
+#else
+__ai uint32x4_t vcltq_f32(float32x4_t __p0, float32x4_t __p1) {
+ float32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
+ float32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
+ uint32x4_t __ret;
+ __ret = (uint32x4_t)(__rev0 < __rev1);
+ __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint32x4_t vcltq_s32(int32x4_t __p0, int32x4_t __p1) {
+ uint32x4_t __ret;
+ __ret = (uint32x4_t)(__p0 < __p1);
+ return __ret;
+}
+#else
+__ai uint32x4_t vcltq_s32(int32x4_t __p0, int32x4_t __p1) {
+ int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
+ int32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
+ uint32x4_t __ret;
+ __ret = (uint32x4_t)(__rev0 < __rev1);
+ __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint16x8_t vcltq_s16(int16x8_t __p0, int16x8_t __p1) {
+ uint16x8_t __ret;
+ __ret = (uint16x8_t)(__p0 < __p1);
+ return __ret;
+}
+#else
+__ai uint16x8_t vcltq_s16(int16x8_t __p0, int16x8_t __p1) {
+ int16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
+ int16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
+ uint16x8_t __ret;
+ __ret = (uint16x8_t)(__rev0 < __rev1);
+ __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint8x8_t vclt_u8(uint8x8_t __p0, uint8x8_t __p1) {
+ uint8x8_t __ret;
+ __ret = (uint8x8_t)(__p0 < __p1);
+ return __ret;
+}
+#else
+__ai uint8x8_t vclt_u8(uint8x8_t __p0, uint8x8_t __p1) {
+ uint8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
+ uint8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
+ uint8x8_t __ret;
+ __ret = (uint8x8_t)(__rev0 < __rev1);
+ __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint32x2_t vclt_u32(uint32x2_t __p0, uint32x2_t __p1) {
+ uint32x2_t __ret;
+ __ret = (uint32x2_t)(__p0 < __p1);
+ return __ret;
+}
+#else
+__ai uint32x2_t vclt_u32(uint32x2_t __p0, uint32x2_t __p1) {
+ uint32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
+ uint32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
+ uint32x2_t __ret;
+ __ret = (uint32x2_t)(__rev0 < __rev1);
+ __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint16x4_t vclt_u16(uint16x4_t __p0, uint16x4_t __p1) {
+ uint16x4_t __ret;
+ __ret = (uint16x4_t)(__p0 < __p1);
+ return __ret;
+}
+#else
+__ai uint16x4_t vclt_u16(uint16x4_t __p0, uint16x4_t __p1) {
+ uint16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
+ uint16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
+ uint16x4_t __ret;
+ __ret = (uint16x4_t)(__rev0 < __rev1);
+ __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint8x8_t vclt_s8(int8x8_t __p0, int8x8_t __p1) {
+ uint8x8_t __ret;
+ __ret = (uint8x8_t)(__p0 < __p1);
+ return __ret;
+}
+#else
+__ai uint8x8_t vclt_s8(int8x8_t __p0, int8x8_t __p1) {
+ int8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
+ int8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
+ uint8x8_t __ret;
+ __ret = (uint8x8_t)(__rev0 < __rev1);
+ __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint32x2_t vclt_f32(float32x2_t __p0, float32x2_t __p1) {
+ uint32x2_t __ret;
+ __ret = (uint32x2_t)(__p0 < __p1);
+ return __ret;
+}
+#else
+__ai uint32x2_t vclt_f32(float32x2_t __p0, float32x2_t __p1) {
+ float32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
+ float32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
+ uint32x2_t __ret;
+ __ret = (uint32x2_t)(__rev0 < __rev1);
+ __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint32x2_t vclt_s32(int32x2_t __p0, int32x2_t __p1) {
+ uint32x2_t __ret;
+ __ret = (uint32x2_t)(__p0 < __p1);
+ return __ret;
+}
+#else
+__ai uint32x2_t vclt_s32(int32x2_t __p0, int32x2_t __p1) {
+ int32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
+ int32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
+ uint32x2_t __ret;
+ __ret = (uint32x2_t)(__rev0 < __rev1);
+ __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint16x4_t vclt_s16(int16x4_t __p0, int16x4_t __p1) {
+ uint16x4_t __ret;
+ __ret = (uint16x4_t)(__p0 < __p1);
+ return __ret;
+}
+#else
+__ai uint16x4_t vclt_s16(int16x4_t __p0, int16x4_t __p1) {
+ int16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
+ int16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
+ uint16x4_t __ret;
+ __ret = (uint16x4_t)(__rev0 < __rev1);
+ __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
+ return __ret;
+}
+#endif
+
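+/* vclz_*, vclzq_*: count leading zero bits per lane (NEON VCLZ). */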
+#ifdef __LITTLE_ENDIAN__
+__ai uint8x16_t vclzq_u8(uint8x16_t __p0) {
+ uint8x16_t __ret;
+ __ret = (uint8x16_t) __builtin_neon_vclzq_v((int8x16_t)__p0, 48);
+ return __ret;
+}
+#else
+__ai uint8x16_t vclzq_u8(uint8x16_t __p0) {
+ uint8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
+ uint8x16_t __ret;
+ __ret = (uint8x16_t) __builtin_neon_vclzq_v((int8x16_t)__rev0, 48);
+ __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint32x4_t vclzq_u32(uint32x4_t __p0) {
+ uint32x4_t __ret;
+ __ret = (uint32x4_t) __builtin_neon_vclzq_v((int8x16_t)__p0, 50);
+ return __ret;
+}
+#else
+__ai uint32x4_t vclzq_u32(uint32x4_t __p0) {
+ uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
+ uint32x4_t __ret;
+ __ret = (uint32x4_t) __builtin_neon_vclzq_v((int8x16_t)__rev0, 50);
+ __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint16x8_t vclzq_u16(uint16x8_t __p0) {
+ uint16x8_t __ret;
+ __ret = (uint16x8_t) __builtin_neon_vclzq_v((int8x16_t)__p0, 49);
+ return __ret;
+}
+#else
+__ai uint16x8_t vclzq_u16(uint16x8_t __p0) {
+ uint16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
+ uint16x8_t __ret;
+ __ret = (uint16x8_t) __builtin_neon_vclzq_v((int8x16_t)__rev0, 49);
+ __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai int8x16_t vclzq_s8(int8x16_t __p0) {
+ int8x16_t __ret;
+ __ret = (int8x16_t) __builtin_neon_vclzq_v((int8x16_t)__p0, 32);
+ return __ret;
+}
+#else
+__ai int8x16_t vclzq_s8(int8x16_t __p0) {
+ int8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
+ int8x16_t __ret;
+ __ret = (int8x16_t) __builtin_neon_vclzq_v((int8x16_t)__rev0, 32);
+ __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai int32x4_t vclzq_s32(int32x4_t __p0) {
+ int32x4_t __ret;
+ __ret = (int32x4_t) __builtin_neon_vclzq_v((int8x16_t)__p0, 34);
+ return __ret;
+}
+#else
+__ai int32x4_t vclzq_s32(int32x4_t __p0) {
+ int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
+ int32x4_t __ret;
+ __ret = (int32x4_t) __builtin_neon_vclzq_v((int8x16_t)__rev0, 34);
+ __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai int16x8_t vclzq_s16(int16x8_t __p0) {
+ int16x8_t __ret;
+ __ret = (int16x8_t) __builtin_neon_vclzq_v((int8x16_t)__p0, 33);
+ return __ret;
+}
+#else
+__ai int16x8_t vclzq_s16(int16x8_t __p0) {
+ int16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
+ int16x8_t __ret;
+ __ret = (int16x8_t) __builtin_neon_vclzq_v((int8x16_t)__rev0, 33);
+ __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint8x8_t vclz_u8(uint8x8_t __p0) {
+ uint8x8_t __ret;
+ __ret = (uint8x8_t) __builtin_neon_vclz_v((int8x8_t)__p0, 16);
+ return __ret;
+}
+#else
+__ai uint8x8_t vclz_u8(uint8x8_t __p0) {
+ uint8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
+ uint8x8_t __ret;
+ __ret = (uint8x8_t) __builtin_neon_vclz_v((int8x8_t)__rev0, 16);
+ __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint32x2_t vclz_u32(uint32x2_t __p0) {
+ uint32x2_t __ret;
+ __ret = (uint32x2_t) __builtin_neon_vclz_v((int8x8_t)__p0, 18);
+ return __ret;
+}
+#else
+__ai uint32x2_t vclz_u32(uint32x2_t __p0) {
+ uint32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
+ uint32x2_t __ret;
+ __ret = (uint32x2_t) __builtin_neon_vclz_v((int8x8_t)__rev0, 18);
+ __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint16x4_t vclz_u16(uint16x4_t __p0) {
+ uint16x4_t __ret;
+ __ret = (uint16x4_t) __builtin_neon_vclz_v((int8x8_t)__p0, 17);
+ return __ret;
+}
+#else
+__ai uint16x4_t vclz_u16(uint16x4_t __p0) {
+ uint16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
+ uint16x4_t __ret;
+ __ret = (uint16x4_t) __builtin_neon_vclz_v((int8x8_t)__rev0, 17);
+ __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai int8x8_t vclz_s8(int8x8_t __p0) {
+ int8x8_t __ret;
+ __ret = (int8x8_t) __builtin_neon_vclz_v((int8x8_t)__p0, 0);
+ return __ret;
+}
+#else
+__ai int8x8_t vclz_s8(int8x8_t __p0) {
+ int8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
+ int8x8_t __ret;
+ __ret = (int8x8_t) __builtin_neon_vclz_v((int8x8_t)__rev0, 0);
+ __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai int32x2_t vclz_s32(int32x2_t __p0) {
+ int32x2_t __ret;
+ __ret = (int32x2_t) __builtin_neon_vclz_v((int8x8_t)__p0, 2);
+ return __ret;
+}
+#else
+__ai int32x2_t vclz_s32(int32x2_t __p0) {
+ int32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
+ int32x2_t __ret;
+ __ret = (int32x2_t) __builtin_neon_vclz_v((int8x8_t)__rev0, 2);
+ __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai int16x4_t vclz_s16(int16x4_t __p0) {
+ int16x4_t __ret;
+ __ret = (int16x4_t) __builtin_neon_vclz_v((int8x8_t)__p0, 1);
+ return __ret;
+}
+#else
+__ai int16x4_t vclz_s16(int16x4_t __p0) {
+ int16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
+ int16x4_t __ret;
+ __ret = (int16x4_t) __builtin_neon_vclz_v((int8x8_t)__rev0, 1);
+ __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
+ return __ret;
+}
+#endif
+
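+/* vcnt_*, vcntq_*: population count per byte (NEON VCNT). Only 8-bit
+ * element types are provided, since the instruction operates on bytes. */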
+#ifdef __LITTLE_ENDIAN__
+__ai poly8x8_t vcnt_p8(poly8x8_t __p0) {
+ poly8x8_t __ret;
+ __ret = (poly8x8_t) __builtin_neon_vcnt_v((int8x8_t)__p0, 4);
+ return __ret;
+}
+#else
+__ai poly8x8_t vcnt_p8(poly8x8_t __p0) {
+ poly8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
+ poly8x8_t __ret;
+ __ret = (poly8x8_t) __builtin_neon_vcnt_v((int8x8_t)__rev0, 4);
+ __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai poly8x16_t vcntq_p8(poly8x16_t __p0) {
+ poly8x16_t __ret;
+ __ret = (poly8x16_t) __builtin_neon_vcntq_v((int8x16_t)__p0, 36);
+ return __ret;
+}
+#else
+__ai poly8x16_t vcntq_p8(poly8x16_t __p0) {
+ poly8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
+ poly8x16_t __ret;
+ __ret = (poly8x16_t) __builtin_neon_vcntq_v((int8x16_t)__rev0, 36);
+ __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint8x16_t vcntq_u8(uint8x16_t __p0) {
+ uint8x16_t __ret;
+ __ret = (uint8x16_t) __builtin_neon_vcntq_v((int8x16_t)__p0, 48);
+ return __ret;
+}
+#else
+__ai uint8x16_t vcntq_u8(uint8x16_t __p0) {
+ uint8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
+ uint8x16_t __ret;
+ __ret = (uint8x16_t) __builtin_neon_vcntq_v((int8x16_t)__rev0, 48);
+ __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai int8x16_t vcntq_s8(int8x16_t __p0) {
+ int8x16_t __ret;
+ __ret = (int8x16_t) __builtin_neon_vcntq_v((int8x16_t)__p0, 32);
+ return __ret;
+}
+#else
+__ai int8x16_t vcntq_s8(int8x16_t __p0) {
+ int8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
+ int8x16_t __ret;
+ __ret = (int8x16_t) __builtin_neon_vcntq_v((int8x16_t)__rev0, 32);
+ __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint8x8_t vcnt_u8(uint8x8_t __p0) {
+ uint8x8_t __ret;
+ __ret = (uint8x8_t) __builtin_neon_vcnt_v((int8x8_t)__p0, 16);
+ return __ret;
+}
+#else
+__ai uint8x8_t vcnt_u8(uint8x8_t __p0) {
+ uint8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
+ uint8x8_t __ret;
+ __ret = (uint8x8_t) __builtin_neon_vcnt_v((int8x8_t)__rev0, 16);
+ __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai int8x8_t vcnt_s8(int8x8_t __p0) {
+ int8x8_t __ret;
+ __ret = (int8x8_t) __builtin_neon_vcnt_v((int8x8_t)__p0, 0);
+ return __ret;
+}
+#else
+__ai int8x8_t vcnt_s8(int8x8_t __p0) {
+ int8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
+ int8x8_t __ret;
+ __ret = (int8x8_t) __builtin_neon_vcnt_v((int8x8_t)__rev0, 0);
+ __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
+ return __ret;
+}
+#endif
+
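+/* vcombine_*: concatenate two 64-bit vectors into one 128-bit vector; __p0
+ * supplies the low half and __p1 the high half. A minimal, hypothetical
+ * usage sketch (illustrative only; lo and hi are assumed uint8x8_t values):
+ *
+ *   uint8x16_t v = vcombine_u8(lo, hi);  // v[0..7] = lo, v[8..15] = hi
+ */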
+#ifdef __LITTLE_ENDIAN__
+__ai poly8x16_t vcombine_p8(poly8x8_t __p0, poly8x8_t __p1) {
+ poly8x16_t __ret;
+ __ret = __builtin_shufflevector(__p0, __p1, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15);
+ return __ret;
+}
+#else
+__ai poly8x16_t vcombine_p8(poly8x8_t __p0, poly8x8_t __p1) {
+ poly8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
+ poly8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
+ poly8x16_t __ret;
+ __ret = __builtin_shufflevector(__rev0, __rev1, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15);
+ __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai poly16x8_t vcombine_p16(poly16x4_t __p0, poly16x4_t __p1) {
+ poly16x8_t __ret;
+ __ret = __builtin_shufflevector(__p0, __p1, 0, 1, 2, 3, 4, 5, 6, 7);
+ return __ret;
+}
+#else
+__ai poly16x8_t vcombine_p16(poly16x4_t __p0, poly16x4_t __p1) {
+ poly16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
+ poly16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
+ poly16x8_t __ret;
+ __ret = __builtin_shufflevector(__rev0, __rev1, 0, 1, 2, 3, 4, 5, 6, 7);
+ __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint8x16_t vcombine_u8(uint8x8_t __p0, uint8x8_t __p1) {
+ uint8x16_t __ret;
+ __ret = __builtin_shufflevector(__p0, __p1, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15);
+ return __ret;
+}
+#else
+__ai uint8x16_t vcombine_u8(uint8x8_t __p0, uint8x8_t __p1) {
+ uint8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
+ uint8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
+ uint8x16_t __ret;
+ __ret = __builtin_shufflevector(__rev0, __rev1, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15);
+ __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
+ return __ret;
+}
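+/* The __noswap_ variants below are internal helpers defined only in the
+ * big-endian branch: callers that already hold lane-reversed values use
+ * them to combine halves without reversing and re-reversing the lanes. */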
+__ai uint8x16_t __noswap_vcombine_u8(uint8x8_t __p0, uint8x8_t __p1) {
+ uint8x16_t __ret;
+ __ret = __builtin_shufflevector(__p0, __p1, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint32x4_t vcombine_u32(uint32x2_t __p0, uint32x2_t __p1) {
+ uint32x4_t __ret;
+ __ret = __builtin_shufflevector(__p0, __p1, 0, 1, 2, 3);
+ return __ret;
+}
+#else
+__ai uint32x4_t vcombine_u32(uint32x2_t __p0, uint32x2_t __p1) {
+ uint32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
+ uint32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
+ uint32x4_t __ret;
+ __ret = __builtin_shufflevector(__rev0, __rev1, 0, 1, 2, 3);
+ __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
+ return __ret;
+}
+__ai uint32x4_t __noswap_vcombine_u32(uint32x2_t __p0, uint32x2_t __p1) {
+ uint32x4_t __ret;
+ __ret = __builtin_shufflevector(__p0, __p1, 0, 1, 2, 3);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint64x2_t vcombine_u64(uint64x1_t __p0, uint64x1_t __p1) {
+ uint64x2_t __ret;
+ __ret = __builtin_shufflevector(__p0, __p1, 0, 1);
+ return __ret;
+}
+#else
+__ai uint64x2_t vcombine_u64(uint64x1_t __p0, uint64x1_t __p1) {
+ uint64x2_t __ret;
+ __ret = __builtin_shufflevector(__p0, __p1, 0, 1);
+ __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint16x8_t vcombine_u16(uint16x4_t __p0, uint16x4_t __p1) {
+ uint16x8_t __ret;
+ __ret = __builtin_shufflevector(__p0, __p1, 0, 1, 2, 3, 4, 5, 6, 7);
+ return __ret;
+}
+#else
+__ai uint16x8_t vcombine_u16(uint16x4_t __p0, uint16x4_t __p1) {
+ uint16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
+ uint16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
+ uint16x8_t __ret;
+ __ret = __builtin_shufflevector(__rev0, __rev1, 0, 1, 2, 3, 4, 5, 6, 7);
+ __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
+ return __ret;
+}
+__ai uint16x8_t __noswap_vcombine_u16(uint16x4_t __p0, uint16x4_t __p1) {
+ uint16x8_t __ret;
+ __ret = __builtin_shufflevector(__p0, __p1, 0, 1, 2, 3, 4, 5, 6, 7);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai int8x16_t vcombine_s8(int8x8_t __p0, int8x8_t __p1) {
+ int8x16_t __ret;
+ __ret = __builtin_shufflevector(__p0, __p1, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15);
+ return __ret;
+}
+#else
+__ai int8x16_t vcombine_s8(int8x8_t __p0, int8x8_t __p1) {
+ int8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
+ int8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
+ int8x16_t __ret;
+ __ret = __builtin_shufflevector(__rev0, __rev1, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15);
+ __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
+ return __ret;
+}
+__ai int8x16_t __noswap_vcombine_s8(int8x8_t __p0, int8x8_t __p1) {
+ int8x16_t __ret;
+ __ret = __builtin_shufflevector(__p0, __p1, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai float32x4_t vcombine_f32(float32x2_t __p0, float32x2_t __p1) {
+ float32x4_t __ret;
+ __ret = __builtin_shufflevector(__p0, __p1, 0, 1, 2, 3);
+ return __ret;
+}
+#else
+__ai float32x4_t vcombine_f32(float32x2_t __p0, float32x2_t __p1) {
+ float32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
+ float32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
+ float32x4_t __ret;
+ __ret = __builtin_shufflevector(__rev0, __rev1, 0, 1, 2, 3);
+ __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
+ return __ret;
+}
+__ai float32x4_t __noswap_vcombine_f32(float32x2_t __p0, float32x2_t __p1) {
+ float32x4_t __ret;
+ __ret = __builtin_shufflevector(__p0, __p1, 0, 1, 2, 3);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai float16x8_t vcombine_f16(float16x4_t __p0, float16x4_t __p1) {
+ float16x8_t __ret;
+ __ret = __builtin_shufflevector(__p0, __p1, 0, 1, 2, 3, 4, 5, 6, 7);
+ return __ret;
+}
+#else
+__ai float16x8_t vcombine_f16(float16x4_t __p0, float16x4_t __p1) {
+ float16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
+ float16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
+ float16x8_t __ret;
+ __ret = __builtin_shufflevector(__rev0, __rev1, 0, 1, 2, 3, 4, 5, 6, 7);
+ __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
+ return __ret;
+}
+__ai float16x8_t __noswap_vcombine_f16(float16x4_t __p0, float16x4_t __p1) {
+ float16x8_t __ret;
+ __ret = __builtin_shufflevector(__p0, __p1, 0, 1, 2, 3, 4, 5, 6, 7);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai int32x4_t vcombine_s32(int32x2_t __p0, int32x2_t __p1) {
+ int32x4_t __ret;
+ __ret = __builtin_shufflevector(__p0, __p1, 0, 1, 2, 3);
+ return __ret;
+}
+#else
+__ai int32x4_t vcombine_s32(int32x2_t __p0, int32x2_t __p1) {
+ int32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
+ int32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
+ int32x4_t __ret;
+ __ret = __builtin_shufflevector(__rev0, __rev1, 0, 1, 2, 3);
+ __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
+ return __ret;
+}
+__ai int32x4_t __noswap_vcombine_s32(int32x2_t __p0, int32x2_t __p1) {
+ int32x4_t __ret;
+ __ret = __builtin_shufflevector(__p0, __p1, 0, 1, 2, 3);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai int64x2_t vcombine_s64(int64x1_t __p0, int64x1_t __p1) {
+ int64x2_t __ret;
+ __ret = __builtin_shufflevector(__p0, __p1, 0, 1);
+ return __ret;
+}
+#else
+__ai int64x2_t vcombine_s64(int64x1_t __p0, int64x1_t __p1) {
+ int64x2_t __ret;
+ __ret = __builtin_shufflevector(__p0, __p1, 0, 1);
+ __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai int16x8_t vcombine_s16(int16x4_t __p0, int16x4_t __p1) {
+ int16x8_t __ret;
+ __ret = __builtin_shufflevector(__p0, __p1, 0, 1, 2, 3, 4, 5, 6, 7);
+ return __ret;
+}
+#else
+__ai int16x8_t vcombine_s16(int16x4_t __p0, int16x4_t __p1) {
+ int16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
+ int16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
+ int16x8_t __ret;
+ __ret = __builtin_shufflevector(__rev0, __rev1, 0, 1, 2, 3, 4, 5, 6, 7);
+ __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
+ return __ret;
+}
+__ai int16x8_t __noswap_vcombine_s16(int16x4_t __p0, int16x4_t __p1) {
+ int16x8_t __ret;
+ __ret = __builtin_shufflevector(__p0, __p1, 0, 1, 2, 3, 4, 5, 6, 7);
+ return __ret;
+}
+#endif
+
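+/*
+ * vcreate_<t>: reinterpret the 64 bits of a uint64_t as a 64-bit vector.
+ * A plain bit-cast needs no lane reversal, so the little- and big-endian
+ * definitions below are identical.
+ */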
+#ifdef __LITTLE_ENDIAN__
+__ai poly8x8_t vcreate_p8(uint64_t __p0) {
+ poly8x8_t __ret;
+ __ret = (poly8x8_t)(__p0);
+ return __ret;
+}
+#else
+__ai poly8x8_t vcreate_p8(uint64_t __p0) {
+ poly8x8_t __ret;
+ __ret = (poly8x8_t)(__p0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai poly16x4_t vcreate_p16(uint64_t __p0) {
+ poly16x4_t __ret;
+ __ret = (poly16x4_t)(__p0);
+ return __ret;
+}
+#else
+__ai poly16x4_t vcreate_p16(uint64_t __p0) {
+ poly16x4_t __ret;
+ __ret = (poly16x4_t)(__p0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint8x8_t vcreate_u8(uint64_t __p0) {
+ uint8x8_t __ret;
+ __ret = (uint8x8_t)(__p0);
+ return __ret;
+}
+#else
+__ai uint8x8_t vcreate_u8(uint64_t __p0) {
+ uint8x8_t __ret;
+ __ret = (uint8x8_t)(__p0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint32x2_t vcreate_u32(uint64_t __p0) {
+ uint32x2_t __ret;
+ __ret = (uint32x2_t)(__p0);
+ return __ret;
+}
+#else
+__ai uint32x2_t vcreate_u32(uint64_t __p0) {
+ uint32x2_t __ret;
+ __ret = (uint32x2_t)(__p0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint64x1_t vcreate_u64(uint64_t __p0) {
+ uint64x1_t __ret;
+ __ret = (uint64x1_t)(__p0);
+ return __ret;
+}
+#else
+__ai uint64x1_t vcreate_u64(uint64_t __p0) {
+ uint64x1_t __ret;
+ __ret = (uint64x1_t)(__p0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint16x4_t vcreate_u16(uint64_t __p0) {
+ uint16x4_t __ret;
+ __ret = (uint16x4_t)(__p0);
+ return __ret;
+}
+#else
+__ai uint16x4_t vcreate_u16(uint64_t __p0) {
+ uint16x4_t __ret;
+ __ret = (uint16x4_t)(__p0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai int8x8_t vcreate_s8(uint64_t __p0) {
+ int8x8_t __ret;
+ __ret = (int8x8_t)(__p0);
+ return __ret;
+}
+#else
+__ai int8x8_t vcreate_s8(uint64_t __p0) {
+ int8x8_t __ret;
+ __ret = (int8x8_t)(__p0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai float32x2_t vcreate_f32(uint64_t __p0) {
+ float32x2_t __ret;
+ __ret = (float32x2_t)(__p0);
+ return __ret;
+}
+#else
+__ai float32x2_t vcreate_f32(uint64_t __p0) {
+ float32x2_t __ret;
+ __ret = (float32x2_t)(__p0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai float16x4_t vcreate_f16(uint64_t __p0) {
+ float16x4_t __ret;
+ __ret = (float16x4_t)(__p0);
+ return __ret;
+}
+#else
+__ai float16x4_t vcreate_f16(uint64_t __p0) {
+ float16x4_t __ret;
+ __ret = (float16x4_t)(__p0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai int32x2_t vcreate_s32(uint64_t __p0) {
+ int32x2_t __ret;
+ __ret = (int32x2_t)(__p0);
+ return __ret;
+}
+#else
+__ai int32x2_t vcreate_s32(uint64_t __p0) {
+ int32x2_t __ret;
+ __ret = (int32x2_t)(__p0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai int64x1_t vcreate_s64(uint64_t __p0) {
+ int64x1_t __ret;
+ __ret = (int64x1_t)(__p0);
+ return __ret;
+}
+#else
+__ai int64x1_t vcreate_s64(uint64_t __p0) {
+ int64x1_t __ret;
+ __ret = (int64x1_t)(__p0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai int16x4_t vcreate_s16(uint64_t __p0) {
+ int16x4_t __ret;
+ __ret = (int16x4_t)(__p0);
+ return __ret;
+}
+#else
+__ai int16x4_t vcreate_s16(uint64_t __p0) {
+ int16x4_t __ret;
+ __ret = (int16x4_t)(__p0);
+ return __ret;
+}
+#endif
+
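+/*
+ * vcvt family: conversions between integer and floating-point vectors and
+ * between float16 and float32. The trailing integer literal passed to the
+ * generic __builtin_neon_*_v builtins encodes the NEON element type. The
+ * vcvt*_n_* forms also take a fixed-point fraction-bit count that must be
+ * an integer constant expression, which is why they are macros.
+ */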
+#ifdef __LITTLE_ENDIAN__
+__ai float16x4_t vcvt_f16_f32(float32x4_t __p0) {
+ float16x4_t __ret;
+ __ret = (float16x4_t) __builtin_neon_vcvt_f16_f32((int8x16_t)__p0, 8);
+ return __ret;
+}
+#else
+__ai float16x4_t vcvt_f16_f32(float32x4_t __p0) {
+ float32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
+ float16x4_t __ret;
+ __ret = (float16x4_t) __builtin_neon_vcvt_f16_f32((int8x16_t)__rev0, 8);
+ __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
+ return __ret;
+}
+__ai float16x4_t __noswap_vcvt_f16_f32(float32x4_t __p0) {
+ float16x4_t __ret;
+ __ret = (float16x4_t) __builtin_neon_vcvt_f16_f32((int8x16_t)__p0, 8);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai float32x4_t vcvtq_f32_u32(uint32x4_t __p0) {
+ float32x4_t __ret;
+ __ret = (float32x4_t) __builtin_neon_vcvtq_f32_v((int8x16_t)__p0, 50);
+ return __ret;
+}
+#else
+__ai float32x4_t vcvtq_f32_u32(uint32x4_t __p0) {
+ uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
+ float32x4_t __ret;
+ __ret = (float32x4_t) __builtin_neon_vcvtq_f32_v((int8x16_t)__rev0, 50);
+ __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai float32x4_t vcvtq_f32_s32(int32x4_t __p0) {
+ float32x4_t __ret;
+ __ret = (float32x4_t) __builtin_neon_vcvtq_f32_v((int8x16_t)__p0, 34);
+ return __ret;
+}
+#else
+__ai float32x4_t vcvtq_f32_s32(int32x4_t __p0) {
+ int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
+ float32x4_t __ret;
+ __ret = (float32x4_t) __builtin_neon_vcvtq_f32_v((int8x16_t)__rev0, 34);
+ __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai float32x2_t vcvt_f32_u32(uint32x2_t __p0) {
+ float32x2_t __ret;
+ __ret = (float32x2_t) __builtin_neon_vcvt_f32_v((int8x8_t)__p0, 18);
+ return __ret;
+}
+#else
+__ai float32x2_t vcvt_f32_u32(uint32x2_t __p0) {
+ uint32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
+ float32x2_t __ret;
+ __ret = (float32x2_t) __builtin_neon_vcvt_f32_v((int8x8_t)__rev0, 18);
+ __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai float32x2_t vcvt_f32_s32(int32x2_t __p0) {
+ float32x2_t __ret;
+ __ret = (float32x2_t) __builtin_neon_vcvt_f32_v((int8x8_t)__p0, 2);
+ return __ret;
+}
+#else
+__ai float32x2_t vcvt_f32_s32(int32x2_t __p0) {
+ int32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
+ float32x2_t __ret;
+ __ret = (float32x2_t) __builtin_neon_vcvt_f32_v((int8x8_t)__rev0, 2);
+ __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai float32x4_t vcvt_f32_f16(float16x4_t __p0) {
+ float32x4_t __ret;
+ __ret = (float32x4_t) __builtin_neon_vcvt_f32_f16((int8x8_t)__p0, 41);
+ return __ret;
+}
+#else
+__ai float32x4_t vcvt_f32_f16(float16x4_t __p0) {
+ float16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
+ float32x4_t __ret;
+ __ret = (float32x4_t) __builtin_neon_vcvt_f32_f16((int8x8_t)__rev0, 41);
+ __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
+ return __ret;
+}
+__ai float32x4_t __noswap_vcvt_f32_f16(float16x4_t __p0) {
+ float32x4_t __ret;
+ __ret = (float32x4_t) __builtin_neon_vcvt_f32_f16((int8x8_t)__p0, 41);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vcvtq_n_f32_u32(__p0, __p1) __extension__ ({ \
+ uint32x4_t __s0 = __p0; \
+ float32x4_t __ret; \
+ __ret = (float32x4_t) __builtin_neon_vcvtq_n_f32_v((int8x16_t)__s0, __p1, 50); \
+ __ret; \
+})
+#else
+#define vcvtq_n_f32_u32(__p0, __p1) __extension__ ({ \
+ uint32x4_t __s0 = __p0; \
+ uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
+ float32x4_t __ret; \
+ __ret = (float32x4_t) __builtin_neon_vcvtq_n_f32_v((int8x16_t)__rev0, __p1, 50); \
+ __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
+ __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vcvtq_n_f32_s32(__p0, __p1) __extension__ ({ \
+ int32x4_t __s0 = __p0; \
+ float32x4_t __ret; \
+ __ret = (float32x4_t) __builtin_neon_vcvtq_n_f32_v((int8x16_t)__s0, __p1, 34); \
+ __ret; \
+})
+#else
+#define vcvtq_n_f32_s32(__p0, __p1) __extension__ ({ \
+ int32x4_t __s0 = __p0; \
+ int32x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
+ float32x4_t __ret; \
+ __ret = (float32x4_t) __builtin_neon_vcvtq_n_f32_v((int8x16_t)__rev0, __p1, 34); \
+ __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
+ __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vcvt_n_f32_u32(__p0, __p1) __extension__ ({ \
+ uint32x2_t __s0 = __p0; \
+ float32x2_t __ret; \
+ __ret = (float32x2_t) __builtin_neon_vcvt_n_f32_v((int8x8_t)__s0, __p1, 18); \
+ __ret; \
+})
+#else
+#define vcvt_n_f32_u32(__p0, __p1) __extension__ ({ \
+ uint32x2_t __s0 = __p0; \
+ uint32x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
+ float32x2_t __ret; \
+ __ret = (float32x2_t) __builtin_neon_vcvt_n_f32_v((int8x8_t)__rev0, __p1, 18); \
+ __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
+ __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vcvt_n_f32_s32(__p0, __p1) __extension__ ({ \
+ int32x2_t __s0 = __p0; \
+ float32x2_t __ret; \
+ __ret = (float32x2_t) __builtin_neon_vcvt_n_f32_v((int8x8_t)__s0, __p1, 2); \
+ __ret; \
+})
+#else
+#define vcvt_n_f32_s32(__p0, __p1) __extension__ ({ \
+ int32x2_t __s0 = __p0; \
+ int32x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
+ float32x2_t __ret; \
+ __ret = (float32x2_t) __builtin_neon_vcvt_n_f32_v((int8x8_t)__rev0, __p1, 2); \
+ __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
+ __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vcvtq_n_s32_f32(__p0, __p1) __extension__ ({ \
+ float32x4_t __s0 = __p0; \
+ int32x4_t __ret; \
+ __ret = (int32x4_t) __builtin_neon_vcvtq_n_s32_v((int8x16_t)__s0, __p1, 34); \
+ __ret; \
+})
+#else
+#define vcvtq_n_s32_f32(__p0, __p1) __extension__ ({ \
+ float32x4_t __s0 = __p0; \
+ float32x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
+ int32x4_t __ret; \
+ __ret = (int32x4_t) __builtin_neon_vcvtq_n_s32_v((int8x16_t)__rev0, __p1, 34); \
+ __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
+ __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vcvt_n_s32_f32(__p0, __p1) __extension__ ({ \
+ float32x2_t __s0 = __p0; \
+ int32x2_t __ret; \
+ __ret = (int32x2_t) __builtin_neon_vcvt_n_s32_v((int8x8_t)__s0, __p1, 2); \
+ __ret; \
+})
+#else
+#define vcvt_n_s32_f32(__p0, __p1) __extension__ ({ \
+ float32x2_t __s0 = __p0; \
+ float32x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
+ int32x2_t __ret; \
+ __ret = (int32x2_t) __builtin_neon_vcvt_n_s32_v((int8x8_t)__rev0, __p1, 2); \
+ __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
+ __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vcvtq_n_u32_f32(__p0, __p1) __extension__ ({ \
+ float32x4_t __s0 = __p0; \
+ uint32x4_t __ret; \
+ __ret = (uint32x4_t) __builtin_neon_vcvtq_n_u32_v((int8x16_t)__s0, __p1, 50); \
+ __ret; \
+})
+#else
+#define vcvtq_n_u32_f32(__p0, __p1) __extension__ ({ \
+ float32x4_t __s0 = __p0; \
+ float32x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
+ uint32x4_t __ret; \
+ __ret = (uint32x4_t) __builtin_neon_vcvtq_n_u32_v((int8x16_t)__rev0, __p1, 50); \
+ __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
+ __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vcvt_n_u32_f32(__p0, __p1) __extension__ ({ \
+ float32x2_t __s0 = __p0; \
+ uint32x2_t __ret; \
+ __ret = (uint32x2_t) __builtin_neon_vcvt_n_u32_v((int8x8_t)__s0, __p1, 18); \
+ __ret; \
+})
+#else
+#define vcvt_n_u32_f32(__p0, __p1) __extension__ ({ \
+ float32x2_t __s0 = __p0; \
+ float32x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
+ uint32x2_t __ret; \
+ __ret = (uint32x2_t) __builtin_neon_vcvt_n_u32_v((int8x8_t)__rev0, __p1, 18); \
+ __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
+ __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai int32x4_t vcvtq_s32_f32(float32x4_t __p0) {
+ int32x4_t __ret;
+ __ret = (int32x4_t) __builtin_neon_vcvtq_s32_v((int8x16_t)__p0, 34);
+ return __ret;
+}
+#else
+__ai int32x4_t vcvtq_s32_f32(float32x4_t __p0) {
+ float32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
+ int32x4_t __ret;
+ __ret = (int32x4_t) __builtin_neon_vcvtq_s32_v((int8x16_t)__rev0, 34);
+ __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai int32x2_t vcvt_s32_f32(float32x2_t __p0) {
+ int32x2_t __ret;
+ __ret = (int32x2_t) __builtin_neon_vcvt_s32_v((int8x8_t)__p0, 2);
+ return __ret;
+}
+#else
+__ai int32x2_t vcvt_s32_f32(float32x2_t __p0) {
+ float32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
+ int32x2_t __ret;
+ __ret = (int32x2_t) __builtin_neon_vcvt_s32_v((int8x8_t)__rev0, 2);
+ __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint32x4_t vcvtq_u32_f32(float32x4_t __p0) {
+ uint32x4_t __ret;
+ __ret = (uint32x4_t) __builtin_neon_vcvtq_u32_v((int8x16_t)__p0, 50);
+ return __ret;
+}
+#else
+__ai uint32x4_t vcvtq_u32_f32(float32x4_t __p0) {
+ float32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
+ uint32x4_t __ret;
+ __ret = (uint32x4_t) __builtin_neon_vcvtq_u32_v((int8x16_t)__rev0, 50);
+ __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint32x2_t vcvt_u32_f32(float32x2_t __p0) {
+ uint32x2_t __ret;
+ __ret = (uint32x2_t) __builtin_neon_vcvt_u32_v((int8x8_t)__p0, 18);
+ return __ret;
+}
+#else
+__ai uint32x2_t vcvt_u32_f32(float32x2_t __p0) {
+ float32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
+ uint32x2_t __ret;
+ __ret = (uint32x2_t) __builtin_neon_vcvt_u32_v((int8x8_t)__rev0, 18);
+ __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
+ return __ret;
+}
+#endif
+
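+/*
+ * vdup_lane / vdupq_lane: broadcast one lane of a 64-bit source across all
+ * lanes of the result. The lane index becomes shufflevector indices and so
+ * must be a constant expression; hence these are macros, not functions.
+ */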
+#ifdef __LITTLE_ENDIAN__
+#define vdup_lane_p8(__p0, __p1) __extension__ ({ \
+ poly8x8_t __s0 = __p0; \
+ poly8x8_t __ret; \
+ __ret = __builtin_shufflevector(__s0, __s0, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1); \
+ __ret; \
+})
+#else
+#define vdup_lane_p8(__p0, __p1) __extension__ ({ \
+ poly8x8_t __s0 = __p0; \
+ poly8x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
+ poly8x8_t __ret; \
+ __ret = __builtin_shufflevector(__rev0, __rev0, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1); \
+ __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
+ __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vdup_lane_p16(__p0, __p1) __extension__ ({ \
+ poly16x4_t __s0 = __p0; \
+ poly16x4_t __ret; \
+ __ret = __builtin_shufflevector(__s0, __s0, __p1, __p1, __p1, __p1); \
+ __ret; \
+})
+#else
+#define vdup_lane_p16(__p0, __p1) __extension__ ({ \
+ poly16x4_t __s0 = __p0; \
+ poly16x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
+ poly16x4_t __ret; \
+ __ret = __builtin_shufflevector(__rev0, __rev0, __p1, __p1, __p1, __p1); \
+ __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
+ __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vdupq_lane_p8(__p0, __p1) __extension__ ({ \
+ poly8x8_t __s0 = __p0; \
+ poly8x16_t __ret; \
+ __ret = __builtin_shufflevector(__s0, __s0, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1); \
+ __ret; \
+})
+#else
+#define vdupq_lane_p8(__p0, __p1) __extension__ ({ \
+ poly8x8_t __s0 = __p0; \
+ poly8x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
+ poly8x16_t __ret; \
+ __ret = __builtin_shufflevector(__rev0, __rev0, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1); \
+ __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
+ __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vdupq_lane_p16(__p0, __p1) __extension__ ({ \
+ poly16x4_t __s0 = __p0; \
+ poly16x8_t __ret; \
+ __ret = __builtin_shufflevector(__s0, __s0, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1); \
+ __ret; \
+})
+#else
+#define vdupq_lane_p16(__p0, __p1) __extension__ ({ \
+ poly16x4_t __s0 = __p0; \
+ poly16x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
+ poly16x8_t __ret; \
+ __ret = __builtin_shufflevector(__rev0, __rev0, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1); \
+ __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
+ __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vdupq_lane_u8(__p0, __p1) __extension__ ({ \
+ uint8x8_t __s0 = __p0; \
+ uint8x16_t __ret; \
+ __ret = __builtin_shufflevector(__s0, __s0, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1); \
+ __ret; \
+})
+#else
+#define vdupq_lane_u8(__p0, __p1) __extension__ ({ \
+ uint8x8_t __s0 = __p0; \
+ uint8x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
+ uint8x16_t __ret; \
+ __ret = __builtin_shufflevector(__rev0, __rev0, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1); \
+ __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
+ __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vdupq_lane_u32(__p0, __p1) __extension__ ({ \
+ uint32x2_t __s0 = __p0; \
+ uint32x4_t __ret; \
+ __ret = __builtin_shufflevector(__s0, __s0, __p1, __p1, __p1, __p1); \
+ __ret; \
+})
+#else
+#define vdupq_lane_u32(__p0, __p1) __extension__ ({ \
+ uint32x2_t __s0 = __p0; \
+ uint32x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
+ uint32x4_t __ret; \
+ __ret = __builtin_shufflevector(__rev0, __rev0, __p1, __p1, __p1, __p1); \
+ __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
+ __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vdupq_lane_u64(__p0, __p1) __extension__ ({ \
+ uint64x1_t __s0 = __p0; \
+ uint64x2_t __ret; \
+ __ret = __builtin_shufflevector(__s0, __s0, __p1, __p1); \
+ __ret; \
+})
+#else
+#define vdupq_lane_u64(__p0, __p1) __extension__ ({ \
+ uint64x1_t __s0 = __p0; \
+ uint64x2_t __ret; \
+ __ret = __builtin_shufflevector(__s0, __s0, __p1, __p1); \
+ __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
+ __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vdupq_lane_u16(__p0, __p1) __extension__ ({ \
+ uint16x4_t __s0 = __p0; \
+ uint16x8_t __ret; \
+ __ret = __builtin_shufflevector(__s0, __s0, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1); \
+ __ret; \
+})
+#else
+#define vdupq_lane_u16(__p0, __p1) __extension__ ({ \
+ uint16x4_t __s0 = __p0; \
+ uint16x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
+ uint16x8_t __ret; \
+ __ret = __builtin_shufflevector(__rev0, __rev0, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1); \
+ __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
+ __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vdupq_lane_s8(__p0, __p1) __extension__ ({ \
+ int8x8_t __s0 = __p0; \
+ int8x16_t __ret; \
+ __ret = __builtin_shufflevector(__s0, __s0, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1); \
+ __ret; \
+})
+#else
+#define vdupq_lane_s8(__p0, __p1) __extension__ ({ \
+ int8x8_t __s0 = __p0; \
+ int8x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
+ int8x16_t __ret; \
+ __ret = __builtin_shufflevector(__rev0, __rev0, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1); \
+ __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
+ __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vdupq_lane_f32(__p0, __p1) __extension__ ({ \
+ float32x2_t __s0 = __p0; \
+ float32x4_t __ret; \
+ __ret = __builtin_shufflevector(__s0, __s0, __p1, __p1, __p1, __p1); \
+ __ret; \
+})
+#else
+#define vdupq_lane_f32(__p0, __p1) __extension__ ({ \
+ float32x2_t __s0 = __p0; \
+ float32x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
+ float32x4_t __ret; \
+ __ret = __builtin_shufflevector(__rev0, __rev0, __p1, __p1, __p1, __p1); \
+ __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
+ __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vdupq_lane_s32(__p0, __p1) __extension__ ({ \
+ int32x2_t __s0 = __p0; \
+ int32x4_t __ret; \
+ __ret = __builtin_shufflevector(__s0, __s0, __p1, __p1, __p1, __p1); \
+ __ret; \
+})
+#else
+#define vdupq_lane_s32(__p0, __p1) __extension__ ({ \
+ int32x2_t __s0 = __p0; \
+ int32x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
+ int32x4_t __ret; \
+ __ret = __builtin_shufflevector(__rev0, __rev0, __p1, __p1, __p1, __p1); \
+ __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
+ __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vdupq_lane_s64(__p0, __p1) __extension__ ({ \
+ int64x1_t __s0 = __p0; \
+ int64x2_t __ret; \
+ __ret = __builtin_shufflevector(__s0, __s0, __p1, __p1); \
+ __ret; \
+})
+#else
+#define vdupq_lane_s64(__p0, __p1) __extension__ ({ \
+ int64x1_t __s0 = __p0; \
+ int64x2_t __ret; \
+ __ret = __builtin_shufflevector(__s0, __s0, __p1, __p1); \
+ __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
+ __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vdupq_lane_s16(__p0, __p1) __extension__ ({ \
+ int16x4_t __s0 = __p0; \
+ int16x8_t __ret; \
+ __ret = __builtin_shufflevector(__s0, __s0, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1); \
+ __ret; \
+})
+#else
+#define vdupq_lane_s16(__p0, __p1) __extension__ ({ \
+ int16x4_t __s0 = __p0; \
+ int16x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
+ int16x8_t __ret; \
+ __ret = __builtin_shufflevector(__rev0, __rev0, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1); \
+ __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
+ __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vdup_lane_u8(__p0, __p1) __extension__ ({ \
+ uint8x8_t __s0 = __p0; \
+ uint8x8_t __ret; \
+ __ret = __builtin_shufflevector(__s0, __s0, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1); \
+ __ret; \
+})
+#else
+#define vdup_lane_u8(__p0, __p1) __extension__ ({ \
+ uint8x8_t __s0 = __p0; \
+ uint8x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
+ uint8x8_t __ret; \
+ __ret = __builtin_shufflevector(__rev0, __rev0, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1); \
+ __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
+ __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vdup_lane_u32(__p0, __p1) __extension__ ({ \
+ uint32x2_t __s0 = __p0; \
+ uint32x2_t __ret; \
+ __ret = __builtin_shufflevector(__s0, __s0, __p1, __p1); \
+ __ret; \
+})
+#else
+#define vdup_lane_u32(__p0, __p1) __extension__ ({ \
+ uint32x2_t __s0 = __p0; \
+ uint32x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
+ uint32x2_t __ret; \
+ __ret = __builtin_shufflevector(__rev0, __rev0, __p1, __p1); \
+ __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
+ __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vdup_lane_u64(__p0, __p1) __extension__ ({ \
+ uint64x1_t __s0 = __p0; \
+ uint64x1_t __ret; \
+ __ret = __builtin_shufflevector(__s0, __s0, __p1); \
+ __ret; \
+})
+#else
+#define vdup_lane_u64(__p0, __p1) __extension__ ({ \
+ uint64x1_t __s0 = __p0; \
+ uint64x1_t __ret; \
+ __ret = __builtin_shufflevector(__s0, __s0, __p1); \
+ __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vdup_lane_u16(__p0, __p1) __extension__ ({ \
+ uint16x4_t __s0 = __p0; \
+ uint16x4_t __ret; \
+ __ret = __builtin_shufflevector(__s0, __s0, __p1, __p1, __p1, __p1); \
+ __ret; \
+})
+#else
+#define vdup_lane_u16(__p0, __p1) __extension__ ({ \
+ uint16x4_t __s0 = __p0; \
+ uint16x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
+ uint16x4_t __ret; \
+ __ret = __builtin_shufflevector(__rev0, __rev0, __p1, __p1, __p1, __p1); \
+ __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
+ __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vdup_lane_s8(__p0, __p1) __extension__ ({ \
+ int8x8_t __s0 = __p0; \
+ int8x8_t __ret; \
+ __ret = __builtin_shufflevector(__s0, __s0, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1); \
+ __ret; \
+})
+#else
+#define vdup_lane_s8(__p0, __p1) __extension__ ({ \
+ int8x8_t __s0 = __p0; \
+ int8x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
+ int8x8_t __ret; \
+ __ret = __builtin_shufflevector(__rev0, __rev0, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1); \
+ __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
+ __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vdup_lane_f32(__p0, __p1) __extension__ ({ \
+ float32x2_t __s0 = __p0; \
+ float32x2_t __ret; \
+ __ret = __builtin_shufflevector(__s0, __s0, __p1, __p1); \
+ __ret; \
+})
+#else
+#define vdup_lane_f32(__p0, __p1) __extension__ ({ \
+ float32x2_t __s0 = __p0; \
+ float32x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
+ float32x2_t __ret; \
+ __ret = __builtin_shufflevector(__rev0, __rev0, __p1, __p1); \
+ __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
+ __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vdup_lane_s32(__p0, __p1) __extension__ ({ \
+ int32x2_t __s0 = __p0; \
+ int32x2_t __ret; \
+ __ret = __builtin_shufflevector(__s0, __s0, __p1, __p1); \
+ __ret; \
+})
+#else
+#define vdup_lane_s32(__p0, __p1) __extension__ ({ \
+ int32x2_t __s0 = __p0; \
+ int32x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
+ int32x2_t __ret; \
+ __ret = __builtin_shufflevector(__rev0, __rev0, __p1, __p1); \
+ __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
+ __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vdup_lane_s64(__p0, __p1) __extension__ ({ \
+ int64x1_t __s0 = __p0; \
+ int64x1_t __ret; \
+ __ret = __builtin_shufflevector(__s0, __s0, __p1); \
+ __ret; \
+})
+#else
+#define vdup_lane_s64(__p0, __p1) __extension__ ({ \
+ int64x1_t __s0 = __p0; \
+ int64x1_t __ret; \
+ __ret = __builtin_shufflevector(__s0, __s0, __p1); \
+ __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vdup_lane_s16(__p0, __p1) __extension__ ({ \
+ int16x4_t __s0 = __p0; \
+ int16x4_t __ret; \
+ __ret = __builtin_shufflevector(__s0, __s0, __p1, __p1, __p1, __p1); \
+ __ret; \
+})
+#else
+#define vdup_lane_s16(__p0, __p1) __extension__ ({ \
+ int16x4_t __s0 = __p0; \
+ int16x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
+ int16x4_t __ret; \
+ __ret = __builtin_shufflevector(__rev0, __rev0, __p1, __p1, __p1, __p1); \
+ __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
+ __ret; \
+})
+#endif
+
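+/*
+ * vdup_n / vdupq_n: broadcast a scalar to every lane via a vector
+ * initializer. Since all lanes hold the same value, the lane reversal in
+ * the big-endian variants is semantically a no-op, kept to match the
+ * pattern used throughout this generated header.
+ */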
+#ifdef __LITTLE_ENDIAN__
+__ai poly8x8_t vdup_n_p8(poly8_t __p0) {
+ poly8x8_t __ret;
+ __ret = (poly8x8_t) {__p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0};
+ return __ret;
+}
+#else
+__ai poly8x8_t vdup_n_p8(poly8_t __p0) {
+ poly8x8_t __ret;
+ __ret = (poly8x8_t) {__p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0};
+ __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai poly16x4_t vdup_n_p16(poly16_t __p0) {
+ poly16x4_t __ret;
+ __ret = (poly16x4_t) {__p0, __p0, __p0, __p0};
+ return __ret;
+}
+#else
+__ai poly16x4_t vdup_n_p16(poly16_t __p0) {
+ poly16x4_t __ret;
+ __ret = (poly16x4_t) {__p0, __p0, __p0, __p0};
+ __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai poly8x16_t vdupq_n_p8(poly8_t __p0) {
+ poly8x16_t __ret;
+ __ret = (poly8x16_t) {__p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0};
+ return __ret;
+}
+#else
+__ai poly8x16_t vdupq_n_p8(poly8_t __p0) {
+ poly8x16_t __ret;
+ __ret = (poly8x16_t) {__p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0};
+ __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai poly16x8_t vdupq_n_p16(poly16_t __p0) {
+ poly16x8_t __ret;
+ __ret = (poly16x8_t) {__p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0};
+ return __ret;
+}
+#else
+__ai poly16x8_t vdupq_n_p16(poly16_t __p0) {
+ poly16x8_t __ret;
+ __ret = (poly16x8_t) {__p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0};
+ __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint8x16_t vdupq_n_u8(uint8_t __p0) {
+ uint8x16_t __ret;
+ __ret = (uint8x16_t) {__p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0};
+ return __ret;
+}
+#else
+__ai uint8x16_t vdupq_n_u8(uint8_t __p0) {
+ uint8x16_t __ret;
+ __ret = (uint8x16_t) {__p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0};
+ __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint32x4_t vdupq_n_u32(uint32_t __p0) {
+ uint32x4_t __ret;
+ __ret = (uint32x4_t) {__p0, __p0, __p0, __p0};
+ return __ret;
+}
+#else
+__ai uint32x4_t vdupq_n_u32(uint32_t __p0) {
+ uint32x4_t __ret;
+ __ret = (uint32x4_t) {__p0, __p0, __p0, __p0};
+ __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint64x2_t vdupq_n_u64(uint64_t __p0) {
+ uint64x2_t __ret;
+ __ret = (uint64x2_t) {__p0, __p0};
+ return __ret;
+}
+#else
+__ai uint64x2_t vdupq_n_u64(uint64_t __p0) {
+ uint64x2_t __ret;
+ __ret = (uint64x2_t) {__p0, __p0};
+ __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint16x8_t vdupq_n_u16(uint16_t __p0) {
+ uint16x8_t __ret;
+ __ret = (uint16x8_t) {__p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0};
+ return __ret;
+}
+#else
+__ai uint16x8_t vdupq_n_u16(uint16_t __p0) {
+ uint16x8_t __ret;
+ __ret = (uint16x8_t) {__p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0};
+ __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai int8x16_t vdupq_n_s8(int8_t __p0) {
+ int8x16_t __ret;
+ __ret = (int8x16_t) {__p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0};
+ return __ret;
+}
+#else
+__ai int8x16_t vdupq_n_s8(int8_t __p0) {
+ int8x16_t __ret;
+ __ret = (int8x16_t) {__p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0};
+ __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai float32x4_t vdupq_n_f32(float32_t __p0) {
+ float32x4_t __ret;
+ __ret = (float32x4_t) {__p0, __p0, __p0, __p0};
+ return __ret;
+}
+#else
+__ai float32x4_t vdupq_n_f32(float32_t __p0) {
+ float32x4_t __ret;
+ __ret = (float32x4_t) {__p0, __p0, __p0, __p0};
+ __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vdupq_n_f16(__p0) __extension__ ({ \
+ float16_t __s0 = __p0; \
+ float16x8_t __ret; \
+ __ret = (float16x8_t) {__s0, __s0, __s0, __s0, __s0, __s0, __s0, __s0}; \
+ __ret; \
+})
+#else
+#define vdupq_n_f16(__p0) __extension__ ({ \
+ float16_t __s0 = __p0; \
+ float16x8_t __ret; \
+ __ret = (float16x8_t) {__s0, __s0, __s0, __s0, __s0, __s0, __s0, __s0}; \
+ __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
+ __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai int32x4_t vdupq_n_s32(int32_t __p0) {
+ int32x4_t __ret;
+ __ret = (int32x4_t) {__p0, __p0, __p0, __p0};
+ return __ret;
+}
+#else
+__ai int32x4_t vdupq_n_s32(int32_t __p0) {
+ int32x4_t __ret;
+ __ret = (int32x4_t) {__p0, __p0, __p0, __p0};
+ __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai int64x2_t vdupq_n_s64(int64_t __p0) {
+ int64x2_t __ret;
+ __ret = (int64x2_t) {__p0, __p0};
+ return __ret;
+}
+#else
+__ai int64x2_t vdupq_n_s64(int64_t __p0) {
+ int64x2_t __ret;
+ __ret = (int64x2_t) {__p0, __p0};
+ __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai int16x8_t vdupq_n_s16(int16_t __p0) {
+ int16x8_t __ret;
+ __ret = (int16x8_t) {__p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0};
+ return __ret;
+}
+#else
+__ai int16x8_t vdupq_n_s16(int16_t __p0) {
+ int16x8_t __ret;
+ __ret = (int16x8_t) {__p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0};
+ __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint8x8_t vdup_n_u8(uint8_t __p0) {
+ uint8x8_t __ret;
+ __ret = (uint8x8_t) {__p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0};
+ return __ret;
+}
+#else
+__ai uint8x8_t vdup_n_u8(uint8_t __p0) {
+ uint8x8_t __ret;
+ __ret = (uint8x8_t) {__p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0};
+ __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint32x2_t vdup_n_u32(uint32_t __p0) {
+ uint32x2_t __ret;
+ __ret = (uint32x2_t) {__p0, __p0};
+ return __ret;
+}
+#else
+__ai uint32x2_t vdup_n_u32(uint32_t __p0) {
+ uint32x2_t __ret;
+ __ret = (uint32x2_t) {__p0, __p0};
+ __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint64x1_t vdup_n_u64(uint64_t __p0) {
+ uint64x1_t __ret;
+ __ret = (uint64x1_t) {__p0};
+ return __ret;
+}
+#else
+__ai uint64x1_t vdup_n_u64(uint64_t __p0) {
+ uint64x1_t __ret;
+ __ret = (uint64x1_t) {__p0};
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint16x4_t vdup_n_u16(uint16_t __p0) {
+ uint16x4_t __ret;
+ __ret = (uint16x4_t) {__p0, __p0, __p0, __p0};
+ return __ret;
+}
+#else
+__ai uint16x4_t vdup_n_u16(uint16_t __p0) {
+ uint16x4_t __ret;
+ __ret = (uint16x4_t) {__p0, __p0, __p0, __p0};
+ __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai int8x8_t vdup_n_s8(int8_t __p0) {
+ int8x8_t __ret;
+ __ret = (int8x8_t) {__p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0};
+ return __ret;
+}
+#else
+__ai int8x8_t vdup_n_s8(int8_t __p0) {
+ int8x8_t __ret;
+ __ret = (int8x8_t) {__p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0};
+ __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai float32x2_t vdup_n_f32(float32_t __p0) {
+ float32x2_t __ret;
+ __ret = (float32x2_t) {__p0, __p0};
+ return __ret;
+}
+#else
+__ai float32x2_t vdup_n_f32(float32_t __p0) {
+ float32x2_t __ret;
+ __ret = (float32x2_t) {__p0, __p0};
+ __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vdup_n_f16(__p0) __extension__ ({ \
+ float16_t __s0 = __p0; \
+ float16x4_t __ret; \
+ __ret = (float16x4_t) {__s0, __s0, __s0, __s0}; \
+ __ret; \
+})
+#else
+#define vdup_n_f16(__p0) __extension__ ({ \
+ float16_t __s0 = __p0; \
+ float16x4_t __ret; \
+ __ret = (float16x4_t) {__s0, __s0, __s0, __s0}; \
+ __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
+ __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai int32x2_t vdup_n_s32(int32_t __p0) {
+ int32x2_t __ret;
+ __ret = (int32x2_t) {__p0, __p0};
+ return __ret;
+}
+#else
+__ai int32x2_t vdup_n_s32(int32_t __p0) {
+ int32x2_t __ret;
+ __ret = (int32x2_t) {__p0, __p0};
+ __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai int64x1_t vdup_n_s64(int64_t __p0) {
+ int64x1_t __ret;
+ __ret = (int64x1_t) {__p0};
+ return __ret;
+}
+#else
+__ai int64x1_t vdup_n_s64(int64_t __p0) {
+ int64x1_t __ret;
+ __ret = (int64x1_t) {__p0};
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai int16x4_t vdup_n_s16(int16_t __p0) {
+ int16x4_t __ret;
+ __ret = (int16x4_t) {__p0, __p0, __p0, __p0};
+ return __ret;
+}
+#else
+__ai int16x4_t vdup_n_s16(int16_t __p0) {
+ int16x4_t __ret;
+ __ret = (int16x4_t) {__p0, __p0, __p0, __p0};
+ __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
+ return __ret;
+}
+#endif
+
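+/*
+ * veor family: bitwise exclusive-OR using the vector ^ operator, which the
+ * compiler lowers to VEOR. XOR is purely lane-wise; single-element (x1)
+ * types need no reversal, so their two branches are identical.
+ */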
+#ifdef __LITTLE_ENDIAN__
+__ai uint8x16_t veorq_u8(uint8x16_t __p0, uint8x16_t __p1) {
+ uint8x16_t __ret;
+ __ret = __p0 ^ __p1;
+ return __ret;
+}
+#else
+__ai uint8x16_t veorq_u8(uint8x16_t __p0, uint8x16_t __p1) {
+ uint8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
+ uint8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
+ uint8x16_t __ret;
+ __ret = __rev0 ^ __rev1;
+ __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint32x4_t veorq_u32(uint32x4_t __p0, uint32x4_t __p1) {
+ uint32x4_t __ret;
+ __ret = __p0 ^ __p1;
+ return __ret;
+}
+#else
+__ai uint32x4_t veorq_u32(uint32x4_t __p0, uint32x4_t __p1) {
+ uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
+ uint32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
+ uint32x4_t __ret;
+ __ret = __rev0 ^ __rev1;
+ __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint64x2_t veorq_u64(uint64x2_t __p0, uint64x2_t __p1) {
+ uint64x2_t __ret;
+ __ret = __p0 ^ __p1;
+ return __ret;
+}
+#else
+__ai uint64x2_t veorq_u64(uint64x2_t __p0, uint64x2_t __p1) {
+ uint64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
+ uint64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
+ uint64x2_t __ret;
+ __ret = __rev0 ^ __rev1;
+ __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint16x8_t veorq_u16(uint16x8_t __p0, uint16x8_t __p1) {
+ uint16x8_t __ret;
+ __ret = __p0 ^ __p1;
+ return __ret;
+}
+#else
+__ai uint16x8_t veorq_u16(uint16x8_t __p0, uint16x8_t __p1) {
+ uint16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
+ uint16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
+ uint16x8_t __ret;
+ __ret = __rev0 ^ __rev1;
+ __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai int8x16_t veorq_s8(int8x16_t __p0, int8x16_t __p1) {
+ int8x16_t __ret;
+ __ret = __p0 ^ __p1;
+ return __ret;
+}
+#else
+__ai int8x16_t veorq_s8(int8x16_t __p0, int8x16_t __p1) {
+ int8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
+ int8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
+ int8x16_t __ret;
+ __ret = __rev0 ^ __rev1;
+ __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai int32x4_t veorq_s32(int32x4_t __p0, int32x4_t __p1) {
+ int32x4_t __ret;
+ __ret = __p0 ^ __p1;
+ return __ret;
+}
+#else
+__ai int32x4_t veorq_s32(int32x4_t __p0, int32x4_t __p1) {
+ int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
+ int32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
+ int32x4_t __ret;
+ __ret = __rev0 ^ __rev1;
+ __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai int64x2_t veorq_s64(int64x2_t __p0, int64x2_t __p1) {
+ int64x2_t __ret;
+ __ret = __p0 ^ __p1;
+ return __ret;
+}
+#else
+__ai int64x2_t veorq_s64(int64x2_t __p0, int64x2_t __p1) {
+ int64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
+ int64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
+ int64x2_t __ret;
+ __ret = __rev0 ^ __rev1;
+ __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai int16x8_t veorq_s16(int16x8_t __p0, int16x8_t __p1) {
+ int16x8_t __ret;
+ __ret = __p0 ^ __p1;
+ return __ret;
+}
+#else
+__ai int16x8_t veorq_s16(int16x8_t __p0, int16x8_t __p1) {
+ int16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
+ int16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
+ int16x8_t __ret;
+ __ret = __rev0 ^ __rev1;
+ __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint8x8_t veor_u8(uint8x8_t __p0, uint8x8_t __p1) {
+ uint8x8_t __ret;
+ __ret = __p0 ^ __p1;
+ return __ret;
+}
+#else
+__ai uint8x8_t veor_u8(uint8x8_t __p0, uint8x8_t __p1) {
+ uint8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
+ uint8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
+ uint8x8_t __ret;
+ __ret = __rev0 ^ __rev1;
+ __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint32x2_t veor_u32(uint32x2_t __p0, uint32x2_t __p1) {
+ uint32x2_t __ret;
+ __ret = __p0 ^ __p1;
+ return __ret;
+}
+#else
+__ai uint32x2_t veor_u32(uint32x2_t __p0, uint32x2_t __p1) {
+ uint32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
+ uint32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
+ uint32x2_t __ret;
+ __ret = __rev0 ^ __rev1;
+ __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint64x1_t veor_u64(uint64x1_t __p0, uint64x1_t __p1) {
+ uint64x1_t __ret;
+ __ret = __p0 ^ __p1;
+ return __ret;
+}
+#else
+__ai uint64x1_t veor_u64(uint64x1_t __p0, uint64x1_t __p1) {
+ uint64x1_t __ret;
+ __ret = __p0 ^ __p1;
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint16x4_t veor_u16(uint16x4_t __p0, uint16x4_t __p1) {
+ uint16x4_t __ret;
+ __ret = __p0 ^ __p1;
+ return __ret;
+}
+#else
+__ai uint16x4_t veor_u16(uint16x4_t __p0, uint16x4_t __p1) {
+ uint16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
+ uint16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
+ uint16x4_t __ret;
+ __ret = __rev0 ^ __rev1;
+ __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai int8x8_t veor_s8(int8x8_t __p0, int8x8_t __p1) {
+ int8x8_t __ret;
+ __ret = __p0 ^ __p1;
+ return __ret;
+}
+#else
+__ai int8x8_t veor_s8(int8x8_t __p0, int8x8_t __p1) {
+ int8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
+ int8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
+ int8x8_t __ret;
+ __ret = __rev0 ^ __rev1;
+ __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai int32x2_t veor_s32(int32x2_t __p0, int32x2_t __p1) {
+ int32x2_t __ret;
+ __ret = __p0 ^ __p1;
+ return __ret;
+}
+#else
+__ai int32x2_t veor_s32(int32x2_t __p0, int32x2_t __p1) {
+ int32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
+ int32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
+ int32x2_t __ret;
+ __ret = __rev0 ^ __rev1;
+ __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai int64x1_t veor_s64(int64x1_t __p0, int64x1_t __p1) {
+ int64x1_t __ret;
+ __ret = __p0 ^ __p1;
+ return __ret;
+}
+#else
+__ai int64x1_t veor_s64(int64x1_t __p0, int64x1_t __p1) {
+ int64x1_t __ret;
+ __ret = __p0 ^ __p1;
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai int16x4_t veor_s16(int16x4_t __p0, int16x4_t __p1) {
+ int16x4_t __ret;
+ __ret = __p0 ^ __p1;
+ return __ret;
+}
+#else
+__ai int16x4_t veor_s16(int16x4_t __p0, int16x4_t __p1) {
+ int16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
+ int16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
+ int16x4_t __ret;
+ __ret = __rev0 ^ __rev1;
+ __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
+ return __ret;
+}
+#endif
+
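+/*
+ * vext / vextq: extract a vector starting at element offset __p2 within
+ * the concatenation of the two operands. The offset must be an immediate,
+ * hence the macro form; the trailing literal again encodes the element
+ * type for the generic builtin.
+ */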
+#ifdef __LITTLE_ENDIAN__
+#define vext_p8(__p0, __p1, __p2) __extension__ ({ \
+ poly8x8_t __s0 = __p0; \
+ poly8x8_t __s1 = __p1; \
+ poly8x8_t __ret; \
+ __ret = (poly8x8_t) __builtin_neon_vext_v((int8x8_t)__s0, (int8x8_t)__s1, __p2, 4); \
+ __ret; \
+})
+#else
+#define vext_p8(__p0, __p1, __p2) __extension__ ({ \
+ poly8x8_t __s0 = __p0; \
+ poly8x8_t __s1 = __p1; \
+ poly8x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
+ poly8x8_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \
+ poly8x8_t __ret; \
+ __ret = (poly8x8_t) __builtin_neon_vext_v((int8x8_t)__rev0, (int8x8_t)__rev1, __p2, 4); \
+ __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
+ __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vext_p16(__p0, __p1, __p2) __extension__ ({ \
+ poly16x4_t __s0 = __p0; \
+ poly16x4_t __s1 = __p1; \
+ poly16x4_t __ret; \
+ __ret = (poly16x4_t) __builtin_neon_vext_v((int8x8_t)__s0, (int8x8_t)__s1, __p2, 5); \
+ __ret; \
+})
+#else
+#define vext_p16(__p0, __p1, __p2) __extension__ ({ \
+ poly16x4_t __s0 = __p0; \
+ poly16x4_t __s1 = __p1; \
+ poly16x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
+ poly16x4_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
+ poly16x4_t __ret; \
+ __ret = (poly16x4_t) __builtin_neon_vext_v((int8x8_t)__rev0, (int8x8_t)__rev1, __p2, 5); \
+ __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
+ __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vextq_p8(__p0, __p1, __p2) __extension__ ({ \
+ poly8x16_t __s0 = __p0; \
+ poly8x16_t __s1 = __p1; \
+ poly8x16_t __ret; \
+ __ret = (poly8x16_t) __builtin_neon_vextq_v((int8x16_t)__s0, (int8x16_t)__s1, __p2, 36); \
+ __ret; \
+})
+#else
+#define vextq_p8(__p0, __p1, __p2) __extension__ ({ \
+ poly8x16_t __s0 = __p0; \
+ poly8x16_t __s1 = __p1; \
+ poly8x16_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
+ poly8x16_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
+ poly8x16_t __ret; \
+ __ret = (poly8x16_t) __builtin_neon_vextq_v((int8x16_t)__rev0, (int8x16_t)__rev1, __p2, 36); \
+ __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
+ __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vextq_p16(__p0, __p1, __p2) __extension__ ({ \
+ poly16x8_t __s0 = __p0; \
+ poly16x8_t __s1 = __p1; \
+ poly16x8_t __ret; \
+ __ret = (poly16x8_t) __builtin_neon_vextq_v((int8x16_t)__s0, (int8x16_t)__s1, __p2, 37); \
+ __ret; \
+})
+#else
+#define vextq_p16(__p0, __p1, __p2) __extension__ ({ \
+ poly16x8_t __s0 = __p0; \
+ poly16x8_t __s1 = __p1; \
+ poly16x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
+ poly16x8_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \
+ poly16x8_t __ret; \
+ __ret = (poly16x8_t) __builtin_neon_vextq_v((int8x16_t)__rev0, (int8x16_t)__rev1, __p2, 37); \
+ __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
+ __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vextq_u8(__p0, __p1, __p2) __extension__ ({ \
+ uint8x16_t __s0 = __p0; \
+ uint8x16_t __s1 = __p1; \
+ uint8x16_t __ret; \
+ __ret = (uint8x16_t) __builtin_neon_vextq_v((int8x16_t)__s0, (int8x16_t)__s1, __p2, 48); \
+ __ret; \
+})
+#else
+#define vextq_u8(__p0, __p1, __p2) __extension__ ({ \
+ uint8x16_t __s0 = __p0; \
+ uint8x16_t __s1 = __p1; \
+ uint8x16_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
+ uint8x16_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
+ uint8x16_t __ret; \
+ __ret = (uint8x16_t) __builtin_neon_vextq_v((int8x16_t)__rev0, (int8x16_t)__rev1, __p2, 48); \
+ __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
+ __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vextq_u32(__p0, __p1, __p2) __extension__ ({ \
+ uint32x4_t __s0 = __p0; \
+ uint32x4_t __s1 = __p1; \
+ uint32x4_t __ret; \
+ __ret = (uint32x4_t) __builtin_neon_vextq_v((int8x16_t)__s0, (int8x16_t)__s1, __p2, 50); \
+ __ret; \
+})
+#else
+#define vextq_u32(__p0, __p1, __p2) __extension__ ({ \
+ uint32x4_t __s0 = __p0; \
+ uint32x4_t __s1 = __p1; \
+ uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
+ uint32x4_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
+ uint32x4_t __ret; \
+ __ret = (uint32x4_t) __builtin_neon_vextq_v((int8x16_t)__rev0, (int8x16_t)__rev1, __p2, 50); \
+ __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
+ __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vextq_u64(__p0, __p1, __p2) __extension__ ({ \
+ uint64x2_t __s0 = __p0; \
+ uint64x2_t __s1 = __p1; \
+ uint64x2_t __ret; \
+ __ret = (uint64x2_t) __builtin_neon_vextq_v((int8x16_t)__s0, (int8x16_t)__s1, __p2, 51); \
+ __ret; \
+})
+#else
+#define vextq_u64(__p0, __p1, __p2) __extension__ ({ \
+ uint64x2_t __s0 = __p0; \
+ uint64x2_t __s1 = __p1; \
+ uint64x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
+ uint64x2_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \
+ uint64x2_t __ret; \
+ __ret = (uint64x2_t) __builtin_neon_vextq_v((int8x16_t)__rev0, (int8x16_t)__rev1, __p2, 51); \
+ __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
+ __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vextq_u16(__p0, __p1, __p2) __extension__ ({ \
+ uint16x8_t __s0 = __p0; \
+ uint16x8_t __s1 = __p1; \
+ uint16x8_t __ret; \
+ __ret = (uint16x8_t) __builtin_neon_vextq_v((int8x16_t)__s0, (int8x16_t)__s1, __p2, 49); \
+ __ret; \
+})
+#else
+#define vextq_u16(__p0, __p1, __p2) __extension__ ({ \
+ uint16x8_t __s0 = __p0; \
+ uint16x8_t __s1 = __p1; \
+ uint16x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
+ uint16x8_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \
+ uint16x8_t __ret; \
+ __ret = (uint16x8_t) __builtin_neon_vextq_v((int8x16_t)__rev0, (int8x16_t)__rev1, __p2, 49); \
+ __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
+ __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vextq_s8(__p0, __p1, __p2) __extension__ ({ \
+ int8x16_t __s0 = __p0; \
+ int8x16_t __s1 = __p1; \
+ int8x16_t __ret; \
+ __ret = (int8x16_t) __builtin_neon_vextq_v((int8x16_t)__s0, (int8x16_t)__s1, __p2, 32); \
+ __ret; \
+})
+#else
+#define vextq_s8(__p0, __p1, __p2) __extension__ ({ \
+ int8x16_t __s0 = __p0; \
+ int8x16_t __s1 = __p1; \
+ int8x16_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
+ int8x16_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
+ int8x16_t __ret; \
+ __ret = (int8x16_t) __builtin_neon_vextq_v((int8x16_t)__rev0, (int8x16_t)__rev1, __p2, 32); \
+ __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
+ __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vextq_f32(__p0, __p1, __p2) __extension__ ({ \
+ float32x4_t __s0 = __p0; \
+ float32x4_t __s1 = __p1; \
+ float32x4_t __ret; \
+ __ret = (float32x4_t) __builtin_neon_vextq_v((int8x16_t)__s0, (int8x16_t)__s1, __p2, 41); \
+ __ret; \
+})
+#else
+#define vextq_f32(__p0, __p1, __p2) __extension__ ({ \
+ float32x4_t __s0 = __p0; \
+ float32x4_t __s1 = __p1; \
+ float32x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
+ float32x4_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
+ float32x4_t __ret; \
+ __ret = (float32x4_t) __builtin_neon_vextq_v((int8x16_t)__rev0, (int8x16_t)__rev1, __p2, 41); \
+ __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
+ __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vextq_s32(__p0, __p1, __p2) __extension__ ({ \
+ int32x4_t __s0 = __p0; \
+ int32x4_t __s1 = __p1; \
+ int32x4_t __ret; \
+ __ret = (int32x4_t) __builtin_neon_vextq_v((int8x16_t)__s0, (int8x16_t)__s1, __p2, 34); \
+ __ret; \
+})
+#else
+#define vextq_s32(__p0, __p1, __p2) __extension__ ({ \
+ int32x4_t __s0 = __p0; \
+ int32x4_t __s1 = __p1; \
+ int32x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
+ int32x4_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
+ int32x4_t __ret; \
+ __ret = (int32x4_t) __builtin_neon_vextq_v((int8x16_t)__rev0, (int8x16_t)__rev1, __p2, 34); \
+ __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
+ __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vextq_s64(__p0, __p1, __p2) __extension__ ({ \
+ int64x2_t __s0 = __p0; \
+ int64x2_t __s1 = __p1; \
+ int64x2_t __ret; \
+ __ret = (int64x2_t) __builtin_neon_vextq_v((int8x16_t)__s0, (int8x16_t)__s1, __p2, 35); \
+ __ret; \
+})
+#else
+#define vextq_s64(__p0, __p1, __p2) __extension__ ({ \
+ int64x2_t __s0 = __p0; \
+ int64x2_t __s1 = __p1; \
+ int64x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
+ int64x2_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \
+ int64x2_t __ret; \
+ __ret = (int64x2_t) __builtin_neon_vextq_v((int8x16_t)__rev0, (int8x16_t)__rev1, __p2, 35); \
+ __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
+ __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vextq_s16(__p0, __p1, __p2) __extension__ ({ \
+ int16x8_t __s0 = __p0; \
+ int16x8_t __s1 = __p1; \
+ int16x8_t __ret; \
+ __ret = (int16x8_t) __builtin_neon_vextq_v((int8x16_t)__s0, (int8x16_t)__s1, __p2, 33); \
+ __ret; \
+})
+#else
+#define vextq_s16(__p0, __p1, __p2) __extension__ ({ \
+ int16x8_t __s0 = __p0; \
+ int16x8_t __s1 = __p1; \
+ int16x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
+ int16x8_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \
+ int16x8_t __ret; \
+ __ret = (int16x8_t) __builtin_neon_vextq_v((int8x16_t)__rev0, (int8x16_t)__rev1, __p2, 33); \
+ __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
+ __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vext_u8(__p0, __p1, __p2) __extension__ ({ \
+ uint8x8_t __s0 = __p0; \
+ uint8x8_t __s1 = __p1; \
+ uint8x8_t __ret; \
+ __ret = (uint8x8_t) __builtin_neon_vext_v((int8x8_t)__s0, (int8x8_t)__s1, __p2, 16); \
+ __ret; \
+})
+#else
+#define vext_u8(__p0, __p1, __p2) __extension__ ({ \
+ uint8x8_t __s0 = __p0; \
+ uint8x8_t __s1 = __p1; \
+ uint8x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
+ uint8x8_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \
+ uint8x8_t __ret; \
+ __ret = (uint8x8_t) __builtin_neon_vext_v((int8x8_t)__rev0, (int8x8_t)__rev1, __p2, 16); \
+ __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
+ __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vext_u32(__p0, __p1, __p2) __extension__ ({ \
+ uint32x2_t __s0 = __p0; \
+ uint32x2_t __s1 = __p1; \
+ uint32x2_t __ret; \
+ __ret = (uint32x2_t) __builtin_neon_vext_v((int8x8_t)__s0, (int8x8_t)__s1, __p2, 18); \
+ __ret; \
+})
+#else
+#define vext_u32(__p0, __p1, __p2) __extension__ ({ \
+ uint32x2_t __s0 = __p0; \
+ uint32x2_t __s1 = __p1; \
+ uint32x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
+ uint32x2_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \
+ uint32x2_t __ret; \
+ __ret = (uint32x2_t) __builtin_neon_vext_v((int8x8_t)__rev0, (int8x8_t)__rev1, __p2, 18); \
+ __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
+ __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vext_u64(__p0, __p1, __p2) __extension__ ({ \
+ uint64x1_t __s0 = __p0; \
+ uint64x1_t __s1 = __p1; \
+ uint64x1_t __ret; \
+ __ret = (uint64x1_t) __builtin_neon_vext_v((int8x8_t)__s0, (int8x8_t)__s1, __p2, 19); \
+ __ret; \
+})
+#else
+#define vext_u64(__p0, __p1, __p2) __extension__ ({ \
+ uint64x1_t __s0 = __p0; \
+ uint64x1_t __s1 = __p1; \
+ uint64x1_t __ret; \
+ __ret = (uint64x1_t) __builtin_neon_vext_v((int8x8_t)__s0, (int8x8_t)__s1, __p2, 19); \
+ __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vext_u16(__p0, __p1, __p2) __extension__ ({ \
+ uint16x4_t __s0 = __p0; \
+ uint16x4_t __s1 = __p1; \
+ uint16x4_t __ret; \
+ __ret = (uint16x4_t) __builtin_neon_vext_v((int8x8_t)__s0, (int8x8_t)__s1, __p2, 17); \
+ __ret; \
+})
+#else
+#define vext_u16(__p0, __p1, __p2) __extension__ ({ \
+ uint16x4_t __s0 = __p0; \
+ uint16x4_t __s1 = __p1; \
+ uint16x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
+ uint16x4_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
+ uint16x4_t __ret; \
+ __ret = (uint16x4_t) __builtin_neon_vext_v((int8x8_t)__rev0, (int8x8_t)__rev1, __p2, 17); \
+ __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
+ __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vext_s8(__p0, __p1, __p2) __extension__ ({ \
+ int8x8_t __s0 = __p0; \
+ int8x8_t __s1 = __p1; \
+ int8x8_t __ret; \
+ __ret = (int8x8_t) __builtin_neon_vext_v((int8x8_t)__s0, (int8x8_t)__s1, __p2, 0); \
+ __ret; \
+})
+#else
+#define vext_s8(__p0, __p1, __p2) __extension__ ({ \
+ int8x8_t __s0 = __p0; \
+ int8x8_t __s1 = __p1; \
+ int8x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
+ int8x8_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \
+ int8x8_t __ret; \
+ __ret = (int8x8_t) __builtin_neon_vext_v((int8x8_t)__rev0, (int8x8_t)__rev1, __p2, 0); \
+ __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
+ __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vext_f32(__p0, __p1, __p2) __extension__ ({ \
+ float32x2_t __s0 = __p0; \
+ float32x2_t __s1 = __p1; \
+ float32x2_t __ret; \
+ __ret = (float32x2_t) __builtin_neon_vext_v((int8x8_t)__s0, (int8x8_t)__s1, __p2, 9); \
+ __ret; \
+})
+#else
+#define vext_f32(__p0, __p1, __p2) __extension__ ({ \
+ float32x2_t __s0 = __p0; \
+ float32x2_t __s1 = __p1; \
+ float32x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
+ float32x2_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \
+ float32x2_t __ret; \
+ __ret = (float32x2_t) __builtin_neon_vext_v((int8x8_t)__rev0, (int8x8_t)__rev1, __p2, 9); \
+ __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
+ __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vext_s32(__p0, __p1, __p2) __extension__ ({ \
+ int32x2_t __s0 = __p0; \
+ int32x2_t __s1 = __p1; \
+ int32x2_t __ret; \
+ __ret = (int32x2_t) __builtin_neon_vext_v((int8x8_t)__s0, (int8x8_t)__s1, __p2, 2); \
+ __ret; \
+})
+#else
+#define vext_s32(__p0, __p1, __p2) __extension__ ({ \
+ int32x2_t __s0 = __p0; \
+ int32x2_t __s1 = __p1; \
+ int32x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
+ int32x2_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \
+ int32x2_t __ret; \
+ __ret = (int32x2_t) __builtin_neon_vext_v((int8x8_t)__rev0, (int8x8_t)__rev1, __p2, 2); \
+ __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
+ __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vext_s64(__p0, __p1, __p2) __extension__ ({ \
+ int64x1_t __s0 = __p0; \
+ int64x1_t __s1 = __p1; \
+ int64x1_t __ret; \
+ __ret = (int64x1_t) __builtin_neon_vext_v((int8x8_t)__s0, (int8x8_t)__s1, __p2, 3); \
+ __ret; \
+})
+#else
+#define vext_s64(__p0, __p1, __p2) __extension__ ({ \
+ int64x1_t __s0 = __p0; \
+ int64x1_t __s1 = __p1; \
+ int64x1_t __ret; \
+ __ret = (int64x1_t) __builtin_neon_vext_v((int8x8_t)__s0, (int8x8_t)__s1, __p2, 3); \
+ __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vext_s16(__p0, __p1, __p2) __extension__ ({ \
+ int16x4_t __s0 = __p0; \
+ int16x4_t __s1 = __p1; \
+ int16x4_t __ret; \
+ __ret = (int16x4_t) __builtin_neon_vext_v((int8x8_t)__s0, (int8x8_t)__s1, __p2, 1); \
+ __ret; \
+})
+#else
+#define vext_s16(__p0, __p1, __p2) __extension__ ({ \
+ int16x4_t __s0 = __p0; \
+ int16x4_t __s1 = __p1; \
+ int16x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
+ int16x4_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
+ int16x4_t __ret; \
+ __ret = (int16x4_t) __builtin_neon_vext_v((int8x8_t)__rev0, (int8x8_t)__rev1, __p2, 1); \
+ __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
+ __ret; \
+})
+#endif
+
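+/*
+ * Editor's note (not part of the generated header): vfma/vfmaq compute a
+ * fused multiply-add, __p0 + __p1 * __p2, with a single rounding step. The
+ * __noswap_ variants skip the big-endian lane reversal; they appear to exist
+ * for use by other generated intrinsics that already operate on reversed
+ * vectors. A minimal usage sketch:
+ *
+ *   float32x4_t acc = vdupq_n_f32(0.0f);
+ *   float32x4_t x   = vdupq_n_f32(2.0f);
+ *   float32x4_t y   = vdupq_n_f32(3.0f);
+ *   acc = vfmaq_f32(acc, x, y);  // each lane: 0.0f + 2.0f * 3.0f == 6.0f
+ */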
+#ifdef __LITTLE_ENDIAN__
+__ai float32x4_t vfmaq_f32(float32x4_t __p0, float32x4_t __p1, float32x4_t __p2) {
+ float32x4_t __ret;
+ __ret = (float32x4_t) __builtin_neon_vfmaq_v((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 41);
+ return __ret;
+}
+#else
+__ai float32x4_t vfmaq_f32(float32x4_t __p0, float32x4_t __p1, float32x4_t __p2) {
+ float32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
+ float32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
+ float32x4_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 3, 2, 1, 0);
+ float32x4_t __ret;
+ __ret = (float32x4_t) __builtin_neon_vfmaq_v((int8x16_t)__rev0, (int8x16_t)__rev1, (int8x16_t)__rev2, 41);
+ __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
+ return __ret;
+}
+__ai float32x4_t __noswap_vfmaq_f32(float32x4_t __p0, float32x4_t __p1, float32x4_t __p2) {
+ float32x4_t __ret;
+ __ret = (float32x4_t) __builtin_neon_vfmaq_v((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 41);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai float32x2_t vfma_f32(float32x2_t __p0, float32x2_t __p1, float32x2_t __p2) {
+ float32x2_t __ret;
+ __ret = (float32x2_t) __builtin_neon_vfma_v((int8x8_t)__p0, (int8x8_t)__p1, (int8x8_t)__p2, 9);
+ return __ret;
+}
+#else
+__ai float32x2_t vfma_f32(float32x2_t __p0, float32x2_t __p1, float32x2_t __p2) {
+ float32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
+ float32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
+ float32x2_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 1, 0);
+ float32x2_t __ret;
+ __ret = (float32x2_t) __builtin_neon_vfma_v((int8x8_t)__rev0, (int8x8_t)__rev1, (int8x8_t)__rev2, 9);
+ __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
+ return __ret;
+}
+__ai float32x2_t __noswap_vfma_f32(float32x2_t __p0, float32x2_t __p1, float32x2_t __p2) {
+ float32x2_t __ret;
+ __ret = (float32x2_t) __builtin_neon_vfma_v((int8x8_t)__p0, (int8x8_t)__p1, (int8x8_t)__p2, 9);
+ return __ret;
+}
+#endif
+
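+/*
+ * Editor's note (not part of the generated header): vget_high_* return the
+ * upper half (lanes N/2..N-1) of a 128-bit vector as a 64-bit vector; for
+ * one-lane results (u64/s64) no final lane reversal is emitted, since a
+ * single-element vector has only one lane order. A minimal usage sketch:
+ *
+ *   uint8x16_t q  = vdupq_n_u8(7);
+ *   uint8x8_t  hi = vget_high_u8(q);  // lanes 8..15 of q
+ */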
+#ifdef __LITTLE_ENDIAN__
+__ai poly8x8_t vget_high_p8(poly8x16_t __p0) {
+ poly8x8_t __ret;
+ __ret = __builtin_shufflevector(__p0, __p0, 8, 9, 10, 11, 12, 13, 14, 15);
+ return __ret;
+}
+#else
+__ai poly8x8_t vget_high_p8(poly8x16_t __p0) {
+ poly8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
+ poly8x8_t __ret;
+ __ret = __builtin_shufflevector(__rev0, __rev0, 8, 9, 10, 11, 12, 13, 14, 15);
+ __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
+ return __ret;
+}
+__ai poly8x8_t __noswap_vget_high_p8(poly8x16_t __p0) {
+ poly8x8_t __ret;
+ __ret = __builtin_shufflevector(__p0, __p0, 8, 9, 10, 11, 12, 13, 14, 15);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai poly16x4_t vget_high_p16(poly16x8_t __p0) {
+ poly16x4_t __ret;
+ __ret = __builtin_shufflevector(__p0, __p0, 4, 5, 6, 7);
+ return __ret;
+}
+#else
+__ai poly16x4_t vget_high_p16(poly16x8_t __p0) {
+ poly16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
+ poly16x4_t __ret;
+ __ret = __builtin_shufflevector(__rev0, __rev0, 4, 5, 6, 7);
+ __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint8x8_t vget_high_u8(uint8x16_t __p0) {
+ uint8x8_t __ret;
+ __ret = __builtin_shufflevector(__p0, __p0, 8, 9, 10, 11, 12, 13, 14, 15);
+ return __ret;
+}
+#else
+__ai uint8x8_t vget_high_u8(uint8x16_t __p0) {
+ uint8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
+ uint8x8_t __ret;
+ __ret = __builtin_shufflevector(__rev0, __rev0, 8, 9, 10, 11, 12, 13, 14, 15);
+ __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
+ return __ret;
+}
+__ai uint8x8_t __noswap_vget_high_u8(uint8x16_t __p0) {
+ uint8x8_t __ret;
+ __ret = __builtin_shufflevector(__p0, __p0, 8, 9, 10, 11, 12, 13, 14, 15);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint32x2_t vget_high_u32(uint32x4_t __p0) {
+ uint32x2_t __ret;
+ __ret = __builtin_shufflevector(__p0, __p0, 2, 3);
+ return __ret;
+}
+#else
+__ai uint32x2_t vget_high_u32(uint32x4_t __p0) {
+ uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
+ uint32x2_t __ret;
+ __ret = __builtin_shufflevector(__rev0, __rev0, 2, 3);
+ __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
+ return __ret;
+}
+__ai uint32x2_t __noswap_vget_high_u32(uint32x4_t __p0) {
+ uint32x2_t __ret;
+ __ret = __builtin_shufflevector(__p0, __p0, 2, 3);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint64x1_t vget_high_u64(uint64x2_t __p0) {
+ uint64x1_t __ret;
+ __ret = __builtin_shufflevector(__p0, __p0, 1);
+ return __ret;
+}
+#else
+__ai uint64x1_t vget_high_u64(uint64x2_t __p0) {
+ uint64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
+ uint64x1_t __ret;
+ __ret = __builtin_shufflevector(__rev0, __rev0, 1);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint16x4_t vget_high_u16(uint16x8_t __p0) {
+ uint16x4_t __ret;
+ __ret = __builtin_shufflevector(__p0, __p0, 4, 5, 6, 7);
+ return __ret;
+}
+#else
+__ai uint16x4_t vget_high_u16(uint16x8_t __p0) {
+ uint16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
+ uint16x4_t __ret;
+ __ret = __builtin_shufflevector(__rev0, __rev0, 4, 5, 6, 7);
+ __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
+ return __ret;
+}
+__ai uint16x4_t __noswap_vget_high_u16(uint16x8_t __p0) {
+ uint16x4_t __ret;
+ __ret = __builtin_shufflevector(__p0, __p0, 4, 5, 6, 7);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai int8x8_t vget_high_s8(int8x16_t __p0) {
+ int8x8_t __ret;
+ __ret = __builtin_shufflevector(__p0, __p0, 8, 9, 10, 11, 12, 13, 14, 15);
+ return __ret;
+}
+#else
+__ai int8x8_t vget_high_s8(int8x16_t __p0) {
+ int8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
+ int8x8_t __ret;
+ __ret = __builtin_shufflevector(__rev0, __rev0, 8, 9, 10, 11, 12, 13, 14, 15);
+ __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
+ return __ret;
+}
+__ai int8x8_t __noswap_vget_high_s8(int8x16_t __p0) {
+ int8x8_t __ret;
+ __ret = __builtin_shufflevector(__p0, __p0, 8, 9, 10, 11, 12, 13, 14, 15);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai float32x2_t vget_high_f32(float32x4_t __p0) {
+ float32x2_t __ret;
+ __ret = __builtin_shufflevector(__p0, __p0, 2, 3);
+ return __ret;
+}
+#else
+__ai float32x2_t vget_high_f32(float32x4_t __p0) {
+ float32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
+ float32x2_t __ret;
+ __ret = __builtin_shufflevector(__rev0, __rev0, 2, 3);
+ __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
+ return __ret;
+}
+__ai float32x2_t __noswap_vget_high_f32(float32x4_t __p0) {
+ float32x2_t __ret;
+ __ret = __builtin_shufflevector(__p0, __p0, 2, 3);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai float16x4_t vget_high_f16(float16x8_t __p0) {
+ float16x4_t __ret;
+ __ret = __builtin_shufflevector(__p0, __p0, 4, 5, 6, 7);
+ return __ret;
+}
+#else
+__ai float16x4_t vget_high_f16(float16x8_t __p0) {
+ float16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
+ float16x4_t __ret;
+ __ret = __builtin_shufflevector(__rev0, __rev0, 4, 5, 6, 7);
+ __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
+ return __ret;
+}
+__ai float16x4_t __noswap_vget_high_f16(float16x8_t __p0) {
+ float16x4_t __ret;
+ __ret = __builtin_shufflevector(__p0, __p0, 4, 5, 6, 7);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai int32x2_t vget_high_s32(int32x4_t __p0) {
+ int32x2_t __ret;
+ __ret = __builtin_shufflevector(__p0, __p0, 2, 3);
+ return __ret;
+}
+#else
+__ai int32x2_t vget_high_s32(int32x4_t __p0) {
+ int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
+ int32x2_t __ret;
+ __ret = __builtin_shufflevector(__rev0, __rev0, 2, 3);
+ __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
+ return __ret;
+}
+__ai int32x2_t __noswap_vget_high_s32(int32x4_t __p0) {
+ int32x2_t __ret;
+ __ret = __builtin_shufflevector(__p0, __p0, 2, 3);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai int64x1_t vget_high_s64(int64x2_t __p0) {
+ int64x1_t __ret;
+ __ret = __builtin_shufflevector(__p0, __p0, 1);
+ return __ret;
+}
+#else
+__ai int64x1_t vget_high_s64(int64x2_t __p0) {
+ int64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
+ int64x1_t __ret;
+ __ret = __builtin_shufflevector(__rev0, __rev0, 1);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai int16x4_t vget_high_s16(int16x8_t __p0) {
+ int16x4_t __ret;
+ __ret = __builtin_shufflevector(__p0, __p0, 4, 5, 6, 7);
+ return __ret;
+}
+#else
+__ai int16x4_t vget_high_s16(int16x8_t __p0) {
+ int16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
+ int16x4_t __ret;
+ __ret = __builtin_shufflevector(__rev0, __rev0, 4, 5, 6, 7);
+ __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
+ return __ret;
+}
+__ai int16x4_t __noswap_vget_high_s16(int16x8_t __p0) {
+ int16x4_t __ret;
+ __ret = __builtin_shufflevector(__p0, __p0, 4, 5, 6, 7);
+ return __ret;
+}
+#endif
+
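+/*
+ * Editor's note (not part of the generated header): vget_lane/vgetq_lane
+ * extract a single scalar from lane __p1, which must be a compile-time
+ * constant in range for the vector type; that is why these are implemented
+ * as macros over statement expressions rather than functions. A minimal
+ * usage sketch:
+ *
+ *   uint32x4_t v = vdupq_n_u32(42);
+ *   uint32_t   x = vgetq_lane_u32(v, 1);  // x == 42
+ */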
+#ifdef __LITTLE_ENDIAN__
+#define vget_lane_p8(__p0, __p1) __extension__ ({ \
+ poly8x8_t __s0 = __p0; \
+ poly8_t __ret; \
+ __ret = (poly8_t) __builtin_neon_vget_lane_i8((int8x8_t)__s0, __p1); \
+ __ret; \
+})
+#else
+#define vget_lane_p8(__p0, __p1) __extension__ ({ \
+ poly8x8_t __s0 = __p0; \
+ poly8x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
+ poly8_t __ret; \
+ __ret = (poly8_t) __builtin_neon_vget_lane_i8((int8x8_t)__rev0, __p1); \
+ __ret; \
+})
+#define __noswap_vget_lane_p8(__p0, __p1) __extension__ ({ \
+ poly8x8_t __s0 = __p0; \
+ poly8_t __ret; \
+ __ret = (poly8_t) __builtin_neon_vget_lane_i8((int8x8_t)__s0, __p1); \
+ __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vget_lane_p16(__p0, __p1) __extension__ ({ \
+ poly16x4_t __s0 = __p0; \
+ poly16_t __ret; \
+ __ret = (poly16_t) __builtin_neon_vget_lane_i16((int8x8_t)__s0, __p1); \
+ __ret; \
+})
+#else
+#define vget_lane_p16(__p0, __p1) __extension__ ({ \
+ poly16x4_t __s0 = __p0; \
+ poly16x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
+ poly16_t __ret; \
+ __ret = (poly16_t) __builtin_neon_vget_lane_i16((int8x8_t)__rev0, __p1); \
+ __ret; \
+})
+#define __noswap_vget_lane_p16(__p0, __p1) __extension__ ({ \
+ poly16x4_t __s0 = __p0; \
+ poly16_t __ret; \
+ __ret = (poly16_t) __builtin_neon_vget_lane_i16((int8x8_t)__s0, __p1); \
+ __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vgetq_lane_p8(__p0, __p1) __extension__ ({ \
+ poly8x16_t __s0 = __p0; \
+ poly8_t __ret; \
+ __ret = (poly8_t) __builtin_neon_vgetq_lane_i8((int8x16_t)__s0, __p1); \
+ __ret; \
+})
+#else
+#define vgetq_lane_p8(__p0, __p1) __extension__ ({ \
+ poly8x16_t __s0 = __p0; \
+ poly8x16_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
+ poly8_t __ret; \
+ __ret = (poly8_t) __builtin_neon_vgetq_lane_i8((int8x16_t)__rev0, __p1); \
+ __ret; \
+})
+#define __noswap_vgetq_lane_p8(__p0, __p1) __extension__ ({ \
+ poly8x16_t __s0 = __p0; \
+ poly8_t __ret; \
+ __ret = (poly8_t) __builtin_neon_vgetq_lane_i8((int8x16_t)__s0, __p1); \
+ __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vgetq_lane_p16(__p0, __p1) __extension__ ({ \
+ poly16x8_t __s0 = __p0; \
+ poly16_t __ret; \
+ __ret = (poly16_t) __builtin_neon_vgetq_lane_i16((int8x16_t)__s0, __p1); \
+ __ret; \
+})
+#else
+#define vgetq_lane_p16(__p0, __p1) __extension__ ({ \
+ poly16x8_t __s0 = __p0; \
+ poly16x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
+ poly16_t __ret; \
+ __ret = (poly16_t) __builtin_neon_vgetq_lane_i16((int8x16_t)__rev0, __p1); \
+ __ret; \
+})
+#define __noswap_vgetq_lane_p16(__p0, __p1) __extension__ ({ \
+ poly16x8_t __s0 = __p0; \
+ poly16_t __ret; \
+ __ret = (poly16_t) __builtin_neon_vgetq_lane_i16((int8x16_t)__s0, __p1); \
+ __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vgetq_lane_u8(__p0, __p1) __extension__ ({ \
+ uint8x16_t __s0 = __p0; \
+ uint8_t __ret; \
+ __ret = (uint8_t) __builtin_neon_vgetq_lane_i8((int8x16_t)__s0, __p1); \
+ __ret; \
+})
+#else
+#define vgetq_lane_u8(__p0, __p1) __extension__ ({ \
+ uint8x16_t __s0 = __p0; \
+ uint8x16_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
+ uint8_t __ret; \
+ __ret = (uint8_t) __builtin_neon_vgetq_lane_i8((int8x16_t)__rev0, __p1); \
+ __ret; \
+})
+#define __noswap_vgetq_lane_u8(__p0, __p1) __extension__ ({ \
+ uint8x16_t __s0 = __p0; \
+ uint8_t __ret; \
+ __ret = (uint8_t) __builtin_neon_vgetq_lane_i8((int8x16_t)__s0, __p1); \
+ __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vgetq_lane_u32(__p0, __p1) __extension__ ({ \
+ uint32x4_t __s0 = __p0; \
+ uint32_t __ret; \
+ __ret = (uint32_t) __builtin_neon_vgetq_lane_i32((int8x16_t)__s0, __p1); \
+ __ret; \
+})
+#else
+#define vgetq_lane_u32(__p0, __p1) __extension__ ({ \
+ uint32x4_t __s0 = __p0; \
+ uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
+ uint32_t __ret; \
+ __ret = (uint32_t) __builtin_neon_vgetq_lane_i32((int8x16_t)__rev0, __p1); \
+ __ret; \
+})
+#define __noswap_vgetq_lane_u32(__p0, __p1) __extension__ ({ \
+ uint32x4_t __s0 = __p0; \
+ uint32_t __ret; \
+ __ret = (uint32_t) __builtin_neon_vgetq_lane_i32((int8x16_t)__s0, __p1); \
+ __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vgetq_lane_u64(__p0, __p1) __extension__ ({ \
+ uint64x2_t __s0 = __p0; \
+ uint64_t __ret; \
+ __ret = (uint64_t) __builtin_neon_vgetq_lane_i64((int8x16_t)__s0, __p1); \
+ __ret; \
+})
+#else
+#define vgetq_lane_u64(__p0, __p1) __extension__ ({ \
+ uint64x2_t __s0 = __p0; \
+ uint64x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
+ uint64_t __ret; \
+ __ret = (uint64_t) __builtin_neon_vgetq_lane_i64((int8x16_t)__rev0, __p1); \
+ __ret; \
+})
+#define __noswap_vgetq_lane_u64(__p0, __p1) __extension__ ({ \
+ uint64x2_t __s0 = __p0; \
+ uint64_t __ret; \
+ __ret = (uint64_t) __builtin_neon_vgetq_lane_i64((int8x16_t)__s0, __p1); \
+ __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vgetq_lane_u16(__p0, __p1) __extension__ ({ \
+ uint16x8_t __s0 = __p0; \
+ uint16_t __ret; \
+ __ret = (uint16_t) __builtin_neon_vgetq_lane_i16((int8x16_t)__s0, __p1); \
+ __ret; \
+})
+#else
+#define vgetq_lane_u16(__p0, __p1) __extension__ ({ \
+ uint16x8_t __s0 = __p0; \
+ uint16x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
+ uint16_t __ret; \
+ __ret = (uint16_t) __builtin_neon_vgetq_lane_i16((int8x16_t)__rev0, __p1); \
+ __ret; \
+})
+#define __noswap_vgetq_lane_u16(__p0, __p1) __extension__ ({ \
+ uint16x8_t __s0 = __p0; \
+ uint16_t __ret; \
+ __ret = (uint16_t) __builtin_neon_vgetq_lane_i16((int8x16_t)__s0, __p1); \
+ __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vgetq_lane_s8(__p0, __p1) __extension__ ({ \
+ int8x16_t __s0 = __p0; \
+ int8_t __ret; \
+ __ret = (int8_t) __builtin_neon_vgetq_lane_i8((int8x16_t)__s0, __p1); \
+ __ret; \
+})
+#else
+#define vgetq_lane_s8(__p0, __p1) __extension__ ({ \
+ int8x16_t __s0 = __p0; \
+ int8x16_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
+ int8_t __ret; \
+ __ret = (int8_t) __builtin_neon_vgetq_lane_i8((int8x16_t)__rev0, __p1); \
+ __ret; \
+})
+#define __noswap_vgetq_lane_s8(__p0, __p1) __extension__ ({ \
+ int8x16_t __s0 = __p0; \
+ int8_t __ret; \
+ __ret = (int8_t) __builtin_neon_vgetq_lane_i8((int8x16_t)__s0, __p1); \
+ __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vgetq_lane_f32(__p0, __p1) __extension__ ({ \
+ float32x4_t __s0 = __p0; \
+ float32_t __ret; \
+ __ret = (float32_t) __builtin_neon_vgetq_lane_f32((int8x16_t)__s0, __p1); \
+ __ret; \
+})
+#else
+#define vgetq_lane_f32(__p0, __p1) __extension__ ({ \
+ float32x4_t __s0 = __p0; \
+ float32x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
+ float32_t __ret; \
+ __ret = (float32_t) __builtin_neon_vgetq_lane_f32((int8x16_t)__rev0, __p1); \
+ __ret; \
+})
+#define __noswap_vgetq_lane_f32(__p0, __p1) __extension__ ({ \
+ float32x4_t __s0 = __p0; \
+ float32_t __ret; \
+ __ret = (float32_t) __builtin_neon_vgetq_lane_f32((int8x16_t)__s0, __p1); \
+ __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vgetq_lane_s32(__p0, __p1) __extension__ ({ \
+ int32x4_t __s0 = __p0; \
+ int32_t __ret; \
+ __ret = (int32_t) __builtin_neon_vgetq_lane_i32((int8x16_t)__s0, __p1); \
+ __ret; \
+})
+#else
+#define vgetq_lane_s32(__p0, __p1) __extension__ ({ \
+ int32x4_t __s0 = __p0; \
+ int32x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
+ int32_t __ret; \
+ __ret = (int32_t) __builtin_neon_vgetq_lane_i32((int8x16_t)__rev0, __p1); \
+ __ret; \
+})
+#define __noswap_vgetq_lane_s32(__p0, __p1) __extension__ ({ \
+ int32x4_t __s0 = __p0; \
+ int32_t __ret; \
+ __ret = (int32_t) __builtin_neon_vgetq_lane_i32((int8x16_t)__s0, __p1); \
+ __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vgetq_lane_s64(__p0, __p1) __extension__ ({ \
+ int64x2_t __s0 = __p0; \
+ int64_t __ret; \
+ __ret = (int64_t) __builtin_neon_vgetq_lane_i64((int8x16_t)__s0, __p1); \
+ __ret; \
+})
+#else
+#define vgetq_lane_s64(__p0, __p1) __extension__ ({ \
+ int64x2_t __s0 = __p0; \
+ int64x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
+ int64_t __ret; \
+ __ret = (int64_t) __builtin_neon_vgetq_lane_i64((int8x16_t)__rev0, __p1); \
+ __ret; \
+})
+#define __noswap_vgetq_lane_s64(__p0, __p1) __extension__ ({ \
+ int64x2_t __s0 = __p0; \
+ int64_t __ret; \
+ __ret = (int64_t) __builtin_neon_vgetq_lane_i64((int8x16_t)__s0, __p1); \
+ __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vgetq_lane_s16(__p0, __p1) __extension__ ({ \
+ int16x8_t __s0 = __p0; \
+ int16_t __ret; \
+ __ret = (int16_t) __builtin_neon_vgetq_lane_i16((int8x16_t)__s0, __p1); \
+ __ret; \
+})
+#else
+#define vgetq_lane_s16(__p0, __p1) __extension__ ({ \
+ int16x8_t __s0 = __p0; \
+ int16x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
+ int16_t __ret; \
+ __ret = (int16_t) __builtin_neon_vgetq_lane_i16((int8x16_t)__rev0, __p1); \
+ __ret; \
+})
+#define __noswap_vgetq_lane_s16(__p0, __p1) __extension__ ({ \
+ int16x8_t __s0 = __p0; \
+ int16_t __ret; \
+ __ret = (int16_t) __builtin_neon_vgetq_lane_i16((int8x16_t)__s0, __p1); \
+ __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vget_lane_u8(__p0, __p1) __extension__ ({ \
+ uint8x8_t __s0 = __p0; \
+ uint8_t __ret; \
+ __ret = (uint8_t) __builtin_neon_vget_lane_i8((int8x8_t)__s0, __p1); \
+ __ret; \
+})
+#else
+#define vget_lane_u8(__p0, __p1) __extension__ ({ \
+ uint8x8_t __s0 = __p0; \
+ uint8x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
+ uint8_t __ret; \
+ __ret = (uint8_t) __builtin_neon_vget_lane_i8((int8x8_t)__rev0, __p1); \
+ __ret; \
+})
+#define __noswap_vget_lane_u8(__p0, __p1) __extension__ ({ \
+ uint8x8_t __s0 = __p0; \
+ uint8_t __ret; \
+ __ret = (uint8_t) __builtin_neon_vget_lane_i8((int8x8_t)__s0, __p1); \
+ __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vget_lane_u32(__p0, __p1) __extension__ ({ \
+ uint32x2_t __s0 = __p0; \
+ uint32_t __ret; \
+ __ret = (uint32_t) __builtin_neon_vget_lane_i32((int8x8_t)__s0, __p1); \
+ __ret; \
+})
+#else
+#define vget_lane_u32(__p0, __p1) __extension__ ({ \
+ uint32x2_t __s0 = __p0; \
+ uint32x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
+ uint32_t __ret; \
+ __ret = (uint32_t) __builtin_neon_vget_lane_i32((int8x8_t)__rev0, __p1); \
+ __ret; \
+})
+#define __noswap_vget_lane_u32(__p0, __p1) __extension__ ({ \
+ uint32x2_t __s0 = __p0; \
+ uint32_t __ret; \
+ __ret = (uint32_t) __builtin_neon_vget_lane_i32((int8x8_t)__s0, __p1); \
+ __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vget_lane_u64(__p0, __p1) __extension__ ({ \
+ uint64x1_t __s0 = __p0; \
+ uint64_t __ret; \
+ __ret = (uint64_t) __builtin_neon_vget_lane_i64((int8x8_t)__s0, __p1); \
+ __ret; \
+})
+#else
+#define vget_lane_u64(__p0, __p1) __extension__ ({ \
+ uint64x1_t __s0 = __p0; \
+ uint64_t __ret; \
+ __ret = (uint64_t) __builtin_neon_vget_lane_i64((int8x8_t)__s0, __p1); \
+ __ret; \
+})
+#define __noswap_vget_lane_u64(__p0, __p1) __extension__ ({ \
+ uint64x1_t __s0 = __p0; \
+ uint64_t __ret; \
+ __ret = (uint64_t) __builtin_neon_vget_lane_i64((int8x8_t)__s0, __p1); \
+ __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vget_lane_u16(__p0, __p1) __extension__ ({ \
+ uint16x4_t __s0 = __p0; \
+ uint16_t __ret; \
+ __ret = (uint16_t) __builtin_neon_vget_lane_i16((int8x8_t)__s0, __p1); \
+ __ret; \
+})
+#else
+#define vget_lane_u16(__p0, __p1) __extension__ ({ \
+ uint16x4_t __s0 = __p0; \
+ uint16x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
+ uint16_t __ret; \
+ __ret = (uint16_t) __builtin_neon_vget_lane_i16((int8x8_t)__rev0, __p1); \
+ __ret; \
+})
+#define __noswap_vget_lane_u16(__p0, __p1) __extension__ ({ \
+ uint16x4_t __s0 = __p0; \
+ uint16_t __ret; \
+ __ret = (uint16_t) __builtin_neon_vget_lane_i16((int8x8_t)__s0, __p1); \
+ __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vget_lane_s8(__p0, __p1) __extension__ ({ \
+ int8x8_t __s0 = __p0; \
+ int8_t __ret; \
+ __ret = (int8_t) __builtin_neon_vget_lane_i8((int8x8_t)__s0, __p1); \
+ __ret; \
+})
+#else
+#define vget_lane_s8(__p0, __p1) __extension__ ({ \
+ int8x8_t __s0 = __p0; \
+ int8x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
+ int8_t __ret; \
+ __ret = (int8_t) __builtin_neon_vget_lane_i8((int8x8_t)__rev0, __p1); \
+ __ret; \
+})
+#define __noswap_vget_lane_s8(__p0, __p1) __extension__ ({ \
+ int8x8_t __s0 = __p0; \
+ int8_t __ret; \
+ __ret = (int8_t) __builtin_neon_vget_lane_i8((int8x8_t)__s0, __p1); \
+ __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vget_lane_f32(__p0, __p1) __extension__ ({ \
+ float32x2_t __s0 = __p0; \
+ float32_t __ret; \
+ __ret = (float32_t) __builtin_neon_vget_lane_f32((int8x8_t)__s0, __p1); \
+ __ret; \
+})
+#else
+#define vget_lane_f32(__p0, __p1) __extension__ ({ \
+ float32x2_t __s0 = __p0; \
+ float32x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
+ float32_t __ret; \
+ __ret = (float32_t) __builtin_neon_vget_lane_f32((int8x8_t)__rev0, __p1); \
+ __ret; \
+})
+#define __noswap_vget_lane_f32(__p0, __p1) __extension__ ({ \
+ float32x2_t __s0 = __p0; \
+ float32_t __ret; \
+ __ret = (float32_t) __builtin_neon_vget_lane_f32((int8x8_t)__s0, __p1); \
+ __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vget_lane_s32(__p0, __p1) __extension__ ({ \
+ int32x2_t __s0 = __p0; \
+ int32_t __ret; \
+ __ret = (int32_t) __builtin_neon_vget_lane_i32((int8x8_t)__s0, __p1); \
+ __ret; \
+})
+#else
+#define vget_lane_s32(__p0, __p1) __extension__ ({ \
+ int32x2_t __s0 = __p0; \
+ int32x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
+ int32_t __ret; \
+ __ret = (int32_t) __builtin_neon_vget_lane_i32((int8x8_t)__rev0, __p1); \
+ __ret; \
+})
+#define __noswap_vget_lane_s32(__p0, __p1) __extension__ ({ \
+ int32x2_t __s0 = __p0; \
+ int32_t __ret; \
+ __ret = (int32_t) __builtin_neon_vget_lane_i32((int8x8_t)__s0, __p1); \
+ __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vget_lane_s64(__p0, __p1) __extension__ ({ \
+ int64x1_t __s0 = __p0; \
+ int64_t __ret; \
+ __ret = (int64_t) __builtin_neon_vget_lane_i64((int8x8_t)__s0, __p1); \
+ __ret; \
+})
+#else
+#define vget_lane_s64(__p0, __p1) __extension__ ({ \
+ int64x1_t __s0 = __p0; \
+ int64_t __ret; \
+ __ret = (int64_t) __builtin_neon_vget_lane_i64((int8x8_t)__s0, __p1); \
+ __ret; \
+})
+#define __noswap_vget_lane_s64(__p0, __p1) __extension__ ({ \
+ int64x1_t __s0 = __p0; \
+ int64_t __ret; \
+ __ret = (int64_t) __builtin_neon_vget_lane_i64((int8x8_t)__s0, __p1); \
+ __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vget_lane_s16(__p0, __p1) __extension__ ({ \
+ int16x4_t __s0 = __p0; \
+ int16_t __ret; \
+ __ret = (int16_t) __builtin_neon_vget_lane_i16((int8x8_t)__s0, __p1); \
+ __ret; \
+})
+#else
+#define vget_lane_s16(__p0, __p1) __extension__ ({ \
+ int16x4_t __s0 = __p0; \
+ int16x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
+ int16_t __ret; \
+ __ret = (int16_t) __builtin_neon_vget_lane_i16((int8x8_t)__rev0, __p1); \
+ __ret; \
+})
+#define __noswap_vget_lane_s16(__p0, __p1) __extension__ ({ \
+ int16x4_t __s0 = __p0; \
+ int16_t __ret; \
+ __ret = (int16_t) __builtin_neon_vget_lane_i16((int8x8_t)__s0, __p1); \
+ __ret; \
+})
+#endif
+
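+/*
+ * Editor's note (not part of the generated header): vget_low_* return the
+ * lower half (lanes 0..N/2-1) of a 128-bit vector. Splitting a q-register
+ * and rejoining the halves round-trips, e.g.:
+ *
+ *   uint8x16_t q = vdupq_n_u8(9);
+ *   uint8x16_t r = vcombine_u8(vget_low_u8(q), vget_high_u8(q));  // r == q
+ */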
+#ifdef __LITTLE_ENDIAN__
+__ai poly8x8_t vget_low_p8(poly8x16_t __p0) {
+ poly8x8_t __ret;
+ __ret = __builtin_shufflevector(__p0, __p0, 0, 1, 2, 3, 4, 5, 6, 7);
+ return __ret;
+}
+#else
+__ai poly8x8_t vget_low_p8(poly8x16_t __p0) {
+ poly8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
+ poly8x8_t __ret;
+ __ret = __builtin_shufflevector(__rev0, __rev0, 0, 1, 2, 3, 4, 5, 6, 7);
+ __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai poly16x4_t vget_low_p16(poly16x8_t __p0) {
+ poly16x4_t __ret;
+ __ret = __builtin_shufflevector(__p0, __p0, 0, 1, 2, 3);
+ return __ret;
+}
+#else
+__ai poly16x4_t vget_low_p16(poly16x8_t __p0) {
+ poly16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
+ poly16x4_t __ret;
+ __ret = __builtin_shufflevector(__rev0, __rev0, 0, 1, 2, 3);
+ __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint8x8_t vget_low_u8(uint8x16_t __p0) {
+ uint8x8_t __ret;
+ __ret = __builtin_shufflevector(__p0, __p0, 0, 1, 2, 3, 4, 5, 6, 7);
+ return __ret;
+}
+#else
+__ai uint8x8_t vget_low_u8(uint8x16_t __p0) {
+ uint8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
+ uint8x8_t __ret;
+ __ret = __builtin_shufflevector(__rev0, __rev0, 0, 1, 2, 3, 4, 5, 6, 7);
+ __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint32x2_t vget_low_u32(uint32x4_t __p0) {
+ uint32x2_t __ret;
+ __ret = __builtin_shufflevector(__p0, __p0, 0, 1);
+ return __ret;
+}
+#else
+__ai uint32x2_t vget_low_u32(uint32x4_t __p0) {
+ uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
+ uint32x2_t __ret;
+ __ret = __builtin_shufflevector(__rev0, __rev0, 0, 1);
+ __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint64x1_t vget_low_u64(uint64x2_t __p0) {
+ uint64x1_t __ret;
+ __ret = __builtin_shufflevector(__p0, __p0, 0);
+ return __ret;
+}
+#else
+__ai uint64x1_t vget_low_u64(uint64x2_t __p0) {
+ uint64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
+ uint64x1_t __ret;
+ __ret = __builtin_shufflevector(__rev0, __rev0, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint16x4_t vget_low_u16(uint16x8_t __p0) {
+ uint16x4_t __ret;
+ __ret = __builtin_shufflevector(__p0, __p0, 0, 1, 2, 3);
+ return __ret;
+}
+#else
+__ai uint16x4_t vget_low_u16(uint16x8_t __p0) {
+ uint16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
+ uint16x4_t __ret;
+ __ret = __builtin_shufflevector(__rev0, __rev0, 0, 1, 2, 3);
+ __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai int8x8_t vget_low_s8(int8x16_t __p0) {
+ int8x8_t __ret;
+ __ret = __builtin_shufflevector(__p0, __p0, 0, 1, 2, 3, 4, 5, 6, 7);
+ return __ret;
+}
+#else
+__ai int8x8_t vget_low_s8(int8x16_t __p0) {
+ int8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
+ int8x8_t __ret;
+ __ret = __builtin_shufflevector(__rev0, __rev0, 0, 1, 2, 3, 4, 5, 6, 7);
+ __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai float32x2_t vget_low_f32(float32x4_t __p0) {
+ float32x2_t __ret;
+ __ret = __builtin_shufflevector(__p0, __p0, 0, 1);
+ return __ret;
+}
+#else
+__ai float32x2_t vget_low_f32(float32x4_t __p0) {
+ float32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
+ float32x2_t __ret;
+ __ret = __builtin_shufflevector(__rev0, __rev0, 0, 1);
+ __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai float16x4_t vget_low_f16(float16x8_t __p0) {
+ float16x4_t __ret;
+ __ret = __builtin_shufflevector(__p0, __p0, 0, 1, 2, 3);
+ return __ret;
+}
+#else
+__ai float16x4_t vget_low_f16(float16x8_t __p0) {
+ float16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
+ float16x4_t __ret;
+ __ret = __builtin_shufflevector(__rev0, __rev0, 0, 1, 2, 3);
+ __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai int32x2_t vget_low_s32(int32x4_t __p0) {
+ int32x2_t __ret;
+ __ret = __builtin_shufflevector(__p0, __p0, 0, 1);
+ return __ret;
+}
+#else
+__ai int32x2_t vget_low_s32(int32x4_t __p0) {
+ int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
+ int32x2_t __ret;
+ __ret = __builtin_shufflevector(__rev0, __rev0, 0, 1);
+ __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai int64x1_t vget_low_s64(int64x2_t __p0) {
+ int64x1_t __ret;
+ __ret = __builtin_shufflevector(__p0, __p0, 0);
+ return __ret;
+}
+#else
+__ai int64x1_t vget_low_s64(int64x2_t __p0) {
+ int64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
+ int64x1_t __ret;
+ __ret = __builtin_shufflevector(__rev0, __rev0, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai int16x4_t vget_low_s16(int16x8_t __p0) {
+ int16x4_t __ret;
+ __ret = __builtin_shufflevector(__p0, __p0, 0, 1, 2, 3);
+ return __ret;
+}
+#else
+__ai int16x4_t vget_low_s16(int16x8_t __p0) {
+ int16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
+ int16x4_t __ret;
+ __ret = __builtin_shufflevector(__rev0, __rev0, 0, 1, 2, 3);
+ __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
+ return __ret;
+}
+#endif
+
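+/*
+ * Editor's note (not part of the generated header): vhadd/vhaddq perform a
+ * halving add: each result lane is (a[i] + b[i]) >> 1, with the sum formed
+ * at full precision so it cannot overflow, and the low bit truncated
+ * (vrhadd is the rounding form). A minimal usage sketch:
+ *
+ *   uint8x8_t a = vdup_n_u8(250);
+ *   uint8x8_t b = vdup_n_u8(4);
+ *   uint8x8_t r = vhadd_u8(a, b);  // each lane: (250 + 4) >> 1 == 127
+ */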
+#ifdef __LITTLE_ENDIAN__
+__ai uint8x16_t vhaddq_u8(uint8x16_t __p0, uint8x16_t __p1) {
+ uint8x16_t __ret;
+ __ret = (uint8x16_t) __builtin_neon_vhaddq_v((int8x16_t)__p0, (int8x16_t)__p1, 48);
+ return __ret;
+}
+#else
+__ai uint8x16_t vhaddq_u8(uint8x16_t __p0, uint8x16_t __p1) {
+ uint8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
+ uint8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
+ uint8x16_t __ret;
+ __ret = (uint8x16_t) __builtin_neon_vhaddq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 48);
+ __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint32x4_t vhaddq_u32(uint32x4_t __p0, uint32x4_t __p1) {
+ uint32x4_t __ret;
+ __ret = (uint32x4_t) __builtin_neon_vhaddq_v((int8x16_t)__p0, (int8x16_t)__p1, 50);
+ return __ret;
+}
+#else
+__ai uint32x4_t vhaddq_u32(uint32x4_t __p0, uint32x4_t __p1) {
+ uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
+ uint32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
+ uint32x4_t __ret;
+ __ret = (uint32x4_t) __builtin_neon_vhaddq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 50);
+ __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint16x8_t vhaddq_u16(uint16x8_t __p0, uint16x8_t __p1) {
+ uint16x8_t __ret;
+ __ret = (uint16x8_t) __builtin_neon_vhaddq_v((int8x16_t)__p0, (int8x16_t)__p1, 49);
+ return __ret;
+}
+#else
+__ai uint16x8_t vhaddq_u16(uint16x8_t __p0, uint16x8_t __p1) {
+ uint16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
+ uint16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
+ uint16x8_t __ret;
+ __ret = (uint16x8_t) __builtin_neon_vhaddq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 49);
+ __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai int8x16_t vhaddq_s8(int8x16_t __p0, int8x16_t __p1) {
+ int8x16_t __ret;
+ __ret = (int8x16_t) __builtin_neon_vhaddq_v((int8x16_t)__p0, (int8x16_t)__p1, 32);
+ return __ret;
+}
+#else
+__ai int8x16_t vhaddq_s8(int8x16_t __p0, int8x16_t __p1) {
+ int8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
+ int8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
+ int8x16_t __ret;
+ __ret = (int8x16_t) __builtin_neon_vhaddq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 32);
+ __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai int32x4_t vhaddq_s32(int32x4_t __p0, int32x4_t __p1) {
+ int32x4_t __ret;
+ __ret = (int32x4_t) __builtin_neon_vhaddq_v((int8x16_t)__p0, (int8x16_t)__p1, 34);
+ return __ret;
+}
+#else
+__ai int32x4_t vhaddq_s32(int32x4_t __p0, int32x4_t __p1) {
+ int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
+ int32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
+ int32x4_t __ret;
+ __ret = (int32x4_t) __builtin_neon_vhaddq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 34);
+ __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai int16x8_t vhaddq_s16(int16x8_t __p0, int16x8_t __p1) {
+ int16x8_t __ret;
+ __ret = (int16x8_t) __builtin_neon_vhaddq_v((int8x16_t)__p0, (int8x16_t)__p1, 33);
+ return __ret;
+}
+#else
+__ai int16x8_t vhaddq_s16(int16x8_t __p0, int16x8_t __p1) {
+ int16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
+ int16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
+ int16x8_t __ret;
+ __ret = (int16x8_t) __builtin_neon_vhaddq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 33);
+ __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint8x8_t vhadd_u8(uint8x8_t __p0, uint8x8_t __p1) {
+ uint8x8_t __ret;
+ __ret = (uint8x8_t) __builtin_neon_vhadd_v((int8x8_t)__p0, (int8x8_t)__p1, 16);
+ return __ret;
+}
+#else
+__ai uint8x8_t vhadd_u8(uint8x8_t __p0, uint8x8_t __p1) {
+ uint8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
+ uint8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
+ uint8x8_t __ret;
+ __ret = (uint8x8_t) __builtin_neon_vhadd_v((int8x8_t)__rev0, (int8x8_t)__rev1, 16);
+ __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint32x2_t vhadd_u32(uint32x2_t __p0, uint32x2_t __p1) {
+ uint32x2_t __ret;
+ __ret = (uint32x2_t) __builtin_neon_vhadd_v((int8x8_t)__p0, (int8x8_t)__p1, 18);
+ return __ret;
+}
+#else
+__ai uint32x2_t vhadd_u32(uint32x2_t __p0, uint32x2_t __p1) {
+ uint32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
+ uint32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
+ uint32x2_t __ret;
+ __ret = (uint32x2_t) __builtin_neon_vhadd_v((int8x8_t)__rev0, (int8x8_t)__rev1, 18);
+ __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint16x4_t vhadd_u16(uint16x4_t __p0, uint16x4_t __p1) {
+ uint16x4_t __ret;
+ __ret = (uint16x4_t) __builtin_neon_vhadd_v((int8x8_t)__p0, (int8x8_t)__p1, 17);
+ return __ret;
+}
+#else
+__ai uint16x4_t vhadd_u16(uint16x4_t __p0, uint16x4_t __p1) {
+ uint16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
+ uint16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
+ uint16x4_t __ret;
+ __ret = (uint16x4_t) __builtin_neon_vhadd_v((int8x8_t)__rev0, (int8x8_t)__rev1, 17);
+ __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai int8x8_t vhadd_s8(int8x8_t __p0, int8x8_t __p1) {
+ int8x8_t __ret;
+ __ret = (int8x8_t) __builtin_neon_vhadd_v((int8x8_t)__p0, (int8x8_t)__p1, 0);
+ return __ret;
+}
+#else
+__ai int8x8_t vhadd_s8(int8x8_t __p0, int8x8_t __p1) {
+ int8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
+ int8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
+ int8x8_t __ret;
+ __ret = (int8x8_t) __builtin_neon_vhadd_v((int8x8_t)__rev0, (int8x8_t)__rev1, 0);
+ __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai int32x2_t vhadd_s32(int32x2_t __p0, int32x2_t __p1) {
+ int32x2_t __ret;
+ __ret = (int32x2_t) __builtin_neon_vhadd_v((int8x8_t)__p0, (int8x8_t)__p1, 2);
+ return __ret;
+}
+#else
+__ai int32x2_t vhadd_s32(int32x2_t __p0, int32x2_t __p1) {
+ int32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
+ int32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
+ int32x2_t __ret;
+ __ret = (int32x2_t) __builtin_neon_vhadd_v((int8x8_t)__rev0, (int8x8_t)__rev1, 2);
+ __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai int16x4_t vhadd_s16(int16x4_t __p0, int16x4_t __p1) {
+ int16x4_t __ret;
+ __ret = (int16x4_t) __builtin_neon_vhadd_v((int8x8_t)__p0, (int8x8_t)__p1, 1);
+ return __ret;
+}
+#else
+__ai int16x4_t vhadd_s16(int16x4_t __p0, int16x4_t __p1) {
+ int16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
+ int16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
+ int16x4_t __ret;
+ __ret = (int16x4_t) __builtin_neon_vhadd_v((int8x8_t)__rev0, (int8x8_t)__rev1, 1);
+ __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
+ return __ret;
+}
+#endif
+
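+/*
+ * Editor's note (not part of the generated header): vhsub/vhsubq perform a
+ * halving subtract: each result lane is (a[i] - b[i]) >> 1, with the
+ * difference formed at full precision so it cannot wrap. A minimal usage
+ * sketch:
+ *
+ *   int16x4_t a = vdup_n_s16(10);
+ *   int16x4_t b = vdup_n_s16(3);
+ *   int16x4_t r = vhsub_s16(a, b);  // each lane: (10 - 3) >> 1 == 3
+ */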
+#ifdef __LITTLE_ENDIAN__
+__ai uint8x16_t vhsubq_u8(uint8x16_t __p0, uint8x16_t __p1) {
+ uint8x16_t __ret;
+ __ret = (uint8x16_t) __builtin_neon_vhsubq_v((int8x16_t)__p0, (int8x16_t)__p1, 48);
+ return __ret;
+}
+#else
+__ai uint8x16_t vhsubq_u8(uint8x16_t __p0, uint8x16_t __p1) {
+ uint8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
+ uint8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
+ uint8x16_t __ret;
+ __ret = (uint8x16_t) __builtin_neon_vhsubq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 48);
+ __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint32x4_t vhsubq_u32(uint32x4_t __p0, uint32x4_t __p1) {
+ uint32x4_t __ret;
+ __ret = (uint32x4_t) __builtin_neon_vhsubq_v((int8x16_t)__p0, (int8x16_t)__p1, 50);
+ return __ret;
+}
+#else
+__ai uint32x4_t vhsubq_u32(uint32x4_t __p0, uint32x4_t __p1) {
+ uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
+ uint32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
+ uint32x4_t __ret;
+ __ret = (uint32x4_t) __builtin_neon_vhsubq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 50);
+ __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint16x8_t vhsubq_u16(uint16x8_t __p0, uint16x8_t __p1) {
+ uint16x8_t __ret;
+ __ret = (uint16x8_t) __builtin_neon_vhsubq_v((int8x16_t)__p0, (int8x16_t)__p1, 49);
+ return __ret;
+}
+#else
+__ai uint16x8_t vhsubq_u16(uint16x8_t __p0, uint16x8_t __p1) {
+ uint16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
+ uint16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
+ uint16x8_t __ret;
+ __ret = (uint16x8_t) __builtin_neon_vhsubq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 49);
+ __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai int8x16_t vhsubq_s8(int8x16_t __p0, int8x16_t __p1) {
+ int8x16_t __ret;
+ __ret = (int8x16_t) __builtin_neon_vhsubq_v((int8x16_t)__p0, (int8x16_t)__p1, 32);
+ return __ret;
+}
+#else
+__ai int8x16_t vhsubq_s8(int8x16_t __p0, int8x16_t __p1) {
+ int8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
+ int8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
+ int8x16_t __ret;
+ __ret = (int8x16_t) __builtin_neon_vhsubq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 32);
+ __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai int32x4_t vhsubq_s32(int32x4_t __p0, int32x4_t __p1) {
+ int32x4_t __ret;
+ __ret = (int32x4_t) __builtin_neon_vhsubq_v((int8x16_t)__p0, (int8x16_t)__p1, 34);
+ return __ret;
+}
+#else
+__ai int32x4_t vhsubq_s32(int32x4_t __p0, int32x4_t __p1) {
+ int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
+ int32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
+ int32x4_t __ret;
+ __ret = (int32x4_t) __builtin_neon_vhsubq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 34);
+ __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai int16x8_t vhsubq_s16(int16x8_t __p0, int16x8_t __p1) {
+ int16x8_t __ret;
+ __ret = (int16x8_t) __builtin_neon_vhsubq_v((int8x16_t)__p0, (int8x16_t)__p1, 33);
+ return __ret;
+}
+#else
+__ai int16x8_t vhsubq_s16(int16x8_t __p0, int16x8_t __p1) {
+ int16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
+ int16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
+ int16x8_t __ret;
+ __ret = (int16x8_t) __builtin_neon_vhsubq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 33);
+ __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint8x8_t vhsub_u8(uint8x8_t __p0, uint8x8_t __p1) {
+ uint8x8_t __ret;
+ __ret = (uint8x8_t) __builtin_neon_vhsub_v((int8x8_t)__p0, (int8x8_t)__p1, 16);
+ return __ret;
+}
+#else
+__ai uint8x8_t vhsub_u8(uint8x8_t __p0, uint8x8_t __p1) {
+ uint8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
+ uint8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
+ uint8x8_t __ret;
+ __ret = (uint8x8_t) __builtin_neon_vhsub_v((int8x8_t)__rev0, (int8x8_t)__rev1, 16);
+ __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint32x2_t vhsub_u32(uint32x2_t __p0, uint32x2_t __p1) {
+ uint32x2_t __ret;
+ __ret = (uint32x2_t) __builtin_neon_vhsub_v((int8x8_t)__p0, (int8x8_t)__p1, 18);
+ return __ret;
+}
+#else
+__ai uint32x2_t vhsub_u32(uint32x2_t __p0, uint32x2_t __p1) {
+ uint32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
+ uint32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
+ uint32x2_t __ret;
+ __ret = (uint32x2_t) __builtin_neon_vhsub_v((int8x8_t)__rev0, (int8x8_t)__rev1, 18);
+ __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint16x4_t vhsub_u16(uint16x4_t __p0, uint16x4_t __p1) {
+ uint16x4_t __ret;
+ __ret = (uint16x4_t) __builtin_neon_vhsub_v((int8x8_t)__p0, (int8x8_t)__p1, 17);
+ return __ret;
+}
+#else
+__ai uint16x4_t vhsub_u16(uint16x4_t __p0, uint16x4_t __p1) {
+ uint16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
+ uint16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
+ uint16x4_t __ret;
+ __ret = (uint16x4_t) __builtin_neon_vhsub_v((int8x8_t)__rev0, (int8x8_t)__rev1, 17);
+ __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai int8x8_t vhsub_s8(int8x8_t __p0, int8x8_t __p1) {
+ int8x8_t __ret;
+ __ret = (int8x8_t) __builtin_neon_vhsub_v((int8x8_t)__p0, (int8x8_t)__p1, 0);
+ return __ret;
+}
+#else
+__ai int8x8_t vhsub_s8(int8x8_t __p0, int8x8_t __p1) {
+ int8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
+ int8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
+ int8x8_t __ret;
+ __ret = (int8x8_t) __builtin_neon_vhsub_v((int8x8_t)__rev0, (int8x8_t)__rev1, 0);
+ __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai int32x2_t vhsub_s32(int32x2_t __p0, int32x2_t __p1) {
+ int32x2_t __ret;
+ __ret = (int32x2_t) __builtin_neon_vhsub_v((int8x8_t)__p0, (int8x8_t)__p1, 2);
+ return __ret;
+}
+#else
+__ai int32x2_t vhsub_s32(int32x2_t __p0, int32x2_t __p1) {
+ int32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
+ int32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
+ int32x2_t __ret;
+ __ret = (int32x2_t) __builtin_neon_vhsub_v((int8x8_t)__rev0, (int8x8_t)__rev1, 2);
+ __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai int16x4_t vhsub_s16(int16x4_t __p0, int16x4_t __p1) {
+ int16x4_t __ret;
+ __ret = (int16x4_t) __builtin_neon_vhsub_v((int8x8_t)__p0, (int8x8_t)__p1, 1);
+ return __ret;
+}
+#else
+__ai int16x4_t vhsub_s16(int16x4_t __p0, int16x4_t __p1) {
+ int16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
+ int16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
+ int16x4_t __ret;
+ __ret = (int16x4_t) __builtin_neon_vhsub_v((int8x8_t)__rev0, (int8x8_t)__rev1, 1);
+ __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
+ return __ret;
+}
+#endif
+
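+/*
+ * Editor's note (not part of the generated header): vld1/vld1q load one
+ * 64-bit or 128-bit vector from memory. On big-endian targets the wrapper
+ * reverses the in-register lane order after the load, which appears to keep
+ * lane 0 mapped to the lowest-addressed element. A minimal usage sketch:
+ *
+ *   const uint8_t buf[16] = {0};
+ *   uint8x16_t v = vld1q_u8(buf);  // loads 16 bytes starting at buf
+ */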
+#ifdef __LITTLE_ENDIAN__
+#define vld1_p8(__p0) __extension__ ({ \
+ poly8x8_t __ret; \
+ __ret = (poly8x8_t) __builtin_neon_vld1_v(__p0, 4); \
+ __ret; \
+})
+#else
+#define vld1_p8(__p0) __extension__ ({ \
+ poly8x8_t __ret; \
+ __ret = (poly8x8_t) __builtin_neon_vld1_v(__p0, 4); \
+ __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
+ __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vld1_p16(__p0) __extension__ ({ \
+ poly16x4_t __ret; \
+ __ret = (poly16x4_t) __builtin_neon_vld1_v(__p0, 5); \
+ __ret; \
+})
+#else
+#define vld1_p16(__p0) __extension__ ({ \
+ poly16x4_t __ret; \
+ __ret = (poly16x4_t) __builtin_neon_vld1_v(__p0, 5); \
+ __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
+ __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vld1q_p8(__p0) __extension__ ({ \
+ poly8x16_t __ret; \
+ __ret = (poly8x16_t) __builtin_neon_vld1q_v(__p0, 36); \
+ __ret; \
+})
+#else
+#define vld1q_p8(__p0) __extension__ ({ \
+ poly8x16_t __ret; \
+ __ret = (poly8x16_t) __builtin_neon_vld1q_v(__p0, 36); \
+ __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
+ __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vld1q_p16(__p0) __extension__ ({ \
+ poly16x8_t __ret; \
+ __ret = (poly16x8_t) __builtin_neon_vld1q_v(__p0, 37); \
+ __ret; \
+})
+#else
+#define vld1q_p16(__p0) __extension__ ({ \
+ poly16x8_t __ret; \
+ __ret = (poly16x8_t) __builtin_neon_vld1q_v(__p0, 37); \
+ __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
+ __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vld1q_u8(__p0) __extension__ ({ \
+ uint8x16_t __ret; \
+ __ret = (uint8x16_t) __builtin_neon_vld1q_v(__p0, 48); \
+ __ret; \
+})
+#else
+#define vld1q_u8(__p0) __extension__ ({ \
+ uint8x16_t __ret; \
+ __ret = (uint8x16_t) __builtin_neon_vld1q_v(__p0, 48); \
+ __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
+ __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vld1q_u32(__p0) __extension__ ({ \
+ uint32x4_t __ret; \
+ __ret = (uint32x4_t) __builtin_neon_vld1q_v(__p0, 50); \
+ __ret; \
+})
+#else
+#define vld1q_u32(__p0) __extension__ ({ \
+ uint32x4_t __ret; \
+ __ret = (uint32x4_t) __builtin_neon_vld1q_v(__p0, 50); \
+ __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
+ __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vld1q_u64(__p0) __extension__ ({ \
+ uint64x2_t __ret; \
+ __ret = (uint64x2_t) __builtin_neon_vld1q_v(__p0, 51); \
+ __ret; \
+})
+#else
+#define vld1q_u64(__p0) __extension__ ({ \
+ uint64x2_t __ret; \
+ __ret = (uint64x2_t) __builtin_neon_vld1q_v(__p0, 51); \
+ __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
+ __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vld1q_u16(__p0) __extension__ ({ \
+ uint16x8_t __ret; \
+ __ret = (uint16x8_t) __builtin_neon_vld1q_v(__p0, 49); \
+ __ret; \
+})
+#else
+#define vld1q_u16(__p0) __extension__ ({ \
+ uint16x8_t __ret; \
+ __ret = (uint16x8_t) __builtin_neon_vld1q_v(__p0, 49); \
+ __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
+ __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vld1q_s8(__p0) __extension__ ({ \
+ int8x16_t __ret; \
+ __ret = (int8x16_t) __builtin_neon_vld1q_v(__p0, 32); \
+ __ret; \
+})
+#else
+#define vld1q_s8(__p0) __extension__ ({ \
+ int8x16_t __ret; \
+ __ret = (int8x16_t) __builtin_neon_vld1q_v(__p0, 32); \
+ __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
+ __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vld1q_f32(__p0) __extension__ ({ \
+ float32x4_t __ret; \
+ __ret = (float32x4_t) __builtin_neon_vld1q_v(__p0, 41); \
+ __ret; \
+})
+#else
+#define vld1q_f32(__p0) __extension__ ({ \
+ float32x4_t __ret; \
+ __ret = (float32x4_t) __builtin_neon_vld1q_v(__p0, 41); \
+ __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
+ __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vld1q_f16(__p0) __extension__ ({ \
+ float16x8_t __ret; \
+ __ret = (float16x8_t) __builtin_neon_vld1q_v(__p0, 40); \
+ __ret; \
+})
+#else
+#define vld1q_f16(__p0) __extension__ ({ \
+ float16x8_t __ret; \
+ __ret = (float16x8_t) __builtin_neon_vld1q_v(__p0, 40); \
+ __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
+ __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vld1q_s32(__p0) __extension__ ({ \
+ int32x4_t __ret; \
+ __ret = (int32x4_t) __builtin_neon_vld1q_v(__p0, 34); \
+ __ret; \
+})
+#else
+#define vld1q_s32(__p0) __extension__ ({ \
+ int32x4_t __ret; \
+ __ret = (int32x4_t) __builtin_neon_vld1q_v(__p0, 34); \
+ __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
+ __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vld1q_s64(__p0) __extension__ ({ \
+ int64x2_t __ret; \
+ __ret = (int64x2_t) __builtin_neon_vld1q_v(__p0, 35); \
+ __ret; \
+})
+#else
+#define vld1q_s64(__p0) __extension__ ({ \
+ int64x2_t __ret; \
+ __ret = (int64x2_t) __builtin_neon_vld1q_v(__p0, 35); \
+ __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
+ __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vld1q_s16(__p0) __extension__ ({ \
+ int16x8_t __ret; \
+ __ret = (int16x8_t) __builtin_neon_vld1q_v(__p0, 33); \
+ __ret; \
+})
+#else
+#define vld1q_s16(__p0) __extension__ ({ \
+ int16x8_t __ret; \
+ __ret = (int16x8_t) __builtin_neon_vld1q_v(__p0, 33); \
+ __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
+ __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vld1_u8(__p0) __extension__ ({ \
+ uint8x8_t __ret; \
+ __ret = (uint8x8_t) __builtin_neon_vld1_v(__p0, 16); \
+ __ret; \
+})
+#else
+#define vld1_u8(__p0) __extension__ ({ \
+ uint8x8_t __ret; \
+ __ret = (uint8x8_t) __builtin_neon_vld1_v(__p0, 16); \
+ __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
+ __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vld1_u32(__p0) __extension__ ({ \
+ uint32x2_t __ret; \
+ __ret = (uint32x2_t) __builtin_neon_vld1_v(__p0, 18); \
+ __ret; \
+})
+#else
+#define vld1_u32(__p0) __extension__ ({ \
+ uint32x2_t __ret; \
+ __ret = (uint32x2_t) __builtin_neon_vld1_v(__p0, 18); \
+ __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
+ __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vld1_u64(__p0) __extension__ ({ \
+ uint64x1_t __ret; \
+ __ret = (uint64x1_t) __builtin_neon_vld1_v(__p0, 19); \
+ __ret; \
+})
+#else
+#define vld1_u64(__p0) __extension__ ({ \
+ uint64x1_t __ret; \
+ __ret = (uint64x1_t) __builtin_neon_vld1_v(__p0, 19); \
+ __ret; \
+})
+#endif
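+
+/* Editor's note: for single-lane vectors such as uint64x1_t there is no
+ * lane order to reverse, so the little- and big-endian definitions above
+ * are intentionally identical. */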
+
+#ifdef __LITTLE_ENDIAN__
+#define vld1_u16(__p0) __extension__ ({ \
+ uint16x4_t __ret; \
+ __ret = (uint16x4_t) __builtin_neon_vld1_v(__p0, 17); \
+ __ret; \
+})
+#else
+#define vld1_u16(__p0) __extension__ ({ \
+ uint16x4_t __ret; \
+ __ret = (uint16x4_t) __builtin_neon_vld1_v(__p0, 17); \
+ __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
+ __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vld1_s8(__p0) __extension__ ({ \
+ int8x8_t __ret; \
+ __ret = (int8x8_t) __builtin_neon_vld1_v(__p0, 0); \
+ __ret; \
+})
+#else
+#define vld1_s8(__p0) __extension__ ({ \
+ int8x8_t __ret; \
+ __ret = (int8x8_t) __builtin_neon_vld1_v(__p0, 0); \
+ __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
+ __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vld1_f32(__p0) __extension__ ({ \
+ float32x2_t __ret; \
+ __ret = (float32x2_t) __builtin_neon_vld1_v(__p0, 9); \
+ __ret; \
+})
+#else
+#define vld1_f32(__p0) __extension__ ({ \
+ float32x2_t __ret; \
+ __ret = (float32x2_t) __builtin_neon_vld1_v(__p0, 9); \
+ __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
+ __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vld1_f16(__p0) __extension__ ({ \
+ float16x4_t __ret; \
+ __ret = (float16x4_t) __builtin_neon_vld1_v(__p0, 8); \
+ __ret; \
+})
+#else
+#define vld1_f16(__p0) __extension__ ({ \
+ float16x4_t __ret; \
+ __ret = (float16x4_t) __builtin_neon_vld1_v(__p0, 8); \
+ __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
+ __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vld1_s32(__p0) __extension__ ({ \
+ int32x2_t __ret; \
+ __ret = (int32x2_t) __builtin_neon_vld1_v(__p0, 2); \
+ __ret; \
+})
+#else
+#define vld1_s32(__p0) __extension__ ({ \
+ int32x2_t __ret; \
+ __ret = (int32x2_t) __builtin_neon_vld1_v(__p0, 2); \
+ __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
+ __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vld1_s64(__p0) __extension__ ({ \
+ int64x1_t __ret; \
+ __ret = (int64x1_t) __builtin_neon_vld1_v(__p0, 3); \
+ __ret; \
+})
+#else
+#define vld1_s64(__p0) __extension__ ({ \
+ int64x1_t __ret; \
+ __ret = (int64x1_t) __builtin_neon_vld1_v(__p0, 3); \
+ __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vld1_s16(__p0) __extension__ ({ \
+ int16x4_t __ret; \
+ __ret = (int16x4_t) __builtin_neon_vld1_v(__p0, 1); \
+ __ret; \
+})
+#else
+#define vld1_s16(__p0) __extension__ ({ \
+ int16x4_t __ret; \
+ __ret = (int16x4_t) __builtin_neon_vld1_v(__p0, 1); \
+ __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
+ __ret; \
+})
+#endif
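+
+/* Editor's note: a minimal usage sketch (hypothetical buffer) for the
+ * vld1 family above. vld1_* loads one 64-bit d-register vector and
+ * vld1q_* one 128-bit q-register vector from memory; lane 0 always
+ * corresponds to the lowest address, on either endianness:
+ *
+ *   const int32_t buf[6] = {1, 2, 3, 4, 5, 6};
+ *   int32x4_t q = vld1q_s32(buf);       // lanes {1, 2, 3, 4}
+ *   int32x2_t d = vld1_s32(buf + 4);    // lanes {5, 6}
+ */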
+
+#ifdef __LITTLE_ENDIAN__
+#define vld1_dup_p8(__p0) __extension__ ({ \
+ poly8x8_t __ret; \
+ __ret = (poly8x8_t) __builtin_neon_vld1_dup_v(__p0, 4); \
+ __ret; \
+})
+#else
+#define vld1_dup_p8(__p0) __extension__ ({ \
+ poly8x8_t __ret; \
+ __ret = (poly8x8_t) __builtin_neon_vld1_dup_v(__p0, 4); \
+ __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
+ __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vld1_dup_p16(__p0) __extension__ ({ \
+ poly16x4_t __ret; \
+ __ret = (poly16x4_t) __builtin_neon_vld1_dup_v(__p0, 5); \
+ __ret; \
+})
+#else
+#define vld1_dup_p16(__p0) __extension__ ({ \
+ poly16x4_t __ret; \
+ __ret = (poly16x4_t) __builtin_neon_vld1_dup_v(__p0, 5); \
+ __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
+ __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vld1q_dup_p8(__p0) __extension__ ({ \
+ poly8x16_t __ret; \
+ __ret = (poly8x16_t) __builtin_neon_vld1q_dup_v(__p0, 36); \
+ __ret; \
+})
+#else
+#define vld1q_dup_p8(__p0) __extension__ ({ \
+ poly8x16_t __ret; \
+ __ret = (poly8x16_t) __builtin_neon_vld1q_dup_v(__p0, 36); \
+ __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
+ __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vld1q_dup_p16(__p0) __extension__ ({ \
+ poly16x8_t __ret; \
+ __ret = (poly16x8_t) __builtin_neon_vld1q_dup_v(__p0, 37); \
+ __ret; \
+})
+#else
+#define vld1q_dup_p16(__p0) __extension__ ({ \
+ poly16x8_t __ret; \
+ __ret = (poly16x8_t) __builtin_neon_vld1q_dup_v(__p0, 37); \
+ __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
+ __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vld1q_dup_u8(__p0) __extension__ ({ \
+ uint8x16_t __ret; \
+ __ret = (uint8x16_t) __builtin_neon_vld1q_dup_v(__p0, 48); \
+ __ret; \
+})
+#else
+#define vld1q_dup_u8(__p0) __extension__ ({ \
+ uint8x16_t __ret; \
+ __ret = (uint8x16_t) __builtin_neon_vld1q_dup_v(__p0, 48); \
+ __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
+ __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vld1q_dup_u32(__p0) __extension__ ({ \
+ uint32x4_t __ret; \
+ __ret = (uint32x4_t) __builtin_neon_vld1q_dup_v(__p0, 50); \
+ __ret; \
+})
+#else
+#define vld1q_dup_u32(__p0) __extension__ ({ \
+ uint32x4_t __ret; \
+ __ret = (uint32x4_t) __builtin_neon_vld1q_dup_v(__p0, 50); \
+ __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
+ __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vld1q_dup_u64(__p0) __extension__ ({ \
+ uint64x2_t __ret; \
+ __ret = (uint64x2_t) __builtin_neon_vld1q_dup_v(__p0, 51); \
+ __ret; \
+})
+#else
+#define vld1q_dup_u64(__p0) __extension__ ({ \
+ uint64x2_t __ret; \
+ __ret = (uint64x2_t) __builtin_neon_vld1q_dup_v(__p0, 51); \
+ __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
+ __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vld1q_dup_u16(__p0) __extension__ ({ \
+ uint16x8_t __ret; \
+ __ret = (uint16x8_t) __builtin_neon_vld1q_dup_v(__p0, 49); \
+ __ret; \
+})
+#else
+#define vld1q_dup_u16(__p0) __extension__ ({ \
+ uint16x8_t __ret; \
+ __ret = (uint16x8_t) __builtin_neon_vld1q_dup_v(__p0, 49); \
+ __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
+ __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vld1q_dup_s8(__p0) __extension__ ({ \
+ int8x16_t __ret; \
+ __ret = (int8x16_t) __builtin_neon_vld1q_dup_v(__p0, 32); \
+ __ret; \
+})
+#else
+#define vld1q_dup_s8(__p0) __extension__ ({ \
+ int8x16_t __ret; \
+ __ret = (int8x16_t) __builtin_neon_vld1q_dup_v(__p0, 32); \
+ __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
+ __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vld1q_dup_f32(__p0) __extension__ ({ \
+ float32x4_t __ret; \
+ __ret = (float32x4_t) __builtin_neon_vld1q_dup_v(__p0, 41); \
+ __ret; \
+})
+#else
+#define vld1q_dup_f32(__p0) __extension__ ({ \
+ float32x4_t __ret; \
+ __ret = (float32x4_t) __builtin_neon_vld1q_dup_v(__p0, 41); \
+ __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
+ __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vld1q_dup_f16(__p0) __extension__ ({ \
+ float16x8_t __ret; \
+ __ret = (float16x8_t) __builtin_neon_vld1q_dup_v(__p0, 40); \
+ __ret; \
+})
+#else
+#define vld1q_dup_f16(__p0) __extension__ ({ \
+ float16x8_t __ret; \
+ __ret = (float16x8_t) __builtin_neon_vld1q_dup_v(__p0, 40); \
+ __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
+ __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vld1q_dup_s32(__p0) __extension__ ({ \
+ int32x4_t __ret; \
+ __ret = (int32x4_t) __builtin_neon_vld1q_dup_v(__p0, 34); \
+ __ret; \
+})
+#else
+#define vld1q_dup_s32(__p0) __extension__ ({ \
+ int32x4_t __ret; \
+ __ret = (int32x4_t) __builtin_neon_vld1q_dup_v(__p0, 34); \
+ __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
+ __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vld1q_dup_s64(__p0) __extension__ ({ \
+ int64x2_t __ret; \
+ __ret = (int64x2_t) __builtin_neon_vld1q_dup_v(__p0, 35); \
+ __ret; \
+})
+#else
+#define vld1q_dup_s64(__p0) __extension__ ({ \
+ int64x2_t __ret; \
+ __ret = (int64x2_t) __builtin_neon_vld1q_dup_v(__p0, 35); \
+ __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
+ __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vld1q_dup_s16(__p0) __extension__ ({ \
+ int16x8_t __ret; \
+ __ret = (int16x8_t) __builtin_neon_vld1q_dup_v(__p0, 33); \
+ __ret; \
+})
+#else
+#define vld1q_dup_s16(__p0) __extension__ ({ \
+ int16x8_t __ret; \
+ __ret = (int16x8_t) __builtin_neon_vld1q_dup_v(__p0, 33); \
+ __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
+ __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vld1_dup_u8(__p0) __extension__ ({ \
+ uint8x8_t __ret; \
+ __ret = (uint8x8_t) __builtin_neon_vld1_dup_v(__p0, 16); \
+ __ret; \
+})
+#else
+#define vld1_dup_u8(__p0) __extension__ ({ \
+ uint8x8_t __ret; \
+ __ret = (uint8x8_t) __builtin_neon_vld1_dup_v(__p0, 16); \
+ __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
+ __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vld1_dup_u32(__p0) __extension__ ({ \
+ uint32x2_t __ret; \
+ __ret = (uint32x2_t) __builtin_neon_vld1_dup_v(__p0, 18); \
+ __ret; \
+})
+#else
+#define vld1_dup_u32(__p0) __extension__ ({ \
+ uint32x2_t __ret; \
+ __ret = (uint32x2_t) __builtin_neon_vld1_dup_v(__p0, 18); \
+ __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
+ __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vld1_dup_u64(__p0) __extension__ ({ \
+ uint64x1_t __ret; \
+ __ret = (uint64x1_t) __builtin_neon_vld1_dup_v(__p0, 19); \
+ __ret; \
+})
+#else
+#define vld1_dup_u64(__p0) __extension__ ({ \
+ uint64x1_t __ret; \
+ __ret = (uint64x1_t) __builtin_neon_vld1_dup_v(__p0, 19); \
+ __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vld1_dup_u16(__p0) __extension__ ({ \
+ uint16x4_t __ret; \
+ __ret = (uint16x4_t) __builtin_neon_vld1_dup_v(__p0, 17); \
+ __ret; \
+})
+#else
+#define vld1_dup_u16(__p0) __extension__ ({ \
+ uint16x4_t __ret; \
+ __ret = (uint16x4_t) __builtin_neon_vld1_dup_v(__p0, 17); \
+ __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
+ __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vld1_dup_s8(__p0) __extension__ ({ \
+ int8x8_t __ret; \
+ __ret = (int8x8_t) __builtin_neon_vld1_dup_v(__p0, 0); \
+ __ret; \
+})
+#else
+#define vld1_dup_s8(__p0) __extension__ ({ \
+ int8x8_t __ret; \
+ __ret = (int8x8_t) __builtin_neon_vld1_dup_v(__p0, 0); \
+ __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
+ __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vld1_dup_f32(__p0) __extension__ ({ \
+ float32x2_t __ret; \
+ __ret = (float32x2_t) __builtin_neon_vld1_dup_v(__p0, 9); \
+ __ret; \
+})
+#else
+#define vld1_dup_f32(__p0) __extension__ ({ \
+ float32x2_t __ret; \
+ __ret = (float32x2_t) __builtin_neon_vld1_dup_v(__p0, 9); \
+ __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
+ __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vld1_dup_f16(__p0) __extension__ ({ \
+ float16x4_t __ret; \
+ __ret = (float16x4_t) __builtin_neon_vld1_dup_v(__p0, 8); \
+ __ret; \
+})
+#else
+#define vld1_dup_f16(__p0) __extension__ ({ \
+ float16x4_t __ret; \
+ __ret = (float16x4_t) __builtin_neon_vld1_dup_v(__p0, 8); \
+ __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
+ __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vld1_dup_s32(__p0) __extension__ ({ \
+ int32x2_t __ret; \
+ __ret = (int32x2_t) __builtin_neon_vld1_dup_v(__p0, 2); \
+ __ret; \
+})
+#else
+#define vld1_dup_s32(__p0) __extension__ ({ \
+ int32x2_t __ret; \
+ __ret = (int32x2_t) __builtin_neon_vld1_dup_v(__p0, 2); \
+ __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
+ __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vld1_dup_s64(__p0) __extension__ ({ \
+ int64x1_t __ret; \
+ __ret = (int64x1_t) __builtin_neon_vld1_dup_v(__p0, 3); \
+ __ret; \
+})
+#else
+#define vld1_dup_s64(__p0) __extension__ ({ \
+ int64x1_t __ret; \
+ __ret = (int64x1_t) __builtin_neon_vld1_dup_v(__p0, 3); \
+ __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vld1_dup_s16(__p0) __extension__ ({ \
+ int16x4_t __ret; \
+ __ret = (int16x4_t) __builtin_neon_vld1_dup_v(__p0, 1); \
+ __ret; \
+})
+#else
+#define vld1_dup_s16(__p0) __extension__ ({ \
+ int16x4_t __ret; \
+ __ret = (int16x4_t) __builtin_neon_vld1_dup_v(__p0, 1); \
+ __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
+ __ret; \
+})
+#endif
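+
+/* Editor's note: usage sketch (hypothetical value) for the vld1_dup
+ * family above, which loads a single element and broadcasts it to every
+ * lane of the result:
+ *
+ *   const float c = 3.5f;
+ *   float32x4_t v = vld1q_dup_f32(&c);   // all four lanes == 3.5f
+ */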
+
+#ifdef __LITTLE_ENDIAN__
+#define vld1_lane_p8(__p0, __p1, __p2) __extension__ ({ \
+ poly8x8_t __s1 = __p1; \
+ poly8x8_t __ret; \
+ __ret = (poly8x8_t) __builtin_neon_vld1_lane_v(__p0, (int8x8_t)__s1, __p2, 4); \
+ __ret; \
+})
+#else
+#define vld1_lane_p8(__p0, __p1, __p2) __extension__ ({ \
+ poly8x8_t __s1 = __p1; \
+ poly8x8_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \
+ poly8x8_t __ret; \
+ __ret = (poly8x8_t) __builtin_neon_vld1_lane_v(__p0, (int8x8_t)__rev1, __p2, 4); \
+ __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
+ __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vld1_lane_p16(__p0, __p1, __p2) __extension__ ({ \
+ poly16x4_t __s1 = __p1; \
+ poly16x4_t __ret; \
+ __ret = (poly16x4_t) __builtin_neon_vld1_lane_v(__p0, (int8x8_t)__s1, __p2, 5); \
+ __ret; \
+})
+#else
+#define vld1_lane_p16(__p0, __p1, __p2) __extension__ ({ \
+ poly16x4_t __s1 = __p1; \
+ poly16x4_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
+ poly16x4_t __ret; \
+ __ret = (poly16x4_t) __builtin_neon_vld1_lane_v(__p0, (int8x8_t)__rev1, __p2, 5); \
+ __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
+ __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vld1q_lane_p8(__p0, __p1, __p2) __extension__ ({ \
+ poly8x16_t __s1 = __p1; \
+ poly8x16_t __ret; \
+ __ret = (poly8x16_t) __builtin_neon_vld1q_lane_v(__p0, (int8x16_t)__s1, __p2, 36); \
+ __ret; \
+})
+#else
+#define vld1q_lane_p8(__p0, __p1, __p2) __extension__ ({ \
+ poly8x16_t __s1 = __p1; \
+ poly8x16_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
+ poly8x16_t __ret; \
+ __ret = (poly8x16_t) __builtin_neon_vld1q_lane_v(__p0, (int8x16_t)__rev1, __p2, 36); \
+ __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
+ __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vld1q_lane_p16(__p0, __p1, __p2) __extension__ ({ \
+ poly16x8_t __s1 = __p1; \
+ poly16x8_t __ret; \
+ __ret = (poly16x8_t) __builtin_neon_vld1q_lane_v(__p0, (int8x16_t)__s1, __p2, 37); \
+ __ret; \
+})
+#else
+#define vld1q_lane_p16(__p0, __p1, __p2) __extension__ ({ \
+ poly16x8_t __s1 = __p1; \
+ poly16x8_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \
+ poly16x8_t __ret; \
+ __ret = (poly16x8_t) __builtin_neon_vld1q_lane_v(__p0, (int8x16_t)__rev1, __p2, 37); \
+ __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
+ __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vld1q_lane_u8(__p0, __p1, __p2) __extension__ ({ \
+ uint8x16_t __s1 = __p1; \
+ uint8x16_t __ret; \
+ __ret = (uint8x16_t) __builtin_neon_vld1q_lane_v(__p0, (int8x16_t)__s1, __p2, 48); \
+ __ret; \
+})
+#else
+#define vld1q_lane_u8(__p0, __p1, __p2) __extension__ ({ \
+ uint8x16_t __s1 = __p1; \
+ uint8x16_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
+ uint8x16_t __ret; \
+ __ret = (uint8x16_t) __builtin_neon_vld1q_lane_v(__p0, (int8x16_t)__rev1, __p2, 48); \
+ __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
+ __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vld1q_lane_u32(__p0, __p1, __p2) __extension__ ({ \
+ uint32x4_t __s1 = __p1; \
+ uint32x4_t __ret; \
+ __ret = (uint32x4_t) __builtin_neon_vld1q_lane_v(__p0, (int8x16_t)__s1, __p2, 50); \
+ __ret; \
+})
+#else
+#define vld1q_lane_u32(__p0, __p1, __p2) __extension__ ({ \
+ uint32x4_t __s1 = __p1; \
+ uint32x4_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
+ uint32x4_t __ret; \
+ __ret = (uint32x4_t) __builtin_neon_vld1q_lane_v(__p0, (int8x16_t)__rev1, __p2, 50); \
+ __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
+ __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vld1q_lane_u64(__p0, __p1, __p2) __extension__ ({ \
+ uint64x2_t __s1 = __p1; \
+ uint64x2_t __ret; \
+ __ret = (uint64x2_t) __builtin_neon_vld1q_lane_v(__p0, (int8x16_t)__s1, __p2, 51); \
+ __ret; \
+})
+#else
+#define vld1q_lane_u64(__p0, __p1, __p2) __extension__ ({ \
+ uint64x2_t __s1 = __p1; \
+ uint64x2_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \
+ uint64x2_t __ret; \
+ __ret = (uint64x2_t) __builtin_neon_vld1q_lane_v(__p0, (int8x16_t)__rev1, __p2, 51); \
+ __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
+ __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vld1q_lane_u16(__p0, __p1, __p2) __extension__ ({ \
+ uint16x8_t __s1 = __p1; \
+ uint16x8_t __ret; \
+ __ret = (uint16x8_t) __builtin_neon_vld1q_lane_v(__p0, (int8x16_t)__s1, __p2, 49); \
+ __ret; \
+})
+#else
+#define vld1q_lane_u16(__p0, __p1, __p2) __extension__ ({ \
+ uint16x8_t __s1 = __p1; \
+ uint16x8_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \
+ uint16x8_t __ret; \
+ __ret = (uint16x8_t) __builtin_neon_vld1q_lane_v(__p0, (int8x16_t)__rev1, __p2, 49); \
+ __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
+ __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vld1q_lane_s8(__p0, __p1, __p2) __extension__ ({ \
+ int8x16_t __s1 = __p1; \
+ int8x16_t __ret; \
+ __ret = (int8x16_t) __builtin_neon_vld1q_lane_v(__p0, (int8x16_t)__s1, __p2, 32); \
+ __ret; \
+})
+#else
+#define vld1q_lane_s8(__p0, __p1, __p2) __extension__ ({ \
+ int8x16_t __s1 = __p1; \
+ int8x16_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
+ int8x16_t __ret; \
+ __ret = (int8x16_t) __builtin_neon_vld1q_lane_v(__p0, (int8x16_t)__rev1, __p2, 32); \
+ __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
+ __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vld1q_lane_f32(__p0, __p1, __p2) __extension__ ({ \
+ float32x4_t __s1 = __p1; \
+ float32x4_t __ret; \
+ __ret = (float32x4_t) __builtin_neon_vld1q_lane_v(__p0, (int8x16_t)__s1, __p2, 41); \
+ __ret; \
+})
+#else
+#define vld1q_lane_f32(__p0, __p1, __p2) __extension__ ({ \
+ float32x4_t __s1 = __p1; \
+ float32x4_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
+ float32x4_t __ret; \
+ __ret = (float32x4_t) __builtin_neon_vld1q_lane_v(__p0, (int8x16_t)__rev1, __p2, 41); \
+ __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
+ __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vld1q_lane_f16(__p0, __p1, __p2) __extension__ ({ \
+ float16x8_t __s1 = __p1; \
+ float16x8_t __ret; \
+ __ret = (float16x8_t) __builtin_neon_vld1q_lane_v(__p0, (int8x16_t)__s1, __p2, 40); \
+ __ret; \
+})
+#else
+#define vld1q_lane_f16(__p0, __p1, __p2) __extension__ ({ \
+ float16x8_t __s1 = __p1; \
+ float16x8_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \
+ float16x8_t __ret; \
+ __ret = (float16x8_t) __builtin_neon_vld1q_lane_v(__p0, (int8x16_t)__rev1, __p2, 40); \
+ __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
+ __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vld1q_lane_s32(__p0, __p1, __p2) __extension__ ({ \
+ int32x4_t __s1 = __p1; \
+ int32x4_t __ret; \
+ __ret = (int32x4_t) __builtin_neon_vld1q_lane_v(__p0, (int8x16_t)__s1, __p2, 34); \
+ __ret; \
+})
+#else
+#define vld1q_lane_s32(__p0, __p1, __p2) __extension__ ({ \
+ int32x4_t __s1 = __p1; \
+ int32x4_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
+ int32x4_t __ret; \
+ __ret = (int32x4_t) __builtin_neon_vld1q_lane_v(__p0, (int8x16_t)__rev1, __p2, 34); \
+ __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
+ __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vld1q_lane_s64(__p0, __p1, __p2) __extension__ ({ \
+ int64x2_t __s1 = __p1; \
+ int64x2_t __ret; \
+ __ret = (int64x2_t) __builtin_neon_vld1q_lane_v(__p0, (int8x16_t)__s1, __p2, 35); \
+ __ret; \
+})
+#else
+#define vld1q_lane_s64(__p0, __p1, __p2) __extension__ ({ \
+ int64x2_t __s1 = __p1; \
+ int64x2_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \
+ int64x2_t __ret; \
+ __ret = (int64x2_t) __builtin_neon_vld1q_lane_v(__p0, (int8x16_t)__rev1, __p2, 35); \
+ __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
+ __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vld1q_lane_s16(__p0, __p1, __p2) __extension__ ({ \
+ int16x8_t __s1 = __p1; \
+ int16x8_t __ret; \
+ __ret = (int16x8_t) __builtin_neon_vld1q_lane_v(__p0, (int8x16_t)__s1, __p2, 33); \
+ __ret; \
+})
+#else
+#define vld1q_lane_s16(__p0, __p1, __p2) __extension__ ({ \
+ int16x8_t __s1 = __p1; \
+ int16x8_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \
+ int16x8_t __ret; \
+ __ret = (int16x8_t) __builtin_neon_vld1q_lane_v(__p0, (int8x16_t)__rev1, __p2, 33); \
+ __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
+ __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vld1_lane_u8(__p0, __p1, __p2) __extension__ ({ \
+ uint8x8_t __s1 = __p1; \
+ uint8x8_t __ret; \
+ __ret = (uint8x8_t) __builtin_neon_vld1_lane_v(__p0, (int8x8_t)__s1, __p2, 16); \
+ __ret; \
+})
+#else
+#define vld1_lane_u8(__p0, __p1, __p2) __extension__ ({ \
+ uint8x8_t __s1 = __p1; \
+ uint8x8_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \
+ uint8x8_t __ret; \
+ __ret = (uint8x8_t) __builtin_neon_vld1_lane_v(__p0, (int8x8_t)__rev1, __p2, 16); \
+ __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
+ __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vld1_lane_u32(__p0, __p1, __p2) __extension__ ({ \
+ uint32x2_t __s1 = __p1; \
+ uint32x2_t __ret; \
+ __ret = (uint32x2_t) __builtin_neon_vld1_lane_v(__p0, (int8x8_t)__s1, __p2, 18); \
+ __ret; \
+})
+#else
+#define vld1_lane_u32(__p0, __p1, __p2) __extension__ ({ \
+ uint32x2_t __s1 = __p1; \
+ uint32x2_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \
+ uint32x2_t __ret; \
+ __ret = (uint32x2_t) __builtin_neon_vld1_lane_v(__p0, (int8x8_t)__rev1, __p2, 18); \
+ __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
+ __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vld1_lane_u64(__p0, __p1, __p2) __extension__ ({ \
+ uint64x1_t __s1 = __p1; \
+ uint64x1_t __ret; \
+ __ret = (uint64x1_t) __builtin_neon_vld1_lane_v(__p0, (int8x8_t)__s1, __p2, 19); \
+ __ret; \
+})
+#else
+#define vld1_lane_u64(__p0, __p1, __p2) __extension__ ({ \
+ uint64x1_t __s1 = __p1; \
+ uint64x1_t __ret; \
+ __ret = (uint64x1_t) __builtin_neon_vld1_lane_v(__p0, (int8x8_t)__s1, __p2, 19); \
+ __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vld1_lane_u16(__p0, __p1, __p2) __extension__ ({ \
+ uint16x4_t __s1 = __p1; \
+ uint16x4_t __ret; \
+ __ret = (uint16x4_t) __builtin_neon_vld1_lane_v(__p0, (int8x8_t)__s1, __p2, 17); \
+ __ret; \
+})
+#else
+#define vld1_lane_u16(__p0, __p1, __p2) __extension__ ({ \
+ uint16x4_t __s1 = __p1; \
+ uint16x4_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
+ uint16x4_t __ret; \
+ __ret = (uint16x4_t) __builtin_neon_vld1_lane_v(__p0, (int8x8_t)__rev1, __p2, 17); \
+ __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
+ __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vld1_lane_s8(__p0, __p1, __p2) __extension__ ({ \
+ int8x8_t __s1 = __p1; \
+ int8x8_t __ret; \
+ __ret = (int8x8_t) __builtin_neon_vld1_lane_v(__p0, (int8x8_t)__s1, __p2, 0); \
+ __ret; \
+})
+#else
+#define vld1_lane_s8(__p0, __p1, __p2) __extension__ ({ \
+ int8x8_t __s1 = __p1; \
+ int8x8_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \
+ int8x8_t __ret; \
+ __ret = (int8x8_t) __builtin_neon_vld1_lane_v(__p0, (int8x8_t)__rev1, __p2, 0); \
+ __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
+ __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vld1_lane_f32(__p0, __p1, __p2) __extension__ ({ \
+ float32x2_t __s1 = __p1; \
+ float32x2_t __ret; \
+ __ret = (float32x2_t) __builtin_neon_vld1_lane_v(__p0, (int8x8_t)__s1, __p2, 9); \
+ __ret; \
+})
+#else
+#define vld1_lane_f32(__p0, __p1, __p2) __extension__ ({ \
+ float32x2_t __s1 = __p1; \
+ float32x2_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \
+ float32x2_t __ret; \
+ __ret = (float32x2_t) __builtin_neon_vld1_lane_v(__p0, (int8x8_t)__rev1, __p2, 9); \
+ __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
+ __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vld1_lane_f16(__p0, __p1, __p2) __extension__ ({ \
+ float16x4_t __s1 = __p1; \
+ float16x4_t __ret; \
+ __ret = (float16x4_t) __builtin_neon_vld1_lane_v(__p0, (int8x8_t)__s1, __p2, 8); \
+ __ret; \
+})
+#else
+#define vld1_lane_f16(__p0, __p1, __p2) __extension__ ({ \
+ float16x4_t __s1 = __p1; \
+ float16x4_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
+ float16x4_t __ret; \
+ __ret = (float16x4_t) __builtin_neon_vld1_lane_v(__p0, (int8x8_t)__rev1, __p2, 8); \
+ __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
+ __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vld1_lane_s32(__p0, __p1, __p2) __extension__ ({ \
+ int32x2_t __s1 = __p1; \
+ int32x2_t __ret; \
+ __ret = (int32x2_t) __builtin_neon_vld1_lane_v(__p0, (int8x8_t)__s1, __p2, 2); \
+ __ret; \
+})
+#else
+#define vld1_lane_s32(__p0, __p1, __p2) __extension__ ({ \
+ int32x2_t __s1 = __p1; \
+ int32x2_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \
+ int32x2_t __ret; \
+ __ret = (int32x2_t) __builtin_neon_vld1_lane_v(__p0, (int8x8_t)__rev1, __p2, 2); \
+ __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
+ __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vld1_lane_s64(__p0, __p1, __p2) __extension__ ({ \
+ int64x1_t __s1 = __p1; \
+ int64x1_t __ret; \
+ __ret = (int64x1_t) __builtin_neon_vld1_lane_v(__p0, (int8x8_t)__s1, __p2, 3); \
+ __ret; \
+})
+#else
+#define vld1_lane_s64(__p0, __p1, __p2) __extension__ ({ \
+ int64x1_t __s1 = __p1; \
+ int64x1_t __ret; \
+ __ret = (int64x1_t) __builtin_neon_vld1_lane_v(__p0, (int8x8_t)__s1, __p2, 3); \
+ __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vld1_lane_s16(__p0, __p1, __p2) __extension__ ({ \
+ int16x4_t __s1 = __p1; \
+ int16x4_t __ret; \
+ __ret = (int16x4_t) __builtin_neon_vld1_lane_v(__p0, (int8x8_t)__s1, __p2, 1); \
+ __ret; \
+})
+#else
+#define vld1_lane_s16(__p0, __p1, __p2) __extension__ ({ \
+ int16x4_t __s1 = __p1; \
+ int16x4_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
+ int16x4_t __ret; \
+ __ret = (int16x4_t) __builtin_neon_vld1_lane_v(__p0, (int8x8_t)__rev1, __p2, 1); \
+ __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
+ __ret; \
+})
+#endif
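+
+/* Editor's note: usage sketch (hypothetical values) for the vld1_lane
+ * family above, which loads a single element into one lane of an
+ * existing vector and leaves the other lanes unchanged:
+ *
+ *   int16x4_t v = vdup_n_s16(0);
+ *   const int16_t x = 7;
+ *   v = vld1_lane_s16(&x, v, 2);   // v == {0, 0, 7, 0}
+ */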
+
+#ifdef __LITTLE_ENDIAN__
+#define vld2_p8(__p0) __extension__ ({ \
+ poly8x8x2_t __ret; \
+ __builtin_neon_vld2_v(&__ret, __p0, 4); \
+ __ret; \
+})
+#else
+#define vld2_p8(__p0) __extension__ ({ \
+ poly8x8x2_t __ret; \
+ __builtin_neon_vld2_v(&__ret, __p0, 4); \
+ \
+ __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
+ __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
+ __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vld2_p16(__p0) __extension__ ({ \
+ poly16x4x2_t __ret; \
+ __builtin_neon_vld2_v(&__ret, __p0, 5); \
+ __ret; \
+})
+#else
+#define vld2_p16(__p0) __extension__ ({ \
+ poly16x4x2_t __ret; \
+ __builtin_neon_vld2_v(&__ret, __p0, 5); \
+ \
+ __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \
+ __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \
+ __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vld2q_p8(__p0) __extension__ ({ \
+ poly8x16x2_t __ret; \
+ __builtin_neon_vld2q_v(&__ret, __p0, 36); \
+ __ret; \
+})
+#else
+#define vld2q_p8(__p0) __extension__ ({ \
+ poly8x16x2_t __ret; \
+ __builtin_neon_vld2q_v(&__ret, __p0, 36); \
+ \
+ __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
+ __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
+ __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vld2q_p16(__p0) __extension__ ({ \
+ poly16x8x2_t __ret; \
+ __builtin_neon_vld2q_v(&__ret, __p0, 37); \
+ __ret; \
+})
+#else
+#define vld2q_p16(__p0) __extension__ ({ \
+ poly16x8x2_t __ret; \
+ __builtin_neon_vld2q_v(&__ret, __p0, 37); \
+ \
+ __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
+ __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
+ __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vld2q_u8(__p0) __extension__ ({ \
+ uint8x16x2_t __ret; \
+ __builtin_neon_vld2q_v(&__ret, __p0, 48); \
+ __ret; \
+})
+#else
+#define vld2q_u8(__p0) __extension__ ({ \
+ uint8x16x2_t __ret; \
+ __builtin_neon_vld2q_v(&__ret, __p0, 48); \
+ \
+ __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
+ __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
+ __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vld2q_u32(__p0) __extension__ ({ \
+ uint32x4x2_t __ret; \
+ __builtin_neon_vld2q_v(&__ret, __p0, 50); \
+ __ret; \
+})
+#else
+#define vld2q_u32(__p0) __extension__ ({ \
+ uint32x4x2_t __ret; \
+ __builtin_neon_vld2q_v(&__ret, __p0, 50); \
+ \
+ __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \
+ __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \
+ __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vld2q_u16(__p0) __extension__ ({ \
+ uint16x8x2_t __ret; \
+ __builtin_neon_vld2q_v(&__ret, __p0, 49); \
+ __ret; \
+})
+#else
+#define vld2q_u16(__p0) __extension__ ({ \
+ uint16x8x2_t __ret; \
+ __builtin_neon_vld2q_v(&__ret, __p0, 49); \
+ \
+ __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
+ __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
+ __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vld2q_s8(__p0) __extension__ ({ \
+ int8x16x2_t __ret; \
+ __builtin_neon_vld2q_v(&__ret, __p0, 32); \
+ __ret; \
+})
+#else
+#define vld2q_s8(__p0) __extension__ ({ \
+ int8x16x2_t __ret; \
+ __builtin_neon_vld2q_v(&__ret, __p0, 32); \
+ \
+ __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
+ __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
+ __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vld2q_f32(__p0) __extension__ ({ \
+ float32x4x2_t __ret; \
+ __builtin_neon_vld2q_v(&__ret, __p0, 41); \
+ __ret; \
+})
+#else
+#define vld2q_f32(__p0) __extension__ ({ \
+ float32x4x2_t __ret; \
+ __builtin_neon_vld2q_v(&__ret, __p0, 41); \
+ \
+ __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \
+ __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \
+ __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vld2q_f16(__p0) __extension__ ({ \
+ float16x8x2_t __ret; \
+ __builtin_neon_vld2q_v(&__ret, __p0, 40); \
+ __ret; \
+})
+#else
+#define vld2q_f16(__p0) __extension__ ({ \
+ float16x8x2_t __ret; \
+ __builtin_neon_vld2q_v(&__ret, __p0, 40); \
+ \
+ __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
+ __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
+ __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vld2q_s32(__p0) __extension__ ({ \
+ int32x4x2_t __ret; \
+ __builtin_neon_vld2q_v(&__ret, __p0, 34); \
+ __ret; \
+})
+#else
+#define vld2q_s32(__p0) __extension__ ({ \
+ int32x4x2_t __ret; \
+ __builtin_neon_vld2q_v(&__ret, __p0, 34); \
+ \
+ __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \
+ __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \
+ __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vld2q_s16(__p0) __extension__ ({ \
+ int16x8x2_t __ret; \
+ __builtin_neon_vld2q_v(&__ret, __p0, 33); \
+ __ret; \
+})
+#else
+#define vld2q_s16(__p0) __extension__ ({ \
+ int16x8x2_t __ret; \
+ __builtin_neon_vld2q_v(&__ret, __p0, 33); \
+ \
+ __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
+ __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
+ __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vld2_u8(__p0) __extension__ ({ \
+ uint8x8x2_t __ret; \
+ __builtin_neon_vld2_v(&__ret, __p0, 16); \
+ __ret; \
+})
+#else
+#define vld2_u8(__p0) __extension__ ({ \
+ uint8x8x2_t __ret; \
+ __builtin_neon_vld2_v(&__ret, __p0, 16); \
+ \
+ __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
+ __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
+ __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vld2_u32(__p0) __extension__ ({ \
+ uint32x2x2_t __ret; \
+ __builtin_neon_vld2_v(&__ret, __p0, 18); \
+ __ret; \
+})
+#else
+#define vld2_u32(__p0) __extension__ ({ \
+ uint32x2x2_t __ret; \
+ __builtin_neon_vld2_v(&__ret, __p0, 18); \
+ \
+ __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \
+ __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \
+ __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vld2_u64(__p0) __extension__ ({ \
+ uint64x1x2_t __ret; \
+ __builtin_neon_vld2_v(&__ret, __p0, 19); \
+ __ret; \
+})
+#else
+#define vld2_u64(__p0) __extension__ ({ \
+ uint64x1x2_t __ret; \
+ __builtin_neon_vld2_v(&__ret, __p0, 19); \
+ __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vld2_u16(__p0) __extension__ ({ \
+ uint16x4x2_t __ret; \
+ __builtin_neon_vld2_v(&__ret, __p0, 17); \
+ __ret; \
+})
+#else
+#define vld2_u16(__p0) __extension__ ({ \
+ uint16x4x2_t __ret; \
+ __builtin_neon_vld2_v(&__ret, __p0, 17); \
+ \
+ __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \
+ __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \
+ __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vld2_s8(__p0) __extension__ ({ \
+ int8x8x2_t __ret; \
+ __builtin_neon_vld2_v(&__ret, __p0, 0); \
+ __ret; \
+})
+#else
+#define vld2_s8(__p0) __extension__ ({ \
+ int8x8x2_t __ret; \
+ __builtin_neon_vld2_v(&__ret, __p0, 0); \
+ \
+ __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
+ __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
+ __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vld2_f32(__p0) __extension__ ({ \
+ float32x2x2_t __ret; \
+ __builtin_neon_vld2_v(&__ret, __p0, 9); \
+ __ret; \
+})
+#else
+#define vld2_f32(__p0) __extension__ ({ \
+ float32x2x2_t __ret; \
+ __builtin_neon_vld2_v(&__ret, __p0, 9); \
+ \
+ __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \
+ __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \
+ __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vld2_f16(__p0) __extension__ ({ \
+ float16x4x2_t __ret; \
+ __builtin_neon_vld2_v(&__ret, __p0, 8); \
+ __ret; \
+})
+#else
+#define vld2_f16(__p0) __extension__ ({ \
+ float16x4x2_t __ret; \
+ __builtin_neon_vld2_v(&__ret, __p0, 8); \
+ \
+ __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \
+ __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \
+ __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vld2_s32(__p0) __extension__ ({ \
+ int32x2x2_t __ret; \
+ __builtin_neon_vld2_v(&__ret, __p0, 2); \
+ __ret; \
+})
+#else
+#define vld2_s32(__p0) __extension__ ({ \
+ int32x2x2_t __ret; \
+ __builtin_neon_vld2_v(&__ret, __p0, 2); \
+ \
+ __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \
+ __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \
+ __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vld2_s64(__p0) __extension__ ({ \
+ int64x1x2_t __ret; \
+ __builtin_neon_vld2_v(&__ret, __p0, 3); \
+ __ret; \
+})
+#else
+#define vld2_s64(__p0) __extension__ ({ \
+ int64x1x2_t __ret; \
+ __builtin_neon_vld2_v(&__ret, __p0, 3); \
+ __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vld2_s16(__p0) __extension__ ({ \
+ int16x4x2_t __ret; \
+ __builtin_neon_vld2_v(&__ret, __p0, 1); \
+ __ret; \
+})
+#else
+#define vld2_s16(__p0) __extension__ ({ \
+ int16x4x2_t __ret; \
+ __builtin_neon_vld2_v(&__ret, __p0, 1); \
+ \
+ __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \
+ __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \
+ __ret; \
+})
+#endif
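+
+/* Editor's note: usage sketch (hypothetical data) for the vld2 family
+ * above, which performs a de-interleaving load: consecutive element
+ * pairs from memory are split across the two vectors of the returned
+ * x2 struct:
+ *
+ *   const int16_t stereo[8] = {0, 10, 1, 11, 2, 12, 3, 13};
+ *   int16x4x2_t ch = vld2_s16(stereo);
+ *   // ch.val[0] == {0, 1, 2, 3}, ch.val[1] == {10, 11, 12, 13}
+ */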
+
+#ifdef __LITTLE_ENDIAN__
+#define vld2_dup_p8(__p0) __extension__ ({ \
+ poly8x8x2_t __ret; \
+ __builtin_neon_vld2_dup_v(&__ret, __p0, 4); \
+ __ret; \
+})
+#else
+#define vld2_dup_p8(__p0) __extension__ ({ \
+ poly8x8x2_t __ret; \
+ __builtin_neon_vld2_dup_v(&__ret, __p0, 4); \
+ \
+ __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
+ __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
+ __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vld2_dup_p16(__p0) __extension__ ({ \
+ poly16x4x2_t __ret; \
+ __builtin_neon_vld2_dup_v(&__ret, __p0, 5); \
+ __ret; \
+})
+#else
+#define vld2_dup_p16(__p0) __extension__ ({ \
+ poly16x4x2_t __ret; \
+ __builtin_neon_vld2_dup_v(&__ret, __p0, 5); \
+ \
+ __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \
+ __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \
+ __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vld2_dup_u8(__p0) __extension__ ({ \
+ uint8x8x2_t __ret; \
+ __builtin_neon_vld2_dup_v(&__ret, __p0, 16); \
+ __ret; \
+})
+#else
+#define vld2_dup_u8(__p0) __extension__ ({ \
+ uint8x8x2_t __ret; \
+ __builtin_neon_vld2_dup_v(&__ret, __p0, 16); \
+ \
+ __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
+ __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
+ __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vld2_dup_u32(__p0) __extension__ ({ \
+ uint32x2x2_t __ret; \
+ __builtin_neon_vld2_dup_v(&__ret, __p0, 18); \
+ __ret; \
+})
+#else
+#define vld2_dup_u32(__p0) __extension__ ({ \
+ uint32x2x2_t __ret; \
+ __builtin_neon_vld2_dup_v(&__ret, __p0, 18); \
+ \
+ __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \
+ __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \
+ __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vld2_dup_u64(__p0) __extension__ ({ \
+ uint64x1x2_t __ret; \
+ __builtin_neon_vld2_dup_v(&__ret, __p0, 19); \
+ __ret; \
+})
+#else
+#define vld2_dup_u64(__p0) __extension__ ({ \
+ uint64x1x2_t __ret; \
+ __builtin_neon_vld2_dup_v(&__ret, __p0, 19); \
+ __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vld2_dup_u16(__p0) __extension__ ({ \
+ uint16x4x2_t __ret; \
+ __builtin_neon_vld2_dup_v(&__ret, __p0, 17); \
+ __ret; \
+})
+#else
+#define vld2_dup_u16(__p0) __extension__ ({ \
+ uint16x4x2_t __ret; \
+ __builtin_neon_vld2_dup_v(&__ret, __p0, 17); \
+ \
+ __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \
+ __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \
+ __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vld2_dup_s8(__p0) __extension__ ({ \
+ int8x8x2_t __ret; \
+ __builtin_neon_vld2_dup_v(&__ret, __p0, 0); \
+ __ret; \
+})
+#else
+#define vld2_dup_s8(__p0) __extension__ ({ \
+ int8x8x2_t __ret; \
+ __builtin_neon_vld2_dup_v(&__ret, __p0, 0); \
+ \
+ __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
+ __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
+ __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vld2_dup_f32(__p0) __extension__ ({ \
+ float32x2x2_t __ret; \
+ __builtin_neon_vld2_dup_v(&__ret, __p0, 9); \
+ __ret; \
+})
+#else
+#define vld2_dup_f32(__p0) __extension__ ({ \
+ float32x2x2_t __ret; \
+ __builtin_neon_vld2_dup_v(&__ret, __p0, 9); \
+ \
+ __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \
+ __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \
+ __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vld2_dup_f16(__p0) __extension__ ({ \
+ float16x4x2_t __ret; \
+ __builtin_neon_vld2_dup_v(&__ret, __p0, 8); \
+ __ret; \
+})
+#else
+#define vld2_dup_f16(__p0) __extension__ ({ \
+ float16x4x2_t __ret; \
+ __builtin_neon_vld2_dup_v(&__ret, __p0, 8); \
+ \
+ __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \
+ __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \
+ __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vld2_dup_s32(__p0) __extension__ ({ \
+ int32x2x2_t __ret; \
+ __builtin_neon_vld2_dup_v(&__ret, __p0, 2); \
+ __ret; \
+})
+#else
+#define vld2_dup_s32(__p0) __extension__ ({ \
+ int32x2x2_t __ret; \
+ __builtin_neon_vld2_dup_v(&__ret, __p0, 2); \
+ \
+ __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \
+ __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \
+ __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vld2_dup_s64(__p0) __extension__ ({ \
+ int64x1x2_t __ret; \
+ __builtin_neon_vld2_dup_v(&__ret, __p0, 3); \
+ __ret; \
+})
+#else
+#define vld2_dup_s64(__p0) __extension__ ({ \
+ int64x1x2_t __ret; \
+ __builtin_neon_vld2_dup_v(&__ret, __p0, 3); \
+ __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vld2_dup_s16(__p0) __extension__ ({ \
+ int16x4x2_t __ret; \
+ __builtin_neon_vld2_dup_v(&__ret, __p0, 1); \
+ __ret; \
+})
+#else
+#define vld2_dup_s16(__p0) __extension__ ({ \
+ int16x4x2_t __ret; \
+ __builtin_neon_vld2_dup_v(&__ret, __p0, 1); \
+ \
+ __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \
+ __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \
+ __ret; \
+})
+#endif
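+
+/* Editor's note: usage sketch (hypothetical pair) for the vld2_dup
+ * family above, which loads one adjacent pair of elements and
+ * broadcasts element 0 across val[0] and element 1 across val[1]:
+ *
+ *   const uint32_t pair[2] = {5, 9};
+ *   uint32x2x2_t d = vld2_dup_u32(pair);
+ *   // d.val[0] == {5, 5}, d.val[1] == {9, 9}
+ */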
+
+#ifdef __LITTLE_ENDIAN__
+#define vld2_lane_p8(__p0, __p1, __p2) __extension__ ({ \
+ poly8x8x2_t __s1 = __p1; \
+ poly8x8x2_t __ret; \
+ __builtin_neon_vld2_lane_v(&__ret, __p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], __p2, 4); \
+ __ret; \
+})
+#else
+#define vld2_lane_p8(__p0, __p1, __p2) __extension__ ({ \
+ poly8x8x2_t __s1 = __p1; \
+ poly8x8x2_t __rev1; \
+ __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
+ __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
+ poly8x8x2_t __ret; \
+ __builtin_neon_vld2_lane_v(&__ret, __p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], __p2, 4); \
+ \
+ __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
+ __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
+ __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vld2_lane_p16(__p0, __p1, __p2) __extension__ ({ \
+ poly16x4x2_t __s1 = __p1; \
+ poly16x4x2_t __ret; \
+ __builtin_neon_vld2_lane_v(&__ret, __p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], __p2, 5); \
+ __ret; \
+})
+#else
+#define vld2_lane_p16(__p0, __p1, __p2) __extension__ ({ \
+ poly16x4x2_t __s1 = __p1; \
+ poly16x4x2_t __rev1; \
+ __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \
+ __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \
+ poly16x4x2_t __ret; \
+ __builtin_neon_vld2_lane_v(&__ret, __p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], __p2, 5); \
+ \
+ __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \
+ __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \
+ __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vld2q_lane_p16(__p0, __p1, __p2) __extension__ ({ \
+ poly16x8x2_t __s1 = __p1; \
+ poly16x8x2_t __ret; \
+ __builtin_neon_vld2q_lane_v(&__ret, __p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], __p2, 37); \
+ __ret; \
+})
+#else
+#define vld2q_lane_p16(__p0, __p1, __p2) __extension__ ({ \
+ poly16x8x2_t __s1 = __p1; \
+ poly16x8x2_t __rev1; \
+ __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
+ __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
+ poly16x8x2_t __ret; \
+ __builtin_neon_vld2q_lane_v(&__ret, __p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], __p2, 37); \
+ \
+ __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
+ __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
+ __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vld2q_lane_u32(__p0, __p1, __p2) __extension__ ({ \
+ uint32x4x2_t __s1 = __p1; \
+ uint32x4x2_t __ret; \
+ __builtin_neon_vld2q_lane_v(&__ret, __p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], __p2, 50); \
+ __ret; \
+})
+#else
+#define vld2q_lane_u32(__p0, __p1, __p2) __extension__ ({ \
+ uint32x4x2_t __s1 = __p1; \
+ uint32x4x2_t __rev1; \
+ __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \
+ __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \
+ uint32x4x2_t __ret; \
+ __builtin_neon_vld2q_lane_v(&__ret, __p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], __p2, 50); \
+ \
+ __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \
+ __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \
+ __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vld2q_lane_u16(__p0, __p1, __p2) __extension__ ({ \
+ uint16x8x2_t __s1 = __p1; \
+ uint16x8x2_t __ret; \
+ __builtin_neon_vld2q_lane_v(&__ret, __p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], __p2, 49); \
+ __ret; \
+})
+#else
+#define vld2q_lane_u16(__p0, __p1, __p2) __extension__ ({ \
+ uint16x8x2_t __s1 = __p1; \
+ uint16x8x2_t __rev1; \
+ __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
+ __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
+ uint16x8x2_t __ret; \
+ __builtin_neon_vld2q_lane_v(&__ret, __p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], __p2, 49); \
+ \
+ __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
+ __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
+ __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vld2q_lane_f32(__p0, __p1, __p2) __extension__ ({ \
+ float32x4x2_t __s1 = __p1; \
+ float32x4x2_t __ret; \
+ __builtin_neon_vld2q_lane_v(&__ret, __p0, __s1.val[0], __s1.val[1], __p2, 41); \
+ __ret; \
+})
+#else
+#define vld2q_lane_f32(__p0, __p1, __p2) __extension__ ({ \
+ float32x4x2_t __s1 = __p1; \
+ float32x4x2_t __rev1; \
+ __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \
+ __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \
+ float32x4x2_t __ret; \
+ __builtin_neon_vld2q_lane_v(&__ret, __p0, __rev1.val[0], __rev1.val[1], __p2, 41); \
+ \
+ __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \
+ __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \
+ __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vld2q_lane_f16(__p0, __p1, __p2) __extension__ ({ \
+ float16x8x2_t __s1 = __p1; \
+ float16x8x2_t __ret; \
+ __builtin_neon_vld2q_lane_v(&__ret, __p0, __s1.val[0], __s1.val[1], __p2, 40); \
+ __ret; \
+})
+#else
+#define vld2q_lane_f16(__p0, __p1, __p2) __extension__ ({ \
+ float16x8x2_t __s1 = __p1; \
+ float16x8x2_t __rev1; \
+ __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
+ __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
+ float16x8x2_t __ret; \
+ __builtin_neon_vld2q_lane_v(&__ret, __p0, __rev1.val[0], __rev1.val[1], __p2, 40); \
+ \
+ __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
+ __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
+ __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vld2q_lane_s32(__p0, __p1, __p2) __extension__ ({ \
+ int32x4x2_t __s1 = __p1; \
+ int32x4x2_t __ret; \
+ __builtin_neon_vld2q_lane_v(&__ret, __p0, __s1.val[0], __s1.val[1], __p2, 34); \
+ __ret; \
+})
+#else
+#define vld2q_lane_s32(__p0, __p1, __p2) __extension__ ({ \
+ int32x4x2_t __s1 = __p1; \
+ int32x4x2_t __rev1; \
+ __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \
+ __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \
+ int32x4x2_t __ret; \
+ __builtin_neon_vld2q_lane_v(&__ret, __p0, __rev1.val[0], __rev1.val[1], __p2, 34); \
+ \
+ __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \
+ __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \
+ __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vld2q_lane_s16(__p0, __p1, __p2) __extension__ ({ \
+ int16x8x2_t __s1 = __p1; \
+ int16x8x2_t __ret; \
+ __builtin_neon_vld2q_lane_v(&__ret, __p0, __s1.val[0], __s1.val[1], __p2, 33); \
+ __ret; \
+})
+#else
+#define vld2q_lane_s16(__p0, __p1, __p2) __extension__ ({ \
+ int16x8x2_t __s1 = __p1; \
+ int16x8x2_t __rev1; \
+ __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
+ __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
+ int16x8x2_t __ret; \
+ __builtin_neon_vld2q_lane_v(&__ret, __p0, __rev1.val[0], __rev1.val[1], __p2, 33); \
+ \
+ __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
+ __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
+ __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vld2_lane_u8(__p0, __p1, __p2) __extension__ ({ \
+ uint8x8x2_t __s1 = __p1; \
+ uint8x8x2_t __ret; \
+ __builtin_neon_vld2_lane_v(&__ret, __p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], __p2, 16); \
+ __ret; \
+})
+#else
+#define vld2_lane_u8(__p0, __p1, __p2) __extension__ ({ \
+ uint8x8x2_t __s1 = __p1; \
+ uint8x8x2_t __rev1; \
+ __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
+ __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
+ uint8x8x2_t __ret; \
+ __builtin_neon_vld2_lane_v(&__ret, __p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], __p2, 16); \
+ \
+ __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
+ __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
+ __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vld2_lane_u32(__p0, __p1, __p2) __extension__ ({ \
+ uint32x2x2_t __s1 = __p1; \
+ uint32x2x2_t __ret; \
+ __builtin_neon_vld2_lane_v(&__ret, __p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], __p2, 18); \
+ __ret; \
+})
+#else
+#define vld2_lane_u32(__p0, __p1, __p2) __extension__ ({ \
+ uint32x2x2_t __s1 = __p1; \
+ uint32x2x2_t __rev1; \
+ __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \
+ __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \
+ uint32x2x2_t __ret; \
+ __builtin_neon_vld2_lane_v(&__ret, __p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], __p2, 18); \
+ \
+ __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \
+ __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \
+ __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vld2_lane_u16(__p0, __p1, __p2) __extension__ ({ \
+ uint16x4x2_t __s1 = __p1; \
+ uint16x4x2_t __ret; \
+ __builtin_neon_vld2_lane_v(&__ret, __p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], __p2, 17); \
+ __ret; \
+})
+#else
+#define vld2_lane_u16(__p0, __p1, __p2) __extension__ ({ \
+ uint16x4x2_t __s1 = __p1; \
+ uint16x4x2_t __rev1; \
+ __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \
+ __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \
+ uint16x4x2_t __ret; \
+ __builtin_neon_vld2_lane_v(&__ret, __p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], __p2, 17); \
+ \
+ __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \
+ __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \
+ __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vld2_lane_s8(__p0, __p1, __p2) __extension__ ({ \
+ int8x8x2_t __s1 = __p1; \
+ int8x8x2_t __ret; \
+ __builtin_neon_vld2_lane_v(&__ret, __p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], __p2, 0); \
+ __ret; \
+})
+#else
+#define vld2_lane_s8(__p0, __p1, __p2) __extension__ ({ \
+ int8x8x2_t __s1 = __p1; \
+ int8x8x2_t __rev1; \
+ __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
+ __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
+ int8x8x2_t __ret; \
+ __builtin_neon_vld2_lane_v(&__ret, __p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], __p2, 0); \
+ \
+ __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
+ __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
+ __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vld2_lane_f32(__p0, __p1, __p2) __extension__ ({ \
+ float32x2x2_t __s1 = __p1; \
+ float32x2x2_t __ret; \
+ __builtin_neon_vld2_lane_v(&__ret, __p0, __s1.val[0], __s1.val[1], __p2, 9); \
+ __ret; \
+})
+#else
+#define vld2_lane_f32(__p0, __p1, __p2) __extension__ ({ \
+ float32x2x2_t __s1 = __p1; \
+ float32x2x2_t __rev1; \
+ __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \
+ __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \
+ float32x2x2_t __ret; \
+ __builtin_neon_vld2_lane_v(&__ret, __p0, __rev1.val[0], __rev1.val[1], __p2, 9); \
+ \
+ __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \
+ __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \
+ __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vld2_lane_f16(__p0, __p1, __p2) __extension__ ({ \
+ float16x4x2_t __s1 = __p1; \
+ float16x4x2_t __ret; \
+ __builtin_neon_vld2_lane_v(&__ret, __p0, __s1.val[0], __s1.val[1], __p2, 8); \
+ __ret; \
+})
+#else
+#define vld2_lane_f16(__p0, __p1, __p2) __extension__ ({ \
+ float16x4x2_t __s1 = __p1; \
+ float16x4x2_t __rev1; \
+ __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \
+ __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \
+ float16x4x2_t __ret; \
+ __builtin_neon_vld2_lane_v(&__ret, __p0, __rev1.val[0], __rev1.val[1], __p2, 8); \
+ \
+ __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \
+ __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \
+ __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vld2_lane_s32(__p0, __p1, __p2) __extension__ ({ \
+ int32x2x2_t __s1 = __p1; \
+ int32x2x2_t __ret; \
+ __builtin_neon_vld2_lane_v(&__ret, __p0, __s1.val[0], __s1.val[1], __p2, 2); \
+ __ret; \
+})
+#else
+#define vld2_lane_s32(__p0, __p1, __p2) __extension__ ({ \
+ int32x2x2_t __s1 = __p1; \
+ int32x2x2_t __rev1; \
+ __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \
+ __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \
+ int32x2x2_t __ret; \
+ __builtin_neon_vld2_lane_v(&__ret, __p0, __rev1.val[0], __rev1.val[1], __p2, 2); \
+ \
+ __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \
+ __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \
+ __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vld2_lane_s16(__p0, __p1, __p2) __extension__ ({ \
+ int16x4x2_t __s1 = __p1; \
+ int16x4x2_t __ret; \
+ __builtin_neon_vld2_lane_v(&__ret, __p0, __s1.val[0], __s1.val[1], __p2, 1); \
+ __ret; \
+})
+#else
+#define vld2_lane_s16(__p0, __p1, __p2) __extension__ ({ \
+ int16x4x2_t __s1 = __p1; \
+ int16x4x2_t __rev1; \
+ __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \
+ __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \
+ int16x4x2_t __ret; \
+ __builtin_neon_vld2_lane_v(&__ret, __p0, __rev1.val[0], __rev1.val[1], __p2, 1); \
+ \
+ __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \
+ __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \
+ __ret; \
+})
+#endif
+
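+/* vld3 family: load and de-interleave three vectors from memory. A sketch
+   of typical use, assuming a hypothetical pointer `rgb` into packed
+   R,G,B byte data:
+     uint8x8x3_t px = vld3_u8(rgb);
+     // px.val[0] = 8 R bytes, px.val[1] = 8 G bytes, px.val[2] = 8 B bytes
+*/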
+#ifdef __LITTLE_ENDIAN__
+#define vld3_p8(__p0) __extension__ ({ \
+ poly8x8x3_t __ret; \
+ __builtin_neon_vld3_v(&__ret, __p0, 4); \
+ __ret; \
+})
+#else
+#define vld3_p8(__p0) __extension__ ({ \
+ poly8x8x3_t __ret; \
+ __builtin_neon_vld3_v(&__ret, __p0, 4); \
+ \
+ __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
+ __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
+ __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \
+ __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vld3_p16(__p0) __extension__ ({ \
+ poly16x4x3_t __ret; \
+ __builtin_neon_vld3_v(&__ret, __p0, 5); \
+ __ret; \
+})
+#else
+#define vld3_p16(__p0) __extension__ ({ \
+ poly16x4x3_t __ret; \
+ __builtin_neon_vld3_v(&__ret, __p0, 5); \
+ \
+ __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \
+ __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \
+ __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 3, 2, 1, 0); \
+ __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vld3q_p8(__p0) __extension__ ({ \
+ poly8x16x3_t __ret; \
+ __builtin_neon_vld3q_v(&__ret, __p0, 36); \
+ __ret; \
+})
+#else
+#define vld3q_p8(__p0) __extension__ ({ \
+ poly8x16x3_t __ret; \
+ __builtin_neon_vld3q_v(&__ret, __p0, 36); \
+ \
+ __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
+ __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
+ __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
+ __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vld3q_p16(__p0) __extension__ ({ \
+ poly16x8x3_t __ret; \
+ __builtin_neon_vld3q_v(&__ret, __p0, 37); \
+ __ret; \
+})
+#else
+#define vld3q_p16(__p0) __extension__ ({ \
+ poly16x8x3_t __ret; \
+ __builtin_neon_vld3q_v(&__ret, __p0, 37); \
+ \
+ __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
+ __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
+ __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \
+ __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vld3q_u8(__p0) __extension__ ({ \
+ uint8x16x3_t __ret; \
+ __builtin_neon_vld3q_v(&__ret, __p0, 48); \
+ __ret; \
+})
+#else
+#define vld3q_u8(__p0) __extension__ ({ \
+ uint8x16x3_t __ret; \
+ __builtin_neon_vld3q_v(&__ret, __p0, 48); \
+ \
+ __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
+ __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
+ __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
+ __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vld3q_u32(__p0) __extension__ ({ \
+ uint32x4x3_t __ret; \
+ __builtin_neon_vld3q_v(&__ret, __p0, 50); \
+ __ret; \
+})
+#else
+#define vld3q_u32(__p0) __extension__ ({ \
+ uint32x4x3_t __ret; \
+ __builtin_neon_vld3q_v(&__ret, __p0, 50); \
+ \
+ __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \
+ __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \
+ __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 3, 2, 1, 0); \
+ __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vld3q_u16(__p0) __extension__ ({ \
+ uint16x8x3_t __ret; \
+ __builtin_neon_vld3q_v(&__ret, __p0, 49); \
+ __ret; \
+})
+#else
+#define vld3q_u16(__p0) __extension__ ({ \
+ uint16x8x3_t __ret; \
+ __builtin_neon_vld3q_v(&__ret, __p0, 49); \
+ \
+ __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
+ __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
+ __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \
+ __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vld3q_s8(__p0) __extension__ ({ \
+ int8x16x3_t __ret; \
+ __builtin_neon_vld3q_v(&__ret, __p0, 32); \
+ __ret; \
+})
+#else
+#define vld3q_s8(__p0) __extension__ ({ \
+ int8x16x3_t __ret; \
+ __builtin_neon_vld3q_v(&__ret, __p0, 32); \
+ \
+ __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
+ __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
+ __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
+ __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vld3q_f32(__p0) __extension__ ({ \
+ float32x4x3_t __ret; \
+ __builtin_neon_vld3q_v(&__ret, __p0, 41); \
+ __ret; \
+})
+#else
+#define vld3q_f32(__p0) __extension__ ({ \
+ float32x4x3_t __ret; \
+ __builtin_neon_vld3q_v(&__ret, __p0, 41); \
+ \
+ __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \
+ __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \
+ __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 3, 2, 1, 0); \
+ __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vld3q_f16(__p0) __extension__ ({ \
+ float16x8x3_t __ret; \
+ __builtin_neon_vld3q_v(&__ret, __p0, 40); \
+ __ret; \
+})
+#else
+#define vld3q_f16(__p0) __extension__ ({ \
+ float16x8x3_t __ret; \
+ __builtin_neon_vld3q_v(&__ret, __p0, 40); \
+ \
+ __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
+ __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
+ __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \
+ __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vld3q_s32(__p0) __extension__ ({ \
+ int32x4x3_t __ret; \
+ __builtin_neon_vld3q_v(&__ret, __p0, 34); \
+ __ret; \
+})
+#else
+#define vld3q_s32(__p0) __extension__ ({ \
+ int32x4x3_t __ret; \
+ __builtin_neon_vld3q_v(&__ret, __p0, 34); \
+ \
+ __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \
+ __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \
+ __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 3, 2, 1, 0); \
+ __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vld3q_s16(__p0) __extension__ ({ \
+ int16x8x3_t __ret; \
+ __builtin_neon_vld3q_v(&__ret, __p0, 33); \
+ __ret; \
+})
+#else
+#define vld3q_s16(__p0) __extension__ ({ \
+ int16x8x3_t __ret; \
+ __builtin_neon_vld3q_v(&__ret, __p0, 33); \
+ \
+ __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
+ __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
+ __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \
+ __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vld3_u8(__p0) __extension__ ({ \
+ uint8x8x3_t __ret; \
+ __builtin_neon_vld3_v(&__ret, __p0, 16); \
+ __ret; \
+})
+#else
+#define vld3_u8(__p0) __extension__ ({ \
+ uint8x8x3_t __ret; \
+ __builtin_neon_vld3_v(&__ret, __p0, 16); \
+ \
+ __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
+ __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
+ __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \
+ __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vld3_u32(__p0) __extension__ ({ \
+ uint32x2x3_t __ret; \
+ __builtin_neon_vld3_v(&__ret, __p0, 18); \
+ __ret; \
+})
+#else
+#define vld3_u32(__p0) __extension__ ({ \
+ uint32x2x3_t __ret; \
+ __builtin_neon_vld3_v(&__ret, __p0, 18); \
+ \
+ __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \
+ __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \
+ __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 1, 0); \
+ __ret; \
+})
+#endif
+
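+/* The 64x1 variants below are identical in both branches: a one-lane
+   vector has no lane order to reverse. */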
+#ifdef __LITTLE_ENDIAN__
+#define vld3_u64(__p0) __extension__ ({ \
+ uint64x1x3_t __ret; \
+ __builtin_neon_vld3_v(&__ret, __p0, 19); \
+ __ret; \
+})
+#else
+#define vld3_u64(__p0) __extension__ ({ \
+ uint64x1x3_t __ret; \
+ __builtin_neon_vld3_v(&__ret, __p0, 19); \
+ __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vld3_u16(__p0) __extension__ ({ \
+ uint16x4x3_t __ret; \
+ __builtin_neon_vld3_v(&__ret, __p0, 17); \
+ __ret; \
+})
+#else
+#define vld3_u16(__p0) __extension__ ({ \
+ uint16x4x3_t __ret; \
+ __builtin_neon_vld3_v(&__ret, __p0, 17); \
+ \
+ __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \
+ __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \
+ __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 3, 2, 1, 0); \
+ __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vld3_s8(__p0) __extension__ ({ \
+ int8x8x3_t __ret; \
+ __builtin_neon_vld3_v(&__ret, __p0, 0); \
+ __ret; \
+})
+#else
+#define vld3_s8(__p0) __extension__ ({ \
+ int8x8x3_t __ret; \
+ __builtin_neon_vld3_v(&__ret, __p0, 0); \
+ \
+ __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
+ __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
+ __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \
+ __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vld3_f32(__p0) __extension__ ({ \
+ float32x2x3_t __ret; \
+ __builtin_neon_vld3_v(&__ret, __p0, 9); \
+ __ret; \
+})
+#else
+#define vld3_f32(__p0) __extension__ ({ \
+ float32x2x3_t __ret; \
+ __builtin_neon_vld3_v(&__ret, __p0, 9); \
+ \
+ __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \
+ __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \
+ __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 1, 0); \
+ __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vld3_f16(__p0) __extension__ ({ \
+ float16x4x3_t __ret; \
+ __builtin_neon_vld3_v(&__ret, __p0, 8); \
+ __ret; \
+})
+#else
+#define vld3_f16(__p0) __extension__ ({ \
+ float16x4x3_t __ret; \
+ __builtin_neon_vld3_v(&__ret, __p0, 8); \
+ \
+ __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \
+ __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \
+ __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 3, 2, 1, 0); \
+ __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vld3_s32(__p0) __extension__ ({ \
+ int32x2x3_t __ret; \
+ __builtin_neon_vld3_v(&__ret, __p0, 2); \
+ __ret; \
+})
+#else
+#define vld3_s32(__p0) __extension__ ({ \
+ int32x2x3_t __ret; \
+ __builtin_neon_vld3_v(&__ret, __p0, 2); \
+ \
+ __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \
+ __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \
+ __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 1, 0); \
+ __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vld3_s64(__p0) __extension__ ({ \
+ int64x1x3_t __ret; \
+ __builtin_neon_vld3_v(&__ret, __p0, 3); \
+ __ret; \
+})
+#else
+#define vld3_s64(__p0) __extension__ ({ \
+ int64x1x3_t __ret; \
+ __builtin_neon_vld3_v(&__ret, __p0, 3); \
+ __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vld3_s16(__p0) __extension__ ({ \
+ int16x4x3_t __ret; \
+ __builtin_neon_vld3_v(&__ret, __p0, 1); \
+ __ret; \
+})
+#else
+#define vld3_s16(__p0) __extension__ ({ \
+ int16x4x3_t __ret; \
+ __builtin_neon_vld3_v(&__ret, __p0, 1); \
+ \
+ __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \
+ __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \
+ __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 3, 2, 1, 0); \
+ __ret; \
+})
+#endif
+
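+/* vld3_dup family: load one 3-element structure and replicate each of its
+   elements across all lanes of the corresponding result vector. */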
+#ifdef __LITTLE_ENDIAN__
+#define vld3_dup_p8(__p0) __extension__ ({ \
+ poly8x8x3_t __ret; \
+ __builtin_neon_vld3_dup_v(&__ret, __p0, 4); \
+ __ret; \
+})
+#else
+#define vld3_dup_p8(__p0) __extension__ ({ \
+ poly8x8x3_t __ret; \
+ __builtin_neon_vld3_dup_v(&__ret, __p0, 4); \
+ \
+ __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
+ __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
+ __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \
+ __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vld3_dup_p16(__p0) __extension__ ({ \
+ poly16x4x3_t __ret; \
+ __builtin_neon_vld3_dup_v(&__ret, __p0, 5); \
+ __ret; \
+})
+#else
+#define vld3_dup_p16(__p0) __extension__ ({ \
+ poly16x4x3_t __ret; \
+ __builtin_neon_vld3_dup_v(&__ret, __p0, 5); \
+ \
+ __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \
+ __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \
+ __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 3, 2, 1, 0); \
+ __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vld3_dup_u8(__p0) __extension__ ({ \
+ uint8x8x3_t __ret; \
+ __builtin_neon_vld3_dup_v(&__ret, __p0, 16); \
+ __ret; \
+})
+#else
+#define vld3_dup_u8(__p0) __extension__ ({ \
+ uint8x8x3_t __ret; \
+ __builtin_neon_vld3_dup_v(&__ret, __p0, 16); \
+ \
+ __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
+ __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
+ __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \
+ __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vld3_dup_u32(__p0) __extension__ ({ \
+ uint32x2x3_t __ret; \
+ __builtin_neon_vld3_dup_v(&__ret, __p0, 18); \
+ __ret; \
+})
+#else
+#define vld3_dup_u32(__p0) __extension__ ({ \
+ uint32x2x3_t __ret; \
+ __builtin_neon_vld3_dup_v(&__ret, __p0, 18); \
+ \
+ __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \
+ __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \
+ __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 1, 0); \
+ __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vld3_dup_u64(__p0) __extension__ ({ \
+ uint64x1x3_t __ret; \
+ __builtin_neon_vld3_dup_v(&__ret, __p0, 19); \
+ __ret; \
+})
+#else
+#define vld3_dup_u64(__p0) __extension__ ({ \
+ uint64x1x3_t __ret; \
+ __builtin_neon_vld3_dup_v(&__ret, __p0, 19); \
+ __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vld3_dup_u16(__p0) __extension__ ({ \
+ uint16x4x3_t __ret; \
+ __builtin_neon_vld3_dup_v(&__ret, __p0, 17); \
+ __ret; \
+})
+#else
+#define vld3_dup_u16(__p0) __extension__ ({ \
+ uint16x4x3_t __ret; \
+ __builtin_neon_vld3_dup_v(&__ret, __p0, 17); \
+ \
+ __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \
+ __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \
+ __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 3, 2, 1, 0); \
+ __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vld3_dup_s8(__p0) __extension__ ({ \
+ int8x8x3_t __ret; \
+ __builtin_neon_vld3_dup_v(&__ret, __p0, 0); \
+ __ret; \
+})
+#else
+#define vld3_dup_s8(__p0) __extension__ ({ \
+ int8x8x3_t __ret; \
+ __builtin_neon_vld3_dup_v(&__ret, __p0, 0); \
+ \
+ __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
+ __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
+ __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \
+ __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vld3_dup_f32(__p0) __extension__ ({ \
+ float32x2x3_t __ret; \
+ __builtin_neon_vld3_dup_v(&__ret, __p0, 9); \
+ __ret; \
+})
+#else
+#define vld3_dup_f32(__p0) __extension__ ({ \
+ float32x2x3_t __ret; \
+ __builtin_neon_vld3_dup_v(&__ret, __p0, 9); \
+ \
+ __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \
+ __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \
+ __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 1, 0); \
+ __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vld3_dup_f16(__p0) __extension__ ({ \
+ float16x4x3_t __ret; \
+ __builtin_neon_vld3_dup_v(&__ret, __p0, 8); \
+ __ret; \
+})
+#else
+#define vld3_dup_f16(__p0) __extension__ ({ \
+ float16x4x3_t __ret; \
+ __builtin_neon_vld3_dup_v(&__ret, __p0, 8); \
+ \
+ __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \
+ __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \
+ __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 3, 2, 1, 0); \
+ __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vld3_dup_s32(__p0) __extension__ ({ \
+ int32x2x3_t __ret; \
+ __builtin_neon_vld3_dup_v(&__ret, __p0, 2); \
+ __ret; \
+})
+#else
+#define vld3_dup_s32(__p0) __extension__ ({ \
+ int32x2x3_t __ret; \
+ __builtin_neon_vld3_dup_v(&__ret, __p0, 2); \
+ \
+ __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \
+ __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \
+ __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 1, 0); \
+ __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vld3_dup_s64(__p0) __extension__ ({ \
+ int64x1x3_t __ret; \
+ __builtin_neon_vld3_dup_v(&__ret, __p0, 3); \
+ __ret; \
+})
+#else
+#define vld3_dup_s64(__p0) __extension__ ({ \
+ int64x1x3_t __ret; \
+ __builtin_neon_vld3_dup_v(&__ret, __p0, 3); \
+ __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vld3_dup_s16(__p0) __extension__ ({ \
+ int16x4x3_t __ret; \
+ __builtin_neon_vld3_dup_v(&__ret, __p0, 1); \
+ __ret; \
+})
+#else
+#define vld3_dup_s16(__p0) __extension__ ({ \
+ int16x4x3_t __ret; \
+ __builtin_neon_vld3_dup_v(&__ret, __p0, 1); \
+ \
+ __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \
+ __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \
+ __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 3, 2, 1, 0); \
+ __ret; \
+})
+#endif
+
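+/* vld3_lane family: load one 3-element structure into lane __p2 of the
+   three vectors given by __p1; all other lanes pass through unchanged.
+   A sketch, assuming a hypothetical `src` pointer and an existing triple
+   `acc`:
+     int16x4x3_t acc2 = vld3_lane_s16(src, acc, 1);  // overwrite lane 1 only
+*/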
+#ifdef __LITTLE_ENDIAN__
+#define vld3_lane_p8(__p0, __p1, __p2) __extension__ ({ \
+ poly8x8x3_t __s1 = __p1; \
+ poly8x8x3_t __ret; \
+ __builtin_neon_vld3_lane_v(&__ret, __p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], __p2, 4); \
+ __ret; \
+})
+#else
+#define vld3_lane_p8(__p0, __p1, __p2) __extension__ ({ \
+ poly8x8x3_t __s1 = __p1; \
+ poly8x8x3_t __rev1; \
+ __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
+ __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
+ __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \
+ poly8x8x3_t __ret; \
+ __builtin_neon_vld3_lane_v(&__ret, __p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], __p2, 4); \
+ \
+ __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
+ __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
+ __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \
+ __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vld3_lane_p16(__p0, __p1, __p2) __extension__ ({ \
+ poly16x4x3_t __s1 = __p1; \
+ poly16x4x3_t __ret; \
+ __builtin_neon_vld3_lane_v(&__ret, __p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], __p2, 5); \
+ __ret; \
+})
+#else
+#define vld3_lane_p16(__p0, __p1, __p2) __extension__ ({ \
+ poly16x4x3_t __s1 = __p1; \
+ poly16x4x3_t __rev1; \
+ __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \
+ __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \
+ __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 3, 2, 1, 0); \
+ poly16x4x3_t __ret; \
+ __builtin_neon_vld3_lane_v(&__ret, __p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], __p2, 5); \
+ \
+ __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \
+ __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \
+ __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 3, 2, 1, 0); \
+ __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vld3q_lane_p16(__p0, __p1, __p2) __extension__ ({ \
+ poly16x8x3_t __s1 = __p1; \
+ poly16x8x3_t __ret; \
+ __builtin_neon_vld3q_lane_v(&__ret, __p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], __p2, 37); \
+ __ret; \
+})
+#else
+#define vld3q_lane_p16(__p0, __p1, __p2) __extension__ ({ \
+ poly16x8x3_t __s1 = __p1; \
+ poly16x8x3_t __rev1; \
+ __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
+ __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
+ __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \
+ poly16x8x3_t __ret; \
+ __builtin_neon_vld3q_lane_v(&__ret, __p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], __p2, 37); \
+ \
+ __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
+ __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
+ __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \
+ __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vld3q_lane_u32(__p0, __p1, __p2) __extension__ ({ \
+ uint32x4x3_t __s1 = __p1; \
+ uint32x4x3_t __ret; \
+ __builtin_neon_vld3q_lane_v(&__ret, __p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], __p2, 50); \
+ __ret; \
+})
+#else
+#define vld3q_lane_u32(__p0, __p1, __p2) __extension__ ({ \
+ uint32x4x3_t __s1 = __p1; \
+ uint32x4x3_t __rev1; \
+ __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \
+ __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \
+ __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 3, 2, 1, 0); \
+ uint32x4x3_t __ret; \
+ __builtin_neon_vld3q_lane_v(&__ret, __p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], __p2, 50); \
+ \
+ __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \
+ __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \
+ __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 3, 2, 1, 0); \
+ __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vld3q_lane_u16(__p0, __p1, __p2) __extension__ ({ \
+ uint16x8x3_t __s1 = __p1; \
+ uint16x8x3_t __ret; \
+ __builtin_neon_vld3q_lane_v(&__ret, __p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], __p2, 49); \
+ __ret; \
+})
+#else
+#define vld3q_lane_u16(__p0, __p1, __p2) __extension__ ({ \
+ uint16x8x3_t __s1 = __p1; \
+ uint16x8x3_t __rev1; \
+ __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
+ __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
+ __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \
+ uint16x8x3_t __ret; \
+ __builtin_neon_vld3q_lane_v(&__ret, __p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], __p2, 49); \
+ \
+ __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
+ __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
+ __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \
+ __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vld3q_lane_f32(__p0, __p1, __p2) __extension__ ({ \
+ float32x4x3_t __s1 = __p1; \
+ float32x4x3_t __ret; \
+ __builtin_neon_vld3q_lane_v(&__ret, __p0, __s1.val[0], __s1.val[1], __s1.val[2], __p2, 41); \
+ __ret; \
+})
+#else
+#define vld3q_lane_f32(__p0, __p1, __p2) __extension__ ({ \
+ float32x4x3_t __s1 = __p1; \
+ float32x4x3_t __rev1; \
+ __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \
+ __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \
+ __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 3, 2, 1, 0); \
+ float32x4x3_t __ret; \
+ __builtin_neon_vld3q_lane_v(&__ret, __p0, __rev1.val[0], __rev1.val[1], __rev1.val[2], __p2, 41); \
+ \
+ __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \
+ __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \
+ __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 3, 2, 1, 0); \
+ __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vld3q_lane_f16(__p0, __p1, __p2) __extension__ ({ \
+ float16x8x3_t __s1 = __p1; \
+ float16x8x3_t __ret; \
+ __builtin_neon_vld3q_lane_v(&__ret, __p0, __s1.val[0], __s1.val[1], __s1.val[2], __p2, 40); \
+ __ret; \
+})
+#else
+#define vld3q_lane_f16(__p0, __p1, __p2) __extension__ ({ \
+ float16x8x3_t __s1 = __p1; \
+ float16x8x3_t __rev1; \
+ __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
+ __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
+ __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \
+ float16x8x3_t __ret; \
+ __builtin_neon_vld3q_lane_v(&__ret, __p0, __rev1.val[0], __rev1.val[1], __rev1.val[2], __p2, 40); \
+ \
+ __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
+ __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
+ __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \
+ __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vld3q_lane_s32(__p0, __p1, __p2) __extension__ ({ \
+ int32x4x3_t __s1 = __p1; \
+ int32x4x3_t __ret; \
+ __builtin_neon_vld3q_lane_v(&__ret, __p0, __s1.val[0], __s1.val[1], __s1.val[2], __p2, 34); \
+ __ret; \
+})
+#else
+#define vld3q_lane_s32(__p0, __p1, __p2) __extension__ ({ \
+ int32x4x3_t __s1 = __p1; \
+ int32x4x3_t __rev1; \
+ __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \
+ __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \
+ __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 3, 2, 1, 0); \
+ int32x4x3_t __ret; \
+ __builtin_neon_vld3q_lane_v(&__ret, __p0, __rev1.val[0], __rev1.val[1], __rev1.val[2], __p2, 34); \
+ \
+ __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \
+ __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \
+ __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 3, 2, 1, 0); \
+ __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vld3q_lane_s16(__p0, __p1, __p2) __extension__ ({ \
+ int16x8x3_t __s1 = __p1; \
+ int16x8x3_t __ret; \
+ __builtin_neon_vld3q_lane_v(&__ret, __p0, __s1.val[0], __s1.val[1], __s1.val[2], __p2, 33); \
+ __ret; \
+})
+#else
+#define vld3q_lane_s16(__p0, __p1, __p2) __extension__ ({ \
+ int16x8x3_t __s1 = __p1; \
+ int16x8x3_t __rev1; \
+ __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
+ __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
+ __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \
+ int16x8x3_t __ret; \
+ __builtin_neon_vld3q_lane_v(&__ret, __p0, __rev1.val[0], __rev1.val[1], __rev1.val[2], __p2, 33); \
+ \
+ __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
+ __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
+ __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \
+ __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vld3_lane_u8(__p0, __p1, __p2) __extension__ ({ \
+ uint8x8x3_t __s1 = __p1; \
+ uint8x8x3_t __ret; \
+ __builtin_neon_vld3_lane_v(&__ret, __p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], __p2, 16); \
+ __ret; \
+})
+#else
+#define vld3_lane_u8(__p0, __p1, __p2) __extension__ ({ \
+ uint8x8x3_t __s1 = __p1; \
+ uint8x8x3_t __rev1; \
+ __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
+ __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
+ __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \
+ uint8x8x3_t __ret; \
+ __builtin_neon_vld3_lane_v(&__ret, __p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], __p2, 16); \
+ \
+ __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
+ __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
+ __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \
+ __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vld3_lane_u32(__p0, __p1, __p2) __extension__ ({ \
+ uint32x2x3_t __s1 = __p1; \
+ uint32x2x3_t __ret; \
+ __builtin_neon_vld3_lane_v(&__ret, __p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], __p2, 18); \
+ __ret; \
+})
+#else
+#define vld3_lane_u32(__p0, __p1, __p2) __extension__ ({ \
+ uint32x2x3_t __s1 = __p1; \
+ uint32x2x3_t __rev1; \
+ __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \
+ __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \
+ __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 1, 0); \
+ uint32x2x3_t __ret; \
+ __builtin_neon_vld3_lane_v(&__ret, __p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], __p2, 18); \
+ \
+ __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \
+ __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \
+ __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 1, 0); \
+ __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vld3_lane_u16(__p0, __p1, __p2) __extension__ ({ \
+ uint16x4x3_t __s1 = __p1; \
+ uint16x4x3_t __ret; \
+ __builtin_neon_vld3_lane_v(&__ret, __p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], __p2, 17); \
+ __ret; \
+})
+#else
+#define vld3_lane_u16(__p0, __p1, __p2) __extension__ ({ \
+ uint16x4x3_t __s1 = __p1; \
+ uint16x4x3_t __rev1; \
+ __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \
+ __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \
+ __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 3, 2, 1, 0); \
+ uint16x4x3_t __ret; \
+ __builtin_neon_vld3_lane_v(&__ret, __p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], __p2, 17); \
+ \
+ __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \
+ __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \
+ __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 3, 2, 1, 0); \
+ __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vld3_lane_s8(__p0, __p1, __p2) __extension__ ({ \
+ int8x8x3_t __s1 = __p1; \
+ int8x8x3_t __ret; \
+ __builtin_neon_vld3_lane_v(&__ret, __p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], __p2, 0); \
+ __ret; \
+})
+#else
+#define vld3_lane_s8(__p0, __p1, __p2) __extension__ ({ \
+ int8x8x3_t __s1 = __p1; \
+ int8x8x3_t __rev1; \
+ __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
+ __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
+ __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \
+ int8x8x3_t __ret; \
+ __builtin_neon_vld3_lane_v(&__ret, __p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], __p2, 0); \
+ \
+ __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
+ __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
+ __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \
+ __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vld3_lane_f32(__p0, __p1, __p2) __extension__ ({ \
+ float32x2x3_t __s1 = __p1; \
+ float32x2x3_t __ret; \
+ __builtin_neon_vld3_lane_v(&__ret, __p0, __s1.val[0], __s1.val[1], __s1.val[2], __p2, 9); \
+ __ret; \
+})
+#else
+#define vld3_lane_f32(__p0, __p1, __p2) __extension__ ({ \
+ float32x2x3_t __s1 = __p1; \
+ float32x2x3_t __rev1; \
+ __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \
+ __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \
+ __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 1, 0); \
+ float32x2x3_t __ret; \
+ __builtin_neon_vld3_lane_v(&__ret, __p0, __rev1.val[0], __rev1.val[1], __rev1.val[2], __p2, 9); \
+ \
+ __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \
+ __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \
+ __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 1, 0); \
+ __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vld3_lane_f16(__p0, __p1, __p2) __extension__ ({ \
+ float16x4x3_t __s1 = __p1; \
+ float16x4x3_t __ret; \
+ __builtin_neon_vld3_lane_v(&__ret, __p0, __s1.val[0], __s1.val[1], __s1.val[2], __p2, 8); \
+ __ret; \
+})
+#else
+#define vld3_lane_f16(__p0, __p1, __p2) __extension__ ({ \
+ float16x4x3_t __s1 = __p1; \
+ float16x4x3_t __rev1; \
+ __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \
+ __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \
+ __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 3, 2, 1, 0); \
+ float16x4x3_t __ret; \
+ __builtin_neon_vld3_lane_v(&__ret, __p0, __rev1.val[0], __rev1.val[1], __rev1.val[2], __p2, 8); \
+ \
+ __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \
+ __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \
+ __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 3, 2, 1, 0); \
+ __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vld3_lane_s32(__p0, __p1, __p2) __extension__ ({ \
+ int32x2x3_t __s1 = __p1; \
+ int32x2x3_t __ret; \
+ __builtin_neon_vld3_lane_v(&__ret, __p0, __s1.val[0], __s1.val[1], __s1.val[2], __p2, 2); \
+ __ret; \
+})
+#else
+#define vld3_lane_s32(__p0, __p1, __p2) __extension__ ({ \
+ int32x2x3_t __s1 = __p1; \
+ int32x2x3_t __rev1; \
+ __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \
+ __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \
+ __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 1, 0); \
+ int32x2x3_t __ret; \
+ __builtin_neon_vld3_lane_v(&__ret, __p0, __rev1.val[0], __rev1.val[1], __rev1.val[2], __p2, 2); \
+ \
+ __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \
+ __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \
+ __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 1, 0); \
+ __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vld3_lane_s16(__p0, __p1, __p2) __extension__ ({ \
+ int16x4x3_t __s1 = __p1; \
+ int16x4x3_t __ret; \
+ __builtin_neon_vld3_lane_v(&__ret, __p0, __s1.val[0], __s1.val[1], __s1.val[2], __p2, 1); \
+ __ret; \
+})
+#else
+#define vld3_lane_s16(__p0, __p1, __p2) __extension__ ({ \
+ int16x4x3_t __s1 = __p1; \
+ int16x4x3_t __rev1; \
+ __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \
+ __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \
+ __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 3, 2, 1, 0); \
+ int16x4x3_t __ret; \
+ __builtin_neon_vld3_lane_v(&__ret, __p0, __rev1.val[0], __rev1.val[1], __rev1.val[2], __p2, 1); \
+ \
+ __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \
+ __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \
+ __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 3, 2, 1, 0); \
+ __ret; \
+})
+#endif
+
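+/* vld4 family: load and de-interleave four vectors, e.g. packed R,G,B,A
+   pixel data; the big-endian branches follow the same shuffle discipline
+   as the vld2/vld3 forms above. */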
+#ifdef __LITTLE_ENDIAN__
+#define vld4_p8(__p0) __extension__ ({ \
+ poly8x8x4_t __ret; \
+ __builtin_neon_vld4_v(&__ret, __p0, 4); \
+ __ret; \
+})
+#else
+#define vld4_p8(__p0) __extension__ ({ \
+ poly8x8x4_t __ret; \
+ __builtin_neon_vld4_v(&__ret, __p0, 4); \
+ \
+ __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
+ __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
+ __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \
+ __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 7, 6, 5, 4, 3, 2, 1, 0); \
+ __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vld4_p16(__p0) __extension__ ({ \
+ poly16x4x4_t __ret; \
+ __builtin_neon_vld4_v(&__ret, __p0, 5); \
+ __ret; \
+})
+#else
+#define vld4_p16(__p0) __extension__ ({ \
+ poly16x4x4_t __ret; \
+ __builtin_neon_vld4_v(&__ret, __p0, 5); \
+ \
+ __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \
+ __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \
+ __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 3, 2, 1, 0); \
+ __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 3, 2, 1, 0); \
+ __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vld4q_p8(__p0) __extension__ ({ \
+ poly8x16x4_t __ret; \
+ __builtin_neon_vld4q_v(&__ret, __p0, 36); \
+ __ret; \
+})
+#else
+#define vld4q_p8(__p0) __extension__ ({ \
+ poly8x16x4_t __ret; \
+ __builtin_neon_vld4q_v(&__ret, __p0, 36); \
+ \
+ __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
+ __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
+ __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
+ __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
+ __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vld4q_p16(__p0) __extension__ ({ \
+ poly16x8x4_t __ret; \
+ __builtin_neon_vld4q_v(&__ret, __p0, 37); \
+ __ret; \
+})
+#else
+#define vld4q_p16(__p0) __extension__ ({ \
+ poly16x8x4_t __ret; \
+ __builtin_neon_vld4q_v(&__ret, __p0, 37); \
+ \
+ __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
+ __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
+ __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \
+ __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 7, 6, 5, 4, 3, 2, 1, 0); \
+ __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vld4q_u8(__p0) __extension__ ({ \
+ uint8x16x4_t __ret; \
+ __builtin_neon_vld4q_v(&__ret, __p0, 48); \
+ __ret; \
+})
+#else
+#define vld4q_u8(__p0) __extension__ ({ \
+ uint8x16x4_t __ret; \
+ __builtin_neon_vld4q_v(&__ret, __p0, 48); \
+ \
+ __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
+ __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
+ __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
+ __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
+ __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vld4q_u32(__p0) __extension__ ({ \
+ uint32x4x4_t __ret; \
+ __builtin_neon_vld4q_v(&__ret, __p0, 50); \
+ __ret; \
+})
+#else
+#define vld4q_u32(__p0) __extension__ ({ \
+ uint32x4x4_t __ret; \
+ __builtin_neon_vld4q_v(&__ret, __p0, 50); \
+ \
+ __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \
+ __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \
+ __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 3, 2, 1, 0); \
+ __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 3, 2, 1, 0); \
+ __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vld4q_u16(__p0) __extension__ ({ \
+ uint16x8x4_t __ret; \
+ __builtin_neon_vld4q_v(&__ret, __p0, 49); \
+ __ret; \
+})
+#else
+#define vld4q_u16(__p0) __extension__ ({ \
+ uint16x8x4_t __ret; \
+ __builtin_neon_vld4q_v(&__ret, __p0, 49); \
+ \
+ __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
+ __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
+ __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \
+ __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 7, 6, 5, 4, 3, 2, 1, 0); \
+ __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vld4q_s8(__p0) __extension__ ({ \
+ int8x16x4_t __ret; \
+ __builtin_neon_vld4q_v(&__ret, __p0, 32); \
+ __ret; \
+})
+#else
+#define vld4q_s8(__p0) __extension__ ({ \
+ int8x16x4_t __ret; \
+ __builtin_neon_vld4q_v(&__ret, __p0, 32); \
+ \
+ __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
+ __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
+ __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
+ __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
+ __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vld4q_f32(__p0) __extension__ ({ \
+ float32x4x4_t __ret; \
+ __builtin_neon_vld4q_v(&__ret, __p0, 41); \
+ __ret; \
+})
+#else
+#define vld4q_f32(__p0) __extension__ ({ \
+ float32x4x4_t __ret; \
+ __builtin_neon_vld4q_v(&__ret, __p0, 41); \
+ \
+ __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \
+ __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \
+ __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 3, 2, 1, 0); \
+ __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 3, 2, 1, 0); \
+ __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vld4q_f16(__p0) __extension__ ({ \
+ float16x8x4_t __ret; \
+ __builtin_neon_vld4q_v(&__ret, __p0, 40); \
+ __ret; \
+})
+#else
+#define vld4q_f16(__p0) __extension__ ({ \
+ float16x8x4_t __ret; \
+ __builtin_neon_vld4q_v(&__ret, __p0, 40); \
+ \
+ __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
+ __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
+ __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \
+ __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 7, 6, 5, 4, 3, 2, 1, 0); \
+ __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vld4q_s32(__p0) __extension__ ({ \
+ int32x4x4_t __ret; \
+ __builtin_neon_vld4q_v(&__ret, __p0, 34); \
+ __ret; \
+})
+#else
+#define vld4q_s32(__p0) __extension__ ({ \
+ int32x4x4_t __ret; \
+ __builtin_neon_vld4q_v(&__ret, __p0, 34); \
+ \
+ __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \
+ __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \
+ __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 3, 2, 1, 0); \
+ __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 3, 2, 1, 0); \
+ __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vld4q_s16(__p0) __extension__ ({ \
+ int16x8x4_t __ret; \
+ __builtin_neon_vld4q_v(&__ret, __p0, 33); \
+ __ret; \
+})
+#else
+#define vld4q_s16(__p0) __extension__ ({ \
+ int16x8x4_t __ret; \
+ __builtin_neon_vld4q_v(&__ret, __p0, 33); \
+ \
+ __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
+ __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
+ __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \
+ __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 7, 6, 5, 4, 3, 2, 1, 0); \
+ __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vld4_u8(__p0) __extension__ ({ \
+ uint8x8x4_t __ret; \
+ __builtin_neon_vld4_v(&__ret, __p0, 16); \
+ __ret; \
+})
+#else
+#define vld4_u8(__p0) __extension__ ({ \
+ uint8x8x4_t __ret; \
+ __builtin_neon_vld4_v(&__ret, __p0, 16); \
+ \
+ __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
+ __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
+ __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \
+ __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 7, 6, 5, 4, 3, 2, 1, 0); \
+ __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vld4_u32(__p0) __extension__ ({ \
+ uint32x2x4_t __ret; \
+ __builtin_neon_vld4_v(&__ret, __p0, 18); \
+ __ret; \
+})
+#else
+#define vld4_u32(__p0) __extension__ ({ \
+ uint32x2x4_t __ret; \
+ __builtin_neon_vld4_v(&__ret, __p0, 18); \
+ \
+ __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \
+ __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \
+ __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 1, 0); \
+ __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 1, 0); \
+ __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vld4_u64(__p0) __extension__ ({ \
+ uint64x1x4_t __ret; \
+ __builtin_neon_vld4_v(&__ret, __p0, 19); \
+ __ret; \
+})
+#else
+#define vld4_u64(__p0) __extension__ ({ \
+ uint64x1x4_t __ret; \
+ __builtin_neon_vld4_v(&__ret, __p0, 19); \
+ __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vld4_u16(__p0) __extension__ ({ \
+ uint16x4x4_t __ret; \
+ __builtin_neon_vld4_v(&__ret, __p0, 17); \
+ __ret; \
+})
+#else
+#define vld4_u16(__p0) __extension__ ({ \
+ uint16x4x4_t __ret; \
+ __builtin_neon_vld4_v(&__ret, __p0, 17); \
+ \
+ __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \
+ __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \
+ __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 3, 2, 1, 0); \
+ __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 3, 2, 1, 0); \
+ __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vld4_s8(__p0) __extension__ ({ \
+ int8x8x4_t __ret; \
+ __builtin_neon_vld4_v(&__ret, __p0, 0); \
+ __ret; \
+})
+#else
+#define vld4_s8(__p0) __extension__ ({ \
+ int8x8x4_t __ret; \
+ __builtin_neon_vld4_v(&__ret, __p0, 0); \
+ \
+ __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
+ __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
+ __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \
+ __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 7, 6, 5, 4, 3, 2, 1, 0); \
+ __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vld4_f32(__p0) __extension__ ({ \
+ float32x2x4_t __ret; \
+ __builtin_neon_vld4_v(&__ret, __p0, 9); \
+ __ret; \
+})
+#else
+#define vld4_f32(__p0) __extension__ ({ \
+ float32x2x4_t __ret; \
+ __builtin_neon_vld4_v(&__ret, __p0, 9); \
+ \
+ __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \
+ __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \
+ __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 1, 0); \
+ __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 1, 0); \
+ __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vld4_f16(__p0) __extension__ ({ \
+ float16x4x4_t __ret; \
+ __builtin_neon_vld4_v(&__ret, __p0, 8); \
+ __ret; \
+})
+#else
+#define vld4_f16(__p0) __extension__ ({ \
+ float16x4x4_t __ret; \
+ __builtin_neon_vld4_v(&__ret, __p0, 8); \
+ \
+ __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \
+ __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \
+ __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 3, 2, 1, 0); \
+ __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 3, 2, 1, 0); \
+ __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vld4_s32(__p0) __extension__ ({ \
+ int32x2x4_t __ret; \
+ __builtin_neon_vld4_v(&__ret, __p0, 2); \
+ __ret; \
+})
+#else
+#define vld4_s32(__p0) __extension__ ({ \
+ int32x2x4_t __ret; \
+ __builtin_neon_vld4_v(&__ret, __p0, 2); \
+ \
+ __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \
+ __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \
+ __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 1, 0); \
+ __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 1, 0); \
+ __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vld4_s64(__p0) __extension__ ({ \
+ int64x1x4_t __ret; \
+ __builtin_neon_vld4_v(&__ret, __p0, 3); \
+ __ret; \
+})
+#else
+#define vld4_s64(__p0) __extension__ ({ \
+ int64x1x4_t __ret; \
+ __builtin_neon_vld4_v(&__ret, __p0, 3); \
+ __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vld4_s16(__p0) __extension__ ({ \
+ int16x4x4_t __ret; \
+ __builtin_neon_vld4_v(&__ret, __p0, 1); \
+ __ret; \
+})
+#else
+#define vld4_s16(__p0) __extension__ ({ \
+ int16x4x4_t __ret; \
+ __builtin_neon_vld4_v(&__ret, __p0, 1); \
+ \
+ __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \
+ __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \
+ __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 3, 2, 1, 0); \
+ __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 3, 2, 1, 0); \
+ __ret; \
+})
+#endif
+
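+/* vld4_dup family: load one 4-element structure and broadcast each element
+   across the lanes of its result vector. */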
+#ifdef __LITTLE_ENDIAN__
+#define vld4_dup_p8(__p0) __extension__ ({ \
+ poly8x8x4_t __ret; \
+ __builtin_neon_vld4_dup_v(&__ret, __p0, 4); \
+ __ret; \
+})
+#else
+#define vld4_dup_p8(__p0) __extension__ ({ \
+ poly8x8x4_t __ret; \
+ __builtin_neon_vld4_dup_v(&__ret, __p0, 4); \
+ \
+ __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
+ __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
+ __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \
+ __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 7, 6, 5, 4, 3, 2, 1, 0); \
+ __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vld4_dup_p16(__p0) __extension__ ({ \
+ poly16x4x4_t __ret; \
+ __builtin_neon_vld4_dup_v(&__ret, __p0, 5); \
+ __ret; \
+})
+#else
+#define vld4_dup_p16(__p0) __extension__ ({ \
+ poly16x4x4_t __ret; \
+ __builtin_neon_vld4_dup_v(&__ret, __p0, 5); \
+ \
+ __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \
+ __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \
+ __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 3, 2, 1, 0); \
+ __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 3, 2, 1, 0); \
+ __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vld4_dup_u8(__p0) __extension__ ({ \
+ uint8x8x4_t __ret; \
+ __builtin_neon_vld4_dup_v(&__ret, __p0, 16); \
+ __ret; \
+})
+#else
+#define vld4_dup_u8(__p0) __extension__ ({ \
+ uint8x8x4_t __ret; \
+ __builtin_neon_vld4_dup_v(&__ret, __p0, 16); \
+ \
+ __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
+ __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
+ __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \
+ __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 7, 6, 5, 4, 3, 2, 1, 0); \
+ __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vld4_dup_u32(__p0) __extension__ ({ \
+ uint32x2x4_t __ret; \
+ __builtin_neon_vld4_dup_v(&__ret, __p0, 18); \
+ __ret; \
+})
+#else
+#define vld4_dup_u32(__p0) __extension__ ({ \
+ uint32x2x4_t __ret; \
+ __builtin_neon_vld4_dup_v(&__ret, __p0, 18); \
+ \
+ __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \
+ __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \
+ __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 1, 0); \
+ __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 1, 0); \
+ __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vld4_dup_u64(__p0) __extension__ ({ \
+ uint64x1x4_t __ret; \
+ __builtin_neon_vld4_dup_v(&__ret, __p0, 19); \
+ __ret; \
+})
+#else
+#define vld4_dup_u64(__p0) __extension__ ({ \
+ uint64x1x4_t __ret; \
+ __builtin_neon_vld4_dup_v(&__ret, __p0, 19); \
+ __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vld4_dup_u16(__p0) __extension__ ({ \
+ uint16x4x4_t __ret; \
+ __builtin_neon_vld4_dup_v(&__ret, __p0, 17); \
+ __ret; \
+})
+#else
+#define vld4_dup_u16(__p0) __extension__ ({ \
+ uint16x4x4_t __ret; \
+ __builtin_neon_vld4_dup_v(&__ret, __p0, 17); \
+ \
+ __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \
+ __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \
+ __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 3, 2, 1, 0); \
+ __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 3, 2, 1, 0); \
+ __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vld4_dup_s8(__p0) __extension__ ({ \
+ int8x8x4_t __ret; \
+ __builtin_neon_vld4_dup_v(&__ret, __p0, 0); \
+ __ret; \
+})
+#else
+#define vld4_dup_s8(__p0) __extension__ ({ \
+ int8x8x4_t __ret; \
+ __builtin_neon_vld4_dup_v(&__ret, __p0, 0); \
+ \
+ __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
+ __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
+ __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \
+ __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 7, 6, 5, 4, 3, 2, 1, 0); \
+ __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vld4_dup_f32(__p0) __extension__ ({ \
+ float32x2x4_t __ret; \
+ __builtin_neon_vld4_dup_v(&__ret, __p0, 9); \
+ __ret; \
+})
+#else
+#define vld4_dup_f32(__p0) __extension__ ({ \
+ float32x2x4_t __ret; \
+ __builtin_neon_vld4_dup_v(&__ret, __p0, 9); \
+ \
+ __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \
+ __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \
+ __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 1, 0); \
+ __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 1, 0); \
+ __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vld4_dup_f16(__p0) __extension__ ({ \
+ float16x4x4_t __ret; \
+ __builtin_neon_vld4_dup_v(&__ret, __p0, 8); \
+ __ret; \
+})
+#else
+#define vld4_dup_f16(__p0) __extension__ ({ \
+ float16x4x4_t __ret; \
+ __builtin_neon_vld4_dup_v(&__ret, __p0, 8); \
+ \
+ __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \
+ __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \
+ __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 3, 2, 1, 0); \
+ __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 3, 2, 1, 0); \
+ __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vld4_dup_s32(__p0) __extension__ ({ \
+ int32x2x4_t __ret; \
+ __builtin_neon_vld4_dup_v(&__ret, __p0, 2); \
+ __ret; \
+})
+#else
+#define vld4_dup_s32(__p0) __extension__ ({ \
+ int32x2x4_t __ret; \
+ __builtin_neon_vld4_dup_v(&__ret, __p0, 2); \
+ \
+ __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \
+ __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \
+ __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 1, 0); \
+ __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 1, 0); \
+ __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vld4_dup_s64(__p0) __extension__ ({ \
+ int64x1x4_t __ret; \
+ __builtin_neon_vld4_dup_v(&__ret, __p0, 3); \
+ __ret; \
+})
+#else
+#define vld4_dup_s64(__p0) __extension__ ({ \
+ int64x1x4_t __ret; \
+ __builtin_neon_vld4_dup_v(&__ret, __p0, 3); \
+ __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vld4_dup_s16(__p0) __extension__ ({ \
+ int16x4x4_t __ret; \
+ __builtin_neon_vld4_dup_v(&__ret, __p0, 1); \
+ __ret; \
+})
+#else
+#define vld4_dup_s16(__p0) __extension__ ({ \
+ int16x4x4_t __ret; \
+ __builtin_neon_vld4_dup_v(&__ret, __p0, 1); \
+ \
+ __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \
+ __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \
+ __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 3, 2, 1, 0); \
+ __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 3, 2, 1, 0); \
+ __ret; \
+})
+#endif
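+
+/* Usage sketch (illustrative): the vld4_dup_* forms load one 4-element
+ * structure and replicate each member across all lanes of the matching
+ * result vector:
+ *
+ *   float32_t quad[4];                     // a b c d
+ *   float32x2x4_t v = vld4_dup_f32(quad);  // v.val[0] = {a, a}, v.val[1] = {b, b}, ...
+ */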
+
+#ifdef __LITTLE_ENDIAN__
+#define vld4_lane_p8(__p0, __p1, __p2) __extension__ ({ \
+ poly8x8x4_t __s1 = __p1; \
+ poly8x8x4_t __ret; \
+ __builtin_neon_vld4_lane_v(&__ret, __p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], (int8x8_t)__s1.val[3], __p2, 4); \
+ __ret; \
+})
+#else
+#define vld4_lane_p8(__p0, __p1, __p2) __extension__ ({ \
+ poly8x8x4_t __s1 = __p1; \
+ poly8x8x4_t __rev1; \
+ __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
+ __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
+ __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \
+ __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 7, 6, 5, 4, 3, 2, 1, 0); \
+ poly8x8x4_t __ret; \
+ __builtin_neon_vld4_lane_v(&__ret, __p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], (int8x8_t)__rev1.val[3], __p2, 4); \
+ \
+ __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
+ __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
+ __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \
+ __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 7, 6, 5, 4, 3, 2, 1, 0); \
+ __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vld4_lane_p16(__p0, __p1, __p2) __extension__ ({ \
+ poly16x4x4_t __s1 = __p1; \
+ poly16x4x4_t __ret; \
+ __builtin_neon_vld4_lane_v(&__ret, __p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], (int8x8_t)__s1.val[3], __p2, 5); \
+ __ret; \
+})
+#else
+#define vld4_lane_p16(__p0, __p1, __p2) __extension__ ({ \
+ poly16x4x4_t __s1 = __p1; \
+ poly16x4x4_t __rev1; \
+ __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \
+ __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \
+ __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 3, 2, 1, 0); \
+ __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 3, 2, 1, 0); \
+ poly16x4x4_t __ret; \
+ __builtin_neon_vld4_lane_v(&__ret, __p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], (int8x8_t)__rev1.val[3], __p2, 5); \
+ \
+ __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \
+ __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \
+ __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 3, 2, 1, 0); \
+ __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 3, 2, 1, 0); \
+ __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vld4q_lane_p16(__p0, __p1, __p2) __extension__ ({ \
+ poly16x8x4_t __s1 = __p1; \
+ poly16x8x4_t __ret; \
+ __builtin_neon_vld4q_lane_v(&__ret, __p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], (int8x16_t)__s1.val[3], __p2, 37); \
+ __ret; \
+})
+#else
+#define vld4q_lane_p16(__p0, __p1, __p2) __extension__ ({ \
+ poly16x8x4_t __s1 = __p1; \
+ poly16x8x4_t __rev1; \
+ __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
+ __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
+ __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \
+ __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 7, 6, 5, 4, 3, 2, 1, 0); \
+ poly16x8x4_t __ret; \
+ __builtin_neon_vld4q_lane_v(&__ret, __p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], (int8x16_t)__rev1.val[3], __p2, 37); \
+ \
+ __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
+ __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
+ __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \
+ __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 7, 6, 5, 4, 3, 2, 1, 0); \
+ __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vld4q_lane_u32(__p0, __p1, __p2) __extension__ ({ \
+ uint32x4x4_t __s1 = __p1; \
+ uint32x4x4_t __ret; \
+ __builtin_neon_vld4q_lane_v(&__ret, __p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], (int8x16_t)__s1.val[3], __p2, 50); \
+ __ret; \
+})
+#else
+#define vld4q_lane_u32(__p0, __p1, __p2) __extension__ ({ \
+ uint32x4x4_t __s1 = __p1; \
+ uint32x4x4_t __rev1; \
+ __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \
+ __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \
+ __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 3, 2, 1, 0); \
+ __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 3, 2, 1, 0); \
+ uint32x4x4_t __ret; \
+ __builtin_neon_vld4q_lane_v(&__ret, __p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], (int8x16_t)__rev1.val[3], __p2, 50); \
+ \
+ __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \
+ __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \
+ __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 3, 2, 1, 0); \
+ __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 3, 2, 1, 0); \
+ __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vld4q_lane_u16(__p0, __p1, __p2) __extension__ ({ \
+ uint16x8x4_t __s1 = __p1; \
+ uint16x8x4_t __ret; \
+ __builtin_neon_vld4q_lane_v(&__ret, __p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], (int8x16_t)__s1.val[3], __p2, 49); \
+ __ret; \
+})
+#else
+#define vld4q_lane_u16(__p0, __p1, __p2) __extension__ ({ \
+ uint16x8x4_t __s1 = __p1; \
+ uint16x8x4_t __rev1; \
+ __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
+ __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
+ __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \
+ __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 7, 6, 5, 4, 3, 2, 1, 0); \
+ uint16x8x4_t __ret; \
+ __builtin_neon_vld4q_lane_v(&__ret, __p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], (int8x16_t)__rev1.val[3], __p2, 49); \
+ \
+ __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
+ __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
+ __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \
+ __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 7, 6, 5, 4, 3, 2, 1, 0); \
+ __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vld4q_lane_f32(__p0, __p1, __p2) __extension__ ({ \
+ float32x4x4_t __s1 = __p1; \
+ float32x4x4_t __ret; \
+ __builtin_neon_vld4q_lane_v(&__ret, __p0, __s1.val[0], __s1.val[1], __s1.val[2], __s1.val[3], __p2, 41); \
+ __ret; \
+})
+#else
+#define vld4q_lane_f32(__p0, __p1, __p2) __extension__ ({ \
+ float32x4x4_t __s1 = __p1; \
+ float32x4x4_t __rev1; \
+ __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \
+ __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \
+ __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 3, 2, 1, 0); \
+ __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 3, 2, 1, 0); \
+ float32x4x4_t __ret; \
+ __builtin_neon_vld4q_lane_v(&__ret, __p0, __rev1.val[0], __rev1.val[1], __rev1.val[2], __rev1.val[3], __p2, 41); \
+ \
+ __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \
+ __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \
+ __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 3, 2, 1, 0); \
+ __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 3, 2, 1, 0); \
+ __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vld4q_lane_f16(__p0, __p1, __p2) __extension__ ({ \
+ float16x8x4_t __s1 = __p1; \
+ float16x8x4_t __ret; \
+ __builtin_neon_vld4q_lane_v(&__ret, __p0, __s1.val[0], __s1.val[1], __s1.val[2], __s1.val[3], __p2, 40); \
+ __ret; \
+})
+#else
+#define vld4q_lane_f16(__p0, __p1, __p2) __extension__ ({ \
+ float16x8x4_t __s1 = __p1; \
+ float16x8x4_t __rev1; \
+ __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
+ __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
+ __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \
+ __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 7, 6, 5, 4, 3, 2, 1, 0); \
+ float16x8x4_t __ret; \
+ __builtin_neon_vld4q_lane_v(&__ret, __p0, __rev1.val[0], __rev1.val[1], __rev1.val[2], __rev1.val[3], __p2, 40); \
+ \
+ __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
+ __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
+ __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \
+ __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 7, 6, 5, 4, 3, 2, 1, 0); \
+ __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vld4q_lane_s32(__p0, __p1, __p2) __extension__ ({ \
+ int32x4x4_t __s1 = __p1; \
+ int32x4x4_t __ret; \
+ __builtin_neon_vld4q_lane_v(&__ret, __p0, __s1.val[0], __s1.val[1], __s1.val[2], __s1.val[3], __p2, 34); \
+ __ret; \
+})
+#else
+#define vld4q_lane_s32(__p0, __p1, __p2) __extension__ ({ \
+ int32x4x4_t __s1 = __p1; \
+ int32x4x4_t __rev1; \
+ __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \
+ __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \
+ __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 3, 2, 1, 0); \
+ __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 3, 2, 1, 0); \
+ int32x4x4_t __ret; \
+ __builtin_neon_vld4q_lane_v(&__ret, __p0, __rev1.val[0], __rev1.val[1], __rev1.val[2], __rev1.val[3], __p2, 34); \
+ \
+ __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \
+ __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \
+ __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 3, 2, 1, 0); \
+ __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 3, 2, 1, 0); \
+ __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vld4q_lane_s16(__p0, __p1, __p2) __extension__ ({ \
+ int16x8x4_t __s1 = __p1; \
+ int16x8x4_t __ret; \
+ __builtin_neon_vld4q_lane_v(&__ret, __p0, __s1.val[0], __s1.val[1], __s1.val[2], __s1.val[3], __p2, 33); \
+ __ret; \
+})
+#else
+#define vld4q_lane_s16(__p0, __p1, __p2) __extension__ ({ \
+ int16x8x4_t __s1 = __p1; \
+ int16x8x4_t __rev1; \
+ __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
+ __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
+ __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \
+ __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 7, 6, 5, 4, 3, 2, 1, 0); \
+ int16x8x4_t __ret; \
+ __builtin_neon_vld4q_lane_v(&__ret, __p0, __rev1.val[0], __rev1.val[1], __rev1.val[2], __rev1.val[3], __p2, 33); \
+ \
+ __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
+ __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
+ __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \
+ __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 7, 6, 5, 4, 3, 2, 1, 0); \
+ __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vld4_lane_u8(__p0, __p1, __p2) __extension__ ({ \
+ uint8x8x4_t __s1 = __p1; \
+ uint8x8x4_t __ret; \
+ __builtin_neon_vld4_lane_v(&__ret, __p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], (int8x8_t)__s1.val[3], __p2, 16); \
+ __ret; \
+})
+#else
+#define vld4_lane_u8(__p0, __p1, __p2) __extension__ ({ \
+ uint8x8x4_t __s1 = __p1; \
+ uint8x8x4_t __rev1; \
+ __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
+ __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
+ __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \
+ __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 7, 6, 5, 4, 3, 2, 1, 0); \
+ uint8x8x4_t __ret; \
+ __builtin_neon_vld4_lane_v(&__ret, __p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], (int8x8_t)__rev1.val[3], __p2, 16); \
+ \
+ __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
+ __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
+ __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \
+ __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 7, 6, 5, 4, 3, 2, 1, 0); \
+ __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vld4_lane_u32(__p0, __p1, __p2) __extension__ ({ \
+ uint32x2x4_t __s1 = __p1; \
+ uint32x2x4_t __ret; \
+ __builtin_neon_vld4_lane_v(&__ret, __p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], (int8x8_t)__s1.val[3], __p2, 18); \
+ __ret; \
+})
+#else
+#define vld4_lane_u32(__p0, __p1, __p2) __extension__ ({ \
+ uint32x2x4_t __s1 = __p1; \
+ uint32x2x4_t __rev1; \
+ __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \
+ __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \
+ __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 1, 0); \
+ __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 1, 0); \
+ uint32x2x4_t __ret; \
+ __builtin_neon_vld4_lane_v(&__ret, __p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], (int8x8_t)__rev1.val[3], __p2, 18); \
+ \
+ __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \
+ __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \
+ __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 1, 0); \
+ __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 1, 0); \
+ __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vld4_lane_u16(__p0, __p1, __p2) __extension__ ({ \
+ uint16x4x4_t __s1 = __p1; \
+ uint16x4x4_t __ret; \
+ __builtin_neon_vld4_lane_v(&__ret, __p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], (int8x8_t)__s1.val[3], __p2, 17); \
+ __ret; \
+})
+#else
+#define vld4_lane_u16(__p0, __p1, __p2) __extension__ ({ \
+ uint16x4x4_t __s1 = __p1; \
+ uint16x4x4_t __rev1; \
+ __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \
+ __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \
+ __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 3, 2, 1, 0); \
+ __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 3, 2, 1, 0); \
+ uint16x4x4_t __ret; \
+ __builtin_neon_vld4_lane_v(&__ret, __p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], (int8x8_t)__rev1.val[3], __p2, 17); \
+ \
+ __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \
+ __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \
+ __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 3, 2, 1, 0); \
+ __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 3, 2, 1, 0); \
+ __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vld4_lane_s8(__p0, __p1, __p2) __extension__ ({ \
+ int8x8x4_t __s1 = __p1; \
+ int8x8x4_t __ret; \
+ __builtin_neon_vld4_lane_v(&__ret, __p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], (int8x8_t)__s1.val[3], __p2, 0); \
+ __ret; \
+})
+#else
+#define vld4_lane_s8(__p0, __p1, __p2) __extension__ ({ \
+ int8x8x4_t __s1 = __p1; \
+ int8x8x4_t __rev1; \
+ __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
+ __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
+ __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \
+ __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 7, 6, 5, 4, 3, 2, 1, 0); \
+ int8x8x4_t __ret; \
+ __builtin_neon_vld4_lane_v(&__ret, __p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], (int8x8_t)__rev1.val[3], __p2, 0); \
+ \
+ __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
+ __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
+ __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \
+ __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 7, 6, 5, 4, 3, 2, 1, 0); \
+ __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vld4_lane_f32(__p0, __p1, __p2) __extension__ ({ \
+ float32x2x4_t __s1 = __p1; \
+ float32x2x4_t __ret; \
+ __builtin_neon_vld4_lane_v(&__ret, __p0, __s1.val[0], __s1.val[1], __s1.val[2], __s1.val[3], __p2, 9); \
+ __ret; \
+})
+#else
+#define vld4_lane_f32(__p0, __p1, __p2) __extension__ ({ \
+ float32x2x4_t __s1 = __p1; \
+ float32x2x4_t __rev1; \
+ __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \
+ __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \
+ __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 1, 0); \
+ __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 1, 0); \
+ float32x2x4_t __ret; \
+ __builtin_neon_vld4_lane_v(&__ret, __p0, __rev1.val[0], __rev1.val[1], __rev1.val[2], __rev1.val[3], __p2, 9); \
+ \
+ __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \
+ __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \
+ __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 1, 0); \
+ __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 1, 0); \
+ __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vld4_lane_f16(__p0, __p1, __p2) __extension__ ({ \
+ float16x4x4_t __s1 = __p1; \
+ float16x4x4_t __ret; \
+ __builtin_neon_vld4_lane_v(&__ret, __p0, __s1.val[0], __s1.val[1], __s1.val[2], __s1.val[3], __p2, 8); \
+ __ret; \
+})
+#else
+#define vld4_lane_f16(__p0, __p1, __p2) __extension__ ({ \
+ float16x4x4_t __s1 = __p1; \
+ float16x4x4_t __rev1; \
+ __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \
+ __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \
+ __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 3, 2, 1, 0); \
+ __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 3, 2, 1, 0); \
+ float16x4x4_t __ret; \
+ __builtin_neon_vld4_lane_v(&__ret, __p0, __rev1.val[0], __rev1.val[1], __rev1.val[2], __rev1.val[3], __p2, 8); \
+ \
+ __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \
+ __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \
+ __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 3, 2, 1, 0); \
+ __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 3, 2, 1, 0); \
+ __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vld4_lane_s32(__p0, __p1, __p2) __extension__ ({ \
+ int32x2x4_t __s1 = __p1; \
+ int32x2x4_t __ret; \
+ __builtin_neon_vld4_lane_v(&__ret, __p0, __s1.val[0], __s1.val[1], __s1.val[2], __s1.val[3], __p2, 2); \
+ __ret; \
+})
+#else
+#define vld4_lane_s32(__p0, __p1, __p2) __extension__ ({ \
+ int32x2x4_t __s1 = __p1; \
+ int32x2x4_t __rev1; \
+ __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \
+ __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \
+ __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 1, 0); \
+ __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 1, 0); \
+ int32x2x4_t __ret; \
+ __builtin_neon_vld4_lane_v(&__ret, __p0, __rev1.val[0], __rev1.val[1], __rev1.val[2], __rev1.val[3], __p2, 2); \
+ \
+ __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \
+ __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \
+ __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 1, 0); \
+ __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 1, 0); \
+ __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vld4_lane_s16(__p0, __p1, __p2) __extension__ ({ \
+ int16x4x4_t __s1 = __p1; \
+ int16x4x4_t __ret; \
+ __builtin_neon_vld4_lane_v(&__ret, __p0, __s1.val[0], __s1.val[1], __s1.val[2], __s1.val[3], __p2, 1); \
+ __ret; \
+})
+#else
+#define vld4_lane_s16(__p0, __p1, __p2) __extension__ ({ \
+ int16x4x4_t __s1 = __p1; \
+ int16x4x4_t __rev1; \
+ __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \
+ __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \
+ __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 3, 2, 1, 0); \
+ __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 3, 2, 1, 0); \
+ int16x4x4_t __ret; \
+ __builtin_neon_vld4_lane_v(&__ret, __p0, __rev1.val[0], __rev1.val[1], __rev1.val[2], __rev1.val[3], __p2, 1); \
+ \
+ __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \
+ __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \
+ __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 3, 2, 1, 0); \
+ __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 3, 2, 1, 0); \
+ __ret; \
+})
+#endif
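+
+/* Usage sketch (illustrative): the vld4_lane_* / vld4q_lane_* forms load one
+ * 4-element structure into lane __p2 of the four vectors passed in as __p1,
+ * leaving the remaining lanes unchanged; the q forms operate on 128-bit
+ * vectors, and __p2 must be a constant lane index.  On big-endian targets
+ * both the source vectors and the result are lane-reversed around the
+ * builtin call, as for vld4 above:
+ *
+ *   int16x4x4_t acc = vld4_s16(p);
+ *   acc = vld4_lane_s16(q, acc, 1);   // refill lane 1 of each vector from q
+ */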
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint8x16_t vmaxq_u8(uint8x16_t __p0, uint8x16_t __p1) {
+ uint8x16_t __ret;
+ __ret = (uint8x16_t) __builtin_neon_vmaxq_v((int8x16_t)__p0, (int8x16_t)__p1, 48);
+ return __ret;
+}
+#else
+__ai uint8x16_t vmaxq_u8(uint8x16_t __p0, uint8x16_t __p1) {
+ uint8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
+ uint8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
+ uint8x16_t __ret;
+ __ret = (uint8x16_t) __builtin_neon_vmaxq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 48);
+ __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint32x4_t vmaxq_u32(uint32x4_t __p0, uint32x4_t __p1) {
+ uint32x4_t __ret;
+ __ret = (uint32x4_t) __builtin_neon_vmaxq_v((int8x16_t)__p0, (int8x16_t)__p1, 50);
+ return __ret;
+}
+#else
+__ai uint32x4_t vmaxq_u32(uint32x4_t __p0, uint32x4_t __p1) {
+ uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
+ uint32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
+ uint32x4_t __ret;
+ __ret = (uint32x4_t) __builtin_neon_vmaxq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 50);
+ __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint16x8_t vmaxq_u16(uint16x8_t __p0, uint16x8_t __p1) {
+ uint16x8_t __ret;
+ __ret = (uint16x8_t) __builtin_neon_vmaxq_v((int8x16_t)__p0, (int8x16_t)__p1, 49);
+ return __ret;
+}
+#else
+__ai uint16x8_t vmaxq_u16(uint16x8_t __p0, uint16x8_t __p1) {
+ uint16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
+ uint16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
+ uint16x8_t __ret;
+ __ret = (uint16x8_t) __builtin_neon_vmaxq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 49);
+ __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai int8x16_t vmaxq_s8(int8x16_t __p0, int8x16_t __p1) {
+ int8x16_t __ret;
+ __ret = (int8x16_t) __builtin_neon_vmaxq_v((int8x16_t)__p0, (int8x16_t)__p1, 32);
+ return __ret;
+}
+#else
+__ai int8x16_t vmaxq_s8(int8x16_t __p0, int8x16_t __p1) {
+ int8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
+ int8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
+ int8x16_t __ret;
+ __ret = (int8x16_t) __builtin_neon_vmaxq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 32);
+ __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai float32x4_t vmaxq_f32(float32x4_t __p0, float32x4_t __p1) {
+ float32x4_t __ret;
+ __ret = (float32x4_t) __builtin_neon_vmaxq_v((int8x16_t)__p0, (int8x16_t)__p1, 41);
+ return __ret;
+}
+#else
+__ai float32x4_t vmaxq_f32(float32x4_t __p0, float32x4_t __p1) {
+ float32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
+ float32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
+ float32x4_t __ret;
+ __ret = (float32x4_t) __builtin_neon_vmaxq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 41);
+ __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai int32x4_t vmaxq_s32(int32x4_t __p0, int32x4_t __p1) {
+ int32x4_t __ret;
+ __ret = (int32x4_t) __builtin_neon_vmaxq_v((int8x16_t)__p0, (int8x16_t)__p1, 34);
+ return __ret;
+}
+#else
+__ai int32x4_t vmaxq_s32(int32x4_t __p0, int32x4_t __p1) {
+ int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
+ int32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
+ int32x4_t __ret;
+ __ret = (int32x4_t) __builtin_neon_vmaxq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 34);
+ __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai int16x8_t vmaxq_s16(int16x8_t __p0, int16x8_t __p1) {
+ int16x8_t __ret;
+ __ret = (int16x8_t) __builtin_neon_vmaxq_v((int8x16_t)__p0, (int8x16_t)__p1, 33);
+ return __ret;
+}
+#else
+__ai int16x8_t vmaxq_s16(int16x8_t __p0, int16x8_t __p1) {
+ int16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
+ int16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
+ int16x8_t __ret;
+ __ret = (int16x8_t) __builtin_neon_vmaxq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 33);
+ __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint8x8_t vmax_u8(uint8x8_t __p0, uint8x8_t __p1) {
+ uint8x8_t __ret;
+ __ret = (uint8x8_t) __builtin_neon_vmax_v((int8x8_t)__p0, (int8x8_t)__p1, 16);
+ return __ret;
+}
+#else
+__ai uint8x8_t vmax_u8(uint8x8_t __p0, uint8x8_t __p1) {
+ uint8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
+ uint8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
+ uint8x8_t __ret;
+ __ret = (uint8x8_t) __builtin_neon_vmax_v((int8x8_t)__rev0, (int8x8_t)__rev1, 16);
+ __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint32x2_t vmax_u32(uint32x2_t __p0, uint32x2_t __p1) {
+ uint32x2_t __ret;
+ __ret = (uint32x2_t) __builtin_neon_vmax_v((int8x8_t)__p0, (int8x8_t)__p1, 18);
+ return __ret;
+}
+#else
+__ai uint32x2_t vmax_u32(uint32x2_t __p0, uint32x2_t __p1) {
+ uint32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
+ uint32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
+ uint32x2_t __ret;
+ __ret = (uint32x2_t) __builtin_neon_vmax_v((int8x8_t)__rev0, (int8x8_t)__rev1, 18);
+ __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint16x4_t vmax_u16(uint16x4_t __p0, uint16x4_t __p1) {
+ uint16x4_t __ret;
+ __ret = (uint16x4_t) __builtin_neon_vmax_v((int8x8_t)__p0, (int8x8_t)__p1, 17);
+ return __ret;
+}
+#else
+__ai uint16x4_t vmax_u16(uint16x4_t __p0, uint16x4_t __p1) {
+ uint16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
+ uint16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
+ uint16x4_t __ret;
+ __ret = (uint16x4_t) __builtin_neon_vmax_v((int8x8_t)__rev0, (int8x8_t)__rev1, 17);
+ __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai int8x8_t vmax_s8(int8x8_t __p0, int8x8_t __p1) {
+ int8x8_t __ret;
+ __ret = (int8x8_t) __builtin_neon_vmax_v((int8x8_t)__p0, (int8x8_t)__p1, 0);
+ return __ret;
+}
+#else
+__ai int8x8_t vmax_s8(int8x8_t __p0, int8x8_t __p1) {
+ int8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
+ int8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
+ int8x8_t __ret;
+ __ret = (int8x8_t) __builtin_neon_vmax_v((int8x8_t)__rev0, (int8x8_t)__rev1, 0);
+ __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai float32x2_t vmax_f32(float32x2_t __p0, float32x2_t __p1) {
+ float32x2_t __ret;
+ __ret = (float32x2_t) __builtin_neon_vmax_v((int8x8_t)__p0, (int8x8_t)__p1, 9);
+ return __ret;
+}
+#else
+__ai float32x2_t vmax_f32(float32x2_t __p0, float32x2_t __p1) {
+ float32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
+ float32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
+ float32x2_t __ret;
+ __ret = (float32x2_t) __builtin_neon_vmax_v((int8x8_t)__rev0, (int8x8_t)__rev1, 9);
+ __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai int32x2_t vmax_s32(int32x2_t __p0, int32x2_t __p1) {
+ int32x2_t __ret;
+ __ret = (int32x2_t) __builtin_neon_vmax_v((int8x8_t)__p0, (int8x8_t)__p1, 2);
+ return __ret;
+}
+#else
+__ai int32x2_t vmax_s32(int32x2_t __p0, int32x2_t __p1) {
+ int32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
+ int32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
+ int32x2_t __ret;
+ __ret = (int32x2_t) __builtin_neon_vmax_v((int8x8_t)__rev0, (int8x8_t)__rev1, 2);
+ __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai int16x4_t vmax_s16(int16x4_t __p0, int16x4_t __p1) {
+ int16x4_t __ret;
+ __ret = (int16x4_t) __builtin_neon_vmax_v((int8x8_t)__p0, (int8x8_t)__p1, 1);
+ return __ret;
+}
+#else
+__ai int16x4_t vmax_s16(int16x4_t __p0, int16x4_t __p1) {
+ int16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
+ int16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
+ int16x4_t __ret;
+ __ret = (int16x4_t) __builtin_neon_vmax_v((int8x8_t)__rev0, (int8x8_t)__rev1, 1);
+ __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
+ return __ret;
+}
+#endif
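+
+/* Usage sketch (illustrative): vmax/vmaxq return the element-wise maximum of
+ * their two operands:
+ *
+ *   int16x4_t a = vld1_s16(pa);    // e.g. {1, 7, -3, 4}
+ *   int16x4_t b = vld1_s16(pb);    // e.g. {2, 5, -1, 4}
+ *   int16x4_t m = vmax_s16(a, b);  //      {2, 7, -1, 4}
+ */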
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint8x16_t vminq_u8(uint8x16_t __p0, uint8x16_t __p1) {
+ uint8x16_t __ret;
+ __ret = (uint8x16_t) __builtin_neon_vminq_v((int8x16_t)__p0, (int8x16_t)__p1, 48);
+ return __ret;
+}
+#else
+__ai uint8x16_t vminq_u8(uint8x16_t __p0, uint8x16_t __p1) {
+ uint8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
+ uint8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
+ uint8x16_t __ret;
+ __ret = (uint8x16_t) __builtin_neon_vminq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 48);
+ __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint32x4_t vminq_u32(uint32x4_t __p0, uint32x4_t __p1) {
+ uint32x4_t __ret;
+ __ret = (uint32x4_t) __builtin_neon_vminq_v((int8x16_t)__p0, (int8x16_t)__p1, 50);
+ return __ret;
+}
+#else
+__ai uint32x4_t vminq_u32(uint32x4_t __p0, uint32x4_t __p1) {
+ uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
+ uint32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
+ uint32x4_t __ret;
+ __ret = (uint32x4_t) __builtin_neon_vminq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 50);
+ __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint16x8_t vminq_u16(uint16x8_t __p0, uint16x8_t __p1) {
+ uint16x8_t __ret;
+ __ret = (uint16x8_t) __builtin_neon_vminq_v((int8x16_t)__p0, (int8x16_t)__p1, 49);
+ return __ret;
+}
+#else
+__ai uint16x8_t vminq_u16(uint16x8_t __p0, uint16x8_t __p1) {
+ uint16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
+ uint16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
+ uint16x8_t __ret;
+ __ret = (uint16x8_t) __builtin_neon_vminq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 49);
+ __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai int8x16_t vminq_s8(int8x16_t __p0, int8x16_t __p1) {
+ int8x16_t __ret;
+ __ret = (int8x16_t) __builtin_neon_vminq_v((int8x16_t)__p0, (int8x16_t)__p1, 32);
+ return __ret;
+}
+#else
+__ai int8x16_t vminq_s8(int8x16_t __p0, int8x16_t __p1) {
+ int8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
+ int8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
+ int8x16_t __ret;
+ __ret = (int8x16_t) __builtin_neon_vminq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 32);
+ __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai float32x4_t vminq_f32(float32x4_t __p0, float32x4_t __p1) {
+ float32x4_t __ret;
+ __ret = (float32x4_t) __builtin_neon_vminq_v((int8x16_t)__p0, (int8x16_t)__p1, 41);
+ return __ret;
+}
+#else
+__ai float32x4_t vminq_f32(float32x4_t __p0, float32x4_t __p1) {
+ float32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
+ float32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
+ float32x4_t __ret;
+ __ret = (float32x4_t) __builtin_neon_vminq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 41);
+ __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai int32x4_t vminq_s32(int32x4_t __p0, int32x4_t __p1) {
+ int32x4_t __ret;
+ __ret = (int32x4_t) __builtin_neon_vminq_v((int8x16_t)__p0, (int8x16_t)__p1, 34);
+ return __ret;
+}
+#else
+__ai int32x4_t vminq_s32(int32x4_t __p0, int32x4_t __p1) {
+ int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
+ int32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
+ int32x4_t __ret;
+ __ret = (int32x4_t) __builtin_neon_vminq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 34);
+ __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai int16x8_t vminq_s16(int16x8_t __p0, int16x8_t __p1) {
+ int16x8_t __ret;
+ __ret = (int16x8_t) __builtin_neon_vminq_v((int8x16_t)__p0, (int8x16_t)__p1, 33);
+ return __ret;
+}
+#else
+__ai int16x8_t vminq_s16(int16x8_t __p0, int16x8_t __p1) {
+ int16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
+ int16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
+ int16x8_t __ret;
+ __ret = (int16x8_t) __builtin_neon_vminq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 33);
+ __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint8x8_t vmin_u8(uint8x8_t __p0, uint8x8_t __p1) {
+ uint8x8_t __ret;
+ __ret = (uint8x8_t) __builtin_neon_vmin_v((int8x8_t)__p0, (int8x8_t)__p1, 16);
+ return __ret;
+}
+#else
+__ai uint8x8_t vmin_u8(uint8x8_t __p0, uint8x8_t __p1) {
+ uint8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
+ uint8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
+ uint8x8_t __ret;
+ __ret = (uint8x8_t) __builtin_neon_vmin_v((int8x8_t)__rev0, (int8x8_t)__rev1, 16);
+ __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint32x2_t vmin_u32(uint32x2_t __p0, uint32x2_t __p1) {
+ uint32x2_t __ret;
+ __ret = (uint32x2_t) __builtin_neon_vmin_v((int8x8_t)__p0, (int8x8_t)__p1, 18);
+ return __ret;
+}
+#else
+__ai uint32x2_t vmin_u32(uint32x2_t __p0, uint32x2_t __p1) {
+ uint32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
+ uint32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
+ uint32x2_t __ret;
+ __ret = (uint32x2_t) __builtin_neon_vmin_v((int8x8_t)__rev0, (int8x8_t)__rev1, 18);
+ __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint16x4_t vmin_u16(uint16x4_t __p0, uint16x4_t __p1) {
+ uint16x4_t __ret;
+ __ret = (uint16x4_t) __builtin_neon_vmin_v((int8x8_t)__p0, (int8x8_t)__p1, 17);
+ return __ret;
+}
+#else
+__ai uint16x4_t vmin_u16(uint16x4_t __p0, uint16x4_t __p1) {
+ uint16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
+ uint16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
+ uint16x4_t __ret;
+ __ret = (uint16x4_t) __builtin_neon_vmin_v((int8x8_t)__rev0, (int8x8_t)__rev1, 17);
+ __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai int8x8_t vmin_s8(int8x8_t __p0, int8x8_t __p1) {
+ int8x8_t __ret;
+ __ret = (int8x8_t) __builtin_neon_vmin_v((int8x8_t)__p0, (int8x8_t)__p1, 0);
+ return __ret;
+}
+#else
+__ai int8x8_t vmin_s8(int8x8_t __p0, int8x8_t __p1) {
+ int8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
+ int8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
+ int8x8_t __ret;
+ __ret = (int8x8_t) __builtin_neon_vmin_v((int8x8_t)__rev0, (int8x8_t)__rev1, 0);
+ __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai float32x2_t vmin_f32(float32x2_t __p0, float32x2_t __p1) {
+ float32x2_t __ret;
+ __ret = (float32x2_t) __builtin_neon_vmin_v((int8x8_t)__p0, (int8x8_t)__p1, 9);
+ return __ret;
+}
+#else
+__ai float32x2_t vmin_f32(float32x2_t __p0, float32x2_t __p1) {
+ float32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
+ float32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
+ float32x2_t __ret;
+ __ret = (float32x2_t) __builtin_neon_vmin_v((int8x8_t)__rev0, (int8x8_t)__rev1, 9);
+ __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai int32x2_t vmin_s32(int32x2_t __p0, int32x2_t __p1) {
+ int32x2_t __ret;
+ __ret = (int32x2_t) __builtin_neon_vmin_v((int8x8_t)__p0, (int8x8_t)__p1, 2);
+ return __ret;
+}
+#else
+__ai int32x2_t vmin_s32(int32x2_t __p0, int32x2_t __p1) {
+ int32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
+ int32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
+ int32x2_t __ret;
+ __ret = (int32x2_t) __builtin_neon_vmin_v((int8x8_t)__rev0, (int8x8_t)__rev1, 2);
+ __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai int16x4_t vmin_s16(int16x4_t __p0, int16x4_t __p1) {
+ int16x4_t __ret;
+ __ret = (int16x4_t) __builtin_neon_vmin_v((int8x8_t)__p0, (int8x8_t)__p1, 1);
+ return __ret;
+}
+#else
+__ai int16x4_t vmin_s16(int16x4_t __p0, int16x4_t __p1) {
+ int16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
+ int16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
+ int16x4_t __ret;
+ __ret = (int16x4_t) __builtin_neon_vmin_v((int8x8_t)__rev0, (int8x8_t)__rev1, 1);
+ __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
+ return __ret;
+}
+#endif
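+
+/* Usage sketch (illustrative): vmin/vminq are the element-wise counterpart
+ * of vmax above, keeping the smaller element of each lane (for ordinary,
+ * non-NaN floating-point inputs, lo[i] equals a[i] < b[i] ? a[i] : b[i]):
+ *
+ *   float32x2_t lo = vmin_f32(a, b);
+ */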
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint8x16_t vmlaq_u8(uint8x16_t __p0, uint8x16_t __p1, uint8x16_t __p2) {
+ uint8x16_t __ret;
+ __ret = __p0 + __p1 * __p2;
+ return __ret;
+}
+#else
+__ai uint8x16_t vmlaq_u8(uint8x16_t __p0, uint8x16_t __p1, uint8x16_t __p2) {
+ uint8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
+ uint8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
+ uint8x16_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
+ uint8x16_t __ret;
+ __ret = __rev0 + __rev1 * __rev2;
+ __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint32x4_t vmlaq_u32(uint32x4_t __p0, uint32x4_t __p1, uint32x4_t __p2) {
+ uint32x4_t __ret;
+ __ret = __p0 + __p1 * __p2;
+ return __ret;
+}
+#else
+__ai uint32x4_t vmlaq_u32(uint32x4_t __p0, uint32x4_t __p1, uint32x4_t __p2) {
+ uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
+ uint32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
+ uint32x4_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 3, 2, 1, 0);
+ uint32x4_t __ret;
+ __ret = __rev0 + __rev1 * __rev2;
+ __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint16x8_t vmlaq_u16(uint16x8_t __p0, uint16x8_t __p1, uint16x8_t __p2) {
+ uint16x8_t __ret;
+ __ret = __p0 + __p1 * __p2;
+ return __ret;
+}
+#else
+__ai uint16x8_t vmlaq_u16(uint16x8_t __p0, uint16x8_t __p1, uint16x8_t __p2) {
+ uint16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
+ uint16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
+ uint16x8_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0);
+ uint16x8_t __ret;
+ __ret = __rev0 + __rev1 * __rev2;
+ __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai int8x16_t vmlaq_s8(int8x16_t __p0, int8x16_t __p1, int8x16_t __p2) {
+ int8x16_t __ret;
+ __ret = __p0 + __p1 * __p2;
+ return __ret;
+}
+#else
+__ai int8x16_t vmlaq_s8(int8x16_t __p0, int8x16_t __p1, int8x16_t __p2) {
+ int8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
+ int8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
+ int8x16_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
+ int8x16_t __ret;
+ __ret = __rev0 + __rev1 * __rev2;
+ __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai float32x4_t vmlaq_f32(float32x4_t __p0, float32x4_t __p1, float32x4_t __p2) {
+ float32x4_t __ret;
+ __ret = __p0 + __p1 * __p2;
+ return __ret;
+}
+#else
+__ai float32x4_t vmlaq_f32(float32x4_t __p0, float32x4_t __p1, float32x4_t __p2) {
+ float32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
+ float32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
+ float32x4_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 3, 2, 1, 0);
+ float32x4_t __ret;
+ __ret = __rev0 + __rev1 * __rev2;
+ __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai int32x4_t vmlaq_s32(int32x4_t __p0, int32x4_t __p1, int32x4_t __p2) {
+ int32x4_t __ret;
+ __ret = __p0 + __p1 * __p2;
+ return __ret;
+}
+#else
+__ai int32x4_t vmlaq_s32(int32x4_t __p0, int32x4_t __p1, int32x4_t __p2) {
+ int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
+ int32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
+ int32x4_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 3, 2, 1, 0);
+ int32x4_t __ret;
+ __ret = __rev0 + __rev1 * __rev2;
+ __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai int16x8_t vmlaq_s16(int16x8_t __p0, int16x8_t __p1, int16x8_t __p2) {
+ int16x8_t __ret;
+ __ret = __p0 + __p1 * __p2;
+ return __ret;
+}
+#else
+__ai int16x8_t vmlaq_s16(int16x8_t __p0, int16x8_t __p1, int16x8_t __p2) {
+ int16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
+ int16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
+ int16x8_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0);
+ int16x8_t __ret;
+ __ret = __rev0 + __rev1 * __rev2;
+ __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint8x8_t vmla_u8(uint8x8_t __p0, uint8x8_t __p1, uint8x8_t __p2) {
+ uint8x8_t __ret;
+ __ret = __p0 + __p1 * __p2;
+ return __ret;
+}
+#else
+__ai uint8x8_t vmla_u8(uint8x8_t __p0, uint8x8_t __p1, uint8x8_t __p2) {
+ uint8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
+ uint8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
+ uint8x8_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0);
+ uint8x8_t __ret;
+ __ret = __rev0 + __rev1 * __rev2;
+ __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint32x2_t vmla_u32(uint32x2_t __p0, uint32x2_t __p1, uint32x2_t __p2) {
+ uint32x2_t __ret;
+ __ret = __p0 + __p1 * __p2;
+ return __ret;
+}
+#else
+__ai uint32x2_t vmla_u32(uint32x2_t __p0, uint32x2_t __p1, uint32x2_t __p2) {
+ uint32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
+ uint32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
+ uint32x2_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 1, 0);
+ uint32x2_t __ret;
+ __ret = __rev0 + __rev1 * __rev2;
+ __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint16x4_t vmla_u16(uint16x4_t __p0, uint16x4_t __p1, uint16x4_t __p2) {
+ uint16x4_t __ret;
+ __ret = __p0 + __p1 * __p2;
+ return __ret;
+}
+#else
+__ai uint16x4_t vmla_u16(uint16x4_t __p0, uint16x4_t __p1, uint16x4_t __p2) {
+ uint16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
+ uint16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
+ uint16x4_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 3, 2, 1, 0);
+ uint16x4_t __ret;
+ __ret = __rev0 + __rev1 * __rev2;
+ __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai int8x8_t vmla_s8(int8x8_t __p0, int8x8_t __p1, int8x8_t __p2) {
+ int8x8_t __ret;
+ __ret = __p0 + __p1 * __p2;
+ return __ret;
+}
+#else
+__ai int8x8_t vmla_s8(int8x8_t __p0, int8x8_t __p1, int8x8_t __p2) {
+ int8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
+ int8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
+ int8x8_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0);
+ int8x8_t __ret;
+ __ret = __rev0 + __rev1 * __rev2;
+ __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai float32x2_t vmla_f32(float32x2_t __p0, float32x2_t __p1, float32x2_t __p2) {
+ float32x2_t __ret;
+ __ret = __p0 + __p1 * __p2;
+ return __ret;
+}
+#else
+__ai float32x2_t vmla_f32(float32x2_t __p0, float32x2_t __p1, float32x2_t __p2) {
+ float32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
+ float32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
+ float32x2_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 1, 0);
+ float32x2_t __ret;
+ __ret = __rev0 + __rev1 * __rev2;
+ __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai int32x2_t vmla_s32(int32x2_t __p0, int32x2_t __p1, int32x2_t __p2) {
+ int32x2_t __ret;
+ __ret = __p0 + __p1 * __p2;
+ return __ret;
+}
+#else
+__ai int32x2_t vmla_s32(int32x2_t __p0, int32x2_t __p1, int32x2_t __p2) {
+ int32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
+ int32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
+ int32x2_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 1, 0);
+ int32x2_t __ret;
+ __ret = __rev0 + __rev1 * __rev2;
+ __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai int16x4_t vmla_s16(int16x4_t __p0, int16x4_t __p1, int16x4_t __p2) {
+ int16x4_t __ret;
+ __ret = __p0 + __p1 * __p2;
+ return __ret;
+}
+#else
+__ai int16x4_t vmla_s16(int16x4_t __p0, int16x4_t __p1, int16x4_t __p2) {
+ int16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
+ int16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
+ int16x4_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 3, 2, 1, 0);
+ int16x4_t __ret;
+ __ret = __rev0 + __rev1 * __rev2;
+ __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
+ return __ret;
+}
+#endif
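+
+/* Usage sketch (illustrative): vmla/vmlaq is multiply-accumulate, expressed
+ * above directly as __p0 + __p1 * __p2 on the vector types rather than
+ * through a builtin:
+ *
+ *   float32x4_t acc = vld1q_f32(pa);
+ *   acc = vmlaq_f32(acc, vld1q_f32(px), vld1q_f32(py));   // acc += x * y
+ */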
+
+#ifdef __LITTLE_ENDIAN__
+#define vmlaq_lane_u32(__p0, __p1, __p2, __p3) __extension__ ({ \
+ uint32x4_t __s0 = __p0; \
+ uint32x4_t __s1 = __p1; \
+ uint32x2_t __s2 = __p2; \
+ uint32x4_t __ret; \
+ __ret = __s0 + __s1 * __builtin_shufflevector(__s2, __s2, __p3, __p3, __p3, __p3); \
+ __ret; \
+})
+#else
+#define vmlaq_lane_u32(__p0, __p1, __p2, __p3) __extension__ ({ \
+ uint32x4_t __s0 = __p0; \
+ uint32x4_t __s1 = __p1; \
+ uint32x2_t __s2 = __p2; \
+ uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
+ uint32x4_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
+ uint32x2_t __rev2; __rev2 = __builtin_shufflevector(__s2, __s2, 1, 0); \
+ uint32x4_t __ret; \
+ __ret = __rev0 + __rev1 * __builtin_shufflevector(__rev2, __rev2, __p3, __p3, __p3, __p3); \
+ __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
+ __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vmlaq_lane_u16(__p0, __p1, __p2, __p3) __extension__ ({ \
+ uint16x8_t __s0 = __p0; \
+ uint16x8_t __s1 = __p1; \
+ uint16x4_t __s2 = __p2; \
+ uint16x8_t __ret; \
+ __ret = __s0 + __s1 * __builtin_shufflevector(__s2, __s2, __p3, __p3, __p3, __p3, __p3, __p3, __p3, __p3); \
+ __ret; \
+})
+#else
+#define vmlaq_lane_u16(__p0, __p1, __p2, __p3) __extension__ ({ \
+ uint16x8_t __s0 = __p0; \
+ uint16x8_t __s1 = __p1; \
+ uint16x4_t __s2 = __p2; \
+ uint16x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
+ uint16x8_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \
+ uint16x4_t __rev2; __rev2 = __builtin_shufflevector(__s2, __s2, 3, 2, 1, 0); \
+ uint16x8_t __ret; \
+ __ret = __rev0 + __rev1 * __builtin_shufflevector(__rev2, __rev2, __p3, __p3, __p3, __p3, __p3, __p3, __p3, __p3); \
+ __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
+ __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vmlaq_lane_f32(__p0, __p1, __p2, __p3) __extension__ ({ \
+ float32x4_t __s0 = __p0; \
+ float32x4_t __s1 = __p1; \
+ float32x2_t __s2 = __p2; \
+ float32x4_t __ret; \
+ __ret = __s0 + __s1 * __builtin_shufflevector(__s2, __s2, __p3, __p3, __p3, __p3); \
+ __ret; \
+})
+#else
+#define vmlaq_lane_f32(__p0, __p1, __p2, __p3) __extension__ ({ \
+ float32x4_t __s0 = __p0; \
+ float32x4_t __s1 = __p1; \
+ float32x2_t __s2 = __p2; \
+ float32x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
+ float32x4_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
+ float32x2_t __rev2; __rev2 = __builtin_shufflevector(__s2, __s2, 1, 0); \
+ float32x4_t __ret; \
+ __ret = __rev0 + __rev1 * __builtin_shufflevector(__rev2, __rev2, __p3, __p3, __p3, __p3); \
+ __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
+ __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vmlaq_lane_s32(__p0, __p1, __p2, __p3) __extension__ ({ \
+ int32x4_t __s0 = __p0; \
+ int32x4_t __s1 = __p1; \
+ int32x2_t __s2 = __p2; \
+ int32x4_t __ret; \
+ __ret = __s0 + __s1 * __builtin_shufflevector(__s2, __s2, __p3, __p3, __p3, __p3); \
+ __ret; \
+})
+#else
+#define vmlaq_lane_s32(__p0, __p1, __p2, __p3) __extension__ ({ \
+ int32x4_t __s0 = __p0; \
+ int32x4_t __s1 = __p1; \
+ int32x2_t __s2 = __p2; \
+ int32x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
+ int32x4_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
+ int32x2_t __rev2; __rev2 = __builtin_shufflevector(__s2, __s2, 1, 0); \
+ int32x4_t __ret; \
+ __ret = __rev0 + __rev1 * __builtin_shufflevector(__rev2, __rev2, __p3, __p3, __p3, __p3); \
+ __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
+ __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vmlaq_lane_s16(__p0, __p1, __p2, __p3) __extension__ ({ \
+ int16x8_t __s0 = __p0; \
+ int16x8_t __s1 = __p1; \
+ int16x4_t __s2 = __p2; \
+ int16x8_t __ret; \
+ __ret = __s0 + __s1 * __builtin_shufflevector(__s2, __s2, __p3, __p3, __p3, __p3, __p3, __p3, __p3, __p3); \
+ __ret; \
+})
+#else
+#define vmlaq_lane_s16(__p0, __p1, __p2, __p3) __extension__ ({ \
+ int16x8_t __s0 = __p0; \
+ int16x8_t __s1 = __p1; \
+ int16x4_t __s2 = __p2; \
+ int16x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
+ int16x8_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \
+ int16x4_t __rev2; __rev2 = __builtin_shufflevector(__s2, __s2, 3, 2, 1, 0); \
+ int16x8_t __ret; \
+ __ret = __rev0 + __rev1 * __builtin_shufflevector(__rev2, __rev2, __p3, __p3, __p3, __p3, __p3, __p3, __p3, __p3); \
+ __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
+ __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vmla_lane_u32(__p0, __p1, __p2, __p3) __extension__ ({ \
+ uint32x2_t __s0 = __p0; \
+ uint32x2_t __s1 = __p1; \
+ uint32x2_t __s2 = __p2; \
+ uint32x2_t __ret; \
+ __ret = __s0 + __s1 * __builtin_shufflevector(__s2, __s2, __p3, __p3); \
+ __ret; \
+})
+#else
+#define vmla_lane_u32(__p0, __p1, __p2, __p3) __extension__ ({ \
+ uint32x2_t __s0 = __p0; \
+ uint32x2_t __s1 = __p1; \
+ uint32x2_t __s2 = __p2; \
+ uint32x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
+ uint32x2_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \
+ uint32x2_t __rev2; __rev2 = __builtin_shufflevector(__s2, __s2, 1, 0); \
+ uint32x2_t __ret; \
+ __ret = __rev0 + __rev1 * __builtin_shufflevector(__rev2, __rev2, __p3, __p3); \
+ __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
+ __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vmla_lane_u16(__p0, __p1, __p2, __p3) __extension__ ({ \
+ uint16x4_t __s0 = __p0; \
+ uint16x4_t __s1 = __p1; \
+ uint16x4_t __s2 = __p2; \
+ uint16x4_t __ret; \
+ __ret = __s0 + __s1 * __builtin_shufflevector(__s2, __s2, __p3, __p3, __p3, __p3); \
+ __ret; \
+})
+#else
+#define vmla_lane_u16(__p0, __p1, __p2, __p3) __extension__ ({ \
+ uint16x4_t __s0 = __p0; \
+ uint16x4_t __s1 = __p1; \
+ uint16x4_t __s2 = __p2; \
+ uint16x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
+ uint16x4_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
+ uint16x4_t __rev2; __rev2 = __builtin_shufflevector(__s2, __s2, 3, 2, 1, 0); \
+ uint16x4_t __ret; \
+ __ret = __rev0 + __rev1 * __builtin_shufflevector(__rev2, __rev2, __p3, __p3, __p3, __p3); \
+ __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
+ __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vmla_lane_f32(__p0, __p1, __p2, __p3) __extension__ ({ \
+ float32x2_t __s0 = __p0; \
+ float32x2_t __s1 = __p1; \
+ float32x2_t __s2 = __p2; \
+ float32x2_t __ret; \
+ __ret = __s0 + __s1 * __builtin_shufflevector(__s2, __s2, __p3, __p3); \
+ __ret; \
+})
+#else
+#define vmla_lane_f32(__p0, __p1, __p2, __p3) __extension__ ({ \
+ float32x2_t __s0 = __p0; \
+ float32x2_t __s1 = __p1; \
+ float32x2_t __s2 = __p2; \
+ float32x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
+ float32x2_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \
+ float32x2_t __rev2; __rev2 = __builtin_shufflevector(__s2, __s2, 1, 0); \
+ float32x2_t __ret; \
+ __ret = __rev0 + __rev1 * __builtin_shufflevector(__rev2, __rev2, __p3, __p3); \
+ __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
+ __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vmla_lane_s32(__p0, __p1, __p2, __p3) __extension__ ({ \
+ int32x2_t __s0 = __p0; \
+ int32x2_t __s1 = __p1; \
+ int32x2_t __s2 = __p2; \
+ int32x2_t __ret; \
+ __ret = __s0 + __s1 * __builtin_shufflevector(__s2, __s2, __p3, __p3); \
+ __ret; \
+})
+#else
+#define vmla_lane_s32(__p0, __p1, __p2, __p3) __extension__ ({ \
+ int32x2_t __s0 = __p0; \
+ int32x2_t __s1 = __p1; \
+ int32x2_t __s2 = __p2; \
+ int32x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
+ int32x2_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \
+ int32x2_t __rev2; __rev2 = __builtin_shufflevector(__s2, __s2, 1, 0); \
+ int32x2_t __ret; \
+ __ret = __rev0 + __rev1 * __builtin_shufflevector(__rev2, __rev2, __p3, __p3); \
+ __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
+ __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vmla_lane_s16(__p0, __p1, __p2, __p3) __extension__ ({ \
+ int16x4_t __s0 = __p0; \
+ int16x4_t __s1 = __p1; \
+ int16x4_t __s2 = __p2; \
+ int16x4_t __ret; \
+ __ret = __s0 + __s1 * __builtin_shufflevector(__s2, __s2, __p3, __p3, __p3, __p3); \
+ __ret; \
+})
+#else
+#define vmla_lane_s16(__p0, __p1, __p2, __p3) __extension__ ({ \
+ int16x4_t __s0 = __p0; \
+ int16x4_t __s1 = __p1; \
+ int16x4_t __s2 = __p2; \
+ int16x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
+ int16x4_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
+ int16x4_t __rev2; __rev2 = __builtin_shufflevector(__s2, __s2, 3, 2, 1, 0); \
+ int16x4_t __ret; \
+ __ret = __rev0 + __rev1 * __builtin_shufflevector(__rev2, __rev2, __p3, __p3, __p3, __p3); \
+ __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
+ __ret; \
+})
+#endif
+
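+/* vmla*_n_* multiply-accumulate against a scalar broadcast to every
+   lane: ret[i] = a[i] + b[i] * n. */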
+#ifdef __LITTLE_ENDIAN__
+__ai uint32x4_t vmlaq_n_u32(uint32x4_t __p0, uint32x4_t __p1, uint32_t __p2) {
+ uint32x4_t __ret;
+ __ret = __p0 + __p1 * (uint32x4_t) {__p2, __p2, __p2, __p2};
+ return __ret;
+}
+#else
+__ai uint32x4_t vmlaq_n_u32(uint32x4_t __p0, uint32x4_t __p1, uint32_t __p2) {
+ uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
+ uint32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
+ uint32x4_t __ret;
+ __ret = __rev0 + __rev1 * (uint32x4_t) {__p2, __p2, __p2, __p2};
+ __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint16x8_t vmlaq_n_u16(uint16x8_t __p0, uint16x8_t __p1, uint16_t __p2) {
+ uint16x8_t __ret;
+ __ret = __p0 + __p1 * (uint16x8_t) {__p2, __p2, __p2, __p2, __p2, __p2, __p2, __p2};
+ return __ret;
+}
+#else
+__ai uint16x8_t vmlaq_n_u16(uint16x8_t __p0, uint16x8_t __p1, uint16_t __p2) {
+ uint16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
+ uint16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
+ uint16x8_t __ret;
+ __ret = __rev0 + __rev1 * (uint16x8_t) {__p2, __p2, __p2, __p2, __p2, __p2, __p2, __p2};
+ __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai float32x4_t vmlaq_n_f32(float32x4_t __p0, float32x4_t __p1, float32_t __p2) {
+ float32x4_t __ret;
+ __ret = __p0 + __p1 * (float32x4_t) {__p2, __p2, __p2, __p2};
+ return __ret;
+}
+#else
+__ai float32x4_t vmlaq_n_f32(float32x4_t __p0, float32x4_t __p1, float32_t __p2) {
+ float32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
+ float32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
+ float32x4_t __ret;
+ __ret = __rev0 + __rev1 * (float32x4_t) {__p2, __p2, __p2, __p2};
+ __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai int32x4_t vmlaq_n_s32(int32x4_t __p0, int32x4_t __p1, int32_t __p2) {
+ int32x4_t __ret;
+ __ret = __p0 + __p1 * (int32x4_t) {__p2, __p2, __p2, __p2};
+ return __ret;
+}
+#else
+__ai int32x4_t vmlaq_n_s32(int32x4_t __p0, int32x4_t __p1, int32_t __p2) {
+ int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
+ int32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
+ int32x4_t __ret;
+ __ret = __rev0 + __rev1 * (int32x4_t) {__p2, __p2, __p2, __p2};
+ __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai int16x8_t vmlaq_n_s16(int16x8_t __p0, int16x8_t __p1, int16_t __p2) {
+ int16x8_t __ret;
+ __ret = __p0 + __p1 * (int16x8_t) {__p2, __p2, __p2, __p2, __p2, __p2, __p2, __p2};
+ return __ret;
+}
+#else
+__ai int16x8_t vmlaq_n_s16(int16x8_t __p0, int16x8_t __p1, int16_t __p2) {
+ int16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
+ int16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
+ int16x8_t __ret;
+ __ret = __rev0 + __rev1 * (int16x8_t) {__p2, __p2, __p2, __p2, __p2, __p2, __p2, __p2};
+ __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint32x2_t vmla_n_u32(uint32x2_t __p0, uint32x2_t __p1, uint32_t __p2) {
+ uint32x2_t __ret;
+ __ret = __p0 + __p1 * (uint32x2_t) {__p2, __p2};
+ return __ret;
+}
+#else
+__ai uint32x2_t vmla_n_u32(uint32x2_t __p0, uint32x2_t __p1, uint32_t __p2) {
+ uint32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
+ uint32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
+ uint32x2_t __ret;
+ __ret = __rev0 + __rev1 * (uint32x2_t) {__p2, __p2};
+ __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint16x4_t vmla_n_u16(uint16x4_t __p0, uint16x4_t __p1, uint16_t __p2) {
+ uint16x4_t __ret;
+ __ret = __p0 + __p1 * (uint16x4_t) {__p2, __p2, __p2, __p2};
+ return __ret;
+}
+#else
+__ai uint16x4_t vmla_n_u16(uint16x4_t __p0, uint16x4_t __p1, uint16_t __p2) {
+ uint16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
+ uint16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
+ uint16x4_t __ret;
+ __ret = __rev0 + __rev1 * (uint16x4_t) {__p2, __p2, __p2, __p2};
+ __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai float32x2_t vmla_n_f32(float32x2_t __p0, float32x2_t __p1, float32_t __p2) {
+ float32x2_t __ret;
+ __ret = __p0 + __p1 * (float32x2_t) {__p2, __p2};
+ return __ret;
+}
+#else
+__ai float32x2_t vmla_n_f32(float32x2_t __p0, float32x2_t __p1, float32_t __p2) {
+ float32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
+ float32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
+ float32x2_t __ret;
+ __ret = __rev0 + __rev1 * (float32x2_t) {__p2, __p2};
+ __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai int32x2_t vmla_n_s32(int32x2_t __p0, int32x2_t __p1, int32_t __p2) {
+ int32x2_t __ret;
+ __ret = __p0 + __p1 * (int32x2_t) {__p2, __p2};
+ return __ret;
+}
+#else
+__ai int32x2_t vmla_n_s32(int32x2_t __p0, int32x2_t __p1, int32_t __p2) {
+ int32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
+ int32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
+ int32x2_t __ret;
+ __ret = __rev0 + __rev1 * (int32x2_t) {__p2, __p2};
+ __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai int16x4_t vmla_n_s16(int16x4_t __p0, int16x4_t __p1, int16_t __p2) {
+ int16x4_t __ret;
+ __ret = __p0 + __p1 * (int16x4_t) {__p2, __p2, __p2, __p2};
+ return __ret;
+}
+#else
+__ai int16x4_t vmla_n_s16(int16x4_t __p0, int16x4_t __p1, int16_t __p2) {
+ int16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
+ int16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
+ int16x4_t __ret;
+ __ret = __rev0 + __rev1 * (int16x4_t) {__p2, __p2, __p2, __p2};
+ __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
+ return __ret;
+}
+#endif
+
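+/* vmls* is the multiply-subtract counterpart of vmla*:
+   ret[i] = a[i] - b[i] * c[i], with the same lane-reversal pattern on
+   big-endian targets. */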
+#ifdef __LITTLE_ENDIAN__
+__ai uint8x16_t vmlsq_u8(uint8x16_t __p0, uint8x16_t __p1, uint8x16_t __p2) {
+ uint8x16_t __ret;
+ __ret = __p0 - __p1 * __p2;
+ return __ret;
+}
+#else
+__ai uint8x16_t vmlsq_u8(uint8x16_t __p0, uint8x16_t __p1, uint8x16_t __p2) {
+ uint8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
+ uint8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
+ uint8x16_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
+ uint8x16_t __ret;
+ __ret = __rev0 - __rev1 * __rev2;
+ __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint32x4_t vmlsq_u32(uint32x4_t __p0, uint32x4_t __p1, uint32x4_t __p2) {
+ uint32x4_t __ret;
+ __ret = __p0 - __p1 * __p2;
+ return __ret;
+}
+#else
+__ai uint32x4_t vmlsq_u32(uint32x4_t __p0, uint32x4_t __p1, uint32x4_t __p2) {
+ uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
+ uint32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
+ uint32x4_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 3, 2, 1, 0);
+ uint32x4_t __ret;
+ __ret = __rev0 - __rev1 * __rev2;
+ __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint16x8_t vmlsq_u16(uint16x8_t __p0, uint16x8_t __p1, uint16x8_t __p2) {
+ uint16x8_t __ret;
+ __ret = __p0 - __p1 * __p2;
+ return __ret;
+}
+#else
+__ai uint16x8_t vmlsq_u16(uint16x8_t __p0, uint16x8_t __p1, uint16x8_t __p2) {
+ uint16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
+ uint16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
+ uint16x8_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0);
+ uint16x8_t __ret;
+ __ret = __rev0 - __rev1 * __rev2;
+ __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai int8x16_t vmlsq_s8(int8x16_t __p0, int8x16_t __p1, int8x16_t __p2) {
+ int8x16_t __ret;
+ __ret = __p0 - __p1 * __p2;
+ return __ret;
+}
+#else
+__ai int8x16_t vmlsq_s8(int8x16_t __p0, int8x16_t __p1, int8x16_t __p2) {
+ int8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
+ int8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
+ int8x16_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
+ int8x16_t __ret;
+ __ret = __rev0 - __rev1 * __rev2;
+ __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai float32x4_t vmlsq_f32(float32x4_t __p0, float32x4_t __p1, float32x4_t __p2) {
+ float32x4_t __ret;
+ __ret = __p0 - __p1 * __p2;
+ return __ret;
+}
+#else
+__ai float32x4_t vmlsq_f32(float32x4_t __p0, float32x4_t __p1, float32x4_t __p2) {
+ float32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
+ float32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
+ float32x4_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 3, 2, 1, 0);
+ float32x4_t __ret;
+ __ret = __rev0 - __rev1 * __rev2;
+ __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai int32x4_t vmlsq_s32(int32x4_t __p0, int32x4_t __p1, int32x4_t __p2) {
+ int32x4_t __ret;
+ __ret = __p0 - __p1 * __p2;
+ return __ret;
+}
+#else
+__ai int32x4_t vmlsq_s32(int32x4_t __p0, int32x4_t __p1, int32x4_t __p2) {
+ int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
+ int32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
+ int32x4_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 3, 2, 1, 0);
+ int32x4_t __ret;
+ __ret = __rev0 - __rev1 * __rev2;
+ __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai int16x8_t vmlsq_s16(int16x8_t __p0, int16x8_t __p1, int16x8_t __p2) {
+ int16x8_t __ret;
+ __ret = __p0 - __p1 * __p2;
+ return __ret;
+}
+#else
+__ai int16x8_t vmlsq_s16(int16x8_t __p0, int16x8_t __p1, int16x8_t __p2) {
+ int16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
+ int16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
+ int16x8_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0);
+ int16x8_t __ret;
+ __ret = __rev0 - __rev1 * __rev2;
+ __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint8x8_t vmls_u8(uint8x8_t __p0, uint8x8_t __p1, uint8x8_t __p2) {
+ uint8x8_t __ret;
+ __ret = __p0 - __p1 * __p2;
+ return __ret;
+}
+#else
+__ai uint8x8_t vmls_u8(uint8x8_t __p0, uint8x8_t __p1, uint8x8_t __p2) {
+ uint8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
+ uint8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
+ uint8x8_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0);
+ uint8x8_t __ret;
+ __ret = __rev0 - __rev1 * __rev2;
+ __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint32x2_t vmls_u32(uint32x2_t __p0, uint32x2_t __p1, uint32x2_t __p2) {
+ uint32x2_t __ret;
+ __ret = __p0 - __p1 * __p2;
+ return __ret;
+}
+#else
+__ai uint32x2_t vmls_u32(uint32x2_t __p0, uint32x2_t __p1, uint32x2_t __p2) {
+ uint32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
+ uint32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
+ uint32x2_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 1, 0);
+ uint32x2_t __ret;
+ __ret = __rev0 - __rev1 * __rev2;
+ __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint16x4_t vmls_u16(uint16x4_t __p0, uint16x4_t __p1, uint16x4_t __p2) {
+ uint16x4_t __ret;
+ __ret = __p0 - __p1 * __p2;
+ return __ret;
+}
+#else
+__ai uint16x4_t vmls_u16(uint16x4_t __p0, uint16x4_t __p1, uint16x4_t __p2) {
+ uint16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
+ uint16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
+ uint16x4_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 3, 2, 1, 0);
+ uint16x4_t __ret;
+ __ret = __rev0 - __rev1 * __rev2;
+ __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai int8x8_t vmls_s8(int8x8_t __p0, int8x8_t __p1, int8x8_t __p2) {
+ int8x8_t __ret;
+ __ret = __p0 - __p1 * __p2;
+ return __ret;
+}
+#else
+__ai int8x8_t vmls_s8(int8x8_t __p0, int8x8_t __p1, int8x8_t __p2) {
+ int8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
+ int8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
+ int8x8_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0);
+ int8x8_t __ret;
+ __ret = __rev0 - __rev1 * __rev2;
+ __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai float32x2_t vmls_f32(float32x2_t __p0, float32x2_t __p1, float32x2_t __p2) {
+ float32x2_t __ret;
+ __ret = __p0 - __p1 * __p2;
+ return __ret;
+}
+#else
+__ai float32x2_t vmls_f32(float32x2_t __p0, float32x2_t __p1, float32x2_t __p2) {
+ float32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
+ float32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
+ float32x2_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 1, 0);
+ float32x2_t __ret;
+ __ret = __rev0 - __rev1 * __rev2;
+ __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai int32x2_t vmls_s32(int32x2_t __p0, int32x2_t __p1, int32x2_t __p2) {
+ int32x2_t __ret;
+ __ret = __p0 - __p1 * __p2;
+ return __ret;
+}
+#else
+__ai int32x2_t vmls_s32(int32x2_t __p0, int32x2_t __p1, int32x2_t __p2) {
+ int32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
+ int32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
+ int32x2_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 1, 0);
+ int32x2_t __ret;
+ __ret = __rev0 - __rev1 * __rev2;
+ __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai int16x4_t vmls_s16(int16x4_t __p0, int16x4_t __p1, int16x4_t __p2) {
+ int16x4_t __ret;
+ __ret = __p0 - __p1 * __p2;
+ return __ret;
+}
+#else
+__ai int16x4_t vmls_s16(int16x4_t __p0, int16x4_t __p1, int16x4_t __p2) {
+ int16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
+ int16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
+ int16x4_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 3, 2, 1, 0);
+ int16x4_t __ret;
+ __ret = __rev0 - __rev1 * __rev2;
+ __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
+ return __ret;
+}
+#endif
+
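+/* vmls*_lane_* subtract a single-lane product:
+   ret[i] = a[i] - b[i] * v[lane]. */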
+#ifdef __LITTLE_ENDIAN__
+#define vmlsq_lane_u32(__p0, __p1, __p2, __p3) __extension__ ({ \
+ uint32x4_t __s0 = __p0; \
+ uint32x4_t __s1 = __p1; \
+ uint32x2_t __s2 = __p2; \
+ uint32x4_t __ret; \
+ __ret = __s0 - __s1 * __builtin_shufflevector(__s2, __s2, __p3, __p3, __p3, __p3); \
+ __ret; \
+})
+#else
+#define vmlsq_lane_u32(__p0, __p1, __p2, __p3) __extension__ ({ \
+ uint32x4_t __s0 = __p0; \
+ uint32x4_t __s1 = __p1; \
+ uint32x2_t __s2 = __p2; \
+ uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
+ uint32x4_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
+ uint32x2_t __rev2; __rev2 = __builtin_shufflevector(__s2, __s2, 1, 0); \
+ uint32x4_t __ret; \
+ __ret = __rev0 - __rev1 * __builtin_shufflevector(__rev2, __rev2, __p3, __p3, __p3, __p3); \
+ __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
+ __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vmlsq_lane_u16(__p0, __p1, __p2, __p3) __extension__ ({ \
+ uint16x8_t __s0 = __p0; \
+ uint16x8_t __s1 = __p1; \
+ uint16x4_t __s2 = __p2; \
+ uint16x8_t __ret; \
+ __ret = __s0 - __s1 * __builtin_shufflevector(__s2, __s2, __p3, __p3, __p3, __p3, __p3, __p3, __p3, __p3); \
+ __ret; \
+})
+#else
+#define vmlsq_lane_u16(__p0, __p1, __p2, __p3) __extension__ ({ \
+ uint16x8_t __s0 = __p0; \
+ uint16x8_t __s1 = __p1; \
+ uint16x4_t __s2 = __p2; \
+ uint16x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
+ uint16x8_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \
+ uint16x4_t __rev2; __rev2 = __builtin_shufflevector(__s2, __s2, 3, 2, 1, 0); \
+ uint16x8_t __ret; \
+ __ret = __rev0 - __rev1 * __builtin_shufflevector(__rev2, __rev2, __p3, __p3, __p3, __p3, __p3, __p3, __p3, __p3); \
+ __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
+ __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vmlsq_lane_f32(__p0, __p1, __p2, __p3) __extension__ ({ \
+ float32x4_t __s0 = __p0; \
+ float32x4_t __s1 = __p1; \
+ float32x2_t __s2 = __p2; \
+ float32x4_t __ret; \
+ __ret = __s0 - __s1 * __builtin_shufflevector(__s2, __s2, __p3, __p3, __p3, __p3); \
+ __ret; \
+})
+#else
+#define vmlsq_lane_f32(__p0, __p1, __p2, __p3) __extension__ ({ \
+ float32x4_t __s0 = __p0; \
+ float32x4_t __s1 = __p1; \
+ float32x2_t __s2 = __p2; \
+ float32x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
+ float32x4_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
+ float32x2_t __rev2; __rev2 = __builtin_shufflevector(__s2, __s2, 1, 0); \
+ float32x4_t __ret; \
+ __ret = __rev0 - __rev1 * __builtin_shufflevector(__rev2, __rev2, __p3, __p3, __p3, __p3); \
+ __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
+ __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vmlsq_lane_s32(__p0, __p1, __p2, __p3) __extension__ ({ \
+ int32x4_t __s0 = __p0; \
+ int32x4_t __s1 = __p1; \
+ int32x2_t __s2 = __p2; \
+ int32x4_t __ret; \
+ __ret = __s0 - __s1 * __builtin_shufflevector(__s2, __s2, __p3, __p3, __p3, __p3); \
+ __ret; \
+})
+#else
+#define vmlsq_lane_s32(__p0, __p1, __p2, __p3) __extension__ ({ \
+ int32x4_t __s0 = __p0; \
+ int32x4_t __s1 = __p1; \
+ int32x2_t __s2 = __p2; \
+ int32x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
+ int32x4_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
+ int32x2_t __rev2; __rev2 = __builtin_shufflevector(__s2, __s2, 1, 0); \
+ int32x4_t __ret; \
+ __ret = __rev0 - __rev1 * __builtin_shufflevector(__rev2, __rev2, __p3, __p3, __p3, __p3); \
+ __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
+ __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vmlsq_lane_s16(__p0, __p1, __p2, __p3) __extension__ ({ \
+ int16x8_t __s0 = __p0; \
+ int16x8_t __s1 = __p1; \
+ int16x4_t __s2 = __p2; \
+ int16x8_t __ret; \
+ __ret = __s0 - __s1 * __builtin_shufflevector(__s2, __s2, __p3, __p3, __p3, __p3, __p3, __p3, __p3, __p3); \
+ __ret; \
+})
+#else
+#define vmlsq_lane_s16(__p0, __p1, __p2, __p3) __extension__ ({ \
+ int16x8_t __s0 = __p0; \
+ int16x8_t __s1 = __p1; \
+ int16x4_t __s2 = __p2; \
+ int16x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
+ int16x8_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \
+ int16x4_t __rev2; __rev2 = __builtin_shufflevector(__s2, __s2, 3, 2, 1, 0); \
+ int16x8_t __ret; \
+ __ret = __rev0 - __rev1 * __builtin_shufflevector(__rev2, __rev2, __p3, __p3, __p3, __p3, __p3, __p3, __p3, __p3); \
+ __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
+ __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vmls_lane_u32(__p0, __p1, __p2, __p3) __extension__ ({ \
+ uint32x2_t __s0 = __p0; \
+ uint32x2_t __s1 = __p1; \
+ uint32x2_t __s2 = __p2; \
+ uint32x2_t __ret; \
+ __ret = __s0 - __s1 * __builtin_shufflevector(__s2, __s2, __p3, __p3); \
+ __ret; \
+})
+#else
+#define vmls_lane_u32(__p0, __p1, __p2, __p3) __extension__ ({ \
+ uint32x2_t __s0 = __p0; \
+ uint32x2_t __s1 = __p1; \
+ uint32x2_t __s2 = __p2; \
+ uint32x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
+ uint32x2_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \
+ uint32x2_t __rev2; __rev2 = __builtin_shufflevector(__s2, __s2, 1, 0); \
+ uint32x2_t __ret; \
+ __ret = __rev0 - __rev1 * __builtin_shufflevector(__rev2, __rev2, __p3, __p3); \
+ __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
+ __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vmls_lane_u16(__p0, __p1, __p2, __p3) __extension__ ({ \
+ uint16x4_t __s0 = __p0; \
+ uint16x4_t __s1 = __p1; \
+ uint16x4_t __s2 = __p2; \
+ uint16x4_t __ret; \
+ __ret = __s0 - __s1 * __builtin_shufflevector(__s2, __s2, __p3, __p3, __p3, __p3); \
+ __ret; \
+})
+#else
+#define vmls_lane_u16(__p0, __p1, __p2, __p3) __extension__ ({ \
+ uint16x4_t __s0 = __p0; \
+ uint16x4_t __s1 = __p1; \
+ uint16x4_t __s2 = __p2; \
+ uint16x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
+ uint16x4_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
+ uint16x4_t __rev2; __rev2 = __builtin_shufflevector(__s2, __s2, 3, 2, 1, 0); \
+ uint16x4_t __ret; \
+ __ret = __rev0 - __rev1 * __builtin_shufflevector(__rev2, __rev2, __p3, __p3, __p3, __p3); \
+ __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
+ __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vmls_lane_f32(__p0, __p1, __p2, __p3) __extension__ ({ \
+ float32x2_t __s0 = __p0; \
+ float32x2_t __s1 = __p1; \
+ float32x2_t __s2 = __p2; \
+ float32x2_t __ret; \
+ __ret = __s0 - __s1 * __builtin_shufflevector(__s2, __s2, __p3, __p3); \
+ __ret; \
+})
+#else
+#define vmls_lane_f32(__p0, __p1, __p2, __p3) __extension__ ({ \
+ float32x2_t __s0 = __p0; \
+ float32x2_t __s1 = __p1; \
+ float32x2_t __s2 = __p2; \
+ float32x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
+ float32x2_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \
+ float32x2_t __rev2; __rev2 = __builtin_shufflevector(__s2, __s2, 1, 0); \
+ float32x2_t __ret; \
+ __ret = __rev0 - __rev1 * __builtin_shufflevector(__rev2, __rev2, __p3, __p3); \
+ __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
+ __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vmls_lane_s32(__p0, __p1, __p2, __p3) __extension__ ({ \
+ int32x2_t __s0 = __p0; \
+ int32x2_t __s1 = __p1; \
+ int32x2_t __s2 = __p2; \
+ int32x2_t __ret; \
+ __ret = __s0 - __s1 * __builtin_shufflevector(__s2, __s2, __p3, __p3); \
+ __ret; \
+})
+#else
+#define vmls_lane_s32(__p0, __p1, __p2, __p3) __extension__ ({ \
+ int32x2_t __s0 = __p0; \
+ int32x2_t __s1 = __p1; \
+ int32x2_t __s2 = __p2; \
+ int32x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
+ int32x2_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \
+ int32x2_t __rev2; __rev2 = __builtin_shufflevector(__s2, __s2, 1, 0); \
+ int32x2_t __ret; \
+ __ret = __rev0 - __rev1 * __builtin_shufflevector(__rev2, __rev2, __p3, __p3); \
+ __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
+ __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vmls_lane_s16(__p0, __p1, __p2, __p3) __extension__ ({ \
+ int16x4_t __s0 = __p0; \
+ int16x4_t __s1 = __p1; \
+ int16x4_t __s2 = __p2; \
+ int16x4_t __ret; \
+ __ret = __s0 - __s1 * __builtin_shufflevector(__s2, __s2, __p3, __p3, __p3, __p3); \
+ __ret; \
+})
+#else
+#define vmls_lane_s16(__p0, __p1, __p2, __p3) __extension__ ({ \
+ int16x4_t __s0 = __p0; \
+ int16x4_t __s1 = __p1; \
+ int16x4_t __s2 = __p2; \
+ int16x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
+ int16x4_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
+ int16x4_t __rev2; __rev2 = __builtin_shufflevector(__s2, __s2, 3, 2, 1, 0); \
+ int16x4_t __ret; \
+ __ret = __rev0 - __rev1 * __builtin_shufflevector(__rev2, __rev2, __p3, __p3, __p3, __p3); \
+ __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
+ __ret; \
+})
+#endif
+
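+/* vmls*_n_* subtract a broadcast scalar product:
+   ret[i] = a[i] - b[i] * n. */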
+#ifdef __LITTLE_ENDIAN__
+__ai uint32x4_t vmlsq_n_u32(uint32x4_t __p0, uint32x4_t __p1, uint32_t __p2) {
+ uint32x4_t __ret;
+ __ret = __p0 - __p1 * (uint32x4_t) {__p2, __p2, __p2, __p2};
+ return __ret;
+}
+#else
+__ai uint32x4_t vmlsq_n_u32(uint32x4_t __p0, uint32x4_t __p1, uint32_t __p2) {
+ uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
+ uint32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
+ uint32x4_t __ret;
+ __ret = __rev0 - __rev1 * (uint32x4_t) {__p2, __p2, __p2, __p2};
+ __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint16x8_t vmlsq_n_u16(uint16x8_t __p0, uint16x8_t __p1, uint16_t __p2) {
+ uint16x8_t __ret;
+ __ret = __p0 - __p1 * (uint16x8_t) {__p2, __p2, __p2, __p2, __p2, __p2, __p2, __p2};
+ return __ret;
+}
+#else
+__ai uint16x8_t vmlsq_n_u16(uint16x8_t __p0, uint16x8_t __p1, uint16_t __p2) {
+ uint16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
+ uint16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
+ uint16x8_t __ret;
+ __ret = __rev0 - __rev1 * (uint16x8_t) {__p2, __p2, __p2, __p2, __p2, __p2, __p2, __p2};
+ __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai float32x4_t vmlsq_n_f32(float32x4_t __p0, float32x4_t __p1, float32_t __p2) {
+ float32x4_t __ret;
+ __ret = __p0 - __p1 * (float32x4_t) {__p2, __p2, __p2, __p2};
+ return __ret;
+}
+#else
+__ai float32x4_t vmlsq_n_f32(float32x4_t __p0, float32x4_t __p1, float32_t __p2) {
+ float32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
+ float32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
+ float32x4_t __ret;
+ __ret = __rev0 - __rev1 * (float32x4_t) {__p2, __p2, __p2, __p2};
+ __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai int32x4_t vmlsq_n_s32(int32x4_t __p0, int32x4_t __p1, int32_t __p2) {
+ int32x4_t __ret;
+ __ret = __p0 - __p1 * (int32x4_t) {__p2, __p2, __p2, __p2};
+ return __ret;
+}
+#else
+__ai int32x4_t vmlsq_n_s32(int32x4_t __p0, int32x4_t __p1, int32_t __p2) {
+ int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
+ int32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
+ int32x4_t __ret;
+ __ret = __rev0 - __rev1 * (int32x4_t) {__p2, __p2, __p2, __p2};
+ __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai int16x8_t vmlsq_n_s16(int16x8_t __p0, int16x8_t __p1, int16_t __p2) {
+ int16x8_t __ret;
+ __ret = __p0 - __p1 * (int16x8_t) {__p2, __p2, __p2, __p2, __p2, __p2, __p2, __p2};
+ return __ret;
+}
+#else
+__ai int16x8_t vmlsq_n_s16(int16x8_t __p0, int16x8_t __p1, int16_t __p2) {
+ int16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
+ int16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
+ int16x8_t __ret;
+ __ret = __rev0 - __rev1 * (int16x8_t) {__p2, __p2, __p2, __p2, __p2, __p2, __p2, __p2};
+ __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint32x2_t vmls_n_u32(uint32x2_t __p0, uint32x2_t __p1, uint32_t __p2) {
+ uint32x2_t __ret;
+ __ret = __p0 - __p1 * (uint32x2_t) {__p2, __p2};
+ return __ret;
+}
+#else
+__ai uint32x2_t vmls_n_u32(uint32x2_t __p0, uint32x2_t __p1, uint32_t __p2) {
+ uint32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
+ uint32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
+ uint32x2_t __ret;
+ __ret = __rev0 - __rev1 * (uint32x2_t) {__p2, __p2};
+ __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint16x4_t vmls_n_u16(uint16x4_t __p0, uint16x4_t __p1, uint16_t __p2) {
+ uint16x4_t __ret;
+ __ret = __p0 - __p1 * (uint16x4_t) {__p2, __p2, __p2, __p2};
+ return __ret;
+}
+#else
+__ai uint16x4_t vmls_n_u16(uint16x4_t __p0, uint16x4_t __p1, uint16_t __p2) {
+ uint16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
+ uint16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
+ uint16x4_t __ret;
+ __ret = __rev0 - __rev1 * (uint16x4_t) {__p2, __p2, __p2, __p2};
+ __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai float32x2_t vmls_n_f32(float32x2_t __p0, float32x2_t __p1, float32_t __p2) {
+ float32x2_t __ret;
+ __ret = __p0 - __p1 * (float32x2_t) {__p2, __p2};
+ return __ret;
+}
+#else
+__ai float32x2_t vmls_n_f32(float32x2_t __p0, float32x2_t __p1, float32_t __p2) {
+ float32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
+ float32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
+ float32x2_t __ret;
+ __ret = __rev0 - __rev1 * (float32x2_t) {__p2, __p2};
+ __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai int32x2_t vmls_n_s32(int32x2_t __p0, int32x2_t __p1, int32_t __p2) {
+ int32x2_t __ret;
+ __ret = __p0 - __p1 * (int32x2_t) {__p2, __p2};
+ return __ret;
+}
+#else
+__ai int32x2_t vmls_n_s32(int32x2_t __p0, int32x2_t __p1, int32_t __p2) {
+ int32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
+ int32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
+ int32x2_t __ret;
+ __ret = __rev0 - __rev1 * (int32x2_t) {__p2, __p2};
+ __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai int16x4_t vmls_n_s16(int16x4_t __p0, int16x4_t __p1, int16_t __p2) {
+ int16x4_t __ret;
+ __ret = __p0 - __p1 * (int16x4_t) {__p2, __p2, __p2, __p2};
+ return __ret;
+}
+#else
+__ai int16x4_t vmls_n_s16(int16x4_t __p0, int16x4_t __p1, int16_t __p2) {
+ int16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
+ int16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
+ int16x4_t __ret;
+ __ret = __rev0 - __rev1 * (int16x4_t) {__p2, __p2, __p2, __p2};
+ __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
+ return __ret;
+}
+#endif
+
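+/* vmov*_n_* broadcast (duplicate) a scalar into every lane of the
+   result vector.  For single-lane vectors such as uint64x1_t and
+   int64x1_t the little- and big-endian bodies are identical, since
+   there is no lane order to fix up. */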
+#ifdef __LITTLE_ENDIAN__
+__ai poly8x8_t vmov_n_p8(poly8_t __p0) {
+ poly8x8_t __ret;
+ __ret = (poly8x8_t) {__p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0};
+ return __ret;
+}
+#else
+__ai poly8x8_t vmov_n_p8(poly8_t __p0) {
+ poly8x8_t __ret;
+ __ret = (poly8x8_t) {__p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0};
+ __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai poly16x4_t vmov_n_p16(poly16_t __p0) {
+ poly16x4_t __ret;
+ __ret = (poly16x4_t) {__p0, __p0, __p0, __p0};
+ return __ret;
+}
+#else
+__ai poly16x4_t vmov_n_p16(poly16_t __p0) {
+ poly16x4_t __ret;
+ __ret = (poly16x4_t) {__p0, __p0, __p0, __p0};
+ __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai poly8x16_t vmovq_n_p8(poly8_t __p0) {
+ poly8x16_t __ret;
+ __ret = (poly8x16_t) {__p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0};
+ return __ret;
+}
+#else
+__ai poly8x16_t vmovq_n_p8(poly8_t __p0) {
+ poly8x16_t __ret;
+ __ret = (poly8x16_t) {__p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0};
+ __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai poly16x8_t vmovq_n_p16(poly16_t __p0) {
+ poly16x8_t __ret;
+ __ret = (poly16x8_t) {__p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0};
+ return __ret;
+}
+#else
+__ai poly16x8_t vmovq_n_p16(poly16_t __p0) {
+ poly16x8_t __ret;
+ __ret = (poly16x8_t) {__p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0};
+ __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint8x16_t vmovq_n_u8(uint8_t __p0) {
+ uint8x16_t __ret;
+ __ret = (uint8x16_t) {__p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0};
+ return __ret;
+}
+#else
+__ai uint8x16_t vmovq_n_u8(uint8_t __p0) {
+ uint8x16_t __ret;
+ __ret = (uint8x16_t) {__p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0};
+ __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint32x4_t vmovq_n_u32(uint32_t __p0) {
+ uint32x4_t __ret;
+ __ret = (uint32x4_t) {__p0, __p0, __p0, __p0};
+ return __ret;
+}
+#else
+__ai uint32x4_t vmovq_n_u32(uint32_t __p0) {
+ uint32x4_t __ret;
+ __ret = (uint32x4_t) {__p0, __p0, __p0, __p0};
+ __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint64x2_t vmovq_n_u64(uint64_t __p0) {
+ uint64x2_t __ret;
+ __ret = (uint64x2_t) {__p0, __p0};
+ return __ret;
+}
+#else
+__ai uint64x2_t vmovq_n_u64(uint64_t __p0) {
+ uint64x2_t __ret;
+ __ret = (uint64x2_t) {__p0, __p0};
+ __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint16x8_t vmovq_n_u16(uint16_t __p0) {
+ uint16x8_t __ret;
+ __ret = (uint16x8_t) {__p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0};
+ return __ret;
+}
+#else
+__ai uint16x8_t vmovq_n_u16(uint16_t __p0) {
+ uint16x8_t __ret;
+ __ret = (uint16x8_t) {__p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0};
+ __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai int8x16_t vmovq_n_s8(int8_t __p0) {
+ int8x16_t __ret;
+ __ret = (int8x16_t) {__p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0};
+ return __ret;
+}
+#else
+__ai int8x16_t vmovq_n_s8(int8_t __p0) {
+ int8x16_t __ret;
+ __ret = (int8x16_t) {__p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0};
+ __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai float32x4_t vmovq_n_f32(float32_t __p0) {
+ float32x4_t __ret;
+ __ret = (float32x4_t) {__p0, __p0, __p0, __p0};
+ return __ret;
+}
+#else
+__ai float32x4_t vmovq_n_f32(float32_t __p0) {
+ float32x4_t __ret;
+ __ret = (float32x4_t) {__p0, __p0, __p0, __p0};
+ __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vmovq_n_f16(__p0) __extension__ ({ \
+ float16_t __s0 = __p0; \
+ float16x8_t __ret; \
+ __ret = (float16x8_t) {__s0, __s0, __s0, __s0, __s0, __s0, __s0, __s0}; \
+ __ret; \
+})
+#else
+#define vmovq_n_f16(__p0) __extension__ ({ \
+ float16_t __s0 = __p0; \
+ float16x8_t __ret; \
+ __ret = (float16x8_t) {__s0, __s0, __s0, __s0, __s0, __s0, __s0, __s0}; \
+ __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
+ __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai int32x4_t vmovq_n_s32(int32_t __p0) {
+ int32x4_t __ret;
+ __ret = (int32x4_t) {__p0, __p0, __p0, __p0};
+ return __ret;
+}
+#else
+__ai int32x4_t vmovq_n_s32(int32_t __p0) {
+ int32x4_t __ret;
+ __ret = (int32x4_t) {__p0, __p0, __p0, __p0};
+ __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai int64x2_t vmovq_n_s64(int64_t __p0) {
+ int64x2_t __ret;
+ __ret = (int64x2_t) {__p0, __p0};
+ return __ret;
+}
+#else
+__ai int64x2_t vmovq_n_s64(int64_t __p0) {
+ int64x2_t __ret;
+ __ret = (int64x2_t) {__p0, __p0};
+ __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai int16x8_t vmovq_n_s16(int16_t __p0) {
+ int16x8_t __ret;
+ __ret = (int16x8_t) {__p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0};
+ return __ret;
+}
+#else
+__ai int16x8_t vmovq_n_s16(int16_t __p0) {
+ int16x8_t __ret;
+ __ret = (int16x8_t) {__p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0};
+ __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint8x8_t vmov_n_u8(uint8_t __p0) {
+ uint8x8_t __ret;
+ __ret = (uint8x8_t) {__p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0};
+ return __ret;
+}
+#else
+__ai uint8x8_t vmov_n_u8(uint8_t __p0) {
+ uint8x8_t __ret;
+ __ret = (uint8x8_t) {__p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0};
+ __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint32x2_t vmov_n_u32(uint32_t __p0) {
+ uint32x2_t __ret;
+ __ret = (uint32x2_t) {__p0, __p0};
+ return __ret;
+}
+#else
+__ai uint32x2_t vmov_n_u32(uint32_t __p0) {
+ uint32x2_t __ret;
+ __ret = (uint32x2_t) {__p0, __p0};
+ __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint64x1_t vmov_n_u64(uint64_t __p0) {
+ uint64x1_t __ret;
+ __ret = (uint64x1_t) {__p0};
+ return __ret;
+}
+#else
+__ai uint64x1_t vmov_n_u64(uint64_t __p0) {
+ uint64x1_t __ret;
+ __ret = (uint64x1_t) {__p0};
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint16x4_t vmov_n_u16(uint16_t __p0) {
+ uint16x4_t __ret;
+ __ret = (uint16x4_t) {__p0, __p0, __p0, __p0};
+ return __ret;
+}
+#else
+__ai uint16x4_t vmov_n_u16(uint16_t __p0) {
+ uint16x4_t __ret;
+ __ret = (uint16x4_t) {__p0, __p0, __p0, __p0};
+ __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai int8x8_t vmov_n_s8(int8_t __p0) {
+ int8x8_t __ret;
+ __ret = (int8x8_t) {__p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0};
+ return __ret;
+}
+#else
+__ai int8x8_t vmov_n_s8(int8_t __p0) {
+ int8x8_t __ret;
+ __ret = (int8x8_t) {__p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0};
+ __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai float32x2_t vmov_n_f32(float32_t __p0) {
+ float32x2_t __ret;
+ __ret = (float32x2_t) {__p0, __p0};
+ return __ret;
+}
+#else
+__ai float32x2_t vmov_n_f32(float32_t __p0) {
+ float32x2_t __ret;
+ __ret = (float32x2_t) {__p0, __p0};
+ __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vmov_n_f16(__p0) __extension__ ({ \
+ float16_t __s0 = __p0; \
+ float16x4_t __ret; \
+ __ret = (float16x4_t) {__s0, __s0, __s0, __s0}; \
+ __ret; \
+})
+#else
+#define vmov_n_f16(__p0) __extension__ ({ \
+ float16_t __s0 = __p0; \
+ float16x4_t __ret; \
+ __ret = (float16x4_t) {__s0, __s0, __s0, __s0}; \
+ __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
+ __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai int32x2_t vmov_n_s32(int32_t __p0) {
+ int32x2_t __ret;
+ __ret = (int32x2_t) {__p0, __p0};
+ return __ret;
+}
+#else
+__ai int32x2_t vmov_n_s32(int32_t __p0) {
+ int32x2_t __ret;
+ __ret = (int32x2_t) {__p0, __p0};
+ __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai int64x1_t vmov_n_s64(int64_t __p0) {
+ int64x1_t __ret;
+ __ret = (int64x1_t) {__p0};
+ return __ret;
+}
+#else
+__ai int64x1_t vmov_n_s64(int64_t __p0) {
+ int64x1_t __ret;
+ __ret = (int64x1_t) {__p0};
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai int16x4_t vmov_n_s16(int16_t __p0) {
+ int16x4_t __ret;
+ __ret = (int16x4_t) {__p0, __p0, __p0, __p0};
+ return __ret;
+}
+#else
+__ai int16x4_t vmov_n_s16(int16_t __p0) {
+ int16x4_t __ret;
+ __ret = (int16x4_t) {__p0, __p0, __p0, __p0};
+ __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
+ return __ret;
+}
+#endif
+
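+/* vmovl_* lengthen each element to twice its width (zero-extending for
+   unsigned types, sign-extending for signed ones); the lane count is
+   unchanged.  The trailing integer constant passed to
+   __builtin_neon_vmovl_v encodes the result vector type. */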
+#ifdef __LITTLE_ENDIAN__
+__ai uint16x8_t vmovl_u8(uint8x8_t __p0) {
+ uint16x8_t __ret;
+ __ret = (uint16x8_t) __builtin_neon_vmovl_v((int8x8_t)__p0, 49);
+ return __ret;
+}
+#else
+__ai uint16x8_t vmovl_u8(uint8x8_t __p0) {
+ uint8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
+ uint16x8_t __ret;
+ __ret = (uint16x8_t) __builtin_neon_vmovl_v((int8x8_t)__rev0, 49);
+ __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
+ return __ret;
+}
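+/* The __noswap_ variants skip the endianness fix-up; they exist so that
+   other big-endian intrinsic implementations, whose operands are
+   already in reversed lane order, can reuse the builtin without a
+   double reversal. */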
+__ai uint16x8_t __noswap_vmovl_u8(uint8x8_t __p0) {
+ uint16x8_t __ret;
+ __ret = (uint16x8_t) __builtin_neon_vmovl_v((int8x8_t)__p0, 49);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint64x2_t vmovl_u32(uint32x2_t __p0) {
+ uint64x2_t __ret;
+ __ret = (uint64x2_t) __builtin_neon_vmovl_v((int8x8_t)__p0, 51);
+ return __ret;
+}
+#else
+__ai uint64x2_t vmovl_u32(uint32x2_t __p0) {
+ uint32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
+ uint64x2_t __ret;
+ __ret = (uint64x2_t) __builtin_neon_vmovl_v((int8x8_t)__rev0, 51);
+ __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
+ return __ret;
+}
+__ai uint64x2_t __noswap_vmovl_u32(uint32x2_t __p0) {
+ uint64x2_t __ret;
+ __ret = (uint64x2_t) __builtin_neon_vmovl_v((int8x8_t)__p0, 51);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint32x4_t vmovl_u16(uint16x4_t __p0) {
+ uint32x4_t __ret;
+ __ret = (uint32x4_t) __builtin_neon_vmovl_v((int8x8_t)__p0, 50);
+ return __ret;
+}
+#else
+__ai uint32x4_t vmovl_u16(uint16x4_t __p0) {
+ uint16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
+ uint32x4_t __ret;
+ __ret = (uint32x4_t) __builtin_neon_vmovl_v((int8x8_t)__rev0, 50);
+ __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
+ return __ret;
+}
+__ai uint32x4_t __noswap_vmovl_u16(uint16x4_t __p0) {
+ uint32x4_t __ret;
+ __ret = (uint32x4_t) __builtin_neon_vmovl_v((int8x8_t)__p0, 50);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai int16x8_t vmovl_s8(int8x8_t __p0) {
+ int16x8_t __ret;
+ __ret = (int16x8_t) __builtin_neon_vmovl_v((int8x8_t)__p0, 33);
+ return __ret;
+}
+#else
+__ai int16x8_t vmovl_s8(int8x8_t __p0) {
+ int8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
+ int16x8_t __ret;
+ __ret = (int16x8_t) __builtin_neon_vmovl_v((int8x8_t)__rev0, 33);
+ __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
+ return __ret;
+}
+__ai int16x8_t __noswap_vmovl_s8(int8x8_t __p0) {
+ int16x8_t __ret;
+ __ret = (int16x8_t) __builtin_neon_vmovl_v((int8x8_t)__p0, 33);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai int64x2_t vmovl_s32(int32x2_t __p0) {
+ int64x2_t __ret;
+ __ret = (int64x2_t) __builtin_neon_vmovl_v((int8x8_t)__p0, 35);
+ return __ret;
+}
+#else
+__ai int64x2_t vmovl_s32(int32x2_t __p0) {
+ int32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
+ int64x2_t __ret;
+ __ret = (int64x2_t) __builtin_neon_vmovl_v((int8x8_t)__rev0, 35);
+ __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
+ return __ret;
+}
+__ai int64x2_t __noswap_vmovl_s32(int32x2_t __p0) {
+ int64x2_t __ret;
+ __ret = (int64x2_t) __builtin_neon_vmovl_v((int8x8_t)__p0, 35);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai int32x4_t vmovl_s16(int16x4_t __p0) {
+ int32x4_t __ret;
+ __ret = (int32x4_t) __builtin_neon_vmovl_v((int8x8_t)__p0, 34);
+ return __ret;
+}
+#else
+__ai int32x4_t vmovl_s16(int16x4_t __p0) {
+ int16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
+ int32x4_t __ret;
+ __ret = (int32x4_t) __builtin_neon_vmovl_v((int8x8_t)__rev0, 34);
+ __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
+ return __ret;
+}
+__ai int32x4_t __noswap_vmovl_s16(int16x4_t __p0) {
+ int32x4_t __ret;
+ __ret = (int32x4_t) __builtin_neon_vmovl_v((int8x8_t)__p0, 34);
+ return __ret;
+}
+#endif
+
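+/* vmovn_* narrow each element to half its width, keeping the least
+   significant half of every lane. */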
+#ifdef __LITTLE_ENDIAN__
+__ai uint16x4_t vmovn_u32(uint32x4_t __p0) {
+ uint16x4_t __ret;
+ __ret = (uint16x4_t) __builtin_neon_vmovn_v((int8x16_t)__p0, 17);
+ return __ret;
+}
+#else
+__ai uint16x4_t vmovn_u32(uint32x4_t __p0) {
+ uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
+ uint16x4_t __ret;
+ __ret = (uint16x4_t) __builtin_neon_vmovn_v((int8x16_t)__rev0, 17);
+ __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
+ return __ret;
+}
+__ai uint16x4_t __noswap_vmovn_u32(uint32x4_t __p0) {
+ uint16x4_t __ret;
+ __ret = (uint16x4_t) __builtin_neon_vmovn_v((int8x16_t)__p0, 17);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint32x2_t vmovn_u64(uint64x2_t __p0) {
+ uint32x2_t __ret;
+ __ret = (uint32x2_t) __builtin_neon_vmovn_v((int8x16_t)__p0, 18);
+ return __ret;
+}
+#else
+__ai uint32x2_t vmovn_u64(uint64x2_t __p0) {
+ uint64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
+ uint32x2_t __ret;
+ __ret = (uint32x2_t) __builtin_neon_vmovn_v((int8x16_t)__rev0, 18);
+ __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
+ return __ret;
+}
+__ai uint32x2_t __noswap_vmovn_u64(uint64x2_t __p0) {
+ uint32x2_t __ret;
+ __ret = (uint32x2_t) __builtin_neon_vmovn_v((int8x16_t)__p0, 18);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint8x8_t vmovn_u16(uint16x8_t __p0) {
+ uint8x8_t __ret;
+ __ret = (uint8x8_t) __builtin_neon_vmovn_v((int8x16_t)__p0, 16);
+ return __ret;
+}
+#else
+__ai uint8x8_t vmovn_u16(uint16x8_t __p0) {
+ uint16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
+ uint8x8_t __ret;
+ __ret = (uint8x8_t) __builtin_neon_vmovn_v((int8x16_t)__rev0, 16);
+ __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
+ return __ret;
+}
+__ai uint8x8_t __noswap_vmovn_u16(uint16x8_t __p0) {
+ uint8x8_t __ret;
+ __ret = (uint8x8_t) __builtin_neon_vmovn_v((int8x16_t)__p0, 16);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai int16x4_t vmovn_s32(int32x4_t __p0) {
+ int16x4_t __ret;
+ __ret = (int16x4_t) __builtin_neon_vmovn_v((int8x16_t)__p0, 1);
+ return __ret;
+}
+#else
+__ai int16x4_t vmovn_s32(int32x4_t __p0) {
+ int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
+ int16x4_t __ret;
+ __ret = (int16x4_t) __builtin_neon_vmovn_v((int8x16_t)__rev0, 1);
+ __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
+ return __ret;
+}
+__ai int16x4_t __noswap_vmovn_s32(int32x4_t __p0) {
+ int16x4_t __ret;
+ __ret = (int16x4_t) __builtin_neon_vmovn_v((int8x16_t)__p0, 1);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai int32x2_t vmovn_s64(int64x2_t __p0) {
+ int32x2_t __ret;
+ __ret = (int32x2_t) __builtin_neon_vmovn_v((int8x16_t)__p0, 2);
+ return __ret;
+}
+#else
+__ai int32x2_t vmovn_s64(int64x2_t __p0) {
+ int64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
+ int32x2_t __ret;
+ __ret = (int32x2_t) __builtin_neon_vmovn_v((int8x16_t)__rev0, 2);
+ __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
+ return __ret;
+}
+__ai int32x2_t __noswap_vmovn_s64(int64x2_t __p0) {
+ int32x2_t __ret;
+ __ret = (int32x2_t) __builtin_neon_vmovn_v((int8x16_t)__p0, 2);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai int8x8_t vmovn_s16(int16x8_t __p0) {
+ int8x8_t __ret;
+ __ret = (int8x8_t) __builtin_neon_vmovn_v((int8x16_t)__p0, 0);
+ return __ret;
+}
+#else
+__ai int8x8_t vmovn_s16(int16x8_t __p0) {
+ int16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
+ int8x8_t __ret;
+ __ret = (int8x8_t) __builtin_neon_vmovn_v((int8x16_t)__rev0, 0);
+ __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
+ return __ret;
+}
+__ai int8x8_t __noswap_vmovn_s16(int16x8_t __p0) {
+ int8x8_t __ret;
+ __ret = (int8x8_t) __builtin_neon_vmovn_v((int8x16_t)__p0, 0);
+ return __ret;
+}
+#endif
+
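+/* vmul* multiply element-wise: ret[i] = a[i] * b[i]. */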
+#ifdef __LITTLE_ENDIAN__
+__ai uint8x16_t vmulq_u8(uint8x16_t __p0, uint8x16_t __p1) {
+ uint8x16_t __ret;
+ __ret = __p0 * __p1;
+ return __ret;
+}
+#else
+__ai uint8x16_t vmulq_u8(uint8x16_t __p0, uint8x16_t __p1) {
+ uint8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
+ uint8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
+ uint8x16_t __ret;
+ __ret = __rev0 * __rev1;
+ __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint32x4_t vmulq_u32(uint32x4_t __p0, uint32x4_t __p1) {
+ uint32x4_t __ret;
+ __ret = __p0 * __p1;
+ return __ret;
+}
+#else
+__ai uint32x4_t vmulq_u32(uint32x4_t __p0, uint32x4_t __p1) {
+ uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
+ uint32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
+ uint32x4_t __ret;
+ __ret = __rev0 * __rev1;
+ __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint16x8_t vmulq_u16(uint16x8_t __p0, uint16x8_t __p1) {
+ uint16x8_t __ret;
+ __ret = __p0 * __p1;
+ return __ret;
+}
+#else
+__ai uint16x8_t vmulq_u16(uint16x8_t __p0, uint16x8_t __p1) {
+ uint16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
+ uint16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
+ uint16x8_t __ret;
+ __ret = __rev0 * __rev1;
+ __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai int8x16_t vmulq_s8(int8x16_t __p0, int8x16_t __p1) {
+ int8x16_t __ret;
+ __ret = __p0 * __p1;
+ return __ret;
+}
+#else
+__ai int8x16_t vmulq_s8(int8x16_t __p0, int8x16_t __p1) {
+ int8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
+ int8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
+ int8x16_t __ret;
+ __ret = __rev0 * __rev1;
+ __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai float32x4_t vmulq_f32(float32x4_t __p0, float32x4_t __p1) {
+ float32x4_t __ret;
+ __ret = __p0 * __p1;
+ return __ret;
+}
+#else
+__ai float32x4_t vmulq_f32(float32x4_t __p0, float32x4_t __p1) {
+ float32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
+ float32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
+ float32x4_t __ret;
+ __ret = __rev0 * __rev1;
+ __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai int32x4_t vmulq_s32(int32x4_t __p0, int32x4_t __p1) {
+ int32x4_t __ret;
+ __ret = __p0 * __p1;
+ return __ret;
+}
+#else
+__ai int32x4_t vmulq_s32(int32x4_t __p0, int32x4_t __p1) {
+ int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
+ int32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
+ int32x4_t __ret;
+ __ret = __rev0 * __rev1;
+ __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai int16x8_t vmulq_s16(int16x8_t __p0, int16x8_t __p1) {
+ int16x8_t __ret;
+ __ret = __p0 * __p1;
+ return __ret;
+}
+#else
+__ai int16x8_t vmulq_s16(int16x8_t __p0, int16x8_t __p1) {
+ int16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
+ int16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
+ int16x8_t __ret;
+ __ret = __rev0 * __rev1;
+ __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint8x8_t vmul_u8(uint8x8_t __p0, uint8x8_t __p1) {
+ uint8x8_t __ret;
+ __ret = __p0 * __p1;
+ return __ret;
+}
+#else
+__ai uint8x8_t vmul_u8(uint8x8_t __p0, uint8x8_t __p1) {
+ uint8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
+ uint8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
+ uint8x8_t __ret;
+ __ret = __rev0 * __rev1;
+ __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint32x2_t vmul_u32(uint32x2_t __p0, uint32x2_t __p1) {
+ uint32x2_t __ret;
+ __ret = __p0 * __p1;
+ return __ret;
+}
+#else
+__ai uint32x2_t vmul_u32(uint32x2_t __p0, uint32x2_t __p1) {
+ uint32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
+ uint32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
+ uint32x2_t __ret;
+ __ret = __rev0 * __rev1;
+ __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint16x4_t vmul_u16(uint16x4_t __p0, uint16x4_t __p1) {
+ uint16x4_t __ret;
+ __ret = __p0 * __p1;
+ return __ret;
+}
+#else
+__ai uint16x4_t vmul_u16(uint16x4_t __p0, uint16x4_t __p1) {
+ uint16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
+ uint16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
+ uint16x4_t __ret;
+ __ret = __rev0 * __rev1;
+ __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai int8x8_t vmul_s8(int8x8_t __p0, int8x8_t __p1) {
+ int8x8_t __ret;
+ __ret = __p0 * __p1;
+ return __ret;
+}
+#else
+__ai int8x8_t vmul_s8(int8x8_t __p0, int8x8_t __p1) {
+ int8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
+ int8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
+ int8x8_t __ret;
+ __ret = __rev0 * __rev1;
+ __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai float32x2_t vmul_f32(float32x2_t __p0, float32x2_t __p1) {
+ float32x2_t __ret;
+ __ret = __p0 * __p1;
+ return __ret;
+}
+#else
+__ai float32x2_t vmul_f32(float32x2_t __p0, float32x2_t __p1) {
+ float32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
+ float32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
+ float32x2_t __ret;
+ __ret = __rev0 * __rev1;
+ __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai int32x2_t vmul_s32(int32x2_t __p0, int32x2_t __p1) {
+ int32x2_t __ret;
+ __ret = __p0 * __p1;
+ return __ret;
+}
+#else
+__ai int32x2_t vmul_s32(int32x2_t __p0, int32x2_t __p1) {
+ int32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
+ int32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
+ int32x2_t __ret;
+ __ret = __rev0 * __rev1;
+ __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai int16x4_t vmul_s16(int16x4_t __p0, int16x4_t __p1) {
+ int16x4_t __ret;
+ __ret = __p0 * __p1;
+ return __ret;
+}
+#else
+__ai int16x4_t vmul_s16(int16x4_t __p0, int16x4_t __p1) {
+ int16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
+ int16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
+ int16x4_t __ret;
+ __ret = __rev0 * __rev1;
+ __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
+ return __ret;
+}
+#endif
+
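+/* Polynomial multiply (vmul_p8/vmulq_p8): carry-less multiplication over
+ * GF(2) per 8-bit lane, so it goes through the builtin rather than `*`. */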
+#ifdef __LITTLE_ENDIAN__
+__ai poly8x8_t vmul_p8(poly8x8_t __p0, poly8x8_t __p1) {
+ poly8x8_t __ret;
+ __ret = (poly8x8_t) __builtin_neon_vmul_v((int8x8_t)__p0, (int8x8_t)__p1, 4);
+ return __ret;
+}
+#else
+__ai poly8x8_t vmul_p8(poly8x8_t __p0, poly8x8_t __p1) {
+ poly8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
+ poly8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
+ poly8x8_t __ret;
+ __ret = (poly8x8_t) __builtin_neon_vmul_v((int8x8_t)__rev0, (int8x8_t)__rev1, 4);
+ __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai poly8x16_t vmulq_p8(poly8x16_t __p0, poly8x16_t __p1) {
+ poly8x16_t __ret;
+ __ret = (poly8x16_t) __builtin_neon_vmulq_v((int8x16_t)__p0, (int8x16_t)__p1, 36);
+ return __ret;
+}
+#else
+__ai poly8x16_t vmulq_p8(poly8x16_t __p0, poly8x16_t __p1) {
+ poly8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
+ poly8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
+ poly8x16_t __ret;
+ __ret = (poly8x16_t) __builtin_neon_vmulq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 36);
+ __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
+ return __ret;
+}
+#endif
+
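+/* The *_lane forms are macros rather than inline functions because the lane
+ * index __p2 must be an integer constant: the selected lane is splatted
+ * across the vector with __builtin_shufflevector and then multiplied in. */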
+#ifdef __LITTLE_ENDIAN__
+#define vmulq_lane_u32(__p0, __p1, __p2) __extension__ ({ \
+ uint32x4_t __s0 = __p0; \
+ uint32x2_t __s1 = __p1; \
+ uint32x4_t __ret; \
+ __ret = __s0 * __builtin_shufflevector(__s1, __s1, __p2, __p2, __p2, __p2); \
+ __ret; \
+})
+#else
+#define vmulq_lane_u32(__p0, __p1, __p2) __extension__ ({ \
+ uint32x4_t __s0 = __p0; \
+ uint32x2_t __s1 = __p1; \
+ uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
+ uint32x2_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \
+ uint32x4_t __ret; \
+ __ret = __rev0 * __builtin_shufflevector(__rev1, __rev1, __p2, __p2, __p2, __p2); \
+ __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
+ __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vmulq_lane_u16(__p0, __p1, __p2) __extension__ ({ \
+ uint16x8_t __s0 = __p0; \
+ uint16x4_t __s1 = __p1; \
+ uint16x8_t __ret; \
+ __ret = __s0 * __builtin_shufflevector(__s1, __s1, __p2, __p2, __p2, __p2, __p2, __p2, __p2, __p2); \
+ __ret; \
+})
+#else
+#define vmulq_lane_u16(__p0, __p1, __p2) __extension__ ({ \
+ uint16x8_t __s0 = __p0; \
+ uint16x4_t __s1 = __p1; \
+ uint16x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
+ uint16x4_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
+ uint16x8_t __ret; \
+ __ret = __rev0 * __builtin_shufflevector(__rev1, __rev1, __p2, __p2, __p2, __p2, __p2, __p2, __p2, __p2); \
+ __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
+ __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vmulq_lane_f32(__p0, __p1, __p2) __extension__ ({ \
+ float32x4_t __s0 = __p0; \
+ float32x2_t __s1 = __p1; \
+ float32x4_t __ret; \
+ __ret = __s0 * __builtin_shufflevector(__s1, __s1, __p2, __p2, __p2, __p2); \
+ __ret; \
+})
+#else
+#define vmulq_lane_f32(__p0, __p1, __p2) __extension__ ({ \
+ float32x4_t __s0 = __p0; \
+ float32x2_t __s1 = __p1; \
+ float32x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
+ float32x2_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \
+ float32x4_t __ret; \
+ __ret = __rev0 * __builtin_shufflevector(__rev1, __rev1, __p2, __p2, __p2, __p2); \
+ __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
+ __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vmulq_lane_s32(__p0, __p1, __p2) __extension__ ({ \
+ int32x4_t __s0 = __p0; \
+ int32x2_t __s1 = __p1; \
+ int32x4_t __ret; \
+ __ret = __s0 * __builtin_shufflevector(__s1, __s1, __p2, __p2, __p2, __p2); \
+ __ret; \
+})
+#else
+#define vmulq_lane_s32(__p0, __p1, __p2) __extension__ ({ \
+ int32x4_t __s0 = __p0; \
+ int32x2_t __s1 = __p1; \
+ int32x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
+ int32x2_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \
+ int32x4_t __ret; \
+ __ret = __rev0 * __builtin_shufflevector(__rev1, __rev1, __p2, __p2, __p2, __p2); \
+ __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
+ __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vmulq_lane_s16(__p0, __p1, __p2) __extension__ ({ \
+ int16x8_t __s0 = __p0; \
+ int16x4_t __s1 = __p1; \
+ int16x8_t __ret; \
+ __ret = __s0 * __builtin_shufflevector(__s1, __s1, __p2, __p2, __p2, __p2, __p2, __p2, __p2, __p2); \
+ __ret; \
+})
+#else
+#define vmulq_lane_s16(__p0, __p1, __p2) __extension__ ({ \
+ int16x8_t __s0 = __p0; \
+ int16x4_t __s1 = __p1; \
+ int16x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
+ int16x4_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
+ int16x8_t __ret; \
+ __ret = __rev0 * __builtin_shufflevector(__rev1, __rev1, __p2, __p2, __p2, __p2, __p2, __p2, __p2, __p2); \
+ __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
+ __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vmul_lane_u32(__p0, __p1, __p2) __extension__ ({ \
+ uint32x2_t __s0 = __p0; \
+ uint32x2_t __s1 = __p1; \
+ uint32x2_t __ret; \
+ __ret = __s0 * __builtin_shufflevector(__s1, __s1, __p2, __p2); \
+ __ret; \
+})
+#else
+#define vmul_lane_u32(__p0, __p1, __p2) __extension__ ({ \
+ uint32x2_t __s0 = __p0; \
+ uint32x2_t __s1 = __p1; \
+ uint32x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
+ uint32x2_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \
+ uint32x2_t __ret; \
+ __ret = __rev0 * __builtin_shufflevector(__rev1, __rev1, __p2, __p2); \
+ __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
+ __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vmul_lane_u16(__p0, __p1, __p2) __extension__ ({ \
+ uint16x4_t __s0 = __p0; \
+ uint16x4_t __s1 = __p1; \
+ uint16x4_t __ret; \
+ __ret = __s0 * __builtin_shufflevector(__s1, __s1, __p2, __p2, __p2, __p2); \
+ __ret; \
+})
+#else
+#define vmul_lane_u16(__p0, __p1, __p2) __extension__ ({ \
+ uint16x4_t __s0 = __p0; \
+ uint16x4_t __s1 = __p1; \
+ uint16x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
+ uint16x4_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
+ uint16x4_t __ret; \
+ __ret = __rev0 * __builtin_shufflevector(__rev1, __rev1, __p2, __p2, __p2, __p2); \
+ __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
+ __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vmul_lane_f32(__p0, __p1, __p2) __extension__ ({ \
+ float32x2_t __s0 = __p0; \
+ float32x2_t __s1 = __p1; \
+ float32x2_t __ret; \
+ __ret = __s0 * __builtin_shufflevector(__s1, __s1, __p2, __p2); \
+ __ret; \
+})
+#else
+#define vmul_lane_f32(__p0, __p1, __p2) __extension__ ({ \
+ float32x2_t __s0 = __p0; \
+ float32x2_t __s1 = __p1; \
+ float32x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
+ float32x2_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \
+ float32x2_t __ret; \
+ __ret = __rev0 * __builtin_shufflevector(__rev1, __rev1, __p2, __p2); \
+ __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
+ __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vmul_lane_s32(__p0, __p1, __p2) __extension__ ({ \
+ int32x2_t __s0 = __p0; \
+ int32x2_t __s1 = __p1; \
+ int32x2_t __ret; \
+ __ret = __s0 * __builtin_shufflevector(__s1, __s1, __p2, __p2); \
+ __ret; \
+})
+#else
+#define vmul_lane_s32(__p0, __p1, __p2) __extension__ ({ \
+ int32x2_t __s0 = __p0; \
+ int32x2_t __s1 = __p1; \
+ int32x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
+ int32x2_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \
+ int32x2_t __ret; \
+ __ret = __rev0 * __builtin_shufflevector(__rev1, __rev1, __p2, __p2); \
+ __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
+ __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vmul_lane_s16(__p0, __p1, __p2) __extension__ ({ \
+ int16x4_t __s0 = __p0; \
+ int16x4_t __s1 = __p1; \
+ int16x4_t __ret; \
+ __ret = __s0 * __builtin_shufflevector(__s1, __s1, __p2, __p2, __p2, __p2); \
+ __ret; \
+})
+#else
+#define vmul_lane_s16(__p0, __p1, __p2) __extension__ ({ \
+ int16x4_t __s0 = __p0; \
+ int16x4_t __s1 = __p1; \
+ int16x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
+ int16x4_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
+ int16x4_t __ret; \
+ __ret = __rev0 * __builtin_shufflevector(__rev1, __rev1, __p2, __p2, __p2, __p2); \
+ __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
+ __ret; \
+})
+#endif
+
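+/* The *_n forms multiply every lane by a scalar, splatted into a compound
+ * vector literal {__p1, ...}. */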
+#ifdef __LITTLE_ENDIAN__
+__ai uint32x4_t vmulq_n_u32(uint32x4_t __p0, uint32_t __p1) {
+ uint32x4_t __ret;
+ __ret = __p0 * (uint32x4_t) {__p1, __p1, __p1, __p1};
+ return __ret;
+}
+#else
+__ai uint32x4_t vmulq_n_u32(uint32x4_t __p0, uint32_t __p1) {
+ uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
+ uint32x4_t __ret;
+ __ret = __rev0 * (uint32x4_t) {__p1, __p1, __p1, __p1};
+ __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint16x8_t vmulq_n_u16(uint16x8_t __p0, uint16_t __p1) {
+ uint16x8_t __ret;
+ __ret = __p0 * (uint16x8_t) {__p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1};
+ return __ret;
+}
+#else
+__ai uint16x8_t vmulq_n_u16(uint16x8_t __p0, uint16_t __p1) {
+ uint16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
+ uint16x8_t __ret;
+ __ret = __rev0 * (uint16x8_t) {__p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1};
+ __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai float32x4_t vmulq_n_f32(float32x4_t __p0, float32_t __p1) {
+ float32x4_t __ret;
+ __ret = __p0 * (float32x4_t) {__p1, __p1, __p1, __p1};
+ return __ret;
+}
+#else
+__ai float32x4_t vmulq_n_f32(float32x4_t __p0, float32_t __p1) {
+ float32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
+ float32x4_t __ret;
+ __ret = __rev0 * (float32x4_t) {__p1, __p1, __p1, __p1};
+ __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai int32x4_t vmulq_n_s32(int32x4_t __p0, int32_t __p1) {
+ int32x4_t __ret;
+ __ret = __p0 * (int32x4_t) {__p1, __p1, __p1, __p1};
+ return __ret;
+}
+#else
+__ai int32x4_t vmulq_n_s32(int32x4_t __p0, int32_t __p1) {
+ int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
+ int32x4_t __ret;
+ __ret = __rev0 * (int32x4_t) {__p1, __p1, __p1, __p1};
+ __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai int16x8_t vmulq_n_s16(int16x8_t __p0, int16_t __p1) {
+ int16x8_t __ret;
+ __ret = __p0 * (int16x8_t) {__p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1};
+ return __ret;
+}
+#else
+__ai int16x8_t vmulq_n_s16(int16x8_t __p0, int16_t __p1) {
+ int16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
+ int16x8_t __ret;
+ __ret = __rev0 * (int16x8_t) {__p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1};
+ __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint32x2_t vmul_n_u32(uint32x2_t __p0, uint32_t __p1) {
+ uint32x2_t __ret;
+ __ret = __p0 * (uint32x2_t) {__p1, __p1};
+ return __ret;
+}
+#else
+__ai uint32x2_t vmul_n_u32(uint32x2_t __p0, uint32_t __p1) {
+ uint32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
+ uint32x2_t __ret;
+ __ret = __rev0 * (uint32x2_t) {__p1, __p1};
+ __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint16x4_t vmul_n_u16(uint16x4_t __p0, uint16_t __p1) {
+ uint16x4_t __ret;
+ __ret = __p0 * (uint16x4_t) {__p1, __p1, __p1, __p1};
+ return __ret;
+}
+#else
+__ai uint16x4_t vmul_n_u16(uint16x4_t __p0, uint16_t __p1) {
+ uint16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
+ uint16x4_t __ret;
+ __ret = __rev0 * (uint16x4_t) {__p1, __p1, __p1, __p1};
+ __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai float32x2_t vmul_n_f32(float32x2_t __p0, float32_t __p1) {
+ float32x2_t __ret;
+ __ret = __p0 * (float32x2_t) {__p1, __p1};
+ return __ret;
+}
+#else
+__ai float32x2_t vmul_n_f32(float32x2_t __p0, float32_t __p1) {
+ float32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
+ float32x2_t __ret;
+ __ret = __rev0 * (float32x2_t) {__p1, __p1};
+ __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai int32x2_t vmul_n_s32(int32x2_t __p0, int32_t __p1) {
+ int32x2_t __ret;
+ __ret = __p0 * (int32x2_t) {__p1, __p1};
+ return __ret;
+}
+#else
+__ai int32x2_t vmul_n_s32(int32x2_t __p0, int32_t __p1) {
+ int32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
+ int32x2_t __ret;
+ __ret = __rev0 * (int32x2_t) {__p1, __p1};
+ __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai int16x4_t vmul_n_s16(int16x4_t __p0, int16_t __p1) {
+ int16x4_t __ret;
+ __ret = __p0 * (int16x4_t) {__p1, __p1, __p1, __p1};
+ return __ret;
+}
+#else
+__ai int16x4_t vmul_n_s16(int16x4_t __p0, int16_t __p1) {
+ int16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
+ int16x4_t __ret;
+ __ret = __rev0 * (int16x4_t) {__p1, __p1, __p1, __p1};
+ __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
+ return __ret;
+}
+#endif
+
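+/* vmull: widening multiply. Each result lane is twice the width of the
+ * source lanes (e.g. uint8x8_t x uint8x8_t -> uint16x8_t), so the product
+ * cannot overflow. */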
+#ifdef __LITTLE_ENDIAN__
+__ai poly16x8_t vmull_p8(poly8x8_t __p0, poly8x8_t __p1) {
+ poly16x8_t __ret;
+ __ret = (poly16x8_t) __builtin_neon_vmull_v((int8x8_t)__p0, (int8x8_t)__p1, 37);
+ return __ret;
+}
+#else
+__ai poly16x8_t vmull_p8(poly8x8_t __p0, poly8x8_t __p1) {
+ poly8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
+ poly8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
+ poly16x8_t __ret;
+ __ret = (poly16x8_t) __builtin_neon_vmull_v((int8x8_t)__rev0, (int8x8_t)__rev1, 37);
+ __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
+ return __ret;
+}
+__ai poly16x8_t __noswap_vmull_p8(poly8x8_t __p0, poly8x8_t __p1) {
+ poly16x8_t __ret;
+ __ret = (poly16x8_t) __builtin_neon_vmull_v((int8x8_t)__p0, (int8x8_t)__p1, 37);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint16x8_t vmull_u8(uint8x8_t __p0, uint8x8_t __p1) {
+ uint16x8_t __ret;
+ __ret = (uint16x8_t) __builtin_neon_vmull_v((int8x8_t)__p0, (int8x8_t)__p1, 49);
+ return __ret;
+}
+#else
+__ai uint16x8_t vmull_u8(uint8x8_t __p0, uint8x8_t __p1) {
+ uint8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
+ uint8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
+ uint16x8_t __ret;
+ __ret = (uint16x8_t) __builtin_neon_vmull_v((int8x8_t)__rev0, (int8x8_t)__rev1, 49);
+ __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
+ return __ret;
+}
+__ai uint16x8_t __noswap_vmull_u8(uint8x8_t __p0, uint8x8_t __p1) {
+ uint16x8_t __ret;
+ __ret = (uint16x8_t) __builtin_neon_vmull_v((int8x8_t)__p0, (int8x8_t)__p1, 49);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint64x2_t vmull_u32(uint32x2_t __p0, uint32x2_t __p1) {
+ uint64x2_t __ret;
+ __ret = (uint64x2_t) __builtin_neon_vmull_v((int8x8_t)__p0, (int8x8_t)__p1, 51);
+ return __ret;
+}
+#else
+__ai uint64x2_t vmull_u32(uint32x2_t __p0, uint32x2_t __p1) {
+ uint32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
+ uint32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
+ uint64x2_t __ret;
+ __ret = (uint64x2_t) __builtin_neon_vmull_v((int8x8_t)__rev0, (int8x8_t)__rev1, 51);
+ __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
+ return __ret;
+}
+__ai uint64x2_t __noswap_vmull_u32(uint32x2_t __p0, uint32x2_t __p1) {
+ uint64x2_t __ret;
+ __ret = (uint64x2_t) __builtin_neon_vmull_v((int8x8_t)__p0, (int8x8_t)__p1, 51);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint32x4_t vmull_u16(uint16x4_t __p0, uint16x4_t __p1) {
+ uint32x4_t __ret;
+ __ret = (uint32x4_t) __builtin_neon_vmull_v((int8x8_t)__p0, (int8x8_t)__p1, 50);
+ return __ret;
+}
+#else
+__ai uint32x4_t vmull_u16(uint16x4_t __p0, uint16x4_t __p1) {
+ uint16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
+ uint16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
+ uint32x4_t __ret;
+ __ret = (uint32x4_t) __builtin_neon_vmull_v((int8x8_t)__rev0, (int8x8_t)__rev1, 50);
+ __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
+ return __ret;
+}
+__ai uint32x4_t __noswap_vmull_u16(uint16x4_t __p0, uint16x4_t __p1) {
+ uint32x4_t __ret;
+ __ret = (uint32x4_t) __builtin_neon_vmull_v((int8x8_t)__p0, (int8x8_t)__p1, 50);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai int16x8_t vmull_s8(int8x8_t __p0, int8x8_t __p1) {
+ int16x8_t __ret;
+ __ret = (int16x8_t) __builtin_neon_vmull_v((int8x8_t)__p0, (int8x8_t)__p1, 33);
+ return __ret;
+}
+#else
+__ai int16x8_t vmull_s8(int8x8_t __p0, int8x8_t __p1) {
+ int8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
+ int8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
+ int16x8_t __ret;
+ __ret = (int16x8_t) __builtin_neon_vmull_v((int8x8_t)__rev0, (int8x8_t)__rev1, 33);
+ __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
+ return __ret;
+}
+__ai int16x8_t __noswap_vmull_s8(int8x8_t __p0, int8x8_t __p1) {
+ int16x8_t __ret;
+ __ret = (int16x8_t) __builtin_neon_vmull_v((int8x8_t)__p0, (int8x8_t)__p1, 33);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai int64x2_t vmull_s32(int32x2_t __p0, int32x2_t __p1) {
+ int64x2_t __ret;
+ __ret = (int64x2_t) __builtin_neon_vmull_v((int8x8_t)__p0, (int8x8_t)__p1, 35);
+ return __ret;
+}
+#else
+__ai int64x2_t vmull_s32(int32x2_t __p0, int32x2_t __p1) {
+ int32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
+ int32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
+ int64x2_t __ret;
+ __ret = (int64x2_t) __builtin_neon_vmull_v((int8x8_t)__rev0, (int8x8_t)__rev1, 35);
+ __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
+ return __ret;
+}
+__ai int64x2_t __noswap_vmull_s32(int32x2_t __p0, int32x2_t __p1) {
+ int64x2_t __ret;
+ __ret = (int64x2_t) __builtin_neon_vmull_v((int8x8_t)__p0, (int8x8_t)__p1, 35);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai int32x4_t vmull_s16(int16x4_t __p0, int16x4_t __p1) {
+ int32x4_t __ret;
+ __ret = (int32x4_t) __builtin_neon_vmull_v((int8x8_t)__p0, (int8x8_t)__p1, 34);
+ return __ret;
+}
+#else
+__ai int32x4_t vmull_s16(int16x4_t __p0, int16x4_t __p1) {
+ int16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
+ int16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
+ int32x4_t __ret;
+ __ret = (int32x4_t) __builtin_neon_vmull_v((int8x8_t)__rev0, (int8x8_t)__rev1, 34);
+ __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
+ return __ret;
+}
+__ai int32x4_t __noswap_vmull_s16(int16x4_t __p0, int16x4_t __p1) {
+ int32x4_t __ret;
+ __ret = (int32x4_t) __builtin_neon_vmull_v((int8x8_t)__p0, (int8x8_t)__p1, 34);
+ return __ret;
+}
+#endif
+
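+/* vmull_lane: widening multiply by a broadcast lane. The big-endian bodies
+ * call the __noswap_vmull_* helpers on pre-reversed operands. */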
+#ifdef __LITTLE_ENDIAN__
+#define vmull_lane_u32(__p0, __p1, __p2) __extension__ ({ \
+ uint32x2_t __s0 = __p0; \
+ uint32x2_t __s1 = __p1; \
+ uint64x2_t __ret; \
+ __ret = vmull_u32(__s0, __builtin_shufflevector(__s1, __s1, __p2, __p2)); \
+ __ret; \
+})
+#else
+#define vmull_lane_u32(__p0, __p1, __p2) __extension__ ({ \
+ uint32x2_t __s0 = __p0; \
+ uint32x2_t __s1 = __p1; \
+ uint32x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
+ uint32x2_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \
+ uint64x2_t __ret; \
+ __ret = __noswap_vmull_u32(__rev0, __builtin_shufflevector(__rev1, __rev1, __p2, __p2)); \
+ __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
+ __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vmull_lane_u16(__p0, __p1, __p2) __extension__ ({ \
+ uint16x4_t __s0 = __p0; \
+ uint16x4_t __s1 = __p1; \
+ uint32x4_t __ret; \
+ __ret = vmull_u16(__s0, __builtin_shufflevector(__s1, __s1, __p2, __p2, __p2, __p2)); \
+ __ret; \
+})
+#else
+#define vmull_lane_u16(__p0, __p1, __p2) __extension__ ({ \
+ uint16x4_t __s0 = __p0; \
+ uint16x4_t __s1 = __p1; \
+ uint16x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
+ uint16x4_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
+ uint32x4_t __ret; \
+ __ret = __noswap_vmull_u16(__rev0, __builtin_shufflevector(__rev1, __rev1, __p2, __p2, __p2, __p2)); \
+ __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
+ __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vmull_lane_s32(__p0, __p1, __p2) __extension__ ({ \
+ int32x2_t __s0 = __p0; \
+ int32x2_t __s1 = __p1; \
+ int64x2_t __ret; \
+ __ret = vmull_s32(__s0, __builtin_shufflevector(__s1, __s1, __p2, __p2)); \
+ __ret; \
+})
+#else
+#define vmull_lane_s32(__p0, __p1, __p2) __extension__ ({ \
+ int32x2_t __s0 = __p0; \
+ int32x2_t __s1 = __p1; \
+ int32x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
+ int32x2_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \
+ int64x2_t __ret; \
+ __ret = __noswap_vmull_s32(__rev0, __builtin_shufflevector(__rev1, __rev1, __p2, __p2)); \
+ __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
+ __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vmull_lane_s16(__p0, __p1, __p2) __extension__ ({ \
+ int16x4_t __s0 = __p0; \
+ int16x4_t __s1 = __p1; \
+ int32x4_t __ret; \
+ __ret = vmull_s16(__s0, __builtin_shufflevector(__s1, __s1, __p2, __p2, __p2, __p2)); \
+ __ret; \
+})
+#else
+#define vmull_lane_s16(__p0, __p1, __p2) __extension__ ({ \
+ int16x4_t __s0 = __p0; \
+ int16x4_t __s1 = __p1; \
+ int16x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
+ int16x4_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
+ int32x4_t __ret; \
+ __ret = __noswap_vmull_s16(__rev0, __builtin_shufflevector(__rev1, __rev1, __p2, __p2, __p2, __p2)); \
+ __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
+ __ret; \
+})
+#endif
+
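+/* vmull_n: widening multiply by a splatted scalar. */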
+#ifdef __LITTLE_ENDIAN__
+__ai uint64x2_t vmull_n_u32(uint32x2_t __p0, uint32_t __p1) {
+ uint64x2_t __ret;
+ __ret = (uint64x2_t) __builtin_neon_vmull_v((int8x8_t)__p0, (int8x8_t)(uint32x2_t) {__p1, __p1}, 51);
+ return __ret;
+}
+#else
+__ai uint64x2_t vmull_n_u32(uint32x2_t __p0, uint32_t __p1) {
+ uint32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
+ uint64x2_t __ret;
+ __ret = (uint64x2_t) __builtin_neon_vmull_v((int8x8_t)__rev0, (int8x8_t)(uint32x2_t) {__p1, __p1}, 51);
+ __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
+ return __ret;
+}
+__ai uint64x2_t __noswap_vmull_n_u32(uint32x2_t __p0, uint32_t __p1) {
+ uint64x2_t __ret;
+ __ret = (uint64x2_t) __builtin_neon_vmull_v((int8x8_t)__p0, (int8x8_t)(uint32x2_t) {__p1, __p1}, 51);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint32x4_t vmull_n_u16(uint16x4_t __p0, uint16_t __p1) {
+ uint32x4_t __ret;
+ __ret = (uint32x4_t) __builtin_neon_vmull_v((int8x8_t)__p0, (int8x8_t)(uint16x4_t) {__p1, __p1, __p1, __p1}, 50);
+ return __ret;
+}
+#else
+__ai uint32x4_t vmull_n_u16(uint16x4_t __p0, uint16_t __p1) {
+ uint16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
+ uint32x4_t __ret;
+ __ret = (uint32x4_t) __builtin_neon_vmull_v((int8x8_t)__rev0, (int8x8_t)(uint16x4_t) {__p1, __p1, __p1, __p1}, 50);
+ __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
+ return __ret;
+}
+__ai uint32x4_t __noswap_vmull_n_u16(uint16x4_t __p0, uint16_t __p1) {
+ uint32x4_t __ret;
+ __ret = (uint32x4_t) __builtin_neon_vmull_v((int8x8_t)__p0, (int8x8_t)(uint16x4_t) {__p1, __p1, __p1, __p1}, 50);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai int64x2_t vmull_n_s32(int32x2_t __p0, int32_t __p1) {
+ int64x2_t __ret;
+ __ret = (int64x2_t) __builtin_neon_vmull_v((int8x8_t)__p0, (int8x8_t)(int32x2_t) {__p1, __p1}, 35);
+ return __ret;
+}
+#else
+__ai int64x2_t vmull_n_s32(int32x2_t __p0, int32_t __p1) {
+ int32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
+ int64x2_t __ret;
+ __ret = (int64x2_t) __builtin_neon_vmull_v((int8x8_t)__rev0, (int8x8_t)(int32x2_t) {__p1, __p1}, 35);
+ __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
+ return __ret;
+}
+__ai int64x2_t __noswap_vmull_n_s32(int32x2_t __p0, int32_t __p1) {
+ int64x2_t __ret;
+ __ret = (int64x2_t) __builtin_neon_vmull_v((int8x8_t)__p0, (int8x8_t)(int32x2_t) {__p1, __p1}, 35);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai int32x4_t vmull_n_s16(int16x4_t __p0, int16_t __p1) {
+ int32x4_t __ret;
+ __ret = (int32x4_t) __builtin_neon_vmull_v((int8x8_t)__p0, (int8x8_t)(int16x4_t) {__p1, __p1, __p1, __p1}, 34);
+ return __ret;
+}
+#else
+__ai int32x4_t vmull_n_s16(int16x4_t __p0, int16_t __p1) {
+ int16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
+ int32x4_t __ret;
+ __ret = (int32x4_t) __builtin_neon_vmull_v((int8x8_t)__rev0, (int8x8_t)(int16x4_t) {__p1, __p1, __p1, __p1}, 34);
+ __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
+ return __ret;
+}
+__ai int32x4_t __noswap_vmull_n_s16(int16x4_t __p0, int16_t __p1) {
+ int32x4_t __ret;
+ __ret = (int32x4_t) __builtin_neon_vmull_v((int8x8_t)__p0, (int8x8_t)(int16x4_t) {__p1, __p1, __p1, __p1}, 34);
+ return __ret;
+}
+#endif
+
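+/* vmvn: bitwise NOT of every lane (~). */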
+#ifdef __LITTLE_ENDIAN__
+__ai poly8x8_t vmvn_p8(poly8x8_t __p0) {
+ poly8x8_t __ret;
+ __ret = ~__p0;
+ return __ret;
+}
+#else
+__ai poly8x8_t vmvn_p8(poly8x8_t __p0) {
+ poly8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
+ poly8x8_t __ret;
+ __ret = ~__rev0;
+ __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai poly8x16_t vmvnq_p8(poly8x16_t __p0) {
+ poly8x16_t __ret;
+ __ret = ~__p0;
+ return __ret;
+}
+#else
+__ai poly8x16_t vmvnq_p8(poly8x16_t __p0) {
+ poly8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
+ poly8x16_t __ret;
+ __ret = ~__rev0;
+ __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint8x16_t vmvnq_u8(uint8x16_t __p0) {
+ uint8x16_t __ret;
+ __ret = ~__p0;
+ return __ret;
+}
+#else
+__ai uint8x16_t vmvnq_u8(uint8x16_t __p0) {
+ uint8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
+ uint8x16_t __ret;
+ __ret = ~__rev0;
+ __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint32x4_t vmvnq_u32(uint32x4_t __p0) {
+ uint32x4_t __ret;
+ __ret = ~__p0;
+ return __ret;
+}
+#else
+__ai uint32x4_t vmvnq_u32(uint32x4_t __p0) {
+ uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
+ uint32x4_t __ret;
+ __ret = ~__rev0;
+ __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint16x8_t vmvnq_u16(uint16x8_t __p0) {
+ uint16x8_t __ret;
+ __ret = ~__p0;
+ return __ret;
+}
+#else
+__ai uint16x8_t vmvnq_u16(uint16x8_t __p0) {
+ uint16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
+ uint16x8_t __ret;
+ __ret = ~__rev0;
+ __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai int8x16_t vmvnq_s8(int8x16_t __p0) {
+ int8x16_t __ret;
+ __ret = ~__p0;
+ return __ret;
+}
+#else
+__ai int8x16_t vmvnq_s8(int8x16_t __p0) {
+ int8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
+ int8x16_t __ret;
+ __ret = ~__rev0;
+ __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai int32x4_t vmvnq_s32(int32x4_t __p0) {
+ int32x4_t __ret;
+ __ret = ~__p0;
+ return __ret;
+}
+#else
+__ai int32x4_t vmvnq_s32(int32x4_t __p0) {
+ int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
+ int32x4_t __ret;
+ __ret = ~__rev0;
+ __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai int16x8_t vmvnq_s16(int16x8_t __p0) {
+ int16x8_t __ret;
+ __ret = ~__p0;
+ return __ret;
+}
+#else
+__ai int16x8_t vmvnq_s16(int16x8_t __p0) {
+ int16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
+ int16x8_t __ret;
+ __ret = ~__rev0;
+ __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint8x8_t vmvn_u8(uint8x8_t __p0) {
+ uint8x8_t __ret;
+ __ret = ~__p0;
+ return __ret;
+}
+#else
+__ai uint8x8_t vmvn_u8(uint8x8_t __p0) {
+ uint8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
+ uint8x8_t __ret;
+ __ret = ~__rev0;
+ __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint32x2_t vmvn_u32(uint32x2_t __p0) {
+ uint32x2_t __ret;
+ __ret = ~__p0;
+ return __ret;
+}
+#else
+__ai uint32x2_t vmvn_u32(uint32x2_t __p0) {
+ uint32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
+ uint32x2_t __ret;
+ __ret = ~__rev0;
+ __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint16x4_t vmvn_u16(uint16x4_t __p0) {
+ uint16x4_t __ret;
+ __ret = ~__p0;
+ return __ret;
+}
+#else
+__ai uint16x4_t vmvn_u16(uint16x4_t __p0) {
+ uint16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
+ uint16x4_t __ret;
+ __ret = ~__rev0;
+ __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai int8x8_t vmvn_s8(int8x8_t __p0) {
+ int8x8_t __ret;
+ __ret = ~__p0;
+ return __ret;
+}
+#else
+__ai int8x8_t vmvn_s8(int8x8_t __p0) {
+ int8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
+ int8x8_t __ret;
+ __ret = ~__rev0;
+ __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai int32x2_t vmvn_s32(int32x2_t __p0) {
+ int32x2_t __ret;
+ __ret = ~__p0;
+ return __ret;
+}
+#else
+__ai int32x2_t vmvn_s32(int32x2_t __p0) {
+ int32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
+ int32x2_t __ret;
+ __ret = ~__rev0;
+ __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai int16x4_t vmvn_s16(int16x4_t __p0) {
+ int16x4_t __ret;
+ __ret = ~__p0;
+ return __ret;
+}
+#else
+__ai int16x4_t vmvn_s16(int16x4_t __p0) {
+ int16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
+ int16x4_t __ret;
+ __ret = ~__rev0;
+ __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
+ return __ret;
+}
+#endif
+
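+/* vneg: arithmetic negation of every lane (-). */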
+#ifdef __LITTLE_ENDIAN__
+__ai int8x16_t vnegq_s8(int8x16_t __p0) {
+ int8x16_t __ret;
+ __ret = -__p0;
+ return __ret;
+}
+#else
+__ai int8x16_t vnegq_s8(int8x16_t __p0) {
+ int8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
+ int8x16_t __ret;
+ __ret = -__rev0;
+ __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai float32x4_t vnegq_f32(float32x4_t __p0) {
+ float32x4_t __ret;
+ __ret = -__p0;
+ return __ret;
+}
+#else
+__ai float32x4_t vnegq_f32(float32x4_t __p0) {
+ float32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
+ float32x4_t __ret;
+ __ret = -__rev0;
+ __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai int32x4_t vnegq_s32(int32x4_t __p0) {
+ int32x4_t __ret;
+ __ret = -__p0;
+ return __ret;
+}
+#else
+__ai int32x4_t vnegq_s32(int32x4_t __p0) {
+ int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
+ int32x4_t __ret;
+ __ret = -__rev0;
+ __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai int16x8_t vnegq_s16(int16x8_t __p0) {
+ int16x8_t __ret;
+ __ret = -__p0;
+ return __ret;
+}
+#else
+__ai int16x8_t vnegq_s16(int16x8_t __p0) {
+ int16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
+ int16x8_t __ret;
+ __ret = -__rev0;
+ __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai int8x8_t vneg_s8(int8x8_t __p0) {
+ int8x8_t __ret;
+ __ret = -__p0;
+ return __ret;
+}
+#else
+__ai int8x8_t vneg_s8(int8x8_t __p0) {
+ int8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
+ int8x8_t __ret;
+ __ret = -__rev0;
+ __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai float32x2_t vneg_f32(float32x2_t __p0) {
+ float32x2_t __ret;
+ __ret = -__p0;
+ return __ret;
+}
+#else
+__ai float32x2_t vneg_f32(float32x2_t __p0) {
+ float32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
+ float32x2_t __ret;
+ __ret = -__rev0;
+ __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai int32x2_t vneg_s32(int32x2_t __p0) {
+ int32x2_t __ret;
+ __ret = -__p0;
+ return __ret;
+}
+#else
+__ai int32x2_t vneg_s32(int32x2_t __p0) {
+ int32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
+ int32x2_t __ret;
+ __ret = -__rev0;
+ __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai int16x4_t vneg_s16(int16x4_t __p0) {
+ int16x4_t __ret;
+ __ret = -__p0;
+ return __ret;
+}
+#else
+__ai int16x4_t vneg_s16(int16x4_t __p0) {
+ int16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
+ int16x4_t __ret;
+ __ret = -__rev0;
+ __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
+ return __ret;
+}
+#endif
+
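+/* vorn: OR with the complement of the second operand (__p0 | ~__p1). The
+ * 64x1 variants need no lane swap, so both endianness bodies are identical. */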
+#ifdef __LITTLE_ENDIAN__
+__ai uint8x16_t vornq_u8(uint8x16_t __p0, uint8x16_t __p1) {
+ uint8x16_t __ret;
+ __ret = __p0 | ~__p1;
+ return __ret;
+}
+#else
+__ai uint8x16_t vornq_u8(uint8x16_t __p0, uint8x16_t __p1) {
+ uint8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
+ uint8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
+ uint8x16_t __ret;
+ __ret = __rev0 | ~__rev1;
+ __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint32x4_t vornq_u32(uint32x4_t __p0, uint32x4_t __p1) {
+ uint32x4_t __ret;
+ __ret = __p0 | ~__p1;
+ return __ret;
+}
+#else
+__ai uint32x4_t vornq_u32(uint32x4_t __p0, uint32x4_t __p1) {
+ uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
+ uint32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
+ uint32x4_t __ret;
+ __ret = __rev0 | ~__rev1;
+ __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint64x2_t vornq_u64(uint64x2_t __p0, uint64x2_t __p1) {
+ uint64x2_t __ret;
+ __ret = __p0 | ~__p1;
+ return __ret;
+}
+#else
+__ai uint64x2_t vornq_u64(uint64x2_t __p0, uint64x2_t __p1) {
+ uint64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
+ uint64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
+ uint64x2_t __ret;
+ __ret = __rev0 | ~__rev1;
+ __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint16x8_t vornq_u16(uint16x8_t __p0, uint16x8_t __p1) {
+ uint16x8_t __ret;
+ __ret = __p0 | ~__p1;
+ return __ret;
+}
+#else
+__ai uint16x8_t vornq_u16(uint16x8_t __p0, uint16x8_t __p1) {
+ uint16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
+ uint16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
+ uint16x8_t __ret;
+ __ret = __rev0 | ~__rev1;
+ __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai int8x16_t vornq_s8(int8x16_t __p0, int8x16_t __p1) {
+ int8x16_t __ret;
+ __ret = __p0 | ~__p1;
+ return __ret;
+}
+#else
+__ai int8x16_t vornq_s8(int8x16_t __p0, int8x16_t __p1) {
+ int8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
+ int8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
+ int8x16_t __ret;
+ __ret = __rev0 | ~__rev1;
+ __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai int32x4_t vornq_s32(int32x4_t __p0, int32x4_t __p1) {
+ int32x4_t __ret;
+ __ret = __p0 | ~__p1;
+ return __ret;
+}
+#else
+__ai int32x4_t vornq_s32(int32x4_t __p0, int32x4_t __p1) {
+ int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
+ int32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
+ int32x4_t __ret;
+ __ret = __rev0 | ~__rev1;
+ __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai int64x2_t vornq_s64(int64x2_t __p0, int64x2_t __p1) {
+ int64x2_t __ret;
+ __ret = __p0 | ~__p1;
+ return __ret;
+}
+#else
+__ai int64x2_t vornq_s64(int64x2_t __p0, int64x2_t __p1) {
+ int64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
+ int64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
+ int64x2_t __ret;
+ __ret = __rev0 | ~__rev1;
+ __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai int16x8_t vornq_s16(int16x8_t __p0, int16x8_t __p1) {
+ int16x8_t __ret;
+ __ret = __p0 | ~__p1;
+ return __ret;
+}
+#else
+__ai int16x8_t vornq_s16(int16x8_t __p0, int16x8_t __p1) {
+ int16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
+ int16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
+ int16x8_t __ret;
+ __ret = __rev0 | ~__rev1;
+ __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint8x8_t vorn_u8(uint8x8_t __p0, uint8x8_t __p1) {
+ uint8x8_t __ret;
+ __ret = __p0 | ~__p1;
+ return __ret;
+}
+#else
+__ai uint8x8_t vorn_u8(uint8x8_t __p0, uint8x8_t __p1) {
+ uint8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
+ uint8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
+ uint8x8_t __ret;
+ __ret = __rev0 | ~__rev1;
+ __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint32x2_t vorn_u32(uint32x2_t __p0, uint32x2_t __p1) {
+ uint32x2_t __ret;
+ __ret = __p0 | ~__p1;
+ return __ret;
+}
+#else
+__ai uint32x2_t vorn_u32(uint32x2_t __p0, uint32x2_t __p1) {
+ uint32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
+ uint32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
+ uint32x2_t __ret;
+ __ret = __rev0 | ~__rev1;
+ __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint64x1_t vorn_u64(uint64x1_t __p0, uint64x1_t __p1) {
+ uint64x1_t __ret;
+ __ret = __p0 | ~__p1;
+ return __ret;
+}
+#else
+__ai uint64x1_t vorn_u64(uint64x1_t __p0, uint64x1_t __p1) {
+ uint64x1_t __ret;
+ __ret = __p0 | ~__p1;
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint16x4_t vorn_u16(uint16x4_t __p0, uint16x4_t __p1) {
+ uint16x4_t __ret;
+ __ret = __p0 | ~__p1;
+ return __ret;
+}
+#else
+__ai uint16x4_t vorn_u16(uint16x4_t __p0, uint16x4_t __p1) {
+ uint16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
+ uint16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
+ uint16x4_t __ret;
+ __ret = __rev0 | ~__rev1;
+ __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai int8x8_t vorn_s8(int8x8_t __p0, int8x8_t __p1) {
+ int8x8_t __ret;
+ __ret = __p0 | ~__p1;
+ return __ret;
+}
+#else
+__ai int8x8_t vorn_s8(int8x8_t __p0, int8x8_t __p1) {
+ int8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
+ int8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
+ int8x8_t __ret;
+ __ret = __rev0 | ~__rev1;
+ __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai int32x2_t vorn_s32(int32x2_t __p0, int32x2_t __p1) {
+ int32x2_t __ret;
+ __ret = __p0 | ~__p1;
+ return __ret;
+}
+#else
+__ai int32x2_t vorn_s32(int32x2_t __p0, int32x2_t __p1) {
+ int32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
+ int32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
+ int32x2_t __ret;
+ __ret = __rev0 | ~__rev1;
+ __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai int64x1_t vorn_s64(int64x1_t __p0, int64x1_t __p1) {
+ int64x1_t __ret;
+ __ret = __p0 | ~__p1;
+ return __ret;
+}
+#else
+__ai int64x1_t vorn_s64(int64x1_t __p0, int64x1_t __p1) {
+ int64x1_t __ret;
+ __ret = __p0 | ~__p1;
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai int16x4_t vorn_s16(int16x4_t __p0, int16x4_t __p1) {
+ int16x4_t __ret;
+ __ret = __p0 | ~__p1;
+ return __ret;
+}
+#else
+__ai int16x4_t vorn_s16(int16x4_t __p0, int16x4_t __p1) {
+ int16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
+ int16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
+ int16x4_t __ret;
+ __ret = __rev0 | ~__rev1;
+ __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
+ return __ret;
+}
+#endif
+
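+/* vorr: bitwise OR of every lane (|). */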
+#ifdef __LITTLE_ENDIAN__
+__ai uint8x16_t vorrq_u8(uint8x16_t __p0, uint8x16_t __p1) {
+ uint8x16_t __ret;
+ __ret = __p0 | __p1;
+ return __ret;
+}
+#else
+__ai uint8x16_t vorrq_u8(uint8x16_t __p0, uint8x16_t __p1) {
+ uint8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
+ uint8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
+ uint8x16_t __ret;
+ __ret = __rev0 | __rev1;
+ __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint32x4_t vorrq_u32(uint32x4_t __p0, uint32x4_t __p1) {
+ uint32x4_t __ret;
+ __ret = __p0 | __p1;
+ return __ret;
+}
+#else
+__ai uint32x4_t vorrq_u32(uint32x4_t __p0, uint32x4_t __p1) {
+ uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
+ uint32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
+ uint32x4_t __ret;
+ __ret = __rev0 | __rev1;
+ __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint64x2_t vorrq_u64(uint64x2_t __p0, uint64x2_t __p1) {
+ uint64x2_t __ret;
+ __ret = __p0 | __p1;
+ return __ret;
+}
+#else
+__ai uint64x2_t vorrq_u64(uint64x2_t __p0, uint64x2_t __p1) {
+ uint64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
+ uint64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
+ uint64x2_t __ret;
+ __ret = __rev0 | __rev1;
+ __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint16x8_t vorrq_u16(uint16x8_t __p0, uint16x8_t __p1) {
+ uint16x8_t __ret;
+ __ret = __p0 | __p1;
+ return __ret;
+}
+#else
+__ai uint16x8_t vorrq_u16(uint16x8_t __p0, uint16x8_t __p1) {
+ uint16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
+ uint16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
+ uint16x8_t __ret;
+ __ret = __rev0 | __rev1;
+ __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai int8x16_t vorrq_s8(int8x16_t __p0, int8x16_t __p1) {
+ int8x16_t __ret;
+ __ret = __p0 | __p1;
+ return __ret;
+}
+#else
+__ai int8x16_t vorrq_s8(int8x16_t __p0, int8x16_t __p1) {
+ int8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
+ int8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
+ int8x16_t __ret;
+ __ret = __rev0 | __rev1;
+ __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai int32x4_t vorrq_s32(int32x4_t __p0, int32x4_t __p1) {
+ int32x4_t __ret;
+ __ret = __p0 | __p1;
+ return __ret;
+}
+#else
+__ai int32x4_t vorrq_s32(int32x4_t __p0, int32x4_t __p1) {
+ int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
+ int32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
+ int32x4_t __ret;
+ __ret = __rev0 | __rev1;
+ __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai int64x2_t vorrq_s64(int64x2_t __p0, int64x2_t __p1) {
+ int64x2_t __ret;
+ __ret = __p0 | __p1;
+ return __ret;
+}
+#else
+__ai int64x2_t vorrq_s64(int64x2_t __p0, int64x2_t __p1) {
+ int64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
+ int64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
+ int64x2_t __ret;
+ __ret = __rev0 | __rev1;
+ __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai int16x8_t vorrq_s16(int16x8_t __p0, int16x8_t __p1) {
+ int16x8_t __ret;
+ __ret = __p0 | __p1;
+ return __ret;
+}
+#else
+__ai int16x8_t vorrq_s16(int16x8_t __p0, int16x8_t __p1) {
+ int16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
+ int16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
+ int16x8_t __ret;
+ __ret = __rev0 | __rev1;
+ __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint8x8_t vorr_u8(uint8x8_t __p0, uint8x8_t __p1) {
+ uint8x8_t __ret;
+ __ret = __p0 | __p1;
+ return __ret;
+}
+#else
+__ai uint8x8_t vorr_u8(uint8x8_t __p0, uint8x8_t __p1) {
+ uint8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
+ uint8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
+ uint8x8_t __ret;
+ __ret = __rev0 | __rev1;
+ __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint32x2_t vorr_u32(uint32x2_t __p0, uint32x2_t __p1) {
+ uint32x2_t __ret;
+ __ret = __p0 | __p1;
+ return __ret;
+}
+#else
+__ai uint32x2_t vorr_u32(uint32x2_t __p0, uint32x2_t __p1) {
+ uint32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
+ uint32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
+ uint32x2_t __ret;
+ __ret = __rev0 | __rev1;
+ __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint64x1_t vorr_u64(uint64x1_t __p0, uint64x1_t __p1) {
+ uint64x1_t __ret;
+ __ret = __p0 | __p1;
+ return __ret;
+}
+#else
+__ai uint64x1_t vorr_u64(uint64x1_t __p0, uint64x1_t __p1) {
+ uint64x1_t __ret;
+ __ret = __p0 | __p1;
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint16x4_t vorr_u16(uint16x4_t __p0, uint16x4_t __p1) {
+ uint16x4_t __ret;
+ __ret = __p0 | __p1;
+ return __ret;
+}
+#else
+__ai uint16x4_t vorr_u16(uint16x4_t __p0, uint16x4_t __p1) {
+ uint16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
+ uint16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
+ uint16x4_t __ret;
+ __ret = __rev0 | __rev1;
+ __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai int8x8_t vorr_s8(int8x8_t __p0, int8x8_t __p1) {
+ int8x8_t __ret;
+ __ret = __p0 | __p1;
+ return __ret;
+}
+#else
+__ai int8x8_t vorr_s8(int8x8_t __p0, int8x8_t __p1) {
+ int8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
+ int8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
+ int8x8_t __ret;
+ __ret = __rev0 | __rev1;
+ __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai int32x2_t vorr_s32(int32x2_t __p0, int32x2_t __p1) {
+ int32x2_t __ret;
+ __ret = __p0 | __p1;
+ return __ret;
+}
+#else
+__ai int32x2_t vorr_s32(int32x2_t __p0, int32x2_t __p1) {
+ int32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
+ int32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
+ int32x2_t __ret;
+ __ret = __rev0 | __rev1;
+ __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai int64x1_t vorr_s64(int64x1_t __p0, int64x1_t __p1) {
+ int64x1_t __ret;
+ __ret = __p0 | __p1;
+ return __ret;
+}
+#else
+__ai int64x1_t vorr_s64(int64x1_t __p0, int64x1_t __p1) {
+ int64x1_t __ret;
+ __ret = __p0 | __p1;
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai int16x4_t vorr_s16(int16x4_t __p0, int16x4_t __p1) {
+ int16x4_t __ret;
+ __ret = __p0 | __p1;
+ return __ret;
+}
+#else
+__ai int16x4_t vorr_s16(int16x4_t __p0, int16x4_t __p1) {
+ int16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
+ int16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
+ int16x4_t __ret;
+ __ret = __rev0 | __rev1;
+ __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
+ return __ret;
+}
+#endif
+
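+/* vpadal[q]_*: Vector Pairwise Add and Accumulate Long (VPADAL). Adjacent
+ * pairs of elements of the second operand are summed into lanes of twice the
+ * width and accumulated into the first operand. */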
+#ifdef __LITTLE_ENDIAN__
+__ai uint16x8_t vpadalq_u8(uint16x8_t __p0, uint8x16_t __p1) {
+ uint16x8_t __ret;
+ __ret = (uint16x8_t) __builtin_neon_vpadalq_v((int8x16_t)__p0, (int8x16_t)__p1, 49);
+ return __ret;
+}
+#else
+__ai uint16x8_t vpadalq_u8(uint16x8_t __p0, uint8x16_t __p1) {
+ uint16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
+ uint8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
+ uint16x8_t __ret;
+ __ret = (uint16x8_t) __builtin_neon_vpadalq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 49);
+ __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint64x2_t vpadalq_u32(uint64x2_t __p0, uint32x4_t __p1) {
+ uint64x2_t __ret;
+ __ret = (uint64x2_t) __builtin_neon_vpadalq_v((int8x16_t)__p0, (int8x16_t)__p1, 51);
+ return __ret;
+}
+#else
+__ai uint64x2_t vpadalq_u32(uint64x2_t __p0, uint32x4_t __p1) {
+ uint64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
+ uint32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
+ uint64x2_t __ret;
+ __ret = (uint64x2_t) __builtin_neon_vpadalq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 51);
+ __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint32x4_t vpadalq_u16(uint32x4_t __p0, uint16x8_t __p1) {
+ uint32x4_t __ret;
+ __ret = (uint32x4_t) __builtin_neon_vpadalq_v((int8x16_t)__p0, (int8x16_t)__p1, 50);
+ return __ret;
+}
+#else
+__ai uint32x4_t vpadalq_u16(uint32x4_t __p0, uint16x8_t __p1) {
+ uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
+ uint16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
+ uint32x4_t __ret;
+ __ret = (uint32x4_t) __builtin_neon_vpadalq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 50);
+ __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai int16x8_t vpadalq_s8(int16x8_t __p0, int8x16_t __p1) {
+ int16x8_t __ret;
+ __ret = (int16x8_t) __builtin_neon_vpadalq_v((int8x16_t)__p0, (int8x16_t)__p1, 33);
+ return __ret;
+}
+#else
+__ai int16x8_t vpadalq_s8(int16x8_t __p0, int8x16_t __p1) {
+ int16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
+ int8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
+ int16x8_t __ret;
+ __ret = (int16x8_t) __builtin_neon_vpadalq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 33);
+ __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai int64x2_t vpadalq_s32(int64x2_t __p0, int32x4_t __p1) {
+ int64x2_t __ret;
+ __ret = (int64x2_t) __builtin_neon_vpadalq_v((int8x16_t)__p0, (int8x16_t)__p1, 35);
+ return __ret;
+}
+#else
+__ai int64x2_t vpadalq_s32(int64x2_t __p0, int32x4_t __p1) {
+ int64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
+ int32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
+ int64x2_t __ret;
+ __ret = (int64x2_t) __builtin_neon_vpadalq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 35);
+ __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai int32x4_t vpadalq_s16(int32x4_t __p0, int16x8_t __p1) {
+ int32x4_t __ret;
+ __ret = (int32x4_t) __builtin_neon_vpadalq_v((int8x16_t)__p0, (int8x16_t)__p1, 34);
+ return __ret;
+}
+#else
+__ai int32x4_t vpadalq_s16(int32x4_t __p0, int16x8_t __p1) {
+ int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
+ int16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
+ int32x4_t __ret;
+ __ret = (int32x4_t) __builtin_neon_vpadalq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 34);
+ __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint16x4_t vpadal_u8(uint16x4_t __p0, uint8x8_t __p1) {
+ uint16x4_t __ret;
+ __ret = (uint16x4_t) __builtin_neon_vpadal_v((int8x8_t)__p0, (int8x8_t)__p1, 17);
+ return __ret;
+}
+#else
+__ai uint16x4_t vpadal_u8(uint16x4_t __p0, uint8x8_t __p1) {
+ uint16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
+ uint8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
+ uint16x4_t __ret;
+ __ret = (uint16x4_t) __builtin_neon_vpadal_v((int8x8_t)__rev0, (int8x8_t)__rev1, 17);
+ __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint64x1_t vpadal_u32(uint64x1_t __p0, uint32x2_t __p1) {
+ uint64x1_t __ret;
+ __ret = (uint64x1_t) __builtin_neon_vpadal_v((int8x8_t)__p0, (int8x8_t)__p1, 19);
+ return __ret;
+}
+#else
+__ai uint64x1_t vpadal_u32(uint64x1_t __p0, uint32x2_t __p1) {
+ uint32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
+ uint64x1_t __ret;
+ __ret = (uint64x1_t) __builtin_neon_vpadal_v((int8x8_t)__p0, (int8x8_t)__rev1, 19);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint32x2_t vpadal_u16(uint32x2_t __p0, uint16x4_t __p1) {
+ uint32x2_t __ret;
+ __ret = (uint32x2_t) __builtin_neon_vpadal_v((int8x8_t)__p0, (int8x8_t)__p1, 18);
+ return __ret;
+}
+#else
+__ai uint32x2_t vpadal_u16(uint32x2_t __p0, uint16x4_t __p1) {
+ uint32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
+ uint16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
+ uint32x2_t __ret;
+ __ret = (uint32x2_t) __builtin_neon_vpadal_v((int8x8_t)__rev0, (int8x8_t)__rev1, 18);
+ __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai int16x4_t vpadal_s8(int16x4_t __p0, int8x8_t __p1) {
+ int16x4_t __ret;
+ __ret = (int16x4_t) __builtin_neon_vpadal_v((int8x8_t)__p0, (int8x8_t)__p1, 1);
+ return __ret;
+}
+#else
+__ai int16x4_t vpadal_s8(int16x4_t __p0, int8x8_t __p1) {
+ int16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
+ int8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
+ int16x4_t __ret;
+ __ret = (int16x4_t) __builtin_neon_vpadal_v((int8x8_t)__rev0, (int8x8_t)__rev1, 1);
+ __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai int64x1_t vpadal_s32(int64x1_t __p0, int32x2_t __p1) {
+ int64x1_t __ret;
+ __ret = (int64x1_t) __builtin_neon_vpadal_v((int8x8_t)__p0, (int8x8_t)__p1, 3);
+ return __ret;
+}
+#else
+__ai int64x1_t vpadal_s32(int64x1_t __p0, int32x2_t __p1) {
+ int32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
+ int64x1_t __ret;
+ __ret = (int64x1_t) __builtin_neon_vpadal_v((int8x8_t)__p0, (int8x8_t)__rev1, 3);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai int32x2_t vpadal_s16(int32x2_t __p0, int16x4_t __p1) {
+ int32x2_t __ret;
+ __ret = (int32x2_t) __builtin_neon_vpadal_v((int8x8_t)__p0, (int8x8_t)__p1, 2);
+ return __ret;
+}
+#else
+__ai int32x2_t vpadal_s16(int32x2_t __p0, int16x4_t __p1) {
+ int32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
+ int16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
+ int32x2_t __ret;
+ __ret = (int32x2_t) __builtin_neon_vpadal_v((int8x8_t)__rev0, (int8x8_t)__rev1, 2);
+ __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
+ return __ret;
+}
+#endif
+
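+/* vpadd_*: Vector Pairwise Add (VPADD). Adjacent pairs within each operand
+ * are summed; results from __p0 fill the low half of the result and those
+ * from __p1 the high half. Defined for 64-bit vectors only. */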
+#ifdef __LITTLE_ENDIAN__
+__ai uint8x8_t vpadd_u8(uint8x8_t __p0, uint8x8_t __p1) {
+ uint8x8_t __ret;
+ __ret = (uint8x8_t) __builtin_neon_vpadd_v((int8x8_t)__p0, (int8x8_t)__p1, 16);
+ return __ret;
+}
+#else
+__ai uint8x8_t vpadd_u8(uint8x8_t __p0, uint8x8_t __p1) {
+ uint8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
+ uint8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
+ uint8x8_t __ret;
+ __ret = (uint8x8_t) __builtin_neon_vpadd_v((int8x8_t)__rev0, (int8x8_t)__rev1, 16);
+ __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint32x2_t vpadd_u32(uint32x2_t __p0, uint32x2_t __p1) {
+ uint32x2_t __ret;
+ __ret = (uint32x2_t) __builtin_neon_vpadd_v((int8x8_t)__p0, (int8x8_t)__p1, 18);
+ return __ret;
+}
+#else
+__ai uint32x2_t vpadd_u32(uint32x2_t __p0, uint32x2_t __p1) {
+ uint32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
+ uint32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
+ uint32x2_t __ret;
+ __ret = (uint32x2_t) __builtin_neon_vpadd_v((int8x8_t)__rev0, (int8x8_t)__rev1, 18);
+ __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint16x4_t vpadd_u16(uint16x4_t __p0, uint16x4_t __p1) {
+ uint16x4_t __ret;
+ __ret = (uint16x4_t) __builtin_neon_vpadd_v((int8x8_t)__p0, (int8x8_t)__p1, 17);
+ return __ret;
+}
+#else
+__ai uint16x4_t vpadd_u16(uint16x4_t __p0, uint16x4_t __p1) {
+ uint16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
+ uint16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
+ uint16x4_t __ret;
+ __ret = (uint16x4_t) __builtin_neon_vpadd_v((int8x8_t)__rev0, (int8x8_t)__rev1, 17);
+ __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai int8x8_t vpadd_s8(int8x8_t __p0, int8x8_t __p1) {
+ int8x8_t __ret;
+ __ret = (int8x8_t) __builtin_neon_vpadd_v((int8x8_t)__p0, (int8x8_t)__p1, 0);
+ return __ret;
+}
+#else
+__ai int8x8_t vpadd_s8(int8x8_t __p0, int8x8_t __p1) {
+ int8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
+ int8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
+ int8x8_t __ret;
+ __ret = (int8x8_t) __builtin_neon_vpadd_v((int8x8_t)__rev0, (int8x8_t)__rev1, 0);
+ __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai float32x2_t vpadd_f32(float32x2_t __p0, float32x2_t __p1) {
+ float32x2_t __ret;
+ __ret = (float32x2_t) __builtin_neon_vpadd_v((int8x8_t)__p0, (int8x8_t)__p1, 9);
+ return __ret;
+}
+#else
+__ai float32x2_t vpadd_f32(float32x2_t __p0, float32x2_t __p1) {
+ float32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
+ float32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
+ float32x2_t __ret;
+ __ret = (float32x2_t) __builtin_neon_vpadd_v((int8x8_t)__rev0, (int8x8_t)__rev1, 9);
+ __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai int32x2_t vpadd_s32(int32x2_t __p0, int32x2_t __p1) {
+ int32x2_t __ret;
+ __ret = (int32x2_t) __builtin_neon_vpadd_v((int8x8_t)__p0, (int8x8_t)__p1, 2);
+ return __ret;
+}
+#else
+__ai int32x2_t vpadd_s32(int32x2_t __p0, int32x2_t __p1) {
+ int32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
+ int32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
+ int32x2_t __ret;
+ __ret = (int32x2_t) __builtin_neon_vpadd_v((int8x8_t)__rev0, (int8x8_t)__rev1, 2);
+ __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai int16x4_t vpadd_s16(int16x4_t __p0, int16x4_t __p1) {
+ int16x4_t __ret;
+ __ret = (int16x4_t) __builtin_neon_vpadd_v((int8x8_t)__p0, (int8x8_t)__p1, 1);
+ return __ret;
+}
+#else
+__ai int16x4_t vpadd_s16(int16x4_t __p0, int16x4_t __p1) {
+ int16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
+ int16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
+ int16x4_t __ret;
+ __ret = (int16x4_t) __builtin_neon_vpadd_v((int8x8_t)__rev0, (int8x8_t)__rev1, 1);
+ __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
+ return __ret;
+}
+#endif
+
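+/* vpaddl[q]_*: Vector Pairwise Add Long (VPADDL). Like vpadal but without an
+ * accumulator: adjacent pairs are summed into lanes of twice the width. */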
+#ifdef __LITTLE_ENDIAN__
+__ai uint16x8_t vpaddlq_u8(uint8x16_t __p0) {
+ uint16x8_t __ret;
+ __ret = (uint16x8_t) __builtin_neon_vpaddlq_v((int8x16_t)__p0, 49);
+ return __ret;
+}
+#else
+__ai uint16x8_t vpaddlq_u8(uint8x16_t __p0) {
+ uint8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
+ uint16x8_t __ret;
+ __ret = (uint16x8_t) __builtin_neon_vpaddlq_v((int8x16_t)__rev0, 49);
+ __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint64x2_t vpaddlq_u32(uint32x4_t __p0) {
+ uint64x2_t __ret;
+ __ret = (uint64x2_t) __builtin_neon_vpaddlq_v((int8x16_t)__p0, 51);
+ return __ret;
+}
+#else
+__ai uint64x2_t vpaddlq_u32(uint32x4_t __p0) {
+ uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
+ uint64x2_t __ret;
+ __ret = (uint64x2_t) __builtin_neon_vpaddlq_v((int8x16_t)__rev0, 51);
+ __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint32x4_t vpaddlq_u16(uint16x8_t __p0) {
+ uint32x4_t __ret;
+ __ret = (uint32x4_t) __builtin_neon_vpaddlq_v((int8x16_t)__p0, 50);
+ return __ret;
+}
+#else
+__ai uint32x4_t vpaddlq_u16(uint16x8_t __p0) {
+ uint16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
+ uint32x4_t __ret;
+ __ret = (uint32x4_t) __builtin_neon_vpaddlq_v((int8x16_t)__rev0, 50);
+ __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai int16x8_t vpaddlq_s8(int8x16_t __p0) {
+ int16x8_t __ret;
+ __ret = (int16x8_t) __builtin_neon_vpaddlq_v((int8x16_t)__p0, 33);
+ return __ret;
+}
+#else
+__ai int16x8_t vpaddlq_s8(int8x16_t __p0) {
+ int8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
+ int16x8_t __ret;
+ __ret = (int16x8_t) __builtin_neon_vpaddlq_v((int8x16_t)__rev0, 33);
+ __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai int64x2_t vpaddlq_s32(int32x4_t __p0) {
+ int64x2_t __ret;
+ __ret = (int64x2_t) __builtin_neon_vpaddlq_v((int8x16_t)__p0, 35);
+ return __ret;
+}
+#else
+__ai int64x2_t vpaddlq_s32(int32x4_t __p0) {
+ int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
+ int64x2_t __ret;
+ __ret = (int64x2_t) __builtin_neon_vpaddlq_v((int8x16_t)__rev0, 35);
+ __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai int32x4_t vpaddlq_s16(int16x8_t __p0) {
+ int32x4_t __ret;
+ __ret = (int32x4_t) __builtin_neon_vpaddlq_v((int8x16_t)__p0, 34);
+ return __ret;
+}
+#else
+__ai int32x4_t vpaddlq_s16(int16x8_t __p0) {
+ int16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
+ int32x4_t __ret;
+ __ret = (int32x4_t) __builtin_neon_vpaddlq_v((int8x16_t)__rev0, 34);
+ __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint16x4_t vpaddl_u8(uint8x8_t __p0) {
+ uint16x4_t __ret;
+ __ret = (uint16x4_t) __builtin_neon_vpaddl_v((int8x8_t)__p0, 17);
+ return __ret;
+}
+#else
+__ai uint16x4_t vpaddl_u8(uint8x8_t __p0) {
+ uint8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
+ uint16x4_t __ret;
+ __ret = (uint16x4_t) __builtin_neon_vpaddl_v((int8x8_t)__rev0, 17);
+ __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint64x1_t vpaddl_u32(uint32x2_t __p0) {
+ uint64x1_t __ret;
+ __ret = (uint64x1_t) __builtin_neon_vpaddl_v((int8x8_t)__p0, 19);
+ return __ret;
+}
+#else
+__ai uint64x1_t vpaddl_u32(uint32x2_t __p0) {
+ uint32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
+ uint64x1_t __ret;
+ __ret = (uint64x1_t) __builtin_neon_vpaddl_v((int8x8_t)__rev0, 19);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint32x2_t vpaddl_u16(uint16x4_t __p0) {
+ uint32x2_t __ret;
+ __ret = (uint32x2_t) __builtin_neon_vpaddl_v((int8x8_t)__p0, 18);
+ return __ret;
+}
+#else
+__ai uint32x2_t vpaddl_u16(uint16x4_t __p0) {
+ uint16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
+ uint32x2_t __ret;
+ __ret = (uint32x2_t) __builtin_neon_vpaddl_v((int8x8_t)__rev0, 18);
+ __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai int16x4_t vpaddl_s8(int8x8_t __p0) {
+ int16x4_t __ret;
+ __ret = (int16x4_t) __builtin_neon_vpaddl_v((int8x8_t)__p0, 1);
+ return __ret;
+}
+#else
+__ai int16x4_t vpaddl_s8(int8x8_t __p0) {
+ int8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
+ int16x4_t __ret;
+ __ret = (int16x4_t) __builtin_neon_vpaddl_v((int8x8_t)__rev0, 1);
+ __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai int64x1_t vpaddl_s32(int32x2_t __p0) {
+ int64x1_t __ret;
+ __ret = (int64x1_t) __builtin_neon_vpaddl_v((int8x8_t)__p0, 3);
+ return __ret;
+}
+#else
+__ai int64x1_t vpaddl_s32(int32x2_t __p0) {
+ int32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
+ int64x1_t __ret;
+ __ret = (int64x1_t) __builtin_neon_vpaddl_v((int8x8_t)__rev0, 3);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai int32x2_t vpaddl_s16(int16x4_t __p0) {
+ int32x2_t __ret;
+ __ret = (int32x2_t) __builtin_neon_vpaddl_v((int8x8_t)__p0, 2);
+ return __ret;
+}
+#else
+__ai int32x2_t vpaddl_s16(int16x4_t __p0) {
+ int16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
+ int32x2_t __ret;
+ __ret = (int32x2_t) __builtin_neon_vpaddl_v((int8x8_t)__rev0, 2);
+ __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
+ return __ret;
+}
+#endif
+
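+/* vpmax_*: Vector Pairwise Maximum (VPMAX). Takes the maximum of each
+ * adjacent pair of elements, first within __p0 and then within __p1;
+ * 64-bit vectors only. */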
+#ifdef __LITTLE_ENDIAN__
+__ai uint8x8_t vpmax_u8(uint8x8_t __p0, uint8x8_t __p1) {
+ uint8x8_t __ret;
+ __ret = (uint8x8_t) __builtin_neon_vpmax_v((int8x8_t)__p0, (int8x8_t)__p1, 16);
+ return __ret;
+}
+#else
+__ai uint8x8_t vpmax_u8(uint8x8_t __p0, uint8x8_t __p1) {
+ uint8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
+ uint8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
+ uint8x8_t __ret;
+ __ret = (uint8x8_t) __builtin_neon_vpmax_v((int8x8_t)__rev0, (int8x8_t)__rev1, 16);
+ __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint32x2_t vpmax_u32(uint32x2_t __p0, uint32x2_t __p1) {
+ uint32x2_t __ret;
+ __ret = (uint32x2_t) __builtin_neon_vpmax_v((int8x8_t)__p0, (int8x8_t)__p1, 18);
+ return __ret;
+}
+#else
+__ai uint32x2_t vpmax_u32(uint32x2_t __p0, uint32x2_t __p1) {
+ uint32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
+ uint32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
+ uint32x2_t __ret;
+ __ret = (uint32x2_t) __builtin_neon_vpmax_v((int8x8_t)__rev0, (int8x8_t)__rev1, 18);
+ __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint16x4_t vpmax_u16(uint16x4_t __p0, uint16x4_t __p1) {
+ uint16x4_t __ret;
+ __ret = (uint16x4_t) __builtin_neon_vpmax_v((int8x8_t)__p0, (int8x8_t)__p1, 17);
+ return __ret;
+}
+#else
+__ai uint16x4_t vpmax_u16(uint16x4_t __p0, uint16x4_t __p1) {
+ uint16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
+ uint16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
+ uint16x4_t __ret;
+ __ret = (uint16x4_t) __builtin_neon_vpmax_v((int8x8_t)__rev0, (int8x8_t)__rev1, 17);
+ __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai int8x8_t vpmax_s8(int8x8_t __p0, int8x8_t __p1) {
+ int8x8_t __ret;
+ __ret = (int8x8_t) __builtin_neon_vpmax_v((int8x8_t)__p0, (int8x8_t)__p1, 0);
+ return __ret;
+}
+#else
+__ai int8x8_t vpmax_s8(int8x8_t __p0, int8x8_t __p1) {
+ int8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
+ int8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
+ int8x8_t __ret;
+ __ret = (int8x8_t) __builtin_neon_vpmax_v((int8x8_t)__rev0, (int8x8_t)__rev1, 0);
+ __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai float32x2_t vpmax_f32(float32x2_t __p0, float32x2_t __p1) {
+ float32x2_t __ret;
+ __ret = (float32x2_t) __builtin_neon_vpmax_v((int8x8_t)__p0, (int8x8_t)__p1, 9);
+ return __ret;
+}
+#else
+__ai float32x2_t vpmax_f32(float32x2_t __p0, float32x2_t __p1) {
+ float32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
+ float32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
+ float32x2_t __ret;
+ __ret = (float32x2_t) __builtin_neon_vpmax_v((int8x8_t)__rev0, (int8x8_t)__rev1, 9);
+ __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai int32x2_t vpmax_s32(int32x2_t __p0, int32x2_t __p1) {
+ int32x2_t __ret;
+ __ret = (int32x2_t) __builtin_neon_vpmax_v((int8x8_t)__p0, (int8x8_t)__p1, 2);
+ return __ret;
+}
+#else
+__ai int32x2_t vpmax_s32(int32x2_t __p0, int32x2_t __p1) {
+ int32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
+ int32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
+ int32x2_t __ret;
+ __ret = (int32x2_t) __builtin_neon_vpmax_v((int8x8_t)__rev0, (int8x8_t)__rev1, 2);
+ __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai int16x4_t vpmax_s16(int16x4_t __p0, int16x4_t __p1) {
+ int16x4_t __ret;
+ __ret = (int16x4_t) __builtin_neon_vpmax_v((int8x8_t)__p0, (int8x8_t)__p1, 1);
+ return __ret;
+}
+#else
+__ai int16x4_t vpmax_s16(int16x4_t __p0, int16x4_t __p1) {
+ int16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
+ int16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
+ int16x4_t __ret;
+ __ret = (int16x4_t) __builtin_neon_vpmax_v((int8x8_t)__rev0, (int8x8_t)__rev1, 1);
+ __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
+ return __ret;
+}
+#endif
+
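+/* vpmin_*: Vector Pairwise Minimum (VPMIN), the pairwise counterpart of
+ * vpmax. */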
+#ifdef __LITTLE_ENDIAN__
+__ai uint8x8_t vpmin_u8(uint8x8_t __p0, uint8x8_t __p1) {
+ uint8x8_t __ret;
+ __ret = (uint8x8_t) __builtin_neon_vpmin_v((int8x8_t)__p0, (int8x8_t)__p1, 16);
+ return __ret;
+}
+#else
+__ai uint8x8_t vpmin_u8(uint8x8_t __p0, uint8x8_t __p1) {
+ uint8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
+ uint8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
+ uint8x8_t __ret;
+ __ret = (uint8x8_t) __builtin_neon_vpmin_v((int8x8_t)__rev0, (int8x8_t)__rev1, 16);
+ __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint32x2_t vpmin_u32(uint32x2_t __p0, uint32x2_t __p1) {
+ uint32x2_t __ret;
+ __ret = (uint32x2_t) __builtin_neon_vpmin_v((int8x8_t)__p0, (int8x8_t)__p1, 18);
+ return __ret;
+}
+#else
+__ai uint32x2_t vpmin_u32(uint32x2_t __p0, uint32x2_t __p1) {
+ uint32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
+ uint32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
+ uint32x2_t __ret;
+ __ret = (uint32x2_t) __builtin_neon_vpmin_v((int8x8_t)__rev0, (int8x8_t)__rev1, 18);
+ __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint16x4_t vpmin_u16(uint16x4_t __p0, uint16x4_t __p1) {
+ uint16x4_t __ret;
+ __ret = (uint16x4_t) __builtin_neon_vpmin_v((int8x8_t)__p0, (int8x8_t)__p1, 17);
+ return __ret;
+}
+#else
+__ai uint16x4_t vpmin_u16(uint16x4_t __p0, uint16x4_t __p1) {
+ uint16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
+ uint16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
+ uint16x4_t __ret;
+ __ret = (uint16x4_t) __builtin_neon_vpmin_v((int8x8_t)__rev0, (int8x8_t)__rev1, 17);
+ __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai int8x8_t vpmin_s8(int8x8_t __p0, int8x8_t __p1) {
+ int8x8_t __ret;
+ __ret = (int8x8_t) __builtin_neon_vpmin_v((int8x8_t)__p0, (int8x8_t)__p1, 0);
+ return __ret;
+}
+#else
+__ai int8x8_t vpmin_s8(int8x8_t __p0, int8x8_t __p1) {
+ int8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
+ int8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
+ int8x8_t __ret;
+ __ret = (int8x8_t) __builtin_neon_vpmin_v((int8x8_t)__rev0, (int8x8_t)__rev1, 0);
+ __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai float32x2_t vpmin_f32(float32x2_t __p0, float32x2_t __p1) {
+ float32x2_t __ret;
+ __ret = (float32x2_t) __builtin_neon_vpmin_v((int8x8_t)__p0, (int8x8_t)__p1, 9);
+ return __ret;
+}
+#else
+__ai float32x2_t vpmin_f32(float32x2_t __p0, float32x2_t __p1) {
+ float32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
+ float32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
+ float32x2_t __ret;
+ __ret = (float32x2_t) __builtin_neon_vpmin_v((int8x8_t)__rev0, (int8x8_t)__rev1, 9);
+ __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai int32x2_t vpmin_s32(int32x2_t __p0, int32x2_t __p1) {
+ int32x2_t __ret;
+ __ret = (int32x2_t) __builtin_neon_vpmin_v((int8x8_t)__p0, (int8x8_t)__p1, 2);
+ return __ret;
+}
+#else
+__ai int32x2_t vpmin_s32(int32x2_t __p0, int32x2_t __p1) {
+ int32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
+ int32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
+ int32x2_t __ret;
+ __ret = (int32x2_t) __builtin_neon_vpmin_v((int8x8_t)__rev0, (int8x8_t)__rev1, 2);
+ __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai int16x4_t vpmin_s16(int16x4_t __p0, int16x4_t __p1) {
+ int16x4_t __ret;
+ __ret = (int16x4_t) __builtin_neon_vpmin_v((int8x8_t)__p0, (int8x8_t)__p1, 1);
+ return __ret;
+}
+#else
+__ai int16x4_t vpmin_s16(int16x4_t __p0, int16x4_t __p1) {
+ int16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
+ int16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
+ int16x4_t __ret;
+ __ret = (int16x4_t) __builtin_neon_vpmin_v((int8x8_t)__rev0, (int8x8_t)__rev1, 1);
+ __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
+ return __ret;
+}
+#endif
+
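+/* vqabs[q]_*: Vector Saturating Absolute Value (VQABS). The most negative
+ * value saturates to the largest positive value instead of overflowing. */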
+#ifdef __LITTLE_ENDIAN__
+__ai int8x16_t vqabsq_s8(int8x16_t __p0) {
+ int8x16_t __ret;
+ __ret = (int8x16_t) __builtin_neon_vqabsq_v((int8x16_t)__p0, 32);
+ return __ret;
+}
+#else
+__ai int8x16_t vqabsq_s8(int8x16_t __p0) {
+ int8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
+ int8x16_t __ret;
+ __ret = (int8x16_t) __builtin_neon_vqabsq_v((int8x16_t)__rev0, 32);
+ __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai int32x4_t vqabsq_s32(int32x4_t __p0) {
+ int32x4_t __ret;
+ __ret = (int32x4_t) __builtin_neon_vqabsq_v((int8x16_t)__p0, 34);
+ return __ret;
+}
+#else
+__ai int32x4_t vqabsq_s32(int32x4_t __p0) {
+ int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
+ int32x4_t __ret;
+ __ret = (int32x4_t) __builtin_neon_vqabsq_v((int8x16_t)__rev0, 34);
+ __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai int16x8_t vqabsq_s16(int16x8_t __p0) {
+ int16x8_t __ret;
+ __ret = (int16x8_t) __builtin_neon_vqabsq_v((int8x16_t)__p0, 33);
+ return __ret;
+}
+#else
+__ai int16x8_t vqabsq_s16(int16x8_t __p0) {
+ int16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
+ int16x8_t __ret;
+ __ret = (int16x8_t) __builtin_neon_vqabsq_v((int8x16_t)__rev0, 33);
+ __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai int8x8_t vqabs_s8(int8x8_t __p0) {
+ int8x8_t __ret;
+ __ret = (int8x8_t) __builtin_neon_vqabs_v((int8x8_t)__p0, 0);
+ return __ret;
+}
+#else
+__ai int8x8_t vqabs_s8(int8x8_t __p0) {
+ int8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
+ int8x8_t __ret;
+ __ret = (int8x8_t) __builtin_neon_vqabs_v((int8x8_t)__rev0, 0);
+ __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai int32x2_t vqabs_s32(int32x2_t __p0) {
+ int32x2_t __ret;
+ __ret = (int32x2_t) __builtin_neon_vqabs_v((int8x8_t)__p0, 2);
+ return __ret;
+}
+#else
+__ai int32x2_t vqabs_s32(int32x2_t __p0) {
+ int32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
+ int32x2_t __ret;
+ __ret = (int32x2_t) __builtin_neon_vqabs_v((int8x8_t)__rev0, 2);
+ __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai int16x4_t vqabs_s16(int16x4_t __p0) {
+ int16x4_t __ret;
+ __ret = (int16x4_t) __builtin_neon_vqabs_v((int8x8_t)__p0, 1);
+ return __ret;
+}
+#else
+__ai int16x4_t vqabs_s16(int16x4_t __p0) {
+ int16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
+ int16x4_t __ret;
+ __ret = (int16x4_t) __builtin_neon_vqabs_v((int8x8_t)__rev0, 1);
+ __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
+ return __ret;
+}
+#endif
+
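+/* vqadd[q]_*: Vector Saturating Add (VQADD). Element-wise addition that
+ * clamps to the limits of the element type instead of wrapping. */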
+#ifdef __LITTLE_ENDIAN__
+__ai uint8x16_t vqaddq_u8(uint8x16_t __p0, uint8x16_t __p1) {
+ uint8x16_t __ret;
+ __ret = (uint8x16_t) __builtin_neon_vqaddq_v((int8x16_t)__p0, (int8x16_t)__p1, 48);
+ return __ret;
+}
+#else
+__ai uint8x16_t vqaddq_u8(uint8x16_t __p0, uint8x16_t __p1) {
+ uint8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
+ uint8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
+ uint8x16_t __ret;
+ __ret = (uint8x16_t) __builtin_neon_vqaddq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 48);
+ __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint32x4_t vqaddq_u32(uint32x4_t __p0, uint32x4_t __p1) {
+ uint32x4_t __ret;
+ __ret = (uint32x4_t) __builtin_neon_vqaddq_v((int8x16_t)__p0, (int8x16_t)__p1, 50);
+ return __ret;
+}
+#else
+__ai uint32x4_t vqaddq_u32(uint32x4_t __p0, uint32x4_t __p1) {
+ uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
+ uint32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
+ uint32x4_t __ret;
+ __ret = (uint32x4_t) __builtin_neon_vqaddq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 50);
+ __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint64x2_t vqaddq_u64(uint64x2_t __p0, uint64x2_t __p1) {
+ uint64x2_t __ret;
+ __ret = (uint64x2_t) __builtin_neon_vqaddq_v((int8x16_t)__p0, (int8x16_t)__p1, 51);
+ return __ret;
+}
+#else
+__ai uint64x2_t vqaddq_u64(uint64x2_t __p0, uint64x2_t __p1) {
+ uint64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
+ uint64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
+ uint64x2_t __ret;
+ __ret = (uint64x2_t) __builtin_neon_vqaddq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 51);
+ __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint16x8_t vqaddq_u16(uint16x8_t __p0, uint16x8_t __p1) {
+ uint16x8_t __ret;
+ __ret = (uint16x8_t) __builtin_neon_vqaddq_v((int8x16_t)__p0, (int8x16_t)__p1, 49);
+ return __ret;
+}
+#else
+__ai uint16x8_t vqaddq_u16(uint16x8_t __p0, uint16x8_t __p1) {
+ uint16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
+ uint16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
+ uint16x8_t __ret;
+ __ret = (uint16x8_t) __builtin_neon_vqaddq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 49);
+ __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai int8x16_t vqaddq_s8(int8x16_t __p0, int8x16_t __p1) {
+ int8x16_t __ret;
+ __ret = (int8x16_t) __builtin_neon_vqaddq_v((int8x16_t)__p0, (int8x16_t)__p1, 32);
+ return __ret;
+}
+#else
+__ai int8x16_t vqaddq_s8(int8x16_t __p0, int8x16_t __p1) {
+ int8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
+ int8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
+ int8x16_t __ret;
+ __ret = (int8x16_t) __builtin_neon_vqaddq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 32);
+ __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai int32x4_t vqaddq_s32(int32x4_t __p0, int32x4_t __p1) {
+ int32x4_t __ret;
+ __ret = (int32x4_t) __builtin_neon_vqaddq_v((int8x16_t)__p0, (int8x16_t)__p1, 34);
+ return __ret;
+}
+#else
+__ai int32x4_t vqaddq_s32(int32x4_t __p0, int32x4_t __p1) {
+ int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
+ int32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
+ int32x4_t __ret;
+ __ret = (int32x4_t) __builtin_neon_vqaddq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 34);
+ __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
+ return __ret;
+}
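+/* __noswap_ variants expect operands already in reversed lane order; other
+ * big-endian implementations call them so that values reversed once are not
+ * reversed again. */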
+__ai int32x4_t __noswap_vqaddq_s32(int32x4_t __p0, int32x4_t __p1) {
+ int32x4_t __ret;
+ __ret = (int32x4_t) __builtin_neon_vqaddq_v((int8x16_t)__p0, (int8x16_t)__p1, 34);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai int64x2_t vqaddq_s64(int64x2_t __p0, int64x2_t __p1) {
+ int64x2_t __ret;
+ __ret = (int64x2_t) __builtin_neon_vqaddq_v((int8x16_t)__p0, (int8x16_t)__p1, 35);
+ return __ret;
+}
+#else
+__ai int64x2_t vqaddq_s64(int64x2_t __p0, int64x2_t __p1) {
+ int64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
+ int64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
+ int64x2_t __ret;
+ __ret = (int64x2_t) __builtin_neon_vqaddq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 35);
+ __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai int16x8_t vqaddq_s16(int16x8_t __p0, int16x8_t __p1) {
+ int16x8_t __ret;
+ __ret = (int16x8_t) __builtin_neon_vqaddq_v((int8x16_t)__p0, (int8x16_t)__p1, 33);
+ return __ret;
+}
+#else
+__ai int16x8_t vqaddq_s16(int16x8_t __p0, int16x8_t __p1) {
+ int16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
+ int16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
+ int16x8_t __ret;
+ __ret = (int16x8_t) __builtin_neon_vqaddq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 33);
+ __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
+ return __ret;
+}
+__ai int16x8_t __noswap_vqaddq_s16(int16x8_t __p0, int16x8_t __p1) {
+ int16x8_t __ret;
+ __ret = (int16x8_t) __builtin_neon_vqaddq_v((int8x16_t)__p0, (int8x16_t)__p1, 33);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint8x8_t vqadd_u8(uint8x8_t __p0, uint8x8_t __p1) {
+ uint8x8_t __ret;
+ __ret = (uint8x8_t) __builtin_neon_vqadd_v((int8x8_t)__p0, (int8x8_t)__p1, 16);
+ return __ret;
+}
+#else
+__ai uint8x8_t vqadd_u8(uint8x8_t __p0, uint8x8_t __p1) {
+ uint8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
+ uint8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
+ uint8x8_t __ret;
+ __ret = (uint8x8_t) __builtin_neon_vqadd_v((int8x8_t)__rev0, (int8x8_t)__rev1, 16);
+ __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint32x2_t vqadd_u32(uint32x2_t __p0, uint32x2_t __p1) {
+ uint32x2_t __ret;
+ __ret = (uint32x2_t) __builtin_neon_vqadd_v((int8x8_t)__p0, (int8x8_t)__p1, 18);
+ return __ret;
+}
+#else
+__ai uint32x2_t vqadd_u32(uint32x2_t __p0, uint32x2_t __p1) {
+ uint32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
+ uint32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
+ uint32x2_t __ret;
+ __ret = (uint32x2_t) __builtin_neon_vqadd_v((int8x8_t)__rev0, (int8x8_t)__rev1, 18);
+ __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint64x1_t vqadd_u64(uint64x1_t __p0, uint64x1_t __p1) {
+ uint64x1_t __ret;
+ __ret = (uint64x1_t) __builtin_neon_vqadd_v((int8x8_t)__p0, (int8x8_t)__p1, 19);
+ return __ret;
+}
+#else
+__ai uint64x1_t vqadd_u64(uint64x1_t __p0, uint64x1_t __p1) {
+ uint64x1_t __ret;
+ __ret = (uint64x1_t) __builtin_neon_vqadd_v((int8x8_t)__p0, (int8x8_t)__p1, 19);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint16x4_t vqadd_u16(uint16x4_t __p0, uint16x4_t __p1) {
+ uint16x4_t __ret;
+ __ret = (uint16x4_t) __builtin_neon_vqadd_v((int8x8_t)__p0, (int8x8_t)__p1, 17);
+ return __ret;
+}
+#else
+__ai uint16x4_t vqadd_u16(uint16x4_t __p0, uint16x4_t __p1) {
+ uint16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
+ uint16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
+ uint16x4_t __ret;
+ __ret = (uint16x4_t) __builtin_neon_vqadd_v((int8x8_t)__rev0, (int8x8_t)__rev1, 17);
+ __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai int8x8_t vqadd_s8(int8x8_t __p0, int8x8_t __p1) {
+ int8x8_t __ret;
+ __ret = (int8x8_t) __builtin_neon_vqadd_v((int8x8_t)__p0, (int8x8_t)__p1, 0);
+ return __ret;
+}
+#else
+__ai int8x8_t vqadd_s8(int8x8_t __p0, int8x8_t __p1) {
+ int8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
+ int8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
+ int8x8_t __ret;
+ __ret = (int8x8_t) __builtin_neon_vqadd_v((int8x8_t)__rev0, (int8x8_t)__rev1, 0);
+ __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai int32x2_t vqadd_s32(int32x2_t __p0, int32x2_t __p1) {
+ int32x2_t __ret;
+ __ret = (int32x2_t) __builtin_neon_vqadd_v((int8x8_t)__p0, (int8x8_t)__p1, 2);
+ return __ret;
+}
+#else
+__ai int32x2_t vqadd_s32(int32x2_t __p0, int32x2_t __p1) {
+ int32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
+ int32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
+ int32x2_t __ret;
+ __ret = (int32x2_t) __builtin_neon_vqadd_v((int8x8_t)__rev0, (int8x8_t)__rev1, 2);
+ __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
+ return __ret;
+}
+__ai int32x2_t __noswap_vqadd_s32(int32x2_t __p0, int32x2_t __p1) {
+ int32x2_t __ret;
+ __ret = (int32x2_t) __builtin_neon_vqadd_v((int8x8_t)__p0, (int8x8_t)__p1, 2);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai int64x1_t vqadd_s64(int64x1_t __p0, int64x1_t __p1) {
+ int64x1_t __ret;
+ __ret = (int64x1_t) __builtin_neon_vqadd_v((int8x8_t)__p0, (int8x8_t)__p1, 3);
+ return __ret;
+}
+#else
+__ai int64x1_t vqadd_s64(int64x1_t __p0, int64x1_t __p1) {
+ int64x1_t __ret;
+ __ret = (int64x1_t) __builtin_neon_vqadd_v((int8x8_t)__p0, (int8x8_t)__p1, 3);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai int16x4_t vqadd_s16(int16x4_t __p0, int16x4_t __p1) {
+ int16x4_t __ret;
+ __ret = (int16x4_t) __builtin_neon_vqadd_v((int8x8_t)__p0, (int8x8_t)__p1, 1);
+ return __ret;
+}
+#else
+__ai int16x4_t vqadd_s16(int16x4_t __p0, int16x4_t __p1) {
+ int16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
+ int16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
+ int16x4_t __ret;
+ __ret = (int16x4_t) __builtin_neon_vqadd_v((int8x8_t)__rev0, (int8x8_t)__rev1, 1);
+ __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
+ return __ret;
+}
+__ai int16x4_t __noswap_vqadd_s16(int16x4_t __p0, int16x4_t __p1) {
+ int16x4_t __ret;
+ __ret = (int16x4_t) __builtin_neon_vqadd_v((int8x8_t)__p0, (int8x8_t)__p1, 1);
+ return __ret;
+}
+#endif
+
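+/* vqdmlal_*: Vector Saturating Doubling Multiply-Accumulate Long (VQDMLAL).
+ * Computes __p0 + saturate(2 * __p1 * __p2) with widened result lanes. */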
+#ifdef __LITTLE_ENDIAN__
+__ai int64x2_t vqdmlal_s32(int64x2_t __p0, int32x2_t __p1, int32x2_t __p2) {
+ int64x2_t __ret;
+ __ret = (int64x2_t) __builtin_neon_vqdmlal_v((int8x16_t)__p0, (int8x8_t)__p1, (int8x8_t)__p2, 35);
+ return __ret;
+}
+#else
+__ai int64x2_t vqdmlal_s32(int64x2_t __p0, int32x2_t __p1, int32x2_t __p2) {
+ int64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
+ int32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
+ int32x2_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 1, 0);
+ int64x2_t __ret;
+ __ret = (int64x2_t) __builtin_neon_vqdmlal_v((int8x16_t)__rev0, (int8x8_t)__rev1, (int8x8_t)__rev2, 35);
+ __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
+ return __ret;
+}
+__ai int64x2_t __noswap_vqdmlal_s32(int64x2_t __p0, int32x2_t __p1, int32x2_t __p2) {
+ int64x2_t __ret;
+ __ret = (int64x2_t) __builtin_neon_vqdmlal_v((int8x16_t)__p0, (int8x8_t)__p1, (int8x8_t)__p2, 35);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai int32x4_t vqdmlal_s16(int32x4_t __p0, int16x4_t __p1, int16x4_t __p2) {
+ int32x4_t __ret;
+ __ret = (int32x4_t) __builtin_neon_vqdmlal_v((int8x16_t)__p0, (int8x8_t)__p1, (int8x8_t)__p2, 34);
+ return __ret;
+}
+#else
+__ai int32x4_t vqdmlal_s16(int32x4_t __p0, int16x4_t __p1, int16x4_t __p2) {
+ int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
+ int16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
+ int16x4_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 3, 2, 1, 0);
+ int32x4_t __ret;
+ __ret = (int32x4_t) __builtin_neon_vqdmlal_v((int8x16_t)__rev0, (int8x8_t)__rev1, (int8x8_t)__rev2, 34);
+ __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
+ return __ret;
+}
+__ai int32x4_t __noswap_vqdmlal_s16(int32x4_t __p0, int16x4_t __p1, int16x4_t __p2) {
+ int32x4_t __ret;
+ __ret = (int32x4_t) __builtin_neon_vqdmlal_v((int8x16_t)__p0, (int8x8_t)__p1, (int8x8_t)__p2, 34);
+ return __ret;
+}
+#endif
+
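+/* The _lane_ forms are macros rather than functions so the lane index stays
+ * a constant expression for __builtin_shufflevector, which splats the chosen
+ * lane before the multiply. */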
+#ifdef __LITTLE_ENDIAN__
+#define vqdmlal_lane_s32(__p0, __p1, __p2, __p3) __extension__ ({ \
+ int64x2_t __s0 = __p0; \
+ int32x2_t __s1 = __p1; \
+ int32x2_t __s2 = __p2; \
+ int64x2_t __ret; \
+ __ret = vqdmlal_s32(__s0, __s1, __builtin_shufflevector(__s2, __s2, __p3, __p3)); \
+ __ret; \
+})
+#else
+#define vqdmlal_lane_s32(__p0, __p1, __p2, __p3) __extension__ ({ \
+ int64x2_t __s0 = __p0; \
+ int32x2_t __s1 = __p1; \
+ int32x2_t __s2 = __p2; \
+ int64x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
+ int32x2_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \
+ int32x2_t __rev2; __rev2 = __builtin_shufflevector(__s2, __s2, 1, 0); \
+ int64x2_t __ret; \
+ __ret = __noswap_vqdmlal_s32(__rev0, __rev1, __builtin_shufflevector(__rev2, __rev2, __p3, __p3)); \
+ __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
+ __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vqdmlal_lane_s16(__p0, __p1, __p2, __p3) __extension__ ({ \
+ int32x4_t __s0 = __p0; \
+ int16x4_t __s1 = __p1; \
+ int16x4_t __s2 = __p2; \
+ int32x4_t __ret; \
+ __ret = vqdmlal_s16(__s0, __s1, __builtin_shufflevector(__s2, __s2, __p3, __p3, __p3, __p3)); \
+ __ret; \
+})
+#else
+#define vqdmlal_lane_s16(__p0, __p1, __p2, __p3) __extension__ ({ \
+ int32x4_t __s0 = __p0; \
+ int16x4_t __s1 = __p1; \
+ int16x4_t __s2 = __p2; \
+ int32x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
+ int16x4_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
+ int16x4_t __rev2; __rev2 = __builtin_shufflevector(__s2, __s2, 3, 2, 1, 0); \
+ int32x4_t __ret; \
+ __ret = __noswap_vqdmlal_s16(__rev0, __rev1, __builtin_shufflevector(__rev2, __rev2, __p3, __p3, __p3, __p3)); \
+ __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
+ __ret; \
+})
+#endif
+
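+/* The _n_ forms splat the scalar operand across a vector with a compound
+ * literal before the widening multiply. */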
+#ifdef __LITTLE_ENDIAN__
+__ai int64x2_t vqdmlal_n_s32(int64x2_t __p0, int32x2_t __p1, int32_t __p2) {
+ int64x2_t __ret;
+ __ret = (int64x2_t) __builtin_neon_vqdmlal_v((int8x16_t)__p0, (int8x8_t)__p1, (int8x8_t)(int32x2_t) {__p2, __p2}, 35);
+ return __ret;
+}
+#else
+__ai int64x2_t vqdmlal_n_s32(int64x2_t __p0, int32x2_t __p1, int32_t __p2) {
+ int64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
+ int32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
+ int64x2_t __ret;
+ __ret = (int64x2_t) __builtin_neon_vqdmlal_v((int8x16_t)__rev0, (int8x8_t)__rev1, (int8x8_t)(int32x2_t) {__p2, __p2}, 35);
+ __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
+ return __ret;
+}
+__ai int64x2_t __noswap_vqdmlal_n_s32(int64x2_t __p0, int32x2_t __p1, int32_t __p2) {
+ int64x2_t __ret;
+ __ret = (int64x2_t) __builtin_neon_vqdmlal_v((int8x16_t)__p0, (int8x8_t)__p1, (int8x8_t)(int32x2_t) {__p2, __p2}, 35);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai int32x4_t vqdmlal_n_s16(int32x4_t __p0, int16x4_t __p1, int16_t __p2) {
+ int32x4_t __ret;
+ __ret = (int32x4_t) __builtin_neon_vqdmlal_v((int8x16_t)__p0, (int8x8_t)__p1, (int8x8_t)(int16x4_t) {__p2, __p2, __p2, __p2}, 34);
+ return __ret;
+}
+#else
+__ai int32x4_t vqdmlal_n_s16(int32x4_t __p0, int16x4_t __p1, int16_t __p2) {
+ int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
+ int16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
+ int32x4_t __ret;
+ __ret = (int32x4_t) __builtin_neon_vqdmlal_v((int8x16_t)__rev0, (int8x8_t)__rev1, (int8x8_t)(int16x4_t) {__p2, __p2, __p2, __p2}, 34);
+ __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
+ return __ret;
+}
+__ai int32x4_t __noswap_vqdmlal_n_s16(int32x4_t __p0, int16x4_t __p1, int16_t __p2) {
+ int32x4_t __ret;
+ __ret = (int32x4_t) __builtin_neon_vqdmlal_v((int8x16_t)__p0, (int8x8_t)__p1, (int8x8_t)(int16x4_t) {__p2, __p2, __p2, __p2}, 34);
+ return __ret;
+}
+#endif
+
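+/* vqdmlsl_*: Vector Saturating Doubling Multiply-Subtract Long (VQDMLSL),
+ * the subtracting counterpart of vqdmlal. */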
+#ifdef __LITTLE_ENDIAN__
+__ai int64x2_t vqdmlsl_s32(int64x2_t __p0, int32x2_t __p1, int32x2_t __p2) {
+ int64x2_t __ret;
+ __ret = (int64x2_t) __builtin_neon_vqdmlsl_v((int8x16_t)__p0, (int8x8_t)__p1, (int8x8_t)__p2, 35);
+ return __ret;
+}
+#else
+__ai int64x2_t vqdmlsl_s32(int64x2_t __p0, int32x2_t __p1, int32x2_t __p2) {
+ int64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
+ int32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
+ int32x2_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 1, 0);
+ int64x2_t __ret;
+ __ret = (int64x2_t) __builtin_neon_vqdmlsl_v((int8x16_t)__rev0, (int8x8_t)__rev1, (int8x8_t)__rev2, 35);
+ __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
+ return __ret;
+}
+__ai int64x2_t __noswap_vqdmlsl_s32(int64x2_t __p0, int32x2_t __p1, int32x2_t __p2) {
+ int64x2_t __ret;
+ __ret = (int64x2_t) __builtin_neon_vqdmlsl_v((int8x16_t)__p0, (int8x8_t)__p1, (int8x8_t)__p2, 35);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai int32x4_t vqdmlsl_s16(int32x4_t __p0, int16x4_t __p1, int16x4_t __p2) {
+ int32x4_t __ret;
+ __ret = (int32x4_t) __builtin_neon_vqdmlsl_v((int8x16_t)__p0, (int8x8_t)__p1, (int8x8_t)__p2, 34);
+ return __ret;
+}
+#else
+__ai int32x4_t vqdmlsl_s16(int32x4_t __p0, int16x4_t __p1, int16x4_t __p2) {
+ int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
+ int16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
+ int16x4_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 3, 2, 1, 0);
+ int32x4_t __ret;
+ __ret = (int32x4_t) __builtin_neon_vqdmlsl_v((int8x16_t)__rev0, (int8x8_t)__rev1, (int8x8_t)__rev2, 34);
+ __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
+ return __ret;
+}
+__ai int32x4_t __noswap_vqdmlsl_s16(int32x4_t __p0, int16x4_t __p1, int16x4_t __p2) {
+ int32x4_t __ret;
+ __ret = (int32x4_t) __builtin_neon_vqdmlsl_v((int8x16_t)__p0, (int8x8_t)__p1, (int8x8_t)__p2, 34);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vqdmlsl_lane_s32(__p0, __p1, __p2, __p3) __extension__ ({ \
+ int64x2_t __s0 = __p0; \
+ int32x2_t __s1 = __p1; \
+ int32x2_t __s2 = __p2; \
+ int64x2_t __ret; \
+ __ret = vqdmlsl_s32(__s0, __s1, __builtin_shufflevector(__s2, __s2, __p3, __p3)); \
+ __ret; \
+})
+#else
+#define vqdmlsl_lane_s32(__p0, __p1, __p2, __p3) __extension__ ({ \
+ int64x2_t __s0 = __p0; \
+ int32x2_t __s1 = __p1; \
+ int32x2_t __s2 = __p2; \
+ int64x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
+ int32x2_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \
+ int32x2_t __rev2; __rev2 = __builtin_shufflevector(__s2, __s2, 1, 0); \
+ int64x2_t __ret; \
+ __ret = __noswap_vqdmlsl_s32(__rev0, __rev1, __builtin_shufflevector(__rev2, __rev2, __p3, __p3)); \
+ __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
+ __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vqdmlsl_lane_s16(__p0, __p1, __p2, __p3) __extension__ ({ \
+ int32x4_t __s0 = __p0; \
+ int16x4_t __s1 = __p1; \
+ int16x4_t __s2 = __p2; \
+ int32x4_t __ret; \
+ __ret = vqdmlsl_s16(__s0, __s1, __builtin_shufflevector(__s2, __s2, __p3, __p3, __p3, __p3)); \
+ __ret; \
+})
+#else
+#define vqdmlsl_lane_s16(__p0, __p1, __p2, __p3) __extension__ ({ \
+ int32x4_t __s0 = __p0; \
+ int16x4_t __s1 = __p1; \
+ int16x4_t __s2 = __p2; \
+ int32x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
+ int16x4_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
+ int16x4_t __rev2; __rev2 = __builtin_shufflevector(__s2, __s2, 3, 2, 1, 0); \
+ int32x4_t __ret; \
+ __ret = __noswap_vqdmlsl_s16(__rev0, __rev1, __builtin_shufflevector(__rev2, __rev2, __p3, __p3, __p3, __p3)); \
+ __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
+ __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai int64x2_t vqdmlsl_n_s32(int64x2_t __p0, int32x2_t __p1, int32_t __p2) {
+ int64x2_t __ret;
+ __ret = (int64x2_t) __builtin_neon_vqdmlsl_v((int8x16_t)__p0, (int8x8_t)__p1, (int8x8_t)(int32x2_t) {__p2, __p2}, 35);
+ return __ret;
+}
+#else
+__ai int64x2_t vqdmlsl_n_s32(int64x2_t __p0, int32x2_t __p1, int32_t __p2) {
+ int64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
+ int32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
+ int64x2_t __ret;
+ __ret = (int64x2_t) __builtin_neon_vqdmlsl_v((int8x16_t)__rev0, (int8x8_t)__rev1, (int8x8_t)(int32x2_t) {__p2, __p2}, 35);
+ __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
+ return __ret;
+}
+__ai int64x2_t __noswap_vqdmlsl_n_s32(int64x2_t __p0, int32x2_t __p1, int32_t __p2) {
+ int64x2_t __ret;
+ __ret = (int64x2_t) __builtin_neon_vqdmlsl_v((int8x16_t)__p0, (int8x8_t)__p1, (int8x8_t)(int32x2_t) {__p2, __p2}, 35);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai int32x4_t vqdmlsl_n_s16(int32x4_t __p0, int16x4_t __p1, int16_t __p2) {
+ int32x4_t __ret;
+ __ret = (int32x4_t) __builtin_neon_vqdmlsl_v((int8x16_t)__p0, (int8x8_t)__p1, (int8x8_t)(int16x4_t) {__p2, __p2, __p2, __p2}, 34);
+ return __ret;
+}
+#else
+__ai int32x4_t vqdmlsl_n_s16(int32x4_t __p0, int16x4_t __p1, int16_t __p2) {
+ int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
+ int16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
+ int32x4_t __ret;
+ __ret = (int32x4_t) __builtin_neon_vqdmlsl_v((int8x16_t)__rev0, (int8x8_t)__rev1, (int8x8_t)(int16x4_t) {__p2, __p2, __p2, __p2}, 34);
+ __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
+ return __ret;
+}
+__ai int32x4_t __noswap_vqdmlsl_n_s16(int32x4_t __p0, int16x4_t __p1, int16_t __p2) {
+ int32x4_t __ret;
+ __ret = (int32x4_t) __builtin_neon_vqdmlsl_v((int8x16_t)__p0, (int8x8_t)__p1, (int8x8_t)(int16x4_t) {__p2, __p2, __p2, __p2}, 34);
+ return __ret;
+}
+#endif
+
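+/* vqdmulh[q]_*: Vector Saturating Doubling Multiply Returning High Half
+ * (VQDMULH). Returns the most significant half of saturate(2 * __p0 * __p1). */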
+#ifdef __LITTLE_ENDIAN__
+__ai int32x4_t vqdmulhq_s32(int32x4_t __p0, int32x4_t __p1) {
+ int32x4_t __ret;
+ __ret = (int32x4_t) __builtin_neon_vqdmulhq_v((int8x16_t)__p0, (int8x16_t)__p1, 34);
+ return __ret;
+}
+#else
+__ai int32x4_t vqdmulhq_s32(int32x4_t __p0, int32x4_t __p1) {
+ int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
+ int32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
+ int32x4_t __ret;
+ __ret = (int32x4_t) __builtin_neon_vqdmulhq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 34);
+ __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
+ return __ret;
+}
+__ai int32x4_t __noswap_vqdmulhq_s32(int32x4_t __p0, int32x4_t __p1) {
+ int32x4_t __ret;
+ __ret = (int32x4_t) __builtin_neon_vqdmulhq_v((int8x16_t)__p0, (int8x16_t)__p1, 34);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai int16x8_t vqdmulhq_s16(int16x8_t __p0, int16x8_t __p1) {
+ int16x8_t __ret;
+ __ret = (int16x8_t) __builtin_neon_vqdmulhq_v((int8x16_t)__p0, (int8x16_t)__p1, 33);
+ return __ret;
+}
+#else
+__ai int16x8_t vqdmulhq_s16(int16x8_t __p0, int16x8_t __p1) {
+ int16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
+ int16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
+ int16x8_t __ret;
+ __ret = (int16x8_t) __builtin_neon_vqdmulhq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 33);
+ __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
+ return __ret;
+}
+__ai int16x8_t __noswap_vqdmulhq_s16(int16x8_t __p0, int16x8_t __p1) {
+ int16x8_t __ret;
+ __ret = (int16x8_t) __builtin_neon_vqdmulhq_v((int8x16_t)__p0, (int8x16_t)__p1, 33);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai int32x2_t vqdmulh_s32(int32x2_t __p0, int32x2_t __p1) {
+ int32x2_t __ret;
+ __ret = (int32x2_t) __builtin_neon_vqdmulh_v((int8x8_t)__p0, (int8x8_t)__p1, 2);
+ return __ret;
+}
+#else
+__ai int32x2_t vqdmulh_s32(int32x2_t __p0, int32x2_t __p1) {
+ int32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
+ int32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
+ int32x2_t __ret;
+ __ret = (int32x2_t) __builtin_neon_vqdmulh_v((int8x8_t)__rev0, (int8x8_t)__rev1, 2);
+ __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
+ return __ret;
+}
+__ai int32x2_t __noswap_vqdmulh_s32(int32x2_t __p0, int32x2_t __p1) {
+ int32x2_t __ret;
+ __ret = (int32x2_t) __builtin_neon_vqdmulh_v((int8x8_t)__p0, (int8x8_t)__p1, 2);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai int16x4_t vqdmulh_s16(int16x4_t __p0, int16x4_t __p1) {
+ int16x4_t __ret;
+ __ret = (int16x4_t) __builtin_neon_vqdmulh_v((int8x8_t)__p0, (int8x8_t)__p1, 1);
+ return __ret;
+}
+#else
+__ai int16x4_t vqdmulh_s16(int16x4_t __p0, int16x4_t __p1) {
+ int16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
+ int16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
+ int16x4_t __ret;
+ __ret = (int16x4_t) __builtin_neon_vqdmulh_v((int8x8_t)__rev0, (int8x8_t)__rev1, 1);
+ __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
+ return __ret;
+}
+__ai int16x4_t __noswap_vqdmulh_s16(int16x4_t __p0, int16x4_t __p1) {
+ int16x4_t __ret;
+ __ret = (int16x4_t) __builtin_neon_vqdmulh_v((int8x8_t)__p0, (int8x8_t)__p1, 1);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vqdmulhq_lane_s32(__p0, __p1, __p2) __extension__ ({ \
+ int32x4_t __s0 = __p0; \
+ int32x2_t __s1 = __p1; \
+ int32x4_t __ret; \
+ __ret = vqdmulhq_s32(__s0, __builtin_shufflevector(__s1, __s1, __p2, __p2, __p2, __p2)); \
+ __ret; \
+})
+#else
+#define vqdmulhq_lane_s32(__p0, __p1, __p2) __extension__ ({ \
+ int32x4_t __s0 = __p0; \
+ int32x2_t __s1 = __p1; \
+ int32x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
+ int32x2_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \
+ int32x4_t __ret; \
+ __ret = __noswap_vqdmulhq_s32(__rev0, __builtin_shufflevector(__rev1, __rev1, __p2, __p2, __p2, __p2)); \
+ __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
+ __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vqdmulhq_lane_s16(__p0, __p1, __p2) __extension__ ({ \
+ int16x8_t __s0 = __p0; \
+ int16x4_t __s1 = __p1; \
+ int16x8_t __ret; \
+ __ret = vqdmulhq_s16(__s0, __builtin_shufflevector(__s1, __s1, __p2, __p2, __p2, __p2, __p2, __p2, __p2, __p2)); \
+ __ret; \
+})
+#else
+#define vqdmulhq_lane_s16(__p0, __p1, __p2) __extension__ ({ \
+ int16x8_t __s0 = __p0; \
+ int16x4_t __s1 = __p1; \
+ int16x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
+ int16x4_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
+ int16x8_t __ret; \
+ __ret = __noswap_vqdmulhq_s16(__rev0, __builtin_shufflevector(__rev1, __rev1, __p2, __p2, __p2, __p2, __p2, __p2, __p2, __p2)); \
+ __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
+ __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vqdmulh_lane_s32(__p0, __p1, __p2) __extension__ ({ \
+ int32x2_t __s0 = __p0; \
+ int32x2_t __s1 = __p1; \
+ int32x2_t __ret; \
+ __ret = vqdmulh_s32(__s0, __builtin_shufflevector(__s1, __s1, __p2, __p2)); \
+ __ret; \
+})
+#else
+#define vqdmulh_lane_s32(__p0, __p1, __p2) __extension__ ({ \
+ int32x2_t __s0 = __p0; \
+ int32x2_t __s1 = __p1; \
+ int32x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
+ int32x2_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \
+ int32x2_t __ret; \
+ __ret = __noswap_vqdmulh_s32(__rev0, __builtin_shufflevector(__rev1, __rev1, __p2, __p2)); \
+ __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
+ __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vqdmulh_lane_s16(__p0, __p1, __p2) __extension__ ({ \
+ int16x4_t __s0 = __p0; \
+ int16x4_t __s1 = __p1; \
+ int16x4_t __ret; \
+ __ret = vqdmulh_s16(__s0, __builtin_shufflevector(__s1, __s1, __p2, __p2, __p2, __p2)); \
+ __ret; \
+})
+#else
+#define vqdmulh_lane_s16(__p0, __p1, __p2) __extension__ ({ \
+ int16x4_t __s0 = __p0; \
+ int16x4_t __s1 = __p1; \
+ int16x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
+ int16x4_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
+ int16x4_t __ret; \
+ __ret = __noswap_vqdmulh_s16(__rev0, __builtin_shufflevector(__rev1, __rev1, __p2, __p2, __p2, __p2)); \
+ __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
+ __ret; \
+})
+#endif
+
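+/* The _n forms broadcast a scalar second operand via a compound literal
+ * instead of a lane splat. */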
+#ifdef __LITTLE_ENDIAN__
+__ai int32x4_t vqdmulhq_n_s32(int32x4_t __p0, int32_t __p1) {
+ int32x4_t __ret;
+ __ret = (int32x4_t) __builtin_neon_vqdmulhq_v((int8x16_t)__p0, (int8x16_t)(int32x4_t) {__p1, __p1, __p1, __p1}, 34);
+ return __ret;
+}
+#else
+__ai int32x4_t vqdmulhq_n_s32(int32x4_t __p0, int32_t __p1) {
+ int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
+ int32x4_t __ret;
+ __ret = (int32x4_t) __builtin_neon_vqdmulhq_v((int8x16_t)__rev0, (int8x16_t)(int32x4_t) {__p1, __p1, __p1, __p1}, 34);
+ __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai int16x8_t vqdmulhq_n_s16(int16x8_t __p0, int16_t __p1) {
+ int16x8_t __ret;
+ __ret = (int16x8_t) __builtin_neon_vqdmulhq_v((int8x16_t)__p0, (int8x16_t)(int16x8_t) {__p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1}, 33);
+ return __ret;
+}
+#else
+__ai int16x8_t vqdmulhq_n_s16(int16x8_t __p0, int16_t __p1) {
+ int16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
+ int16x8_t __ret;
+ __ret = (int16x8_t) __builtin_neon_vqdmulhq_v((int8x16_t)__rev0, (int8x16_t)(int16x8_t) {__p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1}, 33);
+ __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai int32x2_t vqdmulh_n_s32(int32x2_t __p0, int32_t __p1) {
+ int32x2_t __ret;
+ __ret = (int32x2_t) __builtin_neon_vqdmulh_v((int8x8_t)__p0, (int8x8_t)(int32x2_t) {__p1, __p1}, 2);
+ return __ret;
+}
+#else
+__ai int32x2_t vqdmulh_n_s32(int32x2_t __p0, int32_t __p1) {
+ int32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
+ int32x2_t __ret;
+ __ret = (int32x2_t) __builtin_neon_vqdmulh_v((int8x8_t)__rev0, (int8x8_t)(int32x2_t) {__p1, __p1}, 2);
+ __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai int16x4_t vqdmulh_n_s16(int16x4_t __p0, int16_t __p1) {
+ int16x4_t __ret;
+ __ret = (int16x4_t) __builtin_neon_vqdmulh_v((int8x8_t)__p0, (int8x8_t)(int16x4_t) {__p1, __p1, __p1, __p1}, 1);
+ return __ret;
+}
+#else
+__ai int16x4_t vqdmulh_n_s16(int16x4_t __p0, int16_t __p1) {
+ int16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
+ int16x4_t __ret;
+ __ret = (int16x4_t) __builtin_neon_vqdmulh_v((int8x8_t)__rev0, (int8x8_t)(int16x4_t) {__p1, __p1, __p1, __p1}, 1);
+ __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
+ return __ret;
+}
+#endif
+
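+/* vqdmull: saturating doubling multiply long; each product is widened, so
+ * the result element size doubles (s32 -> s64, s16 -> s32). */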
+#ifdef __LITTLE_ENDIAN__
+__ai int64x2_t vqdmull_s32(int32x2_t __p0, int32x2_t __p1) {
+ int64x2_t __ret;
+ __ret = (int64x2_t) __builtin_neon_vqdmull_v((int8x8_t)__p0, (int8x8_t)__p1, 35);
+ return __ret;
+}
+#else
+__ai int64x2_t vqdmull_s32(int32x2_t __p0, int32x2_t __p1) {
+ int32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
+ int32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
+ int64x2_t __ret;
+ __ret = (int64x2_t) __builtin_neon_vqdmull_v((int8x8_t)__rev0, (int8x8_t)__rev1, 35);
+ __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
+ return __ret;
+}
+__ai int64x2_t __noswap_vqdmull_s32(int32x2_t __p0, int32x2_t __p1) {
+ int64x2_t __ret;
+ __ret = (int64x2_t) __builtin_neon_vqdmull_v((int8x8_t)__p0, (int8x8_t)__p1, 35);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai int32x4_t vqdmull_s16(int16x4_t __p0, int16x4_t __p1) {
+ int32x4_t __ret;
+ __ret = (int32x4_t) __builtin_neon_vqdmull_v((int8x8_t)__p0, (int8x8_t)__p1, 34);
+ return __ret;
+}
+#else
+__ai int32x4_t vqdmull_s16(int16x4_t __p0, int16x4_t __p1) {
+ int16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
+ int16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
+ int32x4_t __ret;
+ __ret = (int32x4_t) __builtin_neon_vqdmull_v((int8x8_t)__rev0, (int8x8_t)__rev1, 34);
+ __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
+ return __ret;
+}
+__ai int32x4_t __noswap_vqdmull_s16(int16x4_t __p0, int16x4_t __p1) {
+ int32x4_t __ret;
+ __ret = (int32x4_t) __builtin_neon_vqdmull_v((int8x8_t)__p0, (int8x8_t)__p1, 34);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vqdmull_lane_s32(__p0, __p1, __p2) __extension__ ({ \
+ int32x2_t __s0 = __p0; \
+ int32x2_t __s1 = __p1; \
+ int64x2_t __ret; \
+ __ret = vqdmull_s32(__s0, __builtin_shufflevector(__s1, __s1, __p2, __p2)); \
+ __ret; \
+})
+#else
+#define vqdmull_lane_s32(__p0, __p1, __p2) __extension__ ({ \
+ int32x2_t __s0 = __p0; \
+ int32x2_t __s1 = __p1; \
+ int32x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
+ int32x2_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \
+ int64x2_t __ret; \
+ __ret = __noswap_vqdmull_s32(__rev0, __builtin_shufflevector(__rev1, __rev1, __p2, __p2)); \
+ __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
+ __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vqdmull_lane_s16(__p0, __p1, __p2) __extension__ ({ \
+ int16x4_t __s0 = __p0; \
+ int16x4_t __s1 = __p1; \
+ int32x4_t __ret; \
+ __ret = vqdmull_s16(__s0, __builtin_shufflevector(__s1, __s1, __p2, __p2, __p2, __p2)); \
+ __ret; \
+})
+#else
+#define vqdmull_lane_s16(__p0, __p1, __p2) __extension__ ({ \
+ int16x4_t __s0 = __p0; \
+ int16x4_t __s1 = __p1; \
+ int16x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
+ int16x4_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
+ int32x4_t __ret; \
+ __ret = __noswap_vqdmull_s16(__rev0, __builtin_shufflevector(__rev1, __rev1, __p2, __p2, __p2, __p2)); \
+ __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
+ __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai int64x2_t vqdmull_n_s32(int32x2_t __p0, int32_t __p1) {
+ int64x2_t __ret;
+ __ret = (int64x2_t) __builtin_neon_vqdmull_v((int8x8_t)__p0, (int8x8_t)(int32x2_t) {__p1, __p1}, 35);
+ return __ret;
+}
+#else
+__ai int64x2_t vqdmull_n_s32(int32x2_t __p0, int32_t __p1) {
+ int32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
+ int64x2_t __ret;
+ __ret = (int64x2_t) __builtin_neon_vqdmull_v((int8x8_t)__rev0, (int8x8_t)(int32x2_t) {__p1, __p1}, 35);
+ __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
+ return __ret;
+}
+__ai int64x2_t __noswap_vqdmull_n_s32(int32x2_t __p0, int32_t __p1) {
+ int64x2_t __ret;
+ __ret = (int64x2_t) __builtin_neon_vqdmull_v((int8x8_t)__p0, (int8x8_t)(int32x2_t) {__p1, __p1}, 35);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai int32x4_t vqdmull_n_s16(int16x4_t __p0, int16_t __p1) {
+ int32x4_t __ret;
+ __ret = (int32x4_t) __builtin_neon_vqdmull_v((int8x8_t)__p0, (int8x8_t)(int16x4_t) {__p1, __p1, __p1, __p1}, 34);
+ return __ret;
+}
+#else
+__ai int32x4_t vqdmull_n_s16(int16x4_t __p0, int16_t __p1) {
+ int16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
+ int32x4_t __ret;
+ __ret = (int32x4_t) __builtin_neon_vqdmull_v((int8x8_t)__rev0, (int8x8_t)(int16x4_t) {__p1, __p1, __p1, __p1}, 34);
+ __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
+ return __ret;
+}
+__ai int32x4_t __noswap_vqdmull_n_s16(int16x4_t __p0, int16_t __p1) {
+ int32x4_t __ret;
+ __ret = (int32x4_t) __builtin_neon_vqdmull_v((int8x8_t)__p0, (int8x8_t)(int16x4_t) {__p1, __p1, __p1, __p1}, 34);
+ return __ret;
+}
+#endif
+
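+/* vqmovn: saturating narrow; each element is halved in width, clamping
+ * values that do not fit the destination type. */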
+#ifdef __LITTLE_ENDIAN__
+__ai uint16x4_t vqmovn_u32(uint32x4_t __p0) {
+ uint16x4_t __ret;
+ __ret = (uint16x4_t) __builtin_neon_vqmovn_v((int8x16_t)__p0, 17);
+ return __ret;
+}
+#else
+__ai uint16x4_t vqmovn_u32(uint32x4_t __p0) {
+ uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
+ uint16x4_t __ret;
+ __ret = (uint16x4_t) __builtin_neon_vqmovn_v((int8x16_t)__rev0, 17);
+ __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
+ return __ret;
+}
+__ai uint16x4_t __noswap_vqmovn_u32(uint32x4_t __p0) {
+ uint16x4_t __ret;
+ __ret = (uint16x4_t) __builtin_neon_vqmovn_v((int8x16_t)__p0, 17);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint32x2_t vqmovn_u64(uint64x2_t __p0) {
+ uint32x2_t __ret;
+ __ret = (uint32x2_t) __builtin_neon_vqmovn_v((int8x16_t)__p0, 18);
+ return __ret;
+}
+#else
+__ai uint32x2_t vqmovn_u64(uint64x2_t __p0) {
+ uint64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
+ uint32x2_t __ret;
+ __ret = (uint32x2_t) __builtin_neon_vqmovn_v((int8x16_t)__rev0, 18);
+ __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
+ return __ret;
+}
+__ai uint32x2_t __noswap_vqmovn_u64(uint64x2_t __p0) {
+ uint32x2_t __ret;
+ __ret = (uint32x2_t) __builtin_neon_vqmovn_v((int8x16_t)__p0, 18);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint8x8_t vqmovn_u16(uint16x8_t __p0) {
+ uint8x8_t __ret;
+ __ret = (uint8x8_t) __builtin_neon_vqmovn_v((int8x16_t)__p0, 16);
+ return __ret;
+}
+#else
+__ai uint8x8_t vqmovn_u16(uint16x8_t __p0) {
+ uint16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
+ uint8x8_t __ret;
+ __ret = (uint8x8_t) __builtin_neon_vqmovn_v((int8x16_t)__rev0, 16);
+ __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
+ return __ret;
+}
+__ai uint8x8_t __noswap_vqmovn_u16(uint16x8_t __p0) {
+ uint8x8_t __ret;
+ __ret = (uint8x8_t) __builtin_neon_vqmovn_v((int8x16_t)__p0, 16);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai int16x4_t vqmovn_s32(int32x4_t __p0) {
+ int16x4_t __ret;
+ __ret = (int16x4_t) __builtin_neon_vqmovn_v((int8x16_t)__p0, 1);
+ return __ret;
+}
+#else
+__ai int16x4_t vqmovn_s32(int32x4_t __p0) {
+ int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
+ int16x4_t __ret;
+ __ret = (int16x4_t) __builtin_neon_vqmovn_v((int8x16_t)__rev0, 1);
+ __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
+ return __ret;
+}
+__ai int16x4_t __noswap_vqmovn_s32(int32x4_t __p0) {
+ int16x4_t __ret;
+ __ret = (int16x4_t) __builtin_neon_vqmovn_v((int8x16_t)__p0, 1);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai int32x2_t vqmovn_s64(int64x2_t __p0) {
+ int32x2_t __ret;
+ __ret = (int32x2_t) __builtin_neon_vqmovn_v((int8x16_t)__p0, 2);
+ return __ret;
+}
+#else
+__ai int32x2_t vqmovn_s64(int64x2_t __p0) {
+ int64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
+ int32x2_t __ret;
+ __ret = (int32x2_t) __builtin_neon_vqmovn_v((int8x16_t)__rev0, 2);
+ __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
+ return __ret;
+}
+__ai int32x2_t __noswap_vqmovn_s64(int64x2_t __p0) {
+ int32x2_t __ret;
+ __ret = (int32x2_t) __builtin_neon_vqmovn_v((int8x16_t)__p0, 2);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai int8x8_t vqmovn_s16(int16x8_t __p0) {
+ int8x8_t __ret;
+ __ret = (int8x8_t) __builtin_neon_vqmovn_v((int8x16_t)__p0, 0);
+ return __ret;
+}
+#else
+__ai int8x8_t vqmovn_s16(int16x8_t __p0) {
+ int16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
+ int8x8_t __ret;
+ __ret = (int8x8_t) __builtin_neon_vqmovn_v((int8x16_t)__rev0, 0);
+ __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
+ return __ret;
+}
+__ai int8x8_t __noswap_vqmovn_s16(int16x8_t __p0) {
+ int8x8_t __ret;
+ __ret = (int8x8_t) __builtin_neon_vqmovn_v((int8x16_t)__p0, 0);
+ return __ret;
+}
+#endif
+
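+/* vqmovun: like vqmovn, but narrows signed input to an unsigned result,
+ * saturating negative values to zero. */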
+#ifdef __LITTLE_ENDIAN__
+__ai uint16x4_t vqmovun_s32(int32x4_t __p0) {
+ uint16x4_t __ret;
+ __ret = (uint16x4_t) __builtin_neon_vqmovun_v((int8x16_t)__p0, 17);
+ return __ret;
+}
+#else
+__ai uint16x4_t vqmovun_s32(int32x4_t __p0) {
+ int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
+ uint16x4_t __ret;
+ __ret = (uint16x4_t) __builtin_neon_vqmovun_v((int8x16_t)__rev0, 17);
+ __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
+ return __ret;
+}
+__ai uint16x4_t __noswap_vqmovun_s32(int32x4_t __p0) {
+ uint16x4_t __ret;
+ __ret = (uint16x4_t) __builtin_neon_vqmovun_v((int8x16_t)__p0, 17);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint32x2_t vqmovun_s64(int64x2_t __p0) {
+ uint32x2_t __ret;
+ __ret = (uint32x2_t) __builtin_neon_vqmovun_v((int8x16_t)__p0, 18);
+ return __ret;
+}
+#else
+__ai uint32x2_t vqmovun_s64(int64x2_t __p0) {
+ int64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
+ uint32x2_t __ret;
+ __ret = (uint32x2_t) __builtin_neon_vqmovun_v((int8x16_t)__rev0, 18);
+ __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
+ return __ret;
+}
+__ai uint32x2_t __noswap_vqmovun_s64(int64x2_t __p0) {
+ uint32x2_t __ret;
+ __ret = (uint32x2_t) __builtin_neon_vqmovun_v((int8x16_t)__p0, 18);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint8x8_t vqmovun_s16(int16x8_t __p0) {
+ uint8x8_t __ret;
+ __ret = (uint8x8_t) __builtin_neon_vqmovun_v((int8x16_t)__p0, 16);
+ return __ret;
+}
+#else
+__ai uint8x8_t vqmovun_s16(int16x8_t __p0) {
+ int16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
+ uint8x8_t __ret;
+ __ret = (uint8x8_t) __builtin_neon_vqmovun_v((int8x16_t)__rev0, 16);
+ __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
+ return __ret;
+}
+__ai uint8x8_t __noswap_vqmovun_s16(int16x8_t __p0) {
+ uint8x8_t __ret;
+ __ret = (uint8x8_t) __builtin_neon_vqmovun_v((int8x16_t)__p0, 16);
+ return __ret;
+}
+#endif
+
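+/* vqneg: saturating negate; the most negative value saturates to the
+ * maximum positive value instead of wrapping. */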
+#ifdef __LITTLE_ENDIAN__
+__ai int8x16_t vqnegq_s8(int8x16_t __p0) {
+ int8x16_t __ret;
+ __ret = (int8x16_t) __builtin_neon_vqnegq_v((int8x16_t)__p0, 32);
+ return __ret;
+}
+#else
+__ai int8x16_t vqnegq_s8(int8x16_t __p0) {
+ int8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
+ int8x16_t __ret;
+ __ret = (int8x16_t) __builtin_neon_vqnegq_v((int8x16_t)__rev0, 32);
+ __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai int32x4_t vqnegq_s32(int32x4_t __p0) {
+ int32x4_t __ret;
+ __ret = (int32x4_t) __builtin_neon_vqnegq_v((int8x16_t)__p0, 34);
+ return __ret;
+}
+#else
+__ai int32x4_t vqnegq_s32(int32x4_t __p0) {
+ int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
+ int32x4_t __ret;
+ __ret = (int32x4_t) __builtin_neon_vqnegq_v((int8x16_t)__rev0, 34);
+ __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai int16x8_t vqnegq_s16(int16x8_t __p0) {
+ int16x8_t __ret;
+ __ret = (int16x8_t) __builtin_neon_vqnegq_v((int8x16_t)__p0, 33);
+ return __ret;
+}
+#else
+__ai int16x8_t vqnegq_s16(int16x8_t __p0) {
+ int16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
+ int16x8_t __ret;
+ __ret = (int16x8_t) __builtin_neon_vqnegq_v((int8x16_t)__rev0, 33);
+ __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai int8x8_t vqneg_s8(int8x8_t __p0) {
+ int8x8_t __ret;
+ __ret = (int8x8_t) __builtin_neon_vqneg_v((int8x8_t)__p0, 0);
+ return __ret;
+}
+#else
+__ai int8x8_t vqneg_s8(int8x8_t __p0) {
+ int8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
+ int8x8_t __ret;
+ __ret = (int8x8_t) __builtin_neon_vqneg_v((int8x8_t)__rev0, 0);
+ __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai int32x2_t vqneg_s32(int32x2_t __p0) {
+ int32x2_t __ret;
+ __ret = (int32x2_t) __builtin_neon_vqneg_v((int8x8_t)__p0, 2);
+ return __ret;
+}
+#else
+__ai int32x2_t vqneg_s32(int32x2_t __p0) {
+ int32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
+ int32x2_t __ret;
+ __ret = (int32x2_t) __builtin_neon_vqneg_v((int8x8_t)__rev0, 2);
+ __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai int16x4_t vqneg_s16(int16x4_t __p0) {
+ int16x4_t __ret;
+ __ret = (int16x4_t) __builtin_neon_vqneg_v((int8x8_t)__p0, 1);
+ return __ret;
+}
+#else
+__ai int16x4_t vqneg_s16(int16x4_t __p0) {
+ int16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
+ int16x4_t __ret;
+ __ret = (int16x4_t) __builtin_neon_vqneg_v((int8x8_t)__rev0, 1);
+ __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
+ return __ret;
+}
+#endif
+
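+/* vqrdmulh: rounding variant of vqdmulh; a rounding constant is added to
+ * the doubled product before the high half is taken. */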
+#ifdef __LITTLE_ENDIAN__
+__ai int32x4_t vqrdmulhq_s32(int32x4_t __p0, int32x4_t __p1) {
+ int32x4_t __ret;
+ __ret = (int32x4_t) __builtin_neon_vqrdmulhq_v((int8x16_t)__p0, (int8x16_t)__p1, 34);
+ return __ret;
+}
+#else
+__ai int32x4_t vqrdmulhq_s32(int32x4_t __p0, int32x4_t __p1) {
+ int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
+ int32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
+ int32x4_t __ret;
+ __ret = (int32x4_t) __builtin_neon_vqrdmulhq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 34);
+ __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
+ return __ret;
+}
+__ai int32x4_t __noswap_vqrdmulhq_s32(int32x4_t __p0, int32x4_t __p1) {
+ int32x4_t __ret;
+ __ret = (int32x4_t) __builtin_neon_vqrdmulhq_v((int8x16_t)__p0, (int8x16_t)__p1, 34);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai int16x8_t vqrdmulhq_s16(int16x8_t __p0, int16x8_t __p1) {
+ int16x8_t __ret;
+ __ret = (int16x8_t) __builtin_neon_vqrdmulhq_v((int8x16_t)__p0, (int8x16_t)__p1, 33);
+ return __ret;
+}
+#else
+__ai int16x8_t vqrdmulhq_s16(int16x8_t __p0, int16x8_t __p1) {
+ int16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
+ int16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
+ int16x8_t __ret;
+ __ret = (int16x8_t) __builtin_neon_vqrdmulhq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 33);
+ __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
+ return __ret;
+}
+__ai int16x8_t __noswap_vqrdmulhq_s16(int16x8_t __p0, int16x8_t __p1) {
+ int16x8_t __ret;
+ __ret = (int16x8_t) __builtin_neon_vqrdmulhq_v((int8x16_t)__p0, (int8x16_t)__p1, 33);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai int32x2_t vqrdmulh_s32(int32x2_t __p0, int32x2_t __p1) {
+ int32x2_t __ret;
+ __ret = (int32x2_t) __builtin_neon_vqrdmulh_v((int8x8_t)__p0, (int8x8_t)__p1, 2);
+ return __ret;
+}
+#else
+__ai int32x2_t vqrdmulh_s32(int32x2_t __p0, int32x2_t __p1) {
+ int32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
+ int32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
+ int32x2_t __ret;
+ __ret = (int32x2_t) __builtin_neon_vqrdmulh_v((int8x8_t)__rev0, (int8x8_t)__rev1, 2);
+ __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
+ return __ret;
+}
+__ai int32x2_t __noswap_vqrdmulh_s32(int32x2_t __p0, int32x2_t __p1) {
+ int32x2_t __ret;
+ __ret = (int32x2_t) __builtin_neon_vqrdmulh_v((int8x8_t)__p0, (int8x8_t)__p1, 2);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai int16x4_t vqrdmulh_s16(int16x4_t __p0, int16x4_t __p1) {
+ int16x4_t __ret;
+ __ret = (int16x4_t) __builtin_neon_vqrdmulh_v((int8x8_t)__p0, (int8x8_t)__p1, 1);
+ return __ret;
+}
+#else
+__ai int16x4_t vqrdmulh_s16(int16x4_t __p0, int16x4_t __p1) {
+ int16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
+ int16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
+ int16x4_t __ret;
+ __ret = (int16x4_t) __builtin_neon_vqrdmulh_v((int8x8_t)__rev0, (int8x8_t)__rev1, 1);
+ __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
+ return __ret;
+}
+__ai int16x4_t __noswap_vqrdmulh_s16(int16x4_t __p0, int16x4_t __p1) {
+ int16x4_t __ret;
+ __ret = (int16x4_t) __builtin_neon_vqrdmulh_v((int8x8_t)__p0, (int8x8_t)__p1, 1);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vqrdmulhq_lane_s32(__p0, __p1, __p2) __extension__ ({ \
+ int32x4_t __s0 = __p0; \
+ int32x2_t __s1 = __p1; \
+ int32x4_t __ret; \
+ __ret = vqrdmulhq_s32(__s0, __builtin_shufflevector(__s1, __s1, __p2, __p2, __p2, __p2)); \
+ __ret; \
+})
+#else
+#define vqrdmulhq_lane_s32(__p0, __p1, __p2) __extension__ ({ \
+ int32x4_t __s0 = __p0; \
+ int32x2_t __s1 = __p1; \
+ int32x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
+ int32x2_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \
+ int32x4_t __ret; \
+ __ret = __noswap_vqrdmulhq_s32(__rev0, __builtin_shufflevector(__rev1, __rev1, __p2, __p2, __p2, __p2)); \
+ __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
+ __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vqrdmulhq_lane_s16(__p0, __p1, __p2) __extension__ ({ \
+ int16x8_t __s0 = __p0; \
+ int16x4_t __s1 = __p1; \
+ int16x8_t __ret; \
+ __ret = vqrdmulhq_s16(__s0, __builtin_shufflevector(__s1, __s1, __p2, __p2, __p2, __p2, __p2, __p2, __p2, __p2)); \
+ __ret; \
+})
+#else
+#define vqrdmulhq_lane_s16(__p0, __p1, __p2) __extension__ ({ \
+ int16x8_t __s0 = __p0; \
+ int16x4_t __s1 = __p1; \
+ int16x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
+ int16x4_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
+ int16x8_t __ret; \
+ __ret = __noswap_vqrdmulhq_s16(__rev0, __builtin_shufflevector(__rev1, __rev1, __p2, __p2, __p2, __p2, __p2, __p2, __p2, __p2)); \
+ __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
+ __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vqrdmulh_lane_s32(__p0, __p1, __p2) __extension__ ({ \
+ int32x2_t __s0 = __p0; \
+ int32x2_t __s1 = __p1; \
+ int32x2_t __ret; \
+ __ret = vqrdmulh_s32(__s0, __builtin_shufflevector(__s1, __s1, __p2, __p2)); \
+ __ret; \
+})
+#else
+#define vqrdmulh_lane_s32(__p0, __p1, __p2) __extension__ ({ \
+ int32x2_t __s0 = __p0; \
+ int32x2_t __s1 = __p1; \
+ int32x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
+ int32x2_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \
+ int32x2_t __ret; \
+ __ret = __noswap_vqrdmulh_s32(__rev0, __builtin_shufflevector(__rev1, __rev1, __p2, __p2)); \
+ __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
+ __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vqrdmulh_lane_s16(__p0, __p1, __p2) __extension__ ({ \
+ int16x4_t __s0 = __p0; \
+ int16x4_t __s1 = __p1; \
+ int16x4_t __ret; \
+ __ret = vqrdmulh_s16(__s0, __builtin_shufflevector(__s1, __s1, __p2, __p2, __p2, __p2)); \
+ __ret; \
+})
+#else
+#define vqrdmulh_lane_s16(__p0, __p1, __p2) __extension__ ({ \
+ int16x4_t __s0 = __p0; \
+ int16x4_t __s1 = __p1; \
+ int16x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
+ int16x4_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
+ int16x4_t __ret; \
+ __ret = __noswap_vqrdmulh_s16(__rev0, __builtin_shufflevector(__rev1, __rev1, __p2, __p2, __p2, __p2)); \
+ __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
+ __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai int32x4_t vqrdmulhq_n_s32(int32x4_t __p0, int32_t __p1) {
+ int32x4_t __ret;
+ __ret = (int32x4_t) __builtin_neon_vqrdmulhq_v((int8x16_t)__p0, (int8x16_t)(int32x4_t) {__p1, __p1, __p1, __p1}, 34);
+ return __ret;
+}
+#else
+__ai int32x4_t vqrdmulhq_n_s32(int32x4_t __p0, int32_t __p1) {
+ int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
+ int32x4_t __ret;
+ __ret = (int32x4_t) __builtin_neon_vqrdmulhq_v((int8x16_t)__rev0, (int8x16_t)(int32x4_t) {__p1, __p1, __p1, __p1}, 34);
+ __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai int16x8_t vqrdmulhq_n_s16(int16x8_t __p0, int16_t __p1) {
+ int16x8_t __ret;
+ __ret = (int16x8_t) __builtin_neon_vqrdmulhq_v((int8x16_t)__p0, (int8x16_t)(int16x8_t) {__p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1}, 33);
+ return __ret;
+}
+#else
+__ai int16x8_t vqrdmulhq_n_s16(int16x8_t __p0, int16_t __p1) {
+ int16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
+ int16x8_t __ret;
+ __ret = (int16x8_t) __builtin_neon_vqrdmulhq_v((int8x16_t)__rev0, (int8x16_t)(int16x8_t) {__p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1}, 33);
+ __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai int32x2_t vqrdmulh_n_s32(int32x2_t __p0, int32_t __p1) {
+ int32x2_t __ret;
+ __ret = (int32x2_t) __builtin_neon_vqrdmulh_v((int8x8_t)__p0, (int8x8_t)(int32x2_t) {__p1, __p1}, 2);
+ return __ret;
+}
+#else
+__ai int32x2_t vqrdmulh_n_s32(int32x2_t __p0, int32_t __p1) {
+ int32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
+ int32x2_t __ret;
+ __ret = (int32x2_t) __builtin_neon_vqrdmulh_v((int8x8_t)__rev0, (int8x8_t)(int32x2_t) {__p1, __p1}, 2);
+ __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai int16x4_t vqrdmulh_n_s16(int16x4_t __p0, int16_t __p1) {
+ int16x4_t __ret;
+ __ret = (int16x4_t) __builtin_neon_vqrdmulh_v((int8x8_t)__p0, (int8x8_t)(int16x4_t) {__p1, __p1, __p1, __p1}, 1);
+ return __ret;
+}
+#else
+__ai int16x4_t vqrdmulh_n_s16(int16x4_t __p0, int16_t __p1) {
+ int16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
+ int16x4_t __ret;
+ __ret = (int16x4_t) __builtin_neon_vqrdmulh_v((int8x8_t)__rev0, (int8x8_t)(int16x4_t) {__p1, __p1, __p1, __p1}, 1);
+ __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
+ return __ret;
+}
+#endif
+
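+/* vqrshl: saturating rounding shift left by a per-lane signed count;
+ * negative counts shift right with rounding. */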
+#ifdef __LITTLE_ENDIAN__
+__ai uint8x16_t vqrshlq_u8(uint8x16_t __p0, int8x16_t __p1) {
+ uint8x16_t __ret;
+ __ret = (uint8x16_t) __builtin_neon_vqrshlq_v((int8x16_t)__p0, (int8x16_t)__p1, 48);
+ return __ret;
+}
+#else
+__ai uint8x16_t vqrshlq_u8(uint8x16_t __p0, int8x16_t __p1) {
+ uint8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
+ int8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
+ uint8x16_t __ret;
+ __ret = (uint8x16_t) __builtin_neon_vqrshlq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 48);
+ __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint32x4_t vqrshlq_u32(uint32x4_t __p0, int32x4_t __p1) {
+ uint32x4_t __ret;
+ __ret = (uint32x4_t) __builtin_neon_vqrshlq_v((int8x16_t)__p0, (int8x16_t)__p1, 50);
+ return __ret;
+}
+#else
+__ai uint32x4_t vqrshlq_u32(uint32x4_t __p0, int32x4_t __p1) {
+ uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
+ int32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
+ uint32x4_t __ret;
+ __ret = (uint32x4_t) __builtin_neon_vqrshlq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 50);
+ __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint64x2_t vqrshlq_u64(uint64x2_t __p0, int64x2_t __p1) {
+ uint64x2_t __ret;
+ __ret = (uint64x2_t) __builtin_neon_vqrshlq_v((int8x16_t)__p0, (int8x16_t)__p1, 51);
+ return __ret;
+}
+#else
+__ai uint64x2_t vqrshlq_u64(uint64x2_t __p0, int64x2_t __p1) {
+ uint64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
+ int64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
+ uint64x2_t __ret;
+ __ret = (uint64x2_t) __builtin_neon_vqrshlq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 51);
+ __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint16x8_t vqrshlq_u16(uint16x8_t __p0, int16x8_t __p1) {
+ uint16x8_t __ret;
+ __ret = (uint16x8_t) __builtin_neon_vqrshlq_v((int8x16_t)__p0, (int8x16_t)__p1, 49);
+ return __ret;
+}
+#else
+__ai uint16x8_t vqrshlq_u16(uint16x8_t __p0, int16x8_t __p1) {
+ uint16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
+ int16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
+ uint16x8_t __ret;
+ __ret = (uint16x8_t) __builtin_neon_vqrshlq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 49);
+ __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai int8x16_t vqrshlq_s8(int8x16_t __p0, int8x16_t __p1) {
+ int8x16_t __ret;
+ __ret = (int8x16_t) __builtin_neon_vqrshlq_v((int8x16_t)__p0, (int8x16_t)__p1, 32);
+ return __ret;
+}
+#else
+__ai int8x16_t vqrshlq_s8(int8x16_t __p0, int8x16_t __p1) {
+ int8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
+ int8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
+ int8x16_t __ret;
+ __ret = (int8x16_t) __builtin_neon_vqrshlq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 32);
+ __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai int32x4_t vqrshlq_s32(int32x4_t __p0, int32x4_t __p1) {
+ int32x4_t __ret;
+ __ret = (int32x4_t) __builtin_neon_vqrshlq_v((int8x16_t)__p0, (int8x16_t)__p1, 34);
+ return __ret;
+}
+#else
+__ai int32x4_t vqrshlq_s32(int32x4_t __p0, int32x4_t __p1) {
+ int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
+ int32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
+ int32x4_t __ret;
+ __ret = (int32x4_t) __builtin_neon_vqrshlq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 34);
+ __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai int64x2_t vqrshlq_s64(int64x2_t __p0, int64x2_t __p1) {
+ int64x2_t __ret;
+ __ret = (int64x2_t) __builtin_neon_vqrshlq_v((int8x16_t)__p0, (int8x16_t)__p1, 35);
+ return __ret;
+}
+#else
+__ai int64x2_t vqrshlq_s64(int64x2_t __p0, int64x2_t __p1) {
+ int64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
+ int64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
+ int64x2_t __ret;
+ __ret = (int64x2_t) __builtin_neon_vqrshlq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 35);
+ __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai int16x8_t vqrshlq_s16(int16x8_t __p0, int16x8_t __p1) {
+ int16x8_t __ret;
+ __ret = (int16x8_t) __builtin_neon_vqrshlq_v((int8x16_t)__p0, (int8x16_t)__p1, 33);
+ return __ret;
+}
+#else
+__ai int16x8_t vqrshlq_s16(int16x8_t __p0, int16x8_t __p1) {
+ int16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
+ int16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
+ int16x8_t __ret;
+ __ret = (int16x8_t) __builtin_neon_vqrshlq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 33);
+ __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint8x8_t vqrshl_u8(uint8x8_t __p0, int8x8_t __p1) {
+ uint8x8_t __ret;
+ __ret = (uint8x8_t) __builtin_neon_vqrshl_v((int8x8_t)__p0, (int8x8_t)__p1, 16);
+ return __ret;
+}
+#else
+__ai uint8x8_t vqrshl_u8(uint8x8_t __p0, int8x8_t __p1) {
+ uint8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
+ int8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
+ uint8x8_t __ret;
+ __ret = (uint8x8_t) __builtin_neon_vqrshl_v((int8x8_t)__rev0, (int8x8_t)__rev1, 16);
+ __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint32x2_t vqrshl_u32(uint32x2_t __p0, int32x2_t __p1) {
+ uint32x2_t __ret;
+ __ret = (uint32x2_t) __builtin_neon_vqrshl_v((int8x8_t)__p0, (int8x8_t)__p1, 18);
+ return __ret;
+}
+#else
+__ai uint32x2_t vqrshl_u32(uint32x2_t __p0, int32x2_t __p1) {
+ uint32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
+ int32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
+ uint32x2_t __ret;
+ __ret = (uint32x2_t) __builtin_neon_vqrshl_v((int8x8_t)__rev0, (int8x8_t)__rev1, 18);
+ __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint64x1_t vqrshl_u64(uint64x1_t __p0, int64x1_t __p1) {
+ uint64x1_t __ret;
+ __ret = (uint64x1_t) __builtin_neon_vqrshl_v((int8x8_t)__p0, (int8x8_t)__p1, 19);
+ return __ret;
+}
+#else
+__ai uint64x1_t vqrshl_u64(uint64x1_t __p0, int64x1_t __p1) {
+ uint64x1_t __ret;
+ __ret = (uint64x1_t) __builtin_neon_vqrshl_v((int8x8_t)__p0, (int8x8_t)__p1, 19);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint16x4_t vqrshl_u16(uint16x4_t __p0, int16x4_t __p1) {
+ uint16x4_t __ret;
+ __ret = (uint16x4_t) __builtin_neon_vqrshl_v((int8x8_t)__p0, (int8x8_t)__p1, 17);
+ return __ret;
+}
+#else
+__ai uint16x4_t vqrshl_u16(uint16x4_t __p0, int16x4_t __p1) {
+ uint16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
+ int16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
+ uint16x4_t __ret;
+ __ret = (uint16x4_t) __builtin_neon_vqrshl_v((int8x8_t)__rev0, (int8x8_t)__rev1, 17);
+ __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai int8x8_t vqrshl_s8(int8x8_t __p0, int8x8_t __p1) {
+ int8x8_t __ret;
+ __ret = (int8x8_t) __builtin_neon_vqrshl_v((int8x8_t)__p0, (int8x8_t)__p1, 0);
+ return __ret;
+}
+#else
+__ai int8x8_t vqrshl_s8(int8x8_t __p0, int8x8_t __p1) {
+ int8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
+ int8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
+ int8x8_t __ret;
+ __ret = (int8x8_t) __builtin_neon_vqrshl_v((int8x8_t)__rev0, (int8x8_t)__rev1, 0);
+ __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai int32x2_t vqrshl_s32(int32x2_t __p0, int32x2_t __p1) {
+ int32x2_t __ret;
+ __ret = (int32x2_t) __builtin_neon_vqrshl_v((int8x8_t)__p0, (int8x8_t)__p1, 2);
+ return __ret;
+}
+#else
+__ai int32x2_t vqrshl_s32(int32x2_t __p0, int32x2_t __p1) {
+ int32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
+ int32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
+ int32x2_t __ret;
+ __ret = (int32x2_t) __builtin_neon_vqrshl_v((int8x8_t)__rev0, (int8x8_t)__rev1, 2);
+ __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai int64x1_t vqrshl_s64(int64x1_t __p0, int64x1_t __p1) {
+ int64x1_t __ret;
+ __ret = (int64x1_t) __builtin_neon_vqrshl_v((int8x8_t)__p0, (int8x8_t)__p1, 3);
+ return __ret;
+}
+#else
+__ai int64x1_t vqrshl_s64(int64x1_t __p0, int64x1_t __p1) {
+ int64x1_t __ret;
+ __ret = (int64x1_t) __builtin_neon_vqrshl_v((int8x8_t)__p0, (int8x8_t)__p1, 3);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai int16x4_t vqrshl_s16(int16x4_t __p0, int16x4_t __p1) {
+ int16x4_t __ret;
+ __ret = (int16x4_t) __builtin_neon_vqrshl_v((int8x8_t)__p0, (int8x8_t)__p1, 1);
+ return __ret;
+}
+#else
+__ai int16x4_t vqrshl_s16(int16x4_t __p0, int16x4_t __p1) {
+ int16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
+ int16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
+ int16x4_t __ret;
+ __ret = (int16x4_t) __builtin_neon_vqrshl_v((int8x8_t)__rev0, (int8x8_t)__rev1, 1);
+ __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
+ return __ret;
+}
+#endif
+
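+/* vqrshrn_n: saturating rounding shift right by an immediate, narrowing
+ * the result. These are macros rather than __ai functions because the
+ * shift count must be a compile-time constant. */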
+#ifdef __LITTLE_ENDIAN__
+#define vqrshrn_n_u32(__p0, __p1) __extension__ ({ \
+ uint32x4_t __s0 = __p0; \
+ uint16x4_t __ret; \
+ __ret = (uint16x4_t) __builtin_neon_vqrshrn_n_v((int8x16_t)__s0, __p1, 17); \
+ __ret; \
+})
+#else
+#define vqrshrn_n_u32(__p0, __p1) __extension__ ({ \
+ uint32x4_t __s0 = __p0; \
+ uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
+ uint16x4_t __ret; \
+ __ret = (uint16x4_t) __builtin_neon_vqrshrn_n_v((int8x16_t)__rev0, __p1, 17); \
+ __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
+ __ret; \
+})
+#define __noswap_vqrshrn_n_u32(__p0, __p1) __extension__ ({ \
+ uint32x4_t __s0 = __p0; \
+ uint16x4_t __ret; \
+ __ret = (uint16x4_t) __builtin_neon_vqrshrn_n_v((int8x16_t)__s0, __p1, 17); \
+ __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vqrshrn_n_u64(__p0, __p1) __extension__ ({ \
+ uint64x2_t __s0 = __p0; \
+ uint32x2_t __ret; \
+ __ret = (uint32x2_t) __builtin_neon_vqrshrn_n_v((int8x16_t)__s0, __p1, 18); \
+ __ret; \
+})
+#else
+#define vqrshrn_n_u64(__p0, __p1) __extension__ ({ \
+ uint64x2_t __s0 = __p0; \
+ uint64x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
+ uint32x2_t __ret; \
+ __ret = (uint32x2_t) __builtin_neon_vqrshrn_n_v((int8x16_t)__rev0, __p1, 18); \
+ __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
+ __ret; \
+})
+#define __noswap_vqrshrn_n_u64(__p0, __p1) __extension__ ({ \
+ uint64x2_t __s0 = __p0; \
+ uint32x2_t __ret; \
+ __ret = (uint32x2_t) __builtin_neon_vqrshrn_n_v((int8x16_t)__s0, __p1, 18); \
+ __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vqrshrn_n_u16(__p0, __p1) __extension__ ({ \
+ uint16x8_t __s0 = __p0; \
+ uint8x8_t __ret; \
+ __ret = (uint8x8_t) __builtin_neon_vqrshrn_n_v((int8x16_t)__s0, __p1, 16); \
+ __ret; \
+})
+#else
+#define vqrshrn_n_u16(__p0, __p1) __extension__ ({ \
+ uint16x8_t __s0 = __p0; \
+ uint16x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
+ uint8x8_t __ret; \
+ __ret = (uint8x8_t) __builtin_neon_vqrshrn_n_v((int8x16_t)__rev0, __p1, 16); \
+ __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
+ __ret; \
+})
+#define __noswap_vqrshrn_n_u16(__p0, __p1) __extension__ ({ \
+ uint16x8_t __s0 = __p0; \
+ uint8x8_t __ret; \
+ __ret = (uint8x8_t) __builtin_neon_vqrshrn_n_v((int8x16_t)__s0, __p1, 16); \
+ __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vqrshrn_n_s32(__p0, __p1) __extension__ ({ \
+ int32x4_t __s0 = __p0; \
+ int16x4_t __ret; \
+ __ret = (int16x4_t) __builtin_neon_vqrshrn_n_v((int8x16_t)__s0, __p1, 1); \
+ __ret; \
+})
+#else
+#define vqrshrn_n_s32(__p0, __p1) __extension__ ({ \
+ int32x4_t __s0 = __p0; \
+ int32x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
+ int16x4_t __ret; \
+ __ret = (int16x4_t) __builtin_neon_vqrshrn_n_v((int8x16_t)__rev0, __p1, 1); \
+ __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
+ __ret; \
+})
+#define __noswap_vqrshrn_n_s32(__p0, __p1) __extension__ ({ \
+ int32x4_t __s0 = __p0; \
+ int16x4_t __ret; \
+ __ret = (int16x4_t) __builtin_neon_vqrshrn_n_v((int8x16_t)__s0, __p1, 1); \
+ __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vqrshrn_n_s64(__p0, __p1) __extension__ ({ \
+ int64x2_t __s0 = __p0; \
+ int32x2_t __ret; \
+ __ret = (int32x2_t) __builtin_neon_vqrshrn_n_v((int8x16_t)__s0, __p1, 2); \
+ __ret; \
+})
+#else
+#define vqrshrn_n_s64(__p0, __p1) __extension__ ({ \
+ int64x2_t __s0 = __p0; \
+ int64x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
+ int32x2_t __ret; \
+ __ret = (int32x2_t) __builtin_neon_vqrshrn_n_v((int8x16_t)__rev0, __p1, 2); \
+ __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
+ __ret; \
+})
+#define __noswap_vqrshrn_n_s64(__p0, __p1) __extension__ ({ \
+ int64x2_t __s0 = __p0; \
+ int32x2_t __ret; \
+ __ret = (int32x2_t) __builtin_neon_vqrshrn_n_v((int8x16_t)__s0, __p1, 2); \
+ __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vqrshrn_n_s16(__p0, __p1) __extension__ ({ \
+ int16x8_t __s0 = __p0; \
+ int8x8_t __ret; \
+ __ret = (int8x8_t) __builtin_neon_vqrshrn_n_v((int8x16_t)__s0, __p1, 0); \
+ __ret; \
+})
+#else
+#define vqrshrn_n_s16(__p0, __p1) __extension__ ({ \
+ int16x8_t __s0 = __p0; \
+ int16x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
+ int8x8_t __ret; \
+ __ret = (int8x8_t) __builtin_neon_vqrshrn_n_v((int8x16_t)__rev0, __p1, 0); \
+ __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
+ __ret; \
+})
+#define __noswap_vqrshrn_n_s16(__p0, __p1) __extension__ ({ \
+ int16x8_t __s0 = __p0; \
+ int8x8_t __ret; \
+ __ret = (int8x8_t) __builtin_neon_vqrshrn_n_v((int8x16_t)__s0, __p1, 0); \
+ __ret; \
+})
+#endif
+
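+/* vqrshrun_n: as vqrshrn_n, but signed input narrows to an unsigned
+ * result with unsigned saturation. */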
+#ifdef __LITTLE_ENDIAN__
+#define vqrshrun_n_s32(__p0, __p1) __extension__ ({ \
+ int32x4_t __s0 = __p0; \
+ uint16x4_t __ret; \
+ __ret = (uint16x4_t) __builtin_neon_vqrshrun_n_v((int8x16_t)__s0, __p1, 17); \
+ __ret; \
+})
+#else
+#define vqrshrun_n_s32(__p0, __p1) __extension__ ({ \
+ int32x4_t __s0 = __p0; \
+ int32x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
+ uint16x4_t __ret; \
+ __ret = (uint16x4_t) __builtin_neon_vqrshrun_n_v((int8x16_t)__rev0, __p1, 17); \
+ __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
+ __ret; \
+})
+#define __noswap_vqrshrun_n_s32(__p0, __p1) __extension__ ({ \
+ int32x4_t __s0 = __p0; \
+ uint16x4_t __ret; \
+ __ret = (uint16x4_t) __builtin_neon_vqrshrun_n_v((int8x16_t)__s0, __p1, 17); \
+ __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vqrshrun_n_s64(__p0, __p1) __extension__ ({ \
+ int64x2_t __s0 = __p0; \
+ uint32x2_t __ret; \
+ __ret = (uint32x2_t) __builtin_neon_vqrshrun_n_v((int8x16_t)__s0, __p1, 18); \
+ __ret; \
+})
+#else
+#define vqrshrun_n_s64(__p0, __p1) __extension__ ({ \
+ int64x2_t __s0 = __p0; \
+ int64x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
+ uint32x2_t __ret; \
+ __ret = (uint32x2_t) __builtin_neon_vqrshrun_n_v((int8x16_t)__rev0, __p1, 18); \
+ __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
+ __ret; \
+})
+#define __noswap_vqrshrun_n_s64(__p0, __p1) __extension__ ({ \
+ int64x2_t __s0 = __p0; \
+ uint32x2_t __ret; \
+ __ret = (uint32x2_t) __builtin_neon_vqrshrun_n_v((int8x16_t)__s0, __p1, 18); \
+ __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vqrshrun_n_s16(__p0, __p1) __extension__ ({ \
+ int16x8_t __s0 = __p0; \
+ uint8x8_t __ret; \
+ __ret = (uint8x8_t) __builtin_neon_vqrshrun_n_v((int8x16_t)__s0, __p1, 16); \
+ __ret; \
+})
+#else
+#define vqrshrun_n_s16(__p0, __p1) __extension__ ({ \
+ int16x8_t __s0 = __p0; \
+ int16x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
+ uint8x8_t __ret; \
+ __ret = (uint8x8_t) __builtin_neon_vqrshrun_n_v((int8x16_t)__rev0, __p1, 16); \
+ __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
+ __ret; \
+})
+#define __noswap_vqrshrun_n_s16(__p0, __p1) __extension__ ({ \
+ int16x8_t __s0 = __p0; \
+ uint8x8_t __ret; \
+ __ret = (uint8x8_t) __builtin_neon_vqrshrun_n_v((int8x16_t)__s0, __p1, 16); \
+ __ret; \
+})
+#endif
+
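+/* vqshl: saturating shift left by a per-lane signed count, without the
+ * rounding applied by vqrshl. */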
+#ifdef __LITTLE_ENDIAN__
+__ai uint8x16_t vqshlq_u8(uint8x16_t __p0, int8x16_t __p1) {
+ uint8x16_t __ret;
+ __ret = (uint8x16_t) __builtin_neon_vqshlq_v((int8x16_t)__p0, (int8x16_t)__p1, 48);
+ return __ret;
+}
+#else
+__ai uint8x16_t vqshlq_u8(uint8x16_t __p0, int8x16_t __p1) {
+ uint8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
+ int8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
+ uint8x16_t __ret;
+ __ret = (uint8x16_t) __builtin_neon_vqshlq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 48);
+ __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint32x4_t vqshlq_u32(uint32x4_t __p0, int32x4_t __p1) {
+ uint32x4_t __ret;
+ __ret = (uint32x4_t) __builtin_neon_vqshlq_v((int8x16_t)__p0, (int8x16_t)__p1, 50);
+ return __ret;
+}
+#else
+__ai uint32x4_t vqshlq_u32(uint32x4_t __p0, int32x4_t __p1) {
+ uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
+ int32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
+ uint32x4_t __ret;
+ __ret = (uint32x4_t) __builtin_neon_vqshlq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 50);
+ __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint64x2_t vqshlq_u64(uint64x2_t __p0, int64x2_t __p1) {
+ uint64x2_t __ret;
+ __ret = (uint64x2_t) __builtin_neon_vqshlq_v((int8x16_t)__p0, (int8x16_t)__p1, 51);
+ return __ret;
+}
+#else
+__ai uint64x2_t vqshlq_u64(uint64x2_t __p0, int64x2_t __p1) {
+ uint64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
+ int64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
+ uint64x2_t __ret;
+ __ret = (uint64x2_t) __builtin_neon_vqshlq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 51);
+ __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint16x8_t vqshlq_u16(uint16x8_t __p0, int16x8_t __p1) {
+ uint16x8_t __ret;
+ __ret = (uint16x8_t) __builtin_neon_vqshlq_v((int8x16_t)__p0, (int8x16_t)__p1, 49);
+ return __ret;
+}
+#else
+__ai uint16x8_t vqshlq_u16(uint16x8_t __p0, int16x8_t __p1) {
+ uint16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
+ int16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
+ uint16x8_t __ret;
+ __ret = (uint16x8_t) __builtin_neon_vqshlq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 49);
+ __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai int8x16_t vqshlq_s8(int8x16_t __p0, int8x16_t __p1) {
+ int8x16_t __ret;
+ __ret = (int8x16_t) __builtin_neon_vqshlq_v((int8x16_t)__p0, (int8x16_t)__p1, 32);
+ return __ret;
+}
+#else
+__ai int8x16_t vqshlq_s8(int8x16_t __p0, int8x16_t __p1) {
+ int8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
+ int8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
+ int8x16_t __ret;
+ __ret = (int8x16_t) __builtin_neon_vqshlq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 32);
+ __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai int32x4_t vqshlq_s32(int32x4_t __p0, int32x4_t __p1) {
+ int32x4_t __ret;
+ __ret = (int32x4_t) __builtin_neon_vqshlq_v((int8x16_t)__p0, (int8x16_t)__p1, 34);
+ return __ret;
+}
+#else
+__ai int32x4_t vqshlq_s32(int32x4_t __p0, int32x4_t __p1) {
+ int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
+ int32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
+ int32x4_t __ret;
+ __ret = (int32x4_t) __builtin_neon_vqshlq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 34);
+ __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai int64x2_t vqshlq_s64(int64x2_t __p0, int64x2_t __p1) {
+ int64x2_t __ret;
+ __ret = (int64x2_t) __builtin_neon_vqshlq_v((int8x16_t)__p0, (int8x16_t)__p1, 35);
+ return __ret;
+}
+#else
+__ai int64x2_t vqshlq_s64(int64x2_t __p0, int64x2_t __p1) {
+ int64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
+ int64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
+ int64x2_t __ret;
+ __ret = (int64x2_t) __builtin_neon_vqshlq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 35);
+ __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai int16x8_t vqshlq_s16(int16x8_t __p0, int16x8_t __p1) {
+ int16x8_t __ret;
+ __ret = (int16x8_t) __builtin_neon_vqshlq_v((int8x16_t)__p0, (int8x16_t)__p1, 33);
+ return __ret;
+}
+#else
+__ai int16x8_t vqshlq_s16(int16x8_t __p0, int16x8_t __p1) {
+ int16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
+ int16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
+ int16x8_t __ret;
+ __ret = (int16x8_t) __builtin_neon_vqshlq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 33);
+ __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint8x8_t vqshl_u8(uint8x8_t __p0, int8x8_t __p1) {
+ uint8x8_t __ret;
+ __ret = (uint8x8_t) __builtin_neon_vqshl_v((int8x8_t)__p0, (int8x8_t)__p1, 16);
+ return __ret;
+}
+#else
+__ai uint8x8_t vqshl_u8(uint8x8_t __p0, int8x8_t __p1) {
+ uint8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
+ int8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
+ uint8x8_t __ret;
+ __ret = (uint8x8_t) __builtin_neon_vqshl_v((int8x8_t)__rev0, (int8x8_t)__rev1, 16);
+ __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint32x2_t vqshl_u32(uint32x2_t __p0, int32x2_t __p1) {
+ uint32x2_t __ret;
+ __ret = (uint32x2_t) __builtin_neon_vqshl_v((int8x8_t)__p0, (int8x8_t)__p1, 18);
+ return __ret;
+}
+#else
+__ai uint32x2_t vqshl_u32(uint32x2_t __p0, int32x2_t __p1) {
+ uint32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
+ int32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
+ uint32x2_t __ret;
+ __ret = (uint32x2_t) __builtin_neon_vqshl_v((int8x8_t)__rev0, (int8x8_t)__rev1, 18);
+ __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint64x1_t vqshl_u64(uint64x1_t __p0, int64x1_t __p1) {
+ uint64x1_t __ret;
+ __ret = (uint64x1_t) __builtin_neon_vqshl_v((int8x8_t)__p0, (int8x8_t)__p1, 19);
+ return __ret;
+}
+#else
+__ai uint64x1_t vqshl_u64(uint64x1_t __p0, int64x1_t __p1) {
+ uint64x1_t __ret;
+ __ret = (uint64x1_t) __builtin_neon_vqshl_v((int8x8_t)__p0, (int8x8_t)__p1, 19);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint16x4_t vqshl_u16(uint16x4_t __p0, int16x4_t __p1) {
+ uint16x4_t __ret;
+ __ret = (uint16x4_t) __builtin_neon_vqshl_v((int8x8_t)__p0, (int8x8_t)__p1, 17);
+ return __ret;
+}
+#else
+__ai uint16x4_t vqshl_u16(uint16x4_t __p0, int16x4_t __p1) {
+ uint16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
+ int16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
+ uint16x4_t __ret;
+ __ret = (uint16x4_t) __builtin_neon_vqshl_v((int8x8_t)__rev0, (int8x8_t)__rev1, 17);
+ __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai int8x8_t vqshl_s8(int8x8_t __p0, int8x8_t __p1) {
+ int8x8_t __ret;
+ __ret = (int8x8_t) __builtin_neon_vqshl_v((int8x8_t)__p0, (int8x8_t)__p1, 0);
+ return __ret;
+}
+#else
+__ai int8x8_t vqshl_s8(int8x8_t __p0, int8x8_t __p1) {
+ int8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
+ int8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
+ int8x8_t __ret;
+ __ret = (int8x8_t) __builtin_neon_vqshl_v((int8x8_t)__rev0, (int8x8_t)__rev1, 0);
+ __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai int32x2_t vqshl_s32(int32x2_t __p0, int32x2_t __p1) {
+ int32x2_t __ret;
+ __ret = (int32x2_t) __builtin_neon_vqshl_v((int8x8_t)__p0, (int8x8_t)__p1, 2);
+ return __ret;
+}
+#else
+__ai int32x2_t vqshl_s32(int32x2_t __p0, int32x2_t __p1) {
+ int32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
+ int32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
+ int32x2_t __ret;
+ __ret = (int32x2_t) __builtin_neon_vqshl_v((int8x8_t)__rev0, (int8x8_t)__rev1, 2);
+ __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai int64x1_t vqshl_s64(int64x1_t __p0, int64x1_t __p1) {
+ int64x1_t __ret;
+ __ret = (int64x1_t) __builtin_neon_vqshl_v((int8x8_t)__p0, (int8x8_t)__p1, 3);
+ return __ret;
+}
+#else
+__ai int64x1_t vqshl_s64(int64x1_t __p0, int64x1_t __p1) {
+ int64x1_t __ret;
+ __ret = (int64x1_t) __builtin_neon_vqshl_v((int8x8_t)__p0, (int8x8_t)__p1, 3);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai int16x4_t vqshl_s16(int16x4_t __p0, int16x4_t __p1) {
+ int16x4_t __ret;
+ __ret = (int16x4_t) __builtin_neon_vqshl_v((int8x8_t)__p0, (int8x8_t)__p1, 1);
+ return __ret;
+}
+#else
+__ai int16x4_t vqshl_s16(int16x4_t __p0, int16x4_t __p1) {
+ int16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
+ int16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
+ int16x4_t __ret;
+ __ret = (int16x4_t) __builtin_neon_vqshl_v((int8x8_t)__rev0, (int8x8_t)__rev1, 1);
+ __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
+ return __ret;
+}
+#endif
+
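+/* vqshl(q)_n: saturating shift left by an immediate count; macro form for
+ * the constant-expression argument. */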
+#ifdef __LITTLE_ENDIAN__
+#define vqshlq_n_u8(__p0, __p1) __extension__ ({ \
+ uint8x16_t __s0 = __p0; \
+ uint8x16_t __ret; \
+ __ret = (uint8x16_t) __builtin_neon_vqshlq_n_v((int8x16_t)__s0, __p1, 48); \
+ __ret; \
+})
+#else
+#define vqshlq_n_u8(__p0, __p1) __extension__ ({ \
+ uint8x16_t __s0 = __p0; \
+ uint8x16_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
+ uint8x16_t __ret; \
+ __ret = (uint8x16_t) __builtin_neon_vqshlq_n_v((int8x16_t)__rev0, __p1, 48); \
+ __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
+ __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vqshlq_n_u32(__p0, __p1) __extension__ ({ \
+ uint32x4_t __s0 = __p0; \
+ uint32x4_t __ret; \
+ __ret = (uint32x4_t) __builtin_neon_vqshlq_n_v((int8x16_t)__s0, __p1, 50); \
+ __ret; \
+})
+#else
+#define vqshlq_n_u32(__p0, __p1) __extension__ ({ \
+ uint32x4_t __s0 = __p0; \
+ uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
+ uint32x4_t __ret; \
+ __ret = (uint32x4_t) __builtin_neon_vqshlq_n_v((int8x16_t)__rev0, __p1, 50); \
+ __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
+ __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vqshlq_n_u64(__p0, __p1) __extension__ ({ \
+ uint64x2_t __s0 = __p0; \
+ uint64x2_t __ret; \
+ __ret = (uint64x2_t) __builtin_neon_vqshlq_n_v((int8x16_t)__s0, __p1, 51); \
+ __ret; \
+})
+#else
+#define vqshlq_n_u64(__p0, __p1) __extension__ ({ \
+ uint64x2_t __s0 = __p0; \
+ uint64x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
+ uint64x2_t __ret; \
+ __ret = (uint64x2_t) __builtin_neon_vqshlq_n_v((int8x16_t)__rev0, __p1, 51); \
+ __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
+ __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vqshlq_n_u16(__p0, __p1) __extension__ ({ \
+ uint16x8_t __s0 = __p0; \
+ uint16x8_t __ret; \
+ __ret = (uint16x8_t) __builtin_neon_vqshlq_n_v((int8x16_t)__s0, __p1, 49); \
+ __ret; \
+})
+#else
+#define vqshlq_n_u16(__p0, __p1) __extension__ ({ \
+ uint16x8_t __s0 = __p0; \
+ uint16x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
+ uint16x8_t __ret; \
+ __ret = (uint16x8_t) __builtin_neon_vqshlq_n_v((int8x16_t)__rev0, __p1, 49); \
+ __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
+ __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vqshlq_n_s8(__p0, __p1) __extension__ ({ \
+ int8x16_t __s0 = __p0; \
+ int8x16_t __ret; \
+ __ret = (int8x16_t) __builtin_neon_vqshlq_n_v((int8x16_t)__s0, __p1, 32); \
+ __ret; \
+})
+#else
+#define vqshlq_n_s8(__p0, __p1) __extension__ ({ \
+ int8x16_t __s0 = __p0; \
+ int8x16_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
+ int8x16_t __ret; \
+ __ret = (int8x16_t) __builtin_neon_vqshlq_n_v((int8x16_t)__rev0, __p1, 32); \
+ __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
+ __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vqshlq_n_s32(__p0, __p1) __extension__ ({ \
+ int32x4_t __s0 = __p0; \
+ int32x4_t __ret; \
+ __ret = (int32x4_t) __builtin_neon_vqshlq_n_v((int8x16_t)__s0, __p1, 34); \
+ __ret; \
+})
+#else
+#define vqshlq_n_s32(__p0, __p1) __extension__ ({ \
+ int32x4_t __s0 = __p0; \
+ int32x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
+ int32x4_t __ret; \
+ __ret = (int32x4_t) __builtin_neon_vqshlq_n_v((int8x16_t)__rev0, __p1, 34); \
+ __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
+ __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vqshlq_n_s64(__p0, __p1) __extension__ ({ \
+ int64x2_t __s0 = __p0; \
+ int64x2_t __ret; \
+ __ret = (int64x2_t) __builtin_neon_vqshlq_n_v((int8x16_t)__s0, __p1, 35); \
+ __ret; \
+})
+#else
+#define vqshlq_n_s64(__p0, __p1) __extension__ ({ \
+ int64x2_t __s0 = __p0; \
+ int64x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
+ int64x2_t __ret; \
+ __ret = (int64x2_t) __builtin_neon_vqshlq_n_v((int8x16_t)__rev0, __p1, 35); \
+ __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
+ __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vqshlq_n_s16(__p0, __p1) __extension__ ({ \
+ int16x8_t __s0 = __p0; \
+ int16x8_t __ret; \
+ __ret = (int16x8_t) __builtin_neon_vqshlq_n_v((int8x16_t)__s0, __p1, 33); \
+ __ret; \
+})
+#else
+#define vqshlq_n_s16(__p0, __p1) __extension__ ({ \
+ int16x8_t __s0 = __p0; \
+ int16x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
+ int16x8_t __ret; \
+ __ret = (int16x8_t) __builtin_neon_vqshlq_n_v((int8x16_t)__rev0, __p1, 33); \
+ __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
+ __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vqshl_n_u8(__p0, __p1) __extension__ ({ \
+ uint8x8_t __s0 = __p0; \
+ uint8x8_t __ret; \
+ __ret = (uint8x8_t) __builtin_neon_vqshl_n_v((int8x8_t)__s0, __p1, 16); \
+ __ret; \
+})
+#else
+#define vqshl_n_u8(__p0, __p1) __extension__ ({ \
+ uint8x8_t __s0 = __p0; \
+ uint8x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
+ uint8x8_t __ret; \
+ __ret = (uint8x8_t) __builtin_neon_vqshl_n_v((int8x8_t)__rev0, __p1, 16); \
+ __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
+ __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vqshl_n_u32(__p0, __p1) __extension__ ({ \
+ uint32x2_t __s0 = __p0; \
+ uint32x2_t __ret; \
+ __ret = (uint32x2_t) __builtin_neon_vqshl_n_v((int8x8_t)__s0, __p1, 18); \
+ __ret; \
+})
+#else
+#define vqshl_n_u32(__p0, __p1) __extension__ ({ \
+ uint32x2_t __s0 = __p0; \
+ uint32x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
+ uint32x2_t __ret; \
+ __ret = (uint32x2_t) __builtin_neon_vqshl_n_v((int8x8_t)__rev0, __p1, 18); \
+ __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
+ __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vqshl_n_u64(__p0, __p1) __extension__ ({ \
+ uint64x1_t __s0 = __p0; \
+ uint64x1_t __ret; \
+ __ret = (uint64x1_t) __builtin_neon_vqshl_n_v((int8x8_t)__s0, __p1, 19); \
+ __ret; \
+})
+#else
+#define vqshl_n_u64(__p0, __p1) __extension__ ({ \
+ uint64x1_t __s0 = __p0; \
+ uint64x1_t __ret; \
+ __ret = (uint64x1_t) __builtin_neon_vqshl_n_v((int8x8_t)__s0, __p1, 19); \
+ __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vqshl_n_u16(__p0, __p1) __extension__ ({ \
+ uint16x4_t __s0 = __p0; \
+ uint16x4_t __ret; \
+ __ret = (uint16x4_t) __builtin_neon_vqshl_n_v((int8x8_t)__s0, __p1, 17); \
+ __ret; \
+})
+#else
+#define vqshl_n_u16(__p0, __p1) __extension__ ({ \
+ uint16x4_t __s0 = __p0; \
+ uint16x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
+ uint16x4_t __ret; \
+ __ret = (uint16x4_t) __builtin_neon_vqshl_n_v((int8x8_t)__rev0, __p1, 17); \
+ __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
+ __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vqshl_n_s8(__p0, __p1) __extension__ ({ \
+ int8x8_t __s0 = __p0; \
+ int8x8_t __ret; \
+ __ret = (int8x8_t) __builtin_neon_vqshl_n_v((int8x8_t)__s0, __p1, 0); \
+ __ret; \
+})
+#else
+#define vqshl_n_s8(__p0, __p1) __extension__ ({ \
+ int8x8_t __s0 = __p0; \
+ int8x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
+ int8x8_t __ret; \
+ __ret = (int8x8_t) __builtin_neon_vqshl_n_v((int8x8_t)__rev0, __p1, 0); \
+ __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
+ __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vqshl_n_s32(__p0, __p1) __extension__ ({ \
+ int32x2_t __s0 = __p0; \
+ int32x2_t __ret; \
+ __ret = (int32x2_t) __builtin_neon_vqshl_n_v((int8x8_t)__s0, __p1, 2); \
+ __ret; \
+})
+#else
+#define vqshl_n_s32(__p0, __p1) __extension__ ({ \
+ int32x2_t __s0 = __p0; \
+ int32x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
+ int32x2_t __ret; \
+ __ret = (int32x2_t) __builtin_neon_vqshl_n_v((int8x8_t)__rev0, __p1, 2); \
+ __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
+ __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vqshl_n_s64(__p0, __p1) __extension__ ({ \
+ int64x1_t __s0 = __p0; \
+ int64x1_t __ret; \
+ __ret = (int64x1_t) __builtin_neon_vqshl_n_v((int8x8_t)__s0, __p1, 3); \
+ __ret; \
+})
+#else
+#define vqshl_n_s64(__p0, __p1) __extension__ ({ \
+ int64x1_t __s0 = __p0; \
+ int64x1_t __ret; \
+ __ret = (int64x1_t) __builtin_neon_vqshl_n_v((int8x8_t)__s0, __p1, 3); \
+ __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vqshl_n_s16(__p0, __p1) __extension__ ({ \
+ int16x4_t __s0 = __p0; \
+ int16x4_t __ret; \
+ __ret = (int16x4_t) __builtin_neon_vqshl_n_v((int8x8_t)__s0, __p1, 1); \
+ __ret; \
+})
+#else
+#define vqshl_n_s16(__p0, __p1) __extension__ ({ \
+ int16x4_t __s0 = __p0; \
+ int16x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
+ int16x4_t __ret; \
+ __ret = (int16x4_t) __builtin_neon_vqshl_n_v((int8x8_t)__rev0, __p1, 1); \
+ __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
+ __ret; \
+})
+#endif
+
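+/*
+ * vqshlu_n / vqshluq_n: saturating shift left by an immediate, signed
+ * input to unsigned result. Negative input lanes saturate to 0; lanes
+ * that overflow saturate to the unsigned maximum.
+ */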
+#ifdef __LITTLE_ENDIAN__
+#define vqshluq_n_s8(__p0, __p1) __extension__ ({ \
+ int8x16_t __s0 = __p0; \
+ uint8x16_t __ret; \
+ __ret = (uint8x16_t) __builtin_neon_vqshluq_n_v((int8x16_t)__s0, __p1, 48); \
+ __ret; \
+})
+#else
+#define vqshluq_n_s8(__p0, __p1) __extension__ ({ \
+ int8x16_t __s0 = __p0; \
+ int8x16_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
+ uint8x16_t __ret; \
+ __ret = (uint8x16_t) __builtin_neon_vqshluq_n_v((int8x16_t)__rev0, __p1, 48); \
+ __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
+ __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vqshluq_n_s32(__p0, __p1) __extension__ ({ \
+ int32x4_t __s0 = __p0; \
+ uint32x4_t __ret; \
+ __ret = (uint32x4_t) __builtin_neon_vqshluq_n_v((int8x16_t)__s0, __p1, 50); \
+ __ret; \
+})
+#else
+#define vqshluq_n_s32(__p0, __p1) __extension__ ({ \
+ int32x4_t __s0 = __p0; \
+ int32x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
+ uint32x4_t __ret; \
+ __ret = (uint32x4_t) __builtin_neon_vqshluq_n_v((int8x16_t)__rev0, __p1, 50); \
+ __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
+ __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vqshluq_n_s64(__p0, __p1) __extension__ ({ \
+ int64x2_t __s0 = __p0; \
+ uint64x2_t __ret; \
+ __ret = (uint64x2_t) __builtin_neon_vqshluq_n_v((int8x16_t)__s0, __p1, 51); \
+ __ret; \
+})
+#else
+#define vqshluq_n_s64(__p0, __p1) __extension__ ({ \
+ int64x2_t __s0 = __p0; \
+ int64x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
+ uint64x2_t __ret; \
+ __ret = (uint64x2_t) __builtin_neon_vqshluq_n_v((int8x16_t)__rev0, __p1, 51); \
+ __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
+ __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vqshluq_n_s16(__p0, __p1) __extension__ ({ \
+ int16x8_t __s0 = __p0; \
+ uint16x8_t __ret; \
+ __ret = (uint16x8_t) __builtin_neon_vqshluq_n_v((int8x16_t)__s0, __p1, 49); \
+ __ret; \
+})
+#else
+#define vqshluq_n_s16(__p0, __p1) __extension__ ({ \
+ int16x8_t __s0 = __p0; \
+ int16x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
+ uint16x8_t __ret; \
+ __ret = (uint16x8_t) __builtin_neon_vqshluq_n_v((int8x16_t)__rev0, __p1, 49); \
+ __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
+ __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vqshlu_n_s8(__p0, __p1) __extension__ ({ \
+ int8x8_t __s0 = __p0; \
+ uint8x8_t __ret; \
+ __ret = (uint8x8_t) __builtin_neon_vqshlu_n_v((int8x8_t)__s0, __p1, 16); \
+ __ret; \
+})
+#else
+#define vqshlu_n_s8(__p0, __p1) __extension__ ({ \
+ int8x8_t __s0 = __p0; \
+ int8x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
+ uint8x8_t __ret; \
+ __ret = (uint8x8_t) __builtin_neon_vqshlu_n_v((int8x8_t)__rev0, __p1, 16); \
+ __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
+ __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vqshlu_n_s32(__p0, __p1) __extension__ ({ \
+ int32x2_t __s0 = __p0; \
+ uint32x2_t __ret; \
+ __ret = (uint32x2_t) __builtin_neon_vqshlu_n_v((int8x8_t)__s0, __p1, 18); \
+ __ret; \
+})
+#else
+#define vqshlu_n_s32(__p0, __p1) __extension__ ({ \
+ int32x2_t __s0 = __p0; \
+ int32x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
+ uint32x2_t __ret; \
+ __ret = (uint32x2_t) __builtin_neon_vqshlu_n_v((int8x8_t)__rev0, __p1, 18); \
+ __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
+ __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vqshlu_n_s64(__p0, __p1) __extension__ ({ \
+ int64x1_t __s0 = __p0; \
+ uint64x1_t __ret; \
+ __ret = (uint64x1_t) __builtin_neon_vqshlu_n_v((int8x8_t)__s0, __p1, 19); \
+ __ret; \
+})
+#else
+#define vqshlu_n_s64(__p0, __p1) __extension__ ({ \
+ int64x1_t __s0 = __p0; \
+ uint64x1_t __ret; \
+ __ret = (uint64x1_t) __builtin_neon_vqshlu_n_v((int8x8_t)__s0, __p1, 19); \
+ __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vqshlu_n_s16(__p0, __p1) __extension__ ({ \
+ int16x4_t __s0 = __p0; \
+ uint16x4_t __ret; \
+ __ret = (uint16x4_t) __builtin_neon_vqshlu_n_v((int8x8_t)__s0, __p1, 17); \
+ __ret; \
+})
+#else
+#define vqshlu_n_s16(__p0, __p1) __extension__ ({ \
+ int16x4_t __s0 = __p0; \
+ int16x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
+ uint16x4_t __ret; \
+ __ret = (uint16x4_t) __builtin_neon_vqshlu_n_v((int8x8_t)__rev0, __p1, 17); \
+ __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
+ __ret; \
+})
+#endif
+
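+/*
+ * vqshrn_n: saturating shift right and narrow. Each lane is shifted right
+ * by the immediate, saturated to the range of the half-width element
+ * type, and the results are packed into a vector of half-width lanes
+ * (e.g. uint32x4_t -> uint16x4_t).
+ */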
+#ifdef __LITTLE_ENDIAN__
+#define vqshrn_n_u32(__p0, __p1) __extension__ ({ \
+ uint32x4_t __s0 = __p0; \
+ uint16x4_t __ret; \
+ __ret = (uint16x4_t) __builtin_neon_vqshrn_n_v((int8x16_t)__s0, __p1, 17); \
+ __ret; \
+})
+#else
+#define vqshrn_n_u32(__p0, __p1) __extension__ ({ \
+ uint32x4_t __s0 = __p0; \
+ uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
+ uint16x4_t __ret; \
+ __ret = (uint16x4_t) __builtin_neon_vqshrn_n_v((int8x16_t)__rev0, __p1, 17); \
+ __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
+ __ret; \
+})
+#define __noswap_vqshrn_n_u32(__p0, __p1) __extension__ ({ \
+ uint32x4_t __s0 = __p0; \
+ uint16x4_t __ret; \
+ __ret = (uint16x4_t) __builtin_neon_vqshrn_n_v((int8x16_t)__s0, __p1, 17); \
+ __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vqshrn_n_u64(__p0, __p1) __extension__ ({ \
+ uint64x2_t __s0 = __p0; \
+ uint32x2_t __ret; \
+ __ret = (uint32x2_t) __builtin_neon_vqshrn_n_v((int8x16_t)__s0, __p1, 18); \
+ __ret; \
+})
+#else
+#define vqshrn_n_u64(__p0, __p1) __extension__ ({ \
+ uint64x2_t __s0 = __p0; \
+ uint64x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
+ uint32x2_t __ret; \
+ __ret = (uint32x2_t) __builtin_neon_vqshrn_n_v((int8x16_t)__rev0, __p1, 18); \
+ __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
+ __ret; \
+})
+#define __noswap_vqshrn_n_u64(__p0, __p1) __extension__ ({ \
+ uint64x2_t __s0 = __p0; \
+ uint32x2_t __ret; \
+ __ret = (uint32x2_t) __builtin_neon_vqshrn_n_v((int8x16_t)__s0, __p1, 18); \
+ __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vqshrn_n_u16(__p0, __p1) __extension__ ({ \
+ uint16x8_t __s0 = __p0; \
+ uint8x8_t __ret; \
+ __ret = (uint8x8_t) __builtin_neon_vqshrn_n_v((int8x16_t)__s0, __p1, 16); \
+ __ret; \
+})
+#else
+#define vqshrn_n_u16(__p0, __p1) __extension__ ({ \
+ uint16x8_t __s0 = __p0; \
+ uint16x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
+ uint8x8_t __ret; \
+ __ret = (uint8x8_t) __builtin_neon_vqshrn_n_v((int8x16_t)__rev0, __p1, 16); \
+ __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
+ __ret; \
+})
+#define __noswap_vqshrn_n_u16(__p0, __p1) __extension__ ({ \
+ uint16x8_t __s0 = __p0; \
+ uint8x8_t __ret; \
+ __ret = (uint8x8_t) __builtin_neon_vqshrn_n_v((int8x16_t)__s0, __p1, 16); \
+ __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vqshrn_n_s32(__p0, __p1) __extension__ ({ \
+ int32x4_t __s0 = __p0; \
+ int16x4_t __ret; \
+ __ret = (int16x4_t) __builtin_neon_vqshrn_n_v((int8x16_t)__s0, __p1, 1); \
+ __ret; \
+})
+#else
+#define vqshrn_n_s32(__p0, __p1) __extension__ ({ \
+ int32x4_t __s0 = __p0; \
+ int32x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
+ int16x4_t __ret; \
+ __ret = (int16x4_t) __builtin_neon_vqshrn_n_v((int8x16_t)__rev0, __p1, 1); \
+ __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
+ __ret; \
+})
+#define __noswap_vqshrn_n_s32(__p0, __p1) __extension__ ({ \
+ int32x4_t __s0 = __p0; \
+ int16x4_t __ret; \
+ __ret = (int16x4_t) __builtin_neon_vqshrn_n_v((int8x16_t)__s0, __p1, 1); \
+ __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vqshrn_n_s64(__p0, __p1) __extension__ ({ \
+ int64x2_t __s0 = __p0; \
+ int32x2_t __ret; \
+ __ret = (int32x2_t) __builtin_neon_vqshrn_n_v((int8x16_t)__s0, __p1, 2); \
+ __ret; \
+})
+#else
+#define vqshrn_n_s64(__p0, __p1) __extension__ ({ \
+ int64x2_t __s0 = __p0; \
+ int64x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
+ int32x2_t __ret; \
+ __ret = (int32x2_t) __builtin_neon_vqshrn_n_v((int8x16_t)__rev0, __p1, 2); \
+ __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
+ __ret; \
+})
+#define __noswap_vqshrn_n_s64(__p0, __p1) __extension__ ({ \
+ int64x2_t __s0 = __p0; \
+ int32x2_t __ret; \
+ __ret = (int32x2_t) __builtin_neon_vqshrn_n_v((int8x16_t)__s0, __p1, 2); \
+ __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vqshrn_n_s16(__p0, __p1) __extension__ ({ \
+ int16x8_t __s0 = __p0; \
+ int8x8_t __ret; \
+ __ret = (int8x8_t) __builtin_neon_vqshrn_n_v((int8x16_t)__s0, __p1, 0); \
+ __ret; \
+})
+#else
+#define vqshrn_n_s16(__p0, __p1) __extension__ ({ \
+ int16x8_t __s0 = __p0; \
+ int16x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
+ int8x8_t __ret; \
+ __ret = (int8x8_t) __builtin_neon_vqshrn_n_v((int8x16_t)__rev0, __p1, 0); \
+ __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
+ __ret; \
+})
+#define __noswap_vqshrn_n_s16(__p0, __p1) __extension__ ({ \
+ int16x8_t __s0 = __p0; \
+ int8x8_t __ret; \
+ __ret = (int8x8_t) __builtin_neon_vqshrn_n_v((int8x16_t)__s0, __p1, 0); \
+ __ret; \
+})
+#endif
+
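+/*
+ * vqshrun_n: signed saturating shift right, narrow to unsigned. Like
+ * vqshrn_n, but the signed input saturates to an unsigned half-width
+ * result, so negative lanes become 0 (e.g. int32x4_t -> uint16x4_t).
+ */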
+#ifdef __LITTLE_ENDIAN__
+#define vqshrun_n_s32(__p0, __p1) __extension__ ({ \
+ int32x4_t __s0 = __p0; \
+ uint16x4_t __ret; \
+ __ret = (uint16x4_t) __builtin_neon_vqshrun_n_v((int8x16_t)__s0, __p1, 17); \
+ __ret; \
+})
+#else
+#define vqshrun_n_s32(__p0, __p1) __extension__ ({ \
+ int32x4_t __s0 = __p0; \
+ int32x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
+ uint16x4_t __ret; \
+ __ret = (uint16x4_t) __builtin_neon_vqshrun_n_v((int8x16_t)__rev0, __p1, 17); \
+ __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
+ __ret; \
+})
+#define __noswap_vqshrun_n_s32(__p0, __p1) __extension__ ({ \
+ int32x4_t __s0 = __p0; \
+ uint16x4_t __ret; \
+ __ret = (uint16x4_t) __builtin_neon_vqshrun_n_v((int8x16_t)__s0, __p1, 17); \
+ __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vqshrun_n_s64(__p0, __p1) __extension__ ({ \
+ int64x2_t __s0 = __p0; \
+ uint32x2_t __ret; \
+ __ret = (uint32x2_t) __builtin_neon_vqshrun_n_v((int8x16_t)__s0, __p1, 18); \
+ __ret; \
+})
+#else
+#define vqshrun_n_s64(__p0, __p1) __extension__ ({ \
+ int64x2_t __s0 = __p0; \
+ int64x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
+ uint32x2_t __ret; \
+ __ret = (uint32x2_t) __builtin_neon_vqshrun_n_v((int8x16_t)__rev0, __p1, 18); \
+ __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
+ __ret; \
+})
+#define __noswap_vqshrun_n_s64(__p0, __p1) __extension__ ({ \
+ int64x2_t __s0 = __p0; \
+ uint32x2_t __ret; \
+ __ret = (uint32x2_t) __builtin_neon_vqshrun_n_v((int8x16_t)__s0, __p1, 18); \
+ __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vqshrun_n_s16(__p0, __p1) __extension__ ({ \
+ int16x8_t __s0 = __p0; \
+ uint8x8_t __ret; \
+ __ret = (uint8x8_t) __builtin_neon_vqshrun_n_v((int8x16_t)__s0, __p1, 16); \
+ __ret; \
+})
+#else
+#define vqshrun_n_s16(__p0, __p1) __extension__ ({ \
+ int16x8_t __s0 = __p0; \
+ int16x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
+ uint8x8_t __ret; \
+ __ret = (uint8x8_t) __builtin_neon_vqshrun_n_v((int8x16_t)__rev0, __p1, 16); \
+ __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
+ __ret; \
+})
+#define __noswap_vqshrun_n_s16(__p0, __p1) __extension__ ({ \
+ int16x8_t __s0 = __p0; \
+ uint8x8_t __ret; \
+ __ret = (uint8x8_t) __builtin_neon_vqshrun_n_v((int8x16_t)__s0, __p1, 16); \
+ __ret; \
+})
+#endif
+
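+/*
+ * vqsub / vqsubq: saturating subtract. Each lane computes
+ * __p0[i] - __p1[i] and clamps to the element type's range instead of
+ * wrapping; for unsigned types an underflowing difference saturates to 0.
+ * Illustrative use:
+ *   uint8x8_t d = vqsub_u8(a, b);   (a lane computing 5 - 9 yields 0, not 252)
+ */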
+#ifdef __LITTLE_ENDIAN__
+__ai uint8x16_t vqsubq_u8(uint8x16_t __p0, uint8x16_t __p1) {
+ uint8x16_t __ret;
+ __ret = (uint8x16_t) __builtin_neon_vqsubq_v((int8x16_t)__p0, (int8x16_t)__p1, 48);
+ return __ret;
+}
+#else
+__ai uint8x16_t vqsubq_u8(uint8x16_t __p0, uint8x16_t __p1) {
+ uint8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
+ uint8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
+ uint8x16_t __ret;
+ __ret = (uint8x16_t) __builtin_neon_vqsubq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 48);
+ __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint32x4_t vqsubq_u32(uint32x4_t __p0, uint32x4_t __p1) {
+ uint32x4_t __ret;
+ __ret = (uint32x4_t) __builtin_neon_vqsubq_v((int8x16_t)__p0, (int8x16_t)__p1, 50);
+ return __ret;
+}
+#else
+__ai uint32x4_t vqsubq_u32(uint32x4_t __p0, uint32x4_t __p1) {
+ uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
+ uint32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
+ uint32x4_t __ret;
+ __ret = (uint32x4_t) __builtin_neon_vqsubq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 50);
+ __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint64x2_t vqsubq_u64(uint64x2_t __p0, uint64x2_t __p1) {
+ uint64x2_t __ret;
+ __ret = (uint64x2_t) __builtin_neon_vqsubq_v((int8x16_t)__p0, (int8x16_t)__p1, 51);
+ return __ret;
+}
+#else
+__ai uint64x2_t vqsubq_u64(uint64x2_t __p0, uint64x2_t __p1) {
+ uint64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
+ uint64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
+ uint64x2_t __ret;
+ __ret = (uint64x2_t) __builtin_neon_vqsubq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 51);
+ __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint16x8_t vqsubq_u16(uint16x8_t __p0, uint16x8_t __p1) {
+ uint16x8_t __ret;
+ __ret = (uint16x8_t) __builtin_neon_vqsubq_v((int8x16_t)__p0, (int8x16_t)__p1, 49);
+ return __ret;
+}
+#else
+__ai uint16x8_t vqsubq_u16(uint16x8_t __p0, uint16x8_t __p1) {
+ uint16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
+ uint16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
+ uint16x8_t __ret;
+ __ret = (uint16x8_t) __builtin_neon_vqsubq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 49);
+ __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai int8x16_t vqsubq_s8(int8x16_t __p0, int8x16_t __p1) {
+ int8x16_t __ret;
+ __ret = (int8x16_t) __builtin_neon_vqsubq_v((int8x16_t)__p0, (int8x16_t)__p1, 32);
+ return __ret;
+}
+#else
+__ai int8x16_t vqsubq_s8(int8x16_t __p0, int8x16_t __p1) {
+ int8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
+ int8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
+ int8x16_t __ret;
+ __ret = (int8x16_t) __builtin_neon_vqsubq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 32);
+ __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai int32x4_t vqsubq_s32(int32x4_t __p0, int32x4_t __p1) {
+ int32x4_t __ret;
+ __ret = (int32x4_t) __builtin_neon_vqsubq_v((int8x16_t)__p0, (int8x16_t)__p1, 34);
+ return __ret;
+}
+#else
+__ai int32x4_t vqsubq_s32(int32x4_t __p0, int32x4_t __p1) {
+ int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
+ int32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
+ int32x4_t __ret;
+ __ret = (int32x4_t) __builtin_neon_vqsubq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 34);
+ __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
+ return __ret;
+}
+__ai int32x4_t __noswap_vqsubq_s32(int32x4_t __p0, int32x4_t __p1) {
+ int32x4_t __ret;
+ __ret = (int32x4_t) __builtin_neon_vqsubq_v((int8x16_t)__p0, (int8x16_t)__p1, 34);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai int64x2_t vqsubq_s64(int64x2_t __p0, int64x2_t __p1) {
+ int64x2_t __ret;
+ __ret = (int64x2_t) __builtin_neon_vqsubq_v((int8x16_t)__p0, (int8x16_t)__p1, 35);
+ return __ret;
+}
+#else
+__ai int64x2_t vqsubq_s64(int64x2_t __p0, int64x2_t __p1) {
+ int64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
+ int64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
+ int64x2_t __ret;
+ __ret = (int64x2_t) __builtin_neon_vqsubq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 35);
+ __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai int16x8_t vqsubq_s16(int16x8_t __p0, int16x8_t __p1) {
+ int16x8_t __ret;
+ __ret = (int16x8_t) __builtin_neon_vqsubq_v((int8x16_t)__p0, (int8x16_t)__p1, 33);
+ return __ret;
+}
+#else
+__ai int16x8_t vqsubq_s16(int16x8_t __p0, int16x8_t __p1) {
+ int16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
+ int16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
+ int16x8_t __ret;
+ __ret = (int16x8_t) __builtin_neon_vqsubq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 33);
+ __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
+ return __ret;
+}
+__ai int16x8_t __noswap_vqsubq_s16(int16x8_t __p0, int16x8_t __p1) {
+ int16x8_t __ret;
+ __ret = (int16x8_t) __builtin_neon_vqsubq_v((int8x16_t)__p0, (int8x16_t)__p1, 33);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint8x8_t vqsub_u8(uint8x8_t __p0, uint8x8_t __p1) {
+ uint8x8_t __ret;
+ __ret = (uint8x8_t) __builtin_neon_vqsub_v((int8x8_t)__p0, (int8x8_t)__p1, 16);
+ return __ret;
+}
+#else
+__ai uint8x8_t vqsub_u8(uint8x8_t __p0, uint8x8_t __p1) {
+ uint8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
+ uint8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
+ uint8x8_t __ret;
+ __ret = (uint8x8_t) __builtin_neon_vqsub_v((int8x8_t)__rev0, (int8x8_t)__rev1, 16);
+ __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint32x2_t vqsub_u32(uint32x2_t __p0, uint32x2_t __p1) {
+ uint32x2_t __ret;
+ __ret = (uint32x2_t) __builtin_neon_vqsub_v((int8x8_t)__p0, (int8x8_t)__p1, 18);
+ return __ret;
+}
+#else
+__ai uint32x2_t vqsub_u32(uint32x2_t __p0, uint32x2_t __p1) {
+ uint32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
+ uint32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
+ uint32x2_t __ret;
+ __ret = (uint32x2_t) __builtin_neon_vqsub_v((int8x8_t)__rev0, (int8x8_t)__rev1, 18);
+ __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint64x1_t vqsub_u64(uint64x1_t __p0, uint64x1_t __p1) {
+ uint64x1_t __ret;
+ __ret = (uint64x1_t) __builtin_neon_vqsub_v((int8x8_t)__p0, (int8x8_t)__p1, 19);
+ return __ret;
+}
+#else
+__ai uint64x1_t vqsub_u64(uint64x1_t __p0, uint64x1_t __p1) {
+ uint64x1_t __ret;
+ __ret = (uint64x1_t) __builtin_neon_vqsub_v((int8x8_t)__p0, (int8x8_t)__p1, 19);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint16x4_t vqsub_u16(uint16x4_t __p0, uint16x4_t __p1) {
+ uint16x4_t __ret;
+ __ret = (uint16x4_t) __builtin_neon_vqsub_v((int8x8_t)__p0, (int8x8_t)__p1, 17);
+ return __ret;
+}
+#else
+__ai uint16x4_t vqsub_u16(uint16x4_t __p0, uint16x4_t __p1) {
+ uint16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
+ uint16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
+ uint16x4_t __ret;
+ __ret = (uint16x4_t) __builtin_neon_vqsub_v((int8x8_t)__rev0, (int8x8_t)__rev1, 17);
+ __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai int8x8_t vqsub_s8(int8x8_t __p0, int8x8_t __p1) {
+ int8x8_t __ret;
+ __ret = (int8x8_t) __builtin_neon_vqsub_v((int8x8_t)__p0, (int8x8_t)__p1, 0);
+ return __ret;
+}
+#else
+__ai int8x8_t vqsub_s8(int8x8_t __p0, int8x8_t __p1) {
+ int8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
+ int8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
+ int8x8_t __ret;
+ __ret = (int8x8_t) __builtin_neon_vqsub_v((int8x8_t)__rev0, (int8x8_t)__rev1, 0);
+ __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai int32x2_t vqsub_s32(int32x2_t __p0, int32x2_t __p1) {
+ int32x2_t __ret;
+ __ret = (int32x2_t) __builtin_neon_vqsub_v((int8x8_t)__p0, (int8x8_t)__p1, 2);
+ return __ret;
+}
+#else
+__ai int32x2_t vqsub_s32(int32x2_t __p0, int32x2_t __p1) {
+ int32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
+ int32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
+ int32x2_t __ret;
+ __ret = (int32x2_t) __builtin_neon_vqsub_v((int8x8_t)__rev0, (int8x8_t)__rev1, 2);
+ __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
+ return __ret;
+}
+__ai int32x2_t __noswap_vqsub_s32(int32x2_t __p0, int32x2_t __p1) {
+ int32x2_t __ret;
+ __ret = (int32x2_t) __builtin_neon_vqsub_v((int8x8_t)__p0, (int8x8_t)__p1, 2);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai int64x1_t vqsub_s64(int64x1_t __p0, int64x1_t __p1) {
+ int64x1_t __ret;
+ __ret = (int64x1_t) __builtin_neon_vqsub_v((int8x8_t)__p0, (int8x8_t)__p1, 3);
+ return __ret;
+}
+#else
+__ai int64x1_t vqsub_s64(int64x1_t __p0, int64x1_t __p1) {
+ int64x1_t __ret;
+ __ret = (int64x1_t) __builtin_neon_vqsub_v((int8x8_t)__p0, (int8x8_t)__p1, 3);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai int16x4_t vqsub_s16(int16x4_t __p0, int16x4_t __p1) {
+ int16x4_t __ret;
+ __ret = (int16x4_t) __builtin_neon_vqsub_v((int8x8_t)__p0, (int8x8_t)__p1, 1);
+ return __ret;
+}
+#else
+__ai int16x4_t vqsub_s16(int16x4_t __p0, int16x4_t __p1) {
+ int16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
+ int16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
+ int16x4_t __ret;
+ __ret = (int16x4_t) __builtin_neon_vqsub_v((int8x8_t)__rev0, (int8x8_t)__rev1, 1);
+ __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
+ return __ret;
+}
+__ai int16x4_t __noswap_vqsub_s16(int16x4_t __p0, int16x4_t __p1) {
+ int16x4_t __ret;
+ __ret = (int16x4_t) __builtin_neon_vqsub_v((int8x8_t)__p0, (int8x8_t)__p1, 1);
+ return __ret;
+}
+#endif
+
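+/*
+ * vraddhn: rounding add and narrow, returning the high half. Each lane
+ * computes (__p0[i] + __p1[i] + (1 << (n - 1))) >> n, where n is the
+ * narrow element width (e.g. n = 16 for uint32x4_t -> uint16x4_t).
+ */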
+#ifdef __LITTLE_ENDIAN__
+__ai uint16x4_t vraddhn_u32(uint32x4_t __p0, uint32x4_t __p1) {
+ uint16x4_t __ret;
+ __ret = (uint16x4_t) __builtin_neon_vraddhn_v((int8x16_t)__p0, (int8x16_t)__p1, 17);
+ return __ret;
+}
+#else
+__ai uint16x4_t vraddhn_u32(uint32x4_t __p0, uint32x4_t __p1) {
+ uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
+ uint32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
+ uint16x4_t __ret;
+ __ret = (uint16x4_t) __builtin_neon_vraddhn_v((int8x16_t)__rev0, (int8x16_t)__rev1, 17);
+ __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
+ return __ret;
+}
+__ai uint16x4_t __noswap_vraddhn_u32(uint32x4_t __p0, uint32x4_t __p1) {
+ uint16x4_t __ret;
+ __ret = (uint16x4_t) __builtin_neon_vraddhn_v((int8x16_t)__p0, (int8x16_t)__p1, 17);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint32x2_t vraddhn_u64(uint64x2_t __p0, uint64x2_t __p1) {
+ uint32x2_t __ret;
+ __ret = (uint32x2_t) __builtin_neon_vraddhn_v((int8x16_t)__p0, (int8x16_t)__p1, 18);
+ return __ret;
+}
+#else
+__ai uint32x2_t vraddhn_u64(uint64x2_t __p0, uint64x2_t __p1) {
+ uint64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
+ uint64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
+ uint32x2_t __ret;
+ __ret = (uint32x2_t) __builtin_neon_vraddhn_v((int8x16_t)__rev0, (int8x16_t)__rev1, 18);
+ __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
+ return __ret;
+}
+__ai uint32x2_t __noswap_vraddhn_u64(uint64x2_t __p0, uint64x2_t __p1) {
+ uint32x2_t __ret;
+ __ret = (uint32x2_t) __builtin_neon_vraddhn_v((int8x16_t)__p0, (int8x16_t)__p1, 18);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint8x8_t vraddhn_u16(uint16x8_t __p0, uint16x8_t __p1) {
+ uint8x8_t __ret;
+ __ret = (uint8x8_t) __builtin_neon_vraddhn_v((int8x16_t)__p0, (int8x16_t)__p1, 16);
+ return __ret;
+}
+#else
+__ai uint8x8_t vraddhn_u16(uint16x8_t __p0, uint16x8_t __p1) {
+ uint16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
+ uint16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
+ uint8x8_t __ret;
+ __ret = (uint8x8_t) __builtin_neon_vraddhn_v((int8x16_t)__rev0, (int8x16_t)__rev1, 16);
+ __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
+ return __ret;
+}
+__ai uint8x8_t __noswap_vraddhn_u16(uint16x8_t __p0, uint16x8_t __p1) {
+ uint8x8_t __ret;
+ __ret = (uint8x8_t) __builtin_neon_vraddhn_v((int8x16_t)__p0, (int8x16_t)__p1, 16);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai int16x4_t vraddhn_s32(int32x4_t __p0, int32x4_t __p1) {
+ int16x4_t __ret;
+ __ret = (int16x4_t) __builtin_neon_vraddhn_v((int8x16_t)__p0, (int8x16_t)__p1, 1);
+ return __ret;
+}
+#else
+__ai int16x4_t vraddhn_s32(int32x4_t __p0, int32x4_t __p1) {
+ int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
+ int32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
+ int16x4_t __ret;
+ __ret = (int16x4_t) __builtin_neon_vraddhn_v((int8x16_t)__rev0, (int8x16_t)__rev1, 1);
+ __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
+ return __ret;
+}
+__ai int16x4_t __noswap_vraddhn_s32(int32x4_t __p0, int32x4_t __p1) {
+ int16x4_t __ret;
+ __ret = (int16x4_t) __builtin_neon_vraddhn_v((int8x16_t)__p0, (int8x16_t)__p1, 1);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai int32x2_t vraddhn_s64(int64x2_t __p0, int64x2_t __p1) {
+ int32x2_t __ret;
+ __ret = (int32x2_t) __builtin_neon_vraddhn_v((int8x16_t)__p0, (int8x16_t)__p1, 2);
+ return __ret;
+}
+#else
+__ai int32x2_t vraddhn_s64(int64x2_t __p0, int64x2_t __p1) {
+ int64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
+ int64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
+ int32x2_t __ret;
+ __ret = (int32x2_t) __builtin_neon_vraddhn_v((int8x16_t)__rev0, (int8x16_t)__rev1, 2);
+ __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
+ return __ret;
+}
+__ai int32x2_t __noswap_vraddhn_s64(int64x2_t __p0, int64x2_t __p1) {
+ int32x2_t __ret;
+ __ret = (int32x2_t) __builtin_neon_vraddhn_v((int8x16_t)__p0, (int8x16_t)__p1, 2);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai int8x8_t vraddhn_s16(int16x8_t __p0, int16x8_t __p1) {
+ int8x8_t __ret;
+ __ret = (int8x8_t) __builtin_neon_vraddhn_v((int8x16_t)__p0, (int8x16_t)__p1, 0);
+ return __ret;
+}
+#else
+__ai int8x8_t vraddhn_s16(int16x8_t __p0, int16x8_t __p1) {
+ int16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
+ int16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
+ int8x8_t __ret;
+ __ret = (int8x8_t) __builtin_neon_vraddhn_v((int8x16_t)__rev0, (int8x16_t)__rev1, 0);
+ __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
+ return __ret;
+}
+__ai int8x8_t __noswap_vraddhn_s16(int16x8_t __p0, int16x8_t __p1) {
+ int8x8_t __ret;
+ __ret = (int8x8_t) __builtin_neon_vraddhn_v((int8x16_t)__p0, (int8x16_t)__p1, 0);
+ return __ret;
+}
+#endif
+
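+/*
+ * vrecpe / vrecpeq: reciprocal estimate (VRECPE). Produces a low-precision
+ * per-lane approximation of 1/x; for more precision it is refined with
+ * vrecps Newton-Raphson steps (see below).
+ */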
+#ifdef __LITTLE_ENDIAN__
+__ai uint32x4_t vrecpeq_u32(uint32x4_t __p0) {
+ uint32x4_t __ret;
+ __ret = (uint32x4_t) __builtin_neon_vrecpeq_v((int8x16_t)__p0, 50);
+ return __ret;
+}
+#else
+__ai uint32x4_t vrecpeq_u32(uint32x4_t __p0) {
+ uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
+ uint32x4_t __ret;
+ __ret = (uint32x4_t) __builtin_neon_vrecpeq_v((int8x16_t)__rev0, 50);
+ __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai float32x4_t vrecpeq_f32(float32x4_t __p0) {
+ float32x4_t __ret;
+ __ret = (float32x4_t) __builtin_neon_vrecpeq_v((int8x16_t)__p0, 41);
+ return __ret;
+}
+#else
+__ai float32x4_t vrecpeq_f32(float32x4_t __p0) {
+ float32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
+ float32x4_t __ret;
+ __ret = (float32x4_t) __builtin_neon_vrecpeq_v((int8x16_t)__rev0, 41);
+ __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint32x2_t vrecpe_u32(uint32x2_t __p0) {
+ uint32x2_t __ret;
+ __ret = (uint32x2_t) __builtin_neon_vrecpe_v((int8x8_t)__p0, 18);
+ return __ret;
+}
+#else
+__ai uint32x2_t vrecpe_u32(uint32x2_t __p0) {
+ uint32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
+ uint32x2_t __ret;
+ __ret = (uint32x2_t) __builtin_neon_vrecpe_v((int8x8_t)__rev0, 18);
+ __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai float32x2_t vrecpe_f32(float32x2_t __p0) {
+ float32x2_t __ret;
+ __ret = (float32x2_t) __builtin_neon_vrecpe_v((int8x8_t)__p0, 9);
+ return __ret;
+}
+#else
+__ai float32x2_t vrecpe_f32(float32x2_t __p0) {
+ float32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
+ float32x2_t __ret;
+ __ret = (float32x2_t) __builtin_neon_vrecpe_v((int8x8_t)__rev0, 9);
+ __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
+ return __ret;
+}
+#endif
+
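+/*
+ * vrecps / vrecpsq: reciprocal step (VRECPS). Computes 2.0 - __p0 * __p1
+ * per lane, the correction factor for one Newton-Raphson refinement of a
+ * vrecpe estimate. Illustrative refinement:
+ *   float32x2_t e = vrecpe_f32(x);
+ *   e = vmul_f32(e, vrecps_f32(x, e));   (e now approximates 1/x to more bits)
+ */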
+#ifdef __LITTLE_ENDIAN__
+__ai float32x4_t vrecpsq_f32(float32x4_t __p0, float32x4_t __p1) {
+ float32x4_t __ret;
+ __ret = (float32x4_t) __builtin_neon_vrecpsq_v((int8x16_t)__p0, (int8x16_t)__p1, 41);
+ return __ret;
+}
+#else
+__ai float32x4_t vrecpsq_f32(float32x4_t __p0, float32x4_t __p1) {
+ float32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
+ float32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
+ float32x4_t __ret;
+ __ret = (float32x4_t) __builtin_neon_vrecpsq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 41);
+ __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai float32x2_t vrecps_f32(float32x2_t __p0, float32x2_t __p1) {
+ float32x2_t __ret;
+ __ret = (float32x2_t) __builtin_neon_vrecps_v((int8x8_t)__p0, (int8x8_t)__p1, 9);
+ return __ret;
+}
+#else
+__ai float32x2_t vrecps_f32(float32x2_t __p0, float32x2_t __p1) {
+ float32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
+ float32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
+ float32x2_t __ret;
+ __ret = (float32x2_t) __builtin_neon_vrecps_v((int8x8_t)__rev0, (int8x8_t)__rev1, 9);
+ __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
+ return __ret;
+}
+#endif
+
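+/*
+ * vrev16 / vrev16q: reverse the bytes within each 16-bit half-word,
+ * implemented here as a plain shuffle with no builtin call.
+ */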
+#ifdef __LITTLE_ENDIAN__
+__ai poly8x8_t vrev16_p8(poly8x8_t __p0) {
+ poly8x8_t __ret;
+ __ret = __builtin_shufflevector(__p0, __p0, 1, 0, 3, 2, 5, 4, 7, 6);
+ return __ret;
+}
+#else
+__ai poly8x8_t vrev16_p8(poly8x8_t __p0) {
+ poly8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
+ poly8x8_t __ret;
+ __ret = __builtin_shufflevector(__rev0, __rev0, 1, 0, 3, 2, 5, 4, 7, 6);
+ __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai poly8x16_t vrev16q_p8(poly8x16_t __p0) {
+ poly8x16_t __ret;
+ __ret = __builtin_shufflevector(__p0, __p0, 1, 0, 3, 2, 5, 4, 7, 6, 9, 8, 11, 10, 13, 12, 15, 14);
+ return __ret;
+}
+#else
+__ai poly8x16_t vrev16q_p8(poly8x16_t __p0) {
+ poly8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
+ poly8x16_t __ret;
+ __ret = __builtin_shufflevector(__rev0, __rev0, 1, 0, 3, 2, 5, 4, 7, 6, 9, 8, 11, 10, 13, 12, 15, 14);
+ __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint8x16_t vrev16q_u8(uint8x16_t __p0) {
+ uint8x16_t __ret;
+ __ret = __builtin_shufflevector(__p0, __p0, 1, 0, 3, 2, 5, 4, 7, 6, 9, 8, 11, 10, 13, 12, 15, 14);
+ return __ret;
+}
+#else
+__ai uint8x16_t vrev16q_u8(uint8x16_t __p0) {
+ uint8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
+ uint8x16_t __ret;
+ __ret = __builtin_shufflevector(__rev0, __rev0, 1, 0, 3, 2, 5, 4, 7, 6, 9, 8, 11, 10, 13, 12, 15, 14);
+ __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai int8x16_t vrev16q_s8(int8x16_t __p0) {
+ int8x16_t __ret;
+ __ret = __builtin_shufflevector(__p0, __p0, 1, 0, 3, 2, 5, 4, 7, 6, 9, 8, 11, 10, 13, 12, 15, 14);
+ return __ret;
+}
+#else
+__ai int8x16_t vrev16q_s8(int8x16_t __p0) {
+ int8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
+ int8x16_t __ret;
+ __ret = __builtin_shufflevector(__rev0, __rev0, 1, 0, 3, 2, 5, 4, 7, 6, 9, 8, 11, 10, 13, 12, 15, 14);
+ __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint8x8_t vrev16_u8(uint8x8_t __p0) {
+ uint8x8_t __ret;
+ __ret = __builtin_shufflevector(__p0, __p0, 1, 0, 3, 2, 5, 4, 7, 6);
+ return __ret;
+}
+#else
+__ai uint8x8_t vrev16_u8(uint8x8_t __p0) {
+ uint8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
+ uint8x8_t __ret;
+ __ret = __builtin_shufflevector(__rev0, __rev0, 1, 0, 3, 2, 5, 4, 7, 6);
+ __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai int8x8_t vrev16_s8(int8x8_t __p0) {
+ int8x8_t __ret;
+ __ret = __builtin_shufflevector(__p0, __p0, 1, 0, 3, 2, 5, 4, 7, 6);
+ return __ret;
+}
+#else
+__ai int8x8_t vrev16_s8(int8x8_t __p0) {
+ int8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
+ int8x8_t __ret;
+ __ret = __builtin_shufflevector(__rev0, __rev0, 1, 0, 3, 2, 5, 4, 7, 6);
+ __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
+ return __ret;
+}
+#endif
+
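+/*
+ * vrev32 / vrev32q: reverse the elements within each 32-bit word, i.e.
+ * byte order for 8-bit lanes and half-word order for 16-bit lanes.
+ */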
+#ifdef __LITTLE_ENDIAN__
+__ai poly8x8_t vrev32_p8(poly8x8_t __p0) {
+ poly8x8_t __ret;
+ __ret = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0, 7, 6, 5, 4);
+ return __ret;
+}
+#else
+__ai poly8x8_t vrev32_p8(poly8x8_t __p0) {
+ poly8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
+ poly8x8_t __ret;
+ __ret = __builtin_shufflevector(__rev0, __rev0, 3, 2, 1, 0, 7, 6, 5, 4);
+ __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai poly16x4_t vrev32_p16(poly16x4_t __p0) {
+ poly16x4_t __ret;
+ __ret = __builtin_shufflevector(__p0, __p0, 1, 0, 3, 2);
+ return __ret;
+}
+#else
+__ai poly16x4_t vrev32_p16(poly16x4_t __p0) {
+ poly16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
+ poly16x4_t __ret;
+ __ret = __builtin_shufflevector(__rev0, __rev0, 1, 0, 3, 2);
+ __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai poly8x16_t vrev32q_p8(poly8x16_t __p0) {
+ poly8x16_t __ret;
+ __ret = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0, 7, 6, 5, 4, 11, 10, 9, 8, 15, 14, 13, 12);
+ return __ret;
+}
+#else
+__ai poly8x16_t vrev32q_p8(poly8x16_t __p0) {
+ poly8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
+ poly8x16_t __ret;
+ __ret = __builtin_shufflevector(__rev0, __rev0, 3, 2, 1, 0, 7, 6, 5, 4, 11, 10, 9, 8, 15, 14, 13, 12);
+ __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai poly16x8_t vrev32q_p16(poly16x8_t __p0) {
+ poly16x8_t __ret;
+ __ret = __builtin_shufflevector(__p0, __p0, 1, 0, 3, 2, 5, 4, 7, 6);
+ return __ret;
+}
+#else
+__ai poly16x8_t vrev32q_p16(poly16x8_t __p0) {
+ poly16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
+ poly16x8_t __ret;
+ __ret = __builtin_shufflevector(__rev0, __rev0, 1, 0, 3, 2, 5, 4, 7, 6);
+ __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint8x16_t vrev32q_u8(uint8x16_t __p0) {
+ uint8x16_t __ret;
+ __ret = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0, 7, 6, 5, 4, 11, 10, 9, 8, 15, 14, 13, 12);
+ return __ret;
+}
+#else
+__ai uint8x16_t vrev32q_u8(uint8x16_t __p0) {
+ uint8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
+ uint8x16_t __ret;
+ __ret = __builtin_shufflevector(__rev0, __rev0, 3, 2, 1, 0, 7, 6, 5, 4, 11, 10, 9, 8, 15, 14, 13, 12);
+ __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint16x8_t vrev32q_u16(uint16x8_t __p0) {
+ uint16x8_t __ret;
+ __ret = __builtin_shufflevector(__p0, __p0, 1, 0, 3, 2, 5, 4, 7, 6);
+ return __ret;
+}
+#else
+__ai uint16x8_t vrev32q_u16(uint16x8_t __p0) {
+ uint16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
+ uint16x8_t __ret;
+ __ret = __builtin_shufflevector(__rev0, __rev0, 1, 0, 3, 2, 5, 4, 7, 6);
+ __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai int8x16_t vrev32q_s8(int8x16_t __p0) {
+ int8x16_t __ret;
+ __ret = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0, 7, 6, 5, 4, 11, 10, 9, 8, 15, 14, 13, 12);
+ return __ret;
+}
+#else
+__ai int8x16_t vrev32q_s8(int8x16_t __p0) {
+ int8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
+ int8x16_t __ret;
+ __ret = __builtin_shufflevector(__rev0, __rev0, 3, 2, 1, 0, 7, 6, 5, 4, 11, 10, 9, 8, 15, 14, 13, 12);
+ __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai int16x8_t vrev32q_s16(int16x8_t __p0) {
+ int16x8_t __ret;
+ __ret = __builtin_shufflevector(__p0, __p0, 1, 0, 3, 2, 5, 4, 7, 6);
+ return __ret;
+}
+#else
+__ai int16x8_t vrev32q_s16(int16x8_t __p0) {
+ int16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
+ int16x8_t __ret;
+ __ret = __builtin_shufflevector(__rev0, __rev0, 1, 0, 3, 2, 5, 4, 7, 6);
+ __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint8x8_t vrev32_u8(uint8x8_t __p0) {
+ uint8x8_t __ret;
+ __ret = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0, 7, 6, 5, 4);
+ return __ret;
+}
+#else
+__ai uint8x8_t vrev32_u8(uint8x8_t __p0) {
+ uint8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
+ uint8x8_t __ret;
+ __ret = __builtin_shufflevector(__rev0, __rev0, 3, 2, 1, 0, 7, 6, 5, 4);
+ __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint16x4_t vrev32_u16(uint16x4_t __p0) {
+ uint16x4_t __ret;
+ __ret = __builtin_shufflevector(__p0, __p0, 1, 0, 3, 2);
+ return __ret;
+}
+#else
+__ai uint16x4_t vrev32_u16(uint16x4_t __p0) {
+ uint16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
+ uint16x4_t __ret;
+ __ret = __builtin_shufflevector(__rev0, __rev0, 1, 0, 3, 2);
+ __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai int8x8_t vrev32_s8(int8x8_t __p0) {
+ int8x8_t __ret;
+ __ret = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0, 7, 6, 5, 4);
+ return __ret;
+}
+#else
+__ai int8x8_t vrev32_s8(int8x8_t __p0) {
+ int8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
+ int8x8_t __ret;
+ __ret = __builtin_shufflevector(__rev0, __rev0, 3, 2, 1, 0, 7, 6, 5, 4);
+ __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai int16x4_t vrev32_s16(int16x4_t __p0) {
+ int16x4_t __ret;
+ __ret = __builtin_shufflevector(__p0, __p0, 1, 0, 3, 2);
+ return __ret;
+}
+#else
+__ai int16x4_t vrev32_s16(int16x4_t __p0) {
+ int16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
+ int16x4_t __ret;
+ __ret = __builtin_shufflevector(__rev0, __rev0, 1, 0, 3, 2);
+ __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
+ return __ret;
+}
+#endif
+
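+/*
+ * vrev64 / vrev64q: reverse the elements within each 64-bit doubleword.
+ * For the 64-bit-wide (non-q) forms this reverses the whole vector, so
+ * the big-endian branch's three shuffles compose to the same result as
+ * the little-endian branch's single shuffle.
+ */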
+#ifdef __LITTLE_ENDIAN__
+__ai poly8x8_t vrev64_p8(poly8x8_t __p0) {
+ poly8x8_t __ret;
+ __ret = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
+ return __ret;
+}
+#else
+__ai poly8x8_t vrev64_p8(poly8x8_t __p0) {
+ poly8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
+ poly8x8_t __ret;
+ __ret = __builtin_shufflevector(__rev0, __rev0, 7, 6, 5, 4, 3, 2, 1, 0);
+ __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai poly16x4_t vrev64_p16(poly16x4_t __p0) {
+ poly16x4_t __ret;
+ __ret = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
+ return __ret;
+}
+#else
+__ai poly16x4_t vrev64_p16(poly16x4_t __p0) {
+ poly16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
+ poly16x4_t __ret;
+ __ret = __builtin_shufflevector(__rev0, __rev0, 3, 2, 1, 0);
+ __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai poly8x16_t vrev64q_p8(poly8x16_t __p0) {
+ poly8x16_t __ret;
+ __ret = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0, 15, 14, 13, 12, 11, 10, 9, 8);
+ return __ret;
+}
+#else
+__ai poly8x16_t vrev64q_p8(poly8x16_t __p0) {
+ poly8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
+ poly8x16_t __ret;
+ __ret = __builtin_shufflevector(__rev0, __rev0, 7, 6, 5, 4, 3, 2, 1, 0, 15, 14, 13, 12, 11, 10, 9, 8);
+ __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai poly16x8_t vrev64q_p16(poly16x8_t __p0) {
+ poly16x8_t __ret;
+ __ret = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0, 7, 6, 5, 4);
+ return __ret;
+}
+#else
+__ai poly16x8_t vrev64q_p16(poly16x8_t __p0) {
+ poly16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
+ poly16x8_t __ret;
+ __ret = __builtin_shufflevector(__rev0, __rev0, 3, 2, 1, 0, 7, 6, 5, 4);
+ __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint8x16_t vrev64q_u8(uint8x16_t __p0) {
+ uint8x16_t __ret;
+ __ret = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0, 15, 14, 13, 12, 11, 10, 9, 8);
+ return __ret;
+}
+#else
+__ai uint8x16_t vrev64q_u8(uint8x16_t __p0) {
+ uint8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
+ uint8x16_t __ret;
+ __ret = __builtin_shufflevector(__rev0, __rev0, 7, 6, 5, 4, 3, 2, 1, 0, 15, 14, 13, 12, 11, 10, 9, 8);
+ __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint32x4_t vrev64q_u32(uint32x4_t __p0) {
+ uint32x4_t __ret;
+ __ret = __builtin_shufflevector(__p0, __p0, 1, 0, 3, 2);
+ return __ret;
+}
+#else
+__ai uint32x4_t vrev64q_u32(uint32x4_t __p0) {
+ uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
+ uint32x4_t __ret;
+ __ret = __builtin_shufflevector(__rev0, __rev0, 1, 0, 3, 2);
+ __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint16x8_t vrev64q_u16(uint16x8_t __p0) {
+ uint16x8_t __ret;
+ __ret = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0, 7, 6, 5, 4);
+ return __ret;
+}
+#else
+__ai uint16x8_t vrev64q_u16(uint16x8_t __p0) {
+ uint16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
+ uint16x8_t __ret;
+ __ret = __builtin_shufflevector(__rev0, __rev0, 3, 2, 1, 0, 7, 6, 5, 4);
+ __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai int8x16_t vrev64q_s8(int8x16_t __p0) {
+ int8x16_t __ret;
+ __ret = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0, 15, 14, 13, 12, 11, 10, 9, 8);
+ return __ret;
+}
+#else
+__ai int8x16_t vrev64q_s8(int8x16_t __p0) {
+ int8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
+ int8x16_t __ret;
+ __ret = __builtin_shufflevector(__rev0, __rev0, 7, 6, 5, 4, 3, 2, 1, 0, 15, 14, 13, 12, 11, 10, 9, 8);
+ __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai float32x4_t vrev64q_f32(float32x4_t __p0) {
+ float32x4_t __ret;
+ __ret = __builtin_shufflevector(__p0, __p0, 1, 0, 3, 2);
+ return __ret;
+}
+#else
+__ai float32x4_t vrev64q_f32(float32x4_t __p0) {
+ float32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
+ float32x4_t __ret;
+ __ret = __builtin_shufflevector(__rev0, __rev0, 1, 0, 3, 2);
+ __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai int32x4_t vrev64q_s32(int32x4_t __p0) {
+ int32x4_t __ret;
+ __ret = __builtin_shufflevector(__p0, __p0, 1, 0, 3, 2);
+ return __ret;
+}
+#else
+__ai int32x4_t vrev64q_s32(int32x4_t __p0) {
+ int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
+ int32x4_t __ret;
+ __ret = __builtin_shufflevector(__rev0, __rev0, 1, 0, 3, 2);
+ __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai int16x8_t vrev64q_s16(int16x8_t __p0) {
+ int16x8_t __ret;
+ __ret = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0, 7, 6, 5, 4);
+ return __ret;
+}
+#else
+__ai int16x8_t vrev64q_s16(int16x8_t __p0) {
+ int16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
+ int16x8_t __ret;
+ __ret = __builtin_shufflevector(__rev0, __rev0, 3, 2, 1, 0, 7, 6, 5, 4);
+ __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint8x8_t vrev64_u8(uint8x8_t __p0) {
+ uint8x8_t __ret;
+ __ret = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
+ return __ret;
+}
+#else
+__ai uint8x8_t vrev64_u8(uint8x8_t __p0) {
+ uint8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
+ uint8x8_t __ret;
+ __ret = __builtin_shufflevector(__rev0, __rev0, 7, 6, 5, 4, 3, 2, 1, 0);
+ __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint32x2_t vrev64_u32(uint32x2_t __p0) {
+ uint32x2_t __ret;
+ __ret = __builtin_shufflevector(__p0, __p0, 1, 0);
+ return __ret;
+}
+#else
+__ai uint32x2_t vrev64_u32(uint32x2_t __p0) {
+ uint32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
+ uint32x2_t __ret;
+ __ret = __builtin_shufflevector(__rev0, __rev0, 1, 0);
+ __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint16x4_t vrev64_u16(uint16x4_t __p0) {
+ uint16x4_t __ret;
+ __ret = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
+ return __ret;
+}
+#else
+__ai uint16x4_t vrev64_u16(uint16x4_t __p0) {
+ uint16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
+ uint16x4_t __ret;
+ __ret = __builtin_shufflevector(__rev0, __rev0, 3, 2, 1, 0);
+ __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai int8x8_t vrev64_s8(int8x8_t __p0) {
+ int8x8_t __ret;
+ __ret = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
+ return __ret;
+}
+#else
+__ai int8x8_t vrev64_s8(int8x8_t __p0) {
+ int8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
+ int8x8_t __ret;
+ __ret = __builtin_shufflevector(__rev0, __rev0, 7, 6, 5, 4, 3, 2, 1, 0);
+ __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai float32x2_t vrev64_f32(float32x2_t __p0) {
+ float32x2_t __ret;
+ __ret = __builtin_shufflevector(__p0, __p0, 1, 0);
+ return __ret;
+}
+#else
+__ai float32x2_t vrev64_f32(float32x2_t __p0) {
+ float32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
+ float32x2_t __ret;
+ __ret = __builtin_shufflevector(__rev0, __rev0, 1, 0);
+ __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai int32x2_t vrev64_s32(int32x2_t __p0) {
+ int32x2_t __ret;
+ __ret = __builtin_shufflevector(__p0, __p0, 1, 0);
+ return __ret;
+}
+#else
+__ai int32x2_t vrev64_s32(int32x2_t __p0) {
+ int32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
+ int32x2_t __ret;
+ __ret = __builtin_shufflevector(__rev0, __rev0, 1, 0);
+ __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai int16x4_t vrev64_s16(int16x4_t __p0) {
+ int16x4_t __ret;
+ __ret = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
+ return __ret;
+}
+#else
+__ai int16x4_t vrev64_s16(int16x4_t __p0) {
+ int16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
+ int16x4_t __ret;
+ __ret = __builtin_shufflevector(__rev0, __rev0, 3, 2, 1, 0);
+ __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
+ return __ret;
+}
+#endif
+
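+/* vrhadd/vrhaddq: rounding halving add. Each lane computes (a + b + 1) >> 1 in
+ * a widened intermediate, so lanes holding 5 and 6 average to 6, where the
+ * non-rounding vhadd would truncate to 5. */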
+#ifdef __LITTLE_ENDIAN__
+__ai uint8x16_t vrhaddq_u8(uint8x16_t __p0, uint8x16_t __p1) {
+ uint8x16_t __ret;
+ __ret = (uint8x16_t) __builtin_neon_vrhaddq_v((int8x16_t)__p0, (int8x16_t)__p1, 48);
+ return __ret;
+}
+#else
+__ai uint8x16_t vrhaddq_u8(uint8x16_t __p0, uint8x16_t __p1) {
+ uint8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
+ uint8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
+ uint8x16_t __ret;
+ __ret = (uint8x16_t) __builtin_neon_vrhaddq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 48);
+ __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint32x4_t vrhaddq_u32(uint32x4_t __p0, uint32x4_t __p1) {
+ uint32x4_t __ret;
+ __ret = (uint32x4_t) __builtin_neon_vrhaddq_v((int8x16_t)__p0, (int8x16_t)__p1, 50);
+ return __ret;
+}
+#else
+__ai uint32x4_t vrhaddq_u32(uint32x4_t __p0, uint32x4_t __p1) {
+ uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
+ uint32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
+ uint32x4_t __ret;
+ __ret = (uint32x4_t) __builtin_neon_vrhaddq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 50);
+ __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint16x8_t vrhaddq_u16(uint16x8_t __p0, uint16x8_t __p1) {
+ uint16x8_t __ret;
+ __ret = (uint16x8_t) __builtin_neon_vrhaddq_v((int8x16_t)__p0, (int8x16_t)__p1, 49);
+ return __ret;
+}
+#else
+__ai uint16x8_t vrhaddq_u16(uint16x8_t __p0, uint16x8_t __p1) {
+ uint16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
+ uint16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
+ uint16x8_t __ret;
+ __ret = (uint16x8_t) __builtin_neon_vrhaddq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 49);
+ __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai int8x16_t vrhaddq_s8(int8x16_t __p0, int8x16_t __p1) {
+ int8x16_t __ret;
+ __ret = (int8x16_t) __builtin_neon_vrhaddq_v((int8x16_t)__p0, (int8x16_t)__p1, 32);
+ return __ret;
+}
+#else
+__ai int8x16_t vrhaddq_s8(int8x16_t __p0, int8x16_t __p1) {
+ int8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
+ int8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
+ int8x16_t __ret;
+ __ret = (int8x16_t) __builtin_neon_vrhaddq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 32);
+ __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai int32x4_t vrhaddq_s32(int32x4_t __p0, int32x4_t __p1) {
+ int32x4_t __ret;
+ __ret = (int32x4_t) __builtin_neon_vrhaddq_v((int8x16_t)__p0, (int8x16_t)__p1, 34);
+ return __ret;
+}
+#else
+__ai int32x4_t vrhaddq_s32(int32x4_t __p0, int32x4_t __p1) {
+ int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
+ int32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
+ int32x4_t __ret;
+ __ret = (int32x4_t) __builtin_neon_vrhaddq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 34);
+ __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai int16x8_t vrhaddq_s16(int16x8_t __p0, int16x8_t __p1) {
+ int16x8_t __ret;
+ __ret = (int16x8_t) __builtin_neon_vrhaddq_v((int8x16_t)__p0, (int8x16_t)__p1, 33);
+ return __ret;
+}
+#else
+__ai int16x8_t vrhaddq_s16(int16x8_t __p0, int16x8_t __p1) {
+ int16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
+ int16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
+ int16x8_t __ret;
+ __ret = (int16x8_t) __builtin_neon_vrhaddq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 33);
+ __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint8x8_t vrhadd_u8(uint8x8_t __p0, uint8x8_t __p1) {
+ uint8x8_t __ret;
+ __ret = (uint8x8_t) __builtin_neon_vrhadd_v((int8x8_t)__p0, (int8x8_t)__p1, 16);
+ return __ret;
+}
+#else
+__ai uint8x8_t vrhadd_u8(uint8x8_t __p0, uint8x8_t __p1) {
+ uint8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
+ uint8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
+ uint8x8_t __ret;
+ __ret = (uint8x8_t) __builtin_neon_vrhadd_v((int8x8_t)__rev0, (int8x8_t)__rev1, 16);
+ __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint32x2_t vrhadd_u32(uint32x2_t __p0, uint32x2_t __p1) {
+ uint32x2_t __ret;
+ __ret = (uint32x2_t) __builtin_neon_vrhadd_v((int8x8_t)__p0, (int8x8_t)__p1, 18);
+ return __ret;
+}
+#else
+__ai uint32x2_t vrhadd_u32(uint32x2_t __p0, uint32x2_t __p1) {
+ uint32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
+ uint32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
+ uint32x2_t __ret;
+ __ret = (uint32x2_t) __builtin_neon_vrhadd_v((int8x8_t)__rev0, (int8x8_t)__rev1, 18);
+ __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint16x4_t vrhadd_u16(uint16x4_t __p0, uint16x4_t __p1) {
+ uint16x4_t __ret;
+ __ret = (uint16x4_t) __builtin_neon_vrhadd_v((int8x8_t)__p0, (int8x8_t)__p1, 17);
+ return __ret;
+}
+#else
+__ai uint16x4_t vrhadd_u16(uint16x4_t __p0, uint16x4_t __p1) {
+ uint16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
+ uint16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
+ uint16x4_t __ret;
+ __ret = (uint16x4_t) __builtin_neon_vrhadd_v((int8x8_t)__rev0, (int8x8_t)__rev1, 17);
+ __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai int8x8_t vrhadd_s8(int8x8_t __p0, int8x8_t __p1) {
+ int8x8_t __ret;
+ __ret = (int8x8_t) __builtin_neon_vrhadd_v((int8x8_t)__p0, (int8x8_t)__p1, 0);
+ return __ret;
+}
+#else
+__ai int8x8_t vrhadd_s8(int8x8_t __p0, int8x8_t __p1) {
+ int8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
+ int8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
+ int8x8_t __ret;
+ __ret = (int8x8_t) __builtin_neon_vrhadd_v((int8x8_t)__rev0, (int8x8_t)__rev1, 0);
+ __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai int32x2_t vrhadd_s32(int32x2_t __p0, int32x2_t __p1) {
+ int32x2_t __ret;
+ __ret = (int32x2_t) __builtin_neon_vrhadd_v((int8x8_t)__p0, (int8x8_t)__p1, 2);
+ return __ret;
+}
+#else
+__ai int32x2_t vrhadd_s32(int32x2_t __p0, int32x2_t __p1) {
+ int32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
+ int32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
+ int32x2_t __ret;
+ __ret = (int32x2_t) __builtin_neon_vrhadd_v((int8x8_t)__rev0, (int8x8_t)__rev1, 2);
+ __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai int16x4_t vrhadd_s16(int16x4_t __p0, int16x4_t __p1) {
+ int16x4_t __ret;
+ __ret = (int16x4_t) __builtin_neon_vrhadd_v((int8x8_t)__p0, (int8x8_t)__p1, 1);
+ return __ret;
+}
+#else
+__ai int16x4_t vrhadd_s16(int16x4_t __p0, int16x4_t __p1) {
+ int16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
+ int16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
+ int16x4_t __ret;
+ __ret = (int16x4_t) __builtin_neon_vrhadd_v((int8x8_t)__rev0, (int8x8_t)__rev1, 1);
+ __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
+ return __ret;
+}
+#endif
+
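+/* vrshl/vrshlq: rounding shift left. Each lane of the first operand is shifted
+ * by the signed per-lane amount in the second operand; negative amounts shift
+ * right with rounding of the bits shifted out. */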
+#ifdef __LITTLE_ENDIAN__
+__ai uint8x16_t vrshlq_u8(uint8x16_t __p0, int8x16_t __p1) {
+ uint8x16_t __ret;
+ __ret = (uint8x16_t) __builtin_neon_vrshlq_v((int8x16_t)__p0, (int8x16_t)__p1, 48);
+ return __ret;
+}
+#else
+__ai uint8x16_t vrshlq_u8(uint8x16_t __p0, int8x16_t __p1) {
+ uint8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
+ int8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
+ uint8x16_t __ret;
+ __ret = (uint8x16_t) __builtin_neon_vrshlq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 48);
+ __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint32x4_t vrshlq_u32(uint32x4_t __p0, int32x4_t __p1) {
+ uint32x4_t __ret;
+ __ret = (uint32x4_t) __builtin_neon_vrshlq_v((int8x16_t)__p0, (int8x16_t)__p1, 50);
+ return __ret;
+}
+#else
+__ai uint32x4_t vrshlq_u32(uint32x4_t __p0, int32x4_t __p1) {
+ uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
+ int32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
+ uint32x4_t __ret;
+ __ret = (uint32x4_t) __builtin_neon_vrshlq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 50);
+ __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint64x2_t vrshlq_u64(uint64x2_t __p0, int64x2_t __p1) {
+ uint64x2_t __ret;
+ __ret = (uint64x2_t) __builtin_neon_vrshlq_v((int8x16_t)__p0, (int8x16_t)__p1, 51);
+ return __ret;
+}
+#else
+__ai uint64x2_t vrshlq_u64(uint64x2_t __p0, int64x2_t __p1) {
+ uint64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
+ int64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
+ uint64x2_t __ret;
+ __ret = (uint64x2_t) __builtin_neon_vrshlq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 51);
+ __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint16x8_t vrshlq_u16(uint16x8_t __p0, int16x8_t __p1) {
+ uint16x8_t __ret;
+ __ret = (uint16x8_t) __builtin_neon_vrshlq_v((int8x16_t)__p0, (int8x16_t)__p1, 49);
+ return __ret;
+}
+#else
+__ai uint16x8_t vrshlq_u16(uint16x8_t __p0, int16x8_t __p1) {
+ uint16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
+ int16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
+ uint16x8_t __ret;
+ __ret = (uint16x8_t) __builtin_neon_vrshlq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 49);
+ __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai int8x16_t vrshlq_s8(int8x16_t __p0, int8x16_t __p1) {
+ int8x16_t __ret;
+ __ret = (int8x16_t) __builtin_neon_vrshlq_v((int8x16_t)__p0, (int8x16_t)__p1, 32);
+ return __ret;
+}
+#else
+__ai int8x16_t vrshlq_s8(int8x16_t __p0, int8x16_t __p1) {
+ int8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
+ int8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
+ int8x16_t __ret;
+ __ret = (int8x16_t) __builtin_neon_vrshlq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 32);
+ __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai int32x4_t vrshlq_s32(int32x4_t __p0, int32x4_t __p1) {
+ int32x4_t __ret;
+ __ret = (int32x4_t) __builtin_neon_vrshlq_v((int8x16_t)__p0, (int8x16_t)__p1, 34);
+ return __ret;
+}
+#else
+__ai int32x4_t vrshlq_s32(int32x4_t __p0, int32x4_t __p1) {
+ int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
+ int32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
+ int32x4_t __ret;
+ __ret = (int32x4_t) __builtin_neon_vrshlq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 34);
+ __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai int64x2_t vrshlq_s64(int64x2_t __p0, int64x2_t __p1) {
+ int64x2_t __ret;
+ __ret = (int64x2_t) __builtin_neon_vrshlq_v((int8x16_t)__p0, (int8x16_t)__p1, 35);
+ return __ret;
+}
+#else
+__ai int64x2_t vrshlq_s64(int64x2_t __p0, int64x2_t __p1) {
+ int64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
+ int64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
+ int64x2_t __ret;
+ __ret = (int64x2_t) __builtin_neon_vrshlq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 35);
+ __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai int16x8_t vrshlq_s16(int16x8_t __p0, int16x8_t __p1) {
+ int16x8_t __ret;
+ __ret = (int16x8_t) __builtin_neon_vrshlq_v((int8x16_t)__p0, (int8x16_t)__p1, 33);
+ return __ret;
+}
+#else
+__ai int16x8_t vrshlq_s16(int16x8_t __p0, int16x8_t __p1) {
+ int16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
+ int16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
+ int16x8_t __ret;
+ __ret = (int16x8_t) __builtin_neon_vrshlq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 33);
+ __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint8x8_t vrshl_u8(uint8x8_t __p0, int8x8_t __p1) {
+ uint8x8_t __ret;
+ __ret = (uint8x8_t) __builtin_neon_vrshl_v((int8x8_t)__p0, (int8x8_t)__p1, 16);
+ return __ret;
+}
+#else
+__ai uint8x8_t vrshl_u8(uint8x8_t __p0, int8x8_t __p1) {
+ uint8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
+ int8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
+ uint8x8_t __ret;
+ __ret = (uint8x8_t) __builtin_neon_vrshl_v((int8x8_t)__rev0, (int8x8_t)__rev1, 16);
+ __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint32x2_t vrshl_u32(uint32x2_t __p0, int32x2_t __p1) {
+ uint32x2_t __ret;
+ __ret = (uint32x2_t) __builtin_neon_vrshl_v((int8x8_t)__p0, (int8x8_t)__p1, 18);
+ return __ret;
+}
+#else
+__ai uint32x2_t vrshl_u32(uint32x2_t __p0, int32x2_t __p1) {
+ uint32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
+ int32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
+ uint32x2_t __ret;
+ __ret = (uint32x2_t) __builtin_neon_vrshl_v((int8x8_t)__rev0, (int8x8_t)__rev1, 18);
+ __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint64x1_t vrshl_u64(uint64x1_t __p0, int64x1_t __p1) {
+ uint64x1_t __ret;
+ __ret = (uint64x1_t) __builtin_neon_vrshl_v((int8x8_t)__p0, (int8x8_t)__p1, 19);
+ return __ret;
+}
+#else
+__ai uint64x1_t vrshl_u64(uint64x1_t __p0, int64x1_t __p1) {
+ uint64x1_t __ret;
+ __ret = (uint64x1_t) __builtin_neon_vrshl_v((int8x8_t)__p0, (int8x8_t)__p1, 19);
+ return __ret;
+}
+#endif
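+/* For the 64x1 variants above and below, both branches are identical: a
+ * single-element vector has no lanes to reorder, so the big-endian body needs
+ * no shuffles. */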
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint16x4_t vrshl_u16(uint16x4_t __p0, int16x4_t __p1) {
+ uint16x4_t __ret;
+ __ret = (uint16x4_t) __builtin_neon_vrshl_v((int8x8_t)__p0, (int8x8_t)__p1, 17);
+ return __ret;
+}
+#else
+__ai uint16x4_t vrshl_u16(uint16x4_t __p0, int16x4_t __p1) {
+ uint16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
+ int16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
+ uint16x4_t __ret;
+ __ret = (uint16x4_t) __builtin_neon_vrshl_v((int8x8_t)__rev0, (int8x8_t)__rev1, 17);
+ __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai int8x8_t vrshl_s8(int8x8_t __p0, int8x8_t __p1) {
+ int8x8_t __ret;
+ __ret = (int8x8_t) __builtin_neon_vrshl_v((int8x8_t)__p0, (int8x8_t)__p1, 0);
+ return __ret;
+}
+#else
+__ai int8x8_t vrshl_s8(int8x8_t __p0, int8x8_t __p1) {
+ int8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
+ int8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
+ int8x8_t __ret;
+ __ret = (int8x8_t) __builtin_neon_vrshl_v((int8x8_t)__rev0, (int8x8_t)__rev1, 0);
+ __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai int32x2_t vrshl_s32(int32x2_t __p0, int32x2_t __p1) {
+ int32x2_t __ret;
+ __ret = (int32x2_t) __builtin_neon_vrshl_v((int8x8_t)__p0, (int8x8_t)__p1, 2);
+ return __ret;
+}
+#else
+__ai int32x2_t vrshl_s32(int32x2_t __p0, int32x2_t __p1) {
+ int32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
+ int32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
+ int32x2_t __ret;
+ __ret = (int32x2_t) __builtin_neon_vrshl_v((int8x8_t)__rev0, (int8x8_t)__rev1, 2);
+ __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai int64x1_t vrshl_s64(int64x1_t __p0, int64x1_t __p1) {
+ int64x1_t __ret;
+ __ret = (int64x1_t) __builtin_neon_vrshl_v((int8x8_t)__p0, (int8x8_t)__p1, 3);
+ return __ret;
+}
+#else
+__ai int64x1_t vrshl_s64(int64x1_t __p0, int64x1_t __p1) {
+ int64x1_t __ret;
+ __ret = (int64x1_t) __builtin_neon_vrshl_v((int8x8_t)__p0, (int8x8_t)__p1, 3);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai int16x4_t vrshl_s16(int16x4_t __p0, int16x4_t __p1) {
+ int16x4_t __ret;
+ __ret = (int16x4_t) __builtin_neon_vrshl_v((int8x8_t)__p0, (int8x8_t)__p1, 1);
+ return __ret;
+}
+#else
+__ai int16x4_t vrshl_s16(int16x4_t __p0, int16x4_t __p1) {
+ int16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
+ int16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
+ int16x4_t __ret;
+ __ret = (int16x4_t) __builtin_neon_vrshl_v((int8x8_t)__rev0, (int8x8_t)__rev1, 1);
+ __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
+ return __ret;
+}
+#endif
+
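+/* vrshr_n/vrshrq_n: rounding shift right by a constant n (1..bit-width). Each
+ * lane computes (a + (1 << (n - 1))) >> n; for example, 7 shifted right by 2
+ * with rounding gives (7 + 2) >> 2 = 2. */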
+#ifdef __LITTLE_ENDIAN__
+#define vrshrq_n_u8(__p0, __p1) __extension__ ({ \
+ uint8x16_t __s0 = __p0; \
+ uint8x16_t __ret; \
+ __ret = (uint8x16_t) __builtin_neon_vrshrq_n_v((int8x16_t)__s0, __p1, 48); \
+ __ret; \
+})
+#else
+#define vrshrq_n_u8(__p0, __p1) __extension__ ({ \
+ uint8x16_t __s0 = __p0; \
+ uint8x16_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
+ uint8x16_t __ret; \
+ __ret = (uint8x16_t) __builtin_neon_vrshrq_n_v((int8x16_t)__rev0, __p1, 48); \
+ __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
+ __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vrshrq_n_u32(__p0, __p1) __extension__ ({ \
+ uint32x4_t __s0 = __p0; \
+ uint32x4_t __ret; \
+ __ret = (uint32x4_t) __builtin_neon_vrshrq_n_v((int8x16_t)__s0, __p1, 50); \
+ __ret; \
+})
+#else
+#define vrshrq_n_u32(__p0, __p1) __extension__ ({ \
+ uint32x4_t __s0 = __p0; \
+ uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
+ uint32x4_t __ret; \
+ __ret = (uint32x4_t) __builtin_neon_vrshrq_n_v((int8x16_t)__rev0, __p1, 50); \
+ __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
+ __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vrshrq_n_u64(__p0, __p1) __extension__ ({ \
+ uint64x2_t __s0 = __p0; \
+ uint64x2_t __ret; \
+ __ret = (uint64x2_t) __builtin_neon_vrshrq_n_v((int8x16_t)__s0, __p1, 51); \
+ __ret; \
+})
+#else
+#define vrshrq_n_u64(__p0, __p1) __extension__ ({ \
+ uint64x2_t __s0 = __p0; \
+ uint64x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
+ uint64x2_t __ret; \
+ __ret = (uint64x2_t) __builtin_neon_vrshrq_n_v((int8x16_t)__rev0, __p1, 51); \
+ __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
+ __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vrshrq_n_u16(__p0, __p1) __extension__ ({ \
+ uint16x8_t __s0 = __p0; \
+ uint16x8_t __ret; \
+ __ret = (uint16x8_t) __builtin_neon_vrshrq_n_v((int8x16_t)__s0, __p1, 49); \
+ __ret; \
+})
+#else
+#define vrshrq_n_u16(__p0, __p1) __extension__ ({ \
+ uint16x8_t __s0 = __p0; \
+ uint16x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
+ uint16x8_t __ret; \
+ __ret = (uint16x8_t) __builtin_neon_vrshrq_n_v((int8x16_t)__rev0, __p1, 49); \
+ __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
+ __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vrshrq_n_s8(__p0, __p1) __extension__ ({ \
+ int8x16_t __s0 = __p0; \
+ int8x16_t __ret; \
+ __ret = (int8x16_t) __builtin_neon_vrshrq_n_v((int8x16_t)__s0, __p1, 32); \
+ __ret; \
+})
+#else
+#define vrshrq_n_s8(__p0, __p1) __extension__ ({ \
+ int8x16_t __s0 = __p0; \
+ int8x16_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
+ int8x16_t __ret; \
+ __ret = (int8x16_t) __builtin_neon_vrshrq_n_v((int8x16_t)__rev0, __p1, 32); \
+ __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
+ __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vrshrq_n_s32(__p0, __p1) __extension__ ({ \
+ int32x4_t __s0 = __p0; \
+ int32x4_t __ret; \
+ __ret = (int32x4_t) __builtin_neon_vrshrq_n_v((int8x16_t)__s0, __p1, 34); \
+ __ret; \
+})
+#else
+#define vrshrq_n_s32(__p0, __p1) __extension__ ({ \
+ int32x4_t __s0 = __p0; \
+ int32x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
+ int32x4_t __ret; \
+ __ret = (int32x4_t) __builtin_neon_vrshrq_n_v((int8x16_t)__rev0, __p1, 34); \
+ __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
+ __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vrshrq_n_s64(__p0, __p1) __extension__ ({ \
+ int64x2_t __s0 = __p0; \
+ int64x2_t __ret; \
+ __ret = (int64x2_t) __builtin_neon_vrshrq_n_v((int8x16_t)__s0, __p1, 35); \
+ __ret; \
+})
+#else
+#define vrshrq_n_s64(__p0, __p1) __extension__ ({ \
+ int64x2_t __s0 = __p0; \
+ int64x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
+ int64x2_t __ret; \
+ __ret = (int64x2_t) __builtin_neon_vrshrq_n_v((int8x16_t)__rev0, __p1, 35); \
+ __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
+ __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vrshrq_n_s16(__p0, __p1) __extension__ ({ \
+ int16x8_t __s0 = __p0; \
+ int16x8_t __ret; \
+ __ret = (int16x8_t) __builtin_neon_vrshrq_n_v((int8x16_t)__s0, __p1, 33); \
+ __ret; \
+})
+#else
+#define vrshrq_n_s16(__p0, __p1) __extension__ ({ \
+ int16x8_t __s0 = __p0; \
+ int16x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
+ int16x8_t __ret; \
+ __ret = (int16x8_t) __builtin_neon_vrshrq_n_v((int8x16_t)__rev0, __p1, 33); \
+ __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
+ __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vrshr_n_u8(__p0, __p1) __extension__ ({ \
+ uint8x8_t __s0 = __p0; \
+ uint8x8_t __ret; \
+ __ret = (uint8x8_t) __builtin_neon_vrshr_n_v((int8x8_t)__s0, __p1, 16); \
+ __ret; \
+})
+#else
+#define vrshr_n_u8(__p0, __p1) __extension__ ({ \
+ uint8x8_t __s0 = __p0; \
+ uint8x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
+ uint8x8_t __ret; \
+ __ret = (uint8x8_t) __builtin_neon_vrshr_n_v((int8x8_t)__rev0, __p1, 16); \
+ __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
+ __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vrshr_n_u32(__p0, __p1) __extension__ ({ \
+ uint32x2_t __s0 = __p0; \
+ uint32x2_t __ret; \
+ __ret = (uint32x2_t) __builtin_neon_vrshr_n_v((int8x8_t)__s0, __p1, 18); \
+ __ret; \
+})
+#else
+#define vrshr_n_u32(__p0, __p1) __extension__ ({ \
+ uint32x2_t __s0 = __p0; \
+ uint32x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
+ uint32x2_t __ret; \
+ __ret = (uint32x2_t) __builtin_neon_vrshr_n_v((int8x8_t)__rev0, __p1, 18); \
+ __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
+ __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vrshr_n_u64(__p0, __p1) __extension__ ({ \
+ uint64x1_t __s0 = __p0; \
+ uint64x1_t __ret; \
+ __ret = (uint64x1_t) __builtin_neon_vrshr_n_v((int8x8_t)__s0, __p1, 19); \
+ __ret; \
+})
+#else
+#define vrshr_n_u64(__p0, __p1) __extension__ ({ \
+ uint64x1_t __s0 = __p0; \
+ uint64x1_t __ret; \
+ __ret = (uint64x1_t) __builtin_neon_vrshr_n_v((int8x8_t)__s0, __p1, 19); \
+ __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vrshr_n_u16(__p0, __p1) __extension__ ({ \
+ uint16x4_t __s0 = __p0; \
+ uint16x4_t __ret; \
+ __ret = (uint16x4_t) __builtin_neon_vrshr_n_v((int8x8_t)__s0, __p1, 17); \
+ __ret; \
+})
+#else
+#define vrshr_n_u16(__p0, __p1) __extension__ ({ \
+ uint16x4_t __s0 = __p0; \
+ uint16x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
+ uint16x4_t __ret; \
+ __ret = (uint16x4_t) __builtin_neon_vrshr_n_v((int8x8_t)__rev0, __p1, 17); \
+ __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
+ __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vrshr_n_s8(__p0, __p1) __extension__ ({ \
+ int8x8_t __s0 = __p0; \
+ int8x8_t __ret; \
+ __ret = (int8x8_t) __builtin_neon_vrshr_n_v((int8x8_t)__s0, __p1, 0); \
+ __ret; \
+})
+#else
+#define vrshr_n_s8(__p0, __p1) __extension__ ({ \
+ int8x8_t __s0 = __p0; \
+ int8x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
+ int8x8_t __ret; \
+ __ret = (int8x8_t) __builtin_neon_vrshr_n_v((int8x8_t)__rev0, __p1, 0); \
+ __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
+ __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vrshr_n_s32(__p0, __p1) __extension__ ({ \
+ int32x2_t __s0 = __p0; \
+ int32x2_t __ret; \
+ __ret = (int32x2_t) __builtin_neon_vrshr_n_v((int8x8_t)__s0, __p1, 2); \
+ __ret; \
+})
+#else
+#define vrshr_n_s32(__p0, __p1) __extension__ ({ \
+ int32x2_t __s0 = __p0; \
+ int32x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
+ int32x2_t __ret; \
+ __ret = (int32x2_t) __builtin_neon_vrshr_n_v((int8x8_t)__rev0, __p1, 2); \
+ __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
+ __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vrshr_n_s64(__p0, __p1) __extension__ ({ \
+ int64x1_t __s0 = __p0; \
+ int64x1_t __ret; \
+ __ret = (int64x1_t) __builtin_neon_vrshr_n_v((int8x8_t)__s0, __p1, 3); \
+ __ret; \
+})
+#else
+#define vrshr_n_s64(__p0, __p1) __extension__ ({ \
+ int64x1_t __s0 = __p0; \
+ int64x1_t __ret; \
+ __ret = (int64x1_t) __builtin_neon_vrshr_n_v((int8x8_t)__s0, __p1, 3); \
+ __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vrshr_n_s16(__p0, __p1) __extension__ ({ \
+ int16x4_t __s0 = __p0; \
+ int16x4_t __ret; \
+ __ret = (int16x4_t) __builtin_neon_vrshr_n_v((int8x8_t)__s0, __p1, 1); \
+ __ret; \
+})
+#else
+#define vrshr_n_s16(__p0, __p1) __extension__ ({ \
+ int16x4_t __s0 = __p0; \
+ int16x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
+ int16x4_t __ret; \
+ __ret = (int16x4_t) __builtin_neon_vrshr_n_v((int8x8_t)__rev0, __p1, 1); \
+ __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
+ __ret; \
+})
+#endif
+
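+/* vrshrn_n: rounding shift right by a constant, then narrow each lane to half
+ * its width. The additional __noswap_ forms perform the operation without any
+ * endian shuffles; they exist so that big-endian bodies of other intrinsics,
+ * whose operands are already in builtin lane order, can reuse the operation
+ * without swapping twice. */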
+#ifdef __LITTLE_ENDIAN__
+#define vrshrn_n_u32(__p0, __p1) __extension__ ({ \
+ uint32x4_t __s0 = __p0; \
+ uint16x4_t __ret; \
+ __ret = (uint16x4_t) __builtin_neon_vrshrn_n_v((int8x16_t)__s0, __p1, 17); \
+ __ret; \
+})
+#else
+#define vrshrn_n_u32(__p0, __p1) __extension__ ({ \
+ uint32x4_t __s0 = __p0; \
+ uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
+ uint16x4_t __ret; \
+ __ret = (uint16x4_t) __builtin_neon_vrshrn_n_v((int8x16_t)__rev0, __p1, 17); \
+ __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
+ __ret; \
+})
+#define __noswap_vrshrn_n_u32(__p0, __p1) __extension__ ({ \
+ uint32x4_t __s0 = __p0; \
+ uint16x4_t __ret; \
+ __ret = (uint16x4_t) __builtin_neon_vrshrn_n_v((int8x16_t)__s0, __p1, 17); \
+ __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vrshrn_n_u64(__p0, __p1) __extension__ ({ \
+ uint64x2_t __s0 = __p0; \
+ uint32x2_t __ret; \
+ __ret = (uint32x2_t) __builtin_neon_vrshrn_n_v((int8x16_t)__s0, __p1, 18); \
+ __ret; \
+})
+#else
+#define vrshrn_n_u64(__p0, __p1) __extension__ ({ \
+ uint64x2_t __s0 = __p0; \
+ uint64x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
+ uint32x2_t __ret; \
+ __ret = (uint32x2_t) __builtin_neon_vrshrn_n_v((int8x16_t)__rev0, __p1, 18); \
+ __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
+ __ret; \
+})
+#define __noswap_vrshrn_n_u64(__p0, __p1) __extension__ ({ \
+ uint64x2_t __s0 = __p0; \
+ uint32x2_t __ret; \
+ __ret = (uint32x2_t) __builtin_neon_vrshrn_n_v((int8x16_t)__s0, __p1, 18); \
+ __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vrshrn_n_u16(__p0, __p1) __extension__ ({ \
+ uint16x8_t __s0 = __p0; \
+ uint8x8_t __ret; \
+ __ret = (uint8x8_t) __builtin_neon_vrshrn_n_v((int8x16_t)__s0, __p1, 16); \
+ __ret; \
+})
+#else
+#define vrshrn_n_u16(__p0, __p1) __extension__ ({ \
+ uint16x8_t __s0 = __p0; \
+ uint16x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
+ uint8x8_t __ret; \
+ __ret = (uint8x8_t) __builtin_neon_vrshrn_n_v((int8x16_t)__rev0, __p1, 16); \
+ __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
+ __ret; \
+})
+#define __noswap_vrshrn_n_u16(__p0, __p1) __extension__ ({ \
+ uint16x8_t __s0 = __p0; \
+ uint8x8_t __ret; \
+ __ret = (uint8x8_t) __builtin_neon_vrshrn_n_v((int8x16_t)__s0, __p1, 16); \
+ __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vrshrn_n_s32(__p0, __p1) __extension__ ({ \
+ int32x4_t __s0 = __p0; \
+ int16x4_t __ret; \
+ __ret = (int16x4_t) __builtin_neon_vrshrn_n_v((int8x16_t)__s0, __p1, 1); \
+ __ret; \
+})
+#else
+#define vrshrn_n_s32(__p0, __p1) __extension__ ({ \
+ int32x4_t __s0 = __p0; \
+ int32x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
+ int16x4_t __ret; \
+ __ret = (int16x4_t) __builtin_neon_vrshrn_n_v((int8x16_t)__rev0, __p1, 1); \
+ __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
+ __ret; \
+})
+#define __noswap_vrshrn_n_s32(__p0, __p1) __extension__ ({ \
+ int32x4_t __s0 = __p0; \
+ int16x4_t __ret; \
+ __ret = (int16x4_t) __builtin_neon_vrshrn_n_v((int8x16_t)__s0, __p1, 1); \
+ __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vrshrn_n_s64(__p0, __p1) __extension__ ({ \
+ int64x2_t __s0 = __p0; \
+ int32x2_t __ret; \
+ __ret = (int32x2_t) __builtin_neon_vrshrn_n_v((int8x16_t)__s0, __p1, 2); \
+ __ret; \
+})
+#else
+#define vrshrn_n_s64(__p0, __p1) __extension__ ({ \
+ int64x2_t __s0 = __p0; \
+ int64x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
+ int32x2_t __ret; \
+ __ret = (int32x2_t) __builtin_neon_vrshrn_n_v((int8x16_t)__rev0, __p1, 2); \
+ __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
+ __ret; \
+})
+#define __noswap_vrshrn_n_s64(__p0, __p1) __extension__ ({ \
+ int64x2_t __s0 = __p0; \
+ int32x2_t __ret; \
+ __ret = (int32x2_t) __builtin_neon_vrshrn_n_v((int8x16_t)__s0, __p1, 2); \
+ __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vrshrn_n_s16(__p0, __p1) __extension__ ({ \
+ int16x8_t __s0 = __p0; \
+ int8x8_t __ret; \
+ __ret = (int8x8_t) __builtin_neon_vrshrn_n_v((int8x16_t)__s0, __p1, 0); \
+ __ret; \
+})
+#else
+#define vrshrn_n_s16(__p0, __p1) __extension__ ({ \
+ int16x8_t __s0 = __p0; \
+ int16x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
+ int8x8_t __ret; \
+ __ret = (int8x8_t) __builtin_neon_vrshrn_n_v((int8x16_t)__rev0, __p1, 0); \
+ __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
+ __ret; \
+})
+#define __noswap_vrshrn_n_s16(__p0, __p1) __extension__ ({ \
+ int16x8_t __s0 = __p0; \
+ int8x8_t __ret; \
+ __ret = (int8x8_t) __builtin_neon_vrshrn_n_v((int8x16_t)__s0, __p1, 0); \
+ __ret; \
+})
+#endif
+
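+/* vrsqrte/vrsqrteq: reciprocal square-root estimate, a low-precision per-lane
+ * approximation of 1/sqrt(x) intended as a Newton-Raphson seed (see the
+ * vrsqrts step below). */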
+#ifdef __LITTLE_ENDIAN__
+__ai uint32x4_t vrsqrteq_u32(uint32x4_t __p0) {
+ uint32x4_t __ret;
+ __ret = (uint32x4_t) __builtin_neon_vrsqrteq_v((int8x16_t)__p0, 50);
+ return __ret;
+}
+#else
+__ai uint32x4_t vrsqrteq_u32(uint32x4_t __p0) {
+ uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
+ uint32x4_t __ret;
+ __ret = (uint32x4_t) __builtin_neon_vrsqrteq_v((int8x16_t)__rev0, 50);
+ __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai float32x4_t vrsqrteq_f32(float32x4_t __p0) {
+ float32x4_t __ret;
+ __ret = (float32x4_t) __builtin_neon_vrsqrteq_v((int8x16_t)__p0, 41);
+ return __ret;
+}
+#else
+__ai float32x4_t vrsqrteq_f32(float32x4_t __p0) {
+ float32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
+ float32x4_t __ret;
+ __ret = (float32x4_t) __builtin_neon_vrsqrteq_v((int8x16_t)__rev0, 41);
+ __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint32x2_t vrsqrte_u32(uint32x2_t __p0) {
+ uint32x2_t __ret;
+ __ret = (uint32x2_t) __builtin_neon_vrsqrte_v((int8x8_t)__p0, 18);
+ return __ret;
+}
+#else
+__ai uint32x2_t vrsqrte_u32(uint32x2_t __p0) {
+ uint32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
+ uint32x2_t __ret;
+ __ret = (uint32x2_t) __builtin_neon_vrsqrte_v((int8x8_t)__rev0, 18);
+ __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai float32x2_t vrsqrte_f32(float32x2_t __p0) {
+ float32x2_t __ret;
+ __ret = (float32x2_t) __builtin_neon_vrsqrte_v((int8x8_t)__p0, 9);
+ return __ret;
+}
+#else
+__ai float32x2_t vrsqrte_f32(float32x2_t __p0) {
+ float32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
+ float32x2_t __ret;
+ __ret = (float32x2_t) __builtin_neon_vrsqrte_v((int8x8_t)__rev0, 9);
+ __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
+ return __ret;
+}
+#endif
+
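+/* vrsqrts/vrsqrtsq: reciprocal square-root step, computing (3 - a*b) / 2 per
+ * lane. One Newton-Raphson refinement of the vrsqrte estimate looks like the
+ * following illustrative sketch (not part of this header; approx_rsqrt is a
+ * hypothetical helper):
+ *
+ *   static float32x4_t approx_rsqrt(float32x4_t x) {
+ *     float32x4_t e = vrsqrteq_f32(x);                    // initial estimate
+ *     e = vmulq_f32(e, vrsqrtsq_f32(vmulq_f32(x, e), e)); // e *= (3 - x*e*e)/2
+ *     return e;
+ *   }
+ */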
+#ifdef __LITTLE_ENDIAN__
+__ai float32x4_t vrsqrtsq_f32(float32x4_t __p0, float32x4_t __p1) {
+ float32x4_t __ret;
+ __ret = (float32x4_t) __builtin_neon_vrsqrtsq_v((int8x16_t)__p0, (int8x16_t)__p1, 41);
+ return __ret;
+}
+#else
+__ai float32x4_t vrsqrtsq_f32(float32x4_t __p0, float32x4_t __p1) {
+ float32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
+ float32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
+ float32x4_t __ret;
+ __ret = (float32x4_t) __builtin_neon_vrsqrtsq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 41);
+ __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai float32x2_t vrsqrts_f32(float32x2_t __p0, float32x2_t __p1) {
+ float32x2_t __ret;
+ __ret = (float32x2_t) __builtin_neon_vrsqrts_v((int8x8_t)__p0, (int8x8_t)__p1, 9);
+ return __ret;
+}
+#else
+__ai float32x2_t vrsqrts_f32(float32x2_t __p0, float32x2_t __p1) {
+ float32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
+ float32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
+ float32x2_t __ret;
+ __ret = (float32x2_t) __builtin_neon_vrsqrts_v((int8x8_t)__rev0, (int8x8_t)__rev1, 9);
+ __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
+ return __ret;
+}
+#endif
+
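+/* vrsra_n/vrsraq_n: rounding shift right and accumulate. Each lane computes
+ * a + ((b + (1 << (n - 1))) >> n), where a is the first operand and n is a
+ * constant. */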
+#ifdef __LITTLE_ENDIAN__
+#define vrsraq_n_u8(__p0, __p1, __p2) __extension__ ({ \
+ uint8x16_t __s0 = __p0; \
+ uint8x16_t __s1 = __p1; \
+ uint8x16_t __ret; \
+ __ret = (uint8x16_t) __builtin_neon_vrsraq_n_v((int8x16_t)__s0, (int8x16_t)__s1, __p2, 48); \
+ __ret; \
+})
+#else
+#define vrsraq_n_u8(__p0, __p1, __p2) __extension__ ({ \
+ uint8x16_t __s0 = __p0; \
+ uint8x16_t __s1 = __p1; \
+ uint8x16_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
+ uint8x16_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
+ uint8x16_t __ret; \
+ __ret = (uint8x16_t) __builtin_neon_vrsraq_n_v((int8x16_t)__rev0, (int8x16_t)__rev1, __p2, 48); \
+ __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
+ __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vrsraq_n_u32(__p0, __p1, __p2) __extension__ ({ \
+ uint32x4_t __s0 = __p0; \
+ uint32x4_t __s1 = __p1; \
+ uint32x4_t __ret; \
+ __ret = (uint32x4_t) __builtin_neon_vrsraq_n_v((int8x16_t)__s0, (int8x16_t)__s1, __p2, 50); \
+ __ret; \
+})
+#else
+#define vrsraq_n_u32(__p0, __p1, __p2) __extension__ ({ \
+ uint32x4_t __s0 = __p0; \
+ uint32x4_t __s1 = __p1; \
+ uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
+ uint32x4_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
+ uint32x4_t __ret; \
+ __ret = (uint32x4_t) __builtin_neon_vrsraq_n_v((int8x16_t)__rev0, (int8x16_t)__rev1, __p2, 50); \
+ __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
+ __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vrsraq_n_u64(__p0, __p1, __p2) __extension__ ({ \
+ uint64x2_t __s0 = __p0; \
+ uint64x2_t __s1 = __p1; \
+ uint64x2_t __ret; \
+ __ret = (uint64x2_t) __builtin_neon_vrsraq_n_v((int8x16_t)__s0, (int8x16_t)__s1, __p2, 51); \
+ __ret; \
+})
+#else
+#define vrsraq_n_u64(__p0, __p1, __p2) __extension__ ({ \
+ uint64x2_t __s0 = __p0; \
+ uint64x2_t __s1 = __p1; \
+ uint64x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
+ uint64x2_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \
+ uint64x2_t __ret; \
+ __ret = (uint64x2_t) __builtin_neon_vrsraq_n_v((int8x16_t)__rev0, (int8x16_t)__rev1, __p2, 51); \
+ __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
+ __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vrsraq_n_u16(__p0, __p1, __p2) __extension__ ({ \
+ uint16x8_t __s0 = __p0; \
+ uint16x8_t __s1 = __p1; \
+ uint16x8_t __ret; \
+ __ret = (uint16x8_t) __builtin_neon_vrsraq_n_v((int8x16_t)__s0, (int8x16_t)__s1, __p2, 49); \
+ __ret; \
+})
+#else
+#define vrsraq_n_u16(__p0, __p1, __p2) __extension__ ({ \
+ uint16x8_t __s0 = __p0; \
+ uint16x8_t __s1 = __p1; \
+ uint16x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
+ uint16x8_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \
+ uint16x8_t __ret; \
+ __ret = (uint16x8_t) __builtin_neon_vrsraq_n_v((int8x16_t)__rev0, (int8x16_t)__rev1, __p2, 49); \
+ __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
+ __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vrsraq_n_s8(__p0, __p1, __p2) __extension__ ({ \
+ int8x16_t __s0 = __p0; \
+ int8x16_t __s1 = __p1; \
+ int8x16_t __ret; \
+ __ret = (int8x16_t) __builtin_neon_vrsraq_n_v((int8x16_t)__s0, (int8x16_t)__s1, __p2, 32); \
+ __ret; \
+})
+#else
+#define vrsraq_n_s8(__p0, __p1, __p2) __extension__ ({ \
+ int8x16_t __s0 = __p0; \
+ int8x16_t __s1 = __p1; \
+ int8x16_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
+ int8x16_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
+ int8x16_t __ret; \
+ __ret = (int8x16_t) __builtin_neon_vrsraq_n_v((int8x16_t)__rev0, (int8x16_t)__rev1, __p2, 32); \
+ __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
+ __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vrsraq_n_s32(__p0, __p1, __p2) __extension__ ({ \
+ int32x4_t __s0 = __p0; \
+ int32x4_t __s1 = __p1; \
+ int32x4_t __ret; \
+ __ret = (int32x4_t) __builtin_neon_vrsraq_n_v((int8x16_t)__s0, (int8x16_t)__s1, __p2, 34); \
+ __ret; \
+})
+#else
+#define vrsraq_n_s32(__p0, __p1, __p2) __extension__ ({ \
+ int32x4_t __s0 = __p0; \
+ int32x4_t __s1 = __p1; \
+ int32x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
+ int32x4_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
+ int32x4_t __ret; \
+ __ret = (int32x4_t) __builtin_neon_vrsraq_n_v((int8x16_t)__rev0, (int8x16_t)__rev1, __p2, 34); \
+ __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
+ __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vrsraq_n_s64(__p0, __p1, __p2) __extension__ ({ \
+ int64x2_t __s0 = __p0; \
+ int64x2_t __s1 = __p1; \
+ int64x2_t __ret; \
+ __ret = (int64x2_t) __builtin_neon_vrsraq_n_v((int8x16_t)__s0, (int8x16_t)__s1, __p2, 35); \
+ __ret; \
+})
+#else
+#define vrsraq_n_s64(__p0, __p1, __p2) __extension__ ({ \
+ int64x2_t __s0 = __p0; \
+ int64x2_t __s1 = __p1; \
+ int64x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
+ int64x2_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \
+ int64x2_t __ret; \
+ __ret = (int64x2_t) __builtin_neon_vrsraq_n_v((int8x16_t)__rev0, (int8x16_t)__rev1, __p2, 35); \
+ __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
+ __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vrsraq_n_s16(__p0, __p1, __p2) __extension__ ({ \
+ int16x8_t __s0 = __p0; \
+ int16x8_t __s1 = __p1; \
+ int16x8_t __ret; \
+ __ret = (int16x8_t) __builtin_neon_vrsraq_n_v((int8x16_t)__s0, (int8x16_t)__s1, __p2, 33); \
+ __ret; \
+})
+#else
+#define vrsraq_n_s16(__p0, __p1, __p2) __extension__ ({ \
+ int16x8_t __s0 = __p0; \
+ int16x8_t __s1 = __p1; \
+ int16x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
+ int16x8_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \
+ int16x8_t __ret; \
+ __ret = (int16x8_t) __builtin_neon_vrsraq_n_v((int8x16_t)__rev0, (int8x16_t)__rev1, __p2, 33); \
+ __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
+ __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vrsra_n_u8(__p0, __p1, __p2) __extension__ ({ \
+ uint8x8_t __s0 = __p0; \
+ uint8x8_t __s1 = __p1; \
+ uint8x8_t __ret; \
+ __ret = (uint8x8_t) __builtin_neon_vrsra_n_v((int8x8_t)__s0, (int8x8_t)__s1, __p2, 16); \
+ __ret; \
+})
+#else
+#define vrsra_n_u8(__p0, __p1, __p2) __extension__ ({ \
+ uint8x8_t __s0 = __p0; \
+ uint8x8_t __s1 = __p1; \
+ uint8x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
+ uint8x8_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \
+ uint8x8_t __ret; \
+ __ret = (uint8x8_t) __builtin_neon_vrsra_n_v((int8x8_t)__rev0, (int8x8_t)__rev1, __p2, 16); \
+ __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
+ __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vrsra_n_u32(__p0, __p1, __p2) __extension__ ({ \
+ uint32x2_t __s0 = __p0; \
+ uint32x2_t __s1 = __p1; \
+ uint32x2_t __ret; \
+ __ret = (uint32x2_t) __builtin_neon_vrsra_n_v((int8x8_t)__s0, (int8x8_t)__s1, __p2, 18); \
+ __ret; \
+})
+#else
+#define vrsra_n_u32(__p0, __p1, __p2) __extension__ ({ \
+ uint32x2_t __s0 = __p0; \
+ uint32x2_t __s1 = __p1; \
+ uint32x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
+ uint32x2_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \
+ uint32x2_t __ret; \
+ __ret = (uint32x2_t) __builtin_neon_vrsra_n_v((int8x8_t)__rev0, (int8x8_t)__rev1, __p2, 18); \
+ __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
+ __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vrsra_n_u64(__p0, __p1, __p2) __extension__ ({ \
+ uint64x1_t __s0 = __p0; \
+ uint64x1_t __s1 = __p1; \
+ uint64x1_t __ret; \
+ __ret = (uint64x1_t) __builtin_neon_vrsra_n_v((int8x8_t)__s0, (int8x8_t)__s1, __p2, 19); \
+ __ret; \
+})
+#else
+#define vrsra_n_u64(__p0, __p1, __p2) __extension__ ({ \
+ uint64x1_t __s0 = __p0; \
+ uint64x1_t __s1 = __p1; \
+ uint64x1_t __ret; \
+ __ret = (uint64x1_t) __builtin_neon_vrsra_n_v((int8x8_t)__s0, (int8x8_t)__s1, __p2, 19); \
+ __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vrsra_n_u16(__p0, __p1, __p2) __extension__ ({ \
+ uint16x4_t __s0 = __p0; \
+ uint16x4_t __s1 = __p1; \
+ uint16x4_t __ret; \
+ __ret = (uint16x4_t) __builtin_neon_vrsra_n_v((int8x8_t)__s0, (int8x8_t)__s1, __p2, 17); \
+ __ret; \
+})
+#else
+#define vrsra_n_u16(__p0, __p1, __p2) __extension__ ({ \
+ uint16x4_t __s0 = __p0; \
+ uint16x4_t __s1 = __p1; \
+ uint16x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
+ uint16x4_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
+ uint16x4_t __ret; \
+ __ret = (uint16x4_t) __builtin_neon_vrsra_n_v((int8x8_t)__rev0, (int8x8_t)__rev1, __p2, 17); \
+ __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
+ __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vrsra_n_s8(__p0, __p1, __p2) __extension__ ({ \
+ int8x8_t __s0 = __p0; \
+ int8x8_t __s1 = __p1; \
+ int8x8_t __ret; \
+ __ret = (int8x8_t) __builtin_neon_vrsra_n_v((int8x8_t)__s0, (int8x8_t)__s1, __p2, 0); \
+ __ret; \
+})
+#else
+#define vrsra_n_s8(__p0, __p1, __p2) __extension__ ({ \
+ int8x8_t __s0 = __p0; \
+ int8x8_t __s1 = __p1; \
+ int8x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
+ int8x8_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \
+ int8x8_t __ret; \
+ __ret = (int8x8_t) __builtin_neon_vrsra_n_v((int8x8_t)__rev0, (int8x8_t)__rev1, __p2, 0); \
+ __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
+ __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vrsra_n_s32(__p0, __p1, __p2) __extension__ ({ \
+ int32x2_t __s0 = __p0; \
+ int32x2_t __s1 = __p1; \
+ int32x2_t __ret; \
+ __ret = (int32x2_t) __builtin_neon_vrsra_n_v((int8x8_t)__s0, (int8x8_t)__s1, __p2, 2); \
+ __ret; \
+})
+#else
+#define vrsra_n_s32(__p0, __p1, __p2) __extension__ ({ \
+ int32x2_t __s0 = __p0; \
+ int32x2_t __s1 = __p1; \
+ int32x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
+ int32x2_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \
+ int32x2_t __ret; \
+ __ret = (int32x2_t) __builtin_neon_vrsra_n_v((int8x8_t)__rev0, (int8x8_t)__rev1, __p2, 2); \
+ __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
+ __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vrsra_n_s64(__p0, __p1, __p2) __extension__ ({ \
+ int64x1_t __s0 = __p0; \
+ int64x1_t __s1 = __p1; \
+ int64x1_t __ret; \
+ __ret = (int64x1_t) __builtin_neon_vrsra_n_v((int8x8_t)__s0, (int8x8_t)__s1, __p2, 3); \
+ __ret; \
+})
+#else
+#define vrsra_n_s64(__p0, __p1, __p2) __extension__ ({ \
+ int64x1_t __s0 = __p0; \
+ int64x1_t __s1 = __p1; \
+ int64x1_t __ret; \
+ __ret = (int64x1_t) __builtin_neon_vrsra_n_v((int8x8_t)__s0, (int8x8_t)__s1, __p2, 3); \
+ __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vrsra_n_s16(__p0, __p1, __p2) __extension__ ({ \
+ int16x4_t __s0 = __p0; \
+ int16x4_t __s1 = __p1; \
+ int16x4_t __ret; \
+ __ret = (int16x4_t) __builtin_neon_vrsra_n_v((int8x8_t)__s0, (int8x8_t)__s1, __p2, 1); \
+ __ret; \
+})
+#else
+#define vrsra_n_s16(__p0, __p1, __p2) __extension__ ({ \
+ int16x4_t __s0 = __p0; \
+ int16x4_t __s1 = __p1; \
+ int16x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
+ int16x4_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
+ int16x4_t __ret; \
+ __ret = (int16x4_t) __builtin_neon_vrsra_n_v((int8x8_t)__rev0, (int8x8_t)__rev1, __p2, 1); \
+ __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
+ __ret; \
+})
+#endif
+
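+/* vrsubhn: rounding subtract, keeping the (narrowed) high half. For W-bit
+ * input lanes each result lane is (a - b + (1 << (W/2 - 1))) >> (W/2),
+ * truncated to W/2 bits. */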
+#ifdef __LITTLE_ENDIAN__
+__ai uint16x4_t vrsubhn_u32(uint32x4_t __p0, uint32x4_t __p1) {
+ uint16x4_t __ret;
+ __ret = (uint16x4_t) __builtin_neon_vrsubhn_v((int8x16_t)__p0, (int8x16_t)__p1, 17);
+ return __ret;
+}
+#else
+__ai uint16x4_t vrsubhn_u32(uint32x4_t __p0, uint32x4_t __p1) {
+ uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
+ uint32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
+ uint16x4_t __ret;
+ __ret = (uint16x4_t) __builtin_neon_vrsubhn_v((int8x16_t)__rev0, (int8x16_t)__rev1, 17);
+ __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
+ return __ret;
+}
+__ai uint16x4_t __noswap_vrsubhn_u32(uint32x4_t __p0, uint32x4_t __p1) {
+ uint16x4_t __ret;
+ __ret = (uint16x4_t) __builtin_neon_vrsubhn_v((int8x16_t)__p0, (int8x16_t)__p1, 17);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint32x2_t vrsubhn_u64(uint64x2_t __p0, uint64x2_t __p1) {
+ uint32x2_t __ret;
+ __ret = (uint32x2_t) __builtin_neon_vrsubhn_v((int8x16_t)__p0, (int8x16_t)__p1, 18);
+ return __ret;
+}
+#else
+__ai uint32x2_t vrsubhn_u64(uint64x2_t __p0, uint64x2_t __p1) {
+ uint64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
+ uint64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
+ uint32x2_t __ret;
+ __ret = (uint32x2_t) __builtin_neon_vrsubhn_v((int8x16_t)__rev0, (int8x16_t)__rev1, 18);
+ __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
+ return __ret;
+}
+__ai uint32x2_t __noswap_vrsubhn_u64(uint64x2_t __p0, uint64x2_t __p1) {
+ uint32x2_t __ret;
+ __ret = (uint32x2_t) __builtin_neon_vrsubhn_v((int8x16_t)__p0, (int8x16_t)__p1, 18);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint8x8_t vrsubhn_u16(uint16x8_t __p0, uint16x8_t __p1) {
+ uint8x8_t __ret;
+ __ret = (uint8x8_t) __builtin_neon_vrsubhn_v((int8x16_t)__p0, (int8x16_t)__p1, 16);
+ return __ret;
+}
+#else
+__ai uint8x8_t vrsubhn_u16(uint16x8_t __p0, uint16x8_t __p1) {
+ uint16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
+ uint16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
+ uint8x8_t __ret;
+ __ret = (uint8x8_t) __builtin_neon_vrsubhn_v((int8x16_t)__rev0, (int8x16_t)__rev1, 16);
+ __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
+ return __ret;
+}
+__ai uint8x8_t __noswap_vrsubhn_u16(uint16x8_t __p0, uint16x8_t __p1) {
+ uint8x8_t __ret;
+ __ret = (uint8x8_t) __builtin_neon_vrsubhn_v((int8x16_t)__p0, (int8x16_t)__p1, 16);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai int16x4_t vrsubhn_s32(int32x4_t __p0, int32x4_t __p1) {
+ int16x4_t __ret;
+ __ret = (int16x4_t) __builtin_neon_vrsubhn_v((int8x16_t)__p0, (int8x16_t)__p1, 1);
+ return __ret;
+}
+#else
+__ai int16x4_t vrsubhn_s32(int32x4_t __p0, int32x4_t __p1) {
+ int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
+ int32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
+ int16x4_t __ret;
+ __ret = (int16x4_t) __builtin_neon_vrsubhn_v((int8x16_t)__rev0, (int8x16_t)__rev1, 1);
+ __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
+ return __ret;
+}
+__ai int16x4_t __noswap_vrsubhn_s32(int32x4_t __p0, int32x4_t __p1) {
+ int16x4_t __ret;
+ __ret = (int16x4_t) __builtin_neon_vrsubhn_v((int8x16_t)__p0, (int8x16_t)__p1, 1);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai int32x2_t vrsubhn_s64(int64x2_t __p0, int64x2_t __p1) {
+ int32x2_t __ret;
+ __ret = (int32x2_t) __builtin_neon_vrsubhn_v((int8x16_t)__p0, (int8x16_t)__p1, 2);
+ return __ret;
+}
+#else
+__ai int32x2_t vrsubhn_s64(int64x2_t __p0, int64x2_t __p1) {
+ int64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
+ int64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
+ int32x2_t __ret;
+ __ret = (int32x2_t) __builtin_neon_vrsubhn_v((int8x16_t)__rev0, (int8x16_t)__rev1, 2);
+ __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
+ return __ret;
+}
+__ai int32x2_t __noswap_vrsubhn_s64(int64x2_t __p0, int64x2_t __p1) {
+ int32x2_t __ret;
+ __ret = (int32x2_t) __builtin_neon_vrsubhn_v((int8x16_t)__p0, (int8x16_t)__p1, 2);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai int8x8_t vrsubhn_s16(int16x8_t __p0, int16x8_t __p1) {
+ int8x8_t __ret;
+ __ret = (int8x8_t) __builtin_neon_vrsubhn_v((int8x16_t)__p0, (int8x16_t)__p1, 0);
+ return __ret;
+}
+#else
+__ai int8x8_t vrsubhn_s16(int16x8_t __p0, int16x8_t __p1) {
+ int16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
+ int16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
+ int8x8_t __ret;
+ __ret = (int8x8_t) __builtin_neon_vrsubhn_v((int8x16_t)__rev0, (int8x16_t)__rev1, 0);
+ __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
+ return __ret;
+}
+__ai int8x8_t __noswap_vrsubhn_s16(int16x8_t __p0, int16x8_t __p1) {
+ int8x8_t __ret;
+ __ret = (int8x8_t) __builtin_neon_vrsubhn_v((int8x16_t)__p0, (int8x16_t)__p1, 0);
+ return __ret;
+}
+#endif
+
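+/* vset_lane/vsetq_lane: return a copy of the vector with the lane selected by
+ * the constant index __p2 replaced by the scalar __p0. Only the vector operand
+ * is shuffled on big endian; normalizing it first means the constant index
+ * names the same lane on either endianness. */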
+#ifdef __LITTLE_ENDIAN__
+#define vset_lane_p8(__p0, __p1, __p2) __extension__ ({ \
+ poly8_t __s0 = __p0; \
+ poly8x8_t __s1 = __p1; \
+ poly8x8_t __ret; \
+ __ret = (poly8x8_t) __builtin_neon_vset_lane_i8(__s0, (int8x8_t)__s1, __p2); \
+ __ret; \
+})
+#else
+#define vset_lane_p8(__p0, __p1, __p2) __extension__ ({ \
+ poly8_t __s0 = __p0; \
+ poly8x8_t __s1 = __p1; \
+ poly8x8_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \
+ poly8x8_t __ret; \
+ __ret = (poly8x8_t) __builtin_neon_vset_lane_i8(__s0, (int8x8_t)__rev1, __p2); \
+ __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
+ __ret; \
+})
+#define __noswap_vset_lane_p8(__p0, __p1, __p2) __extension__ ({ \
+ poly8_t __s0 = __p0; \
+ poly8x8_t __s1 = __p1; \
+ poly8x8_t __ret; \
+ __ret = (poly8x8_t) __builtin_neon_vset_lane_i8(__s0, (int8x8_t)__s1, __p2); \
+ __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vset_lane_p16(__p0, __p1, __p2) __extension__ ({ \
+ poly16_t __s0 = __p0; \
+ poly16x4_t __s1 = __p1; \
+ poly16x4_t __ret; \
+ __ret = (poly16x4_t) __builtin_neon_vset_lane_i16(__s0, (int8x8_t)__s1, __p2); \
+ __ret; \
+})
+#else
+#define vset_lane_p16(__p0, __p1, __p2) __extension__ ({ \
+ poly16_t __s0 = __p0; \
+ poly16x4_t __s1 = __p1; \
+ poly16x4_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
+ poly16x4_t __ret; \
+ __ret = (poly16x4_t) __builtin_neon_vset_lane_i16(__s0, (int8x8_t)__rev1, __p2); \
+ __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
+ __ret; \
+})
+#define __noswap_vset_lane_p16(__p0, __p1, __p2) __extension__ ({ \
+ poly16_t __s0 = __p0; \
+ poly16x4_t __s1 = __p1; \
+ poly16x4_t __ret; \
+ __ret = (poly16x4_t) __builtin_neon_vset_lane_i16(__s0, (int8x8_t)__s1, __p2); \
+ __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vsetq_lane_p8(__p0, __p1, __p2) __extension__ ({ \
+ poly8_t __s0 = __p0; \
+ poly8x16_t __s1 = __p1; \
+ poly8x16_t __ret; \
+ __ret = (poly8x16_t) __builtin_neon_vsetq_lane_i8(__s0, (int8x16_t)__s1, __p2); \
+ __ret; \
+})
+#else
+#define vsetq_lane_p8(__p0, __p1, __p2) __extension__ ({ \
+ poly8_t __s0 = __p0; \
+ poly8x16_t __s1 = __p1; \
+ poly8x16_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
+ poly8x16_t __ret; \
+ __ret = (poly8x16_t) __builtin_neon_vsetq_lane_i8(__s0, (int8x16_t)__rev1, __p2); \
+ __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
+ __ret; \
+})
+#define __noswap_vsetq_lane_p8(__p0, __p1, __p2) __extension__ ({ \
+ poly8_t __s0 = __p0; \
+ poly8x16_t __s1 = __p1; \
+ poly8x16_t __ret; \
+ __ret = (poly8x16_t) __builtin_neon_vsetq_lane_i8(__s0, (int8x16_t)__s1, __p2); \
+ __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vsetq_lane_p16(__p0, __p1, __p2) __extension__ ({ \
+ poly16_t __s0 = __p0; \
+ poly16x8_t __s1 = __p1; \
+ poly16x8_t __ret; \
+ __ret = (poly16x8_t) __builtin_neon_vsetq_lane_i16(__s0, (int8x16_t)__s1, __p2); \
+ __ret; \
+})
+#else
+#define vsetq_lane_p16(__p0, __p1, __p2) __extension__ ({ \
+ poly16_t __s0 = __p0; \
+ poly16x8_t __s1 = __p1; \
+ poly16x8_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \
+ poly16x8_t __ret; \
+ __ret = (poly16x8_t) __builtin_neon_vsetq_lane_i16(__s0, (int8x16_t)__rev1, __p2); \
+ __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
+ __ret; \
+})
+#define __noswap_vsetq_lane_p16(__p0, __p1, __p2) __extension__ ({ \
+ poly16_t __s0 = __p0; \
+ poly16x8_t __s1 = __p1; \
+ poly16x8_t __ret; \
+ __ret = (poly16x8_t) __builtin_neon_vsetq_lane_i16(__s0, (int8x16_t)__s1, __p2); \
+ __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vsetq_lane_u8(__p0, __p1, __p2) __extension__ ({ \
+ uint8_t __s0 = __p0; \
+ uint8x16_t __s1 = __p1; \
+ uint8x16_t __ret; \
+ __ret = (uint8x16_t) __builtin_neon_vsetq_lane_i8(__s0, (int8x16_t)__s1, __p2); \
+ __ret; \
+})
+#else
+#define vsetq_lane_u8(__p0, __p1, __p2) __extension__ ({ \
+ uint8_t __s0 = __p0; \
+ uint8x16_t __s1 = __p1; \
+ uint8x16_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
+ uint8x16_t __ret; \
+ __ret = (uint8x16_t) __builtin_neon_vsetq_lane_i8(__s0, (int8x16_t)__rev1, __p2); \
+ __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
+ __ret; \
+})
+#define __noswap_vsetq_lane_u8(__p0, __p1, __p2) __extension__ ({ \
+ uint8_t __s0 = __p0; \
+ uint8x16_t __s1 = __p1; \
+ uint8x16_t __ret; \
+ __ret = (uint8x16_t) __builtin_neon_vsetq_lane_i8(__s0, (int8x16_t)__s1, __p2); \
+ __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vsetq_lane_u32(__p0, __p1, __p2) __extension__ ({ \
+ uint32_t __s0 = __p0; \
+ uint32x4_t __s1 = __p1; \
+ uint32x4_t __ret; \
+ __ret = (uint32x4_t) __builtin_neon_vsetq_lane_i32(__s0, (int8x16_t)__s1, __p2); \
+ __ret; \
+})
+#else
+#define vsetq_lane_u32(__p0, __p1, __p2) __extension__ ({ \
+ uint32_t __s0 = __p0; \
+ uint32x4_t __s1 = __p1; \
+ uint32x4_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
+ uint32x4_t __ret; \
+ __ret = (uint32x4_t) __builtin_neon_vsetq_lane_i32(__s0, (int8x16_t)__rev1, __p2); \
+ __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
+ __ret; \
+})
+#define __noswap_vsetq_lane_u32(__p0, __p1, __p2) __extension__ ({ \
+ uint32_t __s0 = __p0; \
+ uint32x4_t __s1 = __p1; \
+ uint32x4_t __ret; \
+ __ret = (uint32x4_t) __builtin_neon_vsetq_lane_i32(__s0, (int8x16_t)__s1, __p2); \
+ __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vsetq_lane_u64(__p0, __p1, __p2) __extension__ ({ \
+ uint64_t __s0 = __p0; \
+ uint64x2_t __s1 = __p1; \
+ uint64x2_t __ret; \
+ __ret = (uint64x2_t) __builtin_neon_vsetq_lane_i64(__s0, (int8x16_t)__s1, __p2); \
+ __ret; \
+})
+#else
+#define vsetq_lane_u64(__p0, __p1, __p2) __extension__ ({ \
+ uint64_t __s0 = __p0; \
+ uint64x2_t __s1 = __p1; \
+ uint64x2_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \
+ uint64x2_t __ret; \
+ __ret = (uint64x2_t) __builtin_neon_vsetq_lane_i64(__s0, (int8x16_t)__rev1, __p2); \
+ __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
+ __ret; \
+})
+#define __noswap_vsetq_lane_u64(__p0, __p1, __p2) __extension__ ({ \
+ uint64_t __s0 = __p0; \
+ uint64x2_t __s1 = __p1; \
+ uint64x2_t __ret; \
+ __ret = (uint64x2_t) __builtin_neon_vsetq_lane_i64(__s0, (int8x16_t)__s1, __p2); \
+ __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vsetq_lane_u16(__p0, __p1, __p2) __extension__ ({ \
+ uint16_t __s0 = __p0; \
+ uint16x8_t __s1 = __p1; \
+ uint16x8_t __ret; \
+ __ret = (uint16x8_t) __builtin_neon_vsetq_lane_i16(__s0, (int8x16_t)__s1, __p2); \
+ __ret; \
+})
+#else
+#define vsetq_lane_u16(__p0, __p1, __p2) __extension__ ({ \
+ uint16_t __s0 = __p0; \
+ uint16x8_t __s1 = __p1; \
+ uint16x8_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \
+ uint16x8_t __ret; \
+ __ret = (uint16x8_t) __builtin_neon_vsetq_lane_i16(__s0, (int8x16_t)__rev1, __p2); \
+ __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
+ __ret; \
+})
+#define __noswap_vsetq_lane_u16(__p0, __p1, __p2) __extension__ ({ \
+ uint16_t __s0 = __p0; \
+ uint16x8_t __s1 = __p1; \
+ uint16x8_t __ret; \
+ __ret = (uint16x8_t) __builtin_neon_vsetq_lane_i16(__s0, (int8x16_t)__s1, __p2); \
+ __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vsetq_lane_s8(__p0, __p1, __p2) __extension__ ({ \
+ int8_t __s0 = __p0; \
+ int8x16_t __s1 = __p1; \
+ int8x16_t __ret; \
+ __ret = (int8x16_t) __builtin_neon_vsetq_lane_i8(__s0, (int8x16_t)__s1, __p2); \
+ __ret; \
+})
+#else
+#define vsetq_lane_s8(__p0, __p1, __p2) __extension__ ({ \
+ int8_t __s0 = __p0; \
+ int8x16_t __s1 = __p1; \
+ int8x16_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
+ int8x16_t __ret; \
+ __ret = (int8x16_t) __builtin_neon_vsetq_lane_i8(__s0, (int8x16_t)__rev1, __p2); \
+ __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
+ __ret; \
+})
+#define __noswap_vsetq_lane_s8(__p0, __p1, __p2) __extension__ ({ \
+ int8_t __s0 = __p0; \
+ int8x16_t __s1 = __p1; \
+ int8x16_t __ret; \
+ __ret = (int8x16_t) __builtin_neon_vsetq_lane_i8(__s0, (int8x16_t)__s1, __p2); \
+ __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vsetq_lane_f32(__p0, __p1, __p2) __extension__ ({ \
+ float32_t __s0 = __p0; \
+ float32x4_t __s1 = __p1; \
+ float32x4_t __ret; \
+ __ret = (float32x4_t) __builtin_neon_vsetq_lane_f32(__s0, (int8x16_t)__s1, __p2); \
+ __ret; \
+})
+#else
+#define vsetq_lane_f32(__p0, __p1, __p2) __extension__ ({ \
+ float32_t __s0 = __p0; \
+ float32x4_t __s1 = __p1; \
+ float32x4_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
+ float32x4_t __ret; \
+ __ret = (float32x4_t) __builtin_neon_vsetq_lane_f32(__s0, (int8x16_t)__rev1, __p2); \
+ __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
+ __ret; \
+})
+#define __noswap_vsetq_lane_f32(__p0, __p1, __p2) __extension__ ({ \
+ float32_t __s0 = __p0; \
+ float32x4_t __s1 = __p1; \
+ float32x4_t __ret; \
+ __ret = (float32x4_t) __builtin_neon_vsetq_lane_f32(__s0, (int8x16_t)__s1, __p2); \
+ __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vsetq_lane_s32(__p0, __p1, __p2) __extension__ ({ \
+ int32_t __s0 = __p0; \
+ int32x4_t __s1 = __p1; \
+ int32x4_t __ret; \
+ __ret = (int32x4_t) __builtin_neon_vsetq_lane_i32(__s0, (int8x16_t)__s1, __p2); \
+ __ret; \
+})
+#else
+#define vsetq_lane_s32(__p0, __p1, __p2) __extension__ ({ \
+ int32_t __s0 = __p0; \
+ int32x4_t __s1 = __p1; \
+ int32x4_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
+ int32x4_t __ret; \
+ __ret = (int32x4_t) __builtin_neon_vsetq_lane_i32(__s0, (int8x16_t)__rev1, __p2); \
+ __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
+ __ret; \
+})
+#define __noswap_vsetq_lane_s32(__p0, __p1, __p2) __extension__ ({ \
+ int32_t __s0 = __p0; \
+ int32x4_t __s1 = __p1; \
+ int32x4_t __ret; \
+ __ret = (int32x4_t) __builtin_neon_vsetq_lane_i32(__s0, (int8x16_t)__s1, __p2); \
+ __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vsetq_lane_s64(__p0, __p1, __p2) __extension__ ({ \
+ int64_t __s0 = __p0; \
+ int64x2_t __s1 = __p1; \
+ int64x2_t __ret; \
+ __ret = (int64x2_t) __builtin_neon_vsetq_lane_i64(__s0, (int8x16_t)__s1, __p2); \
+ __ret; \
+})
+#else
+#define vsetq_lane_s64(__p0, __p1, __p2) __extension__ ({ \
+ int64_t __s0 = __p0; \
+ int64x2_t __s1 = __p1; \
+ int64x2_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \
+ int64x2_t __ret; \
+ __ret = (int64x2_t) __builtin_neon_vsetq_lane_i64(__s0, (int8x16_t)__rev1, __p2); \
+ __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
+ __ret; \
+})
+#define __noswap_vsetq_lane_s64(__p0, __p1, __p2) __extension__ ({ \
+ int64_t __s0 = __p0; \
+ int64x2_t __s1 = __p1; \
+ int64x2_t __ret; \
+ __ret = (int64x2_t) __builtin_neon_vsetq_lane_i64(__s0, (int8x16_t)__s1, __p2); \
+ __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vsetq_lane_s16(__p0, __p1, __p2) __extension__ ({ \
+ int16_t __s0 = __p0; \
+ int16x8_t __s1 = __p1; \
+ int16x8_t __ret; \
+ __ret = (int16x8_t) __builtin_neon_vsetq_lane_i16(__s0, (int8x16_t)__s1, __p2); \
+ __ret; \
+})
+#else
+#define vsetq_lane_s16(__p0, __p1, __p2) __extension__ ({ \
+ int16_t __s0 = __p0; \
+ int16x8_t __s1 = __p1; \
+ int16x8_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \
+ int16x8_t __ret; \
+ __ret = (int16x8_t) __builtin_neon_vsetq_lane_i16(__s0, (int8x16_t)__rev1, __p2); \
+ __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
+ __ret; \
+})
+#define __noswap_vsetq_lane_s16(__p0, __p1, __p2) __extension__ ({ \
+ int16_t __s0 = __p0; \
+ int16x8_t __s1 = __p1; \
+ int16x8_t __ret; \
+ __ret = (int16x8_t) __builtin_neon_vsetq_lane_i16(__s0, (int8x16_t)__s1, __p2); \
+ __ret; \
+})
+#endif
+
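+/* vset_lane_* are the 64-bit (D-register) counterparts of vsetq_lane_*.
+ * The single-lane u64/s64 variants have nothing to reverse, so their
+ * little- and big-endian definitions are identical. */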
+#ifdef __LITTLE_ENDIAN__
+#define vset_lane_u8(__p0, __p1, __p2) __extension__ ({ \
+ uint8_t __s0 = __p0; \
+ uint8x8_t __s1 = __p1; \
+ uint8x8_t __ret; \
+ __ret = (uint8x8_t) __builtin_neon_vset_lane_i8(__s0, (int8x8_t)__s1, __p2); \
+ __ret; \
+})
+#else
+#define vset_lane_u8(__p0, __p1, __p2) __extension__ ({ \
+ uint8_t __s0 = __p0; \
+ uint8x8_t __s1 = __p1; \
+ uint8x8_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \
+ uint8x8_t __ret; \
+ __ret = (uint8x8_t) __builtin_neon_vset_lane_i8(__s0, (int8x8_t)__rev1, __p2); \
+ __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
+ __ret; \
+})
+#define __noswap_vset_lane_u8(__p0, __p1, __p2) __extension__ ({ \
+ uint8_t __s0 = __p0; \
+ uint8x8_t __s1 = __p1; \
+ uint8x8_t __ret; \
+ __ret = (uint8x8_t) __builtin_neon_vset_lane_i8(__s0, (int8x8_t)__s1, __p2); \
+ __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vset_lane_u32(__p0, __p1, __p2) __extension__ ({ \
+ uint32_t __s0 = __p0; \
+ uint32x2_t __s1 = __p1; \
+ uint32x2_t __ret; \
+ __ret = (uint32x2_t) __builtin_neon_vset_lane_i32(__s0, (int8x8_t)__s1, __p2); \
+ __ret; \
+})
+#else
+#define vset_lane_u32(__p0, __p1, __p2) __extension__ ({ \
+ uint32_t __s0 = __p0; \
+ uint32x2_t __s1 = __p1; \
+ uint32x2_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \
+ uint32x2_t __ret; \
+ __ret = (uint32x2_t) __builtin_neon_vset_lane_i32(__s0, (int8x8_t)__rev1, __p2); \
+ __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
+ __ret; \
+})
+#define __noswap_vset_lane_u32(__p0, __p1, __p2) __extension__ ({ \
+ uint32_t __s0 = __p0; \
+ uint32x2_t __s1 = __p1; \
+ uint32x2_t __ret; \
+ __ret = (uint32x2_t) __builtin_neon_vset_lane_i32(__s0, (int8x8_t)__s1, __p2); \
+ __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vset_lane_u64(__p0, __p1, __p2) __extension__ ({ \
+ uint64_t __s0 = __p0; \
+ uint64x1_t __s1 = __p1; \
+ uint64x1_t __ret; \
+ __ret = (uint64x1_t) __builtin_neon_vset_lane_i64(__s0, (int8x8_t)__s1, __p2); \
+ __ret; \
+})
+#else
+#define vset_lane_u64(__p0, __p1, __p2) __extension__ ({ \
+ uint64_t __s0 = __p0; \
+ uint64x1_t __s1 = __p1; \
+ uint64x1_t __ret; \
+ __ret = (uint64x1_t) __builtin_neon_vset_lane_i64(__s0, (int8x8_t)__s1, __p2); \
+ __ret; \
+})
+#define __noswap_vset_lane_u64(__p0, __p1, __p2) __extension__ ({ \
+ uint64_t __s0 = __p0; \
+ uint64x1_t __s1 = __p1; \
+ uint64x1_t __ret; \
+ __ret = (uint64x1_t) __builtin_neon_vset_lane_i64(__s0, (int8x8_t)__s1, __p2); \
+ __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vset_lane_u16(__p0, __p1, __p2) __extension__ ({ \
+ uint16_t __s0 = __p0; \
+ uint16x4_t __s1 = __p1; \
+ uint16x4_t __ret; \
+ __ret = (uint16x4_t) __builtin_neon_vset_lane_i16(__s0, (int8x8_t)__s1, __p2); \
+ __ret; \
+})
+#else
+#define vset_lane_u16(__p0, __p1, __p2) __extension__ ({ \
+ uint16_t __s0 = __p0; \
+ uint16x4_t __s1 = __p1; \
+ uint16x4_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
+ uint16x4_t __ret; \
+ __ret = (uint16x4_t) __builtin_neon_vset_lane_i16(__s0, (int8x8_t)__rev1, __p2); \
+ __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
+ __ret; \
+})
+#define __noswap_vset_lane_u16(__p0, __p1, __p2) __extension__ ({ \
+ uint16_t __s0 = __p0; \
+ uint16x4_t __s1 = __p1; \
+ uint16x4_t __ret; \
+ __ret = (uint16x4_t) __builtin_neon_vset_lane_i16(__s0, (int8x8_t)__s1, __p2); \
+ __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vset_lane_s8(__p0, __p1, __p2) __extension__ ({ \
+ int8_t __s0 = __p0; \
+ int8x8_t __s1 = __p1; \
+ int8x8_t __ret; \
+ __ret = (int8x8_t) __builtin_neon_vset_lane_i8(__s0, (int8x8_t)__s1, __p2); \
+ __ret; \
+})
+#else
+#define vset_lane_s8(__p0, __p1, __p2) __extension__ ({ \
+ int8_t __s0 = __p0; \
+ int8x8_t __s1 = __p1; \
+ int8x8_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \
+ int8x8_t __ret; \
+ __ret = (int8x8_t) __builtin_neon_vset_lane_i8(__s0, (int8x8_t)__rev1, __p2); \
+ __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
+ __ret; \
+})
+#define __noswap_vset_lane_s8(__p0, __p1, __p2) __extension__ ({ \
+ int8_t __s0 = __p0; \
+ int8x8_t __s1 = __p1; \
+ int8x8_t __ret; \
+ __ret = (int8x8_t) __builtin_neon_vset_lane_i8(__s0, (int8x8_t)__s1, __p2); \
+ __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vset_lane_f32(__p0, __p1, __p2) __extension__ ({ \
+ float32_t __s0 = __p0; \
+ float32x2_t __s1 = __p1; \
+ float32x2_t __ret; \
+ __ret = (float32x2_t) __builtin_neon_vset_lane_f32(__s0, (int8x8_t)__s1, __p2); \
+ __ret; \
+})
+#else
+#define vset_lane_f32(__p0, __p1, __p2) __extension__ ({ \
+ float32_t __s0 = __p0; \
+ float32x2_t __s1 = __p1; \
+ float32x2_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \
+ float32x2_t __ret; \
+ __ret = (float32x2_t) __builtin_neon_vset_lane_f32(__s0, (int8x8_t)__rev1, __p2); \
+ __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
+ __ret; \
+})
+#define __noswap_vset_lane_f32(__p0, __p1, __p2) __extension__ ({ \
+ float32_t __s0 = __p0; \
+ float32x2_t __s1 = __p1; \
+ float32x2_t __ret; \
+ __ret = (float32x2_t) __builtin_neon_vset_lane_f32(__s0, (int8x8_t)__s1, __p2); \
+ __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vset_lane_s32(__p0, __p1, __p2) __extension__ ({ \
+ int32_t __s0 = __p0; \
+ int32x2_t __s1 = __p1; \
+ int32x2_t __ret; \
+ __ret = (int32x2_t) __builtin_neon_vset_lane_i32(__s0, (int8x8_t)__s1, __p2); \
+ __ret; \
+})
+#else
+#define vset_lane_s32(__p0, __p1, __p2) __extension__ ({ \
+ int32_t __s0 = __p0; \
+ int32x2_t __s1 = __p1; \
+ int32x2_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \
+ int32x2_t __ret; \
+ __ret = (int32x2_t) __builtin_neon_vset_lane_i32(__s0, (int8x8_t)__rev1, __p2); \
+ __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
+ __ret; \
+})
+#define __noswap_vset_lane_s32(__p0, __p1, __p2) __extension__ ({ \
+ int32_t __s0 = __p0; \
+ int32x2_t __s1 = __p1; \
+ int32x2_t __ret; \
+ __ret = (int32x2_t) __builtin_neon_vset_lane_i32(__s0, (int8x8_t)__s1, __p2); \
+ __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vset_lane_s64(__p0, __p1, __p2) __extension__ ({ \
+ int64_t __s0 = __p0; \
+ int64x1_t __s1 = __p1; \
+ int64x1_t __ret; \
+ __ret = (int64x1_t) __builtin_neon_vset_lane_i64(__s0, (int8x8_t)__s1, __p2); \
+ __ret; \
+})
+#else
+#define vset_lane_s64(__p0, __p1, __p2) __extension__ ({ \
+ int64_t __s0 = __p0; \
+ int64x1_t __s1 = __p1; \
+ int64x1_t __ret; \
+ __ret = (int64x1_t) __builtin_neon_vset_lane_i64(__s0, (int8x8_t)__s1, __p2); \
+ __ret; \
+})
+#define __noswap_vset_lane_s64(__p0, __p1, __p2) __extension__ ({ \
+ int64_t __s0 = __p0; \
+ int64x1_t __s1 = __p1; \
+ int64x1_t __ret; \
+ __ret = (int64x1_t) __builtin_neon_vset_lane_i64(__s0, (int8x8_t)__s1, __p2); \
+ __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vset_lane_s16(__p0, __p1, __p2) __extension__ ({ \
+ int16_t __s0 = __p0; \
+ int16x4_t __s1 = __p1; \
+ int16x4_t __ret; \
+ __ret = (int16x4_t) __builtin_neon_vset_lane_i16(__s0, (int8x8_t)__s1, __p2); \
+ __ret; \
+})
+#else
+#define vset_lane_s16(__p0, __p1, __p2) __extension__ ({ \
+ int16_t __s0 = __p0; \
+ int16x4_t __s1 = __p1; \
+ int16x4_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
+ int16x4_t __ret; \
+ __ret = (int16x4_t) __builtin_neon_vset_lane_i16(__s0, (int8x8_t)__rev1, __p2); \
+ __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
+ __ret; \
+})
+#define __noswap_vset_lane_s16(__p0, __p1, __p2) __extension__ ({ \
+ int16_t __s0 = __p0; \
+ int16x4_t __s1 = __p1; \
+ int16x4_t __ret; \
+ __ret = (int16x4_t) __builtin_neon_vset_lane_i16(__s0, (int8x8_t)__s1, __p2); \
+ __ret; \
+})
+#endif
+
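+/* vshl/vshlq perform an element-wise variable shift: each lane of __p0 is
+ * shifted by the signed count in the corresponding lane of __p1, with
+ * negative counts shifting right. The trailing integer passed to the
+ * polymorphic __builtin_neon_vshl*_v builtin encodes the element type. */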
+#ifdef __LITTLE_ENDIAN__
+__ai uint8x16_t vshlq_u8(uint8x16_t __p0, int8x16_t __p1) {
+ uint8x16_t __ret;
+ __ret = (uint8x16_t) __builtin_neon_vshlq_v((int8x16_t)__p0, (int8x16_t)__p1, 48);
+ return __ret;
+}
+#else
+__ai uint8x16_t vshlq_u8(uint8x16_t __p0, int8x16_t __p1) {
+ uint8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
+ int8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
+ uint8x16_t __ret;
+ __ret = (uint8x16_t) __builtin_neon_vshlq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 48);
+ __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint32x4_t vshlq_u32(uint32x4_t __p0, int32x4_t __p1) {
+ uint32x4_t __ret;
+ __ret = (uint32x4_t) __builtin_neon_vshlq_v((int8x16_t)__p0, (int8x16_t)__p1, 50);
+ return __ret;
+}
+#else
+__ai uint32x4_t vshlq_u32(uint32x4_t __p0, int32x4_t __p1) {
+ uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
+ int32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
+ uint32x4_t __ret;
+ __ret = (uint32x4_t) __builtin_neon_vshlq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 50);
+ __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint64x2_t vshlq_u64(uint64x2_t __p0, int64x2_t __p1) {
+ uint64x2_t __ret;
+ __ret = (uint64x2_t) __builtin_neon_vshlq_v((int8x16_t)__p0, (int8x16_t)__p1, 51);
+ return __ret;
+}
+#else
+__ai uint64x2_t vshlq_u64(uint64x2_t __p0, int64x2_t __p1) {
+ uint64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
+ int64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
+ uint64x2_t __ret;
+ __ret = (uint64x2_t) __builtin_neon_vshlq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 51);
+ __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint16x8_t vshlq_u16(uint16x8_t __p0, int16x8_t __p1) {
+ uint16x8_t __ret;
+ __ret = (uint16x8_t) __builtin_neon_vshlq_v((int8x16_t)__p0, (int8x16_t)__p1, 49);
+ return __ret;
+}
+#else
+__ai uint16x8_t vshlq_u16(uint16x8_t __p0, int16x8_t __p1) {
+ uint16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
+ int16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
+ uint16x8_t __ret;
+ __ret = (uint16x8_t) __builtin_neon_vshlq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 49);
+ __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai int8x16_t vshlq_s8(int8x16_t __p0, int8x16_t __p1) {
+ int8x16_t __ret;
+ __ret = (int8x16_t) __builtin_neon_vshlq_v((int8x16_t)__p0, (int8x16_t)__p1, 32);
+ return __ret;
+}
+#else
+__ai int8x16_t vshlq_s8(int8x16_t __p0, int8x16_t __p1) {
+ int8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
+ int8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
+ int8x16_t __ret;
+ __ret = (int8x16_t) __builtin_neon_vshlq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 32);
+ __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai int32x4_t vshlq_s32(int32x4_t __p0, int32x4_t __p1) {
+ int32x4_t __ret;
+ __ret = (int32x4_t) __builtin_neon_vshlq_v((int8x16_t)__p0, (int8x16_t)__p1, 34);
+ return __ret;
+}
+#else
+__ai int32x4_t vshlq_s32(int32x4_t __p0, int32x4_t __p1) {
+ int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
+ int32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
+ int32x4_t __ret;
+ __ret = (int32x4_t) __builtin_neon_vshlq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 34);
+ __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai int64x2_t vshlq_s64(int64x2_t __p0, int64x2_t __p1) {
+ int64x2_t __ret;
+ __ret = (int64x2_t) __builtin_neon_vshlq_v((int8x16_t)__p0, (int8x16_t)__p1, 35);
+ return __ret;
+}
+#else
+__ai int64x2_t vshlq_s64(int64x2_t __p0, int64x2_t __p1) {
+ int64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
+ int64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
+ int64x2_t __ret;
+ __ret = (int64x2_t) __builtin_neon_vshlq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 35);
+ __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai int16x8_t vshlq_s16(int16x8_t __p0, int16x8_t __p1) {
+ int16x8_t __ret;
+ __ret = (int16x8_t) __builtin_neon_vshlq_v((int8x16_t)__p0, (int8x16_t)__p1, 33);
+ return __ret;
+}
+#else
+__ai int16x8_t vshlq_s16(int16x8_t __p0, int16x8_t __p1) {
+ int16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
+ int16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
+ int16x8_t __ret;
+ __ret = (int16x8_t) __builtin_neon_vshlq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 33);
+ __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint8x8_t vshl_u8(uint8x8_t __p0, int8x8_t __p1) {
+ uint8x8_t __ret;
+ __ret = (uint8x8_t) __builtin_neon_vshl_v((int8x8_t)__p0, (int8x8_t)__p1, 16);
+ return __ret;
+}
+#else
+__ai uint8x8_t vshl_u8(uint8x8_t __p0, int8x8_t __p1) {
+ uint8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
+ int8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
+ uint8x8_t __ret;
+ __ret = (uint8x8_t) __builtin_neon_vshl_v((int8x8_t)__rev0, (int8x8_t)__rev1, 16);
+ __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint32x2_t vshl_u32(uint32x2_t __p0, int32x2_t __p1) {
+ uint32x2_t __ret;
+ __ret = (uint32x2_t) __builtin_neon_vshl_v((int8x8_t)__p0, (int8x8_t)__p1, 18);
+ return __ret;
+}
+#else
+__ai uint32x2_t vshl_u32(uint32x2_t __p0, int32x2_t __p1) {
+ uint32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
+ int32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
+ uint32x2_t __ret;
+ __ret = (uint32x2_t) __builtin_neon_vshl_v((int8x8_t)__rev0, (int8x8_t)__rev1, 18);
+ __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint64x1_t vshl_u64(uint64x1_t __p0, int64x1_t __p1) {
+ uint64x1_t __ret;
+ __ret = (uint64x1_t) __builtin_neon_vshl_v((int8x8_t)__p0, (int8x8_t)__p1, 19);
+ return __ret;
+}
+#else
+__ai uint64x1_t vshl_u64(uint64x1_t __p0, int64x1_t __p1) {
+ uint64x1_t __ret;
+ __ret = (uint64x1_t) __builtin_neon_vshl_v((int8x8_t)__p0, (int8x8_t)__p1, 19);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint16x4_t vshl_u16(uint16x4_t __p0, int16x4_t __p1) {
+ uint16x4_t __ret;
+ __ret = (uint16x4_t) __builtin_neon_vshl_v((int8x8_t)__p0, (int8x8_t)__p1, 17);
+ return __ret;
+}
+#else
+__ai uint16x4_t vshl_u16(uint16x4_t __p0, int16x4_t __p1) {
+ uint16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
+ int16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
+ uint16x4_t __ret;
+ __ret = (uint16x4_t) __builtin_neon_vshl_v((int8x8_t)__rev0, (int8x8_t)__rev1, 17);
+ __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai int8x8_t vshl_s8(int8x8_t __p0, int8x8_t __p1) {
+ int8x8_t __ret;
+ __ret = (int8x8_t) __builtin_neon_vshl_v((int8x8_t)__p0, (int8x8_t)__p1, 0);
+ return __ret;
+}
+#else
+__ai int8x8_t vshl_s8(int8x8_t __p0, int8x8_t __p1) {
+ int8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
+ int8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
+ int8x8_t __ret;
+ __ret = (int8x8_t) __builtin_neon_vshl_v((int8x8_t)__rev0, (int8x8_t)__rev1, 0);
+ __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai int32x2_t vshl_s32(int32x2_t __p0, int32x2_t __p1) {
+ int32x2_t __ret;
+ __ret = (int32x2_t) __builtin_neon_vshl_v((int8x8_t)__p0, (int8x8_t)__p1, 2);
+ return __ret;
+}
+#else
+__ai int32x2_t vshl_s32(int32x2_t __p0, int32x2_t __p1) {
+ int32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
+ int32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
+ int32x2_t __ret;
+ __ret = (int32x2_t) __builtin_neon_vshl_v((int8x8_t)__rev0, (int8x8_t)__rev1, 2);
+ __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai int64x1_t vshl_s64(int64x1_t __p0, int64x1_t __p1) {
+ int64x1_t __ret;
+ __ret = (int64x1_t) __builtin_neon_vshl_v((int8x8_t)__p0, (int8x8_t)__p1, 3);
+ return __ret;
+}
+#else
+__ai int64x1_t vshl_s64(int64x1_t __p0, int64x1_t __p1) {
+ int64x1_t __ret;
+ __ret = (int64x1_t) __builtin_neon_vshl_v((int8x8_t)__p0, (int8x8_t)__p1, 3);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai int16x4_t vshl_s16(int16x4_t __p0, int16x4_t __p1) {
+ int16x4_t __ret;
+ __ret = (int16x4_t) __builtin_neon_vshl_v((int8x8_t)__p0, (int8x8_t)__p1, 1);
+ return __ret;
+}
+#else
+__ai int16x4_t vshl_s16(int16x4_t __p0, int16x4_t __p1) {
+ int16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
+ int16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
+ int16x4_t __ret;
+ __ret = (int16x4_t) __builtin_neon_vshl_v((int8x8_t)__rev0, (int8x8_t)__rev1, 1);
+ __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
+ return __ret;
+}
+#endif
+
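+/* vshl_n/vshlq_n shift every lane of __p0 left by the constant __p1. These
+ * are macros rather than inline functions so that __p1 reaches the builtin
+ * as the compile-time constant the underlying instruction requires. */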
+#ifdef __LITTLE_ENDIAN__
+#define vshlq_n_u8(__p0, __p1) __extension__ ({ \
+ uint8x16_t __s0 = __p0; \
+ uint8x16_t __ret; \
+ __ret = (uint8x16_t) __builtin_neon_vshlq_n_v((int8x16_t)__s0, __p1, 48); \
+ __ret; \
+})
+#else
+#define vshlq_n_u8(__p0, __p1) __extension__ ({ \
+ uint8x16_t __s0 = __p0; \
+ uint8x16_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
+ uint8x16_t __ret; \
+ __ret = (uint8x16_t) __builtin_neon_vshlq_n_v((int8x16_t)__rev0, __p1, 48); \
+ __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
+ __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vshlq_n_u32(__p0, __p1) __extension__ ({ \
+ uint32x4_t __s0 = __p0; \
+ uint32x4_t __ret; \
+ __ret = (uint32x4_t) __builtin_neon_vshlq_n_v((int8x16_t)__s0, __p1, 50); \
+ __ret; \
+})
+#else
+#define vshlq_n_u32(__p0, __p1) __extension__ ({ \
+ uint32x4_t __s0 = __p0; \
+ uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
+ uint32x4_t __ret; \
+ __ret = (uint32x4_t) __builtin_neon_vshlq_n_v((int8x16_t)__rev0, __p1, 50); \
+ __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
+ __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vshlq_n_u64(__p0, __p1) __extension__ ({ \
+ uint64x2_t __s0 = __p0; \
+ uint64x2_t __ret; \
+ __ret = (uint64x2_t) __builtin_neon_vshlq_n_v((int8x16_t)__s0, __p1, 51); \
+ __ret; \
+})
+#else
+#define vshlq_n_u64(__p0, __p1) __extension__ ({ \
+ uint64x2_t __s0 = __p0; \
+ uint64x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
+ uint64x2_t __ret; \
+ __ret = (uint64x2_t) __builtin_neon_vshlq_n_v((int8x16_t)__rev0, __p1, 51); \
+ __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
+ __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vshlq_n_u16(__p0, __p1) __extension__ ({ \
+ uint16x8_t __s0 = __p0; \
+ uint16x8_t __ret; \
+ __ret = (uint16x8_t) __builtin_neon_vshlq_n_v((int8x16_t)__s0, __p1, 49); \
+ __ret; \
+})
+#else
+#define vshlq_n_u16(__p0, __p1) __extension__ ({ \
+ uint16x8_t __s0 = __p0; \
+ uint16x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
+ uint16x8_t __ret; \
+ __ret = (uint16x8_t) __builtin_neon_vshlq_n_v((int8x16_t)__rev0, __p1, 49); \
+ __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
+ __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vshlq_n_s8(__p0, __p1) __extension__ ({ \
+ int8x16_t __s0 = __p0; \
+ int8x16_t __ret; \
+ __ret = (int8x16_t) __builtin_neon_vshlq_n_v((int8x16_t)__s0, __p1, 32); \
+ __ret; \
+})
+#else
+#define vshlq_n_s8(__p0, __p1) __extension__ ({ \
+ int8x16_t __s0 = __p0; \
+ int8x16_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
+ int8x16_t __ret; \
+ __ret = (int8x16_t) __builtin_neon_vshlq_n_v((int8x16_t)__rev0, __p1, 32); \
+ __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
+ __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vshlq_n_s32(__p0, __p1) __extension__ ({ \
+ int32x4_t __s0 = __p0; \
+ int32x4_t __ret; \
+ __ret = (int32x4_t) __builtin_neon_vshlq_n_v((int8x16_t)__s0, __p1, 34); \
+ __ret; \
+})
+#else
+#define vshlq_n_s32(__p0, __p1) __extension__ ({ \
+ int32x4_t __s0 = __p0; \
+ int32x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
+ int32x4_t __ret; \
+ __ret = (int32x4_t) __builtin_neon_vshlq_n_v((int8x16_t)__rev0, __p1, 34); \
+ __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
+ __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vshlq_n_s64(__p0, __p1) __extension__ ({ \
+ int64x2_t __s0 = __p0; \
+ int64x2_t __ret; \
+ __ret = (int64x2_t) __builtin_neon_vshlq_n_v((int8x16_t)__s0, __p1, 35); \
+ __ret; \
+})
+#else
+#define vshlq_n_s64(__p0, __p1) __extension__ ({ \
+ int64x2_t __s0 = __p0; \
+ int64x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
+ int64x2_t __ret; \
+ __ret = (int64x2_t) __builtin_neon_vshlq_n_v((int8x16_t)__rev0, __p1, 35); \
+ __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
+ __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vshlq_n_s16(__p0, __p1) __extension__ ({ \
+ int16x8_t __s0 = __p0; \
+ int16x8_t __ret; \
+ __ret = (int16x8_t) __builtin_neon_vshlq_n_v((int8x16_t)__s0, __p1, 33); \
+ __ret; \
+})
+#else
+#define vshlq_n_s16(__p0, __p1) __extension__ ({ \
+ int16x8_t __s0 = __p0; \
+ int16x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
+ int16x8_t __ret; \
+ __ret = (int16x8_t) __builtin_neon_vshlq_n_v((int8x16_t)__rev0, __p1, 33); \
+ __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
+ __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vshl_n_u8(__p0, __p1) __extension__ ({ \
+ uint8x8_t __s0 = __p0; \
+ uint8x8_t __ret; \
+ __ret = (uint8x8_t) __builtin_neon_vshl_n_v((int8x8_t)__s0, __p1, 16); \
+ __ret; \
+})
+#else
+#define vshl_n_u8(__p0, __p1) __extension__ ({ \
+ uint8x8_t __s0 = __p0; \
+ uint8x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
+ uint8x8_t __ret; \
+ __ret = (uint8x8_t) __builtin_neon_vshl_n_v((int8x8_t)__rev0, __p1, 16); \
+ __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
+ __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vshl_n_u32(__p0, __p1) __extension__ ({ \
+ uint32x2_t __s0 = __p0; \
+ uint32x2_t __ret; \
+ __ret = (uint32x2_t) __builtin_neon_vshl_n_v((int8x8_t)__s0, __p1, 18); \
+ __ret; \
+})
+#else
+#define vshl_n_u32(__p0, __p1) __extension__ ({ \
+ uint32x2_t __s0 = __p0; \
+ uint32x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
+ uint32x2_t __ret; \
+ __ret = (uint32x2_t) __builtin_neon_vshl_n_v((int8x8_t)__rev0, __p1, 18); \
+ __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
+ __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vshl_n_u64(__p0, __p1) __extension__ ({ \
+ uint64x1_t __s0 = __p0; \
+ uint64x1_t __ret; \
+ __ret = (uint64x1_t) __builtin_neon_vshl_n_v((int8x8_t)__s0, __p1, 19); \
+ __ret; \
+})
+#else
+#define vshl_n_u64(__p0, __p1) __extension__ ({ \
+ uint64x1_t __s0 = __p0; \
+ uint64x1_t __ret; \
+ __ret = (uint64x1_t) __builtin_neon_vshl_n_v((int8x8_t)__s0, __p1, 19); \
+ __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vshl_n_u16(__p0, __p1) __extension__ ({ \
+ uint16x4_t __s0 = __p0; \
+ uint16x4_t __ret; \
+ __ret = (uint16x4_t) __builtin_neon_vshl_n_v((int8x8_t)__s0, __p1, 17); \
+ __ret; \
+})
+#else
+#define vshl_n_u16(__p0, __p1) __extension__ ({ \
+ uint16x4_t __s0 = __p0; \
+ uint16x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
+ uint16x4_t __ret; \
+ __ret = (uint16x4_t) __builtin_neon_vshl_n_v((int8x8_t)__rev0, __p1, 17); \
+ __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
+ __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vshl_n_s8(__p0, __p1) __extension__ ({ \
+ int8x8_t __s0 = __p0; \
+ int8x8_t __ret; \
+ __ret = (int8x8_t) __builtin_neon_vshl_n_v((int8x8_t)__s0, __p1, 0); \
+ __ret; \
+})
+#else
+#define vshl_n_s8(__p0, __p1) __extension__ ({ \
+ int8x8_t __s0 = __p0; \
+ int8x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
+ int8x8_t __ret; \
+ __ret = (int8x8_t) __builtin_neon_vshl_n_v((int8x8_t)__rev0, __p1, 0); \
+ __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
+ __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vshl_n_s32(__p0, __p1) __extension__ ({ \
+ int32x2_t __s0 = __p0; \
+ int32x2_t __ret; \
+ __ret = (int32x2_t) __builtin_neon_vshl_n_v((int8x8_t)__s0, __p1, 2); \
+ __ret; \
+})
+#else
+#define vshl_n_s32(__p0, __p1) __extension__ ({ \
+ int32x2_t __s0 = __p0; \
+ int32x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
+ int32x2_t __ret; \
+ __ret = (int32x2_t) __builtin_neon_vshl_n_v((int8x8_t)__rev0, __p1, 2); \
+ __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
+ __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vshl_n_s64(__p0, __p1) __extension__ ({ \
+ int64x1_t __s0 = __p0; \
+ int64x1_t __ret; \
+ __ret = (int64x1_t) __builtin_neon_vshl_n_v((int8x8_t)__s0, __p1, 3); \
+ __ret; \
+})
+#else
+#define vshl_n_s64(__p0, __p1) __extension__ ({ \
+ int64x1_t __s0 = __p0; \
+ int64x1_t __ret; \
+ __ret = (int64x1_t) __builtin_neon_vshl_n_v((int8x8_t)__s0, __p1, 3); \
+ __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vshl_n_s16(__p0, __p1) __extension__ ({ \
+ int16x4_t __s0 = __p0; \
+ int16x4_t __ret; \
+ __ret = (int16x4_t) __builtin_neon_vshl_n_v((int8x8_t)__s0, __p1, 1); \
+ __ret; \
+})
+#else
+#define vshl_n_s16(__p0, __p1) __extension__ ({ \
+ int16x4_t __s0 = __p0; \
+ int16x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
+ int16x4_t __ret; \
+ __ret = (int16x4_t) __builtin_neon_vshl_n_v((int8x8_t)__rev0, __p1, 1); \
+ __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
+ __ret; \
+})
+#endif
+
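+/* vshll_n_* widen each lane to twice its width and then shift left by the
+ * constant __p1 (e.g. uint8x8_t -> uint16x8_t). The __noswap_ forms again
+ * skip the big-endian lane reversal. */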
+#ifdef __LITTLE_ENDIAN__
+#define vshll_n_u8(__p0, __p1) __extension__ ({ \
+ uint8x8_t __s0 = __p0; \
+ uint16x8_t __ret; \
+ __ret = (uint16x8_t) __builtin_neon_vshll_n_v((int8x8_t)__s0, __p1, 49); \
+ __ret; \
+})
+#else
+#define vshll_n_u8(__p0, __p1) __extension__ ({ \
+ uint8x8_t __s0 = __p0; \
+ uint8x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
+ uint16x8_t __ret; \
+ __ret = (uint16x8_t) __builtin_neon_vshll_n_v((int8x8_t)__rev0, __p1, 49); \
+ __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
+ __ret; \
+})
+#define __noswap_vshll_n_u8(__p0, __p1) __extension__ ({ \
+ uint8x8_t __s0 = __p0; \
+ uint16x8_t __ret; \
+ __ret = (uint16x8_t) __builtin_neon_vshll_n_v((int8x8_t)__s0, __p1, 49); \
+ __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vshll_n_u32(__p0, __p1) __extension__ ({ \
+ uint32x2_t __s0 = __p0; \
+ uint64x2_t __ret; \
+ __ret = (uint64x2_t) __builtin_neon_vshll_n_v((int8x8_t)__s0, __p1, 51); \
+ __ret; \
+})
+#else
+#define vshll_n_u32(__p0, __p1) __extension__ ({ \
+ uint32x2_t __s0 = __p0; \
+ uint32x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
+ uint64x2_t __ret; \
+ __ret = (uint64x2_t) __builtin_neon_vshll_n_v((int8x8_t)__rev0, __p1, 51); \
+ __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
+ __ret; \
+})
+#define __noswap_vshll_n_u32(__p0, __p1) __extension__ ({ \
+ uint32x2_t __s0 = __p0; \
+ uint64x2_t __ret; \
+ __ret = (uint64x2_t) __builtin_neon_vshll_n_v((int8x8_t)__s0, __p1, 51); \
+ __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vshll_n_u16(__p0, __p1) __extension__ ({ \
+ uint16x4_t __s0 = __p0; \
+ uint32x4_t __ret; \
+ __ret = (uint32x4_t) __builtin_neon_vshll_n_v((int8x8_t)__s0, __p1, 50); \
+ __ret; \
+})
+#else
+#define vshll_n_u16(__p0, __p1) __extension__ ({ \
+ uint16x4_t __s0 = __p0; \
+ uint16x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
+ uint32x4_t __ret; \
+ __ret = (uint32x4_t) __builtin_neon_vshll_n_v((int8x8_t)__rev0, __p1, 50); \
+ __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
+ __ret; \
+})
+#define __noswap_vshll_n_u16(__p0, __p1) __extension__ ({ \
+ uint16x4_t __s0 = __p0; \
+ uint32x4_t __ret; \
+ __ret = (uint32x4_t) __builtin_neon_vshll_n_v((int8x8_t)__s0, __p1, 50); \
+ __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vshll_n_s8(__p0, __p1) __extension__ ({ \
+ int8x8_t __s0 = __p0; \
+ int16x8_t __ret; \
+ __ret = (int16x8_t) __builtin_neon_vshll_n_v((int8x8_t)__s0, __p1, 33); \
+ __ret; \
+})
+#else
+#define vshll_n_s8(__p0, __p1) __extension__ ({ \
+ int8x8_t __s0 = __p0; \
+ int8x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
+ int16x8_t __ret; \
+ __ret = (int16x8_t) __builtin_neon_vshll_n_v((int8x8_t)__rev0, __p1, 33); \
+ __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
+ __ret; \
+})
+#define __noswap_vshll_n_s8(__p0, __p1) __extension__ ({ \
+ int8x8_t __s0 = __p0; \
+ int16x8_t __ret; \
+ __ret = (int16x8_t) __builtin_neon_vshll_n_v((int8x8_t)__s0, __p1, 33); \
+ __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vshll_n_s32(__p0, __p1) __extension__ ({ \
+ int32x2_t __s0 = __p0; \
+ int64x2_t __ret; \
+ __ret = (int64x2_t) __builtin_neon_vshll_n_v((int8x8_t)__s0, __p1, 35); \
+ __ret; \
+})
+#else
+#define vshll_n_s32(__p0, __p1) __extension__ ({ \
+ int32x2_t __s0 = __p0; \
+ int32x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
+ int64x2_t __ret; \
+ __ret = (int64x2_t) __builtin_neon_vshll_n_v((int8x8_t)__rev0, __p1, 35); \
+ __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
+ __ret; \
+})
+#define __noswap_vshll_n_s32(__p0, __p1) __extension__ ({ \
+ int32x2_t __s0 = __p0; \
+ int64x2_t __ret; \
+ __ret = (int64x2_t) __builtin_neon_vshll_n_v((int8x8_t)__s0, __p1, 35); \
+ __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vshll_n_s16(__p0, __p1) __extension__ ({ \
+ int16x4_t __s0 = __p0; \
+ int32x4_t __ret; \
+ __ret = (int32x4_t) __builtin_neon_vshll_n_v((int8x8_t)__s0, __p1, 34); \
+ __ret; \
+})
+#else
+#define vshll_n_s16(__p0, __p1) __extension__ ({ \
+ int16x4_t __s0 = __p0; \
+ int16x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
+ int32x4_t __ret; \
+ __ret = (int32x4_t) __builtin_neon_vshll_n_v((int8x8_t)__rev0, __p1, 34); \
+ __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
+ __ret; \
+})
+#define __noswap_vshll_n_s16(__p0, __p1) __extension__ ({ \
+ int16x4_t __s0 = __p0; \
+ int32x4_t __ret; \
+ __ret = (int32x4_t) __builtin_neon_vshll_n_v((int8x8_t)__s0, __p1, 34); \
+ __ret; \
+})
+#endif
+
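+/* vshr_n/vshrq_n shift every lane right by the constant __p1: a logical
+ * shift for the unsigned types and an arithmetic shift for the signed
+ * types. */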
+#ifdef __LITTLE_ENDIAN__
+#define vshrq_n_u8(__p0, __p1) __extension__ ({ \
+ uint8x16_t __s0 = __p0; \
+ uint8x16_t __ret; \
+ __ret = (uint8x16_t) __builtin_neon_vshrq_n_v((int8x16_t)__s0, __p1, 48); \
+ __ret; \
+})
+#else
+#define vshrq_n_u8(__p0, __p1) __extension__ ({ \
+ uint8x16_t __s0 = __p0; \
+ uint8x16_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
+ uint8x16_t __ret; \
+ __ret = (uint8x16_t) __builtin_neon_vshrq_n_v((int8x16_t)__rev0, __p1, 48); \
+ __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
+ __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vshrq_n_u32(__p0, __p1) __extension__ ({ \
+ uint32x4_t __s0 = __p0; \
+ uint32x4_t __ret; \
+ __ret = (uint32x4_t) __builtin_neon_vshrq_n_v((int8x16_t)__s0, __p1, 50); \
+ __ret; \
+})
+#else
+#define vshrq_n_u32(__p0, __p1) __extension__ ({ \
+ uint32x4_t __s0 = __p0; \
+ uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
+ uint32x4_t __ret; \
+ __ret = (uint32x4_t) __builtin_neon_vshrq_n_v((int8x16_t)__rev0, __p1, 50); \
+ __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
+ __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vshrq_n_u64(__p0, __p1) __extension__ ({ \
+ uint64x2_t __s0 = __p0; \
+ uint64x2_t __ret; \
+ __ret = (uint64x2_t) __builtin_neon_vshrq_n_v((int8x16_t)__s0, __p1, 51); \
+ __ret; \
+})
+#else
+#define vshrq_n_u64(__p0, __p1) __extension__ ({ \
+ uint64x2_t __s0 = __p0; \
+ uint64x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
+ uint64x2_t __ret; \
+ __ret = (uint64x2_t) __builtin_neon_vshrq_n_v((int8x16_t)__rev0, __p1, 51); \
+ __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
+ __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vshrq_n_u16(__p0, __p1) __extension__ ({ \
+ uint16x8_t __s0 = __p0; \
+ uint16x8_t __ret; \
+ __ret = (uint16x8_t) __builtin_neon_vshrq_n_v((int8x16_t)__s0, __p1, 49); \
+ __ret; \
+})
+#else
+#define vshrq_n_u16(__p0, __p1) __extension__ ({ \
+ uint16x8_t __s0 = __p0; \
+ uint16x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
+ uint16x8_t __ret; \
+ __ret = (uint16x8_t) __builtin_neon_vshrq_n_v((int8x16_t)__rev0, __p1, 49); \
+ __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
+ __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vshrq_n_s8(__p0, __p1) __extension__ ({ \
+ int8x16_t __s0 = __p0; \
+ int8x16_t __ret; \
+ __ret = (int8x16_t) __builtin_neon_vshrq_n_v((int8x16_t)__s0, __p1, 32); \
+ __ret; \
+})
+#else
+#define vshrq_n_s8(__p0, __p1) __extension__ ({ \
+ int8x16_t __s0 = __p0; \
+ int8x16_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
+ int8x16_t __ret; \
+ __ret = (int8x16_t) __builtin_neon_vshrq_n_v((int8x16_t)__rev0, __p1, 32); \
+ __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
+ __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vshrq_n_s32(__p0, __p1) __extension__ ({ \
+ int32x4_t __s0 = __p0; \
+ int32x4_t __ret; \
+ __ret = (int32x4_t) __builtin_neon_vshrq_n_v((int8x16_t)__s0, __p1, 34); \
+ __ret; \
+})
+#else
+#define vshrq_n_s32(__p0, __p1) __extension__ ({ \
+ int32x4_t __s0 = __p0; \
+ int32x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
+ int32x4_t __ret; \
+ __ret = (int32x4_t) __builtin_neon_vshrq_n_v((int8x16_t)__rev0, __p1, 34); \
+ __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
+ __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vshrq_n_s64(__p0, __p1) __extension__ ({ \
+ int64x2_t __s0 = __p0; \
+ int64x2_t __ret; \
+ __ret = (int64x2_t) __builtin_neon_vshrq_n_v((int8x16_t)__s0, __p1, 35); \
+ __ret; \
+})
+#else
+#define vshrq_n_s64(__p0, __p1) __extension__ ({ \
+ int64x2_t __s0 = __p0; \
+ int64x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
+ int64x2_t __ret; \
+ __ret = (int64x2_t) __builtin_neon_vshrq_n_v((int8x16_t)__rev0, __p1, 35); \
+ __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
+ __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vshrq_n_s16(__p0, __p1) __extension__ ({ \
+ int16x8_t __s0 = __p0; \
+ int16x8_t __ret; \
+ __ret = (int16x8_t) __builtin_neon_vshrq_n_v((int8x16_t)__s0, __p1, 33); \
+ __ret; \
+})
+#else
+#define vshrq_n_s16(__p0, __p1) __extension__ ({ \
+ int16x8_t __s0 = __p0; \
+ int16x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
+ int16x8_t __ret; \
+ __ret = (int16x8_t) __builtin_neon_vshrq_n_v((int8x16_t)__rev0, __p1, 33); \
+ __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
+ __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vshr_n_u8(__p0, __p1) __extension__ ({ \
+ uint8x8_t __s0 = __p0; \
+ uint8x8_t __ret; \
+ __ret = (uint8x8_t) __builtin_neon_vshr_n_v((int8x8_t)__s0, __p1, 16); \
+ __ret; \
+})
+#else
+#define vshr_n_u8(__p0, __p1) __extension__ ({ \
+ uint8x8_t __s0 = __p0; \
+ uint8x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
+ uint8x8_t __ret; \
+ __ret = (uint8x8_t) __builtin_neon_vshr_n_v((int8x8_t)__rev0, __p1, 16); \
+ __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
+ __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vshr_n_u32(__p0, __p1) __extension__ ({ \
+ uint32x2_t __s0 = __p0; \
+ uint32x2_t __ret; \
+ __ret = (uint32x2_t) __builtin_neon_vshr_n_v((int8x8_t)__s0, __p1, 18); \
+ __ret; \
+})
+#else
+#define vshr_n_u32(__p0, __p1) __extension__ ({ \
+ uint32x2_t __s0 = __p0; \
+ uint32x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
+ uint32x2_t __ret; \
+ __ret = (uint32x2_t) __builtin_neon_vshr_n_v((int8x8_t)__rev0, __p1, 18); \
+ __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
+ __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vshr_n_u64(__p0, __p1) __extension__ ({ \
+ uint64x1_t __s0 = __p0; \
+ uint64x1_t __ret; \
+ __ret = (uint64x1_t) __builtin_neon_vshr_n_v((int8x8_t)__s0, __p1, 19); \
+ __ret; \
+})
+#else
+#define vshr_n_u64(__p0, __p1) __extension__ ({ \
+ uint64x1_t __s0 = __p0; \
+ uint64x1_t __ret; \
+ __ret = (uint64x1_t) __builtin_neon_vshr_n_v((int8x8_t)__s0, __p1, 19); \
+ __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vshr_n_u16(__p0, __p1) __extension__ ({ \
+ uint16x4_t __s0 = __p0; \
+ uint16x4_t __ret; \
+ __ret = (uint16x4_t) __builtin_neon_vshr_n_v((int8x8_t)__s0, __p1, 17); \
+ __ret; \
+})
+#else
+#define vshr_n_u16(__p0, __p1) __extension__ ({ \
+ uint16x4_t __s0 = __p0; \
+ uint16x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
+ uint16x4_t __ret; \
+ __ret = (uint16x4_t) __builtin_neon_vshr_n_v((int8x8_t)__rev0, __p1, 17); \
+ __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
+ __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vshr_n_s8(__p0, __p1) __extension__ ({ \
+ int8x8_t __s0 = __p0; \
+ int8x8_t __ret; \
+ __ret = (int8x8_t) __builtin_neon_vshr_n_v((int8x8_t)__s0, __p1, 0); \
+ __ret; \
+})
+#else
+#define vshr_n_s8(__p0, __p1) __extension__ ({ \
+ int8x8_t __s0 = __p0; \
+ int8x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
+ int8x8_t __ret; \
+ __ret = (int8x8_t) __builtin_neon_vshr_n_v((int8x8_t)__rev0, __p1, 0); \
+ __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
+ __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vshr_n_s32(__p0, __p1) __extension__ ({ \
+ int32x2_t __s0 = __p0; \
+ int32x2_t __ret; \
+ __ret = (int32x2_t) __builtin_neon_vshr_n_v((int8x8_t)__s0, __p1, 2); \
+ __ret; \
+})
+#else
+#define vshr_n_s32(__p0, __p1) __extension__ ({ \
+ int32x2_t __s0 = __p0; \
+ int32x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
+ int32x2_t __ret; \
+ __ret = (int32x2_t) __builtin_neon_vshr_n_v((int8x8_t)__rev0, __p1, 2); \
+ __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
+ __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vshr_n_s64(__p0, __p1) __extension__ ({ \
+ int64x1_t __s0 = __p0; \
+ int64x1_t __ret; \
+ __ret = (int64x1_t) __builtin_neon_vshr_n_v((int8x8_t)__s0, __p1, 3); \
+ __ret; \
+})
+#else
+#define vshr_n_s64(__p0, __p1) __extension__ ({ \
+ int64x1_t __s0 = __p0; \
+ int64x1_t __ret; \
+ __ret = (int64x1_t) __builtin_neon_vshr_n_v((int8x8_t)__s0, __p1, 3); \
+ __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vshr_n_s16(__p0, __p1) __extension__ ({ \
+ int16x4_t __s0 = __p0; \
+ int16x4_t __ret; \
+ __ret = (int16x4_t) __builtin_neon_vshr_n_v((int8x8_t)__s0, __p1, 1); \
+ __ret; \
+})
+#else
+#define vshr_n_s16(__p0, __p1) __extension__ ({ \
+ int16x4_t __s0 = __p0; \
+ int16x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
+ int16x4_t __ret; \
+ __ret = (int16x4_t) __builtin_neon_vshr_n_v((int8x8_t)__rev0, __p1, 1); \
+ __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
+ __ret; \
+})
+#endif
+
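+/* vshrn_n_* shift each lane right by the constant __p1 and narrow the
+ * result to half the width (e.g. uint32x4_t -> uint16x4_t). */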
+#ifdef __LITTLE_ENDIAN__
+#define vshrn_n_u32(__p0, __p1) __extension__ ({ \
+ uint32x4_t __s0 = __p0; \
+ uint16x4_t __ret; \
+ __ret = (uint16x4_t) __builtin_neon_vshrn_n_v((int8x16_t)__s0, __p1, 17); \
+ __ret; \
+})
+#else
+#define vshrn_n_u32(__p0, __p1) __extension__ ({ \
+ uint32x4_t __s0 = __p0; \
+ uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
+ uint16x4_t __ret; \
+ __ret = (uint16x4_t) __builtin_neon_vshrn_n_v((int8x16_t)__rev0, __p1, 17); \
+ __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
+ __ret; \
+})
+#define __noswap_vshrn_n_u32(__p0, __p1) __extension__ ({ \
+ uint32x4_t __s0 = __p0; \
+ uint16x4_t __ret; \
+ __ret = (uint16x4_t) __builtin_neon_vshrn_n_v((int8x16_t)__s0, __p1, 17); \
+ __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vshrn_n_u64(__p0, __p1) __extension__ ({ \
+ uint64x2_t __s0 = __p0; \
+ uint32x2_t __ret; \
+ __ret = (uint32x2_t) __builtin_neon_vshrn_n_v((int8x16_t)__s0, __p1, 18); \
+ __ret; \
+})
+#else
+#define vshrn_n_u64(__p0, __p1) __extension__ ({ \
+ uint64x2_t __s0 = __p0; \
+ uint64x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
+ uint32x2_t __ret; \
+ __ret = (uint32x2_t) __builtin_neon_vshrn_n_v((int8x16_t)__rev0, __p1, 18); \
+ __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
+ __ret; \
+})
+#define __noswap_vshrn_n_u64(__p0, __p1) __extension__ ({ \
+ uint64x2_t __s0 = __p0; \
+ uint32x2_t __ret; \
+ __ret = (uint32x2_t) __builtin_neon_vshrn_n_v((int8x16_t)__s0, __p1, 18); \
+ __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vshrn_n_u16(__p0, __p1) __extension__ ({ \
+ uint16x8_t __s0 = __p0; \
+ uint8x8_t __ret; \
+ __ret = (uint8x8_t) __builtin_neon_vshrn_n_v((int8x16_t)__s0, __p1, 16); \
+ __ret; \
+})
+#else
+#define vshrn_n_u16(__p0, __p1) __extension__ ({ \
+ uint16x8_t __s0 = __p0; \
+ uint16x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
+ uint8x8_t __ret; \
+ __ret = (uint8x8_t) __builtin_neon_vshrn_n_v((int8x16_t)__rev0, __p1, 16); \
+ __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
+ __ret; \
+})
+#define __noswap_vshrn_n_u16(__p0, __p1) __extension__ ({ \
+ uint16x8_t __s0 = __p0; \
+ uint8x8_t __ret; \
+ __ret = (uint8x8_t) __builtin_neon_vshrn_n_v((int8x16_t)__s0, __p1, 16); \
+ __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vshrn_n_s32(__p0, __p1) __extension__ ({ \
+ int32x4_t __s0 = __p0; \
+ int16x4_t __ret; \
+ __ret = (int16x4_t) __builtin_neon_vshrn_n_v((int8x16_t)__s0, __p1, 1); \
+ __ret; \
+})
+#else
+#define vshrn_n_s32(__p0, __p1) __extension__ ({ \
+ int32x4_t __s0 = __p0; \
+ int32x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
+ int16x4_t __ret; \
+ __ret = (int16x4_t) __builtin_neon_vshrn_n_v((int8x16_t)__rev0, __p1, 1); \
+ __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
+ __ret; \
+})
+#define __noswap_vshrn_n_s32(__p0, __p1) __extension__ ({ \
+ int32x4_t __s0 = __p0; \
+ int16x4_t __ret; \
+ __ret = (int16x4_t) __builtin_neon_vshrn_n_v((int8x16_t)__s0, __p1, 1); \
+ __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vshrn_n_s64(__p0, __p1) __extension__ ({ \
+ int64x2_t __s0 = __p0; \
+ int32x2_t __ret; \
+ __ret = (int32x2_t) __builtin_neon_vshrn_n_v((int8x16_t)__s0, __p1, 2); \
+ __ret; \
+})
+#else
+#define vshrn_n_s64(__p0, __p1) __extension__ ({ \
+ int64x2_t __s0 = __p0; \
+ int64x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
+ int32x2_t __ret; \
+ __ret = (int32x2_t) __builtin_neon_vshrn_n_v((int8x16_t)__rev0, __p1, 2); \
+ __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
+ __ret; \
+})
+#define __noswap_vshrn_n_s64(__p0, __p1) __extension__ ({ \
+ int64x2_t __s0 = __p0; \
+ int32x2_t __ret; \
+ __ret = (int32x2_t) __builtin_neon_vshrn_n_v((int8x16_t)__s0, __p1, 2); \
+ __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vshrn_n_s16(__p0, __p1) __extension__ ({ \
+ int16x8_t __s0 = __p0; \
+ int8x8_t __ret; \
+ __ret = (int8x8_t) __builtin_neon_vshrn_n_v((int8x16_t)__s0, __p1, 0); \
+ __ret; \
+})
+#else
+#define vshrn_n_s16(__p0, __p1) __extension__ ({ \
+ int16x8_t __s0 = __p0; \
+ int16x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
+ int8x8_t __ret; \
+ __ret = (int8x8_t) __builtin_neon_vshrn_n_v((int8x16_t)__rev0, __p1, 0); \
+ __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
+ __ret; \
+})
+#define __noswap_vshrn_n_s16(__p0, __p1) __extension__ ({ \
+ int16x8_t __s0 = __p0; \
+ int8x8_t __ret; \
+ __ret = (int8x8_t) __builtin_neon_vshrn_n_v((int8x16_t)__s0, __p1, 0); \
+ __ret; \
+})
+#endif
+
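+/* vsli_n_* are shift-left-and-insert operations: each lane of __s1 is
+ * shifted left by the constant __p2 and merged into the corresponding lane
+ * of __s0, whose low __p2 bits are preserved. */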
+#ifdef __LITTLE_ENDIAN__
+#define vsli_n_p8(__p0, __p1, __p2) __extension__ ({ \
+ poly8x8_t __s0 = __p0; \
+ poly8x8_t __s1 = __p1; \
+ poly8x8_t __ret; \
+ __ret = (poly8x8_t) __builtin_neon_vsli_n_v((int8x8_t)__s0, (int8x8_t)__s1, __p2, 4); \
+ __ret; \
+})
+#else
+#define vsli_n_p8(__p0, __p1, __p2) __extension__ ({ \
+ poly8x8_t __s0 = __p0; \
+ poly8x8_t __s1 = __p1; \
+ poly8x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
+ poly8x8_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \
+ poly8x8_t __ret; \
+ __ret = (poly8x8_t) __builtin_neon_vsli_n_v((int8x8_t)__rev0, (int8x8_t)__rev1, __p2, 4); \
+ __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
+ __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vsli_n_p16(__p0, __p1, __p2) __extension__ ({ \
+ poly16x4_t __s0 = __p0; \
+ poly16x4_t __s1 = __p1; \
+ poly16x4_t __ret; \
+ __ret = (poly16x4_t) __builtin_neon_vsli_n_v((int8x8_t)__s0, (int8x8_t)__s1, __p2, 5); \
+ __ret; \
+})
+#else
+#define vsli_n_p16(__p0, __p1, __p2) __extension__ ({ \
+ poly16x4_t __s0 = __p0; \
+ poly16x4_t __s1 = __p1; \
+ poly16x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
+ poly16x4_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
+ poly16x4_t __ret; \
+ __ret = (poly16x4_t) __builtin_neon_vsli_n_v((int8x8_t)__rev0, (int8x8_t)__rev1, __p2, 5); \
+ __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
+ __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vsliq_n_p8(__p0, __p1, __p2) __extension__ ({ \
+ poly8x16_t __s0 = __p0; \
+ poly8x16_t __s1 = __p1; \
+ poly8x16_t __ret; \
+ __ret = (poly8x16_t) __builtin_neon_vsliq_n_v((int8x16_t)__s0, (int8x16_t)__s1, __p2, 36); \
+ __ret; \
+})
+#else
+#define vsliq_n_p8(__p0, __p1, __p2) __extension__ ({ \
+ poly8x16_t __s0 = __p0; \
+ poly8x16_t __s1 = __p1; \
+ poly8x16_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
+ poly8x16_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
+ poly8x16_t __ret; \
+ __ret = (poly8x16_t) __builtin_neon_vsliq_n_v((int8x16_t)__rev0, (int8x16_t)__rev1, __p2, 36); \
+ __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
+ __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vsliq_n_p16(__p0, __p1, __p2) __extension__ ({ \
+ poly16x8_t __s0 = __p0; \
+ poly16x8_t __s1 = __p1; \
+ poly16x8_t __ret; \
+ __ret = (poly16x8_t) __builtin_neon_vsliq_n_v((int8x16_t)__s0, (int8x16_t)__s1, __p2, 37); \
+ __ret; \
+})
+#else
+#define vsliq_n_p16(__p0, __p1, __p2) __extension__ ({ \
+ poly16x8_t __s0 = __p0; \
+ poly16x8_t __s1 = __p1; \
+ poly16x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
+ poly16x8_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \
+ poly16x8_t __ret; \
+ __ret = (poly16x8_t) __builtin_neon_vsliq_n_v((int8x16_t)__rev0, (int8x16_t)__rev1, __p2, 37); \
+ __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
+ __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vsliq_n_u8(__p0, __p1, __p2) __extension__ ({ \
+ uint8x16_t __s0 = __p0; \
+ uint8x16_t __s1 = __p1; \
+ uint8x16_t __ret; \
+ __ret = (uint8x16_t) __builtin_neon_vsliq_n_v((int8x16_t)__s0, (int8x16_t)__s1, __p2, 48); \
+ __ret; \
+})
+#else
+#define vsliq_n_u8(__p0, __p1, __p2) __extension__ ({ \
+ uint8x16_t __s0 = __p0; \
+ uint8x16_t __s1 = __p1; \
+ uint8x16_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
+ uint8x16_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
+ uint8x16_t __ret; \
+ __ret = (uint8x16_t) __builtin_neon_vsliq_n_v((int8x16_t)__rev0, (int8x16_t)__rev1, __p2, 48); \
+ __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
+ __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vsliq_n_u32(__p0, __p1, __p2) __extension__ ({ \
+ uint32x4_t __s0 = __p0; \
+ uint32x4_t __s1 = __p1; \
+ uint32x4_t __ret; \
+ __ret = (uint32x4_t) __builtin_neon_vsliq_n_v((int8x16_t)__s0, (int8x16_t)__s1, __p2, 50); \
+ __ret; \
+})
+#else
+#define vsliq_n_u32(__p0, __p1, __p2) __extension__ ({ \
+ uint32x4_t __s0 = __p0; \
+ uint32x4_t __s1 = __p1; \
+ uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
+ uint32x4_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
+ uint32x4_t __ret; \
+ __ret = (uint32x4_t) __builtin_neon_vsliq_n_v((int8x16_t)__rev0, (int8x16_t)__rev1, __p2, 50); \
+ __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
+ __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vsliq_n_u64(__p0, __p1, __p2) __extension__ ({ \
+ uint64x2_t __s0 = __p0; \
+ uint64x2_t __s1 = __p1; \
+ uint64x2_t __ret; \
+ __ret = (uint64x2_t) __builtin_neon_vsliq_n_v((int8x16_t)__s0, (int8x16_t)__s1, __p2, 51); \
+ __ret; \
+})
+#else
+#define vsliq_n_u64(__p0, __p1, __p2) __extension__ ({ \
+ uint64x2_t __s0 = __p0; \
+ uint64x2_t __s1 = __p1; \
+ uint64x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
+ uint64x2_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \
+ uint64x2_t __ret; \
+ __ret = (uint64x2_t) __builtin_neon_vsliq_n_v((int8x16_t)__rev0, (int8x16_t)__rev1, __p2, 51); \
+ __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
+ __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vsliq_n_u16(__p0, __p1, __p2) __extension__ ({ \
+ uint16x8_t __s0 = __p0; \
+ uint16x8_t __s1 = __p1; \
+ uint16x8_t __ret; \
+ __ret = (uint16x8_t) __builtin_neon_vsliq_n_v((int8x16_t)__s0, (int8x16_t)__s1, __p2, 49); \
+ __ret; \
+})
+#else
+#define vsliq_n_u16(__p0, __p1, __p2) __extension__ ({ \
+ uint16x8_t __s0 = __p0; \
+ uint16x8_t __s1 = __p1; \
+ uint16x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
+ uint16x8_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \
+ uint16x8_t __ret; \
+ __ret = (uint16x8_t) __builtin_neon_vsliq_n_v((int8x16_t)__rev0, (int8x16_t)__rev1, __p2, 49); \
+ __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
+ __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vsliq_n_s8(__p0, __p1, __p2) __extension__ ({ \
+ int8x16_t __s0 = __p0; \
+ int8x16_t __s1 = __p1; \
+ int8x16_t __ret; \
+ __ret = (int8x16_t) __builtin_neon_vsliq_n_v((int8x16_t)__s0, (int8x16_t)__s1, __p2, 32); \
+ __ret; \
+})
+#else
+#define vsliq_n_s8(__p0, __p1, __p2) __extension__ ({ \
+ int8x16_t __s0 = __p0; \
+ int8x16_t __s1 = __p1; \
+ int8x16_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
+ int8x16_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
+ int8x16_t __ret; \
+ __ret = (int8x16_t) __builtin_neon_vsliq_n_v((int8x16_t)__rev0, (int8x16_t)__rev1, __p2, 32); \
+ __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
+ __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vsliq_n_s32(__p0, __p1, __p2) __extension__ ({ \
+ int32x4_t __s0 = __p0; \
+ int32x4_t __s1 = __p1; \
+ int32x4_t __ret; \
+ __ret = (int32x4_t) __builtin_neon_vsliq_n_v((int8x16_t)__s0, (int8x16_t)__s1, __p2, 34); \
+ __ret; \
+})
+#else
+#define vsliq_n_s32(__p0, __p1, __p2) __extension__ ({ \
+ int32x4_t __s0 = __p0; \
+ int32x4_t __s1 = __p1; \
+ int32x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
+ int32x4_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
+ int32x4_t __ret; \
+ __ret = (int32x4_t) __builtin_neon_vsliq_n_v((int8x16_t)__rev0, (int8x16_t)__rev1, __p2, 34); \
+ __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
+ __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vsliq_n_s64(__p0, __p1, __p2) __extension__ ({ \
+ int64x2_t __s0 = __p0; \
+ int64x2_t __s1 = __p1; \
+ int64x2_t __ret; \
+ __ret = (int64x2_t) __builtin_neon_vsliq_n_v((int8x16_t)__s0, (int8x16_t)__s1, __p2, 35); \
+ __ret; \
+})
+#else
+#define vsliq_n_s64(__p0, __p1, __p2) __extension__ ({ \
+ int64x2_t __s0 = __p0; \
+ int64x2_t __s1 = __p1; \
+ int64x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
+ int64x2_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \
+ int64x2_t __ret; \
+ __ret = (int64x2_t) __builtin_neon_vsliq_n_v((int8x16_t)__rev0, (int8x16_t)__rev1, __p2, 35); \
+ __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
+ __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vsliq_n_s16(__p0, __p1, __p2) __extension__ ({ \
+ int16x8_t __s0 = __p0; \
+ int16x8_t __s1 = __p1; \
+ int16x8_t __ret; \
+ __ret = (int16x8_t) __builtin_neon_vsliq_n_v((int8x16_t)__s0, (int8x16_t)__s1, __p2, 33); \
+ __ret; \
+})
+#else
+#define vsliq_n_s16(__p0, __p1, __p2) __extension__ ({ \
+ int16x8_t __s0 = __p0; \
+ int16x8_t __s1 = __p1; \
+ int16x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
+ int16x8_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \
+ int16x8_t __ret; \
+ __ret = (int16x8_t) __builtin_neon_vsliq_n_v((int8x16_t)__rev0, (int8x16_t)__rev1, __p2, 33); \
+ __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
+ __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vsli_n_u8(__p0, __p1, __p2) __extension__ ({ \
+ uint8x8_t __s0 = __p0; \
+ uint8x8_t __s1 = __p1; \
+ uint8x8_t __ret; \
+ __ret = (uint8x8_t) __builtin_neon_vsli_n_v((int8x8_t)__s0, (int8x8_t)__s1, __p2, 16); \
+ __ret; \
+})
+#else
+#define vsli_n_u8(__p0, __p1, __p2) __extension__ ({ \
+ uint8x8_t __s0 = __p0; \
+ uint8x8_t __s1 = __p1; \
+ uint8x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
+ uint8x8_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \
+ uint8x8_t __ret; \
+ __ret = (uint8x8_t) __builtin_neon_vsli_n_v((int8x8_t)__rev0, (int8x8_t)__rev1, __p2, 16); \
+ __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
+ __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vsli_n_u32(__p0, __p1, __p2) __extension__ ({ \
+ uint32x2_t __s0 = __p0; \
+ uint32x2_t __s1 = __p1; \
+ uint32x2_t __ret; \
+ __ret = (uint32x2_t) __builtin_neon_vsli_n_v((int8x8_t)__s0, (int8x8_t)__s1, __p2, 18); \
+ __ret; \
+})
+#else
+#define vsli_n_u32(__p0, __p1, __p2) __extension__ ({ \
+ uint32x2_t __s0 = __p0; \
+ uint32x2_t __s1 = __p1; \
+ uint32x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
+ uint32x2_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \
+ uint32x2_t __ret; \
+ __ret = (uint32x2_t) __builtin_neon_vsli_n_v((int8x8_t)__rev0, (int8x8_t)__rev1, __p2, 18); \
+ __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
+ __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vsli_n_u64(__p0, __p1, __p2) __extension__ ({ \
+ uint64x1_t __s0 = __p0; \
+ uint64x1_t __s1 = __p1; \
+ uint64x1_t __ret; \
+ __ret = (uint64x1_t) __builtin_neon_vsli_n_v((int8x8_t)__s0, (int8x8_t)__s1, __p2, 19); \
+ __ret; \
+})
+#else
+#define vsli_n_u64(__p0, __p1, __p2) __extension__ ({ \
+ uint64x1_t __s0 = __p0; \
+ uint64x1_t __s1 = __p1; \
+ uint64x1_t __ret; \
+ __ret = (uint64x1_t) __builtin_neon_vsli_n_v((int8x8_t)__s0, (int8x8_t)__s1, __p2, 19); \
+ __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vsli_n_u16(__p0, __p1, __p2) __extension__ ({ \
+ uint16x4_t __s0 = __p0; \
+ uint16x4_t __s1 = __p1; \
+ uint16x4_t __ret; \
+ __ret = (uint16x4_t) __builtin_neon_vsli_n_v((int8x8_t)__s0, (int8x8_t)__s1, __p2, 17); \
+ __ret; \
+})
+#else
+#define vsli_n_u16(__p0, __p1, __p2) __extension__ ({ \
+ uint16x4_t __s0 = __p0; \
+ uint16x4_t __s1 = __p1; \
+ uint16x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
+ uint16x4_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
+ uint16x4_t __ret; \
+ __ret = (uint16x4_t) __builtin_neon_vsli_n_v((int8x8_t)__rev0, (int8x8_t)__rev1, __p2, 17); \
+ __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
+ __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vsli_n_s8(__p0, __p1, __p2) __extension__ ({ \
+ int8x8_t __s0 = __p0; \
+ int8x8_t __s1 = __p1; \
+ int8x8_t __ret; \
+ __ret = (int8x8_t) __builtin_neon_vsli_n_v((int8x8_t)__s0, (int8x8_t)__s1, __p2, 0); \
+ __ret; \
+})
+#else
+#define vsli_n_s8(__p0, __p1, __p2) __extension__ ({ \
+ int8x8_t __s0 = __p0; \
+ int8x8_t __s1 = __p1; \
+ int8x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
+ int8x8_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \
+ int8x8_t __ret; \
+ __ret = (int8x8_t) __builtin_neon_vsli_n_v((int8x8_t)__rev0, (int8x8_t)__rev1, __p2, 0); \
+ __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
+ __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vsli_n_s32(__p0, __p1, __p2) __extension__ ({ \
+ int32x2_t __s0 = __p0; \
+ int32x2_t __s1 = __p1; \
+ int32x2_t __ret; \
+ __ret = (int32x2_t) __builtin_neon_vsli_n_v((int8x8_t)__s0, (int8x8_t)__s1, __p2, 2); \
+ __ret; \
+})
+#else
+#define vsli_n_s32(__p0, __p1, __p2) __extension__ ({ \
+ int32x2_t __s0 = __p0; \
+ int32x2_t __s1 = __p1; \
+ int32x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
+ int32x2_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \
+ int32x2_t __ret; \
+ __ret = (int32x2_t) __builtin_neon_vsli_n_v((int8x8_t)__rev0, (int8x8_t)__rev1, __p2, 2); \
+ __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
+ __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vsli_n_s64(__p0, __p1, __p2) __extension__ ({ \
+ int64x1_t __s0 = __p0; \
+ int64x1_t __s1 = __p1; \
+ int64x1_t __ret; \
+ __ret = (int64x1_t) __builtin_neon_vsli_n_v((int8x8_t)__s0, (int8x8_t)__s1, __p2, 3); \
+ __ret; \
+})
+#else
+#define vsli_n_s64(__p0, __p1, __p2) __extension__ ({ \
+ int64x1_t __s0 = __p0; \
+ int64x1_t __s1 = __p1; \
+ int64x1_t __ret; \
+ __ret = (int64x1_t) __builtin_neon_vsli_n_v((int8x8_t)__s0, (int8x8_t)__s1, __p2, 3); \
+ __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vsli_n_s16(__p0, __p1, __p2) __extension__ ({ \
+ int16x4_t __s0 = __p0; \
+ int16x4_t __s1 = __p1; \
+ int16x4_t __ret; \
+ __ret = (int16x4_t) __builtin_neon_vsli_n_v((int8x8_t)__s0, (int8x8_t)__s1, __p2, 1); \
+ __ret; \
+})
+#else
+#define vsli_n_s16(__p0, __p1, __p2) __extension__ ({ \
+ int16x4_t __s0 = __p0; \
+ int16x4_t __s1 = __p1; \
+ int16x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
+ int16x4_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
+ int16x4_t __ret; \
+ __ret = (int16x4_t) __builtin_neon_vsli_n_v((int8x8_t)__rev0, (int8x8_t)__rev1, __p2, 1); \
+ __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
+ __ret; \
+})
+#endif
+
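+/* Editor's note (illustrative sketch, not part of the generated header):
+ * the vsli_n/vsliq_n family maps to the SLI "shift left and insert"
+ * instruction: per lane the result is (__p1 << __p2) with the low __p2
+ * bits of __p0 preserved.  Assuming <arm_neon.h> on a NEON target:
+ *
+ *   uint32x4_t merge_halves(uint32x4_t lo, uint32x4_t hi) {
+ *     return vsliq_n_u32(lo, hi, 16);  // per lane: (hi << 16) | (lo & 0xFFFF)
+ *   }
+ */
+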
+#ifdef __LITTLE_ENDIAN__
+#define vsraq_n_u8(__p0, __p1, __p2) __extension__ ({ \
+ uint8x16_t __s0 = __p0; \
+ uint8x16_t __s1 = __p1; \
+ uint8x16_t __ret; \
+ __ret = (uint8x16_t) __builtin_neon_vsraq_n_v((int8x16_t)__s0, (int8x16_t)__s1, __p2, 48); \
+ __ret; \
+})
+#else
+#define vsraq_n_u8(__p0, __p1, __p2) __extension__ ({ \
+ uint8x16_t __s0 = __p0; \
+ uint8x16_t __s1 = __p1; \
+ uint8x16_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
+ uint8x16_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
+ uint8x16_t __ret; \
+ __ret = (uint8x16_t) __builtin_neon_vsraq_n_v((int8x16_t)__rev0, (int8x16_t)__rev1, __p2, 48); \
+ __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
+ __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vsraq_n_u32(__p0, __p1, __p2) __extension__ ({ \
+ uint32x4_t __s0 = __p0; \
+ uint32x4_t __s1 = __p1; \
+ uint32x4_t __ret; \
+ __ret = (uint32x4_t) __builtin_neon_vsraq_n_v((int8x16_t)__s0, (int8x16_t)__s1, __p2, 50); \
+ __ret; \
+})
+#else
+#define vsraq_n_u32(__p0, __p1, __p2) __extension__ ({ \
+ uint32x4_t __s0 = __p0; \
+ uint32x4_t __s1 = __p1; \
+ uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
+ uint32x4_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
+ uint32x4_t __ret; \
+ __ret = (uint32x4_t) __builtin_neon_vsraq_n_v((int8x16_t)__rev0, (int8x16_t)__rev1, __p2, 50); \
+ __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
+ __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vsraq_n_u64(__p0, __p1, __p2) __extension__ ({ \
+ uint64x2_t __s0 = __p0; \
+ uint64x2_t __s1 = __p1; \
+ uint64x2_t __ret; \
+ __ret = (uint64x2_t) __builtin_neon_vsraq_n_v((int8x16_t)__s0, (int8x16_t)__s1, __p2, 51); \
+ __ret; \
+})
+#else
+#define vsraq_n_u64(__p0, __p1, __p2) __extension__ ({ \
+ uint64x2_t __s0 = __p0; \
+ uint64x2_t __s1 = __p1; \
+ uint64x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
+ uint64x2_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \
+ uint64x2_t __ret; \
+ __ret = (uint64x2_t) __builtin_neon_vsraq_n_v((int8x16_t)__rev0, (int8x16_t)__rev1, __p2, 51); \
+ __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
+ __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vsraq_n_u16(__p0, __p1, __p2) __extension__ ({ \
+ uint16x8_t __s0 = __p0; \
+ uint16x8_t __s1 = __p1; \
+ uint16x8_t __ret; \
+ __ret = (uint16x8_t) __builtin_neon_vsraq_n_v((int8x16_t)__s0, (int8x16_t)__s1, __p2, 49); \
+ __ret; \
+})
+#else
+#define vsraq_n_u16(__p0, __p1, __p2) __extension__ ({ \
+ uint16x8_t __s0 = __p0; \
+ uint16x8_t __s1 = __p1; \
+ uint16x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
+ uint16x8_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \
+ uint16x8_t __ret; \
+ __ret = (uint16x8_t) __builtin_neon_vsraq_n_v((int8x16_t)__rev0, (int8x16_t)__rev1, __p2, 49); \
+ __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
+ __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vsraq_n_s8(__p0, __p1, __p2) __extension__ ({ \
+ int8x16_t __s0 = __p0; \
+ int8x16_t __s1 = __p1; \
+ int8x16_t __ret; \
+ __ret = (int8x16_t) __builtin_neon_vsraq_n_v((int8x16_t)__s0, (int8x16_t)__s1, __p2, 32); \
+ __ret; \
+})
+#else
+#define vsraq_n_s8(__p0, __p1, __p2) __extension__ ({ \
+ int8x16_t __s0 = __p0; \
+ int8x16_t __s1 = __p1; \
+ int8x16_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
+ int8x16_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
+ int8x16_t __ret; \
+ __ret = (int8x16_t) __builtin_neon_vsraq_n_v((int8x16_t)__rev0, (int8x16_t)__rev1, __p2, 32); \
+ __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
+ __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vsraq_n_s32(__p0, __p1, __p2) __extension__ ({ \
+ int32x4_t __s0 = __p0; \
+ int32x4_t __s1 = __p1; \
+ int32x4_t __ret; \
+ __ret = (int32x4_t) __builtin_neon_vsraq_n_v((int8x16_t)__s0, (int8x16_t)__s1, __p2, 34); \
+ __ret; \
+})
+#else
+#define vsraq_n_s32(__p0, __p1, __p2) __extension__ ({ \
+ int32x4_t __s0 = __p0; \
+ int32x4_t __s1 = __p1; \
+ int32x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
+ int32x4_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
+ int32x4_t __ret; \
+ __ret = (int32x4_t) __builtin_neon_vsraq_n_v((int8x16_t)__rev0, (int8x16_t)__rev1, __p2, 34); \
+ __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
+ __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vsraq_n_s64(__p0, __p1, __p2) __extension__ ({ \
+ int64x2_t __s0 = __p0; \
+ int64x2_t __s1 = __p1; \
+ int64x2_t __ret; \
+ __ret = (int64x2_t) __builtin_neon_vsraq_n_v((int8x16_t)__s0, (int8x16_t)__s1, __p2, 35); \
+ __ret; \
+})
+#else
+#define vsraq_n_s64(__p0, __p1, __p2) __extension__ ({ \
+ int64x2_t __s0 = __p0; \
+ int64x2_t __s1 = __p1; \
+ int64x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
+ int64x2_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \
+ int64x2_t __ret; \
+ __ret = (int64x2_t) __builtin_neon_vsraq_n_v((int8x16_t)__rev0, (int8x16_t)__rev1, __p2, 35); \
+ __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
+ __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vsraq_n_s16(__p0, __p1, __p2) __extension__ ({ \
+ int16x8_t __s0 = __p0; \
+ int16x8_t __s1 = __p1; \
+ int16x8_t __ret; \
+ __ret = (int16x8_t) __builtin_neon_vsraq_n_v((int8x16_t)__s0, (int8x16_t)__s1, __p2, 33); \
+ __ret; \
+})
+#else
+#define vsraq_n_s16(__p0, __p1, __p2) __extension__ ({ \
+ int16x8_t __s0 = __p0; \
+ int16x8_t __s1 = __p1; \
+ int16x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
+ int16x8_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \
+ int16x8_t __ret; \
+ __ret = (int16x8_t) __builtin_neon_vsraq_n_v((int8x16_t)__rev0, (int8x16_t)__rev1, __p2, 33); \
+ __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
+ __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vsra_n_u8(__p0, __p1, __p2) __extension__ ({ \
+ uint8x8_t __s0 = __p0; \
+ uint8x8_t __s1 = __p1; \
+ uint8x8_t __ret; \
+ __ret = (uint8x8_t) __builtin_neon_vsra_n_v((int8x8_t)__s0, (int8x8_t)__s1, __p2, 16); \
+ __ret; \
+})
+#else
+#define vsra_n_u8(__p0, __p1, __p2) __extension__ ({ \
+ uint8x8_t __s0 = __p0; \
+ uint8x8_t __s1 = __p1; \
+ uint8x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
+ uint8x8_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \
+ uint8x8_t __ret; \
+ __ret = (uint8x8_t) __builtin_neon_vsra_n_v((int8x8_t)__rev0, (int8x8_t)__rev1, __p2, 16); \
+ __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
+ __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vsra_n_u32(__p0, __p1, __p2) __extension__ ({ \
+ uint32x2_t __s0 = __p0; \
+ uint32x2_t __s1 = __p1; \
+ uint32x2_t __ret; \
+ __ret = (uint32x2_t) __builtin_neon_vsra_n_v((int8x8_t)__s0, (int8x8_t)__s1, __p2, 18); \
+ __ret; \
+})
+#else
+#define vsra_n_u32(__p0, __p1, __p2) __extension__ ({ \
+ uint32x2_t __s0 = __p0; \
+ uint32x2_t __s1 = __p1; \
+ uint32x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
+ uint32x2_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \
+ uint32x2_t __ret; \
+ __ret = (uint32x2_t) __builtin_neon_vsra_n_v((int8x8_t)__rev0, (int8x8_t)__rev1, __p2, 18); \
+ __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
+ __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vsra_n_u64(__p0, __p1, __p2) __extension__ ({ \
+ uint64x1_t __s0 = __p0; \
+ uint64x1_t __s1 = __p1; \
+ uint64x1_t __ret; \
+ __ret = (uint64x1_t) __builtin_neon_vsra_n_v((int8x8_t)__s0, (int8x8_t)__s1, __p2, 19); \
+ __ret; \
+})
+#else
+#define vsra_n_u64(__p0, __p1, __p2) __extension__ ({ \
+ uint64x1_t __s0 = __p0; \
+ uint64x1_t __s1 = __p1; \
+ uint64x1_t __ret; \
+ __ret = (uint64x1_t) __builtin_neon_vsra_n_v((int8x8_t)__s0, (int8x8_t)__s1, __p2, 19); \
+ __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vsra_n_u16(__p0, __p1, __p2) __extension__ ({ \
+ uint16x4_t __s0 = __p0; \
+ uint16x4_t __s1 = __p1; \
+ uint16x4_t __ret; \
+ __ret = (uint16x4_t) __builtin_neon_vsra_n_v((int8x8_t)__s0, (int8x8_t)__s1, __p2, 17); \
+ __ret; \
+})
+#else
+#define vsra_n_u16(__p0, __p1, __p2) __extension__ ({ \
+ uint16x4_t __s0 = __p0; \
+ uint16x4_t __s1 = __p1; \
+ uint16x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
+ uint16x4_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
+ uint16x4_t __ret; \
+ __ret = (uint16x4_t) __builtin_neon_vsra_n_v((int8x8_t)__rev0, (int8x8_t)__rev1, __p2, 17); \
+ __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
+ __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vsra_n_s8(__p0, __p1, __p2) __extension__ ({ \
+ int8x8_t __s0 = __p0; \
+ int8x8_t __s1 = __p1; \
+ int8x8_t __ret; \
+ __ret = (int8x8_t) __builtin_neon_vsra_n_v((int8x8_t)__s0, (int8x8_t)__s1, __p2, 0); \
+ __ret; \
+})
+#else
+#define vsra_n_s8(__p0, __p1, __p2) __extension__ ({ \
+ int8x8_t __s0 = __p0; \
+ int8x8_t __s1 = __p1; \
+ int8x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
+ int8x8_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \
+ int8x8_t __ret; \
+ __ret = (int8x8_t) __builtin_neon_vsra_n_v((int8x8_t)__rev0, (int8x8_t)__rev1, __p2, 0); \
+ __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
+ __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vsra_n_s32(__p0, __p1, __p2) __extension__ ({ \
+ int32x2_t __s0 = __p0; \
+ int32x2_t __s1 = __p1; \
+ int32x2_t __ret; \
+ __ret = (int32x2_t) __builtin_neon_vsra_n_v((int8x8_t)__s0, (int8x8_t)__s1, __p2, 2); \
+ __ret; \
+})
+#else
+#define vsra_n_s32(__p0, __p1, __p2) __extension__ ({ \
+ int32x2_t __s0 = __p0; \
+ int32x2_t __s1 = __p1; \
+ int32x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
+ int32x2_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \
+ int32x2_t __ret; \
+ __ret = (int32x2_t) __builtin_neon_vsra_n_v((int8x8_t)__rev0, (int8x8_t)__rev1, __p2, 2); \
+ __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
+ __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vsra_n_s64(__p0, __p1, __p2) __extension__ ({ \
+ int64x1_t __s0 = __p0; \
+ int64x1_t __s1 = __p1; \
+ int64x1_t __ret; \
+ __ret = (int64x1_t) __builtin_neon_vsra_n_v((int8x8_t)__s0, (int8x8_t)__s1, __p2, 3); \
+ __ret; \
+})
+#else
+#define vsra_n_s64(__p0, __p1, __p2) __extension__ ({ \
+ int64x1_t __s0 = __p0; \
+ int64x1_t __s1 = __p1; \
+ int64x1_t __ret; \
+ __ret = (int64x1_t) __builtin_neon_vsra_n_v((int8x8_t)__s0, (int8x8_t)__s1, __p2, 3); \
+ __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vsra_n_s16(__p0, __p1, __p2) __extension__ ({ \
+ int16x4_t __s0 = __p0; \
+ int16x4_t __s1 = __p1; \
+ int16x4_t __ret; \
+ __ret = (int16x4_t) __builtin_neon_vsra_n_v((int8x8_t)__s0, (int8x8_t)__s1, __p2, 1); \
+ __ret; \
+})
+#else
+#define vsra_n_s16(__p0, __p1, __p2) __extension__ ({ \
+ int16x4_t __s0 = __p0; \
+ int16x4_t __s1 = __p1; \
+ int16x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
+ int16x4_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
+ int16x4_t __ret; \
+ __ret = (int16x4_t) __builtin_neon_vsra_n_v((int8x8_t)__rev0, (int8x8_t)__rev1, __p2, 1); \
+ __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
+ __ret; \
+})
+#endif
+
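+/* Editor's note (illustrative sketch, not part of the generated header):
+ * the vsra_n/vsraq_n family maps to USRA/SSRA, "shift right and
+ * accumulate": per lane the result is __p0 + (__p1 >> __p2), with a
+ * logical or arithmetic shift chosen by the element type.  For example:
+ *
+ *   uint32x2_t add_scaled(uint32x2_t acc, uint32x2_t x) {
+ *     return vsra_n_u32(acc, x, 4);  // acc + (x >> 4), per 32-bit lane
+ *   }
+ */
+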
+#ifdef __LITTLE_ENDIAN__
+#define vsri_n_p8(__p0, __p1, __p2) __extension__ ({ \
+ poly8x8_t __s0 = __p0; \
+ poly8x8_t __s1 = __p1; \
+ poly8x8_t __ret; \
+ __ret = (poly8x8_t) __builtin_neon_vsri_n_v((int8x8_t)__s0, (int8x8_t)__s1, __p2, 4); \
+ __ret; \
+})
+#else
+#define vsri_n_p8(__p0, __p1, __p2) __extension__ ({ \
+ poly8x8_t __s0 = __p0; \
+ poly8x8_t __s1 = __p1; \
+ poly8x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
+ poly8x8_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \
+ poly8x8_t __ret; \
+ __ret = (poly8x8_t) __builtin_neon_vsri_n_v((int8x8_t)__rev0, (int8x8_t)__rev1, __p2, 4); \
+ __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
+ __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vsri_n_p16(__p0, __p1, __p2) __extension__ ({ \
+ poly16x4_t __s0 = __p0; \
+ poly16x4_t __s1 = __p1; \
+ poly16x4_t __ret; \
+ __ret = (poly16x4_t) __builtin_neon_vsri_n_v((int8x8_t)__s0, (int8x8_t)__s1, __p2, 5); \
+ __ret; \
+})
+#else
+#define vsri_n_p16(__p0, __p1, __p2) __extension__ ({ \
+ poly16x4_t __s0 = __p0; \
+ poly16x4_t __s1 = __p1; \
+ poly16x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
+ poly16x4_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
+ poly16x4_t __ret; \
+ __ret = (poly16x4_t) __builtin_neon_vsri_n_v((int8x8_t)__rev0, (int8x8_t)__rev1, __p2, 5); \
+ __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
+ __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vsriq_n_p8(__p0, __p1, __p2) __extension__ ({ \
+ poly8x16_t __s0 = __p0; \
+ poly8x16_t __s1 = __p1; \
+ poly8x16_t __ret; \
+ __ret = (poly8x16_t) __builtin_neon_vsriq_n_v((int8x16_t)__s0, (int8x16_t)__s1, __p2, 36); \
+ __ret; \
+})
+#else
+#define vsriq_n_p8(__p0, __p1, __p2) __extension__ ({ \
+ poly8x16_t __s0 = __p0; \
+ poly8x16_t __s1 = __p1; \
+ poly8x16_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
+ poly8x16_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
+ poly8x16_t __ret; \
+ __ret = (poly8x16_t) __builtin_neon_vsriq_n_v((int8x16_t)__rev0, (int8x16_t)__rev1, __p2, 36); \
+ __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
+ __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vsriq_n_p16(__p0, __p1, __p2) __extension__ ({ \
+ poly16x8_t __s0 = __p0; \
+ poly16x8_t __s1 = __p1; \
+ poly16x8_t __ret; \
+ __ret = (poly16x8_t) __builtin_neon_vsriq_n_v((int8x16_t)__s0, (int8x16_t)__s1, __p2, 37); \
+ __ret; \
+})
+#else
+#define vsriq_n_p16(__p0, __p1, __p2) __extension__ ({ \
+ poly16x8_t __s0 = __p0; \
+ poly16x8_t __s1 = __p1; \
+ poly16x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
+ poly16x8_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \
+ poly16x8_t __ret; \
+ __ret = (poly16x8_t) __builtin_neon_vsriq_n_v((int8x16_t)__rev0, (int8x16_t)__rev1, __p2, 37); \
+ __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
+ __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vsriq_n_u8(__p0, __p1, __p2) __extension__ ({ \
+ uint8x16_t __s0 = __p0; \
+ uint8x16_t __s1 = __p1; \
+ uint8x16_t __ret; \
+ __ret = (uint8x16_t) __builtin_neon_vsriq_n_v((int8x16_t)__s0, (int8x16_t)__s1, __p2, 48); \
+ __ret; \
+})
+#else
+#define vsriq_n_u8(__p0, __p1, __p2) __extension__ ({ \
+ uint8x16_t __s0 = __p0; \
+ uint8x16_t __s1 = __p1; \
+ uint8x16_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
+ uint8x16_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
+ uint8x16_t __ret; \
+ __ret = (uint8x16_t) __builtin_neon_vsriq_n_v((int8x16_t)__rev0, (int8x16_t)__rev1, __p2, 48); \
+ __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
+ __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vsriq_n_u32(__p0, __p1, __p2) __extension__ ({ \
+ uint32x4_t __s0 = __p0; \
+ uint32x4_t __s1 = __p1; \
+ uint32x4_t __ret; \
+ __ret = (uint32x4_t) __builtin_neon_vsriq_n_v((int8x16_t)__s0, (int8x16_t)__s1, __p2, 50); \
+ __ret; \
+})
+#else
+#define vsriq_n_u32(__p0, __p1, __p2) __extension__ ({ \
+ uint32x4_t __s0 = __p0; \
+ uint32x4_t __s1 = __p1; \
+ uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
+ uint32x4_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
+ uint32x4_t __ret; \
+ __ret = (uint32x4_t) __builtin_neon_vsriq_n_v((int8x16_t)__rev0, (int8x16_t)__rev1, __p2, 50); \
+ __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
+ __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vsriq_n_u64(__p0, __p1, __p2) __extension__ ({ \
+ uint64x2_t __s0 = __p0; \
+ uint64x2_t __s1 = __p1; \
+ uint64x2_t __ret; \
+ __ret = (uint64x2_t) __builtin_neon_vsriq_n_v((int8x16_t)__s0, (int8x16_t)__s1, __p2, 51); \
+ __ret; \
+})
+#else
+#define vsriq_n_u64(__p0, __p1, __p2) __extension__ ({ \
+ uint64x2_t __s0 = __p0; \
+ uint64x2_t __s1 = __p1; \
+ uint64x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
+ uint64x2_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \
+ uint64x2_t __ret; \
+ __ret = (uint64x2_t) __builtin_neon_vsriq_n_v((int8x16_t)__rev0, (int8x16_t)__rev1, __p2, 51); \
+ __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
+ __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vsriq_n_u16(__p0, __p1, __p2) __extension__ ({ \
+ uint16x8_t __s0 = __p0; \
+ uint16x8_t __s1 = __p1; \
+ uint16x8_t __ret; \
+ __ret = (uint16x8_t) __builtin_neon_vsriq_n_v((int8x16_t)__s0, (int8x16_t)__s1, __p2, 49); \
+ __ret; \
+})
+#else
+#define vsriq_n_u16(__p0, __p1, __p2) __extension__ ({ \
+ uint16x8_t __s0 = __p0; \
+ uint16x8_t __s1 = __p1; \
+ uint16x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
+ uint16x8_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \
+ uint16x8_t __ret; \
+ __ret = (uint16x8_t) __builtin_neon_vsriq_n_v((int8x16_t)__rev0, (int8x16_t)__rev1, __p2, 49); \
+ __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
+ __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vsriq_n_s8(__p0, __p1, __p2) __extension__ ({ \
+ int8x16_t __s0 = __p0; \
+ int8x16_t __s1 = __p1; \
+ int8x16_t __ret; \
+ __ret = (int8x16_t) __builtin_neon_vsriq_n_v((int8x16_t)__s0, (int8x16_t)__s1, __p2, 32); \
+ __ret; \
+})
+#else
+#define vsriq_n_s8(__p0, __p1, __p2) __extension__ ({ \
+ int8x16_t __s0 = __p0; \
+ int8x16_t __s1 = __p1; \
+ int8x16_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
+ int8x16_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
+ int8x16_t __ret; \
+ __ret = (int8x16_t) __builtin_neon_vsriq_n_v((int8x16_t)__rev0, (int8x16_t)__rev1, __p2, 32); \
+ __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
+ __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vsriq_n_s32(__p0, __p1, __p2) __extension__ ({ \
+ int32x4_t __s0 = __p0; \
+ int32x4_t __s1 = __p1; \
+ int32x4_t __ret; \
+ __ret = (int32x4_t) __builtin_neon_vsriq_n_v((int8x16_t)__s0, (int8x16_t)__s1, __p2, 34); \
+ __ret; \
+})
+#else
+#define vsriq_n_s32(__p0, __p1, __p2) __extension__ ({ \
+ int32x4_t __s0 = __p0; \
+ int32x4_t __s1 = __p1; \
+ int32x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
+ int32x4_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
+ int32x4_t __ret; \
+ __ret = (int32x4_t) __builtin_neon_vsriq_n_v((int8x16_t)__rev0, (int8x16_t)__rev1, __p2, 34); \
+ __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
+ __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vsriq_n_s64(__p0, __p1, __p2) __extension__ ({ \
+ int64x2_t __s0 = __p0; \
+ int64x2_t __s1 = __p1; \
+ int64x2_t __ret; \
+ __ret = (int64x2_t) __builtin_neon_vsriq_n_v((int8x16_t)__s0, (int8x16_t)__s1, __p2, 35); \
+ __ret; \
+})
+#else
+#define vsriq_n_s64(__p0, __p1, __p2) __extension__ ({ \
+ int64x2_t __s0 = __p0; \
+ int64x2_t __s1 = __p1; \
+ int64x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
+ int64x2_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \
+ int64x2_t __ret; \
+ __ret = (int64x2_t) __builtin_neon_vsriq_n_v((int8x16_t)__rev0, (int8x16_t)__rev1, __p2, 35); \
+ __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
+ __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vsriq_n_s16(__p0, __p1, __p2) __extension__ ({ \
+ int16x8_t __s0 = __p0; \
+ int16x8_t __s1 = __p1; \
+ int16x8_t __ret; \
+ __ret = (int16x8_t) __builtin_neon_vsriq_n_v((int8x16_t)__s0, (int8x16_t)__s1, __p2, 33); \
+ __ret; \
+})
+#else
+#define vsriq_n_s16(__p0, __p1, __p2) __extension__ ({ \
+ int16x8_t __s0 = __p0; \
+ int16x8_t __s1 = __p1; \
+ int16x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
+ int16x8_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \
+ int16x8_t __ret; \
+ __ret = (int16x8_t) __builtin_neon_vsriq_n_v((int8x16_t)__rev0, (int8x16_t)__rev1, __p2, 33); \
+ __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
+ __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vsri_n_u8(__p0, __p1, __p2) __extension__ ({ \
+ uint8x8_t __s0 = __p0; \
+ uint8x8_t __s1 = __p1; \
+ uint8x8_t __ret; \
+ __ret = (uint8x8_t) __builtin_neon_vsri_n_v((int8x8_t)__s0, (int8x8_t)__s1, __p2, 16); \
+ __ret; \
+})
+#else
+#define vsri_n_u8(__p0, __p1, __p2) __extension__ ({ \
+ uint8x8_t __s0 = __p0; \
+ uint8x8_t __s1 = __p1; \
+ uint8x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
+ uint8x8_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \
+ uint8x8_t __ret; \
+ __ret = (uint8x8_t) __builtin_neon_vsri_n_v((int8x8_t)__rev0, (int8x8_t)__rev1, __p2, 16); \
+ __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
+ __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vsri_n_u32(__p0, __p1, __p2) __extension__ ({ \
+ uint32x2_t __s0 = __p0; \
+ uint32x2_t __s1 = __p1; \
+ uint32x2_t __ret; \
+ __ret = (uint32x2_t) __builtin_neon_vsri_n_v((int8x8_t)__s0, (int8x8_t)__s1, __p2, 18); \
+ __ret; \
+})
+#else
+#define vsri_n_u32(__p0, __p1, __p2) __extension__ ({ \
+ uint32x2_t __s0 = __p0; \
+ uint32x2_t __s1 = __p1; \
+ uint32x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
+ uint32x2_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \
+ uint32x2_t __ret; \
+ __ret = (uint32x2_t) __builtin_neon_vsri_n_v((int8x8_t)__rev0, (int8x8_t)__rev1, __p2, 18); \
+ __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
+ __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vsri_n_u64(__p0, __p1, __p2) __extension__ ({ \
+ uint64x1_t __s0 = __p0; \
+ uint64x1_t __s1 = __p1; \
+ uint64x1_t __ret; \
+ __ret = (uint64x1_t) __builtin_neon_vsri_n_v((int8x8_t)__s0, (int8x8_t)__s1, __p2, 19); \
+ __ret; \
+})
+#else
+#define vsri_n_u64(__p0, __p1, __p2) __extension__ ({ \
+ uint64x1_t __s0 = __p0; \
+ uint64x1_t __s1 = __p1; \
+ uint64x1_t __ret; \
+ __ret = (uint64x1_t) __builtin_neon_vsri_n_v((int8x8_t)__s0, (int8x8_t)__s1, __p2, 19); \
+ __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vsri_n_u16(__p0, __p1, __p2) __extension__ ({ \
+ uint16x4_t __s0 = __p0; \
+ uint16x4_t __s1 = __p1; \
+ uint16x4_t __ret; \
+ __ret = (uint16x4_t) __builtin_neon_vsri_n_v((int8x8_t)__s0, (int8x8_t)__s1, __p2, 17); \
+ __ret; \
+})
+#else
+#define vsri_n_u16(__p0, __p1, __p2) __extension__ ({ \
+ uint16x4_t __s0 = __p0; \
+ uint16x4_t __s1 = __p1; \
+ uint16x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
+ uint16x4_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
+ uint16x4_t __ret; \
+ __ret = (uint16x4_t) __builtin_neon_vsri_n_v((int8x8_t)__rev0, (int8x8_t)__rev1, __p2, 17); \
+ __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
+ __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vsri_n_s8(__p0, __p1, __p2) __extension__ ({ \
+ int8x8_t __s0 = __p0; \
+ int8x8_t __s1 = __p1; \
+ int8x8_t __ret; \
+ __ret = (int8x8_t) __builtin_neon_vsri_n_v((int8x8_t)__s0, (int8x8_t)__s1, __p2, 0); \
+ __ret; \
+})
+#else
+#define vsri_n_s8(__p0, __p1, __p2) __extension__ ({ \
+ int8x8_t __s0 = __p0; \
+ int8x8_t __s1 = __p1; \
+ int8x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
+ int8x8_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \
+ int8x8_t __ret; \
+ __ret = (int8x8_t) __builtin_neon_vsri_n_v((int8x8_t)__rev0, (int8x8_t)__rev1, __p2, 0); \
+ __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
+ __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vsri_n_s32(__p0, __p1, __p2) __extension__ ({ \
+ int32x2_t __s0 = __p0; \
+ int32x2_t __s1 = __p1; \
+ int32x2_t __ret; \
+ __ret = (int32x2_t) __builtin_neon_vsri_n_v((int8x8_t)__s0, (int8x8_t)__s1, __p2, 2); \
+ __ret; \
+})
+#else
+#define vsri_n_s32(__p0, __p1, __p2) __extension__ ({ \
+ int32x2_t __s0 = __p0; \
+ int32x2_t __s1 = __p1; \
+ int32x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
+ int32x2_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \
+ int32x2_t __ret; \
+ __ret = (int32x2_t) __builtin_neon_vsri_n_v((int8x8_t)__rev0, (int8x8_t)__rev1, __p2, 2); \
+ __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
+ __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vsri_n_s64(__p0, __p1, __p2) __extension__ ({ \
+ int64x1_t __s0 = __p0; \
+ int64x1_t __s1 = __p1; \
+ int64x1_t __ret; \
+ __ret = (int64x1_t) __builtin_neon_vsri_n_v((int8x8_t)__s0, (int8x8_t)__s1, __p2, 3); \
+ __ret; \
+})
+#else
+#define vsri_n_s64(__p0, __p1, __p2) __extension__ ({ \
+ int64x1_t __s0 = __p0; \
+ int64x1_t __s1 = __p1; \
+ int64x1_t __ret; \
+ __ret = (int64x1_t) __builtin_neon_vsri_n_v((int8x8_t)__s0, (int8x8_t)__s1, __p2, 3); \
+ __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vsri_n_s16(__p0, __p1, __p2) __extension__ ({ \
+ int16x4_t __s0 = __p0; \
+ int16x4_t __s1 = __p1; \
+ int16x4_t __ret; \
+ __ret = (int16x4_t) __builtin_neon_vsri_n_v((int8x8_t)__s0, (int8x8_t)__s1, __p2, 1); \
+ __ret; \
+})
+#else
+#define vsri_n_s16(__p0, __p1, __p2) __extension__ ({ \
+ int16x4_t __s0 = __p0; \
+ int16x4_t __s1 = __p1; \
+ int16x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
+ int16x4_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
+ int16x4_t __ret; \
+ __ret = (int16x4_t) __builtin_neon_vsri_n_v((int8x8_t)__rev0, (int8x8_t)__rev1, __p2, 1); \
+ __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
+ __ret; \
+})
+#endif
+
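+/* Editor's note (illustrative sketch, not part of the generated header):
+ * the vsri_n/vsriq_n family maps to SRI, "shift right and insert": per
+ * lane the result is (__p1 >> __p2) written into the low bits of __p0,
+ * with the top __p2 bits of __p0 preserved.  Paired with a left shift it
+ * yields the usual NEON rotate idiom, e.g. rotate each lane left by 8:
+ *
+ *   uint32x4_t rotl8(uint32x4_t v) {
+ *     return vsriq_n_u32(vshlq_n_u32(v, 8), v, 24);
+ *   }
+ */
+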
+#ifdef __LITTLE_ENDIAN__
+#define vst1_p8(__p0, __p1) __extension__ ({ \
+ poly8x8_t __s1 = __p1; \
+ __builtin_neon_vst1_v(__p0, (int8x8_t)__s1, 4); \
+})
+#else
+#define vst1_p8(__p0, __p1) __extension__ ({ \
+ poly8x8_t __s1 = __p1; \
+ poly8x8_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \
+ __builtin_neon_vst1_v(__p0, (int8x8_t)__rev1, 4); \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vst1_p16(__p0, __p1) __extension__ ({ \
+ poly16x4_t __s1 = __p1; \
+ __builtin_neon_vst1_v(__p0, (int8x8_t)__s1, 5); \
+})
+#else
+#define vst1_p16(__p0, __p1) __extension__ ({ \
+ poly16x4_t __s1 = __p1; \
+ poly16x4_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
+ __builtin_neon_vst1_v(__p0, (int8x8_t)__rev1, 5); \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vst1q_p8(__p0, __p1) __extension__ ({ \
+ poly8x16_t __s1 = __p1; \
+ __builtin_neon_vst1q_v(__p0, (int8x16_t)__s1, 36); \
+})
+#else
+#define vst1q_p8(__p0, __p1) __extension__ ({ \
+ poly8x16_t __s1 = __p1; \
+ poly8x16_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
+ __builtin_neon_vst1q_v(__p0, (int8x16_t)__rev1, 36); \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vst1q_p16(__p0, __p1) __extension__ ({ \
+ poly16x8_t __s1 = __p1; \
+ __builtin_neon_vst1q_v(__p0, (int8x16_t)__s1, 37); \
+})
+#else
+#define vst1q_p16(__p0, __p1) __extension__ ({ \
+ poly16x8_t __s1 = __p1; \
+ poly16x8_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \
+ __builtin_neon_vst1q_v(__p0, (int8x16_t)__rev1, 37); \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vst1q_u8(__p0, __p1) __extension__ ({ \
+ uint8x16_t __s1 = __p1; \
+ __builtin_neon_vst1q_v(__p0, (int8x16_t)__s1, 48); \
+})
+#else
+#define vst1q_u8(__p0, __p1) __extension__ ({ \
+ uint8x16_t __s1 = __p1; \
+ uint8x16_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
+ __builtin_neon_vst1q_v(__p0, (int8x16_t)__rev1, 48); \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vst1q_u32(__p0, __p1) __extension__ ({ \
+ uint32x4_t __s1 = __p1; \
+ __builtin_neon_vst1q_v(__p0, (int8x16_t)__s1, 50); \
+})
+#else
+#define vst1q_u32(__p0, __p1) __extension__ ({ \
+ uint32x4_t __s1 = __p1; \
+ uint32x4_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
+ __builtin_neon_vst1q_v(__p0, (int8x16_t)__rev1, 50); \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vst1q_u64(__p0, __p1) __extension__ ({ \
+ uint64x2_t __s1 = __p1; \
+ __builtin_neon_vst1q_v(__p0, (int8x16_t)__s1, 51); \
+})
+#else
+#define vst1q_u64(__p0, __p1) __extension__ ({ \
+ uint64x2_t __s1 = __p1; \
+ uint64x2_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \
+ __builtin_neon_vst1q_v(__p0, (int8x16_t)__rev1, 51); \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vst1q_u16(__p0, __p1) __extension__ ({ \
+ uint16x8_t __s1 = __p1; \
+ __builtin_neon_vst1q_v(__p0, (int8x16_t)__s1, 49); \
+})
+#else
+#define vst1q_u16(__p0, __p1) __extension__ ({ \
+ uint16x8_t __s1 = __p1; \
+ uint16x8_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \
+ __builtin_neon_vst1q_v(__p0, (int8x16_t)__rev1, 49); \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vst1q_s8(__p0, __p1) __extension__ ({ \
+ int8x16_t __s1 = __p1; \
+ __builtin_neon_vst1q_v(__p0, (int8x16_t)__s1, 32); \
+})
+#else
+#define vst1q_s8(__p0, __p1) __extension__ ({ \
+ int8x16_t __s1 = __p1; \
+ int8x16_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
+ __builtin_neon_vst1q_v(__p0, (int8x16_t)__rev1, 32); \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vst1q_f32(__p0, __p1) __extension__ ({ \
+ float32x4_t __s1 = __p1; \
+ __builtin_neon_vst1q_v(__p0, (int8x16_t)__s1, 41); \
+})
+#else
+#define vst1q_f32(__p0, __p1) __extension__ ({ \
+ float32x4_t __s1 = __p1; \
+ float32x4_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
+ __builtin_neon_vst1q_v(__p0, (int8x16_t)__rev1, 41); \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vst1q_f16(__p0, __p1) __extension__ ({ \
+ float16x8_t __s1 = __p1; \
+ __builtin_neon_vst1q_v(__p0, (int8x16_t)__s1, 40); \
+})
+#else
+#define vst1q_f16(__p0, __p1) __extension__ ({ \
+ float16x8_t __s1 = __p1; \
+ float16x8_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \
+ __builtin_neon_vst1q_v(__p0, (int8x16_t)__rev1, 40); \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vst1q_s32(__p0, __p1) __extension__ ({ \
+ int32x4_t __s1 = __p1; \
+ __builtin_neon_vst1q_v(__p0, (int8x16_t)__s1, 34); \
+})
+#else
+#define vst1q_s32(__p0, __p1) __extension__ ({ \
+ int32x4_t __s1 = __p1; \
+ int32x4_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
+ __builtin_neon_vst1q_v(__p0, (int8x16_t)__rev1, 34); \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vst1q_s64(__p0, __p1) __extension__ ({ \
+ int64x2_t __s1 = __p1; \
+ __builtin_neon_vst1q_v(__p0, (int8x16_t)__s1, 35); \
+})
+#else
+#define vst1q_s64(__p0, __p1) __extension__ ({ \
+ int64x2_t __s1 = __p1; \
+ int64x2_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \
+ __builtin_neon_vst1q_v(__p0, (int8x16_t)__rev1, 35); \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vst1q_s16(__p0, __p1) __extension__ ({ \
+ int16x8_t __s1 = __p1; \
+ __builtin_neon_vst1q_v(__p0, (int8x16_t)__s1, 33); \
+})
+#else
+#define vst1q_s16(__p0, __p1) __extension__ ({ \
+ int16x8_t __s1 = __p1; \
+ int16x8_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \
+ __builtin_neon_vst1q_v(__p0, (int8x16_t)__rev1, 33); \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vst1_u8(__p0, __p1) __extension__ ({ \
+ uint8x8_t __s1 = __p1; \
+ __builtin_neon_vst1_v(__p0, (int8x8_t)__s1, 16); \
+})
+#else
+#define vst1_u8(__p0, __p1) __extension__ ({ \
+ uint8x8_t __s1 = __p1; \
+ uint8x8_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \
+ __builtin_neon_vst1_v(__p0, (int8x8_t)__rev1, 16); \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vst1_u32(__p0, __p1) __extension__ ({ \
+ uint32x2_t __s1 = __p1; \
+ __builtin_neon_vst1_v(__p0, (int8x8_t)__s1, 18); \
+})
+#else
+#define vst1_u32(__p0, __p1) __extension__ ({ \
+ uint32x2_t __s1 = __p1; \
+ uint32x2_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \
+ __builtin_neon_vst1_v(__p0, (int8x8_t)__rev1, 18); \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vst1_u64(__p0, __p1) __extension__ ({ \
+ uint64x1_t __s1 = __p1; \
+ __builtin_neon_vst1_v(__p0, (int8x8_t)__s1, 19); \
+})
+#else
+#define vst1_u64(__p0, __p1) __extension__ ({ \
+ uint64x1_t __s1 = __p1; \
+ __builtin_neon_vst1_v(__p0, (int8x8_t)__s1, 19); \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vst1_u16(__p0, __p1) __extension__ ({ \
+ uint16x4_t __s1 = __p1; \
+ __builtin_neon_vst1_v(__p0, (int8x8_t)__s1, 17); \
+})
+#else
+#define vst1_u16(__p0, __p1) __extension__ ({ \
+ uint16x4_t __s1 = __p1; \
+ uint16x4_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
+ __builtin_neon_vst1_v(__p0, (int8x8_t)__rev1, 17); \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vst1_s8(__p0, __p1) __extension__ ({ \
+ int8x8_t __s1 = __p1; \
+ __builtin_neon_vst1_v(__p0, (int8x8_t)__s1, 0); \
+})
+#else
+#define vst1_s8(__p0, __p1) __extension__ ({ \
+ int8x8_t __s1 = __p1; \
+ int8x8_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \
+ __builtin_neon_vst1_v(__p0, (int8x8_t)__rev1, 0); \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vst1_f32(__p0, __p1) __extension__ ({ \
+ float32x2_t __s1 = __p1; \
+ __builtin_neon_vst1_v(__p0, (int8x8_t)__s1, 9); \
+})
+#else
+#define vst1_f32(__p0, __p1) __extension__ ({ \
+ float32x2_t __s1 = __p1; \
+ float32x2_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \
+ __builtin_neon_vst1_v(__p0, (int8x8_t)__rev1, 9); \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vst1_f16(__p0, __p1) __extension__ ({ \
+ float16x4_t __s1 = __p1; \
+ __builtin_neon_vst1_v(__p0, (int8x8_t)__s1, 8); \
+})
+#else
+#define vst1_f16(__p0, __p1) __extension__ ({ \
+ float16x4_t __s1 = __p1; \
+ float16x4_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
+ __builtin_neon_vst1_v(__p0, (int8x8_t)__rev1, 8); \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vst1_s32(__p0, __p1) __extension__ ({ \
+ int32x2_t __s1 = __p1; \
+ __builtin_neon_vst1_v(__p0, (int8x8_t)__s1, 2); \
+})
+#else
+#define vst1_s32(__p0, __p1) __extension__ ({ \
+ int32x2_t __s1 = __p1; \
+ int32x2_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \
+ __builtin_neon_vst1_v(__p0, (int8x8_t)__rev1, 2); \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vst1_s64(__p0, __p1) __extension__ ({ \
+ int64x1_t __s1 = __p1; \
+ __builtin_neon_vst1_v(__p0, (int8x8_t)__s1, 3); \
+})
+#else
+#define vst1_s64(__p0, __p1) __extension__ ({ \
+ int64x1_t __s1 = __p1; \
+ __builtin_neon_vst1_v(__p0, (int8x8_t)__s1, 3); \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vst1_s16(__p0, __p1) __extension__ ({ \
+ int16x4_t __s1 = __p1; \
+ __builtin_neon_vst1_v(__p0, (int8x8_t)__s1, 1); \
+})
+#else
+#define vst1_s16(__p0, __p1) __extension__ ({ \
+ int16x4_t __s1 = __p1; \
+ int16x4_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
+ __builtin_neon_vst1_v(__p0, (int8x8_t)__rev1, 1); \
+})
+#endif
+
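+/* Editor's note (illustrative sketch, not part of the generated header):
+ * vst1/vst1q store an entire 64- or 128-bit vector to memory; the
+ * big-endian variants reverse the lanes first so element 0 always lands at
+ * the lowest address, matching array order.  For example:
+ *
+ *   void store4(uint32_t *dst, uint32x4_t v) {
+ *     vst1q_u32(dst, v);  // dst[i] = lane i, for i in 0..3
+ *   }
+ */
+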
+#ifdef __LITTLE_ENDIAN__
+#define vst1_lane_p8(__p0, __p1, __p2) __extension__ ({ \
+ poly8x8_t __s1 = __p1; \
+ __builtin_neon_vst1_lane_v(__p0, (int8x8_t)__s1, __p2, 4); \
+})
+#else
+#define vst1_lane_p8(__p0, __p1, __p2) __extension__ ({ \
+ poly8x8_t __s1 = __p1; \
+ poly8x8_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \
+ __builtin_neon_vst1_lane_v(__p0, (int8x8_t)__rev1, __p2, 4); \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vst1_lane_p16(__p0, __p1, __p2) __extension__ ({ \
+ poly16x4_t __s1 = __p1; \
+ __builtin_neon_vst1_lane_v(__p0, (int8x8_t)__s1, __p2, 5); \
+})
+#else
+#define vst1_lane_p16(__p0, __p1, __p2) __extension__ ({ \
+ poly16x4_t __s1 = __p1; \
+ poly16x4_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
+ __builtin_neon_vst1_lane_v(__p0, (int8x8_t)__rev1, __p2, 5); \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vst1q_lane_p8(__p0, __p1, __p2) __extension__ ({ \
+ poly8x16_t __s1 = __p1; \
+ __builtin_neon_vst1q_lane_v(__p0, (int8x16_t)__s1, __p2, 36); \
+})
+#else
+#define vst1q_lane_p8(__p0, __p1, __p2) __extension__ ({ \
+ poly8x16_t __s1 = __p1; \
+ poly8x16_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
+ __builtin_neon_vst1q_lane_v(__p0, (int8x16_t)__rev1, __p2, 36); \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vst1q_lane_p16(__p0, __p1, __p2) __extension__ ({ \
+ poly16x8_t __s1 = __p1; \
+ __builtin_neon_vst1q_lane_v(__p0, (int8x16_t)__s1, __p2, 37); \
+})
+#else
+#define vst1q_lane_p16(__p0, __p1, __p2) __extension__ ({ \
+ poly16x8_t __s1 = __p1; \
+ poly16x8_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \
+ __builtin_neon_vst1q_lane_v(__p0, (int8x16_t)__rev1, __p2, 37); \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vst1q_lane_u8(__p0, __p1, __p2) __extension__ ({ \
+ uint8x16_t __s1 = __p1; \
+ __builtin_neon_vst1q_lane_v(__p0, (int8x16_t)__s1, __p2, 48); \
+})
+#else
+#define vst1q_lane_u8(__p0, __p1, __p2) __extension__ ({ \
+ uint8x16_t __s1 = __p1; \
+ uint8x16_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
+ __builtin_neon_vst1q_lane_v(__p0, (int8x16_t)__rev1, __p2, 48); \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vst1q_lane_u32(__p0, __p1, __p2) __extension__ ({ \
+ uint32x4_t __s1 = __p1; \
+ __builtin_neon_vst1q_lane_v(__p0, (int8x16_t)__s1, __p2, 50); \
+})
+#else
+#define vst1q_lane_u32(__p0, __p1, __p2) __extension__ ({ \
+ uint32x4_t __s1 = __p1; \
+ uint32x4_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
+ __builtin_neon_vst1q_lane_v(__p0, (int8x16_t)__rev1, __p2, 50); \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vst1q_lane_u64(__p0, __p1, __p2) __extension__ ({ \
+ uint64x2_t __s1 = __p1; \
+ __builtin_neon_vst1q_lane_v(__p0, (int8x16_t)__s1, __p2, 51); \
+})
+#else
+#define vst1q_lane_u64(__p0, __p1, __p2) __extension__ ({ \
+ uint64x2_t __s1 = __p1; \
+ uint64x2_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \
+ __builtin_neon_vst1q_lane_v(__p0, (int8x16_t)__rev1, __p2, 51); \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vst1q_lane_u16(__p0, __p1, __p2) __extension__ ({ \
+ uint16x8_t __s1 = __p1; \
+ __builtin_neon_vst1q_lane_v(__p0, (int8x16_t)__s1, __p2, 49); \
+})
+#else
+#define vst1q_lane_u16(__p0, __p1, __p2) __extension__ ({ \
+ uint16x8_t __s1 = __p1; \
+ uint16x8_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \
+ __builtin_neon_vst1q_lane_v(__p0, (int8x16_t)__rev1, __p2, 49); \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vst1q_lane_s8(__p0, __p1, __p2) __extension__ ({ \
+ int8x16_t __s1 = __p1; \
+ __builtin_neon_vst1q_lane_v(__p0, (int8x16_t)__s1, __p2, 32); \
+})
+#else
+#define vst1q_lane_s8(__p0, __p1, __p2) __extension__ ({ \
+ int8x16_t __s1 = __p1; \
+ int8x16_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
+ __builtin_neon_vst1q_lane_v(__p0, (int8x16_t)__rev1, __p2, 32); \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vst1q_lane_f32(__p0, __p1, __p2) __extension__ ({ \
+ float32x4_t __s1 = __p1; \
+ __builtin_neon_vst1q_lane_v(__p0, (int8x16_t)__s1, __p2, 41); \
+})
+#else
+#define vst1q_lane_f32(__p0, __p1, __p2) __extension__ ({ \
+ float32x4_t __s1 = __p1; \
+ float32x4_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
+ __builtin_neon_vst1q_lane_v(__p0, (int8x16_t)__rev1, __p2, 41); \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vst1q_lane_f16(__p0, __p1, __p2) __extension__ ({ \
+ float16x8_t __s1 = __p1; \
+ __builtin_neon_vst1q_lane_v(__p0, (int8x16_t)__s1, __p2, 40); \
+})
+#else
+#define vst1q_lane_f16(__p0, __p1, __p2) __extension__ ({ \
+ float16x8_t __s1 = __p1; \
+ float16x8_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \
+ __builtin_neon_vst1q_lane_v(__p0, (int8x16_t)__rev1, __p2, 40); \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vst1q_lane_s32(__p0, __p1, __p2) __extension__ ({ \
+ int32x4_t __s1 = __p1; \
+ __builtin_neon_vst1q_lane_v(__p0, (int8x16_t)__s1, __p2, 34); \
+})
+#else
+#define vst1q_lane_s32(__p0, __p1, __p2) __extension__ ({ \
+ int32x4_t __s1 = __p1; \
+ int32x4_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
+ __builtin_neon_vst1q_lane_v(__p0, (int8x16_t)__rev1, __p2, 34); \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vst1q_lane_s64(__p0, __p1, __p2) __extension__ ({ \
+ int64x2_t __s1 = __p1; \
+ __builtin_neon_vst1q_lane_v(__p0, (int8x16_t)__s1, __p2, 35); \
+})
+#else
+#define vst1q_lane_s64(__p0, __p1, __p2) __extension__ ({ \
+ int64x2_t __s1 = __p1; \
+ int64x2_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \
+ __builtin_neon_vst1q_lane_v(__p0, (int8x16_t)__rev1, __p2, 35); \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vst1q_lane_s16(__p0, __p1, __p2) __extension__ ({ \
+ int16x8_t __s1 = __p1; \
+ __builtin_neon_vst1q_lane_v(__p0, (int8x16_t)__s1, __p2, 33); \
+})
+#else
+#define vst1q_lane_s16(__p0, __p1, __p2) __extension__ ({ \
+ int16x8_t __s1 = __p1; \
+ int16x8_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \
+ __builtin_neon_vst1q_lane_v(__p0, (int8x16_t)__rev1, __p2, 33); \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vst1_lane_u8(__p0, __p1, __p2) __extension__ ({ \
+ uint8x8_t __s1 = __p1; \
+ __builtin_neon_vst1_lane_v(__p0, (int8x8_t)__s1, __p2, 16); \
+})
+#else
+#define vst1_lane_u8(__p0, __p1, __p2) __extension__ ({ \
+ uint8x8_t __s1 = __p1; \
+ uint8x8_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \
+ __builtin_neon_vst1_lane_v(__p0, (int8x8_t)__rev1, __p2, 16); \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vst1_lane_u32(__p0, __p1, __p2) __extension__ ({ \
+ uint32x2_t __s1 = __p1; \
+ __builtin_neon_vst1_lane_v(__p0, (int8x8_t)__s1, __p2, 18); \
+})
+#else
+#define vst1_lane_u32(__p0, __p1, __p2) __extension__ ({ \
+ uint32x2_t __s1 = __p1; \
+ uint32x2_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \
+ __builtin_neon_vst1_lane_v(__p0, (int8x8_t)__rev1, __p2, 18); \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vst1_lane_u64(__p0, __p1, __p2) __extension__ ({ \
+ uint64x1_t __s1 = __p1; \
+ __builtin_neon_vst1_lane_v(__p0, (int8x8_t)__s1, __p2, 19); \
+})
+#else
+#define vst1_lane_u64(__p0, __p1, __p2) __extension__ ({ \
+ uint64x1_t __s1 = __p1; \
+ __builtin_neon_vst1_lane_v(__p0, (int8x8_t)__s1, __p2, 19); \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vst1_lane_u16(__p0, __p1, __p2) __extension__ ({ \
+ uint16x4_t __s1 = __p1; \
+ __builtin_neon_vst1_lane_v(__p0, (int8x8_t)__s1, __p2, 17); \
+})
+#else
+#define vst1_lane_u16(__p0, __p1, __p2) __extension__ ({ \
+ uint16x4_t __s1 = __p1; \
+ uint16x4_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
+ __builtin_neon_vst1_lane_v(__p0, (int8x8_t)__rev1, __p2, 17); \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vst1_lane_s8(__p0, __p1, __p2) __extension__ ({ \
+ int8x8_t __s1 = __p1; \
+ __builtin_neon_vst1_lane_v(__p0, (int8x8_t)__s1, __p2, 0); \
+})
+#else
+#define vst1_lane_s8(__p0, __p1, __p2) __extension__ ({ \
+ int8x8_t __s1 = __p1; \
+ int8x8_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \
+ __builtin_neon_vst1_lane_v(__p0, (int8x8_t)__rev1, __p2, 0); \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vst1_lane_f32(__p0, __p1, __p2) __extension__ ({ \
+ float32x2_t __s1 = __p1; \
+ __builtin_neon_vst1_lane_v(__p0, (int8x8_t)__s1, __p2, 9); \
+})
+#else
+#define vst1_lane_f32(__p0, __p1, __p2) __extension__ ({ \
+ float32x2_t __s1 = __p1; \
+ float32x2_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \
+ __builtin_neon_vst1_lane_v(__p0, (int8x8_t)__rev1, __p2, 9); \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vst1_lane_f16(__p0, __p1, __p2) __extension__ ({ \
+ float16x4_t __s1 = __p1; \
+ __builtin_neon_vst1_lane_v(__p0, (int8x8_t)__s1, __p2, 8); \
+})
+#else
+#define vst1_lane_f16(__p0, __p1, __p2) __extension__ ({ \
+ float16x4_t __s1 = __p1; \
+ float16x4_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
+ __builtin_neon_vst1_lane_v(__p0, (int8x8_t)__rev1, __p2, 8); \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vst1_lane_s32(__p0, __p1, __p2) __extension__ ({ \
+ int32x2_t __s1 = __p1; \
+ __builtin_neon_vst1_lane_v(__p0, (int8x8_t)__s1, __p2, 2); \
+})
+#else
+#define vst1_lane_s32(__p0, __p1, __p2) __extension__ ({ \
+ int32x2_t __s1 = __p1; \
+ int32x2_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \
+ __builtin_neon_vst1_lane_v(__p0, (int8x8_t)__rev1, __p2, 2); \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vst1_lane_s64(__p0, __p1, __p2) __extension__ ({ \
+ int64x1_t __s1 = __p1; \
+ __builtin_neon_vst1_lane_v(__p0, (int8x8_t)__s1, __p2, 3); \
+})
+#else
+#define vst1_lane_s64(__p0, __p1, __p2) __extension__ ({ \
+ int64x1_t __s1 = __p1; \
+ __builtin_neon_vst1_lane_v(__p0, (int8x8_t)__s1, __p2, 3); \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vst1_lane_s16(__p0, __p1, __p2) __extension__ ({ \
+ int16x4_t __s1 = __p1; \
+ __builtin_neon_vst1_lane_v(__p0, (int8x8_t)__s1, __p2, 1); \
+})
+#else
+#define vst1_lane_s16(__p0, __p1, __p2) __extension__ ({ \
+ int16x4_t __s1 = __p1; \
+ int16x4_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
+ __builtin_neon_vst1_lane_v(__p0, (int8x8_t)__rev1, __p2, 1); \
+})
+#endif
+
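+/* vst2 stores a pair of registers with 2-way interleaving. The *x2_t
+ * argument is a struct of two vectors (.val[0] and .val[1]); the big-endian
+ * branch lane-reverses each member independently before calling the
+ * builtin, exactly as the single-vector vst1 forms above do. */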
+#ifdef __LITTLE_ENDIAN__
+#define vst2_p8(__p0, __p1) __extension__ ({ \
+ poly8x8x2_t __s1 = __p1; \
+ __builtin_neon_vst2_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], 4); \
+})
+#else
+#define vst2_p8(__p0, __p1) __extension__ ({ \
+ poly8x8x2_t __s1 = __p1; \
+ poly8x8x2_t __rev1; \
+ __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
+ __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
+ __builtin_neon_vst2_v(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], 4); \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vst2_p16(__p0, __p1) __extension__ ({ \
+ poly16x4x2_t __s1 = __p1; \
+ __builtin_neon_vst2_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], 5); \
+})
+#else
+#define vst2_p16(__p0, __p1) __extension__ ({ \
+ poly16x4x2_t __s1 = __p1; \
+ poly16x4x2_t __rev1; \
+ __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \
+ __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \
+ __builtin_neon_vst2_v(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], 5); \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vst2q_p8(__p0, __p1) __extension__ ({ \
+ poly8x16x2_t __s1 = __p1; \
+ __builtin_neon_vst2q_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], 36); \
+})
+#else
+#define vst2q_p8(__p0, __p1) __extension__ ({ \
+ poly8x16x2_t __s1 = __p1; \
+ poly8x16x2_t __rev1; \
+ __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
+ __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
+ __builtin_neon_vst2q_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], 36); \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vst2q_p16(__p0, __p1) __extension__ ({ \
+ poly16x8x2_t __s1 = __p1; \
+ __builtin_neon_vst2q_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], 37); \
+})
+#else
+#define vst2q_p16(__p0, __p1) __extension__ ({ \
+ poly16x8x2_t __s1 = __p1; \
+ poly16x8x2_t __rev1; \
+ __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
+ __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
+ __builtin_neon_vst2q_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], 37); \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vst2q_u8(__p0, __p1) __extension__ ({ \
+ uint8x16x2_t __s1 = __p1; \
+ __builtin_neon_vst2q_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], 48); \
+})
+#else
+#define vst2q_u8(__p0, __p1) __extension__ ({ \
+ uint8x16x2_t __s1 = __p1; \
+ uint8x16x2_t __rev1; \
+ __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
+ __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
+ __builtin_neon_vst2q_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], 48); \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vst2q_u32(__p0, __p1) __extension__ ({ \
+ uint32x4x2_t __s1 = __p1; \
+ __builtin_neon_vst2q_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], 50); \
+})
+#else
+#define vst2q_u32(__p0, __p1) __extension__ ({ \
+ uint32x4x2_t __s1 = __p1; \
+ uint32x4x2_t __rev1; \
+ __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \
+ __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \
+ __builtin_neon_vst2q_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], 50); \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vst2q_u16(__p0, __p1) __extension__ ({ \
+ uint16x8x2_t __s1 = __p1; \
+ __builtin_neon_vst2q_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], 49); \
+})
+#else
+#define vst2q_u16(__p0, __p1) __extension__ ({ \
+ uint16x8x2_t __s1 = __p1; \
+ uint16x8x2_t __rev1; \
+ __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
+ __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
+ __builtin_neon_vst2q_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], 49); \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vst2q_s8(__p0, __p1) __extension__ ({ \
+ int8x16x2_t __s1 = __p1; \
+ __builtin_neon_vst2q_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], 32); \
+})
+#else
+#define vst2q_s8(__p0, __p1) __extension__ ({ \
+ int8x16x2_t __s1 = __p1; \
+ int8x16x2_t __rev1; \
+ __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
+ __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
+ __builtin_neon_vst2q_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], 32); \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vst2q_f32(__p0, __p1) __extension__ ({ \
+ float32x4x2_t __s1 = __p1; \
+ __builtin_neon_vst2q_v(__p0, __s1.val[0], __s1.val[1], 41); \
+})
+#else
+#define vst2q_f32(__p0, __p1) __extension__ ({ \
+ float32x4x2_t __s1 = __p1; \
+ float32x4x2_t __rev1; \
+ __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \
+ __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \
+ __builtin_neon_vst2q_v(__p0, __rev1.val[0], __rev1.val[1], 41); \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vst2q_f16(__p0, __p1) __extension__ ({ \
+ float16x8x2_t __s1 = __p1; \
+ __builtin_neon_vst2q_v(__p0, __s1.val[0], __s1.val[1], 40); \
+})
+#else
+#define vst2q_f16(__p0, __p1) __extension__ ({ \
+ float16x8x2_t __s1 = __p1; \
+ float16x8x2_t __rev1; \
+ __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
+ __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
+ __builtin_neon_vst2q_v(__p0, __rev1.val[0], __rev1.val[1], 40); \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vst2q_s32(__p0, __p1) __extension__ ({ \
+ int32x4x2_t __s1 = __p1; \
+ __builtin_neon_vst2q_v(__p0, __s1.val[0], __s1.val[1], 34); \
+})
+#else
+#define vst2q_s32(__p0, __p1) __extension__ ({ \
+ int32x4x2_t __s1 = __p1; \
+ int32x4x2_t __rev1; \
+ __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \
+ __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \
+ __builtin_neon_vst2q_v(__p0, __rev1.val[0], __rev1.val[1], 34); \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vst2q_s16(__p0, __p1) __extension__ ({ \
+ int16x8x2_t __s1 = __p1; \
+ __builtin_neon_vst2q_v(__p0, __s1.val[0], __s1.val[1], 33); \
+})
+#else
+#define vst2q_s16(__p0, __p1) __extension__ ({ \
+ int16x8x2_t __s1 = __p1; \
+ int16x8x2_t __rev1; \
+ __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
+ __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
+ __builtin_neon_vst2q_v(__p0, __rev1.val[0], __rev1.val[1], 33); \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vst2_u8(__p0, __p1) __extension__ ({ \
+ uint8x8x2_t __s1 = __p1; \
+ __builtin_neon_vst2_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], 16); \
+})
+#else
+#define vst2_u8(__p0, __p1) __extension__ ({ \
+ uint8x8x2_t __s1 = __p1; \
+ uint8x8x2_t __rev1; \
+ __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
+ __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
+ __builtin_neon_vst2_v(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], 16); \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vst2_u32(__p0, __p1) __extension__ ({ \
+ uint32x2x2_t __s1 = __p1; \
+ __builtin_neon_vst2_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], 18); \
+})
+#else
+#define vst2_u32(__p0, __p1) __extension__ ({ \
+ uint32x2x2_t __s1 = __p1; \
+ uint32x2x2_t __rev1; \
+ __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \
+ __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \
+ __builtin_neon_vst2_v(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], 18); \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vst2_u64(__p0, __p1) __extension__ ({ \
+ uint64x1x2_t __s1 = __p1; \
+ __builtin_neon_vst2_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], 19); \
+})
+#else
+#define vst2_u64(__p0, __p1) __extension__ ({ \
+ uint64x1x2_t __s1 = __p1; \
+ __builtin_neon_vst2_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], 19); \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vst2_u16(__p0, __p1) __extension__ ({ \
+ uint16x4x2_t __s1 = __p1; \
+ __builtin_neon_vst2_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], 17); \
+})
+#else
+#define vst2_u16(__p0, __p1) __extension__ ({ \
+ uint16x4x2_t __s1 = __p1; \
+ uint16x4x2_t __rev1; \
+ __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \
+ __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \
+ __builtin_neon_vst2_v(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], 17); \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vst2_s8(__p0, __p1) __extension__ ({ \
+ int8x8x2_t __s1 = __p1; \
+ __builtin_neon_vst2_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], 0); \
+})
+#else
+#define vst2_s8(__p0, __p1) __extension__ ({ \
+ int8x8x2_t __s1 = __p1; \
+ int8x8x2_t __rev1; \
+ __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
+ __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
+ __builtin_neon_vst2_v(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], 0); \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vst2_f32(__p0, __p1) __extension__ ({ \
+ float32x2x2_t __s1 = __p1; \
+ __builtin_neon_vst2_v(__p0, __s1.val[0], __s1.val[1], 9); \
+})
+#else
+#define vst2_f32(__p0, __p1) __extension__ ({ \
+ float32x2x2_t __s1 = __p1; \
+ float32x2x2_t __rev1; \
+ __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \
+ __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \
+ __builtin_neon_vst2_v(__p0, __rev1.val[0], __rev1.val[1], 9); \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vst2_f16(__p0, __p1) __extension__ ({ \
+ float16x4x2_t __s1 = __p1; \
+ __builtin_neon_vst2_v(__p0, __s1.val[0], __s1.val[1], 8); \
+})
+#else
+#define vst2_f16(__p0, __p1) __extension__ ({ \
+ float16x4x2_t __s1 = __p1; \
+ float16x4x2_t __rev1; \
+ __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \
+ __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \
+ __builtin_neon_vst2_v(__p0, __rev1.val[0], __rev1.val[1], 8); \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vst2_s32(__p0, __p1) __extension__ ({ \
+ int32x2x2_t __s1 = __p1; \
+ __builtin_neon_vst2_v(__p0, __s1.val[0], __s1.val[1], 2); \
+})
+#else
+#define vst2_s32(__p0, __p1) __extension__ ({ \
+ int32x2x2_t __s1 = __p1; \
+ int32x2x2_t __rev1; \
+ __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \
+ __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \
+ __builtin_neon_vst2_v(__p0, __rev1.val[0], __rev1.val[1], 2); \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vst2_s64(__p0, __p1) __extension__ ({ \
+ int64x1x2_t __s1 = __p1; \
+ __builtin_neon_vst2_v(__p0, __s1.val[0], __s1.val[1], 3); \
+})
+#else
+#define vst2_s64(__p0, __p1) __extension__ ({ \
+ int64x1x2_t __s1 = __p1; \
+ __builtin_neon_vst2_v(__p0, __s1.val[0], __s1.val[1], 3); \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vst2_s16(__p0, __p1) __extension__ ({ \
+ int16x4x2_t __s1 = __p1; \
+ __builtin_neon_vst2_v(__p0, __s1.val[0], __s1.val[1], 1); \
+})
+#else
+#define vst2_s16(__p0, __p1) __extension__ ({ \
+ int16x4x2_t __s1 = __p1; \
+ int16x4x2_t __rev1; \
+ __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \
+ __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \
+ __builtin_neon_vst2_v(__p0, __rev1.val[0], __rev1.val[1], 1); \
+})
+#endif
+
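+/* The vst2_lane_* forms take an extra constant lane index __p2 and store
+ * just that lane from each of the two vectors; the endianness handling is
+ * unchanged, with the lanes reversed first on big-endian targets so that
+ * __p2 selects the same element either way. */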
+#ifdef __LITTLE_ENDIAN__
+#define vst2_lane_p8(__p0, __p1, __p2) __extension__ ({ \
+ poly8x8x2_t __s1 = __p1; \
+ __builtin_neon_vst2_lane_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], __p2, 4); \
+})
+#else
+#define vst2_lane_p8(__p0, __p1, __p2) __extension__ ({ \
+ poly8x8x2_t __s1 = __p1; \
+ poly8x8x2_t __rev1; \
+ __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
+ __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
+ __builtin_neon_vst2_lane_v(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], __p2, 4); \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vst2_lane_p16(__p0, __p1, __p2) __extension__ ({ \
+ poly16x4x2_t __s1 = __p1; \
+ __builtin_neon_vst2_lane_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], __p2, 5); \
+})
+#else
+#define vst2_lane_p16(__p0, __p1, __p2) __extension__ ({ \
+ poly16x4x2_t __s1 = __p1; \
+ poly16x4x2_t __rev1; \
+ __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \
+ __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \
+ __builtin_neon_vst2_lane_v(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], __p2, 5); \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vst2q_lane_p16(__p0, __p1, __p2) __extension__ ({ \
+ poly16x8x2_t __s1 = __p1; \
+ __builtin_neon_vst2q_lane_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], __p2, 37); \
+})
+#else
+#define vst2q_lane_p16(__p0, __p1, __p2) __extension__ ({ \
+ poly16x8x2_t __s1 = __p1; \
+ poly16x8x2_t __rev1; \
+ __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
+ __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
+ __builtin_neon_vst2q_lane_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], __p2, 37); \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vst2q_lane_u32(__p0, __p1, __p2) __extension__ ({ \
+ uint32x4x2_t __s1 = __p1; \
+ __builtin_neon_vst2q_lane_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], __p2, 50); \
+})
+#else
+#define vst2q_lane_u32(__p0, __p1, __p2) __extension__ ({ \
+ uint32x4x2_t __s1 = __p1; \
+ uint32x4x2_t __rev1; \
+ __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \
+ __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \
+ __builtin_neon_vst2q_lane_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], __p2, 50); \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vst2q_lane_u16(__p0, __p1, __p2) __extension__ ({ \
+ uint16x8x2_t __s1 = __p1; \
+ __builtin_neon_vst2q_lane_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], __p2, 49); \
+})
+#else
+#define vst2q_lane_u16(__p0, __p1, __p2) __extension__ ({ \
+ uint16x8x2_t __s1 = __p1; \
+ uint16x8x2_t __rev1; \
+ __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
+ __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
+ __builtin_neon_vst2q_lane_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], __p2, 49); \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vst2q_lane_f32(__p0, __p1, __p2) __extension__ ({ \
+ float32x4x2_t __s1 = __p1; \
+ __builtin_neon_vst2q_lane_v(__p0, __s1.val[0], __s1.val[1], __p2, 41); \
+})
+#else
+#define vst2q_lane_f32(__p0, __p1, __p2) __extension__ ({ \
+ float32x4x2_t __s1 = __p1; \
+ float32x4x2_t __rev1; \
+ __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \
+ __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \
+ __builtin_neon_vst2q_lane_v(__p0, __rev1.val[0], __rev1.val[1], __p2, 41); \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vst2q_lane_f16(__p0, __p1, __p2) __extension__ ({ \
+ float16x8x2_t __s1 = __p1; \
+ __builtin_neon_vst2q_lane_v(__p0, __s1.val[0], __s1.val[1], __p2, 40); \
+})
+#else
+#define vst2q_lane_f16(__p0, __p1, __p2) __extension__ ({ \
+ float16x8x2_t __s1 = __p1; \
+ float16x8x2_t __rev1; \
+ __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
+ __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
+ __builtin_neon_vst2q_lane_v(__p0, __rev1.val[0], __rev1.val[1], __p2, 40); \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vst2q_lane_s32(__p0, __p1, __p2) __extension__ ({ \
+ int32x4x2_t __s1 = __p1; \
+ __builtin_neon_vst2q_lane_v(__p0, __s1.val[0], __s1.val[1], __p2, 34); \
+})
+#else
+#define vst2q_lane_s32(__p0, __p1, __p2) __extension__ ({ \
+ int32x4x2_t __s1 = __p1; \
+ int32x4x2_t __rev1; \
+ __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \
+ __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \
+ __builtin_neon_vst2q_lane_v(__p0, __rev1.val[0], __rev1.val[1], __p2, 34); \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vst2q_lane_s16(__p0, __p1, __p2) __extension__ ({ \
+ int16x8x2_t __s1 = __p1; \
+ __builtin_neon_vst2q_lane_v(__p0, __s1.val[0], __s1.val[1], __p2, 33); \
+})
+#else
+#define vst2q_lane_s16(__p0, __p1, __p2) __extension__ ({ \
+ int16x8x2_t __s1 = __p1; \
+ int16x8x2_t __rev1; \
+ __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
+ __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
+ __builtin_neon_vst2q_lane_v(__p0, __rev1.val[0], __rev1.val[1], __p2, 33); \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vst2_lane_u8(__p0, __p1, __p2) __extension__ ({ \
+ uint8x8x2_t __s1 = __p1; \
+ __builtin_neon_vst2_lane_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], __p2, 16); \
+})
+#else
+#define vst2_lane_u8(__p0, __p1, __p2) __extension__ ({ \
+ uint8x8x2_t __s1 = __p1; \
+ uint8x8x2_t __rev1; \
+ __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
+ __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
+ __builtin_neon_vst2_lane_v(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], __p2, 16); \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vst2_lane_u32(__p0, __p1, __p2) __extension__ ({ \
+ uint32x2x2_t __s1 = __p1; \
+ __builtin_neon_vst2_lane_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], __p2, 18); \
+})
+#else
+#define vst2_lane_u32(__p0, __p1, __p2) __extension__ ({ \
+ uint32x2x2_t __s1 = __p1; \
+ uint32x2x2_t __rev1; \
+ __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \
+ __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \
+ __builtin_neon_vst2_lane_v(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], __p2, 18); \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vst2_lane_u16(__p0, __p1, __p2) __extension__ ({ \
+ uint16x4x2_t __s1 = __p1; \
+ __builtin_neon_vst2_lane_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], __p2, 17); \
+})
+#else
+#define vst2_lane_u16(__p0, __p1, __p2) __extension__ ({ \
+ uint16x4x2_t __s1 = __p1; \
+ uint16x4x2_t __rev1; \
+ __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \
+ __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \
+ __builtin_neon_vst2_lane_v(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], __p2, 17); \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vst2_lane_s8(__p0, __p1, __p2) __extension__ ({ \
+ int8x8x2_t __s1 = __p1; \
+ __builtin_neon_vst2_lane_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], __p2, 0); \
+})
+#else
+#define vst2_lane_s8(__p0, __p1, __p2) __extension__ ({ \
+ int8x8x2_t __s1 = __p1; \
+ int8x8x2_t __rev1; \
+ __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
+ __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
+ __builtin_neon_vst2_lane_v(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], __p2, 0); \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vst2_lane_f32(__p0, __p1, __p2) __extension__ ({ \
+ float32x2x2_t __s1 = __p1; \
+ __builtin_neon_vst2_lane_v(__p0, __s1.val[0], __s1.val[1], __p2, 9); \
+})
+#else
+#define vst2_lane_f32(__p0, __p1, __p2) __extension__ ({ \
+ float32x2x2_t __s1 = __p1; \
+ float32x2x2_t __rev1; \
+ __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \
+ __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \
+ __builtin_neon_vst2_lane_v(__p0, __rev1.val[0], __rev1.val[1], __p2, 9); \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vst2_lane_f16(__p0, __p1, __p2) __extension__ ({ \
+ float16x4x2_t __s1 = __p1; \
+ __builtin_neon_vst2_lane_v(__p0, __s1.val[0], __s1.val[1], __p2, 8); \
+})
+#else
+#define vst2_lane_f16(__p0, __p1, __p2) __extension__ ({ \
+ float16x4x2_t __s1 = __p1; \
+ float16x4x2_t __rev1; \
+ __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \
+ __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \
+ __builtin_neon_vst2_lane_v(__p0, __rev1.val[0], __rev1.val[1], __p2, 8); \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vst2_lane_s32(__p0, __p1, __p2) __extension__ ({ \
+ int32x2x2_t __s1 = __p1; \
+ __builtin_neon_vst2_lane_v(__p0, __s1.val[0], __s1.val[1], __p2, 2); \
+})
+#else
+#define vst2_lane_s32(__p0, __p1, __p2) __extension__ ({ \
+ int32x2x2_t __s1 = __p1; \
+ int32x2x2_t __rev1; \
+ __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \
+ __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \
+ __builtin_neon_vst2_lane_v(__p0, __rev1.val[0], __rev1.val[1], __p2, 2); \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vst2_lane_s16(__p0, __p1, __p2) __extension__ ({ \
+ int16x4x2_t __s1 = __p1; \
+ __builtin_neon_vst2_lane_v(__p0, __s1.val[0], __s1.val[1], __p2, 1); \
+})
+#else
+#define vst2_lane_s16(__p0, __p1, __p2) __extension__ ({ \
+ int16x4x2_t __s1 = __p1; \
+ int16x4x2_t __rev1; \
+ __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \
+ __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \
+ __builtin_neon_vst2_lane_v(__p0, __rev1.val[0], __rev1.val[1], __p2, 1); \
+})
+#endif
+
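+/* vst3: the same scheme extended to a struct of three vectors, each of
+ * which gets its own lane reversal in the big-endian branch. */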
+#ifdef __LITTLE_ENDIAN__
+#define vst3_p8(__p0, __p1) __extension__ ({ \
+ poly8x8x3_t __s1 = __p1; \
+ __builtin_neon_vst3_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], 4); \
+})
+#else
+#define vst3_p8(__p0, __p1) __extension__ ({ \
+ poly8x8x3_t __s1 = __p1; \
+ poly8x8x3_t __rev1; \
+ __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
+ __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
+ __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \
+ __builtin_neon_vst3_v(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], 4); \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vst3_p16(__p0, __p1) __extension__ ({ \
+ poly16x4x3_t __s1 = __p1; \
+ __builtin_neon_vst3_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], 5); \
+})
+#else
+#define vst3_p16(__p0, __p1) __extension__ ({ \
+ poly16x4x3_t __s1 = __p1; \
+ poly16x4x3_t __rev1; \
+ __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \
+ __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \
+ __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 3, 2, 1, 0); \
+ __builtin_neon_vst3_v(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], 5); \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vst3q_p8(__p0, __p1) __extension__ ({ \
+ poly8x16x3_t __s1 = __p1; \
+ __builtin_neon_vst3q_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], 36); \
+})
+#else
+#define vst3q_p8(__p0, __p1) __extension__ ({ \
+ poly8x16x3_t __s1 = __p1; \
+ poly8x16x3_t __rev1; \
+ __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
+ __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
+ __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
+ __builtin_neon_vst3q_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], 36); \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vst3q_p16(__p0, __p1) __extension__ ({ \
+ poly16x8x3_t __s1 = __p1; \
+ __builtin_neon_vst3q_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], 37); \
+})
+#else
+#define vst3q_p16(__p0, __p1) __extension__ ({ \
+ poly16x8x3_t __s1 = __p1; \
+ poly16x8x3_t __rev1; \
+ __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
+ __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
+ __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \
+ __builtin_neon_vst3q_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], 37); \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vst3q_u8(__p0, __p1) __extension__ ({ \
+ uint8x16x3_t __s1 = __p1; \
+ __builtin_neon_vst3q_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], 48); \
+})
+#else
+#define vst3q_u8(__p0, __p1) __extension__ ({ \
+ uint8x16x3_t __s1 = __p1; \
+ uint8x16x3_t __rev1; \
+ __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
+ __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
+ __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
+ __builtin_neon_vst3q_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], 48); \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vst3q_u32(__p0, __p1) __extension__ ({ \
+ uint32x4x3_t __s1 = __p1; \
+ __builtin_neon_vst3q_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], 50); \
+})
+#else
+#define vst3q_u32(__p0, __p1) __extension__ ({ \
+ uint32x4x3_t __s1 = __p1; \
+ uint32x4x3_t __rev1; \
+ __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \
+ __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \
+ __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 3, 2, 1, 0); \
+ __builtin_neon_vst3q_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], 50); \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vst3q_u16(__p0, __p1) __extension__ ({ \
+ uint16x8x3_t __s1 = __p1; \
+ __builtin_neon_vst3q_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], 49); \
+})
+#else
+#define vst3q_u16(__p0, __p1) __extension__ ({ \
+ uint16x8x3_t __s1 = __p1; \
+ uint16x8x3_t __rev1; \
+ __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
+ __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
+ __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \
+ __builtin_neon_vst3q_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], 49); \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vst3q_s8(__p0, __p1) __extension__ ({ \
+ int8x16x3_t __s1 = __p1; \
+ __builtin_neon_vst3q_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], 32); \
+})
+#else
+#define vst3q_s8(__p0, __p1) __extension__ ({ \
+ int8x16x3_t __s1 = __p1; \
+ int8x16x3_t __rev1; \
+ __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
+ __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
+ __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
+ __builtin_neon_vst3q_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], 32); \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vst3q_f32(__p0, __p1) __extension__ ({ \
+ float32x4x3_t __s1 = __p1; \
+ __builtin_neon_vst3q_v(__p0, __s1.val[0], __s1.val[1], __s1.val[2], 41); \
+})
+#else
+#define vst3q_f32(__p0, __p1) __extension__ ({ \
+ float32x4x3_t __s1 = __p1; \
+ float32x4x3_t __rev1; \
+ __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \
+ __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \
+ __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 3, 2, 1, 0); \
+ __builtin_neon_vst3q_v(__p0, __rev1.val[0], __rev1.val[1], __rev1.val[2], 41); \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vst3q_f16(__p0, __p1) __extension__ ({ \
+ float16x8x3_t __s1 = __p1; \
+ __builtin_neon_vst3q_v(__p0, __s1.val[0], __s1.val[1], __s1.val[2], 40); \
+})
+#else
+#define vst3q_f16(__p0, __p1) __extension__ ({ \
+ float16x8x3_t __s1 = __p1; \
+ float16x8x3_t __rev1; \
+ __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
+ __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
+ __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \
+ __builtin_neon_vst3q_v(__p0, __rev1.val[0], __rev1.val[1], __rev1.val[2], 40); \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vst3q_s32(__p0, __p1) __extension__ ({ \
+ int32x4x3_t __s1 = __p1; \
+ __builtin_neon_vst3q_v(__p0, __s1.val[0], __s1.val[1], __s1.val[2], 34); \
+})
+#else
+#define vst3q_s32(__p0, __p1) __extension__ ({ \
+ int32x4x3_t __s1 = __p1; \
+ int32x4x3_t __rev1; \
+ __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \
+ __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \
+ __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 3, 2, 1, 0); \
+ __builtin_neon_vst3q_v(__p0, __rev1.val[0], __rev1.val[1], __rev1.val[2], 34); \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vst3q_s16(__p0, __p1) __extension__ ({ \
+ int16x8x3_t __s1 = __p1; \
+ __builtin_neon_vst3q_v(__p0, __s1.val[0], __s1.val[1], __s1.val[2], 33); \
+})
+#else
+#define vst3q_s16(__p0, __p1) __extension__ ({ \
+ int16x8x3_t __s1 = __p1; \
+ int16x8x3_t __rev1; \
+ __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
+ __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
+ __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \
+ __builtin_neon_vst3q_v(__p0, __rev1.val[0], __rev1.val[1], __rev1.val[2], 33); \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vst3_u8(__p0, __p1) __extension__ ({ \
+ uint8x8x3_t __s1 = __p1; \
+ __builtin_neon_vst3_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], 16); \
+})
+#else
+#define vst3_u8(__p0, __p1) __extension__ ({ \
+ uint8x8x3_t __s1 = __p1; \
+ uint8x8x3_t __rev1; \
+ __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
+ __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
+ __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \
+ __builtin_neon_vst3_v(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], 16); \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vst3_u32(__p0, __p1) __extension__ ({ \
+ uint32x2x3_t __s1 = __p1; \
+ __builtin_neon_vst3_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], 18); \
+})
+#else
+#define vst3_u32(__p0, __p1) __extension__ ({ \
+ uint32x2x3_t __s1 = __p1; \
+ uint32x2x3_t __rev1; \
+ __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \
+ __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \
+ __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 1, 0); \
+ __builtin_neon_vst3_v(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], 18); \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vst3_u64(__p0, __p1) __extension__ ({ \
+ uint64x1x3_t __s1 = __p1; \
+ __builtin_neon_vst3_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], 19); \
+})
+#else
+#define vst3_u64(__p0, __p1) __extension__ ({ \
+ uint64x1x3_t __s1 = __p1; \
+ __builtin_neon_vst3_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], 19); \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vst3_u16(__p0, __p1) __extension__ ({ \
+ uint16x4x3_t __s1 = __p1; \
+ __builtin_neon_vst3_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], 17); \
+})
+#else
+#define vst3_u16(__p0, __p1) __extension__ ({ \
+ uint16x4x3_t __s1 = __p1; \
+ uint16x4x3_t __rev1; \
+ __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \
+ __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \
+ __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 3, 2, 1, 0); \
+ __builtin_neon_vst3_v(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], 17); \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vst3_s8(__p0, __p1) __extension__ ({ \
+ int8x8x3_t __s1 = __p1; \
+ __builtin_neon_vst3_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], 0); \
+})
+#else
+#define vst3_s8(__p0, __p1) __extension__ ({ \
+ int8x8x3_t __s1 = __p1; \
+ int8x8x3_t __rev1; \
+ __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
+ __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
+ __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \
+ __builtin_neon_vst3_v(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], 0); \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vst3_f32(__p0, __p1) __extension__ ({ \
+ float32x2x3_t __s1 = __p1; \
+ __builtin_neon_vst3_v(__p0, __s1.val[0], __s1.val[1], __s1.val[2], 9); \
+})
+#else
+#define vst3_f32(__p0, __p1) __extension__ ({ \
+ float32x2x3_t __s1 = __p1; \
+ float32x2x3_t __rev1; \
+ __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \
+ __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \
+ __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 1, 0); \
+ __builtin_neon_vst3_v(__p0, __rev1.val[0], __rev1.val[1], __rev1.val[2], 9); \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vst3_f16(__p0, __p1) __extension__ ({ \
+ float16x4x3_t __s1 = __p1; \
+ __builtin_neon_vst3_v(__p0, __s1.val[0], __s1.val[1], __s1.val[2], 8); \
+})
+#else
+#define vst3_f16(__p0, __p1) __extension__ ({ \
+ float16x4x3_t __s1 = __p1; \
+ float16x4x3_t __rev1; \
+ __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \
+ __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \
+ __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 3, 2, 1, 0); \
+ __builtin_neon_vst3_v(__p0, __rev1.val[0], __rev1.val[1], __rev1.val[2], 8); \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vst3_s32(__p0, __p1) __extension__ ({ \
+ int32x2x3_t __s1 = __p1; \
+ __builtin_neon_vst3_v(__p0, __s1.val[0], __s1.val[1], __s1.val[2], 2); \
+})
+#else
+#define vst3_s32(__p0, __p1) __extension__ ({ \
+ int32x2x3_t __s1 = __p1; \
+ int32x2x3_t __rev1; \
+ __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \
+ __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \
+ __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 1, 0); \
+ __builtin_neon_vst3_v(__p0, __rev1.val[0], __rev1.val[1], __rev1.val[2], 2); \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vst3_s64(__p0, __p1) __extension__ ({ \
+ int64x1x3_t __s1 = __p1; \
+ __builtin_neon_vst3_v(__p0, __s1.val[0], __s1.val[1], __s1.val[2], 3); \
+})
+#else
+#define vst3_s64(__p0, __p1) __extension__ ({ \
+ int64x1x3_t __s1 = __p1; \
+ __builtin_neon_vst3_v(__p0, __s1.val[0], __s1.val[1], __s1.val[2], 3); \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vst3_s16(__p0, __p1) __extension__ ({ \
+ int16x4x3_t __s1 = __p1; \
+ __builtin_neon_vst3_v(__p0, __s1.val[0], __s1.val[1], __s1.val[2], 1); \
+})
+#else
+#define vst3_s16(__p0, __p1) __extension__ ({ \
+ int16x4x3_t __s1 = __p1; \
+ int16x4x3_t __rev1; \
+ __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \
+ __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \
+ __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 3, 2, 1, 0); \
+ __builtin_neon_vst3_v(__p0, __rev1.val[0], __rev1.val[1], __rev1.val[2], 1); \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vst3_lane_p8(__p0, __p1, __p2) __extension__ ({ \
+ poly8x8x3_t __s1 = __p1; \
+ __builtin_neon_vst3_lane_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], __p2, 4); \
+})
+#else
+#define vst3_lane_p8(__p0, __p1, __p2) __extension__ ({ \
+ poly8x8x3_t __s1 = __p1; \
+ poly8x8x3_t __rev1; \
+ __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
+ __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
+ __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \
+ __builtin_neon_vst3_lane_v(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], __p2, 4); \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vst3_lane_p16(__p0, __p1, __p2) __extension__ ({ \
+ poly16x4x3_t __s1 = __p1; \
+ __builtin_neon_vst3_lane_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], __p2, 5); \
+})
+#else
+#define vst3_lane_p16(__p0, __p1, __p2) __extension__ ({ \
+ poly16x4x3_t __s1 = __p1; \
+ poly16x4x3_t __rev1; \
+ __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \
+ __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \
+ __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 3, 2, 1, 0); \
+ __builtin_neon_vst3_lane_v(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], __p2, 5); \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vst3q_lane_p16(__p0, __p1, __p2) __extension__ ({ \
+ poly16x8x3_t __s1 = __p1; \
+ __builtin_neon_vst3q_lane_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], __p2, 37); \
+})
+#else
+#define vst3q_lane_p16(__p0, __p1, __p2) __extension__ ({ \
+ poly16x8x3_t __s1 = __p1; \
+ poly16x8x3_t __rev1; \
+ __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
+ __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
+ __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \
+ __builtin_neon_vst3q_lane_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], __p2, 37); \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vst3q_lane_u32(__p0, __p1, __p2) __extension__ ({ \
+ uint32x4x3_t __s1 = __p1; \
+ __builtin_neon_vst3q_lane_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], __p2, 50); \
+})
+#else
+#define vst3q_lane_u32(__p0, __p1, __p2) __extension__ ({ \
+ uint32x4x3_t __s1 = __p1; \
+ uint32x4x3_t __rev1; \
+ __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \
+ __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \
+ __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 3, 2, 1, 0); \
+ __builtin_neon_vst3q_lane_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], __p2, 50); \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vst3q_lane_u16(__p0, __p1, __p2) __extension__ ({ \
+ uint16x8x3_t __s1 = __p1; \
+ __builtin_neon_vst3q_lane_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], __p2, 49); \
+})
+#else
+#define vst3q_lane_u16(__p0, __p1, __p2) __extension__ ({ \
+ uint16x8x3_t __s1 = __p1; \
+ uint16x8x3_t __rev1; \
+ __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
+ __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
+ __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \
+ __builtin_neon_vst3q_lane_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], __p2, 49); \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vst3q_lane_f32(__p0, __p1, __p2) __extension__ ({ \
+ float32x4x3_t __s1 = __p1; \
+ __builtin_neon_vst3q_lane_v(__p0, __s1.val[0], __s1.val[1], __s1.val[2], __p2, 41); \
+})
+#else
+#define vst3q_lane_f32(__p0, __p1, __p2) __extension__ ({ \
+ float32x4x3_t __s1 = __p1; \
+ float32x4x3_t __rev1; \
+ __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \
+ __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \
+ __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 3, 2, 1, 0); \
+ __builtin_neon_vst3q_lane_v(__p0, __rev1.val[0], __rev1.val[1], __rev1.val[2], __p2, 41); \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vst3q_lane_f16(__p0, __p1, __p2) __extension__ ({ \
+ float16x8x3_t __s1 = __p1; \
+ __builtin_neon_vst3q_lane_v(__p0, __s1.val[0], __s1.val[1], __s1.val[2], __p2, 40); \
+})
+#else
+#define vst3q_lane_f16(__p0, __p1, __p2) __extension__ ({ \
+ float16x8x3_t __s1 = __p1; \
+ float16x8x3_t __rev1; \
+ __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
+ __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
+ __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \
+ __builtin_neon_vst3q_lane_v(__p0, __rev1.val[0], __rev1.val[1], __rev1.val[2], __p2, 40); \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vst3q_lane_s32(__p0, __p1, __p2) __extension__ ({ \
+ int32x4x3_t __s1 = __p1; \
+ __builtin_neon_vst3q_lane_v(__p0, __s1.val[0], __s1.val[1], __s1.val[2], __p2, 34); \
+})
+#else
+#define vst3q_lane_s32(__p0, __p1, __p2) __extension__ ({ \
+ int32x4x3_t __s1 = __p1; \
+ int32x4x3_t __rev1; \
+ __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \
+ __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \
+ __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 3, 2, 1, 0); \
+ __builtin_neon_vst3q_lane_v(__p0, __rev1.val[0], __rev1.val[1], __rev1.val[2], __p2, 34); \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vst3q_lane_s16(__p0, __p1, __p2) __extension__ ({ \
+ int16x8x3_t __s1 = __p1; \
+ __builtin_neon_vst3q_lane_v(__p0, __s1.val[0], __s1.val[1], __s1.val[2], __p2, 33); \
+})
+#else
+#define vst3q_lane_s16(__p0, __p1, __p2) __extension__ ({ \
+ int16x8x3_t __s1 = __p1; \
+ int16x8x3_t __rev1; \
+ __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
+ __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
+ __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \
+ __builtin_neon_vst3q_lane_v(__p0, __rev1.val[0], __rev1.val[1], __rev1.val[2], __p2, 33); \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vst3_lane_u8(__p0, __p1, __p2) __extension__ ({ \
+ uint8x8x3_t __s1 = __p1; \
+ __builtin_neon_vst3_lane_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], __p2, 16); \
+})
+#else
+#define vst3_lane_u8(__p0, __p1, __p2) __extension__ ({ \
+ uint8x8x3_t __s1 = __p1; \
+ uint8x8x3_t __rev1; \
+ __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
+ __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
+ __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \
+ __builtin_neon_vst3_lane_v(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], __p2, 16); \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vst3_lane_u32(__p0, __p1, __p2) __extension__ ({ \
+ uint32x2x3_t __s1 = __p1; \
+ __builtin_neon_vst3_lane_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], __p2, 18); \
+})
+#else
+#define vst3_lane_u32(__p0, __p1, __p2) __extension__ ({ \
+ uint32x2x3_t __s1 = __p1; \
+ uint32x2x3_t __rev1; \
+ __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \
+ __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \
+ __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 1, 0); \
+ __builtin_neon_vst3_lane_v(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], __p2, 18); \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vst3_lane_u16(__p0, __p1, __p2) __extension__ ({ \
+ uint16x4x3_t __s1 = __p1; \
+ __builtin_neon_vst3_lane_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], __p2, 17); \
+})
+#else
+#define vst3_lane_u16(__p0, __p1, __p2) __extension__ ({ \
+ uint16x4x3_t __s1 = __p1; \
+ uint16x4x3_t __rev1; \
+ __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \
+ __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \
+ __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 3, 2, 1, 0); \
+ __builtin_neon_vst3_lane_v(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], __p2, 17); \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vst3_lane_s8(__p0, __p1, __p2) __extension__ ({ \
+ int8x8x3_t __s1 = __p1; \
+ __builtin_neon_vst3_lane_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], __p2, 0); \
+})
+#else
+#define vst3_lane_s8(__p0, __p1, __p2) __extension__ ({ \
+ int8x8x3_t __s1 = __p1; \
+ int8x8x3_t __rev1; \
+ __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
+ __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
+ __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \
+ __builtin_neon_vst3_lane_v(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], __p2, 0); \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vst3_lane_f32(__p0, __p1, __p2) __extension__ ({ \
+ float32x2x3_t __s1 = __p1; \
+ __builtin_neon_vst3_lane_v(__p0, __s1.val[0], __s1.val[1], __s1.val[2], __p2, 9); \
+})
+#else
+#define vst3_lane_f32(__p0, __p1, __p2) __extension__ ({ \
+ float32x2x3_t __s1 = __p1; \
+ float32x2x3_t __rev1; \
+ __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \
+ __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \
+ __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 1, 0); \
+ __builtin_neon_vst3_lane_v(__p0, __rev1.val[0], __rev1.val[1], __rev1.val[2], __p2, 9); \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vst3_lane_f16(__p0, __p1, __p2) __extension__ ({ \
+ float16x4x3_t __s1 = __p1; \
+ __builtin_neon_vst3_lane_v(__p0, __s1.val[0], __s1.val[1], __s1.val[2], __p2, 8); \
+})
+#else
+#define vst3_lane_f16(__p0, __p1, __p2) __extension__ ({ \
+ float16x4x3_t __s1 = __p1; \
+ float16x4x3_t __rev1; \
+ __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \
+ __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \
+ __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 3, 2, 1, 0); \
+ __builtin_neon_vst3_lane_v(__p0, __rev1.val[0], __rev1.val[1], __rev1.val[2], __p2, 8); \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vst3_lane_s32(__p0, __p1, __p2) __extension__ ({ \
+ int32x2x3_t __s1 = __p1; \
+ __builtin_neon_vst3_lane_v(__p0, __s1.val[0], __s1.val[1], __s1.val[2], __p2, 2); \
+})
+#else
+#define vst3_lane_s32(__p0, __p1, __p2) __extension__ ({ \
+ int32x2x3_t __s1 = __p1; \
+ int32x2x3_t __rev1; \
+ __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \
+ __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \
+ __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 1, 0); \
+ __builtin_neon_vst3_lane_v(__p0, __rev1.val[0], __rev1.val[1], __rev1.val[2], __p2, 2); \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vst3_lane_s16(__p0, __p1, __p2) __extension__ ({ \
+ int16x4x3_t __s1 = __p1; \
+ __builtin_neon_vst3_lane_v(__p0, __s1.val[0], __s1.val[1], __s1.val[2], __p2, 1); \
+})
+#else
+#define vst3_lane_s16(__p0, __p1, __p2) __extension__ ({ \
+ int16x4x3_t __s1 = __p1; \
+ int16x4x3_t __rev1; \
+ __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \
+ __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \
+ __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 3, 2, 1, 0); \
+ __builtin_neon_vst3_lane_v(__p0, __rev1.val[0], __rev1.val[1], __rev1.val[2], __p2, 1); \
+})
+#endif
+
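+/* vst4: the four-register interleaved store, with .val[0] through .val[3]
+ * each reversed in the big-endian branch. */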
+#ifdef __LITTLE_ENDIAN__
+#define vst4_p8(__p0, __p1) __extension__ ({ \
+ poly8x8x4_t __s1 = __p1; \
+ __builtin_neon_vst4_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], (int8x8_t)__s1.val[3], 4); \
+})
+#else
+#define vst4_p8(__p0, __p1) __extension__ ({ \
+ poly8x8x4_t __s1 = __p1; \
+ poly8x8x4_t __rev1; \
+ __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
+ __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
+ __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \
+ __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 7, 6, 5, 4, 3, 2, 1, 0); \
+ __builtin_neon_vst4_v(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], (int8x8_t)__rev1.val[3], 4); \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vst4_p16(__p0, __p1) __extension__ ({ \
+ poly16x4x4_t __s1 = __p1; \
+ __builtin_neon_vst4_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], (int8x8_t)__s1.val[3], 5); \
+})
+#else
+#define vst4_p16(__p0, __p1) __extension__ ({ \
+ poly16x4x4_t __s1 = __p1; \
+ poly16x4x4_t __rev1; \
+ __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \
+ __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \
+ __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 3, 2, 1, 0); \
+ __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 3, 2, 1, 0); \
+ __builtin_neon_vst4_v(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], (int8x8_t)__rev1.val[3], 5); \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vst4q_p8(__p0, __p1) __extension__ ({ \
+ poly8x16x4_t __s1 = __p1; \
+ __builtin_neon_vst4q_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], (int8x16_t)__s1.val[3], 36); \
+})
+#else
+#define vst4q_p8(__p0, __p1) __extension__ ({ \
+ poly8x16x4_t __s1 = __p1; \
+ poly8x16x4_t __rev1; \
+ __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
+ __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
+ __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
+ __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
+ __builtin_neon_vst4q_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], (int8x16_t)__rev1.val[3], 36); \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vst4q_p16(__p0, __p1) __extension__ ({ \
+ poly16x8x4_t __s1 = __p1; \
+ __builtin_neon_vst4q_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], (int8x16_t)__s1.val[3], 37); \
+})
+#else
+#define vst4q_p16(__p0, __p1) __extension__ ({ \
+ poly16x8x4_t __s1 = __p1; \
+ poly16x8x4_t __rev1; \
+ __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
+ __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
+ __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \
+ __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 7, 6, 5, 4, 3, 2, 1, 0); \
+ __builtin_neon_vst4q_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], (int8x16_t)__rev1.val[3], 37); \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vst4q_u8(__p0, __p1) __extension__ ({ \
+ uint8x16x4_t __s1 = __p1; \
+ __builtin_neon_vst4q_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], (int8x16_t)__s1.val[3], 48); \
+})
+#else
+#define vst4q_u8(__p0, __p1) __extension__ ({ \
+ uint8x16x4_t __s1 = __p1; \
+ uint8x16x4_t __rev1; \
+ __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
+ __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
+ __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
+ __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
+ __builtin_neon_vst4q_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], (int8x16_t)__rev1.val[3], 48); \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vst4q_u32(__p0, __p1) __extension__ ({ \
+ uint32x4x4_t __s1 = __p1; \
+ __builtin_neon_vst4q_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], (int8x16_t)__s1.val[3], 50); \
+})
+#else
+#define vst4q_u32(__p0, __p1) __extension__ ({ \
+ uint32x4x4_t __s1 = __p1; \
+ uint32x4x4_t __rev1; \
+ __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \
+ __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \
+ __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 3, 2, 1, 0); \
+ __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 3, 2, 1, 0); \
+ __builtin_neon_vst4q_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], (int8x16_t)__rev1.val[3], 50); \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vst4q_u16(__p0, __p1) __extension__ ({ \
+ uint16x8x4_t __s1 = __p1; \
+ __builtin_neon_vst4q_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], (int8x16_t)__s1.val[3], 49); \
+})
+#else
+#define vst4q_u16(__p0, __p1) __extension__ ({ \
+ uint16x8x4_t __s1 = __p1; \
+ uint16x8x4_t __rev1; \
+ __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
+ __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
+ __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \
+ __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 7, 6, 5, 4, 3, 2, 1, 0); \
+ __builtin_neon_vst4q_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], (int8x16_t)__rev1.val[3], 49); \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vst4q_s8(__p0, __p1) __extension__ ({ \
+ int8x16x4_t __s1 = __p1; \
+ __builtin_neon_vst4q_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], (int8x16_t)__s1.val[3], 32); \
+})
+#else
+#define vst4q_s8(__p0, __p1) __extension__ ({ \
+ int8x16x4_t __s1 = __p1; \
+ int8x16x4_t __rev1; \
+ __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
+ __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
+ __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
+ __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
+ __builtin_neon_vst4q_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], (int8x16_t)__rev1.val[3], 32); \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vst4q_f32(__p0, __p1) __extension__ ({ \
+ float32x4x4_t __s1 = __p1; \
+ __builtin_neon_vst4q_v(__p0, __s1.val[0], __s1.val[1], __s1.val[2], __s1.val[3], 41); \
+})
+#else
+#define vst4q_f32(__p0, __p1) __extension__ ({ \
+ float32x4x4_t __s1 = __p1; \
+ float32x4x4_t __rev1; \
+ __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \
+ __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \
+ __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 3, 2, 1, 0); \
+ __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 3, 2, 1, 0); \
+ __builtin_neon_vst4q_v(__p0, __rev1.val[0], __rev1.val[1], __rev1.val[2], __rev1.val[3], 41); \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vst4q_f16(__p0, __p1) __extension__ ({ \
+ float16x8x4_t __s1 = __p1; \
+ __builtin_neon_vst4q_v(__p0, __s1.val[0], __s1.val[1], __s1.val[2], __s1.val[3], 40); \
+})
+#else
+#define vst4q_f16(__p0, __p1) __extension__ ({ \
+ float16x8x4_t __s1 = __p1; \
+ float16x8x4_t __rev1; \
+ __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
+ __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
+ __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \
+ __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 7, 6, 5, 4, 3, 2, 1, 0); \
+ __builtin_neon_vst4q_v(__p0, __rev1.val[0], __rev1.val[1], __rev1.val[2], __rev1.val[3], 40); \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vst4q_s32(__p0, __p1) __extension__ ({ \
+ int32x4x4_t __s1 = __p1; \
+ __builtin_neon_vst4q_v(__p0, __s1.val[0], __s1.val[1], __s1.val[2], __s1.val[3], 34); \
+})
+#else
+#define vst4q_s32(__p0, __p1) __extension__ ({ \
+ int32x4x4_t __s1 = __p1; \
+ int32x4x4_t __rev1; \
+ __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \
+ __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \
+ __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 3, 2, 1, 0); \
+ __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 3, 2, 1, 0); \
+ __builtin_neon_vst4q_v(__p0, __rev1.val[0], __rev1.val[1], __rev1.val[2], __rev1.val[3], 34); \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vst4q_s16(__p0, __p1) __extension__ ({ \
+ int16x8x4_t __s1 = __p1; \
+ __builtin_neon_vst4q_v(__p0, __s1.val[0], __s1.val[1], __s1.val[2], __s1.val[3], 33); \
+})
+#else
+#define vst4q_s16(__p0, __p1) __extension__ ({ \
+ int16x8x4_t __s1 = __p1; \
+ int16x8x4_t __rev1; \
+ __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
+ __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
+ __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \
+ __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 7, 6, 5, 4, 3, 2, 1, 0); \
+ __builtin_neon_vst4q_v(__p0, __rev1.val[0], __rev1.val[1], __rev1.val[2], __rev1.val[3], 33); \
+})
+#endif
+
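+/*
+ * vst4_* (D-register forms) store four 64-bit vectors to __p0 with 4-way
+ * interleaving: 32 bytes laid out val[0][0], val[1][0], val[2][0],
+ * val[3][0], val[0][1], ... Illustrative round trip (a sketch, assuming an
+ * RGBA8 pixel buffer):
+ *
+ *   uint8x8x4_t px = vld4_u8(src);  // de-interleave 8 pixels into R,G,B,A planes
+ *   vst4_u8(dst, px);               // re-interleave and store the 8 pixels
+ */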
+#ifdef __LITTLE_ENDIAN__
+#define vst4_u8(__p0, __p1) __extension__ ({ \
+ uint8x8x4_t __s1 = __p1; \
+ __builtin_neon_vst4_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], (int8x8_t)__s1.val[3], 16); \
+})
+#else
+#define vst4_u8(__p0, __p1) __extension__ ({ \
+ uint8x8x4_t __s1 = __p1; \
+ uint8x8x4_t __rev1; \
+ __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
+ __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
+ __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \
+ __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 7, 6, 5, 4, 3, 2, 1, 0); \
+ __builtin_neon_vst4_v(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], (int8x8_t)__rev1.val[3], 16); \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vst4_u32(__p0, __p1) __extension__ ({ \
+ uint32x2x4_t __s1 = __p1; \
+ __builtin_neon_vst4_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], (int8x8_t)__s1.val[3], 18); \
+})
+#else
+#define vst4_u32(__p0, __p1) __extension__ ({ \
+ uint32x2x4_t __s1 = __p1; \
+ uint32x2x4_t __rev1; \
+ __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \
+ __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \
+ __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 1, 0); \
+ __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 1, 0); \
+ __builtin_neon_vst4_v(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], (int8x8_t)__rev1.val[3], 18); \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vst4_u64(__p0, __p1) __extension__ ({ \
+ uint64x1x4_t __s1 = __p1; \
+ __builtin_neon_vst4_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], (int8x8_t)__s1.val[3], 19); \
+})
+#else
+#define vst4_u64(__p0, __p1) __extension__ ({ \
+ uint64x1x4_t __s1 = __p1; \
+ __builtin_neon_vst4_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], (int8x8_t)__s1.val[3], 19); \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vst4_u16(__p0, __p1) __extension__ ({ \
+ uint16x4x4_t __s1 = __p1; \
+ __builtin_neon_vst4_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], (int8x8_t)__s1.val[3], 17); \
+})
+#else
+#define vst4_u16(__p0, __p1) __extension__ ({ \
+ uint16x4x4_t __s1 = __p1; \
+ uint16x4x4_t __rev1; \
+ __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \
+ __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \
+ __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 3, 2, 1, 0); \
+ __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 3, 2, 1, 0); \
+ __builtin_neon_vst4_v(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], (int8x8_t)__rev1.val[3], 17); \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vst4_s8(__p0, __p1) __extension__ ({ \
+ int8x8x4_t __s1 = __p1; \
+ __builtin_neon_vst4_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], (int8x8_t)__s1.val[3], 0); \
+})
+#else
+#define vst4_s8(__p0, __p1) __extension__ ({ \
+ int8x8x4_t __s1 = __p1; \
+ int8x8x4_t __rev1; \
+ __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
+ __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
+ __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \
+ __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 7, 6, 5, 4, 3, 2, 1, 0); \
+ __builtin_neon_vst4_v(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], (int8x8_t)__rev1.val[3], 0); \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vst4_f32(__p0, __p1) __extension__ ({ \
+ float32x2x4_t __s1 = __p1; \
+ __builtin_neon_vst4_v(__p0, __s1.val[0], __s1.val[1], __s1.val[2], __s1.val[3], 9); \
+})
+#else
+#define vst4_f32(__p0, __p1) __extension__ ({ \
+ float32x2x4_t __s1 = __p1; \
+ float32x2x4_t __rev1; \
+ __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \
+ __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \
+ __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 1, 0); \
+ __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 1, 0); \
+ __builtin_neon_vst4_v(__p0, __rev1.val[0], __rev1.val[1], __rev1.val[2], __rev1.val[3], 9); \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vst4_f16(__p0, __p1) __extension__ ({ \
+ float16x4x4_t __s1 = __p1; \
+ __builtin_neon_vst4_v(__p0, __s1.val[0], __s1.val[1], __s1.val[2], __s1.val[3], 8); \
+})
+#else
+#define vst4_f16(__p0, __p1) __extension__ ({ \
+ float16x4x4_t __s1 = __p1; \
+ float16x4x4_t __rev1; \
+ __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \
+ __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \
+ __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 3, 2, 1, 0); \
+ __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 3, 2, 1, 0); \
+ __builtin_neon_vst4_v(__p0, __rev1.val[0], __rev1.val[1], __rev1.val[2], __rev1.val[3], 8); \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vst4_s32(__p0, __p1) __extension__ ({ \
+ int32x2x4_t __s1 = __p1; \
+ __builtin_neon_vst4_v(__p0, __s1.val[0], __s1.val[1], __s1.val[2], __s1.val[3], 2); \
+})
+#else
+#define vst4_s32(__p0, __p1) __extension__ ({ \
+ int32x2x4_t __s1 = __p1; \
+ int32x2x4_t __rev1; \
+ __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \
+ __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \
+ __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 1, 0); \
+ __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 1, 0); \
+ __builtin_neon_vst4_v(__p0, __rev1.val[0], __rev1.val[1], __rev1.val[2], __rev1.val[3], 2); \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vst4_s64(__p0, __p1) __extension__ ({ \
+ int64x1x4_t __s1 = __p1; \
+ __builtin_neon_vst4_v(__p0, __s1.val[0], __s1.val[1], __s1.val[2], __s1.val[3], 3); \
+})
+#else
+#define vst4_s64(__p0, __p1) __extension__ ({ \
+ int64x1x4_t __s1 = __p1; \
+ __builtin_neon_vst4_v(__p0, __s1.val[0], __s1.val[1], __s1.val[2], __s1.val[3], 3); \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vst4_s16(__p0, __p1) __extension__ ({ \
+ int16x4x4_t __s1 = __p1; \
+ __builtin_neon_vst4_v(__p0, __s1.val[0], __s1.val[1], __s1.val[2], __s1.val[3], 1); \
+})
+#else
+#define vst4_s16(__p0, __p1) __extension__ ({ \
+ int16x4x4_t __s1 = __p1; \
+ int16x4x4_t __rev1; \
+ __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \
+ __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \
+ __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 3, 2, 1, 0); \
+ __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 3, 2, 1, 0); \
+ __builtin_neon_vst4_v(__p0, __rev1.val[0], __rev1.val[1], __rev1.val[2], __rev1.val[3], 1); \
+})
+#endif
+
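+/*
+ * vst4_lane_* / vst4q_lane_* store only lane __p2 from each of the four
+ * source vectors, written contiguously. Sketch: store one interleaved RGBA
+ * pixel (4 bytes) out of the de-interleaved registers:
+ *
+ *   vst4_lane_u8(dst, px, 3);  // writes px.val[0..3][3], i.e. pixel 3
+ */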
+#ifdef __LITTLE_ENDIAN__
+#define vst4_lane_p8(__p0, __p1, __p2) __extension__ ({ \
+ poly8x8x4_t __s1 = __p1; \
+ __builtin_neon_vst4_lane_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], (int8x8_t)__s1.val[3], __p2, 4); \
+})
+#else
+#define vst4_lane_p8(__p0, __p1, __p2) __extension__ ({ \
+ poly8x8x4_t __s1 = __p1; \
+ poly8x8x4_t __rev1; \
+ __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
+ __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
+ __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \
+ __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 7, 6, 5, 4, 3, 2, 1, 0); \
+ __builtin_neon_vst4_lane_v(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], (int8x8_t)__rev1.val[3], __p2, 4); \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vst4_lane_p16(__p0, __p1, __p2) __extension__ ({ \
+ poly16x4x4_t __s1 = __p1; \
+ __builtin_neon_vst4_lane_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], (int8x8_t)__s1.val[3], __p2, 5); \
+})
+#else
+#define vst4_lane_p16(__p0, __p1, __p2) __extension__ ({ \
+ poly16x4x4_t __s1 = __p1; \
+ poly16x4x4_t __rev1; \
+ __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \
+ __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \
+ __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 3, 2, 1, 0); \
+ __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 3, 2, 1, 0); \
+ __builtin_neon_vst4_lane_v(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], (int8x8_t)__rev1.val[3], __p2, 5); \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vst4q_lane_p16(__p0, __p1, __p2) __extension__ ({ \
+ poly16x8x4_t __s1 = __p1; \
+ __builtin_neon_vst4q_lane_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], (int8x16_t)__s1.val[3], __p2, 37); \
+})
+#else
+#define vst4q_lane_p16(__p0, __p1, __p2) __extension__ ({ \
+ poly16x8x4_t __s1 = __p1; \
+ poly16x8x4_t __rev1; \
+ __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
+ __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
+ __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \
+ __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 7, 6, 5, 4, 3, 2, 1, 0); \
+ __builtin_neon_vst4q_lane_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], (int8x16_t)__rev1.val[3], __p2, 37); \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vst4q_lane_u32(__p0, __p1, __p2) __extension__ ({ \
+ uint32x4x4_t __s1 = __p1; \
+ __builtin_neon_vst4q_lane_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], (int8x16_t)__s1.val[3], __p2, 50); \
+})
+#else
+#define vst4q_lane_u32(__p0, __p1, __p2) __extension__ ({ \
+ uint32x4x4_t __s1 = __p1; \
+ uint32x4x4_t __rev1; \
+ __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \
+ __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \
+ __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 3, 2, 1, 0); \
+ __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 3, 2, 1, 0); \
+ __builtin_neon_vst4q_lane_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], (int8x16_t)__rev1.val[3], __p2, 50); \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vst4q_lane_u16(__p0, __p1, __p2) __extension__ ({ \
+ uint16x8x4_t __s1 = __p1; \
+ __builtin_neon_vst4q_lane_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], (int8x16_t)__s1.val[3], __p2, 49); \
+})
+#else
+#define vst4q_lane_u16(__p0, __p1, __p2) __extension__ ({ \
+ uint16x8x4_t __s1 = __p1; \
+ uint16x8x4_t __rev1; \
+ __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
+ __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
+ __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \
+ __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 7, 6, 5, 4, 3, 2, 1, 0); \
+ __builtin_neon_vst4q_lane_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], (int8x16_t)__rev1.val[3], __p2, 49); \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vst4q_lane_f32(__p0, __p1, __p2) __extension__ ({ \
+ float32x4x4_t __s1 = __p1; \
+ __builtin_neon_vst4q_lane_v(__p0, __s1.val[0], __s1.val[1], __s1.val[2], __s1.val[3], __p2, 41); \
+})
+#else
+#define vst4q_lane_f32(__p0, __p1, __p2) __extension__ ({ \
+ float32x4x4_t __s1 = __p1; \
+ float32x4x4_t __rev1; \
+ __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \
+ __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \
+ __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 3, 2, 1, 0); \
+ __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 3, 2, 1, 0); \
+ __builtin_neon_vst4q_lane_v(__p0, __rev1.val[0], __rev1.val[1], __rev1.val[2], __rev1.val[3], __p2, 41); \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vst4q_lane_f16(__p0, __p1, __p2) __extension__ ({ \
+ float16x8x4_t __s1 = __p1; \
+ __builtin_neon_vst4q_lane_v(__p0, __s1.val[0], __s1.val[1], __s1.val[2], __s1.val[3], __p2, 40); \
+})
+#else
+#define vst4q_lane_f16(__p0, __p1, __p2) __extension__ ({ \
+ float16x8x4_t __s1 = __p1; \
+ float16x8x4_t __rev1; \
+ __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
+ __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
+ __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \
+ __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 7, 6, 5, 4, 3, 2, 1, 0); \
+ __builtin_neon_vst4q_lane_v(__p0, __rev1.val[0], __rev1.val[1], __rev1.val[2], __rev1.val[3], __p2, 40); \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vst4q_lane_s32(__p0, __p1, __p2) __extension__ ({ \
+ int32x4x4_t __s1 = __p1; \
+ __builtin_neon_vst4q_lane_v(__p0, __s1.val[0], __s1.val[1], __s1.val[2], __s1.val[3], __p2, 34); \
+})
+#else
+#define vst4q_lane_s32(__p0, __p1, __p2) __extension__ ({ \
+ int32x4x4_t __s1 = __p1; \
+ int32x4x4_t __rev1; \
+ __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \
+ __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \
+ __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 3, 2, 1, 0); \
+ __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 3, 2, 1, 0); \
+ __builtin_neon_vst4q_lane_v(__p0, __rev1.val[0], __rev1.val[1], __rev1.val[2], __rev1.val[3], __p2, 34); \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vst4q_lane_s16(__p0, __p1, __p2) __extension__ ({ \
+ int16x8x4_t __s1 = __p1; \
+ __builtin_neon_vst4q_lane_v(__p0, __s1.val[0], __s1.val[1], __s1.val[2], __s1.val[3], __p2, 33); \
+})
+#else
+#define vst4q_lane_s16(__p0, __p1, __p2) __extension__ ({ \
+ int16x8x4_t __s1 = __p1; \
+ int16x8x4_t __rev1; \
+ __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
+ __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
+ __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \
+ __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 7, 6, 5, 4, 3, 2, 1, 0); \
+ __builtin_neon_vst4q_lane_v(__p0, __rev1.val[0], __rev1.val[1], __rev1.val[2], __rev1.val[3], __p2, 33); \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vst4_lane_u8(__p0, __p1, __p2) __extension__ ({ \
+ uint8x8x4_t __s1 = __p1; \
+ __builtin_neon_vst4_lane_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], (int8x8_t)__s1.val[3], __p2, 16); \
+})
+#else
+#define vst4_lane_u8(__p0, __p1, __p2) __extension__ ({ \
+ uint8x8x4_t __s1 = __p1; \
+ uint8x8x4_t __rev1; \
+ __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
+ __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
+ __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \
+ __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 7, 6, 5, 4, 3, 2, 1, 0); \
+ __builtin_neon_vst4_lane_v(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], (int8x8_t)__rev1.val[3], __p2, 16); \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vst4_lane_u32(__p0, __p1, __p2) __extension__ ({ \
+ uint32x2x4_t __s1 = __p1; \
+ __builtin_neon_vst4_lane_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], (int8x8_t)__s1.val[3], __p2, 18); \
+})
+#else
+#define vst4_lane_u32(__p0, __p1, __p2) __extension__ ({ \
+ uint32x2x4_t __s1 = __p1; \
+ uint32x2x4_t __rev1; \
+ __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \
+ __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \
+ __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 1, 0); \
+ __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 1, 0); \
+ __builtin_neon_vst4_lane_v(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], (int8x8_t)__rev1.val[3], __p2, 18); \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vst4_lane_u16(__p0, __p1, __p2) __extension__ ({ \
+ uint16x4x4_t __s1 = __p1; \
+ __builtin_neon_vst4_lane_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], (int8x8_t)__s1.val[3], __p2, 17); \
+})
+#else
+#define vst4_lane_u16(__p0, __p1, __p2) __extension__ ({ \
+ uint16x4x4_t __s1 = __p1; \
+ uint16x4x4_t __rev1; \
+ __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \
+ __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \
+ __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 3, 2, 1, 0); \
+ __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 3, 2, 1, 0); \
+ __builtin_neon_vst4_lane_v(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], (int8x8_t)__rev1.val[3], __p2, 17); \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vst4_lane_s8(__p0, __p1, __p2) __extension__ ({ \
+ int8x8x4_t __s1 = __p1; \
+ __builtin_neon_vst4_lane_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], (int8x8_t)__s1.val[3], __p2, 0); \
+})
+#else
+#define vst4_lane_s8(__p0, __p1, __p2) __extension__ ({ \
+ int8x8x4_t __s1 = __p1; \
+ int8x8x4_t __rev1; \
+ __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
+ __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
+ __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \
+ __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 7, 6, 5, 4, 3, 2, 1, 0); \
+ __builtin_neon_vst4_lane_v(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], (int8x8_t)__rev1.val[3], __p2, 0); \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vst4_lane_f32(__p0, __p1, __p2) __extension__ ({ \
+ float32x2x4_t __s1 = __p1; \
+ __builtin_neon_vst4_lane_v(__p0, __s1.val[0], __s1.val[1], __s1.val[2], __s1.val[3], __p2, 9); \
+})
+#else
+#define vst4_lane_f32(__p0, __p1, __p2) __extension__ ({ \
+ float32x2x4_t __s1 = __p1; \
+ float32x2x4_t __rev1; \
+ __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \
+ __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \
+ __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 1, 0); \
+ __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 1, 0); \
+ __builtin_neon_vst4_lane_v(__p0, __rev1.val[0], __rev1.val[1], __rev1.val[2], __rev1.val[3], __p2, 9); \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vst4_lane_f16(__p0, __p1, __p2) __extension__ ({ \
+ float16x4x4_t __s1 = __p1; \
+ __builtin_neon_vst4_lane_v(__p0, __s1.val[0], __s1.val[1], __s1.val[2], __s1.val[3], __p2, 8); \
+})
+#else
+#define vst4_lane_f16(__p0, __p1, __p2) __extension__ ({ \
+ float16x4x4_t __s1 = __p1; \
+ float16x4x4_t __rev1; \
+ __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \
+ __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \
+ __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 3, 2, 1, 0); \
+ __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 3, 2, 1, 0); \
+ __builtin_neon_vst4_lane_v(__p0, __rev1.val[0], __rev1.val[1], __rev1.val[2], __rev1.val[3], __p2, 8); \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vst4_lane_s32(__p0, __p1, __p2) __extension__ ({ \
+ int32x2x4_t __s1 = __p1; \
+ __builtin_neon_vst4_lane_v(__p0, __s1.val[0], __s1.val[1], __s1.val[2], __s1.val[3], __p2, 2); \
+})
+#else
+#define vst4_lane_s32(__p0, __p1, __p2) __extension__ ({ \
+ int32x2x4_t __s1 = __p1; \
+ int32x2x4_t __rev1; \
+ __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \
+ __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \
+ __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 1, 0); \
+ __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 1, 0); \
+ __builtin_neon_vst4_lane_v(__p0, __rev1.val[0], __rev1.val[1], __rev1.val[2], __rev1.val[3], __p2, 2); \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vst4_lane_s16(__p0, __p1, __p2) __extension__ ({ \
+ int16x4x4_t __s1 = __p1; \
+ __builtin_neon_vst4_lane_v(__p0, __s1.val[0], __s1.val[1], __s1.val[2], __s1.val[3], __p2, 1); \
+})
+#else
+#define vst4_lane_s16(__p0, __p1, __p2) __extension__ ({ \
+ int16x4x4_t __s1 = __p1; \
+ int16x4x4_t __rev1; \
+ __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \
+ __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \
+ __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 3, 2, 1, 0); \
+ __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 3, 2, 1, 0); \
+ __builtin_neon_vst4_lane_v(__p0, __rev1.val[0], __rev1.val[1], __rev1.val[2], __rev1.val[3], __p2, 1); \
+})
+#endif
+
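+/*
+ * vsub_* / vsubq_* are plain lane-wise subtraction, expressed with the
+ * vector - operator; integer lanes wrap modulo 2^n. For example:
+ *
+ *   int32x4_t d = vsubq_s32(a, b);  // d[i] = a[i] - b[i] for i = 0..3
+ */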
+#ifdef __LITTLE_ENDIAN__
+__ai uint8x16_t vsubq_u8(uint8x16_t __p0, uint8x16_t __p1) {
+ uint8x16_t __ret;
+ __ret = __p0 - __p1;
+ return __ret;
+}
+#else
+__ai uint8x16_t vsubq_u8(uint8x16_t __p0, uint8x16_t __p1) {
+ uint8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
+ uint8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
+ uint8x16_t __ret;
+ __ret = __rev0 - __rev1;
+ __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint32x4_t vsubq_u32(uint32x4_t __p0, uint32x4_t __p1) {
+ uint32x4_t __ret;
+ __ret = __p0 - __p1;
+ return __ret;
+}
+#else
+__ai uint32x4_t vsubq_u32(uint32x4_t __p0, uint32x4_t __p1) {
+ uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
+ uint32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
+ uint32x4_t __ret;
+ __ret = __rev0 - __rev1;
+ __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint64x2_t vsubq_u64(uint64x2_t __p0, uint64x2_t __p1) {
+ uint64x2_t __ret;
+ __ret = __p0 - __p1;
+ return __ret;
+}
+#else
+__ai uint64x2_t vsubq_u64(uint64x2_t __p0, uint64x2_t __p1) {
+ uint64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
+ uint64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
+ uint64x2_t __ret;
+ __ret = __rev0 - __rev1;
+ __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint16x8_t vsubq_u16(uint16x8_t __p0, uint16x8_t __p1) {
+ uint16x8_t __ret;
+ __ret = __p0 - __p1;
+ return __ret;
+}
+#else
+__ai uint16x8_t vsubq_u16(uint16x8_t __p0, uint16x8_t __p1) {
+ uint16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
+ uint16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
+ uint16x8_t __ret;
+ __ret = __rev0 - __rev1;
+ __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai int8x16_t vsubq_s8(int8x16_t __p0, int8x16_t __p1) {
+ int8x16_t __ret;
+ __ret = __p0 - __p1;
+ return __ret;
+}
+#else
+__ai int8x16_t vsubq_s8(int8x16_t __p0, int8x16_t __p1) {
+ int8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
+ int8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
+ int8x16_t __ret;
+ __ret = __rev0 - __rev1;
+ __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai float32x4_t vsubq_f32(float32x4_t __p0, float32x4_t __p1) {
+ float32x4_t __ret;
+ __ret = __p0 - __p1;
+ return __ret;
+}
+#else
+__ai float32x4_t vsubq_f32(float32x4_t __p0, float32x4_t __p1) {
+ float32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
+ float32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
+ float32x4_t __ret;
+ __ret = __rev0 - __rev1;
+ __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai int32x4_t vsubq_s32(int32x4_t __p0, int32x4_t __p1) {
+ int32x4_t __ret;
+ __ret = __p0 - __p1;
+ return __ret;
+}
+#else
+__ai int32x4_t vsubq_s32(int32x4_t __p0, int32x4_t __p1) {
+ int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
+ int32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
+ int32x4_t __ret;
+ __ret = __rev0 - __rev1;
+ __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai int64x2_t vsubq_s64(int64x2_t __p0, int64x2_t __p1) {
+ int64x2_t __ret;
+ __ret = __p0 - __p1;
+ return __ret;
+}
+#else
+__ai int64x2_t vsubq_s64(int64x2_t __p0, int64x2_t __p1) {
+ int64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
+ int64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
+ int64x2_t __ret;
+ __ret = __rev0 - __rev1;
+ __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai int16x8_t vsubq_s16(int16x8_t __p0, int16x8_t __p1) {
+ int16x8_t __ret;
+ __ret = __p0 - __p1;
+ return __ret;
+}
+#else
+__ai int16x8_t vsubq_s16(int16x8_t __p0, int16x8_t __p1) {
+ int16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
+ int16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
+ int16x8_t __ret;
+ __ret = __rev0 - __rev1;
+ __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint8x8_t vsub_u8(uint8x8_t __p0, uint8x8_t __p1) {
+ uint8x8_t __ret;
+ __ret = __p0 - __p1;
+ return __ret;
+}
+#else
+__ai uint8x8_t vsub_u8(uint8x8_t __p0, uint8x8_t __p1) {
+ uint8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
+ uint8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
+ uint8x8_t __ret;
+ __ret = __rev0 - __rev1;
+ __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint32x2_t vsub_u32(uint32x2_t __p0, uint32x2_t __p1) {
+ uint32x2_t __ret;
+ __ret = __p0 - __p1;
+ return __ret;
+}
+#else
+__ai uint32x2_t vsub_u32(uint32x2_t __p0, uint32x2_t __p1) {
+ uint32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
+ uint32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
+ uint32x2_t __ret;
+ __ret = __rev0 - __rev1;
+ __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint64x1_t vsub_u64(uint64x1_t __p0, uint64x1_t __p1) {
+ uint64x1_t __ret;
+ __ret = __p0 - __p1;
+ return __ret;
+}
+#else
+__ai uint64x1_t vsub_u64(uint64x1_t __p0, uint64x1_t __p1) {
+ uint64x1_t __ret;
+ __ret = __p0 - __p1;
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint16x4_t vsub_u16(uint16x4_t __p0, uint16x4_t __p1) {
+ uint16x4_t __ret;
+ __ret = __p0 - __p1;
+ return __ret;
+}
+#else
+__ai uint16x4_t vsub_u16(uint16x4_t __p0, uint16x4_t __p1) {
+ uint16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
+ uint16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
+ uint16x4_t __ret;
+ __ret = __rev0 - __rev1;
+ __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai int8x8_t vsub_s8(int8x8_t __p0, int8x8_t __p1) {
+ int8x8_t __ret;
+ __ret = __p0 - __p1;
+ return __ret;
+}
+#else
+__ai int8x8_t vsub_s8(int8x8_t __p0, int8x8_t __p1) {
+ int8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
+ int8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
+ int8x8_t __ret;
+ __ret = __rev0 - __rev1;
+ __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai float32x2_t vsub_f32(float32x2_t __p0, float32x2_t __p1) {
+ float32x2_t __ret;
+ __ret = __p0 - __p1;
+ return __ret;
+}
+#else
+__ai float32x2_t vsub_f32(float32x2_t __p0, float32x2_t __p1) {
+ float32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
+ float32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
+ float32x2_t __ret;
+ __ret = __rev0 - __rev1;
+ __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai int32x2_t vsub_s32(int32x2_t __p0, int32x2_t __p1) {
+ int32x2_t __ret;
+ __ret = __p0 - __p1;
+ return __ret;
+}
+#else
+__ai int32x2_t vsub_s32(int32x2_t __p0, int32x2_t __p1) {
+ int32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
+ int32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
+ int32x2_t __ret;
+ __ret = __rev0 - __rev1;
+ __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai int64x1_t vsub_s64(int64x1_t __p0, int64x1_t __p1) {
+ int64x1_t __ret;
+ __ret = __p0 - __p1;
+ return __ret;
+}
+#else
+__ai int64x1_t vsub_s64(int64x1_t __p0, int64x1_t __p1) {
+ int64x1_t __ret;
+ __ret = __p0 - __p1;
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai int16x4_t vsub_s16(int16x4_t __p0, int16x4_t __p1) {
+ int16x4_t __ret;
+ __ret = __p0 - __p1;
+ return __ret;
+}
+#else
+__ai int16x4_t vsub_s16(int16x4_t __p0, int16x4_t __p1) {
+ int16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
+ int16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
+ int16x4_t __ret;
+ __ret = __rev0 - __rev1;
+ __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
+ return __ret;
+}
+#endif
+
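+/*
+ * vsubhn_* (subtract, narrow to high half): each result lane is the most
+ * significant half of the corresponding wide difference, e.g. for
+ * vsubhn_u32 below, ret[i] = (uint16_t)((__p0[i] - __p1[i]) >> 16). The
+ * __noswap_* companions skip the lane reversal so that other big-endian
+ * wrappers in this header can call them on already-reversed operands.
+ */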
+#ifdef __LITTLE_ENDIAN__
+__ai uint16x4_t vsubhn_u32(uint32x4_t __p0, uint32x4_t __p1) {
+ uint16x4_t __ret;
+ __ret = (uint16x4_t) __builtin_neon_vsubhn_v((int8x16_t)__p0, (int8x16_t)__p1, 17);
+ return __ret;
+}
+#else
+__ai uint16x4_t vsubhn_u32(uint32x4_t __p0, uint32x4_t __p1) {
+ uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
+ uint32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
+ uint16x4_t __ret;
+ __ret = (uint16x4_t) __builtin_neon_vsubhn_v((int8x16_t)__rev0, (int8x16_t)__rev1, 17);
+ __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
+ return __ret;
+}
+__ai uint16x4_t __noswap_vsubhn_u32(uint32x4_t __p0, uint32x4_t __p1) {
+ uint16x4_t __ret;
+ __ret = (uint16x4_t) __builtin_neon_vsubhn_v((int8x16_t)__p0, (int8x16_t)__p1, 17);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint32x2_t vsubhn_u64(uint64x2_t __p0, uint64x2_t __p1) {
+ uint32x2_t __ret;
+ __ret = (uint32x2_t) __builtin_neon_vsubhn_v((int8x16_t)__p0, (int8x16_t)__p1, 18);
+ return __ret;
+}
+#else
+__ai uint32x2_t vsubhn_u64(uint64x2_t __p0, uint64x2_t __p1) {
+ uint64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
+ uint64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
+ uint32x2_t __ret;
+ __ret = (uint32x2_t) __builtin_neon_vsubhn_v((int8x16_t)__rev0, (int8x16_t)__rev1, 18);
+ __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
+ return __ret;
+}
+__ai uint32x2_t __noswap_vsubhn_u64(uint64x2_t __p0, uint64x2_t __p1) {
+ uint32x2_t __ret;
+ __ret = (uint32x2_t) __builtin_neon_vsubhn_v((int8x16_t)__p0, (int8x16_t)__p1, 18);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint8x8_t vsubhn_u16(uint16x8_t __p0, uint16x8_t __p1) {
+ uint8x8_t __ret;
+ __ret = (uint8x8_t) __builtin_neon_vsubhn_v((int8x16_t)__p0, (int8x16_t)__p1, 16);
+ return __ret;
+}
+#else
+__ai uint8x8_t vsubhn_u16(uint16x8_t __p0, uint16x8_t __p1) {
+ uint16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
+ uint16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
+ uint8x8_t __ret;
+ __ret = (uint8x8_t) __builtin_neon_vsubhn_v((int8x16_t)__rev0, (int8x16_t)__rev1, 16);
+ __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
+ return __ret;
+}
+__ai uint8x8_t __noswap_vsubhn_u16(uint16x8_t __p0, uint16x8_t __p1) {
+ uint8x8_t __ret;
+ __ret = (uint8x8_t) __builtin_neon_vsubhn_v((int8x16_t)__p0, (int8x16_t)__p1, 16);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai int16x4_t vsubhn_s32(int32x4_t __p0, int32x4_t __p1) {
+ int16x4_t __ret;
+ __ret = (int16x4_t) __builtin_neon_vsubhn_v((int8x16_t)__p0, (int8x16_t)__p1, 1);
+ return __ret;
+}
+#else
+__ai int16x4_t vsubhn_s32(int32x4_t __p0, int32x4_t __p1) {
+ int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
+ int32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
+ int16x4_t __ret;
+ __ret = (int16x4_t) __builtin_neon_vsubhn_v((int8x16_t)__rev0, (int8x16_t)__rev1, 1);
+ __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
+ return __ret;
+}
+__ai int16x4_t __noswap_vsubhn_s32(int32x4_t __p0, int32x4_t __p1) {
+ int16x4_t __ret;
+ __ret = (int16x4_t) __builtin_neon_vsubhn_v((int8x16_t)__p0, (int8x16_t)__p1, 1);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai int32x2_t vsubhn_s64(int64x2_t __p0, int64x2_t __p1) {
+ int32x2_t __ret;
+ __ret = (int32x2_t) __builtin_neon_vsubhn_v((int8x16_t)__p0, (int8x16_t)__p1, 2);
+ return __ret;
+}
+#else
+__ai int32x2_t vsubhn_s64(int64x2_t __p0, int64x2_t __p1) {
+ int64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
+ int64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
+ int32x2_t __ret;
+ __ret = (int32x2_t) __builtin_neon_vsubhn_v((int8x16_t)__rev0, (int8x16_t)__rev1, 2);
+ __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
+ return __ret;
+}
+__ai int32x2_t __noswap_vsubhn_s64(int64x2_t __p0, int64x2_t __p1) {
+ int32x2_t __ret;
+ __ret = (int32x2_t) __builtin_neon_vsubhn_v((int8x16_t)__p0, (int8x16_t)__p1, 2);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai int8x8_t vsubhn_s16(int16x8_t __p0, int16x8_t __p1) {
+ int8x8_t __ret;
+ __ret = (int8x8_t) __builtin_neon_vsubhn_v((int8x16_t)__p0, (int8x16_t)__p1, 0);
+ return __ret;
+}
+#else
+__ai int8x8_t vsubhn_s16(int16x8_t __p0, int16x8_t __p1) {
+ int16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
+ int16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
+ int8x8_t __ret;
+ __ret = (int8x8_t) __builtin_neon_vsubhn_v((int8x16_t)__rev0, (int8x16_t)__rev1, 0);
+ __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
+ return __ret;
+}
+__ai int8x8_t __noswap_vsubhn_s16(int16x8_t __p0, int16x8_t __p1) {
+ int8x8_t __ret;
+ __ret = (int8x8_t) __builtin_neon_vsubhn_v((int8x16_t)__p0, (int8x16_t)__p1, 0);
+ return __ret;
+}
+#endif
+
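+/*
+ * vsubl_* (subtract long): both narrow operands are widened with vmovl_*
+ * and then subtracted, so the full-precision difference is kept, e.g.
+ *
+ *   int16x8_t d = vsubl_s8(a, b);  // d[i] = (int16_t)a[i] - (int16_t)b[i]
+ */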
+#ifdef __LITTLE_ENDIAN__
+__ai uint16x8_t vsubl_u8(uint8x8_t __p0, uint8x8_t __p1) {
+ uint16x8_t __ret;
+ __ret = vmovl_u8(__p0) - vmovl_u8(__p1);
+ return __ret;
+}
+#else
+__ai uint16x8_t vsubl_u8(uint8x8_t __p0, uint8x8_t __p1) {
+ uint8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
+ uint8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
+ uint16x8_t __ret;
+ __ret = __noswap_vmovl_u8(__rev0) - __noswap_vmovl_u8(__rev1);
+ __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint64x2_t vsubl_u32(uint32x2_t __p0, uint32x2_t __p1) {
+ uint64x2_t __ret;
+ __ret = vmovl_u32(__p0) - vmovl_u32(__p1);
+ return __ret;
+}
+#else
+__ai uint64x2_t vsubl_u32(uint32x2_t __p0, uint32x2_t __p1) {
+ uint32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
+ uint32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
+ uint64x2_t __ret;
+ __ret = __noswap_vmovl_u32(__rev0) - __noswap_vmovl_u32(__rev1);
+ __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint32x4_t vsubl_u16(uint16x4_t __p0, uint16x4_t __p1) {
+ uint32x4_t __ret;
+ __ret = vmovl_u16(__p0) - vmovl_u16(__p1);
+ return __ret;
+}
+#else
+__ai uint32x4_t vsubl_u16(uint16x4_t __p0, uint16x4_t __p1) {
+ uint16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
+ uint16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
+ uint32x4_t __ret;
+ __ret = __noswap_vmovl_u16(__rev0) - __noswap_vmovl_u16(__rev1);
+ __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai int16x8_t vsubl_s8(int8x8_t __p0, int8x8_t __p1) {
+ int16x8_t __ret;
+ __ret = vmovl_s8(__p0) - vmovl_s8(__p1);
+ return __ret;
+}
+#else
+__ai int16x8_t vsubl_s8(int8x8_t __p0, int8x8_t __p1) {
+ int8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
+ int8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
+ int16x8_t __ret;
+ __ret = __noswap_vmovl_s8(__rev0) - __noswap_vmovl_s8(__rev1);
+ __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai int64x2_t vsubl_s32(int32x2_t __p0, int32x2_t __p1) {
+ int64x2_t __ret;
+ __ret = vmovl_s32(__p0) - vmovl_s32(__p1);
+ return __ret;
+}
+#else
+__ai int64x2_t vsubl_s32(int32x2_t __p0, int32x2_t __p1) {
+ int32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
+ int32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
+ int64x2_t __ret;
+ __ret = __noswap_vmovl_s32(__rev0) - __noswap_vmovl_s32(__rev1);
+ __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai int32x4_t vsubl_s16(int16x4_t __p0, int16x4_t __p1) {
+ int32x4_t __ret;
+ __ret = vmovl_s16(__p0) - vmovl_s16(__p1);
+ return __ret;
+}
+#else
+__ai int32x4_t vsubl_s16(int16x4_t __p0, int16x4_t __p1) {
+ int16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
+ int16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
+ int32x4_t __ret;
+ __ret = __noswap_vmovl_s16(__rev0) - __noswap_vmovl_s16(__rev1);
+ __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
+ return __ret;
+}
+#endif
+
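+/*
+ * vsubw_* (subtract wide): the first operand is already wide; only the
+ * second is widened before the subtraction, e.g.
+ *
+ *   uint16x8_t d = vsubw_u8(acc, b);  // d[i] = acc[i] - (uint16_t)b[i]
+ */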
+#ifdef __LITTLE_ENDIAN__
+__ai uint16x8_t vsubw_u8(uint16x8_t __p0, uint8x8_t __p1) {
+ uint16x8_t __ret;
+ __ret = __p0 - vmovl_u8(__p1);
+ return __ret;
+}
+#else
+__ai uint16x8_t vsubw_u8(uint16x8_t __p0, uint8x8_t __p1) {
+ uint16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
+ uint8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
+ uint16x8_t __ret;
+ __ret = __rev0 - __noswap_vmovl_u8(__rev1);
+ __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint64x2_t vsubw_u32(uint64x2_t __p0, uint32x2_t __p1) {
+ uint64x2_t __ret;
+ __ret = __p0 - vmovl_u32(__p1);
+ return __ret;
+}
+#else
+__ai uint64x2_t vsubw_u32(uint64x2_t __p0, uint32x2_t __p1) {
+ uint64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
+ uint32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
+ uint64x2_t __ret;
+ __ret = __rev0 - __noswap_vmovl_u32(__rev1);
+ __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint32x4_t vsubw_u16(uint32x4_t __p0, uint16x4_t __p1) {
+ uint32x4_t __ret;
+ __ret = __p0 - vmovl_u16(__p1);
+ return __ret;
+}
+#else
+__ai uint32x4_t vsubw_u16(uint32x4_t __p0, uint16x4_t __p1) {
+ uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
+ uint16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
+ uint32x4_t __ret;
+ __ret = __rev0 - __noswap_vmovl_u16(__rev1);
+ __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai int16x8_t vsubw_s8(int16x8_t __p0, int8x8_t __p1) {
+ int16x8_t __ret;
+ __ret = __p0 - vmovl_s8(__p1);
+ return __ret;
+}
+#else
+__ai int16x8_t vsubw_s8(int16x8_t __p0, int8x8_t __p1) {
+ int16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
+ int8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
+ int16x8_t __ret;
+ __ret = __rev0 - __noswap_vmovl_s8(__rev1);
+ __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai int64x2_t vsubw_s32(int64x2_t __p0, int32x2_t __p1) {
+ int64x2_t __ret;
+ __ret = __p0 - vmovl_s32(__p1);
+ return __ret;
+}
+#else
+__ai int64x2_t vsubw_s32(int64x2_t __p0, int32x2_t __p1) {
+ int64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
+ int32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
+ int64x2_t __ret;
+ __ret = __rev0 - __noswap_vmovl_s32(__rev1);
+ __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai int32x4_t vsubw_s16(int32x4_t __p0, int16x4_t __p1) {
+ int32x4_t __ret;
+ __ret = __p0 - vmovl_s16(__p1);
+ return __ret;
+}
+#else
+__ai int32x4_t vsubw_s16(int32x4_t __p0, int16x4_t __p1) {
+ int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
+ int16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
+ int32x4_t __ret;
+ __ret = __rev0 - __noswap_vmovl_s16(__rev1);
+ __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
+ return __ret;
+}
+#endif
+
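+/*
+ * vtbl1..4_* implement the VTBL table lookup: each byte of the index
+ * vector selects a byte from an 8/16/24/32-byte table; out-of-range
+ * indices produce 0. Sketch (assuming little-endian lane order, where
+ * vcreate_u8 maps the low-order byte to lane 0):
+ *
+ *   uint8x8_t rev = vtbl1_u8(v, vcreate_u8(0x0001020304050607ULL));
+ *   // index lane i is 7-i, so rev holds the 8 bytes of v reversed
+ */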
+#ifdef __LITTLE_ENDIAN__
+__ai poly8x8_t vtbl1_p8(poly8x8_t __p0, uint8x8_t __p1) {
+ poly8x8_t __ret;
+ __ret = (poly8x8_t) __builtin_neon_vtbl1_v((int8x8_t)__p0, (int8x8_t)__p1, 4);
+ return __ret;
+}
+#else
+__ai poly8x8_t vtbl1_p8(poly8x8_t __p0, uint8x8_t __p1) {
+ poly8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
+ uint8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
+ poly8x8_t __ret;
+ __ret = (poly8x8_t) __builtin_neon_vtbl1_v((int8x8_t)__rev0, (int8x8_t)__rev1, 4);
+ __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint8x8_t vtbl1_u8(uint8x8_t __p0, uint8x8_t __p1) {
+ uint8x8_t __ret;
+ __ret = (uint8x8_t) __builtin_neon_vtbl1_v((int8x8_t)__p0, (int8x8_t)__p1, 16);
+ return __ret;
+}
+#else
+__ai uint8x8_t vtbl1_u8(uint8x8_t __p0, uint8x8_t __p1) {
+ uint8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
+ uint8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
+ uint8x8_t __ret;
+ __ret = (uint8x8_t) __builtin_neon_vtbl1_v((int8x8_t)__rev0, (int8x8_t)__rev1, 16);
+ __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai int8x8_t vtbl1_s8(int8x8_t __p0, int8x8_t __p1) {
+ int8x8_t __ret;
+ __ret = (int8x8_t) __builtin_neon_vtbl1_v((int8x8_t)__p0, (int8x8_t)__p1, 0);
+ return __ret;
+}
+#else
+__ai int8x8_t vtbl1_s8(int8x8_t __p0, int8x8_t __p1) {
+ int8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
+ int8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
+ int8x8_t __ret;
+ __ret = (int8x8_t) __builtin_neon_vtbl1_v((int8x8_t)__rev0, (int8x8_t)__rev1, 0);
+ __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai poly8x8_t vtbl2_p8(poly8x8x2_t __p0, uint8x8_t __p1) {
+ poly8x8_t __ret;
+ __ret = (poly8x8_t) __builtin_neon_vtbl2_v((int8x8_t)__p0.val[0], (int8x8_t)__p0.val[1], (int8x8_t)__p1, 4);
+ return __ret;
+}
+#else
+__ai poly8x8_t vtbl2_p8(poly8x8x2_t __p0, uint8x8_t __p1) {
+ poly8x8x2_t __rev0;
+ __rev0.val[0] = __builtin_shufflevector(__p0.val[0], __p0.val[0], 7, 6, 5, 4, 3, 2, 1, 0);
+ __rev0.val[1] = __builtin_shufflevector(__p0.val[1], __p0.val[1], 7, 6, 5, 4, 3, 2, 1, 0);
+ uint8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
+ poly8x8_t __ret;
+ __ret = (poly8x8_t) __builtin_neon_vtbl2_v((int8x8_t)__rev0.val[0], (int8x8_t)__rev0.val[1], (int8x8_t)__rev1, 4);
+ __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint8x8_t vtbl2_u8(uint8x8x2_t __p0, uint8x8_t __p1) {
+ uint8x8_t __ret;
+ __ret = (uint8x8_t) __builtin_neon_vtbl2_v((int8x8_t)__p0.val[0], (int8x8_t)__p0.val[1], (int8x8_t)__p1, 16);
+ return __ret;
+}
+#else
+__ai uint8x8_t vtbl2_u8(uint8x8x2_t __p0, uint8x8_t __p1) {
+ uint8x8x2_t __rev0;
+ __rev0.val[0] = __builtin_shufflevector(__p0.val[0], __p0.val[0], 7, 6, 5, 4, 3, 2, 1, 0);
+ __rev0.val[1] = __builtin_shufflevector(__p0.val[1], __p0.val[1], 7, 6, 5, 4, 3, 2, 1, 0);
+ uint8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
+ uint8x8_t __ret;
+ __ret = (uint8x8_t) __builtin_neon_vtbl2_v((int8x8_t)__rev0.val[0], (int8x8_t)__rev0.val[1], (int8x8_t)__rev1, 16);
+ __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai int8x8_t vtbl2_s8(int8x8x2_t __p0, int8x8_t __p1) {
+ int8x8_t __ret;
+ __ret = (int8x8_t) __builtin_neon_vtbl2_v((int8x8_t)__p0.val[0], (int8x8_t)__p0.val[1], (int8x8_t)__p1, 0);
+ return __ret;
+}
+#else
+__ai int8x8_t vtbl2_s8(int8x8x2_t __p0, int8x8_t __p1) {
+ int8x8x2_t __rev0;
+ __rev0.val[0] = __builtin_shufflevector(__p0.val[0], __p0.val[0], 7, 6, 5, 4, 3, 2, 1, 0);
+ __rev0.val[1] = __builtin_shufflevector(__p0.val[1], __p0.val[1], 7, 6, 5, 4, 3, 2, 1, 0);
+ int8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
+ int8x8_t __ret;
+ __ret = (int8x8_t) __builtin_neon_vtbl2_v((int8x8_t)__rev0.val[0], (int8x8_t)__rev0.val[1], (int8x8_t)__rev1, 0);
+ __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai poly8x8_t vtbl3_p8(poly8x8x3_t __p0, uint8x8_t __p1) {
+ poly8x8_t __ret;
+ __ret = (poly8x8_t) __builtin_neon_vtbl3_v((int8x8_t)__p0.val[0], (int8x8_t)__p0.val[1], (int8x8_t)__p0.val[2], (int8x8_t)__p1, 4);
+ return __ret;
+}
+#else
+__ai poly8x8_t vtbl3_p8(poly8x8x3_t __p0, uint8x8_t __p1) {
+ poly8x8x3_t __rev0;
+ __rev0.val[0] = __builtin_shufflevector(__p0.val[0], __p0.val[0], 7, 6, 5, 4, 3, 2, 1, 0);
+ __rev0.val[1] = __builtin_shufflevector(__p0.val[1], __p0.val[1], 7, 6, 5, 4, 3, 2, 1, 0);
+ __rev0.val[2] = __builtin_shufflevector(__p0.val[2], __p0.val[2], 7, 6, 5, 4, 3, 2, 1, 0);
+ uint8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
+ poly8x8_t __ret;
+ __ret = (poly8x8_t) __builtin_neon_vtbl3_v((int8x8_t)__rev0.val[0], (int8x8_t)__rev0.val[1], (int8x8_t)__rev0.val[2], (int8x8_t)__rev1, 4);
+ __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint8x8_t vtbl3_u8(uint8x8x3_t __p0, uint8x8_t __p1) {
+ uint8x8_t __ret;
+ __ret = (uint8x8_t) __builtin_neon_vtbl3_v((int8x8_t)__p0.val[0], (int8x8_t)__p0.val[1], (int8x8_t)__p0.val[2], (int8x8_t)__p1, 16);
+ return __ret;
+}
+#else
+__ai uint8x8_t vtbl3_u8(uint8x8x3_t __p0, uint8x8_t __p1) {
+ uint8x8x3_t __rev0;
+ __rev0.val[0] = __builtin_shufflevector(__p0.val[0], __p0.val[0], 7, 6, 5, 4, 3, 2, 1, 0);
+ __rev0.val[1] = __builtin_shufflevector(__p0.val[1], __p0.val[1], 7, 6, 5, 4, 3, 2, 1, 0);
+ __rev0.val[2] = __builtin_shufflevector(__p0.val[2], __p0.val[2], 7, 6, 5, 4, 3, 2, 1, 0);
+ uint8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
+ uint8x8_t __ret;
+ __ret = (uint8x8_t) __builtin_neon_vtbl3_v((int8x8_t)__rev0.val[0], (int8x8_t)__rev0.val[1], (int8x8_t)__rev0.val[2], (int8x8_t)__rev1, 16);
+ __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai int8x8_t vtbl3_s8(int8x8x3_t __p0, int8x8_t __p1) {
+ int8x8_t __ret;
+ __ret = (int8x8_t) __builtin_neon_vtbl3_v((int8x8_t)__p0.val[0], (int8x8_t)__p0.val[1], (int8x8_t)__p0.val[2], (int8x8_t)__p1, 0);
+ return __ret;
+}
+#else
+__ai int8x8_t vtbl3_s8(int8x8x3_t __p0, int8x8_t __p1) {
+ int8x8x3_t __rev0;
+ __rev0.val[0] = __builtin_shufflevector(__p0.val[0], __p0.val[0], 7, 6, 5, 4, 3, 2, 1, 0);
+ __rev0.val[1] = __builtin_shufflevector(__p0.val[1], __p0.val[1], 7, 6, 5, 4, 3, 2, 1, 0);
+ __rev0.val[2] = __builtin_shufflevector(__p0.val[2], __p0.val[2], 7, 6, 5, 4, 3, 2, 1, 0);
+ int8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
+ int8x8_t __ret;
+ __ret = (int8x8_t) __builtin_neon_vtbl3_v((int8x8_t)__rev0.val[0], (int8x8_t)__rev0.val[1], (int8x8_t)__rev0.val[2], (int8x8_t)__rev1, 0);
+ __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai poly8x8_t vtbl4_p8(poly8x8x4_t __p0, uint8x8_t __p1) {
+ poly8x8_t __ret;
+ __ret = (poly8x8_t) __builtin_neon_vtbl4_v((int8x8_t)__p0.val[0], (int8x8_t)__p0.val[1], (int8x8_t)__p0.val[2], (int8x8_t)__p0.val[3], (int8x8_t)__p1, 4);
+ return __ret;
+}
+#else
+__ai poly8x8_t vtbl4_p8(poly8x8x4_t __p0, uint8x8_t __p1) {
+ poly8x8x4_t __rev0;
+ __rev0.val[0] = __builtin_shufflevector(__p0.val[0], __p0.val[0], 7, 6, 5, 4, 3, 2, 1, 0);
+ __rev0.val[1] = __builtin_shufflevector(__p0.val[1], __p0.val[1], 7, 6, 5, 4, 3, 2, 1, 0);
+ __rev0.val[2] = __builtin_shufflevector(__p0.val[2], __p0.val[2], 7, 6, 5, 4, 3, 2, 1, 0);
+ __rev0.val[3] = __builtin_shufflevector(__p0.val[3], __p0.val[3], 7, 6, 5, 4, 3, 2, 1, 0);
+ uint8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
+ poly8x8_t __ret;
+ __ret = (poly8x8_t) __builtin_neon_vtbl4_v((int8x8_t)__rev0.val[0], (int8x8_t)__rev0.val[1], (int8x8_t)__rev0.val[2], (int8x8_t)__rev0.val[3], (int8x8_t)__rev1, 4);
+ __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint8x8_t vtbl4_u8(uint8x8x4_t __p0, uint8x8_t __p1) {
+ uint8x8_t __ret;
+ __ret = (uint8x8_t) __builtin_neon_vtbl4_v((int8x8_t)__p0.val[0], (int8x8_t)__p0.val[1], (int8x8_t)__p0.val[2], (int8x8_t)__p0.val[3], (int8x8_t)__p1, 16);
+ return __ret;
+}
+#else
+__ai uint8x8_t vtbl4_u8(uint8x8x4_t __p0, uint8x8_t __p1) {
+ uint8x8x4_t __rev0;
+ __rev0.val[0] = __builtin_shufflevector(__p0.val[0], __p0.val[0], 7, 6, 5, 4, 3, 2, 1, 0);
+ __rev0.val[1] = __builtin_shufflevector(__p0.val[1], __p0.val[1], 7, 6, 5, 4, 3, 2, 1, 0);
+ __rev0.val[2] = __builtin_shufflevector(__p0.val[2], __p0.val[2], 7, 6, 5, 4, 3, 2, 1, 0);
+ __rev0.val[3] = __builtin_shufflevector(__p0.val[3], __p0.val[3], 7, 6, 5, 4, 3, 2, 1, 0);
+ uint8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
+ uint8x8_t __ret;
+ __ret = (uint8x8_t) __builtin_neon_vtbl4_v((int8x8_t)__rev0.val[0], (int8x8_t)__rev0.val[1], (int8x8_t)__rev0.val[2], (int8x8_t)__rev0.val[3], (int8x8_t)__rev1, 16);
+ __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai int8x8_t vtbl4_s8(int8x8x4_t __p0, int8x8_t __p1) {
+ int8x8_t __ret;
+ __ret = (int8x8_t) __builtin_neon_vtbl4_v((int8x8_t)__p0.val[0], (int8x8_t)__p0.val[1], (int8x8_t)__p0.val[2], (int8x8_t)__p0.val[3], (int8x8_t)__p1, 0);
+ return __ret;
+}
+#else
+__ai int8x8_t vtbl4_s8(int8x8x4_t __p0, int8x8_t __p1) {
+ int8x8x4_t __rev0;
+ __rev0.val[0] = __builtin_shufflevector(__p0.val[0], __p0.val[0], 7, 6, 5, 4, 3, 2, 1, 0);
+ __rev0.val[1] = __builtin_shufflevector(__p0.val[1], __p0.val[1], 7, 6, 5, 4, 3, 2, 1, 0);
+ __rev0.val[2] = __builtin_shufflevector(__p0.val[2], __p0.val[2], 7, 6, 5, 4, 3, 2, 1, 0);
+ __rev0.val[3] = __builtin_shufflevector(__p0.val[3], __p0.val[3], 7, 6, 5, 4, 3, 2, 1, 0);
+ int8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
+ int8x8_t __ret;
+ __ret = (int8x8_t) __builtin_neon_vtbl4_v((int8x8_t)__rev0.val[0], (int8x8_t)__rev0.val[1], (int8x8_t)__rev0.val[2], (int8x8_t)__rev0.val[3], (int8x8_t)__rev1, 0);
+ __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
+ return __ret;
+}
+#endif
+
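+/* vtbx1: extended table lookup over an 8-byte table (__p1). Unlike vtbl,
+ * an out-of-range index byte leaves the corresponding byte of the
+ * destination operand __p0 unchanged rather than writing 0. */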
+#ifdef __LITTLE_ENDIAN__
+__ai poly8x8_t vtbx1_p8(poly8x8_t __p0, poly8x8_t __p1, uint8x8_t __p2) {
+ poly8x8_t __ret;
+ __ret = (poly8x8_t) __builtin_neon_vtbx1_v((int8x8_t)__p0, (int8x8_t)__p1, (int8x8_t)__p2, 4);
+ return __ret;
+}
+#else
+__ai poly8x8_t vtbx1_p8(poly8x8_t __p0, poly8x8_t __p1, uint8x8_t __p2) {
+ poly8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
+ poly8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
+ uint8x8_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0);
+ poly8x8_t __ret;
+ __ret = (poly8x8_t) __builtin_neon_vtbx1_v((int8x8_t)__rev0, (int8x8_t)__rev1, (int8x8_t)__rev2, 4);
+ __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint8x8_t vtbx1_u8(uint8x8_t __p0, uint8x8_t __p1, uint8x8_t __p2) {
+ uint8x8_t __ret;
+ __ret = (uint8x8_t) __builtin_neon_vtbx1_v((int8x8_t)__p0, (int8x8_t)__p1, (int8x8_t)__p2, 16);
+ return __ret;
+}
+#else
+__ai uint8x8_t vtbx1_u8(uint8x8_t __p0, uint8x8_t __p1, uint8x8_t __p2) {
+ uint8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
+ uint8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
+ uint8x8_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0);
+ uint8x8_t __ret;
+ __ret = (uint8x8_t) __builtin_neon_vtbx1_v((int8x8_t)__rev0, (int8x8_t)__rev1, (int8x8_t)__rev2, 16);
+ __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai int8x8_t vtbx1_s8(int8x8_t __p0, int8x8_t __p1, int8x8_t __p2) {
+ int8x8_t __ret;
+ __ret = (int8x8_t) __builtin_neon_vtbx1_v((int8x8_t)__p0, (int8x8_t)__p1, (int8x8_t)__p2, 0);
+ return __ret;
+}
+#else
+__ai int8x8_t vtbx1_s8(int8x8_t __p0, int8x8_t __p1, int8x8_t __p2) {
+ int8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
+ int8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
+ int8x8_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0);
+ int8x8_t __ret;
+ __ret = (int8x8_t) __builtin_neon_vtbx1_v((int8x8_t)__rev0, (int8x8_t)__rev1, (int8x8_t)__rev2, 0);
+ __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
+ return __ret;
+}
+#endif
+
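+/* vtbx2: extended table lookup over the 16-byte table __p1.val[0..1];
+ * out-of-range indices keep the corresponding byte of __p0. */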
+#ifdef __LITTLE_ENDIAN__
+__ai poly8x8_t vtbx2_p8(poly8x8_t __p0, poly8x8x2_t __p1, uint8x8_t __p2) {
+ poly8x8_t __ret;
+ __ret = (poly8x8_t) __builtin_neon_vtbx2_v((int8x8_t)__p0, (int8x8_t)__p1.val[0], (int8x8_t)__p1.val[1], (int8x8_t)__p2, 4);
+ return __ret;
+}
+#else
+__ai poly8x8_t vtbx2_p8(poly8x8_t __p0, poly8x8x2_t __p1, uint8x8_t __p2) {
+ poly8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
+ poly8x8x2_t __rev1;
+ __rev1.val[0] = __builtin_shufflevector(__p1.val[0], __p1.val[0], 7, 6, 5, 4, 3, 2, 1, 0);
+ __rev1.val[1] = __builtin_shufflevector(__p1.val[1], __p1.val[1], 7, 6, 5, 4, 3, 2, 1, 0);
+ uint8x8_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0);
+ poly8x8_t __ret;
+ __ret = (poly8x8_t) __builtin_neon_vtbx2_v((int8x8_t)__rev0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev2, 4);
+ __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint8x8_t vtbx2_u8(uint8x8_t __p0, uint8x8x2_t __p1, uint8x8_t __p2) {
+ uint8x8_t __ret;
+ __ret = (uint8x8_t) __builtin_neon_vtbx2_v((int8x8_t)__p0, (int8x8_t)__p1.val[0], (int8x8_t)__p1.val[1], (int8x8_t)__p2, 16);
+ return __ret;
+}
+#else
+__ai uint8x8_t vtbx2_u8(uint8x8_t __p0, uint8x8x2_t __p1, uint8x8_t __p2) {
+ uint8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
+ uint8x8x2_t __rev1;
+ __rev1.val[0] = __builtin_shufflevector(__p1.val[0], __p1.val[0], 7, 6, 5, 4, 3, 2, 1, 0);
+ __rev1.val[1] = __builtin_shufflevector(__p1.val[1], __p1.val[1], 7, 6, 5, 4, 3, 2, 1, 0);
+ uint8x8_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0);
+ uint8x8_t __ret;
+ __ret = (uint8x8_t) __builtin_neon_vtbx2_v((int8x8_t)__rev0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev2, 16);
+ __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai int8x8_t vtbx2_s8(int8x8_t __p0, int8x8x2_t __p1, int8x8_t __p2) {
+ int8x8_t __ret;
+ __ret = (int8x8_t) __builtin_neon_vtbx2_v((int8x8_t)__p0, (int8x8_t)__p1.val[0], (int8x8_t)__p1.val[1], (int8x8_t)__p2, 0);
+ return __ret;
+}
+#else
+__ai int8x8_t vtbx2_s8(int8x8_t __p0, int8x8x2_t __p1, int8x8_t __p2) {
+ int8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
+ int8x8x2_t __rev1;
+ __rev1.val[0] = __builtin_shufflevector(__p1.val[0], __p1.val[0], 7, 6, 5, 4, 3, 2, 1, 0);
+ __rev1.val[1] = __builtin_shufflevector(__p1.val[1], __p1.val[1], 7, 6, 5, 4, 3, 2, 1, 0);
+ int8x8_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0);
+ int8x8_t __ret;
+ __ret = (int8x8_t) __builtin_neon_vtbx2_v((int8x8_t)__rev0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev2, 0);
+ __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
+ return __ret;
+}
+#endif
+
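+/* vtbx3: extended table lookup over the 24-byte table __p1.val[0..2]. */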
+#ifdef __LITTLE_ENDIAN__
+__ai poly8x8_t vtbx3_p8(poly8x8_t __p0, poly8x8x3_t __p1, uint8x8_t __p2) {
+ poly8x8_t __ret;
+ __ret = (poly8x8_t) __builtin_neon_vtbx3_v((int8x8_t)__p0, (int8x8_t)__p1.val[0], (int8x8_t)__p1.val[1], (int8x8_t)__p1.val[2], (int8x8_t)__p2, 4);
+ return __ret;
+}
+#else
+__ai poly8x8_t vtbx3_p8(poly8x8_t __p0, poly8x8x3_t __p1, uint8x8_t __p2) {
+ poly8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
+ poly8x8x3_t __rev1;
+ __rev1.val[0] = __builtin_shufflevector(__p1.val[0], __p1.val[0], 7, 6, 5, 4, 3, 2, 1, 0);
+ __rev1.val[1] = __builtin_shufflevector(__p1.val[1], __p1.val[1], 7, 6, 5, 4, 3, 2, 1, 0);
+ __rev1.val[2] = __builtin_shufflevector(__p1.val[2], __p1.val[2], 7, 6, 5, 4, 3, 2, 1, 0);
+ uint8x8_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0);
+ poly8x8_t __ret;
+ __ret = (poly8x8_t) __builtin_neon_vtbx3_v((int8x8_t)__rev0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], (int8x8_t)__rev2, 4);
+ __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint8x8_t vtbx3_u8(uint8x8_t __p0, uint8x8x3_t __p1, uint8x8_t __p2) {
+ uint8x8_t __ret;
+ __ret = (uint8x8_t) __builtin_neon_vtbx3_v((int8x8_t)__p0, (int8x8_t)__p1.val[0], (int8x8_t)__p1.val[1], (int8x8_t)__p1.val[2], (int8x8_t)__p2, 16);
+ return __ret;
+}
+#else
+__ai uint8x8_t vtbx3_u8(uint8x8_t __p0, uint8x8x3_t __p1, uint8x8_t __p2) {
+ uint8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
+ uint8x8x3_t __rev1;
+ __rev1.val[0] = __builtin_shufflevector(__p1.val[0], __p1.val[0], 7, 6, 5, 4, 3, 2, 1, 0);
+ __rev1.val[1] = __builtin_shufflevector(__p1.val[1], __p1.val[1], 7, 6, 5, 4, 3, 2, 1, 0);
+ __rev1.val[2] = __builtin_shufflevector(__p1.val[2], __p1.val[2], 7, 6, 5, 4, 3, 2, 1, 0);
+ uint8x8_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0);
+ uint8x8_t __ret;
+ __ret = (uint8x8_t) __builtin_neon_vtbx3_v((int8x8_t)__rev0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], (int8x8_t)__rev2, 16);
+ __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai int8x8_t vtbx3_s8(int8x8_t __p0, int8x8x3_t __p1, int8x8_t __p2) {
+ int8x8_t __ret;
+ __ret = (int8x8_t) __builtin_neon_vtbx3_v((int8x8_t)__p0, (int8x8_t)__p1.val[0], (int8x8_t)__p1.val[1], (int8x8_t)__p1.val[2], (int8x8_t)__p2, 0);
+ return __ret;
+}
+#else
+__ai int8x8_t vtbx3_s8(int8x8_t __p0, int8x8x3_t __p1, int8x8_t __p2) {
+ int8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
+ int8x8x3_t __rev1;
+ __rev1.val[0] = __builtin_shufflevector(__p1.val[0], __p1.val[0], 7, 6, 5, 4, 3, 2, 1, 0);
+ __rev1.val[1] = __builtin_shufflevector(__p1.val[1], __p1.val[1], 7, 6, 5, 4, 3, 2, 1, 0);
+ __rev1.val[2] = __builtin_shufflevector(__p1.val[2], __p1.val[2], 7, 6, 5, 4, 3, 2, 1, 0);
+ int8x8_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0);
+ int8x8_t __ret;
+ __ret = (int8x8_t) __builtin_neon_vtbx3_v((int8x8_t)__rev0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], (int8x8_t)__rev2, 0);
+ __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
+ return __ret;
+}
+#endif
+
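+/* vtbx4: extended table lookup over the 32-byte table __p1.val[0..3]. */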
+#ifdef __LITTLE_ENDIAN__
+__ai poly8x8_t vtbx4_p8(poly8x8_t __p0, poly8x8x4_t __p1, uint8x8_t __p2) {
+ poly8x8_t __ret;
+ __ret = (poly8x8_t) __builtin_neon_vtbx4_v((int8x8_t)__p0, (int8x8_t)__p1.val[0], (int8x8_t)__p1.val[1], (int8x8_t)__p1.val[2], (int8x8_t)__p1.val[3], (int8x8_t)__p2, 4);
+ return __ret;
+}
+#else
+__ai poly8x8_t vtbx4_p8(poly8x8_t __p0, poly8x8x4_t __p1, uint8x8_t __p2) {
+ poly8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
+ poly8x8x4_t __rev1;
+ __rev1.val[0] = __builtin_shufflevector(__p1.val[0], __p1.val[0], 7, 6, 5, 4, 3, 2, 1, 0);
+ __rev1.val[1] = __builtin_shufflevector(__p1.val[1], __p1.val[1], 7, 6, 5, 4, 3, 2, 1, 0);
+ __rev1.val[2] = __builtin_shufflevector(__p1.val[2], __p1.val[2], 7, 6, 5, 4, 3, 2, 1, 0);
+ __rev1.val[3] = __builtin_shufflevector(__p1.val[3], __p1.val[3], 7, 6, 5, 4, 3, 2, 1, 0);
+ uint8x8_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0);
+ poly8x8_t __ret;
+ __ret = (poly8x8_t) __builtin_neon_vtbx4_v((int8x8_t)__rev0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], (int8x8_t)__rev1.val[3], (int8x8_t)__rev2, 4);
+ __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint8x8_t vtbx4_u8(uint8x8_t __p0, uint8x8x4_t __p1, uint8x8_t __p2) {
+ uint8x8_t __ret;
+ __ret = (uint8x8_t) __builtin_neon_vtbx4_v((int8x8_t)__p0, (int8x8_t)__p1.val[0], (int8x8_t)__p1.val[1], (int8x8_t)__p1.val[2], (int8x8_t)__p1.val[3], (int8x8_t)__p2, 16);
+ return __ret;
+}
+#else
+__ai uint8x8_t vtbx4_u8(uint8x8_t __p0, uint8x8x4_t __p1, uint8x8_t __p2) {
+ uint8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
+ uint8x8x4_t __rev1;
+ __rev1.val[0] = __builtin_shufflevector(__p1.val[0], __p1.val[0], 7, 6, 5, 4, 3, 2, 1, 0);
+ __rev1.val[1] = __builtin_shufflevector(__p1.val[1], __p1.val[1], 7, 6, 5, 4, 3, 2, 1, 0);
+ __rev1.val[2] = __builtin_shufflevector(__p1.val[2], __p1.val[2], 7, 6, 5, 4, 3, 2, 1, 0);
+ __rev1.val[3] = __builtin_shufflevector(__p1.val[3], __p1.val[3], 7, 6, 5, 4, 3, 2, 1, 0);
+ uint8x8_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0);
+ uint8x8_t __ret;
+ __ret = (uint8x8_t) __builtin_neon_vtbx4_v((int8x8_t)__rev0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], (int8x8_t)__rev1.val[3], (int8x8_t)__rev2, 16);
+ __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai int8x8_t vtbx4_s8(int8x8_t __p0, int8x8x4_t __p1, int8x8_t __p2) {
+ int8x8_t __ret;
+ __ret = (int8x8_t) __builtin_neon_vtbx4_v((int8x8_t)__p0, (int8x8_t)__p1.val[0], (int8x8_t)__p1.val[1], (int8x8_t)__p1.val[2], (int8x8_t)__p1.val[3], (int8x8_t)__p2, 0);
+ return __ret;
+}
+#else
+__ai int8x8_t vtbx4_s8(int8x8_t __p0, int8x8x4_t __p1, int8x8_t __p2) {
+ int8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
+ int8x8x4_t __rev1;
+ __rev1.val[0] = __builtin_shufflevector(__p1.val[0], __p1.val[0], 7, 6, 5, 4, 3, 2, 1, 0);
+ __rev1.val[1] = __builtin_shufflevector(__p1.val[1], __p1.val[1], 7, 6, 5, 4, 3, 2, 1, 0);
+ __rev1.val[2] = __builtin_shufflevector(__p1.val[2], __p1.val[2], 7, 6, 5, 4, 3, 2, 1, 0);
+ __rev1.val[3] = __builtin_shufflevector(__p1.val[3], __p1.val[3], 7, 6, 5, 4, 3, 2, 1, 0);
+ int8x8_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0);
+ int8x8_t __ret;
+ __ret = (int8x8_t) __builtin_neon_vtbx4_v((int8x8_t)__rev0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], (int8x8_t)__rev1.val[3], (int8x8_t)__rev2, 0);
+ __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
+ return __ret;
+}
+#endif
+
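+/* vtrn/vtrnq: transpose 2x2 element blocks of the two inputs. val[0]
+ * holds the even-numbered lanes of both operands, interleaved, and
+ * val[1] the odd-numbered lanes. The result is written through the
+ * pointer passed to the builtin rather than returned. */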
+#ifdef __LITTLE_ENDIAN__
+__ai poly8x8x2_t vtrn_p8(poly8x8_t __p0, poly8x8_t __p1) {
+ poly8x8x2_t __ret;
+ __builtin_neon_vtrn_v(&__ret, (int8x8_t)__p0, (int8x8_t)__p1, 4);
+ return __ret;
+}
+#else
+__ai poly8x8x2_t vtrn_p8(poly8x8_t __p0, poly8x8_t __p1) {
+ poly8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
+ poly8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
+ poly8x8x2_t __ret;
+ __builtin_neon_vtrn_v(&__ret, (int8x8_t)__rev0, (int8x8_t)__rev1, 4);
+
+ __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0);
+ __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai poly16x4x2_t vtrn_p16(poly16x4_t __p0, poly16x4_t __p1) {
+ poly16x4x2_t __ret;
+ __builtin_neon_vtrn_v(&__ret, (int8x8_t)__p0, (int8x8_t)__p1, 5);
+ return __ret;
+}
+#else
+__ai poly16x4x2_t vtrn_p16(poly16x4_t __p0, poly16x4_t __p1) {
+ poly16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
+ poly16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
+ poly16x4x2_t __ret;
+ __builtin_neon_vtrn_v(&__ret, (int8x8_t)__rev0, (int8x8_t)__rev1, 5);
+
+ __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0);
+ __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai poly8x16x2_t vtrnq_p8(poly8x16_t __p0, poly8x16_t __p1) {
+ poly8x16x2_t __ret;
+ __builtin_neon_vtrnq_v(&__ret, (int8x16_t)__p0, (int8x16_t)__p1, 36);
+ return __ret;
+}
+#else
+__ai poly8x16x2_t vtrnq_p8(poly8x16_t __p0, poly8x16_t __p1) {
+ poly8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
+ poly8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
+ poly8x16x2_t __ret;
+ __builtin_neon_vtrnq_v(&__ret, (int8x16_t)__rev0, (int8x16_t)__rev1, 36);
+
+ __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
+ __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai poly16x8x2_t vtrnq_p16(poly16x8_t __p0, poly16x8_t __p1) {
+ poly16x8x2_t __ret;
+ __builtin_neon_vtrnq_v(&__ret, (int8x16_t)__p0, (int8x16_t)__p1, 37);
+ return __ret;
+}
+#else
+__ai poly16x8x2_t vtrnq_p16(poly16x8_t __p0, poly16x8_t __p1) {
+ poly16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
+ poly16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
+ poly16x8x2_t __ret;
+ __builtin_neon_vtrnq_v(&__ret, (int8x16_t)__rev0, (int8x16_t)__rev1, 37);
+
+ __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0);
+ __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint8x16x2_t vtrnq_u8(uint8x16_t __p0, uint8x16_t __p1) {
+ uint8x16x2_t __ret;
+ __builtin_neon_vtrnq_v(&__ret, (int8x16_t)__p0, (int8x16_t)__p1, 48);
+ return __ret;
+}
+#else
+__ai uint8x16x2_t vtrnq_u8(uint8x16_t __p0, uint8x16_t __p1) {
+ uint8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
+ uint8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
+ uint8x16x2_t __ret;
+ __builtin_neon_vtrnq_v(&__ret, (int8x16_t)__rev0, (int8x16_t)__rev1, 48);
+
+ __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
+ __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint32x4x2_t vtrnq_u32(uint32x4_t __p0, uint32x4_t __p1) {
+ uint32x4x2_t __ret;
+ __builtin_neon_vtrnq_v(&__ret, (int8x16_t)__p0, (int8x16_t)__p1, 50);
+ return __ret;
+}
+#else
+__ai uint32x4x2_t vtrnq_u32(uint32x4_t __p0, uint32x4_t __p1) {
+ uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
+ uint32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
+ uint32x4x2_t __ret;
+ __builtin_neon_vtrnq_v(&__ret, (int8x16_t)__rev0, (int8x16_t)__rev1, 50);
+
+ __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0);
+ __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint16x8x2_t vtrnq_u16(uint16x8_t __p0, uint16x8_t __p1) {
+ uint16x8x2_t __ret;
+ __builtin_neon_vtrnq_v(&__ret, (int8x16_t)__p0, (int8x16_t)__p1, 49);
+ return __ret;
+}
+#else
+__ai uint16x8x2_t vtrnq_u16(uint16x8_t __p0, uint16x8_t __p1) {
+ uint16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
+ uint16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
+ uint16x8x2_t __ret;
+ __builtin_neon_vtrnq_v(&__ret, (int8x16_t)__rev0, (int8x16_t)__rev1, 49);
+
+ __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0);
+ __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai int8x16x2_t vtrnq_s8(int8x16_t __p0, int8x16_t __p1) {
+ int8x16x2_t __ret;
+ __builtin_neon_vtrnq_v(&__ret, (int8x16_t)__p0, (int8x16_t)__p1, 32);
+ return __ret;
+}
+#else
+__ai int8x16x2_t vtrnq_s8(int8x16_t __p0, int8x16_t __p1) {
+ int8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
+ int8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
+ int8x16x2_t __ret;
+ __builtin_neon_vtrnq_v(&__ret, (int8x16_t)__rev0, (int8x16_t)__rev1, 32);
+
+ __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
+ __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai float32x4x2_t vtrnq_f32(float32x4_t __p0, float32x4_t __p1) {
+ float32x4x2_t __ret;
+ __builtin_neon_vtrnq_v(&__ret, (int8x16_t)__p0, (int8x16_t)__p1, 41);
+ return __ret;
+}
+#else
+__ai float32x4x2_t vtrnq_f32(float32x4_t __p0, float32x4_t __p1) {
+ float32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
+ float32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
+ float32x4x2_t __ret;
+ __builtin_neon_vtrnq_v(&__ret, (int8x16_t)__rev0, (int8x16_t)__rev1, 41);
+
+ __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0);
+ __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai int32x4x2_t vtrnq_s32(int32x4_t __p0, int32x4_t __p1) {
+ int32x4x2_t __ret;
+ __builtin_neon_vtrnq_v(&__ret, (int8x16_t)__p0, (int8x16_t)__p1, 34);
+ return __ret;
+}
+#else
+__ai int32x4x2_t vtrnq_s32(int32x4_t __p0, int32x4_t __p1) {
+ int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
+ int32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
+ int32x4x2_t __ret;
+ __builtin_neon_vtrnq_v(&__ret, (int8x16_t)__rev0, (int8x16_t)__rev1, 34);
+
+ __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0);
+ __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai int16x8x2_t vtrnq_s16(int16x8_t __p0, int16x8_t __p1) {
+ int16x8x2_t __ret;
+ __builtin_neon_vtrnq_v(&__ret, (int8x16_t)__p0, (int8x16_t)__p1, 33);
+ return __ret;
+}
+#else
+__ai int16x8x2_t vtrnq_s16(int16x8_t __p0, int16x8_t __p1) {
+ int16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
+ int16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
+ int16x8x2_t __ret;
+ __builtin_neon_vtrnq_v(&__ret, (int8x16_t)__rev0, (int8x16_t)__rev1, 33);
+
+ __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0);
+ __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint8x8x2_t vtrn_u8(uint8x8_t __p0, uint8x8_t __p1) {
+ uint8x8x2_t __ret;
+ __builtin_neon_vtrn_v(&__ret, (int8x8_t)__p0, (int8x8_t)__p1, 16);
+ return __ret;
+}
+#else
+__ai uint8x8x2_t vtrn_u8(uint8x8_t __p0, uint8x8_t __p1) {
+ uint8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
+ uint8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
+ uint8x8x2_t __ret;
+ __builtin_neon_vtrn_v(&__ret, (int8x8_t)__rev0, (int8x8_t)__rev1, 16);
+
+ __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0);
+ __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint32x2x2_t vtrn_u32(uint32x2_t __p0, uint32x2_t __p1) {
+ uint32x2x2_t __ret;
+ __builtin_neon_vtrn_v(&__ret, (int8x8_t)__p0, (int8x8_t)__p1, 18);
+ return __ret;
+}
+#else
+__ai uint32x2x2_t vtrn_u32(uint32x2_t __p0, uint32x2_t __p1) {
+ uint32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
+ uint32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
+ uint32x2x2_t __ret;
+ __builtin_neon_vtrn_v(&__ret, (int8x8_t)__rev0, (int8x8_t)__rev1, 18);
+
+ __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0);
+ __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint16x4x2_t vtrn_u16(uint16x4_t __p0, uint16x4_t __p1) {
+ uint16x4x2_t __ret;
+ __builtin_neon_vtrn_v(&__ret, (int8x8_t)__p0, (int8x8_t)__p1, 17);
+ return __ret;
+}
+#else
+__ai uint16x4x2_t vtrn_u16(uint16x4_t __p0, uint16x4_t __p1) {
+ uint16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
+ uint16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
+ uint16x4x2_t __ret;
+ __builtin_neon_vtrn_v(&__ret, (int8x8_t)__rev0, (int8x8_t)__rev1, 17);
+
+ __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0);
+ __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai int8x8x2_t vtrn_s8(int8x8_t __p0, int8x8_t __p1) {
+ int8x8x2_t __ret;
+ __builtin_neon_vtrn_v(&__ret, (int8x8_t)__p0, (int8x8_t)__p1, 0);
+ return __ret;
+}
+#else
+__ai int8x8x2_t vtrn_s8(int8x8_t __p0, int8x8_t __p1) {
+ int8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
+ int8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
+ int8x8x2_t __ret;
+ __builtin_neon_vtrn_v(&__ret, (int8x8_t)__rev0, (int8x8_t)__rev1, 0);
+
+ __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0);
+ __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai float32x2x2_t vtrn_f32(float32x2_t __p0, float32x2_t __p1) {
+ float32x2x2_t __ret;
+ __builtin_neon_vtrn_v(&__ret, (int8x8_t)__p0, (int8x8_t)__p1, 9);
+ return __ret;
+}
+#else
+__ai float32x2x2_t vtrn_f32(float32x2_t __p0, float32x2_t __p1) {
+ float32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
+ float32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
+ float32x2x2_t __ret;
+ __builtin_neon_vtrn_v(&__ret, (int8x8_t)__rev0, (int8x8_t)__rev1, 9);
+
+ __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0);
+ __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai int32x2x2_t vtrn_s32(int32x2_t __p0, int32x2_t __p1) {
+ int32x2x2_t __ret;
+ __builtin_neon_vtrn_v(&__ret, (int8x8_t)__p0, (int8x8_t)__p1, 2);
+ return __ret;
+}
+#else
+__ai int32x2x2_t vtrn_s32(int32x2_t __p0, int32x2_t __p1) {
+ int32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
+ int32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
+ int32x2x2_t __ret;
+ __builtin_neon_vtrn_v(&__ret, (int8x8_t)__rev0, (int8x8_t)__rev1, 2);
+
+ __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0);
+ __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai int16x4x2_t vtrn_s16(int16x4_t __p0, int16x4_t __p1) {
+ int16x4x2_t __ret;
+ __builtin_neon_vtrn_v(&__ret, (int8x8_t)__p0, (int8x8_t)__p1, 1);
+ return __ret;
+}
+#else
+__ai int16x4x2_t vtrn_s16(int16x4_t __p0, int16x4_t __p1) {
+ int16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
+ int16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
+ int16x4x2_t __ret;
+ __builtin_neon_vtrn_v(&__ret, (int8x8_t)__rev0, (int8x8_t)__rev1, 1);
+
+ __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0);
+ __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0);
+ return __ret;
+}
+#endif
+
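+/* vtst/vtstq: per-lane bit test. A result lane is all ones when
+ * (__p0 & __p1) is non-zero in that lane, otherwise all zeros. */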
+#ifdef __LITTLE_ENDIAN__
+__ai uint8x8_t vtst_p8(poly8x8_t __p0, poly8x8_t __p1) {
+ uint8x8_t __ret;
+ __ret = (uint8x8_t) __builtin_neon_vtst_v((int8x8_t)__p0, (int8x8_t)__p1, 16);
+ return __ret;
+}
+#else
+__ai uint8x8_t vtst_p8(poly8x8_t __p0, poly8x8_t __p1) {
+ poly8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
+ poly8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
+ uint8x8_t __ret;
+ __ret = (uint8x8_t) __builtin_neon_vtst_v((int8x8_t)__rev0, (int8x8_t)__rev1, 16);
+ __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint16x4_t vtst_p16(poly16x4_t __p0, poly16x4_t __p1) {
+ uint16x4_t __ret;
+ __ret = (uint16x4_t) __builtin_neon_vtst_v((int8x8_t)__p0, (int8x8_t)__p1, 17);
+ return __ret;
+}
+#else
+__ai uint16x4_t vtst_p16(poly16x4_t __p0, poly16x4_t __p1) {
+ poly16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
+ poly16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
+ uint16x4_t __ret;
+ __ret = (uint16x4_t) __builtin_neon_vtst_v((int8x8_t)__rev0, (int8x8_t)__rev1, 17);
+ __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint8x16_t vtstq_p8(poly8x16_t __p0, poly8x16_t __p1) {
+ uint8x16_t __ret;
+ __ret = (uint8x16_t) __builtin_neon_vtstq_v((int8x16_t)__p0, (int8x16_t)__p1, 48);
+ return __ret;
+}
+#else
+__ai uint8x16_t vtstq_p8(poly8x16_t __p0, poly8x16_t __p1) {
+ poly8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
+ poly8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
+ uint8x16_t __ret;
+ __ret = (uint8x16_t) __builtin_neon_vtstq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 48);
+ __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint16x8_t vtstq_p16(poly16x8_t __p0, poly16x8_t __p1) {
+ uint16x8_t __ret;
+ __ret = (uint16x8_t) __builtin_neon_vtstq_v((int8x16_t)__p0, (int8x16_t)__p1, 49);
+ return __ret;
+}
+#else
+__ai uint16x8_t vtstq_p16(poly16x8_t __p0, poly16x8_t __p1) {
+ poly16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
+ poly16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
+ uint16x8_t __ret;
+ __ret = (uint16x8_t) __builtin_neon_vtstq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 49);
+ __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint8x16_t vtstq_u8(uint8x16_t __p0, uint8x16_t __p1) {
+ uint8x16_t __ret;
+ __ret = (uint8x16_t) __builtin_neon_vtstq_v((int8x16_t)__p0, (int8x16_t)__p1, 48);
+ return __ret;
+}
+#else
+__ai uint8x16_t vtstq_u8(uint8x16_t __p0, uint8x16_t __p1) {
+ uint8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
+ uint8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
+ uint8x16_t __ret;
+ __ret = (uint8x16_t) __builtin_neon_vtstq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 48);
+ __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint32x4_t vtstq_u32(uint32x4_t __p0, uint32x4_t __p1) {
+ uint32x4_t __ret;
+ __ret = (uint32x4_t) __builtin_neon_vtstq_v((int8x16_t)__p0, (int8x16_t)__p1, 50);
+ return __ret;
+}
+#else
+__ai uint32x4_t vtstq_u32(uint32x4_t __p0, uint32x4_t __p1) {
+ uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
+ uint32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
+ uint32x4_t __ret;
+ __ret = (uint32x4_t) __builtin_neon_vtstq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 50);
+ __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint16x8_t vtstq_u16(uint16x8_t __p0, uint16x8_t __p1) {
+ uint16x8_t __ret;
+ __ret = (uint16x8_t) __builtin_neon_vtstq_v((int8x16_t)__p0, (int8x16_t)__p1, 49);
+ return __ret;
+}
+#else
+__ai uint16x8_t vtstq_u16(uint16x8_t __p0, uint16x8_t __p1) {
+ uint16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
+ uint16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
+ uint16x8_t __ret;
+ __ret = (uint16x8_t) __builtin_neon_vtstq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 49);
+ __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint8x16_t vtstq_s8(int8x16_t __p0, int8x16_t __p1) {
+ uint8x16_t __ret;
+ __ret = (uint8x16_t) __builtin_neon_vtstq_v((int8x16_t)__p0, (int8x16_t)__p1, 48);
+ return __ret;
+}
+#else
+__ai uint8x16_t vtstq_s8(int8x16_t __p0, int8x16_t __p1) {
+ int8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
+ int8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
+ uint8x16_t __ret;
+ __ret = (uint8x16_t) __builtin_neon_vtstq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 48);
+ __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint32x4_t vtstq_s32(int32x4_t __p0, int32x4_t __p1) {
+ uint32x4_t __ret;
+ __ret = (uint32x4_t) __builtin_neon_vtstq_v((int8x16_t)__p0, (int8x16_t)__p1, 50);
+ return __ret;
+}
+#else
+__ai uint32x4_t vtstq_s32(int32x4_t __p0, int32x4_t __p1) {
+ int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
+ int32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
+ uint32x4_t __ret;
+ __ret = (uint32x4_t) __builtin_neon_vtstq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 50);
+ __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint16x8_t vtstq_s16(int16x8_t __p0, int16x8_t __p1) {
+ uint16x8_t __ret;
+ __ret = (uint16x8_t) __builtin_neon_vtstq_v((int8x16_t)__p0, (int8x16_t)__p1, 49);
+ return __ret;
+}
+#else
+__ai uint16x8_t vtstq_s16(int16x8_t __p0, int16x8_t __p1) {
+ int16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
+ int16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
+ uint16x8_t __ret;
+ __ret = (uint16x8_t) __builtin_neon_vtstq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 49);
+ __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint8x8_t vtst_u8(uint8x8_t __p0, uint8x8_t __p1) {
+ uint8x8_t __ret;
+ __ret = (uint8x8_t) __builtin_neon_vtst_v((int8x8_t)__p0, (int8x8_t)__p1, 16);
+ return __ret;
+}
+#else
+__ai uint8x8_t vtst_u8(uint8x8_t __p0, uint8x8_t __p1) {
+ uint8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
+ uint8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
+ uint8x8_t __ret;
+ __ret = (uint8x8_t) __builtin_neon_vtst_v((int8x8_t)__rev0, (int8x8_t)__rev1, 16);
+ __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint32x2_t vtst_u32(uint32x2_t __p0, uint32x2_t __p1) {
+ uint32x2_t __ret;
+ __ret = (uint32x2_t) __builtin_neon_vtst_v((int8x8_t)__p0, (int8x8_t)__p1, 18);
+ return __ret;
+}
+#else
+__ai uint32x2_t vtst_u32(uint32x2_t __p0, uint32x2_t __p1) {
+ uint32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
+ uint32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
+ uint32x2_t __ret;
+ __ret = (uint32x2_t) __builtin_neon_vtst_v((int8x8_t)__rev0, (int8x8_t)__rev1, 18);
+ __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint16x4_t vtst_u16(uint16x4_t __p0, uint16x4_t __p1) {
+ uint16x4_t __ret;
+ __ret = (uint16x4_t) __builtin_neon_vtst_v((int8x8_t)__p0, (int8x8_t)__p1, 17);
+ return __ret;
+}
+#else
+__ai uint16x4_t vtst_u16(uint16x4_t __p0, uint16x4_t __p1) {
+ uint16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
+ uint16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
+ uint16x4_t __ret;
+ __ret = (uint16x4_t) __builtin_neon_vtst_v((int8x8_t)__rev0, (int8x8_t)__rev1, 17);
+ __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint8x8_t vtst_s8(int8x8_t __p0, int8x8_t __p1) {
+ uint8x8_t __ret;
+ __ret = (uint8x8_t) __builtin_neon_vtst_v((int8x8_t)__p0, (int8x8_t)__p1, 16);
+ return __ret;
+}
+#else
+__ai uint8x8_t vtst_s8(int8x8_t __p0, int8x8_t __p1) {
+ int8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
+ int8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
+ uint8x8_t __ret;
+ __ret = (uint8x8_t) __builtin_neon_vtst_v((int8x8_t)__rev0, (int8x8_t)__rev1, 16);
+ __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint32x2_t vtst_s32(int32x2_t __p0, int32x2_t __p1) {
+ uint32x2_t __ret;
+ __ret = (uint32x2_t) __builtin_neon_vtst_v((int8x8_t)__p0, (int8x8_t)__p1, 18);
+ return __ret;
+}
+#else
+__ai uint32x2_t vtst_s32(int32x2_t __p0, int32x2_t __p1) {
+ int32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
+ int32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
+ uint32x2_t __ret;
+ __ret = (uint32x2_t) __builtin_neon_vtst_v((int8x8_t)__rev0, (int8x8_t)__rev1, 18);
+ __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint16x4_t vtst_s16(int16x4_t __p0, int16x4_t __p1) {
+ uint16x4_t __ret;
+ __ret = (uint16x4_t) __builtin_neon_vtst_v((int8x8_t)__p0, (int8x8_t)__p1, 17);
+ return __ret;
+}
+#else
+__ai uint16x4_t vtst_s16(int16x4_t __p0, int16x4_t __p1) {
+ int16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
+ int16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
+ uint16x4_t __ret;
+ __ret = (uint16x4_t) __builtin_neon_vtst_v((int8x8_t)__rev0, (int8x8_t)__rev1, 17);
+ __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
+ return __ret;
+}
+#endif
+
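+/* vuzp/vuzpq: de-interleave (unzip). val[0] gathers the even-numbered
+ * elements of the concatenated inputs, val[1] the odd-numbered ones. */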
+#ifdef __LITTLE_ENDIAN__
+__ai poly8x8x2_t vuzp_p8(poly8x8_t __p0, poly8x8_t __p1) {
+ poly8x8x2_t __ret;
+ __builtin_neon_vuzp_v(&__ret, (int8x8_t)__p0, (int8x8_t)__p1, 4);
+ return __ret;
+}
+#else
+__ai poly8x8x2_t vuzp_p8(poly8x8_t __p0, poly8x8_t __p1) {
+ poly8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
+ poly8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
+ poly8x8x2_t __ret;
+ __builtin_neon_vuzp_v(&__ret, (int8x8_t)__rev0, (int8x8_t)__rev1, 4);
+
+ __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0);
+ __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai poly16x4x2_t vuzp_p16(poly16x4_t __p0, poly16x4_t __p1) {
+ poly16x4x2_t __ret;
+ __builtin_neon_vuzp_v(&__ret, (int8x8_t)__p0, (int8x8_t)__p1, 5);
+ return __ret;
+}
+#else
+__ai poly16x4x2_t vuzp_p16(poly16x4_t __p0, poly16x4_t __p1) {
+ poly16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
+ poly16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
+ poly16x4x2_t __ret;
+ __builtin_neon_vuzp_v(&__ret, (int8x8_t)__rev0, (int8x8_t)__rev1, 5);
+
+ __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0);
+ __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai poly8x16x2_t vuzpq_p8(poly8x16_t __p0, poly8x16_t __p1) {
+ poly8x16x2_t __ret;
+ __builtin_neon_vuzpq_v(&__ret, (int8x16_t)__p0, (int8x16_t)__p1, 36);
+ return __ret;
+}
+#else
+__ai poly8x16x2_t vuzpq_p8(poly8x16_t __p0, poly8x16_t __p1) {
+ poly8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
+ poly8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
+ poly8x16x2_t __ret;
+ __builtin_neon_vuzpq_v(&__ret, (int8x16_t)__rev0, (int8x16_t)__rev1, 36);
+
+ __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
+ __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai poly16x8x2_t vuzpq_p16(poly16x8_t __p0, poly16x8_t __p1) {
+ poly16x8x2_t __ret;
+ __builtin_neon_vuzpq_v(&__ret, (int8x16_t)__p0, (int8x16_t)__p1, 37);
+ return __ret;
+}
+#else
+__ai poly16x8x2_t vuzpq_p16(poly16x8_t __p0, poly16x8_t __p1) {
+ poly16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
+ poly16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
+ poly16x8x2_t __ret;
+ __builtin_neon_vuzpq_v(&__ret, (int8x16_t)__rev0, (int8x16_t)__rev1, 37);
+
+ __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0);
+ __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint8x16x2_t vuzpq_u8(uint8x16_t __p0, uint8x16_t __p1) {
+ uint8x16x2_t __ret;
+ __builtin_neon_vuzpq_v(&__ret, (int8x16_t)__p0, (int8x16_t)__p1, 48);
+ return __ret;
+}
+#else
+__ai uint8x16x2_t vuzpq_u8(uint8x16_t __p0, uint8x16_t __p1) {
+ uint8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
+ uint8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
+ uint8x16x2_t __ret;
+ __builtin_neon_vuzpq_v(&__ret, (int8x16_t)__rev0, (int8x16_t)__rev1, 48);
+
+ __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
+ __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint32x4x2_t vuzpq_u32(uint32x4_t __p0, uint32x4_t __p1) {
+ uint32x4x2_t __ret;
+ __builtin_neon_vuzpq_v(&__ret, (int8x16_t)__p0, (int8x16_t)__p1, 50);
+ return __ret;
+}
+#else
+__ai uint32x4x2_t vuzpq_u32(uint32x4_t __p0, uint32x4_t __p1) {
+ uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
+ uint32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
+ uint32x4x2_t __ret;
+ __builtin_neon_vuzpq_v(&__ret, (int8x16_t)__rev0, (int8x16_t)__rev1, 50);
+
+ __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0);
+ __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint16x8x2_t vuzpq_u16(uint16x8_t __p0, uint16x8_t __p1) {
+ uint16x8x2_t __ret;
+ __builtin_neon_vuzpq_v(&__ret, (int8x16_t)__p0, (int8x16_t)__p1, 49);
+ return __ret;
+}
+#else
+__ai uint16x8x2_t vuzpq_u16(uint16x8_t __p0, uint16x8_t __p1) {
+ uint16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
+ uint16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
+ uint16x8x2_t __ret;
+ __builtin_neon_vuzpq_v(&__ret, (int8x16_t)__rev0, (int8x16_t)__rev1, 49);
+
+ __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0);
+ __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai int8x16x2_t vuzpq_s8(int8x16_t __p0, int8x16_t __p1) {
+ int8x16x2_t __ret;
+ __builtin_neon_vuzpq_v(&__ret, (int8x16_t)__p0, (int8x16_t)__p1, 32);
+ return __ret;
+}
+#else
+__ai int8x16x2_t vuzpq_s8(int8x16_t __p0, int8x16_t __p1) {
+ int8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
+ int8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
+ int8x16x2_t __ret;
+ __builtin_neon_vuzpq_v(&__ret, (int8x16_t)__rev0, (int8x16_t)__rev1, 32);
+
+ __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
+ __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai float32x4x2_t vuzpq_f32(float32x4_t __p0, float32x4_t __p1) {
+ float32x4x2_t __ret;
+ __builtin_neon_vuzpq_v(&__ret, (int8x16_t)__p0, (int8x16_t)__p1, 41);
+ return __ret;
+}
+#else
+__ai float32x4x2_t vuzpq_f32(float32x4_t __p0, float32x4_t __p1) {
+ float32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
+ float32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
+ float32x4x2_t __ret;
+ __builtin_neon_vuzpq_v(&__ret, (int8x16_t)__rev0, (int8x16_t)__rev1, 41);
+
+ __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0);
+ __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai int32x4x2_t vuzpq_s32(int32x4_t __p0, int32x4_t __p1) {
+ int32x4x2_t __ret;
+ __builtin_neon_vuzpq_v(&__ret, (int8x16_t)__p0, (int8x16_t)__p1, 34);
+ return __ret;
+}
+#else
+__ai int32x4x2_t vuzpq_s32(int32x4_t __p0, int32x4_t __p1) {
+ int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
+ int32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
+ int32x4x2_t __ret;
+ __builtin_neon_vuzpq_v(&__ret, (int8x16_t)__rev0, (int8x16_t)__rev1, 34);
+
+ __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0);
+ __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai int16x8x2_t vuzpq_s16(int16x8_t __p0, int16x8_t __p1) {
+ int16x8x2_t __ret;
+ __builtin_neon_vuzpq_v(&__ret, (int8x16_t)__p0, (int8x16_t)__p1, 33);
+ return __ret;
+}
+#else
+__ai int16x8x2_t vuzpq_s16(int16x8_t __p0, int16x8_t __p1) {
+ int16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
+ int16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
+ int16x8x2_t __ret;
+ __builtin_neon_vuzpq_v(&__ret, (int8x16_t)__rev0, (int8x16_t)__rev1, 33);
+
+ __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0);
+ __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint8x8x2_t vuzp_u8(uint8x8_t __p0, uint8x8_t __p1) {
+ uint8x8x2_t __ret;
+ __builtin_neon_vuzp_v(&__ret, (int8x8_t)__p0, (int8x8_t)__p1, 16);
+ return __ret;
+}
+#else
+__ai uint8x8x2_t vuzp_u8(uint8x8_t __p0, uint8x8_t __p1) {
+ uint8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
+ uint8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
+ uint8x8x2_t __ret;
+ __builtin_neon_vuzp_v(&__ret, (int8x8_t)__rev0, (int8x8_t)__rev1, 16);
+
+ __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0);
+ __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint32x2x2_t vuzp_u32(uint32x2_t __p0, uint32x2_t __p1) {
+ uint32x2x2_t __ret;
+ __builtin_neon_vuzp_v(&__ret, (int8x8_t)__p0, (int8x8_t)__p1, 18);
+ return __ret;
+}
+#else
+__ai uint32x2x2_t vuzp_u32(uint32x2_t __p0, uint32x2_t __p1) {
+ uint32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
+ uint32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
+ uint32x2x2_t __ret;
+ __builtin_neon_vuzp_v(&__ret, (int8x8_t)__rev0, (int8x8_t)__rev1, 18);
+
+ __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0);
+ __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint16x4x2_t vuzp_u16(uint16x4_t __p0, uint16x4_t __p1) {
+ uint16x4x2_t __ret;
+ __builtin_neon_vuzp_v(&__ret, (int8x8_t)__p0, (int8x8_t)__p1, 17);
+ return __ret;
+}
+#else
+__ai uint16x4x2_t vuzp_u16(uint16x4_t __p0, uint16x4_t __p1) {
+ uint16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
+ uint16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
+ uint16x4x2_t __ret;
+ __builtin_neon_vuzp_v(&__ret, (int8x8_t)__rev0, (int8x8_t)__rev1, 17);
+
+ __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0);
+ __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai int8x8x2_t vuzp_s8(int8x8_t __p0, int8x8_t __p1) {
+ int8x8x2_t __ret;
+ __builtin_neon_vuzp_v(&__ret, (int8x8_t)__p0, (int8x8_t)__p1, 0);
+ return __ret;
+}
+#else
+__ai int8x8x2_t vuzp_s8(int8x8_t __p0, int8x8_t __p1) {
+ int8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
+ int8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
+ int8x8x2_t __ret;
+ __builtin_neon_vuzp_v(&__ret, (int8x8_t)__rev0, (int8x8_t)__rev1, 0);
+
+ __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0);
+ __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai float32x2x2_t vuzp_f32(float32x2_t __p0, float32x2_t __p1) {
+ float32x2x2_t __ret;
+ __builtin_neon_vuzp_v(&__ret, (int8x8_t)__p0, (int8x8_t)__p1, 9);
+ return __ret;
+}
+#else
+__ai float32x2x2_t vuzp_f32(float32x2_t __p0, float32x2_t __p1) {
+ float32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
+ float32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
+ float32x2x2_t __ret;
+ __builtin_neon_vuzp_v(&__ret, (int8x8_t)__rev0, (int8x8_t)__rev1, 9);
+
+ __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0);
+ __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai int32x2x2_t vuzp_s32(int32x2_t __p0, int32x2_t __p1) {
+ int32x2x2_t __ret;
+ __builtin_neon_vuzp_v(&__ret, (int8x8_t)__p0, (int8x8_t)__p1, 2);
+ return __ret;
+}
+#else
+__ai int32x2x2_t vuzp_s32(int32x2_t __p0, int32x2_t __p1) {
+ int32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
+ int32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
+ int32x2x2_t __ret;
+ __builtin_neon_vuzp_v(&__ret, (int8x8_t)__rev0, (int8x8_t)__rev1, 2);
+
+ __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0);
+ __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai int16x4x2_t vuzp_s16(int16x4_t __p0, int16x4_t __p1) {
+ int16x4x2_t __ret;
+ __builtin_neon_vuzp_v(&__ret, (int8x8_t)__p0, (int8x8_t)__p1, 1);
+ return __ret;
+}
+#else
+__ai int16x4x2_t vuzp_s16(int16x4_t __p0, int16x4_t __p1) {
+ int16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
+ int16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
+ int16x4x2_t __ret;
+ __builtin_neon_vuzp_v(&__ret, (int8x8_t)__rev0, (int8x8_t)__rev1, 1);
+
+ __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0);
+ __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0);
+ return __ret;
+}
+#endif
+
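+/* vzip/vzipq: interleave (zip). val[0] interleaves the low halves of
+ * the two inputs element by element, val[1] the high halves. */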
+#ifdef __LITTLE_ENDIAN__
+__ai poly8x8x2_t vzip_p8(poly8x8_t __p0, poly8x8_t __p1) {
+ poly8x8x2_t __ret;
+ __builtin_neon_vzip_v(&__ret, (int8x8_t)__p0, (int8x8_t)__p1, 4);
+ return __ret;
+}
+#else
+__ai poly8x8x2_t vzip_p8(poly8x8_t __p0, poly8x8_t __p1) {
+ poly8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
+ poly8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
+ poly8x8x2_t __ret;
+ __builtin_neon_vzip_v(&__ret, (int8x8_t)__rev0, (int8x8_t)__rev1, 4);
+
+ __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0);
+ __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai poly16x4x2_t vzip_p16(poly16x4_t __p0, poly16x4_t __p1) {
+ poly16x4x2_t __ret;
+ __builtin_neon_vzip_v(&__ret, (int8x8_t)__p0, (int8x8_t)__p1, 5);
+ return __ret;
+}
+#else
+__ai poly16x4x2_t vzip_p16(poly16x4_t __p0, poly16x4_t __p1) {
+ poly16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
+ poly16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
+ poly16x4x2_t __ret;
+ __builtin_neon_vzip_v(&__ret, (int8x8_t)__rev0, (int8x8_t)__rev1, 5);
+
+ __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0);
+ __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai poly8x16x2_t vzipq_p8(poly8x16_t __p0, poly8x16_t __p1) {
+ poly8x16x2_t __ret;
+ __builtin_neon_vzipq_v(&__ret, (int8x16_t)__p0, (int8x16_t)__p1, 36);
+ return __ret;
+}
+#else
+__ai poly8x16x2_t vzipq_p8(poly8x16_t __p0, poly8x16_t __p1) {
+ poly8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
+ poly8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
+ poly8x16x2_t __ret;
+ __builtin_neon_vzipq_v(&__ret, (int8x16_t)__rev0, (int8x16_t)__rev1, 36);
+
+ __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
+ __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai poly16x8x2_t vzipq_p16(poly16x8_t __p0, poly16x8_t __p1) {
+ poly16x8x2_t __ret;
+ __builtin_neon_vzipq_v(&__ret, (int8x16_t)__p0, (int8x16_t)__p1, 37);
+ return __ret;
+}
+#else
+__ai poly16x8x2_t vzipq_p16(poly16x8_t __p0, poly16x8_t __p1) {
+ poly16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
+ poly16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
+ poly16x8x2_t __ret;
+ __builtin_neon_vzipq_v(&__ret, (int8x16_t)__rev0, (int8x16_t)__rev1, 37);
+
+ __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0);
+ __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint8x16x2_t vzipq_u8(uint8x16_t __p0, uint8x16_t __p1) {
+ uint8x16x2_t __ret;
+ __builtin_neon_vzipq_v(&__ret, (int8x16_t)__p0, (int8x16_t)__p1, 48);
+ return __ret;
+}
+#else
+__ai uint8x16x2_t vzipq_u8(uint8x16_t __p0, uint8x16_t __p1) {
+ uint8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
+ uint8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
+ uint8x16x2_t __ret;
+ __builtin_neon_vzipq_v(&__ret, (int8x16_t)__rev0, (int8x16_t)__rev1, 48);
+
+ __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
+ __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint32x4x2_t vzipq_u32(uint32x4_t __p0, uint32x4_t __p1) {
+ uint32x4x2_t __ret;
+ __builtin_neon_vzipq_v(&__ret, (int8x16_t)__p0, (int8x16_t)__p1, 50);
+ return __ret;
+}
+#else
+__ai uint32x4x2_t vzipq_u32(uint32x4_t __p0, uint32x4_t __p1) {
+ uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
+ uint32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
+ uint32x4x2_t __ret;
+ __builtin_neon_vzipq_v(&__ret, (int8x16_t)__rev0, (int8x16_t)__rev1, 50);
+
+ __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0);
+ __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint16x8x2_t vzipq_u16(uint16x8_t __p0, uint16x8_t __p1) {
+ uint16x8x2_t __ret;
+ __builtin_neon_vzipq_v(&__ret, (int8x16_t)__p0, (int8x16_t)__p1, 49);
+ return __ret;
+}
+#else
+__ai uint16x8x2_t vzipq_u16(uint16x8_t __p0, uint16x8_t __p1) {
+ uint16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
+ uint16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
+ uint16x8x2_t __ret;
+ __builtin_neon_vzipq_v(&__ret, (int8x16_t)__rev0, (int8x16_t)__rev1, 49);
+
+ __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0);
+ __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai int8x16x2_t vzipq_s8(int8x16_t __p0, int8x16_t __p1) {
+ int8x16x2_t __ret;
+ __builtin_neon_vzipq_v(&__ret, (int8x16_t)__p0, (int8x16_t)__p1, 32);
+ return __ret;
+}
+#else
+__ai int8x16x2_t vzipq_s8(int8x16_t __p0, int8x16_t __p1) {
+ int8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
+ int8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
+ int8x16x2_t __ret;
+ __builtin_neon_vzipq_v(&__ret, (int8x16_t)__rev0, (int8x16_t)__rev1, 32);
+
+ __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
+ __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai float32x4x2_t vzipq_f32(float32x4_t __p0, float32x4_t __p1) {
+ float32x4x2_t __ret;
+ __builtin_neon_vzipq_v(&__ret, (int8x16_t)__p0, (int8x16_t)__p1, 41);
+ return __ret;
+}
+#else
+__ai float32x4x2_t vzipq_f32(float32x4_t __p0, float32x4_t __p1) {
+ float32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
+ float32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
+ float32x4x2_t __ret;
+ __builtin_neon_vzipq_v(&__ret, (int8x16_t)__rev0, (int8x16_t)__rev1, 41);
+
+ __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0);
+ __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai int32x4x2_t vzipq_s32(int32x4_t __p0, int32x4_t __p1) {
+ int32x4x2_t __ret;
+ __builtin_neon_vzipq_v(&__ret, (int8x16_t)__p0, (int8x16_t)__p1, 34);
+ return __ret;
+}
+#else
+__ai int32x4x2_t vzipq_s32(int32x4_t __p0, int32x4_t __p1) {
+ int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
+ int32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
+ int32x4x2_t __ret;
+ __builtin_neon_vzipq_v(&__ret, (int8x16_t)__rev0, (int8x16_t)__rev1, 34);
+
+ __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0);
+ __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai int16x8x2_t vzipq_s16(int16x8_t __p0, int16x8_t __p1) {
+ int16x8x2_t __ret;
+ __builtin_neon_vzipq_v(&__ret, (int8x16_t)__p0, (int8x16_t)__p1, 33);
+ return __ret;
+}
+#else
+__ai int16x8x2_t vzipq_s16(int16x8_t __p0, int16x8_t __p1) {
+ int16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
+ int16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
+ int16x8x2_t __ret;
+ __builtin_neon_vzipq_v(&__ret, (int8x16_t)__rev0, (int8x16_t)__rev1, 33);
+
+ __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0);
+ __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint8x8x2_t vzip_u8(uint8x8_t __p0, uint8x8_t __p1) {
+ uint8x8x2_t __ret;
+ __builtin_neon_vzip_v(&__ret, (int8x8_t)__p0, (int8x8_t)__p1, 16);
+ return __ret;
+}
+#else
+__ai uint8x8x2_t vzip_u8(uint8x8_t __p0, uint8x8_t __p1) {
+ uint8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
+ uint8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
+ uint8x8x2_t __ret;
+ __builtin_neon_vzip_v(&__ret, (int8x8_t)__rev0, (int8x8_t)__rev1, 16);
+
+ __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0);
+ __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0);
+ return __ret;
+}
+#endif
+
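+/* For example, vzip_u8 interleaves its operands lane by lane:
+ *
+ *   uint8x8_t a = vdup_n_u8(1), b = vdup_n_u8(2);
+ *   uint8x8x2_t z = vzip_u8(a, b);
+ *   // z.val[0] == {1,2,1,2,1,2,1,2}, z.val[1] == {1,2,1,2,1,2,1,2}
+ *
+ * More generally, with a = {a0,...,a7} and b = {b0,...,b7} the result is
+ * val[0] = {a0,b0,a1,b1,a2,b2,a3,b3} and val[1] = {a4,b4,a5,b5,a6,b6,a7,b7}. */
+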
+#ifdef __LITTLE_ENDIAN__
+__ai uint32x2x2_t vzip_u32(uint32x2_t __p0, uint32x2_t __p1) {
+ uint32x2x2_t __ret;
+ __builtin_neon_vzip_v(&__ret, (int8x8_t)__p0, (int8x8_t)__p1, 18);
+ return __ret;
+}
+#else
+__ai uint32x2x2_t vzip_u32(uint32x2_t __p0, uint32x2_t __p1) {
+ uint32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
+ uint32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
+ uint32x2x2_t __ret;
+ __builtin_neon_vzip_v(&__ret, (int8x8_t)__rev0, (int8x8_t)__rev1, 18);
+
+ __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0);
+ __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint16x4x2_t vzip_u16(uint16x4_t __p0, uint16x4_t __p1) {
+ uint16x4x2_t __ret;
+ __builtin_neon_vzip_v(&__ret, (int8x8_t)__p0, (int8x8_t)__p1, 17);
+ return __ret;
+}
+#else
+__ai uint16x4x2_t vzip_u16(uint16x4_t __p0, uint16x4_t __p1) {
+ uint16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
+ uint16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
+ uint16x4x2_t __ret;
+ __builtin_neon_vzip_v(&__ret, (int8x8_t)__rev0, (int8x8_t)__rev1, 17);
+
+ __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0);
+ __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai int8x8x2_t vzip_s8(int8x8_t __p0, int8x8_t __p1) {
+ int8x8x2_t __ret;
+ __builtin_neon_vzip_v(&__ret, (int8x8_t)__p0, (int8x8_t)__p1, 0);
+ return __ret;
+}
+#else
+__ai int8x8x2_t vzip_s8(int8x8_t __p0, int8x8_t __p1) {
+ int8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
+ int8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
+ int8x8x2_t __ret;
+ __builtin_neon_vzip_v(&__ret, (int8x8_t)__rev0, (int8x8_t)__rev1, 0);
+
+ __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0);
+ __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai float32x2x2_t vzip_f32(float32x2_t __p0, float32x2_t __p1) {
+ float32x2x2_t __ret;
+ __builtin_neon_vzip_v(&__ret, (int8x8_t)__p0, (int8x8_t)__p1, 9);
+ return __ret;
+}
+#else
+__ai float32x2x2_t vzip_f32(float32x2_t __p0, float32x2_t __p1) {
+ float32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
+ float32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
+ float32x2x2_t __ret;
+ __builtin_neon_vzip_v(&__ret, (int8x8_t)__rev0, (int8x8_t)__rev1, 9);
+
+ __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0);
+ __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai int32x2x2_t vzip_s32(int32x2_t __p0, int32x2_t __p1) {
+ int32x2x2_t __ret;
+ __builtin_neon_vzip_v(&__ret, (int8x8_t)__p0, (int8x8_t)__p1, 2);
+ return __ret;
+}
+#else
+__ai int32x2x2_t vzip_s32(int32x2_t __p0, int32x2_t __p1) {
+ int32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
+ int32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
+ int32x2x2_t __ret;
+ __builtin_neon_vzip_v(&__ret, (int8x8_t)__rev0, (int8x8_t)__rev1, 2);
+
+ __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0);
+ __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai int16x4x2_t vzip_s16(int16x4_t __p0, int16x4_t __p1) {
+ int16x4x2_t __ret;
+ __builtin_neon_vzip_v(&__ret, (int8x8_t)__p0, (int8x8_t)__p1, 1);
+ return __ret;
+}
+#else
+__ai int16x4x2_t vzip_s16(int16x4_t __p0, int16x4_t __p1) {
+ int16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
+ int16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
+ int16x4x2_t __ret;
+ __builtin_neon_vzip_v(&__ret, (int8x8_t)__rev0, (int8x8_t)__rev1, 1);
+
+ __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0);
+ __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0);
+ return __ret;
+}
+#endif
+
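+/* The vreinterpret family relabels the bits already held in a register as a
+ * different vector type; no lanes move, so the little- and big-endian
+ * definitions are identical plain C casts. The conversions inside this
+ * !defined(__aarch64__) guard are compiled only for 32-bit ARM. For example,
+ * vreinterpret_u32_f32 exposes the raw IEEE-754 bit pattern of each float
+ * lane without performing a numeric conversion. */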
+#if !defined(__aarch64__)
+#ifdef __LITTLE_ENDIAN__
+__ai poly8x8_t vreinterpret_p8_p16(poly16x4_t __p0) {
+ poly8x8_t __ret;
+ __ret = (poly8x8_t)(__p0);
+ return __ret;
+}
+#else
+__ai poly8x8_t vreinterpret_p8_p16(poly16x4_t __p0) {
+ poly8x8_t __ret;
+ __ret = (poly8x8_t)(__p0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai poly8x8_t vreinterpret_p8_u8(uint8x8_t __p0) {
+ poly8x8_t __ret;
+ __ret = (poly8x8_t)(__p0);
+ return __ret;
+}
+#else
+__ai poly8x8_t vreinterpret_p8_u8(uint8x8_t __p0) {
+ poly8x8_t __ret;
+ __ret = (poly8x8_t)(__p0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai poly8x8_t vreinterpret_p8_u32(uint32x2_t __p0) {
+ poly8x8_t __ret;
+ __ret = (poly8x8_t)(__p0);
+ return __ret;
+}
+#else
+__ai poly8x8_t vreinterpret_p8_u32(uint32x2_t __p0) {
+ poly8x8_t __ret;
+ __ret = (poly8x8_t)(__p0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai poly8x8_t vreinterpret_p8_u64(uint64x1_t __p0) {
+ poly8x8_t __ret;
+ __ret = (poly8x8_t)(__p0);
+ return __ret;
+}
+#else
+__ai poly8x8_t vreinterpret_p8_u64(uint64x1_t __p0) {
+ poly8x8_t __ret;
+ __ret = (poly8x8_t)(__p0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai poly8x8_t vreinterpret_p8_u16(uint16x4_t __p0) {
+ poly8x8_t __ret;
+ __ret = (poly8x8_t)(__p0);
+ return __ret;
+}
+#else
+__ai poly8x8_t vreinterpret_p8_u16(uint16x4_t __p0) {
+ poly8x8_t __ret;
+ __ret = (poly8x8_t)(__p0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai poly8x8_t vreinterpret_p8_s8(int8x8_t __p0) {
+ poly8x8_t __ret;
+ __ret = (poly8x8_t)(__p0);
+ return __ret;
+}
+#else
+__ai poly8x8_t vreinterpret_p8_s8(int8x8_t __p0) {
+ poly8x8_t __ret;
+ __ret = (poly8x8_t)(__p0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai poly8x8_t vreinterpret_p8_f32(float32x2_t __p0) {
+ poly8x8_t __ret;
+ __ret = (poly8x8_t)(__p0);
+ return __ret;
+}
+#else
+__ai poly8x8_t vreinterpret_p8_f32(float32x2_t __p0) {
+ poly8x8_t __ret;
+ __ret = (poly8x8_t)(__p0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai poly8x8_t vreinterpret_p8_f16(float16x4_t __p0) {
+ poly8x8_t __ret;
+ __ret = (poly8x8_t)(__p0);
+ return __ret;
+}
+#else
+__ai poly8x8_t vreinterpret_p8_f16(float16x4_t __p0) {
+ poly8x8_t __ret;
+ __ret = (poly8x8_t)(__p0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai poly8x8_t vreinterpret_p8_s32(int32x2_t __p0) {
+ poly8x8_t __ret;
+ __ret = (poly8x8_t)(__p0);
+ return __ret;
+}
+#else
+__ai poly8x8_t vreinterpret_p8_s32(int32x2_t __p0) {
+ poly8x8_t __ret;
+ __ret = (poly8x8_t)(__p0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai poly8x8_t vreinterpret_p8_s64(int64x1_t __p0) {
+ poly8x8_t __ret;
+ __ret = (poly8x8_t)(__p0);
+ return __ret;
+}
+#else
+__ai poly8x8_t vreinterpret_p8_s64(int64x1_t __p0) {
+ poly8x8_t __ret;
+ __ret = (poly8x8_t)(__p0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai poly8x8_t vreinterpret_p8_s16(int16x4_t __p0) {
+ poly8x8_t __ret;
+ __ret = (poly8x8_t)(__p0);
+ return __ret;
+}
+#else
+__ai poly8x8_t vreinterpret_p8_s16(int16x4_t __p0) {
+ poly8x8_t __ret;
+ __ret = (poly8x8_t)(__p0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai poly16x4_t vreinterpret_p16_p8(poly8x8_t __p0) {
+ poly16x4_t __ret;
+ __ret = (poly16x4_t)(__p0);
+ return __ret;
+}
+#else
+__ai poly16x4_t vreinterpret_p16_p8(poly8x8_t __p0) {
+ poly16x4_t __ret;
+ __ret = (poly16x4_t)(__p0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai poly16x4_t vreinterpret_p16_u8(uint8x8_t __p0) {
+ poly16x4_t __ret;
+ __ret = (poly16x4_t)(__p0);
+ return __ret;
+}
+#else
+__ai poly16x4_t vreinterpret_p16_u8(uint8x8_t __p0) {
+ poly16x4_t __ret;
+ __ret = (poly16x4_t)(__p0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai poly16x4_t vreinterpret_p16_u32(uint32x2_t __p0) {
+ poly16x4_t __ret;
+ __ret = (poly16x4_t)(__p0);
+ return __ret;
+}
+#else
+__ai poly16x4_t vreinterpret_p16_u32(uint32x2_t __p0) {
+ poly16x4_t __ret;
+ __ret = (poly16x4_t)(__p0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai poly16x4_t vreinterpret_p16_u64(uint64x1_t __p0) {
+ poly16x4_t __ret;
+ __ret = (poly16x4_t)(__p0);
+ return __ret;
+}
+#else
+__ai poly16x4_t vreinterpret_p16_u64(uint64x1_t __p0) {
+ poly16x4_t __ret;
+ __ret = (poly16x4_t)(__p0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai poly16x4_t vreinterpret_p16_u16(uint16x4_t __p0) {
+ poly16x4_t __ret;
+ __ret = (poly16x4_t)(__p0);
+ return __ret;
+}
+#else
+__ai poly16x4_t vreinterpret_p16_u16(uint16x4_t __p0) {
+ poly16x4_t __ret;
+ __ret = (poly16x4_t)(__p0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai poly16x4_t vreinterpret_p16_s8(int8x8_t __p0) {
+ poly16x4_t __ret;
+ __ret = (poly16x4_t)(__p0);
+ return __ret;
+}
+#else
+__ai poly16x4_t vreinterpret_p16_s8(int8x8_t __p0) {
+ poly16x4_t __ret;
+ __ret = (poly16x4_t)(__p0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai poly16x4_t vreinterpret_p16_f32(float32x2_t __p0) {
+ poly16x4_t __ret;
+ __ret = (poly16x4_t)(__p0);
+ return __ret;
+}
+#else
+__ai poly16x4_t vreinterpret_p16_f32(float32x2_t __p0) {
+ poly16x4_t __ret;
+ __ret = (poly16x4_t)(__p0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai poly16x4_t vreinterpret_p16_f16(float16x4_t __p0) {
+ poly16x4_t __ret;
+ __ret = (poly16x4_t)(__p0);
+ return __ret;
+}
+#else
+__ai poly16x4_t vreinterpret_p16_f16(float16x4_t __p0) {
+ poly16x4_t __ret;
+ __ret = (poly16x4_t)(__p0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai poly16x4_t vreinterpret_p16_s32(int32x2_t __p0) {
+ poly16x4_t __ret;
+ __ret = (poly16x4_t)(__p0);
+ return __ret;
+}
+#else
+__ai poly16x4_t vreinterpret_p16_s32(int32x2_t __p0) {
+ poly16x4_t __ret;
+ __ret = (poly16x4_t)(__p0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai poly16x4_t vreinterpret_p16_s64(int64x1_t __p0) {
+ poly16x4_t __ret;
+ __ret = (poly16x4_t)(__p0);
+ return __ret;
+}
+#else
+__ai poly16x4_t vreinterpret_p16_s64(int64x1_t __p0) {
+ poly16x4_t __ret;
+ __ret = (poly16x4_t)(__p0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai poly16x4_t vreinterpret_p16_s16(int16x4_t __p0) {
+ poly16x4_t __ret;
+ __ret = (poly16x4_t)(__p0);
+ return __ret;
+}
+#else
+__ai poly16x4_t vreinterpret_p16_s16(int16x4_t __p0) {
+ poly16x4_t __ret;
+ __ret = (poly16x4_t)(__p0);
+ return __ret;
+}
+#endif
+
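+/* The vreinterpretq_* conversions below operate on the 128-bit (quad)
+ * register types; they follow the same pattern as the 64-bit forms. */
+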
+#ifdef __LITTLE_ENDIAN__
+__ai poly8x16_t vreinterpretq_p8_p16(poly16x8_t __p0) {
+ poly8x16_t __ret;
+ __ret = (poly8x16_t)(__p0);
+ return __ret;
+}
+#else
+__ai poly8x16_t vreinterpretq_p8_p16(poly16x8_t __p0) {
+ poly8x16_t __ret;
+ __ret = (poly8x16_t)(__p0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai poly8x16_t vreinterpretq_p8_u8(uint8x16_t __p0) {
+ poly8x16_t __ret;
+ __ret = (poly8x16_t)(__p0);
+ return __ret;
+}
+#else
+__ai poly8x16_t vreinterpretq_p8_u8(uint8x16_t __p0) {
+ poly8x16_t __ret;
+ __ret = (poly8x16_t)(__p0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai poly8x16_t vreinterpretq_p8_u32(uint32x4_t __p0) {
+ poly8x16_t __ret;
+ __ret = (poly8x16_t)(__p0);
+ return __ret;
+}
+#else
+__ai poly8x16_t vreinterpretq_p8_u32(uint32x4_t __p0) {
+ poly8x16_t __ret;
+ __ret = (poly8x16_t)(__p0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai poly8x16_t vreinterpretq_p8_u64(uint64x2_t __p0) {
+ poly8x16_t __ret;
+ __ret = (poly8x16_t)(__p0);
+ return __ret;
+}
+#else
+__ai poly8x16_t vreinterpretq_p8_u64(uint64x2_t __p0) {
+ poly8x16_t __ret;
+ __ret = (poly8x16_t)(__p0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai poly8x16_t vreinterpretq_p8_u16(uint16x8_t __p0) {
+ poly8x16_t __ret;
+ __ret = (poly8x16_t)(__p0);
+ return __ret;
+}
+#else
+__ai poly8x16_t vreinterpretq_p8_u16(uint16x8_t __p0) {
+ poly8x16_t __ret;
+ __ret = (poly8x16_t)(__p0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai poly8x16_t vreinterpretq_p8_s8(int8x16_t __p0) {
+ poly8x16_t __ret;
+ __ret = (poly8x16_t)(__p0);
+ return __ret;
+}
+#else
+__ai poly8x16_t vreinterpretq_p8_s8(int8x16_t __p0) {
+ poly8x16_t __ret;
+ __ret = (poly8x16_t)(__p0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai poly8x16_t vreinterpretq_p8_f32(float32x4_t __p0) {
+ poly8x16_t __ret;
+ __ret = (poly8x16_t)(__p0);
+ return __ret;
+}
+#else
+__ai poly8x16_t vreinterpretq_p8_f32(float32x4_t __p0) {
+ poly8x16_t __ret;
+ __ret = (poly8x16_t)(__p0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai poly8x16_t vreinterpretq_p8_f16(float16x8_t __p0) {
+ poly8x16_t __ret;
+ __ret = (poly8x16_t)(__p0);
+ return __ret;
+}
+#else
+__ai poly8x16_t vreinterpretq_p8_f16(float16x8_t __p0) {
+ poly8x16_t __ret;
+ __ret = (poly8x16_t)(__p0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai poly8x16_t vreinterpretq_p8_s32(int32x4_t __p0) {
+ poly8x16_t __ret;
+ __ret = (poly8x16_t)(__p0);
+ return __ret;
+}
+#else
+__ai poly8x16_t vreinterpretq_p8_s32(int32x4_t __p0) {
+ poly8x16_t __ret;
+ __ret = (poly8x16_t)(__p0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai poly8x16_t vreinterpretq_p8_s64(int64x2_t __p0) {
+ poly8x16_t __ret;
+ __ret = (poly8x16_t)(__p0);
+ return __ret;
+}
+#else
+__ai poly8x16_t vreinterpretq_p8_s64(int64x2_t __p0) {
+ poly8x16_t __ret;
+ __ret = (poly8x16_t)(__p0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai poly8x16_t vreinterpretq_p8_s16(int16x8_t __p0) {
+ poly8x16_t __ret;
+ __ret = (poly8x16_t)(__p0);
+ return __ret;
+}
+#else
+__ai poly8x16_t vreinterpretq_p8_s16(int16x8_t __p0) {
+ poly8x16_t __ret;
+ __ret = (poly8x16_t)(__p0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai poly16x8_t vreinterpretq_p16_p8(poly8x16_t __p0) {
+ poly16x8_t __ret;
+ __ret = (poly16x8_t)(__p0);
+ return __ret;
+}
+#else
+__ai poly16x8_t vreinterpretq_p16_p8(poly8x16_t __p0) {
+ poly16x8_t __ret;
+ __ret = (poly16x8_t)(__p0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai poly16x8_t vreinterpretq_p16_u8(uint8x16_t __p0) {
+ poly16x8_t __ret;
+ __ret = (poly16x8_t)(__p0);
+ return __ret;
+}
+#else
+__ai poly16x8_t vreinterpretq_p16_u8(uint8x16_t __p0) {
+ poly16x8_t __ret;
+ __ret = (poly16x8_t)(__p0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai poly16x8_t vreinterpretq_p16_u32(uint32x4_t __p0) {
+ poly16x8_t __ret;
+ __ret = (poly16x8_t)(__p0);
+ return __ret;
+}
+#else
+__ai poly16x8_t vreinterpretq_p16_u32(uint32x4_t __p0) {
+ poly16x8_t __ret;
+ __ret = (poly16x8_t)(__p0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai poly16x8_t vreinterpretq_p16_u64(uint64x2_t __p0) {
+ poly16x8_t __ret;
+ __ret = (poly16x8_t)(__p0);
+ return __ret;
+}
+#else
+__ai poly16x8_t vreinterpretq_p16_u64(uint64x2_t __p0) {
+ poly16x8_t __ret;
+ __ret = (poly16x8_t)(__p0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai poly16x8_t vreinterpretq_p16_u16(uint16x8_t __p0) {
+ poly16x8_t __ret;
+ __ret = (poly16x8_t)(__p0);
+ return __ret;
+}
+#else
+__ai poly16x8_t vreinterpretq_p16_u16(uint16x8_t __p0) {
+ poly16x8_t __ret;
+ __ret = (poly16x8_t)(__p0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai poly16x8_t vreinterpretq_p16_s8(int8x16_t __p0) {
+ poly16x8_t __ret;
+ __ret = (poly16x8_t)(__p0);
+ return __ret;
+}
+#else
+__ai poly16x8_t vreinterpretq_p16_s8(int8x16_t __p0) {
+ poly16x8_t __ret;
+ __ret = (poly16x8_t)(__p0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai poly16x8_t vreinterpretq_p16_f32(float32x4_t __p0) {
+ poly16x8_t __ret;
+ __ret = (poly16x8_t)(__p0);
+ return __ret;
+}
+#else
+__ai poly16x8_t vreinterpretq_p16_f32(float32x4_t __p0) {
+ poly16x8_t __ret;
+ __ret = (poly16x8_t)(__p0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai poly16x8_t vreinterpretq_p16_f16(float16x8_t __p0) {
+ poly16x8_t __ret;
+ __ret = (poly16x8_t)(__p0);
+ return __ret;
+}
+#else
+__ai poly16x8_t vreinterpretq_p16_f16(float16x8_t __p0) {
+ poly16x8_t __ret;
+ __ret = (poly16x8_t)(__p0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai poly16x8_t vreinterpretq_p16_s32(int32x4_t __p0) {
+ poly16x8_t __ret;
+ __ret = (poly16x8_t)(__p0);
+ return __ret;
+}
+#else
+__ai poly16x8_t vreinterpretq_p16_s32(int32x4_t __p0) {
+ poly16x8_t __ret;
+ __ret = (poly16x8_t)(__p0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai poly16x8_t vreinterpretq_p16_s64(int64x2_t __p0) {
+ poly16x8_t __ret;
+ __ret = (poly16x8_t)(__p0);
+ return __ret;
+}
+#else
+__ai poly16x8_t vreinterpretq_p16_s64(int64x2_t __p0) {
+ poly16x8_t __ret;
+ __ret = (poly16x8_t)(__p0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai poly16x8_t vreinterpretq_p16_s16(int16x8_t __p0) {
+ poly16x8_t __ret;
+ __ret = (poly16x8_t)(__p0);
+ return __ret;
+}
+#else
+__ai poly16x8_t vreinterpretq_p16_s16(int16x8_t __p0) {
+ poly16x8_t __ret;
+ __ret = (poly16x8_t)(__p0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint8x16_t vreinterpretq_u8_p8(poly8x16_t __p0) {
+ uint8x16_t __ret;
+ __ret = (uint8x16_t)(__p0);
+ return __ret;
+}
+#else
+__ai uint8x16_t vreinterpretq_u8_p8(poly8x16_t __p0) {
+ uint8x16_t __ret;
+ __ret = (uint8x16_t)(__p0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint8x16_t vreinterpretq_u8_p16(poly16x8_t __p0) {
+ uint8x16_t __ret;
+ __ret = (uint8x16_t)(__p0);
+ return __ret;
+}
+#else
+__ai uint8x16_t vreinterpretq_u8_p16(poly16x8_t __p0) {
+ uint8x16_t __ret;
+ __ret = (uint8x16_t)(__p0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint8x16_t vreinterpretq_u8_u32(uint32x4_t __p0) {
+ uint8x16_t __ret;
+ __ret = (uint8x16_t)(__p0);
+ return __ret;
+}
+#else
+__ai uint8x16_t vreinterpretq_u8_u32(uint32x4_t __p0) {
+ uint8x16_t __ret;
+ __ret = (uint8x16_t)(__p0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint8x16_t vreinterpretq_u8_u64(uint64x2_t __p0) {
+ uint8x16_t __ret;
+ __ret = (uint8x16_t)(__p0);
+ return __ret;
+}
+#else
+__ai uint8x16_t vreinterpretq_u8_u64(uint64x2_t __p0) {
+ uint8x16_t __ret;
+ __ret = (uint8x16_t)(__p0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint8x16_t vreinterpretq_u8_u16(uint16x8_t __p0) {
+ uint8x16_t __ret;
+ __ret = (uint8x16_t)(__p0);
+ return __ret;
+}
+#else
+__ai uint8x16_t vreinterpretq_u8_u16(uint16x8_t __p0) {
+ uint8x16_t __ret;
+ __ret = (uint8x16_t)(__p0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint8x16_t vreinterpretq_u8_s8(int8x16_t __p0) {
+ uint8x16_t __ret;
+ __ret = (uint8x16_t)(__p0);
+ return __ret;
+}
+#else
+__ai uint8x16_t vreinterpretq_u8_s8(int8x16_t __p0) {
+ uint8x16_t __ret;
+ __ret = (uint8x16_t)(__p0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint8x16_t vreinterpretq_u8_f32(float32x4_t __p0) {
+ uint8x16_t __ret;
+ __ret = (uint8x16_t)(__p0);
+ return __ret;
+}
+#else
+__ai uint8x16_t vreinterpretq_u8_f32(float32x4_t __p0) {
+ uint8x16_t __ret;
+ __ret = (uint8x16_t)(__p0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint8x16_t vreinterpretq_u8_f16(float16x8_t __p0) {
+ uint8x16_t __ret;
+ __ret = (uint8x16_t)(__p0);
+ return __ret;
+}
+#else
+__ai uint8x16_t vreinterpretq_u8_f16(float16x8_t __p0) {
+ uint8x16_t __ret;
+ __ret = (uint8x16_t)(__p0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint8x16_t vreinterpretq_u8_s32(int32x4_t __p0) {
+ uint8x16_t __ret;
+ __ret = (uint8x16_t)(__p0);
+ return __ret;
+}
+#else
+__ai uint8x16_t vreinterpretq_u8_s32(int32x4_t __p0) {
+ uint8x16_t __ret;
+ __ret = (uint8x16_t)(__p0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint8x16_t vreinterpretq_u8_s64(int64x2_t __p0) {
+ uint8x16_t __ret;
+ __ret = (uint8x16_t)(__p0);
+ return __ret;
+}
+#else
+__ai uint8x16_t vreinterpretq_u8_s64(int64x2_t __p0) {
+ uint8x16_t __ret;
+ __ret = (uint8x16_t)(__p0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint8x16_t vreinterpretq_u8_s16(int16x8_t __p0) {
+ uint8x16_t __ret;
+ __ret = (uint8x16_t)(__p0);
+ return __ret;
+}
+#else
+__ai uint8x16_t vreinterpretq_u8_s16(int16x8_t __p0) {
+ uint8x16_t __ret;
+ __ret = (uint8x16_t)(__p0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint32x4_t vreinterpretq_u32_p8(poly8x16_t __p0) {
+ uint32x4_t __ret;
+ __ret = (uint32x4_t)(__p0);
+ return __ret;
+}
+#else
+__ai uint32x4_t vreinterpretq_u32_p8(poly8x16_t __p0) {
+ uint32x4_t __ret;
+ __ret = (uint32x4_t)(__p0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint32x4_t vreinterpretq_u32_p16(poly16x8_t __p0) {
+ uint32x4_t __ret;
+ __ret = (uint32x4_t)(__p0);
+ return __ret;
+}
+#else
+__ai uint32x4_t vreinterpretq_u32_p16(poly16x8_t __p0) {
+ uint32x4_t __ret;
+ __ret = (uint32x4_t)(__p0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint32x4_t vreinterpretq_u32_u8(uint8x16_t __p0) {
+ uint32x4_t __ret;
+ __ret = (uint32x4_t)(__p0);
+ return __ret;
+}
+#else
+__ai uint32x4_t vreinterpretq_u32_u8(uint8x16_t __p0) {
+ uint32x4_t __ret;
+ __ret = (uint32x4_t)(__p0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint32x4_t vreinterpretq_u32_u64(uint64x2_t __p0) {
+ uint32x4_t __ret;
+ __ret = (uint32x4_t)(__p0);
+ return __ret;
+}
+#else
+__ai uint32x4_t vreinterpretq_u32_u64(uint64x2_t __p0) {
+ uint32x4_t __ret;
+ __ret = (uint32x4_t)(__p0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint32x4_t vreinterpretq_u32_u16(uint16x8_t __p0) {
+ uint32x4_t __ret;
+ __ret = (uint32x4_t)(__p0);
+ return __ret;
+}
+#else
+__ai uint32x4_t vreinterpretq_u32_u16(uint16x8_t __p0) {
+ uint32x4_t __ret;
+ __ret = (uint32x4_t)(__p0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint32x4_t vreinterpretq_u32_s8(int8x16_t __p0) {
+ uint32x4_t __ret;
+ __ret = (uint32x4_t)(__p0);
+ return __ret;
+}
+#else
+__ai uint32x4_t vreinterpretq_u32_s8(int8x16_t __p0) {
+ uint32x4_t __ret;
+ __ret = (uint32x4_t)(__p0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint32x4_t vreinterpretq_u32_f32(float32x4_t __p0) {
+ uint32x4_t __ret;
+ __ret = (uint32x4_t)(__p0);
+ return __ret;
+}
+#else
+__ai uint32x4_t vreinterpretq_u32_f32(float32x4_t __p0) {
+ uint32x4_t __ret;
+ __ret = (uint32x4_t)(__p0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint32x4_t vreinterpretq_u32_f16(float16x8_t __p0) {
+ uint32x4_t __ret;
+ __ret = (uint32x4_t)(__p0);
+ return __ret;
+}
+#else
+__ai uint32x4_t vreinterpretq_u32_f16(float16x8_t __p0) {
+ uint32x4_t __ret;
+ __ret = (uint32x4_t)(__p0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint32x4_t vreinterpretq_u32_s32(int32x4_t __p0) {
+ uint32x4_t __ret;
+ __ret = (uint32x4_t)(__p0);
+ return __ret;
+}
+#else
+__ai uint32x4_t vreinterpretq_u32_s32(int32x4_t __p0) {
+ uint32x4_t __ret;
+ __ret = (uint32x4_t)(__p0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint32x4_t vreinterpretq_u32_s64(int64x2_t __p0) {
+ uint32x4_t __ret;
+ __ret = (uint32x4_t)(__p0);
+ return __ret;
+}
+#else
+__ai uint32x4_t vreinterpretq_u32_s64(int64x2_t __p0) {
+ uint32x4_t __ret;
+ __ret = (uint32x4_t)(__p0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint32x4_t vreinterpretq_u32_s16(int16x8_t __p0) {
+ uint32x4_t __ret;
+ __ret = (uint32x4_t)(__p0);
+ return __ret;
+}
+#else
+__ai uint32x4_t vreinterpretq_u32_s16(int16x8_t __p0) {
+ uint32x4_t __ret;
+ __ret = (uint32x4_t)(__p0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint64x2_t vreinterpretq_u64_p8(poly8x16_t __p0) {
+ uint64x2_t __ret;
+ __ret = (uint64x2_t)(__p0);
+ return __ret;
+}
+#else
+__ai uint64x2_t vreinterpretq_u64_p8(poly8x16_t __p0) {
+ uint64x2_t __ret;
+ __ret = (uint64x2_t)(__p0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint64x2_t vreinterpretq_u64_p16(poly16x8_t __p0) {
+ uint64x2_t __ret;
+ __ret = (uint64x2_t)(__p0);
+ return __ret;
+}
+#else
+__ai uint64x2_t vreinterpretq_u64_p16(poly16x8_t __p0) {
+ uint64x2_t __ret;
+ __ret = (uint64x2_t)(__p0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint64x2_t vreinterpretq_u64_u8(uint8x16_t __p0) {
+ uint64x2_t __ret;
+ __ret = (uint64x2_t)(__p0);
+ return __ret;
+}
+#else
+__ai uint64x2_t vreinterpretq_u64_u8(uint8x16_t __p0) {
+ uint64x2_t __ret;
+ __ret = (uint64x2_t)(__p0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint64x2_t vreinterpretq_u64_u32(uint32x4_t __p0) {
+ uint64x2_t __ret;
+ __ret = (uint64x2_t)(__p0);
+ return __ret;
+}
+#else
+__ai uint64x2_t vreinterpretq_u64_u32(uint32x4_t __p0) {
+ uint64x2_t __ret;
+ __ret = (uint64x2_t)(__p0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint64x2_t vreinterpretq_u64_u16(uint16x8_t __p0) {
+ uint64x2_t __ret;
+ __ret = (uint64x2_t)(__p0);
+ return __ret;
+}
+#else
+__ai uint64x2_t vreinterpretq_u64_u16(uint16x8_t __p0) {
+ uint64x2_t __ret;
+ __ret = (uint64x2_t)(__p0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint64x2_t vreinterpretq_u64_s8(int8x16_t __p0) {
+ uint64x2_t __ret;
+ __ret = (uint64x2_t)(__p0);
+ return __ret;
+}
+#else
+__ai uint64x2_t vreinterpretq_u64_s8(int8x16_t __p0) {
+ uint64x2_t __ret;
+ __ret = (uint64x2_t)(__p0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint64x2_t vreinterpretq_u64_f32(float32x4_t __p0) {
+ uint64x2_t __ret;
+ __ret = (uint64x2_t)(__p0);
+ return __ret;
+}
+#else
+__ai uint64x2_t vreinterpretq_u64_f32(float32x4_t __p0) {
+ uint64x2_t __ret;
+ __ret = (uint64x2_t)(__p0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint64x2_t vreinterpretq_u64_f16(float16x8_t __p0) {
+ uint64x2_t __ret;
+ __ret = (uint64x2_t)(__p0);
+ return __ret;
+}
+#else
+__ai uint64x2_t vreinterpretq_u64_f16(float16x8_t __p0) {
+ uint64x2_t __ret;
+ __ret = (uint64x2_t)(__p0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint64x2_t vreinterpretq_u64_s32(int32x4_t __p0) {
+ uint64x2_t __ret;
+ __ret = (uint64x2_t)(__p0);
+ return __ret;
+}
+#else
+__ai uint64x2_t vreinterpretq_u64_s32(int32x4_t __p0) {
+ uint64x2_t __ret;
+ __ret = (uint64x2_t)(__p0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint64x2_t vreinterpretq_u64_s64(int64x2_t __p0) {
+ uint64x2_t __ret;
+ __ret = (uint64x2_t)(__p0);
+ return __ret;
+}
+#else
+__ai uint64x2_t vreinterpretq_u64_s64(int64x2_t __p0) {
+ uint64x2_t __ret;
+ __ret = (uint64x2_t)(__p0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint64x2_t vreinterpretq_u64_s16(int16x8_t __p0) {
+ uint64x2_t __ret;
+ __ret = (uint64x2_t)(__p0);
+ return __ret;
+}
+#else
+__ai uint64x2_t vreinterpretq_u64_s16(int16x8_t __p0) {
+ uint64x2_t __ret;
+ __ret = (uint64x2_t)(__p0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint16x8_t vreinterpretq_u16_p8(poly8x16_t __p0) {
+ uint16x8_t __ret;
+ __ret = (uint16x8_t)(__p0);
+ return __ret;
+}
+#else
+__ai uint16x8_t vreinterpretq_u16_p8(poly8x16_t __p0) {
+ uint16x8_t __ret;
+ __ret = (uint16x8_t)(__p0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint16x8_t vreinterpretq_u16_p16(poly16x8_t __p0) {
+ uint16x8_t __ret;
+ __ret = (uint16x8_t)(__p0);
+ return __ret;
+}
+#else
+__ai uint16x8_t vreinterpretq_u16_p16(poly16x8_t __p0) {
+ uint16x8_t __ret;
+ __ret = (uint16x8_t)(__p0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint16x8_t vreinterpretq_u16_u8(uint8x16_t __p0) {
+ uint16x8_t __ret;
+ __ret = (uint16x8_t)(__p0);
+ return __ret;
+}
+#else
+__ai uint16x8_t vreinterpretq_u16_u8(uint8x16_t __p0) {
+ uint16x8_t __ret;
+ __ret = (uint16x8_t)(__p0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint16x8_t vreinterpretq_u16_u32(uint32x4_t __p0) {
+ uint16x8_t __ret;
+ __ret = (uint16x8_t)(__p0);
+ return __ret;
+}
+#else
+__ai uint16x8_t vreinterpretq_u16_u32(uint32x4_t __p0) {
+ uint16x8_t __ret;
+ __ret = (uint16x8_t)(__p0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint16x8_t vreinterpretq_u16_u64(uint64x2_t __p0) {
+ uint16x8_t __ret;
+ __ret = (uint16x8_t)(__p0);
+ return __ret;
+}
+#else
+__ai uint16x8_t vreinterpretq_u16_u64(uint64x2_t __p0) {
+ uint16x8_t __ret;
+ __ret = (uint16x8_t)(__p0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint16x8_t vreinterpretq_u16_s8(int8x16_t __p0) {
+ uint16x8_t __ret;
+ __ret = (uint16x8_t)(__p0);
+ return __ret;
+}
+#else
+__ai uint16x8_t vreinterpretq_u16_s8(int8x16_t __p0) {
+ uint16x8_t __ret;
+ __ret = (uint16x8_t)(__p0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint16x8_t vreinterpretq_u16_f32(float32x4_t __p0) {
+ uint16x8_t __ret;
+ __ret = (uint16x8_t)(__p0);
+ return __ret;
+}
+#else
+__ai uint16x8_t vreinterpretq_u16_f32(float32x4_t __p0) {
+ uint16x8_t __ret;
+ __ret = (uint16x8_t)(__p0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint16x8_t vreinterpretq_u16_f16(float16x8_t __p0) {
+ uint16x8_t __ret;
+ __ret = (uint16x8_t)(__p0);
+ return __ret;
+}
+#else
+__ai uint16x8_t vreinterpretq_u16_f16(float16x8_t __p0) {
+ uint16x8_t __ret;
+ __ret = (uint16x8_t)(__p0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint16x8_t vreinterpretq_u16_s32(int32x4_t __p0) {
+ uint16x8_t __ret;
+ __ret = (uint16x8_t)(__p0);
+ return __ret;
+}
+#else
+__ai uint16x8_t vreinterpretq_u16_s32(int32x4_t __p0) {
+ uint16x8_t __ret;
+ __ret = (uint16x8_t)(__p0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint16x8_t vreinterpretq_u16_s64(int64x2_t __p0) {
+ uint16x8_t __ret;
+ __ret = (uint16x8_t)(__p0);
+ return __ret;
+}
+#else
+__ai uint16x8_t vreinterpretq_u16_s64(int64x2_t __p0) {
+ uint16x8_t __ret;
+ __ret = (uint16x8_t)(__p0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint16x8_t vreinterpretq_u16_s16(int16x8_t __p0) {
+ uint16x8_t __ret;
+ __ret = (uint16x8_t)(__p0);
+ return __ret;
+}
+#else
+__ai uint16x8_t vreinterpretq_u16_s16(int16x8_t __p0) {
+ uint16x8_t __ret;
+ __ret = (uint16x8_t)(__p0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai int8x16_t vreinterpretq_s8_p8(poly8x16_t __p0) {
+ int8x16_t __ret;
+ __ret = (int8x16_t)(__p0);
+ return __ret;
+}
+#else
+__ai int8x16_t vreinterpretq_s8_p8(poly8x16_t __p0) {
+ int8x16_t __ret;
+ __ret = (int8x16_t)(__p0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai int8x16_t vreinterpretq_s8_p16(poly16x8_t __p0) {
+ int8x16_t __ret;
+ __ret = (int8x16_t)(__p0);
+ return __ret;
+}
+#else
+__ai int8x16_t vreinterpretq_s8_p16(poly16x8_t __p0) {
+ int8x16_t __ret;
+ __ret = (int8x16_t)(__p0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai int8x16_t vreinterpretq_s8_u8(uint8x16_t __p0) {
+ int8x16_t __ret;
+ __ret = (int8x16_t)(__p0);
+ return __ret;
+}
+#else
+__ai int8x16_t vreinterpretq_s8_u8(uint8x16_t __p0) {
+ int8x16_t __ret;
+ __ret = (int8x16_t)(__p0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai int8x16_t vreinterpretq_s8_u32(uint32x4_t __p0) {
+ int8x16_t __ret;
+ __ret = (int8x16_t)(__p0);
+ return __ret;
+}
+#else
+__ai int8x16_t vreinterpretq_s8_u32(uint32x4_t __p0) {
+ int8x16_t __ret;
+ __ret = (int8x16_t)(__p0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai int8x16_t vreinterpretq_s8_u64(uint64x2_t __p0) {
+ int8x16_t __ret;
+ __ret = (int8x16_t)(__p0);
+ return __ret;
+}
+#else
+__ai int8x16_t vreinterpretq_s8_u64(uint64x2_t __p0) {
+ int8x16_t __ret;
+ __ret = (int8x16_t)(__p0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai int8x16_t vreinterpretq_s8_u16(uint16x8_t __p0) {
+ int8x16_t __ret;
+ __ret = (int8x16_t)(__p0);
+ return __ret;
+}
+#else
+__ai int8x16_t vreinterpretq_s8_u16(uint16x8_t __p0) {
+ int8x16_t __ret;
+ __ret = (int8x16_t)(__p0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai int8x16_t vreinterpretq_s8_f32(float32x4_t __p0) {
+ int8x16_t __ret;
+ __ret = (int8x16_t)(__p0);
+ return __ret;
+}
+#else
+__ai int8x16_t vreinterpretq_s8_f32(float32x4_t __p0) {
+ int8x16_t __ret;
+ __ret = (int8x16_t)(__p0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai int8x16_t vreinterpretq_s8_f16(float16x8_t __p0) {
+ int8x16_t __ret;
+ __ret = (int8x16_t)(__p0);
+ return __ret;
+}
+#else
+__ai int8x16_t vreinterpretq_s8_f16(float16x8_t __p0) {
+ int8x16_t __ret;
+ __ret = (int8x16_t)(__p0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai int8x16_t vreinterpretq_s8_s32(int32x4_t __p0) {
+ int8x16_t __ret;
+ __ret = (int8x16_t)(__p0);
+ return __ret;
+}
+#else
+__ai int8x16_t vreinterpretq_s8_s32(int32x4_t __p0) {
+ int8x16_t __ret;
+ __ret = (int8x16_t)(__p0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai int8x16_t vreinterpretq_s8_s64(int64x2_t __p0) {
+ int8x16_t __ret;
+ __ret = (int8x16_t)(__p0);
+ return __ret;
+}
+#else
+__ai int8x16_t vreinterpretq_s8_s64(int64x2_t __p0) {
+ int8x16_t __ret;
+ __ret = (int8x16_t)(__p0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai int8x16_t vreinterpretq_s8_s16(int16x8_t __p0) {
+ int8x16_t __ret;
+ __ret = (int8x16_t)(__p0);
+ return __ret;
+}
+#else
+__ai int8x16_t vreinterpretq_s8_s16(int16x8_t __p0) {
+ int8x16_t __ret;
+ __ret = (int8x16_t)(__p0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai float32x4_t vreinterpretq_f32_p8(poly8x16_t __p0) {
+ float32x4_t __ret;
+ __ret = (float32x4_t)(__p0);
+ return __ret;
+}
+#else
+__ai float32x4_t vreinterpretq_f32_p8(poly8x16_t __p0) {
+ float32x4_t __ret;
+ __ret = (float32x4_t)(__p0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai float32x4_t vreinterpretq_f32_p16(poly16x8_t __p0) {
+ float32x4_t __ret;
+ __ret = (float32x4_t)(__p0);
+ return __ret;
+}
+#else
+__ai float32x4_t vreinterpretq_f32_p16(poly16x8_t __p0) {
+ float32x4_t __ret;
+ __ret = (float32x4_t)(__p0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai float32x4_t vreinterpretq_f32_u8(uint8x16_t __p0) {
+ float32x4_t __ret;
+ __ret = (float32x4_t)(__p0);
+ return __ret;
+}
+#else
+__ai float32x4_t vreinterpretq_f32_u8(uint8x16_t __p0) {
+ float32x4_t __ret;
+ __ret = (float32x4_t)(__p0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai float32x4_t vreinterpretq_f32_u32(uint32x4_t __p0) {
+ float32x4_t __ret;
+ __ret = (float32x4_t)(__p0);
+ return __ret;
+}
+#else
+__ai float32x4_t vreinterpretq_f32_u32(uint32x4_t __p0) {
+ float32x4_t __ret;
+ __ret = (float32x4_t)(__p0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai float32x4_t vreinterpretq_f32_u64(uint64x2_t __p0) {
+ float32x4_t __ret;
+ __ret = (float32x4_t)(__p0);
+ return __ret;
+}
+#else
+__ai float32x4_t vreinterpretq_f32_u64(uint64x2_t __p0) {
+ float32x4_t __ret;
+ __ret = (float32x4_t)(__p0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai float32x4_t vreinterpretq_f32_u16(uint16x8_t __p0) {
+ float32x4_t __ret;
+ __ret = (float32x4_t)(__p0);
+ return __ret;
+}
+#else
+__ai float32x4_t vreinterpretq_f32_u16(uint16x8_t __p0) {
+ float32x4_t __ret;
+ __ret = (float32x4_t)(__p0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai float32x4_t vreinterpretq_f32_s8(int8x16_t __p0) {
+ float32x4_t __ret;
+ __ret = (float32x4_t)(__p0);
+ return __ret;
+}
+#else
+__ai float32x4_t vreinterpretq_f32_s8(int8x16_t __p0) {
+ float32x4_t __ret;
+ __ret = (float32x4_t)(__p0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai float32x4_t vreinterpretq_f32_f16(float16x8_t __p0) {
+ float32x4_t __ret;
+ __ret = (float32x4_t)(__p0);
+ return __ret;
+}
+#else
+__ai float32x4_t vreinterpretq_f32_f16(float16x8_t __p0) {
+ float32x4_t __ret;
+ __ret = (float32x4_t)(__p0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai float32x4_t vreinterpretq_f32_s32(int32x4_t __p0) {
+ float32x4_t __ret;
+ __ret = (float32x4_t)(__p0);
+ return __ret;
+}
+#else
+__ai float32x4_t vreinterpretq_f32_s32(int32x4_t __p0) {
+ float32x4_t __ret;
+ __ret = (float32x4_t)(__p0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai float32x4_t vreinterpretq_f32_s64(int64x2_t __p0) {
+ float32x4_t __ret;
+ __ret = (float32x4_t)(__p0);
+ return __ret;
+}
+#else
+__ai float32x4_t vreinterpretq_f32_s64(int64x2_t __p0) {
+ float32x4_t __ret;
+ __ret = (float32x4_t)(__p0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai float32x4_t vreinterpretq_f32_s16(int16x8_t __p0) {
+ float32x4_t __ret;
+ __ret = (float32x4_t)(__p0);
+ return __ret;
+}
+#else
+__ai float32x4_t vreinterpretq_f32_s16(int16x8_t __p0) {
+ float32x4_t __ret;
+ __ret = (float32x4_t)(__p0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai float16x8_t vreinterpretq_f16_p8(poly8x16_t __p0) {
+ float16x8_t __ret;
+ __ret = (float16x8_t)(__p0);
+ return __ret;
+}
+#else
+__ai float16x8_t vreinterpretq_f16_p8(poly8x16_t __p0) {
+ float16x8_t __ret;
+ __ret = (float16x8_t)(__p0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai float16x8_t vreinterpretq_f16_p16(poly16x8_t __p0) {
+ float16x8_t __ret;
+ __ret = (float16x8_t)(__p0);
+ return __ret;
+}
+#else
+__ai float16x8_t vreinterpretq_f16_p16(poly16x8_t __p0) {
+ float16x8_t __ret;
+ __ret = (float16x8_t)(__p0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai float16x8_t vreinterpretq_f16_u8(uint8x16_t __p0) {
+ float16x8_t __ret;
+ __ret = (float16x8_t)(__p0);
+ return __ret;
+}
+#else
+__ai float16x8_t vreinterpretq_f16_u8(uint8x16_t __p0) {
+ float16x8_t __ret;
+ __ret = (float16x8_t)(__p0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai float16x8_t vreinterpretq_f16_u32(uint32x4_t __p0) {
+ float16x8_t __ret;
+ __ret = (float16x8_t)(__p0);
+ return __ret;
+}
+#else
+__ai float16x8_t vreinterpretq_f16_u32(uint32x4_t __p0) {
+ float16x8_t __ret;
+ __ret = (float16x8_t)(__p0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai float16x8_t vreinterpretq_f16_u64(uint64x2_t __p0) {
+ float16x8_t __ret;
+ __ret = (float16x8_t)(__p0);
+ return __ret;
+}
+#else
+__ai float16x8_t vreinterpretq_f16_u64(uint64x2_t __p0) {
+ float16x8_t __ret;
+ __ret = (float16x8_t)(__p0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai float16x8_t vreinterpretq_f16_u16(uint16x8_t __p0) {
+ float16x8_t __ret;
+ __ret = (float16x8_t)(__p0);
+ return __ret;
+}
+#else
+__ai float16x8_t vreinterpretq_f16_u16(uint16x8_t __p0) {
+ float16x8_t __ret;
+ __ret = (float16x8_t)(__p0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai float16x8_t vreinterpretq_f16_s8(int8x16_t __p0) {
+ float16x8_t __ret;
+ __ret = (float16x8_t)(__p0);
+ return __ret;
+}
+#else
+__ai float16x8_t vreinterpretq_f16_s8(int8x16_t __p0) {
+ float16x8_t __ret;
+ __ret = (float16x8_t)(__p0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai float16x8_t vreinterpretq_f16_f32(float32x4_t __p0) {
+ float16x8_t __ret;
+ __ret = (float16x8_t)(__p0);
+ return __ret;
+}
+#else
+__ai float16x8_t vreinterpretq_f16_f32(float32x4_t __p0) {
+ float16x8_t __ret;
+ __ret = (float16x8_t)(__p0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai float16x8_t vreinterpretq_f16_s32(int32x4_t __p0) {
+ float16x8_t __ret;
+ __ret = (float16x8_t)(__p0);
+ return __ret;
+}
+#else
+__ai float16x8_t vreinterpretq_f16_s32(int32x4_t __p0) {
+ float16x8_t __ret;
+ __ret = (float16x8_t)(__p0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai float16x8_t vreinterpretq_f16_s64(int64x2_t __p0) {
+ float16x8_t __ret;
+ __ret = (float16x8_t)(__p0);
+ return __ret;
+}
+#else
+__ai float16x8_t vreinterpretq_f16_s64(int64x2_t __p0) {
+ float16x8_t __ret;
+ __ret = (float16x8_t)(__p0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai float16x8_t vreinterpretq_f16_s16(int16x8_t __p0) {
+ float16x8_t __ret;
+ __ret = (float16x8_t)(__p0);
+ return __ret;
+}
+#else
+__ai float16x8_t vreinterpretq_f16_s16(int16x8_t __p0) {
+ float16x8_t __ret;
+ __ret = (float16x8_t)(__p0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai int32x4_t vreinterpretq_s32_p8(poly8x16_t __p0) {
+ int32x4_t __ret;
+ __ret = (int32x4_t)(__p0);
+ return __ret;
+}
+#else
+__ai int32x4_t vreinterpretq_s32_p8(poly8x16_t __p0) {
+ int32x4_t __ret;
+ __ret = (int32x4_t)(__p0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai int32x4_t vreinterpretq_s32_p16(poly16x8_t __p0) {
+ int32x4_t __ret;
+ __ret = (int32x4_t)(__p0);
+ return __ret;
+}
+#else
+__ai int32x4_t vreinterpretq_s32_p16(poly16x8_t __p0) {
+ int32x4_t __ret;
+ __ret = (int32x4_t)(__p0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai int32x4_t vreinterpretq_s32_u8(uint8x16_t __p0) {
+ int32x4_t __ret;
+ __ret = (int32x4_t)(__p0);
+ return __ret;
+}
+#else
+__ai int32x4_t vreinterpretq_s32_u8(uint8x16_t __p0) {
+ int32x4_t __ret;
+ __ret = (int32x4_t)(__p0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai int32x4_t vreinterpretq_s32_u32(uint32x4_t __p0) {
+ int32x4_t __ret;
+ __ret = (int32x4_t)(__p0);
+ return __ret;
+}
+#else
+__ai int32x4_t vreinterpretq_s32_u32(uint32x4_t __p0) {
+ int32x4_t __ret;
+ __ret = (int32x4_t)(__p0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai int32x4_t vreinterpretq_s32_u64(uint64x2_t __p0) {
+ int32x4_t __ret;
+ __ret = (int32x4_t)(__p0);
+ return __ret;
+}
+#else
+__ai int32x4_t vreinterpretq_s32_u64(uint64x2_t __p0) {
+ int32x4_t __ret;
+ __ret = (int32x4_t)(__p0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai int32x4_t vreinterpretq_s32_u16(uint16x8_t __p0) {
+ int32x4_t __ret;
+ __ret = (int32x4_t)(__p0);
+ return __ret;
+}
+#else
+__ai int32x4_t vreinterpretq_s32_u16(uint16x8_t __p0) {
+ int32x4_t __ret;
+ __ret = (int32x4_t)(__p0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai int32x4_t vreinterpretq_s32_s8(int8x16_t __p0) {
+ int32x4_t __ret;
+ __ret = (int32x4_t)(__p0);
+ return __ret;
+}
+#else
+__ai int32x4_t vreinterpretq_s32_s8(int8x16_t __p0) {
+ int32x4_t __ret;
+ __ret = (int32x4_t)(__p0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai int32x4_t vreinterpretq_s32_f32(float32x4_t __p0) {
+ int32x4_t __ret;
+ __ret = (int32x4_t)(__p0);
+ return __ret;
+}
+#else
+__ai int32x4_t vreinterpretq_s32_f32(float32x4_t __p0) {
+ int32x4_t __ret;
+ __ret = (int32x4_t)(__p0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai int32x4_t vreinterpretq_s32_f16(float16x8_t __p0) {
+ int32x4_t __ret;
+ __ret = (int32x4_t)(__p0);
+ return __ret;
+}
+#else
+__ai int32x4_t vreinterpretq_s32_f16(float16x8_t __p0) {
+ int32x4_t __ret;
+ __ret = (int32x4_t)(__p0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai int32x4_t vreinterpretq_s32_s64(int64x2_t __p0) {
+ int32x4_t __ret;
+ __ret = (int32x4_t)(__p0);
+ return __ret;
+}
+#else
+__ai int32x4_t vreinterpretq_s32_s64(int64x2_t __p0) {
+ int32x4_t __ret;
+ __ret = (int32x4_t)(__p0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai int32x4_t vreinterpretq_s32_s16(int16x8_t __p0) {
+ int32x4_t __ret;
+ __ret = (int32x4_t)(__p0);
+ return __ret;
+}
+#else
+__ai int32x4_t vreinterpretq_s32_s16(int16x8_t __p0) {
+ int32x4_t __ret;
+ __ret = (int32x4_t)(__p0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai int64x2_t vreinterpretq_s64_p8(poly8x16_t __p0) {
+ int64x2_t __ret;
+ __ret = (int64x2_t)(__p0);
+ return __ret;
+}
+#else
+__ai int64x2_t vreinterpretq_s64_p8(poly8x16_t __p0) {
+ int64x2_t __ret;
+ __ret = (int64x2_t)(__p0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai int64x2_t vreinterpretq_s64_p16(poly16x8_t __p0) {
+ int64x2_t __ret;
+ __ret = (int64x2_t)(__p0);
+ return __ret;
+}
+#else
+__ai int64x2_t vreinterpretq_s64_p16(poly16x8_t __p0) {
+ int64x2_t __ret;
+ __ret = (int64x2_t)(__p0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai int64x2_t vreinterpretq_s64_u8(uint8x16_t __p0) {
+ int64x2_t __ret;
+ __ret = (int64x2_t)(__p0);
+ return __ret;
+}
+#else
+__ai int64x2_t vreinterpretq_s64_u8(uint8x16_t __p0) {
+ int64x2_t __ret;
+ __ret = (int64x2_t)(__p0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai int64x2_t vreinterpretq_s64_u32(uint32x4_t __p0) {
+ int64x2_t __ret;
+ __ret = (int64x2_t)(__p0);
+ return __ret;
+}
+#else
+__ai int64x2_t vreinterpretq_s64_u32(uint32x4_t __p0) {
+ int64x2_t __ret;
+ __ret = (int64x2_t)(__p0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai int64x2_t vreinterpretq_s64_u64(uint64x2_t __p0) {
+ int64x2_t __ret;
+ __ret = (int64x2_t)(__p0);
+ return __ret;
+}
+#else
+__ai int64x2_t vreinterpretq_s64_u64(uint64x2_t __p0) {
+ int64x2_t __ret;
+ __ret = (int64x2_t)(__p0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai int64x2_t vreinterpretq_s64_u16(uint16x8_t __p0) {
+ int64x2_t __ret;
+ __ret = (int64x2_t)(__p0);
+ return __ret;
+}
+#else
+__ai int64x2_t vreinterpretq_s64_u16(uint16x8_t __p0) {
+ int64x2_t __ret;
+ __ret = (int64x2_t)(__p0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai int64x2_t vreinterpretq_s64_s8(int8x16_t __p0) {
+ int64x2_t __ret;
+ __ret = (int64x2_t)(__p0);
+ return __ret;
+}
+#else
+__ai int64x2_t vreinterpretq_s64_s8(int8x16_t __p0) {
+ int64x2_t __ret;
+ __ret = (int64x2_t)(__p0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai int64x2_t vreinterpretq_s64_f32(float32x4_t __p0) {
+ int64x2_t __ret;
+ __ret = (int64x2_t)(__p0);
+ return __ret;
+}
+#else
+__ai int64x2_t vreinterpretq_s64_f32(float32x4_t __p0) {
+ int64x2_t __ret;
+ __ret = (int64x2_t)(__p0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai int64x2_t vreinterpretq_s64_f16(float16x8_t __p0) {
+ int64x2_t __ret;
+ __ret = (int64x2_t)(__p0);
+ return __ret;
+}
+#else
+__ai int64x2_t vreinterpretq_s64_f16(float16x8_t __p0) {
+ int64x2_t __ret;
+ __ret = (int64x2_t)(__p0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai int64x2_t vreinterpretq_s64_s32(int32x4_t __p0) {
+ int64x2_t __ret;
+ __ret = (int64x2_t)(__p0);
+ return __ret;
+}
+#else
+__ai int64x2_t vreinterpretq_s64_s32(int32x4_t __p0) {
+ int64x2_t __ret;
+ __ret = (int64x2_t)(__p0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai int64x2_t vreinterpretq_s64_s16(int16x8_t __p0) {
+ int64x2_t __ret;
+ __ret = (int64x2_t)(__p0);
+ return __ret;
+}
+#else
+__ai int64x2_t vreinterpretq_s64_s16(int16x8_t __p0) {
+ int64x2_t __ret;
+ __ret = (int64x2_t)(__p0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai int16x8_t vreinterpretq_s16_p8(poly8x16_t __p0) {
+ int16x8_t __ret;
+ __ret = (int16x8_t)(__p0);
+ return __ret;
+}
+#else
+__ai int16x8_t vreinterpretq_s16_p8(poly8x16_t __p0) {
+ int16x8_t __ret;
+ __ret = (int16x8_t)(__p0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai int16x8_t vreinterpretq_s16_p16(poly16x8_t __p0) {
+ int16x8_t __ret;
+ __ret = (int16x8_t)(__p0);
+ return __ret;
+}
+#else
+__ai int16x8_t vreinterpretq_s16_p16(poly16x8_t __p0) {
+ int16x8_t __ret;
+ __ret = (int16x8_t)(__p0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai int16x8_t vreinterpretq_s16_u8(uint8x16_t __p0) {
+ int16x8_t __ret;
+ __ret = (int16x8_t)(__p0);
+ return __ret;
+}
+#else
+__ai int16x8_t vreinterpretq_s16_u8(uint8x16_t __p0) {
+ int16x8_t __ret;
+ __ret = (int16x8_t)(__p0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai int16x8_t vreinterpretq_s16_u32(uint32x4_t __p0) {
+ int16x8_t __ret;
+ __ret = (int16x8_t)(__p0);
+ return __ret;
+}
+#else
+__ai int16x8_t vreinterpretq_s16_u32(uint32x4_t __p0) {
+ int16x8_t __ret;
+ __ret = (int16x8_t)(__p0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai int16x8_t vreinterpretq_s16_u64(uint64x2_t __p0) {
+ int16x8_t __ret;
+ __ret = (int16x8_t)(__p0);
+ return __ret;
+}
+#else
+__ai int16x8_t vreinterpretq_s16_u64(uint64x2_t __p0) {
+ int16x8_t __ret;
+ __ret = (int16x8_t)(__p0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai int16x8_t vreinterpretq_s16_u16(uint16x8_t __p0) {
+ int16x8_t __ret;
+ __ret = (int16x8_t)(__p0);
+ return __ret;
+}
+#else
+__ai int16x8_t vreinterpretq_s16_u16(uint16x8_t __p0) {
+ int16x8_t __ret;
+ __ret = (int16x8_t)(__p0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai int16x8_t vreinterpretq_s16_s8(int8x16_t __p0) {
+ int16x8_t __ret;
+ __ret = (int16x8_t)(__p0);
+ return __ret;
+}
+#else
+__ai int16x8_t vreinterpretq_s16_s8(int8x16_t __p0) {
+ int16x8_t __ret;
+ __ret = (int16x8_t)(__p0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai int16x8_t vreinterpretq_s16_f32(float32x4_t __p0) {
+ int16x8_t __ret;
+ __ret = (int16x8_t)(__p0);
+ return __ret;
+}
+#else
+__ai int16x8_t vreinterpretq_s16_f32(float32x4_t __p0) {
+ int16x8_t __ret;
+ __ret = (int16x8_t)(__p0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai int16x8_t vreinterpretq_s16_f16(float16x8_t __p0) {
+ int16x8_t __ret;
+ __ret = (int16x8_t)(__p0);
+ return __ret;
+}
+#else
+__ai int16x8_t vreinterpretq_s16_f16(float16x8_t __p0) {
+ int16x8_t __ret;
+ __ret = (int16x8_t)(__p0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai int16x8_t vreinterpretq_s16_s32(int32x4_t __p0) {
+ int16x8_t __ret;
+ __ret = (int16x8_t)(__p0);
+ return __ret;
+}
+#else
+__ai int16x8_t vreinterpretq_s16_s32(int32x4_t __p0) {
+ int16x8_t __ret;
+ __ret = (int16x8_t)(__p0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai int16x8_t vreinterpretq_s16_s64(int64x2_t __p0) {
+ int16x8_t __ret;
+ __ret = (int16x8_t)(__p0);
+ return __ret;
+}
+#else
+__ai int16x8_t vreinterpretq_s16_s64(int64x2_t __p0) {
+ int16x8_t __ret;
+ __ret = (int16x8_t)(__p0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint8x8_t vreinterpret_u8_p8(poly8x8_t __p0) {
+ uint8x8_t __ret;
+ __ret = (uint8x8_t)(__p0);
+ return __ret;
+}
+#else
+__ai uint8x8_t vreinterpret_u8_p8(poly8x8_t __p0) {
+ uint8x8_t __ret;
+ __ret = (uint8x8_t)(__p0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint8x8_t vreinterpret_u8_p16(poly16x4_t __p0) {
+ uint8x8_t __ret;
+ __ret = (uint8x8_t)(__p0);
+ return __ret;
+}
+#else
+__ai uint8x8_t vreinterpret_u8_p16(poly16x4_t __p0) {
+ uint8x8_t __ret;
+ __ret = (uint8x8_t)(__p0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint8x8_t vreinterpret_u8_u32(uint32x2_t __p0) {
+ uint8x8_t __ret;
+ __ret = (uint8x8_t)(__p0);
+ return __ret;
+}
+#else
+__ai uint8x8_t vreinterpret_u8_u32(uint32x2_t __p0) {
+ uint8x8_t __ret;
+ __ret = (uint8x8_t)(__p0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint8x8_t vreinterpret_u8_u64(uint64x1_t __p0) {
+ uint8x8_t __ret;
+ __ret = (uint8x8_t)(__p0);
+ return __ret;
+}
+#else
+__ai uint8x8_t vreinterpret_u8_u64(uint64x1_t __p0) {
+ uint8x8_t __ret;
+ __ret = (uint8x8_t)(__p0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint8x8_t vreinterpret_u8_u16(uint16x4_t __p0) {
+ uint8x8_t __ret;
+ __ret = (uint8x8_t)(__p0);
+ return __ret;
+}
+#else
+__ai uint8x8_t vreinterpret_u8_u16(uint16x4_t __p0) {
+ uint8x8_t __ret;
+ __ret = (uint8x8_t)(__p0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint8x8_t vreinterpret_u8_s8(int8x8_t __p0) {
+ uint8x8_t __ret;
+ __ret = (uint8x8_t)(__p0);
+ return __ret;
+}
+#else
+__ai uint8x8_t vreinterpret_u8_s8(int8x8_t __p0) {
+ uint8x8_t __ret;
+ __ret = (uint8x8_t)(__p0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint8x8_t vreinterpret_u8_f32(float32x2_t __p0) {
+ uint8x8_t __ret;
+ __ret = (uint8x8_t)(__p0);
+ return __ret;
+}
+#else
+__ai uint8x8_t vreinterpret_u8_f32(float32x2_t __p0) {
+ uint8x8_t __ret;
+ __ret = (uint8x8_t)(__p0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint8x8_t vreinterpret_u8_f16(float16x4_t __p0) {
+ uint8x8_t __ret;
+ __ret = (uint8x8_t)(__p0);
+ return __ret;
+}
+#else
+__ai uint8x8_t vreinterpret_u8_f16(float16x4_t __p0) {
+ uint8x8_t __ret;
+ __ret = (uint8x8_t)(__p0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint8x8_t vreinterpret_u8_s32(int32x2_t __p0) {
+ uint8x8_t __ret;
+ __ret = (uint8x8_t)(__p0);
+ return __ret;
+}
+#else
+__ai uint8x8_t vreinterpret_u8_s32(int32x2_t __p0) {
+ uint8x8_t __ret;
+ __ret = (uint8x8_t)(__p0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint8x8_t vreinterpret_u8_s64(int64x1_t __p0) {
+ uint8x8_t __ret;
+ __ret = (uint8x8_t)(__p0);
+ return __ret;
+}
+#else
+__ai uint8x8_t vreinterpret_u8_s64(int64x1_t __p0) {
+ uint8x8_t __ret;
+ __ret = (uint8x8_t)(__p0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint8x8_t vreinterpret_u8_s16(int16x4_t __p0) {
+ uint8x8_t __ret;
+ __ret = (uint8x8_t)(__p0);
+ return __ret;
+}
+#else
+__ai uint8x8_t vreinterpret_u8_s16(int16x4_t __p0) {
+ uint8x8_t __ret;
+ __ret = (uint8x8_t)(__p0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint32x2_t vreinterpret_u32_p8(poly8x8_t __p0) {
+ uint32x2_t __ret;
+ __ret = (uint32x2_t)(__p0);
+ return __ret;
+}
+#else
+__ai uint32x2_t vreinterpret_u32_p8(poly8x8_t __p0) {
+ uint32x2_t __ret;
+ __ret = (uint32x2_t)(__p0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint32x2_t vreinterpret_u32_p16(poly16x4_t __p0) {
+ uint32x2_t __ret;
+ __ret = (uint32x2_t)(__p0);
+ return __ret;
+}
+#else
+__ai uint32x2_t vreinterpret_u32_p16(poly16x4_t __p0) {
+ uint32x2_t __ret;
+ __ret = (uint32x2_t)(__p0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint32x2_t vreinterpret_u32_u8(uint8x8_t __p0) {
+ uint32x2_t __ret;
+ __ret = (uint32x2_t)(__p0);
+ return __ret;
+}
+#else
+__ai uint32x2_t vreinterpret_u32_u8(uint8x8_t __p0) {
+ uint32x2_t __ret;
+ __ret = (uint32x2_t)(__p0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint32x2_t vreinterpret_u32_u64(uint64x1_t __p0) {
+ uint32x2_t __ret;
+ __ret = (uint32x2_t)(__p0);
+ return __ret;
+}
+#else
+__ai uint32x2_t vreinterpret_u32_u64(uint64x1_t __p0) {
+ uint32x2_t __ret;
+ __ret = (uint32x2_t)(__p0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint32x2_t vreinterpret_u32_u16(uint16x4_t __p0) {
+ uint32x2_t __ret;
+ __ret = (uint32x2_t)(__p0);
+ return __ret;
+}
+#else
+__ai uint32x2_t vreinterpret_u32_u16(uint16x4_t __p0) {
+ uint32x2_t __ret;
+ __ret = (uint32x2_t)(__p0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint32x2_t vreinterpret_u32_s8(int8x8_t __p0) {
+ uint32x2_t __ret;
+ __ret = (uint32x2_t)(__p0);
+ return __ret;
+}
+#else
+__ai uint32x2_t vreinterpret_u32_s8(int8x8_t __p0) {
+ uint32x2_t __ret;
+ __ret = (uint32x2_t)(__p0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint32x2_t vreinterpret_u32_f32(float32x2_t __p0) {
+ uint32x2_t __ret;
+ __ret = (uint32x2_t)(__p0);
+ return __ret;
+}
+#else
+__ai uint32x2_t vreinterpret_u32_f32(float32x2_t __p0) {
+ uint32x2_t __ret;
+ __ret = (uint32x2_t)(__p0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint32x2_t vreinterpret_u32_f16(float16x4_t __p0) {
+ uint32x2_t __ret;
+ __ret = (uint32x2_t)(__p0);
+ return __ret;
+}
+#else
+__ai uint32x2_t vreinterpret_u32_f16(float16x4_t __p0) {
+ uint32x2_t __ret;
+ __ret = (uint32x2_t)(__p0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint32x2_t vreinterpret_u32_s32(int32x2_t __p0) {
+ uint32x2_t __ret;
+ __ret = (uint32x2_t)(__p0);
+ return __ret;
+}
+#else
+__ai uint32x2_t vreinterpret_u32_s32(int32x2_t __p0) {
+ uint32x2_t __ret;
+ __ret = (uint32x2_t)(__p0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint32x2_t vreinterpret_u32_s64(int64x1_t __p0) {
+ uint32x2_t __ret;
+ __ret = (uint32x2_t)(__p0);
+ return __ret;
+}
+#else
+__ai uint32x2_t vreinterpret_u32_s64(int64x1_t __p0) {
+ uint32x2_t __ret;
+ __ret = (uint32x2_t)(__p0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint32x2_t vreinterpret_u32_s16(int16x4_t __p0) {
+ uint32x2_t __ret;
+ __ret = (uint32x2_t)(__p0);
+ return __ret;
+}
+#else
+__ai uint32x2_t vreinterpret_u32_s16(int16x4_t __p0) {
+ uint32x2_t __ret;
+ __ret = (uint32x2_t)(__p0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint64x1_t vreinterpret_u64_p8(poly8x8_t __p0) {
+ uint64x1_t __ret;
+ __ret = (uint64x1_t)(__p0);
+ return __ret;
+}
+#else
+__ai uint64x1_t vreinterpret_u64_p8(poly8x8_t __p0) {
+ uint64x1_t __ret;
+ __ret = (uint64x1_t)(__p0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint64x1_t vreinterpret_u64_p16(poly16x4_t __p0) {
+ uint64x1_t __ret;
+ __ret = (uint64x1_t)(__p0);
+ return __ret;
+}
+#else
+__ai uint64x1_t vreinterpret_u64_p16(poly16x4_t __p0) {
+ uint64x1_t __ret;
+ __ret = (uint64x1_t)(__p0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint64x1_t vreinterpret_u64_u8(uint8x8_t __p0) {
+ uint64x1_t __ret;
+ __ret = (uint64x1_t)(__p0);
+ return __ret;
+}
+#else
+__ai uint64x1_t vreinterpret_u64_u8(uint8x8_t __p0) {
+ uint64x1_t __ret;
+ __ret = (uint64x1_t)(__p0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint64x1_t vreinterpret_u64_u32(uint32x2_t __p0) {
+ uint64x1_t __ret;
+ __ret = (uint64x1_t)(__p0);
+ return __ret;
+}
+#else
+__ai uint64x1_t vreinterpret_u64_u32(uint32x2_t __p0) {
+ uint64x1_t __ret;
+ __ret = (uint64x1_t)(__p0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint64x1_t vreinterpret_u64_u16(uint16x4_t __p0) {
+ uint64x1_t __ret;
+ __ret = (uint64x1_t)(__p0);
+ return __ret;
+}
+#else
+__ai uint64x1_t vreinterpret_u64_u16(uint16x4_t __p0) {
+ uint64x1_t __ret;
+ __ret = (uint64x1_t)(__p0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint64x1_t vreinterpret_u64_s8(int8x8_t __p0) {
+ uint64x1_t __ret;
+ __ret = (uint64x1_t)(__p0);
+ return __ret;
+}
+#else
+__ai uint64x1_t vreinterpret_u64_s8(int8x8_t __p0) {
+ uint64x1_t __ret;
+ __ret = (uint64x1_t)(__p0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint64x1_t vreinterpret_u64_f32(float32x2_t __p0) {
+ uint64x1_t __ret;
+ __ret = (uint64x1_t)(__p0);
+ return __ret;
+}
+#else
+__ai uint64x1_t vreinterpret_u64_f32(float32x2_t __p0) {
+ uint64x1_t __ret;
+ __ret = (uint64x1_t)(__p0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint64x1_t vreinterpret_u64_f16(float16x4_t __p0) {
+ uint64x1_t __ret;
+ __ret = (uint64x1_t)(__p0);
+ return __ret;
+}
+#else
+__ai uint64x1_t vreinterpret_u64_f16(float16x4_t __p0) {
+ uint64x1_t __ret;
+ __ret = (uint64x1_t)(__p0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint64x1_t vreinterpret_u64_s32(int32x2_t __p0) {
+ uint64x1_t __ret;
+ __ret = (uint64x1_t)(__p0);
+ return __ret;
+}
+#else
+__ai uint64x1_t vreinterpret_u64_s32(int32x2_t __p0) {
+ uint64x1_t __ret;
+ __ret = (uint64x1_t)(__p0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint64x1_t vreinterpret_u64_s64(int64x1_t __p0) {
+ uint64x1_t __ret;
+ __ret = (uint64x1_t)(__p0);
+ return __ret;
+}
+#else
+__ai uint64x1_t vreinterpret_u64_s64(int64x1_t __p0) {
+ uint64x1_t __ret;
+ __ret = (uint64x1_t)(__p0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint64x1_t vreinterpret_u64_s16(int16x4_t __p0) {
+ uint64x1_t __ret;
+ __ret = (uint64x1_t)(__p0);
+ return __ret;
+}
+#else
+__ai uint64x1_t vreinterpret_u64_s16(int16x4_t __p0) {
+ uint64x1_t __ret;
+ __ret = (uint64x1_t)(__p0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint16x4_t vreinterpret_u16_p8(poly8x8_t __p0) {
+ uint16x4_t __ret;
+ __ret = (uint16x4_t)(__p0);
+ return __ret;
+}
+#else
+__ai uint16x4_t vreinterpret_u16_p8(poly8x8_t __p0) {
+ uint16x4_t __ret;
+ __ret = (uint16x4_t)(__p0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint16x4_t vreinterpret_u16_p16(poly16x4_t __p0) {
+ uint16x4_t __ret;
+ __ret = (uint16x4_t)(__p0);
+ return __ret;
+}
+#else
+__ai uint16x4_t vreinterpret_u16_p16(poly16x4_t __p0) {
+ uint16x4_t __ret;
+ __ret = (uint16x4_t)(__p0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint16x4_t vreinterpret_u16_u8(uint8x8_t __p0) {
+ uint16x4_t __ret;
+ __ret = (uint16x4_t)(__p0);
+ return __ret;
+}
+#else
+__ai uint16x4_t vreinterpret_u16_u8(uint8x8_t __p0) {
+ uint16x4_t __ret;
+ __ret = (uint16x4_t)(__p0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint16x4_t vreinterpret_u16_u32(uint32x2_t __p0) {
+ uint16x4_t __ret;
+ __ret = (uint16x4_t)(__p0);
+ return __ret;
+}
+#else
+__ai uint16x4_t vreinterpret_u16_u32(uint32x2_t __p0) {
+ uint16x4_t __ret;
+ __ret = (uint16x4_t)(__p0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint16x4_t vreinterpret_u16_u64(uint64x1_t __p0) {
+ uint16x4_t __ret;
+ __ret = (uint16x4_t)(__p0);
+ return __ret;
+}
+#else
+__ai uint16x4_t vreinterpret_u16_u64(uint64x1_t __p0) {
+ uint16x4_t __ret;
+ __ret = (uint16x4_t)(__p0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint16x4_t vreinterpret_u16_s8(int8x8_t __p0) {
+ uint16x4_t __ret;
+ __ret = (uint16x4_t)(__p0);
+ return __ret;
+}
+#else
+__ai uint16x4_t vreinterpret_u16_s8(int8x8_t __p0) {
+ uint16x4_t __ret;
+ __ret = (uint16x4_t)(__p0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint16x4_t vreinterpret_u16_f32(float32x2_t __p0) {
+ uint16x4_t __ret;
+ __ret = (uint16x4_t)(__p0);
+ return __ret;
+}
+#else
+__ai uint16x4_t vreinterpret_u16_f32(float32x2_t __p0) {
+ uint16x4_t __ret;
+ __ret = (uint16x4_t)(__p0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint16x4_t vreinterpret_u16_f16(float16x4_t __p0) {
+ uint16x4_t __ret;
+ __ret = (uint16x4_t)(__p0);
+ return __ret;
+}
+#else
+__ai uint16x4_t vreinterpret_u16_f16(float16x4_t __p0) {
+ uint16x4_t __ret;
+ __ret = (uint16x4_t)(__p0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint16x4_t vreinterpret_u16_s32(int32x2_t __p0) {
+ uint16x4_t __ret;
+ __ret = (uint16x4_t)(__p0);
+ return __ret;
+}
+#else
+__ai uint16x4_t vreinterpret_u16_s32(int32x2_t __p0) {
+ uint16x4_t __ret;
+ __ret = (uint16x4_t)(__p0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint16x4_t vreinterpret_u16_s64(int64x1_t __p0) {
+ uint16x4_t __ret;
+ __ret = (uint16x4_t)(__p0);
+ return __ret;
+}
+#else
+__ai uint16x4_t vreinterpret_u16_s64(int64x1_t __p0) {
+ uint16x4_t __ret;
+ __ret = (uint16x4_t)(__p0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint16x4_t vreinterpret_u16_s16(int16x4_t __p0) {
+ uint16x4_t __ret;
+ __ret = (uint16x4_t)(__p0);
+ return __ret;
+}
+#else
+__ai uint16x4_t vreinterpret_u16_s16(int16x4_t __p0) {
+ uint16x4_t __ret;
+ __ret = (uint16x4_t)(__p0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai int8x8_t vreinterpret_s8_p8(poly8x8_t __p0) {
+ int8x8_t __ret;
+ __ret = (int8x8_t)(__p0);
+ return __ret;
+}
+#else
+__ai int8x8_t vreinterpret_s8_p8(poly8x8_t __p0) {
+ int8x8_t __ret;
+ __ret = (int8x8_t)(__p0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai int8x8_t vreinterpret_s8_p16(poly16x4_t __p0) {
+ int8x8_t __ret;
+ __ret = (int8x8_t)(__p0);
+ return __ret;
+}
+#else
+__ai int8x8_t vreinterpret_s8_p16(poly16x4_t __p0) {
+ int8x8_t __ret;
+ __ret = (int8x8_t)(__p0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai int8x8_t vreinterpret_s8_u8(uint8x8_t __p0) {
+ int8x8_t __ret;
+ __ret = (int8x8_t)(__p0);
+ return __ret;
+}
+#else
+__ai int8x8_t vreinterpret_s8_u8(uint8x8_t __p0) {
+ int8x8_t __ret;
+ __ret = (int8x8_t)(__p0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai int8x8_t vreinterpret_s8_u32(uint32x2_t __p0) {
+ int8x8_t __ret;
+ __ret = (int8x8_t)(__p0);
+ return __ret;
+}
+#else
+__ai int8x8_t vreinterpret_s8_u32(uint32x2_t __p0) {
+ int8x8_t __ret;
+ __ret = (int8x8_t)(__p0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai int8x8_t vreinterpret_s8_u64(uint64x1_t __p0) {
+ int8x8_t __ret;
+ __ret = (int8x8_t)(__p0);
+ return __ret;
+}
+#else
+__ai int8x8_t vreinterpret_s8_u64(uint64x1_t __p0) {
+ int8x8_t __ret;
+ __ret = (int8x8_t)(__p0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai int8x8_t vreinterpret_s8_u16(uint16x4_t __p0) {
+ int8x8_t __ret;
+ __ret = (int8x8_t)(__p0);
+ return __ret;
+}
+#else
+__ai int8x8_t vreinterpret_s8_u16(uint16x4_t __p0) {
+ int8x8_t __ret;
+ __ret = (int8x8_t)(__p0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai int8x8_t vreinterpret_s8_f32(float32x2_t __p0) {
+ int8x8_t __ret;
+ __ret = (int8x8_t)(__p0);
+ return __ret;
+}
+#else
+__ai int8x8_t vreinterpret_s8_f32(float32x2_t __p0) {
+ int8x8_t __ret;
+ __ret = (int8x8_t)(__p0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai int8x8_t vreinterpret_s8_f16(float16x4_t __p0) {
+ int8x8_t __ret;
+ __ret = (int8x8_t)(__p0);
+ return __ret;
+}
+#else
+__ai int8x8_t vreinterpret_s8_f16(float16x4_t __p0) {
+ int8x8_t __ret;
+ __ret = (int8x8_t)(__p0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai int8x8_t vreinterpret_s8_s32(int32x2_t __p0) {
+ int8x8_t __ret;
+ __ret = (int8x8_t)(__p0);
+ return __ret;
+}
+#else
+__ai int8x8_t vreinterpret_s8_s32(int32x2_t __p0) {
+ int8x8_t __ret;
+ __ret = (int8x8_t)(__p0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai int8x8_t vreinterpret_s8_s64(int64x1_t __p0) {
+ int8x8_t __ret;
+ __ret = (int8x8_t)(__p0);
+ return __ret;
+}
+#else
+__ai int8x8_t vreinterpret_s8_s64(int64x1_t __p0) {
+ int8x8_t __ret;
+ __ret = (int8x8_t)(__p0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai int8x8_t vreinterpret_s8_s16(int16x4_t __p0) {
+ int8x8_t __ret;
+ __ret = (int8x8_t)(__p0);
+ return __ret;
+}
+#else
+__ai int8x8_t vreinterpret_s8_s16(int16x4_t __p0) {
+ int8x8_t __ret;
+ __ret = (int8x8_t)(__p0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai float32x2_t vreinterpret_f32_p8(poly8x8_t __p0) {
+ float32x2_t __ret;
+ __ret = (float32x2_t)(__p0);
+ return __ret;
+}
+#else
+__ai float32x2_t vreinterpret_f32_p8(poly8x8_t __p0) {
+ float32x2_t __ret;
+ __ret = (float32x2_t)(__p0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai float32x2_t vreinterpret_f32_p16(poly16x4_t __p0) {
+ float32x2_t __ret;
+ __ret = (float32x2_t)(__p0);
+ return __ret;
+}
+#else
+__ai float32x2_t vreinterpret_f32_p16(poly16x4_t __p0) {
+ float32x2_t __ret;
+ __ret = (float32x2_t)(__p0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai float32x2_t vreinterpret_f32_u8(uint8x8_t __p0) {
+ float32x2_t __ret;
+ __ret = (float32x2_t)(__p0);
+ return __ret;
+}
+#else
+__ai float32x2_t vreinterpret_f32_u8(uint8x8_t __p0) {
+ float32x2_t __ret;
+ __ret = (float32x2_t)(__p0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai float32x2_t vreinterpret_f32_u32(uint32x2_t __p0) {
+ float32x2_t __ret;
+ __ret = (float32x2_t)(__p0);
+ return __ret;
+}
+#else
+__ai float32x2_t vreinterpret_f32_u32(uint32x2_t __p0) {
+ float32x2_t __ret;
+ __ret = (float32x2_t)(__p0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai float32x2_t vreinterpret_f32_u64(uint64x1_t __p0) {
+ float32x2_t __ret;
+ __ret = (float32x2_t)(__p0);
+ return __ret;
+}
+#else
+__ai float32x2_t vreinterpret_f32_u64(uint64x1_t __p0) {
+ float32x2_t __ret;
+ __ret = (float32x2_t)(__p0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai float32x2_t vreinterpret_f32_u16(uint16x4_t __p0) {
+ float32x2_t __ret;
+ __ret = (float32x2_t)(__p0);
+ return __ret;
+}
+#else
+__ai float32x2_t vreinterpret_f32_u16(uint16x4_t __p0) {
+ float32x2_t __ret;
+ __ret = (float32x2_t)(__p0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai float32x2_t vreinterpret_f32_s8(int8x8_t __p0) {
+ float32x2_t __ret;
+ __ret = (float32x2_t)(__p0);
+ return __ret;
+}
+#else
+__ai float32x2_t vreinterpret_f32_s8(int8x8_t __p0) {
+ float32x2_t __ret;
+ __ret = (float32x2_t)(__p0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai float32x2_t vreinterpret_f32_f16(float16x4_t __p0) {
+ float32x2_t __ret;
+ __ret = (float32x2_t)(__p0);
+ return __ret;
+}
+#else
+__ai float32x2_t vreinterpret_f32_f16(float16x4_t __p0) {
+ float32x2_t __ret;
+ __ret = (float32x2_t)(__p0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai float32x2_t vreinterpret_f32_s32(int32x2_t __p0) {
+ float32x2_t __ret;
+ __ret = (float32x2_t)(__p0);
+ return __ret;
+}
+#else
+__ai float32x2_t vreinterpret_f32_s32(int32x2_t __p0) {
+ float32x2_t __ret;
+ __ret = (float32x2_t)(__p0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai float32x2_t vreinterpret_f32_s64(int64x1_t __p0) {
+ float32x2_t __ret;
+ __ret = (float32x2_t)(__p0);
+ return __ret;
+}
+#else
+__ai float32x2_t vreinterpret_f32_s64(int64x1_t __p0) {
+ float32x2_t __ret;
+ __ret = (float32x2_t)(__p0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai float32x2_t vreinterpret_f32_s16(int16x4_t __p0) {
+ float32x2_t __ret;
+ __ret = (float32x2_t)(__p0);
+ return __ret;
+}
+#else
+__ai float32x2_t vreinterpret_f32_s16(int16x4_t __p0) {
+ float32x2_t __ret;
+ __ret = (float32x2_t)(__p0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai float16x4_t vreinterpret_f16_p8(poly8x8_t __p0) {
+ float16x4_t __ret;
+ __ret = (float16x4_t)(__p0);
+ return __ret;
+}
+#else
+__ai float16x4_t vreinterpret_f16_p8(poly8x8_t __p0) {
+ float16x4_t __ret;
+ __ret = (float16x4_t)(__p0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai float16x4_t vreinterpret_f16_p16(poly16x4_t __p0) {
+ float16x4_t __ret;
+ __ret = (float16x4_t)(__p0);
+ return __ret;
+}
+#else
+__ai float16x4_t vreinterpret_f16_p16(poly16x4_t __p0) {
+ float16x4_t __ret;
+ __ret = (float16x4_t)(__p0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai float16x4_t vreinterpret_f16_u8(uint8x8_t __p0) {
+ float16x4_t __ret;
+ __ret = (float16x4_t)(__p0);
+ return __ret;
+}
+#else
+__ai float16x4_t vreinterpret_f16_u8(uint8x8_t __p0) {
+ float16x4_t __ret;
+ __ret = (float16x4_t)(__p0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai float16x4_t vreinterpret_f16_u32(uint32x2_t __p0) {
+ float16x4_t __ret;
+ __ret = (float16x4_t)(__p0);
+ return __ret;
+}
+#else
+__ai float16x4_t vreinterpret_f16_u32(uint32x2_t __p0) {
+ float16x4_t __ret;
+ __ret = (float16x4_t)(__p0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai float16x4_t vreinterpret_f16_u64(uint64x1_t __p0) {
+ float16x4_t __ret;
+ __ret = (float16x4_t)(__p0);
+ return __ret;
+}
+#else
+__ai float16x4_t vreinterpret_f16_u64(uint64x1_t __p0) {
+ float16x4_t __ret;
+ __ret = (float16x4_t)(__p0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai float16x4_t vreinterpret_f16_u16(uint16x4_t __p0) {
+ float16x4_t __ret;
+ __ret = (float16x4_t)(__p0);
+ return __ret;
+}
+#else
+__ai float16x4_t vreinterpret_f16_u16(uint16x4_t __p0) {
+ float16x4_t __ret;
+ __ret = (float16x4_t)(__p0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai float16x4_t vreinterpret_f16_s8(int8x8_t __p0) {
+ float16x4_t __ret;
+ __ret = (float16x4_t)(__p0);
+ return __ret;
+}
+#else
+__ai float16x4_t vreinterpret_f16_s8(int8x8_t __p0) {
+ float16x4_t __ret;
+ __ret = (float16x4_t)(__p0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai float16x4_t vreinterpret_f16_f32(float32x2_t __p0) {
+ float16x4_t __ret;
+ __ret = (float16x4_t)(__p0);
+ return __ret;
+}
+#else
+__ai float16x4_t vreinterpret_f16_f32(float32x2_t __p0) {
+ float16x4_t __ret;
+ __ret = (float16x4_t)(__p0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai float16x4_t vreinterpret_f16_s32(int32x2_t __p0) {
+ float16x4_t __ret;
+ __ret = (float16x4_t)(__p0);
+ return __ret;
+}
+#else
+__ai float16x4_t vreinterpret_f16_s32(int32x2_t __p0) {
+ float16x4_t __ret;
+ __ret = (float16x4_t)(__p0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai float16x4_t vreinterpret_f16_s64(int64x1_t __p0) {
+ float16x4_t __ret;
+ __ret = (float16x4_t)(__p0);
+ return __ret;
+}
+#else
+__ai float16x4_t vreinterpret_f16_s64(int64x1_t __p0) {
+ float16x4_t __ret;
+ __ret = (float16x4_t)(__p0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai float16x4_t vreinterpret_f16_s16(int16x4_t __p0) {
+ float16x4_t __ret;
+ __ret = (float16x4_t)(__p0);
+ return __ret;
+}
+#else
+__ai float16x4_t vreinterpret_f16_s16(int16x4_t __p0) {
+ float16x4_t __ret;
+ __ret = (float16x4_t)(__p0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai int32x2_t vreinterpret_s32_p8(poly8x8_t __p0) {
+ int32x2_t __ret;
+ __ret = (int32x2_t)(__p0);
+ return __ret;
+}
+#else
+__ai int32x2_t vreinterpret_s32_p8(poly8x8_t __p0) {
+ int32x2_t __ret;
+ __ret = (int32x2_t)(__p0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai int32x2_t vreinterpret_s32_p16(poly16x4_t __p0) {
+ int32x2_t __ret;
+ __ret = (int32x2_t)(__p0);
+ return __ret;
+}
+#else
+__ai int32x2_t vreinterpret_s32_p16(poly16x4_t __p0) {
+ int32x2_t __ret;
+ __ret = (int32x2_t)(__p0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai int32x2_t vreinterpret_s32_u8(uint8x8_t __p0) {
+ int32x2_t __ret;
+ __ret = (int32x2_t)(__p0);
+ return __ret;
+}
+#else
+__ai int32x2_t vreinterpret_s32_u8(uint8x8_t __p0) {
+ int32x2_t __ret;
+ __ret = (int32x2_t)(__p0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai int32x2_t vreinterpret_s32_u32(uint32x2_t __p0) {
+ int32x2_t __ret;
+ __ret = (int32x2_t)(__p0);
+ return __ret;
+}
+#else
+__ai int32x2_t vreinterpret_s32_u32(uint32x2_t __p0) {
+ int32x2_t __ret;
+ __ret = (int32x2_t)(__p0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai int32x2_t vreinterpret_s32_u64(uint64x1_t __p0) {
+ int32x2_t __ret;
+ __ret = (int32x2_t)(__p0);
+ return __ret;
+}
+#else
+__ai int32x2_t vreinterpret_s32_u64(uint64x1_t __p0) {
+ int32x2_t __ret;
+ __ret = (int32x2_t)(__p0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai int32x2_t vreinterpret_s32_u16(uint16x4_t __p0) {
+ int32x2_t __ret;
+ __ret = (int32x2_t)(__p0);
+ return __ret;
+}
+#else
+__ai int32x2_t vreinterpret_s32_u16(uint16x4_t __p0) {
+ int32x2_t __ret;
+ __ret = (int32x2_t)(__p0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai int32x2_t vreinterpret_s32_s8(int8x8_t __p0) {
+ int32x2_t __ret;
+ __ret = (int32x2_t)(__p0);
+ return __ret;
+}
+#else
+__ai int32x2_t vreinterpret_s32_s8(int8x8_t __p0) {
+ int32x2_t __ret;
+ __ret = (int32x2_t)(__p0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai int32x2_t vreinterpret_s32_f32(float32x2_t __p0) {
+ int32x2_t __ret;
+ __ret = (int32x2_t)(__p0);
+ return __ret;
+}
+#else
+__ai int32x2_t vreinterpret_s32_f32(float32x2_t __p0) {
+ int32x2_t __ret;
+ __ret = (int32x2_t)(__p0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai int32x2_t vreinterpret_s32_f16(float16x4_t __p0) {
+ int32x2_t __ret;
+ __ret = (int32x2_t)(__p0);
+ return __ret;
+}
+#else
+__ai int32x2_t vreinterpret_s32_f16(float16x4_t __p0) {
+ int32x2_t __ret;
+ __ret = (int32x2_t)(__p0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai int32x2_t vreinterpret_s32_s64(int64x1_t __p0) {
+ int32x2_t __ret;
+ __ret = (int32x2_t)(__p0);
+ return __ret;
+}
+#else
+__ai int32x2_t vreinterpret_s32_s64(int64x1_t __p0) {
+ int32x2_t __ret;
+ __ret = (int32x2_t)(__p0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai int32x2_t vreinterpret_s32_s16(int16x4_t __p0) {
+ int32x2_t __ret;
+ __ret = (int32x2_t)(__p0);
+ return __ret;
+}
+#else
+__ai int32x2_t vreinterpret_s32_s16(int16x4_t __p0) {
+ int32x2_t __ret;
+ __ret = (int32x2_t)(__p0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai int64x1_t vreinterpret_s64_p8(poly8x8_t __p0) {
+ int64x1_t __ret;
+ __ret = (int64x1_t)(__p0);
+ return __ret;
+}
+#else
+__ai int64x1_t vreinterpret_s64_p8(poly8x8_t __p0) {
+ int64x1_t __ret;
+ __ret = (int64x1_t)(__p0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai int64x1_t vreinterpret_s64_p16(poly16x4_t __p0) {
+ int64x1_t __ret;
+ __ret = (int64x1_t)(__p0);
+ return __ret;
+}
+#else
+__ai int64x1_t vreinterpret_s64_p16(poly16x4_t __p0) {
+ int64x1_t __ret;
+ __ret = (int64x1_t)(__p0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai int64x1_t vreinterpret_s64_u8(uint8x8_t __p0) {
+ int64x1_t __ret;
+ __ret = (int64x1_t)(__p0);
+ return __ret;
+}
+#else
+__ai int64x1_t vreinterpret_s64_u8(uint8x8_t __p0) {
+ int64x1_t __ret;
+ __ret = (int64x1_t)(__p0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai int64x1_t vreinterpret_s64_u32(uint32x2_t __p0) {
+ int64x1_t __ret;
+ __ret = (int64x1_t)(__p0);
+ return __ret;
+}
+#else
+__ai int64x1_t vreinterpret_s64_u32(uint32x2_t __p0) {
+ int64x1_t __ret;
+ __ret = (int64x1_t)(__p0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai int64x1_t vreinterpret_s64_u64(uint64x1_t __p0) {
+ int64x1_t __ret;
+ __ret = (int64x1_t)(__p0);
+ return __ret;
+}
+#else
+__ai int64x1_t vreinterpret_s64_u64(uint64x1_t __p0) {
+ int64x1_t __ret;
+ __ret = (int64x1_t)(__p0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai int64x1_t vreinterpret_s64_u16(uint16x4_t __p0) {
+ int64x1_t __ret;
+ __ret = (int64x1_t)(__p0);
+ return __ret;
+}
+#else
+__ai int64x1_t vreinterpret_s64_u16(uint16x4_t __p0) {
+ int64x1_t __ret;
+ __ret = (int64x1_t)(__p0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai int64x1_t vreinterpret_s64_s8(int8x8_t __p0) {
+ int64x1_t __ret;
+ __ret = (int64x1_t)(__p0);
+ return __ret;
+}
+#else
+__ai int64x1_t vreinterpret_s64_s8(int8x8_t __p0) {
+ int64x1_t __ret;
+ __ret = (int64x1_t)(__p0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai int64x1_t vreinterpret_s64_f32(float32x2_t __p0) {
+ int64x1_t __ret;
+ __ret = (int64x1_t)(__p0);
+ return __ret;
+}
+#else
+__ai int64x1_t vreinterpret_s64_f32(float32x2_t __p0) {
+ int64x1_t __ret;
+ __ret = (int64x1_t)(__p0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai int64x1_t vreinterpret_s64_f16(float16x4_t __p0) {
+ int64x1_t __ret;
+ __ret = (int64x1_t)(__p0);
+ return __ret;
+}
+#else
+__ai int64x1_t vreinterpret_s64_f16(float16x4_t __p0) {
+ int64x1_t __ret;
+ __ret = (int64x1_t)(__p0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai int64x1_t vreinterpret_s64_s32(int32x2_t __p0) {
+ int64x1_t __ret;
+ __ret = (int64x1_t)(__p0);
+ return __ret;
+}
+#else
+__ai int64x1_t vreinterpret_s64_s32(int32x2_t __p0) {
+ int64x1_t __ret;
+ __ret = (int64x1_t)(__p0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai int64x1_t vreinterpret_s64_s16(int16x4_t __p0) {
+ int64x1_t __ret;
+ __ret = (int64x1_t)(__p0);
+ return __ret;
+}
+#else
+__ai int64x1_t vreinterpret_s64_s16(int16x4_t __p0) {
+ int64x1_t __ret;
+ __ret = (int64x1_t)(__p0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai int16x4_t vreinterpret_s16_p8(poly8x8_t __p0) {
+ int16x4_t __ret;
+ __ret = (int16x4_t)(__p0);
+ return __ret;
+}
+#else
+__ai int16x4_t vreinterpret_s16_p8(poly8x8_t __p0) {
+ int16x4_t __ret;
+ __ret = (int16x4_t)(__p0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai int16x4_t vreinterpret_s16_p16(poly16x4_t __p0) {
+ int16x4_t __ret;
+ __ret = (int16x4_t)(__p0);
+ return __ret;
+}
+#else
+__ai int16x4_t vreinterpret_s16_p16(poly16x4_t __p0) {
+ int16x4_t __ret;
+ __ret = (int16x4_t)(__p0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai int16x4_t vreinterpret_s16_u8(uint8x8_t __p0) {
+ int16x4_t __ret;
+ __ret = (int16x4_t)(__p0);
+ return __ret;
+}
+#else
+__ai int16x4_t vreinterpret_s16_u8(uint8x8_t __p0) {
+ int16x4_t __ret;
+ __ret = (int16x4_t)(__p0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai int16x4_t vreinterpret_s16_u32(uint32x2_t __p0) {
+ int16x4_t __ret;
+ __ret = (int16x4_t)(__p0);
+ return __ret;
+}
+#else
+__ai int16x4_t vreinterpret_s16_u32(uint32x2_t __p0) {
+ int16x4_t __ret;
+ __ret = (int16x4_t)(__p0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai int16x4_t vreinterpret_s16_u64(uint64x1_t __p0) {
+ int16x4_t __ret;
+ __ret = (int16x4_t)(__p0);
+ return __ret;
+}
+#else
+__ai int16x4_t vreinterpret_s16_u64(uint64x1_t __p0) {
+ int16x4_t __ret;
+ __ret = (int16x4_t)(__p0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai int16x4_t vreinterpret_s16_u16(uint16x4_t __p0) {
+ int16x4_t __ret;
+ __ret = (int16x4_t)(__p0);
+ return __ret;
+}
+#else
+__ai int16x4_t vreinterpret_s16_u16(uint16x4_t __p0) {
+ int16x4_t __ret;
+ __ret = (int16x4_t)(__p0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai int16x4_t vreinterpret_s16_s8(int8x8_t __p0) {
+ int16x4_t __ret;
+ __ret = (int16x4_t)(__p0);
+ return __ret;
+}
+#else
+__ai int16x4_t vreinterpret_s16_s8(int8x8_t __p0) {
+ int16x4_t __ret;
+ __ret = (int16x4_t)(__p0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai int16x4_t vreinterpret_s16_f32(float32x2_t __p0) {
+ int16x4_t __ret;
+ __ret = (int16x4_t)(__p0);
+ return __ret;
+}
+#else
+__ai int16x4_t vreinterpret_s16_f32(float32x2_t __p0) {
+ int16x4_t __ret;
+ __ret = (int16x4_t)(__p0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai int16x4_t vreinterpret_s16_f16(float16x4_t __p0) {
+ int16x4_t __ret;
+ __ret = (int16x4_t)(__p0);
+ return __ret;
+}
+#else
+__ai int16x4_t vreinterpret_s16_f16(float16x4_t __p0) {
+ int16x4_t __ret;
+ __ret = (int16x4_t)(__p0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai int16x4_t vreinterpret_s16_s32(int32x2_t __p0) {
+ int16x4_t __ret;
+ __ret = (int16x4_t)(__p0);
+ return __ret;
+}
+#else
+__ai int16x4_t vreinterpret_s16_s32(int32x2_t __p0) {
+ int16x4_t __ret;
+ __ret = (int16x4_t)(__p0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai int16x4_t vreinterpret_s16_s64(int64x1_t __p0) {
+ int16x4_t __ret;
+ __ret = (int16x4_t)(__p0);
+ return __ret;
+}
+#else
+__ai int16x4_t vreinterpret_s16_s64(int64x1_t __p0) {
+ int16x4_t __ret;
+ __ret = (int16x4_t)(__p0);
+ return __ret;
+}
+#endif
+
+#endif
+#if __ARM_ARCH >= 8
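+/* ARMv8 adds float-to-integer conversions with an explicit rounding
+   direction: vcvta* rounds to nearest (ties away from zero), vcvtm*
+   toward -infinity, vcvtn* to nearest (ties to even), and vcvtp*
+   toward +infinity. */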
+#ifdef __LITTLE_ENDIAN__
+__ai int32x4_t vcvtaq_s32_f32(float32x4_t __p0) {
+ int32x4_t __ret;
+ __ret = (int32x4_t) __builtin_neon_vcvtaq_s32_v((int8x16_t)__p0, 34);
+ return __ret;
+}
+#else
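+/* Big-endian variant: lanes sit in reversed order in the register, so
+   the argument is shuffled to little-endian lane order before the
+   builtin and the result is shuffled back. The same pattern recurs in
+   every big-endian branch below. */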
+__ai int32x4_t vcvtaq_s32_f32(float32x4_t __p0) {
+ float32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
+ int32x4_t __ret;
+ __ret = (int32x4_t) __builtin_neon_vcvtaq_s32_v((int8x16_t)__rev0, 34);
+ __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai int32x2_t vcvta_s32_f32(float32x2_t __p0) {
+ int32x2_t __ret;
+ __ret = (int32x2_t) __builtin_neon_vcvta_s32_v((int8x8_t)__p0, 2);
+ return __ret;
+}
+#else
+__ai int32x2_t vcvta_s32_f32(float32x2_t __p0) {
+ float32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
+ int32x2_t __ret;
+ __ret = (int32x2_t) __builtin_neon_vcvta_s32_v((int8x8_t)__rev0, 2);
+ __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint32x4_t vcvtaq_u32_f32(float32x4_t __p0) {
+ uint32x4_t __ret;
+ __ret = (uint32x4_t) __builtin_neon_vcvtaq_u32_v((int8x16_t)__p0, 50);
+ return __ret;
+}
+#else
+__ai uint32x4_t vcvtaq_u32_f32(float32x4_t __p0) {
+ float32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
+ uint32x4_t __ret;
+ __ret = (uint32x4_t) __builtin_neon_vcvtaq_u32_v((int8x16_t)__rev0, 50);
+ __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint32x2_t vcvta_u32_f32(float32x2_t __p0) {
+ uint32x2_t __ret;
+ __ret = (uint32x2_t) __builtin_neon_vcvta_u32_v((int8x8_t)__p0, 18);
+ return __ret;
+}
+#else
+__ai uint32x2_t vcvta_u32_f32(float32x2_t __p0) {
+ float32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
+ uint32x2_t __ret;
+ __ret = (uint32x2_t) __builtin_neon_vcvta_u32_v((int8x8_t)__rev0, 18);
+ __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai int32x4_t vcvtmq_s32_f32(float32x4_t __p0) {
+ int32x4_t __ret;
+ __ret = (int32x4_t) __builtin_neon_vcvtmq_s32_v((int8x16_t)__p0, 34);
+ return __ret;
+}
+#else
+__ai int32x4_t vcvtmq_s32_f32(float32x4_t __p0) {
+ float32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
+ int32x4_t __ret;
+ __ret = (int32x4_t) __builtin_neon_vcvtmq_s32_v((int8x16_t)__rev0, 34);
+ __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai int32x2_t vcvtm_s32_f32(float32x2_t __p0) {
+ int32x2_t __ret;
+ __ret = (int32x2_t) __builtin_neon_vcvtm_s32_v((int8x8_t)__p0, 2);
+ return __ret;
+}
+#else
+__ai int32x2_t vcvtm_s32_f32(float32x2_t __p0) {
+ float32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
+ int32x2_t __ret;
+ __ret = (int32x2_t) __builtin_neon_vcvtm_s32_v((int8x8_t)__rev0, 2);
+ __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint32x4_t vcvtmq_u32_f32(float32x4_t __p0) {
+ uint32x4_t __ret;
+ __ret = (uint32x4_t) __builtin_neon_vcvtmq_u32_v((int8x16_t)__p0, 50);
+ return __ret;
+}
+#else
+__ai uint32x4_t vcvtmq_u32_f32(float32x4_t __p0) {
+ float32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
+ uint32x4_t __ret;
+ __ret = (uint32x4_t) __builtin_neon_vcvtmq_u32_v((int8x16_t)__rev0, 50);
+ __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint32x2_t vcvtm_u32_f32(float32x2_t __p0) {
+ uint32x2_t __ret;
+ __ret = (uint32x2_t) __builtin_neon_vcvtm_u32_v((int8x8_t)__p0, 18);
+ return __ret;
+}
+#else
+__ai uint32x2_t vcvtm_u32_f32(float32x2_t __p0) {
+ float32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
+ uint32x2_t __ret;
+ __ret = (uint32x2_t) __builtin_neon_vcvtm_u32_v((int8x8_t)__rev0, 18);
+ __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai int32x4_t vcvtnq_s32_f32(float32x4_t __p0) {
+ int32x4_t __ret;
+ __ret = (int32x4_t) __builtin_neon_vcvtnq_s32_v((int8x16_t)__p0, 34);
+ return __ret;
+}
+#else
+__ai int32x4_t vcvtnq_s32_f32(float32x4_t __p0) {
+ float32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
+ int32x4_t __ret;
+ __ret = (int32x4_t) __builtin_neon_vcvtnq_s32_v((int8x16_t)__rev0, 34);
+ __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai int32x2_t vcvtn_s32_f32(float32x2_t __p0) {
+ int32x2_t __ret;
+ __ret = (int32x2_t) __builtin_neon_vcvtn_s32_v((int8x8_t)__p0, 2);
+ return __ret;
+}
+#else
+__ai int32x2_t vcvtn_s32_f32(float32x2_t __p0) {
+ float32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
+ int32x2_t __ret;
+ __ret = (int32x2_t) __builtin_neon_vcvtn_s32_v((int8x8_t)__rev0, 2);
+ __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint32x4_t vcvtnq_u32_f32(float32x4_t __p0) {
+ uint32x4_t __ret;
+ __ret = (uint32x4_t) __builtin_neon_vcvtnq_u32_v((int8x16_t)__p0, 50);
+ return __ret;
+}
+#else
+__ai uint32x4_t vcvtnq_u32_f32(float32x4_t __p0) {
+ float32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
+ uint32x4_t __ret;
+ __ret = (uint32x4_t) __builtin_neon_vcvtnq_u32_v((int8x16_t)__rev0, 50);
+ __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint32x2_t vcvtn_u32_f32(float32x2_t __p0) {
+ uint32x2_t __ret;
+ __ret = (uint32x2_t) __builtin_neon_vcvtn_u32_v((int8x8_t)__p0, 18);
+ return __ret;
+}
+#else
+__ai uint32x2_t vcvtn_u32_f32(float32x2_t __p0) {
+ float32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
+ uint32x2_t __ret;
+ __ret = (uint32x2_t) __builtin_neon_vcvtn_u32_v((int8x8_t)__rev0, 18);
+ __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai int32x4_t vcvtpq_s32_f32(float32x4_t __p0) {
+ int32x4_t __ret;
+ __ret = (int32x4_t) __builtin_neon_vcvtpq_s32_v((int8x16_t)__p0, 34);
+ return __ret;
+}
+#else
+__ai int32x4_t vcvtpq_s32_f32(float32x4_t __p0) {
+ float32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
+ int32x4_t __ret;
+ __ret = (int32x4_t) __builtin_neon_vcvtpq_s32_v((int8x16_t)__rev0, 34);
+ __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai int32x2_t vcvtp_s32_f32(float32x2_t __p0) {
+ int32x2_t __ret;
+ __ret = (int32x2_t) __builtin_neon_vcvtp_s32_v((int8x8_t)__p0, 2);
+ return __ret;
+}
+#else
+__ai int32x2_t vcvtp_s32_f32(float32x2_t __p0) {
+ float32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
+ int32x2_t __ret;
+ __ret = (int32x2_t) __builtin_neon_vcvtp_s32_v((int8x8_t)__rev0, 2);
+ __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint32x4_t vcvtpq_u32_f32(float32x4_t __p0) {
+ uint32x4_t __ret;
+ __ret = (uint32x4_t) __builtin_neon_vcvtpq_u32_v((int8x16_t)__p0, 50);
+ return __ret;
+}
+#else
+__ai uint32x4_t vcvtpq_u32_f32(float32x4_t __p0) {
+ float32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
+ uint32x4_t __ret;
+ __ret = (uint32x4_t) __builtin_neon_vcvtpq_u32_v((int8x16_t)__rev0, 50);
+ __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint32x2_t vcvtp_u32_f32(float32x2_t __p0) {
+ uint32x2_t __ret;
+ __ret = (uint32x2_t) __builtin_neon_vcvtp_u32_v((int8x8_t)__p0, 18);
+ return __ret;
+}
+#else
+__ai uint32x2_t vcvtp_u32_f32(float32x2_t __p0) {
+ float32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
+ uint32x2_t __ret;
+ __ret = (uint32x2_t) __builtin_neon_vcvtp_u32_v((int8x8_t)__rev0, 18);
+ __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
+ return __ret;
+}
+#endif
+
+#endif
+#if __ARM_ARCH >= 8 && defined(__ARM_FEATURE_DIRECTED_ROUNDING)
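+/* Round-to-integral in floating-point format: vrnd* truncates toward
+   zero, vrnda* rounds to nearest (ties away from zero), vrndm* toward
+   -infinity, vrndn* to nearest (ties to even), vrndp* toward +infinity,
+   and vrndx* uses the current rounding mode (and may raise inexact). */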
+#ifdef __LITTLE_ENDIAN__
+__ai float32x4_t vrndq_f32(float32x4_t __p0) {
+ float32x4_t __ret;
+ __ret = (float32x4_t) __builtin_neon_vrndq_v((int8x16_t)__p0, 41);
+ return __ret;
+}
+#else
+__ai float32x4_t vrndq_f32(float32x4_t __p0) {
+ float32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
+ float32x4_t __ret;
+ __ret = (float32x4_t) __builtin_neon_vrndq_v((int8x16_t)__rev0, 41);
+ __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai float32x2_t vrnd_f32(float32x2_t __p0) {
+ float32x2_t __ret;
+ __ret = (float32x2_t) __builtin_neon_vrnd_v((int8x8_t)__p0, 9);
+ return __ret;
+}
+#else
+__ai float32x2_t vrnd_f32(float32x2_t __p0) {
+ float32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
+ float32x2_t __ret;
+ __ret = (float32x2_t) __builtin_neon_vrnd_v((int8x8_t)__rev0, 9);
+ __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai float32x4_t vrndaq_f32(float32x4_t __p0) {
+ float32x4_t __ret;
+ __ret = (float32x4_t) __builtin_neon_vrndaq_v((int8x16_t)__p0, 41);
+ return __ret;
+}
+#else
+__ai float32x4_t vrndaq_f32(float32x4_t __p0) {
+ float32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
+ float32x4_t __ret;
+ __ret = (float32x4_t) __builtin_neon_vrndaq_v((int8x16_t)__rev0, 41);
+ __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai float32x2_t vrnda_f32(float32x2_t __p0) {
+ float32x2_t __ret;
+ __ret = (float32x2_t) __builtin_neon_vrnda_v((int8x8_t)__p0, 9);
+ return __ret;
+}
+#else
+__ai float32x2_t vrnda_f32(float32x2_t __p0) {
+ float32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
+ float32x2_t __ret;
+ __ret = (float32x2_t) __builtin_neon_vrnda_v((int8x8_t)__rev0, 9);
+ __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai float32x4_t vrndmq_f32(float32x4_t __p0) {
+ float32x4_t __ret;
+ __ret = (float32x4_t) __builtin_neon_vrndmq_v((int8x16_t)__p0, 41);
+ return __ret;
+}
+#else
+__ai float32x4_t vrndmq_f32(float32x4_t __p0) {
+ float32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
+ float32x4_t __ret;
+ __ret = (float32x4_t) __builtin_neon_vrndmq_v((int8x16_t)__rev0, 41);
+ __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai float32x2_t vrndm_f32(float32x2_t __p0) {
+ float32x2_t __ret;
+ __ret = (float32x2_t) __builtin_neon_vrndm_v((int8x8_t)__p0, 9);
+ return __ret;
+}
+#else
+__ai float32x2_t vrndm_f32(float32x2_t __p0) {
+ float32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
+ float32x2_t __ret;
+ __ret = (float32x2_t) __builtin_neon_vrndm_v((int8x8_t)__rev0, 9);
+ __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai float32x4_t vrndnq_f32(float32x4_t __p0) {
+ float32x4_t __ret;
+ __ret = (float32x4_t) __builtin_neon_vrndnq_v((int8x16_t)__p0, 41);
+ return __ret;
+}
+#else
+__ai float32x4_t vrndnq_f32(float32x4_t __p0) {
+ float32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
+ float32x4_t __ret;
+ __ret = (float32x4_t) __builtin_neon_vrndnq_v((int8x16_t)__rev0, 41);
+ __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai float32x2_t vrndn_f32(float32x2_t __p0) {
+ float32x2_t __ret;
+ __ret = (float32x2_t) __builtin_neon_vrndn_v((int8x8_t)__p0, 9);
+ return __ret;
+}
+#else
+__ai float32x2_t vrndn_f32(float32x2_t __p0) {
+ float32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
+ float32x2_t __ret;
+ __ret = (float32x2_t) __builtin_neon_vrndn_v((int8x8_t)__rev0, 9);
+ __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai float32x4_t vrndpq_f32(float32x4_t __p0) {
+ float32x4_t __ret;
+ __ret = (float32x4_t) __builtin_neon_vrndpq_v((int8x16_t)__p0, 41);
+ return __ret;
+}
+#else
+__ai float32x4_t vrndpq_f32(float32x4_t __p0) {
+ float32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
+ float32x4_t __ret;
+ __ret = (float32x4_t) __builtin_neon_vrndpq_v((int8x16_t)__rev0, 41);
+ __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai float32x2_t vrndp_f32(float32x2_t __p0) {
+ float32x2_t __ret;
+ __ret = (float32x2_t) __builtin_neon_vrndp_v((int8x8_t)__p0, 9);
+ return __ret;
+}
+#else
+__ai float32x2_t vrndp_f32(float32x2_t __p0) {
+ float32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
+ float32x2_t __ret;
+ __ret = (float32x2_t) __builtin_neon_vrndp_v((int8x8_t)__rev0, 9);
+ __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai float32x4_t vrndxq_f32(float32x4_t __p0) {
+ float32x4_t __ret;
+ __ret = (float32x4_t) __builtin_neon_vrndxq_v((int8x16_t)__p0, 41);
+ return __ret;
+}
+#else
+__ai float32x4_t vrndxq_f32(float32x4_t __p0) {
+ float32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
+ float32x4_t __ret;
+ __ret = (float32x4_t) __builtin_neon_vrndxq_v((int8x16_t)__rev0, 41);
+ __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai float32x2_t vrndx_f32(float32x2_t __p0) {
+ float32x2_t __ret;
+ __ret = (float32x2_t) __builtin_neon_vrndx_v((int8x8_t)__p0, 9);
+ return __ret;
+}
+#else
+__ai float32x2_t vrndx_f32(float32x2_t __p0) {
+ float32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
+ float32x2_t __ret;
+ __ret = (float32x2_t) __builtin_neon_vrndx_v((int8x8_t)__rev0, 9);
+ __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
+ return __ret;
+}
+#endif
+
+#endif
+#if __ARM_ARCH >= 8 && defined(__ARM_FEATURE_NUMERIC_MAXMIN)
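+/* vmaxnm/vminnm follow IEEE 754-2008 maxNum/minNum semantics: when one
+   operand is a quiet NaN and the other is a number, the number wins. */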
+#ifdef __LITTLE_ENDIAN__
+__ai float32x4_t vmaxnmq_f32(float32x4_t __p0, float32x4_t __p1) {
+ float32x4_t __ret;
+ __ret = (float32x4_t) __builtin_neon_vmaxnmq_v((int8x16_t)__p0, (int8x16_t)__p1, 41);
+ return __ret;
+}
+#else
+__ai float32x4_t vmaxnmq_f32(float32x4_t __p0, float32x4_t __p1) {
+ float32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
+ float32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
+ float32x4_t __ret;
+ __ret = (float32x4_t) __builtin_neon_vmaxnmq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 41);
+ __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai float32x2_t vmaxnm_f32(float32x2_t __p0, float32x2_t __p1) {
+ float32x2_t __ret;
+ __ret = (float32x2_t) __builtin_neon_vmaxnm_v((int8x8_t)__p0, (int8x8_t)__p1, 9);
+ return __ret;
+}
+#else
+__ai float32x2_t vmaxnm_f32(float32x2_t __p0, float32x2_t __p1) {
+ float32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
+ float32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
+ float32x2_t __ret;
+ __ret = (float32x2_t) __builtin_neon_vmaxnm_v((int8x8_t)__rev0, (int8x8_t)__rev1, 9);
+ __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai float32x4_t vminnmq_f32(float32x4_t __p0, float32x4_t __p1) {
+ float32x4_t __ret;
+ __ret = (float32x4_t) __builtin_neon_vminnmq_v((int8x16_t)__p0, (int8x16_t)__p1, 41);
+ return __ret;
+}
+#else
+__ai float32x4_t vminnmq_f32(float32x4_t __p0, float32x4_t __p1) {
+ float32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
+ float32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
+ float32x4_t __ret;
+ __ret = (float32x4_t) __builtin_neon_vminnmq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 41);
+ __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai float32x2_t vminnm_f32(float32x2_t __p0, float32x2_t __p1) {
+ float32x2_t __ret;
+ __ret = (float32x2_t) __builtin_neon_vminnm_v((int8x8_t)__p0, (int8x8_t)__p1, 9);
+ return __ret;
+}
+#else
+__ai float32x2_t vminnm_f32(float32x2_t __p0, float32x2_t __p1) {
+ float32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
+ float32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
+ float32x2_t __ret;
+ __ret = (float32x2_t) __builtin_neon_vminnm_v((int8x8_t)__rev0, (int8x8_t)__rev1, 9);
+ __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
+ return __ret;
+}
+#endif
+
+#endif
+#if __ARM_ARCH >= 8 && defined(__aarch64__)
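+/* AArch64-only section: float64 variants of the directed-rounding
+   conversions, followed by the vreinterpret casts involving the
+   poly64/float64 types. The 64x1 single-lane forms need no lane
+   reversal, so their endianness branches are identical. */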
+#ifdef __LITTLE_ENDIAN__
+__ai int64x2_t vcvtaq_s64_f64(float64x2_t __p0) {
+ int64x2_t __ret;
+ __ret = (int64x2_t) __builtin_neon_vcvtaq_s64_v((int8x16_t)__p0, 35);
+ return __ret;
+}
+#else
+__ai int64x2_t vcvtaq_s64_f64(float64x2_t __p0) {
+ float64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
+ int64x2_t __ret;
+ __ret = (int64x2_t) __builtin_neon_vcvtaq_s64_v((int8x16_t)__rev0, 35);
+ __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai int64x1_t vcvta_s64_f64(float64x1_t __p0) {
+ int64x1_t __ret;
+ __ret = (int64x1_t) __builtin_neon_vcvta_s64_v((int8x8_t)__p0, 3);
+ return __ret;
+}
+#else
+__ai int64x1_t vcvta_s64_f64(float64x1_t __p0) {
+ int64x1_t __ret;
+ __ret = (int64x1_t) __builtin_neon_vcvta_s64_v((int8x8_t)__p0, 3);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint64x2_t vcvtaq_u64_f64(float64x2_t __p0) {
+ uint64x2_t __ret;
+ __ret = (uint64x2_t) __builtin_neon_vcvtaq_u64_v((int8x16_t)__p0, 51);
+ return __ret;
+}
+#else
+__ai uint64x2_t vcvtaq_u64_f64(float64x2_t __p0) {
+ float64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
+ uint64x2_t __ret;
+ __ret = (uint64x2_t) __builtin_neon_vcvtaq_u64_v((int8x16_t)__rev0, 51);
+ __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint64x1_t vcvta_u64_f64(float64x1_t __p0) {
+ uint64x1_t __ret;
+ __ret = (uint64x1_t) __builtin_neon_vcvta_u64_v((int8x8_t)__p0, 19);
+ return __ret;
+}
+#else
+__ai uint64x1_t vcvta_u64_f64(float64x1_t __p0) {
+ uint64x1_t __ret;
+ __ret = (uint64x1_t) __builtin_neon_vcvta_u64_v((int8x8_t)__p0, 19);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai int64x2_t vcvtmq_s64_f64(float64x2_t __p0) {
+ int64x2_t __ret;
+ __ret = (int64x2_t) __builtin_neon_vcvtmq_s64_v((int8x16_t)__p0, 35);
+ return __ret;
+}
+#else
+__ai int64x2_t vcvtmq_s64_f64(float64x2_t __p0) {
+ float64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
+ int64x2_t __ret;
+ __ret = (int64x2_t) __builtin_neon_vcvtmq_s64_v((int8x16_t)__rev0, 35);
+ __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai int64x1_t vcvtm_s64_f64(float64x1_t __p0) {
+ int64x1_t __ret;
+ __ret = (int64x1_t) __builtin_neon_vcvtm_s64_v((int8x8_t)__p0, 3);
+ return __ret;
+}
+#else
+__ai int64x1_t vcvtm_s64_f64(float64x1_t __p0) {
+ int64x1_t __ret;
+ __ret = (int64x1_t) __builtin_neon_vcvtm_s64_v((int8x8_t)__p0, 3);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint64x2_t vcvtmq_u64_f64(float64x2_t __p0) {
+ uint64x2_t __ret;
+ __ret = (uint64x2_t) __builtin_neon_vcvtmq_u64_v((int8x16_t)__p0, 51);
+ return __ret;
+}
+#else
+__ai uint64x2_t vcvtmq_u64_f64(float64x2_t __p0) {
+ float64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
+ uint64x2_t __ret;
+ __ret = (uint64x2_t) __builtin_neon_vcvtmq_u64_v((int8x16_t)__rev0, 51);
+ __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint64x1_t vcvtm_u64_f64(float64x1_t __p0) {
+ uint64x1_t __ret;
+ __ret = (uint64x1_t) __builtin_neon_vcvtm_u64_v((int8x8_t)__p0, 19);
+ return __ret;
+}
+#else
+__ai uint64x1_t vcvtm_u64_f64(float64x1_t __p0) {
+ uint64x1_t __ret;
+ __ret = (uint64x1_t) __builtin_neon_vcvtm_u64_v((int8x8_t)__p0, 19);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai int64x2_t vcvtnq_s64_f64(float64x2_t __p0) {
+ int64x2_t __ret;
+ __ret = (int64x2_t) __builtin_neon_vcvtnq_s64_v((int8x16_t)__p0, 35);
+ return __ret;
+}
+#else
+__ai int64x2_t vcvtnq_s64_f64(float64x2_t __p0) {
+ float64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
+ int64x2_t __ret;
+ __ret = (int64x2_t) __builtin_neon_vcvtnq_s64_v((int8x16_t)__rev0, 35);
+ __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai int64x1_t vcvtn_s64_f64(float64x1_t __p0) {
+ int64x1_t __ret;
+ __ret = (int64x1_t) __builtin_neon_vcvtn_s64_v((int8x8_t)__p0, 3);
+ return __ret;
+}
+#else
+__ai int64x1_t vcvtn_s64_f64(float64x1_t __p0) {
+ int64x1_t __ret;
+ __ret = (int64x1_t) __builtin_neon_vcvtn_s64_v((int8x8_t)__p0, 3);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint64x2_t vcvtnq_u64_f64(float64x2_t __p0) {
+ uint64x2_t __ret;
+ __ret = (uint64x2_t) __builtin_neon_vcvtnq_u64_v((int8x16_t)__p0, 51);
+ return __ret;
+}
+#else
+__ai uint64x2_t vcvtnq_u64_f64(float64x2_t __p0) {
+ float64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
+ uint64x2_t __ret;
+ __ret = (uint64x2_t) __builtin_neon_vcvtnq_u64_v((int8x16_t)__rev0, 51);
+ __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint64x1_t vcvtn_u64_f64(float64x1_t __p0) {
+ uint64x1_t __ret;
+ __ret = (uint64x1_t) __builtin_neon_vcvtn_u64_v((int8x8_t)__p0, 19);
+ return __ret;
+}
+#else
+__ai uint64x1_t vcvtn_u64_f64(float64x1_t __p0) {
+ uint64x1_t __ret;
+ __ret = (uint64x1_t) __builtin_neon_vcvtn_u64_v((int8x8_t)__p0, 19);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai int64x2_t vcvtpq_s64_f64(float64x2_t __p0) {
+ int64x2_t __ret;
+ __ret = (int64x2_t) __builtin_neon_vcvtpq_s64_v((int8x16_t)__p0, 35);
+ return __ret;
+}
+#else
+__ai int64x2_t vcvtpq_s64_f64(float64x2_t __p0) {
+ float64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
+ int64x2_t __ret;
+ __ret = (int64x2_t) __builtin_neon_vcvtpq_s64_v((int8x16_t)__rev0, 35);
+ __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai int64x1_t vcvtp_s64_f64(float64x1_t __p0) {
+ int64x1_t __ret;
+ __ret = (int64x1_t) __builtin_neon_vcvtp_s64_v((int8x8_t)__p0, 3);
+ return __ret;
+}
+#else
+__ai int64x1_t vcvtp_s64_f64(float64x1_t __p0) {
+ int64x1_t __ret;
+ __ret = (int64x1_t) __builtin_neon_vcvtp_s64_v((int8x8_t)__p0, 3);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint64x2_t vcvtpq_u64_f64(float64x2_t __p0) {
+ uint64x2_t __ret;
+ __ret = (uint64x2_t) __builtin_neon_vcvtpq_u64_v((int8x16_t)__p0, 51);
+ return __ret;
+}
+#else
+__ai uint64x2_t vcvtpq_u64_f64(float64x2_t __p0) {
+ float64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
+ uint64x2_t __ret;
+ __ret = (uint64x2_t) __builtin_neon_vcvtpq_u64_v((int8x16_t)__rev0, 51);
+ __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint64x1_t vcvtp_u64_f64(float64x1_t __p0) {
+ uint64x1_t __ret;
+ __ret = (uint64x1_t) __builtin_neon_vcvtp_u64_v((int8x8_t)__p0, 19);
+ return __ret;
+}
+#else
+__ai uint64x1_t vcvtp_u64_f64(float64x1_t __p0) {
+ uint64x1_t __ret;
+ __ret = (uint64x1_t) __builtin_neon_vcvtp_u64_v((int8x8_t)__p0, 19);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai poly8x8_t vreinterpret_p8_p64(poly64x1_t __p0) {
+ poly8x8_t __ret;
+ __ret = (poly8x8_t)(__p0);
+ return __ret;
+}
+#else
+__ai poly8x8_t vreinterpret_p8_p64(poly64x1_t __p0) {
+ poly8x8_t __ret;
+ __ret = (poly8x8_t)(__p0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai poly8x8_t vreinterpret_p8_p16(poly16x4_t __p0) {
+ poly8x8_t __ret;
+ __ret = (poly8x8_t)(__p0);
+ return __ret;
+}
+#else
+__ai poly8x8_t vreinterpret_p8_p16(poly16x4_t __p0) {
+ poly8x8_t __ret;
+ __ret = (poly8x8_t)(__p0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai poly8x8_t vreinterpret_p8_u8(uint8x8_t __p0) {
+ poly8x8_t __ret;
+ __ret = (poly8x8_t)(__p0);
+ return __ret;
+}
+#else
+__ai poly8x8_t vreinterpret_p8_u8(uint8x8_t __p0) {
+ poly8x8_t __ret;
+ __ret = (poly8x8_t)(__p0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai poly8x8_t vreinterpret_p8_u32(uint32x2_t __p0) {
+ poly8x8_t __ret;
+ __ret = (poly8x8_t)(__p0);
+ return __ret;
+}
+#else
+__ai poly8x8_t vreinterpret_p8_u32(uint32x2_t __p0) {
+ poly8x8_t __ret;
+ __ret = (poly8x8_t)(__p0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai poly8x8_t vreinterpret_p8_u64(uint64x1_t __p0) {
+ poly8x8_t __ret;
+ __ret = (poly8x8_t)(__p0);
+ return __ret;
+}
+#else
+__ai poly8x8_t vreinterpret_p8_u64(uint64x1_t __p0) {
+ poly8x8_t __ret;
+ __ret = (poly8x8_t)(__p0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai poly8x8_t vreinterpret_p8_u16(uint16x4_t __p0) {
+ poly8x8_t __ret;
+ __ret = (poly8x8_t)(__p0);
+ return __ret;
+}
+#else
+__ai poly8x8_t vreinterpret_p8_u16(uint16x4_t __p0) {
+ poly8x8_t __ret;
+ __ret = (poly8x8_t)(__p0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai poly8x8_t vreinterpret_p8_s8(int8x8_t __p0) {
+ poly8x8_t __ret;
+ __ret = (poly8x8_t)(__p0);
+ return __ret;
+}
+#else
+__ai poly8x8_t vreinterpret_p8_s8(int8x8_t __p0) {
+ poly8x8_t __ret;
+ __ret = (poly8x8_t)(__p0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai poly8x8_t vreinterpret_p8_f64(float64x1_t __p0) {
+ poly8x8_t __ret;
+ __ret = (poly8x8_t)(__p0);
+ return __ret;
+}
+#else
+__ai poly8x8_t vreinterpret_p8_f64(float64x1_t __p0) {
+ poly8x8_t __ret;
+ __ret = (poly8x8_t)(__p0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai poly8x8_t vreinterpret_p8_f32(float32x2_t __p0) {
+ poly8x8_t __ret;
+ __ret = (poly8x8_t)(__p0);
+ return __ret;
+}
+#else
+__ai poly8x8_t vreinterpret_p8_f32(float32x2_t __p0) {
+ poly8x8_t __ret;
+ __ret = (poly8x8_t)(__p0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai poly8x8_t vreinterpret_p8_f16(float16x4_t __p0) {
+ poly8x8_t __ret;
+ __ret = (poly8x8_t)(__p0);
+ return __ret;
+}
+#else
+__ai poly8x8_t vreinterpret_p8_f16(float16x4_t __p0) {
+ poly8x8_t __ret;
+ __ret = (poly8x8_t)(__p0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai poly8x8_t vreinterpret_p8_s32(int32x2_t __p0) {
+ poly8x8_t __ret;
+ __ret = (poly8x8_t)(__p0);
+ return __ret;
+}
+#else
+__ai poly8x8_t vreinterpret_p8_s32(int32x2_t __p0) {
+ poly8x8_t __ret;
+ __ret = (poly8x8_t)(__p0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai poly8x8_t vreinterpret_p8_s64(int64x1_t __p0) {
+ poly8x8_t __ret;
+ __ret = (poly8x8_t)(__p0);
+ return __ret;
+}
+#else
+__ai poly8x8_t vreinterpret_p8_s64(int64x1_t __p0) {
+ poly8x8_t __ret;
+ __ret = (poly8x8_t)(__p0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai poly8x8_t vreinterpret_p8_s16(int16x4_t __p0) {
+ poly8x8_t __ret;
+ __ret = (poly8x8_t)(__p0);
+ return __ret;
+}
+#else
+__ai poly8x8_t vreinterpret_p8_s16(int16x4_t __p0) {
+ poly8x8_t __ret;
+ __ret = (poly8x8_t)(__p0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai poly64x1_t vreinterpret_p64_p8(poly8x8_t __p0) {
+ poly64x1_t __ret;
+ __ret = (poly64x1_t)(__p0);
+ return __ret;
+}
+#else
+__ai poly64x1_t vreinterpret_p64_p8(poly8x8_t __p0) {
+ poly64x1_t __ret;
+ __ret = (poly64x1_t)(__p0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai poly64x1_t vreinterpret_p64_p16(poly16x4_t __p0) {
+ poly64x1_t __ret;
+ __ret = (poly64x1_t)(__p0);
+ return __ret;
+}
+#else
+__ai poly64x1_t vreinterpret_p64_p16(poly16x4_t __p0) {
+ poly64x1_t __ret;
+ __ret = (poly64x1_t)(__p0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai poly64x1_t vreinterpret_p64_u8(uint8x8_t __p0) {
+ poly64x1_t __ret;
+ __ret = (poly64x1_t)(__p0);
+ return __ret;
+}
+#else
+__ai poly64x1_t vreinterpret_p64_u8(uint8x8_t __p0) {
+ poly64x1_t __ret;
+ __ret = (poly64x1_t)(__p0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai poly64x1_t vreinterpret_p64_u32(uint32x2_t __p0) {
+ poly64x1_t __ret;
+ __ret = (poly64x1_t)(__p0);
+ return __ret;
+}
+#else
+__ai poly64x1_t vreinterpret_p64_u32(uint32x2_t __p0) {
+ poly64x1_t __ret;
+ __ret = (poly64x1_t)(__p0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai poly64x1_t vreinterpret_p64_u64(uint64x1_t __p0) {
+ poly64x1_t __ret;
+ __ret = (poly64x1_t)(__p0);
+ return __ret;
+}
+#else
+__ai poly64x1_t vreinterpret_p64_u64(uint64x1_t __p0) {
+ poly64x1_t __ret;
+ __ret = (poly64x1_t)(__p0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai poly64x1_t vreinterpret_p64_u16(uint16x4_t __p0) {
+ poly64x1_t __ret;
+ __ret = (poly64x1_t)(__p0);
+ return __ret;
+}
+#else
+__ai poly64x1_t vreinterpret_p64_u16(uint16x4_t __p0) {
+ poly64x1_t __ret;
+ __ret = (poly64x1_t)(__p0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai poly64x1_t vreinterpret_p64_s8(int8x8_t __p0) {
+ poly64x1_t __ret;
+ __ret = (poly64x1_t)(__p0);
+ return __ret;
+}
+#else
+__ai poly64x1_t vreinterpret_p64_s8(int8x8_t __p0) {
+ poly64x1_t __ret;
+ __ret = (poly64x1_t)(__p0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai poly64x1_t vreinterpret_p64_f64(float64x1_t __p0) {
+ poly64x1_t __ret;
+ __ret = (poly64x1_t)(__p0);
+ return __ret;
+}
+#else
+__ai poly64x1_t vreinterpret_p64_f64(float64x1_t __p0) {
+ poly64x1_t __ret;
+ __ret = (poly64x1_t)(__p0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai poly64x1_t vreinterpret_p64_f32(float32x2_t __p0) {
+ poly64x1_t __ret;
+ __ret = (poly64x1_t)(__p0);
+ return __ret;
+}
+#else
+__ai poly64x1_t vreinterpret_p64_f32(float32x2_t __p0) {
+ poly64x1_t __ret;
+ __ret = (poly64x1_t)(__p0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai poly64x1_t vreinterpret_p64_f16(float16x4_t __p0) {
+ poly64x1_t __ret;
+ __ret = (poly64x1_t)(__p0);
+ return __ret;
+}
+#else
+__ai poly64x1_t vreinterpret_p64_f16(float16x4_t __p0) {
+ poly64x1_t __ret;
+ __ret = (poly64x1_t)(__p0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai poly64x1_t vreinterpret_p64_s32(int32x2_t __p0) {
+ poly64x1_t __ret;
+ __ret = (poly64x1_t)(__p0);
+ return __ret;
+}
+#else
+__ai poly64x1_t vreinterpret_p64_s32(int32x2_t __p0) {
+ poly64x1_t __ret;
+ __ret = (poly64x1_t)(__p0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai poly64x1_t vreinterpret_p64_s64(int64x1_t __p0) {
+ poly64x1_t __ret;
+ __ret = (poly64x1_t)(__p0);
+ return __ret;
+}
+#else
+__ai poly64x1_t vreinterpret_p64_s64(int64x1_t __p0) {
+ poly64x1_t __ret;
+ __ret = (poly64x1_t)(__p0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai poly64x1_t vreinterpret_p64_s16(int16x4_t __p0) {
+ poly64x1_t __ret;
+ __ret = (poly64x1_t)(__p0);
+ return __ret;
+}
+#else
+__ai poly64x1_t vreinterpret_p64_s16(int16x4_t __p0) {
+ poly64x1_t __ret;
+ __ret = (poly64x1_t)(__p0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai poly16x4_t vreinterpret_p16_p8(poly8x8_t __p0) {
+ poly16x4_t __ret;
+ __ret = (poly16x4_t)(__p0);
+ return __ret;
+}
+#else
+__ai poly16x4_t vreinterpret_p16_p8(poly8x8_t __p0) {
+ poly16x4_t __ret;
+ __ret = (poly16x4_t)(__p0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai poly16x4_t vreinterpret_p16_p64(poly64x1_t __p0) {
+ poly16x4_t __ret;
+ __ret = (poly16x4_t)(__p0);
+ return __ret;
+}
+#else
+__ai poly16x4_t vreinterpret_p16_p64(poly64x1_t __p0) {
+ poly16x4_t __ret;
+ __ret = (poly16x4_t)(__p0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai poly16x4_t vreinterpret_p16_u8(uint8x8_t __p0) {
+ poly16x4_t __ret;
+ __ret = (poly16x4_t)(__p0);
+ return __ret;
+}
+#else
+__ai poly16x4_t vreinterpret_p16_u8(uint8x8_t __p0) {
+ poly16x4_t __ret;
+ __ret = (poly16x4_t)(__p0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai poly16x4_t vreinterpret_p16_u32(uint32x2_t __p0) {
+ poly16x4_t __ret;
+ __ret = (poly16x4_t)(__p0);
+ return __ret;
+}
+#else
+__ai poly16x4_t vreinterpret_p16_u32(uint32x2_t __p0) {
+ poly16x4_t __ret;
+ __ret = (poly16x4_t)(__p0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai poly16x4_t vreinterpret_p16_u64(uint64x1_t __p0) {
+ poly16x4_t __ret;
+ __ret = (poly16x4_t)(__p0);
+ return __ret;
+}
+#else
+__ai poly16x4_t vreinterpret_p16_u64(uint64x1_t __p0) {
+ poly16x4_t __ret;
+ __ret = (poly16x4_t)(__p0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai poly16x4_t vreinterpret_p16_u16(uint16x4_t __p0) {
+ poly16x4_t __ret;
+ __ret = (poly16x4_t)(__p0);
+ return __ret;
+}
+#else
+__ai poly16x4_t vreinterpret_p16_u16(uint16x4_t __p0) {
+ poly16x4_t __ret;
+ __ret = (poly16x4_t)(__p0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai poly16x4_t vreinterpret_p16_s8(int8x8_t __p0) {
+ poly16x4_t __ret;
+ __ret = (poly16x4_t)(__p0);
+ return __ret;
+}
+#else
+__ai poly16x4_t vreinterpret_p16_s8(int8x8_t __p0) {
+ poly16x4_t __ret;
+ __ret = (poly16x4_t)(__p0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai poly16x4_t vreinterpret_p16_f64(float64x1_t __p0) {
+ poly16x4_t __ret;
+ __ret = (poly16x4_t)(__p0);
+ return __ret;
+}
+#else
+__ai poly16x4_t vreinterpret_p16_f64(float64x1_t __p0) {
+ poly16x4_t __ret;
+ __ret = (poly16x4_t)(__p0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai poly16x4_t vreinterpret_p16_f32(float32x2_t __p0) {
+ poly16x4_t __ret;
+ __ret = (poly16x4_t)(__p0);
+ return __ret;
+}
+#else
+__ai poly16x4_t vreinterpret_p16_f32(float32x2_t __p0) {
+ poly16x4_t __ret;
+ __ret = (poly16x4_t)(__p0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai poly16x4_t vreinterpret_p16_f16(float16x4_t __p0) {
+ poly16x4_t __ret;
+ __ret = (poly16x4_t)(__p0);
+ return __ret;
+}
+#else
+__ai poly16x4_t vreinterpret_p16_f16(float16x4_t __p0) {
+ poly16x4_t __ret;
+ __ret = (poly16x4_t)(__p0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai poly16x4_t vreinterpret_p16_s32(int32x2_t __p0) {
+ poly16x4_t __ret;
+ __ret = (poly16x4_t)(__p0);
+ return __ret;
+}
+#else
+__ai poly16x4_t vreinterpret_p16_s32(int32x2_t __p0) {
+ poly16x4_t __ret;
+ __ret = (poly16x4_t)(__p0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai poly16x4_t vreinterpret_p16_s64(int64x1_t __p0) {
+ poly16x4_t __ret;
+ __ret = (poly16x4_t)(__p0);
+ return __ret;
+}
+#else
+__ai poly16x4_t vreinterpret_p16_s64(int64x1_t __p0) {
+ poly16x4_t __ret;
+ __ret = (poly16x4_t)(__p0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai poly16x4_t vreinterpret_p16_s16(int16x4_t __p0) {
+ poly16x4_t __ret;
+ __ret = (poly16x4_t)(__p0);
+ return __ret;
+}
+#else
+__ai poly16x4_t vreinterpret_p16_s16(int16x4_t __p0) {
+ poly16x4_t __ret;
+ __ret = (poly16x4_t)(__p0);
+ return __ret;
+}
+#endif
+
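+/*
+ * The vreinterpretq_* forms below are the 128-bit (q-register) counterparts
+ * of the casts above.  A minimal round-trip sketch, assuming an AArch64
+ * little-endian target:
+ *
+ *   float32x4_t f = vdupq_n_f32(3.5f);
+ *   uint32x4_t  u = vreinterpretq_u32_f32(f);  // raw IEEE-754 bit patterns
+ *   float32x4_t g = vreinterpretq_f32_u32(u);  // g == f, bit for bit
+ */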
+#ifdef __LITTLE_ENDIAN__
+__ai poly8x16_t vreinterpretq_p8_p128(poly128_t __p0) {
+ poly8x16_t __ret;
+ __ret = (poly8x16_t)(__p0);
+ return __ret;
+}
+#else
+__ai poly8x16_t vreinterpretq_p8_p128(poly128_t __p0) {
+ poly8x16_t __ret;
+ __ret = (poly8x16_t)(__p0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai poly8x16_t vreinterpretq_p8_p64(poly64x2_t __p0) {
+ poly8x16_t __ret;
+ __ret = (poly8x16_t)(__p0);
+ return __ret;
+}
+#else
+__ai poly8x16_t vreinterpretq_p8_p64(poly64x2_t __p0) {
+ poly8x16_t __ret;
+ __ret = (poly8x16_t)(__p0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai poly8x16_t vreinterpretq_p8_p16(poly16x8_t __p0) {
+ poly8x16_t __ret;
+ __ret = (poly8x16_t)(__p0);
+ return __ret;
+}
+#else
+__ai poly8x16_t vreinterpretq_p8_p16(poly16x8_t __p0) {
+ poly8x16_t __ret;
+ __ret = (poly8x16_t)(__p0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai poly8x16_t vreinterpretq_p8_u8(uint8x16_t __p0) {
+ poly8x16_t __ret;
+ __ret = (poly8x16_t)(__p0);
+ return __ret;
+}
+#else
+__ai poly8x16_t vreinterpretq_p8_u8(uint8x16_t __p0) {
+ poly8x16_t __ret;
+ __ret = (poly8x16_t)(__p0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai poly8x16_t vreinterpretq_p8_u32(uint32x4_t __p0) {
+ poly8x16_t __ret;
+ __ret = (poly8x16_t)(__p0);
+ return __ret;
+}
+#else
+__ai poly8x16_t vreinterpretq_p8_u32(uint32x4_t __p0) {
+ poly8x16_t __ret;
+ __ret = (poly8x16_t)(__p0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai poly8x16_t vreinterpretq_p8_u64(uint64x2_t __p0) {
+ poly8x16_t __ret;
+ __ret = (poly8x16_t)(__p0);
+ return __ret;
+}
+#else
+__ai poly8x16_t vreinterpretq_p8_u64(uint64x2_t __p0) {
+ poly8x16_t __ret;
+ __ret = (poly8x16_t)(__p0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai poly8x16_t vreinterpretq_p8_u16(uint16x8_t __p0) {
+ poly8x16_t __ret;
+ __ret = (poly8x16_t)(__p0);
+ return __ret;
+}
+#else
+__ai poly8x16_t vreinterpretq_p8_u16(uint16x8_t __p0) {
+ poly8x16_t __ret;
+ __ret = (poly8x16_t)(__p0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai poly8x16_t vreinterpretq_p8_s8(int8x16_t __p0) {
+ poly8x16_t __ret;
+ __ret = (poly8x16_t)(__p0);
+ return __ret;
+}
+#else
+__ai poly8x16_t vreinterpretq_p8_s8(int8x16_t __p0) {
+ poly8x16_t __ret;
+ __ret = (poly8x16_t)(__p0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai poly8x16_t vreinterpretq_p8_f64(float64x2_t __p0) {
+ poly8x16_t __ret;
+ __ret = (poly8x16_t)(__p0);
+ return __ret;
+}
+#else
+__ai poly8x16_t vreinterpretq_p8_f64(float64x2_t __p0) {
+ poly8x16_t __ret;
+ __ret = (poly8x16_t)(__p0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai poly8x16_t vreinterpretq_p8_f32(float32x4_t __p0) {
+ poly8x16_t __ret;
+ __ret = (poly8x16_t)(__p0);
+ return __ret;
+}
+#else
+__ai poly8x16_t vreinterpretq_p8_f32(float32x4_t __p0) {
+ poly8x16_t __ret;
+ __ret = (poly8x16_t)(__p0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai poly8x16_t vreinterpretq_p8_f16(float16x8_t __p0) {
+ poly8x16_t __ret;
+ __ret = (poly8x16_t)(__p0);
+ return __ret;
+}
+#else
+__ai poly8x16_t vreinterpretq_p8_f16(float16x8_t __p0) {
+ poly8x16_t __ret;
+ __ret = (poly8x16_t)(__p0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai poly8x16_t vreinterpretq_p8_s32(int32x4_t __p0) {
+ poly8x16_t __ret;
+ __ret = (poly8x16_t)(__p0);
+ return __ret;
+}
+#else
+__ai poly8x16_t vreinterpretq_p8_s32(int32x4_t __p0) {
+ poly8x16_t __ret;
+ __ret = (poly8x16_t)(__p0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai poly8x16_t vreinterpretq_p8_s64(int64x2_t __p0) {
+ poly8x16_t __ret;
+ __ret = (poly8x16_t)(__p0);
+ return __ret;
+}
+#else
+__ai poly8x16_t vreinterpretq_p8_s64(int64x2_t __p0) {
+ poly8x16_t __ret;
+ __ret = (poly8x16_t)(__p0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai poly8x16_t vreinterpretq_p8_s16(int16x8_t __p0) {
+ poly8x16_t __ret;
+ __ret = (poly8x16_t)(__p0);
+ return __ret;
+}
+#else
+__ai poly8x16_t vreinterpretq_p8_s16(int16x8_t __p0) {
+ poly8x16_t __ret;
+ __ret = (poly8x16_t)(__p0);
+ return __ret;
+}
+#endif
+
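+/*
+ * poly128_t is the 128-bit polynomial scalar (the result type of the crypto
+ * extension's vmull_p64, for example).  The casts below move between it and
+ * the ordinary vector types by reinterpreting the same 16 bytes.
+ */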
+#ifdef __LITTLE_ENDIAN__
+__ai poly128_t vreinterpretq_p128_p8(poly8x16_t __p0) {
+ poly128_t __ret;
+ __ret = (poly128_t)(__p0);
+ return __ret;
+}
+#else
+__ai poly128_t vreinterpretq_p128_p8(poly8x16_t __p0) {
+ poly128_t __ret;
+ __ret = (poly128_t)(__p0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai poly128_t vreinterpretq_p128_p64(poly64x2_t __p0) {
+ poly128_t __ret;
+ __ret = (poly128_t)(__p0);
+ return __ret;
+}
+#else
+__ai poly128_t vreinterpretq_p128_p64(poly64x2_t __p0) {
+ poly128_t __ret;
+ __ret = (poly128_t)(__p0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai poly128_t vreinterpretq_p128_p16(poly16x8_t __p0) {
+ poly128_t __ret;
+ __ret = (poly128_t)(__p0);
+ return __ret;
+}
+#else
+__ai poly128_t vreinterpretq_p128_p16(poly16x8_t __p0) {
+ poly128_t __ret;
+ __ret = (poly128_t)(__p0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai poly128_t vreinterpretq_p128_u8(uint8x16_t __p0) {
+ poly128_t __ret;
+ __ret = (poly128_t)(__p0);
+ return __ret;
+}
+#else
+__ai poly128_t vreinterpretq_p128_u8(uint8x16_t __p0) {
+ poly128_t __ret;
+ __ret = (poly128_t)(__p0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai poly128_t vreinterpretq_p128_u32(uint32x4_t __p0) {
+ poly128_t __ret;
+ __ret = (poly128_t)(__p0);
+ return __ret;
+}
+#else
+__ai poly128_t vreinterpretq_p128_u32(uint32x4_t __p0) {
+ poly128_t __ret;
+ __ret = (poly128_t)(__p0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai poly128_t vreinterpretq_p128_u64(uint64x2_t __p0) {
+ poly128_t __ret;
+ __ret = (poly128_t)(__p0);
+ return __ret;
+}
+#else
+__ai poly128_t vreinterpretq_p128_u64(uint64x2_t __p0) {
+ poly128_t __ret;
+ __ret = (poly128_t)(__p0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai poly128_t vreinterpretq_p128_u16(uint16x8_t __p0) {
+ poly128_t __ret;
+ __ret = (poly128_t)(__p0);
+ return __ret;
+}
+#else
+__ai poly128_t vreinterpretq_p128_u16(uint16x8_t __p0) {
+ poly128_t __ret;
+ __ret = (poly128_t)(__p0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai poly128_t vreinterpretq_p128_s8(int8x16_t __p0) {
+ poly128_t __ret;
+ __ret = (poly128_t)(__p0);
+ return __ret;
+}
+#else
+__ai poly128_t vreinterpretq_p128_s8(int8x16_t __p0) {
+ poly128_t __ret;
+ __ret = (poly128_t)(__p0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai poly128_t vreinterpretq_p128_f64(float64x2_t __p0) {
+ poly128_t __ret;
+ __ret = (poly128_t)(__p0);
+ return __ret;
+}
+#else
+__ai poly128_t vreinterpretq_p128_f64(float64x2_t __p0) {
+ poly128_t __ret;
+ __ret = (poly128_t)(__p0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai poly128_t vreinterpretq_p128_f32(float32x4_t __p0) {
+ poly128_t __ret;
+ __ret = (poly128_t)(__p0);
+ return __ret;
+}
+#else
+__ai poly128_t vreinterpretq_p128_f32(float32x4_t __p0) {
+ poly128_t __ret;
+ __ret = (poly128_t)(__p0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai poly128_t vreinterpretq_p128_f16(float16x8_t __p0) {
+ poly128_t __ret;
+ __ret = (poly128_t)(__p0);
+ return __ret;
+}
+#else
+__ai poly128_t vreinterpretq_p128_f16(float16x8_t __p0) {
+ poly128_t __ret;
+ __ret = (poly128_t)(__p0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai poly128_t vreinterpretq_p128_s32(int32x4_t __p0) {
+ poly128_t __ret;
+ __ret = (poly128_t)(__p0);
+ return __ret;
+}
+#else
+__ai poly128_t vreinterpretq_p128_s32(int32x4_t __p0) {
+ poly128_t __ret;
+ __ret = (poly128_t)(__p0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai poly128_t vreinterpretq_p128_s64(int64x2_t __p0) {
+ poly128_t __ret;
+ __ret = (poly128_t)(__p0);
+ return __ret;
+}
+#else
+__ai poly128_t vreinterpretq_p128_s64(int64x2_t __p0) {
+ poly128_t __ret;
+ __ret = (poly128_t)(__p0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai poly128_t vreinterpretq_p128_s16(int16x8_t __p0) {
+ poly128_t __ret;
+ __ret = (poly128_t)(__p0);
+ return __ret;
+}
+#else
+__ai poly128_t vreinterpretq_p128_s16(int16x8_t __p0) {
+ poly128_t __ret;
+ __ret = (poly128_t)(__p0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai poly64x2_t vreinterpretq_p64_p8(poly8x16_t __p0) {
+ poly64x2_t __ret;
+ __ret = (poly64x2_t)(__p0);
+ return __ret;
+}
+#else
+__ai poly64x2_t vreinterpretq_p64_p8(poly8x16_t __p0) {
+ poly64x2_t __ret;
+ __ret = (poly64x2_t)(__p0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai poly64x2_t vreinterpretq_p64_p128(poly128_t __p0) {
+ poly64x2_t __ret;
+ __ret = (poly64x2_t)(__p0);
+ return __ret;
+}
+#else
+__ai poly64x2_t vreinterpretq_p64_p128(poly128_t __p0) {
+ poly64x2_t __ret;
+ __ret = (poly64x2_t)(__p0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai poly64x2_t vreinterpretq_p64_p16(poly16x8_t __p0) {
+ poly64x2_t __ret;
+ __ret = (poly64x2_t)(__p0);
+ return __ret;
+}
+#else
+__ai poly64x2_t vreinterpretq_p64_p16(poly16x8_t __p0) {
+ poly64x2_t __ret;
+ __ret = (poly64x2_t)(__p0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai poly64x2_t vreinterpretq_p64_u8(uint8x16_t __p0) {
+ poly64x2_t __ret;
+ __ret = (poly64x2_t)(__p0);
+ return __ret;
+}
+#else
+__ai poly64x2_t vreinterpretq_p64_u8(uint8x16_t __p0) {
+ poly64x2_t __ret;
+ __ret = (poly64x2_t)(__p0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai poly64x2_t vreinterpretq_p64_u32(uint32x4_t __p0) {
+ poly64x2_t __ret;
+ __ret = (poly64x2_t)(__p0);
+ return __ret;
+}
+#else
+__ai poly64x2_t vreinterpretq_p64_u32(uint32x4_t __p0) {
+ poly64x2_t __ret;
+ __ret = (poly64x2_t)(__p0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai poly64x2_t vreinterpretq_p64_u64(uint64x2_t __p0) {
+ poly64x2_t __ret;
+ __ret = (poly64x2_t)(__p0);
+ return __ret;
+}
+#else
+__ai poly64x2_t vreinterpretq_p64_u64(uint64x2_t __p0) {
+ poly64x2_t __ret;
+ __ret = (poly64x2_t)(__p0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai poly64x2_t vreinterpretq_p64_u16(uint16x8_t __p0) {
+ poly64x2_t __ret;
+ __ret = (poly64x2_t)(__p0);
+ return __ret;
+}
+#else
+__ai poly64x2_t vreinterpretq_p64_u16(uint16x8_t __p0) {
+ poly64x2_t __ret;
+ __ret = (poly64x2_t)(__p0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai poly64x2_t vreinterpretq_p64_s8(int8x16_t __p0) {
+ poly64x2_t __ret;
+ __ret = (poly64x2_t)(__p0);
+ return __ret;
+}
+#else
+__ai poly64x2_t vreinterpretq_p64_s8(int8x16_t __p0) {
+ poly64x2_t __ret;
+ __ret = (poly64x2_t)(__p0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai poly64x2_t vreinterpretq_p64_f64(float64x2_t __p0) {
+ poly64x2_t __ret;
+ __ret = (poly64x2_t)(__p0);
+ return __ret;
+}
+#else
+__ai poly64x2_t vreinterpretq_p64_f64(float64x2_t __p0) {
+ poly64x2_t __ret;
+ __ret = (poly64x2_t)(__p0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai poly64x2_t vreinterpretq_p64_f32(float32x4_t __p0) {
+ poly64x2_t __ret;
+ __ret = (poly64x2_t)(__p0);
+ return __ret;
+}
+#else
+__ai poly64x2_t vreinterpretq_p64_f32(float32x4_t __p0) {
+ poly64x2_t __ret;
+ __ret = (poly64x2_t)(__p0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai poly64x2_t vreinterpretq_p64_f16(float16x8_t __p0) {
+ poly64x2_t __ret;
+ __ret = (poly64x2_t)(__p0);
+ return __ret;
+}
+#else
+__ai poly64x2_t vreinterpretq_p64_f16(float16x8_t __p0) {
+ poly64x2_t __ret;
+ __ret = (poly64x2_t)(__p0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai poly64x2_t vreinterpretq_p64_s32(int32x4_t __p0) {
+ poly64x2_t __ret;
+ __ret = (poly64x2_t)(__p0);
+ return __ret;
+}
+#else
+__ai poly64x2_t vreinterpretq_p64_s32(int32x4_t __p0) {
+ poly64x2_t __ret;
+ __ret = (poly64x2_t)(__p0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai poly64x2_t vreinterpretq_p64_s64(int64x2_t __p0) {
+ poly64x2_t __ret;
+ __ret = (poly64x2_t)(__p0);
+ return __ret;
+}
+#else
+__ai poly64x2_t vreinterpretq_p64_s64(int64x2_t __p0) {
+ poly64x2_t __ret;
+ __ret = (poly64x2_t)(__p0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai poly64x2_t vreinterpretq_p64_s16(int16x8_t __p0) {
+ poly64x2_t __ret;
+ __ret = (poly64x2_t)(__p0);
+ return __ret;
+}
+#else
+__ai poly64x2_t vreinterpretq_p64_s16(int16x8_t __p0) {
+ poly64x2_t __ret;
+ __ret = (poly64x2_t)(__p0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai poly16x8_t vreinterpretq_p16_p8(poly8x16_t __p0) {
+ poly16x8_t __ret;
+ __ret = (poly16x8_t)(__p0);
+ return __ret;
+}
+#else
+__ai poly16x8_t vreinterpretq_p16_p8(poly8x16_t __p0) {
+ poly16x8_t __ret;
+ __ret = (poly16x8_t)(__p0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai poly16x8_t vreinterpretq_p16_p128(poly128_t __p0) {
+ poly16x8_t __ret;
+ __ret = (poly16x8_t)(__p0);
+ return __ret;
+}
+#else
+__ai poly16x8_t vreinterpretq_p16_p128(poly128_t __p0) {
+ poly16x8_t __ret;
+ __ret = (poly16x8_t)(__p0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai poly16x8_t vreinterpretq_p16_p64(poly64x2_t __p0) {
+ poly16x8_t __ret;
+ __ret = (poly16x8_t)(__p0);
+ return __ret;
+}
+#else
+__ai poly16x8_t vreinterpretq_p16_p64(poly64x2_t __p0) {
+ poly16x8_t __ret;
+ __ret = (poly16x8_t)(__p0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai poly16x8_t vreinterpretq_p16_u8(uint8x16_t __p0) {
+ poly16x8_t __ret;
+ __ret = (poly16x8_t)(__p0);
+ return __ret;
+}
+#else
+__ai poly16x8_t vreinterpretq_p16_u8(uint8x16_t __p0) {
+ poly16x8_t __ret;
+ __ret = (poly16x8_t)(__p0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai poly16x8_t vreinterpretq_p16_u32(uint32x4_t __p0) {
+ poly16x8_t __ret;
+ __ret = (poly16x8_t)(__p0);
+ return __ret;
+}
+#else
+__ai poly16x8_t vreinterpretq_p16_u32(uint32x4_t __p0) {
+ poly16x8_t __ret;
+ __ret = (poly16x8_t)(__p0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai poly16x8_t vreinterpretq_p16_u64(uint64x2_t __p0) {
+ poly16x8_t __ret;
+ __ret = (poly16x8_t)(__p0);
+ return __ret;
+}
+#else
+__ai poly16x8_t vreinterpretq_p16_u64(uint64x2_t __p0) {
+ poly16x8_t __ret;
+ __ret = (poly16x8_t)(__p0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai poly16x8_t vreinterpretq_p16_u16(uint16x8_t __p0) {
+ poly16x8_t __ret;
+ __ret = (poly16x8_t)(__p0);
+ return __ret;
+}
+#else
+__ai poly16x8_t vreinterpretq_p16_u16(uint16x8_t __p0) {
+ poly16x8_t __ret;
+ __ret = (poly16x8_t)(__p0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai poly16x8_t vreinterpretq_p16_s8(int8x16_t __p0) {
+ poly16x8_t __ret;
+ __ret = (poly16x8_t)(__p0);
+ return __ret;
+}
+#else
+__ai poly16x8_t vreinterpretq_p16_s8(int8x16_t __p0) {
+ poly16x8_t __ret;
+ __ret = (poly16x8_t)(__p0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai poly16x8_t vreinterpretq_p16_f64(float64x2_t __p0) {
+ poly16x8_t __ret;
+ __ret = (poly16x8_t)(__p0);
+ return __ret;
+}
+#else
+__ai poly16x8_t vreinterpretq_p16_f64(float64x2_t __p0) {
+ poly16x8_t __ret;
+ __ret = (poly16x8_t)(__p0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai poly16x8_t vreinterpretq_p16_f32(float32x4_t __p0) {
+ poly16x8_t __ret;
+ __ret = (poly16x8_t)(__p0);
+ return __ret;
+}
+#else
+__ai poly16x8_t vreinterpretq_p16_f32(float32x4_t __p0) {
+ poly16x8_t __ret;
+ __ret = (poly16x8_t)(__p0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai poly16x8_t vreinterpretq_p16_f16(float16x8_t __p0) {
+ poly16x8_t __ret;
+ __ret = (poly16x8_t)(__p0);
+ return __ret;
+}
+#else
+__ai poly16x8_t vreinterpretq_p16_f16(float16x8_t __p0) {
+ poly16x8_t __ret;
+ __ret = (poly16x8_t)(__p0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai poly16x8_t vreinterpretq_p16_s32(int32x4_t __p0) {
+ poly16x8_t __ret;
+ __ret = (poly16x8_t)(__p0);
+ return __ret;
+}
+#else
+__ai poly16x8_t vreinterpretq_p16_s32(int32x4_t __p0) {
+ poly16x8_t __ret;
+ __ret = (poly16x8_t)(__p0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai poly16x8_t vreinterpretq_p16_s64(int64x2_t __p0) {
+ poly16x8_t __ret;
+ __ret = (poly16x8_t)(__p0);
+ return __ret;
+}
+#else
+__ai poly16x8_t vreinterpretq_p16_s64(int64x2_t __p0) {
+ poly16x8_t __ret;
+ __ret = (poly16x8_t)(__p0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai poly16x8_t vreinterpretq_p16_s16(int16x8_t __p0) {
+ poly16x8_t __ret;
+ __ret = (poly16x8_t)(__p0);
+ return __ret;
+}
+#else
+__ai poly16x8_t vreinterpretq_p16_s16(int16x8_t __p0) {
+ poly16x8_t __ret;
+ __ret = (poly16x8_t)(__p0);
+ return __ret;
+}
+#endif
+
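+/*
+ * Reinterpreting between element widths changes only how the 128 bits are
+ * indexed, not the bits themselves.  A sketch, assuming a little-endian
+ * target, where lane 0 of the narrow view overlays the low-order bytes of
+ * lane 0 of the wide view:
+ *
+ *   uint64x2_t v = vdupq_n_u64(0x0123456789ABCDEFULL);
+ *   uint8x16_t b = vreinterpretq_u8_u64(v);
+ *   // vgetq_lane_u8(b, 0) == 0xEF on little-endian
+ */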
+#ifdef __LITTLE_ENDIAN__
+__ai uint8x16_t vreinterpretq_u8_p8(poly8x16_t __p0) {
+ uint8x16_t __ret;
+ __ret = (uint8x16_t)(__p0);
+ return __ret;
+}
+#else
+__ai uint8x16_t vreinterpretq_u8_p8(poly8x16_t __p0) {
+ uint8x16_t __ret;
+ __ret = (uint8x16_t)(__p0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint8x16_t vreinterpretq_u8_p128(poly128_t __p0) {
+ uint8x16_t __ret;
+ __ret = (uint8x16_t)(__p0);
+ return __ret;
+}
+#else
+__ai uint8x16_t vreinterpretq_u8_p128(poly128_t __p0) {
+ uint8x16_t __ret;
+ __ret = (uint8x16_t)(__p0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint8x16_t vreinterpretq_u8_p64(poly64x2_t __p0) {
+ uint8x16_t __ret;
+ __ret = (uint8x16_t)(__p0);
+ return __ret;
+}
+#else
+__ai uint8x16_t vreinterpretq_u8_p64(poly64x2_t __p0) {
+ uint8x16_t __ret;
+ __ret = (uint8x16_t)(__p0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint8x16_t vreinterpretq_u8_p16(poly16x8_t __p0) {
+ uint8x16_t __ret;
+ __ret = (uint8x16_t)(__p0);
+ return __ret;
+}
+#else
+__ai uint8x16_t vreinterpretq_u8_p16(poly16x8_t __p0) {
+ uint8x16_t __ret;
+ __ret = (uint8x16_t)(__p0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint8x16_t vreinterpretq_u8_u32(uint32x4_t __p0) {
+ uint8x16_t __ret;
+ __ret = (uint8x16_t)(__p0);
+ return __ret;
+}
+#else
+__ai uint8x16_t vreinterpretq_u8_u32(uint32x4_t __p0) {
+ uint8x16_t __ret;
+ __ret = (uint8x16_t)(__p0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint8x16_t vreinterpretq_u8_u64(uint64x2_t __p0) {
+ uint8x16_t __ret;
+ __ret = (uint8x16_t)(__p0);
+ return __ret;
+}
+#else
+__ai uint8x16_t vreinterpretq_u8_u64(uint64x2_t __p0) {
+ uint8x16_t __ret;
+ __ret = (uint8x16_t)(__p0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint8x16_t vreinterpretq_u8_u16(uint16x8_t __p0) {
+ uint8x16_t __ret;
+ __ret = (uint8x16_t)(__p0);
+ return __ret;
+}
+#else
+__ai uint8x16_t vreinterpretq_u8_u16(uint16x8_t __p0) {
+ uint8x16_t __ret;
+ __ret = (uint8x16_t)(__p0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint8x16_t vreinterpretq_u8_s8(int8x16_t __p0) {
+ uint8x16_t __ret;
+ __ret = (uint8x16_t)(__p0);
+ return __ret;
+}
+#else
+__ai uint8x16_t vreinterpretq_u8_s8(int8x16_t __p0) {
+ uint8x16_t __ret;
+ __ret = (uint8x16_t)(__p0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint8x16_t vreinterpretq_u8_f64(float64x2_t __p0) {
+ uint8x16_t __ret;
+ __ret = (uint8x16_t)(__p0);
+ return __ret;
+}
+#else
+__ai uint8x16_t vreinterpretq_u8_f64(float64x2_t __p0) {
+ uint8x16_t __ret;
+ __ret = (uint8x16_t)(__p0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint8x16_t vreinterpretq_u8_f32(float32x4_t __p0) {
+ uint8x16_t __ret;
+ __ret = (uint8x16_t)(__p0);
+ return __ret;
+}
+#else
+__ai uint8x16_t vreinterpretq_u8_f32(float32x4_t __p0) {
+ uint8x16_t __ret;
+ __ret = (uint8x16_t)(__p0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint8x16_t vreinterpretq_u8_f16(float16x8_t __p0) {
+ uint8x16_t __ret;
+ __ret = (uint8x16_t)(__p0);
+ return __ret;
+}
+#else
+__ai uint8x16_t vreinterpretq_u8_f16(float16x8_t __p0) {
+ uint8x16_t __ret;
+ __ret = (uint8x16_t)(__p0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint8x16_t vreinterpretq_u8_s32(int32x4_t __p0) {
+ uint8x16_t __ret;
+ __ret = (uint8x16_t)(__p0);
+ return __ret;
+}
+#else
+__ai uint8x16_t vreinterpretq_u8_s32(int32x4_t __p0) {
+ uint8x16_t __ret;
+ __ret = (uint8x16_t)(__p0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint8x16_t vreinterpretq_u8_s64(int64x2_t __p0) {
+ uint8x16_t __ret;
+ __ret = (uint8x16_t)(__p0);
+ return __ret;
+}
+#else
+__ai uint8x16_t vreinterpretq_u8_s64(int64x2_t __p0) {
+ uint8x16_t __ret;
+ __ret = (uint8x16_t)(__p0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint8x16_t vreinterpretq_u8_s16(int16x8_t __p0) {
+ uint8x16_t __ret;
+ __ret = (uint8x16_t)(__p0);
+ return __ret;
+}
+#else
+__ai uint8x16_t vreinterpretq_u8_s16(int16x8_t __p0) {
+ uint8x16_t __ret;
+ __ret = (uint8x16_t)(__p0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint32x4_t vreinterpretq_u32_p8(poly8x16_t __p0) {
+ uint32x4_t __ret;
+ __ret = (uint32x4_t)(__p0);
+ return __ret;
+}
+#else
+__ai uint32x4_t vreinterpretq_u32_p8(poly8x16_t __p0) {
+ uint32x4_t __ret;
+ __ret = (uint32x4_t)(__p0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint32x4_t vreinterpretq_u32_p128(poly128_t __p0) {
+ uint32x4_t __ret;
+ __ret = (uint32x4_t)(__p0);
+ return __ret;
+}
+#else
+__ai uint32x4_t vreinterpretq_u32_p128(poly128_t __p0) {
+ uint32x4_t __ret;
+ __ret = (uint32x4_t)(__p0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint32x4_t vreinterpretq_u32_p64(poly64x2_t __p0) {
+ uint32x4_t __ret;
+ __ret = (uint32x4_t)(__p0);
+ return __ret;
+}
+#else
+__ai uint32x4_t vreinterpretq_u32_p64(poly64x2_t __p0) {
+ uint32x4_t __ret;
+ __ret = (uint32x4_t)(__p0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint32x4_t vreinterpretq_u32_p16(poly16x8_t __p0) {
+ uint32x4_t __ret;
+ __ret = (uint32x4_t)(__p0);
+ return __ret;
+}
+#else
+__ai uint32x4_t vreinterpretq_u32_p16(poly16x8_t __p0) {
+ uint32x4_t __ret;
+ __ret = (uint32x4_t)(__p0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint32x4_t vreinterpretq_u32_u8(uint8x16_t __p0) {
+ uint32x4_t __ret;
+ __ret = (uint32x4_t)(__p0);
+ return __ret;
+}
+#else
+__ai uint32x4_t vreinterpretq_u32_u8(uint8x16_t __p0) {
+ uint32x4_t __ret;
+ __ret = (uint32x4_t)(__p0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint32x4_t vreinterpretq_u32_u64(uint64x2_t __p0) {
+ uint32x4_t __ret;
+ __ret = (uint32x4_t)(__p0);
+ return __ret;
+}
+#else
+__ai uint32x4_t vreinterpretq_u32_u64(uint64x2_t __p0) {
+ uint32x4_t __ret;
+ __ret = (uint32x4_t)(__p0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint32x4_t vreinterpretq_u32_u16(uint16x8_t __p0) {
+ uint32x4_t __ret;
+ __ret = (uint32x4_t)(__p0);
+ return __ret;
+}
+#else
+__ai uint32x4_t vreinterpretq_u32_u16(uint16x8_t __p0) {
+ uint32x4_t __ret;
+ __ret = (uint32x4_t)(__p0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint32x4_t vreinterpretq_u32_s8(int8x16_t __p0) {
+ uint32x4_t __ret;
+ __ret = (uint32x4_t)(__p0);
+ return __ret;
+}
+#else
+__ai uint32x4_t vreinterpretq_u32_s8(int8x16_t __p0) {
+ uint32x4_t __ret;
+ __ret = (uint32x4_t)(__p0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint32x4_t vreinterpretq_u32_f64(float64x2_t __p0) {
+ uint32x4_t __ret;
+ __ret = (uint32x4_t)(__p0);
+ return __ret;
+}
+#else
+__ai uint32x4_t vreinterpretq_u32_f64(float64x2_t __p0) {
+ uint32x4_t __ret;
+ __ret = (uint32x4_t)(__p0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint32x4_t vreinterpretq_u32_f32(float32x4_t __p0) {
+ uint32x4_t __ret;
+ __ret = (uint32x4_t)(__p0);
+ return __ret;
+}
+#else
+__ai uint32x4_t vreinterpretq_u32_f32(float32x4_t __p0) {
+ uint32x4_t __ret;
+ __ret = (uint32x4_t)(__p0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint32x4_t vreinterpretq_u32_f16(float16x8_t __p0) {
+ uint32x4_t __ret;
+ __ret = (uint32x4_t)(__p0);
+ return __ret;
+}
+#else
+__ai uint32x4_t vreinterpretq_u32_f16(float16x8_t __p0) {
+ uint32x4_t __ret;
+ __ret = (uint32x4_t)(__p0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint32x4_t vreinterpretq_u32_s32(int32x4_t __p0) {
+ uint32x4_t __ret;
+ __ret = (uint32x4_t)(__p0);
+ return __ret;
+}
+#else
+__ai uint32x4_t vreinterpretq_u32_s32(int32x4_t __p0) {
+ uint32x4_t __ret;
+ __ret = (uint32x4_t)(__p0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint32x4_t vreinterpretq_u32_s64(int64x2_t __p0) {
+ uint32x4_t __ret;
+ __ret = (uint32x4_t)(__p0);
+ return __ret;
+}
+#else
+__ai uint32x4_t vreinterpretq_u32_s64(int64x2_t __p0) {
+ uint32x4_t __ret;
+ __ret = (uint32x4_t)(__p0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint32x4_t vreinterpretq_u32_s16(int16x8_t __p0) {
+ uint32x4_t __ret;
+ __ret = (uint32x4_t)(__p0);
+ return __ret;
+}
+#else
+__ai uint32x4_t vreinterpretq_u32_s16(int16x8_t __p0) {
+ uint32x4_t __ret;
+ __ret = (uint32x4_t)(__p0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint64x2_t vreinterpretq_u64_p8(poly8x16_t __p0) {
+ uint64x2_t __ret;
+ __ret = (uint64x2_t)(__p0);
+ return __ret;
+}
+#else
+__ai uint64x2_t vreinterpretq_u64_p8(poly8x16_t __p0) {
+ uint64x2_t __ret;
+ __ret = (uint64x2_t)(__p0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint64x2_t vreinterpretq_u64_p128(poly128_t __p0) {
+ uint64x2_t __ret;
+ __ret = (uint64x2_t)(__p0);
+ return __ret;
+}
+#else
+__ai uint64x2_t vreinterpretq_u64_p128(poly128_t __p0) {
+ uint64x2_t __ret;
+ __ret = (uint64x2_t)(__p0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint64x2_t vreinterpretq_u64_p64(poly64x2_t __p0) {
+ uint64x2_t __ret;
+ __ret = (uint64x2_t)(__p0);
+ return __ret;
+}
+#else
+__ai uint64x2_t vreinterpretq_u64_p64(poly64x2_t __p0) {
+ uint64x2_t __ret;
+ __ret = (uint64x2_t)(__p0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint64x2_t vreinterpretq_u64_p16(poly16x8_t __p0) {
+ uint64x2_t __ret;
+ __ret = (uint64x2_t)(__p0);
+ return __ret;
+}
+#else
+__ai uint64x2_t vreinterpretq_u64_p16(poly16x8_t __p0) {
+ uint64x2_t __ret;
+ __ret = (uint64x2_t)(__p0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint64x2_t vreinterpretq_u64_u8(uint8x16_t __p0) {
+ uint64x2_t __ret;
+ __ret = (uint64x2_t)(__p0);
+ return __ret;
+}
+#else
+__ai uint64x2_t vreinterpretq_u64_u8(uint8x16_t __p0) {
+ uint64x2_t __ret;
+ __ret = (uint64x2_t)(__p0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint64x2_t vreinterpretq_u64_u32(uint32x4_t __p0) {
+ uint64x2_t __ret;
+ __ret = (uint64x2_t)(__p0);
+ return __ret;
+}
+#else
+__ai uint64x2_t vreinterpretq_u64_u32(uint32x4_t __p0) {
+ uint64x2_t __ret;
+ __ret = (uint64x2_t)(__p0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint64x2_t vreinterpretq_u64_u16(uint16x8_t __p0) {
+ uint64x2_t __ret;
+ __ret = (uint64x2_t)(__p0);
+ return __ret;
+}
+#else
+__ai uint64x2_t vreinterpretq_u64_u16(uint16x8_t __p0) {
+ uint64x2_t __ret;
+ __ret = (uint64x2_t)(__p0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint64x2_t vreinterpretq_u64_s8(int8x16_t __p0) {
+ uint64x2_t __ret;
+ __ret = (uint64x2_t)(__p0);
+ return __ret;
+}
+#else
+__ai uint64x2_t vreinterpretq_u64_s8(int8x16_t __p0) {
+ uint64x2_t __ret;
+ __ret = (uint64x2_t)(__p0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint64x2_t vreinterpretq_u64_f64(float64x2_t __p0) {
+ uint64x2_t __ret;
+ __ret = (uint64x2_t)(__p0);
+ return __ret;
+}
+#else
+__ai uint64x2_t vreinterpretq_u64_f64(float64x2_t __p0) {
+ uint64x2_t __ret;
+ __ret = (uint64x2_t)(__p0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint64x2_t vreinterpretq_u64_f32(float32x4_t __p0) {
+ uint64x2_t __ret;
+ __ret = (uint64x2_t)(__p0);
+ return __ret;
+}
+#else
+__ai uint64x2_t vreinterpretq_u64_f32(float32x4_t __p0) {
+ uint64x2_t __ret;
+ __ret = (uint64x2_t)(__p0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint64x2_t vreinterpretq_u64_f16(float16x8_t __p0) {
+ uint64x2_t __ret;
+ __ret = (uint64x2_t)(__p0);
+ return __ret;
+}
+#else
+__ai uint64x2_t vreinterpretq_u64_f16(float16x8_t __p0) {
+ uint64x2_t __ret;
+ __ret = (uint64x2_t)(__p0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint64x2_t vreinterpretq_u64_s32(int32x4_t __p0) {
+ uint64x2_t __ret;
+ __ret = (uint64x2_t)(__p0);
+ return __ret;
+}
+#else
+__ai uint64x2_t vreinterpretq_u64_s32(int32x4_t __p0) {
+ uint64x2_t __ret;
+ __ret = (uint64x2_t)(__p0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint64x2_t vreinterpretq_u64_s64(int64x2_t __p0) {
+ uint64x2_t __ret;
+ __ret = (uint64x2_t)(__p0);
+ return __ret;
+}
+#else
+__ai uint64x2_t vreinterpretq_u64_s64(int64x2_t __p0) {
+ uint64x2_t __ret;
+ __ret = (uint64x2_t)(__p0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint64x2_t vreinterpretq_u64_s16(int16x8_t __p0) {
+ uint64x2_t __ret;
+ __ret = (uint64x2_t)(__p0);
+ return __ret;
+}
+#else
+__ai uint64x2_t vreinterpretq_u64_s16(int16x8_t __p0) {
+ uint64x2_t __ret;
+ __ret = (uint64x2_t)(__p0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint16x8_t vreinterpretq_u16_p8(poly8x16_t __p0) {
+ uint16x8_t __ret;
+ __ret = (uint16x8_t)(__p0);
+ return __ret;
+}
+#else
+__ai uint16x8_t vreinterpretq_u16_p8(poly8x16_t __p0) {
+ uint16x8_t __ret;
+ __ret = (uint16x8_t)(__p0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint16x8_t vreinterpretq_u16_p128(poly128_t __p0) {
+ uint16x8_t __ret;
+ __ret = (uint16x8_t)(__p0);
+ return __ret;
+}
+#else
+__ai uint16x8_t vreinterpretq_u16_p128(poly128_t __p0) {
+ uint16x8_t __ret;
+ __ret = (uint16x8_t)(__p0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint16x8_t vreinterpretq_u16_p64(poly64x2_t __p0) {
+ uint16x8_t __ret;
+ __ret = (uint16x8_t)(__p0);
+ return __ret;
+}
+#else
+__ai uint16x8_t vreinterpretq_u16_p64(poly64x2_t __p0) {
+ uint16x8_t __ret;
+ __ret = (uint16x8_t)(__p0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint16x8_t vreinterpretq_u16_p16(poly16x8_t __p0) {
+ uint16x8_t __ret;
+ __ret = (uint16x8_t)(__p0);
+ return __ret;
+}
+#else
+__ai uint16x8_t vreinterpretq_u16_p16(poly16x8_t __p0) {
+ uint16x8_t __ret;
+ __ret = (uint16x8_t)(__p0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint16x8_t vreinterpretq_u16_u8(uint8x16_t __p0) {
+ uint16x8_t __ret;
+ __ret = (uint16x8_t)(__p0);
+ return __ret;
+}
+#else
+__ai uint16x8_t vreinterpretq_u16_u8(uint8x16_t __p0) {
+ uint16x8_t __ret;
+ __ret = (uint16x8_t)(__p0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint16x8_t vreinterpretq_u16_u32(uint32x4_t __p0) {
+ uint16x8_t __ret;
+ __ret = (uint16x8_t)(__p0);
+ return __ret;
+}
+#else
+__ai uint16x8_t vreinterpretq_u16_u32(uint32x4_t __p0) {
+ uint16x8_t __ret;
+ __ret = (uint16x8_t)(__p0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint16x8_t vreinterpretq_u16_u64(uint64x2_t __p0) {
+ uint16x8_t __ret;
+ __ret = (uint16x8_t)(__p0);
+ return __ret;
+}
+#else
+__ai uint16x8_t vreinterpretq_u16_u64(uint64x2_t __p0) {
+ uint16x8_t __ret;
+ __ret = (uint16x8_t)(__p0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint16x8_t vreinterpretq_u16_s8(int8x16_t __p0) {
+ uint16x8_t __ret;
+ __ret = (uint16x8_t)(__p0);
+ return __ret;
+}
+#else
+__ai uint16x8_t vreinterpretq_u16_s8(int8x16_t __p0) {
+ uint16x8_t __ret;
+ __ret = (uint16x8_t)(__p0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint16x8_t vreinterpretq_u16_f64(float64x2_t __p0) {
+ uint16x8_t __ret;
+ __ret = (uint16x8_t)(__p0);
+ return __ret;
+}
+#else
+__ai uint16x8_t vreinterpretq_u16_f64(float64x2_t __p0) {
+ uint16x8_t __ret;
+ __ret = (uint16x8_t)(__p0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint16x8_t vreinterpretq_u16_f32(float32x4_t __p0) {
+ uint16x8_t __ret;
+ __ret = (uint16x8_t)(__p0);
+ return __ret;
+}
+#else
+__ai uint16x8_t vreinterpretq_u16_f32(float32x4_t __p0) {
+ uint16x8_t __ret;
+ __ret = (uint16x8_t)(__p0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint16x8_t vreinterpretq_u16_f16(float16x8_t __p0) {
+ uint16x8_t __ret;
+ __ret = (uint16x8_t)(__p0);
+ return __ret;
+}
+#else
+__ai uint16x8_t vreinterpretq_u16_f16(float16x8_t __p0) {
+ uint16x8_t __ret;
+ __ret = (uint16x8_t)(__p0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint16x8_t vreinterpretq_u16_s32(int32x4_t __p0) {
+ uint16x8_t __ret;
+ __ret = (uint16x8_t)(__p0);
+ return __ret;
+}
+#else
+__ai uint16x8_t vreinterpretq_u16_s32(int32x4_t __p0) {
+ uint16x8_t __ret;
+ __ret = (uint16x8_t)(__p0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint16x8_t vreinterpretq_u16_s64(int64x2_t __p0) {
+ uint16x8_t __ret;
+ __ret = (uint16x8_t)(__p0);
+ return __ret;
+}
+#else
+__ai uint16x8_t vreinterpretq_u16_s64(int64x2_t __p0) {
+ uint16x8_t __ret;
+ __ret = (uint16x8_t)(__p0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint16x8_t vreinterpretq_u16_s16(int16x8_t __p0) {
+ uint16x8_t __ret;
+ __ret = (uint16x8_t)(__p0);
+ return __ret;
+}
+#else
+__ai uint16x8_t vreinterpretq_u16_s16(int16x8_t __p0) {
+ uint16x8_t __ret;
+ __ret = (uint16x8_t)(__p0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai int8x16_t vreinterpretq_s8_p8(poly8x16_t __p0) {
+ int8x16_t __ret;
+ __ret = (int8x16_t)(__p0);
+ return __ret;
+}
+#else
+__ai int8x16_t vreinterpretq_s8_p8(poly8x16_t __p0) {
+ int8x16_t __ret;
+ __ret = (int8x16_t)(__p0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai int8x16_t vreinterpretq_s8_p128(poly128_t __p0) {
+ int8x16_t __ret;
+ __ret = (int8x16_t)(__p0);
+ return __ret;
+}
+#else
+__ai int8x16_t vreinterpretq_s8_p128(poly128_t __p0) {
+ int8x16_t __ret;
+ __ret = (int8x16_t)(__p0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai int8x16_t vreinterpretq_s8_p64(poly64x2_t __p0) {
+ int8x16_t __ret;
+ __ret = (int8x16_t)(__p0);
+ return __ret;
+}
+#else
+__ai int8x16_t vreinterpretq_s8_p64(poly64x2_t __p0) {
+ int8x16_t __ret;
+ __ret = (int8x16_t)(__p0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai int8x16_t vreinterpretq_s8_p16(poly16x8_t __p0) {
+ int8x16_t __ret;
+ __ret = (int8x16_t)(__p0);
+ return __ret;
+}
+#else
+__ai int8x16_t vreinterpretq_s8_p16(poly16x8_t __p0) {
+ int8x16_t __ret;
+ __ret = (int8x16_t)(__p0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai int8x16_t vreinterpretq_s8_u8(uint8x16_t __p0) {
+ int8x16_t __ret;
+ __ret = (int8x16_t)(__p0);
+ return __ret;
+}
+#else
+__ai int8x16_t vreinterpretq_s8_u8(uint8x16_t __p0) {
+ int8x16_t __ret;
+ __ret = (int8x16_t)(__p0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai int8x16_t vreinterpretq_s8_u32(uint32x4_t __p0) {
+ int8x16_t __ret;
+ __ret = (int8x16_t)(__p0);
+ return __ret;
+}
+#else
+__ai int8x16_t vreinterpretq_s8_u32(uint32x4_t __p0) {
+ int8x16_t __ret;
+ __ret = (int8x16_t)(__p0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai int8x16_t vreinterpretq_s8_u64(uint64x2_t __p0) {
+ int8x16_t __ret;
+ __ret = (int8x16_t)(__p0);
+ return __ret;
+}
+#else
+__ai int8x16_t vreinterpretq_s8_u64(uint64x2_t __p0) {
+ int8x16_t __ret;
+ __ret = (int8x16_t)(__p0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai int8x16_t vreinterpretq_s8_u16(uint16x8_t __p0) {
+ int8x16_t __ret;
+ __ret = (int8x16_t)(__p0);
+ return __ret;
+}
+#else
+__ai int8x16_t vreinterpretq_s8_u16(uint16x8_t __p0) {
+ int8x16_t __ret;
+ __ret = (int8x16_t)(__p0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai int8x16_t vreinterpretq_s8_f64(float64x2_t __p0) {
+ int8x16_t __ret;
+ __ret = (int8x16_t)(__p0);
+ return __ret;
+}
+#else
+__ai int8x16_t vreinterpretq_s8_f64(float64x2_t __p0) {
+ int8x16_t __ret;
+ __ret = (int8x16_t)(__p0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai int8x16_t vreinterpretq_s8_f32(float32x4_t __p0) {
+ int8x16_t __ret;
+ __ret = (int8x16_t)(__p0);
+ return __ret;
+}
+#else
+__ai int8x16_t vreinterpretq_s8_f32(float32x4_t __p0) {
+ int8x16_t __ret;
+ __ret = (int8x16_t)(__p0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai int8x16_t vreinterpretq_s8_f16(float16x8_t __p0) {
+ int8x16_t __ret;
+ __ret = (int8x16_t)(__p0);
+ return __ret;
+}
+#else
+__ai int8x16_t vreinterpretq_s8_f16(float16x8_t __p0) {
+ int8x16_t __ret;
+ __ret = (int8x16_t)(__p0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai int8x16_t vreinterpretq_s8_s32(int32x4_t __p0) {
+ int8x16_t __ret;
+ __ret = (int8x16_t)(__p0);
+ return __ret;
+}
+#else
+__ai int8x16_t vreinterpretq_s8_s32(int32x4_t __p0) {
+ int8x16_t __ret;
+ __ret = (int8x16_t)(__p0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai int8x16_t vreinterpretq_s8_s64(int64x2_t __p0) {
+ int8x16_t __ret;
+ __ret = (int8x16_t)(__p0);
+ return __ret;
+}
+#else
+__ai int8x16_t vreinterpretq_s8_s64(int64x2_t __p0) {
+ int8x16_t __ret;
+ __ret = (int8x16_t)(__p0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai int8x16_t vreinterpretq_s8_s16(int16x8_t __p0) {
+ int8x16_t __ret;
+ __ret = (int8x16_t)(__p0);
+ return __ret;
+}
+#else
+__ai int8x16_t vreinterpretq_s8_s16(int16x8_t __p0) {
+ int8x16_t __ret;
+ __ret = (int8x16_t)(__p0);
+ return __ret;
+}
+#endif
+
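+/*
+ * float64x2_t exists only on AArch64 targets.  Note that these are pure bit
+ * reinterpretations: vreinterpretq_f64_f32 yields a float64x2_t whose bits
+ * are the four original float32 lanes, which is rarely a meaningful double
+ * value; vcvt_f64_f32 performs an actual widening conversion.
+ */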
+#ifdef __LITTLE_ENDIAN__
+__ai float64x2_t vreinterpretq_f64_p8(poly8x16_t __p0) {
+ float64x2_t __ret;
+ __ret = (float64x2_t)(__p0);
+ return __ret;
+}
+#else
+__ai float64x2_t vreinterpretq_f64_p8(poly8x16_t __p0) {
+ float64x2_t __ret;
+ __ret = (float64x2_t)(__p0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai float64x2_t vreinterpretq_f64_p128(poly128_t __p0) {
+ float64x2_t __ret;
+ __ret = (float64x2_t)(__p0);
+ return __ret;
+}
+#else
+__ai float64x2_t vreinterpretq_f64_p128(poly128_t __p0) {
+ float64x2_t __ret;
+ __ret = (float64x2_t)(__p0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai float64x2_t vreinterpretq_f64_p64(poly64x2_t __p0) {
+ float64x2_t __ret;
+ __ret = (float64x2_t)(__p0);
+ return __ret;
+}
+#else
+__ai float64x2_t vreinterpretq_f64_p64(poly64x2_t __p0) {
+ float64x2_t __ret;
+ __ret = (float64x2_t)(__p0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai float64x2_t vreinterpretq_f64_p16(poly16x8_t __p0) {
+ float64x2_t __ret;
+ __ret = (float64x2_t)(__p0);
+ return __ret;
+}
+#else
+__ai float64x2_t vreinterpretq_f64_p16(poly16x8_t __p0) {
+ float64x2_t __ret;
+ __ret = (float64x2_t)(__p0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai float64x2_t vreinterpretq_f64_u8(uint8x16_t __p0) {
+ float64x2_t __ret;
+ __ret = (float64x2_t)(__p0);
+ return __ret;
+}
+#else
+__ai float64x2_t vreinterpretq_f64_u8(uint8x16_t __p0) {
+ float64x2_t __ret;
+ __ret = (float64x2_t)(__p0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai float64x2_t vreinterpretq_f64_u32(uint32x4_t __p0) {
+ float64x2_t __ret;
+ __ret = (float64x2_t)(__p0);
+ return __ret;
+}
+#else
+__ai float64x2_t vreinterpretq_f64_u32(uint32x4_t __p0) {
+ float64x2_t __ret;
+ __ret = (float64x2_t)(__p0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai float64x2_t vreinterpretq_f64_u64(uint64x2_t __p0) {
+ float64x2_t __ret;
+ __ret = (float64x2_t)(__p0);
+ return __ret;
+}
+#else
+__ai float64x2_t vreinterpretq_f64_u64(uint64x2_t __p0) {
+ float64x2_t __ret;
+ __ret = (float64x2_t)(__p0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai float64x2_t vreinterpretq_f64_u16(uint16x8_t __p0) {
+ float64x2_t __ret;
+ __ret = (float64x2_t)(__p0);
+ return __ret;
+}
+#else
+__ai float64x2_t vreinterpretq_f64_u16(uint16x8_t __p0) {
+ float64x2_t __ret;
+ __ret = (float64x2_t)(__p0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai float64x2_t vreinterpretq_f64_s8(int8x16_t __p0) {
+ float64x2_t __ret;
+ __ret = (float64x2_t)(__p0);
+ return __ret;
+}
+#else
+__ai float64x2_t vreinterpretq_f64_s8(int8x16_t __p0) {
+ float64x2_t __ret;
+ __ret = (float64x2_t)(__p0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai float64x2_t vreinterpretq_f64_f32(float32x4_t __p0) {
+ float64x2_t __ret;
+ __ret = (float64x2_t)(__p0);
+ return __ret;
+}
+#else
+__ai float64x2_t vreinterpretq_f64_f32(float32x4_t __p0) {
+ float64x2_t __ret;
+ __ret = (float64x2_t)(__p0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai float64x2_t vreinterpretq_f64_f16(float16x8_t __p0) {
+ float64x2_t __ret;
+ __ret = (float64x2_t)(__p0);
+ return __ret;
+}
+#else
+__ai float64x2_t vreinterpretq_f64_f16(float16x8_t __p0) {
+ float64x2_t __ret;
+ __ret = (float64x2_t)(__p0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai float64x2_t vreinterpretq_f64_s32(int32x4_t __p0) {
+ float64x2_t __ret;
+ __ret = (float64x2_t)(__p0);
+ return __ret;
+}
+#else
+__ai float64x2_t vreinterpretq_f64_s32(int32x4_t __p0) {
+ float64x2_t __ret;
+ __ret = (float64x2_t)(__p0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai float64x2_t vreinterpretq_f64_s64(int64x2_t __p0) {
+ float64x2_t __ret;
+ __ret = (float64x2_t)(__p0);
+ return __ret;
+}
+#else
+__ai float64x2_t vreinterpretq_f64_s64(int64x2_t __p0) {
+ float64x2_t __ret;
+ __ret = (float64x2_t)(__p0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai float64x2_t vreinterpretq_f64_s16(int16x8_t __p0) {
+ float64x2_t __ret;
+ __ret = (float64x2_t)(__p0);
+ return __ret;
+}
+#else
+__ai float64x2_t vreinterpretq_f64_s16(int16x8_t __p0) {
+ float64x2_t __ret;
+ __ret = (float64x2_t)(__p0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai float32x4_t vreinterpretq_f32_p8(poly8x16_t __p0) {
+ float32x4_t __ret;
+ __ret = (float32x4_t)(__p0);
+ return __ret;
+}
+#else
+__ai float32x4_t vreinterpretq_f32_p8(poly8x16_t __p0) {
+ float32x4_t __ret;
+ __ret = (float32x4_t)(__p0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai float32x4_t vreinterpretq_f32_p128(poly128_t __p0) {
+ float32x4_t __ret;
+ __ret = (float32x4_t)(__p0);
+ return __ret;
+}
+#else
+__ai float32x4_t vreinterpretq_f32_p128(poly128_t __p0) {
+ float32x4_t __ret;
+ __ret = (float32x4_t)(__p0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai float32x4_t vreinterpretq_f32_p64(poly64x2_t __p0) {
+ float32x4_t __ret;
+ __ret = (float32x4_t)(__p0);
+ return __ret;
+}
+#else
+__ai float32x4_t vreinterpretq_f32_p64(poly64x2_t __p0) {
+ float32x4_t __ret;
+ __ret = (float32x4_t)(__p0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai float32x4_t vreinterpretq_f32_p16(poly16x8_t __p0) {
+ float32x4_t __ret;
+ __ret = (float32x4_t)(__p0);
+ return __ret;
+}
+#else
+__ai float32x4_t vreinterpretq_f32_p16(poly16x8_t __p0) {
+ float32x4_t __ret;
+ __ret = (float32x4_t)(__p0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai float32x4_t vreinterpretq_f32_u8(uint8x16_t __p0) {
+ float32x4_t __ret;
+ __ret = (float32x4_t)(__p0);
+ return __ret;
+}
+#else
+__ai float32x4_t vreinterpretq_f32_u8(uint8x16_t __p0) {
+ float32x4_t __ret;
+ __ret = (float32x4_t)(__p0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai float32x4_t vreinterpretq_f32_u32(uint32x4_t __p0) {
+ float32x4_t __ret;
+ __ret = (float32x4_t)(__p0);
+ return __ret;
+}
+#else
+__ai float32x4_t vreinterpretq_f32_u32(uint32x4_t __p0) {
+ float32x4_t __ret;
+ __ret = (float32x4_t)(__p0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai float32x4_t vreinterpretq_f32_u64(uint64x2_t __p0) {
+ float32x4_t __ret;
+ __ret = (float32x4_t)(__p0);
+ return __ret;
+}
+#else
+__ai float32x4_t vreinterpretq_f32_u64(uint64x2_t __p0) {
+ float32x4_t __ret;
+ __ret = (float32x4_t)(__p0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai float32x4_t vreinterpretq_f32_u16(uint16x8_t __p0) {
+ float32x4_t __ret;
+ __ret = (float32x4_t)(__p0);
+ return __ret;
+}
+#else
+__ai float32x4_t vreinterpretq_f32_u16(uint16x8_t __p0) {
+ float32x4_t __ret;
+ __ret = (float32x4_t)(__p0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai float32x4_t vreinterpretq_f32_s8(int8x16_t __p0) {
+ float32x4_t __ret;
+ __ret = (float32x4_t)(__p0);
+ return __ret;
+}
+#else
+__ai float32x4_t vreinterpretq_f32_s8(int8x16_t __p0) {
+ float32x4_t __ret;
+ __ret = (float32x4_t)(__p0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai float32x4_t vreinterpretq_f32_f64(float64x2_t __p0) {
+ float32x4_t __ret;
+ __ret = (float32x4_t)(__p0);
+ return __ret;
+}
+#else
+__ai float32x4_t vreinterpretq_f32_f64(float64x2_t __p0) {
+ float32x4_t __ret;
+ __ret = (float32x4_t)(__p0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai float32x4_t vreinterpretq_f32_f16(float16x8_t __p0) {
+ float32x4_t __ret;
+ __ret = (float32x4_t)(__p0);
+ return __ret;
+}
+#else
+__ai float32x4_t vreinterpretq_f32_f16(float16x8_t __p0) {
+ float32x4_t __ret;
+ __ret = (float32x4_t)(__p0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai float32x4_t vreinterpretq_f32_s32(int32x4_t __p0) {
+ float32x4_t __ret;
+ __ret = (float32x4_t)(__p0);
+ return __ret;
+}
+#else
+__ai float32x4_t vreinterpretq_f32_s32(int32x4_t __p0) {
+ float32x4_t __ret;
+ __ret = (float32x4_t)(__p0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai float32x4_t vreinterpretq_f32_s64(int64x2_t __p0) {
+ float32x4_t __ret;
+ __ret = (float32x4_t)(__p0);
+ return __ret;
+}
+#else
+__ai float32x4_t vreinterpretq_f32_s64(int64x2_t __p0) {
+ float32x4_t __ret;
+ __ret = (float32x4_t)(__p0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai float32x4_t vreinterpretq_f32_s16(int16x8_t __p0) {
+ float32x4_t __ret;
+ __ret = (float32x4_t)(__p0);
+ return __ret;
+}
+#else
+__ai float32x4_t vreinterpretq_f32_s16(int16x8_t __p0) {
+ float32x4_t __ret;
+ __ret = (float32x4_t)(__p0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai float16x8_t vreinterpretq_f16_p8(poly8x16_t __p0) {
+ float16x8_t __ret;
+ __ret = (float16x8_t)(__p0);
+ return __ret;
+}
+#else
+__ai float16x8_t vreinterpretq_f16_p8(poly8x16_t __p0) {
+ float16x8_t __ret;
+ __ret = (float16x8_t)(__p0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai float16x8_t vreinterpretq_f16_p128(poly128_t __p0) {
+ float16x8_t __ret;
+ __ret = (float16x8_t)(__p0);
+ return __ret;
+}
+#else
+__ai float16x8_t vreinterpretq_f16_p128(poly128_t __p0) {
+ float16x8_t __ret;
+ __ret = (float16x8_t)(__p0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai float16x8_t vreinterpretq_f16_p64(poly64x2_t __p0) {
+ float16x8_t __ret;
+ __ret = (float16x8_t)(__p0);
+ return __ret;
+}
+#else
+__ai float16x8_t vreinterpretq_f16_p64(poly64x2_t __p0) {
+ float16x8_t __ret;
+ __ret = (float16x8_t)(__p0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai float16x8_t vreinterpretq_f16_p16(poly16x8_t __p0) {
+ float16x8_t __ret;
+ __ret = (float16x8_t)(__p0);
+ return __ret;
+}
+#else
+__ai float16x8_t vreinterpretq_f16_p16(poly16x8_t __p0) {
+ float16x8_t __ret;
+ __ret = (float16x8_t)(__p0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai float16x8_t vreinterpretq_f16_u8(uint8x16_t __p0) {
+ float16x8_t __ret;
+ __ret = (float16x8_t)(__p0);
+ return __ret;
+}
+#else
+__ai float16x8_t vreinterpretq_f16_u8(uint8x16_t __p0) {
+ float16x8_t __ret;
+ __ret = (float16x8_t)(__p0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai float16x8_t vreinterpretq_f16_u32(uint32x4_t __p0) {
+ float16x8_t __ret;
+ __ret = (float16x8_t)(__p0);
+ return __ret;
+}
+#else
+__ai float16x8_t vreinterpretq_f16_u32(uint32x4_t __p0) {
+ float16x8_t __ret;
+ __ret = (float16x8_t)(__p0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai float16x8_t vreinterpretq_f16_u64(uint64x2_t __p0) {
+ float16x8_t __ret;
+ __ret = (float16x8_t)(__p0);
+ return __ret;
+}
+#else
+__ai float16x8_t vreinterpretq_f16_u64(uint64x2_t __p0) {
+ float16x8_t __ret;
+ __ret = (float16x8_t)(__p0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai float16x8_t vreinterpretq_f16_u16(uint16x8_t __p0) {
+ float16x8_t __ret;
+ __ret = (float16x8_t)(__p0);
+ return __ret;
+}
+#else
+__ai float16x8_t vreinterpretq_f16_u16(uint16x8_t __p0) {
+ float16x8_t __ret;
+ __ret = (float16x8_t)(__p0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai float16x8_t vreinterpretq_f16_s8(int8x16_t __p0) {
+ float16x8_t __ret;
+ __ret = (float16x8_t)(__p0);
+ return __ret;
+}
+#else
+__ai float16x8_t vreinterpretq_f16_s8(int8x16_t __p0) {
+ float16x8_t __ret;
+ __ret = (float16x8_t)(__p0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai float16x8_t vreinterpretq_f16_f64(float64x2_t __p0) {
+ float16x8_t __ret;
+ __ret = (float16x8_t)(__p0);
+ return __ret;
+}
+#else
+__ai float16x8_t vreinterpretq_f16_f64(float64x2_t __p0) {
+ float16x8_t __ret;
+ __ret = (float16x8_t)(__p0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai float16x8_t vreinterpretq_f16_f32(float32x4_t __p0) {
+ float16x8_t __ret;
+ __ret = (float16x8_t)(__p0);
+ return __ret;
+}
+#else
+__ai float16x8_t vreinterpretq_f16_f32(float32x4_t __p0) {
+ float16x8_t __ret;
+ __ret = (float16x8_t)(__p0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai float16x8_t vreinterpretq_f16_s32(int32x4_t __p0) {
+ float16x8_t __ret;
+ __ret = (float16x8_t)(__p0);
+ return __ret;
+}
+#else
+__ai float16x8_t vreinterpretq_f16_s32(int32x4_t __p0) {
+ float16x8_t __ret;
+ __ret = (float16x8_t)(__p0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai float16x8_t vreinterpretq_f16_s64(int64x2_t __p0) {
+ float16x8_t __ret;
+ __ret = (float16x8_t)(__p0);
+ return __ret;
+}
+#else
+__ai float16x8_t vreinterpretq_f16_s64(int64x2_t __p0) {
+ float16x8_t __ret;
+ __ret = (float16x8_t)(__p0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai float16x8_t vreinterpretq_f16_s16(int16x8_t __p0) {
+ float16x8_t __ret;
+ __ret = (float16x8_t)(__p0);
+ return __ret;
+}
+#else
+__ai float16x8_t vreinterpretq_f16_s16(int16x8_t __p0) {
+ float16x8_t __ret;
+ __ret = (float16x8_t)(__p0);
+ return __ret;
+}
+#endif
+
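+/*
+ * Do not confuse these casts with numeric conversion.  A sketch, assuming a
+ * little-endian target:
+ *
+ *   float32x4_t f = vdupq_n_f32(1.0f);
+ *   int32x4_t bits = vreinterpretq_s32_f32(f); // 0x3F800000 == 1065353216
+ *   int32x4_t vals = vcvtq_s32_f32(f);         // 1, the converted value
+ */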
+#ifdef __LITTLE_ENDIAN__
+__ai int32x4_t vreinterpretq_s32_p8(poly8x16_t __p0) {
+ int32x4_t __ret;
+ __ret = (int32x4_t)(__p0);
+ return __ret;
+}
+#else
+__ai int32x4_t vreinterpretq_s32_p8(poly8x16_t __p0) {
+ int32x4_t __ret;
+ __ret = (int32x4_t)(__p0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai int32x4_t vreinterpretq_s32_p128(poly128_t __p0) {
+ int32x4_t __ret;
+ __ret = (int32x4_t)(__p0);
+ return __ret;
+}
+#else
+__ai int32x4_t vreinterpretq_s32_p128(poly128_t __p0) {
+ int32x4_t __ret;
+ __ret = (int32x4_t)(__p0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai int32x4_t vreinterpretq_s32_p64(poly64x2_t __p0) {
+ int32x4_t __ret;
+ __ret = (int32x4_t)(__p0);
+ return __ret;
+}
+#else
+__ai int32x4_t vreinterpretq_s32_p64(poly64x2_t __p0) {
+ int32x4_t __ret;
+ __ret = (int32x4_t)(__p0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai int32x4_t vreinterpretq_s32_p16(poly16x8_t __p0) {
+ int32x4_t __ret;
+ __ret = (int32x4_t)(__p0);
+ return __ret;
+}
+#else
+__ai int32x4_t vreinterpretq_s32_p16(poly16x8_t __p0) {
+ int32x4_t __ret;
+ __ret = (int32x4_t)(__p0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai int32x4_t vreinterpretq_s32_u8(uint8x16_t __p0) {
+ int32x4_t __ret;
+ __ret = (int32x4_t)(__p0);
+ return __ret;
+}
+#else
+__ai int32x4_t vreinterpretq_s32_u8(uint8x16_t __p0) {
+ int32x4_t __ret;
+ __ret = (int32x4_t)(__p0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai int32x4_t vreinterpretq_s32_u32(uint32x4_t __p0) {
+ int32x4_t __ret;
+ __ret = (int32x4_t)(__p0);
+ return __ret;
+}
+#else
+__ai int32x4_t vreinterpretq_s32_u32(uint32x4_t __p0) {
+ int32x4_t __ret;
+ __ret = (int32x4_t)(__p0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai int32x4_t vreinterpretq_s32_u64(uint64x2_t __p0) {
+ int32x4_t __ret;
+ __ret = (int32x4_t)(__p0);
+ return __ret;
+}
+#else
+__ai int32x4_t vreinterpretq_s32_u64(uint64x2_t __p0) {
+ int32x4_t __ret;
+ __ret = (int32x4_t)(__p0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai int32x4_t vreinterpretq_s32_u16(uint16x8_t __p0) {
+ int32x4_t __ret;
+ __ret = (int32x4_t)(__p0);
+ return __ret;
+}
+#else
+__ai int32x4_t vreinterpretq_s32_u16(uint16x8_t __p0) {
+ int32x4_t __ret;
+ __ret = (int32x4_t)(__p0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai int32x4_t vreinterpretq_s32_s8(int8x16_t __p0) {
+ int32x4_t __ret;
+ __ret = (int32x4_t)(__p0);
+ return __ret;
+}
+#else
+__ai int32x4_t vreinterpretq_s32_s8(int8x16_t __p0) {
+ int32x4_t __ret;
+ __ret = (int32x4_t)(__p0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai int32x4_t vreinterpretq_s32_f64(float64x2_t __p0) {
+ int32x4_t __ret;
+ __ret = (int32x4_t)(__p0);
+ return __ret;
+}
+#else
+__ai int32x4_t vreinterpretq_s32_f64(float64x2_t __p0) {
+ int32x4_t __ret;
+ __ret = (int32x4_t)(__p0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai int32x4_t vreinterpretq_s32_f32(float32x4_t __p0) {
+ int32x4_t __ret;
+ __ret = (int32x4_t)(__p0);
+ return __ret;
+}
+#else
+__ai int32x4_t vreinterpretq_s32_f32(float32x4_t __p0) {
+ int32x4_t __ret;
+ __ret = (int32x4_t)(__p0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai int32x4_t vreinterpretq_s32_f16(float16x8_t __p0) {
+ int32x4_t __ret;
+ __ret = (int32x4_t)(__p0);
+ return __ret;
+}
+#else
+__ai int32x4_t vreinterpretq_s32_f16(float16x8_t __p0) {
+ int32x4_t __ret;
+ __ret = (int32x4_t)(__p0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai int32x4_t vreinterpretq_s32_s64(int64x2_t __p0) {
+ int32x4_t __ret;
+ __ret = (int32x4_t)(__p0);
+ return __ret;
+}
+#else
+__ai int32x4_t vreinterpretq_s32_s64(int64x2_t __p0) {
+ int32x4_t __ret;
+ __ret = (int32x4_t)(__p0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai int32x4_t vreinterpretq_s32_s16(int16x8_t __p0) {
+ int32x4_t __ret;
+ __ret = (int32x4_t)(__p0);
+ return __ret;
+}
+#else
+__ai int32x4_t vreinterpretq_s32_s16(int16x8_t __p0) {
+ int32x4_t __ret;
+ __ret = (int32x4_t)(__p0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai int64x2_t vreinterpretq_s64_p8(poly8x16_t __p0) {
+ int64x2_t __ret;
+ __ret = (int64x2_t)(__p0);
+ return __ret;
+}
+#else
+__ai int64x2_t vreinterpretq_s64_p8(poly8x16_t __p0) {
+ int64x2_t __ret;
+ __ret = (int64x2_t)(__p0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai int64x2_t vreinterpretq_s64_p128(poly128_t __p0) {
+ int64x2_t __ret;
+ __ret = (int64x2_t)(__p0);
+ return __ret;
+}
+#else
+__ai int64x2_t vreinterpretq_s64_p128(poly128_t __p0) {
+ int64x2_t __ret;
+ __ret = (int64x2_t)(__p0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai int64x2_t vreinterpretq_s64_p64(poly64x2_t __p0) {
+ int64x2_t __ret;
+ __ret = (int64x2_t)(__p0);
+ return __ret;
+}
+#else
+__ai int64x2_t vreinterpretq_s64_p64(poly64x2_t __p0) {
+ int64x2_t __ret;
+ __ret = (int64x2_t)(__p0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai int64x2_t vreinterpretq_s64_p16(poly16x8_t __p0) {
+ int64x2_t __ret;
+ __ret = (int64x2_t)(__p0);
+ return __ret;
+}
+#else
+__ai int64x2_t vreinterpretq_s64_p16(poly16x8_t __p0) {
+ int64x2_t __ret;
+ __ret = (int64x2_t)(__p0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai int64x2_t vreinterpretq_s64_u8(uint8x16_t __p0) {
+ int64x2_t __ret;
+ __ret = (int64x2_t)(__p0);
+ return __ret;
+}
+#else
+__ai int64x2_t vreinterpretq_s64_u8(uint8x16_t __p0) {
+ int64x2_t __ret;
+ __ret = (int64x2_t)(__p0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai int64x2_t vreinterpretq_s64_u32(uint32x4_t __p0) {
+ int64x2_t __ret;
+ __ret = (int64x2_t)(__p0);
+ return __ret;
+}
+#else
+__ai int64x2_t vreinterpretq_s64_u32(uint32x4_t __p0) {
+ int64x2_t __ret;
+ __ret = (int64x2_t)(__p0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai int64x2_t vreinterpretq_s64_u64(uint64x2_t __p0) {
+ int64x2_t __ret;
+ __ret = (int64x2_t)(__p0);
+ return __ret;
+}
+#else
+__ai int64x2_t vreinterpretq_s64_u64(uint64x2_t __p0) {
+ int64x2_t __ret;
+ __ret = (int64x2_t)(__p0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai int64x2_t vreinterpretq_s64_u16(uint16x8_t __p0) {
+ int64x2_t __ret;
+ __ret = (int64x2_t)(__p0);
+ return __ret;
+}
+#else
+__ai int64x2_t vreinterpretq_s64_u16(uint16x8_t __p0) {
+ int64x2_t __ret;
+ __ret = (int64x2_t)(__p0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai int64x2_t vreinterpretq_s64_s8(int8x16_t __p0) {
+ int64x2_t __ret;
+ __ret = (int64x2_t)(__p0);
+ return __ret;
+}
+#else
+__ai int64x2_t vreinterpretq_s64_s8(int8x16_t __p0) {
+ int64x2_t __ret;
+ __ret = (int64x2_t)(__p0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai int64x2_t vreinterpretq_s64_f64(float64x2_t __p0) {
+ int64x2_t __ret;
+ __ret = (int64x2_t)(__p0);
+ return __ret;
+}
+#else
+__ai int64x2_t vreinterpretq_s64_f64(float64x2_t __p0) {
+ int64x2_t __ret;
+ __ret = (int64x2_t)(__p0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai int64x2_t vreinterpretq_s64_f32(float32x4_t __p0) {
+ int64x2_t __ret;
+ __ret = (int64x2_t)(__p0);
+ return __ret;
+}
+#else
+__ai int64x2_t vreinterpretq_s64_f32(float32x4_t __p0) {
+ int64x2_t __ret;
+ __ret = (int64x2_t)(__p0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai int64x2_t vreinterpretq_s64_f16(float16x8_t __p0) {
+ int64x2_t __ret;
+ __ret = (int64x2_t)(__p0);
+ return __ret;
+}
+#else
+__ai int64x2_t vreinterpretq_s64_f16(float16x8_t __p0) {
+ int64x2_t __ret;
+ __ret = (int64x2_t)(__p0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai int64x2_t vreinterpretq_s64_s32(int32x4_t __p0) {
+ int64x2_t __ret;
+ __ret = (int64x2_t)(__p0);
+ return __ret;
+}
+#else
+__ai int64x2_t vreinterpretq_s64_s32(int32x4_t __p0) {
+ int64x2_t __ret;
+ __ret = (int64x2_t)(__p0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai int64x2_t vreinterpretq_s64_s16(int16x8_t __p0) {
+ int64x2_t __ret;
+ __ret = (int64x2_t)(__p0);
+ return __ret;
+}
+#else
+__ai int64x2_t vreinterpretq_s64_s16(int16x8_t __p0) {
+ int64x2_t __ret;
+ __ret = (int64x2_t)(__p0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai int16x8_t vreinterpretq_s16_p8(poly8x16_t __p0) {
+ int16x8_t __ret;
+ __ret = (int16x8_t)(__p0);
+ return __ret;
+}
+#else
+__ai int16x8_t vreinterpretq_s16_p8(poly8x16_t __p0) {
+ int16x8_t __ret;
+ __ret = (int16x8_t)(__p0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai int16x8_t vreinterpretq_s16_p128(poly128_t __p0) {
+ int16x8_t __ret;
+ __ret = (int16x8_t)(__p0);
+ return __ret;
+}
+#else
+__ai int16x8_t vreinterpretq_s16_p128(poly128_t __p0) {
+ int16x8_t __ret;
+ __ret = (int16x8_t)(__p0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai int16x8_t vreinterpretq_s16_p64(poly64x2_t __p0) {
+ int16x8_t __ret;
+ __ret = (int16x8_t)(__p0);
+ return __ret;
+}
+#else
+__ai int16x8_t vreinterpretq_s16_p64(poly64x2_t __p0) {
+ int16x8_t __ret;
+ __ret = (int16x8_t)(__p0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai int16x8_t vreinterpretq_s16_p16(poly16x8_t __p0) {
+ int16x8_t __ret;
+ __ret = (int16x8_t)(__p0);
+ return __ret;
+}
+#else
+__ai int16x8_t vreinterpretq_s16_p16(poly16x8_t __p0) {
+ int16x8_t __ret;
+ __ret = (int16x8_t)(__p0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai int16x8_t vreinterpretq_s16_u8(uint8x16_t __p0) {
+ int16x8_t __ret;
+ __ret = (int16x8_t)(__p0);
+ return __ret;
+}
+#else
+__ai int16x8_t vreinterpretq_s16_u8(uint8x16_t __p0) {
+ int16x8_t __ret;
+ __ret = (int16x8_t)(__p0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai int16x8_t vreinterpretq_s16_u32(uint32x4_t __p0) {
+ int16x8_t __ret;
+ __ret = (int16x8_t)(__p0);
+ return __ret;
+}
+#else
+__ai int16x8_t vreinterpretq_s16_u32(uint32x4_t __p0) {
+ int16x8_t __ret;
+ __ret = (int16x8_t)(__p0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai int16x8_t vreinterpretq_s16_u64(uint64x2_t __p0) {
+ int16x8_t __ret;
+ __ret = (int16x8_t)(__p0);
+ return __ret;
+}
+#else
+__ai int16x8_t vreinterpretq_s16_u64(uint64x2_t __p0) {
+ int16x8_t __ret;
+ __ret = (int16x8_t)(__p0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai int16x8_t vreinterpretq_s16_u16(uint16x8_t __p0) {
+ int16x8_t __ret;
+ __ret = (int16x8_t)(__p0);
+ return __ret;
+}
+#else
+__ai int16x8_t vreinterpretq_s16_u16(uint16x8_t __p0) {
+ int16x8_t __ret;
+ __ret = (int16x8_t)(__p0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai int16x8_t vreinterpretq_s16_s8(int8x16_t __p0) {
+ int16x8_t __ret;
+ __ret = (int16x8_t)(__p0);
+ return __ret;
+}
+#else
+__ai int16x8_t vreinterpretq_s16_s8(int8x16_t __p0) {
+ int16x8_t __ret;
+ __ret = (int16x8_t)(__p0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai int16x8_t vreinterpretq_s16_f64(float64x2_t __p0) {
+ int16x8_t __ret;
+ __ret = (int16x8_t)(__p0);
+ return __ret;
+}
+#else
+__ai int16x8_t vreinterpretq_s16_f64(float64x2_t __p0) {
+ int16x8_t __ret;
+ __ret = (int16x8_t)(__p0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai int16x8_t vreinterpretq_s16_f32(float32x4_t __p0) {
+ int16x8_t __ret;
+ __ret = (int16x8_t)(__p0);
+ return __ret;
+}
+#else
+__ai int16x8_t vreinterpretq_s16_f32(float32x4_t __p0) {
+ int16x8_t __ret;
+ __ret = (int16x8_t)(__p0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai int16x8_t vreinterpretq_s16_f16(float16x8_t __p0) {
+ int16x8_t __ret;
+ __ret = (int16x8_t)(__p0);
+ return __ret;
+}
+#else
+__ai int16x8_t vreinterpretq_s16_f16(float16x8_t __p0) {
+ int16x8_t __ret;
+ __ret = (int16x8_t)(__p0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai int16x8_t vreinterpretq_s16_s32(int32x4_t __p0) {
+ int16x8_t __ret;
+ __ret = (int16x8_t)(__p0);
+ return __ret;
+}
+#else
+__ai int16x8_t vreinterpretq_s16_s32(int32x4_t __p0) {
+ int16x8_t __ret;
+ __ret = (int16x8_t)(__p0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai int16x8_t vreinterpretq_s16_s64(int64x2_t __p0) {
+ int16x8_t __ret;
+ __ret = (int16x8_t)(__p0);
+ return __ret;
+}
+#else
+__ai int16x8_t vreinterpretq_s16_s64(int64x2_t __p0) {
+ int16x8_t __ret;
+ __ret = (int16x8_t)(__p0);
+ return __ret;
+}
+#endif
+
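+/* From here on the same bitcast pattern repeats for the 64-bit
+ * (d-register) vector types such as uint8x8_t, int32x2_t, and
+ * float64x1_t, mirroring the 128-bit vreinterpretq_* forms above. */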
+#ifdef __LITTLE_ENDIAN__
+__ai uint8x8_t vreinterpret_u8_p8(poly8x8_t __p0) {
+ uint8x8_t __ret;
+ __ret = (uint8x8_t)(__p0);
+ return __ret;
+}
+#else
+__ai uint8x8_t vreinterpret_u8_p8(poly8x8_t __p0) {
+ uint8x8_t __ret;
+ __ret = (uint8x8_t)(__p0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint8x8_t vreinterpret_u8_p64(poly64x1_t __p0) {
+ uint8x8_t __ret;
+ __ret = (uint8x8_t)(__p0);
+ return __ret;
+}
+#else
+__ai uint8x8_t vreinterpret_u8_p64(poly64x1_t __p0) {
+ uint8x8_t __ret;
+ __ret = (uint8x8_t)(__p0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint8x8_t vreinterpret_u8_p16(poly16x4_t __p0) {
+ uint8x8_t __ret;
+ __ret = (uint8x8_t)(__p0);
+ return __ret;
+}
+#else
+__ai uint8x8_t vreinterpret_u8_p16(poly16x4_t __p0) {
+ uint8x8_t __ret;
+ __ret = (uint8x8_t)(__p0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint8x8_t vreinterpret_u8_u32(uint32x2_t __p0) {
+ uint8x8_t __ret;
+ __ret = (uint8x8_t)(__p0);
+ return __ret;
+}
+#else
+__ai uint8x8_t vreinterpret_u8_u32(uint32x2_t __p0) {
+ uint8x8_t __ret;
+ __ret = (uint8x8_t)(__p0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint8x8_t vreinterpret_u8_u64(uint64x1_t __p0) {
+ uint8x8_t __ret;
+ __ret = (uint8x8_t)(__p0);
+ return __ret;
+}
+#else
+__ai uint8x8_t vreinterpret_u8_u64(uint64x1_t __p0) {
+ uint8x8_t __ret;
+ __ret = (uint8x8_t)(__p0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint8x8_t vreinterpret_u8_u16(uint16x4_t __p0) {
+ uint8x8_t __ret;
+ __ret = (uint8x8_t)(__p0);
+ return __ret;
+}
+#else
+__ai uint8x8_t vreinterpret_u8_u16(uint16x4_t __p0) {
+ uint8x8_t __ret;
+ __ret = (uint8x8_t)(__p0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint8x8_t vreinterpret_u8_s8(int8x8_t __p0) {
+ uint8x8_t __ret;
+ __ret = (uint8x8_t)(__p0);
+ return __ret;
+}
+#else
+__ai uint8x8_t vreinterpret_u8_s8(int8x8_t __p0) {
+ uint8x8_t __ret;
+ __ret = (uint8x8_t)(__p0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint8x8_t vreinterpret_u8_f64(float64x1_t __p0) {
+ uint8x8_t __ret;
+ __ret = (uint8x8_t)(__p0);
+ return __ret;
+}
+#else
+__ai uint8x8_t vreinterpret_u8_f64(float64x1_t __p0) {
+ uint8x8_t __ret;
+ __ret = (uint8x8_t)(__p0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint8x8_t vreinterpret_u8_f32(float32x2_t __p0) {
+ uint8x8_t __ret;
+ __ret = (uint8x8_t)(__p0);
+ return __ret;
+}
+#else
+__ai uint8x8_t vreinterpret_u8_f32(float32x2_t __p0) {
+ uint8x8_t __ret;
+ __ret = (uint8x8_t)(__p0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint8x8_t vreinterpret_u8_f16(float16x4_t __p0) {
+ uint8x8_t __ret;
+ __ret = (uint8x8_t)(__p0);
+ return __ret;
+}
+#else
+__ai uint8x8_t vreinterpret_u8_f16(float16x4_t __p0) {
+ uint8x8_t __ret;
+ __ret = (uint8x8_t)(__p0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint8x8_t vreinterpret_u8_s32(int32x2_t __p0) {
+ uint8x8_t __ret;
+ __ret = (uint8x8_t)(__p0);
+ return __ret;
+}
+#else
+__ai uint8x8_t vreinterpret_u8_s32(int32x2_t __p0) {
+ uint8x8_t __ret;
+ __ret = (uint8x8_t)(__p0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint8x8_t vreinterpret_u8_s64(int64x1_t __p0) {
+ uint8x8_t __ret;
+ __ret = (uint8x8_t)(__p0);
+ return __ret;
+}
+#else
+__ai uint8x8_t vreinterpret_u8_s64(int64x1_t __p0) {
+ uint8x8_t __ret;
+ __ret = (uint8x8_t)(__p0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint8x8_t vreinterpret_u8_s16(int16x4_t __p0) {
+ uint8x8_t __ret;
+ __ret = (uint8x8_t)(__p0);
+ return __ret;
+}
+#else
+__ai uint8x8_t vreinterpret_u8_s16(int16x4_t __p0) {
+ uint8x8_t __ret;
+ __ret = (uint8x8_t)(__p0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint32x2_t vreinterpret_u32_p8(poly8x8_t __p0) {
+ uint32x2_t __ret;
+ __ret = (uint32x2_t)(__p0);
+ return __ret;
+}
+#else
+__ai uint32x2_t vreinterpret_u32_p8(poly8x8_t __p0) {
+ uint32x2_t __ret;
+ __ret = (uint32x2_t)(__p0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint32x2_t vreinterpret_u32_p64(poly64x1_t __p0) {
+ uint32x2_t __ret;
+ __ret = (uint32x2_t)(__p0);
+ return __ret;
+}
+#else
+__ai uint32x2_t vreinterpret_u32_p64(poly64x1_t __p0) {
+ uint32x2_t __ret;
+ __ret = (uint32x2_t)(__p0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint32x2_t vreinterpret_u32_p16(poly16x4_t __p0) {
+ uint32x2_t __ret;
+ __ret = (uint32x2_t)(__p0);
+ return __ret;
+}
+#else
+__ai uint32x2_t vreinterpret_u32_p16(poly16x4_t __p0) {
+ uint32x2_t __ret;
+ __ret = (uint32x2_t)(__p0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint32x2_t vreinterpret_u32_u8(uint8x8_t __p0) {
+ uint32x2_t __ret;
+ __ret = (uint32x2_t)(__p0);
+ return __ret;
+}
+#else
+__ai uint32x2_t vreinterpret_u32_u8(uint8x8_t __p0) {
+ uint32x2_t __ret;
+ __ret = (uint32x2_t)(__p0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint32x2_t vreinterpret_u32_u64(uint64x1_t __p0) {
+ uint32x2_t __ret;
+ __ret = (uint32x2_t)(__p0);
+ return __ret;
+}
+#else
+__ai uint32x2_t vreinterpret_u32_u64(uint64x1_t __p0) {
+ uint32x2_t __ret;
+ __ret = (uint32x2_t)(__p0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint32x2_t vreinterpret_u32_u16(uint16x4_t __p0) {
+ uint32x2_t __ret;
+ __ret = (uint32x2_t)(__p0);
+ return __ret;
+}
+#else
+__ai uint32x2_t vreinterpret_u32_u16(uint16x4_t __p0) {
+ uint32x2_t __ret;
+ __ret = (uint32x2_t)(__p0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint32x2_t vreinterpret_u32_s8(int8x8_t __p0) {
+ uint32x2_t __ret;
+ __ret = (uint32x2_t)(__p0);
+ return __ret;
+}
+#else
+__ai uint32x2_t vreinterpret_u32_s8(int8x8_t __p0) {
+ uint32x2_t __ret;
+ __ret = (uint32x2_t)(__p0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint32x2_t vreinterpret_u32_f64(float64x1_t __p0) {
+ uint32x2_t __ret;
+ __ret = (uint32x2_t)(__p0);
+ return __ret;
+}
+#else
+__ai uint32x2_t vreinterpret_u32_f64(float64x1_t __p0) {
+ uint32x2_t __ret;
+ __ret = (uint32x2_t)(__p0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint32x2_t vreinterpret_u32_f32(float32x2_t __p0) {
+ uint32x2_t __ret;
+ __ret = (uint32x2_t)(__p0);
+ return __ret;
+}
+#else
+__ai uint32x2_t vreinterpret_u32_f32(float32x2_t __p0) {
+ uint32x2_t __ret;
+ __ret = (uint32x2_t)(__p0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint32x2_t vreinterpret_u32_f16(float16x4_t __p0) {
+ uint32x2_t __ret;
+ __ret = (uint32x2_t)(__p0);
+ return __ret;
+}
+#else
+__ai uint32x2_t vreinterpret_u32_f16(float16x4_t __p0) {
+ uint32x2_t __ret;
+ __ret = (uint32x2_t)(__p0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint32x2_t vreinterpret_u32_s32(int32x2_t __p0) {
+ uint32x2_t __ret;
+ __ret = (uint32x2_t)(__p0);
+ return __ret;
+}
+#else
+__ai uint32x2_t vreinterpret_u32_s32(int32x2_t __p0) {
+ uint32x2_t __ret;
+ __ret = (uint32x2_t)(__p0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint32x2_t vreinterpret_u32_s64(int64x1_t __p0) {
+ uint32x2_t __ret;
+ __ret = (uint32x2_t)(__p0);
+ return __ret;
+}
+#else
+__ai uint32x2_t vreinterpret_u32_s64(int64x1_t __p0) {
+ uint32x2_t __ret;
+ __ret = (uint32x2_t)(__p0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint32x2_t vreinterpret_u32_s16(int16x4_t __p0) {
+ uint32x2_t __ret;
+ __ret = (uint32x2_t)(__p0);
+ return __ret;
+}
+#else
+__ai uint32x2_t vreinterpret_u32_s16(int16x4_t __p0) {
+ uint32x2_t __ret;
+ __ret = (uint32x2_t)(__p0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint64x1_t vreinterpret_u64_p8(poly8x8_t __p0) {
+ uint64x1_t __ret;
+ __ret = (uint64x1_t)(__p0);
+ return __ret;
+}
+#else
+__ai uint64x1_t vreinterpret_u64_p8(poly8x8_t __p0) {
+ uint64x1_t __ret;
+ __ret = (uint64x1_t)(__p0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint64x1_t vreinterpret_u64_p64(poly64x1_t __p0) {
+ uint64x1_t __ret;
+ __ret = (uint64x1_t)(__p0);
+ return __ret;
+}
+#else
+__ai uint64x1_t vreinterpret_u64_p64(poly64x1_t __p0) {
+ uint64x1_t __ret;
+ __ret = (uint64x1_t)(__p0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint64x1_t vreinterpret_u64_p16(poly16x4_t __p0) {
+ uint64x1_t __ret;
+ __ret = (uint64x1_t)(__p0);
+ return __ret;
+}
+#else
+__ai uint64x1_t vreinterpret_u64_p16(poly16x4_t __p0) {
+ uint64x1_t __ret;
+ __ret = (uint64x1_t)(__p0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint64x1_t vreinterpret_u64_u8(uint8x8_t __p0) {
+ uint64x1_t __ret;
+ __ret = (uint64x1_t)(__p0);
+ return __ret;
+}
+#else
+__ai uint64x1_t vreinterpret_u64_u8(uint8x8_t __p0) {
+ uint64x1_t __ret;
+ __ret = (uint64x1_t)(__p0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint64x1_t vreinterpret_u64_u32(uint32x2_t __p0) {
+ uint64x1_t __ret;
+ __ret = (uint64x1_t)(__p0);
+ return __ret;
+}
+#else
+__ai uint64x1_t vreinterpret_u64_u32(uint32x2_t __p0) {
+ uint64x1_t __ret;
+ __ret = (uint64x1_t)(__p0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint64x1_t vreinterpret_u64_u16(uint16x4_t __p0) {
+ uint64x1_t __ret;
+ __ret = (uint64x1_t)(__p0);
+ return __ret;
+}
+#else
+__ai uint64x1_t vreinterpret_u64_u16(uint16x4_t __p0) {
+ uint64x1_t __ret;
+ __ret = (uint64x1_t)(__p0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint64x1_t vreinterpret_u64_s8(int8x8_t __p0) {
+ uint64x1_t __ret;
+ __ret = (uint64x1_t)(__p0);
+ return __ret;
+}
+#else
+__ai uint64x1_t vreinterpret_u64_s8(int8x8_t __p0) {
+ uint64x1_t __ret;
+ __ret = (uint64x1_t)(__p0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint64x1_t vreinterpret_u64_f64(float64x1_t __p0) {
+ uint64x1_t __ret;
+ __ret = (uint64x1_t)(__p0);
+ return __ret;
+}
+#else
+__ai uint64x1_t vreinterpret_u64_f64(float64x1_t __p0) {
+ uint64x1_t __ret;
+ __ret = (uint64x1_t)(__p0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint64x1_t vreinterpret_u64_f32(float32x2_t __p0) {
+ uint64x1_t __ret;
+ __ret = (uint64x1_t)(__p0);
+ return __ret;
+}
+#else
+__ai uint64x1_t vreinterpret_u64_f32(float32x2_t __p0) {
+ uint64x1_t __ret;
+ __ret = (uint64x1_t)(__p0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint64x1_t vreinterpret_u64_f16(float16x4_t __p0) {
+ uint64x1_t __ret;
+ __ret = (uint64x1_t)(__p0);
+ return __ret;
+}
+#else
+__ai uint64x1_t vreinterpret_u64_f16(float16x4_t __p0) {
+ uint64x1_t __ret;
+ __ret = (uint64x1_t)(__p0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint64x1_t vreinterpret_u64_s32(int32x2_t __p0) {
+ uint64x1_t __ret;
+ __ret = (uint64x1_t)(__p0);
+ return __ret;
+}
+#else
+__ai uint64x1_t vreinterpret_u64_s32(int32x2_t __p0) {
+ uint64x1_t __ret;
+ __ret = (uint64x1_t)(__p0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint64x1_t vreinterpret_u64_s64(int64x1_t __p0) {
+ uint64x1_t __ret;
+ __ret = (uint64x1_t)(__p0);
+ return __ret;
+}
+#else
+__ai uint64x1_t vreinterpret_u64_s64(int64x1_t __p0) {
+ uint64x1_t __ret;
+ __ret = (uint64x1_t)(__p0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint64x1_t vreinterpret_u64_s16(int16x4_t __p0) {
+ uint64x1_t __ret;
+ __ret = (uint64x1_t)(__p0);
+ return __ret;
+}
+#else
+__ai uint64x1_t vreinterpret_u64_s16(int16x4_t __p0) {
+ uint64x1_t __ret;
+ __ret = (uint64x1_t)(__p0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint16x4_t vreinterpret_u16_p8(poly8x8_t __p0) {
+ uint16x4_t __ret;
+ __ret = (uint16x4_t)(__p0);
+ return __ret;
+}
+#else
+__ai uint16x4_t vreinterpret_u16_p8(poly8x8_t __p0) {
+ uint16x4_t __ret;
+ __ret = (uint16x4_t)(__p0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint16x4_t vreinterpret_u16_p64(poly64x1_t __p0) {
+ uint16x4_t __ret;
+ __ret = (uint16x4_t)(__p0);
+ return __ret;
+}
+#else
+__ai uint16x4_t vreinterpret_u16_p64(poly64x1_t __p0) {
+ uint16x4_t __ret;
+ __ret = (uint16x4_t)(__p0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint16x4_t vreinterpret_u16_p16(poly16x4_t __p0) {
+ uint16x4_t __ret;
+ __ret = (uint16x4_t)(__p0);
+ return __ret;
+}
+#else
+__ai uint16x4_t vreinterpret_u16_p16(poly16x4_t __p0) {
+ uint16x4_t __ret;
+ __ret = (uint16x4_t)(__p0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint16x4_t vreinterpret_u16_u8(uint8x8_t __p0) {
+ uint16x4_t __ret;
+ __ret = (uint16x4_t)(__p0);
+ return __ret;
+}
+#else
+__ai uint16x4_t vreinterpret_u16_u8(uint8x8_t __p0) {
+ uint16x4_t __ret;
+ __ret = (uint16x4_t)(__p0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint16x4_t vreinterpret_u16_u32(uint32x2_t __p0) {
+ uint16x4_t __ret;
+ __ret = (uint16x4_t)(__p0);
+ return __ret;
+}
+#else
+__ai uint16x4_t vreinterpret_u16_u32(uint32x2_t __p0) {
+ uint16x4_t __ret;
+ __ret = (uint16x4_t)(__p0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint16x4_t vreinterpret_u16_u64(uint64x1_t __p0) {
+ uint16x4_t __ret;
+ __ret = (uint16x4_t)(__p0);
+ return __ret;
+}
+#else
+__ai uint16x4_t vreinterpret_u16_u64(uint64x1_t __p0) {
+ uint16x4_t __ret;
+ __ret = (uint16x4_t)(__p0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint16x4_t vreinterpret_u16_s8(int8x8_t __p0) {
+ uint16x4_t __ret;
+ __ret = (uint16x4_t)(__p0);
+ return __ret;
+}
+#else
+__ai uint16x4_t vreinterpret_u16_s8(int8x8_t __p0) {
+ uint16x4_t __ret;
+ __ret = (uint16x4_t)(__p0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint16x4_t vreinterpret_u16_f64(float64x1_t __p0) {
+ uint16x4_t __ret;
+ __ret = (uint16x4_t)(__p0);
+ return __ret;
+}
+#else
+__ai uint16x4_t vreinterpret_u16_f64(float64x1_t __p0) {
+ uint16x4_t __ret;
+ __ret = (uint16x4_t)(__p0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint16x4_t vreinterpret_u16_f32(float32x2_t __p0) {
+ uint16x4_t __ret;
+ __ret = (uint16x4_t)(__p0);
+ return __ret;
+}
+#else
+__ai uint16x4_t vreinterpret_u16_f32(float32x2_t __p0) {
+ uint16x4_t __ret;
+ __ret = (uint16x4_t)(__p0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint16x4_t vreinterpret_u16_f16(float16x4_t __p0) {
+ uint16x4_t __ret;
+ __ret = (uint16x4_t)(__p0);
+ return __ret;
+}
+#else
+__ai uint16x4_t vreinterpret_u16_f16(float16x4_t __p0) {
+ uint16x4_t __ret;
+ __ret = (uint16x4_t)(__p0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint16x4_t vreinterpret_u16_s32(int32x2_t __p0) {
+ uint16x4_t __ret;
+ __ret = (uint16x4_t)(__p0);
+ return __ret;
+}
+#else
+__ai uint16x4_t vreinterpret_u16_s32(int32x2_t __p0) {
+ uint16x4_t __ret;
+ __ret = (uint16x4_t)(__p0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint16x4_t vreinterpret_u16_s64(int64x1_t __p0) {
+ uint16x4_t __ret;
+ __ret = (uint16x4_t)(__p0);
+ return __ret;
+}
+#else
+__ai uint16x4_t vreinterpret_u16_s64(int64x1_t __p0) {
+ uint16x4_t __ret;
+ __ret = (uint16x4_t)(__p0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint16x4_t vreinterpret_u16_s16(int16x4_t __p0) {
+ uint16x4_t __ret;
+ __ret = (uint16x4_t)(__p0);
+ return __ret;
+}
+#else
+__ai uint16x4_t vreinterpret_u16_s16(int16x4_t __p0) {
+ uint16x4_t __ret;
+ __ret = (uint16x4_t)(__p0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai int8x8_t vreinterpret_s8_p8(poly8x8_t __p0) {
+ int8x8_t __ret;
+ __ret = (int8x8_t)(__p0);
+ return __ret;
+}
+#else
+__ai int8x8_t vreinterpret_s8_p8(poly8x8_t __p0) {
+ int8x8_t __ret;
+ __ret = (int8x8_t)(__p0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai int8x8_t vreinterpret_s8_p64(poly64x1_t __p0) {
+ int8x8_t __ret;
+ __ret = (int8x8_t)(__p0);
+ return __ret;
+}
+#else
+__ai int8x8_t vreinterpret_s8_p64(poly64x1_t __p0) {
+ int8x8_t __ret;
+ __ret = (int8x8_t)(__p0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai int8x8_t vreinterpret_s8_p16(poly16x4_t __p0) {
+ int8x8_t __ret;
+ __ret = (int8x8_t)(__p0);
+ return __ret;
+}
+#else
+__ai int8x8_t vreinterpret_s8_p16(poly16x4_t __p0) {
+ int8x8_t __ret;
+ __ret = (int8x8_t)(__p0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai int8x8_t vreinterpret_s8_u8(uint8x8_t __p0) {
+ int8x8_t __ret;
+ __ret = (int8x8_t)(__p0);
+ return __ret;
+}
+#else
+__ai int8x8_t vreinterpret_s8_u8(uint8x8_t __p0) {
+ int8x8_t __ret;
+ __ret = (int8x8_t)(__p0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai int8x8_t vreinterpret_s8_u32(uint32x2_t __p0) {
+ int8x8_t __ret;
+ __ret = (int8x8_t)(__p0);
+ return __ret;
+}
+#else
+__ai int8x8_t vreinterpret_s8_u32(uint32x2_t __p0) {
+ int8x8_t __ret;
+ __ret = (int8x8_t)(__p0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai int8x8_t vreinterpret_s8_u64(uint64x1_t __p0) {
+ int8x8_t __ret;
+ __ret = (int8x8_t)(__p0);
+ return __ret;
+}
+#else
+__ai int8x8_t vreinterpret_s8_u64(uint64x1_t __p0) {
+ int8x8_t __ret;
+ __ret = (int8x8_t)(__p0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai int8x8_t vreinterpret_s8_u16(uint16x4_t __p0) {
+ int8x8_t __ret;
+ __ret = (int8x8_t)(__p0);
+ return __ret;
+}
+#else
+__ai int8x8_t vreinterpret_s8_u16(uint16x4_t __p0) {
+ int8x8_t __ret;
+ __ret = (int8x8_t)(__p0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai int8x8_t vreinterpret_s8_f64(float64x1_t __p0) {
+ int8x8_t __ret;
+ __ret = (int8x8_t)(__p0);
+ return __ret;
+}
+#else
+__ai int8x8_t vreinterpret_s8_f64(float64x1_t __p0) {
+ int8x8_t __ret;
+ __ret = (int8x8_t)(__p0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai int8x8_t vreinterpret_s8_f32(float32x2_t __p0) {
+ int8x8_t __ret;
+ __ret = (int8x8_t)(__p0);
+ return __ret;
+}
+#else
+__ai int8x8_t vreinterpret_s8_f32(float32x2_t __p0) {
+ int8x8_t __ret;
+ __ret = (int8x8_t)(__p0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai int8x8_t vreinterpret_s8_f16(float16x4_t __p0) {
+ int8x8_t __ret;
+ __ret = (int8x8_t)(__p0);
+ return __ret;
+}
+#else
+__ai int8x8_t vreinterpret_s8_f16(float16x4_t __p0) {
+ int8x8_t __ret;
+ __ret = (int8x8_t)(__p0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai int8x8_t vreinterpret_s8_s32(int32x2_t __p0) {
+ int8x8_t __ret;
+ __ret = (int8x8_t)(__p0);
+ return __ret;
+}
+#else
+__ai int8x8_t vreinterpret_s8_s32(int32x2_t __p0) {
+ int8x8_t __ret;
+ __ret = (int8x8_t)(__p0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai int8x8_t vreinterpret_s8_s64(int64x1_t __p0) {
+ int8x8_t __ret;
+ __ret = (int8x8_t)(__p0);
+ return __ret;
+}
+#else
+__ai int8x8_t vreinterpret_s8_s64(int64x1_t __p0) {
+ int8x8_t __ret;
+ __ret = (int8x8_t)(__p0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai int8x8_t vreinterpret_s8_s16(int16x4_t __p0) {
+ int8x8_t __ret;
+ __ret = (int8x8_t)(__p0);
+ return __ret;
+}
+#else
+__ai int8x8_t vreinterpret_s8_s16(int16x4_t __p0) {
+ int8x8_t __ret;
+ __ret = (int8x8_t)(__p0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai float64x1_t vreinterpret_f64_p8(poly8x8_t __p0) {
+ float64x1_t __ret;
+ __ret = (float64x1_t)(__p0);
+ return __ret;
+}
+#else
+__ai float64x1_t vreinterpret_f64_p8(poly8x8_t __p0) {
+ float64x1_t __ret;
+ __ret = (float64x1_t)(__p0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai float64x1_t vreinterpret_f64_p64(poly64x1_t __p0) {
+ float64x1_t __ret;
+ __ret = (float64x1_t)(__p0);
+ return __ret;
+}
+#else
+__ai float64x1_t vreinterpret_f64_p64(poly64x1_t __p0) {
+ float64x1_t __ret;
+ __ret = (float64x1_t)(__p0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai float64x1_t vreinterpret_f64_p16(poly16x4_t __p0) {
+ float64x1_t __ret;
+ __ret = (float64x1_t)(__p0);
+ return __ret;
+}
+#else
+__ai float64x1_t vreinterpret_f64_p16(poly16x4_t __p0) {
+ float64x1_t __ret;
+ __ret = (float64x1_t)(__p0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai float64x1_t vreinterpret_f64_u8(uint8x8_t __p0) {
+ float64x1_t __ret;
+ __ret = (float64x1_t)(__p0);
+ return __ret;
+}
+#else
+__ai float64x1_t vreinterpret_f64_u8(uint8x8_t __p0) {
+ float64x1_t __ret;
+ __ret = (float64x1_t)(__p0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai float64x1_t vreinterpret_f64_u32(uint32x2_t __p0) {
+ float64x1_t __ret;
+ __ret = (float64x1_t)(__p0);
+ return __ret;
+}
+#else
+__ai float64x1_t vreinterpret_f64_u32(uint32x2_t __p0) {
+ float64x1_t __ret;
+ __ret = (float64x1_t)(__p0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai float64x1_t vreinterpret_f64_u64(uint64x1_t __p0) {
+ float64x1_t __ret;
+ __ret = (float64x1_t)(__p0);
+ return __ret;
+}
+#else
+__ai float64x1_t vreinterpret_f64_u64(uint64x1_t __p0) {
+ float64x1_t __ret;
+ __ret = (float64x1_t)(__p0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai float64x1_t vreinterpret_f64_u16(uint16x4_t __p0) {
+ float64x1_t __ret;
+ __ret = (float64x1_t)(__p0);
+ return __ret;
+}
+#else
+__ai float64x1_t vreinterpret_f64_u16(uint16x4_t __p0) {
+ float64x1_t __ret;
+ __ret = (float64x1_t)(__p0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai float64x1_t vreinterpret_f64_s8(int8x8_t __p0) {
+ float64x1_t __ret;
+ __ret = (float64x1_t)(__p0);
+ return __ret;
+}
+#else
+__ai float64x1_t vreinterpret_f64_s8(int8x8_t __p0) {
+ float64x1_t __ret;
+ __ret = (float64x1_t)(__p0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai float64x1_t vreinterpret_f64_f32(float32x2_t __p0) {
+ float64x1_t __ret;
+ __ret = (float64x1_t)(__p0);
+ return __ret;
+}
+#else
+__ai float64x1_t vreinterpret_f64_f32(float32x2_t __p0) {
+ float64x1_t __ret;
+ __ret = (float64x1_t)(__p0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai float64x1_t vreinterpret_f64_f16(float16x4_t __p0) {
+ float64x1_t __ret;
+ __ret = (float64x1_t)(__p0);
+ return __ret;
+}
+#else
+__ai float64x1_t vreinterpret_f64_f16(float16x4_t __p0) {
+ float64x1_t __ret;
+ __ret = (float64x1_t)(__p0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai float64x1_t vreinterpret_f64_s32(int32x2_t __p0) {
+ float64x1_t __ret;
+ __ret = (float64x1_t)(__p0);
+ return __ret;
+}
+#else
+__ai float64x1_t vreinterpret_f64_s32(int32x2_t __p0) {
+ float64x1_t __ret;
+ __ret = (float64x1_t)(__p0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai float64x1_t vreinterpret_f64_s64(int64x1_t __p0) {
+ float64x1_t __ret;
+ __ret = (float64x1_t)(__p0);
+ return __ret;
+}
+#else
+__ai float64x1_t vreinterpret_f64_s64(int64x1_t __p0) {
+ float64x1_t __ret;
+ __ret = (float64x1_t)(__p0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai float64x1_t vreinterpret_f64_s16(int16x4_t __p0) {
+ float64x1_t __ret;
+ __ret = (float64x1_t)(__p0);
+ return __ret;
+}
+#else
+__ai float64x1_t vreinterpret_f64_s16(int16x4_t __p0) {
+ float64x1_t __ret;
+ __ret = (float64x1_t)(__p0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai float32x2_t vreinterpret_f32_p8(poly8x8_t __p0) {
+ float32x2_t __ret;
+ __ret = (float32x2_t)(__p0);
+ return __ret;
+}
+#else
+__ai float32x2_t vreinterpret_f32_p8(poly8x8_t __p0) {
+ float32x2_t __ret;
+ __ret = (float32x2_t)(__p0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai float32x2_t vreinterpret_f32_p64(poly64x1_t __p0) {
+ float32x2_t __ret;
+ __ret = (float32x2_t)(__p0);
+ return __ret;
+}
+#else
+__ai float32x2_t vreinterpret_f32_p64(poly64x1_t __p0) {
+ float32x2_t __ret;
+ __ret = (float32x2_t)(__p0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai float32x2_t vreinterpret_f32_p16(poly16x4_t __p0) {
+ float32x2_t __ret;
+ __ret = (float32x2_t)(__p0);
+ return __ret;
+}
+#else
+__ai float32x2_t vreinterpret_f32_p16(poly16x4_t __p0) {
+ float32x2_t __ret;
+ __ret = (float32x2_t)(__p0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai float32x2_t vreinterpret_f32_u8(uint8x8_t __p0) {
+ float32x2_t __ret;
+ __ret = (float32x2_t)(__p0);
+ return __ret;
+}
+#else
+__ai float32x2_t vreinterpret_f32_u8(uint8x8_t __p0) {
+ float32x2_t __ret;
+ __ret = (float32x2_t)(__p0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai float32x2_t vreinterpret_f32_u32(uint32x2_t __p0) {
+ float32x2_t __ret;
+ __ret = (float32x2_t)(__p0);
+ return __ret;
+}
+#else
+__ai float32x2_t vreinterpret_f32_u32(uint32x2_t __p0) {
+ float32x2_t __ret;
+ __ret = (float32x2_t)(__p0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai float32x2_t vreinterpret_f32_u64(uint64x1_t __p0) {
+ float32x2_t __ret;
+ __ret = (float32x2_t)(__p0);
+ return __ret;
+}
+#else
+__ai float32x2_t vreinterpret_f32_u64(uint64x1_t __p0) {
+ float32x2_t __ret;
+ __ret = (float32x2_t)(__p0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai float32x2_t vreinterpret_f32_u16(uint16x4_t __p0) {
+ float32x2_t __ret;
+ __ret = (float32x2_t)(__p0);
+ return __ret;
+}
+#else
+__ai float32x2_t vreinterpret_f32_u16(uint16x4_t __p0) {
+ float32x2_t __ret;
+ __ret = (float32x2_t)(__p0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai float32x2_t vreinterpret_f32_s8(int8x8_t __p0) {
+ float32x2_t __ret;
+ __ret = (float32x2_t)(__p0);
+ return __ret;
+}
+#else
+__ai float32x2_t vreinterpret_f32_s8(int8x8_t __p0) {
+ float32x2_t __ret;
+ __ret = (float32x2_t)(__p0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai float32x2_t vreinterpret_f32_f64(float64x1_t __p0) {
+ float32x2_t __ret;
+ __ret = (float32x2_t)(__p0);
+ return __ret;
+}
+#else
+__ai float32x2_t vreinterpret_f32_f64(float64x1_t __p0) {
+ float32x2_t __ret;
+ __ret = (float32x2_t)(__p0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai float32x2_t vreinterpret_f32_f16(float16x4_t __p0) {
+ float32x2_t __ret;
+ __ret = (float32x2_t)(__p0);
+ return __ret;
+}
+#else
+__ai float32x2_t vreinterpret_f32_f16(float16x4_t __p0) {
+ float32x2_t __ret;
+ __ret = (float32x2_t)(__p0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai float32x2_t vreinterpret_f32_s32(int32x2_t __p0) {
+ float32x2_t __ret;
+ __ret = (float32x2_t)(__p0);
+ return __ret;
+}
+#else
+__ai float32x2_t vreinterpret_f32_s32(int32x2_t __p0) {
+ float32x2_t __ret;
+ __ret = (float32x2_t)(__p0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai float32x2_t vreinterpret_f32_s64(int64x1_t __p0) {
+ float32x2_t __ret;
+ __ret = (float32x2_t)(__p0);
+ return __ret;
+}
+#else
+__ai float32x2_t vreinterpret_f32_s64(int64x1_t __p0) {
+ float32x2_t __ret;
+ __ret = (float32x2_t)(__p0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai float32x2_t vreinterpret_f32_s16(int16x4_t __p0) {
+ float32x2_t __ret;
+ __ret = (float32x2_t)(__p0);
+ return __ret;
+}
+#else
+__ai float32x2_t vreinterpret_f32_s16(int16x4_t __p0) {
+ float32x2_t __ret;
+ __ret = (float32x2_t)(__p0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai float16x4_t vreinterpret_f16_p8(poly8x8_t __p0) {
+ float16x4_t __ret;
+ __ret = (float16x4_t)(__p0);
+ return __ret;
+}
+#else
+__ai float16x4_t vreinterpret_f16_p8(poly8x8_t __p0) {
+ float16x4_t __ret;
+ __ret = (float16x4_t)(__p0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai float16x4_t vreinterpret_f16_p64(poly64x1_t __p0) {
+ float16x4_t __ret;
+ __ret = (float16x4_t)(__p0);
+ return __ret;
+}
+#else
+__ai float16x4_t vreinterpret_f16_p64(poly64x1_t __p0) {
+ float16x4_t __ret;
+ __ret = (float16x4_t)(__p0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai float16x4_t vreinterpret_f16_p16(poly16x4_t __p0) {
+ float16x4_t __ret;
+ __ret = (float16x4_t)(__p0);
+ return __ret;
+}
+#else
+__ai float16x4_t vreinterpret_f16_p16(poly16x4_t __p0) {
+ float16x4_t __ret;
+ __ret = (float16x4_t)(__p0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai float16x4_t vreinterpret_f16_u8(uint8x8_t __p0) {
+ float16x4_t __ret;
+ __ret = (float16x4_t)(__p0);
+ return __ret;
+}
+#else
+__ai float16x4_t vreinterpret_f16_u8(uint8x8_t __p0) {
+ float16x4_t __ret;
+ __ret = (float16x4_t)(__p0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai float16x4_t vreinterpret_f16_u32(uint32x2_t __p0) {
+ float16x4_t __ret;
+ __ret = (float16x4_t)(__p0);
+ return __ret;
+}
+#else
+__ai float16x4_t vreinterpret_f16_u32(uint32x2_t __p0) {
+ float16x4_t __ret;
+ __ret = (float16x4_t)(__p0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai float16x4_t vreinterpret_f16_u64(uint64x1_t __p0) {
+ float16x4_t __ret;
+ __ret = (float16x4_t)(__p0);
+ return __ret;
+}
+#else
+__ai float16x4_t vreinterpret_f16_u64(uint64x1_t __p0) {
+ float16x4_t __ret;
+ __ret = (float16x4_t)(__p0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai float16x4_t vreinterpret_f16_u16(uint16x4_t __p0) {
+ float16x4_t __ret;
+ __ret = (float16x4_t)(__p0);
+ return __ret;
+}
+#else
+__ai float16x4_t vreinterpret_f16_u16(uint16x4_t __p0) {
+ float16x4_t __ret;
+ __ret = (float16x4_t)(__p0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai float16x4_t vreinterpret_f16_s8(int8x8_t __p0) {
+ float16x4_t __ret;
+ __ret = (float16x4_t)(__p0);
+ return __ret;
+}
+#else
+__ai float16x4_t vreinterpret_f16_s8(int8x8_t __p0) {
+ float16x4_t __ret;
+ __ret = (float16x4_t)(__p0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai float16x4_t vreinterpret_f16_f64(float64x1_t __p0) {
+ float16x4_t __ret;
+ __ret = (float16x4_t)(__p0);
+ return __ret;
+}
+#else
+__ai float16x4_t vreinterpret_f16_f64(float64x1_t __p0) {
+ float16x4_t __ret;
+ __ret = (float16x4_t)(__p0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai float16x4_t vreinterpret_f16_f32(float32x2_t __p0) {
+ float16x4_t __ret;
+ __ret = (float16x4_t)(__p0);
+ return __ret;
+}
+#else
+__ai float16x4_t vreinterpret_f16_f32(float32x2_t __p0) {
+ float16x4_t __ret;
+ __ret = (float16x4_t)(__p0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai float16x4_t vreinterpret_f16_s32(int32x2_t __p0) {
+ float16x4_t __ret;
+ __ret = (float16x4_t)(__p0);
+ return __ret;
+}
+#else
+__ai float16x4_t vreinterpret_f16_s32(int32x2_t __p0) {
+ float16x4_t __ret;
+ __ret = (float16x4_t)(__p0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai float16x4_t vreinterpret_f16_s64(int64x1_t __p0) {
+ float16x4_t __ret;
+ __ret = (float16x4_t)(__p0);
+ return __ret;
+}
+#else
+__ai float16x4_t vreinterpret_f16_s64(int64x1_t __p0) {
+ float16x4_t __ret;
+ __ret = (float16x4_t)(__p0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai float16x4_t vreinterpret_f16_s16(int16x4_t __p0) {
+ float16x4_t __ret;
+ __ret = (float16x4_t)(__p0);
+ return __ret;
+}
+#else
+__ai float16x4_t vreinterpret_f16_s16(int16x4_t __p0) {
+ float16x4_t __ret;
+ __ret = (float16x4_t)(__p0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai int32x2_t vreinterpret_s32_p8(poly8x8_t __p0) {
+ int32x2_t __ret;
+ __ret = (int32x2_t)(__p0);
+ return __ret;
+}
+#else
+__ai int32x2_t vreinterpret_s32_p8(poly8x8_t __p0) {
+ int32x2_t __ret;
+ __ret = (int32x2_t)(__p0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai int32x2_t vreinterpret_s32_p64(poly64x1_t __p0) {
+ int32x2_t __ret;
+ __ret = (int32x2_t)(__p0);
+ return __ret;
+}
+#else
+__ai int32x2_t vreinterpret_s32_p64(poly64x1_t __p0) {
+ int32x2_t __ret;
+ __ret = (int32x2_t)(__p0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai int32x2_t vreinterpret_s32_p16(poly16x4_t __p0) {
+ int32x2_t __ret;
+ __ret = (int32x2_t)(__p0);
+ return __ret;
+}
+#else
+__ai int32x2_t vreinterpret_s32_p16(poly16x4_t __p0) {
+ int32x2_t __ret;
+ __ret = (int32x2_t)(__p0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai int32x2_t vreinterpret_s32_u8(uint8x8_t __p0) {
+ int32x2_t __ret;
+ __ret = (int32x2_t)(__p0);
+ return __ret;
+}
+#else
+__ai int32x2_t vreinterpret_s32_u8(uint8x8_t __p0) {
+ int32x2_t __ret;
+ __ret = (int32x2_t)(__p0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai int32x2_t vreinterpret_s32_u32(uint32x2_t __p0) {
+ int32x2_t __ret;
+ __ret = (int32x2_t)(__p0);
+ return __ret;
+}
+#else
+__ai int32x2_t vreinterpret_s32_u32(uint32x2_t __p0) {
+ int32x2_t __ret;
+ __ret = (int32x2_t)(__p0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai int32x2_t vreinterpret_s32_u64(uint64x1_t __p0) {
+ int32x2_t __ret;
+ __ret = (int32x2_t)(__p0);
+ return __ret;
+}
+#else
+__ai int32x2_t vreinterpret_s32_u64(uint64x1_t __p0) {
+ int32x2_t __ret;
+ __ret = (int32x2_t)(__p0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai int32x2_t vreinterpret_s32_u16(uint16x4_t __p0) {
+ int32x2_t __ret;
+ __ret = (int32x2_t)(__p0);
+ return __ret;
+}
+#else
+__ai int32x2_t vreinterpret_s32_u16(uint16x4_t __p0) {
+ int32x2_t __ret;
+ __ret = (int32x2_t)(__p0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai int32x2_t vreinterpret_s32_s8(int8x8_t __p0) {
+ int32x2_t __ret;
+ __ret = (int32x2_t)(__p0);
+ return __ret;
+}
+#else
+__ai int32x2_t vreinterpret_s32_s8(int8x8_t __p0) {
+ int32x2_t __ret;
+ __ret = (int32x2_t)(__p0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai int32x2_t vreinterpret_s32_f64(float64x1_t __p0) {
+ int32x2_t __ret;
+ __ret = (int32x2_t)(__p0);
+ return __ret;
+}
+#else
+__ai int32x2_t vreinterpret_s32_f64(float64x1_t __p0) {
+ int32x2_t __ret;
+ __ret = (int32x2_t)(__p0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai int32x2_t vreinterpret_s32_f32(float32x2_t __p0) {
+ int32x2_t __ret;
+ __ret = (int32x2_t)(__p0);
+ return __ret;
+}
+#else
+__ai int32x2_t vreinterpret_s32_f32(float32x2_t __p0) {
+ int32x2_t __ret;
+ __ret = (int32x2_t)(__p0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai int32x2_t vreinterpret_s32_f16(float16x4_t __p0) {
+ int32x2_t __ret;
+ __ret = (int32x2_t)(__p0);
+ return __ret;
+}
+#else
+__ai int32x2_t vreinterpret_s32_f16(float16x4_t __p0) {
+ int32x2_t __ret;
+ __ret = (int32x2_t)(__p0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai int32x2_t vreinterpret_s32_s64(int64x1_t __p0) {
+ int32x2_t __ret;
+ __ret = (int32x2_t)(__p0);
+ return __ret;
+}
+#else
+__ai int32x2_t vreinterpret_s32_s64(int64x1_t __p0) {
+ int32x2_t __ret;
+ __ret = (int32x2_t)(__p0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai int32x2_t vreinterpret_s32_s16(int16x4_t __p0) {
+ int32x2_t __ret;
+ __ret = (int32x2_t)(__p0);
+ return __ret;
+}
+#else
+__ai int32x2_t vreinterpret_s32_s16(int16x4_t __p0) {
+ int32x2_t __ret;
+ __ret = (int32x2_t)(__p0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai int64x1_t vreinterpret_s64_p8(poly8x8_t __p0) {
+ int64x1_t __ret;
+ __ret = (int64x1_t)(__p0);
+ return __ret;
+}
+#else
+__ai int64x1_t vreinterpret_s64_p8(poly8x8_t __p0) {
+ int64x1_t __ret;
+ __ret = (int64x1_t)(__p0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai int64x1_t vreinterpret_s64_p64(poly64x1_t __p0) {
+ int64x1_t __ret;
+ __ret = (int64x1_t)(__p0);
+ return __ret;
+}
+#else
+__ai int64x1_t vreinterpret_s64_p64(poly64x1_t __p0) {
+ int64x1_t __ret;
+ __ret = (int64x1_t)(__p0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai int64x1_t vreinterpret_s64_p16(poly16x4_t __p0) {
+ int64x1_t __ret;
+ __ret = (int64x1_t)(__p0);
+ return __ret;
+}
+#else
+__ai int64x1_t vreinterpret_s64_p16(poly16x4_t __p0) {
+ int64x1_t __ret;
+ __ret = (int64x1_t)(__p0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai int64x1_t vreinterpret_s64_u8(uint8x8_t __p0) {
+ int64x1_t __ret;
+ __ret = (int64x1_t)(__p0);
+ return __ret;
+}
+#else
+__ai int64x1_t vreinterpret_s64_u8(uint8x8_t __p0) {
+ int64x1_t __ret;
+ __ret = (int64x1_t)(__p0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai int64x1_t vreinterpret_s64_u32(uint32x2_t __p0) {
+ int64x1_t __ret;
+ __ret = (int64x1_t)(__p0);
+ return __ret;
+}
+#else
+__ai int64x1_t vreinterpret_s64_u32(uint32x2_t __p0) {
+ int64x1_t __ret;
+ __ret = (int64x1_t)(__p0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai int64x1_t vreinterpret_s64_u64(uint64x1_t __p0) {
+ int64x1_t __ret;
+ __ret = (int64x1_t)(__p0);
+ return __ret;
+}
+#else
+__ai int64x1_t vreinterpret_s64_u64(uint64x1_t __p0) {
+ int64x1_t __ret;
+ __ret = (int64x1_t)(__p0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai int64x1_t vreinterpret_s64_u16(uint16x4_t __p0) {
+ int64x1_t __ret;
+ __ret = (int64x1_t)(__p0);
+ return __ret;
+}
+#else
+__ai int64x1_t vreinterpret_s64_u16(uint16x4_t __p0) {
+ int64x1_t __ret;
+ __ret = (int64x1_t)(__p0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai int64x1_t vreinterpret_s64_s8(int8x8_t __p0) {
+ int64x1_t __ret;
+ __ret = (int64x1_t)(__p0);
+ return __ret;
+}
+#else
+__ai int64x1_t vreinterpret_s64_s8(int8x8_t __p0) {
+ int64x1_t __ret;
+ __ret = (int64x1_t)(__p0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai int64x1_t vreinterpret_s64_f64(float64x1_t __p0) {
+ int64x1_t __ret;
+ __ret = (int64x1_t)(__p0);
+ return __ret;
+}
+#else
+__ai int64x1_t vreinterpret_s64_f64(float64x1_t __p0) {
+ int64x1_t __ret;
+ __ret = (int64x1_t)(__p0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai int64x1_t vreinterpret_s64_f32(float32x2_t __p0) {
+ int64x1_t __ret;
+ __ret = (int64x1_t)(__p0);
+ return __ret;
+}
+#else
+__ai int64x1_t vreinterpret_s64_f32(float32x2_t __p0) {
+ int64x1_t __ret;
+ __ret = (int64x1_t)(__p0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai int64x1_t vreinterpret_s64_f16(float16x4_t __p0) {
+ int64x1_t __ret;
+ __ret = (int64x1_t)(__p0);
+ return __ret;
+}
+#else
+__ai int64x1_t vreinterpret_s64_f16(float16x4_t __p0) {
+ int64x1_t __ret;
+ __ret = (int64x1_t)(__p0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai int64x1_t vreinterpret_s64_s32(int32x2_t __p0) {
+ int64x1_t __ret;
+ __ret = (int64x1_t)(__p0);
+ return __ret;
+}
+#else
+__ai int64x1_t vreinterpret_s64_s32(int32x2_t __p0) {
+ int64x1_t __ret;
+ __ret = (int64x1_t)(__p0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai int64x1_t vreinterpret_s64_s16(int16x4_t __p0) {
+ int64x1_t __ret;
+ __ret = (int64x1_t)(__p0);
+ return __ret;
+}
+#else
+__ai int64x1_t vreinterpret_s64_s16(int16x4_t __p0) {
+ int64x1_t __ret;
+ __ret = (int64x1_t)(__p0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai int16x4_t vreinterpret_s16_p8(poly8x8_t __p0) {
+ int16x4_t __ret;
+ __ret = (int16x4_t)(__p0);
+ return __ret;
+}
+#else
+__ai int16x4_t vreinterpret_s16_p8(poly8x8_t __p0) {
+ int16x4_t __ret;
+ __ret = (int16x4_t)(__p0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai int16x4_t vreinterpret_s16_p64(poly64x1_t __p0) {
+ int16x4_t __ret;
+ __ret = (int16x4_t)(__p0);
+ return __ret;
+}
+#else
+__ai int16x4_t vreinterpret_s16_p64(poly64x1_t __p0) {
+ int16x4_t __ret;
+ __ret = (int16x4_t)(__p0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai int16x4_t vreinterpret_s16_p16(poly16x4_t __p0) {
+ int16x4_t __ret;
+ __ret = (int16x4_t)(__p0);
+ return __ret;
+}
+#else
+__ai int16x4_t vreinterpret_s16_p16(poly16x4_t __p0) {
+ int16x4_t __ret;
+ __ret = (int16x4_t)(__p0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai int16x4_t vreinterpret_s16_u8(uint8x8_t __p0) {
+ int16x4_t __ret;
+ __ret = (int16x4_t)(__p0);
+ return __ret;
+}
+#else
+__ai int16x4_t vreinterpret_s16_u8(uint8x8_t __p0) {
+ int16x4_t __ret;
+ __ret = (int16x4_t)(__p0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai int16x4_t vreinterpret_s16_u32(uint32x2_t __p0) {
+ int16x4_t __ret;
+ __ret = (int16x4_t)(__p0);
+ return __ret;
+}
+#else
+__ai int16x4_t vreinterpret_s16_u32(uint32x2_t __p0) {
+ int16x4_t __ret;
+ __ret = (int16x4_t)(__p0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai int16x4_t vreinterpret_s16_u64(uint64x1_t __p0) {
+ int16x4_t __ret;
+ __ret = (int16x4_t)(__p0);
+ return __ret;
+}
+#else
+__ai int16x4_t vreinterpret_s16_u64(uint64x1_t __p0) {
+ int16x4_t __ret;
+ __ret = (int16x4_t)(__p0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai int16x4_t vreinterpret_s16_u16(uint16x4_t __p0) {
+ int16x4_t __ret;
+ __ret = (int16x4_t)(__p0);
+ return __ret;
+}
+#else
+__ai int16x4_t vreinterpret_s16_u16(uint16x4_t __p0) {
+ int16x4_t __ret;
+ __ret = (int16x4_t)(__p0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai int16x4_t vreinterpret_s16_s8(int8x8_t __p0) {
+ int16x4_t __ret;
+ __ret = (int16x4_t)(__p0);
+ return __ret;
+}
+#else
+__ai int16x4_t vreinterpret_s16_s8(int8x8_t __p0) {
+ int16x4_t __ret;
+ __ret = (int16x4_t)(__p0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai int16x4_t vreinterpret_s16_f64(float64x1_t __p0) {
+ int16x4_t __ret;
+ __ret = (int16x4_t)(__p0);
+ return __ret;
+}
+#else
+__ai int16x4_t vreinterpret_s16_f64(float64x1_t __p0) {
+ int16x4_t __ret;
+ __ret = (int16x4_t)(__p0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai int16x4_t vreinterpret_s16_f32(float32x2_t __p0) {
+ int16x4_t __ret;
+ __ret = (int16x4_t)(__p0);
+ return __ret;
+}
+#else
+__ai int16x4_t vreinterpret_s16_f32(float32x2_t __p0) {
+ int16x4_t __ret;
+ __ret = (int16x4_t)(__p0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai int16x4_t vreinterpret_s16_f16(float16x4_t __p0) {
+ int16x4_t __ret;
+ __ret = (int16x4_t)(__p0);
+ return __ret;
+}
+#else
+__ai int16x4_t vreinterpret_s16_f16(float16x4_t __p0) {
+ int16x4_t __ret;
+ __ret = (int16x4_t)(__p0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai int16x4_t vreinterpret_s16_s32(int32x2_t __p0) {
+ int16x4_t __ret;
+ __ret = (int16x4_t)(__p0);
+ return __ret;
+}
+#else
+__ai int16x4_t vreinterpret_s16_s32(int32x2_t __p0) {
+ int16x4_t __ret;
+ __ret = (int16x4_t)(__p0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai int16x4_t vreinterpret_s16_s64(int64x1_t __p0) {
+ int16x4_t __ret;
+ __ret = (int16x4_t)(__p0);
+ return __ret;
+}
+#else
+__ai int16x4_t vreinterpret_s16_s64(int64x1_t __p0) {
+ int16x4_t __ret;
+ __ret = (int16x4_t)(__p0);
+ return __ret;
+}
+#endif
+
+#endif
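+/* The next section (guarded by __ARM_FEATURE_DIRECTED_ROUNDING) wraps
+ * the AArch64 FRINT* round-to-integral instructions. Suffix meanings:
+ *   vrnd  -- toward zero (truncate)       vrnda -- to nearest, ties away from zero
+ *   vrndi -- using the current FPCR mode  vrndm -- toward minus infinity (floor)
+ *   vrndn -- to nearest, ties to even     vrndp -- toward plus infinity (ceil)
+ *   vrndx -- current mode, raising the Inexact exception where applicable
+ * In the big-endian branches the generator reverses lanes with
+ * __builtin_shufflevector before and after the builtin call; the final
+ * integer argument (e.g. 42 for float64x2_t, 41 for float32x4_t) is an
+ * internal type code consumed by __builtin_neon_*, not a rounding mode. */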
+#if __ARM_ARCH >= 8 && defined(__aarch64__) && defined(__ARM_FEATURE_DIRECTED_ROUNDING)
+#ifdef __LITTLE_ENDIAN__
+__ai float64x2_t vrndq_f64(float64x2_t __p0) {
+ float64x2_t __ret;
+ __ret = (float64x2_t) __builtin_neon_vrndq_v((int8x16_t)__p0, 42);
+ return __ret;
+}
+#else
+__ai float64x2_t vrndq_f64(float64x2_t __p0) {
+ float64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
+ float64x2_t __ret;
+ __ret = (float64x2_t) __builtin_neon_vrndq_v((int8x16_t)__rev0, 42);
+ __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai float64x1_t vrnd_f64(float64x1_t __p0) {
+ float64x1_t __ret;
+ __ret = (float64x1_t) __builtin_neon_vrnd_v((int8x8_t)__p0, 10);
+ return __ret;
+}
+#else
+__ai float64x1_t vrnd_f64(float64x1_t __p0) {
+ float64x1_t __ret;
+ __ret = (float64x1_t) __builtin_neon_vrnd_v((int8x8_t)__p0, 10);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai float64x2_t vrndaq_f64(float64x2_t __p0) {
+ float64x2_t __ret;
+ __ret = (float64x2_t) __builtin_neon_vrndaq_v((int8x16_t)__p0, 42);
+ return __ret;
+}
+#else
+__ai float64x2_t vrndaq_f64(float64x2_t __p0) {
+ float64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
+ float64x2_t __ret;
+ __ret = (float64x2_t) __builtin_neon_vrndaq_v((int8x16_t)__rev0, 42);
+ __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai float64x1_t vrnda_f64(float64x1_t __p0) {
+ float64x1_t __ret;
+ __ret = (float64x1_t) __builtin_neon_vrnda_v((int8x8_t)__p0, 10);
+ return __ret;
+}
+#else
+__ai float64x1_t vrnda_f64(float64x1_t __p0) {
+ float64x1_t __ret;
+ __ret = (float64x1_t) __builtin_neon_vrnda_v((int8x8_t)__p0, 10);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai float64x2_t vrndiq_f64(float64x2_t __p0) {
+ float64x2_t __ret;
+ __ret = (float64x2_t) __builtin_neon_vrndiq_v((int8x16_t)__p0, 42);
+ return __ret;
+}
+#else
+__ai float64x2_t vrndiq_f64(float64x2_t __p0) {
+ float64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
+ float64x2_t __ret;
+ __ret = (float64x2_t) __builtin_neon_vrndiq_v((int8x16_t)__rev0, 42);
+ __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai float32x4_t vrndiq_f32(float32x4_t __p0) {
+ float32x4_t __ret;
+ __ret = (float32x4_t) __builtin_neon_vrndiq_v((int8x16_t)__p0, 41);
+ return __ret;
+}
+#else
+__ai float32x4_t vrndiq_f32(float32x4_t __p0) {
+ float32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
+ float32x4_t __ret;
+ __ret = (float32x4_t) __builtin_neon_vrndiq_v((int8x16_t)__rev0, 41);
+ __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai float64x1_t vrndi_f64(float64x1_t __p0) {
+ float64x1_t __ret;
+ __ret = (float64x1_t) __builtin_neon_vrndi_v((int8x8_t)__p0, 10);
+ return __ret;
+}
+#else
+__ai float64x1_t vrndi_f64(float64x1_t __p0) {
+ float64x1_t __ret;
+ __ret = (float64x1_t) __builtin_neon_vrndi_v((int8x8_t)__p0, 10);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai float32x2_t vrndi_f32(float32x2_t __p0) {
+ float32x2_t __ret;
+ __ret = (float32x2_t) __builtin_neon_vrndi_v((int8x8_t)__p0, 9);
+ return __ret;
+}
+#else
+__ai float32x2_t vrndi_f32(float32x2_t __p0) {
+ float32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
+ float32x2_t __ret;
+ __ret = (float32x2_t) __builtin_neon_vrndi_v((int8x8_t)__rev0, 9);
+ __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai float64x2_t vrndmq_f64(float64x2_t __p0) {
+ float64x2_t __ret;
+ __ret = (float64x2_t) __builtin_neon_vrndmq_v((int8x16_t)__p0, 42);
+ return __ret;
+}
+#else
+__ai float64x2_t vrndmq_f64(float64x2_t __p0) {
+ float64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
+ float64x2_t __ret;
+ __ret = (float64x2_t) __builtin_neon_vrndmq_v((int8x16_t)__rev0, 42);
+ __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai float64x1_t vrndm_f64(float64x1_t __p0) {
+ float64x1_t __ret;
+ __ret = (float64x1_t) __builtin_neon_vrndm_v((int8x8_t)__p0, 10);
+ return __ret;
+}
+#else
+__ai float64x1_t vrndm_f64(float64x1_t __p0) {
+ float64x1_t __ret;
+ __ret = (float64x1_t) __builtin_neon_vrndm_v((int8x8_t)__p0, 10);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai float64x2_t vrndnq_f64(float64x2_t __p0) {
+ float64x2_t __ret;
+ __ret = (float64x2_t) __builtin_neon_vrndnq_v((int8x16_t)__p0, 42);
+ return __ret;
+}
+#else
+__ai float64x2_t vrndnq_f64(float64x2_t __p0) {
+ float64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
+ float64x2_t __ret;
+ __ret = (float64x2_t) __builtin_neon_vrndnq_v((int8x16_t)__rev0, 42);
+ __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai float64x1_t vrndn_f64(float64x1_t __p0) {
+ float64x1_t __ret;
+ __ret = (float64x1_t) __builtin_neon_vrndn_v((int8x8_t)__p0, 10);
+ return __ret;
+}
+#else
+__ai float64x1_t vrndn_f64(float64x1_t __p0) {
+ float64x1_t __ret;
+ __ret = (float64x1_t) __builtin_neon_vrndn_v((int8x8_t)__p0, 10);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai float64x2_t vrndpq_f64(float64x2_t __p0) {
+ float64x2_t __ret;
+ __ret = (float64x2_t) __builtin_neon_vrndpq_v((int8x16_t)__p0, 42);
+ return __ret;
+}
+#else
+__ai float64x2_t vrndpq_f64(float64x2_t __p0) {
+ float64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
+ float64x2_t __ret;
+ __ret = (float64x2_t) __builtin_neon_vrndpq_v((int8x16_t)__rev0, 42);
+ __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai float64x1_t vrndp_f64(float64x1_t __p0) {
+ float64x1_t __ret;
+ __ret = (float64x1_t) __builtin_neon_vrndp_v((int8x8_t)__p0, 10);
+ return __ret;
+}
+#else
+__ai float64x1_t vrndp_f64(float64x1_t __p0) {
+ float64x1_t __ret;
+ __ret = (float64x1_t) __builtin_neon_vrndp_v((int8x8_t)__p0, 10);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai float64x2_t vrndxq_f64(float64x2_t __p0) {
+ float64x2_t __ret;
+ __ret = (float64x2_t) __builtin_neon_vrndxq_v((int8x16_t)__p0, 42);
+ return __ret;
+}
+#else
+__ai float64x2_t vrndxq_f64(float64x2_t __p0) {
+ float64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
+ float64x2_t __ret;
+ __ret = (float64x2_t) __builtin_neon_vrndxq_v((int8x16_t)__rev0, 42);
+ __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai float64x1_t vrndx_f64(float64x1_t __p0) {
+ float64x1_t __ret;
+ __ret = (float64x1_t) __builtin_neon_vrndx_v((int8x8_t)__p0, 10);
+ return __ret;
+}
+#else
+__ai float64x1_t vrndx_f64(float64x1_t __p0) {
+ float64x1_t __ret;
+ __ret = (float64x1_t) __builtin_neon_vrndx_v((int8x8_t)__p0, 10);
+ return __ret;
+}
+#endif
+
+#endif
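+/* vmaxnm/vminnm map to the ARMv8 FMAXNM/FMINNM instructions, which implement
+ * the IEEE 754-2008 maxNum/minNum operations: a quiet NaN in one operand
+ * yields the other, numeric, operand. */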
+#if __ARM_ARCH >= 8 && defined(__aarch64__) && defined(__ARM_FEATURE_NUMERIC_MAXMIN)
+#ifdef __LITTLE_ENDIAN__
+__ai float64x2_t vmaxnmq_f64(float64x2_t __p0, float64x2_t __p1) {
+ float64x2_t __ret;
+ __ret = (float64x2_t) __builtin_neon_vmaxnmq_v((int8x16_t)__p0, (int8x16_t)__p1, 42);
+ return __ret;
+}
+#else
+__ai float64x2_t vmaxnmq_f64(float64x2_t __p0, float64x2_t __p1) {
+ float64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
+ float64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
+ float64x2_t __ret;
+ __ret = (float64x2_t) __builtin_neon_vmaxnmq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 42);
+ __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai float64x1_t vmaxnm_f64(float64x1_t __p0, float64x1_t __p1) {
+ float64x1_t __ret;
+ __ret = (float64x1_t) __builtin_neon_vmaxnm_v((int8x8_t)__p0, (int8x8_t)__p1, 10);
+ return __ret;
+}
+#else
+__ai float64x1_t vmaxnm_f64(float64x1_t __p0, float64x1_t __p1) {
+ float64x1_t __ret;
+ __ret = (float64x1_t) __builtin_neon_vmaxnm_v((int8x8_t)__p0, (int8x8_t)__p1, 10);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai float64x2_t vminnmq_f64(float64x2_t __p0, float64x2_t __p1) {
+ float64x2_t __ret;
+ __ret = (float64x2_t) __builtin_neon_vminnmq_v((int8x16_t)__p0, (int8x16_t)__p1, 42);
+ return __ret;
+}
+#else
+__ai float64x2_t vminnmq_f64(float64x2_t __p0, float64x2_t __p1) {
+ float64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
+ float64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
+ float64x2_t __ret;
+ __ret = (float64x2_t) __builtin_neon_vminnmq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 42);
+ __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai float64x1_t vminnm_f64(float64x1_t __p0, float64x1_t __p1) {
+ float64x1_t __ret;
+ __ret = (float64x1_t) __builtin_neon_vminnm_v((int8x8_t)__p0, (int8x8_t)__p1, 10);
+ return __ret;
+}
+#else
+__ai float64x1_t vminnm_f64(float64x1_t __p0, float64x1_t __p1) {
+ float64x1_t __ret;
+ __ret = (float64x1_t) __builtin_neon_vminnm_v((int8x8_t)__p0, (int8x8_t)__p1, 10);
+ return __ret;
+}
+#endif
+
+#endif
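+/* Crypto extension, AES group: vaeseq_u8/vaesdq_u8 perform a single round of
+ * AES encryption/decryption (AESE/AESD), and vaesmcq_u8/vaesimcq_u8 the
+ * (inverse) MixColumns transformation (AESMC/AESIMC). */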
+#if __ARM_FEATURE_CRYPTO
+#ifdef __LITTLE_ENDIAN__
+__ai uint8x16_t vaesdq_u8(uint8x16_t __p0, uint8x16_t __p1) {
+ uint8x16_t __ret;
+ __ret = (uint8x16_t) __builtin_neon_vaesdq_v((int8x16_t)__p0, (int8x16_t)__p1, 48);
+ return __ret;
+}
+#else
+__ai uint8x16_t vaesdq_u8(uint8x16_t __p0, uint8x16_t __p1) {
+ uint8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
+ uint8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
+ uint8x16_t __ret;
+ __ret = (uint8x16_t) __builtin_neon_vaesdq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 48);
+ __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint8x16_t vaeseq_u8(uint8x16_t __p0, uint8x16_t __p1) {
+ uint8x16_t __ret;
+ __ret = (uint8x16_t) __builtin_neon_vaeseq_v((int8x16_t)__p0, (int8x16_t)__p1, 48);
+ return __ret;
+}
+#else
+__ai uint8x16_t vaeseq_u8(uint8x16_t __p0, uint8x16_t __p1) {
+ uint8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
+ uint8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
+ uint8x16_t __ret;
+ __ret = (uint8x16_t) __builtin_neon_vaeseq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 48);
+ __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint8x16_t vaesimcq_u8(uint8x16_t __p0) {
+ uint8x16_t __ret;
+ __ret = (uint8x16_t) __builtin_neon_vaesimcq_v((int8x16_t)__p0, 48);
+ return __ret;
+}
+#else
+__ai uint8x16_t vaesimcq_u8(uint8x16_t __p0) {
+ uint8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
+ uint8x16_t __ret;
+ __ret = (uint8x16_t) __builtin_neon_vaesimcq_v((int8x16_t)__rev0, 48);
+ __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint8x16_t vaesmcq_u8(uint8x16_t __p0) {
+ uint8x16_t __ret;
+ __ret = (uint8x16_t) __builtin_neon_vaesmcq_v((int8x16_t)__p0, 48);
+ return __ret;
+}
+#else
+__ai uint8x16_t vaesmcq_u8(uint8x16_t __p0) {
+ uint8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
+ uint8x16_t __ret;
+ __ret = (uint8x16_t) __builtin_neon_vaesmcq_v((int8x16_t)__rev0, 48);
+ __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
+ return __ret;
+}
+#endif
+
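+/* SHA-1 group: vsha1cq/vsha1pq/vsha1mq are the choose/parity/majority
+ * hash-update steps (SHA1C/SHA1P/SHA1M), vsha1h_u32 is the fixed rotate
+ * (SHA1H), and vsha1su0q/vsha1su1q are the schedule updates
+ * (SHA1SU0/SHA1SU1). */
+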
+#ifdef __LITTLE_ENDIAN__
+__ai uint32x4_t vsha1cq_u32(uint32x4_t __p0, uint32_t __p1, uint32x4_t __p2) {
+ uint32x4_t __ret;
+ __ret = (uint32x4_t) __builtin_neon_vsha1cq_u32((int8x16_t)__p0, __p1, (int8x16_t)__p2);
+ return __ret;
+}
+#else
+__ai uint32x4_t vsha1cq_u32(uint32x4_t __p0, uint32_t __p1, uint32x4_t __p2) {
+ uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
+ uint32x4_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 3, 2, 1, 0);
+ uint32x4_t __ret;
+ __ret = (uint32x4_t) __builtin_neon_vsha1cq_u32((int8x16_t)__rev0, __p1, (int8x16_t)__rev2);
+ __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint32_t vsha1h_u32(uint32_t __p0) {
+ uint32_t __ret;
+ __ret = (uint32_t) __builtin_neon_vsha1h_u32(__p0);
+ return __ret;
+}
+#else
+__ai uint32_t vsha1h_u32(uint32_t __p0) {
+ uint32_t __ret;
+ __ret = (uint32_t) __builtin_neon_vsha1h_u32(__p0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint32x4_t vsha1mq_u32(uint32x4_t __p0, uint32_t __p1, uint32x4_t __p2) {
+ uint32x4_t __ret;
+ __ret = (uint32x4_t) __builtin_neon_vsha1mq_u32((int8x16_t)__p0, __p1, (int8x16_t)__p2);
+ return __ret;
+}
+#else
+__ai uint32x4_t vsha1mq_u32(uint32x4_t __p0, uint32_t __p1, uint32x4_t __p2) {
+ uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
+ uint32x4_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 3, 2, 1, 0);
+ uint32x4_t __ret;
+ __ret = (uint32x4_t) __builtin_neon_vsha1mq_u32((int8x16_t)__rev0, __p1, (int8x16_t)__rev2);
+ __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint32x4_t vsha1pq_u32(uint32x4_t __p0, uint32_t __p1, uint32x4_t __p2) {
+ uint32x4_t __ret;
+ __ret = (uint32x4_t) __builtin_neon_vsha1pq_u32((int8x16_t)__p0, __p1, (int8x16_t)__p2);
+ return __ret;
+}
+#else
+__ai uint32x4_t vsha1pq_u32(uint32x4_t __p0, uint32_t __p1, uint32x4_t __p2) {
+ uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
+ uint32x4_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 3, 2, 1, 0);
+ uint32x4_t __ret;
+ __ret = (uint32x4_t) __builtin_neon_vsha1pq_u32((int8x16_t)__rev0, __p1, (int8x16_t)__rev2);
+ __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint32x4_t vsha1su0q_u32(uint32x4_t __p0, uint32x4_t __p1, uint32x4_t __p2) {
+ uint32x4_t __ret;
+ __ret = (uint32x4_t) __builtin_neon_vsha1su0q_v((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 50);
+ return __ret;
+}
+#else
+__ai uint32x4_t vsha1su0q_u32(uint32x4_t __p0, uint32x4_t __p1, uint32x4_t __p2) {
+ uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
+ uint32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
+ uint32x4_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 3, 2, 1, 0);
+ uint32x4_t __ret;
+ __ret = (uint32x4_t) __builtin_neon_vsha1su0q_v((int8x16_t)__rev0, (int8x16_t)__rev1, (int8x16_t)__rev2, 50);
+ __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint32x4_t vsha1su1q_u32(uint32x4_t __p0, uint32x4_t __p1) {
+ uint32x4_t __ret;
+ __ret = (uint32x4_t) __builtin_neon_vsha1su1q_v((int8x16_t)__p0, (int8x16_t)__p1, 50);
+ return __ret;
+}
+#else
+__ai uint32x4_t vsha1su1q_u32(uint32x4_t __p0, uint32x4_t __p1) {
+ uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
+ uint32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
+ uint32x4_t __ret;
+ __ret = (uint32x4_t) __builtin_neon_vsha1su1q_v((int8x16_t)__rev0, (int8x16_t)__rev1, 50);
+ __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
+ return __ret;
+}
+#endif
+
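+/* SHA-256 group: vsha256hq/vsha256h2q are the two halves of the hash update
+ * (SHA256H/SHA256H2), and vsha256su0q/vsha256su1q are the schedule updates
+ * (SHA256SU0/SHA256SU1). */
+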
+#ifdef __LITTLE_ENDIAN__
+__ai uint32x4_t vsha256hq_u32(uint32x4_t __p0, uint32x4_t __p1, uint32x4_t __p2) {
+ uint32x4_t __ret;
+ __ret = (uint32x4_t) __builtin_neon_vsha256hq_v((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 50);
+ return __ret;
+}
+#else
+__ai uint32x4_t vsha256hq_u32(uint32x4_t __p0, uint32x4_t __p1, uint32x4_t __p2) {
+ uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
+ uint32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
+ uint32x4_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 3, 2, 1, 0);
+ uint32x4_t __ret;
+ __ret = (uint32x4_t) __builtin_neon_vsha256hq_v((int8x16_t)__rev0, (int8x16_t)__rev1, (int8x16_t)__rev2, 50);
+ __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint32x4_t vsha256h2q_u32(uint32x4_t __p0, uint32x4_t __p1, uint32x4_t __p2) {
+ uint32x4_t __ret;
+ __ret = (uint32x4_t) __builtin_neon_vsha256h2q_v((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 50);
+ return __ret;
+}
+#else
+__ai uint32x4_t vsha256h2q_u32(uint32x4_t __p0, uint32x4_t __p1, uint32x4_t __p2) {
+ uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
+ uint32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
+ uint32x4_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 3, 2, 1, 0);
+ uint32x4_t __ret;
+ __ret = (uint32x4_t) __builtin_neon_vsha256h2q_v((int8x16_t)__rev0, (int8x16_t)__rev1, (int8x16_t)__rev2, 50);
+ __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint32x4_t vsha256su0q_u32(uint32x4_t __p0, uint32x4_t __p1) {
+ uint32x4_t __ret;
+ __ret = (uint32x4_t) __builtin_neon_vsha256su0q_v((int8x16_t)__p0, (int8x16_t)__p1, 50);
+ return __ret;
+}
+#else
+__ai uint32x4_t vsha256su0q_u32(uint32x4_t __p0, uint32x4_t __p1) {
+ uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
+ uint32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
+ uint32x4_t __ret;
+ __ret = (uint32x4_t) __builtin_neon_vsha256su0q_v((int8x16_t)__rev0, (int8x16_t)__rev1, 50);
+ __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint32x4_t vsha256su1q_u32(uint32x4_t __p0, uint32x4_t __p1, uint32x4_t __p2) {
+ uint32x4_t __ret;
+ __ret = (uint32x4_t) __builtin_neon_vsha256su1q_v((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 50);
+ return __ret;
+}
+#else
+__ai uint32x4_t vsha256su1q_u32(uint32x4_t __p0, uint32x4_t __p1, uint32x4_t __p2) {
+ uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
+ uint32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
+ uint32x4_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 3, 2, 1, 0);
+ uint32x4_t __ret;
+ __ret = (uint32x4_t) __builtin_neon_vsha256su1q_v((int8x16_t)__rev0, (int8x16_t)__rev1, (int8x16_t)__rev2, 50);
+ __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
+ return __ret;
+}
+#endif
+
+#endif
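+/* __ARM_FEATURE_QRDMX (ARMv8.1): vqrdmlah/vqrdmlsh, signed saturating
+ * rounding doubling multiply accumulate/subtract. Here they are composed
+ * from vqadd/vqsub and vqrdmulh instead of calling a dedicated builtin. */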
+#if defined(__ARM_FEATURE_QRDMX)
+#ifdef __LITTLE_ENDIAN__
+__ai int32x4_t vqrdmlahq_s32(int32x4_t __p0, int32x4_t __p1, int32x4_t __p2) {
+ int32x4_t __ret;
+ __ret = vqaddq_s32(__p0, vqrdmulhq_s32(__p1, __p2));
+ return __ret;
+}
+#else
+__ai int32x4_t vqrdmlahq_s32(int32x4_t __p0, int32x4_t __p1, int32x4_t __p2) {
+ int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
+ int32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
+ int32x4_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 3, 2, 1, 0);
+ int32x4_t __ret;
+ __ret = __noswap_vqaddq_s32(__rev0, __noswap_vqrdmulhq_s32(__rev1, __rev2));
+ __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai int16x8_t vqrdmlahq_s16(int16x8_t __p0, int16x8_t __p1, int16x8_t __p2) {
+ int16x8_t __ret;
+ __ret = vqaddq_s16(__p0, vqrdmulhq_s16(__p1, __p2));
+ return __ret;
+}
+#else
+__ai int16x8_t vqrdmlahq_s16(int16x8_t __p0, int16x8_t __p1, int16x8_t __p2) {
+ int16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
+ int16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
+ int16x8_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0);
+ int16x8_t __ret;
+ __ret = __noswap_vqaddq_s16(__rev0, __noswap_vqrdmulhq_s16(__rev1, __rev2));
+ __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai int32x2_t vqrdmlah_s32(int32x2_t __p0, int32x2_t __p1, int32x2_t __p2) {
+ int32x2_t __ret;
+ __ret = vqadd_s32(__p0, vqrdmulh_s32(__p1, __p2));
+ return __ret;
+}
+#else
+__ai int32x2_t vqrdmlah_s32(int32x2_t __p0, int32x2_t __p1, int32x2_t __p2) {
+ int32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
+ int32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
+ int32x2_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 1, 0);
+ int32x2_t __ret;
+ __ret = __noswap_vqadd_s32(__rev0, __noswap_vqrdmulh_s32(__rev1, __rev2));
+ __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai int16x4_t vqrdmlah_s16(int16x4_t __p0, int16x4_t __p1, int16x4_t __p2) {
+ int16x4_t __ret;
+ __ret = vqadd_s16(__p0, vqrdmulh_s16(__p1, __p2));
+ return __ret;
+}
+#else
+__ai int16x4_t vqrdmlah_s16(int16x4_t __p0, int16x4_t __p1, int16x4_t __p2) {
+ int16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
+ int16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
+ int16x4_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 3, 2, 1, 0);
+ int16x4_t __ret;
+ __ret = __noswap_vqadd_s16(__rev0, __noswap_vqrdmulh_s16(__rev1, __rev2));
+ __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
+ return __ret;
+}
+#endif
+
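+/* The _lane_ forms are macros rather than inline functions, which keeps the
+ * lane index a compile-time constant as __builtin_shufflevector requires. */
+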
+#ifdef __LITTLE_ENDIAN__
+#define vqrdmlahq_lane_s32(__p0, __p1, __p2, __p3) __extension__ ({ \
+ int32x4_t __s0 = __p0; \
+ int32x4_t __s1 = __p1; \
+ int32x2_t __s2 = __p2; \
+ int32x4_t __ret; \
+ __ret = vqaddq_s32(__s0, vqrdmulhq_s32(__s1, __builtin_shufflevector(__s2, __s2, __p3, __p3, __p3, __p3))); \
+ __ret; \
+})
+#else
+#define vqrdmlahq_lane_s32(__p0, __p1, __p2, __p3) __extension__ ({ \
+ int32x4_t __s0 = __p0; \
+ int32x4_t __s1 = __p1; \
+ int32x2_t __s2 = __p2; \
+ int32x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
+ int32x4_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
+ int32x2_t __rev2; __rev2 = __builtin_shufflevector(__s2, __s2, 1, 0); \
+ int32x4_t __ret; \
+ __ret = __noswap_vqaddq_s32(__rev0, __noswap_vqrdmulhq_s32(__rev1, __builtin_shufflevector(__rev2, __rev2, __p3, __p3, __p3, __p3))); \
+ __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
+ __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vqrdmlahq_lane_s16(__p0, __p1, __p2, __p3) __extension__ ({ \
+ int16x8_t __s0 = __p0; \
+ int16x8_t __s1 = __p1; \
+ int16x4_t __s2 = __p2; \
+ int16x8_t __ret; \
+ __ret = vqaddq_s16(__s0, vqrdmulhq_s16(__s1, __builtin_shufflevector(__s2, __s2, __p3, __p3, __p3, __p3, __p3, __p3, __p3, __p3))); \
+ __ret; \
+})
+#else
+#define vqrdmlahq_lane_s16(__p0, __p1, __p2, __p3) __extension__ ({ \
+ int16x8_t __s0 = __p0; \
+ int16x8_t __s1 = __p1; \
+ int16x4_t __s2 = __p2; \
+ int16x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
+ int16x8_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \
+ int16x4_t __rev2; __rev2 = __builtin_shufflevector(__s2, __s2, 3, 2, 1, 0); \
+ int16x8_t __ret; \
+ __ret = __noswap_vqaddq_s16(__rev0, __noswap_vqrdmulhq_s16(__rev1, __builtin_shufflevector(__rev2, __rev2, __p3, __p3, __p3, __p3, __p3, __p3, __p3, __p3))); \
+ __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
+ __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vqrdmlah_lane_s32(__p0, __p1, __p2, __p3) __extension__ ({ \
+ int32x2_t __s0 = __p0; \
+ int32x2_t __s1 = __p1; \
+ int32x2_t __s2 = __p2; \
+ int32x2_t __ret; \
+ __ret = vqadd_s32(__s0, vqrdmulh_s32(__s1, __builtin_shufflevector(__s2, __s2, __p3, __p3))); \
+ __ret; \
+})
+#else
+#define vqrdmlah_lane_s32(__p0, __p1, __p2, __p3) __extension__ ({ \
+ int32x2_t __s0 = __p0; \
+ int32x2_t __s1 = __p1; \
+ int32x2_t __s2 = __p2; \
+ int32x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
+ int32x2_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \
+ int32x2_t __rev2; __rev2 = __builtin_shufflevector(__s2, __s2, 1, 0); \
+ int32x2_t __ret; \
+ __ret = __noswap_vqadd_s32(__rev0, __noswap_vqrdmulh_s32(__rev1, __builtin_shufflevector(__rev2, __rev2, __p3, __p3))); \
+ __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
+ __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vqrdmlah_lane_s16(__p0, __p1, __p2, __p3) __extension__ ({ \
+ int16x4_t __s0 = __p0; \
+ int16x4_t __s1 = __p1; \
+ int16x4_t __s2 = __p2; \
+ int16x4_t __ret; \
+ __ret = vqadd_s16(__s0, vqrdmulh_s16(__s1, __builtin_shufflevector(__s2, __s2, __p3, __p3, __p3, __p3))); \
+ __ret; \
+})
+#else
+#define vqrdmlah_lane_s16(__p0, __p1, __p2, __p3) __extension__ ({ \
+ int16x4_t __s0 = __p0; \
+ int16x4_t __s1 = __p1; \
+ int16x4_t __s2 = __p2; \
+ int16x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
+ int16x4_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
+ int16x4_t __rev2; __rev2 = __builtin_shufflevector(__s2, __s2, 3, 2, 1, 0); \
+ int16x4_t __ret; \
+ __ret = __noswap_vqadd_s16(__rev0, __noswap_vqrdmulh_s16(__rev1, __builtin_shufflevector(__rev2, __rev2, __p3, __p3, __p3, __p3))); \
+ __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
+ __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai int32x4_t vqrdmlshq_s32(int32x4_t __p0, int32x4_t __p1, int32x4_t __p2) {
+ int32x4_t __ret;
+ __ret = vqsubq_s32(__p0, vqrdmulhq_s32(__p1, __p2));
+ return __ret;
+}
+#else
+__ai int32x4_t vqrdmlshq_s32(int32x4_t __p0, int32x4_t __p1, int32x4_t __p2) {
+ int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
+ int32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
+ int32x4_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 3, 2, 1, 0);
+ int32x4_t __ret;
+ __ret = __noswap_vqsubq_s32(__rev0, __noswap_vqrdmulhq_s32(__rev1, __rev2));
+ __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai int16x8_t vqrdmlshq_s16(int16x8_t __p0, int16x8_t __p1, int16x8_t __p2) {
+ int16x8_t __ret;
+ __ret = vqsubq_s16(__p0, vqrdmulhq_s16(__p1, __p2));
+ return __ret;
+}
+#else
+__ai int16x8_t vqrdmlshq_s16(int16x8_t __p0, int16x8_t __p1, int16x8_t __p2) {
+ int16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
+ int16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
+ int16x8_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0);
+ int16x8_t __ret;
+ __ret = __noswap_vqsubq_s16(__rev0, __noswap_vqrdmulhq_s16(__rev1, __rev2));
+ __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai int32x2_t vqrdmlsh_s32(int32x2_t __p0, int32x2_t __p1, int32x2_t __p2) {
+ int32x2_t __ret;
+ __ret = vqsub_s32(__p0, vqrdmulh_s32(__p1, __p2));
+ return __ret;
+}
+#else
+__ai int32x2_t vqrdmlsh_s32(int32x2_t __p0, int32x2_t __p1, int32x2_t __p2) {
+ int32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
+ int32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
+ int32x2_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 1, 0);
+ int32x2_t __ret;
+ __ret = __noswap_vqsub_s32(__rev0, __noswap_vqrdmulh_s32(__rev1, __rev2));
+ __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai int16x4_t vqrdmlsh_s16(int16x4_t __p0, int16x4_t __p1, int16x4_t __p2) {
+ int16x4_t __ret;
+ __ret = vqsub_s16(__p0, vqrdmulh_s16(__p1, __p2));
+ return __ret;
+}
+#else
+__ai int16x4_t vqrdmlsh_s16(int16x4_t __p0, int16x4_t __p1, int16x4_t __p2) {
+ int16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
+ int16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
+ int16x4_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 3, 2, 1, 0);
+ int16x4_t __ret;
+ __ret = __noswap_vqsub_s16(__rev0, __noswap_vqrdmulh_s16(__rev1, __rev2));
+ __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vqrdmlshq_lane_s32(__p0, __p1, __p2, __p3) __extension__ ({ \
+ int32x4_t __s0 = __p0; \
+ int32x4_t __s1 = __p1; \
+ int32x2_t __s2 = __p2; \
+ int32x4_t __ret; \
+ __ret = vqsubq_s32(__s0, vqrdmulhq_s32(__s1, __builtin_shufflevector(__s2, __s2, __p3, __p3, __p3, __p3))); \
+ __ret; \
+})
+#else
+#define vqrdmlshq_lane_s32(__p0, __p1, __p2, __p3) __extension__ ({ \
+ int32x4_t __s0 = __p0; \
+ int32x4_t __s1 = __p1; \
+ int32x2_t __s2 = __p2; \
+ int32x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
+ int32x4_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
+ int32x2_t __rev2; __rev2 = __builtin_shufflevector(__s2, __s2, 1, 0); \
+ int32x4_t __ret; \
+ __ret = __noswap_vqsubq_s32(__rev0, __noswap_vqrdmulhq_s32(__rev1, __builtin_shufflevector(__rev2, __rev2, __p3, __p3, __p3, __p3))); \
+ __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
+ __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vqrdmlshq_lane_s16(__p0, __p1, __p2, __p3) __extension__ ({ \
+ int16x8_t __s0 = __p0; \
+ int16x8_t __s1 = __p1; \
+ int16x4_t __s2 = __p2; \
+ int16x8_t __ret; \
+ __ret = vqsubq_s16(__s0, vqrdmulhq_s16(__s1, __builtin_shufflevector(__s2, __s2, __p3, __p3, __p3, __p3, __p3, __p3, __p3, __p3))); \
+ __ret; \
+})
+#else
+#define vqrdmlshq_lane_s16(__p0, __p1, __p2, __p3) __extension__ ({ \
+ int16x8_t __s0 = __p0; \
+ int16x8_t __s1 = __p1; \
+ int16x4_t __s2 = __p2; \
+ int16x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
+ int16x8_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \
+ int16x4_t __rev2; __rev2 = __builtin_shufflevector(__s2, __s2, 3, 2, 1, 0); \
+ int16x8_t __ret; \
+ __ret = __noswap_vqsubq_s16(__rev0, __noswap_vqrdmulhq_s16(__rev1, __builtin_shufflevector(__rev2, __rev2, __p3, __p3, __p3, __p3, __p3, __p3, __p3, __p3))); \
+ __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
+ __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vqrdmlsh_lane_s32(__p0, __p1, __p2, __p3) __extension__ ({ \
+ int32x2_t __s0 = __p0; \
+ int32x2_t __s1 = __p1; \
+ int32x2_t __s2 = __p2; \
+ int32x2_t __ret; \
+ __ret = vqsub_s32(__s0, vqrdmulh_s32(__s1, __builtin_shufflevector(__s2, __s2, __p3, __p3))); \
+ __ret; \
+})
+#else
+#define vqrdmlsh_lane_s32(__p0, __p1, __p2, __p3) __extension__ ({ \
+ int32x2_t __s0 = __p0; \
+ int32x2_t __s1 = __p1; \
+ int32x2_t __s2 = __p2; \
+ int32x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
+ int32x2_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \
+ int32x2_t __rev2; __rev2 = __builtin_shufflevector(__s2, __s2, 1, 0); \
+ int32x2_t __ret; \
+ __ret = __noswap_vqsub_s32(__rev0, __noswap_vqrdmulh_s32(__rev1, __builtin_shufflevector(__rev2, __rev2, __p3, __p3))); \
+ __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
+ __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vqrdmlsh_lane_s16(__p0, __p1, __p2, __p3) __extension__ ({ \
+ int16x4_t __s0 = __p0; \
+ int16x4_t __s1 = __p1; \
+ int16x4_t __s2 = __p2; \
+ int16x4_t __ret; \
+ __ret = vqsub_s16(__s0, vqrdmulh_s16(__s1, __builtin_shufflevector(__s2, __s2, __p3, __p3, __p3, __p3))); \
+ __ret; \
+})
+#else
+#define vqrdmlsh_lane_s16(__p0, __p1, __p2, __p3) __extension__ ({ \
+ int16x4_t __s0 = __p0; \
+ int16x4_t __s1 = __p1; \
+ int16x4_t __s2 = __p2; \
+ int16x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
+ int16x4_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
+ int16x4_t __rev2; __rev2 = __builtin_shufflevector(__s2, __s2, 3, 2, 1, 0); \
+ int16x4_t __ret; \
+ __ret = __noswap_vqsub_s16(__rev0, __noswap_vqrdmulh_s16(__rev1, __builtin_shufflevector(__rev2, __rev2, __p3, __p3, __p3, __p3))); \
+ __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
+ __ret; \
+})
+#endif
+
+#endif
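+/* AArch64 only: the _laneq_ variants take the multiplier lane from a 128-bit
+ * vector rather than a 64-bit one. */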
+#if defined(__ARM_FEATURE_QRDMX) && defined(__aarch64__)
+#ifdef __LITTLE_ENDIAN__
+#define vqrdmlahq_laneq_s32(__p0, __p1, __p2, __p3) __extension__ ({ \
+ int32x4_t __s0 = __p0; \
+ int32x4_t __s1 = __p1; \
+ int32x4_t __s2 = __p2; \
+ int32x4_t __ret; \
+ __ret = vqaddq_s32(__s0, vqrdmulhq_s32(__s1, __builtin_shufflevector(__s2, __s2, __p3, __p3, __p3, __p3))); \
+ __ret; \
+})
+#else
+#define vqrdmlahq_laneq_s32(__p0, __p1, __p2, __p3) __extension__ ({ \
+ int32x4_t __s0 = __p0; \
+ int32x4_t __s1 = __p1; \
+ int32x4_t __s2 = __p2; \
+ int32x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
+ int32x4_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
+ int32x4_t __rev2; __rev2 = __builtin_shufflevector(__s2, __s2, 3, 2, 1, 0); \
+ int32x4_t __ret; \
+ __ret = __noswap_vqaddq_s32(__rev0, __noswap_vqrdmulhq_s32(__rev1, __builtin_shufflevector(__rev2, __rev2, __p3, __p3, __p3, __p3))); \
+ __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
+ __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vqrdmlahq_laneq_s16(__p0, __p1, __p2, __p3) __extension__ ({ \
+ int16x8_t __s0 = __p0; \
+ int16x8_t __s1 = __p1; \
+ int16x8_t __s2 = __p2; \
+ int16x8_t __ret; \
+ __ret = vqaddq_s16(__s0, vqrdmulhq_s16(__s1, __builtin_shufflevector(__s2, __s2, __p3, __p3, __p3, __p3, __p3, __p3, __p3, __p3))); \
+ __ret; \
+})
+#else
+#define vqrdmlahq_laneq_s16(__p0, __p1, __p2, __p3) __extension__ ({ \
+ int16x8_t __s0 = __p0; \
+ int16x8_t __s1 = __p1; \
+ int16x8_t __s2 = __p2; \
+ int16x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
+ int16x8_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \
+ int16x8_t __rev2; __rev2 = __builtin_shufflevector(__s2, __s2, 7, 6, 5, 4, 3, 2, 1, 0); \
+ int16x8_t __ret; \
+ __ret = __noswap_vqaddq_s16(__rev0, __noswap_vqrdmulhq_s16(__rev1, __builtin_shufflevector(__rev2, __rev2, __p3, __p3, __p3, __p3, __p3, __p3, __p3, __p3))); \
+ __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
+ __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vqrdmlah_laneq_s32(__p0, __p1, __p2, __p3) __extension__ ({ \
+ int32x2_t __s0 = __p0; \
+ int32x2_t __s1 = __p1; \
+ int32x4_t __s2 = __p2; \
+ int32x2_t __ret; \
+ __ret = vqadd_s32(__s0, vqrdmulh_s32(__s1, __builtin_shufflevector(__s2, __s2, __p3, __p3))); \
+ __ret; \
+})
+#else
+#define vqrdmlah_laneq_s32(__p0, __p1, __p2, __p3) __extension__ ({ \
+ int32x2_t __s0 = __p0; \
+ int32x2_t __s1 = __p1; \
+ int32x4_t __s2 = __p2; \
+ int32x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
+ int32x2_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \
+ int32x4_t __rev2; __rev2 = __builtin_shufflevector(__s2, __s2, 3, 2, 1, 0); \
+ int32x2_t __ret; \
+ __ret = __noswap_vqadd_s32(__rev0, __noswap_vqrdmulh_s32(__rev1, __builtin_shufflevector(__rev2, __rev2, __p3, __p3))); \
+ __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
+ __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vqrdmlah_laneq_s16(__p0, __p1, __p2, __p3) __extension__ ({ \
+ int16x4_t __s0 = __p0; \
+ int16x4_t __s1 = __p1; \
+ int16x8_t __s2 = __p2; \
+ int16x4_t __ret; \
+ __ret = vqadd_s16(__s0, vqrdmulh_s16(__s1, __builtin_shufflevector(__s2, __s2, __p3, __p3, __p3, __p3))); \
+ __ret; \
+})
+#else
+#define vqrdmlah_laneq_s16(__p0, __p1, __p2, __p3) __extension__ ({ \
+ int16x4_t __s0 = __p0; \
+ int16x4_t __s1 = __p1; \
+ int16x8_t __s2 = __p2; \
+ int16x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
+ int16x4_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
+ int16x8_t __rev2; __rev2 = __builtin_shufflevector(__s2, __s2, 7, 6, 5, 4, 3, 2, 1, 0); \
+ int16x4_t __ret; \
+ __ret = __noswap_vqadd_s16(__rev0, __noswap_vqrdmulh_s16(__rev1, __builtin_shufflevector(__rev2, __rev2, __p3, __p3, __p3, __p3))); \
+ __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
+ __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vqrdmlshq_laneq_s32(__p0, __p1, __p2, __p3) __extension__ ({ \
+ int32x4_t __s0 = __p0; \
+ int32x4_t __s1 = __p1; \
+ int32x4_t __s2 = __p2; \
+ int32x4_t __ret; \
+ __ret = vqsubq_s32(__s0, vqrdmulhq_s32(__s1, __builtin_shufflevector(__s2, __s2, __p3, __p3, __p3, __p3))); \
+ __ret; \
+})
+#else
+#define vqrdmlshq_laneq_s32(__p0, __p1, __p2, __p3) __extension__ ({ \
+ int32x4_t __s0 = __p0; \
+ int32x4_t __s1 = __p1; \
+ int32x4_t __s2 = __p2; \
+ int32x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
+ int32x4_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
+ int32x4_t __rev2; __rev2 = __builtin_shufflevector(__s2, __s2, 3, 2, 1, 0); \
+ int32x4_t __ret; \
+ __ret = __noswap_vqsubq_s32(__rev0, __noswap_vqrdmulhq_s32(__rev1, __builtin_shufflevector(__rev2, __rev2, __p3, __p3, __p3, __p3))); \
+ __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
+ __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vqrdmlshq_laneq_s16(__p0, __p1, __p2, __p3) __extension__ ({ \
+ int16x8_t __s0 = __p0; \
+ int16x8_t __s1 = __p1; \
+ int16x8_t __s2 = __p2; \
+ int16x8_t __ret; \
+ __ret = vqsubq_s16(__s0, vqrdmulhq_s16(__s1, __builtin_shufflevector(__s2, __s2, __p3, __p3, __p3, __p3, __p3, __p3, __p3, __p3))); \
+ __ret; \
+})
+#else
+#define vqrdmlshq_laneq_s16(__p0, __p1, __p2, __p3) __extension__ ({ \
+ int16x8_t __s0 = __p0; \
+ int16x8_t __s1 = __p1; \
+ int16x8_t __s2 = __p2; \
+ int16x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
+ int16x8_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \
+ int16x8_t __rev2; __rev2 = __builtin_shufflevector(__s2, __s2, 7, 6, 5, 4, 3, 2, 1, 0); \
+ int16x8_t __ret; \
+ __ret = __noswap_vqsubq_s16(__rev0, __noswap_vqrdmulhq_s16(__rev1, __builtin_shufflevector(__rev2, __rev2, __p3, __p3, __p3, __p3, __p3, __p3, __p3, __p3))); \
+ __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
+ __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vqrdmlsh_laneq_s32(__p0, __p1, __p2, __p3) __extension__ ({ \
+ int32x2_t __s0 = __p0; \
+ int32x2_t __s1 = __p1; \
+ int32x4_t __s2 = __p2; \
+ int32x2_t __ret; \
+ __ret = vqsub_s32(__s0, vqrdmulh_s32(__s1, __builtin_shufflevector(__s2, __s2, __p3, __p3))); \
+ __ret; \
+})
+#else
+#define vqrdmlsh_laneq_s32(__p0, __p1, __p2, __p3) __extension__ ({ \
+ int32x2_t __s0 = __p0; \
+ int32x2_t __s1 = __p1; \
+ int32x4_t __s2 = __p2; \
+ int32x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
+ int32x2_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \
+ int32x4_t __rev2; __rev2 = __builtin_shufflevector(__s2, __s2, 3, 2, 1, 0); \
+ int32x2_t __ret; \
+ __ret = __noswap_vqsub_s32(__rev0, __noswap_vqrdmulh_s32(__rev1, __builtin_shufflevector(__rev2, __rev2, __p3, __p3))); \
+ __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
+ __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vqrdmlsh_laneq_s16(__p0, __p1, __p2, __p3) __extension__ ({ \
+ int16x4_t __s0 = __p0; \
+ int16x4_t __s1 = __p1; \
+ int16x8_t __s2 = __p2; \
+ int16x4_t __ret; \
+ __ret = vqsub_s16(__s0, vqrdmulh_s16(__s1, __builtin_shufflevector(__s2, __s2, __p3, __p3, __p3, __p3))); \
+ __ret; \
+})
+#else
+#define vqrdmlsh_laneq_s16(__p0, __p1, __p2, __p3) __extension__ ({ \
+ int16x4_t __s0 = __p0; \
+ int16x4_t __s1 = __p1; \
+ int16x8_t __s2 = __p2; \
+ int16x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
+ int16x4_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
+ int16x8_t __rev2; __rev2 = __builtin_shufflevector(__s2, __s2, 7, 6, 5, 4, 3, 2, 1, 0); \
+ int16x4_t __ret; \
+ __ret = __noswap_vqsub_s16(__rev0, __noswap_vqrdmulh_s16(__rev1, __builtin_shufflevector(__rev2, __rev2, __p3, __p3, __p3, __p3))); \
+ __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
+ __ret; \
+})
+#endif
+
+#endif
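+/* Intrinsics available only on AArch64: float64 vector arithmetic, scalar
+ * (single-element) forms, narrowing high-half combines, and across-lanes
+ * reductions. */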
+#if defined(__aarch64__)
+#ifdef __LITTLE_ENDIAN__
+__ai float64x2_t vabdq_f64(float64x2_t __p0, float64x2_t __p1) {
+ float64x2_t __ret;
+ __ret = (float64x2_t) __builtin_neon_vabdq_v((int8x16_t)__p0, (int8x16_t)__p1, 42);
+ return __ret;
+}
+#else
+__ai float64x2_t vabdq_f64(float64x2_t __p0, float64x2_t __p1) {
+ float64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
+ float64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
+ float64x2_t __ret;
+ __ret = (float64x2_t) __builtin_neon_vabdq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 42);
+ __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai float64x1_t vabd_f64(float64x1_t __p0, float64x1_t __p1) {
+ float64x1_t __ret;
+ __ret = (float64x1_t) __builtin_neon_vabd_v((int8x8_t)__p0, (int8x8_t)__p1, 10);
+ return __ret;
+}
+#else
+__ai float64x1_t vabd_f64(float64x1_t __p0, float64x1_t __p1) {
+ float64x1_t __ret;
+ __ret = (float64x1_t) __builtin_neon_vabd_v((int8x8_t)__p0, (int8x8_t)__p1, 10);
+ return __ret;
+}
+#endif
+
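+/* Scalar forms such as vabdd_f64/vabds_f32 operate on plain scalar types, so
+ * no lane reversal is needed and the big-endian bodies are identical to the
+ * little-endian ones. */
+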
+#ifdef __LITTLE_ENDIAN__
+__ai float64_t vabdd_f64(float64_t __p0, float64_t __p1) {
+ float64_t __ret;
+ __ret = (float64_t) __builtin_neon_vabdd_f64(__p0, __p1);
+ return __ret;
+}
+#else
+__ai float64_t vabdd_f64(float64_t __p0, float64_t __p1) {
+ float64_t __ret;
+ __ret = (float64_t) __builtin_neon_vabdd_f64(__p0, __p1);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai float32_t vabds_f32(float32_t __p0, float32_t __p1) {
+ float32_t __ret;
+ __ret = (float32_t) __builtin_neon_vabds_f32(__p0, __p1);
+ return __ret;
+}
+#else
+__ai float32_t vabds_f32(float32_t __p0, float32_t __p1) {
+ float32_t __ret;
+ __ret = (float32_t) __builtin_neon_vabds_f32(__p0, __p1);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai float64x2_t vabsq_f64(float64x2_t __p0) {
+ float64x2_t __ret;
+ __ret = (float64x2_t) __builtin_neon_vabsq_v((int8x16_t)__p0, 42);
+ return __ret;
+}
+#else
+__ai float64x2_t vabsq_f64(float64x2_t __p0) {
+ float64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
+ float64x2_t __ret;
+ __ret = (float64x2_t) __builtin_neon_vabsq_v((int8x16_t)__rev0, 42);
+ __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai int64x2_t vabsq_s64(int64x2_t __p0) {
+ int64x2_t __ret;
+ __ret = (int64x2_t) __builtin_neon_vabsq_v((int8x16_t)__p0, 35);
+ return __ret;
+}
+#else
+__ai int64x2_t vabsq_s64(int64x2_t __p0) {
+ int64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
+ int64x2_t __ret;
+ __ret = (int64x2_t) __builtin_neon_vabsq_v((int8x16_t)__rev0, 35);
+ __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai float64x1_t vabs_f64(float64x1_t __p0) {
+ float64x1_t __ret;
+ __ret = (float64x1_t) __builtin_neon_vabs_v((int8x8_t)__p0, 10);
+ return __ret;
+}
+#else
+__ai float64x1_t vabs_f64(float64x1_t __p0) {
+ float64x1_t __ret;
+ __ret = (float64x1_t) __builtin_neon_vabs_v((int8x8_t)__p0, 10);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai int64x1_t vabs_s64(int64x1_t __p0) {
+ int64x1_t __ret;
+ __ret = (int64x1_t) __builtin_neon_vabs_v((int8x8_t)__p0, 3);
+ return __ret;
+}
+#else
+__ai int64x1_t vabs_s64(int64x1_t __p0) {
+ int64x1_t __ret;
+ __ret = (int64x1_t) __builtin_neon_vabs_v((int8x8_t)__p0, 3);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai int64_t vabsd_s64(int64_t __p0) {
+ int64_t __ret;
+ __ret = (int64_t) __builtin_neon_vabsd_s64(__p0);
+ return __ret;
+}
+#else
+__ai int64_t vabsd_s64(int64_t __p0) {
+ int64_t __ret;
+ __ret = (int64_t) __builtin_neon_vabsd_s64(__p0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai float64x2_t vaddq_f64(float64x2_t __p0, float64x2_t __p1) {
+ float64x2_t __ret;
+ __ret = __p0 + __p1;
+ return __ret;
+}
+#else
+__ai float64x2_t vaddq_f64(float64x2_t __p0, float64x2_t __p1) {
+ float64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
+ float64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
+ float64x2_t __ret;
+ __ret = __rev0 + __rev1;
+ __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai float64x1_t vadd_f64(float64x1_t __p0, float64x1_t __p1) {
+ float64x1_t __ret;
+ __ret = __p0 + __p1;
+ return __ret;
+}
+#else
+__ai float64x1_t vadd_f64(float64x1_t __p0, float64x1_t __p1) {
+ float64x1_t __ret;
+ __ret = __p0 + __p1;
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint64_t vaddd_u64(uint64_t __p0, uint64_t __p1) {
+ uint64_t __ret;
+ __ret = (uint64_t) __builtin_neon_vaddd_u64(__p0, __p1);
+ return __ret;
+}
+#else
+__ai uint64_t vaddd_u64(uint64_t __p0, uint64_t __p1) {
+ uint64_t __ret;
+ __ret = (uint64_t) __builtin_neon_vaddd_u64(__p0, __p1);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai int64_t vaddd_s64(int64_t __p0, int64_t __p1) {
+ int64_t __ret;
+ __ret = (int64_t) __builtin_neon_vaddd_s64(__p0, __p1);
+ return __ret;
+}
+#else
+__ai int64_t vaddd_s64(int64_t __p0, int64_t __p1) {
+ int64_t __ret;
+ __ret = (int64_t) __builtin_neon_vaddd_s64(__p0, __p1);
+ return __ret;
+}
+#endif
+
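+/* vaddhn_high_*: compute a narrowing add returning the high half of each
+ * wide element and pack it into the upper half of the result, composed from
+ * vcombine and vaddhn. */
+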
+#ifdef __LITTLE_ENDIAN__
+__ai uint16x8_t vaddhn_high_u32(uint16x4_t __p0, uint32x4_t __p1, uint32x4_t __p2) {
+ uint16x8_t __ret;
+ __ret = vcombine_u16(__p0, vaddhn_u32(__p1, __p2));
+ return __ret;
+}
+#else
+__ai uint16x8_t vaddhn_high_u32(uint16x4_t __p0, uint32x4_t __p1, uint32x4_t __p2) {
+ uint16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
+ uint32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
+ uint32x4_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 3, 2, 1, 0);
+ uint16x8_t __ret;
+ __ret = __noswap_vcombine_u16(__rev0, __noswap_vaddhn_u32(__rev1, __rev2));
+ __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint32x4_t vaddhn_high_u64(uint32x2_t __p0, uint64x2_t __p1, uint64x2_t __p2) {
+ uint32x4_t __ret;
+ __ret = vcombine_u32(__p0, vaddhn_u64(__p1, __p2));
+ return __ret;
+}
+#else
+__ai uint32x4_t vaddhn_high_u64(uint32x2_t __p0, uint64x2_t __p1, uint64x2_t __p2) {
+ uint32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
+ uint64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
+ uint64x2_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 1, 0);
+ uint32x4_t __ret;
+ __ret = __noswap_vcombine_u32(__rev0, __noswap_vaddhn_u64(__rev1, __rev2));
+ __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint8x16_t vaddhn_high_u16(uint8x8_t __p0, uint16x8_t __p1, uint16x8_t __p2) {
+ uint8x16_t __ret;
+ __ret = vcombine_u8(__p0, vaddhn_u16(__p1, __p2));
+ return __ret;
+}
+#else
+__ai uint8x16_t vaddhn_high_u16(uint8x8_t __p0, uint16x8_t __p1, uint16x8_t __p2) {
+ uint8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
+ uint16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
+ uint16x8_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0);
+ uint8x16_t __ret;
+ __ret = __noswap_vcombine_u8(__rev0, __noswap_vaddhn_u16(__rev1, __rev2));
+ __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai int16x8_t vaddhn_high_s32(int16x4_t __p0, int32x4_t __p1, int32x4_t __p2) {
+ int16x8_t __ret;
+ __ret = vcombine_s16(__p0, vaddhn_s32(__p1, __p2));
+ return __ret;
+}
+#else
+__ai int16x8_t vaddhn_high_s32(int16x4_t __p0, int32x4_t __p1, int32x4_t __p2) {
+ int16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
+ int32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
+ int32x4_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 3, 2, 1, 0);
+ int16x8_t __ret;
+ __ret = __noswap_vcombine_s16(__rev0, __noswap_vaddhn_s32(__rev1, __rev2));
+ __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai int32x4_t vaddhn_high_s64(int32x2_t __p0, int64x2_t __p1, int64x2_t __p2) {
+ int32x4_t __ret;
+ __ret = vcombine_s32(__p0, vaddhn_s64(__p1, __p2));
+ return __ret;
+}
+#else
+__ai int32x4_t vaddhn_high_s64(int32x2_t __p0, int64x2_t __p1, int64x2_t __p2) {
+ int32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
+ int64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
+ int64x2_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 1, 0);
+ int32x4_t __ret;
+ __ret = __noswap_vcombine_s32(__rev0, __noswap_vaddhn_s64(__rev1, __rev2));
+ __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai int8x16_t vaddhn_high_s16(int8x8_t __p0, int16x8_t __p1, int16x8_t __p2) {
+ int8x16_t __ret;
+ __ret = vcombine_s8(__p0, vaddhn_s16(__p1, __p2));
+ return __ret;
+}
+#else
+__ai int8x16_t vaddhn_high_s16(int8x8_t __p0, int16x8_t __p1, int16x8_t __p2) {
+ int8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
+ int16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
+ int16x8_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0);
+ int8x16_t __ret;
+ __ret = __noswap_vcombine_s8(__rev0, __noswap_vaddhn_s16(__rev1, __rev2));
+ __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
+ return __ret;
+}
+#endif
+
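+/* vaddlv/vaddlvq: across-lanes add long (UADDLV/SADDLV); all lanes are
+ * summed into a single scalar of twice the element width. */
+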
+#ifdef __LITTLE_ENDIAN__
+__ai uint16_t vaddlvq_u8(uint8x16_t __p0) {
+ uint16_t __ret;
+ __ret = (uint16_t) __builtin_neon_vaddlvq_u8((int8x16_t)__p0);
+ return __ret;
+}
+#else
+__ai uint16_t vaddlvq_u8(uint8x16_t __p0) {
+ uint8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
+ uint16_t __ret;
+ __ret = (uint16_t) __builtin_neon_vaddlvq_u8((int8x16_t)__rev0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint64_t vaddlvq_u32(uint32x4_t __p0) {
+ uint64_t __ret;
+ __ret = (uint64_t) __builtin_neon_vaddlvq_u32((int8x16_t)__p0);
+ return __ret;
+}
+#else
+__ai uint64_t vaddlvq_u32(uint32x4_t __p0) {
+ uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
+ uint64_t __ret;
+ __ret = (uint64_t) __builtin_neon_vaddlvq_u32((int8x16_t)__rev0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint32_t vaddlvq_u16(uint16x8_t __p0) {
+ uint32_t __ret;
+ __ret = (uint32_t) __builtin_neon_vaddlvq_u16((int8x16_t)__p0);
+ return __ret;
+}
+#else
+__ai uint32_t vaddlvq_u16(uint16x8_t __p0) {
+ uint16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
+ uint32_t __ret;
+ __ret = (uint32_t) __builtin_neon_vaddlvq_u16((int8x16_t)__rev0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai int16_t vaddlvq_s8(int8x16_t __p0) {
+ int16_t __ret;
+ __ret = (int16_t) __builtin_neon_vaddlvq_s8((int8x16_t)__p0);
+ return __ret;
+}
+#else
+__ai int16_t vaddlvq_s8(int8x16_t __p0) {
+ int8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
+ int16_t __ret;
+ __ret = (int16_t) __builtin_neon_vaddlvq_s8((int8x16_t)__rev0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai int64_t vaddlvq_s32(int32x4_t __p0) {
+ int64_t __ret;
+ __ret = (int64_t) __builtin_neon_vaddlvq_s32((int8x16_t)__p0);
+ return __ret;
+}
+#else
+__ai int64_t vaddlvq_s32(int32x4_t __p0) {
+ int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
+ int64_t __ret;
+ __ret = (int64_t) __builtin_neon_vaddlvq_s32((int8x16_t)__rev0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai int32_t vaddlvq_s16(int16x8_t __p0) {
+ int32_t __ret;
+ __ret = (int32_t) __builtin_neon_vaddlvq_s16((int8x16_t)__p0);
+ return __ret;
+}
+#else
+__ai int32_t vaddlvq_s16(int16x8_t __p0) {
+ int16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
+ int32_t __ret;
+ __ret = (int32_t) __builtin_neon_vaddlvq_s16((int8x16_t)__rev0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint16_t vaddlv_u8(uint8x8_t __p0) {
+ uint16_t __ret;
+ __ret = (uint16_t) __builtin_neon_vaddlv_u8((int8x8_t)__p0);
+ return __ret;
+}
+#else
+__ai uint16_t vaddlv_u8(uint8x8_t __p0) {
+ uint8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
+ uint16_t __ret;
+ __ret = (uint16_t) __builtin_neon_vaddlv_u8((int8x8_t)__rev0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint64_t vaddlv_u32(uint32x2_t __p0) {
+ uint64_t __ret;
+ __ret = (uint64_t) __builtin_neon_vaddlv_u32((int8x8_t)__p0);
+ return __ret;
+}
+#else
+__ai uint64_t vaddlv_u32(uint32x2_t __p0) {
+ uint32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
+ uint64_t __ret;
+ __ret = (uint64_t) __builtin_neon_vaddlv_u32((int8x8_t)__rev0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint32_t vaddlv_u16(uint16x4_t __p0) {
+ uint32_t __ret;
+ __ret = (uint32_t) __builtin_neon_vaddlv_u16((int8x8_t)__p0);
+ return __ret;
+}
+#else
+__ai uint32_t vaddlv_u16(uint16x4_t __p0) {
+ uint16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
+ uint32_t __ret;
+ __ret = (uint32_t) __builtin_neon_vaddlv_u16((int8x8_t)__rev0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai int16_t vaddlv_s8(int8x8_t __p0) {
+ int16_t __ret;
+ __ret = (int16_t) __builtin_neon_vaddlv_s8((int8x8_t)__p0);
+ return __ret;
+}
+#else
+__ai int16_t vaddlv_s8(int8x8_t __p0) {
+ int8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
+ int16_t __ret;
+ __ret = (int16_t) __builtin_neon_vaddlv_s8((int8x8_t)__rev0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai int64_t vaddlv_s32(int32x2_t __p0) {
+ int64_t __ret;
+ __ret = (int64_t) __builtin_neon_vaddlv_s32((int8x8_t)__p0);
+ return __ret;
+}
+#else
+__ai int64_t vaddlv_s32(int32x2_t __p0) {
+ int32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
+ int64_t __ret;
+ __ret = (int64_t) __builtin_neon_vaddlv_s32((int8x8_t)__rev0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai int32_t vaddlv_s16(int16x4_t __p0) {
+ int32_t __ret;
+ __ret = (int32_t) __builtin_neon_vaddlv_s16((int8x8_t)__p0);
+ return __ret;
+}
+#else
+__ai int32_t vaddlv_s16(int16x4_t __p0) {
+ int16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
+ int32_t __ret;
+ __ret = (int32_t) __builtin_neon_vaddlv_s16((int8x8_t)__rev0);
+ return __ret;
+}
+#endif
+
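+/* vaddv/vaddvq: across-lanes add; all lanes are summed into a single scalar
+ * of the same width as one element. */
+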
+#ifdef __LITTLE_ENDIAN__
+__ai uint8_t vaddvq_u8(uint8x16_t __p0) {
+ uint8_t __ret;
+ __ret = (uint8_t) __builtin_neon_vaddvq_u8((int8x16_t)__p0);
+ return __ret;
+}
+#else
+__ai uint8_t vaddvq_u8(uint8x16_t __p0) {
+ uint8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
+ uint8_t __ret;
+ __ret = (uint8_t) __builtin_neon_vaddvq_u8((int8x16_t)__rev0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint32_t vaddvq_u32(uint32x4_t __p0) {
+ uint32_t __ret;
+ __ret = (uint32_t) __builtin_neon_vaddvq_u32((int8x16_t)__p0);
+ return __ret;
+}
+#else
+__ai uint32_t vaddvq_u32(uint32x4_t __p0) {
+ uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
+ uint32_t __ret;
+ __ret = (uint32_t) __builtin_neon_vaddvq_u32((int8x16_t)__rev0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint64_t vaddvq_u64(uint64x2_t __p0) {
+ uint64_t __ret;
+ __ret = (uint64_t) __builtin_neon_vaddvq_u64((int8x16_t)__p0);
+ return __ret;
+}
+#else
+__ai uint64_t vaddvq_u64(uint64x2_t __p0) {
+ uint64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
+ uint64_t __ret;
+ __ret = (uint64_t) __builtin_neon_vaddvq_u64((int8x16_t)__rev0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint16_t vaddvq_u16(uint16x8_t __p0) {
+ uint16_t __ret;
+ __ret = (uint16_t) __builtin_neon_vaddvq_u16((int8x16_t)__p0);
+ return __ret;
+}
+#else
+__ai uint16_t vaddvq_u16(uint16x8_t __p0) {
+ uint16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
+ uint16_t __ret;
+ __ret = (uint16_t) __builtin_neon_vaddvq_u16((int8x16_t)__rev0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai int8_t vaddvq_s8(int8x16_t __p0) {
+ int8_t __ret;
+ __ret = (int8_t) __builtin_neon_vaddvq_s8((int8x16_t)__p0);
+ return __ret;
+}
+#else
+__ai int8_t vaddvq_s8(int8x16_t __p0) {
+ int8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
+ int8_t __ret;
+ __ret = (int8_t) __builtin_neon_vaddvq_s8((int8x16_t)__rev0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai float64_t vaddvq_f64(float64x2_t __p0) {
+ float64_t __ret;
+ __ret = (float64_t) __builtin_neon_vaddvq_f64((int8x16_t)__p0);
+ return __ret;
+}
+#else
+__ai float64_t vaddvq_f64(float64x2_t __p0) {
+ float64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
+ float64_t __ret;
+ __ret = (float64_t) __builtin_neon_vaddvq_f64((int8x16_t)__rev0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai float32_t vaddvq_f32(float32x4_t __p0) {
+ float32_t __ret;
+ __ret = (float32_t) __builtin_neon_vaddvq_f32((int8x16_t)__p0);
+ return __ret;
+}
+#else
+__ai float32_t vaddvq_f32(float32x4_t __p0) {
+ float32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
+ float32_t __ret;
+ __ret = (float32_t) __builtin_neon_vaddvq_f32((int8x16_t)__rev0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai int32_t vaddvq_s32(int32x4_t __p0) {
+ int32_t __ret;
+ __ret = (int32_t) __builtin_neon_vaddvq_s32((int8x16_t)__p0);
+ return __ret;
+}
+#else
+__ai int32_t vaddvq_s32(int32x4_t __p0) {
+ int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
+ int32_t __ret;
+ __ret = (int32_t) __builtin_neon_vaddvq_s32((int8x16_t)__rev0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai int64_t vaddvq_s64(int64x2_t __p0) {
+ int64_t __ret;
+ __ret = (int64_t) __builtin_neon_vaddvq_s64((int8x16_t)__p0);
+ return __ret;
+}
+#else
+__ai int64_t vaddvq_s64(int64x2_t __p0) {
+ int64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
+ int64_t __ret;
+ __ret = (int64_t) __builtin_neon_vaddvq_s64((int8x16_t)__rev0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai int16_t vaddvq_s16(int16x8_t __p0) {
+ int16_t __ret;
+ __ret = (int16_t) __builtin_neon_vaddvq_s16((int8x16_t)__p0);
+ return __ret;
+}
+#else
+__ai int16_t vaddvq_s16(int16x8_t __p0) {
+ int16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
+ int16_t __ret;
+ __ret = (int16_t) __builtin_neon_vaddvq_s16((int8x16_t)__rev0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint8_t vaddv_u8(uint8x8_t __p0) {
+ uint8_t __ret;
+ __ret = (uint8_t) __builtin_neon_vaddv_u8((int8x8_t)__p0);
+ return __ret;
+}
+#else
+__ai uint8_t vaddv_u8(uint8x8_t __p0) {
+ uint8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
+ uint8_t __ret;
+ __ret = (uint8_t) __builtin_neon_vaddv_u8((int8x8_t)__rev0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint32_t vaddv_u32(uint32x2_t __p0) {
+ uint32_t __ret;
+ __ret = (uint32_t) __builtin_neon_vaddv_u32((int8x8_t)__p0);
+ return __ret;
+}
+#else
+__ai uint32_t vaddv_u32(uint32x2_t __p0) {
+ uint32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
+ uint32_t __ret;
+ __ret = (uint32_t) __builtin_neon_vaddv_u32((int8x8_t)__rev0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint16_t vaddv_u16(uint16x4_t __p0) {
+ uint16_t __ret;
+ __ret = (uint16_t) __builtin_neon_vaddv_u16((int8x8_t)__p0);
+ return __ret;
+}
+#else
+__ai uint16_t vaddv_u16(uint16x4_t __p0) {
+ uint16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
+ uint16_t __ret;
+ __ret = (uint16_t) __builtin_neon_vaddv_u16((int8x8_t)__rev0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai int8_t vaddv_s8(int8x8_t __p0) {
+ int8_t __ret;
+ __ret = (int8_t) __builtin_neon_vaddv_s8((int8x8_t)__p0);
+ return __ret;
+}
+#else
+__ai int8_t vaddv_s8(int8x8_t __p0) {
+ int8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
+ int8_t __ret;
+ __ret = (int8_t) __builtin_neon_vaddv_s8((int8x8_t)__rev0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai float32_t vaddv_f32(float32x2_t __p0) {
+ float32_t __ret;
+ __ret = (float32_t) __builtin_neon_vaddv_f32((int8x8_t)__p0);
+ return __ret;
+}
+#else
+__ai float32_t vaddv_f32(float32x2_t __p0) {
+ float32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
+ float32_t __ret;
+ __ret = (float32_t) __builtin_neon_vaddv_f32((int8x8_t)__rev0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai int32_t vaddv_s32(int32x2_t __p0) {
+ int32_t __ret;
+ __ret = (int32_t) __builtin_neon_vaddv_s32((int8x8_t)__p0);
+ return __ret;
+}
+#else
+__ai int32_t vaddv_s32(int32x2_t __p0) {
+ int32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
+ int32_t __ret;
+ __ret = (int32_t) __builtin_neon_vaddv_s32((int8x8_t)__rev0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai int16_t vaddv_s16(int16x4_t __p0) {
+ int16_t __ret;
+ __ret = (int16_t) __builtin_neon_vaddv_s16((int8x8_t)__p0);
+ return __ret;
+}
+#else
+__ai int16_t vaddv_s16(int16x4_t __p0) {
+ int16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
+ int16_t __ret;
+ __ret = (int16_t) __builtin_neon_vaddv_s16((int8x8_t)__rev0);
+ return __ret;
+}
+#endif
+
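+/*
+ * vbsl/vbslq: bitwise select. Each result bit comes from the second operand
+ * where the corresponding mask bit is 1 and from the third operand where it
+ * is 0, i.e. (__p1 & __p0) | (__p2 & ~__p0) (the AArch64 BSL instruction).
+ * The trailing integer passed to the builtin (6, 10, 38, 42, ...) is the
+ * generator's internal element-type code, not a user-visible parameter.
+ *
+ * Illustrative usage (hypothetical values): lane-wise max via select.
+ *   uint64x1_t m = vcge_f64(a, b);      // all-ones where a >= b
+ *   float64x1_t r = vbsl_f64(m, a, b);  // picks a where a >= b, else b
+ */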
+#ifdef __LITTLE_ENDIAN__
+__ai poly64x1_t vbsl_p64(uint64x1_t __p0, poly64x1_t __p1, poly64x1_t __p2) {
+ poly64x1_t __ret;
+ __ret = (poly64x1_t) __builtin_neon_vbsl_v((int8x8_t)__p0, (int8x8_t)__p1, (int8x8_t)__p2, 6);
+ return __ret;
+}
+#else
+__ai poly64x1_t vbsl_p64(uint64x1_t __p0, poly64x1_t __p1, poly64x1_t __p2) {
+ poly64x1_t __ret;
+ __ret = (poly64x1_t) __builtin_neon_vbsl_v((int8x8_t)__p0, (int8x8_t)__p1, (int8x8_t)__p2, 6);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai poly64x2_t vbslq_p64(uint64x2_t __p0, poly64x2_t __p1, poly64x2_t __p2) {
+ poly64x2_t __ret;
+ __ret = (poly64x2_t) __builtin_neon_vbslq_v((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 38);
+ return __ret;
+}
+#else
+__ai poly64x2_t vbslq_p64(uint64x2_t __p0, poly64x2_t __p1, poly64x2_t __p2) {
+ uint64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
+ poly64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
+ poly64x2_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 1, 0);
+ poly64x2_t __ret;
+ __ret = (poly64x2_t) __builtin_neon_vbslq_v((int8x16_t)__rev0, (int8x16_t)__rev1, (int8x16_t)__rev2, 38);
+ __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai float64x2_t vbslq_f64(uint64x2_t __p0, float64x2_t __p1, float64x2_t __p2) {
+ float64x2_t __ret;
+ __ret = (float64x2_t) __builtin_neon_vbslq_v((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 42);
+ return __ret;
+}
+#else
+__ai float64x2_t vbslq_f64(uint64x2_t __p0, float64x2_t __p1, float64x2_t __p2) {
+ uint64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
+ float64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
+ float64x2_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 1, 0);
+ float64x2_t __ret;
+ __ret = (float64x2_t) __builtin_neon_vbslq_v((int8x16_t)__rev0, (int8x16_t)__rev1, (int8x16_t)__rev2, 42);
+ __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai float64x1_t vbsl_f64(uint64x1_t __p0, float64x1_t __p1, float64x1_t __p2) {
+ float64x1_t __ret;
+ __ret = (float64x1_t) __builtin_neon_vbsl_v((int8x8_t)__p0, (int8x8_t)__p1, (int8x8_t)__p2, 10);
+ return __ret;
+}
+#else
+__ai float64x1_t vbsl_f64(uint64x1_t __p0, float64x1_t __p1, float64x1_t __p2) {
+ float64x1_t __ret;
+ __ret = (float64x1_t) __builtin_neon_vbsl_v((int8x8_t)__p0, (int8x8_t)__p1, (int8x8_t)__p2, 10);
+ return __ret;
+}
+#endif
+
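+/*
+ * vcage/vcagt/vcale/vcalt: floating-point "absolute compare". Each lane is
+ * set to all-ones when |__p0| OP |__p1| holds and to zero otherwise
+ * (AArch64 FACGE/FACGT; the le/lt forms are the same tests with the
+ * operands swapped). The d/s-suffixed variants interleaved below apply the
+ * same test to a single float64_t or float32_t scalar.
+ *
+ * Illustrative usage (hypothetical values):
+ *   uint32_t r = vcages_f32(-3.0f, 2.0f);  // |-3.0| >= |2.0|, r == 0xffffffff
+ */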
+#ifdef __LITTLE_ENDIAN__
+__ai uint64x2_t vcageq_f64(float64x2_t __p0, float64x2_t __p1) {
+ uint64x2_t __ret;
+ __ret = (uint64x2_t) __builtin_neon_vcageq_v((int8x16_t)__p0, (int8x16_t)__p1, 51);
+ return __ret;
+}
+#else
+__ai uint64x2_t vcageq_f64(float64x2_t __p0, float64x2_t __p1) {
+ float64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
+ float64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
+ uint64x2_t __ret;
+ __ret = (uint64x2_t) __builtin_neon_vcageq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 51);
+ __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint64x1_t vcage_f64(float64x1_t __p0, float64x1_t __p1) {
+ uint64x1_t __ret;
+ __ret = (uint64x1_t) __builtin_neon_vcage_v((int8x8_t)__p0, (int8x8_t)__p1, 19);
+ return __ret;
+}
+#else
+__ai uint64x1_t vcage_f64(float64x1_t __p0, float64x1_t __p1) {
+ uint64x1_t __ret;
+ __ret = (uint64x1_t) __builtin_neon_vcage_v((int8x8_t)__p0, (int8x8_t)__p1, 19);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint64_t vcaged_f64(float64_t __p0, float64_t __p1) {
+ uint64_t __ret;
+ __ret = (uint64_t) __builtin_neon_vcaged_f64(__p0, __p1);
+ return __ret;
+}
+#else
+__ai uint64_t vcaged_f64(float64_t __p0, float64_t __p1) {
+ uint64_t __ret;
+ __ret = (uint64_t) __builtin_neon_vcaged_f64(__p0, __p1);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint32_t vcages_f32(float32_t __p0, float32_t __p1) {
+ uint32_t __ret;
+ __ret = (uint32_t) __builtin_neon_vcages_f32(__p0, __p1);
+ return __ret;
+}
+#else
+__ai uint32_t vcages_f32(float32_t __p0, float32_t __p1) {
+ uint32_t __ret;
+ __ret = (uint32_t) __builtin_neon_vcages_f32(__p0, __p1);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint64x2_t vcagtq_f64(float64x2_t __p0, float64x2_t __p1) {
+ uint64x2_t __ret;
+ __ret = (uint64x2_t) __builtin_neon_vcagtq_v((int8x16_t)__p0, (int8x16_t)__p1, 51);
+ return __ret;
+}
+#else
+__ai uint64x2_t vcagtq_f64(float64x2_t __p0, float64x2_t __p1) {
+ float64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
+ float64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
+ uint64x2_t __ret;
+ __ret = (uint64x2_t) __builtin_neon_vcagtq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 51);
+ __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint64x1_t vcagt_f64(float64x1_t __p0, float64x1_t __p1) {
+ uint64x1_t __ret;
+ __ret = (uint64x1_t) __builtin_neon_vcagt_v((int8x8_t)__p0, (int8x8_t)__p1, 19);
+ return __ret;
+}
+#else
+__ai uint64x1_t vcagt_f64(float64x1_t __p0, float64x1_t __p1) {
+ uint64x1_t __ret;
+ __ret = (uint64x1_t) __builtin_neon_vcagt_v((int8x8_t)__p0, (int8x8_t)__p1, 19);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint64_t vcagtd_f64(float64_t __p0, float64_t __p1) {
+ uint64_t __ret;
+ __ret = (uint64_t) __builtin_neon_vcagtd_f64(__p0, __p1);
+ return __ret;
+}
+#else
+__ai uint64_t vcagtd_f64(float64_t __p0, float64_t __p1) {
+ uint64_t __ret;
+ __ret = (uint64_t) __builtin_neon_vcagtd_f64(__p0, __p1);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint32_t vcagts_f32(float32_t __p0, float32_t __p1) {
+ uint32_t __ret;
+ __ret = (uint32_t) __builtin_neon_vcagts_f32(__p0, __p1);
+ return __ret;
+}
+#else
+__ai uint32_t vcagts_f32(float32_t __p0, float32_t __p1) {
+ uint32_t __ret;
+ __ret = (uint32_t) __builtin_neon_vcagts_f32(__p0, __p1);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint64x2_t vcaleq_f64(float64x2_t __p0, float64x2_t __p1) {
+ uint64x2_t __ret;
+ __ret = (uint64x2_t) __builtin_neon_vcaleq_v((int8x16_t)__p0, (int8x16_t)__p1, 51);
+ return __ret;
+}
+#else
+__ai uint64x2_t vcaleq_f64(float64x2_t __p0, float64x2_t __p1) {
+ float64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
+ float64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
+ uint64x2_t __ret;
+ __ret = (uint64x2_t) __builtin_neon_vcaleq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 51);
+ __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint64x1_t vcale_f64(float64x1_t __p0, float64x1_t __p1) {
+ uint64x1_t __ret;
+ __ret = (uint64x1_t) __builtin_neon_vcale_v((int8x8_t)__p0, (int8x8_t)__p1, 19);
+ return __ret;
+}
+#else
+__ai uint64x1_t vcale_f64(float64x1_t __p0, float64x1_t __p1) {
+ uint64x1_t __ret;
+ __ret = (uint64x1_t) __builtin_neon_vcale_v((int8x8_t)__p0, (int8x8_t)__p1, 19);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint64_t vcaled_f64(float64_t __p0, float64_t __p1) {
+ uint64_t __ret;
+ __ret = (uint64_t) __builtin_neon_vcaled_f64(__p0, __p1);
+ return __ret;
+}
+#else
+__ai uint64_t vcaled_f64(float64_t __p0, float64_t __p1) {
+ uint64_t __ret;
+ __ret = (uint64_t) __builtin_neon_vcaled_f64(__p0, __p1);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint32_t vcales_f32(float32_t __p0, float32_t __p1) {
+ uint32_t __ret;
+ __ret = (uint32_t) __builtin_neon_vcales_f32(__p0, __p1);
+ return __ret;
+}
+#else
+__ai uint32_t vcales_f32(float32_t __p0, float32_t __p1) {
+ uint32_t __ret;
+ __ret = (uint32_t) __builtin_neon_vcales_f32(__p0, __p1);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint64x2_t vcaltq_f64(float64x2_t __p0, float64x2_t __p1) {
+ uint64x2_t __ret;
+ __ret = (uint64x2_t) __builtin_neon_vcaltq_v((int8x16_t)__p0, (int8x16_t)__p1, 51);
+ return __ret;
+}
+#else
+__ai uint64x2_t vcaltq_f64(float64x2_t __p0, float64x2_t __p1) {
+ float64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
+ float64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
+ uint64x2_t __ret;
+ __ret = (uint64x2_t) __builtin_neon_vcaltq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 51);
+ __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint64x1_t vcalt_f64(float64x1_t __p0, float64x1_t __p1) {
+ uint64x1_t __ret;
+ __ret = (uint64x1_t) __builtin_neon_vcalt_v((int8x8_t)__p0, (int8x8_t)__p1, 19);
+ return __ret;
+}
+#else
+__ai uint64x1_t vcalt_f64(float64x1_t __p0, float64x1_t __p1) {
+ uint64x1_t __ret;
+ __ret = (uint64x1_t) __builtin_neon_vcalt_v((int8x8_t)__p0, (int8x8_t)__p1, 19);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint64_t vcaltd_f64(float64_t __p0, float64_t __p1) {
+ uint64_t __ret;
+ __ret = (uint64_t) __builtin_neon_vcaltd_f64(__p0, __p1);
+ return __ret;
+}
+#else
+__ai uint64_t vcaltd_f64(float64_t __p0, float64_t __p1) {
+ uint64_t __ret;
+ __ret = (uint64_t) __builtin_neon_vcaltd_f64(__p0, __p1);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint32_t vcalts_f32(float32_t __p0, float32_t __p1) {
+ uint32_t __ret;
+ __ret = (uint32_t) __builtin_neon_vcalts_f32(__p0, __p1);
+ return __ret;
+}
+#else
+__ai uint32_t vcalts_f32(float32_t __p0, float32_t __p1) {
+ uint32_t __ret;
+ __ret = (uint32_t) __builtin_neon_vcalts_f32(__p0, __p1);
+ return __ret;
+}
+#endif
+
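+/*
+ * vceq/vceqq for 64-bit elements: lane-wise equality, yielding all-ones in
+ * each lane where the operands match and zero elsewhere. These variants are
+ * written with the vector == operator rather than a builtin; the scalar
+ * vceqd/vceqs forms at the end of the group compare a single 64-bit or
+ * 32-bit value and return the same all-or-nothing mask.
+ *
+ * Illustrative usage (hypothetical values):
+ *   uint64x2_t eq = vceqq_u64(x, y);  // eq[i] == ~0ULL where x[i] == y[i]
+ */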
+#ifdef __LITTLE_ENDIAN__
+__ai uint64x1_t vceq_p64(poly64x1_t __p0, poly64x1_t __p1) {
+ uint64x1_t __ret;
+ __ret = (uint64x1_t)(__p0 == __p1);
+ return __ret;
+}
+#else
+__ai uint64x1_t vceq_p64(poly64x1_t __p0, poly64x1_t __p1) {
+ uint64x1_t __ret;
+ __ret = (uint64x1_t)(__p0 == __p1);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint64x2_t vceqq_p64(poly64x2_t __p0, poly64x2_t __p1) {
+ uint64x2_t __ret;
+ __ret = (uint64x2_t)(__p0 == __p1);
+ return __ret;
+}
+#else
+__ai uint64x2_t vceqq_p64(poly64x2_t __p0, poly64x2_t __p1) {
+ poly64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
+ poly64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
+ uint64x2_t __ret;
+ __ret = (uint64x2_t)(__rev0 == __rev1);
+ __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint64x2_t vceqq_u64(uint64x2_t __p0, uint64x2_t __p1) {
+ uint64x2_t __ret;
+ __ret = (uint64x2_t)(__p0 == __p1);
+ return __ret;
+}
+#else
+__ai uint64x2_t vceqq_u64(uint64x2_t __p0, uint64x2_t __p1) {
+ uint64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
+ uint64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
+ uint64x2_t __ret;
+ __ret = (uint64x2_t)(__rev0 == __rev1);
+ __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint64x2_t vceqq_f64(float64x2_t __p0, float64x2_t __p1) {
+ uint64x2_t __ret;
+ __ret = (uint64x2_t)(__p0 == __p1);
+ return __ret;
+}
+#else
+__ai uint64x2_t vceqq_f64(float64x2_t __p0, float64x2_t __p1) {
+ float64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
+ float64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
+ uint64x2_t __ret;
+ __ret = (uint64x2_t)(__rev0 == __rev1);
+ __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint64x2_t vceqq_s64(int64x2_t __p0, int64x2_t __p1) {
+ uint64x2_t __ret;
+ __ret = (uint64x2_t)(__p0 == __p1);
+ return __ret;
+}
+#else
+__ai uint64x2_t vceqq_s64(int64x2_t __p0, int64x2_t __p1) {
+ int64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
+ int64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
+ uint64x2_t __ret;
+ __ret = (uint64x2_t)(__rev0 == __rev1);
+ __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint64x1_t vceq_u64(uint64x1_t __p0, uint64x1_t __p1) {
+ uint64x1_t __ret;
+ __ret = (uint64x1_t)(__p0 == __p1);
+ return __ret;
+}
+#else
+__ai uint64x1_t vceq_u64(uint64x1_t __p0, uint64x1_t __p1) {
+ uint64x1_t __ret;
+ __ret = (uint64x1_t)(__p0 == __p1);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint64x1_t vceq_f64(float64x1_t __p0, float64x1_t __p1) {
+ uint64x1_t __ret;
+ __ret = (uint64x1_t)(__p0 == __p1);
+ return __ret;
+}
+#else
+__ai uint64x1_t vceq_f64(float64x1_t __p0, float64x1_t __p1) {
+ uint64x1_t __ret;
+ __ret = (uint64x1_t)(__p0 == __p1);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint64x1_t vceq_s64(int64x1_t __p0, int64x1_t __p1) {
+ uint64x1_t __ret;
+ __ret = (uint64x1_t)(__p0 == __p1);
+ return __ret;
+}
+#else
+__ai uint64x1_t vceq_s64(int64x1_t __p0, int64x1_t __p1) {
+ uint64x1_t __ret;
+ __ret = (uint64x1_t)(__p0 == __p1);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint64_t vceqd_u64(uint64_t __p0, uint64_t __p1) {
+ uint64_t __ret;
+ __ret = (uint64_t) __builtin_neon_vceqd_u64(__p0, __p1);
+ return __ret;
+}
+#else
+__ai uint64_t vceqd_u64(uint64_t __p0, uint64_t __p1) {
+ uint64_t __ret;
+ __ret = (uint64_t) __builtin_neon_vceqd_u64(__p0, __p1);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai int64_t vceqd_s64(int64_t __p0, int64_t __p1) {
+ int64_t __ret;
+ __ret = (int64_t) __builtin_neon_vceqd_s64(__p0, __p1);
+ return __ret;
+}
+#else
+__ai int64_t vceqd_s64(int64_t __p0, int64_t __p1) {
+ int64_t __ret;
+ __ret = (int64_t) __builtin_neon_vceqd_s64(__p0, __p1);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint64_t vceqd_f64(float64_t __p0, float64_t __p1) {
+ uint64_t __ret;
+ __ret = (uint64_t) __builtin_neon_vceqd_f64(__p0, __p1);
+ return __ret;
+}
+#else
+__ai uint64_t vceqd_f64(float64_t __p0, float64_t __p1) {
+ uint64_t __ret;
+ __ret = (uint64_t) __builtin_neon_vceqd_f64(__p0, __p1);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint32_t vceqs_f32(float32_t __p0, float32_t __p1) {
+ uint32_t __ret;
+ __ret = (uint32_t) __builtin_neon_vceqs_f32(__p0, __p1);
+ return __ret;
+}
+#else
+__ai uint32_t vceqs_f32(float32_t __p0, float32_t __p1) {
+ uint32_t __ret;
+ __ret = (uint32_t) __builtin_neon_vceqs_f32(__p0, __p1);
+ return __ret;
+}
+#endif
+
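+/*
+ * vceqz/vceqzq: compare each lane against zero, producing all-ones where
+ * the lane equals zero and zero otherwise (CMEQ #0 for integers, FCMEQ #0
+ * for floats, so both +0.0 and -0.0 match). Scalar d/s forms close the
+ * group.
+ *
+ * Illustrative usage (hypothetical values):
+ *   uint32x4_t z = vceqzq_f32(v);  // z[i] == 0xffffffff where v[i] == 0.0f
+ */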
+#ifdef __LITTLE_ENDIAN__
+__ai uint8x8_t vceqz_p8(poly8x8_t __p0) {
+ uint8x8_t __ret;
+ __ret = (uint8x8_t) __builtin_neon_vceqz_v((int8x8_t)__p0, 16);
+ return __ret;
+}
+#else
+__ai uint8x8_t vceqz_p8(poly8x8_t __p0) {
+ poly8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
+ uint8x8_t __ret;
+ __ret = (uint8x8_t) __builtin_neon_vceqz_v((int8x8_t)__rev0, 16);
+ __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint64x1_t vceqz_p64(poly64x1_t __p0) {
+ uint64x1_t __ret;
+ __ret = (uint64x1_t) __builtin_neon_vceqz_v((int8x8_t)__p0, 19);
+ return __ret;
+}
+#else
+__ai uint64x1_t vceqz_p64(poly64x1_t __p0) {
+ uint64x1_t __ret;
+ __ret = (uint64x1_t) __builtin_neon_vceqz_v((int8x8_t)__p0, 19);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint16x4_t vceqz_p16(poly16x4_t __p0) {
+ uint16x4_t __ret;
+ __ret = (uint16x4_t) __builtin_neon_vceqz_v((int8x8_t)__p0, 17);
+ return __ret;
+}
+#else
+__ai uint16x4_t vceqz_p16(poly16x4_t __p0) {
+ poly16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
+ uint16x4_t __ret;
+ __ret = (uint16x4_t) __builtin_neon_vceqz_v((int8x8_t)__rev0, 17);
+ __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint8x16_t vceqzq_p8(poly8x16_t __p0) {
+ uint8x16_t __ret;
+ __ret = (uint8x16_t) __builtin_neon_vceqzq_v((int8x16_t)__p0, 48);
+ return __ret;
+}
+#else
+__ai uint8x16_t vceqzq_p8(poly8x16_t __p0) {
+ poly8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
+ uint8x16_t __ret;
+ __ret = (uint8x16_t) __builtin_neon_vceqzq_v((int8x16_t)__rev0, 48);
+ __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint64x2_t vceqzq_p64(poly64x2_t __p0) {
+ uint64x2_t __ret;
+ __ret = (uint64x2_t) __builtin_neon_vceqzq_v((int8x16_t)__p0, 51);
+ return __ret;
+}
+#else
+__ai uint64x2_t vceqzq_p64(poly64x2_t __p0) {
+ poly64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
+ uint64x2_t __ret;
+ __ret = (uint64x2_t) __builtin_neon_vceqzq_v((int8x16_t)__rev0, 51);
+ __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint16x8_t vceqzq_p16(poly16x8_t __p0) {
+ uint16x8_t __ret;
+ __ret = (uint16x8_t) __builtin_neon_vceqzq_v((int8x16_t)__p0, 49);
+ return __ret;
+}
+#else
+__ai uint16x8_t vceqzq_p16(poly16x8_t __p0) {
+ poly16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
+ uint16x8_t __ret;
+ __ret = (uint16x8_t) __builtin_neon_vceqzq_v((int8x16_t)__rev0, 49);
+ __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint8x16_t vceqzq_u8(uint8x16_t __p0) {
+ uint8x16_t __ret;
+ __ret = (uint8x16_t) __builtin_neon_vceqzq_v((int8x16_t)__p0, 48);
+ return __ret;
+}
+#else
+__ai uint8x16_t vceqzq_u8(uint8x16_t __p0) {
+ uint8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
+ uint8x16_t __ret;
+ __ret = (uint8x16_t) __builtin_neon_vceqzq_v((int8x16_t)__rev0, 48);
+ __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint32x4_t vceqzq_u32(uint32x4_t __p0) {
+ uint32x4_t __ret;
+ __ret = (uint32x4_t) __builtin_neon_vceqzq_v((int8x16_t)__p0, 50);
+ return __ret;
+}
+#else
+__ai uint32x4_t vceqzq_u32(uint32x4_t __p0) {
+ uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
+ uint32x4_t __ret;
+ __ret = (uint32x4_t) __builtin_neon_vceqzq_v((int8x16_t)__rev0, 50);
+ __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint64x2_t vceqzq_u64(uint64x2_t __p0) {
+ uint64x2_t __ret;
+ __ret = (uint64x2_t) __builtin_neon_vceqzq_v((int8x16_t)__p0, 51);
+ return __ret;
+}
+#else
+__ai uint64x2_t vceqzq_u64(uint64x2_t __p0) {
+ uint64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
+ uint64x2_t __ret;
+ __ret = (uint64x2_t) __builtin_neon_vceqzq_v((int8x16_t)__rev0, 51);
+ __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint16x8_t vceqzq_u16(uint16x8_t __p0) {
+ uint16x8_t __ret;
+ __ret = (uint16x8_t) __builtin_neon_vceqzq_v((int8x16_t)__p0, 49);
+ return __ret;
+}
+#else
+__ai uint16x8_t vceqzq_u16(uint16x8_t __p0) {
+ uint16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
+ uint16x8_t __ret;
+ __ret = (uint16x8_t) __builtin_neon_vceqzq_v((int8x16_t)__rev0, 49);
+ __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint8x16_t vceqzq_s8(int8x16_t __p0) {
+ uint8x16_t __ret;
+ __ret = (uint8x16_t) __builtin_neon_vceqzq_v((int8x16_t)__p0, 48);
+ return __ret;
+}
+#else
+__ai uint8x16_t vceqzq_s8(int8x16_t __p0) {
+ int8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
+ uint8x16_t __ret;
+ __ret = (uint8x16_t) __builtin_neon_vceqzq_v((int8x16_t)__rev0, 48);
+ __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint64x2_t vceqzq_f64(float64x2_t __p0) {
+ uint64x2_t __ret;
+ __ret = (uint64x2_t) __builtin_neon_vceqzq_v((int8x16_t)__p0, 51);
+ return __ret;
+}
+#else
+__ai uint64x2_t vceqzq_f64(float64x2_t __p0) {
+ float64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
+ uint64x2_t __ret;
+ __ret = (uint64x2_t) __builtin_neon_vceqzq_v((int8x16_t)__rev0, 51);
+ __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint32x4_t vceqzq_f32(float32x4_t __p0) {
+ uint32x4_t __ret;
+ __ret = (uint32x4_t) __builtin_neon_vceqzq_v((int8x16_t)__p0, 50);
+ return __ret;
+}
+#else
+__ai uint32x4_t vceqzq_f32(float32x4_t __p0) {
+ float32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
+ uint32x4_t __ret;
+ __ret = (uint32x4_t) __builtin_neon_vceqzq_v((int8x16_t)__rev0, 50);
+ __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint32x4_t vceqzq_s32(int32x4_t __p0) {
+ uint32x4_t __ret;
+ __ret = (uint32x4_t) __builtin_neon_vceqzq_v((int8x16_t)__p0, 50);
+ return __ret;
+}
+#else
+__ai uint32x4_t vceqzq_s32(int32x4_t __p0) {
+ int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
+ uint32x4_t __ret;
+ __ret = (uint32x4_t) __builtin_neon_vceqzq_v((int8x16_t)__rev0, 50);
+ __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint64x2_t vceqzq_s64(int64x2_t __p0) {
+ uint64x2_t __ret;
+ __ret = (uint64x2_t) __builtin_neon_vceqzq_v((int8x16_t)__p0, 51);
+ return __ret;
+}
+#else
+__ai uint64x2_t vceqzq_s64(int64x2_t __p0) {
+ int64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
+ uint64x2_t __ret;
+ __ret = (uint64x2_t) __builtin_neon_vceqzq_v((int8x16_t)__rev0, 51);
+ __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint16x8_t vceqzq_s16(int16x8_t __p0) {
+ uint16x8_t __ret;
+ __ret = (uint16x8_t) __builtin_neon_vceqzq_v((int8x16_t)__p0, 49);
+ return __ret;
+}
+#else
+__ai uint16x8_t vceqzq_s16(int16x8_t __p0) {
+ int16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
+ uint16x8_t __ret;
+ __ret = (uint16x8_t) __builtin_neon_vceqzq_v((int8x16_t)__rev0, 49);
+ __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint8x8_t vceqz_u8(uint8x8_t __p0) {
+ uint8x8_t __ret;
+ __ret = (uint8x8_t) __builtin_neon_vceqz_v((int8x8_t)__p0, 16);
+ return __ret;
+}
+#else
+__ai uint8x8_t vceqz_u8(uint8x8_t __p0) {
+ uint8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
+ uint8x8_t __ret;
+ __ret = (uint8x8_t) __builtin_neon_vceqz_v((int8x8_t)__rev0, 16);
+ __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint32x2_t vceqz_u32(uint32x2_t __p0) {
+ uint32x2_t __ret;
+ __ret = (uint32x2_t) __builtin_neon_vceqz_v((int8x8_t)__p0, 18);
+ return __ret;
+}
+#else
+__ai uint32x2_t vceqz_u32(uint32x2_t __p0) {
+ uint32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
+ uint32x2_t __ret;
+ __ret = (uint32x2_t) __builtin_neon_vceqz_v((int8x8_t)__rev0, 18);
+ __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint64x1_t vceqz_u64(uint64x1_t __p0) {
+ uint64x1_t __ret;
+ __ret = (uint64x1_t) __builtin_neon_vceqz_v((int8x8_t)__p0, 19);
+ return __ret;
+}
+#else
+__ai uint64x1_t vceqz_u64(uint64x1_t __p0) {
+ uint64x1_t __ret;
+ __ret = (uint64x1_t) __builtin_neon_vceqz_v((int8x8_t)__p0, 19);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint16x4_t vceqz_u16(uint16x4_t __p0) {
+ uint16x4_t __ret;
+ __ret = (uint16x4_t) __builtin_neon_vceqz_v((int8x8_t)__p0, 17);
+ return __ret;
+}
+#else
+__ai uint16x4_t vceqz_u16(uint16x4_t __p0) {
+ uint16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
+ uint16x4_t __ret;
+ __ret = (uint16x4_t) __builtin_neon_vceqz_v((int8x8_t)__rev0, 17);
+ __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint8x8_t vceqz_s8(int8x8_t __p0) {
+ uint8x8_t __ret;
+ __ret = (uint8x8_t) __builtin_neon_vceqz_v((int8x8_t)__p0, 16);
+ return __ret;
+}
+#else
+__ai uint8x8_t vceqz_s8(int8x8_t __p0) {
+ int8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
+ uint8x8_t __ret;
+ __ret = (uint8x8_t) __builtin_neon_vceqz_v((int8x8_t)__rev0, 16);
+ __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint64x1_t vceqz_f64(float64x1_t __p0) {
+ uint64x1_t __ret;
+ __ret = (uint64x1_t) __builtin_neon_vceqz_v((int8x8_t)__p0, 19);
+ return __ret;
+}
+#else
+__ai uint64x1_t vceqz_f64(float64x1_t __p0) {
+ uint64x1_t __ret;
+ __ret = (uint64x1_t) __builtin_neon_vceqz_v((int8x8_t)__p0, 19);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint32x2_t vceqz_f32(float32x2_t __p0) {
+ uint32x2_t __ret;
+ __ret = (uint32x2_t) __builtin_neon_vceqz_v((int8x8_t)__p0, 18);
+ return __ret;
+}
+#else
+__ai uint32x2_t vceqz_f32(float32x2_t __p0) {
+ float32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
+ uint32x2_t __ret;
+ __ret = (uint32x2_t) __builtin_neon_vceqz_v((int8x8_t)__rev0, 18);
+ __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint32x2_t vceqz_s32(int32x2_t __p0) {
+ uint32x2_t __ret;
+ __ret = (uint32x2_t) __builtin_neon_vceqz_v((int8x8_t)__p0, 18);
+ return __ret;
+}
+#else
+__ai uint32x2_t vceqz_s32(int32x2_t __p0) {
+ int32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
+ uint32x2_t __ret;
+ __ret = (uint32x2_t) __builtin_neon_vceqz_v((int8x8_t)__rev0, 18);
+ __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint64x1_t vceqz_s64(int64x1_t __p0) {
+ uint64x1_t __ret;
+ __ret = (uint64x1_t) __builtin_neon_vceqz_v((int8x8_t)__p0, 19);
+ return __ret;
+}
+#else
+__ai uint64x1_t vceqz_s64(int64x1_t __p0) {
+ uint64x1_t __ret;
+ __ret = (uint64x1_t) __builtin_neon_vceqz_v((int8x8_t)__p0, 19);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint16x4_t vceqz_s16(int16x4_t __p0) {
+ uint16x4_t __ret;
+ __ret = (uint16x4_t) __builtin_neon_vceqz_v((int8x8_t)__p0, 17);
+ return __ret;
+}
+#else
+__ai uint16x4_t vceqz_s16(int16x4_t __p0) {
+ int16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
+ uint16x4_t __ret;
+ __ret = (uint16x4_t) __builtin_neon_vceqz_v((int8x8_t)__rev0, 17);
+ __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint64_t vceqzd_u64(uint64_t __p0) {
+ uint64_t __ret;
+ __ret = (uint64_t) __builtin_neon_vceqzd_u64(__p0);
+ return __ret;
+}
+#else
+__ai uint64_t vceqzd_u64(uint64_t __p0) {
+ uint64_t __ret;
+ __ret = (uint64_t) __builtin_neon_vceqzd_u64(__p0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai int64_t vceqzd_s64(int64_t __p0) {
+ int64_t __ret;
+ __ret = (int64_t) __builtin_neon_vceqzd_s64(__p0);
+ return __ret;
+}
+#else
+__ai int64_t vceqzd_s64(int64_t __p0) {
+ int64_t __ret;
+ __ret = (int64_t) __builtin_neon_vceqzd_s64(__p0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint64_t vceqzd_f64(float64_t __p0) {
+ uint64_t __ret;
+ __ret = (uint64_t) __builtin_neon_vceqzd_f64(__p0);
+ return __ret;
+}
+#else
+__ai uint64_t vceqzd_f64(float64_t __p0) {
+ uint64_t __ret;
+ __ret = (uint64_t) __builtin_neon_vceqzd_f64(__p0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint32_t vceqzs_f32(float32_t __p0) {
+ uint32_t __ret;
+ __ret = (uint32_t) __builtin_neon_vceqzs_f32(__p0);
+ return __ret;
+}
+#else
+__ai uint32_t vceqzs_f32(float32_t __p0) {
+ uint32_t __ret;
+ __ret = (uint32_t) __builtin_neon_vceqzs_f32(__p0);
+ return __ret;
+}
+#endif
+
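+/*
+ * vcge/vcgeq: lane-wise "greater than or equal", all-ones where
+ * __p0[i] >= __p1[i]. As with vceq above, the 64-bit variants use the
+ * vector >= operator directly; vcged/vcges are the scalar forms.
+ *
+ * Illustrative usage (hypothetical values):
+ *   uint64x2_t m = vcgeq_f64(a, b);  // m[i] == ~0ULL where a[i] >= b[i]
+ */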
+#ifdef __LITTLE_ENDIAN__
+__ai uint64x2_t vcgeq_u64(uint64x2_t __p0, uint64x2_t __p1) {
+ uint64x2_t __ret;
+ __ret = (uint64x2_t)(__p0 >= __p1);
+ return __ret;
+}
+#else
+__ai uint64x2_t vcgeq_u64(uint64x2_t __p0, uint64x2_t __p1) {
+ uint64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
+ uint64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
+ uint64x2_t __ret;
+ __ret = (uint64x2_t)(__rev0 >= __rev1);
+ __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint64x2_t vcgeq_f64(float64x2_t __p0, float64x2_t __p1) {
+ uint64x2_t __ret;
+ __ret = (uint64x2_t)(__p0 >= __p1);
+ return __ret;
+}
+#else
+__ai uint64x2_t vcgeq_f64(float64x2_t __p0, float64x2_t __p1) {
+ float64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
+ float64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
+ uint64x2_t __ret;
+ __ret = (uint64x2_t)(__rev0 >= __rev1);
+ __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint64x2_t vcgeq_s64(int64x2_t __p0, int64x2_t __p1) {
+ uint64x2_t __ret;
+ __ret = (uint64x2_t)(__p0 >= __p1);
+ return __ret;
+}
+#else
+__ai uint64x2_t vcgeq_s64(int64x2_t __p0, int64x2_t __p1) {
+ int64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
+ int64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
+ uint64x2_t __ret;
+ __ret = (uint64x2_t)(__rev0 >= __rev1);
+ __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint64x1_t vcge_u64(uint64x1_t __p0, uint64x1_t __p1) {
+ uint64x1_t __ret;
+ __ret = (uint64x1_t)(__p0 >= __p1);
+ return __ret;
+}
+#else
+__ai uint64x1_t vcge_u64(uint64x1_t __p0, uint64x1_t __p1) {
+ uint64x1_t __ret;
+ __ret = (uint64x1_t)(__p0 >= __p1);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint64x1_t vcge_f64(float64x1_t __p0, float64x1_t __p1) {
+ uint64x1_t __ret;
+ __ret = (uint64x1_t)(__p0 >= __p1);
+ return __ret;
+}
+#else
+__ai uint64x1_t vcge_f64(float64x1_t __p0, float64x1_t __p1) {
+ uint64x1_t __ret;
+ __ret = (uint64x1_t)(__p0 >= __p1);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint64x1_t vcge_s64(int64x1_t __p0, int64x1_t __p1) {
+ uint64x1_t __ret;
+ __ret = (uint64x1_t)(__p0 >= __p1);
+ return __ret;
+}
+#else
+__ai uint64x1_t vcge_s64(int64x1_t __p0, int64x1_t __p1) {
+ uint64x1_t __ret;
+ __ret = (uint64x1_t)(__p0 >= __p1);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai int64_t vcged_s64(int64_t __p0, int64_t __p1) {
+ int64_t __ret;
+ __ret = (int64_t) __builtin_neon_vcged_s64(__p0, __p1);
+ return __ret;
+}
+#else
+__ai int64_t vcged_s64(int64_t __p0, int64_t __p1) {
+ int64_t __ret;
+ __ret = (int64_t) __builtin_neon_vcged_s64(__p0, __p1);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint64_t vcged_u64(uint64_t __p0, uint64_t __p1) {
+ uint64_t __ret;
+ __ret = (uint64_t) __builtin_neon_vcged_u64(__p0, __p1);
+ return __ret;
+}
+#else
+__ai uint64_t vcged_u64(uint64_t __p0, uint64_t __p1) {
+ uint64_t __ret;
+ __ret = (uint64_t) __builtin_neon_vcged_u64(__p0, __p1);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint64_t vcged_f64(float64_t __p0, float64_t __p1) {
+ uint64_t __ret;
+ __ret = (uint64_t) __builtin_neon_vcged_f64(__p0, __p1);
+ return __ret;
+}
+#else
+__ai uint64_t vcged_f64(float64_t __p0, float64_t __p1) {
+ uint64_t __ret;
+ __ret = (uint64_t) __builtin_neon_vcged_f64(__p0, __p1);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint32_t vcges_f32(float32_t __p0, float32_t __p1) {
+ uint32_t __ret;
+ __ret = (uint32_t) __builtin_neon_vcges_f32(__p0, __p1);
+ return __ret;
+}
+#else
+__ai uint32_t vcges_f32(float32_t __p0, float32_t __p1) {
+ uint32_t __ret;
+ __ret = (uint32_t) __builtin_neon_vcges_f32(__p0, __p1);
+ return __ret;
+}
+#endif
+
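+/*
+ * vcgez/vcgezq: lane-wise "greater than or equal to zero" for signed and
+ * floating-point inputs (an unsigned variant would be trivially all-ones,
+ * so none is generated).
+ *
+ * Illustrative usage (hypothetical values):
+ *   uint32x4_t nonneg = vcgezq_s32(v);  // all-ones lanes where v[i] >= 0
+ */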
+#ifdef __LITTLE_ENDIAN__
+__ai uint8x16_t vcgezq_s8(int8x16_t __p0) {
+ uint8x16_t __ret;
+ __ret = (uint8x16_t) __builtin_neon_vcgezq_v((int8x16_t)__p0, 48);
+ return __ret;
+}
+#else
+__ai uint8x16_t vcgezq_s8(int8x16_t __p0) {
+ int8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
+ uint8x16_t __ret;
+ __ret = (uint8x16_t) __builtin_neon_vcgezq_v((int8x16_t)__rev0, 48);
+ __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint64x2_t vcgezq_f64(float64x2_t __p0) {
+ uint64x2_t __ret;
+ __ret = (uint64x2_t) __builtin_neon_vcgezq_v((int8x16_t)__p0, 51);
+ return __ret;
+}
+#else
+__ai uint64x2_t vcgezq_f64(float64x2_t __p0) {
+ float64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
+ uint64x2_t __ret;
+ __ret = (uint64x2_t) __builtin_neon_vcgezq_v((int8x16_t)__rev0, 51);
+ __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint32x4_t vcgezq_f32(float32x4_t __p0) {
+ uint32x4_t __ret;
+ __ret = (uint32x4_t) __builtin_neon_vcgezq_v((int8x16_t)__p0, 50);
+ return __ret;
+}
+#else
+__ai uint32x4_t vcgezq_f32(float32x4_t __p0) {
+ float32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
+ uint32x4_t __ret;
+ __ret = (uint32x4_t) __builtin_neon_vcgezq_v((int8x16_t)__rev0, 50);
+ __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint32x4_t vcgezq_s32(int32x4_t __p0) {
+ uint32x4_t __ret;
+ __ret = (uint32x4_t) __builtin_neon_vcgezq_v((int8x16_t)__p0, 50);
+ return __ret;
+}
+#else
+__ai uint32x4_t vcgezq_s32(int32x4_t __p0) {
+ int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
+ uint32x4_t __ret;
+ __ret = (uint32x4_t) __builtin_neon_vcgezq_v((int8x16_t)__rev0, 50);
+ __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint64x2_t vcgezq_s64(int64x2_t __p0) {
+ uint64x2_t __ret;
+ __ret = (uint64x2_t) __builtin_neon_vcgezq_v((int8x16_t)__p0, 51);
+ return __ret;
+}
+#else
+__ai uint64x2_t vcgezq_s64(int64x2_t __p0) {
+ int64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
+ uint64x2_t __ret;
+ __ret = (uint64x2_t) __builtin_neon_vcgezq_v((int8x16_t)__rev0, 51);
+ __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint16x8_t vcgezq_s16(int16x8_t __p0) {
+ uint16x8_t __ret;
+ __ret = (uint16x8_t) __builtin_neon_vcgezq_v((int8x16_t)__p0, 49);
+ return __ret;
+}
+#else
+__ai uint16x8_t vcgezq_s16(int16x8_t __p0) {
+ int16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
+ uint16x8_t __ret;
+ __ret = (uint16x8_t) __builtin_neon_vcgezq_v((int8x16_t)__rev0, 49);
+ __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint8x8_t vcgez_s8(int8x8_t __p0) {
+ uint8x8_t __ret;
+ __ret = (uint8x8_t) __builtin_neon_vcgez_v((int8x8_t)__p0, 16);
+ return __ret;
+}
+#else
+__ai uint8x8_t vcgez_s8(int8x8_t __p0) {
+ int8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
+ uint8x8_t __ret;
+ __ret = (uint8x8_t) __builtin_neon_vcgez_v((int8x8_t)__rev0, 16);
+ __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint64x1_t vcgez_f64(float64x1_t __p0) {
+ uint64x1_t __ret;
+ __ret = (uint64x1_t) __builtin_neon_vcgez_v((int8x8_t)__p0, 19);
+ return __ret;
+}
+#else
+__ai uint64x1_t vcgez_f64(float64x1_t __p0) {
+ uint64x1_t __ret;
+ __ret = (uint64x1_t) __builtin_neon_vcgez_v((int8x8_t)__p0, 19);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint32x2_t vcgez_f32(float32x2_t __p0) {
+ uint32x2_t __ret;
+ __ret = (uint32x2_t) __builtin_neon_vcgez_v((int8x8_t)__p0, 18);
+ return __ret;
+}
+#else
+__ai uint32x2_t vcgez_f32(float32x2_t __p0) {
+ float32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
+ uint32x2_t __ret;
+ __ret = (uint32x2_t) __builtin_neon_vcgez_v((int8x8_t)__rev0, 18);
+ __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint32x2_t vcgez_s32(int32x2_t __p0) {
+ uint32x2_t __ret;
+ __ret = (uint32x2_t) __builtin_neon_vcgez_v((int8x8_t)__p0, 18);
+ return __ret;
+}
+#else
+__ai uint32x2_t vcgez_s32(int32x2_t __p0) {
+ int32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
+ uint32x2_t __ret;
+ __ret = (uint32x2_t) __builtin_neon_vcgez_v((int8x8_t)__rev0, 18);
+ __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint64x1_t vcgez_s64(int64x1_t __p0) {
+ uint64x1_t __ret;
+ __ret = (uint64x1_t) __builtin_neon_vcgez_v((int8x8_t)__p0, 19);
+ return __ret;
+}
+#else
+__ai uint64x1_t vcgez_s64(int64x1_t __p0) {
+ uint64x1_t __ret;
+ __ret = (uint64x1_t) __builtin_neon_vcgez_v((int8x8_t)__p0, 19);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint16x4_t vcgez_s16(int16x4_t __p0) {
+ uint16x4_t __ret;
+ __ret = (uint16x4_t) __builtin_neon_vcgez_v((int8x8_t)__p0, 17);
+ return __ret;
+}
+#else
+__ai uint16x4_t vcgez_s16(int16x4_t __p0) {
+ int16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
+ uint16x4_t __ret;
+ __ret = (uint16x4_t) __builtin_neon_vcgez_v((int8x8_t)__rev0, 17);
+ __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai int64_t vcgezd_s64(int64_t __p0) {
+ int64_t __ret;
+ __ret = (int64_t) __builtin_neon_vcgezd_s64(__p0);
+ return __ret;
+}
+#else
+__ai int64_t vcgezd_s64(int64_t __p0) {
+ int64_t __ret;
+ __ret = (int64_t) __builtin_neon_vcgezd_s64(__p0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint64_t vcgezd_f64(float64_t __p0) {
+ uint64_t __ret;
+ __ret = (uint64_t) __builtin_neon_vcgezd_f64(__p0);
+ return __ret;
+}
+#else
+__ai uint64_t vcgezd_f64(float64_t __p0) {
+ uint64_t __ret;
+ __ret = (uint64_t) __builtin_neon_vcgezd_f64(__p0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint32_t vcgezs_f32(float32_t __p0) {
+ uint32_t __ret;
+ __ret = (uint32_t) __builtin_neon_vcgezs_f32(__p0);
+ return __ret;
+}
+#else
+__ai uint32_t vcgezs_f32(float32_t __p0) {
+ uint32_t __ret;
+ __ret = (uint32_t) __builtin_neon_vcgezs_f32(__p0);
+ return __ret;
+}
+#endif
+
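+/*
+ * vcgt/vcgtq: lane-wise "greater than", all-ones where __p0[i] > __p1[i];
+ * the 64-bit variants again use the vector > operator, with vcgtd/vcgts as
+ * the scalar forms.
+ *
+ * Illustrative usage (hypothetical values):
+ *   uint64x1_t m = vcgt_s64(a, b);  // single lane is ~0ULL when a[0] > b[0]
+ */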
+#ifdef __LITTLE_ENDIAN__
+__ai uint64x2_t vcgtq_u64(uint64x2_t __p0, uint64x2_t __p1) {
+ uint64x2_t __ret;
+ __ret = (uint64x2_t)(__p0 > __p1);
+ return __ret;
+}
+#else
+__ai uint64x2_t vcgtq_u64(uint64x2_t __p0, uint64x2_t __p1) {
+ uint64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
+ uint64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
+ uint64x2_t __ret;
+ __ret = (uint64x2_t)(__rev0 > __rev1);
+ __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint64x2_t vcgtq_f64(float64x2_t __p0, float64x2_t __p1) {
+ uint64x2_t __ret;
+ __ret = (uint64x2_t)(__p0 > __p1);
+ return __ret;
+}
+#else
+__ai uint64x2_t vcgtq_f64(float64x2_t __p0, float64x2_t __p1) {
+ float64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
+ float64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
+ uint64x2_t __ret;
+ __ret = (uint64x2_t)(__rev0 > __rev1);
+ __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint64x2_t vcgtq_s64(int64x2_t __p0, int64x2_t __p1) {
+ uint64x2_t __ret;
+ __ret = (uint64x2_t)(__p0 > __p1);
+ return __ret;
+}
+#else
+__ai uint64x2_t vcgtq_s64(int64x2_t __p0, int64x2_t __p1) {
+ int64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
+ int64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
+ uint64x2_t __ret;
+ __ret = (uint64x2_t)(__rev0 > __rev1);
+ __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint64x1_t vcgt_u64(uint64x1_t __p0, uint64x1_t __p1) {
+ uint64x1_t __ret;
+ __ret = (uint64x1_t)(__p0 > __p1);
+ return __ret;
+}
+#else
+__ai uint64x1_t vcgt_u64(uint64x1_t __p0, uint64x1_t __p1) {
+ uint64x1_t __ret;
+ __ret = (uint64x1_t)(__p0 > __p1);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint64x1_t vcgt_f64(float64x1_t __p0, float64x1_t __p1) {
+ uint64x1_t __ret;
+ __ret = (uint64x1_t)(__p0 > __p1);
+ return __ret;
+}
+#else
+__ai uint64x1_t vcgt_f64(float64x1_t __p0, float64x1_t __p1) {
+ uint64x1_t __ret;
+ __ret = (uint64x1_t)(__p0 > __p1);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint64x1_t vcgt_s64(int64x1_t __p0, int64x1_t __p1) {
+ uint64x1_t __ret;
+ __ret = (uint64x1_t)(__p0 > __p1);
+ return __ret;
+}
+#else
+__ai uint64x1_t vcgt_s64(int64x1_t __p0, int64x1_t __p1) {
+ uint64x1_t __ret;
+ __ret = (uint64x1_t)(__p0 > __p1);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai int64_t vcgtd_s64(int64_t __p0, int64_t __p1) {
+ int64_t __ret;
+ __ret = (int64_t) __builtin_neon_vcgtd_s64(__p0, __p1);
+ return __ret;
+}
+#else
+__ai int64_t vcgtd_s64(int64_t __p0, int64_t __p1) {
+ int64_t __ret;
+ __ret = (int64_t) __builtin_neon_vcgtd_s64(__p0, __p1);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint64_t vcgtd_u64(uint64_t __p0, uint64_t __p1) {
+ uint64_t __ret;
+ __ret = (uint64_t) __builtin_neon_vcgtd_u64(__p0, __p1);
+ return __ret;
+}
+#else
+__ai uint64_t vcgtd_u64(uint64_t __p0, uint64_t __p1) {
+ uint64_t __ret;
+ __ret = (uint64_t) __builtin_neon_vcgtd_u64(__p0, __p1);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint64_t vcgtd_f64(float64_t __p0, float64_t __p1) {
+ uint64_t __ret;
+ __ret = (uint64_t) __builtin_neon_vcgtd_f64(__p0, __p1);
+ return __ret;
+}
+#else
+__ai uint64_t vcgtd_f64(float64_t __p0, float64_t __p1) {
+ uint64_t __ret;
+ __ret = (uint64_t) __builtin_neon_vcgtd_f64(__p0, __p1);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint32_t vcgts_f32(float32_t __p0, float32_t __p1) {
+ uint32_t __ret;
+ __ret = (uint32_t) __builtin_neon_vcgts_f32(__p0, __p1);
+ return __ret;
+}
+#else
+__ai uint32_t vcgts_f32(float32_t __p0, float32_t __p1) {
+ uint32_t __ret;
+ __ret = (uint32_t) __builtin_neon_vcgts_f32(__p0, __p1);
+ return __ret;
+}
+#endif
+
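+/*
+ * vcgtz/vcgtzq: lane-wise "greater than zero" for signed and floating-point
+ * inputs, all-ones where the lane is strictly positive.
+ *
+ * Illustrative usage (hypothetical values):
+ *   uint16x4_t pos = vcgtz_s16(v);  // all-ones lanes where v[i] > 0
+ */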
+#ifdef __LITTLE_ENDIAN__
+__ai uint8x16_t vcgtzq_s8(int8x16_t __p0) {
+ uint8x16_t __ret;
+ __ret = (uint8x16_t) __builtin_neon_vcgtzq_v((int8x16_t)__p0, 48);
+ return __ret;
+}
+#else
+__ai uint8x16_t vcgtzq_s8(int8x16_t __p0) {
+ int8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
+ uint8x16_t __ret;
+ __ret = (uint8x16_t) __builtin_neon_vcgtzq_v((int8x16_t)__rev0, 48);
+ __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint64x2_t vcgtzq_f64(float64x2_t __p0) {
+ uint64x2_t __ret;
+ __ret = (uint64x2_t) __builtin_neon_vcgtzq_v((int8x16_t)__p0, 51);
+ return __ret;
+}
+#else
+__ai uint64x2_t vcgtzq_f64(float64x2_t __p0) {
+ float64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
+ uint64x2_t __ret;
+ __ret = (uint64x2_t) __builtin_neon_vcgtzq_v((int8x16_t)__rev0, 51);
+ __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint32x4_t vcgtzq_f32(float32x4_t __p0) {
+ uint32x4_t __ret;
+ __ret = (uint32x4_t) __builtin_neon_vcgtzq_v((int8x16_t)__p0, 50);
+ return __ret;
+}
+#else
+__ai uint32x4_t vcgtzq_f32(float32x4_t __p0) {
+ float32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
+ uint32x4_t __ret;
+ __ret = (uint32x4_t) __builtin_neon_vcgtzq_v((int8x16_t)__rev0, 50);
+ __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint32x4_t vcgtzq_s32(int32x4_t __p0) {
+ uint32x4_t __ret;
+ __ret = (uint32x4_t) __builtin_neon_vcgtzq_v((int8x16_t)__p0, 50);
+ return __ret;
+}
+#else
+__ai uint32x4_t vcgtzq_s32(int32x4_t __p0) {
+ int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
+ uint32x4_t __ret;
+ __ret = (uint32x4_t) __builtin_neon_vcgtzq_v((int8x16_t)__rev0, 50);
+ __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint64x2_t vcgtzq_s64(int64x2_t __p0) {
+ uint64x2_t __ret;
+ __ret = (uint64x2_t) __builtin_neon_vcgtzq_v((int8x16_t)__p0, 51);
+ return __ret;
+}
+#else
+__ai uint64x2_t vcgtzq_s64(int64x2_t __p0) {
+ int64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
+ uint64x2_t __ret;
+ __ret = (uint64x2_t) __builtin_neon_vcgtzq_v((int8x16_t)__rev0, 51);
+ __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint16x8_t vcgtzq_s16(int16x8_t __p0) {
+ uint16x8_t __ret;
+ __ret = (uint16x8_t) __builtin_neon_vcgtzq_v((int8x16_t)__p0, 49);
+ return __ret;
+}
+#else
+__ai uint16x8_t vcgtzq_s16(int16x8_t __p0) {
+ int16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
+ uint16x8_t __ret;
+ __ret = (uint16x8_t) __builtin_neon_vcgtzq_v((int8x16_t)__rev0, 49);
+ __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint8x8_t vcgtz_s8(int8x8_t __p0) {
+ uint8x8_t __ret;
+ __ret = (uint8x8_t) __builtin_neon_vcgtz_v((int8x8_t)__p0, 16);
+ return __ret;
+}
+#else
+__ai uint8x8_t vcgtz_s8(int8x8_t __p0) {
+ int8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
+ uint8x8_t __ret;
+ __ret = (uint8x8_t) __builtin_neon_vcgtz_v((int8x8_t)__rev0, 16);
+ __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint64x1_t vcgtz_f64(float64x1_t __p0) {
+ uint64x1_t __ret;
+ __ret = (uint64x1_t) __builtin_neon_vcgtz_v((int8x8_t)__p0, 19);
+ return __ret;
+}
+#else
+__ai uint64x1_t vcgtz_f64(float64x1_t __p0) {
+ uint64x1_t __ret;
+ __ret = (uint64x1_t) __builtin_neon_vcgtz_v((int8x8_t)__p0, 19);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint32x2_t vcgtz_f32(float32x2_t __p0) {
+ uint32x2_t __ret;
+ __ret = (uint32x2_t) __builtin_neon_vcgtz_v((int8x8_t)__p0, 18);
+ return __ret;
+}
+#else
+__ai uint32x2_t vcgtz_f32(float32x2_t __p0) {
+ float32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
+ uint32x2_t __ret;
+ __ret = (uint32x2_t) __builtin_neon_vcgtz_v((int8x8_t)__rev0, 18);
+ __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint32x2_t vcgtz_s32(int32x2_t __p0) {
+ uint32x2_t __ret;
+ __ret = (uint32x2_t) __builtin_neon_vcgtz_v((int8x8_t)__p0, 18);
+ return __ret;
+}
+#else
+__ai uint32x2_t vcgtz_s32(int32x2_t __p0) {
+ int32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
+ uint32x2_t __ret;
+ __ret = (uint32x2_t) __builtin_neon_vcgtz_v((int8x8_t)__rev0, 18);
+ __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint64x1_t vcgtz_s64(int64x1_t __p0) {
+ uint64x1_t __ret;
+ __ret = (uint64x1_t) __builtin_neon_vcgtz_v((int8x8_t)__p0, 19);
+ return __ret;
+}
+#else
+__ai uint64x1_t vcgtz_s64(int64x1_t __p0) {
+ uint64x1_t __ret;
+ __ret = (uint64x1_t) __builtin_neon_vcgtz_v((int8x8_t)__p0, 19);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint16x4_t vcgtz_s16(int16x4_t __p0) {
+ uint16x4_t __ret;
+ __ret = (uint16x4_t) __builtin_neon_vcgtz_v((int8x8_t)__p0, 17);
+ return __ret;
+}
+#else
+__ai uint16x4_t vcgtz_s16(int16x4_t __p0) {
+ int16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
+ uint16x4_t __ret;
+ __ret = (uint16x4_t) __builtin_neon_vcgtz_v((int8x8_t)__rev0, 17);
+ __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai int64_t vcgtzd_s64(int64_t __p0) {
+ int64_t __ret;
+ __ret = (int64_t) __builtin_neon_vcgtzd_s64(__p0);
+ return __ret;
+}
+#else
+__ai int64_t vcgtzd_s64(int64_t __p0) {
+ int64_t __ret;
+ __ret = (int64_t) __builtin_neon_vcgtzd_s64(__p0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint64_t vcgtzd_f64(float64_t __p0) {
+ uint64_t __ret;
+ __ret = (uint64_t) __builtin_neon_vcgtzd_f64(__p0);
+ return __ret;
+}
+#else
+__ai uint64_t vcgtzd_f64(float64_t __p0) {
+ uint64_t __ret;
+ __ret = (uint64_t) __builtin_neon_vcgtzd_f64(__p0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint32_t vcgtzs_f32(float32_t __p0) {
+ uint32_t __ret;
+ __ret = (uint32_t) __builtin_neon_vcgtzs_f32(__p0);
+ return __ret;
+}
+#else
+__ai uint32_t vcgtzs_f32(float32_t __p0) {
+ uint32_t __ret;
+ __ret = (uint32_t) __builtin_neon_vcgtzs_f32(__p0);
+ return __ret;
+}
+#endif
+
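+/*
+ * vcle/vcleq: lane-wise "less than or equal", the mirror image of vcge
+ * above (a <= b is the same test as b >= a), with vcled/vcles as the
+ * scalar forms.
+ *
+ * Illustrative usage (hypothetical values):
+ *   uint64x2_t m = vcleq_u64(a, b);  // m[i] == ~0ULL where a[i] <= b[i]
+ */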
+#ifdef __LITTLE_ENDIAN__
+__ai uint64x2_t vcleq_u64(uint64x2_t __p0, uint64x2_t __p1) {
+ uint64x2_t __ret;
+ __ret = (uint64x2_t)(__p0 <= __p1);
+ return __ret;
+}
+#else
+__ai uint64x2_t vcleq_u64(uint64x2_t __p0, uint64x2_t __p1) {
+ uint64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
+ uint64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
+ uint64x2_t __ret;
+ __ret = (uint64x2_t)(__rev0 <= __rev1);
+ __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint64x2_t vcleq_f64(float64x2_t __p0, float64x2_t __p1) {
+ uint64x2_t __ret;
+ __ret = (uint64x2_t)(__p0 <= __p1);
+ return __ret;
+}
+#else
+__ai uint64x2_t vcleq_f64(float64x2_t __p0, float64x2_t __p1) {
+ float64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
+ float64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
+ uint64x2_t __ret;
+ __ret = (uint64x2_t)(__rev0 <= __rev1);
+ __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint64x2_t vcleq_s64(int64x2_t __p0, int64x2_t __p1) {
+ uint64x2_t __ret;
+ __ret = (uint64x2_t)(__p0 <= __p1);
+ return __ret;
+}
+#else
+__ai uint64x2_t vcleq_s64(int64x2_t __p0, int64x2_t __p1) {
+ int64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
+ int64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
+ uint64x2_t __ret;
+ __ret = (uint64x2_t)(__rev0 <= __rev1);
+ __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint64x1_t vcle_u64(uint64x1_t __p0, uint64x1_t __p1) {
+ uint64x1_t __ret;
+ __ret = (uint64x1_t)(__p0 <= __p1);
+ return __ret;
+}
+#else
+__ai uint64x1_t vcle_u64(uint64x1_t __p0, uint64x1_t __p1) {
+ uint64x1_t __ret;
+ __ret = (uint64x1_t)(__p0 <= __p1);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint64x1_t vcle_f64(float64x1_t __p0, float64x1_t __p1) {
+ uint64x1_t __ret;
+ __ret = (uint64x1_t)(__p0 <= __p1);
+ return __ret;
+}
+#else
+__ai uint64x1_t vcle_f64(float64x1_t __p0, float64x1_t __p1) {
+ uint64x1_t __ret;
+ __ret = (uint64x1_t)(__p0 <= __p1);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint64x1_t vcle_s64(int64x1_t __p0, int64x1_t __p1) {
+ uint64x1_t __ret;
+ __ret = (uint64x1_t)(__p0 <= __p1);
+ return __ret;
+}
+#else
+__ai uint64x1_t vcle_s64(int64x1_t __p0, int64x1_t __p1) {
+ uint64x1_t __ret;
+ __ret = (uint64x1_t)(__p0 <= __p1);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint64_t vcled_u64(uint64_t __p0, uint64_t __p1) {
+ uint64_t __ret;
+ __ret = (uint64_t) __builtin_neon_vcled_u64(__p0, __p1);
+ return __ret;
+}
+#else
+__ai uint64_t vcled_u64(uint64_t __p0, uint64_t __p1) {
+ uint64_t __ret;
+ __ret = (uint64_t) __builtin_neon_vcled_u64(__p0, __p1);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai int64_t vcled_s64(int64_t __p0, int64_t __p1) {
+ int64_t __ret;
+ __ret = (int64_t) __builtin_neon_vcled_s64(__p0, __p1);
+ return __ret;
+}
+#else
+__ai int64_t vcled_s64(int64_t __p0, int64_t __p1) {
+ int64_t __ret;
+ __ret = (int64_t) __builtin_neon_vcled_s64(__p0, __p1);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint64_t vcled_f64(float64_t __p0, float64_t __p1) {
+ uint64_t __ret;
+ __ret = (uint64_t) __builtin_neon_vcled_f64(__p0, __p1);
+ return __ret;
+}
+#else
+__ai uint64_t vcled_f64(float64_t __p0, float64_t __p1) {
+ uint64_t __ret;
+ __ret = (uint64_t) __builtin_neon_vcled_f64(__p0, __p1);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint32_t vcles_f32(float32_t __p0, float32_t __p1) {
+ uint32_t __ret;
+ __ret = (uint32_t) __builtin_neon_vcles_f32(__p0, __p1);
+ return __ret;
+}
+#else
+__ai uint32_t vcles_f32(float32_t __p0, float32_t __p1) {
+ uint32_t __ret;
+ __ret = (uint32_t) __builtin_neon_vcles_f32(__p0, __p1);
+ return __ret;
+}
+#endif
+
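+/*
+ * vclez/vclezq: lane-wise "less than or equal to zero" for signed and
+ * floating-point inputs, with scalar d-suffixed forms at the end of the
+ * group.
+ *
+ * Illustrative usage (hypothetical values):
+ *   uint8x8_t m = vclez_s8(v);  // all-ones lanes where v[i] <= 0
+ */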
+#ifdef __LITTLE_ENDIAN__
+__ai uint8x16_t vclezq_s8(int8x16_t __p0) {
+ uint8x16_t __ret;
+ __ret = (uint8x16_t) __builtin_neon_vclezq_v((int8x16_t)__p0, 48);
+ return __ret;
+}
+#else
+__ai uint8x16_t vclezq_s8(int8x16_t __p0) {
+ int8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
+ uint8x16_t __ret;
+ __ret = (uint8x16_t) __builtin_neon_vclezq_v((int8x16_t)__rev0, 48);
+ __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint64x2_t vclezq_f64(float64x2_t __p0) {
+ uint64x2_t __ret;
+ __ret = (uint64x2_t) __builtin_neon_vclezq_v((int8x16_t)__p0, 51);
+ return __ret;
+}
+#else
+__ai uint64x2_t vclezq_f64(float64x2_t __p0) {
+ float64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
+ uint64x2_t __ret;
+ __ret = (uint64x2_t) __builtin_neon_vclezq_v((int8x16_t)__rev0, 51);
+ __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint32x4_t vclezq_f32(float32x4_t __p0) {
+ uint32x4_t __ret;
+ __ret = (uint32x4_t) __builtin_neon_vclezq_v((int8x16_t)__p0, 50);
+ return __ret;
+}
+#else
+__ai uint32x4_t vclezq_f32(float32x4_t __p0) {
+ float32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
+ uint32x4_t __ret;
+ __ret = (uint32x4_t) __builtin_neon_vclezq_v((int8x16_t)__rev0, 50);
+ __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint32x4_t vclezq_s32(int32x4_t __p0) {
+ uint32x4_t __ret;
+ __ret = (uint32x4_t) __builtin_neon_vclezq_v((int8x16_t)__p0, 50);
+ return __ret;
+}
+#else
+__ai uint32x4_t vclezq_s32(int32x4_t __p0) {
+ int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
+ uint32x4_t __ret;
+ __ret = (uint32x4_t) __builtin_neon_vclezq_v((int8x16_t)__rev0, 50);
+ __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint64x2_t vclezq_s64(int64x2_t __p0) {
+ uint64x2_t __ret;
+ __ret = (uint64x2_t) __builtin_neon_vclezq_v((int8x16_t)__p0, 51);
+ return __ret;
+}
+#else
+__ai uint64x2_t vclezq_s64(int64x2_t __p0) {
+ int64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
+ uint64x2_t __ret;
+ __ret = (uint64x2_t) __builtin_neon_vclezq_v((int8x16_t)__rev0, 51);
+ __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint16x8_t vclezq_s16(int16x8_t __p0) {
+ uint16x8_t __ret;
+ __ret = (uint16x8_t) __builtin_neon_vclezq_v((int8x16_t)__p0, 49);
+ return __ret;
+}
+#else
+__ai uint16x8_t vclezq_s16(int16x8_t __p0) {
+ int16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
+ uint16x8_t __ret;
+ __ret = (uint16x8_t) __builtin_neon_vclezq_v((int8x16_t)__rev0, 49);
+ __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint8x8_t vclez_s8(int8x8_t __p0) {
+ uint8x8_t __ret;
+ __ret = (uint8x8_t) __builtin_neon_vclez_v((int8x8_t)__p0, 16);
+ return __ret;
+}
+#else
+__ai uint8x8_t vclez_s8(int8x8_t __p0) {
+ int8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
+ uint8x8_t __ret;
+ __ret = (uint8x8_t) __builtin_neon_vclez_v((int8x8_t)__rev0, 16);
+ __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint64x1_t vclez_f64(float64x1_t __p0) {
+ uint64x1_t __ret;
+ __ret = (uint64x1_t) __builtin_neon_vclez_v((int8x8_t)__p0, 19);
+ return __ret;
+}
+#else
+__ai uint64x1_t vclez_f64(float64x1_t __p0) {
+ uint64x1_t __ret;
+ __ret = (uint64x1_t) __builtin_neon_vclez_v((int8x8_t)__p0, 19);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint32x2_t vclez_f32(float32x2_t __p0) {
+ uint32x2_t __ret;
+ __ret = (uint32x2_t) __builtin_neon_vclez_v((int8x8_t)__p0, 18);
+ return __ret;
+}
+#else
+__ai uint32x2_t vclez_f32(float32x2_t __p0) {
+ float32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
+ uint32x2_t __ret;
+ __ret = (uint32x2_t) __builtin_neon_vclez_v((int8x8_t)__rev0, 18);
+ __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint32x2_t vclez_s32(int32x2_t __p0) {
+ uint32x2_t __ret;
+ __ret = (uint32x2_t) __builtin_neon_vclez_v((int8x8_t)__p0, 18);
+ return __ret;
+}
+#else
+__ai uint32x2_t vclez_s32(int32x2_t __p0) {
+ int32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
+ uint32x2_t __ret;
+ __ret = (uint32x2_t) __builtin_neon_vclez_v((int8x8_t)__rev0, 18);
+ __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint64x1_t vclez_s64(int64x1_t __p0) {
+ uint64x1_t __ret;
+ __ret = (uint64x1_t) __builtin_neon_vclez_v((int8x8_t)__p0, 19);
+ return __ret;
+}
+#else
+__ai uint64x1_t vclez_s64(int64x1_t __p0) {
+ uint64x1_t __ret;
+ __ret = (uint64x1_t) __builtin_neon_vclez_v((int8x8_t)__p0, 19);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint16x4_t vclez_s16(int16x4_t __p0) {
+ uint16x4_t __ret;
+ __ret = (uint16x4_t) __builtin_neon_vclez_v((int8x8_t)__p0, 17);
+ return __ret;
+}
+#else
+__ai uint16x4_t vclez_s16(int16x4_t __p0) {
+ int16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
+ uint16x4_t __ret;
+ __ret = (uint16x4_t) __builtin_neon_vclez_v((int8x8_t)__rev0, 17);
+ __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai int64_t vclezd_s64(int64_t __p0) {
+ int64_t __ret;
+ __ret = (int64_t) __builtin_neon_vclezd_s64(__p0);
+ return __ret;
+}
+#else
+__ai int64_t vclezd_s64(int64_t __p0) {
+ int64_t __ret;
+ __ret = (int64_t) __builtin_neon_vclezd_s64(__p0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint64_t vclezd_f64(float64_t __p0) {
+ uint64_t __ret;
+ __ret = (uint64_t) __builtin_neon_vclezd_f64(__p0);
+ return __ret;
+}
+#else
+__ai uint64_t vclezd_f64(float64_t __p0) {
+ uint64_t __ret;
+ __ret = (uint64_t) __builtin_neon_vclezd_f64(__p0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint32_t vclezs_f32(float32_t __p0) {
+ uint32_t __ret;
+ __ret = (uint32_t) __builtin_neon_vclezs_f32(__p0);
+ return __ret;
+}
+#else
+__ai uint32_t vclezs_f32(float32_t __p0) {
+ uint32_t __ret;
+ __ret = (uint32_t) __builtin_neon_vclezs_f32(__p0);
+ return __ret;
+}
+#endif
+
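+/*
+ * The 64-bit vclt comparisons are expressed with the plain C vector
+ * operator (__p0 < __p1), which already yields a lane-wise
+ * all-ones/all-zero mask.  For such purely element-wise operations the
+ * reverse/compute/reverse sequence in the big-endian branch is
+ * semantically a no-op; it appears to be kept only because the
+ * generator applies one uniform pattern everywhere.
+ */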
+#ifdef __LITTLE_ENDIAN__
+__ai uint64x2_t vcltq_u64(uint64x2_t __p0, uint64x2_t __p1) {
+ uint64x2_t __ret;
+ __ret = (uint64x2_t)(__p0 < __p1);
+ return __ret;
+}
+#else
+__ai uint64x2_t vcltq_u64(uint64x2_t __p0, uint64x2_t __p1) {
+ uint64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
+ uint64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
+ uint64x2_t __ret;
+ __ret = (uint64x2_t)(__rev0 < __rev1);
+ __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint64x2_t vcltq_f64(float64x2_t __p0, float64x2_t __p1) {
+ uint64x2_t __ret;
+ __ret = (uint64x2_t)(__p0 < __p1);
+ return __ret;
+}
+#else
+__ai uint64x2_t vcltq_f64(float64x2_t __p0, float64x2_t __p1) {
+ float64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
+ float64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
+ uint64x2_t __ret;
+ __ret = (uint64x2_t)(__rev0 < __rev1);
+ __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint64x2_t vcltq_s64(int64x2_t __p0, int64x2_t __p1) {
+ uint64x2_t __ret;
+ __ret = (uint64x2_t)(__p0 < __p1);
+ return __ret;
+}
+#else
+__ai uint64x2_t vcltq_s64(int64x2_t __p0, int64x2_t __p1) {
+ int64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
+ int64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
+ uint64x2_t __ret;
+ __ret = (uint64x2_t)(__rev0 < __rev1);
+ __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint64x1_t vclt_u64(uint64x1_t __p0, uint64x1_t __p1) {
+ uint64x1_t __ret;
+ __ret = (uint64x1_t)(__p0 < __p1);
+ return __ret;
+}
+#else
+__ai uint64x1_t vclt_u64(uint64x1_t __p0, uint64x1_t __p1) {
+ uint64x1_t __ret;
+ __ret = (uint64x1_t)(__p0 < __p1);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint64x1_t vclt_f64(float64x1_t __p0, float64x1_t __p1) {
+ uint64x1_t __ret;
+ __ret = (uint64x1_t)(__p0 < __p1);
+ return __ret;
+}
+#else
+__ai uint64x1_t vclt_f64(float64x1_t __p0, float64x1_t __p1) {
+ uint64x1_t __ret;
+ __ret = (uint64x1_t)(__p0 < __p1);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint64x1_t vclt_s64(int64x1_t __p0, int64x1_t __p1) {
+ uint64x1_t __ret;
+ __ret = (uint64x1_t)(__p0 < __p1);
+ return __ret;
+}
+#else
+__ai uint64x1_t vclt_s64(int64x1_t __p0, int64x1_t __p1) {
+ uint64x1_t __ret;
+ __ret = (uint64x1_t)(__p0 < __p1);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint64_t vcltd_u64(uint64_t __p0, uint64_t __p1) {
+ uint64_t __ret;
+ __ret = (uint64_t) __builtin_neon_vcltd_u64(__p0, __p1);
+ return __ret;
+}
+#else
+__ai uint64_t vcltd_u64(uint64_t __p0, uint64_t __p1) {
+ uint64_t __ret;
+ __ret = (uint64_t) __builtin_neon_vcltd_u64(__p0, __p1);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai int64_t vcltd_s64(int64_t __p0, int64_t __p1) {
+ int64_t __ret;
+ __ret = (int64_t) __builtin_neon_vcltd_s64(__p0, __p1);
+ return __ret;
+}
+#else
+__ai int64_t vcltd_s64(int64_t __p0, int64_t __p1) {
+ int64_t __ret;
+ __ret = (int64_t) __builtin_neon_vcltd_s64(__p0, __p1);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint64_t vcltd_f64(float64_t __p0, float64_t __p1) {
+ uint64_t __ret;
+ __ret = (uint64_t) __builtin_neon_vcltd_f64(__p0, __p1);
+ return __ret;
+}
+#else
+__ai uint64_t vcltd_f64(float64_t __p0, float64_t __p1) {
+ uint64_t __ret;
+ __ret = (uint64_t) __builtin_neon_vcltd_f64(__p0, __p1);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint32_t vclts_f32(float32_t __p0, float32_t __p1) {
+ uint32_t __ret;
+ __ret = (uint32_t) __builtin_neon_vclts_f32(__p0, __p1);
+ return __ret;
+}
+#else
+__ai uint32_t vclts_f32(float32_t __p0, float32_t __p1) {
+ uint32_t __ret;
+ __ret = (uint32_t) __builtin_neon_vclts_f32(__p0, __p1);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint8x16_t vcltzq_s8(int8x16_t __p0) {
+ uint8x16_t __ret;
+ __ret = (uint8x16_t) __builtin_neon_vcltzq_v((int8x16_t)__p0, 48);
+ return __ret;
+}
+#else
+__ai uint8x16_t vcltzq_s8(int8x16_t __p0) {
+ int8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
+ uint8x16_t __ret;
+ __ret = (uint8x16_t) __builtin_neon_vcltzq_v((int8x16_t)__rev0, 48);
+ __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint64x2_t vcltzq_f64(float64x2_t __p0) {
+ uint64x2_t __ret;
+ __ret = (uint64x2_t) __builtin_neon_vcltzq_v((int8x16_t)__p0, 51);
+ return __ret;
+}
+#else
+__ai uint64x2_t vcltzq_f64(float64x2_t __p0) {
+ float64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
+ uint64x2_t __ret;
+ __ret = (uint64x2_t) __builtin_neon_vcltzq_v((int8x16_t)__rev0, 51);
+ __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint32x4_t vcltzq_f32(float32x4_t __p0) {
+ uint32x4_t __ret;
+ __ret = (uint32x4_t) __builtin_neon_vcltzq_v((int8x16_t)__p0, 50);
+ return __ret;
+}
+#else
+__ai uint32x4_t vcltzq_f32(float32x4_t __p0) {
+ float32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
+ uint32x4_t __ret;
+ __ret = (uint32x4_t) __builtin_neon_vcltzq_v((int8x16_t)__rev0, 50);
+ __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint32x4_t vcltzq_s32(int32x4_t __p0) {
+ uint32x4_t __ret;
+ __ret = (uint32x4_t) __builtin_neon_vcltzq_v((int8x16_t)__p0, 50);
+ return __ret;
+}
+#else
+__ai uint32x4_t vcltzq_s32(int32x4_t __p0) {
+ int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
+ uint32x4_t __ret;
+ __ret = (uint32x4_t) __builtin_neon_vcltzq_v((int8x16_t)__rev0, 50);
+ __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint64x2_t vcltzq_s64(int64x2_t __p0) {
+ uint64x2_t __ret;
+ __ret = (uint64x2_t) __builtin_neon_vcltzq_v((int8x16_t)__p0, 51);
+ return __ret;
+}
+#else
+__ai uint64x2_t vcltzq_s64(int64x2_t __p0) {
+ int64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
+ uint64x2_t __ret;
+ __ret = (uint64x2_t) __builtin_neon_vcltzq_v((int8x16_t)__rev0, 51);
+ __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint16x8_t vcltzq_s16(int16x8_t __p0) {
+ uint16x8_t __ret;
+ __ret = (uint16x8_t) __builtin_neon_vcltzq_v((int8x16_t)__p0, 49);
+ return __ret;
+}
+#else
+__ai uint16x8_t vcltzq_s16(int16x8_t __p0) {
+ int16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
+ uint16x8_t __ret;
+ __ret = (uint16x8_t) __builtin_neon_vcltzq_v((int8x16_t)__rev0, 49);
+ __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint8x8_t vcltz_s8(int8x8_t __p0) {
+ uint8x8_t __ret;
+ __ret = (uint8x8_t) __builtin_neon_vcltz_v((int8x8_t)__p0, 16);
+ return __ret;
+}
+#else
+__ai uint8x8_t vcltz_s8(int8x8_t __p0) {
+ int8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
+ uint8x8_t __ret;
+ __ret = (uint8x8_t) __builtin_neon_vcltz_v((int8x8_t)__rev0, 16);
+ __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint64x1_t vcltz_f64(float64x1_t __p0) {
+ uint64x1_t __ret;
+ __ret = (uint64x1_t) __builtin_neon_vcltz_v((int8x8_t)__p0, 19);
+ return __ret;
+}
+#else
+__ai uint64x1_t vcltz_f64(float64x1_t __p0) {
+ uint64x1_t __ret;
+ __ret = (uint64x1_t) __builtin_neon_vcltz_v((int8x8_t)__p0, 19);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint32x2_t vcltz_f32(float32x2_t __p0) {
+ uint32x2_t __ret;
+ __ret = (uint32x2_t) __builtin_neon_vcltz_v((int8x8_t)__p0, 18);
+ return __ret;
+}
+#else
+__ai uint32x2_t vcltz_f32(float32x2_t __p0) {
+ float32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
+ uint32x2_t __ret;
+ __ret = (uint32x2_t) __builtin_neon_vcltz_v((int8x8_t)__rev0, 18);
+ __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint32x2_t vcltz_s32(int32x2_t __p0) {
+ uint32x2_t __ret;
+ __ret = (uint32x2_t) __builtin_neon_vcltz_v((int8x8_t)__p0, 18);
+ return __ret;
+}
+#else
+__ai uint32x2_t vcltz_s32(int32x2_t __p0) {
+ int32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
+ uint32x2_t __ret;
+ __ret = (uint32x2_t) __builtin_neon_vcltz_v((int8x8_t)__rev0, 18);
+ __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint64x1_t vcltz_s64(int64x1_t __p0) {
+ uint64x1_t __ret;
+ __ret = (uint64x1_t) __builtin_neon_vcltz_v((int8x8_t)__p0, 19);
+ return __ret;
+}
+#else
+__ai uint64x1_t vcltz_s64(int64x1_t __p0) {
+ uint64x1_t __ret;
+ __ret = (uint64x1_t) __builtin_neon_vcltz_v((int8x8_t)__p0, 19);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint16x4_t vcltz_s16(int16x4_t __p0) {
+ uint16x4_t __ret;
+ __ret = (uint16x4_t) __builtin_neon_vcltz_v((int8x8_t)__p0, 17);
+ return __ret;
+}
+#else
+__ai uint16x4_t vcltz_s16(int16x4_t __p0) {
+ int16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
+ uint16x4_t __ret;
+ __ret = (uint16x4_t) __builtin_neon_vcltz_v((int8x8_t)__rev0, 17);
+ __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai int64_t vcltzd_s64(int64_t __p0) {
+ int64_t __ret;
+ __ret = (int64_t) __builtin_neon_vcltzd_s64(__p0);
+ return __ret;
+}
+#else
+__ai int64_t vcltzd_s64(int64_t __p0) {
+ int64_t __ret;
+ __ret = (int64_t) __builtin_neon_vcltzd_s64(__p0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint64_t vcltzd_f64(float64_t __p0) {
+ uint64_t __ret;
+ __ret = (uint64_t) __builtin_neon_vcltzd_f64(__p0);
+ return __ret;
+}
+#else
+__ai uint64_t vcltzd_f64(float64_t __p0) {
+ uint64_t __ret;
+ __ret = (uint64_t) __builtin_neon_vcltzd_f64(__p0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint32_t vcltzs_f32(float32_t __p0) {
+ uint32_t __ret;
+ __ret = (uint32_t) __builtin_neon_vcltzs_f32(__p0);
+ return __ret;
+}
+#else
+__ai uint32_t vcltzs_f32(float32_t __p0) {
+ uint32_t __ret;
+ __ret = (uint32_t) __builtin_neon_vcltzs_f32(__p0);
+ return __ret;
+}
+#endif
+
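+/*
+ * vcombine_p64/vcombine_f64 concatenate two 64-bit vectors into one
+ * 128-bit vector: __builtin_shufflevector(__p0, __p1, 0, 1) selects
+ * the single lane of each operand in order.
+ */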
+#ifdef __LITTLE_ENDIAN__
+__ai poly64x2_t vcombine_p64(poly64x1_t __p0, poly64x1_t __p1) {
+ poly64x2_t __ret;
+ __ret = __builtin_shufflevector(__p0, __p1, 0, 1);
+ return __ret;
+}
+#else
+__ai poly64x2_t vcombine_p64(poly64x1_t __p0, poly64x1_t __p1) {
+ poly64x2_t __ret;
+ __ret = __builtin_shufflevector(__p0, __p1, 0, 1);
+ __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai float64x2_t vcombine_f64(float64x1_t __p0, float64x1_t __p1) {
+ float64x2_t __ret;
+ __ret = __builtin_shufflevector(__p0, __p1, 0, 1);
+ return __ret;
+}
+#else
+__ai float64x2_t vcombine_f64(float64x1_t __p0, float64x1_t __p1) {
+ float64x2_t __ret;
+ __ret = __builtin_shufflevector(__p0, __p1, 0, 1);
+ __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
+ return __ret;
+}
+#endif
+
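+/*
+ * The vcopy*_lane* family copies one lane of the third operand into a
+ * chosen lane of the first.  These are macros rather than __ai
+ * functions because the lane indices must remain integer constant
+ * expressions for the underlying vget_lane/vset_lane intrinsics.  The
+ * numeric suffixes (__p0_0, __s0_1, ...) keep the temporaries of
+ * nested macro expansions from shadowing each other, and the __noswap_
+ * helpers invoke the little-endian bodies on data whose lanes have
+ * already been reversed explicitly.
+ */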
+#ifdef __LITTLE_ENDIAN__
+#define vcopyq_lane_p8(__p0_0, __p1_0, __p2_0, __p3_0) __extension__ ({ \
+ poly8x16_t __s0_0 = __p0_0; \
+ poly8x8_t __s2_0 = __p2_0; \
+ poly8x16_t __ret_0; \
+ __ret_0 = vsetq_lane_p8(vget_lane_p8(__s2_0, __p3_0), __s0_0, __p1_0); \
+ __ret_0; \
+})
+#else
+#define vcopyq_lane_p8(__p0_1, __p1_1, __p2_1, __p3_1) __extension__ ({ \
+ poly8x16_t __s0_1 = __p0_1; \
+ poly8x8_t __s2_1 = __p2_1; \
+ poly8x16_t __rev0_1; __rev0_1 = __builtin_shufflevector(__s0_1, __s0_1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
+ poly8x8_t __rev2_1; __rev2_1 = __builtin_shufflevector(__s2_1, __s2_1, 7, 6, 5, 4, 3, 2, 1, 0); \
+ poly8x16_t __ret_1; \
+ __ret_1 = __noswap_vsetq_lane_p8(__noswap_vget_lane_p8(__rev2_1, __p3_1), __rev0_1, __p1_1); \
+ __ret_1 = __builtin_shufflevector(__ret_1, __ret_1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
+ __ret_1; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vcopyq_lane_p16(__p0_2, __p1_2, __p2_2, __p3_2) __extension__ ({ \
+ poly16x8_t __s0_2 = __p0_2; \
+ poly16x4_t __s2_2 = __p2_2; \
+ poly16x8_t __ret_2; \
+ __ret_2 = vsetq_lane_p16(vget_lane_p16(__s2_2, __p3_2), __s0_2, __p1_2); \
+ __ret_2; \
+})
+#else
+#define vcopyq_lane_p16(__p0_3, __p1_3, __p2_3, __p3_3) __extension__ ({ \
+ poly16x8_t __s0_3 = __p0_3; \
+ poly16x4_t __s2_3 = __p2_3; \
+ poly16x8_t __rev0_3; __rev0_3 = __builtin_shufflevector(__s0_3, __s0_3, 7, 6, 5, 4, 3, 2, 1, 0); \
+ poly16x4_t __rev2_3; __rev2_3 = __builtin_shufflevector(__s2_3, __s2_3, 3, 2, 1, 0); \
+ poly16x8_t __ret_3; \
+ __ret_3 = __noswap_vsetq_lane_p16(__noswap_vget_lane_p16(__rev2_3, __p3_3), __rev0_3, __p1_3); \
+ __ret_3 = __builtin_shufflevector(__ret_3, __ret_3, 7, 6, 5, 4, 3, 2, 1, 0); \
+ __ret_3; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vcopyq_lane_u8(__p0_4, __p1_4, __p2_4, __p3_4) __extension__ ({ \
+ uint8x16_t __s0_4 = __p0_4; \
+ uint8x8_t __s2_4 = __p2_4; \
+ uint8x16_t __ret_4; \
+ __ret_4 = vsetq_lane_u8(vget_lane_u8(__s2_4, __p3_4), __s0_4, __p1_4); \
+ __ret_4; \
+})
+#else
+#define vcopyq_lane_u8(__p0_5, __p1_5, __p2_5, __p3_5) __extension__ ({ \
+ uint8x16_t __s0_5 = __p0_5; \
+ uint8x8_t __s2_5 = __p2_5; \
+ uint8x16_t __rev0_5; __rev0_5 = __builtin_shufflevector(__s0_5, __s0_5, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
+ uint8x8_t __rev2_5; __rev2_5 = __builtin_shufflevector(__s2_5, __s2_5, 7, 6, 5, 4, 3, 2, 1, 0); \
+ uint8x16_t __ret_5; \
+ __ret_5 = __noswap_vsetq_lane_u8(__noswap_vget_lane_u8(__rev2_5, __p3_5), __rev0_5, __p1_5); \
+ __ret_5 = __builtin_shufflevector(__ret_5, __ret_5, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
+ __ret_5; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vcopyq_lane_u32(__p0_6, __p1_6, __p2_6, __p3_6) __extension__ ({ \
+ uint32x4_t __s0_6 = __p0_6; \
+ uint32x2_t __s2_6 = __p2_6; \
+ uint32x4_t __ret_6; \
+ __ret_6 = vsetq_lane_u32(vget_lane_u32(__s2_6, __p3_6), __s0_6, __p1_6); \
+ __ret_6; \
+})
+#else
+#define vcopyq_lane_u32(__p0_7, __p1_7, __p2_7, __p3_7) __extension__ ({ \
+ uint32x4_t __s0_7 = __p0_7; \
+ uint32x2_t __s2_7 = __p2_7; \
+ uint32x4_t __rev0_7; __rev0_7 = __builtin_shufflevector(__s0_7, __s0_7, 3, 2, 1, 0); \
+ uint32x2_t __rev2_7; __rev2_7 = __builtin_shufflevector(__s2_7, __s2_7, 1, 0); \
+ uint32x4_t __ret_7; \
+ __ret_7 = __noswap_vsetq_lane_u32(__noswap_vget_lane_u32(__rev2_7, __p3_7), __rev0_7, __p1_7); \
+ __ret_7 = __builtin_shufflevector(__ret_7, __ret_7, 3, 2, 1, 0); \
+ __ret_7; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vcopyq_lane_u64(__p0_8, __p1_8, __p2_8, __p3_8) __extension__ ({ \
+ uint64x2_t __s0_8 = __p0_8; \
+ uint64x1_t __s2_8 = __p2_8; \
+ uint64x2_t __ret_8; \
+ __ret_8 = vsetq_lane_u64(vget_lane_u64(__s2_8, __p3_8), __s0_8, __p1_8); \
+ __ret_8; \
+})
+#else
+#define vcopyq_lane_u64(__p0_9, __p1_9, __p2_9, __p3_9) __extension__ ({ \
+ uint64x2_t __s0_9 = __p0_9; \
+ uint64x1_t __s2_9 = __p2_9; \
+ uint64x2_t __rev0_9; __rev0_9 = __builtin_shufflevector(__s0_9, __s0_9, 1, 0); \
+ uint64x2_t __ret_9; \
+ __ret_9 = __noswap_vsetq_lane_u64(__noswap_vget_lane_u64(__s2_9, __p3_9), __rev0_9, __p1_9); \
+ __ret_9 = __builtin_shufflevector(__ret_9, __ret_9, 1, 0); \
+ __ret_9; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vcopyq_lane_u16(__p0_10, __p1_10, __p2_10, __p3_10) __extension__ ({ \
+ uint16x8_t __s0_10 = __p0_10; \
+ uint16x4_t __s2_10 = __p2_10; \
+ uint16x8_t __ret_10; \
+ __ret_10 = vsetq_lane_u16(vget_lane_u16(__s2_10, __p3_10), __s0_10, __p1_10); \
+ __ret_10; \
+})
+#else
+#define vcopyq_lane_u16(__p0_11, __p1_11, __p2_11, __p3_11) __extension__ ({ \
+ uint16x8_t __s0_11 = __p0_11; \
+ uint16x4_t __s2_11 = __p2_11; \
+ uint16x8_t __rev0_11; __rev0_11 = __builtin_shufflevector(__s0_11, __s0_11, 7, 6, 5, 4, 3, 2, 1, 0); \
+ uint16x4_t __rev2_11; __rev2_11 = __builtin_shufflevector(__s2_11, __s2_11, 3, 2, 1, 0); \
+ uint16x8_t __ret_11; \
+ __ret_11 = __noswap_vsetq_lane_u16(__noswap_vget_lane_u16(__rev2_11, __p3_11), __rev0_11, __p1_11); \
+ __ret_11 = __builtin_shufflevector(__ret_11, __ret_11, 7, 6, 5, 4, 3, 2, 1, 0); \
+ __ret_11; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vcopyq_lane_s8(__p0_12, __p1_12, __p2_12, __p3_12) __extension__ ({ \
+ int8x16_t __s0_12 = __p0_12; \
+ int8x8_t __s2_12 = __p2_12; \
+ int8x16_t __ret_12; \
+ __ret_12 = vsetq_lane_s8(vget_lane_s8(__s2_12, __p3_12), __s0_12, __p1_12); \
+ __ret_12; \
+})
+#else
+#define vcopyq_lane_s8(__p0_13, __p1_13, __p2_13, __p3_13) __extension__ ({ \
+ int8x16_t __s0_13 = __p0_13; \
+ int8x8_t __s2_13 = __p2_13; \
+ int8x16_t __rev0_13; __rev0_13 = __builtin_shufflevector(__s0_13, __s0_13, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
+ int8x8_t __rev2_13; __rev2_13 = __builtin_shufflevector(__s2_13, __s2_13, 7, 6, 5, 4, 3, 2, 1, 0); \
+ int8x16_t __ret_13; \
+ __ret_13 = __noswap_vsetq_lane_s8(__noswap_vget_lane_s8(__rev2_13, __p3_13), __rev0_13, __p1_13); \
+ __ret_13 = __builtin_shufflevector(__ret_13, __ret_13, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
+ __ret_13; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vcopyq_lane_f32(__p0_14, __p1_14, __p2_14, __p3_14) __extension__ ({ \
+ float32x4_t __s0_14 = __p0_14; \
+ float32x2_t __s2_14 = __p2_14; \
+ float32x4_t __ret_14; \
+ __ret_14 = vsetq_lane_f32(vget_lane_f32(__s2_14, __p3_14), __s0_14, __p1_14); \
+ __ret_14; \
+})
+#else
+#define vcopyq_lane_f32(__p0_15, __p1_15, __p2_15, __p3_15) __extension__ ({ \
+ float32x4_t __s0_15 = __p0_15; \
+ float32x2_t __s2_15 = __p2_15; \
+ float32x4_t __rev0_15; __rev0_15 = __builtin_shufflevector(__s0_15, __s0_15, 3, 2, 1, 0); \
+ float32x2_t __rev2_15; __rev2_15 = __builtin_shufflevector(__s2_15, __s2_15, 1, 0); \
+ float32x4_t __ret_15; \
+ __ret_15 = __noswap_vsetq_lane_f32(__noswap_vget_lane_f32(__rev2_15, __p3_15), __rev0_15, __p1_15); \
+ __ret_15 = __builtin_shufflevector(__ret_15, __ret_15, 3, 2, 1, 0); \
+ __ret_15; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vcopyq_lane_s32(__p0_16, __p1_16, __p2_16, __p3_16) __extension__ ({ \
+ int32x4_t __s0_16 = __p0_16; \
+ int32x2_t __s2_16 = __p2_16; \
+ int32x4_t __ret_16; \
+ __ret_16 = vsetq_lane_s32(vget_lane_s32(__s2_16, __p3_16), __s0_16, __p1_16); \
+ __ret_16; \
+})
+#else
+#define vcopyq_lane_s32(__p0_17, __p1_17, __p2_17, __p3_17) __extension__ ({ \
+ int32x4_t __s0_17 = __p0_17; \
+ int32x2_t __s2_17 = __p2_17; \
+ int32x4_t __rev0_17; __rev0_17 = __builtin_shufflevector(__s0_17, __s0_17, 3, 2, 1, 0); \
+ int32x2_t __rev2_17; __rev2_17 = __builtin_shufflevector(__s2_17, __s2_17, 1, 0); \
+ int32x4_t __ret_17; \
+ __ret_17 = __noswap_vsetq_lane_s32(__noswap_vget_lane_s32(__rev2_17, __p3_17), __rev0_17, __p1_17); \
+ __ret_17 = __builtin_shufflevector(__ret_17, __ret_17, 3, 2, 1, 0); \
+ __ret_17; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vcopyq_lane_s64(__p0_18, __p1_18, __p2_18, __p3_18) __extension__ ({ \
+ int64x2_t __s0_18 = __p0_18; \
+ int64x1_t __s2_18 = __p2_18; \
+ int64x2_t __ret_18; \
+ __ret_18 = vsetq_lane_s64(vget_lane_s64(__s2_18, __p3_18), __s0_18, __p1_18); \
+ __ret_18; \
+})
+#else
+#define vcopyq_lane_s64(__p0_19, __p1_19, __p2_19, __p3_19) __extension__ ({ \
+ int64x2_t __s0_19 = __p0_19; \
+ int64x1_t __s2_19 = __p2_19; \
+ int64x2_t __rev0_19; __rev0_19 = __builtin_shufflevector(__s0_19, __s0_19, 1, 0); \
+ int64x2_t __ret_19; \
+ __ret_19 = __noswap_vsetq_lane_s64(__noswap_vget_lane_s64(__s2_19, __p3_19), __rev0_19, __p1_19); \
+ __ret_19 = __builtin_shufflevector(__ret_19, __ret_19, 1, 0); \
+ __ret_19; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vcopyq_lane_s16(__p0_20, __p1_20, __p2_20, __p3_20) __extension__ ({ \
+ int16x8_t __s0_20 = __p0_20; \
+ int16x4_t __s2_20 = __p2_20; \
+ int16x8_t __ret_20; \
+ __ret_20 = vsetq_lane_s16(vget_lane_s16(__s2_20, __p3_20), __s0_20, __p1_20); \
+ __ret_20; \
+})
+#else
+#define vcopyq_lane_s16(__p0_21, __p1_21, __p2_21, __p3_21) __extension__ ({ \
+ int16x8_t __s0_21 = __p0_21; \
+ int16x4_t __s2_21 = __p2_21; \
+ int16x8_t __rev0_21; __rev0_21 = __builtin_shufflevector(__s0_21, __s0_21, 7, 6, 5, 4, 3, 2, 1, 0); \
+ int16x4_t __rev2_21; __rev2_21 = __builtin_shufflevector(__s2_21, __s2_21, 3, 2, 1, 0); \
+ int16x8_t __ret_21; \
+ __ret_21 = __noswap_vsetq_lane_s16(__noswap_vget_lane_s16(__rev2_21, __p3_21), __rev0_21, __p1_21); \
+ __ret_21 = __builtin_shufflevector(__ret_21, __ret_21, 7, 6, 5, 4, 3, 2, 1, 0); \
+ __ret_21; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vcopy_lane_p8(__p0_22, __p1_22, __p2_22, __p3_22) __extension__ ({ \
+ poly8x8_t __s0_22 = __p0_22; \
+ poly8x8_t __s2_22 = __p2_22; \
+ poly8x8_t __ret_22; \
+ __ret_22 = vset_lane_p8(vget_lane_p8(__s2_22, __p3_22), __s0_22, __p1_22); \
+ __ret_22; \
+})
+#else
+#define vcopy_lane_p8(__p0_23, __p1_23, __p2_23, __p3_23) __extension__ ({ \
+ poly8x8_t __s0_23 = __p0_23; \
+ poly8x8_t __s2_23 = __p2_23; \
+ poly8x8_t __rev0_23; __rev0_23 = __builtin_shufflevector(__s0_23, __s0_23, 7, 6, 5, 4, 3, 2, 1, 0); \
+ poly8x8_t __rev2_23; __rev2_23 = __builtin_shufflevector(__s2_23, __s2_23, 7, 6, 5, 4, 3, 2, 1, 0); \
+ poly8x8_t __ret_23; \
+ __ret_23 = __noswap_vset_lane_p8(__noswap_vget_lane_p8(__rev2_23, __p3_23), __rev0_23, __p1_23); \
+ __ret_23 = __builtin_shufflevector(__ret_23, __ret_23, 7, 6, 5, 4, 3, 2, 1, 0); \
+ __ret_23; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vcopy_lane_p16(__p0_24, __p1_24, __p2_24, __p3_24) __extension__ ({ \
+ poly16x4_t __s0_24 = __p0_24; \
+ poly16x4_t __s2_24 = __p2_24; \
+ poly16x4_t __ret_24; \
+ __ret_24 = vset_lane_p16(vget_lane_p16(__s2_24, __p3_24), __s0_24, __p1_24); \
+ __ret_24; \
+})
+#else
+#define vcopy_lane_p16(__p0_25, __p1_25, __p2_25, __p3_25) __extension__ ({ \
+ poly16x4_t __s0_25 = __p0_25; \
+ poly16x4_t __s2_25 = __p2_25; \
+ poly16x4_t __rev0_25; __rev0_25 = __builtin_shufflevector(__s0_25, __s0_25, 3, 2, 1, 0); \
+ poly16x4_t __rev2_25; __rev2_25 = __builtin_shufflevector(__s2_25, __s2_25, 3, 2, 1, 0); \
+ poly16x4_t __ret_25; \
+ __ret_25 = __noswap_vset_lane_p16(__noswap_vget_lane_p16(__rev2_25, __p3_25), __rev0_25, __p1_25); \
+ __ret_25 = __builtin_shufflevector(__ret_25, __ret_25, 3, 2, 1, 0); \
+ __ret_25; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vcopy_lane_u8(__p0_26, __p1_26, __p2_26, __p3_26) __extension__ ({ \
+ uint8x8_t __s0_26 = __p0_26; \
+ uint8x8_t __s2_26 = __p2_26; \
+ uint8x8_t __ret_26; \
+ __ret_26 = vset_lane_u8(vget_lane_u8(__s2_26, __p3_26), __s0_26, __p1_26); \
+ __ret_26; \
+})
+#else
+#define vcopy_lane_u8(__p0_27, __p1_27, __p2_27, __p3_27) __extension__ ({ \
+ uint8x8_t __s0_27 = __p0_27; \
+ uint8x8_t __s2_27 = __p2_27; \
+ uint8x8_t __rev0_27; __rev0_27 = __builtin_shufflevector(__s0_27, __s0_27, 7, 6, 5, 4, 3, 2, 1, 0); \
+ uint8x8_t __rev2_27; __rev2_27 = __builtin_shufflevector(__s2_27, __s2_27, 7, 6, 5, 4, 3, 2, 1, 0); \
+ uint8x8_t __ret_27; \
+ __ret_27 = __noswap_vset_lane_u8(__noswap_vget_lane_u8(__rev2_27, __p3_27), __rev0_27, __p1_27); \
+ __ret_27 = __builtin_shufflevector(__ret_27, __ret_27, 7, 6, 5, 4, 3, 2, 1, 0); \
+ __ret_27; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vcopy_lane_u32(__p0_28, __p1_28, __p2_28, __p3_28) __extension__ ({ \
+ uint32x2_t __s0_28 = __p0_28; \
+ uint32x2_t __s2_28 = __p2_28; \
+ uint32x2_t __ret_28; \
+ __ret_28 = vset_lane_u32(vget_lane_u32(__s2_28, __p3_28), __s0_28, __p1_28); \
+ __ret_28; \
+})
+#else
+#define vcopy_lane_u32(__p0_29, __p1_29, __p2_29, __p3_29) __extension__ ({ \
+ uint32x2_t __s0_29 = __p0_29; \
+ uint32x2_t __s2_29 = __p2_29; \
+ uint32x2_t __rev0_29; __rev0_29 = __builtin_shufflevector(__s0_29, __s0_29, 1, 0); \
+ uint32x2_t __rev2_29; __rev2_29 = __builtin_shufflevector(__s2_29, __s2_29, 1, 0); \
+ uint32x2_t __ret_29; \
+ __ret_29 = __noswap_vset_lane_u32(__noswap_vget_lane_u32(__rev2_29, __p3_29), __rev0_29, __p1_29); \
+ __ret_29 = __builtin_shufflevector(__ret_29, __ret_29, 1, 0); \
+ __ret_29; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vcopy_lane_u64(__p0_30, __p1_30, __p2_30, __p3_30) __extension__ ({ \
+ uint64x1_t __s0_30 = __p0_30; \
+ uint64x1_t __s2_30 = __p2_30; \
+ uint64x1_t __ret_30; \
+ __ret_30 = vset_lane_u64(vget_lane_u64(__s2_30, __p3_30), __s0_30, __p1_30); \
+ __ret_30; \
+})
+#else
+#define vcopy_lane_u64(__p0_31, __p1_31, __p2_31, __p3_31) __extension__ ({ \
+ uint64x1_t __s0_31 = __p0_31; \
+ uint64x1_t __s2_31 = __p2_31; \
+ uint64x1_t __ret_31; \
+ __ret_31 = __noswap_vset_lane_u64(__noswap_vget_lane_u64(__s2_31, __p3_31), __s0_31, __p1_31); \
+ __ret_31; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vcopy_lane_u16(__p0_32, __p1_32, __p2_32, __p3_32) __extension__ ({ \
+ uint16x4_t __s0_32 = __p0_32; \
+ uint16x4_t __s2_32 = __p2_32; \
+ uint16x4_t __ret_32; \
+ __ret_32 = vset_lane_u16(vget_lane_u16(__s2_32, __p3_32), __s0_32, __p1_32); \
+ __ret_32; \
+})
+#else
+#define vcopy_lane_u16(__p0_33, __p1_33, __p2_33, __p3_33) __extension__ ({ \
+ uint16x4_t __s0_33 = __p0_33; \
+ uint16x4_t __s2_33 = __p2_33; \
+ uint16x4_t __rev0_33; __rev0_33 = __builtin_shufflevector(__s0_33, __s0_33, 3, 2, 1, 0); \
+ uint16x4_t __rev2_33; __rev2_33 = __builtin_shufflevector(__s2_33, __s2_33, 3, 2, 1, 0); \
+ uint16x4_t __ret_33; \
+ __ret_33 = __noswap_vset_lane_u16(__noswap_vget_lane_u16(__rev2_33, __p3_33), __rev0_33, __p1_33); \
+ __ret_33 = __builtin_shufflevector(__ret_33, __ret_33, 3, 2, 1, 0); \
+ __ret_33; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vcopy_lane_s8(__p0_34, __p1_34, __p2_34, __p3_34) __extension__ ({ \
+ int8x8_t __s0_34 = __p0_34; \
+ int8x8_t __s2_34 = __p2_34; \
+ int8x8_t __ret_34; \
+ __ret_34 = vset_lane_s8(vget_lane_s8(__s2_34, __p3_34), __s0_34, __p1_34); \
+ __ret_34; \
+})
+#else
+#define vcopy_lane_s8(__p0_35, __p1_35, __p2_35, __p3_35) __extension__ ({ \
+ int8x8_t __s0_35 = __p0_35; \
+ int8x8_t __s2_35 = __p2_35; \
+ int8x8_t __rev0_35; __rev0_35 = __builtin_shufflevector(__s0_35, __s0_35, 7, 6, 5, 4, 3, 2, 1, 0); \
+ int8x8_t __rev2_35; __rev2_35 = __builtin_shufflevector(__s2_35, __s2_35, 7, 6, 5, 4, 3, 2, 1, 0); \
+ int8x8_t __ret_35; \
+ __ret_35 = __noswap_vset_lane_s8(__noswap_vget_lane_s8(__rev2_35, __p3_35), __rev0_35, __p1_35); \
+ __ret_35 = __builtin_shufflevector(__ret_35, __ret_35, 7, 6, 5, 4, 3, 2, 1, 0); \
+ __ret_35; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vcopy_lane_f32(__p0_36, __p1_36, __p2_36, __p3_36) __extension__ ({ \
+ float32x2_t __s0_36 = __p0_36; \
+ float32x2_t __s2_36 = __p2_36; \
+ float32x2_t __ret_36; \
+ __ret_36 = vset_lane_f32(vget_lane_f32(__s2_36, __p3_36), __s0_36, __p1_36); \
+ __ret_36; \
+})
+#else
+#define vcopy_lane_f32(__p0_37, __p1_37, __p2_37, __p3_37) __extension__ ({ \
+ float32x2_t __s0_37 = __p0_37; \
+ float32x2_t __s2_37 = __p2_37; \
+ float32x2_t __rev0_37; __rev0_37 = __builtin_shufflevector(__s0_37, __s0_37, 1, 0); \
+ float32x2_t __rev2_37; __rev2_37 = __builtin_shufflevector(__s2_37, __s2_37, 1, 0); \
+ float32x2_t __ret_37; \
+ __ret_37 = __noswap_vset_lane_f32(__noswap_vget_lane_f32(__rev2_37, __p3_37), __rev0_37, __p1_37); \
+ __ret_37 = __builtin_shufflevector(__ret_37, __ret_37, 1, 0); \
+ __ret_37; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vcopy_lane_s32(__p0_38, __p1_38, __p2_38, __p3_38) __extension__ ({ \
+ int32x2_t __s0_38 = __p0_38; \
+ int32x2_t __s2_38 = __p2_38; \
+ int32x2_t __ret_38; \
+ __ret_38 = vset_lane_s32(vget_lane_s32(__s2_38, __p3_38), __s0_38, __p1_38); \
+ __ret_38; \
+})
+#else
+#define vcopy_lane_s32(__p0_39, __p1_39, __p2_39, __p3_39) __extension__ ({ \
+ int32x2_t __s0_39 = __p0_39; \
+ int32x2_t __s2_39 = __p2_39; \
+ int32x2_t __rev0_39; __rev0_39 = __builtin_shufflevector(__s0_39, __s0_39, 1, 0); \
+ int32x2_t __rev2_39; __rev2_39 = __builtin_shufflevector(__s2_39, __s2_39, 1, 0); \
+ int32x2_t __ret_39; \
+ __ret_39 = __noswap_vset_lane_s32(__noswap_vget_lane_s32(__rev2_39, __p3_39), __rev0_39, __p1_39); \
+ __ret_39 = __builtin_shufflevector(__ret_39, __ret_39, 1, 0); \
+ __ret_39; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vcopy_lane_s64(__p0_40, __p1_40, __p2_40, __p3_40) __extension__ ({ \
+ int64x1_t __s0_40 = __p0_40; \
+ int64x1_t __s2_40 = __p2_40; \
+ int64x1_t __ret_40; \
+ __ret_40 = vset_lane_s64(vget_lane_s64(__s2_40, __p3_40), __s0_40, __p1_40); \
+ __ret_40; \
+})
+#else
+#define vcopy_lane_s64(__p0_41, __p1_41, __p2_41, __p3_41) __extension__ ({ \
+ int64x1_t __s0_41 = __p0_41; \
+ int64x1_t __s2_41 = __p2_41; \
+ int64x1_t __ret_41; \
+ __ret_41 = __noswap_vset_lane_s64(__noswap_vget_lane_s64(__s2_41, __p3_41), __s0_41, __p1_41); \
+ __ret_41; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vcopy_lane_s16(__p0_42, __p1_42, __p2_42, __p3_42) __extension__ ({ \
+ int16x4_t __s0_42 = __p0_42; \
+ int16x4_t __s2_42 = __p2_42; \
+ int16x4_t __ret_42; \
+ __ret_42 = vset_lane_s16(vget_lane_s16(__s2_42, __p3_42), __s0_42, __p1_42); \
+ __ret_42; \
+})
+#else
+#define vcopy_lane_s16(__p0_43, __p1_43, __p2_43, __p3_43) __extension__ ({ \
+ int16x4_t __s0_43 = __p0_43; \
+ int16x4_t __s2_43 = __p2_43; \
+ int16x4_t __rev0_43; __rev0_43 = __builtin_shufflevector(__s0_43, __s0_43, 3, 2, 1, 0); \
+ int16x4_t __rev2_43; __rev2_43 = __builtin_shufflevector(__s2_43, __s2_43, 3, 2, 1, 0); \
+ int16x4_t __ret_43; \
+ __ret_43 = __noswap_vset_lane_s16(__noswap_vget_lane_s16(__rev2_43, __p3_43), __rev0_43, __p1_43); \
+ __ret_43 = __builtin_shufflevector(__ret_43, __ret_43, 3, 2, 1, 0); \
+ __ret_43; \
+})
+#endif
+
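+/*
+ * The _laneq variants below differ only in taking the source lane from
+ * a 128-bit vector, hence vgetq_lane instead of vget_lane.
+ */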
+#ifdef __LITTLE_ENDIAN__
+#define vcopyq_laneq_p8(__p0_44, __p1_44, __p2_44, __p3_44) __extension__ ({ \
+ poly8x16_t __s0_44 = __p0_44; \
+ poly8x16_t __s2_44 = __p2_44; \
+ poly8x16_t __ret_44; \
+ __ret_44 = vsetq_lane_p8(vgetq_lane_p8(__s2_44, __p3_44), __s0_44, __p1_44); \
+ __ret_44; \
+})
+#else
+#define vcopyq_laneq_p8(__p0_45, __p1_45, __p2_45, __p3_45) __extension__ ({ \
+ poly8x16_t __s0_45 = __p0_45; \
+ poly8x16_t __s2_45 = __p2_45; \
+ poly8x16_t __rev0_45; __rev0_45 = __builtin_shufflevector(__s0_45, __s0_45, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
+ poly8x16_t __rev2_45; __rev2_45 = __builtin_shufflevector(__s2_45, __s2_45, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
+ poly8x16_t __ret_45; \
+ __ret_45 = __noswap_vsetq_lane_p8(__noswap_vgetq_lane_p8(__rev2_45, __p3_45), __rev0_45, __p1_45); \
+ __ret_45 = __builtin_shufflevector(__ret_45, __ret_45, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
+ __ret_45; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vcopyq_laneq_p16(__p0_46, __p1_46, __p2_46, __p3_46) __extension__ ({ \
+ poly16x8_t __s0_46 = __p0_46; \
+ poly16x8_t __s2_46 = __p2_46; \
+ poly16x8_t __ret_46; \
+ __ret_46 = vsetq_lane_p16(vgetq_lane_p16(__s2_46, __p3_46), __s0_46, __p1_46); \
+ __ret_46; \
+})
+#else
+#define vcopyq_laneq_p16(__p0_47, __p1_47, __p2_47, __p3_47) __extension__ ({ \
+ poly16x8_t __s0_47 = __p0_47; \
+ poly16x8_t __s2_47 = __p2_47; \
+ poly16x8_t __rev0_47; __rev0_47 = __builtin_shufflevector(__s0_47, __s0_47, 7, 6, 5, 4, 3, 2, 1, 0); \
+ poly16x8_t __rev2_47; __rev2_47 = __builtin_shufflevector(__s2_47, __s2_47, 7, 6, 5, 4, 3, 2, 1, 0); \
+ poly16x8_t __ret_47; \
+ __ret_47 = __noswap_vsetq_lane_p16(__noswap_vgetq_lane_p16(__rev2_47, __p3_47), __rev0_47, __p1_47); \
+ __ret_47 = __builtin_shufflevector(__ret_47, __ret_47, 7, 6, 5, 4, 3, 2, 1, 0); \
+ __ret_47; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vcopyq_laneq_u8(__p0_48, __p1_48, __p2_48, __p3_48) __extension__ ({ \
+ uint8x16_t __s0_48 = __p0_48; \
+ uint8x16_t __s2_48 = __p2_48; \
+ uint8x16_t __ret_48; \
+ __ret_48 = vsetq_lane_u8(vgetq_lane_u8(__s2_48, __p3_48), __s0_48, __p1_48); \
+ __ret_48; \
+})
+#else
+#define vcopyq_laneq_u8(__p0_49, __p1_49, __p2_49, __p3_49) __extension__ ({ \
+ uint8x16_t __s0_49 = __p0_49; \
+ uint8x16_t __s2_49 = __p2_49; \
+ uint8x16_t __rev0_49; __rev0_49 = __builtin_shufflevector(__s0_49, __s0_49, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
+ uint8x16_t __rev2_49; __rev2_49 = __builtin_shufflevector(__s2_49, __s2_49, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
+ uint8x16_t __ret_49; \
+ __ret_49 = __noswap_vsetq_lane_u8(__noswap_vgetq_lane_u8(__rev2_49, __p3_49), __rev0_49, __p1_49); \
+ __ret_49 = __builtin_shufflevector(__ret_49, __ret_49, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
+ __ret_49; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vcopyq_laneq_u32(__p0_50, __p1_50, __p2_50, __p3_50) __extension__ ({ \
+ uint32x4_t __s0_50 = __p0_50; \
+ uint32x4_t __s2_50 = __p2_50; \
+ uint32x4_t __ret_50; \
+ __ret_50 = vsetq_lane_u32(vgetq_lane_u32(__s2_50, __p3_50), __s0_50, __p1_50); \
+ __ret_50; \
+})
+#else
+#define vcopyq_laneq_u32(__p0_51, __p1_51, __p2_51, __p3_51) __extension__ ({ \
+ uint32x4_t __s0_51 = __p0_51; \
+ uint32x4_t __s2_51 = __p2_51; \
+ uint32x4_t __rev0_51; __rev0_51 = __builtin_shufflevector(__s0_51, __s0_51, 3, 2, 1, 0); \
+ uint32x4_t __rev2_51; __rev2_51 = __builtin_shufflevector(__s2_51, __s2_51, 3, 2, 1, 0); \
+ uint32x4_t __ret_51; \
+ __ret_51 = __noswap_vsetq_lane_u32(__noswap_vgetq_lane_u32(__rev2_51, __p3_51), __rev0_51, __p1_51); \
+ __ret_51 = __builtin_shufflevector(__ret_51, __ret_51, 3, 2, 1, 0); \
+ __ret_51; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vcopyq_laneq_u64(__p0_52, __p1_52, __p2_52, __p3_52) __extension__ ({ \
+ uint64x2_t __s0_52 = __p0_52; \
+ uint64x2_t __s2_52 = __p2_52; \
+ uint64x2_t __ret_52; \
+ __ret_52 = vsetq_lane_u64(vgetq_lane_u64(__s2_52, __p3_52), __s0_52, __p1_52); \
+ __ret_52; \
+})
+#else
+#define vcopyq_laneq_u64(__p0_53, __p1_53, __p2_53, __p3_53) __extension__ ({ \
+ uint64x2_t __s0_53 = __p0_53; \
+ uint64x2_t __s2_53 = __p2_53; \
+ uint64x2_t __rev0_53; __rev0_53 = __builtin_shufflevector(__s0_53, __s0_53, 1, 0); \
+ uint64x2_t __rev2_53; __rev2_53 = __builtin_shufflevector(__s2_53, __s2_53, 1, 0); \
+ uint64x2_t __ret_53; \
+ __ret_53 = __noswap_vsetq_lane_u64(__noswap_vgetq_lane_u64(__rev2_53, __p3_53), __rev0_53, __p1_53); \
+ __ret_53 = __builtin_shufflevector(__ret_53, __ret_53, 1, 0); \
+ __ret_53; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vcopyq_laneq_u16(__p0_54, __p1_54, __p2_54, __p3_54) __extension__ ({ \
+ uint16x8_t __s0_54 = __p0_54; \
+ uint16x8_t __s2_54 = __p2_54; \
+ uint16x8_t __ret_54; \
+ __ret_54 = vsetq_lane_u16(vgetq_lane_u16(__s2_54, __p3_54), __s0_54, __p1_54); \
+ __ret_54; \
+})
+#else
+#define vcopyq_laneq_u16(__p0_55, __p1_55, __p2_55, __p3_55) __extension__ ({ \
+ uint16x8_t __s0_55 = __p0_55; \
+ uint16x8_t __s2_55 = __p2_55; \
+ uint16x8_t __rev0_55; __rev0_55 = __builtin_shufflevector(__s0_55, __s0_55, 7, 6, 5, 4, 3, 2, 1, 0); \
+ uint16x8_t __rev2_55; __rev2_55 = __builtin_shufflevector(__s2_55, __s2_55, 7, 6, 5, 4, 3, 2, 1, 0); \
+ uint16x8_t __ret_55; \
+ __ret_55 = __noswap_vsetq_lane_u16(__noswap_vgetq_lane_u16(__rev2_55, __p3_55), __rev0_55, __p1_55); \
+ __ret_55 = __builtin_shufflevector(__ret_55, __ret_55, 7, 6, 5, 4, 3, 2, 1, 0); \
+ __ret_55; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vcopyq_laneq_s8(__p0_56, __p1_56, __p2_56, __p3_56) __extension__ ({ \
+ int8x16_t __s0_56 = __p0_56; \
+ int8x16_t __s2_56 = __p2_56; \
+ int8x16_t __ret_56; \
+ __ret_56 = vsetq_lane_s8(vgetq_lane_s8(__s2_56, __p3_56), __s0_56, __p1_56); \
+ __ret_56; \
+})
+#else
+#define vcopyq_laneq_s8(__p0_57, __p1_57, __p2_57, __p3_57) __extension__ ({ \
+ int8x16_t __s0_57 = __p0_57; \
+ int8x16_t __s2_57 = __p2_57; \
+ int8x16_t __rev0_57; __rev0_57 = __builtin_shufflevector(__s0_57, __s0_57, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
+ int8x16_t __rev2_57; __rev2_57 = __builtin_shufflevector(__s2_57, __s2_57, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
+ int8x16_t __ret_57; \
+ __ret_57 = __noswap_vsetq_lane_s8(__noswap_vgetq_lane_s8(__rev2_57, __p3_57), __rev0_57, __p1_57); \
+ __ret_57 = __builtin_shufflevector(__ret_57, __ret_57, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
+ __ret_57; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vcopyq_laneq_f32(__p0_58, __p1_58, __p2_58, __p3_58) __extension__ ({ \
+ float32x4_t __s0_58 = __p0_58; \
+ float32x4_t __s2_58 = __p2_58; \
+ float32x4_t __ret_58; \
+ __ret_58 = vsetq_lane_f32(vgetq_lane_f32(__s2_58, __p3_58), __s0_58, __p1_58); \
+ __ret_58; \
+})
+#else
+#define vcopyq_laneq_f32(__p0_59, __p1_59, __p2_59, __p3_59) __extension__ ({ \
+ float32x4_t __s0_59 = __p0_59; \
+ float32x4_t __s2_59 = __p2_59; \
+ float32x4_t __rev0_59; __rev0_59 = __builtin_shufflevector(__s0_59, __s0_59, 3, 2, 1, 0); \
+ float32x4_t __rev2_59; __rev2_59 = __builtin_shufflevector(__s2_59, __s2_59, 3, 2, 1, 0); \
+ float32x4_t __ret_59; \
+ __ret_59 = __noswap_vsetq_lane_f32(__noswap_vgetq_lane_f32(__rev2_59, __p3_59), __rev0_59, __p1_59); \
+ __ret_59 = __builtin_shufflevector(__ret_59, __ret_59, 3, 2, 1, 0); \
+ __ret_59; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vcopyq_laneq_s32(__p0_60, __p1_60, __p2_60, __p3_60) __extension__ ({ \
+ int32x4_t __s0_60 = __p0_60; \
+ int32x4_t __s2_60 = __p2_60; \
+ int32x4_t __ret_60; \
+ __ret_60 = vsetq_lane_s32(vgetq_lane_s32(__s2_60, __p3_60), __s0_60, __p1_60); \
+ __ret_60; \
+})
+#else
+#define vcopyq_laneq_s32(__p0_61, __p1_61, __p2_61, __p3_61) __extension__ ({ \
+ int32x4_t __s0_61 = __p0_61; \
+ int32x4_t __s2_61 = __p2_61; \
+ int32x4_t __rev0_61; __rev0_61 = __builtin_shufflevector(__s0_61, __s0_61, 3, 2, 1, 0); \
+ int32x4_t __rev2_61; __rev2_61 = __builtin_shufflevector(__s2_61, __s2_61, 3, 2, 1, 0); \
+ int32x4_t __ret_61; \
+ __ret_61 = __noswap_vsetq_lane_s32(__noswap_vgetq_lane_s32(__rev2_61, __p3_61), __rev0_61, __p1_61); \
+ __ret_61 = __builtin_shufflevector(__ret_61, __ret_61, 3, 2, 1, 0); \
+ __ret_61; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vcopyq_laneq_s64(__p0_62, __p1_62, __p2_62, __p3_62) __extension__ ({ \
+ int64x2_t __s0_62 = __p0_62; \
+ int64x2_t __s2_62 = __p2_62; \
+ int64x2_t __ret_62; \
+ __ret_62 = vsetq_lane_s64(vgetq_lane_s64(__s2_62, __p3_62), __s0_62, __p1_62); \
+ __ret_62; \
+})
+#else
+#define vcopyq_laneq_s64(__p0_63, __p1_63, __p2_63, __p3_63) __extension__ ({ \
+ int64x2_t __s0_63 = __p0_63; \
+ int64x2_t __s2_63 = __p2_63; \
+ int64x2_t __rev0_63; __rev0_63 = __builtin_shufflevector(__s0_63, __s0_63, 1, 0); \
+ int64x2_t __rev2_63; __rev2_63 = __builtin_shufflevector(__s2_63, __s2_63, 1, 0); \
+ int64x2_t __ret_63; \
+ __ret_63 = __noswap_vsetq_lane_s64(__noswap_vgetq_lane_s64(__rev2_63, __p3_63), __rev0_63, __p1_63); \
+ __ret_63 = __builtin_shufflevector(__ret_63, __ret_63, 1, 0); \
+ __ret_63; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vcopyq_laneq_s16(__p0_64, __p1_64, __p2_64, __p3_64) __extension__ ({ \
+ int16x8_t __s0_64 = __p0_64; \
+ int16x8_t __s2_64 = __p2_64; \
+ int16x8_t __ret_64; \
+ __ret_64 = vsetq_lane_s16(vgetq_lane_s16(__s2_64, __p3_64), __s0_64, __p1_64); \
+ __ret_64; \
+})
+#else
+#define vcopyq_laneq_s16(__p0_65, __p1_65, __p2_65, __p3_65) __extension__ ({ \
+ int16x8_t __s0_65 = __p0_65; \
+ int16x8_t __s2_65 = __p2_65; \
+ int16x8_t __rev0_65; __rev0_65 = __builtin_shufflevector(__s0_65, __s0_65, 7, 6, 5, 4, 3, 2, 1, 0); \
+ int16x8_t __rev2_65; __rev2_65 = __builtin_shufflevector(__s2_65, __s2_65, 7, 6, 5, 4, 3, 2, 1, 0); \
+ int16x8_t __ret_65; \
+ __ret_65 = __noswap_vsetq_lane_s16(__noswap_vgetq_lane_s16(__rev2_65, __p3_65), __rev0_65, __p1_65); \
+ __ret_65 = __builtin_shufflevector(__ret_65, __ret_65, 7, 6, 5, 4, 3, 2, 1, 0); \
+ __ret_65; \
+})
+#endif
+
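+/*
+ * vcopy_laneq_*: 64-bit destination, 128-bit source.  Single-lane
+ * operands (uint64x1_t, int64x1_t) need no lane reversal, so the
+ * big-endian expansions use them unswapped.
+ */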
+#ifdef __LITTLE_ENDIAN__
+#define vcopy_laneq_p8(__p0_66, __p1_66, __p2_66, __p3_66) __extension__ ({ \
+ poly8x8_t __s0_66 = __p0_66; \
+ poly8x16_t __s2_66 = __p2_66; \
+ poly8x8_t __ret_66; \
+ __ret_66 = vset_lane_p8(vgetq_lane_p8(__s2_66, __p3_66), __s0_66, __p1_66); \
+ __ret_66; \
+})
+#else
+#define vcopy_laneq_p8(__p0_67, __p1_67, __p2_67, __p3_67) __extension__ ({ \
+ poly8x8_t __s0_67 = __p0_67; \
+ poly8x16_t __s2_67 = __p2_67; \
+ poly8x8_t __rev0_67; __rev0_67 = __builtin_shufflevector(__s0_67, __s0_67, 7, 6, 5, 4, 3, 2, 1, 0); \
+ poly8x16_t __rev2_67; __rev2_67 = __builtin_shufflevector(__s2_67, __s2_67, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
+ poly8x8_t __ret_67; \
+ __ret_67 = __noswap_vset_lane_p8(__noswap_vgetq_lane_p8(__rev2_67, __p3_67), __rev0_67, __p1_67); \
+ __ret_67 = __builtin_shufflevector(__ret_67, __ret_67, 7, 6, 5, 4, 3, 2, 1, 0); \
+ __ret_67; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vcopy_laneq_p16(__p0_68, __p1_68, __p2_68, __p3_68) __extension__ ({ \
+ poly16x4_t __s0_68 = __p0_68; \
+ poly16x8_t __s2_68 = __p2_68; \
+ poly16x4_t __ret_68; \
+ __ret_68 = vset_lane_p16(vgetq_lane_p16(__s2_68, __p3_68), __s0_68, __p1_68); \
+ __ret_68; \
+})
+#else
+#define vcopy_laneq_p16(__p0_69, __p1_69, __p2_69, __p3_69) __extension__ ({ \
+ poly16x4_t __s0_69 = __p0_69; \
+ poly16x8_t __s2_69 = __p2_69; \
+ poly16x4_t __rev0_69; __rev0_69 = __builtin_shufflevector(__s0_69, __s0_69, 3, 2, 1, 0); \
+ poly16x8_t __rev2_69; __rev2_69 = __builtin_shufflevector(__s2_69, __s2_69, 7, 6, 5, 4, 3, 2, 1, 0); \
+ poly16x4_t __ret_69; \
+ __ret_69 = __noswap_vset_lane_p16(__noswap_vgetq_lane_p16(__rev2_69, __p3_69), __rev0_69, __p1_69); \
+ __ret_69 = __builtin_shufflevector(__ret_69, __ret_69, 3, 2, 1, 0); \
+ __ret_69; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vcopy_laneq_u8(__p0_70, __p1_70, __p2_70, __p3_70) __extension__ ({ \
+ uint8x8_t __s0_70 = __p0_70; \
+ uint8x16_t __s2_70 = __p2_70; \
+ uint8x8_t __ret_70; \
+ __ret_70 = vset_lane_u8(vgetq_lane_u8(__s2_70, __p3_70), __s0_70, __p1_70); \
+ __ret_70; \
+})
+#else
+#define vcopy_laneq_u8(__p0_71, __p1_71, __p2_71, __p3_71) __extension__ ({ \
+ uint8x8_t __s0_71 = __p0_71; \
+ uint8x16_t __s2_71 = __p2_71; \
+ uint8x8_t __rev0_71; __rev0_71 = __builtin_shufflevector(__s0_71, __s0_71, 7, 6, 5, 4, 3, 2, 1, 0); \
+ uint8x16_t __rev2_71; __rev2_71 = __builtin_shufflevector(__s2_71, __s2_71, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
+ uint8x8_t __ret_71; \
+ __ret_71 = __noswap_vset_lane_u8(__noswap_vgetq_lane_u8(__rev2_71, __p3_71), __rev0_71, __p1_71); \
+ __ret_71 = __builtin_shufflevector(__ret_71, __ret_71, 7, 6, 5, 4, 3, 2, 1, 0); \
+ __ret_71; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vcopy_laneq_u32(__p0_72, __p1_72, __p2_72, __p3_72) __extension__ ({ \
+ uint32x2_t __s0_72 = __p0_72; \
+ uint32x4_t __s2_72 = __p2_72; \
+ uint32x2_t __ret_72; \
+ __ret_72 = vset_lane_u32(vgetq_lane_u32(__s2_72, __p3_72), __s0_72, __p1_72); \
+ __ret_72; \
+})
+#else
+#define vcopy_laneq_u32(__p0_73, __p1_73, __p2_73, __p3_73) __extension__ ({ \
+ uint32x2_t __s0_73 = __p0_73; \
+ uint32x4_t __s2_73 = __p2_73; \
+ uint32x2_t __rev0_73; __rev0_73 = __builtin_shufflevector(__s0_73, __s0_73, 1, 0); \
+ uint32x4_t __rev2_73; __rev2_73 = __builtin_shufflevector(__s2_73, __s2_73, 3, 2, 1, 0); \
+ uint32x2_t __ret_73; \
+ __ret_73 = __noswap_vset_lane_u32(__noswap_vgetq_lane_u32(__rev2_73, __p3_73), __rev0_73, __p1_73); \
+ __ret_73 = __builtin_shufflevector(__ret_73, __ret_73, 1, 0); \
+ __ret_73; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vcopy_laneq_u64(__p0_74, __p1_74, __p2_74, __p3_74) __extension__ ({ \
+ uint64x1_t __s0_74 = __p0_74; \
+ uint64x2_t __s2_74 = __p2_74; \
+ uint64x1_t __ret_74; \
+ __ret_74 = vset_lane_u64(vgetq_lane_u64(__s2_74, __p3_74), __s0_74, __p1_74); \
+ __ret_74; \
+})
+#else
+#define vcopy_laneq_u64(__p0_75, __p1_75, __p2_75, __p3_75) __extension__ ({ \
+ uint64x1_t __s0_75 = __p0_75; \
+ uint64x2_t __s2_75 = __p2_75; \
+ uint64x2_t __rev2_75; __rev2_75 = __builtin_shufflevector(__s2_75, __s2_75, 1, 0); \
+ uint64x1_t __ret_75; \
+ __ret_75 = __noswap_vset_lane_u64(__noswap_vgetq_lane_u64(__rev2_75, __p3_75), __s0_75, __p1_75); \
+ __ret_75; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vcopy_laneq_u16(__p0_76, __p1_76, __p2_76, __p3_76) __extension__ ({ \
+ uint16x4_t __s0_76 = __p0_76; \
+ uint16x8_t __s2_76 = __p2_76; \
+ uint16x4_t __ret_76; \
+ __ret_76 = vset_lane_u16(vgetq_lane_u16(__s2_76, __p3_76), __s0_76, __p1_76); \
+ __ret_76; \
+})
+#else
+#define vcopy_laneq_u16(__p0_77, __p1_77, __p2_77, __p3_77) __extension__ ({ \
+ uint16x4_t __s0_77 = __p0_77; \
+ uint16x8_t __s2_77 = __p2_77; \
+ uint16x4_t __rev0_77; __rev0_77 = __builtin_shufflevector(__s0_77, __s0_77, 3, 2, 1, 0); \
+ uint16x8_t __rev2_77; __rev2_77 = __builtin_shufflevector(__s2_77, __s2_77, 7, 6, 5, 4, 3, 2, 1, 0); \
+ uint16x4_t __ret_77; \
+ __ret_77 = __noswap_vset_lane_u16(__noswap_vgetq_lane_u16(__rev2_77, __p3_77), __rev0_77, __p1_77); \
+ __ret_77 = __builtin_shufflevector(__ret_77, __ret_77, 3, 2, 1, 0); \
+ __ret_77; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vcopy_laneq_s8(__p0_78, __p1_78, __p2_78, __p3_78) __extension__ ({ \
+ int8x8_t __s0_78 = __p0_78; \
+ int8x16_t __s2_78 = __p2_78; \
+ int8x8_t __ret_78; \
+ __ret_78 = vset_lane_s8(vgetq_lane_s8(__s2_78, __p3_78), __s0_78, __p1_78); \
+ __ret_78; \
+})
+#else
+#define vcopy_laneq_s8(__p0_79, __p1_79, __p2_79, __p3_79) __extension__ ({ \
+ int8x8_t __s0_79 = __p0_79; \
+ int8x16_t __s2_79 = __p2_79; \
+ int8x8_t __rev0_79; __rev0_79 = __builtin_shufflevector(__s0_79, __s0_79, 7, 6, 5, 4, 3, 2, 1, 0); \
+ int8x16_t __rev2_79; __rev2_79 = __builtin_shufflevector(__s2_79, __s2_79, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
+ int8x8_t __ret_79; \
+ __ret_79 = __noswap_vset_lane_s8(__noswap_vgetq_lane_s8(__rev2_79, __p3_79), __rev0_79, __p1_79); \
+ __ret_79 = __builtin_shufflevector(__ret_79, __ret_79, 7, 6, 5, 4, 3, 2, 1, 0); \
+ __ret_79; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vcopy_laneq_f32(__p0_80, __p1_80, __p2_80, __p3_80) __extension__ ({ \
+ float32x2_t __s0_80 = __p0_80; \
+ float32x4_t __s2_80 = __p2_80; \
+ float32x2_t __ret_80; \
+ __ret_80 = vset_lane_f32(vgetq_lane_f32(__s2_80, __p3_80), __s0_80, __p1_80); \
+ __ret_80; \
+})
+#else
+#define vcopy_laneq_f32(__p0_81, __p1_81, __p2_81, __p3_81) __extension__ ({ \
+ float32x2_t __s0_81 = __p0_81; \
+ float32x4_t __s2_81 = __p2_81; \
+ float32x2_t __rev0_81; __rev0_81 = __builtin_shufflevector(__s0_81, __s0_81, 1, 0); \
+ float32x4_t __rev2_81; __rev2_81 = __builtin_shufflevector(__s2_81, __s2_81, 3, 2, 1, 0); \
+ float32x2_t __ret_81; \
+ __ret_81 = __noswap_vset_lane_f32(__noswap_vgetq_lane_f32(__rev2_81, __p3_81), __rev0_81, __p1_81); \
+ __ret_81 = __builtin_shufflevector(__ret_81, __ret_81, 1, 0); \
+ __ret_81; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vcopy_laneq_s32(__p0_82, __p1_82, __p2_82, __p3_82) __extension__ ({ \
+ int32x2_t __s0_82 = __p0_82; \
+ int32x4_t __s2_82 = __p2_82; \
+ int32x2_t __ret_82; \
+ __ret_82 = vset_lane_s32(vgetq_lane_s32(__s2_82, __p3_82), __s0_82, __p1_82); \
+ __ret_82; \
+})
+#else
+#define vcopy_laneq_s32(__p0_83, __p1_83, __p2_83, __p3_83) __extension__ ({ \
+ int32x2_t __s0_83 = __p0_83; \
+ int32x4_t __s2_83 = __p2_83; \
+ int32x2_t __rev0_83; __rev0_83 = __builtin_shufflevector(__s0_83, __s0_83, 1, 0); \
+ int32x4_t __rev2_83; __rev2_83 = __builtin_shufflevector(__s2_83, __s2_83, 3, 2, 1, 0); \
+ int32x2_t __ret_83; \
+ __ret_83 = __noswap_vset_lane_s32(__noswap_vgetq_lane_s32(__rev2_83, __p3_83), __rev0_83, __p1_83); \
+ __ret_83 = __builtin_shufflevector(__ret_83, __ret_83, 1, 0); \
+ __ret_83; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vcopy_laneq_s64(__p0_84, __p1_84, __p2_84, __p3_84) __extension__ ({ \
+ int64x1_t __s0_84 = __p0_84; \
+ int64x2_t __s2_84 = __p2_84; \
+ int64x1_t __ret_84; \
+ __ret_84 = vset_lane_s64(vgetq_lane_s64(__s2_84, __p3_84), __s0_84, __p1_84); \
+ __ret_84; \
+})
+#else
+#define vcopy_laneq_s64(__p0_85, __p1_85, __p2_85, __p3_85) __extension__ ({ \
+ int64x1_t __s0_85 = __p0_85; \
+ int64x2_t __s2_85 = __p2_85; \
+ int64x2_t __rev2_85; __rev2_85 = __builtin_shufflevector(__s2_85, __s2_85, 1, 0); \
+ int64x1_t __ret_85; \
+ __ret_85 = __noswap_vset_lane_s64(__noswap_vgetq_lane_s64(__rev2_85, __p3_85), __s0_85, __p1_85); \
+ __ret_85; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vcopy_laneq_s16(__p0_86, __p1_86, __p2_86, __p3_86) __extension__ ({ \
+ int16x4_t __s0_86 = __p0_86; \
+ int16x8_t __s2_86 = __p2_86; \
+ int16x4_t __ret_86; \
+ __ret_86 = vset_lane_s16(vgetq_lane_s16(__s2_86, __p3_86), __s0_86, __p1_86); \
+ __ret_86; \
+})
+#else
+#define vcopy_laneq_s16(__p0_87, __p1_87, __p2_87, __p3_87) __extension__ ({ \
+ int16x4_t __s0_87 = __p0_87; \
+ int16x8_t __s2_87 = __p2_87; \
+ int16x4_t __rev0_87; __rev0_87 = __builtin_shufflevector(__s0_87, __s0_87, 3, 2, 1, 0); \
+ int16x8_t __rev2_87; __rev2_87 = __builtin_shufflevector(__s2_87, __s2_87, 7, 6, 5, 4, 3, 2, 1, 0); \
+ int16x4_t __ret_87; \
+ __ret_87 = __noswap_vset_lane_s16(__noswap_vgetq_lane_s16(__rev2_87, __p3_87), __rev0_87, __p1_87); \
+ __ret_87 = __builtin_shufflevector(__ret_87, __ret_87, 3, 2, 1, 0); \
+ __ret_87; \
+})
+#endif
+
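+/*
+ * vcreate_p64/vcreate_f64 reinterpret the bits of a uint64_t as a
+ * 64-bit vector.  This is a pure bit-cast, so both branches are
+ * identical and no endian handling is performed.
+ */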
+#ifdef __LITTLE_ENDIAN__
+__ai poly64x1_t vcreate_p64(uint64_t __p0) {
+ poly64x1_t __ret;
+ __ret = (poly64x1_t)(__p0);
+ return __ret;
+}
+#else
+__ai poly64x1_t vcreate_p64(uint64_t __p0) {
+ poly64x1_t __ret;
+ __ret = (poly64x1_t)(__p0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai float64x1_t vcreate_f64(uint64_t __p0) {
+ float64x1_t __ret;
+ __ret = (float64x1_t)(__p0);
+ return __ret;
+}
+#else
+__ai float64x1_t vcreate_f64(uint64_t __p0) {
+ float64x1_t __ret;
+ __ret = (float64x1_t)(__p0);
+ return __ret;
+}
+#endif
+
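+/*
+ * vcvt family: conversions between integer and floating-point scalars
+ * and vectors, plus width changes between f16, f32 and f64.  As above,
+ * the trailing constant passed to the __builtin_neon_vcvt* builtins is
+ * an internal type code.
+ */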
+#ifdef __LITTLE_ENDIAN__
+__ai float32_t vcvts_f32_s32(int32_t __p0) {
+ float32_t __ret;
+ __ret = (float32_t) __builtin_neon_vcvts_f32_s32(__p0);
+ return __ret;
+}
+#else
+__ai float32_t vcvts_f32_s32(int32_t __p0) {
+ float32_t __ret;
+ __ret = (float32_t) __builtin_neon_vcvts_f32_s32(__p0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai float32_t vcvts_f32_u32(uint32_t __p0) {
+ float32_t __ret;
+ __ret = (float32_t) __builtin_neon_vcvts_f32_u32(__p0);
+ return __ret;
+}
+#else
+__ai float32_t vcvts_f32_u32(uint32_t __p0) {
+ float32_t __ret;
+ __ret = (float32_t) __builtin_neon_vcvts_f32_u32(__p0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai float32x2_t vcvt_f32_f64(float64x2_t __p0) {
+ float32x2_t __ret;
+ __ret = (float32x2_t) __builtin_neon_vcvt_f32_f64((int8x16_t)__p0, 9);
+ return __ret;
+}
+#else
+__ai float32x2_t vcvt_f32_f64(float64x2_t __p0) {
+ float64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
+ float32x2_t __ret;
+ __ret = (float32x2_t) __builtin_neon_vcvt_f32_f64((int8x16_t)__rev0, 9);
+ __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
+ return __ret;
+}
+__ai float32x2_t __noswap_vcvt_f32_f64(float64x2_t __p0) {
+ float32x2_t __ret;
+ __ret = (float32x2_t) __builtin_neon_vcvt_f32_f64((int8x16_t)__p0, 9);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai float64_t vcvtd_f64_s64(int64_t __p0) {
+ float64_t __ret;
+ __ret = (float64_t) __builtin_neon_vcvtd_f64_s64(__p0);
+ return __ret;
+}
+#else
+__ai float64_t vcvtd_f64_s64(int64_t __p0) {
+ float64_t __ret;
+ __ret = (float64_t) __builtin_neon_vcvtd_f64_s64(__p0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai float64_t vcvtd_f64_u64(uint64_t __p0) {
+ float64_t __ret;
+ __ret = (float64_t) __builtin_neon_vcvtd_f64_u64(__p0);
+ return __ret;
+}
+#else
+__ai float64_t vcvtd_f64_u64(uint64_t __p0) {
+ float64_t __ret;
+ __ret = (float64_t) __builtin_neon_vcvtd_f64_u64(__p0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai float64x2_t vcvtq_f64_u64(uint64x2_t __p0) {
+ float64x2_t __ret;
+ __ret = (float64x2_t) __builtin_neon_vcvtq_f64_v((int8x16_t)__p0, 51);
+ return __ret;
+}
+#else
+__ai float64x2_t vcvtq_f64_u64(uint64x2_t __p0) {
+ uint64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
+ float64x2_t __ret;
+ __ret = (float64x2_t) __builtin_neon_vcvtq_f64_v((int8x16_t)__rev0, 51);
+ __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai float64x2_t vcvtq_f64_s64(int64x2_t __p0) {
+ float64x2_t __ret;
+ __ret = (float64x2_t) __builtin_neon_vcvtq_f64_v((int8x16_t)__p0, 35);
+ return __ret;
+}
+#else
+__ai float64x2_t vcvtq_f64_s64(int64x2_t __p0) {
+ int64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
+ float64x2_t __ret;
+ __ret = (float64x2_t) __builtin_neon_vcvtq_f64_v((int8x16_t)__rev0, 35);
+ __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai float64x1_t vcvt_f64_u64(uint64x1_t __p0) {
+ float64x1_t __ret;
+ __ret = (float64x1_t) __builtin_neon_vcvt_f64_v((int8x8_t)__p0, 19);
+ return __ret;
+}
+#else
+__ai float64x1_t vcvt_f64_u64(uint64x1_t __p0) {
+ float64x1_t __ret;
+ __ret = (float64x1_t) __builtin_neon_vcvt_f64_v((int8x8_t)__p0, 19);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai float64x1_t vcvt_f64_s64(int64x1_t __p0) {
+ float64x1_t __ret;
+ __ret = (float64x1_t) __builtin_neon_vcvt_f64_v((int8x8_t)__p0, 3);
+ return __ret;
+}
+#else
+__ai float64x1_t vcvt_f64_s64(int64x1_t __p0) {
+ float64x1_t __ret;
+ __ret = (float64x1_t) __builtin_neon_vcvt_f64_v((int8x8_t)__p0, 3);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai float64x2_t vcvt_f64_f32(float32x2_t __p0) {
+ float64x2_t __ret;
+ __ret = (float64x2_t) __builtin_neon_vcvt_f64_f32((int8x8_t)__p0, 42);
+ return __ret;
+}
+#else
+__ai float64x2_t vcvt_f64_f32(float32x2_t __p0) {
+ float32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
+ float64x2_t __ret;
+ __ret = (float64x2_t) __builtin_neon_vcvt_f64_f32((int8x8_t)__rev0, 42);
+ __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
+ return __ret;
+}
+__ai float64x2_t __noswap_vcvt_f64_f32(float32x2_t __p0) {
+ float64x2_t __ret;
+ __ret = (float64x2_t) __builtin_neon_vcvt_f64_f32((int8x8_t)__p0, 42);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai float16x8_t vcvt_high_f16_f32(float16x4_t __p0, float32x4_t __p1) {
+ float16x8_t __ret;
+ __ret = vcombine_f16(__p0, vcvt_f16_f32(__p1));
+ return __ret;
+}
+#else
+__ai float16x8_t vcvt_high_f16_f32(float16x4_t __p0, float32x4_t __p1) {
+ float16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
+ float32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
+ float16x8_t __ret;
+ __ret = __noswap_vcombine_f16(__rev0, __noswap_vcvt_f16_f32(__rev1));
+ __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai float32x4_t vcvt_high_f32_f16(float16x8_t __p0) {
+ float32x4_t __ret;
+ __ret = vcvt_f32_f16(vget_high_f16(__p0));
+ return __ret;
+}
+#else
+__ai float32x4_t vcvt_high_f32_f16(float16x8_t __p0) {
+ float16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
+ float32x4_t __ret;
+ __ret = __noswap_vcvt_f32_f16(__noswap_vget_high_f16(__rev0));
+ __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai float32x4_t vcvt_high_f32_f64(float32x2_t __p0, float64x2_t __p1) {
+ float32x4_t __ret;
+ __ret = vcombine_f32(__p0, vcvt_f32_f64(__p1));
+ return __ret;
+}
+#else
+__ai float32x4_t vcvt_high_f32_f64(float32x2_t __p0, float64x2_t __p1) {
+ float32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
+ float64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
+ float32x4_t __ret;
+ __ret = __noswap_vcombine_f32(__rev0, __noswap_vcvt_f32_f64(__rev1));
+ __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai float64x2_t vcvt_high_f64_f32(float32x4_t __p0) {
+ float64x2_t __ret;
+ __ret = vcvt_f64_f32(vget_high_f32(__p0));
+ return __ret;
+}
+#else
+__ai float64x2_t vcvt_high_f64_f32(float32x4_t __p0) {
+ float32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
+ float64x2_t __ret;
+ __ret = __noswap_vcvt_f64_f32(__noswap_vget_high_f32(__rev0));
+ __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
+ return __ret;
+}
+#endif
+
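+/* The vcvt*_n_* fixed-point conversions follow; __p1 is the number of
+   fractional bits, so e.g. vcvts_n_f32_s32(x, 3) yields x / 8.0f. These
+   are macros rather than functions because the underlying builtins
+   require __p1 to be an integer constant expression. */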
+#ifdef __LITTLE_ENDIAN__
+#define vcvts_n_f32_u32(__p0, __p1) __extension__ ({ \
+ uint32_t __s0 = __p0; \
+ float32_t __ret; \
+ __ret = (float32_t) __builtin_neon_vcvts_n_f32_u32(__s0, __p1); \
+ __ret; \
+})
+#else
+#define vcvts_n_f32_u32(__p0, __p1) __extension__ ({ \
+ uint32_t __s0 = __p0; \
+ float32_t __ret; \
+ __ret = (float32_t) __builtin_neon_vcvts_n_f32_u32(__s0, __p1); \
+ __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vcvts_n_f32_s32(__p0, __p1) __extension__ ({ \
+ int32_t __s0 = __p0; \
+ float32_t __ret; \
+ __ret = (float32_t) __builtin_neon_vcvts_n_f32_s32(__s0, __p1); \
+ __ret; \
+})
+#else
+#define vcvts_n_f32_s32(__p0, __p1) __extension__ ({ \
+ int32_t __s0 = __p0; \
+ float32_t __ret; \
+ __ret = (float32_t) __builtin_neon_vcvts_n_f32_s32(__s0, __p1); \
+ __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vcvtq_n_f64_u64(__p0, __p1) __extension__ ({ \
+ uint64x2_t __s0 = __p0; \
+ float64x2_t __ret; \
+ __ret = (float64x2_t) __builtin_neon_vcvtq_n_f64_v((int8x16_t)__s0, __p1, 51); \
+ __ret; \
+})
+#else
+#define vcvtq_n_f64_u64(__p0, __p1) __extension__ ({ \
+ uint64x2_t __s0 = __p0; \
+ uint64x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
+ float64x2_t __ret; \
+ __ret = (float64x2_t) __builtin_neon_vcvtq_n_f64_v((int8x16_t)__rev0, __p1, 51); \
+ __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
+ __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vcvtq_n_f64_s64(__p0, __p1) __extension__ ({ \
+ int64x2_t __s0 = __p0; \
+ float64x2_t __ret; \
+ __ret = (float64x2_t) __builtin_neon_vcvtq_n_f64_v((int8x16_t)__s0, __p1, 35); \
+ __ret; \
+})
+#else
+#define vcvtq_n_f64_s64(__p0, __p1) __extension__ ({ \
+ int64x2_t __s0 = __p0; \
+ int64x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
+ float64x2_t __ret; \
+ __ret = (float64x2_t) __builtin_neon_vcvtq_n_f64_v((int8x16_t)__rev0, __p1, 35); \
+ __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
+ __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vcvt_n_f64_u64(__p0, __p1) __extension__ ({ \
+ uint64x1_t __s0 = __p0; \
+ float64x1_t __ret; \
+ __ret = (float64x1_t) __builtin_neon_vcvt_n_f64_v((int8x8_t)__s0, __p1, 19); \
+ __ret; \
+})
+#else
+#define vcvt_n_f64_u64(__p0, __p1) __extension__ ({ \
+ uint64x1_t __s0 = __p0; \
+ float64x1_t __ret; \
+ __ret = (float64x1_t) __builtin_neon_vcvt_n_f64_v((int8x8_t)__s0, __p1, 19); \
+ __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vcvt_n_f64_s64(__p0, __p1) __extension__ ({ \
+ int64x1_t __s0 = __p0; \
+ float64x1_t __ret; \
+ __ret = (float64x1_t) __builtin_neon_vcvt_n_f64_v((int8x8_t)__s0, __p1, 3); \
+ __ret; \
+})
+#else
+#define vcvt_n_f64_s64(__p0, __p1) __extension__ ({ \
+ int64x1_t __s0 = __p0; \
+ float64x1_t __ret; \
+ __ret = (float64x1_t) __builtin_neon_vcvt_n_f64_v((int8x8_t)__s0, __p1, 3); \
+ __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vcvtd_n_f64_u64(__p0, __p1) __extension__ ({ \
+ uint64_t __s0 = __p0; \
+ float64_t __ret; \
+ __ret = (float64_t) __builtin_neon_vcvtd_n_f64_u64(__s0, __p1); \
+ __ret; \
+})
+#else
+#define vcvtd_n_f64_u64(__p0, __p1) __extension__ ({ \
+ uint64_t __s0 = __p0; \
+ float64_t __ret; \
+ __ret = (float64_t) __builtin_neon_vcvtd_n_f64_u64(__s0, __p1); \
+ __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vcvtd_n_f64_s64(__p0, __p1) __extension__ ({ \
+ int64_t __s0 = __p0; \
+ float64_t __ret; \
+ __ret = (float64_t) __builtin_neon_vcvtd_n_f64_s64(__s0, __p1); \
+ __ret; \
+})
+#else
+#define vcvtd_n_f64_s64(__p0, __p1) __extension__ ({ \
+ int64_t __s0 = __p0; \
+ float64_t __ret; \
+ __ret = (float64_t) __builtin_neon_vcvtd_n_f64_s64(__s0, __p1); \
+ __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vcvts_n_s32_f32(__p0, __p1) __extension__ ({ \
+ float32_t __s0 = __p0; \
+ int32_t __ret; \
+ __ret = (int32_t) __builtin_neon_vcvts_n_s32_f32(__s0, __p1); \
+ __ret; \
+})
+#else
+#define vcvts_n_s32_f32(__p0, __p1) __extension__ ({ \
+ float32_t __s0 = __p0; \
+ int32_t __ret; \
+ __ret = (int32_t) __builtin_neon_vcvts_n_s32_f32(__s0, __p1); \
+ __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vcvtq_n_s64_f64(__p0, __p1) __extension__ ({ \
+ float64x2_t __s0 = __p0; \
+ int64x2_t __ret; \
+ __ret = (int64x2_t) __builtin_neon_vcvtq_n_s64_v((int8x16_t)__s0, __p1, 35); \
+ __ret; \
+})
+#else
+#define vcvtq_n_s64_f64(__p0, __p1) __extension__ ({ \
+ float64x2_t __s0 = __p0; \
+ float64x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
+ int64x2_t __ret; \
+ __ret = (int64x2_t) __builtin_neon_vcvtq_n_s64_v((int8x16_t)__rev0, __p1, 35); \
+ __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
+ __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vcvt_n_s64_f64(__p0, __p1) __extension__ ({ \
+ float64x1_t __s0 = __p0; \
+ int64x1_t __ret; \
+ __ret = (int64x1_t) __builtin_neon_vcvt_n_s64_v((int8x8_t)__s0, __p1, 3); \
+ __ret; \
+})
+#else
+#define vcvt_n_s64_f64(__p0, __p1) __extension__ ({ \
+ float64x1_t __s0 = __p0; \
+ int64x1_t __ret; \
+ __ret = (int64x1_t) __builtin_neon_vcvt_n_s64_v((int8x8_t)__s0, __p1, 3); \
+ __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vcvtd_n_s64_f64(__p0, __p1) __extension__ ({ \
+ float64_t __s0 = __p0; \
+ int64_t __ret; \
+ __ret = (int64_t) __builtin_neon_vcvtd_n_s64_f64(__s0, __p1); \
+ __ret; \
+})
+#else
+#define vcvtd_n_s64_f64(__p0, __p1) __extension__ ({ \
+ float64_t __s0 = __p0; \
+ int64_t __ret; \
+ __ret = (int64_t) __builtin_neon_vcvtd_n_s64_f64(__s0, __p1); \
+ __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vcvts_n_u32_f32(__p0, __p1) __extension__ ({ \
+ float32_t __s0 = __p0; \
+ uint32_t __ret; \
+ __ret = (uint32_t) __builtin_neon_vcvts_n_u32_f32(__s0, __p1); \
+ __ret; \
+})
+#else
+#define vcvts_n_u32_f32(__p0, __p1) __extension__ ({ \
+ float32_t __s0 = __p0; \
+ uint32_t __ret; \
+ __ret = (uint32_t) __builtin_neon_vcvts_n_u32_f32(__s0, __p1); \
+ __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vcvtq_n_u64_f64(__p0, __p1) __extension__ ({ \
+ float64x2_t __s0 = __p0; \
+ uint64x2_t __ret; \
+ __ret = (uint64x2_t) __builtin_neon_vcvtq_n_u64_v((int8x16_t)__s0, __p1, 51); \
+ __ret; \
+})
+#else
+#define vcvtq_n_u64_f64(__p0, __p1) __extension__ ({ \
+ float64x2_t __s0 = __p0; \
+ float64x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
+ uint64x2_t __ret; \
+ __ret = (uint64x2_t) __builtin_neon_vcvtq_n_u64_v((int8x16_t)__rev0, __p1, 51); \
+ __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
+ __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vcvt_n_u64_f64(__p0, __p1) __extension__ ({ \
+ float64x1_t __s0 = __p0; \
+ uint64x1_t __ret; \
+ __ret = (uint64x1_t) __builtin_neon_vcvt_n_u64_v((int8x8_t)__s0, __p1, 19); \
+ __ret; \
+})
+#else
+#define vcvt_n_u64_f64(__p0, __p1) __extension__ ({ \
+ float64x1_t __s0 = __p0; \
+ uint64x1_t __ret; \
+ __ret = (uint64x1_t) __builtin_neon_vcvt_n_u64_v((int8x8_t)__s0, __p1, 19); \
+ __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vcvtd_n_u64_f64(__p0, __p1) __extension__ ({ \
+ float64_t __s0 = __p0; \
+ uint64_t __ret; \
+ __ret = (uint64_t) __builtin_neon_vcvtd_n_u64_f64(__s0, __p1); \
+ __ret; \
+})
+#else
+#define vcvtd_n_u64_f64(__p0, __p1) __extension__ ({ \
+ float64_t __s0 = __p0; \
+ uint64_t __ret; \
+ __ret = (uint64_t) __builtin_neon_vcvtd_n_u64_f64(__s0, __p1); \
+ __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai int32_t vcvts_s32_f32(float32_t __p0) {
+ int32_t __ret;
+ __ret = (int32_t) __builtin_neon_vcvts_s32_f32(__p0);
+ return __ret;
+}
+#else
+__ai int32_t vcvts_s32_f32(float32_t __p0) {
+ int32_t __ret;
+ __ret = (int32_t) __builtin_neon_vcvts_s32_f32(__p0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai int64_t vcvtd_s64_f64(float64_t __p0) {
+ int64_t __ret;
+ __ret = (int64_t) __builtin_neon_vcvtd_s64_f64(__p0);
+ return __ret;
+}
+#else
+__ai int64_t vcvtd_s64_f64(float64_t __p0) {
+ int64_t __ret;
+ __ret = (int64_t) __builtin_neon_vcvtd_s64_f64(__p0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai int64x2_t vcvtq_s64_f64(float64x2_t __p0) {
+ int64x2_t __ret;
+ __ret = (int64x2_t) __builtin_neon_vcvtq_s64_v((int8x16_t)__p0, 35);
+ return __ret;
+}
+#else
+__ai int64x2_t vcvtq_s64_f64(float64x2_t __p0) {
+ float64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
+ int64x2_t __ret;
+ __ret = (int64x2_t) __builtin_neon_vcvtq_s64_v((int8x16_t)__rev0, 35);
+ __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai int64x1_t vcvt_s64_f64(float64x1_t __p0) {
+ int64x1_t __ret;
+ __ret = (int64x1_t) __builtin_neon_vcvt_s64_v((int8x8_t)__p0, 3);
+ return __ret;
+}
+#else
+__ai int64x1_t vcvt_s64_f64(float64x1_t __p0) {
+ int64x1_t __ret;
+ __ret = (int64x1_t) __builtin_neon_vcvt_s64_v((int8x8_t)__p0, 3);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint32_t vcvts_u32_f32(float32_t __p0) {
+ uint32_t __ret;
+ __ret = (uint32_t) __builtin_neon_vcvts_u32_f32(__p0);
+ return __ret;
+}
+#else
+__ai uint32_t vcvts_u32_f32(float32_t __p0) {
+ uint32_t __ret;
+ __ret = (uint32_t) __builtin_neon_vcvts_u32_f32(__p0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint64_t vcvtd_u64_f64(float64_t __p0) {
+ uint64_t __ret;
+ __ret = (uint64_t) __builtin_neon_vcvtd_u64_f64(__p0);
+ return __ret;
+}
+#else
+__ai uint64_t vcvtd_u64_f64(float64_t __p0) {
+ uint64_t __ret;
+ __ret = (uint64_t) __builtin_neon_vcvtd_u64_f64(__p0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint64x2_t vcvtq_u64_f64(float64x2_t __p0) {
+ uint64x2_t __ret;
+ __ret = (uint64x2_t) __builtin_neon_vcvtq_u64_v((int8x16_t)__p0, 51);
+ return __ret;
+}
+#else
+__ai uint64x2_t vcvtq_u64_f64(float64x2_t __p0) {
+ float64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
+ uint64x2_t __ret;
+ __ret = (uint64x2_t) __builtin_neon_vcvtq_u64_v((int8x16_t)__rev0, 51);
+ __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint64x1_t vcvt_u64_f64(float64x1_t __p0) {
+ uint64x1_t __ret;
+ __ret = (uint64x1_t) __builtin_neon_vcvt_u64_v((int8x8_t)__p0, 19);
+ return __ret;
+}
+#else
+__ai uint64x1_t vcvt_u64_f64(float64x1_t __p0) {
+ uint64x1_t __ret;
+ __ret = (uint64x1_t) __builtin_neon_vcvt_u64_v((int8x8_t)__p0, 19);
+ return __ret;
+}
+#endif
+
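+/* The vcvt{a,m,n,p} variants below select the rounding mode of the
+   float-to-integer conversion: 'a' rounds to nearest with ties away from
+   zero, 'm' toward minus infinity, 'n' to nearest with ties to even, and
+   'p' toward plus infinity; the plain vcvt forms above round toward zero. */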
+#ifdef __LITTLE_ENDIAN__
+__ai int32_t vcvtas_s32_f32(float32_t __p0) {
+ int32_t __ret;
+ __ret = (int32_t) __builtin_neon_vcvtas_s32_f32(__p0);
+ return __ret;
+}
+#else
+__ai int32_t vcvtas_s32_f32(float32_t __p0) {
+ int32_t __ret;
+ __ret = (int32_t) __builtin_neon_vcvtas_s32_f32(__p0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai int64_t vcvtad_s64_f64(float64_t __p0) {
+ int64_t __ret;
+ __ret = (int64_t) __builtin_neon_vcvtad_s64_f64(__p0);
+ return __ret;
+}
+#else
+__ai int64_t vcvtad_s64_f64(float64_t __p0) {
+ int64_t __ret;
+ __ret = (int64_t) __builtin_neon_vcvtad_s64_f64(__p0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint32_t vcvtas_u32_f32(float32_t __p0) {
+ uint32_t __ret;
+ __ret = (uint32_t) __builtin_neon_vcvtas_u32_f32(__p0);
+ return __ret;
+}
+#else
+__ai uint32_t vcvtas_u32_f32(float32_t __p0) {
+ uint32_t __ret;
+ __ret = (uint32_t) __builtin_neon_vcvtas_u32_f32(__p0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint64_t vcvtad_u64_f64(float64_t __p0) {
+ uint64_t __ret;
+ __ret = (uint64_t) __builtin_neon_vcvtad_u64_f64(__p0);
+ return __ret;
+}
+#else
+__ai uint64_t vcvtad_u64_f64(float64_t __p0) {
+ uint64_t __ret;
+ __ret = (uint64_t) __builtin_neon_vcvtad_u64_f64(__p0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai int32_t vcvtms_s32_f32(float32_t __p0) {
+ int32_t __ret;
+ __ret = (int32_t) __builtin_neon_vcvtms_s32_f32(__p0);
+ return __ret;
+}
+#else
+__ai int32_t vcvtms_s32_f32(float32_t __p0) {
+ int32_t __ret;
+ __ret = (int32_t) __builtin_neon_vcvtms_s32_f32(__p0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai int64_t vcvtmd_s64_f64(float64_t __p0) {
+ int64_t __ret;
+ __ret = (int64_t) __builtin_neon_vcvtmd_s64_f64(__p0);
+ return __ret;
+}
+#else
+__ai int64_t vcvtmd_s64_f64(float64_t __p0) {
+ int64_t __ret;
+ __ret = (int64_t) __builtin_neon_vcvtmd_s64_f64(__p0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint32_t vcvtms_u32_f32(float32_t __p0) {
+ uint32_t __ret;
+ __ret = (uint32_t) __builtin_neon_vcvtms_u32_f32(__p0);
+ return __ret;
+}
+#else
+__ai uint32_t vcvtms_u32_f32(float32_t __p0) {
+ uint32_t __ret;
+ __ret = (uint32_t) __builtin_neon_vcvtms_u32_f32(__p0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint64_t vcvtmd_u64_f64(float64_t __p0) {
+ uint64_t __ret;
+ __ret = (uint64_t) __builtin_neon_vcvtmd_u64_f64(__p0);
+ return __ret;
+}
+#else
+__ai uint64_t vcvtmd_u64_f64(float64_t __p0) {
+ uint64_t __ret;
+ __ret = (uint64_t) __builtin_neon_vcvtmd_u64_f64(__p0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai int32_t vcvtns_s32_f32(float32_t __p0) {
+ int32_t __ret;
+ __ret = (int32_t) __builtin_neon_vcvtns_s32_f32(__p0);
+ return __ret;
+}
+#else
+__ai int32_t vcvtns_s32_f32(float32_t __p0) {
+ int32_t __ret;
+ __ret = (int32_t) __builtin_neon_vcvtns_s32_f32(__p0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai int64_t vcvtnd_s64_f64(float64_t __p0) {
+ int64_t __ret;
+ __ret = (int64_t) __builtin_neon_vcvtnd_s64_f64(__p0);
+ return __ret;
+}
+#else
+__ai int64_t vcvtnd_s64_f64(float64_t __p0) {
+ int64_t __ret;
+ __ret = (int64_t) __builtin_neon_vcvtnd_s64_f64(__p0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint32_t vcvtns_u32_f32(float32_t __p0) {
+ uint32_t __ret;
+ __ret = (uint32_t) __builtin_neon_vcvtns_u32_f32(__p0);
+ return __ret;
+}
+#else
+__ai uint32_t vcvtns_u32_f32(float32_t __p0) {
+ uint32_t __ret;
+ __ret = (uint32_t) __builtin_neon_vcvtns_u32_f32(__p0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint64_t vcvtnd_u64_f64(float64_t __p0) {
+ uint64_t __ret;
+ __ret = (uint64_t) __builtin_neon_vcvtnd_u64_f64(__p0);
+ return __ret;
+}
+#else
+__ai uint64_t vcvtnd_u64_f64(float64_t __p0) {
+ uint64_t __ret;
+ __ret = (uint64_t) __builtin_neon_vcvtnd_u64_f64(__p0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai int32_t vcvtps_s32_f32(float32_t __p0) {
+ int32_t __ret;
+ __ret = (int32_t) __builtin_neon_vcvtps_s32_f32(__p0);
+ return __ret;
+}
+#else
+__ai int32_t vcvtps_s32_f32(float32_t __p0) {
+ int32_t __ret;
+ __ret = (int32_t) __builtin_neon_vcvtps_s32_f32(__p0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai int64_t vcvtpd_s64_f64(float64_t __p0) {
+ int64_t __ret;
+ __ret = (int64_t) __builtin_neon_vcvtpd_s64_f64(__p0);
+ return __ret;
+}
+#else
+__ai int64_t vcvtpd_s64_f64(float64_t __p0) {
+ int64_t __ret;
+ __ret = (int64_t) __builtin_neon_vcvtpd_s64_f64(__p0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint32_t vcvtps_u32_f32(float32_t __p0) {
+ uint32_t __ret;
+ __ret = (uint32_t) __builtin_neon_vcvtps_u32_f32(__p0);
+ return __ret;
+}
+#else
+__ai uint32_t vcvtps_u32_f32(float32_t __p0) {
+ uint32_t __ret;
+ __ret = (uint32_t) __builtin_neon_vcvtps_u32_f32(__p0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint64_t vcvtpd_u64_f64(float64_t __p0) {
+ uint64_t __ret;
+ __ret = (uint64_t) __builtin_neon_vcvtpd_u64_f64(__p0);
+ return __ret;
+}
+#else
+__ai uint64_t vcvtpd_u64_f64(float64_t __p0) {
+ uint64_t __ret;
+ __ret = (uint64_t) __builtin_neon_vcvtpd_u64_f64(__p0);
+ return __ret;
+}
+#endif
+
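+/* vcvtx converts double to single precision with the round-to-odd mode
+   (FCVTX/FCVTXN), which avoids double rounding when the result is
+   subsequently narrowed further, e.g. to half precision. */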
+#ifdef __LITTLE_ENDIAN__
+__ai float32_t vcvtxd_f32_f64(float64_t __p0) {
+ float32_t __ret;
+ __ret = (float32_t) __builtin_neon_vcvtxd_f32_f64(__p0);
+ return __ret;
+}
+#else
+__ai float32_t vcvtxd_f32_f64(float64_t __p0) {
+ float32_t __ret;
+ __ret = (float32_t) __builtin_neon_vcvtxd_f32_f64(__p0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai float32x2_t vcvtx_f32_f64(float64x2_t __p0) {
+ float32x2_t __ret;
+ __ret = (float32x2_t) __builtin_neon_vcvtx_f32_v((int8x16_t)__p0, 42);
+ return __ret;
+}
+#else
+__ai float32x2_t vcvtx_f32_f64(float64x2_t __p0) {
+ float64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
+ float32x2_t __ret;
+ __ret = (float32x2_t) __builtin_neon_vcvtx_f32_v((int8x16_t)__rev0, 42);
+ __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
+ return __ret;
+}
+__ai float32x2_t __noswap_vcvtx_f32_f64(float64x2_t __p0) {
+ float32x2_t __ret;
+ __ret = (float32x2_t) __builtin_neon_vcvtx_f32_v((int8x16_t)__p0, 42);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai float32x4_t vcvtx_high_f32_f64(float32x2_t __p0, float64x2_t __p1) {
+ float32x4_t __ret;
+ __ret = vcombine_f32(__p0, vcvtx_f32_f64(__p1));
+ return __ret;
+}
+#else
+__ai float32x4_t vcvtx_high_f32_f64(float32x2_t __p0, float64x2_t __p1) {
+ float32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
+ float64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
+ float32x4_t __ret;
+ __ret = __noswap_vcombine_f32(__rev0, __noswap_vcvtx_f32_f64(__rev1));
+ __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
+ return __ret;
+}
+#endif
+
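+/* The vdiv family is implemented with the C '/' operator on vector types;
+   element-wise floating-point division is only available on AArch64,
+   which is why these intrinsics appear in this section. */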
+#ifdef __LITTLE_ENDIAN__
+__ai float64x2_t vdivq_f64(float64x2_t __p0, float64x2_t __p1) {
+ float64x2_t __ret;
+ __ret = __p0 / __p1;
+ return __ret;
+}
+#else
+__ai float64x2_t vdivq_f64(float64x2_t __p0, float64x2_t __p1) {
+ float64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
+ float64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
+ float64x2_t __ret;
+ __ret = __rev0 / __rev1;
+ __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai float32x4_t vdivq_f32(float32x4_t __p0, float32x4_t __p1) {
+ float32x4_t __ret;
+ __ret = __p0 / __p1;
+ return __ret;
+}
+#else
+__ai float32x4_t vdivq_f32(float32x4_t __p0, float32x4_t __p1) {
+ float32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
+ float32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
+ float32x4_t __ret;
+ __ret = __rev0 / __rev1;
+ __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai float64x1_t vdiv_f64(float64x1_t __p0, float64x1_t __p1) {
+ float64x1_t __ret;
+ __ret = __p0 / __p1;
+ return __ret;
+}
+#else
+__ai float64x1_t vdiv_f64(float64x1_t __p0, float64x1_t __p1) {
+ float64x1_t __ret;
+ __ret = __p0 / __p1;
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai float32x2_t vdiv_f32(float32x2_t __p0, float32x2_t __p1) {
+ float32x2_t __ret;
+ __ret = __p0 / __p1;
+ return __ret;
+}
+#else
+__ai float32x2_t vdiv_f32(float32x2_t __p0, float32x2_t __p1) {
+ float32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
+ float32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
+ float32x2_t __ret;
+ __ret = __rev0 / __rev1;
+ __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
+ return __ret;
+}
+#endif
+
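+/* The scalar vdup{b,h,s,d}_lane macros extract a single lane into a
+   scalar (8-, 16-, 32- and 64-bit elements respectively), e.g.
+   vdups_lane_f32(v, 1) returns lane 1 of v; the macro form keeps the
+   lane index a constant expression as the builtins require. */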
+#ifdef __LITTLE_ENDIAN__
+#define vdupb_lane_p8(__p0, __p1) __extension__ ({ \
+ poly8x8_t __s0 = __p0; \
+ poly8_t __ret; \
+ __ret = (poly8_t) __builtin_neon_vdupb_lane_i8((int8x8_t)__s0, __p1); \
+ __ret; \
+})
+#else
+#define vdupb_lane_p8(__p0, __p1) __extension__ ({ \
+ poly8x8_t __s0 = __p0; \
+ poly8x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
+ poly8_t __ret; \
+ __ret = (poly8_t) __builtin_neon_vdupb_lane_i8((int8x8_t)__rev0, __p1); \
+ __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vduph_lane_p16(__p0, __p1) __extension__ ({ \
+ poly16x4_t __s0 = __p0; \
+ poly16_t __ret; \
+ __ret = (poly16_t) __builtin_neon_vduph_lane_i16((int8x8_t)__s0, __p1); \
+ __ret; \
+})
+#else
+#define vduph_lane_p16(__p0, __p1) __extension__ ({ \
+ poly16x4_t __s0 = __p0; \
+ poly16x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
+ poly16_t __ret; \
+ __ret = (poly16_t) __builtin_neon_vduph_lane_i16((int8x8_t)__rev0, __p1); \
+ __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vdupb_lane_u8(__p0, __p1) __extension__ ({ \
+ uint8x8_t __s0 = __p0; \
+ uint8_t __ret; \
+ __ret = (uint8_t) __builtin_neon_vdupb_lane_i8((int8x8_t)__s0, __p1); \
+ __ret; \
+})
+#else
+#define vdupb_lane_u8(__p0, __p1) __extension__ ({ \
+ uint8x8_t __s0 = __p0; \
+ uint8x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
+ uint8_t __ret; \
+ __ret = (uint8_t) __builtin_neon_vdupb_lane_i8((int8x8_t)__rev0, __p1); \
+ __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vdups_lane_u32(__p0, __p1) __extension__ ({ \
+ uint32x2_t __s0 = __p0; \
+ uint32_t __ret; \
+ __ret = (uint32_t) __builtin_neon_vdups_lane_i32((int8x8_t)__s0, __p1); \
+ __ret; \
+})
+#else
+#define vdups_lane_u32(__p0, __p1) __extension__ ({ \
+ uint32x2_t __s0 = __p0; \
+ uint32x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
+ uint32_t __ret; \
+ __ret = (uint32_t) __builtin_neon_vdups_lane_i32((int8x8_t)__rev0, __p1); \
+ __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vdupd_lane_u64(__p0, __p1) __extension__ ({ \
+ uint64x1_t __s0 = __p0; \
+ uint64_t __ret; \
+ __ret = (uint64_t) __builtin_neon_vdupd_lane_i64((int8x8_t)__s0, __p1); \
+ __ret; \
+})
+#else
+#define vdupd_lane_u64(__p0, __p1) __extension__ ({ \
+ uint64x1_t __s0 = __p0; \
+ uint64_t __ret; \
+ __ret = (uint64_t) __builtin_neon_vdupd_lane_i64((int8x8_t)__s0, __p1); \
+ __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vduph_lane_u16(__p0, __p1) __extension__ ({ \
+ uint16x4_t __s0 = __p0; \
+ uint16_t __ret; \
+ __ret = (uint16_t) __builtin_neon_vduph_lane_i16((int8x8_t)__s0, __p1); \
+ __ret; \
+})
+#else
+#define vduph_lane_u16(__p0, __p1) __extension__ ({ \
+ uint16x4_t __s0 = __p0; \
+ uint16x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
+ uint16_t __ret; \
+ __ret = (uint16_t) __builtin_neon_vduph_lane_i16((int8x8_t)__rev0, __p1); \
+ __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vdupb_lane_s8(__p0, __p1) __extension__ ({ \
+ int8x8_t __s0 = __p0; \
+ int8_t __ret; \
+ __ret = (int8_t) __builtin_neon_vdupb_lane_i8((int8x8_t)__s0, __p1); \
+ __ret; \
+})
+#else
+#define vdupb_lane_s8(__p0, __p1) __extension__ ({ \
+ int8x8_t __s0 = __p0; \
+ int8x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
+ int8_t __ret; \
+ __ret = (int8_t) __builtin_neon_vdupb_lane_i8((int8x8_t)__rev0, __p1); \
+ __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vdupd_lane_f64(__p0, __p1) __extension__ ({ \
+ float64x1_t __s0 = __p0; \
+ float64_t __ret; \
+ __ret = (float64_t) __builtin_neon_vdupd_lane_f64((int8x8_t)__s0, __p1); \
+ __ret; \
+})
+#else
+#define vdupd_lane_f64(__p0, __p1) __extension__ ({ \
+ float64x1_t __s0 = __p0; \
+ float64_t __ret; \
+ __ret = (float64_t) __builtin_neon_vdupd_lane_f64((int8x8_t)__s0, __p1); \
+ __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vdups_lane_f32(__p0, __p1) __extension__ ({ \
+ float32x2_t __s0 = __p0; \
+ float32_t __ret; \
+ __ret = (float32_t) __builtin_neon_vdups_lane_f32((int8x8_t)__s0, __p1); \
+ __ret; \
+})
+#else
+#define vdups_lane_f32(__p0, __p1) __extension__ ({ \
+ float32x2_t __s0 = __p0; \
+ float32x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
+ float32_t __ret; \
+ __ret = (float32_t) __builtin_neon_vdups_lane_f32((int8x8_t)__rev0, __p1); \
+ __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vdups_lane_s32(__p0, __p1) __extension__ ({ \
+ int32x2_t __s0 = __p0; \
+ int32_t __ret; \
+ __ret = (int32_t) __builtin_neon_vdups_lane_i32((int8x8_t)__s0, __p1); \
+ __ret; \
+})
+#else
+#define vdups_lane_s32(__p0, __p1) __extension__ ({ \
+ int32x2_t __s0 = __p0; \
+ int32x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
+ int32_t __ret; \
+ __ret = (int32_t) __builtin_neon_vdups_lane_i32((int8x8_t)__rev0, __p1); \
+ __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vdupd_lane_s64(__p0, __p1) __extension__ ({ \
+ int64x1_t __s0 = __p0; \
+ int64_t __ret; \
+ __ret = (int64_t) __builtin_neon_vdupd_lane_i64((int8x8_t)__s0, __p1); \
+ __ret; \
+})
+#else
+#define vdupd_lane_s64(__p0, __p1) __extension__ ({ \
+ int64x1_t __s0 = __p0; \
+ int64_t __ret; \
+ __ret = (int64_t) __builtin_neon_vdupd_lane_i64((int8x8_t)__s0, __p1); \
+ __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vduph_lane_s16(__p0, __p1) __extension__ ({ \
+ int16x4_t __s0 = __p0; \
+ int16_t __ret; \
+ __ret = (int16_t) __builtin_neon_vduph_lane_i16((int8x8_t)__s0, __p1); \
+ __ret; \
+})
+#else
+#define vduph_lane_s16(__p0, __p1) __extension__ ({ \
+ int16x4_t __s0 = __p0; \
+ int16x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
+ int16_t __ret; \
+ __ret = (int16_t) __builtin_neon_vduph_lane_i16((int8x8_t)__rev0, __p1); \
+ __ret; \
+})
+#endif
+
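+/* The vector vdup_lane/vdupq_lane forms splat one lane across the whole
+   result by repeating the lane index in __builtin_shufflevector. */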
+#ifdef __LITTLE_ENDIAN__
+#define vdup_lane_p64(__p0, __p1) __extension__ ({ \
+ poly64x1_t __s0 = __p0; \
+ poly64x1_t __ret; \
+ __ret = __builtin_shufflevector(__s0, __s0, __p1); \
+ __ret; \
+})
+#else
+#define vdup_lane_p64(__p0, __p1) __extension__ ({ \
+ poly64x1_t __s0 = __p0; \
+ poly64x1_t __ret; \
+ __ret = __builtin_shufflevector(__s0, __s0, __p1); \
+ __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vdupq_lane_p64(__p0, __p1) __extension__ ({ \
+ poly64x1_t __s0 = __p0; \
+ poly64x2_t __ret; \
+ __ret = __builtin_shufflevector(__s0, __s0, __p1, __p1); \
+ __ret; \
+})
+#else
+#define vdupq_lane_p64(__p0, __p1) __extension__ ({ \
+ poly64x1_t __s0 = __p0; \
+ poly64x2_t __ret; \
+ __ret = __builtin_shufflevector(__s0, __s0, __p1, __p1); \
+ __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
+ __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vdupq_lane_f64(__p0, __p1) __extension__ ({ \
+ float64x1_t __s0 = __p0; \
+ float64x2_t __ret; \
+ __ret = __builtin_shufflevector(__s0, __s0, __p1, __p1); \
+ __ret; \
+})
+#else
+#define vdupq_lane_f64(__p0, __p1) __extension__ ({ \
+ float64x1_t __s0 = __p0; \
+ float64x2_t __ret; \
+ __ret = __builtin_shufflevector(__s0, __s0, __p1, __p1); \
+ __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
+ __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vdupq_lane_f16(__p0, __p1) __extension__ ({ \
+ float16x4_t __s0 = __p0; \
+ float16x8_t __ret; \
+ __ret = __builtin_shufflevector(__s0, __s0, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1); \
+ __ret; \
+})
+#else
+#define vdupq_lane_f16(__p0, __p1) __extension__ ({ \
+ float16x4_t __s0 = __p0; \
+ float16x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
+ float16x8_t __ret; \
+ __ret = __builtin_shufflevector(__rev0, __rev0, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1); \
+ __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
+ __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vdup_lane_f64(__p0, __p1) __extension__ ({ \
+ float64x1_t __s0 = __p0; \
+ float64x1_t __ret; \
+ __ret = __builtin_shufflevector(__s0, __s0, __p1); \
+ __ret; \
+})
+#else
+#define vdup_lane_f64(__p0, __p1) __extension__ ({ \
+ float64x1_t __s0 = __p0; \
+ float64x1_t __ret; \
+ __ret = __builtin_shufflevector(__s0, __s0, __p1); \
+ __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vdup_lane_f16(__p0, __p1) __extension__ ({ \
+ float16x4_t __s0 = __p0; \
+ float16x4_t __ret; \
+ __ret = __builtin_shufflevector(__s0, __s0, __p1, __p1, __p1, __p1); \
+ __ret; \
+})
+#else
+#define vdup_lane_f16(__p0, __p1) __extension__ ({ \
+ float16x4_t __s0 = __p0; \
+ float16x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
+ float16x4_t __ret; \
+ __ret = __builtin_shufflevector(__rev0, __rev0, __p1, __p1, __p1, __p1); \
+ __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
+ __ret; \
+})
+#endif
+
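+/* The _laneq variants below are identical except that the lane is taken
+   from a 128-bit source vector. */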
+#ifdef __LITTLE_ENDIAN__
+#define vdupb_laneq_p8(__p0, __p1) __extension__ ({ \
+ poly8x16_t __s0 = __p0; \
+ poly8_t __ret; \
+ __ret = (poly8_t) __builtin_neon_vdupb_laneq_i8((int8x16_t)__s0, __p1); \
+ __ret; \
+})
+#else
+#define vdupb_laneq_p8(__p0, __p1) __extension__ ({ \
+ poly8x16_t __s0 = __p0; \
+ poly8x16_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
+ poly8_t __ret; \
+ __ret = (poly8_t) __builtin_neon_vdupb_laneq_i8((int8x16_t)__rev0, __p1); \
+ __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vduph_laneq_p16(__p0, __p1) __extension__ ({ \
+ poly16x8_t __s0 = __p0; \
+ poly16_t __ret; \
+ __ret = (poly16_t) __builtin_neon_vduph_laneq_i16((int8x16_t)__s0, __p1); \
+ __ret; \
+})
+#else
+#define vduph_laneq_p16(__p0, __p1) __extension__ ({ \
+ poly16x8_t __s0 = __p0; \
+ poly16x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
+ poly16_t __ret; \
+ __ret = (poly16_t) __builtin_neon_vduph_laneq_i16((int8x16_t)__rev0, __p1); \
+ __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vdupb_laneq_u8(__p0, __p1) __extension__ ({ \
+ uint8x16_t __s0 = __p0; \
+ uint8_t __ret; \
+ __ret = (uint8_t) __builtin_neon_vdupb_laneq_i8((int8x16_t)__s0, __p1); \
+ __ret; \
+})
+#else
+#define vdupb_laneq_u8(__p0, __p1) __extension__ ({ \
+ uint8x16_t __s0 = __p0; \
+ uint8x16_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
+ uint8_t __ret; \
+ __ret = (uint8_t) __builtin_neon_vdupb_laneq_i8((int8x16_t)__rev0, __p1); \
+ __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vdups_laneq_u32(__p0, __p1) __extension__ ({ \
+ uint32x4_t __s0 = __p0; \
+ uint32_t __ret; \
+ __ret = (uint32_t) __builtin_neon_vdups_laneq_i32((int8x16_t)__s0, __p1); \
+ __ret; \
+})
+#else
+#define vdups_laneq_u32(__p0, __p1) __extension__ ({ \
+ uint32x4_t __s0 = __p0; \
+ uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
+ uint32_t __ret; \
+ __ret = (uint32_t) __builtin_neon_vdups_laneq_i32((int8x16_t)__rev0, __p1); \
+ __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vdupd_laneq_u64(__p0, __p1) __extension__ ({ \
+ uint64x2_t __s0 = __p0; \
+ uint64_t __ret; \
+ __ret = (uint64_t) __builtin_neon_vdupd_laneq_i64((int8x16_t)__s0, __p1); \
+ __ret; \
+})
+#else
+#define vdupd_laneq_u64(__p0, __p1) __extension__ ({ \
+ uint64x2_t __s0 = __p0; \
+ uint64x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
+ uint64_t __ret; \
+ __ret = (uint64_t) __builtin_neon_vdupd_laneq_i64((int8x16_t)__rev0, __p1); \
+ __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vduph_laneq_u16(__p0, __p1) __extension__ ({ \
+ uint16x8_t __s0 = __p0; \
+ uint16_t __ret; \
+ __ret = (uint16_t) __builtin_neon_vduph_laneq_i16((int8x16_t)__s0, __p1); \
+ __ret; \
+})
+#else
+#define vduph_laneq_u16(__p0, __p1) __extension__ ({ \
+ uint16x8_t __s0 = __p0; \
+ uint16x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
+ uint16_t __ret; \
+ __ret = (uint16_t) __builtin_neon_vduph_laneq_i16((int8x16_t)__rev0, __p1); \
+ __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vdupb_laneq_s8(__p0, __p1) __extension__ ({ \
+ int8x16_t __s0 = __p0; \
+ int8_t __ret; \
+ __ret = (int8_t) __builtin_neon_vdupb_laneq_i8((int8x16_t)__s0, __p1); \
+ __ret; \
+})
+#else
+#define vdupb_laneq_s8(__p0, __p1) __extension__ ({ \
+ int8x16_t __s0 = __p0; \
+ int8x16_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
+ int8_t __ret; \
+ __ret = (int8_t) __builtin_neon_vdupb_laneq_i8((int8x16_t)__rev0, __p1); \
+ __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vdupd_laneq_f64(__p0, __p1) __extension__ ({ \
+ float64x2_t __s0 = __p0; \
+ float64_t __ret; \
+ __ret = (float64_t) __builtin_neon_vdupd_laneq_f64((int8x16_t)__s0, __p1); \
+ __ret; \
+})
+#else
+#define vdupd_laneq_f64(__p0, __p1) __extension__ ({ \
+ float64x2_t __s0 = __p0; \
+ float64x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
+ float64_t __ret; \
+ __ret = (float64_t) __builtin_neon_vdupd_laneq_f64((int8x16_t)__rev0, __p1); \
+ __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vdups_laneq_f32(__p0, __p1) __extension__ ({ \
+ float32x4_t __s0 = __p0; \
+ float32_t __ret; \
+ __ret = (float32_t) __builtin_neon_vdups_laneq_f32((int8x16_t)__s0, __p1); \
+ __ret; \
+})
+#else
+#define vdups_laneq_f32(__p0, __p1) __extension__ ({ \
+ float32x4_t __s0 = __p0; \
+ float32x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
+ float32_t __ret; \
+ __ret = (float32_t) __builtin_neon_vdups_laneq_f32((int8x16_t)__rev0, __p1); \
+ __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vdups_laneq_s32(__p0, __p1) __extension__ ({ \
+ int32x4_t __s0 = __p0; \
+ int32_t __ret; \
+ __ret = (int32_t) __builtin_neon_vdups_laneq_i32((int8x16_t)__s0, __p1); \
+ __ret; \
+})
+#else
+#define vdups_laneq_s32(__p0, __p1) __extension__ ({ \
+ int32x4_t __s0 = __p0; \
+ int32x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
+ int32_t __ret; \
+ __ret = (int32_t) __builtin_neon_vdups_laneq_i32((int8x16_t)__rev0, __p1); \
+ __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vdupd_laneq_s64(__p0, __p1) __extension__ ({ \
+ int64x2_t __s0 = __p0; \
+ int64_t __ret; \
+ __ret = (int64_t) __builtin_neon_vdupd_laneq_i64((int8x16_t)__s0, __p1); \
+ __ret; \
+})
+#else
+#define vdupd_laneq_s64(__p0, __p1) __extension__ ({ \
+ int64x2_t __s0 = __p0; \
+ int64x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
+ int64_t __ret; \
+ __ret = (int64_t) __builtin_neon_vdupd_laneq_i64((int8x16_t)__rev0, __p1); \
+ __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vduph_laneq_s16(__p0, __p1) __extension__ ({ \
+ int16x8_t __s0 = __p0; \
+ int16_t __ret; \
+ __ret = (int16_t) __builtin_neon_vduph_laneq_i16((int8x16_t)__s0, __p1); \
+ __ret; \
+})
+#else
+#define vduph_laneq_s16(__p0, __p1) __extension__ ({ \
+ int16x8_t __s0 = __p0; \
+ int16x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
+ int16_t __ret; \
+ __ret = (int16_t) __builtin_neon_vduph_laneq_i16((int8x16_t)__rev0, __p1); \
+ __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vdup_laneq_p8(__p0, __p1) __extension__ ({ \
+ poly8x16_t __s0 = __p0; \
+ poly8x8_t __ret; \
+ __ret = __builtin_shufflevector(__s0, __s0, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1); \
+ __ret; \
+})
+#else
+#define vdup_laneq_p8(__p0, __p1) __extension__ ({ \
+ poly8x16_t __s0 = __p0; \
+ poly8x16_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
+ poly8x8_t __ret; \
+ __ret = __builtin_shufflevector(__rev0, __rev0, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1); \
+ __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
+ __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vdup_laneq_p64(__p0, __p1) __extension__ ({ \
+ poly64x2_t __s0 = __p0; \
+ poly64x1_t __ret; \
+ __ret = __builtin_shufflevector(__s0, __s0, __p1); \
+ __ret; \
+})
+#else
+#define vdup_laneq_p64(__p0, __p1) __extension__ ({ \
+ poly64x2_t __s0 = __p0; \
+ poly64x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
+ poly64x1_t __ret; \
+ __ret = __builtin_shufflevector(__rev0, __rev0, __p1); \
+ __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vdup_laneq_p16(__p0, __p1) __extension__ ({ \
+ poly16x8_t __s0 = __p0; \
+ poly16x4_t __ret; \
+ __ret = __builtin_shufflevector(__s0, __s0, __p1, __p1, __p1, __p1); \
+ __ret; \
+})
+#else
+#define vdup_laneq_p16(__p0, __p1) __extension__ ({ \
+ poly16x8_t __s0 = __p0; \
+ poly16x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
+ poly16x4_t __ret; \
+ __ret = __builtin_shufflevector(__rev0, __rev0, __p1, __p1, __p1, __p1); \
+ __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
+ __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vdupq_laneq_p8(__p0, __p1) __extension__ ({ \
+ poly8x16_t __s0 = __p0; \
+ poly8x16_t __ret; \
+ __ret = __builtin_shufflevector(__s0, __s0, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1); \
+ __ret; \
+})
+#else
+#define vdupq_laneq_p8(__p0, __p1) __extension__ ({ \
+ poly8x16_t __s0 = __p0; \
+ poly8x16_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
+ poly8x16_t __ret; \
+ __ret = __builtin_shufflevector(__rev0, __rev0, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1); \
+ __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
+ __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vdupq_laneq_p64(__p0, __p1) __extension__ ({ \
+ poly64x2_t __s0 = __p0; \
+ poly64x2_t __ret; \
+ __ret = __builtin_shufflevector(__s0, __s0, __p1, __p1); \
+ __ret; \
+})
+#else
+#define vdupq_laneq_p64(__p0, __p1) __extension__ ({ \
+ poly64x2_t __s0 = __p0; \
+ poly64x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
+ poly64x2_t __ret; \
+ __ret = __builtin_shufflevector(__rev0, __rev0, __p1, __p1); \
+ __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
+ __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vdupq_laneq_p16(__p0, __p1) __extension__ ({ \
+ poly16x8_t __s0 = __p0; \
+ poly16x8_t __ret; \
+ __ret = __builtin_shufflevector(__s0, __s0, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1); \
+ __ret; \
+})
+#else
+#define vdupq_laneq_p16(__p0, __p1) __extension__ ({ \
+ poly16x8_t __s0 = __p0; \
+ poly16x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
+ poly16x8_t __ret; \
+ __ret = __builtin_shufflevector(__rev0, __rev0, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1); \
+ __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
+ __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vdupq_laneq_u8(__p0, __p1) __extension__ ({ \
+ uint8x16_t __s0 = __p0; \
+ uint8x16_t __ret; \
+ __ret = __builtin_shufflevector(__s0, __s0, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1); \
+ __ret; \
+})
+#else
+#define vdupq_laneq_u8(__p0, __p1) __extension__ ({ \
+ uint8x16_t __s0 = __p0; \
+ uint8x16_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
+ uint8x16_t __ret; \
+ __ret = __builtin_shufflevector(__rev0, __rev0, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1); \
+ __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
+ __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vdupq_laneq_u32(__p0, __p1) __extension__ ({ \
+ uint32x4_t __s0 = __p0; \
+ uint32x4_t __ret; \
+ __ret = __builtin_shufflevector(__s0, __s0, __p1, __p1, __p1, __p1); \
+ __ret; \
+})
+#else
+#define vdupq_laneq_u32(__p0, __p1) __extension__ ({ \
+ uint32x4_t __s0 = __p0; \
+ uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
+ uint32x4_t __ret; \
+ __ret = __builtin_shufflevector(__rev0, __rev0, __p1, __p1, __p1, __p1); \
+ __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
+ __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vdupq_laneq_u64(__p0, __p1) __extension__ ({ \
+ uint64x2_t __s0 = __p0; \
+ uint64x2_t __ret; \
+ __ret = __builtin_shufflevector(__s0, __s0, __p1, __p1); \
+ __ret; \
+})
+#else
+#define vdupq_laneq_u64(__p0, __p1) __extension__ ({ \
+ uint64x2_t __s0 = __p0; \
+ uint64x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
+ uint64x2_t __ret; \
+ __ret = __builtin_shufflevector(__rev0, __rev0, __p1, __p1); \
+ __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
+ __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vdupq_laneq_u16(__p0, __p1) __extension__ ({ \
+ uint16x8_t __s0 = __p0; \
+ uint16x8_t __ret; \
+ __ret = __builtin_shufflevector(__s0, __s0, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1); \
+ __ret; \
+})
+#else
+#define vdupq_laneq_u16(__p0, __p1) __extension__ ({ \
+ uint16x8_t __s0 = __p0; \
+ uint16x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
+ uint16x8_t __ret; \
+ __ret = __builtin_shufflevector(__rev0, __rev0, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1); \
+ __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
+ __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vdupq_laneq_s8(__p0, __p1) __extension__ ({ \
+ int8x16_t __s0 = __p0; \
+ int8x16_t __ret; \
+ __ret = __builtin_shufflevector(__s0, __s0, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1); \
+ __ret; \
+})
+#else
+#define vdupq_laneq_s8(__p0, __p1) __extension__ ({ \
+ int8x16_t __s0 = __p0; \
+ int8x16_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
+ int8x16_t __ret; \
+ __ret = __builtin_shufflevector(__rev0, __rev0, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1); \
+ __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
+ __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vdupq_laneq_f64(__p0, __p1) __extension__ ({ \
+ float64x2_t __s0 = __p0; \
+ float64x2_t __ret; \
+ __ret = __builtin_shufflevector(__s0, __s0, __p1, __p1); \
+ __ret; \
+})
+#else
+#define vdupq_laneq_f64(__p0, __p1) __extension__ ({ \
+ float64x2_t __s0 = __p0; \
+ float64x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
+ float64x2_t __ret; \
+ __ret = __builtin_shufflevector(__rev0, __rev0, __p1, __p1); \
+ __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
+ __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vdupq_laneq_f32(__p0, __p1) __extension__ ({ \
+ float32x4_t __s0 = __p0; \
+ float32x4_t __ret; \
+ __ret = __builtin_shufflevector(__s0, __s0, __p1, __p1, __p1, __p1); \
+ __ret; \
+})
+#else
+#define vdupq_laneq_f32(__p0, __p1) __extension__ ({ \
+ float32x4_t __s0 = __p0; \
+ float32x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
+ float32x4_t __ret; \
+ __ret = __builtin_shufflevector(__rev0, __rev0, __p1, __p1, __p1, __p1); \
+ __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
+ __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vdupq_laneq_f16(__p0, __p1) __extension__ ({ \
+ float16x8_t __s0 = __p0; \
+ float16x8_t __ret; \
+ __ret = __builtin_shufflevector(__s0, __s0, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1); \
+ __ret; \
+})
+#else
+#define vdupq_laneq_f16(__p0, __p1) __extension__ ({ \
+ float16x8_t __s0 = __p0; \
+ float16x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
+ float16x8_t __ret; \
+ __ret = __builtin_shufflevector(__rev0, __rev0, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1); \
+ __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
+ __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vdupq_laneq_s32(__p0, __p1) __extension__ ({ \
+ int32x4_t __s0 = __p0; \
+ int32x4_t __ret; \
+ __ret = __builtin_shufflevector(__s0, __s0, __p1, __p1, __p1, __p1); \
+ __ret; \
+})
+#else
+#define vdupq_laneq_s32(__p0, __p1) __extension__ ({ \
+ int32x4_t __s0 = __p0; \
+ int32x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
+ int32x4_t __ret; \
+ __ret = __builtin_shufflevector(__rev0, __rev0, __p1, __p1, __p1, __p1); \
+ __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
+ __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vdupq_laneq_s64(__p0, __p1) __extension__ ({ \
+ int64x2_t __s0 = __p0; \
+ int64x2_t __ret; \
+ __ret = __builtin_shufflevector(__s0, __s0, __p1, __p1); \
+ __ret; \
+})
+#else
+#define vdupq_laneq_s64(__p0, __p1) __extension__ ({ \
+ int64x2_t __s0 = __p0; \
+ int64x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
+ int64x2_t __ret; \
+ __ret = __builtin_shufflevector(__rev0, __rev0, __p1, __p1); \
+ __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
+ __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vdupq_laneq_s16(__p0, __p1) __extension__ ({ \
+ int16x8_t __s0 = __p0; \
+ int16x8_t __ret; \
+ __ret = __builtin_shufflevector(__s0, __s0, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1); \
+ __ret; \
+})
+#else
+#define vdupq_laneq_s16(__p0, __p1) __extension__ ({ \
+ int16x8_t __s0 = __p0; \
+ int16x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
+ int16x8_t __ret; \
+ __ret = __builtin_shufflevector(__rev0, __rev0, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1); \
+ __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
+ __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vdup_laneq_u8(__p0, __p1) __extension__ ({ \
+ uint8x16_t __s0 = __p0; \
+ uint8x8_t __ret; \
+ __ret = __builtin_shufflevector(__s0, __s0, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1); \
+ __ret; \
+})
+#else
+#define vdup_laneq_u8(__p0, __p1) __extension__ ({ \
+ uint8x16_t __s0 = __p0; \
+ uint8x16_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
+ uint8x8_t __ret; \
+ __ret = __builtin_shufflevector(__rev0, __rev0, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1); \
+ __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
+ __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vdup_laneq_u32(__p0, __p1) __extension__ ({ \
+ uint32x4_t __s0 = __p0; \
+ uint32x2_t __ret; \
+ __ret = __builtin_shufflevector(__s0, __s0, __p1, __p1); \
+ __ret; \
+})
+#else
+#define vdup_laneq_u32(__p0, __p1) __extension__ ({ \
+ uint32x4_t __s0 = __p0; \
+ uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
+ uint32x2_t __ret; \
+ __ret = __builtin_shufflevector(__rev0, __rev0, __p1, __p1); \
+ __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
+ __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vdup_laneq_u64(__p0, __p1) __extension__ ({ \
+ uint64x2_t __s0 = __p0; \
+ uint64x1_t __ret; \
+ __ret = __builtin_shufflevector(__s0, __s0, __p1); \
+ __ret; \
+})
+#else
+#define vdup_laneq_u64(__p0, __p1) __extension__ ({ \
+ uint64x2_t __s0 = __p0; \
+ uint64x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
+ uint64x1_t __ret; \
+ __ret = __builtin_shufflevector(__rev0, __rev0, __p1); \
+ __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vdup_laneq_u16(__p0, __p1) __extension__ ({ \
+ uint16x8_t __s0 = __p0; \
+ uint16x4_t __ret; \
+ __ret = __builtin_shufflevector(__s0, __s0, __p1, __p1, __p1, __p1); \
+ __ret; \
+})
+#else
+#define vdup_laneq_u16(__p0, __p1) __extension__ ({ \
+ uint16x8_t __s0 = __p0; \
+ uint16x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
+ uint16x4_t __ret; \
+ __ret = __builtin_shufflevector(__rev0, __rev0, __p1, __p1, __p1, __p1); \
+ __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
+ __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vdup_laneq_s8(__p0, __p1) __extension__ ({ \
+ int8x16_t __s0 = __p0; \
+ int8x8_t __ret; \
+ __ret = __builtin_shufflevector(__s0, __s0, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1); \
+ __ret; \
+})
+#else
+#define vdup_laneq_s8(__p0, __p1) __extension__ ({ \
+ int8x16_t __s0 = __p0; \
+ int8x16_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
+ int8x8_t __ret; \
+ __ret = __builtin_shufflevector(__rev0, __rev0, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1); \
+ __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
+ __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vdup_laneq_f64(__p0, __p1) __extension__ ({ \
+ float64x2_t __s0 = __p0; \
+ float64x1_t __ret; \
+ __ret = __builtin_shufflevector(__s0, __s0, __p1); \
+ __ret; \
+})
+#else
+#define vdup_laneq_f64(__p0, __p1) __extension__ ({ \
+ float64x2_t __s0 = __p0; \
+ float64x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
+ float64x1_t __ret; \
+ __ret = __builtin_shufflevector(__rev0, __rev0, __p1); \
+ __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vdup_laneq_f32(__p0, __p1) __extension__ ({ \
+ float32x4_t __s0 = __p0; \
+ float32x2_t __ret; \
+ __ret = __builtin_shufflevector(__s0, __s0, __p1, __p1); \
+ __ret; \
+})
+#else
+#define vdup_laneq_f32(__p0, __p1) __extension__ ({ \
+ float32x4_t __s0 = __p0; \
+ float32x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
+ float32x2_t __ret; \
+ __ret = __builtin_shufflevector(__rev0, __rev0, __p1, __p1); \
+ __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
+ __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vdup_laneq_f16(__p0, __p1) __extension__ ({ \
+ float16x8_t __s0 = __p0; \
+ float16x4_t __ret; \
+ __ret = __builtin_shufflevector(__s0, __s0, __p1, __p1, __p1, __p1); \
+ __ret; \
+})
+#else
+#define vdup_laneq_f16(__p0, __p1) __extension__ ({ \
+ float16x8_t __s0 = __p0; \
+ float16x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
+ float16x4_t __ret; \
+ __ret = __builtin_shufflevector(__rev0, __rev0, __p1, __p1, __p1, __p1); \
+ __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
+ __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vdup_laneq_s32(__p0, __p1) __extension__ ({ \
+ int32x4_t __s0 = __p0; \
+ int32x2_t __ret; \
+ __ret = __builtin_shufflevector(__s0, __s0, __p1, __p1); \
+ __ret; \
+})
+#else
+#define vdup_laneq_s32(__p0, __p1) __extension__ ({ \
+ int32x4_t __s0 = __p0; \
+ int32x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
+ int32x2_t __ret; \
+ __ret = __builtin_shufflevector(__rev0, __rev0, __p1, __p1); \
+ __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
+ __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vdup_laneq_s64(__p0, __p1) __extension__ ({ \
+ int64x2_t __s0 = __p0; \
+ int64x1_t __ret; \
+ __ret = __builtin_shufflevector(__s0, __s0, __p1); \
+ __ret; \
+})
+#else
+#define vdup_laneq_s64(__p0, __p1) __extension__ ({ \
+ int64x2_t __s0 = __p0; \
+ int64x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
+ int64x1_t __ret; \
+ __ret = __builtin_shufflevector(__rev0, __rev0, __p1); \
+ __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vdup_laneq_s16(__p0, __p1) __extension__ ({ \
+ int16x8_t __s0 = __p0; \
+ int16x4_t __ret; \
+ __ret = __builtin_shufflevector(__s0, __s0, __p1, __p1, __p1, __p1); \
+ __ret; \
+})
+#else
+#define vdup_laneq_s16(__p0, __p1) __extension__ ({ \
+ int16x8_t __s0 = __p0; \
+ int16x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
+ int16x4_t __ret; \
+ __ret = __builtin_shufflevector(__rev0, __rev0, __p1, __p1, __p1, __p1); \
+ __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
+ __ret; \
+})
+#endif
+
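+/* vdup_n splats a scalar via a vector initializer; on big endian the
+   q-forms apply one lane reversal afterwards so the register lane order
+   matches the convention used by the lane-swapped intrinsics above. */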
+#ifdef __LITTLE_ENDIAN__
+__ai poly64x1_t vdup_n_p64(poly64_t __p0) {
+ poly64x1_t __ret;
+ __ret = (poly64x1_t) {__p0};
+ return __ret;
+}
+#else
+__ai poly64x1_t vdup_n_p64(poly64_t __p0) {
+ poly64x1_t __ret;
+ __ret = (poly64x1_t) {__p0};
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai poly64x2_t vdupq_n_p64(poly64_t __p0) {
+ poly64x2_t __ret;
+ __ret = (poly64x2_t) {__p0, __p0};
+ return __ret;
+}
+#else
+__ai poly64x2_t vdupq_n_p64(poly64_t __p0) {
+ poly64x2_t __ret;
+ __ret = (poly64x2_t) {__p0, __p0};
+ __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai float64x2_t vdupq_n_f64(float64_t __p0) {
+ float64x2_t __ret;
+ __ret = (float64x2_t) {__p0, __p0};
+ return __ret;
+}
+#else
+__ai float64x2_t vdupq_n_f64(float64_t __p0) {
+ float64x2_t __ret;
+ __ret = (float64x2_t) {__p0, __p0};
+ __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai float64x1_t vdup_n_f64(float64_t __p0) {
+ float64x1_t __ret;
+ __ret = (float64x1_t) {__p0};
+ return __ret;
+}
+#else
+__ai float64x1_t vdup_n_f64(float64_t __p0) {
+ float64x1_t __ret;
+ __ret = (float64x1_t) {__p0};
+ return __ret;
+}
+#endif
+
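+/* vext extracts a vector from the concatenation of its two operands,
+   starting at lane __p2; e.g. vextq_f64(a, b, 1) yields {a[1], b[0]}. */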
+#ifdef __LITTLE_ENDIAN__
+#define vext_p64(__p0, __p1, __p2) __extension__ ({ \
+ poly64x1_t __s0 = __p0; \
+ poly64x1_t __s1 = __p1; \
+ poly64x1_t __ret; \
+ __ret = (poly64x1_t) __builtin_neon_vext_v((int8x8_t)__s0, (int8x8_t)__s1, __p2, 6); \
+ __ret; \
+})
+#else
+#define vext_p64(__p0, __p1, __p2) __extension__ ({ \
+ poly64x1_t __s0 = __p0; \
+ poly64x1_t __s1 = __p1; \
+ poly64x1_t __ret; \
+ __ret = (poly64x1_t) __builtin_neon_vext_v((int8x8_t)__s0, (int8x8_t)__s1, __p2, 6); \
+ __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vextq_p64(__p0, __p1, __p2) __extension__ ({ \
+ poly64x2_t __s0 = __p0; \
+ poly64x2_t __s1 = __p1; \
+ poly64x2_t __ret; \
+ __ret = (poly64x2_t) __builtin_neon_vextq_v((int8x16_t)__s0, (int8x16_t)__s1, __p2, 38); \
+ __ret; \
+})
+#else
+#define vextq_p64(__p0, __p1, __p2) __extension__ ({ \
+ poly64x2_t __s0 = __p0; \
+ poly64x2_t __s1 = __p1; \
+ poly64x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
+ poly64x2_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \
+ poly64x2_t __ret; \
+ __ret = (poly64x2_t) __builtin_neon_vextq_v((int8x16_t)__rev0, (int8x16_t)__rev1, __p2, 38); \
+ __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
+ __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vextq_f64(__p0, __p1, __p2) __extension__ ({ \
+ float64x2_t __s0 = __p0; \
+ float64x2_t __s1 = __p1; \
+ float64x2_t __ret; \
+ __ret = (float64x2_t) __builtin_neon_vextq_v((int8x16_t)__s0, (int8x16_t)__s1, __p2, 42); \
+ __ret; \
+})
+#else
+#define vextq_f64(__p0, __p1, __p2) __extension__ ({ \
+ float64x2_t __s0 = __p0; \
+ float64x2_t __s1 = __p1; \
+ float64x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
+ float64x2_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \
+ float64x2_t __ret; \
+ __ret = (float64x2_t) __builtin_neon_vextq_v((int8x16_t)__rev0, (int8x16_t)__rev1, __p2, 42); \
+ __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
+ __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vext_f64(__p0, __p1, __p2) __extension__ ({ \
+ float64x1_t __s0 = __p0; \
+ float64x1_t __s1 = __p1; \
+ float64x1_t __ret; \
+ __ret = (float64x1_t) __builtin_neon_vext_v((int8x8_t)__s0, (int8x8_t)__s1, __p2, 10); \
+ __ret; \
+})
+#else
+#define vext_f64(__p0, __p1, __p2) __extension__ ({ \
+ float64x1_t __s0 = __p0; \
+ float64x1_t __s1 = __p1; \
+ float64x1_t __ret; \
+ __ret = (float64x1_t) __builtin_neon_vext_v((int8x8_t)__s0, (int8x8_t)__s1, __p2, 10); \
+ __ret; \
+})
+#endif
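
The vext*_p64/f64 macros above concatenate two vectors and extract consecutive lanes starting at the immediate index; for two-lane vectors an index of 1 rotates across the pair. A sketch (illustrative helper name, assuming AArch64):

#include <arm_neon.h>
float64x2_t rotate_pair(float64x2_t a, float64x2_t b) {
    return vextq_f64(a, b, 1); /* {a[1], b[0]} */
}
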
+
+#ifdef __LITTLE_ENDIAN__
+__ai float64x2_t vfmaq_f64(float64x2_t __p0, float64x2_t __p1, float64x2_t __p2) {
+ float64x2_t __ret;
+ __ret = (float64x2_t) __builtin_neon_vfmaq_v((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 42);
+ return __ret;
+}
+#else
+__ai float64x2_t vfmaq_f64(float64x2_t __p0, float64x2_t __p1, float64x2_t __p2) {
+ float64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
+ float64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
+ float64x2_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 1, 0);
+ float64x2_t __ret;
+ __ret = (float64x2_t) __builtin_neon_vfmaq_v((int8x16_t)__rev0, (int8x16_t)__rev1, (int8x16_t)__rev2, 42);
+ __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
+ return __ret;
+}
+__ai float64x2_t __noswap_vfmaq_f64(float64x2_t __p0, float64x2_t __p1, float64x2_t __p2) {
+ float64x2_t __ret;
+ __ret = (float64x2_t) __builtin_neon_vfmaq_v((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 42);
+ return __ret;
+}
+#endif
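
vfmaq_f64 computes a fused multiply-add, r[i] = p0[i] + p1[i]*p2[i], with a single rounding. The __noswap_vfmaq_f64 helper that appears only in the big-endian branch is the header's internal convention: callers that have already lane-reversed their operands use the __noswap_ form so the reversal is not applied twice. A sketch of the public intrinsic (illustrative name):

#include <arm_neon.h>
float64x2_t fma2(float64x2_t acc, float64x2_t a, float64x2_t x) {
    return vfmaq_f64(acc, a, x); /* acc + a*x, fused */
}
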
+
+#ifdef __LITTLE_ENDIAN__
+__ai float64x1_t vfma_f64(float64x1_t __p0, float64x1_t __p1, float64x1_t __p2) {
+ float64x1_t __ret;
+ __ret = (float64x1_t) __builtin_neon_vfma_v((int8x8_t)__p0, (int8x8_t)__p1, (int8x8_t)__p2, 10);
+ return __ret;
+}
+#else
+__ai float64x1_t vfma_f64(float64x1_t __p0, float64x1_t __p1, float64x1_t __p2) {
+ float64x1_t __ret;
+ __ret = (float64x1_t) __builtin_neon_vfma_v((int8x8_t)__p0, (int8x8_t)__p1, (int8x8_t)__p2, 10);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vfmad_lane_f64(__p0, __p1, __p2, __p3) __extension__ ({ \
+ float64_t __s0 = __p0; \
+ float64_t __s1 = __p1; \
+ float64x1_t __s2 = __p2; \
+ float64_t __ret; \
+ __ret = (float64_t) __builtin_neon_vfmad_lane_f64(__s0, __s1, (int8x8_t)__s2, __p3); \
+ __ret; \
+})
+#else
+#define vfmad_lane_f64(__p0, __p1, __p2, __p3) __extension__ ({ \
+ float64_t __s0 = __p0; \
+ float64_t __s1 = __p1; \
+ float64x1_t __s2 = __p2; \
+ float64_t __ret; \
+ __ret = (float64_t) __builtin_neon_vfmad_lane_f64(__s0, __s1, (int8x8_t)__s2, __p3); \
+ __ret; \
+})
+#define __noswap_vfmad_lane_f64(__p0, __p1, __p2, __p3) __extension__ ({ \
+ float64_t __s0 = __p0; \
+ float64_t __s1 = __p1; \
+ float64x1_t __s2 = __p2; \
+ float64_t __ret; \
+ __ret = (float64_t) __builtin_neon_vfmad_lane_f64(__s0, __s1, (int8x8_t)__s2, __p3); \
+ __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vfmas_lane_f32(__p0, __p1, __p2, __p3) __extension__ ({ \
+ float32_t __s0 = __p0; \
+ float32_t __s1 = __p1; \
+ float32x2_t __s2 = __p2; \
+ float32_t __ret; \
+ __ret = (float32_t) __builtin_neon_vfmas_lane_f32(__s0, __s1, (int8x8_t)__s2, __p3); \
+ __ret; \
+})
+#else
+#define vfmas_lane_f32(__p0, __p1, __p2, __p3) __extension__ ({ \
+ float32_t __s0 = __p0; \
+ float32_t __s1 = __p1; \
+ float32x2_t __s2 = __p2; \
+ float32x2_t __rev2; __rev2 = __builtin_shufflevector(__s2, __s2, 1, 0); \
+ float32_t __ret; \
+ __ret = (float32_t) __builtin_neon_vfmas_lane_f32(__s0, __s1, (int8x8_t)__rev2, __p3); \
+ __ret; \
+})
+#define __noswap_vfmas_lane_f32(__p0, __p1, __p2, __p3) __extension__ ({ \
+ float32_t __s0 = __p0; \
+ float32_t __s1 = __p1; \
+ float32x2_t __s2 = __p2; \
+ float32_t __ret; \
+ __ret = (float32_t) __builtin_neon_vfmas_lane_f32(__s0, __s1, (int8x8_t)__s2, __p3); \
+ __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vfmaq_lane_f64(__p0, __p1, __p2, __p3) __extension__ ({ \
+ float64x2_t __s0 = __p0; \
+ float64x2_t __s1 = __p1; \
+ float64x1_t __s2 = __p2; \
+ float64x2_t __ret; \
+ __ret = (float64x2_t) __builtin_neon_vfmaq_lane_v((int8x16_t)__s0, (int8x16_t)__s1, (int8x8_t)__s2, __p3, 42); \
+ __ret; \
+})
+#else
+#define vfmaq_lane_f64(__p0, __p1, __p2, __p3) __extension__ ({ \
+ float64x2_t __s0 = __p0; \
+ float64x2_t __s1 = __p1; \
+ float64x1_t __s2 = __p2; \
+ float64x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
+ float64x2_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \
+ float64x2_t __ret; \
+ __ret = (float64x2_t) __builtin_neon_vfmaq_lane_v((int8x16_t)__rev0, (int8x16_t)__rev1, (int8x8_t)__s2, __p3, 42); \
+ __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
+ __ret; \
+})
+#define __noswap_vfmaq_lane_f64(__p0, __p1, __p2, __p3) __extension__ ({ \
+ float64x2_t __s0 = __p0; \
+ float64x2_t __s1 = __p1; \
+ float64x1_t __s2 = __p2; \
+ float64x2_t __ret; \
+ __ret = (float64x2_t) __builtin_neon_vfmaq_lane_v((int8x16_t)__s0, (int8x16_t)__s1, (int8x8_t)__s2, __p3, 42); \
+ __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vfmaq_lane_f32(__p0, __p1, __p2, __p3) __extension__ ({ \
+ float32x4_t __s0 = __p0; \
+ float32x4_t __s1 = __p1; \
+ float32x2_t __s2 = __p2; \
+ float32x4_t __ret; \
+ __ret = (float32x4_t) __builtin_neon_vfmaq_lane_v((int8x16_t)__s0, (int8x16_t)__s1, (int8x8_t)__s2, __p3, 41); \
+ __ret; \
+})
+#else
+#define vfmaq_lane_f32(__p0, __p1, __p2, __p3) __extension__ ({ \
+ float32x4_t __s0 = __p0; \
+ float32x4_t __s1 = __p1; \
+ float32x2_t __s2 = __p2; \
+ float32x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
+ float32x4_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
+ float32x2_t __rev2; __rev2 = __builtin_shufflevector(__s2, __s2, 1, 0); \
+ float32x4_t __ret; \
+ __ret = (float32x4_t) __builtin_neon_vfmaq_lane_v((int8x16_t)__rev0, (int8x16_t)__rev1, (int8x8_t)__rev2, __p3, 41); \
+ __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
+ __ret; \
+})
+#define __noswap_vfmaq_lane_f32(__p0, __p1, __p2, __p3) __extension__ ({ \
+ float32x4_t __s0 = __p0; \
+ float32x4_t __s1 = __p1; \
+ float32x2_t __s2 = __p2; \
+ float32x4_t __ret; \
+ __ret = (float32x4_t) __builtin_neon_vfmaq_lane_v((int8x16_t)__s0, (int8x16_t)__s1, (int8x8_t)__s2, __p3, 41); \
+ __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vfma_lane_f64(__p0, __p1, __p2, __p3) __extension__ ({ \
+ float64x1_t __s0 = __p0; \
+ float64x1_t __s1 = __p1; \
+ float64x1_t __s2 = __p2; \
+ float64x1_t __ret; \
+ __ret = (float64x1_t) __builtin_neon_vfma_lane_v((int8x8_t)__s0, (int8x8_t)__s1, (int8x8_t)__s2, __p3, 10); \
+ __ret; \
+})
+#else
+#define vfma_lane_f64(__p0, __p1, __p2, __p3) __extension__ ({ \
+ float64x1_t __s0 = __p0; \
+ float64x1_t __s1 = __p1; \
+ float64x1_t __s2 = __p2; \
+ float64x1_t __ret; \
+ __ret = (float64x1_t) __builtin_neon_vfma_lane_v((int8x8_t)__s0, (int8x8_t)__s1, (int8x8_t)__s2, __p3, 10); \
+ __ret; \
+})
+#define __noswap_vfma_lane_f64(__p0, __p1, __p2, __p3) __extension__ ({ \
+ float64x1_t __s0 = __p0; \
+ float64x1_t __s1 = __p1; \
+ float64x1_t __s2 = __p2; \
+ float64x1_t __ret; \
+ __ret = (float64x1_t) __builtin_neon_vfma_lane_v((int8x8_t)__s0, (int8x8_t)__s1, (int8x8_t)__s2, __p3, 10); \
+ __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vfma_lane_f32(__p0, __p1, __p2, __p3) __extension__ ({ \
+ float32x2_t __s0 = __p0; \
+ float32x2_t __s1 = __p1; \
+ float32x2_t __s2 = __p2; \
+ float32x2_t __ret; \
+ __ret = (float32x2_t) __builtin_neon_vfma_lane_v((int8x8_t)__s0, (int8x8_t)__s1, (int8x8_t)__s2, __p3, 9); \
+ __ret; \
+})
+#else
+#define vfma_lane_f32(__p0, __p1, __p2, __p3) __extension__ ({ \
+ float32x2_t __s0 = __p0; \
+ float32x2_t __s1 = __p1; \
+ float32x2_t __s2 = __p2; \
+ float32x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
+ float32x2_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \
+ float32x2_t __rev2; __rev2 = __builtin_shufflevector(__s2, __s2, 1, 0); \
+ float32x2_t __ret; \
+ __ret = (float32x2_t) __builtin_neon_vfma_lane_v((int8x8_t)__rev0, (int8x8_t)__rev1, (int8x8_t)__rev2, __p3, 9); \
+ __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
+ __ret; \
+})
+#define __noswap_vfma_lane_f32(__p0, __p1, __p2, __p3) __extension__ ({ \
+ float32x2_t __s0 = __p0; \
+ float32x2_t __s1 = __p1; \
+ float32x2_t __s2 = __p2; \
+ float32x2_t __ret; \
+ __ret = (float32x2_t) __builtin_neon_vfma_lane_v((int8x8_t)__s0, (int8x8_t)__s1, (int8x8_t)__s2, __p3, 9); \
+ __ret; \
+})
+#endif
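
The _lane forms above fold a lane select into the multiply: vfma_lane_f32(acc, a, v, n) yields r[i] = acc[i] + a[i]*v[n], where n must be a compile-time constant. Sketch (illustrative name):

#include <arm_neon.h>
float32x2_t fma_by_lane(float32x2_t acc, float32x2_t a, float32x2_t v) {
    return vfma_lane_f32(acc, a, v, 1); /* acc + a * v[1] */
}
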
+
+#ifdef __LITTLE_ENDIAN__
+#define vfmad_laneq_f64(__p0, __p1, __p2, __p3) __extension__ ({ \
+ float64_t __s0 = __p0; \
+ float64_t __s1 = __p1; \
+ float64x2_t __s2 = __p2; \
+ float64_t __ret; \
+ __ret = (float64_t) __builtin_neon_vfmad_laneq_f64(__s0, __s1, (int8x16_t)__s2, __p3); \
+ __ret; \
+})
+#else
+#define vfmad_laneq_f64(__p0, __p1, __p2, __p3) __extension__ ({ \
+ float64_t __s0 = __p0; \
+ float64_t __s1 = __p1; \
+ float64x2_t __s2 = __p2; \
+ float64x2_t __rev2; __rev2 = __builtin_shufflevector(__s2, __s2, 1, 0); \
+ float64_t __ret; \
+ __ret = (float64_t) __builtin_neon_vfmad_laneq_f64(__s0, __s1, (int8x16_t)__rev2, __p3); \
+ __ret; \
+})
+#define __noswap_vfmad_laneq_f64(__p0, __p1, __p2, __p3) __extension__ ({ \
+ float64_t __s0 = __p0; \
+ float64_t __s1 = __p1; \
+ float64x2_t __s2 = __p2; \
+ float64_t __ret; \
+ __ret = (float64_t) __builtin_neon_vfmad_laneq_f64(__s0, __s1, (int8x16_t)__s2, __p3); \
+ __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vfmas_laneq_f32(__p0, __p1, __p2, __p3) __extension__ ({ \
+ float32_t __s0 = __p0; \
+ float32_t __s1 = __p1; \
+ float32x4_t __s2 = __p2; \
+ float32_t __ret; \
+ __ret = (float32_t) __builtin_neon_vfmas_laneq_f32(__s0, __s1, (int8x16_t)__s2, __p3); \
+ __ret; \
+})
+#else
+#define vfmas_laneq_f32(__p0, __p1, __p2, __p3) __extension__ ({ \
+ float32_t __s0 = __p0; \
+ float32_t __s1 = __p1; \
+ float32x4_t __s2 = __p2; \
+ float32x4_t __rev2; __rev2 = __builtin_shufflevector(__s2, __s2, 3, 2, 1, 0); \
+ float32_t __ret; \
+ __ret = (float32_t) __builtin_neon_vfmas_laneq_f32(__s0, __s1, (int8x16_t)__rev2, __p3); \
+ __ret; \
+})
+#define __noswap_vfmas_laneq_f32(__p0, __p1, __p2, __p3) __extension__ ({ \
+ float32_t __s0 = __p0; \
+ float32_t __s1 = __p1; \
+ float32x4_t __s2 = __p2; \
+ float32_t __ret; \
+ __ret = (float32_t) __builtin_neon_vfmas_laneq_f32(__s0, __s1, (int8x16_t)__s2, __p3); \
+ __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vfmaq_laneq_f64(__p0, __p1, __p2, __p3) __extension__ ({ \
+ float64x2_t __s0 = __p0; \
+ float64x2_t __s1 = __p1; \
+ float64x2_t __s2 = __p2; \
+ float64x2_t __ret; \
+ __ret = (float64x2_t) __builtin_neon_vfmaq_laneq_v((int8x16_t)__s0, (int8x16_t)__s1, (int8x16_t)__s2, __p3, 42); \
+ __ret; \
+})
+#else
+#define vfmaq_laneq_f64(__p0, __p1, __p2, __p3) __extension__ ({ \
+ float64x2_t __s0 = __p0; \
+ float64x2_t __s1 = __p1; \
+ float64x2_t __s2 = __p2; \
+ float64x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
+ float64x2_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \
+ float64x2_t __rev2; __rev2 = __builtin_shufflevector(__s2, __s2, 1, 0); \
+ float64x2_t __ret; \
+ __ret = (float64x2_t) __builtin_neon_vfmaq_laneq_v((int8x16_t)__rev0, (int8x16_t)__rev1, (int8x16_t)__rev2, __p3, 42); \
+ __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
+ __ret; \
+})
+#define __noswap_vfmaq_laneq_f64(__p0, __p1, __p2, __p3) __extension__ ({ \
+ float64x2_t __s0 = __p0; \
+ float64x2_t __s1 = __p1; \
+ float64x2_t __s2 = __p2; \
+ float64x2_t __ret; \
+ __ret = (float64x2_t) __builtin_neon_vfmaq_laneq_v((int8x16_t)__s0, (int8x16_t)__s1, (int8x16_t)__s2, __p3, 42); \
+ __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vfmaq_laneq_f32(__p0, __p1, __p2, __p3) __extension__ ({ \
+ float32x4_t __s0 = __p0; \
+ float32x4_t __s1 = __p1; \
+ float32x4_t __s2 = __p2; \
+ float32x4_t __ret; \
+ __ret = (float32x4_t) __builtin_neon_vfmaq_laneq_v((int8x16_t)__s0, (int8x16_t)__s1, (int8x16_t)__s2, __p3, 41); \
+ __ret; \
+})
+#else
+#define vfmaq_laneq_f32(__p0, __p1, __p2, __p3) __extension__ ({ \
+ float32x4_t __s0 = __p0; \
+ float32x4_t __s1 = __p1; \
+ float32x4_t __s2 = __p2; \
+ float32x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
+ float32x4_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
+ float32x4_t __rev2; __rev2 = __builtin_shufflevector(__s2, __s2, 3, 2, 1, 0); \
+ float32x4_t __ret; \
+ __ret = (float32x4_t) __builtin_neon_vfmaq_laneq_v((int8x16_t)__rev0, (int8x16_t)__rev1, (int8x16_t)__rev2, __p3, 41); \
+ __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
+ __ret; \
+})
+#define __noswap_vfmaq_laneq_f32(__p0, __p1, __p2, __p3) __extension__ ({ \
+ float32x4_t __s0 = __p0; \
+ float32x4_t __s1 = __p1; \
+ float32x4_t __s2 = __p2; \
+ float32x4_t __ret; \
+ __ret = (float32x4_t) __builtin_neon_vfmaq_laneq_v((int8x16_t)__s0, (int8x16_t)__s1, (int8x16_t)__s2, __p3, 41); \
+ __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vfma_laneq_f64(__p0, __p1, __p2, __p3) __extension__ ({ \
+ float64x1_t __s0 = __p0; \
+ float64x1_t __s1 = __p1; \
+ float64x2_t __s2 = __p2; \
+ float64x1_t __ret; \
+ __ret = (float64x1_t) __builtin_neon_vfma_laneq_v((int8x8_t)__s0, (int8x8_t)__s1, (int8x16_t)__s2, __p3, 10); \
+ __ret; \
+})
+#else
+#define vfma_laneq_f64(__p0, __p1, __p2, __p3) __extension__ ({ \
+ float64x1_t __s0 = __p0; \
+ float64x1_t __s1 = __p1; \
+ float64x2_t __s2 = __p2; \
+ float64x2_t __rev2; __rev2 = __builtin_shufflevector(__s2, __s2, 1, 0); \
+ float64x1_t __ret; \
+ __ret = (float64x1_t) __builtin_neon_vfma_laneq_v((int8x8_t)__s0, (int8x8_t)__s1, (int8x16_t)__rev2, __p3, 10); \
+ __ret; \
+})
+#define __noswap_vfma_laneq_f64(__p0, __p1, __p2, __p3) __extension__ ({ \
+ float64x1_t __s0 = __p0; \
+ float64x1_t __s1 = __p1; \
+ float64x2_t __s2 = __p2; \
+ float64x1_t __ret; \
+ __ret = (float64x1_t) __builtin_neon_vfma_laneq_v((int8x8_t)__s0, (int8x8_t)__s1, (int8x16_t)__s2, __p3, 10); \
+ __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vfma_laneq_f32(__p0, __p1, __p2, __p3) __extension__ ({ \
+ float32x2_t __s0 = __p0; \
+ float32x2_t __s1 = __p1; \
+ float32x4_t __s2 = __p2; \
+ float32x2_t __ret; \
+ __ret = (float32x2_t) __builtin_neon_vfma_laneq_v((int8x8_t)__s0, (int8x8_t)__s1, (int8x16_t)__s2, __p3, 9); \
+ __ret; \
+})
+#else
+#define vfma_laneq_f32(__p0, __p1, __p2, __p3) __extension__ ({ \
+ float32x2_t __s0 = __p0; \
+ float32x2_t __s1 = __p1; \
+ float32x4_t __s2 = __p2; \
+ float32x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
+ float32x2_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \
+ float32x4_t __rev2; __rev2 = __builtin_shufflevector(__s2, __s2, 3, 2, 1, 0); \
+ float32x2_t __ret; \
+ __ret = (float32x2_t) __builtin_neon_vfma_laneq_v((int8x8_t)__rev0, (int8x8_t)__rev1, (int8x16_t)__rev2, __p3, 9); \
+ __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
+ __ret; \
+})
+#define __noswap_vfma_laneq_f32(__p0, __p1, __p2, __p3) __extension__ ({ \
+ float32x2_t __s0 = __p0; \
+ float32x2_t __s1 = __p1; \
+ float32x4_t __s2 = __p2; \
+ float32x2_t __ret; \
+ __ret = (float32x2_t) __builtin_neon_vfma_laneq_v((int8x8_t)__s0, (int8x8_t)__s1, (int8x16_t)__s2, __p3, 9); \
+ __ret; \
+})
+#endif
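
The _laneq variants differ only in taking the lane from a 128-bit vector, so the index may address four f32 lanes (or two f64 lanes). Sketch:

#include <arm_neon.h>
float32x2_t fma_by_laneq(float32x2_t acc, float32x2_t a, float32x4_t v) {
    return vfma_laneq_f32(acc, a, v, 3); /* acc + a * v[3] */
}
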
+
+#ifdef __LITTLE_ENDIAN__
+__ai float64x2_t vfmaq_n_f64(float64x2_t __p0, float64x2_t __p1, float64_t __p2) {
+ float64x2_t __ret;
+ __ret = vfmaq_f64(__p0, __p1, (float64x2_t) {__p2, __p2});
+ return __ret;
+}
+#else
+__ai float64x2_t vfmaq_n_f64(float64x2_t __p0, float64x2_t __p1, float64_t __p2) {
+ float64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
+ float64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
+ float64x2_t __ret;
+ __ret = __noswap_vfmaq_f64(__rev0, __rev1, (float64x2_t) {__p2, __p2});
+ __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai float32x4_t vfmaq_n_f32(float32x4_t __p0, float32x4_t __p1, float32_t __p2) {
+ float32x4_t __ret;
+ __ret = vfmaq_f32(__p0, __p1, (float32x4_t) {__p2, __p2, __p2, __p2});
+ return __ret;
+}
+#else
+__ai float32x4_t vfmaq_n_f32(float32x4_t __p0, float32x4_t __p1, float32_t __p2) {
+ float32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
+ float32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
+ float32x4_t __ret;
+ __ret = __noswap_vfmaq_f32(__rev0, __rev1, (float32x4_t) {__p2, __p2, __p2, __p2});
+ __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai float32x2_t vfma_n_f32(float32x2_t __p0, float32x2_t __p1, float32_t __p2) {
+ float32x2_t __ret;
+ __ret = vfma_f32(__p0, __p1, (float32x2_t) {__p2, __p2});
+ return __ret;
+}
+#else
+__ai float32x2_t vfma_n_f32(float32x2_t __p0, float32x2_t __p1, float32_t __p2) {
+ float32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
+ float32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
+ float32x2_t __ret;
+ __ret = __noswap_vfma_f32(__rev0, __rev1, (float32x2_t) {__p2, __p2});
+ __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
+ return __ret;
+}
+#endif
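
The _n forms splat a scalar multiplier and defer to the plain vfma, exactly as the bodies above show. Sketch (illustrative name):

#include <arm_neon.h>
float32x4_t scale_acc(float32x4_t acc, float32x4_t a, float s) {
    return vfmaq_n_f32(acc, a, s); /* acc + a*s in every lane */
}
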
+
+#ifdef __LITTLE_ENDIAN__
+__ai float64x2_t vfmsq_f64(float64x2_t __p0, float64x2_t __p1, float64x2_t __p2) {
+ float64x2_t __ret;
+ __ret = (float64x2_t) __builtin_neon_vfmsq_v((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 42);
+ return __ret;
+}
+#else
+__ai float64x2_t vfmsq_f64(float64x2_t __p0, float64x2_t __p1, float64x2_t __p2) {
+ float64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
+ float64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
+ float64x2_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 1, 0);
+ float64x2_t __ret;
+ __ret = (float64x2_t) __builtin_neon_vfmsq_v((int8x16_t)__rev0, (int8x16_t)__rev1, (int8x16_t)__rev2, 42);
+ __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
+ return __ret;
+}
+__ai float64x2_t __noswap_vfmsq_f64(float64x2_t __p0, float64x2_t __p1, float64x2_t __p2) {
+ float64x2_t __ret;
+ __ret = (float64x2_t) __builtin_neon_vfmsq_v((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 42);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai float32x4_t vfmsq_f32(float32x4_t __p0, float32x4_t __p1, float32x4_t __p2) {
+ float32x4_t __ret;
+ __ret = (float32x4_t) __builtin_neon_vfmsq_v((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 41);
+ return __ret;
+}
+#else
+__ai float32x4_t vfmsq_f32(float32x4_t __p0, float32x4_t __p1, float32x4_t __p2) {
+ float32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
+ float32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
+ float32x4_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 3, 2, 1, 0);
+ float32x4_t __ret;
+ __ret = (float32x4_t) __builtin_neon_vfmsq_v((int8x16_t)__rev0, (int8x16_t)__rev1, (int8x16_t)__rev2, 41);
+ __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
+ return __ret;
+}
+__ai float32x4_t __noswap_vfmsq_f32(float32x4_t __p0, float32x4_t __p1, float32x4_t __p2) {
+ float32x4_t __ret;
+ __ret = (float32x4_t) __builtin_neon_vfmsq_v((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 41);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai float64x1_t vfms_f64(float64x1_t __p0, float64x1_t __p1, float64x1_t __p2) {
+ float64x1_t __ret;
+ __ret = (float64x1_t) __builtin_neon_vfms_v((int8x8_t)__p0, (int8x8_t)__p1, (int8x8_t)__p2, 10);
+ return __ret;
+}
+#else
+__ai float64x1_t vfms_f64(float64x1_t __p0, float64x1_t __p1, float64x1_t __p2) {
+ float64x1_t __ret;
+ __ret = (float64x1_t) __builtin_neon_vfms_v((int8x8_t)__p0, (int8x8_t)__p1, (int8x8_t)__p2, 10);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai float32x2_t vfms_f32(float32x2_t __p0, float32x2_t __p1, float32x2_t __p2) {
+ float32x2_t __ret;
+ __ret = (float32x2_t) __builtin_neon_vfms_v((int8x8_t)__p0, (int8x8_t)__p1, (int8x8_t)__p2, 9);
+ return __ret;
+}
+#else
+__ai float32x2_t vfms_f32(float32x2_t __p0, float32x2_t __p1, float32x2_t __p2) {
+ float32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
+ float32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
+ float32x2_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 1, 0);
+ float32x2_t __ret;
+ __ret = (float32x2_t) __builtin_neon_vfms_v((int8x8_t)__rev0, (int8x8_t)__rev1, (int8x8_t)__rev2, 9);
+ __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
+ return __ret;
+}
+__ai float32x2_t __noswap_vfms_f32(float32x2_t __p0, float32x2_t __p1, float32x2_t __p2) {
+ float32x2_t __ret;
+ __ret = (float32x2_t) __builtin_neon_vfms_v((int8x8_t)__p0, (int8x8_t)__p1, (int8x8_t)__p2, 9);
+ return __ret;
+}
+#endif
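
vfms* is the fused multiply-subtract counterpart: r[i] = p0[i] - p1[i]*p2[i], again with one rounding. Sketch (illustrative name):

#include <arm_neon.h>
float32x4_t fms4(float32x4_t acc, float32x4_t a, float32x4_t x) {
    return vfmsq_f32(acc, a, x); /* acc - a*x, fused */
}
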
+
+#ifdef __LITTLE_ENDIAN__
+#define vfmsd_lane_f64(__p0_88, __p1_88, __p2_88, __p3_88) __extension__ ({ \
+ float64_t __s0_88 = __p0_88; \
+ float64_t __s1_88 = __p1_88; \
+ float64x1_t __s2_88 = __p2_88; \
+ float64_t __ret_88; \
+ __ret_88 = vfmad_lane_f64(__s0_88, __s1_88, -__s2_88, __p3_88); \
+ __ret_88; \
+})
+#else
+#define vfmsd_lane_f64(__p0_89, __p1_89, __p2_89, __p3_89) __extension__ ({ \
+ float64_t __s0_89 = __p0_89; \
+ float64_t __s1_89 = __p1_89; \
+ float64x1_t __s2_89 = __p2_89; \
+ float64_t __ret_89; \
+ __ret_89 = __noswap_vfmad_lane_f64(__s0_89, __s1_89, -__s2_89, __p3_89); \
+ __ret_89; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vfmss_lane_f32(__p0_90, __p1_90, __p2_90, __p3_90) __extension__ ({ \
+ float32_t __s0_90 = __p0_90; \
+ float32_t __s1_90 = __p1_90; \
+ float32x2_t __s2_90 = __p2_90; \
+ float32_t __ret_90; \
+ __ret_90 = vfmas_lane_f32(__s0_90, __s1_90, -__s2_90, __p3_90); \
+ __ret_90; \
+})
+#else
+#define vfmss_lane_f32(__p0_91, __p1_91, __p2_91, __p3_91) __extension__ ({ \
+ float32_t __s0_91 = __p0_91; \
+ float32_t __s1_91 = __p1_91; \
+ float32x2_t __s2_91 = __p2_91; \
+ float32x2_t __rev2_91; __rev2_91 = __builtin_shufflevector(__s2_91, __s2_91, 1, 0); \
+ float32_t __ret_91; \
+ __ret_91 = __noswap_vfmas_lane_f32(__s0_91, __s1_91, -__rev2_91, __p3_91); \
+ __ret_91; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vfmsq_lane_f64(__p0_92, __p1_92, __p2_92, __p3_92) __extension__ ({ \
+ float64x2_t __s0_92 = __p0_92; \
+ float64x2_t __s1_92 = __p1_92; \
+ float64x1_t __s2_92 = __p2_92; \
+ float64x2_t __ret_92; \
+ __ret_92 = vfmaq_lane_f64(__s0_92, __s1_92, -__s2_92, __p3_92); \
+ __ret_92; \
+})
+#else
+#define vfmsq_lane_f64(__p0_93, __p1_93, __p2_93, __p3_93) __extension__ ({ \
+ float64x2_t __s0_93 = __p0_93; \
+ float64x2_t __s1_93 = __p1_93; \
+ float64x1_t __s2_93 = __p2_93; \
+ float64x2_t __rev0_93; __rev0_93 = __builtin_shufflevector(__s0_93, __s0_93, 1, 0); \
+ float64x2_t __rev1_93; __rev1_93 = __builtin_shufflevector(__s1_93, __s1_93, 1, 0); \
+ float64x2_t __ret_93; \
+ __ret_93 = __noswap_vfmaq_lane_f64(__rev0_93, __rev1_93, -__s2_93, __p3_93); \
+ __ret_93 = __builtin_shufflevector(__ret_93, __ret_93, 1, 0); \
+ __ret_93; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vfmsq_lane_f32(__p0_94, __p1_94, __p2_94, __p3_94) __extension__ ({ \
+ float32x4_t __s0_94 = __p0_94; \
+ float32x4_t __s1_94 = __p1_94; \
+ float32x2_t __s2_94 = __p2_94; \
+ float32x4_t __ret_94; \
+ __ret_94 = vfmaq_lane_f32(__s0_94, __s1_94, -__s2_94, __p3_94); \
+ __ret_94; \
+})
+#else
+#define vfmsq_lane_f32(__p0_95, __p1_95, __p2_95, __p3_95) __extension__ ({ \
+ float32x4_t __s0_95 = __p0_95; \
+ float32x4_t __s1_95 = __p1_95; \
+ float32x2_t __s2_95 = __p2_95; \
+ float32x4_t __rev0_95; __rev0_95 = __builtin_shufflevector(__s0_95, __s0_95, 3, 2, 1, 0); \
+ float32x4_t __rev1_95; __rev1_95 = __builtin_shufflevector(__s1_95, __s1_95, 3, 2, 1, 0); \
+ float32x2_t __rev2_95; __rev2_95 = __builtin_shufflevector(__s2_95, __s2_95, 1, 0); \
+ float32x4_t __ret_95; \
+ __ret_95 = __noswap_vfmaq_lane_f32(__rev0_95, __rev1_95, -__rev2_95, __p3_95); \
+ __ret_95 = __builtin_shufflevector(__ret_95, __ret_95, 3, 2, 1, 0); \
+ __ret_95; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vfms_lane_f64(__p0_96, __p1_96, __p2_96, __p3_96) __extension__ ({ \
+ float64x1_t __s0_96 = __p0_96; \
+ float64x1_t __s1_96 = __p1_96; \
+ float64x1_t __s2_96 = __p2_96; \
+ float64x1_t __ret_96; \
+ __ret_96 = vfma_lane_f64(__s0_96, __s1_96, -__s2_96, __p3_96); \
+ __ret_96; \
+})
+#else
+#define vfms_lane_f64(__p0_97, __p1_97, __p2_97, __p3_97) __extension__ ({ \
+ float64x1_t __s0_97 = __p0_97; \
+ float64x1_t __s1_97 = __p1_97; \
+ float64x1_t __s2_97 = __p2_97; \
+ float64x1_t __ret_97; \
+ __ret_97 = __noswap_vfma_lane_f64(__s0_97, __s1_97, -__s2_97, __p3_97); \
+ __ret_97; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vfms_lane_f32(__p0_98, __p1_98, __p2_98, __p3_98) __extension__ ({ \
+ float32x2_t __s0_98 = __p0_98; \
+ float32x2_t __s1_98 = __p1_98; \
+ float32x2_t __s2_98 = __p2_98; \
+ float32x2_t __ret_98; \
+ __ret_98 = vfma_lane_f32(__s0_98, __s1_98, -__s2_98, __p3_98); \
+ __ret_98; \
+})
+#else
+#define vfms_lane_f32(__p0_99, __p1_99, __p2_99, __p3_99) __extension__ ({ \
+ float32x2_t __s0_99 = __p0_99; \
+ float32x2_t __s1_99 = __p1_99; \
+ float32x2_t __s2_99 = __p2_99; \
+ float32x2_t __rev0_99; __rev0_99 = __builtin_shufflevector(__s0_99, __s0_99, 1, 0); \
+ float32x2_t __rev1_99; __rev1_99 = __builtin_shufflevector(__s1_99, __s1_99, 1, 0); \
+ float32x2_t __rev2_99; __rev2_99 = __builtin_shufflevector(__s2_99, __s2_99, 1, 0); \
+ float32x2_t __ret_99; \
+ __ret_99 = __noswap_vfma_lane_f32(__rev0_99, __rev1_99, -__rev2_99, __p3_99); \
+ __ret_99 = __builtin_shufflevector(__ret_99, __ret_99, 1, 0); \
+ __ret_99; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vfmsd_laneq_f64(__p0_100, __p1_100, __p2_100, __p3_100) __extension__ ({ \
+ float64_t __s0_100 = __p0_100; \
+ float64_t __s1_100 = __p1_100; \
+ float64x2_t __s2_100 = __p2_100; \
+ float64_t __ret_100; \
+ __ret_100 = vfmad_laneq_f64(__s0_100, __s1_100, -__s2_100, __p3_100); \
+ __ret_100; \
+})
+#else
+#define vfmsd_laneq_f64(__p0_101, __p1_101, __p2_101, __p3_101) __extension__ ({ \
+ float64_t __s0_101 = __p0_101; \
+ float64_t __s1_101 = __p1_101; \
+ float64x2_t __s2_101 = __p2_101; \
+ float64x2_t __rev2_101; __rev2_101 = __builtin_shufflevector(__s2_101, __s2_101, 1, 0); \
+ float64_t __ret_101; \
+ __ret_101 = __noswap_vfmad_laneq_f64(__s0_101, __s1_101, -__rev2_101, __p3_101); \
+ __ret_101; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vfmss_laneq_f32(__p0_102, __p1_102, __p2_102, __p3_102) __extension__ ({ \
+ float32_t __s0_102 = __p0_102; \
+ float32_t __s1_102 = __p1_102; \
+ float32x4_t __s2_102 = __p2_102; \
+ float32_t __ret_102; \
+ __ret_102 = vfmas_laneq_f32(__s0_102, __s1_102, -__s2_102, __p3_102); \
+ __ret_102; \
+})
+#else
+#define vfmss_laneq_f32(__p0_103, __p1_103, __p2_103, __p3_103) __extension__ ({ \
+ float32_t __s0_103 = __p0_103; \
+ float32_t __s1_103 = __p1_103; \
+ float32x4_t __s2_103 = __p2_103; \
+ float32x4_t __rev2_103; __rev2_103 = __builtin_shufflevector(__s2_103, __s2_103, 3, 2, 1, 0); \
+ float32_t __ret_103; \
+ __ret_103 = __noswap_vfmas_laneq_f32(__s0_103, __s1_103, -__rev2_103, __p3_103); \
+ __ret_103; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vfmsq_laneq_f64(__p0_104, __p1_104, __p2_104, __p3_104) __extension__ ({ \
+ float64x2_t __s0_104 = __p0_104; \
+ float64x2_t __s1_104 = __p1_104; \
+ float64x2_t __s2_104 = __p2_104; \
+ float64x2_t __ret_104; \
+ __ret_104 = vfmaq_laneq_f64(__s0_104, __s1_104, -__s2_104, __p3_104); \
+ __ret_104; \
+})
+#else
+#define vfmsq_laneq_f64(__p0_105, __p1_105, __p2_105, __p3_105) __extension__ ({ \
+ float64x2_t __s0_105 = __p0_105; \
+ float64x2_t __s1_105 = __p1_105; \
+ float64x2_t __s2_105 = __p2_105; \
+ float64x2_t __rev0_105; __rev0_105 = __builtin_shufflevector(__s0_105, __s0_105, 1, 0); \
+ float64x2_t __rev1_105; __rev1_105 = __builtin_shufflevector(__s1_105, __s1_105, 1, 0); \
+ float64x2_t __rev2_105; __rev2_105 = __builtin_shufflevector(__s2_105, __s2_105, 1, 0); \
+ float64x2_t __ret_105; \
+ __ret_105 = __noswap_vfmaq_laneq_f64(__rev0_105, __rev1_105, -__rev2_105, __p3_105); \
+ __ret_105 = __builtin_shufflevector(__ret_105, __ret_105, 1, 0); \
+ __ret_105; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vfmsq_laneq_f32(__p0_106, __p1_106, __p2_106, __p3_106) __extension__ ({ \
+ float32x4_t __s0_106 = __p0_106; \
+ float32x4_t __s1_106 = __p1_106; \
+ float32x4_t __s2_106 = __p2_106; \
+ float32x4_t __ret_106; \
+ __ret_106 = vfmaq_laneq_f32(__s0_106, __s1_106, -__s2_106, __p3_106); \
+ __ret_106; \
+})
+#else
+#define vfmsq_laneq_f32(__p0_107, __p1_107, __p2_107, __p3_107) __extension__ ({ \
+ float32x4_t __s0_107 = __p0_107; \
+ float32x4_t __s1_107 = __p1_107; \
+ float32x4_t __s2_107 = __p2_107; \
+ float32x4_t __rev0_107; __rev0_107 = __builtin_shufflevector(__s0_107, __s0_107, 3, 2, 1, 0); \
+ float32x4_t __rev1_107; __rev1_107 = __builtin_shufflevector(__s1_107, __s1_107, 3, 2, 1, 0); \
+ float32x4_t __rev2_107; __rev2_107 = __builtin_shufflevector(__s2_107, __s2_107, 3, 2, 1, 0); \
+ float32x4_t __ret_107; \
+ __ret_107 = __noswap_vfmaq_laneq_f32(__rev0_107, __rev1_107, -__rev2_107, __p3_107); \
+ __ret_107 = __builtin_shufflevector(__ret_107, __ret_107, 3, 2, 1, 0); \
+ __ret_107; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vfms_laneq_f64(__p0_108, __p1_108, __p2_108, __p3_108) __extension__ ({ \
+ float64x1_t __s0_108 = __p0_108; \
+ float64x1_t __s1_108 = __p1_108; \
+ float64x2_t __s2_108 = __p2_108; \
+ float64x1_t __ret_108; \
+ __ret_108 = vfma_laneq_f64(__s0_108, __s1_108, -__s2_108, __p3_108); \
+ __ret_108; \
+})
+#else
+#define vfms_laneq_f64(__p0_109, __p1_109, __p2_109, __p3_109) __extension__ ({ \
+ float64x1_t __s0_109 = __p0_109; \
+ float64x1_t __s1_109 = __p1_109; \
+ float64x2_t __s2_109 = __p2_109; \
+ float64x2_t __rev2_109; __rev2_109 = __builtin_shufflevector(__s2_109, __s2_109, 1, 0); \
+ float64x1_t __ret_109; \
+ __ret_109 = __noswap_vfma_laneq_f64(__s0_109, __s1_109, -__rev2_109, __p3_109); \
+ __ret_109; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vfms_laneq_f32(__p0_110, __p1_110, __p2_110, __p3_110) __extension__ ({ \
+ float32x2_t __s0_110 = __p0_110; \
+ float32x2_t __s1_110 = __p1_110; \
+ float32x4_t __s2_110 = __p2_110; \
+ float32x2_t __ret_110; \
+ __ret_110 = vfma_laneq_f32(__s0_110, __s1_110, -__s2_110, __p3_110); \
+ __ret_110; \
+})
+#else
+#define vfms_laneq_f32(__p0_111, __p1_111, __p2_111, __p3_111) __extension__ ({ \
+ float32x2_t __s0_111 = __p0_111; \
+ float32x2_t __s1_111 = __p1_111; \
+ float32x4_t __s2_111 = __p2_111; \
+ float32x2_t __rev0_111; __rev0_111 = __builtin_shufflevector(__s0_111, __s0_111, 1, 0); \
+ float32x2_t __rev1_111; __rev1_111 = __builtin_shufflevector(__s1_111, __s1_111, 1, 0); \
+ float32x4_t __rev2_111; __rev2_111 = __builtin_shufflevector(__s2_111, __s2_111, 3, 2, 1, 0); \
+ float32x2_t __ret_111; \
+ __ret_111 = __noswap_vfma_laneq_f32(__rev0_111, __rev1_111, -__rev2_111, __p3_111); \
+ __ret_111 = __builtin_shufflevector(__ret_111, __ret_111, 1, 0); \
+ __ret_111; \
+})
+#endif
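
As the expansions above show, every vfms*_lane/_laneq form simply negates the lane vector and reuses the matching vfma*_lane/_laneq form, so r[i] = acc[i] - a[i]*v[n]. Sketch (illustrative name):

#include <arm_neon.h>
float32x2_t fms_by_lane(float32x2_t acc, float32x2_t a, float32x2_t v) {
    return vfms_lane_f32(acc, a, v, 0); /* acc - a * v[0] */
}
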
+
+#ifdef __LITTLE_ENDIAN__
+__ai float64x2_t vfmsq_n_f64(float64x2_t __p0, float64x2_t __p1, float64_t __p2) {
+ float64x2_t __ret;
+ __ret = vfmsq_f64(__p0, __p1, (float64x2_t) {__p2, __p2});
+ return __ret;
+}
+#else
+__ai float64x2_t vfmsq_n_f64(float64x2_t __p0, float64x2_t __p1, float64_t __p2) {
+ float64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
+ float64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
+ float64x2_t __ret;
+ __ret = __noswap_vfmsq_f64(__rev0, __rev1, (float64x2_t) {__p2, __p2});
+ __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai float32x4_t vfmsq_n_f32(float32x4_t __p0, float32x4_t __p1, float32_t __p2) {
+ float32x4_t __ret;
+ __ret = vfmsq_f32(__p0, __p1, (float32x4_t) {__p2, __p2, __p2, __p2});
+ return __ret;
+}
+#else
+__ai float32x4_t vfmsq_n_f32(float32x4_t __p0, float32x4_t __p1, float32_t __p2) {
+ float32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
+ float32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
+ float32x4_t __ret;
+ __ret = __noswap_vfmsq_f32(__rev0, __rev1, (float32x4_t) {__p2, __p2, __p2, __p2});
+ __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai float32x2_t vfms_n_f32(float32x2_t __p0, float32x2_t __p1, float32_t __p2) {
+ float32x2_t __ret;
+ __ret = vfms_f32(__p0, __p1, (float32x2_t) {__p2, __p2});
+ return __ret;
+}
+#else
+__ai float32x2_t vfms_n_f32(float32x2_t __p0, float32x2_t __p1, float32_t __p2) {
+ float32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
+ float32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
+ float32x2_t __ret;
+ __ret = __noswap_vfms_f32(__rev0, __rev1, (float32x2_t) {__p2, __p2});
+ __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai poly64x1_t vget_high_p64(poly64x2_t __p0) {
+ poly64x1_t __ret;
+ __ret = __builtin_shufflevector(__p0, __p0, 1);
+ return __ret;
+}
+#else
+__ai poly64x1_t vget_high_p64(poly64x2_t __p0) {
+ poly64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
+ poly64x1_t __ret;
+ __ret = __builtin_shufflevector(__rev0, __rev0, 1);
+ return __ret;
+}
+__ai poly64x1_t __noswap_vget_high_p64(poly64x2_t __p0) {
+ poly64x1_t __ret;
+ __ret = __builtin_shufflevector(__p0, __p0, 1);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai float64x1_t vget_high_f64(float64x2_t __p0) {
+ float64x1_t __ret;
+ __ret = __builtin_shufflevector(__p0, __p0, 1);
+ return __ret;
+}
+#else
+__ai float64x1_t vget_high_f64(float64x2_t __p0) {
+ float64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
+ float64x1_t __ret;
+ __ret = __builtin_shufflevector(__rev0, __rev0, 1);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vget_lane_p64(__p0, __p1) __extension__ ({ \
+ poly64x1_t __s0 = __p0; \
+ poly64_t __ret; \
+ __ret = (poly64_t) __builtin_neon_vget_lane_i64((int8x8_t)__s0, __p1); \
+ __ret; \
+})
+#else
+#define vget_lane_p64(__p0, __p1) __extension__ ({ \
+ poly64x1_t __s0 = __p0; \
+ poly64_t __ret; \
+ __ret = (poly64_t) __builtin_neon_vget_lane_i64((int8x8_t)__s0, __p1); \
+ __ret; \
+})
+#define __noswap_vget_lane_p64(__p0, __p1) __extension__ ({ \
+ poly64x1_t __s0 = __p0; \
+ poly64_t __ret; \
+ __ret = (poly64_t) __builtin_neon_vget_lane_i64((int8x8_t)__s0, __p1); \
+ __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vgetq_lane_p64(__p0, __p1) __extension__ ({ \
+ poly64x2_t __s0 = __p0; \
+ poly64_t __ret; \
+ __ret = (poly64_t) __builtin_neon_vgetq_lane_i64((int8x16_t)__s0, __p1); \
+ __ret; \
+})
+#else
+#define vgetq_lane_p64(__p0, __p1) __extension__ ({ \
+ poly64x2_t __s0 = __p0; \
+ poly64x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
+ poly64_t __ret; \
+ __ret = (poly64_t) __builtin_neon_vgetq_lane_i64((int8x16_t)__rev0, __p1); \
+ __ret; \
+})
+#define __noswap_vgetq_lane_p64(__p0, __p1) __extension__ ({ \
+ poly64x2_t __s0 = __p0; \
+ poly64_t __ret; \
+ __ret = (poly64_t) __builtin_neon_vgetq_lane_i64((int8x16_t)__s0, __p1); \
+ __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vgetq_lane_f64(__p0, __p1) __extension__ ({ \
+ float64x2_t __s0 = __p0; \
+ float64_t __ret; \
+ __ret = (float64_t) __builtin_neon_vgetq_lane_f64((int8x16_t)__s0, __p1); \
+ __ret; \
+})
+#else
+#define vgetq_lane_f64(__p0, __p1) __extension__ ({ \
+ float64x2_t __s0 = __p0; \
+ float64x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
+ float64_t __ret; \
+ __ret = (float64_t) __builtin_neon_vgetq_lane_f64((int8x16_t)__rev0, __p1); \
+ __ret; \
+})
+#define __noswap_vgetq_lane_f64(__p0, __p1) __extension__ ({ \
+ float64x2_t __s0 = __p0; \
+ float64_t __ret; \
+ __ret = (float64_t) __builtin_neon_vgetq_lane_f64((int8x16_t)__s0, __p1); \
+ __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vget_lane_f64(__p0, __p1) __extension__ ({ \
+ float64x1_t __s0 = __p0; \
+ float64_t __ret; \
+ __ret = (float64_t) __builtin_neon_vget_lane_f64((int8x8_t)__s0, __p1); \
+ __ret; \
+})
+#else
+#define vget_lane_f64(__p0, __p1) __extension__ ({ \
+ float64x1_t __s0 = __p0; \
+ float64_t __ret; \
+ __ret = (float64_t) __builtin_neon_vget_lane_f64((int8x8_t)__s0, __p1); \
+ __ret; \
+})
+#define __noswap_vget_lane_f64(__p0, __p1) __extension__ ({ \
+ float64x1_t __s0 = __p0; \
+ float64_t __ret; \
+ __ret = (float64_t) __builtin_neon_vget_lane_f64((int8x8_t)__s0, __p1); \
+ __ret; \
+})
+#endif
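
vget_lane/vgetq_lane extract one scalar lane; the lane index must be an integer constant expression, which is why these are macros rather than inline functions. Sketch (illustrative name):

#include <arm_neon.h>
double upper_lane(float64x2_t q) { return vgetq_lane_f64(q, 1); }
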
+
+#ifdef __LITTLE_ENDIAN__
+__ai poly64x1_t vget_low_p64(poly64x2_t __p0) {
+ poly64x1_t __ret;
+ __ret = __builtin_shufflevector(__p0, __p0, 0);
+ return __ret;
+}
+#else
+__ai poly64x1_t vget_low_p64(poly64x2_t __p0) {
+ poly64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
+ poly64x1_t __ret;
+ __ret = __builtin_shufflevector(__rev0, __rev0, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai float64x1_t vget_low_f64(float64x2_t __p0) {
+ float64x1_t __ret;
+ __ret = __builtin_shufflevector(__p0, __p0, 0);
+ return __ret;
+}
+#else
+__ai float64x1_t vget_low_f64(float64x2_t __p0) {
+ float64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
+ float64x1_t __ret;
+ __ret = __builtin_shufflevector(__rev0, __rev0, 0);
+ return __ret;
+}
+#endif
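
vget_high/vget_low split a 128-bit vector into its upper and lower 64-bit halves; for these one-lane f64/p64 types each half is a single lane. Sketch (illustrative names):

#include <arm_neon.h>
float64x1_t hi_half(float64x2_t q) { return vget_high_f64(q); } /* {q[1]} */
float64x1_t lo_half(float64x2_t q) { return vget_low_f64(q);  } /* {q[0]} */
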
+
+#ifdef __LITTLE_ENDIAN__
+#define vld1_p64(__p0) __extension__ ({ \
+ poly64x1_t __ret; \
+ __ret = (poly64x1_t) __builtin_neon_vld1_v(__p0, 6); \
+ __ret; \
+})
+#else
+#define vld1_p64(__p0) __extension__ ({ \
+ poly64x1_t __ret; \
+ __ret = (poly64x1_t) __builtin_neon_vld1_v(__p0, 6); \
+ __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vld1q_p64(__p0) __extension__ ({ \
+ poly64x2_t __ret; \
+ __ret = (poly64x2_t) __builtin_neon_vld1q_v(__p0, 38); \
+ __ret; \
+})
+#else
+#define vld1q_p64(__p0) __extension__ ({ \
+ poly64x2_t __ret; \
+ __ret = (poly64x2_t) __builtin_neon_vld1q_v(__p0, 38); \
+ __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
+ __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vld1q_f64(__p0) __extension__ ({ \
+ float64x2_t __ret; \
+ __ret = (float64x2_t) __builtin_neon_vld1q_v(__p0, 42); \
+ __ret; \
+})
+#else
+#define vld1q_f64(__p0) __extension__ ({ \
+ float64x2_t __ret; \
+ __ret = (float64x2_t) __builtin_neon_vld1q_v(__p0, 42); \
+ __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
+ __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vld1_f64(__p0) __extension__ ({ \
+ float64x1_t __ret; \
+ __ret = (float64x1_t) __builtin_neon_vld1_v(__p0, 10); \
+ __ret; \
+})
+#else
+#define vld1_f64(__p0) __extension__ ({ \
+ float64x1_t __ret; \
+ __ret = (float64x1_t) __builtin_neon_vld1_v(__p0, 10); \
+ __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vld1_dup_p64(__p0) __extension__ ({ \
+ poly64x1_t __ret; \
+ __ret = (poly64x1_t) __builtin_neon_vld1_dup_v(__p0, 6); \
+ __ret; \
+})
+#else
+#define vld1_dup_p64(__p0) __extension__ ({ \
+ poly64x1_t __ret; \
+ __ret = (poly64x1_t) __builtin_neon_vld1_dup_v(__p0, 6); \
+ __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vld1q_dup_p64(__p0) __extension__ ({ \
+ poly64x2_t __ret; \
+ __ret = (poly64x2_t) __builtin_neon_vld1q_dup_v(__p0, 38); \
+ __ret; \
+})
+#else
+#define vld1q_dup_p64(__p0) __extension__ ({ \
+ poly64x2_t __ret; \
+ __ret = (poly64x2_t) __builtin_neon_vld1q_dup_v(__p0, 38); \
+ __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
+ __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vld1q_dup_f64(__p0) __extension__ ({ \
+ float64x2_t __ret; \
+ __ret = (float64x2_t) __builtin_neon_vld1q_dup_v(__p0, 42); \
+ __ret; \
+})
+#else
+#define vld1q_dup_f64(__p0) __extension__ ({ \
+ float64x2_t __ret; \
+ __ret = (float64x2_t) __builtin_neon_vld1q_dup_v(__p0, 42); \
+ __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
+ __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vld1_dup_f64(__p0) __extension__ ({ \
+ float64x1_t __ret; \
+ __ret = (float64x1_t) __builtin_neon_vld1_dup_v(__p0, 10); \
+ __ret; \
+})
+#else
+#define vld1_dup_f64(__p0) __extension__ ({ \
+ float64x1_t __ret; \
+ __ret = (float64x1_t) __builtin_neon_vld1_dup_v(__p0, 10); \
+ __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vld1_lane_p64(__p0, __p1, __p2) __extension__ ({ \
+ poly64x1_t __s1 = __p1; \
+ poly64x1_t __ret; \
+ __ret = (poly64x1_t) __builtin_neon_vld1_lane_v(__p0, (int8x8_t)__s1, __p2, 6); \
+ __ret; \
+})
+#else
+#define vld1_lane_p64(__p0, __p1, __p2) __extension__ ({ \
+ poly64x1_t __s1 = __p1; \
+ poly64x1_t __ret; \
+ __ret = (poly64x1_t) __builtin_neon_vld1_lane_v(__p0, (int8x8_t)__s1, __p2, 6); \
+ __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vld1q_lane_p64(__p0, __p1, __p2) __extension__ ({ \
+ poly64x2_t __s1 = __p1; \
+ poly64x2_t __ret; \
+ __ret = (poly64x2_t) __builtin_neon_vld1q_lane_v(__p0, (int8x16_t)__s1, __p2, 38); \
+ __ret; \
+})
+#else
+#define vld1q_lane_p64(__p0, __p1, __p2) __extension__ ({ \
+ poly64x2_t __s1 = __p1; \
+ poly64x2_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \
+ poly64x2_t __ret; \
+ __ret = (poly64x2_t) __builtin_neon_vld1q_lane_v(__p0, (int8x16_t)__rev1, __p2, 38); \
+ __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
+ __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vld1q_lane_f64(__p0, __p1, __p2) __extension__ ({ \
+ float64x2_t __s1 = __p1; \
+ float64x2_t __ret; \
+ __ret = (float64x2_t) __builtin_neon_vld1q_lane_v(__p0, (int8x16_t)__s1, __p2, 42); \
+ __ret; \
+})
+#else
+#define vld1q_lane_f64(__p0, __p1, __p2) __extension__ ({ \
+ float64x2_t __s1 = __p1; \
+ float64x2_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \
+ float64x2_t __ret; \
+ __ret = (float64x2_t) __builtin_neon_vld1q_lane_v(__p0, (int8x16_t)__rev1, __p2, 42); \
+ __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
+ __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vld1_lane_f64(__p0, __p1, __p2) __extension__ ({ \
+ float64x1_t __s1 = __p1; \
+ float64x1_t __ret; \
+ __ret = (float64x1_t) __builtin_neon_vld1_lane_v(__p0, (int8x8_t)__s1, __p2, 10); \
+ __ret; \
+})
+#else
+#define vld1_lane_f64(__p0, __p1, __p2) __extension__ ({ \
+ float64x1_t __s1 = __p1; \
+ float64x1_t __ret; \
+ __ret = (float64x1_t) __builtin_neon_vld1_lane_v(__p0, (int8x8_t)__s1, __p2, 10); \
+ __ret; \
+})
+#endif
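
The vld1* loads above come in three flavors: vld1q_f64 fills the vector from consecutive elements, vld1q_dup_f64 broadcasts a single loaded element, and vld1q_lane_f64 replaces one lane of an existing vector. Sketch (illustrative helper names, assuming AArch64):

#include <arm_neon.h>
float64x2_t load_pair (const double *p) { return vld1q_f64(p);     } /* {p[0], p[1]} */
float64x2_t load_splat(const double *p) { return vld1q_dup_f64(p); } /* {p[0], p[0]} */
float64x2_t load_lane1(const double *p, float64x2_t v) {
    return vld1q_lane_f64(p, v, 1); /* {v[0], p[0]} */
}
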
+
+#ifdef __LITTLE_ENDIAN__
+#define vld1_p8_x2(__p0) __extension__ ({ \
+ poly8x8x2_t __ret; \
+ __builtin_neon_vld1_x2_v(&__ret, __p0, 4); \
+ __ret; \
+})
+#else
+#define vld1_p8_x2(__p0) __extension__ ({ \
+ poly8x8x2_t __ret; \
+ __builtin_neon_vld1_x2_v(&__ret, __p0, 4); \
+ \
+ __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
+ __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
+ __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vld1_p64_x2(__p0) __extension__ ({ \
+ poly64x1x2_t __ret; \
+ __builtin_neon_vld1_x2_v(&__ret, __p0, 6); \
+ __ret; \
+})
+#else
+#define vld1_p64_x2(__p0) __extension__ ({ \
+ poly64x1x2_t __ret; \
+ __builtin_neon_vld1_x2_v(&__ret, __p0, 6); \
+ __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vld1_p16_x2(__p0) __extension__ ({ \
+ poly16x4x2_t __ret; \
+ __builtin_neon_vld1_x2_v(&__ret, __p0, 5); \
+ __ret; \
+})
+#else
+#define vld1_p16_x2(__p0) __extension__ ({ \
+ poly16x4x2_t __ret; \
+ __builtin_neon_vld1_x2_v(&__ret, __p0, 5); \
+ \
+ __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \
+ __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \
+ __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vld1q_p8_x2(__p0) __extension__ ({ \
+ poly8x16x2_t __ret; \
+ __builtin_neon_vld1q_x2_v(&__ret, __p0, 36); \
+ __ret; \
+})
+#else
+#define vld1q_p8_x2(__p0) __extension__ ({ \
+ poly8x16x2_t __ret; \
+ __builtin_neon_vld1q_x2_v(&__ret, __p0, 36); \
+ \
+ __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
+ __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
+ __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vld1q_p64_x2(__p0) __extension__ ({ \
+ poly64x2x2_t __ret; \
+ __builtin_neon_vld1q_x2_v(&__ret, __p0, 38); \
+ __ret; \
+})
+#else
+#define vld1q_p64_x2(__p0) __extension__ ({ \
+ poly64x2x2_t __ret; \
+ __builtin_neon_vld1q_x2_v(&__ret, __p0, 38); \
+ \
+ __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \
+ __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \
+ __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vld1q_p16_x2(__p0) __extension__ ({ \
+ poly16x8x2_t __ret; \
+ __builtin_neon_vld1q_x2_v(&__ret, __p0, 37); \
+ __ret; \
+})
+#else
+#define vld1q_p16_x2(__p0) __extension__ ({ \
+ poly16x8x2_t __ret; \
+ __builtin_neon_vld1q_x2_v(&__ret, __p0, 37); \
+ \
+ __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
+ __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
+ __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vld1q_u8_x2(__p0) __extension__ ({ \
+ uint8x16x2_t __ret; \
+ __builtin_neon_vld1q_x2_v(&__ret, __p0, 48); \
+ __ret; \
+})
+#else
+#define vld1q_u8_x2(__p0) __extension__ ({ \
+ uint8x16x2_t __ret; \
+ __builtin_neon_vld1q_x2_v(&__ret, __p0, 48); \
+ \
+ __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
+ __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
+ __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vld1q_u32_x2(__p0) __extension__ ({ \
+ uint32x4x2_t __ret; \
+ __builtin_neon_vld1q_x2_v(&__ret, __p0, 50); \
+ __ret; \
+})
+#else
+#define vld1q_u32_x2(__p0) __extension__ ({ \
+ uint32x4x2_t __ret; \
+ __builtin_neon_vld1q_x2_v(&__ret, __p0, 50); \
+ \
+ __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \
+ __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \
+ __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vld1q_u64_x2(__p0) __extension__ ({ \
+ uint64x2x2_t __ret; \
+ __builtin_neon_vld1q_x2_v(&__ret, __p0, 51); \
+ __ret; \
+})
+#else
+#define vld1q_u64_x2(__p0) __extension__ ({ \
+ uint64x2x2_t __ret; \
+ __builtin_neon_vld1q_x2_v(&__ret, __p0, 51); \
+ \
+ __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \
+ __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \
+ __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vld1q_u16_x2(__p0) __extension__ ({ \
+ uint16x8x2_t __ret; \
+ __builtin_neon_vld1q_x2_v(&__ret, __p0, 49); \
+ __ret; \
+})
+#else
+#define vld1q_u16_x2(__p0) __extension__ ({ \
+ uint16x8x2_t __ret; \
+ __builtin_neon_vld1q_x2_v(&__ret, __p0, 49); \
+ \
+ __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
+ __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
+ __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vld1q_s8_x2(__p0) __extension__ ({ \
+ int8x16x2_t __ret; \
+ __builtin_neon_vld1q_x2_v(&__ret, __p0, 32); \
+ __ret; \
+})
+#else
+#define vld1q_s8_x2(__p0) __extension__ ({ \
+ int8x16x2_t __ret; \
+ __builtin_neon_vld1q_x2_v(&__ret, __p0, 32); \
+ \
+ __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
+ __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
+ __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vld1q_f64_x2(__p0) __extension__ ({ \
+ float64x2x2_t __ret; \
+ __builtin_neon_vld1q_x2_v(&__ret, __p0, 42); \
+ __ret; \
+})
+#else
+#define vld1q_f64_x2(__p0) __extension__ ({ \
+ float64x2x2_t __ret; \
+ __builtin_neon_vld1q_x2_v(&__ret, __p0, 42); \
+ \
+ __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \
+ __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \
+ __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vld1q_f32_x2(__p0) __extension__ ({ \
+ float32x4x2_t __ret; \
+ __builtin_neon_vld1q_x2_v(&__ret, __p0, 41); \
+ __ret; \
+})
+#else
+#define vld1q_f32_x2(__p0) __extension__ ({ \
+ float32x4x2_t __ret; \
+ __builtin_neon_vld1q_x2_v(&__ret, __p0, 41); \
+ \
+ __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \
+ __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \
+ __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vld1q_f16_x2(__p0) __extension__ ({ \
+ float16x8x2_t __ret; \
+ __builtin_neon_vld1q_x2_v(&__ret, __p0, 40); \
+ __ret; \
+})
+#else
+#define vld1q_f16_x2(__p0) __extension__ ({ \
+ float16x8x2_t __ret; \
+ __builtin_neon_vld1q_x2_v(&__ret, __p0, 40); \
+ \
+ __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
+ __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
+ __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vld1q_s32_x2(__p0) __extension__ ({ \
+ int32x4x2_t __ret; \
+ __builtin_neon_vld1q_x2_v(&__ret, __p0, 34); \
+ __ret; \
+})
+#else
+#define vld1q_s32_x2(__p0) __extension__ ({ \
+ int32x4x2_t __ret; \
+ __builtin_neon_vld1q_x2_v(&__ret, __p0, 34); \
+ \
+ __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \
+ __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \
+ __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vld1q_s64_x2(__p0) __extension__ ({ \
+ int64x2x2_t __ret; \
+ __builtin_neon_vld1q_x2_v(&__ret, __p0, 35); \
+ __ret; \
+})
+#else
+#define vld1q_s64_x2(__p0) __extension__ ({ \
+ int64x2x2_t __ret; \
+ __builtin_neon_vld1q_x2_v(&__ret, __p0, 35); \
+ \
+ __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \
+ __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \
+ __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vld1q_s16_x2(__p0) __extension__ ({ \
+ int16x8x2_t __ret; \
+ __builtin_neon_vld1q_x2_v(&__ret, __p0, 33); \
+ __ret; \
+})
+#else
+#define vld1q_s16_x2(__p0) __extension__ ({ \
+ int16x8x2_t __ret; \
+ __builtin_neon_vld1q_x2_v(&__ret, __p0, 33); \
+ \
+ __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
+ __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
+ __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vld1_u8_x2(__p0) __extension__ ({ \
+ uint8x8x2_t __ret; \
+ __builtin_neon_vld1_x2_v(&__ret, __p0, 16); \
+ __ret; \
+})
+#else
+#define vld1_u8_x2(__p0) __extension__ ({ \
+ uint8x8x2_t __ret; \
+ __builtin_neon_vld1_x2_v(&__ret, __p0, 16); \
+ \
+ __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
+ __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
+ __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vld1_u32_x2(__p0) __extension__ ({ \
+ uint32x2x2_t __ret; \
+ __builtin_neon_vld1_x2_v(&__ret, __p0, 18); \
+ __ret; \
+})
+#else
+#define vld1_u32_x2(__p0) __extension__ ({ \
+ uint32x2x2_t __ret; \
+ __builtin_neon_vld1_x2_v(&__ret, __p0, 18); \
+ \
+ __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \
+ __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \
+ __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vld1_u64_x2(__p0) __extension__ ({ \
+ uint64x1x2_t __ret; \
+ __builtin_neon_vld1_x2_v(&__ret, __p0, 19); \
+ __ret; \
+})
+#else
+#define vld1_u64_x2(__p0) __extension__ ({ \
+ uint64x1x2_t __ret; \
+ __builtin_neon_vld1_x2_v(&__ret, __p0, 19); \
+ __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vld1_u16_x2(__p0) __extension__ ({ \
+ uint16x4x2_t __ret; \
+ __builtin_neon_vld1_x2_v(&__ret, __p0, 17); \
+ __ret; \
+})
+#else
+#define vld1_u16_x2(__p0) __extension__ ({ \
+ uint16x4x2_t __ret; \
+ __builtin_neon_vld1_x2_v(&__ret, __p0, 17); \
+ \
+ __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \
+ __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \
+ __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vld1_s8_x2(__p0) __extension__ ({ \
+ int8x8x2_t __ret; \
+ __builtin_neon_vld1_x2_v(&__ret, __p0, 0); \
+ __ret; \
+})
+#else
+#define vld1_s8_x2(__p0) __extension__ ({ \
+ int8x8x2_t __ret; \
+ __builtin_neon_vld1_x2_v(&__ret, __p0, 0); \
+ \
+ __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
+ __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
+ __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vld1_f64_x2(__p0) __extension__ ({ \
+ float64x1x2_t __ret; \
+ __builtin_neon_vld1_x2_v(&__ret, __p0, 10); \
+ __ret; \
+})
+#else
+#define vld1_f64_x2(__p0) __extension__ ({ \
+ float64x1x2_t __ret; \
+ __builtin_neon_vld1_x2_v(&__ret, __p0, 10); \
+ __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vld1_f32_x2(__p0) __extension__ ({ \
+ float32x2x2_t __ret; \
+ __builtin_neon_vld1_x2_v(&__ret, __p0, 9); \
+ __ret; \
+})
+#else
+#define vld1_f32_x2(__p0) __extension__ ({ \
+ float32x2x2_t __ret; \
+ __builtin_neon_vld1_x2_v(&__ret, __p0, 9); \
+ \
+ __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \
+ __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \
+ __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vld1_f16_x2(__p0) __extension__ ({ \
+ float16x4x2_t __ret; \
+ __builtin_neon_vld1_x2_v(&__ret, __p0, 8); \
+ __ret; \
+})
+#else
+#define vld1_f16_x2(__p0) __extension__ ({ \
+ float16x4x2_t __ret; \
+ __builtin_neon_vld1_x2_v(&__ret, __p0, 8); \
+ \
+ __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \
+ __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \
+ __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vld1_s32_x2(__p0) __extension__ ({ \
+ int32x2x2_t __ret; \
+ __builtin_neon_vld1_x2_v(&__ret, __p0, 2); \
+ __ret; \
+})
+#else
+#define vld1_s32_x2(__p0) __extension__ ({ \
+ int32x2x2_t __ret; \
+ __builtin_neon_vld1_x2_v(&__ret, __p0, 2); \
+ \
+ __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \
+ __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \
+ __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vld1_s64_x2(__p0) __extension__ ({ \
+ int64x1x2_t __ret; \
+ __builtin_neon_vld1_x2_v(&__ret, __p0, 3); \
+ __ret; \
+})
+#else
+#define vld1_s64_x2(__p0) __extension__ ({ \
+ int64x1x2_t __ret; \
+ __builtin_neon_vld1_x2_v(&__ret, __p0, 3); \
+ __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vld1_s16_x2(__p0) __extension__ ({ \
+ int16x4x2_t __ret; \
+ __builtin_neon_vld1_x2_v(&__ret, __p0, 1); \
+ __ret; \
+})
+#else
+#define vld1_s16_x2(__p0) __extension__ ({ \
+ int16x4x2_t __ret; \
+ __builtin_neon_vld1_x2_v(&__ret, __p0, 1); \
+ \
+ __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \
+ __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \
+ __ret; \
+})
+#endif
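
The _x2 forms above (and the _x3 forms that follow) load two or three back-to-back vectors through one pointer, without the de-interleaving that vld2/vld3 perform; the result is a plain struct of vectors, with the usual per-vector lane reversal applied on big-endian. Sketch (illustrative, assuming AArch64):

#include <arm_neon.h>
float sum8(const float *p) {
    float32x4x2_t v = vld1q_f32_x2(p); /* v.val[0] = p[0..3], v.val[1] = p[4..7] */
    float32x4_t s = vaddq_f32(v.val[0], v.val[1]);
    return vgetq_lane_f32(s, 0) + vgetq_lane_f32(s, 1)
         + vgetq_lane_f32(s, 2) + vgetq_lane_f32(s, 3);
}
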
+
+#ifdef __LITTLE_ENDIAN__
+#define vld1_p8_x3(__p0) __extension__ ({ \
+ poly8x8x3_t __ret; \
+ __builtin_neon_vld1_x3_v(&__ret, __p0, 4); \
+ __ret; \
+})
+#else
+#define vld1_p8_x3(__p0) __extension__ ({ \
+ poly8x8x3_t __ret; \
+ __builtin_neon_vld1_x3_v(&__ret, __p0, 4); \
+ \
+ __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
+ __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
+ __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \
+ __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vld1_p64_x3(__p0) __extension__ ({ \
+ poly64x1x3_t __ret; \
+ __builtin_neon_vld1_x3_v(&__ret, __p0, 6); \
+ __ret; \
+})
+#else
+#define vld1_p64_x3(__p0) __extension__ ({ \
+ poly64x1x3_t __ret; \
+ __builtin_neon_vld1_x3_v(&__ret, __p0, 6); \
+ __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vld1_p16_x3(__p0) __extension__ ({ \
+ poly16x4x3_t __ret; \
+ __builtin_neon_vld1_x3_v(&__ret, __p0, 5); \
+ __ret; \
+})
+#else
+#define vld1_p16_x3(__p0) __extension__ ({ \
+ poly16x4x3_t __ret; \
+ __builtin_neon_vld1_x3_v(&__ret, __p0, 5); \
+ \
+ __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \
+ __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \
+ __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 3, 2, 1, 0); \
+ __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vld1q_p8_x3(__p0) __extension__ ({ \
+ poly8x16x3_t __ret; \
+ __builtin_neon_vld1q_x3_v(&__ret, __p0, 36); \
+ __ret; \
+})
+#else
+#define vld1q_p8_x3(__p0) __extension__ ({ \
+ poly8x16x3_t __ret; \
+ __builtin_neon_vld1q_x3_v(&__ret, __p0, 36); \
+ \
+ __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
+ __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
+ __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
+ __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vld1q_p64_x3(__p0) __extension__ ({ \
+ poly64x2x3_t __ret; \
+ __builtin_neon_vld1q_x3_v(&__ret, __p0, 38); \
+ __ret; \
+})
+#else
+#define vld1q_p64_x3(__p0) __extension__ ({ \
+ poly64x2x3_t __ret; \
+ __builtin_neon_vld1q_x3_v(&__ret, __p0, 38); \
+ \
+ __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \
+ __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \
+ __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 1, 0); \
+ __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vld1q_p16_x3(__p0) __extension__ ({ \
+ poly16x8x3_t __ret; \
+ __builtin_neon_vld1q_x3_v(&__ret, __p0, 37); \
+ __ret; \
+})
+#else
+#define vld1q_p16_x3(__p0) __extension__ ({ \
+ poly16x8x3_t __ret; \
+ __builtin_neon_vld1q_x3_v(&__ret, __p0, 37); \
+ \
+ __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
+ __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
+ __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \
+ __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vld1q_u8_x3(__p0) __extension__ ({ \
+ uint8x16x3_t __ret; \
+ __builtin_neon_vld1q_x3_v(&__ret, __p0, 48); \
+ __ret; \
+})
+#else
+#define vld1q_u8_x3(__p0) __extension__ ({ \
+ uint8x16x3_t __ret; \
+ __builtin_neon_vld1q_x3_v(&__ret, __p0, 48); \
+ \
+ __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
+ __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
+ __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
+ __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vld1q_u32_x3(__p0) __extension__ ({ \
+ uint32x4x3_t __ret; \
+ __builtin_neon_vld1q_x3_v(&__ret, __p0, 50); \
+ __ret; \
+})
+#else
+#define vld1q_u32_x3(__p0) __extension__ ({ \
+ uint32x4x3_t __ret; \
+ __builtin_neon_vld1q_x3_v(&__ret, __p0, 50); \
+ \
+ __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \
+ __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \
+ __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 3, 2, 1, 0); \
+ __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vld1q_u64_x3(__p0) __extension__ ({ \
+ uint64x2x3_t __ret; \
+ __builtin_neon_vld1q_x3_v(&__ret, __p0, 51); \
+ __ret; \
+})
+#else
+#define vld1q_u64_x3(__p0) __extension__ ({ \
+ uint64x2x3_t __ret; \
+ __builtin_neon_vld1q_x3_v(&__ret, __p0, 51); \
+ \
+ __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \
+ __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \
+ __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 1, 0); \
+ __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vld1q_u16_x3(__p0) __extension__ ({ \
+ uint16x8x3_t __ret; \
+ __builtin_neon_vld1q_x3_v(&__ret, __p0, 49); \
+ __ret; \
+})
+#else
+#define vld1q_u16_x3(__p0) __extension__ ({ \
+ uint16x8x3_t __ret; \
+ __builtin_neon_vld1q_x3_v(&__ret, __p0, 49); \
+ \
+ __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
+ __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
+ __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \
+ __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vld1q_s8_x3(__p0) __extension__ ({ \
+ int8x16x3_t __ret; \
+ __builtin_neon_vld1q_x3_v(&__ret, __p0, 32); \
+ __ret; \
+})
+#else
+#define vld1q_s8_x3(__p0) __extension__ ({ \
+ int8x16x3_t __ret; \
+ __builtin_neon_vld1q_x3_v(&__ret, __p0, 32); \
+ \
+ __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
+ __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
+ __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
+ __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vld1q_f64_x3(__p0) __extension__ ({ \
+ float64x2x3_t __ret; \
+ __builtin_neon_vld1q_x3_v(&__ret, __p0, 42); \
+ __ret; \
+})
+#else
+#define vld1q_f64_x3(__p0) __extension__ ({ \
+ float64x2x3_t __ret; \
+ __builtin_neon_vld1q_x3_v(&__ret, __p0, 42); \
+ \
+ __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \
+ __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \
+ __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 1, 0); \
+ __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vld1q_f32_x3(__p0) __extension__ ({ \
+ float32x4x3_t __ret; \
+ __builtin_neon_vld1q_x3_v(&__ret, __p0, 41); \
+ __ret; \
+})
+#else
+#define vld1q_f32_x3(__p0) __extension__ ({ \
+ float32x4x3_t __ret; \
+ __builtin_neon_vld1q_x3_v(&__ret, __p0, 41); \
+ \
+ __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \
+ __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \
+ __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 3, 2, 1, 0); \
+ __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vld1q_f16_x3(__p0) __extension__ ({ \
+ float16x8x3_t __ret; \
+ __builtin_neon_vld1q_x3_v(&__ret, __p0, 40); \
+ __ret; \
+})
+#else
+#define vld1q_f16_x3(__p0) __extension__ ({ \
+ float16x8x3_t __ret; \
+ __builtin_neon_vld1q_x3_v(&__ret, __p0, 40); \
+ \
+ __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
+ __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
+ __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \
+ __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vld1q_s32_x3(__p0) __extension__ ({ \
+ int32x4x3_t __ret; \
+ __builtin_neon_vld1q_x3_v(&__ret, __p0, 34); \
+ __ret; \
+})
+#else
+#define vld1q_s32_x3(__p0) __extension__ ({ \
+ int32x4x3_t __ret; \
+ __builtin_neon_vld1q_x3_v(&__ret, __p0, 34); \
+ \
+ __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \
+ __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \
+ __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 3, 2, 1, 0); \
+ __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vld1q_s64_x3(__p0) __extension__ ({ \
+ int64x2x3_t __ret; \
+ __builtin_neon_vld1q_x3_v(&__ret, __p0, 35); \
+ __ret; \
+})
+#else
+#define vld1q_s64_x3(__p0) __extension__ ({ \
+ int64x2x3_t __ret; \
+ __builtin_neon_vld1q_x3_v(&__ret, __p0, 35); \
+ \
+ __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \
+ __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \
+ __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 1, 0); \
+ __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vld1q_s16_x3(__p0) __extension__ ({ \
+ int16x8x3_t __ret; \
+ __builtin_neon_vld1q_x3_v(&__ret, __p0, 33); \
+ __ret; \
+})
+#else
+#define vld1q_s16_x3(__p0) __extension__ ({ \
+ int16x8x3_t __ret; \
+ __builtin_neon_vld1q_x3_v(&__ret, __p0, 33); \
+ \
+ __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
+ __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
+ __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \
+ __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vld1_u8_x3(__p0) __extension__ ({ \
+ uint8x8x3_t __ret; \
+ __builtin_neon_vld1_x3_v(&__ret, __p0, 16); \
+ __ret; \
+})
+#else
+#define vld1_u8_x3(__p0) __extension__ ({ \
+ uint8x8x3_t __ret; \
+ __builtin_neon_vld1_x3_v(&__ret, __p0, 16); \
+ \
+ __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
+ __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
+ __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \
+ __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vld1_u32_x3(__p0) __extension__ ({ \
+ uint32x2x3_t __ret; \
+ __builtin_neon_vld1_x3_v(&__ret, __p0, 18); \
+ __ret; \
+})
+#else
+#define vld1_u32_x3(__p0) __extension__ ({ \
+ uint32x2x3_t __ret; \
+ __builtin_neon_vld1_x3_v(&__ret, __p0, 18); \
+ \
+ __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \
+ __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \
+ __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 1, 0); \
+ __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vld1_u64_x3(__p0) __extension__ ({ \
+ uint64x1x3_t __ret; \
+ __builtin_neon_vld1_x3_v(&__ret, __p0, 19); \
+ __ret; \
+})
+#else
+#define vld1_u64_x3(__p0) __extension__ ({ \
+ uint64x1x3_t __ret; \
+ __builtin_neon_vld1_x3_v(&__ret, __p0, 19); \
+ __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vld1_u16_x3(__p0) __extension__ ({ \
+ uint16x4x3_t __ret; \
+ __builtin_neon_vld1_x3_v(&__ret, __p0, 17); \
+ __ret; \
+})
+#else
+#define vld1_u16_x3(__p0) __extension__ ({ \
+ uint16x4x3_t __ret; \
+ __builtin_neon_vld1_x3_v(&__ret, __p0, 17); \
+ \
+ __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \
+ __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \
+ __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 3, 2, 1, 0); \
+ __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vld1_s8_x3(__p0) __extension__ ({ \
+ int8x8x3_t __ret; \
+ __builtin_neon_vld1_x3_v(&__ret, __p0, 0); \
+ __ret; \
+})
+#else
+#define vld1_s8_x3(__p0) __extension__ ({ \
+ int8x8x3_t __ret; \
+ __builtin_neon_vld1_x3_v(&__ret, __p0, 0); \
+ \
+ __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
+ __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
+ __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \
+ __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vld1_f64_x3(__p0) __extension__ ({ \
+ float64x1x3_t __ret; \
+ __builtin_neon_vld1_x3_v(&__ret, __p0, 10); \
+ __ret; \
+})
+#else
+#define vld1_f64_x3(__p0) __extension__ ({ \
+ float64x1x3_t __ret; \
+ __builtin_neon_vld1_x3_v(&__ret, __p0, 10); \
+ __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vld1_f32_x3(__p0) __extension__ ({ \
+ float32x2x3_t __ret; \
+ __builtin_neon_vld1_x3_v(&__ret, __p0, 9); \
+ __ret; \
+})
+#else
+#define vld1_f32_x3(__p0) __extension__ ({ \
+ float32x2x3_t __ret; \
+ __builtin_neon_vld1_x3_v(&__ret, __p0, 9); \
+ \
+ __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \
+ __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \
+ __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 1, 0); \
+ __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vld1_f16_x3(__p0) __extension__ ({ \
+ float16x4x3_t __ret; \
+ __builtin_neon_vld1_x3_v(&__ret, __p0, 8); \
+ __ret; \
+})
+#else
+#define vld1_f16_x3(__p0) __extension__ ({ \
+ float16x4x3_t __ret; \
+ __builtin_neon_vld1_x3_v(&__ret, __p0, 8); \
+ \
+ __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \
+ __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \
+ __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 3, 2, 1, 0); \
+ __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vld1_s32_x3(__p0) __extension__ ({ \
+ int32x2x3_t __ret; \
+ __builtin_neon_vld1_x3_v(&__ret, __p0, 2); \
+ __ret; \
+})
+#else
+#define vld1_s32_x3(__p0) __extension__ ({ \
+ int32x2x3_t __ret; \
+ __builtin_neon_vld1_x3_v(&__ret, __p0, 2); \
+ \
+ __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \
+ __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \
+ __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 1, 0); \
+ __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vld1_s64_x3(__p0) __extension__ ({ \
+ int64x1x3_t __ret; \
+ __builtin_neon_vld1_x3_v(&__ret, __p0, 3); \
+ __ret; \
+})
+#else
+#define vld1_s64_x3(__p0) __extension__ ({ \
+ int64x1x3_t __ret; \
+ __builtin_neon_vld1_x3_v(&__ret, __p0, 3); \
+ __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vld1_s16_x3(__p0) __extension__ ({ \
+ int16x4x3_t __ret; \
+ __builtin_neon_vld1_x3_v(&__ret, __p0, 1); \
+ __ret; \
+})
+#else
+#define vld1_s16_x3(__p0) __extension__ ({ \
+ int16x4x3_t __ret; \
+ __builtin_neon_vld1_x3_v(&__ret, __p0, 1); \
+ \
+ __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \
+ __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \
+ __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 3, 2, 1, 0); \
+ __ret; \
+})
+#endif
+
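+/*
+ * vld1_x4 family: same pattern as the x2/x3 forms, loading four
+ * consecutive vectors per call.  Single-lane 64-bit types (e.g.
+ * float64x1, int64x1) need no lane reversal, so their #else variants
+ * are identical to the little-endian ones.
+ */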
+#ifdef __LITTLE_ENDIAN__
+#define vld1_p8_x4(__p0) __extension__ ({ \
+ poly8x8x4_t __ret; \
+ __builtin_neon_vld1_x4_v(&__ret, __p0, 4); \
+ __ret; \
+})
+#else
+#define vld1_p8_x4(__p0) __extension__ ({ \
+ poly8x8x4_t __ret; \
+ __builtin_neon_vld1_x4_v(&__ret, __p0, 4); \
+ \
+ __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
+ __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
+ __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \
+ __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 7, 6, 5, 4, 3, 2, 1, 0); \
+ __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vld1_p64_x4(__p0) __extension__ ({ \
+ poly64x1x4_t __ret; \
+ __builtin_neon_vld1_x4_v(&__ret, __p0, 6); \
+ __ret; \
+})
+#else
+#define vld1_p64_x4(__p0) __extension__ ({ \
+ poly64x1x4_t __ret; \
+ __builtin_neon_vld1_x4_v(&__ret, __p0, 6); \
+ __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vld1_p16_x4(__p0) __extension__ ({ \
+ poly16x4x4_t __ret; \
+ __builtin_neon_vld1_x4_v(&__ret, __p0, 5); \
+ __ret; \
+})
+#else
+#define vld1_p16_x4(__p0) __extension__ ({ \
+ poly16x4x4_t __ret; \
+ __builtin_neon_vld1_x4_v(&__ret, __p0, 5); \
+ \
+ __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \
+ __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \
+ __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 3, 2, 1, 0); \
+ __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 3, 2, 1, 0); \
+ __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vld1q_p8_x4(__p0) __extension__ ({ \
+ poly8x16x4_t __ret; \
+ __builtin_neon_vld1q_x4_v(&__ret, __p0, 36); \
+ __ret; \
+})
+#else
+#define vld1q_p8_x4(__p0) __extension__ ({ \
+ poly8x16x4_t __ret; \
+ __builtin_neon_vld1q_x4_v(&__ret, __p0, 36); \
+ \
+ __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
+ __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
+ __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
+ __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
+ __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vld1q_p64_x4(__p0) __extension__ ({ \
+ poly64x2x4_t __ret; \
+ __builtin_neon_vld1q_x4_v(&__ret, __p0, 38); \
+ __ret; \
+})
+#else
+#define vld1q_p64_x4(__p0) __extension__ ({ \
+ poly64x2x4_t __ret; \
+ __builtin_neon_vld1q_x4_v(&__ret, __p0, 38); \
+ \
+ __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \
+ __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \
+ __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 1, 0); \
+ __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 1, 0); \
+ __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vld1q_p16_x4(__p0) __extension__ ({ \
+ poly16x8x4_t __ret; \
+ __builtin_neon_vld1q_x4_v(&__ret, __p0, 37); \
+ __ret; \
+})
+#else
+#define vld1q_p16_x4(__p0) __extension__ ({ \
+ poly16x8x4_t __ret; \
+ __builtin_neon_vld1q_x4_v(&__ret, __p0, 37); \
+ \
+ __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
+ __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
+ __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \
+ __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 7, 6, 5, 4, 3, 2, 1, 0); \
+ __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vld1q_u8_x4(__p0) __extension__ ({ \
+ uint8x16x4_t __ret; \
+ __builtin_neon_vld1q_x4_v(&__ret, __p0, 48); \
+ __ret; \
+})
+#else
+#define vld1q_u8_x4(__p0) __extension__ ({ \
+ uint8x16x4_t __ret; \
+ __builtin_neon_vld1q_x4_v(&__ret, __p0, 48); \
+ \
+ __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
+ __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
+ __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
+ __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
+ __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vld1q_u32_x4(__p0) __extension__ ({ \
+ uint32x4x4_t __ret; \
+ __builtin_neon_vld1q_x4_v(&__ret, __p0, 50); \
+ __ret; \
+})
+#else
+#define vld1q_u32_x4(__p0) __extension__ ({ \
+ uint32x4x4_t __ret; \
+ __builtin_neon_vld1q_x4_v(&__ret, __p0, 50); \
+ \
+ __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \
+ __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \
+ __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 3, 2, 1, 0); \
+ __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 3, 2, 1, 0); \
+ __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vld1q_u64_x4(__p0) __extension__ ({ \
+ uint64x2x4_t __ret; \
+ __builtin_neon_vld1q_x4_v(&__ret, __p0, 51); \
+ __ret; \
+})
+#else
+#define vld1q_u64_x4(__p0) __extension__ ({ \
+ uint64x2x4_t __ret; \
+ __builtin_neon_vld1q_x4_v(&__ret, __p0, 51); \
+ \
+ __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \
+ __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \
+ __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 1, 0); \
+ __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 1, 0); \
+ __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vld1q_u16_x4(__p0) __extension__ ({ \
+ uint16x8x4_t __ret; \
+ __builtin_neon_vld1q_x4_v(&__ret, __p0, 49); \
+ __ret; \
+})
+#else
+#define vld1q_u16_x4(__p0) __extension__ ({ \
+ uint16x8x4_t __ret; \
+ __builtin_neon_vld1q_x4_v(&__ret, __p0, 49); \
+ \
+ __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
+ __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
+ __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \
+ __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 7, 6, 5, 4, 3, 2, 1, 0); \
+ __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vld1q_s8_x4(__p0) __extension__ ({ \
+ int8x16x4_t __ret; \
+ __builtin_neon_vld1q_x4_v(&__ret, __p0, 32); \
+ __ret; \
+})
+#else
+#define vld1q_s8_x4(__p0) __extension__ ({ \
+ int8x16x4_t __ret; \
+ __builtin_neon_vld1q_x4_v(&__ret, __p0, 32); \
+ \
+ __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
+ __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
+ __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
+ __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
+ __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vld1q_f64_x4(__p0) __extension__ ({ \
+ float64x2x4_t __ret; \
+ __builtin_neon_vld1q_x4_v(&__ret, __p0, 42); \
+ __ret; \
+})
+#else
+#define vld1q_f64_x4(__p0) __extension__ ({ \
+ float64x2x4_t __ret; \
+ __builtin_neon_vld1q_x4_v(&__ret, __p0, 42); \
+ \
+ __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \
+ __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \
+ __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 1, 0); \
+ __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 1, 0); \
+ __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vld1q_f32_x4(__p0) __extension__ ({ \
+ float32x4x4_t __ret; \
+ __builtin_neon_vld1q_x4_v(&__ret, __p0, 41); \
+ __ret; \
+})
+#else
+#define vld1q_f32_x4(__p0) __extension__ ({ \
+ float32x4x4_t __ret; \
+ __builtin_neon_vld1q_x4_v(&__ret, __p0, 41); \
+ \
+ __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \
+ __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \
+ __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 3, 2, 1, 0); \
+ __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 3, 2, 1, 0); \
+ __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vld1q_f16_x4(__p0) __extension__ ({ \
+ float16x8x4_t __ret; \
+ __builtin_neon_vld1q_x4_v(&__ret, __p0, 40); \
+ __ret; \
+})
+#else
+#define vld1q_f16_x4(__p0) __extension__ ({ \
+ float16x8x4_t __ret; \
+ __builtin_neon_vld1q_x4_v(&__ret, __p0, 40); \
+ \
+ __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
+ __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
+ __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \
+ __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 7, 6, 5, 4, 3, 2, 1, 0); \
+ __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vld1q_s32_x4(__p0) __extension__ ({ \
+ int32x4x4_t __ret; \
+ __builtin_neon_vld1q_x4_v(&__ret, __p0, 34); \
+ __ret; \
+})
+#else
+#define vld1q_s32_x4(__p0) __extension__ ({ \
+ int32x4x4_t __ret; \
+ __builtin_neon_vld1q_x4_v(&__ret, __p0, 34); \
+ \
+ __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \
+ __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \
+ __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 3, 2, 1, 0); \
+ __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 3, 2, 1, 0); \
+ __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vld1q_s64_x4(__p0) __extension__ ({ \
+ int64x2x4_t __ret; \
+ __builtin_neon_vld1q_x4_v(&__ret, __p0, 35); \
+ __ret; \
+})
+#else
+#define vld1q_s64_x4(__p0) __extension__ ({ \
+ int64x2x4_t __ret; \
+ __builtin_neon_vld1q_x4_v(&__ret, __p0, 35); \
+ \
+ __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \
+ __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \
+ __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 1, 0); \
+ __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 1, 0); \
+ __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vld1q_s16_x4(__p0) __extension__ ({ \
+ int16x8x4_t __ret; \
+ __builtin_neon_vld1q_x4_v(&__ret, __p0, 33); \
+ __ret; \
+})
+#else
+#define vld1q_s16_x4(__p0) __extension__ ({ \
+ int16x8x4_t __ret; \
+ __builtin_neon_vld1q_x4_v(&__ret, __p0, 33); \
+ \
+ __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
+ __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
+ __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \
+ __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 7, 6, 5, 4, 3, 2, 1, 0); \
+ __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vld1_u8_x4(__p0) __extension__ ({ \
+ uint8x8x4_t __ret; \
+ __builtin_neon_vld1_x4_v(&__ret, __p0, 16); \
+ __ret; \
+})
+#else
+#define vld1_u8_x4(__p0) __extension__ ({ \
+ uint8x8x4_t __ret; \
+ __builtin_neon_vld1_x4_v(&__ret, __p0, 16); \
+ \
+ __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
+ __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
+ __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \
+ __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 7, 6, 5, 4, 3, 2, 1, 0); \
+ __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vld1_u32_x4(__p0) __extension__ ({ \
+ uint32x2x4_t __ret; \
+ __builtin_neon_vld1_x4_v(&__ret, __p0, 18); \
+ __ret; \
+})
+#else
+#define vld1_u32_x4(__p0) __extension__ ({ \
+ uint32x2x4_t __ret; \
+ __builtin_neon_vld1_x4_v(&__ret, __p0, 18); \
+ \
+ __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \
+ __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \
+ __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 1, 0); \
+ __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 1, 0); \
+ __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vld1_u64_x4(__p0) __extension__ ({ \
+ uint64x1x4_t __ret; \
+ __builtin_neon_vld1_x4_v(&__ret, __p0, 19); \
+ __ret; \
+})
+#else
+#define vld1_u64_x4(__p0) __extension__ ({ \
+ uint64x1x4_t __ret; \
+ __builtin_neon_vld1_x4_v(&__ret, __p0, 19); \
+ __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vld1_u16_x4(__p0) __extension__ ({ \
+ uint16x4x4_t __ret; \
+ __builtin_neon_vld1_x4_v(&__ret, __p0, 17); \
+ __ret; \
+})
+#else
+#define vld1_u16_x4(__p0) __extension__ ({ \
+ uint16x4x4_t __ret; \
+ __builtin_neon_vld1_x4_v(&__ret, __p0, 17); \
+ \
+ __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \
+ __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \
+ __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 3, 2, 1, 0); \
+ __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 3, 2, 1, 0); \
+ __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vld1_s8_x4(__p0) __extension__ ({ \
+ int8x8x4_t __ret; \
+ __builtin_neon_vld1_x4_v(&__ret, __p0, 0); \
+ __ret; \
+})
+#else
+#define vld1_s8_x4(__p0) __extension__ ({ \
+ int8x8x4_t __ret; \
+ __builtin_neon_vld1_x4_v(&__ret, __p0, 0); \
+ \
+ __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
+ __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
+ __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \
+ __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 7, 6, 5, 4, 3, 2, 1, 0); \
+ __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vld1_f64_x4(__p0) __extension__ ({ \
+ float64x1x4_t __ret; \
+ __builtin_neon_vld1_x4_v(&__ret, __p0, 10); \
+ __ret; \
+})
+#else
+#define vld1_f64_x4(__p0) __extension__ ({ \
+ float64x1x4_t __ret; \
+ __builtin_neon_vld1_x4_v(&__ret, __p0, 10); \
+ __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vld1_f32_x4(__p0) __extension__ ({ \
+ float32x2x4_t __ret; \
+ __builtin_neon_vld1_x4_v(&__ret, __p0, 9); \
+ __ret; \
+})
+#else
+#define vld1_f32_x4(__p0) __extension__ ({ \
+ float32x2x4_t __ret; \
+ __builtin_neon_vld1_x4_v(&__ret, __p0, 9); \
+ \
+ __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \
+ __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \
+ __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 1, 0); \
+ __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 1, 0); \
+ __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vld1_f16_x4(__p0) __extension__ ({ \
+ float16x4x4_t __ret; \
+ __builtin_neon_vld1_x4_v(&__ret, __p0, 8); \
+ __ret; \
+})
+#else
+#define vld1_f16_x4(__p0) __extension__ ({ \
+ float16x4x4_t __ret; \
+ __builtin_neon_vld1_x4_v(&__ret, __p0, 8); \
+ \
+ __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \
+ __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \
+ __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 3, 2, 1, 0); \
+ __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 3, 2, 1, 0); \
+ __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vld1_s32_x4(__p0) __extension__ ({ \
+ int32x2x4_t __ret; \
+ __builtin_neon_vld1_x4_v(&__ret, __p0, 2); \
+ __ret; \
+})
+#else
+#define vld1_s32_x4(__p0) __extension__ ({ \
+ int32x2x4_t __ret; \
+ __builtin_neon_vld1_x4_v(&__ret, __p0, 2); \
+ \
+ __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \
+ __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \
+ __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 1, 0); \
+ __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 1, 0); \
+ __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vld1_s64_x4(__p0) __extension__ ({ \
+ int64x1x4_t __ret; \
+ __builtin_neon_vld1_x4_v(&__ret, __p0, 3); \
+ __ret; \
+})
+#else
+#define vld1_s64_x4(__p0) __extension__ ({ \
+ int64x1x4_t __ret; \
+ __builtin_neon_vld1_x4_v(&__ret, __p0, 3); \
+ __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vld1_s16_x4(__p0) __extension__ ({ \
+ int16x4x4_t __ret; \
+ __builtin_neon_vld1_x4_v(&__ret, __p0, 1); \
+ __ret; \
+})
+#else
+#define vld1_s16_x4(__p0) __extension__ ({ \
+ int16x4x4_t __ret; \
+ __builtin_neon_vld1_x4_v(&__ret, __p0, 1); \
+ \
+ __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \
+ __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \
+ __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 3, 2, 1, 0); \
+ __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 3, 2, 1, 0); \
+ __ret; \
+})
+#endif
+
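+/*
+ * vld2 family: interleaved structure loads.  A vld2 of N-lane vectors
+ * reads 2*N elements and de-interleaves them, so __ret.val[0]
+ * receives elements 0, 2, 4, ... and __ret.val[1] receives elements
+ * 1, 3, 5, ...  The trailing integer argument is an internal type
+ * code consumed by the overloaded builtin, not a user parameter.
+ *
+ * Illustrative use (the buffer name is hypothetical; it assumes
+ * interleaved x/y doubles):
+ *   float64x2x2_t pts = vld2q_f64(xy);  // pts.val[0]=x's, pts.val[1]=y's
+ */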
+#ifdef __LITTLE_ENDIAN__
+#define vld2_p64(__p0) __extension__ ({ \
+ poly64x1x2_t __ret; \
+ __builtin_neon_vld2_v(&__ret, __p0, 6); \
+ __ret; \
+})
+#else
+#define vld2_p64(__p0) __extension__ ({ \
+ poly64x1x2_t __ret; \
+ __builtin_neon_vld2_v(&__ret, __p0, 6); \
+ __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vld2q_p64(__p0) __extension__ ({ \
+ poly64x2x2_t __ret; \
+ __builtin_neon_vld2q_v(&__ret, __p0, 38); \
+ __ret; \
+})
+#else
+#define vld2q_p64(__p0) __extension__ ({ \
+ poly64x2x2_t __ret; \
+ __builtin_neon_vld2q_v(&__ret, __p0, 38); \
+ \
+ __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \
+ __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \
+ __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vld2q_u64(__p0) __extension__ ({ \
+ uint64x2x2_t __ret; \
+ __builtin_neon_vld2q_v(&__ret, __p0, 51); \
+ __ret; \
+})
+#else
+#define vld2q_u64(__p0) __extension__ ({ \
+ uint64x2x2_t __ret; \
+ __builtin_neon_vld2q_v(&__ret, __p0, 51); \
+ \
+ __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \
+ __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \
+ __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vld2q_f64(__p0) __extension__ ({ \
+ float64x2x2_t __ret; \
+ __builtin_neon_vld2q_v(&__ret, __p0, 42); \
+ __ret; \
+})
+#else
+#define vld2q_f64(__p0) __extension__ ({ \
+ float64x2x2_t __ret; \
+ __builtin_neon_vld2q_v(&__ret, __p0, 42); \
+ \
+ __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \
+ __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \
+ __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vld2q_s64(__p0) __extension__ ({ \
+ int64x2x2_t __ret; \
+ __builtin_neon_vld2q_v(&__ret, __p0, 35); \
+ __ret; \
+})
+#else
+#define vld2q_s64(__p0) __extension__ ({ \
+ int64x2x2_t __ret; \
+ __builtin_neon_vld2q_v(&__ret, __p0, 35); \
+ \
+ __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \
+ __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \
+ __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vld2_f64(__p0) __extension__ ({ \
+ float64x1x2_t __ret; \
+ __builtin_neon_vld2_v(&__ret, __p0, 10); \
+ __ret; \
+})
+#else
+#define vld2_f64(__p0) __extension__ ({ \
+ float64x1x2_t __ret; \
+ __builtin_neon_vld2_v(&__ret, __p0, 10); \
+ __ret; \
+})
+#endif
+
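+/*
+ * vld2_dup family: load a single two-element structure and replicate
+ * it into every lane of the two result vectors.
+ */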
+#ifdef __LITTLE_ENDIAN__
+#define vld2_dup_p64(__p0) __extension__ ({ \
+ poly64x1x2_t __ret; \
+ __builtin_neon_vld2_dup_v(&__ret, __p0, 6); \
+ __ret; \
+})
+#else
+#define vld2_dup_p64(__p0) __extension__ ({ \
+ poly64x1x2_t __ret; \
+ __builtin_neon_vld2_dup_v(&__ret, __p0, 6); \
+ __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vld2q_dup_p8(__p0) __extension__ ({ \
+ poly8x16x2_t __ret; \
+ __builtin_neon_vld2q_dup_v(&__ret, __p0, 36); \
+ __ret; \
+})
+#else
+#define vld2q_dup_p8(__p0) __extension__ ({ \
+ poly8x16x2_t __ret; \
+ __builtin_neon_vld2q_dup_v(&__ret, __p0, 36); \
+ \
+ __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
+ __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
+ __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vld2q_dup_p64(__p0) __extension__ ({ \
+ poly64x2x2_t __ret; \
+ __builtin_neon_vld2q_dup_v(&__ret, __p0, 38); \
+ __ret; \
+})
+#else
+#define vld2q_dup_p64(__p0) __extension__ ({ \
+ poly64x2x2_t __ret; \
+ __builtin_neon_vld2q_dup_v(&__ret, __p0, 38); \
+ \
+ __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \
+ __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \
+ __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vld2q_dup_p16(__p0) __extension__ ({ \
+ poly16x8x2_t __ret; \
+ __builtin_neon_vld2q_dup_v(&__ret, __p0, 37); \
+ __ret; \
+})
+#else
+#define vld2q_dup_p16(__p0) __extension__ ({ \
+ poly16x8x2_t __ret; \
+ __builtin_neon_vld2q_dup_v(&__ret, __p0, 37); \
+ \
+ __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
+ __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
+ __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vld2q_dup_u8(__p0) __extension__ ({ \
+ uint8x16x2_t __ret; \
+ __builtin_neon_vld2q_dup_v(&__ret, __p0, 48); \
+ __ret; \
+})
+#else
+#define vld2q_dup_u8(__p0) __extension__ ({ \
+ uint8x16x2_t __ret; \
+ __builtin_neon_vld2q_dup_v(&__ret, __p0, 48); \
+ \
+ __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
+ __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
+ __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vld2q_dup_u32(__p0) __extension__ ({ \
+ uint32x4x2_t __ret; \
+ __builtin_neon_vld2q_dup_v(&__ret, __p0, 50); \
+ __ret; \
+})
+#else
+#define vld2q_dup_u32(__p0) __extension__ ({ \
+ uint32x4x2_t __ret; \
+ __builtin_neon_vld2q_dup_v(&__ret, __p0, 50); \
+ \
+ __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \
+ __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \
+ __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vld2q_dup_u64(__p0) __extension__ ({ \
+ uint64x2x2_t __ret; \
+ __builtin_neon_vld2q_dup_v(&__ret, __p0, 51); \
+ __ret; \
+})
+#else
+#define vld2q_dup_u64(__p0) __extension__ ({ \
+ uint64x2x2_t __ret; \
+ __builtin_neon_vld2q_dup_v(&__ret, __p0, 51); \
+ \
+ __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \
+ __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \
+ __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vld2q_dup_u16(__p0) __extension__ ({ \
+ uint16x8x2_t __ret; \
+ __builtin_neon_vld2q_dup_v(&__ret, __p0, 49); \
+ __ret; \
+})
+#else
+#define vld2q_dup_u16(__p0) __extension__ ({ \
+ uint16x8x2_t __ret; \
+ __builtin_neon_vld2q_dup_v(&__ret, __p0, 49); \
+ \
+ __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
+ __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
+ __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vld2q_dup_s8(__p0) __extension__ ({ \
+ int8x16x2_t __ret; \
+ __builtin_neon_vld2q_dup_v(&__ret, __p0, 32); \
+ __ret; \
+})
+#else
+#define vld2q_dup_s8(__p0) __extension__ ({ \
+ int8x16x2_t __ret; \
+ __builtin_neon_vld2q_dup_v(&__ret, __p0, 32); \
+ \
+ __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
+ __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
+ __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vld2q_dup_f64(__p0) __extension__ ({ \
+ float64x2x2_t __ret; \
+ __builtin_neon_vld2q_dup_v(&__ret, __p0, 42); \
+ __ret; \
+})
+#else
+#define vld2q_dup_f64(__p0) __extension__ ({ \
+ float64x2x2_t __ret; \
+ __builtin_neon_vld2q_dup_v(&__ret, __p0, 42); \
+ \
+ __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \
+ __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \
+ __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vld2q_dup_f32(__p0) __extension__ ({ \
+ float32x4x2_t __ret; \
+ __builtin_neon_vld2q_dup_v(&__ret, __p0, 41); \
+ __ret; \
+})
+#else
+#define vld2q_dup_f32(__p0) __extension__ ({ \
+ float32x4x2_t __ret; \
+ __builtin_neon_vld2q_dup_v(&__ret, __p0, 41); \
+ \
+ __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \
+ __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \
+ __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vld2q_dup_f16(__p0) __extension__ ({ \
+ float16x8x2_t __ret; \
+ __builtin_neon_vld2q_dup_v(&__ret, __p0, 40); \
+ __ret; \
+})
+#else
+#define vld2q_dup_f16(__p0) __extension__ ({ \
+ float16x8x2_t __ret; \
+ __builtin_neon_vld2q_dup_v(&__ret, __p0, 40); \
+ \
+ __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
+ __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
+ __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vld2q_dup_s32(__p0) __extension__ ({ \
+ int32x4x2_t __ret; \
+ __builtin_neon_vld2q_dup_v(&__ret, __p0, 34); \
+ __ret; \
+})
+#else
+#define vld2q_dup_s32(__p0) __extension__ ({ \
+ int32x4x2_t __ret; \
+ __builtin_neon_vld2q_dup_v(&__ret, __p0, 34); \
+ \
+ __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \
+ __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \
+ __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vld2q_dup_s64(__p0) __extension__ ({ \
+ int64x2x2_t __ret; \
+ __builtin_neon_vld2q_dup_v(&__ret, __p0, 35); \
+ __ret; \
+})
+#else
+#define vld2q_dup_s64(__p0) __extension__ ({ \
+ int64x2x2_t __ret; \
+ __builtin_neon_vld2q_dup_v(&__ret, __p0, 35); \
+ \
+ __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \
+ __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \
+ __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vld2q_dup_s16(__p0) __extension__ ({ \
+ int16x8x2_t __ret; \
+ __builtin_neon_vld2q_dup_v(&__ret, __p0, 33); \
+ __ret; \
+})
+#else
+#define vld2q_dup_s16(__p0) __extension__ ({ \
+ int16x8x2_t __ret; \
+ __builtin_neon_vld2q_dup_v(&__ret, __p0, 33); \
+ \
+ __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
+ __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
+ __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vld2_dup_f64(__p0) __extension__ ({ \
+ float64x1x2_t __ret; \
+ __builtin_neon_vld2_dup_v(&__ret, __p0, 10); \
+ __ret; \
+})
+#else
+#define vld2_dup_f64(__p0) __extension__ ({ \
+ float64x1x2_t __ret; \
+ __builtin_neon_vld2_dup_v(&__ret, __p0, 10); \
+ __ret; \
+})
+#endif
+
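+/*
+ * vld2_lane family: load one two-element structure into lane __p2 of
+ * the vector pair __p1, leaving the other lanes unchanged.  On big
+ * endian the inputs are reversed into __rev1 before the builtin call
+ * and the results are reversed back afterwards; single-lane 64-bit
+ * variants skip both steps.
+ */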
+#ifdef __LITTLE_ENDIAN__
+#define vld2_lane_p64(__p0, __p1, __p2) __extension__ ({ \
+ poly64x1x2_t __s1 = __p1; \
+ poly64x1x2_t __ret; \
+ __builtin_neon_vld2_lane_v(&__ret, __p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], __p2, 6); \
+ __ret; \
+})
+#else
+#define vld2_lane_p64(__p0, __p1, __p2) __extension__ ({ \
+ poly64x1x2_t __s1 = __p1; \
+ poly64x1x2_t __ret; \
+ __builtin_neon_vld2_lane_v(&__ret, __p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], __p2, 6); \
+ __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vld2q_lane_p8(__p0, __p1, __p2) __extension__ ({ \
+ poly8x16x2_t __s1 = __p1; \
+ poly8x16x2_t __ret; \
+ __builtin_neon_vld2q_lane_v(&__ret, __p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], __p2, 36); \
+ __ret; \
+})
+#else
+#define vld2q_lane_p8(__p0, __p1, __p2) __extension__ ({ \
+ poly8x16x2_t __s1 = __p1; \
+ poly8x16x2_t __rev1; \
+ __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
+ __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
+ poly8x16x2_t __ret; \
+ __builtin_neon_vld2q_lane_v(&__ret, __p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], __p2, 36); \
+ \
+ __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
+ __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
+ __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vld2q_lane_p64(__p0, __p1, __p2) __extension__ ({ \
+ poly64x2x2_t __s1 = __p1; \
+ poly64x2x2_t __ret; \
+ __builtin_neon_vld2q_lane_v(&__ret, __p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], __p2, 38); \
+ __ret; \
+})
+#else
+#define vld2q_lane_p64(__p0, __p1, __p2) __extension__ ({ \
+ poly64x2x2_t __s1 = __p1; \
+ poly64x2x2_t __rev1; \
+ __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \
+ __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \
+ poly64x2x2_t __ret; \
+ __builtin_neon_vld2q_lane_v(&__ret, __p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], __p2, 38); \
+ \
+ __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \
+ __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \
+ __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vld2q_lane_u8(__p0, __p1, __p2) __extension__ ({ \
+ uint8x16x2_t __s1 = __p1; \
+ uint8x16x2_t __ret; \
+ __builtin_neon_vld2q_lane_v(&__ret, __p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], __p2, 48); \
+ __ret; \
+})
+#else
+#define vld2q_lane_u8(__p0, __p1, __p2) __extension__ ({ \
+ uint8x16x2_t __s1 = __p1; \
+ uint8x16x2_t __rev1; \
+ __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
+ __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
+ uint8x16x2_t __ret; \
+ __builtin_neon_vld2q_lane_v(&__ret, __p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], __p2, 48); \
+ \
+ __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
+ __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
+ __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vld2q_lane_u64(__p0, __p1, __p2) __extension__ ({ \
+ uint64x2x2_t __s1 = __p1; \
+ uint64x2x2_t __ret; \
+ __builtin_neon_vld2q_lane_v(&__ret, __p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], __p2, 51); \
+ __ret; \
+})
+#else
+#define vld2q_lane_u64(__p0, __p1, __p2) __extension__ ({ \
+ uint64x2x2_t __s1 = __p1; \
+ uint64x2x2_t __rev1; \
+ __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \
+ __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \
+ uint64x2x2_t __ret; \
+ __builtin_neon_vld2q_lane_v(&__ret, __p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], __p2, 51); \
+ \
+ __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \
+ __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \
+ __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vld2q_lane_s8(__p0, __p1, __p2) __extension__ ({ \
+ int8x16x2_t __s1 = __p1; \
+ int8x16x2_t __ret; \
+ __builtin_neon_vld2q_lane_v(&__ret, __p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], __p2, 32); \
+ __ret; \
+})
+#else
+#define vld2q_lane_s8(__p0, __p1, __p2) __extension__ ({ \
+ int8x16x2_t __s1 = __p1; \
+ int8x16x2_t __rev1; \
+ __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
+ __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
+ int8x16x2_t __ret; \
+ __builtin_neon_vld2q_lane_v(&__ret, __p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], __p2, 32); \
+ \
+ __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
+ __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
+ __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vld2q_lane_f64(__p0, __p1, __p2) __extension__ ({ \
+ float64x2x2_t __s1 = __p1; \
+ float64x2x2_t __ret; \
+ __builtin_neon_vld2q_lane_v(&__ret, __p0, __s1.val[0], __s1.val[1], __p2, 42); \
+ __ret; \
+})
+#else
+#define vld2q_lane_f64(__p0, __p1, __p2) __extension__ ({ \
+ float64x2x2_t __s1 = __p1; \
+ float64x2x2_t __rev1; \
+ __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \
+ __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \
+ float64x2x2_t __ret; \
+ __builtin_neon_vld2q_lane_v(&__ret, __p0, __rev1.val[0], __rev1.val[1], __p2, 42); \
+ \
+ __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \
+ __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \
+ __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vld2q_lane_s64(__p0, __p1, __p2) __extension__ ({ \
+ int64x2x2_t __s1 = __p1; \
+ int64x2x2_t __ret; \
+ __builtin_neon_vld2q_lane_v(&__ret, __p0, __s1.val[0], __s1.val[1], __p2, 35); \
+ __ret; \
+})
+#else
+#define vld2q_lane_s64(__p0, __p1, __p2) __extension__ ({ \
+ int64x2x2_t __s1 = __p1; \
+ int64x2x2_t __rev1; \
+ __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \
+ __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \
+ int64x2x2_t __ret; \
+ __builtin_neon_vld2q_lane_v(&__ret, __p0, __rev1.val[0], __rev1.val[1], __p2, 35); \
+ \
+ __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \
+ __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \
+ __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vld2_lane_u64(__p0, __p1, __p2) __extension__ ({ \
+ uint64x1x2_t __s1 = __p1; \
+ uint64x1x2_t __ret; \
+ __builtin_neon_vld2_lane_v(&__ret, __p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], __p2, 19); \
+ __ret; \
+})
+#else
+#define vld2_lane_u64(__p0, __p1, __p2) __extension__ ({ \
+ uint64x1x2_t __s1 = __p1; \
+ uint64x1x2_t __ret; \
+ __builtin_neon_vld2_lane_v(&__ret, __p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], __p2, 19); \
+ __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vld2_lane_f64(__p0, __p1, __p2) __extension__ ({ \
+ float64x1x2_t __s1 = __p1; \
+ float64x1x2_t __ret; \
+ __builtin_neon_vld2_lane_v(&__ret, __p0, __s1.val[0], __s1.val[1], __p2, 10); \
+ __ret; \
+})
+#else
+#define vld2_lane_f64(__p0, __p1, __p2) __extension__ ({ \
+ float64x1x2_t __s1 = __p1; \
+ float64x1x2_t __ret; \
+ __builtin_neon_vld2_lane_v(&__ret, __p0, __s1.val[0], __s1.val[1], __p2, 10); \
+ __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vld2_lane_s64(__p0, __p1, __p2) __extension__ ({ \
+ int64x1x2_t __s1 = __p1; \
+ int64x1x2_t __ret; \
+ __builtin_neon_vld2_lane_v(&__ret, __p0, __s1.val[0], __s1.val[1], __p2, 3); \
+ __ret; \
+})
+#else
+#define vld2_lane_s64(__p0, __p1, __p2) __extension__ ({ \
+ int64x1x2_t __s1 = __p1; \
+ int64x1x2_t __ret; \
+ __builtin_neon_vld2_lane_v(&__ret, __p0, __s1.val[0], __s1.val[1], __p2, 3); \
+ __ret; \
+})
+#endif
+
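+/*
+ * vld3 family: as vld2, but reading and de-interleaving three-element
+ * structures into __ret.val[0..2].
+ */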
+#ifdef __LITTLE_ENDIAN__
+#define vld3_p64(__p0) __extension__ ({ \
+ poly64x1x3_t __ret; \
+ __builtin_neon_vld3_v(&__ret, __p0, 6); \
+ __ret; \
+})
+#else
+#define vld3_p64(__p0) __extension__ ({ \
+ poly64x1x3_t __ret; \
+ __builtin_neon_vld3_v(&__ret, __p0, 6); \
+ __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vld3q_p64(__p0) __extension__ ({ \
+ poly64x2x3_t __ret; \
+ __builtin_neon_vld3q_v(&__ret, __p0, 38); \
+ __ret; \
+})
+#else
+#define vld3q_p64(__p0) __extension__ ({ \
+ poly64x2x3_t __ret; \
+ __builtin_neon_vld3q_v(&__ret, __p0, 38); \
+ \
+ __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \
+ __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \
+ __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 1, 0); \
+ __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vld3q_u64(__p0) __extension__ ({ \
+ uint64x2x3_t __ret; \
+ __builtin_neon_vld3q_v(&__ret, __p0, 51); \
+ __ret; \
+})
+#else
+#define vld3q_u64(__p0) __extension__ ({ \
+ uint64x2x3_t __ret; \
+ __builtin_neon_vld3q_v(&__ret, __p0, 51); \
+ \
+ __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \
+ __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \
+ __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 1, 0); \
+ __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vld3q_f64(__p0) __extension__ ({ \
+ float64x2x3_t __ret; \
+ __builtin_neon_vld3q_v(&__ret, __p0, 42); \
+ __ret; \
+})
+#else
+#define vld3q_f64(__p0) __extension__ ({ \
+ float64x2x3_t __ret; \
+ __builtin_neon_vld3q_v(&__ret, __p0, 42); \
+ \
+ __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \
+ __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \
+ __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 1, 0); \
+ __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vld3q_s64(__p0) __extension__ ({ \
+ int64x2x3_t __ret; \
+ __builtin_neon_vld3q_v(&__ret, __p0, 35); \
+ __ret; \
+})
+#else
+#define vld3q_s64(__p0) __extension__ ({ \
+ int64x2x3_t __ret; \
+ __builtin_neon_vld3q_v(&__ret, __p0, 35); \
+ \
+ __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \
+ __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \
+ __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 1, 0); \
+ __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vld3_f64(__p0) __extension__ ({ \
+ float64x1x3_t __ret; \
+ __builtin_neon_vld3_v(&__ret, __p0, 10); \
+ __ret; \
+})
+#else
+#define vld3_f64(__p0) __extension__ ({ \
+ float64x1x3_t __ret; \
+ __builtin_neon_vld3_v(&__ret, __p0, 10); \
+ __ret; \
+})
+#endif
+
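+/*
+ * vld3_dup family: load a single three-element structure and
+ * replicate it across all lanes of the three result vectors.
+ */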
+#ifdef __LITTLE_ENDIAN__
+#define vld3_dup_p64(__p0) __extension__ ({ \
+ poly64x1x3_t __ret; \
+ __builtin_neon_vld3_dup_v(&__ret, __p0, 6); \
+ __ret; \
+})
+#else
+#define vld3_dup_p64(__p0) __extension__ ({ \
+ poly64x1x3_t __ret; \
+ __builtin_neon_vld3_dup_v(&__ret, __p0, 6); \
+ __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vld3q_dup_p8(__p0) __extension__ ({ \
+ poly8x16x3_t __ret; \
+ __builtin_neon_vld3q_dup_v(&__ret, __p0, 36); \
+ __ret; \
+})
+#else
+#define vld3q_dup_p8(__p0) __extension__ ({ \
+ poly8x16x3_t __ret; \
+ __builtin_neon_vld3q_dup_v(&__ret, __p0, 36); \
+ \
+ __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
+ __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
+ __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
+ __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vld3q_dup_p64(__p0) __extension__ ({ \
+ poly64x2x3_t __ret; \
+ __builtin_neon_vld3q_dup_v(&__ret, __p0, 38); \
+ __ret; \
+})
+#else
+#define vld3q_dup_p64(__p0) __extension__ ({ \
+ poly64x2x3_t __ret; \
+ __builtin_neon_vld3q_dup_v(&__ret, __p0, 38); \
+ \
+ __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \
+ __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \
+ __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 1, 0); \
+ __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vld3q_dup_p16(__p0) __extension__ ({ \
+ poly16x8x3_t __ret; \
+ __builtin_neon_vld3q_dup_v(&__ret, __p0, 37); \
+ __ret; \
+})
+#else
+#define vld3q_dup_p16(__p0) __extension__ ({ \
+ poly16x8x3_t __ret; \
+ __builtin_neon_vld3q_dup_v(&__ret, __p0, 37); \
+ \
+ __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
+ __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
+ __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \
+ __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vld3q_dup_u8(__p0) __extension__ ({ \
+ uint8x16x3_t __ret; \
+ __builtin_neon_vld3q_dup_v(&__ret, __p0, 48); \
+ __ret; \
+})
+#else
+#define vld3q_dup_u8(__p0) __extension__ ({ \
+ uint8x16x3_t __ret; \
+ __builtin_neon_vld3q_dup_v(&__ret, __p0, 48); \
+ \
+ __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
+ __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
+ __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
+ __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vld3q_dup_u32(__p0) __extension__ ({ \
+ uint32x4x3_t __ret; \
+ __builtin_neon_vld3q_dup_v(&__ret, __p0, 50); \
+ __ret; \
+})
+#else
+#define vld3q_dup_u32(__p0) __extension__ ({ \
+ uint32x4x3_t __ret; \
+ __builtin_neon_vld3q_dup_v(&__ret, __p0, 50); \
+ \
+ __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \
+ __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \
+ __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 3, 2, 1, 0); \
+ __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vld3q_dup_u64(__p0) __extension__ ({ \
+ uint64x2x3_t __ret; \
+ __builtin_neon_vld3q_dup_v(&__ret, __p0, 51); \
+ __ret; \
+})
+#else
+#define vld3q_dup_u64(__p0) __extension__ ({ \
+ uint64x2x3_t __ret; \
+ __builtin_neon_vld3q_dup_v(&__ret, __p0, 51); \
+ \
+ __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \
+ __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \
+ __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 1, 0); \
+ __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vld3q_dup_u16(__p0) __extension__ ({ \
+ uint16x8x3_t __ret; \
+ __builtin_neon_vld3q_dup_v(&__ret, __p0, 49); \
+ __ret; \
+})
+#else
+#define vld3q_dup_u16(__p0) __extension__ ({ \
+ uint16x8x3_t __ret; \
+ __builtin_neon_vld3q_dup_v(&__ret, __p0, 49); \
+ \
+ __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
+ __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
+ __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \
+ __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vld3q_dup_s8(__p0) __extension__ ({ \
+ int8x16x3_t __ret; \
+ __builtin_neon_vld3q_dup_v(&__ret, __p0, 32); \
+ __ret; \
+})
+#else
+#define vld3q_dup_s8(__p0) __extension__ ({ \
+ int8x16x3_t __ret; \
+ __builtin_neon_vld3q_dup_v(&__ret, __p0, 32); \
+ \
+ __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
+ __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
+ __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
+ __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vld3q_dup_f64(__p0) __extension__ ({ \
+ float64x2x3_t __ret; \
+ __builtin_neon_vld3q_dup_v(&__ret, __p0, 42); \
+ __ret; \
+})
+#else
+#define vld3q_dup_f64(__p0) __extension__ ({ \
+ float64x2x3_t __ret; \
+ __builtin_neon_vld3q_dup_v(&__ret, __p0, 42); \
+ \
+ __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \
+ __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \
+ __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 1, 0); \
+ __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vld3q_dup_f32(__p0) __extension__ ({ \
+ float32x4x3_t __ret; \
+ __builtin_neon_vld3q_dup_v(&__ret, __p0, 41); \
+ __ret; \
+})
+#else
+#define vld3q_dup_f32(__p0) __extension__ ({ \
+ float32x4x3_t __ret; \
+ __builtin_neon_vld3q_dup_v(&__ret, __p0, 41); \
+ \
+ __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \
+ __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \
+ __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 3, 2, 1, 0); \
+ __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vld3q_dup_f16(__p0) __extension__ ({ \
+ float16x8x3_t __ret; \
+ __builtin_neon_vld3q_dup_v(&__ret, __p0, 40); \
+ __ret; \
+})
+#else
+#define vld3q_dup_f16(__p0) __extension__ ({ \
+ float16x8x3_t __ret; \
+ __builtin_neon_vld3q_dup_v(&__ret, __p0, 40); \
+ \
+ __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
+ __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
+ __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \
+ __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vld3q_dup_s32(__p0) __extension__ ({ \
+ int32x4x3_t __ret; \
+ __builtin_neon_vld3q_dup_v(&__ret, __p0, 34); \
+ __ret; \
+})
+#else
+#define vld3q_dup_s32(__p0) __extension__ ({ \
+ int32x4x3_t __ret; \
+ __builtin_neon_vld3q_dup_v(&__ret, __p0, 34); \
+ \
+ __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \
+ __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \
+ __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 3, 2, 1, 0); \
+ __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vld3q_dup_s64(__p0) __extension__ ({ \
+ int64x2x3_t __ret; \
+ __builtin_neon_vld3q_dup_v(&__ret, __p0, 35); \
+ __ret; \
+})
+#else
+#define vld3q_dup_s64(__p0) __extension__ ({ \
+ int64x2x3_t __ret; \
+ __builtin_neon_vld3q_dup_v(&__ret, __p0, 35); \
+ \
+ __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \
+ __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \
+ __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 1, 0); \
+ __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vld3q_dup_s16(__p0) __extension__ ({ \
+ int16x8x3_t __ret; \
+ __builtin_neon_vld3q_dup_v(&__ret, __p0, 33); \
+ __ret; \
+})
+#else
+#define vld3q_dup_s16(__p0) __extension__ ({ \
+ int16x8x3_t __ret; \
+ __builtin_neon_vld3q_dup_v(&__ret, __p0, 33); \
+ \
+ __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
+ __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
+ __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \
+ __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vld3_dup_f64(__p0) __extension__ ({ \
+ float64x1x3_t __ret; \
+ __builtin_neon_vld3_dup_v(&__ret, __p0, 10); \
+ __ret; \
+})
+#else
+#define vld3_dup_f64(__p0) __extension__ ({ \
+ float64x1x3_t __ret; \
+ __builtin_neon_vld3_dup_v(&__ret, __p0, 10); \
+ __ret; \
+})
+#endif
+
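+/*
+ * The vld3_lane variants load one 3-element structure from __p0 into
+ * lane __p2 of each register of the triple __p1; all other lanes pass
+ * through unchanged.  Sketch (given an existing triple t0; names are
+ * illustrative):
+ *
+ *   uint8x16x3_t t = vld3q_lane_u8(src, t0, 5);  // refill lane 5 only
+ */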
+#ifdef __LITTLE_ENDIAN__
+#define vld3_lane_p64(__p0, __p1, __p2) __extension__ ({ \
+ poly64x1x3_t __s1 = __p1; \
+ poly64x1x3_t __ret; \
+ __builtin_neon_vld3_lane_v(&__ret, __p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], __p2, 6); \
+ __ret; \
+})
+#else
+#define vld3_lane_p64(__p0, __p1, __p2) __extension__ ({ \
+ poly64x1x3_t __s1 = __p1; \
+ poly64x1x3_t __ret; \
+ __builtin_neon_vld3_lane_v(&__ret, __p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], __p2, 6); \
+ __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vld3q_lane_p8(__p0, __p1, __p2) __extension__ ({ \
+ poly8x16x3_t __s1 = __p1; \
+ poly8x16x3_t __ret; \
+ __builtin_neon_vld3q_lane_v(&__ret, __p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], __p2, 36); \
+ __ret; \
+})
+#else
+#define vld3q_lane_p8(__p0, __p1, __p2) __extension__ ({ \
+ poly8x16x3_t __s1 = __p1; \
+ poly8x16x3_t __rev1; \
+ __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
+ __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
+ __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
+ poly8x16x3_t __ret; \
+ __builtin_neon_vld3q_lane_v(&__ret, __p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], __p2, 36); \
+ \
+ __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
+ __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
+ __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
+ __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vld3q_lane_p64(__p0, __p1, __p2) __extension__ ({ \
+ poly64x2x3_t __s1 = __p1; \
+ poly64x2x3_t __ret; \
+ __builtin_neon_vld3q_lane_v(&__ret, __p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], __p2, 38); \
+ __ret; \
+})
+#else
+#define vld3q_lane_p64(__p0, __p1, __p2) __extension__ ({ \
+ poly64x2x3_t __s1 = __p1; \
+ poly64x2x3_t __rev1; \
+ __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \
+ __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \
+ __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 1, 0); \
+ poly64x2x3_t __ret; \
+ __builtin_neon_vld3q_lane_v(&__ret, __p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], __p2, 38); \
+ \
+ __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \
+ __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \
+ __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 1, 0); \
+ __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vld3q_lane_u8(__p0, __p1, __p2) __extension__ ({ \
+ uint8x16x3_t __s1 = __p1; \
+ uint8x16x3_t __ret; \
+ __builtin_neon_vld3q_lane_v(&__ret, __p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], __p2, 48); \
+ __ret; \
+})
+#else
+#define vld3q_lane_u8(__p0, __p1, __p2) __extension__ ({ \
+ uint8x16x3_t __s1 = __p1; \
+ uint8x16x3_t __rev1; \
+ __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
+ __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
+ __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
+ uint8x16x3_t __ret; \
+ __builtin_neon_vld3q_lane_v(&__ret, __p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], __p2, 48); \
+ \
+ __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
+ __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
+ __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
+ __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vld3q_lane_u64(__p0, __p1, __p2) __extension__ ({ \
+ uint64x2x3_t __s1 = __p1; \
+ uint64x2x3_t __ret; \
+ __builtin_neon_vld3q_lane_v(&__ret, __p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], __p2, 51); \
+ __ret; \
+})
+#else
+#define vld3q_lane_u64(__p0, __p1, __p2) __extension__ ({ \
+ uint64x2x3_t __s1 = __p1; \
+ uint64x2x3_t __rev1; \
+ __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \
+ __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \
+ __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 1, 0); \
+ uint64x2x3_t __ret; \
+ __builtin_neon_vld3q_lane_v(&__ret, __p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], __p2, 51); \
+ \
+ __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \
+ __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \
+ __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 1, 0); \
+ __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vld3q_lane_s8(__p0, __p1, __p2) __extension__ ({ \
+ int8x16x3_t __s1 = __p1; \
+ int8x16x3_t __ret; \
+ __builtin_neon_vld3q_lane_v(&__ret, __p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], __p2, 32); \
+ __ret; \
+})
+#else
+#define vld3q_lane_s8(__p0, __p1, __p2) __extension__ ({ \
+ int8x16x3_t __s1 = __p1; \
+ int8x16x3_t __rev1; \
+ __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
+ __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
+ __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
+ int8x16x3_t __ret; \
+ __builtin_neon_vld3q_lane_v(&__ret, __p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], __p2, 32); \
+ \
+ __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
+ __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
+ __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
+ __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vld3q_lane_f64(__p0, __p1, __p2) __extension__ ({ \
+ float64x2x3_t __s1 = __p1; \
+ float64x2x3_t __ret; \
+ __builtin_neon_vld3q_lane_v(&__ret, __p0, __s1.val[0], __s1.val[1], __s1.val[2], __p2, 42); \
+ __ret; \
+})
+#else
+#define vld3q_lane_f64(__p0, __p1, __p2) __extension__ ({ \
+ float64x2x3_t __s1 = __p1; \
+ float64x2x3_t __rev1; \
+ __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \
+ __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \
+ __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 1, 0); \
+ float64x2x3_t __ret; \
+ __builtin_neon_vld3q_lane_v(&__ret, __p0, __rev1.val[0], __rev1.val[1], __rev1.val[2], __p2, 42); \
+ \
+ __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \
+ __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \
+ __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 1, 0); \
+ __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vld3q_lane_s64(__p0, __p1, __p2) __extension__ ({ \
+ int64x2x3_t __s1 = __p1; \
+ int64x2x3_t __ret; \
+ __builtin_neon_vld3q_lane_v(&__ret, __p0, __s1.val[0], __s1.val[1], __s1.val[2], __p2, 35); \
+ __ret; \
+})
+#else
+#define vld3q_lane_s64(__p0, __p1, __p2) __extension__ ({ \
+ int64x2x3_t __s1 = __p1; \
+ int64x2x3_t __rev1; \
+ __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \
+ __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \
+ __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 1, 0); \
+ int64x2x3_t __ret; \
+ __builtin_neon_vld3q_lane_v(&__ret, __p0, __rev1.val[0], __rev1.val[1], __rev1.val[2], __p2, 35); \
+ \
+ __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \
+ __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \
+ __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 1, 0); \
+ __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vld3_lane_u64(__p0, __p1, __p2) __extension__ ({ \
+ uint64x1x3_t __s1 = __p1; \
+ uint64x1x3_t __ret; \
+ __builtin_neon_vld3_lane_v(&__ret, __p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], __p2, 19); \
+ __ret; \
+})
+#else
+#define vld3_lane_u64(__p0, __p1, __p2) __extension__ ({ \
+ uint64x1x3_t __s1 = __p1; \
+ uint64x1x3_t __ret; \
+ __builtin_neon_vld3_lane_v(&__ret, __p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], __p2, 19); \
+ __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vld3_lane_f64(__p0, __p1, __p2) __extension__ ({ \
+ float64x1x3_t __s1 = __p1; \
+ float64x1x3_t __ret; \
+ __builtin_neon_vld3_lane_v(&__ret, __p0, __s1.val[0], __s1.val[1], __s1.val[2], __p2, 10); \
+ __ret; \
+})
+#else
+#define vld3_lane_f64(__p0, __p1, __p2) __extension__ ({ \
+ float64x1x3_t __s1 = __p1; \
+ float64x1x3_t __ret; \
+ __builtin_neon_vld3_lane_v(&__ret, __p0, __s1.val[0], __s1.val[1], __s1.val[2], __p2, 10); \
+ __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vld3_lane_s64(__p0, __p1, __p2) __extension__ ({ \
+ int64x1x3_t __s1 = __p1; \
+ int64x1x3_t __ret; \
+ __builtin_neon_vld3_lane_v(&__ret, __p0, __s1.val[0], __s1.val[1], __s1.val[2], __p2, 3); \
+ __ret; \
+})
+#else
+#define vld3_lane_s64(__p0, __p1, __p2) __extension__ ({ \
+ int64x1x3_t __s1 = __p1; \
+ int64x1x3_t __ret; \
+ __builtin_neon_vld3_lane_v(&__ret, __p0, __s1.val[0], __s1.val[1], __s1.val[2], __p2, 3); \
+ __ret; \
+})
+#endif
+
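+/*
+ * The vld4 family below mirrors vld3 with 4-way de-interleaving:
+ * element i of val[k] comes from memory slot 4*i + k.
+ */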
+#ifdef __LITTLE_ENDIAN__
+#define vld4_p64(__p0) __extension__ ({ \
+ poly64x1x4_t __ret; \
+ __builtin_neon_vld4_v(&__ret, __p0, 6); \
+ __ret; \
+})
+#else
+#define vld4_p64(__p0) __extension__ ({ \
+ poly64x1x4_t __ret; \
+ __builtin_neon_vld4_v(&__ret, __p0, 6); \
+ __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vld4q_p64(__p0) __extension__ ({ \
+ poly64x2x4_t __ret; \
+ __builtin_neon_vld4q_v(&__ret, __p0, 38); \
+ __ret; \
+})
+#else
+#define vld4q_p64(__p0) __extension__ ({ \
+ poly64x2x4_t __ret; \
+ __builtin_neon_vld4q_v(&__ret, __p0, 38); \
+ \
+ __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \
+ __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \
+ __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 1, 0); \
+ __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 1, 0); \
+ __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vld4q_u64(__p0) __extension__ ({ \
+ uint64x2x4_t __ret; \
+ __builtin_neon_vld4q_v(&__ret, __p0, 51); \
+ __ret; \
+})
+#else
+#define vld4q_u64(__p0) __extension__ ({ \
+ uint64x2x4_t __ret; \
+ __builtin_neon_vld4q_v(&__ret, __p0, 51); \
+ \
+ __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \
+ __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \
+ __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 1, 0); \
+ __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 1, 0); \
+ __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vld4q_f64(__p0) __extension__ ({ \
+ float64x2x4_t __ret; \
+ __builtin_neon_vld4q_v(&__ret, __p0, 42); \
+ __ret; \
+})
+#else
+#define vld4q_f64(__p0) __extension__ ({ \
+ float64x2x4_t __ret; \
+ __builtin_neon_vld4q_v(&__ret, __p0, 42); \
+ \
+ __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \
+ __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \
+ __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 1, 0); \
+ __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 1, 0); \
+ __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vld4q_s64(__p0) __extension__ ({ \
+ int64x2x4_t __ret; \
+ __builtin_neon_vld4q_v(&__ret, __p0, 35); \
+ __ret; \
+})
+#else
+#define vld4q_s64(__p0) __extension__ ({ \
+ int64x2x4_t __ret; \
+ __builtin_neon_vld4q_v(&__ret, __p0, 35); \
+ \
+ __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \
+ __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \
+ __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 1, 0); \
+ __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 1, 0); \
+ __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vld4_f64(__p0) __extension__ ({ \
+ float64x1x4_t __ret; \
+ __builtin_neon_vld4_v(&__ret, __p0, 10); \
+ __ret; \
+})
+#else
+#define vld4_f64(__p0) __extension__ ({ \
+ float64x1x4_t __ret; \
+ __builtin_neon_vld4_v(&__ret, __p0, 10); \
+ __ret; \
+})
+#endif
+
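+/* vld4_dup: as vld3_dup, broadcasting four consecutive elements. */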
+#ifdef __LITTLE_ENDIAN__
+#define vld4_dup_p64(__p0) __extension__ ({ \
+ poly64x1x4_t __ret; \
+ __builtin_neon_vld4_dup_v(&__ret, __p0, 6); \
+ __ret; \
+})
+#else
+#define vld4_dup_p64(__p0) __extension__ ({ \
+ poly64x1x4_t __ret; \
+ __builtin_neon_vld4_dup_v(&__ret, __p0, 6); \
+ __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vld4q_dup_p8(__p0) __extension__ ({ \
+ poly8x16x4_t __ret; \
+ __builtin_neon_vld4q_dup_v(&__ret, __p0, 36); \
+ __ret; \
+})
+#else
+#define vld4q_dup_p8(__p0) __extension__ ({ \
+ poly8x16x4_t __ret; \
+ __builtin_neon_vld4q_dup_v(&__ret, __p0, 36); \
+ \
+ __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
+ __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
+ __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
+ __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
+ __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vld4q_dup_p64(__p0) __extension__ ({ \
+ poly64x2x4_t __ret; \
+ __builtin_neon_vld4q_dup_v(&__ret, __p0, 38); \
+ __ret; \
+})
+#else
+#define vld4q_dup_p64(__p0) __extension__ ({ \
+ poly64x2x4_t __ret; \
+ __builtin_neon_vld4q_dup_v(&__ret, __p0, 38); \
+ \
+ __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \
+ __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \
+ __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 1, 0); \
+ __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 1, 0); \
+ __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vld4q_dup_p16(__p0) __extension__ ({ \
+ poly16x8x4_t __ret; \
+ __builtin_neon_vld4q_dup_v(&__ret, __p0, 37); \
+ __ret; \
+})
+#else
+#define vld4q_dup_p16(__p0) __extension__ ({ \
+ poly16x8x4_t __ret; \
+ __builtin_neon_vld4q_dup_v(&__ret, __p0, 37); \
+ \
+ __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
+ __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
+ __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \
+ __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 7, 6, 5, 4, 3, 2, 1, 0); \
+ __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vld4q_dup_u8(__p0) __extension__ ({ \
+ uint8x16x4_t __ret; \
+ __builtin_neon_vld4q_dup_v(&__ret, __p0, 48); \
+ __ret; \
+})
+#else
+#define vld4q_dup_u8(__p0) __extension__ ({ \
+ uint8x16x4_t __ret; \
+ __builtin_neon_vld4q_dup_v(&__ret, __p0, 48); \
+ \
+ __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
+ __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
+ __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
+ __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
+ __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vld4q_dup_u32(__p0) __extension__ ({ \
+ uint32x4x4_t __ret; \
+ __builtin_neon_vld4q_dup_v(&__ret, __p0, 50); \
+ __ret; \
+})
+#else
+#define vld4q_dup_u32(__p0) __extension__ ({ \
+ uint32x4x4_t __ret; \
+ __builtin_neon_vld4q_dup_v(&__ret, __p0, 50); \
+ \
+ __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \
+ __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \
+ __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 3, 2, 1, 0); \
+ __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 3, 2, 1, 0); \
+ __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vld4q_dup_u64(__p0) __extension__ ({ \
+ uint64x2x4_t __ret; \
+ __builtin_neon_vld4q_dup_v(&__ret, __p0, 51); \
+ __ret; \
+})
+#else
+#define vld4q_dup_u64(__p0) __extension__ ({ \
+ uint64x2x4_t __ret; \
+ __builtin_neon_vld4q_dup_v(&__ret, __p0, 51); \
+ \
+ __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \
+ __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \
+ __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 1, 0); \
+ __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 1, 0); \
+ __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vld4q_dup_u16(__p0) __extension__ ({ \
+ uint16x8x4_t __ret; \
+ __builtin_neon_vld4q_dup_v(&__ret, __p0, 49); \
+ __ret; \
+})
+#else
+#define vld4q_dup_u16(__p0) __extension__ ({ \
+ uint16x8x4_t __ret; \
+ __builtin_neon_vld4q_dup_v(&__ret, __p0, 49); \
+ \
+ __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
+ __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
+ __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \
+ __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 7, 6, 5, 4, 3, 2, 1, 0); \
+ __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vld4q_dup_s8(__p0) __extension__ ({ \
+ int8x16x4_t __ret; \
+ __builtin_neon_vld4q_dup_v(&__ret, __p0, 32); \
+ __ret; \
+})
+#else
+#define vld4q_dup_s8(__p0) __extension__ ({ \
+ int8x16x4_t __ret; \
+ __builtin_neon_vld4q_dup_v(&__ret, __p0, 32); \
+ \
+ __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
+ __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
+ __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
+ __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
+ __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vld4q_dup_f64(__p0) __extension__ ({ \
+ float64x2x4_t __ret; \
+ __builtin_neon_vld4q_dup_v(&__ret, __p0, 42); \
+ __ret; \
+})
+#else
+#define vld4q_dup_f64(__p0) __extension__ ({ \
+ float64x2x4_t __ret; \
+ __builtin_neon_vld4q_dup_v(&__ret, __p0, 42); \
+ \
+ __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \
+ __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \
+ __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 1, 0); \
+ __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 1, 0); \
+ __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vld4q_dup_f32(__p0) __extension__ ({ \
+ float32x4x4_t __ret; \
+ __builtin_neon_vld4q_dup_v(&__ret, __p0, 41); \
+ __ret; \
+})
+#else
+#define vld4q_dup_f32(__p0) __extension__ ({ \
+ float32x4x4_t __ret; \
+ __builtin_neon_vld4q_dup_v(&__ret, __p0, 41); \
+ \
+ __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \
+ __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \
+ __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 3, 2, 1, 0); \
+ __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 3, 2, 1, 0); \
+ __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vld4q_dup_f16(__p0) __extension__ ({ \
+ float16x8x4_t __ret; \
+ __builtin_neon_vld4q_dup_v(&__ret, __p0, 40); \
+ __ret; \
+})
+#else
+#define vld4q_dup_f16(__p0) __extension__ ({ \
+ float16x8x4_t __ret; \
+ __builtin_neon_vld4q_dup_v(&__ret, __p0, 40); \
+ \
+ __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
+ __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
+ __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \
+ __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 7, 6, 5, 4, 3, 2, 1, 0); \
+ __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vld4q_dup_s32(__p0) __extension__ ({ \
+ int32x4x4_t __ret; \
+ __builtin_neon_vld4q_dup_v(&__ret, __p0, 34); \
+ __ret; \
+})
+#else
+#define vld4q_dup_s32(__p0) __extension__ ({ \
+ int32x4x4_t __ret; \
+ __builtin_neon_vld4q_dup_v(&__ret, __p0, 34); \
+ \
+ __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \
+ __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \
+ __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 3, 2, 1, 0); \
+ __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 3, 2, 1, 0); \
+ __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vld4q_dup_s64(__p0) __extension__ ({ \
+ int64x2x4_t __ret; \
+ __builtin_neon_vld4q_dup_v(&__ret, __p0, 35); \
+ __ret; \
+})
+#else
+#define vld4q_dup_s64(__p0) __extension__ ({ \
+ int64x2x4_t __ret; \
+ __builtin_neon_vld4q_dup_v(&__ret, __p0, 35); \
+ \
+ __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \
+ __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \
+ __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 1, 0); \
+ __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 1, 0); \
+ __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vld4q_dup_s16(__p0) __extension__ ({ \
+ int16x8x4_t __ret; \
+ __builtin_neon_vld4q_dup_v(&__ret, __p0, 33); \
+ __ret; \
+})
+#else
+#define vld4q_dup_s16(__p0) __extension__ ({ \
+ int16x8x4_t __ret; \
+ __builtin_neon_vld4q_dup_v(&__ret, __p0, 33); \
+ \
+ __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
+ __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
+ __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \
+ __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 7, 6, 5, 4, 3, 2, 1, 0); \
+ __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vld4_dup_f64(__p0) __extension__ ({ \
+ float64x1x4_t __ret; \
+ __builtin_neon_vld4_dup_v(&__ret, __p0, 10); \
+ __ret; \
+})
+#else
+#define vld4_dup_f64(__p0) __extension__ ({ \
+ float64x1x4_t __ret; \
+ __builtin_neon_vld4_dup_v(&__ret, __p0, 10); \
+ __ret; \
+})
+#endif
+
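+/* vld4_lane: as vld3_lane, refilling one lane of all four registers. */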
+#ifdef __LITTLE_ENDIAN__
+#define vld4_lane_p64(__p0, __p1, __p2) __extension__ ({ \
+ poly64x1x4_t __s1 = __p1; \
+ poly64x1x4_t __ret; \
+ __builtin_neon_vld4_lane_v(&__ret, __p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], (int8x8_t)__s1.val[3], __p2, 6); \
+ __ret; \
+})
+#else
+#define vld4_lane_p64(__p0, __p1, __p2) __extension__ ({ \
+ poly64x1x4_t __s1 = __p1; \
+ poly64x1x4_t __ret; \
+ __builtin_neon_vld4_lane_v(&__ret, __p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], (int8x8_t)__s1.val[3], __p2, 6); \
+ __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vld4q_lane_p8(__p0, __p1, __p2) __extension__ ({ \
+ poly8x16x4_t __s1 = __p1; \
+ poly8x16x4_t __ret; \
+ __builtin_neon_vld4q_lane_v(&__ret, __p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], (int8x16_t)__s1.val[3], __p2, 36); \
+ __ret; \
+})
+#else
+#define vld4q_lane_p8(__p0, __p1, __p2) __extension__ ({ \
+ poly8x16x4_t __s1 = __p1; \
+ poly8x16x4_t __rev1; \
+ __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
+ __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
+ __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
+ __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
+ poly8x16x4_t __ret; \
+ __builtin_neon_vld4q_lane_v(&__ret, __p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], (int8x16_t)__rev1.val[3], __p2, 36); \
+ \
+ __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
+ __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
+ __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
+ __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
+ __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vld4q_lane_p64(__p0, __p1, __p2) __extension__ ({ \
+ poly64x2x4_t __s1 = __p1; \
+ poly64x2x4_t __ret; \
+ __builtin_neon_vld4q_lane_v(&__ret, __p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], (int8x16_t)__s1.val[3], __p2, 38); \
+ __ret; \
+})
+#else
+#define vld4q_lane_p64(__p0, __p1, __p2) __extension__ ({ \
+ poly64x2x4_t __s1 = __p1; \
+ poly64x2x4_t __rev1; \
+ __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \
+ __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \
+ __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 1, 0); \
+ __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 1, 0); \
+ poly64x2x4_t __ret; \
+ __builtin_neon_vld4q_lane_v(&__ret, __p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], (int8x16_t)__rev1.val[3], __p2, 38); \
+ \
+ __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \
+ __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \
+ __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 1, 0); \
+ __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 1, 0); \
+ __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vld4q_lane_u8(__p0, __p1, __p2) __extension__ ({ \
+ uint8x16x4_t __s1 = __p1; \
+ uint8x16x4_t __ret; \
+ __builtin_neon_vld4q_lane_v(&__ret, __p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], (int8x16_t)__s1.val[3], __p2, 48); \
+ __ret; \
+})
+#else
+#define vld4q_lane_u8(__p0, __p1, __p2) __extension__ ({ \
+ uint8x16x4_t __s1 = __p1; \
+ uint8x16x4_t __rev1; \
+ __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
+ __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
+ __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
+ __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
+ uint8x16x4_t __ret; \
+ __builtin_neon_vld4q_lane_v(&__ret, __p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], (int8x16_t)__rev1.val[3], __p2, 48); \
+ \
+ __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
+ __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
+ __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
+ __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
+ __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vld4q_lane_u64(__p0, __p1, __p2) __extension__ ({ \
+ uint64x2x4_t __s1 = __p1; \
+ uint64x2x4_t __ret; \
+ __builtin_neon_vld4q_lane_v(&__ret, __p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], (int8x16_t)__s1.val[3], __p2, 51); \
+ __ret; \
+})
+#else
+#define vld4q_lane_u64(__p0, __p1, __p2) __extension__ ({ \
+ uint64x2x4_t __s1 = __p1; \
+ uint64x2x4_t __rev1; \
+ __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \
+ __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \
+ __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 1, 0); \
+ __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 1, 0); \
+ uint64x2x4_t __ret; \
+ __builtin_neon_vld4q_lane_v(&__ret, __p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], (int8x16_t)__rev1.val[3], __p2, 51); \
+ \
+ __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \
+ __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \
+ __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 1, 0); \
+ __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 1, 0); \
+ __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vld4q_lane_s8(__p0, __p1, __p2) __extension__ ({ \
+ int8x16x4_t __s1 = __p1; \
+ int8x16x4_t __ret; \
+ __builtin_neon_vld4q_lane_v(&__ret, __p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], (int8x16_t)__s1.val[3], __p2, 32); \
+ __ret; \
+})
+#else
+#define vld4q_lane_s8(__p0, __p1, __p2) __extension__ ({ \
+ int8x16x4_t __s1 = __p1; \
+ int8x16x4_t __rev1; \
+ __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
+ __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
+ __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
+ __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
+ int8x16x4_t __ret; \
+ __builtin_neon_vld4q_lane_v(&__ret, __p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], (int8x16_t)__rev1.val[3], __p2, 32); \
+ \
+ __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
+ __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
+ __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
+ __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
+ __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vld4q_lane_f64(__p0, __p1, __p2) __extension__ ({ \
+ float64x2x4_t __s1 = __p1; \
+ float64x2x4_t __ret; \
+ __builtin_neon_vld4q_lane_v(&__ret, __p0, __s1.val[0], __s1.val[1], __s1.val[2], __s1.val[3], __p2, 42); \
+ __ret; \
+})
+#else
+#define vld4q_lane_f64(__p0, __p1, __p2) __extension__ ({ \
+ float64x2x4_t __s1 = __p1; \
+ float64x2x4_t __rev1; \
+ __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \
+ __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \
+ __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 1, 0); \
+ __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 1, 0); \
+ float64x2x4_t __ret; \
+ __builtin_neon_vld4q_lane_v(&__ret, __p0, __rev1.val[0], __rev1.val[1], __rev1.val[2], __rev1.val[3], __p2, 42); \
+ \
+ __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \
+ __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \
+ __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 1, 0); \
+ __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 1, 0); \
+ __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vld4q_lane_s64(__p0, __p1, __p2) __extension__ ({ \
+ int64x2x4_t __s1 = __p1; \
+ int64x2x4_t __ret; \
+ __builtin_neon_vld4q_lane_v(&__ret, __p0, __s1.val[0], __s1.val[1], __s1.val[2], __s1.val[3], __p2, 35); \
+ __ret; \
+})
+#else
+#define vld4q_lane_s64(__p0, __p1, __p2) __extension__ ({ \
+ int64x2x4_t __s1 = __p1; \
+ int64x2x4_t __rev1; \
+ __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \
+ __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \
+ __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 1, 0); \
+ __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 1, 0); \
+ int64x2x4_t __ret; \
+ __builtin_neon_vld4q_lane_v(&__ret, __p0, __rev1.val[0], __rev1.val[1], __rev1.val[2], __rev1.val[3], __p2, 35); \
+ \
+ __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \
+ __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \
+ __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 1, 0); \
+ __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 1, 0); \
+ __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vld4_lane_u64(__p0, __p1, __p2) __extension__ ({ \
+ uint64x1x4_t __s1 = __p1; \
+ uint64x1x4_t __ret; \
+ __builtin_neon_vld4_lane_v(&__ret, __p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], (int8x8_t)__s1.val[3], __p2, 19); \
+ __ret; \
+})
+#else
+#define vld4_lane_u64(__p0, __p1, __p2) __extension__ ({ \
+ uint64x1x4_t __s1 = __p1; \
+ uint64x1x4_t __ret; \
+ __builtin_neon_vld4_lane_v(&__ret, __p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], (int8x8_t)__s1.val[3], __p2, 19); \
+ __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vld4_lane_f64(__p0, __p1, __p2) __extension__ ({ \
+ float64x1x4_t __s1 = __p1; \
+ float64x1x4_t __ret; \
+ __builtin_neon_vld4_lane_v(&__ret, __p0, __s1.val[0], __s1.val[1], __s1.val[2], __s1.val[3], __p2, 10); \
+ __ret; \
+})
+#else
+#define vld4_lane_f64(__p0, __p1, __p2) __extension__ ({ \
+ float64x1x4_t __s1 = __p1; \
+ float64x1x4_t __ret; \
+ __builtin_neon_vld4_lane_v(&__ret, __p0, __s1.val[0], __s1.val[1], __s1.val[2], __s1.val[3], __p2, 10); \
+ __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vld4_lane_s64(__p0, __p1, __p2) __extension__ ({ \
+ int64x1x4_t __s1 = __p1; \
+ int64x1x4_t __ret; \
+ __builtin_neon_vld4_lane_v(&__ret, __p0, __s1.val[0], __s1.val[1], __s1.val[2], __s1.val[3], __p2, 3); \
+ __ret; \
+})
+#else
+#define vld4_lane_s64(__p0, __p1, __p2) __extension__ ({ \
+ int64x1x4_t __s1 = __p1; \
+ int64x1x4_t __ret; \
+ __builtin_neon_vld4_lane_v(&__ret, __p0, __s1.val[0], __s1.val[1], __s1.val[2], __s1.val[3], __p2, 3); \
+ __ret; \
+})
+#endif
+
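+/*
+ * vldrq_p128 loads a 128-bit polynomial scalar.  Lane reversal does not
+ * apply to a scalar load, hence the identical branches.
+ */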
+#ifdef __LITTLE_ENDIAN__
+#define vldrq_p128(__p0) __extension__ ({ \
+ poly128_t __ret; \
+ __ret = (poly128_t) __builtin_neon_vldrq_p128(__p0); \
+ __ret; \
+})
+#else
+#define vldrq_p128(__p0) __extension__ ({ \
+ poly128_t __ret; \
+ __ret = (poly128_t) __builtin_neon_vldrq_p128(__p0); \
+ __ret; \
+})
+#endif
+
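+/*
+ * vmaxq_f64/vmax_f64 compute the element-wise maximum of two vectors;
+ * only the two-lane q form needs a big-endian fixup.
+ */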
+#ifdef __LITTLE_ENDIAN__
+__ai float64x2_t vmaxq_f64(float64x2_t __p0, float64x2_t __p1) {
+ float64x2_t __ret;
+ __ret = (float64x2_t) __builtin_neon_vmaxq_v((int8x16_t)__p0, (int8x16_t)__p1, 42);
+ return __ret;
+}
+#else
+__ai float64x2_t vmaxq_f64(float64x2_t __p0, float64x2_t __p1) {
+ float64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
+ float64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
+ float64x2_t __ret;
+ __ret = (float64x2_t) __builtin_neon_vmaxq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 42);
+ __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai float64x1_t vmax_f64(float64x1_t __p0, float64x1_t __p1) {
+ float64x1_t __ret;
+ __ret = (float64x1_t) __builtin_neon_vmax_v((int8x8_t)__p0, (int8x8_t)__p1, 10);
+ return __ret;
+}
+#else
+__ai float64x1_t vmax_f64(float64x1_t __p0, float64x1_t __p1) {
+ float64x1_t __ret;
+ __ret = (float64x1_t) __builtin_neon_vmax_v((int8x8_t)__p0, (int8x8_t)__p1, 10);
+ return __ret;
+}
+#endif
+
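+/*
+ * The v{max,min}[nm]v[q] builtins below are across-lane reductions that
+ * collapse a whole vector to one scalar.  The "nm" forms follow IEEE 754
+ * maxNum/minNum semantics (a quiet NaN loses to a number).  Sketch
+ * (values illustrative):
+ *
+ *   float32x4_t x = {1.0f, 4.0f, 2.0f, 3.0f};
+ *   float32_t m = vmaxvq_f32(x);   // 4.0f
+ */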
+#ifdef __LITTLE_ENDIAN__
+__ai float64_t vmaxnmvq_f64(float64x2_t __p0) {
+ float64_t __ret;
+ __ret = (float64_t) __builtin_neon_vmaxnmvq_f64((int8x16_t)__p0);
+ return __ret;
+}
+#else
+__ai float64_t vmaxnmvq_f64(float64x2_t __p0) {
+ float64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
+ float64_t __ret;
+ __ret = (float64_t) __builtin_neon_vmaxnmvq_f64((int8x16_t)__rev0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai float32_t vmaxnmvq_f32(float32x4_t __p0) {
+ float32_t __ret;
+ __ret = (float32_t) __builtin_neon_vmaxnmvq_f32((int8x16_t)__p0);
+ return __ret;
+}
+#else
+__ai float32_t vmaxnmvq_f32(float32x4_t __p0) {
+ float32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
+ float32_t __ret;
+ __ret = (float32_t) __builtin_neon_vmaxnmvq_f32((int8x16_t)__rev0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai float32_t vmaxnmv_f32(float32x2_t __p0) {
+ float32_t __ret;
+ __ret = (float32_t) __builtin_neon_vmaxnmv_f32((int8x8_t)__p0);
+ return __ret;
+}
+#else
+__ai float32_t vmaxnmv_f32(float32x2_t __p0) {
+ float32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
+ float32_t __ret;
+ __ret = (float32_t) __builtin_neon_vmaxnmv_f32((int8x8_t)__rev0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint8_t vmaxvq_u8(uint8x16_t __p0) {
+ uint8_t __ret;
+ __ret = (uint8_t) __builtin_neon_vmaxvq_u8((int8x16_t)__p0);
+ return __ret;
+}
+#else
+__ai uint8_t vmaxvq_u8(uint8x16_t __p0) {
+ uint8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
+ uint8_t __ret;
+ __ret = (uint8_t) __builtin_neon_vmaxvq_u8((int8x16_t)__rev0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint32_t vmaxvq_u32(uint32x4_t __p0) {
+ uint32_t __ret;
+ __ret = (uint32_t) __builtin_neon_vmaxvq_u32((int8x16_t)__p0);
+ return __ret;
+}
+#else
+__ai uint32_t vmaxvq_u32(uint32x4_t __p0) {
+ uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
+ uint32_t __ret;
+ __ret = (uint32_t) __builtin_neon_vmaxvq_u32((int8x16_t)__rev0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint16_t vmaxvq_u16(uint16x8_t __p0) {
+ uint16_t __ret;
+ __ret = (uint16_t) __builtin_neon_vmaxvq_u16((int8x16_t)__p0);
+ return __ret;
+}
+#else
+__ai uint16_t vmaxvq_u16(uint16x8_t __p0) {
+ uint16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
+ uint16_t __ret;
+ __ret = (uint16_t) __builtin_neon_vmaxvq_u16((int8x16_t)__rev0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai int8_t vmaxvq_s8(int8x16_t __p0) {
+ int8_t __ret;
+ __ret = (int8_t) __builtin_neon_vmaxvq_s8((int8x16_t)__p0);
+ return __ret;
+}
+#else
+__ai int8_t vmaxvq_s8(int8x16_t __p0) {
+ int8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
+ int8_t __ret;
+ __ret = (int8_t) __builtin_neon_vmaxvq_s8((int8x16_t)__rev0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai float64_t vmaxvq_f64(float64x2_t __p0) {
+ float64_t __ret;
+ __ret = (float64_t) __builtin_neon_vmaxvq_f64((int8x16_t)__p0);
+ return __ret;
+}
+#else
+__ai float64_t vmaxvq_f64(float64x2_t __p0) {
+ float64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
+ float64_t __ret;
+ __ret = (float64_t) __builtin_neon_vmaxvq_f64((int8x16_t)__rev0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai float32_t vmaxvq_f32(float32x4_t __p0) {
+ float32_t __ret;
+ __ret = (float32_t) __builtin_neon_vmaxvq_f32((int8x16_t)__p0);
+ return __ret;
+}
+#else
+__ai float32_t vmaxvq_f32(float32x4_t __p0) {
+ float32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
+ float32_t __ret;
+ __ret = (float32_t) __builtin_neon_vmaxvq_f32((int8x16_t)__rev0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai int32_t vmaxvq_s32(int32x4_t __p0) {
+ int32_t __ret;
+ __ret = (int32_t) __builtin_neon_vmaxvq_s32((int8x16_t)__p0);
+ return __ret;
+}
+#else
+__ai int32_t vmaxvq_s32(int32x4_t __p0) {
+ int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
+ int32_t __ret;
+ __ret = (int32_t) __builtin_neon_vmaxvq_s32((int8x16_t)__rev0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai int16_t vmaxvq_s16(int16x8_t __p0) {
+ int16_t __ret;
+ __ret = (int16_t) __builtin_neon_vmaxvq_s16((int8x16_t)__p0);
+ return __ret;
+}
+#else
+__ai int16_t vmaxvq_s16(int16x8_t __p0) {
+ int16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
+ int16_t __ret;
+ __ret = (int16_t) __builtin_neon_vmaxvq_s16((int8x16_t)__rev0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint8_t vmaxv_u8(uint8x8_t __p0) {
+ uint8_t __ret;
+ __ret = (uint8_t) __builtin_neon_vmaxv_u8((int8x8_t)__p0);
+ return __ret;
+}
+#else
+__ai uint8_t vmaxv_u8(uint8x8_t __p0) {
+ uint8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
+ uint8_t __ret;
+ __ret = (uint8_t) __builtin_neon_vmaxv_u8((int8x8_t)__rev0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint32_t vmaxv_u32(uint32x2_t __p0) {
+ uint32_t __ret;
+ __ret = (uint32_t) __builtin_neon_vmaxv_u32((int8x8_t)__p0);
+ return __ret;
+}
+#else
+__ai uint32_t vmaxv_u32(uint32x2_t __p0) {
+ uint32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
+ uint32_t __ret;
+ __ret = (uint32_t) __builtin_neon_vmaxv_u32((int8x8_t)__rev0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint16_t vmaxv_u16(uint16x4_t __p0) {
+ uint16_t __ret;
+ __ret = (uint16_t) __builtin_neon_vmaxv_u16((int8x8_t)__p0);
+ return __ret;
+}
+#else
+__ai uint16_t vmaxv_u16(uint16x4_t __p0) {
+ uint16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
+ uint16_t __ret;
+ __ret = (uint16_t) __builtin_neon_vmaxv_u16((int8x8_t)__rev0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai int8_t vmaxv_s8(int8x8_t __p0) {
+ int8_t __ret;
+ __ret = (int8_t) __builtin_neon_vmaxv_s8((int8x8_t)__p0);
+ return __ret;
+}
+#else
+__ai int8_t vmaxv_s8(int8x8_t __p0) {
+ int8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
+ int8_t __ret;
+ __ret = (int8_t) __builtin_neon_vmaxv_s8((int8x8_t)__rev0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai float32_t vmaxv_f32(float32x2_t __p0) {
+ float32_t __ret;
+ __ret = (float32_t) __builtin_neon_vmaxv_f32((int8x8_t)__p0);
+ return __ret;
+}
+#else
+__ai float32_t vmaxv_f32(float32x2_t __p0) {
+ float32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
+ float32_t __ret;
+ __ret = (float32_t) __builtin_neon_vmaxv_f32((int8x8_t)__rev0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai int32_t vmaxv_s32(int32x2_t __p0) {
+ int32_t __ret;
+ __ret = (int32_t) __builtin_neon_vmaxv_s32((int8x8_t)__p0);
+ return __ret;
+}
+#else
+__ai int32_t vmaxv_s32(int32x2_t __p0) {
+ int32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
+ int32_t __ret;
+ __ret = (int32_t) __builtin_neon_vmaxv_s32((int8x8_t)__rev0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai int16_t vmaxv_s16(int16x4_t __p0) {
+ int16_t __ret;
+ __ret = (int16_t) __builtin_neon_vmaxv_s16((int8x8_t)__p0);
+ return __ret;
+}
+#else
+__ai int16_t vmaxv_s16(int16x4_t __p0) {
+ int16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
+ int16_t __ret;
+ __ret = (int16_t) __builtin_neon_vmaxv_s16((int8x8_t)__rev0);
+ return __ret;
+}
+#endif
+
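+/* vmin* below mirror the vmax* definitions with minimum semantics. */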
+#ifdef __LITTLE_ENDIAN__
+__ai float64x2_t vminq_f64(float64x2_t __p0, float64x2_t __p1) {
+ float64x2_t __ret;
+ __ret = (float64x2_t) __builtin_neon_vminq_v((int8x16_t)__p0, (int8x16_t)__p1, 42);
+ return __ret;
+}
+#else
+__ai float64x2_t vminq_f64(float64x2_t __p0, float64x2_t __p1) {
+ float64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
+ float64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
+ float64x2_t __ret;
+ __ret = (float64x2_t) __builtin_neon_vminq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 42);
+ __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai float64x1_t vmin_f64(float64x1_t __p0, float64x1_t __p1) {
+ float64x1_t __ret;
+ __ret = (float64x1_t) __builtin_neon_vmin_v((int8x8_t)__p0, (int8x8_t)__p1, 10);
+ return __ret;
+}
+#else
+__ai float64x1_t vmin_f64(float64x1_t __p0, float64x1_t __p1) {
+ float64x1_t __ret;
+ __ret = (float64x1_t) __builtin_neon_vmin_v((int8x8_t)__p0, (int8x8_t)__p1, 10);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai float64_t vminnmvq_f64(float64x2_t __p0) {
+ float64_t __ret;
+ __ret = (float64_t) __builtin_neon_vminnmvq_f64((int8x16_t)__p0);
+ return __ret;
+}
+#else
+__ai float64_t vminnmvq_f64(float64x2_t __p0) {
+ float64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
+ float64_t __ret;
+ __ret = (float64_t) __builtin_neon_vminnmvq_f64((int8x16_t)__rev0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai float32_t vminnmvq_f32(float32x4_t __p0) {
+ float32_t __ret;
+ __ret = (float32_t) __builtin_neon_vminnmvq_f32((int8x16_t)__p0);
+ return __ret;
+}
+#else
+__ai float32_t vminnmvq_f32(float32x4_t __p0) {
+ float32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
+ float32_t __ret;
+ __ret = (float32_t) __builtin_neon_vminnmvq_f32((int8x16_t)__rev0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai float32_t vminnmv_f32(float32x2_t __p0) {
+ float32_t __ret;
+ __ret = (float32_t) __builtin_neon_vminnmv_f32((int8x8_t)__p0);
+ return __ret;
+}
+#else
+__ai float32_t vminnmv_f32(float32x2_t __p0) {
+ float32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
+ float32_t __ret;
+ __ret = (float32_t) __builtin_neon_vminnmv_f32((int8x8_t)__rev0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint8_t vminvq_u8(uint8x16_t __p0) {
+ uint8_t __ret;
+ __ret = (uint8_t) __builtin_neon_vminvq_u8((int8x16_t)__p0);
+ return __ret;
+}
+#else
+__ai uint8_t vminvq_u8(uint8x16_t __p0) {
+ uint8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
+ uint8_t __ret;
+ __ret = (uint8_t) __builtin_neon_vminvq_u8((int8x16_t)__rev0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint32_t vminvq_u32(uint32x4_t __p0) {
+ uint32_t __ret;
+ __ret = (uint32_t) __builtin_neon_vminvq_u32((int8x16_t)__p0);
+ return __ret;
+}
+#else
+__ai uint32_t vminvq_u32(uint32x4_t __p0) {
+ uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
+ uint32_t __ret;
+ __ret = (uint32_t) __builtin_neon_vminvq_u32((int8x16_t)__rev0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint16_t vminvq_u16(uint16x8_t __p0) {
+ uint16_t __ret;
+ __ret = (uint16_t) __builtin_neon_vminvq_u16((int8x16_t)__p0);
+ return __ret;
+}
+#else
+__ai uint16_t vminvq_u16(uint16x8_t __p0) {
+ uint16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
+ uint16_t __ret;
+ __ret = (uint16_t) __builtin_neon_vminvq_u16((int8x16_t)__rev0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai int8_t vminvq_s8(int8x16_t __p0) {
+ int8_t __ret;
+ __ret = (int8_t) __builtin_neon_vminvq_s8((int8x16_t)__p0);
+ return __ret;
+}
+#else
+__ai int8_t vminvq_s8(int8x16_t __p0) {
+ int8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
+ int8_t __ret;
+ __ret = (int8_t) __builtin_neon_vminvq_s8((int8x16_t)__rev0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai float64_t vminvq_f64(float64x2_t __p0) {
+ float64_t __ret;
+ __ret = (float64_t) __builtin_neon_vminvq_f64((int8x16_t)__p0);
+ return __ret;
+}
+#else
+__ai float64_t vminvq_f64(float64x2_t __p0) {
+ float64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
+ float64_t __ret;
+ __ret = (float64_t) __builtin_neon_vminvq_f64((int8x16_t)__rev0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai float32_t vminvq_f32(float32x4_t __p0) {
+ float32_t __ret;
+ __ret = (float32_t) __builtin_neon_vminvq_f32((int8x16_t)__p0);
+ return __ret;
+}
+#else
+__ai float32_t vminvq_f32(float32x4_t __p0) {
+ float32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
+ float32_t __ret;
+ __ret = (float32_t) __builtin_neon_vminvq_f32((int8x16_t)__rev0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai int32_t vminvq_s32(int32x4_t __p0) {
+ int32_t __ret;
+ __ret = (int32_t) __builtin_neon_vminvq_s32((int8x16_t)__p0);
+ return __ret;
+}
+#else
+__ai int32_t vminvq_s32(int32x4_t __p0) {
+ int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
+ int32_t __ret;
+ __ret = (int32_t) __builtin_neon_vminvq_s32((int8x16_t)__rev0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai int16_t vminvq_s16(int16x8_t __p0) {
+ int16_t __ret;
+ __ret = (int16_t) __builtin_neon_vminvq_s16((int8x16_t)__p0);
+ return __ret;
+}
+#else
+__ai int16_t vminvq_s16(int16x8_t __p0) {
+ int16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
+ int16_t __ret;
+ __ret = (int16_t) __builtin_neon_vminvq_s16((int8x16_t)__rev0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint8_t vminv_u8(uint8x8_t __p0) {
+ uint8_t __ret;
+ __ret = (uint8_t) __builtin_neon_vminv_u8((int8x8_t)__p0);
+ return __ret;
+}
+#else
+__ai uint8_t vminv_u8(uint8x8_t __p0) {
+ uint8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
+ uint8_t __ret;
+ __ret = (uint8_t) __builtin_neon_vminv_u8((int8x8_t)__rev0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint32_t vminv_u32(uint32x2_t __p0) {
+ uint32_t __ret;
+ __ret = (uint32_t) __builtin_neon_vminv_u32((int8x8_t)__p0);
+ return __ret;
+}
+#else
+__ai uint32_t vminv_u32(uint32x2_t __p0) {
+ uint32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
+ uint32_t __ret;
+ __ret = (uint32_t) __builtin_neon_vminv_u32((int8x8_t)__rev0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint16_t vminv_u16(uint16x4_t __p0) {
+ uint16_t __ret;
+ __ret = (uint16_t) __builtin_neon_vminv_u16((int8x8_t)__p0);
+ return __ret;
+}
+#else
+__ai uint16_t vminv_u16(uint16x4_t __p0) {
+ uint16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
+ uint16_t __ret;
+ __ret = (uint16_t) __builtin_neon_vminv_u16((int8x8_t)__rev0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai int8_t vminv_s8(int8x8_t __p0) {
+ int8_t __ret;
+ __ret = (int8_t) __builtin_neon_vminv_s8((int8x8_t)__p0);
+ return __ret;
+}
+#else
+__ai int8_t vminv_s8(int8x8_t __p0) {
+ int8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
+ int8_t __ret;
+ __ret = (int8_t) __builtin_neon_vminv_s8((int8x8_t)__rev0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai float32_t vminv_f32(float32x2_t __p0) {
+ float32_t __ret;
+ __ret = (float32_t) __builtin_neon_vminv_f32((int8x8_t)__p0);
+ return __ret;
+}
+#else
+__ai float32_t vminv_f32(float32x2_t __p0) {
+ float32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
+ float32_t __ret;
+ __ret = (float32_t) __builtin_neon_vminv_f32((int8x8_t)__rev0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai int32_t vminv_s32(int32x2_t __p0) {
+ int32_t __ret;
+ __ret = (int32_t) __builtin_neon_vminv_s32((int8x8_t)__p0);
+ return __ret;
+}
+#else
+__ai int32_t vminv_s32(int32x2_t __p0) {
+ int32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
+ int32_t __ret;
+ __ret = (int32_t) __builtin_neon_vminv_s32((int8x8_t)__rev0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai int16_t vminv_s16(int16x4_t __p0) {
+ int16_t __ret;
+ __ret = (int16_t) __builtin_neon_vminv_s16((int8x8_t)__p0);
+ return __ret;
+}
+#else
+__ai int16_t vminv_s16(int16x4_t __p0) {
+ int16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
+ int16_t __ret;
+ __ret = (int16_t) __builtin_neon_vminv_s16((int8x8_t)__rev0);
+ return __ret;
+}
+#endif
+
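+/* vmla[q]_f64 : multiply-accumulate, written as plain C vector arithmetic
+ * (__p0 + __p1 * __p2) rather than a builtin.  Vector-valued results in the
+ * big-endian path are reversed again after the computation to restore the
+ * in-memory lane order. */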
+#ifdef __LITTLE_ENDIAN__
+__ai float64x2_t vmlaq_f64(float64x2_t __p0, float64x2_t __p1, float64x2_t __p2) {
+ float64x2_t __ret;
+ __ret = __p0 + __p1 * __p2;
+ return __ret;
+}
+#else
+__ai float64x2_t vmlaq_f64(float64x2_t __p0, float64x2_t __p1, float64x2_t __p2) {
+ float64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
+ float64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
+ float64x2_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 1, 0);
+ float64x2_t __ret;
+ __ret = __rev0 + __rev1 * __rev2;
+ __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai float64x1_t vmla_f64(float64x1_t __p0, float64x1_t __p1, float64x1_t __p2) {
+ float64x1_t __ret;
+ __ret = __p0 + __p1 * __p2;
+ return __ret;
+}
+#else
+__ai float64x1_t vmla_f64(float64x1_t __p0, float64x1_t __p1, float64x1_t __p2) {
+ float64x1_t __ret;
+ __ret = __p0 + __p1 * __p2;
+ return __ret;
+}
+#endif
+
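+/* vmla[q]_laneq_* : multiply-accumulate where the multiplier is lane __p3
+ * of the 128-bit vector __p2, broadcast to every lane by
+ * __builtin_shufflevector.  Illustrative use (hypothetical names):
+ *   acc = vmlaq_laneq_u32(acc, x, coeffs, 2);
+ * computes acc[i] += x[i] * coeffs[2] for each lane i. */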
+#ifdef __LITTLE_ENDIAN__
+#define vmlaq_laneq_u32(__p0, __p1, __p2, __p3) __extension__ ({ \
+ uint32x4_t __s0 = __p0; \
+ uint32x4_t __s1 = __p1; \
+ uint32x4_t __s2 = __p2; \
+ uint32x4_t __ret; \
+ __ret = __s0 + __s1 * __builtin_shufflevector(__s2, __s2, __p3, __p3, __p3, __p3); \
+ __ret; \
+})
+#else
+#define vmlaq_laneq_u32(__p0, __p1, __p2, __p3) __extension__ ({ \
+ uint32x4_t __s0 = __p0; \
+ uint32x4_t __s1 = __p1; \
+ uint32x4_t __s2 = __p2; \
+ uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
+ uint32x4_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
+ uint32x4_t __rev2; __rev2 = __builtin_shufflevector(__s2, __s2, 3, 2, 1, 0); \
+ uint32x4_t __ret; \
+ __ret = __rev0 + __rev1 * __builtin_shufflevector(__rev2, __rev2, __p3, __p3, __p3, __p3); \
+ __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
+ __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vmlaq_laneq_u16(__p0, __p1, __p2, __p3) __extension__ ({ \
+ uint16x8_t __s0 = __p0; \
+ uint16x8_t __s1 = __p1; \
+ uint16x8_t __s2 = __p2; \
+ uint16x8_t __ret; \
+ __ret = __s0 + __s1 * __builtin_shufflevector(__s2, __s2, __p3, __p3, __p3, __p3, __p3, __p3, __p3, __p3); \
+ __ret; \
+})
+#else
+#define vmlaq_laneq_u16(__p0, __p1, __p2, __p3) __extension__ ({ \
+ uint16x8_t __s0 = __p0; \
+ uint16x8_t __s1 = __p1; \
+ uint16x8_t __s2 = __p2; \
+ uint16x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
+ uint16x8_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \
+ uint16x8_t __rev2; __rev2 = __builtin_shufflevector(__s2, __s2, 7, 6, 5, 4, 3, 2, 1, 0); \
+ uint16x8_t __ret; \
+ __ret = __rev0 + __rev1 * __builtin_shufflevector(__rev2, __rev2, __p3, __p3, __p3, __p3, __p3, __p3, __p3, __p3); \
+ __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
+ __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vmlaq_laneq_f32(__p0, __p1, __p2, __p3) __extension__ ({ \
+ float32x4_t __s0 = __p0; \
+ float32x4_t __s1 = __p1; \
+ float32x4_t __s2 = __p2; \
+ float32x4_t __ret; \
+ __ret = __s0 + __s1 * __builtin_shufflevector(__s2, __s2, __p3, __p3, __p3, __p3); \
+ __ret; \
+})
+#else
+#define vmlaq_laneq_f32(__p0, __p1, __p2, __p3) __extension__ ({ \
+ float32x4_t __s0 = __p0; \
+ float32x4_t __s1 = __p1; \
+ float32x4_t __s2 = __p2; \
+ float32x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
+ float32x4_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
+ float32x4_t __rev2; __rev2 = __builtin_shufflevector(__s2, __s2, 3, 2, 1, 0); \
+ float32x4_t __ret; \
+ __ret = __rev0 + __rev1 * __builtin_shufflevector(__rev2, __rev2, __p3, __p3, __p3, __p3); \
+ __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
+ __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vmlaq_laneq_s32(__p0, __p1, __p2, __p3) __extension__ ({ \
+ int32x4_t __s0 = __p0; \
+ int32x4_t __s1 = __p1; \
+ int32x4_t __s2 = __p2; \
+ int32x4_t __ret; \
+ __ret = __s0 + __s1 * __builtin_shufflevector(__s2, __s2, __p3, __p3, __p3, __p3); \
+ __ret; \
+})
+#else
+#define vmlaq_laneq_s32(__p0, __p1, __p2, __p3) __extension__ ({ \
+ int32x4_t __s0 = __p0; \
+ int32x4_t __s1 = __p1; \
+ int32x4_t __s2 = __p2; \
+ int32x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
+ int32x4_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
+ int32x4_t __rev2; __rev2 = __builtin_shufflevector(__s2, __s2, 3, 2, 1, 0); \
+ int32x4_t __ret; \
+ __ret = __rev0 + __rev1 * __builtin_shufflevector(__rev2, __rev2, __p3, __p3, __p3, __p3); \
+ __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
+ __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vmlaq_laneq_s16(__p0, __p1, __p2, __p3) __extension__ ({ \
+ int16x8_t __s0 = __p0; \
+ int16x8_t __s1 = __p1; \
+ int16x8_t __s2 = __p2; \
+ int16x8_t __ret; \
+ __ret = __s0 + __s1 * __builtin_shufflevector(__s2, __s2, __p3, __p3, __p3, __p3, __p3, __p3, __p3, __p3); \
+ __ret; \
+})
+#else
+#define vmlaq_laneq_s16(__p0, __p1, __p2, __p3) __extension__ ({ \
+ int16x8_t __s0 = __p0; \
+ int16x8_t __s1 = __p1; \
+ int16x8_t __s2 = __p2; \
+ int16x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
+ int16x8_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \
+ int16x8_t __rev2; __rev2 = __builtin_shufflevector(__s2, __s2, 7, 6, 5, 4, 3, 2, 1, 0); \
+ int16x8_t __ret; \
+ __ret = __rev0 + __rev1 * __builtin_shufflevector(__rev2, __rev2, __p3, __p3, __p3, __p3, __p3, __p3, __p3, __p3); \
+ __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
+ __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vmla_laneq_u32(__p0, __p1, __p2, __p3) __extension__ ({ \
+ uint32x2_t __s0 = __p0; \
+ uint32x2_t __s1 = __p1; \
+ uint32x4_t __s2 = __p2; \
+ uint32x2_t __ret; \
+ __ret = __s0 + __s1 * __builtin_shufflevector(__s2, __s2, __p3, __p3); \
+ __ret; \
+})
+#else
+#define vmla_laneq_u32(__p0, __p1, __p2, __p3) __extension__ ({ \
+ uint32x2_t __s0 = __p0; \
+ uint32x2_t __s1 = __p1; \
+ uint32x4_t __s2 = __p2; \
+ uint32x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
+ uint32x2_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \
+ uint32x4_t __rev2; __rev2 = __builtin_shufflevector(__s2, __s2, 3, 2, 1, 0); \
+ uint32x2_t __ret; \
+ __ret = __rev0 + __rev1 * __builtin_shufflevector(__rev2, __rev2, __p3, __p3); \
+ __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
+ __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vmla_laneq_u16(__p0, __p1, __p2, __p3) __extension__ ({ \
+ uint16x4_t __s0 = __p0; \
+ uint16x4_t __s1 = __p1; \
+ uint16x8_t __s2 = __p2; \
+ uint16x4_t __ret; \
+ __ret = __s0 + __s1 * __builtin_shufflevector(__s2, __s2, __p3, __p3, __p3, __p3); \
+ __ret; \
+})
+#else
+#define vmla_laneq_u16(__p0, __p1, __p2, __p3) __extension__ ({ \
+ uint16x4_t __s0 = __p0; \
+ uint16x4_t __s1 = __p1; \
+ uint16x8_t __s2 = __p2; \
+ uint16x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
+ uint16x4_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
+ uint16x8_t __rev2; __rev2 = __builtin_shufflevector(__s2, __s2, 7, 6, 5, 4, 3, 2, 1, 0); \
+ uint16x4_t __ret; \
+ __ret = __rev0 + __rev1 * __builtin_shufflevector(__rev2, __rev2, __p3, __p3, __p3, __p3); \
+ __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
+ __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vmla_laneq_f32(__p0, __p1, __p2, __p3) __extension__ ({ \
+ float32x2_t __s0 = __p0; \
+ float32x2_t __s1 = __p1; \
+ float32x4_t __s2 = __p2; \
+ float32x2_t __ret; \
+ __ret = __s0 + __s1 * __builtin_shufflevector(__s2, __s2, __p3, __p3); \
+ __ret; \
+})
+#else
+#define vmla_laneq_f32(__p0, __p1, __p2, __p3) __extension__ ({ \
+ float32x2_t __s0 = __p0; \
+ float32x2_t __s1 = __p1; \
+ float32x4_t __s2 = __p2; \
+ float32x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
+ float32x2_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \
+ float32x4_t __rev2; __rev2 = __builtin_shufflevector(__s2, __s2, 3, 2, 1, 0); \
+ float32x2_t __ret; \
+ __ret = __rev0 + __rev1 * __builtin_shufflevector(__rev2, __rev2, __p3, __p3); \
+ __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
+ __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vmla_laneq_s32(__p0, __p1, __p2, __p3) __extension__ ({ \
+ int32x2_t __s0 = __p0; \
+ int32x2_t __s1 = __p1; \
+ int32x4_t __s2 = __p2; \
+ int32x2_t __ret; \
+ __ret = __s0 + __s1 * __builtin_shufflevector(__s2, __s2, __p3, __p3); \
+ __ret; \
+})
+#else
+#define vmla_laneq_s32(__p0, __p1, __p2, __p3) __extension__ ({ \
+ int32x2_t __s0 = __p0; \
+ int32x2_t __s1 = __p1; \
+ int32x4_t __s2 = __p2; \
+ int32x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
+ int32x2_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \
+ int32x4_t __rev2; __rev2 = __builtin_shufflevector(__s2, __s2, 3, 2, 1, 0); \
+ int32x2_t __ret; \
+ __ret = __rev0 + __rev1 * __builtin_shufflevector(__rev2, __rev2, __p3, __p3); \
+ __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
+ __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vmla_laneq_s16(__p0, __p1, __p2, __p3) __extension__ ({ \
+ int16x4_t __s0 = __p0; \
+ int16x4_t __s1 = __p1; \
+ int16x8_t __s2 = __p2; \
+ int16x4_t __ret; \
+ __ret = __s0 + __s1 * __builtin_shufflevector(__s2, __s2, __p3, __p3, __p3, __p3); \
+ __ret; \
+})
+#else
+#define vmla_laneq_s16(__p0, __p1, __p2, __p3) __extension__ ({ \
+ int16x4_t __s0 = __p0; \
+ int16x4_t __s1 = __p1; \
+ int16x8_t __s2 = __p2; \
+ int16x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
+ int16x4_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
+ int16x8_t __rev2; __rev2 = __builtin_shufflevector(__s2, __s2, 7, 6, 5, 4, 3, 2, 1, 0); \
+ int16x4_t __ret; \
+ __ret = __rev0 + __rev1 * __builtin_shufflevector(__rev2, __rev2, __p3, __p3, __p3, __p3); \
+ __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
+ __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai float64x2_t vmlaq_n_f64(float64x2_t __p0, float64x2_t __p1, float64_t __p2) {
+ float64x2_t __ret;
+ __ret = __p0 + __p1 * (float64x2_t) {__p2, __p2};
+ return __ret;
+}
+#else
+__ai float64x2_t vmlaq_n_f64(float64x2_t __p0, float64x2_t __p1, float64_t __p2) {
+ float64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
+ float64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
+ float64x2_t __ret;
+ __ret = __rev0 + __rev1 * (float64x2_t) {__p2, __p2};
+ __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
+ return __ret;
+}
+#endif
+
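+/* vmlal_high_lane_* : widening multiply-accumulate on the high half of
+ * __p1, i.e. __p0 + vmull(vget_high(__p1), dup of lane __p3 of __p2).  The
+ * big-endian paths call __noswap_-prefixed helpers: variants of the public
+ * intrinsics that skip the lane reversal, since their operands were already
+ * reversed by the enclosing definition. */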
+#ifdef __LITTLE_ENDIAN__
+#define vmlal_high_lane_u32(__p0, __p1, __p2, __p3) __extension__ ({ \
+ uint64x2_t __s0 = __p0; \
+ uint32x4_t __s1 = __p1; \
+ uint32x2_t __s2 = __p2; \
+ uint64x2_t __ret; \
+ __ret = __s0 + vmull_u32(vget_high_u32(__s1), __builtin_shufflevector(__s2, __s2, __p3, __p3)); \
+ __ret; \
+})
+#else
+#define vmlal_high_lane_u32(__p0, __p1, __p2, __p3) __extension__ ({ \
+ uint64x2_t __s0 = __p0; \
+ uint32x4_t __s1 = __p1; \
+ uint32x2_t __s2 = __p2; \
+ uint64x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
+ uint32x4_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
+ uint32x2_t __rev2; __rev2 = __builtin_shufflevector(__s2, __s2, 1, 0); \
+ uint64x2_t __ret; \
+ __ret = __rev0 + __noswap_vmull_u32(__noswap_vget_high_u32(__rev1), __builtin_shufflevector(__rev2, __rev2, __p3, __p3)); \
+ __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
+ __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vmlal_high_lane_u16(__p0, __p1, __p2, __p3) __extension__ ({ \
+ uint32x4_t __s0 = __p0; \
+ uint16x8_t __s1 = __p1; \
+ uint16x4_t __s2 = __p2; \
+ uint32x4_t __ret; \
+ __ret = __s0 + vmull_u16(vget_high_u16(__s1), __builtin_shufflevector(__s2, __s2, __p3, __p3, __p3, __p3)); \
+ __ret; \
+})
+#else
+#define vmlal_high_lane_u16(__p0, __p1, __p2, __p3) __extension__ ({ \
+ uint32x4_t __s0 = __p0; \
+ uint16x8_t __s1 = __p1; \
+ uint16x4_t __s2 = __p2; \
+ uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
+ uint16x8_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \
+ uint16x4_t __rev2; __rev2 = __builtin_shufflevector(__s2, __s2, 3, 2, 1, 0); \
+ uint32x4_t __ret; \
+ __ret = __rev0 + __noswap_vmull_u16(__noswap_vget_high_u16(__rev1), __builtin_shufflevector(__rev2, __rev2, __p3, __p3, __p3, __p3)); \
+ __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
+ __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vmlal_high_lane_s32(__p0, __p1, __p2, __p3) __extension__ ({ \
+ int64x2_t __s0 = __p0; \
+ int32x4_t __s1 = __p1; \
+ int32x2_t __s2 = __p2; \
+ int64x2_t __ret; \
+ __ret = __s0 + vmull_s32(vget_high_s32(__s1), __builtin_shufflevector(__s2, __s2, __p3, __p3)); \
+ __ret; \
+})
+#else
+#define vmlal_high_lane_s32(__p0, __p1, __p2, __p3) __extension__ ({ \
+ int64x2_t __s0 = __p0; \
+ int32x4_t __s1 = __p1; \
+ int32x2_t __s2 = __p2; \
+ int64x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
+ int32x4_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
+ int32x2_t __rev2; __rev2 = __builtin_shufflevector(__s2, __s2, 1, 0); \
+ int64x2_t __ret; \
+ __ret = __rev0 + __noswap_vmull_s32(__noswap_vget_high_s32(__rev1), __builtin_shufflevector(__rev2, __rev2, __p3, __p3)); \
+ __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
+ __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vmlal_high_lane_s16(__p0, __p1, __p2, __p3) __extension__ ({ \
+ int32x4_t __s0 = __p0; \
+ int16x8_t __s1 = __p1; \
+ int16x4_t __s2 = __p2; \
+ int32x4_t __ret; \
+ __ret = __s0 + vmull_s16(vget_high_s16(__s1), __builtin_shufflevector(__s2, __s2, __p3, __p3, __p3, __p3)); \
+ __ret; \
+})
+#else
+#define vmlal_high_lane_s16(__p0, __p1, __p2, __p3) __extension__ ({ \
+ int32x4_t __s0 = __p0; \
+ int16x8_t __s1 = __p1; \
+ int16x4_t __s2 = __p2; \
+ int32x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
+ int16x8_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \
+ int16x4_t __rev2; __rev2 = __builtin_shufflevector(__s2, __s2, 3, 2, 1, 0); \
+ int32x4_t __ret; \
+ __ret = __rev0 + __noswap_vmull_s16(__noswap_vget_high_s16(__rev1), __builtin_shufflevector(__rev2, __rev2, __p3, __p3, __p3, __p3)); \
+ __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
+ __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vmlal_high_laneq_u32(__p0, __p1, __p2, __p3) __extension__ ({ \
+ uint64x2_t __s0 = __p0; \
+ uint32x4_t __s1 = __p1; \
+ uint32x4_t __s2 = __p2; \
+ uint64x2_t __ret; \
+ __ret = __s0 + vmull_u32(vget_high_u32(__s1), __builtin_shufflevector(__s2, __s2, __p3, __p3)); \
+ __ret; \
+})
+#else
+#define vmlal_high_laneq_u32(__p0, __p1, __p2, __p3) __extension__ ({ \
+ uint64x2_t __s0 = __p0; \
+ uint32x4_t __s1 = __p1; \
+ uint32x4_t __s2 = __p2; \
+ uint64x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
+ uint32x4_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
+ uint32x4_t __rev2; __rev2 = __builtin_shufflevector(__s2, __s2, 3, 2, 1, 0); \
+ uint64x2_t __ret; \
+ __ret = __rev0 + __noswap_vmull_u32(__noswap_vget_high_u32(__rev1), __builtin_shufflevector(__rev2, __rev2, __p3, __p3)); \
+ __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
+ __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vmlal_high_laneq_u16(__p0, __p1, __p2, __p3) __extension__ ({ \
+ uint32x4_t __s0 = __p0; \
+ uint16x8_t __s1 = __p1; \
+ uint16x8_t __s2 = __p2; \
+ uint32x4_t __ret; \
+ __ret = __s0 + vmull_u16(vget_high_u16(__s1), __builtin_shufflevector(__s2, __s2, __p3, __p3, __p3, __p3)); \
+ __ret; \
+})
+#else
+#define vmlal_high_laneq_u16(__p0, __p1, __p2, __p3) __extension__ ({ \
+ uint32x4_t __s0 = __p0; \
+ uint16x8_t __s1 = __p1; \
+ uint16x8_t __s2 = __p2; \
+ uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
+ uint16x8_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \
+ uint16x8_t __rev2; __rev2 = __builtin_shufflevector(__s2, __s2, 7, 6, 5, 4, 3, 2, 1, 0); \
+ uint32x4_t __ret; \
+ __ret = __rev0 + __noswap_vmull_u16(__noswap_vget_high_u16(__rev1), __builtin_shufflevector(__rev2, __rev2, __p3, __p3, __p3, __p3)); \
+ __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
+ __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vmlal_high_laneq_s32(__p0, __p1, __p2, __p3) __extension__ ({ \
+ int64x2_t __s0 = __p0; \
+ int32x4_t __s1 = __p1; \
+ int32x4_t __s2 = __p2; \
+ int64x2_t __ret; \
+ __ret = __s0 + vmull_s32(vget_high_s32(__s1), __builtin_shufflevector(__s2, __s2, __p3, __p3)); \
+ __ret; \
+})
+#else
+#define vmlal_high_laneq_s32(__p0, __p1, __p2, __p3) __extension__ ({ \
+ int64x2_t __s0 = __p0; \
+ int32x4_t __s1 = __p1; \
+ int32x4_t __s2 = __p2; \
+ int64x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
+ int32x4_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
+ int32x4_t __rev2; __rev2 = __builtin_shufflevector(__s2, __s2, 3, 2, 1, 0); \
+ int64x2_t __ret; \
+ __ret = __rev0 + __noswap_vmull_s32(__noswap_vget_high_s32(__rev1), __builtin_shufflevector(__rev2, __rev2, __p3, __p3)); \
+ __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
+ __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vmlal_high_laneq_s16(__p0, __p1, __p2, __p3) __extension__ ({ \
+ int32x4_t __s0 = __p0; \
+ int16x8_t __s1 = __p1; \
+ int16x8_t __s2 = __p2; \
+ int32x4_t __ret; \
+ __ret = __s0 + vmull_s16(vget_high_s16(__s1), __builtin_shufflevector(__s2, __s2, __p3, __p3, __p3, __p3)); \
+ __ret; \
+})
+#else
+#define vmlal_high_laneq_s16(__p0, __p1, __p2, __p3) __extension__ ({ \
+ int32x4_t __s0 = __p0; \
+ int16x8_t __s1 = __p1; \
+ int16x8_t __s2 = __p2; \
+ int32x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
+ int16x8_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \
+ int16x8_t __rev2; __rev2 = __builtin_shufflevector(__s2, __s2, 7, 6, 5, 4, 3, 2, 1, 0); \
+ int32x4_t __ret; \
+ __ret = __rev0 + __noswap_vmull_s16(__noswap_vget_high_s16(__rev1), __builtin_shufflevector(__rev2, __rev2, __p3, __p3, __p3, __p3)); \
+ __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
+ __ret; \
+})
+#endif
+
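+/* vmlal_laneq_* : the non-_high counterparts; the whole 64-bit __p1 is
+ * widened and multiplied by lane __p3 of the 128-bit __p2. */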
+#ifdef __LITTLE_ENDIAN__
+#define vmlal_laneq_u32(__p0, __p1, __p2, __p3) __extension__ ({ \
+ uint64x2_t __s0 = __p0; \
+ uint32x2_t __s1 = __p1; \
+ uint32x4_t __s2 = __p2; \
+ uint64x2_t __ret; \
+ __ret = __s0 + vmull_u32(__s1, __builtin_shufflevector(__s2, __s2, __p3, __p3)); \
+ __ret; \
+})
+#else
+#define vmlal_laneq_u32(__p0, __p1, __p2, __p3) __extension__ ({ \
+ uint64x2_t __s0 = __p0; \
+ uint32x2_t __s1 = __p1; \
+ uint32x4_t __s2 = __p2; \
+ uint64x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
+ uint32x2_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \
+ uint32x4_t __rev2; __rev2 = __builtin_shufflevector(__s2, __s2, 3, 2, 1, 0); \
+ uint64x2_t __ret; \
+ __ret = __rev0 + __noswap_vmull_u32(__rev1, __builtin_shufflevector(__rev2, __rev2, __p3, __p3)); \
+ __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
+ __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vmlal_laneq_u16(__p0, __p1, __p2, __p3) __extension__ ({ \
+ uint32x4_t __s0 = __p0; \
+ uint16x4_t __s1 = __p1; \
+ uint16x8_t __s2 = __p2; \
+ uint32x4_t __ret; \
+ __ret = __s0 + vmull_u16(__s1, __builtin_shufflevector(__s2, __s2, __p3, __p3, __p3, __p3)); \
+ __ret; \
+})
+#else
+#define vmlal_laneq_u16(__p0, __p1, __p2, __p3) __extension__ ({ \
+ uint32x4_t __s0 = __p0; \
+ uint16x4_t __s1 = __p1; \
+ uint16x8_t __s2 = __p2; \
+ uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
+ uint16x4_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
+ uint16x8_t __rev2; __rev2 = __builtin_shufflevector(__s2, __s2, 7, 6, 5, 4, 3, 2, 1, 0); \
+ uint32x4_t __ret; \
+ __ret = __rev0 + __noswap_vmull_u16(__rev1, __builtin_shufflevector(__rev2, __rev2, __p3, __p3, __p3, __p3)); \
+ __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
+ __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vmlal_laneq_s32(__p0, __p1, __p2, __p3) __extension__ ({ \
+ int64x2_t __s0 = __p0; \
+ int32x2_t __s1 = __p1; \
+ int32x4_t __s2 = __p2; \
+ int64x2_t __ret; \
+ __ret = __s0 + vmull_s32(__s1, __builtin_shufflevector(__s2, __s2, __p3, __p3)); \
+ __ret; \
+})
+#else
+#define vmlal_laneq_s32(__p0, __p1, __p2, __p3) __extension__ ({ \
+ int64x2_t __s0 = __p0; \
+ int32x2_t __s1 = __p1; \
+ int32x4_t __s2 = __p2; \
+ int64x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
+ int32x2_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \
+ int32x4_t __rev2; __rev2 = __builtin_shufflevector(__s2, __s2, 3, 2, 1, 0); \
+ int64x2_t __ret; \
+ __ret = __rev0 + __noswap_vmull_s32(__rev1, __builtin_shufflevector(__rev2, __rev2, __p3, __p3)); \
+ __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
+ __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vmlal_laneq_s16(__p0, __p1, __p2, __p3) __extension__ ({ \
+ int32x4_t __s0 = __p0; \
+ int16x4_t __s1 = __p1; \
+ int16x8_t __s2 = __p2; \
+ int32x4_t __ret; \
+ __ret = __s0 + vmull_s16(__s1, __builtin_shufflevector(__s2, __s2, __p3, __p3, __p3, __p3)); \
+ __ret; \
+})
+#else
+#define vmlal_laneq_s16(__p0, __p1, __p2, __p3) __extension__ ({ \
+ int32x4_t __s0 = __p0; \
+ int16x4_t __s1 = __p1; \
+ int16x8_t __s2 = __p2; \
+ int32x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
+ int16x4_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
+ int16x8_t __rev2; __rev2 = __builtin_shufflevector(__s2, __s2, 7, 6, 5, 4, 3, 2, 1, 0); \
+ int32x4_t __ret; \
+ __ret = __rev0 + __noswap_vmull_s16(__rev1, __builtin_shufflevector(__rev2, __rev2, __p3, __p3, __p3, __p3)); \
+ __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
+ __ret; \
+})
+#endif
+
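+/* vmls[q]_f64 and the vmls*_laneq_* macros below mirror the vmla* family
+ * above, with subtraction (__p0 - __p1 * ...) in place of addition. */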
+#ifdef __LITTLE_ENDIAN__
+__ai float64x2_t vmlsq_f64(float64x2_t __p0, float64x2_t __p1, float64x2_t __p2) {
+ float64x2_t __ret;
+ __ret = __p0 - __p1 * __p2;
+ return __ret;
+}
+#else
+__ai float64x2_t vmlsq_f64(float64x2_t __p0, float64x2_t __p1, float64x2_t __p2) {
+ float64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
+ float64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
+ float64x2_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 1, 0);
+ float64x2_t __ret;
+ __ret = __rev0 - __rev1 * __rev2;
+ __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai float64x1_t vmls_f64(float64x1_t __p0, float64x1_t __p1, float64x1_t __p2) {
+ float64x1_t __ret;
+ __ret = __p0 - __p1 * __p2;
+ return __ret;
+}
+#else
+__ai float64x1_t vmls_f64(float64x1_t __p0, float64x1_t __p1, float64x1_t __p2) {
+ float64x1_t __ret;
+ __ret = __p0 - __p1 * __p2;
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vmlsq_laneq_u32(__p0, __p1, __p2, __p3) __extension__ ({ \
+ uint32x4_t __s0 = __p0; \
+ uint32x4_t __s1 = __p1; \
+ uint32x4_t __s2 = __p2; \
+ uint32x4_t __ret; \
+ __ret = __s0 - __s1 * __builtin_shufflevector(__s2, __s2, __p3, __p3, __p3, __p3); \
+ __ret; \
+})
+#else
+#define vmlsq_laneq_u32(__p0, __p1, __p2, __p3) __extension__ ({ \
+ uint32x4_t __s0 = __p0; \
+ uint32x4_t __s1 = __p1; \
+ uint32x4_t __s2 = __p2; \
+ uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
+ uint32x4_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
+ uint32x4_t __rev2; __rev2 = __builtin_shufflevector(__s2, __s2, 3, 2, 1, 0); \
+ uint32x4_t __ret; \
+ __ret = __rev0 - __rev1 * __builtin_shufflevector(__rev2, __rev2, __p3, __p3, __p3, __p3); \
+ __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
+ __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vmlsq_laneq_u16(__p0, __p1, __p2, __p3) __extension__ ({ \
+ uint16x8_t __s0 = __p0; \
+ uint16x8_t __s1 = __p1; \
+ uint16x8_t __s2 = __p2; \
+ uint16x8_t __ret; \
+ __ret = __s0 - __s1 * __builtin_shufflevector(__s2, __s2, __p3, __p3, __p3, __p3, __p3, __p3, __p3, __p3); \
+ __ret; \
+})
+#else
+#define vmlsq_laneq_u16(__p0, __p1, __p2, __p3) __extension__ ({ \
+ uint16x8_t __s0 = __p0; \
+ uint16x8_t __s1 = __p1; \
+ uint16x8_t __s2 = __p2; \
+ uint16x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
+ uint16x8_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \
+ uint16x8_t __rev2; __rev2 = __builtin_shufflevector(__s2, __s2, 7, 6, 5, 4, 3, 2, 1, 0); \
+ uint16x8_t __ret; \
+ __ret = __rev0 - __rev1 * __builtin_shufflevector(__rev2, __rev2, __p3, __p3, __p3, __p3, __p3, __p3, __p3, __p3); \
+ __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
+ __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vmlsq_laneq_f32(__p0, __p1, __p2, __p3) __extension__ ({ \
+ float32x4_t __s0 = __p0; \
+ float32x4_t __s1 = __p1; \
+ float32x4_t __s2 = __p2; \
+ float32x4_t __ret; \
+ __ret = __s0 - __s1 * __builtin_shufflevector(__s2, __s2, __p3, __p3, __p3, __p3); \
+ __ret; \
+})
+#else
+#define vmlsq_laneq_f32(__p0, __p1, __p2, __p3) __extension__ ({ \
+ float32x4_t __s0 = __p0; \
+ float32x4_t __s1 = __p1; \
+ float32x4_t __s2 = __p2; \
+ float32x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
+ float32x4_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
+ float32x4_t __rev2; __rev2 = __builtin_shufflevector(__s2, __s2, 3, 2, 1, 0); \
+ float32x4_t __ret; \
+ __ret = __rev0 - __rev1 * __builtin_shufflevector(__rev2, __rev2, __p3, __p3, __p3, __p3); \
+ __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
+ __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vmlsq_laneq_s32(__p0, __p1, __p2, __p3) __extension__ ({ \
+ int32x4_t __s0 = __p0; \
+ int32x4_t __s1 = __p1; \
+ int32x4_t __s2 = __p2; \
+ int32x4_t __ret; \
+ __ret = __s0 - __s1 * __builtin_shufflevector(__s2, __s2, __p3, __p3, __p3, __p3); \
+ __ret; \
+})
+#else
+#define vmlsq_laneq_s32(__p0, __p1, __p2, __p3) __extension__ ({ \
+ int32x4_t __s0 = __p0; \
+ int32x4_t __s1 = __p1; \
+ int32x4_t __s2 = __p2; \
+ int32x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
+ int32x4_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
+ int32x4_t __rev2; __rev2 = __builtin_shufflevector(__s2, __s2, 3, 2, 1, 0); \
+ int32x4_t __ret; \
+ __ret = __rev0 - __rev1 * __builtin_shufflevector(__rev2, __rev2, __p3, __p3, __p3, __p3); \
+ __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
+ __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vmlsq_laneq_s16(__p0, __p1, __p2, __p3) __extension__ ({ \
+ int16x8_t __s0 = __p0; \
+ int16x8_t __s1 = __p1; \
+ int16x8_t __s2 = __p2; \
+ int16x8_t __ret; \
+ __ret = __s0 - __s1 * __builtin_shufflevector(__s2, __s2, __p3, __p3, __p3, __p3, __p3, __p3, __p3, __p3); \
+ __ret; \
+})
+#else
+#define vmlsq_laneq_s16(__p0, __p1, __p2, __p3) __extension__ ({ \
+ int16x8_t __s0 = __p0; \
+ int16x8_t __s1 = __p1; \
+ int16x8_t __s2 = __p2; \
+ int16x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
+ int16x8_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \
+ int16x8_t __rev2; __rev2 = __builtin_shufflevector(__s2, __s2, 7, 6, 5, 4, 3, 2, 1, 0); \
+ int16x8_t __ret; \
+ __ret = __rev0 - __rev1 * __builtin_shufflevector(__rev2, __rev2, __p3, __p3, __p3, __p3, __p3, __p3, __p3, __p3); \
+ __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
+ __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vmls_laneq_u32(__p0, __p1, __p2, __p3) __extension__ ({ \
+ uint32x2_t __s0 = __p0; \
+ uint32x2_t __s1 = __p1; \
+ uint32x4_t __s2 = __p2; \
+ uint32x2_t __ret; \
+ __ret = __s0 - __s1 * __builtin_shufflevector(__s2, __s2, __p3, __p3); \
+ __ret; \
+})
+#else
+#define vmls_laneq_u32(__p0, __p1, __p2, __p3) __extension__ ({ \
+ uint32x2_t __s0 = __p0; \
+ uint32x2_t __s1 = __p1; \
+ uint32x4_t __s2 = __p2; \
+ uint32x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
+ uint32x2_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \
+ uint32x4_t __rev2; __rev2 = __builtin_shufflevector(__s2, __s2, 3, 2, 1, 0); \
+ uint32x2_t __ret; \
+ __ret = __rev0 - __rev1 * __builtin_shufflevector(__rev2, __rev2, __p3, __p3); \
+ __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
+ __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vmls_laneq_u16(__p0, __p1, __p2, __p3) __extension__ ({ \
+ uint16x4_t __s0 = __p0; \
+ uint16x4_t __s1 = __p1; \
+ uint16x8_t __s2 = __p2; \
+ uint16x4_t __ret; \
+ __ret = __s0 - __s1 * __builtin_shufflevector(__s2, __s2, __p3, __p3, __p3, __p3); \
+ __ret; \
+})
+#else
+#define vmls_laneq_u16(__p0, __p1, __p2, __p3) __extension__ ({ \
+ uint16x4_t __s0 = __p0; \
+ uint16x4_t __s1 = __p1; \
+ uint16x8_t __s2 = __p2; \
+ uint16x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
+ uint16x4_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
+ uint16x8_t __rev2; __rev2 = __builtin_shufflevector(__s2, __s2, 7, 6, 5, 4, 3, 2, 1, 0); \
+ uint16x4_t __ret; \
+ __ret = __rev0 - __rev1 * __builtin_shufflevector(__rev2, __rev2, __p3, __p3, __p3, __p3); \
+ __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
+ __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vmls_laneq_f32(__p0, __p1, __p2, __p3) __extension__ ({ \
+ float32x2_t __s0 = __p0; \
+ float32x2_t __s1 = __p1; \
+ float32x4_t __s2 = __p2; \
+ float32x2_t __ret; \
+ __ret = __s0 - __s1 * __builtin_shufflevector(__s2, __s2, __p3, __p3); \
+ __ret; \
+})
+#else
+#define vmls_laneq_f32(__p0, __p1, __p2, __p3) __extension__ ({ \
+ float32x2_t __s0 = __p0; \
+ float32x2_t __s1 = __p1; \
+ float32x4_t __s2 = __p2; \
+ float32x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
+ float32x2_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \
+ float32x4_t __rev2; __rev2 = __builtin_shufflevector(__s2, __s2, 3, 2, 1, 0); \
+ float32x2_t __ret; \
+ __ret = __rev0 - __rev1 * __builtin_shufflevector(__rev2, __rev2, __p3, __p3); \
+ __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
+ __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vmls_laneq_s32(__p0, __p1, __p2, __p3) __extension__ ({ \
+ int32x2_t __s0 = __p0; \
+ int32x2_t __s1 = __p1; \
+ int32x4_t __s2 = __p2; \
+ int32x2_t __ret; \
+ __ret = __s0 - __s1 * __builtin_shufflevector(__s2, __s2, __p3, __p3); \
+ __ret; \
+})
+#else
+#define vmls_laneq_s32(__p0, __p1, __p2, __p3) __extension__ ({ \
+ int32x2_t __s0 = __p0; \
+ int32x2_t __s1 = __p1; \
+ int32x4_t __s2 = __p2; \
+ int32x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
+ int32x2_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \
+ int32x4_t __rev2; __rev2 = __builtin_shufflevector(__s2, __s2, 3, 2, 1, 0); \
+ int32x2_t __ret; \
+ __ret = __rev0 - __rev1 * __builtin_shufflevector(__rev2, __rev2, __p3, __p3); \
+ __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
+ __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vmls_laneq_s16(__p0, __p1, __p2, __p3) __extension__ ({ \
+ int16x4_t __s0 = __p0; \
+ int16x4_t __s1 = __p1; \
+ int16x8_t __s2 = __p2; \
+ int16x4_t __ret; \
+ __ret = __s0 - __s1 * __builtin_shufflevector(__s2, __s2, __p3, __p3, __p3, __p3); \
+ __ret; \
+})
+#else
+#define vmls_laneq_s16(__p0, __p1, __p2, __p3) __extension__ ({ \
+ int16x4_t __s0 = __p0; \
+ int16x4_t __s1 = __p1; \
+ int16x8_t __s2 = __p2; \
+ int16x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
+ int16x4_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
+ int16x8_t __rev2; __rev2 = __builtin_shufflevector(__s2, __s2, 7, 6, 5, 4, 3, 2, 1, 0); \
+ int16x4_t __ret; \
+ __ret = __rev0 - __rev1 * __builtin_shufflevector(__rev2, __rev2, __p3, __p3, __p3, __p3); \
+ __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
+ __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai float64x2_t vmlsq_n_f64(float64x2_t __p0, float64x2_t __p1, float64_t __p2) {
+ float64x2_t __ret;
+ __ret = __p0 - __p1 * (float64x2_t) {__p2, __p2};
+ return __ret;
+}
+#else
+__ai float64x2_t vmlsq_n_f64(float64x2_t __p0, float64x2_t __p1, float64_t __p2) {
+ float64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
+ float64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
+ float64x2_t __ret;
+ __ret = __rev0 - __rev1 * (float64x2_t) {__p2, __p2};
+ __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
+ return __ret;
+}
+#endif
+
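+/* vmlsl_high_lane_*, vmlsl_high_laneq_* and vmlsl_laneq_* : widening
+ * multiply-subtract; structurally identical to the vmlal* definitions
+ * above, with vmull's product subtracted from the accumulator. */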
+#ifdef __LITTLE_ENDIAN__
+#define vmlsl_high_lane_u32(__p0, __p1, __p2, __p3) __extension__ ({ \
+ uint64x2_t __s0 = __p0; \
+ uint32x4_t __s1 = __p1; \
+ uint32x2_t __s2 = __p2; \
+ uint64x2_t __ret; \
+ __ret = __s0 - vmull_u32(vget_high_u32(__s1), __builtin_shufflevector(__s2, __s2, __p3, __p3)); \
+ __ret; \
+})
+#else
+#define vmlsl_high_lane_u32(__p0, __p1, __p2, __p3) __extension__ ({ \
+ uint64x2_t __s0 = __p0; \
+ uint32x4_t __s1 = __p1; \
+ uint32x2_t __s2 = __p2; \
+ uint64x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
+ uint32x4_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
+ uint32x2_t __rev2; __rev2 = __builtin_shufflevector(__s2, __s2, 1, 0); \
+ uint64x2_t __ret; \
+ __ret = __rev0 - __noswap_vmull_u32(__noswap_vget_high_u32(__rev1), __builtin_shufflevector(__rev2, __rev2, __p3, __p3)); \
+ __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
+ __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vmlsl_high_lane_u16(__p0, __p1, __p2, __p3) __extension__ ({ \
+ uint32x4_t __s0 = __p0; \
+ uint16x8_t __s1 = __p1; \
+ uint16x4_t __s2 = __p2; \
+ uint32x4_t __ret; \
+ __ret = __s0 - vmull_u16(vget_high_u16(__s1), __builtin_shufflevector(__s2, __s2, __p3, __p3, __p3, __p3)); \
+ __ret; \
+})
+#else
+#define vmlsl_high_lane_u16(__p0, __p1, __p2, __p3) __extension__ ({ \
+ uint32x4_t __s0 = __p0; \
+ uint16x8_t __s1 = __p1; \
+ uint16x4_t __s2 = __p2; \
+ uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
+ uint16x8_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \
+ uint16x4_t __rev2; __rev2 = __builtin_shufflevector(__s2, __s2, 3, 2, 1, 0); \
+ uint32x4_t __ret; \
+ __ret = __rev0 - __noswap_vmull_u16(__noswap_vget_high_u16(__rev1), __builtin_shufflevector(__rev2, __rev2, __p3, __p3, __p3, __p3)); \
+ __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
+ __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vmlsl_high_lane_s32(__p0, __p1, __p2, __p3) __extension__ ({ \
+ int64x2_t __s0 = __p0; \
+ int32x4_t __s1 = __p1; \
+ int32x2_t __s2 = __p2; \
+ int64x2_t __ret; \
+ __ret = __s0 - vmull_s32(vget_high_s32(__s1), __builtin_shufflevector(__s2, __s2, __p3, __p3)); \
+ __ret; \
+})
+#else
+#define vmlsl_high_lane_s32(__p0, __p1, __p2, __p3) __extension__ ({ \
+ int64x2_t __s0 = __p0; \
+ int32x4_t __s1 = __p1; \
+ int32x2_t __s2 = __p2; \
+ int64x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
+ int32x4_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
+ int32x2_t __rev2; __rev2 = __builtin_shufflevector(__s2, __s2, 1, 0); \
+ int64x2_t __ret; \
+ __ret = __rev0 - __noswap_vmull_s32(__noswap_vget_high_s32(__rev1), __builtin_shufflevector(__rev2, __rev2, __p3, __p3)); \
+ __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
+ __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vmlsl_high_lane_s16(__p0, __p1, __p2, __p3) __extension__ ({ \
+ int32x4_t __s0 = __p0; \
+ int16x8_t __s1 = __p1; \
+ int16x4_t __s2 = __p2; \
+ int32x4_t __ret; \
+ __ret = __s0 - vmull_s16(vget_high_s16(__s1), __builtin_shufflevector(__s2, __s2, __p3, __p3, __p3, __p3)); \
+ __ret; \
+})
+#else
+#define vmlsl_high_lane_s16(__p0, __p1, __p2, __p3) __extension__ ({ \
+ int32x4_t __s0 = __p0; \
+ int16x8_t __s1 = __p1; \
+ int16x4_t __s2 = __p2; \
+ int32x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
+ int16x8_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \
+ int16x4_t __rev2; __rev2 = __builtin_shufflevector(__s2, __s2, 3, 2, 1, 0); \
+ int32x4_t __ret; \
+ __ret = __rev0 - __noswap_vmull_s16(__noswap_vget_high_s16(__rev1), __builtin_shufflevector(__rev2, __rev2, __p3, __p3, __p3, __p3)); \
+ __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
+ __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vmlsl_high_laneq_u32(__p0, __p1, __p2, __p3) __extension__ ({ \
+ uint64x2_t __s0 = __p0; \
+ uint32x4_t __s1 = __p1; \
+ uint32x4_t __s2 = __p2; \
+ uint64x2_t __ret; \
+ __ret = __s0 - vmull_u32(vget_high_u32(__s1), __builtin_shufflevector(__s2, __s2, __p3, __p3)); \
+ __ret; \
+})
+#else
+#define vmlsl_high_laneq_u32(__p0, __p1, __p2, __p3) __extension__ ({ \
+ uint64x2_t __s0 = __p0; \
+ uint32x4_t __s1 = __p1; \
+ uint32x4_t __s2 = __p2; \
+ uint64x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
+ uint32x4_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
+ uint32x4_t __rev2; __rev2 = __builtin_shufflevector(__s2, __s2, 3, 2, 1, 0); \
+ uint64x2_t __ret; \
+ __ret = __rev0 - __noswap_vmull_u32(__noswap_vget_high_u32(__rev1), __builtin_shufflevector(__rev2, __rev2, __p3, __p3)); \
+ __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
+ __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vmlsl_high_laneq_u16(__p0, __p1, __p2, __p3) __extension__ ({ \
+ uint32x4_t __s0 = __p0; \
+ uint16x8_t __s1 = __p1; \
+ uint16x8_t __s2 = __p2; \
+ uint32x4_t __ret; \
+ __ret = __s0 - vmull_u16(vget_high_u16(__s1), __builtin_shufflevector(__s2, __s2, __p3, __p3, __p3, __p3)); \
+ __ret; \
+})
+#else
+#define vmlsl_high_laneq_u16(__p0, __p1, __p2, __p3) __extension__ ({ \
+ uint32x4_t __s0 = __p0; \
+ uint16x8_t __s1 = __p1; \
+ uint16x8_t __s2 = __p2; \
+ uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
+ uint16x8_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \
+ uint16x8_t __rev2; __rev2 = __builtin_shufflevector(__s2, __s2, 7, 6, 5, 4, 3, 2, 1, 0); \
+ uint32x4_t __ret; \
+ __ret = __rev0 - __noswap_vmull_u16(__noswap_vget_high_u16(__rev1), __builtin_shufflevector(__rev2, __rev2, __p3, __p3, __p3, __p3)); \
+ __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
+ __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vmlsl_high_laneq_s32(__p0, __p1, __p2, __p3) __extension__ ({ \
+ int64x2_t __s0 = __p0; \
+ int32x4_t __s1 = __p1; \
+ int32x4_t __s2 = __p2; \
+ int64x2_t __ret; \
+ __ret = __s0 - vmull_s32(vget_high_s32(__s1), __builtin_shufflevector(__s2, __s2, __p3, __p3)); \
+ __ret; \
+})
+#else
+#define vmlsl_high_laneq_s32(__p0, __p1, __p2, __p3) __extension__ ({ \
+ int64x2_t __s0 = __p0; \
+ int32x4_t __s1 = __p1; \
+ int32x4_t __s2 = __p2; \
+ int64x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
+ int32x4_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
+ int32x4_t __rev2; __rev2 = __builtin_shufflevector(__s2, __s2, 3, 2, 1, 0); \
+ int64x2_t __ret; \
+ __ret = __rev0 - __noswap_vmull_s32(__noswap_vget_high_s32(__rev1), __builtin_shufflevector(__rev2, __rev2, __p3, __p3)); \
+ __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
+ __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vmlsl_high_laneq_s16(__p0, __p1, __p2, __p3) __extension__ ({ \
+ int32x4_t __s0 = __p0; \
+ int16x8_t __s1 = __p1; \
+ int16x8_t __s2 = __p2; \
+ int32x4_t __ret; \
+ __ret = __s0 - vmull_s16(vget_high_s16(__s1), __builtin_shufflevector(__s2, __s2, __p3, __p3, __p3, __p3)); \
+ __ret; \
+})
+#else
+#define vmlsl_high_laneq_s16(__p0, __p1, __p2, __p3) __extension__ ({ \
+ int32x4_t __s0 = __p0; \
+ int16x8_t __s1 = __p1; \
+ int16x8_t __s2 = __p2; \
+ int32x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
+ int16x8_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \
+ int16x8_t __rev2; __rev2 = __builtin_shufflevector(__s2, __s2, 7, 6, 5, 4, 3, 2, 1, 0); \
+ int32x4_t __ret; \
+ __ret = __rev0 - __noswap_vmull_s16(__noswap_vget_high_s16(__rev1), __builtin_shufflevector(__rev2, __rev2, __p3, __p3, __p3, __p3)); \
+ __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
+ __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vmlsl_laneq_u32(__p0, __p1, __p2, __p3) __extension__ ({ \
+ uint64x2_t __s0 = __p0; \
+ uint32x2_t __s1 = __p1; \
+ uint32x4_t __s2 = __p2; \
+ uint64x2_t __ret; \
+ __ret = __s0 - vmull_u32(__s1, __builtin_shufflevector(__s2, __s2, __p3, __p3)); \
+ __ret; \
+})
+#else
+#define vmlsl_laneq_u32(__p0, __p1, __p2, __p3) __extension__ ({ \
+ uint64x2_t __s0 = __p0; \
+ uint32x2_t __s1 = __p1; \
+ uint32x4_t __s2 = __p2; \
+ uint64x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
+ uint32x2_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \
+ uint32x4_t __rev2; __rev2 = __builtin_shufflevector(__s2, __s2, 3, 2, 1, 0); \
+ uint64x2_t __ret; \
+ __ret = __rev0 - __noswap_vmull_u32(__rev1, __builtin_shufflevector(__rev2, __rev2, __p3, __p3)); \
+ __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
+ __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vmlsl_laneq_u16(__p0, __p1, __p2, __p3) __extension__ ({ \
+ uint32x4_t __s0 = __p0; \
+ uint16x4_t __s1 = __p1; \
+ uint16x8_t __s2 = __p2; \
+ uint32x4_t __ret; \
+ __ret = __s0 - vmull_u16(__s1, __builtin_shufflevector(__s2, __s2, __p3, __p3, __p3, __p3)); \
+ __ret; \
+})
+#else
+#define vmlsl_laneq_u16(__p0, __p1, __p2, __p3) __extension__ ({ \
+ uint32x4_t __s0 = __p0; \
+ uint16x4_t __s1 = __p1; \
+ uint16x8_t __s2 = __p2; \
+ uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
+ uint16x4_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
+ uint16x8_t __rev2; __rev2 = __builtin_shufflevector(__s2, __s2, 7, 6, 5, 4, 3, 2, 1, 0); \
+ uint32x4_t __ret; \
+ __ret = __rev0 - __noswap_vmull_u16(__rev1, __builtin_shufflevector(__rev2, __rev2, __p3, __p3, __p3, __p3)); \
+ __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
+ __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vmlsl_laneq_s32(__p0, __p1, __p2, __p3) __extension__ ({ \
+ int64x2_t __s0 = __p0; \
+ int32x2_t __s1 = __p1; \
+ int32x4_t __s2 = __p2; \
+ int64x2_t __ret; \
+ __ret = __s0 - vmull_s32(__s1, __builtin_shufflevector(__s2, __s2, __p3, __p3)); \
+ __ret; \
+})
+#else
+#define vmlsl_laneq_s32(__p0, __p1, __p2, __p3) __extension__ ({ \
+ int64x2_t __s0 = __p0; \
+ int32x2_t __s1 = __p1; \
+ int32x4_t __s2 = __p2; \
+ int64x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
+ int32x2_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \
+ int32x4_t __rev2; __rev2 = __builtin_shufflevector(__s2, __s2, 3, 2, 1, 0); \
+ int64x2_t __ret; \
+ __ret = __rev0 - __noswap_vmull_s32(__rev1, __builtin_shufflevector(__rev2, __rev2, __p3, __p3)); \
+ __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
+ __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vmlsl_laneq_s16(__p0, __p1, __p2, __p3) __extension__ ({ \
+ int32x4_t __s0 = __p0; \
+ int16x4_t __s1 = __p1; \
+ int16x8_t __s2 = __p2; \
+ int32x4_t __ret; \
+ __ret = __s0 - vmull_s16(__s1, __builtin_shufflevector(__s2, __s2, __p3, __p3, __p3, __p3)); \
+ __ret; \
+})
+#else
+#define vmlsl_laneq_s16(__p0, __p1, __p2, __p3) __extension__ ({ \
+ int32x4_t __s0 = __p0; \
+ int16x4_t __s1 = __p1; \
+ int16x8_t __s2 = __p2; \
+ int32x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
+ int16x4_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
+ int16x8_t __rev2; __rev2 = __builtin_shufflevector(__s2, __s2, 7, 6, 5, 4, 3, 2, 1, 0); \
+ int32x4_t __ret; \
+ __ret = __rev0 - __noswap_vmull_s16(__rev1, __builtin_shufflevector(__rev2, __rev2, __p3, __p3, __p3, __p3)); \
+ __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
+ __ret; \
+})
+#endif
+
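+/* vmov[q]_n_* : broadcast (duplicate) a scalar into every lane via a
+ * compound literal.  Only the multi-lane q-forms need a lane reversal on
+ * big-endian targets; the single-lane forms are identical in both paths. */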
+#ifdef __LITTLE_ENDIAN__
+__ai poly64x1_t vmov_n_p64(poly64_t __p0) {
+ poly64x1_t __ret;
+ __ret = (poly64x1_t) {__p0};
+ return __ret;
+}
+#else
+__ai poly64x1_t vmov_n_p64(poly64_t __p0) {
+ poly64x1_t __ret;
+ __ret = (poly64x1_t) {__p0};
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai poly64x2_t vmovq_n_p64(poly64_t __p0) {
+ poly64x2_t __ret;
+ __ret = (poly64x2_t) {__p0, __p0};
+ return __ret;
+}
+#else
+__ai poly64x2_t vmovq_n_p64(poly64_t __p0) {
+ poly64x2_t __ret;
+ __ret = (poly64x2_t) {__p0, __p0};
+ __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai float64x2_t vmovq_n_f64(float64_t __p0) {
+ float64x2_t __ret;
+ __ret = (float64x2_t) {__p0, __p0};
+ return __ret;
+}
+#else
+__ai float64x2_t vmovq_n_f64(float64_t __p0) {
+ float64x2_t __ret;
+ __ret = (float64x2_t) {__p0, __p0};
+ __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai float64x1_t vmov_n_f64(float64_t __p0) {
+ float64x1_t __ret;
+ __ret = (float64x1_t) {__p0};
+ return __ret;
+}
+#else
+__ai float64x1_t vmov_n_f64(float64_t __p0) {
+ float64x1_t __ret;
+ __ret = (float64x1_t) {__p0};
+ return __ret;
+}
+#endif
+
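+/* vmovl_high_* : widen (lengthen) the high half of a vector, implemented as
+ * a shift-left-long by zero (vshll_n(..., 0)).  The _112/_113/... suffixes
+ * are emitter-generated unique names that prevent temporaries from
+ * colliding when these definitions expand inside other macros.  Each
+ * big-endian branch also defines a __noswap_ variant for use by other
+ * reversed-operand implementations. */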
+#ifdef __LITTLE_ENDIAN__
+__ai uint16x8_t vmovl_high_u8(uint8x16_t __p0_112) {
+ uint16x8_t __ret_112;
+ uint8x8_t __a1_112 = vget_high_u8(__p0_112);
+ __ret_112 = (uint16x8_t)(vshll_n_u8(__a1_112, 0));
+ return __ret_112;
+}
+#else
+__ai uint16x8_t vmovl_high_u8(uint8x16_t __p0_113) {
+ uint8x16_t __rev0_113; __rev0_113 = __builtin_shufflevector(__p0_113, __p0_113, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
+ uint16x8_t __ret_113;
+ uint8x8_t __a1_113 = __noswap_vget_high_u8(__rev0_113);
+ __ret_113 = (uint16x8_t)(__noswap_vshll_n_u8(__a1_113, 0));
+ __ret_113 = __builtin_shufflevector(__ret_113, __ret_113, 7, 6, 5, 4, 3, 2, 1, 0);
+ return __ret_113;
+}
+__ai uint16x8_t __noswap_vmovl_high_u8(uint8x16_t __p0_114) {
+ uint16x8_t __ret_114;
+ uint8x8_t __a1_114 = __noswap_vget_high_u8(__p0_114);
+ __ret_114 = (uint16x8_t)(__noswap_vshll_n_u8(__a1_114, 0));
+ return __ret_114;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint64x2_t vmovl_high_u32(uint32x4_t __p0_115) {
+ uint64x2_t __ret_115;
+ uint32x2_t __a1_115 = vget_high_u32(__p0_115);
+ __ret_115 = (uint64x2_t)(vshll_n_u32(__a1_115, 0));
+ return __ret_115;
+}
+#else
+__ai uint64x2_t vmovl_high_u32(uint32x4_t __p0_116) {
+ uint32x4_t __rev0_116; __rev0_116 = __builtin_shufflevector(__p0_116, __p0_116, 3, 2, 1, 0);
+ uint64x2_t __ret_116;
+ uint32x2_t __a1_116 = __noswap_vget_high_u32(__rev0_116);
+ __ret_116 = (uint64x2_t)(__noswap_vshll_n_u32(__a1_116, 0));
+ __ret_116 = __builtin_shufflevector(__ret_116, __ret_116, 1, 0);
+ return __ret_116;
+}
+__ai uint64x2_t __noswap_vmovl_high_u32(uint32x4_t __p0_117) {
+ uint64x2_t __ret_117;
+ uint32x2_t __a1_117 = __noswap_vget_high_u32(__p0_117);
+ __ret_117 = (uint64x2_t)(__noswap_vshll_n_u32(__a1_117, 0));
+ return __ret_117;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint32x4_t vmovl_high_u16(uint16x8_t __p0_118) {
+ uint32x4_t __ret_118;
+ uint16x4_t __a1_118 = vget_high_u16(__p0_118);
+ __ret_118 = (uint32x4_t)(vshll_n_u16(__a1_118, 0));
+ return __ret_118;
+}
+#else
+__ai uint32x4_t vmovl_high_u16(uint16x8_t __p0_119) {
+ uint16x8_t __rev0_119; __rev0_119 = __builtin_shufflevector(__p0_119, __p0_119, 7, 6, 5, 4, 3, 2, 1, 0);
+ uint32x4_t __ret_119;
+ uint16x4_t __a1_119 = __noswap_vget_high_u16(__rev0_119);
+ __ret_119 = (uint32x4_t)(__noswap_vshll_n_u16(__a1_119, 0));
+ __ret_119 = __builtin_shufflevector(__ret_119, __ret_119, 3, 2, 1, 0);
+ return __ret_119;
+}
+__ai uint32x4_t __noswap_vmovl_high_u16(uint16x8_t __p0_120) {
+ uint32x4_t __ret_120;
+ uint16x4_t __a1_120 = __noswap_vget_high_u16(__p0_120);
+ __ret_120 = (uint32x4_t)(__noswap_vshll_n_u16(__a1_120, 0));
+ return __ret_120;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai int16x8_t vmovl_high_s8(int8x16_t __p0_121) {
+ int16x8_t __ret_121;
+ int8x8_t __a1_121 = vget_high_s8(__p0_121);
+ __ret_121 = (int16x8_t)(vshll_n_s8(__a1_121, 0));
+ return __ret_121;
+}
+#else
+__ai int16x8_t vmovl_high_s8(int8x16_t __p0_122) {
+ int8x16_t __rev0_122; __rev0_122 = __builtin_shufflevector(__p0_122, __p0_122, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
+ int16x8_t __ret_122;
+ int8x8_t __a1_122 = __noswap_vget_high_s8(__rev0_122);
+ __ret_122 = (int16x8_t)(__noswap_vshll_n_s8(__a1_122, 0));
+ __ret_122 = __builtin_shufflevector(__ret_122, __ret_122, 7, 6, 5, 4, 3, 2, 1, 0);
+ return __ret_122;
+}
+__ai int16x8_t __noswap_vmovl_high_s8(int8x16_t __p0_123) {
+ int16x8_t __ret_123;
+ int8x8_t __a1_123 = __noswap_vget_high_s8(__p0_123);
+ __ret_123 = (int16x8_t)(__noswap_vshll_n_s8(__a1_123, 0));
+ return __ret_123;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai int64x2_t vmovl_high_s32(int32x4_t __p0_124) {
+ int64x2_t __ret_124;
+ int32x2_t __a1_124 = vget_high_s32(__p0_124);
+ __ret_124 = (int64x2_t)(vshll_n_s32(__a1_124, 0));
+ return __ret_124;
+}
+#else
+__ai int64x2_t vmovl_high_s32(int32x4_t __p0_125) {
+ int32x4_t __rev0_125; __rev0_125 = __builtin_shufflevector(__p0_125, __p0_125, 3, 2, 1, 0);
+ int64x2_t __ret_125;
+ int32x2_t __a1_125 = __noswap_vget_high_s32(__rev0_125);
+ __ret_125 = (int64x2_t)(__noswap_vshll_n_s32(__a1_125, 0));
+ __ret_125 = __builtin_shufflevector(__ret_125, __ret_125, 1, 0);
+ return __ret_125;
+}
+__ai int64x2_t __noswap_vmovl_high_s32(int32x4_t __p0_126) {
+ int64x2_t __ret_126;
+ int32x2_t __a1_126 = __noswap_vget_high_s32(__p0_126);
+ __ret_126 = (int64x2_t)(__noswap_vshll_n_s32(__a1_126, 0));
+ return __ret_126;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai int32x4_t vmovl_high_s16(int16x8_t __p0_127) {
+ int32x4_t __ret_127;
+ int16x4_t __a1_127 = vget_high_s16(__p0_127);
+ __ret_127 = (int32x4_t)(vshll_n_s16(__a1_127, 0));
+ return __ret_127;
+}
+#else
+__ai int32x4_t vmovl_high_s16(int16x8_t __p0_128) {
+ int16x8_t __rev0_128; __rev0_128 = __builtin_shufflevector(__p0_128, __p0_128, 7, 6, 5, 4, 3, 2, 1, 0);
+ int32x4_t __ret_128;
+ int16x4_t __a1_128 = __noswap_vget_high_s16(__rev0_128);
+ __ret_128 = (int32x4_t)(__noswap_vshll_n_s16(__a1_128, 0));
+ __ret_128 = __builtin_shufflevector(__ret_128, __ret_128, 3, 2, 1, 0);
+ return __ret_128;
+}
+__ai int32x4_t __noswap_vmovl_high_s16(int16x8_t __p0_129) {
+ int32x4_t __ret_129;
+ int16x4_t __a1_129 = __noswap_vget_high_s16(__p0_129);
+ __ret_129 = (int32x4_t)(__noswap_vshll_n_s16(__a1_129, 0));
+ return __ret_129;
+}
+#endif
+
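+/* vmovn_high_* : narrow __p1 into the high half of the result, with __p0
+ * supplying the low half: vcombine(__p0, vmovn(__p1)). */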
+#ifdef __LITTLE_ENDIAN__
+__ai uint16x8_t vmovn_high_u32(uint16x4_t __p0, uint32x4_t __p1) {
+ uint16x8_t __ret;
+ __ret = vcombine_u16(__p0, vmovn_u32(__p1));
+ return __ret;
+}
+#else
+__ai uint16x8_t vmovn_high_u32(uint16x4_t __p0, uint32x4_t __p1) {
+ uint16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
+ uint32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
+ uint16x8_t __ret;
+ __ret = __noswap_vcombine_u16(__rev0, __noswap_vmovn_u32(__rev1));
+ __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint32x4_t vmovn_high_u64(uint32x2_t __p0, uint64x2_t __p1) {
+ uint32x4_t __ret;
+ __ret = vcombine_u32(__p0, vmovn_u64(__p1));
+ return __ret;
+}
+#else
+__ai uint32x4_t vmovn_high_u64(uint32x2_t __p0, uint64x2_t __p1) {
+ uint32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
+ uint64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
+ uint32x4_t __ret;
+ __ret = __noswap_vcombine_u32(__rev0, __noswap_vmovn_u64(__rev1));
+ __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint8x16_t vmovn_high_u16(uint8x8_t __p0, uint16x8_t __p1) {
+ uint8x16_t __ret;
+ __ret = vcombine_u8(__p0, vmovn_u16(__p1));
+ return __ret;
+}
+#else
+__ai uint8x16_t vmovn_high_u16(uint8x8_t __p0, uint16x8_t __p1) {
+ uint8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
+ uint16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
+ uint8x16_t __ret;
+ __ret = __noswap_vcombine_u8(__rev0, __noswap_vmovn_u16(__rev1));
+ __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai int16x8_t vmovn_high_s32(int16x4_t __p0, int32x4_t __p1) {
+ int16x8_t __ret;
+ __ret = vcombine_s16(__p0, vmovn_s32(__p1));
+ return __ret;
+}
+#else
+__ai int16x8_t vmovn_high_s32(int16x4_t __p0, int32x4_t __p1) {
+ int16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
+ int32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
+ int16x8_t __ret;
+ __ret = __noswap_vcombine_s16(__rev0, __noswap_vmovn_s32(__rev1));
+ __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai int32x4_t vmovn_high_s64(int32x2_t __p0, int64x2_t __p1) {
+ int32x4_t __ret;
+ __ret = vcombine_s32(__p0, vmovn_s64(__p1));
+ return __ret;
+}
+#else
+__ai int32x4_t vmovn_high_s64(int32x2_t __p0, int64x2_t __p1) {
+ int32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
+ int64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
+ int32x4_t __ret;
+ __ret = __noswap_vcombine_s32(__rev0, __noswap_vmovn_s64(__rev1));
+ __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai int8x16_t vmovn_high_s16(int8x8_t __p0, int16x8_t __p1) {
+ int8x16_t __ret;
+ __ret = vcombine_s8(__p0, vmovn_s16(__p1));
+ return __ret;
+}
+#else
+__ai int8x16_t vmovn_high_s16(int8x8_t __p0, int16x8_t __p1) {
+ int8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
+ int16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
+ int8x16_t __ret;
+ __ret = __noswap_vcombine_s8(__rev0, __noswap_vmovn_s16(__rev1));
+ __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai float64x2_t vmulq_f64(float64x2_t __p0, float64x2_t __p1) {
+ float64x2_t __ret;
+ __ret = __p0 * __p1;
+ return __ret;
+}
+#else
+__ai float64x2_t vmulq_f64(float64x2_t __p0, float64x2_t __p1) {
+ float64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
+ float64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
+ float64x2_t __ret;
+ __ret = __rev0 * __rev1;
+ __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai float64x1_t vmul_f64(float64x1_t __p0, float64x1_t __p1) {
+ float64x1_t __ret;
+ __ret = __p0 * __p1;
+ return __ret;
+}
+#else
+__ai float64x1_t vmul_f64(float64x1_t __p0, float64x1_t __p1) {
+ float64x1_t __ret;
+ __ret = __p0 * __p1;
+ return __ret;
+}
+#endif
+
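+/* vmuld_lane_f64 / vmuls_lane_f32: multiply a scalar by one selected
+ * lane of a vector.  A minimal sketch, with illustrative values:
+ *
+ *   float64x1_t v = vdup_n_f64(3.0);
+ *   float64_t   r = vmuld_lane_f64(2.0, v, 0);  // r == 6.0
+ *
+ * Only the vector lane source needs reversing on big-endian; the
+ * scalar operand is passed through unchanged there.
+ */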
+#ifdef __LITTLE_ENDIAN__
+#define vmuld_lane_f64(__p0_130, __p1_130, __p2_130) __extension__ ({ \
+ float64_t __s0_130 = __p0_130; \
+ float64x1_t __s1_130 = __p1_130; \
+ float64_t __ret_130; \
+ __ret_130 = __s0_130 * vget_lane_f64(__s1_130, __p2_130); \
+ __ret_130; \
+})
+#else
+#define vmuld_lane_f64(__p0_131, __p1_131, __p2_131) __extension__ ({ \
+ float64_t __s0_131 = __p0_131; \
+ float64x1_t __s1_131 = __p1_131; \
+ float64_t __ret_131; \
+ __ret_131 = __s0_131 * __noswap_vget_lane_f64(__s1_131, __p2_131); \
+ __ret_131; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vmuls_lane_f32(__p0_132, __p1_132, __p2_132) __extension__ ({ \
+ float32_t __s0_132 = __p0_132; \
+ float32x2_t __s1_132 = __p1_132; \
+ float32_t __ret_132; \
+ __ret_132 = __s0_132 * vget_lane_f32(__s1_132, __p2_132); \
+ __ret_132; \
+})
+#else
+#define vmuls_lane_f32(__p0_133, __p1_133, __p2_133) __extension__ ({ \
+ float32_t __s0_133 = __p0_133; \
+ float32x2_t __s1_133 = __p1_133; \
+ float32x2_t __rev1_133; __rev1_133 = __builtin_shufflevector(__s1_133, __s1_133, 1, 0); \
+ float32_t __ret_133; \
+ __ret_133 = __s0_133 * __noswap_vget_lane_f32(__rev1_133, __p2_133); \
+ __ret_133; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vmul_lane_f64(__p0, __p1, __p2) __extension__ ({ \
+ float64x1_t __s0 = __p0; \
+ float64x1_t __s1 = __p1; \
+ float64x1_t __ret; \
+ __ret = (float64x1_t) __builtin_neon_vmul_lane_v((int8x8_t)__s0, (int8x8_t)__s1, __p2, 10); \
+ __ret; \
+})
+#else
+#define vmul_lane_f64(__p0, __p1, __p2) __extension__ ({ \
+ float64x1_t __s0 = __p0; \
+ float64x1_t __s1 = __p1; \
+ float64x1_t __ret; \
+ __ret = (float64x1_t) __builtin_neon_vmul_lane_v((int8x8_t)__s0, (int8x8_t)__s1, __p2, 10); \
+ __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vmulq_lane_f64(__p0, __p1, __p2) __extension__ ({ \
+ float64x2_t __s0 = __p0; \
+ float64x1_t __s1 = __p1; \
+ float64x2_t __ret; \
+ __ret = __s0 * __builtin_shufflevector(__s1, __s1, __p2, __p2); \
+ __ret; \
+})
+#else
+#define vmulq_lane_f64(__p0, __p1, __p2) __extension__ ({ \
+ float64x2_t __s0 = __p0; \
+ float64x1_t __s1 = __p1; \
+ float64x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
+ float64x2_t __ret; \
+ __ret = __rev0 * __builtin_shufflevector(__s1, __s1, __p2, __p2); \
+ __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
+ __ret; \
+})
+#endif
+
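+/* The _laneq forms take their lane from a 128-bit (quad) vector; the
+ * selected lane index is broadcast with __builtin_shufflevector.  For
+ * example, with illustrative values and vmulq_laneq_u32 below:
+ *
+ *   uint32x4_t a = vdupq_n_u32(2);
+ *   uint32x4_t b = {1, 2, 3, 4};                // lane source
+ *   uint32x4_t r = vmulq_laneq_u32(a, b, 2);    // every lane: 2 * 3 == 6
+ */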
+#ifdef __LITTLE_ENDIAN__
+#define vmuld_laneq_f64(__p0_134, __p1_134, __p2_134) __extension__ ({ \
+ float64_t __s0_134 = __p0_134; \
+ float64x2_t __s1_134 = __p1_134; \
+ float64_t __ret_134; \
+ __ret_134 = __s0_134 * vgetq_lane_f64(__s1_134, __p2_134); \
+ __ret_134; \
+})
+#else
+#define vmuld_laneq_f64(__p0_135, __p1_135, __p2_135) __extension__ ({ \
+ float64_t __s0_135 = __p0_135; \
+ float64x2_t __s1_135 = __p1_135; \
+ float64x2_t __rev1_135; __rev1_135 = __builtin_shufflevector(__s1_135, __s1_135, 1, 0); \
+ float64_t __ret_135; \
+ __ret_135 = __s0_135 * __noswap_vgetq_lane_f64(__rev1_135, __p2_135); \
+ __ret_135; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vmuls_laneq_f32(__p0_136, __p1_136, __p2_136) __extension__ ({ \
+ float32_t __s0_136 = __p0_136; \
+ float32x4_t __s1_136 = __p1_136; \
+ float32_t __ret_136; \
+ __ret_136 = __s0_136 * vgetq_lane_f32(__s1_136, __p2_136); \
+ __ret_136; \
+})
+#else
+#define vmuls_laneq_f32(__p0_137, __p1_137, __p2_137) __extension__ ({ \
+ float32_t __s0_137 = __p0_137; \
+ float32x4_t __s1_137 = __p1_137; \
+ float32x4_t __rev1_137; __rev1_137 = __builtin_shufflevector(__s1_137, __s1_137, 3, 2, 1, 0); \
+ float32_t __ret_137; \
+ __ret_137 = __s0_137 * __noswap_vgetq_lane_f32(__rev1_137, __p2_137); \
+ __ret_137; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vmul_laneq_f64(__p0, __p1, __p2) __extension__ ({ \
+ float64x1_t __s0 = __p0; \
+ float64x2_t __s1 = __p1; \
+ float64x1_t __ret; \
+ __ret = (float64x1_t) __builtin_neon_vmul_laneq_v((int8x8_t)__s0, (int8x16_t)__s1, __p2, 10); \
+ __ret; \
+})
+#else
+#define vmul_laneq_f64(__p0, __p1, __p2) __extension__ ({ \
+ float64x1_t __s0 = __p0; \
+ float64x2_t __s1 = __p1; \
+ float64x2_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \
+ float64x1_t __ret; \
+ __ret = (float64x1_t) __builtin_neon_vmul_laneq_v((int8x8_t)__s0, (int8x16_t)__rev1, __p2, 10); \
+ __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vmulq_laneq_u32(__p0, __p1, __p2) __extension__ ({ \
+ uint32x4_t __s0 = __p0; \
+ uint32x4_t __s1 = __p1; \
+ uint32x4_t __ret; \
+ __ret = __s0 * __builtin_shufflevector(__s1, __s1, __p2, __p2, __p2, __p2); \
+ __ret; \
+})
+#else
+#define vmulq_laneq_u32(__p0, __p1, __p2) __extension__ ({ \
+ uint32x4_t __s0 = __p0; \
+ uint32x4_t __s1 = __p1; \
+ uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
+ uint32x4_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
+ uint32x4_t __ret; \
+ __ret = __rev0 * __builtin_shufflevector(__rev1, __rev1, __p2, __p2, __p2, __p2); \
+ __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
+ __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vmulq_laneq_u16(__p0, __p1, __p2) __extension__ ({ \
+ uint16x8_t __s0 = __p0; \
+ uint16x8_t __s1 = __p1; \
+ uint16x8_t __ret; \
+ __ret = __s0 * __builtin_shufflevector(__s1, __s1, __p2, __p2, __p2, __p2, __p2, __p2, __p2, __p2); \
+ __ret; \
+})
+#else
+#define vmulq_laneq_u16(__p0, __p1, __p2) __extension__ ({ \
+ uint16x8_t __s0 = __p0; \
+ uint16x8_t __s1 = __p1; \
+ uint16x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
+ uint16x8_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \
+ uint16x8_t __ret; \
+ __ret = __rev0 * __builtin_shufflevector(__rev1, __rev1, __p2, __p2, __p2, __p2, __p2, __p2, __p2, __p2); \
+ __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
+ __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vmulq_laneq_f64(__p0, __p1, __p2) __extension__ ({ \
+ float64x2_t __s0 = __p0; \
+ float64x2_t __s1 = __p1; \
+ float64x2_t __ret; \
+ __ret = __s0 * __builtin_shufflevector(__s1, __s1, __p2, __p2); \
+ __ret; \
+})
+#else
+#define vmulq_laneq_f64(__p0, __p1, __p2) __extension__ ({ \
+ float64x2_t __s0 = __p0; \
+ float64x2_t __s1 = __p1; \
+ float64x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
+ float64x2_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \
+ float64x2_t __ret; \
+ __ret = __rev0 * __builtin_shufflevector(__rev1, __rev1, __p2, __p2); \
+ __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
+ __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vmulq_laneq_f32(__p0, __p1, __p2) __extension__ ({ \
+ float32x4_t __s0 = __p0; \
+ float32x4_t __s1 = __p1; \
+ float32x4_t __ret; \
+ __ret = __s0 * __builtin_shufflevector(__s1, __s1, __p2, __p2, __p2, __p2); \
+ __ret; \
+})
+#else
+#define vmulq_laneq_f32(__p0, __p1, __p2) __extension__ ({ \
+ float32x4_t __s0 = __p0; \
+ float32x4_t __s1 = __p1; \
+ float32x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
+ float32x4_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
+ float32x4_t __ret; \
+ __ret = __rev0 * __builtin_shufflevector(__rev1, __rev1, __p2, __p2, __p2, __p2); \
+ __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
+ __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vmulq_laneq_s32(__p0, __p1, __p2) __extension__ ({ \
+ int32x4_t __s0 = __p0; \
+ int32x4_t __s1 = __p1; \
+ int32x4_t __ret; \
+ __ret = __s0 * __builtin_shufflevector(__s1, __s1, __p2, __p2, __p2, __p2); \
+ __ret; \
+})
+#else
+#define vmulq_laneq_s32(__p0, __p1, __p2) __extension__ ({ \
+ int32x4_t __s0 = __p0; \
+ int32x4_t __s1 = __p1; \
+ int32x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
+ int32x4_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
+ int32x4_t __ret; \
+ __ret = __rev0 * __builtin_shufflevector(__rev1, __rev1, __p2, __p2, __p2, __p2); \
+ __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
+ __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vmulq_laneq_s16(__p0, __p1, __p2) __extension__ ({ \
+ int16x8_t __s0 = __p0; \
+ int16x8_t __s1 = __p1; \
+ int16x8_t __ret; \
+ __ret = __s0 * __builtin_shufflevector(__s1, __s1, __p2, __p2, __p2, __p2, __p2, __p2, __p2, __p2); \
+ __ret; \
+})
+#else
+#define vmulq_laneq_s16(__p0, __p1, __p2) __extension__ ({ \
+ int16x8_t __s0 = __p0; \
+ int16x8_t __s1 = __p1; \
+ int16x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
+ int16x8_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \
+ int16x8_t __ret; \
+ __ret = __rev0 * __builtin_shufflevector(__rev1, __rev1, __p2, __p2, __p2, __p2, __p2, __p2, __p2, __p2); \
+ __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
+ __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vmul_laneq_u32(__p0, __p1, __p2) __extension__ ({ \
+ uint32x2_t __s0 = __p0; \
+ uint32x4_t __s1 = __p1; \
+ uint32x2_t __ret; \
+ __ret = __s0 * __builtin_shufflevector(__s1, __s1, __p2, __p2); \
+ __ret; \
+})
+#else
+#define vmul_laneq_u32(__p0, __p1, __p2) __extension__ ({ \
+ uint32x2_t __s0 = __p0; \
+ uint32x4_t __s1 = __p1; \
+ uint32x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
+ uint32x4_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
+ uint32x2_t __ret; \
+ __ret = __rev0 * __builtin_shufflevector(__rev1, __rev1, __p2, __p2); \
+ __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
+ __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vmul_laneq_u16(__p0, __p1, __p2) __extension__ ({ \
+ uint16x4_t __s0 = __p0; \
+ uint16x8_t __s1 = __p1; \
+ uint16x4_t __ret; \
+ __ret = __s0 * __builtin_shufflevector(__s1, __s1, __p2, __p2, __p2, __p2); \
+ __ret; \
+})
+#else
+#define vmul_laneq_u16(__p0, __p1, __p2) __extension__ ({ \
+ uint16x4_t __s0 = __p0; \
+ uint16x8_t __s1 = __p1; \
+ uint16x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
+ uint16x8_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \
+ uint16x4_t __ret; \
+ __ret = __rev0 * __builtin_shufflevector(__rev1, __rev1, __p2, __p2, __p2, __p2); \
+ __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
+ __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vmul_laneq_f32(__p0, __p1, __p2) __extension__ ({ \
+ float32x2_t __s0 = __p0; \
+ float32x4_t __s1 = __p1; \
+ float32x2_t __ret; \
+ __ret = __s0 * __builtin_shufflevector(__s1, __s1, __p2, __p2); \
+ __ret; \
+})
+#else
+#define vmul_laneq_f32(__p0, __p1, __p2) __extension__ ({ \
+ float32x2_t __s0 = __p0; \
+ float32x4_t __s1 = __p1; \
+ float32x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
+ float32x4_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
+ float32x2_t __ret; \
+ __ret = __rev0 * __builtin_shufflevector(__rev1, __rev1, __p2, __p2); \
+ __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
+ __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vmul_laneq_s32(__p0, __p1, __p2) __extension__ ({ \
+ int32x2_t __s0 = __p0; \
+ int32x4_t __s1 = __p1; \
+ int32x2_t __ret; \
+ __ret = __s0 * __builtin_shufflevector(__s1, __s1, __p2, __p2); \
+ __ret; \
+})
+#else
+#define vmul_laneq_s32(__p0, __p1, __p2) __extension__ ({ \
+ int32x2_t __s0 = __p0; \
+ int32x4_t __s1 = __p1; \
+ int32x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
+ int32x4_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
+ int32x2_t __ret; \
+ __ret = __rev0 * __builtin_shufflevector(__rev1, __rev1, __p2, __p2); \
+ __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
+ __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vmul_laneq_s16(__p0, __p1, __p2) __extension__ ({ \
+ int16x4_t __s0 = __p0; \
+ int16x8_t __s1 = __p1; \
+ int16x4_t __ret; \
+ __ret = __s0 * __builtin_shufflevector(__s1, __s1, __p2, __p2, __p2, __p2); \
+ __ret; \
+})
+#else
+#define vmul_laneq_s16(__p0, __p1, __p2) __extension__ ({ \
+ int16x4_t __s0 = __p0; \
+ int16x8_t __s1 = __p1; \
+ int16x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
+ int16x8_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \
+ int16x4_t __ret; \
+ __ret = __rev0 * __builtin_shufflevector(__rev1, __rev1, __p2, __p2, __p2, __p2); \
+ __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
+ __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai float64x1_t vmul_n_f64(float64x1_t __p0, float64_t __p1) {
+ float64x1_t __ret;
+ __ret = (float64x1_t) __builtin_neon_vmul_n_f64((int8x8_t)__p0, __p1);
+ return __ret;
+}
+#else
+__ai float64x1_t vmul_n_f64(float64x1_t __p0, float64_t __p1) {
+ float64x1_t __ret;
+ __ret = (float64x1_t) __builtin_neon_vmul_n_f64((int8x8_t)__p0, __p1);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai float64x2_t vmulq_n_f64(float64x2_t __p0, float64_t __p1) {
+ float64x2_t __ret;
+ __ret = __p0 * (float64x2_t) {__p1, __p1};
+ return __ret;
+}
+#else
+__ai float64x2_t vmulq_n_f64(float64x2_t __p0, float64_t __p1) {
+ float64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
+ float64x2_t __ret;
+ __ret = __rev0 * (float64x2_t) {__p1, __p1};
+ __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
+ return __ret;
+}
+#endif
+
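+/* vmull_p64 performs a 64x64 -> 128-bit carry-less (polynomial)
+ * multiply, the AArch64 PMULL instruction from the Crypto extension.
+ * A poly64_t scalar has no lane order, so the big-endian variant is
+ * identical to the little-endian one.
+ */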
+#ifdef __LITTLE_ENDIAN__
+__ai poly128_t vmull_p64(poly64_t __p0, poly64_t __p1) {
+ poly128_t __ret;
+ __ret = (poly128_t) __builtin_neon_vmull_p64(__p0, __p1);
+ return __ret;
+}
+#else
+__ai poly128_t vmull_p64(poly64_t __p0, poly64_t __p1) {
+ poly128_t __ret;
+ __ret = (poly128_t) __builtin_neon_vmull_p64(__p0, __p1);
+ return __ret;
+}
+__ai poly128_t __noswap_vmull_p64(poly64_t __p0, poly64_t __p1) {
+ poly128_t __ret;
+ __ret = (poly128_t) __builtin_neon_vmull_p64(__p0, __p1);
+ return __ret;
+}
+#endif
+
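+/* vmull_high_<type>: widening multiply of the high halves of two
+ * 128-bit vectors, equivalent to
+ * vmull_<type>(vget_high_<type>(a), vget_high_<type>(b)), which is
+ * exactly how the little-endian paths below are written.
+ */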
+#ifdef __LITTLE_ENDIAN__
+__ai poly16x8_t vmull_high_p8(poly8x16_t __p0, poly8x16_t __p1) {
+ poly16x8_t __ret;
+ __ret = vmull_p8(vget_high_p8(__p0), vget_high_p8(__p1));
+ return __ret;
+}
+#else
+__ai poly16x8_t vmull_high_p8(poly8x16_t __p0, poly8x16_t __p1) {
+ poly8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
+ poly8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
+ poly16x8_t __ret;
+ __ret = __noswap_vmull_p8(__noswap_vget_high_p8(__rev0), __noswap_vget_high_p8(__rev1));
+ __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint16x8_t vmull_high_u8(uint8x16_t __p0, uint8x16_t __p1) {
+ uint16x8_t __ret;
+ __ret = vmull_u8(vget_high_u8(__p0), vget_high_u8(__p1));
+ return __ret;
+}
+#else
+__ai uint16x8_t vmull_high_u8(uint8x16_t __p0, uint8x16_t __p1) {
+ uint8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
+ uint8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
+ uint16x8_t __ret;
+ __ret = __noswap_vmull_u8(__noswap_vget_high_u8(__rev0), __noswap_vget_high_u8(__rev1));
+ __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint64x2_t vmull_high_u32(uint32x4_t __p0, uint32x4_t __p1) {
+ uint64x2_t __ret;
+ __ret = vmull_u32(vget_high_u32(__p0), vget_high_u32(__p1));
+ return __ret;
+}
+#else
+__ai uint64x2_t vmull_high_u32(uint32x4_t __p0, uint32x4_t __p1) {
+ uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
+ uint32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
+ uint64x2_t __ret;
+ __ret = __noswap_vmull_u32(__noswap_vget_high_u32(__rev0), __noswap_vget_high_u32(__rev1));
+ __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint32x4_t vmull_high_u16(uint16x8_t __p0, uint16x8_t __p1) {
+ uint32x4_t __ret;
+ __ret = vmull_u16(vget_high_u16(__p0), vget_high_u16(__p1));
+ return __ret;
+}
+#else
+__ai uint32x4_t vmull_high_u16(uint16x8_t __p0, uint16x8_t __p1) {
+ uint16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
+ uint16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
+ uint32x4_t __ret;
+ __ret = __noswap_vmull_u16(__noswap_vget_high_u16(__rev0), __noswap_vget_high_u16(__rev1));
+ __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai int16x8_t vmull_high_s8(int8x16_t __p0, int8x16_t __p1) {
+ int16x8_t __ret;
+ __ret = vmull_s8(vget_high_s8(__p0), vget_high_s8(__p1));
+ return __ret;
+}
+#else
+__ai int16x8_t vmull_high_s8(int8x16_t __p0, int8x16_t __p1) {
+ int8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
+ int8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
+ int16x8_t __ret;
+ __ret = __noswap_vmull_s8(__noswap_vget_high_s8(__rev0), __noswap_vget_high_s8(__rev1));
+ __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai int64x2_t vmull_high_s32(int32x4_t __p0, int32x4_t __p1) {
+ int64x2_t __ret;
+ __ret = vmull_s32(vget_high_s32(__p0), vget_high_s32(__p1));
+ return __ret;
+}
+#else
+__ai int64x2_t vmull_high_s32(int32x4_t __p0, int32x4_t __p1) {
+ int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
+ int32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
+ int64x2_t __ret;
+ __ret = __noswap_vmull_s32(__noswap_vget_high_s32(__rev0), __noswap_vget_high_s32(__rev1));
+ __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai int32x4_t vmull_high_s16(int16x8_t __p0, int16x8_t __p1) {
+ int32x4_t __ret;
+ __ret = vmull_s16(vget_high_s16(__p0), vget_high_s16(__p1));
+ return __ret;
+}
+#else
+__ai int32x4_t vmull_high_s16(int16x8_t __p0, int16x8_t __p1) {
+ int16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
+ int16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
+ int32x4_t __ret;
+ __ret = __noswap_vmull_s16(__noswap_vget_high_s16(__rev0), __noswap_vget_high_s16(__rev1));
+ __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai poly128_t vmull_high_p64(poly64x2_t __p0, poly64x2_t __p1) {
+ poly128_t __ret;
+ __ret = vmull_p64((poly64_t)(vget_high_p64(__p0)), (poly64_t)(vget_high_p64(__p1)));
+ return __ret;
+}
+#else
+__ai poly128_t vmull_high_p64(poly64x2_t __p0, poly64x2_t __p1) {
+ poly64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
+ poly64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
+ poly128_t __ret;
+ __ret = __noswap_vmull_p64((poly64_t)(__noswap_vget_high_p64(__rev0)), (poly64_t)(__noswap_vget_high_p64(__rev1)));
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vmull_high_lane_u32(__p0, __p1, __p2) __extension__ ({ \
+ uint32x4_t __s0 = __p0; \
+ uint32x2_t __s1 = __p1; \
+ uint64x2_t __ret; \
+ __ret = vmull_u32(vget_high_u32(__s0), __builtin_shufflevector(__s1, __s1, __p2, __p2)); \
+ __ret; \
+})
+#else
+#define vmull_high_lane_u32(__p0, __p1, __p2) __extension__ ({ \
+ uint32x4_t __s0 = __p0; \
+ uint32x2_t __s1 = __p1; \
+ uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
+ uint32x2_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \
+ uint64x2_t __ret; \
+ __ret = __noswap_vmull_u32(__noswap_vget_high_u32(__rev0), __builtin_shufflevector(__rev1, __rev1, __p2, __p2)); \
+ __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
+ __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vmull_high_lane_u16(__p0, __p1, __p2) __extension__ ({ \
+ uint16x8_t __s0 = __p0; \
+ uint16x4_t __s1 = __p1; \
+ uint32x4_t __ret; \
+ __ret = vmull_u16(vget_high_u16(__s0), __builtin_shufflevector(__s1, __s1, __p2, __p2, __p2, __p2)); \
+ __ret; \
+})
+#else
+#define vmull_high_lane_u16(__p0, __p1, __p2) __extension__ ({ \
+ uint16x8_t __s0 = __p0; \
+ uint16x4_t __s1 = __p1; \
+ uint16x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
+ uint16x4_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
+ uint32x4_t __ret; \
+ __ret = __noswap_vmull_u16(__noswap_vget_high_u16(__rev0), __builtin_shufflevector(__rev1, __rev1, __p2, __p2, __p2, __p2)); \
+ __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
+ __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vmull_high_lane_s32(__p0, __p1, __p2) __extension__ ({ \
+ int32x4_t __s0 = __p0; \
+ int32x2_t __s1 = __p1; \
+ int64x2_t __ret; \
+ __ret = vmull_s32(vget_high_s32(__s0), __builtin_shufflevector(__s1, __s1, __p2, __p2)); \
+ __ret; \
+})
+#else
+#define vmull_high_lane_s32(__p0, __p1, __p2) __extension__ ({ \
+ int32x4_t __s0 = __p0; \
+ int32x2_t __s1 = __p1; \
+ int32x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
+ int32x2_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \
+ int64x2_t __ret; \
+ __ret = __noswap_vmull_s32(__noswap_vget_high_s32(__rev0), __builtin_shufflevector(__rev1, __rev1, __p2, __p2)); \
+ __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
+ __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vmull_high_lane_s16(__p0, __p1, __p2) __extension__ ({ \
+ int16x8_t __s0 = __p0; \
+ int16x4_t __s1 = __p1; \
+ int32x4_t __ret; \
+ __ret = vmull_s16(vget_high_s16(__s0), __builtin_shufflevector(__s1, __s1, __p2, __p2, __p2, __p2)); \
+ __ret; \
+})
+#else
+#define vmull_high_lane_s16(__p0, __p1, __p2) __extension__ ({ \
+ int16x8_t __s0 = __p0; \
+ int16x4_t __s1 = __p1; \
+ int16x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
+ int16x4_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
+ int32x4_t __ret; \
+ __ret = __noswap_vmull_s16(__noswap_vget_high_s16(__rev0), __builtin_shufflevector(__rev1, __rev1, __p2, __p2, __p2, __p2)); \
+ __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
+ __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vmull_high_laneq_u32(__p0, __p1, __p2) __extension__ ({ \
+ uint32x4_t __s0 = __p0; \
+ uint32x4_t __s1 = __p1; \
+ uint64x2_t __ret; \
+ __ret = vmull_u32(vget_high_u32(__s0), __builtin_shufflevector(__s1, __s1, __p2, __p2)); \
+ __ret; \
+})
+#else
+#define vmull_high_laneq_u32(__p0, __p1, __p2) __extension__ ({ \
+ uint32x4_t __s0 = __p0; \
+ uint32x4_t __s1 = __p1; \
+ uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
+ uint32x4_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
+ uint64x2_t __ret; \
+ __ret = __noswap_vmull_u32(__noswap_vget_high_u32(__rev0), __builtin_shufflevector(__rev1, __rev1, __p2, __p2)); \
+ __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
+ __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vmull_high_laneq_u16(__p0, __p1, __p2) __extension__ ({ \
+ uint16x8_t __s0 = __p0; \
+ uint16x8_t __s1 = __p1; \
+ uint32x4_t __ret; \
+ __ret = vmull_u16(vget_high_u16(__s0), __builtin_shufflevector(__s1, __s1, __p2, __p2, __p2, __p2)); \
+ __ret; \
+})
+#else
+#define vmull_high_laneq_u16(__p0, __p1, __p2) __extension__ ({ \
+ uint16x8_t __s0 = __p0; \
+ uint16x8_t __s1 = __p1; \
+ uint16x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
+ uint16x8_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \
+ uint32x4_t __ret; \
+ __ret = __noswap_vmull_u16(__noswap_vget_high_u16(__rev0), __builtin_shufflevector(__rev1, __rev1, __p2, __p2, __p2, __p2)); \
+ __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
+ __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vmull_high_laneq_s32(__p0, __p1, __p2) __extension__ ({ \
+ int32x4_t __s0 = __p0; \
+ int32x4_t __s1 = __p1; \
+ int64x2_t __ret; \
+ __ret = vmull_s32(vget_high_s32(__s0), __builtin_shufflevector(__s1, __s1, __p2, __p2)); \
+ __ret; \
+})
+#else
+#define vmull_high_laneq_s32(__p0, __p1, __p2) __extension__ ({ \
+ int32x4_t __s0 = __p0; \
+ int32x4_t __s1 = __p1; \
+ int32x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
+ int32x4_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
+ int64x2_t __ret; \
+ __ret = __noswap_vmull_s32(__noswap_vget_high_s32(__rev0), __builtin_shufflevector(__rev1, __rev1, __p2, __p2)); \
+ __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
+ __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vmull_high_laneq_s16(__p0, __p1, __p2) __extension__ ({ \
+ int16x8_t __s0 = __p0; \
+ int16x8_t __s1 = __p1; \
+ int32x4_t __ret; \
+ __ret = vmull_s16(vget_high_s16(__s0), __builtin_shufflevector(__s1, __s1, __p2, __p2, __p2, __p2)); \
+ __ret; \
+})
+#else
+#define vmull_high_laneq_s16(__p0, __p1, __p2) __extension__ ({ \
+ int16x8_t __s0 = __p0; \
+ int16x8_t __s1 = __p1; \
+ int16x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
+ int16x8_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \
+ int32x4_t __ret; \
+ __ret = __noswap_vmull_s16(__noswap_vget_high_s16(__rev0), __builtin_shufflevector(__rev1, __rev1, __p2, __p2, __p2, __p2)); \
+ __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
+ __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint64x2_t vmull_high_n_u32(uint32x4_t __p0, uint32_t __p1) {
+ uint64x2_t __ret;
+ __ret = vmull_n_u32(vget_high_u32(__p0), __p1);
+ return __ret;
+}
+#else
+__ai uint64x2_t vmull_high_n_u32(uint32x4_t __p0, uint32_t __p1) {
+ uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
+ uint64x2_t __ret;
+ __ret = __noswap_vmull_n_u32(__noswap_vget_high_u32(__rev0), __p1);
+ __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint32x4_t vmull_high_n_u16(uint16x8_t __p0, uint16_t __p1) {
+ uint32x4_t __ret;
+ __ret = vmull_n_u16(vget_high_u16(__p0), __p1);
+ return __ret;
+}
+#else
+__ai uint32x4_t vmull_high_n_u16(uint16x8_t __p0, uint16_t __p1) {
+ uint16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
+ uint32x4_t __ret;
+ __ret = __noswap_vmull_n_u16(__noswap_vget_high_u16(__rev0), __p1);
+ __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai int64x2_t vmull_high_n_s32(int32x4_t __p0, int32_t __p1) {
+ int64x2_t __ret;
+ __ret = vmull_n_s32(vget_high_s32(__p0), __p1);
+ return __ret;
+}
+#else
+__ai int64x2_t vmull_high_n_s32(int32x4_t __p0, int32_t __p1) {
+ int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
+ int64x2_t __ret;
+ __ret = __noswap_vmull_n_s32(__noswap_vget_high_s32(__rev0), __p1);
+ __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai int32x4_t vmull_high_n_s16(int16x8_t __p0, int16_t __p1) {
+ int32x4_t __ret;
+ __ret = vmull_n_s16(vget_high_s16(__p0), __p1);
+ return __ret;
+}
+#else
+__ai int32x4_t vmull_high_n_s16(int16x8_t __p0, int16_t __p1) {
+ int16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
+ int32x4_t __ret;
+ __ret = __noswap_vmull_n_s16(__noswap_vget_high_s16(__rev0), __p1);
+ __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vmull_laneq_u32(__p0, __p1, __p2) __extension__ ({ \
+ uint32x2_t __s0 = __p0; \
+ uint32x4_t __s1 = __p1; \
+ uint64x2_t __ret; \
+ __ret = vmull_u32(__s0, __builtin_shufflevector(__s1, __s1, __p2, __p2)); \
+ __ret; \
+})
+#else
+#define vmull_laneq_u32(__p0, __p1, __p2) __extension__ ({ \
+ uint32x2_t __s0 = __p0; \
+ uint32x4_t __s1 = __p1; \
+ uint32x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
+ uint32x4_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
+ uint64x2_t __ret; \
+ __ret = __noswap_vmull_u32(__rev0, __builtin_shufflevector(__rev1, __rev1, __p2, __p2)); \
+ __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
+ __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vmull_laneq_u16(__p0, __p1, __p2) __extension__ ({ \
+ uint16x4_t __s0 = __p0; \
+ uint16x8_t __s1 = __p1; \
+ uint32x4_t __ret; \
+ __ret = vmull_u16(__s0, __builtin_shufflevector(__s1, __s1, __p2, __p2, __p2, __p2)); \
+ __ret; \
+})
+#else
+#define vmull_laneq_u16(__p0, __p1, __p2) __extension__ ({ \
+ uint16x4_t __s0 = __p0; \
+ uint16x8_t __s1 = __p1; \
+ uint16x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
+ uint16x8_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \
+ uint32x4_t __ret; \
+ __ret = __noswap_vmull_u16(__rev0, __builtin_shufflevector(__rev1, __rev1, __p2, __p2, __p2, __p2)); \
+ __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
+ __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vmull_laneq_s32(__p0, __p1, __p2) __extension__ ({ \
+ int32x2_t __s0 = __p0; \
+ int32x4_t __s1 = __p1; \
+ int64x2_t __ret; \
+ __ret = vmull_s32(__s0, __builtin_shufflevector(__s1, __s1, __p2, __p2)); \
+ __ret; \
+})
+#else
+#define vmull_laneq_s32(__p0, __p1, __p2) __extension__ ({ \
+ int32x2_t __s0 = __p0; \
+ int32x4_t __s1 = __p1; \
+ int32x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
+ int32x4_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
+ int64x2_t __ret; \
+ __ret = __noswap_vmull_s32(__rev0, __builtin_shufflevector(__rev1, __rev1, __p2, __p2)); \
+ __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
+ __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vmull_laneq_s16(__p0, __p1, __p2) __extension__ ({ \
+ int16x4_t __s0 = __p0; \
+ int16x8_t __s1 = __p1; \
+ int32x4_t __ret; \
+ __ret = vmull_s16(__s0, __builtin_shufflevector(__s1, __s1, __p2, __p2, __p2, __p2)); \
+ __ret; \
+})
+#else
+#define vmull_laneq_s16(__p0, __p1, __p2) __extension__ ({ \
+ int16x4_t __s0 = __p0; \
+ int16x8_t __s1 = __p1; \
+ int16x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
+ int16x8_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \
+ int32x4_t __ret; \
+ __ret = __noswap_vmull_s16(__rev0, __builtin_shufflevector(__rev1, __rev1, __p2, __p2, __p2, __p2)); \
+ __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
+ __ret; \
+})
+#endif
+
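+/* vmulx (AArch64 FMULX): an ordinary floating-point multiply except
+ * that (+/-0) * (+/-Inf) returns +/-2.0 instead of NaN, with the sign
+ * given by the XOR of the operand signs.  For example,
+ * vmulxd_f64(0.0, -INFINITY) == -2.0, whereas 0.0 * -INFINITY is NaN.
+ */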
+#ifdef __LITTLE_ENDIAN__
+__ai float64x2_t vmulxq_f64(float64x2_t __p0, float64x2_t __p1) {
+ float64x2_t __ret;
+ __ret = (float64x2_t) __builtin_neon_vmulxq_v((int8x16_t)__p0, (int8x16_t)__p1, 42);
+ return __ret;
+}
+#else
+__ai float64x2_t vmulxq_f64(float64x2_t __p0, float64x2_t __p1) {
+ float64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
+ float64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
+ float64x2_t __ret;
+ __ret = (float64x2_t) __builtin_neon_vmulxq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 42);
+ __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
+ return __ret;
+}
+__ai float64x2_t __noswap_vmulxq_f64(float64x2_t __p0, float64x2_t __p1) {
+ float64x2_t __ret;
+ __ret = (float64x2_t) __builtin_neon_vmulxq_v((int8x16_t)__p0, (int8x16_t)__p1, 42);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai float32x4_t vmulxq_f32(float32x4_t __p0, float32x4_t __p1) {
+ float32x4_t __ret;
+ __ret = (float32x4_t) __builtin_neon_vmulxq_v((int8x16_t)__p0, (int8x16_t)__p1, 41);
+ return __ret;
+}
+#else
+__ai float32x4_t vmulxq_f32(float32x4_t __p0, float32x4_t __p1) {
+ float32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
+ float32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
+ float32x4_t __ret;
+ __ret = (float32x4_t) __builtin_neon_vmulxq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 41);
+ __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
+ return __ret;
+}
+__ai float32x4_t __noswap_vmulxq_f32(float32x4_t __p0, float32x4_t __p1) {
+ float32x4_t __ret;
+ __ret = (float32x4_t) __builtin_neon_vmulxq_v((int8x16_t)__p0, (int8x16_t)__p1, 41);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai float64x1_t vmulx_f64(float64x1_t __p0, float64x1_t __p1) {
+ float64x1_t __ret;
+ __ret = (float64x1_t) __builtin_neon_vmulx_v((int8x8_t)__p0, (int8x8_t)__p1, 10);
+ return __ret;
+}
+#else
+__ai float64x1_t vmulx_f64(float64x1_t __p0, float64x1_t __p1) {
+ float64x1_t __ret;
+ __ret = (float64x1_t) __builtin_neon_vmulx_v((int8x8_t)__p0, (int8x8_t)__p1, 10);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai float32x2_t vmulx_f32(float32x2_t __p0, float32x2_t __p1) {
+ float32x2_t __ret;
+ __ret = (float32x2_t) __builtin_neon_vmulx_v((int8x8_t)__p0, (int8x8_t)__p1, 9);
+ return __ret;
+}
+#else
+__ai float32x2_t vmulx_f32(float32x2_t __p0, float32x2_t __p1) {
+ float32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
+ float32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
+ float32x2_t __ret;
+ __ret = (float32x2_t) __builtin_neon_vmulx_v((int8x8_t)__rev0, (int8x8_t)__rev1, 9);
+ __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
+ return __ret;
+}
+__ai float32x2_t __noswap_vmulx_f32(float32x2_t __p0, float32x2_t __p1) {
+ float32x2_t __ret;
+ __ret = (float32x2_t) __builtin_neon_vmulx_v((int8x8_t)__p0, (int8x8_t)__p1, 9);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai float64_t vmulxd_f64(float64_t __p0, float64_t __p1) {
+ float64_t __ret;
+ __ret = (float64_t) __builtin_neon_vmulxd_f64(__p0, __p1);
+ return __ret;
+}
+#else
+__ai float64_t vmulxd_f64(float64_t __p0, float64_t __p1) {
+ float64_t __ret;
+ __ret = (float64_t) __builtin_neon_vmulxd_f64(__p0, __p1);
+ return __ret;
+}
+__ai float64_t __noswap_vmulxd_f64(float64_t __p0, float64_t __p1) {
+ float64_t __ret;
+ __ret = (float64_t) __builtin_neon_vmulxd_f64(__p0, __p1);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai float32_t vmulxs_f32(float32_t __p0, float32_t __p1) {
+ float32_t __ret;
+ __ret = (float32_t) __builtin_neon_vmulxs_f32(__p0, __p1);
+ return __ret;
+}
+#else
+__ai float32_t vmulxs_f32(float32_t __p0, float32_t __p1) {
+ float32_t __ret;
+ __ret = (float32_t) __builtin_neon_vmulxs_f32(__p0, __p1);
+ return __ret;
+}
+__ai float32_t __noswap_vmulxs_f32(float32_t __p0, float32_t __p1) {
+ float32_t __ret;
+ __ret = (float32_t) __builtin_neon_vmulxs_f32(__p0, __p1);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vmulxd_lane_f64(__p0_138, __p1_138, __p2_138) __extension__ ({ \
+ float64_t __s0_138 = __p0_138; \
+ float64x1_t __s1_138 = __p1_138; \
+ float64_t __ret_138; \
+ __ret_138 = vmulxd_f64(__s0_138, vget_lane_f64(__s1_138, __p2_138)); \
+ __ret_138; \
+})
+#else
+#define vmulxd_lane_f64(__p0_139, __p1_139, __p2_139) __extension__ ({ \
+ float64_t __s0_139 = __p0_139; \
+ float64x1_t __s1_139 = __p1_139; \
+ float64_t __ret_139; \
+ __ret_139 = __noswap_vmulxd_f64(__s0_139, __noswap_vget_lane_f64(__s1_139, __p2_139)); \
+ __ret_139; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vmulxs_lane_f32(__p0_140, __p1_140, __p2_140) __extension__ ({ \
+ float32_t __s0_140 = __p0_140; \
+ float32x2_t __s1_140 = __p1_140; \
+ float32_t __ret_140; \
+ __ret_140 = vmulxs_f32(__s0_140, vget_lane_f32(__s1_140, __p2_140)); \
+ __ret_140; \
+})
+#else
+#define vmulxs_lane_f32(__p0_141, __p1_141, __p2_141) __extension__ ({ \
+ float32_t __s0_141 = __p0_141; \
+ float32x2_t __s1_141 = __p1_141; \
+ float32x2_t __rev1_141; __rev1_141 = __builtin_shufflevector(__s1_141, __s1_141, 1, 0); \
+ float32_t __ret_141; \
+ __ret_141 = __noswap_vmulxs_f32(__s0_141, __noswap_vget_lane_f32(__rev1_141, __p2_141)); \
+ __ret_141; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vmulxq_lane_f64(__p0, __p1, __p2) __extension__ ({ \
+ float64x2_t __s0 = __p0; \
+ float64x1_t __s1 = __p1; \
+ float64x2_t __ret; \
+ __ret = vmulxq_f64(__s0, __builtin_shufflevector(__s1, __s1, __p2, __p2)); \
+ __ret; \
+})
+#else
+#define vmulxq_lane_f64(__p0, __p1, __p2) __extension__ ({ \
+ float64x2_t __s0 = __p0; \
+ float64x1_t __s1 = __p1; \
+ float64x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
+ float64x2_t __ret; \
+ __ret = __noswap_vmulxq_f64(__rev0, __builtin_shufflevector(__s1, __s1, __p2, __p2)); \
+ __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
+ __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vmulxq_lane_f32(__p0, __p1, __p2) __extension__ ({ \
+ float32x4_t __s0 = __p0; \
+ float32x2_t __s1 = __p1; \
+ float32x4_t __ret; \
+ __ret = vmulxq_f32(__s0, __builtin_shufflevector(__s1, __s1, __p2, __p2, __p2, __p2)); \
+ __ret; \
+})
+#else
+#define vmulxq_lane_f32(__p0, __p1, __p2) __extension__ ({ \
+ float32x4_t __s0 = __p0; \
+ float32x2_t __s1 = __p1; \
+ float32x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
+ float32x2_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \
+ float32x4_t __ret; \
+ __ret = __noswap_vmulxq_f32(__rev0, __builtin_shufflevector(__rev1, __rev1, __p2, __p2, __p2, __p2)); \
+ __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
+ __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vmulx_lane_f32(__p0, __p1, __p2) __extension__ ({ \
+ float32x2_t __s0 = __p0; \
+ float32x2_t __s1 = __p1; \
+ float32x2_t __ret; \
+ __ret = vmulx_f32(__s0, __builtin_shufflevector(__s1, __s1, __p2, __p2)); \
+ __ret; \
+})
+#else
+#define vmulx_lane_f32(__p0, __p1, __p2) __extension__ ({ \
+ float32x2_t __s0 = __p0; \
+ float32x2_t __s1 = __p1; \
+ float32x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
+ float32x2_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \
+ float32x2_t __ret; \
+ __ret = __noswap_vmulx_f32(__rev0, __builtin_shufflevector(__rev1, __rev1, __p2, __p2)); \
+ __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
+ __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vmulxd_laneq_f64(__p0_142, __p1_142, __p2_142) __extension__ ({ \
+ float64_t __s0_142 = __p0_142; \
+ float64x2_t __s1_142 = __p1_142; \
+ float64_t __ret_142; \
+ __ret_142 = vmulxd_f64(__s0_142, vgetq_lane_f64(__s1_142, __p2_142)); \
+ __ret_142; \
+})
+#else
+#define vmulxd_laneq_f64(__p0_143, __p1_143, __p2_143) __extension__ ({ \
+ float64_t __s0_143 = __p0_143; \
+ float64x2_t __s1_143 = __p1_143; \
+ float64x2_t __rev1_143; __rev1_143 = __builtin_shufflevector(__s1_143, __s1_143, 1, 0); \
+ float64_t __ret_143; \
+ __ret_143 = __noswap_vmulxd_f64(__s0_143, __noswap_vgetq_lane_f64(__rev1_143, __p2_143)); \
+ __ret_143; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vmulxs_laneq_f32(__p0_144, __p1_144, __p2_144) __extension__ ({ \
+ float32_t __s0_144 = __p0_144; \
+ float32x4_t __s1_144 = __p1_144; \
+ float32_t __ret_144; \
+ __ret_144 = vmulxs_f32(__s0_144, vgetq_lane_f32(__s1_144, __p2_144)); \
+ __ret_144; \
+})
+#else
+#define vmulxs_laneq_f32(__p0_145, __p1_145, __p2_145) __extension__ ({ \
+ float32_t __s0_145 = __p0_145; \
+ float32x4_t __s1_145 = __p1_145; \
+ float32x4_t __rev1_145; __rev1_145 = __builtin_shufflevector(__s1_145, __s1_145, 3, 2, 1, 0); \
+ float32_t __ret_145; \
+ __ret_145 = __noswap_vmulxs_f32(__s0_145, __noswap_vgetq_lane_f32(__rev1_145, __p2_145)); \
+ __ret_145; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vmulxq_laneq_f64(__p0, __p1, __p2) __extension__ ({ \
+ float64x2_t __s0 = __p0; \
+ float64x2_t __s1 = __p1; \
+ float64x2_t __ret; \
+ __ret = vmulxq_f64(__s0, __builtin_shufflevector(__s1, __s1, __p2, __p2)); \
+ __ret; \
+})
+#else
+#define vmulxq_laneq_f64(__p0, __p1, __p2) __extension__ ({ \
+ float64x2_t __s0 = __p0; \
+ float64x2_t __s1 = __p1; \
+ float64x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
+ float64x2_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \
+ float64x2_t __ret; \
+ __ret = __noswap_vmulxq_f64(__rev0, __builtin_shufflevector(__rev1, __rev1, __p2, __p2)); \
+ __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
+ __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vmulxq_laneq_f32(__p0, __p1, __p2) __extension__ ({ \
+ float32x4_t __s0 = __p0; \
+ float32x4_t __s1 = __p1; \
+ float32x4_t __ret; \
+ __ret = vmulxq_f32(__s0, __builtin_shufflevector(__s1, __s1, __p2, __p2, __p2, __p2)); \
+ __ret; \
+})
+#else
+#define vmulxq_laneq_f32(__p0, __p1, __p2) __extension__ ({ \
+ float32x4_t __s0 = __p0; \
+ float32x4_t __s1 = __p1; \
+ float32x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
+ float32x4_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
+ float32x4_t __ret; \
+ __ret = __noswap_vmulxq_f32(__rev0, __builtin_shufflevector(__rev1, __rev1, __p2, __p2, __p2, __p2)); \
+ __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
+ __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vmulx_laneq_f32(__p0, __p1, __p2) __extension__ ({ \
+ float32x2_t __s0 = __p0; \
+ float32x4_t __s1 = __p1; \
+ float32x2_t __ret; \
+ __ret = vmulx_f32(__s0, __builtin_shufflevector(__s1, __s1, __p2, __p2)); \
+ __ret; \
+})
+#else
+#define vmulx_laneq_f32(__p0, __p1, __p2) __extension__ ({ \
+ float32x2_t __s0 = __p0; \
+ float32x4_t __s1 = __p1; \
+ float32x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
+ float32x4_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
+ float32x2_t __ret; \
+ __ret = __noswap_vmulx_f32(__rev0, __builtin_shufflevector(__rev1, __rev1, __p2, __p2)); \
+ __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
+ __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai float64x2_t vnegq_f64(float64x2_t __p0) {
+ float64x2_t __ret;
+ __ret = -__p0;
+ return __ret;
+}
+#else
+__ai float64x2_t vnegq_f64(float64x2_t __p0) {
+ float64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
+ float64x2_t __ret;
+ __ret = -__rev0;
+ __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai int64x2_t vnegq_s64(int64x2_t __p0) {
+ int64x2_t __ret;
+ __ret = -__p0;
+ return __ret;
+}
+#else
+__ai int64x2_t vnegq_s64(int64x2_t __p0) {
+ int64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
+ int64x2_t __ret;
+ __ret = -__rev0;
+ __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai float64x1_t vneg_f64(float64x1_t __p0) {
+ float64x1_t __ret;
+ __ret = -__p0;
+ return __ret;
+}
+#else
+__ai float64x1_t vneg_f64(float64x1_t __p0) {
+ float64x1_t __ret;
+ __ret = -__p0;
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai int64x1_t vneg_s64(int64x1_t __p0) {
+ int64x1_t __ret;
+ __ret = -__p0;
+ return __ret;
+}
+#else
+__ai int64x1_t vneg_s64(int64x1_t __p0) {
+ int64x1_t __ret;
+ __ret = -__p0;
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai int64_t vnegd_s64(int64_t __p0) {
+ int64_t __ret;
+ __ret = (int64_t) __builtin_neon_vnegd_s64(__p0);
+ return __ret;
+}
+#else
+__ai int64_t vnegd_s64(int64_t __p0) {
+ int64_t __ret;
+ __ret = (int64_t) __builtin_neon_vnegd_s64(__p0);
+ return __ret;
+}
+#endif
+
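+/* vpaddq_<type> (AArch64 ADDP): pairwise add across both operands.
+ * Sums of adjacent lanes of __p0 fill the low half of the result and
+ * sums of adjacent lanes of __p1 the high half.  With illustrative
+ * values:
+ *
+ *   uint32x4_t a = {1, 2, 3, 4};
+ *   uint32x4_t b = {10, 20, 30, 40};
+ *   uint32x4_t r = vpaddq_u32(a, b);  // r = {3, 7, 30, 70}
+ */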
+#ifdef __LITTLE_ENDIAN__
+__ai uint8x16_t vpaddq_u8(uint8x16_t __p0, uint8x16_t __p1) {
+ uint8x16_t __ret;
+ __ret = (uint8x16_t) __builtin_neon_vpaddq_v((int8x16_t)__p0, (int8x16_t)__p1, 48);
+ return __ret;
+}
+#else
+__ai uint8x16_t vpaddq_u8(uint8x16_t __p0, uint8x16_t __p1) {
+ uint8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
+ uint8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
+ uint8x16_t __ret;
+ __ret = (uint8x16_t) __builtin_neon_vpaddq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 48);
+ __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint32x4_t vpaddq_u32(uint32x4_t __p0, uint32x4_t __p1) {
+ uint32x4_t __ret;
+ __ret = (uint32x4_t) __builtin_neon_vpaddq_v((int8x16_t)__p0, (int8x16_t)__p1, 50);
+ return __ret;
+}
+#else
+__ai uint32x4_t vpaddq_u32(uint32x4_t __p0, uint32x4_t __p1) {
+ uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
+ uint32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
+ uint32x4_t __ret;
+ __ret = (uint32x4_t) __builtin_neon_vpaddq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 50);
+ __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint64x2_t vpaddq_u64(uint64x2_t __p0, uint64x2_t __p1) {
+ uint64x2_t __ret;
+ __ret = (uint64x2_t) __builtin_neon_vpaddq_v((int8x16_t)__p0, (int8x16_t)__p1, 51);
+ return __ret;
+}
+#else
+__ai uint64x2_t vpaddq_u64(uint64x2_t __p0, uint64x2_t __p1) {
+ uint64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
+ uint64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
+ uint64x2_t __ret;
+ __ret = (uint64x2_t) __builtin_neon_vpaddq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 51);
+ __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint16x8_t vpaddq_u16(uint16x8_t __p0, uint16x8_t __p1) {
+ uint16x8_t __ret;
+ __ret = (uint16x8_t) __builtin_neon_vpaddq_v((int8x16_t)__p0, (int8x16_t)__p1, 49);
+ return __ret;
+}
+#else
+__ai uint16x8_t vpaddq_u16(uint16x8_t __p0, uint16x8_t __p1) {
+ uint16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
+ uint16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
+ uint16x8_t __ret;
+ __ret = (uint16x8_t) __builtin_neon_vpaddq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 49);
+ __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai int8x16_t vpaddq_s8(int8x16_t __p0, int8x16_t __p1) {
+ int8x16_t __ret;
+ __ret = (int8x16_t) __builtin_neon_vpaddq_v((int8x16_t)__p0, (int8x16_t)__p1, 32);
+ return __ret;
+}
+#else
+__ai int8x16_t vpaddq_s8(int8x16_t __p0, int8x16_t __p1) {
+ int8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
+ int8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
+ int8x16_t __ret;
+ __ret = (int8x16_t) __builtin_neon_vpaddq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 32);
+ __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai float64x2_t vpaddq_f64(float64x2_t __p0, float64x2_t __p1) {
+ float64x2_t __ret;
+ __ret = (float64x2_t) __builtin_neon_vpaddq_v((int8x16_t)__p0, (int8x16_t)__p1, 42);
+ return __ret;
+}
+#else
+__ai float64x2_t vpaddq_f64(float64x2_t __p0, float64x2_t __p1) {
+ float64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
+ float64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
+ float64x2_t __ret;
+ __ret = (float64x2_t) __builtin_neon_vpaddq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 42);
+ __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai float32x4_t vpaddq_f32(float32x4_t __p0, float32x4_t __p1) {
+ float32x4_t __ret;
+ __ret = (float32x4_t) __builtin_neon_vpaddq_v((int8x16_t)__p0, (int8x16_t)__p1, 41);
+ return __ret;
+}
+#else
+__ai float32x4_t vpaddq_f32(float32x4_t __p0, float32x4_t __p1) {
+ float32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
+ float32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
+ float32x4_t __ret;
+ __ret = (float32x4_t) __builtin_neon_vpaddq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 41);
+ __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai int32x4_t vpaddq_s32(int32x4_t __p0, int32x4_t __p1) {
+ int32x4_t __ret;
+ __ret = (int32x4_t) __builtin_neon_vpaddq_v((int8x16_t)__p0, (int8x16_t)__p1, 34);
+ return __ret;
+}
+#else
+__ai int32x4_t vpaddq_s32(int32x4_t __p0, int32x4_t __p1) {
+ int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
+ int32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
+ int32x4_t __ret;
+ __ret = (int32x4_t) __builtin_neon_vpaddq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 34);
+ __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai int64x2_t vpaddq_s64(int64x2_t __p0, int64x2_t __p1) {
+ int64x2_t __ret;
+ __ret = (int64x2_t) __builtin_neon_vpaddq_v((int8x16_t)__p0, (int8x16_t)__p1, 35);
+ return __ret;
+}
+#else
+__ai int64x2_t vpaddq_s64(int64x2_t __p0, int64x2_t __p1) {
+ int64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
+ int64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
+ int64x2_t __ret;
+ __ret = (int64x2_t) __builtin_neon_vpaddq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 35);
+ __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai int16x8_t vpaddq_s16(int16x8_t __p0, int16x8_t __p1) {
+ int16x8_t __ret;
+ __ret = (int16x8_t) __builtin_neon_vpaddq_v((int8x16_t)__p0, (int8x16_t)__p1, 33);
+ return __ret;
+}
+#else
+__ai int16x8_t vpaddq_s16(int16x8_t __p0, int16x8_t __p1) {
+ int16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
+ int16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
+ int16x8_t __ret;
+ __ret = (int16x8_t) __builtin_neon_vpaddq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 33);
+ __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
+ return __ret;
+}
+#endif
+
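+/* The scalar vpaddd/vpadds forms reduce one two-lane vector to a
+ * single value: vpaddd_u64({x, y}) == x + y.  Because addition is
+ * commutative, the big-endian lane reversal cannot change the result.
+ */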
+#ifdef __LITTLE_ENDIAN__
+__ai uint64_t vpaddd_u64(uint64x2_t __p0) {
+ uint64_t __ret;
+ __ret = (uint64_t) __builtin_neon_vpaddd_u64((int8x16_t)__p0);
+ return __ret;
+}
+#else
+__ai uint64_t vpaddd_u64(uint64x2_t __p0) {
+ uint64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
+ uint64_t __ret;
+ __ret = (uint64_t) __builtin_neon_vpaddd_u64((int8x16_t)__rev0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai float64_t vpaddd_f64(float64x2_t __p0) {
+ float64_t __ret;
+ __ret = (float64_t) __builtin_neon_vpaddd_f64((int8x16_t)__p0);
+ return __ret;
+}
+#else
+__ai float64_t vpaddd_f64(float64x2_t __p0) {
+ float64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
+ float64_t __ret;
+ __ret = (float64_t) __builtin_neon_vpaddd_f64((int8x16_t)__rev0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai int64_t vpaddd_s64(int64x2_t __p0) {
+ int64_t __ret;
+ __ret = (int64_t) __builtin_neon_vpaddd_s64((int8x16_t)__p0);
+ return __ret;
+}
+#else
+__ai int64_t vpaddd_s64(int64x2_t __p0) {
+ int64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
+ int64_t __ret;
+ __ret = (int64_t) __builtin_neon_vpaddd_s64((int8x16_t)__rev0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai float32_t vpadds_f32(float32x2_t __p0) {
+ float32_t __ret;
+ __ret = (float32_t) __builtin_neon_vpadds_f32((int8x8_t)__p0);
+ return __ret;
+}
+#else
+__ai float32_t vpadds_f32(float32x2_t __p0) {
+ float32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
+ float32_t __ret;
+ __ret = (float32_t) __builtin_neon_vpadds_f32((int8x8_t)__rev0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint8x16_t vpmaxq_u8(uint8x16_t __p0, uint8x16_t __p1) {
+ uint8x16_t __ret;
+ __ret = (uint8x16_t) __builtin_neon_vpmaxq_v((int8x16_t)__p0, (int8x16_t)__p1, 48);
+ return __ret;
+}
+#else
+__ai uint8x16_t vpmaxq_u8(uint8x16_t __p0, uint8x16_t __p1) {
+ uint8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
+ uint8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
+ uint8x16_t __ret;
+ __ret = (uint8x16_t) __builtin_neon_vpmaxq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 48);
+ __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint32x4_t vpmaxq_u32(uint32x4_t __p0, uint32x4_t __p1) {
+ uint32x4_t __ret;
+ __ret = (uint32x4_t) __builtin_neon_vpmaxq_v((int8x16_t)__p0, (int8x16_t)__p1, 50);
+ return __ret;
+}
+#else
+__ai uint32x4_t vpmaxq_u32(uint32x4_t __p0, uint32x4_t __p1) {
+ uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
+ uint32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
+ uint32x4_t __ret;
+ __ret = (uint32x4_t) __builtin_neon_vpmaxq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 50);
+ __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint16x8_t vpmaxq_u16(uint16x8_t __p0, uint16x8_t __p1) {
+ uint16x8_t __ret;
+ __ret = (uint16x8_t) __builtin_neon_vpmaxq_v((int8x16_t)__p0, (int8x16_t)__p1, 49);
+ return __ret;
+}
+#else
+__ai uint16x8_t vpmaxq_u16(uint16x8_t __p0, uint16x8_t __p1) {
+ uint16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
+ uint16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
+ uint16x8_t __ret;
+ __ret = (uint16x8_t) __builtin_neon_vpmaxq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 49);
+ __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai int8x16_t vpmaxq_s8(int8x16_t __p0, int8x16_t __p1) {
+ int8x16_t __ret;
+ __ret = (int8x16_t) __builtin_neon_vpmaxq_v((int8x16_t)__p0, (int8x16_t)__p1, 32);
+ return __ret;
+}
+#else
+__ai int8x16_t vpmaxq_s8(int8x16_t __p0, int8x16_t __p1) {
+ int8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
+ int8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
+ int8x16_t __ret;
+ __ret = (int8x16_t) __builtin_neon_vpmaxq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 32);
+ __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai float64x2_t vpmaxq_f64(float64x2_t __p0, float64x2_t __p1) {
+ float64x2_t __ret;
+ __ret = (float64x2_t) __builtin_neon_vpmaxq_v((int8x16_t)__p0, (int8x16_t)__p1, 42);
+ return __ret;
+}
+#else
+__ai float64x2_t vpmaxq_f64(float64x2_t __p0, float64x2_t __p1) {
+ float64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
+ float64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
+ float64x2_t __ret;
+ __ret = (float64x2_t) __builtin_neon_vpmaxq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 42);
+ __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai float32x4_t vpmaxq_f32(float32x4_t __p0, float32x4_t __p1) {
+ float32x4_t __ret;
+ __ret = (float32x4_t) __builtin_neon_vpmaxq_v((int8x16_t)__p0, (int8x16_t)__p1, 41);
+ return __ret;
+}
+#else
+__ai float32x4_t vpmaxq_f32(float32x4_t __p0, float32x4_t __p1) {
+ float32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
+ float32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
+ float32x4_t __ret;
+ __ret = (float32x4_t) __builtin_neon_vpmaxq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 41);
+ __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai int32x4_t vpmaxq_s32(int32x4_t __p0, int32x4_t __p1) {
+ int32x4_t __ret;
+ __ret = (int32x4_t) __builtin_neon_vpmaxq_v((int8x16_t)__p0, (int8x16_t)__p1, 34);
+ return __ret;
+}
+#else
+__ai int32x4_t vpmaxq_s32(int32x4_t __p0, int32x4_t __p1) {
+ int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
+ int32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
+ int32x4_t __ret;
+ __ret = (int32x4_t) __builtin_neon_vpmaxq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 34);
+ __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai int16x8_t vpmaxq_s16(int16x8_t __p0, int16x8_t __p1) {
+ int16x8_t __ret;
+ __ret = (int16x8_t) __builtin_neon_vpmaxq_v((int8x16_t)__p0, (int8x16_t)__p1, 33);
+ return __ret;
+}
+#else
+__ai int16x8_t vpmaxq_s16(int16x8_t __p0, int16x8_t __p1) {
+ int16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
+ int16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
+ int16x8_t __ret;
+ __ret = (int16x8_t) __builtin_neon_vpmaxq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 33);
+ __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai float64_t vpmaxqd_f64(float64x2_t __p0) {
+ float64_t __ret;
+ __ret = (float64_t) __builtin_neon_vpmaxqd_f64((int8x16_t)__p0);
+ return __ret;
+}
+#else
+__ai float64_t vpmaxqd_f64(float64x2_t __p0) {
+ float64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
+ float64_t __ret;
+ __ret = (float64_t) __builtin_neon_vpmaxqd_f64((int8x16_t)__rev0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai float32_t vpmaxs_f32(float32x2_t __p0) {
+ float32_t __ret;
+ __ret = (float32_t) __builtin_neon_vpmaxs_f32((int8x8_t)__p0);
+ return __ret;
+}
+#else
+__ai float32_t vpmaxs_f32(float32x2_t __p0) {
+ float32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
+ float32_t __ret;
+ __ret = (float32_t) __builtin_neon_vpmaxs_f32((int8x8_t)__rev0);
+ return __ret;
+}
+#endif
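+
+/*
+ * The vpmaxq_* intrinsics above are pairwise maxima (UMAXP/SMAXP/FMAXP):
+ * each result lane is the maximum of two adjacent input lanes, with the low
+ * half of the result drawn from __p0 and the high half from __p1.  The
+ * scalar forms vpmaxqd_f64 and vpmaxs_f32 reduce a two-lane vector to its
+ * larger element.  An illustrative sketch (values are hypothetical):
+ *
+ *   int8x16_t a = vdupq_n_s8(3), b = vdupq_n_s8(7);
+ *   int8x16_t m = vpmaxq_s8(a, b);   // lanes 0-7 are 3, lanes 8-15 are 7
+ *   float32x2_t v = vset_lane_f32(2.0f, vdup_n_f32(1.0f), 1);
+ *   float32_t  s = vpmaxs_f32(v);    // 2.0f
+ */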
+
+#ifdef __LITTLE_ENDIAN__
+__ai float64x2_t vpmaxnmq_f64(float64x2_t __p0, float64x2_t __p1) {
+ float64x2_t __ret;
+ __ret = (float64x2_t) __builtin_neon_vpmaxnmq_v((int8x16_t)__p0, (int8x16_t)__p1, 42);
+ return __ret;
+}
+#else
+__ai float64x2_t vpmaxnmq_f64(float64x2_t __p0, float64x2_t __p1) {
+ float64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
+ float64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
+ float64x2_t __ret;
+ __ret = (float64x2_t) __builtin_neon_vpmaxnmq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 42);
+ __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai float32x4_t vpmaxnmq_f32(float32x4_t __p0, float32x4_t __p1) {
+ float32x4_t __ret;
+ __ret = (float32x4_t) __builtin_neon_vpmaxnmq_v((int8x16_t)__p0, (int8x16_t)__p1, 41);
+ return __ret;
+}
+#else
+__ai float32x4_t vpmaxnmq_f32(float32x4_t __p0, float32x4_t __p1) {
+ float32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
+ float32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
+ float32x4_t __ret;
+ __ret = (float32x4_t) __builtin_neon_vpmaxnmq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 41);
+ __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai float32x2_t vpmaxnm_f32(float32x2_t __p0, float32x2_t __p1) {
+ float32x2_t __ret;
+ __ret = (float32x2_t) __builtin_neon_vpmaxnm_v((int8x8_t)__p0, (int8x8_t)__p1, 9);
+ return __ret;
+}
+#else
+__ai float32x2_t vpmaxnm_f32(float32x2_t __p0, float32x2_t __p1) {
+ float32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
+ float32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
+ float32x2_t __ret;
+ __ret = (float32x2_t) __builtin_neon_vpmaxnm_v((int8x8_t)__rev0, (int8x8_t)__rev1, 9);
+ __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai float64_t vpmaxnmqd_f64(float64x2_t __p0) {
+ float64_t __ret;
+ __ret = (float64_t) __builtin_neon_vpmaxnmqd_f64((int8x16_t)__p0);
+ return __ret;
+}
+#else
+__ai float64_t vpmaxnmqd_f64(float64x2_t __p0) {
+ float64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
+ float64_t __ret;
+ __ret = (float64_t) __builtin_neon_vpmaxnmqd_f64((int8x16_t)__rev0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai float32_t vpmaxnms_f32(float32x2_t __p0) {
+ float32_t __ret;
+ __ret = (float32_t) __builtin_neon_vpmaxnms_f32((int8x8_t)__p0);
+ return __ret;
+}
+#else
+__ai float32_t vpmaxnms_f32(float32x2_t __p0) {
+ float32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
+ float32_t __ret;
+ __ret = (float32_t) __builtin_neon_vpmaxnms_f32((int8x8_t)__rev0);
+ return __ret;
+}
+#endif
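+
+/*
+ * The vpmaxnm* forms are the IEEE 754-2008 "maxNum" pairwise maxima
+ * (FMAXNMP): where plain vpmax* would propagate a NaN, these return the
+ * numeric operand when exactly one element of a pair is a quiet NaN.
+ */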
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint8x16_t vpminq_u8(uint8x16_t __p0, uint8x16_t __p1) {
+ uint8x16_t __ret;
+ __ret = (uint8x16_t) __builtin_neon_vpminq_v((int8x16_t)__p0, (int8x16_t)__p1, 48);
+ return __ret;
+}
+#else
+__ai uint8x16_t vpminq_u8(uint8x16_t __p0, uint8x16_t __p1) {
+ uint8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
+ uint8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
+ uint8x16_t __ret;
+ __ret = (uint8x16_t) __builtin_neon_vpminq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 48);
+ __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint32x4_t vpminq_u32(uint32x4_t __p0, uint32x4_t __p1) {
+ uint32x4_t __ret;
+ __ret = (uint32x4_t) __builtin_neon_vpminq_v((int8x16_t)__p0, (int8x16_t)__p1, 50);
+ return __ret;
+}
+#else
+__ai uint32x4_t vpminq_u32(uint32x4_t __p0, uint32x4_t __p1) {
+ uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
+ uint32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
+ uint32x4_t __ret;
+ __ret = (uint32x4_t) __builtin_neon_vpminq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 50);
+ __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint16x8_t vpminq_u16(uint16x8_t __p0, uint16x8_t __p1) {
+ uint16x8_t __ret;
+ __ret = (uint16x8_t) __builtin_neon_vpminq_v((int8x16_t)__p0, (int8x16_t)__p1, 49);
+ return __ret;
+}
+#else
+__ai uint16x8_t vpminq_u16(uint16x8_t __p0, uint16x8_t __p1) {
+ uint16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
+ uint16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
+ uint16x8_t __ret;
+ __ret = (uint16x8_t) __builtin_neon_vpminq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 49);
+ __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai int8x16_t vpminq_s8(int8x16_t __p0, int8x16_t __p1) {
+ int8x16_t __ret;
+ __ret = (int8x16_t) __builtin_neon_vpminq_v((int8x16_t)__p0, (int8x16_t)__p1, 32);
+ return __ret;
+}
+#else
+__ai int8x16_t vpminq_s8(int8x16_t __p0, int8x16_t __p1) {
+ int8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
+ int8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
+ int8x16_t __ret;
+ __ret = (int8x16_t) __builtin_neon_vpminq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 32);
+ __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai float64x2_t vpminq_f64(float64x2_t __p0, float64x2_t __p1) {
+ float64x2_t __ret;
+ __ret = (float64x2_t) __builtin_neon_vpminq_v((int8x16_t)__p0, (int8x16_t)__p1, 42);
+ return __ret;
+}
+#else
+__ai float64x2_t vpminq_f64(float64x2_t __p0, float64x2_t __p1) {
+ float64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
+ float64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
+ float64x2_t __ret;
+ __ret = (float64x2_t) __builtin_neon_vpminq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 42);
+ __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai float32x4_t vpminq_f32(float32x4_t __p0, float32x4_t __p1) {
+ float32x4_t __ret;
+ __ret = (float32x4_t) __builtin_neon_vpminq_v((int8x16_t)__p0, (int8x16_t)__p1, 41);
+ return __ret;
+}
+#else
+__ai float32x4_t vpminq_f32(float32x4_t __p0, float32x4_t __p1) {
+ float32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
+ float32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
+ float32x4_t __ret;
+ __ret = (float32x4_t) __builtin_neon_vpminq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 41);
+ __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai int32x4_t vpminq_s32(int32x4_t __p0, int32x4_t __p1) {
+ int32x4_t __ret;
+ __ret = (int32x4_t) __builtin_neon_vpminq_v((int8x16_t)__p0, (int8x16_t)__p1, 34);
+ return __ret;
+}
+#else
+__ai int32x4_t vpminq_s32(int32x4_t __p0, int32x4_t __p1) {
+ int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
+ int32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
+ int32x4_t __ret;
+ __ret = (int32x4_t) __builtin_neon_vpminq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 34);
+ __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai int16x8_t vpminq_s16(int16x8_t __p0, int16x8_t __p1) {
+ int16x8_t __ret;
+ __ret = (int16x8_t) __builtin_neon_vpminq_v((int8x16_t)__p0, (int8x16_t)__p1, 33);
+ return __ret;
+}
+#else
+__ai int16x8_t vpminq_s16(int16x8_t __p0, int16x8_t __p1) {
+ int16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
+ int16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
+ int16x8_t __ret;
+ __ret = (int16x8_t) __builtin_neon_vpminq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 33);
+ __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai float64_t vpminqd_f64(float64x2_t __p0) {
+ float64_t __ret;
+ __ret = (float64_t) __builtin_neon_vpminqd_f64((int8x16_t)__p0);
+ return __ret;
+}
+#else
+__ai float64_t vpminqd_f64(float64x2_t __p0) {
+ float64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
+ float64_t __ret;
+ __ret = (float64_t) __builtin_neon_vpminqd_f64((int8x16_t)__rev0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai float32_t vpmins_f32(float32x2_t __p0) {
+ float32_t __ret;
+ __ret = (float32_t) __builtin_neon_vpmins_f32((int8x8_t)__p0);
+ return __ret;
+}
+#else
+__ai float32_t vpmins_f32(float32x2_t __p0) {
+ float32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
+ float32_t __ret;
+ __ret = (float32_t) __builtin_neon_vpmins_f32((int8x8_t)__rev0);
+ return __ret;
+}
+#endif
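+
+/*
+ * The vpmin* family mirrors vpmax* above with minimum in place of maximum
+ * (UMINP/SMINP/FMINP), including the scalar reductions vpminqd_f64 and
+ * vpmins_f32.
+ */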
+
+#ifdef __LITTLE_ENDIAN__
+__ai float64x2_t vpminnmq_f64(float64x2_t __p0, float64x2_t __p1) {
+ float64x2_t __ret;
+ __ret = (float64x2_t) __builtin_neon_vpminnmq_v((int8x16_t)__p0, (int8x16_t)__p1, 42);
+ return __ret;
+}
+#else
+__ai float64x2_t vpminnmq_f64(float64x2_t __p0, float64x2_t __p1) {
+ float64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
+ float64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
+ float64x2_t __ret;
+ __ret = (float64x2_t) __builtin_neon_vpminnmq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 42);
+ __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai float32x4_t vpminnmq_f32(float32x4_t __p0, float32x4_t __p1) {
+ float32x4_t __ret;
+ __ret = (float32x4_t) __builtin_neon_vpminnmq_v((int8x16_t)__p0, (int8x16_t)__p1, 41);
+ return __ret;
+}
+#else
+__ai float32x4_t vpminnmq_f32(float32x4_t __p0, float32x4_t __p1) {
+ float32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
+ float32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
+ float32x4_t __ret;
+ __ret = (float32x4_t) __builtin_neon_vpminnmq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 41);
+ __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai float32x2_t vpminnm_f32(float32x2_t __p0, float32x2_t __p1) {
+ float32x2_t __ret;
+ __ret = (float32x2_t) __builtin_neon_vpminnm_v((int8x8_t)__p0, (int8x8_t)__p1, 9);
+ return __ret;
+}
+#else
+__ai float32x2_t vpminnm_f32(float32x2_t __p0, float32x2_t __p1) {
+ float32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
+ float32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
+ float32x2_t __ret;
+ __ret = (float32x2_t) __builtin_neon_vpminnm_v((int8x8_t)__rev0, (int8x8_t)__rev1, 9);
+ __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai float64_t vpminnmqd_f64(float64x2_t __p0) {
+ float64_t __ret;
+ __ret = (float64_t) __builtin_neon_vpminnmqd_f64((int8x16_t)__p0);
+ return __ret;
+}
+#else
+__ai float64_t vpminnmqd_f64(float64x2_t __p0) {
+ float64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
+ float64_t __ret;
+ __ret = (float64_t) __builtin_neon_vpminnmqd_f64((int8x16_t)__rev0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai float32_t vpminnms_f32(float32x2_t __p0) {
+ float32_t __ret;
+ __ret = (float32_t) __builtin_neon_vpminnms_f32((int8x8_t)__p0);
+ return __ret;
+}
+#else
+__ai float32_t vpminnms_f32(float32x2_t __p0) {
+ float32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
+ float32_t __ret;
+ __ret = (float32_t) __builtin_neon_vpminnms_f32((int8x8_t)__rev0);
+ return __ret;
+}
+#endif
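+
+/*
+ * Likewise, vpminnm* mirrors vpmaxnm*: pairwise IEEE "minNum" (FMINNMP),
+ * preferring the numeric element of a pair when the other is a quiet NaN.
+ */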
+
+#ifdef __LITTLE_ENDIAN__
+__ai int64x2_t vqabsq_s64(int64x2_t __p0) {
+ int64x2_t __ret;
+ __ret = (int64x2_t) __builtin_neon_vqabsq_v((int8x16_t)__p0, 35);
+ return __ret;
+}
+#else
+__ai int64x2_t vqabsq_s64(int64x2_t __p0) {
+ int64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
+ int64x2_t __ret;
+ __ret = (int64x2_t) __builtin_neon_vqabsq_v((int8x16_t)__rev0, 35);
+ __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai int64x1_t vqabs_s64(int64x1_t __p0) {
+ int64x1_t __ret;
+ __ret = (int64x1_t) __builtin_neon_vqabs_v((int8x8_t)__p0, 3);
+ return __ret;
+}
+#else
+__ai int64x1_t vqabs_s64(int64x1_t __p0) {
+ int64x1_t __ret;
+ __ret = (int64x1_t) __builtin_neon_vqabs_v((int8x8_t)__p0, 3);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai int8_t vqabsb_s8(int8_t __p0) {
+ int8_t __ret;
+ __ret = (int8_t) __builtin_neon_vqabsb_s8(__p0);
+ return __ret;
+}
+#else
+__ai int8_t vqabsb_s8(int8_t __p0) {
+ int8_t __ret;
+ __ret = (int8_t) __builtin_neon_vqabsb_s8(__p0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai int32_t vqabss_s32(int32_t __p0) {
+ int32_t __ret;
+ __ret = (int32_t) __builtin_neon_vqabss_s32(__p0);
+ return __ret;
+}
+#else
+__ai int32_t vqabss_s32(int32_t __p0) {
+ int32_t __ret;
+ __ret = (int32_t) __builtin_neon_vqabss_s32(__p0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai int64_t vqabsd_s64(int64_t __p0) {
+ int64_t __ret;
+ __ret = (int64_t) __builtin_neon_vqabsd_s64(__p0);
+ return __ret;
+}
+#else
+__ai int64_t vqabsd_s64(int64_t __p0) {
+ int64_t __ret;
+ __ret = (int64_t) __builtin_neon_vqabsd_s64(__p0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai int16_t vqabsh_s16(int16_t __p0) {
+ int16_t __ret;
+ __ret = (int16_t) __builtin_neon_vqabsh_s16(__p0);
+ return __ret;
+}
+#else
+__ai int16_t vqabsh_s16(int16_t __p0) {
+ int16_t __ret;
+ __ret = (int16_t) __builtin_neon_vqabsh_s16(__p0);
+ return __ret;
+}
+#endif
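+
+/*
+ * The vqabs* intrinsics are saturating absolute values (SQABS): the most
+ * negative input clamps to the maximum instead of wrapping, so
+ * vqabsb_s8(-128) == 127 and vqabss_s32(INT32_MIN) == INT32_MAX.  The
+ * scalar forms need no lane handling, which is why both branches of each
+ * endian #ifdef above are identical; the generator emits them regardless.
+ */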
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint8_t vqaddb_u8(uint8_t __p0, uint8_t __p1) {
+ uint8_t __ret;
+ __ret = (uint8_t) __builtin_neon_vqaddb_u8(__p0, __p1);
+ return __ret;
+}
+#else
+__ai uint8_t vqaddb_u8(uint8_t __p0, uint8_t __p1) {
+ uint8_t __ret;
+ __ret = (uint8_t) __builtin_neon_vqaddb_u8(__p0, __p1);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint32_t vqadds_u32(uint32_t __p0, uint32_t __p1) {
+ uint32_t __ret;
+ __ret = (uint32_t) __builtin_neon_vqadds_u32(__p0, __p1);
+ return __ret;
+}
+#else
+__ai uint32_t vqadds_u32(uint32_t __p0, uint32_t __p1) {
+ uint32_t __ret;
+ __ret = (uint32_t) __builtin_neon_vqadds_u32(__p0, __p1);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint64_t vqaddd_u64(uint64_t __p0, uint64_t __p1) {
+ uint64_t __ret;
+ __ret = (uint64_t) __builtin_neon_vqaddd_u64(__p0, __p1);
+ return __ret;
+}
+#else
+__ai uint64_t vqaddd_u64(uint64_t __p0, uint64_t __p1) {
+ uint64_t __ret;
+ __ret = (uint64_t) __builtin_neon_vqaddd_u64(__p0, __p1);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint16_t vqaddh_u16(uint16_t __p0, uint16_t __p1) {
+ uint16_t __ret;
+ __ret = (uint16_t) __builtin_neon_vqaddh_u16(__p0, __p1);
+ return __ret;
+}
+#else
+__ai uint16_t vqaddh_u16(uint16_t __p0, uint16_t __p1) {
+ uint16_t __ret;
+ __ret = (uint16_t) __builtin_neon_vqaddh_u16(__p0, __p1);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai int8_t vqaddb_s8(int8_t __p0, int8_t __p1) {
+ int8_t __ret;
+ __ret = (int8_t) __builtin_neon_vqaddb_s8(__p0, __p1);
+ return __ret;
+}
+#else
+__ai int8_t vqaddb_s8(int8_t __p0, int8_t __p1) {
+ int8_t __ret;
+ __ret = (int8_t) __builtin_neon_vqaddb_s8(__p0, __p1);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai int32_t vqadds_s32(int32_t __p0, int32_t __p1) {
+ int32_t __ret;
+ __ret = (int32_t) __builtin_neon_vqadds_s32(__p0, __p1);
+ return __ret;
+}
+#else
+__ai int32_t vqadds_s32(int32_t __p0, int32_t __p1) {
+ int32_t __ret;
+ __ret = (int32_t) __builtin_neon_vqadds_s32(__p0, __p1);
+ return __ret;
+}
+__ai int32_t __noswap_vqadds_s32(int32_t __p0, int32_t __p1) {
+ int32_t __ret;
+ __ret = (int32_t) __builtin_neon_vqadds_s32(__p0, __p1);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai int64_t vqaddd_s64(int64_t __p0, int64_t __p1) {
+ int64_t __ret;
+ __ret = (int64_t) __builtin_neon_vqaddd_s64(__p0, __p1);
+ return __ret;
+}
+#else
+__ai int64_t vqaddd_s64(int64_t __p0, int64_t __p1) {
+ int64_t __ret;
+ __ret = (int64_t) __builtin_neon_vqaddd_s64(__p0, __p1);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai int16_t vqaddh_s16(int16_t __p0, int16_t __p1) {
+ int16_t __ret;
+ __ret = (int16_t) __builtin_neon_vqaddh_s16(__p0, __p1);
+ return __ret;
+}
+#else
+__ai int16_t vqaddh_s16(int16_t __p0, int16_t __p1) {
+ int16_t __ret;
+ __ret = (int16_t) __builtin_neon_vqaddh_s16(__p0, __p1);
+ return __ret;
+}
+__ai int16_t __noswap_vqaddh_s16(int16_t __p0, int16_t __p1) {
+ int16_t __ret;
+ __ret = (int16_t) __builtin_neon_vqaddh_s16(__p0, __p1);
+ return __ret;
+}
+#endif
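+
+/*
+ * The vqadd{b,h,s,d}_* scalars are saturating adds (UQADD/SQADD), e.g.
+ * vqaddb_u8(200, 100) == 255 and vqaddb_s8(100, 100) == 127 rather than a
+ * wrapped sum.  The __noswap_* duplicates exist so that big-endian bodies
+ * of other intrinsics, whose lanes are already reversed, can call a helper
+ * that performs no further reordering.
+ */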
+
+#ifdef __LITTLE_ENDIAN__
+__ai int64_t vqdmlals_s32(int64_t __p0, int32_t __p1, int32_t __p2) {
+ int64_t __ret;
+ __ret = (int64_t) __builtin_neon_vqdmlals_s32(__p0, __p1, __p2);
+ return __ret;
+}
+#else
+__ai int64_t vqdmlals_s32(int64_t __p0, int32_t __p1, int32_t __p2) {
+ int64_t __ret;
+ __ret = (int64_t) __builtin_neon_vqdmlals_s32(__p0, __p1, __p2);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai int32_t vqdmlalh_s16(int32_t __p0, int16_t __p1, int16_t __p2) {
+ int32_t __ret;
+ __ret = (int32_t) __builtin_neon_vqdmlalh_s16(__p0, __p1, __p2);
+ return __ret;
+}
+#else
+__ai int32_t vqdmlalh_s16(int32_t __p0, int16_t __p1, int16_t __p2) {
+ int32_t __ret;
+ __ret = (int32_t) __builtin_neon_vqdmlalh_s16(__p0, __p1, __p2);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai int64x2_t vqdmlal_high_s32(int64x2_t __p0, int32x4_t __p1, int32x4_t __p2) {
+ int64x2_t __ret;
+ __ret = vqdmlal_s32(__p0, vget_high_s32(__p1), vget_high_s32(__p2));
+ return __ret;
+}
+#else
+__ai int64x2_t vqdmlal_high_s32(int64x2_t __p0, int32x4_t __p1, int32x4_t __p2) {
+ int64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
+ int32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
+ int32x4_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 3, 2, 1, 0);
+ int64x2_t __ret;
+ __ret = __noswap_vqdmlal_s32(__rev0, __noswap_vget_high_s32(__rev1), __noswap_vget_high_s32(__rev2));
+ __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai int32x4_t vqdmlal_high_s16(int32x4_t __p0, int16x8_t __p1, int16x8_t __p2) {
+ int32x4_t __ret;
+ __ret = vqdmlal_s16(__p0, vget_high_s16(__p1), vget_high_s16(__p2));
+ return __ret;
+}
+#else
+__ai int32x4_t vqdmlal_high_s16(int32x4_t __p0, int16x8_t __p1, int16x8_t __p2) {
+ int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
+ int16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
+ int16x8_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0);
+ int32x4_t __ret;
+ __ret = __noswap_vqdmlal_s16(__rev0, __noswap_vget_high_s16(__rev1), __noswap_vget_high_s16(__rev2));
+ __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vqdmlal_high_lane_s32(__p0, __p1, __p2, __p3) __extension__ ({ \
+ int64x2_t __s0 = __p0; \
+ int32x4_t __s1 = __p1; \
+ int32x2_t __s2 = __p2; \
+ int64x2_t __ret; \
+ __ret = vqdmlal_s32(__s0, vget_high_s32(__s1), __builtin_shufflevector(__s2, __s2, __p3, __p3)); \
+ __ret; \
+})
+#else
+#define vqdmlal_high_lane_s32(__p0, __p1, __p2, __p3) __extension__ ({ \
+ int64x2_t __s0 = __p0; \
+ int32x4_t __s1 = __p1; \
+ int32x2_t __s2 = __p2; \
+ int64x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
+ int32x4_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
+ int32x2_t __rev2; __rev2 = __builtin_shufflevector(__s2, __s2, 1, 0); \
+ int64x2_t __ret; \
+ __ret = __noswap_vqdmlal_s32(__rev0, __noswap_vget_high_s32(__rev1), __builtin_shufflevector(__rev2, __rev2, __p3, __p3)); \
+ __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
+ __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vqdmlal_high_lane_s16(__p0, __p1, __p2, __p3) __extension__ ({ \
+ int32x4_t __s0 = __p0; \
+ int16x8_t __s1 = __p1; \
+ int16x4_t __s2 = __p2; \
+ int32x4_t __ret; \
+ __ret = vqdmlal_s16(__s0, vget_high_s16(__s1), __builtin_shufflevector(__s2, __s2, __p3, __p3, __p3, __p3)); \
+ __ret; \
+})
+#else
+#define vqdmlal_high_lane_s16(__p0, __p1, __p2, __p3) __extension__ ({ \
+ int32x4_t __s0 = __p0; \
+ int16x8_t __s1 = __p1; \
+ int16x4_t __s2 = __p2; \
+ int32x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
+ int16x8_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \
+ int16x4_t __rev2; __rev2 = __builtin_shufflevector(__s2, __s2, 3, 2, 1, 0); \
+ int32x4_t __ret; \
+ __ret = __noswap_vqdmlal_s16(__rev0, __noswap_vget_high_s16(__rev1), __builtin_shufflevector(__rev2, __rev2, __p3, __p3, __p3, __p3)); \
+ __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
+ __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vqdmlal_high_laneq_s32(__p0, __p1, __p2, __p3) __extension__ ({ \
+ int64x2_t __s0 = __p0; \
+ int32x4_t __s1 = __p1; \
+ int32x4_t __s2 = __p2; \
+ int64x2_t __ret; \
+ __ret = vqdmlal_s32(__s0, vget_high_s32(__s1), __builtin_shufflevector(__s2, __s2, __p3, __p3)); \
+ __ret; \
+})
+#else
+#define vqdmlal_high_laneq_s32(__p0, __p1, __p2, __p3) __extension__ ({ \
+ int64x2_t __s0 = __p0; \
+ int32x4_t __s1 = __p1; \
+ int32x4_t __s2 = __p2; \
+ int64x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
+ int32x4_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
+ int32x4_t __rev2; __rev2 = __builtin_shufflevector(__s2, __s2, 3, 2, 1, 0); \
+ int64x2_t __ret; \
+ __ret = __noswap_vqdmlal_s32(__rev0, __noswap_vget_high_s32(__rev1), __builtin_shufflevector(__rev2, __rev2, __p3, __p3)); \
+ __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
+ __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vqdmlal_high_laneq_s16(__p0, __p1, __p2, __p3) __extension__ ({ \
+ int32x4_t __s0 = __p0; \
+ int16x8_t __s1 = __p1; \
+ int16x8_t __s2 = __p2; \
+ int32x4_t __ret; \
+ __ret = vqdmlal_s16(__s0, vget_high_s16(__s1), __builtin_shufflevector(__s2, __s2, __p3, __p3, __p3, __p3)); \
+ __ret; \
+})
+#else
+#define vqdmlal_high_laneq_s16(__p0, __p1, __p2, __p3) __extension__ ({ \
+ int32x4_t __s0 = __p0; \
+ int16x8_t __s1 = __p1; \
+ int16x8_t __s2 = __p2; \
+ int32x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
+ int16x8_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \
+ int16x8_t __rev2; __rev2 = __builtin_shufflevector(__s2, __s2, 7, 6, 5, 4, 3, 2, 1, 0); \
+ int32x4_t __ret; \
+ __ret = __noswap_vqdmlal_s16(__rev0, __noswap_vget_high_s16(__rev1), __builtin_shufflevector(__rev2, __rev2, __p3, __p3, __p3, __p3)); \
+ __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
+ __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai int64x2_t vqdmlal_high_n_s32(int64x2_t __p0, int32x4_t __p1, int32_t __p2) {
+ int64x2_t __ret;
+ __ret = vqdmlal_n_s32(__p0, vget_high_s32(__p1), __p2);
+ return __ret;
+}
+#else
+__ai int64x2_t vqdmlal_high_n_s32(int64x2_t __p0, int32x4_t __p1, int32_t __p2) {
+ int64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
+ int32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
+ int64x2_t __ret;
+ __ret = __noswap_vqdmlal_n_s32(__rev0, __noswap_vget_high_s32(__rev1), __p2);
+ __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai int32x4_t vqdmlal_high_n_s16(int32x4_t __p0, int16x8_t __p1, int16_t __p2) {
+ int32x4_t __ret;
+ __ret = vqdmlal_n_s16(__p0, vget_high_s16(__p1), __p2);
+ return __ret;
+}
+#else
+__ai int32x4_t vqdmlal_high_n_s16(int32x4_t __p0, int16x8_t __p1, int16_t __p2) {
+ int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
+ int16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
+ int32x4_t __ret;
+ __ret = __noswap_vqdmlal_n_s16(__rev0, __noswap_vget_high_s16(__rev1), __p2);
+ __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vqdmlals_lane_s32(__p0, __p1, __p2, __p3) __extension__ ({ \
+ int64_t __s0 = __p0; \
+ int32_t __s1 = __p1; \
+ int32x2_t __s2 = __p2; \
+ int64_t __ret; \
+ __ret = (int64_t) __builtin_neon_vqdmlals_lane_s32(__s0, __s1, (int8x8_t)__s2, __p3); \
+ __ret; \
+})
+#else
+#define vqdmlals_lane_s32(__p0, __p1, __p2, __p3) __extension__ ({ \
+ int64_t __s0 = __p0; \
+ int32_t __s1 = __p1; \
+ int32x2_t __s2 = __p2; \
+ int32x2_t __rev2; __rev2 = __builtin_shufflevector(__s2, __s2, 1, 0); \
+ int64_t __ret; \
+ __ret = (int64_t) __builtin_neon_vqdmlals_lane_s32(__s0, __s1, (int8x8_t)__rev2, __p3); \
+ __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vqdmlalh_lane_s16(__p0, __p1, __p2, __p3) __extension__ ({ \
+ int32_t __s0 = __p0; \
+ int16_t __s1 = __p1; \
+ int16x4_t __s2 = __p2; \
+ int32_t __ret; \
+ __ret = (int32_t) __builtin_neon_vqdmlalh_lane_s16(__s0, __s1, (int8x8_t)__s2, __p3); \
+ __ret; \
+})
+#else
+#define vqdmlalh_lane_s16(__p0, __p1, __p2, __p3) __extension__ ({ \
+ int32_t __s0 = __p0; \
+ int16_t __s1 = __p1; \
+ int16x4_t __s2 = __p2; \
+ int16x4_t __rev2; __rev2 = __builtin_shufflevector(__s2, __s2, 3, 2, 1, 0); \
+ int32_t __ret; \
+ __ret = (int32_t) __builtin_neon_vqdmlalh_lane_s16(__s0, __s1, (int8x8_t)__rev2, __p3); \
+ __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vqdmlals_laneq_s32(__p0, __p1, __p2, __p3) __extension__ ({ \
+ int64_t __s0 = __p0; \
+ int32_t __s1 = __p1; \
+ int32x4_t __s2 = __p2; \
+ int64_t __ret; \
+ __ret = (int64_t) __builtin_neon_vqdmlals_laneq_s32(__s0, __s1, (int8x16_t)__s2, __p3); \
+ __ret; \
+})
+#else
+#define vqdmlals_laneq_s32(__p0, __p1, __p2, __p3) __extension__ ({ \
+ int64_t __s0 = __p0; \
+ int32_t __s1 = __p1; \
+ int32x4_t __s2 = __p2; \
+ int32x4_t __rev2; __rev2 = __builtin_shufflevector(__s2, __s2, 3, 2, 1, 0); \
+ int64_t __ret; \
+ __ret = (int64_t) __builtin_neon_vqdmlals_laneq_s32(__s0, __s1, (int8x16_t)__rev2, __p3); \
+ __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vqdmlalh_laneq_s16(__p0, __p1, __p2, __p3) __extension__ ({ \
+ int32_t __s0 = __p0; \
+ int16_t __s1 = __p1; \
+ int16x8_t __s2 = __p2; \
+ int32_t __ret; \
+ __ret = (int32_t) __builtin_neon_vqdmlalh_laneq_s16(__s0, __s1, (int8x16_t)__s2, __p3); \
+ __ret; \
+})
+#else
+#define vqdmlalh_laneq_s16(__p0, __p1, __p2, __p3) __extension__ ({ \
+ int32_t __s0 = __p0; \
+ int16_t __s1 = __p1; \
+ int16x8_t __s2 = __p2; \
+ int16x8_t __rev2; __rev2 = __builtin_shufflevector(__s2, __s2, 7, 6, 5, 4, 3, 2, 1, 0); \
+ int32_t __ret; \
+ __ret = (int32_t) __builtin_neon_vqdmlalh_laneq_s16(__s0, __s1, (int8x16_t)__rev2, __p3); \
+ __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vqdmlal_laneq_s32(__p0, __p1, __p2, __p3) __extension__ ({ \
+ int64x2_t __s0 = __p0; \
+ int32x2_t __s1 = __p1; \
+ int32x4_t __s2 = __p2; \
+ int64x2_t __ret; \
+ __ret = vqdmlal_s32(__s0, __s1, __builtin_shufflevector(__s2, __s2, __p3, __p3)); \
+ __ret; \
+})
+#else
+#define vqdmlal_laneq_s32(__p0, __p1, __p2, __p3) __extension__ ({ \
+ int64x2_t __s0 = __p0; \
+ int32x2_t __s1 = __p1; \
+ int32x4_t __s2 = __p2; \
+ int64x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
+ int32x2_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \
+ int32x4_t __rev2; __rev2 = __builtin_shufflevector(__s2, __s2, 3, 2, 1, 0); \
+ int64x2_t __ret; \
+ __ret = __noswap_vqdmlal_s32(__rev0, __rev1, __builtin_shufflevector(__rev2, __rev2, __p3, __p3)); \
+ __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
+ __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vqdmlal_laneq_s16(__p0, __p1, __p2, __p3) __extension__ ({ \
+ int32x4_t __s0 = __p0; \
+ int16x4_t __s1 = __p1; \
+ int16x8_t __s2 = __p2; \
+ int32x4_t __ret; \
+ __ret = vqdmlal_s16(__s0, __s1, __builtin_shufflevector(__s2, __s2, __p3, __p3, __p3, __p3)); \
+ __ret; \
+})
+#else
+#define vqdmlal_laneq_s16(__p0, __p1, __p2, __p3) __extension__ ({ \
+ int32x4_t __s0 = __p0; \
+ int16x4_t __s1 = __p1; \
+ int16x8_t __s2 = __p2; \
+ int32x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
+ int16x4_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
+ int16x8_t __rev2; __rev2 = __builtin_shufflevector(__s2, __s2, 7, 6, 5, 4, 3, 2, 1, 0); \
+ int32x4_t __ret; \
+ __ret = __noswap_vqdmlal_s16(__rev0, __rev1, __builtin_shufflevector(__rev2, __rev2, __p3, __p3, __p3, __p3)); \
+ __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
+ __ret; \
+})
+#endif
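+
+/*
+ * vqdmlal* is the saturating doubling multiply-accumulate long family
+ * (SQDMLAL): conceptually ret = sat(acc + sat(2 * a * b)) evaluated at
+ * twice the multiplicand width, e.g. vqdmlals_s32 combines two int32_t
+ * multiplicands with an int64_t accumulator.  The _high_ forms operate on
+ * the upper halves of their 128-bit sources (via vget_high_*), and the
+ * _lane/_laneq macros select one element of a 64-bit or 128-bit vector as
+ * the second multiplicand.
+ */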
+
+#ifdef __LITTLE_ENDIAN__
+__ai int64_t vqdmlsls_s32(int64_t __p0, int32_t __p1, int32_t __p2) {
+ int64_t __ret;
+ __ret = (int64_t) __builtin_neon_vqdmlsls_s32(__p0, __p1, __p2);
+ return __ret;
+}
+#else
+__ai int64_t vqdmlsls_s32(int64_t __p0, int32_t __p1, int32_t __p2) {
+ int64_t __ret;
+ __ret = (int64_t) __builtin_neon_vqdmlsls_s32(__p0, __p1, __p2);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai int32_t vqdmlslh_s16(int32_t __p0, int16_t __p1, int16_t __p2) {
+ int32_t __ret;
+ __ret = (int32_t) __builtin_neon_vqdmlslh_s16(__p0, __p1, __p2);
+ return __ret;
+}
+#else
+__ai int32_t vqdmlslh_s16(int32_t __p0, int16_t __p1, int16_t __p2) {
+ int32_t __ret;
+ __ret = (int32_t) __builtin_neon_vqdmlslh_s16(__p0, __p1, __p2);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai int64x2_t vqdmlsl_high_s32(int64x2_t __p0, int32x4_t __p1, int32x4_t __p2) {
+ int64x2_t __ret;
+ __ret = vqdmlsl_s32(__p0, vget_high_s32(__p1), vget_high_s32(__p2));
+ return __ret;
+}
+#else
+__ai int64x2_t vqdmlsl_high_s32(int64x2_t __p0, int32x4_t __p1, int32x4_t __p2) {
+ int64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
+ int32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
+ int32x4_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 3, 2, 1, 0);
+ int64x2_t __ret;
+ __ret = __noswap_vqdmlsl_s32(__rev0, __noswap_vget_high_s32(__rev1), __noswap_vget_high_s32(__rev2));
+ __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai int32x4_t vqdmlsl_high_s16(int32x4_t __p0, int16x8_t __p1, int16x8_t __p2) {
+ int32x4_t __ret;
+ __ret = vqdmlsl_s16(__p0, vget_high_s16(__p1), vget_high_s16(__p2));
+ return __ret;
+}
+#else
+__ai int32x4_t vqdmlsl_high_s16(int32x4_t __p0, int16x8_t __p1, int16x8_t __p2) {
+ int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
+ int16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
+ int16x8_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0);
+ int32x4_t __ret;
+ __ret = __noswap_vqdmlsl_s16(__rev0, __noswap_vget_high_s16(__rev1), __noswap_vget_high_s16(__rev2));
+ __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vqdmlsl_high_lane_s32(__p0, __p1, __p2, __p3) __extension__ ({ \
+ int64x2_t __s0 = __p0; \
+ int32x4_t __s1 = __p1; \
+ int32x2_t __s2 = __p2; \
+ int64x2_t __ret; \
+ __ret = vqdmlsl_s32(__s0, vget_high_s32(__s1), __builtin_shufflevector(__s2, __s2, __p3, __p3)); \
+ __ret; \
+})
+#else
+#define vqdmlsl_high_lane_s32(__p0, __p1, __p2, __p3) __extension__ ({ \
+ int64x2_t __s0 = __p0; \
+ int32x4_t __s1 = __p1; \
+ int32x2_t __s2 = __p2; \
+ int64x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
+ int32x4_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
+ int32x2_t __rev2; __rev2 = __builtin_shufflevector(__s2, __s2, 1, 0); \
+ int64x2_t __ret; \
+ __ret = __noswap_vqdmlsl_s32(__rev0, __noswap_vget_high_s32(__rev1), __builtin_shufflevector(__rev2, __rev2, __p3, __p3)); \
+ __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
+ __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vqdmlsl_high_lane_s16(__p0, __p1, __p2, __p3) __extension__ ({ \
+ int32x4_t __s0 = __p0; \
+ int16x8_t __s1 = __p1; \
+ int16x4_t __s2 = __p2; \
+ int32x4_t __ret; \
+ __ret = vqdmlsl_s16(__s0, vget_high_s16(__s1), __builtin_shufflevector(__s2, __s2, __p3, __p3, __p3, __p3)); \
+ __ret; \
+})
+#else
+#define vqdmlsl_high_lane_s16(__p0, __p1, __p2, __p3) __extension__ ({ \
+ int32x4_t __s0 = __p0; \
+ int16x8_t __s1 = __p1; \
+ int16x4_t __s2 = __p2; \
+ int32x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
+ int16x8_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \
+ int16x4_t __rev2; __rev2 = __builtin_shufflevector(__s2, __s2, 3, 2, 1, 0); \
+ int32x4_t __ret; \
+ __ret = __noswap_vqdmlsl_s16(__rev0, __noswap_vget_high_s16(__rev1), __builtin_shufflevector(__rev2, __rev2, __p3, __p3, __p3, __p3)); \
+ __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
+ __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vqdmlsl_high_laneq_s32(__p0, __p1, __p2, __p3) __extension__ ({ \
+ int64x2_t __s0 = __p0; \
+ int32x4_t __s1 = __p1; \
+ int32x4_t __s2 = __p2; \
+ int64x2_t __ret; \
+ __ret = vqdmlsl_s32(__s0, vget_high_s32(__s1), __builtin_shufflevector(__s2, __s2, __p3, __p3)); \
+ __ret; \
+})
+#else
+#define vqdmlsl_high_laneq_s32(__p0, __p1, __p2, __p3) __extension__ ({ \
+ int64x2_t __s0 = __p0; \
+ int32x4_t __s1 = __p1; \
+ int32x4_t __s2 = __p2; \
+ int64x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
+ int32x4_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
+ int32x4_t __rev2; __rev2 = __builtin_shufflevector(__s2, __s2, 3, 2, 1, 0); \
+ int64x2_t __ret; \
+ __ret = __noswap_vqdmlsl_s32(__rev0, __noswap_vget_high_s32(__rev1), __builtin_shufflevector(__rev2, __rev2, __p3, __p3)); \
+ __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
+ __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vqdmlsl_high_laneq_s16(__p0, __p1, __p2, __p3) __extension__ ({ \
+ int32x4_t __s0 = __p0; \
+ int16x8_t __s1 = __p1; \
+ int16x8_t __s2 = __p2; \
+ int32x4_t __ret; \
+ __ret = vqdmlsl_s16(__s0, vget_high_s16(__s1), __builtin_shufflevector(__s2, __s2, __p3, __p3, __p3, __p3)); \
+ __ret; \
+})
+#else
+#define vqdmlsl_high_laneq_s16(__p0, __p1, __p2, __p3) __extension__ ({ \
+ int32x4_t __s0 = __p0; \
+ int16x8_t __s1 = __p1; \
+ int16x8_t __s2 = __p2; \
+ int32x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
+ int16x8_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \
+ int16x8_t __rev2; __rev2 = __builtin_shufflevector(__s2, __s2, 7, 6, 5, 4, 3, 2, 1, 0); \
+ int32x4_t __ret; \
+ __ret = __noswap_vqdmlsl_s16(__rev0, __noswap_vget_high_s16(__rev1), __builtin_shufflevector(__rev2, __rev2, __p3, __p3, __p3, __p3)); \
+ __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
+ __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai int64x2_t vqdmlsl_high_n_s32(int64x2_t __p0, int32x4_t __p1, int32_t __p2) {
+ int64x2_t __ret;
+ __ret = vqdmlsl_n_s32(__p0, vget_high_s32(__p1), __p2);
+ return __ret;
+}
+#else
+__ai int64x2_t vqdmlsl_high_n_s32(int64x2_t __p0, int32x4_t __p1, int32_t __p2) {
+ int64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
+ int32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
+ int64x2_t __ret;
+ __ret = __noswap_vqdmlsl_n_s32(__rev0, __noswap_vget_high_s32(__rev1), __p2);
+ __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai int32x4_t vqdmlsl_high_n_s16(int32x4_t __p0, int16x8_t __p1, int16_t __p2) {
+ int32x4_t __ret;
+ __ret = vqdmlsl_n_s16(__p0, vget_high_s16(__p1), __p2);
+ return __ret;
+}
+#else
+__ai int32x4_t vqdmlsl_high_n_s16(int32x4_t __p0, int16x8_t __p1, int16_t __p2) {
+ int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
+ int16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
+ int32x4_t __ret;
+ __ret = __noswap_vqdmlsl_n_s16(__rev0, __noswap_vget_high_s16(__rev1), __p2);
+ __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vqdmlsls_lane_s32(__p0, __p1, __p2, __p3) __extension__ ({ \
+ int64_t __s0 = __p0; \
+ int32_t __s1 = __p1; \
+ int32x2_t __s2 = __p2; \
+ int64_t __ret; \
+ __ret = (int64_t) __builtin_neon_vqdmlsls_lane_s32(__s0, __s1, (int8x8_t)__s2, __p3); \
+ __ret; \
+})
+#else
+#define vqdmlsls_lane_s32(__p0, __p1, __p2, __p3) __extension__ ({ \
+ int64_t __s0 = __p0; \
+ int32_t __s1 = __p1; \
+ int32x2_t __s2 = __p2; \
+ int32x2_t __rev2; __rev2 = __builtin_shufflevector(__s2, __s2, 1, 0); \
+ int64_t __ret; \
+ __ret = (int64_t) __builtin_neon_vqdmlsls_lane_s32(__s0, __s1, (int8x8_t)__rev2, __p3); \
+ __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vqdmlslh_lane_s16(__p0, __p1, __p2, __p3) __extension__ ({ \
+ int32_t __s0 = __p0; \
+ int16_t __s1 = __p1; \
+ int16x4_t __s2 = __p2; \
+ int32_t __ret; \
+ __ret = (int32_t) __builtin_neon_vqdmlslh_lane_s16(__s0, __s1, (int8x8_t)__s2, __p3); \
+ __ret; \
+})
+#else
+#define vqdmlslh_lane_s16(__p0, __p1, __p2, __p3) __extension__ ({ \
+ int32_t __s0 = __p0; \
+ int16_t __s1 = __p1; \
+ int16x4_t __s2 = __p2; \
+ int16x4_t __rev2; __rev2 = __builtin_shufflevector(__s2, __s2, 3, 2, 1, 0); \
+ int32_t __ret; \
+ __ret = (int32_t) __builtin_neon_vqdmlslh_lane_s16(__s0, __s1, (int8x8_t)__rev2, __p3); \
+ __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vqdmlsls_laneq_s32(__p0, __p1, __p2, __p3) __extension__ ({ \
+ int64_t __s0 = __p0; \
+ int32_t __s1 = __p1; \
+ int32x4_t __s2 = __p2; \
+ int64_t __ret; \
+ __ret = (int64_t) __builtin_neon_vqdmlsls_laneq_s32(__s0, __s1, (int8x16_t)__s2, __p3); \
+ __ret; \
+})
+#else
+#define vqdmlsls_laneq_s32(__p0, __p1, __p2, __p3) __extension__ ({ \
+ int64_t __s0 = __p0; \
+ int32_t __s1 = __p1; \
+ int32x4_t __s2 = __p2; \
+ int32x4_t __rev2; __rev2 = __builtin_shufflevector(__s2, __s2, 3, 2, 1, 0); \
+ int64_t __ret; \
+ __ret = (int64_t) __builtin_neon_vqdmlsls_laneq_s32(__s0, __s1, (int8x16_t)__rev2, __p3); \
+ __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vqdmlslh_laneq_s16(__p0, __p1, __p2, __p3) __extension__ ({ \
+ int32_t __s0 = __p0; \
+ int16_t __s1 = __p1; \
+ int16x8_t __s2 = __p2; \
+ int32_t __ret; \
+ __ret = (int32_t) __builtin_neon_vqdmlslh_laneq_s16(__s0, __s1, (int8x16_t)__s2, __p3); \
+ __ret; \
+})
+#else
+#define vqdmlslh_laneq_s16(__p0, __p1, __p2, __p3) __extension__ ({ \
+ int32_t __s0 = __p0; \
+ int16_t __s1 = __p1; \
+ int16x8_t __s2 = __p2; \
+ int16x8_t __rev2; __rev2 = __builtin_shufflevector(__s2, __s2, 7, 6, 5, 4, 3, 2, 1, 0); \
+ int32_t __ret; \
+ __ret = (int32_t) __builtin_neon_vqdmlslh_laneq_s16(__s0, __s1, (int8x16_t)__rev2, __p3); \
+ __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vqdmlsl_laneq_s32(__p0, __p1, __p2, __p3) __extension__ ({ \
+ int64x2_t __s0 = __p0; \
+ int32x2_t __s1 = __p1; \
+ int32x4_t __s2 = __p2; \
+ int64x2_t __ret; \
+ __ret = vqdmlsl_s32(__s0, __s1, __builtin_shufflevector(__s2, __s2, __p3, __p3)); \
+ __ret; \
+})
+#else
+#define vqdmlsl_laneq_s32(__p0, __p1, __p2, __p3) __extension__ ({ \
+ int64x2_t __s0 = __p0; \
+ int32x2_t __s1 = __p1; \
+ int32x4_t __s2 = __p2; \
+ int64x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
+ int32x2_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \
+ int32x4_t __rev2; __rev2 = __builtin_shufflevector(__s2, __s2, 3, 2, 1, 0); \
+ int64x2_t __ret; \
+ __ret = __noswap_vqdmlsl_s32(__rev0, __rev1, __builtin_shufflevector(__rev2, __rev2, __p3, __p3)); \
+ __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
+ __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vqdmlsl_laneq_s16(__p0, __p1, __p2, __p3) __extension__ ({ \
+ int32x4_t __s0 = __p0; \
+ int16x4_t __s1 = __p1; \
+ int16x8_t __s2 = __p2; \
+ int32x4_t __ret; \
+ __ret = vqdmlsl_s16(__s0, __s1, __builtin_shufflevector(__s2, __s2, __p3, __p3, __p3, __p3)); \
+ __ret; \
+})
+#else
+#define vqdmlsl_laneq_s16(__p0, __p1, __p2, __p3) __extension__ ({ \
+ int32x4_t __s0 = __p0; \
+ int16x4_t __s1 = __p1; \
+ int16x8_t __s2 = __p2; \
+ int32x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
+ int16x4_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
+ int16x8_t __rev2; __rev2 = __builtin_shufflevector(__s2, __s2, 7, 6, 5, 4, 3, 2, 1, 0); \
+ int32x4_t __ret; \
+ __ret = __noswap_vqdmlsl_s16(__rev0, __rev1, __builtin_shufflevector(__rev2, __rev2, __p3, __p3, __p3, __p3)); \
+ __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
+ __ret; \
+})
+#endif
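+
+/*
+ * vqdmlsl* (SQDMLSL) is the subtracting counterpart of vqdmlal* above,
+ * ret = sat(acc - sat(2 * a * b)), with the same _high, _lane and _laneq
+ * variants.
+ */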
+
+#ifdef __LITTLE_ENDIAN__
+__ai int32_t vqdmulhs_s32(int32_t __p0, int32_t __p1) {
+ int32_t __ret;
+ __ret = (int32_t) __builtin_neon_vqdmulhs_s32(__p0, __p1);
+ return __ret;
+}
+#else
+__ai int32_t vqdmulhs_s32(int32_t __p0, int32_t __p1) {
+ int32_t __ret;
+ __ret = (int32_t) __builtin_neon_vqdmulhs_s32(__p0, __p1);
+ return __ret;
+}
+__ai int32_t __noswap_vqdmulhs_s32(int32_t __p0, int32_t __p1) {
+ int32_t __ret;
+ __ret = (int32_t) __builtin_neon_vqdmulhs_s32(__p0, __p1);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai int16_t vqdmulhh_s16(int16_t __p0, int16_t __p1) {
+ int16_t __ret;
+ __ret = (int16_t) __builtin_neon_vqdmulhh_s16(__p0, __p1);
+ return __ret;
+}
+#else
+__ai int16_t vqdmulhh_s16(int16_t __p0, int16_t __p1) {
+ int16_t __ret;
+ __ret = (int16_t) __builtin_neon_vqdmulhh_s16(__p0, __p1);
+ return __ret;
+}
+__ai int16_t __noswap_vqdmulhh_s16(int16_t __p0, int16_t __p1) {
+ int16_t __ret;
+ __ret = (int16_t) __builtin_neon_vqdmulhh_s16(__p0, __p1);
+ return __ret;
+}
+#endif
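+
+/*
+ * vqdmulh{s,h}_* (SQDMULH) double the product and keep its high half: for
+ * int16_t, roughly sat16(((int32_t)a * b * 2) >> 16).  Saturation only
+ * matters when both operands are the most negative value, e.g.
+ * vqdmulhh_s16(-32768, -32768) == 32767.
+ */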
+
+#ifdef __LITTLE_ENDIAN__
+#define vqdmulhs_lane_s32(__p0_146, __p1_146, __p2_146) __extension__ ({ \
+ int32_t __s0_146 = __p0_146; \
+ int32x2_t __s1_146 = __p1_146; \
+ int32_t __ret_146; \
+ __ret_146 = vqdmulhs_s32(__s0_146, vget_lane_s32(__s1_146, __p2_146)); \
+ __ret_146; \
+})
+#else
+#define vqdmulhs_lane_s32(__p0_147, __p1_147, __p2_147) __extension__ ({ \
+ int32_t __s0_147 = __p0_147; \
+ int32x2_t __s1_147 = __p1_147; \
+ int32x2_t __rev1_147; __rev1_147 = __builtin_shufflevector(__s1_147, __s1_147, 1, 0); \
+ int32_t __ret_147; \
+ __ret_147 = __noswap_vqdmulhs_s32(__s0_147, __noswap_vget_lane_s32(__rev1_147, __p2_147)); \
+ __ret_147; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vqdmulhh_lane_s16(__p0_148, __p1_148, __p2_148) __extension__ ({ \
+ int16_t __s0_148 = __p0_148; \
+ int16x4_t __s1_148 = __p1_148; \
+ int16_t __ret_148; \
+ __ret_148 = vqdmulhh_s16(__s0_148, vget_lane_s16(__s1_148, __p2_148)); \
+ __ret_148; \
+})
+#else
+#define vqdmulhh_lane_s16(__p0_149, __p1_149, __p2_149) __extension__ ({ \
+ int16_t __s0_149 = __p0_149; \
+ int16x4_t __s1_149 = __p1_149; \
+ int16x4_t __rev1_149; __rev1_149 = __builtin_shufflevector(__s1_149, __s1_149, 3, 2, 1, 0); \
+ int16_t __ret_149; \
+ __ret_149 = __noswap_vqdmulhh_s16(__s0_149, __noswap_vget_lane_s16(__rev1_149, __p2_149)); \
+ __ret_149; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vqdmulhs_laneq_s32(__p0_150, __p1_150, __p2_150) __extension__ ({ \
+ int32_t __s0_150 = __p0_150; \
+ int32x4_t __s1_150 = __p1_150; \
+ int32_t __ret_150; \
+ __ret_150 = vqdmulhs_s32(__s0_150, vgetq_lane_s32(__s1_150, __p2_150)); \
+ __ret_150; \
+})
+#else
+#define vqdmulhs_laneq_s32(__p0_151, __p1_151, __p2_151) __extension__ ({ \
+ int32_t __s0_151 = __p0_151; \
+ int32x4_t __s1_151 = __p1_151; \
+ int32x4_t __rev1_151; __rev1_151 = __builtin_shufflevector(__s1_151, __s1_151, 3, 2, 1, 0); \
+ int32_t __ret_151; \
+ __ret_151 = __noswap_vqdmulhs_s32(__s0_151, __noswap_vgetq_lane_s32(__rev1_151, __p2_151)); \
+ __ret_151; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vqdmulhh_laneq_s16(__p0_152, __p1_152, __p2_152) __extension__ ({ \
+ int16_t __s0_152 = __p0_152; \
+ int16x8_t __s1_152 = __p1_152; \
+ int16_t __ret_152; \
+ __ret_152 = vqdmulhh_s16(__s0_152, vgetq_lane_s16(__s1_152, __p2_152)); \
+ __ret_152; \
+})
+#else
+#define vqdmulhh_laneq_s16(__p0_153, __p1_153, __p2_153) __extension__ ({ \
+ int16_t __s0_153 = __p0_153; \
+ int16x8_t __s1_153 = __p1_153; \
+ int16x8_t __rev1_153; __rev1_153 = __builtin_shufflevector(__s1_153, __s1_153, 7, 6, 5, 4, 3, 2, 1, 0); \
+ int16_t __ret_153; \
+ __ret_153 = __noswap_vqdmulhh_s16(__s0_153, __noswap_vgetq_lane_s16(__rev1_153, __p2_153)); \
+ __ret_153; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vqdmulhq_laneq_s32(__p0, __p1, __p2) __extension__ ({ \
+ int32x4_t __s0 = __p0; \
+ int32x4_t __s1 = __p1; \
+ int32x4_t __ret; \
+ __ret = vqdmulhq_s32(__s0, __builtin_shufflevector(__s1, __s1, __p2, __p2, __p2, __p2)); \
+ __ret; \
+})
+#else
+#define vqdmulhq_laneq_s32(__p0, __p1, __p2) __extension__ ({ \
+ int32x4_t __s0 = __p0; \
+ int32x4_t __s1 = __p1; \
+ int32x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
+ int32x4_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
+ int32x4_t __ret; \
+ __ret = __noswap_vqdmulhq_s32(__rev0, __builtin_shufflevector(__rev1, __rev1, __p2, __p2, __p2, __p2)); \
+ __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
+ __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vqdmulhq_laneq_s16(__p0, __p1, __p2) __extension__ ({ \
+ int16x8_t __s0 = __p0; \
+ int16x8_t __s1 = __p1; \
+ int16x8_t __ret; \
+ __ret = vqdmulhq_s16(__s0, __builtin_shufflevector(__s1, __s1, __p2, __p2, __p2, __p2, __p2, __p2, __p2, __p2)); \
+ __ret; \
+})
+#else
+#define vqdmulhq_laneq_s16(__p0, __p1, __p2) __extension__ ({ \
+ int16x8_t __s0 = __p0; \
+ int16x8_t __s1 = __p1; \
+ int16x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
+ int16x8_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \
+ int16x8_t __ret; \
+ __ret = __noswap_vqdmulhq_s16(__rev0, __builtin_shufflevector(__rev1, __rev1, __p2, __p2, __p2, __p2, __p2, __p2, __p2, __p2)); \
+ __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
+ __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vqdmulh_laneq_s32(__p0, __p1, __p2) __extension__ ({ \
+ int32x2_t __s0 = __p0; \
+ int32x4_t __s1 = __p1; \
+ int32x2_t __ret; \
+ __ret = vqdmulh_s32(__s0, __builtin_shufflevector(__s1, __s1, __p2, __p2)); \
+ __ret; \
+})
+#else
+#define vqdmulh_laneq_s32(__p0, __p1, __p2) __extension__ ({ \
+ int32x2_t __s0 = __p0; \
+ int32x4_t __s1 = __p1; \
+ int32x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
+ int32x4_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
+ int32x2_t __ret; \
+ __ret = __noswap_vqdmulh_s32(__rev0, __builtin_shufflevector(__rev1, __rev1, __p2, __p2)); \
+ __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
+ __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vqdmulh_laneq_s16(__p0, __p1, __p2) __extension__ ({ \
+ int16x4_t __s0 = __p0; \
+ int16x8_t __s1 = __p1; \
+ int16x4_t __ret; \
+ __ret = vqdmulh_s16(__s0, __builtin_shufflevector(__s1, __s1, __p2, __p2, __p2, __p2)); \
+ __ret; \
+})
+#else
+#define vqdmulh_laneq_s16(__p0, __p1, __p2) __extension__ ({ \
+ int16x4_t __s0 = __p0; \
+ int16x8_t __s1 = __p1; \
+ int16x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
+ int16x8_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \
+ int16x4_t __ret; \
+ __ret = __noswap_vqdmulh_s16(__rev0, __builtin_shufflevector(__rev1, __rev1, __p2, __p2, __p2, __p2)); \
+ __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
+ __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai int64_t vqdmulls_s32(int32_t __p0, int32_t __p1) {
+ int64_t __ret;
+ __ret = (int64_t) __builtin_neon_vqdmulls_s32(__p0, __p1);
+ return __ret;
+}
+#else
+__ai int64_t vqdmulls_s32(int32_t __p0, int32_t __p1) {
+ int64_t __ret;
+ __ret = (int64_t) __builtin_neon_vqdmulls_s32(__p0, __p1);
+ return __ret;
+}
+__ai int64_t __noswap_vqdmulls_s32(int32_t __p0, int32_t __p1) {
+ int64_t __ret;
+ __ret = (int64_t) __builtin_neon_vqdmulls_s32(__p0, __p1);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai int32_t vqdmullh_s16(int16_t __p0, int16_t __p1) {
+ int32_t __ret;
+ __ret = (int32_t) __builtin_neon_vqdmullh_s16(__p0, __p1);
+ return __ret;
+}
+#else
+__ai int32_t vqdmullh_s16(int16_t __p0, int16_t __p1) {
+ int32_t __ret;
+ __ret = (int32_t) __builtin_neon_vqdmullh_s16(__p0, __p1);
+ return __ret;
+}
+__ai int32_t __noswap_vqdmullh_s16(int16_t __p0, int16_t __p1) {
+ int32_t __ret;
+ __ret = (int32_t) __builtin_neon_vqdmullh_s16(__p0, __p1);
+ return __ret;
+}
+#endif
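+
+/*
+ * vqdmull{s,h}_* (SQDMULL) widen: they return the saturated doubled
+ * product at twice the element width (int64_t from two int32_t inputs).
+ * The sole saturating case is both operands equal to the most negative
+ * value: vqdmulls_s32(INT32_MIN, INT32_MIN) == INT64_MAX.
+ */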
+
+#ifdef __LITTLE_ENDIAN__
+__ai int64x2_t vqdmull_high_s32(int32x4_t __p0, int32x4_t __p1) {
+ int64x2_t __ret;
+ __ret = vqdmull_s32(vget_high_s32(__p0), vget_high_s32(__p1));
+ return __ret;
+}
+#else
+__ai int64x2_t vqdmull_high_s32(int32x4_t __p0, int32x4_t __p1) {
+ int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
+ int32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
+ int64x2_t __ret;
+ __ret = __noswap_vqdmull_s32(__noswap_vget_high_s32(__rev0), __noswap_vget_high_s32(__rev1));
+ __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai int32x4_t vqdmull_high_s16(int16x8_t __p0, int16x8_t __p1) {
+ int32x4_t __ret;
+ __ret = vqdmull_s16(vget_high_s16(__p0), vget_high_s16(__p1));
+ return __ret;
+}
+#else
+__ai int32x4_t vqdmull_high_s16(int16x8_t __p0, int16x8_t __p1) {
+ int16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
+ int16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
+ int32x4_t __ret;
+ __ret = __noswap_vqdmull_s16(__noswap_vget_high_s16(__rev0), __noswap_vget_high_s16(__rev1));
+ __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vqdmull_high_lane_s32(__p0, __p1, __p2) __extension__ ({ \
+ int32x4_t __s0 = __p0; \
+ int32x2_t __s1 = __p1; \
+ int64x2_t __ret; \
+ __ret = vqdmull_s32(vget_high_s32(__s0), __builtin_shufflevector(__s1, __s1, __p2, __p2)); \
+ __ret; \
+})
+#else
+#define vqdmull_high_lane_s32(__p0, __p1, __p2) __extension__ ({ \
+ int32x4_t __s0 = __p0; \
+ int32x2_t __s1 = __p1; \
+ int32x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
+ int32x2_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \
+ int64x2_t __ret; \
+ __ret = __noswap_vqdmull_s32(__noswap_vget_high_s32(__rev0), __builtin_shufflevector(__rev1, __rev1, __p2, __p2)); \
+ __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
+ __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vqdmull_high_lane_s16(__p0, __p1, __p2) __extension__ ({ \
+ int16x8_t __s0 = __p0; \
+ int16x4_t __s1 = __p1; \
+ int32x4_t __ret; \
+ __ret = vqdmull_s16(vget_high_s16(__s0), __builtin_shufflevector(__s1, __s1, __p2, __p2, __p2, __p2)); \
+ __ret; \
+})
+#else
+#define vqdmull_high_lane_s16(__p0, __p1, __p2) __extension__ ({ \
+ int16x8_t __s0 = __p0; \
+ int16x4_t __s1 = __p1; \
+ int16x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
+ int16x4_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
+ int32x4_t __ret; \
+ __ret = __noswap_vqdmull_s16(__noswap_vget_high_s16(__rev0), __builtin_shufflevector(__rev1, __rev1, __p2, __p2, __p2, __p2)); \
+ __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
+ __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vqdmull_high_laneq_s32(__p0, __p1, __p2) __extension__ ({ \
+ int32x4_t __s0 = __p0; \
+ int32x4_t __s1 = __p1; \
+ int64x2_t __ret; \
+ __ret = vqdmull_s32(vget_high_s32(__s0), __builtin_shufflevector(__s1, __s1, __p2, __p2)); \
+ __ret; \
+})
+#else
+#define vqdmull_high_laneq_s32(__p0, __p1, __p2) __extension__ ({ \
+ int32x4_t __s0 = __p0; \
+ int32x4_t __s1 = __p1; \
+ int32x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
+ int32x4_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
+ int64x2_t __ret; \
+ __ret = __noswap_vqdmull_s32(__noswap_vget_high_s32(__rev0), __builtin_shufflevector(__rev1, __rev1, __p2, __p2)); \
+ __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
+ __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vqdmull_high_laneq_s16(__p0, __p1, __p2) __extension__ ({ \
+ int16x8_t __s0 = __p0; \
+ int16x8_t __s1 = __p1; \
+ int32x4_t __ret; \
+ __ret = vqdmull_s16(vget_high_s16(__s0), __builtin_shufflevector(__s1, __s1, __p2, __p2, __p2, __p2)); \
+ __ret; \
+})
+#else
+#define vqdmull_high_laneq_s16(__p0, __p1, __p2) __extension__ ({ \
+ int16x8_t __s0 = __p0; \
+ int16x8_t __s1 = __p1; \
+ int16x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
+ int16x8_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \
+ int32x4_t __ret; \
+ __ret = __noswap_vqdmull_s16(__noswap_vget_high_s16(__rev0), __builtin_shufflevector(__rev1, __rev1, __p2, __p2, __p2, __p2)); \
+ __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
+ __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai int64x2_t vqdmull_high_n_s32(int32x4_t __p0, int32_t __p1) {
+ int64x2_t __ret;
+ __ret = vqdmull_n_s32(vget_high_s32(__p0), __p1);
+ return __ret;
+}
+#else
+__ai int64x2_t vqdmull_high_n_s32(int32x4_t __p0, int32_t __p1) {
+ int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
+ int64x2_t __ret;
+ __ret = __noswap_vqdmull_n_s32(__noswap_vget_high_s32(__rev0), __p1);
+ __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai int32x4_t vqdmull_high_n_s16(int16x8_t __p0, int16_t __p1) {
+ int32x4_t __ret;
+ __ret = vqdmull_n_s16(vget_high_s16(__p0), __p1);
+ return __ret;
+}
+#else
+__ai int32x4_t vqdmull_high_n_s16(int16x8_t __p0, int16_t __p1) {
+ int16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
+ int32x4_t __ret;
+ __ret = __noswap_vqdmull_n_s16(__noswap_vget_high_s16(__rev0), __p1);
+ __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vqdmulls_lane_s32(__p0_154, __p1_154, __p2_154) __extension__ ({ \
+ int32_t __s0_154 = __p0_154; \
+ int32x2_t __s1_154 = __p1_154; \
+ int64_t __ret_154; \
+ __ret_154 = vqdmulls_s32(__s0_154, vget_lane_s32(__s1_154, __p2_154)); \
+ __ret_154; \
+})
+#else
+#define vqdmulls_lane_s32(__p0_155, __p1_155, __p2_155) __extension__ ({ \
+ int32_t __s0_155 = __p0_155; \
+ int32x2_t __s1_155 = __p1_155; \
+ int32x2_t __rev1_155; __rev1_155 = __builtin_shufflevector(__s1_155, __s1_155, 1, 0); \
+ int64_t __ret_155; \
+ __ret_155 = __noswap_vqdmulls_s32(__s0_155, __noswap_vget_lane_s32(__rev1_155, __p2_155)); \
+ __ret_155; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vqdmullh_lane_s16(__p0_156, __p1_156, __p2_156) __extension__ ({ \
+ int16_t __s0_156 = __p0_156; \
+ int16x4_t __s1_156 = __p1_156; \
+ int32_t __ret_156; \
+ __ret_156 = vqdmullh_s16(__s0_156, vget_lane_s16(__s1_156, __p2_156)); \
+ __ret_156; \
+})
+#else
+#define vqdmullh_lane_s16(__p0_157, __p1_157, __p2_157) __extension__ ({ \
+ int16_t __s0_157 = __p0_157; \
+ int16x4_t __s1_157 = __p1_157; \
+ int16x4_t __rev1_157; __rev1_157 = __builtin_shufflevector(__s1_157, __s1_157, 3, 2, 1, 0); \
+ int32_t __ret_157; \
+ __ret_157 = __noswap_vqdmullh_s16(__s0_157, __noswap_vget_lane_s16(__rev1_157, __p2_157)); \
+ __ret_157; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vqdmulls_laneq_s32(__p0_158, __p1_158, __p2_158) __extension__ ({ \
+ int32_t __s0_158 = __p0_158; \
+ int32x4_t __s1_158 = __p1_158; \
+ int64_t __ret_158; \
+ __ret_158 = vqdmulls_s32(__s0_158, vgetq_lane_s32(__s1_158, __p2_158)); \
+ __ret_158; \
+})
+#else
+#define vqdmulls_laneq_s32(__p0_159, __p1_159, __p2_159) __extension__ ({ \
+ int32_t __s0_159 = __p0_159; \
+ int32x4_t __s1_159 = __p1_159; \
+ int32x4_t __rev1_159; __rev1_159 = __builtin_shufflevector(__s1_159, __s1_159, 3, 2, 1, 0); \
+ int64_t __ret_159; \
+ __ret_159 = __noswap_vqdmulls_s32(__s0_159, __noswap_vgetq_lane_s32(__rev1_159, __p2_159)); \
+ __ret_159; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vqdmullh_laneq_s16(__p0_160, __p1_160, __p2_160) __extension__ ({ \
+ int16_t __s0_160 = __p0_160; \
+ int16x8_t __s1_160 = __p1_160; \
+ int32_t __ret_160; \
+ __ret_160 = vqdmullh_s16(__s0_160, vgetq_lane_s16(__s1_160, __p2_160)); \
+ __ret_160; \
+})
+#else
+#define vqdmullh_laneq_s16(__p0_161, __p1_161, __p2_161) __extension__ ({ \
+ int16_t __s0_161 = __p0_161; \
+ int16x8_t __s1_161 = __p1_161; \
+ int16x8_t __rev1_161; __rev1_161 = __builtin_shufflevector(__s1_161, __s1_161, 7, 6, 5, 4, 3, 2, 1, 0); \
+ int32_t __ret_161; \
+ __ret_161 = __noswap_vqdmullh_s16(__s0_161, __noswap_vgetq_lane_s16(__rev1_161, __p2_161)); \
+ __ret_161; \
+})
+#endif
+
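+/* Usage sketch for the scalar-by-lane forms above (illustrative values):
+ * one element is picked from the vector operand by the immediate lane
+ * index, then the usual saturating doubling multiply-long is applied, e.g.
+ *
+ *   int32x2_t v = {10, 20};
+ *   vqdmulls_lane_s32(5, v, 1)   ->   (int64_t)(2*5*20) = 200
+ */
+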
+#ifdef __LITTLE_ENDIAN__
+#define vqdmull_laneq_s32(__p0, __p1, __p2) __extension__ ({ \
+ int32x2_t __s0 = __p0; \
+ int32x4_t __s1 = __p1; \
+ int64x2_t __ret; \
+ __ret = vqdmull_s32(__s0, __builtin_shufflevector(__s1, __s1, __p2, __p2)); \
+ __ret; \
+})
+#else
+#define vqdmull_laneq_s32(__p0, __p1, __p2) __extension__ ({ \
+ int32x2_t __s0 = __p0; \
+ int32x4_t __s1 = __p1; \
+ int32x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
+ int32x4_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
+ int64x2_t __ret; \
+ __ret = __noswap_vqdmull_s32(__rev0, __builtin_shufflevector(__rev1, __rev1, __p2, __p2)); \
+ __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
+ __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vqdmull_laneq_s16(__p0, __p1, __p2) __extension__ ({ \
+ int16x4_t __s0 = __p0; \
+ int16x8_t __s1 = __p1; \
+ int32x4_t __ret; \
+ __ret = vqdmull_s16(__s0, __builtin_shufflevector(__s1, __s1, __p2, __p2, __p2, __p2)); \
+ __ret; \
+})
+#else
+#define vqdmull_laneq_s16(__p0, __p1, __p2) __extension__ ({ \
+ int16x4_t __s0 = __p0; \
+ int16x8_t __s1 = __p1; \
+ int16x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
+ int16x8_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \
+ int32x4_t __ret; \
+ __ret = __noswap_vqdmull_s16(__rev0, __builtin_shufflevector(__rev1, __rev1, __p2, __p2, __p2, __p2)); \
+ __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
+ __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai int16_t vqmovns_s32(int32_t __p0) {
+ int16_t __ret;
+ __ret = (int16_t) __builtin_neon_vqmovns_s32(__p0);
+ return __ret;
+}
+#else
+__ai int16_t vqmovns_s32(int32_t __p0) {
+ int16_t __ret;
+ __ret = (int16_t) __builtin_neon_vqmovns_s32(__p0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai int32_t vqmovnd_s64(int64_t __p0) {
+ int32_t __ret;
+ __ret = (int32_t) __builtin_neon_vqmovnd_s64(__p0);
+ return __ret;
+}
+#else
+__ai int32_t vqmovnd_s64(int64_t __p0) {
+ int32_t __ret;
+ __ret = (int32_t) __builtin_neon_vqmovnd_s64(__p0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai int8_t vqmovnh_s16(int16_t __p0) {
+ int8_t __ret;
+ __ret = (int8_t) __builtin_neon_vqmovnh_s16(__p0);
+ return __ret;
+}
+#else
+__ai int8_t vqmovnh_s16(int16_t __p0) {
+ int8_t __ret;
+ __ret = (int8_t) __builtin_neon_vqmovnh_s16(__p0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint16_t vqmovns_u32(uint32_t __p0) {
+ uint16_t __ret;
+ __ret = (uint16_t) __builtin_neon_vqmovns_u32(__p0);
+ return __ret;
+}
+#else
+__ai uint16_t vqmovns_u32(uint32_t __p0) {
+ uint16_t __ret;
+ __ret = (uint16_t) __builtin_neon_vqmovns_u32(__p0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint32_t vqmovnd_u64(uint64_t __p0) {
+ uint32_t __ret;
+ __ret = (uint32_t) __builtin_neon_vqmovnd_u64(__p0);
+ return __ret;
+}
+#else
+__ai uint32_t vqmovnd_u64(uint64_t __p0) {
+ uint32_t __ret;
+ __ret = (uint32_t) __builtin_neon_vqmovnd_u64(__p0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint8_t vqmovnh_u16(uint16_t __p0) {
+ uint8_t __ret;
+ __ret = (uint8_t) __builtin_neon_vqmovnh_u16(__p0);
+ return __ret;
+}
+#else
+__ai uint8_t vqmovnh_u16(uint16_t __p0) {
+ uint8_t __ret;
+ __ret = (uint8_t) __builtin_neon_vqmovnh_u16(__p0);
+ return __ret;
+}
+#endif
+
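+/* Usage sketch for the scalar saturating narrows above (illustrative
+ * values):
+ *
+ *   vqmovns_s32(0x12345)   -> 0x7FFF       (74565 exceeds INT16_MAX, clamps)
+ *   vqmovns_s32(-42)       -> -42          (in range, passes through)
+ *   vqmovnd_u64(1ULL<<40)  -> 0xFFFFFFFF   (clamps to the u32 maximum)
+ */
+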
+#ifdef __LITTLE_ENDIAN__
+__ai uint16x8_t vqmovn_high_u32(uint16x4_t __p0, uint32x4_t __p1) {
+ uint16x8_t __ret;
+ __ret = vcombine_u16(__p0, vqmovn_u32(__p1));
+ return __ret;
+}
+#else
+__ai uint16x8_t vqmovn_high_u32(uint16x4_t __p0, uint32x4_t __p1) {
+ uint16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
+ uint32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
+ uint16x8_t __ret;
+ __ret = __noswap_vcombine_u16(__rev0, __noswap_vqmovn_u32(__rev1));
+ __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint32x4_t vqmovn_high_u64(uint32x2_t __p0, uint64x2_t __p1) {
+ uint32x4_t __ret;
+ __ret = vcombine_u32(__p0, vqmovn_u64(__p1));
+ return __ret;
+}
+#else
+__ai uint32x4_t vqmovn_high_u64(uint32x2_t __p0, uint64x2_t __p1) {
+ uint32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
+ uint64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
+ uint32x4_t __ret;
+ __ret = __noswap_vcombine_u32(__rev0, __noswap_vqmovn_u64(__rev1));
+ __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint8x16_t vqmovn_high_u16(uint8x8_t __p0, uint16x8_t __p1) {
+ uint8x16_t __ret;
+ __ret = vcombine_u8(__p0, vqmovn_u16(__p1));
+ return __ret;
+}
+#else
+__ai uint8x16_t vqmovn_high_u16(uint8x8_t __p0, uint16x8_t __p1) {
+ uint8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
+ uint16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
+ uint8x16_t __ret;
+ __ret = __noswap_vcombine_u8(__rev0, __noswap_vqmovn_u16(__rev1));
+ __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai int16x8_t vqmovn_high_s32(int16x4_t __p0, int32x4_t __p1) {
+ int16x8_t __ret;
+ __ret = vcombine_s16(__p0, vqmovn_s32(__p1));
+ return __ret;
+}
+#else
+__ai int16x8_t vqmovn_high_s32(int16x4_t __p0, int32x4_t __p1) {
+ int16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
+ int32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
+ int16x8_t __ret;
+ __ret = __noswap_vcombine_s16(__rev0, __noswap_vqmovn_s32(__rev1));
+ __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai int32x4_t vqmovn_high_s64(int32x2_t __p0, int64x2_t __p1) {
+ int32x4_t __ret;
+ __ret = vcombine_s32(__p0, vqmovn_s64(__p1));
+ return __ret;
+}
+#else
+__ai int32x4_t vqmovn_high_s64(int32x2_t __p0, int64x2_t __p1) {
+ int32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
+ int64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
+ int32x4_t __ret;
+ __ret = __noswap_vcombine_s32(__rev0, __noswap_vqmovn_s64(__rev1));
+ __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai int8x16_t vqmovn_high_s16(int8x8_t __p0, int16x8_t __p1) {
+ int8x16_t __ret;
+ __ret = vcombine_s8(__p0, vqmovn_s16(__p1));
+ return __ret;
+}
+#else
+__ai int8x16_t vqmovn_high_s16(int8x8_t __p0, int16x8_t __p1) {
+ int8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
+ int16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
+ int8x16_t __ret;
+ __ret = __noswap_vcombine_s8(__rev0, __noswap_vqmovn_s16(__rev1));
+ __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
+ return __ret;
+}
+#endif
+
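+/* Usage sketch for the vqmovn_high_* forms above: the saturate-narrowed
+ * lanes of __p1 land in the high half of the result, with __p0 kept as the
+ * low half (illustrative values):
+ *
+ *   int16x4_t lo   = vdup_n_s16(1);
+ *   int32x4_t wide = vdupq_n_s32(0x12345);
+ *   int16x8_t r    = vqmovn_high_s32(lo, wide);  // {1,1,1,1, 0x7FFF x4}
+ */
+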
+#ifdef __LITTLE_ENDIAN__
+__ai int16_t vqmovuns_s32(int32_t __p0) {
+ int16_t __ret;
+ __ret = (int16_t) __builtin_neon_vqmovuns_s32(__p0);
+ return __ret;
+}
+#else
+__ai int16_t vqmovuns_s32(int32_t __p0) {
+ int16_t __ret;
+ __ret = (int16_t) __builtin_neon_vqmovuns_s32(__p0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai int32_t vqmovund_s64(int64_t __p0) {
+ int32_t __ret;
+ __ret = (int32_t) __builtin_neon_vqmovund_s64(__p0);
+ return __ret;
+}
+#else
+__ai int32_t vqmovund_s64(int64_t __p0) {
+ int32_t __ret;
+ __ret = (int32_t) __builtin_neon_vqmovund_s64(__p0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai int8_t vqmovunh_s16(int16_t __p0) {
+ int8_t __ret;
+ __ret = (int8_t) __builtin_neon_vqmovunh_s16(__p0);
+ return __ret;
+}
+#else
+__ai int8_t vqmovunh_s16(int16_t __p0) {
+ int8_t __ret;
+ __ret = (int8_t) __builtin_neon_vqmovunh_s16(__p0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai int16x8_t vqmovun_high_s32(int16x4_t __p0, int32x4_t __p1) {
+ int16x8_t __ret;
+ __ret = vcombine_u16((uint16x4_t)(__p0), vqmovun_s32(__p1));
+ return __ret;
+}
+#else
+__ai int16x8_t vqmovun_high_s32(int16x4_t __p0, int32x4_t __p1) {
+ int16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
+ int32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
+ int16x8_t __ret;
+ __ret = __noswap_vcombine_u16((uint16x4_t)(__rev0), __noswap_vqmovun_s32(__rev1));
+ __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai int32x4_t vqmovun_high_s64(int32x2_t __p0, int64x2_t __p1) {
+ int32x4_t __ret;
+ __ret = vcombine_u32((uint32x2_t)(__p0), vqmovun_s64(__p1));
+ return __ret;
+}
+#else
+__ai int32x4_t vqmovun_high_s64(int32x2_t __p0, int64x2_t __p1) {
+ int32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
+ int64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
+ int32x4_t __ret;
+ __ret = __noswap_vcombine_u32((uint32x2_t)(__rev0), __noswap_vqmovun_s64(__rev1));
+ __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai int8x16_t vqmovun_high_s16(int8x8_t __p0, int16x8_t __p1) {
+ int8x16_t __ret;
+ __ret = vcombine_u8((uint8x8_t)(__p0), vqmovun_s16(__p1));
+ return __ret;
+}
+#else
+__ai int8x16_t vqmovun_high_s16(int8x8_t __p0, int16x8_t __p1) {
+ int8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
+ int16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
+ int8x16_t __ret;
+ __ret = __noswap_vcombine_u8((uint8x8_t)(__rev0), __noswap_vqmovun_s16(__rev1));
+ __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
+ return __ret;
+}
+#endif
+
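+/* Usage sketch for the signed-to-unsigned saturating narrows above: inputs
+ * are clamped to the unsigned range of the narrower type, so negatives
+ * come out as 0 (illustrative values):
+ *
+ *   vqmovuns_s32(-7)       -> 0
+ *   vqmovuns_s32(0x12345)  -> 0xFFFF   (clamped to the u16 maximum)
+ */
+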
+#ifdef __LITTLE_ENDIAN__
+__ai int64x2_t vqnegq_s64(int64x2_t __p0) {
+ int64x2_t __ret;
+ __ret = (int64x2_t) __builtin_neon_vqnegq_v((int8x16_t)__p0, 35);
+ return __ret;
+}
+#else
+__ai int64x2_t vqnegq_s64(int64x2_t __p0) {
+ int64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
+ int64x2_t __ret;
+ __ret = (int64x2_t) __builtin_neon_vqnegq_v((int8x16_t)__rev0, 35);
+ __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai int64x1_t vqneg_s64(int64x1_t __p0) {
+ int64x1_t __ret;
+ __ret = (int64x1_t) __builtin_neon_vqneg_v((int8x8_t)__p0, 3);
+ return __ret;
+}
+#else
+__ai int64x1_t vqneg_s64(int64x1_t __p0) {
+ int64x1_t __ret;
+ __ret = (int64x1_t) __builtin_neon_vqneg_v((int8x8_t)__p0, 3);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai int8_t vqnegb_s8(int8_t __p0) {
+ int8_t __ret;
+ __ret = (int8_t) __builtin_neon_vqnegb_s8(__p0);
+ return __ret;
+}
+#else
+__ai int8_t vqnegb_s8(int8_t __p0) {
+ int8_t __ret;
+ __ret = (int8_t) __builtin_neon_vqnegb_s8(__p0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai int32_t vqnegs_s32(int32_t __p0) {
+ int32_t __ret;
+ __ret = (int32_t) __builtin_neon_vqnegs_s32(__p0);
+ return __ret;
+}
+#else
+__ai int32_t vqnegs_s32(int32_t __p0) {
+ int32_t __ret;
+ __ret = (int32_t) __builtin_neon_vqnegs_s32(__p0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai int64_t vqnegd_s64(int64_t __p0) {
+ int64_t __ret;
+ __ret = (int64_t) __builtin_neon_vqnegd_s64(__p0);
+ return __ret;
+}
+#else
+__ai int64_t vqnegd_s64(int64_t __p0) {
+ int64_t __ret;
+ __ret = (int64_t) __builtin_neon_vqnegd_s64(__p0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai int16_t vqnegh_s16(int16_t __p0) {
+ int16_t __ret;
+ __ret = (int16_t) __builtin_neon_vqnegh_s16(__p0);
+ return __ret;
+}
+#else
+__ai int16_t vqnegh_s16(int16_t __p0) {
+ int16_t __ret;
+ __ret = (int16_t) __builtin_neon_vqnegh_s16(__p0);
+ return __ret;
+}
+#endif
+
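+/* Usage sketch for the saturating negates above: unlike plain negation,
+ * the most negative value clamps instead of wrapping (illustrative values):
+ *
+ *   vqnegs_s32(5)          -> -5
+ *   vqnegs_s32(INT32_MIN)  -> INT32_MAX   (plain negation would wrap back
+ *                                          to INT32_MIN)
+ */
+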
+#ifdef __LITTLE_ENDIAN__
+__ai int32_t vqrdmulhs_s32(int32_t __p0, int32_t __p1) {
+ int32_t __ret;
+ __ret = (int32_t) __builtin_neon_vqrdmulhs_s32(__p0, __p1);
+ return __ret;
+}
+#else
+__ai int32_t vqrdmulhs_s32(int32_t __p0, int32_t __p1) {
+ int32_t __ret;
+ __ret = (int32_t) __builtin_neon_vqrdmulhs_s32(__p0, __p1);
+ return __ret;
+}
+__ai int32_t __noswap_vqrdmulhs_s32(int32_t __p0, int32_t __p1) {
+ int32_t __ret;
+ __ret = (int32_t) __builtin_neon_vqrdmulhs_s32(__p0, __p1);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai int16_t vqrdmulhh_s16(int16_t __p0, int16_t __p1) {
+ int16_t __ret;
+ __ret = (int16_t) __builtin_neon_vqrdmulhh_s16(__p0, __p1);
+ return __ret;
+}
+#else
+__ai int16_t vqrdmulhh_s16(int16_t __p0, int16_t __p1) {
+ int16_t __ret;
+ __ret = (int16_t) __builtin_neon_vqrdmulhh_s16(__p0, __p1);
+ return __ret;
+}
+__ai int16_t __noswap_vqrdmulhh_s16(int16_t __p0, int16_t __p1) {
+ int16_t __ret;
+ __ret = (int16_t) __builtin_neon_vqrdmulhh_s16(__p0, __p1);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vqrdmulhs_lane_s32(__p0_162, __p1_162, __p2_162) __extension__ ({ \
+ int32_t __s0_162 = __p0_162; \
+ int32x2_t __s1_162 = __p1_162; \
+ int32_t __ret_162; \
+ __ret_162 = vqrdmulhs_s32(__s0_162, vget_lane_s32(__s1_162, __p2_162)); \
+ __ret_162; \
+})
+#else
+#define vqrdmulhs_lane_s32(__p0_163, __p1_163, __p2_163) __extension__ ({ \
+ int32_t __s0_163 = __p0_163; \
+ int32x2_t __s1_163 = __p1_163; \
+ int32x2_t __rev1_163; __rev1_163 = __builtin_shufflevector(__s1_163, __s1_163, 1, 0); \
+ int32_t __ret_163; \
+ __ret_163 = __noswap_vqrdmulhs_s32(__s0_163, __noswap_vget_lane_s32(__rev1_163, __p2_163)); \
+ __ret_163; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vqrdmulhh_lane_s16(__p0_164, __p1_164, __p2_164) __extension__ ({ \
+ int16_t __s0_164 = __p0_164; \
+ int16x4_t __s1_164 = __p1_164; \
+ int16_t __ret_164; \
+ __ret_164 = vqrdmulhh_s16(__s0_164, vget_lane_s16(__s1_164, __p2_164)); \
+ __ret_164; \
+})
+#else
+#define vqrdmulhh_lane_s16(__p0_165, __p1_165, __p2_165) __extension__ ({ \
+ int16_t __s0_165 = __p0_165; \
+ int16x4_t __s1_165 = __p1_165; \
+ int16x4_t __rev1_165; __rev1_165 = __builtin_shufflevector(__s1_165, __s1_165, 3, 2, 1, 0); \
+ int16_t __ret_165; \
+ __ret_165 = __noswap_vqrdmulhh_s16(__s0_165, __noswap_vget_lane_s16(__rev1_165, __p2_165)); \
+ __ret_165; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vqrdmulhs_laneq_s32(__p0_166, __p1_166, __p2_166) __extension__ ({ \
+ int32_t __s0_166 = __p0_166; \
+ int32x4_t __s1_166 = __p1_166; \
+ int32_t __ret_166; \
+ __ret_166 = vqrdmulhs_s32(__s0_166, vgetq_lane_s32(__s1_166, __p2_166)); \
+ __ret_166; \
+})
+#else
+#define vqrdmulhs_laneq_s32(__p0_167, __p1_167, __p2_167) __extension__ ({ \
+ int32_t __s0_167 = __p0_167; \
+ int32x4_t __s1_167 = __p1_167; \
+ int32x4_t __rev1_167; __rev1_167 = __builtin_shufflevector(__s1_167, __s1_167, 3, 2, 1, 0); \
+ int32_t __ret_167; \
+ __ret_167 = __noswap_vqrdmulhs_s32(__s0_167, __noswap_vgetq_lane_s32(__rev1_167, __p2_167)); \
+ __ret_167; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vqrdmulhh_laneq_s16(__p0_168, __p1_168, __p2_168) __extension__ ({ \
+ int16_t __s0_168 = __p0_168; \
+ int16x8_t __s1_168 = __p1_168; \
+ int16_t __ret_168; \
+ __ret_168 = vqrdmulhh_s16(__s0_168, vgetq_lane_s16(__s1_168, __p2_168)); \
+ __ret_168; \
+})
+#else
+#define vqrdmulhh_laneq_s16(__p0_169, __p1_169, __p2_169) __extension__ ({ \
+ int16_t __s0_169 = __p0_169; \
+ int16x8_t __s1_169 = __p1_169; \
+ int16x8_t __rev1_169; __rev1_169 = __builtin_shufflevector(__s1_169, __s1_169, 7, 6, 5, 4, 3, 2, 1, 0); \
+ int16_t __ret_169; \
+ __ret_169 = __noswap_vqrdmulhh_s16(__s0_169, __noswap_vgetq_lane_s16(__rev1_169, __p2_169)); \
+ __ret_169; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vqrdmulhq_laneq_s32(__p0, __p1, __p2) __extension__ ({ \
+ int32x4_t __s0 = __p0; \
+ int32x4_t __s1 = __p1; \
+ int32x4_t __ret; \
+ __ret = vqrdmulhq_s32(__s0, __builtin_shufflevector(__s1, __s1, __p2, __p2, __p2, __p2)); \
+ __ret; \
+})
+#else
+#define vqrdmulhq_laneq_s32(__p0, __p1, __p2) __extension__ ({ \
+ int32x4_t __s0 = __p0; \
+ int32x4_t __s1 = __p1; \
+ int32x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
+ int32x4_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
+ int32x4_t __ret; \
+ __ret = __noswap_vqrdmulhq_s32(__rev0, __builtin_shufflevector(__rev1, __rev1, __p2, __p2, __p2, __p2)); \
+ __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
+ __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vqrdmulhq_laneq_s16(__p0, __p1, __p2) __extension__ ({ \
+ int16x8_t __s0 = __p0; \
+ int16x8_t __s1 = __p1; \
+ int16x8_t __ret; \
+ __ret = vqrdmulhq_s16(__s0, __builtin_shufflevector(__s1, __s1, __p2, __p2, __p2, __p2, __p2, __p2, __p2, __p2)); \
+ __ret; \
+})
+#else
+#define vqrdmulhq_laneq_s16(__p0, __p1, __p2) __extension__ ({ \
+ int16x8_t __s0 = __p0; \
+ int16x8_t __s1 = __p1; \
+ int16x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
+ int16x8_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \
+ int16x8_t __ret; \
+ __ret = __noswap_vqrdmulhq_s16(__rev0, __builtin_shufflevector(__rev1, __rev1, __p2, __p2, __p2, __p2, __p2, __p2, __p2, __p2)); \
+ __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
+ __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vqrdmulh_laneq_s32(__p0, __p1, __p2) __extension__ ({ \
+ int32x2_t __s0 = __p0; \
+ int32x4_t __s1 = __p1; \
+ int32x2_t __ret; \
+ __ret = vqrdmulh_s32(__s0, __builtin_shufflevector(__s1, __s1, __p2, __p2)); \
+ __ret; \
+})
+#else
+#define vqrdmulh_laneq_s32(__p0, __p1, __p2) __extension__ ({ \
+ int32x2_t __s0 = __p0; \
+ int32x4_t __s1 = __p1; \
+ int32x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
+ int32x4_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
+ int32x2_t __ret; \
+ __ret = __noswap_vqrdmulh_s32(__rev0, __builtin_shufflevector(__rev1, __rev1, __p2, __p2)); \
+ __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
+ __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vqrdmulh_laneq_s16(__p0, __p1, __p2) __extension__ ({ \
+ int16x4_t __s0 = __p0; \
+ int16x8_t __s1 = __p1; \
+ int16x4_t __ret; \
+ __ret = vqrdmulh_s16(__s0, __builtin_shufflevector(__s1, __s1, __p2, __p2, __p2, __p2)); \
+ __ret; \
+})
+#else
+#define vqrdmulh_laneq_s16(__p0, __p1, __p2) __extension__ ({ \
+ int16x4_t __s0 = __p0; \
+ int16x8_t __s1 = __p1; \
+ int16x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
+ int16x8_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \
+ int16x4_t __ret; \
+ __ret = __noswap_vqrdmulh_s16(__rev0, __builtin_shufflevector(__rev1, __rev1, __p2, __p2, __p2, __p2)); \
+ __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
+ __ret; \
+})
+#endif
+
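+/* Usage sketch for the rounding doubling multiply-high forms above: this
+ * is the usual Q31/Q15 fixed-point multiply, sat((2*a*b + (1<<31)) >> 32)
+ * for the 32-bit variant (illustrative values):
+ *
+ *   vqrdmulhs_s32(0x40000000, 0x40000000) -> 0x20000000  (0.5*0.5 = 0.25 in Q31)
+ *   vqrdmulhs_s32(INT32_MIN, INT32_MIN)   -> INT32_MAX   (the one saturating case)
+ */
+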
+#ifdef __LITTLE_ENDIAN__
+__ai uint8_t vqrshlb_u8(uint8_t __p0, uint8_t __p1) {
+ uint8_t __ret;
+ __ret = (uint8_t) __builtin_neon_vqrshlb_u8(__p0, __p1);
+ return __ret;
+}
+#else
+__ai uint8_t vqrshlb_u8(uint8_t __p0, uint8_t __p1) {
+ uint8_t __ret;
+ __ret = (uint8_t) __builtin_neon_vqrshlb_u8(__p0, __p1);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint32_t vqrshls_u32(uint32_t __p0, uint32_t __p1) {
+ uint32_t __ret;
+ __ret = (uint32_t) __builtin_neon_vqrshls_u32(__p0, __p1);
+ return __ret;
+}
+#else
+__ai uint32_t vqrshls_u32(uint32_t __p0, uint32_t __p1) {
+ uint32_t __ret;
+ __ret = (uint32_t) __builtin_neon_vqrshls_u32(__p0, __p1);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint64_t vqrshld_u64(uint64_t __p0, uint64_t __p1) {
+ uint64_t __ret;
+ __ret = (uint64_t) __builtin_neon_vqrshld_u64(__p0, __p1);
+ return __ret;
+}
+#else
+__ai uint64_t vqrshld_u64(uint64_t __p0, uint64_t __p1) {
+ uint64_t __ret;
+ __ret = (uint64_t) __builtin_neon_vqrshld_u64(__p0, __p1);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint16_t vqrshlh_u16(uint16_t __p0, uint16_t __p1) {
+ uint16_t __ret;
+ __ret = (uint16_t) __builtin_neon_vqrshlh_u16(__p0, __p1);
+ return __ret;
+}
+#else
+__ai uint16_t vqrshlh_u16(uint16_t __p0, uint16_t __p1) {
+ uint16_t __ret;
+ __ret = (uint16_t) __builtin_neon_vqrshlh_u16(__p0, __p1);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai int8_t vqrshlb_s8(int8_t __p0, int8_t __p1) {
+ int8_t __ret;
+ __ret = (int8_t) __builtin_neon_vqrshlb_s8(__p0, __p1);
+ return __ret;
+}
+#else
+__ai int8_t vqrshlb_s8(int8_t __p0, int8_t __p1) {
+ int8_t __ret;
+ __ret = (int8_t) __builtin_neon_vqrshlb_s8(__p0, __p1);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai int32_t vqrshls_s32(int32_t __p0, int32_t __p1) {
+ int32_t __ret;
+ __ret = (int32_t) __builtin_neon_vqrshls_s32(__p0, __p1);
+ return __ret;
+}
+#else
+__ai int32_t vqrshls_s32(int32_t __p0, int32_t __p1) {
+ int32_t __ret;
+ __ret = (int32_t) __builtin_neon_vqrshls_s32(__p0, __p1);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai int64_t vqrshld_s64(int64_t __p0, int64_t __p1) {
+ int64_t __ret;
+ __ret = (int64_t) __builtin_neon_vqrshld_s64(__p0, __p1);
+ return __ret;
+}
+#else
+__ai int64_t vqrshld_s64(int64_t __p0, int64_t __p1) {
+ int64_t __ret;
+ __ret = (int64_t) __builtin_neon_vqrshld_s64(__p0, __p1);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai int16_t vqrshlh_s16(int16_t __p0, int16_t __p1) {
+ int16_t __ret;
+ __ret = (int16_t) __builtin_neon_vqrshlh_s16(__p0, __p1);
+ return __ret;
+}
+#else
+__ai int16_t vqrshlh_s16(int16_t __p0, int16_t __p1) {
+ int16_t __ret;
+ __ret = (int16_t) __builtin_neon_vqrshlh_s16(__p0, __p1);
+ return __ret;
+}
+#endif
+
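+/* Usage sketch for the scalar saturating rounding shifts above: the shift
+ * count is a signed runtime value, so a negative count is a rounding shift
+ * right (illustrative values):
+ *
+ *   vqrshls_s32(5, -1)          -> 3            ((5 + 1) >> 1, rounded)
+ *   vqrshls_s32(0x40000000, 1)  -> 0x7FFFFFFF   (left shift overflows, clamps)
+ */
+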
+#ifdef __LITTLE_ENDIAN__
+#define vqrshrn_high_n_u32(__p0_170, __p1_170, __p2_170) __extension__ ({ \
+ uint16x4_t __s0_170 = __p0_170; \
+ uint32x4_t __s1_170 = __p1_170; \
+ uint16x8_t __ret_170; \
+ __ret_170 = (uint16x8_t)(vcombine_u16((uint16x4_t)(__s0_170), (uint16x4_t)(vqrshrn_n_u32(__s1_170, __p2_170)))); \
+ __ret_170; \
+})
+#else
+#define vqrshrn_high_n_u32(__p0_171, __p1_171, __p2_171) __extension__ ({ \
+ uint16x4_t __s0_171 = __p0_171; \
+ uint32x4_t __s1_171 = __p1_171; \
+ uint16x4_t __rev0_171; __rev0_171 = __builtin_shufflevector(__s0_171, __s0_171, 3, 2, 1, 0); \
+ uint32x4_t __rev1_171; __rev1_171 = __builtin_shufflevector(__s1_171, __s1_171, 3, 2, 1, 0); \
+ uint16x8_t __ret_171; \
+ __ret_171 = (uint16x8_t)(__noswap_vcombine_u16((uint16x4_t)(__rev0_171), (uint16x4_t)(__noswap_vqrshrn_n_u32(__rev1_171, __p2_171)))); \
+ __ret_171 = __builtin_shufflevector(__ret_171, __ret_171, 7, 6, 5, 4, 3, 2, 1, 0); \
+ __ret_171; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vqrshrn_high_n_u64(__p0_172, __p1_172, __p2_172) __extension__ ({ \
+ uint32x2_t __s0_172 = __p0_172; \
+ uint64x2_t __s1_172 = __p1_172; \
+ uint32x4_t __ret_172; \
+ __ret_172 = (uint32x4_t)(vcombine_u32((uint32x2_t)(__s0_172), (uint32x2_t)(vqrshrn_n_u64(__s1_172, __p2_172)))); \
+ __ret_172; \
+})
+#else
+#define vqrshrn_high_n_u64(__p0_173, __p1_173, __p2_173) __extension__ ({ \
+ uint32x2_t __s0_173 = __p0_173; \
+ uint64x2_t __s1_173 = __p1_173; \
+ uint32x2_t __rev0_173; __rev0_173 = __builtin_shufflevector(__s0_173, __s0_173, 1, 0); \
+ uint64x2_t __rev1_173; __rev1_173 = __builtin_shufflevector(__s1_173, __s1_173, 1, 0); \
+ uint32x4_t __ret_173; \
+ __ret_173 = (uint32x4_t)(__noswap_vcombine_u32((uint32x2_t)(__rev0_173), (uint32x2_t)(__noswap_vqrshrn_n_u64(__rev1_173, __p2_173)))); \
+ __ret_173 = __builtin_shufflevector(__ret_173, __ret_173, 3, 2, 1, 0); \
+ __ret_173; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vqrshrn_high_n_u16(__p0_174, __p1_174, __p2_174) __extension__ ({ \
+ uint8x8_t __s0_174 = __p0_174; \
+ uint16x8_t __s1_174 = __p1_174; \
+ uint8x16_t __ret_174; \
+ __ret_174 = (uint8x16_t)(vcombine_u8((uint8x8_t)(__s0_174), (uint8x8_t)(vqrshrn_n_u16(__s1_174, __p2_174)))); \
+ __ret_174; \
+})
+#else
+#define vqrshrn_high_n_u16(__p0_175, __p1_175, __p2_175) __extension__ ({ \
+ uint8x8_t __s0_175 = __p0_175; \
+ uint16x8_t __s1_175 = __p1_175; \
+ uint8x8_t __rev0_175; __rev0_175 = __builtin_shufflevector(__s0_175, __s0_175, 7, 6, 5, 4, 3, 2, 1, 0); \
+ uint16x8_t __rev1_175; __rev1_175 = __builtin_shufflevector(__s1_175, __s1_175, 7, 6, 5, 4, 3, 2, 1, 0); \
+ uint8x16_t __ret_175; \
+ __ret_175 = (uint8x16_t)(__noswap_vcombine_u8((uint8x8_t)(__rev0_175), (uint8x8_t)(__noswap_vqrshrn_n_u16(__rev1_175, __p2_175)))); \
+ __ret_175 = __builtin_shufflevector(__ret_175, __ret_175, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
+ __ret_175; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vqrshrn_high_n_s32(__p0_176, __p1_176, __p2_176) __extension__ ({ \
+ int16x4_t __s0_176 = __p0_176; \
+ int32x4_t __s1_176 = __p1_176; \
+ int16x8_t __ret_176; \
+ __ret_176 = (int16x8_t)(vcombine_s16((int16x4_t)(__s0_176), (int16x4_t)(vqrshrn_n_s32(__s1_176, __p2_176)))); \
+ __ret_176; \
+})
+#else
+#define vqrshrn_high_n_s32(__p0_177, __p1_177, __p2_177) __extension__ ({ \
+ int16x4_t __s0_177 = __p0_177; \
+ int32x4_t __s1_177 = __p1_177; \
+ int16x4_t __rev0_177; __rev0_177 = __builtin_shufflevector(__s0_177, __s0_177, 3, 2, 1, 0); \
+ int32x4_t __rev1_177; __rev1_177 = __builtin_shufflevector(__s1_177, __s1_177, 3, 2, 1, 0); \
+ int16x8_t __ret_177; \
+ __ret_177 = (int16x8_t)(__noswap_vcombine_s16((int16x4_t)(__rev0_177), (int16x4_t)(__noswap_vqrshrn_n_s32(__rev1_177, __p2_177)))); \
+ __ret_177 = __builtin_shufflevector(__ret_177, __ret_177, 7, 6, 5, 4, 3, 2, 1, 0); \
+ __ret_177; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vqrshrn_high_n_s64(__p0_178, __p1_178, __p2_178) __extension__ ({ \
+ int32x2_t __s0_178 = __p0_178; \
+ int64x2_t __s1_178 = __p1_178; \
+ int32x4_t __ret_178; \
+ __ret_178 = (int32x4_t)(vcombine_s32((int32x2_t)(__s0_178), (int32x2_t)(vqrshrn_n_s64(__s1_178, __p2_178)))); \
+ __ret_178; \
+})
+#else
+#define vqrshrn_high_n_s64(__p0_179, __p1_179, __p2_179) __extension__ ({ \
+ int32x2_t __s0_179 = __p0_179; \
+ int64x2_t __s1_179 = __p1_179; \
+ int32x2_t __rev0_179; __rev0_179 = __builtin_shufflevector(__s0_179, __s0_179, 1, 0); \
+ int64x2_t __rev1_179; __rev1_179 = __builtin_shufflevector(__s1_179, __s1_179, 1, 0); \
+ int32x4_t __ret_179; \
+ __ret_179 = (int32x4_t)(__noswap_vcombine_s32((int32x2_t)(__rev0_179), (int32x2_t)(__noswap_vqrshrn_n_s64(__rev1_179, __p2_179)))); \
+ __ret_179 = __builtin_shufflevector(__ret_179, __ret_179, 3, 2, 1, 0); \
+ __ret_179; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vqrshrn_high_n_s16(__p0_180, __p1_180, __p2_180) __extension__ ({ \
+ int8x8_t __s0_180 = __p0_180; \
+ int16x8_t __s1_180 = __p1_180; \
+ int8x16_t __ret_180; \
+ __ret_180 = (int8x16_t)(vcombine_s8((int8x8_t)(__s0_180), (int8x8_t)(vqrshrn_n_s16(__s1_180, __p2_180)))); \
+ __ret_180; \
+})
+#else
+#define vqrshrn_high_n_s16(__p0_181, __p1_181, __p2_181) __extension__ ({ \
+ int8x8_t __s0_181 = __p0_181; \
+ int16x8_t __s1_181 = __p1_181; \
+ int8x8_t __rev0_181; __rev0_181 = __builtin_shufflevector(__s0_181, __s0_181, 7, 6, 5, 4, 3, 2, 1, 0); \
+ int16x8_t __rev1_181; __rev1_181 = __builtin_shufflevector(__s1_181, __s1_181, 7, 6, 5, 4, 3, 2, 1, 0); \
+ int8x16_t __ret_181; \
+ __ret_181 = (int8x16_t)(__noswap_vcombine_s8((int8x8_t)(__rev0_181), (int8x8_t)(__noswap_vqrshrn_n_s16(__rev1_181, __p2_181)))); \
+ __ret_181 = __builtin_shufflevector(__ret_181, __ret_181, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
+ __ret_181; \
+})
+#endif
+
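+/* Usage sketch for the vqrshrn_high_n_* forms above: __p1 is shifted right
+ * by the immediate with rounding, saturate-narrowed, and packed into the
+ * high lanes on top of __p0 (illustrative values):
+ *
+ *   int16x4_t lo   = vdup_n_s16(0);
+ *   int32x4_t wide = vdupq_n_s32(1000);
+ *   int16x8_t r    = vqrshrn_high_n_s32(lo, wide, 4);  // {0,0,0,0, 63 x4}
+ */
+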
+#ifdef __LITTLE_ENDIAN__
+#define vqrshrns_n_u32(__p0, __p1) __extension__ ({ \
+ uint32_t __s0 = __p0; \
+ uint16_t __ret; \
+ __ret = (uint16_t) __builtin_neon_vqrshrns_n_u32(__s0, __p1); \
+ __ret; \
+})
+#else
+#define vqrshrns_n_u32(__p0, __p1) __extension__ ({ \
+ uint32_t __s0 = __p0; \
+ uint16_t __ret; \
+ __ret = (uint16_t) __builtin_neon_vqrshrns_n_u32(__s0, __p1); \
+ __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vqrshrnd_n_u64(__p0, __p1) __extension__ ({ \
+ uint64_t __s0 = __p0; \
+ uint32_t __ret; \
+ __ret = (uint32_t) __builtin_neon_vqrshrnd_n_u64(__s0, __p1); \
+ __ret; \
+})
+#else
+#define vqrshrnd_n_u64(__p0, __p1) __extension__ ({ \
+ uint64_t __s0 = __p0; \
+ uint32_t __ret; \
+ __ret = (uint32_t) __builtin_neon_vqrshrnd_n_u64(__s0, __p1); \
+ __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vqrshrnh_n_u16(__p0, __p1) __extension__ ({ \
+ uint16_t __s0 = __p0; \
+ uint8_t __ret; \
+ __ret = (uint8_t) __builtin_neon_vqrshrnh_n_u16(__s0, __p1); \
+ __ret; \
+})
+#else
+#define vqrshrnh_n_u16(__p0, __p1) __extension__ ({ \
+ uint16_t __s0 = __p0; \
+ uint8_t __ret; \
+ __ret = (uint8_t) __builtin_neon_vqrshrnh_n_u16(__s0, __p1); \
+ __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vqrshrns_n_s32(__p0, __p1) __extension__ ({ \
+ int32_t __s0 = __p0; \
+ int16_t __ret; \
+ __ret = (int16_t) __builtin_neon_vqrshrns_n_s32(__s0, __p1); \
+ __ret; \
+})
+#else
+#define vqrshrns_n_s32(__p0, __p1) __extension__ ({ \
+ int32_t __s0 = __p0; \
+ int16_t __ret; \
+ __ret = (int16_t) __builtin_neon_vqrshrns_n_s32(__s0, __p1); \
+ __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vqrshrnd_n_s64(__p0, __p1) __extension__ ({ \
+ int64_t __s0 = __p0; \
+ int32_t __ret; \
+ __ret = (int32_t) __builtin_neon_vqrshrnd_n_s64(__s0, __p1); \
+ __ret; \
+})
+#else
+#define vqrshrnd_n_s64(__p0, __p1) __extension__ ({ \
+ int64_t __s0 = __p0; \
+ int32_t __ret; \
+ __ret = (int32_t) __builtin_neon_vqrshrnd_n_s64(__s0, __p1); \
+ __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vqrshrnh_n_s16(__p0, __p1) __extension__ ({ \
+ int16_t __s0 = __p0; \
+ int8_t __ret; \
+ __ret = (int8_t) __builtin_neon_vqrshrnh_n_s16(__s0, __p1); \
+ __ret; \
+})
+#else
+#define vqrshrnh_n_s16(__p0, __p1) __extension__ ({ \
+ int16_t __s0 = __p0; \
+ int8_t __ret; \
+ __ret = (int8_t) __builtin_neon_vqrshrnh_n_s16(__s0, __p1); \
+ __ret; \
+})
+#endif
+
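+/* Usage sketch for the scalar rounding shift-right-narrows above, i.e.
+ * sat((x + (1 << (n-1))) >> n) in the narrower type (illustrative values):
+ *
+ *   vqrshrns_n_s32(1000, 4)    -> 63       ((1000 + 8) >> 4; the non-rounding
+ *                                           vqshrns_n_s32 would give 62)
+ *   vqrshrns_n_s32(1 << 30, 4) -> 0x7FFF   (still too wide after the shift, clamps)
+ */
+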
+#ifdef __LITTLE_ENDIAN__
+#define vqrshrun_high_n_s32(__p0_182, __p1_182, __p2_182) __extension__ ({ \
+ int16x4_t __s0_182 = __p0_182; \
+ int32x4_t __s1_182 = __p1_182; \
+ int16x8_t __ret_182; \
+ __ret_182 = (int16x8_t)(vcombine_s16((int16x4_t)(__s0_182), (int16x4_t)(vqrshrun_n_s32(__s1_182, __p2_182)))); \
+ __ret_182; \
+})
+#else
+#define vqrshrun_high_n_s32(__p0_183, __p1_183, __p2_183) __extension__ ({ \
+ int16x4_t __s0_183 = __p0_183; \
+ int32x4_t __s1_183 = __p1_183; \
+ int16x4_t __rev0_183; __rev0_183 = __builtin_shufflevector(__s0_183, __s0_183, 3, 2, 1, 0); \
+ int32x4_t __rev1_183; __rev1_183 = __builtin_shufflevector(__s1_183, __s1_183, 3, 2, 1, 0); \
+ int16x8_t __ret_183; \
+ __ret_183 = (int16x8_t)(__noswap_vcombine_s16((int16x4_t)(__rev0_183), (int16x4_t)(__noswap_vqrshrun_n_s32(__rev1_183, __p2_183)))); \
+ __ret_183 = __builtin_shufflevector(__ret_183, __ret_183, 7, 6, 5, 4, 3, 2, 1, 0); \
+ __ret_183; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vqrshrun_high_n_s64(__p0_184, __p1_184, __p2_184) __extension__ ({ \
+ int32x2_t __s0_184 = __p0_184; \
+ int64x2_t __s1_184 = __p1_184; \
+ int32x4_t __ret_184; \
+ __ret_184 = (int32x4_t)(vcombine_s32((int32x2_t)(__s0_184), (int32x2_t)(vqrshrun_n_s64(__s1_184, __p2_184)))); \
+ __ret_184; \
+})
+#else
+#define vqrshrun_high_n_s64(__p0_185, __p1_185, __p2_185) __extension__ ({ \
+ int32x2_t __s0_185 = __p0_185; \
+ int64x2_t __s1_185 = __p1_185; \
+ int32x2_t __rev0_185; __rev0_185 = __builtin_shufflevector(__s0_185, __s0_185, 1, 0); \
+ int64x2_t __rev1_185; __rev1_185 = __builtin_shufflevector(__s1_185, __s1_185, 1, 0); \
+ int32x4_t __ret_185; \
+ __ret_185 = (int32x4_t)(__noswap_vcombine_s32((int32x2_t)(__rev0_185), (int32x2_t)(__noswap_vqrshrun_n_s64(__rev1_185, __p2_185)))); \
+ __ret_185 = __builtin_shufflevector(__ret_185, __ret_185, 3, 2, 1, 0); \
+ __ret_185; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vqrshrun_high_n_s16(__p0_186, __p1_186, __p2_186) __extension__ ({ \
+ int8x8_t __s0_186 = __p0_186; \
+ int16x8_t __s1_186 = __p1_186; \
+ int8x16_t __ret_186; \
+ __ret_186 = (int8x16_t)(vcombine_s8((int8x8_t)(__s0_186), (int8x8_t)(vqrshrun_n_s16(__s1_186, __p2_186)))); \
+ __ret_186; \
+})
+#else
+#define vqrshrun_high_n_s16(__p0_187, __p1_187, __p2_187) __extension__ ({ \
+ int8x8_t __s0_187 = __p0_187; \
+ int16x8_t __s1_187 = __p1_187; \
+ int8x8_t __rev0_187; __rev0_187 = __builtin_shufflevector(__s0_187, __s0_187, 7, 6, 5, 4, 3, 2, 1, 0); \
+ int16x8_t __rev1_187; __rev1_187 = __builtin_shufflevector(__s1_187, __s1_187, 7, 6, 5, 4, 3, 2, 1, 0); \
+ int8x16_t __ret_187; \
+ __ret_187 = (int8x16_t)(__noswap_vcombine_s8((int8x8_t)(__rev0_187), (int8x8_t)(__noswap_vqrshrun_n_s16(__rev1_187, __p2_187)))); \
+ __ret_187 = __builtin_shufflevector(__ret_187, __ret_187, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
+ __ret_187; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vqrshruns_n_s32(__p0, __p1) __extension__ ({ \
+ int32_t __s0 = __p0; \
+ int16_t __ret; \
+ __ret = (int16_t) __builtin_neon_vqrshruns_n_s32(__s0, __p1); \
+ __ret; \
+})
+#else
+#define vqrshruns_n_s32(__p0, __p1) __extension__ ({ \
+ int32_t __s0 = __p0; \
+ int16_t __ret; \
+ __ret = (int16_t) __builtin_neon_vqrshruns_n_s32(__s0, __p1); \
+ __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vqrshrund_n_s64(__p0, __p1) __extension__ ({ \
+ int64_t __s0 = __p0; \
+ int32_t __ret; \
+ __ret = (int32_t) __builtin_neon_vqrshrund_n_s64(__s0, __p1); \
+ __ret; \
+})
+#else
+#define vqrshrund_n_s64(__p0, __p1) __extension__ ({ \
+ int64_t __s0 = __p0; \
+ int32_t __ret; \
+ __ret = (int32_t) __builtin_neon_vqrshrund_n_s64(__s0, __p1); \
+ __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vqrshrunh_n_s16(__p0, __p1) __extension__ ({ \
+ int16_t __s0 = __p0; \
+ int8_t __ret; \
+ __ret = (int8_t) __builtin_neon_vqrshrunh_n_s16(__s0, __p1); \
+ __ret; \
+})
+#else
+#define vqrshrunh_n_s16(__p0, __p1) __extension__ ({ \
+ int16_t __s0 = __p0; \
+ int8_t __ret; \
+ __ret = (int8_t) __builtin_neon_vqrshrunh_n_s16(__s0, __p1); \
+ __ret; \
+})
+#endif
+
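+/* Usage sketch for the signed-to-unsigned rounding shift-right-narrows
+ * above: like vqrshrn but clamped to the unsigned range, so negative
+ * inputs come out as 0 (illustrative values):
+ *
+ *   vqrshruns_n_s32(1000, 4)   -> 63
+ *   vqrshruns_n_s32(-1000, 4)  -> 0
+ */
+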
+#ifdef __LITTLE_ENDIAN__
+__ai uint8_t vqshlb_u8(uint8_t __p0, uint8_t __p1) {
+ uint8_t __ret;
+ __ret = (uint8_t) __builtin_neon_vqshlb_u8(__p0, __p1);
+ return __ret;
+}
+#else
+__ai uint8_t vqshlb_u8(uint8_t __p0, uint8_t __p1) {
+ uint8_t __ret;
+ __ret = (uint8_t) __builtin_neon_vqshlb_u8(__p0, __p1);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint32_t vqshls_u32(uint32_t __p0, uint32_t __p1) {
+ uint32_t __ret;
+ __ret = (uint32_t) __builtin_neon_vqshls_u32(__p0, __p1);
+ return __ret;
+}
+#else
+__ai uint32_t vqshls_u32(uint32_t __p0, uint32_t __p1) {
+ uint32_t __ret;
+ __ret = (uint32_t) __builtin_neon_vqshls_u32(__p0, __p1);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint64_t vqshld_u64(uint64_t __p0, uint64_t __p1) {
+ uint64_t __ret;
+ __ret = (uint64_t) __builtin_neon_vqshld_u64(__p0, __p1);
+ return __ret;
+}
+#else
+__ai uint64_t vqshld_u64(uint64_t __p0, uint64_t __p1) {
+ uint64_t __ret;
+ __ret = (uint64_t) __builtin_neon_vqshld_u64(__p0, __p1);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint16_t vqshlh_u16(uint16_t __p0, uint16_t __p1) {
+ uint16_t __ret;
+ __ret = (uint16_t) __builtin_neon_vqshlh_u16(__p0, __p1);
+ return __ret;
+}
+#else
+__ai uint16_t vqshlh_u16(uint16_t __p0, uint16_t __p1) {
+ uint16_t __ret;
+ __ret = (uint16_t) __builtin_neon_vqshlh_u16(__p0, __p1);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai int8_t vqshlb_s8(int8_t __p0, int8_t __p1) {
+ int8_t __ret;
+ __ret = (int8_t) __builtin_neon_vqshlb_s8(__p0, __p1);
+ return __ret;
+}
+#else
+__ai int8_t vqshlb_s8(int8_t __p0, int8_t __p1) {
+ int8_t __ret;
+ __ret = (int8_t) __builtin_neon_vqshlb_s8(__p0, __p1);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai int32_t vqshls_s32(int32_t __p0, int32_t __p1) {
+ int32_t __ret;
+ __ret = (int32_t) __builtin_neon_vqshls_s32(__p0, __p1);
+ return __ret;
+}
+#else
+__ai int32_t vqshls_s32(int32_t __p0, int32_t __p1) {
+ int32_t __ret;
+ __ret = (int32_t) __builtin_neon_vqshls_s32(__p0, __p1);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai int64_t vqshld_s64(int64_t __p0, int64_t __p1) {
+ int64_t __ret;
+ __ret = (int64_t) __builtin_neon_vqshld_s64(__p0, __p1);
+ return __ret;
+}
+#else
+__ai int64_t vqshld_s64(int64_t __p0, int64_t __p1) {
+ int64_t __ret;
+ __ret = (int64_t) __builtin_neon_vqshld_s64(__p0, __p1);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai int16_t vqshlh_s16(int16_t __p0, int16_t __p1) {
+ int16_t __ret;
+ __ret = (int16_t) __builtin_neon_vqshlh_s16(__p0, __p1);
+ return __ret;
+}
+#else
+__ai int16_t vqshlh_s16(int16_t __p0, int16_t __p1) {
+ int16_t __ret;
+ __ret = (int16_t) __builtin_neon_vqshlh_s16(__p0, __p1);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vqshlb_n_u8(__p0, __p1) __extension__ ({ \
+ uint8_t __s0 = __p0; \
+ uint8_t __ret; \
+ __ret = (uint8_t) __builtin_neon_vqshlb_n_u8(__s0, __p1); \
+ __ret; \
+})
+#else
+#define vqshlb_n_u8(__p0, __p1) __extension__ ({ \
+ uint8_t __s0 = __p0; \
+ uint8_t __ret; \
+ __ret = (uint8_t) __builtin_neon_vqshlb_n_u8(__s0, __p1); \
+ __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vqshls_n_u32(__p0, __p1) __extension__ ({ \
+ uint32_t __s0 = __p0; \
+ uint32_t __ret; \
+ __ret = (uint32_t) __builtin_neon_vqshls_n_u32(__s0, __p1); \
+ __ret; \
+})
+#else
+#define vqshls_n_u32(__p0, __p1) __extension__ ({ \
+ uint32_t __s0 = __p0; \
+ uint32_t __ret; \
+ __ret = (uint32_t) __builtin_neon_vqshls_n_u32(__s0, __p1); \
+ __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vqshld_n_u64(__p0, __p1) __extension__ ({ \
+ uint64_t __s0 = __p0; \
+ uint64_t __ret; \
+ __ret = (uint64_t) __builtin_neon_vqshld_n_u64(__s0, __p1); \
+ __ret; \
+})
+#else
+#define vqshld_n_u64(__p0, __p1) __extension__ ({ \
+ uint64_t __s0 = __p0; \
+ uint64_t __ret; \
+ __ret = (uint64_t) __builtin_neon_vqshld_n_u64(__s0, __p1); \
+ __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vqshlh_n_u16(__p0, __p1) __extension__ ({ \
+ uint16_t __s0 = __p0; \
+ uint16_t __ret; \
+ __ret = (uint16_t) __builtin_neon_vqshlh_n_u16(__s0, __p1); \
+ __ret; \
+})
+#else
+#define vqshlh_n_u16(__p0, __p1) __extension__ ({ \
+ uint16_t __s0 = __p0; \
+ uint16_t __ret; \
+ __ret = (uint16_t) __builtin_neon_vqshlh_n_u16(__s0, __p1); \
+ __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vqshlb_n_s8(__p0, __p1) __extension__ ({ \
+ int8_t __s0 = __p0; \
+ int8_t __ret; \
+ __ret = (int8_t) __builtin_neon_vqshlb_n_s8(__s0, __p1); \
+ __ret; \
+})
+#else
+#define vqshlb_n_s8(__p0, __p1) __extension__ ({ \
+ int8_t __s0 = __p0; \
+ int8_t __ret; \
+ __ret = (int8_t) __builtin_neon_vqshlb_n_s8(__s0, __p1); \
+ __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vqshls_n_s32(__p0, __p1) __extension__ ({ \
+ int32_t __s0 = __p0; \
+ int32_t __ret; \
+ __ret = (int32_t) __builtin_neon_vqshls_n_s32(__s0, __p1); \
+ __ret; \
+})
+#else
+#define vqshls_n_s32(__p0, __p1) __extension__ ({ \
+ int32_t __s0 = __p0; \
+ int32_t __ret; \
+ __ret = (int32_t) __builtin_neon_vqshls_n_s32(__s0, __p1); \
+ __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vqshld_n_s64(__p0, __p1) __extension__ ({ \
+ int64_t __s0 = __p0; \
+ int64_t __ret; \
+ __ret = (int64_t) __builtin_neon_vqshld_n_s64(__s0, __p1); \
+ __ret; \
+})
+#else
+#define vqshld_n_s64(__p0, __p1) __extension__ ({ \
+ int64_t __s0 = __p0; \
+ int64_t __ret; \
+ __ret = (int64_t) __builtin_neon_vqshld_n_s64(__s0, __p1); \
+ __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vqshlh_n_s16(__p0, __p1) __extension__ ({ \
+ int16_t __s0 = __p0; \
+ int16_t __ret; \
+ __ret = (int16_t) __builtin_neon_vqshlh_n_s16(__s0, __p1); \
+ __ret; \
+})
+#else
+#define vqshlh_n_s16(__p0, __p1) __extension__ ({ \
+ int16_t __s0 = __p0; \
+ int16_t __ret; \
+ __ret = (int16_t) __builtin_neon_vqshlh_n_s16(__s0, __p1); \
+ __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vqshlub_n_s8(__p0, __p1) __extension__ ({ \
+ int8_t __s0 = __p0; \
+ int8_t __ret; \
+ __ret = (int8_t) __builtin_neon_vqshlub_n_s8(__s0, __p1); \
+ __ret; \
+})
+#else
+#define vqshlub_n_s8(__p0, __p1) __extension__ ({ \
+ int8_t __s0 = __p0; \
+ int8_t __ret; \
+ __ret = (int8_t) __builtin_neon_vqshlub_n_s8(__s0, __p1); \
+ __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vqshlus_n_s32(__p0, __p1) __extension__ ({ \
+ int32_t __s0 = __p0; \
+ int32_t __ret; \
+ __ret = (int32_t) __builtin_neon_vqshlus_n_s32(__s0, __p1); \
+ __ret; \
+})
+#else
+#define vqshlus_n_s32(__p0, __p1) __extension__ ({ \
+ int32_t __s0 = __p0; \
+ int32_t __ret; \
+ __ret = (int32_t) __builtin_neon_vqshlus_n_s32(__s0, __p1); \
+ __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vqshlud_n_s64(__p0, __p1) __extension__ ({ \
+ int64_t __s0 = __p0; \
+ int64_t __ret; \
+ __ret = (int64_t) __builtin_neon_vqshlud_n_s64(__s0, __p1); \
+ __ret; \
+})
+#else
+#define vqshlud_n_s64(__p0, __p1) __extension__ ({ \
+ int64_t __s0 = __p0; \
+ int64_t __ret; \
+ __ret = (int64_t) __builtin_neon_vqshlud_n_s64(__s0, __p1); \
+ __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vqshluh_n_s16(__p0, __p1) __extension__ ({ \
+ int16_t __s0 = __p0; \
+ int16_t __ret; \
+ __ret = (int16_t) __builtin_neon_vqshluh_n_s16(__s0, __p1); \
+ __ret; \
+})
+#else
+#define vqshluh_n_s16(__p0, __p1) __extension__ ({ \
+ int16_t __s0 = __p0; \
+ int16_t __ret; \
+ __ret = (int16_t) __builtin_neon_vqshluh_n_s16(__s0, __p1); \
+ __ret; \
+})
+#endif
+
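+/* Usage sketch for the immediate saturating left shifts above; the vqshlu*
+ * forms take a signed input but saturate to the unsigned range
+ * (illustrative values):
+ *
+ *   vqshls_n_s32(3, 2)           -> 12
+ *   vqshls_n_s32(0x20000000, 2)  -> 0x7FFFFFFF   (2^29 << 2 overflows, clamps)
+ *   vqshlus_n_s32(-1, 3)         -> 0            (negative clamps to zero)
+ */
+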
+#ifdef __LITTLE_ENDIAN__
+#define vqshrn_high_n_u32(__p0_188, __p1_188, __p2_188) __extension__ ({ \
+ uint16x4_t __s0_188 = __p0_188; \
+ uint32x4_t __s1_188 = __p1_188; \
+ uint16x8_t __ret_188; \
+ __ret_188 = (uint16x8_t)(vcombine_u16((uint16x4_t)(__s0_188), (uint16x4_t)(vqshrn_n_u32(__s1_188, __p2_188)))); \
+ __ret_188; \
+})
+#else
+#define vqshrn_high_n_u32(__p0_189, __p1_189, __p2_189) __extension__ ({ \
+ uint16x4_t __s0_189 = __p0_189; \
+ uint32x4_t __s1_189 = __p1_189; \
+ uint16x4_t __rev0_189; __rev0_189 = __builtin_shufflevector(__s0_189, __s0_189, 3, 2, 1, 0); \
+ uint32x4_t __rev1_189; __rev1_189 = __builtin_shufflevector(__s1_189, __s1_189, 3, 2, 1, 0); \
+ uint16x8_t __ret_189; \
+ __ret_189 = (uint16x8_t)(__noswap_vcombine_u16((uint16x4_t)(__rev0_189), (uint16x4_t)(__noswap_vqshrn_n_u32(__rev1_189, __p2_189)))); \
+ __ret_189 = __builtin_shufflevector(__ret_189, __ret_189, 7, 6, 5, 4, 3, 2, 1, 0); \
+ __ret_189; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vqshrn_high_n_u64(__p0_190, __p1_190, __p2_190) __extension__ ({ \
+ uint32x2_t __s0_190 = __p0_190; \
+ uint64x2_t __s1_190 = __p1_190; \
+ uint32x4_t __ret_190; \
+ __ret_190 = (uint32x4_t)(vcombine_u32((uint32x2_t)(__s0_190), (uint32x2_t)(vqshrn_n_u64(__s1_190, __p2_190)))); \
+ __ret_190; \
+})
+#else
+#define vqshrn_high_n_u64(__p0_191, __p1_191, __p2_191) __extension__ ({ \
+ uint32x2_t __s0_191 = __p0_191; \
+ uint64x2_t __s1_191 = __p1_191; \
+ uint32x2_t __rev0_191; __rev0_191 = __builtin_shufflevector(__s0_191, __s0_191, 1, 0); \
+ uint64x2_t __rev1_191; __rev1_191 = __builtin_shufflevector(__s1_191, __s1_191, 1, 0); \
+ uint32x4_t __ret_191; \
+ __ret_191 = (uint32x4_t)(__noswap_vcombine_u32((uint32x2_t)(__rev0_191), (uint32x2_t)(__noswap_vqshrn_n_u64(__rev1_191, __p2_191)))); \
+ __ret_191 = __builtin_shufflevector(__ret_191, __ret_191, 3, 2, 1, 0); \
+ __ret_191; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vqshrn_high_n_u16(__p0_192, __p1_192, __p2_192) __extension__ ({ \
+ uint8x8_t __s0_192 = __p0_192; \
+ uint16x8_t __s1_192 = __p1_192; \
+ uint8x16_t __ret_192; \
+ __ret_192 = (uint8x16_t)(vcombine_u8((uint8x8_t)(__s0_192), (uint8x8_t)(vqshrn_n_u16(__s1_192, __p2_192)))); \
+ __ret_192; \
+})
+#else
+#define vqshrn_high_n_u16(__p0_193, __p1_193, __p2_193) __extension__ ({ \
+ uint8x8_t __s0_193 = __p0_193; \
+ uint16x8_t __s1_193 = __p1_193; \
+ uint8x8_t __rev0_193; __rev0_193 = __builtin_shufflevector(__s0_193, __s0_193, 7, 6, 5, 4, 3, 2, 1, 0); \
+ uint16x8_t __rev1_193; __rev1_193 = __builtin_shufflevector(__s1_193, __s1_193, 7, 6, 5, 4, 3, 2, 1, 0); \
+ uint8x16_t __ret_193; \
+ __ret_193 = (uint8x16_t)(__noswap_vcombine_u8((uint8x8_t)(__rev0_193), (uint8x8_t)(__noswap_vqshrn_n_u16(__rev1_193, __p2_193)))); \
+ __ret_193 = __builtin_shufflevector(__ret_193, __ret_193, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
+ __ret_193; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vqshrn_high_n_s32(__p0_194, __p1_194, __p2_194) __extension__ ({ \
+ int16x4_t __s0_194 = __p0_194; \
+ int32x4_t __s1_194 = __p1_194; \
+ int16x8_t __ret_194; \
+ __ret_194 = (int16x8_t)(vcombine_s16((int16x4_t)(__s0_194), (int16x4_t)(vqshrn_n_s32(__s1_194, __p2_194)))); \
+ __ret_194; \
+})
+#else
+#define vqshrn_high_n_s32(__p0_195, __p1_195, __p2_195) __extension__ ({ \
+ int16x4_t __s0_195 = __p0_195; \
+ int32x4_t __s1_195 = __p1_195; \
+ int16x4_t __rev0_195; __rev0_195 = __builtin_shufflevector(__s0_195, __s0_195, 3, 2, 1, 0); \
+ int32x4_t __rev1_195; __rev1_195 = __builtin_shufflevector(__s1_195, __s1_195, 3, 2, 1, 0); \
+ int16x8_t __ret_195; \
+ __ret_195 = (int16x8_t)(__noswap_vcombine_s16((int16x4_t)(__rev0_195), (int16x4_t)(__noswap_vqshrn_n_s32(__rev1_195, __p2_195)))); \
+ __ret_195 = __builtin_shufflevector(__ret_195, __ret_195, 7, 6, 5, 4, 3, 2, 1, 0); \
+ __ret_195; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vqshrn_high_n_s64(__p0_196, __p1_196, __p2_196) __extension__ ({ \
+ int32x2_t __s0_196 = __p0_196; \
+ int64x2_t __s1_196 = __p1_196; \
+ int32x4_t __ret_196; \
+ __ret_196 = (int32x4_t)(vcombine_s32((int32x2_t)(__s0_196), (int32x2_t)(vqshrn_n_s64(__s1_196, __p2_196)))); \
+ __ret_196; \
+})
+#else
+#define vqshrn_high_n_s64(__p0_197, __p1_197, __p2_197) __extension__ ({ \
+ int32x2_t __s0_197 = __p0_197; \
+ int64x2_t __s1_197 = __p1_197; \
+ int32x2_t __rev0_197; __rev0_197 = __builtin_shufflevector(__s0_197, __s0_197, 1, 0); \
+ int64x2_t __rev1_197; __rev1_197 = __builtin_shufflevector(__s1_197, __s1_197, 1, 0); \
+ int32x4_t __ret_197; \
+ __ret_197 = (int32x4_t)(__noswap_vcombine_s32((int32x2_t)(__rev0_197), (int32x2_t)(__noswap_vqshrn_n_s64(__rev1_197, __p2_197)))); \
+ __ret_197 = __builtin_shufflevector(__ret_197, __ret_197, 3, 2, 1, 0); \
+ __ret_197; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vqshrn_high_n_s16(__p0_198, __p1_198, __p2_198) __extension__ ({ \
+ int8x8_t __s0_198 = __p0_198; \
+ int16x8_t __s1_198 = __p1_198; \
+ int8x16_t __ret_198; \
+ __ret_198 = (int8x16_t)(vcombine_s8((int8x8_t)(__s0_198), (int8x8_t)(vqshrn_n_s16(__s1_198, __p2_198)))); \
+ __ret_198; \
+})
+#else
+#define vqshrn_high_n_s16(__p0_199, __p1_199, __p2_199) __extension__ ({ \
+ int8x8_t __s0_199 = __p0_199; \
+ int16x8_t __s1_199 = __p1_199; \
+ int8x8_t __rev0_199; __rev0_199 = __builtin_shufflevector(__s0_199, __s0_199, 7, 6, 5, 4, 3, 2, 1, 0); \
+ int16x8_t __rev1_199; __rev1_199 = __builtin_shufflevector(__s1_199, __s1_199, 7, 6, 5, 4, 3, 2, 1, 0); \
+ int8x16_t __ret_199; \
+ __ret_199 = (int8x16_t)(__noswap_vcombine_s8((int8x8_t)(__rev0_199), (int8x8_t)(__noswap_vqshrn_n_s16(__rev1_199, __p2_199)))); \
+ __ret_199 = __builtin_shufflevector(__ret_199, __ret_199, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
+ __ret_199; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vqshrns_n_u32(__p0, __p1) __extension__ ({ \
+ uint32_t __s0 = __p0; \
+ uint16_t __ret; \
+ __ret = (uint16_t) __builtin_neon_vqshrns_n_u32(__s0, __p1); \
+ __ret; \
+})
+#else
+#define vqshrns_n_u32(__p0, __p1) __extension__ ({ \
+ uint32_t __s0 = __p0; \
+ uint16_t __ret; \
+ __ret = (uint16_t) __builtin_neon_vqshrns_n_u32(__s0, __p1); \
+ __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vqshrnd_n_u64(__p0, __p1) __extension__ ({ \
+ uint64_t __s0 = __p0; \
+ uint32_t __ret; \
+ __ret = (uint32_t) __builtin_neon_vqshrnd_n_u64(__s0, __p1); \
+ __ret; \
+})
+#else
+#define vqshrnd_n_u64(__p0, __p1) __extension__ ({ \
+ uint64_t __s0 = __p0; \
+ uint32_t __ret; \
+ __ret = (uint32_t) __builtin_neon_vqshrnd_n_u64(__s0, __p1); \
+ __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vqshrnh_n_u16(__p0, __p1) __extension__ ({ \
+ uint16_t __s0 = __p0; \
+ uint8_t __ret; \
+ __ret = (uint8_t) __builtin_neon_vqshrnh_n_u16(__s0, __p1); \
+ __ret; \
+})
+#else
+#define vqshrnh_n_u16(__p0, __p1) __extension__ ({ \
+ uint16_t __s0 = __p0; \
+ uint8_t __ret; \
+ __ret = (uint8_t) __builtin_neon_vqshrnh_n_u16(__s0, __p1); \
+ __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vqshrns_n_s32(__p0, __p1) __extension__ ({ \
+ int32_t __s0 = __p0; \
+ int16_t __ret; \
+ __ret = (int16_t) __builtin_neon_vqshrns_n_s32(__s0, __p1); \
+ __ret; \
+})
+#else
+#define vqshrns_n_s32(__p0, __p1) __extension__ ({ \
+ int32_t __s0 = __p0; \
+ int16_t __ret; \
+ __ret = (int16_t) __builtin_neon_vqshrns_n_s32(__s0, __p1); \
+ __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vqshrnd_n_s64(__p0, __p1) __extension__ ({ \
+ int64_t __s0 = __p0; \
+ int32_t __ret; \
+ __ret = (int32_t) __builtin_neon_vqshrnd_n_s64(__s0, __p1); \
+ __ret; \
+})
+#else
+#define vqshrnd_n_s64(__p0, __p1) __extension__ ({ \
+ int64_t __s0 = __p0; \
+ int32_t __ret; \
+ __ret = (int32_t) __builtin_neon_vqshrnd_n_s64(__s0, __p1); \
+ __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vqshrnh_n_s16(__p0, __p1) __extension__ ({ \
+ int16_t __s0 = __p0; \
+ int8_t __ret; \
+ __ret = (int8_t) __builtin_neon_vqshrnh_n_s16(__s0, __p1); \
+ __ret; \
+})
+#else
+#define vqshrnh_n_s16(__p0, __p1) __extension__ ({ \
+ int16_t __s0 = __p0; \
+ int8_t __ret; \
+ __ret = (int8_t) __builtin_neon_vqshrnh_n_s16(__s0, __p1); \
+ __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vqshrun_high_n_s32(__p0_200, __p1_200, __p2_200) __extension__ ({ \
+ int16x4_t __s0_200 = __p0_200; \
+ int32x4_t __s1_200 = __p1_200; \
+ int16x8_t __ret_200; \
+ __ret_200 = (int16x8_t)(vcombine_s16((int16x4_t)(__s0_200), (int16x4_t)(vqshrun_n_s32(__s1_200, __p2_200)))); \
+ __ret_200; \
+})
+#else
+#define vqshrun_high_n_s32(__p0_201, __p1_201, __p2_201) __extension__ ({ \
+ int16x4_t __s0_201 = __p0_201; \
+ int32x4_t __s1_201 = __p1_201; \
+ int16x4_t __rev0_201; __rev0_201 = __builtin_shufflevector(__s0_201, __s0_201, 3, 2, 1, 0); \
+ int32x4_t __rev1_201; __rev1_201 = __builtin_shufflevector(__s1_201, __s1_201, 3, 2, 1, 0); \
+ int16x8_t __ret_201; \
+ __ret_201 = (int16x8_t)(__noswap_vcombine_s16((int16x4_t)(__rev0_201), (int16x4_t)(__noswap_vqshrun_n_s32(__rev1_201, __p2_201)))); \
+ __ret_201 = __builtin_shufflevector(__ret_201, __ret_201, 7, 6, 5, 4, 3, 2, 1, 0); \
+ __ret_201; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vqshrun_high_n_s64(__p0_202, __p1_202, __p2_202) __extension__ ({ \
+ int32x2_t __s0_202 = __p0_202; \
+ int64x2_t __s1_202 = __p1_202; \
+ int32x4_t __ret_202; \
+ __ret_202 = (int32x4_t)(vcombine_s32((int32x2_t)(__s0_202), (int32x2_t)(vqshrun_n_s64(__s1_202, __p2_202)))); \
+ __ret_202; \
+})
+#else
+#define vqshrun_high_n_s64(__p0_203, __p1_203, __p2_203) __extension__ ({ \
+ int32x2_t __s0_203 = __p0_203; \
+ int64x2_t __s1_203 = __p1_203; \
+ int32x2_t __rev0_203; __rev0_203 = __builtin_shufflevector(__s0_203, __s0_203, 1, 0); \
+ int64x2_t __rev1_203; __rev1_203 = __builtin_shufflevector(__s1_203, __s1_203, 1, 0); \
+ int32x4_t __ret_203; \
+ __ret_203 = (int32x4_t)(__noswap_vcombine_s32((int32x2_t)(__rev0_203), (int32x2_t)(__noswap_vqshrun_n_s64(__rev1_203, __p2_203)))); \
+ __ret_203 = __builtin_shufflevector(__ret_203, __ret_203, 3, 2, 1, 0); \
+ __ret_203; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vqshrun_high_n_s16(__p0_204, __p1_204, __p2_204) __extension__ ({ \
+ int8x8_t __s0_204 = __p0_204; \
+ int16x8_t __s1_204 = __p1_204; \
+ int8x16_t __ret_204; \
+ __ret_204 = (int8x16_t)(vcombine_s8((int8x8_t)(__s0_204), (int8x8_t)(vqshrun_n_s16(__s1_204, __p2_204)))); \
+ __ret_204; \
+})
+#else
+#define vqshrun_high_n_s16(__p0_205, __p1_205, __p2_205) __extension__ ({ \
+ int8x8_t __s0_205 = __p0_205; \
+ int16x8_t __s1_205 = __p1_205; \
+ int8x8_t __rev0_205; __rev0_205 = __builtin_shufflevector(__s0_205, __s0_205, 7, 6, 5, 4, 3, 2, 1, 0); \
+ int16x8_t __rev1_205; __rev1_205 = __builtin_shufflevector(__s1_205, __s1_205, 7, 6, 5, 4, 3, 2, 1, 0); \
+ int8x16_t __ret_205; \
+ __ret_205 = (int8x16_t)(__noswap_vcombine_s8((int8x8_t)(__rev0_205), (int8x8_t)(__noswap_vqshrun_n_s16(__rev1_205, __p2_205)))); \
+ __ret_205 = __builtin_shufflevector(__ret_205, __ret_205, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
+ __ret_205; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vqshruns_n_s32(__p0, __p1) __extension__ ({ \
+ int32_t __s0 = __p0; \
+ int16_t __ret; \
+ __ret = (int16_t) __builtin_neon_vqshruns_n_s32(__s0, __p1); \
+ __ret; \
+})
+#else
+#define vqshruns_n_s32(__p0, __p1) __extension__ ({ \
+ int32_t __s0 = __p0; \
+ int16_t __ret; \
+ __ret = (int16_t) __builtin_neon_vqshruns_n_s32(__s0, __p1); \
+ __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vqshrund_n_s64(__p0, __p1) __extension__ ({ \
+ int64_t __s0 = __p0; \
+ int32_t __ret; \
+ __ret = (int32_t) __builtin_neon_vqshrund_n_s64(__s0, __p1); \
+ __ret; \
+})
+#else
+#define vqshrund_n_s64(__p0, __p1) __extension__ ({ \
+ int64_t __s0 = __p0; \
+ int32_t __ret; \
+ __ret = (int32_t) __builtin_neon_vqshrund_n_s64(__s0, __p1); \
+ __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vqshrunh_n_s16(__p0, __p1) __extension__ ({ \
+ int16_t __s0 = __p0; \
+ int8_t __ret; \
+ __ret = (int8_t) __builtin_neon_vqshrunh_n_s16(__s0, __p1); \
+ __ret; \
+})
+#else
+#define vqshrunh_n_s16(__p0, __p1) __extension__ ({ \
+ int16_t __s0 = __p0; \
+ int8_t __ret; \
+ __ret = (int8_t) __builtin_neon_vqshrunh_n_s16(__s0, __p1); \
+ __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint8_t vqsubb_u8(uint8_t __p0, uint8_t __p1) {
+ uint8_t __ret;
+ __ret = (uint8_t) __builtin_neon_vqsubb_u8(__p0, __p1);
+ return __ret;
+}
+#else
+__ai uint8_t vqsubb_u8(uint8_t __p0, uint8_t __p1) {
+ uint8_t __ret;
+ __ret = (uint8_t) __builtin_neon_vqsubb_u8(__p0, __p1);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint32_t vqsubs_u32(uint32_t __p0, uint32_t __p1) {
+ uint32_t __ret;
+ __ret = (uint32_t) __builtin_neon_vqsubs_u32(__p0, __p1);
+ return __ret;
+}
+#else
+__ai uint32_t vqsubs_u32(uint32_t __p0, uint32_t __p1) {
+ uint32_t __ret;
+ __ret = (uint32_t) __builtin_neon_vqsubs_u32(__p0, __p1);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint64_t vqsubd_u64(uint64_t __p0, uint64_t __p1) {
+ uint64_t __ret;
+ __ret = (uint64_t) __builtin_neon_vqsubd_u64(__p0, __p1);
+ return __ret;
+}
+#else
+__ai uint64_t vqsubd_u64(uint64_t __p0, uint64_t __p1) {
+ uint64_t __ret;
+ __ret = (uint64_t) __builtin_neon_vqsubd_u64(__p0, __p1);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint16_t vqsubh_u16(uint16_t __p0, uint16_t __p1) {
+ uint16_t __ret;
+ __ret = (uint16_t) __builtin_neon_vqsubh_u16(__p0, __p1);
+ return __ret;
+}
+#else
+__ai uint16_t vqsubh_u16(uint16_t __p0, uint16_t __p1) {
+ uint16_t __ret;
+ __ret = (uint16_t) __builtin_neon_vqsubh_u16(__p0, __p1);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai int8_t vqsubb_s8(int8_t __p0, int8_t __p1) {
+ int8_t __ret;
+ __ret = (int8_t) __builtin_neon_vqsubb_s8(__p0, __p1);
+ return __ret;
+}
+#else
+__ai int8_t vqsubb_s8(int8_t __p0, int8_t __p1) {
+ int8_t __ret;
+ __ret = (int8_t) __builtin_neon_vqsubb_s8(__p0, __p1);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai int32_t vqsubs_s32(int32_t __p0, int32_t __p1) {
+ int32_t __ret;
+ __ret = (int32_t) __builtin_neon_vqsubs_s32(__p0, __p1);
+ return __ret;
+}
+#else
+__ai int32_t vqsubs_s32(int32_t __p0, int32_t __p1) {
+ int32_t __ret;
+ __ret = (int32_t) __builtin_neon_vqsubs_s32(__p0, __p1);
+ return __ret;
+}
+__ai int32_t __noswap_vqsubs_s32(int32_t __p0, int32_t __p1) {
+ int32_t __ret;
+ __ret = (int32_t) __builtin_neon_vqsubs_s32(__p0, __p1);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai int64_t vqsubd_s64(int64_t __p0, int64_t __p1) {
+ int64_t __ret;
+ __ret = (int64_t) __builtin_neon_vqsubd_s64(__p0, __p1);
+ return __ret;
+}
+#else
+__ai int64_t vqsubd_s64(int64_t __p0, int64_t __p1) {
+ int64_t __ret;
+ __ret = (int64_t) __builtin_neon_vqsubd_s64(__p0, __p1);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai int16_t vqsubh_s16(int16_t __p0, int16_t __p1) {
+ int16_t __ret;
+ __ret = (int16_t) __builtin_neon_vqsubh_s16(__p0, __p1);
+ return __ret;
+}
+#else
+__ai int16_t vqsubh_s16(int16_t __p0, int16_t __p1) {
+ int16_t __ret;
+ __ret = (int16_t) __builtin_neon_vqsubh_s16(__p0, __p1);
+ return __ret;
+}
+__ai int16_t __noswap_vqsubh_s16(int16_t __p0, int16_t __p1) {
+ int16_t __ret;
+ __ret = (int16_t) __builtin_neon_vqsubh_s16(__p0, __p1);
+ return __ret;
+}
+#endif
+
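+/*
+ * vqtbl1-vqtbl4: table lookup across one to four 16-byte table registers.
+ * Each index byte selects a byte from the concatenated table; an out-of-range
+ * index produces 0 in that lane. Illustrative use (not part of the generated
+ * header): remapping bytes through a 16-entry LUT with
+ *   uint8x16_t out = vqtbl1q_u8(vld1q_u8(lut), indices);
+ */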
+#ifdef __LITTLE_ENDIAN__
+__ai poly8x8_t vqtbl1_p8(poly8x16_t __p0, uint8x8_t __p1) {
+ poly8x8_t __ret;
+ __ret = (poly8x8_t) __builtin_neon_vqtbl1_v((int8x16_t)__p0, (int8x8_t)__p1, 4);
+ return __ret;
+}
+#else
+__ai poly8x8_t vqtbl1_p8(poly8x16_t __p0, uint8x8_t __p1) {
+ poly8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
+ uint8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
+ poly8x8_t __ret;
+ __ret = (poly8x8_t) __builtin_neon_vqtbl1_v((int8x16_t)__rev0, (int8x8_t)__rev1, 4);
+ __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai poly8x16_t vqtbl1q_p8(poly8x16_t __p0, uint8x16_t __p1) {
+ poly8x16_t __ret;
+ __ret = (poly8x16_t) __builtin_neon_vqtbl1q_v((int8x16_t)__p0, (int8x16_t)__p1, 36);
+ return __ret;
+}
+#else
+__ai poly8x16_t vqtbl1q_p8(poly8x16_t __p0, uint8x16_t __p1) {
+ poly8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
+ uint8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
+ poly8x16_t __ret;
+ __ret = (poly8x16_t) __builtin_neon_vqtbl1q_v((int8x16_t)__rev0, (int8x16_t)__rev1, 36);
+ __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint8x16_t vqtbl1q_u8(uint8x16_t __p0, uint8x16_t __p1) {
+ uint8x16_t __ret;
+ __ret = (uint8x16_t) __builtin_neon_vqtbl1q_v((int8x16_t)__p0, (int8x16_t)__p1, 48);
+ return __ret;
+}
+#else
+__ai uint8x16_t vqtbl1q_u8(uint8x16_t __p0, uint8x16_t __p1) {
+ uint8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
+ uint8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
+ uint8x16_t __ret;
+ __ret = (uint8x16_t) __builtin_neon_vqtbl1q_v((int8x16_t)__rev0, (int8x16_t)__rev1, 48);
+ __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai int8x16_t vqtbl1q_s8(int8x16_t __p0, int8x16_t __p1) {
+ int8x16_t __ret;
+ __ret = (int8x16_t) __builtin_neon_vqtbl1q_v((int8x16_t)__p0, (int8x16_t)__p1, 32);
+ return __ret;
+}
+#else
+__ai int8x16_t vqtbl1q_s8(int8x16_t __p0, int8x16_t __p1) {
+ int8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
+ int8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
+ int8x16_t __ret;
+ __ret = (int8x16_t) __builtin_neon_vqtbl1q_v((int8x16_t)__rev0, (int8x16_t)__rev1, 32);
+ __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint8x8_t vqtbl1_u8(uint8x16_t __p0, uint8x8_t __p1) {
+ uint8x8_t __ret;
+ __ret = (uint8x8_t) __builtin_neon_vqtbl1_v((int8x16_t)__p0, (int8x8_t)__p1, 16);
+ return __ret;
+}
+#else
+__ai uint8x8_t vqtbl1_u8(uint8x16_t __p0, uint8x8_t __p1) {
+ uint8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
+ uint8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
+ uint8x8_t __ret;
+ __ret = (uint8x8_t) __builtin_neon_vqtbl1_v((int8x16_t)__rev0, (int8x8_t)__rev1, 16);
+ __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai int8x8_t vqtbl1_s8(int8x16_t __p0, int8x8_t __p1) {
+ int8x8_t __ret;
+ __ret = (int8x8_t) __builtin_neon_vqtbl1_v((int8x16_t)__p0, (int8x8_t)__p1, 0);
+ return __ret;
+}
+#else
+__ai int8x8_t vqtbl1_s8(int8x16_t __p0, int8x8_t __p1) {
+ int8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
+ int8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
+ int8x8_t __ret;
+ __ret = (int8x8_t) __builtin_neon_vqtbl1_v((int8x16_t)__rev0, (int8x8_t)__rev1, 0);
+ __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai poly8x8_t vqtbl2_p8(poly8x16x2_t __p0, uint8x8_t __p1) {
+ poly8x8_t __ret;
+ __ret = (poly8x8_t) __builtin_neon_vqtbl2_v((int8x16_t)__p0.val[0], (int8x16_t)__p0.val[1], (int8x8_t)__p1, 4);
+ return __ret;
+}
+#else
+__ai poly8x8_t vqtbl2_p8(poly8x16x2_t __p0, uint8x8_t __p1) {
+ poly8x16x2_t __rev0;
+ __rev0.val[0] = __builtin_shufflevector(__p0.val[0], __p0.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
+ __rev0.val[1] = __builtin_shufflevector(__p0.val[1], __p0.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
+ uint8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
+ poly8x8_t __ret;
+ __ret = (poly8x8_t) __builtin_neon_vqtbl2_v((int8x16_t)__rev0.val[0], (int8x16_t)__rev0.val[1], (int8x8_t)__rev1, 4);
+ __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai poly8x16_t vqtbl2q_p8(poly8x16x2_t __p0, uint8x16_t __p1) {
+ poly8x16_t __ret;
+ __ret = (poly8x16_t) __builtin_neon_vqtbl2q_v((int8x16_t)__p0.val[0], (int8x16_t)__p0.val[1], (int8x16_t)__p1, 36);
+ return __ret;
+}
+#else
+__ai poly8x16_t vqtbl2q_p8(poly8x16x2_t __p0, uint8x16_t __p1) {
+ poly8x16x2_t __rev0;
+ __rev0.val[0] = __builtin_shufflevector(__p0.val[0], __p0.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
+ __rev0.val[1] = __builtin_shufflevector(__p0.val[1], __p0.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
+ uint8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
+ poly8x16_t __ret;
+ __ret = (poly8x16_t) __builtin_neon_vqtbl2q_v((int8x16_t)__rev0.val[0], (int8x16_t)__rev0.val[1], (int8x16_t)__rev1, 36);
+ __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint8x16_t vqtbl2q_u8(uint8x16x2_t __p0, uint8x16_t __p1) {
+ uint8x16_t __ret;
+ __ret = (uint8x16_t) __builtin_neon_vqtbl2q_v((int8x16_t)__p0.val[0], (int8x16_t)__p0.val[1], (int8x16_t)__p1, 48);
+ return __ret;
+}
+#else
+__ai uint8x16_t vqtbl2q_u8(uint8x16x2_t __p0, uint8x16_t __p1) {
+ uint8x16x2_t __rev0;
+ __rev0.val[0] = __builtin_shufflevector(__p0.val[0], __p0.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
+ __rev0.val[1] = __builtin_shufflevector(__p0.val[1], __p0.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
+ uint8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
+ uint8x16_t __ret;
+ __ret = (uint8x16_t) __builtin_neon_vqtbl2q_v((int8x16_t)__rev0.val[0], (int8x16_t)__rev0.val[1], (int8x16_t)__rev1, 48);
+ __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai int8x16_t vqtbl2q_s8(int8x16x2_t __p0, int8x16_t __p1) {
+ int8x16_t __ret;
+ __ret = (int8x16_t) __builtin_neon_vqtbl2q_v((int8x16_t)__p0.val[0], (int8x16_t)__p0.val[1], (int8x16_t)__p1, 32);
+ return __ret;
+}
+#else
+__ai int8x16_t vqtbl2q_s8(int8x16x2_t __p0, int8x16_t __p1) {
+ int8x16x2_t __rev0;
+ __rev0.val[0] = __builtin_shufflevector(__p0.val[0], __p0.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
+ __rev0.val[1] = __builtin_shufflevector(__p0.val[1], __p0.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
+ int8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
+ int8x16_t __ret;
+ __ret = (int8x16_t) __builtin_neon_vqtbl2q_v((int8x16_t)__rev0.val[0], (int8x16_t)__rev0.val[1], (int8x16_t)__rev1, 32);
+ __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint8x8_t vqtbl2_u8(uint8x16x2_t __p0, uint8x8_t __p1) {
+ uint8x8_t __ret;
+ __ret = (uint8x8_t) __builtin_neon_vqtbl2_v((int8x16_t)__p0.val[0], (int8x16_t)__p0.val[1], (int8x8_t)__p1, 16);
+ return __ret;
+}
+#else
+__ai uint8x8_t vqtbl2_u8(uint8x16x2_t __p0, uint8x8_t __p1) {
+ uint8x16x2_t __rev0;
+ __rev0.val[0] = __builtin_shufflevector(__p0.val[0], __p0.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
+ __rev0.val[1] = __builtin_shufflevector(__p0.val[1], __p0.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
+ uint8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
+ uint8x8_t __ret;
+ __ret = (uint8x8_t) __builtin_neon_vqtbl2_v((int8x16_t)__rev0.val[0], (int8x16_t)__rev0.val[1], (int8x8_t)__rev1, 16);
+ __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai int8x8_t vqtbl2_s8(int8x16x2_t __p0, int8x8_t __p1) {
+ int8x8_t __ret;
+ __ret = (int8x8_t) __builtin_neon_vqtbl2_v((int8x16_t)__p0.val[0], (int8x16_t)__p0.val[1], (int8x8_t)__p1, 0);
+ return __ret;
+}
+#else
+__ai int8x8_t vqtbl2_s8(int8x16x2_t __p0, int8x8_t __p1) {
+ int8x16x2_t __rev0;
+ __rev0.val[0] = __builtin_shufflevector(__p0.val[0], __p0.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
+ __rev0.val[1] = __builtin_shufflevector(__p0.val[1], __p0.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
+ int8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
+ int8x8_t __ret;
+ __ret = (int8x8_t) __builtin_neon_vqtbl2_v((int8x16_t)__rev0.val[0], (int8x16_t)__rev0.val[1], (int8x8_t)__rev1, 0);
+ __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai poly8x8_t vqtbl3_p8(poly8x16x3_t __p0, uint8x8_t __p1) {
+ poly8x8_t __ret;
+ __ret = (poly8x8_t) __builtin_neon_vqtbl3_v((int8x16_t)__p0.val[0], (int8x16_t)__p0.val[1], (int8x16_t)__p0.val[2], (int8x8_t)__p1, 4);
+ return __ret;
+}
+#else
+__ai poly8x8_t vqtbl3_p8(poly8x16x3_t __p0, uint8x8_t __p1) {
+ poly8x16x3_t __rev0;
+ __rev0.val[0] = __builtin_shufflevector(__p0.val[0], __p0.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
+ __rev0.val[1] = __builtin_shufflevector(__p0.val[1], __p0.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
+ __rev0.val[2] = __builtin_shufflevector(__p0.val[2], __p0.val[2], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
+ uint8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
+ poly8x8_t __ret;
+ __ret = (poly8x8_t) __builtin_neon_vqtbl3_v((int8x16_t)__rev0.val[0], (int8x16_t)__rev0.val[1], (int8x16_t)__rev0.val[2], (int8x8_t)__rev1, 4);
+ __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai poly8x16_t vqtbl3q_p8(poly8x16x3_t __p0, uint8x16_t __p1) {
+ poly8x16_t __ret;
+ __ret = (poly8x16_t) __builtin_neon_vqtbl3q_v((int8x16_t)__p0.val[0], (int8x16_t)__p0.val[1], (int8x16_t)__p0.val[2], (int8x16_t)__p1, 36);
+ return __ret;
+}
+#else
+__ai poly8x16_t vqtbl3q_p8(poly8x16x3_t __p0, uint8x16_t __p1) {
+ poly8x16x3_t __rev0;
+ __rev0.val[0] = __builtin_shufflevector(__p0.val[0], __p0.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
+ __rev0.val[1] = __builtin_shufflevector(__p0.val[1], __p0.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
+ __rev0.val[2] = __builtin_shufflevector(__p0.val[2], __p0.val[2], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
+ uint8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
+ poly8x16_t __ret;
+ __ret = (poly8x16_t) __builtin_neon_vqtbl3q_v((int8x16_t)__rev0.val[0], (int8x16_t)__rev0.val[1], (int8x16_t)__rev0.val[2], (int8x16_t)__rev1, 36);
+ __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint8x16_t vqtbl3q_u8(uint8x16x3_t __p0, uint8x16_t __p1) {
+ uint8x16_t __ret;
+ __ret = (uint8x16_t) __builtin_neon_vqtbl3q_v((int8x16_t)__p0.val[0], (int8x16_t)__p0.val[1], (int8x16_t)__p0.val[2], (int8x16_t)__p1, 48);
+ return __ret;
+}
+#else
+__ai uint8x16_t vqtbl3q_u8(uint8x16x3_t __p0, uint8x16_t __p1) {
+ uint8x16x3_t __rev0;
+ __rev0.val[0] = __builtin_shufflevector(__p0.val[0], __p0.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
+ __rev0.val[1] = __builtin_shufflevector(__p0.val[1], __p0.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
+ __rev0.val[2] = __builtin_shufflevector(__p0.val[2], __p0.val[2], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
+ uint8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
+ uint8x16_t __ret;
+ __ret = (uint8x16_t) __builtin_neon_vqtbl3q_v((int8x16_t)__rev0.val[0], (int8x16_t)__rev0.val[1], (int8x16_t)__rev0.val[2], (int8x16_t)__rev1, 48);
+ __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai int8x16_t vqtbl3q_s8(int8x16x3_t __p0, int8x16_t __p1) {
+ int8x16_t __ret;
+ __ret = (int8x16_t) __builtin_neon_vqtbl3q_v((int8x16_t)__p0.val[0], (int8x16_t)__p0.val[1], (int8x16_t)__p0.val[2], (int8x16_t)__p1, 32);
+ return __ret;
+}
+#else
+__ai int8x16_t vqtbl3q_s8(int8x16x3_t __p0, int8x16_t __p1) {
+ int8x16x3_t __rev0;
+ __rev0.val[0] = __builtin_shufflevector(__p0.val[0], __p0.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
+ __rev0.val[1] = __builtin_shufflevector(__p0.val[1], __p0.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
+ __rev0.val[2] = __builtin_shufflevector(__p0.val[2], __p0.val[2], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
+ int8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
+ int8x16_t __ret;
+ __ret = (int8x16_t) __builtin_neon_vqtbl3q_v((int8x16_t)__rev0.val[0], (int8x16_t)__rev0.val[1], (int8x16_t)__rev0.val[2], (int8x16_t)__rev1, 32);
+ __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint8x8_t vqtbl3_u8(uint8x16x3_t __p0, uint8x8_t __p1) {
+ uint8x8_t __ret;
+ __ret = (uint8x8_t) __builtin_neon_vqtbl3_v((int8x16_t)__p0.val[0], (int8x16_t)__p0.val[1], (int8x16_t)__p0.val[2], (int8x8_t)__p1, 16);
+ return __ret;
+}
+#else
+__ai uint8x8_t vqtbl3_u8(uint8x16x3_t __p0, uint8x8_t __p1) {
+ uint8x16x3_t __rev0;
+ __rev0.val[0] = __builtin_shufflevector(__p0.val[0], __p0.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
+ __rev0.val[1] = __builtin_shufflevector(__p0.val[1], __p0.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
+ __rev0.val[2] = __builtin_shufflevector(__p0.val[2], __p0.val[2], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
+ uint8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
+ uint8x8_t __ret;
+ __ret = (uint8x8_t) __builtin_neon_vqtbl3_v((int8x16_t)__rev0.val[0], (int8x16_t)__rev0.val[1], (int8x16_t)__rev0.val[2], (int8x8_t)__rev1, 16);
+ __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai int8x8_t vqtbl3_s8(int8x16x3_t __p0, int8x8_t __p1) {
+ int8x8_t __ret;
+ __ret = (int8x8_t) __builtin_neon_vqtbl3_v((int8x16_t)__p0.val[0], (int8x16_t)__p0.val[1], (int8x16_t)__p0.val[2], (int8x8_t)__p1, 0);
+ return __ret;
+}
+#else
+__ai int8x8_t vqtbl3_s8(int8x16x3_t __p0, int8x8_t __p1) {
+ int8x16x3_t __rev0;
+ __rev0.val[0] = __builtin_shufflevector(__p0.val[0], __p0.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
+ __rev0.val[1] = __builtin_shufflevector(__p0.val[1], __p0.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
+ __rev0.val[2] = __builtin_shufflevector(__p0.val[2], __p0.val[2], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
+ int8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
+ int8x8_t __ret;
+ __ret = (int8x8_t) __builtin_neon_vqtbl3_v((int8x16_t)__rev0.val[0], (int8x16_t)__rev0.val[1], (int8x16_t)__rev0.val[2], (int8x8_t)__rev1, 0);
+ __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai poly8x8_t vqtbl4_p8(poly8x16x4_t __p0, uint8x8_t __p1) {
+ poly8x8_t __ret;
+ __ret = (poly8x8_t) __builtin_neon_vqtbl4_v((int8x16_t)__p0.val[0], (int8x16_t)__p0.val[1], (int8x16_t)__p0.val[2], (int8x16_t)__p0.val[3], (int8x8_t)__p1, 4);
+ return __ret;
+}
+#else
+__ai poly8x8_t vqtbl4_p8(poly8x16x4_t __p0, uint8x8_t __p1) {
+ poly8x16x4_t __rev0;
+ __rev0.val[0] = __builtin_shufflevector(__p0.val[0], __p0.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
+ __rev0.val[1] = __builtin_shufflevector(__p0.val[1], __p0.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
+ __rev0.val[2] = __builtin_shufflevector(__p0.val[2], __p0.val[2], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
+ __rev0.val[3] = __builtin_shufflevector(__p0.val[3], __p0.val[3], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
+ uint8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
+ poly8x8_t __ret;
+ __ret = (poly8x8_t) __builtin_neon_vqtbl4_v((int8x16_t)__rev0.val[0], (int8x16_t)__rev0.val[1], (int8x16_t)__rev0.val[2], (int8x16_t)__rev0.val[3], (int8x8_t)__rev1, 4);
+ __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai poly8x16_t vqtbl4q_p8(poly8x16x4_t __p0, uint8x16_t __p1) {
+ poly8x16_t __ret;
+ __ret = (poly8x16_t) __builtin_neon_vqtbl4q_v((int8x16_t)__p0.val[0], (int8x16_t)__p0.val[1], (int8x16_t)__p0.val[2], (int8x16_t)__p0.val[3], (int8x16_t)__p1, 36);
+ return __ret;
+}
+#else
+__ai poly8x16_t vqtbl4q_p8(poly8x16x4_t __p0, uint8x16_t __p1) {
+ poly8x16x4_t __rev0;
+ __rev0.val[0] = __builtin_shufflevector(__p0.val[0], __p0.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
+ __rev0.val[1] = __builtin_shufflevector(__p0.val[1], __p0.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
+ __rev0.val[2] = __builtin_shufflevector(__p0.val[2], __p0.val[2], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
+ __rev0.val[3] = __builtin_shufflevector(__p0.val[3], __p0.val[3], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
+ uint8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
+ poly8x16_t __ret;
+ __ret = (poly8x16_t) __builtin_neon_vqtbl4q_v((int8x16_t)__rev0.val[0], (int8x16_t)__rev0.val[1], (int8x16_t)__rev0.val[2], (int8x16_t)__rev0.val[3], (int8x16_t)__rev1, 36);
+ __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint8x16_t vqtbl4q_u8(uint8x16x4_t __p0, uint8x16_t __p1) {
+ uint8x16_t __ret;
+ __ret = (uint8x16_t) __builtin_neon_vqtbl4q_v((int8x16_t)__p0.val[0], (int8x16_t)__p0.val[1], (int8x16_t)__p0.val[2], (int8x16_t)__p0.val[3], (int8x16_t)__p1, 48);
+ return __ret;
+}
+#else
+__ai uint8x16_t vqtbl4q_u8(uint8x16x4_t __p0, uint8x16_t __p1) {
+ uint8x16x4_t __rev0;
+ __rev0.val[0] = __builtin_shufflevector(__p0.val[0], __p0.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
+ __rev0.val[1] = __builtin_shufflevector(__p0.val[1], __p0.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
+ __rev0.val[2] = __builtin_shufflevector(__p0.val[2], __p0.val[2], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
+ __rev0.val[3] = __builtin_shufflevector(__p0.val[3], __p0.val[3], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
+ uint8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
+ uint8x16_t __ret;
+ __ret = (uint8x16_t) __builtin_neon_vqtbl4q_v((int8x16_t)__rev0.val[0], (int8x16_t)__rev0.val[1], (int8x16_t)__rev0.val[2], (int8x16_t)__rev0.val[3], (int8x16_t)__rev1, 48);
+ __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai int8x16_t vqtbl4q_s8(int8x16x4_t __p0, int8x16_t __p1) {
+ int8x16_t __ret;
+ __ret = (int8x16_t) __builtin_neon_vqtbl4q_v((int8x16_t)__p0.val[0], (int8x16_t)__p0.val[1], (int8x16_t)__p0.val[2], (int8x16_t)__p0.val[3], (int8x16_t)__p1, 32);
+ return __ret;
+}
+#else
+__ai int8x16_t vqtbl4q_s8(int8x16x4_t __p0, int8x16_t __p1) {
+ int8x16x4_t __rev0;
+ __rev0.val[0] = __builtin_shufflevector(__p0.val[0], __p0.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
+ __rev0.val[1] = __builtin_shufflevector(__p0.val[1], __p0.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
+ __rev0.val[2] = __builtin_shufflevector(__p0.val[2], __p0.val[2], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
+ __rev0.val[3] = __builtin_shufflevector(__p0.val[3], __p0.val[3], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
+ int8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
+ int8x16_t __ret;
+ __ret = (int8x16_t) __builtin_neon_vqtbl4q_v((int8x16_t)__rev0.val[0], (int8x16_t)__rev0.val[1], (int8x16_t)__rev0.val[2], (int8x16_t)__rev0.val[3], (int8x16_t)__rev1, 32);
+ __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint8x8_t vqtbl4_u8(uint8x16x4_t __p0, uint8x8_t __p1) {
+ uint8x8_t __ret;
+ __ret = (uint8x8_t) __builtin_neon_vqtbl4_v((int8x16_t)__p0.val[0], (int8x16_t)__p0.val[1], (int8x16_t)__p0.val[2], (int8x16_t)__p0.val[3], (int8x8_t)__p1, 16);
+ return __ret;
+}
+#else
+__ai uint8x8_t vqtbl4_u8(uint8x16x4_t __p0, uint8x8_t __p1) {
+ uint8x16x4_t __rev0;
+ __rev0.val[0] = __builtin_shufflevector(__p0.val[0], __p0.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
+ __rev0.val[1] = __builtin_shufflevector(__p0.val[1], __p0.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
+ __rev0.val[2] = __builtin_shufflevector(__p0.val[2], __p0.val[2], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
+ __rev0.val[3] = __builtin_shufflevector(__p0.val[3], __p0.val[3], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
+ uint8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
+ uint8x8_t __ret;
+ __ret = (uint8x8_t) __builtin_neon_vqtbl4_v((int8x16_t)__rev0.val[0], (int8x16_t)__rev0.val[1], (int8x16_t)__rev0.val[2], (int8x16_t)__rev0.val[3], (int8x8_t)__rev1, 16);
+ __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai int8x8_t vqtbl4_s8(int8x16x4_t __p0, int8x8_t __p1) {
+ int8x8_t __ret;
+ __ret = (int8x8_t) __builtin_neon_vqtbl4_v((int8x16_t)__p0.val[0], (int8x16_t)__p0.val[1], (int8x16_t)__p0.val[2], (int8x16_t)__p0.val[3], (int8x8_t)__p1, 0);
+ return __ret;
+}
+#else
+__ai int8x8_t vqtbl4_s8(int8x16x4_t __p0, int8x8_t __p1) {
+ int8x16x4_t __rev0;
+ __rev0.val[0] = __builtin_shufflevector(__p0.val[0], __p0.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
+ __rev0.val[1] = __builtin_shufflevector(__p0.val[1], __p0.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
+ __rev0.val[2] = __builtin_shufflevector(__p0.val[2], __p0.val[2], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
+ __rev0.val[3] = __builtin_shufflevector(__p0.val[3], __p0.val[3], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
+ int8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
+ int8x8_t __ret;
+ __ret = (int8x8_t) __builtin_neon_vqtbl4_v((int8x16_t)__rev0.val[0], (int8x16_t)__rev0.val[1], (int8x16_t)__rev0.val[2], (int8x16_t)__rev0.val[3], (int8x8_t)__rev1, 0);
+ __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
+ return __ret;
+}
+#endif
+
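+/*
+ * vqtbx1-vqtbx4: table lookup extension. Same lookup as vqtbl, except that an
+ * out-of-range index leaves the corresponding lane of the destination operand
+ * __p0 unchanged instead of zeroing it.
+ */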
+#ifdef __LITTLE_ENDIAN__
+__ai poly8x8_t vqtbx1_p8(poly8x8_t __p0, poly8x16_t __p1, uint8x8_t __p2) {
+ poly8x8_t __ret;
+ __ret = (poly8x8_t) __builtin_neon_vqtbx1_v((int8x8_t)__p0, (int8x16_t)__p1, (int8x8_t)__p2, 4);
+ return __ret;
+}
+#else
+__ai poly8x8_t vqtbx1_p8(poly8x8_t __p0, poly8x16_t __p1, uint8x8_t __p2) {
+ poly8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
+ poly8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
+ uint8x8_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0);
+ poly8x8_t __ret;
+ __ret = (poly8x8_t) __builtin_neon_vqtbx1_v((int8x8_t)__rev0, (int8x16_t)__rev1, (int8x8_t)__rev2, 4);
+ __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai poly8x16_t vqtbx1q_p8(poly8x16_t __p0, poly8x16_t __p1, uint8x16_t __p2) {
+ poly8x16_t __ret;
+ __ret = (poly8x16_t) __builtin_neon_vqtbx1q_v((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 36);
+ return __ret;
+}
+#else
+__ai poly8x16_t vqtbx1q_p8(poly8x16_t __p0, poly8x16_t __p1, uint8x16_t __p2) {
+ poly8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
+ poly8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
+ uint8x16_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
+ poly8x16_t __ret;
+ __ret = (poly8x16_t) __builtin_neon_vqtbx1q_v((int8x16_t)__rev0, (int8x16_t)__rev1, (int8x16_t)__rev2, 36);
+ __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint8x16_t vqtbx1q_u8(uint8x16_t __p0, uint8x16_t __p1, uint8x16_t __p2) {
+ uint8x16_t __ret;
+ __ret = (uint8x16_t) __builtin_neon_vqtbx1q_v((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 48);
+ return __ret;
+}
+#else
+__ai uint8x16_t vqtbx1q_u8(uint8x16_t __p0, uint8x16_t __p1, uint8x16_t __p2) {
+ uint8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
+ uint8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
+ uint8x16_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
+ uint8x16_t __ret;
+ __ret = (uint8x16_t) __builtin_neon_vqtbx1q_v((int8x16_t)__rev0, (int8x16_t)__rev1, (int8x16_t)__rev2, 48);
+ __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai int8x16_t vqtbx1q_s8(int8x16_t __p0, int8x16_t __p1, int8x16_t __p2) {
+ int8x16_t __ret;
+ __ret = (int8x16_t) __builtin_neon_vqtbx1q_v((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 32);
+ return __ret;
+}
+#else
+__ai int8x16_t vqtbx1q_s8(int8x16_t __p0, int8x16_t __p1, int8x16_t __p2) {
+ int8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
+ int8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
+ int8x16_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
+ int8x16_t __ret;
+ __ret = (int8x16_t) __builtin_neon_vqtbx1q_v((int8x16_t)__rev0, (int8x16_t)__rev1, (int8x16_t)__rev2, 32);
+ __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint8x8_t vqtbx1_u8(uint8x8_t __p0, uint8x16_t __p1, uint8x8_t __p2) {
+ uint8x8_t __ret;
+ __ret = (uint8x8_t) __builtin_neon_vqtbx1_v((int8x8_t)__p0, (int8x16_t)__p1, (int8x8_t)__p2, 16);
+ return __ret;
+}
+#else
+__ai uint8x8_t vqtbx1_u8(uint8x8_t __p0, uint8x16_t __p1, uint8x8_t __p2) {
+ uint8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
+ uint8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
+ uint8x8_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0);
+ uint8x8_t __ret;
+ __ret = (uint8x8_t) __builtin_neon_vqtbx1_v((int8x8_t)__rev0, (int8x16_t)__rev1, (int8x8_t)__rev2, 16);
+ __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai int8x8_t vqtbx1_s8(int8x8_t __p0, int8x16_t __p1, int8x8_t __p2) {
+ int8x8_t __ret;
+ __ret = (int8x8_t) __builtin_neon_vqtbx1_v((int8x8_t)__p0, (int8x16_t)__p1, (int8x8_t)__p2, 0);
+ return __ret;
+}
+#else
+__ai int8x8_t vqtbx1_s8(int8x8_t __p0, int8x16_t __p1, int8x8_t __p2) {
+ int8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
+ int8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
+ int8x8_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0);
+ int8x8_t __ret;
+ __ret = (int8x8_t) __builtin_neon_vqtbx1_v((int8x8_t)__rev0, (int8x16_t)__rev1, (int8x8_t)__rev2, 0);
+ __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai poly8x8_t vqtbx2_p8(poly8x8_t __p0, poly8x16x2_t __p1, uint8x8_t __p2) {
+ poly8x8_t __ret;
+ __ret = (poly8x8_t) __builtin_neon_vqtbx2_v((int8x8_t)__p0, (int8x16_t)__p1.val[0], (int8x16_t)__p1.val[1], (int8x8_t)__p2, 4);
+ return __ret;
+}
+#else
+__ai poly8x8_t vqtbx2_p8(poly8x8_t __p0, poly8x16x2_t __p1, uint8x8_t __p2) {
+ poly8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
+ poly8x16x2_t __rev1;
+ __rev1.val[0] = __builtin_shufflevector(__p1.val[0], __p1.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
+ __rev1.val[1] = __builtin_shufflevector(__p1.val[1], __p1.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
+ uint8x8_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0);
+ poly8x8_t __ret;
+ __ret = (poly8x8_t) __builtin_neon_vqtbx2_v((int8x8_t)__rev0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x8_t)__rev2, 4);
+ __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai poly8x16_t vqtbx2q_p8(poly8x16_t __p0, poly8x16x2_t __p1, uint8x16_t __p2) {
+ poly8x16_t __ret;
+ __ret = (poly8x16_t) __builtin_neon_vqtbx2q_v((int8x16_t)__p0, (int8x16_t)__p1.val[0], (int8x16_t)__p1.val[1], (int8x16_t)__p2, 36);
+ return __ret;
+}
+#else
+__ai poly8x16_t vqtbx2q_p8(poly8x16_t __p0, poly8x16x2_t __p1, uint8x16_t __p2) {
+ poly8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
+ poly8x16x2_t __rev1;
+ __rev1.val[0] = __builtin_shufflevector(__p1.val[0], __p1.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
+ __rev1.val[1] = __builtin_shufflevector(__p1.val[1], __p1.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
+ uint8x16_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
+ poly8x16_t __ret;
+ __ret = (poly8x16_t) __builtin_neon_vqtbx2q_v((int8x16_t)__rev0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev2, 36);
+ __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint8x16_t vqtbx2q_u8(uint8x16_t __p0, uint8x16x2_t __p1, uint8x16_t __p2) {
+ uint8x16_t __ret;
+ __ret = (uint8x16_t) __builtin_neon_vqtbx2q_v((int8x16_t)__p0, (int8x16_t)__p1.val[0], (int8x16_t)__p1.val[1], (int8x16_t)__p2, 48);
+ return __ret;
+}
+#else
+__ai uint8x16_t vqtbx2q_u8(uint8x16_t __p0, uint8x16x2_t __p1, uint8x16_t __p2) {
+ uint8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
+ uint8x16x2_t __rev1;
+ __rev1.val[0] = __builtin_shufflevector(__p1.val[0], __p1.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
+ __rev1.val[1] = __builtin_shufflevector(__p1.val[1], __p1.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
+ uint8x16_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
+ uint8x16_t __ret;
+ __ret = (uint8x16_t) __builtin_neon_vqtbx2q_v((int8x16_t)__rev0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev2, 48);
+ __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai int8x16_t vqtbx2q_s8(int8x16_t __p0, int8x16x2_t __p1, int8x16_t __p2) {
+ int8x16_t __ret;
+ __ret = (int8x16_t) __builtin_neon_vqtbx2q_v((int8x16_t)__p0, (int8x16_t)__p1.val[0], (int8x16_t)__p1.val[1], (int8x16_t)__p2, 32);
+ return __ret;
+}
+#else
+__ai int8x16_t vqtbx2q_s8(int8x16_t __p0, int8x16x2_t __p1, int8x16_t __p2) {
+ int8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
+ int8x16x2_t __rev1;
+ __rev1.val[0] = __builtin_shufflevector(__p1.val[0], __p1.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
+ __rev1.val[1] = __builtin_shufflevector(__p1.val[1], __p1.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
+ int8x16_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
+ int8x16_t __ret;
+ __ret = (int8x16_t) __builtin_neon_vqtbx2q_v((int8x16_t)__rev0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev2, 32);
+ __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint8x8_t vqtbx2_u8(uint8x8_t __p0, uint8x16x2_t __p1, uint8x8_t __p2) {
+ uint8x8_t __ret;
+ __ret = (uint8x8_t) __builtin_neon_vqtbx2_v((int8x8_t)__p0, (int8x16_t)__p1.val[0], (int8x16_t)__p1.val[1], (int8x8_t)__p2, 16);
+ return __ret;
+}
+#else
+__ai uint8x8_t vqtbx2_u8(uint8x8_t __p0, uint8x16x2_t __p1, uint8x8_t __p2) {
+ uint8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
+ uint8x16x2_t __rev1;
+ __rev1.val[0] = __builtin_shufflevector(__p1.val[0], __p1.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
+ __rev1.val[1] = __builtin_shufflevector(__p1.val[1], __p1.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
+ uint8x8_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0);
+ uint8x8_t __ret;
+ __ret = (uint8x8_t) __builtin_neon_vqtbx2_v((int8x8_t)__rev0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x8_t)__rev2, 16);
+ __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai int8x8_t vqtbx2_s8(int8x8_t __p0, int8x16x2_t __p1, int8x8_t __p2) {
+ int8x8_t __ret;
+ __ret = (int8x8_t) __builtin_neon_vqtbx2_v((int8x8_t)__p0, (int8x16_t)__p1.val[0], (int8x16_t)__p1.val[1], (int8x8_t)__p2, 0);
+ return __ret;
+}
+#else
+__ai int8x8_t vqtbx2_s8(int8x8_t __p0, int8x16x2_t __p1, int8x8_t __p2) {
+ int8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
+ int8x16x2_t __rev1;
+ __rev1.val[0] = __builtin_shufflevector(__p1.val[0], __p1.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
+ __rev1.val[1] = __builtin_shufflevector(__p1.val[1], __p1.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
+ int8x8_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0);
+ int8x8_t __ret;
+ __ret = (int8x8_t) __builtin_neon_vqtbx2_v((int8x8_t)__rev0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x8_t)__rev2, 0);
+ __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai poly8x8_t vqtbx3_p8(poly8x8_t __p0, poly8x16x3_t __p1, uint8x8_t __p2) {
+ poly8x8_t __ret;
+ __ret = (poly8x8_t) __builtin_neon_vqtbx3_v((int8x8_t)__p0, (int8x16_t)__p1.val[0], (int8x16_t)__p1.val[1], (int8x16_t)__p1.val[2], (int8x8_t)__p2, 4);
+ return __ret;
+}
+#else
+__ai poly8x8_t vqtbx3_p8(poly8x8_t __p0, poly8x16x3_t __p1, uint8x8_t __p2) {
+ poly8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
+ poly8x16x3_t __rev1;
+ __rev1.val[0] = __builtin_shufflevector(__p1.val[0], __p1.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
+ __rev1.val[1] = __builtin_shufflevector(__p1.val[1], __p1.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
+ __rev1.val[2] = __builtin_shufflevector(__p1.val[2], __p1.val[2], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
+ uint8x8_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0);
+ poly8x8_t __ret;
+ __ret = (poly8x8_t) __builtin_neon_vqtbx3_v((int8x8_t)__rev0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], (int8x8_t)__rev2, 4);
+ __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai poly8x16_t vqtbx3q_p8(poly8x16_t __p0, poly8x16x3_t __p1, uint8x16_t __p2) {
+ poly8x16_t __ret;
+ __ret = (poly8x16_t) __builtin_neon_vqtbx3q_v((int8x16_t)__p0, (int8x16_t)__p1.val[0], (int8x16_t)__p1.val[1], (int8x16_t)__p1.val[2], (int8x16_t)__p2, 36);
+ return __ret;
+}
+#else
+__ai poly8x16_t vqtbx3q_p8(poly8x16_t __p0, poly8x16x3_t __p1, uint8x16_t __p2) {
+ poly8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
+ poly8x16x3_t __rev1;
+ __rev1.val[0] = __builtin_shufflevector(__p1.val[0], __p1.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
+ __rev1.val[1] = __builtin_shufflevector(__p1.val[1], __p1.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
+ __rev1.val[2] = __builtin_shufflevector(__p1.val[2], __p1.val[2], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
+ uint8x16_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
+ poly8x16_t __ret;
+ __ret = (poly8x16_t) __builtin_neon_vqtbx3q_v((int8x16_t)__rev0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], (int8x16_t)__rev2, 36);
+ __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint8x16_t vqtbx3q_u8(uint8x16_t __p0, uint8x16x3_t __p1, uint8x16_t __p2) {
+ uint8x16_t __ret;
+ __ret = (uint8x16_t) __builtin_neon_vqtbx3q_v((int8x16_t)__p0, (int8x16_t)__p1.val[0], (int8x16_t)__p1.val[1], (int8x16_t)__p1.val[2], (int8x16_t)__p2, 48);
+ return __ret;
+}
+#else
+__ai uint8x16_t vqtbx3q_u8(uint8x16_t __p0, uint8x16x3_t __p1, uint8x16_t __p2) {
+ uint8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
+ uint8x16x3_t __rev1;
+ __rev1.val[0] = __builtin_shufflevector(__p1.val[0], __p1.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
+ __rev1.val[1] = __builtin_shufflevector(__p1.val[1], __p1.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
+ __rev1.val[2] = __builtin_shufflevector(__p1.val[2], __p1.val[2], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
+ uint8x16_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
+ uint8x16_t __ret;
+ __ret = (uint8x16_t) __builtin_neon_vqtbx3q_v((int8x16_t)__rev0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], (int8x16_t)__rev2, 48);
+ __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai int8x16_t vqtbx3q_s8(int8x16_t __p0, int8x16x3_t __p1, int8x16_t __p2) {
+ int8x16_t __ret;
+ __ret = (int8x16_t) __builtin_neon_vqtbx3q_v((int8x16_t)__p0, (int8x16_t)__p1.val[0], (int8x16_t)__p1.val[1], (int8x16_t)__p1.val[2], (int8x16_t)__p2, 32);
+ return __ret;
+}
+#else
+__ai int8x16_t vqtbx3q_s8(int8x16_t __p0, int8x16x3_t __p1, int8x16_t __p2) {
+ int8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
+ int8x16x3_t __rev1;
+ __rev1.val[0] = __builtin_shufflevector(__p1.val[0], __p1.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
+ __rev1.val[1] = __builtin_shufflevector(__p1.val[1], __p1.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
+ __rev1.val[2] = __builtin_shufflevector(__p1.val[2], __p1.val[2], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
+ int8x16_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
+ int8x16_t __ret;
+ __ret = (int8x16_t) __builtin_neon_vqtbx3q_v((int8x16_t)__rev0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], (int8x16_t)__rev2, 32);
+ __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint8x8_t vqtbx3_u8(uint8x8_t __p0, uint8x16x3_t __p1, uint8x8_t __p2) {
+ uint8x8_t __ret;
+ __ret = (uint8x8_t) __builtin_neon_vqtbx3_v((int8x8_t)__p0, (int8x16_t)__p1.val[0], (int8x16_t)__p1.val[1], (int8x16_t)__p1.val[2], (int8x8_t)__p2, 16);
+ return __ret;
+}
+#else
+__ai uint8x8_t vqtbx3_u8(uint8x8_t __p0, uint8x16x3_t __p1, uint8x8_t __p2) {
+ uint8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
+ uint8x16x3_t __rev1;
+ __rev1.val[0] = __builtin_shufflevector(__p1.val[0], __p1.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
+ __rev1.val[1] = __builtin_shufflevector(__p1.val[1], __p1.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
+ __rev1.val[2] = __builtin_shufflevector(__p1.val[2], __p1.val[2], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
+ uint8x8_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0);
+ uint8x8_t __ret;
+ __ret = (uint8x8_t) __builtin_neon_vqtbx3_v((int8x8_t)__rev0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], (int8x8_t)__rev2, 16);
+ __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai int8x8_t vqtbx3_s8(int8x8_t __p0, int8x16x3_t __p1, int8x8_t __p2) {
+ int8x8_t __ret;
+ __ret = (int8x8_t) __builtin_neon_vqtbx3_v((int8x8_t)__p0, (int8x16_t)__p1.val[0], (int8x16_t)__p1.val[1], (int8x16_t)__p1.val[2], (int8x8_t)__p2, 0);
+ return __ret;
+}
+#else
+__ai int8x8_t vqtbx3_s8(int8x8_t __p0, int8x16x3_t __p1, int8x8_t __p2) {
+ int8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
+ int8x16x3_t __rev1;
+ __rev1.val[0] = __builtin_shufflevector(__p1.val[0], __p1.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
+ __rev1.val[1] = __builtin_shufflevector(__p1.val[1], __p1.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
+ __rev1.val[2] = __builtin_shufflevector(__p1.val[2], __p1.val[2], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
+ int8x8_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0);
+ int8x8_t __ret;
+ __ret = (int8x8_t) __builtin_neon_vqtbx3_v((int8x8_t)__rev0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], (int8x8_t)__rev2, 0);
+ __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai poly8x8_t vqtbx4_p8(poly8x8_t __p0, poly8x16x4_t __p1, uint8x8_t __p2) {
+ poly8x8_t __ret;
+ __ret = (poly8x8_t) __builtin_neon_vqtbx4_v((int8x8_t)__p0, (int8x16_t)__p1.val[0], (int8x16_t)__p1.val[1], (int8x16_t)__p1.val[2], (int8x16_t)__p1.val[3], (int8x8_t)__p2, 4);
+ return __ret;
+}
+#else
+__ai poly8x8_t vqtbx4_p8(poly8x8_t __p0, poly8x16x4_t __p1, uint8x8_t __p2) {
+ poly8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
+ poly8x16x4_t __rev1;
+ __rev1.val[0] = __builtin_shufflevector(__p1.val[0], __p1.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
+ __rev1.val[1] = __builtin_shufflevector(__p1.val[1], __p1.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
+ __rev1.val[2] = __builtin_shufflevector(__p1.val[2], __p1.val[2], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
+ __rev1.val[3] = __builtin_shufflevector(__p1.val[3], __p1.val[3], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
+ uint8x8_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0);
+ poly8x8_t __ret;
+ __ret = (poly8x8_t) __builtin_neon_vqtbx4_v((int8x8_t)__rev0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], (int8x16_t)__rev1.val[3], (int8x8_t)__rev2, 4);
+ __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai poly8x16_t vqtbx4q_p8(poly8x16_t __p0, poly8x16x4_t __p1, uint8x16_t __p2) {
+ poly8x16_t __ret;
+ __ret = (poly8x16_t) __builtin_neon_vqtbx4q_v((int8x16_t)__p0, (int8x16_t)__p1.val[0], (int8x16_t)__p1.val[1], (int8x16_t)__p1.val[2], (int8x16_t)__p1.val[3], (int8x16_t)__p2, 36);
+ return __ret;
+}
+#else
+__ai poly8x16_t vqtbx4q_p8(poly8x16_t __p0, poly8x16x4_t __p1, uint8x16_t __p2) {
+ poly8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
+ poly8x16x4_t __rev1;
+ __rev1.val[0] = __builtin_shufflevector(__p1.val[0], __p1.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
+ __rev1.val[1] = __builtin_shufflevector(__p1.val[1], __p1.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
+ __rev1.val[2] = __builtin_shufflevector(__p1.val[2], __p1.val[2], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
+ __rev1.val[3] = __builtin_shufflevector(__p1.val[3], __p1.val[3], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
+ uint8x16_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
+ poly8x16_t __ret;
+ __ret = (poly8x16_t) __builtin_neon_vqtbx4q_v((int8x16_t)__rev0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], (int8x16_t)__rev1.val[3], (int8x16_t)__rev2, 36);
+ __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint8x16_t vqtbx4q_u8(uint8x16_t __p0, uint8x16x4_t __p1, uint8x16_t __p2) {
+ uint8x16_t __ret;
+ __ret = (uint8x16_t) __builtin_neon_vqtbx4q_v((int8x16_t)__p0, (int8x16_t)__p1.val[0], (int8x16_t)__p1.val[1], (int8x16_t)__p1.val[2], (int8x16_t)__p1.val[3], (int8x16_t)__p2, 48);
+ return __ret;
+}
+#else
+__ai uint8x16_t vqtbx4q_u8(uint8x16_t __p0, uint8x16x4_t __p1, uint8x16_t __p2) {
+ uint8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
+ uint8x16x4_t __rev1;
+ __rev1.val[0] = __builtin_shufflevector(__p1.val[0], __p1.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
+ __rev1.val[1] = __builtin_shufflevector(__p1.val[1], __p1.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
+ __rev1.val[2] = __builtin_shufflevector(__p1.val[2], __p1.val[2], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
+ __rev1.val[3] = __builtin_shufflevector(__p1.val[3], __p1.val[3], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
+ uint8x16_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
+ uint8x16_t __ret;
+ __ret = (uint8x16_t) __builtin_neon_vqtbx4q_v((int8x16_t)__rev0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], (int8x16_t)__rev1.val[3], (int8x16_t)__rev2, 48);
+ __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai int8x16_t vqtbx4q_s8(int8x16_t __p0, int8x16x4_t __p1, int8x16_t __p2) {
+ int8x16_t __ret;
+ __ret = (int8x16_t) __builtin_neon_vqtbx4q_v((int8x16_t)__p0, (int8x16_t)__p1.val[0], (int8x16_t)__p1.val[1], (int8x16_t)__p1.val[2], (int8x16_t)__p1.val[3], (int8x16_t)__p2, 32);
+ return __ret;
+}
+#else
+__ai int8x16_t vqtbx4q_s8(int8x16_t __p0, int8x16x4_t __p1, int8x16_t __p2) {
+ int8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
+ int8x16x4_t __rev1;
+ __rev1.val[0] = __builtin_shufflevector(__p1.val[0], __p1.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
+ __rev1.val[1] = __builtin_shufflevector(__p1.val[1], __p1.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
+ __rev1.val[2] = __builtin_shufflevector(__p1.val[2], __p1.val[2], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
+ __rev1.val[3] = __builtin_shufflevector(__p1.val[3], __p1.val[3], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
+ int8x16_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
+ int8x16_t __ret;
+ __ret = (int8x16_t) __builtin_neon_vqtbx4q_v((int8x16_t)__rev0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], (int8x16_t)__rev1.val[3], (int8x16_t)__rev2, 32);
+ __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint8x8_t vqtbx4_u8(uint8x8_t __p0, uint8x16x4_t __p1, uint8x8_t __p2) {
+ uint8x8_t __ret;
+ __ret = (uint8x8_t) __builtin_neon_vqtbx4_v((int8x8_t)__p0, (int8x16_t)__p1.val[0], (int8x16_t)__p1.val[1], (int8x16_t)__p1.val[2], (int8x16_t)__p1.val[3], (int8x8_t)__p2, 16);
+ return __ret;
+}
+#else
+__ai uint8x8_t vqtbx4_u8(uint8x8_t __p0, uint8x16x4_t __p1, uint8x8_t __p2) {
+ uint8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
+ uint8x16x4_t __rev1;
+ __rev1.val[0] = __builtin_shufflevector(__p1.val[0], __p1.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
+ __rev1.val[1] = __builtin_shufflevector(__p1.val[1], __p1.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
+ __rev1.val[2] = __builtin_shufflevector(__p1.val[2], __p1.val[2], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
+ __rev1.val[3] = __builtin_shufflevector(__p1.val[3], __p1.val[3], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
+ uint8x8_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0);
+ uint8x8_t __ret;
+ __ret = (uint8x8_t) __builtin_neon_vqtbx4_v((int8x8_t)__rev0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], (int8x16_t)__rev1.val[3], (int8x8_t)__rev2, 16);
+ __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai int8x8_t vqtbx4_s8(int8x8_t __p0, int8x16x4_t __p1, int8x8_t __p2) {
+ int8x8_t __ret;
+ __ret = (int8x8_t) __builtin_neon_vqtbx4_v((int8x8_t)__p0, (int8x16_t)__p1.val[0], (int8x16_t)__p1.val[1], (int8x16_t)__p1.val[2], (int8x16_t)__p1.val[3], (int8x8_t)__p2, 0);
+ return __ret;
+}
+#else
+__ai int8x8_t vqtbx4_s8(int8x8_t __p0, int8x16x4_t __p1, int8x8_t __p2) {
+ int8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
+ int8x16x4_t __rev1;
+ __rev1.val[0] = __builtin_shufflevector(__p1.val[0], __p1.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
+ __rev1.val[1] = __builtin_shufflevector(__p1.val[1], __p1.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
+ __rev1.val[2] = __builtin_shufflevector(__p1.val[2], __p1.val[2], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
+ __rev1.val[3] = __builtin_shufflevector(__p1.val[3], __p1.val[3], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
+ int8x8_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0);
+ int8x8_t __ret;
+ __ret = (int8x8_t) __builtin_neon_vqtbx4_v((int8x8_t)__rev0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], (int8x16_t)__rev1.val[3], (int8x8_t)__rev2, 0);
+ __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
+ return __ret;
+}
+#endif
+
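+/*
+ * vraddhn_high_*: rounding add, then narrow each sum to its high half; the
+ * narrowed result is packed into the upper half of the output, with __p0
+ * supplying the lower half.
+ */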
+#ifdef __LITTLE_ENDIAN__
+__ai uint16x8_t vraddhn_high_u32(uint16x4_t __p0, uint32x4_t __p1, uint32x4_t __p2) {
+ uint16x8_t __ret;
+ __ret = vcombine_u16(__p0, vraddhn_u32(__p1, __p2));
+ return __ret;
+}
+#else
+__ai uint16x8_t vraddhn_high_u32(uint16x4_t __p0, uint32x4_t __p1, uint32x4_t __p2) {
+ uint16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
+ uint32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
+ uint32x4_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 3, 2, 1, 0);
+ uint16x8_t __ret;
+ __ret = __noswap_vcombine_u16(__rev0, __noswap_vraddhn_u32(__rev1, __rev2));
+ __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint32x4_t vraddhn_high_u64(uint32x2_t __p0, uint64x2_t __p1, uint64x2_t __p2) {
+ uint32x4_t __ret;
+ __ret = vcombine_u32(__p0, vraddhn_u64(__p1, __p2));
+ return __ret;
+}
+#else
+__ai uint32x4_t vraddhn_high_u64(uint32x2_t __p0, uint64x2_t __p1, uint64x2_t __p2) {
+ uint32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
+ uint64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
+ uint64x2_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 1, 0);
+ uint32x4_t __ret;
+ __ret = __noswap_vcombine_u32(__rev0, __noswap_vraddhn_u64(__rev1, __rev2));
+ __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint8x16_t vraddhn_high_u16(uint8x8_t __p0, uint16x8_t __p1, uint16x8_t __p2) {
+ uint8x16_t __ret;
+ __ret = vcombine_u8(__p0, vraddhn_u16(__p1, __p2));
+ return __ret;
+}
+#else
+__ai uint8x16_t vraddhn_high_u16(uint8x8_t __p0, uint16x8_t __p1, uint16x8_t __p2) {
+ uint8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
+ uint16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
+ uint16x8_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0);
+ uint8x16_t __ret;
+ __ret = __noswap_vcombine_u8(__rev0, __noswap_vraddhn_u16(__rev1, __rev2));
+ __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai int16x8_t vraddhn_high_s32(int16x4_t __p0, int32x4_t __p1, int32x4_t __p2) {
+ int16x8_t __ret;
+ __ret = vcombine_s16(__p0, vraddhn_s32(__p1, __p2));
+ return __ret;
+}
+#else
+__ai int16x8_t vraddhn_high_s32(int16x4_t __p0, int32x4_t __p1, int32x4_t __p2) {
+ int16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
+ int32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
+ int32x4_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 3, 2, 1, 0);
+ int16x8_t __ret;
+ __ret = __noswap_vcombine_s16(__rev0, __noswap_vraddhn_s32(__rev1, __rev2));
+ __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai int32x4_t vraddhn_high_s64(int32x2_t __p0, int64x2_t __p1, int64x2_t __p2) {
+ int32x4_t __ret;
+ __ret = vcombine_s32(__p0, vraddhn_s64(__p1, __p2));
+ return __ret;
+}
+#else
+__ai int32x4_t vraddhn_high_s64(int32x2_t __p0, int64x2_t __p1, int64x2_t __p2) {
+ int32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
+ int64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
+ int64x2_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 1, 0);
+ int32x4_t __ret;
+ __ret = __noswap_vcombine_s32(__rev0, __noswap_vraddhn_s64(__rev1, __rev2));
+ __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai int8x16_t vraddhn_high_s16(int8x8_t __p0, int16x8_t __p1, int16x8_t __p2) {
+ int8x16_t __ret;
+ __ret = vcombine_s8(__p0, vraddhn_s16(__p1, __p2));
+ return __ret;
+}
+#else
+__ai int8x16_t vraddhn_high_s16(int8x8_t __p0, int16x8_t __p1, int16x8_t __p2) {
+ int8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
+ int16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
+ int16x8_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0);
+ int8x16_t __ret;
+ __ret = __noswap_vcombine_s8(__rev0, __noswap_vraddhn_s16(__rev1, __rev2));
+ __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
+ return __ret;
+}
+#endif
+
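+/*
+ * vraddhn_high_* fuses vcombine with a rounding add-high-narrow: the low
+ * half of the result is the first argument, the high half is the rounded
+ * top half of each lane-wise sum (e.g. (a + b + 0x8000) >> 16 for u32).
+ * Sketch of narrowing two u32 accumulators while keeping earlier output
+ * (illustrative only, not from the header):
+ *
+ *   uint16x8_t narrow_sums(uint16x4_t done, uint32x4_t a, uint32x4_t b) {
+ *     return vraddhn_high_u32(done, a, b);
+ *   }
+ */
+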
+#ifdef __LITTLE_ENDIAN__
+__ai poly8x8_t vrbit_p8(poly8x8_t __p0) {
+ poly8x8_t __ret;
+ __ret = (poly8x8_t) __builtin_neon_vrbit_v((int8x8_t)__p0, 4);
+ return __ret;
+}
+#else
+__ai poly8x8_t vrbit_p8(poly8x8_t __p0) {
+ poly8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
+ poly8x8_t __ret;
+ __ret = (poly8x8_t) __builtin_neon_vrbit_v((int8x8_t)__rev0, 4);
+ __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai poly8x16_t vrbitq_p8(poly8x16_t __p0) {
+ poly8x16_t __ret;
+ __ret = (poly8x16_t) __builtin_neon_vrbitq_v((int8x16_t)__p0, 36);
+ return __ret;
+}
+#else
+__ai poly8x16_t vrbitq_p8(poly8x16_t __p0) {
+ poly8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
+ poly8x16_t __ret;
+ __ret = (poly8x16_t) __builtin_neon_vrbitq_v((int8x16_t)__rev0, 36);
+ __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint8x16_t vrbitq_u8(uint8x16_t __p0) {
+ uint8x16_t __ret;
+ __ret = (uint8x16_t) __builtin_neon_vrbitq_v((int8x16_t)__p0, 48);
+ return __ret;
+}
+#else
+__ai uint8x16_t vrbitq_u8(uint8x16_t __p0) {
+ uint8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
+ uint8x16_t __ret;
+ __ret = (uint8x16_t) __builtin_neon_vrbitq_v((int8x16_t)__rev0, 48);
+ __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai int8x16_t vrbitq_s8(int8x16_t __p0) {
+ int8x16_t __ret;
+ __ret = (int8x16_t) __builtin_neon_vrbitq_v((int8x16_t)__p0, 32);
+ return __ret;
+}
+#else
+__ai int8x16_t vrbitq_s8(int8x16_t __p0) {
+ int8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
+ int8x16_t __ret;
+ __ret = (int8x16_t) __builtin_neon_vrbitq_v((int8x16_t)__rev0, 32);
+ __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint8x8_t vrbit_u8(uint8x8_t __p0) {
+ uint8x8_t __ret;
+ __ret = (uint8x8_t) __builtin_neon_vrbit_v((int8x8_t)__p0, 16);
+ return __ret;
+}
+#else
+__ai uint8x8_t vrbit_u8(uint8x8_t __p0) {
+ uint8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
+ uint8x8_t __ret;
+ __ret = (uint8x8_t) __builtin_neon_vrbit_v((int8x8_t)__rev0, 16);
+ __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai int8x8_t vrbit_s8(int8x8_t __p0) {
+ int8x8_t __ret;
+ __ret = (int8x8_t) __builtin_neon_vrbit_v((int8x8_t)__p0, 0);
+ return __ret;
+}
+#else
+__ai int8x8_t vrbit_s8(int8x8_t __p0) {
+ int8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
+ int8x8_t __ret;
+ __ret = (int8x8_t) __builtin_neon_vrbit_v((int8x8_t)__rev0, 0);
+ __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
+ return __ret;
+}
+#endif
+
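+/*
+ * vrbit/vrbitq apply RBIT per lane, reversing the bit order inside each
+ * byte -- handy for switching between LSB-first and MSB-first bitstreams
+ * (e.g. reflected CRCs). Sketch (assumed usage, not from the header):
+ *
+ *   uint8x16_t reflect_bytes(uint8x16_t v) {
+ *     return vrbitq_u8(v);
+ *   }
+ */
+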
+#ifdef __LITTLE_ENDIAN__
+__ai float64x2_t vrecpeq_f64(float64x2_t __p0) {
+ float64x2_t __ret;
+ __ret = (float64x2_t) __builtin_neon_vrecpeq_v((int8x16_t)__p0, 42);
+ return __ret;
+}
+#else
+__ai float64x2_t vrecpeq_f64(float64x2_t __p0) {
+ float64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
+ float64x2_t __ret;
+ __ret = (float64x2_t) __builtin_neon_vrecpeq_v((int8x16_t)__rev0, 42);
+ __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai float64x1_t vrecpe_f64(float64x1_t __p0) {
+ float64x1_t __ret;
+ __ret = (float64x1_t) __builtin_neon_vrecpe_v((int8x8_t)__p0, 10);
+ return __ret;
+}
+#else
+__ai float64x1_t vrecpe_f64(float64x1_t __p0) {
+ float64x1_t __ret;
+ __ret = (float64x1_t) __builtin_neon_vrecpe_v((int8x8_t)__p0, 10);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai float64_t vrecped_f64(float64_t __p0) {
+ float64_t __ret;
+ __ret = (float64_t) __builtin_neon_vrecped_f64(__p0);
+ return __ret;
+}
+#else
+__ai float64_t vrecped_f64(float64_t __p0) {
+ float64_t __ret;
+ __ret = (float64_t) __builtin_neon_vrecped_f64(__p0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai float32_t vrecpes_f32(float32_t __p0) {
+ float32_t __ret;
+ __ret = (float32_t) __builtin_neon_vrecpes_f32(__p0);
+ return __ret;
+}
+#else
+__ai float32_t vrecpes_f32(float32_t __p0) {
+ float32_t __ret;
+ __ret = (float32_t) __builtin_neon_vrecpes_f32(__p0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai float64x2_t vrecpsq_f64(float64x2_t __p0, float64x2_t __p1) {
+ float64x2_t __ret;
+ __ret = (float64x2_t) __builtin_neon_vrecpsq_v((int8x16_t)__p0, (int8x16_t)__p1, 42);
+ return __ret;
+}
+#else
+__ai float64x2_t vrecpsq_f64(float64x2_t __p0, float64x2_t __p1) {
+ float64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
+ float64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
+ float64x2_t __ret;
+ __ret = (float64x2_t) __builtin_neon_vrecpsq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 42);
+ __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai float64x1_t vrecps_f64(float64x1_t __p0, float64x1_t __p1) {
+ float64x1_t __ret;
+ __ret = (float64x1_t) __builtin_neon_vrecps_v((int8x8_t)__p0, (int8x8_t)__p1, 10);
+ return __ret;
+}
+#else
+__ai float64x1_t vrecps_f64(float64x1_t __p0, float64x1_t __p1) {
+ float64x1_t __ret;
+ __ret = (float64x1_t) __builtin_neon_vrecps_v((int8x8_t)__p0, (int8x8_t)__p1, 10);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai float64_t vrecpsd_f64(float64_t __p0, float64_t __p1) {
+ float64_t __ret;
+ __ret = (float64_t) __builtin_neon_vrecpsd_f64(__p0, __p1);
+ return __ret;
+}
+#else
+__ai float64_t vrecpsd_f64(float64_t __p0, float64_t __p1) {
+ float64_t __ret;
+ __ret = (float64_t) __builtin_neon_vrecpsd_f64(__p0, __p1);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai float32_t vrecpss_f32(float32_t __p0, float32_t __p1) {
+ float32_t __ret;
+ __ret = (float32_t) __builtin_neon_vrecpss_f32(__p0, __p1);
+ return __ret;
+}
+#else
+__ai float32_t vrecpss_f32(float32_t __p0, float32_t __p1) {
+ float32_t __ret;
+ __ret = (float32_t) __builtin_neon_vrecpss_f32(__p0, __p1);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai float64_t vrecpxd_f64(float64_t __p0) {
+ float64_t __ret;
+ __ret = (float64_t) __builtin_neon_vrecpxd_f64(__p0);
+ return __ret;
+}
+#else
+__ai float64_t vrecpxd_f64(float64_t __p0) {
+ float64_t __ret;
+ __ret = (float64_t) __builtin_neon_vrecpxd_f64(__p0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai float32_t vrecpxs_f32(float32_t __p0) {
+ float32_t __ret;
+ __ret = (float32_t) __builtin_neon_vrecpxs_f32(__p0);
+ return __ret;
+}
+#else
+__ai float32_t vrecpxs_f32(float32_t __p0) {
+ float32_t __ret;
+ __ret = (float32_t) __builtin_neon_vrecpxs_f32(__p0);
+ return __ret;
+}
+#endif
+
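+/*
+ * vrecpe* yields only a coarse (~8-bit) reciprocal estimate; each vrecps*
+ * step computes 2 - d*x, so x * vrecps(d, x) is one Newton-Raphson
+ * refinement toward 1/d. A sketch of the usual two-step recipe
+ * (illustrative, not part of the generated header):
+ *
+ *   float64x2_t recip_f64(float64x2_t d) {
+ *     float64x2_t x = vrecpeq_f64(d);            // rough estimate
+ *     x = vmulq_f64(x, vrecpsq_f64(d, x));       // refine once
+ *     x = vmulq_f64(x, vrecpsq_f64(d, x));       // refine again
+ *     return x;
+ *   }
+ */
+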
+#ifdef __LITTLE_ENDIAN__
+__ai uint64_t vrshld_u64(uint64_t __p0, uint64_t __p1) {
+ uint64_t __ret;
+ __ret = (uint64_t) __builtin_neon_vrshld_u64(__p0, __p1);
+ return __ret;
+}
+#else
+__ai uint64_t vrshld_u64(uint64_t __p0, uint64_t __p1) {
+ uint64_t __ret;
+ __ret = (uint64_t) __builtin_neon_vrshld_u64(__p0, __p1);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai int64_t vrshld_s64(int64_t __p0, int64_t __p1) {
+ int64_t __ret;
+ __ret = (int64_t) __builtin_neon_vrshld_s64(__p0, __p1);
+ return __ret;
+}
+#else
+__ai int64_t vrshld_s64(int64_t __p0, int64_t __p1) {
+ int64_t __ret;
+ __ret = (int64_t) __builtin_neon_vrshld_s64(__p0, __p1);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vrshrd_n_u64(__p0, __p1) __extension__ ({ \
+ uint64_t __s0 = __p0; \
+ uint64_t __ret; \
+ __ret = (uint64_t) __builtin_neon_vrshrd_n_u64(__s0, __p1); \
+ __ret; \
+})
+#else
+#define vrshrd_n_u64(__p0, __p1) __extension__ ({ \
+ uint64_t __s0 = __p0; \
+ uint64_t __ret; \
+ __ret = (uint64_t) __builtin_neon_vrshrd_n_u64(__s0, __p1); \
+ __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vrshrd_n_s64(__p0, __p1) __extension__ ({ \
+ int64_t __s0 = __p0; \
+ int64_t __ret; \
+ __ret = (int64_t) __builtin_neon_vrshrd_n_s64(__s0, __p1); \
+ __ret; \
+})
+#else
+#define vrshrd_n_s64(__p0, __p1) __extension__ ({ \
+ int64_t __s0 = __p0; \
+ int64_t __ret; \
+ __ret = (int64_t) __builtin_neon_vrshrd_n_s64(__s0, __p1); \
+ __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vrshrn_high_n_u32(__p0_206, __p1_206, __p2_206) __extension__ ({ \
+ uint16x4_t __s0_206 = __p0_206; \
+ uint32x4_t __s1_206 = __p1_206; \
+ uint16x8_t __ret_206; \
+ __ret_206 = (uint16x8_t)(vcombine_u16((uint16x4_t)(__s0_206), (uint16x4_t)(vrshrn_n_u32(__s1_206, __p2_206)))); \
+ __ret_206; \
+})
+#else
+#define vrshrn_high_n_u32(__p0_207, __p1_207, __p2_207) __extension__ ({ \
+ uint16x4_t __s0_207 = __p0_207; \
+ uint32x4_t __s1_207 = __p1_207; \
+ uint16x4_t __rev0_207; __rev0_207 = __builtin_shufflevector(__s0_207, __s0_207, 3, 2, 1, 0); \
+ uint32x4_t __rev1_207; __rev1_207 = __builtin_shufflevector(__s1_207, __s1_207, 3, 2, 1, 0); \
+ uint16x8_t __ret_207; \
+ __ret_207 = (uint16x8_t)(__noswap_vcombine_u16((uint16x4_t)(__rev0_207), (uint16x4_t)(__noswap_vrshrn_n_u32(__rev1_207, __p2_207)))); \
+ __ret_207 = __builtin_shufflevector(__ret_207, __ret_207, 7, 6, 5, 4, 3, 2, 1, 0); \
+ __ret_207; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vrshrn_high_n_u64(__p0_208, __p1_208, __p2_208) __extension__ ({ \
+ uint32x2_t __s0_208 = __p0_208; \
+ uint64x2_t __s1_208 = __p1_208; \
+ uint32x4_t __ret_208; \
+ __ret_208 = (uint32x4_t)(vcombine_u32((uint32x2_t)(__s0_208), (uint32x2_t)(vrshrn_n_u64(__s1_208, __p2_208)))); \
+ __ret_208; \
+})
+#else
+#define vrshrn_high_n_u64(__p0_209, __p1_209, __p2_209) __extension__ ({ \
+ uint32x2_t __s0_209 = __p0_209; \
+ uint64x2_t __s1_209 = __p1_209; \
+ uint32x2_t __rev0_209; __rev0_209 = __builtin_shufflevector(__s0_209, __s0_209, 1, 0); \
+ uint64x2_t __rev1_209; __rev1_209 = __builtin_shufflevector(__s1_209, __s1_209, 1, 0); \
+ uint32x4_t __ret_209; \
+ __ret_209 = (uint32x4_t)(__noswap_vcombine_u32((uint32x2_t)(__rev0_209), (uint32x2_t)(__noswap_vrshrn_n_u64(__rev1_209, __p2_209)))); \
+ __ret_209 = __builtin_shufflevector(__ret_209, __ret_209, 3, 2, 1, 0); \
+ __ret_209; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vrshrn_high_n_u16(__p0_210, __p1_210, __p2_210) __extension__ ({ \
+ uint8x8_t __s0_210 = __p0_210; \
+ uint16x8_t __s1_210 = __p1_210; \
+ uint8x16_t __ret_210; \
+ __ret_210 = (uint8x16_t)(vcombine_u8((uint8x8_t)(__s0_210), (uint8x8_t)(vrshrn_n_u16(__s1_210, __p2_210)))); \
+ __ret_210; \
+})
+#else
+#define vrshrn_high_n_u16(__p0_211, __p1_211, __p2_211) __extension__ ({ \
+ uint8x8_t __s0_211 = __p0_211; \
+ uint16x8_t __s1_211 = __p1_211; \
+ uint8x8_t __rev0_211; __rev0_211 = __builtin_shufflevector(__s0_211, __s0_211, 7, 6, 5, 4, 3, 2, 1, 0); \
+ uint16x8_t __rev1_211; __rev1_211 = __builtin_shufflevector(__s1_211, __s1_211, 7, 6, 5, 4, 3, 2, 1, 0); \
+ uint8x16_t __ret_211; \
+ __ret_211 = (uint8x16_t)(__noswap_vcombine_u8((uint8x8_t)(__rev0_211), (uint8x8_t)(__noswap_vrshrn_n_u16(__rev1_211, __p2_211)))); \
+ __ret_211 = __builtin_shufflevector(__ret_211, __ret_211, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
+ __ret_211; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vrshrn_high_n_s32(__p0_212, __p1_212, __p2_212) __extension__ ({ \
+ int16x4_t __s0_212 = __p0_212; \
+ int32x4_t __s1_212 = __p1_212; \
+ int16x8_t __ret_212; \
+ __ret_212 = (int16x8_t)(vcombine_s16((int16x4_t)(__s0_212), (int16x4_t)(vrshrn_n_s32(__s1_212, __p2_212)))); \
+ __ret_212; \
+})
+#else
+#define vrshrn_high_n_s32(__p0_213, __p1_213, __p2_213) __extension__ ({ \
+ int16x4_t __s0_213 = __p0_213; \
+ int32x4_t __s1_213 = __p1_213; \
+ int16x4_t __rev0_213; __rev0_213 = __builtin_shufflevector(__s0_213, __s0_213, 3, 2, 1, 0); \
+ int32x4_t __rev1_213; __rev1_213 = __builtin_shufflevector(__s1_213, __s1_213, 3, 2, 1, 0); \
+ int16x8_t __ret_213; \
+ __ret_213 = (int16x8_t)(__noswap_vcombine_s16((int16x4_t)(__rev0_213), (int16x4_t)(__noswap_vrshrn_n_s32(__rev1_213, __p2_213)))); \
+ __ret_213 = __builtin_shufflevector(__ret_213, __ret_213, 7, 6, 5, 4, 3, 2, 1, 0); \
+ __ret_213; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vrshrn_high_n_s64(__p0_214, __p1_214, __p2_214) __extension__ ({ \
+ int32x2_t __s0_214 = __p0_214; \
+ int64x2_t __s1_214 = __p1_214; \
+ int32x4_t __ret_214; \
+ __ret_214 = (int32x4_t)(vcombine_s32((int32x2_t)(__s0_214), (int32x2_t)(vrshrn_n_s64(__s1_214, __p2_214)))); \
+ __ret_214; \
+})
+#else
+#define vrshrn_high_n_s64(__p0_215, __p1_215, __p2_215) __extension__ ({ \
+ int32x2_t __s0_215 = __p0_215; \
+ int64x2_t __s1_215 = __p1_215; \
+ int32x2_t __rev0_215; __rev0_215 = __builtin_shufflevector(__s0_215, __s0_215, 1, 0); \
+ int64x2_t __rev1_215; __rev1_215 = __builtin_shufflevector(__s1_215, __s1_215, 1, 0); \
+ int32x4_t __ret_215; \
+ __ret_215 = (int32x4_t)(__noswap_vcombine_s32((int32x2_t)(__rev0_215), (int32x2_t)(__noswap_vrshrn_n_s64(__rev1_215, __p2_215)))); \
+ __ret_215 = __builtin_shufflevector(__ret_215, __ret_215, 3, 2, 1, 0); \
+ __ret_215; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vrshrn_high_n_s16(__p0_216, __p1_216, __p2_216) __extension__ ({ \
+ int8x8_t __s0_216 = __p0_216; \
+ int16x8_t __s1_216 = __p1_216; \
+ int8x16_t __ret_216; \
+ __ret_216 = (int8x16_t)(vcombine_s8((int8x8_t)(__s0_216), (int8x8_t)(vrshrn_n_s16(__s1_216, __p2_216)))); \
+ __ret_216; \
+})
+#else
+#define vrshrn_high_n_s16(__p0_217, __p1_217, __p2_217) __extension__ ({ \
+ int8x8_t __s0_217 = __p0_217; \
+ int16x8_t __s1_217 = __p1_217; \
+ int8x8_t __rev0_217; __rev0_217 = __builtin_shufflevector(__s0_217, __s0_217, 7, 6, 5, 4, 3, 2, 1, 0); \
+ int16x8_t __rev1_217; __rev1_217 = __builtin_shufflevector(__s1_217, __s1_217, 7, 6, 5, 4, 3, 2, 1, 0); \
+ int8x16_t __ret_217; \
+ __ret_217 = (int8x16_t)(__noswap_vcombine_s8((int8x8_t)(__rev0_217), (int8x8_t)(__noswap_vrshrn_n_s16(__rev1_217, __p2_217)))); \
+ __ret_217 = __builtin_shufflevector(__ret_217, __ret_217, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
+ __ret_217; \
+})
+#endif
+
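+/*
+ * The vrshrn_high_n_* macros round (adding 1 << (n-1) before the shift),
+ * narrow, and pack into the upper half, so two calls rebuild a full quad
+ * vector from two widened halves. Sketch of converting Q16.16 fixed point
+ * back to u16 (assumed usage):
+ *
+ *   uint16x8_t q16_to_u16(uint16x4_t low_done, uint32x4_t hi_q16) {
+ *     return vrshrn_high_n_u32(low_done, hi_q16, 16);
+ *   }
+ */
+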
+#ifdef __LITTLE_ENDIAN__
+__ai float64x2_t vrsqrteq_f64(float64x2_t __p0) {
+ float64x2_t __ret;
+ __ret = (float64x2_t) __builtin_neon_vrsqrteq_v((int8x16_t)__p0, 42);
+ return __ret;
+}
+#else
+__ai float64x2_t vrsqrteq_f64(float64x2_t __p0) {
+ float64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
+ float64x2_t __ret;
+ __ret = (float64x2_t) __builtin_neon_vrsqrteq_v((int8x16_t)__rev0, 42);
+ __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai float64x1_t vrsqrte_f64(float64x1_t __p0) {
+ float64x1_t __ret;
+ __ret = (float64x1_t) __builtin_neon_vrsqrte_v((int8x8_t)__p0, 10);
+ return __ret;
+}
+#else
+__ai float64x1_t vrsqrte_f64(float64x1_t __p0) {
+ float64x1_t __ret;
+ __ret = (float64x1_t) __builtin_neon_vrsqrte_v((int8x8_t)__p0, 10);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai float64_t vrsqrted_f64(float64_t __p0) {
+ float64_t __ret;
+ __ret = (float64_t) __builtin_neon_vrsqrted_f64(__p0);
+ return __ret;
+}
+#else
+__ai float64_t vrsqrted_f64(float64_t __p0) {
+ float64_t __ret;
+ __ret = (float64_t) __builtin_neon_vrsqrted_f64(__p0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai float32_t vrsqrtes_f32(float32_t __p0) {
+ float32_t __ret;
+ __ret = (float32_t) __builtin_neon_vrsqrtes_f32(__p0);
+ return __ret;
+}
+#else
+__ai float32_t vrsqrtes_f32(float32_t __p0) {
+ float32_t __ret;
+ __ret = (float32_t) __builtin_neon_vrsqrtes_f32(__p0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai float64x2_t vrsqrtsq_f64(float64x2_t __p0, float64x2_t __p1) {
+ float64x2_t __ret;
+ __ret = (float64x2_t) __builtin_neon_vrsqrtsq_v((int8x16_t)__p0, (int8x16_t)__p1, 42);
+ return __ret;
+}
+#else
+__ai float64x2_t vrsqrtsq_f64(float64x2_t __p0, float64x2_t __p1) {
+ float64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
+ float64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
+ float64x2_t __ret;
+ __ret = (float64x2_t) __builtin_neon_vrsqrtsq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 42);
+ __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai float64x1_t vrsqrts_f64(float64x1_t __p0, float64x1_t __p1) {
+ float64x1_t __ret;
+ __ret = (float64x1_t) __builtin_neon_vrsqrts_v((int8x8_t)__p0, (int8x8_t)__p1, 10);
+ return __ret;
+}
+#else
+__ai float64x1_t vrsqrts_f64(float64x1_t __p0, float64x1_t __p1) {
+ float64x1_t __ret;
+ __ret = (float64x1_t) __builtin_neon_vrsqrts_v((int8x8_t)__p0, (int8x8_t)__p1, 10);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai float64_t vrsqrtsd_f64(float64_t __p0, float64_t __p1) {
+ float64_t __ret;
+ __ret = (float64_t) __builtin_neon_vrsqrtsd_f64(__p0, __p1);
+ return __ret;
+}
+#else
+__ai float64_t vrsqrtsd_f64(float64_t __p0, float64_t __p1) {
+ float64_t __ret;
+ __ret = (float64_t) __builtin_neon_vrsqrtsd_f64(__p0, __p1);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai float32_t vrsqrtss_f32(float32_t __p0, float32_t __p1) {
+ float32_t __ret;
+ __ret = (float32_t) __builtin_neon_vrsqrtss_f32(__p0, __p1);
+ return __ret;
+}
+#else
+__ai float32_t vrsqrtss_f32(float32_t __p0, float32_t __p1) {
+ float32_t __ret;
+ __ret = (float32_t) __builtin_neon_vrsqrtss_f32(__p0, __p1);
+ return __ret;
+}
+#endif
+
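+/*
+ * The matching reciprocal-square-root idiom: vrsqrte* estimates, and each
+ * vrsqrts* step computes (3 - a*b) / 2, so with a = d*x and b = x one
+ * multiply per step performs a Newton-Raphson iteration toward 1/sqrt(d).
+ * Sketch (illustrative, not part of the generated header):
+ *
+ *   float64x2_t rsqrt_f64(float64x2_t d) {
+ *     float64x2_t x = vrsqrteq_f64(d);
+ *     x = vmulq_f64(x, vrsqrtsq_f64(vmulq_f64(d, x), x));
+ *     x = vmulq_f64(x, vrsqrtsq_f64(vmulq_f64(d, x), x));
+ *     return x;
+ *   }
+ */
+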
+#ifdef __LITTLE_ENDIAN__
+#define vrsrad_n_u64(__p0, __p1, __p2) __extension__ ({ \
+ uint64_t __s0 = __p0; \
+ uint64_t __s1 = __p1; \
+ uint64_t __ret; \
+ __ret = (uint64_t) __builtin_neon_vrsrad_n_u64(__s0, __s1, __p2); \
+ __ret; \
+})
+#else
+#define vrsrad_n_u64(__p0, __p1, __p2) __extension__ ({ \
+ uint64_t __s0 = __p0; \
+ uint64_t __s1 = __p1; \
+ uint64_t __ret; \
+ __ret = (uint64_t) __builtin_neon_vrsrad_n_u64(__s0, __s1, __p2); \
+ __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vrsrad_n_s64(__p0, __p1, __p2) __extension__ ({ \
+ int64_t __s0 = __p0; \
+ int64_t __s1 = __p1; \
+ int64_t __ret; \
+ __ret = (int64_t) __builtin_neon_vrsrad_n_s64(__s0, __s1, __p2); \
+ __ret; \
+})
+#else
+#define vrsrad_n_s64(__p0, __p1, __p2) __extension__ ({ \
+ int64_t __s0 = __p0; \
+ int64_t __s1 = __p1; \
+ int64_t __ret; \
+ __ret = (int64_t) __builtin_neon_vrsrad_n_s64(__s0, __s1, __p2); \
+ __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint16x8_t vrsubhn_high_u32(uint16x4_t __p0, uint32x4_t __p1, uint32x4_t __p2) {
+ uint16x8_t __ret;
+ __ret = vcombine_u16(__p0, vrsubhn_u32(__p1, __p2));
+ return __ret;
+}
+#else
+__ai uint16x8_t vrsubhn_high_u32(uint16x4_t __p0, uint32x4_t __p1, uint32x4_t __p2) {
+ uint16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
+ uint32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
+ uint32x4_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 3, 2, 1, 0);
+ uint16x8_t __ret;
+ __ret = __noswap_vcombine_u16(__rev0, __noswap_vrsubhn_u32(__rev1, __rev2));
+ __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint32x4_t vrsubhn_high_u64(uint32x2_t __p0, uint64x2_t __p1, uint64x2_t __p2) {
+ uint32x4_t __ret;
+ __ret = vcombine_u32(__p0, vrsubhn_u64(__p1, __p2));
+ return __ret;
+}
+#else
+__ai uint32x4_t vrsubhn_high_u64(uint32x2_t __p0, uint64x2_t __p1, uint64x2_t __p2) {
+ uint32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
+ uint64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
+ uint64x2_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 1, 0);
+ uint32x4_t __ret;
+ __ret = __noswap_vcombine_u32(__rev0, __noswap_vrsubhn_u64(__rev1, __rev2));
+ __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint8x16_t vrsubhn_high_u16(uint8x8_t __p0, uint16x8_t __p1, uint16x8_t __p2) {
+ uint8x16_t __ret;
+ __ret = vcombine_u8(__p0, vrsubhn_u16(__p1, __p2));
+ return __ret;
+}
+#else
+__ai uint8x16_t vrsubhn_high_u16(uint8x8_t __p0, uint16x8_t __p1, uint16x8_t __p2) {
+ uint8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
+ uint16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
+ uint16x8_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0);
+ uint8x16_t __ret;
+ __ret = __noswap_vcombine_u8(__rev0, __noswap_vrsubhn_u16(__rev1, __rev2));
+ __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai int16x8_t vrsubhn_high_s32(int16x4_t __p0, int32x4_t __p1, int32x4_t __p2) {
+ int16x8_t __ret;
+ __ret = vcombine_s16(__p0, vrsubhn_s32(__p1, __p2));
+ return __ret;
+}
+#else
+__ai int16x8_t vrsubhn_high_s32(int16x4_t __p0, int32x4_t __p1, int32x4_t __p2) {
+ int16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
+ int32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
+ int32x4_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 3, 2, 1, 0);
+ int16x8_t __ret;
+ __ret = __noswap_vcombine_s16(__rev0, __noswap_vrsubhn_s32(__rev1, __rev2));
+ __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai int32x4_t vrsubhn_high_s64(int32x2_t __p0, int64x2_t __p1, int64x2_t __p2) {
+ int32x4_t __ret;
+ __ret = vcombine_s32(__p0, vrsubhn_s64(__p1, __p2));
+ return __ret;
+}
+#else
+__ai int32x4_t vrsubhn_high_s64(int32x2_t __p0, int64x2_t __p1, int64x2_t __p2) {
+ int32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
+ int64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
+ int64x2_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 1, 0);
+ int32x4_t __ret;
+ __ret = __noswap_vcombine_s32(__rev0, __noswap_vrsubhn_s64(__rev1, __rev2));
+ __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai int8x16_t vrsubhn_high_s16(int8x8_t __p0, int16x8_t __p1, int16x8_t __p2) {
+ int8x16_t __ret;
+ __ret = vcombine_s8(__p0, vrsubhn_s16(__p1, __p2));
+ return __ret;
+}
+#else
+__ai int8x16_t vrsubhn_high_s16(int8x8_t __p0, int16x8_t __p1, int16x8_t __p2) {
+ int8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
+ int16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
+ int16x8_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0);
+ int8x16_t __ret;
+ __ret = __noswap_vcombine_s8(__rev0, __noswap_vrsubhn_s16(__rev1, __rev2));
+ __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vset_lane_p64(__p0, __p1, __p2) __extension__ ({ \
+ poly64_t __s0 = __p0; \
+ poly64x1_t __s1 = __p1; \
+ poly64x1_t __ret; \
+ __ret = (poly64x1_t) __builtin_neon_vset_lane_i64(__s0, (int8x8_t)__s1, __p2); \
+ __ret; \
+})
+#else
+#define vset_lane_p64(__p0, __p1, __p2) __extension__ ({ \
+ poly64_t __s0 = __p0; \
+ poly64x1_t __s1 = __p1; \
+ poly64x1_t __ret; \
+ __ret = (poly64x1_t) __builtin_neon_vset_lane_i64(__s0, (int8x8_t)__s1, __p2); \
+ __ret; \
+})
+#define __noswap_vset_lane_p64(__p0, __p1, __p2) __extension__ ({ \
+ poly64_t __s0 = __p0; \
+ poly64x1_t __s1 = __p1; \
+ poly64x1_t __ret; \
+ __ret = (poly64x1_t) __builtin_neon_vset_lane_i64(__s0, (int8x8_t)__s1, __p2); \
+ __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vsetq_lane_p64(__p0, __p1, __p2) __extension__ ({ \
+ poly64_t __s0 = __p0; \
+ poly64x2_t __s1 = __p1; \
+ poly64x2_t __ret; \
+ __ret = (poly64x2_t) __builtin_neon_vsetq_lane_i64(__s0, (int8x16_t)__s1, __p2); \
+ __ret; \
+})
+#else
+#define vsetq_lane_p64(__p0, __p1, __p2) __extension__ ({ \
+ poly64_t __s0 = __p0; \
+ poly64x2_t __s1 = __p1; \
+ poly64x2_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \
+ poly64x2_t __ret; \
+ __ret = (poly64x2_t) __builtin_neon_vsetq_lane_i64(__s0, (int8x16_t)__rev1, __p2); \
+ __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
+ __ret; \
+})
+#define __noswap_vsetq_lane_p64(__p0, __p1, __p2) __extension__ ({ \
+ poly64_t __s0 = __p0; \
+ poly64x2_t __s1 = __p1; \
+ poly64x2_t __ret; \
+ __ret = (poly64x2_t) __builtin_neon_vsetq_lane_i64(__s0, (int8x16_t)__s1, __p2); \
+ __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vsetq_lane_f64(__p0, __p1, __p2) __extension__ ({ \
+ float64_t __s0 = __p0; \
+ float64x2_t __s1 = __p1; \
+ float64x2_t __ret; \
+ __ret = (float64x2_t) __builtin_neon_vsetq_lane_f64(__s0, (int8x16_t)__s1, __p2); \
+ __ret; \
+})
+#else
+#define vsetq_lane_f64(__p0, __p1, __p2) __extension__ ({ \
+ float64_t __s0 = __p0; \
+ float64x2_t __s1 = __p1; \
+ float64x2_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \
+ float64x2_t __ret; \
+ __ret = (float64x2_t) __builtin_neon_vsetq_lane_f64(__s0, (int8x16_t)__rev1, __p2); \
+ __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
+ __ret; \
+})
+#define __noswap_vsetq_lane_f64(__p0, __p1, __p2) __extension__ ({ \
+ float64_t __s0 = __p0; \
+ float64x2_t __s1 = __p1; \
+ float64x2_t __ret; \
+ __ret = (float64x2_t) __builtin_neon_vsetq_lane_f64(__s0, (int8x16_t)__s1, __p2); \
+ __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vset_lane_f64(__p0, __p1, __p2) __extension__ ({ \
+ float64_t __s0 = __p0; \
+ float64x1_t __s1 = __p1; \
+ float64x1_t __ret; \
+ __ret = (float64x1_t) __builtin_neon_vset_lane_f64(__s0, (int8x8_t)__s1, __p2); \
+ __ret; \
+})
+#else
+#define vset_lane_f64(__p0, __p1, __p2) __extension__ ({ \
+ float64_t __s0 = __p0; \
+ float64x1_t __s1 = __p1; \
+ float64x1_t __ret; \
+ __ret = (float64x1_t) __builtin_neon_vset_lane_f64(__s0, (int8x8_t)__s1, __p2); \
+ __ret; \
+})
+#define __noswap_vset_lane_f64(__p0, __p1, __p2) __extension__ ({ \
+ float64_t __s0 = __p0; \
+ float64x1_t __s1 = __p1; \
+ float64x1_t __ret; \
+ __ret = (float64x1_t) __builtin_neon_vset_lane_f64(__s0, (int8x8_t)__s1, __p2); \
+ __ret; \
+})
+#endif
+
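+/*
+ * vset_lane/vsetq_lane insert one scalar into a numbered lane; the lane
+ * index must be a compile-time constant, which is why these are macros
+ * rather than functions. Sketch of building a pair (assumed usage):
+ *
+ *   float64x2_t make_pair(float64_t lo, float64_t hi) {
+ *     return vsetq_lane_f64(hi, vdupq_n_f64(lo), 1);
+ *   }
+ */
+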
+#ifdef __LITTLE_ENDIAN__
+__ai uint64_t vshld_u64(uint64_t __p0, uint64_t __p1) {
+ uint64_t __ret;
+ __ret = (uint64_t) __builtin_neon_vshld_u64(__p0, __p1);
+ return __ret;
+}
+#else
+__ai uint64_t vshld_u64(uint64_t __p0, uint64_t __p1) {
+ uint64_t __ret;
+ __ret = (uint64_t) __builtin_neon_vshld_u64(__p0, __p1);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai int64_t vshld_s64(int64_t __p0, int64_t __p1) {
+ int64_t __ret;
+ __ret = (int64_t) __builtin_neon_vshld_s64(__p0, __p1);
+ return __ret;
+}
+#else
+__ai int64_t vshld_s64(int64_t __p0, int64_t __p1) {
+ int64_t __ret;
+ __ret = (int64_t) __builtin_neon_vshld_s64(__p0, __p1);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vshld_n_u64(__p0, __p1) __extension__ ({ \
+ uint64_t __s0 = __p0; \
+ uint64_t __ret; \
+ __ret = (uint64_t) __builtin_neon_vshld_n_u64(__s0, __p1); \
+ __ret; \
+})
+#else
+#define vshld_n_u64(__p0, __p1) __extension__ ({ \
+ uint64_t __s0 = __p0; \
+ uint64_t __ret; \
+ __ret = (uint64_t) __builtin_neon_vshld_n_u64(__s0, __p1); \
+ __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vshld_n_s64(__p0, __p1) __extension__ ({ \
+ int64_t __s0 = __p0; \
+ int64_t __ret; \
+ __ret = (int64_t) __builtin_neon_vshld_n_s64(__s0, __p1); \
+ __ret; \
+})
+#else
+#define vshld_n_s64(__p0, __p1) __extension__ ({ \
+ int64_t __s0 = __p0; \
+ int64_t __ret; \
+ __ret = (int64_t) __builtin_neon_vshld_n_s64(__s0, __p1); \
+ __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vshll_high_n_u8(__p0_218, __p1_218) __extension__ ({ \
+ uint8x16_t __s0_218 = __p0_218; \
+ uint16x8_t __ret_218; \
+ __ret_218 = (uint16x8_t)(vshll_n_u8(vget_high_u8(__s0_218), __p1_218)); \
+ __ret_218; \
+})
+#else
+#define vshll_high_n_u8(__p0_219, __p1_219) __extension__ ({ \
+ uint8x16_t __s0_219 = __p0_219; \
+ uint8x16_t __rev0_219; __rev0_219 = __builtin_shufflevector(__s0_219, __s0_219, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
+ uint16x8_t __ret_219; \
+ __ret_219 = (uint16x8_t)(__noswap_vshll_n_u8(__noswap_vget_high_u8(__rev0_219), __p1_219)); \
+ __ret_219 = __builtin_shufflevector(__ret_219, __ret_219, 7, 6, 5, 4, 3, 2, 1, 0); \
+ __ret_219; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vshll_high_n_u32(__p0_220, __p1_220) __extension__ ({ \
+ uint32x4_t __s0_220 = __p0_220; \
+ uint64x2_t __ret_220; \
+ __ret_220 = (uint64x2_t)(vshll_n_u32(vget_high_u32(__s0_220), __p1_220)); \
+ __ret_220; \
+})
+#else
+#define vshll_high_n_u32(__p0_221, __p1_221) __extension__ ({ \
+ uint32x4_t __s0_221 = __p0_221; \
+ uint32x4_t __rev0_221; __rev0_221 = __builtin_shufflevector(__s0_221, __s0_221, 3, 2, 1, 0); \
+ uint64x2_t __ret_221; \
+ __ret_221 = (uint64x2_t)(__noswap_vshll_n_u32(__noswap_vget_high_u32(__rev0_221), __p1_221)); \
+ __ret_221 = __builtin_shufflevector(__ret_221, __ret_221, 1, 0); \
+ __ret_221; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vshll_high_n_u16(__p0_222, __p1_222) __extension__ ({ \
+ uint16x8_t __s0_222 = __p0_222; \
+ uint32x4_t __ret_222; \
+ __ret_222 = (uint32x4_t)(vshll_n_u16(vget_high_u16(__s0_222), __p1_222)); \
+ __ret_222; \
+})
+#else
+#define vshll_high_n_u16(__p0_223, __p1_223) __extension__ ({ \
+ uint16x8_t __s0_223 = __p0_223; \
+ uint16x8_t __rev0_223; __rev0_223 = __builtin_shufflevector(__s0_223, __s0_223, 7, 6, 5, 4, 3, 2, 1, 0); \
+ uint32x4_t __ret_223; \
+ __ret_223 = (uint32x4_t)(__noswap_vshll_n_u16(__noswap_vget_high_u16(__rev0_223), __p1_223)); \
+ __ret_223 = __builtin_shufflevector(__ret_223, __ret_223, 3, 2, 1, 0); \
+ __ret_223; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vshll_high_n_s8(__p0_224, __p1_224) __extension__ ({ \
+ int8x16_t __s0_224 = __p0_224; \
+ int16x8_t __ret_224; \
+ __ret_224 = (int16x8_t)(vshll_n_s8(vget_high_s8(__s0_224), __p1_224)); \
+ __ret_224; \
+})
+#else
+#define vshll_high_n_s8(__p0_225, __p1_225) __extension__ ({ \
+ int8x16_t __s0_225 = __p0_225; \
+ int8x16_t __rev0_225; __rev0_225 = __builtin_shufflevector(__s0_225, __s0_225, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
+ int16x8_t __ret_225; \
+ __ret_225 = (int16x8_t)(__noswap_vshll_n_s8(__noswap_vget_high_s8(__rev0_225), __p1_225)); \
+ __ret_225 = __builtin_shufflevector(__ret_225, __ret_225, 7, 6, 5, 4, 3, 2, 1, 0); \
+ __ret_225; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vshll_high_n_s32(__p0_226, __p1_226) __extension__ ({ \
+ int32x4_t __s0_226 = __p0_226; \
+ int64x2_t __ret_226; \
+ __ret_226 = (int64x2_t)(vshll_n_s32(vget_high_s32(__s0_226), __p1_226)); \
+ __ret_226; \
+})
+#else
+#define vshll_high_n_s32(__p0_227, __p1_227) __extension__ ({ \
+ int32x4_t __s0_227 = __p0_227; \
+ int32x4_t __rev0_227; __rev0_227 = __builtin_shufflevector(__s0_227, __s0_227, 3, 2, 1, 0); \
+ int64x2_t __ret_227; \
+ __ret_227 = (int64x2_t)(__noswap_vshll_n_s32(__noswap_vget_high_s32(__rev0_227), __p1_227)); \
+ __ret_227 = __builtin_shufflevector(__ret_227, __ret_227, 1, 0); \
+ __ret_227; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vshll_high_n_s16(__p0_228, __p1_228) __extension__ ({ \
+ int16x8_t __s0_228 = __p0_228; \
+ int32x4_t __ret_228; \
+ __ret_228 = (int32x4_t)(vshll_n_s16(vget_high_s16(__s0_228), __p1_228)); \
+ __ret_228; \
+})
+#else
+#define vshll_high_n_s16(__p0_229, __p1_229) __extension__ ({ \
+ int16x8_t __s0_229 = __p0_229; \
+ int16x8_t __rev0_229; __rev0_229 = __builtin_shufflevector(__s0_229, __s0_229, 7, 6, 5, 4, 3, 2, 1, 0); \
+ int32x4_t __ret_229; \
+ __ret_229 = (int32x4_t)(__noswap_vshll_n_s16(__noswap_vget_high_s16(__rev0_229), __p1_229)); \
+ __ret_229 = __builtin_shufflevector(__ret_229, __ret_229, 3, 2, 1, 0); \
+ __ret_229; \
+})
+#endif
+
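+/*
+ * vshll_high_n_* widens the upper half of a quad vector while shifting
+ * left, the counterpart to vshll_n_*(vget_low_*()) for the lower half.
+ * Sketch of widening all 16 bytes to u16 (assumed usage):
+ *
+ *   void widen_u8(uint8x16_t v, uint16x8_t *lo, uint16x8_t *hi) {
+ *     *lo = vshll_n_u8(vget_low_u8(v), 0);
+ *     *hi = vshll_high_n_u8(v, 0);
+ *   }
+ */
+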
+#ifdef __LITTLE_ENDIAN__
+#define vshrd_n_u64(__p0, __p1) __extension__ ({ \
+ uint64_t __s0 = __p0; \
+ uint64_t __ret; \
+ __ret = (uint64_t) __builtin_neon_vshrd_n_u64(__s0, __p1); \
+ __ret; \
+})
+#else
+#define vshrd_n_u64(__p0, __p1) __extension__ ({ \
+ uint64_t __s0 = __p0; \
+ uint64_t __ret; \
+ __ret = (uint64_t) __builtin_neon_vshrd_n_u64(__s0, __p1); \
+ __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vshrd_n_s64(__p0, __p1) __extension__ ({ \
+ int64_t __s0 = __p0; \
+ int64_t __ret; \
+ __ret = (int64_t) __builtin_neon_vshrd_n_s64(__s0, __p1); \
+ __ret; \
+})
+#else
+#define vshrd_n_s64(__p0, __p1) __extension__ ({ \
+ int64_t __s0 = __p0; \
+ int64_t __ret; \
+ __ret = (int64_t) __builtin_neon_vshrd_n_s64(__s0, __p1); \
+ __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vshrn_high_n_u32(__p0_230, __p1_230, __p2_230) __extension__ ({ \
+ uint16x4_t __s0_230 = __p0_230; \
+ uint32x4_t __s1_230 = __p1_230; \
+ uint16x8_t __ret_230; \
+ __ret_230 = (uint16x8_t)(vcombine_u16((uint16x4_t)(__s0_230), (uint16x4_t)(vshrn_n_u32(__s1_230, __p2_230)))); \
+ __ret_230; \
+})
+#else
+#define vshrn_high_n_u32(__p0_231, __p1_231, __p2_231) __extension__ ({ \
+ uint16x4_t __s0_231 = __p0_231; \
+ uint32x4_t __s1_231 = __p1_231; \
+ uint16x4_t __rev0_231; __rev0_231 = __builtin_shufflevector(__s0_231, __s0_231, 3, 2, 1, 0); \
+ uint32x4_t __rev1_231; __rev1_231 = __builtin_shufflevector(__s1_231, __s1_231, 3, 2, 1, 0); \
+ uint16x8_t __ret_231; \
+ __ret_231 = (uint16x8_t)(__noswap_vcombine_u16((uint16x4_t)(__rev0_231), (uint16x4_t)(__noswap_vshrn_n_u32(__rev1_231, __p2_231)))); \
+ __ret_231 = __builtin_shufflevector(__ret_231, __ret_231, 7, 6, 5, 4, 3, 2, 1, 0); \
+ __ret_231; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vshrn_high_n_u64(__p0_232, __p1_232, __p2_232) __extension__ ({ \
+ uint32x2_t __s0_232 = __p0_232; \
+ uint64x2_t __s1_232 = __p1_232; \
+ uint32x4_t __ret_232; \
+ __ret_232 = (uint32x4_t)(vcombine_u32((uint32x2_t)(__s0_232), (uint32x2_t)(vshrn_n_u64(__s1_232, __p2_232)))); \
+ __ret_232; \
+})
+#else
+#define vshrn_high_n_u64(__p0_233, __p1_233, __p2_233) __extension__ ({ \
+ uint32x2_t __s0_233 = __p0_233; \
+ uint64x2_t __s1_233 = __p1_233; \
+ uint32x2_t __rev0_233; __rev0_233 = __builtin_shufflevector(__s0_233, __s0_233, 1, 0); \
+ uint64x2_t __rev1_233; __rev1_233 = __builtin_shufflevector(__s1_233, __s1_233, 1, 0); \
+ uint32x4_t __ret_233; \
+ __ret_233 = (uint32x4_t)(__noswap_vcombine_u32((uint32x2_t)(__rev0_233), (uint32x2_t)(__noswap_vshrn_n_u64(__rev1_233, __p2_233)))); \
+ __ret_233 = __builtin_shufflevector(__ret_233, __ret_233, 3, 2, 1, 0); \
+ __ret_233; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vshrn_high_n_u16(__p0_234, __p1_234, __p2_234) __extension__ ({ \
+ uint8x8_t __s0_234 = __p0_234; \
+ uint16x8_t __s1_234 = __p1_234; \
+ uint8x16_t __ret_234; \
+ __ret_234 = (uint8x16_t)(vcombine_u8((uint8x8_t)(__s0_234), (uint8x8_t)(vshrn_n_u16(__s1_234, __p2_234)))); \
+ __ret_234; \
+})
+#else
+#define vshrn_high_n_u16(__p0_235, __p1_235, __p2_235) __extension__ ({ \
+ uint8x8_t __s0_235 = __p0_235; \
+ uint16x8_t __s1_235 = __p1_235; \
+ uint8x8_t __rev0_235; __rev0_235 = __builtin_shufflevector(__s0_235, __s0_235, 7, 6, 5, 4, 3, 2, 1, 0); \
+ uint16x8_t __rev1_235; __rev1_235 = __builtin_shufflevector(__s1_235, __s1_235, 7, 6, 5, 4, 3, 2, 1, 0); \
+ uint8x16_t __ret_235; \
+ __ret_235 = (uint8x16_t)(__noswap_vcombine_u8((uint8x8_t)(__rev0_235), (uint8x8_t)(__noswap_vshrn_n_u16(__rev1_235, __p2_235)))); \
+ __ret_235 = __builtin_shufflevector(__ret_235, __ret_235, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
+ __ret_235; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vshrn_high_n_s32(__p0_236, __p1_236, __p2_236) __extension__ ({ \
+ int16x4_t __s0_236 = __p0_236; \
+ int32x4_t __s1_236 = __p1_236; \
+ int16x8_t __ret_236; \
+ __ret_236 = (int16x8_t)(vcombine_s16((int16x4_t)(__s0_236), (int16x4_t)(vshrn_n_s32(__s1_236, __p2_236)))); \
+ __ret_236; \
+})
+#else
+#define vshrn_high_n_s32(__p0_237, __p1_237, __p2_237) __extension__ ({ \
+ int16x4_t __s0_237 = __p0_237; \
+ int32x4_t __s1_237 = __p1_237; \
+ int16x4_t __rev0_237; __rev0_237 = __builtin_shufflevector(__s0_237, __s0_237, 3, 2, 1, 0); \
+ int32x4_t __rev1_237; __rev1_237 = __builtin_shufflevector(__s1_237, __s1_237, 3, 2, 1, 0); \
+ int16x8_t __ret_237; \
+ __ret_237 = (int16x8_t)(__noswap_vcombine_s16((int16x4_t)(__rev0_237), (int16x4_t)(__noswap_vshrn_n_s32(__rev1_237, __p2_237)))); \
+ __ret_237 = __builtin_shufflevector(__ret_237, __ret_237, 7, 6, 5, 4, 3, 2, 1, 0); \
+ __ret_237; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vshrn_high_n_s64(__p0_238, __p1_238, __p2_238) __extension__ ({ \
+ int32x2_t __s0_238 = __p0_238; \
+ int64x2_t __s1_238 = __p1_238; \
+ int32x4_t __ret_238; \
+ __ret_238 = (int32x4_t)(vcombine_s32((int32x2_t)(__s0_238), (int32x2_t)(vshrn_n_s64(__s1_238, __p2_238)))); \
+ __ret_238; \
+})
+#else
+#define vshrn_high_n_s64(__p0_239, __p1_239, __p2_239) __extension__ ({ \
+ int32x2_t __s0_239 = __p0_239; \
+ int64x2_t __s1_239 = __p1_239; \
+ int32x2_t __rev0_239; __rev0_239 = __builtin_shufflevector(__s0_239, __s0_239, 1, 0); \
+ int64x2_t __rev1_239; __rev1_239 = __builtin_shufflevector(__s1_239, __s1_239, 1, 0); \
+ int32x4_t __ret_239; \
+ __ret_239 = (int32x4_t)(__noswap_vcombine_s32((int32x2_t)(__rev0_239), (int32x2_t)(__noswap_vshrn_n_s64(__rev1_239, __p2_239)))); \
+ __ret_239 = __builtin_shufflevector(__ret_239, __ret_239, 3, 2, 1, 0); \
+ __ret_239; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vshrn_high_n_s16(__p0_240, __p1_240, __p2_240) __extension__ ({ \
+ int8x8_t __s0_240 = __p0_240; \
+ int16x8_t __s1_240 = __p1_240; \
+ int8x16_t __ret_240; \
+ __ret_240 = (int8x16_t)(vcombine_s8((int8x8_t)(__s0_240), (int8x8_t)(vshrn_n_s16(__s1_240, __p2_240)))); \
+ __ret_240; \
+})
+#else
+#define vshrn_high_n_s16(__p0_241, __p1_241, __p2_241) __extension__ ({ \
+ int8x8_t __s0_241 = __p0_241; \
+ int16x8_t __s1_241 = __p1_241; \
+ int8x8_t __rev0_241; __rev0_241 = __builtin_shufflevector(__s0_241, __s0_241, 7, 6, 5, 4, 3, 2, 1, 0); \
+ int16x8_t __rev1_241; __rev1_241 = __builtin_shufflevector(__s1_241, __s1_241, 7, 6, 5, 4, 3, 2, 1, 0); \
+ int8x16_t __ret_241; \
+ __ret_241 = (int8x16_t)(__noswap_vcombine_s8((int8x8_t)(__rev0_241), (int8x8_t)(__noswap_vshrn_n_s16(__rev1_241, __p2_241)))); \
+ __ret_241 = __builtin_shufflevector(__ret_241, __ret_241, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
+ __ret_241; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vslid_n_u64(__p0, __p1, __p2) __extension__ ({ \
+ uint64_t __s0 = __p0; \
+ uint64_t __s1 = __p1; \
+ uint64_t __ret; \
+ __ret = (uint64_t) __builtin_neon_vslid_n_u64(__s0, __s1, __p2); \
+ __ret; \
+})
+#else
+#define vslid_n_u64(__p0, __p1, __p2) __extension__ ({ \
+ uint64_t __s0 = __p0; \
+ uint64_t __s1 = __p1; \
+ uint64_t __ret; \
+ __ret = (uint64_t) __builtin_neon_vslid_n_u64(__s0, __s1, __p2); \
+ __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vslid_n_s64(__p0, __p1, __p2) __extension__ ({ \
+ int64_t __s0 = __p0; \
+ int64_t __s1 = __p1; \
+ int64_t __ret; \
+ __ret = (int64_t) __builtin_neon_vslid_n_s64(__s0, __s1, __p2); \
+ __ret; \
+})
+#else
+#define vslid_n_s64(__p0, __p1, __p2) __extension__ ({ \
+ int64_t __s0 = __p0; \
+ int64_t __s1 = __p1; \
+ int64_t __ret; \
+ __ret = (int64_t) __builtin_neon_vslid_n_s64(__s0, __s1, __p2); \
+ __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vsli_n_p64(__p0, __p1, __p2) __extension__ ({ \
+ poly64x1_t __s0 = __p0; \
+ poly64x1_t __s1 = __p1; \
+ poly64x1_t __ret; \
+ __ret = (poly64x1_t) __builtin_neon_vsli_n_v((int8x8_t)__s0, (int8x8_t)__s1, __p2, 6); \
+ __ret; \
+})
+#else
+#define vsli_n_p64(__p0, __p1, __p2) __extension__ ({ \
+ poly64x1_t __s0 = __p0; \
+ poly64x1_t __s1 = __p1; \
+ poly64x1_t __ret; \
+ __ret = (poly64x1_t) __builtin_neon_vsli_n_v((int8x8_t)__s0, (int8x8_t)__s1, __p2, 6); \
+ __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vsliq_n_p64(__p0, __p1, __p2) __extension__ ({ \
+ poly64x2_t __s0 = __p0; \
+ poly64x2_t __s1 = __p1; \
+ poly64x2_t __ret; \
+ __ret = (poly64x2_t) __builtin_neon_vsliq_n_v((int8x16_t)__s0, (int8x16_t)__s1, __p2, 38); \
+ __ret; \
+})
+#else
+#define vsliq_n_p64(__p0, __p1, __p2) __extension__ ({ \
+ poly64x2_t __s0 = __p0; \
+ poly64x2_t __s1 = __p1; \
+ poly64x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
+ poly64x2_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \
+ poly64x2_t __ret; \
+ __ret = (poly64x2_t) __builtin_neon_vsliq_n_v((int8x16_t)__rev0, (int8x16_t)__rev1, __p2, 38); \
+ __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
+ __ret; \
+})
+#endif
+
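+/*
+ * vslid_n/vsli_n shift the second operand left by n and insert it into the
+ * first, preserving only the first operand's low n bits (SLI). Sketch of
+ * packing two bitfields into one 64-bit value (assumed usage):
+ *
+ *   uint64_t pack20(uint64_t low20, uint64_t rest) {
+ *     return vslid_n_u64(low20, rest, 20);  // (rest << 20) | low 20 bits
+ *   }
+ */
+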
+#ifdef __LITTLE_ENDIAN__
+__ai uint8_t vsqaddb_u8(uint8_t __p0, uint8_t __p1) {
+ uint8_t __ret;
+ __ret = (uint8_t) __builtin_neon_vsqaddb_u8(__p0, __p1);
+ return __ret;
+}
+#else
+__ai uint8_t vsqaddb_u8(uint8_t __p0, uint8_t __p1) {
+ uint8_t __ret;
+ __ret = (uint8_t) __builtin_neon_vsqaddb_u8(__p0, __p1);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint32_t vsqadds_u32(uint32_t __p0, uint32_t __p1) {
+ uint32_t __ret;
+ __ret = (uint32_t) __builtin_neon_vsqadds_u32(__p0, __p1);
+ return __ret;
+}
+#else
+__ai uint32_t vsqadds_u32(uint32_t __p0, uint32_t __p1) {
+ uint32_t __ret;
+ __ret = (uint32_t) __builtin_neon_vsqadds_u32(__p0, __p1);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint64_t vsqaddd_u64(uint64_t __p0, uint64_t __p1) {
+ uint64_t __ret;
+ __ret = (uint64_t) __builtin_neon_vsqaddd_u64(__p0, __p1);
+ return __ret;
+}
+#else
+__ai uint64_t vsqaddd_u64(uint64_t __p0, uint64_t __p1) {
+ uint64_t __ret;
+ __ret = (uint64_t) __builtin_neon_vsqaddd_u64(__p0, __p1);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint16_t vsqaddh_u16(uint16_t __p0, uint16_t __p1) {
+ uint16_t __ret;
+ __ret = (uint16_t) __builtin_neon_vsqaddh_u16(__p0, __p1);
+ return __ret;
+}
+#else
+__ai uint16_t vsqaddh_u16(uint16_t __p0, uint16_t __p1) {
+ uint16_t __ret;
+ __ret = (uint16_t) __builtin_neon_vsqaddh_u16(__p0, __p1);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint8x16_t vsqaddq_u8(uint8x16_t __p0, uint8x16_t __p1) {
+ uint8x16_t __ret;
+ __ret = (uint8x16_t) __builtin_neon_vsqaddq_v((int8x16_t)__p0, (int8x16_t)__p1, 48);
+ return __ret;
+}
+#else
+__ai uint8x16_t vsqaddq_u8(uint8x16_t __p0, uint8x16_t __p1) {
+ uint8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
+ uint8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
+ uint8x16_t __ret;
+ __ret = (uint8x16_t) __builtin_neon_vsqaddq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 48);
+ __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint32x4_t vsqaddq_u32(uint32x4_t __p0, uint32x4_t __p1) {
+ uint32x4_t __ret;
+ __ret = (uint32x4_t) __builtin_neon_vsqaddq_v((int8x16_t)__p0, (int8x16_t)__p1, 50);
+ return __ret;
+}
+#else
+__ai uint32x4_t vsqaddq_u32(uint32x4_t __p0, uint32x4_t __p1) {
+ uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
+ uint32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
+ uint32x4_t __ret;
+ __ret = (uint32x4_t) __builtin_neon_vsqaddq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 50);
+ __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint64x2_t vsqaddq_u64(uint64x2_t __p0, uint64x2_t __p1) {
+ uint64x2_t __ret;
+ __ret = (uint64x2_t) __builtin_neon_vsqaddq_v((int8x16_t)__p0, (int8x16_t)__p1, 51);
+ return __ret;
+}
+#else
+__ai uint64x2_t vsqaddq_u64(uint64x2_t __p0, uint64x2_t __p1) {
+ uint64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
+ uint64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
+ uint64x2_t __ret;
+ __ret = (uint64x2_t) __builtin_neon_vsqaddq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 51);
+ __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint16x8_t vsqaddq_u16(uint16x8_t __p0, uint16x8_t __p1) {
+ uint16x8_t __ret;
+ __ret = (uint16x8_t) __builtin_neon_vsqaddq_v((int8x16_t)__p0, (int8x16_t)__p1, 49);
+ return __ret;
+}
+#else
+__ai uint16x8_t vsqaddq_u16(uint16x8_t __p0, uint16x8_t __p1) {
+ uint16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
+ uint16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
+ uint16x8_t __ret;
+ __ret = (uint16x8_t) __builtin_neon_vsqaddq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 49);
+ __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint8x8_t vsqadd_u8(uint8x8_t __p0, uint8x8_t __p1) {
+ uint8x8_t __ret;
+ __ret = (uint8x8_t) __builtin_neon_vsqadd_v((int8x8_t)__p0, (int8x8_t)__p1, 16);
+ return __ret;
+}
+#else
+__ai uint8x8_t vsqadd_u8(uint8x8_t __p0, uint8x8_t __p1) {
+ uint8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
+ uint8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
+ uint8x8_t __ret;
+ __ret = (uint8x8_t) __builtin_neon_vsqadd_v((int8x8_t)__rev0, (int8x8_t)__rev1, 16);
+ __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint32x2_t vsqadd_u32(uint32x2_t __p0, uint32x2_t __p1) {
+ uint32x2_t __ret;
+ __ret = (uint32x2_t) __builtin_neon_vsqadd_v((int8x8_t)__p0, (int8x8_t)__p1, 18);
+ return __ret;
+}
+#else
+__ai uint32x2_t vsqadd_u32(uint32x2_t __p0, uint32x2_t __p1) {
+ uint32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
+ uint32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
+ uint32x2_t __ret;
+ __ret = (uint32x2_t) __builtin_neon_vsqadd_v((int8x8_t)__rev0, (int8x8_t)__rev1, 18);
+ __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint64x1_t vsqadd_u64(uint64x1_t __p0, uint64x1_t __p1) {
+ uint64x1_t __ret;
+ __ret = (uint64x1_t) __builtin_neon_vsqadd_v((int8x8_t)__p0, (int8x8_t)__p1, 19);
+ return __ret;
+}
+#else
+__ai uint64x1_t vsqadd_u64(uint64x1_t __p0, uint64x1_t __p1) {
+ uint64x1_t __ret;
+ __ret = (uint64x1_t) __builtin_neon_vsqadd_v((int8x8_t)__p0, (int8x8_t)__p1, 19);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint16x4_t vsqadd_u16(uint16x4_t __p0, uint16x4_t __p1) {
+ uint16x4_t __ret;
+ __ret = (uint16x4_t) __builtin_neon_vsqadd_v((int8x8_t)__p0, (int8x8_t)__p1, 17);
+ return __ret;
+}
+#else
+__ai uint16x4_t vsqadd_u16(uint16x4_t __p0, uint16x4_t __p1) {
+ uint16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
+ uint16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
+ uint16x4_t __ret;
+ __ret = (uint16x4_t) __builtin_neon_vsqadd_v((int8x8_t)__rev0, (int8x8_t)__rev1, 17);
+ __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
+ return __ret;
+}
+#endif
+
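+/*
+ * The vsqadd* family wraps USQADD, an unsigned saturating accumulate whose
+ * second operand the instruction interprets as signed, even though this
+ * header version declares both operands unsigned. Sketch (assumed usage;
+ * the delta carries two's-complement signed bits):
+ *
+ *   uint8x16_t nudge(uint8x16_t acc, uint8x16_t signed_delta_bits) {
+ *     return vsqaddq_u8(acc, signed_delta_bits);  // saturates to [0, 255]
+ *   }
+ */
+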
+#ifdef __LITTLE_ENDIAN__
+__ai float64x2_t vsqrtq_f64(float64x2_t __p0) {
+ float64x2_t __ret;
+ __ret = (float64x2_t) __builtin_neon_vsqrtq_v((int8x16_t)__p0, 42);
+ return __ret;
+}
+#else
+__ai float64x2_t vsqrtq_f64(float64x2_t __p0) {
+ float64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
+ float64x2_t __ret;
+ __ret = (float64x2_t) __builtin_neon_vsqrtq_v((int8x16_t)__rev0, 42);
+ __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai float32x4_t vsqrtq_f32(float32x4_t __p0) {
+ float32x4_t __ret;
+ __ret = (float32x4_t) __builtin_neon_vsqrtq_v((int8x16_t)__p0, 41);
+ return __ret;
+}
+#else
+__ai float32x4_t vsqrtq_f32(float32x4_t __p0) {
+ float32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
+ float32x4_t __ret;
+ __ret = (float32x4_t) __builtin_neon_vsqrtq_v((int8x16_t)__rev0, 41);
+ __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai float64x1_t vsqrt_f64(float64x1_t __p0) {
+ float64x1_t __ret;
+ __ret = (float64x1_t) __builtin_neon_vsqrt_v((int8x8_t)__p0, 10);
+ return __ret;
+}
+#else
+__ai float64x1_t vsqrt_f64(float64x1_t __p0) {
+ float64x1_t __ret;
+ __ret = (float64x1_t) __builtin_neon_vsqrt_v((int8x8_t)__p0, 10);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai float32x2_t vsqrt_f32(float32x2_t __p0) {
+ float32x2_t __ret;
+ __ret = (float32x2_t) __builtin_neon_vsqrt_v((int8x8_t)__p0, 9);
+ return __ret;
+}
+#else
+__ai float32x2_t vsqrt_f32(float32x2_t __p0) {
+ float32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
+ float32x2_t __ret;
+ __ret = (float32x2_t) __builtin_neon_vsqrt_v((int8x8_t)__rev0, 9);
+ __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
+ return __ret;
+}
+#endif
+
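+/*
+ * vsqrt* maps to the exact FSQRT instruction, so no estimate/refine loop is
+ * needed when full precision matters. Sketch of a 2-lane Euclidean norm
+ * (assumed usage):
+ *
+ *   float64x2_t norm2(float64x2_t x, float64x2_t y) {
+ *     return vsqrtq_f64(vfmaq_f64(vmulq_f64(x, x), y, y));  // sqrt(x*x + y*y)
+ *   }
+ */
+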
+#ifdef __LITTLE_ENDIAN__
+#define vsrad_n_u64(__p0, __p1, __p2) __extension__ ({ \
+ uint64_t __s0 = __p0; \
+ uint64_t __s1 = __p1; \
+ uint64_t __ret; \
+ __ret = (uint64_t) __builtin_neon_vsrad_n_u64(__s0, __s1, __p2); \
+ __ret; \
+})
+#else
+#define vsrad_n_u64(__p0, __p1, __p2) __extension__ ({ \
+ uint64_t __s0 = __p0; \
+ uint64_t __s1 = __p1; \
+ uint64_t __ret; \
+ __ret = (uint64_t) __builtin_neon_vsrad_n_u64(__s0, __s1, __p2); \
+ __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vsrad_n_s64(__p0, __p1, __p2) __extension__ ({ \
+ int64_t __s0 = __p0; \
+ int64_t __s1 = __p1; \
+ int64_t __ret; \
+ __ret = (int64_t) __builtin_neon_vsrad_n_s64(__s0, __s1, __p2); \
+ __ret; \
+})
+#else
+#define vsrad_n_s64(__p0, __p1, __p2) __extension__ ({ \
+ int64_t __s0 = __p0; \
+ int64_t __s1 = __p1; \
+ int64_t __ret; \
+ __ret = (int64_t) __builtin_neon_vsrad_n_s64(__s0, __s1, __p2); \
+ __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vsrid_n_u64(__p0, __p1, __p2) __extension__ ({ \
+ uint64_t __s0 = __p0; \
+ uint64_t __s1 = __p1; \
+ uint64_t __ret; \
+ __ret = (uint64_t) __builtin_neon_vsrid_n_u64(__s0, __s1, __p2); \
+ __ret; \
+})
+#else
+#define vsrid_n_u64(__p0, __p1, __p2) __extension__ ({ \
+ uint64_t __s0 = __p0; \
+ uint64_t __s1 = __p1; \
+ uint64_t __ret; \
+ __ret = (uint64_t) __builtin_neon_vsrid_n_u64(__s0, __s1, __p2); \
+ __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vsrid_n_s64(__p0, __p1, __p2) __extension__ ({ \
+ int64_t __s0 = __p0; \
+ int64_t __s1 = __p1; \
+ int64_t __ret; \
+ __ret = (int64_t) __builtin_neon_vsrid_n_s64(__s0, __s1, __p2); \
+ __ret; \
+})
+#else
+#define vsrid_n_s64(__p0, __p1, __p2) __extension__ ({ \
+ int64_t __s0 = __p0; \
+ int64_t __s1 = __p1; \
+ int64_t __ret; \
+ __ret = (int64_t) __builtin_neon_vsrid_n_s64(__s0, __s1, __p2); \
+ __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vsri_n_p64(__p0, __p1, __p2) __extension__ ({ \
+ poly64x1_t __s0 = __p0; \
+ poly64x1_t __s1 = __p1; \
+ poly64x1_t __ret; \
+ __ret = (poly64x1_t) __builtin_neon_vsri_n_v((int8x8_t)__s0, (int8x8_t)__s1, __p2, 6); \
+ __ret; \
+})
+#else
+#define vsri_n_p64(__p0, __p1, __p2) __extension__ ({ \
+ poly64x1_t __s0 = __p0; \
+ poly64x1_t __s1 = __p1; \
+ poly64x1_t __ret; \
+ __ret = (poly64x1_t) __builtin_neon_vsri_n_v((int8x8_t)__s0, (int8x8_t)__s1, __p2, 6); \
+ __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vsriq_n_p64(__p0, __p1, __p2) __extension__ ({ \
+ poly64x2_t __s0 = __p0; \
+ poly64x2_t __s1 = __p1; \
+ poly64x2_t __ret; \
+ __ret = (poly64x2_t) __builtin_neon_vsriq_n_v((int8x16_t)__s0, (int8x16_t)__s1, __p2, 38); \
+ __ret; \
+})
+#else
+#define vsriq_n_p64(__p0, __p1, __p2) __extension__ ({ \
+ poly64x2_t __s0 = __p0; \
+ poly64x2_t __s1 = __p1; \
+ poly64x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
+ poly64x2_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \
+ poly64x2_t __ret; \
+ __ret = (poly64x2_t) __builtin_neon_vsriq_n_v((int8x16_t)__rev0, (int8x16_t)__rev1, __p2, 38); \
+ __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
+ __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vst1_p64(__p0, __p1) __extension__ ({ \
+ poly64x1_t __s1 = __p1; \
+ __builtin_neon_vst1_v(__p0, (int8x8_t)__s1, 6); \
+})
+#else
+#define vst1_p64(__p0, __p1) __extension__ ({ \
+ poly64x1_t __s1 = __p1; \
+ __builtin_neon_vst1_v(__p0, (int8x8_t)__s1, 6); \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vst1q_p64(__p0, __p1) __extension__ ({ \
+ poly64x2_t __s1 = __p1; \
+ __builtin_neon_vst1q_v(__p0, (int8x16_t)__s1, 38); \
+})
+#else
+#define vst1q_p64(__p0, __p1) __extension__ ({ \
+ poly64x2_t __s1 = __p1; \
+ poly64x2_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \
+ __builtin_neon_vst1q_v(__p0, (int8x16_t)__rev1, 38); \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vst1q_f64(__p0, __p1) __extension__ ({ \
+ float64x2_t __s1 = __p1; \
+ __builtin_neon_vst1q_v(__p0, (int8x16_t)__s1, 42); \
+})
+#else
+#define vst1q_f64(__p0, __p1) __extension__ ({ \
+ float64x2_t __s1 = __p1; \
+ float64x2_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \
+ __builtin_neon_vst1q_v(__p0, (int8x16_t)__rev1, 42); \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vst1_f64(__p0, __p1) __extension__ ({ \
+ float64x1_t __s1 = __p1; \
+ __builtin_neon_vst1_v(__p0, (int8x8_t)__s1, 10); \
+})
+#else
+#define vst1_f64(__p0, __p1) __extension__ ({ \
+ float64x1_t __s1 = __p1; \
+ __builtin_neon_vst1_v(__p0, (int8x8_t)__s1, 10); \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vst1_lane_p64(__p0, __p1, __p2) __extension__ ({ \
+ poly64x1_t __s1 = __p1; \
+ __builtin_neon_vst1_lane_v(__p0, (int8x8_t)__s1, __p2, 6); \
+})
+#else
+#define vst1_lane_p64(__p0, __p1, __p2) __extension__ ({ \
+ poly64x1_t __s1 = __p1; \
+ __builtin_neon_vst1_lane_v(__p0, (int8x8_t)__s1, __p2, 6); \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vst1q_lane_p64(__p0, __p1, __p2) __extension__ ({ \
+ poly64x2_t __s1 = __p1; \
+ __builtin_neon_vst1q_lane_v(__p0, (int8x16_t)__s1, __p2, 38); \
+})
+#else
+#define vst1q_lane_p64(__p0, __p1, __p2) __extension__ ({ \
+ poly64x2_t __s1 = __p1; \
+ poly64x2_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \
+ __builtin_neon_vst1q_lane_v(__p0, (int8x16_t)__rev1, __p2, 38); \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vst1q_lane_f64(__p0, __p1, __p2) __extension__ ({ \
+ float64x2_t __s1 = __p1; \
+ __builtin_neon_vst1q_lane_v(__p0, (int8x16_t)__s1, __p2, 42); \
+})
+#else
+#define vst1q_lane_f64(__p0, __p1, __p2) __extension__ ({ \
+ float64x2_t __s1 = __p1; \
+ float64x2_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \
+ __builtin_neon_vst1q_lane_v(__p0, (int8x16_t)__rev1, __p2, 42); \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vst1_lane_f64(__p0, __p1, __p2) __extension__ ({ \
+ float64x1_t __s1 = __p1; \
+ __builtin_neon_vst1_lane_v(__p0, (int8x8_t)__s1, __p2, 10); \
+})
+#else
+#define vst1_lane_f64(__p0, __p1, __p2) __extension__ ({ \
+ float64x1_t __s1 = __p1; \
+ __builtin_neon_vst1_lane_v(__p0, (int8x8_t)__s1, __p2, 10); \
+})
+#endif
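+
+/* Illustrative sketch (not part of the generated header): the _lane forms
+ * store a single selected lane; the lane index must be a compile-time
+ * constant. For example, writing only the upper lane of a float64x2_t:
+ *
+ *   #include <arm_neon.h>
+ *   void store_hi(double *out, float64x2_t v) {
+ *       vst1q_lane_f64(out, v, 1);   // *out = vgetq_lane_f64(v, 1)
+ *   }
+ */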
+
+#ifdef __LITTLE_ENDIAN__
+#define vst1_p8_x2(__p0, __p1) __extension__ ({ \
+ poly8x8x2_t __s1 = __p1; \
+ __builtin_neon_vst1_x2_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], 4); \
+})
+#else
+#define vst1_p8_x2(__p0, __p1) __extension__ ({ \
+ poly8x8x2_t __s1 = __p1; \
+ poly8x8x2_t __rev1; \
+ __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
+ __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
+ __builtin_neon_vst1_x2_v(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], 4); \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vst1_p64_x2(__p0, __p1) __extension__ ({ \
+ poly64x1x2_t __s1 = __p1; \
+ __builtin_neon_vst1_x2_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], 6); \
+})
+#else
+#define vst1_p64_x2(__p0, __p1) __extension__ ({ \
+ poly64x1x2_t __s1 = __p1; \
+ __builtin_neon_vst1_x2_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], 6); \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vst1_p16_x2(__p0, __p1) __extension__ ({ \
+ poly16x4x2_t __s1 = __p1; \
+ __builtin_neon_vst1_x2_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], 5); \
+})
+#else
+#define vst1_p16_x2(__p0, __p1) __extension__ ({ \
+ poly16x4x2_t __s1 = __p1; \
+ poly16x4x2_t __rev1; \
+ __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \
+ __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \
+ __builtin_neon_vst1_x2_v(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], 5); \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vst1q_p8_x2(__p0, __p1) __extension__ ({ \
+ poly8x16x2_t __s1 = __p1; \
+ __builtin_neon_vst1q_x2_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], 36); \
+})
+#else
+#define vst1q_p8_x2(__p0, __p1) __extension__ ({ \
+ poly8x16x2_t __s1 = __p1; \
+ poly8x16x2_t __rev1; \
+ __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
+ __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
+ __builtin_neon_vst1q_x2_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], 36); \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vst1q_p64_x2(__p0, __p1) __extension__ ({ \
+ poly64x2x2_t __s1 = __p1; \
+ __builtin_neon_vst1q_x2_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], 38); \
+})
+#else
+#define vst1q_p64_x2(__p0, __p1) __extension__ ({ \
+ poly64x2x2_t __s1 = __p1; \
+ poly64x2x2_t __rev1; \
+ __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \
+ __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \
+ __builtin_neon_vst1q_x2_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], 38); \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vst1q_p16_x2(__p0, __p1) __extension__ ({ \
+ poly16x8x2_t __s1 = __p1; \
+ __builtin_neon_vst1q_x2_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], 37); \
+})
+#else
+#define vst1q_p16_x2(__p0, __p1) __extension__ ({ \
+ poly16x8x2_t __s1 = __p1; \
+ poly16x8x2_t __rev1; \
+ __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
+ __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
+ __builtin_neon_vst1q_x2_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], 37); \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vst1q_u8_x2(__p0, __p1) __extension__ ({ \
+ uint8x16x2_t __s1 = __p1; \
+ __builtin_neon_vst1q_x2_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], 48); \
+})
+#else
+#define vst1q_u8_x2(__p0, __p1) __extension__ ({ \
+ uint8x16x2_t __s1 = __p1; \
+ uint8x16x2_t __rev1; \
+ __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
+ __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
+ __builtin_neon_vst1q_x2_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], 48); \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vst1q_u32_x2(__p0, __p1) __extension__ ({ \
+ uint32x4x2_t __s1 = __p1; \
+ __builtin_neon_vst1q_x2_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], 50); \
+})
+#else
+#define vst1q_u32_x2(__p0, __p1) __extension__ ({ \
+ uint32x4x2_t __s1 = __p1; \
+ uint32x4x2_t __rev1; \
+ __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \
+ __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \
+ __builtin_neon_vst1q_x2_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], 50); \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vst1q_u64_x2(__p0, __p1) __extension__ ({ \
+ uint64x2x2_t __s1 = __p1; \
+ __builtin_neon_vst1q_x2_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], 51); \
+})
+#else
+#define vst1q_u64_x2(__p0, __p1) __extension__ ({ \
+ uint64x2x2_t __s1 = __p1; \
+ uint64x2x2_t __rev1; \
+ __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \
+ __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \
+ __builtin_neon_vst1q_x2_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], 51); \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vst1q_u16_x2(__p0, __p1) __extension__ ({ \
+ uint16x8x2_t __s1 = __p1; \
+ __builtin_neon_vst1q_x2_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], 49); \
+})
+#else
+#define vst1q_u16_x2(__p0, __p1) __extension__ ({ \
+ uint16x8x2_t __s1 = __p1; \
+ uint16x8x2_t __rev1; \
+ __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
+ __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
+ __builtin_neon_vst1q_x2_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], 49); \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vst1q_s8_x2(__p0, __p1) __extension__ ({ \
+ int8x16x2_t __s1 = __p1; \
+ __builtin_neon_vst1q_x2_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], 32); \
+})
+#else
+#define vst1q_s8_x2(__p0, __p1) __extension__ ({ \
+ int8x16x2_t __s1 = __p1; \
+ int8x16x2_t __rev1; \
+ __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
+ __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
+ __builtin_neon_vst1q_x2_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], 32); \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vst1q_f64_x2(__p0, __p1) __extension__ ({ \
+ float64x2x2_t __s1 = __p1; \
+ __builtin_neon_vst1q_x2_v(__p0, __s1.val[0], __s1.val[1], 42); \
+})
+#else
+#define vst1q_f64_x2(__p0, __p1) __extension__ ({ \
+ float64x2x2_t __s1 = __p1; \
+ float64x2x2_t __rev1; \
+ __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \
+ __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \
+ __builtin_neon_vst1q_x2_v(__p0, __rev1.val[0], __rev1.val[1], 42); \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vst1q_f32_x2(__p0, __p1) __extension__ ({ \
+ float32x4x2_t __s1 = __p1; \
+ __builtin_neon_vst1q_x2_v(__p0, __s1.val[0], __s1.val[1], 41); \
+})
+#else
+#define vst1q_f32_x2(__p0, __p1) __extension__ ({ \
+ float32x4x2_t __s1 = __p1; \
+ float32x4x2_t __rev1; \
+ __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \
+ __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \
+ __builtin_neon_vst1q_x2_v(__p0, __rev1.val[0], __rev1.val[1], 41); \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vst1q_f16_x2(__p0, __p1) __extension__ ({ \
+ float16x8x2_t __s1 = __p1; \
+ __builtin_neon_vst1q_x2_v(__p0, __s1.val[0], __s1.val[1], 40); \
+})
+#else
+#define vst1q_f16_x2(__p0, __p1) __extension__ ({ \
+ float16x8x2_t __s1 = __p1; \
+ float16x8x2_t __rev1; \
+ __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
+ __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
+ __builtin_neon_vst1q_x2_v(__p0, __rev1.val[0], __rev1.val[1], 40); \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vst1q_s32_x2(__p0, __p1) __extension__ ({ \
+ int32x4x2_t __s1 = __p1; \
+ __builtin_neon_vst1q_x2_v(__p0, __s1.val[0], __s1.val[1], 34); \
+})
+#else
+#define vst1q_s32_x2(__p0, __p1) __extension__ ({ \
+ int32x4x2_t __s1 = __p1; \
+ int32x4x2_t __rev1; \
+ __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \
+ __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \
+ __builtin_neon_vst1q_x2_v(__p0, __rev1.val[0], __rev1.val[1], 34); \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vst1q_s64_x2(__p0, __p1) __extension__ ({ \
+ int64x2x2_t __s1 = __p1; \
+ __builtin_neon_vst1q_x2_v(__p0, __s1.val[0], __s1.val[1], 35); \
+})
+#else
+#define vst1q_s64_x2(__p0, __p1) __extension__ ({ \
+ int64x2x2_t __s1 = __p1; \
+ int64x2x2_t __rev1; \
+ __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \
+ __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \
+ __builtin_neon_vst1q_x2_v(__p0, __rev1.val[0], __rev1.val[1], 35); \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vst1q_s16_x2(__p0, __p1) __extension__ ({ \
+ int16x8x2_t __s1 = __p1; \
+ __builtin_neon_vst1q_x2_v(__p0, __s1.val[0], __s1.val[1], 33); \
+})
+#else
+#define vst1q_s16_x2(__p0, __p1) __extension__ ({ \
+ int16x8x2_t __s1 = __p1; \
+ int16x8x2_t __rev1; \
+ __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
+ __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
+ __builtin_neon_vst1q_x2_v(__p0, __rev1.val[0], __rev1.val[1], 33); \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vst1_u8_x2(__p0, __p1) __extension__ ({ \
+ uint8x8x2_t __s1 = __p1; \
+ __builtin_neon_vst1_x2_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], 16); \
+})
+#else
+#define vst1_u8_x2(__p0, __p1) __extension__ ({ \
+ uint8x8x2_t __s1 = __p1; \
+ uint8x8x2_t __rev1; \
+ __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
+ __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
+ __builtin_neon_vst1_x2_v(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], 16); \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vst1_u32_x2(__p0, __p1) __extension__ ({ \
+ uint32x2x2_t __s1 = __p1; \
+ __builtin_neon_vst1_x2_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], 18); \
+})
+#else
+#define vst1_u32_x2(__p0, __p1) __extension__ ({ \
+ uint32x2x2_t __s1 = __p1; \
+ uint32x2x2_t __rev1; \
+ __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \
+ __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \
+ __builtin_neon_vst1_x2_v(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], 18); \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vst1_u64_x2(__p0, __p1) __extension__ ({ \
+ uint64x1x2_t __s1 = __p1; \
+ __builtin_neon_vst1_x2_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], 19); \
+})
+#else
+#define vst1_u64_x2(__p0, __p1) __extension__ ({ \
+ uint64x1x2_t __s1 = __p1; \
+ __builtin_neon_vst1_x2_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], 19); \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vst1_u16_x2(__p0, __p1) __extension__ ({ \
+ uint16x4x2_t __s1 = __p1; \
+ __builtin_neon_vst1_x2_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], 17); \
+})
+#else
+#define vst1_u16_x2(__p0, __p1) __extension__ ({ \
+ uint16x4x2_t __s1 = __p1; \
+ uint16x4x2_t __rev1; \
+ __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \
+ __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \
+ __builtin_neon_vst1_x2_v(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], 17); \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vst1_s8_x2(__p0, __p1) __extension__ ({ \
+ int8x8x2_t __s1 = __p1; \
+ __builtin_neon_vst1_x2_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], 0); \
+})
+#else
+#define vst1_s8_x2(__p0, __p1) __extension__ ({ \
+ int8x8x2_t __s1 = __p1; \
+ int8x8x2_t __rev1; \
+ __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
+ __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
+ __builtin_neon_vst1_x2_v(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], 0); \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vst1_f64_x2(__p0, __p1) __extension__ ({ \
+ float64x1x2_t __s1 = __p1; \
+ __builtin_neon_vst1_x2_v(__p0, __s1.val[0], __s1.val[1], 10); \
+})
+#else
+#define vst1_f64_x2(__p0, __p1) __extension__ ({ \
+ float64x1x2_t __s1 = __p1; \
+ __builtin_neon_vst1_x2_v(__p0, __s1.val[0], __s1.val[1], 10); \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vst1_f32_x2(__p0, __p1) __extension__ ({ \
+ float32x2x2_t __s1 = __p1; \
+ __builtin_neon_vst1_x2_v(__p0, __s1.val[0], __s1.val[1], 9); \
+})
+#else
+#define vst1_f32_x2(__p0, __p1) __extension__ ({ \
+ float32x2x2_t __s1 = __p1; \
+ float32x2x2_t __rev1; \
+ __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \
+ __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \
+ __builtin_neon_vst1_x2_v(__p0, __rev1.val[0], __rev1.val[1], 9); \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vst1_f16_x2(__p0, __p1) __extension__ ({ \
+ float16x4x2_t __s1 = __p1; \
+ __builtin_neon_vst1_x2_v(__p0, __s1.val[0], __s1.val[1], 8); \
+})
+#else
+#define vst1_f16_x2(__p0, __p1) __extension__ ({ \
+ float16x4x2_t __s1 = __p1; \
+ float16x4x2_t __rev1; \
+ __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \
+ __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \
+ __builtin_neon_vst1_x2_v(__p0, __rev1.val[0], __rev1.val[1], 8); \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vst1_s32_x2(__p0, __p1) __extension__ ({ \
+ int32x2x2_t __s1 = __p1; \
+ __builtin_neon_vst1_x2_v(__p0, __s1.val[0], __s1.val[1], 2); \
+})
+#else
+#define vst1_s32_x2(__p0, __p1) __extension__ ({ \
+ int32x2x2_t __s1 = __p1; \
+ int32x2x2_t __rev1; \
+ __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \
+ __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \
+ __builtin_neon_vst1_x2_v(__p0, __rev1.val[0], __rev1.val[1], 2); \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vst1_s64_x2(__p0, __p1) __extension__ ({ \
+ int64x1x2_t __s1 = __p1; \
+ __builtin_neon_vst1_x2_v(__p0, __s1.val[0], __s1.val[1], 3); \
+})
+#else
+#define vst1_s64_x2(__p0, __p1) __extension__ ({ \
+ int64x1x2_t __s1 = __p1; \
+ __builtin_neon_vst1_x2_v(__p0, __s1.val[0], __s1.val[1], 3); \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vst1_s16_x2(__p0, __p1) __extension__ ({ \
+ int16x4x2_t __s1 = __p1; \
+ __builtin_neon_vst1_x2_v(__p0, __s1.val[0], __s1.val[1], 1); \
+})
+#else
+#define vst1_s16_x2(__p0, __p1) __extension__ ({ \
+ int16x4x2_t __s1 = __p1; \
+ int16x4x2_t __rev1; \
+ __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \
+ __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \
+ __builtin_neon_vst1_x2_v(__p0, __rev1.val[0], __rev1.val[1], 1); \
+})
+#endif
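+
+/* Illustrative sketch (not part of the generated header): the _x2 forms store
+ * two full registers to consecutive memory without interleaving (the
+ * multi-register ST1 form), unlike vst2 below, which interleaves lanes. For
+ * example, a 32-byte contiguous store:
+ *
+ *   #include <arm_neon.h>
+ *   void store_32_bytes(uint8_t *out, uint8x16_t a, uint8x16_t b) {
+ *       uint8x16x2_t pair = { { a, b } };
+ *       vst1q_u8_x2(out, pair);   // out[0..15] = a, out[16..31] = b
+ *   }
+ */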
+
+#ifdef __LITTLE_ENDIAN__
+#define vst1_p8_x3(__p0, __p1) __extension__ ({ \
+ poly8x8x3_t __s1 = __p1; \
+ __builtin_neon_vst1_x3_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], 4); \
+})
+#else
+#define vst1_p8_x3(__p0, __p1) __extension__ ({ \
+ poly8x8x3_t __s1 = __p1; \
+ poly8x8x3_t __rev1; \
+ __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
+ __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
+ __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \
+ __builtin_neon_vst1_x3_v(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], 4); \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vst1_p64_x3(__p0, __p1) __extension__ ({ \
+ poly64x1x3_t __s1 = __p1; \
+ __builtin_neon_vst1_x3_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], 6); \
+})
+#else
+#define vst1_p64_x3(__p0, __p1) __extension__ ({ \
+ poly64x1x3_t __s1 = __p1; \
+ __builtin_neon_vst1_x3_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], 6); \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vst1_p16_x3(__p0, __p1) __extension__ ({ \
+ poly16x4x3_t __s1 = __p1; \
+ __builtin_neon_vst1_x3_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], 5); \
+})
+#else
+#define vst1_p16_x3(__p0, __p1) __extension__ ({ \
+ poly16x4x3_t __s1 = __p1; \
+ poly16x4x3_t __rev1; \
+ __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \
+ __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \
+ __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 3, 2, 1, 0); \
+ __builtin_neon_vst1_x3_v(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], 5); \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vst1q_p8_x3(__p0, __p1) __extension__ ({ \
+ poly8x16x3_t __s1 = __p1; \
+ __builtin_neon_vst1q_x3_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], 36); \
+})
+#else
+#define vst1q_p8_x3(__p0, __p1) __extension__ ({ \
+ poly8x16x3_t __s1 = __p1; \
+ poly8x16x3_t __rev1; \
+ __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
+ __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
+ __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
+ __builtin_neon_vst1q_x3_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], 36); \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vst1q_p64_x3(__p0, __p1) __extension__ ({ \
+ poly64x2x3_t __s1 = __p1; \
+ __builtin_neon_vst1q_x3_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], 38); \
+})
+#else
+#define vst1q_p64_x3(__p0, __p1) __extension__ ({ \
+ poly64x2x3_t __s1 = __p1; \
+ poly64x2x3_t __rev1; \
+ __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \
+ __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \
+ __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 1, 0); \
+ __builtin_neon_vst1q_x3_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], 38); \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vst1q_p16_x3(__p0, __p1) __extension__ ({ \
+ poly16x8x3_t __s1 = __p1; \
+ __builtin_neon_vst1q_x3_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], 37); \
+})
+#else
+#define vst1q_p16_x3(__p0, __p1) __extension__ ({ \
+ poly16x8x3_t __s1 = __p1; \
+ poly16x8x3_t __rev1; \
+ __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
+ __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
+ __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \
+ __builtin_neon_vst1q_x3_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], 37); \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vst1q_u8_x3(__p0, __p1) __extension__ ({ \
+ uint8x16x3_t __s1 = __p1; \
+ __builtin_neon_vst1q_x3_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], 48); \
+})
+#else
+#define vst1q_u8_x3(__p0, __p1) __extension__ ({ \
+ uint8x16x3_t __s1 = __p1; \
+ uint8x16x3_t __rev1; \
+ __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
+ __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
+ __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
+ __builtin_neon_vst1q_x3_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], 48); \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vst1q_u32_x3(__p0, __p1) __extension__ ({ \
+ uint32x4x3_t __s1 = __p1; \
+ __builtin_neon_vst1q_x3_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], 50); \
+})
+#else
+#define vst1q_u32_x3(__p0, __p1) __extension__ ({ \
+ uint32x4x3_t __s1 = __p1; \
+ uint32x4x3_t __rev1; \
+ __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \
+ __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \
+ __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 3, 2, 1, 0); \
+ __builtin_neon_vst1q_x3_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], 50); \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vst1q_u64_x3(__p0, __p1) __extension__ ({ \
+ uint64x2x3_t __s1 = __p1; \
+ __builtin_neon_vst1q_x3_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], 51); \
+})
+#else
+#define vst1q_u64_x3(__p0, __p1) __extension__ ({ \
+ uint64x2x3_t __s1 = __p1; \
+ uint64x2x3_t __rev1; \
+ __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \
+ __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \
+ __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 1, 0); \
+ __builtin_neon_vst1q_x3_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], 51); \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vst1q_u16_x3(__p0, __p1) __extension__ ({ \
+ uint16x8x3_t __s1 = __p1; \
+ __builtin_neon_vst1q_x3_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], 49); \
+})
+#else
+#define vst1q_u16_x3(__p0, __p1) __extension__ ({ \
+ uint16x8x3_t __s1 = __p1; \
+ uint16x8x3_t __rev1; \
+ __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
+ __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
+ __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \
+ __builtin_neon_vst1q_x3_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], 49); \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vst1q_s8_x3(__p0, __p1) __extension__ ({ \
+ int8x16x3_t __s1 = __p1; \
+ __builtin_neon_vst1q_x3_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], 32); \
+})
+#else
+#define vst1q_s8_x3(__p0, __p1) __extension__ ({ \
+ int8x16x3_t __s1 = __p1; \
+ int8x16x3_t __rev1; \
+ __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
+ __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
+ __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
+ __builtin_neon_vst1q_x3_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], 32); \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vst1q_f64_x3(__p0, __p1) __extension__ ({ \
+ float64x2x3_t __s1 = __p1; \
+ __builtin_neon_vst1q_x3_v(__p0, __s1.val[0], __s1.val[1], __s1.val[2], 42); \
+})
+#else
+#define vst1q_f64_x3(__p0, __p1) __extension__ ({ \
+ float64x2x3_t __s1 = __p1; \
+ float64x2x3_t __rev1; \
+ __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \
+ __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \
+ __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 1, 0); \
+ __builtin_neon_vst1q_x3_v(__p0, __rev1.val[0], __rev1.val[1], __rev1.val[2], 42); \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vst1q_f32_x3(__p0, __p1) __extension__ ({ \
+ float32x4x3_t __s1 = __p1; \
+ __builtin_neon_vst1q_x3_v(__p0, __s1.val[0], __s1.val[1], __s1.val[2], 41); \
+})
+#else
+#define vst1q_f32_x3(__p0, __p1) __extension__ ({ \
+ float32x4x3_t __s1 = __p1; \
+ float32x4x3_t __rev1; \
+ __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \
+ __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \
+ __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 3, 2, 1, 0); \
+ __builtin_neon_vst1q_x3_v(__p0, __rev1.val[0], __rev1.val[1], __rev1.val[2], 41); \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vst1q_f16_x3(__p0, __p1) __extension__ ({ \
+ float16x8x3_t __s1 = __p1; \
+ __builtin_neon_vst1q_x3_v(__p0, __s1.val[0], __s1.val[1], __s1.val[2], 40); \
+})
+#else
+#define vst1q_f16_x3(__p0, __p1) __extension__ ({ \
+ float16x8x3_t __s1 = __p1; \
+ float16x8x3_t __rev1; \
+ __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
+ __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
+ __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \
+ __builtin_neon_vst1q_x3_v(__p0, __rev1.val[0], __rev1.val[1], __rev1.val[2], 40); \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vst1q_s32_x3(__p0, __p1) __extension__ ({ \
+ int32x4x3_t __s1 = __p1; \
+ __builtin_neon_vst1q_x3_v(__p0, __s1.val[0], __s1.val[1], __s1.val[2], 34); \
+})
+#else
+#define vst1q_s32_x3(__p0, __p1) __extension__ ({ \
+ int32x4x3_t __s1 = __p1; \
+ int32x4x3_t __rev1; \
+ __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \
+ __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \
+ __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 3, 2, 1, 0); \
+ __builtin_neon_vst1q_x3_v(__p0, __rev1.val[0], __rev1.val[1], __rev1.val[2], 34); \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vst1q_s64_x3(__p0, __p1) __extension__ ({ \
+ int64x2x3_t __s1 = __p1; \
+ __builtin_neon_vst1q_x3_v(__p0, __s1.val[0], __s1.val[1], __s1.val[2], 35); \
+})
+#else
+#define vst1q_s64_x3(__p0, __p1) __extension__ ({ \
+ int64x2x3_t __s1 = __p1; \
+ int64x2x3_t __rev1; \
+ __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \
+ __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \
+ __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 1, 0); \
+ __builtin_neon_vst1q_x3_v(__p0, __rev1.val[0], __rev1.val[1], __rev1.val[2], 35); \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vst1q_s16_x3(__p0, __p1) __extension__ ({ \
+ int16x8x3_t __s1 = __p1; \
+ __builtin_neon_vst1q_x3_v(__p0, __s1.val[0], __s1.val[1], __s1.val[2], 33); \
+})
+#else
+#define vst1q_s16_x3(__p0, __p1) __extension__ ({ \
+ int16x8x3_t __s1 = __p1; \
+ int16x8x3_t __rev1; \
+ __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
+ __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
+ __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \
+ __builtin_neon_vst1q_x3_v(__p0, __rev1.val[0], __rev1.val[1], __rev1.val[2], 33); \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vst1_u8_x3(__p0, __p1) __extension__ ({ \
+ uint8x8x3_t __s1 = __p1; \
+ __builtin_neon_vst1_x3_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], 16); \
+})
+#else
+#define vst1_u8_x3(__p0, __p1) __extension__ ({ \
+ uint8x8x3_t __s1 = __p1; \
+ uint8x8x3_t __rev1; \
+ __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
+ __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
+ __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \
+ __builtin_neon_vst1_x3_v(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], 16); \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vst1_u32_x3(__p0, __p1) __extension__ ({ \
+ uint32x2x3_t __s1 = __p1; \
+ __builtin_neon_vst1_x3_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], 18); \
+})
+#else
+#define vst1_u32_x3(__p0, __p1) __extension__ ({ \
+ uint32x2x3_t __s1 = __p1; \
+ uint32x2x3_t __rev1; \
+ __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \
+ __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \
+ __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 1, 0); \
+ __builtin_neon_vst1_x3_v(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], 18); \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vst1_u64_x3(__p0, __p1) __extension__ ({ \
+ uint64x1x3_t __s1 = __p1; \
+ __builtin_neon_vst1_x3_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], 19); \
+})
+#else
+#define vst1_u64_x3(__p0, __p1) __extension__ ({ \
+ uint64x1x3_t __s1 = __p1; \
+ __builtin_neon_vst1_x3_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], 19); \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vst1_u16_x3(__p0, __p1) __extension__ ({ \
+ uint16x4x3_t __s1 = __p1; \
+ __builtin_neon_vst1_x3_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], 17); \
+})
+#else
+#define vst1_u16_x3(__p0, __p1) __extension__ ({ \
+ uint16x4x3_t __s1 = __p1; \
+ uint16x4x3_t __rev1; \
+ __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \
+ __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \
+ __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 3, 2, 1, 0); \
+ __builtin_neon_vst1_x3_v(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], 17); \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vst1_s8_x3(__p0, __p1) __extension__ ({ \
+ int8x8x3_t __s1 = __p1; \
+ __builtin_neon_vst1_x3_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], 0); \
+})
+#else
+#define vst1_s8_x3(__p0, __p1) __extension__ ({ \
+ int8x8x3_t __s1 = __p1; \
+ int8x8x3_t __rev1; \
+ __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
+ __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
+ __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \
+ __builtin_neon_vst1_x3_v(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], 0); \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vst1_f64_x3(__p0, __p1) __extension__ ({ \
+ float64x1x3_t __s1 = __p1; \
+ __builtin_neon_vst1_x3_v(__p0, __s1.val[0], __s1.val[1], __s1.val[2], 10); \
+})
+#else
+#define vst1_f64_x3(__p0, __p1) __extension__ ({ \
+ float64x1x3_t __s1 = __p1; \
+ __builtin_neon_vst1_x3_v(__p0, __s1.val[0], __s1.val[1], __s1.val[2], 10); \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vst1_f32_x3(__p0, __p1) __extension__ ({ \
+ float32x2x3_t __s1 = __p1; \
+ __builtin_neon_vst1_x3_v(__p0, __s1.val[0], __s1.val[1], __s1.val[2], 9); \
+})
+#else
+#define vst1_f32_x3(__p0, __p1) __extension__ ({ \
+ float32x2x3_t __s1 = __p1; \
+ float32x2x3_t __rev1; \
+ __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \
+ __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \
+ __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 1, 0); \
+ __builtin_neon_vst1_x3_v(__p0, __rev1.val[0], __rev1.val[1], __rev1.val[2], 9); \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vst1_f16_x3(__p0, __p1) __extension__ ({ \
+ float16x4x3_t __s1 = __p1; \
+ __builtin_neon_vst1_x3_v(__p0, __s1.val[0], __s1.val[1], __s1.val[2], 8); \
+})
+#else
+#define vst1_f16_x3(__p0, __p1) __extension__ ({ \
+ float16x4x3_t __s1 = __p1; \
+ float16x4x3_t __rev1; \
+ __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \
+ __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \
+ __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 3, 2, 1, 0); \
+ __builtin_neon_vst1_x3_v(__p0, __rev1.val[0], __rev1.val[1], __rev1.val[2], 8); \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vst1_s32_x3(__p0, __p1) __extension__ ({ \
+ int32x2x3_t __s1 = __p1; \
+ __builtin_neon_vst1_x3_v(__p0, __s1.val[0], __s1.val[1], __s1.val[2], 2); \
+})
+#else
+#define vst1_s32_x3(__p0, __p1) __extension__ ({ \
+ int32x2x3_t __s1 = __p1; \
+ int32x2x3_t __rev1; \
+ __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \
+ __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \
+ __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 1, 0); \
+ __builtin_neon_vst1_x3_v(__p0, __rev1.val[0], __rev1.val[1], __rev1.val[2], 2); \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vst1_s64_x3(__p0, __p1) __extension__ ({ \
+ int64x1x3_t __s1 = __p1; \
+ __builtin_neon_vst1_x3_v(__p0, __s1.val[0], __s1.val[1], __s1.val[2], 3); \
+})
+#else
+#define vst1_s64_x3(__p0, __p1) __extension__ ({ \
+ int64x1x3_t __s1 = __p1; \
+ __builtin_neon_vst1_x3_v(__p0, __s1.val[0], __s1.val[1], __s1.val[2], 3); \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vst1_s16_x3(__p0, __p1) __extension__ ({ \
+ int16x4x3_t __s1 = __p1; \
+ __builtin_neon_vst1_x3_v(__p0, __s1.val[0], __s1.val[1], __s1.val[2], 1); \
+})
+#else
+#define vst1_s16_x3(__p0, __p1) __extension__ ({ \
+ int16x4x3_t __s1 = __p1; \
+ int16x4x3_t __rev1; \
+ __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \
+ __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \
+ __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 3, 2, 1, 0); \
+ __builtin_neon_vst1_x3_v(__p0, __rev1.val[0], __rev1.val[1], __rev1.val[2], 1); \
+})
+#endif
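+
+/* Illustrative sketch (not part of the generated header): the _x3 forms
+ * behave like _x2 but store three consecutive registers, e.g. 48 bytes of
+ * int16 data:
+ *
+ *   #include <arm_neon.h>
+ *   void store_24_shorts(int16_t *out, int16x8x3_t v) {
+ *       // out[0..7] = val[0], out[8..15] = val[1], out[16..23] = val[2]
+ *       vst1q_s16_x3(out, v);
+ *   }
+ */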
+
+#ifdef __LITTLE_ENDIAN__
+#define vst1_p8_x4(__p0, __p1) __extension__ ({ \
+ poly8x8x4_t __s1 = __p1; \
+ __builtin_neon_vst1_x4_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], (int8x8_t)__s1.val[3], 4); \
+})
+#else
+#define vst1_p8_x4(__p0, __p1) __extension__ ({ \
+ poly8x8x4_t __s1 = __p1; \
+ poly8x8x4_t __rev1; \
+ __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
+ __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
+ __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \
+ __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 7, 6, 5, 4, 3, 2, 1, 0); \
+ __builtin_neon_vst1_x4_v(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], (int8x8_t)__rev1.val[3], 4); \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vst1_p64_x4(__p0, __p1) __extension__ ({ \
+ poly64x1x4_t __s1 = __p1; \
+ __builtin_neon_vst1_x4_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], (int8x8_t)__s1.val[3], 6); \
+})
+#else
+#define vst1_p64_x4(__p0, __p1) __extension__ ({ \
+ poly64x1x4_t __s1 = __p1; \
+ __builtin_neon_vst1_x4_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], (int8x8_t)__s1.val[3], 6); \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vst1_p16_x4(__p0, __p1) __extension__ ({ \
+ poly16x4x4_t __s1 = __p1; \
+ __builtin_neon_vst1_x4_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], (int8x8_t)__s1.val[3], 5); \
+})
+#else
+#define vst1_p16_x4(__p0, __p1) __extension__ ({ \
+ poly16x4x4_t __s1 = __p1; \
+ poly16x4x4_t __rev1; \
+ __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \
+ __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \
+ __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 3, 2, 1, 0); \
+ __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 3, 2, 1, 0); \
+ __builtin_neon_vst1_x4_v(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], (int8x8_t)__rev1.val[3], 5); \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vst1q_p8_x4(__p0, __p1) __extension__ ({ \
+ poly8x16x4_t __s1 = __p1; \
+ __builtin_neon_vst1q_x4_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], (int8x16_t)__s1.val[3], 36); \
+})
+#else
+#define vst1q_p8_x4(__p0, __p1) __extension__ ({ \
+ poly8x16x4_t __s1 = __p1; \
+ poly8x16x4_t __rev1; \
+ __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
+ __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
+ __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
+ __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
+ __builtin_neon_vst1q_x4_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], (int8x16_t)__rev1.val[3], 36); \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vst1q_p64_x4(__p0, __p1) __extension__ ({ \
+ poly64x2x4_t __s1 = __p1; \
+ __builtin_neon_vst1q_x4_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], (int8x16_t)__s1.val[3], 38); \
+})
+#else
+#define vst1q_p64_x4(__p0, __p1) __extension__ ({ \
+ poly64x2x4_t __s1 = __p1; \
+ poly64x2x4_t __rev1; \
+ __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \
+ __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \
+ __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 1, 0); \
+ __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 1, 0); \
+ __builtin_neon_vst1q_x4_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], (int8x16_t)__rev1.val[3], 38); \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vst1q_p16_x4(__p0, __p1) __extension__ ({ \
+ poly16x8x4_t __s1 = __p1; \
+ __builtin_neon_vst1q_x4_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], (int8x16_t)__s1.val[3], 37); \
+})
+#else
+#define vst1q_p16_x4(__p0, __p1) __extension__ ({ \
+ poly16x8x4_t __s1 = __p1; \
+ poly16x8x4_t __rev1; \
+ __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
+ __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
+ __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \
+ __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 7, 6, 5, 4, 3, 2, 1, 0); \
+ __builtin_neon_vst1q_x4_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], (int8x16_t)__rev1.val[3], 37); \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vst1q_u8_x4(__p0, __p1) __extension__ ({ \
+ uint8x16x4_t __s1 = __p1; \
+ __builtin_neon_vst1q_x4_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], (int8x16_t)__s1.val[3], 48); \
+})
+#else
+#define vst1q_u8_x4(__p0, __p1) __extension__ ({ \
+ uint8x16x4_t __s1 = __p1; \
+ uint8x16x4_t __rev1; \
+ __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
+ __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
+ __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
+ __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
+ __builtin_neon_vst1q_x4_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], (int8x16_t)__rev1.val[3], 48); \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vst1q_u32_x4(__p0, __p1) __extension__ ({ \
+ uint32x4x4_t __s1 = __p1; \
+ __builtin_neon_vst1q_x4_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], (int8x16_t)__s1.val[3], 50); \
+})
+#else
+#define vst1q_u32_x4(__p0, __p1) __extension__ ({ \
+ uint32x4x4_t __s1 = __p1; \
+ uint32x4x4_t __rev1; \
+ __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \
+ __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \
+ __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 3, 2, 1, 0); \
+ __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 3, 2, 1, 0); \
+ __builtin_neon_vst1q_x4_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], (int8x16_t)__rev1.val[3], 50); \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vst1q_u64_x4(__p0, __p1) __extension__ ({ \
+ uint64x2x4_t __s1 = __p1; \
+ __builtin_neon_vst1q_x4_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], (int8x16_t)__s1.val[3], 51); \
+})
+#else
+#define vst1q_u64_x4(__p0, __p1) __extension__ ({ \
+ uint64x2x4_t __s1 = __p1; \
+ uint64x2x4_t __rev1; \
+ __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \
+ __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \
+ __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 1, 0); \
+ __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 1, 0); \
+ __builtin_neon_vst1q_x4_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], (int8x16_t)__rev1.val[3], 51); \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vst1q_u16_x4(__p0, __p1) __extension__ ({ \
+ uint16x8x4_t __s1 = __p1; \
+ __builtin_neon_vst1q_x4_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], (int8x16_t)__s1.val[3], 49); \
+})
+#else
+#define vst1q_u16_x4(__p0, __p1) __extension__ ({ \
+ uint16x8x4_t __s1 = __p1; \
+ uint16x8x4_t __rev1; \
+ __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
+ __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
+ __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \
+ __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 7, 6, 5, 4, 3, 2, 1, 0); \
+ __builtin_neon_vst1q_x4_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], (int8x16_t)__rev1.val[3], 49); \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vst1q_s8_x4(__p0, __p1) __extension__ ({ \
+ int8x16x4_t __s1 = __p1; \
+ __builtin_neon_vst1q_x4_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], (int8x16_t)__s1.val[3], 32); \
+})
+#else
+#define vst1q_s8_x4(__p0, __p1) __extension__ ({ \
+ int8x16x4_t __s1 = __p1; \
+ int8x16x4_t __rev1; \
+ __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
+ __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
+ __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
+ __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
+ __builtin_neon_vst1q_x4_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], (int8x16_t)__rev1.val[3], 32); \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vst1q_f64_x4(__p0, __p1) __extension__ ({ \
+ float64x2x4_t __s1 = __p1; \
+ __builtin_neon_vst1q_x4_v(__p0, __s1.val[0], __s1.val[1], __s1.val[2], __s1.val[3], 42); \
+})
+#else
+#define vst1q_f64_x4(__p0, __p1) __extension__ ({ \
+ float64x2x4_t __s1 = __p1; \
+ float64x2x4_t __rev1; \
+ __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \
+ __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \
+ __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 1, 0); \
+ __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 1, 0); \
+ __builtin_neon_vst1q_x4_v(__p0, __rev1.val[0], __rev1.val[1], __rev1.val[2], __rev1.val[3], 42); \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vst1q_f32_x4(__p0, __p1) __extension__ ({ \
+ float32x4x4_t __s1 = __p1; \
+ __builtin_neon_vst1q_x4_v(__p0, __s1.val[0], __s1.val[1], __s1.val[2], __s1.val[3], 41); \
+})
+#else
+#define vst1q_f32_x4(__p0, __p1) __extension__ ({ \
+ float32x4x4_t __s1 = __p1; \
+ float32x4x4_t __rev1; \
+ __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \
+ __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \
+ __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 3, 2, 1, 0); \
+ __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 3, 2, 1, 0); \
+ __builtin_neon_vst1q_x4_v(__p0, __rev1.val[0], __rev1.val[1], __rev1.val[2], __rev1.val[3], 41); \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vst1q_f16_x4(__p0, __p1) __extension__ ({ \
+ float16x8x4_t __s1 = __p1; \
+ __builtin_neon_vst1q_x4_v(__p0, __s1.val[0], __s1.val[1], __s1.val[2], __s1.val[3], 40); \
+})
+#else
+#define vst1q_f16_x4(__p0, __p1) __extension__ ({ \
+ float16x8x4_t __s1 = __p1; \
+ float16x8x4_t __rev1; \
+ __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
+ __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
+ __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \
+ __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 7, 6, 5, 4, 3, 2, 1, 0); \
+ __builtin_neon_vst1q_x4_v(__p0, __rev1.val[0], __rev1.val[1], __rev1.val[2], __rev1.val[3], 40); \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vst1q_s32_x4(__p0, __p1) __extension__ ({ \
+ int32x4x4_t __s1 = __p1; \
+ __builtin_neon_vst1q_x4_v(__p0, __s1.val[0], __s1.val[1], __s1.val[2], __s1.val[3], 34); \
+})
+#else
+#define vst1q_s32_x4(__p0, __p1) __extension__ ({ \
+ int32x4x4_t __s1 = __p1; \
+ int32x4x4_t __rev1; \
+ __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \
+ __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \
+ __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 3, 2, 1, 0); \
+ __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 3, 2, 1, 0); \
+ __builtin_neon_vst1q_x4_v(__p0, __rev1.val[0], __rev1.val[1], __rev1.val[2], __rev1.val[3], 34); \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vst1q_s64_x4(__p0, __p1) __extension__ ({ \
+ int64x2x4_t __s1 = __p1; \
+ __builtin_neon_vst1q_x4_v(__p0, __s1.val[0], __s1.val[1], __s1.val[2], __s1.val[3], 35); \
+})
+#else
+#define vst1q_s64_x4(__p0, __p1) __extension__ ({ \
+ int64x2x4_t __s1 = __p1; \
+ int64x2x4_t __rev1; \
+ __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \
+ __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \
+ __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 1, 0); \
+ __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 1, 0); \
+ __builtin_neon_vst1q_x4_v(__p0, __rev1.val[0], __rev1.val[1], __rev1.val[2], __rev1.val[3], 35); \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vst1q_s16_x4(__p0, __p1) __extension__ ({ \
+ int16x8x4_t __s1 = __p1; \
+ __builtin_neon_vst1q_x4_v(__p0, __s1.val[0], __s1.val[1], __s1.val[2], __s1.val[3], 33); \
+})
+#else
+#define vst1q_s16_x4(__p0, __p1) __extension__ ({ \
+ int16x8x4_t __s1 = __p1; \
+ int16x8x4_t __rev1; \
+ __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
+ __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
+ __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \
+ __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 7, 6, 5, 4, 3, 2, 1, 0); \
+ __builtin_neon_vst1q_x4_v(__p0, __rev1.val[0], __rev1.val[1], __rev1.val[2], __rev1.val[3], 33); \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vst1_u8_x4(__p0, __p1) __extension__ ({ \
+ uint8x8x4_t __s1 = __p1; \
+ __builtin_neon_vst1_x4_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], (int8x8_t)__s1.val[3], 16); \
+})
+#else
+#define vst1_u8_x4(__p0, __p1) __extension__ ({ \
+ uint8x8x4_t __s1 = __p1; \
+ uint8x8x4_t __rev1; \
+ __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
+ __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
+ __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \
+ __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 7, 6, 5, 4, 3, 2, 1, 0); \
+ __builtin_neon_vst1_x4_v(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], (int8x8_t)__rev1.val[3], 16); \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vst1_u32_x4(__p0, __p1) __extension__ ({ \
+ uint32x2x4_t __s1 = __p1; \
+ __builtin_neon_vst1_x4_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], (int8x8_t)__s1.val[3], 18); \
+})
+#else
+#define vst1_u32_x4(__p0, __p1) __extension__ ({ \
+ uint32x2x4_t __s1 = __p1; \
+ uint32x2x4_t __rev1; \
+ __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \
+ __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \
+ __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 1, 0); \
+ __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 1, 0); \
+ __builtin_neon_vst1_x4_v(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], (int8x8_t)__rev1.val[3], 18); \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vst1_u64_x4(__p0, __p1) __extension__ ({ \
+ uint64x1x4_t __s1 = __p1; \
+ __builtin_neon_vst1_x4_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], (int8x8_t)__s1.val[3], 19); \
+})
+#else
+#define vst1_u64_x4(__p0, __p1) __extension__ ({ \
+ uint64x1x4_t __s1 = __p1; \
+ __builtin_neon_vst1_x4_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], (int8x8_t)__s1.val[3], 19); \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vst1_u16_x4(__p0, __p1) __extension__ ({ \
+ uint16x4x4_t __s1 = __p1; \
+ __builtin_neon_vst1_x4_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], (int8x8_t)__s1.val[3], 17); \
+})
+#else
+#define vst1_u16_x4(__p0, __p1) __extension__ ({ \
+ uint16x4x4_t __s1 = __p1; \
+ uint16x4x4_t __rev1; \
+ __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \
+ __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \
+ __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 3, 2, 1, 0); \
+ __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 3, 2, 1, 0); \
+ __builtin_neon_vst1_x4_v(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], (int8x8_t)__rev1.val[3], 17); \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vst1_s8_x4(__p0, __p1) __extension__ ({ \
+ int8x8x4_t __s1 = __p1; \
+ __builtin_neon_vst1_x4_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], (int8x8_t)__s1.val[3], 0); \
+})
+#else
+#define vst1_s8_x4(__p0, __p1) __extension__ ({ \
+ int8x8x4_t __s1 = __p1; \
+ int8x8x4_t __rev1; \
+ __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
+ __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
+ __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \
+ __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 7, 6, 5, 4, 3, 2, 1, 0); \
+ __builtin_neon_vst1_x4_v(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], (int8x8_t)__rev1.val[3], 0); \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vst1_f64_x4(__p0, __p1) __extension__ ({ \
+ float64x1x4_t __s1 = __p1; \
+ __builtin_neon_vst1_x4_v(__p0, __s1.val[0], __s1.val[1], __s1.val[2], __s1.val[3], 10); \
+})
+#else
+#define vst1_f64_x4(__p0, __p1) __extension__ ({ \
+ float64x1x4_t __s1 = __p1; \
+ __builtin_neon_vst1_x4_v(__p0, __s1.val[0], __s1.val[1], __s1.val[2], __s1.val[3], 10); \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vst1_f32_x4(__p0, __p1) __extension__ ({ \
+ float32x2x4_t __s1 = __p1; \
+ __builtin_neon_vst1_x4_v(__p0, __s1.val[0], __s1.val[1], __s1.val[2], __s1.val[3], 9); \
+})
+#else
+#define vst1_f32_x4(__p0, __p1) __extension__ ({ \
+ float32x2x4_t __s1 = __p1; \
+ float32x2x4_t __rev1; \
+ __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \
+ __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \
+ __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 1, 0); \
+ __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 1, 0); \
+ __builtin_neon_vst1_x4_v(__p0, __rev1.val[0], __rev1.val[1], __rev1.val[2], __rev1.val[3], 9); \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vst1_f16_x4(__p0, __p1) __extension__ ({ \
+ float16x4x4_t __s1 = __p1; \
+ __builtin_neon_vst1_x4_v(__p0, __s1.val[0], __s1.val[1], __s1.val[2], __s1.val[3], 8); \
+})
+#else
+#define vst1_f16_x4(__p0, __p1) __extension__ ({ \
+ float16x4x4_t __s1 = __p1; \
+ float16x4x4_t __rev1; \
+ __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \
+ __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \
+ __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 3, 2, 1, 0); \
+ __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 3, 2, 1, 0); \
+ __builtin_neon_vst1_x4_v(__p0, __rev1.val[0], __rev1.val[1], __rev1.val[2], __rev1.val[3], 8); \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vst1_s32_x4(__p0, __p1) __extension__ ({ \
+ int32x2x4_t __s1 = __p1; \
+ __builtin_neon_vst1_x4_v(__p0, __s1.val[0], __s1.val[1], __s1.val[2], __s1.val[3], 2); \
+})
+#else
+#define vst1_s32_x4(__p0, __p1) __extension__ ({ \
+ int32x2x4_t __s1 = __p1; \
+ int32x2x4_t __rev1; \
+ __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \
+ __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \
+ __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 1, 0); \
+ __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 1, 0); \
+ __builtin_neon_vst1_x4_v(__p0, __rev1.val[0], __rev1.val[1], __rev1.val[2], __rev1.val[3], 2); \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vst1_s64_x4(__p0, __p1) __extension__ ({ \
+ int64x1x4_t __s1 = __p1; \
+ __builtin_neon_vst1_x4_v(__p0, __s1.val[0], __s1.val[1], __s1.val[2], __s1.val[3], 3); \
+})
+#else
+#define vst1_s64_x4(__p0, __p1) __extension__ ({ \
+ int64x1x4_t __s1 = __p1; \
+ __builtin_neon_vst1_x4_v(__p0, __s1.val[0], __s1.val[1], __s1.val[2], __s1.val[3], 3); \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vst1_s16_x4(__p0, __p1) __extension__ ({ \
+ int16x4x4_t __s1 = __p1; \
+ __builtin_neon_vst1_x4_v(__p0, __s1.val[0], __s1.val[1], __s1.val[2], __s1.val[3], 1); \
+})
+#else
+#define vst1_s16_x4(__p0, __p1) __extension__ ({ \
+ int16x4x4_t __s1 = __p1; \
+ int16x4x4_t __rev1; \
+ __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \
+ __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \
+ __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 3, 2, 1, 0); \
+ __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 3, 2, 1, 0); \
+ __builtin_neon_vst1_x4_v(__p0, __rev1.val[0], __rev1.val[1], __rev1.val[2], __rev1.val[3], 1); \
+})
+#endif
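+
+/* Annotation (not part of the generated header): every intrinsic in this
+   file is emitted twice, guarded on __LITTLE_ENDIAN__.  The NEON builtins
+   operate on registers in little-endian lane order, so the big-endian
+   branch lane-reverses each source with __builtin_shufflevector before
+   the call (and lane-reverses any vector result afterwards).  The trailing
+   integer literal is an internal element-type discriminator for the
+   overloaded __builtin_neon_* call (from the definitions above: 1 = s16,
+   2 = s32, 3 = s64, 8 = f16, 9 = f32, 10 = f64 for the d-register forms).
+   Vectors with a single 64-bit lane have no order to correct, which is
+   why both branches of vst1_f64_x4 and vst1_s64_x4 are identical. */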
+
+#ifdef __LITTLE_ENDIAN__
+#define vst2_p64(__p0, __p1) __extension__ ({ \
+ poly64x1x2_t __s1 = __p1; \
+ __builtin_neon_vst2_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], 6); \
+})
+#else
+#define vst2_p64(__p0, __p1) __extension__ ({ \
+ poly64x1x2_t __s1 = __p1; \
+ __builtin_neon_vst2_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], 6); \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vst2q_p64(__p0, __p1) __extension__ ({ \
+ poly64x2x2_t __s1 = __p1; \
+ __builtin_neon_vst2q_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], 38); \
+})
+#else
+#define vst2q_p64(__p0, __p1) __extension__ ({ \
+ poly64x2x2_t __s1 = __p1; \
+ poly64x2x2_t __rev1; \
+ __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \
+ __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \
+ __builtin_neon_vst2q_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], 38); \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vst2q_u64(__p0, __p1) __extension__ ({ \
+ uint64x2x2_t __s1 = __p1; \
+ __builtin_neon_vst2q_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], 51); \
+})
+#else
+#define vst2q_u64(__p0, __p1) __extension__ ({ \
+ uint64x2x2_t __s1 = __p1; \
+ uint64x2x2_t __rev1; \
+ __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \
+ __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \
+ __builtin_neon_vst2q_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], 51); \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vst2q_f64(__p0, __p1) __extension__ ({ \
+ float64x2x2_t __s1 = __p1; \
+ __builtin_neon_vst2q_v(__p0, __s1.val[0], __s1.val[1], 42); \
+})
+#else
+#define vst2q_f64(__p0, __p1) __extension__ ({ \
+ float64x2x2_t __s1 = __p1; \
+ float64x2x2_t __rev1; \
+ __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \
+ __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \
+ __builtin_neon_vst2q_v(__p0, __rev1.val[0], __rev1.val[1], 42); \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vst2q_s64(__p0, __p1) __extension__ ({ \
+ int64x2x2_t __s1 = __p1; \
+ __builtin_neon_vst2q_v(__p0, __s1.val[0], __s1.val[1], 35); \
+})
+#else
+#define vst2q_s64(__p0, __p1) __extension__ ({ \
+ int64x2x2_t __s1 = __p1; \
+ int64x2x2_t __rev1; \
+ __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \
+ __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \
+ __builtin_neon_vst2q_v(__p0, __rev1.val[0], __rev1.val[1], 35); \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vst2_f64(__p0, __p1) __extension__ ({ \
+ float64x1x2_t __s1 = __p1; \
+ __builtin_neon_vst2_v(__p0, __s1.val[0], __s1.val[1], 10); \
+})
+#else
+#define vst2_f64(__p0, __p1) __extension__ ({ \
+ float64x1x2_t __s1 = __p1; \
+ __builtin_neon_vst2_v(__p0, __s1.val[0], __s1.val[1], 10); \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vst2_lane_p64(__p0, __p1, __p2) __extension__ ({ \
+ poly64x1x2_t __s1 = __p1; \
+ __builtin_neon_vst2_lane_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], __p2, 6); \
+})
+#else
+#define vst2_lane_p64(__p0, __p1, __p2) __extension__ ({ \
+ poly64x1x2_t __s1 = __p1; \
+ __builtin_neon_vst2_lane_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], __p2, 6); \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vst2q_lane_p8(__p0, __p1, __p2) __extension__ ({ \
+ poly8x16x2_t __s1 = __p1; \
+ __builtin_neon_vst2q_lane_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], __p2, 36); \
+})
+#else
+#define vst2q_lane_p8(__p0, __p1, __p2) __extension__ ({ \
+ poly8x16x2_t __s1 = __p1; \
+ poly8x16x2_t __rev1; \
+ __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
+ __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
+ __builtin_neon_vst2q_lane_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], __p2, 36); \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vst2q_lane_p64(__p0, __p1, __p2) __extension__ ({ \
+ poly64x2x2_t __s1 = __p1; \
+ __builtin_neon_vst2q_lane_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], __p2, 38); \
+})
+#else
+#define vst2q_lane_p64(__p0, __p1, __p2) __extension__ ({ \
+ poly64x2x2_t __s1 = __p1; \
+ poly64x2x2_t __rev1; \
+ __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \
+ __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \
+ __builtin_neon_vst2q_lane_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], __p2, 38); \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vst2q_lane_u8(__p0, __p1, __p2) __extension__ ({ \
+ uint8x16x2_t __s1 = __p1; \
+ __builtin_neon_vst2q_lane_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], __p2, 48); \
+})
+#else
+#define vst2q_lane_u8(__p0, __p1, __p2) __extension__ ({ \
+ uint8x16x2_t __s1 = __p1; \
+ uint8x16x2_t __rev1; \
+ __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
+ __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
+ __builtin_neon_vst2q_lane_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], __p2, 48); \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vst2q_lane_u64(__p0, __p1, __p2) __extension__ ({ \
+ uint64x2x2_t __s1 = __p1; \
+ __builtin_neon_vst2q_lane_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], __p2, 51); \
+})
+#else
+#define vst2q_lane_u64(__p0, __p1, __p2) __extension__ ({ \
+ uint64x2x2_t __s1 = __p1; \
+ uint64x2x2_t __rev1; \
+ __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \
+ __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \
+ __builtin_neon_vst2q_lane_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], __p2, 51); \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vst2q_lane_s8(__p0, __p1, __p2) __extension__ ({ \
+ int8x16x2_t __s1 = __p1; \
+ __builtin_neon_vst2q_lane_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], __p2, 32); \
+})
+#else
+#define vst2q_lane_s8(__p0, __p1, __p2) __extension__ ({ \
+ int8x16x2_t __s1 = __p1; \
+ int8x16x2_t __rev1; \
+ __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
+ __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
+ __builtin_neon_vst2q_lane_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], __p2, 32); \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vst2q_lane_f64(__p0, __p1, __p2) __extension__ ({ \
+ float64x2x2_t __s1 = __p1; \
+ __builtin_neon_vst2q_lane_v(__p0, __s1.val[0], __s1.val[1], __p2, 42); \
+})
+#else
+#define vst2q_lane_f64(__p0, __p1, __p2) __extension__ ({ \
+ float64x2x2_t __s1 = __p1; \
+ float64x2x2_t __rev1; \
+ __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \
+ __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \
+ __builtin_neon_vst2q_lane_v(__p0, __rev1.val[0], __rev1.val[1], __p2, 42); \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vst2q_lane_s64(__p0, __p1, __p2) __extension__ ({ \
+ int64x2x2_t __s1 = __p1; \
+ __builtin_neon_vst2q_lane_v(__p0, __s1.val[0], __s1.val[1], __p2, 35); \
+})
+#else
+#define vst2q_lane_s64(__p0, __p1, __p2) __extension__ ({ \
+ int64x2x2_t __s1 = __p1; \
+ int64x2x2_t __rev1; \
+ __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \
+ __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \
+ __builtin_neon_vst2q_lane_v(__p0, __rev1.val[0], __rev1.val[1], __p2, 35); \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vst2_lane_u64(__p0, __p1, __p2) __extension__ ({ \
+ uint64x1x2_t __s1 = __p1; \
+ __builtin_neon_vst2_lane_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], __p2, 19); \
+})
+#else
+#define vst2_lane_u64(__p0, __p1, __p2) __extension__ ({ \
+ uint64x1x2_t __s1 = __p1; \
+ __builtin_neon_vst2_lane_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], __p2, 19); \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vst2_lane_f64(__p0, __p1, __p2) __extension__ ({ \
+ float64x1x2_t __s1 = __p1; \
+ __builtin_neon_vst2_lane_v(__p0, __s1.val[0], __s1.val[1], __p2, 10); \
+})
+#else
+#define vst2_lane_f64(__p0, __p1, __p2) __extension__ ({ \
+ float64x1x2_t __s1 = __p1; \
+ __builtin_neon_vst2_lane_v(__p0, __s1.val[0], __s1.val[1], __p2, 10); \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vst2_lane_s64(__p0, __p1, __p2) __extension__ ({ \
+ int64x1x2_t __s1 = __p1; \
+ __builtin_neon_vst2_lane_v(__p0, __s1.val[0], __s1.val[1], __p2, 3); \
+})
+#else
+#define vst2_lane_s64(__p0, __p1, __p2) __extension__ ({ \
+ int64x1x2_t __s1 = __p1; \
+ __builtin_neon_vst2_lane_v(__p0, __s1.val[0], __s1.val[1], __p2, 3); \
+})
+#endif
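+
+/* Annotation: the vst2*_lane forms store only lane __p2 (an immediate in
+   the public API) of each of the two registers, rather than the whole
+   vectors.  The endianness handling is the same as for the full stores,
+   and the single-lane 64-bit element types again expand to identical
+   little- and big-endian branches. */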
+
+#ifdef __LITTLE_ENDIAN__
+#define vst3_p64(__p0, __p1) __extension__ ({ \
+ poly64x1x3_t __s1 = __p1; \
+ __builtin_neon_vst3_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], 6); \
+})
+#else
+#define vst3_p64(__p0, __p1) __extension__ ({ \
+ poly64x1x3_t __s1 = __p1; \
+ __builtin_neon_vst3_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], 6); \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vst3q_p64(__p0, __p1) __extension__ ({ \
+ poly64x2x3_t __s1 = __p1; \
+ __builtin_neon_vst3q_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], 38); \
+})
+#else
+#define vst3q_p64(__p0, __p1) __extension__ ({ \
+ poly64x2x3_t __s1 = __p1; \
+ poly64x2x3_t __rev1; \
+ __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \
+ __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \
+ __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 1, 0); \
+ __builtin_neon_vst3q_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], 38); \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vst3q_u64(__p0, __p1) __extension__ ({ \
+ uint64x2x3_t __s1 = __p1; \
+ __builtin_neon_vst3q_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], 51); \
+})
+#else
+#define vst3q_u64(__p0, __p1) __extension__ ({ \
+ uint64x2x3_t __s1 = __p1; \
+ uint64x2x3_t __rev1; \
+ __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \
+ __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \
+ __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 1, 0); \
+ __builtin_neon_vst3q_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], 51); \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vst3q_f64(__p0, __p1) __extension__ ({ \
+ float64x2x3_t __s1 = __p1; \
+ __builtin_neon_vst3q_v(__p0, __s1.val[0], __s1.val[1], __s1.val[2], 42); \
+})
+#else
+#define vst3q_f64(__p0, __p1) __extension__ ({ \
+ float64x2x3_t __s1 = __p1; \
+ float64x2x3_t __rev1; \
+ __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \
+ __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \
+ __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 1, 0); \
+ __builtin_neon_vst3q_v(__p0, __rev1.val[0], __rev1.val[1], __rev1.val[2], 42); \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vst3q_s64(__p0, __p1) __extension__ ({ \
+ int64x2x3_t __s1 = __p1; \
+ __builtin_neon_vst3q_v(__p0, __s1.val[0], __s1.val[1], __s1.val[2], 35); \
+})
+#else
+#define vst3q_s64(__p0, __p1) __extension__ ({ \
+ int64x2x3_t __s1 = __p1; \
+ int64x2x3_t __rev1; \
+ __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \
+ __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \
+ __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 1, 0); \
+ __builtin_neon_vst3q_v(__p0, __rev1.val[0], __rev1.val[1], __rev1.val[2], 35); \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vst3_f64(__p0, __p1) __extension__ ({ \
+ float64x1x3_t __s1 = __p1; \
+ __builtin_neon_vst3_v(__p0, __s1.val[0], __s1.val[1], __s1.val[2], 10); \
+})
+#else
+#define vst3_f64(__p0, __p1) __extension__ ({ \
+ float64x1x3_t __s1 = __p1; \
+ __builtin_neon_vst3_v(__p0, __s1.val[0], __s1.val[1], __s1.val[2], 10); \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vst3_lane_p64(__p0, __p1, __p2) __extension__ ({ \
+ poly64x1x3_t __s1 = __p1; \
+ __builtin_neon_vst3_lane_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], __p2, 6); \
+})
+#else
+#define vst3_lane_p64(__p0, __p1, __p2) __extension__ ({ \
+ poly64x1x3_t __s1 = __p1; \
+ __builtin_neon_vst3_lane_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], __p2, 6); \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vst3q_lane_p8(__p0, __p1, __p2) __extension__ ({ \
+ poly8x16x3_t __s1 = __p1; \
+ __builtin_neon_vst3q_lane_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], __p2, 36); \
+})
+#else
+#define vst3q_lane_p8(__p0, __p1, __p2) __extension__ ({ \
+ poly8x16x3_t __s1 = __p1; \
+ poly8x16x3_t __rev1; \
+ __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
+ __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
+ __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
+ __builtin_neon_vst3q_lane_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], __p2, 36); \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vst3q_lane_p64(__p0, __p1, __p2) __extension__ ({ \
+ poly64x2x3_t __s1 = __p1; \
+ __builtin_neon_vst3q_lane_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], __p2, 38); \
+})
+#else
+#define vst3q_lane_p64(__p0, __p1, __p2) __extension__ ({ \
+ poly64x2x3_t __s1 = __p1; \
+ poly64x2x3_t __rev1; \
+ __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \
+ __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \
+ __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 1, 0); \
+ __builtin_neon_vst3q_lane_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], __p2, 38); \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vst3q_lane_u8(__p0, __p1, __p2) __extension__ ({ \
+ uint8x16x3_t __s1 = __p1; \
+ __builtin_neon_vst3q_lane_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], __p2, 48); \
+})
+#else
+#define vst3q_lane_u8(__p0, __p1, __p2) __extension__ ({ \
+ uint8x16x3_t __s1 = __p1; \
+ uint8x16x3_t __rev1; \
+ __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
+ __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
+ __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
+ __builtin_neon_vst3q_lane_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], __p2, 48); \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vst3q_lane_u64(__p0, __p1, __p2) __extension__ ({ \
+ uint64x2x3_t __s1 = __p1; \
+ __builtin_neon_vst3q_lane_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], __p2, 51); \
+})
+#else
+#define vst3q_lane_u64(__p0, __p1, __p2) __extension__ ({ \
+ uint64x2x3_t __s1 = __p1; \
+ uint64x2x3_t __rev1; \
+ __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \
+ __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \
+ __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 1, 0); \
+ __builtin_neon_vst3q_lane_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], __p2, 51); \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vst3q_lane_s8(__p0, __p1, __p2) __extension__ ({ \
+ int8x16x3_t __s1 = __p1; \
+ __builtin_neon_vst3q_lane_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], __p2, 32); \
+})
+#else
+#define vst3q_lane_s8(__p0, __p1, __p2) __extension__ ({ \
+ int8x16x3_t __s1 = __p1; \
+ int8x16x3_t __rev1; \
+ __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
+ __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
+ __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
+ __builtin_neon_vst3q_lane_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], __p2, 32); \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vst3q_lane_f64(__p0, __p1, __p2) __extension__ ({ \
+ float64x2x3_t __s1 = __p1; \
+ __builtin_neon_vst3q_lane_v(__p0, __s1.val[0], __s1.val[1], __s1.val[2], __p2, 42); \
+})
+#else
+#define vst3q_lane_f64(__p0, __p1, __p2) __extension__ ({ \
+ float64x2x3_t __s1 = __p1; \
+ float64x2x3_t __rev1; \
+ __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \
+ __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \
+ __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 1, 0); \
+ __builtin_neon_vst3q_lane_v(__p0, __rev1.val[0], __rev1.val[1], __rev1.val[2], __p2, 42); \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vst3q_lane_s64(__p0, __p1, __p2) __extension__ ({ \
+ int64x2x3_t __s1 = __p1; \
+ __builtin_neon_vst3q_lane_v(__p0, __s1.val[0], __s1.val[1], __s1.val[2], __p2, 35); \
+})
+#else
+#define vst3q_lane_s64(__p0, __p1, __p2) __extension__ ({ \
+ int64x2x3_t __s1 = __p1; \
+ int64x2x3_t __rev1; \
+ __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \
+ __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \
+ __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 1, 0); \
+ __builtin_neon_vst3q_lane_v(__p0, __rev1.val[0], __rev1.val[1], __rev1.val[2], __p2, 35); \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vst3_lane_u64(__p0, __p1, __p2) __extension__ ({ \
+ uint64x1x3_t __s1 = __p1; \
+ __builtin_neon_vst3_lane_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], __p2, 19); \
+})
+#else
+#define vst3_lane_u64(__p0, __p1, __p2) __extension__ ({ \
+ uint64x1x3_t __s1 = __p1; \
+ __builtin_neon_vst3_lane_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], __p2, 19); \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vst3_lane_f64(__p0, __p1, __p2) __extension__ ({ \
+ float64x1x3_t __s1 = __p1; \
+ __builtin_neon_vst3_lane_v(__p0, __s1.val[0], __s1.val[1], __s1.val[2], __p2, 10); \
+})
+#else
+#define vst3_lane_f64(__p0, __p1, __p2) __extension__ ({ \
+ float64x1x3_t __s1 = __p1; \
+ __builtin_neon_vst3_lane_v(__p0, __s1.val[0], __s1.val[1], __s1.val[2], __p2, 10); \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vst3_lane_s64(__p0, __p1, __p2) __extension__ ({ \
+ int64x1x3_t __s1 = __p1; \
+ __builtin_neon_vst3_lane_v(__p0, __s1.val[0], __s1.val[1], __s1.val[2], __p2, 3); \
+})
+#else
+#define vst3_lane_s64(__p0, __p1, __p2) __extension__ ({ \
+ int64x1x3_t __s1 = __p1; \
+ __builtin_neon_vst3_lane_v(__p0, __s1.val[0], __s1.val[1], __s1.val[2], __p2, 3); \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vst4_p64(__p0, __p1) __extension__ ({ \
+ poly64x1x4_t __s1 = __p1; \
+ __builtin_neon_vst4_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], (int8x8_t)__s1.val[3], 6); \
+})
+#else
+#define vst4_p64(__p0, __p1) __extension__ ({ \
+ poly64x1x4_t __s1 = __p1; \
+ __builtin_neon_vst4_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], (int8x8_t)__s1.val[3], 6); \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vst4q_p64(__p0, __p1) __extension__ ({ \
+ poly64x2x4_t __s1 = __p1; \
+ __builtin_neon_vst4q_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], (int8x16_t)__s1.val[3], 38); \
+})
+#else
+#define vst4q_p64(__p0, __p1) __extension__ ({ \
+ poly64x2x4_t __s1 = __p1; \
+ poly64x2x4_t __rev1; \
+ __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \
+ __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \
+ __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 1, 0); \
+ __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 1, 0); \
+ __builtin_neon_vst4q_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], (int8x16_t)__rev1.val[3], 38); \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vst4q_u64(__p0, __p1) __extension__ ({ \
+ uint64x2x4_t __s1 = __p1; \
+ __builtin_neon_vst4q_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], (int8x16_t)__s1.val[3], 51); \
+})
+#else
+#define vst4q_u64(__p0, __p1) __extension__ ({ \
+ uint64x2x4_t __s1 = __p1; \
+ uint64x2x4_t __rev1; \
+ __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \
+ __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \
+ __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 1, 0); \
+ __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 1, 0); \
+ __builtin_neon_vst4q_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], (int8x16_t)__rev1.val[3], 51); \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vst4q_f64(__p0, __p1) __extension__ ({ \
+ float64x2x4_t __s1 = __p1; \
+ __builtin_neon_vst4q_v(__p0, __s1.val[0], __s1.val[1], __s1.val[2], __s1.val[3], 42); \
+})
+#else
+#define vst4q_f64(__p0, __p1) __extension__ ({ \
+ float64x2x4_t __s1 = __p1; \
+ float64x2x4_t __rev1; \
+ __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \
+ __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \
+ __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 1, 0); \
+ __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 1, 0); \
+ __builtin_neon_vst4q_v(__p0, __rev1.val[0], __rev1.val[1], __rev1.val[2], __rev1.val[3], 42); \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vst4q_s64(__p0, __p1) __extension__ ({ \
+ int64x2x4_t __s1 = __p1; \
+ __builtin_neon_vst4q_v(__p0, __s1.val[0], __s1.val[1], __s1.val[2], __s1.val[3], 35); \
+})
+#else
+#define vst4q_s64(__p0, __p1) __extension__ ({ \
+ int64x2x4_t __s1 = __p1; \
+ int64x2x4_t __rev1; \
+ __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \
+ __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \
+ __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 1, 0); \
+ __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 1, 0); \
+ __builtin_neon_vst4q_v(__p0, __rev1.val[0], __rev1.val[1], __rev1.val[2], __rev1.val[3], 35); \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vst4_f64(__p0, __p1) __extension__ ({ \
+ float64x1x4_t __s1 = __p1; \
+ __builtin_neon_vst4_v(__p0, __s1.val[0], __s1.val[1], __s1.val[2], __s1.val[3], 10); \
+})
+#else
+#define vst4_f64(__p0, __p1) __extension__ ({ \
+ float64x1x4_t __s1 = __p1; \
+ __builtin_neon_vst4_v(__p0, __s1.val[0], __s1.val[1], __s1.val[2], __s1.val[3], 10); \
+})
+#endif
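+
+/* Annotation: vst2/vst3/vst4 are interleaving stores; vst4 writes
+   val[0][0], val[1][0], val[2][0], val[3][0], val[0][1], ... element-
+   interleaved to memory.  A hypothetical round-trip sketch (identifiers
+   invented for illustration):
+
+       double rgba[4];                      // one 4-channel sample
+       float64x1x4_t px = vld4_f64(rgba);   // de-interleave on load
+       vst4_f64(rgba, px);                  // re-interleave on store
+*/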
+
+#ifdef __LITTLE_ENDIAN__
+#define vst4_lane_p64(__p0, __p1, __p2) __extension__ ({ \
+ poly64x1x4_t __s1 = __p1; \
+ __builtin_neon_vst4_lane_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], (int8x8_t)__s1.val[3], __p2, 6); \
+})
+#else
+#define vst4_lane_p64(__p0, __p1, __p2) __extension__ ({ \
+ poly64x1x4_t __s1 = __p1; \
+ __builtin_neon_vst4_lane_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], (int8x8_t)__s1.val[3], __p2, 6); \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vst4q_lane_p8(__p0, __p1, __p2) __extension__ ({ \
+ poly8x16x4_t __s1 = __p1; \
+ __builtin_neon_vst4q_lane_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], (int8x16_t)__s1.val[3], __p2, 36); \
+})
+#else
+#define vst4q_lane_p8(__p0, __p1, __p2) __extension__ ({ \
+ poly8x16x4_t __s1 = __p1; \
+ poly8x16x4_t __rev1; \
+ __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
+ __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
+ __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
+ __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
+ __builtin_neon_vst4q_lane_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], (int8x16_t)__rev1.val[3], __p2, 36); \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vst4q_lane_p64(__p0, __p1, __p2) __extension__ ({ \
+ poly64x2x4_t __s1 = __p1; \
+ __builtin_neon_vst4q_lane_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], (int8x16_t)__s1.val[3], __p2, 38); \
+})
+#else
+#define vst4q_lane_p64(__p0, __p1, __p2) __extension__ ({ \
+ poly64x2x4_t __s1 = __p1; \
+ poly64x2x4_t __rev1; \
+ __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \
+ __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \
+ __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 1, 0); \
+ __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 1, 0); \
+ __builtin_neon_vst4q_lane_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], (int8x16_t)__rev1.val[3], __p2, 38); \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vst4q_lane_u8(__p0, __p1, __p2) __extension__ ({ \
+ uint8x16x4_t __s1 = __p1; \
+ __builtin_neon_vst4q_lane_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], (int8x16_t)__s1.val[3], __p2, 48); \
+})
+#else
+#define vst4q_lane_u8(__p0, __p1, __p2) __extension__ ({ \
+ uint8x16x4_t __s1 = __p1; \
+ uint8x16x4_t __rev1; \
+ __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
+ __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
+ __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
+ __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
+ __builtin_neon_vst4q_lane_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], (int8x16_t)__rev1.val[3], __p2, 48); \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vst4q_lane_u64(__p0, __p1, __p2) __extension__ ({ \
+ uint64x2x4_t __s1 = __p1; \
+ __builtin_neon_vst4q_lane_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], (int8x16_t)__s1.val[3], __p2, 51); \
+})
+#else
+#define vst4q_lane_u64(__p0, __p1, __p2) __extension__ ({ \
+ uint64x2x4_t __s1 = __p1; \
+ uint64x2x4_t __rev1; \
+ __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \
+ __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \
+ __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 1, 0); \
+ __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 1, 0); \
+ __builtin_neon_vst4q_lane_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], (int8x16_t)__rev1.val[3], __p2, 51); \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vst4q_lane_s8(__p0, __p1, __p2) __extension__ ({ \
+ int8x16x4_t __s1 = __p1; \
+ __builtin_neon_vst4q_lane_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], (int8x16_t)__s1.val[3], __p2, 32); \
+})
+#else
+#define vst4q_lane_s8(__p0, __p1, __p2) __extension__ ({ \
+ int8x16x4_t __s1 = __p1; \
+ int8x16x4_t __rev1; \
+ __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
+ __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
+ __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
+ __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
+ __builtin_neon_vst4q_lane_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], (int8x16_t)__rev1.val[3], __p2, 32); \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vst4q_lane_f64(__p0, __p1, __p2) __extension__ ({ \
+ float64x2x4_t __s1 = __p1; \
+ __builtin_neon_vst4q_lane_v(__p0, __s1.val[0], __s1.val[1], __s1.val[2], __s1.val[3], __p2, 42); \
+})
+#else
+#define vst4q_lane_f64(__p0, __p1, __p2) __extension__ ({ \
+ float64x2x4_t __s1 = __p1; \
+ float64x2x4_t __rev1; \
+ __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \
+ __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \
+ __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 1, 0); \
+ __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 1, 0); \
+ __builtin_neon_vst4q_lane_v(__p0, __rev1.val[0], __rev1.val[1], __rev1.val[2], __rev1.val[3], __p2, 42); \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vst4q_lane_s64(__p0, __p1, __p2) __extension__ ({ \
+ int64x2x4_t __s1 = __p1; \
+ __builtin_neon_vst4q_lane_v(__p0, __s1.val[0], __s1.val[1], __s1.val[2], __s1.val[3], __p2, 35); \
+})
+#else
+#define vst4q_lane_s64(__p0, __p1, __p2) __extension__ ({ \
+ int64x2x4_t __s1 = __p1; \
+ int64x2x4_t __rev1; \
+ __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \
+ __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \
+ __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 1, 0); \
+ __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 1, 0); \
+ __builtin_neon_vst4q_lane_v(__p0, __rev1.val[0], __rev1.val[1], __rev1.val[2], __rev1.val[3], __p2, 35); \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vst4_lane_u64(__p0, __p1, __p2) __extension__ ({ \
+ uint64x1x4_t __s1 = __p1; \
+ __builtin_neon_vst4_lane_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], (int8x8_t)__s1.val[3], __p2, 19); \
+})
+#else
+#define vst4_lane_u64(__p0, __p1, __p2) __extension__ ({ \
+ uint64x1x4_t __s1 = __p1; \
+ __builtin_neon_vst4_lane_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], (int8x8_t)__s1.val[3], __p2, 19); \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vst4_lane_f64(__p0, __p1, __p2) __extension__ ({ \
+ float64x1x4_t __s1 = __p1; \
+ __builtin_neon_vst4_lane_v(__p0, __s1.val[0], __s1.val[1], __s1.val[2], __s1.val[3], __p2, 10); \
+})
+#else
+#define vst4_lane_f64(__p0, __p1, __p2) __extension__ ({ \
+ float64x1x4_t __s1 = __p1; \
+ __builtin_neon_vst4_lane_v(__p0, __s1.val[0], __s1.val[1], __s1.val[2], __s1.val[3], __p2, 10); \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vst4_lane_s64(__p0, __p1, __p2) __extension__ ({ \
+ int64x1x4_t __s1 = __p1; \
+ __builtin_neon_vst4_lane_v(__p0, __s1.val[0], __s1.val[1], __s1.val[2], __s1.val[3], __p2, 3); \
+})
+#else
+#define vst4_lane_s64(__p0, __p1, __p2) __extension__ ({ \
+ int64x1x4_t __s1 = __p1; \
+ __builtin_neon_vst4_lane_v(__p0, __s1.val[0], __s1.val[1], __s1.val[2], __s1.val[3], __p2, 3); \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vstrq_p128(__p0, __p1) __extension__ ({ \
+ poly128_t __s1 = __p1; \
+ __builtin_neon_vstrq_p128(__p0, __s1); \
+})
+#else
+#define vstrq_p128(__p0, __p1) __extension__ ({ \
+ poly128_t __s1 = __p1; \
+ __builtin_neon_vstrq_p128(__p0, __s1); \
+})
+#endif
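+
+/* Annotation: poly128_t is a 128-bit scalar rather than a lane vector,
+   so vstrq_p128 has no lane order to correct and both branches coincide. */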
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint64_t vsubd_u64(uint64_t __p0, uint64_t __p1) {
+ uint64_t __ret;
+ __ret = (uint64_t) __builtin_neon_vsubd_u64(__p0, __p1);
+ return __ret;
+}
+#else
+__ai uint64_t vsubd_u64(uint64_t __p0, uint64_t __p1) {
+ uint64_t __ret;
+ __ret = (uint64_t) __builtin_neon_vsubd_u64(__p0, __p1);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai int64_t vsubd_s64(int64_t __p0, int64_t __p1) {
+ int64_t __ret;
+ __ret = (int64_t) __builtin_neon_vsubd_s64(__p0, __p1);
+ return __ret;
+}
+#else
+__ai int64_t vsubd_s64(int64_t __p0, int64_t __p1) {
+ int64_t __ret;
+ __ret = (int64_t) __builtin_neon_vsubd_s64(__p0, __p1);
+ return __ret;
+}
+#endif
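+
+/* Annotation: the __ai scalar helpers (vsubd_u64/vsubd_s64) operate on
+   single 64-bit values, so endianness is irrelevant and the two guarded
+   definitions are byte-for-byte identical. */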
+
+#ifdef __LITTLE_ENDIAN__
+__ai float64x2_t vsubq_f64(float64x2_t __p0, float64x2_t __p1) {
+ float64x2_t __ret;
+ __ret = __p0 - __p1;
+ return __ret;
+}
+#else
+__ai float64x2_t vsubq_f64(float64x2_t __p0, float64x2_t __p1) {
+ float64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
+ float64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
+ float64x2_t __ret;
+ __ret = __rev0 - __rev1;
+ __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai float64x1_t vsub_f64(float64x1_t __p0, float64x1_t __p1) {
+ float64x1_t __ret;
+ __ret = __p0 - __p1;
+ return __ret;
+}
+#else
+__ai float64x1_t vsub_f64(float64x1_t __p0, float64x1_t __p1) {
+ float64x1_t __ret;
+ __ret = __p0 - __p1;
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint16x8_t vsubhn_high_u32(uint16x4_t __p0, uint32x4_t __p1, uint32x4_t __p2) {
+ uint16x8_t __ret;
+ __ret = vcombine_u16(__p0, vsubhn_u32(__p1, __p2));
+ return __ret;
+}
+#else
+__ai uint16x8_t vsubhn_high_u32(uint16x4_t __p0, uint32x4_t __p1, uint32x4_t __p2) {
+ uint16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
+ uint32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
+ uint32x4_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 3, 2, 1, 0);
+ uint16x8_t __ret;
+ __ret = __noswap_vcombine_u16(__rev0, __noswap_vsubhn_u32(__rev1, __rev2));
+ __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint32x4_t vsubhn_high_u64(uint32x2_t __p0, uint64x2_t __p1, uint64x2_t __p2) {
+ uint32x4_t __ret;
+ __ret = vcombine_u32(__p0, vsubhn_u64(__p1, __p2));
+ return __ret;
+}
+#else
+__ai uint32x4_t vsubhn_high_u64(uint32x2_t __p0, uint64x2_t __p1, uint64x2_t __p2) {
+ uint32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
+ uint64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
+ uint64x2_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 1, 0);
+ uint32x4_t __ret;
+ __ret = __noswap_vcombine_u32(__rev0, __noswap_vsubhn_u64(__rev1, __rev2));
+ __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint8x16_t vsubhn_high_u16(uint8x8_t __p0, uint16x8_t __p1, uint16x8_t __p2) {
+ uint8x16_t __ret;
+ __ret = vcombine_u8(__p0, vsubhn_u16(__p1, __p2));
+ return __ret;
+}
+#else
+__ai uint8x16_t vsubhn_high_u16(uint8x8_t __p0, uint16x8_t __p1, uint16x8_t __p2) {
+ uint8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
+ uint16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
+ uint16x8_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0);
+ uint8x16_t __ret;
+ __ret = __noswap_vcombine_u8(__rev0, __noswap_vsubhn_u16(__rev1, __rev2));
+ __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai int16x8_t vsubhn_high_s32(int16x4_t __p0, int32x4_t __p1, int32x4_t __p2) {
+ int16x8_t __ret;
+ __ret = vcombine_s16(__p0, vsubhn_s32(__p1, __p2));
+ return __ret;
+}
+#else
+__ai int16x8_t vsubhn_high_s32(int16x4_t __p0, int32x4_t __p1, int32x4_t __p2) {
+ int16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
+ int32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
+ int32x4_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 3, 2, 1, 0);
+ int16x8_t __ret;
+ __ret = __noswap_vcombine_s16(__rev0, __noswap_vsubhn_s32(__rev1, __rev2));
+ __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai int32x4_t vsubhn_high_s64(int32x2_t __p0, int64x2_t __p1, int64x2_t __p2) {
+ int32x4_t __ret;
+ __ret = vcombine_s32(__p0, vsubhn_s64(__p1, __p2));
+ return __ret;
+}
+#else
+__ai int32x4_t vsubhn_high_s64(int32x2_t __p0, int64x2_t __p1, int64x2_t __p2) {
+ int32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
+ int64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
+ int64x2_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 1, 0);
+ int32x4_t __ret;
+ __ret = __noswap_vcombine_s32(__rev0, __noswap_vsubhn_s64(__rev1, __rev2));
+ __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai int8x16_t vsubhn_high_s16(int8x8_t __p0, int16x8_t __p1, int16x8_t __p2) {
+ int8x16_t __ret;
+ __ret = vcombine_s8(__p0, vsubhn_s16(__p1, __p2));
+ return __ret;
+}
+#else
+__ai int8x16_t vsubhn_high_s16(int8x8_t __p0, int16x8_t __p1, int16x8_t __p2) {
+ int8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
+ int16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
+ int16x8_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0);
+ int8x16_t __ret;
+ __ret = __noswap_vcombine_s8(__rev0, __noswap_vsubhn_s16(__rev1, __rev2));
+ __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
+ return __ret;
+}
+#endif
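+
+/* Annotation: the vsubhn_high_* family is "subtract, narrow to the high
+   half of each difference, place in the upper half of the result":
+   vsubhn_u32(__p1, __p2) keeps the top 16 bits of each 32-bit difference,
+   and vcombine_u16 puts __p0 in the low half of the 128-bit result.  The
+   big-endian branch calls the __noswap_ helpers so the inner operations
+   are not lane-reversed a second time.  A hypothetical sketch of the
+   per-lane effect (names invented):
+
+       uint32_t d = a1 - a2;                // element-wise difference
+       uint16_t hi = (uint16_t)(d >> 16);   // vsubhn keeps the high half
+*/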
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint16x8_t vsubl_high_u8(uint8x16_t __p0, uint8x16_t __p1) {
+ uint16x8_t __ret;
+ __ret = vmovl_high_u8(__p0) - vmovl_high_u8(__p1);
+ return __ret;
+}
+#else
+__ai uint16x8_t vsubl_high_u8(uint8x16_t __p0, uint8x16_t __p1) {
+ uint8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
+ uint8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
+ uint16x8_t __ret;
+ __ret = __noswap_vmovl_high_u8(__rev0) - __noswap_vmovl_high_u8(__rev1);
+ __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint64x2_t vsubl_high_u32(uint32x4_t __p0, uint32x4_t __p1) {
+ uint64x2_t __ret;
+ __ret = vmovl_high_u32(__p0) - vmovl_high_u32(__p1);
+ return __ret;
+}
+#else
+__ai uint64x2_t vsubl_high_u32(uint32x4_t __p0, uint32x4_t __p1) {
+ uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
+ uint32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
+ uint64x2_t __ret;
+ __ret = __noswap_vmovl_high_u32(__rev0) - __noswap_vmovl_high_u32(__rev1);
+ __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint32x4_t vsubl_high_u16(uint16x8_t __p0, uint16x8_t __p1) {
+ uint32x4_t __ret;
+ __ret = vmovl_high_u16(__p0) - vmovl_high_u16(__p1);
+ return __ret;
+}
+#else
+__ai uint32x4_t vsubl_high_u16(uint16x8_t __p0, uint16x8_t __p1) {
+ uint16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
+ uint16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
+ uint32x4_t __ret;
+ __ret = __noswap_vmovl_high_u16(__rev0) - __noswap_vmovl_high_u16(__rev1);
+ __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai int16x8_t vsubl_high_s8(int8x16_t __p0, int8x16_t __p1) {
+ int16x8_t __ret;
+ __ret = vmovl_high_s8(__p0) - vmovl_high_s8(__p1);
+ return __ret;
+}
+#else
+__ai int16x8_t vsubl_high_s8(int8x16_t __p0, int8x16_t __p1) {
+ int8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
+ int8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
+ int16x8_t __ret;
+ __ret = __noswap_vmovl_high_s8(__rev0) - __noswap_vmovl_high_s8(__rev1);
+ __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai int64x2_t vsubl_high_s32(int32x4_t __p0, int32x4_t __p1) {
+ int64x2_t __ret;
+ __ret = vmovl_high_s32(__p0) - vmovl_high_s32(__p1);
+ return __ret;
+}
+#else
+__ai int64x2_t vsubl_high_s32(int32x4_t __p0, int32x4_t __p1) {
+ int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
+ int32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
+ int64x2_t __ret;
+ __ret = __noswap_vmovl_high_s32(__rev0) - __noswap_vmovl_high_s32(__rev1);
+ __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai int32x4_t vsubl_high_s16(int16x8_t __p0, int16x8_t __p1) {
+ int32x4_t __ret;
+ __ret = vmovl_high_s16(__p0) - vmovl_high_s16(__p1);
+ return __ret;
+}
+#else
+__ai int32x4_t vsubl_high_s16(int16x8_t __p0, int16x8_t __p1) {
+ int16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
+ int16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
+ int32x4_t __ret;
+ __ret = __noswap_vmovl_high_s16(__rev0) - __noswap_vmovl_high_s16(__rev1);
+ __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
+ return __ret;
+}
+#endif
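+
+/* Annotation: vsubl_high_* is a widening subtract of the top halves of
+   two quad registers: vmovl_high_u8 zero-extends lanes 8..15 to 16 bits
+   (the _s8 form sign-extends), and the subtraction is then performed at
+   the wider width, so a narrow difference cannot wrap. */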
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint16x8_t vsubw_high_u8(uint16x8_t __p0, uint8x16_t __p1) {
+ uint16x8_t __ret;
+ __ret = __p0 - vmovl_high_u8(__p1);
+ return __ret;
+}
+#else
+__ai uint16x8_t vsubw_high_u8(uint16x8_t __p0, uint8x16_t __p1) {
+ uint16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
+ uint8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
+ uint16x8_t __ret;
+ __ret = __rev0 - __noswap_vmovl_high_u8(__rev1);
+ __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint64x2_t vsubw_high_u32(uint64x2_t __p0, uint32x4_t __p1) {
+ uint64x2_t __ret;
+ __ret = __p0 - vmovl_high_u32(__p1);
+ return __ret;
+}
+#else
+__ai uint64x2_t vsubw_high_u32(uint64x2_t __p0, uint32x4_t __p1) {
+ uint64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
+ uint32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
+ uint64x2_t __ret;
+ __ret = __rev0 - __noswap_vmovl_high_u32(__rev1);
+ __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint32x4_t vsubw_high_u16(uint32x4_t __p0, uint16x8_t __p1) {
+ uint32x4_t __ret;
+ __ret = __p0 - vmovl_high_u16(__p1);
+ return __ret;
+}
+#else
+__ai uint32x4_t vsubw_high_u16(uint32x4_t __p0, uint16x8_t __p1) {
+ uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
+ uint16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
+ uint32x4_t __ret;
+ __ret = __rev0 - __noswap_vmovl_high_u16(__rev1);
+ __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai int16x8_t vsubw_high_s8(int16x8_t __p0, int8x16_t __p1) {
+ int16x8_t __ret;
+ __ret = __p0 - vmovl_high_s8(__p1);
+ return __ret;
+}
+#else
+__ai int16x8_t vsubw_high_s8(int16x8_t __p0, int8x16_t __p1) {
+ int16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
+ int8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
+ int16x8_t __ret;
+ __ret = __rev0 - __noswap_vmovl_high_s8(__rev1);
+ __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai int64x2_t vsubw_high_s32(int64x2_t __p0, int32x4_t __p1) {
+ int64x2_t __ret;
+ __ret = __p0 - vmovl_high_s32(__p1);
+ return __ret;
+}
+#else
+__ai int64x2_t vsubw_high_s32(int64x2_t __p0, int32x4_t __p1) {
+ int64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
+ int32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
+ int64x2_t __ret;
+ __ret = __rev0 - __noswap_vmovl_high_s32(__rev1);
+ __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai int32x4_t vsubw_high_s16(int32x4_t __p0, int16x8_t __p1) {
+ int32x4_t __ret;
+ __ret = __p0 - vmovl_high_s16(__p1);
+ return __ret;
+}
+#else
+__ai int32x4_t vsubw_high_s16(int32x4_t __p0, int16x8_t __p1) {
+ int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
+ int16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
+ int32x4_t __ret;
+ __ret = __rev0 - __noswap_vmovl_high_s16(__rev1);
+ __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
+ return __ret;
+}
+#endif
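+
+/* Annotation: vsubw_high_* is the mixed-width form: only the second
+   operand is narrow, its high half is widened with vmovl_high_*, and the
+   subtraction happens at the element width of __p0. */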
+
+#ifdef __LITTLE_ENDIAN__
+__ai poly8x8_t vtrn1_p8(poly8x8_t __p0, poly8x8_t __p1) {
+ poly8x8_t __ret;
+ __ret = __builtin_shufflevector(__p0, __p1, 0, 8, 2, 10, 4, 12, 6, 14);
+ return __ret;
+}
+#else
+__ai poly8x8_t vtrn1_p8(poly8x8_t __p0, poly8x8_t __p1) {
+ poly8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
+ poly8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
+ poly8x8_t __ret;
+ __ret = __builtin_shufflevector(__rev0, __rev1, 0, 8, 2, 10, 4, 12, 6, 14);
+ __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai poly16x4_t vtrn1_p16(poly16x4_t __p0, poly16x4_t __p1) {
+ poly16x4_t __ret;
+ __ret = __builtin_shufflevector(__p0, __p1, 0, 4, 2, 6);
+ return __ret;
+}
+#else
+__ai poly16x4_t vtrn1_p16(poly16x4_t __p0, poly16x4_t __p1) {
+ poly16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
+ poly16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
+ poly16x4_t __ret;
+ __ret = __builtin_shufflevector(__rev0, __rev1, 0, 4, 2, 6);
+ __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai poly8x16_t vtrn1q_p8(poly8x16_t __p0, poly8x16_t __p1) {
+ poly8x16_t __ret;
+ __ret = __builtin_shufflevector(__p0, __p1, 0, 16, 2, 18, 4, 20, 6, 22, 8, 24, 10, 26, 12, 28, 14, 30);
+ return __ret;
+}
+#else
+__ai poly8x16_t vtrn1q_p8(poly8x16_t __p0, poly8x16_t __p1) {
+ poly8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
+ poly8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
+ poly8x16_t __ret;
+ __ret = __builtin_shufflevector(__rev0, __rev1, 0, 16, 2, 18, 4, 20, 6, 22, 8, 24, 10, 26, 12, 28, 14, 30);
+ __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai poly64x2_t vtrn1q_p64(poly64x2_t __p0, poly64x2_t __p1) {
+ poly64x2_t __ret;
+ __ret = __builtin_shufflevector(__p0, __p1, 0, 2);
+ return __ret;
+}
+#else
+__ai poly64x2_t vtrn1q_p64(poly64x2_t __p0, poly64x2_t __p1) {
+ poly64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
+ poly64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
+ poly64x2_t __ret;
+ __ret = __builtin_shufflevector(__rev0, __rev1, 0, 2);
+ __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai poly16x8_t vtrn1q_p16(poly16x8_t __p0, poly16x8_t __p1) {
+ poly16x8_t __ret;
+ __ret = __builtin_shufflevector(__p0, __p1, 0, 8, 2, 10, 4, 12, 6, 14);
+ return __ret;
+}
+#else
+__ai poly16x8_t vtrn1q_p16(poly16x8_t __p0, poly16x8_t __p1) {
+ poly16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
+ poly16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
+ poly16x8_t __ret;
+ __ret = __builtin_shufflevector(__rev0, __rev1, 0, 8, 2, 10, 4, 12, 6, 14);
+ __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint8x16_t vtrn1q_u8(uint8x16_t __p0, uint8x16_t __p1) {
+ uint8x16_t __ret;
+ __ret = __builtin_shufflevector(__p0, __p1, 0, 16, 2, 18, 4, 20, 6, 22, 8, 24, 10, 26, 12, 28, 14, 30);
+ return __ret;
+}
+#else
+__ai uint8x16_t vtrn1q_u8(uint8x16_t __p0, uint8x16_t __p1) {
+ uint8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
+ uint8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
+ uint8x16_t __ret;
+ __ret = __builtin_shufflevector(__rev0, __rev1, 0, 16, 2, 18, 4, 20, 6, 22, 8, 24, 10, 26, 12, 28, 14, 30);
+ __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint32x4_t vtrn1q_u32(uint32x4_t __p0, uint32x4_t __p1) {
+ uint32x4_t __ret;
+ __ret = __builtin_shufflevector(__p0, __p1, 0, 4, 2, 6);
+ return __ret;
+}
+#else
+__ai uint32x4_t vtrn1q_u32(uint32x4_t __p0, uint32x4_t __p1) {
+ uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
+ uint32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
+ uint32x4_t __ret;
+ __ret = __builtin_shufflevector(__rev0, __rev1, 0, 4, 2, 6);
+ __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint64x2_t vtrn1q_u64(uint64x2_t __p0, uint64x2_t __p1) {
+ uint64x2_t __ret;
+ __ret = __builtin_shufflevector(__p0, __p1, 0, 2);
+ return __ret;
+}
+#else
+__ai uint64x2_t vtrn1q_u64(uint64x2_t __p0, uint64x2_t __p1) {
+ uint64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
+ uint64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
+ uint64x2_t __ret;
+ __ret = __builtin_shufflevector(__rev0, __rev1, 0, 2);
+ __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint16x8_t vtrn1q_u16(uint16x8_t __p0, uint16x8_t __p1) {
+ uint16x8_t __ret;
+ __ret = __builtin_shufflevector(__p0, __p1, 0, 8, 2, 10, 4, 12, 6, 14);
+ return __ret;
+}
+#else
+__ai uint16x8_t vtrn1q_u16(uint16x8_t __p0, uint16x8_t __p1) {
+ uint16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
+ uint16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
+ uint16x8_t __ret;
+ __ret = __builtin_shufflevector(__rev0, __rev1, 0, 8, 2, 10, 4, 12, 6, 14);
+ __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai int8x16_t vtrn1q_s8(int8x16_t __p0, int8x16_t __p1) {
+ int8x16_t __ret;
+ __ret = __builtin_shufflevector(__p0, __p1, 0, 16, 2, 18, 4, 20, 6, 22, 8, 24, 10, 26, 12, 28, 14, 30);
+ return __ret;
+}
+#else
+__ai int8x16_t vtrn1q_s8(int8x16_t __p0, int8x16_t __p1) {
+ int8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
+ int8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
+ int8x16_t __ret;
+ __ret = __builtin_shufflevector(__rev0, __rev1, 0, 16, 2, 18, 4, 20, 6, 22, 8, 24, 10, 26, 12, 28, 14, 30);
+ __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai float64x2_t vtrn1q_f64(float64x2_t __p0, float64x2_t __p1) {
+ float64x2_t __ret;
+ __ret = __builtin_shufflevector(__p0, __p1, 0, 2);
+ return __ret;
+}
+#else
+__ai float64x2_t vtrn1q_f64(float64x2_t __p0, float64x2_t __p1) {
+ float64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
+ float64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
+ float64x2_t __ret;
+ __ret = __builtin_shufflevector(__rev0, __rev1, 0, 2);
+ __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai float32x4_t vtrn1q_f32(float32x4_t __p0, float32x4_t __p1) {
+ float32x4_t __ret;
+ __ret = __builtin_shufflevector(__p0, __p1, 0, 4, 2, 6);
+ return __ret;
+}
+#else
+__ai float32x4_t vtrn1q_f32(float32x4_t __p0, float32x4_t __p1) {
+ float32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
+ float32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
+ float32x4_t __ret;
+ __ret = __builtin_shufflevector(__rev0, __rev1, 0, 4, 2, 6);
+ __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai int32x4_t vtrn1q_s32(int32x4_t __p0, int32x4_t __p1) {
+ int32x4_t __ret;
+ __ret = __builtin_shufflevector(__p0, __p1, 0, 4, 2, 6);
+ return __ret;
+}
+#else
+__ai int32x4_t vtrn1q_s32(int32x4_t __p0, int32x4_t __p1) {
+ int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
+ int32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
+ int32x4_t __ret;
+ __ret = __builtin_shufflevector(__rev0, __rev1, 0, 4, 2, 6);
+ __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai int64x2_t vtrn1q_s64(int64x2_t __p0, int64x2_t __p1) {
+ int64x2_t __ret;
+ __ret = __builtin_shufflevector(__p0, __p1, 0, 2);
+ return __ret;
+}
+#else
+__ai int64x2_t vtrn1q_s64(int64x2_t __p0, int64x2_t __p1) {
+ int64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
+ int64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
+ int64x2_t __ret;
+ __ret = __builtin_shufflevector(__rev0, __rev1, 0, 2);
+ __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai int16x8_t vtrn1q_s16(int16x8_t __p0, int16x8_t __p1) {
+ int16x8_t __ret;
+ __ret = __builtin_shufflevector(__p0, __p1, 0, 8, 2, 10, 4, 12, 6, 14);
+ return __ret;
+}
+#else
+__ai int16x8_t vtrn1q_s16(int16x8_t __p0, int16x8_t __p1) {
+ int16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
+ int16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
+ int16x8_t __ret;
+ __ret = __builtin_shufflevector(__rev0, __rev1, 0, 8, 2, 10, 4, 12, 6, 14);
+ __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint8x8_t vtrn1_u8(uint8x8_t __p0, uint8x8_t __p1) {
+ uint8x8_t __ret;
+ __ret = __builtin_shufflevector(__p0, __p1, 0, 8, 2, 10, 4, 12, 6, 14);
+ return __ret;
+}
+#else
+__ai uint8x8_t vtrn1_u8(uint8x8_t __p0, uint8x8_t __p1) {
+ uint8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
+ uint8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
+ uint8x8_t __ret;
+ __ret = __builtin_shufflevector(__rev0, __rev1, 0, 8, 2, 10, 4, 12, 6, 14);
+ __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint32x2_t vtrn1_u32(uint32x2_t __p0, uint32x2_t __p1) {
+ uint32x2_t __ret;
+ __ret = __builtin_shufflevector(__p0, __p1, 0, 2);
+ return __ret;
+}
+#else
+__ai uint32x2_t vtrn1_u32(uint32x2_t __p0, uint32x2_t __p1) {
+ uint32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
+ uint32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
+ uint32x2_t __ret;
+ __ret = __builtin_shufflevector(__rev0, __rev1, 0, 2);
+ __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint16x4_t vtrn1_u16(uint16x4_t __p0, uint16x4_t __p1) {
+ uint16x4_t __ret;
+ __ret = __builtin_shufflevector(__p0, __p1, 0, 4, 2, 6);
+ return __ret;
+}
+#else
+__ai uint16x4_t vtrn1_u16(uint16x4_t __p0, uint16x4_t __p1) {
+ uint16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
+ uint16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
+ uint16x4_t __ret;
+ __ret = __builtin_shufflevector(__rev0, __rev1, 0, 4, 2, 6);
+ __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai int8x8_t vtrn1_s8(int8x8_t __p0, int8x8_t __p1) {
+ int8x8_t __ret;
+ __ret = __builtin_shufflevector(__p0, __p1, 0, 8, 2, 10, 4, 12, 6, 14);
+ return __ret;
+}
+#else
+__ai int8x8_t vtrn1_s8(int8x8_t __p0, int8x8_t __p1) {
+ int8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
+ int8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
+ int8x8_t __ret;
+ __ret = __builtin_shufflevector(__rev0, __rev1, 0, 8, 2, 10, 4, 12, 6, 14);
+ __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai float32x2_t vtrn1_f32(float32x2_t __p0, float32x2_t __p1) {
+ float32x2_t __ret;
+ __ret = __builtin_shufflevector(__p0, __p1, 0, 2);
+ return __ret;
+}
+#else
+__ai float32x2_t vtrn1_f32(float32x2_t __p0, float32x2_t __p1) {
+ float32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
+ float32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
+ float32x2_t __ret;
+ __ret = __builtin_shufflevector(__rev0, __rev1, 0, 2);
+ __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai int32x2_t vtrn1_s32(int32x2_t __p0, int32x2_t __p1) {
+ int32x2_t __ret;
+ __ret = __builtin_shufflevector(__p0, __p1, 0, 2);
+ return __ret;
+}
+#else
+__ai int32x2_t vtrn1_s32(int32x2_t __p0, int32x2_t __p1) {
+ int32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
+ int32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
+ int32x2_t __ret;
+ __ret = __builtin_shufflevector(__rev0, __rev1, 0, 2);
+ __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai int16x4_t vtrn1_s16(int16x4_t __p0, int16x4_t __p1) {
+ int16x4_t __ret;
+ __ret = __builtin_shufflevector(__p0, __p1, 0, 4, 2, 6);
+ return __ret;
+}
+#else
+__ai int16x4_t vtrn1_s16(int16x4_t __p0, int16x4_t __p1) {
+ int16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
+ int16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
+ int16x4_t __ret;
+ __ret = __builtin_shufflevector(__rev0, __rev1, 0, 4, 2, 6);
+ __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
+ return __ret;
+}
+#endif
+
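+/* vtrn2 family: transpose-odd, the counterpart of vtrn1. Each shuffle keeps
+ * the odd-numbered lane of every input pair,
+ * e.g. vtrn2q_s32(a, b) -> {a1, b1, a3, b3}. */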
+#ifdef __LITTLE_ENDIAN__
+__ai poly8x8_t vtrn2_p8(poly8x8_t __p0, poly8x8_t __p1) {
+ poly8x8_t __ret;
+ __ret = __builtin_shufflevector(__p0, __p1, 1, 9, 3, 11, 5, 13, 7, 15);
+ return __ret;
+}
+#else
+__ai poly8x8_t vtrn2_p8(poly8x8_t __p0, poly8x8_t __p1) {
+ poly8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
+ poly8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
+ poly8x8_t __ret;
+ __ret = __builtin_shufflevector(__rev0, __rev1, 1, 9, 3, 11, 5, 13, 7, 15);
+ __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai poly16x4_t vtrn2_p16(poly16x4_t __p0, poly16x4_t __p1) {
+ poly16x4_t __ret;
+ __ret = __builtin_shufflevector(__p0, __p1, 1, 5, 3, 7);
+ return __ret;
+}
+#else
+__ai poly16x4_t vtrn2_p16(poly16x4_t __p0, poly16x4_t __p1) {
+ poly16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
+ poly16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
+ poly16x4_t __ret;
+ __ret = __builtin_shufflevector(__rev0, __rev1, 1, 5, 3, 7);
+ __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai poly8x16_t vtrn2q_p8(poly8x16_t __p0, poly8x16_t __p1) {
+ poly8x16_t __ret;
+ __ret = __builtin_shufflevector(__p0, __p1, 1, 17, 3, 19, 5, 21, 7, 23, 9, 25, 11, 27, 13, 29, 15, 31);
+ return __ret;
+}
+#else
+__ai poly8x16_t vtrn2q_p8(poly8x16_t __p0, poly8x16_t __p1) {
+ poly8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
+ poly8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
+ poly8x16_t __ret;
+ __ret = __builtin_shufflevector(__rev0, __rev1, 1, 17, 3, 19, 5, 21, 7, 23, 9, 25, 11, 27, 13, 29, 15, 31);
+ __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai poly64x2_t vtrn2q_p64(poly64x2_t __p0, poly64x2_t __p1) {
+ poly64x2_t __ret;
+ __ret = __builtin_shufflevector(__p0, __p1, 1, 3);
+ return __ret;
+}
+#else
+__ai poly64x2_t vtrn2q_p64(poly64x2_t __p0, poly64x2_t __p1) {
+ poly64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
+ poly64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
+ poly64x2_t __ret;
+ __ret = __builtin_shufflevector(__rev0, __rev1, 1, 3);
+ __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai poly16x8_t vtrn2q_p16(poly16x8_t __p0, poly16x8_t __p1) {
+ poly16x8_t __ret;
+ __ret = __builtin_shufflevector(__p0, __p1, 1, 9, 3, 11, 5, 13, 7, 15);
+ return __ret;
+}
+#else
+__ai poly16x8_t vtrn2q_p16(poly16x8_t __p0, poly16x8_t __p1) {
+ poly16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
+ poly16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
+ poly16x8_t __ret;
+ __ret = __builtin_shufflevector(__rev0, __rev1, 1, 9, 3, 11, 5, 13, 7, 15);
+ __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint8x16_t vtrn2q_u8(uint8x16_t __p0, uint8x16_t __p1) {
+ uint8x16_t __ret;
+ __ret = __builtin_shufflevector(__p0, __p1, 1, 17, 3, 19, 5, 21, 7, 23, 9, 25, 11, 27, 13, 29, 15, 31);
+ return __ret;
+}
+#else
+__ai uint8x16_t vtrn2q_u8(uint8x16_t __p0, uint8x16_t __p1) {
+ uint8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
+ uint8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
+ uint8x16_t __ret;
+ __ret = __builtin_shufflevector(__rev0, __rev1, 1, 17, 3, 19, 5, 21, 7, 23, 9, 25, 11, 27, 13, 29, 15, 31);
+ __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint32x4_t vtrn2q_u32(uint32x4_t __p0, uint32x4_t __p1) {
+ uint32x4_t __ret;
+ __ret = __builtin_shufflevector(__p0, __p1, 1, 5, 3, 7);
+ return __ret;
+}
+#else
+__ai uint32x4_t vtrn2q_u32(uint32x4_t __p0, uint32x4_t __p1) {
+ uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
+ uint32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
+ uint32x4_t __ret;
+ __ret = __builtin_shufflevector(__rev0, __rev1, 1, 5, 3, 7);
+ __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint64x2_t vtrn2q_u64(uint64x2_t __p0, uint64x2_t __p1) {
+ uint64x2_t __ret;
+ __ret = __builtin_shufflevector(__p0, __p1, 1, 3);
+ return __ret;
+}
+#else
+__ai uint64x2_t vtrn2q_u64(uint64x2_t __p0, uint64x2_t __p1) {
+ uint64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
+ uint64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
+ uint64x2_t __ret;
+ __ret = __builtin_shufflevector(__rev0, __rev1, 1, 3);
+ __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint16x8_t vtrn2q_u16(uint16x8_t __p0, uint16x8_t __p1) {
+ uint16x8_t __ret;
+ __ret = __builtin_shufflevector(__p0, __p1, 1, 9, 3, 11, 5, 13, 7, 15);
+ return __ret;
+}
+#else
+__ai uint16x8_t vtrn2q_u16(uint16x8_t __p0, uint16x8_t __p1) {
+ uint16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
+ uint16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
+ uint16x8_t __ret;
+ __ret = __builtin_shufflevector(__rev0, __rev1, 1, 9, 3, 11, 5, 13, 7, 15);
+ __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai int8x16_t vtrn2q_s8(int8x16_t __p0, int8x16_t __p1) {
+ int8x16_t __ret;
+ __ret = __builtin_shufflevector(__p0, __p1, 1, 17, 3, 19, 5, 21, 7, 23, 9, 25, 11, 27, 13, 29, 15, 31);
+ return __ret;
+}
+#else
+__ai int8x16_t vtrn2q_s8(int8x16_t __p0, int8x16_t __p1) {
+ int8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
+ int8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
+ int8x16_t __ret;
+ __ret = __builtin_shufflevector(__rev0, __rev1, 1, 17, 3, 19, 5, 21, 7, 23, 9, 25, 11, 27, 13, 29, 15, 31);
+ __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai float64x2_t vtrn2q_f64(float64x2_t __p0, float64x2_t __p1) {
+ float64x2_t __ret;
+ __ret = __builtin_shufflevector(__p0, __p1, 1, 3);
+ return __ret;
+}
+#else
+__ai float64x2_t vtrn2q_f64(float64x2_t __p0, float64x2_t __p1) {
+ float64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
+ float64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
+ float64x2_t __ret;
+ __ret = __builtin_shufflevector(__rev0, __rev1, 1, 3);
+ __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai float32x4_t vtrn2q_f32(float32x4_t __p0, float32x4_t __p1) {
+ float32x4_t __ret;
+ __ret = __builtin_shufflevector(__p0, __p1, 1, 5, 3, 7);
+ return __ret;
+}
+#else
+__ai float32x4_t vtrn2q_f32(float32x4_t __p0, float32x4_t __p1) {
+ float32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
+ float32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
+ float32x4_t __ret;
+ __ret = __builtin_shufflevector(__rev0, __rev1, 1, 5, 3, 7);
+ __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai int32x4_t vtrn2q_s32(int32x4_t __p0, int32x4_t __p1) {
+ int32x4_t __ret;
+ __ret = __builtin_shufflevector(__p0, __p1, 1, 5, 3, 7);
+ return __ret;
+}
+#else
+__ai int32x4_t vtrn2q_s32(int32x4_t __p0, int32x4_t __p1) {
+ int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
+ int32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
+ int32x4_t __ret;
+ __ret = __builtin_shufflevector(__rev0, __rev1, 1, 5, 3, 7);
+ __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai int64x2_t vtrn2q_s64(int64x2_t __p0, int64x2_t __p1) {
+ int64x2_t __ret;
+ __ret = __builtin_shufflevector(__p0, __p1, 1, 3);
+ return __ret;
+}
+#else
+__ai int64x2_t vtrn2q_s64(int64x2_t __p0, int64x2_t __p1) {
+ int64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
+ int64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
+ int64x2_t __ret;
+ __ret = __builtin_shufflevector(__rev0, __rev1, 1, 3);
+ __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai int16x8_t vtrn2q_s16(int16x8_t __p0, int16x8_t __p1) {
+ int16x8_t __ret;
+ __ret = __builtin_shufflevector(__p0, __p1, 1, 9, 3, 11, 5, 13, 7, 15);
+ return __ret;
+}
+#else
+__ai int16x8_t vtrn2q_s16(int16x8_t __p0, int16x8_t __p1) {
+ int16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
+ int16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
+ int16x8_t __ret;
+ __ret = __builtin_shufflevector(__rev0, __rev1, 1, 9, 3, 11, 5, 13, 7, 15);
+ __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint8x8_t vtrn2_u8(uint8x8_t __p0, uint8x8_t __p1) {
+ uint8x8_t __ret;
+ __ret = __builtin_shufflevector(__p0, __p1, 1, 9, 3, 11, 5, 13, 7, 15);
+ return __ret;
+}
+#else
+__ai uint8x8_t vtrn2_u8(uint8x8_t __p0, uint8x8_t __p1) {
+ uint8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
+ uint8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
+ uint8x8_t __ret;
+ __ret = __builtin_shufflevector(__rev0, __rev1, 1, 9, 3, 11, 5, 13, 7, 15);
+ __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint32x2_t vtrn2_u32(uint32x2_t __p0, uint32x2_t __p1) {
+ uint32x2_t __ret;
+ __ret = __builtin_shufflevector(__p0, __p1, 1, 3);
+ return __ret;
+}
+#else
+__ai uint32x2_t vtrn2_u32(uint32x2_t __p0, uint32x2_t __p1) {
+ uint32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
+ uint32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
+ uint32x2_t __ret;
+ __ret = __builtin_shufflevector(__rev0, __rev1, 1, 3);
+ __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint16x4_t vtrn2_u16(uint16x4_t __p0, uint16x4_t __p1) {
+ uint16x4_t __ret;
+ __ret = __builtin_shufflevector(__p0, __p1, 1, 5, 3, 7);
+ return __ret;
+}
+#else
+__ai uint16x4_t vtrn2_u16(uint16x4_t __p0, uint16x4_t __p1) {
+ uint16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
+ uint16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
+ uint16x4_t __ret;
+ __ret = __builtin_shufflevector(__rev0, __rev1, 1, 5, 3, 7);
+ __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai int8x8_t vtrn2_s8(int8x8_t __p0, int8x8_t __p1) {
+ int8x8_t __ret;
+ __ret = __builtin_shufflevector(__p0, __p1, 1, 9, 3, 11, 5, 13, 7, 15);
+ return __ret;
+}
+#else
+__ai int8x8_t vtrn2_s8(int8x8_t __p0, int8x8_t __p1) {
+ int8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
+ int8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
+ int8x8_t __ret;
+ __ret = __builtin_shufflevector(__rev0, __rev1, 1, 9, 3, 11, 5, 13, 7, 15);
+ __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai float32x2_t vtrn2_f32(float32x2_t __p0, float32x2_t __p1) {
+ float32x2_t __ret;
+ __ret = __builtin_shufflevector(__p0, __p1, 1, 3);
+ return __ret;
+}
+#else
+__ai float32x2_t vtrn2_f32(float32x2_t __p0, float32x2_t __p1) {
+ float32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
+ float32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
+ float32x2_t __ret;
+ __ret = __builtin_shufflevector(__rev0, __rev1, 1, 3);
+ __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai int32x2_t vtrn2_s32(int32x2_t __p0, int32x2_t __p1) {
+ int32x2_t __ret;
+ __ret = __builtin_shufflevector(__p0, __p1, 1, 3);
+ return __ret;
+}
+#else
+__ai int32x2_t vtrn2_s32(int32x2_t __p0, int32x2_t __p1) {
+ int32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
+ int32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
+ int32x2_t __ret;
+ __ret = __builtin_shufflevector(__rev0, __rev1, 1, 3);
+ __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai int16x4_t vtrn2_s16(int16x4_t __p0, int16x4_t __p1) {
+ int16x4_t __ret;
+ __ret = __builtin_shufflevector(__p0, __p1, 1, 5, 3, 7);
+ return __ret;
+}
+#else
+__ai int16x4_t vtrn2_s16(int16x4_t __p0, int16x4_t __p1) {
+ int16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
+ int16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
+ int16x4_t __ret;
+ __ret = __builtin_shufflevector(__rev0, __rev1, 1, 5, 3, 7);
+ __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
+ return __ret;
+}
+#endif
+
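+/* vtst family: bitwise test. Each output lane is all ones where the AND of
+ * the corresponding input lanes is nonzero, and zero otherwise; the trailing
+ * integer argument encodes the NEON vector type for the builtin. One-lane
+ * 64-bit forms need no lane reversal, so both endian variants are identical. */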
+#ifdef __LITTLE_ENDIAN__
+__ai uint64x1_t vtst_p64(poly64x1_t __p0, poly64x1_t __p1) {
+ uint64x1_t __ret;
+ __ret = (uint64x1_t) __builtin_neon_vtst_v((int8x8_t)__p0, (int8x8_t)__p1, 19);
+ return __ret;
+}
+#else
+__ai uint64x1_t vtst_p64(poly64x1_t __p0, poly64x1_t __p1) {
+ uint64x1_t __ret;
+ __ret = (uint64x1_t) __builtin_neon_vtst_v((int8x8_t)__p0, (int8x8_t)__p1, 19);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint64x2_t vtstq_p64(poly64x2_t __p0, poly64x2_t __p1) {
+ uint64x2_t __ret;
+ __ret = (uint64x2_t) __builtin_neon_vtstq_v((int8x16_t)__p0, (int8x16_t)__p1, 51);
+ return __ret;
+}
+#else
+__ai uint64x2_t vtstq_p64(poly64x2_t __p0, poly64x2_t __p1) {
+ poly64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
+ poly64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
+ uint64x2_t __ret;
+ __ret = (uint64x2_t) __builtin_neon_vtstq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 51);
+ __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint64x2_t vtstq_u64(uint64x2_t __p0, uint64x2_t __p1) {
+ uint64x2_t __ret;
+ __ret = (uint64x2_t) __builtin_neon_vtstq_v((int8x16_t)__p0, (int8x16_t)__p1, 51);
+ return __ret;
+}
+#else
+__ai uint64x2_t vtstq_u64(uint64x2_t __p0, uint64x2_t __p1) {
+ uint64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
+ uint64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
+ uint64x2_t __ret;
+ __ret = (uint64x2_t) __builtin_neon_vtstq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 51);
+ __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint64x2_t vtstq_s64(int64x2_t __p0, int64x2_t __p1) {
+ uint64x2_t __ret;
+ __ret = (uint64x2_t) __builtin_neon_vtstq_v((int8x16_t)__p0, (int8x16_t)__p1, 51);
+ return __ret;
+}
+#else
+__ai uint64x2_t vtstq_s64(int64x2_t __p0, int64x2_t __p1) {
+ int64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
+ int64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
+ uint64x2_t __ret;
+ __ret = (uint64x2_t) __builtin_neon_vtstq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 51);
+ __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint64x1_t vtst_u64(uint64x1_t __p0, uint64x1_t __p1) {
+ uint64x1_t __ret;
+ __ret = (uint64x1_t) __builtin_neon_vtst_v((int8x8_t)__p0, (int8x8_t)__p1, 19);
+ return __ret;
+}
+#else
+__ai uint64x1_t vtst_u64(uint64x1_t __p0, uint64x1_t __p1) {
+ uint64x1_t __ret;
+ __ret = (uint64x1_t) __builtin_neon_vtst_v((int8x8_t)__p0, (int8x8_t)__p1, 19);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint64x1_t vtst_s64(int64x1_t __p0, int64x1_t __p1) {
+ uint64x1_t __ret;
+ __ret = (uint64x1_t) __builtin_neon_vtst_v((int8x8_t)__p0, (int8x8_t)__p1, 19);
+ return __ret;
+}
+#else
+__ai uint64x1_t vtst_s64(int64x1_t __p0, int64x1_t __p1) {
+ uint64x1_t __ret;
+ __ret = (uint64x1_t) __builtin_neon_vtst_v((int8x8_t)__p0, (int8x8_t)__p1, 19);
+ return __ret;
+}
+#endif
+
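+/* Scalar forms: vtstd_{u,s}64 test a single 64-bit value through dedicated
+ * builtins, so there is no lane order to adjust. Note that vtstd_s64 is typed
+ * to return int64_t in this header even though the result is a mask. */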
+#ifdef __LITTLE_ENDIAN__
+__ai uint64_t vtstd_u64(uint64_t __p0, uint64_t __p1) {
+ uint64_t __ret;
+ __ret = (uint64_t) __builtin_neon_vtstd_u64(__p0, __p1);
+ return __ret;
+}
+#else
+__ai uint64_t vtstd_u64(uint64_t __p0, uint64_t __p1) {
+ uint64_t __ret;
+ __ret = (uint64_t) __builtin_neon_vtstd_u64(__p0, __p1);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai int64_t vtstd_s64(int64_t __p0, int64_t __p1) {
+ int64_t __ret;
+ __ret = (int64_t) __builtin_neon_vtstd_s64(__p0, __p1);
+ return __ret;
+}
+#else
+__ai int64_t vtstd_s64(int64_t __p0, int64_t __p1) {
+ int64_t __ret;
+ __ret = (int64_t) __builtin_neon_vtstd_s64(__p0, __p1);
+ return __ret;
+}
+#endif
+
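+/* vuqadd scalar family: saturating accumulate (presumably mapping to the
+ * AArch64 SUQADD instruction). Each form is a thin wrapper over a per-width
+ * scalar builtin and needs no endianness handling. */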
+#ifdef __LITTLE_ENDIAN__
+__ai int8_t vuqaddb_s8(int8_t __p0, int8_t __p1) {
+ int8_t __ret;
+ __ret = (int8_t) __builtin_neon_vuqaddb_s8(__p0, __p1);
+ return __ret;
+}
+#else
+__ai int8_t vuqaddb_s8(int8_t __p0, int8_t __p1) {
+ int8_t __ret;
+ __ret = (int8_t) __builtin_neon_vuqaddb_s8(__p0, __p1);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai int32_t vuqadds_s32(int32_t __p0, int32_t __p1) {
+ int32_t __ret;
+ __ret = (int32_t) __builtin_neon_vuqadds_s32(__p0, __p1);
+ return __ret;
+}
+#else
+__ai int32_t vuqadds_s32(int32_t __p0, int32_t __p1) {
+ int32_t __ret;
+ __ret = (int32_t) __builtin_neon_vuqadds_s32(__p0, __p1);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai int64_t vuqaddd_s64(int64_t __p0, int64_t __p1) {
+ int64_t __ret;
+ __ret = (int64_t) __builtin_neon_vuqaddd_s64(__p0, __p1);
+ return __ret;
+}
+#else
+__ai int64_t vuqaddd_s64(int64_t __p0, int64_t __p1) {
+ int64_t __ret;
+ __ret = (int64_t) __builtin_neon_vuqaddd_s64(__p0, __p1);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai int16_t vuqaddh_s16(int16_t __p0, int16_t __p1) {
+ int16_t __ret;
+ __ret = (int16_t) __builtin_neon_vuqaddh_s16(__p0, __p1);
+ return __ret;
+}
+#else
+__ai int16_t vuqaddh_s16(int16_t __p0, int16_t __p1) {
+ int16_t __ret;
+ __ret = (int16_t) __builtin_neon_vuqaddh_s16(__p0, __p1);
+ return __ret;
+}
+#endif
+
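+/* vuqadd vector family: the same saturating accumulate on whole vectors via
+ * __builtin_neon_vuqadd_v / __builtin_neon_vuqaddq_v, with the usual
+ * reverse-shuffle-reverse wrapper on big-endian. */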
+#ifdef __LITTLE_ENDIAN__
+__ai int8x16_t vuqaddq_s8(int8x16_t __p0, int8x16_t __p1) {
+ int8x16_t __ret;
+ __ret = (int8x16_t) __builtin_neon_vuqaddq_v((int8x16_t)__p0, (int8x16_t)__p1, 32);
+ return __ret;
+}
+#else
+__ai int8x16_t vuqaddq_s8(int8x16_t __p0, int8x16_t __p1) {
+ int8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
+ int8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
+ int8x16_t __ret;
+ __ret = (int8x16_t) __builtin_neon_vuqaddq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 32);
+ __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai int32x4_t vuqaddq_s32(int32x4_t __p0, int32x4_t __p1) {
+ int32x4_t __ret;
+ __ret = (int32x4_t) __builtin_neon_vuqaddq_v((int8x16_t)__p0, (int8x16_t)__p1, 34);
+ return __ret;
+}
+#else
+__ai int32x4_t vuqaddq_s32(int32x4_t __p0, int32x4_t __p1) {
+ int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
+ int32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
+ int32x4_t __ret;
+ __ret = (int32x4_t) __builtin_neon_vuqaddq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 34);
+ __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai int64x2_t vuqaddq_s64(int64x2_t __p0, int64x2_t __p1) {
+ int64x2_t __ret;
+ __ret = (int64x2_t) __builtin_neon_vuqaddq_v((int8x16_t)__p0, (int8x16_t)__p1, 35);
+ return __ret;
+}
+#else
+__ai int64x2_t vuqaddq_s64(int64x2_t __p0, int64x2_t __p1) {
+ int64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
+ int64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
+ int64x2_t __ret;
+ __ret = (int64x2_t) __builtin_neon_vuqaddq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 35);
+ __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai int16x8_t vuqaddq_s16(int16x8_t __p0, int16x8_t __p1) {
+ int16x8_t __ret;
+ __ret = (int16x8_t) __builtin_neon_vuqaddq_v((int8x16_t)__p0, (int8x16_t)__p1, 33);
+ return __ret;
+}
+#else
+__ai int16x8_t vuqaddq_s16(int16x8_t __p0, int16x8_t __p1) {
+ int16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
+ int16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
+ int16x8_t __ret;
+ __ret = (int16x8_t) __builtin_neon_vuqaddq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 33);
+ __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai int8x8_t vuqadd_s8(int8x8_t __p0, int8x8_t __p1) {
+ int8x8_t __ret;
+ __ret = (int8x8_t) __builtin_neon_vuqadd_v((int8x8_t)__p0, (int8x8_t)__p1, 0);
+ return __ret;
+}
+#else
+__ai int8x8_t vuqadd_s8(int8x8_t __p0, int8x8_t __p1) {
+ int8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
+ int8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
+ int8x8_t __ret;
+ __ret = (int8x8_t) __builtin_neon_vuqadd_v((int8x8_t)__rev0, (int8x8_t)__rev1, 0);
+ __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai int32x2_t vuqadd_s32(int32x2_t __p0, int32x2_t __p1) {
+ int32x2_t __ret;
+ __ret = (int32x2_t) __builtin_neon_vuqadd_v((int8x8_t)__p0, (int8x8_t)__p1, 2);
+ return __ret;
+}
+#else
+__ai int32x2_t vuqadd_s32(int32x2_t __p0, int32x2_t __p1) {
+ int32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
+ int32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
+ int32x2_t __ret;
+ __ret = (int32x2_t) __builtin_neon_vuqadd_v((int8x8_t)__rev0, (int8x8_t)__rev1, 2);
+ __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai int64x1_t vuqadd_s64(int64x1_t __p0, int64x1_t __p1) {
+ int64x1_t __ret;
+ __ret = (int64x1_t) __builtin_neon_vuqadd_v((int8x8_t)__p0, (int8x8_t)__p1, 3);
+ return __ret;
+}
+#else
+__ai int64x1_t vuqadd_s64(int64x1_t __p0, int64x1_t __p1) {
+ int64x1_t __ret;
+ __ret = (int64x1_t) __builtin_neon_vuqadd_v((int8x8_t)__p0, (int8x8_t)__p1, 3);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai int16x4_t vuqadd_s16(int16x4_t __p0, int16x4_t __p1) {
+ int16x4_t __ret;
+ __ret = (int16x4_t) __builtin_neon_vuqadd_v((int8x8_t)__p0, (int8x8_t)__p1, 1);
+ return __ret;
+}
+#else
+__ai int16x4_t vuqadd_s16(int16x4_t __p0, int16x4_t __p1) {
+ int16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
+ int16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
+ int16x4_t __ret;
+ __ret = (int16x4_t) __builtin_neon_vuqadd_v((int8x8_t)__rev0, (int8x8_t)__rev1, 1);
+ __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
+ return __ret;
+}
+#endif
+
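+/* vuzp1 family: unzip-even. Conceptually concatenates the two inputs and
+ * keeps the even-indexed elements, e.g. vuzp1q_u32(a, b) -> {a0, a2, b0, b2}. */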
+#ifdef __LITTLE_ENDIAN__
+__ai poly8x8_t vuzp1_p8(poly8x8_t __p0, poly8x8_t __p1) {
+ poly8x8_t __ret;
+ __ret = __builtin_shufflevector(__p0, __p1, 0, 2, 4, 6, 8, 10, 12, 14);
+ return __ret;
+}
+#else
+__ai poly8x8_t vuzp1_p8(poly8x8_t __p0, poly8x8_t __p1) {
+ poly8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
+ poly8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
+ poly8x8_t __ret;
+ __ret = __builtin_shufflevector(__rev0, __rev1, 0, 2, 4, 6, 8, 10, 12, 14);
+ __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai poly16x4_t vuzp1_p16(poly16x4_t __p0, poly16x4_t __p1) {
+ poly16x4_t __ret;
+ __ret = __builtin_shufflevector(__p0, __p1, 0, 2, 4, 6);
+ return __ret;
+}
+#else
+__ai poly16x4_t vuzp1_p16(poly16x4_t __p0, poly16x4_t __p1) {
+ poly16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
+ poly16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
+ poly16x4_t __ret;
+ __ret = __builtin_shufflevector(__rev0, __rev1, 0, 2, 4, 6);
+ __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai poly8x16_t vuzp1q_p8(poly8x16_t __p0, poly8x16_t __p1) {
+ poly8x16_t __ret;
+ __ret = __builtin_shufflevector(__p0, __p1, 0, 2, 4, 6, 8, 10, 12, 14, 16, 18, 20, 22, 24, 26, 28, 30);
+ return __ret;
+}
+#else
+__ai poly8x16_t vuzp1q_p8(poly8x16_t __p0, poly8x16_t __p1) {
+ poly8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
+ poly8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
+ poly8x16_t __ret;
+ __ret = __builtin_shufflevector(__rev0, __rev1, 0, 2, 4, 6, 8, 10, 12, 14, 16, 18, 20, 22, 24, 26, 28, 30);
+ __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai poly64x2_t vuzp1q_p64(poly64x2_t __p0, poly64x2_t __p1) {
+ poly64x2_t __ret;
+ __ret = __builtin_shufflevector(__p0, __p1, 0, 2);
+ return __ret;
+}
+#else
+__ai poly64x2_t vuzp1q_p64(poly64x2_t __p0, poly64x2_t __p1) {
+ poly64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
+ poly64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
+ poly64x2_t __ret;
+ __ret = __builtin_shufflevector(__rev0, __rev1, 0, 2);
+ __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai poly16x8_t vuzp1q_p16(poly16x8_t __p0, poly16x8_t __p1) {
+ poly16x8_t __ret;
+ __ret = __builtin_shufflevector(__p0, __p1, 0, 2, 4, 6, 8, 10, 12, 14);
+ return __ret;
+}
+#else
+__ai poly16x8_t vuzp1q_p16(poly16x8_t __p0, poly16x8_t __p1) {
+ poly16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
+ poly16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
+ poly16x8_t __ret;
+ __ret = __builtin_shufflevector(__rev0, __rev1, 0, 2, 4, 6, 8, 10, 12, 14);
+ __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint8x16_t vuzp1q_u8(uint8x16_t __p0, uint8x16_t __p1) {
+ uint8x16_t __ret;
+ __ret = __builtin_shufflevector(__p0, __p1, 0, 2, 4, 6, 8, 10, 12, 14, 16, 18, 20, 22, 24, 26, 28, 30);
+ return __ret;
+}
+#else
+__ai uint8x16_t vuzp1q_u8(uint8x16_t __p0, uint8x16_t __p1) {
+ uint8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
+ uint8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
+ uint8x16_t __ret;
+ __ret = __builtin_shufflevector(__rev0, __rev1, 0, 2, 4, 6, 8, 10, 12, 14, 16, 18, 20, 22, 24, 26, 28, 30);
+ __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint32x4_t vuzp1q_u32(uint32x4_t __p0, uint32x4_t __p1) {
+ uint32x4_t __ret;
+ __ret = __builtin_shufflevector(__p0, __p1, 0, 2, 4, 6);
+ return __ret;
+}
+#else
+__ai uint32x4_t vuzp1q_u32(uint32x4_t __p0, uint32x4_t __p1) {
+ uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
+ uint32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
+ uint32x4_t __ret;
+ __ret = __builtin_shufflevector(__rev0, __rev1, 0, 2, 4, 6);
+ __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint64x2_t vuzp1q_u64(uint64x2_t __p0, uint64x2_t __p1) {
+ uint64x2_t __ret;
+ __ret = __builtin_shufflevector(__p0, __p1, 0, 2);
+ return __ret;
+}
+#else
+__ai uint64x2_t vuzp1q_u64(uint64x2_t __p0, uint64x2_t __p1) {
+ uint64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
+ uint64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
+ uint64x2_t __ret;
+ __ret = __builtin_shufflevector(__rev0, __rev1, 0, 2);
+ __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint16x8_t vuzp1q_u16(uint16x8_t __p0, uint16x8_t __p1) {
+ uint16x8_t __ret;
+ __ret = __builtin_shufflevector(__p0, __p1, 0, 2, 4, 6, 8, 10, 12, 14);
+ return __ret;
+}
+#else
+__ai uint16x8_t vuzp1q_u16(uint16x8_t __p0, uint16x8_t __p1) {
+ uint16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
+ uint16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
+ uint16x8_t __ret;
+ __ret = __builtin_shufflevector(__rev0, __rev1, 0, 2, 4, 6, 8, 10, 12, 14);
+ __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai int8x16_t vuzp1q_s8(int8x16_t __p0, int8x16_t __p1) {
+ int8x16_t __ret;
+ __ret = __builtin_shufflevector(__p0, __p1, 0, 2, 4, 6, 8, 10, 12, 14, 16, 18, 20, 22, 24, 26, 28, 30);
+ return __ret;
+}
+#else
+__ai int8x16_t vuzp1q_s8(int8x16_t __p0, int8x16_t __p1) {
+ int8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
+ int8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
+ int8x16_t __ret;
+ __ret = __builtin_shufflevector(__rev0, __rev1, 0, 2, 4, 6, 8, 10, 12, 14, 16, 18, 20, 22, 24, 26, 28, 30);
+ __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai float64x2_t vuzp1q_f64(float64x2_t __p0, float64x2_t __p1) {
+ float64x2_t __ret;
+ __ret = __builtin_shufflevector(__p0, __p1, 0, 2);
+ return __ret;
+}
+#else
+__ai float64x2_t vuzp1q_f64(float64x2_t __p0, float64x2_t __p1) {
+ float64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
+ float64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
+ float64x2_t __ret;
+ __ret = __builtin_shufflevector(__rev0, __rev1, 0, 2);
+ __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai float32x4_t vuzp1q_f32(float32x4_t __p0, float32x4_t __p1) {
+ float32x4_t __ret;
+ __ret = __builtin_shufflevector(__p0, __p1, 0, 2, 4, 6);
+ return __ret;
+}
+#else
+__ai float32x4_t vuzp1q_f32(float32x4_t __p0, float32x4_t __p1) {
+ float32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
+ float32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
+ float32x4_t __ret;
+ __ret = __builtin_shufflevector(__rev0, __rev1, 0, 2, 4, 6);
+ __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai int32x4_t vuzp1q_s32(int32x4_t __p0, int32x4_t __p1) {
+ int32x4_t __ret;
+ __ret = __builtin_shufflevector(__p0, __p1, 0, 2, 4, 6);
+ return __ret;
+}
+#else
+__ai int32x4_t vuzp1q_s32(int32x4_t __p0, int32x4_t __p1) {
+ int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
+ int32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
+ int32x4_t __ret;
+ __ret = __builtin_shufflevector(__rev0, __rev1, 0, 2, 4, 6);
+ __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai int64x2_t vuzp1q_s64(int64x2_t __p0, int64x2_t __p1) {
+ int64x2_t __ret;
+ __ret = __builtin_shufflevector(__p0, __p1, 0, 2);
+ return __ret;
+}
+#else
+__ai int64x2_t vuzp1q_s64(int64x2_t __p0, int64x2_t __p1) {
+ int64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
+ int64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
+ int64x2_t __ret;
+ __ret = __builtin_shufflevector(__rev0, __rev1, 0, 2);
+ __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai int16x8_t vuzp1q_s16(int16x8_t __p0, int16x8_t __p1) {
+ int16x8_t __ret;
+ __ret = __builtin_shufflevector(__p0, __p1, 0, 2, 4, 6, 8, 10, 12, 14);
+ return __ret;
+}
+#else
+__ai int16x8_t vuzp1q_s16(int16x8_t __p0, int16x8_t __p1) {
+ int16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
+ int16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
+ int16x8_t __ret;
+ __ret = __builtin_shufflevector(__rev0, __rev1, 0, 2, 4, 6, 8, 10, 12, 14);
+ __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint8x8_t vuzp1_u8(uint8x8_t __p0, uint8x8_t __p1) {
+ uint8x8_t __ret;
+ __ret = __builtin_shufflevector(__p0, __p1, 0, 2, 4, 6, 8, 10, 12, 14);
+ return __ret;
+}
+#else
+__ai uint8x8_t vuzp1_u8(uint8x8_t __p0, uint8x8_t __p1) {
+ uint8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
+ uint8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
+ uint8x8_t __ret;
+ __ret = __builtin_shufflevector(__rev0, __rev1, 0, 2, 4, 6, 8, 10, 12, 14);
+ __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint32x2_t vuzp1_u32(uint32x2_t __p0, uint32x2_t __p1) {
+ uint32x2_t __ret;
+ __ret = __builtin_shufflevector(__p0, __p1, 0, 2);
+ return __ret;
+}
+#else
+__ai uint32x2_t vuzp1_u32(uint32x2_t __p0, uint32x2_t __p1) {
+ uint32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
+ uint32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
+ uint32x2_t __ret;
+ __ret = __builtin_shufflevector(__rev0, __rev1, 0, 2);
+ __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint16x4_t vuzp1_u16(uint16x4_t __p0, uint16x4_t __p1) {
+ uint16x4_t __ret;
+ __ret = __builtin_shufflevector(__p0, __p1, 0, 2, 4, 6);
+ return __ret;
+}
+#else
+__ai uint16x4_t vuzp1_u16(uint16x4_t __p0, uint16x4_t __p1) {
+ uint16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
+ uint16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
+ uint16x4_t __ret;
+ __ret = __builtin_shufflevector(__rev0, __rev1, 0, 2, 4, 6);
+ __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai int8x8_t vuzp1_s8(int8x8_t __p0, int8x8_t __p1) {
+ int8x8_t __ret;
+ __ret = __builtin_shufflevector(__p0, __p1, 0, 2, 4, 6, 8, 10, 12, 14);
+ return __ret;
+}
+#else
+__ai int8x8_t vuzp1_s8(int8x8_t __p0, int8x8_t __p1) {
+ int8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
+ int8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
+ int8x8_t __ret;
+ __ret = __builtin_shufflevector(__rev0, __rev1, 0, 2, 4, 6, 8, 10, 12, 14);
+ __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai float32x2_t vuzp1_f32(float32x2_t __p0, float32x2_t __p1) {
+ float32x2_t __ret;
+ __ret = __builtin_shufflevector(__p0, __p1, 0, 2);
+ return __ret;
+}
+#else
+__ai float32x2_t vuzp1_f32(float32x2_t __p0, float32x2_t __p1) {
+ float32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
+ float32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
+ float32x2_t __ret;
+ __ret = __builtin_shufflevector(__rev0, __rev1, 0, 2);
+ __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai int32x2_t vuzp1_s32(int32x2_t __p0, int32x2_t __p1) {
+ int32x2_t __ret;
+ __ret = __builtin_shufflevector(__p0, __p1, 0, 2);
+ return __ret;
+}
+#else
+__ai int32x2_t vuzp1_s32(int32x2_t __p0, int32x2_t __p1) {
+ int32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
+ int32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
+ int32x2_t __ret;
+ __ret = __builtin_shufflevector(__rev0, __rev1, 0, 2);
+ __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai int16x4_t vuzp1_s16(int16x4_t __p0, int16x4_t __p1) {
+ int16x4_t __ret;
+ __ret = __builtin_shufflevector(__p0, __p1, 0, 2, 4, 6);
+ return __ret;
+}
+#else
+__ai int16x4_t vuzp1_s16(int16x4_t __p0, int16x4_t __p1) {
+ int16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
+ int16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
+ int16x4_t __ret;
+ __ret = __builtin_shufflevector(__rev0, __rev1, 0, 2, 4, 6);
+ __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
+ return __ret;
+}
+#endif
+
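+/* vuzp2 family: unzip-odd, keeping the odd-indexed elements of the
+ * concatenation, e.g. vuzp2q_u32(a, b) -> {a1, a3, b1, b3}. */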
+#ifdef __LITTLE_ENDIAN__
+__ai poly8x8_t vuzp2_p8(poly8x8_t __p0, poly8x8_t __p1) {
+ poly8x8_t __ret;
+ __ret = __builtin_shufflevector(__p0, __p1, 1, 3, 5, 7, 9, 11, 13, 15);
+ return __ret;
+}
+#else
+__ai poly8x8_t vuzp2_p8(poly8x8_t __p0, poly8x8_t __p1) {
+ poly8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
+ poly8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
+ poly8x8_t __ret;
+ __ret = __builtin_shufflevector(__rev0, __rev1, 1, 3, 5, 7, 9, 11, 13, 15);
+ __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai poly16x4_t vuzp2_p16(poly16x4_t __p0, poly16x4_t __p1) {
+ poly16x4_t __ret;
+ __ret = __builtin_shufflevector(__p0, __p1, 1, 3, 5, 7);
+ return __ret;
+}
+#else
+__ai poly16x4_t vuzp2_p16(poly16x4_t __p0, poly16x4_t __p1) {
+ poly16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
+ poly16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
+ poly16x4_t __ret;
+ __ret = __builtin_shufflevector(__rev0, __rev1, 1, 3, 5, 7);
+ __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai poly8x16_t vuzp2q_p8(poly8x16_t __p0, poly8x16_t __p1) {
+ poly8x16_t __ret;
+ __ret = __builtin_shufflevector(__p0, __p1, 1, 3, 5, 7, 9, 11, 13, 15, 17, 19, 21, 23, 25, 27, 29, 31);
+ return __ret;
+}
+#else
+__ai poly8x16_t vuzp2q_p8(poly8x16_t __p0, poly8x16_t __p1) {
+ poly8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
+ poly8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
+ poly8x16_t __ret;
+ __ret = __builtin_shufflevector(__rev0, __rev1, 1, 3, 5, 7, 9, 11, 13, 15, 17, 19, 21, 23, 25, 27, 29, 31);
+ __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai poly64x2_t vuzp2q_p64(poly64x2_t __p0, poly64x2_t __p1) {
+ poly64x2_t __ret;
+ __ret = __builtin_shufflevector(__p0, __p1, 1, 3);
+ return __ret;
+}
+#else
+__ai poly64x2_t vuzp2q_p64(poly64x2_t __p0, poly64x2_t __p1) {
+ poly64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
+ poly64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
+ poly64x2_t __ret;
+ __ret = __builtin_shufflevector(__rev0, __rev1, 1, 3);
+ __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai poly16x8_t vuzp2q_p16(poly16x8_t __p0, poly16x8_t __p1) {
+ poly16x8_t __ret;
+ __ret = __builtin_shufflevector(__p0, __p1, 1, 3, 5, 7, 9, 11, 13, 15);
+ return __ret;
+}
+#else
+__ai poly16x8_t vuzp2q_p16(poly16x8_t __p0, poly16x8_t __p1) {
+ poly16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
+ poly16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
+ poly16x8_t __ret;
+ __ret = __builtin_shufflevector(__rev0, __rev1, 1, 3, 5, 7, 9, 11, 13, 15);
+ __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint8x16_t vuzp2q_u8(uint8x16_t __p0, uint8x16_t __p1) {
+ uint8x16_t __ret;
+ __ret = __builtin_shufflevector(__p0, __p1, 1, 3, 5, 7, 9, 11, 13, 15, 17, 19, 21, 23, 25, 27, 29, 31);
+ return __ret;
+}
+#else
+__ai uint8x16_t vuzp2q_u8(uint8x16_t __p0, uint8x16_t __p1) {
+ uint8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
+ uint8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
+ uint8x16_t __ret;
+ __ret = __builtin_shufflevector(__rev0, __rev1, 1, 3, 5, 7, 9, 11, 13, 15, 17, 19, 21, 23, 25, 27, 29, 31);
+ __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint32x4_t vuzp2q_u32(uint32x4_t __p0, uint32x4_t __p1) {
+ uint32x4_t __ret;
+ __ret = __builtin_shufflevector(__p0, __p1, 1, 3, 5, 7);
+ return __ret;
+}
+#else
+__ai uint32x4_t vuzp2q_u32(uint32x4_t __p0, uint32x4_t __p1) {
+ uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
+ uint32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
+ uint32x4_t __ret;
+ __ret = __builtin_shufflevector(__rev0, __rev1, 1, 3, 5, 7);
+ __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint64x2_t vuzp2q_u64(uint64x2_t __p0, uint64x2_t __p1) {
+ uint64x2_t __ret;
+ __ret = __builtin_shufflevector(__p0, __p1, 1, 3);
+ return __ret;
+}
+#else
+__ai uint64x2_t vuzp2q_u64(uint64x2_t __p0, uint64x2_t __p1) {
+ uint64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
+ uint64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
+ uint64x2_t __ret;
+ __ret = __builtin_shufflevector(__rev0, __rev1, 1, 3);
+ __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint16x8_t vuzp2q_u16(uint16x8_t __p0, uint16x8_t __p1) {
+ uint16x8_t __ret;
+ __ret = __builtin_shufflevector(__p0, __p1, 1, 3, 5, 7, 9, 11, 13, 15);
+ return __ret;
+}
+#else
+__ai uint16x8_t vuzp2q_u16(uint16x8_t __p0, uint16x8_t __p1) {
+ uint16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
+ uint16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
+ uint16x8_t __ret;
+ __ret = __builtin_shufflevector(__rev0, __rev1, 1, 3, 5, 7, 9, 11, 13, 15);
+ __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai int8x16_t vuzp2q_s8(int8x16_t __p0, int8x16_t __p1) {
+ int8x16_t __ret;
+ __ret = __builtin_shufflevector(__p0, __p1, 1, 3, 5, 7, 9, 11, 13, 15, 17, 19, 21, 23, 25, 27, 29, 31);
+ return __ret;
+}
+#else
+__ai int8x16_t vuzp2q_s8(int8x16_t __p0, int8x16_t __p1) {
+ int8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
+ int8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
+ int8x16_t __ret;
+ __ret = __builtin_shufflevector(__rev0, __rev1, 1, 3, 5, 7, 9, 11, 13, 15, 17, 19, 21, 23, 25, 27, 29, 31);
+ __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai float64x2_t vuzp2q_f64(float64x2_t __p0, float64x2_t __p1) {
+ float64x2_t __ret;
+ __ret = __builtin_shufflevector(__p0, __p1, 1, 3);
+ return __ret;
+}
+#else
+__ai float64x2_t vuzp2q_f64(float64x2_t __p0, float64x2_t __p1) {
+ float64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
+ float64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
+ float64x2_t __ret;
+ __ret = __builtin_shufflevector(__rev0, __rev1, 1, 3);
+ __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai float32x4_t vuzp2q_f32(float32x4_t __p0, float32x4_t __p1) {
+ float32x4_t __ret;
+ __ret = __builtin_shufflevector(__p0, __p1, 1, 3, 5, 7);
+ return __ret;
+}
+#else
+__ai float32x4_t vuzp2q_f32(float32x4_t __p0, float32x4_t __p1) {
+ float32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
+ float32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
+ float32x4_t __ret;
+ __ret = __builtin_shufflevector(__rev0, __rev1, 1, 3, 5, 7);
+ __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai int32x4_t vuzp2q_s32(int32x4_t __p0, int32x4_t __p1) {
+ int32x4_t __ret;
+ __ret = __builtin_shufflevector(__p0, __p1, 1, 3, 5, 7);
+ return __ret;
+}
+#else
+__ai int32x4_t vuzp2q_s32(int32x4_t __p0, int32x4_t __p1) {
+ int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
+ int32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
+ int32x4_t __ret;
+ __ret = __builtin_shufflevector(__rev0, __rev1, 1, 3, 5, 7);
+ __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai int64x2_t vuzp2q_s64(int64x2_t __p0, int64x2_t __p1) {
+ int64x2_t __ret;
+ __ret = __builtin_shufflevector(__p0, __p1, 1, 3);
+ return __ret;
+}
+#else
+__ai int64x2_t vuzp2q_s64(int64x2_t __p0, int64x2_t __p1) {
+ int64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
+ int64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
+ int64x2_t __ret;
+ __ret = __builtin_shufflevector(__rev0, __rev1, 1, 3);
+ __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai int16x8_t vuzp2q_s16(int16x8_t __p0, int16x8_t __p1) {
+ int16x8_t __ret;
+ __ret = __builtin_shufflevector(__p0, __p1, 1, 3, 5, 7, 9, 11, 13, 15);
+ return __ret;
+}
+#else
+__ai int16x8_t vuzp2q_s16(int16x8_t __p0, int16x8_t __p1) {
+ int16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
+ int16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
+ int16x8_t __ret;
+ __ret = __builtin_shufflevector(__rev0, __rev1, 1, 3, 5, 7, 9, 11, 13, 15);
+ __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint8x8_t vuzp2_u8(uint8x8_t __p0, uint8x8_t __p1) {
+ uint8x8_t __ret;
+ __ret = __builtin_shufflevector(__p0, __p1, 1, 3, 5, 7, 9, 11, 13, 15);
+ return __ret;
+}
+#else
+__ai uint8x8_t vuzp2_u8(uint8x8_t __p0, uint8x8_t __p1) {
+ uint8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
+ uint8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
+ uint8x8_t __ret;
+ __ret = __builtin_shufflevector(__rev0, __rev1, 1, 3, 5, 7, 9, 11, 13, 15);
+ __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint32x2_t vuzp2_u32(uint32x2_t __p0, uint32x2_t __p1) {
+ uint32x2_t __ret;
+ __ret = __builtin_shufflevector(__p0, __p1, 1, 3);
+ return __ret;
+}
+#else
+__ai uint32x2_t vuzp2_u32(uint32x2_t __p0, uint32x2_t __p1) {
+ uint32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
+ uint32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
+ uint32x2_t __ret;
+ __ret = __builtin_shufflevector(__rev0, __rev1, 1, 3);
+ __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint16x4_t vuzp2_u16(uint16x4_t __p0, uint16x4_t __p1) {
+ uint16x4_t __ret;
+ __ret = __builtin_shufflevector(__p0, __p1, 1, 3, 5, 7);
+ return __ret;
+}
+#else
+__ai uint16x4_t vuzp2_u16(uint16x4_t __p0, uint16x4_t __p1) {
+ uint16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
+ uint16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
+ uint16x4_t __ret;
+ __ret = __builtin_shufflevector(__rev0, __rev1, 1, 3, 5, 7);
+ __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai int8x8_t vuzp2_s8(int8x8_t __p0, int8x8_t __p1) {
+ int8x8_t __ret;
+ __ret = __builtin_shufflevector(__p0, __p1, 1, 3, 5, 7, 9, 11, 13, 15);
+ return __ret;
+}
+#else
+__ai int8x8_t vuzp2_s8(int8x8_t __p0, int8x8_t __p1) {
+ int8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
+ int8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
+ int8x8_t __ret;
+ __ret = __builtin_shufflevector(__rev0, __rev1, 1, 3, 5, 7, 9, 11, 13, 15);
+ __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai float32x2_t vuzp2_f32(float32x2_t __p0, float32x2_t __p1) {
+ float32x2_t __ret;
+ __ret = __builtin_shufflevector(__p0, __p1, 1, 3);
+ return __ret;
+}
+#else
+__ai float32x2_t vuzp2_f32(float32x2_t __p0, float32x2_t __p1) {
+ float32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
+ float32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
+ float32x2_t __ret;
+ __ret = __builtin_shufflevector(__rev0, __rev1, 1, 3);
+ __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai int32x2_t vuzp2_s32(int32x2_t __p0, int32x2_t __p1) {
+ int32x2_t __ret;
+ __ret = __builtin_shufflevector(__p0, __p1, 1, 3);
+ return __ret;
+}
+#else
+__ai int32x2_t vuzp2_s32(int32x2_t __p0, int32x2_t __p1) {
+ int32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
+ int32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
+ int32x2_t __ret;
+ __ret = __builtin_shufflevector(__rev0, __rev1, 1, 3);
+ __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai int16x4_t vuzp2_s16(int16x4_t __p0, int16x4_t __p1) {
+ int16x4_t __ret;
+ __ret = __builtin_shufflevector(__p0, __p1, 1, 3, 5, 7);
+ return __ret;
+}
+#else
+__ai int16x4_t vuzp2_s16(int16x4_t __p0, int16x4_t __p1) {
+ int16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
+ int16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
+ int16x4_t __ret;
+ __ret = __builtin_shufflevector(__rev0, __rev1, 1, 3, 5, 7);
+ __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
+ return __ret;
+}
+#endif
+
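+/* vzip1 family: interleave the low halves of the two inputs,
+ * e.g. vzip1q_u32(a, b) -> {a0, b0, a1, b1}. */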
+#ifdef __LITTLE_ENDIAN__
+__ai poly8x8_t vzip1_p8(poly8x8_t __p0, poly8x8_t __p1) {
+ poly8x8_t __ret;
+ __ret = __builtin_shufflevector(__p0, __p1, 0, 8, 1, 9, 2, 10, 3, 11);
+ return __ret;
+}
+#else
+__ai poly8x8_t vzip1_p8(poly8x8_t __p0, poly8x8_t __p1) {
+ poly8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
+ poly8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
+ poly8x8_t __ret;
+ __ret = __builtin_shufflevector(__rev0, __rev1, 0, 8, 1, 9, 2, 10, 3, 11);
+ __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai poly16x4_t vzip1_p16(poly16x4_t __p0, poly16x4_t __p1) {
+ poly16x4_t __ret;
+ __ret = __builtin_shufflevector(__p0, __p1, 0, 4, 1, 5);
+ return __ret;
+}
+#else
+__ai poly16x4_t vzip1_p16(poly16x4_t __p0, poly16x4_t __p1) {
+ poly16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
+ poly16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
+ poly16x4_t __ret;
+ __ret = __builtin_shufflevector(__rev0, __rev1, 0, 4, 1, 5);
+ __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai poly8x16_t vzip1q_p8(poly8x16_t __p0, poly8x16_t __p1) {
+ poly8x16_t __ret;
+ __ret = __builtin_shufflevector(__p0, __p1, 0, 16, 1, 17, 2, 18, 3, 19, 4, 20, 5, 21, 6, 22, 7, 23);
+ return __ret;
+}
+#else
+__ai poly8x16_t vzip1q_p8(poly8x16_t __p0, poly8x16_t __p1) {
+ poly8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
+ poly8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
+ poly8x16_t __ret;
+ __ret = __builtin_shufflevector(__rev0, __rev1, 0, 16, 1, 17, 2, 18, 3, 19, 4, 20, 5, 21, 6, 22, 7, 23);
+ __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai poly64x2_t vzip1q_p64(poly64x2_t __p0, poly64x2_t __p1) {
+ poly64x2_t __ret;
+ __ret = __builtin_shufflevector(__p0, __p1, 0, 2);
+ return __ret;
+}
+#else
+__ai poly64x2_t vzip1q_p64(poly64x2_t __p0, poly64x2_t __p1) {
+ poly64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
+ poly64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
+ poly64x2_t __ret;
+ __ret = __builtin_shufflevector(__rev0, __rev1, 0, 2);
+ __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai poly16x8_t vzip1q_p16(poly16x8_t __p0, poly16x8_t __p1) {
+ poly16x8_t __ret;
+ __ret = __builtin_shufflevector(__p0, __p1, 0, 8, 1, 9, 2, 10, 3, 11);
+ return __ret;
+}
+#else
+__ai poly16x8_t vzip1q_p16(poly16x8_t __p0, poly16x8_t __p1) {
+ poly16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
+ poly16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
+ poly16x8_t __ret;
+ __ret = __builtin_shufflevector(__rev0, __rev1, 0, 8, 1, 9, 2, 10, 3, 11);
+ __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint8x16_t vzip1q_u8(uint8x16_t __p0, uint8x16_t __p1) {
+ uint8x16_t __ret;
+ __ret = __builtin_shufflevector(__p0, __p1, 0, 16, 1, 17, 2, 18, 3, 19, 4, 20, 5, 21, 6, 22, 7, 23);
+ return __ret;
+}
+#else
+__ai uint8x16_t vzip1q_u8(uint8x16_t __p0, uint8x16_t __p1) {
+ uint8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
+ uint8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
+ uint8x16_t __ret;
+ __ret = __builtin_shufflevector(__rev0, __rev1, 0, 16, 1, 17, 2, 18, 3, 19, 4, 20, 5, 21, 6, 22, 7, 23);
+ __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint32x4_t vzip1q_u32(uint32x4_t __p0, uint32x4_t __p1) {
+ uint32x4_t __ret;
+ __ret = __builtin_shufflevector(__p0, __p1, 0, 4, 1, 5);
+ return __ret;
+}
+#else
+__ai uint32x4_t vzip1q_u32(uint32x4_t __p0, uint32x4_t __p1) {
+ uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
+ uint32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
+ uint32x4_t __ret;
+ __ret = __builtin_shufflevector(__rev0, __rev1, 0, 4, 1, 5);
+ __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint64x2_t vzip1q_u64(uint64x2_t __p0, uint64x2_t __p1) {
+ uint64x2_t __ret;
+ __ret = __builtin_shufflevector(__p0, __p1, 0, 2);
+ return __ret;
+}
+#else
+__ai uint64x2_t vzip1q_u64(uint64x2_t __p0, uint64x2_t __p1) {
+ uint64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
+ uint64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
+ uint64x2_t __ret;
+ __ret = __builtin_shufflevector(__rev0, __rev1, 0, 2);
+ __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint16x8_t vzip1q_u16(uint16x8_t __p0, uint16x8_t __p1) {
+ uint16x8_t __ret;
+ __ret = __builtin_shufflevector(__p0, __p1, 0, 8, 1, 9, 2, 10, 3, 11);
+ return __ret;
+}
+#else
+__ai uint16x8_t vzip1q_u16(uint16x8_t __p0, uint16x8_t __p1) {
+ uint16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
+ uint16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
+ uint16x8_t __ret;
+ __ret = __builtin_shufflevector(__rev0, __rev1, 0, 8, 1, 9, 2, 10, 3, 11);
+ __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai int8x16_t vzip1q_s8(int8x16_t __p0, int8x16_t __p1) {
+ int8x16_t __ret;
+ __ret = __builtin_shufflevector(__p0, __p1, 0, 16, 1, 17, 2, 18, 3, 19, 4, 20, 5, 21, 6, 22, 7, 23);
+ return __ret;
+}
+#else
+__ai int8x16_t vzip1q_s8(int8x16_t __p0, int8x16_t __p1) {
+ int8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
+ int8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
+ int8x16_t __ret;
+ __ret = __builtin_shufflevector(__rev0, __rev1, 0, 16, 1, 17, 2, 18, 3, 19, 4, 20, 5, 21, 6, 22, 7, 23);
+ __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai float64x2_t vzip1q_f64(float64x2_t __p0, float64x2_t __p1) {
+ float64x2_t __ret;
+ __ret = __builtin_shufflevector(__p0, __p1, 0, 2);
+ return __ret;
+}
+#else
+__ai float64x2_t vzip1q_f64(float64x2_t __p0, float64x2_t __p1) {
+ float64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
+ float64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
+ float64x2_t __ret;
+ __ret = __builtin_shufflevector(__rev0, __rev1, 0, 2);
+ __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai float32x4_t vzip1q_f32(float32x4_t __p0, float32x4_t __p1) {
+ float32x4_t __ret;
+ __ret = __builtin_shufflevector(__p0, __p1, 0, 4, 1, 5);
+ return __ret;
+}
+#else
+__ai float32x4_t vzip1q_f32(float32x4_t __p0, float32x4_t __p1) {
+ float32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
+ float32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
+ float32x4_t __ret;
+ __ret = __builtin_shufflevector(__rev0, __rev1, 0, 4, 1, 5);
+ __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai int32x4_t vzip1q_s32(int32x4_t __p0, int32x4_t __p1) {
+ int32x4_t __ret;
+ __ret = __builtin_shufflevector(__p0, __p1, 0, 4, 1, 5);
+ return __ret;
+}
+#else
+__ai int32x4_t vzip1q_s32(int32x4_t __p0, int32x4_t __p1) {
+ int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
+ int32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
+ int32x4_t __ret;
+ __ret = __builtin_shufflevector(__rev0, __rev1, 0, 4, 1, 5);
+ __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai int64x2_t vzip1q_s64(int64x2_t __p0, int64x2_t __p1) {
+ int64x2_t __ret;
+ __ret = __builtin_shufflevector(__p0, __p1, 0, 2);
+ return __ret;
+}
+#else
+__ai int64x2_t vzip1q_s64(int64x2_t __p0, int64x2_t __p1) {
+ int64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
+ int64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
+ int64x2_t __ret;
+ __ret = __builtin_shufflevector(__rev0, __rev1, 0, 2);
+ __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai int16x8_t vzip1q_s16(int16x8_t __p0, int16x8_t __p1) {
+ int16x8_t __ret;
+ __ret = __builtin_shufflevector(__p0, __p1, 0, 8, 1, 9, 2, 10, 3, 11);
+ return __ret;
+}
+#else
+__ai int16x8_t vzip1q_s16(int16x8_t __p0, int16x8_t __p1) {
+ int16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
+ int16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
+ int16x8_t __ret;
+ __ret = __builtin_shufflevector(__rev0, __rev1, 0, 8, 1, 9, 2, 10, 3, 11);
+ __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint8x8_t vzip1_u8(uint8x8_t __p0, uint8x8_t __p1) {
+ uint8x8_t __ret;
+ __ret = __builtin_shufflevector(__p0, __p1, 0, 8, 1, 9, 2, 10, 3, 11);
+ return __ret;
+}
+#else
+__ai uint8x8_t vzip1_u8(uint8x8_t __p0, uint8x8_t __p1) {
+ uint8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
+ uint8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
+ uint8x8_t __ret;
+ __ret = __builtin_shufflevector(__rev0, __rev1, 0, 8, 1, 9, 2, 10, 3, 11);
+ __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint32x2_t vzip1_u32(uint32x2_t __p0, uint32x2_t __p1) {
+ uint32x2_t __ret;
+ __ret = __builtin_shufflevector(__p0, __p1, 0, 2);
+ return __ret;
+}
+#else
+__ai uint32x2_t vzip1_u32(uint32x2_t __p0, uint32x2_t __p1) {
+ uint32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
+ uint32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
+ uint32x2_t __ret;
+ __ret = __builtin_shufflevector(__rev0, __rev1, 0, 2);
+ __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint16x4_t vzip1_u16(uint16x4_t __p0, uint16x4_t __p1) {
+ uint16x4_t __ret;
+ __ret = __builtin_shufflevector(__p0, __p1, 0, 4, 1, 5);
+ return __ret;
+}
+#else
+__ai uint16x4_t vzip1_u16(uint16x4_t __p0, uint16x4_t __p1) {
+ uint16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
+ uint16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
+ uint16x4_t __ret;
+ __ret = __builtin_shufflevector(__rev0, __rev1, 0, 4, 1, 5);
+ __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai int8x8_t vzip1_s8(int8x8_t __p0, int8x8_t __p1) {
+ int8x8_t __ret;
+ __ret = __builtin_shufflevector(__p0, __p1, 0, 8, 1, 9, 2, 10, 3, 11);
+ return __ret;
+}
+#else
+__ai int8x8_t vzip1_s8(int8x8_t __p0, int8x8_t __p1) {
+ int8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
+ int8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
+ int8x8_t __ret;
+ __ret = __builtin_shufflevector(__rev0, __rev1, 0, 8, 1, 9, 2, 10, 3, 11);
+ __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai float32x2_t vzip1_f32(float32x2_t __p0, float32x2_t __p1) {
+ float32x2_t __ret;
+ __ret = __builtin_shufflevector(__p0, __p1, 0, 2);
+ return __ret;
+}
+#else
+__ai float32x2_t vzip1_f32(float32x2_t __p0, float32x2_t __p1) {
+ float32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
+ float32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
+ float32x2_t __ret;
+ __ret = __builtin_shufflevector(__rev0, __rev1, 0, 2);
+ __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai int32x2_t vzip1_s32(int32x2_t __p0, int32x2_t __p1) {
+ int32x2_t __ret;
+ __ret = __builtin_shufflevector(__p0, __p1, 0, 2);
+ return __ret;
+}
+#else
+__ai int32x2_t vzip1_s32(int32x2_t __p0, int32x2_t __p1) {
+ int32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
+ int32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
+ int32x2_t __ret;
+ __ret = __builtin_shufflevector(__rev0, __rev1, 0, 2);
+ __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai int16x4_t vzip1_s16(int16x4_t __p0, int16x4_t __p1) {
+ int16x4_t __ret;
+ __ret = __builtin_shufflevector(__p0, __p1, 0, 4, 1, 5);
+ return __ret;
+}
+#else
+__ai int16x4_t vzip1_s16(int16x4_t __p0, int16x4_t __p1) {
+ int16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
+ int16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
+ int16x4_t __ret;
+ __ret = __builtin_shufflevector(__rev0, __rev1, 0, 4, 1, 5);
+ __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
+ return __ret;
+}
+#endif
+
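+/* vzip2 family: the counterpart of vzip1, interleaving the high halves,
+ * i.e. {p0[n/2], p1[n/2], ..., p0[n-1], p1[n-1]} for n lanes. With the
+ * vectors from the example above, vzip2_s16(a, b) yields {2, 12, 3, 13}. */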
+#ifdef __LITTLE_ENDIAN__
+__ai poly8x8_t vzip2_p8(poly8x8_t __p0, poly8x8_t __p1) {
+ poly8x8_t __ret;
+ __ret = __builtin_shufflevector(__p0, __p1, 4, 12, 5, 13, 6, 14, 7, 15);
+ return __ret;
+}
+#else
+__ai poly8x8_t vzip2_p8(poly8x8_t __p0, poly8x8_t __p1) {
+ poly8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
+ poly8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
+ poly8x8_t __ret;
+ __ret = __builtin_shufflevector(__rev0, __rev1, 4, 12, 5, 13, 6, 14, 7, 15);
+ __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai poly16x4_t vzip2_p16(poly16x4_t __p0, poly16x4_t __p1) {
+ poly16x4_t __ret;
+ __ret = __builtin_shufflevector(__p0, __p1, 2, 6, 3, 7);
+ return __ret;
+}
+#else
+__ai poly16x4_t vzip2_p16(poly16x4_t __p0, poly16x4_t __p1) {
+ poly16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
+ poly16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
+ poly16x4_t __ret;
+ __ret = __builtin_shufflevector(__rev0, __rev1, 2, 6, 3, 7);
+ __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai poly8x16_t vzip2q_p8(poly8x16_t __p0, poly8x16_t __p1) {
+ poly8x16_t __ret;
+ __ret = __builtin_shufflevector(__p0, __p1, 8, 24, 9, 25, 10, 26, 11, 27, 12, 28, 13, 29, 14, 30, 15, 31);
+ return __ret;
+}
+#else
+__ai poly8x16_t vzip2q_p8(poly8x16_t __p0, poly8x16_t __p1) {
+ poly8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
+ poly8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
+ poly8x16_t __ret;
+ __ret = __builtin_shufflevector(__rev0, __rev1, 8, 24, 9, 25, 10, 26, 11, 27, 12, 28, 13, 29, 14, 30, 15, 31);
+ __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai poly64x2_t vzip2q_p64(poly64x2_t __p0, poly64x2_t __p1) {
+ poly64x2_t __ret;
+ __ret = __builtin_shufflevector(__p0, __p1, 1, 3);
+ return __ret;
+}
+#else
+__ai poly64x2_t vzip2q_p64(poly64x2_t __p0, poly64x2_t __p1) {
+ poly64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
+ poly64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
+ poly64x2_t __ret;
+ __ret = __builtin_shufflevector(__rev0, __rev1, 1, 3);
+ __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai poly16x8_t vzip2q_p16(poly16x8_t __p0, poly16x8_t __p1) {
+ poly16x8_t __ret;
+ __ret = __builtin_shufflevector(__p0, __p1, 4, 12, 5, 13, 6, 14, 7, 15);
+ return __ret;
+}
+#else
+__ai poly16x8_t vzip2q_p16(poly16x8_t __p0, poly16x8_t __p1) {
+ poly16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
+ poly16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
+ poly16x8_t __ret;
+ __ret = __builtin_shufflevector(__rev0, __rev1, 4, 12, 5, 13, 6, 14, 7, 15);
+ __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint8x16_t vzip2q_u8(uint8x16_t __p0, uint8x16_t __p1) {
+ uint8x16_t __ret;
+ __ret = __builtin_shufflevector(__p0, __p1, 8, 24, 9, 25, 10, 26, 11, 27, 12, 28, 13, 29, 14, 30, 15, 31);
+ return __ret;
+}
+#else
+__ai uint8x16_t vzip2q_u8(uint8x16_t __p0, uint8x16_t __p1) {
+ uint8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
+ uint8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
+ uint8x16_t __ret;
+ __ret = __builtin_shufflevector(__rev0, __rev1, 8, 24, 9, 25, 10, 26, 11, 27, 12, 28, 13, 29, 14, 30, 15, 31);
+ __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint32x4_t vzip2q_u32(uint32x4_t __p0, uint32x4_t __p1) {
+ uint32x4_t __ret;
+ __ret = __builtin_shufflevector(__p0, __p1, 2, 6, 3, 7);
+ return __ret;
+}
+#else
+__ai uint32x4_t vzip2q_u32(uint32x4_t __p0, uint32x4_t __p1) {
+ uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
+ uint32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
+ uint32x4_t __ret;
+ __ret = __builtin_shufflevector(__rev0, __rev1, 2, 6, 3, 7);
+ __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint64x2_t vzip2q_u64(uint64x2_t __p0, uint64x2_t __p1) {
+ uint64x2_t __ret;
+ __ret = __builtin_shufflevector(__p0, __p1, 1, 3);
+ return __ret;
+}
+#else
+__ai uint64x2_t vzip2q_u64(uint64x2_t __p0, uint64x2_t __p1) {
+ uint64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
+ uint64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
+ uint64x2_t __ret;
+ __ret = __builtin_shufflevector(__rev0, __rev1, 1, 3);
+ __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint16x8_t vzip2q_u16(uint16x8_t __p0, uint16x8_t __p1) {
+ uint16x8_t __ret;
+ __ret = __builtin_shufflevector(__p0, __p1, 4, 12, 5, 13, 6, 14, 7, 15);
+ return __ret;
+}
+#else
+__ai uint16x8_t vzip2q_u16(uint16x8_t __p0, uint16x8_t __p1) {
+ uint16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
+ uint16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
+ uint16x8_t __ret;
+ __ret = __builtin_shufflevector(__rev0, __rev1, 4, 12, 5, 13, 6, 14, 7, 15);
+ __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai int8x16_t vzip2q_s8(int8x16_t __p0, int8x16_t __p1) {
+ int8x16_t __ret;
+ __ret = __builtin_shufflevector(__p0, __p1, 8, 24, 9, 25, 10, 26, 11, 27, 12, 28, 13, 29, 14, 30, 15, 31);
+ return __ret;
+}
+#else
+__ai int8x16_t vzip2q_s8(int8x16_t __p0, int8x16_t __p1) {
+ int8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
+ int8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
+ int8x16_t __ret;
+ __ret = __builtin_shufflevector(__rev0, __rev1, 8, 24, 9, 25, 10, 26, 11, 27, 12, 28, 13, 29, 14, 30, 15, 31);
+ __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai float64x2_t vzip2q_f64(float64x2_t __p0, float64x2_t __p1) {
+ float64x2_t __ret;
+ __ret = __builtin_shufflevector(__p0, __p1, 1, 3);
+ return __ret;
+}
+#else
+__ai float64x2_t vzip2q_f64(float64x2_t __p0, float64x2_t __p1) {
+ float64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
+ float64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
+ float64x2_t __ret;
+ __ret = __builtin_shufflevector(__rev0, __rev1, 1, 3);
+ __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai float32x4_t vzip2q_f32(float32x4_t __p0, float32x4_t __p1) {
+ float32x4_t __ret;
+ __ret = __builtin_shufflevector(__p0, __p1, 2, 6, 3, 7);
+ return __ret;
+}
+#else
+__ai float32x4_t vzip2q_f32(float32x4_t __p0, float32x4_t __p1) {
+ float32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
+ float32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
+ float32x4_t __ret;
+ __ret = __builtin_shufflevector(__rev0, __rev1, 2, 6, 3, 7);
+ __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai int32x4_t vzip2q_s32(int32x4_t __p0, int32x4_t __p1) {
+ int32x4_t __ret;
+ __ret = __builtin_shufflevector(__p0, __p1, 2, 6, 3, 7);
+ return __ret;
+}
+#else
+__ai int32x4_t vzip2q_s32(int32x4_t __p0, int32x4_t __p1) {
+ int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
+ int32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
+ int32x4_t __ret;
+ __ret = __builtin_shufflevector(__rev0, __rev1, 2, 6, 3, 7);
+ __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai int64x2_t vzip2q_s64(int64x2_t __p0, int64x2_t __p1) {
+ int64x2_t __ret;
+ __ret = __builtin_shufflevector(__p0, __p1, 1, 3);
+ return __ret;
+}
+#else
+__ai int64x2_t vzip2q_s64(int64x2_t __p0, int64x2_t __p1) {
+ int64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
+ int64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
+ int64x2_t __ret;
+ __ret = __builtin_shufflevector(__rev0, __rev1, 1, 3);
+ __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai int16x8_t vzip2q_s16(int16x8_t __p0, int16x8_t __p1) {
+ int16x8_t __ret;
+ __ret = __builtin_shufflevector(__p0, __p1, 4, 12, 5, 13, 6, 14, 7, 15);
+ return __ret;
+}
+#else
+__ai int16x8_t vzip2q_s16(int16x8_t __p0, int16x8_t __p1) {
+ int16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
+ int16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
+ int16x8_t __ret;
+ __ret = __builtin_shufflevector(__rev0, __rev1, 4, 12, 5, 13, 6, 14, 7, 15);
+ __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint8x8_t vzip2_u8(uint8x8_t __p0, uint8x8_t __p1) {
+ uint8x8_t __ret;
+ __ret = __builtin_shufflevector(__p0, __p1, 4, 12, 5, 13, 6, 14, 7, 15);
+ return __ret;
+}
+#else
+__ai uint8x8_t vzip2_u8(uint8x8_t __p0, uint8x8_t __p1) {
+ uint8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
+ uint8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
+ uint8x8_t __ret;
+ __ret = __builtin_shufflevector(__rev0, __rev1, 4, 12, 5, 13, 6, 14, 7, 15);
+ __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint32x2_t vzip2_u32(uint32x2_t __p0, uint32x2_t __p1) {
+ uint32x2_t __ret;
+ __ret = __builtin_shufflevector(__p0, __p1, 1, 3);
+ return __ret;
+}
+#else
+__ai uint32x2_t vzip2_u32(uint32x2_t __p0, uint32x2_t __p1) {
+ uint32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
+ uint32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
+ uint32x2_t __ret;
+ __ret = __builtin_shufflevector(__rev0, __rev1, 1, 3);
+ __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint16x4_t vzip2_u16(uint16x4_t __p0, uint16x4_t __p1) {
+ uint16x4_t __ret;
+ __ret = __builtin_shufflevector(__p0, __p1, 2, 6, 3, 7);
+ return __ret;
+}
+#else
+__ai uint16x4_t vzip2_u16(uint16x4_t __p0, uint16x4_t __p1) {
+ uint16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
+ uint16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
+ uint16x4_t __ret;
+ __ret = __builtin_shufflevector(__rev0, __rev1, 2, 6, 3, 7);
+ __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai int8x8_t vzip2_s8(int8x8_t __p0, int8x8_t __p1) {
+ int8x8_t __ret;
+ __ret = __builtin_shufflevector(__p0, __p1, 4, 12, 5, 13, 6, 14, 7, 15);
+ return __ret;
+}
+#else
+__ai int8x8_t vzip2_s8(int8x8_t __p0, int8x8_t __p1) {
+ int8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
+ int8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
+ int8x8_t __ret;
+ __ret = __builtin_shufflevector(__rev0, __rev1, 4, 12, 5, 13, 6, 14, 7, 15);
+ __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai float32x2_t vzip2_f32(float32x2_t __p0, float32x2_t __p1) {
+ float32x2_t __ret;
+ __ret = __builtin_shufflevector(__p0, __p1, 1, 3);
+ return __ret;
+}
+#else
+__ai float32x2_t vzip2_f32(float32x2_t __p0, float32x2_t __p1) {
+ float32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
+ float32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
+ float32x2_t __ret;
+ __ret = __builtin_shufflevector(__rev0, __rev1, 1, 3);
+ __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai int32x2_t vzip2_s32(int32x2_t __p0, int32x2_t __p1) {
+ int32x2_t __ret;
+ __ret = __builtin_shufflevector(__p0, __p1, 1, 3);
+ return __ret;
+}
+#else
+__ai int32x2_t vzip2_s32(int32x2_t __p0, int32x2_t __p1) {
+ int32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
+ int32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
+ int32x2_t __ret;
+ __ret = __builtin_shufflevector(__rev0, __rev1, 1, 3);
+ __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai int16x4_t vzip2_s16(int16x4_t __p0, int16x4_t __p1) {
+ int16x4_t __ret;
+ __ret = __builtin_shufflevector(__p0, __p1, 2, 6, 3, 7);
+ return __ret;
+}
+#else
+__ai int16x4_t vzip2_s16(int16x4_t __p0, int16x4_t __p1) {
+ int16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
+ int16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
+ int16x4_t __ret;
+ __ret = __builtin_shufflevector(__rev0, __rev1, 2, 6, 3, 7);
+ __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
+ return __ret;
+}
+#endif
+
+#endif
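+/* vaba family: absolute-difference-and-accumulate. Each lane computes
+ * __p0 + |__p1 - __p2|, built here from vabd plus a vector add.
+ * Illustrative example:
+ *
+ *   int16x4_t acc = {100, 100, 100, 100};
+ *   int16x4_t x = {1, 5, 9, 2}, y = {4, 3, 9, 7};
+ *   int16x4_t r = vaba_s16(acc, x, y);   // {103, 102, 100, 105}
+ */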
+#ifdef __LITTLE_ENDIAN__
+__ai uint8x16_t vabaq_u8(uint8x16_t __p0, uint8x16_t __p1, uint8x16_t __p2) {
+ uint8x16_t __ret;
+ __ret = __p0 + vabdq_u8(__p1, __p2);
+ return __ret;
+}
+#else
+__ai uint8x16_t vabaq_u8(uint8x16_t __p0, uint8x16_t __p1, uint8x16_t __p2) {
+ uint8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
+ uint8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
+ uint8x16_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
+ uint8x16_t __ret;
+ __ret = __rev0 + __noswap_vabdq_u8(__rev1, __rev2);
+ __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint32x4_t vabaq_u32(uint32x4_t __p0, uint32x4_t __p1, uint32x4_t __p2) {
+ uint32x4_t __ret;
+ __ret = __p0 + vabdq_u32(__p1, __p2);
+ return __ret;
+}
+#else
+__ai uint32x4_t vabaq_u32(uint32x4_t __p0, uint32x4_t __p1, uint32x4_t __p2) {
+ uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
+ uint32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
+ uint32x4_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 3, 2, 1, 0);
+ uint32x4_t __ret;
+ __ret = __rev0 + __noswap_vabdq_u32(__rev1, __rev2);
+ __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint16x8_t vabaq_u16(uint16x8_t __p0, uint16x8_t __p1, uint16x8_t __p2) {
+ uint16x8_t __ret;
+ __ret = __p0 + vabdq_u16(__p1, __p2);
+ return __ret;
+}
+#else
+__ai uint16x8_t vabaq_u16(uint16x8_t __p0, uint16x8_t __p1, uint16x8_t __p2) {
+ uint16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
+ uint16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
+ uint16x8_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0);
+ uint16x8_t __ret;
+ __ret = __rev0 + __noswap_vabdq_u16(__rev1, __rev2);
+ __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai int8x16_t vabaq_s8(int8x16_t __p0, int8x16_t __p1, int8x16_t __p2) {
+ int8x16_t __ret;
+ __ret = __p0 + vabdq_s8(__p1, __p2);
+ return __ret;
+}
+#else
+__ai int8x16_t vabaq_s8(int8x16_t __p0, int8x16_t __p1, int8x16_t __p2) {
+ int8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
+ int8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
+ int8x16_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
+ int8x16_t __ret;
+ __ret = __rev0 + __noswap_vabdq_s8(__rev1, __rev2);
+ __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai int32x4_t vabaq_s32(int32x4_t __p0, int32x4_t __p1, int32x4_t __p2) {
+ int32x4_t __ret;
+ __ret = __p0 + vabdq_s32(__p1, __p2);
+ return __ret;
+}
+#else
+__ai int32x4_t vabaq_s32(int32x4_t __p0, int32x4_t __p1, int32x4_t __p2) {
+ int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
+ int32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
+ int32x4_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 3, 2, 1, 0);
+ int32x4_t __ret;
+ __ret = __rev0 + __noswap_vabdq_s32(__rev1, __rev2);
+ __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai int16x8_t vabaq_s16(int16x8_t __p0, int16x8_t __p1, int16x8_t __p2) {
+ int16x8_t __ret;
+ __ret = __p0 + vabdq_s16(__p1, __p2);
+ return __ret;
+}
+#else
+__ai int16x8_t vabaq_s16(int16x8_t __p0, int16x8_t __p1, int16x8_t __p2) {
+ int16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
+ int16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
+ int16x8_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0);
+ int16x8_t __ret;
+ __ret = __rev0 + __noswap_vabdq_s16(__rev1, __rev2);
+ __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint8x8_t vaba_u8(uint8x8_t __p0, uint8x8_t __p1, uint8x8_t __p2) {
+ uint8x8_t __ret;
+ __ret = __p0 + vabd_u8(__p1, __p2);
+ return __ret;
+}
+#else
+__ai uint8x8_t vaba_u8(uint8x8_t __p0, uint8x8_t __p1, uint8x8_t __p2) {
+ uint8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
+ uint8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
+ uint8x8_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0);
+ uint8x8_t __ret;
+ __ret = __rev0 + __noswap_vabd_u8(__rev1, __rev2);
+ __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint32x2_t vaba_u32(uint32x2_t __p0, uint32x2_t __p1, uint32x2_t __p2) {
+ uint32x2_t __ret;
+ __ret = __p0 + vabd_u32(__p1, __p2);
+ return __ret;
+}
+#else
+__ai uint32x2_t vaba_u32(uint32x2_t __p0, uint32x2_t __p1, uint32x2_t __p2) {
+ uint32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
+ uint32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
+ uint32x2_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 1, 0);
+ uint32x2_t __ret;
+ __ret = __rev0 + __noswap_vabd_u32(__rev1, __rev2);
+ __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint16x4_t vaba_u16(uint16x4_t __p0, uint16x4_t __p1, uint16x4_t __p2) {
+ uint16x4_t __ret;
+ __ret = __p0 + vabd_u16(__p1, __p2);
+ return __ret;
+}
+#else
+__ai uint16x4_t vaba_u16(uint16x4_t __p0, uint16x4_t __p1, uint16x4_t __p2) {
+ uint16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
+ uint16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
+ uint16x4_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 3, 2, 1, 0);
+ uint16x4_t __ret;
+ __ret = __rev0 + __noswap_vabd_u16(__rev1, __rev2);
+ __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai int8x8_t vaba_s8(int8x8_t __p0, int8x8_t __p1, int8x8_t __p2) {
+ int8x8_t __ret;
+ __ret = __p0 + vabd_s8(__p1, __p2);
+ return __ret;
+}
+#else
+__ai int8x8_t vaba_s8(int8x8_t __p0, int8x8_t __p1, int8x8_t __p2) {
+ int8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
+ int8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
+ int8x8_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0);
+ int8x8_t __ret;
+ __ret = __rev0 + __noswap_vabd_s8(__rev1, __rev2);
+ __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai int32x2_t vaba_s32(int32x2_t __p0, int32x2_t __p1, int32x2_t __p2) {
+ int32x2_t __ret;
+ __ret = __p0 + vabd_s32(__p1, __p2);
+ return __ret;
+}
+#else
+__ai int32x2_t vaba_s32(int32x2_t __p0, int32x2_t __p1, int32x2_t __p2) {
+ int32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
+ int32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
+ int32x2_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 1, 0);
+ int32x2_t __ret;
+ __ret = __rev0 + __noswap_vabd_s32(__rev1, __rev2);
+ __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai int16x4_t vaba_s16(int16x4_t __p0, int16x4_t __p1, int16x4_t __p2) {
+ int16x4_t __ret;
+ __ret = __p0 + vabd_s16(__p1, __p2);
+ return __ret;
+}
+#else
+__ai int16x4_t vaba_s16(int16x4_t __p0, int16x4_t __p1, int16x4_t __p2) {
+ int16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
+ int16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
+ int16x4_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 3, 2, 1, 0);
+ int16x4_t __ret;
+ __ret = __rev0 + __noswap_vabd_s16(__rev1, __rev2);
+ __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
+ return __ret;
+}
+#endif
+
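+/* vabdl family: widening absolute difference. The absolute difference of
+ * two n-bit lanes always fits in n unsigned bits, which is why the code
+ * zero-extends with vmovl_u* even for the signed variants: e.g.
+ * vabdl_s8 on lanes -128 and 127 yields 255 in the int16 result. */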
+#ifdef __LITTLE_ENDIAN__
+__ai uint16x8_t vabdl_u8(uint8x8_t __p0, uint8x8_t __p1) {
+ uint16x8_t __ret;
+ __ret = (uint16x8_t)(vmovl_u8((uint8x8_t)(vabd_u8(__p0, __p1))));
+ return __ret;
+}
+#else
+__ai uint16x8_t vabdl_u8(uint8x8_t __p0, uint8x8_t __p1) {
+ uint8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
+ uint8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
+ uint16x8_t __ret;
+ __ret = (uint16x8_t)(__noswap_vmovl_u8((uint8x8_t)(__noswap_vabd_u8(__rev0, __rev1))));
+ __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
+ return __ret;
+}
+__ai uint16x8_t __noswap_vabdl_u8(uint8x8_t __p0, uint8x8_t __p1) {
+ uint16x8_t __ret;
+ __ret = (uint16x8_t)(__noswap_vmovl_u8((uint8x8_t)(__noswap_vabd_u8(__p0, __p1))));
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint64x2_t vabdl_u32(uint32x2_t __p0, uint32x2_t __p1) {
+ uint64x2_t __ret;
+ __ret = (uint64x2_t)(vmovl_u32((uint32x2_t)(vabd_u32(__p0, __p1))));
+ return __ret;
+}
+#else
+__ai uint64x2_t vabdl_u32(uint32x2_t __p0, uint32x2_t __p1) {
+ uint32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
+ uint32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
+ uint64x2_t __ret;
+ __ret = (uint64x2_t)(__noswap_vmovl_u32((uint32x2_t)(__noswap_vabd_u32(__rev0, __rev1))));
+ __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
+ return __ret;
+}
+__ai uint64x2_t __noswap_vabdl_u32(uint32x2_t __p0, uint32x2_t __p1) {
+ uint64x2_t __ret;
+ __ret = (uint64x2_t)(__noswap_vmovl_u32((uint32x2_t)(__noswap_vabd_u32(__p0, __p1))));
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint32x4_t vabdl_u16(uint16x4_t __p0, uint16x4_t __p1) {
+ uint32x4_t __ret;
+ __ret = (uint32x4_t)(vmovl_u16((uint16x4_t)(vabd_u16(__p0, __p1))));
+ return __ret;
+}
+#else
+__ai uint32x4_t vabdl_u16(uint16x4_t __p0, uint16x4_t __p1) {
+ uint16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
+ uint16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
+ uint32x4_t __ret;
+ __ret = (uint32x4_t)(__noswap_vmovl_u16((uint16x4_t)(__noswap_vabd_u16(__rev0, __rev1))));
+ __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
+ return __ret;
+}
+__ai uint32x4_t __noswap_vabdl_u16(uint16x4_t __p0, uint16x4_t __p1) {
+ uint32x4_t __ret;
+ __ret = (uint32x4_t)(__noswap_vmovl_u16((uint16x4_t)(__noswap_vabd_u16(__p0, __p1))));
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai int16x8_t vabdl_s8(int8x8_t __p0, int8x8_t __p1) {
+ int16x8_t __ret;
+ __ret = (int16x8_t)(vmovl_u8((uint8x8_t)(vabd_s8(__p0, __p1))));
+ return __ret;
+}
+#else
+__ai int16x8_t vabdl_s8(int8x8_t __p0, int8x8_t __p1) {
+ int8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
+ int8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
+ int16x8_t __ret;
+ __ret = (int16x8_t)(__noswap_vmovl_u8((uint8x8_t)(__noswap_vabd_s8(__rev0, __rev1))));
+ __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
+ return __ret;
+}
+__ai int16x8_t __noswap_vabdl_s8(int8x8_t __p0, int8x8_t __p1) {
+ int16x8_t __ret;
+ __ret = (int16x8_t)(__noswap_vmovl_u8((uint8x8_t)(__noswap_vabd_s8(__p0, __p1))));
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai int64x2_t vabdl_s32(int32x2_t __p0, int32x2_t __p1) {
+ int64x2_t __ret;
+ __ret = (int64x2_t)(vmovl_u32((uint32x2_t)(vabd_s32(__p0, __p1))));
+ return __ret;
+}
+#else
+__ai int64x2_t vabdl_s32(int32x2_t __p0, int32x2_t __p1) {
+ int32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
+ int32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
+ int64x2_t __ret;
+ __ret = (int64x2_t)(__noswap_vmovl_u32((uint32x2_t)(__noswap_vabd_s32(__rev0, __rev1))));
+ __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
+ return __ret;
+}
+__ai int64x2_t __noswap_vabdl_s32(int32x2_t __p0, int32x2_t __p1) {
+ int64x2_t __ret;
+ __ret = (int64x2_t)(__noswap_vmovl_u32((uint32x2_t)(__noswap_vabd_s32(__p0, __p1))));
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai int32x4_t vabdl_s16(int16x4_t __p0, int16x4_t __p1) {
+ int32x4_t __ret;
+ __ret = (int32x4_t)(vmovl_u16((uint16x4_t)(vabd_s16(__p0, __p1))));
+ return __ret;
+}
+#else
+__ai int32x4_t vabdl_s16(int16x4_t __p0, int16x4_t __p1) {
+ int16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
+ int16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
+ int32x4_t __ret;
+ __ret = (int32x4_t)(__noswap_vmovl_u16((uint16x4_t)(__noswap_vabd_s16(__rev0, __rev1))));
+ __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
+ return __ret;
+}
+__ai int32x4_t __noswap_vabdl_s16(int16x4_t __p0, int16x4_t __p1) {
+ int32x4_t __ret;
+ __ret = (int32x4_t)(__noswap_vmovl_u16((uint16x4_t)(__noswap_vabd_s16(__p0, __p1))));
+ return __ret;
+}
+#endif
+
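+/* vaddl family: long (widening) add. Both operands are widened with
+ * vmovl before the add, so e.g. vaddl_u8 on two lanes of 200 gives 400
+ * in the uint16 result instead of wrapping to 144. */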
+#ifdef __LITTLE_ENDIAN__
+__ai uint16x8_t vaddl_u8(uint8x8_t __p0, uint8x8_t __p1) {
+ uint16x8_t __ret;
+ __ret = vmovl_u8(__p0) + vmovl_u8(__p1);
+ return __ret;
+}
+#else
+__ai uint16x8_t vaddl_u8(uint8x8_t __p0, uint8x8_t __p1) {
+ uint8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
+ uint8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
+ uint16x8_t __ret;
+ __ret = __noswap_vmovl_u8(__rev0) + __noswap_vmovl_u8(__rev1);
+ __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint64x2_t vaddl_u32(uint32x2_t __p0, uint32x2_t __p1) {
+ uint64x2_t __ret;
+ __ret = vmovl_u32(__p0) + vmovl_u32(__p1);
+ return __ret;
+}
+#else
+__ai uint64x2_t vaddl_u32(uint32x2_t __p0, uint32x2_t __p1) {
+ uint32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
+ uint32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
+ uint64x2_t __ret;
+ __ret = __noswap_vmovl_u32(__rev0) + __noswap_vmovl_u32(__rev1);
+ __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint32x4_t vaddl_u16(uint16x4_t __p0, uint16x4_t __p1) {
+ uint32x4_t __ret;
+ __ret = vmovl_u16(__p0) + vmovl_u16(__p1);
+ return __ret;
+}
+#else
+__ai uint32x4_t vaddl_u16(uint16x4_t __p0, uint16x4_t __p1) {
+ uint16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
+ uint16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
+ uint32x4_t __ret;
+ __ret = __noswap_vmovl_u16(__rev0) + __noswap_vmovl_u16(__rev1);
+ __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai int16x8_t vaddl_s8(int8x8_t __p0, int8x8_t __p1) {
+ int16x8_t __ret;
+ __ret = vmovl_s8(__p0) + vmovl_s8(__p1);
+ return __ret;
+}
+#else
+__ai int16x8_t vaddl_s8(int8x8_t __p0, int8x8_t __p1) {
+ int8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
+ int8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
+ int16x8_t __ret;
+ __ret = __noswap_vmovl_s8(__rev0) + __noswap_vmovl_s8(__rev1);
+ __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai int64x2_t vaddl_s32(int32x2_t __p0, int32x2_t __p1) {
+ int64x2_t __ret;
+ __ret = vmovl_s32(__p0) + vmovl_s32(__p1);
+ return __ret;
+}
+#else
+__ai int64x2_t vaddl_s32(int32x2_t __p0, int32x2_t __p1) {
+ int32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
+ int32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
+ int64x2_t __ret;
+ __ret = __noswap_vmovl_s32(__rev0) + __noswap_vmovl_s32(__rev1);
+ __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai int32x4_t vaddl_s16(int16x4_t __p0, int16x4_t __p1) {
+ int32x4_t __ret;
+ __ret = vmovl_s16(__p0) + vmovl_s16(__p1);
+ return __ret;
+}
+#else
+__ai int32x4_t vaddl_s16(int16x4_t __p0, int16x4_t __p1) {
+ int16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
+ int16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
+ int32x4_t __ret;
+ __ret = __noswap_vmovl_s16(__rev0) + __noswap_vmovl_s16(__rev1);
+ __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
+ return __ret;
+}
+#endif
+
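+/* vaddw family: wide add. Only the second operand is widened; the first
+ * already has the wide type, e.g. uint16x8_t plus a widened uint8x8_t. */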
+#ifdef __LITTLE_ENDIAN__
+__ai uint16x8_t vaddw_u8(uint16x8_t __p0, uint8x8_t __p1) {
+ uint16x8_t __ret;
+ __ret = __p0 + vmovl_u8(__p1);
+ return __ret;
+}
+#else
+__ai uint16x8_t vaddw_u8(uint16x8_t __p0, uint8x8_t __p1) {
+ uint16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
+ uint8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
+ uint16x8_t __ret;
+ __ret = __rev0 + __noswap_vmovl_u8(__rev1);
+ __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint64x2_t vaddw_u32(uint64x2_t __p0, uint32x2_t __p1) {
+ uint64x2_t __ret;
+ __ret = __p0 + vmovl_u32(__p1);
+ return __ret;
+}
+#else
+__ai uint64x2_t vaddw_u32(uint64x2_t __p0, uint32x2_t __p1) {
+ uint64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
+ uint32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
+ uint64x2_t __ret;
+ __ret = __rev0 + __noswap_vmovl_u32(__rev1);
+ __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint32x4_t vaddw_u16(uint32x4_t __p0, uint16x4_t __p1) {
+ uint32x4_t __ret;
+ __ret = __p0 + vmovl_u16(__p1);
+ return __ret;
+}
+#else
+__ai uint32x4_t vaddw_u16(uint32x4_t __p0, uint16x4_t __p1) {
+ uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
+ uint16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
+ uint32x4_t __ret;
+ __ret = __rev0 + __noswap_vmovl_u16(__rev1);
+ __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai int16x8_t vaddw_s8(int16x8_t __p0, int8x8_t __p1) {
+ int16x8_t __ret;
+ __ret = __p0 + vmovl_s8(__p1);
+ return __ret;
+}
+#else
+__ai int16x8_t vaddw_s8(int16x8_t __p0, int8x8_t __p1) {
+ int16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
+ int8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
+ int16x8_t __ret;
+ __ret = __rev0 + __noswap_vmovl_s8(__rev1);
+ __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai int64x2_t vaddw_s32(int64x2_t __p0, int32x2_t __p1) {
+ int64x2_t __ret;
+ __ret = __p0 + vmovl_s32(__p1);
+ return __ret;
+}
+#else
+__ai int64x2_t vaddw_s32(int64x2_t __p0, int32x2_t __p1) {
+ int64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
+ int32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
+ int64x2_t __ret;
+ __ret = __rev0 + __noswap_vmovl_s32(__rev1);
+ __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai int32x4_t vaddw_s16(int32x4_t __p0, int16x4_t __p1) {
+ int32x4_t __ret;
+ __ret = __p0 + vmovl_s16(__p1);
+ return __ret;
+}
+#else
+__ai int32x4_t vaddw_s16(int32x4_t __p0, int16x4_t __p1) {
+ int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
+ int16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
+ int32x4_t __ret;
+ __ret = __rev0 + __noswap_vmovl_s16(__rev1);
+ __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
+ return __ret;
+}
+#endif
+
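+/* vget_lane_f16 / vgetq_lane_f16: there is no direct float16 lane
+ * extract here, so these macros reinterpret the vector as int16 lanes,
+ * extract with vget(q)_lane_s16, and bit-cast the scalar result back to
+ * float16_t through a pointer cast. */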
+#ifdef __LITTLE_ENDIAN__
+#define vget_lane_f16(__p0_242, __p1_242) __extension__ ({ \
+ float16x4_t __s0_242 = __p0_242; \
+ float16_t __ret_242; \
+ float16x4_t __reint_242 = __s0_242; \
+ int16_t __reint1_242 = vget_lane_s16(*(int16x4_t *) &__reint_242, __p1_242); \
+ __ret_242 = *(float16_t *) &__reint1_242; \
+ __ret_242; \
+})
+#else
+#define vget_lane_f16(__p0_243, __p1_243) __extension__ ({ \
+ float16x4_t __s0_243 = __p0_243; \
+ float16x4_t __rev0_243; __rev0_243 = __builtin_shufflevector(__s0_243, __s0_243, 3, 2, 1, 0); \
+ float16_t __ret_243; \
+ float16x4_t __reint_243 = __rev0_243; \
+ int16_t __reint1_243 = __noswap_vget_lane_s16(*(int16x4_t *) &__reint_243, __p1_243); \
+ __ret_243 = *(float16_t *) &__reint1_243; \
+ __ret_243; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vgetq_lane_f16(__p0_244, __p1_244) __extension__ ({ \
+ float16x8_t __s0_244 = __p0_244; \
+ float16_t __ret_244; \
+ float16x8_t __reint_244 = __s0_244; \
+ int16_t __reint1_244 = vgetq_lane_s16(*(int16x8_t *) &__reint_244, __p1_244); \
+ __ret_244 = *(float16_t *) &__reint1_244; \
+ __ret_244; \
+})
+#else
+#define vgetq_lane_f16(__p0_245, __p1_245) __extension__ ({ \
+ float16x8_t __s0_245 = __p0_245; \
+ float16x8_t __rev0_245; __rev0_245 = __builtin_shufflevector(__s0_245, __s0_245, 7, 6, 5, 4, 3, 2, 1, 0); \
+ float16_t __ret_245; \
+ float16x8_t __reint_245 = __rev0_245; \
+ int16_t __reint1_245 = __noswap_vgetq_lane_s16(*(int16x8_t *) &__reint_245, __p1_245); \
+ __ret_245 = *(float16_t *) &__reint1_245; \
+ __ret_245; \
+})
+#endif
+
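+/* vmlal family: widening multiply-accumulate, __p0 + vmull(__p1, __p2).
+ * Illustrative example:
+ *
+ *   int32x4_t acc = {0, 0, 0, 0};
+ *   int16x4_t x = {1, 2, 3, 4}, y = {10, 10, 10, 10};
+ *   int32x4_t r = vmlal_s16(acc, x, y);   // {10, 20, 30, 40}
+ */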
+#ifdef __LITTLE_ENDIAN__
+__ai uint16x8_t vmlal_u8(uint16x8_t __p0, uint8x8_t __p1, uint8x8_t __p2) {
+ uint16x8_t __ret;
+ __ret = __p0 + vmull_u8(__p1, __p2);
+ return __ret;
+}
+#else
+__ai uint16x8_t vmlal_u8(uint16x8_t __p0, uint8x8_t __p1, uint8x8_t __p2) {
+ uint16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
+ uint8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
+ uint8x8_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0);
+ uint16x8_t __ret;
+ __ret = __rev0 + __noswap_vmull_u8(__rev1, __rev2);
+ __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
+ return __ret;
+}
+__ai uint16x8_t __noswap_vmlal_u8(uint16x8_t __p0, uint8x8_t __p1, uint8x8_t __p2) {
+ uint16x8_t __ret;
+ __ret = __p0 + __noswap_vmull_u8(__p1, __p2);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint64x2_t vmlal_u32(uint64x2_t __p0, uint32x2_t __p1, uint32x2_t __p2) {
+ uint64x2_t __ret;
+ __ret = __p0 + vmull_u32(__p1, __p2);
+ return __ret;
+}
+#else
+__ai uint64x2_t vmlal_u32(uint64x2_t __p0, uint32x2_t __p1, uint32x2_t __p2) {
+ uint64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
+ uint32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
+ uint32x2_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 1, 0);
+ uint64x2_t __ret;
+ __ret = __rev0 + __noswap_vmull_u32(__rev1, __rev2);
+ __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
+ return __ret;
+}
+__ai uint64x2_t __noswap_vmlal_u32(uint64x2_t __p0, uint32x2_t __p1, uint32x2_t __p2) {
+ uint64x2_t __ret;
+ __ret = __p0 + __noswap_vmull_u32(__p1, __p2);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint32x4_t vmlal_u16(uint32x4_t __p0, uint16x4_t __p1, uint16x4_t __p2) {
+ uint32x4_t __ret;
+ __ret = __p0 + vmull_u16(__p1, __p2);
+ return __ret;
+}
+#else
+__ai uint32x4_t vmlal_u16(uint32x4_t __p0, uint16x4_t __p1, uint16x4_t __p2) {
+ uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
+ uint16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
+ uint16x4_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 3, 2, 1, 0);
+ uint32x4_t __ret;
+ __ret = __rev0 + __noswap_vmull_u16(__rev1, __rev2);
+ __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
+ return __ret;
+}
+__ai uint32x4_t __noswap_vmlal_u16(uint32x4_t __p0, uint16x4_t __p1, uint16x4_t __p2) {
+ uint32x4_t __ret;
+ __ret = __p0 + __noswap_vmull_u16(__p1, __p2);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai int16x8_t vmlal_s8(int16x8_t __p0, int8x8_t __p1, int8x8_t __p2) {
+ int16x8_t __ret;
+ __ret = __p0 + vmull_s8(__p1, __p2);
+ return __ret;
+}
+#else
+__ai int16x8_t vmlal_s8(int16x8_t __p0, int8x8_t __p1, int8x8_t __p2) {
+ int16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
+ int8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
+ int8x8_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0);
+ int16x8_t __ret;
+ __ret = __rev0 + __noswap_vmull_s8(__rev1, __rev2);
+ __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
+ return __ret;
+}
+__ai int16x8_t __noswap_vmlal_s8(int16x8_t __p0, int8x8_t __p1, int8x8_t __p2) {
+ int16x8_t __ret;
+ __ret = __p0 + __noswap_vmull_s8(__p1, __p2);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai int64x2_t vmlal_s32(int64x2_t __p0, int32x2_t __p1, int32x2_t __p2) {
+ int64x2_t __ret;
+ __ret = __p0 + vmull_s32(__p1, __p2);
+ return __ret;
+}
+#else
+__ai int64x2_t vmlal_s32(int64x2_t __p0, int32x2_t __p1, int32x2_t __p2) {
+ int64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
+ int32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
+ int32x2_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 1, 0);
+ int64x2_t __ret;
+ __ret = __rev0 + __noswap_vmull_s32(__rev1, __rev2);
+ __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
+ return __ret;
+}
+__ai int64x2_t __noswap_vmlal_s32(int64x2_t __p0, int32x2_t __p1, int32x2_t __p2) {
+ int64x2_t __ret;
+ __ret = __p0 + __noswap_vmull_s32(__p1, __p2);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai int32x4_t vmlal_s16(int32x4_t __p0, int16x4_t __p1, int16x4_t __p2) {
+ int32x4_t __ret;
+ __ret = __p0 + vmull_s16(__p1, __p2);
+ return __ret;
+}
+#else
+__ai int32x4_t vmlal_s16(int32x4_t __p0, int16x4_t __p1, int16x4_t __p2) {
+ int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
+ int16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
+ int16x4_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 3, 2, 1, 0);
+ int32x4_t __ret;
+ __ret = __rev0 + __noswap_vmull_s16(__rev1, __rev2);
+ __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
+ return __ret;
+}
+__ai int32x4_t __noswap_vmlal_s16(int32x4_t __p0, int16x4_t __p1, int16x4_t __p2) {
+ int32x4_t __ret;
+ __ret = __p0 + __noswap_vmull_s16(__p1, __p2);
+ return __ret;
+}
+#endif
+
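+/* vmlal_lane family: the same accumulate, but the second multiplicand is
+ * a single lane of __p2 broadcast with __builtin_shufflevector; __p3
+ * must be a constant lane index. */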
+#ifdef __LITTLE_ENDIAN__
+#define vmlal_lane_u32(__p0, __p1, __p2, __p3) __extension__ ({ \
+ uint64x2_t __s0 = __p0; \
+ uint32x2_t __s1 = __p1; \
+ uint32x2_t __s2 = __p2; \
+ uint64x2_t __ret; \
+ __ret = __s0 + vmull_u32(__s1, __builtin_shufflevector(__s2, __s2, __p3, __p3)); \
+ __ret; \
+})
+#else
+#define vmlal_lane_u32(__p0, __p1, __p2, __p3) __extension__ ({ \
+ uint64x2_t __s0 = __p0; \
+ uint32x2_t __s1 = __p1; \
+ uint32x2_t __s2 = __p2; \
+ uint64x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
+ uint32x2_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \
+ uint32x2_t __rev2; __rev2 = __builtin_shufflevector(__s2, __s2, 1, 0); \
+ uint64x2_t __ret; \
+ __ret = __rev0 + __noswap_vmull_u32(__rev1, __builtin_shufflevector(__rev2, __rev2, __p3, __p3)); \
+ __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
+ __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vmlal_lane_u16(__p0, __p1, __p2, __p3) __extension__ ({ \
+ uint32x4_t __s0 = __p0; \
+ uint16x4_t __s1 = __p1; \
+ uint16x4_t __s2 = __p2; \
+ uint32x4_t __ret; \
+ __ret = __s0 + vmull_u16(__s1, __builtin_shufflevector(__s2, __s2, __p3, __p3, __p3, __p3)); \
+ __ret; \
+})
+#else
+#define vmlal_lane_u16(__p0, __p1, __p2, __p3) __extension__ ({ \
+ uint32x4_t __s0 = __p0; \
+ uint16x4_t __s1 = __p1; \
+ uint16x4_t __s2 = __p2; \
+ uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
+ uint16x4_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
+ uint16x4_t __rev2; __rev2 = __builtin_shufflevector(__s2, __s2, 3, 2, 1, 0); \
+ uint32x4_t __ret; \
+ __ret = __rev0 + __noswap_vmull_u16(__rev1, __builtin_shufflevector(__rev2, __rev2, __p3, __p3, __p3, __p3)); \
+ __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
+ __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vmlal_lane_s32(__p0, __p1, __p2, __p3) __extension__ ({ \
+ int64x2_t __s0 = __p0; \
+ int32x2_t __s1 = __p1; \
+ int32x2_t __s2 = __p2; \
+ int64x2_t __ret; \
+ __ret = __s0 + vmull_s32(__s1, __builtin_shufflevector(__s2, __s2, __p3, __p3)); \
+ __ret; \
+})
+#else
+#define vmlal_lane_s32(__p0, __p1, __p2, __p3) __extension__ ({ \
+ int64x2_t __s0 = __p0; \
+ int32x2_t __s1 = __p1; \
+ int32x2_t __s2 = __p2; \
+ int64x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
+ int32x2_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \
+ int32x2_t __rev2; __rev2 = __builtin_shufflevector(__s2, __s2, 1, 0); \
+ int64x2_t __ret; \
+ __ret = __rev0 + __noswap_vmull_s32(__rev1, __builtin_shufflevector(__rev2, __rev2, __p3, __p3)); \
+ __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
+ __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vmlal_lane_s16(__p0, __p1, __p2, __p3) __extension__ ({ \
+ int32x4_t __s0 = __p0; \
+ int16x4_t __s1 = __p1; \
+ int16x4_t __s2 = __p2; \
+ int32x4_t __ret; \
+ __ret = __s0 + vmull_s16(__s1, __builtin_shufflevector(__s2, __s2, __p3, __p3, __p3, __p3)); \
+ __ret; \
+})
+#else
+#define vmlal_lane_s16(__p0, __p1, __p2, __p3) __extension__ ({ \
+ int32x4_t __s0 = __p0; \
+ int16x4_t __s1 = __p1; \
+ int16x4_t __s2 = __p2; \
+ int32x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
+ int16x4_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
+ int16x4_t __rev2; __rev2 = __builtin_shufflevector(__s2, __s2, 3, 2, 1, 0); \
+ int32x4_t __ret; \
+ __ret = __rev0 + __noswap_vmull_s16(__rev1, __builtin_shufflevector(__rev2, __rev2, __p3, __p3, __p3, __p3)); \
+ __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
+ __ret; \
+})
+#endif
+
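+/* vmlal_n family: like vmlal_lane, but the broadcast value is a scalar
+ * argument, splatted with a vector literal such as
+ * (int16x4_t){__p2, __p2, __p2, __p2}. */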
+#ifdef __LITTLE_ENDIAN__
+__ai uint64x2_t vmlal_n_u32(uint64x2_t __p0, uint32x2_t __p1, uint32_t __p2) {
+ uint64x2_t __ret;
+ __ret = __p0 + vmull_u32(__p1, (uint32x2_t) {__p2, __p2});
+ return __ret;
+}
+#else
+__ai uint64x2_t vmlal_n_u32(uint64x2_t __p0, uint32x2_t __p1, uint32_t __p2) {
+ uint64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
+ uint32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
+ uint64x2_t __ret;
+ __ret = __rev0 + __noswap_vmull_u32(__rev1, (uint32x2_t) {__p2, __p2});
+ __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
+ return __ret;
+}
+__ai uint64x2_t __noswap_vmlal_n_u32(uint64x2_t __p0, uint32x2_t __p1, uint32_t __p2) {
+ uint64x2_t __ret;
+ __ret = __p0 + __noswap_vmull_u32(__p1, (uint32x2_t) {__p2, __p2});
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint32x4_t vmlal_n_u16(uint32x4_t __p0, uint16x4_t __p1, uint16_t __p2) {
+ uint32x4_t __ret;
+ __ret = __p0 + vmull_u16(__p1, (uint16x4_t) {__p2, __p2, __p2, __p2});
+ return __ret;
+}
+#else
+__ai uint32x4_t vmlal_n_u16(uint32x4_t __p0, uint16x4_t __p1, uint16_t __p2) {
+ uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
+ uint16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
+ uint32x4_t __ret;
+ __ret = __rev0 + __noswap_vmull_u16(__rev1, (uint16x4_t) {__p2, __p2, __p2, __p2});
+ __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
+ return __ret;
+}
+__ai uint32x4_t __noswap_vmlal_n_u16(uint32x4_t __p0, uint16x4_t __p1, uint16_t __p2) {
+ uint32x4_t __ret;
+ __ret = __p0 + __noswap_vmull_u16(__p1, (uint16x4_t) {__p2, __p2, __p2, __p2});
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai int64x2_t vmlal_n_s32(int64x2_t __p0, int32x2_t __p1, int32_t __p2) {
+ int64x2_t __ret;
+ __ret = __p0 + vmull_s32(__p1, (int32x2_t) {__p2, __p2});
+ return __ret;
+}
+#else
+__ai int64x2_t vmlal_n_s32(int64x2_t __p0, int32x2_t __p1, int32_t __p2) {
+ int64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
+ int32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
+ int64x2_t __ret;
+ __ret = __rev0 + __noswap_vmull_s32(__rev1, (int32x2_t) {__p2, __p2});
+ __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
+ return __ret;
+}
+__ai int64x2_t __noswap_vmlal_n_s32(int64x2_t __p0, int32x2_t __p1, int32_t __p2) {
+ int64x2_t __ret;
+ __ret = __p0 + __noswap_vmull_s32(__p1, (int32x2_t) {__p2, __p2});
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai int32x4_t vmlal_n_s16(int32x4_t __p0, int16x4_t __p1, int16_t __p2) {
+ int32x4_t __ret;
+ __ret = __p0 + vmull_s16(__p1, (int16x4_t) {__p2, __p2, __p2, __p2});
+ return __ret;
+}
+#else
+__ai int32x4_t vmlal_n_s16(int32x4_t __p0, int16x4_t __p1, int16_t __p2) {
+ int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
+ int16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
+ int32x4_t __ret;
+ __ret = __rev0 + __noswap_vmull_s16(__rev1, (int16x4_t) {__p2, __p2, __p2, __p2});
+ __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
+ return __ret;
+}
+__ai int32x4_t __noswap_vmlal_n_s16(int32x4_t __p0, int16x4_t __p1, int16_t __p2) {
+ int32x4_t __ret;
+ __ret = __p0 + __noswap_vmull_s16(__p1, (int16x4_t) {__p2, __p2, __p2, __p2});
+ return __ret;
+}
+#endif
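+/* vmlal_n family: the same widening multiply-accumulate, but the scalar p2
+ * is broadcast to every lane before the vmull step. Sketch (illustrative):
+ *
+ *   int32x4_t acc = vdupq_n_s32(0);
+ *   acc = vmlal_n_s16(acc, vdup_n_s16(3), 7);   // every lane: 0 + 3*7 = 21
+ */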
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint16x8_t vmlsl_u8(uint16x8_t __p0, uint8x8_t __p1, uint8x8_t __p2) {
+ uint16x8_t __ret;
+ __ret = __p0 - vmull_u8(__p1, __p2);
+ return __ret;
+}
+#else
+__ai uint16x8_t vmlsl_u8(uint16x8_t __p0, uint8x8_t __p1, uint8x8_t __p2) {
+ uint16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
+ uint8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
+ uint8x8_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0);
+ uint16x8_t __ret;
+ __ret = __rev0 - __noswap_vmull_u8(__rev1, __rev2);
+ __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
+ return __ret;
+}
+__ai uint16x8_t __noswap_vmlsl_u8(uint16x8_t __p0, uint8x8_t __p1, uint8x8_t __p2) {
+ uint16x8_t __ret;
+ __ret = __p0 - __noswap_vmull_u8(__p1, __p2);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint64x2_t vmlsl_u32(uint64x2_t __p0, uint32x2_t __p1, uint32x2_t __p2) {
+ uint64x2_t __ret;
+ __ret = __p0 - vmull_u32(__p1, __p2);
+ return __ret;
+}
+#else
+__ai uint64x2_t vmlsl_u32(uint64x2_t __p0, uint32x2_t __p1, uint32x2_t __p2) {
+ uint64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
+ uint32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
+ uint32x2_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 1, 0);
+ uint64x2_t __ret;
+ __ret = __rev0 - __noswap_vmull_u32(__rev1, __rev2);
+ __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
+ return __ret;
+}
+__ai uint64x2_t __noswap_vmlsl_u32(uint64x2_t __p0, uint32x2_t __p1, uint32x2_t __p2) {
+ uint64x2_t __ret;
+ __ret = __p0 - __noswap_vmull_u32(__p1, __p2);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint32x4_t vmlsl_u16(uint32x4_t __p0, uint16x4_t __p1, uint16x4_t __p2) {
+ uint32x4_t __ret;
+ __ret = __p0 - vmull_u16(__p1, __p2);
+ return __ret;
+}
+#else
+__ai uint32x4_t vmlsl_u16(uint32x4_t __p0, uint16x4_t __p1, uint16x4_t __p2) {
+ uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
+ uint16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
+ uint16x4_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 3, 2, 1, 0);
+ uint32x4_t __ret;
+ __ret = __rev0 - __noswap_vmull_u16(__rev1, __rev2);
+ __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
+ return __ret;
+}
+__ai uint32x4_t __noswap_vmlsl_u16(uint32x4_t __p0, uint16x4_t __p1, uint16x4_t __p2) {
+ uint32x4_t __ret;
+ __ret = __p0 - __noswap_vmull_u16(__p1, __p2);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai int16x8_t vmlsl_s8(int16x8_t __p0, int8x8_t __p1, int8x8_t __p2) {
+ int16x8_t __ret;
+ __ret = __p0 - vmull_s8(__p1, __p2);
+ return __ret;
+}
+#else
+__ai int16x8_t vmlsl_s8(int16x8_t __p0, int8x8_t __p1, int8x8_t __p2) {
+ int16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
+ int8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
+ int8x8_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0);
+ int16x8_t __ret;
+ __ret = __rev0 - __noswap_vmull_s8(__rev1, __rev2);
+ __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
+ return __ret;
+}
+__ai int16x8_t __noswap_vmlsl_s8(int16x8_t __p0, int8x8_t __p1, int8x8_t __p2) {
+ int16x8_t __ret;
+ __ret = __p0 - __noswap_vmull_s8(__p1, __p2);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai int64x2_t vmlsl_s32(int64x2_t __p0, int32x2_t __p1, int32x2_t __p2) {
+ int64x2_t __ret;
+ __ret = __p0 - vmull_s32(__p1, __p2);
+ return __ret;
+}
+#else
+__ai int64x2_t vmlsl_s32(int64x2_t __p0, int32x2_t __p1, int32x2_t __p2) {
+ int64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
+ int32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
+ int32x2_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 1, 0);
+ int64x2_t __ret;
+ __ret = __rev0 - __noswap_vmull_s32(__rev1, __rev2);
+ __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
+ return __ret;
+}
+__ai int64x2_t __noswap_vmlsl_s32(int64x2_t __p0, int32x2_t __p1, int32x2_t __p2) {
+ int64x2_t __ret;
+ __ret = __p0 - __noswap_vmull_s32(__p1, __p2);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai int32x4_t vmlsl_s16(int32x4_t __p0, int16x4_t __p1, int16x4_t __p2) {
+ int32x4_t __ret;
+ __ret = __p0 - vmull_s16(__p1, __p2);
+ return __ret;
+}
+#else
+__ai int32x4_t vmlsl_s16(int32x4_t __p0, int16x4_t __p1, int16x4_t __p2) {
+ int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
+ int16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
+ int16x4_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 3, 2, 1, 0);
+ int32x4_t __ret;
+ __ret = __rev0 - __noswap_vmull_s16(__rev1, __rev2);
+ __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
+ return __ret;
+}
+__ai int32x4_t __noswap_vmlsl_s16(int32x4_t __p0, int16x4_t __p1, int16x4_t __p2) {
+ int32x4_t __ret;
+ __ret = __p0 - __noswap_vmull_s16(__p1, __p2);
+ return __ret;
+}
+#endif
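+/* vmlsl family: widening multiply-subtract, ret = p0 - vmull(p1, p2); the
+ * lane and _n variants below follow the same pattern as their vmlal
+ * counterparts above. Sketch (illustrative values):
+ *
+ *   int32x4_t acc = vdupq_n_s32(100);
+ *   acc = vmlsl_s16(acc, vdup_n_s16(4), vdup_n_s16(5));   // 100 - 4*5 = 80
+ */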
+
+#ifdef __LITTLE_ENDIAN__
+#define vmlsl_lane_u32(__p0, __p1, __p2, __p3) __extension__ ({ \
+ uint64x2_t __s0 = __p0; \
+ uint32x2_t __s1 = __p1; \
+ uint32x2_t __s2 = __p2; \
+ uint64x2_t __ret; \
+ __ret = __s0 - vmull_u32(__s1, __builtin_shufflevector(__s2, __s2, __p3, __p3)); \
+ __ret; \
+})
+#else
+#define vmlsl_lane_u32(__p0, __p1, __p2, __p3) __extension__ ({ \
+ uint64x2_t __s0 = __p0; \
+ uint32x2_t __s1 = __p1; \
+ uint32x2_t __s2 = __p2; \
+ uint64x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
+ uint32x2_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \
+ uint32x2_t __rev2; __rev2 = __builtin_shufflevector(__s2, __s2, 1, 0); \
+ uint64x2_t __ret; \
+ __ret = __rev0 - __noswap_vmull_u32(__rev1, __builtin_shufflevector(__rev2, __rev2, __p3, __p3)); \
+ __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
+ __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vmlsl_lane_u16(__p0, __p1, __p2, __p3) __extension__ ({ \
+ uint32x4_t __s0 = __p0; \
+ uint16x4_t __s1 = __p1; \
+ uint16x4_t __s2 = __p2; \
+ uint32x4_t __ret; \
+ __ret = __s0 - vmull_u16(__s1, __builtin_shufflevector(__s2, __s2, __p3, __p3, __p3, __p3)); \
+ __ret; \
+})
+#else
+#define vmlsl_lane_u16(__p0, __p1, __p2, __p3) __extension__ ({ \
+ uint32x4_t __s0 = __p0; \
+ uint16x4_t __s1 = __p1; \
+ uint16x4_t __s2 = __p2; \
+ uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
+ uint16x4_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
+ uint16x4_t __rev2; __rev2 = __builtin_shufflevector(__s2, __s2, 3, 2, 1, 0); \
+ uint32x4_t __ret; \
+ __ret = __rev0 - __noswap_vmull_u16(__rev1, __builtin_shufflevector(__rev2, __rev2, __p3, __p3, __p3, __p3)); \
+ __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
+ __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vmlsl_lane_s32(__p0, __p1, __p2, __p3) __extension__ ({ \
+ int64x2_t __s0 = __p0; \
+ int32x2_t __s1 = __p1; \
+ int32x2_t __s2 = __p2; \
+ int64x2_t __ret; \
+ __ret = __s0 - vmull_s32(__s1, __builtin_shufflevector(__s2, __s2, __p3, __p3)); \
+ __ret; \
+})
+#else
+#define vmlsl_lane_s32(__p0, __p1, __p2, __p3) __extension__ ({ \
+ int64x2_t __s0 = __p0; \
+ int32x2_t __s1 = __p1; \
+ int32x2_t __s2 = __p2; \
+ int64x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
+ int32x2_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \
+ int32x2_t __rev2; __rev2 = __builtin_shufflevector(__s2, __s2, 1, 0); \
+ int64x2_t __ret; \
+ __ret = __rev0 - __noswap_vmull_s32(__rev1, __builtin_shufflevector(__rev2, __rev2, __p3, __p3)); \
+ __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
+ __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vmlsl_lane_s16(__p0, __p1, __p2, __p3) __extension__ ({ \
+ int32x4_t __s0 = __p0; \
+ int16x4_t __s1 = __p1; \
+ int16x4_t __s2 = __p2; \
+ int32x4_t __ret; \
+ __ret = __s0 - vmull_s16(__s1, __builtin_shufflevector(__s2, __s2, __p3, __p3, __p3, __p3)); \
+ __ret; \
+})
+#else
+#define vmlsl_lane_s16(__p0, __p1, __p2, __p3) __extension__ ({ \
+ int32x4_t __s0 = __p0; \
+ int16x4_t __s1 = __p1; \
+ int16x4_t __s2 = __p2; \
+ int32x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
+ int16x4_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
+ int16x4_t __rev2; __rev2 = __builtin_shufflevector(__s2, __s2, 3, 2, 1, 0); \
+ int32x4_t __ret; \
+ __ret = __rev0 - __noswap_vmull_s16(__rev1, __builtin_shufflevector(__rev2, __rev2, __p3, __p3, __p3, __p3)); \
+ __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
+ __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint64x2_t vmlsl_n_u32(uint64x2_t __p0, uint32x2_t __p1, uint32_t __p2) {
+ uint64x2_t __ret;
+ __ret = __p0 - vmull_u32(__p1, (uint32x2_t) {__p2, __p2});
+ return __ret;
+}
+#else
+__ai uint64x2_t vmlsl_n_u32(uint64x2_t __p0, uint32x2_t __p1, uint32_t __p2) {
+ uint64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
+ uint32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
+ uint64x2_t __ret;
+ __ret = __rev0 - __noswap_vmull_u32(__rev1, (uint32x2_t) {__p2, __p2});
+ __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
+ return __ret;
+}
+__ai uint64x2_t __noswap_vmlsl_n_u32(uint64x2_t __p0, uint32x2_t __p1, uint32_t __p2) {
+ uint64x2_t __ret;
+ __ret = __p0 - __noswap_vmull_u32(__p1, (uint32x2_t) {__p2, __p2});
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint32x4_t vmlsl_n_u16(uint32x4_t __p0, uint16x4_t __p1, uint16_t __p2) {
+ uint32x4_t __ret;
+ __ret = __p0 - vmull_u16(__p1, (uint16x4_t) {__p2, __p2, __p2, __p2});
+ return __ret;
+}
+#else
+__ai uint32x4_t vmlsl_n_u16(uint32x4_t __p0, uint16x4_t __p1, uint16_t __p2) {
+ uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
+ uint16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
+ uint32x4_t __ret;
+ __ret = __rev0 - __noswap_vmull_u16(__rev1, (uint16x4_t) {__p2, __p2, __p2, __p2});
+ __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
+ return __ret;
+}
+__ai uint32x4_t __noswap_vmlsl_n_u16(uint32x4_t __p0, uint16x4_t __p1, uint16_t __p2) {
+ uint32x4_t __ret;
+ __ret = __p0 - __noswap_vmull_u16(__p1, (uint16x4_t) {__p2, __p2, __p2, __p2});
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai int64x2_t vmlsl_n_s32(int64x2_t __p0, int32x2_t __p1, int32_t __p2) {
+ int64x2_t __ret;
+ __ret = __p0 - vmull_s32(__p1, (int32x2_t) {__p2, __p2});
+ return __ret;
+}
+#else
+__ai int64x2_t vmlsl_n_s32(int64x2_t __p0, int32x2_t __p1, int32_t __p2) {
+ int64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
+ int32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
+ int64x2_t __ret;
+ __ret = __rev0 - __noswap_vmull_s32(__rev1, (int32x2_t) {__p2, __p2});
+ __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
+ return __ret;
+}
+__ai int64x2_t __noswap_vmlsl_n_s32(int64x2_t __p0, int32x2_t __p1, int32_t __p2) {
+ int64x2_t __ret;
+ __ret = __p0 - __noswap_vmull_s32(__p1, (int32x2_t) {__p2, __p2});
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai int32x4_t vmlsl_n_s16(int32x4_t __p0, int16x4_t __p1, int16_t __p2) {
+ int32x4_t __ret;
+ __ret = __p0 - vmull_s16(__p1, (int16x4_t) {__p2, __p2, __p2, __p2});
+ return __ret;
+}
+#else
+__ai int32x4_t vmlsl_n_s16(int32x4_t __p0, int16x4_t __p1, int16_t __p2) {
+ int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
+ int16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
+ int32x4_t __ret;
+ __ret = __rev0 - __noswap_vmull_s16(__rev1, (int16x4_t) {__p2, __p2, __p2, __p2});
+ __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
+ return __ret;
+}
+__ai int32x4_t __noswap_vmlsl_n_s16(int32x4_t __p0, int16x4_t __p1, int16_t __p2) {
+ int32x4_t __ret;
+ __ret = __p0 - __noswap_vmull_s16(__p1, (int16x4_t) {__p2, __p2, __p2, __p2});
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vset_lane_f16(__p0_246, __p1_246, __p2_246) __extension__ ({ \
+ float16_t __s0_246 = __p0_246; \
+ float16x4_t __s1_246 = __p1_246; \
+ float16x4_t __ret_246; \
+ float16_t __reint_246 = __s0_246; \
+ float16x4_t __reint1_246 = __s1_246; \
+ int16x4_t __reint2_246 = vset_lane_s16(*(int16_t *) &__reint_246, *(int16x4_t *) &__reint1_246, __p2_246); \
+ __ret_246 = *(float16x4_t *) &__reint2_246; \
+ __ret_246; \
+})
+#else
+#define vset_lane_f16(__p0_247, __p1_247, __p2_247) __extension__ ({ \
+ float16_t __s0_247 = __p0_247; \
+ float16x4_t __s1_247 = __p1_247; \
+ float16x4_t __rev1_247; __rev1_247 = __builtin_shufflevector(__s1_247, __s1_247, 3, 2, 1, 0); \
+ float16x4_t __ret_247; \
+ float16_t __reint_247 = __s0_247; \
+ float16x4_t __reint1_247 = __rev1_247; \
+ int16x4_t __reint2_247 = __noswap_vset_lane_s16(*(int16_t *) &__reint_247, *(int16x4_t *) &__reint1_247, __p2_247); \
+ __ret_247 = *(float16x4_t *) &__reint2_247; \
+ __ret_247 = __builtin_shufflevector(__ret_247, __ret_247, 3, 2, 1, 0); \
+ __ret_247; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vsetq_lane_f16(__p0_248, __p1_248, __p2_248) __extension__ ({ \
+ float16_t __s0_248 = __p0_248; \
+ float16x8_t __s1_248 = __p1_248; \
+ float16x8_t __ret_248; \
+ float16_t __reint_248 = __s0_248; \
+ float16x8_t __reint1_248 = __s1_248; \
+ int16x8_t __reint2_248 = vsetq_lane_s16(*(int16_t *) &__reint_248, *(int16x8_t *) &__reint1_248, __p2_248); \
+ __ret_248 = *(float16x8_t *) &__reint2_248; \
+ __ret_248; \
+})
+#else
+#define vsetq_lane_f16(__p0_249, __p1_249, __p2_249) __extension__ ({ \
+ float16_t __s0_249 = __p0_249; \
+ float16x8_t __s1_249 = __p1_249; \
+ float16x8_t __rev1_249; __rev1_249 = __builtin_shufflevector(__s1_249, __s1_249, 7, 6, 5, 4, 3, 2, 1, 0); \
+ float16x8_t __ret_249; \
+ float16_t __reint_249 = __s0_249; \
+ float16x8_t __reint1_249 = __rev1_249; \
+ int16x8_t __reint2_249 = __noswap_vsetq_lane_s16(*(int16_t *) &__reint_249, *(int16x8_t *) &__reint1_249, __p2_249); \
+ __ret_249 = *(float16x8_t *) &__reint2_249; \
+ __ret_249 = __builtin_shufflevector(__ret_249, __ret_249, 7, 6, 5, 4, 3, 2, 1, 0); \
+ __ret_249; \
+})
+#endif
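+/* vset_lane_f16 and vsetq_lane_f16 have no half-precision arithmetic to lean
+ * on, so they type-pun the scalar and vector to int16 through pointer casts,
+ * insert with vset_lane_s16 or vsetq_lane_s16, and pun the bits back. Sketch
+ * (assumes a vdup_n_f16 helper is available to build the input vector):
+ *
+ *   float16x4_t v = vdup_n_f16(0);
+ *   float16_t x = (float16_t)1;
+ *   v = vset_lane_f16(x, v, 2);   // only the bits of lane 2 change
+ */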
+
+#if defined(__ARM_FEATURE_QRDMX) && defined(__aarch64__)
+#ifdef __LITTLE_ENDIAN__
+__ai int32_t vqrdmlahs_s32(int32_t __p0, int32_t __p1, int32_t __p2) {
+ int32_t __ret;
+ __ret = vqadds_s32(__p0, vqrdmulhs_s32(__p1, __p2));
+ return __ret;
+}
+#else
+__ai int32_t vqrdmlahs_s32(int32_t __p0, int32_t __p1, int32_t __p2) {
+ int32_t __ret;
+ __ret = __noswap_vqadds_s32(__p0, __noswap_vqrdmulhs_s32(__p1, __p2));
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai int16_t vqrdmlahh_s16(int16_t __p0, int16_t __p1, int16_t __p2) {
+ int16_t __ret;
+ __ret = vqaddh_s16(__p0, vqrdmulhh_s16(__p1, __p2));
+ return __ret;
+}
+#else
+__ai int16_t vqrdmlahh_s16(int16_t __p0, int16_t __p1, int16_t __p2) {
+ int16_t __ret;
+ __ret = __noswap_vqaddh_s16(__p0, __noswap_vqrdmulhh_s16(__p1, __p2));
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vqrdmlahs_lane_s32(__p0_250, __p1_250, __p2_250, __p3_250) __extension__ ({ \
+ int32_t __s0_250 = __p0_250; \
+ int32_t __s1_250 = __p1_250; \
+ int32x2_t __s2_250 = __p2_250; \
+ int32_t __ret_250; \
+ __ret_250 = vqadds_s32(__s0_250, vqrdmulhs_s32(__s1_250, vget_lane_s32(__s2_250, __p3_250))); \
+ __ret_250; \
+})
+#else
+#define vqrdmlahs_lane_s32(__p0_251, __p1_251, __p2_251, __p3_251) __extension__ ({ \
+ int32_t __s0_251 = __p0_251; \
+ int32_t __s1_251 = __p1_251; \
+ int32x2_t __s2_251 = __p2_251; \
+ int32x2_t __rev2_251; __rev2_251 = __builtin_shufflevector(__s2_251, __s2_251, 1, 0); \
+ int32_t __ret_251; \
+ __ret_251 = __noswap_vqadds_s32(__s0_251, __noswap_vqrdmulhs_s32(__s1_251, __noswap_vget_lane_s32(__rev2_251, __p3_251))); \
+ __ret_251; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vqrdmlahh_lane_s16(__p0_252, __p1_252, __p2_252, __p3_252) __extension__ ({ \
+ int16_t __s0_252 = __p0_252; \
+ int16_t __s1_252 = __p1_252; \
+ int16x4_t __s2_252 = __p2_252; \
+ int16_t __ret_252; \
+ __ret_252 = vqaddh_s16(__s0_252, vqrdmulhh_s16(__s1_252, vget_lane_s16(__s2_252, __p3_252))); \
+ __ret_252; \
+})
+#else
+#define vqrdmlahh_lane_s16(__p0_253, __p1_253, __p2_253, __p3_253) __extension__ ({ \
+ int16_t __s0_253 = __p0_253; \
+ int16_t __s1_253 = __p1_253; \
+ int16x4_t __s2_253 = __p2_253; \
+ int16x4_t __rev2_253; __rev2_253 = __builtin_shufflevector(__s2_253, __s2_253, 3, 2, 1, 0); \
+ int16_t __ret_253; \
+ __ret_253 = __noswap_vqaddh_s16(__s0_253, __noswap_vqrdmulhh_s16(__s1_253, __noswap_vget_lane_s16(__rev2_253, __p3_253))); \
+ __ret_253; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vqrdmlahs_laneq_s32(__p0_254, __p1_254, __p2_254, __p3_254) __extension__ ({ \
+ int32_t __s0_254 = __p0_254; \
+ int32_t __s1_254 = __p1_254; \
+ int32x4_t __s2_254 = __p2_254; \
+ int32_t __ret_254; \
+ __ret_254 = vqadds_s32(__s0_254, vqrdmulhs_s32(__s1_254, vgetq_lane_s32(__s2_254, __p3_254))); \
+ __ret_254; \
+})
+#else
+#define vqrdmlahs_laneq_s32(__p0_255, __p1_255, __p2_255, __p3_255) __extension__ ({ \
+ int32_t __s0_255 = __p0_255; \
+ int32_t __s1_255 = __p1_255; \
+ int32x4_t __s2_255 = __p2_255; \
+ int32x4_t __rev2_255; __rev2_255 = __builtin_shufflevector(__s2_255, __s2_255, 3, 2, 1, 0); \
+ int32_t __ret_255; \
+ __ret_255 = __noswap_vqadds_s32(__s0_255, __noswap_vqrdmulhs_s32(__s1_255, __noswap_vgetq_lane_s32(__rev2_255, __p3_255))); \
+ __ret_255; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vqrdmlahh_laneq_s16(__p0_256, __p1_256, __p2_256, __p3_256) __extension__ ({ \
+ int16_t __s0_256 = __p0_256; \
+ int16_t __s1_256 = __p1_256; \
+ int16x8_t __s2_256 = __p2_256; \
+ int16_t __ret_256; \
+ __ret_256 = vqaddh_s16(__s0_256, vqrdmulhh_s16(__s1_256, vgetq_lane_s16(__s2_256, __p3_256))); \
+ __ret_256; \
+})
+#else
+#define vqrdmlahh_laneq_s16(__p0_257, __p1_257, __p2_257, __p3_257) __extension__ ({ \
+ int16_t __s0_257 = __p0_257; \
+ int16_t __s1_257 = __p1_257; \
+ int16x8_t __s2_257 = __p2_257; \
+ int16x8_t __rev2_257; __rev2_257 = __builtin_shufflevector(__s2_257, __s2_257, 7, 6, 5, 4, 3, 2, 1, 0); \
+ int16_t __ret_257; \
+ __ret_257 = __noswap_vqaddh_s16(__s0_257, __noswap_vqrdmulhh_s16(__s1_257, __noswap_vgetq_lane_s16(__rev2_257, __p3_257))); \
+ __ret_257; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai int32_t vqrdmlshs_s32(int32_t __p0, int32_t __p1, int32_t __p2) {
+ int32_t __ret;
+ __ret = vqsubs_s32(__p0, vqrdmulhs_s32(__p1, __p2));
+ return __ret;
+}
+#else
+__ai int32_t vqrdmlshs_s32(int32_t __p0, int32_t __p1, int32_t __p2) {
+ int32_t __ret;
+ __ret = __noswap_vqsubs_s32(__p0, __noswap_vqrdmulhs_s32(__p1, __p2));
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai int16_t vqrdmlshh_s16(int16_t __p0, int16_t __p1, int16_t __p2) {
+ int16_t __ret;
+ __ret = vqsubh_s16(__p0, vqrdmulhh_s16(__p1, __p2));
+ return __ret;
+}
+#else
+__ai int16_t vqrdmlshh_s16(int16_t __p0, int16_t __p1, int16_t __p2) {
+ int16_t __ret;
+ __ret = __noswap_vqsubh_s16(__p0, __noswap_vqrdmulhh_s16(__p1, __p2));
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vqrdmlshs_lane_s32(__p0_258, __p1_258, __p2_258, __p3_258) __extension__ ({ \
+ int32_t __s0_258 = __p0_258; \
+ int32_t __s1_258 = __p1_258; \
+ int32x2_t __s2_258 = __p2_258; \
+ int32_t __ret_258; \
+ __ret_258 = vqsubs_s32(__s0_258, vqrdmulhs_s32(__s1_258, vget_lane_s32(__s2_258, __p3_258))); \
+ __ret_258; \
+})
+#else
+#define vqrdmlshs_lane_s32(__p0_259, __p1_259, __p2_259, __p3_259) __extension__ ({ \
+ int32_t __s0_259 = __p0_259; \
+ int32_t __s1_259 = __p1_259; \
+ int32x2_t __s2_259 = __p2_259; \
+ int32x2_t __rev2_259; __rev2_259 = __builtin_shufflevector(__s2_259, __s2_259, 1, 0); \
+ int32_t __ret_259; \
+ __ret_259 = __noswap_vqsubs_s32(__s0_259, __noswap_vqrdmulhs_s32(__s1_259, __noswap_vget_lane_s32(__rev2_259, __p3_259))); \
+ __ret_259; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vqrdmlshh_lane_s16(__p0_260, __p1_260, __p2_260, __p3_260) __extension__ ({ \
+ int16_t __s0_260 = __p0_260; \
+ int16_t __s1_260 = __p1_260; \
+ int16x4_t __s2_260 = __p2_260; \
+ int16_t __ret_260; \
+ __ret_260 = vqsubh_s16(__s0_260, vqrdmulhh_s16(__s1_260, vget_lane_s16(__s2_260, __p3_260))); \
+ __ret_260; \
+})
+#else
+#define vqrdmlshh_lane_s16(__p0_261, __p1_261, __p2_261, __p3_261) __extension__ ({ \
+ int16_t __s0_261 = __p0_261; \
+ int16_t __s1_261 = __p1_261; \
+ int16x4_t __s2_261 = __p2_261; \
+ int16x4_t __rev2_261; __rev2_261 = __builtin_shufflevector(__s2_261, __s2_261, 3, 2, 1, 0); \
+ int16_t __ret_261; \
+ __ret_261 = __noswap_vqsubh_s16(__s0_261, __noswap_vqrdmulhh_s16(__s1_261, __noswap_vget_lane_s16(__rev2_261, __p3_261))); \
+ __ret_261; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vqrdmlshs_laneq_s32(__p0_262, __p1_262, __p2_262, __p3_262) __extension__ ({ \
+ int32_t __s0_262 = __p0_262; \
+ int32_t __s1_262 = __p1_262; \
+ int32x4_t __s2_262 = __p2_262; \
+ int32_t __ret_262; \
+ __ret_262 = vqsubs_s32(__s0_262, vqrdmulhs_s32(__s1_262, vgetq_lane_s32(__s2_262, __p3_262))); \
+ __ret_262; \
+})
+#else
+#define vqrdmlshs_laneq_s32(__p0_263, __p1_263, __p2_263, __p3_263) __extension__ ({ \
+ int32_t __s0_263 = __p0_263; \
+ int32_t __s1_263 = __p1_263; \
+ int32x4_t __s2_263 = __p2_263; \
+ int32x4_t __rev2_263; __rev2_263 = __builtin_shufflevector(__s2_263, __s2_263, 3, 2, 1, 0); \
+ int32_t __ret_263; \
+ __ret_263 = __noswap_vqsubs_s32(__s0_263, __noswap_vqrdmulhs_s32(__s1_263, __noswap_vgetq_lane_s32(__rev2_263, __p3_263))); \
+ __ret_263; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vqrdmlshh_laneq_s16(__p0_264, __p1_264, __p2_264, __p3_264) __extension__ ({ \
+ int16_t __s0_264 = __p0_264; \
+ int16_t __s1_264 = __p1_264; \
+ int16x8_t __s2_264 = __p2_264; \
+ int16_t __ret_264; \
+ __ret_264 = vqsubh_s16(__s0_264, vqrdmulhh_s16(__s1_264, vgetq_lane_s16(__s2_264, __p3_264))); \
+ __ret_264; \
+})
+#else
+#define vqrdmlshh_laneq_s16(__p0_265, __p1_265, __p2_265, __p3_265) __extension__ ({ \
+ int16_t __s0_265 = __p0_265; \
+ int16_t __s1_265 = __p1_265; \
+ int16x8_t __s2_265 = __p2_265; \
+ int16x8_t __rev2_265; __rev2_265 = __builtin_shufflevector(__s2_265, __s2_265, 7, 6, 5, 4, 3, 2, 1, 0); \
+ int16_t __ret_265; \
+ __ret_265 = __noswap_vqsubh_s16(__s0_265, __noswap_vqrdmulhh_s16(__s1_265, __noswap_vgetq_lane_s16(__rev2_265, __p3_265))); \
+ __ret_265; \
+})
+#endif
+
+#endif
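+/* ARMv8.1 rounding doubling multiply-accumulate (guarded above by
+ * __ARM_FEATURE_QRDMX on AArch64): the vqrdmlah forms take a saturating
+ * vqadd of the accumulator with vqrdmulh of the other two operands, and the
+ * vqrdmlsh forms use vqsub instead; the lane and laneq variants first pull
+ * the multiplier out of a vector. Sketch (illustrative, no saturation hit):
+ *
+ *   int16_t r = vqrdmlahh_s16(100, 2, 3);   // 100 + ((2*2*3 + (1<<15)) >> 16)
+ */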
+#if defined(__aarch64__)
+#ifdef __LITTLE_ENDIAN__
+__ai uint16x8_t vabdl_high_u8(uint8x16_t __p0, uint8x16_t __p1) {
+ uint16x8_t __ret;
+ __ret = vabdl_u8(vget_high_u8(__p0), vget_high_u8(__p1));
+ return __ret;
+}
+#else
+__ai uint16x8_t vabdl_high_u8(uint8x16_t __p0, uint8x16_t __p1) {
+ uint8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
+ uint8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
+ uint16x8_t __ret;
+ __ret = __noswap_vabdl_u8(__noswap_vget_high_u8(__rev0), __noswap_vget_high_u8(__rev1));
+ __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint64x2_t vabdl_high_u32(uint32x4_t __p0, uint32x4_t __p1) {
+ uint64x2_t __ret;
+ __ret = vabdl_u32(vget_high_u32(__p0), vget_high_u32(__p1));
+ return __ret;
+}
+#else
+__ai uint64x2_t vabdl_high_u32(uint32x4_t __p0, uint32x4_t __p1) {
+ uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
+ uint32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
+ uint64x2_t __ret;
+ __ret = __noswap_vabdl_u32(__noswap_vget_high_u32(__rev0), __noswap_vget_high_u32(__rev1));
+ __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint32x4_t vabdl_high_u16(uint16x8_t __p0, uint16x8_t __p1) {
+ uint32x4_t __ret;
+ __ret = vabdl_u16(vget_high_u16(__p0), vget_high_u16(__p1));
+ return __ret;
+}
+#else
+__ai uint32x4_t vabdl_high_u16(uint16x8_t __p0, uint16x8_t __p1) {
+ uint16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
+ uint16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
+ uint32x4_t __ret;
+ __ret = __noswap_vabdl_u16(__noswap_vget_high_u16(__rev0), __noswap_vget_high_u16(__rev1));
+ __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai int16x8_t vabdl_high_s8(int8x16_t __p0, int8x16_t __p1) {
+ int16x8_t __ret;
+ __ret = vabdl_s8(vget_high_s8(__p0), vget_high_s8(__p1));
+ return __ret;
+}
+#else
+__ai int16x8_t vabdl_high_s8(int8x16_t __p0, int8x16_t __p1) {
+ int8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
+ int8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
+ int16x8_t __ret;
+ __ret = __noswap_vabdl_s8(__noswap_vget_high_s8(__rev0), __noswap_vget_high_s8(__rev1));
+ __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai int64x2_t vabdl_high_s32(int32x4_t __p0, int32x4_t __p1) {
+ int64x2_t __ret;
+ __ret = vabdl_s32(vget_high_s32(__p0), vget_high_s32(__p1));
+ return __ret;
+}
+#else
+__ai int64x2_t vabdl_high_s32(int32x4_t __p0, int32x4_t __p1) {
+ int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
+ int32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
+ int64x2_t __ret;
+ __ret = __noswap_vabdl_s32(__noswap_vget_high_s32(__rev0), __noswap_vget_high_s32(__rev1));
+ __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai int32x4_t vabdl_high_s16(int16x8_t __p0, int16x8_t __p1) {
+ int32x4_t __ret;
+ __ret = vabdl_s16(vget_high_s16(__p0), vget_high_s16(__p1));
+ return __ret;
+}
+#else
+__ai int32x4_t vabdl_high_s16(int16x8_t __p0, int16x8_t __p1) {
+ int16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
+ int16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
+ int32x4_t __ret;
+ __ret = __noswap_vabdl_s16(__noswap_vget_high_s16(__rev0), __noswap_vget_high_s16(__rev1));
+ __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
+ return __ret;
+}
+#endif
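+/* vabdl_high family (AArch64 only, like everything in this block): widening
+ * absolute difference of the upper halves, vabdl(vget_high(p0),
+ * vget_high(p1)), so no explicit extract of the top 64 bits is needed.
+ * Sketch:
+ *
+ *   uint8x16_t a = vdupq_n_u8(9), b = vdupq_n_u8(4);
+ *   uint16x8_t d = vabdl_high_u8(a, b);   // every lane: |9 - 4| = 5
+ */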
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint16x8_t vaddl_high_u8(uint8x16_t __p0, uint8x16_t __p1) {
+ uint16x8_t __ret;
+ __ret = vmovl_high_u8(__p0) + vmovl_high_u8(__p1);
+ return __ret;
+}
+#else
+__ai uint16x8_t vaddl_high_u8(uint8x16_t __p0, uint8x16_t __p1) {
+ uint8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
+ uint8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
+ uint16x8_t __ret;
+ __ret = __noswap_vmovl_high_u8(__rev0) + __noswap_vmovl_high_u8(__rev1);
+ __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint64x2_t vaddl_high_u32(uint32x4_t __p0, uint32x4_t __p1) {
+ uint64x2_t __ret;
+ __ret = vmovl_high_u32(__p0) + vmovl_high_u32(__p1);
+ return __ret;
+}
+#else
+__ai uint64x2_t vaddl_high_u32(uint32x4_t __p0, uint32x4_t __p1) {
+ uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
+ uint32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
+ uint64x2_t __ret;
+ __ret = __noswap_vmovl_high_u32(__rev0) + __noswap_vmovl_high_u32(__rev1);
+ __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint32x4_t vaddl_high_u16(uint16x8_t __p0, uint16x8_t __p1) {
+ uint32x4_t __ret;
+ __ret = vmovl_high_u16(__p0) + vmovl_high_u16(__p1);
+ return __ret;
+}
+#else
+__ai uint32x4_t vaddl_high_u16(uint16x8_t __p0, uint16x8_t __p1) {
+ uint16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
+ uint16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
+ uint32x4_t __ret;
+ __ret = __noswap_vmovl_high_u16(__rev0) + __noswap_vmovl_high_u16(__rev1);
+ __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai int16x8_t vaddl_high_s8(int8x16_t __p0, int8x16_t __p1) {
+ int16x8_t __ret;
+ __ret = vmovl_high_s8(__p0) + vmovl_high_s8(__p1);
+ return __ret;
+}
+#else
+__ai int16x8_t vaddl_high_s8(int8x16_t __p0, int8x16_t __p1) {
+ int8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
+ int8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
+ int16x8_t __ret;
+ __ret = __noswap_vmovl_high_s8(__rev0) + __noswap_vmovl_high_s8(__rev1);
+ __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai int64x2_t vaddl_high_s32(int32x4_t __p0, int32x4_t __p1) {
+ int64x2_t __ret;
+ __ret = vmovl_high_s32(__p0) + vmovl_high_s32(__p1);
+ return __ret;
+}
+#else
+__ai int64x2_t vaddl_high_s32(int32x4_t __p0, int32x4_t __p1) {
+ int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
+ int32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
+ int64x2_t __ret;
+ __ret = __noswap_vmovl_high_s32(__rev0) + __noswap_vmovl_high_s32(__rev1);
+ __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai int32x4_t vaddl_high_s16(int16x8_t __p0, int16x8_t __p1) {
+ int32x4_t __ret;
+ __ret = vmovl_high_s16(__p0) + vmovl_high_s16(__p1);
+ return __ret;
+}
+#else
+__ai int32x4_t vaddl_high_s16(int16x8_t __p0, int16x8_t __p1) {
+ int16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
+ int16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
+ int32x4_t __ret;
+ __ret = __noswap_vmovl_high_s16(__rev0) + __noswap_vmovl_high_s16(__rev1);
+ __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
+ return __ret;
+}
+#endif
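+/* vaddl_high family: add-long on the upper halves; both operands are widened
+ * with vmovl_high before the add, so the sum cannot wrap in the narrow type.
+ * Sketch:
+ *
+ *   int8x16_t a = vdupq_n_s8(120);
+ *   int16x8_t s = vaddl_high_s8(a, a);   // every lane: 240, no s8 overflow
+ */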
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint16x8_t vaddw_high_u8(uint16x8_t __p0, uint8x16_t __p1) {
+ uint16x8_t __ret;
+ __ret = __p0 + vmovl_high_u8(__p1);
+ return __ret;
+}
+#else
+__ai uint16x8_t vaddw_high_u8(uint16x8_t __p0, uint8x16_t __p1) {
+ uint16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
+ uint8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
+ uint16x8_t __ret;
+ __ret = __rev0 + __noswap_vmovl_high_u8(__rev1);
+ __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint64x2_t vaddw_high_u32(uint64x2_t __p0, uint32x4_t __p1) {
+ uint64x2_t __ret;
+ __ret = __p0 + vmovl_high_u32(__p1);
+ return __ret;
+}
+#else
+__ai uint64x2_t vaddw_high_u32(uint64x2_t __p0, uint32x4_t __p1) {
+ uint64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
+ uint32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
+ uint64x2_t __ret;
+ __ret = __rev0 + __noswap_vmovl_high_u32(__rev1);
+ __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint32x4_t vaddw_high_u16(uint32x4_t __p0, uint16x8_t __p1) {
+ uint32x4_t __ret;
+ __ret = __p0 + vmovl_high_u16(__p1);
+ return __ret;
+}
+#else
+__ai uint32x4_t vaddw_high_u16(uint32x4_t __p0, uint16x8_t __p1) {
+ uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
+ uint16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
+ uint32x4_t __ret;
+ __ret = __rev0 + __noswap_vmovl_high_u16(__rev1);
+ __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai int16x8_t vaddw_high_s8(int16x8_t __p0, int8x16_t __p1) {
+ int16x8_t __ret;
+ __ret = __p0 + vmovl_high_s8(__p1);
+ return __ret;
+}
+#else
+__ai int16x8_t vaddw_high_s8(int16x8_t __p0, int8x16_t __p1) {
+ int16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
+ int8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
+ int16x8_t __ret;
+ __ret = __rev0 + __noswap_vmovl_high_s8(__rev1);
+ __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai int64x2_t vaddw_high_s32(int64x2_t __p0, int32x4_t __p1) {
+ int64x2_t __ret;
+ __ret = __p0 + vmovl_high_s32(__p1);
+ return __ret;
+}
+#else
+__ai int64x2_t vaddw_high_s32(int64x2_t __p0, int32x4_t __p1) {
+ int64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
+ int32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
+ int64x2_t __ret;
+ __ret = __rev0 + __noswap_vmovl_high_s32(__rev1);
+ __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai int32x4_t vaddw_high_s16(int32x4_t __p0, int16x8_t __p1) {
+ int32x4_t __ret;
+ __ret = __p0 + vmovl_high_s16(__p1);
+ return __ret;
+}
+#else
+__ai int32x4_t vaddw_high_s16(int32x4_t __p0, int16x8_t __p1) {
+ int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
+ int16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
+ int32x4_t __ret;
+ __ret = __rev0 + __noswap_vmovl_high_s16(__rev1);
+ __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
+ return __ret;
+}
+#endif
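+/* vaddw_high family: add-wide; only the second operand's upper half is
+ * widened, the first is already the wide type. Sketch:
+ *
+ *   uint16x8_t acc = vdupq_n_u16(1000);
+ *   acc = vaddw_high_u8(acc, vdupq_n_u8(7));   // every lane: 1007
+ */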
+
+#ifdef __LITTLE_ENDIAN__
+#define vcopyq_lane_p64(__p0_266, __p1_266, __p2_266, __p3_266) __extension__ ({ \
+ poly64x2_t __s0_266 = __p0_266; \
+ poly64x1_t __s2_266 = __p2_266; \
+ poly64x2_t __ret_266; \
+ __ret_266 = vsetq_lane_p64(vget_lane_p64(__s2_266, __p3_266), __s0_266, __p1_266); \
+ __ret_266; \
+})
+#else
+#define vcopyq_lane_p64(__p0_267, __p1_267, __p2_267, __p3_267) __extension__ ({ \
+ poly64x2_t __s0_267 = __p0_267; \
+ poly64x1_t __s2_267 = __p2_267; \
+ poly64x2_t __rev0_267; __rev0_267 = __builtin_shufflevector(__s0_267, __s0_267, 1, 0); \
+ poly64x2_t __ret_267; \
+ __ret_267 = __noswap_vsetq_lane_p64(__noswap_vget_lane_p64(__s2_267, __p3_267), __rev0_267, __p1_267); \
+ __ret_267 = __builtin_shufflevector(__ret_267, __ret_267, 1, 0); \
+ __ret_267; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vcopyq_lane_f64(__p0_268, __p1_268, __p2_268, __p3_268) __extension__ ({ \
+ float64x2_t __s0_268 = __p0_268; \
+ float64x1_t __s2_268 = __p2_268; \
+ float64x2_t __ret_268; \
+ __ret_268 = vsetq_lane_f64(vget_lane_f64(__s2_268, __p3_268), __s0_268, __p1_268); \
+ __ret_268; \
+})
+#else
+#define vcopyq_lane_f64(__p0_269, __p1_269, __p2_269, __p3_269) __extension__ ({ \
+ float64x2_t __s0_269 = __p0_269; \
+ float64x1_t __s2_269 = __p2_269; \
+ float64x2_t __rev0_269; __rev0_269 = __builtin_shufflevector(__s0_269, __s0_269, 1, 0); \
+ float64x2_t __ret_269; \
+ __ret_269 = __noswap_vsetq_lane_f64(__noswap_vget_lane_f64(__s2_269, __p3_269), __rev0_269, __p1_269); \
+ __ret_269 = __builtin_shufflevector(__ret_269, __ret_269, 1, 0); \
+ __ret_269; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vcopy_lane_p64(__p0_270, __p1_270, __p2_270, __p3_270) __extension__ ({ \
+ poly64x1_t __s0_270 = __p0_270; \
+ poly64x1_t __s2_270 = __p2_270; \
+ poly64x1_t __ret_270; \
+ __ret_270 = vset_lane_p64(vget_lane_p64(__s2_270, __p3_270), __s0_270, __p1_270); \
+ __ret_270; \
+})
+#else
+#define vcopy_lane_p64(__p0_271, __p1_271, __p2_271, __p3_271) __extension__ ({ \
+ poly64x1_t __s0_271 = __p0_271; \
+ poly64x1_t __s2_271 = __p2_271; \
+ poly64x1_t __ret_271; \
+ __ret_271 = __noswap_vset_lane_p64(__noswap_vget_lane_p64(__s2_271, __p3_271), __s0_271, __p1_271); \
+ __ret_271; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vcopy_lane_f64(__p0_272, __p1_272, __p2_272, __p3_272) __extension__ ({ \
+ float64x1_t __s0_272 = __p0_272; \
+ float64x1_t __s2_272 = __p2_272; \
+ float64x1_t __ret_272; \
+ __ret_272 = vset_lane_f64(vget_lane_f64(__s2_272, __p3_272), __s0_272, __p1_272); \
+ __ret_272; \
+})
+#else
+#define vcopy_lane_f64(__p0_273, __p1_273, __p2_273, __p3_273) __extension__ ({ \
+ float64x1_t __s0_273 = __p0_273; \
+ float64x1_t __s2_273 = __p2_273; \
+ float64x1_t __ret_273; \
+ __ret_273 = __noswap_vset_lane_f64(__noswap_vget_lane_f64(__s2_273, __p3_273), __s0_273, __p1_273); \
+ __ret_273; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vcopyq_laneq_p64(__p0_274, __p1_274, __p2_274, __p3_274) __extension__ ({ \
+ poly64x2_t __s0_274 = __p0_274; \
+ poly64x2_t __s2_274 = __p2_274; \
+ poly64x2_t __ret_274; \
+ __ret_274 = vsetq_lane_p64(vgetq_lane_p64(__s2_274, __p3_274), __s0_274, __p1_274); \
+ __ret_274; \
+})
+#else
+#define vcopyq_laneq_p64(__p0_275, __p1_275, __p2_275, __p3_275) __extension__ ({ \
+ poly64x2_t __s0_275 = __p0_275; \
+ poly64x2_t __s2_275 = __p2_275; \
+ poly64x2_t __rev0_275; __rev0_275 = __builtin_shufflevector(__s0_275, __s0_275, 1, 0); \
+ poly64x2_t __rev2_275; __rev2_275 = __builtin_shufflevector(__s2_275, __s2_275, 1, 0); \
+ poly64x2_t __ret_275; \
+ __ret_275 = __noswap_vsetq_lane_p64(__noswap_vgetq_lane_p64(__rev2_275, __p3_275), __rev0_275, __p1_275); \
+ __ret_275 = __builtin_shufflevector(__ret_275, __ret_275, 1, 0); \
+ __ret_275; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vcopyq_laneq_f64(__p0_276, __p1_276, __p2_276, __p3_276) __extension__ ({ \
+ float64x2_t __s0_276 = __p0_276; \
+ float64x2_t __s2_276 = __p2_276; \
+ float64x2_t __ret_276; \
+ __ret_276 = vsetq_lane_f64(vgetq_lane_f64(__s2_276, __p3_276), __s0_276, __p1_276); \
+ __ret_276; \
+})
+#else
+#define vcopyq_laneq_f64(__p0_277, __p1_277, __p2_277, __p3_277) __extension__ ({ \
+ float64x2_t __s0_277 = __p0_277; \
+ float64x2_t __s2_277 = __p2_277; \
+ float64x2_t __rev0_277; __rev0_277 = __builtin_shufflevector(__s0_277, __s0_277, 1, 0); \
+ float64x2_t __rev2_277; __rev2_277 = __builtin_shufflevector(__s2_277, __s2_277, 1, 0); \
+ float64x2_t __ret_277; \
+ __ret_277 = __noswap_vsetq_lane_f64(__noswap_vgetq_lane_f64(__rev2_277, __p3_277), __rev0_277, __p1_277); \
+ __ret_277 = __builtin_shufflevector(__ret_277, __ret_277, 1, 0); \
+ __ret_277; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vcopy_laneq_p64(__p0_278, __p1_278, __p2_278, __p3_278) __extension__ ({ \
+ poly64x1_t __s0_278 = __p0_278; \
+ poly64x2_t __s2_278 = __p2_278; \
+ poly64x1_t __ret_278; \
+ __ret_278 = vset_lane_p64(vgetq_lane_p64(__s2_278, __p3_278), __s0_278, __p1_278); \
+ __ret_278; \
+})
+#else
+#define vcopy_laneq_p64(__p0_279, __p1_279, __p2_279, __p3_279) __extension__ ({ \
+ poly64x1_t __s0_279 = __p0_279; \
+ poly64x2_t __s2_279 = __p2_279; \
+ poly64x2_t __rev2_279; __rev2_279 = __builtin_shufflevector(__s2_279, __s2_279, 1, 0); \
+ poly64x1_t __ret_279; \
+ __ret_279 = __noswap_vset_lane_p64(__noswap_vgetq_lane_p64(__rev2_279, __p3_279), __s0_279, __p1_279); \
+ __ret_279; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vcopy_laneq_f64(__p0_280, __p1_280, __p2_280, __p3_280) __extension__ ({ \
+ float64x1_t __s0_280 = __p0_280; \
+ float64x2_t __s2_280 = __p2_280; \
+ float64x1_t __ret_280; \
+ __ret_280 = vset_lane_f64(vgetq_lane_f64(__s2_280, __p3_280), __s0_280, __p1_280); \
+ __ret_280; \
+})
+#else
+#define vcopy_laneq_f64(__p0_281, __p1_281, __p2_281, __p3_281) __extension__ ({ \
+ float64x1_t __s0_281 = __p0_281; \
+ float64x2_t __s2_281 = __p2_281; \
+ float64x2_t __rev2_281; __rev2_281 = __builtin_shufflevector(__s2_281, __s2_281, 1, 0); \
+ float64x1_t __ret_281; \
+ __ret_281 = __noswap_vset_lane_f64(__noswap_vgetq_lane_f64(__rev2_281, __p3_281), __s0_281, __p1_281); \
+ __ret_281; \
+})
+#endif
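+/* vcopy lane family: copy lane p3 of p2 into lane p1 of p0, composed as
+ * vset_lane(vget_lane(p2, p3), p0, p1) in the matching q and non-q widths.
+ * Sketch:
+ *
+ *   float64x2_t dst = vdupq_n_f64(0.0);
+ *   float64x2_t src = vdupq_n_f64(2.5);
+ *   dst = vcopyq_laneq_f64(dst, 1, src, 0);   // dst becomes {0.0, 2.5}
+ */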
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint16x8_t vmlal_high_u8(uint16x8_t __p0, uint8x16_t __p1, uint8x16_t __p2) {
+ uint16x8_t __ret;
+ __ret = vmlal_u8(__p0, vget_high_u8(__p1), vget_high_u8(__p2));
+ return __ret;
+}
+#else
+__ai uint16x8_t vmlal_high_u8(uint16x8_t __p0, uint8x16_t __p1, uint8x16_t __p2) {
+ uint16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
+ uint8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
+ uint8x16_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
+ uint16x8_t __ret;
+ __ret = __noswap_vmlal_u8(__rev0, __noswap_vget_high_u8(__rev1), __noswap_vget_high_u8(__rev2));
+ __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint64x2_t vmlal_high_u32(uint64x2_t __p0, uint32x4_t __p1, uint32x4_t __p2) {
+ uint64x2_t __ret;
+ __ret = vmlal_u32(__p0, vget_high_u32(__p1), vget_high_u32(__p2));
+ return __ret;
+}
+#else
+__ai uint64x2_t vmlal_high_u32(uint64x2_t __p0, uint32x4_t __p1, uint32x4_t __p2) {
+ uint64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
+ uint32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
+ uint32x4_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 3, 2, 1, 0);
+ uint64x2_t __ret;
+ __ret = __noswap_vmlal_u32(__rev0, __noswap_vget_high_u32(__rev1), __noswap_vget_high_u32(__rev2));
+ __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint32x4_t vmlal_high_u16(uint32x4_t __p0, uint16x8_t __p1, uint16x8_t __p2) {
+ uint32x4_t __ret;
+ __ret = vmlal_u16(__p0, vget_high_u16(__p1), vget_high_u16(__p2));
+ return __ret;
+}
+#else
+__ai uint32x4_t vmlal_high_u16(uint32x4_t __p0, uint16x8_t __p1, uint16x8_t __p2) {
+ uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
+ uint16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
+ uint16x8_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0);
+ uint32x4_t __ret;
+ __ret = __noswap_vmlal_u16(__rev0, __noswap_vget_high_u16(__rev1), __noswap_vget_high_u16(__rev2));
+ __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai int16x8_t vmlal_high_s8(int16x8_t __p0, int8x16_t __p1, int8x16_t __p2) {
+ int16x8_t __ret;
+ __ret = vmlal_s8(__p0, vget_high_s8(__p1), vget_high_s8(__p2));
+ return __ret;
+}
+#else
+__ai int16x8_t vmlal_high_s8(int16x8_t __p0, int8x16_t __p1, int8x16_t __p2) {
+ int16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
+ int8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
+ int8x16_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
+ int16x8_t __ret;
+ __ret = __noswap_vmlal_s8(__rev0, __noswap_vget_high_s8(__rev1), __noswap_vget_high_s8(__rev2));
+ __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai int64x2_t vmlal_high_s32(int64x2_t __p0, int32x4_t __p1, int32x4_t __p2) {
+ int64x2_t __ret;
+ __ret = vmlal_s32(__p0, vget_high_s32(__p1), vget_high_s32(__p2));
+ return __ret;
+}
+#else
+__ai int64x2_t vmlal_high_s32(int64x2_t __p0, int32x4_t __p1, int32x4_t __p2) {
+ int64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
+ int32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
+ int32x4_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 3, 2, 1, 0);
+ int64x2_t __ret;
+ __ret = __noswap_vmlal_s32(__rev0, __noswap_vget_high_s32(__rev1), __noswap_vget_high_s32(__rev2));
+ __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai int32x4_t vmlal_high_s16(int32x4_t __p0, int16x8_t __p1, int16x8_t __p2) {
+ int32x4_t __ret;
+ __ret = vmlal_s16(__p0, vget_high_s16(__p1), vget_high_s16(__p2));
+ return __ret;
+}
+#else
+__ai int32x4_t vmlal_high_s16(int32x4_t __p0, int16x8_t __p1, int16x8_t __p2) {
+ int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
+ int16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
+ int16x8_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0);
+ int32x4_t __ret;
+ __ret = __noswap_vmlal_s16(__rev0, __noswap_vget_high_s16(__rev1), __noswap_vget_high_s16(__rev2));
+ __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
+ return __ret;
+}
+#endif
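+/* vmlal_high family (and the vmlsl_high, vmlal_high_n, vmlsl_high_n variants
+ * that follow): the widening multiply-accumulate or -subtract applied to the
+ * upper halves of the 128-bit sources via vget_high. Sketch:
+ *
+ *   int32x4_t acc = vdupq_n_s32(0);
+ *   int16x8_t a = vdupq_n_s16(2), b = vdupq_n_s16(3);
+ *   acc = vmlal_high_s16(acc, a, b);   // every lane: 0 + 2*3 = 6
+ */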
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint64x2_t vmlal_high_n_u32(uint64x2_t __p0, uint32x4_t __p1, uint32_t __p2) {
+ uint64x2_t __ret;
+ __ret = vmlal_n_u32(__p0, vget_high_u32(__p1), __p2);
+ return __ret;
+}
+#else
+__ai uint64x2_t vmlal_high_n_u32(uint64x2_t __p0, uint32x4_t __p1, uint32_t __p2) {
+ uint64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
+ uint32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
+ uint64x2_t __ret;
+ __ret = __noswap_vmlal_n_u32(__rev0, __noswap_vget_high_u32(__rev1), __p2);
+ __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint32x4_t vmlal_high_n_u16(uint32x4_t __p0, uint16x8_t __p1, uint16_t __p2) {
+ uint32x4_t __ret;
+ __ret = vmlal_n_u16(__p0, vget_high_u16(__p1), __p2);
+ return __ret;
+}
+#else
+__ai uint32x4_t vmlal_high_n_u16(uint32x4_t __p0, uint16x8_t __p1, uint16_t __p2) {
+ uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
+ uint16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
+ uint32x4_t __ret;
+ __ret = __noswap_vmlal_n_u16(__rev0, __noswap_vget_high_u16(__rev1), __p2);
+ __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai int64x2_t vmlal_high_n_s32(int64x2_t __p0, int32x4_t __p1, int32_t __p2) {
+ int64x2_t __ret;
+ __ret = vmlal_n_s32(__p0, vget_high_s32(__p1), __p2);
+ return __ret;
+}
+#else
+__ai int64x2_t vmlal_high_n_s32(int64x2_t __p0, int32x4_t __p1, int32_t __p2) {
+ int64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
+ int32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
+ int64x2_t __ret;
+ __ret = __noswap_vmlal_n_s32(__rev0, __noswap_vget_high_s32(__rev1), __p2);
+ __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai int32x4_t vmlal_high_n_s16(int32x4_t __p0, int16x8_t __p1, int16_t __p2) {
+ int32x4_t __ret;
+ __ret = vmlal_n_s16(__p0, vget_high_s16(__p1), __p2);
+ return __ret;
+}
+#else
+__ai int32x4_t vmlal_high_n_s16(int32x4_t __p0, int16x8_t __p1, int16_t __p2) {
+ int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
+ int16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
+ int32x4_t __ret;
+ __ret = __noswap_vmlal_n_s16(__rev0, __noswap_vget_high_s16(__rev1), __p2);
+ __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint16x8_t vmlsl_high_u8(uint16x8_t __p0, uint8x16_t __p1, uint8x16_t __p2) {
+ uint16x8_t __ret;
+ __ret = vmlsl_u8(__p0, vget_high_u8(__p1), vget_high_u8(__p2));
+ return __ret;
+}
+#else
+__ai uint16x8_t vmlsl_high_u8(uint16x8_t __p0, uint8x16_t __p1, uint8x16_t __p2) {
+ uint16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
+ uint8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
+ uint8x16_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
+ uint16x8_t __ret;
+ __ret = __noswap_vmlsl_u8(__rev0, __noswap_vget_high_u8(__rev1), __noswap_vget_high_u8(__rev2));
+ __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint64x2_t vmlsl_high_u32(uint64x2_t __p0, uint32x4_t __p1, uint32x4_t __p2) {
+ uint64x2_t __ret;
+ __ret = vmlsl_u32(__p0, vget_high_u32(__p1), vget_high_u32(__p2));
+ return __ret;
+}
+#else
+__ai uint64x2_t vmlsl_high_u32(uint64x2_t __p0, uint32x4_t __p1, uint32x4_t __p2) {
+ uint64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
+ uint32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
+ uint32x4_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 3, 2, 1, 0);
+ uint64x2_t __ret;
+ __ret = __noswap_vmlsl_u32(__rev0, __noswap_vget_high_u32(__rev1), __noswap_vget_high_u32(__rev2));
+ __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint32x4_t vmlsl_high_u16(uint32x4_t __p0, uint16x8_t __p1, uint16x8_t __p2) {
+ uint32x4_t __ret;
+ __ret = vmlsl_u16(__p0, vget_high_u16(__p1), vget_high_u16(__p2));
+ return __ret;
+}
+#else
+__ai uint32x4_t vmlsl_high_u16(uint32x4_t __p0, uint16x8_t __p1, uint16x8_t __p2) {
+ uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
+ uint16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
+ uint16x8_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0);
+ uint32x4_t __ret;
+ __ret = __noswap_vmlsl_u16(__rev0, __noswap_vget_high_u16(__rev1), __noswap_vget_high_u16(__rev2));
+ __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai int16x8_t vmlsl_high_s8(int16x8_t __p0, int8x16_t __p1, int8x16_t __p2) {
+ int16x8_t __ret;
+ __ret = vmlsl_s8(__p0, vget_high_s8(__p1), vget_high_s8(__p2));
+ return __ret;
+}
+#else
+__ai int16x8_t vmlsl_high_s8(int16x8_t __p0, int8x16_t __p1, int8x16_t __p2) {
+ int16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
+ int8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
+ int8x16_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
+ int16x8_t __ret;
+ __ret = __noswap_vmlsl_s8(__rev0, __noswap_vget_high_s8(__rev1), __noswap_vget_high_s8(__rev2));
+ __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai int64x2_t vmlsl_high_s32(int64x2_t __p0, int32x4_t __p1, int32x4_t __p2) {
+ int64x2_t __ret;
+ __ret = vmlsl_s32(__p0, vget_high_s32(__p1), vget_high_s32(__p2));
+ return __ret;
+}
+#else
+__ai int64x2_t vmlsl_high_s32(int64x2_t __p0, int32x4_t __p1, int32x4_t __p2) {
+ int64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
+ int32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
+ int32x4_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 3, 2, 1, 0);
+ int64x2_t __ret;
+ __ret = __noswap_vmlsl_s32(__rev0, __noswap_vget_high_s32(__rev1), __noswap_vget_high_s32(__rev2));
+ __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai int32x4_t vmlsl_high_s16(int32x4_t __p0, int16x8_t __p1, int16x8_t __p2) {
+ int32x4_t __ret;
+ __ret = vmlsl_s16(__p0, vget_high_s16(__p1), vget_high_s16(__p2));
+ return __ret;
+}
+#else
+__ai int32x4_t vmlsl_high_s16(int32x4_t __p0, int16x8_t __p1, int16x8_t __p2) {
+ int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
+ int16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
+ int16x8_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0);
+ int32x4_t __ret;
+ __ret = __noswap_vmlsl_s16(__rev0, __noswap_vget_high_s16(__rev1), __noswap_vget_high_s16(__rev2));
+ __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint64x2_t vmlsl_high_n_u32(uint64x2_t __p0, uint32x4_t __p1, uint32_t __p2) {
+ uint64x2_t __ret;
+ __ret = vmlsl_n_u32(__p0, vget_high_u32(__p1), __p2);
+ return __ret;
+}
+#else
+__ai uint64x2_t vmlsl_high_n_u32(uint64x2_t __p0, uint32x4_t __p1, uint32_t __p2) {
+ uint64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
+ uint32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
+ uint64x2_t __ret;
+ __ret = __noswap_vmlsl_n_u32(__rev0, __noswap_vget_high_u32(__rev1), __p2);
+ __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint32x4_t vmlsl_high_n_u16(uint32x4_t __p0, uint16x8_t __p1, uint16_t __p2) {
+ uint32x4_t __ret;
+ __ret = vmlsl_n_u16(__p0, vget_high_u16(__p1), __p2);
+ return __ret;
+}
+#else
+__ai uint32x4_t vmlsl_high_n_u16(uint32x4_t __p0, uint16x8_t __p1, uint16_t __p2) {
+ uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
+ uint16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
+ uint32x4_t __ret;
+ __ret = __noswap_vmlsl_n_u16(__rev0, __noswap_vget_high_u16(__rev1), __p2);
+ __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai int64x2_t vmlsl_high_n_s32(int64x2_t __p0, int32x4_t __p1, int32_t __p2) {
+ int64x2_t __ret;
+ __ret = vmlsl_n_s32(__p0, vget_high_s32(__p1), __p2);
+ return __ret;
+}
+#else
+__ai int64x2_t vmlsl_high_n_s32(int64x2_t __p0, int32x4_t __p1, int32_t __p2) {
+ int64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
+ int32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
+ int64x2_t __ret;
+ __ret = __noswap_vmlsl_n_s32(__rev0, __noswap_vget_high_s32(__rev1), __p2);
+ __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai int32x4_t vmlsl_high_n_s16(int32x4_t __p0, int16x8_t __p1, int16_t __p2) {
+ int32x4_t __ret;
+ __ret = vmlsl_n_s16(__p0, vget_high_s16(__p1), __p2);
+ return __ret;
+}
+#else
+__ai int32x4_t vmlsl_high_n_s16(int32x4_t __p0, int16x8_t __p1, int16_t __p2) {
+ int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
+ int16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
+ int32x4_t __ret;
+ __ret = __noswap_vmlsl_n_s16(__rev0, __noswap_vget_high_s16(__rev1), __p2);
+ __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vmulx_lane_f64(__p0_282, __p1_282, __p2_282) __extension__ ({ \
+ float64x1_t __s0_282 = __p0_282; \
+ float64x1_t __s1_282 = __p1_282; \
+ float64x1_t __ret_282; \
+ float64_t __x_282 = vget_lane_f64(__s0_282, 0); \
+ float64_t __y_282 = vget_lane_f64(__s1_282, __p2_282); \
+ float64_t __z_282 = vmulxd_f64(__x_282, __y_282); \
+ __ret_282 = vset_lane_f64(__z_282, __s0_282, __p2_282); \
+ __ret_282; \
+})
+#else
+#define vmulx_lane_f64(__p0_283, __p1_283, __p2_283) __extension__ ({ \
+ float64x1_t __s0_283 = __p0_283; \
+ float64x1_t __s1_283 = __p1_283; \
+ float64x1_t __ret_283; \
+ float64_t __x_283 = __noswap_vget_lane_f64(__s0_283, 0); \
+ float64_t __y_283 = __noswap_vget_lane_f64(__s1_283, __p2_283); \
+ float64_t __z_283 = __noswap_vmulxd_f64(__x_283, __y_283); \
+ __ret_283 = __noswap_vset_lane_f64(__z_283, __s0_283, __p2_283); \
+ __ret_283; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vmulx_laneq_f64(__p0_284, __p1_284, __p2_284) __extension__ ({ \
+ float64x1_t __s0_284 = __p0_284; \
+ float64x2_t __s1_284 = __p1_284; \
+ float64x1_t __ret_284; \
+ float64_t __x_284 = vget_lane_f64(__s0_284, 0); \
+ float64_t __y_284 = vgetq_lane_f64(__s1_284, __p2_284); \
+ float64_t __z_284 = vmulxd_f64(__x_284, __y_284); \
+ __ret_284 = vset_lane_f64(__z_284, __s0_284, 0); \
+ __ret_284; \
+})
+#else
+#define vmulx_laneq_f64(__p0_285, __p1_285, __p2_285) __extension__ ({ \
+ float64x1_t __s0_285 = __p0_285; \
+ float64x2_t __s1_285 = __p1_285; \
+ float64x2_t __rev1_285; __rev1_285 = __builtin_shufflevector(__s1_285, __s1_285, 1, 0); \
+ float64x1_t __ret_285; \
+ float64_t __x_285 = __noswap_vget_lane_f64(__s0_285, 0); \
+ float64_t __y_285 = __noswap_vgetq_lane_f64(__rev1_285, __p2_285); \
+ float64_t __z_285 = __noswap_vmulxd_f64(__x_285, __y_285); \
+ __ret_285 = __noswap_vset_lane_f64(__z_285, __s0_285, 0); \
+ __ret_285; \
+})
+#endif
+
+#endif
+#ifdef __LITTLE_ENDIAN__
+__ai uint16x8_t vabal_u8(uint16x8_t __p0, uint8x8_t __p1, uint8x8_t __p2) {
+ uint16x8_t __ret;
+ __ret = __p0 + vabdl_u8(__p1, __p2);
+ return __ret;
+}
+#else
+__ai uint16x8_t vabal_u8(uint16x8_t __p0, uint8x8_t __p1, uint8x8_t __p2) {
+ uint16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
+ uint8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
+ uint8x8_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0);
+ uint16x8_t __ret;
+ __ret = __rev0 + __noswap_vabdl_u8(__rev1, __rev2);
+ __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
+ return __ret;
+}
+__ai uint16x8_t __noswap_vabal_u8(uint16x8_t __p0, uint8x8_t __p1, uint8x8_t __p2) {
+ uint16x8_t __ret;
+ __ret = __p0 + __noswap_vabdl_u8(__p1, __p2);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint64x2_t vabal_u32(uint64x2_t __p0, uint32x2_t __p1, uint32x2_t __p2) {
+ uint64x2_t __ret;
+ __ret = __p0 + vabdl_u32(__p1, __p2);
+ return __ret;
+}
+#else
+__ai uint64x2_t vabal_u32(uint64x2_t __p0, uint32x2_t __p1, uint32x2_t __p2) {
+ uint64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
+ uint32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
+ uint32x2_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 1, 0);
+ uint64x2_t __ret;
+ __ret = __rev0 + __noswap_vabdl_u32(__rev1, __rev2);
+ __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
+ return __ret;
+}
+__ai uint64x2_t __noswap_vabal_u32(uint64x2_t __p0, uint32x2_t __p1, uint32x2_t __p2) {
+ uint64x2_t __ret;
+ __ret = __p0 + __noswap_vabdl_u32(__p1, __p2);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint32x4_t vabal_u16(uint32x4_t __p0, uint16x4_t __p1, uint16x4_t __p2) {
+ uint32x4_t __ret;
+ __ret = __p0 + vabdl_u16(__p1, __p2);
+ return __ret;
+}
+#else
+__ai uint32x4_t vabal_u16(uint32x4_t __p0, uint16x4_t __p1, uint16x4_t __p2) {
+ uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
+ uint16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
+ uint16x4_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 3, 2, 1, 0);
+ uint32x4_t __ret;
+ __ret = __rev0 + __noswap_vabdl_u16(__rev1, __rev2);
+ __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
+ return __ret;
+}
+__ai uint32x4_t __noswap_vabal_u16(uint32x4_t __p0, uint16x4_t __p1, uint16x4_t __p2) {
+ uint32x4_t __ret;
+ __ret = __p0 + __noswap_vabdl_u16(__p1, __p2);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai int16x8_t vabal_s8(int16x8_t __p0, int8x8_t __p1, int8x8_t __p2) {
+ int16x8_t __ret;
+ __ret = __p0 + vabdl_s8(__p1, __p2);
+ return __ret;
+}
+#else
+__ai int16x8_t vabal_s8(int16x8_t __p0, int8x8_t __p1, int8x8_t __p2) {
+ int16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
+ int8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
+ int8x8_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0);
+ int16x8_t __ret;
+ __ret = __rev0 + __noswap_vabdl_s8(__rev1, __rev2);
+ __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
+ return __ret;
+}
+__ai int16x8_t __noswap_vabal_s8(int16x8_t __p0, int8x8_t __p1, int8x8_t __p2) {
+ int16x8_t __ret;
+ __ret = __p0 + __noswap_vabdl_s8(__p1, __p2);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai int64x2_t vabal_s32(int64x2_t __p0, int32x2_t __p1, int32x2_t __p2) {
+ int64x2_t __ret;
+ __ret = __p0 + vabdl_s32(__p1, __p2);
+ return __ret;
+}
+#else
+__ai int64x2_t vabal_s32(int64x2_t __p0, int32x2_t __p1, int32x2_t __p2) {
+ int64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
+ int32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
+ int32x2_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 1, 0);
+ int64x2_t __ret;
+ __ret = __rev0 + __noswap_vabdl_s32(__rev1, __rev2);
+ __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
+ return __ret;
+}
+__ai int64x2_t __noswap_vabal_s32(int64x2_t __p0, int32x2_t __p1, int32x2_t __p2) {
+ int64x2_t __ret;
+ __ret = __p0 + __noswap_vabdl_s32(__p1, __p2);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai int32x4_t vabal_s16(int32x4_t __p0, int16x4_t __p1, int16x4_t __p2) {
+ int32x4_t __ret;
+ __ret = __p0 + vabdl_s16(__p1, __p2);
+ return __ret;
+}
+#else
+__ai int32x4_t vabal_s16(int32x4_t __p0, int16x4_t __p1, int16x4_t __p2) {
+ int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
+ int16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
+ int16x4_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 3, 2, 1, 0);
+ int32x4_t __ret;
+ __ret = __rev0 + __noswap_vabdl_s16(__rev1, __rev2);
+ __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
+ return __ret;
+}
+__ai int32x4_t __noswap_vabal_s16(int32x4_t __p0, int16x4_t __p1, int16x4_t __p2) {
+ int32x4_t __ret;
+ __ret = __p0 + __noswap_vabdl_s16(__p1, __p2);
+ return __ret;
+}
+#endif
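+/* The vabal_* family performs a widening absolute-difference-accumulate:
+   ret[i] = __p0[i] + |__p1[i] - __p2[i]|, with the difference widened to
+   the element type of __p0. A minimal usage sketch (illustrative only):
+
+     int16x8_t acc = vdupq_n_s16(0);
+     acc = vabal_s8(acc, a, b);   // acc[i] += |a[i] - b[i]|, widened
+*/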
+
+#if defined(__aarch64__)
+#ifdef __LITTLE_ENDIAN__
+__ai uint16x8_t vabal_high_u8(uint16x8_t __p0, uint8x16_t __p1, uint8x16_t __p2) {
+ uint16x8_t __ret;
+ __ret = vabal_u8(__p0, vget_high_u8(__p1), vget_high_u8(__p2));
+ return __ret;
+}
+#else
+__ai uint16x8_t vabal_high_u8(uint16x8_t __p0, uint8x16_t __p1, uint8x16_t __p2) {
+ uint16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
+ uint8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
+ uint8x16_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
+ uint16x8_t __ret;
+ __ret = __noswap_vabal_u8(__rev0, __noswap_vget_high_u8(__rev1), __noswap_vget_high_u8(__rev2));
+ __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint64x2_t vabal_high_u32(uint64x2_t __p0, uint32x4_t __p1, uint32x4_t __p2) {
+ uint64x2_t __ret;
+ __ret = vabal_u32(__p0, vget_high_u32(__p1), vget_high_u32(__p2));
+ return __ret;
+}
+#else
+__ai uint64x2_t vabal_high_u32(uint64x2_t __p0, uint32x4_t __p1, uint32x4_t __p2) {
+ uint64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
+ uint32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
+ uint32x4_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 3, 2, 1, 0);
+ uint64x2_t __ret;
+ __ret = __noswap_vabal_u32(__rev0, __noswap_vget_high_u32(__rev1), __noswap_vget_high_u32(__rev2));
+ __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint32x4_t vabal_high_u16(uint32x4_t __p0, uint16x8_t __p1, uint16x8_t __p2) {
+ uint32x4_t __ret;
+ __ret = vabal_u16(__p0, vget_high_u16(__p1), vget_high_u16(__p2));
+ return __ret;
+}
+#else
+__ai uint32x4_t vabal_high_u16(uint32x4_t __p0, uint16x8_t __p1, uint16x8_t __p2) {
+ uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
+ uint16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
+ uint16x8_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0);
+ uint32x4_t __ret;
+ __ret = __noswap_vabal_u16(__rev0, __noswap_vget_high_u16(__rev1), __noswap_vget_high_u16(__rev2));
+ __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai int16x8_t vabal_high_s8(int16x8_t __p0, int8x16_t __p1, int8x16_t __p2) {
+ int16x8_t __ret;
+ __ret = vabal_s8(__p0, vget_high_s8(__p1), vget_high_s8(__p2));
+ return __ret;
+}
+#else
+__ai int16x8_t vabal_high_s8(int16x8_t __p0, int8x16_t __p1, int8x16_t __p2) {
+ int16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
+ int8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
+ int8x16_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
+ int16x8_t __ret;
+ __ret = __noswap_vabal_s8(__rev0, __noswap_vget_high_s8(__rev1), __noswap_vget_high_s8(__rev2));
+ __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai int64x2_t vabal_high_s32(int64x2_t __p0, int32x4_t __p1, int32x4_t __p2) {
+ int64x2_t __ret;
+ __ret = vabal_s32(__p0, vget_high_s32(__p1), vget_high_s32(__p2));
+ return __ret;
+}
+#else
+__ai int64x2_t vabal_high_s32(int64x2_t __p0, int32x4_t __p1, int32x4_t __p2) {
+ int64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
+ int32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
+ int32x4_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 3, 2, 1, 0);
+ int64x2_t __ret;
+ __ret = __noswap_vabal_s32(__rev0, __noswap_vget_high_s32(__rev1), __noswap_vget_high_s32(__rev2));
+ __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai int32x4_t vabal_high_s16(int32x4_t __p0, int16x8_t __p1, int16x8_t __p2) {
+ int32x4_t __ret;
+ __ret = vabal_s16(__p0, vget_high_s16(__p1), vget_high_s16(__p2));
+ return __ret;
+}
+#else
+__ai int32x4_t vabal_high_s16(int32x4_t __p0, int16x8_t __p1, int16x8_t __p2) {
+ int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
+ int16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
+ int16x8_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0);
+ int32x4_t __ret;
+ __ret = __noswap_vabal_s16(__rev0, __noswap_vget_high_s16(__rev1), __noswap_vget_high_s16(__rev2));
+ __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
+ return __ret;
+}
+#endif
+
+#endif
+
+#undef __ai
+
+#endif /* __ARM_NEON_H */
diff --git a/clang-2690385/lib64/clang/3.8/include/avx2intrin.h b/clang-2690385/lib64/clang/3.8/include/avx2intrin.h
new file mode 100644
index 00000000..f786572d
--- /dev/null
+++ b/clang-2690385/lib64/clang/3.8/include/avx2intrin.h
@@ -0,0 +1,1215 @@
+/*===---- avx2intrin.h - AVX2 intrinsics -----------------------------------===
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+ * THE SOFTWARE.
+ *
+ *===-----------------------------------------------------------------------===
+ */
+
+#ifndef __IMMINTRIN_H
+#error "Never use <avx2intrin.h> directly; include <immintrin.h> instead."
+#endif
+
+#ifndef __AVX2INTRIN_H
+#define __AVX2INTRIN_H
+
+/* Define the default attributes for the functions in this file. */
+#define __DEFAULT_FN_ATTRS __attribute__((__always_inline__, __nodebug__, __target__("avx2")))
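+/* The __target__("avx2") attribute above lets these functions be defined and
+   inlined into AVX2-enabled code without compiling the whole translation
+   unit with -mavx2; calling one of them from a function that lacks the avx2
+   target feature is rejected at compile time. */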
+
+/* AVX2 form of the SSE4.1 "Multiple Packed Sums of Absolute Differences"
+   (MPSADBW). */
+#define _mm256_mpsadbw_epu8(X, Y, M) __builtin_ia32_mpsadbw256((X), (Y), (M))
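+/* M selects the byte offsets used by MPSADBW and must be a compile-time
+   constant, which is why this is a macro rather than an inline function. */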
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS
+_mm256_abs_epi8(__m256i __a)
+{
+ return (__m256i)__builtin_ia32_pabsb256((__v32qi)__a);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS
+_mm256_abs_epi16(__m256i __a)
+{
+ return (__m256i)__builtin_ia32_pabsw256((__v16hi)__a);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS
+_mm256_abs_epi32(__m256i __a)
+{
+ return (__m256i)__builtin_ia32_pabsd256((__v8si)__a);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS
+_mm256_packs_epi16(__m256i __a, __m256i __b)
+{
+ return (__m256i)__builtin_ia32_packsswb256((__v16hi)__a, (__v16hi)__b);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS
+_mm256_packs_epi32(__m256i __a, __m256i __b)
+{
+ return (__m256i)__builtin_ia32_packssdw256((__v8si)__a, (__v8si)__b);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS
+_mm256_packus_epi16(__m256i __a, __m256i __b)
+{
+ return (__m256i)__builtin_ia32_packuswb256((__v16hi)__a, (__v16hi)__b);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS
+_mm256_packus_epi32(__m256i __V1, __m256i __V2)
+{
+ return (__m256i) __builtin_ia32_packusdw256((__v8si)__V1, (__v8si)__V2);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS
+_mm256_add_epi8(__m256i __a, __m256i __b)
+{
+ return (__m256i)((__v32qi)__a + (__v32qi)__b);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS
+_mm256_add_epi16(__m256i __a, __m256i __b)
+{
+ return (__m256i)((__v16hi)__a + (__v16hi)__b);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS
+_mm256_add_epi32(__m256i __a, __m256i __b)
+{
+ return (__m256i)((__v8si)__a + (__v8si)__b);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS
+_mm256_add_epi64(__m256i __a, __m256i __b)
+{
+ return __a + __b;
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS
+_mm256_adds_epi8(__m256i __a, __m256i __b)
+{
+ return (__m256i)__builtin_ia32_paddsb256((__v32qi)__a, (__v32qi)__b);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS
+_mm256_adds_epi16(__m256i __a, __m256i __b)
+{
+ return (__m256i)__builtin_ia32_paddsw256((__v16hi)__a, (__v16hi)__b);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS
+_mm256_adds_epu8(__m256i __a, __m256i __b)
+{
+ return (__m256i)__builtin_ia32_paddusb256((__v32qi)__a, (__v32qi)__b);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS
+_mm256_adds_epu16(__m256i __a, __m256i __b)
+{
+ return (__m256i)__builtin_ia32_paddusw256((__v16hi)__a, (__v16hi)__b);
+}
+
+#define _mm256_alignr_epi8(a, b, n) __extension__ ({ \
+ (__m256i)__builtin_ia32_palignr256((__v32qi)(__m256i)(a), \
+ (__v32qi)(__m256i)(b), (n)); })
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS
+_mm256_and_si256(__m256i __a, __m256i __b)
+{
+ return __a & __b;
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS
+_mm256_andnot_si256(__m256i __a, __m256i __b)
+{
+ return ~__a & __b;
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS
+_mm256_avg_epu8(__m256i __a, __m256i __b)
+{
+ return (__m256i)__builtin_ia32_pavgb256((__v32qi)__a, (__v32qi)__b);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS
+_mm256_avg_epu16(__m256i __a, __m256i __b)
+{
+ return (__m256i)__builtin_ia32_pavgw256((__v16hi)__a, (__v16hi)__b);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS
+_mm256_blendv_epi8(__m256i __V1, __m256i __V2, __m256i __M)
+{
+ return (__m256i)__builtin_ia32_pblendvb256((__v32qi)__V1, (__v32qi)__V2,
+ (__v32qi)__M);
+}
+
+#define _mm256_blend_epi16(V1, V2, M) __extension__ ({ \
+ (__m256i)__builtin_shufflevector((__v16hi)(__m256i)(V1), \
+ (__v16hi)(__m256i)(V2), \
+ (((M) & 0x01) ? 16 : 0), \
+ (((M) & 0x02) ? 17 : 1), \
+ (((M) & 0x04) ? 18 : 2), \
+ (((M) & 0x08) ? 19 : 3), \
+ (((M) & 0x10) ? 20 : 4), \
+ (((M) & 0x20) ? 21 : 5), \
+ (((M) & 0x40) ? 22 : 6), \
+ (((M) & 0x80) ? 23 : 7), \
+ (((M) & 0x01) ? 24 : 8), \
+ (((M) & 0x02) ? 25 : 9), \
+ (((M) & 0x04) ? 26 : 10), \
+ (((M) & 0x08) ? 27 : 11), \
+ (((M) & 0x10) ? 28 : 12), \
+ (((M) & 0x20) ? 29 : 13), \
+ (((M) & 0x40) ? 30 : 14), \
+ (((M) & 0x80) ? 31 : 15)); })
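+/* Note that the same 8-bit mask M is applied to both 128-bit halves: bit k
+   of M selects between elements k and 16+k in the low half and between
+   elements 8+k and 24+k in the high half, matching the per-lane behaviour
+   of the VPBLENDW instruction. */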
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS
+_mm256_cmpeq_epi8(__m256i __a, __m256i __b)
+{
+ return (__m256i)((__v32qi)__a == (__v32qi)__b);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS
+_mm256_cmpeq_epi16(__m256i __a, __m256i __b)
+{
+ return (__m256i)((__v16hi)__a == (__v16hi)__b);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS
+_mm256_cmpeq_epi32(__m256i __a, __m256i __b)
+{
+ return (__m256i)((__v8si)__a == (__v8si)__b);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS
+_mm256_cmpeq_epi64(__m256i __a, __m256i __b)
+{
+ return (__m256i)(__a == __b);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS
+_mm256_cmpgt_epi8(__m256i __a, __m256i __b)
+{
+  /* This function always performs a signed comparison, but __v32qi is a
+     vector of char, whose signedness is implementation-defined, so use the
+     explicitly signed __v32qs instead. */
+ return (__m256i)((__v32qs)__a > (__v32qs)__b);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS
+_mm256_cmpgt_epi16(__m256i __a, __m256i __b)
+{
+ return (__m256i)((__v16hi)__a > (__v16hi)__b);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS
+_mm256_cmpgt_epi32(__m256i __a, __m256i __b)
+{
+ return (__m256i)((__v8si)__a > (__v8si)__b);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS
+_mm256_cmpgt_epi64(__m256i __a, __m256i __b)
+{
+ return (__m256i)(__a > __b);
+}
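+/* Each comparison above produces a per-element mask: all ones (e.g. 0xFF
+   for bytes) where the predicate holds and all zeros where it does not, so
+   the result can be fed directly to _mm256_and_si256 or
+   _mm256_blendv_epi8. */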
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS
+_mm256_hadd_epi16(__m256i __a, __m256i __b)
+{
+ return (__m256i)__builtin_ia32_phaddw256((__v16hi)__a, (__v16hi)__b);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS
+_mm256_hadd_epi32(__m256i __a, __m256i __b)
+{
+ return (__m256i)__builtin_ia32_phaddd256((__v8si)__a, (__v8si)__b);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS
+_mm256_hadds_epi16(__m256i __a, __m256i __b)
+{
+ return (__m256i)__builtin_ia32_phaddsw256((__v16hi)__a, (__v16hi)__b);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS
+_mm256_hsub_epi16(__m256i __a, __m256i __b)
+{
+ return (__m256i)__builtin_ia32_phsubw256((__v16hi)__a, (__v16hi)__b);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS
+_mm256_hsub_epi32(__m256i __a, __m256i __b)
+{
+ return (__m256i)__builtin_ia32_phsubd256((__v8si)__a, (__v8si)__b);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS
+_mm256_hsubs_epi16(__m256i __a, __m256i __b)
+{
+ return (__m256i)__builtin_ia32_phsubsw256((__v16hi)__a, (__v16hi)__b);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS
+_mm256_maddubs_epi16(__m256i __a, __m256i __b)
+{
+ return (__m256i)__builtin_ia32_pmaddubsw256((__v32qi)__a, (__v32qi)__b);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS
+_mm256_madd_epi16(__m256i __a, __m256i __b)
+{
+ return (__m256i)__builtin_ia32_pmaddwd256((__v16hi)__a, (__v16hi)__b);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS
+_mm256_max_epi8(__m256i __a, __m256i __b)
+{
+ return (__m256i)__builtin_ia32_pmaxsb256((__v32qi)__a, (__v32qi)__b);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS
+_mm256_max_epi16(__m256i __a, __m256i __b)
+{
+ return (__m256i)__builtin_ia32_pmaxsw256((__v16hi)__a, (__v16hi)__b);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS
+_mm256_max_epi32(__m256i __a, __m256i __b)
+{
+ return (__m256i)__builtin_ia32_pmaxsd256((__v8si)__a, (__v8si)__b);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS
+_mm256_max_epu8(__m256i __a, __m256i __b)
+{
+ return (__m256i)__builtin_ia32_pmaxub256((__v32qi)__a, (__v32qi)__b);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS
+_mm256_max_epu16(__m256i __a, __m256i __b)
+{
+ return (__m256i)__builtin_ia32_pmaxuw256((__v16hi)__a, (__v16hi)__b);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS
+_mm256_max_epu32(__m256i __a, __m256i __b)
+{
+ return (__m256i)__builtin_ia32_pmaxud256((__v8si)__a, (__v8si)__b);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS
+_mm256_min_epi8(__m256i __a, __m256i __b)
+{
+ return (__m256i)__builtin_ia32_pminsb256((__v32qi)__a, (__v32qi)__b);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS
+_mm256_min_epi16(__m256i __a, __m256i __b)
+{
+ return (__m256i)__builtin_ia32_pminsw256((__v16hi)__a, (__v16hi)__b);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS
+_mm256_min_epi32(__m256i __a, __m256i __b)
+{
+ return (__m256i)__builtin_ia32_pminsd256((__v8si)__a, (__v8si)__b);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS
+_mm256_min_epu8(__m256i __a, __m256i __b)
+{
+ return (__m256i)__builtin_ia32_pminub256((__v32qi)__a, (__v32qi)__b);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS
+_mm256_min_epu16(__m256i __a, __m256i __b)
+{
+  return (__m256i)__builtin_ia32_pminuw256((__v16hi)__a, (__v16hi)__b);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS
+_mm256_min_epu32(__m256i __a, __m256i __b)
+{
+ return (__m256i)__builtin_ia32_pminud256((__v8si)__a, (__v8si)__b);
+}
+
+static __inline__ int __DEFAULT_FN_ATTRS
+_mm256_movemask_epi8(__m256i __a)
+{
+ return __builtin_ia32_pmovmskb256((__v32qi)__a);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS
+_mm256_cvtepi8_epi16(__m128i __V)
+{
+ return (__m256i)__builtin_ia32_pmovsxbw256((__v16qi)__V);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS
+_mm256_cvtepi8_epi32(__m128i __V)
+{
+ return (__m256i)__builtin_ia32_pmovsxbd256((__v16qi)__V);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS
+_mm256_cvtepi8_epi64(__m128i __V)
+{
+ return (__m256i)__builtin_ia32_pmovsxbq256((__v16qi)__V);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS
+_mm256_cvtepi16_epi32(__m128i __V)
+{
+ return (__m256i)__builtin_ia32_pmovsxwd256((__v8hi)__V);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS
+_mm256_cvtepi16_epi64(__m128i __V)
+{
+ return (__m256i)__builtin_ia32_pmovsxwq256((__v8hi)__V);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS
+_mm256_cvtepi32_epi64(__m128i __V)
+{
+ return (__m256i)__builtin_ia32_pmovsxdq256((__v4si)__V);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS
+_mm256_cvtepu8_epi16(__m128i __V)
+{
+ return (__m256i)__builtin_ia32_pmovzxbw256((__v16qi)__V);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS
+_mm256_cvtepu8_epi32(__m128i __V)
+{
+ return (__m256i)__builtin_ia32_pmovzxbd256((__v16qi)__V);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS
+_mm256_cvtepu8_epi64(__m128i __V)
+{
+ return (__m256i)__builtin_ia32_pmovzxbq256((__v16qi)__V);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS
+_mm256_cvtepu16_epi32(__m128i __V)
+{
+ return (__m256i)__builtin_ia32_pmovzxwd256((__v8hi)__V);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS
+_mm256_cvtepu16_epi64(__m128i __V)
+{
+ return (__m256i)__builtin_ia32_pmovzxwq256((__v8hi)__V);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS
+_mm256_cvtepu32_epi64(__m128i __V)
+{
+ return (__m256i)__builtin_ia32_pmovzxdq256((__v4si)__V);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS
+_mm256_mul_epi32(__m256i __a, __m256i __b)
+{
+ return (__m256i)__builtin_ia32_pmuldq256((__v8si)__a, (__v8si)__b);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS
+_mm256_mulhrs_epi16(__m256i __a, __m256i __b)
+{
+ return (__m256i)__builtin_ia32_pmulhrsw256((__v16hi)__a, (__v16hi)__b);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS
+_mm256_mulhi_epu16(__m256i __a, __m256i __b)
+{
+ return (__m256i)__builtin_ia32_pmulhuw256((__v16hi)__a, (__v16hi)__b);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS
+_mm256_mulhi_epi16(__m256i __a, __m256i __b)
+{
+ return (__m256i)__builtin_ia32_pmulhw256((__v16hi)__a, (__v16hi)__b);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS
+_mm256_mullo_epi16(__m256i __a, __m256i __b)
+{
+ return (__m256i)((__v16hi)__a * (__v16hi)__b);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS
+_mm256_mullo_epi32 (__m256i __a, __m256i __b)
+{
+ return (__m256i)((__v8si)__a * (__v8si)__b);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS
+_mm256_mul_epu32(__m256i __a, __m256i __b)
+{
+ return __builtin_ia32_pmuludq256((__v8si)__a, (__v8si)__b);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS
+_mm256_or_si256(__m256i __a, __m256i __b)
+{
+ return __a | __b;
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS
+_mm256_sad_epu8(__m256i __a, __m256i __b)
+{
+ return __builtin_ia32_psadbw256((__v32qi)__a, (__v32qi)__b);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS
+_mm256_shuffle_epi8(__m256i __a, __m256i __b)
+{
+ return (__m256i)__builtin_ia32_pshufb256((__v32qi)__a, (__v32qi)__b);
+}
+
+#define _mm256_shuffle_epi32(a, imm) __extension__ ({ \
+ (__m256i)__builtin_shufflevector((__v8si)(__m256i)(a), \
+ (__v8si)_mm256_setzero_si256(), \
+ (imm) & 0x3, ((imm) & 0xc) >> 2, \
+ ((imm) & 0x30) >> 4, ((imm) & 0xc0) >> 6, \
+ 4 + (((imm) & 0x03) >> 0), \
+ 4 + (((imm) & 0x0c) >> 2), \
+ 4 + (((imm) & 0x30) >> 4), \
+ 4 + (((imm) & 0xc0) >> 6)); })
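+/* The 8-bit immediate is decoded as four 2-bit source selectors and applied
+   identically to each 128-bit lane: element j of a lane comes from element
+   (imm >> (2*j)) & 3 of that same lane. */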
+
+#define _mm256_shufflehi_epi16(a, imm) __extension__ ({ \
+ (__m256i)__builtin_shufflevector((__v16hi)(__m256i)(a), \
+ (__v16hi)_mm256_setzero_si256(), \
+ 0, 1, 2, 3, \
+ 4 + (((imm) & 0x03) >> 0), \
+ 4 + (((imm) & 0x0c) >> 2), \
+ 4 + (((imm) & 0x30) >> 4), \
+ 4 + (((imm) & 0xc0) >> 6), \
+ 8, 9, 10, 11, \
+ 12 + (((imm) & 0x03) >> 0), \
+ 12 + (((imm) & 0x0c) >> 2), \
+ 12 + (((imm) & 0x30) >> 4), \
+ 12 + (((imm) & 0xc0) >> 6)); })
+
+#define _mm256_shufflelo_epi16(a, imm) __extension__ ({ \
+ (__m256i)__builtin_shufflevector((__v16hi)(__m256i)(a), \
+ (__v16hi)_mm256_setzero_si256(), \
+                                  (imm) & 0x3, ((imm) & 0xc) >> 2, \
+ ((imm) & 0x30) >> 4, ((imm) & 0xc0) >> 6, \
+ 4, 5, 6, 7, \
+ 8 + (((imm) & 0x03) >> 0), \
+ 8 + (((imm) & 0x0c) >> 2), \
+ 8 + (((imm) & 0x30) >> 4), \
+ 8 + (((imm) & 0xc0) >> 6), \
+ 12, 13, 14, 15); })
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS
+_mm256_sign_epi8(__m256i __a, __m256i __b)
+{
+ return (__m256i)__builtin_ia32_psignb256((__v32qi)__a, (__v32qi)__b);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS
+_mm256_sign_epi16(__m256i __a, __m256i __b)
+{
+ return (__m256i)__builtin_ia32_psignw256((__v16hi)__a, (__v16hi)__b);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS
+_mm256_sign_epi32(__m256i __a, __m256i __b)
+{
+ return (__m256i)__builtin_ia32_psignd256((__v8si)__a, (__v8si)__b);
+}
+
+#define _mm256_slli_si256(a, count) __extension__ ({ \
+ (__m256i)__builtin_ia32_pslldqi256((__m256i)(a), (count)*8); })
+
+#define _mm256_bslli_epi128(a, count) _mm256_slli_si256((a), (count))
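+/* These shift each 128-bit lane left by `count` bytes independently,
+   zero-filling; the *8 above converts the byte count into the bit count
+   expected by the builtin. */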
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS
+_mm256_slli_epi16(__m256i __a, int __count)
+{
+ return (__m256i)__builtin_ia32_psllwi256((__v16hi)__a, __count);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS
+_mm256_sll_epi16(__m256i __a, __m128i __count)
+{
+ return (__m256i)__builtin_ia32_psllw256((__v16hi)__a, (__v8hi)__count);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS
+_mm256_slli_epi32(__m256i __a, int __count)
+{
+ return (__m256i)__builtin_ia32_pslldi256((__v8si)__a, __count);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS
+_mm256_sll_epi32(__m256i __a, __m128i __count)
+{
+ return (__m256i)__builtin_ia32_pslld256((__v8si)__a, (__v4si)__count);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS
+_mm256_slli_epi64(__m256i __a, int __count)
+{
+ return __builtin_ia32_psllqi256(__a, __count);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS
+_mm256_sll_epi64(__m256i __a, __m128i __count)
+{
+ return __builtin_ia32_psllq256(__a, __count);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS
+_mm256_srai_epi16(__m256i __a, int __count)
+{
+ return (__m256i)__builtin_ia32_psrawi256((__v16hi)__a, __count);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS
+_mm256_sra_epi16(__m256i __a, __m128i __count)
+{
+ return (__m256i)__builtin_ia32_psraw256((__v16hi)__a, (__v8hi)__count);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS
+_mm256_srai_epi32(__m256i __a, int __count)
+{
+ return (__m256i)__builtin_ia32_psradi256((__v8si)__a, __count);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS
+_mm256_sra_epi32(__m256i __a, __m128i __count)
+{
+ return (__m256i)__builtin_ia32_psrad256((__v8si)__a, (__v4si)__count);
+}
+
+#define _mm256_srli_si256(a, count) __extension__ ({ \
+ (__m256i)__builtin_ia32_psrldqi256((__m256i)(a), (count)*8); })
+
+#define _mm256_bsrli_epi128(a, count) _mm256_srli_si256((a), (count))
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS
+_mm256_srli_epi16(__m256i __a, int __count)
+{
+ return (__m256i)__builtin_ia32_psrlwi256((__v16hi)__a, __count);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS
+_mm256_srl_epi16(__m256i __a, __m128i __count)
+{
+ return (__m256i)__builtin_ia32_psrlw256((__v16hi)__a, (__v8hi)__count);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS
+_mm256_srli_epi32(__m256i __a, int __count)
+{
+ return (__m256i)__builtin_ia32_psrldi256((__v8si)__a, __count);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS
+_mm256_srl_epi32(__m256i __a, __m128i __count)
+{
+ return (__m256i)__builtin_ia32_psrld256((__v8si)__a, (__v4si)__count);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS
+_mm256_srli_epi64(__m256i __a, int __count)
+{
+ return __builtin_ia32_psrlqi256(__a, __count);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS
+_mm256_srl_epi64(__m256i __a, __m128i __count)
+{
+ return __builtin_ia32_psrlq256(__a, __count);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS
+_mm256_sub_epi8(__m256i __a, __m256i __b)
+{
+ return (__m256i)((__v32qi)__a - (__v32qi)__b);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS
+_mm256_sub_epi16(__m256i __a, __m256i __b)
+{
+ return (__m256i)((__v16hi)__a - (__v16hi)__b);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS
+_mm256_sub_epi32(__m256i __a, __m256i __b)
+{
+ return (__m256i)((__v8si)__a - (__v8si)__b);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS
+_mm256_sub_epi64(__m256i __a, __m256i __b)
+{
+ return __a - __b;
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS
+_mm256_subs_epi8(__m256i __a, __m256i __b)
+{
+ return (__m256i)__builtin_ia32_psubsb256((__v32qi)__a, (__v32qi)__b);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS
+_mm256_subs_epi16(__m256i __a, __m256i __b)
+{
+ return (__m256i)__builtin_ia32_psubsw256((__v16hi)__a, (__v16hi)__b);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS
+_mm256_subs_epu8(__m256i __a, __m256i __b)
+{
+ return (__m256i)__builtin_ia32_psubusb256((__v32qi)__a, (__v32qi)__b);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS
+_mm256_subs_epu16(__m256i __a, __m256i __b)
+{
+ return (__m256i)__builtin_ia32_psubusw256((__v16hi)__a, (__v16hi)__b);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS
+_mm256_unpackhi_epi8(__m256i __a, __m256i __b)
+{
+ return (__m256i)__builtin_shufflevector((__v32qi)__a, (__v32qi)__b, 8, 32+8, 9, 32+9, 10, 32+10, 11, 32+11, 12, 32+12, 13, 32+13, 14, 32+14, 15, 32+15, 24, 32+24, 25, 32+25, 26, 32+26, 27, 32+27, 28, 32+28, 29, 32+29, 30, 32+30, 31, 32+31);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS
+_mm256_unpackhi_epi16(__m256i __a, __m256i __b)
+{
+ return (__m256i)__builtin_shufflevector((__v16hi)__a, (__v16hi)__b, 4, 16+4, 5, 16+5, 6, 16+6, 7, 16+7, 12, 16+12, 13, 16+13, 14, 16+14, 15, 16+15);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS
+_mm256_unpackhi_epi32(__m256i __a, __m256i __b)
+{
+ return (__m256i)__builtin_shufflevector((__v8si)__a, (__v8si)__b, 2, 8+2, 3, 8+3, 6, 8+6, 7, 8+7);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS
+_mm256_unpackhi_epi64(__m256i __a, __m256i __b)
+{
+ return (__m256i)__builtin_shufflevector(__a, __b, 1, 4+1, 3, 4+3);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS
+_mm256_unpacklo_epi8(__m256i __a, __m256i __b)
+{
+ return (__m256i)__builtin_shufflevector((__v32qi)__a, (__v32qi)__b, 0, 32+0, 1, 32+1, 2, 32+2, 3, 32+3, 4, 32+4, 5, 32+5, 6, 32+6, 7, 32+7, 16, 32+16, 17, 32+17, 18, 32+18, 19, 32+19, 20, 32+20, 21, 32+21, 22, 32+22, 23, 32+23);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS
+_mm256_unpacklo_epi16(__m256i __a, __m256i __b)
+{
+ return (__m256i)__builtin_shufflevector((__v16hi)__a, (__v16hi)__b, 0, 16+0, 1, 16+1, 2, 16+2, 3, 16+3, 8, 16+8, 9, 16+9, 10, 16+10, 11, 16+11);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS
+_mm256_unpacklo_epi32(__m256i __a, __m256i __b)
+{
+ return (__m256i)__builtin_shufflevector((__v8si)__a, (__v8si)__b, 0, 8+0, 1, 8+1, 4, 8+4, 5, 8+5);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS
+_mm256_unpacklo_epi64(__m256i __a, __m256i __b)
+{
+ return (__m256i)__builtin_shufflevector(__a, __b, 0, 4+0, 2, 4+2);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS
+_mm256_xor_si256(__m256i __a, __m256i __b)
+{
+ return __a ^ __b;
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS
+_mm256_stream_load_si256(__m256i const *__V)
+{
+ return (__m256i)__builtin_ia32_movntdqa256((const __v4di *)__V);
+}
+
+static __inline__ __m128 __DEFAULT_FN_ATTRS
+_mm_broadcastss_ps(__m128 __X)
+{
+ return (__m128)__builtin_shufflevector((__v4sf)__X, (__v4sf)__X, 0, 0, 0, 0);
+}
+
+static __inline__ __m128d __DEFAULT_FN_ATTRS
+_mm_broadcastsd_pd(__m128d __a)
+{
+ return __builtin_shufflevector(__a, __a, 0, 0);
+}
+
+static __inline__ __m256 __DEFAULT_FN_ATTRS
+_mm256_broadcastss_ps(__m128 __X)
+{
+ return (__m256)__builtin_shufflevector((__v4sf)__X, (__v4sf)__X, 0, 0, 0, 0, 0, 0, 0, 0);
+}
+
+static __inline__ __m256d __DEFAULT_FN_ATTRS
+_mm256_broadcastsd_pd(__m128d __X)
+{
+ return (__m256d)__builtin_shufflevector((__v2df)__X, (__v2df)__X, 0, 0, 0, 0);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS
+_mm256_broadcastsi128_si256(__m128i __X)
+{
+ return (__m256i)__builtin_shufflevector(__X, __X, 0, 1, 0, 1);
+}
+
+#define _mm_blend_epi32(V1, V2, M) __extension__ ({ \
+ (__m128i)__builtin_shufflevector((__v4si)(__m128i)(V1), \
+ (__v4si)(__m128i)(V2), \
+ (((M) & 0x01) ? 4 : 0), \
+ (((M) & 0x02) ? 5 : 1), \
+ (((M) & 0x04) ? 6 : 2), \
+ (((M) & 0x08) ? 7 : 3)); })
+
+#define _mm256_blend_epi32(V1, V2, M) __extension__ ({ \
+ (__m256i)__builtin_shufflevector((__v8si)(__m256i)(V1), \
+ (__v8si)(__m256i)(V2), \
+ (((M) & 0x01) ? 8 : 0), \
+ (((M) & 0x02) ? 9 : 1), \
+ (((M) & 0x04) ? 10 : 2), \
+ (((M) & 0x08) ? 11 : 3), \
+ (((M) & 0x10) ? 12 : 4), \
+ (((M) & 0x20) ? 13 : 5), \
+ (((M) & 0x40) ? 14 : 6), \
+ (((M) & 0x80) ? 15 : 7)); })
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS
+_mm256_broadcastb_epi8(__m128i __X)
+{
+ return (__m256i)__builtin_shufflevector((__v16qi)__X, (__v16qi)__X, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS
+_mm256_broadcastw_epi16(__m128i __X)
+{
+ return (__m256i)__builtin_shufflevector((__v8hi)__X, (__v8hi)__X, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS
+_mm256_broadcastd_epi32(__m128i __X)
+{
+ return (__m256i)__builtin_shufflevector((__v4si)__X, (__v4si)__X, 0, 0, 0, 0, 0, 0, 0, 0);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS
+_mm256_broadcastq_epi64(__m128i __X)
+{
+ return (__m256i)__builtin_shufflevector(__X, __X, 0, 0, 0, 0);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_broadcastb_epi8(__m128i __X)
+{
+ return (__m128i)__builtin_shufflevector((__v16qi)__X, (__v16qi)__X, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_broadcastw_epi16(__m128i __X)
+{
+ return (__m128i)__builtin_shufflevector((__v8hi)__X, (__v8hi)__X, 0, 0, 0, 0, 0, 0, 0, 0);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_broadcastd_epi32(__m128i __X)
+{
+ return (__m128i)__builtin_shufflevector((__v4si)__X, (__v4si)__X, 0, 0, 0, 0);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_broadcastq_epi64(__m128i __X)
+{
+ return (__m128i)__builtin_shufflevector(__X, __X, 0, 0);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS
+_mm256_permutevar8x32_epi32(__m256i __a, __m256i __b)
+{
+ return (__m256i)__builtin_ia32_permvarsi256((__v8si)__a, (__v8si)__b);
+}
+
+#define _mm256_permute4x64_pd(V, M) __extension__ ({ \
+ (__m256d)__builtin_shufflevector((__v4df)(__m256d)(V), \
+ (__v4df)_mm256_setzero_pd(), \
+ (M) & 0x3, ((M) & 0xc) >> 2, \
+ ((M) & 0x30) >> 4, ((M) & 0xc0) >> 6); })
+
+static __inline__ __m256 __DEFAULT_FN_ATTRS
+_mm256_permutevar8x32_ps(__m256 __a, __m256i __b)
+{
+ return (__m256)__builtin_ia32_permvarsf256((__v8sf)__a, (__v8si)__b);
+}
+
+#define _mm256_permute4x64_epi64(V, M) __extension__ ({ \
+ (__m256i)__builtin_shufflevector((__v4di)(__m256i)(V), \
+ (__v4di)_mm256_setzero_si256(), \
+ (M) & 0x3, ((M) & 0xc) >> 2, \
+ ((M) & 0x30) >> 4, ((M) & 0xc0) >> 6); })
+
+#define _mm256_permute2x128_si256(V1, V2, M) __extension__ ({ \
+ (__m256i)__builtin_ia32_permti256((__m256i)(V1), (__m256i)(V2), (M)); })
+
+#define _mm256_extracti128_si256(V, M) __extension__ ({ \
+ (__m128i)__builtin_shufflevector((__v4di)(__m256i)(V), \
+ (__v4di)_mm256_setzero_si256(), \
+ (((M) & 1) ? 2 : 0), \
+ (((M) & 1) ? 3 : 1) ); })
+
+#define _mm256_inserti128_si256(V1, V2, M) __extension__ ({ \
+ (__m256i)__builtin_shufflevector((__v4di)(__m256i)(V1), \
+ (__v4di)_mm256_castsi128_si256((__m128i)(V2)), \
+ (((M) & 1) ? 0 : 4), \
+ (((M) & 1) ? 1 : 5), \
+ (((M) & 1) ? 4 : 2), \
+ (((M) & 1) ? 5 : 3) ); })
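+/* A minimal usage sketch (illustrative only), swapping the two 128-bit
+   halves of a vector:
+
+     __m128i hi = _mm256_extracti128_si256(v, 1);
+     __m128i lo = _mm256_extracti128_si256(v, 0);
+     __m256i r  = _mm256_inserti128_si256(_mm256_castsi128_si256(hi), lo, 1);
+*/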
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS
+_mm256_maskload_epi32(int const *__X, __m256i __M)
+{
+ return (__m256i)__builtin_ia32_maskloadd256((const __v8si *)__X, (__v8si)__M);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS
+_mm256_maskload_epi64(long long const *__X, __m256i __M)
+{
+ return (__m256i)__builtin_ia32_maskloadq256((const __v4di *)__X, __M);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_maskload_epi32(int const *__X, __m128i __M)
+{
+ return (__m128i)__builtin_ia32_maskloadd((const __v4si *)__X, (__v4si)__M);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_maskload_epi64(long long const *__X, __m128i __M)
+{
+ return (__m128i)__builtin_ia32_maskloadq((const __v2di *)__X, (__v2di)__M);
+}
+
+static __inline__ void __DEFAULT_FN_ATTRS
+_mm256_maskstore_epi32(int *__X, __m256i __M, __m256i __Y)
+{
+ __builtin_ia32_maskstored256((__v8si *)__X, (__v8si)__M, (__v8si)__Y);
+}
+
+static __inline__ void __DEFAULT_FN_ATTRS
+_mm256_maskstore_epi64(long long *__X, __m256i __M, __m256i __Y)
+{
+ __builtin_ia32_maskstoreq256((__v4di *)__X, __M, __Y);
+}
+
+static __inline__ void __DEFAULT_FN_ATTRS
+_mm_maskstore_epi32(int *__X, __m128i __M, __m128i __Y)
+{
+ __builtin_ia32_maskstored((__v4si *)__X, (__v4si)__M, (__v4si)__Y);
+}
+
+static __inline__ void __DEFAULT_FN_ATTRS
+_mm_maskstore_epi64(long long *__X, __m128i __M, __m128i __Y)
+{
+  __builtin_ia32_maskstoreq((__v2di *)__X, __M, __Y);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS
+_mm256_sllv_epi32(__m256i __X, __m256i __Y)
+{
+ return (__m256i)__builtin_ia32_psllv8si((__v8si)__X, (__v8si)__Y);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_sllv_epi32(__m128i __X, __m128i __Y)
+{
+ return (__m128i)__builtin_ia32_psllv4si((__v4si)__X, (__v4si)__Y);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS
+_mm256_sllv_epi64(__m256i __X, __m256i __Y)
+{
+ return (__m256i)__builtin_ia32_psllv4di(__X, __Y);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_sllv_epi64(__m128i __X, __m128i __Y)
+{
+ return (__m128i)__builtin_ia32_psllv2di(__X, __Y);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS
+_mm256_srav_epi32(__m256i __X, __m256i __Y)
+{
+ return (__m256i)__builtin_ia32_psrav8si((__v8si)__X, (__v8si)__Y);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_srav_epi32(__m128i __X, __m128i __Y)
+{
+ return (__m128i)__builtin_ia32_psrav4si((__v4si)__X, (__v4si)__Y);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS
+_mm256_srlv_epi32(__m256i __X, __m256i __Y)
+{
+ return (__m256i)__builtin_ia32_psrlv8si((__v8si)__X, (__v8si)__Y);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_srlv_epi32(__m128i __X, __m128i __Y)
+{
+ return (__m128i)__builtin_ia32_psrlv4si((__v4si)__X, (__v4si)__Y);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS
+_mm256_srlv_epi64(__m256i __X, __m256i __Y)
+{
+ return (__m256i)__builtin_ia32_psrlv4di(__X, __Y);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_srlv_epi64(__m128i __X, __m128i __Y)
+{
+ return (__m128i)__builtin_ia32_psrlv2di(__X, __Y);
+}
+
+#define _mm_mask_i32gather_pd(a, m, i, mask, s) __extension__ ({ \
+ (__m128d)__builtin_ia32_gatherd_pd((__v2df)(__m128i)(a), \
+ (double const *)(m), \
+ (__v4si)(__m128i)(i), \
+ (__v2df)(__m128d)(mask), (s)); })
+
+#define _mm256_mask_i32gather_pd(a, m, i, mask, s) __extension__ ({ \
+ (__m256d)__builtin_ia32_gatherd_pd256((__v4df)(__m256d)(a), \
+ (double const *)(m), \
+ (__v4si)(__m128i)(i), \
+ (__v4df)(__m256d)(mask), (s)); })
+
+#define _mm_mask_i64gather_pd(a, m, i, mask, s) __extension__ ({ \
+ (__m128d)__builtin_ia32_gatherq_pd((__v2df)(__m128d)(a), \
+ (double const *)(m), \
+ (__v2di)(__m128i)(i), \
+ (__v2df)(__m128d)(mask), (s)); })
+
+#define _mm256_mask_i64gather_pd(a, m, i, mask, s) __extension__ ({ \
+ (__m256d)__builtin_ia32_gatherq_pd256((__v4df)(__m256d)(a), \
+ (double const *)(m), \
+ (__v4di)(__m256i)(i), \
+ (__v4df)(__m256d)(mask), (s)); })
+
+#define _mm_mask_i32gather_ps(a, m, i, mask, s) __extension__ ({ \
+ (__m128)__builtin_ia32_gatherd_ps((__v4sf)(__m128)(a), \
+ (float const *)(m), \
+ (__v4si)(__m128i)(i), \
+ (__v4sf)(__m128)(mask), (s)); })
+
+#define _mm256_mask_i32gather_ps(a, m, i, mask, s) __extension__ ({ \
+ (__m256)__builtin_ia32_gatherd_ps256((__v8sf)(__m256)(a), \
+ (float const *)(m), \
+ (__v8si)(__m256i)(i), \
+ (__v8sf)(__m256)(mask), (s)); })
+
+#define _mm_mask_i64gather_ps(a, m, i, mask, s) __extension__ ({ \
+ (__m128)__builtin_ia32_gatherq_ps((__v4sf)(__m128)(a), \
+ (float const *)(m), \
+ (__v2di)(__m128i)(i), \
+ (__v4sf)(__m128)(mask), (s)); })
+
+#define _mm256_mask_i64gather_ps(a, m, i, mask, s) __extension__ ({ \
+ (__m128)__builtin_ia32_gatherq_ps256((__v4sf)(__m128)(a), \
+ (float const *)(m), \
+ (__v4di)(__m256i)(i), \
+ (__v4sf)(__m128)(mask), (s)); })
+
+#define _mm_mask_i32gather_epi32(a, m, i, mask, s) __extension__ ({ \
+ (__m128i)__builtin_ia32_gatherd_d((__v4si)(__m128i)(a), \
+ (int const *)(m), \
+ (__v4si)(__m128i)(i), \
+ (__v4si)(__m128i)(mask), (s)); })
+
+#define _mm256_mask_i32gather_epi32(a, m, i, mask, s) __extension__ ({ \
+ (__m256i)__builtin_ia32_gatherd_d256((__v8si)(__m256i)(a), \
+ (int const *)(m), \
+ (__v8si)(__m256i)(i), \
+ (__v8si)(__m256i)(mask), (s)); })
+
+#define _mm_mask_i64gather_epi32(a, m, i, mask, s) __extension__ ({ \
+ (__m128i)__builtin_ia32_gatherq_d((__v4si)(__m128i)(a), \
+ (int const *)(m), \
+ (__v2di)(__m128i)(i), \
+ (__v4si)(__m128i)(mask), (s)); })
+
+#define _mm256_mask_i64gather_epi32(a, m, i, mask, s) __extension__ ({ \
+ (__m128i)__builtin_ia32_gatherq_d256((__v4si)(__m128i)(a), \
+ (int const *)(m), \
+ (__v4di)(__m256i)(i), \
+ (__v4si)(__m128i)(mask), (s)); })
+
+#define _mm_mask_i32gather_epi64(a, m, i, mask, s) __extension__ ({ \
+ (__m128i)__builtin_ia32_gatherd_q((__v2di)(__m128i)(a), \
+ (long long const *)(m), \
+ (__v4si)(__m128i)(i), \
+ (__v2di)(__m128i)(mask), (s)); })
+
+#define _mm256_mask_i32gather_epi64(a, m, i, mask, s) __extension__ ({ \
+ (__m256i)__builtin_ia32_gatherd_q256((__v4di)(__m256i)(a), \
+ (long long const *)(m), \
+ (__v4si)(__m128i)(i), \
+ (__v4di)(__m256i)(mask), (s)); })
+
+#define _mm_mask_i64gather_epi64(a, m, i, mask, s) __extension__ ({ \
+ (__m128i)__builtin_ia32_gatherq_q((__v2di)(__m128i)(a), \
+ (long long const *)(m), \
+ (__v2di)(__m128i)(i), \
+ (__v2di)(__m128i)(mask), (s)); })
+
+#define _mm256_mask_i64gather_epi64(a, m, i, mask, s) __extension__ ({ \
+ (__m256i)__builtin_ia32_gatherq_q256((__v4di)(__m256i)(a), \
+ (long long const *)(m), \
+ (__v4di)(__m256i)(i), \
+ (__v4di)(__m256i)(mask), (s)); })
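+/* For the gather macros: `a` supplies the pass-through value for masked-off
+   elements, `m` is the base pointer, `i` holds the per-element indices, the
+   sign bit of each `mask` element enables that element's load, and `s` is
+   the byte scale applied to each index (a constant 1, 2, 4 or 8). The
+   non-masked forms below pass an all-ones mask so every element is loaded,
+   with an undefined pass-through source. */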
+
+#define _mm_i32gather_pd(m, i, s) __extension__ ({ \
+ (__m128d)__builtin_ia32_gatherd_pd((__v2df)_mm_undefined_pd(), \
+ (double const *)(m), \
+ (__v4si)(__m128i)(i), \
+ (__v2df)_mm_cmpeq_pd(_mm_setzero_pd(), \
+ _mm_setzero_pd()), \
+ (s)); })
+
+#define _mm256_i32gather_pd(m, i, s) __extension__ ({ \
+ (__m256d)__builtin_ia32_gatherd_pd256((__v4df)_mm256_undefined_pd(), \
+ (double const *)(m), \
+ (__v4si)(__m128i)(i), \
+ (__v4df)_mm256_cmp_pd(_mm256_setzero_pd(), \
+ _mm256_setzero_pd(), \
+ _CMP_EQ_OQ), \
+ (s)); })
+
+#define _mm_i64gather_pd(m, i, s) __extension__ ({ \
+ (__m128d)__builtin_ia32_gatherq_pd((__v2df)_mm_undefined_pd(), \
+ (double const *)(m), \
+ (__v2di)(__m128i)(i), \
+ (__v2df)_mm_cmpeq_pd(_mm_setzero_pd(), \
+ _mm_setzero_pd()), \
+ (s)); })
+
+#define _mm256_i64gather_pd(m, i, s) __extension__ ({ \
+ (__m256d)__builtin_ia32_gatherq_pd256((__v4df)_mm256_undefined_pd(), \
+ (double const *)(m), \
+ (__v4di)(__m256i)(i), \
+ (__v4df)_mm256_cmp_pd(_mm256_setzero_pd(), \
+ _mm256_setzero_pd(), \
+ _CMP_EQ_OQ), \
+ (s)); })
+
+#define _mm_i32gather_ps(m, i, s) __extension__ ({ \
+ (__m128)__builtin_ia32_gatherd_ps((__v4sf)_mm_undefined_ps(), \
+ (float const *)(m), \
+ (__v4si)(__m128i)(i), \
+ (__v4sf)_mm_cmpeq_ps(_mm_setzero_ps(), \
+ _mm_setzero_ps()), \
+ (s)); })
+
+#define _mm256_i32gather_ps(m, i, s) __extension__ ({ \
+ (__m256)__builtin_ia32_gatherd_ps256((__v8sf)_mm256_undefined_ps(), \
+ (float const *)(m), \
+ (__v8si)(__m256i)(i), \
+ (__v8sf)_mm256_cmp_ps(_mm256_setzero_ps(), \
+ _mm256_setzero_ps(), \
+ _CMP_EQ_OQ), \
+ (s)); })
+
+#define _mm_i64gather_ps(m, i, s) __extension__ ({ \
+ (__m128)__builtin_ia32_gatherq_ps((__v4sf)_mm_undefined_ps(), \
+ (float const *)(m), \
+ (__v2di)(__m128i)(i), \
+ (__v4sf)_mm_cmpeq_ps(_mm_setzero_ps(), \
+ _mm_setzero_ps()), \
+ (s)); })
+
+#define _mm256_i64gather_ps(m, i, s) __extension__ ({ \
+ (__m128)__builtin_ia32_gatherq_ps256((__v4sf)_mm_undefined_ps(), \
+ (float const *)(m), \
+ (__v4di)(__m256i)(i), \
+ (__v4sf)_mm_cmpeq_ps(_mm_setzero_ps(), \
+ _mm_setzero_ps()), \
+ (s)); })
+
+#define _mm_i32gather_epi32(m, i, s) __extension__ ({ \
+ (__m128i)__builtin_ia32_gatherd_d((__v4si)_mm_undefined_si128(), \
+ (int const *)(m), (__v4si)(__m128i)(i), \
+ (__v4si)_mm_set1_epi32(-1), (s)); })
+
+#define _mm256_i32gather_epi32(m, i, s) __extension__ ({ \
+ (__m256i)__builtin_ia32_gatherd_d256((__v8si)_mm256_undefined_si256(), \
+ (int const *)(m), (__v8si)(__m256i)(i), \
+ (__v8si)_mm256_set1_epi32(-1), (s)); })
+
+#define _mm_i64gather_epi32(m, i, s) __extension__ ({ \
+ (__m128i)__builtin_ia32_gatherq_d((__v4si)_mm_undefined_si128(), \
+ (int const *)(m), (__v2di)(__m128i)(i), \
+ (__v4si)_mm_set1_epi32(-1), (s)); })
+
+#define _mm256_i64gather_epi32(m, i, s) __extension__ ({ \
+ (__m128i)__builtin_ia32_gatherq_d256((__v4si)_mm_undefined_si128(), \
+ (int const *)(m), (__v4di)(__m256i)(i), \
+ (__v4si)_mm_set1_epi32(-1), (s)); })
+
+#define _mm_i32gather_epi64(m, i, s) __extension__ ({ \
+ (__m128i)__builtin_ia32_gatherd_q((__v2di)_mm_undefined_si128(), \
+ (long long const *)(m), \
+ (__v4si)(__m128i)(i), \
+ (__v2di)_mm_set1_epi64x(-1), (s)); })
+
+#define _mm256_i32gather_epi64(m, i, s) __extension__ ({ \
+ (__m256i)__builtin_ia32_gatherd_q256((__v4di)_mm256_undefined_si256(), \
+ (long long const *)(m), \
+ (__v4si)(__m128i)(i), \
+ (__v4di)_mm256_set1_epi64x(-1), (s)); })
+
+#define _mm_i64gather_epi64(m, i, s) __extension__ ({ \
+ (__m128i)__builtin_ia32_gatherq_q((__v2di)_mm_undefined_si128(), \
+ (long long const *)(m), \
+ (__v2di)(__m128i)(i), \
+ (__v2di)_mm_set1_epi64x(-1), (s)); })
+
+#define _mm256_i64gather_epi64(m, i, s) __extension__ ({ \
+ (__m256i)__builtin_ia32_gatherq_q256((__v4di)_mm256_undefined_si256(), \
+ (long long const *)(m), \
+ (__v4di)(__m256i)(i), \
+ (__v4di)_mm256_set1_epi64x(-1), (s)); })
+
+#undef __DEFAULT_FN_ATTRS
+
+#endif /* __AVX2INTRIN_H */
diff --git a/clang-2690385/lib64/clang/3.8/include/avx512bwintrin.h b/clang-2690385/lib64/clang/3.8/include/avx512bwintrin.h
new file mode 100644
index 00000000..f289ed71
--- /dev/null
+++ b/clang-2690385/lib64/clang/3.8/include/avx512bwintrin.h
@@ -0,0 +1,1542 @@
+/*===------------- avx512bwintrin.h - AVX512BW intrinsics ------------------===
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+ * THE SOFTWARE.
+ *
+ *===-----------------------------------------------------------------------===
+ */
+#ifndef __IMMINTRIN_H
+#error "Never use <avx512bwintrin.h> directly; include <immintrin.h> instead."
+#endif
+
+#ifndef __AVX512BWINTRIN_H
+#define __AVX512BWINTRIN_H
+
+typedef unsigned int __mmask32;
+typedef unsigned long long __mmask64;
+typedef char __v64qi __attribute__ ((__vector_size__ (64)));
+typedef short __v32hi __attribute__ ((__vector_size__ (64)));
+
+/* Define the default attributes for the functions in this file. */
+#define __DEFAULT_FN_ATTRS __attribute__((__always_inline__, __nodebug__, __target__("avx512bw")))
+
+static __inline __v64qi __DEFAULT_FN_ATTRS
+_mm512_setzero_qi(void) {
+ return (__v64qi){ 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0 };
+}
+
+static __inline __v32hi __DEFAULT_FN_ATTRS
+_mm512_setzero_hi(void) {
+ return (__v32hi){ 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0 };
+}
+
+/* Integer compare */
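+/* The third argument of the cmp*/ucmp* builtins used below is a predicate
+   immediate: 0 = EQ, 1 = LT, 2 = LE, 4 = NE, 5 = NLT (>=), 6 = NLE (>).
+   Each comparison returns one result bit per element, packed into a
+   __mmask32 or __mmask64. */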
+
+static __inline__ __mmask64 __DEFAULT_FN_ATTRS
+_mm512_cmpeq_epi8_mask(__m512i __a, __m512i __b) {
+ return (__mmask64)__builtin_ia32_pcmpeqb512_mask((__v64qi)__a, (__v64qi)__b,
+ (__mmask64)-1);
+}
+
+static __inline__ __mmask64 __DEFAULT_FN_ATTRS
+_mm512_mask_cmpeq_epi8_mask(__mmask64 __u, __m512i __a, __m512i __b) {
+ return (__mmask64)__builtin_ia32_pcmpeqb512_mask((__v64qi)__a, (__v64qi)__b,
+ __u);
+}
+
+static __inline__ __mmask64 __DEFAULT_FN_ATTRS
+_mm512_cmpeq_epu8_mask(__m512i __a, __m512i __b) {
+ return (__mmask64)__builtin_ia32_ucmpb512_mask((__v64qi)__a, (__v64qi)__b, 0,
+ (__mmask64)-1);
+}
+
+static __inline__ __mmask64 __DEFAULT_FN_ATTRS
+_mm512_mask_cmpeq_epu8_mask(__mmask64 __u, __m512i __a, __m512i __b) {
+ return (__mmask64)__builtin_ia32_ucmpb512_mask((__v64qi)__a, (__v64qi)__b, 0,
+ __u);
+}
+
+static __inline__ __mmask32 __DEFAULT_FN_ATTRS
+_mm512_cmpeq_epi16_mask(__m512i __a, __m512i __b) {
+ return (__mmask32)__builtin_ia32_pcmpeqw512_mask((__v32hi)__a, (__v32hi)__b,
+ (__mmask32)-1);
+}
+
+static __inline__ __mmask32 __DEFAULT_FN_ATTRS
+_mm512_mask_cmpeq_epi16_mask(__mmask32 __u, __m512i __a, __m512i __b) {
+ return (__mmask32)__builtin_ia32_pcmpeqw512_mask((__v32hi)__a, (__v32hi)__b,
+ __u);
+}
+
+static __inline__ __mmask32 __DEFAULT_FN_ATTRS
+_mm512_cmpeq_epu16_mask(__m512i __a, __m512i __b) {
+ return (__mmask32)__builtin_ia32_ucmpw512_mask((__v32hi)__a, (__v32hi)__b, 0,
+ (__mmask32)-1);
+}
+
+static __inline__ __mmask32 __DEFAULT_FN_ATTRS
+_mm512_mask_cmpeq_epu16_mask(__mmask32 __u, __m512i __a, __m512i __b) {
+ return (__mmask32)__builtin_ia32_ucmpw512_mask((__v32hi)__a, (__v32hi)__b, 0,
+ __u);
+}
+
+static __inline__ __mmask64 __DEFAULT_FN_ATTRS
+_mm512_cmpge_epi8_mask(__m512i __a, __m512i __b) {
+ return (__mmask64)__builtin_ia32_cmpb512_mask((__v64qi)__a, (__v64qi)__b, 5,
+ (__mmask64)-1);
+}
+
+static __inline__ __mmask64 __DEFAULT_FN_ATTRS
+_mm512_mask_cmpge_epi8_mask(__mmask64 __u, __m512i __a, __m512i __b) {
+ return (__mmask64)__builtin_ia32_cmpb512_mask((__v64qi)__a, (__v64qi)__b, 5,
+ __u);
+}
+
+static __inline__ __mmask64 __DEFAULT_FN_ATTRS
+_mm512_cmpge_epu8_mask(__m512i __a, __m512i __b) {
+ return (__mmask64)__builtin_ia32_ucmpb512_mask((__v64qi)__a, (__v64qi)__b, 5,
+ (__mmask64)-1);
+}
+
+static __inline__ __mmask64 __DEFAULT_FN_ATTRS
+_mm512_mask_cmpge_epu8_mask(__mmask64 __u, __m512i __a, __m512i __b) {
+ return (__mmask64)__builtin_ia32_ucmpb512_mask((__v64qi)__a, (__v64qi)__b, 5,
+ __u);
+}
+
+static __inline__ __mmask32 __DEFAULT_FN_ATTRS
+_mm512_cmpge_epi16_mask(__m512i __a, __m512i __b) {
+ return (__mmask32)__builtin_ia32_cmpw512_mask((__v32hi)__a, (__v32hi)__b, 5,
+ (__mmask32)-1);
+}
+
+static __inline__ __mmask32 __DEFAULT_FN_ATTRS
+_mm512_mask_cmpge_epi16_mask(__mmask32 __u, __m512i __a, __m512i __b) {
+ return (__mmask32)__builtin_ia32_cmpw512_mask((__v32hi)__a, (__v32hi)__b, 5,
+ __u);
+}
+
+static __inline__ __mmask32 __DEFAULT_FN_ATTRS
+_mm512_cmpge_epu16_mask(__m512i __a, __m512i __b) {
+ return (__mmask32)__builtin_ia32_ucmpw512_mask((__v32hi)__a, (__v32hi)__b, 5,
+ (__mmask32)-1);
+}
+
+static __inline__ __mmask32 __DEFAULT_FN_ATTRS
+_mm512_mask_cmpge_epu16_mask(__mmask32 __u, __m512i __a, __m512i __b) {
+ return (__mmask32)__builtin_ia32_ucmpw512_mask((__v32hi)__a, (__v32hi)__b, 5,
+ __u);
+}
+
+static __inline__ __mmask64 __DEFAULT_FN_ATTRS
+_mm512_cmpgt_epi8_mask(__m512i __a, __m512i __b) {
+ return (__mmask64)__builtin_ia32_pcmpgtb512_mask((__v64qi)__a, (__v64qi)__b,
+ (__mmask64)-1);
+}
+
+static __inline__ __mmask64 __DEFAULT_FN_ATTRS
+_mm512_mask_cmpgt_epi8_mask(__mmask64 __u, __m512i __a, __m512i __b) {
+ return (__mmask64)__builtin_ia32_pcmpgtb512_mask((__v64qi)__a, (__v64qi)__b,
+ __u);
+}
+
+static __inline__ __mmask64 __DEFAULT_FN_ATTRS
+_mm512_cmpgt_epu8_mask(__m512i __a, __m512i __b) {
+ return (__mmask64)__builtin_ia32_ucmpb512_mask((__v64qi)__a, (__v64qi)__b, 6,
+ (__mmask64)-1);
+}
+
+static __inline__ __mmask64 __DEFAULT_FN_ATTRS
+_mm512_mask_cmpgt_epu8_mask(__mmask64 __u, __m512i __a, __m512i __b) {
+ return (__mmask64)__builtin_ia32_ucmpb512_mask((__v64qi)__a, (__v64qi)__b, 6,
+ __u);
+}
+
+static __inline__ __mmask32 __DEFAULT_FN_ATTRS
+_mm512_cmpgt_epi16_mask(__m512i __a, __m512i __b) {
+ return (__mmask32)__builtin_ia32_pcmpgtw512_mask((__v32hi)__a, (__v32hi)__b,
+ (__mmask32)-1);
+}
+
+static __inline__ __mmask32 __DEFAULT_FN_ATTRS
+_mm512_mask_cmpgt_epi16_mask(__mmask32 __u, __m512i __a, __m512i __b) {
+ return (__mmask32)__builtin_ia32_pcmpgtw512_mask((__v32hi)__a, (__v32hi)__b,
+ __u);
+}
+
+static __inline__ __mmask32 __DEFAULT_FN_ATTRS
+_mm512_cmpgt_epu16_mask(__m512i __a, __m512i __b) {
+ return (__mmask32)__builtin_ia32_ucmpw512_mask((__v32hi)__a, (__v32hi)__b, 6,
+ (__mmask32)-1);
+}
+
+static __inline__ __mmask32 __DEFAULT_FN_ATTRS
+_mm512_mask_cmpgt_epu16_mask(__mmask32 __u, __m512i __a, __m512i __b) {
+ return (__mmask32)__builtin_ia32_ucmpw512_mask((__v32hi)__a, (__v32hi)__b, 6,
+ __u);
+}
+
+static __inline__ __mmask64 __DEFAULT_FN_ATTRS
+_mm512_cmple_epi8_mask(__m512i __a, __m512i __b) {
+ return (__mmask64)__builtin_ia32_cmpb512_mask((__v64qi)__a, (__v64qi)__b, 2,
+ (__mmask64)-1);
+}
+
+static __inline__ __mmask64 __DEFAULT_FN_ATTRS
+_mm512_mask_cmple_epi8_mask(__mmask64 __u, __m512i __a, __m512i __b) {
+ return (__mmask64)__builtin_ia32_cmpb512_mask((__v64qi)__a, (__v64qi)__b, 2,
+ __u);
+}
+
+static __inline__ __mmask64 __DEFAULT_FN_ATTRS
+_mm512_cmple_epu8_mask(__m512i __a, __m512i __b) {
+ return (__mmask64)__builtin_ia32_ucmpb512_mask((__v64qi)__a, (__v64qi)__b, 2,
+ (__mmask64)-1);
+}
+
+static __inline__ __mmask64 __DEFAULT_FN_ATTRS
+_mm512_mask_cmple_epu8_mask(__mmask64 __u, __m512i __a, __m512i __b) {
+ return (__mmask64)__builtin_ia32_ucmpb512_mask((__v64qi)__a, (__v64qi)__b, 2,
+ __u);
+}
+
+static __inline__ __mmask32 __DEFAULT_FN_ATTRS
+_mm512_cmple_epi16_mask(__m512i __a, __m512i __b) {
+ return (__mmask32)__builtin_ia32_cmpw512_mask((__v32hi)__a, (__v32hi)__b, 2,
+ (__mmask32)-1);
+}
+
+static __inline__ __mmask32 __DEFAULT_FN_ATTRS
+_mm512_mask_cmple_epi16_mask(__mmask32 __u, __m512i __a, __m512i __b) {
+ return (__mmask32)__builtin_ia32_cmpw512_mask((__v32hi)__a, (__v32hi)__b, 2,
+ __u);
+}
+
+static __inline__ __mmask32 __DEFAULT_FN_ATTRS
+_mm512_cmple_epu16_mask(__m512i __a, __m512i __b) {
+ return (__mmask32)__builtin_ia32_ucmpw512_mask((__v32hi)__a, (__v32hi)__b, 2,
+ (__mmask32)-1);
+}
+
+static __inline__ __mmask32 __DEFAULT_FN_ATTRS
+_mm512_mask_cmple_epu16_mask(__mmask32 __u, __m512i __a, __m512i __b) {
+ return (__mmask32)__builtin_ia32_ucmpw512_mask((__v32hi)__a, (__v32hi)__b, 2,
+ __u);
+}
+
+static __inline__ __mmask64 __DEFAULT_FN_ATTRS
+_mm512_cmplt_epi8_mask(__m512i __a, __m512i __b) {
+ return (__mmask64)__builtin_ia32_cmpb512_mask((__v64qi)__a, (__v64qi)__b, 1,
+ (__mmask64)-1);
+}
+
+static __inline__ __mmask64 __DEFAULT_FN_ATTRS
+_mm512_mask_cmplt_epi8_mask(__mmask64 __u, __m512i __a, __m512i __b) {
+ return (__mmask64)__builtin_ia32_cmpb512_mask((__v64qi)__a, (__v64qi)__b, 1,
+ __u);
+}
+
+static __inline__ __mmask64 __DEFAULT_FN_ATTRS
+_mm512_cmplt_epu8_mask(__m512i __a, __m512i __b) {
+ return (__mmask64)__builtin_ia32_ucmpb512_mask((__v64qi)__a, (__v64qi)__b, 1,
+ (__mmask64)-1);
+}
+
+static __inline__ __mmask64 __DEFAULT_FN_ATTRS
+_mm512_mask_cmplt_epu8_mask(__mmask64 __u, __m512i __a, __m512i __b) {
+ return (__mmask64)__builtin_ia32_ucmpb512_mask((__v64qi)__a, (__v64qi)__b, 1,
+ __u);
+}
+
+static __inline__ __mmask32 __DEFAULT_FN_ATTRS
+_mm512_cmplt_epi16_mask(__m512i __a, __m512i __b) {
+ return (__mmask32)__builtin_ia32_cmpw512_mask((__v32hi)__a, (__v32hi)__b, 1,
+ (__mmask32)-1);
+}
+
+static __inline__ __mmask32 __DEFAULT_FN_ATTRS
+_mm512_mask_cmplt_epi16_mask(__mmask32 __u, __m512i __a, __m512i __b) {
+ return (__mmask32)__builtin_ia32_cmpw512_mask((__v32hi)__a, (__v32hi)__b, 1,
+ __u);
+}
+
+static __inline__ __mmask32 __DEFAULT_FN_ATTRS
+_mm512_cmplt_epu16_mask(__m512i __a, __m512i __b) {
+ return (__mmask32)__builtin_ia32_ucmpw512_mask((__v32hi)__a, (__v32hi)__b, 1,
+ (__mmask32)-1);
+}
+
+static __inline__ __mmask32 __DEFAULT_FN_ATTRS
+_mm512_mask_cmplt_epu16_mask(__mmask32 __u, __m512i __a, __m512i __b) {
+ return (__mmask32)__builtin_ia32_ucmpw512_mask((__v32hi)__a, (__v32hi)__b, 1,
+ __u);
+}
+
+static __inline__ __mmask64 __DEFAULT_FN_ATTRS
+_mm512_cmpneq_epi8_mask(__m512i __a, __m512i __b) {
+ return (__mmask64)__builtin_ia32_cmpb512_mask((__v64qi)__a, (__v64qi)__b, 4,
+ (__mmask64)-1);
+}
+
+static __inline__ __mmask64 __DEFAULT_FN_ATTRS
+_mm512_mask_cmpneq_epi8_mask(__mmask64 __u, __m512i __a, __m512i __b) {
+ return (__mmask64)__builtin_ia32_cmpb512_mask((__v64qi)__a, (__v64qi)__b, 4,
+ __u);
+}
+
+static __inline__ __mmask64 __DEFAULT_FN_ATTRS
+_mm512_cmpneq_epu8_mask(__m512i __a, __m512i __b) {
+ return (__mmask64)__builtin_ia32_ucmpb512_mask((__v64qi)__a, (__v64qi)__b, 4,
+ (__mmask64)-1);
+}
+
+static __inline__ __mmask64 __DEFAULT_FN_ATTRS
+_mm512_mask_cmpneq_epu8_mask(__mmask64 __u, __m512i __a, __m512i __b) {
+ return (__mmask64)__builtin_ia32_ucmpb512_mask((__v64qi)__a, (__v64qi)__b, 4,
+ __u);
+}
+
+static __inline__ __mmask32 __DEFAULT_FN_ATTRS
+_mm512_cmpneq_epi16_mask(__m512i __a, __m512i __b) {
+ return (__mmask32)__builtin_ia32_cmpw512_mask((__v32hi)__a, (__v32hi)__b, 4,
+ (__mmask32)-1);
+}
+
+static __inline__ __mmask32 __DEFAULT_FN_ATTRS
+_mm512_mask_cmpneq_epi16_mask(__mmask32 __u, __m512i __a, __m512i __b) {
+ return (__mmask32)__builtin_ia32_cmpw512_mask((__v32hi)__a, (__v32hi)__b, 4,
+ __u);
+}
+
+static __inline__ __mmask32 __DEFAULT_FN_ATTRS
+_mm512_cmpneq_epu16_mask(__m512i __a, __m512i __b) {
+ return (__mmask32)__builtin_ia32_ucmpw512_mask((__v32hi)__a, (__v32hi)__b, 4,
+ (__mmask32)-1);
+}
+
+static __inline__ __mmask32 __DEFAULT_FN_ATTRS
+_mm512_mask_cmpneq_epu16_mask(__mmask32 __u, __m512i __a, __m512i __b) {
+ return (__mmask32)__builtin_ia32_ucmpw512_mask((__v32hi)__a, (__v32hi)__b, 4,
+ __u);
+}
+
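+/* For the __builtin_ia32_[u]cmp{b,w}512_mask builtins used above and in the
+ * _mm512_cmp_* macros at the end of this file, the immediate argument selects
+ * the comparison predicate (the _MM_CMPINT_* encoding): 0 = EQ, 1 = LT,
+ * 2 = LE, 4 = NE, 5 = NLT (>=), 6 = NLE (>). The cmp and ucmp forms differ
+ * only in whether elements are compared as signed or unsigned integers. */
+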
+static __inline__ __m512i __DEFAULT_FN_ATTRS
+_mm512_add_epi8 (__m512i __A, __m512i __B) {
+ return (__m512i) ((__v64qi) __A + (__v64qi) __B);
+}
+
+static __inline__ __m512i __DEFAULT_FN_ATTRS
+_mm512_mask_add_epi8 (__m512i __W, __mmask64 __U, __m512i __A, __m512i __B) {
+ return (__m512i) __builtin_ia32_paddb512_mask ((__v64qi) __A,
+ (__v64qi) __B,
+ (__v64qi) __W,
+ (__mmask64) __U);
+}
+
+static __inline__ __m512i __DEFAULT_FN_ATTRS
+_mm512_maskz_add_epi8 (__mmask64 __U, __m512i __A, __m512i __B) {
+ return (__m512i) __builtin_ia32_paddb512_mask ((__v64qi) __A,
+ (__v64qi) __B,
+ (__v64qi) _mm512_setzero_qi(),
+ (__mmask64) __U);
+}
+
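+/* As elsewhere in the AVX-512 headers, each operation comes in three forms:
+ * the plain form computes every lane, the _mask_ form writes a result lane
+ * only where the corresponding bit of __U is set and copies __W elsewhere,
+ * and the _maskz_ form zeroes the lanes where __U is clear. */
+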
+static __inline__ __m512i __DEFAULT_FN_ATTRS
+_mm512_sub_epi8 (__m512i __A, __m512i __B) {
+ return (__m512i) ((__v64qi) __A - (__v64qi) __B);
+}
+
+static __inline__ __m512i __DEFAULT_FN_ATTRS
+_mm512_mask_sub_epi8 (__m512i __W, __mmask64 __U, __m512i __A, __m512i __B) {
+ return (__m512i) __builtin_ia32_psubb512_mask ((__v64qi) __A,
+ (__v64qi) __B,
+ (__v64qi) __W,
+ (__mmask64) __U);
+}
+
+static __inline__ __m512i __DEFAULT_FN_ATTRS
+_mm512_maskz_sub_epi8 (__mmask64 __U, __m512i __A, __m512i __B) {
+ return (__m512i) __builtin_ia32_psubb512_mask ((__v64qi) __A,
+ (__v64qi) __B,
+ (__v64qi) _mm512_setzero_qi(),
+ (__mmask64) __U);
+}
+
+static __inline__ __m512i __DEFAULT_FN_ATTRS
+_mm512_add_epi16 (__m512i __A, __m512i __B) {
+ return (__m512i) ((__v32hi) __A + (__v32hi) __B);
+}
+
+static __inline__ __m512i __DEFAULT_FN_ATTRS
+_mm512_mask_add_epi16 (__m512i __W, __mmask32 __U, __m512i __A, __m512i __B) {
+ return (__m512i) __builtin_ia32_paddw512_mask ((__v32hi) __A,
+ (__v32hi) __B,
+ (__v32hi) __W,
+ (__mmask32) __U);
+}
+
+static __inline__ __m512i __DEFAULT_FN_ATTRS
+_mm512_maskz_add_epi16 (__mmask32 __U, __m512i __A, __m512i __B) {
+ return (__m512i) __builtin_ia32_paddw512_mask ((__v32hi) __A,
+ (__v32hi) __B,
+ (__v32hi) _mm512_setzero_hi(),
+ (__mmask32) __U);
+}
+
+static __inline__ __m512i __DEFAULT_FN_ATTRS
+_mm512_sub_epi16 (__m512i __A, __m512i __B) {
+ return (__m512i) ((__v32hi) __A - (__v32hi) __B);
+}
+
+static __inline__ __m512i __DEFAULT_FN_ATTRS
+_mm512_mask_sub_epi16 (__m512i __W, __mmask32 __U, __m512i __A, __m512i __B) {
+ return (__m512i) __builtin_ia32_psubw512_mask ((__v32hi) __A,
+ (__v32hi) __B,
+ (__v32hi) __W,
+ (__mmask32) __U);
+}
+
+static __inline__ __m512i __DEFAULT_FN_ATTRS
+_mm512_maskz_sub_epi16 (__mmask32 __U, __m512i __A, __m512i __B) {
+ return (__m512i) __builtin_ia32_psubw512_mask ((__v32hi) __A,
+ (__v32hi) __B,
+ (__v32hi) _mm512_setzero_hi(),
+ (__mmask32) __U);
+}
+
+static __inline__ __m512i __DEFAULT_FN_ATTRS
+_mm512_mullo_epi16 (__m512i __A, __m512i __B) {
+ return (__m512i) ((__v32hi) __A * (__v32hi) __B);
+}
+
+static __inline__ __m512i __DEFAULT_FN_ATTRS
+_mm512_mask_mullo_epi16 (__m512i __W, __mmask32 __U, __m512i __A, __m512i __B) {
+ return (__m512i) __builtin_ia32_pmullw512_mask ((__v32hi) __A,
+ (__v32hi) __B,
+ (__v32hi) __W,
+ (__mmask32) __U);
+}
+
+static __inline__ __m512i __DEFAULT_FN_ATTRS
+_mm512_maskz_mullo_epi16 (__mmask32 __U, __m512i __A, __m512i __B) {
+ return (__m512i) __builtin_ia32_pmullw512_mask ((__v32hi) __A,
+ (__v32hi) __B,
+ (__v32hi) _mm512_setzero_hi(),
+ (__mmask32) __U);
+}
+
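+/* vpblendm: each result lane takes __W where the corresponding bit of __U is
+ * set and __A where it is clear. */
+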
+static __inline__ __m512i __DEFAULT_FN_ATTRS
+_mm512_mask_blend_epi8 (__mmask64 __U, __m512i __A, __m512i __W)
+{
+ return (__m512i) __builtin_ia32_blendmb_512_mask ((__v64qi) __A,
+ (__v64qi) __W,
+ (__mmask64) __U);
+}
+
+static __inline__ __m512i __DEFAULT_FN_ATTRS
+_mm512_mask_blend_epi16 (__mmask32 __U, __m512i __A, __m512i __W)
+{
+ return (__m512i) __builtin_ia32_blendmw_512_mask ((__v32hi) __A,
+ (__v32hi) __W,
+ (__mmask32) __U);
+}
+
+static __inline__ __m512i __DEFAULT_FN_ATTRS
+_mm512_abs_epi8 (__m512i __A)
+{
+ return (__m512i) __builtin_ia32_pabsb512_mask ((__v64qi) __A,
+ (__v64qi) _mm512_setzero_qi(),
+ (__mmask64) -1);
+}
+
+static __inline__ __m512i __DEFAULT_FN_ATTRS
+_mm512_mask_abs_epi8 (__m512i __W, __mmask64 __U, __m512i __A)
+{
+ return (__m512i) __builtin_ia32_pabsb512_mask ((__v64qi) __A,
+ (__v64qi) __W,
+ (__mmask64) __U);
+}
+
+static __inline__ __m512i __DEFAULT_FN_ATTRS
+_mm512_maskz_abs_epi8 (__mmask64 __U, __m512i __A)
+{
+ return (__m512i) __builtin_ia32_pabsb512_mask ((__v64qi) __A,
+ (__v64qi) _mm512_setzero_qi(),
+ (__mmask64) __U);
+}
+
+static __inline__ __m512i __DEFAULT_FN_ATTRS
+_mm512_abs_epi16 (__m512i __A)
+{
+ return (__m512i) __builtin_ia32_pabsw512_mask ((__v32hi) __A,
+ (__v32hi) _mm512_setzero_hi(),
+ (__mmask32) -1);
+}
+
+static __inline__ __m512i __DEFAULT_FN_ATTRS
+_mm512_mask_abs_epi16 (__m512i __W, __mmask32 __U, __m512i __A)
+{
+ return (__m512i) __builtin_ia32_pabsw512_mask ((__v32hi) __A,
+ (__v32hi) __W,
+ (__mmask32) __U);
+}
+
+static __inline__ __m512i __DEFAULT_FN_ATTRS
+_mm512_maskz_abs_epi16 (__mmask32 __U, __m512i __A)
+{
+ return (__m512i) __builtin_ia32_pabsw512_mask ((__v32hi) __A,
+ (__v32hi) _mm512_setzero_hi(),
+ (__mmask32) __U);
+}
+
+static __inline__ __m512i __DEFAULT_FN_ATTRS
+_mm512_packs_epi32 (__m512i __A, __m512i __B)
+{
+ return (__m512i) __builtin_ia32_packssdw512_mask ((__v16si) __A,
+ (__v16si) __B,
+ (__v32hi) _mm512_setzero_hi(),
+ (__mmask32) -1);
+}
+
+static __inline__ __m512i __DEFAULT_FN_ATTRS
+_mm512_maskz_packs_epi32 (__mmask32 __M, __m512i __A, __m512i __B)
+{
+ return (__m512i) __builtin_ia32_packssdw512_mask ((__v16si) __A,
+ (__v16si) __B,
+ (__v32hi) _mm512_setzero_hi(),
+ __M);
+}
+
+static __inline__ __m512i __DEFAULT_FN_ATTRS
+_mm512_mask_packs_epi32 (__m512i __W, __mmask32 __M, __m512i __A,
+ __m512i __B)
+{
+ return (__m512i) __builtin_ia32_packssdw512_mask ((__v16si) __A,
+ (__v16si) __B,
+ (__v32hi) __W,
+ __M);
+}
+
+static __inline__ __m512i __DEFAULT_FN_ATTRS
+_mm512_packs_epi16 (__m512i __A, __m512i __B)
+{
+ return (__m512i) __builtin_ia32_packsswb512_mask ((__v32hi) __A,
+ (__v32hi) __B,
+ (__v64qi) _mm512_setzero_qi(),
+ (__mmask64) -1);
+}
+
+static __inline__ __m512i __DEFAULT_FN_ATTRS
+_mm512_mask_packs_epi16 (__m512i __W, __mmask64 __M, __m512i __A,
+ __m512i __B)
+{
+ return (__m512i) __builtin_ia32_packsswb512_mask ((__v32hi) __A,
+ (__v32hi) __B,
+ (__v64qi) __W,
+ (__mmask64) __M);
+}
+
+static __inline__ __m512i __DEFAULT_FN_ATTRS
+_mm512_maskz_packs_epi16 (__mmask64 __M, __m512i __A, __m512i __B)
+{
+ return (__m512i) __builtin_ia32_packsswb512_mask ((__v32hi) __A,
+ (__v32hi) __B,
+ (__v64qi) _mm512_setzero_qi(),
+ __M);
+}
+
+static __inline__ __m512i __DEFAULT_FN_ATTRS
+_mm512_packus_epi32 (__m512i __A, __m512i __B)
+{
+ return (__m512i) __builtin_ia32_packusdw512_mask ((__v16si) __A,
+ (__v16si) __B,
+ (__v32hi) _mm512_setzero_hi(),
+ (__mmask32) -1);
+}
+
+static __inline__ __m512i __DEFAULT_FN_ATTRS
+_mm512_maskz_packus_epi32 (__mmask32 __M, __m512i __A, __m512i __B)
+{
+ return (__m512i) __builtin_ia32_packusdw512_mask ((__v16si) __A,
+ (__v16si) __B,
+ (__v32hi) _mm512_setzero_hi(),
+ __M);
+}
+
+static __inline__ __m512i __DEFAULT_FN_ATTRS
+_mm512_mask_packus_epi32 (__m512i __W, __mmask32 __M, __m512i __A,
+ __m512i __B)
+{
+ return (__m512i) __builtin_ia32_packusdw512_mask ((__v16si) __A,
+ (__v16si) __B,
+ (__v32hi) __W,
+ __M);
+}
+
+static __inline__ __m512i __DEFAULT_FN_ATTRS
+_mm512_packus_epi16 (__m512i __A, __m512i __B)
+{
+ return (__m512i) __builtin_ia32_packuswb512_mask ((__v32hi) __A,
+ (__v32hi) __B,
+ (__v64qi) _mm512_setzero_qi(),
+ (__mmask64) -1);
+}
+
+static __inline__ __m512i __DEFAULT_FN_ATTRS
+_mm512_mask_packus_epi16 (__m512i __W, __mmask64 __M, __m512i __A,
+ __m512i __B)
+{
+ return (__m512i) __builtin_ia32_packuswb512_mask ((__v32hi) __A,
+ (__v32hi) __B,
+ (__v64qi) __W,
+ (__mmask64) __M);
+}
+
+static __inline__ __m512i __DEFAULT_FN_ATTRS
+_mm512_maskz_packus_epi16 (__mmask64 __M, __m512i __A, __m512i __B)
+{
+ return (__m512i) __builtin_ia32_packuswb512_mask ((__v32hi) __A,
+ (__v32hi) __B,
+ (__v64qi) _mm512_setzero_qi(),
+ (__mmask64) __M);
+}
+
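+/* The pack operations narrow each pair of source vectors lane by lane:
+ * packs_epi32 converts dwords to words and packs_epi16 converts words to
+ * bytes with signed saturation, while the packus forms saturate to the
+ * unsigned range. As with SSE2/AVX2, the interleaving happens within each
+ * 128-bit lane. */
+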
+static __inline__ __m512i __DEFAULT_FN_ATTRS
+_mm512_adds_epi8 (__m512i __A, __m512i __B)
+{
+ return (__m512i) __builtin_ia32_paddsb512_mask ((__v64qi) __A,
+ (__v64qi) __B,
+ (__v64qi) _mm512_setzero_qi(),
+ (__mmask64) -1);
+}
+
+static __inline__ __m512i __DEFAULT_FN_ATTRS
+_mm512_mask_adds_epi8 (__m512i __W, __mmask64 __U, __m512i __A,
+ __m512i __B)
+{
+ return (__m512i) __builtin_ia32_paddsb512_mask ((__v64qi) __A,
+ (__v64qi) __B,
+ (__v64qi) __W,
+ (__mmask64) __U);
+}
+
+static __inline__ __m512i __DEFAULT_FN_ATTRS
+_mm512_maskz_adds_epi8 (__mmask64 __U, __m512i __A, __m512i __B)
+{
+ return (__m512i) __builtin_ia32_paddsb512_mask ((__v64qi) __A,
+ (__v64qi) __B,
+ (__v64qi) _mm512_setzero_qi(),
+ (__mmask64) __U);
+}
+
+static __inline__ __m512i __DEFAULT_FN_ATTRS
+_mm512_adds_epi16 (__m512i __A, __m512i __B)
+{
+ return (__m512i) __builtin_ia32_paddsw512_mask ((__v32hi) __A,
+ (__v32hi) __B,
+ (__v32hi) _mm512_setzero_hi(),
+ (__mmask32) -1);
+}
+
+static __inline__ __m512i __DEFAULT_FN_ATTRS
+_mm512_mask_adds_epi16 (__m512i __W, __mmask32 __U, __m512i __A,
+ __m512i __B)
+{
+ return (__m512i) __builtin_ia32_paddsw512_mask ((__v32hi) __A,
+ (__v32hi) __B,
+ (__v32hi) __W,
+ (__mmask32) __U);
+}
+
+static __inline__ __m512i __DEFAULT_FN_ATTRS
+_mm512_maskz_adds_epi16 (__mmask32 __U, __m512i __A, __m512i __B)
+{
+ return (__m512i) __builtin_ia32_paddsw512_mask ((__v32hi) __A,
+ (__v32hi) __B,
+ (__v32hi) _mm512_setzero_hi(),
+ (__mmask32) __U);
+}
+
+static __inline__ __m512i __DEFAULT_FN_ATTRS
+_mm512_adds_epu8 (__m512i __A, __m512i __B)
+{
+ return (__m512i) __builtin_ia32_paddusb512_mask ((__v64qi) __A,
+ (__v64qi) __B,
+ (__v64qi) _mm512_setzero_qi(),
+ (__mmask64) -1);
+}
+
+static __inline__ __m512i __DEFAULT_FN_ATTRS
+_mm512_mask_adds_epu8 (__m512i __W, __mmask64 __U, __m512i __A,
+ __m512i __B)
+{
+ return (__m512i) __builtin_ia32_paddusb512_mask ((__v64qi) __A,
+ (__v64qi) __B,
+ (__v64qi) __W,
+ (__mmask64) __U);
+}
+
+static __inline__ __m512i __DEFAULT_FN_ATTRS
+_mm512_maskz_adds_epu8 (__mmask64 __U, __m512i __A, __m512i __B)
+{
+ return (__m512i) __builtin_ia32_paddusb512_mask ((__v64qi) __A,
+ (__v64qi) __B,
+ (__v64qi) _mm512_setzero_qi(),
+ (__mmask64) __U);
+}
+
+static __inline__ __m512i __DEFAULT_FN_ATTRS
+_mm512_adds_epu16 (__m512i __A, __m512i __B)
+{
+ return (__m512i) __builtin_ia32_paddusw512_mask ((__v32hi) __A,
+ (__v32hi) __B,
+ (__v32hi) _mm512_setzero_hi(),
+ (__mmask32) -1);
+}
+
+static __inline__ __m512i __DEFAULT_FN_ATTRS
+_mm512_mask_adds_epu16 (__m512i __W, __mmask32 __U, __m512i __A,
+ __m512i __B)
+{
+ return (__m512i) __builtin_ia32_paddusw512_mask ((__v32hi) __A,
+ (__v32hi) __B,
+ (__v32hi) __W,
+ (__mmask32) __U);
+}
+
+static __inline__ __m512i __DEFAULT_FN_ATTRS
+_mm512_maskz_adds_epu16 (__mmask32 __U, __m512i __A, __m512i __B)
+{
+ return (__m512i) __builtin_ia32_paddusw512_mask ((__v32hi) __A,
+ (__v32hi) __B,
+ (__v32hi) _mm512_setzero_hi(),
+ (__mmask32) __U);
+}
+
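+/* The adds family (and the subs family below) performs saturating
+ * arithmetic: results are clamped to the signed or unsigned range of the
+ * element type instead of wrapping. */
+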
+static __inline__ __m512i __DEFAULT_FN_ATTRS
+_mm512_avg_epu8 (__m512i __A, __m512i __B)
+{
+ return (__m512i) __builtin_ia32_pavgb512_mask ((__v64qi) __A,
+ (__v64qi) __B,
+ (__v64qi) _mm512_setzero_qi(),
+ (__mmask64) -1);
+}
+
+static __inline__ __m512i __DEFAULT_FN_ATTRS
+_mm512_mask_avg_epu8 (__m512i __W, __mmask64 __U, __m512i __A,
+ __m512i __B)
+{
+ return (__m512i) __builtin_ia32_pavgb512_mask ((__v64qi) __A,
+ (__v64qi) __B,
+ (__v64qi) __W,
+ (__mmask64) __U);
+}
+
+static __inline__ __m512i __DEFAULT_FN_ATTRS
+_mm512_maskz_avg_epu8 (__mmask64 __U, __m512i __A, __m512i __B)
+{
+ return (__m512i) __builtin_ia32_pavgb512_mask ((__v64qi) __A,
+ (__v64qi) __B,
+ (__v64qi) _mm512_setzero_qi(),
+ (__mmask64) __U);
+}
+
+static __inline__ __m512i __DEFAULT_FN_ATTRS
+_mm512_avg_epu16 (__m512i __A, __m512i __B)
+{
+ return (__m512i) __builtin_ia32_pavgw512_mask ((__v32hi) __A,
+ (__v32hi) __B,
+ (__v32hi) _mm512_setzero_hi(),
+ (__mmask32) -1);
+}
+
+static __inline__ __m512i __DEFAULT_FN_ATTRS
+_mm512_mask_avg_epu16 (__m512i __W, __mmask32 __U, __m512i __A,
+ __m512i __B)
+{
+ return (__m512i) __builtin_ia32_pavgw512_mask ((__v32hi) __A,
+ (__v32hi) __B,
+ (__v32hi) __W,
+ (__mmask32) __U);
+}
+
+static __inline__ __m512i __DEFAULT_FN_ATTRS
+_mm512_maskz_avg_epu16 (__mmask32 __U, __m512i __A, __m512i __B)
+{
+ return (__m512i) __builtin_ia32_pavgw512_mask ((__v32hi) __A,
+ (__v32hi) __B,
+ (__v32hi) _mm512_setzero_hi(),
+ (__mmask32) __U);
+}
+
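+/* pavg computes the rounded unsigned average (__A + __B + 1) >> 1 without
+ * overflow of the intermediate sum. */
+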
+static __inline__ __m512i __DEFAULT_FN_ATTRS
+_mm512_max_epi8 (__m512i __A, __m512i __B)
+{
+ return (__m512i) __builtin_ia32_pmaxsb512_mask ((__v64qi) __A,
+ (__v64qi) __B,
+ (__v64qi) _mm512_setzero_qi(),
+ (__mmask64) -1);
+}
+
+static __inline__ __m512i __DEFAULT_FN_ATTRS
+_mm512_maskz_max_epi8 (__mmask64 __M, __m512i __A, __m512i __B)
+{
+ return (__m512i) __builtin_ia32_pmaxsb512_mask ((__v64qi) __A,
+ (__v64qi) __B,
+ (__v64qi) _mm512_setzero_qi(),
+ (__mmask64) __M);
+}
+
+static __inline__ __m512i __DEFAULT_FN_ATTRS
+_mm512_mask_max_epi8 (__m512i __W, __mmask64 __M, __m512i __A,
+ __m512i __B)
+{
+ return (__m512i) __builtin_ia32_pmaxsb512_mask ((__v64qi) __A,
+ (__v64qi) __B,
+ (__v64qi) __W,
+ (__mmask64) __M);
+}
+
+static __inline__ __m512i __DEFAULT_FN_ATTRS
+_mm512_max_epi16 (__m512i __A, __m512i __B)
+{
+ return (__m512i) __builtin_ia32_pmaxsw512_mask ((__v32hi) __A,
+ (__v32hi) __B,
+ (__v32hi) _mm512_setzero_hi(),
+ (__mmask32) -1);
+}
+
+static __inline__ __m512i __DEFAULT_FN_ATTRS
+_mm512_maskz_max_epi16 (__mmask32 __M, __m512i __A, __m512i __B)
+{
+ return (__m512i) __builtin_ia32_pmaxsw512_mask ((__v32hi) __A,
+ (__v32hi) __B,
+ (__v32hi) _mm512_setzero_hi(),
+ (__mmask32) __M);
+}
+
+static __inline__ __m512i __DEFAULT_FN_ATTRS
+_mm512_mask_max_epi16 (__m512i __W, __mmask32 __M, __m512i __A,
+ __m512i __B)
+{
+ return (__m512i) __builtin_ia32_pmaxsw512_mask ((__v32hi) __A,
+ (__v32hi) __B,
+ (__v32hi) __W,
+ (__mmask32) __M);
+}
+
+static __inline__ __m512i __DEFAULT_FN_ATTRS
+_mm512_max_epu8 (__m512i __A, __m512i __B)
+{
+ return (__m512i) __builtin_ia32_pmaxub512_mask ((__v64qi) __A,
+ (__v64qi) __B,
+ (__v64qi) _mm512_setzero_qi(),
+ (__mmask64) -1);
+}
+
+static __inline__ __m512i __DEFAULT_FN_ATTRS
+_mm512_maskz_max_epu8 (__mmask64 __M, __m512i __A, __m512i __B)
+{
+ return (__m512i) __builtin_ia32_pmaxub512_mask ((__v64qi) __A,
+ (__v64qi) __B,
+ (__v64qi) _mm512_setzero_qi(),
+ (__mmask64) __M);
+}
+
+static __inline__ __m512i __DEFAULT_FN_ATTRS
+_mm512_mask_max_epu8 (__m512i __W, __mmask64 __M, __m512i __A,
+ __m512i __B)
+{
+ return (__m512i) __builtin_ia32_pmaxub512_mask ((__v64qi) __A,
+ (__v64qi) __B,
+ (__v64qi) __W,
+ (__mmask64) __M);
+}
+
+static __inline__ __m512i __DEFAULT_FN_ATTRS
+_mm512_max_epu16 (__m512i __A, __m512i __B)
+{
+ return (__m512i) __builtin_ia32_pmaxuw512_mask ((__v32hi) __A,
+ (__v32hi) __B,
+ (__v32hi) _mm512_setzero_hi(),
+ (__mmask32) -1);
+}
+
+static __inline__ __m512i __DEFAULT_FN_ATTRS
+_mm512_maskz_max_epu16 (__mmask32 __M, __m512i __A, __m512i __B)
+{
+ return (__m512i) __builtin_ia32_pmaxuw512_mask ((__v32hi) __A,
+ (__v32hi) __B,
+ (__v32hi) _mm512_setzero_hi(),
+ (__mmask32) __M);
+}
+
+static __inline__ __m512i __DEFAULT_FN_ATTRS
+_mm512_mask_max_epu16 (__m512i __W, __mmask32 __M, __m512i __A,
+ __m512i __B)
+{
+ return (__m512i) __builtin_ia32_pmaxuw512_mask ((__v32hi) __A,
+ (__v32hi) __B,
+ (__v32hi) __W,
+ (__mmask32) __M);
+}
+
+static __inline__ __m512i __DEFAULT_FN_ATTRS
+_mm512_min_epi8 (__m512i __A, __m512i __B)
+{
+ return (__m512i) __builtin_ia32_pminsb512_mask ((__v64qi) __A,
+ (__v64qi) __B,
+ (__v64qi) _mm512_setzero_qi(),
+ (__mmask64) -1);
+}
+
+static __inline__ __m512i __DEFAULT_FN_ATTRS
+_mm512_maskz_min_epi8 (__mmask64 __M, __m512i __A, __m512i __B)
+{
+ return (__m512i) __builtin_ia32_pminsb512_mask ((__v64qi) __A,
+ (__v64qi) __B,
+ (__v64qi) _mm512_setzero_qi(),
+ (__mmask64) __M);
+}
+
+static __inline__ __m512i __DEFAULT_FN_ATTRS
+_mm512_mask_min_epi8 (__m512i __W, __mmask64 __M, __m512i __A,
+ __m512i __B)
+{
+ return (__m512i) __builtin_ia32_pminsb512_mask ((__v64qi) __A,
+ (__v64qi) __B,
+ (__v64qi) __W,
+ (__mmask64) __M);
+}
+
+static __inline__ __m512i __DEFAULT_FN_ATTRS
+_mm512_min_epi16 (__m512i __A, __m512i __B)
+{
+ return (__m512i) __builtin_ia32_pminsw512_mask ((__v32hi) __A,
+ (__v32hi) __B,
+ (__v32hi) _mm512_setzero_hi(),
+ (__mmask32) -1);
+}
+
+static __inline__ __m512i __DEFAULT_FN_ATTRS
+_mm512_maskz_min_epi16 (__mmask32 __M, __m512i __A, __m512i __B)
+{
+ return (__m512i) __builtin_ia32_pminsw512_mask ((__v32hi) __A,
+ (__v32hi) __B,
+ (__v32hi) _mm512_setzero_hi(),
+ (__mmask32) __M);
+}
+
+static __inline__ __m512i __DEFAULT_FN_ATTRS
+_mm512_mask_min_epi16 (__m512i __W, __mmask32 __M, __m512i __A,
+ __m512i __B)
+{
+ return (__m512i) __builtin_ia32_pminsw512_mask ((__v32hi) __A,
+ (__v32hi) __B,
+ (__v32hi) __W,
+ (__mmask32) __M);
+}
+
+static __inline__ __m512i __DEFAULT_FN_ATTRS
+_mm512_min_epu8 (__m512i __A, __m512i __B)
+{
+ return (__m512i) __builtin_ia32_pminub512_mask ((__v64qi) __A,
+ (__v64qi) __B,
+ (__v64qi) _mm512_setzero_qi(),
+ (__mmask64) -1);
+}
+
+static __inline__ __m512i __DEFAULT_FN_ATTRS
+_mm512_maskz_min_epu8 (__mmask64 __M, __m512i __A, __m512i __B)
+{
+ return (__m512i) __builtin_ia32_pminub512_mask ((__v64qi) __A,
+ (__v64qi) __B,
+ (__v64qi) _mm512_setzero_qi(),
+ (__mmask64) __M);
+}
+
+static __inline__ __m512i __DEFAULT_FN_ATTRS
+_mm512_mask_min_epu8 (__m512i __W, __mmask64 __M, __m512i __A,
+ __m512i __B)
+{
+ return (__m512i) __builtin_ia32_pminub512_mask ((__v64qi) __A,
+ (__v64qi) __B,
+ (__v64qi) __W,
+ (__mmask64) __M);
+}
+
+static __inline__ __m512i __DEFAULT_FN_ATTRS
+_mm512_min_epu16 (__m512i __A, __m512i __B)
+{
+ return (__m512i) __builtin_ia32_pminuw512_mask ((__v32hi) __A,
+ (__v32hi) __B,
+ (__v32hi) _mm512_setzero_hi(),
+ (__mmask32) -1);
+}
+
+static __inline__ __m512i __DEFAULT_FN_ATTRS
+_mm512_maskz_min_epu16 (__mmask32 __M, __m512i __A, __m512i __B)
+{
+ return (__m512i) __builtin_ia32_pminuw512_mask ((__v32hi) __A,
+ (__v32hi) __B,
+ (__v32hi) _mm512_setzero_hi(),
+ (__mmask32) __M);
+}
+
+static __inline__ __m512i __DEFAULT_FN_ATTRS
+_mm512_mask_min_epu16 (__m512i __W, __mmask32 __M, __m512i __A,
+ __m512i __B)
+{
+ return (__m512i) __builtin_ia32_pminuw512_mask ((__v32hi) __A,
+ (__v32hi) __B,
+ (__v32hi) __W,
+ (__mmask32) __M);
+}
+
+static __inline__ __m512i __DEFAULT_FN_ATTRS
+_mm512_shuffle_epi8 (__m512i __A, __m512i __B)
+{
+ return (__m512i) __builtin_ia32_pshufb512_mask ((__v64qi) __A,
+ (__v64qi) __B,
+ (__v64qi) _mm512_setzero_qi(),
+ (__mmask64) -1);
+}
+
+static __inline__ __m512i __DEFAULT_FN_ATTRS
+_mm512_mask_shuffle_epi8 (__m512i __W, __mmask64 __U, __m512i __A,
+ __m512i __B)
+{
+ return (__m512i) __builtin_ia32_pshufb512_mask ((__v64qi) __A,
+ (__v64qi) __B,
+ (__v64qi) __W,
+ (__mmask64) __U);
+}
+
+static __inline__ __m512i __DEFAULT_FN_ATTRS
+_mm512_maskz_shuffle_epi8 (__mmask64 __U, __m512i __A, __m512i __B)
+{
+ return (__m512i) __builtin_ia32_pshufb512_mask ((__v64qi) __A,
+ (__v64qi) __B,
+ (__v64qi) _mm512_setzero_qi(),
+ (__mmask64) __U);
+}
+
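+/* vpshufb shuffles bytes within each 128-bit lane of __A using the low four
+ * bits of the corresponding control byte in __B; a control byte with its
+ * high bit set zeroes that result byte. */
+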
+static __inline__ __m512i __DEFAULT_FN_ATTRS
+_mm512_subs_epi8 (__m512i __A, __m512i __B)
+{
+ return (__m512i) __builtin_ia32_psubsb512_mask ((__v64qi) __A,
+ (__v64qi) __B,
+ (__v64qi) _mm512_setzero_qi(),
+ (__mmask64) -1);
+}
+
+static __inline__ __m512i __DEFAULT_FN_ATTRS
+_mm512_mask_subs_epi8 (__m512i __W, __mmask64 __U, __m512i __A,
+ __m512i __B)
+{
+ return (__m512i) __builtin_ia32_psubsb512_mask ((__v64qi) __A,
+ (__v64qi) __B,
+ (__v64qi) __W,
+ (__mmask64) __U);
+}
+
+static __inline__ __m512i __DEFAULT_FN_ATTRS
+_mm512_maskz_subs_epi8 (__mmask64 __U, __m512i __A, __m512i __B)
+{
+ return (__m512i) __builtin_ia32_psubsb512_mask ((__v64qi) __A,
+ (__v64qi) __B,
+ (__v64qi) _mm512_setzero_qi(),
+ (__mmask64) __U);
+}
+
+static __inline__ __m512i __DEFAULT_FN_ATTRS
+_mm512_subs_epi16 (__m512i __A, __m512i __B)
+{
+ return (__m512i) __builtin_ia32_psubsw512_mask ((__v32hi) __A,
+ (__v32hi) __B,
+ (__v32hi) _mm512_setzero_hi(),
+ (__mmask32) -1);
+}
+
+static __inline__ __m512i __DEFAULT_FN_ATTRS
+_mm512_mask_subs_epi16 (__m512i __W, __mmask32 __U, __m512i __A,
+ __m512i __B)
+{
+ return (__m512i) __builtin_ia32_psubsw512_mask ((__v32hi) __A,
+ (__v32hi) __B,
+ (__v32hi) __W,
+ (__mmask32) __U);
+}
+
+static __inline__ __m512i __DEFAULT_FN_ATTRS
+_mm512_maskz_subs_epi16 (__mmask32 __U, __m512i __A, __m512i __B)
+{
+ return (__m512i) __builtin_ia32_psubsw512_mask ((__v32hi) __A,
+ (__v32hi) __B,
+ (__v32hi) _mm512_setzero_hi(),
+ (__mmask32) __U);
+}
+
+static __inline__ __m512i __DEFAULT_FN_ATTRS
+_mm512_subs_epu8 (__m512i __A, __m512i __B)
+{
+ return (__m512i) __builtin_ia32_psubusb512_mask ((__v64qi) __A,
+ (__v64qi) __B,
+ (__v64qi) _mm512_setzero_qi(),
+ (__mmask64) -1);
+}
+
+static __inline__ __m512i __DEFAULT_FN_ATTRS
+_mm512_mask_subs_epu8 (__m512i __W, __mmask64 __U, __m512i __A,
+ __m512i __B)
+{
+ return (__m512i) __builtin_ia32_psubusb512_mask ((__v64qi) __A,
+ (__v64qi) __B,
+ (__v64qi) __W,
+ (__mmask64) __U);
+}
+
+static __inline__ __m512i __DEFAULT_FN_ATTRS
+_mm512_maskz_subs_epu8 (__mmask64 __U, __m512i __A, __m512i __B)
+{
+ return (__m512i) __builtin_ia32_psubusb512_mask ((__v64qi) __A,
+ (__v64qi) __B,
+ (__v64qi) _mm512_setzero_qi(),
+ (__mmask64) __U);
+}
+
+static __inline__ __m512i __DEFAULT_FN_ATTRS
+_mm512_subs_epu16 (__m512i __A, __m512i __B)
+{
+ return (__m512i) __builtin_ia32_psubusw512_mask ((__v32hi) __A,
+ (__v32hi) __B,
+ (__v32hi) _mm512_setzero_hi(),
+ (__mmask32) -1);
+}
+
+static __inline__ __m512i __DEFAULT_FN_ATTRS
+_mm512_mask_subs_epu16 (__m512i __W, __mmask32 __U, __m512i __A,
+ __m512i __B)
+{
+ return (__m512i) __builtin_ia32_psubusw512_mask ((__v32hi) __A,
+ (__v32hi) __B,
+ (__v32hi) __W,
+ (__mmask32) __U);
+}
+
+static __inline__ __m512i __DEFAULT_FN_ATTRS
+_mm512_maskz_subs_epu16 (__mmask32 __U, __m512i __A, __m512i __B)
+{
+ return (__m512i) __builtin_ia32_psubusw512_mask ((__v32hi) __A,
+ (__v32hi) __B,
+ (__v32hi) _mm512_setzero_hi(),
+ (__mmask32) __U);
+}
+
+static __inline__ __m512i __DEFAULT_FN_ATTRS
+_mm512_mask2_permutex2var_epi16 (__m512i __A, __m512i __I,
+ __mmask32 __U, __m512i __B)
+{
+ return (__m512i) __builtin_ia32_vpermi2varhi512_mask ((__v32hi) __A,
+ (__v32hi) __I /* idx */,
+ (__v32hi) __B,
+ (__mmask32) __U);
+}
+
+static __inline__ __m512i __DEFAULT_FN_ATTRS
+_mm512_permutex2var_epi16 (__m512i __A, __m512i __I, __m512i __B)
+{
+ return (__m512i) __builtin_ia32_vpermt2varhi512_mask ((__v32hi) __I /* idx */,
+ (__v32hi) __A,
+ (__v32hi) __B,
+ (__mmask32) -1);
+}
+
+static __inline__ __m512i __DEFAULT_FN_ATTRS
+_mm512_mask_permutex2var_epi16 (__m512i __A, __mmask32 __U,
+ __m512i __I, __m512i __B)
+{
+ return (__m512i) __builtin_ia32_vpermt2varhi512_mask ((__v32hi) __I /* idx */,
+ (__v32hi) __A,
+ (__v32hi) __B,
+ (__mmask32) __U);
+}
+
+static __inline__ __m512i __DEFAULT_FN_ATTRS
+_mm512_maskz_permutex2var_epi16 (__mmask32 __U, __m512i __A,
+ __m512i __I, __m512i __B)
+{
+ return (__m512i) __builtin_ia32_vpermt2varhi512_maskz ((__v32hi) __I /* idx */,
+ (__v32hi) __A,
+ (__v32hi) __B,
+ (__mmask32) __U);
+}
+
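+/* The permutex2var operations select each result word from the 64-word pool
+ * formed by __A and __B, indexed by __I. The _mask2_ variant (vpermi2)
+ * overwrites the index operand, so masked-off lanes keep elements of __I,
+ * whereas the _mask_ variant (vpermt2) keeps elements of __A. */
+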
+static __inline__ __m512i __DEFAULT_FN_ATTRS
+_mm512_mulhrs_epi16 (__m512i __A, __m512i __B)
+{
+ return (__m512i) __builtin_ia32_pmulhrsw512_mask ((__v32hi) __A,
+ (__v32hi) __B,
+ (__v32hi) _mm512_setzero_hi(),
+ (__mmask32) -1);
+}
+
+static __inline__ __m512i __DEFAULT_FN_ATTRS
+_mm512_mask_mulhrs_epi16 (__m512i __W, __mmask32 __U, __m512i __A,
+ __m512i __B)
+{
+ return (__m512i) __builtin_ia32_pmulhrsw512_mask ((__v32hi) __A,
+ (__v32hi) __B,
+ (__v32hi) __W,
+ (__mmask32) __U);
+}
+
+static __inline__ __m512i __DEFAULT_FN_ATTRS
+_mm512_maskz_mulhrs_epi16 (__mmask32 __U, __m512i __A, __m512i __B)
+{
+ return (__m512i) __builtin_ia32_pmulhrsw512_mask ((__v32hi) __A,
+ (__v32hi) __B,
+ (__v32hi) _mm512_setzero_hi(),
+ (__mmask32) __U);
+}
+
+static __inline__ __m512i __DEFAULT_FN_ATTRS
+_mm512_mulhi_epi16 (__m512i __A, __m512i __B)
+{
+ return (__m512i) __builtin_ia32_pmulhw512_mask ((__v32hi) __A,
+ (__v32hi) __B,
+ (__v32hi) _mm512_setzero_hi(),
+ (__mmask32) -1);
+}
+
+static __inline__ __m512i __DEFAULT_FN_ATTRS
+_mm512_mask_mulhi_epi16 (__m512i __W, __mmask32 __U, __m512i __A,
+ __m512i __B)
+{
+ return (__m512i) __builtin_ia32_pmulhw512_mask ((__v32hi) __A,
+ (__v32hi) __B,
+ (__v32hi) __W,
+ (__mmask32) __U);
+}
+
+static __inline__ __m512i __DEFAULT_FN_ATTRS
+_mm512_maskz_mulhi_epi16 (__mmask32 __U, __m512i __A, __m512i __B)
+{
+ return (__m512i) __builtin_ia32_pmulhw512_mask ((__v32hi) __A,
+ (__v32hi) __B,
+ (__v32hi) _mm512_setzero_hi(),
+ (__mmask32) __U);
+}
+
+static __inline__ __m512i __DEFAULT_FN_ATTRS
+_mm512_mulhi_epu16 (__m512i __A, __m512i __B)
+{
+ return (__m512i) __builtin_ia32_pmulhuw512_mask ((__v32hi) __A,
+ (__v32hi) __B,
+ (__v32hi) _mm512_setzero_hi(),
+ (__mmask32) -1);
+}
+
+static __inline__ __m512i __DEFAULT_FN_ATTRS
+_mm512_mask_mulhi_epu16 (__m512i __W, __mmask32 __U, __m512i __A,
+ __m512i __B)
+{
+ return (__m512i) __builtin_ia32_pmulhuw512_mask ((__v32hi) __A,
+ (__v32hi) __B,
+ (__v32hi) __W,
+ (__mmask32) __U);
+}
+
+static __inline__ __m512i __DEFAULT_FN_ATTRS
+_mm512_maskz_mulhi_epu16 (__mmask32 __U, __m512i __A, __m512i __B)
+{
+ return (__m512i) __builtin_ia32_pmulhuw512_mask ((__v32hi) __A,
+ (__v32hi) __B,
+ (__v32hi) _mm512_setzero_hi(),
+ (__mmask32) __U);
+}
+
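+/* mulhi returns the high 16 bits of each 32-bit product, signed or unsigned;
+ * mulhrs computes the Q15 rounded high product (__A * __B + 0x4000) >> 15. */
+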
+static __inline__ __m512i __DEFAULT_FN_ATTRS
+_mm512_maddubs_epi16 (__m512i __X, __m512i __Y) {
+ return (__m512i) __builtin_ia32_pmaddubsw512_mask ((__v64qi) __X,
+ (__v64qi) __Y,
+ (__v32hi) _mm512_setzero_hi(),
+ (__mmask32) -1);
+}
+
+static __inline__ __m512i __DEFAULT_FN_ATTRS
+_mm512_mask_maddubs_epi16 (__m512i __W, __mmask32 __U, __m512i __X,
+ __m512i __Y) {
+ return (__m512i) __builtin_ia32_pmaddubsw512_mask ((__v64qi) __X,
+ (__v64qi) __Y,
+ (__v32hi) __W,
+ (__mmask32) __U);
+}
+
+static __inline__ __m512i __DEFAULT_FN_ATTRS
+_mm512_maskz_maddubs_epi16 (__mmask32 __U, __m512i __X, __m512i __Y) {
+ return (__m512i) __builtin_ia32_pmaddubsw512_mask ((__v64qi) __X,
+ (__v64qi) __Y,
+ (__v32hi) _mm512_setzero_hi(),
+ (__mmask32) __U);
+}
+
+static __inline__ __m512i __DEFAULT_FN_ATTRS
+_mm512_madd_epi16 (__m512i __A, __m512i __B) {
+ return (__m512i) __builtin_ia32_pmaddwd512_mask ((__v32hi) __A,
+ (__v32hi) __B,
+ (__v16si) _mm512_setzero_si512(),
+ (__mmask16) -1);
+}
+
+static __inline__ __m512i __DEFAULT_FN_ATTRS
+_mm512_mask_madd_epi16 (__m512i __W, __mmask16 __U, __m512i __A,
+ __m512i __B) {
+ return (__m512i) __builtin_ia32_pmaddwd512_mask ((__v32hi) __A,
+ (__v32hi) __B,
+ (__v16si) __W,
+ (__mmask16) __U);
+}
+
+static __inline__ __m512i __DEFAULT_FN_ATTRS
+_mm512_maskz_madd_epi16 (__mmask16 __U, __m512i __A, __m512i __B) {
+ return (__m512i) __builtin_ia32_pmaddwd512_mask ((__v32hi) __A,
+ (__v32hi) __B,
+ (__v16si) _mm512_setzero_si512(),
+ (__mmask16) __U);
+}
+
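+/* maddubs multiplies unsigned bytes of __X by signed bytes of __Y and adds
+ * horizontal pairs into 16-bit lanes with signed saturation; madd multiplies
+ * signed words and adds pairs into 32-bit lanes, which is why its results
+ * use 16 dword lanes and an __mmask16. */
+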
+static __inline__ __m256i __DEFAULT_FN_ATTRS
+_mm512_cvtsepi16_epi8 (__m512i __A) {
+ return (__m256i) __builtin_ia32_pmovswb512_mask ((__v32hi) __A,
+ (__v32qi)_mm256_setzero_si256(),
+ (__mmask32) -1);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS
+_mm512_mask_cvtsepi16_epi8 (__m256i __O, __mmask32 __M, __m512i __A) {
+ return (__m256i) __builtin_ia32_pmovswb512_mask ((__v32hi) __A,
+ (__v32qi)__O,
+ __M);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS
+_mm512_maskz_cvtsepi16_epi8 (__mmask32 __M, __m512i __A) {
+ return (__m256i) __builtin_ia32_pmovswb512_mask ((__v32hi) __A,
+ (__v32qi) _mm256_setzero_si256(),
+ __M);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS
+_mm512_cvtusepi16_epi8 (__m512i __A) {
+ return (__m256i) __builtin_ia32_pmovuswb512_mask ((__v32hi) __A,
+ (__v32qi) _mm256_setzero_si256(),
+ (__mmask32) -1);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS
+_mm512_mask_cvtusepi16_epi8 (__m256i __O, __mmask32 __M, __m512i __A) {
+ return (__m256i) __builtin_ia32_pmovuswb512_mask ((__v32hi) __A,
+ (__v32qi) __O,
+ __M);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS
+_mm512_maskz_cvtusepi16_epi8 (__mmask32 __M, __m512i __A) {
+ return (__m256i) __builtin_ia32_pmovuswb512_mask ((__v32hi) __A,
+ (__v32qi) _mm256_setzero_si256(),
+ __M);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS
+_mm512_cvtepi16_epi8 (__m512i __A) {
+ return (__m256i) __builtin_ia32_pmovwb512_mask ((__v32hi) __A,
+ (__v32qi) _mm256_setzero_si256(),
+ (__mmask32) -1);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS
+_mm512_mask_cvtepi16_epi8 (__m256i __O, __mmask32 __M, __m512i __A) {
+ return (__m256i) __builtin_ia32_pmovwb512_mask ((__v32hi) __A,
+ (__v32qi) __O,
+ __M);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS
+_mm512_maskz_cvtepi16_epi8 (__mmask32 __M, __m512i __A) {
+ return (__m256i) __builtin_ia32_pmovwb512_mask ((__v32hi) __A,
+ (__v32qi) _mm256_setzero_si256(),
+ __M);
+}
+
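+/* These down-conversions narrow 32 words to 32 bytes: pmovswb saturates as
+ * signed, pmovuswb saturates as unsigned, and pmovwb simply truncates. */
+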
+static __inline__ __m512i __DEFAULT_FN_ATTRS
+_mm512_unpackhi_epi8 (__m512i __A, __m512i __B) {
+ return (__m512i) __builtin_ia32_punpckhbw512_mask ((__v64qi) __A,
+ (__v64qi) __B,
+ (__v64qi) _mm512_setzero_qi(),
+ (__mmask64) -1);
+}
+
+static __inline__ __m512i __DEFAULT_FN_ATTRS
+_mm512_mask_unpackhi_epi8 (__m512i __W, __mmask64 __U, __m512i __A,
+ __m512i __B) {
+ return (__m512i) __builtin_ia32_punpckhbw512_mask ((__v64qi) __A,
+ (__v64qi) __B,
+ (__v64qi) __W,
+ (__mmask64) __U);
+}
+
+static __inline__ __m512i __DEFAULT_FN_ATTRS
+_mm512_maskz_unpackhi_epi8 (__mmask64 __U, __m512i __A, __m512i __B) {
+ return (__m512i) __builtin_ia32_punpckhbw512_mask ((__v64qi) __A,
+ (__v64qi) __B,
+ (__v64qi) _mm512_setzero_qi(),
+ (__mmask64) __U);
+}
+
+static __inline__ __m512i __DEFAULT_FN_ATTRS
+_mm512_unpackhi_epi16 (__m512i __A, __m512i __B) {
+ return (__m512i) __builtin_ia32_punpckhwd512_mask ((__v32hi) __A,
+ (__v32hi) __B,
+ (__v32hi) _mm512_setzero_hi(),
+ (__mmask32) -1);
+}
+
+static __inline__ __m512i __DEFAULT_FN_ATTRS
+_mm512_mask_unpackhi_epi16 (__m512i __W, __mmask32 __U, __m512i __A,
+ __m512i __B) {
+ return (__m512i) __builtin_ia32_punpckhwd512_mask ((__v32hi) __A,
+ (__v32hi) __B,
+ (__v32hi) __W,
+ (__mmask32) __U);
+}
+
+static __inline__ __m512i __DEFAULT_FN_ATTRS
+_mm512_maskz_unpackhi_epi16 (__mmask32 __U, __m512i __A, __m512i __B) {
+ return (__m512i) __builtin_ia32_punpckhwd512_mask ((__v32hi) __A,
+ (__v32hi) __B,
+ (__v32hi) _mm512_setzero_hi(),
+ (__mmask32) __U);
+}
+
+static __inline__ __m512i __DEFAULT_FN_ATTRS
+_mm512_unpacklo_epi8 (__m512i __A, __m512i __B) {
+ return (__m512i) __builtin_ia32_punpcklbw512_mask ((__v64qi) __A,
+ (__v64qi) __B,
+ (__v64qi) _mm512_setzero_qi(),
+ (__mmask64) -1);
+}
+
+static __inline__ __m512i __DEFAULT_FN_ATTRS
+_mm512_mask_unpacklo_epi8 (__m512i __W, __mmask64 __U, __m512i __A,
+ __m512i __B) {
+ return (__m512i) __builtin_ia32_punpcklbw512_mask ((__v64qi) __A,
+ (__v64qi) __B,
+ (__v64qi) __W,
+ (__mmask64) __U);
+}
+
+static __inline__ __m512i __DEFAULT_FN_ATTRS
+_mm512_maskz_unpacklo_epi8 (__mmask64 __U, __m512i __A, __m512i __B) {
+ return (__m512i) __builtin_ia32_punpcklbw512_mask ((__v64qi) __A,
+ (__v64qi) __B,
+ (__v64qi) _mm512_setzero_qi(),
+ (__mmask64) __U);
+}
+
+static __inline__ __m512i __DEFAULT_FN_ATTRS
+_mm512_unpacklo_epi16 (__m512i __A, __m512i __B) {
+ return (__m512i) __builtin_ia32_punpcklwd512_mask ((__v32hi) __A,
+ (__v32hi) __B,
+ (__v32hi) _mm512_setzero_hi(),
+ (__mmask32) -1);
+}
+
+static __inline__ __m512i __DEFAULT_FN_ATTRS
+_mm512_mask_unpacklo_epi16 (__m512i __W, __mmask32 __U, __m512i __A,
+ __m512i __B) {
+ return (__m512i) __builtin_ia32_punpcklwd512_mask ((__v32hi) __A,
+ (__v32hi) __B,
+ (__v32hi) __W,
+ (__mmask32) __U);
+}
+
+static __inline__ __m512i __DEFAULT_FN_ATTRS
+_mm512_maskz_unpacklo_epi16 (__mmask32 __U, __m512i __A, __m512i __B) {
+ return (__m512i) __builtin_ia32_punpcklwd512_mask ((__v32hi) __A,
+ (__v32hi) __B,
+ (__v32hi) _mm512_setzero_hi(),
+ (__mmask32) __U);
+}
+
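+/* As in SSE2/AVX2, punpckh and punpckl interleave the high and low halves,
+ * respectively, of each 128-bit lane of the two sources. */
+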
+#define _mm512_cmp_epi8_mask(a, b, p) __extension__ ({ \
+ (__mmask64)__builtin_ia32_cmpb512_mask((__v64qi)(__m512i)(a), \
+ (__v64qi)(__m512i)(b), \
+ (p), (__mmask64)-1); })
+
+#define _mm512_mask_cmp_epi8_mask(m, a, b, p) __extension__ ({ \
+ (__mmask64)__builtin_ia32_cmpb512_mask((__v64qi)(__m512i)(a), \
+ (__v64qi)(__m512i)(b), \
+ (p), (__mmask64)(m)); })
+
+#define _mm512_cmp_epu8_mask(a, b, p) __extension__ ({ \
+ (__mmask64)__builtin_ia32_ucmpb512_mask((__v64qi)(__m512i)(a), \
+ (__v64qi)(__m512i)(b), \
+ (p), (__mmask64)-1); })
+
+#define _mm512_mask_cmp_epu8_mask(m, a, b, p) __extension__ ({ \
+ (__mmask64)__builtin_ia32_ucmpb512_mask((__v64qi)(__m512i)(a), \
+ (__v64qi)(__m512i)(b), \
+ (p), (__mmask64)(m)); })
+
+#define _mm512_cmp_epi16_mask(a, b, p) __extension__ ({ \
+ (__mmask32)__builtin_ia32_cmpw512_mask((__v32hi)(__m512i)(a), \
+ (__v32hi)(__m512i)(b), \
+ (p), (__mmask32)-1); })
+
+#define _mm512_mask_cmp_epi16_mask(m, a, b, p) __extension__ ({ \
+ (__mmask32)__builtin_ia32_cmpw512_mask((__v32hi)(__m512i)(a), \
+ (__v32hi)(__m512i)(b), \
+ (p), (__mmask32)(m)); })
+
+#define _mm512_cmp_epu16_mask(a, b, p) __extension__ ({ \
+ (__mmask32)__builtin_ia32_ucmpw512_mask((__v32hi)(__m512i)(a), \
+ (__v32hi)(__m512i)(b), \
+ (p), (__mmask32)-1); })
+
+#define _mm512_mask_cmp_epu16_mask(m, a, b, p) __extension__ ({ \
+ (__mmask32)__builtin_ia32_ucmpw512_mask((__v32hi)(__m512i)(a), \
+ (__v32hi)(__m512i)(b), \
+ (p), (__mmask32)(m)); })
+
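+/* Hypothetical usage sketch, assuming the _MM_CMPINT_* constants from
+ * avx512fintrin.h: count how many bytes of a are less than or equal to the
+ * corresponding byte of b:
+ *
+ *   __mmask64 k = _mm512_cmp_epi8_mask(a, b, _MM_CMPINT_LE);
+ *   int n = __builtin_popcountll((unsigned long long)k);
+ */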
+
+#undef __DEFAULT_FN_ATTRS
+
+#endif
diff --git a/clang-2690385/lib64/clang/3.8/include/avx512cdintrin.h b/clang-2690385/lib64/clang/3.8/include/avx512cdintrin.h
new file mode 100644
index 00000000..3894b29f
--- /dev/null
+++ b/clang-2690385/lib64/clang/3.8/include/avx512cdintrin.h
@@ -0,0 +1,131 @@
+/*===------------- avx512cdintrin.h - AVX512CD intrinsics ------------------===
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+ * THE SOFTWARE.
+ *
+ *===-----------------------------------------------------------------------===
+ */
+#ifndef __IMMINTRIN_H
+#error "Never use <avx512cdintrin.h> directly; include <immintrin.h> instead."
+#endif
+
+#ifndef __AVX512CDINTRIN_H
+#define __AVX512CDINTRIN_H
+
+/* Define the default attributes for the functions in this file. */
+#define __DEFAULT_FN_ATTRS __attribute__((__always_inline__, __nodebug__, __target__("avx512cd")))
+
+static __inline__ __m512i __DEFAULT_FN_ATTRS
+_mm512_conflict_epi64 (__m512i __A)
+{
+ return (__m512i) __builtin_ia32_vpconflictdi_512_mask ((__v8di) __A,
+ (__v8di) _mm512_setzero_si512 (),
+ (__mmask8) -1);
+}
+
+static __inline__ __m512i __DEFAULT_FN_ATTRS
+_mm512_mask_conflict_epi64 (__m512i __W, __mmask8 __U, __m512i __A)
+{
+ return (__m512i) __builtin_ia32_vpconflictdi_512_mask ((__v8di) __A,
+ (__v8di) __W,
+ (__mmask8) __U);
+}
+
+static __inline__ __m512i __DEFAULT_FN_ATTRS
+_mm512_maskz_conflict_epi64 (__mmask8 __U, __m512i __A)
+{
+ return (__m512i) __builtin_ia32_vpconflictdi_512_mask ((__v8di) __A,
+ (__v8di) _mm512_setzero_si512 (),
+ (__mmask8) __U);
+}
+
+static __inline__ __m512i __DEFAULT_FN_ATTRS
+_mm512_conflict_epi32 (__m512i __A)
+{
+ return (__m512i) __builtin_ia32_vpconflictsi_512_mask ((__v16si) __A,
+ (__v16si) _mm512_setzero_si512 (),
+ (__mmask16) -1);
+}
+
+static __inline__ __m512i __DEFAULT_FN_ATTRS
+_mm512_mask_conflict_epi32 (__m512i __W, __mmask16 __U, __m512i __A)
+{
+ return (__m512i) __builtin_ia32_vpconflictsi_512_mask ((__v16si) __A,
+ (__v16si) __W,
+ (__mmask16) __U);
+}
+
+static __inline__ __m512i __DEFAULT_FN_ATTRS
+_mm512_maskz_conflict_epi32 (__mmask16 __U, __m512i __A)
+{
+ return (__m512i) __builtin_ia32_vpconflictsi_512_mask ((__v16si) __A,
+ (__v16si) _mm512_setzero_si512 (),
+ (__mmask16) __U);
+}
+
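+/* vpconflict reports, for each element, a bit set for every lower-indexed
+ * element with the same value; a zero result therefore means the element is
+ * unique so far, which is useful for detecting duplicate scatter indices. */
+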
+static __inline__ __m512i __DEFAULT_FN_ATTRS
+_mm512_lzcnt_epi32 (__m512i __A)
+{
+ return (__m512i) __builtin_ia32_vplzcntd_512_mask ((__v16si) __A,
+ (__v16si) _mm512_setzero_si512 (),
+ (__mmask16) -1);
+}
+
+static __inline__ __m512i __DEFAULT_FN_ATTRS
+_mm512_mask_lzcnt_epi32 (__m512i __W, __mmask16 __U, __m512i __A)
+{
+ return (__m512i) __builtin_ia32_vplzcntd_512_mask ((__v16si) __A,
+ (__v16si) __W,
+ (__mmask16) __U);
+}
+
+static __inline__ __m512i __DEFAULT_FN_ATTRS
+_mm512_maskz_lzcnt_epi32 (__mmask16 __U, __m512i __A)
+{
+ return (__m512i) __builtin_ia32_vplzcntd_512_mask ((__v16si) __A,
+ (__v16si) _mm512_setzero_si512 (),
+ (__mmask16) __U);
+}
+
+static __inline__ __m512i __DEFAULT_FN_ATTRS
+_mm512_lzcnt_epi64 (__m512i __A)
+{
+ return (__m512i) __builtin_ia32_vplzcntq_512_mask ((__v8di) __A,
+ (__v8di) _mm512_setzero_si512 (),
+ (__mmask8) -1);
+}
+
+static __inline__ __m512i __DEFAULT_FN_ATTRS
+_mm512_mask_lzcnt_epi64 (__m512i __W, __mmask8 __U, __m512i __A)
+{
+ return (__m512i) __builtin_ia32_vplzcntq_512_mask ((__v8di) __A,
+ (__v8di) __W,
+ (__mmask8) __U);
+}
+
+static __inline__ __m512i __DEFAULT_FN_ATTRS
+_mm512_maskz_lzcnt_epi64 (__mmask8 __U, __m512i __A)
+{
+ return (__m512i) __builtin_ia32_vplzcntq_512_mask ((__v8di) __A,
+ (__v8di) _mm512_setzero_si512 (),
+ (__mmask8) __U);
+}
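+
+/* vplzcnt counts the leading zero bits of each element; a zero element
+ * yields the full element width (32 or 64). */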
+#undef __DEFAULT_FN_ATTRS
+
+#endif
diff --git a/clang-2690385/lib64/clang/3.8/include/avx512dqintrin.h b/clang-2690385/lib64/clang/3.8/include/avx512dqintrin.h
new file mode 100644
index 00000000..afee4903
--- /dev/null
+++ b/clang-2690385/lib64/clang/3.8/include/avx512dqintrin.h
@@ -0,0 +1,778 @@
+/*===---- avx512dqintrin.h - AVX512DQ intrinsics ---------------------------===
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+ * THE SOFTWARE.
+ *
+ *===-----------------------------------------------------------------------===
+ */
+
+#ifndef __IMMINTRIN_H
+#error "Never use <avx512dqintrin.h> directly; include <immintrin.h> instead."
+#endif
+
+#ifndef __AVX512DQINTRIN_H
+#define __AVX512DQINTRIN_H
+
+/* Define the default attributes for the functions in this file. */
+#define __DEFAULT_FN_ATTRS __attribute__((__always_inline__, __nodebug__, __target__("avx512dq")))
+
+static __inline__ __m512i __DEFAULT_FN_ATTRS
+_mm512_mullo_epi64 (__m512i __A, __m512i __B) {
+ return (__m512i) ((__v8di) __A * (__v8di) __B);
+}
+
+static __inline__ __m512i __DEFAULT_FN_ATTRS
+_mm512_mask_mullo_epi64 (__m512i __W, __mmask8 __U, __m512i __A, __m512i __B) {
+ return (__m512i) __builtin_ia32_pmullq512_mask ((__v8di) __A,
+ (__v8di) __B,
+ (__v8di) __W,
+ (__mmask8) __U);
+}
+
+static __inline__ __m512i __DEFAULT_FN_ATTRS
+_mm512_maskz_mullo_epi64 (__mmask8 __U, __m512i __A, __m512i __B) {
+ return (__m512i) __builtin_ia32_pmullq512_mask ((__v8di) __A,
+ (__v8di) __B,
+ (__v8di)
+ _mm512_setzero_si512 (),
+ (__mmask8) __U);
+}
+
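+/* AVX512DQ provides vpmullq, a true 64-bit low multiply; the unmasked form
+ * is expressed directly as a vector multiplication. */
+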
+static __inline__ __m512d __DEFAULT_FN_ATTRS
+_mm512_xor_pd (__m512d __A, __m512d __B) {
+ return (__m512d) ((__v8di) __A ^ (__v8di) __B);
+}
+
+static __inline__ __m512d __DEFAULT_FN_ATTRS
+_mm512_mask_xor_pd (__m512d __W, __mmask8 __U, __m512d __A, __m512d __B) {
+ return (__m512d) __builtin_ia32_xorpd512_mask ((__v8df) __A,
+ (__v8df) __B,
+ (__v8df) __W,
+ (__mmask8) __U);
+}
+
+static __inline__ __m512d __DEFAULT_FN_ATTRS
+_mm512_maskz_xor_pd (__mmask8 __U, __m512d __A, __m512d __B) {
+ return (__m512d) __builtin_ia32_xorpd512_mask ((__v8df) __A,
+ (__v8df) __B,
+ (__v8df)
+ _mm512_setzero_pd (),
+ (__mmask8) __U);
+}
+
+static __inline__ __m512 __DEFAULT_FN_ATTRS
+_mm512_xor_ps (__m512 __A, __m512 __B) {
+ return (__m512) ((__v16si) __A ^ (__v16si) __B);
+}
+
+static __inline__ __m512 __DEFAULT_FN_ATTRS
+_mm512_mask_xor_ps (__m512 __W, __mmask16 __U, __m512 __A, __m512 __B) {
+ return (__m512) __builtin_ia32_xorps512_mask ((__v16sf) __A,
+ (__v16sf) __B,
+ (__v16sf) __W,
+ (__mmask16) __U);
+}
+
+static __inline__ __m512 __DEFAULT_FN_ATTRS
+_mm512_maskz_xor_ps (__mmask16 __U, __m512 __A, __m512 __B) {
+ return (__m512) __builtin_ia32_xorps512_mask ((__v16sf) __A,
+ (__v16sf) __B,
+ (__v16sf)
+ _mm512_setzero_ps (),
+ (__mmask16) __U);
+}
+
+static __inline__ __m512d __DEFAULT_FN_ATTRS
+_mm512_or_pd (__m512d __A, __m512d __B) {
+ return (__m512d) ((__v8di) __A | (__v8di) __B);
+}
+
+static __inline__ __m512d __DEFAULT_FN_ATTRS
+_mm512_mask_or_pd (__m512d __W, __mmask8 __U, __m512d __A, __m512d __B) {
+ return (__m512d) __builtin_ia32_orpd512_mask ((__v8df) __A,
+ (__v8df) __B,
+ (__v8df) __W,
+ (__mmask8) __U);
+}
+
+static __inline__ __m512d __DEFAULT_FN_ATTRS
+_mm512_maskz_or_pd (__mmask8 __U, __m512d __A, __m512d __B) {
+ return (__m512d) __builtin_ia32_orpd512_mask ((__v8df) __A,
+ (__v8df) __B,
+ (__v8df)
+ _mm512_setzero_pd (),
+ (__mmask8) __U);
+}
+
+static __inline__ __m512 __DEFAULT_FN_ATTRS
+_mm512_or_ps (__m512 __A, __m512 __B) {
+ return (__m512) ((__v16si) __A | (__v16si) __B);
+}
+
+static __inline__ __m512 __DEFAULT_FN_ATTRS
+_mm512_mask_or_ps (__m512 __W, __mmask16 __U, __m512 __A, __m512 __B) {
+ return (__m512) __builtin_ia32_orps512_mask ((__v16sf) __A,
+ (__v16sf) __B,
+ (__v16sf) __W,
+ (__mmask16) __U);
+}
+
+static __inline__ __m512 __DEFAULT_FN_ATTRS
+_mm512_maskz_or_ps (__mmask16 __U, __m512 __A, __m512 __B) {
+ return (__m512) __builtin_ia32_orps512_mask ((__v16sf) __A,
+ (__v16sf) __B,
+ (__v16sf)
+ _mm512_setzero_ps (),
+ (__mmask16) __U);
+}
+
+static __inline__ __m512d __DEFAULT_FN_ATTRS
+_mm512_and_pd (__m512d __A, __m512d __B) {
+ return (__m512d) ((__v8di) __A & (__v8di) __B);
+}
+
+static __inline__ __m512d __DEFAULT_FN_ATTRS
+_mm512_mask_and_pd (__m512d __W, __mmask8 __U, __m512d __A, __m512d __B) {
+ return (__m512d) __builtin_ia32_andpd512_mask ((__v8df) __A,
+ (__v8df) __B,
+ (__v8df) __W,
+ (__mmask8) __U);
+}
+
+static __inline__ __m512d __DEFAULT_FN_ATTRS
+_mm512_maskz_and_pd (__mmask8 __U, __m512d __A, __m512d __B) {
+ return (__m512d) __builtin_ia32_andpd512_mask ((__v8df) __A,
+ (__v8df) __B,
+ (__v8df)
+ _mm512_setzero_pd (),
+ (__mmask8) __U);
+}
+
+static __inline__ __m512 __DEFAULT_FN_ATTRS
+_mm512_and_ps (__m512 __A, __m512 __B) {
+ return (__m512) ((__v16si) __A & (__v16si) __B);
+}
+
+static __inline__ __m512 __DEFAULT_FN_ATTRS
+_mm512_mask_and_ps (__m512 __W, __mmask16 __U, __m512 __A, __m512 __B) {
+ return (__m512) __builtin_ia32_andps512_mask ((__v16sf) __A,
+ (__v16sf) __B,
+ (__v16sf) __W,
+ (__mmask16) __U);
+}
+
+static __inline__ __m512 __DEFAULT_FN_ATTRS
+_mm512_maskz_and_ps (__mmask16 __U, __m512 __A, __m512 __B) {
+ return (__m512) __builtin_ia32_andps512_mask ((__v16sf) __A,
+ (__v16sf) __B,
+ (__v16sf)
+ _mm512_setzero_ps (),
+ (__mmask16) __U);
+}
+
+static __inline__ __m512d __DEFAULT_FN_ATTRS
+_mm512_andnot_pd (__m512d __A, __m512d __B) {
+ return (__m512d) __builtin_ia32_andnpd512_mask ((__v8df) __A,
+ (__v8df) __B,
+ (__v8df)
+ _mm512_setzero_pd (),
+ (__mmask8) -1);
+}
+
+static __inline__ __m512d __DEFAULT_FN_ATTRS
+_mm512_mask_andnot_pd (__m512d __W, __mmask8 __U, __m512d __A, __m512d __B) {
+ return (__m512d) __builtin_ia32_andnpd512_mask ((__v8df) __A,
+ (__v8df) __B,
+ (__v8df) __W,
+ (__mmask8) __U);
+}
+
+static __inline__ __m512d __DEFAULT_FN_ATTRS
+_mm512_maskz_andnot_pd (__mmask8 __U, __m512d __A, __m512d __B) {
+ return (__m512d) __builtin_ia32_andnpd512_mask ((__v8df) __A,
+ (__v8df) __B,
+ (__v8df)
+ _mm512_setzero_pd (),
+ (__mmask8) __U);
+}
+
+static __inline__ __m512 __DEFAULT_FN_ATTRS
+_mm512_andnot_ps (__m512 __A, __m512 __B) {
+ return (__m512) __builtin_ia32_andnps512_mask ((__v16sf) __A,
+ (__v16sf) __B,
+ (__v16sf)
+ _mm512_setzero_ps (),
+ (__mmask16) -1);
+}
+
+static __inline__ __m512 __DEFAULT_FN_ATTRS
+_mm512_mask_andnot_ps (__m512 __W, __mmask16 __U, __m512 __A, __m512 __B) {
+ return (__m512) __builtin_ia32_andnps512_mask ((__v16sf) __A,
+ (__v16sf) __B,
+ (__v16sf) __W,
+ (__mmask16) __U);
+}
+
+static __inline__ __m512 __DEFAULT_FN_ATTRS
+_mm512_maskz_andnot_ps (__mmask16 __U, __m512 __A, __m512 __B) {
+ return (__m512) __builtin_ia32_andnps512_mask ((__v16sf) __A,
+ (__v16sf) __B,
+ (__v16sf)
+ _mm512_setzero_ps (),
+ (__mmask16) __U);
+}
+
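+/* As with the SSE/AVX andn operations, andnot computes (~__A) & __B. */
+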
+static __inline__ __m512i __DEFAULT_FN_ATTRS
+_mm512_cvtpd_epi64 (__m512d __A) {
+ return (__m512i) __builtin_ia32_cvtpd2qq512_mask ((__v8df) __A,
+ (__v8di) _mm512_setzero_si512(),
+ (__mmask8) -1,
+ _MM_FROUND_CUR_DIRECTION);
+}
+
+static __inline__ __m512i __DEFAULT_FN_ATTRS
+_mm512_mask_cvtpd_epi64 (__m512i __W, __mmask8 __U, __m512d __A) {
+ return (__m512i) __builtin_ia32_cvtpd2qq512_mask ((__v8df) __A,
+ (__v8di) __W,
+ (__mmask8) __U,
+ _MM_FROUND_CUR_DIRECTION);
+}
+
+static __inline__ __m512i __DEFAULT_FN_ATTRS
+_mm512_maskz_cvtpd_epi64 (__mmask8 __U, __m512d __A) {
+ return (__m512i) __builtin_ia32_cvtpd2qq512_mask ((__v8df) __A,
+ (__v8di) _mm512_setzero_si512(),
+ (__mmask8) __U,
+ _MM_FROUND_CUR_DIRECTION);
+}
+
+#define _mm512_cvt_roundpd_epi64(__A, __R) __extension__ ({ \
+ (__m512i) __builtin_ia32_cvtpd2qq512_mask ((__v8df) __A, \
+ (__v8di) _mm512_setzero_si512(), (__mmask8) -1, __R);})
+
+#define _mm512_mask_cvt_roundpd_epi64(__W, __U, __A, __R) __extension__ ({ \
+ (__m512i) __builtin_ia32_cvtpd2qq512_mask ((__v8df) __A, \
+ (__v8di) __W, (__mmask8) __U, __R);})
+
+#define _mm512_maskz_cvt_roundpd_epi64(__U, __A, __R) __extension__ ({ \
+ (__m512i) __builtin_ia32_cvtpd2qq512_mask ((__v8df) __A, \
+ (__v8di) _mm512_setzero_si512(), (__mmask8) __U, __R); })
+
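+/* The _cvt_round* macro forms take an explicit rounding mode __R (an
+ * _MM_FROUND_* value, e.g. _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC),
+ * while the function forms above use _MM_FROUND_CUR_DIRECTION. */
+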
+static __inline__ __m512i __DEFAULT_FN_ATTRS
+_mm512_cvtpd_epu64 (__m512d __A) {
+ return (__m512i) __builtin_ia32_cvtpd2uqq512_mask ((__v8df) __A,
+ (__v8di) _mm512_setzero_si512(),
+ (__mmask8) -1,
+ _MM_FROUND_CUR_DIRECTION);
+}
+
+static __inline__ __m512i __DEFAULT_FN_ATTRS
+_mm512_mask_cvtpd_epu64 (__m512i __W, __mmask8 __U, __m512d __A) {
+ return (__m512i) __builtin_ia32_cvtpd2uqq512_mask ((__v8df) __A,
+ (__v8di) __W,
+ (__mmask8) __U,
+ _MM_FROUND_CUR_DIRECTION);
+}
+
+static __inline__ __m512i __DEFAULT_FN_ATTRS
+_mm512_maskz_cvtpd_epu64 (__mmask8 __U, __m512d __A) {
+ return (__m512i) __builtin_ia32_cvtpd2uqq512_mask ((__v8df) __A,
+ (__v8di) _mm512_setzero_si512(),
+ (__mmask8) __U,
+ _MM_FROUND_CUR_DIRECTION);
+}
+
+#define _mm512_cvt_roundpd_epu64(__A, __R) __extension__ ({ \
+ (__m512i) __builtin_ia32_cvtpd2uqq512_mask ((__v8df) __A, \
+ (__v8di) _mm512_setzero_si512(), (__mmask8) -1, __R);})
+
+#define _mm512_mask_cvt_roundpd_epu64(__W, __U, __A, __R) __extension__ ({ \
+ (__m512i) __builtin_ia32_cvtpd2uqq512_mask ((__v8df) __A, \
+ (__v8di) __W, (__mmask8) __U, __R);})
+
+#define _mm512_maskz_cvt_roundpd_epu64(__U, __A, __R) __extension__ ({ \
+ (__m512i) __builtin_ia32_cvtpd2uqq512_mask ((__v8df) __A, \
+ (__v8di) _mm512_setzero_si512(), (__mmask8) __U, __R);})
+
+static __inline__ __m512i __DEFAULT_FN_ATTRS
+_mm512_cvtps_epi64 (__m256 __A) {
+ return (__m512i) __builtin_ia32_cvtps2qq512_mask ((__v8sf) __A,
+ (__v8di) _mm512_setzero_si512(),
+ (__mmask8) -1,
+ _MM_FROUND_CUR_DIRECTION);
+}
+
+static __inline__ __m512i __DEFAULT_FN_ATTRS
+_mm512_mask_cvtps_epi64 (__m512i __W, __mmask8 __U, __m256 __A) {
+ return (__m512i) __builtin_ia32_cvtps2qq512_mask ((__v8sf) __A,
+ (__v8di) __W,
+ (__mmask8) __U,
+ _MM_FROUND_CUR_DIRECTION);
+}
+
+static __inline__ __m512i __DEFAULT_FN_ATTRS
+_mm512_maskz_cvtps_epi64 (__mmask8 __U, __m256 __A) {
+ return (__m512i) __builtin_ia32_cvtps2qq512_mask ((__v8sf) __A,
+ (__v8di) _mm512_setzero_si512(),
+ (__mmask8) __U,
+ _MM_FROUND_CUR_DIRECTION);
+}
+
+#define _mm512_cvt_roundps_epi64(__A, __R) __extension__ ({ \
+ (__m512i) __builtin_ia32_cvtps2qq512_mask ((__v8sf) __A, \
+ (__v8di) _mm512_setzero_si512(), (__mmask8) -1, __R);})
+
+#define _mm512_mask_cvt_roundps_epi64(__W, __U, __A, __R) __extension__ ({ \
+ (__m512i) __builtin_ia32_cvtps2qq512_mask ((__v8sf) __A, \
+ (__v8di) __W, (__mmask8) __U, __R);})
+
+#define _mm512_maskz_cvt_roundps_epi64(__U, __A, __R) __extension__ ({ \
+ (__m512i) __builtin_ia32_cvtps2qq512_mask ((__v8sf) __A, \
+ (__v8di) _mm512_setzero_si512(), (__mmask8) __U, __R);})
+
+static __inline__ __m512i __DEFAULT_FN_ATTRS
+_mm512_cvtps_epu64 (__m256 __A) {
+ return (__m512i) __builtin_ia32_cvtps2uqq512_mask ((__v8sf) __A,
+ (__v8di) _mm512_setzero_si512(),
+ (__mmask8) -1,
+ _MM_FROUND_CUR_DIRECTION);
+}
+
+static __inline__ __m512i __DEFAULT_FN_ATTRS
+_mm512_mask_cvtps_epu64 (__m512i __W, __mmask8 __U, __m256 __A) {
+ return (__m512i) __builtin_ia32_cvtps2uqq512_mask ((__v8sf) __A,
+ (__v8di) __W,
+ (__mmask8) __U,
+ _MM_FROUND_CUR_DIRECTION);
+}
+
+static __inline__ __m512i __DEFAULT_FN_ATTRS
+_mm512_maskz_cvtps_epu64 (__mmask8 __U, __m256 __A) {
+ return (__m512i) __builtin_ia32_cvtps2uqq512_mask ((__v8sf) __A,
+ (__v8di) _mm512_setzero_si512(),
+ (__mmask8) __U,
+ _MM_FROUND_CUR_DIRECTION);
+}
+
+#define _mm512_cvt_roundps_epu64(__A, __R) __extension__ ({ \
+ (__m512i) __builtin_ia32_cvtps2uqq512_mask ((__v8sf) __A, \
+ (__v8di) _mm512_setzero_si512(), (__mmask8) -1, __R);})
+
+#define _mm512_mask_cvt_roundps_epu64(__W, __U, __A, __R) __extension__ ({ \
+ (__m512i) __builtin_ia32_cvtps2uqq512_mask ((__v8sf) __A, \
+ (__v8di) __W, (__mmask8) __U, __R);})
+
+#define _mm512_maskz_cvt_roundps_epu64(__U, __A, __R) __extension__ ({ \
+ (__m512i) __builtin_ia32_cvtps2uqq512_mask ((__v8sf) __A, \
+ (__v8di) _mm512_setzero_si512(), (__mmask8) __U, __R);})
+
+
+static __inline__ __m512d __DEFAULT_FN_ATTRS
+_mm512_cvtepi64_pd (__m512i __A) {
+ return (__m512d) __builtin_ia32_cvtqq2pd512_mask ((__v8di) __A,
+ (__v8df) _mm512_setzero_pd(),
+ (__mmask8) -1,
+ _MM_FROUND_CUR_DIRECTION);
+}
+
+static __inline__ __m512d __DEFAULT_FN_ATTRS
+_mm512_mask_cvtepi64_pd (__m512d __W, __mmask8 __U, __m512i __A) {
+ return (__m512d) __builtin_ia32_cvtqq2pd512_mask ((__v8di) __A,
+ (__v8df) __W,
+ (__mmask8) __U,
+ _MM_FROUND_CUR_DIRECTION);
+}
+
+static __inline__ __m512d __DEFAULT_FN_ATTRS
+_mm512_maskz_cvtepi64_pd (__mmask8 __U, __m512i __A) {
+ return (__m512d) __builtin_ia32_cvtqq2pd512_mask ((__v8di) __A,
+ (__v8df) _mm512_setzero_pd(),
+ (__mmask8) __U,
+ _MM_FROUND_CUR_DIRECTION);
+}
+
+#define _mm512_cvt_roundepi64_pd(__A, __R) __extension__ ({ \
+ (__m512d) __builtin_ia32_cvtqq2pd512_mask ((__v8di) __A, \
+ (__v8df) _mm512_setzero_pd(), (__mmask8) -1, __R);})
+
+#define _mm512_mask_cvt_roundepi64_pd(__W, __U, __A, __R) __extension__ ({ \
+ (__m512d) __builtin_ia32_cvtqq2pd512_mask ((__v8di) __A, \
+ (__v8df) __W, (__mmask8) __U, __R);})
+
+#define _mm512_maskz_cvt_roundepi64_pd(__U, __A, __R) __extension__ ({ \
+ (__m512d) __builtin_ia32_cvtqq2pd512_mask ((__v8di) __A, \
+ (__v8df) _mm512_setzero_pd(), (__mmask8) __U, __R);})
+
+static __inline__ __m256 __DEFAULT_FN_ATTRS
+_mm512_cvtepi64_ps (__m512i __A) {
+ return (__m256) __builtin_ia32_cvtqq2ps512_mask ((__v8di) __A,
+ (__v8sf) _mm256_setzero_ps(),
+ (__mmask8) -1,
+ _MM_FROUND_CUR_DIRECTION);
+}
+
+static __inline__ __m256 __DEFAULT_FN_ATTRS
+_mm512_mask_cvtepi64_ps (__m256 __W, __mmask8 __U, __m512i __A) {
+ return (__m256) __builtin_ia32_cvtqq2ps512_mask ((__v8di) __A,
+ (__v8sf) __W,
+ (__mmask8) __U,
+ _MM_FROUND_CUR_DIRECTION);
+}
+
+static __inline__ __m256 __DEFAULT_FN_ATTRS
+_mm512_maskz_cvtepi64_ps (__mmask8 __U, __m512i __A) {
+ return (__m256) __builtin_ia32_cvtqq2ps512_mask ((__v8di) __A,
+ (__v8sf) _mm256_setzero_ps(),
+ (__mmask8) __U,
+ _MM_FROUND_CUR_DIRECTION);
+}
+
+#define _mm512_cvt_roundepi64_ps(__A, __R) __extension__ ({ \
+ (__m256) __builtin_ia32_cvtqq2ps512_mask ((__v8di) __A, \
+ (__v8sf) _mm256_setzero_ps(), (__mmask8) -1, __R);})
+
+#define _mm512_mask_cvt_roundepi64_ps(__W, __U, __A, __R) __extension__ ({ \
+ (__m256) __builtin_ia32_cvtqq2ps512_mask ((__v8di) __A, \
+ (__v8sf) __W, (__mmask8) __U, __R);})
+
+#define _mm512_maskz_cvt_roundepi64_ps(__U, __A, __R) __extension__ ({ \
+ (__m256) __builtin_ia32_cvtqq2ps512_mask ((__v8di) __A, \
+ (__v8sf) _mm256_setzero_ps(), (__mmask8) __U, __R);})
+
+
+static __inline__ __m512i __DEFAULT_FN_ATTRS
+_mm512_cvttpd_epi64 (__m512d __A) {
+ return (__m512i) __builtin_ia32_cvttpd2qq512_mask ((__v8df) __A,
+ (__v8di) _mm512_setzero_si512(),
+ (__mmask8) -1,
+ _MM_FROUND_CUR_DIRECTION);
+}
+
+static __inline__ __m512i __DEFAULT_FN_ATTRS
+_mm512_mask_cvttpd_epi64 (__m512i __W, __mmask8 __U, __m512d __A) {
+ return (__m512i) __builtin_ia32_cvttpd2qq512_mask ((__v8df) __A,
+ (__v8di) __W,
+ (__mmask8) __U,
+ _MM_FROUND_CUR_DIRECTION);
+}
+
+static __inline__ __m512i __DEFAULT_FN_ATTRS
+_mm512_maskz_cvttpd_epi64 (__mmask8 __U, __m512d __A) {
+ return (__m512i) __builtin_ia32_cvttpd2qq512_mask ((__v8df) __A,
+ (__v8di) _mm512_setzero_si512(),
+ (__mmask8) __U,
+ _MM_FROUND_CUR_DIRECTION);
+}
+
+#define _mm512_cvtt_roundpd_epi64(__A, __R) __extension__ ({ \
+ (__m512i) __builtin_ia32_cvttpd2qq512_mask ((__v8df) __A, \
+ (__v8di) _mm512_setzero_si512(), (__mmask8) -1, __R);})
+
+#define _mm512_mask_cvtt_roundpd_epi64(__W, __U, __A, __R) __extension__ ({ \
+ (__m512i) __builtin_ia32_cvttpd2qq512_mask ((__v8df) __A, \
+ (__v8di) __W, (__mmask8) __U, __R);})
+
+#define _mm512_maskz_cvtt_roundpd_epi64(__U, __A, __R) __extension__ ({ \
+ (__m512i) __builtin_ia32_cvttpd2qq512_mask ((__v8df) __A, \
+ (__v8di) _mm512_setzero_si512(), (__mmask8) __U, __R);})
+
+static __inline__ __m512i __DEFAULT_FN_ATTRS
+_mm512_cvttpd_epu64 (__m512d __A) {
+ return (__m512i) __builtin_ia32_cvttpd2uqq512_mask ((__v8df) __A,
+ (__v8di) _mm512_setzero_si512(),
+ (__mmask8) -1,
+ _MM_FROUND_CUR_DIRECTION);
+}
+
+static __inline__ __m512i __DEFAULT_FN_ATTRS
+_mm512_mask_cvttpd_epu64 (__m512i __W, __mmask8 __U, __m512d __A) {
+ return (__m512i) __builtin_ia32_cvttpd2uqq512_mask ((__v8df) __A,
+ (__v8di) __W,
+ (__mmask8) __U,
+ _MM_FROUND_CUR_DIRECTION);
+}
+
+static __inline__ __m512i __DEFAULT_FN_ATTRS
+_mm512_maskz_cvttpd_epu64 (__mmask8 __U, __m512d __A) {
+ return (__m512i) __builtin_ia32_cvttpd2uqq512_mask ((__v8df) __A,
+ (__v8di) _mm512_setzero_si512(),
+ (__mmask8) __U,
+ _MM_FROUND_CUR_DIRECTION);
+}
+
+#define _mm512_cvtt_roundpd_epu64(__A, __R) __extension__ ({ \
+ (__m512i) __builtin_ia32_cvttpd2uqq512_mask ((__v8df) __A, \
+ (__v8di) _mm512_setzero_si512(), (__mmask8) -1, __R);})
+
+#define _mm512_mask_cvtt_roundpd_epu64(__W, __U, __A, __R) __extension__ ({ \
+ (__m512i) __builtin_ia32_cvttpd2uqq512_mask ((__v8df) __A, \
+ (__v8di) __W, (__mmask8) __U, __R);})
+
+#define _mm512_maskz_cvtt_roundpd_epu64(__U, __A, __R) __extension__ ({ \
+ (__m512i) __builtin_ia32_cvttpd2uqq512_mask ((__v8df) __A, \
+ (__v8di) _mm512_setzero_si512(), (__mmask8) __U, __R);})
+
+static __inline__ __m512i __DEFAULT_FN_ATTRS
+_mm512_cvttps_epi64 (__m256 __A) {
+ return (__m512i) __builtin_ia32_cvttps2qq512_mask ((__v8sf) __A,
+ (__v8di) _mm512_setzero_si512(),
+ (__mmask8) -1,
+ _MM_FROUND_CUR_DIRECTION);
+}
+
+static __inline__ __m512i __DEFAULT_FN_ATTRS
+_mm512_mask_cvttps_epi64 (__m512i __W, __mmask8 __U, __m256 __A) {
+ return (__m512i) __builtin_ia32_cvttps2qq512_mask ((__v8sf) __A,
+ (__v8di) __W,
+ (__mmask8) __U,
+ _MM_FROUND_CUR_DIRECTION);
+}
+
+static __inline__ __m512i __DEFAULT_FN_ATTRS
+_mm512_maskz_cvttps_epi64 (__mmask8 __U, __m256 __A) {
+ return (__m512i) __builtin_ia32_cvttps2qq512_mask ((__v8sf) __A,
+ (__v8di) _mm512_setzero_si512(),
+ (__mmask8) __U,
+ _MM_FROUND_CUR_DIRECTION);
+}
+
+#define _mm512_cvtt_roundps_epi64(__A, __R) __extension__ ({ \
+ (__m512i) __builtin_ia32_cvttps2qq512_mask ((__v8sf) __A, \
+ (__v8di) _mm512_setzero_si512(), (__mmask8) -1, __R);})
+
+#define _mm512_mask_cvtt_roundps_epi64(__W, __U, __A, __R) __extension__ ({ \
+ (__m512i) __builtin_ia32_cvttps2qq512_mask ((__v8sf) __A, \
+ (__v8di) __W, (__mmask8) __U, __R);})
+
+#define _mm512_maskz_cvtt_roundps_epi64(__U, __A, __R) __extension__ ({ \
+ (__m512i) __builtin_ia32_cvttps2qq512_mask ((__v8sf) __A, \
+ (__v8di) _mm512_setzero_si512(), (__mmask8) __U, __R);})
+
+static __inline__ __m512i __DEFAULT_FN_ATTRS
+_mm512_cvttps_epu64 (__m256 __A) {
+ return (__m512i) __builtin_ia32_cvttps2uqq512_mask ((__v8sf) __A,
+ (__v8di) _mm512_setzero_si512(),
+ (__mmask8) -1,
+ _MM_FROUND_CUR_DIRECTION);
+}
+
+static __inline__ __m512i __DEFAULT_FN_ATTRS
+_mm512_mask_cvttps_epu64 (__m512i __W, __mmask8 __U, __m256 __A) {
+ return (__m512i) __builtin_ia32_cvttps2uqq512_mask ((__v8sf) __A,
+ (__v8di) __W,
+ (__mmask8) __U,
+ _MM_FROUND_CUR_DIRECTION);
+}
+
+static __inline__ __m512i __DEFAULT_FN_ATTRS
+_mm512_maskz_cvttps_epu64 (__mmask8 __U, __m256 __A) {
+ return (__m512i) __builtin_ia32_cvttps2uqq512_mask ((__v8sf) __A,
+ (__v8di) _mm512_setzero_si512(),
+ (__mmask8) __U,
+ _MM_FROUND_CUR_DIRECTION);
+}
+
+#define _mm512_cvtt_roundps_epu64(__A, __R) __extension__ ({ \
+  (__m512i) __builtin_ia32_cvttps2uqq512_mask ((__v8sf) __A, \
+  (__v8di) _mm512_setzero_si512(), (__mmask8) -1, __R);})
+
+#define _mm512_mask_cvtt_roundps_epu64(__W, __U, __A, __R) __extension__ ({ \
+ (__m512i) __builtin_ia32_cvttps2uqq512_mask ((__v8sf) __A, \
+ (__v8di) __W, (__mmask8) __U, __R);})
+
+#define _mm512_maskz_cvtt_roundps_epu64(__U, __A, __R) __extension__ ({ \
+ (__m512i) __builtin_ia32_cvttps2uqq512_mask ((__v8sf) __A, \
+ (__v8di) _mm512_setzero_si512(), (__mmask8) __U, __R);})
+
+static __inline__ __m512d __DEFAULT_FN_ATTRS
+_mm512_cvtepu64_pd (__m512i __A) {
+ return (__m512d) __builtin_ia32_cvtuqq2pd512_mask ((__v8di) __A,
+ (__v8df) _mm512_setzero_pd(),
+ (__mmask8) -1,
+ _MM_FROUND_CUR_DIRECTION);
+}
+
+static __inline__ __m512d __DEFAULT_FN_ATTRS
+_mm512_mask_cvtepu64_pd (__m512d __W, __mmask8 __U, __m512i __A) {
+ return (__m512d) __builtin_ia32_cvtuqq2pd512_mask ((__v8di) __A,
+ (__v8df) __W,
+ (__mmask8) __U,
+ _MM_FROUND_CUR_DIRECTION);
+}
+
+static __inline__ __m512d __DEFAULT_FN_ATTRS
+_mm512_maskz_cvtepu64_pd (__mmask8 __U, __m512i __A) {
+ return (__m512d) __builtin_ia32_cvtuqq2pd512_mask ((__v8di) __A,
+ (__v8df) _mm512_setzero_pd(),
+ (__mmask8) __U,
+ _MM_FROUND_CUR_DIRECTION);
+}
+
+#define _mm512_cvt_roundepu64_pd(__A, __R) __extension__ ({ \
+ (__m512d) __builtin_ia32_cvtuqq2pd512_mask ((__v8di) __A, \
+ (__v8df) _mm512_setzero_pd(), (__mmask8) -1, __R);})
+
+#define _mm512_mask_cvt_roundepu64_pd(__W, __U, __A, __R) __extension__ ({ \
+ (__m512d) __builtin_ia32_cvtuqq2pd512_mask ((__v8di) __A, \
+ (__v8df) __W, (__mmask8) __U, __R);})
+
+
+#define _mm512_maskz_cvt_roundepu64_pd(__U, __A, __R) __extension__ ({ \
+ (__m512d) __builtin_ia32_cvtuqq2pd512_mask ((__v8di) __A, \
+ (__v8df) _mm512_setzero_pd(), (__mmask8) __U, __R);})
+
+
+static __inline__ __m256 __DEFAULT_FN_ATTRS
+_mm512_cvtepu64_ps (__m512i __A) {
+ return (__m256) __builtin_ia32_cvtuqq2ps512_mask ((__v8di) __A,
+ (__v8sf) _mm256_setzero_ps(),
+ (__mmask8) -1,
+ _MM_FROUND_CUR_DIRECTION);
+}
+
+static __inline__ __m256 __DEFAULT_FN_ATTRS
+_mm512_mask_cvtepu64_ps (__m256 __W, __mmask8 __U, __m512i __A) {
+ return (__m256) __builtin_ia32_cvtuqq2ps512_mask ((__v8di) __A,
+ (__v8sf) __W,
+ (__mmask8) __U,
+ _MM_FROUND_CUR_DIRECTION);
+}
+
+static __inline__ __m256 __DEFAULT_FN_ATTRS
+_mm512_maskz_cvtepu64_ps (__mmask8 __U, __m512i __A) {
+ return (__m256) __builtin_ia32_cvtuqq2ps512_mask ((__v8di) __A,
+ (__v8sf) _mm256_setzero_ps(),
+ (__mmask8) __U,
+ _MM_FROUND_CUR_DIRECTION);
+}
+
+#define _mm512_cvt_roundepu64_ps(__A, __R) __extension__ ({ \
+ (__m256) __builtin_ia32_cvtuqq2ps512_mask ((__v8di) __A, \
+ (__v8sf) _mm256_setzero_ps(), (__mmask8) -1, __R);})
+
+#define _mm512_mask_cvt_roundepu64_ps(__W, __U, __A, __R) __extension__ ({ \
+ (__m256) __builtin_ia32_cvtuqq2ps512_mask ((__v8di) __A, \
+ (__v8sf) __W, (__mmask8) __U, __R);})
+
+#define _mm512_maskz_cvt_roundepu64_ps(__U, __A, __R) __extension__ ({ \
+ (__m256) __builtin_ia32_cvtuqq2ps512_mask ((__v8di) __A, \
+ (__v8sf) _mm256_setzero_ps(), (__mmask8) __U, __R);})
+
+#define _mm512_range_pd(__A, __B, __C) __extension__ ({ \
+ (__m512d) __builtin_ia32_rangepd512_mask ((__v8df) __A, (__v8df) __B, __C,\
+ (__v8df) _mm512_setzero_pd(), (__mmask8) -1, \
+ _MM_FROUND_CUR_DIRECTION);})
+
+#define _mm512_mask_range_pd(__W, __U, __A, __B, __C) __extension__ ({ \
+ (__m512d) __builtin_ia32_rangepd512_mask ((__v8df) __A, (__v8df) __B, __C,\
+ (__v8df) __W, (__mmask8) __U, _MM_FROUND_CUR_DIRECTION);})
+
+#define _mm512_maskz_range_pd(__U, __A, __B, __C) __extension__ ({ \
+ (__m512d) __builtin_ia32_rangepd512_mask ((__v8df) __A, (__v8df) __B, __C, \
+ (__v8df) _mm512_setzero_pd(), (__mmask8) __U, \
+ _MM_FROUND_CUR_DIRECTION);})
+
+#define _mm512_range_round_pd(__A, __B, __C, __R) __extension__ ({ \
+ (__m512d) __builtin_ia32_rangepd512_mask ((__v8df) __A, (__v8df) __B, __C, \
+ (__v8df) _mm512_setzero_pd(), (__mmask8) -1, __R);})
+
+#define _mm512_mask_range_round_pd(__W, __U, __A, __B, __C, __R) __extension__ ({ \
+ (__m512d) __builtin_ia32_rangepd512_mask ((__v8df) __A, (__v8df) __B, __C, \
+ (__v8df) __W, (__mmask8) __U, __R);})
+
+#define _mm512_maskz_range_round_pd(__U, __A, __B, __C, __R) __extension__ ({ \
+ (__m512d) __builtin_ia32_rangepd512_mask ((__v8df) __A, (__v8df) __B, __C, \
+ (__v8df) _mm512_setzero_pd(), (__mmask8) __U, __R);})
+
+#define _mm512_range_ps(__A, __B, __C) __extension__ ({ \
+ (__m512) __builtin_ia32_rangeps512_mask ((__v16sf) __A, (__v16sf) __B, __C, \
+ (__v16sf) _mm512_setzero_ps(), (__mmask16) -1, \
+ _MM_FROUND_CUR_DIRECTION);})
+
+#define _mm512_mask_range_ps(__W, __U, __A, __B, __C) __extension__ ({ \
+ (__m512) __builtin_ia32_rangeps512_mask ((__v16sf) __A, (__v16sf) __B, \
+ __C, (__v16sf) __W, (__mmask16) __U, _MM_FROUND_CUR_DIRECTION);})
+
+#define _mm512_maskz_range_ps(__U, __A, __B, __C) __extension__ ({ \
+  (__m512) __builtin_ia32_rangeps512_mask ((__v16sf) __A, (__v16sf) __B, \
+  __C, (__v16sf) _mm512_setzero_ps(), (__mmask16) __U, \
+  _MM_FROUND_CUR_DIRECTION);})
+
+#define _mm512_range_round_ps(__A, __B, __C, __R) __extension__ ({ \
+ (__m512) __builtin_ia32_rangeps512_mask ((__v16sf) __A, (__v16sf) __B, \
+ __C, (__v16sf) _mm512_setzero_ps(), (__mmask16) -1, __R);})
+
+#define _mm512_mask_range_round_ps(__W, __U, __A, __B, __C, __R) __extension__ ({ \
+ (__m512) __builtin_ia32_rangeps512_mask ((__v16sf) __A, (__v16sf) __B, \
+ __C, (__v16sf) __W, (__mmask16) __U, __R);})
+
+#define _mm512_maskz_range_round_ps(__U, __A, __B, __C, __R) __extension__ ({ \
+ (__m512) __builtin_ia32_rangeps512_mask ((__v16sf) __A, (__v16sf) __B, \
+ __C, (__v16sf) _mm512_setzero_ps(), (__mmask16) __U, __R);})
+
+#define _mm512_reduce_pd(__A, __B) __extension__ ({ \
+ (__m512d) __builtin_ia32_reducepd512_mask ((__v8df) __A, __B, \
+ (__v8df) _mm512_setzero_pd(), (__mmask8) -1, _MM_FROUND_CUR_DIRECTION);})
+
+#define _mm512_mask_reduce_pd(__W, __U, __A, __B) __extension__ ({ \
+  (__m512d) __builtin_ia32_reducepd512_mask ((__v8df) __A, __B, \
+  (__v8df) __W, (__mmask8) __U, _MM_FROUND_CUR_DIRECTION);})
+
+#define _mm512_maskz_reduce_pd(__U, __A, __B) __extension__ ({ \
+ (__m512d) __builtin_ia32_reducepd512_mask ((__v8df) __A, __B, \
+ (__v8df) _mm512_setzero_pd(), (__mmask8) __U, _MM_FROUND_CUR_DIRECTION);})
+
+#define _mm512_reduce_ps(__A, __B) __extension__ ({ \
+ (__m512) __builtin_ia32_reduceps512_mask ((__v16sf) __A, __B, \
+ (__v16sf) _mm512_setzero_ps(), (__mmask16) -1, _MM_FROUND_CUR_DIRECTION);})
+
+#define _mm512_mask_reduce_ps(__W, __U, __A, __B) __extension__ ({ \
+ (__m512) __builtin_ia32_reduceps512_mask ((__v16sf) __A, __B, \
+ (__v16sf) __W, (__mmask16) __U, _MM_FROUND_CUR_DIRECTION);})
+
+#define _mm512_maskz_reduce_ps(__U, __A, __B) __extension__ ({ \
+ (__m512) __builtin_ia32_reduceps512_mask ((__v16sf) __A, __B, \
+ (__v16sf) _mm512_setzero_ps(), (__mmask16) __U, _MM_FROUND_CUR_DIRECTION);})
+
+#define _mm512_reduce_round_pd(__A, __B, __R) __extension__ ({\
+ (__m512d) __builtin_ia32_reducepd512_mask ((__v8df) __A, __B, \
+ (__v8df) _mm512_setzero_pd(), (__mmask8) -1, __R);})
+
+#define _mm512_mask_reduce_round_pd(__W, __U, __A, __B, __R) __extension__ ({\
+  (__m512d) __builtin_ia32_reducepd512_mask ((__v8df) __A, __B, \
+  (__v8df) __W, (__mmask8) __U, __R);})
+
+#define _mm512_maskz_reduce_round_pd(__U, __A, __B, __R) __extension__ ({\
+ (__m512d) __builtin_ia32_reducepd512_mask ((__v8df) __A, __B, \
+ (__v8df) _mm512_setzero_pd(), (__mmask8) __U, __R);})
+
+#define _mm512_reduce_round_ps(__A, __B, __R) __extension__ ({\
+ (__m512) __builtin_ia32_reduceps512_mask ((__v16sf) __A, __B, \
+ (__v16sf) _mm512_setzero_ps(), (__mmask16) -1, __R);})
+
+#define _mm512_mask_reduce_round_ps(__W, __U, __A, __B, __R) __extension__ ({\
+ (__m512) __builtin_ia32_reduceps512_mask ((__v16sf) __A, __B, \
+ (__v16sf) __W, (__mmask16) __U, __R);})
+
+#define _mm512_maskz_reduce_round_ps(__U, __A, __B, __R) __extension__ ({\
+ (__m512) __builtin_ia32_reduceps512_mask ((__v16sf) __A, __B, \
+ (__v16sf) _mm512_setzero_ps(), (__mmask16) __U, __R);})
+
+#undef __DEFAULT_FN_ATTRS
+
+#endif
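A minimal usage sketch for the AVX512DQ conversion intrinsics added above (assumes an AVX512F+AVX512DQ target and compilation with -mavx512f -mavx512dq; the values and helper name are illustrative, and _mm512_set1_epi64/_mm512_set1_pd/_mm512_storeu_pd come from the wider AVX512F API, not from this hunk):

/* Masked int64 -> double conversion: lanes whose mask bit is 0 keep
 * the pass-through value instead of being converted. */
#include <immintrin.h>
#include <stdio.h>

int main(void) {
  __m512i q    = _mm512_set1_epi64(42);
  __m512d keep = _mm512_set1_pd(-1.0);
  /* Convert even lanes only (mask 0x55); odd lanes keep -1.0. */
  __m512d d = _mm512_mask_cvtepi64_pd(keep, (__mmask8)0x55, q);
  double out[8];
  _mm512_storeu_pd(out, d);
  for (int i = 0; i < 8; ++i)
    printf("%g\n", out[i]);   /* 42, -1, 42, -1, ... */
  return 0;
}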
diff --git a/clang-2690385/lib64/clang/3.8/include/avx512erintrin.h b/clang-2690385/lib64/clang/3.8/include/avx512erintrin.h
new file mode 100644
index 00000000..40a91218
--- /dev/null
+++ b/clang-2690385/lib64/clang/3.8/include/avx512erintrin.h
@@ -0,0 +1,285 @@
+/*===---- avx512erintrin.h - AVX512ER intrinsics ---------------------------===
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+ * THE SOFTWARE.
+ *
+ *===-----------------------------------------------------------------------===
+ */
+#ifndef __IMMINTRIN_H
+#error "Never use <avx512erintrin.h> directly; include <immintrin.h> instead."
+#endif
+
+#ifndef __AVX512ERINTRIN_H
+#define __AVX512ERINTRIN_H
+
+// exp2a23
+#define _mm512_exp2a23_round_pd(A, R) __extension__ ({ \
+ (__m512d)__builtin_ia32_exp2pd_mask((__v8df)(__m512d)(A), \
+ (__v8df)_mm512_setzero_pd(), \
+ (__mmask8)-1, (R)); })
+
+#define _mm512_mask_exp2a23_round_pd(S, M, A, R) __extension__ ({ \
+ (__m512d)__builtin_ia32_exp2pd_mask((__v8df)(__m512d)(A), \
+ (__v8df)(__m512d)(S), \
+ (__mmask8)(M), (R)); })
+
+#define _mm512_maskz_exp2a23_round_pd(M, A, R) __extension__ ({ \
+ (__m512d)__builtin_ia32_exp2pd_mask((__v8df)(__m512d)(A), \
+ (__v8df)_mm512_setzero_pd(), \
+ (__mmask8)(M), (R)); })
+
+#define _mm512_exp2a23_pd(A) \
+ _mm512_exp2a23_round_pd((A), _MM_FROUND_CUR_DIRECTION)
+
+#define _mm512_mask_exp2a23_pd(S, M, A) \
+ _mm512_mask_exp2a23_round_pd((S), (M), (A), _MM_FROUND_CUR_DIRECTION)
+
+#define _mm512_maskz_exp2a23_pd(M, A) \
+ _mm512_maskz_exp2a23_round_pd((M), (A), _MM_FROUND_CUR_DIRECTION)
+
+#define _mm512_exp2a23_round_ps(A, R) __extension__ ({ \
+  (__m512)__builtin_ia32_exp2ps_mask((__v16sf)(__m512)(A), \
+                                     (__v16sf)_mm512_setzero_ps(), \
+                                     (__mmask16)-1, (R)); })
+
+#define _mm512_mask_exp2a23_round_ps(S, M, A, R) __extension__ ({ \
+  (__m512)__builtin_ia32_exp2ps_mask((__v16sf)(__m512)(A), \
+                                     (__v16sf)(__m512)(S), \
+                                     (__mmask16)(M), (R)); })
+
+#define _mm512_maskz_exp2a23_round_ps(M, A, R) __extension__ ({ \
+  (__m512)__builtin_ia32_exp2ps_mask((__v16sf)(__m512)(A), \
+                                     (__v16sf)_mm512_setzero_ps(), \
+                                     (__mmask16)(M), (R)); })
+
+#define _mm512_exp2a23_ps(A) \
+ _mm512_exp2a23_round_ps((A), _MM_FROUND_CUR_DIRECTION)
+
+#define _mm512_mask_exp2a23_ps(S, M, A) \
+ _mm512_mask_exp2a23_round_ps((S), (M), (A), _MM_FROUND_CUR_DIRECTION)
+
+#define _mm512_maskz_exp2a23_ps(M, A) \
+ _mm512_maskz_exp2a23_round_ps((M), (A), _MM_FROUND_CUR_DIRECTION)
+
+// rsqrt28
+#define _mm512_rsqrt28_round_pd(A, R) __extension__ ({ \
+ (__m512d)__builtin_ia32_rsqrt28pd_mask((__v8df)(__m512d)(A), \
+ (__v8df)_mm512_setzero_pd(), \
+ (__mmask8)-1, (R)); })
+
+#define _mm512_mask_rsqrt28_round_pd(S, M, A, R) __extension__ ({ \
+ (__m512d)__builtin_ia32_rsqrt28pd_mask((__v8df)(__m512d)(A), \
+ (__v8df)(__m512d)(S), \
+ (__mmask8)(M), (R)); })
+
+#define _mm512_maskz_rsqrt28_round_pd(M, A, R) __extension__ ({ \
+ (__m512d)__builtin_ia32_rsqrt28pd_mask((__v8df)(__m512d)(A), \
+ (__v8df)_mm512_setzero_pd(), \
+ (__mmask8)(M), (R)); })
+
+#define _mm512_rsqrt28_pd(A) \
+ _mm512_rsqrt28_round_pd((A), _MM_FROUND_CUR_DIRECTION)
+
+#define _mm512_mask_rsqrt28_pd(S, M, A) \
+ _mm512_mask_rsqrt28_round_pd((S), (M), (A), _MM_FROUND_CUR_DIRECTION)
+
+#define _mm512_maskz_rsqrt28_pd(M, A) \
+ _mm512_maskz_rsqrt28_round_pd((M), (A), _MM_FROUND_CUR_DIRECTION)
+
+#define _mm512_rsqrt28_round_ps(A, R) __extension__ ({ \
+ (__m512)__builtin_ia32_rsqrt28ps_mask((__v16sf)(__m512)(A), \
+ (__v16sf)_mm512_setzero_ps(), \
+ (__mmask16)-1, (R)); })
+
+#define _mm512_mask_rsqrt28_round_ps(S, M, A, R) __extension__ ({ \
+ (__m512)__builtin_ia32_rsqrt28ps_mask((__v16sf)(__m512)(A), \
+ (__v16sf)(__m512)(S), \
+ (__mmask16)(M), (R)); })
+
+#define _mm512_maskz_rsqrt28_round_ps(M, A, R) __extension__ ({ \
+ (__m512)__builtin_ia32_rsqrt28ps_mask((__v16sf)(__m512)(A), \
+ (__v16sf)_mm512_setzero_ps(), \
+ (__mmask16)(M), (R)); })
+
+#define _mm512_rsqrt28_ps(A) \
+ _mm512_rsqrt28_round_ps((A), _MM_FROUND_CUR_DIRECTION)
+
+#define _mm512_mask_rsqrt28_ps(S, M, A) \
+  _mm512_mask_rsqrt28_round_ps((S), (M), (A), _MM_FROUND_CUR_DIRECTION)
+
+#define _mm512_maskz_rsqrt28_ps(M, A) \
+ _mm512_maskz_rsqrt28_round_ps((M), (A), _MM_FROUND_CUR_DIRECTION)
+
+#define _mm_rsqrt28_round_ss(A, B, R) __extension__ ({ \
+ (__m128)__builtin_ia32_rsqrt28ss_round((__v4sf)(__m128)(A), \
+ (__v4sf)(__m128)(B), \
+ (__v4sf)_mm_setzero_ps(), \
+ (__mmask8)-1, (R)); })
+
+#define _mm_mask_rsqrt28_round_ss(S, M, A, B, R) __extension__ ({ \
+ (__m128)__builtin_ia32_rsqrt28ss_round((__v4sf)(__m128)(A), \
+ (__v4sf)(__m128)(B), \
+ (__v4sf)(__m128)(S), \
+ (__mmask8)(M), (R)); })
+
+#define _mm_maskz_rsqrt28_round_ss(M, A, B, R) __extension__ ({ \
+ (__m128)__builtin_ia32_rsqrt28ss_round((__v4sf)(__m128)(A), \
+ (__v4sf)(__m128)(B), \
+ (__v4sf)_mm_setzero_ps(), \
+ (__mmask8)(M), (R)); })
+
+#define _mm_rsqrt28_ss(A, B) \
+ _mm_rsqrt28_round_ss((A), (B), _MM_FROUND_CUR_DIRECTION)
+
+#define _mm_mask_rsqrt28_ss(S, M, A, B) \
+ _mm_mask_rsqrt28_round_ss((S), (M), (A), (B), _MM_FROUND_CUR_DIRECTION)
+
+#define _mm_maskz_rsqrt28_ss(M, A, B) \
+ _mm_maskz_rsqrt28_round_ss((M), (A), (B), _MM_FROUND_CUR_DIRECTION)
+
+#define _mm_rsqrt28_round_sd(A, B, R) __extension__ ({ \
+ (__m128d)__builtin_ia32_rsqrt28sd_round((__v2df)(__m128d)(A), \
+ (__v2df)(__m128d)(B), \
+ (__v2df)_mm_setzero_pd(), \
+ (__mmask8)-1, (R)); })
+
+#define _mm_mask_rsqrt28_round_sd(S, M, A, B, R) __extension__ ({ \
+ (__m128d)__builtin_ia32_rsqrt28sd_round((__v2df)(__m128d)(A), \
+ (__v2df)(__m128d)(B), \
+ (__v2df)(__m128d)(S), \
+ (__mmask8)(M), (R)); })
+
+#define _mm_maskz_rsqrt28_round_sd(M, A, B, R) __extension__ ({ \
+ (__m128d)__builtin_ia32_rsqrt28sd_round((__v2df)(__m128d)(A), \
+ (__v2df)(__m128d)(B), \
+ (__v2df)_mm_setzero_pd(), \
+ (__mmask8)(M), (R)); })
+
+#define _mm_rsqrt28_sd(A, B) \
+ _mm_rsqrt28_round_sd((A), (B), _MM_FROUND_CUR_DIRECTION)
+
+#define _mm_mask_rsqrt28_sd(S, M, A, B) \
+ _mm_mask_rsqrt28_round_sd((S), (M), (A), (B), _MM_FROUND_CUR_DIRECTION)
+
+#define _mm_maskz_rsqrt28_sd(M, A, B) \
+  _mm_maskz_rsqrt28_round_sd((M), (A), (B), _MM_FROUND_CUR_DIRECTION)
+
+// rcp28
+#define _mm512_rcp28_round_pd(A, R) __extension__ ({ \
+ (__m512d)__builtin_ia32_rcp28pd_mask((__v8df)(__m512d)(A), \
+ (__v8df)_mm512_setzero_pd(), \
+ (__mmask8)-1, (R)); })
+
+#define _mm512_mask_rcp28_round_pd(S, M, A, R) __extension__ ({ \
+ (__m512d)__builtin_ia32_rcp28pd_mask((__v8df)(__m512d)(A), \
+ (__v8df)(__m512d)(S), \
+ (__mmask8)(M), (R)); })
+
+#define _mm512_maskz_rcp28_round_pd(M, A, R) __extension__ ({ \
+ (__m512d)__builtin_ia32_rcp28pd_mask((__v8df)(__m512d)(A), \
+ (__v8df)_mm512_setzero_pd(), \
+ (__mmask8)(M), (R)); })
+
+#define _mm512_rcp28_pd(A) \
+ _mm512_rcp28_round_pd((A), _MM_FROUND_CUR_DIRECTION)
+
+#define _mm512_mask_rcp28_pd(S, M, A) \
+ _mm512_mask_rcp28_round_pd((S), (M), (A), _MM_FROUND_CUR_DIRECTION)
+
+#define _mm512_maskz_rcp28_pd(M, A) \
+ _mm512_maskz_rcp28_round_pd((M), (A), _MM_FROUND_CUR_DIRECTION)
+
+#define _mm512_rcp28_round_ps(A, R) __extension__ ({ \
+ (__m512)__builtin_ia32_rcp28ps_mask((__v16sf)(__m512)(A), \
+ (__v16sf)_mm512_setzero_ps(), \
+ (__mmask16)-1, (R)); })
+
+#define _mm512_mask_rcp28_round_ps(S, M, A, R) __extension__ ({ \
+ (__m512)__builtin_ia32_rcp28ps_mask((__v16sf)(__m512)(A), \
+ (__v16sf)(__m512)(S), \
+ (__mmask16)(M), (R)); })
+
+#define _mm512_maskz_rcp28_round_ps(M, A, R) __extension__ ({ \
+ (__m512)__builtin_ia32_rcp28ps_mask((__v16sf)(__m512)(A), \
+ (__v16sf)_mm512_setzero_ps(), \
+ (__mmask16)(M), (R)); })
+
+#define _mm512_rcp28_ps(A) \
+ _mm512_rcp28_round_ps((A), _MM_FROUND_CUR_DIRECTION)
+
+#define _mm512_mask_rcp28_ps(S, M, A) \
+ _mm512_mask_rcp28_round_ps((S), (M), (A), _MM_FROUND_CUR_DIRECTION)
+
+#define _mm512_maskz_rcp28_ps(M, A) \
+ _mm512_maskz_rcp28_round_ps((M), (A), _MM_FROUND_CUR_DIRECTION)
+
+#define _mm_rcp28_round_ss(A, B, R) __extension__ ({ \
+ (__m128)__builtin_ia32_rcp28ss_round((__v4sf)(__m128)(A), \
+ (__v4sf)(__m128)(B), \
+ (__v4sf)_mm_setzero_ps(), \
+ (__mmask8)-1, (R)); })
+
+#define _mm_mask_rcp28_round_ss(S, M, A, B, R) __extension__ ({ \
+ (__m128)__builtin_ia32_rcp28ss_round((__v4sf)(__m128)(A), \
+ (__v4sf)(__m128)(B), \
+ (__v4sf)(__m128)(S), \
+ (__mmask8)(M), (R)); })
+
+#define _mm_maskz_rcp28_round_ss(M, A, B, R) __extension__ ({ \
+ (__m128)__builtin_ia32_rcp28ss_round((__v4sf)(__m128)(A), \
+ (__v4sf)(__m128)(B), \
+ (__v4sf)_mm_setzero_ps(), \
+ (__mmask8)(M), (R)); })
+
+#define _mm_rcp28_ss(A, B) \
+ _mm_rcp28_round_ss((A), (B), _MM_FROUND_CUR_DIRECTION)
+
+#define _mm_mask_rcp28_ss(S, M, A, B) \
+ _mm_mask_rcp28_round_ss((S), (M), (A), (B), _MM_FROUND_CUR_DIRECTION)
+
+#define _mm_maskz_rcp28_ss(M, A, B) \
+ _mm_maskz_rcp28_round_ss((M), (A), (B), _MM_FROUND_CUR_DIRECTION)
+
+#define _mm_rcp28_round_sd(A, B, R) __extension__ ({ \
+ (__m128d)__builtin_ia32_rcp28sd_round((__v2df)(__m128d)(A), \
+ (__v2df)(__m128d)(B), \
+ (__v2df)_mm_setzero_pd(), \
+ (__mmask8)-1, (R)); })
+
+#define _mm_mask_rcp28_round_sd(S, M, A, B, R) __extension__ ({ \
+ (__m128d)__builtin_ia32_rcp28sd_round((__v2df)(__m128d)(A), \
+ (__v2df)(__m128d)(B), \
+ (__v2df)(__m128d)(S), \
+ (__mmask8)(M), (R)); })
+
+#define _mm_maskz_rcp28_round_sd(M, A, B, R) __extension__ ({ \
+ (__m128d)__builtin_ia32_rcp28sd_round((__v2df)(__m128d)(A), \
+ (__v2df)(__m128d)(B), \
+ (__v2df)_mm_setzero_pd(), \
+ (__mmask8)(M), (R)); })
+
+#define _mm_rcp28_sd(A, B) \
+ _mm_rcp28_round_sd((A), (B), _MM_FROUND_CUR_DIRECTION)
+
+#define _mm_mask_rcp28_sd(S, M, A, B) \
+ _mm_mask_rcp28_round_sd((S), (M), (A), (B), _MM_FROUND_CUR_DIRECTION)
+
+#define _mm_maskz_rcp28_sd(M, A, B) \
+ _mm_maskz_rcp28_round_sd((M), (A), (B), _MM_FROUND_CUR_DIRECTION)
+
+#endif // __AVX512ERINTRIN_H
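A short sketch of the 28-bit-accuracy intrinsics this header declares (assumes AVX512ER hardware, i.e. Knights Landing-class parts, and compilation with -mavx512f -mavx512er; the values are illustrative):

/* rsqrt28 approximates 1/sqrt(x) with a relative error of at most
 * 2^-28, versus 2^-14 for the AVX512F rsqrt14 family. */
#include <immintrin.h>

int main(void) {
  __m512 x = _mm512_set1_ps(4.0f);
  __m512 r = _mm512_rsqrt28_ps(x);   /* each lane ~= 0.5f */
  float out[16];
  _mm512_storeu_ps(out, r);
  return (out[0] > 0.499f && out[0] < 0.501f) ? 0 : 1;
}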
diff --git a/clang-2690385/lib64/clang/3.8/include/avx512fintrin.h b/clang-2690385/lib64/clang/3.8/include/avx512fintrin.h
new file mode 100644
index 00000000..8dcdc710
--- /dev/null
+++ b/clang-2690385/lib64/clang/3.8/include/avx512fintrin.h
@@ -0,0 +1,3074 @@
+/*===---- avx512fintrin.h - AVX512F intrinsics -----------------------------===
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+ * THE SOFTWARE.
+ *
+ *===-----------------------------------------------------------------------===
+ */
+#ifndef __IMMINTRIN_H
+#error "Never use <avx512fintrin.h> directly; include <immintrin.h> instead."
+#endif
+
+#ifndef __AVX512FINTRIN_H
+#define __AVX512FINTRIN_H
+
+typedef double __v8df __attribute__((__vector_size__(64)));
+typedef float __v16sf __attribute__((__vector_size__(64)));
+typedef long long __v8di __attribute__((__vector_size__(64)));
+typedef int __v16si __attribute__((__vector_size__(64)));
+
+typedef float __m512 __attribute__((__vector_size__(64)));
+typedef double __m512d __attribute__((__vector_size__(64)));
+typedef long long __m512i __attribute__((__vector_size__(64)));
+
+typedef unsigned char __mmask8;
+typedef unsigned short __mmask16;
+
+/* Rounding mode macros. */
+#define _MM_FROUND_TO_NEAREST_INT 0x00
+#define _MM_FROUND_TO_NEG_INF 0x01
+#define _MM_FROUND_TO_POS_INF 0x02
+#define _MM_FROUND_TO_ZERO 0x03
+#define _MM_FROUND_CUR_DIRECTION 0x04
+
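The _MM_FROUND_* values above feed the explicit-rounding (_round_) forms defined throughout this header: _MM_FROUND_CUR_DIRECTION defers to the MXCSR rounding field, while the other four encode a static rounding mode into the instruction itself. A minimal sketch (uses _mm512_add_round_pd, defined later in this header; note that newer compilers may additionally require OR-ing in _MM_FROUND_NO_EXC with a static mode):

#include <immintrin.h>

/* Round-toward-zero addition, independent of the current MXCSR state. */
__m512d add_toward_zero(__m512d a, __m512d b) {
  return _mm512_add_round_pd(a, b, _MM_FROUND_TO_ZERO);
}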
+/* Define the default attributes for the functions in this file. */
+#define __DEFAULT_FN_ATTRS __attribute__((__always_inline__, __nodebug__, __target__("avx512f")))
+
+/* Create vectors with repeated elements */
+
+static __inline __m512i __DEFAULT_FN_ATTRS
+_mm512_setzero_si512(void)
+{
+ return (__m512i)(__v8di){ 0, 0, 0, 0, 0, 0, 0, 0 };
+}
+
+static __inline__ __m512d __DEFAULT_FN_ATTRS
+_mm512_undefined_pd()
+{
+ return (__m512d)__builtin_ia32_undef512();
+}
+
+static __inline__ __m512 __DEFAULT_FN_ATTRS
+_mm512_undefined()
+{
+ return (__m512)__builtin_ia32_undef512();
+}
+
+static __inline__ __m512 __DEFAULT_FN_ATTRS
+_mm512_undefined_ps()
+{
+ return (__m512)__builtin_ia32_undef512();
+}
+
+static __inline__ __m512i __DEFAULT_FN_ATTRS
+_mm512_undefined_epi32()
+{
+ return (__m512i)__builtin_ia32_undef512();
+}
+
+static __inline __m512i __DEFAULT_FN_ATTRS
+_mm512_maskz_set1_epi32(__mmask16 __M, int __A)
+{
+ return (__m512i) __builtin_ia32_pbroadcastd512_gpr_mask (__A,
+ (__v16si)
+ _mm512_setzero_si512 (),
+ __M);
+}
+
+static __inline __m512i __DEFAULT_FN_ATTRS
+_mm512_maskz_set1_epi64(__mmask8 __M, long long __A)
+{
+#ifdef __x86_64__
+ return (__m512i) __builtin_ia32_pbroadcastq512_gpr_mask (__A,
+ (__v8di)
+ _mm512_setzero_si512 (),
+ __M);
+#else
+ return (__m512i) __builtin_ia32_pbroadcastq512_mem_mask (__A,
+ (__v8di)
+ _mm512_setzero_si512 (),
+ __M);
+#endif
+}
+
+static __inline __m512 __DEFAULT_FN_ATTRS
+_mm512_setzero_ps(void)
+{
+ return (__m512){ 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0,
+ 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0 };
+}
+static __inline __m512d __DEFAULT_FN_ATTRS
+_mm512_setzero_pd(void)
+{
+ return (__m512d){ 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0 };
+}
+
+static __inline __m512 __DEFAULT_FN_ATTRS
+_mm512_set1_ps(float __w)
+{
+ return (__m512){ __w, __w, __w, __w, __w, __w, __w, __w,
+ __w, __w, __w, __w, __w, __w, __w, __w };
+}
+
+static __inline __m512d __DEFAULT_FN_ATTRS
+_mm512_set1_pd(double __w)
+{
+ return (__m512d){ __w, __w, __w, __w, __w, __w, __w, __w };
+}
+
+static __inline __m512i __DEFAULT_FN_ATTRS
+_mm512_set1_epi32(int __s)
+{
+ return (__m512i)(__v16si){ __s, __s, __s, __s, __s, __s, __s, __s,
+ __s, __s, __s, __s, __s, __s, __s, __s };
+}
+
+static __inline __m512i __DEFAULT_FN_ATTRS
+_mm512_set1_epi64(long long __d)
+{
+ return (__m512i)(__v8di){ __d, __d, __d, __d, __d, __d, __d, __d };
+}
+
+static __inline__ __m512 __DEFAULT_FN_ATTRS
+_mm512_broadcastss_ps(__m128 __X)
+{
+ float __f = __X[0];
+ return (__v16sf){ __f, __f, __f, __f,
+ __f, __f, __f, __f,
+ __f, __f, __f, __f,
+ __f, __f, __f, __f };
+}
+
+static __inline__ __m512d __DEFAULT_FN_ATTRS
+_mm512_broadcastsd_pd(__m128d __X)
+{
+ double __d = __X[0];
+ return (__v8df){ __d, __d, __d, __d,
+ __d, __d, __d, __d };
+}
+
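The maskz_ broadcast forms above zero the lanes whose mask bit is clear rather than preserving anything; a one-line illustration (a sketch assuming -mavx512f; the function name is illustrative):

#include <immintrin.h>

/* Lanes 0..7 become 7; lanes 8..15 are zeroed by the maskz form. */
__m512i low_half_sevens(void) {
  return _mm512_maskz_set1_epi32((__mmask16)0x00FF, 7);
}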
+/* Cast between vector types */
+
+static __inline __m512d __DEFAULT_FN_ATTRS
+_mm512_castpd256_pd512(__m256d __a)
+{
+ return __builtin_shufflevector(__a, __a, 0, 1, 2, 3, -1, -1, -1, -1);
+}
+
+static __inline __m512 __DEFAULT_FN_ATTRS
+_mm512_castps256_ps512(__m256 __a)
+{
+ return __builtin_shufflevector(__a, __a, 0, 1, 2, 3, 4, 5, 6, 7,
+ -1, -1, -1, -1, -1, -1, -1, -1);
+}
+
+static __inline __m128d __DEFAULT_FN_ATTRS
+_mm512_castpd512_pd128(__m512d __a)
+{
+ return __builtin_shufflevector(__a, __a, 0, 1);
+}
+
+static __inline __m128 __DEFAULT_FN_ATTRS
+_mm512_castps512_ps128(__m512 __a)
+{
+ return __builtin_shufflevector(__a, __a, 0, 1, 2, 3);
+}
+
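Note that the widening casts above use -1 shuffle indices for the extra lanes, so the upper 256 bits of the result are undefined. When defined upper bits are required, insert into a zeroed vector instead (a sketch; _mm512_insertf64x4 is part of AVX512F but not part of this hunk):

#include <immintrin.h>

/* Widen a __m256d to __m512d with the upper four doubles set to 0. */
__m512d widen_zero_upper(__m256d lo) {
  return _mm512_insertf64x4(_mm512_setzero_pd(), lo, 0);
}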
+/* Bitwise operators */
+static __inline__ __m512i __DEFAULT_FN_ATTRS
+_mm512_and_epi32(__m512i __a, __m512i __b)
+{
+ return __a & __b;
+}
+
+static __inline__ __m512i __DEFAULT_FN_ATTRS
+_mm512_mask_and_epi32(__m512i __src, __mmask16 __k, __m512i __a, __m512i __b)
+{
+ return (__m512i) __builtin_ia32_pandd512_mask((__v16si) __a,
+ (__v16si) __b,
+ (__v16si) __src,
+ (__mmask16) __k);
+}
+static __inline__ __m512i __DEFAULT_FN_ATTRS
+_mm512_maskz_and_epi32(__mmask16 __k, __m512i __a, __m512i __b)
+{
+ return (__m512i) __builtin_ia32_pandd512_mask((__v16si) __a,
+ (__v16si) __b,
+ (__v16si)
+ _mm512_setzero_si512 (),
+ (__mmask16) __k);
+}
+
+static __inline__ __m512i __DEFAULT_FN_ATTRS
+_mm512_and_epi64(__m512i __a, __m512i __b)
+{
+ return __a & __b;
+}
+
+static __inline__ __m512i __DEFAULT_FN_ATTRS
+_mm512_mask_and_epi64(__m512i __src, __mmask8 __k, __m512i __a, __m512i __b)
+{
+ return (__m512i) __builtin_ia32_pandq512_mask ((__v8di) __a,
+ (__v8di) __b,
+ (__v8di) __src,
+ (__mmask8) __k);
+}
+static __inline__ __m512i __DEFAULT_FN_ATTRS
+_mm512_maskz_and_epi64(__mmask8 __k, __m512i __a, __m512i __b)
+{
+ return (__m512i) __builtin_ia32_pandq512_mask ((__v8di) __a,
+ (__v8di) __b,
+ (__v8di)
+ _mm512_setzero_si512 (),
+ (__mmask8) __k);
+}
+
+static __inline__ __m512i __DEFAULT_FN_ATTRS
+_mm512_andnot_epi32 (__m512i __A, __m512i __B)
+{
+ return (__m512i) __builtin_ia32_pandnd512_mask ((__v16si) __A,
+ (__v16si) __B,
+ (__v16si)
+ _mm512_setzero_si512 (),
+ (__mmask16) -1);
+}
+
+static __inline__ __m512i __DEFAULT_FN_ATTRS
+_mm512_mask_andnot_epi32 (__m512i __W, __mmask16 __U, __m512i __A, __m512i __B)
+{
+ return (__m512i) __builtin_ia32_pandnd512_mask ((__v16si) __A,
+ (__v16si) __B,
+ (__v16si) __W,
+ (__mmask16) __U);
+}
+
+static __inline__ __m512i __DEFAULT_FN_ATTRS
+_mm512_maskz_andnot_epi32 (__mmask16 __U, __m512i __A, __m512i __B)
+{
+ return (__m512i) __builtin_ia32_pandnd512_mask ((__v16si) __A,
+ (__v16si) __B,
+ (__v16si)
+ _mm512_setzero_si512 (),
+ (__mmask16) __U);
+}
+
+static __inline__ __m512i __DEFAULT_FN_ATTRS
+_mm512_andnot_epi64 (__m512i __A, __m512i __B)
+{
+ return (__m512i) __builtin_ia32_pandnq512_mask ((__v8di) __A,
+ (__v8di) __B,
+ (__v8di)
+ _mm512_setzero_si512 (),
+ (__mmask8) -1);
+}
+
+static __inline__ __m512i __DEFAULT_FN_ATTRS
+_mm512_mask_andnot_epi64 (__m512i __W, __mmask8 __U, __m512i __A, __m512i __B)
+{
+ return (__m512i) __builtin_ia32_pandnq512_mask ((__v8di) __A,
+ (__v8di) __B,
+ (__v8di) __W, __U);
+}
+
+static __inline__ __m512i __DEFAULT_FN_ATTRS
+_mm512_maskz_andnot_epi64 (__mmask8 __U, __m512i __A, __m512i __B)
+{
+  return (__m512i) __builtin_ia32_pandnq512_mask ((__v8di) __A,
+						   (__v8di) __B,
+						   (__v8di)
+						   _mm512_setzero_si512 (),
+						   (__mmask8) __U);
+}
+static __inline__ __m512i __DEFAULT_FN_ATTRS
+_mm512_or_epi32(__m512i __a, __m512i __b)
+{
+ return __a | __b;
+}
+
+static __inline__ __m512i __DEFAULT_FN_ATTRS
+_mm512_mask_or_epi32(__m512i __src, __mmask16 __k, __m512i __a, __m512i __b)
+{
+ return (__m512i) __builtin_ia32_pord512_mask((__v16si) __a,
+ (__v16si) __b,
+ (__v16si) __src,
+ (__mmask16) __k);
+}
+static __inline__ __m512i __DEFAULT_FN_ATTRS
+_mm512_maskz_or_epi32(__mmask16 __k, __m512i __a, __m512i __b)
+{
+ return (__m512i) __builtin_ia32_pord512_mask((__v16si) __a,
+ (__v16si) __b,
+ (__v16si)
+ _mm512_setzero_si512 (),
+ (__mmask16) __k);
+}
+
+static __inline__ __m512i __DEFAULT_FN_ATTRS
+_mm512_or_epi64(__m512i __a, __m512i __b)
+{
+ return __a | __b;
+}
+
+static __inline__ __m512i __DEFAULT_FN_ATTRS
+_mm512_mask_or_epi64(__m512i __src, __mmask8 __k, __m512i __a, __m512i __b)
+{
+ return (__m512i) __builtin_ia32_porq512_mask ((__v8di) __a,
+ (__v8di) __b,
+ (__v8di) __src,
+ (__mmask8) __k);
+}
+static __inline__ __m512i __DEFAULT_FN_ATTRS
+_mm512_maskz_or_epi64(__mmask8 __k, __m512i __a, __m512i __b)
+{
+ return (__m512i) __builtin_ia32_porq512_mask ((__v8di) __a,
+ (__v8di) __b,
+ (__v8di)
+ _mm512_setzero_si512 (),
+ (__mmask8) __k);
+}
+
+static __inline__ __m512i __DEFAULT_FN_ATTRS
+_mm512_xor_epi32(__m512i __a, __m512i __b)
+{
+ return __a ^ __b;
+}
+
+static __inline__ __m512i __DEFAULT_FN_ATTRS
+_mm512_mask_xor_epi32(__m512i __src, __mmask16 __k, __m512i __a, __m512i __b)
+{
+ return (__m512i) __builtin_ia32_pxord512_mask((__v16si) __a,
+ (__v16si) __b,
+ (__v16si) __src,
+ (__mmask16) __k);
+}
+static __inline__ __m512i __DEFAULT_FN_ATTRS
+_mm512_maskz_xor_epi32(__mmask16 __k, __m512i __a, __m512i __b)
+{
+ return (__m512i) __builtin_ia32_pxord512_mask((__v16si) __a,
+ (__v16si) __b,
+ (__v16si)
+ _mm512_setzero_si512 (),
+ (__mmask16) __k);
+}
+
+static __inline__ __m512i __DEFAULT_FN_ATTRS
+_mm512_xor_epi64(__m512i __a, __m512i __b)
+{
+ return __a ^ __b;
+}
+
+static __inline__ __m512i __DEFAULT_FN_ATTRS
+_mm512_mask_xor_epi64(__m512i __src, __mmask8 __k, __m512i __a, __m512i __b)
+{
+ return (__m512i) __builtin_ia32_pxorq512_mask ((__v8di) __a,
+ (__v8di) __b,
+ (__v8di) __src,
+ (__mmask8) __k);
+}
+static __inline__ __m512i __DEFAULT_FN_ATTRS
+_mm512_maskz_xor_epi64(__mmask8 __k, __m512i __a, __m512i __b)
+{
+ return (__m512i) __builtin_ia32_pxorq512_mask ((__v8di) __a,
+ (__v8di) __b,
+ (__v8di)
+ _mm512_setzero_si512 (),
+ (__mmask8) __k);
+}
+
+static __inline__ __m512i __DEFAULT_FN_ATTRS
+_mm512_and_si512(__m512i __a, __m512i __b)
+{
+ return __a & __b;
+}
+
+static __inline__ __m512i __DEFAULT_FN_ATTRS
+_mm512_or_si512(__m512i __a, __m512i __b)
+{
+ return __a | __b;
+}
+
+static __inline__ __m512i __DEFAULT_FN_ATTRS
+_mm512_xor_si512(__m512i __a, __m512i __b)
+{
+ return __a ^ __b;
+}
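All of the masked integer ops above follow the same convention, sketched here for AND (assumes -mavx512f; the function name is illustrative): the mask_ form takes unselected lanes from the explicit source operand, and the maskz_ form zeroes them.

#include <immintrin.h>

__m512i masked_and_demo(__m512i src, __mmask16 k, __m512i a, __m512i b) {
  __m512i kept   = _mm512_mask_and_epi32(src, k, a, b);  /* src where k bit = 0 */
  __m512i zeroed = _mm512_maskz_and_epi32(k, a, b);      /* 0 where k bit = 0 */
  return _mm512_or_epi32(kept, zeroed);  /* combined so both results are used */
}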
+/* Arithmetic */
+
+static __inline __m512d __DEFAULT_FN_ATTRS
+_mm512_add_pd(__m512d __a, __m512d __b)
+{
+ return __a + __b;
+}
+
+static __inline __m512 __DEFAULT_FN_ATTRS
+_mm512_add_ps(__m512 __a, __m512 __b)
+{
+ return __a + __b;
+}
+
+static __inline __m512d __DEFAULT_FN_ATTRS
+_mm512_mul_pd(__m512d __a, __m512d __b)
+{
+ return __a * __b;
+}
+
+static __inline __m512 __DEFAULT_FN_ATTRS
+_mm512_mul_ps(__m512 __a, __m512 __b)
+{
+ return __a * __b;
+}
+
+static __inline __m512d __DEFAULT_FN_ATTRS
+_mm512_sub_pd(__m512d __a, __m512d __b)
+{
+ return __a - __b;
+}
+
+static __inline __m512 __DEFAULT_FN_ATTRS
+_mm512_sub_ps(__m512 __a, __m512 __b)
+{
+ return __a - __b;
+}
+
+static __inline__ __m512i __DEFAULT_FN_ATTRS
+_mm512_add_epi64 (__m512i __A, __m512i __B)
+{
+ return (__m512i) ((__v8di) __A + (__v8di) __B);
+}
+
+static __inline__ __m512i __DEFAULT_FN_ATTRS
+_mm512_mask_add_epi64 (__m512i __W, __mmask8 __U, __m512i __A, __m512i __B)
+{
+ return (__m512i) __builtin_ia32_paddq512_mask ((__v8di) __A,
+ (__v8di) __B,
+ (__v8di) __W,
+ (__mmask8) __U);
+}
+
+static __inline__ __m512i __DEFAULT_FN_ATTRS
+_mm512_maskz_add_epi64 (__mmask8 __U, __m512i __A, __m512i __B)
+{
+ return (__m512i) __builtin_ia32_paddq512_mask ((__v8di) __A,
+ (__v8di) __B,
+ (__v8di)
+ _mm512_setzero_si512 (),
+ (__mmask8) __U);
+}
+
+static __inline__ __m512i __DEFAULT_FN_ATTRS
+_mm512_sub_epi64 (__m512i __A, __m512i __B)
+{
+ return (__m512i) ((__v8di) __A - (__v8di) __B);
+}
+
+static __inline__ __m512i __DEFAULT_FN_ATTRS
+_mm512_mask_sub_epi64 (__m512i __W, __mmask8 __U, __m512i __A, __m512i __B)
+{
+ return (__m512i) __builtin_ia32_psubq512_mask ((__v8di) __A,
+ (__v8di) __B,
+ (__v8di) __W,
+ (__mmask8) __U);
+}
+
+static __inline__ __m512i __DEFAULT_FN_ATTRS
+_mm512_maskz_sub_epi64 (__mmask8 __U, __m512i __A, __m512i __B)
+{
+ return (__m512i) __builtin_ia32_psubq512_mask ((__v8di) __A,
+ (__v8di) __B,
+ (__v8di)
+ _mm512_setzero_si512 (),
+ (__mmask8) __U);
+}
+
+static __inline__ __m512i __DEFAULT_FN_ATTRS
+_mm512_add_epi32 (__m512i __A, __m512i __B)
+{
+ return (__m512i) ((__v16si) __A + (__v16si) __B);
+}
+
+static __inline__ __m512i __DEFAULT_FN_ATTRS
+_mm512_mask_add_epi32 (__m512i __W, __mmask16 __U, __m512i __A, __m512i __B)
+{
+ return (__m512i) __builtin_ia32_paddd512_mask ((__v16si) __A,
+ (__v16si) __B,
+ (__v16si) __W,
+ (__mmask16) __U);
+}
+
+static __inline__ __m512i __DEFAULT_FN_ATTRS
+_mm512_maskz_add_epi32 (__mmask16 __U, __m512i __A, __m512i __B)
+{
+ return (__m512i) __builtin_ia32_paddd512_mask ((__v16si) __A,
+ (__v16si) __B,
+ (__v16si)
+ _mm512_setzero_si512 (),
+ (__mmask16) __U);
+}
+
+static __inline__ __m512i __DEFAULT_FN_ATTRS
+_mm512_sub_epi32 (__m512i __A, __m512i __B)
+{
+ return (__m512i) ((__v16si) __A - (__v16si) __B);
+}
+
+static __inline__ __m512i __DEFAULT_FN_ATTRS
+_mm512_mask_sub_epi32 (__m512i __W, __mmask16 __U, __m512i __A, __m512i __B)
+{
+ return (__m512i) __builtin_ia32_psubd512_mask ((__v16si) __A,
+ (__v16si) __B,
+ (__v16si) __W,
+ (__mmask16) __U);
+}
+
+static __inline__ __m512i __DEFAULT_FN_ATTRS
+_mm512_maskz_sub_epi32 (__mmask16 __U, __m512i __A, __m512i __B)
+{
+ return (__m512i) __builtin_ia32_psubd512_mask ((__v16si) __A,
+ (__v16si) __B,
+ (__v16si)
+ _mm512_setzero_si512 (),
+ (__mmask16) __U);
+}
+
+static __inline__ __m512d __DEFAULT_FN_ATTRS
+_mm512_max_pd(__m512d __A, __m512d __B)
+{
+ return (__m512d) __builtin_ia32_maxpd512_mask ((__v8df) __A,
+ (__v8df) __B,
+ (__v8df)
+ _mm512_setzero_pd (),
+ (__mmask8) -1,
+ _MM_FROUND_CUR_DIRECTION);
+}
+
+static __inline__ __m512 __DEFAULT_FN_ATTRS
+_mm512_max_ps(__m512 __A, __m512 __B)
+{
+ return (__m512) __builtin_ia32_maxps512_mask ((__v16sf) __A,
+ (__v16sf) __B,
+ (__v16sf)
+ _mm512_setzero_ps (),
+ (__mmask16) -1,
+ _MM_FROUND_CUR_DIRECTION);
+}
+
+static __inline__ __m128 __DEFAULT_FN_ATTRS
+_mm_mask_max_ss(__m128 __W, __mmask8 __U,__m128 __A, __m128 __B) {
+ return (__m128) __builtin_ia32_maxss_round ((__v4sf) __A,
+ (__v4sf) __B,
+ (__v4sf) __W,
+ (__mmask8) __U,
+ _MM_FROUND_CUR_DIRECTION);
+}
+
+static __inline__ __m128 __DEFAULT_FN_ATTRS
+_mm_maskz_max_ss(__mmask8 __U,__m128 __A, __m128 __B) {
+ return (__m128) __builtin_ia32_maxss_round ((__v4sf) __A,
+ (__v4sf) __B,
+ (__v4sf) _mm_setzero_ps (),
+ (__mmask8) __U,
+ _MM_FROUND_CUR_DIRECTION);
+}
+
+#define _mm_max_round_ss(__A, __B, __R) __extension__ ({ \
+ (__m128) __builtin_ia32_maxss_round ((__v4sf) __A, (__v4sf) __B, \
+ (__v4sf) _mm_setzero_ps(), (__mmask8) -1, __R); })
+
+#define _mm_mask_max_round_ss(__W, __U, __A, __B, __R) __extension__ ({ \
+ (__m128) __builtin_ia32_maxss_round ((__v4sf) __A, (__v4sf) __B, \
+ (__v4sf) __W, (__mmask8) __U,__R); })
+
+#define _mm_maskz_max_round_ss(__U, __A, __B, __R) __extension__ ({ \
+ (__m128) __builtin_ia32_maxss_round ((__v4sf) __A, (__v4sf) __B, \
+ (__v4sf) _mm_setzero_ps(), (__mmask8) __U,__R); })
+
+static __inline__ __m128d __DEFAULT_FN_ATTRS
+_mm_mask_max_sd(__m128d __W, __mmask8 __U,__m128d __A, __m128d __B) {
+ return (__m128d) __builtin_ia32_maxsd_round ((__v2df) __A,
+ (__v2df) __B,
+ (__v2df) __W,
+ (__mmask8) __U,
+ _MM_FROUND_CUR_DIRECTION);
+}
+
+static __inline__ __m128d __DEFAULT_FN_ATTRS
+_mm_maskz_max_sd(__mmask8 __U,__m128d __A, __m128d __B) {
+ return (__m128d) __builtin_ia32_maxsd_round ((__v2df) __A,
+ (__v2df) __B,
+ (__v2df) _mm_setzero_pd (),
+ (__mmask8) __U,
+ _MM_FROUND_CUR_DIRECTION);
+}
+
+#define _mm_max_round_sd(__A, __B, __R) __extension__ ({ \
+ (__m128d) __builtin_ia32_maxsd_round ((__v2df) __A, (__v2df) __B, \
+ (__v2df) _mm_setzero_pd(), (__mmask8) -1, __R); })
+
+#define _mm_mask_max_round_sd(__W, __U, __A, __B, __R) __extension__ ({ \
+ (__m128d) __builtin_ia32_maxsd_round ((__v2df) __A, (__v2df) __B, \
+ (__v2df) __W, (__mmask8) __U,__R); })
+
+#define _mm_maskz_max_round_sd(__U, __A, __B, __R) __extension__ ({ \
+ (__m128d) __builtin_ia32_maxsd_round ((__v2df) __A, (__v2df) __B, \
+ (__v2df) _mm_setzero_pd(), (__mmask8) __U,__R); })
+
+static __inline __m512i __DEFAULT_FN_ATTRS
+_mm512_max_epi32(__m512i __A, __m512i __B)
+{
+ return (__m512i) __builtin_ia32_pmaxsd512_mask ((__v16si) __A,
+ (__v16si) __B,
+ (__v16si)
+ _mm512_setzero_si512 (),
+ (__mmask16) -1);
+}
+
+static __inline __m512i __DEFAULT_FN_ATTRS
+_mm512_max_epu32(__m512i __A, __m512i __B)
+{
+ return (__m512i) __builtin_ia32_pmaxud512_mask ((__v16si) __A,
+ (__v16si) __B,
+ (__v16si)
+ _mm512_setzero_si512 (),
+ (__mmask16) -1);
+}
+
+static __inline __m512i __DEFAULT_FN_ATTRS
+_mm512_max_epi64(__m512i __A, __m512i __B)
+{
+ return (__m512i) __builtin_ia32_pmaxsq512_mask ((__v8di) __A,
+ (__v8di) __B,
+ (__v8di)
+ _mm512_setzero_si512 (),
+ (__mmask8) -1);
+}
+
+static __inline __m512i __DEFAULT_FN_ATTRS
+_mm512_max_epu64(__m512i __A, __m512i __B)
+{
+ return (__m512i) __builtin_ia32_pmaxuq512_mask ((__v8di) __A,
+ (__v8di) __B,
+ (__v8di)
+ _mm512_setzero_si512 (),
+ (__mmask8) -1);
+}
+
+static __inline__ __m512d __DEFAULT_FN_ATTRS
+_mm512_min_pd(__m512d __A, __m512d __B)
+{
+ return (__m512d) __builtin_ia32_minpd512_mask ((__v8df) __A,
+ (__v8df) __B,
+ (__v8df)
+ _mm512_setzero_pd (),
+ (__mmask8) -1,
+ _MM_FROUND_CUR_DIRECTION);
+}
+
+static __inline__ __m512 __DEFAULT_FN_ATTRS
+_mm512_min_ps(__m512 __A, __m512 __B)
+{
+ return (__m512) __builtin_ia32_minps512_mask ((__v16sf) __A,
+ (__v16sf) __B,
+ (__v16sf)
+ _mm512_setzero_ps (),
+ (__mmask16) -1,
+ _MM_FROUND_CUR_DIRECTION);
+}
+
+static __inline__ __m128 __DEFAULT_FN_ATTRS
+_mm_mask_min_ss(__m128 __W, __mmask8 __U,__m128 __A, __m128 __B) {
+ return (__m128) __builtin_ia32_minss_round ((__v4sf) __A,
+ (__v4sf) __B,
+ (__v4sf) __W,
+ (__mmask8) __U,
+ _MM_FROUND_CUR_DIRECTION);
+}
+
+static __inline__ __m128 __DEFAULT_FN_ATTRS
+_mm_maskz_min_ss(__mmask8 __U,__m128 __A, __m128 __B) {
+ return (__m128) __builtin_ia32_minss_round ((__v4sf) __A,
+ (__v4sf) __B,
+ (__v4sf) _mm_setzero_ps (),
+ (__mmask8) __U,
+ _MM_FROUND_CUR_DIRECTION);
+}
+
+#define _mm_min_round_ss(__A, __B, __R) __extension__ ({ \
+ (__m128) __builtin_ia32_minss_round ((__v4sf) __A, (__v4sf) __B, \
+ (__v4sf) _mm_setzero_ps(), (__mmask8) -1, __R); })
+
+#define _mm_mask_min_round_ss(__W, __U, __A, __B, __R) __extension__ ({ \
+ (__m128) __builtin_ia32_minss_round ((__v4sf) __A, (__v4sf) __B, \
+ (__v4sf) __W, (__mmask8) __U,__R); })
+
+#define _mm_maskz_min_round_ss(__U, __A, __B, __R) __extension__ ({ \
+ (__m128) __builtin_ia32_minss_round ((__v4sf) __A, (__v4sf) __B, \
+ (__v4sf) _mm_setzero_ps(), (__mmask8) __U,__R); })
+
+static __inline__ __m128d __DEFAULT_FN_ATTRS
+_mm_mask_min_sd(__m128d __W, __mmask8 __U,__m128d __A, __m128d __B) {
+ return (__m128d) __builtin_ia32_minsd_round ((__v2df) __A,
+ (__v2df) __B,
+ (__v2df) __W,
+ (__mmask8) __U,
+ _MM_FROUND_CUR_DIRECTION);
+}
+
+static __inline__ __m128d __DEFAULT_FN_ATTRS
+_mm_maskz_min_sd(__mmask8 __U,__m128d __A, __m128d __B) {
+ return (__m128d) __builtin_ia32_minsd_round ((__v2df) __A,
+ (__v2df) __B,
+ (__v2df) _mm_setzero_pd (),
+ (__mmask8) __U,
+ _MM_FROUND_CUR_DIRECTION);
+}
+
+#define _mm_min_round_sd(__A, __B, __R) __extension__ ({ \
+ (__m128d) __builtin_ia32_minsd_round ((__v2df) __A, (__v2df) __B, \
+ (__v2df) _mm_setzero_pd(), (__mmask8) -1, __R); })
+
+#define _mm_mask_min_round_sd(__W, __U, __A, __B, __R) __extension__ ({ \
+ (__m128d) __builtin_ia32_minsd_round ((__v2df) __A, (__v2df) __B, \
+ (__v2df) __W, (__mmask8) __U,__R); })
+
+#define _mm_maskz_min_round_sd(__U, __A, __B, __R) __extension__ ({ \
+ (__m128d) __builtin_ia32_minsd_round ((__v2df) __A, (__v2df) __B, \
+ (__v2df) _mm_setzero_pd(), (__mmask8) __U,__R); })
+
+static __inline __m512i __DEFAULT_FN_ATTRS
+_mm512_min_epi32(__m512i __A, __m512i __B)
+{
+ return (__m512i) __builtin_ia32_pminsd512_mask ((__v16si) __A,
+ (__v16si) __B,
+ (__v16si)
+ _mm512_setzero_si512 (),
+ (__mmask16) -1);
+}
+
+static __inline __m512i __DEFAULT_FN_ATTRS
+_mm512_min_epu32(__m512i __A, __m512i __B)
+{
+ return (__m512i) __builtin_ia32_pminud512_mask ((__v16si) __A,
+ (__v16si) __B,
+ (__v16si)
+ _mm512_setzero_si512 (),
+ (__mmask16) -1);
+}
+
+static __inline __m512i __DEFAULT_FN_ATTRS
+_mm512_min_epi64(__m512i __A, __m512i __B)
+{
+ return (__m512i) __builtin_ia32_pminsq512_mask ((__v8di) __A,
+ (__v8di) __B,
+ (__v8di)
+ _mm512_setzero_si512 (),
+ (__mmask8) -1);
+}
+
+static __inline __m512i __DEFAULT_FN_ATTRS
+_mm512_min_epu64(__m512i __A, __m512i __B)
+{
+ return (__m512i) __builtin_ia32_pminuq512_mask ((__v8di) __A,
+ (__v8di) __B,
+ (__v8di)
+ _mm512_setzero_si512 (),
+ (__mmask8) -1);
+}
+
+static __inline __m512i __DEFAULT_FN_ATTRS
+_mm512_mul_epi32(__m512i __X, __m512i __Y)
+{
+ return (__m512i) __builtin_ia32_pmuldq512_mask ((__v16si) __X,
+ (__v16si) __Y,
+ (__v8di)
+ _mm512_setzero_si512 (),
+ (__mmask8) -1);
+}
+
+static __inline __m512i __DEFAULT_FN_ATTRS
+_mm512_mask_mul_epi32 (__m512i __W, __mmask8 __M, __m512i __X, __m512i __Y)
+{
+ return (__m512i) __builtin_ia32_pmuldq512_mask ((__v16si) __X,
+ (__v16si) __Y,
+ (__v8di) __W, __M);
+}
+
+static __inline __m512i __DEFAULT_FN_ATTRS
+_mm512_maskz_mul_epi32 (__mmask8 __M, __m512i __X, __m512i __Y)
+{
+ return (__m512i) __builtin_ia32_pmuldq512_mask ((__v16si) __X,
+ (__v16si) __Y,
+ (__v8di)
+ _mm512_setzero_si512 (),
+ __M);
+}
+
+static __inline __m512i __DEFAULT_FN_ATTRS
+_mm512_mul_epu32(__m512i __X, __m512i __Y)
+{
+ return (__m512i) __builtin_ia32_pmuludq512_mask ((__v16si) __X,
+ (__v16si) __Y,
+ (__v8di)
+ _mm512_setzero_si512 (),
+ (__mmask8) -1);
+}
+
+static __inline __m512i __DEFAULT_FN_ATTRS
+_mm512_mask_mul_epu32 (__m512i __W, __mmask8 __M, __m512i __X, __m512i __Y)
+{
+ return (__m512i) __builtin_ia32_pmuludq512_mask ((__v16si) __X,
+ (__v16si) __Y,
+ (__v8di) __W, __M);
+}
+
+static __inline __m512i __DEFAULT_FN_ATTRS
+_mm512_maskz_mul_epu32 (__mmask8 __M, __m512i __X, __m512i __Y)
+{
+ return (__m512i) __builtin_ia32_pmuludq512_mask ((__v16si) __X,
+ (__v16si) __Y,
+ (__v8di)
+ _mm512_setzero_si512 (),
+ __M);
+}
+
+static __inline __m512i __DEFAULT_FN_ATTRS
+_mm512_mullo_epi32 (__m512i __A, __m512i __B)
+{
+ return (__m512i) ((__v16si) __A * (__v16si) __B);
+}
+
+static __inline __m512i __DEFAULT_FN_ATTRS
+_mm512_maskz_mullo_epi32 (__mmask16 __M, __m512i __A, __m512i __B)
+{
+ return (__m512i) __builtin_ia32_pmulld512_mask ((__v16si) __A,
+ (__v16si) __B,
+ (__v16si)
+ _mm512_setzero_si512 (),
+ __M);
+}
+
+static __inline __m512i __DEFAULT_FN_ATTRS
+_mm512_mask_mullo_epi32 (__m512i __W, __mmask16 __M, __m512i __A, __m512i __B)
+{
+ return (__m512i) __builtin_ia32_pmulld512_mask ((__v16si) __A,
+ (__v16si) __B,
+ (__v16si) __W, __M);
+}
+
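As with the SSE4.1/AVX2 pmuldq family, _mm512_mul_epi32 and _mm512_mul_epu32 above multiply the low 32 bits of each 64-bit lane and produce eight full 64-bit products, while _mm512_mullo_epi32 keeps only the low 32 bits of all sixteen products. A sketch (assumes -mavx512f; names are illustrative):

#include <immintrin.h>

/* Eight widening 64-bit signed products vs. sixteen truncated ones. */
__m512i widening(__m512i x, __m512i y)  { return _mm512_mul_epi32(x, y); }
__m512i truncated(__m512i x, __m512i y) { return _mm512_mullo_epi32(x, y); }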
+static __inline__ __m512d __DEFAULT_FN_ATTRS
+_mm512_sqrt_pd(__m512d __a)
+{
+ return (__m512d)__builtin_ia32_sqrtpd512_mask((__v8df)__a,
+ (__v8df) _mm512_setzero_pd (),
+ (__mmask8) -1,
+ _MM_FROUND_CUR_DIRECTION);
+}
+
+static __inline__ __m512 __DEFAULT_FN_ATTRS
+_mm512_sqrt_ps(__m512 __a)
+{
+ return (__m512)__builtin_ia32_sqrtps512_mask((__v16sf)__a,
+ (__v16sf) _mm512_setzero_ps (),
+ (__mmask16) -1,
+ _MM_FROUND_CUR_DIRECTION);
+}
+
+static __inline__ __m512d __DEFAULT_FN_ATTRS
+_mm512_rsqrt14_pd(__m512d __A)
+{
+  return (__m512d) __builtin_ia32_rsqrt14pd512_mask ((__v8df) __A,
+						      (__v8df)
+						      _mm512_setzero_pd (),
+						      (__mmask8) -1);
+}
+
+static __inline__ __m512 __DEFAULT_FN_ATTRS
+_mm512_rsqrt14_ps(__m512 __A)
+{
+ return (__m512) __builtin_ia32_rsqrt14ps512_mask ((__v16sf) __A,
+ (__v16sf)
+ _mm512_setzero_ps (),
+ (__mmask16) -1);
+}
+
+static __inline__ __m128 __DEFAULT_FN_ATTRS
+_mm_rsqrt14_ss(__m128 __A, __m128 __B)
+{
+ return (__m128) __builtin_ia32_rsqrt14ss ((__v4sf) __A,
+ (__v4sf) __B,
+ (__v4sf)
+ _mm_setzero_ps (),
+ (__mmask8) -1);
+}
+
+static __inline__ __m128d __DEFAULT_FN_ATTRS
+_mm_rsqrt14_sd(__m128d __A, __m128d __B)
+{
+ return (__m128d) __builtin_ia32_rsqrt14sd ((__v2df) __A,
+ (__v2df) __B,
+ (__v2df)
+ _mm_setzero_pd (),
+ (__mmask8) -1);
+}
+
+static __inline__ __m512d __DEFAULT_FN_ATTRS
+_mm512_rcp14_pd(__m512d __A)
+{
+ return (__m512d) __builtin_ia32_rcp14pd512_mask ((__v8df) __A,
+ (__v8df)
+ _mm512_setzero_pd (),
+ (__mmask8) -1);
+}
+
+static __inline__ __m512 __DEFAULT_FN_ATTRS
+_mm512_rcp14_ps(__m512 __A)
+{
+ return (__m512) __builtin_ia32_rcp14ps512_mask ((__v16sf) __A,
+ (__v16sf)
+ _mm512_setzero_ps (),
+ (__mmask16) -1);
+}
+static __inline__ __m128 __DEFAULT_FN_ATTRS
+_mm_rcp14_ss(__m128 __A, __m128 __B)
+{
+ return (__m128) __builtin_ia32_rcp14ss ((__v4sf) __A,
+ (__v4sf) __B,
+ (__v4sf)
+ _mm_setzero_ps (),
+ (__mmask8) -1);
+}
+
+static __inline__ __m128d __DEFAULT_FN_ATTRS
+_mm_rcp14_sd(__m128d __A, __m128d __B)
+{
+ return (__m128d) __builtin_ia32_rcp14sd ((__v2df) __A,
+ (__v2df) __B,
+ (__v2df)
+ _mm_setzero_pd (),
+ (__mmask8) -1);
+}
+
+static __inline __m512 __DEFAULT_FN_ATTRS
+_mm512_floor_ps(__m512 __A)
+{
+ return (__m512) __builtin_ia32_rndscaleps_mask ((__v16sf) __A,
+ _MM_FROUND_FLOOR,
+ (__v16sf) __A, -1,
+ _MM_FROUND_CUR_DIRECTION);
+}
+
+static __inline __m512d __DEFAULT_FN_ATTRS
+_mm512_floor_pd(__m512d __A)
+{
+ return (__m512d) __builtin_ia32_rndscalepd_mask ((__v8df) __A,
+ _MM_FROUND_FLOOR,
+ (__v8df) __A, -1,
+ _MM_FROUND_CUR_DIRECTION);
+}
+
+static __inline __m512 __DEFAULT_FN_ATTRS
+_mm512_ceil_ps(__m512 __A)
+{
+ return (__m512) __builtin_ia32_rndscaleps_mask ((__v16sf) __A,
+ _MM_FROUND_CEIL,
+ (__v16sf) __A, -1,
+ _MM_FROUND_CUR_DIRECTION);
+}
+
+static __inline __m512d __DEFAULT_FN_ATTRS
+_mm512_ceil_pd(__m512d __A)
+{
+ return (__m512d) __builtin_ia32_rndscalepd_mask ((__v8df) __A,
+ _MM_FROUND_CEIL,
+ (__v8df) __A, -1,
+ _MM_FROUND_CUR_DIRECTION);
+}
+
+static __inline __m512i __DEFAULT_FN_ATTRS
+_mm512_abs_epi64(__m512i __A)
+{
+ return (__m512i) __builtin_ia32_pabsq512_mask ((__v8di) __A,
+ (__v8di)
+ _mm512_setzero_si512 (),
+ (__mmask8) -1);
+}
+
+static __inline __m512i __DEFAULT_FN_ATTRS
+_mm512_abs_epi32(__m512i __A)
+{
+ return (__m512i) __builtin_ia32_pabsd512_mask ((__v16si) __A,
+ (__v16si)
+ _mm512_setzero_si512 (),
+ (__mmask16) -1);
+}
+
+static __inline__ __m128 __DEFAULT_FN_ATTRS
+_mm_mask_add_ss(__m128 __W, __mmask8 __U,__m128 __A, __m128 __B) {
+ return (__m128) __builtin_ia32_addss_round ((__v4sf) __A,
+ (__v4sf) __B,
+ (__v4sf) __W,
+ (__mmask8) __U,
+ _MM_FROUND_CUR_DIRECTION);
+}
+
+static __inline__ __m128 __DEFAULT_FN_ATTRS
+_mm_maskz_add_ss(__mmask8 __U,__m128 __A, __m128 __B) {
+ return (__m128) __builtin_ia32_addss_round ((__v4sf) __A,
+ (__v4sf) __B,
+ (__v4sf) _mm_setzero_ps (),
+ (__mmask8) __U,
+ _MM_FROUND_CUR_DIRECTION);
+}
+
+#define _mm_add_round_ss(__A, __B, __R) __extension__ ({ \
+ (__m128) __builtin_ia32_addss_round ((__v4sf) __A, (__v4sf) __B, \
+ (__v4sf) _mm_setzero_ps(), (__mmask8) -1, __R); })
+
+#define _mm_mask_add_round_ss(__W, __U, __A, __B, __R) __extension__ ({ \
+ (__m128) __builtin_ia32_addss_round ((__v4sf) __A, (__v4sf) __B, \
+ (__v4sf) __W, (__mmask8) __U,__R); })
+
+#define _mm_maskz_add_round_ss(__U, __A, __B, __R) __extension__ ({ \
+ (__m128) __builtin_ia32_addss_round ((__v4sf) __A, (__v4sf) __B, \
+ (__v4sf) _mm_setzero_ps(), (__mmask8) __U,__R); })
+
+static __inline__ __m128d __DEFAULT_FN_ATTRS
+_mm_mask_add_sd(__m128d __W, __mmask8 __U,__m128d __A, __m128d __B) {
+ return (__m128d) __builtin_ia32_addsd_round ((__v2df) __A,
+ (__v2df) __B,
+ (__v2df) __W,
+ (__mmask8) __U,
+ _MM_FROUND_CUR_DIRECTION);
+}
+
+static __inline__ __m128d __DEFAULT_FN_ATTRS
+_mm_maskz_add_sd(__mmask8 __U,__m128d __A, __m128d __B) {
+ return (__m128d) __builtin_ia32_addsd_round ((__v2df) __A,
+ (__v2df) __B,
+ (__v2df) _mm_setzero_pd (),
+ (__mmask8) __U,
+ _MM_FROUND_CUR_DIRECTION);
+}
+#define _mm_add_round_sd(__A, __B, __R) __extension__ ({ \
+ (__m128d) __builtin_ia32_addsd_round ((__v2df) __A, (__v2df) __B, \
+ (__v2df) _mm_setzero_pd(), (__mmask8) -1, __R); })
+
+#define _mm_mask_add_round_sd(__W, __U, __A, __B, __R) __extension__ ({ \
+ (__m128d) __builtin_ia32_addsd_round ((__v2df) __A, (__v2df) __B, \
+ (__v2df) __W, (__mmask8) __U,__R); })
+
+#define _mm_maskz_add_round_sd(__U, __A, __B, __R) __extension__ ({ \
+ (__m128d) __builtin_ia32_addsd_round ((__v2df) __A, (__v2df) __B, \
+ (__v2df) _mm_setzero_pd(), (__mmask8) __U,__R); })
+
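The scalar (_ss/_sd) masked forms above only operate on lane 0; a sketch of the zero-masking variant (assumes -mavx512f; _mm_set_ss and _mm_cvtss_f32 are plain SSE helpers, and the function name is illustrative):

#include <immintrin.h>

/* Returns a+b if bit 0 of k is set, else 0.0f. */
float add_lane0_or_zero(float a, float b, __mmask8 k) {
  __m128 r = _mm_maskz_add_ss(k, _mm_set_ss(a), _mm_set_ss(b));
  return _mm_cvtss_f32(r);
}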
+static __inline__ __m512d __DEFAULT_FN_ATTRS
+_mm512_mask_add_pd(__m512d __W, __mmask8 __U, __m512d __A, __m512d __B) {
+ return (__m512d) __builtin_ia32_addpd512_mask ((__v8df) __A,
+ (__v8df) __B,
+ (__v8df) __W,
+ (__mmask8) __U,
+ _MM_FROUND_CUR_DIRECTION);
+}
+
+static __inline__ __m512d __DEFAULT_FN_ATTRS
+_mm512_maskz_add_pd(__mmask8 __U, __m512d __A, __m512d __B) {
+ return (__m512d) __builtin_ia32_addpd512_mask ((__v8df) __A,
+ (__v8df) __B,
+ (__v8df) _mm512_setzero_pd (),
+ (__mmask8) __U,
+ _MM_FROUND_CUR_DIRECTION);
+}
+
+static __inline__ __m512 __DEFAULT_FN_ATTRS
+_mm512_mask_add_ps(__m512 __W, __mmask16 __U, __m512 __A, __m512 __B) {
+ return (__m512) __builtin_ia32_addps512_mask ((__v16sf) __A,
+ (__v16sf) __B,
+ (__v16sf) __W,
+ (__mmask16) __U,
+ _MM_FROUND_CUR_DIRECTION);
+}
+
+static __inline__ __m512 __DEFAULT_FN_ATTRS
+_mm512_maskz_add_ps(__mmask16 __U, __m512 __A, __m512 __B) {
+ return (__m512) __builtin_ia32_addps512_mask ((__v16sf) __A,
+ (__v16sf) __B,
+ (__v16sf) _mm512_setzero_ps (),
+ (__mmask16) __U,
+ _MM_FROUND_CUR_DIRECTION);
+}
+
+#define _mm512_add_round_pd(__A, __B, __R) __extension__ ({ \
+ (__m512d) __builtin_ia32_addpd512_mask ((__v8df) __A, (__v8df) __B, \
+ (__v8df) _mm512_setzero_pd(), (__mmask8) -1, __R); })
+
+#define _mm512_mask_add_round_pd(__W, __U, __A, __B, __R) __extension__ ({ \
+ (__m512d) __builtin_ia32_addpd512_mask((__v8df) __A, (__v8df) __B, \
+ (__v8df) __W, (__mmask8) __U, __R); })
+
+#define _mm512_maskz_add_round_pd(__U, __A, __B, __R) __extension__ ({ \
+ (__m512d) __builtin_ia32_addpd512_mask ((__v8df) __A, (__v8df) __B, \
+ (__v8df) _mm512_setzero_pd(), (__mmask8) __U, __R); })
+
+#define _mm512_add_round_ps(__A, __B, __R) __extension__ ({ \
+ (__m512) __builtin_ia32_addps512_mask ((__v16sf) __A, (__v16sf) __B, \
+ (__v16sf) _mm512_setzero_ps(), (__mmask16) -1, __R); })
+
+#define _mm512_mask_add_round_ps(__W, __U, __A, __B, __R) __extension__ ({ \
+ (__m512) __builtin_ia32_addps512_mask ((__v16sf) __A, (__v16sf) __B, \
+ (__v16sf) __W, (__mmask16)__U, __R); })
+
+#define _mm512_maskz_add_round_ps(__U, __A, __B, __R) __extension__ ({ \
+ (__m512) __builtin_ia32_addps512_mask ((__v16sf) __A, (__v16sf) __B, \
+ (__v16sf) _mm512_setzero_ps(), (__mmask16)__U, __R); })
+
+static __inline__ __m128 __DEFAULT_FN_ATTRS
+_mm_mask_sub_ss(__m128 __W, __mmask8 __U,__m128 __A, __m128 __B) {
+ return (__m128) __builtin_ia32_subss_round ((__v4sf) __A,
+ (__v4sf) __B,
+ (__v4sf) __W,
+ (__mmask8) __U,
+ _MM_FROUND_CUR_DIRECTION);
+}
+
+static __inline__ __m128 __DEFAULT_FN_ATTRS
+_mm_maskz_sub_ss(__mmask8 __U,__m128 __A, __m128 __B) {
+ return (__m128) __builtin_ia32_subss_round ((__v4sf) __A,
+ (__v4sf) __B,
+ (__v4sf) _mm_setzero_ps (),
+ (__mmask8) __U,
+ _MM_FROUND_CUR_DIRECTION);
+}
+#define _mm_sub_round_ss(__A, __B, __R) __extension__ ({ \
+ (__m128) __builtin_ia32_subss_round ((__v4sf) __A, (__v4sf) __B, \
+ (__v4sf) _mm_setzero_ps(), (__mmask8) -1, __R); })
+
+#define _mm_mask_sub_round_ss(__W, __U, __A, __B, __R) __extension__ ({ \
+ (__m128) __builtin_ia32_subss_round ((__v4sf) __A, (__v4sf) __B, \
+ (__v4sf) __W, (__mmask8) __U,__R); })
+
+#define _mm_maskz_sub_round_ss(__U, __A, __B, __R) __extension__ ({ \
+ (__m128) __builtin_ia32_subss_round ((__v4sf) __A, (__v4sf) __B, \
+ (__v4sf) _mm_setzero_ps(), (__mmask8) __U,__R); })
+
+static __inline__ __m128d __DEFAULT_FN_ATTRS
+_mm_mask_sub_sd(__m128d __W, __mmask8 __U,__m128d __A, __m128d __B) {
+ return (__m128d) __builtin_ia32_subsd_round ((__v2df) __A,
+ (__v2df) __B,
+ (__v2df) __W,
+ (__mmask8) __U,
+ _MM_FROUND_CUR_DIRECTION);
+}
+
+static __inline__ __m128d __DEFAULT_FN_ATTRS
+_mm_maskz_sub_sd(__mmask8 __U,__m128d __A, __m128d __B) {
+ return (__m128d) __builtin_ia32_subsd_round ((__v2df) __A,
+ (__v2df) __B,
+ (__v2df) _mm_setzero_pd (),
+ (__mmask8) __U,
+ _MM_FROUND_CUR_DIRECTION);
+}
+
+#define _mm_sub_round_sd(__A, __B, __R) __extension__ ({ \
+ (__m128d) __builtin_ia32_subsd_round ((__v2df) __A, (__v2df) __B, \
+ (__v2df) _mm_setzero_pd(), (__mmask8) -1, __R); })
+
+#define _mm_mask_sub_round_sd(__W, __U, __A, __B, __R) __extension__ ({ \
+ (__m128d) __builtin_ia32_subsd_round ((__v2df) __A, (__v2df) __B, \
+ (__v2df) __W, (__mmask8) __U,__R); })
+
+#define _mm_maskz_sub_round_sd(__U, __A, __B, __R) __extension__ ({ \
+ (__m128d) __builtin_ia32_subsd_round ((__v2df) __A, (__v2df) __B, \
+ (__v2df) _mm_setzero_pd(), (__mmask8) __U,__R); })
+
+static __inline__ __m512d __DEFAULT_FN_ATTRS
+_mm512_mask_sub_pd(__m512d __W, __mmask8 __U, __m512d __A, __m512d __B) {
+ return (__m512d) __builtin_ia32_subpd512_mask ((__v8df) __A,
+ (__v8df) __B,
+ (__v8df) __W,
+ (__mmask8) __U,
+ _MM_FROUND_CUR_DIRECTION);
+}
+
+static __inline__ __m512d __DEFAULT_FN_ATTRS
+_mm512_maskz_sub_pd(__mmask8 __U, __m512d __A, __m512d __B) {
+ return (__m512d) __builtin_ia32_subpd512_mask ((__v8df) __A,
+ (__v8df) __B,
+ (__v8df) _mm512_setzero_pd (),
+ (__mmask8) __U,
+ _MM_FROUND_CUR_DIRECTION);
+}
+
+static __inline__ __m512 __DEFAULT_FN_ATTRS
+_mm512_mask_sub_ps(__m512 __W, __mmask16 __U, __m512 __A, __m512 __B) {
+ return (__m512) __builtin_ia32_subps512_mask ((__v16sf) __A,
+ (__v16sf) __B,
+ (__v16sf) __W,
+ (__mmask16) __U,
+ _MM_FROUND_CUR_DIRECTION);
+}
+
+static __inline__ __m512 __DEFAULT_FN_ATTRS
+_mm512_maskz_sub_ps(__mmask16 __U, __m512 __A, __m512 __B) {
+ return (__m512) __builtin_ia32_subps512_mask ((__v16sf) __A,
+ (__v16sf) __B,
+ (__v16sf) _mm512_setzero_ps (),
+ (__mmask16) __U,
+ _MM_FROUND_CUR_DIRECTION);
+}
+
+#define _mm512_sub_round_pd(__A, __B, __R) __extension__ ({ \
+ (__m512d) __builtin_ia32_subpd512_mask ((__v8df) __A, (__v8df) __B,\
+ (__v8df) _mm512_setzero_pd(), (__mmask8) -1, __R); })
+
+#define _mm512_mask_sub_round_pd(__W, __U, __A, __B, __R) __extension__ ({ \
+ (__m512d) __builtin_ia32_subpd512_mask ((__v8df) __A, (__v8df) __B, \
+ (__v8df) __W, (__mmask8) __U, __R); })
+
+#define _mm512_maskz_sub_round_pd(__U, __A, __B, __R) __extension__ ({ \
+ (__m512d) __builtin_ia32_subpd512_mask ((__v8df) __A, (__v8df) __B, \
+ (__v8df) _mm512_setzero_pd(), (__mmask8) __U, __R);})
+
+#define _mm512_sub_round_ps(__A, __B, __R) __extension__ ({ \
+ (__m512) __builtin_ia32_subps512_mask ((__v16sf) __A, (__v16sf) __B, \
+ (__v16sf) _mm512_setzero_ps (), (__mmask16) -1, __R);})
+
+#define _mm512_mask_sub_round_ps(__W, __U, __A, __B, __R) __extension__ ({ \
+ (__m512) __builtin_ia32_subps512_mask ((__v16sf) __A, (__v16sf) __B, \
+ (__v16sf) __W, (__mmask16) __U, __R); })
+
+#define _mm512_maskz_sub_round_ps(__U, __A, __B, __R) __extension__ ({ \
+ (__m512) __builtin_ia32_subps512_mask ((__v16sf) __A, (__v16sf) __B, \
+ (__v16sf) _mm512_setzero_ps (), (__mmask16) __U, __R); })
+
+static __inline__ __m128 __DEFAULT_FN_ATTRS
+_mm_mask_mul_ss(__m128 __W, __mmask8 __U,__m128 __A, __m128 __B) {
+ return (__m128) __builtin_ia32_mulss_round ((__v4sf) __A,
+ (__v4sf) __B,
+ (__v4sf) __W,
+ (__mmask8) __U,
+ _MM_FROUND_CUR_DIRECTION);
+}
+
+static __inline__ __m128 __DEFAULT_FN_ATTRS
+_mm_maskz_mul_ss(__mmask8 __U,__m128 __A, __m128 __B) {
+ return (__m128) __builtin_ia32_mulss_round ((__v4sf) __A,
+ (__v4sf) __B,
+ (__v4sf) _mm_setzero_ps (),
+ (__mmask8) __U,
+ _MM_FROUND_CUR_DIRECTION);
+}
+#define _mm_mul_round_ss(__A, __B, __R) __extension__ ({ \
+ (__m128) __builtin_ia32_mulss_round ((__v4sf) __A, (__v4sf) __B, \
+ (__v4sf) _mm_setzero_ps(), (__mmask8) -1, __R); })
+
+#define _mm_mask_mul_round_ss(__W, __U, __A, __B, __R) __extension__ ({ \
+ (__m128) __builtin_ia32_mulss_round ((__v4sf) __A, (__v4sf) __B, \
+ (__v4sf) __W, (__mmask8) __U,__R); })
+
+#define _mm_maskz_mul_round_ss(__U, __A, __B, __R) __extension__ ({ \
+ (__m128) __builtin_ia32_mulss_round ((__v4sf) __A, (__v4sf) __B, \
+ (__v4sf) _mm_setzero_ps(), (__mmask8) __U,__R); })
+
+static __inline__ __m128d __DEFAULT_FN_ATTRS
+_mm_mask_mul_sd(__m128d __W, __mmask8 __U,__m128d __A, __m128d __B) {
+ return (__m128d) __builtin_ia32_mulsd_round ((__v2df) __A,
+ (__v2df) __B,
+ (__v2df) __W,
+ (__mmask8) __U,
+ _MM_FROUND_CUR_DIRECTION);
+}
+
+static __inline__ __m128d __DEFAULT_FN_ATTRS
+_mm_maskz_mul_sd(__mmask8 __U,__m128d __A, __m128d __B) {
+ return (__m128d) __builtin_ia32_mulsd_round ((__v2df) __A,
+ (__v2df) __B,
+ (__v2df) _mm_setzero_pd (),
+ (__mmask8) __U,
+ _MM_FROUND_CUR_DIRECTION);
+}
+
+#define _mm_mul_round_sd(__A, __B, __R) __extension__ ({ \
+ (__m128d) __builtin_ia32_mulsd_round ((__v2df) __A, (__v2df) __B, \
+ (__v2df) _mm_setzero_pd(), (__mmask8) -1, __R); })
+
+#define _mm_mask_mul_round_sd(__W, __U, __A, __B, __R) __extension__ ({ \
+ (__m128d) __builtin_ia32_mulsd_round ((__v2df) __A, (__v2df) __B, \
+ (__v2df) __W, (__mmask8) __U,__R); })
+
+#define _mm_maskz_mul_round_sd(__U, __A, __B, __R) __extension__ ({ \
+ (__m128d) __builtin_ia32_mulsd_round ((__v2df) __A, (__v2df) __B, \
+ (__v2df) _mm_setzero_pd(), (__mmask8) __U,__R); })
+
+static __inline__ __m512d __DEFAULT_FN_ATTRS
+_mm512_mask_mul_pd(__m512d __W, __mmask8 __U, __m512d __A, __m512d __B) {
+ return (__m512d) __builtin_ia32_mulpd512_mask ((__v8df) __A,
+ (__v8df) __B,
+ (__v8df) __W,
+ (__mmask8) __U,
+ _MM_FROUND_CUR_DIRECTION);
+}
+
+static __inline__ __m512d __DEFAULT_FN_ATTRS
+_mm512_maskz_mul_pd(__mmask8 __U, __m512d __A, __m512d __B) {
+ return (__m512d) __builtin_ia32_mulpd512_mask ((__v8df) __A,
+ (__v8df) __B,
+ (__v8df) _mm512_setzero_pd (),
+ (__mmask8) __U,
+ _MM_FROUND_CUR_DIRECTION);
+}
+
+static __inline__ __m512 __DEFAULT_FN_ATTRS
+_mm512_mask_mul_ps(__m512 __W, __mmask16 __U, __m512 __A, __m512 __B) {
+ return (__m512) __builtin_ia32_mulps512_mask ((__v16sf) __A,
+ (__v16sf) __B,
+ (__v16sf) __W,
+ (__mmask16) __U,
+ _MM_FROUND_CUR_DIRECTION);
+}
+
+static __inline__ __m512 __DEFAULT_FN_ATTRS
+_mm512_maskz_mul_ps(__mmask16 __U, __m512 __A, __m512 __B) {
+ return (__m512) __builtin_ia32_mulps512_mask ((__v16sf) __A,
+ (__v16sf) __B,
+ (__v16sf) _mm512_setzero_ps (),
+ (__mmask16) __U,
+ _MM_FROUND_CUR_DIRECTION);
+}
+
+#define _mm512_mul_round_pd(__A, __B, __R) __extension__ ({ \
+ (__m512d) __builtin_ia32_mulpd512_mask ((__v8df) __A, (__v8df) __B,\
+ (__v8df) _mm512_setzero_pd(), (__mmask8) -1, __R); })
+
+#define _mm512_mask_mul_round_pd(__W, __U, __A, __B, __R) __extension__ ({ \
+ (__m512d) __builtin_ia32_mulpd512_mask ((__v8df) __A, (__v8df) __B, \
+ (__v8df) __W, (__mmask8) __U, __R); })
+
+#define _mm512_maskz_mul_round_pd(__U, __A, __B, __R) __extension__ ({ \
+ (__m512d) __builtin_ia32_mulpd512_mask ((__v8df) __A, (__v8df) __B, \
+ (__v8df) _mm512_setzero_pd(), (__mmask8) __U, __R);})
+
+#define _mm512_mul_round_ps(__A, __B, __R) __extension__ ({ \
+ (__m512) __builtin_ia32_mulps512_mask ((__v16sf) __A, (__v16sf) __B, \
+ (__v16sf) _mm512_setzero_ps (), (__mmask16) -1, __R);})
+
+#define _mm512_mask_mul_round_ps(__W, __U, __A, __B, __R) __extension__ ({ \
+ (__m512) __builtin_ia32_mulps512_mask ((__v16sf) __A, (__v16sf) __B, \
+ (__v16sf) __W, (__mmask16) __U, __R); })
+
+#define _mm512_maskz_mul_round_ps(__U, __A, __B, __R) __extension__ ({ \
+ (__m512) __builtin_ia32_mulps512_mask ((__v16sf) __A, (__v16sf) __B, \
+ (__v16sf) _mm512_setzero_ps (), (__mmask16) __U, __R); })
+
+static __inline__ __m128 __DEFAULT_FN_ATTRS
+_mm_mask_div_ss(__m128 __W, __mmask8 __U,__m128 __A, __m128 __B) {
+ return (__m128) __builtin_ia32_divss_round ((__v4sf) __A,
+ (__v4sf) __B,
+ (__v4sf) __W,
+ (__mmask8) __U,
+ _MM_FROUND_CUR_DIRECTION);
+}
+
+static __inline__ __m128 __DEFAULT_FN_ATTRS
+_mm_maskz_div_ss(__mmask8 __U,__m128 __A, __m128 __B) {
+ return (__m128) __builtin_ia32_divss_round ((__v4sf) __A,
+ (__v4sf) __B,
+ (__v4sf) _mm_setzero_ps (),
+ (__mmask8) __U,
+ _MM_FROUND_CUR_DIRECTION);
+}
+
+#define _mm_div_round_ss(__A, __B, __R) __extension__ ({ \
+ (__m128) __builtin_ia32_divss_round ((__v4sf) __A, (__v4sf) __B, \
+ (__v4sf) _mm_setzero_ps(), (__mmask8) -1, __R); })
+
+#define _mm_mask_div_round_ss(__W, __U, __A, __B, __R) __extension__ ({ \
+ (__m128) __builtin_ia32_divss_round ((__v4sf) __A, (__v4sf) __B, \
+ (__v4sf) __W, (__mmask8) __U,__R); })
+
+#define _mm_maskz_div_round_ss(__U, __A, __B, __R) __extension__ ({ \
+ (__m128) __builtin_ia32_divss_round ((__v4sf) __A, (__v4sf) __B, \
+ (__v4sf) _mm_setzero_ps(), (__mmask8) __U,__R); })
+
+static __inline__ __m128d __DEFAULT_FN_ATTRS
+_mm_mask_div_sd(__m128d __W, __mmask8 __U,__m128d __A, __m128d __B) {
+ return (__m128d) __builtin_ia32_divsd_round ((__v2df) __A,
+ (__v2df) __B,
+ (__v2df) __W,
+ (__mmask8) __U,
+ _MM_FROUND_CUR_DIRECTION);
+}
+
+static __inline__ __m128d __DEFAULT_FN_ATTRS
+_mm_maskz_div_sd(__mmask8 __U,__m128d __A, __m128d __B) {
+ return (__m128d) __builtin_ia32_divsd_round ((__v2df) __A,
+ (__v2df) __B,
+ (__v2df) _mm_setzero_pd (),
+ (__mmask8) __U,
+ _MM_FROUND_CUR_DIRECTION);
+}
+
+#define _mm_div_round_sd(__A, __B, __R) __extension__ ({ \
+ (__m128d) __builtin_ia32_divsd_round ((__v2df) __A, (__v2df) __B, \
+ (__v2df) _mm_setzero_pd(), (__mmask8) -1, __R); })
+
+#define _mm_mask_div_round_sd(__W, __U, __A, __B, __R) __extension__ ({ \
+ (__m128d) __builtin_ia32_divsd_round ((__v2df) __A, (__v2df) __B, \
+ (__v2df) __W, (__mmask8) __U,__R); })
+
+#define _mm_maskz_div_round_sd(__U, __A, __B, __R) __extension__ ({ \
+ (__m128d) __builtin_ia32_divsd_round ((__v2df) __A, (__v2df) __B, \
+ (__v2df) _mm_setzero_pd(), (__mmask8) __U,__R); })
+
+static __inline__ __m512d __DEFAULT_FN_ATTRS
+_mm512_mask_div_pd(__m512d __W, __mmask8 __U, __m512d __A, __m512d __B) {
+ return (__m512d) __builtin_ia32_divpd512_mask ((__v8df) __A,
+ (__v8df) __B,
+ (__v8df) __W,
+ (__mmask8) __U,
+ _MM_FROUND_CUR_DIRECTION);
+}
+
+static __inline__ __m512d __DEFAULT_FN_ATTRS
+_mm512_maskz_div_pd(__mmask8 __U, __m512d __A, __m512d __B) {
+ return (__m512d) __builtin_ia32_divpd512_mask ((__v8df) __A,
+ (__v8df) __B,
+ (__v8df) _mm512_setzero_pd (),
+ (__mmask8) __U,
+ _MM_FROUND_CUR_DIRECTION);
+}
+
+static __inline__ __m512 __DEFAULT_FN_ATTRS
+_mm512_mask_div_ps(__m512 __W, __mmask16 __U, __m512 __A, __m512 __B) {
+ return (__m512) __builtin_ia32_divps512_mask ((__v16sf) __A,
+ (__v16sf) __B,
+ (__v16sf) __W,
+ (__mmask16) __U,
+ _MM_FROUND_CUR_DIRECTION);
+}
+
+static __inline__ __m512 __DEFAULT_FN_ATTRS
+_mm512_maskz_div_ps(__mmask16 __U, __m512 __A, __m512 __B) {
+ return (__m512) __builtin_ia32_divps512_mask ((__v16sf) __A,
+ (__v16sf) __B,
+ (__v16sf) _mm512_setzero_ps (),
+ (__mmask16) __U,
+ _MM_FROUND_CUR_DIRECTION);
+}
+
+#define _mm512_div_round_pd(__A, __B, __R) __extension__ ({ \
+ (__m512d) __builtin_ia32_divpd512_mask ((__v8df) __A, (__v8df) __B,\
+ (__v8df) _mm512_setzero_pd(), (__mmask8) -1, __R); })
+
+#define _mm512_mask_div_round_pd(__W, __U, __A, __B, __R) __extension__ ({ \
+ (__m512d) __builtin_ia32_divpd512_mask ((__v8df) __A, (__v8df) __B, \
+ (__v8df) __W, (__mmask8) __U, __R); })
+
+#define _mm512_maskz_div_round_pd(__U, __A, __B, __R) __extension__ ({ \
+ (__m512d) __builtin_ia32_divpd512_mask ((__v8df) __A, (__v8df) __B, \
+ (__v8df) _mm512_setzero_pd(), (__mmask8) __U, __R);})
+
+#define _mm512_div_round_ps(__A, __B, __R) __extension__ ({ \
+ (__m512) __builtin_ia32_divps512_mask ((__v16sf) __A, (__v16sf) __B, \
+ (__v16sf) _mm512_setzero_ps (), (__mmask16) -1, __R);})
+
+#define _mm512_mask_div_round_ps(__W, __U, __A, __B, __R) __extension__ ({ \
+ (__m512) __builtin_ia32_divps512_mask ((__v16sf) __A, (__v16sf) __B, \
+ (__v16sf) __W, (__mmask16) __U, __R); })
+
+#define _mm512_maskz_div_round_ps(__U, __A, __B, __R) __extension__ ({ \
+ (__m512) __builtin_ia32_divps512_mask ((__v16sf) __A, (__v16sf) __B, \
+ (__v16sf) _mm512_setzero_ps (), (__mmask16) __U, __R); })
+
+#define _mm512_roundscale_ps(A, B) __extension__ ({ \
+ (__m512)__builtin_ia32_rndscaleps_mask((__v16sf)(A), (B), (__v16sf)(A), \
+ -1, _MM_FROUND_CUR_DIRECTION); })
+
+#define _mm512_roundscale_pd(A, B) __extension__ ({ \
+ (__m512d)__builtin_ia32_rndscalepd_mask((__v8df)(A), (B), (__v8df)(A), \
+ -1, _MM_FROUND_CUR_DIRECTION); })
+
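+/* Usage sketch (illustrative): roundscale rounds each lane to 2^-M fractional
+ * precision, where M is bits 7:4 of the immediate and the low bits pick the
+ * rounding control; an immediate of 0 rounds to whole numbers:
+ *
+ *   __m512 r = _mm512_roundscale_ps(x, 0);  // nearest integral value per lane
+ */
+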
+#define _mm512_fmadd_round_pd(A, B, C, R) __extension__ ({ \
+ (__m512d) __builtin_ia32_vfmaddpd512_mask ((__v8df) (A), \
+ (__v8df) (B), (__v8df) (C), \
+ (__mmask8) -1, (R)); })
+
+
+#define _mm512_mask_fmadd_round_pd(A, U, B, C, R) __extension__ ({ \
+ (__m512d) __builtin_ia32_vfmaddpd512_mask ((__v8df) (A), \
+ (__v8df) (B), (__v8df) (C), \
+ (__mmask8) (U), (R)); })
+
+
+#define _mm512_mask3_fmadd_round_pd(A, B, C, U, R) __extension__ ({ \
+ (__m512d) __builtin_ia32_vfmaddpd512_mask3 ((__v8df) (A), \
+ (__v8df) (B), (__v8df) (C), \
+ (__mmask8) (U), (R)); })
+
+
+#define _mm512_maskz_fmadd_round_pd(U, A, B, C, R) __extension__ ({ \
+ (__m512d) __builtin_ia32_vfmaddpd512_maskz ((__v8df) (A), \
+ (__v8df) (B), (__v8df) (C), \
+ (__mmask8) (U), (R)); })
+
+
+#define _mm512_fmsub_round_pd(A, B, C, R) __extension__ ({ \
+ (__m512d) __builtin_ia32_vfmaddpd512_mask ((__v8df) (A), \
+ (__v8df) (B), -(__v8df) (C), \
+ (__mmask8) -1, (R)); })
+
+
+#define _mm512_mask_fmsub_round_pd(A, U, B, C, R) __extension__ ({ \
+ (__m512d) __builtin_ia32_vfmaddpd512_mask ((__v8df) (A), \
+ (__v8df) (B), -(__v8df) (C), \
+ (__mmask8) (U), (R)); })
+
+
+#define _mm512_maskz_fmsub_round_pd(U, A, B, C, R) __extension__ ({ \
+ (__m512d) __builtin_ia32_vfmaddpd512_maskz ((__v8df) (A), \
+ (__v8df) (B), -(__v8df) (C), \
+ (__mmask8) (U), (R)); })
+
+
+#define _mm512_fnmadd_round_pd(A, B, C, R) __extension__ ({ \
+ (__m512d) __builtin_ia32_vfmaddpd512_mask (-(__v8df) (A), \
+ (__v8df) (B), (__v8df) (C), \
+ (__mmask8) -1, (R)); })
+
+
+#define _mm512_mask3_fnmadd_round_pd(A, B, C, U, R) __extension__ ({ \
+ (__m512d) __builtin_ia32_vfmaddpd512_mask3 (-(__v8df) (A), \
+ (__v8df) (B), (__v8df) (C), \
+ (__mmask8) (U), (R)); })
+
+
+#define _mm512_maskz_fnmadd_round_pd(U, A, B, C, R) __extension__ ({ \
+ (__m512d) __builtin_ia32_vfmaddpd512_maskz (-(__v8df) (A), \
+ (__v8df) (B), (__v8df) (C), \
+ (__mmask8) (U), (R)); })
+
+
+#define _mm512_fnmsub_round_pd(A, B, C, R) __extension__ ({ \
+ (__m512d) __builtin_ia32_vfmaddpd512_mask (-(__v8df) (A), \
+ (__v8df) (B), -(__v8df) (C), \
+ (__mmask8) -1, (R)); })
+
+
+#define _mm512_maskz_fnmsub_round_pd(U, A, B, C, R) __extension__ ({ \
+ (__m512d) __builtin_ia32_vfmaddpd512_maskz (-(__v8df) (A), \
+ (__v8df) (B), -(__v8df) (C), \
+ (__mmask8) (U), (R)); })
+
+
+static __inline__ __m512d __DEFAULT_FN_ATTRS
+_mm512_fmadd_pd(__m512d __A, __m512d __B, __m512d __C)
+{
+ return (__m512d) __builtin_ia32_vfmaddpd512_mask ((__v8df) __A,
+ (__v8df) __B,
+ (__v8df) __C,
+ (__mmask8) -1,
+ _MM_FROUND_CUR_DIRECTION);
+}
+
+static __inline__ __m512d __DEFAULT_FN_ATTRS
+_mm512_mask_fmadd_pd(__m512d __A, __mmask8 __U, __m512d __B, __m512d __C)
+{
+ return (__m512d) __builtin_ia32_vfmaddpd512_mask ((__v8df) __A,
+ (__v8df) __B,
+ (__v8df) __C,
+ (__mmask8) __U,
+ _MM_FROUND_CUR_DIRECTION);
+}
+
+static __inline__ __m512d __DEFAULT_FN_ATTRS
+_mm512_mask3_fmadd_pd(__m512d __A, __m512d __B, __m512d __C, __mmask8 __U)
+{
+ return (__m512d) __builtin_ia32_vfmaddpd512_mask3 ((__v8df) __A,
+ (__v8df) __B,
+ (__v8df) __C,
+ (__mmask8) __U,
+ _MM_FROUND_CUR_DIRECTION);
+}
+
+static __inline__ __m512d __DEFAULT_FN_ATTRS
+_mm512_maskz_fmadd_pd(__mmask8 __U, __m512d __A, __m512d __B, __m512d __C)
+{
+ return (__m512d) __builtin_ia32_vfmaddpd512_maskz ((__v8df) __A,
+ (__v8df) __B,
+ (__v8df) __C,
+ (__mmask8) __U,
+ _MM_FROUND_CUR_DIRECTION);
+}
+
+static __inline__ __m512d __DEFAULT_FN_ATTRS
+_mm512_fmsub_pd(__m512d __A, __m512d __B, __m512d __C)
+{
+ return (__m512d) __builtin_ia32_vfmaddpd512_mask ((__v8df) __A,
+ (__v8df) __B,
+ -(__v8df) __C,
+ (__mmask8) -1,
+ _MM_FROUND_CUR_DIRECTION);
+}
+
+static __inline__ __m512d __DEFAULT_FN_ATTRS
+_mm512_mask_fmsub_pd(__m512d __A, __mmask8 __U, __m512d __B, __m512d __C)
+{
+ return (__m512d) __builtin_ia32_vfmaddpd512_mask ((__v8df) __A,
+ (__v8df) __B,
+ -(__v8df) __C,
+ (__mmask8) __U,
+ _MM_FROUND_CUR_DIRECTION);
+}
+
+static __inline__ __m512d __DEFAULT_FN_ATTRS
+_mm512_maskz_fmsub_pd(__mmask8 __U, __m512d __A, __m512d __B, __m512d __C)
+{
+ return (__m512d) __builtin_ia32_vfmaddpd512_maskz ((__v8df) __A,
+ (__v8df) __B,
+ -(__v8df) __C,
+ (__mmask8) __U,
+ _MM_FROUND_CUR_DIRECTION);
+}
+
+static __inline__ __m512d __DEFAULT_FN_ATTRS
+_mm512_fnmadd_pd(__m512d __A, __m512d __B, __m512d __C)
+{
+ return (__m512d) __builtin_ia32_vfmaddpd512_mask (-(__v8df) __A,
+ (__v8df) __B,
+ (__v8df) __C,
+ (__mmask8) -1,
+ _MM_FROUND_CUR_DIRECTION);
+}
+
+static __inline__ __m512d __DEFAULT_FN_ATTRS
+_mm512_mask3_fnmadd_pd(__m512d __A, __m512d __B, __m512d __C, __mmask8 __U)
+{
+ return (__m512d) __builtin_ia32_vfmaddpd512_mask3 (-(__v8df) __A,
+ (__v8df) __B,
+ (__v8df) __C,
+ (__mmask8) __U,
+ _MM_FROUND_CUR_DIRECTION);
+}
+
+static __inline__ __m512d __DEFAULT_FN_ATTRS
+_mm512_maskz_fnmadd_pd(__mmask8 __U, __m512d __A, __m512d __B, __m512d __C)
+{
+ return (__m512d) __builtin_ia32_vfmaddpd512_maskz (-(__v8df) __A,
+ (__v8df) __B,
+ (__v8df) __C,
+ (__mmask8) __U,
+ _MM_FROUND_CUR_DIRECTION);
+}
+
+static __inline__ __m512d __DEFAULT_FN_ATTRS
+_mm512_fnmsub_pd(__m512d __A, __m512d __B, __m512d __C)
+{
+ return (__m512d) __builtin_ia32_vfmaddpd512_mask (-(__v8df) __A,
+ (__v8df) __B,
+ -(__v8df) __C,
+ (__mmask8) -1,
+ _MM_FROUND_CUR_DIRECTION);
+}
+
+static __inline__ __m512d __DEFAULT_FN_ATTRS
+_mm512_maskz_fnmsub_pd(__mmask8 __U, __m512d __A, __m512d __B, __m512d __C)
+{
+ return (__m512d) __builtin_ia32_vfmaddpd512_maskz (-(__v8df) __A,
+ (__v8df) __B,
+ -(__v8df) __C,
+ (__mmask8) __U,
+ _MM_FROUND_CUR_DIRECTION);
+}
+
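+/* Usage sketch (illustrative): the fused names encode the sign pattern of a
+ * single-rounding a*b+c -- fmadd = a*b+c, fmsub = a*b-c, fnmadd = -(a*b)+c,
+ * fnmsub = -(a*b)-c -- and the mask position names the merge source: mask_
+ * merges from the first operand, mask3_ from the addend, maskz_ zeroes:
+ *
+ *   __m512d d = _mm512_fmadd_pd(a, b, c);   // d[i] = a[i]*b[i] + c[i]
+ *   __m512d e = _mm512_fnmsub_pd(a, b, c);  // e[i] = -(a[i]*b[i]) - c[i]
+ */
+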
+#define _mm512_fmadd_round_ps(A, B, C, R) __extension__ ({ \
+ (__m512) __builtin_ia32_vfmaddps512_mask ((__v16sf) (A), \
+ (__v16sf) (B), (__v16sf) (C), \
+ (__mmask16) -1, (R)); })
+
+
+#define _mm512_mask_fmadd_round_ps(A, U, B, C, R) __extension__ ({ \
+ (__m512) __builtin_ia32_vfmaddps512_mask ((__v16sf) (A), \
+ (__v16sf) (B), (__v16sf) (C), \
+ (__mmask16) (U), (R)); })
+
+
+#define _mm512_mask3_fmadd_round_ps(A, B, C, U, R) __extension__ ({ \
+ (__m512) __builtin_ia32_vfmaddps512_mask3 ((__v16sf) (A), \
+ (__v16sf) (B), (__v16sf) (C), \
+ (__mmask16) (U), (R)); })
+
+
+#define _mm512_maskz_fmadd_round_ps(U, A, B, C, R) __extension__ ({ \
+ (__m512) __builtin_ia32_vfmaddps512_maskz ((__v16sf) (A), \
+ (__v16sf) (B), (__v16sf) (C), \
+ (__mmask16) (U), (R)); })
+
+
+#define _mm512_fmsub_round_ps(A, B, C, R) __extension__ ({ \
+ (__m512) __builtin_ia32_vfmaddps512_mask ((__v16sf) (A), \
+ (__v16sf) (B), -(__v16sf) (C), \
+ (__mmask16) -1, (R)); })
+
+
+#define _mm512_mask_fmsub_round_ps(A, U, B, C, R) __extension__ ({ \
+ (__m512) __builtin_ia32_vfmaddps512_mask ((__v16sf) (A), \
+ (__v16sf) (B), -(__v16sf) (C), \
+ (__mmask16) (U), (R)); })
+
+
+#define _mm512_maskz_fmsub_round_ps(U, A, B, C, R) __extension__ ({ \
+ (__m512) __builtin_ia32_vfmaddps512_maskz ((__v16sf) (A), \
+ (__v16sf) (B), -(__v16sf) (C), \
+ (__mmask16) (U), (R)); })
+
+
+#define _mm512_fnmadd_round_ps(A, B, C, R) __extension__ ({ \
+ (__m512) __builtin_ia32_vfmaddps512_mask (-(__v16sf) (A), \
+ (__v16sf) (B), (__v16sf) (C), \
+ (__mmask16) -1, (R)); })
+
+
+#define _mm512_mask3_fnmadd_round_ps(A, B, C, U, R) __extension__ ({ \
+ (__m512) __builtin_ia32_vfmaddps512_mask3 (-(__v16sf) (A), \
+ (__v16sf) (B), (__v16sf) (C), \
+ (__mmask16) (U), (R)); })
+
+
+#define _mm512_maskz_fnmadd_round_ps(U, A, B, C, R) __extension__ ({ \
+ (__m512) __builtin_ia32_vfmaddps512_maskz (-(__v16sf) (A), \
+ (__v16sf) (B), (__v16sf) (C), \
+ (__mmask16) (U), (R)); })
+
+
+#define _mm512_fnmsub_round_ps(A, B, C, R) __extension__ ({ \
+ (__m512) __builtin_ia32_vfmaddps512_mask (-(__v16sf) (A), \
+ (__v16sf) (B), -(__v16sf) (C), \
+ (__mmask16) -1, (R)); })
+
+
+#define _mm512_maskz_fnmsub_round_ps(U, A, B, C, R) __extension__ ({ \
+ (__m512) __builtin_ia32_vfmaddps512_maskz (-(__v16sf) (A), \
+ (__v16sf) (B), -(__v16sf) (C), \
+ (__mmask16) (U), (R)); })
+
+
+static __inline__ __m512 __DEFAULT_FN_ATTRS
+_mm512_fmadd_ps(__m512 __A, __m512 __B, __m512 __C)
+{
+ return (__m512) __builtin_ia32_vfmaddps512_mask ((__v16sf) __A,
+ (__v16sf) __B,
+ (__v16sf) __C,
+ (__mmask16) -1,
+ _MM_FROUND_CUR_DIRECTION);
+}
+
+static __inline__ __m512 __DEFAULT_FN_ATTRS
+_mm512_mask_fmadd_ps(__m512 __A, __mmask16 __U, __m512 __B, __m512 __C)
+{
+ return (__m512) __builtin_ia32_vfmaddps512_mask ((__v16sf) __A,
+ (__v16sf) __B,
+ (__v16sf) __C,
+ (__mmask16) __U,
+ _MM_FROUND_CUR_DIRECTION);
+}
+
+static __inline__ __m512 __DEFAULT_FN_ATTRS
+_mm512_mask3_fmadd_ps(__m512 __A, __m512 __B, __m512 __C, __mmask16 __U)
+{
+ return (__m512) __builtin_ia32_vfmaddps512_mask3 ((__v16sf) __A,
+ (__v16sf) __B,
+ (__v16sf) __C,
+ (__mmask16) __U,
+ _MM_FROUND_CUR_DIRECTION);
+}
+
+static __inline__ __m512 __DEFAULT_FN_ATTRS
+_mm512_maskz_fmadd_ps(__mmask16 __U, __m512 __A, __m512 __B, __m512 __C)
+{
+ return (__m512) __builtin_ia32_vfmaddps512_maskz ((__v16sf) __A,
+ (__v16sf) __B,
+ (__v16sf) __C,
+ (__mmask16) __U,
+ _MM_FROUND_CUR_DIRECTION);
+}
+
+static __inline__ __m512 __DEFAULT_FN_ATTRS
+_mm512_fmsub_ps(__m512 __A, __m512 __B, __m512 __C)
+{
+ return (__m512) __builtin_ia32_vfmaddps512_mask ((__v16sf) __A,
+ (__v16sf) __B,
+ -(__v16sf) __C,
+ (__mmask16) -1,
+ _MM_FROUND_CUR_DIRECTION);
+}
+
+static __inline__ __m512 __DEFAULT_FN_ATTRS
+_mm512_mask_fmsub_ps(__m512 __A, __mmask16 __U, __m512 __B, __m512 __C)
+{
+ return (__m512) __builtin_ia32_vfmaddps512_mask ((__v16sf) __A,
+ (__v16sf) __B,
+ -(__v16sf) __C,
+ (__mmask16) __U,
+ _MM_FROUND_CUR_DIRECTION);
+}
+
+static __inline__ __m512 __DEFAULT_FN_ATTRS
+_mm512_maskz_fmsub_ps(__mmask16 __U, __m512 __A, __m512 __B, __m512 __C)
+{
+ return (__m512) __builtin_ia32_vfmaddps512_maskz ((__v16sf) __A,
+ (__v16sf) __B,
+ -(__v16sf) __C,
+ (__mmask16) __U,
+ _MM_FROUND_CUR_DIRECTION);
+}
+
+static __inline__ __m512 __DEFAULT_FN_ATTRS
+_mm512_fnmadd_ps(__m512 __A, __m512 __B, __m512 __C)
+{
+ return (__m512) __builtin_ia32_vfmaddps512_mask (-(__v16sf) __A,
+ (__v16sf) __B,
+ (__v16sf) __C,
+ (__mmask16) -1,
+ _MM_FROUND_CUR_DIRECTION);
+}
+
+static __inline__ __m512 __DEFAULT_FN_ATTRS
+_mm512_mask3_fnmadd_ps(__m512 __A, __m512 __B, __m512 __C, __mmask16 __U)
+{
+ return (__m512) __builtin_ia32_vfmaddps512_mask3 (-(__v16sf) __A,
+ (__v16sf) __B,
+ (__v16sf) __C,
+ (__mmask16) __U,
+ _MM_FROUND_CUR_DIRECTION);
+}
+
+static __inline__ __m512 __DEFAULT_FN_ATTRS
+_mm512_maskz_fnmadd_ps(__mmask16 __U, __m512 __A, __m512 __B, __m512 __C)
+{
+ return (__m512) __builtin_ia32_vfmaddps512_maskz (-(__v16sf) __A,
+ (__v16sf) __B,
+ (__v16sf) __C,
+ (__mmask16) __U,
+ _MM_FROUND_CUR_DIRECTION);
+}
+
+static __inline__ __m512 __DEFAULT_FN_ATTRS
+_mm512_fnmsub_ps(__m512 __A, __m512 __B, __m512 __C)
+{
+ return (__m512) __builtin_ia32_vfmaddps512_mask (-(__v16sf) __A,
+ (__v16sf) __B,
+ -(__v16sf) __C,
+ (__mmask16) -1,
+ _MM_FROUND_CUR_DIRECTION);
+}
+
+static __inline__ __m512 __DEFAULT_FN_ATTRS
+_mm512_maskz_fnmsub_ps(__mmask16 __U, __m512 __A, __m512 __B, __m512 __C)
+{
+ return (__m512) __builtin_ia32_vfmaddps512_maskz (-(__v16sf) __A,
+ (__v16sf) __B,
+ -(__v16sf) __C,
+ (__mmask16) __U,
+ _MM_FROUND_CUR_DIRECTION);
+}
+
+#define _mm512_fmaddsub_round_pd(A, B, C, R) __extension__ ({ \
+ (__m512d) __builtin_ia32_vfmaddsubpd512_mask ((__v8df) (A), \
+ (__v8df) (B), (__v8df) (C), \
+ (__mmask8) -1, (R)); })
+
+
+#define _mm512_mask_fmaddsub_round_pd(A, U, B, C, R) __extension__ ({ \
+ (__m512d) __builtin_ia32_vfmaddsubpd512_mask ((__v8df) (A), \
+ (__v8df) (B), (__v8df) (C), \
+ (__mmask8) (U), (R)); })
+
+
+#define _mm512_mask3_fmaddsub_round_pd(A, B, C, U, R) __extension__ ({ \
+ (__m512d) __builtin_ia32_vfmaddsubpd512_mask3 ((__v8df) (A), \
+ (__v8df) (B), (__v8df) (C), \
+ (__mmask8) (U), (R)); })
+
+
+#define _mm512_maskz_fmaddsub_round_pd(U, A, B, C, R) __extension__ ({ \
+ (__m512d) __builtin_ia32_vfmaddsubpd512_maskz ((__v8df) (A), \
+ (__v8df) (B), (__v8df) (C), \
+ (__mmask8) (U), (R)); })
+
+
+#define _mm512_fmsubadd_round_pd(A, B, C, R) __extension__ ({ \
+ (__m512d) __builtin_ia32_vfmaddsubpd512_mask ((__v8df) (A), \
+ (__v8df) (B), -(__v8df) (C), \
+ (__mmask8) -1, (R)); })
+
+
+#define _mm512_mask_fmsubadd_round_pd(A, U, B, C, R) __extension__ ({ \
+ (__m512d) __builtin_ia32_vfmaddsubpd512_mask ((__v8df) (A), \
+ (__v8df) (B), -(__v8df) (C), \
+ (__mmask8) (U), (R)); })
+
+
+#define _mm512_maskz_fmsubadd_round_pd(U, A, B, C, R) __extension__ ({ \
+ (__m512d) __builtin_ia32_vfmaddsubpd512_maskz ((__v8df) (A), \
+ (__v8df) (B), -(__v8df) (C), \
+ (__mmask8) (U), (R)); })
+
+
+static __inline__ __m512d __DEFAULT_FN_ATTRS
+_mm512_fmaddsub_pd(__m512d __A, __m512d __B, __m512d __C)
+{
+ return (__m512d) __builtin_ia32_vfmaddsubpd512_mask ((__v8df) __A,
+ (__v8df) __B,
+ (__v8df) __C,
+ (__mmask8) -1,
+ _MM_FROUND_CUR_DIRECTION);
+}
+
+static __inline__ __m512d __DEFAULT_FN_ATTRS
+_mm512_mask_fmaddsub_pd(__m512d __A, __mmask8 __U, __m512d __B, __m512d __C)
+{
+ return (__m512d) __builtin_ia32_vfmaddsubpd512_mask ((__v8df) __A,
+ (__v8df) __B,
+ (__v8df) __C,
+ (__mmask8) __U,
+ _MM_FROUND_CUR_DIRECTION);
+}
+
+static __inline__ __m512d __DEFAULT_FN_ATTRS
+_mm512_mask3_fmaddsub_pd(__m512d __A, __m512d __B, __m512d __C, __mmask8 __U)
+{
+ return (__m512d) __builtin_ia32_vfmaddsubpd512_mask3 ((__v8df) __A,
+ (__v8df) __B,
+ (__v8df) __C,
+ (__mmask8) __U,
+ _MM_FROUND_CUR_DIRECTION);
+}
+
+static __inline__ __m512d __DEFAULT_FN_ATTRS
+_mm512_maskz_fmaddsub_pd(__mmask8 __U, __m512d __A, __m512d __B, __m512d __C)
+{
+ return (__m512d) __builtin_ia32_vfmaddsubpd512_maskz ((__v8df) __A,
+ (__v8df) __B,
+ (__v8df) __C,
+ (__mmask8) __U,
+ _MM_FROUND_CUR_DIRECTION);
+}
+
+static __inline__ __m512d __DEFAULT_FN_ATTRS
+_mm512_fmsubadd_pd(__m512d __A, __m512d __B, __m512d __C)
+{
+ return (__m512d) __builtin_ia32_vfmaddsubpd512_mask ((__v8df) __A,
+ (__v8df) __B,
+ -(__v8df) __C,
+ (__mmask8) -1,
+ _MM_FROUND_CUR_DIRECTION);
+}
+
+static __inline__ __m512d __DEFAULT_FN_ATTRS
+_mm512_mask_fmsubadd_pd(__m512d __A, __mmask8 __U, __m512d __B, __m512d __C)
+{
+ return (__m512d) __builtin_ia32_vfmaddsubpd512_mask ((__v8df) __A,
+ (__v8df) __B,
+ -(__v8df) __C,
+ (__mmask8) __U,
+ _MM_FROUND_CUR_DIRECTION);
+}
+
+static __inline__ __m512d __DEFAULT_FN_ATTRS
+_mm512_maskz_fmsubadd_pd(__mmask8 __U, __m512d __A, __m512d __B, __m512d __C)
+{
+ return (__m512d) __builtin_ia32_vfmaddsubpd512_maskz ((__v8df) __A,
+ (__v8df) __B,
+ -(__v8df) __C,
+ (__mmask8) __U,
+ _MM_FROUND_CUR_DIRECTION);
+}
+
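+/* Usage sketch (illustrative): the addsub forms alternate the sign of __C per
+ * lane -- fmaddsub subtracts in even lanes and adds in odd lanes, fmsubadd
+ * does the opposite:
+ *
+ *   __m512d d = _mm512_fmaddsub_pd(a, b, c);
+ *   //   d[0] = a[0]*b[0] - c[0], d[1] = a[1]*b[1] + c[1], ...
+ */
+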
+#define _mm512_fmaddsub_round_ps(A, B, C, R) __extension__ ({ \
+ (__m512) __builtin_ia32_vfmaddsubps512_mask ((__v16sf) (A), \
+ (__v16sf) (B), (__v16sf) (C), \
+ (__mmask16) -1, (R)); })
+
+
+#define _mm512_mask_fmaddsub_round_ps(A, U, B, C, R) __extension__ ({ \
+ (__m512) __builtin_ia32_vfmaddsubps512_mask ((__v16sf) (A), \
+ (__v16sf) (B), (__v16sf) (C), \
+ (__mmask16) (U), (R)); })
+
+
+#define _mm512_mask3_fmaddsub_round_ps(A, B, C, U, R) __extension__ ({ \
+ (__m512) __builtin_ia32_vfmaddsubps512_mask3 ((__v16sf) (A), \
+ (__v16sf) (B), (__v16sf) (C), \
+ (__mmask16) (U), (R)); })
+
+
+#define _mm512_maskz_fmaddsub_round_ps(U, A, B, C, R) __extension__ ({ \
+ (__m512) __builtin_ia32_vfmaddsubps512_maskz ((__v16sf) (A), \
+ (__v16sf) (B), (__v16sf) (C), \
+ (__mmask16) (U), (R)); })
+
+
+#define _mm512_fmsubadd_round_ps(A, B, C, R) __extension__ ({ \
+ (__m512) __builtin_ia32_vfmaddsubps512_mask ((__v16sf) (A), \
+ (__v16sf) (B), -(__v16sf) (C), \
+ (__mmask16) -1, (R)); })
+
+
+#define _mm512_mask_fmsubadd_round_ps(A, U, B, C, R) __extension__ ({ \
+ (__m512) __builtin_ia32_vfmaddsubps512_mask ((__v16sf) (A), \
+ (__v16sf) (B), -(__v16sf) (C), \
+ (__mmask16) (U), (R)); })
+
+
+#define _mm512_maskz_fmsubadd_round_ps(U, A, B, C, R) __extension__ ({ \
+ (__m512) __builtin_ia32_vfmaddsubps512_maskz ((__v16sf) (A), \
+ (__v16sf) (B), -(__v16sf) (C), \
+ (__mmask16) (U), (R)); })
+
+
+static __inline__ __m512 __DEFAULT_FN_ATTRS
+_mm512_fmaddsub_ps(__m512 __A, __m512 __B, __m512 __C)
+{
+ return (__m512) __builtin_ia32_vfmaddsubps512_mask ((__v16sf) __A,
+ (__v16sf) __B,
+ (__v16sf) __C,
+ (__mmask16) -1,
+ _MM_FROUND_CUR_DIRECTION);
+}
+
+static __inline__ __m512 __DEFAULT_FN_ATTRS
+_mm512_mask_fmaddsub_ps(__m512 __A, __mmask16 __U, __m512 __B, __m512 __C)
+{
+ return (__m512) __builtin_ia32_vfmaddsubps512_mask ((__v16sf) __A,
+ (__v16sf) __B,
+ (__v16sf) __C,
+ (__mmask16) __U,
+ _MM_FROUND_CUR_DIRECTION);
+}
+
+static __inline__ __m512 __DEFAULT_FN_ATTRS
+_mm512_mask3_fmaddsub_ps(__m512 __A, __m512 __B, __m512 __C, __mmask16 __U)
+{
+ return (__m512) __builtin_ia32_vfmaddsubps512_mask3 ((__v16sf) __A,
+ (__v16sf) __B,
+ (__v16sf) __C,
+ (__mmask16) __U,
+ _MM_FROUND_CUR_DIRECTION);
+}
+
+static __inline__ __m512 __DEFAULT_FN_ATTRS
+_mm512_maskz_fmaddsub_ps(__mmask16 __U, __m512 __A, __m512 __B, __m512 __C)
+{
+ return (__m512) __builtin_ia32_vfmaddsubps512_maskz ((__v16sf) __A,
+ (__v16sf) __B,
+ (__v16sf) __C,
+ (__mmask16) __U,
+ _MM_FROUND_CUR_DIRECTION);
+}
+
+static __inline__ __m512 __DEFAULT_FN_ATTRS
+_mm512_fmsubadd_ps(__m512 __A, __m512 __B, __m512 __C)
+{
+ return (__m512) __builtin_ia32_vfmaddsubps512_mask ((__v16sf) __A,
+ (__v16sf) __B,
+ -(__v16sf) __C,
+ (__mmask16) -1,
+ _MM_FROUND_CUR_DIRECTION);
+}
+
+static __inline__ __m512 __DEFAULT_FN_ATTRS
+_mm512_mask_fmsubadd_ps(__m512 __A, __mmask16 __U, __m512 __B, __m512 __C)
+{
+ return (__m512) __builtin_ia32_vfmaddsubps512_mask ((__v16sf) __A,
+ (__v16sf) __B,
+ -(__v16sf) __C,
+ (__mmask16) __U,
+ _MM_FROUND_CUR_DIRECTION);
+}
+
+static __inline__ __m512 __DEFAULT_FN_ATTRS
+_mm512_maskz_fmsubadd_ps(__mmask16 __U, __m512 __A, __m512 __B, __m512 __C)
+{
+ return (__m512) __builtin_ia32_vfmaddsubps512_maskz ((__v16sf) __A,
+ (__v16sf) __B,
+ -(__v16sf) __C,
+ (__mmask16) __U,
+ _MM_FROUND_CUR_DIRECTION);
+}
+
+#define _mm512_mask3_fmsub_round_pd(A, B, C, U, R) __extension__ ({ \
+ (__m512d) __builtin_ia32_vfmsubpd512_mask3 ((__v8df) (A), \
+ (__v8df) (B), (__v8df) (C), \
+ (__mmask8) (U), (R)); })
+
+
+static __inline__ __m512d __DEFAULT_FN_ATTRS
+_mm512_mask3_fmsub_pd(__m512d __A, __m512d __B, __m512d __C, __mmask8 __U)
+{
+ return (__m512d) __builtin_ia32_vfmsubpd512_mask3 ((__v8df) __A,
+ (__v8df) __B,
+ (__v8df) __C,
+ (__mmask8) __U,
+ _MM_FROUND_CUR_DIRECTION);
+}
+
+#define _mm512_mask3_fmsub_round_ps(A, B, C, U, R) __extension__ ({ \
+ (__m512) __builtin_ia32_vfmsubps512_mask3 ((__v16sf) (A), \
+ (__v16sf) (B), (__v16sf) (C), \
+ (__mmask16) (U), (R)); })
+
+
+static __inline__ __m512 __DEFAULT_FN_ATTRS
+_mm512_mask3_fmsub_ps(__m512 __A, __m512 __B, __m512 __C, __mmask16 __U)
+{
+ return (__m512) __builtin_ia32_vfmsubps512_mask3 ((__v16sf) __A,
+ (__v16sf) __B,
+ (__v16sf) __C,
+ (__mmask16) __U,
+ _MM_FROUND_CUR_DIRECTION);
+}
+
+#define _mm512_mask3_fmsubadd_round_pd(A, B, C, U, R) __extension__ ({ \
+ (__m512d) __builtin_ia32_vfmsubaddpd512_mask3 ((__v8df) (A), \
+ (__v8df) (B), (__v8df) (C), \
+ (__mmask8) (U), (R)); })
+
+
+static __inline__ __m512d __DEFAULT_FN_ATTRS
+_mm512_mask3_fmsubadd_pd(__m512d __A, __m512d __B, __m512d __C, __mmask8 __U)
+{
+ return (__m512d) __builtin_ia32_vfmsubaddpd512_mask3 ((__v8df) __A,
+ (__v8df) __B,
+ (__v8df) __C,
+ (__mmask8) __U,
+ _MM_FROUND_CUR_DIRECTION);
+}
+
+#define _mm512_mask3_fmsubadd_round_ps(A, B, C, U, R) __extension__ ({ \
+ (__m512) __builtin_ia32_vfmsubaddps512_mask3 ((__v16sf) (A), \
+ (__v16sf) (B), (__v16sf) (C), \
+ (__mmask16) (U), (R)); })
+
+
+static __inline__ __m512 __DEFAULT_FN_ATTRS
+_mm512_mask3_fmsubadd_ps(__m512 __A, __m512 __B, __m512 __C, __mmask16 __U)
+{
+ return (__m512) __builtin_ia32_vfmsubaddps512_mask3 ((__v16sf) __A,
+ (__v16sf) __B,
+ (__v16sf) __C,
+ (__mmask16) __U,
+ _MM_FROUND_CUR_DIRECTION);
+}
+
+#define _mm512_mask_fnmadd_round_pd(A, U, B, C, R) __extension__ ({ \
+ (__m512d) __builtin_ia32_vfnmaddpd512_mask ((__v8df) (A), \
+ (__v8df) (B), (__v8df) (C), \
+ (__mmask8) (U), (R)); })
+
+
+static __inline__ __m512d __DEFAULT_FN_ATTRS
+_mm512_mask_fnmadd_pd(__m512d __A, __mmask8 __U, __m512d __B, __m512d __C)
+{
+ return (__m512d) __builtin_ia32_vfnmaddpd512_mask ((__v8df) __A,
+ (__v8df) __B,
+ (__v8df) __C,
+ (__mmask8) __U,
+ _MM_FROUND_CUR_DIRECTION);
+}
+
+#define _mm512_mask_fnmadd_round_ps(A, U, B, C, R) __extension__ ({ \
+ (__m512) __builtin_ia32_vfnmaddps512_mask ((__v16sf) (A), \
+ (__v16sf) (B), (__v16sf) (C), \
+ (__mmask16) (U), (R)); })
+
+
+static __inline__ __m512 __DEFAULT_FN_ATTRS
+_mm512_mask_fnmadd_ps(__m512 __A, __mmask16 __U, __m512 __B, __m512 __C)
+{
+ return (__m512) __builtin_ia32_vfnmaddps512_mask ((__v16sf) __A,
+ (__v16sf) __B,
+ (__v16sf) __C,
+ (__mmask16) __U,
+ _MM_FROUND_CUR_DIRECTION);
+}
+
+#define _mm512_mask_fnmsub_round_pd(A, U, B, C, R) __extension__ ({ \
+ (__m512d) __builtin_ia32_vfnmsubpd512_mask ((__v8df) (A), \
+ (__v8df) (B), (__v8df) (C), \
+ (__mmask8) (U), (R)); })
+
+
+#define _mm512_mask3_fnmsub_round_pd(A, B, C, U, R) __extension__ ({ \
+ (__m512d) __builtin_ia32_vfnmsubpd512_mask3 ((__v8df) (A), \
+ (__v8df) (B), (__v8df) (C), \
+ (__mmask8) (U), (R)); })
+
+
+static __inline__ __m512d __DEFAULT_FN_ATTRS
+_mm512_mask_fnmsub_pd(__m512d __A, __mmask8 __U, __m512d __B, __m512d __C)
+{
+ return (__m512d) __builtin_ia32_vfnmsubpd512_mask ((__v8df) __A,
+ (__v8df) __B,
+ (__v8df) __C,
+ (__mmask8) __U,
+ _MM_FROUND_CUR_DIRECTION);
+}
+
+static __inline__ __m512d __DEFAULT_FN_ATTRS
+_mm512_mask3_fnmsub_pd(__m512d __A, __m512d __B, __m512d __C, __mmask8 __U)
+{
+ return (__m512d) __builtin_ia32_vfnmsubpd512_mask3 ((__v8df) __A,
+ (__v8df) __B,
+ (__v8df) __C,
+ (__mmask8) __U,
+ _MM_FROUND_CUR_DIRECTION);
+}
+
+#define _mm512_mask_fnmsub_round_ps(A, U, B, C, R) __extension__ ({ \
+ (__m512) __builtin_ia32_vfnmsubps512_mask ((__v16sf) (A), \
+ (__v16sf) (B), (__v16sf) (C), \
+ (__mmask16) (U), (R)); })
+
+
+#define _mm512_mask3_fnmsub_round_ps(A, B, C, U, R) __extension__ ({ \
+ (__m512) __builtin_ia32_vfnmsubps512_mask3 ((__v16sf) (A), \
+ (__v16sf) (B), (__v16sf) (C), \
+ (__mmask16) (U), (R)); })
+
+
+static __inline__ __m512 __DEFAULT_FN_ATTRS
+_mm512_mask_fnmsub_ps(__m512 __A, __mmask16 __U, __m512 __B, __m512 __C)
+{
+ return (__m512) __builtin_ia32_vfnmsubps512_mask ((__v16sf) __A,
+ (__v16sf) __B,
+ (__v16sf) __C,
+ (__mmask16) __U,
+ _MM_FROUND_CUR_DIRECTION);
+}
+
+static __inline__ __m512 __DEFAULT_FN_ATTRS
+_mm512_mask3_fnmsub_ps(__m512 __A, __m512 __B, __m512 __C, __mmask16 __U)
+{
+ return (__m512) __builtin_ia32_vfnmsubps512_mask3 ((__v16sf) __A,
+ (__v16sf) __B,
+ (__v16sf) __C,
+ (__mmask16) __U,
+ _MM_FROUND_CUR_DIRECTION);
+}
+
+/* Vector permutations */
+
+static __inline __m512i __DEFAULT_FN_ATTRS
+_mm512_permutex2var_epi32(__m512i __A, __m512i __I, __m512i __B)
+{
+ return (__m512i) __builtin_ia32_vpermt2vard512_mask ((__v16si) __I /* idx */,
+ (__v16si) __A,
+ (__v16si) __B,
+ (__mmask16) -1);
+}
+static __inline __m512i __DEFAULT_FN_ATTRS
+_mm512_permutex2var_epi64(__m512i __A, __m512i __I, __m512i __B)
+{
+ return (__m512i) __builtin_ia32_vpermt2varq512_mask ((__v8di) __I /* idx */,
+ (__v8di) __A,
+ (__v8di) __B,
+ (__mmask8) -1);
+}
+
+static __inline __m512d __DEFAULT_FN_ATTRS
+_mm512_permutex2var_pd(__m512d __A, __m512i __I, __m512d __B)
+{
+ return (__m512d) __builtin_ia32_vpermt2varpd512_mask ((__v8di) __I /* idx */,
+ (__v8df) __A,
+ (__v8df) __B,
+ (__mmask8) -1);
+}
+static __inline __m512 __DEFAULT_FN_ATTRS
+_mm512_permutex2var_ps(__m512 __A, __m512i __I, __m512 __B)
+{
+ return (__m512) __builtin_ia32_vpermt2varps512_mask ((__v16si) __I /* idx */,
+ (__v16sf) __A,
+ (__v16sf) __B,
+ (__mmask16) -1);
+}
+
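+/* Usage sketch (illustrative): permutex2var selects each destination lane
+ * from the concatenation of __A and __B using the matching lane of __I; for
+ * the 16-lane forms, index bit 4 picks __B:
+ *
+ *   __m512i idx = _mm512_set1_epi32(16);           // lane 0 of __B
+ *   __m512 r = _mm512_permutex2var_ps(a, idx, b);  // broadcasts b[0]
+ */
+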
+#define _mm512_alignr_epi64(A, B, I) __extension__ ({ \
+ (__m512i)__builtin_ia32_alignq512_mask((__v8di)(__m512i)(A), \
+ (__v8di)(__m512i)(B), \
+ (I), (__v8di)_mm512_setzero_si512(), \
+ (__mmask8)-1); })
+
+#define _mm512_alignr_epi32(A, B, I) __extension__ ({ \
+ (__m512i)__builtin_ia32_alignd512_mask((__v16si)(__m512i)(A), \
+ (__v16si)(__m512i)(B), \
+ (I), (__v16si)_mm512_setzero_si512(), \
+ (__mmask16)-1); })
+
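+/* Usage sketch (illustrative): valign concatenates A (high) with B (low) and
+ * shifts the pair right by I whole elements, keeping the low 512 bits:
+ *
+ *   __m512i r = _mm512_alignr_epi32(a, b, 1);
+ *   //   r = { b[1], ..., b[15], a[0] }
+ */
+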
+/* Vector Extract */
+
+#define _mm512_extractf64x4_pd(A, I) __extension__ ({ \
+ (__m256d) \
+ __builtin_ia32_extractf64x4_mask((__v8df)(__m512d)(A), \
+ (I), \
+ (__v4df)_mm256_setzero_pd(), \
+ (__mmask8) -1); })
+
+#define _mm512_extractf32x4_ps(A, I) __extension__ ({ \
+ (__m128) \
+ __builtin_ia32_extractf32x4_mask((__v16sf)(__m512)(A), \
+ (I), \
+ (__v4sf)_mm_setzero_ps(), \
+ (__mmask8) -1); })
+
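+/* Usage sketch (illustrative): the extract macros pull one 256-bit (f64x4)
+ * or 128-bit (f32x4) slice out of a 512-bit vector; I selects the slice:
+ *
+ *   __m256d hi = _mm512_extractf64x4_pd(v, 1);  // upper four doubles
+ *   __m128  lo = _mm512_extractf32x4_ps(w, 0);  // lowest four floats
+ */
+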
+/* Vector Blend */
+
+static __inline __m512d __DEFAULT_FN_ATTRS
+_mm512_mask_blend_pd(__mmask8 __U, __m512d __A, __m512d __W)
+{
+ return (__m512d) __builtin_ia32_blendmpd_512_mask ((__v8df) __A,
+ (__v8df) __W,
+ (__mmask8) __U);
+}
+
+static __inline __m512 __DEFAULT_FN_ATTRS
+_mm512_mask_blend_ps(__mmask16 __U, __m512 __A, __m512 __W)
+{
+ return (__m512) __builtin_ia32_blendmps_512_mask ((__v16sf) __A,
+ (__v16sf) __W,
+ (__mmask16) __U);
+}
+
+static __inline __m512i __DEFAULT_FN_ATTRS
+_mm512_mask_blend_epi64(__mmask8 __U, __m512i __A, __m512i __W)
+{
+ return (__m512i) __builtin_ia32_blendmq_512_mask ((__v8di) __A,
+ (__v8di) __W,
+ (__mmask8) __U);
+}
+
+static __inline __m512i __DEFAULT_FN_ATTRS
+_mm512_mask_blend_epi32(__mmask16 __U, __m512i __A, __m512i __W)
+{
+ return (__m512i) __builtin_ia32_blendmd_512_mask ((__v16si) __A,
+ (__v16si) __W,
+ (__mmask16) __U);
+}
+
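+/* Usage sketch (illustrative): blend picks __W where the mask bit is set and
+ * __A where it is clear:
+ *
+ *   __m512d r = _mm512_mask_blend_pd((__mmask8)0xF0, a, w);
+ *   //   lanes 0..3 from a, lanes 4..7 from w
+ */
+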
+/* Compare */
+
+#define _mm512_cmp_round_ps_mask(A, B, P, R) __extension__ ({ \
+ (__mmask16)__builtin_ia32_cmpps512_mask((__v16sf)(__m512)(A), \
+ (__v16sf)(__m512)(B), \
+ (P), (__mmask16)-1, (R)); })
+
+#define _mm512_mask_cmp_round_ps_mask(U, A, B, P, R) __extension__ ({ \
+ (__mmask16)__builtin_ia32_cmpps512_mask((__v16sf)(__m512)(A), \
+ (__v16sf)(__m512)(B), \
+ (P), (__mmask16)(U), (R)); })
+
+#define _mm512_cmp_ps_mask(A, B, P) \
+ _mm512_cmp_round_ps_mask((A), (B), (P), _MM_FROUND_CUR_DIRECTION)
+
+#define _mm512_mask_cmp_ps_mask(U, A, B, P) \
+ _mm512_mask_cmp_round_ps_mask((U), (A), (B), (P), _MM_FROUND_CUR_DIRECTION)
+
+#define _mm512_cmp_round_pd_mask(A, B, P, R) __extension__ ({ \
+ (__mmask8)__builtin_ia32_cmppd512_mask((__v8df)(__m512d)(A), \
+ (__v8df)(__m512d)(B), \
+ (P), (__mmask8)-1, (R)); })
+
+#define _mm512_mask_cmp_round_pd_mask(U, A, B, P, R) __extension__ ({ \
+ (__mmask8)__builtin_ia32_cmppd512_mask((__v8df)(__m512d)(A), \
+ (__v8df)(__m512d)(B), \
+ (P), (__mmask8)(U), (R)); })
+
+#define _mm512_cmp_pd_mask(A, B, P) \
+ _mm512_cmp_round_pd_mask((A), (B), (P), _MM_FROUND_CUR_DIRECTION)
+
+#define _mm512_mask_cmp_pd_mask(U, A, B, P) \
+ _mm512_mask_cmp_round_pd_mask((U), (A), (B), (P), _MM_FROUND_CUR_DIRECTION)
+
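+/* Usage sketch (illustrative): P is one of the _CMP_* predicates from
+ * <avxintrin.h>; the result has bit i set where the comparison holds in
+ * lane i:
+ *
+ *   __mmask8 m = _mm512_cmp_pd_mask(a, b, _CMP_LT_OS);  // a[i] < b[i]
+ */
+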
+/* Conversion */
+
+static __inline __m512i __DEFAULT_FN_ATTRS
+_mm512_cvttps_epu32(__m512 __A)
+{
+ return (__m512i) __builtin_ia32_cvttps2udq512_mask ((__v16sf) __A,
+ (__v16si) _mm512_setzero_si512 (),
+ (__mmask16) -1,
+ _MM_FROUND_CUR_DIRECTION);
+}
+
+#define _mm512_cvt_roundepi32_ps(A, R) __extension__ ({ \
+ (__m512)__builtin_ia32_cvtdq2ps512_mask((__v16si)(A), \
+ (__v16sf)_mm512_setzero_ps(), \
+ (__mmask16)-1, (R)); })
+
+#define _mm512_cvt_roundepu32_ps(A, R) __extension__ ({ \
+ (__m512)__builtin_ia32_cvtudq2ps512_mask((__v16si)(A), \
+ (__v16sf)_mm512_setzero_ps(), \
+ (__mmask16)-1, (R)); })
+
+static __inline __m512d __DEFAULT_FN_ATTRS
+_mm512_cvtepi32_pd(__m256i __A)
+{
+ return (__m512d) __builtin_ia32_cvtdq2pd512_mask ((__v8si) __A,
+ (__v8df) _mm512_setzero_pd (),
+ (__mmask8) -1);
+}
+
+static __inline __m512d __DEFAULT_FN_ATTRS
+_mm512_cvtepu32_pd(__m256i __A)
+{
+ return (__m512d) __builtin_ia32_cvtudq2pd512_mask ((__v8si) __A,
+ (__v8df)
+ _mm512_setzero_pd (),
+ (__mmask8) -1);
+}
+
+#define _mm512_cvt_roundpd_ps(A, R) __extension__ ({ \
+ (__m256)__builtin_ia32_cvtpd2ps512_mask((__v8df)(A), \
+ (__v8sf)_mm256_setzero_ps(), \
+ (__mmask8)-1, (R)); })
+
+#define _mm512_cvtps_ph(A, I) __extension__ ({ \
+ (__m256i)__builtin_ia32_vcvtps2ph512_mask((__v16sf)(A), (I), \
+ (__v16hi)_mm256_setzero_si256(), \
+ -1); })
+
+static __inline __m512 __DEFAULT_FN_ATTRS
+_mm512_cvtph_ps(__m256i __A)
+{
+ return (__m512) __builtin_ia32_vcvtph2ps512_mask ((__v16hi) __A,
+ (__v16sf) _mm512_setzero_ps (),
+ (__mmask16) -1,
+ _MM_FROUND_CUR_DIRECTION);
+}
+
+static __inline __m512i __DEFAULT_FN_ATTRS
+_mm512_cvttps_epi32(__m512 __a)
+{
+ return (__m512i)
+ __builtin_ia32_cvttps2dq512_mask((__v16sf) __a,
+ (__v16si) _mm512_setzero_si512 (),
+ (__mmask16) -1, _MM_FROUND_CUR_DIRECTION);
+}
+
+static __inline __m256i __DEFAULT_FN_ATTRS
+_mm512_cvttpd_epi32(__m512d __a)
+{
+ return (__m256i)__builtin_ia32_cvttpd2dq512_mask((__v8df) __a,
+ (__v8si)_mm256_setzero_si256(),
+ (__mmask8) -1,
+ _MM_FROUND_CUR_DIRECTION);
+}
+
+#define _mm512_cvtt_roundpd_epi32(A, R) __extension__ ({ \
+ (__m256i)__builtin_ia32_cvttpd2dq512_mask((__v8df)(A), \
+ (__v8si)_mm256_setzero_si256(), \
+ (__mmask8)-1, (R)); })
+
+#define _mm512_cvtt_roundps_epi32(A, R) __extension__ ({ \
+ (__m512i)__builtin_ia32_cvttps2dq512_mask((__v16sf)(A), \
+ (__v16si)_mm512_setzero_si512(), \
+ (__mmask16)-1, (R)); })
+
+#define _mm512_cvt_roundps_epi32(A, R) __extension__ ({ \
+ (__m512i)__builtin_ia32_cvtps2dq512_mask((__v16sf)(A), \
+ (__v16si)_mm512_setzero_si512(), \
+ (__mmask16)-1, (R)); })
+
+#define _mm512_cvt_roundpd_epi32(A, R) __extension__ ({ \
+ (__m256i)__builtin_ia32_cvtpd2dq512_mask((__v8df)(A), \
+ (__v8si)_mm256_setzero_si256(), \
+ (__mmask8)-1, (R)); })
+
+#define _mm512_cvt_roundps_epu32(A, R) __extension__ ({ \
+ (__m512i)__builtin_ia32_cvtps2udq512_mask((__v16sf)(A), \
+ (__v16si)_mm512_setzero_si512(), \
+ (__mmask16)-1, (R)); })
+
+#define _mm512_cvt_roundpd_epu32(A, R) __extension__ ({ \
+ (__m256i)__builtin_ia32_cvtpd2udq512_mask((__v8df)(A), \
+ (__v8si)_mm256_setzero_si256(), \
+ (__mmask8) -1, (R)); })
+
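+/* Usage sketch (illustrative): cvt converts with rounding while cvtt
+ * truncates toward zero; the epu32 forms produce unsigned integers:
+ *
+ *   __m512i i = _mm512_cvttps_epi32(f);  // truncate each float to int32
+ *   __m512i u = _mm512_cvttps_epu32(f);  // truncate each float to uint32
+ */
+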
+/* Unpack and Interleave */
+static __inline __m512d __DEFAULT_FN_ATTRS
+_mm512_unpackhi_pd(__m512d __a, __m512d __b)
+{
+ return __builtin_shufflevector(__a, __b, 1, 9, 1+2, 9+2, 1+4, 9+4, 1+6, 9+6);
+}
+
+static __inline __m512d __DEFAULT_FN_ATTRS
+_mm512_unpacklo_pd(__m512d __a, __m512d __b)
+{
+ return __builtin_shufflevector(__a, __b, 0, 8, 0+2, 8+2, 0+4, 8+4, 0+6, 8+6);
+}
+
+static __inline __m512 __DEFAULT_FN_ATTRS
+_mm512_unpackhi_ps(__m512 __a, __m512 __b)
+{
+ return __builtin_shufflevector(__a, __b,
+ 2, 18, 3, 19,
+ 2+4, 18+4, 3+4, 19+4,
+ 2+8, 18+8, 3+8, 19+8,
+ 2+12, 18+12, 3+12, 19+12);
+}
+
+static __inline __m512 __DEFAULT_FN_ATTRS
+_mm512_unpacklo_ps(__m512 __a, __m512 __b)
+{
+ return __builtin_shufflevector(__a, __b,
+ 0, 16, 1, 17,
+ 0+4, 16+4, 1+4, 17+4,
+ 0+8, 16+8, 1+8, 17+8,
+ 0+12, 16+12, 1+12, 17+12);
+}
+
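+/* Usage sketch (illustrative): unpack interleaves __a and __b within each
+ * 128-bit block; for pd, unpacklo takes the low element of each block from
+ * both sources:
+ *
+ *   __m512d r = _mm512_unpacklo_pd(a, b);
+ *   //   r = { a[0], b[0], a[2], b[2], a[4], b[4], a[6], b[6] }
+ */
+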
+/* Bit Test */
+
+static __inline __mmask16 __DEFAULT_FN_ATTRS
+_mm512_test_epi32_mask(__m512i __A, __m512i __B)
+{
+ return (__mmask16) __builtin_ia32_ptestmd512 ((__v16si) __A,
+ (__v16si) __B,
+ (__mmask16) -1);
+}
+
+static __inline __mmask8 __DEFAULT_FN_ATTRS
+_mm512_test_epi64_mask(__m512i __A, __m512i __B)
+{
+ return (__mmask8) __builtin_ia32_ptestmq512 ((__v8di) __A,
+ (__v8di) __B,
+ (__mmask8) -1);
+}
+
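+/* Usage sketch (illustrative): test sets mask bit i when (__A[i] & __B[i])
+ * is nonzero, which gives a cheap per-lane nonzero check:
+ *
+ *   __mmask16 nz = _mm512_test_epi32_mask(v, v);  // bit i set iff v[i] != 0
+ */
+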
+/* SIMD load ops */
+
+static __inline __m512i __DEFAULT_FN_ATTRS
+_mm512_maskz_loadu_epi32(__mmask16 __U, void const *__P)
+{
+ return (__m512i) __builtin_ia32_loaddqusi512_mask ((const __v16si *)__P,
+ (__v16si) _mm512_setzero_si512 (),
+ (__mmask16) __U);
+}
+
+static __inline __m512i __DEFAULT_FN_ATTRS
+_mm512_maskz_loadu_epi64(__mmask8 __U, void const *__P)
+{
+ return (__m512i) __builtin_ia32_loaddqudi512_mask ((const __v8di *)__P,
+ (__v8di) _mm512_setzero_si512 (),
+ (__mmask8) __U);
+}
+
+static __inline __m512 __DEFAULT_FN_ATTRS
+_mm512_maskz_loadu_ps(__mmask16 __U, void const *__P)
+{
+ return (__m512) __builtin_ia32_loadups512_mask ((const __v16sf *)__P,
+ (__v16sf) _mm512_setzero_ps (),
+ (__mmask16) __U);
+}
+
+static __inline __m512d __DEFAULT_FN_ATTRS
+_mm512_maskz_loadu_pd(__mmask8 __U, void const *__P)
+{
+ return (__m512d) __builtin_ia32_loadupd512_mask ((const __v8df *)__P,
+ (__v8df) _mm512_setzero_pd (),
+ (__mmask8) __U);
+}
+
+static __inline __m512 __DEFAULT_FN_ATTRS
+_mm512_maskz_load_ps(__mmask16 __U, void const *__P)
+{
+ return (__m512) __builtin_ia32_loadaps512_mask ((const __v16sf *)__P,
+ (__v16sf) _mm512_setzero_ps (),
+ (__mmask16) __U);
+}
+
+static __inline __m512d __DEFAULT_FN_ATTRS
+_mm512_maskz_load_pd(__mmask8 __U, void const *__P)
+{
+ return (__m512d) __builtin_ia32_loadapd512_mask ((const __v8df *)__P,
+ (__v8df) _mm512_setzero_pd (),
+ (__mmask8) __U);
+}
+
+static __inline __m512d __DEFAULT_FN_ATTRS
+_mm512_loadu_pd(double const *__p)
+{
+ struct __loadu_pd {
+ __m512d __v;
+ } __attribute__((__packed__, __may_alias__));
+ return ((struct __loadu_pd*)__p)->__v;
+}
+
+static __inline __m512 __DEFAULT_FN_ATTRS
+_mm512_loadu_ps(float const *__p)
+{
+ struct __loadu_ps {
+ __m512 __v;
+ } __attribute__((__packed__, __may_alias__));
+ return ((struct __loadu_ps*)__p)->__v;
+}
+
+static __inline __m512 __DEFAULT_FN_ATTRS
+_mm512_load_ps(float const *__p)
+{
+ return (__m512) __builtin_ia32_loadaps512_mask ((const __v16sf *)__p,
+ (__v16sf) _mm512_setzero_ps (),
+ (__mmask16) -1);
+}
+
+static __inline __m512d __DEFAULT_FN_ATTRS
+_mm512_load_pd(double const *__p)
+{
+ return (__m512d) __builtin_ia32_loadapd512_mask ((const __v8df *)__p,
+ (__v8df) _mm512_setzero_pd (),
+ (__mmask8) -1);
+}
+
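+/* Usage sketch (illustrative): loadu tolerates any alignment while load
+ * requires 64-byte alignment; the maskz forms zero unselected lanes and
+ * suppress faults on them, which makes a short tail read safe:
+ *
+ *   double buf[5] = { 1, 2, 3, 4, 5 };
+ *   __m512d t = _mm512_maskz_loadu_pd((__mmask8)0x1F, buf);  // 5 lanes, rest 0
+ */
+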
+/* SIMD store ops */
+
+static __inline void __DEFAULT_FN_ATTRS
+_mm512_mask_storeu_epi64(void *__P, __mmask8 __U, __m512i __A)
+{
+ __builtin_ia32_storedqudi512_mask ((__v8di *)__P, (__v8di) __A,
+ (__mmask8) __U);
+}
+
+static __inline void __DEFAULT_FN_ATTRS
+_mm512_mask_storeu_epi32(void *__P, __mmask16 __U, __m512i __A)
+{
+ __builtin_ia32_storedqusi512_mask ((__v16si *)__P, (__v16si) __A,
+ (__mmask16) __U);
+}
+
+static __inline void __DEFAULT_FN_ATTRS
+_mm512_mask_storeu_pd(void *__P, __mmask8 __U, __m512d __A)
+{
+ __builtin_ia32_storeupd512_mask ((__v8df *)__P, (__v8df) __A, (__mmask8) __U);
+}
+
+static __inline void __DEFAULT_FN_ATTRS
+_mm512_storeu_pd(void *__P, __m512d __A)
+{
+ __builtin_ia32_storeupd512_mask((__v8df *)__P, (__v8df)__A, (__mmask8)-1);
+}
+
+static __inline void __DEFAULT_FN_ATTRS
+_mm512_mask_storeu_ps(void *__P, __mmask16 __U, __m512 __A)
+{
+ __builtin_ia32_storeups512_mask ((__v16sf *)__P, (__v16sf) __A,
+ (__mmask16) __U);
+}
+
+static __inline void __DEFAULT_FN_ATTRS
+_mm512_storeu_ps(void *__P, __m512 __A)
+{
+ __builtin_ia32_storeups512_mask((__v16sf *)__P, (__v16sf)__A, (__mmask16)-1);
+}
+
+static __inline void __DEFAULT_FN_ATTRS
+_mm512_mask_store_pd(void *__P, __mmask8 __U, __m512d __A)
+{
+ __builtin_ia32_storeapd512_mask ((__v8df *)__P, (__v8df) __A, (__mmask8) __U);
+}
+
+static __inline void __DEFAULT_FN_ATTRS
+_mm512_store_pd(void *__P, __m512d __A)
+{
+ *(__m512d*)__P = __A;
+}
+
+static __inline void __DEFAULT_FN_ATTRS
+_mm512_mask_store_ps(void *__P, __mmask16 __U, __m512 __A)
+{
+ __builtin_ia32_storeaps512_mask ((__v16sf *)__P, (__v16sf) __A,
+ (__mmask16) __U);
+}
+
+static __inline void __DEFAULT_FN_ATTRS
+_mm512_store_ps(void *__P, __m512 __A)
+{
+ *(__m512*)__P = __A;
+}
+
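+/* Usage sketch (illustrative): the masked stores write only lanes whose mask
+ * bit is set and leave the rest of memory untouched:
+ *
+ *   _mm512_mask_storeu_pd(buf, (__mmask8)0x1F, v);  // write first 5 doubles
+ */
+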
+/* Mask ops */
+
+static __inline __mmask16 __DEFAULT_FN_ATTRS
+_mm512_knot(__mmask16 __M)
+{
+ return __builtin_ia32_knothi(__M);
+}
+
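+/* Usage sketch (illustrative): knot complements all 16 mask bits:
+ *
+ *   __mmask16 inv = _mm512_knot(m);  // inv == (__mmask16)~m
+ */
+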
+/* Integer compare */
+
+static __inline__ __mmask16 __DEFAULT_FN_ATTRS
+_mm512_cmpeq_epi32_mask(__m512i __a, __m512i __b) {
+ return (__mmask16)__builtin_ia32_pcmpeqd512_mask((__v16si)__a, (__v16si)__b,
+ (__mmask16)-1);
+}
+
+static __inline__ __mmask16 __DEFAULT_FN_ATTRS
+_mm512_mask_cmpeq_epi32_mask(__mmask16 __u, __m512i __a, __m512i __b) {
+ return (__mmask16)__builtin_ia32_pcmpeqd512_mask((__v16si)__a, (__v16si)__b,
+ __u);
+}
+
+static __inline__ __mmask16 __DEFAULT_FN_ATTRS
+_mm512_cmpeq_epu32_mask(__m512i __a, __m512i __b) {
+ return (__mmask16)__builtin_ia32_ucmpd512_mask((__v16si)__a, (__v16si)__b, 0,
+ (__mmask16)-1);
+}
+
+static __inline__ __mmask16 __DEFAULT_FN_ATTRS
+_mm512_mask_cmpeq_epu32_mask(__mmask16 __u, __m512i __a, __m512i __b) {
+ return (__mmask16)__builtin_ia32_ucmpd512_mask((__v16si)__a, (__v16si)__b, 0,
+ __u);
+}
+
+static __inline__ __mmask8 __DEFAULT_FN_ATTRS
+_mm512_mask_cmpeq_epi64_mask(__mmask8 __u, __m512i __a, __m512i __b) {
+ return (__mmask8)__builtin_ia32_pcmpeqq512_mask((__v8di)__a, (__v8di)__b,
+ __u);
+}
+
+static __inline__ __mmask8 __DEFAULT_FN_ATTRS
+_mm512_cmpeq_epi64_mask(__m512i __a, __m512i __b) {
+ return (__mmask8)__builtin_ia32_pcmpeqq512_mask((__v8di)__a, (__v8di)__b,
+ (__mmask8)-1);
+}
+
+static __inline__ __mmask8 __DEFAULT_FN_ATTRS
+_mm512_cmpeq_epu64_mask(__m512i __a, __m512i __b) {
+ return (__mmask8)__builtin_ia32_ucmpq512_mask((__v8di)__a, (__v8di)__b, 0,
+ (__mmask8)-1);
+}
+
+static __inline__ __mmask8 __DEFAULT_FN_ATTRS
+_mm512_mask_cmpeq_epu64_mask(__mmask8 __u, __m512i __a, __m512i __b) {
+ return (__mmask8)__builtin_ia32_ucmpq512_mask((__v8di)__a, (__v8di)__b, 0,
+ __u);
+}
+
+static __inline__ __mmask16 __DEFAULT_FN_ATTRS
+_mm512_cmpge_epi32_mask(__m512i __a, __m512i __b) {
+ return (__mmask16)__builtin_ia32_cmpd512_mask((__v16si)__a, (__v16si)__b, 5,
+ (__mmask16)-1);
+}
+
+static __inline__ __mmask16 __DEFAULT_FN_ATTRS
+_mm512_mask_cmpge_epi32_mask(__mmask16 __u, __m512i __a, __m512i __b) {
+ return (__mmask16)__builtin_ia32_cmpd512_mask((__v16si)__a, (__v16si)__b, 5,
+ __u);
+}
+
+static __inline__ __mmask16 __DEFAULT_FN_ATTRS
+_mm512_cmpge_epu32_mask(__m512i __a, __m512i __b) {
+ return (__mmask16)__builtin_ia32_ucmpd512_mask((__v16si)__a, (__v16si)__b, 5,
+ (__mmask16)-1);
+}
+
+static __inline__ __mmask16 __DEFAULT_FN_ATTRS
+_mm512_mask_cmpge_epu32_mask(__mmask16 __u, __m512i __a, __m512i __b) {
+ return (__mmask16)__builtin_ia32_ucmpd512_mask((__v16si)__a, (__v16si)__b, 5,
+ __u);
+}
+
+static __inline__ __mmask8 __DEFAULT_FN_ATTRS
+_mm512_cmpge_epi64_mask(__m512i __a, __m512i __b) {
+ return (__mmask8)__builtin_ia32_cmpq512_mask((__v8di)__a, (__v8di)__b, 5,
+ (__mmask8)-1);
+}
+
+static __inline__ __mmask8 __DEFAULT_FN_ATTRS
+_mm512_mask_cmpge_epi64_mask(__mmask8 __u, __m512i __a, __m512i __b) {
+ return (__mmask8)__builtin_ia32_cmpq512_mask((__v8di)__a, (__v8di)__b, 5,
+ __u);
+}
+
+static __inline__ __mmask8 __DEFAULT_FN_ATTRS
+_mm512_cmpge_epu64_mask(__m512i __a, __m512i __b) {
+ return (__mmask8)__builtin_ia32_ucmpq512_mask((__v8di)__a, (__v8di)__b, 5,
+ (__mmask8)-1);
+}
+
+static __inline__ __mmask8 __DEFAULT_FN_ATTRS
+_mm512_mask_cmpge_epu64_mask(__mmask8 __u, __m512i __a, __m512i __b) {
+ return (__mmask8)__builtin_ia32_ucmpq512_mask((__v8di)__a, (__v8di)__b, 5,
+ __u);
+}
+
+static __inline__ __mmask16 __DEFAULT_FN_ATTRS
+_mm512_cmpgt_epi32_mask(__m512i __a, __m512i __b) {
+ return (__mmask16)__builtin_ia32_pcmpgtd512_mask((__v16si)__a, (__v16si)__b,
+ (__mmask16)-1);
+}
+
+static __inline__ __mmask16 __DEFAULT_FN_ATTRS
+_mm512_mask_cmpgt_epi32_mask(__mmask16 __u, __m512i __a, __m512i __b) {
+ return (__mmask16)__builtin_ia32_pcmpgtd512_mask((__v16si)__a, (__v16si)__b,
+ __u);
+}
+
+static __inline__ __mmask16 __DEFAULT_FN_ATTRS
+_mm512_cmpgt_epu32_mask(__m512i __a, __m512i __b) {
+ return (__mmask16)__builtin_ia32_ucmpd512_mask((__v16si)__a, (__v16si)__b, 6,
+ (__mmask16)-1);
+}
+
+static __inline__ __mmask16 __DEFAULT_FN_ATTRS
+_mm512_mask_cmpgt_epu32_mask(__mmask16 __u, __m512i __a, __m512i __b) {
+ return (__mmask16)__builtin_ia32_ucmpd512_mask((__v16si)__a, (__v16si)__b, 6,
+ __u);
+}
+
+static __inline__ __mmask8 __DEFAULT_FN_ATTRS
+_mm512_mask_cmpgt_epi64_mask(__mmask8 __u, __m512i __a, __m512i __b) {
+ return (__mmask8)__builtin_ia32_pcmpgtq512_mask((__v8di)__a, (__v8di)__b,
+ __u);
+}
+
+static __inline__ __mmask8 __DEFAULT_FN_ATTRS
+_mm512_cmpgt_epi64_mask(__m512i __a, __m512i __b) {
+ return (__mmask8)__builtin_ia32_pcmpgtq512_mask((__v8di)__a, (__v8di)__b,
+ (__mmask8)-1);
+}
+
+static __inline__ __mmask8 __DEFAULT_FN_ATTRS
+_mm512_cmpgt_epu64_mask(__m512i __a, __m512i __b) {
+ return (__mmask8)__builtin_ia32_ucmpq512_mask((__v8di)__a, (__v8di)__b, 6,
+ (__mmask8)-1);
+}
+
+static __inline__ __mmask8 __DEFAULT_FN_ATTRS
+_mm512_mask_cmpgt_epu64_mask(__mmask8 __u, __m512i __a, __m512i __b) {
+ return (__mmask8)__builtin_ia32_ucmpq512_mask((__v8di)__a, (__v8di)__b, 6,
+ __u);
+}
+
+static __inline__ __mmask16 __DEFAULT_FN_ATTRS
+_mm512_cmple_epi32_mask(__m512i __a, __m512i __b) {
+ return (__mmask16)__builtin_ia32_cmpd512_mask((__v16si)__a, (__v16si)__b, 2,
+ (__mmask16)-1);
+}
+
+static __inline__ __mmask16 __DEFAULT_FN_ATTRS
+_mm512_mask_cmple_epi32_mask(__mmask16 __u, __m512i __a, __m512i __b) {
+ return (__mmask16)__builtin_ia32_cmpd512_mask((__v16si)__a, (__v16si)__b, 2,
+ __u);
+}
+
+static __inline__ __mmask16 __DEFAULT_FN_ATTRS
+_mm512_cmple_epu32_mask(__m512i __a, __m512i __b) {
+ return (__mmask16)__builtin_ia32_ucmpd512_mask((__v16si)__a, (__v16si)__b, 2,
+ (__mmask16)-1);
+}
+
+static __inline__ __mmask16 __DEFAULT_FN_ATTRS
+_mm512_mask_cmple_epu32_mask(__mmask16 __u, __m512i __a, __m512i __b) {
+ return (__mmask16)__builtin_ia32_ucmpd512_mask((__v16si)__a, (__v16si)__b, 2,
+ __u);
+}
+
+static __inline__ __mmask8 __DEFAULT_FN_ATTRS
+_mm512_cmple_epi64_mask(__m512i __a, __m512i __b) {
+ return (__mmask8)__builtin_ia32_cmpq512_mask((__v8di)__a, (__v8di)__b, 2,
+ (__mmask8)-1);
+}
+
+static __inline__ __mmask8 __DEFAULT_FN_ATTRS
+_mm512_mask_cmple_epi64_mask(__mmask8 __u, __m512i __a, __m512i __b) {
+ return (__mmask8)__builtin_ia32_cmpq512_mask((__v8di)__a, (__v8di)__b, 2,
+ __u);
+}
+
+static __inline__ __mmask8 __DEFAULT_FN_ATTRS
+_mm512_cmple_epu64_mask(__m512i __a, __m512i __b) {
+ return (__mmask8)__builtin_ia32_ucmpq512_mask((__v8di)__a, (__v8di)__b, 2,
+ (__mmask8)-1);
+}
+
+static __inline__ __mmask8 __DEFAULT_FN_ATTRS
+_mm512_mask_cmple_epu64_mask(__mmask8 __u, __m512i __a, __m512i __b) {
+ return (__mmask8)__builtin_ia32_ucmpq512_mask((__v8di)__a, (__v8di)__b, 2,
+ __u);
+}
+
+static __inline__ __mmask16 __DEFAULT_FN_ATTRS
+_mm512_cmplt_epi32_mask(__m512i __a, __m512i __b) {
+ return (__mmask16)__builtin_ia32_cmpd512_mask((__v16si)__a, (__v16si)__b, 1,
+ (__mmask16)-1);
+}
+
+static __inline__ __mmask16 __DEFAULT_FN_ATTRS
+_mm512_mask_cmplt_epi32_mask(__mmask16 __u, __m512i __a, __m512i __b) {
+ return (__mmask16)__builtin_ia32_cmpd512_mask((__v16si)__a, (__v16si)__b, 1,
+ __u);
+}
+
+static __inline__ __mmask16 __DEFAULT_FN_ATTRS
+_mm512_cmplt_epu32_mask(__m512i __a, __m512i __b) {
+ return (__mmask16)__builtin_ia32_ucmpd512_mask((__v16si)__a, (__v16si)__b, 1,
+ (__mmask16)-1);
+}
+
+static __inline__ __mmask16 __DEFAULT_FN_ATTRS
+_mm512_mask_cmplt_epu32_mask(__mmask16 __u, __m512i __a, __m512i __b) {
+ return (__mmask16)__builtin_ia32_ucmpd512_mask((__v16si)__a, (__v16si)__b, 1,
+ __u);
+}
+
+static __inline__ __mmask8 __DEFAULT_FN_ATTRS
+_mm512_cmplt_epi64_mask(__m512i __a, __m512i __b) {
+ return (__mmask8)__builtin_ia32_cmpq512_mask((__v8di)__a, (__v8di)__b, 1,
+ (__mmask8)-1);
+}
+
+static __inline__ __mmask8 __DEFAULT_FN_ATTRS
+_mm512_mask_cmplt_epi64_mask(__mmask8 __u, __m512i __a, __m512i __b) {
+ return (__mmask8)__builtin_ia32_cmpq512_mask((__v8di)__a, (__v8di)__b, 1,
+ __u);
+}
+
+static __inline__ __mmask8 __DEFAULT_FN_ATTRS
+_mm512_cmplt_epu64_mask(__m512i __a, __m512i __b) {
+ return (__mmask8)__builtin_ia32_ucmpq512_mask((__v8di)__a, (__v8di)__b, 1,
+ (__mmask8)-1);
+}
+
+static __inline__ __mmask8 __DEFAULT_FN_ATTRS
+_mm512_mask_cmplt_epu64_mask(__mmask8 __u, __m512i __a, __m512i __b) {
+ return (__mmask8)__builtin_ia32_ucmpq512_mask((__v8di)__a, (__v8di)__b, 1,
+ __u);
+}
+
+static __inline__ __mmask16 __DEFAULT_FN_ATTRS
+_mm512_cmpneq_epi32_mask(__m512i __a, __m512i __b) {
+ return (__mmask16)__builtin_ia32_cmpd512_mask((__v16si)__a, (__v16si)__b, 4,
+ (__mmask16)-1);
+}
+
+static __inline__ __mmask16 __DEFAULT_FN_ATTRS
+_mm512_mask_cmpneq_epi32_mask(__mmask16 __u, __m512i __a, __m512i __b) {
+ return (__mmask16)__builtin_ia32_cmpd512_mask((__v16si)__a, (__v16si)__b, 4,
+ __u);
+}
+
+static __inline__ __mmask16 __DEFAULT_FN_ATTRS
+_mm512_cmpneq_epu32_mask(__m512i __a, __m512i __b) {
+ return (__mmask16)__builtin_ia32_ucmpd512_mask((__v16si)__a, (__v16si)__b, 4,
+ (__mmask16)-1);
+}
+
+static __inline__ __mmask16 __DEFAULT_FN_ATTRS
+_mm512_mask_cmpneq_epu32_mask(__mmask16 __u, __m512i __a, __m512i __b) {
+ return (__mmask16)__builtin_ia32_ucmpd512_mask((__v16si)__a, (__v16si)__b, 4,
+ __u);
+}
+
+static __inline__ __mmask8 __DEFAULT_FN_ATTRS
+_mm512_cmpneq_epi64_mask(__m512i __a, __m512i __b) {
+ return (__mmask8)__builtin_ia32_cmpq512_mask((__v8di)__a, (__v8di)__b, 4,
+ (__mmask8)-1);
+}
+
+static __inline__ __mmask8 __DEFAULT_FN_ATTRS
+_mm512_mask_cmpneq_epi64_mask(__mmask8 __u, __m512i __a, __m512i __b) {
+ return (__mmask8)__builtin_ia32_cmpq512_mask((__v8di)__a, (__v8di)__b, 4,
+ __u);
+}
+
+static __inline__ __mmask8 __DEFAULT_FN_ATTRS
+_mm512_cmpneq_epu64_mask(__m512i __a, __m512i __b) {
+ return (__mmask8)__builtin_ia32_ucmpq512_mask((__v8di)__a, (__v8di)__b, 4,
+ (__mmask8)-1);
+}
+
+static __inline__ __mmask8 __DEFAULT_FN_ATTRS
+_mm512_mask_cmpneq_epu64_mask(__mmask8 __u, __m512i __a, __m512i __b) {
+ return (__mmask8)__builtin_ia32_ucmpq512_mask((__v8di)__a, (__v8di)__b, 4,
+ __u);
+}
+
+#define _mm512_cmp_epi32_mask(a, b, p) __extension__ ({ \
+ (__mmask16)__builtin_ia32_cmpd512_mask((__v16si)(__m512i)(a), \
+ (__v16si)(__m512i)(b), (p), \
+ (__mmask16)-1); })
+
+#define _mm512_cmp_epu32_mask(a, b, p) __extension__ ({ \
+ (__mmask16)__builtin_ia32_ucmpd512_mask((__v16si)(__m512i)(a), \
+ (__v16si)(__m512i)(b), (p), \
+ (__mmask16)-1); })
+
+#define _mm512_cmp_epi64_mask(a, b, p) __extension__ ({ \
+ (__mmask8)__builtin_ia32_cmpq512_mask((__v8di)(__m512i)(a), \
+ (__v8di)(__m512i)(b), (p), \
+ (__mmask8)-1); })
+
+#define _mm512_cmp_epu64_mask(a, b, p) __extension__ ({ \
+ (__mmask8)__builtin_ia32_ucmpq512_mask((__v8di)(__m512i)(a), \
+ (__v8di)(__m512i)(b), (p), \
+ (__mmask8)-1); })
+
+#define _mm512_mask_cmp_epi32_mask(m, a, b, p) __extension__ ({ \
+ (__mmask16)__builtin_ia32_cmpd512_mask((__v16si)(__m512i)(a), \
+ (__v16si)(__m512i)(b), (p), \
+ (__mmask16)(m)); })
+
+#define _mm512_mask_cmp_epu32_mask(m, a, b, p) __extension__ ({ \
+ (__mmask16)__builtin_ia32_ucmpd512_mask((__v16si)(__m512i)(a), \
+ (__v16si)(__m512i)(b), (p), \
+ (__mmask16)(m)); })
+
+#define _mm512_mask_cmp_epi64_mask(m, a, b, p) __extension__ ({ \
+ (__mmask8)__builtin_ia32_cmpq512_mask((__v8di)(__m512i)(a), \
+ (__v8di)(__m512i)(b), (p), \
+ (__mmask8)(m)); })
+
+#define _mm512_mask_cmp_epu64_mask(m, a, b, p) __extension__ ({ \
+ (__mmask8)__builtin_ia32_ucmpq512_mask((__v8di)(__m512i)(a), \
+ (__v8di)(__m512i)(b), (p), \
+ (__mmask8)(m)); })
+
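+/* Illustrative sketch (not part of the API): the mask forms above reduce a
+ * vector comparison to a bitmask, one bit per lane.  For example:
+ *
+ *   __m512i v = _mm512_set1_epi32(3);
+ *   __m512i t = _mm512_set1_epi32(7);
+ *   __mmask16 m = _mm512_cmplt_epi32_mask(v, t);   // 0xFFFF: every lane is LT
+ *   __mmask16 n = _mm512_cmp_epi32_mask(v, t, 1);  // same result: predicate 1 = LT
+ *
+ * The masked variants AND the comparison result with the supplied mask, so
+ * _mm512_mask_cmplt_epi32_mask(0x00FF, v, t) yields 0x00FF here.
+ */
+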
+#undef __DEFAULT_FN_ATTRS
+
+#endif // __AVX512FINTRIN_H
diff --git a/clang-2690385/lib64/clang/3.8/include/avx512vlbwintrin.h b/clang-2690385/lib64/clang/3.8/include/avx512vlbwintrin.h
new file mode 100644
index 00000000..b4542d69
--- /dev/null
+++ b/clang-2690385/lib64/clang/3.8/include/avx512vlbwintrin.h
@@ -0,0 +1,2336 @@
+/*===---- avx512vlbwintrin.h - AVX512VL and AVX512BW intrinsics ------------===
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+ * THE SOFTWARE.
+ *
+ *===-----------------------------------------------------------------------===
+ */
+
+#ifndef __IMMINTRIN_H
+#error "Never use <avx512vlbwintrin.h> directly; include <immintrin.h> instead."
+#endif
+
+#ifndef __AVX512VLBWINTRIN_H
+#define __AVX512VLBWINTRIN_H
+
+/* Define the default attributes for the functions in this file. */
+#define __DEFAULT_FN_ATTRS __attribute__((__always_inline__, __nodebug__, __target__("avx512vl,avx512bw")))
+
+/* Integer compare */
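+/* The immediate operand of the cmp and ucmp builtins used below selects the
+ * comparison predicate: 0 = EQ, 1 = LT, 2 = LE, 4 = NE, 5 = GE (NLT),
+ * 6 = GT (NLE).  The cmp builtins compare lanes as signed integers, the
+ * ucmp builtins as unsigned. */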
+
+static __inline__ __mmask16 __DEFAULT_FN_ATTRS
+_mm_cmpeq_epi8_mask(__m128i __a, __m128i __b) {
+ return (__mmask16)__builtin_ia32_pcmpeqb128_mask((__v16qi)__a, (__v16qi)__b,
+ (__mmask16)-1);
+}
+
+static __inline__ __mmask16 __DEFAULT_FN_ATTRS
+_mm_mask_cmpeq_epi8_mask(__mmask16 __u, __m128i __a, __m128i __b) {
+ return (__mmask16)__builtin_ia32_pcmpeqb128_mask((__v16qi)__a, (__v16qi)__b,
+ __u);
+}
+
+static __inline__ __mmask16 __DEFAULT_FN_ATTRS
+_mm_cmpeq_epu8_mask(__m128i __a, __m128i __b) {
+ return (__mmask16)__builtin_ia32_ucmpb128_mask((__v16qi)__a, (__v16qi)__b, 0,
+ (__mmask16)-1);
+}
+
+static __inline__ __mmask16 __DEFAULT_FN_ATTRS
+_mm_mask_cmpeq_epu8_mask(__mmask16 __u, __m128i __a, __m128i __b) {
+ return (__mmask16)__builtin_ia32_ucmpb128_mask((__v16qi)__a, (__v16qi)__b, 0,
+ __u);
+}
+
+static __inline__ __mmask32 __DEFAULT_FN_ATTRS
+_mm256_cmpeq_epi8_mask(__m256i __a, __m256i __b) {
+ return (__mmask32)__builtin_ia32_pcmpeqb256_mask((__v32qi)__a, (__v32qi)__b,
+ (__mmask32)-1);
+}
+
+static __inline__ __mmask32 __DEFAULT_FN_ATTRS
+_mm256_mask_cmpeq_epi8_mask(__mmask32 __u, __m256i __a, __m256i __b) {
+ return (__mmask32)__builtin_ia32_pcmpeqb256_mask((__v32qi)__a, (__v32qi)__b,
+ __u);
+}
+
+static __inline__ __mmask32 __DEFAULT_FN_ATTRS
+_mm256_cmpeq_epu8_mask(__m256i __a, __m256i __b) {
+ return (__mmask32)__builtin_ia32_ucmpb256_mask((__v32qi)__a, (__v32qi)__b, 0,
+ (__mmask32)-1);
+}
+
+static __inline__ __mmask32 __DEFAULT_FN_ATTRS
+_mm256_mask_cmpeq_epu8_mask(__mmask32 __u, __m256i __a, __m256i __b) {
+ return (__mmask32)__builtin_ia32_ucmpb256_mask((__v32qi)__a, (__v32qi)__b, 0,
+ __u);
+}
+
+static __inline__ __mmask8 __DEFAULT_FN_ATTRS
+_mm_cmpeq_epi16_mask(__m128i __a, __m128i __b) {
+ return (__mmask8)__builtin_ia32_pcmpeqw128_mask((__v8hi)__a, (__v8hi)__b,
+ (__mmask8)-1);
+}
+
+static __inline__ __mmask8 __DEFAULT_FN_ATTRS
+_mm_mask_cmpeq_epi16_mask(__mmask8 __u, __m128i __a, __m128i __b) {
+ return (__mmask8)__builtin_ia32_pcmpeqw128_mask((__v8hi)__a, (__v8hi)__b,
+ __u);
+}
+
+static __inline__ __mmask8 __DEFAULT_FN_ATTRS
+_mm_cmpeq_epu16_mask(__m128i __a, __m128i __b) {
+ return (__mmask8)__builtin_ia32_ucmpw128_mask((__v8hi)__a, (__v8hi)__b, 0,
+ (__mmask8)-1);
+}
+
+static __inline__ __mmask8 __DEFAULT_FN_ATTRS
+_mm_mask_cmpeq_epu16_mask(__mmask8 __u, __m128i __a, __m128i __b) {
+ return (__mmask8)__builtin_ia32_ucmpw128_mask((__v8hi)__a, (__v8hi)__b, 0,
+ __u);
+}
+
+static __inline__ __mmask16 __DEFAULT_FN_ATTRS
+_mm256_cmpeq_epi16_mask(__m256i __a, __m256i __b) {
+ return (__mmask16)__builtin_ia32_pcmpeqw256_mask((__v16hi)__a, (__v16hi)__b,
+ (__mmask16)-1);
+}
+
+static __inline__ __mmask16 __DEFAULT_FN_ATTRS
+_mm256_mask_cmpeq_epi16_mask(__mmask16 __u, __m256i __a, __m256i __b) {
+ return (__mmask16)__builtin_ia32_pcmpeqw256_mask((__v16hi)__a, (__v16hi)__b,
+ __u);
+}
+
+static __inline__ __mmask16 __DEFAULT_FN_ATTRS
+_mm256_cmpeq_epu16_mask(__m256i __a, __m256i __b) {
+ return (__mmask16)__builtin_ia32_ucmpw256_mask((__v16hi)__a, (__v16hi)__b, 0,
+ (__mmask16)-1);
+}
+
+static __inline__ __mmask16 __DEFAULT_FN_ATTRS
+_mm256_mask_cmpeq_epu16_mask(__mmask16 __u, __m256i __a, __m256i __b) {
+ return (__mmask16)__builtin_ia32_ucmpw256_mask((__v16hi)__a, (__v16hi)__b, 0,
+ __u);
+}
+
+static __inline__ __mmask16 __DEFAULT_FN_ATTRS
+_mm_cmpge_epi8_mask(__m128i __a, __m128i __b) {
+ return (__mmask16)__builtin_ia32_cmpb128_mask((__v16qi)__a, (__v16qi)__b, 5,
+ (__mmask16)-1);
+}
+
+static __inline__ __mmask16 __DEFAULT_FN_ATTRS
+_mm_mask_cmpge_epi8_mask(__mmask16 __u, __m128i __a, __m128i __b) {
+ return (__mmask16)__builtin_ia32_cmpb128_mask((__v16qi)__a, (__v16qi)__b, 5,
+ __u);
+}
+
+static __inline__ __mmask16 __DEFAULT_FN_ATTRS
+_mm_cmpge_epu8_mask(__m128i __a, __m128i __b) {
+ return (__mmask16)__builtin_ia32_ucmpb128_mask((__v16qi)__a, (__v16qi)__b, 5,
+ (__mmask16)-1);
+}
+
+static __inline__ __mmask16 __DEFAULT_FN_ATTRS
+_mm_mask_cmpge_epu8_mask(__mmask16 __u, __m128i __a, __m128i __b) {
+ return (__mmask16)__builtin_ia32_ucmpb128_mask((__v16qi)__a, (__v16qi)__b, 5,
+ __u);
+}
+
+static __inline__ __mmask32 __DEFAULT_FN_ATTRS
+_mm256_cmpge_epi8_mask(__m256i __a, __m256i __b) {
+ return (__mmask32)__builtin_ia32_cmpb256_mask((__v32qi)__a, (__v32qi)__b, 5,
+ (__mmask32)-1);
+}
+
+static __inline__ __mmask32 __DEFAULT_FN_ATTRS
+_mm256_mask_cmpge_epi8_mask(__mmask32 __u, __m256i __a, __m256i __b) {
+ return (__mmask32)__builtin_ia32_cmpb256_mask((__v32qi)__a, (__v32qi)__b, 5,
+ __u);
+}
+
+static __inline__ __mmask32 __DEFAULT_FN_ATTRS
+_mm256_cmpge_epu8_mask(__m256i __a, __m256i __b) {
+ return (__mmask32)__builtin_ia32_ucmpb256_mask((__v32qi)__a, (__v32qi)__b, 5,
+ (__mmask32)-1);
+}
+
+static __inline__ __mmask32 __DEFAULT_FN_ATTRS
+_mm256_mask_cmpge_epu8_mask(__mmask32 __u, __m256i __a, __m256i __b) {
+ return (__mmask32)__builtin_ia32_ucmpb256_mask((__v32qi)__a, (__v32qi)__b, 5,
+ __u);
+}
+
+static __inline__ __mmask8 __DEFAULT_FN_ATTRS
+_mm_cmpge_epi16_mask(__m128i __a, __m128i __b) {
+ return (__mmask8)__builtin_ia32_cmpw128_mask((__v8hi)__a, (__v8hi)__b, 5,
+ (__mmask8)-1);
+}
+
+static __inline__ __mmask8 __DEFAULT_FN_ATTRS
+_mm_mask_cmpge_epi16_mask(__mmask8 __u, __m128i __a, __m128i __b) {
+ return (__mmask8)__builtin_ia32_cmpw128_mask((__v8hi)__a, (__v8hi)__b, 5,
+ __u);
+}
+
+static __inline__ __mmask8 __DEFAULT_FN_ATTRS
+_mm_cmpge_epu16_mask(__m128i __a, __m128i __b) {
+ return (__mmask8)__builtin_ia32_ucmpw128_mask((__v8hi)__a, (__v8hi)__b, 5,
+ (__mmask8)-1);
+}
+
+static __inline__ __mmask8 __DEFAULT_FN_ATTRS
+_mm_mask_cmpge_epu16_mask(__mmask8 __u, __m128i __a, __m128i __b) {
+ return (__mmask8)__builtin_ia32_ucmpw128_mask((__v8hi)__a, (__v8hi)__b, 5,
+ __u);
+}
+
+static __inline__ __mmask16 __DEFAULT_FN_ATTRS
+_mm256_cmpge_epi16_mask(__m256i __a, __m256i __b) {
+ return (__mmask16)__builtin_ia32_cmpw256_mask((__v16hi)__a, (__v16hi)__b, 5,
+ (__mmask16)-1);
+}
+
+static __inline__ __mmask16 __DEFAULT_FN_ATTRS
+_mm256_mask_cmpge_epi16_mask(__mmask16 __u, __m256i __a, __m256i __b) {
+ return (__mmask16)__builtin_ia32_cmpw256_mask((__v16hi)__a, (__v16hi)__b, 5,
+ __u);
+}
+
+static __inline__ __mmask16 __DEFAULT_FN_ATTRS
+_mm256_cmpge_epu16_mask(__m256i __a, __m256i __b) {
+ return (__mmask16)__builtin_ia32_ucmpw256_mask((__v16hi)__a, (__v16hi)__b, 5,
+ (__mmask16)-1);
+}
+
+static __inline__ __mmask16 __DEFAULT_FN_ATTRS
+_mm256_mask_cmpge_epu16_mask(__mmask16 __u, __m256i __a, __m256i __b) {
+ return (__mmask16)__builtin_ia32_ucmpw256_mask((__v16hi)__a, (__v16hi)__b, 5,
+ __u);
+}
+
+static __inline__ __mmask16 __DEFAULT_FN_ATTRS
+_mm_cmpgt_epi8_mask(__m128i __a, __m128i __b) {
+ return (__mmask16)__builtin_ia32_pcmpgtb128_mask((__v16qi)__a, (__v16qi)__b,
+ (__mmask16)-1);
+}
+
+static __inline__ __mmask16 __DEFAULT_FN_ATTRS
+_mm_mask_cmpgt_epi8_mask(__mmask16 __u, __m128i __a, __m128i __b) {
+ return (__mmask16)__builtin_ia32_pcmpgtb128_mask((__v16qi)__a, (__v16qi)__b,
+ __u);
+}
+
+static __inline__ __mmask16 __DEFAULT_FN_ATTRS
+_mm_cmpgt_epu8_mask(__m128i __a, __m128i __b) {
+ return (__mmask16)__builtin_ia32_ucmpb128_mask((__v16qi)__a, (__v16qi)__b, 6,
+ (__mmask16)-1);
+}
+
+static __inline__ __mmask16 __DEFAULT_FN_ATTRS
+_mm_mask_cmpgt_epu8_mask(__mmask16 __u, __m128i __a, __m128i __b) {
+ return (__mmask16)__builtin_ia32_ucmpb128_mask((__v16qi)__a, (__v16qi)__b, 6,
+ __u);
+}
+
+static __inline__ __mmask32 __DEFAULT_FN_ATTRS
+_mm256_cmpgt_epi8_mask(__m256i __a, __m256i __b) {
+ return (__mmask32)__builtin_ia32_pcmpgtb256_mask((__v32qi)__a, (__v32qi)__b,
+ (__mmask32)-1);
+}
+
+static __inline__ __mmask32 __DEFAULT_FN_ATTRS
+_mm256_mask_cmpgt_epi8_mask(__mmask32 __u, __m256i __a, __m256i __b) {
+ return (__mmask32)__builtin_ia32_pcmpgtb256_mask((__v32qi)__a, (__v32qi)__b,
+ __u);
+}
+
+static __inline__ __mmask32 __DEFAULT_FN_ATTRS
+_mm256_cmpgt_epu8_mask(__m256i __a, __m256i __b) {
+ return (__mmask32)__builtin_ia32_ucmpb256_mask((__v32qi)__a, (__v32qi)__b, 6,
+ (__mmask32)-1);
+}
+
+static __inline__ __mmask32 __DEFAULT_FN_ATTRS
+_mm256_mask_cmpgt_epu8_mask(__mmask32 __u, __m256i __a, __m256i __b) {
+ return (__mmask32)__builtin_ia32_ucmpb256_mask((__v32qi)__a, (__v32qi)__b, 6,
+ __u);
+}
+
+static __inline__ __mmask8 __DEFAULT_FN_ATTRS
+_mm_cmpgt_epi16_mask(__m128i __a, __m128i __b) {
+ return (__mmask8)__builtin_ia32_pcmpgtw128_mask((__v8hi)__a, (__v8hi)__b,
+ (__mmask8)-1);
+}
+
+static __inline__ __mmask8 __DEFAULT_FN_ATTRS
+_mm_mask_cmpgt_epi16_mask(__mmask8 __u, __m128i __a, __m128i __b) {
+ return (__mmask8)__builtin_ia32_pcmpgtw128_mask((__v8hi)__a, (__v8hi)__b,
+ __u);
+}
+
+static __inline__ __mmask8 __DEFAULT_FN_ATTRS
+_mm_cmpgt_epu16_mask(__m128i __a, __m128i __b) {
+ return (__mmask8)__builtin_ia32_ucmpw128_mask((__v8hi)__a, (__v8hi)__b, 6,
+ (__mmask8)-1);
+}
+
+static __inline__ __mmask8 __DEFAULT_FN_ATTRS
+_mm_mask_cmpgt_epu16_mask(__mmask8 __u, __m128i __a, __m128i __b) {
+ return (__mmask8)__builtin_ia32_ucmpw128_mask((__v8hi)__a, (__v8hi)__b, 6,
+ __u);
+}
+
+static __inline__ __mmask16 __DEFAULT_FN_ATTRS
+_mm256_cmpgt_epi16_mask(__m256i __a, __m256i __b) {
+ return (__mmask16)__builtin_ia32_pcmpgtw256_mask((__v16hi)__a, (__v16hi)__b,
+ (__mmask16)-1);
+}
+
+static __inline__ __mmask16 __DEFAULT_FN_ATTRS
+_mm256_mask_cmpgt_epi16_mask(__mmask16 __u, __m256i __a, __m256i __b) {
+ return (__mmask16)__builtin_ia32_pcmpgtw256_mask((__v16hi)__a, (__v16hi)__b,
+ __u);
+}
+
+static __inline__ __mmask16 __DEFAULT_FN_ATTRS
+_mm256_cmpgt_epu16_mask(__m256i __a, __m256i __b) {
+ return (__mmask16)__builtin_ia32_ucmpw256_mask((__v16hi)__a, (__v16hi)__b, 6,
+ (__mmask16)-1);
+}
+
+static __inline__ __mmask16 __DEFAULT_FN_ATTRS
+_mm256_mask_cmpgt_epu16_mask(__mmask16 __u, __m256i __a, __m256i __b) {
+ return (__mmask16)__builtin_ia32_ucmpw256_mask((__v16hi)__a, (__v16hi)__b, 6,
+ __u);
+}
+
+static __inline__ __mmask16 __DEFAULT_FN_ATTRS
+_mm_cmple_epi8_mask(__m128i __a, __m128i __b) {
+ return (__mmask16)__builtin_ia32_cmpb128_mask((__v16qi)__a, (__v16qi)__b, 2,
+ (__mmask16)-1);
+}
+
+static __inline__ __mmask16 __DEFAULT_FN_ATTRS
+_mm_mask_cmple_epi8_mask(__mmask16 __u, __m128i __a, __m128i __b) {
+ return (__mmask16)__builtin_ia32_cmpb128_mask((__v16qi)__a, (__v16qi)__b, 2,
+ __u);
+}
+
+static __inline__ __mmask16 __DEFAULT_FN_ATTRS
+_mm_cmple_epu8_mask(__m128i __a, __m128i __b) {
+ return (__mmask16)__builtin_ia32_ucmpb128_mask((__v16qi)__a, (__v16qi)__b, 2,
+ (__mmask16)-1);
+}
+
+static __inline__ __mmask16 __DEFAULT_FN_ATTRS
+_mm_mask_cmple_epu8_mask(__mmask16 __u, __m128i __a, __m128i __b) {
+ return (__mmask16)__builtin_ia32_ucmpb128_mask((__v16qi)__a, (__v16qi)__b, 2,
+ __u);
+}
+
+static __inline__ __mmask32 __DEFAULT_FN_ATTRS
+_mm256_cmple_epi8_mask(__m256i __a, __m256i __b) {
+ return (__mmask32)__builtin_ia32_cmpb256_mask((__v32qi)__a, (__v32qi)__b, 2,
+ (__mmask32)-1);
+}
+
+static __inline__ __mmask32 __DEFAULT_FN_ATTRS
+_mm256_mask_cmple_epi8_mask(__mmask32 __u, __m256i __a, __m256i __b) {
+ return (__mmask32)__builtin_ia32_cmpb256_mask((__v32qi)__a, (__v32qi)__b, 2,
+ __u);
+}
+
+static __inline__ __mmask32 __DEFAULT_FN_ATTRS
+_mm256_cmple_epu8_mask(__m256i __a, __m256i __b) {
+ return (__mmask32)__builtin_ia32_ucmpb256_mask((__v32qi)__a, (__v32qi)__b, 2,
+ (__mmask32)-1);
+}
+
+static __inline__ __mmask32 __DEFAULT_FN_ATTRS
+_mm256_mask_cmple_epu8_mask(__mmask32 __u, __m256i __a, __m256i __b) {
+ return (__mmask32)__builtin_ia32_ucmpb256_mask((__v32qi)__a, (__v32qi)__b, 2,
+ __u);
+}
+
+static __inline__ __mmask8 __DEFAULT_FN_ATTRS
+_mm_cmple_epi16_mask(__m128i __a, __m128i __b) {
+ return (__mmask8)__builtin_ia32_cmpw128_mask((__v8hi)__a, (__v8hi)__b, 2,
+ (__mmask8)-1);
+}
+
+static __inline__ __mmask8 __DEFAULT_FN_ATTRS
+_mm_mask_cmple_epi16_mask(__mmask8 __u, __m128i __a, __m128i __b) {
+ return (__mmask8)__builtin_ia32_cmpw128_mask((__v8hi)__a, (__v8hi)__b, 2,
+ __u);
+}
+
+static __inline__ __mmask8 __DEFAULT_FN_ATTRS
+_mm_cmple_epu16_mask(__m128i __a, __m128i __b) {
+ return (__mmask8)__builtin_ia32_ucmpw128_mask((__v8hi)__a, (__v8hi)__b, 2,
+ (__mmask8)-1);
+}
+
+static __inline__ __mmask8 __DEFAULT_FN_ATTRS
+_mm_mask_cmple_epu16_mask(__mmask8 __u, __m128i __a, __m128i __b) {
+ return (__mmask8)__builtin_ia32_ucmpw128_mask((__v8hi)__a, (__v8hi)__b, 2,
+ __u);
+}
+
+static __inline__ __mmask16 __DEFAULT_FN_ATTRS
+_mm256_cmple_epi16_mask(__m256i __a, __m256i __b) {
+ return (__mmask16)__builtin_ia32_cmpw256_mask((__v16hi)__a, (__v16hi)__b, 2,
+ (__mmask16)-1);
+}
+
+static __inline__ __mmask16 __DEFAULT_FN_ATTRS
+_mm256_mask_cmple_epi16_mask(__mmask16 __u, __m256i __a, __m256i __b) {
+ return (__mmask16)__builtin_ia32_cmpw256_mask((__v16hi)__a, (__v16hi)__b, 2,
+ __u);
+}
+
+static __inline__ __mmask16 __DEFAULT_FN_ATTRS
+_mm256_cmple_epu16_mask(__m256i __a, __m256i __b) {
+ return (__mmask16)__builtin_ia32_ucmpw256_mask((__v16hi)__a, (__v16hi)__b, 2,
+ (__mmask16)-1);
+}
+
+static __inline__ __mmask16 __DEFAULT_FN_ATTRS
+_mm256_mask_cmple_epu16_mask(__mmask16 __u, __m256i __a, __m256i __b) {
+ return (__mmask16)__builtin_ia32_ucmpw256_mask((__v16hi)__a, (__v16hi)__b, 2,
+ __u);
+}
+
+static __inline__ __mmask16 __DEFAULT_FN_ATTRS
+_mm_cmplt_epi8_mask(__m128i __a, __m128i __b) {
+ return (__mmask16)__builtin_ia32_cmpb128_mask((__v16qi)__a, (__v16qi)__b, 1,
+ (__mmask16)-1);
+}
+
+static __inline__ __mmask16 __DEFAULT_FN_ATTRS
+_mm_mask_cmplt_epi8_mask(__mmask16 __u, __m128i __a, __m128i __b) {
+ return (__mmask16)__builtin_ia32_cmpb128_mask((__v16qi)__a, (__v16qi)__b, 1,
+ __u);
+}
+
+static __inline__ __mmask16 __DEFAULT_FN_ATTRS
+_mm_cmplt_epu8_mask(__m128i __a, __m128i __b) {
+ return (__mmask16)__builtin_ia32_ucmpb128_mask((__v16qi)__a, (__v16qi)__b, 1,
+ (__mmask16)-1);
+}
+
+static __inline__ __mmask16 __DEFAULT_FN_ATTRS
+_mm_mask_cmplt_epu8_mask(__mmask16 __u, __m128i __a, __m128i __b) {
+ return (__mmask16)__builtin_ia32_ucmpb128_mask((__v16qi)__a, (__v16qi)__b, 1,
+ __u);
+}
+
+static __inline__ __mmask32 __DEFAULT_FN_ATTRS
+_mm256_cmplt_epi8_mask(__m256i __a, __m256i __b) {
+ return (__mmask32)__builtin_ia32_cmpb256_mask((__v32qi)__a, (__v32qi)__b, 1,
+ (__mmask32)-1);
+}
+
+static __inline__ __mmask32 __DEFAULT_FN_ATTRS
+_mm256_mask_cmplt_epi8_mask(__mmask32 __u, __m256i __a, __m256i __b) {
+ return (__mmask32)__builtin_ia32_cmpb256_mask((__v32qi)__a, (__v32qi)__b, 1,
+ __u);
+}
+
+static __inline__ __mmask32 __DEFAULT_FN_ATTRS
+_mm256_cmplt_epu8_mask(__m256i __a, __m256i __b) {
+ return (__mmask32)__builtin_ia32_ucmpb256_mask((__v32qi)__a, (__v32qi)__b, 1,
+ (__mmask32)-1);
+}
+
+static __inline__ __mmask32 __DEFAULT_FN_ATTRS
+_mm256_mask_cmplt_epu8_mask(__mmask32 __u, __m256i __a, __m256i __b) {
+ return (__mmask32)__builtin_ia32_ucmpb256_mask((__v32qi)__a, (__v32qi)__b, 1,
+ __u);
+}
+
+static __inline__ __mmask8 __DEFAULT_FN_ATTRS
+_mm_cmplt_epi16_mask(__m128i __a, __m128i __b) {
+ return (__mmask8)__builtin_ia32_cmpw128_mask((__v8hi)__a, (__v8hi)__b, 1,
+ (__mmask8)-1);
+}
+
+static __inline__ __mmask8 __DEFAULT_FN_ATTRS
+_mm_mask_cmplt_epi16_mask(__mmask8 __u, __m128i __a, __m128i __b) {
+ return (__mmask8)__builtin_ia32_cmpw128_mask((__v8hi)__a, (__v8hi)__b, 1,
+ __u);
+}
+
+static __inline__ __mmask8 __DEFAULT_FN_ATTRS
+_mm_cmplt_epu16_mask(__m128i __a, __m128i __b) {
+ return (__mmask8)__builtin_ia32_ucmpw128_mask((__v8hi)__a, (__v8hi)__b, 1,
+ (__mmask8)-1);
+}
+
+static __inline__ __mmask8 __DEFAULT_FN_ATTRS
+_mm_mask_cmplt_epu16_mask(__mmask8 __u, __m128i __a, __m128i __b) {
+ return (__mmask8)__builtin_ia32_ucmpw128_mask((__v8hi)__a, (__v8hi)__b, 1,
+ __u);
+}
+
+static __inline__ __mmask16 __DEFAULT_FN_ATTRS
+_mm256_cmplt_epi16_mask(__m256i __a, __m256i __b) {
+ return (__mmask16)__builtin_ia32_cmpw256_mask((__v16hi)__a, (__v16hi)__b, 1,
+ (__mmask16)-1);
+}
+
+static __inline__ __mmask16 __DEFAULT_FN_ATTRS
+_mm256_mask_cmplt_epi16_mask(__mmask16 __u, __m256i __a, __m256i __b) {
+ return (__mmask16)__builtin_ia32_cmpw256_mask((__v16hi)__a, (__v16hi)__b, 1,
+ __u);
+}
+
+static __inline__ __mmask16 __DEFAULT_FN_ATTRS
+_mm256_cmplt_epu16_mask(__m256i __a, __m256i __b) {
+ return (__mmask16)__builtin_ia32_ucmpw256_mask((__v16hi)__a, (__v16hi)__b, 1,
+ (__mmask16)-1);
+}
+
+static __inline__ __mmask16 __DEFAULT_FN_ATTRS
+_mm256_mask_cmplt_epu16_mask(__mmask16 __u, __m256i __a, __m256i __b) {
+ return (__mmask16)__builtin_ia32_ucmpw256_mask((__v16hi)__a, (__v16hi)__b, 1,
+ __u);
+}
+
+static __inline__ __mmask16 __DEFAULT_FN_ATTRS
+_mm_cmpneq_epi8_mask(__m128i __a, __m128i __b) {
+ return (__mmask16)__builtin_ia32_cmpb128_mask((__v16qi)__a, (__v16qi)__b, 4,
+ (__mmask16)-1);
+}
+
+static __inline__ __mmask16 __DEFAULT_FN_ATTRS
+_mm_mask_cmpneq_epi8_mask(__mmask16 __u, __m128i __a, __m128i __b) {
+ return (__mmask16)__builtin_ia32_cmpb128_mask((__v16qi)__a, (__v16qi)__b, 4,
+ __u);
+}
+
+static __inline__ __mmask16 __DEFAULT_FN_ATTRS
+_mm_cmpneq_epu8_mask(__m128i __a, __m128i __b) {
+ return (__mmask16)__builtin_ia32_ucmpb128_mask((__v16qi)__a, (__v16qi)__b, 4,
+ (__mmask16)-1);
+}
+
+static __inline__ __mmask16 __DEFAULT_FN_ATTRS
+_mm_mask_cmpneq_epu8_mask(__mmask16 __u, __m128i __a, __m128i __b) {
+ return (__mmask16)__builtin_ia32_ucmpb128_mask((__v16qi)__a, (__v16qi)__b, 4,
+ __u);
+}
+
+static __inline__ __mmask32 __DEFAULT_FN_ATTRS
+_mm256_cmpneq_epi8_mask(__m256i __a, __m256i __b) {
+ return (__mmask32)__builtin_ia32_cmpb256_mask((__v32qi)__a, (__v32qi)__b, 4,
+ (__mmask32)-1);
+}
+
+static __inline__ __mmask32 __DEFAULT_FN_ATTRS
+_mm256_mask_cmpneq_epi8_mask(__mmask32 __u, __m256i __a, __m256i __b) {
+ return (__mmask32)__builtin_ia32_cmpb256_mask((__v32qi)__a, (__v32qi)__b, 4,
+ __u);
+}
+
+static __inline__ __mmask32 __DEFAULT_FN_ATTRS
+_mm256_cmpneq_epu8_mask(__m256i __a, __m256i __b) {
+ return (__mmask32)__builtin_ia32_ucmpb256_mask((__v32qi)__a, (__v32qi)__b, 4,
+ (__mmask32)-1);
+}
+
+static __inline__ __mmask32 __DEFAULT_FN_ATTRS
+_mm256_mask_cmpneq_epu8_mask(__mmask32 __u, __m256i __a, __m256i __b) {
+ return (__mmask32)__builtin_ia32_ucmpb256_mask((__v32qi)__a, (__v32qi)__b, 4,
+ __u);
+}
+
+static __inline__ __mmask8 __DEFAULT_FN_ATTRS
+_mm_cmpneq_epi16_mask(__m128i __a, __m128i __b) {
+ return (__mmask8)__builtin_ia32_cmpw128_mask((__v8hi)__a, (__v8hi)__b, 4,
+ (__mmask8)-1);
+}
+
+static __inline__ __mmask8 __DEFAULT_FN_ATTRS
+_mm_mask_cmpneq_epi16_mask(__mmask8 __u, __m128i __a, __m128i __b) {
+ return (__mmask8)__builtin_ia32_cmpw128_mask((__v8hi)__a, (__v8hi)__b, 4,
+ __u);
+}
+
+static __inline__ __mmask8 __DEFAULT_FN_ATTRS
+_mm_cmpneq_epu16_mask(__m128i __a, __m128i __b) {
+ return (__mmask8)__builtin_ia32_ucmpw128_mask((__v8hi)__a, (__v8hi)__b, 4,
+ (__mmask8)-1);
+}
+
+static __inline__ __mmask8 __DEFAULT_FN_ATTRS
+_mm_mask_cmpneq_epu16_mask(__mmask8 __u, __m128i __a, __m128i __b) {
+ return (__mmask8)__builtin_ia32_ucmpw128_mask((__v8hi)__a, (__v8hi)__b, 4,
+ __u);
+}
+
+static __inline__ __mmask16 __DEFAULT_FN_ATTRS
+_mm256_cmpneq_epi16_mask(__m256i __a, __m256i __b) {
+ return (__mmask16)__builtin_ia32_cmpw256_mask((__v16hi)__a, (__v16hi)__b, 4,
+ (__mmask16)-1);
+}
+
+static __inline__ __mmask16 __DEFAULT_FN_ATTRS
+_mm256_mask_cmpneq_epi16_mask(__mmask16 __u, __m256i __a, __m256i __b) {
+ return (__mmask16)__builtin_ia32_cmpw256_mask((__v16hi)__a, (__v16hi)__b, 4,
+ __u);
+}
+
+static __inline__ __mmask16 __DEFAULT_FN_ATTRS
+_mm256_cmpneq_epu16_mask(__m256i __a, __m256i __b) {
+ return (__mmask16)__builtin_ia32_ucmpw256_mask((__v16hi)__a, (__v16hi)__b, 4,
+ (__mmask16)-1);
+}
+
+static __inline__ __mmask16 __DEFAULT_FN_ATTRS
+_mm256_mask_cmpneq_epu16_mask(__mmask16 __u, __m256i __a, __m256i __b) {
+ return (__mmask16)__builtin_ia32_ucmpw256_mask((__v16hi)__a, (__v16hi)__b, 4,
+ __u);
+}
+
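+/* In the masked arithmetic below, the _mask_ forms copy lanes from __W
+ * wherever the corresponding bit of the mask is clear, while the _maskz_
+ * forms zero those lanes instead. */
+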
+static __inline__ __m256i __DEFAULT_FN_ATTRS
+_mm256_mask_add_epi8 (__m256i __W, __mmask32 __U, __m256i __A, __m256i __B) {
+ return (__m256i) __builtin_ia32_paddb256_mask ((__v32qi) __A,
+ (__v32qi) __B,
+ (__v32qi) __W,
+ (__mmask32) __U);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS
+_mm256_maskz_add_epi8 (__mmask32 __U, __m256i __A, __m256i __B) {
+ return (__m256i) __builtin_ia32_paddb256_mask ((__v32qi) __A,
+ (__v32qi) __B,
+ (__v32qi)
+ _mm256_setzero_si256 (),
+ (__mmask32) __U);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS
+_mm256_mask_add_epi16 (__m256i __W, __mmask16 __U, __m256i __A, __m256i __B) {
+ return (__m256i) __builtin_ia32_paddw256_mask ((__v16hi) __A,
+ (__v16hi) __B,
+ (__v16hi) __W,
+ (__mmask16) __U);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS
+_mm256_maskz_add_epi16 (__mmask16 __U, __m256i __A, __m256i __B) {
+ return (__m256i) __builtin_ia32_paddw256_mask ((__v16hi) __A,
+ (__v16hi) __B,
+ (__v16hi)
+ _mm256_setzero_si256 (),
+ (__mmask16) __U);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS
+_mm256_mask_sub_epi8 (__m256i __W, __mmask32 __U, __m256i __A, __m256i __B) {
+ return (__m256i) __builtin_ia32_psubb256_mask ((__v32qi) __A,
+ (__v32qi) __B,
+ (__v32qi) __W,
+ (__mmask32) __U);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS
+_mm256_maskz_sub_epi8 (__mmask32 __U, __m256i __A, __m256i __B) {
+ return (__m256i) __builtin_ia32_psubb256_mask ((__v32qi) __A,
+ (__v32qi) __B,
+ (__v32qi)
+ _mm256_setzero_si256 (),
+ (__mmask32) __U);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS
+_mm256_mask_sub_epi16 (__m256i __W, __mmask16 __U, __m256i __A, __m256i __B) {
+ return (__m256i) __builtin_ia32_psubw256_mask ((__v16hi) __A,
+ (__v16hi) __B,
+ (__v16hi) __W,
+ (__mmask16) __U);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS
+_mm256_maskz_sub_epi16 (__mmask16 __U, __m256i __A, __m256i __B) {
+ return (__m256i) __builtin_ia32_psubw256_mask ((__v16hi) __A,
+ (__v16hi) __B,
+ (__v16hi)
+ _mm256_setzero_si256 (),
+ (__mmask16) __U);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_mask_add_epi8 (__m128i __W, __mmask16 __U, __m128i __A, __m128i __B) {
+ return (__m128i) __builtin_ia32_paddb128_mask ((__v16qi) __A,
+ (__v16qi) __B,
+ (__v16qi) __W,
+ (__mmask16) __U);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_maskz_add_epi8 (__mmask16 __U, __m128i __A, __m128i __B) {
+ return (__m128i) __builtin_ia32_paddb128_mask ((__v16qi) __A,
+ (__v16qi) __B,
+ (__v16qi)
+ _mm_setzero_si128 (),
+ (__mmask16) __U);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_mask_add_epi16 (__m128i __W, __mmask8 __U, __m128i __A, __m128i __B) {
+ return (__m128i) __builtin_ia32_paddw128_mask ((__v8hi) __A,
+ (__v8hi) __B,
+ (__v8hi) __W,
+ (__mmask8) __U);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_maskz_add_epi16 (__mmask8 __U, __m128i __A, __m128i __B) {
+ return (__m128i) __builtin_ia32_paddw128_mask ((__v8hi) __A,
+ (__v8hi) __B,
+ (__v8hi)
+ _mm_setzero_si128 (),
+ (__mmask8) __U);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_mask_sub_epi8 (__m128i __W, __mmask16 __U, __m128i __A, __m128i __B) {
+ return (__m128i) __builtin_ia32_psubb128_mask ((__v16qi) __A,
+ (__v16qi) __B,
+ (__v16qi) __W,
+ (__mmask16) __U);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_maskz_sub_epi8 (__mmask16 __U, __m128i __A, __m128i __B) {
+ return (__m128i) __builtin_ia32_psubb128_mask ((__v16qi) __A,
+ (__v16qi) __B,
+ (__v16qi)
+ _mm_setzero_si128 (),
+ (__mmask16) __U);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_mask_sub_epi16 (__m128i __W, __mmask8 __U, __m128i __A, __m128i __B) {
+ return (__m128i) __builtin_ia32_psubw128_mask ((__v8hi) __A,
+ (__v8hi) __B,
+ (__v8hi) __W,
+ (__mmask8) __U);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_maskz_sub_epi16 (__mmask8 __U, __m128i __A, __m128i __B) {
+ return (__m128i) __builtin_ia32_psubw128_mask ((__v8hi) __A,
+ (__v8hi) __B,
+ (__v8hi)
+ _mm_setzero_si128 (),
+ (__mmask8) __U);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS
+_mm256_mask_mullo_epi16 (__m256i __W, __mmask16 __U, __m256i __A, __m256i __B) {
+ return (__m256i) __builtin_ia32_pmullw256_mask ((__v16hi) __A,
+ (__v16hi) __B,
+ (__v16hi) __W,
+ (__mmask16) __U);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS
+_mm256_maskz_mullo_epi16 (__mmask16 __U, __m256i __A, __m256i __B) {
+ return (__m256i) __builtin_ia32_pmullw256_mask ((__v16hi) __A,
+ (__v16hi) __B,
+ (__v16hi)
+ _mm256_setzero_si256 (),
+ (__mmask16) __U);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_mask_mullo_epi16 (__m128i __W, __mmask8 __U, __m128i __A, __m128i __B) {
+ return (__m128i) __builtin_ia32_pmullw128_mask ((__v8hi) __A,
+ (__v8hi) __B,
+ (__v8hi) __W,
+ (__mmask8) __U);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_maskz_mullo_epi16 (__mmask8 __U, __m128i __A, __m128i __B) {
+ return (__m128i) __builtin_ia32_pmullw128_mask ((__v8hi) __A,
+ (__v8hi) __B,
+ (__v8hi)
+ _mm_setzero_si128 (),
+ (__mmask8) __U);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_mask_blend_epi8 (__mmask16 __U, __m128i __A, __m128i __W)
+{
+ return (__m128i) __builtin_ia32_blendmb_128_mask ((__v16qi) __A,
+ (__v16qi) __W,
+ (__mmask16) __U);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS
+_mm256_mask_blend_epi8 (__mmask32 __U, __m256i __A, __m256i __W)
+{
+ return (__m256i) __builtin_ia32_blendmb_256_mask ((__v32qi) __A,
+ (__v32qi) __W,
+ (__mmask32) __U);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_mask_blend_epi16 (__mmask8 __U, __m128i __A, __m128i __W)
+{
+ return (__m128i) __builtin_ia32_blendmw_128_mask ((__v8hi) __A,
+ (__v8hi) __W,
+ (__mmask8) __U);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS
+_mm256_mask_blend_epi16 (__mmask16 __U, __m256i __A, __m256i __W)
+{
+ return (__m256i) __builtin_ia32_blendmw_256_mask ((__v16hi) __A,
+ (__v16hi) __W,
+ (__mmask16) __U);
+}
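+
+/* For the blend operations above, each result lane is taken from __W when
+ * the corresponding mask bit of __U is set and from __A when it is clear. */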
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_mask_abs_epi8 (__m128i __W, __mmask16 __U, __m128i __A)
+{
+ return (__m128i) __builtin_ia32_pabsb128_mask ((__v16qi) __A,
+ (__v16qi) __W,
+ (__mmask16) __U);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_maskz_abs_epi8 (__mmask16 __U, __m128i __A)
+{
+ return (__m128i) __builtin_ia32_pabsb128_mask ((__v16qi) __A,
+ (__v16qi) _mm_setzero_si128 (),
+ (__mmask16) __U);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS
+_mm256_mask_abs_epi8 (__m256i __W, __mmask32 __U, __m256i __A)
+{
+ return (__m256i) __builtin_ia32_pabsb256_mask ((__v32qi) __A,
+ (__v32qi) __W,
+ (__mmask32) __U);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS
+_mm256_maskz_abs_epi8 (__mmask32 __U, __m256i __A)
+{
+ return (__m256i) __builtin_ia32_pabsb256_mask ((__v32qi) __A,
+ (__v32qi) _mm256_setzero_si256 (),
+ (__mmask32) __U);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_mask_abs_epi16 (__m128i __W, __mmask8 __U, __m128i __A)
+{
+ return (__m128i) __builtin_ia32_pabsw128_mask ((__v8hi) __A,
+ (__v8hi) __W,
+ (__mmask8) __U);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_maskz_abs_epi16 (__mmask8 __U, __m128i __A)
+{
+ return (__m128i) __builtin_ia32_pabsw128_mask ((__v8hi) __A,
+ (__v8hi) _mm_setzero_si128 (),
+ (__mmask8) __U);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS
+_mm256_mask_abs_epi16 (__m256i __W, __mmask16 __U, __m256i __A)
+{
+ return (__m256i) __builtin_ia32_pabsw256_mask ((__v16hi) __A,
+ (__v16hi) __W,
+ (__mmask16) __U);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS
+_mm256_maskz_abs_epi16 (__mmask16 __U, __m256i __A)
+{
+ return (__m256i) __builtin_ia32_pabsw256_mask ((__v16hi) __A,
+ (__v16hi) _mm256_setzero_si256 (),
+ (__mmask16) __U);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_maskz_packs_epi32 (__mmask8 __M, __m128i __A, __m128i __B)
+{
+ return (__m128i) __builtin_ia32_packssdw128_mask ((__v4si) __A,
+ (__v4si) __B,
+ (__v8hi) _mm_setzero_si128 (), __M);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_mask_packs_epi32 (__m128i __W, __mmask8 __M, __m128i __A,
+ __m128i __B)
+{
+ return (__m128i) __builtin_ia32_packssdw128_mask ((__v4si) __A,
+ (__v4si) __B,
+ (__v8hi) __W, __M);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS
+_mm256_maskz_packs_epi32 (__mmask16 __M, __m256i __A, __m256i __B)
+{
+ return (__m256i) __builtin_ia32_packssdw256_mask ((__v8si) __A,
+ (__v8si) __B,
+ (__v16hi) _mm256_setzero_si256 (),
+ __M);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS
+_mm256_mask_packs_epi32 (__m256i __W, __mmask16 __M, __m256i __A,
+ __m256i __B)
+{
+ return (__m256i) __builtin_ia32_packssdw256_mask ((__v8si) __A,
+ (__v8si) __B,
+ (__v16hi) __W, __M);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_maskz_packs_epi16 (__mmask16 __M, __m128i __A, __m128i __B)
+{
+ return (__m128i) __builtin_ia32_packsswb128_mask ((__v8hi) __A,
+ (__v8hi) __B,
+ (__v16qi) _mm_setzero_si128 (),
+ __M);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_mask_packs_epi16 (__m128i __W, __mmask16 __M, __m128i __A,
+ __m128i __B)
+{
+ return (__m128i) __builtin_ia32_packsswb128_mask ((__v8hi) __A,
+ (__v8hi) __B,
+ (__v16qi) __W,
+ __M);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS
+_mm256_maskz_packs_epi16 (__mmask32 __M, __m256i __A, __m256i __B)
+{
+ return (__m256i) __builtin_ia32_packsswb256_mask ((__v16hi) __A,
+ (__v16hi) __B,
+ (__v32qi) _mm256_setzero_si256 (),
+ __M);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS
+_mm256_mask_packs_epi16 (__m256i __W, __mmask32 __M, __m256i __A,
+ __m256i __B)
+{
+ return (__m256i) __builtin_ia32_packsswb256_mask ((__v16hi) __A,
+ (__v16hi) __B,
+ (__v32qi) __W,
+ __M);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_maskz_packus_epi32 (__mmask8 __M, __m128i __A, __m128i __B)
+{
+ return (__m128i) __builtin_ia32_packusdw128_mask ((__v4si) __A,
+ (__v4si) __B,
+ (__v8hi) _mm_setzero_si128 (),
+ __M);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_mask_packus_epi32 (__m128i __W, __mmask8 __M, __m128i __A,
+ __m128i __B)
+{
+ return (__m128i) __builtin_ia32_packusdw128_mask ((__v4si) __A,
+ (__v4si) __B,
+ (__v8hi) __W, __M);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS
+_mm256_maskz_packus_epi32 (__mmask16 __M, __m256i __A, __m256i __B)
+{
+ return (__m256i) __builtin_ia32_packusdw256_mask ((__v8si) __A,
+ (__v8si) __B,
+ (__v16hi) _mm256_setzero_si256 (),
+ __M);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS
+_mm256_mask_packus_epi32 (__m256i __W, __mmask16 __M, __m256i __A,
+ __m256i __B)
+{
+ return (__m256i) __builtin_ia32_packusdw256_mask ((__v8si) __A,
+ (__v8si) __B,
+ (__v16hi) __W,
+ __M);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_maskz_packus_epi16 (__mmask16 __M, __m128i __A, __m128i __B)
+{
+ return (__m128i) __builtin_ia32_packuswb128_mask ((__v8hi) __A,
+ (__v8hi) __B,
+ (__v16qi) _mm_setzero_si128 (),
+ __M);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_mask_packus_epi16 (__m128i __W, __mmask16 __M, __m128i __A,
+ __m128i __B)
+{
+ return (__m128i) __builtin_ia32_packuswb128_mask ((__v8hi) __A,
+ (__v8hi) __B,
+ (__v16qi) __W,
+ __M);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS
+_mm256_maskz_packus_epi16 (__mmask32 __M, __m256i __A, __m256i __B)
+{
+ return (__m256i) __builtin_ia32_packuswb256_mask ((__v16hi) __A,
+ (__v16hi) __B,
+ (__v32qi) _mm256_setzero_si256 (),
+ __M);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS
+_mm256_mask_packus_epi16 (__m256i __W, __mmask32 __M, __m256i __A,
+ __m256i __B)
+{
+ return (__m256i) __builtin_ia32_packuswb256_mask ((__v16hi) __A,
+ (__v16hi) __B,
+ (__v32qi) __W,
+ __M);
+}
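+
+/* The pack operations above narrow each source element to half its width:
+ * packs saturates to the signed range of the narrower type, packus to the
+ * unsigned range.  E.g. packs_epi32 turns 32-bit lanes into 16-bit lanes,
+ * clamping each value to [-32768, 32767]. */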
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_mask_adds_epi8 (__m128i __W, __mmask16 __U, __m128i __A,
+ __m128i __B)
+{
+ return (__m128i) __builtin_ia32_paddsb128_mask ((__v16qi) __A,
+ (__v16qi) __B,
+ (__v16qi) __W,
+ (__mmask16) __U);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_maskz_adds_epi8 (__mmask16 __U, __m128i __A, __m128i __B)
+{
+ return (__m128i) __builtin_ia32_paddsb128_mask ((__v16qi) __A,
+ (__v16qi) __B,
+ (__v16qi) _mm_setzero_si128 (),
+ (__mmask16) __U);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS
+_mm256_mask_adds_epi8 (__m256i __W, __mmask32 __U, __m256i __A,
+ __m256i __B)
+{
+ return (__m256i) __builtin_ia32_paddsb256_mask ((__v32qi) __A,
+ (__v32qi) __B,
+ (__v32qi) __W,
+ (__mmask32) __U);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS
+_mm256_maskz_adds_epi8 (__mmask32 __U, __m256i __A, __m256i __B)
+{
+ return (__m256i) __builtin_ia32_paddsb256_mask ((__v32qi) __A,
+ (__v32qi) __B,
+ (__v32qi) _mm256_setzero_si256 (),
+ (__mmask32) __U);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_mask_adds_epi16 (__m128i __W, __mmask8 __U, __m128i __A,
+ __m128i __B)
+{
+ return (__m128i) __builtin_ia32_paddsw128_mask ((__v8hi) __A,
+ (__v8hi) __B,
+ (__v8hi) __W,
+ (__mmask8) __U);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_maskz_adds_epi16 (__mmask8 __U, __m128i __A, __m128i __B)
+{
+ return (__m128i) __builtin_ia32_paddsw128_mask ((__v8hi) __A,
+ (__v8hi) __B,
+ (__v8hi) _mm_setzero_si128 (),
+ (__mmask8) __U);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS
+_mm256_mask_adds_epi16 (__m256i __W, __mmask16 __U, __m256i __A,
+ __m256i __B)
+{
+ return (__m256i) __builtin_ia32_paddsw256_mask ((__v16hi) __A,
+ (__v16hi) __B,
+ (__v16hi) __W,
+ (__mmask16) __U);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS
+_mm256_maskz_adds_epi16 (__mmask16 __U, __m256i __A, __m256i __B)
+{
+ return (__m256i) __builtin_ia32_paddsw256_mask ((__v16hi) __A,
+ (__v16hi) __B,
+ (__v16hi) _mm256_setzero_si256 (),
+ (__mmask16) __U);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_mask_adds_epu8 (__m128i __W, __mmask16 __U, __m128i __A,
+ __m128i __B)
+{
+ return (__m128i) __builtin_ia32_paddusb128_mask ((__v16qi) __A,
+ (__v16qi) __B,
+ (__v16qi) __W,
+ (__mmask16) __U);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_maskz_adds_epu8 (__mmask16 __U, __m128i __A, __m128i __B)
+{
+ return (__m128i) __builtin_ia32_paddusb128_mask ((__v16qi) __A,
+ (__v16qi) __B,
+ (__v16qi) _mm_setzero_si128 (),
+ (__mmask16) __U);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS
+_mm256_mask_adds_epu8 (__m256i __W, __mmask32 __U, __m256i __A,
+ __m256i __B)
+{
+ return (__m256i) __builtin_ia32_paddusb256_mask ((__v32qi) __A,
+ (__v32qi) __B,
+ (__v32qi) __W,
+ (__mmask32) __U);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS
+_mm256_maskz_adds_epu8 (__mmask32 __U, __m256i __A, __m256i __B)
+{
+ return (__m256i) __builtin_ia32_paddusb256_mask ((__v32qi) __A,
+ (__v32qi) __B,
+ (__v32qi) _mm256_setzero_si256 (),
+ (__mmask32) __U);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_mask_adds_epu16 (__m128i __W, __mmask8 __U, __m128i __A,
+ __m128i __B)
+{
+ return (__m128i) __builtin_ia32_paddusw128_mask ((__v8hi) __A,
+ (__v8hi) __B,
+ (__v8hi) __W,
+ (__mmask8) __U);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_maskz_adds_epu16 (__mmask8 __U, __m128i __A, __m128i __B)
+{
+ return (__m128i) __builtin_ia32_paddusw128_mask ((__v8hi) __A,
+ (__v8hi) __B,
+ (__v8hi) _mm_setzero_si128 (),
+ (__mmask8) __U);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS
+_mm256_mask_adds_epu16 (__m256i __W, __mmask16 __U, __m256i __A,
+ __m256i __B)
+{
+ return (__m256i) __builtin_ia32_paddusw256_mask ((__v16hi) __A,
+ (__v16hi) __B,
+ (__v16hi) __W,
+ (__mmask16) __U);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS
+_mm256_maskz_adds_epu16 (__mmask16 __U, __m256i __A, __m256i __B)
+{
+ return (__m256i) __builtin_ia32_paddusw256_mask ((__v16hi) __A,
+ (__v16hi) __B,
+ (__v16hi) _mm256_setzero_si256 (),
+ (__mmask16) __U);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_mask_avg_epu8 (__m128i __W, __mmask16 __U, __m128i __A,
+ __m128i __B)
+{
+ return (__m128i) __builtin_ia32_pavgb128_mask ((__v16qi) __A,
+ (__v16qi) __B,
+ (__v16qi) __W,
+ (__mmask16) __U);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_maskz_avg_epu8 (__mmask16 __U, __m128i __A, __m128i __B)
+{
+ return (__m128i) __builtin_ia32_pavgb128_mask ((__v16qi) __A,
+ (__v16qi) __B,
+ (__v16qi) _mm_setzero_si128 (),
+ (__mmask16) __U);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS
+_mm256_mask_avg_epu8 (__m256i __W, __mmask32 __U, __m256i __A,
+ __m256i __B)
+{
+ return (__m256i) __builtin_ia32_pavgb256_mask ((__v32qi) __A,
+ (__v32qi) __B,
+ (__v32qi) __W,
+ (__mmask32) __U);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS
+_mm256_maskz_avg_epu8 (__mmask32 __U, __m256i __A, __m256i __B)
+{
+ return (__m256i) __builtin_ia32_pavgb256_mask ((__v32qi) __A,
+ (__v32qi) __B,
+ (__v32qi) _mm256_setzero_si256 (),
+ (__mmask32) __U);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_mask_avg_epu16 (__m128i __W, __mmask8 __U, __m128i __A,
+ __m128i __B)
+{
+ return (__m128i) __builtin_ia32_pavgw128_mask ((__v8hi) __A,
+ (__v8hi) __B,
+ (__v8hi) __W,
+ (__mmask8) __U);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_maskz_avg_epu16 (__mmask8 __U, __m128i __A, __m128i __B)
+{
+ return (__m128i) __builtin_ia32_pavgw128_mask ((__v8hi) __A,
+ (__v8hi) __B,
+ (__v8hi) _mm_setzero_si128 (),
+ (__mmask8) __U);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS
+_mm256_mask_avg_epu16 (__m256i __W, __mmask16 __U, __m256i __A,
+ __m256i __B)
+{
+ return (__m256i) __builtin_ia32_pavgw256_mask ((__v16hi) __A,
+ (__v16hi) __B,
+ (__v16hi) __W,
+ (__mmask16) __U);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS
+_mm256_maskz_avg_epu16 (__mmask16 __U, __m256i __A, __m256i __B)
+{
+ return (__m256i) __builtin_ia32_pavgw256_mask ((__v16hi) __A,
+ (__v16hi) __B,
+ (__v16hi) _mm256_setzero_si256 (),
+ (__mmask16) __U);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_maskz_max_epi8 (__mmask16 __M, __m128i __A, __m128i __B)
+{
+ return (__m128i) __builtin_ia32_pmaxsb128_mask ((__v16qi) __A,
+ (__v16qi) __B,
+ (__v16qi) _mm_setzero_si128 (),
+ (__mmask16) __M);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_mask_max_epi8 (__m128i __W, __mmask16 __M, __m128i __A,
+ __m128i __B)
+{
+ return (__m128i) __builtin_ia32_pmaxsb128_mask ((__v16qi) __A,
+ (__v16qi) __B,
+ (__v16qi) __W,
+ (__mmask16) __M);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS
+_mm256_maskz_max_epi8 (__mmask32 __M, __m256i __A, __m256i __B)
+{
+ return (__m256i) __builtin_ia32_pmaxsb256_mask ((__v32qi) __A,
+ (__v32qi) __B,
+ (__v32qi) _mm256_setzero_si256 (),
+ (__mmask32) __M);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS
+_mm256_mask_max_epi8 (__m256i __W, __mmask32 __M, __m256i __A,
+ __m256i __B)
+{
+ return (__m256i) __builtin_ia32_pmaxsb256_mask ((__v32qi) __A,
+ (__v32qi) __B,
+ (__v32qi) __W,
+ (__mmask32) __M);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_maskz_max_epi16 (__mmask8 __M, __m128i __A, __m128i __B)
+{
+ return (__m128i) __builtin_ia32_pmaxsw128_mask ((__v8hi) __A,
+ (__v8hi) __B,
+ (__v8hi) _mm_setzero_si128 (),
+ (__mmask8) __M);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_mask_max_epi16 (__m128i __W, __mmask8 __M, __m128i __A,
+ __m128i __B)
+{
+ return (__m128i) __builtin_ia32_pmaxsw128_mask ((__v8hi) __A,
+ (__v8hi) __B,
+ (__v8hi) __W,
+ (__mmask8) __M);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS
+_mm256_maskz_max_epi16 (__mmask16 __M, __m256i __A, __m256i __B)
+{
+ return (__m256i) __builtin_ia32_pmaxsw256_mask ((__v16hi) __A,
+ (__v16hi) __B,
+ (__v16hi) _mm256_setzero_si256 (),
+ (__mmask16) __M);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS
+_mm256_mask_max_epi16 (__m256i __W, __mmask16 __M, __m256i __A,
+ __m256i __B)
+{
+ return (__m256i) __builtin_ia32_pmaxsw256_mask ((__v16hi) __A,
+ (__v16hi) __B,
+ (__v16hi) __W,
+ (__mmask16) __M);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_maskz_max_epu8 (__mmask16 __M, __m128i __A, __m128i __B)
+{
+ return (__m128i) __builtin_ia32_pmaxub128_mask ((__v16qi) __A,
+ (__v16qi) __B,
+ (__v16qi) _mm_setzero_si128 (),
+ (__mmask16) __M);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_mask_max_epu8 (__m128i __W, __mmask16 __M, __m128i __A,
+ __m128i __B)
+{
+ return (__m128i) __builtin_ia32_pmaxub128_mask ((__v16qi) __A,
+ (__v16qi) __B,
+ (__v16qi) __W,
+ (__mmask16) __M);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS
+_mm256_maskz_max_epu8 (__mmask32 __M, __m256i __A, __m256i __B)
+{
+ return (__m256i) __builtin_ia32_pmaxub256_mask ((__v32qi) __A,
+ (__v32qi) __B,
+ (__v32qi) _mm256_setzero_si256 (),
+ (__mmask32) __M);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS
+_mm256_mask_max_epu8 (__m256i __W, __mmask32 __M, __m256i __A,
+ __m256i __B)
+{
+ return (__m256i) __builtin_ia32_pmaxub256_mask ((__v32qi) __A,
+ (__v32qi) __B,
+ (__v32qi) __W,
+ (__mmask32) __M);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_maskz_max_epu16 (__mmask8 __M, __m128i __A, __m128i __B)
+{
+ return (__m128i) __builtin_ia32_pmaxuw128_mask ((__v8hi) __A,
+ (__v8hi) __B,
+ (__v8hi) _mm_setzero_si128 (),
+ (__mmask8) __M);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_mask_max_epu16 (__m128i __W, __mmask8 __M, __m128i __A,
+ __m128i __B)
+{
+ return (__m128i) __builtin_ia32_pmaxuw128_mask ((__v8hi) __A,
+ (__v8hi) __B,
+ (__v8hi) __W,
+ (__mmask8) __M);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS
+_mm256_maskz_max_epu16 (__mmask16 __M, __m256i __A, __m256i __B)
+{
+ return (__m256i) __builtin_ia32_pmaxuw256_mask ((__v16hi) __A,
+ (__v16hi) __B,
+ (__v16hi) _mm256_setzero_si256 (),
+ (__mmask16) __M);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS
+_mm256_mask_max_epu16 (__m256i __W, __mmask16 __M, __m256i __A,
+ __m256i __B)
+{
+ return (__m256i) __builtin_ia32_pmaxuw256_mask ((__v16hi) __A,
+ (__v16hi) __B,
+ (__v16hi) __W,
+ (__mmask16) __M);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_maskz_min_epi8 (__mmask16 __M, __m128i __A, __m128i __B)
+{
+ return (__m128i) __builtin_ia32_pminsb128_mask ((__v16qi) __A,
+ (__v16qi) __B,
+ (__v16qi) _mm_setzero_si128 (),
+ (__mmask16) __M);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_mask_min_epi8 (__m128i __W, __mmask16 __M, __m128i __A,
+ __m128i __B)
+{
+ return (__m128i) __builtin_ia32_pminsb128_mask ((__v16qi) __A,
+ (__v16qi) __B,
+ (__v16qi) __W,
+ (__mmask16) __M);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS
+_mm256_maskz_min_epi8 (__mmask32 __M, __m256i __A, __m256i __B)
+{
+ return (__m256i) __builtin_ia32_pminsb256_mask ((__v32qi) __A,
+ (__v32qi) __B,
+ (__v32qi) _mm256_setzero_si256 (),
+ (__mmask32) __M);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS
+_mm256_mask_min_epi8 (__m256i __W, __mmask32 __M, __m256i __A,
+ __m256i __B)
+{
+ return (__m256i) __builtin_ia32_pminsb256_mask ((__v32qi) __A,
+ (__v32qi) __B,
+ (__v32qi) __W,
+ (__mmask32) __M);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_maskz_min_epi16 (__mmask8 __M, __m128i __A, __m128i __B)
+{
+ return (__m128i) __builtin_ia32_pminsw128_mask ((__v8hi) __A,
+ (__v8hi) __B,
+ (__v8hi) _mm_setzero_si128 (),
+ (__mmask8) __M);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_mask_min_epi16 (__m128i __W, __mmask8 __M, __m128i __A,
+ __m128i __B)
+{
+ return (__m128i) __builtin_ia32_pminsw128_mask ((__v8hi) __A,
+ (__v8hi) __B,
+ (__v8hi) __W,
+ (__mmask8) __M);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS
+_mm256_maskz_min_epi16 (__mmask16 __M, __m256i __A, __m256i __B)
+{
+ return (__m256i) __builtin_ia32_pminsw256_mask ((__v16hi) __A,
+ (__v16hi) __B,
+ (__v16hi) _mm256_setzero_si256 (),
+ (__mmask16) __M);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS
+_mm256_mask_min_epi16 (__m256i __W, __mmask16 __M, __m256i __A,
+ __m256i __B)
+{
+ return (__m256i) __builtin_ia32_pminsw256_mask ((__v16hi) __A,
+ (__v16hi) __B,
+ (__v16hi) __W,
+ (__mmask16) __M);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_maskz_min_epu8 (__mmask16 __M, __m128i __A, __m128i __B)
+{
+ return (__m128i) __builtin_ia32_pminub128_mask ((__v16qi) __A,
+ (__v16qi) __B,
+ (__v16qi) _mm_setzero_si128 (),
+ (__mmask16) __M);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_mask_min_epu8 (__m128i __W, __mmask16 __M, __m128i __A,
+ __m128i __B)
+{
+ return (__m128i) __builtin_ia32_pminub128_mask ((__v16qi) __A,
+ (__v16qi) __B,
+ (__v16qi) __W,
+ (__mmask16) __M);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS
+_mm256_maskz_min_epu8 (__mmask32 __M, __m256i __A, __m256i __B)
+{
+ return (__m256i) __builtin_ia32_pminub256_mask ((__v32qi) __A,
+ (__v32qi) __B,
+ (__v32qi) _mm256_setzero_si256 (),
+ (__mmask32) __M);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS
+_mm256_mask_min_epu8 (__m256i __W, __mmask32 __M, __m256i __A,
+ __m256i __B)
+{
+ return (__m256i) __builtin_ia32_pminub256_mask ((__v32qi) __A,
+ (__v32qi) __B,
+ (__v32qi) __W,
+ (__mmask32) __M);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_maskz_min_epu16 (__mmask8 __M, __m128i __A, __m128i __B)
+{
+ return (__m128i) __builtin_ia32_pminuw128_mask ((__v8hi) __A,
+ (__v8hi) __B,
+ (__v8hi) _mm_setzero_si128 (),
+ (__mmask8) __M);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_mask_min_epu16 (__m128i __W, __mmask8 __M, __m128i __A,
+ __m128i __B)
+{
+ return (__m128i) __builtin_ia32_pminuw128_mask ((__v8hi) __A,
+ (__v8hi) __B,
+ (__v8hi) __W,
+ (__mmask8) __M);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS
+_mm256_maskz_min_epu16 (__mmask16 __M, __m256i __A, __m256i __B)
+{
+ return (__m256i) __builtin_ia32_pminuw256_mask ((__v16hi) __A,
+ (__v16hi) __B,
+ (__v16hi) _mm256_setzero_si256 (),
+ (__mmask16) __M);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS
+_mm256_mask_min_epu16 (__m256i __W, __mmask16 __M, __m256i __A,
+ __m256i __B)
+{
+ return (__m256i) __builtin_ia32_pminuw256_mask ((__v16hi) __A,
+ (__v16hi) __B,
+ (__v16hi) __W,
+ (__mmask16) __M);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_mask_shuffle_epi8 (__m128i __W, __mmask16 __U, __m128i __A,
+ __m128i __B)
+{
+ return (__m128i) __builtin_ia32_pshufb128_mask ((__v16qi) __A,
+ (__v16qi) __B,
+ (__v16qi) __W,
+ (__mmask16) __U);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_maskz_shuffle_epi8 (__mmask16 __U, __m128i __A, __m128i __B)
+{
+ return (__m128i) __builtin_ia32_pshufb128_mask ((__v16qi) __A,
+ (__v16qi) __B,
+ (__v16qi) _mm_setzero_si128 (),
+ (__mmask16) __U);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS
+_mm256_mask_shuffle_epi8 (__m256i __W, __mmask32 __U, __m256i __A,
+ __m256i __B)
+{
+ return (__m256i) __builtin_ia32_pshufb256_mask ((__v32qi) __A,
+ (__v32qi) __B,
+ (__v32qi) __W,
+ (__mmask32) __U);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS
+_mm256_maskz_shuffle_epi8 (__mmask32 __U, __m256i __A, __m256i __B)
+{
+ return (__m256i) __builtin_ia32_pshufb256_mask ((__v32qi) __A,
+ (__v32qi) __B,
+ (__v32qi) _mm256_setzero_si256 (),
+ (__mmask32) __U);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_mask_subs_epi8 (__m128i __W, __mmask16 __U, __m128i __A,
+ __m128i __B)
+{
+ return (__m128i) __builtin_ia32_psubsb128_mask ((__v16qi) __A,
+ (__v16qi) __B,
+ (__v16qi) __W,
+ (__mmask16) __U);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_maskz_subs_epi8 (__mmask16 __U, __m128i __A, __m128i __B)
+{
+ return (__m128i) __builtin_ia32_psubsb128_mask ((__v16qi) __A,
+ (__v16qi) __B,
+ (__v16qi) _mm_setzero_si128 (),
+ (__mmask16) __U);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS
+_mm256_mask_subs_epi8 (__m256i __W, __mmask32 __U, __m256i __A,
+ __m256i __B)
+{
+ return (__m256i) __builtin_ia32_psubsb256_mask ((__v32qi) __A,
+ (__v32qi) __B,
+ (__v32qi) __W,
+ (__mmask32) __U);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS
+_mm256_maskz_subs_epi8 (__mmask32 __U, __m256i __A, __m256i __B)
+{
+ return (__m256i) __builtin_ia32_psubsb256_mask ((__v32qi) __A,
+ (__v32qi) __B,
+ (__v32qi) _mm256_setzero_si256 (),
+ (__mmask32) __U);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_mask_subs_epi16 (__m128i __W, __mmask8 __U, __m128i __A,
+ __m128i __B)
+{
+ return (__m128i) __builtin_ia32_psubsw128_mask ((__v8hi) __A,
+ (__v8hi) __B,
+ (__v8hi) __W,
+ (__mmask8) __U);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_maskz_subs_epi16 (__mmask8 __U, __m128i __A, __m128i __B)
+{
+ return (__m128i) __builtin_ia32_psubsw128_mask ((__v8hi) __A,
+ (__v8hi) __B,
+ (__v8hi) _mm_setzero_si128 (),
+ (__mmask8) __U);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS
+_mm256_mask_subs_epi16 (__m256i __W, __mmask16 __U, __m256i __A,
+ __m256i __B)
+{
+ return (__m256i) __builtin_ia32_psubsw256_mask ((__v16hi) __A,
+ (__v16hi) __B,
+ (__v16hi) __W,
+ (__mmask16) __U);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS
+_mm256_maskz_subs_epi16 (__mmask16 __U, __m256i __A, __m256i __B)
+{
+ return (__m256i) __builtin_ia32_psubsw256_mask ((__v16hi) __A,
+ (__v16hi) __B,
+ (__v16hi) _mm256_setzero_si256 (),
+ (__mmask16) __U);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_mask_subs_epu8 (__m128i __W, __mmask16 __U, __m128i __A,
+ __m128i __B)
+{
+ return (__m128i) __builtin_ia32_psubusb128_mask ((__v16qi) __A,
+ (__v16qi) __B,
+ (__v16qi) __W,
+ (__mmask16) __U);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_maskz_subs_epu8 (__mmask16 __U, __m128i __A, __m128i __B)
+{
+ return (__m128i) __builtin_ia32_psubusb128_mask ((__v16qi) __A,
+ (__v16qi) __B,
+ (__v16qi) _mm_setzero_si128 (),
+ (__mmask16) __U);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS
+_mm256_mask_subs_epu8 (__m256i __W, __mmask32 __U, __m256i __A,
+ __m256i __B)
+{
+ return (__m256i) __builtin_ia32_psubusb256_mask ((__v32qi) __A,
+ (__v32qi) __B,
+ (__v32qi) __W,
+ (__mmask32) __U);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS
+_mm256_maskz_subs_epu8 (__mmask32 __U, __m256i __A, __m256i __B)
+{
+ return (__m256i) __builtin_ia32_psubusb256_mask ((__v32qi) __A,
+ (__v32qi) __B,
+ (__v32qi) _mm256_setzero_si256 (),
+ (__mmask32) __U);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_mask_subs_epu16 (__m128i __W, __mmask8 __U, __m128i __A,
+ __m128i __B)
+{
+ return (__m128i) __builtin_ia32_psubusw128_mask ((__v8hi) __A,
+ (__v8hi) __B,
+ (__v8hi) __W,
+ (__mmask8) __U);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_maskz_subs_epu16 (__mmask8 __U, __m128i __A, __m128i __B)
+{
+ return (__m128i) __builtin_ia32_psubusw128_mask ((__v8hi) __A,
+ (__v8hi) __B,
+ (__v8hi) _mm_setzero_si128 (),
+ (__mmask8) __U);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS
+_mm256_mask_subs_epu16 (__m256i __W, __mmask16 __U, __m256i __A,
+ __m256i __B)
+{
+ return (__m256i) __builtin_ia32_psubusw256_mask ((__v16hi) __A,
+ (__v16hi) __B,
+ (__v16hi) __W,
+ (__mmask16) __U);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS
+_mm256_maskz_subs_epu16 (__mmask16 __U, __m256i __A, __m256i __B)
+{
+ return (__m256i) __builtin_ia32_psubusw256_mask ((__v16hi) __A,
+ (__v16hi) __B,
+ (__v16hi) _mm256_setzero_si256 (),
+ (__mmask16) __U);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_mask2_permutex2var_epi16 (__m128i __A, __m128i __I, __mmask8 __U,
+ __m128i __B)
+{
+ return (__m128i) __builtin_ia32_vpermi2varhi128_mask ((__v8hi) __A,
+ (__v8hi) __I /* idx */ ,
+ (__v8hi) __B,
+ (__mmask8) __U);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS
+_mm256_mask2_permutex2var_epi16 (__m256i __A, __m256i __I,
+ __mmask16 __U, __m256i __B)
+{
+ return (__m256i) __builtin_ia32_vpermi2varhi256_mask ((__v16hi) __A,
+ (__v16hi) __I /* idx */ ,
+ (__v16hi) __B,
+ (__mmask16) __U);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_permutex2var_epi16 (__m128i __A, __m128i __I, __m128i __B)
+{
+ return (__m128i) __builtin_ia32_vpermt2varhi128_mask ((__v8hi) __I/* idx */,
+ (__v8hi) __A,
+ (__v8hi) __B,
+ (__mmask8) -1);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_mask_permutex2var_epi16 (__m128i __A, __mmask8 __U, __m128i __I,
+ __m128i __B)
+{
+ return (__m128i) __builtin_ia32_vpermt2varhi128_mask ((__v8hi) __I/* idx */,
+ (__v8hi) __A,
+ (__v8hi) __B,
+ (__mmask8) __U);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_maskz_permutex2var_epi16 (__mmask8 __U, __m128i __A, __m128i __I,
+ __m128i __B)
+{
+ return (__m128i) __builtin_ia32_vpermt2varhi128_maskz ((__v8hi) __I/* idx */,
+ (__v8hi) __A,
+ (__v8hi) __B,
+ (__mmask8) __U);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS
+_mm256_permutex2var_epi16 (__m256i __A, __m256i __I, __m256i __B)
+{
+ return (__m256i) __builtin_ia32_vpermt2varhi256_mask ((__v16hi) __I/* idx */,
+ (__v16hi) __A,
+ (__v16hi) __B,
+ (__mmask16) -1);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS
+_mm256_mask_permutex2var_epi16 (__m256i __A, __mmask16 __U,
+ __m256i __I, __m256i __B)
+{
+ return (__m256i) __builtin_ia32_vpermt2varhi256_mask ((__v16hi) __I/* idx */,
+ (__v16hi) __A,
+ (__v16hi) __B,
+ (__mmask16) __U);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS
+_mm256_maskz_permutex2var_epi16 (__mmask16 __U, __m256i __A,
+ __m256i __I, __m256i __B)
+{
+ return (__m256i) __builtin_ia32_vpermt2varhi256_maskz ((__v16hi) __I/* idx */,
+ (__v16hi) __A,
+ (__v16hi) __B,
+ (__mmask16) __U);
+}
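+
+/* permutex2var treats __A and __B as one concatenated table of 2N lanes;
+ * each lane of the index vector __I selects a source lane (modulo 2N), with
+ * the high index bit choosing between __A and __B. */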
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_mask_maddubs_epi16 (__m128i __W, __mmask8 __U, __m128i __X, __m128i __Y) {
+ return (__m128i) __builtin_ia32_pmaddubsw128_mask ((__v16qi) __X,
+ (__v16qi) __Y,
+ (__v8hi) __W,
+ (__mmask8) __U);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_maskz_maddubs_epi16 (__mmask8 __U, __m128i __X, __m128i __Y) {
+ return (__m128i) __builtin_ia32_pmaddubsw128_mask ((__v16qi) __X,
+ (__v16qi) __Y,
+ (__v8hi) _mm_setzero_si128(),
+ (__mmask8) __U);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS
+_mm256_mask_maddubs_epi16 (__m256i __W, __mmask16 __U, __m256i __X,
+ __m256i __Y) {
+ return (__m256i) __builtin_ia32_pmaddubsw256_mask ((__v32qi) __X,
+ (__v32qi) __Y,
+ (__v16hi) __W,
+ (__mmask16) __U);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS
+_mm256_maskz_maddubs_epi16 (__mmask16 __U, __m256i __X, __m256i __Y) {
+ return (__m256i) __builtin_ia32_pmaddubsw256_mask ((__v32qi) __X,
+ (__v32qi) __Y,
+ (__v16hi) _mm256_setzero_si256(),
+ (__mmask16) __U);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_mask_madd_epi16 (__m128i __W, __mmask8 __U, __m128i __A,
+ __m128i __B) {
+ return (__m128i) __builtin_ia32_pmaddwd128_mask ((__v8hi) __A,
+ (__v8hi) __B,
+ (__v4si) __W,
+ (__mmask8) __U);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_maskz_madd_epi16 (__mmask8 __U, __m128i __A, __m128i __B) {
+ return (__m128i) __builtin_ia32_pmaddwd128_mask ((__v8hi) __A,
+ (__v8hi) __B,
+ (__v4si) _mm_setzero_si128(),
+ (__mmask8) __U);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS
+_mm256_mask_madd_epi16 (__m256i __W, __mmask8 __U, __m256i __A, __m256i __B) {
+ return (__m256i) __builtin_ia32_pmaddwd256_mask ((__v16hi) __A,
+ (__v16hi) __B,
+ (__v8si) __W,
+ (__mmask8) __U);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS
+_mm256_maskz_madd_epi16 (__mmask8 __U, __m256i __A, __m256i __B) {
+ return (__m256i) __builtin_ia32_pmaddwd256_mask ((__v16hi) __A,
+ (__v16hi) __B,
+ (__v8si) _mm256_setzero_si256(),
+ (__mmask8) __U);
+}
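+
+/* Usage sketch (illustrative): madd_epi16 multiplies corresponding signed
+ * words and sums adjacent product pairs into 32-bit lanes; maddubs_epi16
+ * treats __X as unsigned bytes and __Y as signed bytes and saturates the
+ * 16-bit sums.
+ *
+ *   __m128i a = _mm_set1_epi16(3), b = _mm_set1_epi16(4);
+ *   __m128i r = _mm_maskz_madd_epi16(0xF, a, b);  // each lane: 3*4 + 3*4 = 24
+ */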
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_cvtsepi16_epi8 (__m128i __A) {
+ return (__m128i) __builtin_ia32_pmovswb128_mask ((__v8hi) __A,
+ (__v16qi) _mm_setzero_si128(),
+ (__mmask8) -1);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_mask_cvtsepi16_epi8 (__m128i __O, __mmask8 __M, __m128i __A) {
+ return (__m128i) __builtin_ia32_pmovswb128_mask ((__v8hi) __A,
+ (__v16qi) __O,
+ __M);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_maskz_cvtsepi16_epi8 (__mmask8 __M, __m128i __A) {
+ return (__m128i) __builtin_ia32_pmovswb128_mask ((__v8hi) __A,
+ (__v16qi) _mm_setzero_si128(),
+ __M);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm256_cvtsepi16_epi8 (__m256i __A) {
+ return (__m128i) __builtin_ia32_pmovswb256_mask ((__v16hi) __A,
+ (__v16qi) _mm_setzero_si128(),
+ (__mmask16) -1);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm256_mask_cvtsepi16_epi8 (__m128i __O, __mmask16 __M, __m256i __A) {
+ return (__m128i) __builtin_ia32_pmovswb256_mask ((__v16hi) __A,
+ (__v16qi) __O,
+ __M);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm256_maskz_cvtsepi16_epi8 (__mmask16 __M, __m256i __A) {
+ return (__m128i) __builtin_ia32_pmovswb256_mask ((__v16hi) __A,
+ (__v16qi) _mm_setzero_si128(),
+ __M);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_cvtusepi16_epi8 (__m128i __A) {
+ return (__m128i) __builtin_ia32_pmovuswb128_mask ((__v8hi) __A,
+ (__v16qi) _mm_setzero_si128(),
+ (__mmask8) -1);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_mask_cvtusepi16_epi8 (__m128i __O, __mmask8 __M, __m128i __A) {
+ return (__m128i) __builtin_ia32_pmovuswb128_mask ((__v8hi) __A,
+ (__v16qi) __O,
+ __M);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_maskz_cvtusepi16_epi8 (__mmask8 __M, __m128i __A) {
+ return (__m128i) __builtin_ia32_pmovuswb128_mask ((__v8hi) __A,
+ (__v16qi) _mm_setzero_si128(),
+ __M);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm256_cvtusepi16_epi8 (__m256i __A) {
+ return (__m128i) __builtin_ia32_pmovuswb256_mask ((__v16hi) __A,
+ (__v16qi) _mm_setzero_si128(),
+ (__mmask16) -1);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm256_mask_cvtusepi16_epi8 (__m128i __O, __mmask16 __M, __m256i __A) {
+ return (__m128i) __builtin_ia32_pmovuswb256_mask ((__v16hi) __A,
+ (__v16qi) __O,
+ __M);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm256_maskz_cvtusepi16_epi8 (__mmask16 __M, __m256i __A) {
+ return (__m128i) __builtin_ia32_pmovuswb256_mask ((__v16hi) __A,
+ (__v16qi) _mm_setzero_si128(),
+ __M);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_cvtepi16_epi8 (__m128i __A) {
+ return (__m128i) __builtin_ia32_pmovwb128_mask ((__v8hi) __A,
+ (__v16qi) _mm_setzero_si128(),
+ (__mmask8) -1);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_mask_cvtepi16_epi8 (__m128i __O, __mmask8 __M, __m128i __A) {
+ return (__m128i) __builtin_ia32_pmovwb128_mask ((__v8hi) __A,
+ (__v16qi) __O,
+ __M);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_maskz_cvtepi16_epi8 (__mmask8 __M, __m128i __A) {
+ return (__m128i) __builtin_ia32_pmovwb128_mask ((__v8hi) __A,
+ (__v16qi) _mm_setzero_si128(),
+ __M);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm256_cvtepi16_epi8 (__m256i __A) {
+ return (__m128i) __builtin_ia32_pmovwb256_mask ((__v16hi) __A,
+ (__v16qi) _mm_setzero_si128(),
+ (__mmask16) -1);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm256_mask_cvtepi16_epi8 (__m128i __O, __mmask16 __M, __m256i __A) {
+ return (__m128i) __builtin_ia32_pmovwb256_mask ((__v16hi) __A,
+ (__v16qi) __O,
+ __M);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm256_maskz_cvtepi16_epi8 (__mmask16 __M, __m256i __A) {
+ return (__m128i) __builtin_ia32_pmovwb256_mask ((__v16hi) __A,
+ (__v16qi) _mm_setzero_si128(),
+ __M);
+}
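+
+/* Usage sketch (illustrative): the three word-to-byte down-conversions
+ * above differ only in how they narrow -- signed saturation (pmovswb),
+ * unsigned saturation (pmovuswb), or plain truncation (pmovwb); the
+ * 128-bit forms populate only the low 8 bytes of the result.
+ *
+ *   __m128i w = _mm_set1_epi16(300);
+ *   __m128i s = _mm_cvtsepi16_epi8(w);  // lanes saturate to 127
+ *   __m128i t = _mm_cvtepi16_epi8(w);   // lanes truncate to 300 & 0xFF = 44
+ */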
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_mask_mulhrs_epi16 (__m128i __W, __mmask8 __U, __m128i __X, __m128i __Y) {
+ return (__m128i) __builtin_ia32_pmulhrsw128_mask ((__v8hi) __X,
+ (__v8hi) __Y,
+ (__v8hi) __W,
+ (__mmask8) __U);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_maskz_mulhrs_epi16 (__mmask8 __U, __m128i __X, __m128i __Y) {
+ return (__m128i) __builtin_ia32_pmulhrsw128_mask ((__v8hi) __X,
+ (__v8hi) __Y,
+ (__v8hi) _mm_setzero_si128(),
+ (__mmask8) __U);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS
+_mm256_mask_mulhrs_epi16 (__m256i __W, __mmask16 __U, __m256i __X, __m256i __Y) {
+ return (__m256i) __builtin_ia32_pmulhrsw256_mask ((__v16hi) __X,
+ (__v16hi) __Y,
+ (__v16hi) __W,
+ (__mmask16) __U);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS
+_mm256_maskz_mulhrs_epi16 (__mmask16 __U, __m256i __X, __m256i __Y) {
+ return (__m256i) __builtin_ia32_pmulhrsw256_mask ((__v16hi) __X,
+ (__v16hi) __Y,
+ (__v16hi) _mm256_setzero_si256(),
+ (__mmask16) __U);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_mask_mulhi_epu16 (__m128i __W, __mmask8 __U, __m128i __A,
+ __m128i __B) {
+ return (__m128i) __builtin_ia32_pmulhuw128_mask ((__v8hi) __A,
+ (__v8hi) __B,
+ (__v8hi) __W,
+ (__mmask8) __U);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_maskz_mulhi_epu16 (__mmask8 __U, __m128i __A, __m128i __B) {
+ return (__m128i) __builtin_ia32_pmulhuw128_mask ((__v8hi) __A,
+ (__v8hi) __B,
+ (__v8hi) _mm_setzero_si128(),
+ (__mmask8) __U);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS
+_mm256_mask_mulhi_epu16 (__m256i __W, __mmask16 __U, __m256i __A,
+ __m256i __B) {
+ return (__m256i) __builtin_ia32_pmulhuw256_mask ((__v16hi) __A,
+ (__v16hi) __B,
+ (__v16hi) __W,
+ (__mmask16) __U);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS
+_mm256_maskz_mulhi_epu16 (__mmask16 __U, __m256i __A, __m256i __B) {
+ return (__m256i) __builtin_ia32_pmulhuw256_mask ((__v16hi) __A,
+ (__v16hi) __B,
+ (__v16hi) _mm256_setzero_si256(),
+ (__mmask16) __U);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_mask_mulhi_epi16 (__m128i __W, __mmask8 __U, __m128i __A,
+ __m128i __B) {
+ return (__m128i) __builtin_ia32_pmulhw128_mask ((__v8hi) __A,
+ (__v8hi) __B,
+ (__v8hi) __W,
+ (__mmask8) __U);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_maskz_mulhi_epi16 (__mmask8 __U, __m128i __A, __m128i __B) {
+ return (__m128i) __builtin_ia32_pmulhw128_mask ((__v8hi) __A,
+ (__v8hi) __B,
+ (__v8hi) _mm_setzero_si128(),
+ (__mmask8) __U);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS
+_mm256_mask_mulhi_epi16 (__m256i __W, __mmask16 __U, __m256i __A,
+ __m256i __B) {
+ return (__m256i) __builtin_ia32_pmulhw256_mask ((__v16hi) __A,
+ (__v16hi) __B,
+ (__v16hi) __W,
+ (__mmask16) __U);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS
+_mm256_maskz_mulhi_epi16 (__mmask16 __U, __m256i __A, __m256i __B) {
+ return (__m256i) __builtin_ia32_pmulhw256_mask ((__v16hi) __A,
+ (__v16hi) __B,
+ (__v16hi) _mm256_setzero_si256(),
+ (__mmask16) __U);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_mask_unpackhi_epi8 (__m128i __W, __mmask16 __U, __m128i __A,
+ __m128i __B) {
+ return (__m128i) __builtin_ia32_punpckhbw128_mask ((__v16qi) __A,
+ (__v16qi) __B,
+ (__v16qi) __W,
+ (__mmask16) __U);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_maskz_unpackhi_epi8 (__mmask16 __U, __m128i __A, __m128i __B) {
+ return (__m128i) __builtin_ia32_punpckhbw128_mask ((__v16qi) __A,
+ (__v16qi) __B,
+ (__v16qi) _mm_setzero_si128(),
+ (__mmask16) __U);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS
+_mm256_mask_unpackhi_epi8 (__m256i __W, __mmask32 __U, __m256i __A,
+ __m256i __B) {
+ return (__m256i) __builtin_ia32_punpckhbw256_mask ((__v32qi) __A,
+ (__v32qi) __B,
+ (__v32qi) __W,
+ (__mmask32) __U);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS
+_mm256_maskz_unpackhi_epi8 (__mmask32 __U, __m256i __A, __m256i __B) {
+ return (__m256i) __builtin_ia32_punpckhbw256_mask ((__v32qi) __A,
+ (__v32qi) __B,
+ (__v32qi) _mm256_setzero_si256(),
+ (__mmask32) __U);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_mask_unpackhi_epi16 (__m128i __W, __mmask8 __U, __m128i __A,
+ __m128i __B) {
+ return (__m128i) __builtin_ia32_punpckhwd128_mask ((__v8hi) __A,
+ (__v8hi) __B,
+ (__v8hi) __W,
+ (__mmask8) __U);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_maskz_unpackhi_epi16 (__mmask8 __U, __m128i __A, __m128i __B) {
+ return (__m128i) __builtin_ia32_punpckhwd128_mask ((__v8hi) __A,
+ (__v8hi) __B,
+ (__v8hi) _mm_setzero_si128(),
+ (__mmask8) __U);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS
+_mm256_mask_unpackhi_epi16 (__m256i __W, __mmask16 __U, __m256i __A,
+ __m256i __B) {
+ return (__m256i) __builtin_ia32_punpckhwd256_mask ((__v16hi) __A,
+ (__v16hi) __B,
+ (__v16hi) __W,
+ (__mmask16) __U);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS
+_mm256_maskz_unpackhi_epi16 (__mmask16 __U, __m256i __A, __m256i __B) {
+ return (__m256i) __builtin_ia32_punpckhwd256_mask ((__v16hi) __A,
+ (__v16hi) __B,
+ (__v16hi) _mm256_setzero_si256(),
+ (__mmask16) __U);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_mask_unpacklo_epi8 (__m128i __W, __mmask16 __U, __m128i __A,
+ __m128i __B) {
+ return (__m128i) __builtin_ia32_punpcklbw128_mask ((__v16qi) __A,
+ (__v16qi) __B,
+ (__v16qi) __W,
+ (__mmask16) __U);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_maskz_unpacklo_epi8 (__mmask16 __U, __m128i __A, __m128i __B) {
+ return (__m128i) __builtin_ia32_punpcklbw128_mask ((__v16qi) __A,
+ (__v16qi) __B,
+ (__v16qi) _mm_setzero_si128(),
+ (__mmask16) __U);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS
+_mm256_mask_unpacklo_epi8 (__m256i __W, __mmask32 __U, __m256i __A,
+ __m256i __B) {
+ return (__m256i) __builtin_ia32_punpcklbw256_mask ((__v32qi) __A,
+ (__v32qi) __B,
+ (__v32qi) __W,
+ (__mmask32) __U);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS
+_mm256_maskz_unpacklo_epi8 (__mmask32 __U, __m256i __A, __m256i __B) {
+ return (__m256i) __builtin_ia32_punpcklbw256_mask ((__v32qi) __A,
+ (__v32qi) __B,
+ (__v32qi) _mm256_setzero_si256(),
+ (__mmask32) __U);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_mask_unpacklo_epi16 (__m128i __W, __mmask8 __U, __m128i __A,
+ __m128i __B) {
+ return (__m128i) __builtin_ia32_punpcklwd128_mask ((__v8hi) __A,
+ (__v8hi) __B,
+ (__v8hi) __W,
+ (__mmask8) __U);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_maskz_unpacklo_epi16 (__mmask8 __U, __m128i __A, __m128i __B) {
+ return (__m128i) __builtin_ia32_punpcklwd128_mask ((__v8hi) __A,
+ (__v8hi) __B,
+ (__v8hi) _mm_setzero_si128(),
+ (__mmask8) __U);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS
+_mm256_mask_unpacklo_epi16 (__m256i __W, __mmask16 __U, __m256i __A,
+ __m256i __B) {
+ return (__m256i) __builtin_ia32_punpcklwd256_mask ((__v16hi) __A,
+ (__v16hi) __B,
+ (__v16hi) __W,
+ (__mmask16) __U);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS
+_mm256_maskz_unpacklo_epi16 (__mmask16 __U, __m256i __A, __m256i __B) {
+ return (__m256i) __builtin_ia32_punpcklwd256_mask ((__v16hi) __A,
+ (__v16hi) __B,
+ (__v16hi) _mm256_setzero_si256(),
+ (__mmask16) __U);
+}
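+
+/* Usage sketch (illustrative, hypothetical operands): the masked unpack
+ * forms interleave the low (or high) half of each 128-bit lane of __A and
+ * __B exactly like their AVX2 counterparts, then apply the write mask.
+ *
+ *   __m128i r = _mm_maskz_unpacklo_epi16(0xFF, a, b);
+ *   // r = { a0, b0, a1, b1, a2, b2, a3, b3 }
+ */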
+
+#define _mm_cmp_epi8_mask(a, b, p) __extension__ ({ \
+ (__mmask16)__builtin_ia32_cmpb128_mask((__v16qi)(__m128i)(a), \
+ (__v16qi)(__m128i)(b), \
+ (p), (__mmask16)-1); })
+
+#define _mm_mask_cmp_epi8_mask(m, a, b, p) __extension__ ({ \
+ (__mmask16)__builtin_ia32_cmpb128_mask((__v16qi)(__m128i)(a), \
+ (__v16qi)(__m128i)(b), \
+ (p), (__mmask16)(m)); })
+
+#define _mm_cmp_epu8_mask(a, b, p) __extension__ ({ \
+ (__mmask16)__builtin_ia32_ucmpb128_mask((__v16qi)(__m128i)(a), \
+ (__v16qi)(__m128i)(b), \
+ (p), (__mmask16)-1); })
+
+#define _mm_mask_cmp_epu8_mask(m, a, b, p) __extension__ ({ \
+ (__mmask16)__builtin_ia32_ucmpb128_mask((__v16qi)(__m128i)(a), \
+ (__v16qi)(__m128i)(b), \
+ (p), (__mmask16)(m)); })
+
+#define _mm256_cmp_epi8_mask(a, b, p) __extension__ ({ \
+ (__mmask32)__builtin_ia32_cmpb256_mask((__v32qi)(__m256i)(a), \
+ (__v32qi)(__m256i)(b), \
+ (p), (__mmask32)-1); })
+
+#define _mm256_mask_cmp_epi8_mask(m, a, b, p) __extension__ ({ \
+ (__mmask32)__builtin_ia32_cmpb256_mask((__v32qi)(__m256i)(a), \
+ (__v32qi)(__m256i)(b), \
+ (p), (__mmask32)(m)); })
+
+#define _mm256_cmp_epu8_mask(a, b, p) __extension__ ({ \
+ (__mmask32)__builtin_ia32_ucmpb256_mask((__v32qi)(__m256i)(a), \
+ (__v32qi)(__m256i)(b), \
+ (p), (__mmask32)-1); })
+
+#define _mm256_mask_cmp_epu8_mask(m, a, b, p) __extension__ ({ \
+ (__mmask32)__builtin_ia32_ucmpb256_mask((__v32qi)(__m256i)(a), \
+ (__v32qi)(__m256i)(b), \
+ (p), (__mmask32)(m)); })
+
+#define _mm_cmp_epi16_mask(a, b, p) __extension__ ({ \
+ (__mmask8)__builtin_ia32_cmpw128_mask((__v8hi)(__m128i)(a), \
+ (__v8hi)(__m128i)(b), \
+ (p), (__mmask8)-1); })
+
+#define _mm_mask_cmp_epi16_mask(m, a, b, p) __extension__ ({ \
+ (__mmask8)__builtin_ia32_cmpw128_mask((__v8hi)(__m128i)(a), \
+ (__v8hi)(__m128i)(b), \
+ (p), (__mmask8)(m)); })
+
+#define _mm_cmp_epu16_mask(a, b, p) __extension__ ({ \
+ (__mmask8)__builtin_ia32_ucmpw128_mask((__v8hi)(__m128i)(a), \
+ (__v8hi)(__m128i)(b), \
+ (p), (__mmask8)-1); })
+
+#define _mm_mask_cmp_epu16_mask(m, a, b, p) __extension__ ({ \
+ (__mmask8)__builtin_ia32_ucmpw128_mask((__v8hi)(__m128i)(a), \
+ (__v8hi)(__m128i)(b), \
+ (p), (__mmask8)(m)); })
+
+#define _mm256_cmp_epi16_mask(a, b, p) __extension__ ({ \
+ (__mmask16)__builtin_ia32_cmpw256_mask((__v16hi)(__m256i)(a), \
+ (__v16hi)(__m256i)(b), \
+ (p), (__mmask16)-1); })
+
+#define _mm256_mask_cmp_epi16_mask(m, a, b, p) __extension__ ({ \
+ (__mmask16)__builtin_ia32_cmpw256_mask((__v16hi)(__m256i)(a), \
+ (__v16hi)(__m256i)(b), \
+ (p), (__mmask16)(m)); })
+
+#define _mm256_cmp_epu16_mask(a, b, p) __extension__ ({ \
+ (__mmask16)__builtin_ia32_ucmpw256_mask((__v16hi)(__m256i)(a), \
+ (__v16hi)(__m256i)(b), \
+ (p), (__mmask16)-1); })
+
+#define _mm256_mask_cmp_epu16_mask(m, a, b, p) __extension__ ({ \
+ (__mmask16)__builtin_ia32_ucmpw256_mask((__v16hi)(__m256i)(a), \
+ (__v16hi)(__m256i)(b), \
+ (p), (__mmask16)(m)); })
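+
+/* Usage sketch (illustrative): the immediate `p` in the _cmp_ macros is the
+ * standard AVX-512 integer predicate (0 = EQ, 1 = LT, 2 = LE, 4 = NE,
+ * 5 = GE, 6 = GT) and must be a compile-time constant.
+ *
+ *   __mmask16 k = _mm_cmp_epi8_mask(a, b, 1);  // per-byte signed a < b
+ */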
+
+#undef __DEFAULT_FN_ATTRS
+
+#endif /* __AVX512VLBWINTRIN_H */
diff --git a/clang-2690385/lib64/clang/3.8/include/avx512vldqintrin.h b/clang-2690385/lib64/clang/3.8/include/avx512vldqintrin.h
new file mode 100644
index 00000000..dfd858e0
--- /dev/null
+++ b/clang-2690385/lib64/clang/3.8/include/avx512vldqintrin.h
@@ -0,0 +1,953 @@
+/*===---- avx512vldqintrin.h - AVX512VL and AVX512DQ intrinsics ------------===
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+ * THE SOFTWARE.
+ *
+ *===-----------------------------------------------------------------------===
+ */
+
+#ifndef __IMMINTRIN_H
+#error "Never use <avx512vldqintrin.h> directly; include <immintrin.h> instead."
+#endif
+
+#ifndef __AVX512VLDQINTRIN_H
+#define __AVX512VLDQINTRIN_H
+
+/* Define the default attributes for the functions in this file. */
+#define __DEFAULT_FN_ATTRS __attribute__((__always_inline__, __nodebug__, __target__("avx512vl,avx512dq")))
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS
+_mm256_mullo_epi64 (__m256i __A, __m256i __B) {
+ return (__m256i) ((__v4di) __A * (__v4di) __B);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS
+_mm256_mask_mullo_epi64 (__m256i __W, __mmask8 __U, __m256i __A, __m256i __B) {
+ return (__m256i) __builtin_ia32_pmullq256_mask ((__v4di) __A,
+ (__v4di) __B,
+ (__v4di) __W,
+ (__mmask8) __U);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS
+_mm256_maskz_mullo_epi64 (__mmask8 __U, __m256i __A, __m256i __B) {
+ return (__m256i) __builtin_ia32_pmullq256_mask ((__v4di) __A,
+ (__v4di) __B,
+ (__v4di)
+ _mm256_setzero_si256 (),
+ (__mmask8) __U);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_mullo_epi64 (__m128i __A, __m128i __B) {
+ return (__m128i) ((__v2di) __A * (__v2di) __B);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_mask_mullo_epi64 (__m128i __W, __mmask8 __U, __m128i __A, __m128i __B) {
+ return (__m128i) __builtin_ia32_pmullq128_mask ((__v2di) __A,
+ (__v2di) __B,
+ (__v2di) __W,
+ (__mmask8) __U);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_maskz_mullo_epi64 (__mmask8 __U, __m128i __A, __m128i __B) {
+ return (__m128i) __builtin_ia32_pmullq128_mask ((__v2di) __A,
+ (__v2di) __B,
+ (__v2di)
+ _mm_setzero_si128 (),
+ (__mmask8) __U);
+}
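+
+/* Usage sketch (illustrative, hypothetical operands): the unmasked forms
+ * lower the generic vector `*` to VPMULLQ when AVX512DQ is available; the
+ * masked builtins blend or zero inactive lanes.
+ *
+ *   __m256i p = _mm256_mask_mullo_epi64(w, 0x5, a, b);
+ *   // lanes 0 and 2 receive a*b, lanes 1 and 3 keep w
+ */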
+
+static __inline__ __m256d __DEFAULT_FN_ATTRS
+_mm256_mask_andnot_pd (__m256d __W, __mmask8 __U, __m256d __A, __m256d __B) {
+ return (__m256d) __builtin_ia32_andnpd256_mask ((__v4df) __A,
+ (__v4df) __B,
+ (__v4df) __W,
+ (__mmask8) __U);
+}
+
+static __inline__ __m256d __DEFAULT_FN_ATTRS
+_mm256_maskz_andnot_pd (__mmask8 __U, __m256d __A, __m256d __B) {
+ return (__m256d) __builtin_ia32_andnpd256_mask ((__v4df) __A,
+ (__v4df) __B,
+ (__v4df)
+ _mm256_setzero_pd (),
+ (__mmask8) __U);
+}
+
+static __inline__ __m128d __DEFAULT_FN_ATTRS
+_mm_mask_andnot_pd (__m128d __W, __mmask8 __U, __m128d __A, __m128d __B) {
+ return (__m128d) __builtin_ia32_andnpd128_mask ((__v2df) __A,
+ (__v2df) __B,
+ (__v2df) __W,
+ (__mmask8) __U);
+}
+
+static __inline__ __m128d __DEFAULT_FN_ATTRS
+_mm_maskz_andnot_pd (__mmask8 __U, __m128d __A, __m128d __B) {
+ return (__m128d) __builtin_ia32_andnpd128_mask ((__v2df) __A,
+ (__v2df) __B,
+ (__v2df)
+ _mm_setzero_pd (),
+ (__mmask8) __U);
+}
+
+static __inline__ __m256 __DEFAULT_FN_ATTRS
+_mm256_mask_andnot_ps (__m256 __W, __mmask8 __U, __m256 __A, __m256 __B) {
+ return (__m256) __builtin_ia32_andnps256_mask ((__v8sf) __A,
+ (__v8sf) __B,
+ (__v8sf) __W,
+ (__mmask8) __U);
+}
+
+static __inline__ __m256 __DEFAULT_FN_ATTRS
+_mm256_maskz_andnot_ps (__mmask8 __U, __m256 __A, __m256 __B) {
+ return (__m256) __builtin_ia32_andnps256_mask ((__v8sf) __A,
+ (__v8sf) __B,
+ (__v8sf)
+ _mm256_setzero_ps (),
+ (__mmask8) __U);
+}
+
+static __inline__ __m128 __DEFAULT_FN_ATTRS
+_mm_mask_andnot_ps (__m128 __W, __mmask8 __U, __m128 __A, __m128 __B) {
+ return (__m128) __builtin_ia32_andnps128_mask ((__v4sf) __A,
+ (__v4sf) __B,
+ (__v4sf) __W,
+ (__mmask8) __U);
+}
+
+static __inline__ __m128 __DEFAULT_FN_ATTRS
+_mm_maskz_andnot_ps (__mmask8 __U, __m128 __A, __m128 __B) {
+ return (__m128) __builtin_ia32_andnps128_mask ((__v4sf) __A,
+ (__v4sf) __B,
+ (__v4sf)
+ _mm_setzero_ps (),
+ (__mmask8) __U);
+}
+
+static __inline__ __m256d __DEFAULT_FN_ATTRS
+_mm256_mask_and_pd (__m256d __W, __mmask8 __U, __m256d __A, __m256d __B) {
+ return (__m256d) __builtin_ia32_andpd256_mask ((__v4df) __A,
+ (__v4df) __B,
+ (__v4df) __W,
+ (__mmask8) __U);
+}
+
+static __inline__ __m256d __DEFAULT_FN_ATTRS
+_mm256_maskz_and_pd (__mmask8 __U, __m256d __A, __m256d __B) {
+ return (__m256d) __builtin_ia32_andpd256_mask ((__v4df) __A,
+ (__v4df) __B,
+ (__v4df)
+ _mm256_setzero_pd (),
+ (__mmask8) __U);
+}
+
+static __inline__ __m128d __DEFAULT_FN_ATTRS
+_mm_mask_and_pd (__m128d __W, __mmask8 __U, __m128d __A, __m128d __B) {
+ return (__m128d) __builtin_ia32_andpd128_mask ((__v2df) __A,
+ (__v2df) __B,
+ (__v2df) __W,
+ (__mmask8) __U);
+}
+
+static __inline__ __m128d __DEFAULT_FN_ATTRS
+_mm_maskz_and_pd (__mmask8 __U, __m128d __A, __m128d __B) {
+ return (__m128d) __builtin_ia32_andpd128_mask ((__v2df) __A,
+ (__v2df) __B,
+ (__v2df)
+ _mm_setzero_pd (),
+ (__mmask8) __U);
+}
+
+static __inline__ __m256 __DEFAULT_FN_ATTRS
+_mm256_mask_and_ps (__m256 __W, __mmask8 __U, __m256 __A, __m256 __B) {
+ return (__m256) __builtin_ia32_andps256_mask ((__v8sf) __A,
+ (__v8sf) __B,
+ (__v8sf) __W,
+ (__mmask8) __U);
+}
+
+static __inline__ __m256 __DEFAULT_FN_ATTRS
+_mm256_maskz_and_ps (__mmask8 __U, __m256 __A, __m256 __B) {
+ return (__m256) __builtin_ia32_andps256_mask ((__v8sf) __A,
+ (__v8sf) __B,
+ (__v8sf)
+ _mm256_setzero_ps (),
+ (__mmask8) __U);
+}
+
+static __inline__ __m128 __DEFAULT_FN_ATTRS
+_mm_mask_and_ps (__m128 __W, __mmask8 __U, __m128 __A, __m128 __B) {
+ return (__m128) __builtin_ia32_andps128_mask ((__v4sf) __A,
+ (__v4sf) __B,
+ (__v4sf) __W,
+ (__mmask8) __U);
+}
+
+static __inline__ __m128 __DEFAULT_FN_ATTRS
+_mm_maskz_and_ps (__mmask8 __U, __m128 __A, __m128 __B) {
+ return (__m128) __builtin_ia32_andps128_mask ((__v4sf) __A,
+ (__v4sf) __B,
+ (__v4sf)
+ _mm_setzero_ps (),
+ (__mmask8) __U);
+}
+
+static __inline__ __m256d __DEFAULT_FN_ATTRS
+_mm256_mask_xor_pd (__m256d __W, __mmask8 __U, __m256d __A,
+ __m256d __B) {
+ return (__m256d) __builtin_ia32_xorpd256_mask ((__v4df) __A,
+ (__v4df) __B,
+ (__v4df) __W,
+ (__mmask8) __U);
+}
+
+static __inline__ __m256d __DEFAULT_FN_ATTRS
+_mm256_maskz_xor_pd (__mmask8 __U, __m256d __A, __m256d __B) {
+ return (__m256d) __builtin_ia32_xorpd256_mask ((__v4df) __A,
+ (__v4df) __B,
+ (__v4df)
+ _mm256_setzero_pd (),
+ (__mmask8) __U);
+}
+
+static __inline__ __m128d __DEFAULT_FN_ATTRS
+_mm_mask_xor_pd (__m128d __W, __mmask8 __U, __m128d __A, __m128d __B) {
+ return (__m128d) __builtin_ia32_xorpd128_mask ((__v2df) __A,
+ (__v2df) __B,
+ (__v2df) __W,
+ (__mmask8) __U);
+}
+
+static __inline__ __m128d __DEFAULT_FN_ATTRS
+_mm_maskz_xor_pd (__mmask8 __U, __m128d __A, __m128d __B) {
+ return (__m128d) __builtin_ia32_xorpd128_mask ((__v2df) __A,
+ (__v2df) __B,
+ (__v2df)
+ _mm_setzero_pd (),
+ (__mmask8) __U);
+}
+
+static __inline__ __m256 __DEFAULT_FN_ATTRS
+_mm256_mask_xor_ps (__m256 __W, __mmask8 __U, __m256 __A, __m256 __B) {
+ return (__m256) __builtin_ia32_xorps256_mask ((__v8sf) __A,
+ (__v8sf) __B,
+ (__v8sf) __W,
+ (__mmask8) __U);
+}
+
+static __inline__ __m256 __DEFAULT_FN_ATTRS
+_mm256_maskz_xor_ps (__mmask8 __U, __m256 __A, __m256 __B) {
+ return (__m256) __builtin_ia32_xorps256_mask ((__v8sf) __A,
+ (__v8sf) __B,
+ (__v8sf)
+ _mm256_setzero_ps (),
+ (__mmask8) __U);
+}
+
+static __inline__ __m128 __DEFAULT_FN_ATTRS
+_mm_mask_xor_ps (__m128 __W, __mmask8 __U, __m128 __A, __m128 __B) {
+ return (__m128) __builtin_ia32_xorps128_mask ((__v4sf) __A,
+ (__v4sf) __B,
+ (__v4sf) __W,
+ (__mmask8) __U);
+}
+
+static __inline__ __m128 __DEFAULT_FN_ATTRS
+_mm_maskz_xor_ps (__mmask8 __U, __m128 __A, __m128 __B) {
+ return (__m128) __builtin_ia32_xorps128_mask ((__v4sf) __A,
+ (__v4sf) __B,
+ (__v4sf)
+ _mm_setzero_ps (),
+ (__mmask8) __U);
+}
+
+static __inline__ __m256d __DEFAULT_FN_ATTRS
+_mm256_mask_or_pd (__m256d __W, __mmask8 __U, __m256d __A, __m256d __B) {
+ return (__m256d) __builtin_ia32_orpd256_mask ((__v4df) __A,
+ (__v4df) __B,
+ (__v4df) __W,
+ (__mmask8) __U);
+}
+
+static __inline__ __m256d __DEFAULT_FN_ATTRS
+_mm256_maskz_or_pd (__mmask8 __U, __m256d __A, __m256d __B) {
+ return (__m256d) __builtin_ia32_orpd256_mask ((__v4df) __A,
+ (__v4df) __B,
+ (__v4df)
+ _mm256_setzero_pd (),
+ (__mmask8) __U);
+}
+
+static __inline__ __m128d __DEFAULT_FN_ATTRS
+_mm_mask_or_pd (__m128d __W, __mmask8 __U, __m128d __A, __m128d __B) {
+ return (__m128d) __builtin_ia32_orpd128_mask ((__v2df) __A,
+ (__v2df) __B,
+ (__v2df) __W,
+ (__mmask8) __U);
+}
+
+static __inline__ __m128d __DEFAULT_FN_ATTRS
+_mm_maskz_or_pd (__mmask8 __U, __m128d __A, __m128d __B) {
+ return (__m128d) __builtin_ia32_orpd128_mask ((__v2df) __A,
+ (__v2df) __B,
+ (__v2df)
+ _mm_setzero_pd (),
+ (__mmask8) __U);
+}
+
+static __inline__ __m256 __DEFAULT_FN_ATTRS
+_mm256_mask_or_ps (__m256 __W, __mmask8 __U, __m256 __A, __m256 __B) {
+ return (__m256) __builtin_ia32_orps256_mask ((__v8sf) __A,
+ (__v8sf) __B,
+ (__v8sf) __W,
+ (__mmask8) __U);
+}
+
+static __inline__ __m256 __DEFAULT_FN_ATTRS
+_mm256_maskz_or_ps (__mmask8 __U, __m256 __A, __m256 __B) {
+ return (__m256) __builtin_ia32_orps256_mask ((__v8sf) __A,
+ (__v8sf) __B,
+ (__v8sf)
+ _mm256_setzero_ps (),
+ (__mmask8) __U);
+}
+
+static __inline__ __m128 __DEFAULT_FN_ATTRS
+_mm_mask_or_ps (__m128 __W, __mmask8 __U, __m128 __A, __m128 __B) {
+ return (__m128) __builtin_ia32_orps128_mask ((__v4sf) __A,
+ (__v4sf) __B,
+ (__v4sf) __W,
+ (__mmask8) __U);
+}
+
+static __inline__ __m128 __DEFAULT_FN_ATTRS
+_mm_maskz_or_ps (__mmask8 __U, __m128 __A, __m128 __B) {
+ return (__m128) __builtin_ia32_orps128_mask ((__v4sf) __A,
+ (__v4sf) __B,
+ (__v4sf)
+ _mm_setzero_ps (),
+ (__mmask8) __U);
+}
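+
+/* Usage sketch (illustrative): these are bitwise operations on the raw FP
+ * bit patterns (AVX512DQ supplies the masked encodings); andnot computes
+ * ~__A & __B, so clearing sign bits yields an absolute value:
+ *
+ *   __m128d ax = _mm_maskz_andnot_pd(0x3, _mm_set1_pd(-0.0), x);  // |x|
+ */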
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_cvtpd_epi64 (__m128d __A) {
+ return (__m128i) __builtin_ia32_cvtpd2qq128_mask ((__v2df) __A,
+ (__v2di) _mm_setzero_si128(),
+ (__mmask8) -1);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_mask_cvtpd_epi64 (__m128i __W, __mmask8 __U, __m128d __A) {
+ return (__m128i) __builtin_ia32_cvtpd2qq128_mask ((__v2df) __A,
+ (__v2di) __W,
+ (__mmask8) __U);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_maskz_cvtpd_epi64 (__mmask8 __U, __m128d __A) {
+ return (__m128i) __builtin_ia32_cvtpd2qq128_mask ((__v2df) __A,
+ (__v2di) _mm_setzero_si128(),
+ (__mmask8) __U);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS
+_mm256_cvtpd_epi64 (__m256d __A) {
+ return (__m256i) __builtin_ia32_cvtpd2qq256_mask ((__v4df) __A,
+ (__v4di) _mm256_setzero_si256(),
+ (__mmask8) -1);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS
+_mm256_mask_cvtpd_epi64 (__m256i __W, __mmask8 __U, __m256d __A) {
+ return (__m256i) __builtin_ia32_cvtpd2qq256_mask ((__v4df) __A,
+ (__v4di) __W,
+ (__mmask8) __U);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS
+_mm256_maskz_cvtpd_epi64 (__mmask8 __U, __m256d __A) {
+ return (__m256i) __builtin_ia32_cvtpd2qq256_mask ((__v4df) __A,
+ (__v4di) _mm256_setzero_si256(),
+ (__mmask8) __U);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_cvtpd_epu64 (__m128d __A) {
+ return (__m128i) __builtin_ia32_cvtpd2uqq128_mask ((__v2df) __A,
+ (__v2di) _mm_setzero_si128(),
+ (__mmask8) -1);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_mask_cvtpd_epu64 (__m128i __W, __mmask8 __U, __m128d __A) {
+ return (__m128i) __builtin_ia32_cvtpd2uqq128_mask ((__v2df) __A,
+ (__v2di) __W,
+ (__mmask8) __U);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_maskz_cvtpd_epu64 (__mmask8 __U, __m128d __A) {
+ return (__m128i) __builtin_ia32_cvtpd2uqq128_mask ((__v2df) __A,
+ (__v2di) _mm_setzero_si128(),
+ (__mmask8) __U);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS
+_mm256_cvtpd_epu64 (__m256d __A) {
+ return (__m256i) __builtin_ia32_cvtpd2uqq256_mask ((__v4df) __A,
+ (__v4di) _mm256_setzero_si256(),
+ (__mmask8) -1);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS
+_mm256_mask_cvtpd_epu64 (__m256i __W, __mmask8 __U, __m256d __A) {
+ return (__m256i) __builtin_ia32_cvtpd2uqq256_mask ((__v4df) __A,
+ (__v4di) __W,
+ (__mmask8) __U);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS
+_mm256_maskz_cvtpd_epu64 (__mmask8 __U, __m256d __A) {
+ return (__m256i) __builtin_ia32_cvtpd2uqq256_mask ((__v4df) __A,
+ (__v4di) _mm256_setzero_si256(),
+ (__mmask8) __U);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_cvtps_epi64 (__m128 __A) {
+ return (__m128i) __builtin_ia32_cvtps2qq128_mask ((__v4sf) __A,
+ (__v2di) _mm_setzero_si128(),
+ (__mmask8) -1);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_mask_cvtps_epi64 (__m128i __W, __mmask8 __U, __m128 __A) {
+ return (__m128i) __builtin_ia32_cvtps2qq128_mask ((__v4sf) __A,
+ (__v2di) __W,
+ (__mmask8) __U);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_maskz_cvtps_epi64 (__mmask8 __U, __m128 __A) {
+ return (__m128i) __builtin_ia32_cvtps2qq128_mask ((__v4sf) __A,
+ (__v2di) _mm_setzero_si128(),
+ (__mmask8) __U);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS
+_mm256_cvtps_epi64 (__m128 __A) {
+ return (__m256i) __builtin_ia32_cvtps2qq256_mask ((__v4sf) __A,
+ (__v4di) _mm256_setzero_si256(),
+ (__mmask8) -1);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS
+_mm256_mask_cvtps_epi64 (__m256i __W, __mmask8 __U, __m128 __A) {
+ return (__m256i) __builtin_ia32_cvtps2qq256_mask ((__v4sf) __A,
+ (__v4di) __W,
+ (__mmask8) __U);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS
+_mm256_maskz_cvtps_epi64 (__mmask8 __U, __m128 __A) {
+ return (__m256i) __builtin_ia32_cvtps2qq256_mask ((__v4sf) __A,
+ (__v4di) _mm256_setzero_si256(),
+ (__mmask8) __U);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_cvtps_epu64 (__m128 __A) {
+ return (__m128i) __builtin_ia32_cvtps2uqq128_mask ((__v4sf) __A,
+ (__v2di) _mm_setzero_si128(),
+ (__mmask8) -1);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_mask_cvtps_epu64 (__m128i __W, __mmask8 __U, __m128 __A) {
+ return (__m128i) __builtin_ia32_cvtps2uqq128_mask ((__v4sf) __A,
+ (__v2di) __W,
+ (__mmask8) __U);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_maskz_cvtps_epu64 (__mmask8 __U, __m128 __A) {
+ return (__m128i) __builtin_ia32_cvtps2uqq128_mask ((__v4sf) __A,
+ (__v2di) _mm_setzero_si128(),
+ (__mmask8) __U);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS
+_mm256_cvtps_epu64 (__m128 __A) {
+ return (__m256i) __builtin_ia32_cvtps2uqq256_mask ((__v4sf) __A,
+ (__v4di) _mm256_setzero_si256(),
+ (__mmask8) -1);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS
+_mm256_mask_cvtps_epu64 (__m256i __W, __mmask8 __U, __m128 __A) {
+ return (__m256i) __builtin_ia32_cvtps2uqq256_mask ((__v4sf) __A,
+ (__v4di) __W,
+ (__mmask8) __U);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS
+_mm256_maskz_cvtps_epu64 (__mmask8 __U, __m128 __A) {
+ return (__m256i) __builtin_ia32_cvtps2uqq256_mask ((__v4sf) __A,
+ (__v4di) _mm256_setzero_si256(),
+ (__mmask8) __U);
+}
+
+static __inline__ __m128d __DEFAULT_FN_ATTRS
+_mm_cvtepi64_pd (__m128i __A) {
+ return (__m128d) __builtin_ia32_cvtqq2pd128_mask ((__v2di) __A,
+ (__v2df) _mm_setzero_pd(),
+ (__mmask8) -1);
+}
+
+static __inline__ __m128d __DEFAULT_FN_ATTRS
+_mm_mask_cvtepi64_pd (__m128d __W, __mmask8 __U, __m128i __A) {
+ return (__m128d) __builtin_ia32_cvtqq2pd128_mask ((__v2di) __A,
+ (__v2df) __W,
+ (__mmask8) __U);
+}
+
+static __inline__ __m128d __DEFAULT_FN_ATTRS
+_mm_maskz_cvtepi64_pd (__mmask8 __U, __m128i __A) {
+ return (__m128d) __builtin_ia32_cvtqq2pd128_mask ((__v2di) __A,
+ (__v2df) _mm_setzero_pd(),
+ (__mmask8) __U);
+}
+
+static __inline__ __m256d __DEFAULT_FN_ATTRS
+_mm256_cvtepi64_pd (__m256i __A) {
+ return (__m256d) __builtin_ia32_cvtqq2pd256_mask ((__v4di) __A,
+ (__v4df) _mm256_setzero_pd(),
+ (__mmask8) -1);
+}
+
+static __inline__ __m256d __DEFAULT_FN_ATTRS
+_mm256_mask_cvtepi64_pd (__m256d __W, __mmask8 __U, __m256i __A) {
+ return (__m256d) __builtin_ia32_cvtqq2pd256_mask ((__v4di) __A,
+ (__v4df) __W,
+ (__mmask8) __U);
+}
+
+static __inline__ __m256d __DEFAULT_FN_ATTRS
+_mm256_maskz_cvtepi64_pd (__mmask8 __U, __m256i __A) {
+ return (__m256d) __builtin_ia32_cvtqq2pd256_mask ((__v4di) __A,
+ (__v4df) _mm256_setzero_pd(),
+ (__mmask8) __U);
+}
+
+static __inline__ __m128 __DEFAULT_FN_ATTRS
+_mm_cvtepi64_ps (__m128i __A) {
+ return (__m128) __builtin_ia32_cvtqq2ps128_mask ((__v2di) __A,
+ (__v4sf) _mm_setzero_ps(),
+ (__mmask8) -1);
+}
+
+static __inline__ __m128 __DEFAULT_FN_ATTRS
+_mm_mask_cvtepi64_ps (__m128 __W, __mmask8 __U, __m128i __A) {
+ return (__m128) __builtin_ia32_cvtqq2ps128_mask ((__v2di) __A,
+ (__v4sf) __W,
+ (__mmask8) __U);
+}
+
+static __inline__ __m128 __DEFAULT_FN_ATTRS
+_mm_maskz_cvtepi64_ps (__mmask8 __U, __m128i __A) {
+ return (__m128) __builtin_ia32_cvtqq2ps128_mask ((__v2di) __A,
+ (__v4sf) _mm_setzero_ps(),
+ (__mmask8) __U);
+}
+
+static __inline__ __m128 __DEFAULT_FN_ATTRS
+_mm256_cvtepi64_ps (__m256i __A) {
+ return (__m128) __builtin_ia32_cvtqq2ps256_mask ((__v4di) __A,
+ (__v4sf) _mm_setzero_ps(),
+ (__mmask8) -1);
+}
+
+static __inline__ __m128 __DEFAULT_FN_ATTRS
+_mm256_mask_cvtepi64_ps (__m128 __W, __mmask8 __U, __m256i __A) {
+ return (__m128) __builtin_ia32_cvtqq2ps256_mask ((__v4di) __A,
+ (__v4sf) __W,
+ (__mmask8) __U);
+}
+
+static __inline__ __m128 __DEFAULT_FN_ATTRS
+_mm256_maskz_cvtepi64_ps (__mmask8 __U, __m256i __A) {
+ return (__m128) __builtin_ia32_cvtqq2ps256_mask ((__v4di) __A,
+ (__v4sf) _mm_setzero_ps(),
+ (__mmask8) __U);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_cvttpd_epi64 (__m128d __A) {
+ return (__m128i) __builtin_ia32_cvttpd2qq128_mask ((__v2df) __A,
+ (__v2di) _mm_setzero_si128(),
+ (__mmask8) -1);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_mask_cvttpd_epi64 (__m128i __W, __mmask8 __U, __m128d __A) {
+ return (__m128i) __builtin_ia32_cvttpd2qq128_mask ((__v2df) __A,
+ (__v2di) __W,
+ (__mmask8) __U);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_maskz_cvttpd_epi64 (__mmask8 __U, __m128d __A) {
+ return (__m128i) __builtin_ia32_cvttpd2qq128_mask ((__v2df) __A,
+ (__v2di) _mm_setzero_si128(),
+ (__mmask8) __U);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS
+_mm256_cvttpd_epi64 (__m256d __A) {
+ return (__m256i) __builtin_ia32_cvttpd2qq256_mask ((__v4df) __A,
+ (__v4di) _mm256_setzero_si256(),
+ (__mmask8) -1);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS
+_mm256_mask_cvttpd_epi64 (__m256i __W, __mmask8 __U, __m256d __A) {
+ return (__m256i) __builtin_ia32_cvttpd2qq256_mask ((__v4df) __A,
+ (__v4di) __W,
+ (__mmask8) __U);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS
+_mm256_maskz_cvttpd_epi64 (__mmask8 __U, __m256d __A) {
+ return (__m256i) __builtin_ia32_cvttpd2qq256_mask ((__v4df) __A,
+ (__v4di) _mm256_setzero_si256(),
+ (__mmask8) __U);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_cvttpd_epu64 (__m128d __A) {
+ return (__m128i) __builtin_ia32_cvttpd2uqq128_mask ((__v2df) __A,
+ (__v2di) _mm_setzero_si128(),
+ (__mmask8) -1);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_mask_cvttpd_epu64 (__m128i __W, __mmask8 __U, __m128d __A) {
+ return (__m128i) __builtin_ia32_cvttpd2uqq128_mask ((__v2df) __A,
+ (__v2di) __W,
+ (__mmask8) __U);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_maskz_cvttpd_epu64 (__mmask8 __U, __m128d __A) {
+ return (__m128i) __builtin_ia32_cvttpd2uqq128_mask ((__v2df) __A,
+ (__v2di) _mm_setzero_si128(),
+ (__mmask8) __U);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS
+_mm256_cvttpd_epu64 (__m256d __A) {
+ return (__m256i) __builtin_ia32_cvttpd2uqq256_mask ((__v4df) __A,
+ (__v4di) _mm256_setzero_si256(),
+ (__mmask8) -1);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS
+_mm256_mask_cvttpd_epu64 (__m256i __W, __mmask8 __U, __m256d __A) {
+ return (__m256i) __builtin_ia32_cvttpd2uqq256_mask ((__v4df) __A,
+ (__v4di) __W,
+ (__mmask8) __U);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS
+_mm256_maskz_cvttpd_epu64 (__mmask8 __U, __m256d __A) {
+ return (__m256i) __builtin_ia32_cvttpd2uqq256_mask ((__v4df) __A,
+ (__v4di) _mm256_setzero_si256(),
+ (__mmask8) __U);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_cvttps_epi64 (__m128 __A) {
+ return (__m128i) __builtin_ia32_cvttps2qq128_mask ((__v4sf) __A,
+ (__v2di) _mm_setzero_si128(),
+ (__mmask8) -1);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_mask_cvttps_epi64 (__m128i __W, __mmask8 __U, __m128 __A) {
+ return (__m128i) __builtin_ia32_cvttps2qq128_mask ((__v4sf) __A,
+ (__v2di) __W,
+ (__mmask8) __U);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_maskz_cvttps_epi64 (__mmask8 __U, __m128 __A) {
+ return (__m128i) __builtin_ia32_cvttps2qq128_mask ((__v4sf) __A,
+ (__v2di) _mm_setzero_si128(),
+ (__mmask8) __U);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS
+_mm256_cvttps_epi64 (__m128 __A) {
+ return (__m256i) __builtin_ia32_cvttps2qq256_mask ((__v4sf) __A,
+ (__v4di) _mm256_setzero_si256(),
+ (__mmask8) -1);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS
+_mm256_mask_cvttps_epi64 (__m256i __W, __mmask8 __U, __m128 __A) {
+ return (__m256i) __builtin_ia32_cvttps2qq256_mask ((__v4sf) __A,
+ (__v4di) __W,
+ (__mmask8) __U);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS
+_mm256_maskz_cvttps_epi64 (__mmask8 __U, __m128 __A) {
+ return (__m256i) __builtin_ia32_cvttps2qq256_mask ((__v4sf) __A,
+ (__v4di) _mm256_setzero_si256(),
+ (__mmask8) __U);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_cvttps_epu64 (__m128 __A) {
+ return (__m128i) __builtin_ia32_cvttps2uqq128_mask ((__v4sf) __A,
+ (__v2di) _mm_setzero_si128(),
+ (__mmask8) -1);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_mask_cvttps_epu64 (__m128i __W, __mmask8 __U, __m128 __A) {
+ return (__m128i) __builtin_ia32_cvttps2uqq128_mask ((__v4sf) __A,
+ (__v2di) __W,
+ (__mmask8) __U);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_maskz_cvttps_epu64 (__mmask8 __U, __m128 __A) {
+ return (__m128i) __builtin_ia32_cvttps2uqq128_mask ((__v4sf) __A,
+ (__v2di) _mm_setzero_si128(),
+ (__mmask8) __U);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS
+_mm256_cvttps_epu64 (__m128 __A) {
+ return (__m256i) __builtin_ia32_cvttps2uqq256_mask ((__v4sf) __A,
+ (__v4di) _mm256_setzero_si256(),
+ (__mmask8) -1);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS
+_mm256_mask_cvttps_epu64 (__m256i __W, __mmask8 __U, __m128 __A) {
+ return (__m256i) __builtin_ia32_cvttps2uqq256_mask ((__v4sf) __A,
+ (__v4di) __W,
+ (__mmask8) __U);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS
+_mm256_maskz_cvttps_epu64 (__mmask8 __U, __m128 __A) {
+ return (__m256i) __builtin_ia32_cvttps2uqq256_mask ((__v4sf) __A,
+ (__v4di) _mm256_setzero_si256(),
+ (__mmask8) __U);
+}
+
+static __inline__ __m128d __DEFAULT_FN_ATTRS
+_mm_cvtepu64_pd (__m128i __A) {
+ return (__m128d) __builtin_ia32_cvtuqq2pd128_mask ((__v2di) __A,
+ (__v2df) _mm_setzero_pd(),
+ (__mmask8) -1);
+}
+
+static __inline__ __m128d __DEFAULT_FN_ATTRS
+_mm_mask_cvtepu64_pd (__m128d __W, __mmask8 __U, __m128i __A) {
+ return (__m128d) __builtin_ia32_cvtuqq2pd128_mask ((__v2di) __A,
+ (__v2df) __W,
+ (__mmask8) __U);
+}
+
+static __inline__ __m128d __DEFAULT_FN_ATTRS
+_mm_maskz_cvtepu64_pd (__mmask8 __U, __m128i __A) {
+ return (__m128d) __builtin_ia32_cvtuqq2pd128_mask ((__v2di) __A,
+ (__v2df) _mm_setzero_pd(),
+ (__mmask8) __U);
+}
+
+static __inline__ __m256d __DEFAULT_FN_ATTRS
+_mm256_cvtepu64_pd (__m256i __A) {
+ return (__m256d) __builtin_ia32_cvtuqq2pd256_mask ((__v4di) __A,
+ (__v4df) _mm256_setzero_pd(),
+ (__mmask8) -1);
+}
+
+static __inline__ __m256d __DEFAULT_FN_ATTRS
+_mm256_mask_cvtepu64_pd (__m256d __W, __mmask8 __U, __m256i __A) {
+ return (__m256d) __builtin_ia32_cvtuqq2pd256_mask ((__v4di) __A,
+ (__v4df) __W,
+ (__mmask8) __U);
+}
+
+static __inline__ __m256d __DEFAULT_FN_ATTRS
+_mm256_maskz_cvtepu64_pd (__mmask8 __U, __m256i __A) {
+ return (__m256d) __builtin_ia32_cvtuqq2pd256_mask ((__v4di) __A,
+ (__v4df) _mm256_setzero_pd(),
+ (__mmask8) __U);
+}
+
+static __inline__ __m128 __DEFAULT_FN_ATTRS
+_mm_cvtepu64_ps (__m128i __A) {
+ return (__m128) __builtin_ia32_cvtuqq2ps128_mask ((__v2di) __A,
+ (__v4sf) _mm_setzero_ps(),
+ (__mmask8) -1);
+}
+
+static __inline__ __m128 __DEFAULT_FN_ATTRS
+_mm_mask_cvtepu64_ps (__m128 __W, __mmask8 __U, __m128i __A) {
+ return (__m128) __builtin_ia32_cvtuqq2ps128_mask ((__v2di) __A,
+ (__v4sf) __W,
+ (__mmask8) __U);
+}
+
+static __inline__ __m128 __DEFAULT_FN_ATTRS
+_mm_maskz_cvtepu64_ps (__mmask8 __U, __m128i __A) {
+ return (__m128) __builtin_ia32_cvtuqq2ps128_mask ((__v2di) __A,
+ (__v4sf) _mm_setzero_ps(),
+ (__mmask8) __U);
+}
+
+static __inline__ __m128 __DEFAULT_FN_ATTRS
+_mm256_cvtepu64_ps (__m256i __A) {
+ return (__m128) __builtin_ia32_cvtuqq2ps256_mask ((__v4di) __A,
+ (__v4sf) _mm_setzero_ps(),
+ (__mmask8) -1);
+}
+
+static __inline__ __m128 __DEFAULT_FN_ATTRS
+_mm256_mask_cvtepu64_ps (__m128 __W, __mmask8 __U, __m256i __A) {
+ return (__m128) __builtin_ia32_cvtuqq2ps256_mask ((__v4di) __A,
+ (__v4sf) __W,
+ (__mmask8) __U);
+}
+
+static __inline__ __m128 __DEFAULT_FN_ATTRS
+_mm256_maskz_cvtepu64_ps (__mmask8 __U, __m256i __A) {
+ return (__m128) __builtin_ia32_cvtuqq2ps256_mask ((__v4di) __A,
+ (__v4sf) _mm_setzero_ps(),
+ (__mmask8) __U);
+}
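+
+/* Usage sketch (illustrative): the cvtt* forms above truncate toward zero
+ * while the cvt* forms round under the current rounding mode; the *_epu64
+ * variants treat the 64-bit integers as unsigned.
+ *
+ *   __m128i q = _mm_cvttpd_epi64(_mm_set1_pd(2.9));  // both lanes -> 2
+ */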
+
+#define _mm_range_pd(__A, __B, __C) __extension__ ({ \
+ (__m128d) __builtin_ia32_rangepd128_mask ((__v2df) __A, (__v2df) __B, __C, \
+ (__v2df) _mm_setzero_pd(), (__mmask8) -1); })
+
+#define _mm_mask_range_pd(__W, __U, __A, __B, __C) __extension__ ({ \
+ (__m128d) __builtin_ia32_rangepd128_mask ((__v2df) __A, (__v2df) __B, __C, \
+ (__v2df) __W, (__mmask8) __U); })
+
+#define _mm_maskz_range_pd(__U, __A, __B, __C) __extension__ ({ \
+ (__m128d) __builtin_ia32_rangepd128_mask ((__v2df) __A, (__v2df) __B, __C, \
+ (__v2df) _mm_setzero_pd(), (__mmask8) __U); })
+
+#define _mm256_range_pd(__A, __B, __C) __extension__ ({ \
+ (__m256d) __builtin_ia32_rangepd256_mask ((__v4df) __A, (__v4df) __B, __C, \
+ (__v4df) _mm256_setzero_pd(), (__mmask8) -1); })
+
+#define _mm256_mask_range_pd(__W, __U, __A, __B, __C) __extension__ ({ \
+ (__m256d) __builtin_ia32_rangepd256_mask ((__v4df) __A, (__v4df) __B, __C, \
+ (__v4df) __W, (__mmask8) __U); })
+
+#define _mm256_maskz_range_pd(__U, __A, __B, __C) __extension__ ({ \
+ (__m256d) __builtin_ia32_rangepd256_mask ((__v4df) __A, (__v4df) __B, __C, \
+ (__v4df) _mm256_setzero_pd(), (__mmask8) __U); })
+
+#define _mm_range_ps(__A, __B, __C) __extension__ ({ \
+ (__m128) __builtin_ia32_rangeps128_mask ((__v4sf) __A, (__v4sf) __B, __C, \
+ (__v4sf) _mm_setzero_ps(), (__mmask8) -1); })
+
+#define _mm_mask_range_ps(__W, __U, __A, __B, __C) __extension__ ({ \
+ (__m128) __builtin_ia32_rangeps128_mask ((__v4sf) __A, (__v4sf) __B, __C, \
+ (__v4sf) __W, (__mmask8) __U); })
+
+#define _mm_maskz_range_ps(__U, __A, __B, __C) __extension__ ({ \
+ (__m128) __builtin_ia32_rangeps128_mask ((__v4sf) __A, (__v4sf) __B, __C, \
+ (__v4sf) _mm_setzero_ps(), (__mmask8) __U); })
+
+#define _mm256_range_ps(__A, __B, __C) __extension__ ({ \
+ (__m256) __builtin_ia32_rangeps256_mask ((__v8sf) __A, (__v8sf) __B, __C, \
+ (__v8sf) _mm256_setzero_ps(), (__mmask8) -1); })
+
+#define _mm256_mask_range_ps(__W, __U, __A, __B, __C) __extension__ ({ \
+ (__m256) __builtin_ia32_rangeps256_mask ((__v8sf) __A, (__v8sf) __B, __C, \
+ (__v8sf) __W, (__mmask8) __U); })
+
+#define _mm256_maskz_range_ps(__U, __A, __B, __C) __extension__ ({ \
+ (__m256) __builtin_ia32_rangeps256_mask ((__v8sf) __A, (__v8sf) __B, __C, \
+ (__v8sf) _mm256_setzero_ps(), (__mmask8) __U); })
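+
+/* Usage sketch (illustrative, hypothetical operands): in the _range_
+ * macros, bits [1:0] of the immediate select the operation (0 = min,
+ * 1 = max, 2 = min of |x|, 3 = max of |x|) and bits [3:2] select the sign
+ * of the result.
+ *
+ *   __m128d m = _mm_range_pd(a, b, 5);  // max(a, b), sign of the selected value
+ */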
+
+#define _mm_reduce_pd(__A, __B) __extension__ ({ \
+ (__m128d) __builtin_ia32_reducepd128_mask ((__v2df) __A, __B, \
+ (__v2df) _mm_setzero_pd(), (__mmask8) -1); })
+
+#define _mm_mask_reduce_pd(__W, __U, __A, __B) __extension__ ({ \
+ (__m128d) __builtin_ia32_reducepd128_mask ((__v2df) __A, __B, \
+ (__v2df) __W, (__mmask8) __U); })
+
+#define _mm_maskz_reduce_pd(__U, __A, __B) __extension__ ({ \
+ (__m128d) __builtin_ia32_reducepd128_mask ((__v2df) __A, __B, \
+ (__v2df) _mm_setzero_pd(), (__mmask8) __U); })
+
+#define _mm256_reduce_pd(__A, __B) __extension__ ({ \
+ (__m256d) __builtin_ia32_reducepd256_mask ((__v4df) __A, __B, \
+ (__v4df) _mm256_setzero_pd(), (__mmask8) -1); })
+
+#define _mm256_mask_reduce_pd(__W, __U, __A, __B) __extension__ ({ \
+ (__m256d) __builtin_ia32_reducepd256_mask ((__v4df) __A, __B, \
+ (__v4df) __W, (__mmask8) __U); })
+
+#define _mm256_maskz_reduce_pd(__U, __A, __B) __extension__ ({ \
+ (__m256d) __builtin_ia32_reducepd256_mask ((__v4df) __A, __B, \
+ (__v4df) _mm256_setzero_pd(), (__mmask8) __U); })
+
+#define _mm_reduce_ps(__A, __B) __extension__ ({ \
+ (__m128) __builtin_ia32_reduceps128_mask ((__v4sf) __A, __B, \
+ (__v4sf) _mm_setzero_ps(), (__mmask8) -1); })
+
+#define _mm_mask_reduce_ps(__W, __U, __A, __B) __extension__ ({ \
+ (__m128) __builtin_ia32_reduceps128_mask ((__v4sf) __A, __B, \
+ (__v4sf) __W, (__mmask8) __U); })
+
+#define _mm_maskz_reduce_ps(__U, __A, __B) __extension__ ({ \
+ (__m128) __builtin_ia32_reduceps128_mask ((__v4sf) __A, __B, \
+ (__v4sf) _mm_setzero_ps(), (__mmask8) __U); })
+
+#define _mm256_reduce_ps(__A, __B) __extension__ ({ \
+ (__m256) __builtin_ia32_reduceps256_mask ((__v8sf) __A, __B, \
+ (__v8sf) _mm256_setzero_ps(), (__mmask8) -1); })
+
+#define _mm256_mask_reduce_ps(__W, __U, __A, __B) __extension__ ({ \
+ (__m256) __builtin_ia32_reduceps256_mask ((__v8sf) __A, __B, \
+ (__v8sf) __W, (__mmask8) __U); })
+
+#define _mm256_maskz_reduce_ps(__U, __A, __B) __extension__ ({ \
+ (__m256) __builtin_ia32_reduceps256_mask ((__v8sf) __A, __B, \
+ (__v8sf) _mm256_setzero_ps(), (__mmask8) __U); })
+
+#undef __DEFAULT_FN_ATTRS
+
+#endif /* __AVX512VLDQINTRIN_H */
diff --git a/clang-2690385/lib64/clang/3.8/include/avx512vlintrin.h b/clang-2690385/lib64/clang/3.8/include/avx512vlintrin.h
new file mode 100644
index 00000000..8f13536f
--- /dev/null
+++ b/clang-2690385/lib64/clang/3.8/include/avx512vlintrin.h
@@ -0,0 +1,4606 @@
+/*===---- avx512vlintrin.h - AVX512VL intrinsics ---------------------------===
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+ * THE SOFTWARE.
+ *
+ *===-----------------------------------------------------------------------===
+ */
+
+#ifndef __IMMINTRIN_H
+#error "Never use <avx512vlintrin.h> directly; include <immintrin.h> instead."
+#endif
+
+#ifndef __AVX512VLINTRIN_H
+#define __AVX512VLINTRIN_H
+
+#define __DEFAULT_FN_ATTRS __attribute__((__always_inline__, __nodebug__, __target__("avx512vl")))
+#define __DEFAULT_FN_ATTRS_BOTH __attribute__((__always_inline__, __nodebug__, __target__("avx512vl,avx512bw")))
+
+/* Integer compare */
+
+static __inline__ __mmask8 __DEFAULT_FN_ATTRS_BOTH
+_mm_cmpeq_epi32_mask(__m128i __a, __m128i __b) {
+ return (__mmask8)__builtin_ia32_pcmpeqd128_mask((__v4si)__a, (__v4si)__b,
+ (__mmask8)-1);
+}
+
+static __inline__ __mmask8 __DEFAULT_FN_ATTRS_BOTH
+_mm_mask_cmpeq_epi32_mask(__mmask8 __u, __m128i __a, __m128i __b) {
+ return (__mmask8)__builtin_ia32_pcmpeqd128_mask((__v4si)__a, (__v4si)__b,
+ __u);
+}
+
+static __inline__ __mmask8 __DEFAULT_FN_ATTRS
+_mm_cmpeq_epu32_mask(__m128i __a, __m128i __b) {
+ return (__mmask8)__builtin_ia32_ucmpd128_mask((__v4si)__a, (__v4si)__b, 0,
+ (__mmask8)-1);
+}
+
+static __inline__ __mmask8 __DEFAULT_FN_ATTRS
+_mm_mask_cmpeq_epu32_mask(__mmask8 __u, __m128i __a, __m128i __b) {
+ return (__mmask8)__builtin_ia32_ucmpd128_mask((__v4si)__a, (__v4si)__b, 0,
+ __u);
+}
+
+static __inline__ __mmask8 __DEFAULT_FN_ATTRS_BOTH
+_mm256_cmpeq_epi32_mask(__m256i __a, __m256i __b) {
+ return (__mmask8)__builtin_ia32_pcmpeqd256_mask((__v8si)__a, (__v8si)__b,
+ (__mmask8)-1);
+}
+
+static __inline__ __mmask8 __DEFAULT_FN_ATTRS_BOTH
+_mm256_mask_cmpeq_epi32_mask(__mmask8 __u, __m256i __a, __m256i __b) {
+ return (__mmask8)__builtin_ia32_pcmpeqd256_mask((__v8si)__a, (__v8si)__b,
+ __u);
+}
+
+static __inline__ __mmask8 __DEFAULT_FN_ATTRS
+_mm256_cmpeq_epu32_mask(__m256i __a, __m256i __b) {
+ return (__mmask8)__builtin_ia32_ucmpd256_mask((__v8si)__a, (__v8si)__b, 0,
+ (__mmask8)-1);
+}
+
+static __inline__ __mmask8 __DEFAULT_FN_ATTRS
+_mm256_mask_cmpeq_epu32_mask(__mmask8 __u, __m256i __a, __m256i __b) {
+ return (__mmask8)__builtin_ia32_ucmpd256_mask((__v8si)__a, (__v8si)__b, 0,
+ __u);
+}
+
+static __inline__ __mmask8 __DEFAULT_FN_ATTRS_BOTH
+_mm_cmpeq_epi64_mask(__m128i __a, __m128i __b) {
+ return (__mmask8)__builtin_ia32_pcmpeqq128_mask((__v2di)__a, (__v2di)__b,
+ (__mmask8)-1);
+}
+
+static __inline__ __mmask8 __DEFAULT_FN_ATTRS_BOTH
+_mm_mask_cmpeq_epi64_mask(__mmask8 __u, __m128i __a, __m128i __b) {
+ return (__mmask8)__builtin_ia32_pcmpeqq128_mask((__v2di)__a, (__v2di)__b,
+ __u);
+}
+
+static __inline__ __mmask8 __DEFAULT_FN_ATTRS
+_mm_cmpeq_epu64_mask(__m128i __a, __m128i __b) {
+ return (__mmask8)__builtin_ia32_ucmpq128_mask((__v2di)__a, (__v2di)__b, 0,
+ (__mmask8)-1);
+}
+
+static __inline__ __mmask8 __DEFAULT_FN_ATTRS
+_mm_mask_cmpeq_epu64_mask(__mmask8 __u, __m128i __a, __m128i __b) {
+ return (__mmask8)__builtin_ia32_ucmpq128_mask((__v2di)__a, (__v2di)__b, 0,
+ __u);
+}
+
+static __inline__ __mmask8 __DEFAULT_FN_ATTRS_BOTH
+_mm256_cmpeq_epi64_mask(__m256i __a, __m256i __b) {
+ return (__mmask8)__builtin_ia32_pcmpeqq256_mask((__v4di)__a, (__v4di)__b,
+ (__mmask8)-1);
+}
+
+static __inline__ __mmask8 __DEFAULT_FN_ATTRS_BOTH
+_mm256_mask_cmpeq_epi64_mask(__mmask8 __u, __m256i __a, __m256i __b) {
+ return (__mmask8)__builtin_ia32_pcmpeqq256_mask((__v4di)__a, (__v4di)__b,
+ __u);
+}
+
+static __inline__ __mmask8 __DEFAULT_FN_ATTRS
+_mm256_cmpeq_epu64_mask(__m256i __a, __m256i __b) {
+ return (__mmask8)__builtin_ia32_ucmpq256_mask((__v4di)__a, (__v4di)__b, 0,
+ (__mmask8)-1);
+}
+
+static __inline__ __mmask8 __DEFAULT_FN_ATTRS
+_mm256_mask_cmpeq_epu64_mask(__mmask8 __u, __m256i __a, __m256i __b) {
+ return (__mmask8)__builtin_ia32_ucmpq256_mask((__v4di)__a, (__v4di)__b, 0,
+ __u);
+}
+
+
+static __inline__ __mmask8 __DEFAULT_FN_ATTRS
+_mm_cmpge_epi32_mask(__m128i __a, __m128i __b) {
+ return (__mmask8)__builtin_ia32_cmpd128_mask((__v4si)__a, (__v4si)__b, 5,
+ (__mmask8)-1);
+}
+
+static __inline__ __mmask8 __DEFAULT_FN_ATTRS
+_mm_mask_cmpge_epi32_mask(__mmask8 __u, __m128i __a, __m128i __b) {
+ return (__mmask8)__builtin_ia32_cmpd128_mask((__v4si)__a, (__v4si)__b, 5,
+ __u);
+}
+
+static __inline__ __mmask8 __DEFAULT_FN_ATTRS
+_mm_cmpge_epu32_mask(__m128i __a, __m128i __b) {
+ return (__mmask8)__builtin_ia32_ucmpd128_mask((__v4si)__a, (__v4si)__b, 5,
+ (__mmask8)-1);
+}
+
+static __inline__ __mmask8 __DEFAULT_FN_ATTRS
+_mm_mask_cmpge_epu32_mask(__mmask8 __u, __m128i __a, __m128i __b) {
+ return (__mmask8)__builtin_ia32_ucmpd128_mask((__v4si)__a, (__v4si)__b, 5,
+ __u);
+}
+
+static __inline__ __mmask8 __DEFAULT_FN_ATTRS
+_mm256_cmpge_epi32_mask(__m256i __a, __m256i __b) {
+ return (__mmask8)__builtin_ia32_cmpd256_mask((__v8si)__a, (__v8si)__b, 5,
+ (__mmask8)-1);
+}
+
+static __inline__ __mmask8 __DEFAULT_FN_ATTRS
+_mm256_mask_cmpge_epi32_mask(__mmask8 __u, __m256i __a, __m256i __b) {
+ return (__mmask8)__builtin_ia32_cmpd256_mask((__v8si)__a, (__v8si)__b, 5,
+ __u);
+}
+
+static __inline__ __mmask8 __DEFAULT_FN_ATTRS
+_mm256_cmpge_epu32_mask(__m256i __a, __m256i __b) {
+ return (__mmask8)__builtin_ia32_ucmpd256_mask((__v8si)__a, (__v8si)__b, 5,
+ (__mmask8)-1);
+}
+
+static __inline__ __mmask8 __DEFAULT_FN_ATTRS
+_mm256_mask_cmpge_epu32_mask(__mmask8 __u, __m256i __a, __m256i __b) {
+ return (__mmask8)__builtin_ia32_ucmpd256_mask((__v8si)__a, (__v8si)__b, 5,
+ __u);
+}
+
+static __inline__ __mmask8 __DEFAULT_FN_ATTRS
+_mm_cmpge_epi64_mask(__m128i __a, __m128i __b) {
+ return (__mmask8)__builtin_ia32_cmpq128_mask((__v2di)__a, (__v2di)__b, 5,
+ (__mmask8)-1);
+}
+
+static __inline__ __mmask8 __DEFAULT_FN_ATTRS
+_mm_mask_cmpge_epi64_mask(__mmask8 __u, __m128i __a, __m128i __b) {
+ return (__mmask8)__builtin_ia32_cmpq128_mask((__v2di)__a, (__v2di)__b, 5,
+ __u);
+}
+
+static __inline__ __mmask8 __DEFAULT_FN_ATTRS
+_mm_cmpge_epu64_mask(__m128i __a, __m128i __b) {
+ return (__mmask8)__builtin_ia32_ucmpq128_mask((__v2di)__a, (__v2di)__b, 5,
+ (__mmask8)-1);
+}
+
+static __inline__ __mmask8 __DEFAULT_FN_ATTRS
+_mm_mask_cmpge_epu64_mask(__mmask8 __u, __m128i __a, __m128i __b) {
+ return (__mmask8)__builtin_ia32_ucmpq128_mask((__v2di)__a, (__v2di)__b, 5,
+ __u);
+}
+
+static __inline__ __mmask8 __DEFAULT_FN_ATTRS
+_mm256_cmpge_epi64_mask(__m256i __a, __m256i __b) {
+ return (__mmask8)__builtin_ia32_cmpq256_mask((__v4di)__a, (__v4di)__b, 5,
+ (__mmask8)-1);
+}
+
+static __inline__ __mmask8 __DEFAULT_FN_ATTRS
+_mm256_mask_cmpge_epi64_mask(__mmask8 __u, __m256i __a, __m256i __b) {
+ return (__mmask8)__builtin_ia32_cmpq256_mask((__v4di)__a, (__v4di)__b, 5,
+ __u);
+}
+
+static __inline__ __mmask8 __DEFAULT_FN_ATTRS
+_mm256_cmpge_epu64_mask(__m256i __a, __m256i __b) {
+ return (__mmask8)__builtin_ia32_ucmpq256_mask((__v4di)__a, (__v4di)__b, 5,
+ (__mmask8)-1);
+}
+
+static __inline__ __mmask8 __DEFAULT_FN_ATTRS
+_mm256_mask_cmpge_epu64_mask(__mmask8 __u, __m256i __a, __m256i __b) {
+ return (__mmask8)__builtin_ia32_ucmpq256_mask((__v4di)__a, (__v4di)__b, 5,
+ __u);
+}
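+
+/* Usage sketch (illustrative, hypothetical operands): these wrappers
+ * hard-code the AVX-512 predicate (0 = EQ above, 5 = GE here, 6 = GT
+ * below), and the masked forms AND a prior mask into the compare, so
+ * predicates chain cheaply:
+ *
+ *   __mmask8 k  = _mm_cmpge_epi32_mask(a, b);
+ *   __mmask8 k2 = _mm_mask_cmpge_epi32_mask(k, c, d);  // (a>=b) & (c>=d)
+ */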
+
+static __inline__ __mmask8 __DEFAULT_FN_ATTRS_BOTH
+_mm_cmpgt_epi32_mask(__m128i __a, __m128i __b) {
+ return (__mmask8)__builtin_ia32_pcmpgtd128_mask((__v4si)__a, (__v4si)__b,
+ (__mmask8)-1);
+}
+
+static __inline__ __mmask8 __DEFAULT_FN_ATTRS_BOTH
+_mm_mask_cmpgt_epi32_mask(__mmask8 __u, __m128i __a, __m128i __b) {
+ return (__mmask8)__builtin_ia32_pcmpgtd128_mask((__v4si)__a, (__v4si)__b,
+ __u);
+}
+
+static __inline__ __mmask8 __DEFAULT_FN_ATTRS
+_mm_cmpgt_epu32_mask(__m128i __a, __m128i __b) {
+ return (__mmask8)__builtin_ia32_ucmpd128_mask((__v4si)__a, (__v4si)__b, 6,
+ (__mmask8)-1);
+}
+
+static __inline__ __mmask8 __DEFAULT_FN_ATTRS
+_mm_mask_cmpgt_epu32_mask(__mmask8 __u, __m128i __a, __m128i __b) {
+ return (__mmask8)__builtin_ia32_ucmpd128_mask((__v4si)__a, (__v4si)__b, 6,
+ __u);
+}
+
+static __inline__ __mmask8 __DEFAULT_FN_ATTRS_BOTH
+_mm256_cmpgt_epi32_mask(__m256i __a, __m256i __b) {
+ return (__mmask8)__builtin_ia32_pcmpgtd256_mask((__v8si)__a, (__v8si)__b,
+ (__mmask8)-1);
+}
+
+static __inline__ __mmask8 __DEFAULT_FN_ATTRS_BOTH
+_mm256_mask_cmpgt_epi32_mask(__mmask8 __u, __m256i __a, __m256i __b) {
+ return (__mmask8)__builtin_ia32_pcmpgtd256_mask((__v8si)__a, (__v8si)__b,
+ __u);
+}
+
+static __inline__ __mmask8 __DEFAULT_FN_ATTRS
+_mm256_cmpgt_epu32_mask(__m256i __a, __m256i __b) {
+ return (__mmask8)__builtin_ia32_ucmpd256_mask((__v8si)__a, (__v8si)__b, 6,
+ (__mmask8)-1);
+}
+
+static __inline__ __mmask8 __DEFAULT_FN_ATTRS
+_mm256_mask_cmpgt_epu32_mask(__mmask8 __u, __m256i __a, __m256i __b) {
+ return (__mmask8)__builtin_ia32_ucmpd256_mask((__v8si)__a, (__v8si)__b, 6,
+ __u);
+}
+
+static __inline__ __mmask8 __DEFAULT_FN_ATTRS_BOTH
+_mm_cmpgt_epi64_mask(__m128i __a, __m128i __b) {
+ return (__mmask8)__builtin_ia32_pcmpgtq128_mask((__v2di)__a, (__v2di)__b,
+ (__mmask8)-1);
+}
+
+static __inline__ __mmask8 __DEFAULT_FN_ATTRS_BOTH
+_mm_mask_cmpgt_epi64_mask(__mmask8 __u, __m128i __a, __m128i __b) {
+ return (__mmask8)__builtin_ia32_pcmpgtq128_mask((__v2di)__a, (__v2di)__b,
+ __u);
+}
+
+static __inline__ __mmask8 __DEFAULT_FN_ATTRS
+_mm_cmpgt_epu64_mask(__m128i __a, __m128i __b) {
+ return (__mmask8)__builtin_ia32_ucmpq128_mask((__v2di)__a, (__v2di)__b, 6,
+ (__mmask8)-1);
+}
+
+static __inline__ __mmask8 __DEFAULT_FN_ATTRS
+_mm_mask_cmpgt_epu64_mask(__mmask8 __u, __m128i __a, __m128i __b) {
+ return (__mmask8)__builtin_ia32_ucmpq128_mask((__v2di)__a, (__v2di)__b, 6,
+ __u);
+}
+
+static __inline__ __mmask8 __DEFAULT_FN_ATTRS_BOTH
+_mm256_cmpgt_epi64_mask(__m256i __a, __m256i __b) {
+ return (__mmask8)__builtin_ia32_pcmpgtq256_mask((__v4di)__a, (__v4di)__b,
+ (__mmask8)-1);
+}
+
+static __inline__ __mmask8 __DEFAULT_FN_ATTRS_BOTH
+_mm256_mask_cmpgt_epi64_mask(__mmask8 __u, __m256i __a, __m256i __b) {
+ return (__mmask8)__builtin_ia32_pcmpgtq256_mask((__v4di)__a, (__v4di)__b,
+ __u);
+}
+
+static __inline__ __mmask8 __DEFAULT_FN_ATTRS
+_mm256_cmpgt_epu64_mask(__m256i __a, __m256i __b) {
+ return (__mmask8)__builtin_ia32_ucmpq256_mask((__v4di)__a, (__v4di)__b, 6,
+ (__mmask8)-1);
+}
+
+static __inline__ __mmask8 __DEFAULT_FN_ATTRS
+_mm256_mask_cmpgt_epu64_mask(__mmask8 __u, __m256i __a, __m256i __b) {
+ return (__mmask8)__builtin_ia32_ucmpq256_mask((__v4di)__a, (__v4di)__b, 6,
+ __u);
+}
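+
+/*
+ * Signed greater-than has dedicated pcmpgt{d,q} builtins, so the epi32/epi64
+ * wrappers above bypass the generic compare builtin; the unsigned epu32/epu64
+ * wrappers go through __builtin_ia32_ucmp* with predicate 6 (NLE).
+ */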
+
+static __inline__ __mmask8 __DEFAULT_FN_ATTRS
+_mm_cmple_epi32_mask(__m128i __a, __m128i __b) {
+ return (__mmask8)__builtin_ia32_cmpd128_mask((__v4si)__a, (__v4si)__b, 2,
+ (__mmask8)-1);
+}
+
+static __inline__ __mmask8 __DEFAULT_FN_ATTRS
+_mm_mask_cmple_epi32_mask(__mmask8 __u, __m128i __a, __m128i __b) {
+ return (__mmask8)__builtin_ia32_cmpd128_mask((__v4si)__a, (__v4si)__b, 2,
+ __u);
+}
+
+static __inline__ __mmask8 __DEFAULT_FN_ATTRS
+_mm_cmple_epu32_mask(__m128i __a, __m128i __b) {
+ return (__mmask8)__builtin_ia32_ucmpd128_mask((__v4si)__a, (__v4si)__b, 2,
+ (__mmask8)-1);
+}
+
+static __inline__ __mmask8 __DEFAULT_FN_ATTRS
+_mm_mask_cmple_epu32_mask(__mmask8 __u, __m128i __a, __m128i __b) {
+ return (__mmask8)__builtin_ia32_ucmpd128_mask((__v4si)__a, (__v4si)__b, 2,
+ __u);
+}
+
+static __inline__ __mmask8 __DEFAULT_FN_ATTRS
+_mm256_cmple_epi32_mask(__m256i __a, __m256i __b) {
+ return (__mmask8)__builtin_ia32_cmpd256_mask((__v8si)__a, (__v8si)__b, 2,
+ (__mmask8)-1);
+}
+
+static __inline__ __mmask8 __DEFAULT_FN_ATTRS
+_mm256_mask_cmple_epi32_mask(__mmask8 __u, __m256i __a, __m256i __b) {
+ return (__mmask8)__builtin_ia32_cmpd256_mask((__v8si)__a, (__v8si)__b, 2,
+ __u);
+}
+
+static __inline__ __mmask8 __DEFAULT_FN_ATTRS
+_mm256_cmple_epu32_mask(__m256i __a, __m256i __b) {
+ return (__mmask8)__builtin_ia32_ucmpd256_mask((__v8si)__a, (__v8si)__b, 2,
+ (__mmask8)-1);
+}
+
+static __inline__ __mmask8 __DEFAULT_FN_ATTRS
+_mm256_mask_cmple_epu32_mask(__mmask8 __u, __m256i __a, __m256i __b) {
+ return (__mmask8)__builtin_ia32_ucmpd256_mask((__v8si)__a, (__v8si)__b, 2,
+ __u);
+}
+
+static __inline__ __mmask8 __DEFAULT_FN_ATTRS
+_mm_cmple_epi64_mask(__m128i __a, __m128i __b) {
+ return (__mmask8)__builtin_ia32_cmpq128_mask((__v2di)__a, (__v2di)__b, 2,
+ (__mmask8)-1);
+}
+
+static __inline__ __mmask8 __DEFAULT_FN_ATTRS
+_mm_mask_cmple_epi64_mask(__mmask8 __u, __m128i __a, __m128i __b) {
+ return (__mmask8)__builtin_ia32_cmpq128_mask((__v2di)__a, (__v2di)__b, 2,
+ __u);
+}
+
+static __inline__ __mmask8 __DEFAULT_FN_ATTRS
+_mm_cmple_epu64_mask(__m128i __a, __m128i __b) {
+ return (__mmask8)__builtin_ia32_ucmpq128_mask((__v2di)__a, (__v2di)__b, 2,
+ (__mmask8)-1);
+}
+
+static __inline__ __mmask8 __DEFAULT_FN_ATTRS
+_mm_mask_cmple_epu64_mask(__mmask8 __u, __m128i __a, __m128i __b) {
+ return (__mmask8)__builtin_ia32_ucmpq128_mask((__v2di)__a, (__v2di)__b, 2,
+ __u);
+}
+
+static __inline__ __mmask8 __DEFAULT_FN_ATTRS
+_mm256_cmple_epi64_mask(__m256i __a, __m256i __b) {
+ return (__mmask8)__builtin_ia32_cmpq256_mask((__v4di)__a, (__v4di)__b, 2,
+ (__mmask8)-1);
+}
+
+static __inline__ __mmask8 __DEFAULT_FN_ATTRS
+_mm256_mask_cmple_epi64_mask(__mmask8 __u, __m256i __a, __m256i __b) {
+ return (__mmask8)__builtin_ia32_cmpq256_mask((__v4di)__a, (__v4di)__b, 2,
+ __u);
+}
+
+static __inline__ __mmask8 __DEFAULT_FN_ATTRS
+_mm256_cmple_epu64_mask(__m256i __a, __m256i __b) {
+ return (__mmask8)__builtin_ia32_ucmpq256_mask((__v4di)__a, (__v4di)__b, 2,
+ (__mmask8)-1);
+}
+
+static __inline__ __mmask8 __DEFAULT_FN_ATTRS
+_mm256_mask_cmple_epu64_mask(__mmask8 __u, __m256i __a, __m256i __b) {
+ return (__mmask8)__builtin_ia32_ucmpq256_mask((__v4di)__a, (__v4di)__b, 2,
+ __u);
+}
+
+static __inline__ __mmask8 __DEFAULT_FN_ATTRS
+_mm_cmplt_epi32_mask(__m128i __a, __m128i __b) {
+ return (__mmask8)__builtin_ia32_cmpd128_mask((__v4si)__a, (__v4si)__b, 1,
+ (__mmask8)-1);
+}
+
+static __inline__ __mmask8 __DEFAULT_FN_ATTRS
+_mm_mask_cmplt_epi32_mask(__mmask8 __u, __m128i __a, __m128i __b) {
+ return (__mmask8)__builtin_ia32_cmpd128_mask((__v4si)__a, (__v4si)__b, 1,
+ __u);
+}
+
+static __inline__ __mmask8 __DEFAULT_FN_ATTRS
+_mm_cmplt_epu32_mask(__m128i __a, __m128i __b) {
+ return (__mmask8)__builtin_ia32_ucmpd128_mask((__v4si)__a, (__v4si)__b, 1,
+ (__mmask8)-1);
+}
+
+static __inline__ __mmask8 __DEFAULT_FN_ATTRS
+_mm_mask_cmplt_epu32_mask(__mmask8 __u, __m128i __a, __m128i __b) {
+ return (__mmask8)__builtin_ia32_ucmpd128_mask((__v4si)__a, (__v4si)__b, 1,
+ __u);
+}
+
+static __inline__ __mmask8 __DEFAULT_FN_ATTRS
+_mm256_cmplt_epi32_mask(__m256i __a, __m256i __b) {
+ return (__mmask8)__builtin_ia32_cmpd256_mask((__v8si)__a, (__v8si)__b, 1,
+ (__mmask8)-1);
+}
+
+static __inline__ __mmask8 __DEFAULT_FN_ATTRS
+_mm256_mask_cmplt_epi32_mask(__mmask8 __u, __m256i __a, __m256i __b) {
+ return (__mmask8)__builtin_ia32_cmpd256_mask((__v8si)__a, (__v8si)__b, 1,
+ __u);
+}
+
+static __inline__ __mmask8 __DEFAULT_FN_ATTRS
+_mm256_cmplt_epu32_mask(__m256i __a, __m256i __b) {
+ return (__mmask8)__builtin_ia32_ucmpd256_mask((__v8si)__a, (__v8si)__b, 1,
+ (__mmask8)-1);
+}
+
+static __inline__ __mmask8 __DEFAULT_FN_ATTRS
+_mm256_mask_cmplt_epu32_mask(__mmask8 __u, __m256i __a, __m256i __b) {
+ return (__mmask8)__builtin_ia32_ucmpd256_mask((__v8si)__a, (__v8si)__b, 1,
+ __u);
+}
+
+static __inline__ __mmask8 __DEFAULT_FN_ATTRS
+_mm_cmplt_epi64_mask(__m128i __a, __m128i __b) {
+ return (__mmask8)__builtin_ia32_cmpq128_mask((__v2di)__a, (__v2di)__b, 1,
+ (__mmask8)-1);
+}
+
+static __inline__ __mmask8 __DEFAULT_FN_ATTRS
+_mm_mask_cmplt_epi64_mask(__mmask8 __u, __m128i __a, __m128i __b) {
+ return (__mmask8)__builtin_ia32_cmpq128_mask((__v2di)__a, (__v2di)__b, 1,
+ __u);
+}
+
+static __inline__ __mmask8 __DEFAULT_FN_ATTRS
+_mm_cmplt_epu64_mask(__m128i __a, __m128i __b) {
+ return (__mmask8)__builtin_ia32_ucmpq128_mask((__v2di)__a, (__v2di)__b, 1,
+ (__mmask8)-1);
+}
+
+static __inline__ __mmask8 __DEFAULT_FN_ATTRS
+_mm_mask_cmplt_epu64_mask(__mmask8 __u, __m128i __a, __m128i __b) {
+ return (__mmask8)__builtin_ia32_ucmpq128_mask((__v2di)__a, (__v2di)__b, 1,
+ __u);
+}
+
+static __inline__ __mmask8 __DEFAULT_FN_ATTRS
+_mm256_cmplt_epi64_mask(__m256i __a, __m256i __b) {
+ return (__mmask8)__builtin_ia32_cmpq256_mask((__v4di)__a, (__v4di)__b, 1,
+ (__mmask8)-1);
+}
+
+static __inline__ __mmask8 __DEFAULT_FN_ATTRS
+_mm256_mask_cmplt_epi64_mask(__mmask8 __u, __m256i __a, __m256i __b) {
+ return (__mmask8)__builtin_ia32_cmpq256_mask((__v4di)__a, (__v4di)__b, 1,
+ __u);
+}
+
+static __inline__ __mmask8 __DEFAULT_FN_ATTRS
+_mm256_cmplt_epu64_mask(__m256i __a, __m256i __b) {
+ return (__mmask8)__builtin_ia32_ucmpq256_mask((__v4di)__a, (__v4di)__b, 1,
+ (__mmask8)-1);
+}
+
+static __inline__ __mmask8 __DEFAULT_FN_ATTRS
+_mm256_mask_cmplt_epu64_mask(__mmask8 __u, __m256i __a, __m256i __b) {
+ return (__mmask8)__builtin_ia32_ucmpq256_mask((__v4di)__a, (__v4di)__b, 1,
+ __u);
+}
+
+static __inline__ __mmask8 __DEFAULT_FN_ATTRS
+_mm_cmpneq_epi32_mask(__m128i __a, __m128i __b) {
+ return (__mmask8)__builtin_ia32_cmpd128_mask((__v4si)__a, (__v4si)__b, 4,
+ (__mmask8)-1);
+}
+
+static __inline__ __mmask8 __DEFAULT_FN_ATTRS
+_mm_mask_cmpneq_epi32_mask(__mmask8 __u, __m128i __a, __m128i __b) {
+ return (__mmask8)__builtin_ia32_cmpd128_mask((__v4si)__a, (__v4si)__b, 4,
+ __u);
+}
+
+static __inline__ __mmask8 __DEFAULT_FN_ATTRS
+_mm_cmpneq_epu32_mask(__m128i __a, __m128i __b) {
+ return (__mmask8)__builtin_ia32_ucmpd128_mask((__v4si)__a, (__v4si)__b, 4,
+ (__mmask8)-1);
+}
+
+static __inline__ __mmask8 __DEFAULT_FN_ATTRS
+_mm_mask_cmpneq_epu32_mask(__mmask8 __u, __m128i __a, __m128i __b) {
+ return (__mmask8)__builtin_ia32_ucmpd128_mask((__v4si)__a, (__v4si)__b, 4,
+ __u);
+}
+
+static __inline__ __mmask8 __DEFAULT_FN_ATTRS
+_mm256_cmpneq_epi32_mask(__m256i __a, __m256i __b) {
+ return (__mmask8)__builtin_ia32_cmpd256_mask((__v8si)__a, (__v8si)__b, 4,
+ (__mmask8)-1);
+}
+
+static __inline__ __mmask8 __DEFAULT_FN_ATTRS
+_mm256_mask_cmpneq_epi32_mask(__mmask8 __u, __m256i __a, __m256i __b) {
+ return (__mmask8)__builtin_ia32_cmpd256_mask((__v8si)__a, (__v8si)__b, 4,
+ __u);
+}
+
+static __inline__ __mmask8 __DEFAULT_FN_ATTRS
+_mm256_cmpneq_epu32_mask(__m256i __a, __m256i __b) {
+ return (__mmask8)__builtin_ia32_ucmpd256_mask((__v8si)__a, (__v8si)__b, 4,
+ (__mmask8)-1);
+}
+
+static __inline__ __mmask8 __DEFAULT_FN_ATTRS
+_mm256_mask_cmpneq_epu32_mask(__mmask8 __u, __m256i __a, __m256i __b) {
+ return (__mmask8)__builtin_ia32_ucmpd256_mask((__v8si)__a, (__v8si)__b, 4,
+ __u);
+}
+
+static __inline__ __mmask8 __DEFAULT_FN_ATTRS
+_mm_cmpneq_epi64_mask(__m128i __a, __m128i __b) {
+ return (__mmask8)__builtin_ia32_cmpq128_mask((__v2di)__a, (__v2di)__b, 4,
+ (__mmask8)-1);
+}
+
+static __inline__ __mmask8 __DEFAULT_FN_ATTRS
+_mm_mask_cmpneq_epi64_mask(__mmask8 __u, __m128i __a, __m128i __b) {
+ return (__mmask8)__builtin_ia32_cmpq128_mask((__v2di)__a, (__v2di)__b, 4,
+ __u);
+}
+
+static __inline__ __mmask8 __DEFAULT_FN_ATTRS
+_mm_cmpneq_epu64_mask(__m128i __a, __m128i __b) {
+ return (__mmask8)__builtin_ia32_ucmpq128_mask((__v2di)__a, (__v2di)__b, 4,
+ (__mmask8)-1);
+}
+
+static __inline__ __mmask8 __DEFAULT_FN_ATTRS
+_mm_mask_cmpneq_epu64_mask(__mmask8 __u, __m128i __a, __m128i __b) {
+ return (__mmask8)__builtin_ia32_ucmpq128_mask((__v2di)__a, (__v2di)__b, 4,
+ __u);
+}
+
+static __inline__ __mmask8 __DEFAULT_FN_ATTRS
+_mm256_cmpneq_epi64_mask(__m256i __a, __m256i __b) {
+ return (__mmask8)__builtin_ia32_cmpq256_mask((__v4di)__a, (__v4di)__b, 4,
+ (__mmask8)-1);
+}
+
+static __inline__ __mmask8 __DEFAULT_FN_ATTRS
+_mm256_mask_cmpneq_epi64_mask(__mmask8 __u, __m256i __a, __m256i __b) {
+ return (__mmask8)__builtin_ia32_cmpq256_mask((__v4di)__a, (__v4di)__b, 4,
+ __u);
+}
+
+static __inline__ __mmask8 __DEFAULT_FN_ATTRS
+_mm256_cmpneq_epu64_mask(__m256i __a, __m256i __b) {
+ return (__mmask8)__builtin_ia32_ucmpq256_mask((__v4di)__a, (__v4di)__b, 4,
+ (__mmask8)-1);
+}
+
+static __inline__ __mmask8 __DEFAULT_FN_ATTRS
+_mm256_mask_cmpneq_epu64_mask(__mmask8 __u, __m256i __a, __m256i __b) {
+ return (__mmask8)__builtin_ia32_ucmpq256_mask((__v4di)__a, (__v4di)__b, 4,
+ __u);
+}
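+
+/*
+ * The *_mask_cmp* forms AND the compare result into a caller-supplied mask,
+ * so predicates chain without extra mask operations.  Sketch (x, lo and hi
+ * are arbitrary __m256i values, not part of this header):
+ *
+ *   // count elements with lo[i] <= x[i] < hi[i], branch-free
+ *   __mmask8 m = _mm256_cmpge_epi32_mask(x, lo);
+ *   m = _mm256_mask_cmplt_epi32_mask(m, x, hi);
+ *   int n = __builtin_popcount(m);
+ */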
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS
+_mm256_mask_add_epi32 (__m256i __W, __mmask8 __U, __m256i __A,
+ __m256i __B)
+{
+ return (__m256i) __builtin_ia32_paddd256_mask ((__v8si) __A,
+ (__v8si) __B,
+ (__v8si) __W,
+ (__mmask8) __U);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS
+_mm256_maskz_add_epi32 (__mmask8 __U, __m256i __A, __m256i __B)
+{
+ return (__m256i) __builtin_ia32_paddd256_mask ((__v8si) __A,
+ (__v8si) __B,
+ (__v8si)
+ _mm256_setzero_si256 (),
+ (__mmask8) __U);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS
+_mm256_mask_add_epi64 (__m256i __W, __mmask8 __U, __m256i __A,
+ __m256i __B)
+{
+ return (__m256i) __builtin_ia32_paddq256_mask ((__v4di) __A,
+ (__v4di) __B,
+ (__v4di) __W,
+ (__mmask8) __U);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS
+_mm256_maskz_add_epi64 (__mmask8 __U, __m256i __A, __m256i __B)
+{
+ return (__m256i) __builtin_ia32_paddq256_mask ((__v4di) __A,
+ (__v4di) __B,
+ (__v4di)
+ _mm256_setzero_si256 (),
+ (__mmask8) __U);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS
+_mm256_mask_sub_epi32 (__m256i __W, __mmask8 __U, __m256i __A,
+ __m256i __B)
+{
+ return (__m256i) __builtin_ia32_psubd256_mask ((__v8si) __A,
+ (__v8si) __B,
+ (__v8si) __W,
+ (__mmask8) __U);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS
+_mm256_maskz_sub_epi32 (__mmask8 __U, __m256i __A, __m256i __B)
+{
+ return (__m256i) __builtin_ia32_psubd256_mask ((__v8si) __A,
+ (__v8si) __B,
+ (__v8si)
+ _mm256_setzero_si256 (),
+ (__mmask8) __U);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS
+_mm256_mask_sub_epi64 (__m256i __W, __mmask8 __U, __m256i __A,
+ __m256i __B)
+{
+ return (__m256i) __builtin_ia32_psubq256_mask ((__v4di) __A,
+ (__v4di) __B,
+ (__v4di) __W,
+ (__mmask8) __U);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS
+_mm256_maskz_sub_epi64 (__mmask8 __U, __m256i __A, __m256i __B)
+{
+ return (__m256i) __builtin_ia32_psubq256_mask ((__v4di) __A,
+ (__v4di) __B,
+ (__v4di)
+ _mm256_setzero_si256 (),
+ (__mmask8) __U);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_mask_add_epi32 (__m128i __W, __mmask8 __U, __m128i __A,
+ __m128i __B)
+{
+ return (__m128i) __builtin_ia32_paddd128_mask ((__v4si) __A,
+ (__v4si) __B,
+ (__v4si) __W,
+ (__mmask8) __U);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_maskz_add_epi32 (__mmask8 __U, __m128i __A, __m128i __B)
+{
+ return (__m128i) __builtin_ia32_paddd128_mask ((__v4si) __A,
+ (__v4si) __B,
+ (__v4si)
+ _mm_setzero_si128 (),
+ (__mmask8) __U);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_mask_add_epi64 (__m128i __W, __mmask8 __U, __m128i __A,
+ __m128i __B)
+{
+ return (__m128i) __builtin_ia32_paddq128_mask ((__v2di) __A,
+ (__v2di) __B,
+ (__v2di) __W,
+ (__mmask8) __U);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_maskz_add_epi64 (__mmask8 __U, __m128i __A, __m128i __B)
+{
+ return (__m128i) __builtin_ia32_paddq128_mask ((__v2di) __A,
+ (__v2di) __B,
+ (__v2di)
+ _mm_setzero_si128 (),
+ (__mmask8) __U);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_mask_sub_epi32 (__m128i __W, __mmask8 __U, __m128i __A,
+ __m128i __B)
+{
+ return (__m128i) __builtin_ia32_psubd128_mask ((__v4si) __A,
+ (__v4si) __B,
+ (__v4si) __W,
+ (__mmask8) __U);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_maskz_sub_epi32 (__mmask8 __U, __m128i __A, __m128i __B)
+{
+ return (__m128i) __builtin_ia32_psubd128_mask ((__v4si) __A,
+ (__v4si) __B,
+ (__v4si)
+ _mm_setzero_si128 (),
+ (__mmask8) __U);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_mask_sub_epi64 (__m128i __W, __mmask8 __U, __m128i __A,
+ __m128i __B)
+{
+ return (__m128i) __builtin_ia32_psubq128_mask ((__v2di) __A,
+ (__v2di) __B,
+ (__v2di) __W,
+ (__mmask8) __U);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_maskz_sub_epi64 (__mmask8 __U, __m128i __A, __m128i __B)
+{
+ return (__m128i) __builtin_ia32_psubq128_mask ((__v2di) __A,
+ (__v2di) __B,
+ (__v2di)
+ _mm_setzero_si128 (),
+ (__mmask8) __U);
+}
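+
+/*
+ * In the mask (merging) forms above, lanes whose mask bit is clear are taken
+ * from the pass-through operand __W; in the maskz (zeroing) forms they are
+ * forced to zero.  Sketch (a and b are arbitrary __m256i values):
+ *
+ *   __m256i w  = _mm256_set1_epi32(-1);
+ *   __m256i r1 = _mm256_mask_add_epi32(w, 0x0f, a, b);  // high lanes = -1
+ *   __m256i r2 = _mm256_maskz_add_epi32(0x0f, a, b);    // high lanes = 0
+ */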
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS
+_mm256_mask_mul_epi32 (__m256i __W, __mmask8 __M, __m256i __X,
+ __m256i __Y)
+{
+ return (__m256i) __builtin_ia32_pmuldq256_mask ((__v8si) __X,
+ (__v8si) __Y,
+ (__v4di) __W, __M);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS
+_mm256_maskz_mul_epi32 (__mmask8 __M, __m256i __X, __m256i __Y)
+{
+ return (__m256i) __builtin_ia32_pmuldq256_mask ((__v8si) __X,
+ (__v8si) __Y,
+ (__v4di)
+ _mm256_setzero_si256 (),
+ __M);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_mask_mul_epi32 (__m128i __W, __mmask8 __M, __m128i __X,
+ __m128i __Y)
+{
+ return (__m128i) __builtin_ia32_pmuldq128_mask ((__v4si) __X,
+ (__v4si) __Y,
+ (__v2di) __W, __M);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_maskz_mul_epi32 (__mmask8 __M, __m128i __X, __m128i __Y)
+{
+ return (__m128i) __builtin_ia32_pmuldq128_mask ((__v4si) __X,
+ (__v4si) __Y,
+ (__v2di)
+ _mm_setzero_si128 (),
+ __M);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS
+_mm256_mask_mul_epu32 (__m256i __W, __mmask8 __M, __m256i __X,
+ __m256i __Y)
+{
+ return (__m256i) __builtin_ia32_pmuludq256_mask ((__v8si) __X,
+ (__v8si) __Y,
+ (__v4di) __W, __M);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS
+_mm256_maskz_mul_epu32 (__mmask8 __M, __m256i __X, __m256i __Y)
+{
+ return (__m256i) __builtin_ia32_pmuludq256_mask ((__v8si) __X,
+ (__v8si) __Y,
+ (__v4di)
+ _mm256_setzero_si256 (),
+ __M);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_mask_mul_epu32 (__m128i __W, __mmask8 __M, __m128i __X,
+ __m128i __Y)
+{
+ return (__m128i) __builtin_ia32_pmuludq128_mask ((__v4si) __X,
+ (__v4si) __Y,
+ (__v2di) __W, __M);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_maskz_mul_epu32 (__mmask8 __M, __m128i __X, __m128i __Y)
+{
+ return (__m128i) __builtin_ia32_pmuludq128_mask ((__v4si) __X,
+ (__v4si) __Y,
+ (__v2di)
+ _mm_setzero_si128 (),
+ __M);
+}
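+
+/*
+ * As with SSE4.1 _mm_mul_epi32 and SSE2 _mm_mul_epu32, these multiply the low
+ * 32-bit element of each 64-bit lane into a full 64-bit product, which is why
+ * the mask and the pass-through operand are in terms of 64-bit lanes
+ * (__v2di/__v4di).
+ */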
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS
+_mm256_maskz_mullo_epi32 (__mmask8 __M, __m256i __A, __m256i __B)
+{
+ return (__m256i) __builtin_ia32_pmulld256_mask ((__v8si) __A,
+ (__v8si) __B,
+ (__v8si)
+ _mm256_setzero_si256 (),
+ __M);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS
+_mm256_mask_mullo_epi32 (__m256i __W, __mmask8 __M, __m256i __A,
+ __m256i __B)
+{
+ return (__m256i) __builtin_ia32_pmulld256_mask ((__v8si) __A,
+ (__v8si) __B,
+ (__v8si) __W, __M);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_maskz_mullo_epi32 (__mmask8 __M, __m128i __A, __m128i __B)
+{
+ return (__m128i) __builtin_ia32_pmulld128_mask ((__v4si) __A,
+ (__v4si) __B,
+ (__v4si)
+ _mm_setzero_si128 (),
+ __M);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_mask_mullo_epi32 (__m128i __W, __mmask8 __M, __m128i __A,
+ __m128i __B)
+{
+ return (__m128i) __builtin_ia32_pmulld128_mask ((__v4si) __A,
+ (__v4si) __B,
+ (__v4si) __W, __M);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS
+_mm256_mask_and_epi32 (__m256i __W, __mmask8 __U, __m256i __A,
+ __m256i __B)
+{
+ return (__m256i) __builtin_ia32_pandd256_mask ((__v8si) __A,
+ (__v8si) __B,
+ (__v8si) __W,
+ (__mmask8) __U);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS
+_mm256_maskz_and_epi32 (__mmask8 __U, __m256i __A, __m256i __B)
+{
+ return (__m256i) __builtin_ia32_pandd256_mask ((__v8si) __A,
+ (__v8si) __B,
+ (__v8si)
+ _mm256_setzero_si256 (),
+ (__mmask8) __U);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_mask_and_epi32 (__m128i __W, __mmask8 __U, __m128i __A, __m128i __B)
+{
+ return (__m128i) __builtin_ia32_pandd128_mask ((__v4si) __A,
+ (__v4si) __B,
+ (__v4si) __W,
+ (__mmask8) __U);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_maskz_and_epi32 (__mmask8 __U, __m128i __A, __m128i __B)
+{
+ return (__m128i) __builtin_ia32_pandd128_mask ((__v4si) __A,
+ (__v4si) __B,
+ (__v4si)
+ _mm_setzero_si128 (),
+ (__mmask8) __U);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS
+_mm256_mask_andnot_epi32 (__m256i __W, __mmask8 __U, __m256i __A,
+ __m256i __B)
+{
+ return (__m256i) __builtin_ia32_pandnd256_mask ((__v8si) __A,
+ (__v8si) __B,
+ (__v8si) __W,
+ (__mmask8) __U);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS
+_mm256_maskz_andnot_epi32 (__mmask8 __U, __m256i __A, __m256i __B)
+{
+ return (__m256i) __builtin_ia32_pandnd256_mask ((__v8si) __A,
+ (__v8si) __B,
+ (__v8si)
+ _mm256_setzero_si256 (),
+ (__mmask8) __U);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_mask_andnot_epi32 (__m128i __W, __mmask8 __U, __m128i __A,
+ __m128i __B)
+{
+ return (__m128i) __builtin_ia32_pandnd128_mask ((__v4si) __A,
+ (__v4si) __B,
+ (__v4si) __W,
+ (__mmask8) __U);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_maskz_andnot_epi32 (__mmask8 __U, __m128i __A, __m128i __B)
+{
+ return (__m128i) __builtin_ia32_pandnd128_mask ((__v4si) __A,
+ (__v4si) __B,
+ (__v4si)
+ _mm_setzero_si128 (),
+ (__mmask8) __U);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS
+_mm256_mask_or_epi32 (__m256i __W, __mmask8 __U, __m256i __A,
+ __m256i __B)
+{
+ return (__m256i) __builtin_ia32_pord256_mask ((__v8si) __A,
+ (__v8si) __B,
+ (__v8si) __W,
+ (__mmask8) __U);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS
+_mm256_maskz_or_epi32 (__mmask8 __U, __m256i __A, __m256i __B)
+{
+ return (__m256i) __builtin_ia32_pord256_mask ((__v8si) __A,
+ (__v8si) __B,
+ (__v8si)
+ _mm256_setzero_si256 (),
+ (__mmask8) __U);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_mask_or_epi32 (__m128i __W, __mmask8 __U, __m128i __A, __m128i __B)
+{
+ return (__m128i) __builtin_ia32_pord128_mask ((__v4si) __A,
+ (__v4si) __B,
+ (__v4si) __W,
+ (__mmask8) __U);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_maskz_or_epi32 (__mmask8 __U, __m128i __A, __m128i __B)
+{
+ return (__m128i) __builtin_ia32_pord128_mask ((__v4si) __A,
+ (__v4si) __B,
+ (__v4si)
+ _mm_setzero_si128 (),
+ (__mmask8) __U);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS
+_mm256_mask_xor_epi32 (__m256i __W, __mmask8 __U, __m256i __A,
+ __m256i __B)
+{
+ return (__m256i) __builtin_ia32_pxord256_mask ((__v8si) __A,
+ (__v8si) __B,
+ (__v8si) __W,
+ (__mmask8) __U);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS
+_mm256_maskz_xor_epi32 (__mmask8 __U, __m256i __A, __m256i __B)
+{
+ return (__m256i) __builtin_ia32_pxord256_mask ((__v8si) __A,
+ (__v8si) __B,
+ (__v8si)
+ _mm256_setzero_si256 (),
+ (__mmask8) __U);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_mask_xor_epi32 (__m128i __W, __mmask8 __U, __m128i __A,
+ __m128i __B)
+{
+ return (__m128i) __builtin_ia32_pxord128_mask ((__v4si) __A,
+ (__v4si) __B,
+ (__v4si) __W,
+ (__mmask8) __U);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_maskz_xor_epi32 (__mmask8 __U, __m128i __A, __m128i __B)
+{
+ return (__m128i) __builtin_ia32_pxord128_mask ((__v4si) __A,
+ (__v4si) __B,
+ (__v4si)
+ _mm_setzero_si128 (),
+ (__mmask8) __U);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS
+_mm256_mask_and_epi64 (__m256i __W, __mmask8 __U, __m256i __A,
+ __m256i __B)
+{
+ return (__m256i) __builtin_ia32_pandq256_mask ((__v4di) __A,
+ (__v4di) __B,
+ (__v4di) __W, __U);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS
+_mm256_maskz_and_epi64 (__mmask8 __U, __m256i __A, __m256i __B)
+{
+ return (__m256i) __builtin_ia32_pandq256_mask ((__v4di) __A,
+ (__v4di) __B,
+ (__v4di)
+ _mm256_setzero_si256 (),
+ __U);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_mask_and_epi64 (__m128i __W, __mmask8 __U, __m128i __A,
+ __m128i __B)
+{
+ return (__m128i) __builtin_ia32_pandq128_mask ((__v2di) __A,
+ (__v2di) __B,
+ (__v2di) __W, __U);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_maskz_and_epi64 (__mmask8 __U, __m128i __A, __m128i __B)
+{
+ return (__m128i) __builtin_ia32_pandq128_mask ((__v2di) __A,
+ (__v2di) __B,
+ (__v2di)
+ _mm_setzero_si128 (),
+ __U);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS
+_mm256_mask_andnot_epi64 (__m256i __W, __mmask8 __U, __m256i __A,
+ __m256i __B)
+{
+ return (__m256i) __builtin_ia32_pandnq256_mask ((__v4di) __A,
+ (__v4di) __B,
+ (__v4di) __W, __U);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS
+_mm256_maskz_andnot_epi64 (__mmask8 __U, __m256i __A, __m256i __B)
+{
+ return (__m256i) __builtin_ia32_pandnq256_mask ((__v4di) __A,
+ (__v4di) __B,
+ (__v4di)
+ _mm256_setzero_si256 (),
+ __U);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_mask_andnot_epi64 (__m128i __W, __mmask8 __U, __m128i __A,
+ __m128i __B)
+{
+ return (__m128i) __builtin_ia32_pandnq128_mask ((__v2di) __A,
+ (__v2di) __B,
+ (__v2di) __W, __U);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_maskz_andnot_epi64 (__mmask8 __U, __m128i __A, __m128i __B)
+{
+ return (__m128i) __builtin_ia32_pandnq128_mask ((__v2di) __A,
+ (__v2di) __B,
+ (__v2di)
+ _mm_setzero_si128 (),
+ __U);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS
+_mm256_mask_or_epi64 (__m256i __W, __mmask8 __U, __m256i __A,
+ __m256i __B)
+{
+ return (__m256i) __builtin_ia32_porq256_mask ((__v4di) __A,
+ (__v4di) __B,
+ (__v4di) __W,
+ (__mmask8) __U);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS
+_mm256_maskz_or_epi64 (__mmask8 __U, __m256i __A, __m256i __B)
+{
+ return (__m256i) __builtin_ia32_porq256_mask ((__v4di) __A,
+ (__v4di) __B,
+ (__v4di)
+ _mm256_setzero_si256 (),
+ (__mmask8) __U);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_mask_or_epi64 (__m128i __W, __mmask8 __U, __m128i __A, __m128i __B)
+{
+ return (__m128i) __builtin_ia32_porq128_mask ((__v2di) __A,
+ (__v2di) __B,
+ (__v2di) __W,
+ (__mmask8) __U);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_maskz_or_epi64 (__mmask8 __U, __m128i __A, __m128i __B)
+{
+ return (__m128i) __builtin_ia32_porq128_mask ((__v2di) __A,
+ (__v2di) __B,
+ (__v2di)
+ _mm_setzero_si128 (),
+ (__mmask8) __U);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS
+_mm256_mask_xor_epi64 (__m256i __W, __mmask8 __U, __m256i __A,
+ __m256i __B)
+{
+ return (__m256i) __builtin_ia32_pxorq256_mask ((__v4di) __A,
+ (__v4di) __B,
+ (__v4di) __W,
+ (__mmask8) __U);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS
+_mm256_maskz_xor_epi64 (__mmask8 __U, __m256i __A, __m256i __B)
+{
+ return (__m256i) __builtin_ia32_pxorq256_mask ((__v4di) __A,
+ (__v4di) __B,
+ (__v4di)
+ _mm256_setzero_si256 (),
+ (__mmask8) __U);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_mask_xor_epi64 (__m128i __W, __mmask8 __U, __m128i __A,
+ __m128i __B)
+{
+ return (__m128i) __builtin_ia32_pxorq128_mask ((__v2di) __A,
+ (__v2di) __B,
+ (__v2di) __W,
+ (__mmask8) __U);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_maskz_xor_epi64 (__mmask8 __U, __m128i __A, __m128i __B)
+{
+ return (__m128i) __builtin_ia32_pxorq128_mask ((__v2di) __A,
+ (__v2di) __B,
+ (__v2di)
+ _mm_setzero_si128 (),
+ (__mmask8) __U);
+}
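+
+/*
+ * Sketch: the merging andnot form can clear a flag in selected lanes only
+ * (v, sel and flag are arbitrary values, not part of this header):
+ *
+ *   __m256i r = _mm256_mask_andnot_epi32(v, sel, flag, v);  // (~flag & v) or v
+ */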
+
+#define _mm_cmp_epi32_mask(a, b, p) __extension__ ({ \
+ (__mmask8)__builtin_ia32_cmpd128_mask((__v4si)(__m128i)(a), \
+ (__v4si)(__m128i)(b), \
+ (p), (__mmask8)-1); })
+
+#define _mm_mask_cmp_epi32_mask(m, a, b, p) __extension__ ({ \
+ (__mmask8)__builtin_ia32_cmpd128_mask((__v4si)(__m128i)(a), \
+ (__v4si)(__m128i)(b), \
+ (p), (__mmask8)(m)); })
+
+#define _mm_cmp_epu32_mask(a, b, p) __extension__ ({ \
+ (__mmask8)__builtin_ia32_ucmpd128_mask((__v4si)(__m128i)(a), \
+ (__v4si)(__m128i)(b), \
+ (p), (__mmask8)-1); })
+
+#define _mm_mask_cmp_epu32_mask(m, a, b, p) __extension__ ({ \
+ (__mmask8)__builtin_ia32_ucmpd128_mask((__v4si)(__m128i)(a), \
+ (__v4si)(__m128i)(b), \
+ (p), (__mmask8)(m)); })
+
+#define _mm256_cmp_epi32_mask(a, b, p) __extension__ ({ \
+ (__mmask8)__builtin_ia32_cmpd256_mask((__v8si)(__m256i)(a), \
+ (__v8si)(__m256i)(b), \
+ (p), (__mmask8)-1); })
+
+#define _mm256_mask_cmp_epi32_mask(m, a, b, p) __extension__ ({ \
+ (__mmask8)__builtin_ia32_cmpd256_mask((__v8si)(__m256i)(a), \
+ (__v8si)(__m256i)(b), \
+ (p), (__mmask8)(m)); })
+
+#define _mm256_cmp_epu32_mask(a, b, p) __extension__ ({ \
+ (__mmask8)__builtin_ia32_ucmpd256_mask((__v8si)(__m256i)(a), \
+ (__v8si)(__m256i)(b), \
+ (p), (__mmask8)-1); })
+
+#define _mm256_mask_cmp_epu32_mask(m, a, b, p) __extension__ ({ \
+ (__mmask8)__builtin_ia32_ucmpd256_mask((__v8si)(__m256i)(a), \
+ (__v8si)(__m256i)(b), \
+ (p), (__mmask8)(m)); })
+
+#define _mm_cmp_epi64_mask(a, b, p) __extension__ ({ \
+ (__mmask8)__builtin_ia32_cmpq128_mask((__v2di)(__m128i)(a), \
+ (__v2di)(__m128i)(b), \
+ (p), (__mmask8)-1); })
+
+#define _mm_mask_cmp_epi64_mask(m, a, b, p) __extension__ ({ \
+ (__mmask8)__builtin_ia32_cmpq128_mask((__v2di)(__m128i)(a), \
+ (__v2di)(__m128i)(b), \
+ (p), (__mmask8)(m)); })
+
+#define _mm_cmp_epu64_mask(a, b, p) __extension__ ({ \
+ (__mmask8)__builtin_ia32_ucmpq128_mask((__v2di)(__m128i)(a), \
+ (__v2di)(__m128i)(b), \
+ (p), (__mmask8)-1); })
+
+#define _mm_mask_cmp_epu64_mask(m, a, b, p) __extension__ ({ \
+ (__mmask8)__builtin_ia32_ucmpq128_mask((__v2di)(__m128i)(a), \
+ (__v2di)(__m128i)(b), \
+ (p), (__mmask8)(m)); })
+
+#define _mm256_cmp_epi64_mask(a, b, p) __extension__ ({ \
+ (__mmask8)__builtin_ia32_cmpq256_mask((__v4di)(__m256i)(a), \
+ (__v4di)(__m256i)(b), \
+ (p), (__mmask8)-1); })
+
+#define _mm256_mask_cmp_epi64_mask(m, a, b, p) __extension__ ({ \
+ (__mmask8)__builtin_ia32_cmpq256_mask((__v4di)(__m256i)(a), \
+ (__v4di)(__m256i)(b), \
+ (p), (__mmask8)(m)); })
+
+#define _mm256_cmp_epu64_mask(a, b, p) __extension__ ({ \
+ (__mmask8)__builtin_ia32_ucmpq256_mask((__v4di)(__m256i)(a), \
+ (__v4di)(__m256i)(b), \
+ (p), (__mmask8)-1); })
+
+#define _mm256_mask_cmp_epu64_mask(m, a, b, p) __extension__ ({ \
+ (__mmask8)__builtin_ia32_ucmpq256_mask((__v4di)(__m256i)(a), \
+ (__v4di)(__m256i)(b), \
+ (p), (__mmask8)(m)); })
+
+#define _mm256_cmp_ps_mask(a, b, p) __extension__ ({ \
+ (__mmask8)__builtin_ia32_cmpps256_mask((__v8sf)(__m256)(a), \
+ (__v8sf)(__m256)(b), \
+ (p), (__mmask8)-1); })
+
+#define _mm256_mask_cmp_ps_mask(m, a, b, p) __extension__ ({ \
+ (__mmask8)__builtin_ia32_cmpps256_mask((__v8sf)(__m256)(a), \
+ (__v8sf)(__m256)(b), \
+ (p), (__mmask8)(m)); })
+
+#define _mm256_cmp_pd_mask(a, b, p) __extension__ ({ \
+ (__mmask8)__builtin_ia32_cmppd256_mask((__v4df)(__m256d)(a), \
+ (__v4df)(__m256d)(b), \
+ (p), (__mmask8)-1); })
+
+#define _mm256_mask_cmp_pd_mask(m, a, b, p) __extension__ ({ \
+ (__mmask8)__builtin_ia32_cmppd256_mask((__v4df)(__m256d)(a), \
+ (__v4df)(__m256d)(b), \
+ (p), (__mmask8)(m)); })
+
+#define _mm_cmp_ps_mask(a, b, p) __extension__ ({ \
+ (__mmask8)__builtin_ia32_cmpps128_mask((__v4sf)(__m128)(a), \
+ (__v4sf)(__m128)(b), \
+ (p), (__mmask8)-1); })
+
+#define _mm_mask_cmp_ps_mask(m, a, b, p) __extension__ ({ \
+ (__mmask8)__builtin_ia32_cmpps128_mask((__v4sf)(__m128)(a), \
+ (__v4sf)(__m128)(b), \
+ (p), (__mmask8)(m)); })
+
+#define _mm_cmp_pd_mask(a, b, p) __extension__ ({ \
+ (__mmask8)__builtin_ia32_cmppd128_mask((__v2df)(__m128d)(a), \
+ (__v2df)(__m128d)(b), \
+ (p), (__mmask8)-1); })
+
+#define _mm_mask_cmp_pd_mask(m, a, b, p) __extension__ ({ \
+ (__mmask8)__builtin_ia32_cmppd128_mask((__v2df)(__m128d)(a), \
+ (__v2df)(__m128d)(b), \
+ (p), (__mmask8)(m)); })
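+
+/*
+ * These are macros rather than functions because the predicate (p) becomes an
+ * instruction immediate and must be a compile-time constant.  Sketch (a and b
+ * are arbitrary __m256i values):
+ *
+ *   __mmask8 m = _mm256_cmp_epi32_mask(a, b, 2);  // 2 = _MM_CMPINT_LE
+ */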
+
+static __inline__ __m128d __DEFAULT_FN_ATTRS
+_mm_mask_fmadd_pd(__m128d __A, __mmask8 __U, __m128d __B, __m128d __C)
+{
+ return (__m128d) __builtin_ia32_vfmaddpd128_mask ((__v2df) __A,
+ (__v2df) __B,
+ (__v2df) __C,
+ (__mmask8) __U);
+}
+
+static __inline__ __m128d __DEFAULT_FN_ATTRS
+_mm_mask3_fmadd_pd(__m128d __A, __m128d __B, __m128d __C, __mmask8 __U)
+{
+ return (__m128d) __builtin_ia32_vfmaddpd128_mask3 ((__v2df) __A,
+ (__v2df) __B,
+ (__v2df) __C,
+ (__mmask8) __U);
+}
+
+static __inline__ __m128d __DEFAULT_FN_ATTRS
+_mm_maskz_fmadd_pd(__mmask8 __U, __m128d __A, __m128d __B, __m128d __C)
+{
+ return (__m128d) __builtin_ia32_vfmaddpd128_maskz ((__v2df) __A,
+ (__v2df) __B,
+ (__v2df) __C,
+ (__mmask8) __U);
+}
+
+static __inline__ __m128d __DEFAULT_FN_ATTRS
+_mm_mask_fmsub_pd(__m128d __A, __mmask8 __U, __m128d __B, __m128d __C)
+{
+ return (__m128d) __builtin_ia32_vfmaddpd128_mask ((__v2df) __A,
+ (__v2df) __B,
+ -(__v2df) __C,
+ (__mmask8) __U);
+}
+
+static __inline__ __m128d __DEFAULT_FN_ATTRS
+_mm_maskz_fmsub_pd(__mmask8 __U, __m128d __A, __m128d __B, __m128d __C)
+{
+ return (__m128d) __builtin_ia32_vfmaddpd128_maskz ((__v2df) __A,
+ (__v2df) __B,
+ -(__v2df) __C,
+ (__mmask8) __U);
+}
+
+static __inline__ __m128d __DEFAULT_FN_ATTRS
+_mm_mask3_fnmadd_pd(__m128d __A, __m128d __B, __m128d __C, __mmask8 __U)
+{
+ return (__m128d) __builtin_ia32_vfmaddpd128_mask3 (-(__v2df) __A,
+ (__v2df) __B,
+ (__v2df) __C,
+ (__mmask8) __U);
+}
+
+static __inline__ __m128d __DEFAULT_FN_ATTRS
+_mm_maskz_fnmadd_pd(__mmask8 __U, __m128d __A, __m128d __B, __m128d __C)
+{
+ return (__m128d) __builtin_ia32_vfmaddpd128_maskz (-(__v2df) __A,
+ (__v2df) __B,
+ (__v2df) __C,
+ (__mmask8) __U);
+}
+
+static __inline__ __m128d __DEFAULT_FN_ATTRS
+_mm_maskz_fnmsub_pd(__mmask8 __U, __m128d __A, __m128d __B, __m128d __C)
+{
+ return (__m128d) __builtin_ia32_vfmaddpd128_maskz (-(__v2df) __A,
+ (__v2df) __B,
+ -(__v2df) __C,
+ (__mmask8) __U);
+}
+
+static __inline__ __m256d __DEFAULT_FN_ATTRS
+_mm256_mask_fmadd_pd(__m256d __A, __mmask8 __U, __m256d __B, __m256d __C)
+{
+ return (__m256d) __builtin_ia32_vfmaddpd256_mask ((__v4df) __A,
+ (__v4df) __B,
+ (__v4df) __C,
+ (__mmask8) __U);
+}
+
+static __inline__ __m256d __DEFAULT_FN_ATTRS
+_mm256_mask3_fmadd_pd(__m256d __A, __m256d __B, __m256d __C, __mmask8 __U)
+{
+ return (__m256d) __builtin_ia32_vfmaddpd256_mask3 ((__v4df) __A,
+ (__v4df) __B,
+ (__v4df) __C,
+ (__mmask8) __U);
+}
+
+static __inline__ __m256d __DEFAULT_FN_ATTRS
+_mm256_maskz_fmadd_pd(__mmask8 __U, __m256d __A, __m256d __B, __m256d __C)
+{
+ return (__m256d) __builtin_ia32_vfmaddpd256_maskz ((__v4df) __A,
+ (__v4df) __B,
+ (__v4df) __C,
+ (__mmask8) __U);
+}
+
+static __inline__ __m256d __DEFAULT_FN_ATTRS
+_mm256_mask_fmsub_pd(__m256d __A, __mmask8 __U, __m256d __B, __m256d __C)
+{
+ return (__m256d) __builtin_ia32_vfmaddpd256_mask ((__v4df) __A,
+ (__v4df) __B,
+ -(__v4df) __C,
+ (__mmask8) __U);
+}
+
+static __inline__ __m256d __DEFAULT_FN_ATTRS
+_mm256_maskz_fmsub_pd(__mmask8 __U, __m256d __A, __m256d __B, __m256d __C)
+{
+ return (__m256d) __builtin_ia32_vfmaddpd256_maskz ((__v4df) __A,
+ (__v4df) __B,
+ -(__v4df) __C,
+ (__mmask8) __U);
+}
+
+static __inline__ __m256d __DEFAULT_FN_ATTRS
+_mm256_mask3_fnmadd_pd(__m256d __A, __m256d __B, __m256d __C, __mmask8 __U)
+{
+ return (__m256d) __builtin_ia32_vfmaddpd256_mask3 (-(__v4df) __A,
+ (__v4df) __B,
+ (__v4df) __C,
+ (__mmask8) __U);
+}
+
+static __inline__ __m256d __DEFAULT_FN_ATTRS
+_mm256_maskz_fnmadd_pd(__mmask8 __U, __m256d __A, __m256d __B, __m256d __C)
+{
+ return (__m256d) __builtin_ia32_vfmaddpd256_maskz (-(__v4df) __A,
+ (__v4df) __B,
+ (__v4df) __C,
+ (__mmask8) __U);
+}
+
+static __inline__ __m256d __DEFAULT_FN_ATTRS
+_mm256_maskz_fnmsub_pd(__mmask8 __U, __m256d __A, __m256d __B, __m256d __C)
+{
+ return (__m256d) __builtin_ia32_vfmaddpd256_maskz (-(__v4df) __A,
+ (__v4df) __B,
+ -(__v4df) __C,
+ (__mmask8) __U);
+}
+
+static __inline__ __m128 __DEFAULT_FN_ATTRS
+_mm_mask_fmadd_ps(__m128 __A, __mmask8 __U, __m128 __B, __m128 __C)
+{
+ return (__m128) __builtin_ia32_vfmaddps128_mask ((__v4sf) __A,
+ (__v4sf) __B,
+ (__v4sf) __C,
+ (__mmask8) __U);
+}
+
+static __inline__ __m128 __DEFAULT_FN_ATTRS
+_mm_mask3_fmadd_ps(__m128 __A, __m128 __B, __m128 __C, __mmask8 __U)
+{
+ return (__m128) __builtin_ia32_vfmaddps128_mask3 ((__v4sf) __A,
+ (__v4sf) __B,
+ (__v4sf) __C,
+ (__mmask8) __U);
+}
+
+static __inline__ __m128 __DEFAULT_FN_ATTRS
+_mm_maskz_fmadd_ps(__mmask8 __U, __m128 __A, __m128 __B, __m128 __C)
+{
+ return (__m128) __builtin_ia32_vfmaddps128_maskz ((__v4sf) __A,
+ (__v4sf) __B,
+ (__v4sf) __C,
+ (__mmask8) __U);
+}
+
+static __inline__ __m128 __DEFAULT_FN_ATTRS
+_mm_mask_fmsub_ps(__m128 __A, __mmask8 __U, __m128 __B, __m128 __C)
+{
+ return (__m128) __builtin_ia32_vfmaddps128_mask ((__v4sf) __A,
+ (__v4sf) __B,
+ -(__v4sf) __C,
+ (__mmask8) __U);
+}
+
+static __inline__ __m128 __DEFAULT_FN_ATTRS
+_mm_maskz_fmsub_ps(__mmask8 __U, __m128 __A, __m128 __B, __m128 __C)
+{
+ return (__m128) __builtin_ia32_vfmaddps128_maskz ((__v4sf) __A,
+ (__v4sf) __B,
+ -(__v4sf) __C,
+ (__mmask8) __U);
+}
+
+static __inline__ __m128 __DEFAULT_FN_ATTRS
+_mm_mask3_fnmadd_ps(__m128 __A, __m128 __B, __m128 __C, __mmask8 __U)
+{
+ return (__m128) __builtin_ia32_vfmaddps128_mask3 (-(__v4sf) __A,
+ (__v4sf) __B,
+ (__v4sf) __C,
+ (__mmask8) __U);
+}
+
+static __inline__ __m128 __DEFAULT_FN_ATTRS
+_mm_maskz_fnmadd_ps(__mmask8 __U, __m128 __A, __m128 __B, __m128 __C)
+{
+ return (__m128) __builtin_ia32_vfmaddps128_maskz (-(__v4sf) __A,
+ (__v4sf) __B,
+ (__v4sf) __C,
+ (__mmask8) __U);
+}
+
+static __inline__ __m128 __DEFAULT_FN_ATTRS
+_mm_maskz_fnmsub_ps(__mmask8 __U, __m128 __A, __m128 __B, __m128 __C)
+{
+ return (__m128) __builtin_ia32_vfmaddps128_maskz (-(__v4sf) __A,
+ (__v4sf) __B,
+ -(__v4sf) __C,
+ (__mmask8) __U);
+}
+
+static __inline__ __m256 __DEFAULT_FN_ATTRS
+_mm256_mask_fmadd_ps(__m256 __A, __mmask8 __U, __m256 __B, __m256 __C)
+{
+ return (__m256) __builtin_ia32_vfmaddps256_mask ((__v8sf) __A,
+ (__v8sf) __B,
+ (__v8sf) __C,
+ (__mmask8) __U);
+}
+
+static __inline__ __m256 __DEFAULT_FN_ATTRS
+_mm256_mask3_fmadd_ps(__m256 __A, __m256 __B, __m256 __C, __mmask8 __U)
+{
+ return (__m256) __builtin_ia32_vfmaddps256_mask3 ((__v8sf) __A,
+ (__v8sf) __B,
+ (__v8sf) __C,
+ (__mmask8) __U);
+}
+
+static __inline__ __m256 __DEFAULT_FN_ATTRS
+_mm256_maskz_fmadd_ps(__mmask8 __U, __m256 __A, __m256 __B, __m256 __C)
+{
+ return (__m256) __builtin_ia32_vfmaddps256_maskz ((__v8sf) __A,
+ (__v8sf) __B,
+ (__v8sf) __C,
+ (__mmask8) __U);
+}
+
+static __inline__ __m256 __DEFAULT_FN_ATTRS
+_mm256_mask_fmsub_ps(__m256 __A, __mmask8 __U, __m256 __B, __m256 __C)
+{
+ return (__m256) __builtin_ia32_vfmaddps256_mask ((__v8sf) __A,
+ (__v8sf) __B,
+ -(__v8sf) __C,
+ (__mmask8) __U);
+}
+
+static __inline__ __m256 __DEFAULT_FN_ATTRS
+_mm256_maskz_fmsub_ps(__mmask8 __U, __m256 __A, __m256 __B, __m256 __C)
+{
+ return (__m256) __builtin_ia32_vfmaddps256_maskz ((__v8sf) __A,
+ (__v8sf) __B,
+ -(__v8sf) __C,
+ (__mmask8) __U);
+}
+
+static __inline__ __m256 __DEFAULT_FN_ATTRS
+_mm256_mask3_fnmadd_ps(__m256 __A, __m256 __B, __m256 __C, __mmask8 __U)
+{
+ return (__m256) __builtin_ia32_vfmaddps256_mask3 (-(__v8sf) __A,
+ (__v8sf) __B,
+ (__v8sf) __C,
+ (__mmask8) __U);
+}
+
+static __inline__ __m256 __DEFAULT_FN_ATTRS
+_mm256_maskz_fnmadd_ps(__mmask8 __U, __m256 __A, __m256 __B, __m256 __C)
+{
+ return (__m256) __builtin_ia32_vfmaddps256_maskz (-(__v8sf) __A,
+ (__v8sf) __B,
+ (__v8sf) __C,
+ (__mmask8) __U);
+}
+
+static __inline__ __m256 __DEFAULT_FN_ATTRS
+_mm256_maskz_fnmsub_ps(__mmask8 __U, __m256 __A, __m256 __B, __m256 __C)
+{
+ return (__m256) __builtin_ia32_vfmaddps256_maskz (-(__v8sf) __A,
+ (__v8sf) __B,
+ -(__v8sf) __C,
+ (__mmask8) __U);
+}
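+
+/*
+ * The three masking flavours differ in which operand survives in unselected
+ * lanes: mask keeps __A (the destination multiplicand), mask3 keeps the
+ * addend __C, and maskz zeroes the lane.  fmsub and fnmadd reuse the fmadd
+ * builtins by negating __C or __A before the fused multiply-add.
+ */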
+
+static __inline__ __m128d __DEFAULT_FN_ATTRS
+_mm_mask_fmaddsub_pd(__m128d __A, __mmask8 __U, __m128d __B, __m128d __C)
+{
+ return (__m128d) __builtin_ia32_vfmaddsubpd128_mask ((__v2df) __A,
+ (__v2df) __B,
+ (__v2df) __C,
+ (__mmask8) __U);
+}
+
+static __inline__ __m128d __DEFAULT_FN_ATTRS
+_mm_mask3_fmaddsub_pd(__m128d __A, __m128d __B, __m128d __C, __mmask8 __U)
+{
+ return (__m128d) __builtin_ia32_vfmaddsubpd128_mask3 ((__v2df) __A,
+ (__v2df) __B,
+ (__v2df) __C,
+ (__mmask8) __U);
+}
+
+static __inline__ __m128d __DEFAULT_FN_ATTRS
+_mm_maskz_fmaddsub_pd(__mmask8 __U, __m128d __A, __m128d __B, __m128d __C)
+{
+ return (__m128d) __builtin_ia32_vfmaddsubpd128_maskz ((__v2df) __A,
+ (__v2df) __B,
+ (__v2df) __C,
+ (__mmask8) __U);
+}
+
+static __inline__ __m128d __DEFAULT_FN_ATTRS
+_mm_mask_fmsubadd_pd(__m128d __A, __mmask8 __U, __m128d __B, __m128d __C)
+{
+ return (__m128d) __builtin_ia32_vfmaddsubpd128_mask ((__v2df) __A,
+ (__v2df) __B,
+ -(__v2df) __C,
+ (__mmask8) __U);
+}
+
+static __inline__ __m128d __DEFAULT_FN_ATTRS
+_mm_maskz_fmsubadd_pd(__mmask8 __U, __m128d __A, __m128d __B, __m128d __C)
+{
+ return (__m128d) __builtin_ia32_vfmaddsubpd128_maskz ((__v2df) __A,
+ (__v2df) __B,
+ -(__v2df) __C,
+ (__mmask8) __U);
+}
+
+static __inline__ __m256d __DEFAULT_FN_ATTRS
+_mm256_mask_fmaddsub_pd(__m256d __A, __mmask8 __U, __m256d __B, __m256d __C)
+{
+ return (__m256d) __builtin_ia32_vfmaddsubpd256_mask ((__v4df) __A,
+ (__v4df) __B,
+ (__v4df) __C,
+ (__mmask8) __U);
+}
+
+static __inline__ __m256d __DEFAULT_FN_ATTRS
+_mm256_mask3_fmaddsub_pd(__m256d __A, __m256d __B, __m256d __C, __mmask8 __U)
+{
+ return (__m256d) __builtin_ia32_vfmaddsubpd256_mask3 ((__v4df) __A,
+ (__v4df) __B,
+ (__v4df) __C,
+ (__mmask8) __U);
+}
+
+static __inline__ __m256d __DEFAULT_FN_ATTRS
+_mm256_maskz_fmaddsub_pd(__mmask8 __U, __m256d __A, __m256d __B, __m256d __C)
+{
+ return (__m256d) __builtin_ia32_vfmaddsubpd256_maskz ((__v4df) __A,
+ (__v4df) __B,
+ (__v4df) __C,
+ (__mmask8) __U);
+}
+
+static __inline__ __m256d __DEFAULT_FN_ATTRS
+_mm256_mask_fmsubadd_pd(__m256d __A, __mmask8 __U, __m256d __B, __m256d __C)
+{
+ return (__m256d) __builtin_ia32_vfmaddsubpd256_mask ((__v4df) __A,
+ (__v4df) __B,
+ -(__v4df) __C,
+ (__mmask8) __U);
+}
+
+static __inline__ __m256d __DEFAULT_FN_ATTRS
+_mm256_maskz_fmsubadd_pd(__mmask8 __U, __m256d __A, __m256d __B, __m256d __C)
+{
+ return (__m256d) __builtin_ia32_vfmaddsubpd256_maskz ((__v4df) __A,
+ (__v4df) __B,
+ -(__v4df) __C,
+ (__mmask8) __U);
+}
+
+static __inline__ __m128 __DEFAULT_FN_ATTRS
+_mm_mask_fmaddsub_ps(__m128 __A, __mmask8 __U, __m128 __B, __m128 __C)
+{
+ return (__m128) __builtin_ia32_vfmaddsubps128_mask ((__v4sf) __A,
+ (__v4sf) __B,
+ (__v4sf) __C,
+ (__mmask8) __U);
+}
+
+static __inline__ __m128 __DEFAULT_FN_ATTRS
+_mm_mask3_fmaddsub_ps(__m128 __A, __m128 __B, __m128 __C, __mmask8 __U)
+{
+ return (__m128) __builtin_ia32_vfmaddsubps128_mask3 ((__v4sf) __A,
+ (__v4sf) __B,
+ (__v4sf) __C,
+ (__mmask8) __U);
+}
+
+static __inline__ __m128 __DEFAULT_FN_ATTRS
+_mm_maskz_fmaddsub_ps(__mmask8 __U, __m128 __A, __m128 __B, __m128 __C)
+{
+ return (__m128) __builtin_ia32_vfmaddsubps128_maskz ((__v4sf) __A,
+ (__v4sf) __B,
+ (__v4sf) __C,
+ (__mmask8) __U);
+}
+
+static __inline__ __m128 __DEFAULT_FN_ATTRS
+_mm_mask_fmsubadd_ps(__m128 __A, __mmask8 __U, __m128 __B, __m128 __C)
+{
+ return (__m128) __builtin_ia32_vfmaddsubps128_mask ((__v4sf) __A,
+ (__v4sf) __B,
+ -(__v4sf) __C,
+ (__mmask8) __U);
+}
+
+static __inline__ __m128 __DEFAULT_FN_ATTRS
+_mm_maskz_fmsubadd_ps(__mmask8 __U, __m128 __A, __m128 __B, __m128 __C)
+{
+ return (__m128) __builtin_ia32_vfmaddsubps128_maskz ((__v4sf) __A,
+ (__v4sf) __B,
+ -(__v4sf) __C,
+ (__mmask8) __U);
+}
+
+static __inline__ __m256 __DEFAULT_FN_ATTRS
+_mm256_mask_fmaddsub_ps(__m256 __A, __mmask8 __U, __m256 __B,
+ __m256 __C)
+{
+ return (__m256) __builtin_ia32_vfmaddsubps256_mask ((__v8sf) __A,
+ (__v8sf) __B,
+ (__v8sf) __C,
+ (__mmask8) __U);
+}
+
+static __inline__ __m256 __DEFAULT_FN_ATTRS
+_mm256_mask3_fmaddsub_ps(__m256 __A, __m256 __B, __m256 __C, __mmask8 __U)
+{
+ return (__m256) __builtin_ia32_vfmaddsubps256_mask3 ((__v8sf) __A,
+ (__v8sf) __B,
+ (__v8sf) __C,
+ (__mmask8) __U);
+}
+
+static __inline__ __m256 __DEFAULT_FN_ATTRS
+_mm256_maskz_fmaddsub_ps(__mmask8 __U, __m256 __A, __m256 __B, __m256 __C)
+{
+ return (__m256) __builtin_ia32_vfmaddsubps256_maskz ((__v8sf) __A,
+ (__v8sf) __B,
+ (__v8sf) __C,
+ (__mmask8) __U);
+}
+
+static __inline__ __m256 __DEFAULT_FN_ATTRS
+_mm256_mask_fmsubadd_ps(__m256 __A, __mmask8 __U, __m256 __B, __m256 __C)
+{
+ return (__m256) __builtin_ia32_vfmaddsubps256_mask ((__v8sf) __A,
+ (__v8sf) __B,
+ -(__v8sf) __C,
+ (__mmask8) __U);
+}
+
+static __inline__ __m256 __DEFAULT_FN_ATTRS
+_mm256_maskz_fmsubadd_ps(__mmask8 __U, __m256 __A, __m256 __B, __m256 __C)
+{
+ return (__m256) __builtin_ia32_vfmaddsubps256_maskz ((__v8sf) __A,
+ (__v8sf) __B,
+ -(__v8sf) __C,
+ (__mmask8) __U);
+}
+
+static __inline__ __m128d __DEFAULT_FN_ATTRS
+_mm_mask3_fmsub_pd(__m128d __A, __m128d __B, __m128d __C, __mmask8 __U)
+{
+ return (__m128d) __builtin_ia32_vfmsubpd128_mask3 ((__v2df) __A,
+ (__v2df) __B,
+ (__v2df) __C,
+ (__mmask8) __U);
+}
+
+static __inline__ __m256d __DEFAULT_FN_ATTRS
+_mm256_mask3_fmsub_pd(__m256d __A, __m256d __B, __m256d __C, __mmask8 __U)
+{
+ return (__m256d) __builtin_ia32_vfmsubpd256_mask3 ((__v4df) __A,
+ (__v4df) __B,
+ (__v4df) __C,
+ (__mmask8) __U);
+}
+
+static __inline__ __m128 __DEFAULT_FN_ATTRS
+_mm_mask3_fmsub_ps(__m128 __A, __m128 __B, __m128 __C, __mmask8 __U)
+{
+ return (__m128) __builtin_ia32_vfmsubps128_mask3 ((__v4sf) __A,
+ (__v4sf) __B,
+ (__v4sf) __C,
+ (__mmask8) __U);
+}
+
+static __inline__ __m256 __DEFAULT_FN_ATTRS
+_mm256_mask3_fmsub_ps(__m256 __A, __m256 __B, __m256 __C, __mmask8 __U)
+{
+ return (__m256) __builtin_ia32_vfmsubps256_mask3 ((__v8sf) __A,
+ (__v8sf) __B,
+ (__v8sf) __C,
+ (__mmask8) __U);
+}
+
+static __inline__ __m128d __DEFAULT_FN_ATTRS
+_mm_mask3_fmsubadd_pd(__m128d __A, __m128d __B, __m128d __C, __mmask8 __U)
+{
+ return (__m128d) __builtin_ia32_vfmsubaddpd128_mask3 ((__v2df) __A,
+ (__v2df) __B,
+ (__v2df) __C,
+ (__mmask8) __U);
+}
+
+static __inline__ __m256d __DEFAULT_FN_ATTRS
+_mm256_mask3_fmsubadd_pd(__m256d __A, __m256d __B, __m256d __C, __mmask8 __U)
+{
+ return (__m256d) __builtin_ia32_vfmsubaddpd256_mask3 ((__v4df) __A,
+ (__v4df) __B,
+ (__v4df) __C,
+ (__mmask8) __U);
+}
+
+static __inline__ __m128 __DEFAULT_FN_ATTRS
+_mm_mask3_fmsubadd_ps(__m128 __A, __m128 __B, __m128 __C, __mmask8 __U)
+{
+ return (__m128) __builtin_ia32_vfmsubaddps128_mask3 ((__v4sf) __A,
+ (__v4sf) __B,
+ (__v4sf) __C,
+ (__mmask8) __U);
+}
+
+static __inline__ __m256 __DEFAULT_FN_ATTRS
+_mm256_mask3_fmsubadd_ps(__m256 __A, __m256 __B, __m256 __C, __mmask8 __U)
+{
+ return (__m256) __builtin_ia32_vfmsubaddps256_mask3 ((__v8sf) __A,
+ (__v8sf) __B,
+ (__v8sf) __C,
+ (__mmask8) __U);
+}
+
+static __inline__ __m128d __DEFAULT_FN_ATTRS
+_mm_mask_fnmadd_pd(__m128d __A, __mmask8 __U, __m128d __B, __m128d __C)
+{
+ return (__m128d) __builtin_ia32_vfnmaddpd128_mask ((__v2df) __A,
+ (__v2df) __B,
+ (__v2df) __C,
+ (__mmask8) __U);
+}
+
+static __inline__ __m256d __DEFAULT_FN_ATTRS
+_mm256_mask_fnmadd_pd(__m256d __A, __mmask8 __U, __m256d __B, __m256d __C)
+{
+ return (__m256d) __builtin_ia32_vfnmaddpd256_mask ((__v4df) __A,
+ (__v4df) __B,
+ (__v4df) __C,
+ (__mmask8) __U);
+}
+
+static __inline__ __m128 __DEFAULT_FN_ATTRS
+_mm_mask_fnmadd_ps(__m128 __A, __mmask8 __U, __m128 __B, __m128 __C)
+{
+ return (__m128) __builtin_ia32_vfnmaddps128_mask ((__v4sf) __A,
+ (__v4sf) __B,
+ (__v4sf) __C,
+ (__mmask8) __U);
+}
+
+static __inline__ __m256 __DEFAULT_FN_ATTRS
+_mm256_mask_fnmadd_ps(__m256 __A, __mmask8 __U, __m256 __B, __m256 __C)
+{
+ return (__m256) __builtin_ia32_vfnmaddps256_mask ((__v8sf) __A,
+ (__v8sf) __B,
+ (__v8sf) __C,
+ (__mmask8) __U);
+}
+
+static __inline__ __m128d __DEFAULT_FN_ATTRS
+_mm_mask_fnmsub_pd(__m128d __A, __mmask8 __U, __m128d __B, __m128d __C)
+{
+ return (__m128d) __builtin_ia32_vfnmsubpd128_mask ((__v2df) __A,
+ (__v2df) __B,
+ (__v2df) __C,
+ (__mmask8) __U);
+}
+
+static __inline__ __m128d __DEFAULT_FN_ATTRS
+_mm_mask3_fnmsub_pd(__m128d __A, __m128d __B, __m128d __C, __mmask8 __U)
+{
+ return (__m128d) __builtin_ia32_vfnmsubpd128_mask3 ((__v2df) __A,
+ (__v2df) __B,
+ (__v2df) __C,
+ (__mmask8) __U);
+}
+
+static __inline__ __m256d __DEFAULT_FN_ATTRS
+_mm256_mask_fnmsub_pd(__m256d __A, __mmask8 __U, __m256d __B, __m256d __C)
+{
+ return (__m256d) __builtin_ia32_vfnmsubpd256_mask ((__v4df) __A,
+ (__v4df) __B,
+ (__v4df) __C,
+ (__mmask8) __U);
+}
+
+static __inline__ __m256d __DEFAULT_FN_ATTRS
+_mm256_mask3_fnmsub_pd(__m256d __A, __m256d __B, __m256d __C, __mmask8 __U)
+{
+ return (__m256d) __builtin_ia32_vfnmsubpd256_mask3 ((__v4df) __A,
+ (__v4df) __B,
+ (__v4df) __C,
+ (__mmask8) __U);
+}
+
+static __inline__ __m128 __DEFAULT_FN_ATTRS
+_mm_mask_fnmsub_ps(__m128 __A, __mmask8 __U, __m128 __B, __m128 __C)
+{
+ return (__m128) __builtin_ia32_vfnmsubps128_mask ((__v4sf) __A,
+ (__v4sf) __B,
+ (__v4sf) __C,
+ (__mmask8) __U);
+}
+
+static __inline__ __m128 __DEFAULT_FN_ATTRS
+_mm_mask3_fnmsub_ps(__m128 __A, __m128 __B, __m128 __C, __mmask8 __U)
+{
+ return (__m128) __builtin_ia32_vfnmsubps128_mask3 ((__v4sf) __A,
+ (__v4sf) __B,
+ (__v4sf) __C,
+ (__mmask8) __U);
+}
+
+static __inline__ __m256 __DEFAULT_FN_ATTRS
+_mm256_mask_fnmsub_ps(__m256 __A, __mmask8 __U, __m256 __B, __m256 __C)
+{
+ return (__m256) __builtin_ia32_vfnmsubps256_mask ((__v8sf) __A,
+ (__v8sf) __B,
+ (__v8sf) __C,
+ (__mmask8) __U);
+}
+
+static __inline__ __m256 __DEFAULT_FN_ATTRS
+_mm256_mask3_fnmsub_ps(__m256 __A, __m256 __B, __m256 __C, __mmask8 __U)
+{
+ return (__m256) __builtin_ia32_vfnmsubps256_mask3 ((__v8sf) __A,
+ (__v8sf) __B,
+ (__v8sf) __C,
+ (__mmask8) __U);
+}
+
+static __inline__ __m128d __DEFAULT_FN_ATTRS
+_mm_mask_add_pd (__m128d __W, __mmask8 __U, __m128d __A, __m128d __B) {
+ return (__m128d) __builtin_ia32_addpd128_mask ((__v2df) __A,
+ (__v2df) __B,
+ (__v2df) __W,
+ (__mmask8) __U);
+}
+
+static __inline__ __m128d __DEFAULT_FN_ATTRS
+_mm_maskz_add_pd (__mmask8 __U, __m128d __A, __m128d __B) {
+ return (__m128d) __builtin_ia32_addpd128_mask ((__v2df) __A,
+ (__v2df) __B,
+ (__v2df)
+ _mm_setzero_pd (),
+ (__mmask8) __U);
+}
+
+static __inline__ __m256d __DEFAULT_FN_ATTRS
+_mm256_mask_add_pd (__m256d __W, __mmask8 __U, __m256d __A, __m256d __B) {
+ return (__m256d) __builtin_ia32_addpd256_mask ((__v4df) __A,
+ (__v4df) __B,
+ (__v4df) __W,
+ (__mmask8) __U);
+}
+
+static __inline__ __m256d __DEFAULT_FN_ATTRS
+_mm256_maskz_add_pd (__mmask8 __U, __m256d __A, __m256d __B) {
+ return (__m256d) __builtin_ia32_addpd256_mask ((__v4df) __A,
+ (__v4df) __B,
+ (__v4df)
+ _mm256_setzero_pd (),
+ (__mmask8) __U);
+}
+
+static __inline__ __m128 __DEFAULT_FN_ATTRS
+_mm_mask_add_ps (__m128 __W, __mmask8 __U, __m128 __A, __m128 __B) {
+ return (__m128) __builtin_ia32_addps128_mask ((__v4sf) __A,
+ (__v4sf) __B,
+ (__v4sf) __W,
+ (__mmask8) __U);
+}
+
+static __inline__ __m128 __DEFAULT_FN_ATTRS
+_mm_maskz_add_ps (__mmask8 __U, __m128 __A, __m128 __B) {
+ return (__m128) __builtin_ia32_addps128_mask ((__v4sf) __A,
+ (__v4sf) __B,
+ (__v4sf)
+ _mm_setzero_ps (),
+ (__mmask8) __U);
+}
+
+static __inline__ __m256 __DEFAULT_FN_ATTRS
+_mm256_mask_add_ps (__m256 __W, __mmask8 __U, __m256 __A, __m256 __B) {
+ return (__m256) __builtin_ia32_addps256_mask ((__v8sf) __A,
+ (__v8sf) __B,
+ (__v8sf) __W,
+ (__mmask8) __U);
+}
+
+static __inline__ __m256 __DEFAULT_FN_ATTRS
+_mm256_maskz_add_ps (__mmask8 __U, __m256 __A, __m256 __B) {
+ return (__m256) __builtin_ia32_addps256_mask ((__v8sf) __A,
+ (__v8sf) __B,
+ (__v8sf)
+ _mm256_setzero_ps (),
+ (__mmask8) __U);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_mask_blend_epi32 (__mmask8 __U, __m128i __A, __m128i __W) {
+ return (__m128i) __builtin_ia32_blendmd_128_mask ((__v4si) __A,
+ (__v4si) __W,
+ (__mmask8) __U);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS
+_mm256_mask_blend_epi32 (__mmask8 __U, __m256i __A, __m256i __W) {
+ return (__m256i) __builtin_ia32_blendmd_256_mask ((__v8si) __A,
+ (__v8si) __W,
+ (__mmask8) __U);
+}
+
+static __inline__ __m128d __DEFAULT_FN_ATTRS
+_mm_mask_blend_pd (__mmask8 __U, __m128d __A, __m128d __W) {
+ return (__m128d) __builtin_ia32_blendmpd_128_mask ((__v2df) __A,
+ (__v2df) __W,
+ (__mmask8) __U);
+}
+
+static __inline__ __m256d __DEFAULT_FN_ATTRS
+_mm256_mask_blend_pd (__mmask8 __U, __m256d __A, __m256d __W) {
+ return (__m256d) __builtin_ia32_blendmpd_256_mask ((__v4df) __A,
+ (__v4df) __W,
+ (__mmask8) __U);
+}
+
+static __inline__ __m128 __DEFAULT_FN_ATTRS
+_mm_mask_blend_ps (__mmask8 __U, __m128 __A, __m128 __W) {
+ return (__m128) __builtin_ia32_blendmps_128_mask ((__v4sf) __A,
+ (__v4sf) __W,
+ (__mmask8) __U);
+}
+
+static __inline__ __m256 __DEFAULT_FN_ATTRS
+_mm256_mask_blend_ps (__mmask8 __U, __m256 __A, __m256 __W) {
+ return (__m256) __builtin_ia32_blendmps_256_mask ((__v8sf) __A,
+ (__v8sf) __W,
+ (__mmask8) __U);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_mask_blend_epi64 (__mmask8 __U, __m128i __A, __m128i __W) {
+ return (__m128i) __builtin_ia32_blendmq_128_mask ((__v2di) __A,
+ (__v2di) __W,
+ (__mmask8) __U);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS
+_mm256_mask_blend_epi64 (__mmask8 __U, __m256i __A, __m256i __W) {
+ return (__m256i) __builtin_ia32_blendmq_256_mask ((__v4di) __A,
+ (__v4di) __W,
+ (__mmask8) __U);
+}
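+
+/*
+ * blend returns __W where the mask bit is set and __A where it is clear;
+ * there is no maskz form because that is just a blend against a zero vector.
+ * Sketch (m, fallback and preferred are arbitrary values):
+ *
+ *   __m256 r = _mm256_mask_blend_ps(m, fallback, preferred);
+ */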
+
+static __inline__ __m128d __DEFAULT_FN_ATTRS
+_mm_mask_compress_pd (__m128d __W, __mmask8 __U, __m128d __A) {
+ return (__m128d) __builtin_ia32_compressdf128_mask ((__v2df) __A,
+ (__v2df) __W,
+ (__mmask8) __U);
+}
+
+static __inline__ __m128d __DEFAULT_FN_ATTRS
+_mm_maskz_compress_pd (__mmask8 __U, __m128d __A) {
+ return (__m128d) __builtin_ia32_compressdf128_mask ((__v2df) __A,
+ (__v2df)
+ _mm_setzero_pd (),
+ (__mmask8) __U);
+}
+
+static __inline__ __m256d __DEFAULT_FN_ATTRS
+_mm256_mask_compress_pd (__m256d __W, __mmask8 __U, __m256d __A) {
+ return (__m256d) __builtin_ia32_compressdf256_mask ((__v4df) __A,
+ (__v4df) __W,
+ (__mmask8) __U);
+}
+
+static __inline__ __m256d __DEFAULT_FN_ATTRS
+_mm256_maskz_compress_pd (__mmask8 __U, __m256d __A) {
+ return (__m256d) __builtin_ia32_compressdf256_mask ((__v4df) __A,
+ (__v4df)
+ _mm256_setzero_pd (),
+ (__mmask8) __U);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_mask_compress_epi64 (__m128i __W, __mmask8 __U, __m128i __A) {
+ return (__m128i) __builtin_ia32_compressdi128_mask ((__v2di) __A,
+ (__v2di) __W,
+ (__mmask8) __U);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_maskz_compress_epi64 (__mmask8 __U, __m128i __A) {
+ return (__m128i) __builtin_ia32_compressdi128_mask ((__v2di) __A,
+ (__v2di)
+ _mm_setzero_si128 (),
+ (__mmask8) __U);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS
+_mm256_mask_compress_epi64 (__m256i __W, __mmask8 __U, __m256i __A) {
+ return (__m256i) __builtin_ia32_compressdi256_mask ((__v4di) __A,
+ (__v4di) __W,
+ (__mmask8) __U);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS
+_mm256_maskz_compress_epi64 (__mmask8 __U, __m256i __A) {
+ return (__m256i) __builtin_ia32_compressdi256_mask ((__v4di) __A,
+ (__v4di)
+ _mm256_setzero_si256 (),
+ (__mmask8) __U);
+}
+
+static __inline__ __m128 __DEFAULT_FN_ATTRS
+_mm_mask_compress_ps (__m128 __W, __mmask8 __U, __m128 __A) {
+ return (__m128) __builtin_ia32_compresssf128_mask ((__v4sf) __A,
+ (__v4sf) __W,
+ (__mmask8) __U);
+}
+
+static __inline__ __m128 __DEFAULT_FN_ATTRS
+_mm_maskz_compress_ps (__mmask8 __U, __m128 __A) {
+ return (__m128) __builtin_ia32_compresssf128_mask ((__v4sf) __A,
+ (__v4sf)
+ _mm_setzero_ps (),
+ (__mmask8) __U);
+}
+
+static __inline__ __m256 __DEFAULT_FN_ATTRS
+_mm256_mask_compress_ps (__m256 __W, __mmask8 __U, __m256 __A) {
+ return (__m256) __builtin_ia32_compresssf256_mask ((__v8sf) __A,
+ (__v8sf) __W,
+ (__mmask8) __U);
+}
+
+static __inline__ __m256 __DEFAULT_FN_ATTRS
+_mm256_maskz_compress_ps (__mmask8 __U, __m256 __A) {
+ return (__m256) __builtin_ia32_compresssf256_mask ((__v8sf) __A,
+ (__v8sf)
+ _mm256_setzero_ps (),
+ (__mmask8) __U);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_mask_compress_epi32 (__m128i __W, __mmask8 __U, __m128i __A) {
+ return (__m128i) __builtin_ia32_compresssi128_mask ((__v4si) __A,
+ (__v4si) __W,
+ (__mmask8) __U);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_maskz_compress_epi32 (__mmask8 __U, __m128i __A) {
+ return (__m128i) __builtin_ia32_compresssi128_mask ((__v4si) __A,
+ (__v4si)
+ _mm_setzero_si128 (),
+ (__mmask8) __U);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS
+_mm256_mask_compress_epi32 (__m256i __W, __mmask8 __U, __m256i __A) {
+ return (__m256i) __builtin_ia32_compresssi256_mask ((__v8si) __A,
+ (__v8si) __W,
+ (__mmask8) __U);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS
+_mm256_maskz_compress_epi32 (__mmask8 __U, __m256i __A) {
+ return (__m256i) __builtin_ia32_compresssi256_mask ((__v8si) __A,
+ (__v8si)
+ _mm256_setzero_si256 (),
+ (__mmask8) __U);
+}
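+
+/*
+ * compress packs the selected lanes contiguously into the low elements of the
+ * result; the remaining lanes come from __W (merging forms) or are zeroed
+ * (maskz forms).  Sketch (m and v are arbitrary values):
+ *
+ *   __m256i packed = _mm256_maskz_compress_epi32(m, v);  // survivors in front
+ */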
+
+static __inline__ void __DEFAULT_FN_ATTRS
+_mm_mask_compressstoreu_pd (void *__P, __mmask8 __U, __m128d __A) {
+ __builtin_ia32_compressstoredf128_mask ((__v2df *) __P,
+ (__v2df) __A,
+ (__mmask8) __U);
+}
+
+static __inline__ void __DEFAULT_FN_ATTRS
+_mm256_mask_compressstoreu_pd (void *__P, __mmask8 __U, __m256d __A) {
+ __builtin_ia32_compressstoredf256_mask ((__v4df *) __P,
+ (__v4df) __A,
+ (__mmask8) __U);
+}
+
+static __inline__ void __DEFAULT_FN_ATTRS
+_mm_mask_compressstoreu_epi64 (void *__P, __mmask8 __U, __m128i __A) {
+ __builtin_ia32_compressstoredi128_mask ((__v2di *) __P,
+ (__v2di) __A,
+ (__mmask8) __U);
+}
+
+static __inline__ void __DEFAULT_FN_ATTRS
+_mm256_mask_compressstoreu_epi64 (void *__P, __mmask8 __U, __m256i __A) {
+ __builtin_ia32_compressstoredi256_mask ((__v4di *) __P,
+ (__v4di) __A,
+ (__mmask8) __U);
+}
+
+static __inline__ void __DEFAULT_FN_ATTRS
+_mm_mask_compressstoreu_ps (void *__P, __mmask8 __U, __m128 __A) {
+ __builtin_ia32_compressstoresf128_mask ((__v4sf *) __P,
+ (__v4sf) __A,
+ (__mmask8) __U);
+}
+
+static __inline__ void __DEFAULT_FN_ATTRS
+_mm256_mask_compressstoreu_ps (void *__P, __mmask8 __U, __m256 __A) {
+ __builtin_ia32_compressstoresf256_mask ((__v8sf *) __P,
+ (__v8sf) __A,
+ (__mmask8) __U);
+}
+
+static __inline__ void __DEFAULT_FN_ATTRS
+_mm_mask_compressstoreu_epi32 (void *__P, __mmask8 __U, __m128i __A) {
+ __builtin_ia32_compressstoresi128_mask ((__v4si *) __P,
+ (__v4si) __A,
+ (__mmask8) __U);
+}
+
+static __inline__ void __DEFAULT_FN_ATTRS
+_mm256_mask_compressstoreu_epi32 (void *__P, __mmask8 __U, __m256i __A) {
+ __builtin_ia32_compressstoresi256_mask ((__v8si *) __P,
+ (__v8si) __A,
+ (__mmask8) __U);
+}
+
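+/* Compress-store writes only the lanes selected by the mask, packed
+   contiguously, to a possibly unaligned destination; bytes past the
+   stored elements are left untouched.  A minimal illustrative
+   sketch, assuming __dst has room for up to four ints: */
+static __inline__ void __DEFAULT_FN_ATTRS
+__example_compressstore_epi32 (int *__dst, __mmask8 __k, __m128i __v) {
+  _mm_mask_compressstoreu_epi32 ((void *) __dst, __k, __v);
+}
+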
+static __inline__ __m128d __DEFAULT_FN_ATTRS
+_mm_mask_cvtepi32_pd (__m128d __W, __mmask8 __U, __m128i __A) {
+ return (__m128d) __builtin_ia32_cvtdq2pd128_mask ((__v4si) __A,
+ (__v2df) __W,
+ (__mmask8) __U);
+}
+
+static __inline__ __m128d __DEFAULT_FN_ATTRS
+_mm_maskz_cvtepi32_pd (__mmask8 __U, __m128i __A) {
+ return (__m128d) __builtin_ia32_cvtdq2pd128_mask ((__v4si) __A,
+ (__v2df)
+ _mm_setzero_pd (),
+ (__mmask8) __U);
+}
+
+static __inline__ __m256d __DEFAULT_FN_ATTRS
+_mm256_mask_cvtepi32_pd (__m256d __W, __mmask8 __U, __m128i __A) {
+ return (__m256d) __builtin_ia32_cvtdq2pd256_mask ((__v4si) __A,
+ (__v4df) __W,
+ (__mmask8) __U);
+}
+
+static __inline__ __m256d __DEFAULT_FN_ATTRS
+_mm256_maskz_cvtepi32_pd (__mmask8 __U, __m128i __A) {
+ return (__m256d) __builtin_ia32_cvtdq2pd256_mask ((__v4si) __A,
+ (__v4df)
+ _mm256_setzero_pd (),
+ (__mmask8) __U);
+}
+
+static __inline__ __m128 __DEFAULT_FN_ATTRS
+_mm_mask_cvtepi32_ps (__m128 __W, __mmask8 __U, __m128i __A) {
+ return (__m128) __builtin_ia32_cvtdq2ps128_mask ((__v4si) __A,
+ (__v4sf) __W,
+ (__mmask8) __U);
+}
+
+static __inline__ __m128 __DEFAULT_FN_ATTRS
+_mm_maskz_cvtepi32_ps (__mmask8 __U, __m128i __A) {
+ return (__m128) __builtin_ia32_cvtdq2ps128_mask ((__v4si) __A,
+ (__v4sf)
+ _mm_setzero_ps (),
+ (__mmask8) __U);
+}
+
+static __inline__ __m256 __DEFAULT_FN_ATTRS
+_mm256_mask_cvtepi32_ps (__m256 __W, __mmask8 __U, __m256i __A) {
+ return (__m256) __builtin_ia32_cvtdq2ps256_mask ((__v8si) __A,
+ (__v8sf) __W,
+ (__mmask8) __U);
+}
+
+static __inline__ __m256 __DEFAULT_FN_ATTRS
+_mm256_maskz_cvtepi32_ps (__mmask8 __U, __m256i __A) {
+ return (__m256) __builtin_ia32_cvtdq2ps256_mask ((__v8si) __A,
+ (__v8sf)
+ _mm256_setzero_ps (),
+ (__mmask8) __U);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_mask_cvtpd_epi32 (__m128i __W, __mmask8 __U, __m128d __A) {
+ return (__m128i) __builtin_ia32_cvtpd2dq128_mask ((__v2df) __A,
+ (__v4si) __W,
+ (__mmask8) __U);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_maskz_cvtpd_epi32 (__mmask8 __U, __m128d __A) {
+ return (__m128i) __builtin_ia32_cvtpd2dq128_mask ((__v2df) __A,
+ (__v4si)
+ _mm_setzero_si128 (),
+ (__mmask8) __U);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm256_mask_cvtpd_epi32 (__m128i __W, __mmask8 __U, __m256d __A) {
+ return (__m128i) __builtin_ia32_cvtpd2dq256_mask ((__v4df) __A,
+ (__v4si) __W,
+ (__mmask8) __U);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm256_maskz_cvtpd_epi32 (__mmask8 __U, __m256d __A) {
+ return (__m128i) __builtin_ia32_cvtpd2dq256_mask ((__v4df) __A,
+ (__v4si)
+ _mm_setzero_si128 (),
+ (__mmask8) __U);
+}
+
+static __inline__ __m128 __DEFAULT_FN_ATTRS
+_mm_mask_cvtpd_ps (__m128 __W, __mmask8 __U, __m128d __A) {
+ return (__m128) __builtin_ia32_cvtpd2ps_mask ((__v2df) __A,
+ (__v4sf) __W,
+ (__mmask8) __U);
+}
+
+static __inline__ __m128 __DEFAULT_FN_ATTRS
+_mm_maskz_cvtpd_ps (__mmask8 __U, __m128d __A) {
+ return (__m128) __builtin_ia32_cvtpd2ps_mask ((__v2df) __A,
+ (__v4sf)
+ _mm_setzero_ps (),
+ (__mmask8) __U);
+}
+
+static __inline__ __m128 __DEFAULT_FN_ATTRS
+_mm256_mask_cvtpd_ps (__m128 __W, __mmask8 __U, __m256d __A) {
+ return (__m128) __builtin_ia32_cvtpd2ps256_mask ((__v4df) __A,
+ (__v4sf) __W,
+ (__mmask8) __U);
+}
+
+static __inline__ __m128 __DEFAULT_FN_ATTRS
+_mm256_maskz_cvtpd_ps (__mmask8 __U, __m256d __A) {
+ return (__m128) __builtin_ia32_cvtpd2ps256_mask ((__v4df) __A,
+ (__v4sf)
+ _mm_setzero_ps (),
+ (__mmask8) __U);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_cvtpd_epu32 (__m128d __A) {
+ return (__m128i) __builtin_ia32_cvtpd2udq128_mask ((__v2df) __A,
+ (__v4si)
+ _mm_setzero_si128 (),
+ (__mmask8) -1);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_mask_cvtpd_epu32 (__m128i __W, __mmask8 __U, __m128d __A) {
+ return (__m128i) __builtin_ia32_cvtpd2udq128_mask ((__v2df) __A,
+ (__v4si) __W,
+ (__mmask8) __U);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_maskz_cvtpd_epu32 (__mmask8 __U, __m128d __A) {
+ return (__m128i) __builtin_ia32_cvtpd2udq128_mask ((__v2df) __A,
+ (__v4si)
+ _mm_setzero_si128 (),
+ (__mmask8) __U);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm256_cvtpd_epu32 (__m256d __A) {
+ return (__m128i) __builtin_ia32_cvtpd2udq256_mask ((__v4df) __A,
+ (__v4si)
+ _mm_setzero_si128 (),
+ (__mmask8) -1);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm256_mask_cvtpd_epu32 (__m128i __W, __mmask8 __U, __m256d __A) {
+ return (__m128i) __builtin_ia32_cvtpd2udq256_mask ((__v4df) __A,
+ (__v4si) __W,
+ (__mmask8) __U);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm256_maskz_cvtpd_epu32 (__mmask8 __U, __m256d __A) {
+ return (__m128i) __builtin_ia32_cvtpd2udq256_mask ((__v4df) __A,
+ (__v4si)
+ _mm_setzero_si128 (),
+ (__mmask8) __U);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_mask_cvtps_epi32 (__m128i __W, __mmask8 __U, __m128 __A) {
+ return (__m128i) __builtin_ia32_cvtps2dq128_mask ((__v4sf) __A,
+ (__v4si) __W,
+ (__mmask8) __U);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_maskz_cvtps_epi32 (__mmask8 __U, __m128 __A) {
+ return (__m128i) __builtin_ia32_cvtps2dq128_mask ((__v4sf) __A,
+ (__v4si)
+ _mm_setzero_si128 (),
+ (__mmask8) __U);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS
+_mm256_mask_cvtps_epi32 (__m256i __W, __mmask8 __U, __m256 __A) {
+ return (__m256i) __builtin_ia32_cvtps2dq256_mask ((__v8sf) __A,
+ (__v8si) __W,
+ (__mmask8) __U);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS
+_mm256_maskz_cvtps_epi32 (__mmask8 __U, __m256 __A) {
+ return (__m256i) __builtin_ia32_cvtps2dq256_mask ((__v8sf) __A,
+ (__v8si)
+ _mm256_setzero_si256 (),
+ (__mmask8) __U);
+}
+
+static __inline__ __m128d __DEFAULT_FN_ATTRS
+_mm_mask_cvtps_pd (__m128d __W, __mmask8 __U, __m128 __A) {
+ return (__m128d) __builtin_ia32_cvtps2pd128_mask ((__v4sf) __A,
+ (__v2df) __W,
+ (__mmask8) __U);
+}
+
+static __inline__ __m128d __DEFAULT_FN_ATTRS
+_mm_maskz_cvtps_pd (__mmask8 __U, __m128 __A) {
+ return (__m128d) __builtin_ia32_cvtps2pd128_mask ((__v4sf) __A,
+ (__v2df)
+ _mm_setzero_pd (),
+ (__mmask8) __U);
+}
+
+static __inline__ __m256d __DEFAULT_FN_ATTRS
+_mm256_mask_cvtps_pd (__m256d __W, __mmask8 __U, __m128 __A) {
+ return (__m256d) __builtin_ia32_cvtps2pd256_mask ((__v4sf) __A,
+ (__v4df) __W,
+ (__mmask8) __U);
+}
+
+static __inline__ __m256d __DEFAULT_FN_ATTRS
+_mm256_maskz_cvtps_pd (__mmask8 __U, __m128 __A) {
+ return (__m256d) __builtin_ia32_cvtps2pd256_mask ((__v4sf) __A,
+ (__v4df)
+ _mm256_setzero_pd (),
+ (__mmask8) __U);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_cvtps_epu32 (__m128 __A) {
+ return (__m128i) __builtin_ia32_cvtps2udq128_mask ((__v4sf) __A,
+ (__v4si)
+ _mm_setzero_si128 (),
+ (__mmask8) -1);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_mask_cvtps_epu32 (__m128i __W, __mmask8 __U, __m128 __A) {
+ return (__m128i) __builtin_ia32_cvtps2udq128_mask ((__v4sf) __A,
+ (__v4si) __W,
+ (__mmask8) __U);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_maskz_cvtps_epu32 (__mmask8 __U, __m128 __A) {
+ return (__m128i) __builtin_ia32_cvtps2udq128_mask ((__v4sf) __A,
+ (__v4si)
+ _mm_setzero_si128 (),
+ (__mmask8) __U);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS
+_mm256_cvtps_epu32 (__m256 __A) {
+ return (__m256i) __builtin_ia32_cvtps2udq256_mask ((__v8sf) __A,
+ (__v8si)
+ _mm256_setzero_si256 (),
+ (__mmask8) -1);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS
+_mm256_mask_cvtps_epu32 (__m256i __W, __mmask8 __U, __m256 __A) {
+ return (__m256i) __builtin_ia32_cvtps2udq256_mask ((__v8sf) __A,
+ (__v8si) __W,
+ (__mmask8) __U);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS
+_mm256_maskz_cvtps_epu32 (__mmask8 __U, __m256 __A) {
+ return (__m256i) __builtin_ia32_cvtps2udq256_mask ((__v8sf) __A,
+ (__v8si)
+ _mm256_setzero_si256 (),
+ (__mmask8) __U);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_mask_cvttpd_epi32 (__m128i __W, __mmask8 __U, __m128d __A) {
+ return (__m128i) __builtin_ia32_cvttpd2dq128_mask ((__v2df) __A,
+ (__v4si) __W,
+ (__mmask8) __U);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_maskz_cvttpd_epi32 (__mmask8 __U, __m128d __A) {
+ return (__m128i) __builtin_ia32_cvttpd2dq128_mask ((__v2df) __A,
+ (__v4si)
+ _mm_setzero_si128 (),
+ (__mmask8) __U);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm256_mask_cvttpd_epi32 (__m128i __W, __mmask8 __U, __m256d __A) {
+ return (__m128i) __builtin_ia32_cvttpd2dq256_mask ((__v4df) __A,
+ (__v4si) __W,
+ (__mmask8) __U);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm256_maskz_cvttpd_epi32 (__mmask8 __U, __m256d __A) {
+ return (__m128i) __builtin_ia32_cvttpd2dq256_mask ((__v4df) __A,
+ (__v4si)
+ _mm_setzero_si128 (),
+ (__mmask8) __U);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_cvttpd_epu32 (__m128d __A) {
+ return (__m128i) __builtin_ia32_cvttpd2udq128_mask ((__v2df) __A,
+ (__v4si)
+ _mm_setzero_si128 (),
+ (__mmask8) -1);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_mask_cvttpd_epu32 (__m128i __W, __mmask8 __U, __m128d __A) {
+ return (__m128i) __builtin_ia32_cvttpd2udq128_mask ((__v2df) __A,
+ (__v4si) __W,
+ (__mmask8) __U);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_maskz_cvttpd_epu32 (__mmask8 __U, __m128d __A) {
+ return (__m128i) __builtin_ia32_cvttpd2udq128_mask ((__v2df) __A,
+ (__v4si)
+ _mm_setzero_si128 (),
+ (__mmask8) __U);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm256_cvttpd_epu32 (__m256d __A) {
+ return (__m128i) __builtin_ia32_cvttpd2udq256_mask ((__v4df) __A,
+ (__v4si)
+ _mm_setzero_si128 (),
+ (__mmask8) -1);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm256_mask_cvttpd_epu32 (__m128i __W, __mmask8 __U, __m256d __A) {
+ return (__m128i) __builtin_ia32_cvttpd2udq256_mask ((__v4df) __A,
+ (__v4si) __W,
+ (__mmask8) __U);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm256_maskz_cvttpd_epu32 (__mmask8 __U, __m256d __A) {
+ return (__m128i) __builtin_ia32_cvttpd2udq256_mask ((__v4df) __A,
+ (__v4si)
+ _mm_setzero_si128 (),
+ (__mmask8) __U);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_mask_cvttps_epi32 (__m128i __W, __mmask8 __U, __m128 __A) {
+ return (__m128i) __builtin_ia32_cvttps2dq128_mask ((__v4sf) __A,
+ (__v4si) __W,
+ (__mmask8) __U);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_maskz_cvttps_epi32 (__mmask8 __U, __m128 __A) {
+ return (__m128i) __builtin_ia32_cvttps2dq128_mask ((__v4sf) __A,
+ (__v4si)
+ _mm_setzero_si128 (),
+ (__mmask8) __U);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS
+_mm256_mask_cvttps_epi32 (__m256i __W, __mmask8 __U, __m256 __A) {
+ return (__m256i) __builtin_ia32_cvttps2dq256_mask ((__v8sf) __A,
+ (__v8si) __W,
+ (__mmask8) __U);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS
+_mm256_maskz_cvttps_epi32 (__mmask8 __U, __m256 __A) {
+ return (__m256i) __builtin_ia32_cvttps2dq256_mask ((__v8sf) __A,
+ (__v8si)
+ _mm256_setzero_si256 (),
+ (__mmask8) __U);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_cvttps_epu32 (__m128 __A) {
+ return (__m128i) __builtin_ia32_cvttps2udq128_mask ((__v4sf) __A,
+ (__v4si)
+ _mm_setzero_si128 (),
+ (__mmask8) -1);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_mask_cvttps_epu32 (__m128i __W, __mmask8 __U, __m128 __A) {
+ return (__m128i) __builtin_ia32_cvttps2udq128_mask ((__v4sf) __A,
+ (__v4si) __W,
+ (__mmask8) __U);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_maskz_cvttps_epu32 (__mmask8 __U, __m128 __A) {
+ return (__m128i) __builtin_ia32_cvttps2udq128_mask ((__v4sf) __A,
+ (__v4si)
+ _mm_setzero_si128 (),
+ (__mmask8) __U);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS
+_mm256_cvttps_epu32 (__m256 __A) {
+ return (__m256i) __builtin_ia32_cvttps2udq256_mask ((__v8sf) __A,
+ (__v8si)
+ _mm256_setzero_si256 (),
+ (__mmask8) -1);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS
+_mm256_mask_cvttps_epu32 (__m256i __W, __mmask8 __U, __m256 __A) {
+ return (__m256i) __builtin_ia32_cvttps2udq256_mask ((__v8sf) __A,
+ (__v8si) __W,
+ (__mmask8) __U);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS
+_mm256_maskz_cvttps_epu32 (__mmask8 __U, __m256 __A) {
+ return (__m256i) __builtin_ia32_cvttps2udq256_mask ((__v8sf) __A,
+ (__v8si)
+ _mm256_setzero_si256 (),
+ (__mmask8) __U);
+}
+
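+/* The cvt* conversions above round according to the current MXCSR
+   rounding mode (round-to-nearest-even by default); the cvtt* forms
+   always truncate toward zero, and the *_epu32 forms produce
+   unsigned 32-bit results.  A minimal illustrative sketch: 1.7
+   truncates to 1 and -1.7 to -1. */
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+__example_truncate_ps (__m128 __v) {
+  return _mm_maskz_cvttps_epi32 ((__mmask8) 0xF, __v);
+}
+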
+static __inline__ __m128d __DEFAULT_FN_ATTRS
+_mm_cvtepu32_pd (__m128i __A) {
+ return (__m128d) __builtin_ia32_cvtudq2pd128_mask ((__v4si) __A,
+ (__v2df)
+ _mm_setzero_pd (),
+ (__mmask8) -1);
+}
+
+static __inline__ __m128d __DEFAULT_FN_ATTRS
+_mm_mask_cvtepu32_pd (__m128d __W, __mmask8 __U, __m128i __A) {
+ return (__m128d) __builtin_ia32_cvtudq2pd128_mask ((__v4si) __A,
+ (__v2df) __W,
+ (__mmask8) __U);
+}
+
+static __inline__ __m128d __DEFAULT_FN_ATTRS
+_mm_maskz_cvtepu32_pd (__mmask8 __U, __m128i __A) {
+ return (__m128d) __builtin_ia32_cvtudq2pd128_mask ((__v4si) __A,
+ (__v2df)
+ _mm_setzero_pd (),
+ (__mmask8) __U);
+}
+
+static __inline__ __m256d __DEFAULT_FN_ATTRS
+_mm256_cvtepu32_pd (__m128i __A) {
+ return (__m256d) __builtin_ia32_cvtudq2pd256_mask ((__v4si) __A,
+ (__v4df)
+ _mm256_setzero_pd (),
+ (__mmask8) -1);
+}
+
+static __inline__ __m256d __DEFAULT_FN_ATTRS
+_mm256_mask_cvtepu32_pd (__m256d __W, __mmask8 __U, __m128i __A) {
+ return (__m256d) __builtin_ia32_cvtudq2pd256_mask ((__v4si) __A,
+ (__v4df) __W,
+ (__mmask8) __U);
+}
+
+static __inline__ __m256d __DEFAULT_FN_ATTRS
+_mm256_maskz_cvtepu32_pd (__mmask8 __U, __m128i __A) {
+ return (__m256d) __builtin_ia32_cvtudq2pd256_mask ((__v4si) __A,
+ (__v4df)
+ _mm256_setzero_pd (),
+ (__mmask8) __U);
+}
+
+static __inline__ __m128 __DEFAULT_FN_ATTRS
+_mm_cvtepu32_ps (__m128i __A) {
+ return (__m128) __builtin_ia32_cvtudq2ps128_mask ((__v4si) __A,
+ (__v4sf)
+ _mm_setzero_ps (),
+ (__mmask8) -1);
+}
+
+static __inline__ __m128 __DEFAULT_FN_ATTRS
+_mm_mask_cvtepu32_ps (__m128 __W, __mmask8 __U, __m128i __A) {
+ return (__m128) __builtin_ia32_cvtudq2ps128_mask ((__v4si) __A,
+ (__v4sf) __W,
+ (__mmask8) __U);
+}
+
+static __inline__ __m128 __DEFAULT_FN_ATTRS
+_mm_maskz_cvtepu32_ps (__mmask8 __U, __m128i __A) {
+ return (__m128) __builtin_ia32_cvtudq2ps128_mask ((__v4si) __A,
+ (__v4sf)
+ _mm_setzero_ps (),
+ (__mmask8) __U);
+}
+
+static __inline__ __m256 __DEFAULT_FN_ATTRS
+_mm256_cvtepu32_ps (__m256i __A) {
+ return (__m256) __builtin_ia32_cvtudq2ps256_mask ((__v8si) __A,
+ (__v8sf)
+ _mm256_setzero_ps (),
+ (__mmask8) -1);
+}
+
+static __inline__ __m256 __DEFAULT_FN_ATTRS
+_mm256_mask_cvtepu32_ps (__m256 __W, __mmask8 __U, __m256i __A) {
+ return (__m256) __builtin_ia32_cvtudq2ps256_mask ((__v8si) __A,
+ (__v8sf) __W,
+ (__mmask8) __U);
+}
+
+static __inline__ __m256 __DEFAULT_FN_ATTRS
+_mm256_maskz_cvtepu32_ps (__mmask8 __U, __m256i __A) {
+ return (__m256) __builtin_ia32_cvtudq2ps256_mask ((__v8si) __A,
+ (__v8sf)
+ _mm256_setzero_ps (),
+ (__mmask8) __U);
+}
+
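+/* The cvtepu32_* conversions above treat the 32-bit lanes as
+   unsigned, a conversion for which earlier SSE/AVX intrinsics had
+   no direct equivalent.  A minimal illustrative sketch: */
+static __inline__ __m128d __DEFAULT_FN_ATTRS
+__example_u32_to_f64 (__m128i __v) {
+  return _mm_cvtepu32_pd (__v);
+}
+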
+static __inline__ __m128d __DEFAULT_FN_ATTRS
+_mm_mask_div_pd (__m128d __W, __mmask8 __U, __m128d __A, __m128d __B) {
+ return (__m128d) __builtin_ia32_divpd_mask ((__v2df) __A,
+ (__v2df) __B,
+ (__v2df) __W,
+ (__mmask8) __U);
+}
+
+static __inline__ __m128d __DEFAULT_FN_ATTRS
+_mm_maskz_div_pd (__mmask8 __U, __m128d __A, __m128d __B) {
+ return (__m128d) __builtin_ia32_divpd_mask ((__v2df) __A,
+ (__v2df) __B,
+ (__v2df)
+ _mm_setzero_pd (),
+ (__mmask8) __U);
+}
+
+static __inline__ __m256d __DEFAULT_FN_ATTRS
+_mm256_mask_div_pd (__m256d __W, __mmask8 __U, __m256d __A,
+ __m256d __B) {
+ return (__m256d) __builtin_ia32_divpd256_mask ((__v4df) __A,
+ (__v4df) __B,
+ (__v4df) __W,
+ (__mmask8) __U);
+}
+
+static __inline__ __m256d __DEFAULT_FN_ATTRS
+_mm256_maskz_div_pd (__mmask8 __U, __m256d __A, __m256d __B) {
+ return (__m256d) __builtin_ia32_divpd256_mask ((__v4df) __A,
+ (__v4df) __B,
+ (__v4df)
+ _mm256_setzero_pd (),
+ (__mmask8) __U);
+}
+
+static __inline__ __m128 __DEFAULT_FN_ATTRS
+_mm_mask_div_ps (__m128 __W, __mmask8 __U, __m128 __A, __m128 __B) {
+ return (__m128) __builtin_ia32_divps_mask ((__v4sf) __A,
+ (__v4sf) __B,
+ (__v4sf) __W,
+ (__mmask8) __U);
+}
+
+static __inline__ __m128 __DEFAULT_FN_ATTRS
+_mm_maskz_div_ps (__mmask8 __U, __m128 __A, __m128 __B) {
+ return (__m128) __builtin_ia32_divps_mask ((__v4sf) __A,
+ (__v4sf) __B,
+ (__v4sf)
+ _mm_setzero_ps (),
+ (__mmask8) __U);
+}
+
+static __inline__ __m256 __DEFAULT_FN_ATTRS
+_mm256_mask_div_ps (__m256 __W, __mmask8 __U, __m256 __A, __m256 __B) {
+ return (__m256) __builtin_ia32_divps256_mask ((__v8sf) __A,
+ (__v8sf) __B,
+ (__v8sf) __W,
+ (__mmask8) __U);
+}
+
+static __inline__ __m256 __DEFAULT_FN_ATTRS
+_mm256_maskz_div_ps (__mmask8 __U, __m256 __A, __m256 __B) {
+ return (__m256) __builtin_ia32_divps256_mask ((__v8sf) __A,
+ (__v8sf) __B,
+ (__v8sf)
+ _mm256_setzero_ps (),
+ (__mmask8) __U);
+}
+
+static __inline__ __m128d __DEFAULT_FN_ATTRS
+_mm_mask_expand_pd (__m128d __W, __mmask8 __U, __m128d __A) {
+ return (__m128d) __builtin_ia32_expanddf128_mask ((__v2df) __A,
+ (__v2df) __W,
+ (__mmask8) __U);
+}
+
+static __inline__ __m128d __DEFAULT_FN_ATTRS
+_mm_maskz_expand_pd (__mmask8 __U, __m128d __A) {
+ return (__m128d) __builtin_ia32_expanddf128_mask ((__v2df) __A,
+ (__v2df)
+ _mm_setzero_pd (),
+ (__mmask8) __U);
+}
+
+static __inline__ __m256d __DEFAULT_FN_ATTRS
+_mm256_mask_expand_pd (__m256d __W, __mmask8 __U, __m256d __A) {
+ return (__m256d) __builtin_ia32_expanddf256_mask ((__v4df) __A,
+ (__v4df) __W,
+ (__mmask8) __U);
+}
+
+static __inline__ __m256d __DEFAULT_FN_ATTRS
+_mm256_maskz_expand_pd (__mmask8 __U, __m256d __A) {
+ return (__m256d) __builtin_ia32_expanddf256_mask ((__v4df) __A,
+ (__v4df)
+ _mm256_setzero_pd (),
+ (__mmask8) __U);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_mask_expand_epi64 (__m128i __W, __mmask8 __U, __m128i __A) {
+ return (__m128i) __builtin_ia32_expanddi128_mask ((__v2di) __A,
+ (__v2di) __W,
+ (__mmask8) __U);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_maskz_expand_epi64 (__mmask8 __U, __m128i __A) {
+ return (__m128i) __builtin_ia32_expanddi128_mask ((__v2di) __A,
+ (__v2di)
+ _mm_setzero_si128 (),
+ (__mmask8) __U);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS
+_mm256_mask_expand_epi64 (__m256i __W, __mmask8 __U, __m256i __A) {
+ return (__m256i) __builtin_ia32_expanddi256_mask ((__v4di) __A,
+ (__v4di) __W,
+ (__mmask8) __U);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS
+_mm256_maskz_expand_epi64 (__mmask8 __U, __m256i __A) {
+ return (__m256i) __builtin_ia32_expanddi256_mask ((__v4di) __A,
+ (__v4di)
+ _mm256_setzero_si256 (),
+ (__mmask8) __U);
+}
+
+static __inline__ __m128d __DEFAULT_FN_ATTRS
+_mm_mask_expandloadu_pd (__m128d __W, __mmask8 __U, void const *__P) {
+ return (__m128d) __builtin_ia32_expandloaddf128_mask ((__v2df *) __P,
+ (__v2df) __W,
+ (__mmask8)
+ __U);
+}
+
+static __inline__ __m128d __DEFAULT_FN_ATTRS
+_mm_maskz_expandloadu_pd (__mmask8 __U, void const *__P) {
+ return (__m128d) __builtin_ia32_expandloaddf128_mask ((__v2df *) __P,
+ (__v2df)
+ _mm_setzero_pd (),
+ (__mmask8)
+ __U);
+}
+
+static __inline__ __m256d __DEFAULT_FN_ATTRS
+_mm256_mask_expandloadu_pd (__m256d __W, __mmask8 __U, void const *__P) {
+ return (__m256d) __builtin_ia32_expandloaddf256_mask ((__v4df *) __P,
+ (__v4df) __W,
+ (__mmask8)
+ __U);
+}
+
+static __inline__ __m256d __DEFAULT_FN_ATTRS
+_mm256_maskz_expandloadu_pd (__mmask8 __U, void const *__P) {
+ return (__m256d) __builtin_ia32_expandloaddf256_mask ((__v4df *) __P,
+ (__v4df)
+ _mm256_setzero_pd (),
+ (__mmask8)
+ __U);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_mask_expandloadu_epi64 (__m128i __W, __mmask8 __U, void const *__P) {
+ return (__m128i) __builtin_ia32_expandloaddi128_mask ((__v2di *) __P,
+ (__v2di) __W,
+ (__mmask8)
+ __U);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_maskz_expandloadu_epi64 (__mmask8 __U, void const *__P) {
+ return (__m128i) __builtin_ia32_expandloaddi128_mask ((__v2di *) __P,
+ (__v2di)
+ _mm_setzero_si128 (),
+ (__mmask8)
+ __U);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS
+_mm256_mask_expandloadu_epi64 (__m256i __W, __mmask8 __U,
+ void const *__P) {
+ return (__m256i) __builtin_ia32_expandloaddi256_mask ((__v4di *) __P,
+ (__v4di) __W,
+ (__mmask8)
+ __U);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS
+_mm256_maskz_expandloadu_epi64 (__mmask8 __U, void const *__P) {
+ return (__m256i) __builtin_ia32_expandloaddi256_mask ((__v4di *) __P,
+ (__v4di)
+ _mm256_setzero_si256 (),
+ (__mmask8)
+ __U);
+}
+
+static __inline__ __m128 __DEFAULT_FN_ATTRS
+_mm_mask_expandloadu_ps (__m128 __W, __mmask8 __U, void const *__P) {
+ return (__m128) __builtin_ia32_expandloadsf128_mask ((__v4sf *) __P,
+ (__v4sf) __W,
+ (__mmask8) __U);
+}
+
+static __inline__ __m128 __DEFAULT_FN_ATTRS
+_mm_maskz_expandloadu_ps (__mmask8 __U, void const *__P) {
+ return (__m128) __builtin_ia32_expandloadsf128_mask ((__v4sf *) __P,
+ (__v4sf)
+ _mm_setzero_ps (),
+ (__mmask8)
+ __U);
+}
+
+static __inline__ __m256 __DEFAULT_FN_ATTRS
+_mm256_mask_expandloadu_ps (__m256 __W, __mmask8 __U, void const *__P) {
+ return (__m256) __builtin_ia32_expandloadsf256_mask ((__v8sf *) __P,
+ (__v8sf) __W,
+ (__mmask8) __U);
+}
+
+static __inline__ __m256 __DEFAULT_FN_ATTRS
+_mm256_maskz_expandloadu_ps (__mmask8 __U, void const *__P) {
+ return (__m256) __builtin_ia32_expandloadsf256_mask ((__v8sf *) __P,
+ (__v8sf)
+ _mm256_setzero_ps (),
+ (__mmask8)
+ __U);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_mask_expandloadu_epi32 (__m128i __W, __mmask8 __U, void const *__P) {
+ return (__m128i) __builtin_ia32_expandloadsi128_mask ((__v4si *) __P,
+ (__v4si) __W,
+ (__mmask8)
+ __U);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_maskz_expandloadu_epi32 (__mmask8 __U, void const *__P) {
+ return (__m128i) __builtin_ia32_expandloadsi128_mask ((__v4si *) __P,
+ (__v4si)
+ _mm_setzero_si128 (),
+ (__mmask8) __U);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS
+_mm256_mask_expandloadu_epi32 (__m256i __W, __mmask8 __U,
+ void const *__P) {
+ return (__m256i) __builtin_ia32_expandloadsi256_mask ((__v8si *) __P,
+ (__v8si) __W,
+ (__mmask8)
+ __U);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS
+_mm256_maskz_expandloadu_epi32 (__mmask8 __U, void const *__P) {
+ return (__m256i) __builtin_ia32_expandloadsi256_mask ((__v8si *) __P,
+ (__v8si)
+ _mm256_setzero_si256 (),
+ (__mmask8)
+ __U);
+}
+
+static __inline__ __m128 __DEFAULT_FN_ATTRS
+_mm_mask_expand_ps (__m128 __W, __mmask8 __U, __m128 __A) {
+ return (__m128) __builtin_ia32_expandsf128_mask ((__v4sf) __A,
+ (__v4sf) __W,
+ (__mmask8) __U);
+}
+
+static __inline__ __m128 __DEFAULT_FN_ATTRS
+_mm_maskz_expand_ps (__mmask8 __U, __m128 __A) {
+ return (__m128) __builtin_ia32_expandsf128_mask ((__v4sf) __A,
+ (__v4sf)
+ _mm_setzero_ps (),
+ (__mmask8) __U);
+}
+
+static __inline__ __m256 __DEFAULT_FN_ATTRS
+_mm256_mask_expand_ps (__m256 __W, __mmask8 __U, __m256 __A) {
+ return (__m256) __builtin_ia32_expandsf256_mask ((__v8sf) __A,
+ (__v8sf) __W,
+ (__mmask8) __U);
+}
+
+static __inline__ __m256 __DEFAULT_FN_ATTRS
+_mm256_maskz_expand_ps (__mmask8 __U, __m256 __A) {
+ return (__m256) __builtin_ia32_expandsf256_mask ((__v8sf) __A,
+ (__v8sf)
+ _mm256_setzero_ps (),
+ (__mmask8) __U);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_mask_expand_epi32 (__m128i __W, __mmask8 __U, __m128i __A) {
+ return (__m128i) __builtin_ia32_expandsi128_mask ((__v4si) __A,
+ (__v4si) __W,
+ (__mmask8) __U);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_maskz_expand_epi32 (__mmask8 __U, __m128i __A) {
+ return (__m128i) __builtin_ia32_expandsi128_mask ((__v4si) __A,
+ (__v4si)
+ _mm_setzero_si128 (),
+ (__mmask8) __U);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS
+_mm256_mask_expand_epi32 (__m256i __W, __mmask8 __U, __m256i __A) {
+ return (__m256i) __builtin_ia32_expandsi256_mask ((__v8si) __A,
+ (__v8si) __W,
+ (__mmask8) __U);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS
+_mm256_maskz_expand_epi32 (__mmask8 __U, __m256i __A) {
+ return (__m256i) __builtin_ia32_expandsi256_mask ((__v8si) __A,
+ (__v8si)
+ _mm256_setzero_si256 (),
+ (__mmask8) __U);
+}
+
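+/* Expand is the inverse of compress: consecutive low lanes of the
+   source are distributed to the lanes selected by the mask, and the
+   expandloadu forms first read those packed elements from possibly
+   unaligned memory.  A minimal illustrative sketch: with mask 0x5,
+   source lanes 0 and 1 land in result lanes 0 and 2, and the other
+   lanes are zeroed. */
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+__example_expand_epi32 (__m128i __v) {
+  return _mm_maskz_expand_epi32 ((__mmask8) 0x5, __v);
+}
+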
+static __inline__ __m128d __DEFAULT_FN_ATTRS
+_mm_getexp_pd (__m128d __A) {
+ return (__m128d) __builtin_ia32_getexppd128_mask ((__v2df) __A,
+ (__v2df)
+ _mm_setzero_pd (),
+ (__mmask8) -1);
+}
+
+static __inline__ __m128d __DEFAULT_FN_ATTRS
+_mm_mask_getexp_pd (__m128d __W, __mmask8 __U, __m128d __A) {
+ return (__m128d) __builtin_ia32_getexppd128_mask ((__v2df) __A,
+ (__v2df) __W,
+ (__mmask8) __U);
+}
+
+static __inline__ __m128d __DEFAULT_FN_ATTRS
+_mm_maskz_getexp_pd (__mmask8 __U, __m128d __A) {
+ return (__m128d) __builtin_ia32_getexppd128_mask ((__v2df) __A,
+ (__v2df)
+ _mm_setzero_pd (),
+ (__mmask8) __U);
+}
+
+static __inline__ __m256d __DEFAULT_FN_ATTRS
+_mm256_getexp_pd (__m256d __A) {
+ return (__m256d) __builtin_ia32_getexppd256_mask ((__v4df) __A,
+ (__v4df)
+ _mm256_setzero_pd (),
+ (__mmask8) -1);
+}
+
+static __inline__ __m256d __DEFAULT_FN_ATTRS
+_mm256_mask_getexp_pd (__m256d __W, __mmask8 __U, __m256d __A) {
+ return (__m256d) __builtin_ia32_getexppd256_mask ((__v4df) __A,
+ (__v4df) __W,
+ (__mmask8) __U);
+}
+
+static __inline__ __m256d __DEFAULT_FN_ATTRS
+_mm256_maskz_getexp_pd (__mmask8 __U, __m256d __A) {
+ return (__m256d) __builtin_ia32_getexppd256_mask ((__v4df) __A,
+ (__v4df)
+ _mm256_setzero_pd (),
+ (__mmask8) __U);
+}
+
+static __inline__ __m128 __DEFAULT_FN_ATTRS
+_mm_getexp_ps (__m128 __A) {
+ return (__m128) __builtin_ia32_getexpps128_mask ((__v4sf) __A,
+ (__v4sf)
+ _mm_setzero_ps (),
+ (__mmask8) -1);
+}
+
+static __inline__ __m128 __DEFAULT_FN_ATTRS
+_mm_mask_getexp_ps (__m128 __W, __mmask8 __U, __m128 __A) {
+ return (__m128) __builtin_ia32_getexpps128_mask ((__v4sf) __A,
+ (__v4sf) __W,
+ (__mmask8) __U);
+}
+
+static __inline__ __m128 __DEFAULT_FN_ATTRS
+_mm_maskz_getexp_ps (__mmask8 __U, __m128 __A) {
+ return (__m128) __builtin_ia32_getexpps128_mask ((__v4sf) __A,
+ (__v4sf)
+ _mm_setzero_ps (),
+ (__mmask8) __U);
+}
+
+static __inline__ __m256 __DEFAULT_FN_ATTRS
+_mm256_getexp_ps (__m256 __A) {
+ return (__m256) __builtin_ia32_getexpps256_mask ((__v8sf) __A,
+ (__v8sf)
+ _mm256_setzero_ps (),
+ (__mmask8) -1);
+}
+
+static __inline__ __m256 __DEFAULT_FN_ATTRS
+_mm256_mask_getexp_ps (__m256 __W, __mmask8 __U, __m256 __A) {
+ return (__m256) __builtin_ia32_getexpps256_mask ((__v8sf) __A,
+ (__v8sf) __W,
+ (__mmask8) __U);
+}
+
+static __inline__ __m256 __DEFAULT_FN_ATTRS
+_mm256_maskz_getexp_ps (__mmask8 __U, __m256 __A) {
+ return (__m256) __builtin_ia32_getexpps256_mask ((__v8sf) __A,
+ (__v8sf)
+ _mm256_setzero_ps (),
+ (__mmask8) __U);
+}
+
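+/* getexp extracts the unbiased exponent of each element as a
+   floating-point value, i.e. floor(log2(|x|)), so getexp(8.0) is
+   3.0 and getexp(0.75) is -1.0.  A minimal illustrative sketch: */
+static __inline__ __m128d __DEFAULT_FN_ATTRS
+__example_getexp_pd (__m128d __v) {
+  return _mm_getexp_pd (__v);
+}
+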
+static __inline__ __m128d __DEFAULT_FN_ATTRS
+_mm_mask_max_pd (__m128d __W, __mmask8 __U, __m128d __A, __m128d __B) {
+ return (__m128d) __builtin_ia32_maxpd_mask ((__v2df) __A,
+ (__v2df) __B,
+ (__v2df) __W,
+ (__mmask8) __U);
+}
+
+static __inline__ __m128d __DEFAULT_FN_ATTRS
+_mm_maskz_max_pd (__mmask8 __U, __m128d __A, __m128d __B) {
+ return (__m128d) __builtin_ia32_maxpd_mask ((__v2df) __A,
+ (__v2df) __B,
+ (__v2df)
+ _mm_setzero_pd (),
+ (__mmask8) __U);
+}
+
+static __inline__ __m256d __DEFAULT_FN_ATTRS
+_mm256_mask_max_pd (__m256d __W, __mmask8 __U, __m256d __A,
+ __m256d __B) {
+ return (__m256d) __builtin_ia32_maxpd256_mask ((__v4df) __A,
+ (__v4df) __B,
+ (__v4df) __W,
+ (__mmask8) __U);
+}
+
+static __inline__ __m256d __DEFAULT_FN_ATTRS
+_mm256_maskz_max_pd (__mmask8 __U, __m256d __A, __m256d __B) {
+ return (__m256d) __builtin_ia32_maxpd256_mask ((__v4df) __A,
+ (__v4df) __B,
+ (__v4df)
+ _mm256_setzero_pd (),
+ (__mmask8) __U);
+}
+
+static __inline__ __m128 __DEFAULT_FN_ATTRS
+_mm_mask_max_ps (__m128 __W, __mmask8 __U, __m128 __A, __m128 __B) {
+ return (__m128) __builtin_ia32_maxps_mask ((__v4sf) __A,
+ (__v4sf) __B,
+ (__v4sf) __W,
+ (__mmask8) __U);
+}
+
+static __inline__ __m128 __DEFAULT_FN_ATTRS
+_mm_maskz_max_ps (__mmask8 __U, __m128 __A, __m128 __B) {
+ return (__m128) __builtin_ia32_maxps_mask ((__v4sf) __A,
+ (__v4sf) __B,
+ (__v4sf)
+ _mm_setzero_ps (),
+ (__mmask8) __U);
+}
+
+static __inline__ __m256 __DEFAULT_FN_ATTRS
+_mm256_mask_max_ps (__m256 __W, __mmask8 __U, __m256 __A, __m256 __B) {
+ return (__m256) __builtin_ia32_maxps256_mask ((__v8sf) __A,
+ (__v8sf) __B,
+ (__v8sf) __W,
+ (__mmask8) __U);
+}
+
+static __inline__ __m256 __DEFAULT_FN_ATTRS
+_mm256_maskz_max_ps (__mmask8 __U, __m256 __A, __m256 __B) {
+ return (__m256) __builtin_ia32_maxps256_mask ((__v8sf) __A,
+ (__v8sf) __B,
+ (__v8sf)
+ _mm256_setzero_ps (),
+ (__mmask8) __U);
+}
+
+static __inline__ __m128d __DEFAULT_FN_ATTRS
+_mm_mask_min_pd (__m128d __W, __mmask8 __U, __m128d __A, __m128d __B) {
+ return (__m128d) __builtin_ia32_minpd_mask ((__v2df) __A,
+ (__v2df) __B,
+ (__v2df) __W,
+ (__mmask8) __U);
+}
+
+static __inline__ __m128d __DEFAULT_FN_ATTRS
+_mm_maskz_min_pd (__mmask8 __U, __m128d __A, __m128d __B) {
+ return (__m128d) __builtin_ia32_minpd_mask ((__v2df) __A,
+ (__v2df) __B,
+ (__v2df)
+ _mm_setzero_pd (),
+ (__mmask8) __U);
+}
+
+static __inline__ __m256d __DEFAULT_FN_ATTRS
+_mm256_mask_min_pd (__m256d __W, __mmask8 __U, __m256d __A,
+ __m256d __B) {
+ return (__m256d) __builtin_ia32_minpd256_mask ((__v4df) __A,
+ (__v4df) __B,
+ (__v4df) __W,
+ (__mmask8) __U);
+}
+
+static __inline__ __m256d __DEFAULT_FN_ATTRS
+_mm256_maskz_min_pd (__mmask8 __U, __m256d __A, __m256d __B) {
+ return (__m256d) __builtin_ia32_minpd256_mask ((__v4df) __A,
+ (__v4df) __B,
+ (__v4df)
+ _mm256_setzero_pd (),
+ (__mmask8) __U);
+}
+
+static __inline__ __m128 __DEFAULT_FN_ATTRS
+_mm_mask_min_ps (__m128 __W, __mmask8 __U, __m128 __A, __m128 __B) {
+ return (__m128) __builtin_ia32_minps_mask ((__v4sf) __A,
+ (__v4sf) __B,
+ (__v4sf) __W,
+ (__mmask8) __U);
+}
+
+static __inline__ __m128 __DEFAULT_FN_ATTRS
+_mm_maskz_min_ps (__mmask8 __U, __m128 __A, __m128 __B) {
+ return (__m128) __builtin_ia32_minps_mask ((__v4sf) __A,
+ (__v4sf) __B,
+ (__v4sf)
+ _mm_setzero_ps (),
+ (__mmask8) __U);
+}
+
+static __inline__ __m256 __DEFAULT_FN_ATTRS
+_mm256_mask_min_ps (__m256 __W, __mmask8 __U, __m256 __A, __m256 __B) {
+ return (__m256) __builtin_ia32_minps256_mask ((__v8sf) __A,
+ (__v8sf) __B,
+ (__v8sf) __W,
+ (__mmask8) __U);
+}
+
+static __inline__ __m256 __DEFAULT_FN_ATTRS
+_mm256_maskz_min_ps (__mmask8 __U, __m256 __A, __m256 __B) {
+ return (__m256) __builtin_ia32_minps256_mask ((__v8sf) __A,
+ (__v8sf) __B,
+ (__v8sf)
+ _mm256_setzero_ps (),
+ (__mmask8) __U);
+}
+
+static __inline__ __m128d __DEFAULT_FN_ATTRS
+_mm_mask_mul_pd (__m128d __W, __mmask8 __U, __m128d __A, __m128d __B) {
+ return (__m128d) __builtin_ia32_mulpd_mask ((__v2df) __A,
+ (__v2df) __B,
+ (__v2df) __W,
+ (__mmask8) __U);
+}
+
+static __inline__ __m128d __DEFAULT_FN_ATTRS
+_mm_maskz_mul_pd (__mmask8 __U, __m128d __A, __m128d __B) {
+ return (__m128d) __builtin_ia32_mulpd_mask ((__v2df) __A,
+ (__v2df) __B,
+ (__v2df)
+ _mm_setzero_pd (),
+ (__mmask8) __U);
+}
+
+static __inline__ __m256d __DEFAULT_FN_ATTRS
+_mm256_mask_mul_pd (__m256d __W, __mmask8 __U, __m256d __A,
+ __m256d __B) {
+ return (__m256d) __builtin_ia32_mulpd256_mask ((__v4df) __A,
+ (__v4df) __B,
+ (__v4df) __W,
+ (__mmask8) __U);
+}
+
+static __inline__ __m256d __DEFAULT_FN_ATTRS
+_mm256_maskz_mul_pd (__mmask8 __U, __m256d __A, __m256d __B) {
+ return (__m256d) __builtin_ia32_mulpd256_mask ((__v4df) __A,
+ (__v4df) __B,
+ (__v4df)
+ _mm256_setzero_pd (),
+ (__mmask8) __U);
+}
+
+static __inline__ __m128 __DEFAULT_FN_ATTRS
+_mm_mask_mul_ps (__m128 __W, __mmask8 __U, __m128 __A, __m128 __B) {
+ return (__m128) __builtin_ia32_mulps_mask ((__v4sf) __A,
+ (__v4sf) __B,
+ (__v4sf) __W,
+ (__mmask8) __U);
+}
+
+static __inline__ __m128 __DEFAULT_FN_ATTRS
+_mm_maskz_mul_ps (__mmask8 __U, __m128 __A, __m128 __B) {
+ return (__m128) __builtin_ia32_mulps_mask ((__v4sf) __A,
+ (__v4sf) __B,
+ (__v4sf)
+ _mm_setzero_ps (),
+ (__mmask8) __U);
+}
+
+static __inline__ __m256 __DEFAULT_FN_ATTRS
+_mm256_mask_mul_ps (__m256 __W, __mmask8 __U, __m256 __A, __m256 __B) {
+ return (__m256) __builtin_ia32_mulps256_mask ((__v8sf) __A,
+ (__v8sf) __B,
+ (__v8sf) __W,
+ (__mmask8) __U);
+}
+
+static __inline__ __m256 __DEFAULT_FN_ATTRS
+_mm256_maskz_mul_ps (__mmask8 __U, __m256 __A, __m256 __B) {
+ return (__m256) __builtin_ia32_mulps256_mask ((__v8sf) __A,
+ (__v8sf) __B,
+ (__v8sf)
+ _mm256_setzero_ps (),
+ (__mmask8) __U);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_mask_abs_epi32 (__m128i __W, __mmask8 __U, __m128i __A) {
+ return (__m128i) __builtin_ia32_pabsd128_mask ((__v4si) __A,
+ (__v4si) __W,
+ (__mmask8) __U);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_maskz_abs_epi32 (__mmask8 __U, __m128i __A) {
+ return (__m128i) __builtin_ia32_pabsd128_mask ((__v4si) __A,
+ (__v4si)
+ _mm_setzero_si128 (),
+ (__mmask8) __U);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS
+_mm256_mask_abs_epi32 (__m256i __W, __mmask8 __U, __m256i __A) {
+ return (__m256i) __builtin_ia32_pabsd256_mask ((__v8si) __A,
+ (__v8si) __W,
+ (__mmask8) __U);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS
+_mm256_maskz_abs_epi32 (__mmask8 __U, __m256i __A) {
+ return (__m256i) __builtin_ia32_pabsd256_mask ((__v8si) __A,
+ (__v8si)
+ _mm256_setzero_si256 (),
+ (__mmask8) __U);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_abs_epi64 (__m128i __A) {
+ return (__m128i) __builtin_ia32_pabsq128_mask ((__v2di) __A,
+ (__v2di)
+ _mm_setzero_si128 (),
+ (__mmask8) -1);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_mask_abs_epi64 (__m128i __W, __mmask8 __U, __m128i __A) {
+ return (__m128i) __builtin_ia32_pabsq128_mask ((__v2di) __A,
+ (__v2di) __W,
+ (__mmask8) __U);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_maskz_abs_epi64 (__mmask8 __U, __m128i __A) {
+ return (__m128i) __builtin_ia32_pabsq128_mask ((__v2di) __A,
+ (__v2di)
+ _mm_setzero_si128 (),
+ (__mmask8) __U);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS
+_mm256_abs_epi64 (__m256i __A) {
+ return (__m256i) __builtin_ia32_pabsq256_mask ((__v4di) __A,
+ (__v4di)
+ _mm256_setzero_si256 (),
+ (__mmask8) -1);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS
+_mm256_mask_abs_epi64 (__m256i __W, __mmask8 __U, __m256i __A) {
+ return (__m256i) __builtin_ia32_pabsq256_mask ((__v4di) __A,
+ (__v4di) __W,
+ (__mmask8) __U);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS
+_mm256_maskz_abs_epi64 (__mmask8 __U, __m256i __A) {
+ return (__m256i) __builtin_ia32_pabsq256_mask ((__v4di) __A,
+ (__v4di)
+ _mm256_setzero_si256 (),
+ (__mmask8) __U);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_maskz_max_epi32 (__mmask8 __M, __m128i __A, __m128i __B) {
+ return (__m128i) __builtin_ia32_pmaxsd128_mask ((__v4si) __A,
+ (__v4si) __B,
+ (__v4si)
+ _mm_setzero_si128 (),
+ __M);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_mask_max_epi32 (__m128i __W, __mmask8 __M, __m128i __A,
+ __m128i __B) {
+ return (__m128i) __builtin_ia32_pmaxsd128_mask ((__v4si) __A,
+ (__v4si) __B,
+ (__v4si) __W, __M);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS
+_mm256_maskz_max_epi32 (__mmask8 __M, __m256i __A, __m256i __B) {
+ return (__m256i) __builtin_ia32_pmaxsd256_mask ((__v8si) __A,
+ (__v8si) __B,
+ (__v8si)
+ _mm256_setzero_si256 (),
+ __M);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS
+_mm256_mask_max_epi32 (__m256i __W, __mmask8 __M, __m256i __A,
+ __m256i __B) {
+ return (__m256i) __builtin_ia32_pmaxsd256_mask ((__v8si) __A,
+ (__v8si) __B,
+ (__v8si) __W, __M);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_maskz_max_epi64 (__mmask8 __M, __m128i __A, __m128i __B) {
+ return (__m128i) __builtin_ia32_pmaxsq128_mask ((__v2di) __A,
+ (__v2di) __B,
+ (__v2di)
+ _mm_setzero_si128 (),
+ __M);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_mask_max_epi64 (__m128i __W, __mmask8 __M, __m128i __A,
+ __m128i __B) {
+ return (__m128i) __builtin_ia32_pmaxsq128_mask ((__v2di) __A,
+ (__v2di) __B,
+ (__v2di) __W, __M);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_max_epi64 (__m128i __A, __m128i __B) {
+ return (__m128i) __builtin_ia32_pmaxsq128_mask ((__v2di) __A,
+ (__v2di) __B,
+ (__v2di)
+ _mm_setzero_si128 (),
+ (__mmask8) -1);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS
+_mm256_maskz_max_epi64 (__mmask8 __M, __m256i __A, __m256i __B) {
+ return (__m256i) __builtin_ia32_pmaxsq256_mask ((__v4di) __A,
+ (__v4di) __B,
+ (__v4di)
+ _mm256_setzero_si256 (),
+ __M);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS
+_mm256_mask_max_epi64 (__m256i __W, __mmask8 __M, __m256i __A,
+ __m256i __B) {
+ return (__m256i) __builtin_ia32_pmaxsq256_mask ((__v4di) __A,
+ (__v4di) __B,
+ (__v4di) __W, __M);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS
+_mm256_max_epi64 (__m256i __A, __m256i __B) {
+ return (__m256i) __builtin_ia32_pmaxsq256_mask ((__v4di) __A,
+ (__v4di) __B,
+ (__v4di)
+ _mm256_setzero_si256 (),
+ (__mmask8) -1);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_maskz_max_epu32 (__mmask8 __M, __m128i __A, __m128i __B) {
+ return (__m128i) __builtin_ia32_pmaxud128_mask ((__v4si) __A,
+ (__v4si) __B,
+ (__v4si)
+ _mm_setzero_si128 (),
+ __M);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_mask_max_epu32 (__m128i __W, __mmask8 __M, __m128i __A,
+ __m128i __B) {
+ return (__m128i) __builtin_ia32_pmaxud128_mask ((__v4si) __A,
+ (__v4si) __B,
+ (__v4si) __W, __M);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS
+_mm256_maskz_max_epu32 (__mmask8 __M, __m256i __A, __m256i __B) {
+ return (__m256i) __builtin_ia32_pmaxud256_mask ((__v8si) __A,
+ (__v8si) __B,
+ (__v8si)
+ _mm256_setzero_si256 (),
+ __M);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS
+_mm256_mask_max_epu32 (__m256i __W, __mmask8 __M, __m256i __A,
+ __m256i __B) {
+ return (__m256i) __builtin_ia32_pmaxud256_mask ((__v8si) __A,
+ (__v8si) __B,
+ (__v8si) __W, __M);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_maskz_max_epu64 (__mmask8 __M, __m128i __A, __m128i __B) {
+ return (__m128i) __builtin_ia32_pmaxuq128_mask ((__v2di) __A,
+ (__v2di) __B,
+ (__v2di)
+ _mm_setzero_si128 (),
+ __M);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_max_epu64 (__m128i __A, __m128i __B) {
+ return (__m128i) __builtin_ia32_pmaxuq128_mask ((__v2di) __A,
+ (__v2di) __B,
+ (__v2di)
+ _mm_setzero_si128 (),
+ (__mmask8) -1);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_mask_max_epu64 (__m128i __W, __mmask8 __M, __m128i __A,
+ __m128i __B) {
+ return (__m128i) __builtin_ia32_pmaxuq128_mask ((__v2di) __A,
+ (__v2di) __B,
+ (__v2di) __W, __M);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS
+_mm256_maskz_max_epu64 (__mmask8 __M, __m256i __A, __m256i __B) {
+ return (__m256i) __builtin_ia32_pmaxuq256_mask ((__v4di) __A,
+ (__v4di) __B,
+ (__v4di)
+ _mm256_setzero_si256 (),
+ __M);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS
+_mm256_max_epu64 (__m256i __A, __m256i __B) {
+ return (__m256i) __builtin_ia32_pmaxuq256_mask ((__v4di) __A,
+ (__v4di) __B,
+ (__v4di)
+ _mm256_setzero_si256 (),
+ (__mmask8) -1);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS
+_mm256_mask_max_epu64 (__m256i __W, __mmask8 __M, __m256i __A,
+ __m256i __B) {
+ return (__m256i) __builtin_ia32_pmaxuq256_mask ((__v4di) __A,
+ (__v4di) __B,
+ (__v4di) __W, __M);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_maskz_min_epi32 (__mmask8 __M, __m128i __A, __m128i __B) {
+ return (__m128i) __builtin_ia32_pminsd128_mask ((__v4si) __A,
+ (__v4si) __B,
+ (__v4si)
+ _mm_setzero_si128 (),
+ __M);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_mask_min_epi32 (__m128i __W, __mmask8 __M, __m128i __A,
+ __m128i __B) {
+ return (__m128i) __builtin_ia32_pminsd128_mask ((__v4si) __A,
+ (__v4si) __B,
+ (__v4si) __W, __M);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS
+_mm256_maskz_min_epi32 (__mmask8 __M, __m256i __A, __m256i __B) {
+ return (__m256i) __builtin_ia32_pminsd256_mask ((__v8si) __A,
+ (__v8si) __B,
+ (__v8si)
+ _mm256_setzero_si256 (),
+ __M);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS
+_mm256_mask_min_epi32 (__m256i __W, __mmask8 __M, __m256i __A,
+ __m256i __B) {
+ return (__m256i) __builtin_ia32_pminsd256_mask ((__v8si) __A,
+ (__v8si) __B,
+ (__v8si) __W, __M);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_min_epi64 (__m128i __A, __m128i __B) {
+ return (__m128i) __builtin_ia32_pminsq128_mask ((__v2di) __A,
+ (__v2di) __B,
+ (__v2di)
+ _mm_setzero_si128 (),
+ (__mmask8) -1);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_mask_min_epi64 (__m128i __W, __mmask8 __M, __m128i __A,
+ __m128i __B) {
+ return (__m128i) __builtin_ia32_pminsq128_mask ((__v2di) __A,
+ (__v2di) __B,
+ (__v2di) __W, __M);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_maskz_min_epi64 (__mmask8 __M, __m128i __A, __m128i __B) {
+ return (__m128i) __builtin_ia32_pminsq128_mask ((__v2di) __A,
+ (__v2di) __B,
+ (__v2di)
+ _mm_setzero_si128 (),
+ __M);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS
+_mm256_min_epi64 (__m256i __A, __m256i __B) {
+ return (__m256i) __builtin_ia32_pminsq256_mask ((__v4di) __A,
+ (__v4di) __B,
+ (__v4di)
+ _mm256_setzero_si256 (),
+ (__mmask8) -1);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS
+_mm256_mask_min_epi64 (__m256i __W, __mmask8 __M, __m256i __A,
+ __m256i __B) {
+ return (__m256i) __builtin_ia32_pminsq256_mask ((__v4di) __A,
+ (__v4di) __B,
+ (__v4di) __W, __M);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS
+_mm256_maskz_min_epi64 (__mmask8 __M, __m256i __A, __m256i __B) {
+ return (__m256i) __builtin_ia32_pminsq256_mask ((__v4di) __A,
+ (__v4di) __B,
+ (__v4di)
+ _mm256_setzero_si256 (),
+ __M);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_maskz_min_epu32 (__mmask8 __M, __m128i __A, __m128i __B) {
+ return (__m128i) __builtin_ia32_pminud128_mask ((__v4si) __A,
+ (__v4si) __B,
+ (__v4si)
+ _mm_setzero_si128 (),
+ __M);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_mask_min_epu32 (__m128i __W, __mmask8 __M, __m128i __A,
+ __m128i __B) {
+ return (__m128i) __builtin_ia32_pminud128_mask ((__v4si) __A,
+ (__v4si) __B,
+ (__v4si) __W, __M);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS
+_mm256_maskz_min_epu32 (__mmask8 __M, __m256i __A, __m256i __B) {
+ return (__m256i) __builtin_ia32_pminud256_mask ((__v8si) __A,
+ (__v8si) __B,
+ (__v8si)
+ _mm256_setzero_si256 (),
+ __M);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS
+_mm256_mask_min_epu32 (__m256i __W, __mmask8 __M, __m256i __A,
+ __m256i __B) {
+ return (__m256i) __builtin_ia32_pminud256_mask ((__v8si) __A,
+ (__v8si) __B,
+ (__v8si) __W, __M);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_min_epu64 (__m128i __A, __m128i __B) {
+ return (__m128i) __builtin_ia32_pminuq128_mask ((__v2di) __A,
+ (__v2di) __B,
+ (__v2di)
+ _mm_setzero_si128 (),
+ (__mmask8) -1);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_mask_min_epu64 (__m128i __W, __mmask8 __M, __m128i __A,
+ __m128i __B) {
+ return (__m128i) __builtin_ia32_pminuq128_mask ((__v2di) __A,
+ (__v2di) __B,
+ (__v2di) __W, __M);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_maskz_min_epu64 (__mmask8 __M, __m128i __A, __m128i __B) {
+ return (__m128i) __builtin_ia32_pminuq128_mask ((__v2di) __A,
+ (__v2di) __B,
+ (__v2di)
+ _mm_setzero_si128 (),
+ __M);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS
+_mm256_min_epu64 (__m256i __A, __m256i __B) {
+ return (__m256i) __builtin_ia32_pminuq256_mask ((__v4di) __A,
+ (__v4di) __B,
+ (__v4di)
+ _mm256_setzero_si256 (),
+ (__mmask8) -1);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS
+_mm256_mask_min_epu64 (__m256i __W, __mmask8 __M, __m256i __A,
+ __m256i __B) {
+ return (__m256i) __builtin_ia32_pminuq256_mask ((__v4di) __A,
+ (__v4di) __B,
+ (__v4di) __W, __M);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS
+_mm256_maskz_min_epu64 (__mmask8 __M, __m256i __A, __m256i __B) {
+ return (__m256i) __builtin_ia32_pminuq256_mask ((__v4di) __A,
+ (__v4di) __B,
+ (__v4di)
+ _mm256_setzero_si256 (),
+ __M);
+}
+
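+/* Unlike their 32-bit counterparts, the 64-bit integer min/max
+   above (and the 64-bit abs earlier) have no SSE/AVX2 predecessors;
+   the unmasked wrappers simply pass an all-ones mask to the same
+   builtins.  A minimal illustrative sketch: */
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+__example_max_epu64 (__m128i __a, __m128i __b) {
+  return _mm_max_epu64 (__a, __b);
+}
+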
+#define _mm_roundscale_pd(__A, __imm) __extension__ ({ \
+ (__m128d) __builtin_ia32_rndscalepd_128_mask ((__v2df) __A, \
+ __imm, (__v2df) _mm_setzero_pd (), (__mmask8) -1); })
+
+
+#define _mm_mask_roundscale_pd(__W, __U, __A, __imm) __extension__ ({ \
+ (__m128d) __builtin_ia32_rndscalepd_128_mask ((__v2df) __A, __imm, \
+ (__v2df) __W, (__mmask8) __U); })
+
+
+#define _mm_maskz_roundscale_pd(__U, __A, __imm) __extension__ ({ \
+ (__m128d) __builtin_ia32_rndscalepd_128_mask ((__v2df) __A, __imm, \
+ (__v2df) _mm_setzero_pd (), (__mmask8) __U); })
+
+
+#define _mm256_roundscale_pd(__A, __imm) __extension__ ({ \
+ (__m256d) __builtin_ia32_rndscalepd_256_mask ((__v4df) __A, __imm, \
+ (__v4df) _mm256_setzero_pd (), (__mmask8) -1); })
+
+
+#define _mm256_mask_roundscale_pd(__W, __U, __A, __imm) __extension__ ({ \
+ (__m256d) __builtin_ia32_rndscalepd_256_mask ((__v4df) __A, __imm, \
+ (__v4df) __W, (__mmask8) __U); })
+
+
+#define _mm256_maskz_roundscale_pd(__U, __A, __imm) __extension__ ({ \
+ (__m256d) __builtin_ia32_rndscalepd_256_mask ((__v4df) __A, __imm, \
+ (__v4df) _mm256_setzero_pd(), (__mmask8) __U); })
+
+#define _mm_roundscale_ps(__A, __imm) __extension__ ({ \
+ (__m128) __builtin_ia32_rndscaleps_128_mask ((__v4sf) __A, __imm, \
+ (__v4sf) _mm_setzero_ps(), (__mmask8) -1); })
+
+
+#define _mm_mask_roundscale_ps(__W, __U, __A, __imm) __extension__ ({ \
+ (__m128) __builtin_ia32_rndscaleps_128_mask ((__v4sf) __A, __imm, \
+ (__v4sf) __W, (__mmask8) __U); })
+
+
+#define _mm_maskz_roundscale_ps(__U, __A, __imm) __extension__ ({ \
+ (__m128) __builtin_ia32_rndscaleps_128_mask ((__v4sf) __A, __imm, \
+ (__v4sf) _mm_setzero_ps(), (__mmask8) __U); })
+
+#define _mm256_roundscale_ps(__A, __imm) __extension__ ({ \
+ (__m256) __builtin_ia32_rndscaleps_256_mask ((__v8sf) __A, __imm, \
+ (__v8sf) _mm256_setzero_ps(), (__mmask8) -1); })
+
+#define _mm256_mask_roundscale_ps(__W, __U, __A, __imm) __extension__ ({ \
+ (__m256) __builtin_ia32_rndscaleps_256_mask ((__v8sf) __A, __imm, \
+ (__v8sf) __W, (__mmask8) __U); })
+
+
+#define _mm256_maskz_roundscale_ps(__U, __A, __imm) __extension__ ({ \
+ (__m256) __builtin_ia32_rndscaleps_256_mask ((__v8sf) __A, __imm, \
+ (__v8sf) _mm256_setzero_ps(), (__mmask8) __U); })
+
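+/* Roundscale rounds each element to 2^-M precision, where M is the
+   high nibble of __imm; the low bits select the rounding behavior
+   (see the Intel SDM for the exact imm8 encoding).  With __imm = 0
+   the elements are rounded to the nearest integer.  A minimal
+   illustrative sketch: */
+static __inline__ __m128d __DEFAULT_FN_ATTRS
+__example_round_to_integer (__m128d __v) {
+  return _mm_roundscale_pd (__v, 0);
+}
+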
+static __inline__ __m128d __DEFAULT_FN_ATTRS
+_mm_scalef_pd (__m128d __A, __m128d __B) {
+ return (__m128d) __builtin_ia32_scalefpd128_mask ((__v2df) __A,
+ (__v2df) __B,
+ (__v2df)
+ _mm_setzero_pd (),
+ (__mmask8) -1);
+}
+
+static __inline__ __m128d __DEFAULT_FN_ATTRS
+_mm_mask_scalef_pd (__m128d __W, __mmask8 __U, __m128d __A,
+ __m128d __B) {
+ return (__m128d) __builtin_ia32_scalefpd128_mask ((__v2df) __A,
+ (__v2df) __B,
+ (__v2df) __W,
+ (__mmask8) __U);
+}
+
+static __inline__ __m128d __DEFAULT_FN_ATTRS
+_mm_maskz_scalef_pd (__mmask8 __U, __m128d __A, __m128d __B) {
+ return (__m128d) __builtin_ia32_scalefpd128_mask ((__v2df) __A,
+ (__v2df) __B,
+ (__v2df)
+ _mm_setzero_pd (),
+ (__mmask8) __U);
+}
+
+static __inline__ __m256d __DEFAULT_FN_ATTRS
+_mm256_scalef_pd (__m256d __A, __m256d __B) {
+ return (__m256d) __builtin_ia32_scalefpd256_mask ((__v4df) __A,
+ (__v4df) __B,
+ (__v4df)
+ _mm256_setzero_pd (),
+ (__mmask8) -1);
+}
+
+static __inline__ __m256d __DEFAULT_FN_ATTRS
+_mm256_mask_scalef_pd (__m256d __W, __mmask8 __U, __m256d __A,
+ __m256d __B) {
+ return (__m256d) __builtin_ia32_scalefpd256_mask ((__v4df) __A,
+ (__v4df) __B,
+ (__v4df) __W,
+ (__mmask8) __U);
+}
+
+static __inline__ __m256d __DEFAULT_FN_ATTRS
+_mm256_maskz_scalef_pd (__mmask8 __U, __m256d __A, __m256d __B) {
+ return (__m256d) __builtin_ia32_scalefpd256_mask ((__v4df) __A,
+ (__v4df) __B,
+ (__v4df)
+ _mm256_setzero_pd (),
+ (__mmask8) __U);
+}
+
+static __inline__ __m128 __DEFAULT_FN_ATTRS
+_mm_scalef_ps (__m128 __A, __m128 __B) {
+ return (__m128) __builtin_ia32_scalefps128_mask ((__v4sf) __A,
+ (__v4sf) __B,
+ (__v4sf)
+ _mm_setzero_ps (),
+ (__mmask8) -1);
+}
+
+static __inline__ __m128 __DEFAULT_FN_ATTRS
+_mm_mask_scalef_ps (__m128 __W, __mmask8 __U, __m128 __A, __m128 __B) {
+ return (__m128) __builtin_ia32_scalefps128_mask ((__v4sf) __A,
+ (__v4sf) __B,
+ (__v4sf) __W,
+ (__mmask8) __U);
+}
+
+static __inline__ __m128 __DEFAULT_FN_ATTRS
+_mm_maskz_scalef_ps (__mmask8 __U, __m128 __A, __m128 __B) {
+ return (__m128) __builtin_ia32_scalefps128_mask ((__v4sf) __A,
+ (__v4sf) __B,
+ (__v4sf)
+ _mm_setzero_ps (),
+ (__mmask8) __U);
+}
+
+static __inline__ __m256 __DEFAULT_FN_ATTRS
+_mm256_scalef_ps (__m256 __A, __m256 __B) {
+ return (__m256) __builtin_ia32_scalefps256_mask ((__v8sf) __A,
+ (__v8sf) __B,
+ (__v8sf)
+ _mm256_setzero_ps (),
+ (__mmask8) -1);
+}
+
+static __inline__ __m256 __DEFAULT_FN_ATTRS
+_mm256_mask_scalef_ps (__m256 __W, __mmask8 __U, __m256 __A,
+ __m256 __B) {
+ return (__m256) __builtin_ia32_scalefps256_mask ((__v8sf) __A,
+ (__v8sf) __B,
+ (__v8sf) __W,
+ (__mmask8) __U);
+}
+
+static __inline__ __m256 __DEFAULT_FN_ATTRS
+_mm256_maskz_scalef_ps (__mmask8 __U, __m256 __A, __m256 __B) {
+ return (__m256) __builtin_ia32_scalefps256_mask ((__v8sf) __A,
+ (__v8sf) __B,
+ (__v8sf)
+ _mm256_setzero_ps (),
+ (__mmask8) __U);
+}
+
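+/* scalef computes __A * 2^floor(__B) per element, an exact way to
+   apply a power-of-two scale without converting the exponent to an
+   integer first.  A minimal illustrative sketch (scales by 4,
+   assuming _mm_set1_pd from emmintrin.h is visible): */
+static __inline__ __m128d __DEFAULT_FN_ATTRS
+__example_scalef_by_4 (__m128d __v) {
+  return _mm_scalef_pd (__v, _mm_set1_pd (2.0));
+}
+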
+#define _mm_i64scatter_pd(__addr,__index, __v1, __scale) __extension__ ({ \
+ __builtin_ia32_scatterdiv2df(__addr, (__mmask8) 0xFF, (__v2di) __index, \
+ (__v2df) __v1, __scale); })
+
+#define _mm_mask_i64scatter_pd(__addr, __mask, __index, __v1, \
+ __scale) __extension__ ({ \
+ __builtin_ia32_scatterdiv2df (__addr, __mask, (__v2di) __index, \
+ (__v2df) __v1, __scale); })
+
+
+#define _mm_i64scatter_epi64(__addr, __index, __v1, __scale) __extension__ ({ \
+ __builtin_ia32_scatterdiv2di (__addr, (__mmask8) 0xFF, \
+ (__v2di) __index, (__v2di) __v1, __scale); })
+
+#define _mm_mask_i64scatter_epi64(__addr, __mask, __index, __v1,\
+ __scale) __extension__ ({ \
+ __builtin_ia32_scatterdiv2di (__addr, __mask, (__v2di) __index,\
+ (__v2di) __v1, __scale); })
+
+#define _mm256_i64scatter_pd(__addr, __index, __v1, __scale) __extension__ ({ \
+ __builtin_ia32_scatterdiv4df (__addr, (__mmask8) 0xFF,\
+ (__v4di) __index, (__v4df) __v1, __scale); })
+
+#define _mm256_mask_i64scatter_pd(__addr, __mask, __index, __v1,\
+ __scale) __extension__ ({ \
+ __builtin_ia32_scatterdiv4df (__addr, __mask, (__v4di) __index,\
+ (__v4df) __v1, __scale); })
+
+#define _mm256_i64scatter_epi64(__addr, __index, __v1, __scale) __extension__ ({ \
+ __builtin_ia32_scatterdiv4di (__addr, (__mmask8) 0xFF, (__v4di) __index,\
+ (__v4di) __v1, __scale); })
+
+#define _mm256_mask_i64scatter_epi64(__addr, __mask, __index, __v1,\
+ __scale) __extension__ ({ \
+ __builtin_ia32_scatterdiv4di (__addr, __mask, (__v4di) __index,\
+ (__v4di) __v1, __scale); })
+
+#define _mm_i64scatter_ps(__addr, __index, __v1, __scale) __extension__ ({ \
+ __builtin_ia32_scatterdiv4sf (__addr, (__mmask8) 0xFF,\
+ (__v2di) __index, (__v4sf) __v1, __scale); })
+
+#define _mm_mask_i64scatter_ps(__addr, __mask, __index, __v1, \
+ __scale) __extension__ ({ \
+ __builtin_ia32_scatterdiv4sf (__addr, __mask, (__v2di) __index,\
+ (__v4sf) __v1, __scale); })
+
+#define _mm_i64scatter_epi32(__addr, __index, __v1, \
+ __scale) __extension__ ({ \
+ __builtin_ia32_scatterdiv4si (__addr, (__mmask8) 0xFF,\
+ (__v2di) __index, (__v4si) __v1, __scale); })
+
+#define _mm_mask_i64scatter_epi32(__addr, __mask, __index, __v1,\
+ __scale) __extension__ ({ \
+ __builtin_ia32_scatterdiv4si (__addr, __mask, (__v2di) __index,\
+ (__v4si) __v1, __scale); })
+
+#define _mm256_i64scatter_ps(__addr, __index, __v1, __scale) __extension__ ({ \
+ __builtin_ia32_scatterdiv8sf (__addr, (__mmask8) 0xFF, (__v4di) __index, \
+ (__v4sf) __v1, __scale); })
+
+#define _mm256_mask_i64scatter_ps(__addr, __mask, __index, __v1, \
+ __scale) __extension__ ({ \
+ __builtin_ia32_scatterdiv8sf (__addr, __mask, (__v4di) __index, \
+ (__v4sf) __v1, __scale); })
+
+#define _mm256_i64scatter_epi32(__addr, __index, __v1, __scale) __extension__ ({ \
+ __builtin_ia32_scatterdiv8si (__addr, (__mmask8) 0xFF, \
+ (__v4di) __index, (__v4si) __v1, __scale); })
+
+#define _mm256_mask_i64scatter_epi32(__addr, __mask, __index, __v1, \
+ __scale) __extension__ ({ \
+ __builtin_ia32_scatterdiv8si(__addr, __mask, (__v4di) __index, \
+ (__v4si) __v1, __scale); })
+
+#define _mm_i32scatter_pd(__addr, __index, __v1, \
+ __scale) __extension__ ({ \
+ __builtin_ia32_scattersiv2df (__addr, (__mmask8) 0xFF, \
+ (__v4si) __index, (__v2df) __v1, __scale); })
+
+#define _mm_mask_i32scatter_pd(__addr, __mask, __index, __v1, \
+ __scale) __extension__ ({ \
+ __builtin_ia32_scattersiv2df (__addr, __mask, (__v4si) __index,\
+ (__v2df) __v1, __scale); })
+
+#define _mm_i32scatter_epi64(__addr, __index, __v1, __scale) __extension__ ({ \
+ __builtin_ia32_scattersiv2di (__addr, (__mmask8) 0xFF, \
+ (__v4si) __index, (__v2di) __v1, __scale); })
+
+#define _mm_mask_i32scatter_epi64(__addr, __mask, __index, __v1, \
+ __scale) __extension__ ({ \
+ __builtin_ia32_scattersiv2di (__addr, __mask, (__v4si) __index, \
+ (__v2di) __v1, __scale); })
+
+#define _mm256_i32scatter_pd(__addr, __index, __v1, __scale) __extension__ ({ \
+ __builtin_ia32_scattersiv4df (__addr, (__mmask8) 0xFF, \
+ (__v4si) __index, (__v4df) __v1, __scale); })
+
+#define _mm256_mask_i32scatter_pd(__addr, __mask, __index, __v1, \
+ __scale) __extension__ ({ \
+ __builtin_ia32_scattersiv4df (__addr, __mask, (__v4si) __index, \
+ (__v4df) __v1, __scale); })
+
+#define _mm256_i32scatter_epi64(__addr, __index, __v1, \
+ __scale) __extension__ ({ \
+ __builtin_ia32_scattersiv4di (__addr, (__mmask8) 0xFF, \
+ (__v4si) __index, (__v4di) __v1, __scale); })
+
+#define _mm256_mask_i32scatter_epi64(__addr, __mask, __index, __v1, \
+ __scale) __extension__ ({ \
+ __builtin_ia32_scattersiv4di (__addr, __mask, (__v4si) __index, \
+ (__v4di) __v1, __scale); })
+
+#define _mm_i32scatter_ps(__addr, __index, __v1, __scale) __extension__ ({ \
+ __builtin_ia32_scattersiv4sf (__addr, (__mmask8) 0xFF, \
+ (__v4si) __index, (__v4sf) __v1, __scale); })
+
+#define _mm_mask_i32scatter_ps(__addr, __mask, __index, __v1, \
+ __scale) __extension__ ({ \
+ __builtin_ia32_scattersiv4sf (__addr, __mask, (__v4si) __index, \
+ (__v4sf) __v1, __scale); })
+
+#define _mm_i32scatter_epi32(__addr, __index, __v1, __scale) __extension__ ({ \
+ __builtin_ia32_scattersiv4si (__addr, (__mmask8) 0xFF, \
+ (__v4si) __index, (__v4si) __v1, __scale); })
+
+#define _mm_mask_i32scatter_epi32(__addr, __mask, __index, __v1, \
+ __scale) __extension__ ({ \
+ __builtin_ia32_scattersiv4si (__addr, __mask, (__v4si) __index,\
+ (__v4si) __v1, __scale); })
+
+#define _mm256_i32scatter_ps(__addr, __index, __v1, __scale) __extension__ ({ \
+ __builtin_ia32_scattersiv8sf (__addr, (__mmask8) 0xFF, \
+ (__v8si) __index, (__v8sf) __v1, __scale); })
+
+#define _mm256_mask_i32scatter_ps(__addr, __mask, __index, __v1, \
+ __scale) __extension__ ({ \
+ __builtin_ia32_scattersiv8sf (__addr, __mask, (__v8si) __index,\
+ (__v8sf) __v1, __scale); })
+
+#define _mm256_i32scatter_epi32(__addr, __index, __v1, __scale) __extension__ ({ \
+ __builtin_ia32_scattersiv8si (__addr, (__mmask8) 0xFF, \
+ (__v8si) __index, (__v8si) __v1, __scale); })
+
+#define _mm256_mask_i32scatter_epi32(__addr, __mask, __index, __v1, \
+ __scale) __extension__ ({ \
+ __builtin_ia32_scattersiv8si (__addr, __mask, (__v8si) __index, \
+ (__v8si) __v1, __scale); })
+
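+/* The scatter macros store element i of __v1 to the byte address
+   __addr + __index[i] * __scale, where __scale must be 1, 2, 4 or
+   8; the _mask_ forms write only the lanes whose mask bit is set.
+   A minimal illustrative sketch, assuming __base points at an int
+   array large enough for the given indices: */
+static __inline__ void __DEFAULT_FN_ATTRS
+__example_scatter_epi32 (int *__base, __m128i __idx, __m128i __v) {
+  _mm_i32scatter_epi32 ((void *) __base, __idx, __v, 4);
+}
+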
+static __inline__ __m128d __DEFAULT_FN_ATTRS
+_mm_mask_sqrt_pd (__m128d __W, __mmask8 __U, __m128d __A) {
+ return (__m128d) __builtin_ia32_sqrtpd128_mask ((__v2df) __A,
+ (__v2df) __W,
+ (__mmask8) __U);
+}
+
+static __inline__ __m128d __DEFAULT_FN_ATTRS
+_mm_maskz_sqrt_pd (__mmask8 __U, __m128d __A) {
+ return (__m128d) __builtin_ia32_sqrtpd128_mask ((__v2df) __A,
+ (__v2df)
+ _mm_setzero_pd (),
+ (__mmask8) __U);
+}
+
+static __inline__ __m256d __DEFAULT_FN_ATTRS
+_mm256_mask_sqrt_pd (__m256d __W, __mmask8 __U, __m256d __A) {
+ return (__m256d) __builtin_ia32_sqrtpd256_mask ((__v4df) __A,
+ (__v4df) __W,
+ (__mmask8) __U);
+}
+
+static __inline__ __m256d __DEFAULT_FN_ATTRS
+_mm256_maskz_sqrt_pd (__mmask8 __U, __m256d __A) {
+ return (__m256d) __builtin_ia32_sqrtpd256_mask ((__v4df) __A,
+ (__v4df)
+ _mm256_setzero_pd (),
+ (__mmask8) __U);
+}
+
+static __inline__ __m128 __DEFAULT_FN_ATTRS
+_mm_mask_sqrt_ps (__m128 __W, __mmask8 __U, __m128 __A) {
+ return (__m128) __builtin_ia32_sqrtps128_mask ((__v4sf) __A,
+ (__v4sf) __W,
+ (__mmask8) __U);
+}
+
+static __inline__ __m128 __DEFAULT_FN_ATTRS
+_mm_maskz_sqrt_ps (__mmask8 __U, __m128 __A) {
+ return (__m128) __builtin_ia32_sqrtps128_mask ((__v4sf) __A,
+ (__v4sf)
+ _mm_setzero_ps (),
+ (__mmask8) __U);
+}
+
+static __inline__ __m256 __DEFAULT_FN_ATTRS
+_mm256_mask_sqrt_ps (__m256 __W, __mmask8 __U, __m256 __A) {
+ return (__m256) __builtin_ia32_sqrtps256_mask ((__v8sf) __A,
+ (__v8sf) __W,
+ (__mmask8) __U);
+}
+
+static __inline__ __m256 __DEFAULT_FN_ATTRS
+_mm256_maskz_sqrt_ps (__mmask8 __U, __m256 __A) {
+ return (__m256) __builtin_ia32_sqrtps256_mask ((__v8sf) __A,
+ (__v8sf)
+ _mm256_setzero_ps (),
+ (__mmask8) __U);
+}
+
+static __inline__ __m128d __DEFAULT_FN_ATTRS
+_mm_mask_sub_pd (__m128d __W, __mmask8 __U, __m128d __A, __m128d __B) {
+ return (__m128d) __builtin_ia32_subpd128_mask ((__v2df) __A,
+ (__v2df) __B,
+ (__v2df) __W,
+ (__mmask8) __U);
+}
+
+static __inline__ __m128d __DEFAULT_FN_ATTRS
+_mm_maskz_sub_pd (__mmask8 __U, __m128d __A, __m128d __B) {
+ return (__m128d) __builtin_ia32_subpd128_mask ((__v2df) __A,
+ (__v2df) __B,
+ (__v2df)
+ _mm_setzero_pd (),
+ (__mmask8) __U);
+}
+
+static __inline__ __m256d __DEFAULT_FN_ATTRS
+_mm256_mask_sub_pd (__m256d __W, __mmask8 __U, __m256d __A,
+ __m256d __B) {
+ return (__m256d) __builtin_ia32_subpd256_mask ((__v4df) __A,
+ (__v4df) __B,
+ (__v4df) __W,
+ (__mmask8) __U);
+}
+
+static __inline__ __m256d __DEFAULT_FN_ATTRS
+_mm256_maskz_sub_pd (__mmask8 __U, __m256d __A, __m256d __B) {
+ return (__m256d) __builtin_ia32_subpd256_mask ((__v4df) __A,
+ (__v4df) __B,
+ (__v4df)
+ _mm256_setzero_pd (),
+ (__mmask8) __U);
+}
+
+static __inline__ __m128 __DEFAULT_FN_ATTRS
+_mm_mask_sub_ps (__m128 __W, __mmask8 __U, __m128 __A, __m128 __B) {
+ return (__m128) __builtin_ia32_subps128_mask ((__v4sf) __A,
+ (__v4sf) __B,
+ (__v4sf) __W,
+ (__mmask8) __U);
+}
+
+static __inline__ __m128 __DEFAULT_FN_ATTRS
+_mm_maskz_sub_ps (__mmask8 __U, __m128 __A, __m128 __B) {
+ return (__m128) __builtin_ia32_subps128_mask ((__v4sf) __A,
+ (__v4sf) __B,
+ (__v4sf)
+ _mm_setzero_ps (),
+ (__mmask8) __U);
+}
+
+static __inline__ __m256 __DEFAULT_FN_ATTRS
+_mm256_mask_sub_ps (__m256 __W, __mmask8 __U, __m256 __A, __m256 __B) {
+ return (__m256) __builtin_ia32_subps256_mask ((__v8sf) __A,
+ (__v8sf) __B,
+ (__v8sf) __W,
+ (__mmask8) __U);
+}
+
+static __inline__ __m256 __DEFAULT_FN_ATTRS
+_mm256_maskz_sub_ps (__mmask8 __U, __m256 __A, __m256 __B) {
+ return (__m256) __builtin_ia32_subps256_mask ((__v8sf) __A,
+ (__v8sf) __B,
+ (__v8sf)
+ _mm256_setzero_ps (),
+ (__mmask8) __U);
+}
+
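+/* Note: permutex2var is a two-source table lookup. Each index element
+ * selects from the concatenation of the two data vectors (the bit just
+ * above the in-vector index bits picks the source). The _mask2_ (vpermi2)
+ * forms merge under __U with the index operand __I as the passthrough. */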
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_mask2_permutex2var_epi32 (__m128i __A, __m128i __I, __mmask8 __U,
+ __m128i __B) {
+ return (__m128i) __builtin_ia32_vpermi2vard128_mask ((__v4si) __A,
+ (__v4si) __I
+ /* idx */ ,
+ (__v4si) __B,
+ (__mmask8) __U);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS
+_mm256_mask2_permutex2var_epi32 (__m256i __A, __m256i __I,
+ __mmask8 __U, __m256i __B) {
+ return (__m256i) __builtin_ia32_vpermi2vard256_mask ((__v8si) __A,
+ (__v8si) __I
+ /* idx */ ,
+ (__v8si) __B,
+ (__mmask8) __U);
+}
+
+static __inline__ __m128d __DEFAULT_FN_ATTRS
+_mm_mask2_permutex2var_pd (__m128d __A, __m128i __I, __mmask8 __U,
+ __m128d __B) {
+ return (__m128d) __builtin_ia32_vpermi2varpd128_mask ((__v2df) __A,
+ (__v2di) __I
+ /* idx */ ,
+ (__v2df) __B,
+ (__mmask8)
+ __U);
+}
+
+static __inline__ __m256d __DEFAULT_FN_ATTRS
+_mm256_mask2_permutex2var_pd (__m256d __A, __m256i __I, __mmask8 __U,
+ __m256d __B) {
+ return (__m256d) __builtin_ia32_vpermi2varpd256_mask ((__v4df) __A,
+ (__v4di) __I
+ /* idx */ ,
+ (__v4df) __B,
+ (__mmask8)
+ __U);
+}
+
+static __inline__ __m128 __DEFAULT_FN_ATTRS
+_mm_mask2_permutex2var_ps (__m128 __A, __m128i __I, __mmask8 __U,
+ __m128 __B) {
+ return (__m128) __builtin_ia32_vpermi2varps128_mask ((__v4sf) __A,
+ (__v4si) __I
+ /* idx */ ,
+ (__v4sf) __B,
+ (__mmask8) __U);
+}
+
+static __inline__ __m256 __DEFAULT_FN_ATTRS
+_mm256_mask2_permutex2var_ps (__m256 __A, __m256i __I, __mmask8 __U,
+ __m256 __B) {
+ return (__m256) __builtin_ia32_vpermi2varps256_mask ((__v8sf) __A,
+ (__v8si) __I
+ /* idx */ ,
+ (__v8sf) __B,
+ (__mmask8) __U);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_mask2_permutex2var_epi64 (__m128i __A, __m128i __I, __mmask8 __U,
+ __m128i __B) {
+ return (__m128i) __builtin_ia32_vpermi2varq128_mask ((__v2di) __A,
+ (__v2di) __I
+ /* idx */ ,
+ (__v2di) __B,
+ (__mmask8) __U);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS
+_mm256_mask2_permutex2var_epi64 (__m256i __A, __m256i __I,
+ __mmask8 __U, __m256i __B) {
+ return (__m256i) __builtin_ia32_vpermi2varq256_mask ((__v4di) __A,
+ (__v4di) __I
+ /* idx */ ,
+ (__v4di) __B,
+ (__mmask8) __U);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_permutex2var_epi32 (__m128i __A, __m128i __I, __m128i __B) {
+ return (__m128i) __builtin_ia32_vpermt2vard128_mask ((__v4si) __I
+ /* idx */ ,
+ (__v4si) __A,
+ (__v4si) __B,
+ (__mmask8) -1);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_mask_permutex2var_epi32 (__m128i __A, __mmask8 __U, __m128i __I,
+ __m128i __B) {
+ return (__m128i) __builtin_ia32_vpermt2vard128_mask ((__v4si) __I
+ /* idx */ ,
+ (__v4si) __A,
+ (__v4si) __B,
+ (__mmask8) __U);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_maskz_permutex2var_epi32 (__mmask8 __U, __m128i __A, __m128i __I,
+ __m128i __B) {
+ return (__m128i) __builtin_ia32_vpermt2vard128_maskz ((__v4si) __I
+ /* idx */ ,
+ (__v4si) __A,
+ (__v4si) __B,
+ (__mmask8)
+ __U);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS
+_mm256_permutex2var_epi32 (__m256i __A, __m256i __I, __m256i __B) {
+ return (__m256i) __builtin_ia32_vpermt2vard256_mask ((__v8si) __I
+ /* idx */ ,
+ (__v8si) __A,
+ (__v8si) __B,
+ (__mmask8) -1);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS
+_mm256_mask_permutex2var_epi32 (__m256i __A, __mmask8 __U, __m256i __I,
+ __m256i __B) {
+ return (__m256i) __builtin_ia32_vpermt2vard256_mask ((__v8si) __I
+ /* idx */ ,
+ (__v8si) __A,
+ (__v8si) __B,
+ (__mmask8) __U);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS
+_mm256_maskz_permutex2var_epi32 (__mmask8 __U, __m256i __A,
+ __m256i __I, __m256i __B) {
+ return (__m256i) __builtin_ia32_vpermt2vard256_maskz ((__v8si) __I
+ /* idx */ ,
+ (__v8si) __A,
+ (__v8si) __B,
+ (__mmask8)
+ __U);
+}
+
+static __inline__ __m128d __DEFAULT_FN_ATTRS
+_mm_permutex2var_pd (__m128d __A, __m128i __I, __m128d __B) {
+ return (__m128d) __builtin_ia32_vpermt2varpd128_mask ((__v2di) __I
+ /* idx */ ,
+ (__v2df) __A,
+ (__v2df) __B,
+                                                         (__mmask8) -1);
+}
+
+static __inline__ __m128d __DEFAULT_FN_ATTRS
+_mm_mask_permutex2var_pd (__m128d __A, __mmask8 __U, __m128i __I,
+ __m128d __B) {
+ return (__m128d) __builtin_ia32_vpermt2varpd128_mask ((__v2di) __I
+ /* idx */ ,
+ (__v2df) __A,
+ (__v2df) __B,
+ (__mmask8)
+ __U);
+}
+
+static __inline__ __m128d __DEFAULT_FN_ATTRS
+_mm_maskz_permutex2var_pd (__mmask8 __U, __m128d __A, __m128i __I,
+ __m128d __B) {
+ return (__m128d) __builtin_ia32_vpermt2varpd128_maskz ((__v2di) __I
+ /* idx */ ,
+ (__v2df) __A,
+ (__v2df) __B,
+ (__mmask8)
+ __U);
+}
+
+static __inline__ __m256d __DEFAULT_FN_ATTRS
+_mm256_permutex2var_pd (__m256d __A, __m256i __I, __m256d __B) {
+ return (__m256d) __builtin_ia32_vpermt2varpd256_mask ((__v4di) __I
+ /* idx */ ,
+ (__v4df) __A,
+ (__v4df) __B,
+                                                         (__mmask8) -1);
+}
+
+static __inline__ __m256d __DEFAULT_FN_ATTRS
+_mm256_mask_permutex2var_pd (__m256d __A, __mmask8 __U, __m256i __I,
+ __m256d __B) {
+ return (__m256d) __builtin_ia32_vpermt2varpd256_mask ((__v4di) __I
+ /* idx */ ,
+ (__v4df) __A,
+ (__v4df) __B,
+ (__mmask8)
+ __U);
+}
+
+static __inline__ __m256d __DEFAULT_FN_ATTRS
+_mm256_maskz_permutex2var_pd (__mmask8 __U, __m256d __A, __m256i __I,
+ __m256d __B) {
+ return (__m256d) __builtin_ia32_vpermt2varpd256_maskz ((__v4di) __I
+ /* idx */ ,
+ (__v4df) __A,
+ (__v4df) __B,
+ (__mmask8)
+ __U);
+}
+
+static __inline__ __m128 __DEFAULT_FN_ATTRS
+_mm_permutex2var_ps (__m128 __A, __m128i __I, __m128 __B) {
+ return (__m128) __builtin_ia32_vpermt2varps128_mask ((__v4si) __I
+ /* idx */ ,
+ (__v4sf) __A,
+ (__v4sf) __B,
+ (__mmask8) -1);
+}
+
+static __inline__ __m128 __DEFAULT_FN_ATTRS
+_mm_mask_permutex2var_ps (__m128 __A, __mmask8 __U, __m128i __I,
+ __m128 __B) {
+ return (__m128) __builtin_ia32_vpermt2varps128_mask ((__v4si) __I
+ /* idx */ ,
+ (__v4sf) __A,
+ (__v4sf) __B,
+ (__mmask8) __U);
+}
+
+static __inline__ __m128 __DEFAULT_FN_ATTRS
+_mm_maskz_permutex2var_ps (__mmask8 __U, __m128 __A, __m128i __I,
+ __m128 __B) {
+ return (__m128) __builtin_ia32_vpermt2varps128_maskz ((__v4si) __I
+ /* idx */ ,
+ (__v4sf) __A,
+ (__v4sf) __B,
+ (__mmask8)
+ __U);
+}
+
+static __inline__ __m256 __DEFAULT_FN_ATTRS
+_mm256_permutex2var_ps (__m256 __A, __m256i __I, __m256 __B) {
+ return (__m256) __builtin_ia32_vpermt2varps256_mask ((__v8si) __I
+ /* idx */ ,
+ (__v8sf) __A,
+ (__v8sf) __B,
+ (__mmask8) -1);
+}
+
+static __inline__ __m256 __DEFAULT_FN_ATTRS
+_mm256_mask_permutex2var_ps (__m256 __A, __mmask8 __U, __m256i __I,
+ __m256 __B) {
+ return (__m256) __builtin_ia32_vpermt2varps256_mask ((__v8si) __I
+ /* idx */ ,
+ (__v8sf) __A,
+ (__v8sf) __B,
+ (__mmask8) __U);
+}
+
+static __inline__ __m256 __DEFAULT_FN_ATTRS
+_mm256_maskz_permutex2var_ps (__mmask8 __U, __m256 __A, __m256i __I,
+ __m256 __B) {
+ return (__m256) __builtin_ia32_vpermt2varps256_maskz ((__v8si) __I
+ /* idx */ ,
+ (__v8sf) __A,
+ (__v8sf) __B,
+ (__mmask8)
+ __U);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_permutex2var_epi64 (__m128i __A, __m128i __I, __m128i __B) {
+ return (__m128i) __builtin_ia32_vpermt2varq128_mask ((__v2di) __I
+ /* idx */ ,
+ (__v2di) __A,
+ (__v2di) __B,
+ (__mmask8) -1);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_mask_permutex2var_epi64 (__m128i __A, __mmask8 __U, __m128i __I,
+ __m128i __B) {
+ return (__m128i) __builtin_ia32_vpermt2varq128_mask ((__v2di) __I
+ /* idx */ ,
+ (__v2di) __A,
+ (__v2di) __B,
+ (__mmask8) __U);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_maskz_permutex2var_epi64 (__mmask8 __U, __m128i __A, __m128i __I,
+ __m128i __B) {
+ return (__m128i) __builtin_ia32_vpermt2varq128_maskz ((__v2di) __I
+ /* idx */ ,
+ (__v2di) __A,
+ (__v2di) __B,
+ (__mmask8)
+ __U);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS
+_mm256_permutex2var_epi64 (__m256i __A, __m256i __I, __m256i __B) {
+ return (__m256i) __builtin_ia32_vpermt2varq256_mask ((__v4di) __I
+ /* idx */ ,
+ (__v4di) __A,
+ (__v4di) __B,
+ (__mmask8) -1);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS
+_mm256_mask_permutex2var_epi64 (__m256i __A, __mmask8 __U, __m256i __I,
+ __m256i __B) {
+ return (__m256i) __builtin_ia32_vpermt2varq256_mask ((__v4di) __I
+ /* idx */ ,
+ (__v4di) __A,
+ (__v4di) __B,
+ (__mmask8) __U);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS
+_mm256_maskz_permutex2var_epi64 (__mmask8 __U, __m256i __A,
+ __m256i __I, __m256i __B) {
+ return (__m256i) __builtin_ia32_vpermt2varq256_maskz ((__v4di) __I
+ /* idx */ ,
+ (__v4di) __A,
+ (__v4di) __B,
+ (__mmask8)
+ __U);
+}
+
+#undef __DEFAULT_FN_ATTRS
+#undef __DEFAULT_FN_ATTRS_BOTH
+
+#endif /* __AVX512VLINTRIN_H */
diff --git a/clang-2690385/lib64/clang/3.8/include/avxintrin.h b/clang-2690385/lib64/clang/3.8/include/avxintrin.h
new file mode 100644
index 00000000..6d1ca547
--- /dev/null
+++ b/clang-2690385/lib64/clang/3.8/include/avxintrin.h
@@ -0,0 +1,1318 @@
+/*===---- avxintrin.h - AVX intrinsics -------------------------------------===
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+ * THE SOFTWARE.
+ *
+ *===-----------------------------------------------------------------------===
+ */
+
+#ifndef __IMMINTRIN_H
+#error "Never use <avxintrin.h> directly; include <immintrin.h> instead."
+#endif
+
+#ifndef __AVXINTRIN_H
+#define __AVXINTRIN_H
+
+typedef double __v4df __attribute__ ((__vector_size__ (32)));
+typedef float __v8sf __attribute__ ((__vector_size__ (32)));
+typedef long long __v4di __attribute__ ((__vector_size__ (32)));
+typedef int __v8si __attribute__ ((__vector_size__ (32)));
+typedef short __v16hi __attribute__ ((__vector_size__ (32)));
+typedef char __v32qi __attribute__ ((__vector_size__ (32)));
+
+/* We need an explicitly signed variant for char. Note that this shouldn't
+ * appear in the interface though. */
+typedef signed char __v32qs __attribute__((__vector_size__(32)));
+
+typedef float __m256 __attribute__ ((__vector_size__ (32)));
+typedef double __m256d __attribute__((__vector_size__(32)));
+typedef long long __m256i __attribute__((__vector_size__(32)));
+
+/* Define the default attributes for the functions in this file. */
+#define __DEFAULT_FN_ATTRS __attribute__((__always_inline__, __nodebug__, __target__("avx")))
+
+/* Arithmetic */
+static __inline __m256d __DEFAULT_FN_ATTRS
+_mm256_add_pd(__m256d __a, __m256d __b)
+{
+ return __a+__b;
+}
+
+static __inline __m256 __DEFAULT_FN_ATTRS
+_mm256_add_ps(__m256 __a, __m256 __b)
+{
+ return __a+__b;
+}
+
+static __inline __m256d __DEFAULT_FN_ATTRS
+_mm256_sub_pd(__m256d __a, __m256d __b)
+{
+ return __a-__b;
+}
+
+static __inline __m256 __DEFAULT_FN_ATTRS
+_mm256_sub_ps(__m256 __a, __m256 __b)
+{
+ return __a-__b;
+}
+
+static __inline __m256d __DEFAULT_FN_ATTRS
+_mm256_addsub_pd(__m256d __a, __m256d __b)
+{
+ return (__m256d)__builtin_ia32_addsubpd256((__v4df)__a, (__v4df)__b);
+}
+
+static __inline __m256 __DEFAULT_FN_ATTRS
+_mm256_addsub_ps(__m256 __a, __m256 __b)
+{
+ return (__m256)__builtin_ia32_addsubps256((__v8sf)__a, (__v8sf)__b);
+}
+
+static __inline __m256d __DEFAULT_FN_ATTRS
+_mm256_div_pd(__m256d __a, __m256d __b)
+{
+ return __a / __b;
+}
+
+static __inline __m256 __DEFAULT_FN_ATTRS
+_mm256_div_ps(__m256 __a, __m256 __b)
+{
+ return __a / __b;
+}
+
+static __inline __m256d __DEFAULT_FN_ATTRS
+_mm256_max_pd(__m256d __a, __m256d __b)
+{
+ return (__m256d)__builtin_ia32_maxpd256((__v4df)__a, (__v4df)__b);
+}
+
+static __inline __m256 __DEFAULT_FN_ATTRS
+_mm256_max_ps(__m256 __a, __m256 __b)
+{
+ return (__m256)__builtin_ia32_maxps256((__v8sf)__a, (__v8sf)__b);
+}
+
+static __inline __m256d __DEFAULT_FN_ATTRS
+_mm256_min_pd(__m256d __a, __m256d __b)
+{
+ return (__m256d)__builtin_ia32_minpd256((__v4df)__a, (__v4df)__b);
+}
+
+static __inline __m256 __DEFAULT_FN_ATTRS
+_mm256_min_ps(__m256 __a, __m256 __b)
+{
+ return (__m256)__builtin_ia32_minps256((__v8sf)__a, (__v8sf)__b);
+}
+
+static __inline __m256d __DEFAULT_FN_ATTRS
+_mm256_mul_pd(__m256d __a, __m256d __b)
+{
+ return __a * __b;
+}
+
+static __inline __m256 __DEFAULT_FN_ATTRS
+_mm256_mul_ps(__m256 __a, __m256 __b)
+{
+ return __a * __b;
+}
+
+static __inline __m256d __DEFAULT_FN_ATTRS
+_mm256_sqrt_pd(__m256d __a)
+{
+ return (__m256d)__builtin_ia32_sqrtpd256((__v4df)__a);
+}
+
+static __inline __m256 __DEFAULT_FN_ATTRS
+_mm256_sqrt_ps(__m256 __a)
+{
+ return (__m256)__builtin_ia32_sqrtps256((__v8sf)__a);
+}
+
+static __inline __m256 __DEFAULT_FN_ATTRS
+_mm256_rsqrt_ps(__m256 __a)
+{
+ return (__m256)__builtin_ia32_rsqrtps256((__v8sf)__a);
+}
+
+static __inline __m256 __DEFAULT_FN_ATTRS
+_mm256_rcp_ps(__m256 __a)
+{
+ return (__m256)__builtin_ia32_rcpps256((__v8sf)__a);
+}
+
+#define _mm256_round_pd(V, M) __extension__ ({ \
+ (__m256d)__builtin_ia32_roundpd256((__v4df)(__m256d)(V), (M)); })
+
+#define _mm256_round_ps(V, M) __extension__ ({ \
+ (__m256)__builtin_ia32_roundps256((__v8sf)(__m256)(V), (M)); })
+
+#define _mm256_ceil_pd(V) _mm256_round_pd((V), _MM_FROUND_CEIL)
+#define _mm256_floor_pd(V) _mm256_round_pd((V), _MM_FROUND_FLOOR)
+#define _mm256_ceil_ps(V) _mm256_round_ps((V), _MM_FROUND_CEIL)
+#define _mm256_floor_ps(V) _mm256_round_ps((V), _MM_FROUND_FLOOR)
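+
+/* A usage sketch (the __example_* helper below is ours, not part of this
+ * header): the ceil/floor wrappers are just fixed rounding modes, and any
+ * other _MM_FROUND_* constant from <smmintrin.h> can be passed the same
+ * way, e.g. round to the nearest integer: */
+static __inline __m256d __DEFAULT_FN_ATTRS
+__example_rint_pd(__m256d __v)
+{
+  return _mm256_round_pd(__v, _MM_FROUND_NINT); /* round to nearest */
+}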
+
+/* Logical */
+static __inline __m256d __DEFAULT_FN_ATTRS
+_mm256_and_pd(__m256d __a, __m256d __b)
+{
+ return (__m256d)((__v4di)__a & (__v4di)__b);
+}
+
+static __inline __m256 __DEFAULT_FN_ATTRS
+_mm256_and_ps(__m256 __a, __m256 __b)
+{
+ return (__m256)((__v8si)__a & (__v8si)__b);
+}
+
+static __inline __m256d __DEFAULT_FN_ATTRS
+_mm256_andnot_pd(__m256d __a, __m256d __b)
+{
+ return (__m256d)(~(__v4di)__a & (__v4di)__b);
+}
+
+static __inline __m256 __DEFAULT_FN_ATTRS
+_mm256_andnot_ps(__m256 __a, __m256 __b)
+{
+ return (__m256)(~(__v8si)__a & (__v8si)__b);
+}
+
+static __inline __m256d __DEFAULT_FN_ATTRS
+_mm256_or_pd(__m256d __a, __m256d __b)
+{
+ return (__m256d)((__v4di)__a | (__v4di)__b);
+}
+
+static __inline __m256 __DEFAULT_FN_ATTRS
+_mm256_or_ps(__m256 __a, __m256 __b)
+{
+ return (__m256)((__v8si)__a | (__v8si)__b);
+}
+
+static __inline __m256d __DEFAULT_FN_ATTRS
+_mm256_xor_pd(__m256d __a, __m256d __b)
+{
+ return (__m256d)((__v4di)__a ^ (__v4di)__b);
+}
+
+static __inline __m256 __DEFAULT_FN_ATTRS
+_mm256_xor_ps(__m256 __a, __m256 __b)
+{
+ return (__m256)((__v8si)__a ^ (__v8si)__b);
+}
+
+/* Horizontal arithmetic */
+static __inline __m256d __DEFAULT_FN_ATTRS
+_mm256_hadd_pd(__m256d __a, __m256d __b)
+{
+ return (__m256d)__builtin_ia32_haddpd256((__v4df)__a, (__v4df)__b);
+}
+
+static __inline __m256 __DEFAULT_FN_ATTRS
+_mm256_hadd_ps(__m256 __a, __m256 __b)
+{
+ return (__m256)__builtin_ia32_haddps256((__v8sf)__a, (__v8sf)__b);
+}
+
+static __inline __m256d __DEFAULT_FN_ATTRS
+_mm256_hsub_pd(__m256d __a, __m256d __b)
+{
+ return (__m256d)__builtin_ia32_hsubpd256((__v4df)__a, (__v4df)__b);
+}
+
+static __inline __m256 __DEFAULT_FN_ATTRS
+_mm256_hsub_ps(__m256 __a, __m256 __b)
+{
+ return (__m256)__builtin_ia32_hsubps256((__v8sf)__a, (__v8sf)__b);
+}
+
+/* Vector permutations */
+static __inline __m128d __DEFAULT_FN_ATTRS
+_mm_permutevar_pd(__m128d __a, __m128i __c)
+{
+ return (__m128d)__builtin_ia32_vpermilvarpd((__v2df)__a, (__v2di)__c);
+}
+
+static __inline __m256d __DEFAULT_FN_ATTRS
+_mm256_permutevar_pd(__m256d __a, __m256i __c)
+{
+ return (__m256d)__builtin_ia32_vpermilvarpd256((__v4df)__a, (__v4di)__c);
+}
+
+static __inline __m128 __DEFAULT_FN_ATTRS
+_mm_permutevar_ps(__m128 __a, __m128i __c)
+{
+ return (__m128)__builtin_ia32_vpermilvarps((__v4sf)__a, (__v4si)__c);
+}
+
+static __inline __m256 __DEFAULT_FN_ATTRS
+_mm256_permutevar_ps(__m256 __a, __m256i __c)
+{
+ return (__m256)__builtin_ia32_vpermilvarps256((__v8sf)__a, (__v8si)__c);
+}
+
+#define _mm_permute_pd(A, C) __extension__ ({ \
+ (__m128d)__builtin_shufflevector((__v2df)(__m128d)(A), \
+ (__v2df)_mm_setzero_pd(), \
+ (C) & 0x1, ((C) & 0x2) >> 1); })
+
+#define _mm256_permute_pd(A, C) __extension__ ({ \
+ (__m256d)__builtin_shufflevector((__v4df)(__m256d)(A), \
+ (__v4df)_mm256_setzero_pd(), \
+ (C) & 0x1, ((C) & 0x2) >> 1, \
+ 2 + (((C) & 0x4) >> 2), \
+ 2 + (((C) & 0x8) >> 3)); })
+
+#define _mm_permute_ps(A, C) __extension__ ({ \
+ (__m128)__builtin_shufflevector((__v4sf)(__m128)(A), \
+ (__v4sf)_mm_setzero_ps(), \
+ (C) & 0x3, ((C) & 0xc) >> 2, \
+ ((C) & 0x30) >> 4, ((C) & 0xc0) >> 6); })
+
+#define _mm256_permute_ps(A, C) __extension__ ({ \
+ (__m256)__builtin_shufflevector((__v8sf)(__m256)(A), \
+ (__v8sf)_mm256_setzero_ps(), \
+ (C) & 0x3, ((C) & 0xc) >> 2, \
+ ((C) & 0x30) >> 4, ((C) & 0xc0) >> 6, \
+ 4 + (((C) & 0x03) >> 0), \
+ 4 + (((C) & 0x0c) >> 2), \
+ 4 + (((C) & 0x30) >> 4), \
+ 4 + (((C) & 0xc0) >> 6)); })
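+
+/* Usage sketch (hypothetical helper): for _mm256_permute_pd the low two
+ * control bits select within the low 128-bit lane and the next two within
+ * the high lane, so 0x5 (0b0101) swaps the element pair in both lanes. */
+static __inline __m256d __DEFAULT_FN_ATTRS
+__example_swap_pairs_pd(__m256d __v)
+{
+  return _mm256_permute_pd(__v, 0x5); /* yields { v1, v0, v3, v2 } */
+}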
+
+#define _mm256_permute2f128_pd(V1, V2, M) __extension__ ({ \
+ (__m256d)__builtin_ia32_vperm2f128_pd256((__v4df)(__m256d)(V1), \
+ (__v4df)(__m256d)(V2), (M)); })
+
+#define _mm256_permute2f128_ps(V1, V2, M) __extension__ ({ \
+ (__m256)__builtin_ia32_vperm2f128_ps256((__v8sf)(__m256)(V1), \
+ (__v8sf)(__m256)(V2), (M)); })
+
+#define _mm256_permute2f128_si256(V1, V2, M) __extension__ ({ \
+ (__m256i)__builtin_ia32_vperm2f128_si256((__v8si)(__m256i)(V1), \
+ (__v8si)(__m256i)(V2), (M)); })
+
+/* Vector Blend */
+#define _mm256_blend_pd(V1, V2, M) __extension__ ({ \
+ (__m256d)__builtin_shufflevector((__v4df)(__m256d)(V1), \
+ (__v4df)(__m256d)(V2), \
+ (((M) & 0x01) ? 4 : 0), \
+ (((M) & 0x02) ? 5 : 1), \
+ (((M) & 0x04) ? 6 : 2), \
+ (((M) & 0x08) ? 7 : 3)); })
+
+#define _mm256_blend_ps(V1, V2, M) __extension__ ({ \
+ (__m256)__builtin_shufflevector((__v8sf)(__m256)(V1), \
+ (__v8sf)(__m256)(V2), \
+ (((M) & 0x01) ? 8 : 0), \
+ (((M) & 0x02) ? 9 : 1), \
+ (((M) & 0x04) ? 10 : 2), \
+ (((M) & 0x08) ? 11 : 3), \
+ (((M) & 0x10) ? 12 : 4), \
+ (((M) & 0x20) ? 13 : 5), \
+ (((M) & 0x40) ? 14 : 6), \
+ (((M) & 0x80) ? 15 : 7)); })
+
+static __inline __m256d __DEFAULT_FN_ATTRS
+_mm256_blendv_pd(__m256d __a, __m256d __b, __m256d __c)
+{
+ return (__m256d)__builtin_ia32_blendvpd256(
+ (__v4df)__a, (__v4df)__b, (__v4df)__c);
+}
+
+static __inline __m256 __DEFAULT_FN_ATTRS
+_mm256_blendv_ps(__m256 __a, __m256 __b, __m256 __c)
+{
+ return (__m256)__builtin_ia32_blendvps256(
+ (__v8sf)__a, (__v8sf)__b, (__v8sf)__c);
+}
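+
+/* Usage sketch (hypothetical helper): use the immediate _mm256_blend_*
+ * forms when the selector is a compile-time constant; _mm256_blendv_*
+ * selects by the sign bit of each element of __c and accepts run-time
+ * masks. Here mask bit i set means "take element i from __b": */
+static __inline __m256d __DEFAULT_FN_ATTRS
+__example_blend_even_from_b(__m256d __a, __m256d __b)
+{
+  return _mm256_blend_pd(__a, __b, 0x5); /* elements 0,2 from __b; 1,3 from __a */
+}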
+
+/* Vector Dot Product */
+#define _mm256_dp_ps(V1, V2, M) __extension__ ({ \
+ (__m256)__builtin_ia32_dpps256((__v8sf)(__m256)(V1), \
+ (__v8sf)(__m256)(V2), (M)); })
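+
+/* Usage sketch (hypothetical helper): in the 8-bit mask the high nibble
+ * chooses which elements enter the products and the low nibble chooses
+ * which output elements receive the sum, per 128-bit lane. 0xF1 computes a
+ * full 4-element dot product into element 0 of each lane: */
+static __inline __m256 __DEFAULT_FN_ATTRS
+__example_dp4_lanes(__m256 __a, __m256 __b)
+{
+  return _mm256_dp_ps(__a, __b, 0xF1);
+}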
+
+/* Vector shuffle */
+#define _mm256_shuffle_ps(a, b, mask) __extension__ ({ \
+ (__m256)__builtin_shufflevector((__v8sf)(__m256)(a), \
+ (__v8sf)(__m256)(b), \
+ (mask) & 0x3, \
+ ((mask) & 0xc) >> 2, \
+ (((mask) & 0x30) >> 4) + 8, \
+ (((mask) & 0xc0) >> 6) + 8, \
+ ((mask) & 0x3) + 4, \
+ (((mask) & 0xc) >> 2) + 4, \
+ (((mask) & 0x30) >> 4) + 12, \
+ (((mask) & 0xc0) >> 6) + 12); })
+
+#define _mm256_shuffle_pd(a, b, mask) __extension__ ({ \
+ (__m256d)__builtin_shufflevector((__v4df)(__m256d)(a), \
+ (__v4df)(__m256d)(b), \
+ (mask) & 0x1, \
+ (((mask) & 0x2) >> 1) + 4, \
+ (((mask) & 0x4) >> 2) + 2, \
+ (((mask) & 0x8) >> 3) + 6); })
+
+/* Compare */
+#define _CMP_EQ_OQ 0x00 /* Equal (ordered, non-signaling) */
+#define _CMP_LT_OS 0x01 /* Less-than (ordered, signaling) */
+#define _CMP_LE_OS 0x02 /* Less-than-or-equal (ordered, signaling) */
+#define _CMP_UNORD_Q 0x03 /* Unordered (non-signaling) */
+#define _CMP_NEQ_UQ 0x04 /* Not-equal (unordered, non-signaling) */
+#define _CMP_NLT_US 0x05 /* Not-less-than (unordered, signaling) */
+#define _CMP_NLE_US 0x06 /* Not-less-than-or-equal (unordered, signaling) */
+#define _CMP_ORD_Q    0x07 /* Ordered (non-signaling)  */
+#define _CMP_EQ_UQ 0x08 /* Equal (unordered, non-signaling) */
+#define _CMP_NGE_US 0x09 /* Not-greater-than-or-equal (unord, signaling) */
+#define _CMP_NGT_US 0x0a /* Not-greater-than (unordered, signaling) */
+#define _CMP_FALSE_OQ 0x0b /* False (ordered, non-signaling) */
+#define _CMP_NEQ_OQ 0x0c /* Not-equal (ordered, non-signaling) */
+#define _CMP_GE_OS 0x0d /* Greater-than-or-equal (ordered, signaling) */
+#define _CMP_GT_OS 0x0e /* Greater-than (ordered, signaling) */
+#define _CMP_TRUE_UQ 0x0f /* True (unordered, non-signaling) */
+#define _CMP_EQ_OS 0x10 /* Equal (ordered, signaling) */
+#define _CMP_LT_OQ 0x11 /* Less-than (ordered, non-signaling) */
+#define _CMP_LE_OQ 0x12 /* Less-than-or-equal (ordered, non-signaling) */
+#define _CMP_UNORD_S 0x13 /* Unordered (signaling) */
+#define _CMP_NEQ_US 0x14 /* Not-equal (unordered, signaling) */
+#define _CMP_NLT_UQ 0x15 /* Not-less-than (unordered, non-signaling) */
+#define _CMP_NLE_UQ 0x16 /* Not-less-than-or-equal (unord, non-signaling) */
+#define _CMP_ORD_S 0x17 /* Ordered (signaling) */
+#define _CMP_EQ_US 0x18 /* Equal (unordered, signaling) */
+#define _CMP_NGE_UQ 0x19 /* Not-greater-than-or-equal (unord, non-sign) */
+#define _CMP_NGT_UQ 0x1a /* Not-greater-than (unordered, non-signaling) */
+#define _CMP_FALSE_OS 0x1b /* False (ordered, signaling) */
+#define _CMP_NEQ_OS 0x1c /* Not-equal (ordered, signaling) */
+#define _CMP_GE_OQ 0x1d /* Greater-than-or-equal (ordered, non-signaling) */
+#define _CMP_GT_OQ 0x1e /* Greater-than (ordered, non-signaling) */
+#define _CMP_TRUE_US 0x1f /* True (unordered, signaling) */
+
+#define _mm_cmp_pd(a, b, c) __extension__ ({ \
+ (__m128d)__builtin_ia32_cmppd((__v2df)(__m128d)(a), \
+ (__v2df)(__m128d)(b), (c)); })
+
+#define _mm_cmp_ps(a, b, c) __extension__ ({ \
+ (__m128)__builtin_ia32_cmpps((__v4sf)(__m128)(a), \
+ (__v4sf)(__m128)(b), (c)); })
+
+#define _mm256_cmp_pd(a, b, c) __extension__ ({ \
+ (__m256d)__builtin_ia32_cmppd256((__v4df)(__m256d)(a), \
+ (__v4df)(__m256d)(b), (c)); })
+
+#define _mm256_cmp_ps(a, b, c) __extension__ ({ \
+ (__m256)__builtin_ia32_cmpps256((__v8sf)(__m256)(a), \
+ (__v8sf)(__m256)(b), (c)); })
+
+#define _mm_cmp_sd(a, b, c) __extension__ ({ \
+ (__m128d)__builtin_ia32_cmpsd((__v2df)(__m128d)(a), \
+ (__v2df)(__m128d)(b), (c)); })
+
+#define _mm_cmp_ss(a, b, c) __extension__ ({ \
+ (__m128)__builtin_ia32_cmpss((__v4sf)(__m128)(a), \
+ (__v4sf)(__m128)(b), (c)); })
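+
+/* Usage sketch (hypothetical helper): the predicate encodes the relation
+ * plus the NaN behavior; "OQ" predicates return false for NaN operands and
+ * stay quiet on quiet NaNs, while the "S" variants signal. */
+static __inline __m256 __DEFAULT_FN_ATTRS
+__example_lt_mask_ps(__m256 __a, __m256 __b)
+{
+  return _mm256_cmp_ps(__a, __b, _CMP_LT_OQ); /* all-ones where a < b */
+}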
+
+static __inline int __DEFAULT_FN_ATTRS
+_mm256_extract_epi32(__m256i __a, const int __imm)
+{
+ __v8si __b = (__v8si)__a;
+ return __b[__imm & 7];
+}
+
+static __inline int __DEFAULT_FN_ATTRS
+_mm256_extract_epi16(__m256i __a, const int __imm)
+{
+ __v16hi __b = (__v16hi)__a;
+ return __b[__imm & 15];
+}
+
+static __inline int __DEFAULT_FN_ATTRS
+_mm256_extract_epi8(__m256i __a, const int __imm)
+{
+ __v32qi __b = (__v32qi)__a;
+ return __b[__imm & 31];
+}
+
+#ifdef __x86_64__
+static __inline long long __DEFAULT_FN_ATTRS
+_mm256_extract_epi64(__m256i __a, const int __imm)
+{
+ __v4di __b = (__v4di)__a;
+ return __b[__imm & 3];
+}
+#endif
+
+static __inline __m256i __DEFAULT_FN_ATTRS
+_mm256_insert_epi32(__m256i __a, int __b, int const __imm)
+{
+ __v8si __c = (__v8si)__a;
+ __c[__imm & 7] = __b;
+ return (__m256i)__c;
+}
+
+static __inline __m256i __DEFAULT_FN_ATTRS
+_mm256_insert_epi16(__m256i __a, int __b, int const __imm)
+{
+ __v16hi __c = (__v16hi)__a;
+ __c[__imm & 15] = __b;
+ return (__m256i)__c;
+}
+
+static __inline __m256i __DEFAULT_FN_ATTRS
+_mm256_insert_epi8(__m256i __a, int __b, int const __imm)
+{
+ __v32qi __c = (__v32qi)__a;
+ __c[__imm & 31] = __b;
+ return (__m256i)__c;
+}
+
+#ifdef __x86_64__
+static __inline __m256i __DEFAULT_FN_ATTRS
+_mm256_insert_epi64(__m256i __a, long long __b, int const __imm)
+{
+ __v4di __c = (__v4di)__a;
+ __c[__imm & 3] = __b;
+ return (__m256i)__c;
+}
+#endif
+
+/* Conversion */
+static __inline __m256d __DEFAULT_FN_ATTRS
+_mm256_cvtepi32_pd(__m128i __a)
+{
+ return (__m256d)__builtin_ia32_cvtdq2pd256((__v4si) __a);
+}
+
+static __inline __m256 __DEFAULT_FN_ATTRS
+_mm256_cvtepi32_ps(__m256i __a)
+{
+ return (__m256)__builtin_ia32_cvtdq2ps256((__v8si) __a);
+}
+
+static __inline __m128 __DEFAULT_FN_ATTRS
+_mm256_cvtpd_ps(__m256d __a)
+{
+ return (__m128)__builtin_ia32_cvtpd2ps256((__v4df) __a);
+}
+
+static __inline __m256i __DEFAULT_FN_ATTRS
+_mm256_cvtps_epi32(__m256 __a)
+{
+ return (__m256i)__builtin_ia32_cvtps2dq256((__v8sf) __a);
+}
+
+static __inline __m256d __DEFAULT_FN_ATTRS
+_mm256_cvtps_pd(__m128 __a)
+{
+ return (__m256d)__builtin_ia32_cvtps2pd256((__v4sf) __a);
+}
+
+static __inline __m128i __DEFAULT_FN_ATTRS
+_mm256_cvttpd_epi32(__m256d __a)
+{
+ return (__m128i)__builtin_ia32_cvttpd2dq256((__v4df) __a);
+}
+
+static __inline __m128i __DEFAULT_FN_ATTRS
+_mm256_cvtpd_epi32(__m256d __a)
+{
+ return (__m128i)__builtin_ia32_cvtpd2dq256((__v4df) __a);
+}
+
+static __inline __m256i __DEFAULT_FN_ATTRS
+_mm256_cvttps_epi32(__m256 __a)
+{
+ return (__m256i)__builtin_ia32_cvttps2dq256((__v8sf) __a);
+}
+
+/* Vector replicate */
+static __inline __m256 __DEFAULT_FN_ATTRS
+_mm256_movehdup_ps(__m256 __a)
+{
+ return __builtin_shufflevector(__a, __a, 1, 1, 3, 3, 5, 5, 7, 7);
+}
+
+static __inline __m256 __DEFAULT_FN_ATTRS
+_mm256_moveldup_ps(__m256 __a)
+{
+ return __builtin_shufflevector(__a, __a, 0, 0, 2, 2, 4, 4, 6, 6);
+}
+
+static __inline __m256d __DEFAULT_FN_ATTRS
+_mm256_movedup_pd(__m256d __a)
+{
+ return __builtin_shufflevector(__a, __a, 0, 0, 2, 2);
+}
+
+/* Unpack and Interleave */
+static __inline __m256d __DEFAULT_FN_ATTRS
+_mm256_unpackhi_pd(__m256d __a, __m256d __b)
+{
+ return __builtin_shufflevector(__a, __b, 1, 5, 1+2, 5+2);
+}
+
+static __inline __m256d __DEFAULT_FN_ATTRS
+_mm256_unpacklo_pd(__m256d __a, __m256d __b)
+{
+ return __builtin_shufflevector(__a, __b, 0, 4, 0+2, 4+2);
+}
+
+static __inline __m256 __DEFAULT_FN_ATTRS
+_mm256_unpackhi_ps(__m256 __a, __m256 __b)
+{
+ return __builtin_shufflevector(__a, __b, 2, 10, 2+1, 10+1, 6, 14, 6+1, 14+1);
+}
+
+static __inline __m256 __DEFAULT_FN_ATTRS
+_mm256_unpacklo_ps(__m256 __a, __m256 __b)
+{
+ return __builtin_shufflevector(__a, __b, 0, 8, 0+1, 8+1, 4, 12, 4+1, 12+1);
+}
+
+/* Bit Test */
+static __inline int __DEFAULT_FN_ATTRS
+_mm_testz_pd(__m128d __a, __m128d __b)
+{
+ return __builtin_ia32_vtestzpd((__v2df)__a, (__v2df)__b);
+}
+
+static __inline int __DEFAULT_FN_ATTRS
+_mm_testc_pd(__m128d __a, __m128d __b)
+{
+ return __builtin_ia32_vtestcpd((__v2df)__a, (__v2df)__b);
+}
+
+static __inline int __DEFAULT_FN_ATTRS
+_mm_testnzc_pd(__m128d __a, __m128d __b)
+{
+ return __builtin_ia32_vtestnzcpd((__v2df)__a, (__v2df)__b);
+}
+
+static __inline int __DEFAULT_FN_ATTRS
+_mm_testz_ps(__m128 __a, __m128 __b)
+{
+ return __builtin_ia32_vtestzps((__v4sf)__a, (__v4sf)__b);
+}
+
+static __inline int __DEFAULT_FN_ATTRS
+_mm_testc_ps(__m128 __a, __m128 __b)
+{
+ return __builtin_ia32_vtestcps((__v4sf)__a, (__v4sf)__b);
+}
+
+static __inline int __DEFAULT_FN_ATTRS
+_mm_testnzc_ps(__m128 __a, __m128 __b)
+{
+ return __builtin_ia32_vtestnzcps((__v4sf)__a, (__v4sf)__b);
+}
+
+static __inline int __DEFAULT_FN_ATTRS
+_mm256_testz_pd(__m256d __a, __m256d __b)
+{
+ return __builtin_ia32_vtestzpd256((__v4df)__a, (__v4df)__b);
+}
+
+static __inline int __DEFAULT_FN_ATTRS
+_mm256_testc_pd(__m256d __a, __m256d __b)
+{
+ return __builtin_ia32_vtestcpd256((__v4df)__a, (__v4df)__b);
+}
+
+static __inline int __DEFAULT_FN_ATTRS
+_mm256_testnzc_pd(__m256d __a, __m256d __b)
+{
+ return __builtin_ia32_vtestnzcpd256((__v4df)__a, (__v4df)__b);
+}
+
+static __inline int __DEFAULT_FN_ATTRS
+_mm256_testz_ps(__m256 __a, __m256 __b)
+{
+ return __builtin_ia32_vtestzps256((__v8sf)__a, (__v8sf)__b);
+}
+
+static __inline int __DEFAULT_FN_ATTRS
+_mm256_testc_ps(__m256 __a, __m256 __b)
+{
+ return __builtin_ia32_vtestcps256((__v8sf)__a, (__v8sf)__b);
+}
+
+static __inline int __DEFAULT_FN_ATTRS
+_mm256_testnzc_ps(__m256 __a, __m256 __b)
+{
+ return __builtin_ia32_vtestnzcps256((__v8sf)__a, (__v8sf)__b);
+}
+
+static __inline int __DEFAULT_FN_ATTRS
+_mm256_testz_si256(__m256i __a, __m256i __b)
+{
+ return __builtin_ia32_ptestz256((__v4di)__a, (__v4di)__b);
+}
+
+static __inline int __DEFAULT_FN_ATTRS
+_mm256_testc_si256(__m256i __a, __m256i __b)
+{
+ return __builtin_ia32_ptestc256((__v4di)__a, (__v4di)__b);
+}
+
+static __inline int __DEFAULT_FN_ATTRS
+_mm256_testnzc_si256(__m256i __a, __m256i __b)
+{
+ return __builtin_ia32_ptestnzc256((__v4di)__a, (__v4di)__b);
+}
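+
+/* Usage sketch (hypothetical helper): testz reports whether __a & __b is
+ * all zero (ZF), testc whether ~__a & __b is all zero (CF), and testnzc
+ * whether both flags are clear. A common idiom tests a vector against
+ * itself to ask "is it entirely zero?": */
+static __inline int __DEFAULT_FN_ATTRS
+__example_is_zero_si256(__m256i __v)
+{
+  return _mm256_testz_si256(__v, __v);
+}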
+
+/* Vector extract sign mask */
+static __inline int __DEFAULT_FN_ATTRS
+_mm256_movemask_pd(__m256d __a)
+{
+ return __builtin_ia32_movmskpd256((__v4df)__a);
+}
+
+static __inline int __DEFAULT_FN_ATTRS
+_mm256_movemask_ps(__m256 __a)
+{
+ return __builtin_ia32_movmskps256((__v8sf)__a);
+}
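+
+/* Usage sketch (hypothetical helper): movemask packs one sign bit per
+ * element into the low bits of an int, so for four doubles a nonzero
+ * result means at least one element has its sign bit set: */
+static __inline int __DEFAULT_FN_ATTRS
+__example_any_negative_pd(__m256d __a)
+{
+  return _mm256_movemask_pd(__a) != 0;
+}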
+
+/* Vector zero */
+static __inline void __DEFAULT_FN_ATTRS
+_mm256_zeroall(void)
+{
+ __builtin_ia32_vzeroall();
+}
+
+static __inline void __DEFAULT_FN_ATTRS
+_mm256_zeroupper(void)
+{
+ __builtin_ia32_vzeroupper();
+}
+
+/* Vector load with broadcast */
+static __inline __m128 __DEFAULT_FN_ATTRS
+_mm_broadcast_ss(float const *__a)
+{
+ float __f = *__a;
+ return (__m128)(__v4sf){ __f, __f, __f, __f };
+}
+
+static __inline __m256d __DEFAULT_FN_ATTRS
+_mm256_broadcast_sd(double const *__a)
+{
+ double __d = *__a;
+ return (__m256d)(__v4df){ __d, __d, __d, __d };
+}
+
+static __inline __m256 __DEFAULT_FN_ATTRS
+_mm256_broadcast_ss(float const *__a)
+{
+ float __f = *__a;
+ return (__m256)(__v8sf){ __f, __f, __f, __f, __f, __f, __f, __f };
+}
+
+static __inline __m256d __DEFAULT_FN_ATTRS
+_mm256_broadcast_pd(__m128d const *__a)
+{
+ return (__m256d)__builtin_ia32_vbroadcastf128_pd256(__a);
+}
+
+static __inline __m256 __DEFAULT_FN_ATTRS
+_mm256_broadcast_ps(__m128 const *__a)
+{
+ return (__m256)__builtin_ia32_vbroadcastf128_ps256(__a);
+}
+
+/* SIMD load ops */
+static __inline __m256d __DEFAULT_FN_ATTRS
+_mm256_load_pd(double const *__p)
+{
+ return *(__m256d *)__p;
+}
+
+static __inline __m256 __DEFAULT_FN_ATTRS
+_mm256_load_ps(float const *__p)
+{
+ return *(__m256 *)__p;
+}
+
+static __inline __m256d __DEFAULT_FN_ATTRS
+_mm256_loadu_pd(double const *__p)
+{
+ struct __loadu_pd {
+ __m256d __v;
+ } __attribute__((__packed__, __may_alias__));
+ return ((struct __loadu_pd*)__p)->__v;
+}
+
+static __inline __m256 __DEFAULT_FN_ATTRS
+_mm256_loadu_ps(float const *__p)
+{
+ struct __loadu_ps {
+ __m256 __v;
+ } __attribute__((__packed__, __may_alias__));
+ return ((struct __loadu_ps*)__p)->__v;
+}
+
+static __inline __m256i __DEFAULT_FN_ATTRS
+_mm256_load_si256(__m256i const *__p)
+{
+ return *__p;
+}
+
+static __inline __m256i __DEFAULT_FN_ATTRS
+_mm256_loadu_si256(__m256i const *__p)
+{
+ struct __loadu_si256 {
+ __m256i __v;
+ } __attribute__((__packed__, __may_alias__));
+ return ((struct __loadu_si256*)__p)->__v;
+}
+
+static __inline __m256i __DEFAULT_FN_ATTRS
+_mm256_lddqu_si256(__m256i const *__p)
+{
+ return (__m256i)__builtin_ia32_lddqu256((char const *)__p);
+}
+
+/* SIMD store ops */
+static __inline void __DEFAULT_FN_ATTRS
+_mm256_store_pd(double *__p, __m256d __a)
+{
+ *(__m256d *)__p = __a;
+}
+
+static __inline void __DEFAULT_FN_ATTRS
+_mm256_store_ps(float *__p, __m256 __a)
+{
+ *(__m256 *)__p = __a;
+}
+
+static __inline void __DEFAULT_FN_ATTRS
+_mm256_storeu_pd(double *__p, __m256d __a)
+{
+ __builtin_ia32_storeupd256(__p, (__v4df)__a);
+}
+
+static __inline void __DEFAULT_FN_ATTRS
+_mm256_storeu_ps(float *__p, __m256 __a)
+{
+ __builtin_ia32_storeups256(__p, (__v8sf)__a);
+}
+
+static __inline void __DEFAULT_FN_ATTRS
+_mm256_store_si256(__m256i *__p, __m256i __a)
+{
+ *__p = __a;
+}
+
+static __inline void __DEFAULT_FN_ATTRS
+_mm256_storeu_si256(__m256i *__p, __m256i __a)
+{
+ __builtin_ia32_storedqu256((char *)__p, (__v32qi)__a);
+}
+
+/* Conditional load ops */
+static __inline __m128d __DEFAULT_FN_ATTRS
+_mm_maskload_pd(double const *__p, __m128i __m)
+{
+ return (__m128d)__builtin_ia32_maskloadpd((const __v2df *)__p, (__v2di)__m);
+}
+
+static __inline __m256d __DEFAULT_FN_ATTRS
+_mm256_maskload_pd(double const *__p, __m256i __m)
+{
+ return (__m256d)__builtin_ia32_maskloadpd256((const __v4df *)__p,
+ (__v4di)__m);
+}
+
+static __inline __m128 __DEFAULT_FN_ATTRS
+_mm_maskload_ps(float const *__p, __m128i __m)
+{
+ return (__m128)__builtin_ia32_maskloadps((const __v4sf *)__p, (__v4si)__m);
+}
+
+static __inline __m256 __DEFAULT_FN_ATTRS
+_mm256_maskload_ps(float const *__p, __m256i __m)
+{
+ return (__m256)__builtin_ia32_maskloadps256((const __v8sf *)__p, (__v8si)__m);
+}
+
+/* Conditional store ops */
+static __inline void __DEFAULT_FN_ATTRS
+_mm256_maskstore_ps(float *__p, __m256i __m, __m256 __a)
+{
+ __builtin_ia32_maskstoreps256((__v8sf *)__p, (__v8si)__m, (__v8sf)__a);
+}
+
+static __inline void __DEFAULT_FN_ATTRS
+_mm_maskstore_pd(double *__p, __m128i __m, __m128d __a)
+{
+ __builtin_ia32_maskstorepd((__v2df *)__p, (__v2di)__m, (__v2df)__a);
+}
+
+static __inline void __DEFAULT_FN_ATTRS
+_mm256_maskstore_pd(double *__p, __m256i __m, __m256d __a)
+{
+ __builtin_ia32_maskstorepd256((__v4df *)__p, (__v4di)__m, (__v4df)__a);
+}
+
+static __inline void __DEFAULT_FN_ATTRS
+_mm_maskstore_ps(float *__p, __m128i __m, __m128 __a)
+{
+ __builtin_ia32_maskstoreps((__v4sf *)__p, (__v4si)__m, (__v4sf)__a);
+}
+
+/* Cacheability support ops */
+static __inline void __DEFAULT_FN_ATTRS
+_mm256_stream_si256(__m256i *__a, __m256i __b)
+{
+ __builtin_ia32_movntdq256((__v4di *)__a, (__v4di)__b);
+}
+
+static __inline void __DEFAULT_FN_ATTRS
+_mm256_stream_pd(double *__a, __m256d __b)
+{
+ __builtin_ia32_movntpd256(__a, (__v4df)__b);
+}
+
+static __inline void __DEFAULT_FN_ATTRS
+_mm256_stream_ps(float *__p, __m256 __a)
+{
+ __builtin_ia32_movntps256(__p, (__v8sf)__a);
+}
+
+/* Create vectors */
+static __inline__ __m256d __DEFAULT_FN_ATTRS
+_mm256_undefined_pd(void)
+{
+ return (__m256d)__builtin_ia32_undef256();
+}
+
+static __inline__ __m256 __DEFAULT_FN_ATTRS
+_mm256_undefined_ps(void)
+{
+ return (__m256)__builtin_ia32_undef256();
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS
+_mm256_undefined_si256(void)
+{
+ return (__m256i)__builtin_ia32_undef256();
+}
+
+static __inline __m256d __DEFAULT_FN_ATTRS
+_mm256_set_pd(double __a, double __b, double __c, double __d)
+{
+ return (__m256d){ __d, __c, __b, __a };
+}
+
+static __inline __m256 __DEFAULT_FN_ATTRS
+_mm256_set_ps(float __a, float __b, float __c, float __d,
+ float __e, float __f, float __g, float __h)
+{
+ return (__m256){ __h, __g, __f, __e, __d, __c, __b, __a };
+}
+
+static __inline __m256i __DEFAULT_FN_ATTRS
+_mm256_set_epi32(int __i0, int __i1, int __i2, int __i3,
+ int __i4, int __i5, int __i6, int __i7)
+{
+ return (__m256i)(__v8si){ __i7, __i6, __i5, __i4, __i3, __i2, __i1, __i0 };
+}
+
+static __inline __m256i __DEFAULT_FN_ATTRS
+_mm256_set_epi16(short __w15, short __w14, short __w13, short __w12,
+ short __w11, short __w10, short __w09, short __w08,
+ short __w07, short __w06, short __w05, short __w04,
+ short __w03, short __w02, short __w01, short __w00)
+{
+ return (__m256i)(__v16hi){ __w00, __w01, __w02, __w03, __w04, __w05, __w06,
+ __w07, __w08, __w09, __w10, __w11, __w12, __w13, __w14, __w15 };
+}
+
+static __inline __m256i __DEFAULT_FN_ATTRS
+_mm256_set_epi8(char __b31, char __b30, char __b29, char __b28,
+ char __b27, char __b26, char __b25, char __b24,
+ char __b23, char __b22, char __b21, char __b20,
+ char __b19, char __b18, char __b17, char __b16,
+ char __b15, char __b14, char __b13, char __b12,
+ char __b11, char __b10, char __b09, char __b08,
+ char __b07, char __b06, char __b05, char __b04,
+ char __b03, char __b02, char __b01, char __b00)
+{
+ return (__m256i)(__v32qi){
+ __b00, __b01, __b02, __b03, __b04, __b05, __b06, __b07,
+ __b08, __b09, __b10, __b11, __b12, __b13, __b14, __b15,
+ __b16, __b17, __b18, __b19, __b20, __b21, __b22, __b23,
+ __b24, __b25, __b26, __b27, __b28, __b29, __b30, __b31
+ };
+}
+
+static __inline __m256i __DEFAULT_FN_ATTRS
+_mm256_set_epi64x(long long __a, long long __b, long long __c, long long __d)
+{
+ return (__m256i)(__v4di){ __d, __c, __b, __a };
+}
+
+/* Create vectors with elements in reverse order */
+static __inline __m256d __DEFAULT_FN_ATTRS
+_mm256_setr_pd(double __a, double __b, double __c, double __d)
+{
+ return (__m256d){ __a, __b, __c, __d };
+}
+
+static __inline __m256 __DEFAULT_FN_ATTRS
+_mm256_setr_ps(float __a, float __b, float __c, float __d,
+ float __e, float __f, float __g, float __h)
+{
+ return (__m256){ __a, __b, __c, __d, __e, __f, __g, __h };
+}
+
+static __inline __m256i __DEFAULT_FN_ATTRS
+_mm256_setr_epi32(int __i0, int __i1, int __i2, int __i3,
+ int __i4, int __i5, int __i6, int __i7)
+{
+ return (__m256i)(__v8si){ __i0, __i1, __i2, __i3, __i4, __i5, __i6, __i7 };
+}
+
+static __inline __m256i __DEFAULT_FN_ATTRS
+_mm256_setr_epi16(short __w15, short __w14, short __w13, short __w12,
+ short __w11, short __w10, short __w09, short __w08,
+ short __w07, short __w06, short __w05, short __w04,
+ short __w03, short __w02, short __w01, short __w00)
+{
+ return (__m256i)(__v16hi){ __w15, __w14, __w13, __w12, __w11, __w10, __w09,
+ __w08, __w07, __w06, __w05, __w04, __w03, __w02, __w01, __w00 };
+}
+
+static __inline __m256i __DEFAULT_FN_ATTRS
+_mm256_setr_epi8(char __b31, char __b30, char __b29, char __b28,
+ char __b27, char __b26, char __b25, char __b24,
+ char __b23, char __b22, char __b21, char __b20,
+ char __b19, char __b18, char __b17, char __b16,
+ char __b15, char __b14, char __b13, char __b12,
+ char __b11, char __b10, char __b09, char __b08,
+ char __b07, char __b06, char __b05, char __b04,
+ char __b03, char __b02, char __b01, char __b00)
+{
+ return (__m256i)(__v32qi){
+ __b31, __b30, __b29, __b28, __b27, __b26, __b25, __b24,
+ __b23, __b22, __b21, __b20, __b19, __b18, __b17, __b16,
+ __b15, __b14, __b13, __b12, __b11, __b10, __b09, __b08,
+ __b07, __b06, __b05, __b04, __b03, __b02, __b01, __b00 };
+}
+
+static __inline __m256i __DEFAULT_FN_ATTRS
+_mm256_setr_epi64x(long long __a, long long __b, long long __c, long long __d)
+{
+ return (__m256i)(__v4di){ __a, __b, __c, __d };
+}
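+
+/* Usage sketch (hypothetical helper): _mm256_set_* takes its arguments
+ * from the highest element down, _mm256_setr_* from element 0 up, so
+ * _mm256_set_pd(3.0, 2.0, 1.0, 0.0) and the call below build the same
+ * vector { 0.0, 1.0, 2.0, 3.0 }: */
+static __inline __m256d __DEFAULT_FN_ATTRS
+__example_iota_pd(void)
+{
+  return _mm256_setr_pd(0.0, 1.0, 2.0, 3.0);
+}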
+
+/* Create vectors with repeated elements */
+static __inline __m256d __DEFAULT_FN_ATTRS
+_mm256_set1_pd(double __w)
+{
+ return (__m256d){ __w, __w, __w, __w };
+}
+
+static __inline __m256 __DEFAULT_FN_ATTRS
+_mm256_set1_ps(float __w)
+{
+ return (__m256){ __w, __w, __w, __w, __w, __w, __w, __w };
+}
+
+static __inline __m256i __DEFAULT_FN_ATTRS
+_mm256_set1_epi32(int __i)
+{
+ return (__m256i)(__v8si){ __i, __i, __i, __i, __i, __i, __i, __i };
+}
+
+static __inline __m256i __DEFAULT_FN_ATTRS
+_mm256_set1_epi16(short __w)
+{
+ return (__m256i)(__v16hi){ __w, __w, __w, __w, __w, __w, __w, __w, __w, __w,
+ __w, __w, __w, __w, __w, __w };
+}
+
+static __inline __m256i __DEFAULT_FN_ATTRS
+_mm256_set1_epi8(char __b)
+{
+ return (__m256i)(__v32qi){ __b, __b, __b, __b, __b, __b, __b, __b, __b, __b,
+ __b, __b, __b, __b, __b, __b, __b, __b, __b, __b, __b, __b, __b, __b, __b,
+ __b, __b, __b, __b, __b, __b, __b };
+}
+
+static __inline __m256i __DEFAULT_FN_ATTRS
+_mm256_set1_epi64x(long long __q)
+{
+ return (__m256i)(__v4di){ __q, __q, __q, __q };
+}
+
+/* Create zeroed vectors */
+static __inline __m256d __DEFAULT_FN_ATTRS
+_mm256_setzero_pd(void)
+{
+ return (__m256d){ 0, 0, 0, 0 };
+}
+
+static __inline __m256 __DEFAULT_FN_ATTRS
+_mm256_setzero_ps(void)
+{
+ return (__m256){ 0, 0, 0, 0, 0, 0, 0, 0 };
+}
+
+static __inline __m256i __DEFAULT_FN_ATTRS
+_mm256_setzero_si256(void)
+{
+ return (__m256i){ 0LL, 0LL, 0LL, 0LL };
+}
+
+/* Cast between vector types */
+static __inline __m256 __DEFAULT_FN_ATTRS
+_mm256_castpd_ps(__m256d __a)
+{
+ return (__m256)__a;
+}
+
+static __inline __m256i __DEFAULT_FN_ATTRS
+_mm256_castpd_si256(__m256d __a)
+{
+ return (__m256i)__a;
+}
+
+static __inline __m256d __DEFAULT_FN_ATTRS
+_mm256_castps_pd(__m256 __a)
+{
+ return (__m256d)__a;
+}
+
+static __inline __m256i __DEFAULT_FN_ATTRS
+_mm256_castps_si256(__m256 __a)
+{
+ return (__m256i)__a;
+}
+
+static __inline __m256 __DEFAULT_FN_ATTRS
+_mm256_castsi256_ps(__m256i __a)
+{
+ return (__m256)__a;
+}
+
+static __inline __m256d __DEFAULT_FN_ATTRS
+_mm256_castsi256_pd(__m256i __a)
+{
+ return (__m256d)__a;
+}
+
+static __inline __m128d __DEFAULT_FN_ATTRS
+_mm256_castpd256_pd128(__m256d __a)
+{
+ return __builtin_shufflevector(__a, __a, 0, 1);
+}
+
+static __inline __m128 __DEFAULT_FN_ATTRS
+_mm256_castps256_ps128(__m256 __a)
+{
+ return __builtin_shufflevector(__a, __a, 0, 1, 2, 3);
+}
+
+static __inline __m128i __DEFAULT_FN_ATTRS
+_mm256_castsi256_si128(__m256i __a)
+{
+ return __builtin_shufflevector(__a, __a, 0, 1);
+}
+
+static __inline __m256d __DEFAULT_FN_ATTRS
+_mm256_castpd128_pd256(__m128d __a)
+{
+ return __builtin_shufflevector(__a, __a, 0, 1, -1, -1);
+}
+
+static __inline __m256 __DEFAULT_FN_ATTRS
+_mm256_castps128_ps256(__m128 __a)
+{
+ return __builtin_shufflevector(__a, __a, 0, 1, 2, 3, -1, -1, -1, -1);
+}
+
+static __inline __m256i __DEFAULT_FN_ATTRS
+_mm256_castsi128_si256(__m128i __a)
+{
+ return __builtin_shufflevector(__a, __a, 0, 1, -1, -1);
+}
+
+/*
+ Vector insert.
+ We use macros rather than inlines because we only want to accept
+ invocations where the immediate M is a constant expression.
+*/
+#define _mm256_insertf128_ps(V1, V2, M) __extension__ ({ \
+ (__m256)__builtin_shufflevector( \
+ (__v8sf)(__m256)(V1), \
+ (__v8sf)_mm256_castps128_ps256((__m128)(V2)), \
+ (((M) & 1) ? 0 : 8), \
+ (((M) & 1) ? 1 : 9), \
+ (((M) & 1) ? 2 : 10), \
+ (((M) & 1) ? 3 : 11), \
+ (((M) & 1) ? 8 : 4), \
+ (((M) & 1) ? 9 : 5), \
+ (((M) & 1) ? 10 : 6), \
+ (((M) & 1) ? 11 : 7) );})
+
+#define _mm256_insertf128_pd(V1, V2, M) __extension__ ({ \
+ (__m256d)__builtin_shufflevector( \
+ (__v4df)(__m256d)(V1), \
+ (__v4df)_mm256_castpd128_pd256((__m128d)(V2)), \
+ (((M) & 1) ? 0 : 4), \
+ (((M) & 1) ? 1 : 5), \
+ (((M) & 1) ? 4 : 2), \
+ (((M) & 1) ? 5 : 3) );})
+
+#define _mm256_insertf128_si256(V1, V2, M) __extension__ ({ \
+ (__m256i)__builtin_shufflevector( \
+ (__v4di)(__m256i)(V1), \
+ (__v4di)_mm256_castsi128_si256((__m128i)(V2)), \
+ (((M) & 1) ? 0 : 4), \
+ (((M) & 1) ? 1 : 5), \
+ (((M) & 1) ? 4 : 2), \
+ (((M) & 1) ? 5 : 3) );})
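+
+/* Usage sketch (hypothetical helper): the lane selector must be a constant
+ * expression, which is why these are macros; a selector of 1 replaces the
+ * high 128-bit lane (elements 4..7 for floats): */
+static __inline __m256 __DEFAULT_FN_ATTRS
+__example_set_high_lane_ps(__m256 __v, __m128 __hi)
+{
+  return _mm256_insertf128_ps(__v, __hi, 1);
+}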
+
+/*
+ Vector extract.
+ We use macros rather than inlines because we only want to accept
+ invocations where the immediate M is a constant expression.
+*/
+#define _mm256_extractf128_ps(V, M) __extension__ ({ \
+ (__m128)__builtin_shufflevector( \
+ (__v8sf)(__m256)(V), \
+ (__v8sf)(_mm256_setzero_ps()), \
+ (((M) & 1) ? 4 : 0), \
+ (((M) & 1) ? 5 : 1), \
+ (((M) & 1) ? 6 : 2), \
+ (((M) & 1) ? 7 : 3) );})
+
+#define _mm256_extractf128_pd(V, M) __extension__ ({ \
+ (__m128d)__builtin_shufflevector( \
+ (__v4df)(__m256d)(V), \
+ (__v4df)(_mm256_setzero_pd()), \
+ (((M) & 1) ? 2 : 0), \
+ (((M) & 1) ? 3 : 1) );})
+
+#define _mm256_extractf128_si256(V, M) __extension__ ({ \
+ (__m128i)__builtin_shufflevector( \
+ (__v4di)(__m256i)(V), \
+ (__v4di)(_mm256_setzero_si256()), \
+ (((M) & 1) ? 2 : 0), \
+ (((M) & 1) ? 3 : 1) );})
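+
+/* Usage sketch (hypothetical helper): extraction likewise needs a constant
+ * lane index; lane 0 could equally be taken with the cheaper
+ * _mm256_castpd256_pd128. */
+static __inline __m128d __DEFAULT_FN_ATTRS
+__example_high_lane_pd(__m256d __v)
+{
+  return _mm256_extractf128_pd(__v, 1); /* elements 2..3 */
+}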
+
+/* SIMD load ops (unaligned) */
+static __inline __m256 __DEFAULT_FN_ATTRS
+_mm256_loadu2_m128(float const *__addr_hi, float const *__addr_lo)
+{
+ struct __loadu_ps {
+ __m128 __v;
+ } __attribute__((__packed__, __may_alias__));
+
+ __m256 __v256 = _mm256_castps128_ps256(((struct __loadu_ps*)__addr_lo)->__v);
+ return _mm256_insertf128_ps(__v256, ((struct __loadu_ps*)__addr_hi)->__v, 1);
+}
+
+static __inline __m256d __DEFAULT_FN_ATTRS
+_mm256_loadu2_m128d(double const *__addr_hi, double const *__addr_lo)
+{
+ struct __loadu_pd {
+ __m128d __v;
+ } __attribute__((__packed__, __may_alias__));
+
+ __m256d __v256 = _mm256_castpd128_pd256(((struct __loadu_pd*)__addr_lo)->__v);
+ return _mm256_insertf128_pd(__v256, ((struct __loadu_pd*)__addr_hi)->__v, 1);
+}
+
+static __inline __m256i __DEFAULT_FN_ATTRS
+_mm256_loadu2_m128i(__m128i const *__addr_hi, __m128i const *__addr_lo)
+{
+ struct __loadu_si128 {
+ __m128i __v;
+ } __attribute__((__packed__, __may_alias__));
+ __m256i __v256 = _mm256_castsi128_si256(
+ ((struct __loadu_si128*)__addr_lo)->__v);
+ return _mm256_insertf128_si256(__v256,
+ ((struct __loadu_si128*)__addr_hi)->__v, 1);
+}
+
+/* SIMD store ops (unaligned) */
+static __inline void __DEFAULT_FN_ATTRS
+_mm256_storeu2_m128(float *__addr_hi, float *__addr_lo, __m256 __a)
+{
+ __m128 __v128;
+
+ __v128 = _mm256_castps256_ps128(__a);
+ __builtin_ia32_storeups(__addr_lo, __v128);
+ __v128 = _mm256_extractf128_ps(__a, 1);
+ __builtin_ia32_storeups(__addr_hi, __v128);
+}
+
+static __inline void __DEFAULT_FN_ATTRS
+_mm256_storeu2_m128d(double *__addr_hi, double *__addr_lo, __m256d __a)
+{
+ __m128d __v128;
+
+ __v128 = _mm256_castpd256_pd128(__a);
+ __builtin_ia32_storeupd(__addr_lo, __v128);
+ __v128 = _mm256_extractf128_pd(__a, 1);
+ __builtin_ia32_storeupd(__addr_hi, __v128);
+}
+
+static __inline void __DEFAULT_FN_ATTRS
+_mm256_storeu2_m128i(__m128i *__addr_hi, __m128i *__addr_lo, __m256i __a)
+{
+ __m128i __v128;
+
+ __v128 = _mm256_castsi256_si128(__a);
+ __builtin_ia32_storedqu((char *)__addr_lo, (__v16qi)__v128);
+ __v128 = _mm256_extractf128_si256(__a, 1);
+ __builtin_ia32_storedqu((char *)__addr_hi, (__v16qi)__v128);
+}
+
+static __inline __m256 __DEFAULT_FN_ATTRS
+_mm256_set_m128 (__m128 __hi, __m128 __lo) {
+ return (__m256) __builtin_shufflevector(__lo, __hi, 0, 1, 2, 3, 4, 5, 6, 7);
+}
+
+static __inline __m256d __DEFAULT_FN_ATTRS
+_mm256_set_m128d (__m128d __hi, __m128d __lo) {
+ return (__m256d)_mm256_set_m128((__m128)__hi, (__m128)__lo);
+}
+
+static __inline __m256i __DEFAULT_FN_ATTRS
+_mm256_set_m128i (__m128i __hi, __m128i __lo) {
+ return (__m256i)_mm256_set_m128((__m128)__hi, (__m128)__lo);
+}
+
+static __inline __m256 __DEFAULT_FN_ATTRS
+_mm256_setr_m128 (__m128 __lo, __m128 __hi) {
+ return _mm256_set_m128(__hi, __lo);
+}
+
+static __inline __m256d __DEFAULT_FN_ATTRS
+_mm256_setr_m128d (__m128d __lo, __m128d __hi) {
+ return (__m256d)_mm256_set_m128((__m128)__hi, (__m128)__lo);
+}
+
+static __inline __m256i __DEFAULT_FN_ATTRS
+_mm256_setr_m128i (__m128i __lo, __m128i __hi) {
+ return (__m256i)_mm256_set_m128((__m128)__hi, (__m128)__lo);
+}
+
+#undef __DEFAULT_FN_ATTRS
+
+#endif /* __AVXINTRIN_H */
diff --git a/clang-2690385/lib64/clang/3.8/include/bmi2intrin.h b/clang-2690385/lib64/clang/3.8/include/bmi2intrin.h
new file mode 100644
index 00000000..fdae82cf
--- /dev/null
+++ b/clang-2690385/lib64/clang/3.8/include/bmi2intrin.h
@@ -0,0 +1,95 @@
+/*===---- bmi2intrin.h - BMI2 intrinsics -----------------------------------===
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+ * THE SOFTWARE.
+ *
+ *===-----------------------------------------------------------------------===
+ */
+
+#if !defined __X86INTRIN_H && !defined __IMMINTRIN_H
+#error "Never use <bmi2intrin.h> directly; include <x86intrin.h> instead."
+#endif
+
+#ifndef __BMI2INTRIN_H
+#define __BMI2INTRIN_H
+
+/* Define the default attributes for the functions in this file. */
+#define __DEFAULT_FN_ATTRS __attribute__((__always_inline__, __nodebug__, __target__("bmi2")))
+
+static __inline__ unsigned int __DEFAULT_FN_ATTRS
+_bzhi_u32(unsigned int __X, unsigned int __Y)
+{
+ return __builtin_ia32_bzhi_si(__X, __Y);
+}
+
+static __inline__ unsigned int __DEFAULT_FN_ATTRS
+_pdep_u32(unsigned int __X, unsigned int __Y)
+{
+ return __builtin_ia32_pdep_si(__X, __Y);
+}
+
+static __inline__ unsigned int __DEFAULT_FN_ATTRS
+_pext_u32(unsigned int __X, unsigned int __Y)
+{
+ return __builtin_ia32_pext_si(__X, __Y);
+}
+
+#ifdef __x86_64__
+
+static __inline__ unsigned long long __DEFAULT_FN_ATTRS
+_bzhi_u64(unsigned long long __X, unsigned long long __Y)
+{
+ return __builtin_ia32_bzhi_di(__X, __Y);
+}
+
+static __inline__ unsigned long long __DEFAULT_FN_ATTRS
+_pdep_u64(unsigned long long __X, unsigned long long __Y)
+{
+ return __builtin_ia32_pdep_di(__X, __Y);
+}
+
+static __inline__ unsigned long long __DEFAULT_FN_ATTRS
+_pext_u64(unsigned long long __X, unsigned long long __Y)
+{
+ return __builtin_ia32_pext_di(__X, __Y);
+}
+
+static __inline__ unsigned long long __DEFAULT_FN_ATTRS
+_mulx_u64 (unsigned long long __X, unsigned long long __Y,
+ unsigned long long *__P)
+{
+ unsigned __int128 __res = (unsigned __int128) __X * __Y;
+ *__P = (unsigned long long) (__res >> 64);
+ return (unsigned long long) __res;
+}
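+
+/* Usage sketch (hypothetical helper): _mulx_u64 returns the low 64 bits of
+ * the full 128-bit product and stores the high 64 bits through __P; e.g.
+ * (1ULL << 63) * 4 yields low word 0 and high word 2. */
+static __inline__ unsigned long long __DEFAULT_FN_ATTRS
+__example_mulhi_u64(unsigned long long __X, unsigned long long __Y)
+{
+  unsigned long long __hi;
+  (void) _mulx_u64(__X, __Y, &__hi);
+  return __hi; /* upper 64 bits of the 128-bit product */
+}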
+
+#else /* !__x86_64__ */
+
+static __inline__ unsigned int __DEFAULT_FN_ATTRS
+_mulx_u32 (unsigned int __X, unsigned int __Y, unsigned int *__P)
+{
+ unsigned long long __res = (unsigned long long) __X * __Y;
+ *__P = (unsigned int) (__res >> 32);
+ return (unsigned int) __res;
+}
+
+#endif /* !__x86_64__ */
+
+#undef __DEFAULT_FN_ATTRS
+
+#endif /* __BMI2INTRIN_H */
diff --git a/clang-2690385/lib64/clang/3.8/include/bmiintrin.h b/clang-2690385/lib64/clang/3.8/include/bmiintrin.h
new file mode 100644
index 00000000..da98792d
--- /dev/null
+++ b/clang-2690385/lib64/clang/3.8/include/bmiintrin.h
@@ -0,0 +1,155 @@
+/*===---- bmiintrin.h - BMI intrinsics -------------------------------------===
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+ * THE SOFTWARE.
+ *
+ *===-----------------------------------------------------------------------===
+ */
+
+#if !defined __X86INTRIN_H && !defined __IMMINTRIN_H
+#error "Never use <bmiintrin.h> directly; include <x86intrin.h> instead."
+#endif
+
+#ifndef __BMIINTRIN_H
+#define __BMIINTRIN_H
+
+#define _tzcnt_u16(a) (__tzcnt_u16((a)))
+#define _andn_u32(a, b) (__andn_u32((a), (b)))
+/* _bextr_u32 != __bextr_u32 */
+#define _blsi_u32(a) (__blsi_u32((a)))
+#define _blsmsk_u32(a) (__blsmsk_u32((a)))
+#define _blsr_u32(a) (__blsr_u32((a)))
+#define _tzcnt_u32(a) (__tzcnt_u32((a)))
+
+/* Define the default attributes for the functions in this file. */
+#define __DEFAULT_FN_ATTRS __attribute__((__always_inline__, __nodebug__, __target__("bmi")))
+
+/* Allow using the tzcnt intrinsics even for non-BMI targets. Since the TZCNT
+ instruction behaves as BSF on non-BMI targets, there is code that expects
+ to use it as a potentially faster version of BSF. */
+#define __RELAXED_FN_ATTRS __attribute__((__always_inline__, __nodebug__))
+
+static __inline__ unsigned short __RELAXED_FN_ATTRS
+__tzcnt_u16(unsigned short __X)
+{
+ return __X ? __builtin_ctzs(__X) : 16;
+}
+
+static __inline__ unsigned int __DEFAULT_FN_ATTRS
+__andn_u32(unsigned int __X, unsigned int __Y)
+{
+ return ~__X & __Y;
+}
+
+/* AMD-specified, double-leading-underscore version of BEXTR */
+static __inline__ unsigned int __DEFAULT_FN_ATTRS
+__bextr_u32(unsigned int __X, unsigned int __Y)
+{
+ return __builtin_ia32_bextr_u32(__X, __Y);
+}
+
+/* Intel-specified, single-leading-underscore version of BEXTR */
+static __inline__ unsigned int __DEFAULT_FN_ATTRS
+_bextr_u32(unsigned int __X, unsigned int __Y, unsigned int __Z)
+{
+ return __builtin_ia32_bextr_u32 (__X, ((__Y & 0xff) | ((__Z & 0xff) << 8)));
+}
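+
+/* Note: the two spellings consume the same control encoding; the Intel form
+ * packs the start bit and length for you, so _bextr_u32(x, 4, 8) equals
+ * __bextr_u32(x, 4 | (8 << 8)), both extracting 8 bits starting at bit 4. */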
+
+static __inline__ unsigned int __DEFAULT_FN_ATTRS
+__blsi_u32(unsigned int __X)
+{
+ return __X & -__X;
+}
+
+static __inline__ unsigned int __DEFAULT_FN_ATTRS
+__blsmsk_u32(unsigned int __X)
+{
+ return __X ^ (__X - 1);
+}
+
+static __inline__ unsigned int __DEFAULT_FN_ATTRS
+__blsr_u32(unsigned int __X)
+{
+ return __X & (__X - 1);
+}
+
+static __inline__ unsigned int __RELAXED_FN_ATTRS
+__tzcnt_u32(unsigned int __X)
+{
+ return __X ? __builtin_ctz(__X) : 32;
+}
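+
+/* Note: unlike __builtin_ctz, whose result is undefined for a zero input,
+ * these wrappers define it as the operand width: __tzcnt_u32(0) == 32 and
+ * __tzcnt_u32(8) == 3. */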
+
+#ifdef __x86_64__
+
+#define _andn_u64(a, b) (__andn_u64((a), (b)))
+/* _bextr_u64 != __bextr_u64 */
+#define _blsi_u64(a) (__blsi_u64((a)))
+#define _blsmsk_u64(a) (__blsmsk_u64((a)))
+#define _blsr_u64(a) (__blsr_u64((a)))
+#define _tzcnt_u64(a) (__tzcnt_u64((a)))
+
+static __inline__ unsigned long long __DEFAULT_FN_ATTRS
+__andn_u64 (unsigned long long __X, unsigned long long __Y)
+{
+ return ~__X & __Y;
+}
+
+/* AMD-specified, double-leading-underscore version of BEXTR */
+static __inline__ unsigned long long __DEFAULT_FN_ATTRS
+__bextr_u64(unsigned long long __X, unsigned long long __Y)
+{
+ return __builtin_ia32_bextr_u64(__X, __Y);
+}
+
+/* Intel-specified, single-leading-underscore version of BEXTR */
+static __inline__ unsigned long long __DEFAULT_FN_ATTRS
+_bextr_u64(unsigned long long __X, unsigned int __Y, unsigned int __Z)
+{
+ return __builtin_ia32_bextr_u64 (__X, ((__Y & 0xff) | ((__Z & 0xff) << 8)));
+}
+
+static __inline__ unsigned long long __DEFAULT_FN_ATTRS
+__blsi_u64(unsigned long long __X)
+{
+ return __X & -__X;
+}
+
+static __inline__ unsigned long long __DEFAULT_FN_ATTRS
+__blsmsk_u64(unsigned long long __X)
+{
+ return __X ^ (__X - 1);
+}
+
+static __inline__ unsigned long long __DEFAULT_FN_ATTRS
+__blsr_u64(unsigned long long __X)
+{
+ return __X & (__X - 1);
+}
+
+static __inline__ unsigned long long __RELAXED_FN_ATTRS
+__tzcnt_u64(unsigned long long __X)
+{
+ return __X ? __builtin_ctzll(__X) : 64;
+}
+
+#endif /* __x86_64__ */
+
+#undef __DEFAULT_FN_ATTRS
+#undef __RELAXED_FN_ATTRS
+
+#endif /* __BMIINTRIN_H */
diff --git a/clang-2690385/lib64/clang/3.8/include/cpuid.h b/clang-2690385/lib64/clang/3.8/include/cpuid.h
new file mode 100644
index 00000000..5da02e0e
--- /dev/null
+++ b/clang-2690385/lib64/clang/3.8/include/cpuid.h
@@ -0,0 +1,209 @@
+/*===---- cpuid.h - X86 cpu model detection --------------------------------===
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+ * THE SOFTWARE.
+ *
+ *===-----------------------------------------------------------------------===
+ */
+
+#if !(__x86_64__ || __i386__)
+#error this header is for x86 only
+#endif
+
+/* Responses to an identification request with %eax set to 0 */
+/* AMD: "AuthenticAMD" */
+#define signature_AMD_ebx 0x68747541
+#define signature_AMD_edx 0x69746e65
+#define signature_AMD_ecx 0x444d4163
+/* CENTAUR: "CentaurHauls" */
+#define signature_CENTAUR_ebx 0x746e6543
+#define signature_CENTAUR_edx 0x48727561
+#define signature_CENTAUR_ecx 0x736c7561
+/* CYRIX: "CyrixInstead" */
+#define signature_CYRIX_ebx 0x69727943
+#define signature_CYRIX_edx 0x736e4978
+#define signature_CYRIX_ecx 0x64616574
+/* INTEL: "GenuineIntel" */
+#define signature_INTEL_ebx 0x756e6547
+#define signature_INTEL_edx 0x49656e69
+#define signature_INTEL_ecx 0x6c65746e
+/* TM1: "TransmetaCPU" */
+#define signature_TM1_ebx 0x6e617254
+#define signature_TM1_edx 0x74656d73
+#define signature_TM1_ecx 0x55504361
+/* TM2: "GenuineTMx86" */
+#define signature_TM2_ebx 0x756e6547
+#define signature_TM2_edx 0x54656e69
+#define signature_TM2_ecx 0x3638784d
+/* NSC: "Geode by NSC" */
+#define signature_NSC_ebx 0x646f6547
+#define signature_NSC_edx 0x43534e20
+#define signature_NSC_ecx 0x79622065
+/* NEXGEN: "NexGenDriven" */
+#define signature_NEXGEN_ebx 0x4778654e
+#define signature_NEXGEN_edx 0x72446e65
+#define signature_NEXGEN_ecx 0x6e657669
+/* RISE: "RiseRiseRise" */
+#define signature_RISE_ebx 0x65736952
+#define signature_RISE_edx 0x65736952
+#define signature_RISE_ecx 0x65736952
+/* SIS: "SiS SiS SiS " */
+#define signature_SIS_ebx 0x20536953
+#define signature_SIS_edx 0x20536953
+#define signature_SIS_ecx 0x20536953
+/* UMC: "UMC UMC UMC " */
+#define signature_UMC_ebx 0x20434d55
+#define signature_UMC_edx 0x20434d55
+#define signature_UMC_ecx 0x20434d55
+/* VIA: "VIA VIA VIA " */
+#define signature_VIA_ebx 0x20414956
+#define signature_VIA_edx 0x20414956
+#define signature_VIA_ecx 0x20414956
+/* VORTEX: "Vortex86 SoC" */
+#define signature_VORTEX_ebx 0x74726f56
+#define signature_VORTEX_edx 0x36387865
+#define signature_VORTEX_ecx 0x436f5320
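+
+/* Encoding note (illustrative): the 12-byte vendor string is returned in
+ * %ebx, %edx, %ecx, four little-endian bytes per register.  For
+ * "AuthenticAMD": "Auth" -> 0x68747541 (%ebx), "enti" -> 0x69746e65 (%edx),
+ * "cAMD" -> 0x444d4163 (%ecx), matching the signature_AMD_* values above. */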
+
+/* Features in %ecx for level 1 */
+#define bit_SSE3 0x00000001
+#define bit_PCLMULQDQ 0x00000002
+#define bit_DTES64 0x00000004
+#define bit_MONITOR 0x00000008
+#define bit_DSCPL 0x00000010
+#define bit_VMX 0x00000020
+#define bit_SMX 0x00000040
+#define bit_EIST 0x00000080
+#define bit_TM2 0x00000100
+#define bit_SSSE3 0x00000200
+#define bit_CNXTID 0x00000400
+#define bit_FMA 0x00001000
+#define bit_CMPXCHG16B 0x00002000
+#define bit_xTPR 0x00004000
+#define bit_PDCM 0x00008000
+#define bit_PCID 0x00020000
+#define bit_DCA 0x00040000
+#define bit_SSE41 0x00080000
+#define bit_SSE42 0x00100000
+#define bit_x2APIC 0x00200000
+#define bit_MOVBE 0x00400000
+#define bit_POPCNT 0x00800000
+#define bit_TSCDeadline 0x01000000
+#define bit_AESNI 0x02000000
+#define bit_XSAVE 0x04000000
+#define bit_OSXSAVE 0x08000000
+#define bit_AVX 0x10000000
+#define bit_RDRND 0x40000000
+
+/* Features in %edx for level 1 */
+#define bit_FPU 0x00000001
+#define bit_VME 0x00000002
+#define bit_DE 0x00000004
+#define bit_PSE 0x00000008
+#define bit_TSC 0x00000010
+#define bit_MSR 0x00000020
+#define bit_PAE 0x00000040
+#define bit_MCE 0x00000080
+#define bit_CX8 0x00000100
+#define bit_APIC 0x00000200
+#define bit_SEP 0x00000800
+#define bit_MTRR 0x00001000
+#define bit_PGE 0x00002000
+#define bit_MCA 0x00004000
+#define bit_CMOV 0x00008000
+#define bit_PAT 0x00010000
+#define bit_PSE36 0x00020000
+#define bit_PSN 0x00040000
+#define bit_CLFSH 0x00080000
+#define bit_DS 0x00200000
+#define bit_ACPI 0x00400000
+#define bit_MMX 0x00800000
+#define bit_FXSR 0x01000000
+#define bit_FXSAVE bit_FXSR /* for gcc compat */
+#define bit_SSE 0x02000000
+#define bit_SSE2 0x04000000
+#define bit_SS 0x08000000
+#define bit_HTT 0x10000000
+#define bit_TM 0x20000000
+#define bit_PBE 0x80000000
+
+/* Features in %ebx for level 7 sub-leaf 0 */
+#define bit_FSGSBASE 0x00000001
+#define bit_SMEP 0x00000080
+#define bit_ENH_MOVSB 0x00000200
+
+#if __i386__
+#define __cpuid(__level, __eax, __ebx, __ecx, __edx) \
+ __asm("cpuid" : "=a"(__eax), "=b" (__ebx), "=c"(__ecx), "=d"(__edx) \
+ : "0"(__level))
+
+#define __cpuid_count(__level, __count, __eax, __ebx, __ecx, __edx) \
+ __asm("cpuid" : "=a"(__eax), "=b" (__ebx), "=c"(__ecx), "=d"(__edx) \
+ : "0"(__level), "2"(__count))
+#else
+/* x86-64 uses %rbx as the base register, so preserve it. */
+#define __cpuid(__level, __eax, __ebx, __ecx, __edx) \
+ __asm(" xchgq %%rbx,%q1\n" \
+ " cpuid\n" \
+ " xchgq %%rbx,%q1" \
+ : "=a"(__eax), "=r" (__ebx), "=c"(__ecx), "=d"(__edx) \
+ : "0"(__level))
+
+#define __cpuid_count(__level, __count, __eax, __ebx, __ecx, __edx) \
+ __asm(" xchgq %%rbx,%q1\n" \
+ " cpuid\n" \
+ " xchgq %%rbx,%q1" \
+ : "=a"(__eax), "=r" (__ebx), "=c"(__ecx), "=d"(__edx) \
+ : "0"(__level), "2"(__count))
+#endif
+
+static __inline int __get_cpuid (unsigned int __level, unsigned int *__eax,
+ unsigned int *__ebx, unsigned int *__ecx,
+ unsigned int *__edx) {
+ __cpuid(__level, *__eax, *__ebx, *__ecx, *__edx);
+ return 1;
+}
+
+static __inline int __get_cpuid_max (unsigned int __level, unsigned int *__sig)
+{
+ unsigned int __eax, __ebx, __ecx, __edx;
+#if __i386__
+ int __cpuid_supported;
+
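+  /* Toggle bit 21 (the ID flag) in EFLAGS; CPUID is supported iff the
+     change sticks.  This probe is only needed on 32-bit x86. */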
+ __asm(" pushfl\n"
+ " popl %%eax\n"
+ " movl %%eax,%%ecx\n"
+ " xorl $0x00200000,%%eax\n"
+ " pushl %%eax\n"
+ " popfl\n"
+ " pushfl\n"
+ " popl %%eax\n"
+ " movl $0,%0\n"
+ " cmpl %%eax,%%ecx\n"
+ " je 1f\n"
+ " movl $1,%0\n"
+ "1:"
+ : "=r" (__cpuid_supported) : : "eax", "ecx");
+ if (!__cpuid_supported)
+ return 0;
+#endif
+
+ __cpuid(__level, __eax, __ebx, __ecx, __edx);
+ if (__sig)
+ *__sig = __ebx;
+ return __eax;
+}
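+
+/* Usage sketch (illustrative, not part of the original header): checking a
+ * feature bit from leaf 1.
+ *
+ *   unsigned int __eax, __ebx, __ecx, __edx;
+ *   if (__get_cpuid(1, &__eax, &__ebx, &__ecx, &__edx) &&
+ *       (__ecx & bit_SSE42))
+ *     ... SSE4.2 is available ...
+ */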
diff --git a/clang-2690385/lib64/clang/3.8/include/cuda_builtin_vars.h b/clang-2690385/lib64/clang/3.8/include/cuda_builtin_vars.h
new file mode 100644
index 00000000..901356b3
--- /dev/null
+++ b/clang-2690385/lib64/clang/3.8/include/cuda_builtin_vars.h
@@ -0,0 +1,110 @@
+/*===---- cuda_builtin_vars.h - CUDA built-in variables ---------------------===
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+ * THE SOFTWARE.
+ *
+ *===-----------------------------------------------------------------------===
+ */
+
+#ifndef __CUDA_BUILTIN_VARS_H
+#define __CUDA_BUILTIN_VARS_H
+
+// This file implements the built-in CUDA variables using __declspec(property).
+// https://msdn.microsoft.com/en-us/library/yhfk0thd.aspx
+// Every read access of a built-in variable's field is converted into a call
+// to a getter function, which in turn calls the appropriate builtin to fetch
+// the value.
+//
+// Example:
+// int x = threadIdx.x;
+// IR output:
+// %0 = call i32 @llvm.ptx.read.tid.x() #3
+// PTX output:
+// mov.u32 %r2, %tid.x;
+
+#define __CUDA_DEVICE_BUILTIN(FIELD, INTRINSIC) \
+ __declspec(property(get = __fetch_builtin_##FIELD)) unsigned int FIELD; \
+ static inline __attribute__((always_inline)) \
+ __attribute__((device)) unsigned int __fetch_builtin_##FIELD(void) { \
+ return INTRINSIC; \
+ }
+
+#if __cplusplus >= 201103L
+#define __DELETE =delete
+#else
+#define __DELETE
+#endif
+
+// Make sure nobody can create instances of the special variable types.  nvcc
+// also disallows taking the address of special variables, so we disable the
+// address-of operator as well.
+#define __CUDA_DISALLOW_BUILTINVAR_ACCESS(TypeName) \
+ __attribute__((device)) TypeName() __DELETE; \
+ __attribute__((device)) TypeName(const TypeName &) __DELETE; \
+ __attribute__((device)) void operator=(const TypeName &) const __DELETE; \
+ __attribute__((device)) TypeName *operator&() const __DELETE
+
+struct __cuda_builtin_threadIdx_t {
+ __CUDA_DEVICE_BUILTIN(x,__builtin_ptx_read_tid_x());
+ __CUDA_DEVICE_BUILTIN(y,__builtin_ptx_read_tid_y());
+ __CUDA_DEVICE_BUILTIN(z,__builtin_ptx_read_tid_z());
+private:
+ __CUDA_DISALLOW_BUILTINVAR_ACCESS(__cuda_builtin_threadIdx_t);
+};
+
+struct __cuda_builtin_blockIdx_t {
+ __CUDA_DEVICE_BUILTIN(x,__builtin_ptx_read_ctaid_x());
+ __CUDA_DEVICE_BUILTIN(y,__builtin_ptx_read_ctaid_y());
+ __CUDA_DEVICE_BUILTIN(z,__builtin_ptx_read_ctaid_z());
+private:
+ __CUDA_DISALLOW_BUILTINVAR_ACCESS(__cuda_builtin_blockIdx_t);
+};
+
+struct __cuda_builtin_blockDim_t {
+ __CUDA_DEVICE_BUILTIN(x,__builtin_ptx_read_ntid_x());
+ __CUDA_DEVICE_BUILTIN(y,__builtin_ptx_read_ntid_y());
+ __CUDA_DEVICE_BUILTIN(z,__builtin_ptx_read_ntid_z());
+private:
+ __CUDA_DISALLOW_BUILTINVAR_ACCESS(__cuda_builtin_blockDim_t);
+};
+
+struct __cuda_builtin_gridDim_t {
+ __CUDA_DEVICE_BUILTIN(x,__builtin_ptx_read_nctaid_x());
+ __CUDA_DEVICE_BUILTIN(y,__builtin_ptx_read_nctaid_y());
+ __CUDA_DEVICE_BUILTIN(z,__builtin_ptx_read_nctaid_z());
+private:
+ __CUDA_DISALLOW_BUILTINVAR_ACCESS(__cuda_builtin_gridDim_t);
+};
+
+#define __CUDA_BUILTIN_VAR \
+ extern const __attribute__((device)) __attribute__((weak))
+__CUDA_BUILTIN_VAR __cuda_builtin_threadIdx_t threadIdx;
+__CUDA_BUILTIN_VAR __cuda_builtin_blockIdx_t blockIdx;
+__CUDA_BUILTIN_VAR __cuda_builtin_blockDim_t blockDim;
+__CUDA_BUILTIN_VAR __cuda_builtin_gridDim_t gridDim;
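+
+// Usage sketch (illustrative): the canonical global thread index for a
+// one-dimensional launch, as written in device code:
+//   int gid = blockIdx.x * blockDim.x + threadIdx.x;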
+
+// warpSize should translate to a read of %WARP_SZ, but there is currently no
+// builtin to do so. According to the PTX v4.2 docs, 'to date, all target
+// architectures have a WARP_SZ value of 32'.
+__attribute__((device)) const int warpSize = 32;
+
+#undef __CUDA_DEVICE_BUILTIN
+#undef __CUDA_BUILTIN_VAR
+#undef __CUDA_DISALLOW_BUILTINVAR_ACCESS
+
+#endif /* __CUDA_BUILTIN_VARS_H */
diff --git a/clang-2690385/lib64/clang/3.8/include/emmintrin.h b/clang-2690385/lib64/clang/3.8/include/emmintrin.h
new file mode 100644
index 00000000..cfc2c716
--- /dev/null
+++ b/clang-2690385/lib64/clang/3.8/include/emmintrin.h
@@ -0,0 +1,1491 @@
+/*===---- emmintrin.h - SSE2 intrinsics ------------------------------------===
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+ * THE SOFTWARE.
+ *
+ *===-----------------------------------------------------------------------===
+ */
+
+#ifndef __EMMINTRIN_H
+#define __EMMINTRIN_H
+
+#include <xmmintrin.h>
+
+typedef double __m128d __attribute__((__vector_size__(16)));
+typedef long long __m128i __attribute__((__vector_size__(16)));
+
+/* Type defines. */
+typedef double __v2df __attribute__ ((__vector_size__ (16)));
+typedef long long __v2di __attribute__ ((__vector_size__ (16)));
+typedef short __v8hi __attribute__((__vector_size__(16)));
+typedef char __v16qi __attribute__((__vector_size__(16)));
+
+/* We need an explicitly signed variant for char. Note that it shouldn't
+ * appear in the interface, though. */
+typedef signed char __v16qs __attribute__((__vector_size__(16)));
+
+#include <f16cintrin.h>
+
+/* Define the default attributes for the functions in this file. */
+#define __DEFAULT_FN_ATTRS __attribute__((__always_inline__, __nodebug__, __target__("sse2")))
+
+static __inline__ __m128d __DEFAULT_FN_ATTRS
+_mm_add_sd(__m128d __a, __m128d __b)
+{
+ __a[0] += __b[0];
+ return __a;
+}
+
+static __inline__ __m128d __DEFAULT_FN_ATTRS
+_mm_add_pd(__m128d __a, __m128d __b)
+{
+ return __a + __b;
+}
+
+static __inline__ __m128d __DEFAULT_FN_ATTRS
+_mm_sub_sd(__m128d __a, __m128d __b)
+{
+ __a[0] -= __b[0];
+ return __a;
+}
+
+static __inline__ __m128d __DEFAULT_FN_ATTRS
+_mm_sub_pd(__m128d __a, __m128d __b)
+{
+ return __a - __b;
+}
+
+static __inline__ __m128d __DEFAULT_FN_ATTRS
+_mm_mul_sd(__m128d __a, __m128d __b)
+{
+ __a[0] *= __b[0];
+ return __a;
+}
+
+static __inline__ __m128d __DEFAULT_FN_ATTRS
+_mm_mul_pd(__m128d __a, __m128d __b)
+{
+ return __a * __b;
+}
+
+static __inline__ __m128d __DEFAULT_FN_ATTRS
+_mm_div_sd(__m128d __a, __m128d __b)
+{
+ __a[0] /= __b[0];
+ return __a;
+}
+
+static __inline__ __m128d __DEFAULT_FN_ATTRS
+_mm_div_pd(__m128d __a, __m128d __b)
+{
+ return __a / __b;
+}
+
+static __inline__ __m128d __DEFAULT_FN_ATTRS
+_mm_sqrt_sd(__m128d __a, __m128d __b)
+{
+ __m128d __c = __builtin_ia32_sqrtsd(__b);
+ return (__m128d) { __c[0], __a[1] };
+}
+
+static __inline__ __m128d __DEFAULT_FN_ATTRS
+_mm_sqrt_pd(__m128d __a)
+{
+ return __builtin_ia32_sqrtpd(__a);
+}
+
+static __inline__ __m128d __DEFAULT_FN_ATTRS
+_mm_min_sd(__m128d __a, __m128d __b)
+{
+ return __builtin_ia32_minsd(__a, __b);
+}
+
+static __inline__ __m128d __DEFAULT_FN_ATTRS
+_mm_min_pd(__m128d __a, __m128d __b)
+{
+ return __builtin_ia32_minpd(__a, __b);
+}
+
+static __inline__ __m128d __DEFAULT_FN_ATTRS
+_mm_max_sd(__m128d __a, __m128d __b)
+{
+ return __builtin_ia32_maxsd(__a, __b);
+}
+
+static __inline__ __m128d __DEFAULT_FN_ATTRS
+_mm_max_pd(__m128d __a, __m128d __b)
+{
+ return __builtin_ia32_maxpd(__a, __b);
+}
+
+static __inline__ __m128d __DEFAULT_FN_ATTRS
+_mm_and_pd(__m128d __a, __m128d __b)
+{
+ return (__m128d)((__v4si)__a & (__v4si)__b);
+}
+
+static __inline__ __m128d __DEFAULT_FN_ATTRS
+_mm_andnot_pd(__m128d __a, __m128d __b)
+{
+ return (__m128d)(~(__v4si)__a & (__v4si)__b);
+}
+
+static __inline__ __m128d __DEFAULT_FN_ATTRS
+_mm_or_pd(__m128d __a, __m128d __b)
+{
+ return (__m128d)((__v4si)__a | (__v4si)__b);
+}
+
+static __inline__ __m128d __DEFAULT_FN_ATTRS
+_mm_xor_pd(__m128d __a, __m128d __b)
+{
+ return (__m128d)((__v4si)__a ^ (__v4si)__b);
+}
+
+static __inline__ __m128d __DEFAULT_FN_ATTRS
+_mm_cmpeq_pd(__m128d __a, __m128d __b)
+{
+ return (__m128d)__builtin_ia32_cmpeqpd(__a, __b);
+}
+
+static __inline__ __m128d __DEFAULT_FN_ATTRS
+_mm_cmplt_pd(__m128d __a, __m128d __b)
+{
+ return (__m128d)__builtin_ia32_cmpltpd(__a, __b);
+}
+
+static __inline__ __m128d __DEFAULT_FN_ATTRS
+_mm_cmple_pd(__m128d __a, __m128d __b)
+{
+ return (__m128d)__builtin_ia32_cmplepd(__a, __b);
+}
+
+static __inline__ __m128d __DEFAULT_FN_ATTRS
+_mm_cmpgt_pd(__m128d __a, __m128d __b)
+{
+ return (__m128d)__builtin_ia32_cmpltpd(__b, __a);
+}
+
+static __inline__ __m128d __DEFAULT_FN_ATTRS
+_mm_cmpge_pd(__m128d __a, __m128d __b)
+{
+ return (__m128d)__builtin_ia32_cmplepd(__b, __a);
+}
+
+static __inline__ __m128d __DEFAULT_FN_ATTRS
+_mm_cmpord_pd(__m128d __a, __m128d __b)
+{
+ return (__m128d)__builtin_ia32_cmpordpd(__a, __b);
+}
+
+static __inline__ __m128d __DEFAULT_FN_ATTRS
+_mm_cmpunord_pd(__m128d __a, __m128d __b)
+{
+ return (__m128d)__builtin_ia32_cmpunordpd(__a, __b);
+}
+
+static __inline__ __m128d __DEFAULT_FN_ATTRS
+_mm_cmpneq_pd(__m128d __a, __m128d __b)
+{
+ return (__m128d)__builtin_ia32_cmpneqpd(__a, __b);
+}
+
+static __inline__ __m128d __DEFAULT_FN_ATTRS
+_mm_cmpnlt_pd(__m128d __a, __m128d __b)
+{
+ return (__m128d)__builtin_ia32_cmpnltpd(__a, __b);
+}
+
+static __inline__ __m128d __DEFAULT_FN_ATTRS
+_mm_cmpnle_pd(__m128d __a, __m128d __b)
+{
+ return (__m128d)__builtin_ia32_cmpnlepd(__a, __b);
+}
+
+static __inline__ __m128d __DEFAULT_FN_ATTRS
+_mm_cmpngt_pd(__m128d __a, __m128d __b)
+{
+ return (__m128d)__builtin_ia32_cmpnltpd(__b, __a);
+}
+
+static __inline__ __m128d __DEFAULT_FN_ATTRS
+_mm_cmpnge_pd(__m128d __a, __m128d __b)
+{
+ return (__m128d)__builtin_ia32_cmpnlepd(__b, __a);
+}
+
+static __inline__ __m128d __DEFAULT_FN_ATTRS
+_mm_cmpeq_sd(__m128d __a, __m128d __b)
+{
+ return (__m128d)__builtin_ia32_cmpeqsd(__a, __b);
+}
+
+static __inline__ __m128d __DEFAULT_FN_ATTRS
+_mm_cmplt_sd(__m128d __a, __m128d __b)
+{
+ return (__m128d)__builtin_ia32_cmpltsd(__a, __b);
+}
+
+static __inline__ __m128d __DEFAULT_FN_ATTRS
+_mm_cmple_sd(__m128d __a, __m128d __b)
+{
+ return (__m128d)__builtin_ia32_cmplesd(__a, __b);
+}
+
+static __inline__ __m128d __DEFAULT_FN_ATTRS
+_mm_cmpgt_sd(__m128d __a, __m128d __b)
+{
+ __m128d __c = __builtin_ia32_cmpltsd(__b, __a);
+ return (__m128d) { __c[0], __a[1] };
+}
+
+static __inline__ __m128d __DEFAULT_FN_ATTRS
+_mm_cmpge_sd(__m128d __a, __m128d __b)
+{
+ __m128d __c = __builtin_ia32_cmplesd(__b, __a);
+ return (__m128d) { __c[0], __a[1] };
+}
+
+static __inline__ __m128d __DEFAULT_FN_ATTRS
+_mm_cmpord_sd(__m128d __a, __m128d __b)
+{
+ return (__m128d)__builtin_ia32_cmpordsd(__a, __b);
+}
+
+static __inline__ __m128d __DEFAULT_FN_ATTRS
+_mm_cmpunord_sd(__m128d __a, __m128d __b)
+{
+ return (__m128d)__builtin_ia32_cmpunordsd(__a, __b);
+}
+
+static __inline__ __m128d __DEFAULT_FN_ATTRS
+_mm_cmpneq_sd(__m128d __a, __m128d __b)
+{
+ return (__m128d)__builtin_ia32_cmpneqsd(__a, __b);
+}
+
+static __inline__ __m128d __DEFAULT_FN_ATTRS
+_mm_cmpnlt_sd(__m128d __a, __m128d __b)
+{
+ return (__m128d)__builtin_ia32_cmpnltsd(__a, __b);
+}
+
+static __inline__ __m128d __DEFAULT_FN_ATTRS
+_mm_cmpnle_sd(__m128d __a, __m128d __b)
+{
+ return (__m128d)__builtin_ia32_cmpnlesd(__a, __b);
+}
+
+static __inline__ __m128d __DEFAULT_FN_ATTRS
+_mm_cmpngt_sd(__m128d __a, __m128d __b)
+{
+ __m128d __c = __builtin_ia32_cmpnltsd(__b, __a);
+ return (__m128d) { __c[0], __a[1] };
+}
+
+static __inline__ __m128d __DEFAULT_FN_ATTRS
+_mm_cmpnge_sd(__m128d __a, __m128d __b)
+{
+ __m128d __c = __builtin_ia32_cmpnlesd(__b, __a);
+ return (__m128d) { __c[0], __a[1] };
+}
+
+static __inline__ int __DEFAULT_FN_ATTRS
+_mm_comieq_sd(__m128d __a, __m128d __b)
+{
+ return __builtin_ia32_comisdeq(__a, __b);
+}
+
+static __inline__ int __DEFAULT_FN_ATTRS
+_mm_comilt_sd(__m128d __a, __m128d __b)
+{
+ return __builtin_ia32_comisdlt(__a, __b);
+}
+
+static __inline__ int __DEFAULT_FN_ATTRS
+_mm_comile_sd(__m128d __a, __m128d __b)
+{
+ return __builtin_ia32_comisdle(__a, __b);
+}
+
+static __inline__ int __DEFAULT_FN_ATTRS
+_mm_comigt_sd(__m128d __a, __m128d __b)
+{
+ return __builtin_ia32_comisdgt(__a, __b);
+}
+
+static __inline__ int __DEFAULT_FN_ATTRS
+_mm_comige_sd(__m128d __a, __m128d __b)
+{
+ return __builtin_ia32_comisdge(__a, __b);
+}
+
+static __inline__ int __DEFAULT_FN_ATTRS
+_mm_comineq_sd(__m128d __a, __m128d __b)
+{
+ return __builtin_ia32_comisdneq(__a, __b);
+}
+
+static __inline__ int __DEFAULT_FN_ATTRS
+_mm_ucomieq_sd(__m128d __a, __m128d __b)
+{
+ return __builtin_ia32_ucomisdeq(__a, __b);
+}
+
+static __inline__ int __DEFAULT_FN_ATTRS
+_mm_ucomilt_sd(__m128d __a, __m128d __b)
+{
+ return __builtin_ia32_ucomisdlt(__a, __b);
+}
+
+static __inline__ int __DEFAULT_FN_ATTRS
+_mm_ucomile_sd(__m128d __a, __m128d __b)
+{
+ return __builtin_ia32_ucomisdle(__a, __b);
+}
+
+static __inline__ int __DEFAULT_FN_ATTRS
+_mm_ucomigt_sd(__m128d __a, __m128d __b)
+{
+ return __builtin_ia32_ucomisdgt(__a, __b);
+}
+
+static __inline__ int __DEFAULT_FN_ATTRS
+_mm_ucomige_sd(__m128d __a, __m128d __b)
+{
+ return __builtin_ia32_ucomisdge(__a, __b);
+}
+
+static __inline__ int __DEFAULT_FN_ATTRS
+_mm_ucomineq_sd(__m128d __a, __m128d __b)
+{
+ return __builtin_ia32_ucomisdneq(__a, __b);
+}
+
+static __inline__ __m128 __DEFAULT_FN_ATTRS
+_mm_cvtpd_ps(__m128d __a)
+{
+ return __builtin_ia32_cvtpd2ps(__a);
+}
+
+static __inline__ __m128d __DEFAULT_FN_ATTRS
+_mm_cvtps_pd(__m128 __a)
+{
+ return __builtin_ia32_cvtps2pd(__a);
+}
+
+static __inline__ __m128d __DEFAULT_FN_ATTRS
+_mm_cvtepi32_pd(__m128i __a)
+{
+ return __builtin_ia32_cvtdq2pd((__v4si)__a);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_cvtpd_epi32(__m128d __a)
+{
+ return __builtin_ia32_cvtpd2dq(__a);
+}
+
+static __inline__ int __DEFAULT_FN_ATTRS
+_mm_cvtsd_si32(__m128d __a)
+{
+ return __builtin_ia32_cvtsd2si(__a);
+}
+
+static __inline__ __m128 __DEFAULT_FN_ATTRS
+_mm_cvtsd_ss(__m128 __a, __m128d __b)
+{
+ __a[0] = __b[0];
+ return __a;
+}
+
+static __inline__ __m128d __DEFAULT_FN_ATTRS
+_mm_cvtsi32_sd(__m128d __a, int __b)
+{
+ __a[0] = __b;
+ return __a;
+}
+
+static __inline__ __m128d __DEFAULT_FN_ATTRS
+_mm_cvtss_sd(__m128d __a, __m128 __b)
+{
+ __a[0] = __b[0];
+ return __a;
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_cvttpd_epi32(__m128d __a)
+{
+ return (__m128i)__builtin_ia32_cvttpd2dq(__a);
+}
+
+static __inline__ int __DEFAULT_FN_ATTRS
+_mm_cvttsd_si32(__m128d __a)
+{
+ return __a[0];
+}
+
+static __inline__ __m64 __DEFAULT_FN_ATTRS
+_mm_cvtpd_pi32(__m128d __a)
+{
+ return (__m64)__builtin_ia32_cvtpd2pi(__a);
+}
+
+static __inline__ __m64 __DEFAULT_FN_ATTRS
+_mm_cvttpd_pi32(__m128d __a)
+{
+ return (__m64)__builtin_ia32_cvttpd2pi(__a);
+}
+
+static __inline__ __m128d __DEFAULT_FN_ATTRS
+_mm_cvtpi32_pd(__m64 __a)
+{
+ return __builtin_ia32_cvtpi2pd((__v2si)__a);
+}
+
+static __inline__ double __DEFAULT_FN_ATTRS
+_mm_cvtsd_f64(__m128d __a)
+{
+ return __a[0];
+}
+
+static __inline__ __m128d __DEFAULT_FN_ATTRS
+_mm_load_pd(double const *__dp)
+{
+ return *(__m128d*)__dp;
+}
+
+static __inline__ __m128d __DEFAULT_FN_ATTRS
+_mm_load1_pd(double const *__dp)
+{
+ struct __mm_load1_pd_struct {
+ double __u;
+ } __attribute__((__packed__, __may_alias__));
+ double __u = ((struct __mm_load1_pd_struct*)__dp)->__u;
+ return (__m128d){ __u, __u };
+}
+
+#define _mm_load_pd1(dp) _mm_load1_pd(dp)
+
+static __inline__ __m128d __DEFAULT_FN_ATTRS
+_mm_loadr_pd(double const *__dp)
+{
+ __m128d __u = *(__m128d*)__dp;
+ return __builtin_shufflevector(__u, __u, 1, 0);
+}
+
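+/* Note (illustrative): the __packed__, __may_alias__ struct wrappers used by
+ * several loads in this file tell the compiler that the access may be
+ * misaligned and may alias other types, so it emits an unaligned vector load
+ * rather than assuming 16-byte alignment. */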
+static __inline__ __m128d __DEFAULT_FN_ATTRS
+_mm_loadu_pd(double const *__dp)
+{
+ struct __loadu_pd {
+ __m128d __v;
+ } __attribute__((__packed__, __may_alias__));
+ return ((struct __loadu_pd*)__dp)->__v;
+}
+
+static __inline__ __m128d __DEFAULT_FN_ATTRS
+_mm_load_sd(double const *__dp)
+{
+ struct __mm_load_sd_struct {
+ double __u;
+ } __attribute__((__packed__, __may_alias__));
+ double __u = ((struct __mm_load_sd_struct*)__dp)->__u;
+ return (__m128d){ __u, 0 };
+}
+
+static __inline__ __m128d __DEFAULT_FN_ATTRS
+_mm_loadh_pd(__m128d __a, double const *__dp)
+{
+ struct __mm_loadh_pd_struct {
+ double __u;
+ } __attribute__((__packed__, __may_alias__));
+ double __u = ((struct __mm_loadh_pd_struct*)__dp)->__u;
+ return (__m128d){ __a[0], __u };
+}
+
+static __inline__ __m128d __DEFAULT_FN_ATTRS
+_mm_loadl_pd(__m128d __a, double const *__dp)
+{
+ struct __mm_loadl_pd_struct {
+ double __u;
+ } __attribute__((__packed__, __may_alias__));
+ double __u = ((struct __mm_loadl_pd_struct*)__dp)->__u;
+ return (__m128d){ __u, __a[1] };
+}
+
+static __inline__ __m128d __DEFAULT_FN_ATTRS
+_mm_undefined_pd(void)
+{
+ return (__m128d)__builtin_ia32_undef128();
+}
+
+static __inline__ __m128d __DEFAULT_FN_ATTRS
+_mm_set_sd(double __w)
+{
+ return (__m128d){ __w, 0 };
+}
+
+static __inline__ __m128d __DEFAULT_FN_ATTRS
+_mm_set1_pd(double __w)
+{
+ return (__m128d){ __w, __w };
+}
+
+static __inline__ __m128d __DEFAULT_FN_ATTRS
+_mm_set_pd(double __w, double __x)
+{
+ return (__m128d){ __x, __w };
+}
+
+static __inline__ __m128d __DEFAULT_FN_ATTRS
+_mm_setr_pd(double __w, double __x)
+{
+ return (__m128d){ __w, __x };
+}
+
+static __inline__ __m128d __DEFAULT_FN_ATTRS
+_mm_setzero_pd(void)
+{
+ return (__m128d){ 0, 0 };
+}
+
+static __inline__ __m128d __DEFAULT_FN_ATTRS
+_mm_move_sd(__m128d __a, __m128d __b)
+{
+ return (__m128d){ __b[0], __a[1] };
+}
+
+static __inline__ void __DEFAULT_FN_ATTRS
+_mm_store_sd(double *__dp, __m128d __a)
+{
+ struct __mm_store_sd_struct {
+ double __u;
+ } __attribute__((__packed__, __may_alias__));
+ ((struct __mm_store_sd_struct*)__dp)->__u = __a[0];
+}
+
+static __inline__ void __DEFAULT_FN_ATTRS
+_mm_store1_pd(double *__dp, __m128d __a)
+{
+ struct __mm_store1_pd_struct {
+ double __u[2];
+ } __attribute__((__packed__, __may_alias__));
+ ((struct __mm_store1_pd_struct*)__dp)->__u[0] = __a[0];
+ ((struct __mm_store1_pd_struct*)__dp)->__u[1] = __a[0];
+}
+
+static __inline__ void __DEFAULT_FN_ATTRS
+_mm_store_pd(double *__dp, __m128d __a)
+{
+ *(__m128d *)__dp = __a;
+}
+
+static __inline__ void __DEFAULT_FN_ATTRS
+_mm_storeu_pd(double *__dp, __m128d __a)
+{
+ __builtin_ia32_storeupd(__dp, __a);
+}
+
+static __inline__ void __DEFAULT_FN_ATTRS
+_mm_storer_pd(double *__dp, __m128d __a)
+{
+ __a = __builtin_shufflevector(__a, __a, 1, 0);
+ *(__m128d *)__dp = __a;
+}
+
+static __inline__ void __DEFAULT_FN_ATTRS
+_mm_storeh_pd(double *__dp, __m128d __a)
+{
+ struct __mm_storeh_pd_struct {
+ double __u;
+ } __attribute__((__packed__, __may_alias__));
+ ((struct __mm_storeh_pd_struct*)__dp)->__u = __a[1];
+}
+
+static __inline__ void __DEFAULT_FN_ATTRS
+_mm_storel_pd(double *__dp, __m128d __a)
+{
+ struct __mm_storeh_pd_struct {
+ double __u;
+ } __attribute__((__packed__, __may_alias__));
+ ((struct __mm_storeh_pd_struct*)__dp)->__u = __a[0];
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_add_epi8(__m128i __a, __m128i __b)
+{
+ return (__m128i)((__v16qi)__a + (__v16qi)__b);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_add_epi16(__m128i __a, __m128i __b)
+{
+ return (__m128i)((__v8hi)__a + (__v8hi)__b);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_add_epi32(__m128i __a, __m128i __b)
+{
+ return (__m128i)((__v4si)__a + (__v4si)__b);
+}
+
+static __inline__ __m64 __DEFAULT_FN_ATTRS
+_mm_add_si64(__m64 __a, __m64 __b)
+{
+ return (__m64)__builtin_ia32_paddq(__a, __b);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_add_epi64(__m128i __a, __m128i __b)
+{
+ return __a + __b;
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_adds_epi8(__m128i __a, __m128i __b)
+{
+ return (__m128i)__builtin_ia32_paddsb128((__v16qi)__a, (__v16qi)__b);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_adds_epi16(__m128i __a, __m128i __b)
+{
+ return (__m128i)__builtin_ia32_paddsw128((__v8hi)__a, (__v8hi)__b);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_adds_epu8(__m128i __a, __m128i __b)
+{
+ return (__m128i)__builtin_ia32_paddusb128((__v16qi)__a, (__v16qi)__b);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_adds_epu16(__m128i __a, __m128i __b)
+{
+ return (__m128i)__builtin_ia32_paddusw128((__v8hi)__a, (__v8hi)__b);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_avg_epu8(__m128i __a, __m128i __b)
+{
+ return (__m128i)__builtin_ia32_pavgb128((__v16qi)__a, (__v16qi)__b);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_avg_epu16(__m128i __a, __m128i __b)
+{
+ return (__m128i)__builtin_ia32_pavgw128((__v8hi)__a, (__v8hi)__b);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_madd_epi16(__m128i __a, __m128i __b)
+{
+ return (__m128i)__builtin_ia32_pmaddwd128((__v8hi)__a, (__v8hi)__b);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_max_epi16(__m128i __a, __m128i __b)
+{
+ return (__m128i)__builtin_ia32_pmaxsw128((__v8hi)__a, (__v8hi)__b);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_max_epu8(__m128i __a, __m128i __b)
+{
+ return (__m128i)__builtin_ia32_pmaxub128((__v16qi)__a, (__v16qi)__b);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_min_epi16(__m128i __a, __m128i __b)
+{
+ return (__m128i)__builtin_ia32_pminsw128((__v8hi)__a, (__v8hi)__b);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_min_epu8(__m128i __a, __m128i __b)
+{
+ return (__m128i)__builtin_ia32_pminub128((__v16qi)__a, (__v16qi)__b);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_mulhi_epi16(__m128i __a, __m128i __b)
+{
+ return (__m128i)__builtin_ia32_pmulhw128((__v8hi)__a, (__v8hi)__b);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_mulhi_epu16(__m128i __a, __m128i __b)
+{
+ return (__m128i)__builtin_ia32_pmulhuw128((__v8hi)__a, (__v8hi)__b);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_mullo_epi16(__m128i __a, __m128i __b)
+{
+ return (__m128i)((__v8hi)__a * (__v8hi)__b);
+}
+
+static __inline__ __m64 __DEFAULT_FN_ATTRS
+_mm_mul_su32(__m64 __a, __m64 __b)
+{
+ return __builtin_ia32_pmuludq((__v2si)__a, (__v2si)__b);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_mul_epu32(__m128i __a, __m128i __b)
+{
+ return __builtin_ia32_pmuludq128((__v4si)__a, (__v4si)__b);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_sad_epu8(__m128i __a, __m128i __b)
+{
+ return __builtin_ia32_psadbw128((__v16qi)__a, (__v16qi)__b);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_sub_epi8(__m128i __a, __m128i __b)
+{
+ return (__m128i)((__v16qi)__a - (__v16qi)__b);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_sub_epi16(__m128i __a, __m128i __b)
+{
+ return (__m128i)((__v8hi)__a - (__v8hi)__b);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_sub_epi32(__m128i __a, __m128i __b)
+{
+ return (__m128i)((__v4si)__a - (__v4si)__b);
+}
+
+static __inline__ __m64 __DEFAULT_FN_ATTRS
+_mm_sub_si64(__m64 __a, __m64 __b)
+{
+ return (__m64)__builtin_ia32_psubq(__a, __b);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_sub_epi64(__m128i __a, __m128i __b)
+{
+ return __a - __b;
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_subs_epi8(__m128i __a, __m128i __b)
+{
+ return (__m128i)__builtin_ia32_psubsb128((__v16qi)__a, (__v16qi)__b);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_subs_epi16(__m128i __a, __m128i __b)
+{
+ return (__m128i)__builtin_ia32_psubsw128((__v8hi)__a, (__v8hi)__b);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_subs_epu8(__m128i __a, __m128i __b)
+{
+ return (__m128i)__builtin_ia32_psubusb128((__v16qi)__a, (__v16qi)__b);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_subs_epu16(__m128i __a, __m128i __b)
+{
+ return (__m128i)__builtin_ia32_psubusw128((__v8hi)__a, (__v8hi)__b);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_and_si128(__m128i __a, __m128i __b)
+{
+ return __a & __b;
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_andnot_si128(__m128i __a, __m128i __b)
+{
+ return ~__a & __b;
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_or_si128(__m128i __a, __m128i __b)
+{
+ return __a | __b;
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_xor_si128(__m128i __a, __m128i __b)
+{
+ return __a ^ __b;
+}
+
+#define _mm_slli_si128(a, imm) __extension__ ({ \
+ (__m128i)__builtin_shufflevector((__v16qi)_mm_setzero_si128(), \
+ (__v16qi)(__m128i)(a), \
+ ((imm)&0xF0) ? 0 : 16 - ((imm)&0xF), \
+ ((imm)&0xF0) ? 0 : 17 - ((imm)&0xF), \
+ ((imm)&0xF0) ? 0 : 18 - ((imm)&0xF), \
+ ((imm)&0xF0) ? 0 : 19 - ((imm)&0xF), \
+ ((imm)&0xF0) ? 0 : 20 - ((imm)&0xF), \
+ ((imm)&0xF0) ? 0 : 21 - ((imm)&0xF), \
+ ((imm)&0xF0) ? 0 : 22 - ((imm)&0xF), \
+ ((imm)&0xF0) ? 0 : 23 - ((imm)&0xF), \
+ ((imm)&0xF0) ? 0 : 24 - ((imm)&0xF), \
+ ((imm)&0xF0) ? 0 : 25 - ((imm)&0xF), \
+ ((imm)&0xF0) ? 0 : 26 - ((imm)&0xF), \
+ ((imm)&0xF0) ? 0 : 27 - ((imm)&0xF), \
+ ((imm)&0xF0) ? 0 : 28 - ((imm)&0xF), \
+ ((imm)&0xF0) ? 0 : 29 - ((imm)&0xF), \
+ ((imm)&0xF0) ? 0 : 30 - ((imm)&0xF), \
+ ((imm)&0xF0) ? 0 : 31 - ((imm)&0xF)); })
+
+#define _mm_bslli_si128(a, imm) \
+ _mm_slli_si128((a), (imm))
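+
+/* Worked example (illustrative): _mm_slli_si128 shifts the full 128-bit value
+ * left by imm bytes, shifting in zeros; shift counts of 16 or more produce
+ * zero.  With bytes {0,1,2,...,15} in a, _mm_slli_si128(a, 2) yields
+ * {0,0,0,1,...,13}. */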
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_slli_epi16(__m128i __a, int __count)
+{
+ return (__m128i)__builtin_ia32_psllwi128((__v8hi)__a, __count);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_sll_epi16(__m128i __a, __m128i __count)
+{
+ return (__m128i)__builtin_ia32_psllw128((__v8hi)__a, (__v8hi)__count);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_slli_epi32(__m128i __a, int __count)
+{
+ return (__m128i)__builtin_ia32_pslldi128((__v4si)__a, __count);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_sll_epi32(__m128i __a, __m128i __count)
+{
+ return (__m128i)__builtin_ia32_pslld128((__v4si)__a, (__v4si)__count);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_slli_epi64(__m128i __a, int __count)
+{
+ return __builtin_ia32_psllqi128(__a, __count);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_sll_epi64(__m128i __a, __m128i __count)
+{
+ return __builtin_ia32_psllq128(__a, __count);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_srai_epi16(__m128i __a, int __count)
+{
+ return (__m128i)__builtin_ia32_psrawi128((__v8hi)__a, __count);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_sra_epi16(__m128i __a, __m128i __count)
+{
+ return (__m128i)__builtin_ia32_psraw128((__v8hi)__a, (__v8hi)__count);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_srai_epi32(__m128i __a, int __count)
+{
+ return (__m128i)__builtin_ia32_psradi128((__v4si)__a, __count);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_sra_epi32(__m128i __a, __m128i __count)
+{
+ return (__m128i)__builtin_ia32_psrad128((__v4si)__a, (__v4si)__count);
+}
+
+#define _mm_srli_si128(a, imm) __extension__ ({ \
+ (__m128i)__builtin_shufflevector((__v16qi)(__m128i)(a), \
+ (__v16qi)_mm_setzero_si128(), \
+ ((imm)&0xF0) ? 16 : ((imm)&0xF) + 0, \
+ ((imm)&0xF0) ? 16 : ((imm)&0xF) + 1, \
+ ((imm)&0xF0) ? 16 : ((imm)&0xF) + 2, \
+ ((imm)&0xF0) ? 16 : ((imm)&0xF) + 3, \
+ ((imm)&0xF0) ? 16 : ((imm)&0xF) + 4, \
+ ((imm)&0xF0) ? 16 : ((imm)&0xF) + 5, \
+ ((imm)&0xF0) ? 16 : ((imm)&0xF) + 6, \
+ ((imm)&0xF0) ? 16 : ((imm)&0xF) + 7, \
+ ((imm)&0xF0) ? 16 : ((imm)&0xF) + 8, \
+ ((imm)&0xF0) ? 16 : ((imm)&0xF) + 9, \
+ ((imm)&0xF0) ? 16 : ((imm)&0xF) + 10, \
+ ((imm)&0xF0) ? 16 : ((imm)&0xF) + 11, \
+ ((imm)&0xF0) ? 16 : ((imm)&0xF) + 12, \
+ ((imm)&0xF0) ? 16 : ((imm)&0xF) + 13, \
+ ((imm)&0xF0) ? 16 : ((imm)&0xF) + 14, \
+ ((imm)&0xF0) ? 16 : ((imm)&0xF) + 15); })
+
+#define _mm_bsrli_si128(a, imm) \
+ _mm_srli_si128((a), (imm))
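+
+/* Worked example (illustrative): _mm_srli_si128(a, 2) shifts right by two
+ * bytes, so byte i of the result is byte i+2 of a, with zeros shifted in at
+ * the top; counts of 16 or more likewise produce zero. */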
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_srli_epi16(__m128i __a, int __count)
+{
+ return (__m128i)__builtin_ia32_psrlwi128((__v8hi)__a, __count);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_srl_epi16(__m128i __a, __m128i __count)
+{
+ return (__m128i)__builtin_ia32_psrlw128((__v8hi)__a, (__v8hi)__count);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_srli_epi32(__m128i __a, int __count)
+{
+ return (__m128i)__builtin_ia32_psrldi128((__v4si)__a, __count);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_srl_epi32(__m128i __a, __m128i __count)
+{
+ return (__m128i)__builtin_ia32_psrld128((__v4si)__a, (__v4si)__count);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_srli_epi64(__m128i __a, int __count)
+{
+ return __builtin_ia32_psrlqi128(__a, __count);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_srl_epi64(__m128i __a, __m128i __count)
+{
+ return __builtin_ia32_psrlq128(__a, __count);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_cmpeq_epi8(__m128i __a, __m128i __b)
+{
+ return (__m128i)((__v16qi)__a == (__v16qi)__b);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_cmpeq_epi16(__m128i __a, __m128i __b)
+{
+ return (__m128i)((__v8hi)__a == (__v8hi)__b);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_cmpeq_epi32(__m128i __a, __m128i __b)
+{
+ return (__m128i)((__v4si)__a == (__v4si)__b);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_cmpgt_epi8(__m128i __a, __m128i __b)
+{
+ /* This function always performs a signed comparison, but __v16qi is based
+    on char, which may be signed or unsigned depending on the target, so use
+    the explicitly signed __v16qs. */
+ return (__m128i)((__v16qs)__a > (__v16qs)__b);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_cmpgt_epi16(__m128i __a, __m128i __b)
+{
+ return (__m128i)((__v8hi)__a > (__v8hi)__b);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_cmpgt_epi32(__m128i __a, __m128i __b)
+{
+ return (__m128i)((__v4si)__a > (__v4si)__b);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_cmplt_epi8(__m128i __a, __m128i __b)
+{
+ return _mm_cmpgt_epi8(__b, __a);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_cmplt_epi16(__m128i __a, __m128i __b)
+{
+ return _mm_cmpgt_epi16(__b, __a);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_cmplt_epi32(__m128i __a, __m128i __b)
+{
+ return _mm_cmpgt_epi32(__b, __a);
+}
+
+#ifdef __x86_64__
+static __inline__ __m128d __DEFAULT_FN_ATTRS
+_mm_cvtsi64_sd(__m128d __a, long long __b)
+{
+ __a[0] = __b;
+ return __a;
+}
+
+static __inline__ long long __DEFAULT_FN_ATTRS
+_mm_cvtsd_si64(__m128d __a)
+{
+ return __builtin_ia32_cvtsd2si64(__a);
+}
+
+static __inline__ long long __DEFAULT_FN_ATTRS
+_mm_cvttsd_si64(__m128d __a)
+{
+ return __a[0];
+}
+#endif
+
+static __inline__ __m128 __DEFAULT_FN_ATTRS
+_mm_cvtepi32_ps(__m128i __a)
+{
+ return __builtin_ia32_cvtdq2ps((__v4si)__a);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_cvtps_epi32(__m128 __a)
+{
+ return (__m128i)__builtin_ia32_cvtps2dq(__a);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_cvttps_epi32(__m128 __a)
+{
+ return (__m128i)__builtin_ia32_cvttps2dq(__a);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_cvtsi32_si128(int __a)
+{
+ return (__m128i)(__v4si){ __a, 0, 0, 0 };
+}
+
+#ifdef __x86_64__
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_cvtsi64_si128(long long __a)
+{
+ return (__m128i){ __a, 0 };
+}
+#endif
+
+static __inline__ int __DEFAULT_FN_ATTRS
+_mm_cvtsi128_si32(__m128i __a)
+{
+ __v4si __b = (__v4si)__a;
+ return __b[0];
+}
+
+#ifdef __x86_64__
+static __inline__ long long __DEFAULT_FN_ATTRS
+_mm_cvtsi128_si64(__m128i __a)
+{
+ return __a[0];
+}
+#endif
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_load_si128(__m128i const *__p)
+{
+ return *__p;
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_loadu_si128(__m128i const *__p)
+{
+ struct __loadu_si128 {
+ __m128i __v;
+ } __attribute__((__packed__, __may_alias__));
+ return ((struct __loadu_si128*)__p)->__v;
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_loadl_epi64(__m128i const *__p)
+{
+ struct __mm_loadl_epi64_struct {
+ long long __u;
+ } __attribute__((__packed__, __may_alias__));
+ return (__m128i) { ((struct __mm_loadl_epi64_struct*)__p)->__u, 0};
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_undefined_si128(void)
+{
+ return (__m128i)__builtin_ia32_undef128();
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_set_epi64x(long long __q1, long long __q0)
+{
+ return (__m128i){ __q0, __q1 };
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_set_epi64(__m64 __q1, __m64 __q0)
+{
+ return (__m128i){ (long long)__q0, (long long)__q1 };
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_set_epi32(int __i3, int __i2, int __i1, int __i0)
+{
+ return (__m128i)(__v4si){ __i0, __i1, __i2, __i3};
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_set_epi16(short __w7, short __w6, short __w5, short __w4, short __w3, short __w2, short __w1, short __w0)
+{
+ return (__m128i)(__v8hi){ __w0, __w1, __w2, __w3, __w4, __w5, __w6, __w7 };
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_set_epi8(char __b15, char __b14, char __b13, char __b12, char __b11, char __b10, char __b9, char __b8, char __b7, char __b6, char __b5, char __b4, char __b3, char __b2, char __b1, char __b0)
+{
+ return (__m128i)(__v16qi){ __b0, __b1, __b2, __b3, __b4, __b5, __b6, __b7, __b8, __b9, __b10, __b11, __b12, __b13, __b14, __b15 };
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_set1_epi64x(long long __q)
+{
+ return (__m128i){ __q, __q };
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_set1_epi64(__m64 __q)
+{
+ return (__m128i){ (long long)__q, (long long)__q };
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_set1_epi32(int __i)
+{
+ return (__m128i)(__v4si){ __i, __i, __i, __i };
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_set1_epi16(short __w)
+{
+ return (__m128i)(__v8hi){ __w, __w, __w, __w, __w, __w, __w, __w };
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_set1_epi8(char __b)
+{
+ return (__m128i)(__v16qi){ __b, __b, __b, __b, __b, __b, __b, __b, __b, __b, __b, __b, __b, __b, __b, __b };
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_setr_epi64(__m64 __q0, __m64 __q1)
+{
+ return (__m128i){ (long long)__q0, (long long)__q1 };
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_setr_epi32(int __i0, int __i1, int __i2, int __i3)
+{
+ return (__m128i)(__v4si){ __i0, __i1, __i2, __i3};
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_setr_epi16(short __w0, short __w1, short __w2, short __w3, short __w4, short __w5, short __w6, short __w7)
+{
+ return (__m128i)(__v8hi){ __w0, __w1, __w2, __w3, __w4, __w5, __w6, __w7 };
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_setr_epi8(char __b0, char __b1, char __b2, char __b3, char __b4, char __b5, char __b6, char __b7, char __b8, char __b9, char __b10, char __b11, char __b12, char __b13, char __b14, char __b15)
+{
+ return (__m128i)(__v16qi){ __b0, __b1, __b2, __b3, __b4, __b5, __b6, __b7, __b8, __b9, __b10, __b11, __b12, __b13, __b14, __b15 };
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_setzero_si128(void)
+{
+ return (__m128i){ 0LL, 0LL };
+}
+
+static __inline__ void __DEFAULT_FN_ATTRS
+_mm_store_si128(__m128i *__p, __m128i __b)
+{
+ *__p = __b;
+}
+
+static __inline__ void __DEFAULT_FN_ATTRS
+_mm_storeu_si128(__m128i *__p, __m128i __b)
+{
+ __builtin_ia32_storedqu((char *)__p, (__v16qi)__b);
+}
+
+static __inline__ void __DEFAULT_FN_ATTRS
+_mm_maskmoveu_si128(__m128i __d, __m128i __n, char *__p)
+{
+ __builtin_ia32_maskmovdqu((__v16qi)__d, (__v16qi)__n, __p);
+}
+
+static __inline__ void __DEFAULT_FN_ATTRS
+_mm_storel_epi64(__m128i *__p, __m128i __a)
+{
+ struct __mm_storel_epi64_struct {
+ long long __u;
+ } __attribute__((__packed__, __may_alias__));
+ ((struct __mm_storel_epi64_struct*)__p)->__u = __a[0];
+}
+
+static __inline__ void __DEFAULT_FN_ATTRS
+_mm_stream_pd(double *__p, __m128d __a)
+{
+ __builtin_ia32_movntpd(__p, __a);
+}
+
+static __inline__ void __DEFAULT_FN_ATTRS
+_mm_stream_si128(__m128i *__p, __m128i __a)
+{
+ __builtin_ia32_movntdq(__p, __a);
+}
+
+static __inline__ void __DEFAULT_FN_ATTRS
+_mm_stream_si32(int *__p, int __a)
+{
+ __builtin_ia32_movnti(__p, __a);
+}
+
+#ifdef __x86_64__
+static __inline__ void __DEFAULT_FN_ATTRS
+_mm_stream_si64(long long *__p, long long __a)
+{
+ __builtin_ia32_movnti64(__p, __a);
+}
+#endif
+
+static __inline__ void __DEFAULT_FN_ATTRS
+_mm_clflush(void const *__p)
+{
+ __builtin_ia32_clflush(__p);
+}
+
+static __inline__ void __DEFAULT_FN_ATTRS
+_mm_lfence(void)
+{
+ __builtin_ia32_lfence();
+}
+
+static __inline__ void __DEFAULT_FN_ATTRS
+_mm_mfence(void)
+{
+ __builtin_ia32_mfence();
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_packs_epi16(__m128i __a, __m128i __b)
+{
+ return (__m128i)__builtin_ia32_packsswb128((__v8hi)__a, (__v8hi)__b);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_packs_epi32(__m128i __a, __m128i __b)
+{
+ return (__m128i)__builtin_ia32_packssdw128((__v4si)__a, (__v4si)__b);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_packus_epi16(__m128i __a, __m128i __b)
+{
+ return (__m128i)__builtin_ia32_packuswb128((__v8hi)__a, (__v8hi)__b);
+}
+
+static __inline__ int __DEFAULT_FN_ATTRS
+_mm_extract_epi16(__m128i __a, int __imm)
+{
+ __v8hi __b = (__v8hi)__a;
+ return (unsigned short)__b[__imm & 7];
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_insert_epi16(__m128i __a, int __b, int __imm)
+{
+ __v8hi __c = (__v8hi)__a;
+ __c[__imm & 7] = __b;
+ return (__m128i)__c;
+}
+
+static __inline__ int __DEFAULT_FN_ATTRS
+_mm_movemask_epi8(__m128i __a)
+{
+ return __builtin_ia32_pmovmskb128((__v16qi)__a);
+}
+
+#define _mm_shuffle_epi32(a, imm) __extension__ ({ \
+ (__m128i)__builtin_shufflevector((__v4si)(__m128i)(a), \
+ (__v4si)_mm_setzero_si128(), \
+ (imm) & 0x3, ((imm) & 0xc) >> 2, \
+ ((imm) & 0x30) >> 4, ((imm) & 0xc0) >> 6); })
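+
+/* Worked example (illustrative): each two-bit field of imm selects the source
+ * element for one result lane, lowest lane first; _mm_shuffle_epi32(a, 0x1B)
+ * therefore reverses the four 32-bit elements of a. */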
+
+#define _mm_shufflelo_epi16(a, imm) __extension__ ({ \
+ (__m128i)__builtin_shufflevector((__v8hi)(__m128i)(a), \
+ (__v8hi)_mm_setzero_si128(), \
+ (imm) & 0x3, ((imm) & 0xc) >> 2, \
+ ((imm) & 0x30) >> 4, ((imm) & 0xc0) >> 6, \
+ 4, 5, 6, 7); })
+
+#define _mm_shufflehi_epi16(a, imm) __extension__ ({ \
+ (__m128i)__builtin_shufflevector((__v8hi)(__m128i)(a), \
+ (__v8hi)_mm_setzero_si128(), \
+ 0, 1, 2, 3, \
+ 4 + (((imm) & 0x03) >> 0), \
+ 4 + (((imm) & 0x0c) >> 2), \
+ 4 + (((imm) & 0x30) >> 4), \
+ 4 + (((imm) & 0xc0) >> 6)); })
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_unpackhi_epi8(__m128i __a, __m128i __b)
+{
+ return (__m128i)__builtin_shufflevector((__v16qi)__a, (__v16qi)__b, 8, 16+8, 9, 16+9, 10, 16+10, 11, 16+11, 12, 16+12, 13, 16+13, 14, 16+14, 15, 16+15);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_unpackhi_epi16(__m128i __a, __m128i __b)
+{
+ return (__m128i)__builtin_shufflevector((__v8hi)__a, (__v8hi)__b, 4, 8+4, 5, 8+5, 6, 8+6, 7, 8+7);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_unpackhi_epi32(__m128i __a, __m128i __b)
+{
+ return (__m128i)__builtin_shufflevector((__v4si)__a, (__v4si)__b, 2, 4+2, 3, 4+3);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_unpackhi_epi64(__m128i __a, __m128i __b)
+{
+ return (__m128i)__builtin_shufflevector(__a, __b, 1, 2+1);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_unpacklo_epi8(__m128i __a, __m128i __b)
+{
+ return (__m128i)__builtin_shufflevector((__v16qi)__a, (__v16qi)__b, 0, 16+0, 1, 16+1, 2, 16+2, 3, 16+3, 4, 16+4, 5, 16+5, 6, 16+6, 7, 16+7);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_unpacklo_epi16(__m128i __a, __m128i __b)
+{
+ return (__m128i)__builtin_shufflevector((__v8hi)__a, (__v8hi)__b, 0, 8+0, 1, 8+1, 2, 8+2, 3, 8+3);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_unpacklo_epi32(__m128i __a, __m128i __b)
+{
+ return (__m128i)__builtin_shufflevector((__v4si)__a, (__v4si)__b, 0, 4+0, 1, 4+1);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_unpacklo_epi64(__m128i __a, __m128i __b)
+{
+ return (__m128i)__builtin_shufflevector(__a, __b, 0, 2+0);
+}
+
+static __inline__ __m64 __DEFAULT_FN_ATTRS
+_mm_movepi64_pi64(__m128i __a)
+{
+ return (__m64)__a[0];
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_movpi64_epi64(__m64 __a)
+{
+ return (__m128i){ (long long)__a, 0 };
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_move_epi64(__m128i __a)
+{
+ return __builtin_shufflevector(__a, (__m128i){ 0 }, 0, 2);
+}
+
+static __inline__ __m128d __DEFAULT_FN_ATTRS
+_mm_unpackhi_pd(__m128d __a, __m128d __b)
+{
+ return __builtin_shufflevector(__a, __b, 1, 2+1);
+}
+
+static __inline__ __m128d __DEFAULT_FN_ATTRS
+_mm_unpacklo_pd(__m128d __a, __m128d __b)
+{
+ return __builtin_shufflevector(__a, __b, 0, 2+0);
+}
+
+static __inline__ int __DEFAULT_FN_ATTRS
+_mm_movemask_pd(__m128d __a)
+{
+ return __builtin_ia32_movmskpd(__a);
+}
+
+#define _mm_shuffle_pd(a, b, i) __extension__ ({ \
+ (__m128d)__builtin_shufflevector((__v2df)(__m128d)(a), (__v2df)(__m128d)(b), \
+ (i) & 1, (((i) & 2) >> 1) + 2); })
+
+static __inline__ __m128 __DEFAULT_FN_ATTRS
+_mm_castpd_ps(__m128d __a)
+{
+ return (__m128)__a;
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_castpd_si128(__m128d __a)
+{
+ return (__m128i)__a;
+}
+
+static __inline__ __m128d __DEFAULT_FN_ATTRS
+_mm_castps_pd(__m128 __a)
+{
+ return (__m128d)__a;
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_castps_si128(__m128 __a)
+{
+ return (__m128i)__a;
+}
+
+static __inline__ __m128 __DEFAULT_FN_ATTRS
+_mm_castsi128_ps(__m128i __a)
+{
+ return (__m128)__a;
+}
+
+static __inline__ __m128d __DEFAULT_FN_ATTRS
+_mm_castsi128_pd(__m128i __a)
+{
+ return (__m128d)__a;
+}
+
+static __inline__ void __DEFAULT_FN_ATTRS
+_mm_pause(void)
+{
+ __builtin_ia32_pause();
+}
+
+#undef __DEFAULT_FN_ATTRS
+
+#define _MM_SHUFFLE2(x, y) (((x) << 1) | (y))
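+
+/* Usage sketch (illustrative): _MM_SHUFFLE2(hi, lo) builds the immediate for
+ * _mm_shuffle_pd; e.g. _mm_shuffle_pd(a, b, _MM_SHUFFLE2(0, 1)) yields
+ * { a[1], b[0] }. */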
+
+#endif /* __EMMINTRIN_H */
diff --git a/clang-2690385/lib64/clang/3.8/include/f16cintrin.h b/clang-2690385/lib64/clang/3.8/include/f16cintrin.h
new file mode 100644
index 00000000..c655d98e
--- /dev/null
+++ b/clang-2690385/lib64/clang/3.8/include/f16cintrin.h
@@ -0,0 +1,45 @@
+/*===---- f16cintrin.h - F16C intrinsics -----------------------------------===
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+ * THE SOFTWARE.
+ *
+ *===-----------------------------------------------------------------------===
+ */
+
+#if !defined __X86INTRIN_H && !defined __EMMINTRIN_H && !defined __IMMINTRIN_H
+#error "Never use <f16cintrin.h> directly; include <emmintrin.h> instead."
+#endif
+
+#ifndef __F16CINTRIN_H
+#define __F16CINTRIN_H
+
+/* Define the default attributes for the functions in this file. */
+#define __DEFAULT_FN_ATTRS __attribute__((__always_inline__, __nodebug__, __target__("f16c")))
+
+#define _mm_cvtps_ph(a, imm) __extension__ ({ \
+ (__m128i)__builtin_ia32_vcvtps2ph((__v4sf)(__m128)(a), (imm)); })
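+
+/* Note (illustrative): the imm argument selects the rounding mode for the
+ * float-to-half conversion: bits 1:0 give the rounding control (0 = round to
+ * nearest even) and setting bit 2 defers to MXCSR.RC instead. */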
+
+static __inline __m128 __DEFAULT_FN_ATTRS
+_mm_cvtph_ps(__m128i __a)
+{
+ return (__m128)__builtin_ia32_vcvtph2ps((__v8hi)__a);
+}
+
+#undef __DEFAULT_FN_ATTRS
+
+#endif /* __F16CINTRIN_H */
diff --git a/clang-2690385/lib64/clang/3.8/include/float.h b/clang-2690385/lib64/clang/3.8/include/float.h
new file mode 100644
index 00000000..238cf76b
--- /dev/null
+++ b/clang-2690385/lib64/clang/3.8/include/float.h
@@ -0,0 +1,124 @@
+/*===---- float.h - Characteristics of floating point types ----------------===
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+ * THE SOFTWARE.
+ *
+ *===-----------------------------------------------------------------------===
+ */
+
+#ifndef __FLOAT_H
+#define __FLOAT_H
+
+/* If we're on MinGW or MSVC, fall back to the system's float.h, which might
+ * have additional definitions provided for Windows.
+ * For more details see http://msdn.microsoft.com/en-us/library/y0ybw9fy.aspx
+ */
+#if (defined(__MINGW32__) || defined(_MSC_VER)) && __STDC_HOSTED__ && \
+ __has_include_next(<float.h>)
+# include_next <float.h>
+
+/* Undefine anything that we'll be redefining below. */
+# undef FLT_EVAL_METHOD
+# undef FLT_ROUNDS
+# undef FLT_RADIX
+# undef FLT_MANT_DIG
+# undef DBL_MANT_DIG
+# undef LDBL_MANT_DIG
+# undef DECIMAL_DIG
+# undef FLT_DIG
+# undef DBL_DIG
+# undef LDBL_DIG
+# undef FLT_MIN_EXP
+# undef DBL_MIN_EXP
+# undef LDBL_MIN_EXP
+# undef FLT_MIN_10_EXP
+# undef DBL_MIN_10_EXP
+# undef LDBL_MIN_10_EXP
+# undef FLT_MAX_EXP
+# undef DBL_MAX_EXP
+# undef LDBL_MAX_EXP
+# undef FLT_MAX_10_EXP
+# undef DBL_MAX_10_EXP
+# undef LDBL_MAX_10_EXP
+# undef FLT_MAX
+# undef DBL_MAX
+# undef LDBL_MAX
+# undef FLT_EPSILON
+# undef DBL_EPSILON
+# undef LDBL_EPSILON
+# undef FLT_MIN
+# undef DBL_MIN
+# undef LDBL_MIN
+# if __STDC_VERSION__ >= 201112L || !defined(__STRICT_ANSI__)
+# undef FLT_TRUE_MIN
+# undef DBL_TRUE_MIN
+# undef LDBL_TRUE_MIN
+# endif
+#endif
+
+/* Characteristics of floating point types, C99 5.2.4.2.2 */
+
+#define FLT_EVAL_METHOD __FLT_EVAL_METHOD__
+#define FLT_ROUNDS (__builtin_flt_rounds())
+#define FLT_RADIX __FLT_RADIX__
+
+#define FLT_MANT_DIG __FLT_MANT_DIG__
+#define DBL_MANT_DIG __DBL_MANT_DIG__
+#define LDBL_MANT_DIG __LDBL_MANT_DIG__
+
+#define DECIMAL_DIG __DECIMAL_DIG__
+
+#define FLT_DIG __FLT_DIG__
+#define DBL_DIG __DBL_DIG__
+#define LDBL_DIG __LDBL_DIG__
+
+#define FLT_MIN_EXP __FLT_MIN_EXP__
+#define DBL_MIN_EXP __DBL_MIN_EXP__
+#define LDBL_MIN_EXP __LDBL_MIN_EXP__
+
+#define FLT_MIN_10_EXP __FLT_MIN_10_EXP__
+#define DBL_MIN_10_EXP __DBL_MIN_10_EXP__
+#define LDBL_MIN_10_EXP __LDBL_MIN_10_EXP__
+
+#define FLT_MAX_EXP __FLT_MAX_EXP__
+#define DBL_MAX_EXP __DBL_MAX_EXP__
+#define LDBL_MAX_EXP __LDBL_MAX_EXP__
+
+#define FLT_MAX_10_EXP __FLT_MAX_10_EXP__
+#define DBL_MAX_10_EXP __DBL_MAX_10_EXP__
+#define LDBL_MAX_10_EXP __LDBL_MAX_10_EXP__
+
+#define FLT_MAX __FLT_MAX__
+#define DBL_MAX __DBL_MAX__
+#define LDBL_MAX __LDBL_MAX__
+
+#define FLT_EPSILON __FLT_EPSILON__
+#define DBL_EPSILON __DBL_EPSILON__
+#define LDBL_EPSILON __LDBL_EPSILON__
+
+#define FLT_MIN __FLT_MIN__
+#define DBL_MIN __DBL_MIN__
+#define LDBL_MIN __LDBL_MIN__
+
+#if __STDC_VERSION__ >= 201112L || !defined(__STRICT_ANSI__)
+# define FLT_TRUE_MIN __FLT_DENORM_MIN__
+# define DBL_TRUE_MIN __DBL_DENORM_MIN__
+# define LDBL_TRUE_MIN __LDBL_DENORM_MIN__
+#endif
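+
+/* A minimal sketch of how these constants are typically consumed: they are
+ * compile-time values supplied by the compiler, so a hosted program can
+ * print them directly.
+ *
+ *   #include <float.h>
+ *   #include <stdio.h>
+ *   int main(void) {
+ *     // For IEEE-754 binary32 this prints 24 and about 1.19e-07.
+ *     printf("%d %g\n", FLT_MANT_DIG, FLT_EPSILON);
+ *     return 0;
+ *   }
+ */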
+
+#endif /* __FLOAT_H */
diff --git a/clang-2690385/lib64/clang/3.8/include/fma4intrin.h b/clang-2690385/lib64/clang/3.8/include/fma4intrin.h
new file mode 100644
index 00000000..f1178877
--- /dev/null
+++ b/clang-2690385/lib64/clang/3.8/include/fma4intrin.h
@@ -0,0 +1,230 @@
+/*===---- fma4intrin.h - FMA4 intrinsics -----------------------------------===
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+ * THE SOFTWARE.
+ *
+ *===-----------------------------------------------------------------------===
+ */
+
+#ifndef __X86INTRIN_H
+#error "Never use <fma4intrin.h> directly; include <x86intrin.h> instead."
+#endif
+
+#ifndef __FMA4INTRIN_H
+#define __FMA4INTRIN_H
+
+#include <pmmintrin.h>
+
+/* Define the default attributes for the functions in this file. */
+#define __DEFAULT_FN_ATTRS __attribute__((__always_inline__, __nodebug__, __target__("fma4")))
+
+static __inline__ __m128 __DEFAULT_FN_ATTRS
+_mm_macc_ps(__m128 __A, __m128 __B, __m128 __C)
+{
+ return (__m128)__builtin_ia32_vfmaddps(__A, __B, __C);
+}
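+
+/* A short sketch (assuming an FMA4-capable CPU and -mfma4): _mm_macc_ps
+ * computes (__A * __B) + __C in each float lane with a single rounding.
+ *
+ *   __m128 a = _mm_set1_ps(2.0f);
+ *   __m128 b = _mm_set1_ps(3.0f);
+ *   __m128 c = _mm_set1_ps(1.0f);
+ *   __m128 r = _mm_macc_ps(a, b, c);  // every lane holds 7.0f
+ */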
+
+static __inline__ __m128d __DEFAULT_FN_ATTRS
+_mm_macc_pd(__m128d __A, __m128d __B, __m128d __C)
+{
+ return (__m128d)__builtin_ia32_vfmaddpd(__A, __B, __C);
+}
+
+static __inline__ __m128 __DEFAULT_FN_ATTRS
+_mm_macc_ss(__m128 __A, __m128 __B, __m128 __C)
+{
+ return (__m128)__builtin_ia32_vfmaddss(__A, __B, __C);
+}
+
+static __inline__ __m128d __DEFAULT_FN_ATTRS
+_mm_macc_sd(__m128d __A, __m128d __B, __m128d __C)
+{
+ return (__m128d)__builtin_ia32_vfmaddsd(__A, __B, __C);
+}
+
+static __inline__ __m128 __DEFAULT_FN_ATTRS
+_mm_msub_ps(__m128 __A, __m128 __B, __m128 __C)
+{
+ return (__m128)__builtin_ia32_vfmsubps(__A, __B, __C);
+}
+
+static __inline__ __m128d __DEFAULT_FN_ATTRS
+_mm_msub_pd(__m128d __A, __m128d __B, __m128d __C)
+{
+ return (__m128d)__builtin_ia32_vfmsubpd(__A, __B, __C);
+}
+
+static __inline__ __m128 __DEFAULT_FN_ATTRS
+_mm_msub_ss(__m128 __A, __m128 __B, __m128 __C)
+{
+ return (__m128)__builtin_ia32_vfmsubss(__A, __B, __C);
+}
+
+static __inline__ __m128d __DEFAULT_FN_ATTRS
+_mm_msub_sd(__m128d __A, __m128d __B, __m128d __C)
+{
+ return (__m128d)__builtin_ia32_vfmsubsd(__A, __B, __C);
+}
+
+static __inline__ __m128 __DEFAULT_FN_ATTRS
+_mm_nmacc_ps(__m128 __A, __m128 __B, __m128 __C)
+{
+ return (__m128)__builtin_ia32_vfnmaddps(__A, __B, __C);
+}
+
+static __inline__ __m128d __DEFAULT_FN_ATTRS
+_mm_nmacc_pd(__m128d __A, __m128d __B, __m128d __C)
+{
+ return (__m128d)__builtin_ia32_vfnmaddpd(__A, __B, __C);
+}
+
+static __inline__ __m128 __DEFAULT_FN_ATTRS
+_mm_nmacc_ss(__m128 __A, __m128 __B, __m128 __C)
+{
+ return (__m128)__builtin_ia32_vfnmaddss(__A, __B, __C);
+}
+
+static __inline__ __m128d __DEFAULT_FN_ATTRS
+_mm_nmacc_sd(__m128d __A, __m128d __B, __m128d __C)
+{
+ return (__m128d)__builtin_ia32_vfnmaddsd(__A, __B, __C);
+}
+
+static __inline__ __m128 __DEFAULT_FN_ATTRS
+_mm_nmsub_ps(__m128 __A, __m128 __B, __m128 __C)
+{
+ return (__m128)__builtin_ia32_vfnmsubps(__A, __B, __C);
+}
+
+static __inline__ __m128d __DEFAULT_FN_ATTRS
+_mm_nmsub_pd(__m128d __A, __m128d __B, __m128d __C)
+{
+ return (__m128d)__builtin_ia32_vfnmsubpd(__A, __B, __C);
+}
+
+static __inline__ __m128 __DEFAULT_FN_ATTRS
+_mm_nmsub_ss(__m128 __A, __m128 __B, __m128 __C)
+{
+ return (__m128)__builtin_ia32_vfnmsubss(__A, __B, __C);
+}
+
+static __inline__ __m128d __DEFAULT_FN_ATTRS
+_mm_nmsub_sd(__m128d __A, __m128d __B, __m128d __C)
+{
+ return (__m128d)__builtin_ia32_vfnmsubsd(__A, __B, __C);
+}
+
+static __inline__ __m128 __DEFAULT_FN_ATTRS
+_mm_maddsub_ps(__m128 __A, __m128 __B, __m128 __C)
+{
+ return (__m128)__builtin_ia32_vfmaddsubps(__A, __B, __C);
+}
+
+static __inline__ __m128d __DEFAULT_FN_ATTRS
+_mm_maddsub_pd(__m128d __A, __m128d __B, __m128d __C)
+{
+ return (__m128d)__builtin_ia32_vfmaddsubpd(__A, __B, __C);
+}
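+
+/* Note on the mixed forms: _mm_maddsub_ps alternates per lane, computing
+ * (a*b) - c in even lanes and (a*b) + c in odd lanes; _mm_msubadd_ps
+ * below swaps that pattern. A sketch:
+ *
+ *   __m128 r = _mm_maddsub_ps(a, b, c);
+ *   // r[0] = a[0]*b[0] - c[0],  r[1] = a[1]*b[1] + c[1],  ...
+ */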
+
+static __inline__ __m128 __DEFAULT_FN_ATTRS
+_mm_msubadd_ps(__m128 __A, __m128 __B, __m128 __C)
+{
+ return (__m128)__builtin_ia32_vfmsubaddps(__A, __B, __C);
+}
+
+static __inline__ __m128d __DEFAULT_FN_ATTRS
+_mm_msubadd_pd(__m128d __A, __m128d __B, __m128d __C)
+{
+ return (__m128d)__builtin_ia32_vfmsubaddpd(__A, __B, __C);
+}
+
+static __inline__ __m256 __DEFAULT_FN_ATTRS
+_mm256_macc_ps(__m256 __A, __m256 __B, __m256 __C)
+{
+ return (__m256)__builtin_ia32_vfmaddps256(__A, __B, __C);
+}
+
+static __inline__ __m256d __DEFAULT_FN_ATTRS
+_mm256_macc_pd(__m256d __A, __m256d __B, __m256d __C)
+{
+ return (__m256d)__builtin_ia32_vfmaddpd256(__A, __B, __C);
+}
+
+static __inline__ __m256 __DEFAULT_FN_ATTRS
+_mm256_msub_ps(__m256 __A, __m256 __B, __m256 __C)
+{
+ return (__m256)__builtin_ia32_vfmsubps256(__A, __B, __C);
+}
+
+static __inline__ __m256d __DEFAULT_FN_ATTRS
+_mm256_msub_pd(__m256d __A, __m256d __B, __m256d __C)
+{
+ return (__m256d)__builtin_ia32_vfmsubpd256(__A, __B, __C);
+}
+
+static __inline__ __m256 __DEFAULT_FN_ATTRS
+_mm256_nmacc_ps(__m256 __A, __m256 __B, __m256 __C)
+{
+ return (__m256)__builtin_ia32_vfnmaddps256(__A, __B, __C);
+}
+
+static __inline__ __m256d __DEFAULT_FN_ATTRS
+_mm256_nmacc_pd(__m256d __A, __m256d __B, __m256d __C)
+{
+ return (__m256d)__builtin_ia32_vfnmaddpd256(__A, __B, __C);
+}
+
+static __inline__ __m256 __DEFAULT_FN_ATTRS
+_mm256_nmsub_ps(__m256 __A, __m256 __B, __m256 __C)
+{
+ return (__m256)__builtin_ia32_vfnmsubps256(__A, __B, __C);
+}
+
+static __inline__ __m256d __DEFAULT_FN_ATTRS
+_mm256_nmsub_pd(__m256d __A, __m256d __B, __m256d __C)
+{
+ return (__m256d)__builtin_ia32_vfnmsubpd256(__A, __B, __C);
+}
+
+static __inline__ __m256 __DEFAULT_FN_ATTRS
+_mm256_maddsub_ps(__m256 __A, __m256 __B, __m256 __C)
+{
+ return (__m256)__builtin_ia32_vfmaddsubps256(__A, __B, __C);
+}
+
+static __inline__ __m256d __DEFAULT_FN_ATTRS
+_mm256_maddsub_pd(__m256d __A, __m256d __B, __m256d __C)
+{
+ return (__m256d)__builtin_ia32_vfmaddsubpd256(__A, __B, __C);
+}
+
+static __inline__ __m256 __DEFAULT_FN_ATTRS
+_mm256_msubadd_ps(__m256 __A, __m256 __B, __m256 __C)
+{
+ return (__m256)__builtin_ia32_vfmsubaddps256(__A, __B, __C);
+}
+
+static __inline__ __m256d __DEFAULT_FN_ATTRS
+_mm256_msubadd_pd(__m256d __A, __m256d __B, __m256d __C)
+{
+ return (__m256d)__builtin_ia32_vfmsubaddpd256(__A, __B, __C);
+}
+
+#undef __DEFAULT_FN_ATTRS
+
+#endif /* __FMA4INTRIN_H */
diff --git a/clang-2690385/lib64/clang/3.8/include/fmaintrin.h b/clang-2690385/lib64/clang/3.8/include/fmaintrin.h
new file mode 100644
index 00000000..114a1438
--- /dev/null
+++ b/clang-2690385/lib64/clang/3.8/include/fmaintrin.h
@@ -0,0 +1,228 @@
+/*===---- fmaintrin.h - FMA intrinsics -------------------------------------===
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+ * THE SOFTWARE.
+ *
+ *===-----------------------------------------------------------------------===
+ */
+
+#ifndef __IMMINTRIN_H
+#error "Never use <fmaintrin.h> directly; include <immintrin.h> instead."
+#endif
+
+#ifndef __FMAINTRIN_H
+#define __FMAINTRIN_H
+
+/* Define the default attributes for the functions in this file. */
+#define __DEFAULT_FN_ATTRS __attribute__((__always_inline__, __nodebug__, __target__("fma")))
+
+static __inline__ __m128 __DEFAULT_FN_ATTRS
+_mm_fmadd_ps(__m128 __A, __m128 __B, __m128 __C)
+{
+ return (__m128)__builtin_ia32_vfmaddps(__A, __B, __C);
+}
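+
+/* A short sketch (assuming FMA hardware and -mfma): the fused form
+ * evaluates a*b + c with one rounding step, which is what distinguishes
+ * it from a separate multiply and add.
+ *
+ *   __m128 x = _mm_set1_ps(1.5f);
+ *   __m128 y = _mm_set1_ps(4.0f);
+ *   __m128 z = _mm_set1_ps(0.5f);
+ *   __m128 r = _mm_fmadd_ps(x, y, z);  // every lane holds 6.5f
+ */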
+
+static __inline__ __m128d __DEFAULT_FN_ATTRS
+_mm_fmadd_pd(__m128d __A, __m128d __B, __m128d __C)
+{
+ return (__m128d)__builtin_ia32_vfmaddpd(__A, __B, __C);
+}
+
+static __inline__ __m128 __DEFAULT_FN_ATTRS
+_mm_fmadd_ss(__m128 __A, __m128 __B, __m128 __C)
+{
+ return (__m128)__builtin_ia32_vfmaddss(__A, __B, __C);
+}
+
+static __inline__ __m128d __DEFAULT_FN_ATTRS
+_mm_fmadd_sd(__m128d __A, __m128d __B, __m128d __C)
+{
+ return (__m128d)__builtin_ia32_vfmaddsd(__A, __B, __C);
+}
+
+static __inline__ __m128 __DEFAULT_FN_ATTRS
+_mm_fmsub_ps(__m128 __A, __m128 __B, __m128 __C)
+{
+ return (__m128)__builtin_ia32_vfmsubps(__A, __B, __C);
+}
+
+static __inline__ __m128d __DEFAULT_FN_ATTRS
+_mm_fmsub_pd(__m128d __A, __m128d __B, __m128d __C)
+{
+ return (__m128d)__builtin_ia32_vfmsubpd(__A, __B, __C);
+}
+
+static __inline__ __m128 __DEFAULT_FN_ATTRS
+_mm_fmsub_ss(__m128 __A, __m128 __B, __m128 __C)
+{
+ return (__m128)__builtin_ia32_vfmsubss(__A, __B, __C);
+}
+
+static __inline__ __m128d __DEFAULT_FN_ATTRS
+_mm_fmsub_sd(__m128d __A, __m128d __B, __m128d __C)
+{
+ return (__m128d)__builtin_ia32_vfmsubsd(__A, __B, __C);
+}
+
+static __inline__ __m128 __DEFAULT_FN_ATTRS
+_mm_fnmadd_ps(__m128 __A, __m128 __B, __m128 __C)
+{
+ return (__m128)__builtin_ia32_vfnmaddps(__A, __B, __C);
+}
+
+static __inline__ __m128d __DEFAULT_FN_ATTRS
+_mm_fnmadd_pd(__m128d __A, __m128d __B, __m128d __C)
+{
+ return (__m128d)__builtin_ia32_vfnmaddpd(__A, __B, __C);
+}
+
+static __inline__ __m128 __DEFAULT_FN_ATTRS
+_mm_fnmadd_ss(__m128 __A, __m128 __B, __m128 __C)
+{
+ return (__m128)__builtin_ia32_vfnmaddss(__A, __B, __C);
+}
+
+static __inline__ __m128d __DEFAULT_FN_ATTRS
+_mm_fnmadd_sd(__m128d __A, __m128d __B, __m128d __C)
+{
+ return (__m128d)__builtin_ia32_vfnmaddsd(__A, __B, __C);
+}
+
+static __inline__ __m128 __DEFAULT_FN_ATTRS
+_mm_fnmsub_ps(__m128 __A, __m128 __B, __m128 __C)
+{
+ return (__m128)__builtin_ia32_vfnmsubps(__A, __B, __C);
+}
+
+static __inline__ __m128d __DEFAULT_FN_ATTRS
+_mm_fnmsub_pd(__m128d __A, __m128d __B, __m128d __C)
+{
+ return (__m128d)__builtin_ia32_vfnmsubpd(__A, __B, __C);
+}
+
+static __inline__ __m128 __DEFAULT_FN_ATTRS
+_mm_fnmsub_ss(__m128 __A, __m128 __B, __m128 __C)
+{
+ return (__m128)__builtin_ia32_vfnmsubss(__A, __B, __C);
+}
+
+static __inline__ __m128d __DEFAULT_FN_ATTRS
+_mm_fnmsub_sd(__m128d __A, __m128d __B, __m128d __C)
+{
+ return (__m128d)__builtin_ia32_vfnmsubsd(__A, __B, __C);
+}
+
+static __inline__ __m128 __DEFAULT_FN_ATTRS
+_mm_fmaddsub_ps(__m128 __A, __m128 __B, __m128 __C)
+{
+ return (__m128)__builtin_ia32_vfmaddsubps(__A, __B, __C);
+}
+
+static __inline__ __m128d __DEFAULT_FN_ATTRS
+_mm_fmaddsub_pd(__m128d __A, __m128d __B, __m128d __C)
+{
+ return (__m128d)__builtin_ia32_vfmaddsubpd(__A, __B, __C);
+}
+
+static __inline__ __m128 __DEFAULT_FN_ATTRS
+_mm_fmsubadd_ps(__m128 __A, __m128 __B, __m128 __C)
+{
+ return (__m128)__builtin_ia32_vfmsubaddps(__A, __B, __C);
+}
+
+static __inline__ __m128d __DEFAULT_FN_ATTRS
+_mm_fmsubadd_pd(__m128d __A, __m128d __B, __m128d __C)
+{
+ return (__m128d)__builtin_ia32_vfmsubaddpd(__A, __B, __C);
+}
+
+static __inline__ __m256 __DEFAULT_FN_ATTRS
+_mm256_fmadd_ps(__m256 __A, __m256 __B, __m256 __C)
+{
+ return (__m256)__builtin_ia32_vfmaddps256(__A, __B, __C);
+}
+
+static __inline__ __m256d __DEFAULT_FN_ATTRS
+_mm256_fmadd_pd(__m256d __A, __m256d __B, __m256d __C)
+{
+ return (__m256d)__builtin_ia32_vfmaddpd256(__A, __B, __C);
+}
+
+static __inline__ __m256 __DEFAULT_FN_ATTRS
+_mm256_fmsub_ps(__m256 __A, __m256 __B, __m256 __C)
+{
+ return (__m256)__builtin_ia32_vfmsubps256(__A, __B, __C);
+}
+
+static __inline__ __m256d __DEFAULT_FN_ATTRS
+_mm256_fmsub_pd(__m256d __A, __m256d __B, __m256d __C)
+{
+ return (__m256d)__builtin_ia32_vfmsubpd256(__A, __B, __C);
+}
+
+static __inline__ __m256 __DEFAULT_FN_ATTRS
+_mm256_fnmadd_ps(__m256 __A, __m256 __B, __m256 __C)
+{
+ return (__m256)__builtin_ia32_vfnmaddps256(__A, __B, __C);
+}
+
+static __inline__ __m256d __DEFAULT_FN_ATTRS
+_mm256_fnmadd_pd(__m256d __A, __m256d __B, __m256d __C)
+{
+ return (__m256d)__builtin_ia32_vfnmaddpd256(__A, __B, __C);
+}
+
+static __inline__ __m256 __DEFAULT_FN_ATTRS
+_mm256_fnmsub_ps(__m256 __A, __m256 __B, __m256 __C)
+{
+ return (__m256)__builtin_ia32_vfnmsubps256(__A, __B, __C);
+}
+
+static __inline__ __m256d __DEFAULT_FN_ATTRS
+_mm256_fnmsub_pd(__m256d __A, __m256d __B, __m256d __C)
+{
+ return (__m256d)__builtin_ia32_vfnmsubpd256(__A, __B, __C);
+}
+
+static __inline__ __m256 __DEFAULT_FN_ATTRS
+_mm256_fmaddsub_ps(__m256 __A, __m256 __B, __m256 __C)
+{
+ return (__m256)__builtin_ia32_vfmaddsubps256(__A, __B, __C);
+}
+
+static __inline__ __m256d __DEFAULT_FN_ATTRS
+_mm256_fmaddsub_pd(__m256d __A, __m256d __B, __m256d __C)
+{
+ return (__m256d)__builtin_ia32_vfmaddsubpd256(__A, __B, __C);
+}
+
+static __inline__ __m256 __DEFAULT_FN_ATTRS
+_mm256_fmsubadd_ps(__m256 __A, __m256 __B, __m256 __C)
+{
+ return (__m256)__builtin_ia32_vfmsubaddps256(__A, __B, __C);
+}
+
+static __inline__ __m256d __DEFAULT_FN_ATTRS
+_mm256_fmsubadd_pd(__m256d __A, __m256d __B, __m256d __C)
+{
+ return (__m256d)__builtin_ia32_vfmsubaddpd256(__A, __B, __C);
+}
+
+#undef __DEFAULT_FN_ATTRS
+
+#endif /* __FMAINTRIN_H */
diff --git a/clang-2690385/lib64/clang/3.8/include/fxsrintrin.h b/clang-2690385/lib64/clang/3.8/include/fxsrintrin.h
new file mode 100644
index 00000000..ac6026aa
--- /dev/null
+++ b/clang-2690385/lib64/clang/3.8/include/fxsrintrin.h
@@ -0,0 +1,55 @@
+/*===---- fxsrintrin.h - FXSR intrinsics -----------------------------------===
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+ * THE SOFTWARE.
+ *
+ *===-----------------------------------------------------------------------===
+ */
+
+#ifndef __IMMINTRIN_H
+#error "Never use <fxsrintrin.h> directly; include <immintrin.h> instead."
+#endif
+
+#ifndef __FXSRINTRIN_H
+#define __FXSRINTRIN_H
+
+#define __DEFAULT_FN_ATTRS __attribute__((__always_inline__, __nodebug__, __target__("fxsr")))
+
+static __inline__ void __DEFAULT_FN_ATTRS
+_fxsave(void *__p) {
+ return __builtin_ia32_fxsave(__p);
+}
+
+static __inline__ void __DEFAULT_FN_ATTRS
+_fxsave64(void *__p) {
+ return __builtin_ia32_fxsave64(__p);
+}
+
+static __inline__ void __DEFAULT_FN_ATTRS
+_fxrstor(void *__p) {
+ return __builtin_ia32_fxrstor(__p);
+}
+
+static __inline__ void __DEFAULT_FN_ATTRS
+_fxrstor64(void *__p) {
+ return __builtin_ia32_fxrstor64(__p);
+}
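+
+/* Usage note: the FXSAVE/FXRSTOR area must be 512 bytes long and 16-byte
+ * aligned or the instructions fault. A sketch:
+ *
+ *   char buf[512] __attribute__((aligned(16)));
+ *   _fxsave(buf);   // snapshot x87/MMX/SSE state
+ *   _fxrstor(buf);  // restore it later
+ */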
+
+#undef __DEFAULT_FN_ATTRS
+
+#endif
diff --git a/clang-2690385/lib64/clang/3.8/include/htmintrin.h b/clang-2690385/lib64/clang/3.8/include/htmintrin.h
new file mode 100644
index 00000000..0088c7cc
--- /dev/null
+++ b/clang-2690385/lib64/clang/3.8/include/htmintrin.h
@@ -0,0 +1,226 @@
+/*===---- htmintrin.h - Standard header for PowerPC HTM ---------------===*\
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+ * THE SOFTWARE.
+ *
+\*===----------------------------------------------------------------------===*/
+
+#ifndef __HTMINTRIN_H
+#define __HTMINTRIN_H
+
+#ifndef __HTM__
+#error "HTM instruction set not enabled"
+#endif
+
+#ifdef __powerpc__
+
+#include <stdint.h>
+
+typedef uint64_t texasr_t;
+typedef uint32_t texasru_t;
+typedef uint32_t texasrl_t;
+typedef uintptr_t tfiar_t;
+typedef uintptr_t tfhar_t;
+
+#define _HTM_STATE(CR0) ((CR0 >> 1) & 0x3)
+#define _HTM_NONTRANSACTIONAL 0x0
+#define _HTM_SUSPENDED 0x1
+#define _HTM_TRANSACTIONAL 0x2
+
+#define _TEXASR_EXTRACT_BITS(TEXASR,BITNUM,SIZE) \
+ (((TEXASR) >> (63-(BITNUM))) & ((1<<(SIZE))-1))
+#define _TEXASRU_EXTRACT_BITS(TEXASR,BITNUM,SIZE) \
+ (((TEXASR) >> (31-(BITNUM))) & ((1<<(SIZE))-1))
+
+#define _TEXASR_FAILURE_CODE(TEXASR) \
+ _TEXASR_EXTRACT_BITS(TEXASR, 7, 8)
+#define _TEXASRU_FAILURE_CODE(TEXASRU) \
+ _TEXASRU_EXTRACT_BITS(TEXASRU, 7, 8)
+
+#define _TEXASR_FAILURE_PERSISTENT(TEXASR) \
+ _TEXASR_EXTRACT_BITS(TEXASR, 7, 1)
+#define _TEXASRU_FAILURE_PERSISTENT(TEXASRU) \
+ _TEXASRU_EXTRACT_BITS(TEXASRU, 7, 1)
+
+#define _TEXASR_DISALLOWED(TEXASR) \
+ _TEXASR_EXTRACT_BITS(TEXASR, 8, 1)
+#define _TEXASRU_DISALLOWED(TEXASRU) \
+ _TEXASRU_EXTRACT_BITS(TEXASRU, 8, 1)
+
+#define _TEXASR_NESTING_OVERFLOW(TEXASR) \
+ _TEXASR_EXTRACT_BITS(TEXASR, 9, 1)
+#define _TEXASRU_NESTING_OVERFLOW(TEXASRU) \
+ _TEXASRU_EXTRACT_BITS(TEXASRU, 9, 1)
+
+#define _TEXASR_FOOTPRINT_OVERFLOW(TEXASR) \
+ _TEXASR_EXTRACT_BITS(TEXASR, 10, 1)
+#define _TEXASRU_FOOTPRINT_OVERFLOW(TEXASRU) \
+ _TEXASRU_EXTRACT_BITS(TEXASRU, 10, 1)
+
+#define _TEXASR_SELF_INDUCED_CONFLICT(TEXASR) \
+ _TEXASR_EXTRACT_BITS(TEXASR, 11, 1)
+#define _TEXASRU_SELF_INDUCED_CONFLICT(TEXASRU) \
+ _TEXASRU_EXTRACT_BITS(TEXASRU, 11, 1)
+
+#define _TEXASR_NON_TRANSACTIONAL_CONFLICT(TEXASR) \
+ _TEXASR_EXTRACT_BITS(TEXASR, 12, 1)
+#define _TEXASRU_NON_TRANSACTIONAL_CONFLICT(TEXASRU) \
+ _TEXASRU_EXTRACT_BITS(TEXASRU, 12, 1)
+
+#define _TEXASR_TRANSACTION_CONFLICT(TEXASR) \
+ _TEXASR_EXTRACT_BITS(TEXASR, 13, 1)
+#define _TEXASRU_TRANSACTION_CONFLICT(TEXASRU) \
+ _TEXASRU_EXTRACT_BITS(TEXASRU, 13, 1)
+
+#define _TEXASR_TRANSLATION_INVALIDATION_CONFLICT(TEXASR) \
+ _TEXASR_EXTRACT_BITS(TEXASR, 14, 1)
+#define _TEXASRU_TRANSLATION_INVALIDATION_CONFLICT(TEXASRU) \
+ _TEXASRU_EXTRACT_BITS(TEXASRU, 14, 1)
+
+#define _TEXASR_IMPLEMENTATION_SPECIFIC(TEXASR) \
+ _TEXASR_EXTRACT_BITS(TEXASR, 15, 1)
+#define _TEXASRU_IMPLEMENTATION_SPECIFIC(TEXASRU) \
+ _TEXASRU_EXTRACT_BITS(TEXASRU, 15, 1)
+
+#define _TEXASR_INSTRUCTION_FETCH_CONFLICT(TEXASR) \
+ _TEXASR_EXTRACT_BITS(TEXASR, 16, 1)
+#define _TEXASRU_INSTRUCTION_FETCH_CONFLICT(TEXASRU) \
+ _TEXASRU_EXTRACT_BITS(TEXASRU, 16, 1)
+
+#define _TEXASR_ABORT(TEXASR) \
+ _TEXASR_EXTRACT_BITS(TEXASR, 31, 1)
+#define _TEXASRU_ABORT(TEXASRU) \
+ _TEXASRU_EXTRACT_BITS(TEXASRU, 31, 1)
+
+
+#define _TEXASR_SUSPENDED(TEXASR) \
+ _TEXASR_EXTRACT_BITS(TEXASR, 32, 1)
+
+#define _TEXASR_PRIVILEGE(TEXASR) \
+ _TEXASR_EXTRACT_BITS(TEXASR, 35, 2)
+
+#define _TEXASR_FAILURE_SUMMARY(TEXASR) \
+ _TEXASR_EXTRACT_BITS(TEXASR, 36, 1)
+
+#define _TEXASR_TFIAR_EXACT(TEXASR) \
+ _TEXASR_EXTRACT_BITS(TEXASR, 37, 1)
+
+#define _TEXASR_ROT(TEXASR) \
+ _TEXASR_EXTRACT_BITS(TEXASR, 38, 1)
+
+#define _TEXASR_TRANSACTION_LEVEL(TEXASR) \
+ _TEXASR_EXTRACT_BITS(TEXASR, 63, 12)
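+
+/* A sketch of how these accessors are typically used after a failed
+ * transaction, given a TEXASR value obtained from the compiler builtin:
+ *
+ *   texasr_t t = __builtin_get_texasr();
+ *   if (_TEXASR_FAILURE_PERSISTENT(t))
+ *     take_lock();  // hypothetical fallback; retrying cannot succeed
+ */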
+
+#endif /* __powerpc__ */
+
+#ifdef __s390__
+
+/* Condition codes generated by tbegin */
+#define _HTM_TBEGIN_STARTED 0
+#define _HTM_TBEGIN_INDETERMINATE 1
+#define _HTM_TBEGIN_TRANSIENT 2
+#define _HTM_TBEGIN_PERSISTENT 3
+
+/* The abort codes below this threshold are reserved for machine use. */
+#define _HTM_FIRST_USER_ABORT_CODE 256
+
+/* The transaction diagnostic block as it is defined in the Principles
+ of Operation, chapter 5-91. */
+
+struct __htm_tdb {
+ unsigned char format; /* 0 */
+ unsigned char flags;
+ unsigned char reserved1[4];
+ unsigned short nesting_depth;
+ unsigned long long abort_code; /* 8 */
+ unsigned long long conflict_token; /* 16 */
+ unsigned long long atia; /* 24 */
+ unsigned char eaid; /* 32 */
+ unsigned char dxc;
+ unsigned char reserved2[2];
+ unsigned int program_int_id;
+ unsigned long long exception_id; /* 40 */
+ unsigned long long bea; /* 48 */
+ unsigned char reserved3[72]; /* 56 */
+ unsigned long long gprs[16]; /* 128 */
+} __attribute__((__packed__, __aligned__ (8)));
+
+
+/* Helper intrinsics to retry tbegin in case of transient failure. */
+
+static __inline int __attribute__((__always_inline__, __nodebug__))
+__builtin_tbegin_retry_null (int retry)
+{
+ int cc, i = 0;
+
+ while ((cc = __builtin_tbegin(0)) == _HTM_TBEGIN_TRANSIENT
+ && i++ < retry)
+ __builtin_tx_assist(i);
+
+ return cc;
+}
+
+static __inline int __attribute__((__always_inline__, __nodebug__))
+__builtin_tbegin_retry_tdb (void *tdb, int retry)
+{
+ int cc, i = 0;
+
+ while ((cc = __builtin_tbegin(tdb)) == _HTM_TBEGIN_TRANSIENT
+ && i++ < retry)
+ __builtin_tx_assist(i);
+
+ return cc;
+}
+
+#define __builtin_tbegin_retry(tdb, retry) \
+ (__builtin_constant_p(tdb == 0) && tdb == 0 ? \
+ __builtin_tbegin_retry_null(retry) : \
+ __builtin_tbegin_retry_tdb(tdb, retry))
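+
+/* A sketch of the intended retry pattern: attempt the transaction a few
+ * times on transient failure, then fall back (fallback path left to the
+ * caller):
+ *
+ *   if (__builtin_tbegin_retry(0, 5) == _HTM_TBEGIN_STARTED) {
+ *     ...                 // transactional code
+ *     __builtin_tend();
+ *   } else {
+ *     ...                 // non-transactional fallback
+ *   }
+ */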
+
+static __inline int __attribute__((__always_inline__, __nodebug__))
+__builtin_tbegin_retry_nofloat_null (int retry)
+{
+ int cc, i = 0;
+
+ while ((cc = __builtin_tbegin_nofloat(0)) == _HTM_TBEGIN_TRANSIENT
+ && i++ < retry)
+ __builtin_tx_assist(i);
+
+ return cc;
+}
+
+static __inline int __attribute__((__always_inline__, __nodebug__))
+__builtin_tbegin_retry_nofloat_tdb (void *tdb, int retry)
+{
+ int cc, i = 0;
+
+ while ((cc = __builtin_tbegin_nofloat(tdb)) == _HTM_TBEGIN_TRANSIENT
+ && i++ < retry)
+ __builtin_tx_assist(i);
+
+ return cc;
+}
+
+#define __builtin_tbegin_retry_nofloat(tdb, retry) \
+ (__builtin_constant_p(tdb == 0) && tdb == 0 ? \
+ __builtin_tbegin_retry_nofloat_null(retry) : \
+ __builtin_tbegin_retry_nofloat_tdb(tdb, retry))
+
+#endif /* __s390__ */
+
+#endif /* __HTMINTRIN_H */
diff --git a/clang-2690385/lib64/clang/3.8/include/htmxlintrin.h b/clang-2690385/lib64/clang/3.8/include/htmxlintrin.h
new file mode 100644
index 00000000..c7571ecd
--- /dev/null
+++ b/clang-2690385/lib64/clang/3.8/include/htmxlintrin.h
@@ -0,0 +1,363 @@
+/*===---- htmxlintrin.h - XL compiler HTM execution intrinsics ------------===*\
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+ * THE SOFTWARE.
+ *
+\*===----------------------------------------------------------------------===*/
+
+#ifndef __HTMXLINTRIN_H
+#define __HTMXLINTRIN_H
+
+#ifndef __HTM__
+#error "HTM instruction set not enabled"
+#endif
+
+#include <htmintrin.h>
+
+#ifdef __powerpc__
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#define _TEXASR_PTR(TM_BUF) \
+ ((texasr_t *)((TM_BUF)+0))
+#define _TEXASRU_PTR(TM_BUF) \
+ ((texasru_t *)((TM_BUF)+0))
+#define _TEXASRL_PTR(TM_BUF) \
+ ((texasrl_t *)((TM_BUF)+4))
+#define _TFIAR_PTR(TM_BUF) \
+ ((tfiar_t *)((TM_BUF)+8))
+
+typedef char TM_buff_type[16];
+
+/* This macro can be used to determine whether a transaction was successfully
+ started from the __TM_begin() and __TM_simple_begin() intrinsic functions
+ below. */
+#define _HTM_TBEGIN_STARTED 1
+
+extern __inline long
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+__TM_simple_begin (void)
+{
+ if (__builtin_expect (__builtin_tbegin (0), 1))
+ return _HTM_TBEGIN_STARTED;
+ return 0;
+}
+
+extern __inline long
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+__TM_begin (void* const TM_buff)
+{
+ *_TEXASRL_PTR (TM_buff) = 0;
+ if (__builtin_expect (__builtin_tbegin (0), 1))
+ return _HTM_TBEGIN_STARTED;
+#ifdef __powerpc64__
+ *_TEXASR_PTR (TM_buff) = __builtin_get_texasr ();
+#else
+ *_TEXASRU_PTR (TM_buff) = __builtin_get_texasru ();
+ *_TEXASRL_PTR (TM_buff) = __builtin_get_texasr ();
+#endif
+ *_TFIAR_PTR (TM_buff) = __builtin_get_tfiar ();
+ return 0;
+}
+
+extern __inline long
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+__TM_end (void)
+{
+ if (__builtin_expect (__builtin_tend (0), 1))
+ return 1;
+ return 0;
+}
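+
+/* A sketch of the canonical begin/end pairing on PowerPC (note that this
+ * header defines _HTM_TBEGIN_STARTED as 1):
+ *
+ *   if (__TM_simple_begin() == _HTM_TBEGIN_STARTED) {
+ *     ...          // transactional code
+ *     __TM_end();
+ *   } else {
+ *     ...          // fallback path
+ *   }
+ */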
+
+extern __inline void
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+__TM_abort (void)
+{
+ __builtin_tabort (0);
+}
+
+extern __inline void
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+__TM_named_abort (unsigned char const code)
+{
+ __builtin_tabort (code);
+}
+
+extern __inline void
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+__TM_resume (void)
+{
+ __builtin_tresume ();
+}
+
+extern __inline void
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+__TM_suspend (void)
+{
+ __builtin_tsuspend ();
+}
+
+extern __inline long
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+__TM_is_user_abort (void* const TM_buff)
+{
+ texasru_t texasru = *_TEXASRU_PTR (TM_buff);
+ return _TEXASRU_ABORT (texasru);
+}
+
+extern __inline long
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+__TM_is_named_user_abort (void* const TM_buff, unsigned char *code)
+{
+ texasru_t texasru = *_TEXASRU_PTR (TM_buff);
+
+ *code = _TEXASRU_FAILURE_CODE (texasru);
+ return _TEXASRU_ABORT (texasru);
+}
+
+extern __inline long
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+__TM_is_illegal (void* const TM_buff)
+{
+ texasru_t texasru = *_TEXASRU_PTR (TM_buff);
+ return _TEXASRU_DISALLOWED (texasru);
+}
+
+extern __inline long
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+__TM_is_footprint_exceeded (void* const TM_buff)
+{
+ texasru_t texasru = *_TEXASRU_PTR (TM_buff);
+ return _TEXASRU_FOOTPRINT_OVERFLOW (texasru);
+}
+
+extern __inline long
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+__TM_nesting_depth (void* const TM_buff)
+{
+ texasrl_t texasrl;
+
+ if (_HTM_STATE (__builtin_ttest ()) == _HTM_NONTRANSACTIONAL)
+ {
+ texasrl = *_TEXASRL_PTR (TM_buff);
+ if (!_TEXASR_FAILURE_SUMMARY (texasrl))
+ texasrl = 0;
+ }
+ else
+ texasrl = (texasrl_t) __builtin_get_texasr ();
+
+ return _TEXASR_TRANSACTION_LEVEL (texasrl);
+}
+
+extern __inline long
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+__TM_is_nested_too_deep(void* const TM_buff)
+{
+ texasru_t texasru = *_TEXASRU_PTR (TM_buff);
+ return _TEXASRU_NESTING_OVERFLOW (texasru);
+}
+
+extern __inline long
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+__TM_is_conflict(void* const TM_buff)
+{
+ texasru_t texasru = *_TEXASRU_PTR (TM_buff);
+ /* Return TEXASR bits 11 (Self-Induced Conflict) through
+ 14 (Translation Invalidation Conflict). */
+ return (_TEXASRU_EXTRACT_BITS (texasru, 14, 4)) ? 1 : 0;
+}
+
+extern __inline long
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+__TM_is_failure_persistent(void* const TM_buff)
+{
+ texasru_t texasru = *_TEXASRU_PTR (TM_buff);
+ return _TEXASRU_FAILURE_PERSISTENT (texasru);
+}
+
+extern __inline long
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+__TM_failure_address(void* const TM_buff)
+{
+ return *_TFIAR_PTR (TM_buff);
+}
+
+extern __inline long long
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+__TM_failure_code(void* const TM_buff)
+{
+ return *_TEXASR_PTR (TM_buff);
+}
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* __powerpc__ */
+
+#ifdef __s390__
+
+#include <stdint.h>
+
+/* These intrinsics are being made available for compatibility with
+ the IBM XL compiler. For documentation please see the "z/OS XL
+ C/C++ Programming Guide" publicly available on the web. */
+
+static __inline long __attribute__((__always_inline__, __nodebug__))
+__TM_simple_begin ()
+{
+ return __builtin_tbegin_nofloat (0);
+}
+
+static __inline long __attribute__((__always_inline__, __nodebug__))
+__TM_begin (void* const tdb)
+{
+ return __builtin_tbegin_nofloat (tdb);
+}
+
+static __inline long __attribute__((__always_inline__, __nodebug__))
+__TM_end ()
+{
+ return __builtin_tend ();
+}
+
+static __inline void __attribute__((__always_inline__))
+__TM_abort ()
+{
+ return __builtin_tabort (_HTM_FIRST_USER_ABORT_CODE);
+}
+
+static __inline void __attribute__((__always_inline__, __nodebug__))
+__TM_named_abort (unsigned char const code)
+{
+ return __builtin_tabort ((int)_HTM_FIRST_USER_ABORT_CODE + code);
+}
+
+static __inline void __attribute__((__always_inline__, __nodebug__))
+__TM_non_transactional_store (void* const addr, long long const value)
+{
+ __builtin_non_tx_store ((uint64_t*)addr, (uint64_t)value);
+}
+
+static __inline long __attribute__((__always_inline__, __nodebug__))
+__TM_nesting_depth (void* const tdb_ptr)
+{
+ int depth = __builtin_tx_nesting_depth ();
+ struct __htm_tdb *tdb = (struct __htm_tdb*)tdb_ptr;
+
+ if (depth != 0)
+ return depth;
+
+ if (tdb->format != 1)
+ return 0;
+ return tdb->nesting_depth;
+}
+
+/* Transaction failure diagnostics */
+
+static __inline long __attribute__((__always_inline__, __nodebug__))
+__TM_is_user_abort (void* const tdb_ptr)
+{
+ struct __htm_tdb *tdb = (struct __htm_tdb*)tdb_ptr;
+
+ if (tdb->format != 1)
+ return 0;
+
+ return !!(tdb->abort_code >= _HTM_FIRST_USER_ABORT_CODE);
+}
+
+static __inline long __attribute__((__always_inline__, __nodebug__))
+__TM_is_named_user_abort (void* const tdb_ptr, unsigned char* code)
+{
+ struct __htm_tdb *tdb = (struct __htm_tdb*)tdb_ptr;
+
+ if (tdb->format != 1)
+ return 0;
+
+ if (tdb->abort_code >= _HTM_FIRST_USER_ABORT_CODE)
+ {
+ *code = tdb->abort_code - _HTM_FIRST_USER_ABORT_CODE;
+ return 1;
+ }
+ return 0;
+}
+
+static __inline long __attribute__((__always_inline__, __nodebug__))
+__TM_is_illegal (void* const tdb_ptr)
+{
+ struct __htm_tdb *tdb = (struct __htm_tdb*)tdb_ptr;
+
+ return (tdb->format == 1
+ && (tdb->abort_code == 4 /* unfiltered program interruption */
+ || tdb->abort_code == 11 /* restricted instruction */));
+}
+
+static __inline long __attribute__((__always_inline__, __nodebug__))
+__TM_is_footprint_exceeded (void* const tdb_ptr)
+{
+ struct __htm_tdb *tdb = (struct __htm_tdb*)tdb_ptr;
+
+ return (tdb->format == 1
+ && (tdb->abort_code == 7 /* fetch overflow */
+ || tdb->abort_code == 8 /* store overflow */));
+}
+
+static __inline long __attribute__((__always_inline__, __nodebug__))
+__TM_is_nested_too_deep (void* const tdb_ptr)
+{
+ struct __htm_tdb *tdb = (struct __htm_tdb*)tdb_ptr;
+
+ return tdb->format == 1 && tdb->abort_code == 13; /* depth exceeded */
+}
+
+static __inline long __attribute__((__always_inline__, __nodebug__))
+__TM_is_conflict (void* const tdb_ptr)
+{
+ struct __htm_tdb *tdb = (struct __htm_tdb*)tdb_ptr;
+
+ return (tdb->format == 1
+ && (tdb->abort_code == 9 /* fetch conflict */
+ || tdb->abort_code == 10 /* store conflict */));
+}
+
+static __inline long __attribute__((__always_inline__, __nodebug__))
+__TM_is_failure_persistent (long const result)
+{
+ return result == _HTM_TBEGIN_PERSISTENT;
+}
+
+static __inline long __attribute__((__always_inline__, __nodebug__))
+__TM_failure_address (void* const tdb_ptr)
+{
+ struct __htm_tdb *tdb = (struct __htm_tdb*)tdb_ptr;
+ return tdb->atia;
+}
+
+static __inline long __attribute__((__always_inline__, __nodebug__))
+__TM_failure_code (void* const tdb_ptr)
+{
+ struct __htm_tdb *tdb = (struct __htm_tdb*)tdb_ptr;
+
+ return tdb->abort_code;
+}
+
+#endif /* __s390__ */
+
+#endif /* __HTMXLINTRIN_H */
diff --git a/clang-2690385/lib64/clang/3.8/include/ia32intrin.h b/clang-2690385/lib64/clang/3.8/include/ia32intrin.h
new file mode 100644
index 00000000..5adf3f1f
--- /dev/null
+++ b/clang-2690385/lib64/clang/3.8/include/ia32intrin.h
@@ -0,0 +1,101 @@
+/* ===-------- ia32intrin.h ---------------------------------------------------===
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+ * THE SOFTWARE.
+ *
+ *===-----------------------------------------------------------------------===
+ */
+
+#ifndef __X86INTRIN_H
+#error "Never use <ia32intrin.h> directly; include <x86intrin.h> instead."
+#endif
+
+#ifndef __IA32INTRIN_H
+#define __IA32INTRIN_H
+
+#ifdef __x86_64__
+static __inline__ unsigned long long __attribute__((__always_inline__, __nodebug__))
+__readeflags(void)
+{
+ unsigned long long __res = 0;
+ __asm__ __volatile__ ("pushf\n\t"
+ "popq %0\n"
+ :"=r"(__res)
+ :
+ :
+ );
+ return __res;
+}
+
+static __inline__ void __attribute__((__always_inline__, __nodebug__))
+__writeeflags(unsigned long long __f)
+{
+ __asm__ __volatile__ ("pushq %0\n\t"
+ "popf\n"
+ :
+ :"r"(__f)
+ :"flags"
+ );
+}
+
+#else /* !__x86_64__ */
+static __inline__ unsigned int __attribute__((__always_inline__, __nodebug__))
+__readeflags(void)
+{
+ unsigned int __res = 0;
+ __asm__ __volatile__ ("pushf\n\t"
+ "popl %0\n"
+ :"=r"(__res)
+ :
+ :
+ );
+ return __res;
+}
+
+static __inline__ void __attribute__((__always_inline__, __nodebug__))
+__writeeflags(unsigned int __f)
+{
+ __asm__ __volatile__ ("pushl %0\n\t"
+ "popf\n"
+ :
+ :"r"(__f)
+ :"flags"
+ );
+}
+#endif /* !__x86_64__ */
+
+static __inline__ unsigned long long __attribute__((__always_inline__, __nodebug__))
+__rdpmc(int __A) {
+ return __builtin_ia32_rdpmc(__A);
+}
+
+/* __rdtsc */
+static __inline__ unsigned long long __attribute__((__always_inline__, __nodebug__))
+__rdtsc(void) {
+ return __builtin_ia32_rdtsc();
+}
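+
+/* A sketch of cycle-counting a region with __rdtsc. The TSC is a raw
+ * counter: serialization and frequency scaling are the caller's problem.
+ *
+ *   unsigned long long t0 = __rdtsc();
+ *   ...                               // code under test
+ *   unsigned long long dt = __rdtsc() - t0;
+ */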
+
+/* __rdtscp */
+static __inline__ unsigned long long __attribute__((__always_inline__, __nodebug__))
+__rdtscp(unsigned int *__A) {
+ return __builtin_ia32_rdtscp(__A);
+}
+
+#define _rdtsc() __rdtsc()
+
+#endif /* __IA32INTRIN_H */
diff --git a/clang-2690385/lib64/clang/3.8/include/immintrin.h b/clang-2690385/lib64/clang/3.8/include/immintrin.h
new file mode 100644
index 00000000..f3c6d191
--- /dev/null
+++ b/clang-2690385/lib64/clang/3.8/include/immintrin.h
@@ -0,0 +1,172 @@
+/*===---- immintrin.h - Intel intrinsics -----------------------------------===
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+ * THE SOFTWARE.
+ *
+ *===-----------------------------------------------------------------------===
+ */
+
+#ifndef __IMMINTRIN_H
+#define __IMMINTRIN_H
+
+#include <mmintrin.h>
+
+#include <xmmintrin.h>
+
+#include <emmintrin.h>
+
+#include <pmmintrin.h>
+
+#include <tmmintrin.h>
+
+#include <smmintrin.h>
+
+#include <wmmintrin.h>
+
+#include <avxintrin.h>
+
+#include <avx2intrin.h>
+
+/* The 256-bit versions of functions in f16cintrin.h.
+ Intel documents these as being in immintrin.h, and
+ they depend on typedefs from avxintrin.h. */
+
+#define _mm256_cvtps_ph(a, imm) __extension__ ({ \
+ (__m128i)__builtin_ia32_vcvtps2ph256((__v8sf)(__m256)(a), (imm)); })
+
+static __inline __m256 __attribute__((__always_inline__, __nodebug__, __target__("f16c")))
+_mm256_cvtph_ps(__m128i __a)
+{
+ return (__m256)__builtin_ia32_vcvtph2ps256((__v8hi)__a);
+}
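+
+/* A sketch (assuming -mf16c): round-trip eight floats through half
+ * precision; the immediate selects the rounding mode, 0 being
+ * round-to-nearest-even.
+ *
+ *   __m256  v  = _mm256_set1_ps(1.0f);
+ *   __m128i h  = _mm256_cvtps_ph(v, 0);
+ *   __m256  v2 = _mm256_cvtph_ps(h);
+ */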
+
+#include <bmiintrin.h>
+
+#include <bmi2intrin.h>
+
+#include <lzcntintrin.h>
+
+#include <fmaintrin.h>
+
+#include <avx512fintrin.h>
+
+#include <avx512vlintrin.h>
+
+#include <avx512bwintrin.h>
+
+#include <avx512cdintrin.h>
+
+#include <avx512dqintrin.h>
+
+#include <avx512vlbwintrin.h>
+
+#include <avx512vldqintrin.h>
+
+#include <avx512erintrin.h>
+
+static __inline__ int __attribute__((__always_inline__, __nodebug__, __target__("rdrnd")))
+_rdrand16_step(unsigned short *__p)
+{
+ return __builtin_ia32_rdrand16_step(__p);
+}
+
+static __inline__ int __attribute__((__always_inline__, __nodebug__, __target__("rdrnd")))
+_rdrand32_step(unsigned int *__p)
+{
+ return __builtin_ia32_rdrand32_step(__p);
+}
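+
+/* A sketch of the intended use: RDRAND can transiently fail, so the
+ * *_step functions return 0 on failure and are meant to be retried.
+ *
+ *   unsigned int r;
+ *   while (!_rdrand32_step(&r))
+ *     ;  // retry until the DRNG delivers a value
+ */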
+
+#ifdef __x86_64__
+static __inline__ int __attribute__((__always_inline__, __nodebug__, __target__("rdrnd")))
+_rdrand64_step(unsigned long long *__p)
+{
+ return __builtin_ia32_rdrand64_step(__p);
+}
+#endif
+
+#ifdef __x86_64__
+static __inline__ unsigned int __attribute__((__always_inline__, __nodebug__, __target__("fsgsbase")))
+_readfsbase_u32(void)
+{
+ return __builtin_ia32_rdfsbase32();
+}
+
+static __inline__ unsigned long long __attribute__((__always_inline__, __nodebug__, __target__("fsgsbase")))
+_readfsbase_u64(void)
+{
+ return __builtin_ia32_rdfsbase64();
+}
+
+static __inline__ unsigned int __attribute__((__always_inline__, __nodebug__, __target__("fsgsbase")))
+_readgsbase_u32(void)
+{
+ return __builtin_ia32_rdgsbase32();
+}
+
+static __inline__ unsigned long long __attribute__((__always_inline__, __nodebug__, __target__("fsgsbase")))
+_readgsbase_u64(void)
+{
+ return __builtin_ia32_rdgsbase64();
+}
+
+static __inline__ void __attribute__((__always_inline__, __nodebug__, __target__("fsgsbase")))
+_writefsbase_u32(unsigned int __V)
+{
+ return __builtin_ia32_wrfsbase32(__V);
+}
+
+static __inline__ void __attribute__((__always_inline__, __nodebug__, __target__("fsgsbase")))
+_writefsbase_u64(unsigned long long __V)
+{
+ return __builtin_ia32_wrfsbase64(__V);
+}
+
+static __inline__ void __attribute__((__always_inline__, __nodebug__, __target__("fsgsbase")))
+_writegsbase_u32(unsigned int __V)
+{
+ return __builtin_ia32_wrgsbase32(__V);
+}
+
+static __inline__ void __attribute__((__always_inline__, __nodebug__, __target__("fsgsbase")))
+_writegsbase_u64(unsigned long long __V)
+{
+ return __builtin_ia32_wrgsbase64(__V);
+}
+#endif
+
+#include <rtmintrin.h>
+
+#include <xtestintrin.h>
+
+#include <shaintrin.h>
+
+#include <fxsrintrin.h>
+
+#include <xsaveintrin.h>
+
+#include <xsaveoptintrin.h>
+
+#include <xsavecintrin.h>
+
+#include <xsavesintrin.h>
+
+/* Some intrinsics inside adxintrin.h are available only on processors with
+ * ADX, whereas others are available at all times. */
+#include <adxintrin.h>
+
+#endif /* __IMMINTRIN_H */
diff --git a/clang-2690385/lib64/clang/3.8/include/inttypes.h b/clang-2690385/lib64/clang/3.8/include/inttypes.h
new file mode 100644
index 00000000..3d59d141
--- /dev/null
+++ b/clang-2690385/lib64/clang/3.8/include/inttypes.h
@@ -0,0 +1,102 @@
+/*===---- inttypes.h - Standard header for integer printf macros ----------===*\
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+ * THE SOFTWARE.
+ *
+\*===----------------------------------------------------------------------===*/
+
+#ifndef __CLANG_INTTYPES_H
+#define __CLANG_INTTYPES_H
+
+#include_next <inttypes.h>
+
+#if defined(_MSC_VER) && _MSC_VER < 1900
+/* MSVC headers define int32_t as int, but PRIx32 as "lx" instead of "x".
+ * This triggers format warnings, so fix it up here. */
+#undef PRId32
+#undef PRIdLEAST32
+#undef PRIdFAST32
+#undef PRIi32
+#undef PRIiLEAST32
+#undef PRIiFAST32
+#undef PRIo32
+#undef PRIoLEAST32
+#undef PRIoFAST32
+#undef PRIu32
+#undef PRIuLEAST32
+#undef PRIuFAST32
+#undef PRIx32
+#undef PRIxLEAST32
+#undef PRIxFAST32
+#undef PRIX32
+#undef PRIXLEAST32
+#undef PRIXFAST32
+
+#undef SCNd32
+#undef SCNdLEAST32
+#undef SCNdFAST32
+#undef SCNi32
+#undef SCNiLEAST32
+#undef SCNiFAST32
+#undef SCNo32
+#undef SCNoLEAST32
+#undef SCNoFAST32
+#undef SCNu32
+#undef SCNuLEAST32
+#undef SCNuFAST32
+#undef SCNx32
+#undef SCNxLEAST32
+#undef SCNxFAST32
+
+#define PRId32 "d"
+#define PRIdLEAST32 "d"
+#define PRIdFAST32 "d"
+#define PRIi32 "i"
+#define PRIiLEAST32 "i"
+#define PRIiFAST32 "i"
+#define PRIo32 "o"
+#define PRIoLEAST32 "o"
+#define PRIoFAST32 "o"
+#define PRIu32 "u"
+#define PRIuLEAST32 "u"
+#define PRIuFAST32 "u"
+#define PRIx32 "x"
+#define PRIxLEAST32 "x"
+#define PRIxFAST32 "x"
+#define PRIX32 "X"
+#define PRIXLEAST32 "X"
+#define PRIXFAST32 "X"
+
+#define SCNd32 "d"
+#define SCNdLEAST32 "d"
+#define SCNdFAST32 "d"
+#define SCNi32 "i"
+#define SCNiLEAST32 "i"
+#define SCNiFAST32 "i"
+#define SCNo32 "o"
+#define SCNoLEAST32 "o"
+#define SCNoFAST32 "o"
+#define SCNu32 "u"
+#define SCNuLEAST32 "u"
+#define SCNuFAST32 "u"
+#define SCNx32 "x"
+#define SCNxLEAST32 "x"
+#define SCNxFAST32 "x"
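+
+/* With the fixed-up macros above, a 32-bit value now formats without
+ * -Wformat complaints. A sketch:
+ *
+ *   int32_t v = 42;
+ *   printf("v = %" PRId32 "\n", v);
+ */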
+#endif
+
+#endif /* __CLANG_INTTYPES_H */
diff --git a/clang-2690385/lib64/clang/3.8/include/iso646.h b/clang-2690385/lib64/clang/3.8/include/iso646.h
new file mode 100644
index 00000000..dca13c5b
--- /dev/null
+++ b/clang-2690385/lib64/clang/3.8/include/iso646.h
@@ -0,0 +1,43 @@
+/*===---- iso646.h - Standard header for alternate spellings of operators --===
+ *
+ * Copyright (c) 2008 Eli Friedman
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+ * THE SOFTWARE.
+ *
+ *===-----------------------------------------------------------------------===
+ */
+
+#ifndef __ISO646_H
+#define __ISO646_H
+
+#ifndef __cplusplus
+#define and &&
+#define and_eq &=
+#define bitand &
+#define bitor |
+#define compl ~
+#define not !
+#define not_eq !=
+#define or ||
+#define or_eq |=
+#define xor ^
+#define xor_eq ^=
+#endif
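+
+/* A sketch: the alternate spellings read naturally in conditions (in C++
+ * they are keywords, so the macros above are not needed). The names are
+ * illustrative only:
+ *
+ *   if (ready and not failed)
+ *     run();
+ */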
+
+#endif /* __ISO646_H */
diff --git a/clang-2690385/lib64/clang/3.8/include/limits.h b/clang-2690385/lib64/clang/3.8/include/limits.h
new file mode 100644
index 00000000..f04187ce
--- /dev/null
+++ b/clang-2690385/lib64/clang/3.8/include/limits.h
@@ -0,0 +1,118 @@
+/*===---- limits.h - Standard header for integer sizes --------------------===*\
+ *
+ * Copyright (c) 2009 Chris Lattner
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+ * THE SOFTWARE.
+ *
+\*===----------------------------------------------------------------------===*/
+
+#ifndef __CLANG_LIMITS_H
+#define __CLANG_LIMITS_H
+
+/* The system's limits.h may, in turn, try to #include_next GCC's limits.h.
+ Avert this #include_next madness. */
+#if defined __GNUC__ && !defined _GCC_LIMITS_H_
+#define _GCC_LIMITS_H_
+#endif
+
+/* System headers include a number of constants from POSIX in <limits.h>.
+ Include it if we're hosted. */
+#if __STDC_HOSTED__ && __has_include_next(<limits.h>)
+#include_next <limits.h>
+#endif
+
+/* Many system headers try to "help us out" by defining these. No really, we
+ know how big each datatype is. */
+#undef SCHAR_MIN
+#undef SCHAR_MAX
+#undef UCHAR_MAX
+#undef SHRT_MIN
+#undef SHRT_MAX
+#undef USHRT_MAX
+#undef INT_MIN
+#undef INT_MAX
+#undef UINT_MAX
+#undef LONG_MIN
+#undef LONG_MAX
+#undef ULONG_MAX
+
+#undef CHAR_BIT
+#undef CHAR_MIN
+#undef CHAR_MAX
+
+/* C90/99 5.2.4.2.1 */
+#define SCHAR_MAX __SCHAR_MAX__
+#define SHRT_MAX __SHRT_MAX__
+#define INT_MAX __INT_MAX__
+#define LONG_MAX __LONG_MAX__
+
+#define SCHAR_MIN (-__SCHAR_MAX__-1)
+#define SHRT_MIN (-__SHRT_MAX__ -1)
+#define INT_MIN (-__INT_MAX__ -1)
+#define LONG_MIN (-__LONG_MAX__ -1L)
+
+#define UCHAR_MAX (__SCHAR_MAX__*2 +1)
+#define USHRT_MAX (__SHRT_MAX__ *2 +1)
+#define UINT_MAX (__INT_MAX__ *2U +1U)
+#define ULONG_MAX (__LONG_MAX__ *2UL+1UL)
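+
+/* Worked example of the pattern above: with a 32-bit int,
+ * UINT_MAX = 2147483647 * 2U + 1U = 4294967295. */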
+
+#ifndef MB_LEN_MAX
+#define MB_LEN_MAX 1
+#endif
+
+#define CHAR_BIT __CHAR_BIT__
+
+#ifdef __CHAR_UNSIGNED__ /* -funsigned-char */
+#define CHAR_MIN 0
+#define CHAR_MAX UCHAR_MAX
+#else
+#define CHAR_MIN SCHAR_MIN
+#define CHAR_MAX __SCHAR_MAX__
+#endif
+
+/* C99 5.2.4.2.1: Added long long.
+ C++11 18.3.3.2: same contents as the Standard C Library header <limits.h>.
+ */
+#if __STDC_VERSION__ >= 199901L || __cplusplus >= 201103L
+
+#undef LLONG_MIN
+#undef LLONG_MAX
+#undef ULLONG_MAX
+
+#define LLONG_MAX __LONG_LONG_MAX__
+#define LLONG_MIN (-__LONG_LONG_MAX__-1LL)
+#define ULLONG_MAX (__LONG_LONG_MAX__*2ULL+1ULL)
+#endif
+
+/* LONG_LONG_MIN/LONG_LONG_MAX/ULONG_LONG_MAX are a GNU extension. It's too bad
+ that we don't have something like #pragma poison that could be used to
+ deprecate a macro - the code should just use LLONG_MAX and friends.
+ */
+#if defined(__GNU_LIBRARY__) ? defined(__USE_GNU) : !defined(__STRICT_ANSI__)
+
+#undef LONG_LONG_MIN
+#undef LONG_LONG_MAX
+#undef ULONG_LONG_MAX
+
+#define LONG_LONG_MAX __LONG_LONG_MAX__
+#define LONG_LONG_MIN (-__LONG_LONG_MAX__-1LL)
+#define ULONG_LONG_MAX (__LONG_LONG_MAX__*2ULL+1ULL)
+#endif
+
+#endif /* __CLANG_LIMITS_H */
diff --git a/clang-2690385/lib64/clang/3.8/include/lzcntintrin.h b/clang-2690385/lib64/clang/3.8/include/lzcntintrin.h
new file mode 100644
index 00000000..4c00e42a
--- /dev/null
+++ b/clang-2690385/lib64/clang/3.8/include/lzcntintrin.h
@@ -0,0 +1,68 @@
+/*===---- lzcntintrin.h - LZCNT intrinsics ---------------------------------===
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+ * THE SOFTWARE.
+ *
+ *===-----------------------------------------------------------------------===
+ */
+
+#if !defined __X86INTRIN_H && !defined __IMMINTRIN_H
+#error "Never use <lzcntintrin.h> directly; include <x86intrin.h> instead."
+#endif
+
+#ifndef __LZCNTINTRIN_H
+#define __LZCNTINTRIN_H
+
+/* Define the default attributes for the functions in this file. */
+#define __DEFAULT_FN_ATTRS __attribute__((__always_inline__, __nodebug__, __target__("lzcnt")))
+
+static __inline__ unsigned short __DEFAULT_FN_ATTRS
+__lzcnt16(unsigned short __X)
+{
+ return __X ? __builtin_clzs(__X) : 16;
+}
+
+static __inline__ unsigned int __DEFAULT_FN_ATTRS
+__lzcnt32(unsigned int __X)
+{
+ return __X ? __builtin_clz(__X) : 32;
+}
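+
+/* A sketch: __lzcnt32 counts leading zero bits and, unlike a raw
+ * __builtin_clz, is defined for a zero input (returning 32):
+ *
+ *   unsigned n = __lzcnt32(0x00010000u);  // 15
+ */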
+
+static __inline__ unsigned int __DEFAULT_FN_ATTRS
+_lzcnt_u32(unsigned int __X)
+{
+ return __X ? __builtin_clz(__X) : 32;
+}
+
+#ifdef __x86_64__
+static __inline__ unsigned long long __DEFAULT_FN_ATTRS
+__lzcnt64(unsigned long long __X)
+{
+ return __X ? __builtin_clzll(__X) : 64;
+}
+
+static __inline__ unsigned long long __DEFAULT_FN_ATTRS
+_lzcnt_u64(unsigned long long __X)
+{
+ return __X ? __builtin_clzll(__X) : 64;
+}
+#endif
+
+#undef __DEFAULT_FN_ATTRS
+
+#endif /* __LZCNTINTRIN_H */
diff --git a/clang-2690385/lib64/clang/3.8/include/mm3dnow.h b/clang-2690385/lib64/clang/3.8/include/mm3dnow.h
new file mode 100644
index 00000000..cb93faf2
--- /dev/null
+++ b/clang-2690385/lib64/clang/3.8/include/mm3dnow.h
@@ -0,0 +1,171 @@
+/*===---- mm3dnow.h - 3DNow! intrinsics ------------------------------------===
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+ * THE SOFTWARE.
+ *
+ *===-----------------------------------------------------------------------===
+ */
+
+#ifndef _MM3DNOW_H_INCLUDED
+#define _MM3DNOW_H_INCLUDED
+
+#include <mmintrin.h>
+#include <prfchwintrin.h>
+
+typedef float __v2sf __attribute__((__vector_size__(8)));
+
+/* Define the default attributes for the functions in this file. */
+#define __DEFAULT_FN_ATTRS __attribute__((__always_inline__, __nodebug__, __target__("3dnow")))
+
+static __inline__ void __DEFAULT_FN_ATTRS
+_m_femms() {
+ __builtin_ia32_femms();
+}
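+
+/* Usage note: as with EMMS, FEMMS must separate MMX/3DNow! code from any
+ * following x87 floating-point code. A sketch (using _m_pfadd below):
+ *
+ *   __m64 r = _m_pfadd(a, b);  // 3DNow! work
+ *   _m_femms();                // clear the MMX state before x87 use
+ */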
+
+static __inline__ __m64 __DEFAULT_FN_ATTRS
+_m_pavgusb(__m64 __m1, __m64 __m2) {
+ return (__m64)__builtin_ia32_pavgusb((__v8qi)__m1, (__v8qi)__m2);
+}
+
+static __inline__ __m64 __DEFAULT_FN_ATTRS
+_m_pf2id(__m64 __m) {
+ return (__m64)__builtin_ia32_pf2id((__v2sf)__m);
+}
+
+static __inline__ __m64 __DEFAULT_FN_ATTRS
+_m_pfacc(__m64 __m1, __m64 __m2) {
+ return (__m64)__builtin_ia32_pfacc((__v2sf)__m1, (__v2sf)__m2);
+}
+
+static __inline__ __m64 __DEFAULT_FN_ATTRS
+_m_pfadd(__m64 __m1, __m64 __m2) {
+ return (__m64)__builtin_ia32_pfadd((__v2sf)__m1, (__v2sf)__m2);
+}
+
+static __inline__ __m64 __DEFAULT_FN_ATTRS
+_m_pfcmpeq(__m64 __m1, __m64 __m2) {
+ return (__m64)__builtin_ia32_pfcmpeq((__v2sf)__m1, (__v2sf)__m2);
+}
+
+static __inline__ __m64 __DEFAULT_FN_ATTRS
+_m_pfcmpge(__m64 __m1, __m64 __m2) {
+ return (__m64)__builtin_ia32_pfcmpge((__v2sf)__m1, (__v2sf)__m2);
+}
+
+static __inline__ __m64 __DEFAULT_FN_ATTRS
+_m_pfcmpgt(__m64 __m1, __m64 __m2) {
+ return (__m64)__builtin_ia32_pfcmpgt((__v2sf)__m1, (__v2sf)__m2);
+}
+
+static __inline__ __m64 __DEFAULT_FN_ATTRS
+_m_pfmax(__m64 __m1, __m64 __m2) {
+ return (__m64)__builtin_ia32_pfmax((__v2sf)__m1, (__v2sf)__m2);
+}
+
+static __inline__ __m64 __DEFAULT_FN_ATTRS
+_m_pfmin(__m64 __m1, __m64 __m2) {
+ return (__m64)__builtin_ia32_pfmin((__v2sf)__m1, (__v2sf)__m2);
+}
+
+static __inline__ __m64 __DEFAULT_FN_ATTRS
+_m_pfmul(__m64 __m1, __m64 __m2) {
+ return (__m64)__builtin_ia32_pfmul((__v2sf)__m1, (__v2sf)__m2);
+}
+
+static __inline__ __m64 __DEFAULT_FN_ATTRS
+_m_pfrcp(__m64 __m) {
+ return (__m64)__builtin_ia32_pfrcp((__v2sf)__m);
+}
+
+static __inline__ __m64 __DEFAULT_FN_ATTRS
+_m_pfrcpit1(__m64 __m1, __m64 __m2) {
+ return (__m64)__builtin_ia32_pfrcpit1((__v2sf)__m1, (__v2sf)__m2);
+}
+
+static __inline__ __m64 __DEFAULT_FN_ATTRS
+_m_pfrcpit2(__m64 __m1, __m64 __m2) {
+ return (__m64)__builtin_ia32_pfrcpit2((__v2sf)__m1, (__v2sf)__m2);
+}
+
+static __inline__ __m64 __DEFAULT_FN_ATTRS
+_m_pfrsqrt(__m64 __m) {
+ return (__m64)__builtin_ia32_pfrsqrt((__v2sf)__m);
+}
+
+static __inline__ __m64 __DEFAULT_FN_ATTRS
+_m_pfrsqrtit1(__m64 __m1, __m64 __m2) {
+ return (__m64)__builtin_ia32_pfrsqit1((__v2sf)__m1, (__v2sf)__m2);
+}
+
+static __inline__ __m64 __DEFAULT_FN_ATTRS
+_m_pfsub(__m64 __m1, __m64 __m2) {
+ return (__m64)__builtin_ia32_pfsub((__v2sf)__m1, (__v2sf)__m2);
+}
+
+static __inline__ __m64 __DEFAULT_FN_ATTRS
+_m_pfsubr(__m64 __m1, __m64 __m2) {
+ return (__m64)__builtin_ia32_pfsubr((__v2sf)__m1, (__v2sf)__m2);
+}
+
+static __inline__ __m64 __DEFAULT_FN_ATTRS
+_m_pi2fd(__m64 __m) {
+ return (__m64)__builtin_ia32_pi2fd((__v2si)__m);
+}
+
+static __inline__ __m64 __DEFAULT_FN_ATTRS
+_m_pmulhrw(__m64 __m1, __m64 __m2) {
+ return (__m64)__builtin_ia32_pmulhrw((__v4hi)__m1, (__v4hi)__m2);
+}
+
+/* Handle the 3dnowa instructions here. */
+#undef __DEFAULT_FN_ATTRS
+#define __DEFAULT_FN_ATTRS __attribute__((__always_inline__, __nodebug__, __target__("3dnowa")))
+
+static __inline__ __m64 __DEFAULT_FN_ATTRS
+_m_pf2iw(__m64 __m) {
+ return (__m64)__builtin_ia32_pf2iw((__v2sf)__m);
+}
+
+static __inline__ __m64 __DEFAULT_FN_ATTRS
+_m_pfnacc(__m64 __m1, __m64 __m2) {
+ return (__m64)__builtin_ia32_pfnacc((__v2sf)__m1, (__v2sf)__m2);
+}
+
+static __inline__ __m64 __DEFAULT_FN_ATTRS
+_m_pfpnacc(__m64 __m1, __m64 __m2) {
+ return (__m64)__builtin_ia32_pfpnacc((__v2sf)__m1, (__v2sf)__m2);
+}
+
+static __inline__ __m64 __DEFAULT_FN_ATTRS
+_m_pi2fw(__m64 __m) {
+ return (__m64)__builtin_ia32_pi2fw((__v2si)__m);
+}
+
+static __inline__ __m64 __DEFAULT_FN_ATTRS
+_m_pswapdsf(__m64 __m) {
+ return (__m64)__builtin_ia32_pswapdsf((__v2sf)__m);
+}
+
+static __inline__ __m64 __DEFAULT_FN_ATTRS
+_m_pswapdsi(__m64 __m) {
+ return (__m64)__builtin_ia32_pswapdsi((__v2si)__m);
+}
+
+#undef __DEFAULT_FN_ATTRS
+
+#endif
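
For orientation, a minimal sketch of driving these wrappers (assumes -m3dnow on AMD hardware; 3DNow! is long deprecated, so treat this as illustrative only). Note the _m_femms() call: like MMX, 3DNow! aliases the x87 register stack, and the state must be cleared before ordinary floating-point code runs.

    #include <mm3dnow.h>
    #include <string.h>

    /* Add two pairs of floats with the packed pfadd instruction. */
    static void pfadd_pair(const float a[2], const float b[2], float out[2]) {
        __m64 va, vb, vr;
        memcpy(&va, a, sizeof va);
        memcpy(&vb, b, sizeof vb);
        vr = _m_pfadd(va, vb);
        memcpy(out, &vr, sizeof vr);
        _m_femms();   /* clear the shared MMX/x87 state */
    }
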
diff --git a/clang-2690385/lib64/clang/3.8/include/mm_malloc.h b/clang-2690385/lib64/clang/3.8/include/mm_malloc.h
new file mode 100644
index 00000000..305afd31
--- /dev/null
+++ b/clang-2690385/lib64/clang/3.8/include/mm_malloc.h
@@ -0,0 +1,75 @@
+/*===---- mm_malloc.h - Allocating and Freeing Aligned Memory Blocks -------===
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+ * THE SOFTWARE.
+ *
+ *===-----------------------------------------------------------------------===
+ */
+
+#ifndef __MM_MALLOC_H
+#define __MM_MALLOC_H
+
+#include <stdlib.h>
+
+#ifdef _WIN32
+#include <malloc.h>
+#else
+#ifndef __cplusplus
+extern int posix_memalign(void **__memptr, size_t __alignment, size_t __size);
+#else
+// Some systems (e.g. those with GNU libc) declare posix_memalign with an
+// exception specifier. Via an "egregious workaround" in
+// Sema::CheckEquivalentExceptionSpec, Clang accepts the following as a valid
+// redeclaration of glibc's declaration.
+extern "C" int posix_memalign(void **__memptr, size_t __alignment, size_t __size);
+#endif
+#endif
+
+#if !(defined(_WIN32) && defined(_mm_malloc))
+static __inline__ void *__attribute__((__always_inline__, __nodebug__,
+ __malloc__))
+_mm_malloc(size_t __size, size_t __align)
+{
+ if (__align == 1) {
+ return malloc(__size);
+ }
+
+ if (!(__align & (__align - 1)) && __align < sizeof(void *))
+ __align = sizeof(void *);
+
+ void *__mallocedMemory;
+#if defined(__MINGW32__)
+ __mallocedMemory = __mingw_aligned_malloc(__size, __align);
+#elif defined(_WIN32)
+ __mallocedMemory = _aligned_malloc(__size, __align);
+#else
+ if (posix_memalign(&__mallocedMemory, __align, __size))
+ return 0;
+#endif
+
+ return __mallocedMemory;
+}
+
+static __inline__ void __attribute__((__always_inline__, __nodebug__))
+_mm_free(void *__p)
+{
+ free(__p);
+}
+#endif
+
+#endif /* __MM_MALLOC_H */
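
A minimal usage sketch (an assumption of this edit): request a 64-byte-aligned buffer and release it with the matching _mm_free. On Windows the allocation comes from _aligned_malloc, so the pointer must never be passed to plain free().

    #include <mm_malloc.h>
    #include <stdint.h>
    #include <assert.h>

    int main(void) {
        float *buf = (float *)_mm_malloc(1024 * sizeof(float), 64);
        if (!buf)
            return 1;
        assert(((uintptr_t)buf % 64) == 0);  /* honors the requested alignment */
        _mm_free(buf);                       /* matching deallocator, not free() */
        return 0;
    }
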
diff --git a/clang-2690385/lib64/clang/3.8/include/mmintrin.h b/clang-2690385/lib64/clang/3.8/include/mmintrin.h
new file mode 100644
index 00000000..162cb1aa
--- /dev/null
+++ b/clang-2690385/lib64/clang/3.8/include/mmintrin.h
@@ -0,0 +1,503 @@
+/*===---- mmintrin.h - MMX intrinsics --------------------------------------===
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+ * THE SOFTWARE.
+ *
+ *===-----------------------------------------------------------------------===
+ */
+
+#ifndef __MMINTRIN_H
+#define __MMINTRIN_H
+
+typedef long long __m64 __attribute__((__vector_size__(8)));
+
+typedef int __v2si __attribute__((__vector_size__(8)));
+typedef short __v4hi __attribute__((__vector_size__(8)));
+typedef char __v8qi __attribute__((__vector_size__(8)));
+
+/* Define the default attributes for the functions in this file. */
+#define __DEFAULT_FN_ATTRS __attribute__((__always_inline__, __nodebug__, __target__("mmx")))
+
+static __inline__ void __DEFAULT_FN_ATTRS
+_mm_empty(void)
+{
+ __builtin_ia32_emms();
+}
+
+static __inline__ __m64 __DEFAULT_FN_ATTRS
+_mm_cvtsi32_si64(int __i)
+{
+ return (__m64)__builtin_ia32_vec_init_v2si(__i, 0);
+}
+
+static __inline__ int __DEFAULT_FN_ATTRS
+_mm_cvtsi64_si32(__m64 __m)
+{
+ return __builtin_ia32_vec_ext_v2si((__v2si)__m, 0);
+}
+
+static __inline__ __m64 __DEFAULT_FN_ATTRS
+_mm_cvtsi64_m64(long long __i)
+{
+ return (__m64)__i;
+}
+
+static __inline__ long long __DEFAULT_FN_ATTRS
+_mm_cvtm64_si64(__m64 __m)
+{
+ return (long long)__m;
+}
+
+static __inline__ __m64 __DEFAULT_FN_ATTRS
+_mm_packs_pi16(__m64 __m1, __m64 __m2)
+{
+ return (__m64)__builtin_ia32_packsswb((__v4hi)__m1, (__v4hi)__m2);
+}
+
+static __inline__ __m64 __DEFAULT_FN_ATTRS
+_mm_packs_pi32(__m64 __m1, __m64 __m2)
+{
+ return (__m64)__builtin_ia32_packssdw((__v2si)__m1, (__v2si)__m2);
+}
+
+static __inline__ __m64 __DEFAULT_FN_ATTRS
+_mm_packs_pu16(__m64 __m1, __m64 __m2)
+{
+ return (__m64)__builtin_ia32_packuswb((__v4hi)__m1, (__v4hi)__m2);
+}
+
+static __inline__ __m64 __DEFAULT_FN_ATTRS
+_mm_unpackhi_pi8(__m64 __m1, __m64 __m2)
+{
+ return (__m64)__builtin_ia32_punpckhbw((__v8qi)__m1, (__v8qi)__m2);
+}
+
+static __inline__ __m64 __DEFAULT_FN_ATTRS
+_mm_unpackhi_pi16(__m64 __m1, __m64 __m2)
+{
+ return (__m64)__builtin_ia32_punpckhwd((__v4hi)__m1, (__v4hi)__m2);
+}
+
+static __inline__ __m64 __DEFAULT_FN_ATTRS
+_mm_unpackhi_pi32(__m64 __m1, __m64 __m2)
+{
+ return (__m64)__builtin_ia32_punpckhdq((__v2si)__m1, (__v2si)__m2);
+}
+
+static __inline__ __m64 __DEFAULT_FN_ATTRS
+_mm_unpacklo_pi8(__m64 __m1, __m64 __m2)
+{
+ return (__m64)__builtin_ia32_punpcklbw((__v8qi)__m1, (__v8qi)__m2);
+}
+
+static __inline__ __m64 __DEFAULT_FN_ATTRS
+_mm_unpacklo_pi16(__m64 __m1, __m64 __m2)
+{
+ return (__m64)__builtin_ia32_punpcklwd((__v4hi)__m1, (__v4hi)__m2);
+}
+
+static __inline__ __m64 __DEFAULT_FN_ATTRS
+_mm_unpacklo_pi32(__m64 __m1, __m64 __m2)
+{
+ return (__m64)__builtin_ia32_punpckldq((__v2si)__m1, (__v2si)__m2);
+}
+
+static __inline__ __m64 __DEFAULT_FN_ATTRS
+_mm_add_pi8(__m64 __m1, __m64 __m2)
+{
+ return (__m64)__builtin_ia32_paddb((__v8qi)__m1, (__v8qi)__m2);
+}
+
+static __inline__ __m64 __DEFAULT_FN_ATTRS
+_mm_add_pi16(__m64 __m1, __m64 __m2)
+{
+ return (__m64)__builtin_ia32_paddw((__v4hi)__m1, (__v4hi)__m2);
+}
+
+static __inline__ __m64 __DEFAULT_FN_ATTRS
+_mm_add_pi32(__m64 __m1, __m64 __m2)
+{
+ return (__m64)__builtin_ia32_paddd((__v2si)__m1, (__v2si)__m2);
+}
+
+static __inline__ __m64 __DEFAULT_FN_ATTRS
+_mm_adds_pi8(__m64 __m1, __m64 __m2)
+{
+ return (__m64)__builtin_ia32_paddsb((__v8qi)__m1, (__v8qi)__m2);
+}
+
+static __inline__ __m64 __DEFAULT_FN_ATTRS
+_mm_adds_pi16(__m64 __m1, __m64 __m2)
+{
+ return (__m64)__builtin_ia32_paddsw((__v4hi)__m1, (__v4hi)__m2);
+}
+
+static __inline__ __m64 __DEFAULT_FN_ATTRS
+_mm_adds_pu8(__m64 __m1, __m64 __m2)
+{
+ return (__m64)__builtin_ia32_paddusb((__v8qi)__m1, (__v8qi)__m2);
+}
+
+static __inline__ __m64 __DEFAULT_FN_ATTRS
+_mm_adds_pu16(__m64 __m1, __m64 __m2)
+{
+ return (__m64)__builtin_ia32_paddusw((__v4hi)__m1, (__v4hi)__m2);
+}
+
+static __inline__ __m64 __DEFAULT_FN_ATTRS
+_mm_sub_pi8(__m64 __m1, __m64 __m2)
+{
+ return (__m64)__builtin_ia32_psubb((__v8qi)__m1, (__v8qi)__m2);
+}
+
+static __inline__ __m64 __DEFAULT_FN_ATTRS
+_mm_sub_pi16(__m64 __m1, __m64 __m2)
+{
+ return (__m64)__builtin_ia32_psubw((__v4hi)__m1, (__v4hi)__m2);
+}
+
+static __inline__ __m64 __DEFAULT_FN_ATTRS
+_mm_sub_pi32(__m64 __m1, __m64 __m2)
+{
+ return (__m64)__builtin_ia32_psubd((__v2si)__m1, (__v2si)__m2);
+}
+
+static __inline__ __m64 __DEFAULT_FN_ATTRS
+_mm_subs_pi8(__m64 __m1, __m64 __m2)
+{
+ return (__m64)__builtin_ia32_psubsb((__v8qi)__m1, (__v8qi)__m2);
+}
+
+static __inline__ __m64 __DEFAULT_FN_ATTRS
+_mm_subs_pi16(__m64 __m1, __m64 __m2)
+{
+ return (__m64)__builtin_ia32_psubsw((__v4hi)__m1, (__v4hi)__m2);
+}
+
+static __inline__ __m64 __DEFAULT_FN_ATTRS
+_mm_subs_pu8(__m64 __m1, __m64 __m2)
+{
+ return (__m64)__builtin_ia32_psubusb((__v8qi)__m1, (__v8qi)__m2);
+}
+
+static __inline__ __m64 __DEFAULT_FN_ATTRS
+_mm_subs_pu16(__m64 __m1, __m64 __m2)
+{
+ return (__m64)__builtin_ia32_psubusw((__v4hi)__m1, (__v4hi)__m2);
+}
+
+static __inline__ __m64 __DEFAULT_FN_ATTRS
+_mm_madd_pi16(__m64 __m1, __m64 __m2)
+{
+ return (__m64)__builtin_ia32_pmaddwd((__v4hi)__m1, (__v4hi)__m2);
+}
+
+static __inline__ __m64 __DEFAULT_FN_ATTRS
+_mm_mulhi_pi16(__m64 __m1, __m64 __m2)
+{
+ return (__m64)__builtin_ia32_pmulhw((__v4hi)__m1, (__v4hi)__m2);
+}
+
+static __inline__ __m64 __DEFAULT_FN_ATTRS
+_mm_mullo_pi16(__m64 __m1, __m64 __m2)
+{
+ return (__m64)__builtin_ia32_pmullw((__v4hi)__m1, (__v4hi)__m2);
+}
+
+static __inline__ __m64 __DEFAULT_FN_ATTRS
+_mm_sll_pi16(__m64 __m, __m64 __count)
+{
+ return (__m64)__builtin_ia32_psllw((__v4hi)__m, __count);
+}
+
+static __inline__ __m64 __DEFAULT_FN_ATTRS
+_mm_slli_pi16(__m64 __m, int __count)
+{
+ return (__m64)__builtin_ia32_psllwi((__v4hi)__m, __count);
+}
+
+static __inline__ __m64 __DEFAULT_FN_ATTRS
+_mm_sll_pi32(__m64 __m, __m64 __count)
+{
+ return (__m64)__builtin_ia32_pslld((__v2si)__m, __count);
+}
+
+static __inline__ __m64 __DEFAULT_FN_ATTRS
+_mm_slli_pi32(__m64 __m, int __count)
+{
+ return (__m64)__builtin_ia32_pslldi((__v2si)__m, __count);
+}
+
+static __inline__ __m64 __DEFAULT_FN_ATTRS
+_mm_sll_si64(__m64 __m, __m64 __count)
+{
+ return (__m64)__builtin_ia32_psllq(__m, __count);
+}
+
+static __inline__ __m64 __DEFAULT_FN_ATTRS
+_mm_slli_si64(__m64 __m, int __count)
+{
+ return (__m64)__builtin_ia32_psllqi(__m, __count);
+}
+
+static __inline__ __m64 __DEFAULT_FN_ATTRS
+_mm_sra_pi16(__m64 __m, __m64 __count)
+{
+ return (__m64)__builtin_ia32_psraw((__v4hi)__m, __count);
+}
+
+static __inline__ __m64 __DEFAULT_FN_ATTRS
+_mm_srai_pi16(__m64 __m, int __count)
+{
+ return (__m64)__builtin_ia32_psrawi((__v4hi)__m, __count);
+}
+
+static __inline__ __m64 __DEFAULT_FN_ATTRS
+_mm_sra_pi32(__m64 __m, __m64 __count)
+{
+ return (__m64)__builtin_ia32_psrad((__v2si)__m, __count);
+}
+
+static __inline__ __m64 __DEFAULT_FN_ATTRS
+_mm_srai_pi32(__m64 __m, int __count)
+{
+ return (__m64)__builtin_ia32_psradi((__v2si)__m, __count);
+}
+
+static __inline__ __m64 __DEFAULT_FN_ATTRS
+_mm_srl_pi16(__m64 __m, __m64 __count)
+{
+ return (__m64)__builtin_ia32_psrlw((__v4hi)__m, __count);
+}
+
+static __inline__ __m64 __DEFAULT_FN_ATTRS
+_mm_srli_pi16(__m64 __m, int __count)
+{
+ return (__m64)__builtin_ia32_psrlwi((__v4hi)__m, __count);
+}
+
+static __inline__ __m64 __DEFAULT_FN_ATTRS
+_mm_srl_pi32(__m64 __m, __m64 __count)
+{
+ return (__m64)__builtin_ia32_psrld((__v2si)__m, __count);
+}
+
+static __inline__ __m64 __DEFAULT_FN_ATTRS
+_mm_srli_pi32(__m64 __m, int __count)
+{
+ return (__m64)__builtin_ia32_psrldi((__v2si)__m, __count);
+}
+
+static __inline__ __m64 __DEFAULT_FN_ATTRS
+_mm_srl_si64(__m64 __m, __m64 __count)
+{
+ return (__m64)__builtin_ia32_psrlq(__m, __count);
+}
+
+static __inline__ __m64 __DEFAULT_FN_ATTRS
+_mm_srli_si64(__m64 __m, int __count)
+{
+ return (__m64)__builtin_ia32_psrlqi(__m, __count);
+}
+
+static __inline__ __m64 __DEFAULT_FN_ATTRS
+_mm_and_si64(__m64 __m1, __m64 __m2)
+{
+ return __builtin_ia32_pand(__m1, __m2);
+}
+
+static __inline__ __m64 __DEFAULT_FN_ATTRS
+_mm_andnot_si64(__m64 __m1, __m64 __m2)
+{
+ return __builtin_ia32_pandn(__m1, __m2);
+}
+
+static __inline__ __m64 __DEFAULT_FN_ATTRS
+_mm_or_si64(__m64 __m1, __m64 __m2)
+{
+ return __builtin_ia32_por(__m1, __m2);
+}
+
+static __inline__ __m64 __DEFAULT_FN_ATTRS
+_mm_xor_si64(__m64 __m1, __m64 __m2)
+{
+ return __builtin_ia32_pxor(__m1, __m2);
+}
+
+static __inline__ __m64 __DEFAULT_FN_ATTRS
+_mm_cmpeq_pi8(__m64 __m1, __m64 __m2)
+{
+ return (__m64)__builtin_ia32_pcmpeqb((__v8qi)__m1, (__v8qi)__m2);
+}
+
+static __inline__ __m64 __DEFAULT_FN_ATTRS
+_mm_cmpeq_pi16(__m64 __m1, __m64 __m2)
+{
+ return (__m64)__builtin_ia32_pcmpeqw((__v4hi)__m1, (__v4hi)__m2);
+}
+
+static __inline__ __m64 __DEFAULT_FN_ATTRS
+_mm_cmpeq_pi32(__m64 __m1, __m64 __m2)
+{
+ return (__m64)__builtin_ia32_pcmpeqd((__v2si)__m1, (__v2si)__m2);
+}
+
+static __inline__ __m64 __DEFAULT_FN_ATTRS
+_mm_cmpgt_pi8(__m64 __m1, __m64 __m2)
+{
+ return (__m64)__builtin_ia32_pcmpgtb((__v8qi)__m1, (__v8qi)__m2);
+}
+
+static __inline__ __m64 __DEFAULT_FN_ATTRS
+_mm_cmpgt_pi16(__m64 __m1, __m64 __m2)
+{
+ return (__m64)__builtin_ia32_pcmpgtw((__v4hi)__m1, (__v4hi)__m2);
+}
+
+static __inline__ __m64 __DEFAULT_FN_ATTRS
+_mm_cmpgt_pi32(__m64 __m1, __m64 __m2)
+{
+ return (__m64)__builtin_ia32_pcmpgtd((__v2si)__m1, (__v2si)__m2);
+}
+
+static __inline__ __m64 __DEFAULT_FN_ATTRS
+_mm_setzero_si64(void)
+{
+ return (__m64){ 0LL };
+}
+
+static __inline__ __m64 __DEFAULT_FN_ATTRS
+_mm_set_pi32(int __i1, int __i0)
+{
+ return (__m64)__builtin_ia32_vec_init_v2si(__i0, __i1);
+}
+
+static __inline__ __m64 __DEFAULT_FN_ATTRS
+_mm_set_pi16(short __s3, short __s2, short __s1, short __s0)
+{
+ return (__m64)__builtin_ia32_vec_init_v4hi(__s0, __s1, __s2, __s3);
+}
+
+static __inline__ __m64 __DEFAULT_FN_ATTRS
+_mm_set_pi8(char __b7, char __b6, char __b5, char __b4, char __b3, char __b2,
+ char __b1, char __b0)
+{
+ return (__m64)__builtin_ia32_vec_init_v8qi(__b0, __b1, __b2, __b3,
+ __b4, __b5, __b6, __b7);
+}
+
+static __inline__ __m64 __DEFAULT_FN_ATTRS
+_mm_set1_pi32(int __i)
+{
+ return _mm_set_pi32(__i, __i);
+}
+
+static __inline__ __m64 __DEFAULT_FN_ATTRS
+_mm_set1_pi16(short __w)
+{
+ return _mm_set_pi16(__w, __w, __w, __w);
+}
+
+static __inline__ __m64 __DEFAULT_FN_ATTRS
+_mm_set1_pi8(char __b)
+{
+ return _mm_set_pi8(__b, __b, __b, __b, __b, __b, __b, __b);
+}
+
+static __inline__ __m64 __DEFAULT_FN_ATTRS
+_mm_setr_pi32(int __i0, int __i1)
+{
+ return _mm_set_pi32(__i1, __i0);
+}
+
+static __inline__ __m64 __DEFAULT_FN_ATTRS
+_mm_setr_pi16(short __w0, short __w1, short __w2, short __w3)
+{
+ return _mm_set_pi16(__w3, __w2, __w1, __w0);
+}
+
+static __inline__ __m64 __DEFAULT_FN_ATTRS
+_mm_setr_pi8(char __b0, char __b1, char __b2, char __b3, char __b4, char __b5,
+ char __b6, char __b7)
+{
+ return _mm_set_pi8(__b7, __b6, __b5, __b4, __b3, __b2, __b1, __b0);
+}
+
+#undef __DEFAULT_FN_ATTRS
+
+/* Aliases for compatibility. */
+#define _m_empty _mm_empty
+#define _m_from_int _mm_cvtsi32_si64
+#define _m_from_int64 _mm_cvtsi64_m64
+#define _m_to_int _mm_cvtsi64_si32
+#define _m_to_int64 _mm_cvtm64_si64
+#define _m_packsswb _mm_packs_pi16
+#define _m_packssdw _mm_packs_pi32
+#define _m_packuswb _mm_packs_pu16
+#define _m_punpckhbw _mm_unpackhi_pi8
+#define _m_punpckhwd _mm_unpackhi_pi16
+#define _m_punpckhdq _mm_unpackhi_pi32
+#define _m_punpcklbw _mm_unpacklo_pi8
+#define _m_punpcklwd _mm_unpacklo_pi16
+#define _m_punpckldq _mm_unpacklo_pi32
+#define _m_paddb _mm_add_pi8
+#define _m_paddw _mm_add_pi16
+#define _m_paddd _mm_add_pi32
+#define _m_paddsb _mm_adds_pi8
+#define _m_paddsw _mm_adds_pi16
+#define _m_paddusb _mm_adds_pu8
+#define _m_paddusw _mm_adds_pu16
+#define _m_psubb _mm_sub_pi8
+#define _m_psubw _mm_sub_pi16
+#define _m_psubd _mm_sub_pi32
+#define _m_psubsb _mm_subs_pi8
+#define _m_psubsw _mm_subs_pi16
+#define _m_psubusb _mm_subs_pu8
+#define _m_psubusw _mm_subs_pu16
+#define _m_pmaddwd _mm_madd_pi16
+#define _m_pmulhw _mm_mulhi_pi16
+#define _m_pmullw _mm_mullo_pi16
+#define _m_psllw _mm_sll_pi16
+#define _m_psllwi _mm_slli_pi16
+#define _m_pslld _mm_sll_pi32
+#define _m_pslldi _mm_slli_pi32
+#define _m_psllq _mm_sll_si64
+#define _m_psllqi _mm_slli_si64
+#define _m_psraw _mm_sra_pi16
+#define _m_psrawi _mm_srai_pi16
+#define _m_psrad _mm_sra_pi32
+#define _m_psradi _mm_srai_pi32
+#define _m_psrlw _mm_srl_pi16
+#define _m_psrlwi _mm_srli_pi16
+#define _m_psrld _mm_srl_pi32
+#define _m_psrldi _mm_srli_pi32
+#define _m_psrlq _mm_srl_si64
+#define _m_psrlqi _mm_srli_si64
+#define _m_pand _mm_and_si64
+#define _m_pandn _mm_andnot_si64
+#define _m_por _mm_or_si64
+#define _m_pxor _mm_xor_si64
+#define _m_pcmpeqb _mm_cmpeq_pi8
+#define _m_pcmpeqw _mm_cmpeq_pi16
+#define _m_pcmpeqd _mm_cmpeq_pi32
+#define _m_pcmpgtb _mm_cmpgt_pi8
+#define _m_pcmpgtw _mm_cmpgt_pi16
+#define _m_pcmpgtd _mm_cmpgt_pi32
+
+#endif /* __MMINTRIN_H */
+
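A hedged sketch of typical MMX usage (assumes -mmmx): saturating byte arithmetic was the main draw of these intrinsics, and _mm_empty() must run before any subsequent x87 floating-point code because the MMX registers alias the x87 stack.

    #include <mmintrin.h>

    /* Brighten eight packed pixels, clamping at 255 instead of wrapping. */
    static __m64 brighten(__m64 pixels) {
        return _mm_adds_pu8(pixels, _mm_set1_pi8((char)32));
    }

    /* Callers should issue _mm_empty() once the MMX work is done. */
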
diff --git a/clang-2690385/lib64/clang/3.8/include/module.modulemap b/clang-2690385/lib64/clang/3.8/include/module.modulemap
new file mode 100644
index 00000000..b147e891
--- /dev/null
+++ b/clang-2690385/lib64/clang/3.8/include/module.modulemap
@@ -0,0 +1,171 @@
+module _Builtin_intrinsics [system] [extern_c] {
+ explicit module altivec {
+ requires altivec
+ header "altivec.h"
+ }
+
+ explicit module arm {
+ requires arm
+
+ explicit module acle {
+ header "arm_acle.h"
+ export *
+ }
+
+ explicit module neon {
+ requires neon
+ header "arm_neon.h"
+ export *
+ }
+ }
+
+ explicit module intel {
+ requires x86
+ export *
+
+ header "immintrin.h"
+ header "x86intrin.h"
+
+ explicit module mm_malloc {
+ header "mm_malloc.h"
+ export * // note: for <stdlib.h> dependency
+ }
+
+ explicit module cpuid {
+ header "cpuid.h"
+ }
+
+ explicit module mmx {
+ header "mmintrin.h"
+ }
+
+ explicit module f16c {
+ header "f16cintrin.h"
+ }
+
+ explicit module sse {
+ export mmx
+ export sse2 // note: for hackish <emmintrin.h> dependency
+ header "xmmintrin.h"
+ }
+
+ explicit module sse2 {
+ export sse
+ header "emmintrin.h"
+ }
+
+ explicit module sse3 {
+ export sse2
+ header "pmmintrin.h"
+ }
+
+ explicit module ssse3 {
+ export sse3
+ header "tmmintrin.h"
+ }
+
+ explicit module sse4_1 {
+ export ssse3
+ header "smmintrin.h"
+ }
+
+ explicit module sse4_2 {
+ export sse4_1
+ header "nmmintrin.h"
+ }
+
+ explicit module sse4a {
+ export sse3
+ header "ammintrin.h"
+ }
+
+ explicit module avx {
+ export sse4_2
+ header "avxintrin.h"
+ }
+
+ explicit module avx2 {
+ export avx
+ header "avx2intrin.h"
+ }
+
+ explicit module avx512f {
+ export avx2
+ header "avx512fintrin.h"
+ }
+
+ explicit module avx512er {
+ header "avx512erintrin.h"
+ }
+
+ explicit module bmi {
+ header "bmiintrin.h"
+ }
+
+ explicit module bmi2 {
+ header "bmi2intrin.h"
+ }
+
+ explicit module fma {
+ header "fmaintrin.h"
+ }
+
+ explicit module fma4 {
+ export sse3
+ header "fma4intrin.h"
+ }
+
+ explicit module lzcnt {
+ header "lzcntintrin.h"
+ }
+
+ explicit module popcnt {
+ header "popcntintrin.h"
+ }
+
+ explicit module mm3dnow {
+ header "mm3dnow.h"
+ }
+
+ explicit module xop {
+ export fma4
+ header "xopintrin.h"
+ }
+
+ explicit module aes_pclmul {
+ header "wmmintrin.h"
+ export aes
+ export pclmul
+ }
+
+ explicit module aes {
+ header "__wmmintrin_aes.h"
+ }
+
+ explicit module pclmul {
+ header "__wmmintrin_pclmul.h"
+ }
+ }
+
+ explicit module systemz {
+ requires systemz
+ export *
+
+ header "s390intrin.h"
+
+ explicit module htm {
+ requires htm
+ header "htmintrin.h"
+ header "htmxlintrin.h"
+ }
+
+ explicit module zvector {
+ requires zvector, vx
+ header "vecintrin.h"
+ }
+ }
+}
+
+module _Builtin_stddef_max_align_t [system] [extern_c] {
+ header "__stddef_max_align_t.h"
+}
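
With modules enabled, an ordinary textual include of one of these headers is remapped onto the corresponding submodule. For example (a hypothetical translation unit, assuming an x86 target):

    /* foo.c -- maps onto _Builtin_intrinsics.intel.sse2 under -fmodules. */
    #include <emmintrin.h>

    __m128i zero(void) { return _mm_setzero_si128(); }

Compiled with, e.g., clang -fmodules -msse2 -c foo.c; the `requires` clauses (x86, neon, htm, ...) keep each submodule invisible on targets that lack the feature.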
diff --git a/clang-2690385/lib64/clang/3.8/include/nmmintrin.h b/clang-2690385/lib64/clang/3.8/include/nmmintrin.h
new file mode 100644
index 00000000..57fec159
--- /dev/null
+++ b/clang-2690385/lib64/clang/3.8/include/nmmintrin.h
@@ -0,0 +1,30 @@
+/*===---- nmmintrin.h - SSE4 intrinsics ------------------------------------===
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+ * THE SOFTWARE.
+ *
+ *===-----------------------------------------------------------------------===
+ */
+
+#ifndef _NMMINTRIN_H
+#define _NMMINTRIN_H
+
+/* To match the expectations of gcc we put the SSE4.2 definitions into
+   smmintrin.h, so we just include it here. */
+#include <smmintrin.h>
+#endif /* _NMMINTRIN_H */
diff --git a/clang-2690385/lib64/clang/3.8/include/pmmintrin.h b/clang-2690385/lib64/clang/3.8/include/pmmintrin.h
new file mode 100644
index 00000000..0ff94091
--- /dev/null
+++ b/clang-2690385/lib64/clang/3.8/include/pmmintrin.h
@@ -0,0 +1,116 @@
+/*===---- pmmintrin.h - SSE3 intrinsics ------------------------------------===
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+ * THE SOFTWARE.
+ *
+ *===-----------------------------------------------------------------------===
+ */
+
+#ifndef __PMMINTRIN_H
+#define __PMMINTRIN_H
+
+#include <emmintrin.h>
+
+/* Define the default attributes for the functions in this file. */
+#define __DEFAULT_FN_ATTRS __attribute__((__always_inline__, __nodebug__, __target__("sse3")))
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_lddqu_si128(__m128i const *__p)
+{
+ return (__m128i)__builtin_ia32_lddqu((char const *)__p);
+}
+
+static __inline__ __m128 __DEFAULT_FN_ATTRS
+_mm_addsub_ps(__m128 __a, __m128 __b)
+{
+ return __builtin_ia32_addsubps(__a, __b);
+}
+
+static __inline__ __m128 __DEFAULT_FN_ATTRS
+_mm_hadd_ps(__m128 __a, __m128 __b)
+{
+ return __builtin_ia32_haddps(__a, __b);
+}
+
+static __inline__ __m128 __DEFAULT_FN_ATTRS
+_mm_hsub_ps(__m128 __a, __m128 __b)
+{
+ return __builtin_ia32_hsubps(__a, __b);
+}
+
+static __inline__ __m128 __DEFAULT_FN_ATTRS
+_mm_movehdup_ps(__m128 __a)
+{
+ return __builtin_shufflevector(__a, __a, 1, 1, 3, 3);
+}
+
+static __inline__ __m128 __DEFAULT_FN_ATTRS
+_mm_moveldup_ps(__m128 __a)
+{
+ return __builtin_shufflevector(__a, __a, 0, 0, 2, 2);
+}
+
+static __inline__ __m128d __DEFAULT_FN_ATTRS
+_mm_addsub_pd(__m128d __a, __m128d __b)
+{
+ return __builtin_ia32_addsubpd(__a, __b);
+}
+
+static __inline__ __m128d __DEFAULT_FN_ATTRS
+_mm_hadd_pd(__m128d __a, __m128d __b)
+{
+ return __builtin_ia32_haddpd(__a, __b);
+}
+
+static __inline__ __m128d __DEFAULT_FN_ATTRS
+_mm_hsub_pd(__m128d __a, __m128d __b)
+{
+ return __builtin_ia32_hsubpd(__a, __b);
+}
+
+#define _mm_loaddup_pd(dp) _mm_load1_pd(dp)
+
+static __inline__ __m128d __DEFAULT_FN_ATTRS
+_mm_movedup_pd(__m128d __a)
+{
+ return __builtin_shufflevector(__a, __a, 0, 0);
+}
+
+#define _MM_DENORMALS_ZERO_ON (0x0040)
+#define _MM_DENORMALS_ZERO_OFF (0x0000)
+
+#define _MM_DENORMALS_ZERO_MASK (0x0040)
+
+#define _MM_GET_DENORMALS_ZERO_MODE() (_mm_getcsr() & _MM_DENORMALS_ZERO_MASK)
+#define _MM_SET_DENORMALS_ZERO_MODE(x) (_mm_setcsr((_mm_getcsr() & ~_MM_DENORMALS_ZERO_MASK) | (x)))
+
+static __inline__ void __DEFAULT_FN_ATTRS
+_mm_monitor(void const *__p, unsigned __extensions, unsigned __hints)
+{
+ __builtin_ia32_monitor((void *)__p, __extensions, __hints);
+}
+
+static __inline__ void __DEFAULT_FN_ATTRS
+_mm_mwait(unsigned __extensions, unsigned __hints)
+{
+ __builtin_ia32_mwait(__extensions, __hints);
+}
+
+#undef __DEFAULT_FN_ATTRS
+
+#endif /* __PMMINTRIN_H */
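
The horizontal-add intrinsics sum adjacent lanes, so two rounds collapse a four-float vector to its total; a small sketch (assumes -msse3):

    #include <pmmintrin.h>

    static float sum4(__m128 v) {
        v = _mm_hadd_ps(v, v);   /* (a+b, c+d, a+b, c+d) */
        v = _mm_hadd_ps(v, v);   /* (a+b+c+d, ...) */
        return _mm_cvtss_f32(v); /* extract lane 0 */
    }
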
diff --git a/clang-2690385/lib64/clang/3.8/include/popcntintrin.h b/clang-2690385/lib64/clang/3.8/include/popcntintrin.h
new file mode 100644
index 00000000..6fcda65c
--- /dev/null
+++ b/clang-2690385/lib64/clang/3.8/include/popcntintrin.h
@@ -0,0 +1,58 @@
+/*===---- popcntintrin.h - POPCNT intrinsics -------------------------------===
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+ * THE SOFTWARE.
+ *
+ *===-----------------------------------------------------------------------===
+ */
+
+#ifndef _POPCNTINTRIN_H
+#define _POPCNTINTRIN_H
+
+/* Define the default attributes for the functions in this file. */
+#define __DEFAULT_FN_ATTRS __attribute__((__always_inline__, __nodebug__, __target__("popcnt")))
+
+static __inline__ int __DEFAULT_FN_ATTRS
+_mm_popcnt_u32(unsigned int __A)
+{
+ return __builtin_popcount(__A);
+}
+
+static __inline__ int __DEFAULT_FN_ATTRS
+_popcnt32(int __A)
+{
+ return __builtin_popcount(__A);
+}
+
+#ifdef __x86_64__
+static __inline__ long long __DEFAULT_FN_ATTRS
+_mm_popcnt_u64(unsigned long long __A)
+{
+ return __builtin_popcountll(__A);
+}
+
+static __inline__ long long __DEFAULT_FN_ATTRS
+_popcnt64(long long __A)
+{
+ return __builtin_popcountll(__A);
+}
+#endif /* __x86_64__ */
+
+#undef __DEFAULT_FN_ATTRS
+
+#endif /* _POPCNTINTRIN_H */
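
A one-line application sketch (assumes -mpopcnt): population count over an XOR gives the Hamming distance between two bitmasks.

    #include <popcntintrin.h>

    static int hamming32(unsigned int a, unsigned int b) {
        return _mm_popcnt_u32(a ^ b);   /* number of differing bits */
    }
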
diff --git a/clang-2690385/lib64/clang/3.8/include/prfchwintrin.h b/clang-2690385/lib64/clang/3.8/include/prfchwintrin.h
new file mode 100644
index 00000000..ba028575
--- /dev/null
+++ b/clang-2690385/lib64/clang/3.8/include/prfchwintrin.h
@@ -0,0 +1,45 @@
+/*===---- prfchwintrin.h - PREFETCHW intrinsic -----------------------------===
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+ * THE SOFTWARE.
+ *
+ *===-----------------------------------------------------------------------===
+ */
+
+#if !defined(__X86INTRIN_H) && !defined(_MM3DNOW_H_INCLUDED)
+#error "Never use <prfchwintrin.h> directly; include <x86intrin.h> or <mm3dnow.h> instead."
+#endif
+
+#ifndef __PRFCHWINTRIN_H
+#define __PRFCHWINTRIN_H
+
+#if defined(__PRFCHW__) || defined(__3dNOW__)
+static __inline__ void __attribute__((__always_inline__, __nodebug__))
+_m_prefetch(void *__P)
+{
+ __builtin_prefetch (__P, 0, 3 /* _MM_HINT_T0 */);
+}
+
+static __inline__ void __attribute__((__always_inline__, __nodebug__))
+_m_prefetchw(void *__P)
+{
+ __builtin_prefetch (__P, 1, 3 /* _MM_HINT_T0 */);
+}
+#endif
+
+#endif /* __PRFCHWINTRIN_H */
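
A hedged sketch of the intended pattern (assumes a target defining __PRFCHW__ or __3dNOW__; the node type is hypothetical): prefetching a node for write before mutating it lets the cache line be acquired in exclusive state ahead of the store.

    #include <x86intrin.h>

    struct node { struct node *next; long value; };

    static void bump_all(struct node *n) {
        while (n) {
            if (n->next)
                _m_prefetchw(n->next);  /* hint: the next node will be written */
            n->value += 1;
            n = n->next;
        }
    }
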
diff --git a/clang-2690385/lib64/clang/3.8/include/rdseedintrin.h b/clang-2690385/lib64/clang/3.8/include/rdseedintrin.h
new file mode 100644
index 00000000..421f4ea4
--- /dev/null
+++ b/clang-2690385/lib64/clang/3.8/include/rdseedintrin.h
@@ -0,0 +1,56 @@
+/*===---- rdseedintrin.h - RDSEED intrinsics -------------------------------===
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+ * THE SOFTWARE.
+ *
+ *===-----------------------------------------------------------------------===
+ */
+
+#ifndef __X86INTRIN_H
+#error "Never use <rdseedintrin.h> directly; include <x86intrin.h> instead."
+#endif
+
+#ifndef __RDSEEDINTRIN_H
+#define __RDSEEDINTRIN_H
+
+/* Define the default attributes for the functions in this file. */
+#define __DEFAULT_FN_ATTRS __attribute__((__always_inline__, __nodebug__, __target__("rdseed")))
+
+static __inline__ int __DEFAULT_FN_ATTRS
+_rdseed16_step(unsigned short *__p)
+{
+ return __builtin_ia32_rdseed16_step(__p);
+}
+
+static __inline__ int __DEFAULT_FN_ATTRS
+_rdseed32_step(unsigned int *__p)
+{
+ return __builtin_ia32_rdseed32_step(__p);
+}
+
+#ifdef __x86_64__
+static __inline__ int __DEFAULT_FN_ATTRS
+_rdseed64_step(unsigned long long *__p)
+{
+ return __builtin_ia32_rdseed64_step(__p);
+}
+#endif
+
+#undef __DEFAULT_FN_ATTRS
+
+#endif /* __RDSEEDINTRIN_H */
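
The *_step intrinsics return 0 when the entropy source is temporarily exhausted, so callers are expected to retry; a minimal sketch (assumes -mrdseed):

    #include <x86intrin.h>

    static int rdseed32_retry(unsigned int *out, int attempts) {
        while (attempts-- > 0)
            if (_rdseed32_step(out))
                return 1;   /* success: *out holds a fresh seed */
        return 0;           /* exhausted: fall back to another entropy source */
    }
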
diff --git a/clang-2690385/lib64/clang/3.8/include/rtmintrin.h b/clang-2690385/lib64/clang/3.8/include/rtmintrin.h
new file mode 100644
index 00000000..e6a58d74
--- /dev/null
+++ b/clang-2690385/lib64/clang/3.8/include/rtmintrin.h
@@ -0,0 +1,59 @@
+/*===---- rtmintrin.h - RTM intrinsics -------------------------------------===
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+ * THE SOFTWARE.
+ *
+ *===-----------------------------------------------------------------------===
+ */
+
+#ifndef __IMMINTRIN_H
+#error "Never use <rtmintrin.h> directly; include <immintrin.h> instead."
+#endif
+
+#ifndef __RTMINTRIN_H
+#define __RTMINTRIN_H
+
+#define _XBEGIN_STARTED (~0u)
+#define _XABORT_EXPLICIT (1 << 0)
+#define _XABORT_RETRY (1 << 1)
+#define _XABORT_CONFLICT (1 << 2)
+#define _XABORT_CAPACITY (1 << 3)
+#define _XABORT_DEBUG (1 << 4)
+#define _XABORT_NESTED (1 << 5)
+#define _XABORT_CODE(x) (((x) >> 24) & 0xFF)
+
+/* Define the default attributes for the functions in this file. */
+#define __DEFAULT_FN_ATTRS __attribute__((__always_inline__, __nodebug__, __target__("rtm")))
+
+static __inline__ unsigned int __DEFAULT_FN_ATTRS
+_xbegin(void)
+{
+ return __builtin_ia32_xbegin();
+}
+
+static __inline__ void __DEFAULT_FN_ATTRS
+_xend(void)
+{
+ __builtin_ia32_xend();
+}
+
+#define _xabort(imm) __builtin_ia32_xabort((imm))
+
+#undef __DEFAULT_FN_ATTRS
+
+#endif /* __RTMINTRIN_H */
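
A minimal sketch of the canonical RTM pattern (assumes -mrtm): attempt the critical section as a transaction and fall back to a conventional path on abort. The fallback lock functions are assumptions of this sketch, not part of the header.

    #include <immintrin.h>

    extern void take_fallback_lock(void);   /* hypothetical */
    extern void drop_fallback_lock(void);   /* hypothetical */

    static void increment(long *counter) {
        unsigned int status = _xbegin();
        if (status == _XBEGIN_STARTED) {
            ++*counter;
            _xend();                        /* commit the transaction */
        } else {
            take_fallback_lock();           /* aborted: status holds _XABORT_* bits */
            ++*counter;
            drop_fallback_lock();
        }
    }
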
diff --git a/clang-2690385/lib64/clang/3.8/include/s390intrin.h b/clang-2690385/lib64/clang/3.8/include/s390intrin.h
new file mode 100644
index 00000000..d51274c0
--- /dev/null
+++ b/clang-2690385/lib64/clang/3.8/include/s390intrin.h
@@ -0,0 +1,39 @@
+/*===---- s390intrin.h - SystemZ intrinsics --------------------------------===
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+ * THE SOFTWARE.
+ *
+ *===-----------------------------------------------------------------------===
+ */
+
+#ifndef __S390INTRIN_H
+#define __S390INTRIN_H
+
+#ifndef __s390__
+#error "<s390intrin.h> is for s390 only"
+#endif
+
+#ifdef __HTM__
+#include <htmintrin.h>
+#endif
+
+#ifdef __VEC__
+#include <vecintrin.h>
+#endif
+
+#endif /* __S390INTRIN_H */
diff --git a/clang-2690385/lib64/clang/3.8/include/sanitizer/allocator_interface.h b/clang-2690385/lib64/clang/3.8/include/sanitizer/allocator_interface.h
new file mode 100644
index 00000000..ab251f89
--- /dev/null
+++ b/clang-2690385/lib64/clang/3.8/include/sanitizer/allocator_interface.h
@@ -0,0 +1,66 @@
+//===-- allocator_interface.h ---------------------------------------------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// Public interface header for allocator used in sanitizers (ASan/TSan/MSan).
+//===----------------------------------------------------------------------===//
+#ifndef SANITIZER_ALLOCATOR_INTERFACE_H
+#define SANITIZER_ALLOCATOR_INTERFACE_H
+
+#include <stddef.h>
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+  /* Returns the estimated number of bytes that will be reserved by the
+     allocator for a request of "size" bytes. If the allocator can't allocate
+     that much memory, returns the maximal possible allocation size; otherwise
+     returns "size". */
+ size_t __sanitizer_get_estimated_allocated_size(size_t size);
+
+ /* Returns true if p was returned by the allocator and
+ is not yet freed. */
+ int __sanitizer_get_ownership(const volatile void *p);
+
+ /* Returns the number of bytes reserved for the pointer p.
+ Requires (get_ownership(p) == true) or (p == 0). */
+ size_t __sanitizer_get_allocated_size(const volatile void *p);
+
+  /* Number of bytes allocated and not yet freed by the application. */
+ size_t __sanitizer_get_current_allocated_bytes();
+
+  /* Number of bytes mmapped by the allocator to fulfill allocation requests.
+     Generally, for a request of X bytes, the allocator can reserve and add to
+     its free lists a large number of chunks of size X for future requests.
+     All these chunks count toward the heap size. Currently, the allocator
+     never releases memory to the OS (instead, it just puts freed chunks on
+     free lists). */
+ size_t __sanitizer_get_heap_size();
+
+  /* Number of bytes mmapped by the allocator that can be used to fulfill
+     allocation requests. When a user program frees a memory chunk, it may
+     first fall into quarantine and only count toward
+     __sanitizer_get_free_bytes() later. */
+ size_t __sanitizer_get_free_bytes();
+
+  /* Number of bytes in unmapped pages that have been released to the OS.
+     Currently always returns 0. */
+ size_t __sanitizer_get_unmapped_bytes();
+
+  /* Malloc hooks that may optionally be provided by the user.
+ __sanitizer_malloc_hook(ptr, size) is called immediately after
+ allocation of "size" bytes, which returned "ptr".
+ __sanitizer_free_hook(ptr) is called immediately before
+ deallocation of "ptr". */
+ void __sanitizer_malloc_hook(const volatile void *ptr, size_t size);
+ void __sanitizer_free_hook(const volatile void *ptr);
+#ifdef __cplusplus
+} // extern "C"
+#endif
+
+#endif
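
A hedged sketch of the hook mechanism: if the program defines these weak symbols, the sanitizer runtime calls them around every allocation and deallocation.

    #include <sanitizer/allocator_interface.h>
    #include <stdio.h>

    void __sanitizer_malloc_hook(const volatile void *ptr, size_t size) {
        fprintf(stderr, "alloc %p (%zu bytes)\n", (void *)ptr, size);
    }

    void __sanitizer_free_hook(const volatile void *ptr) {
        fprintf(stderr, "free  %p\n", (void *)ptr);
    }
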
diff --git a/clang-2690385/lib64/clang/3.8/include/sanitizer/asan_interface.h b/clang-2690385/lib64/clang/3.8/include/sanitizer/asan_interface.h
new file mode 100644
index 00000000..97ba0ceb
--- /dev/null
+++ b/clang-2690385/lib64/clang/3.8/include/sanitizer/asan_interface.h
@@ -0,0 +1,151 @@
+//===-- sanitizer/asan_interface.h ------------------------------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file is a part of AddressSanitizer.
+//
+// Public interface header.
+//===----------------------------------------------------------------------===//
+#ifndef SANITIZER_ASAN_INTERFACE_H
+#define SANITIZER_ASAN_INTERFACE_H
+
+#include <sanitizer/common_interface_defs.h>
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+ // Marks memory region [addr, addr+size) as unaddressable.
+ // This memory must be previously allocated by the user program. Accessing
+ // addresses in this region from instrumented code is forbidden until
+ // this region is unpoisoned. This function is not guaranteed to poison
+  // the whole region - it may poison only a subregion of [addr, addr+size) due
+ // to ASan alignment restrictions.
+ // Method is NOT thread-safe in the sense that no two threads can
+ // (un)poison memory in the same memory region simultaneously.
+ void __asan_poison_memory_region(void const volatile *addr, size_t size);
+ // Marks memory region [addr, addr+size) as addressable.
+ // This memory must be previously allocated by the user program. Accessing
+ // addresses in this region is allowed until this region is poisoned again.
+ // This function may unpoison a superregion of [addr, addr+size) due to
+ // ASan alignment restrictions.
+ // Method is NOT thread-safe in the sense that no two threads can
+ // (un)poison memory in the same memory region simultaneously.
+ void __asan_unpoison_memory_region(void const volatile *addr, size_t size);
+
+// User code should use macros instead of functions.
+#if __has_feature(address_sanitizer) || defined(__SANITIZE_ADDRESS__)
+#define ASAN_POISON_MEMORY_REGION(addr, size) \
+ __asan_poison_memory_region((addr), (size))
+#define ASAN_UNPOISON_MEMORY_REGION(addr, size) \
+ __asan_unpoison_memory_region((addr), (size))
+#else
+#define ASAN_POISON_MEMORY_REGION(addr, size) \
+ ((void)(addr), (void)(size))
+#define ASAN_UNPOISON_MEMORY_REGION(addr, size) \
+ ((void)(addr), (void)(size))
+#endif
+
+ // Returns 1 if addr is poisoned (i.e. 1-byte read/write access to this
+  // address will result in an error report from AddressSanitizer).
+ // Otherwise returns 0.
+ int __asan_address_is_poisoned(void const volatile *addr);
+
+ // If at least one byte in [beg, beg+size) is poisoned, return the address
+ // of the first such byte. Otherwise return 0.
+ void *__asan_region_is_poisoned(void *beg, size_t size);
+
+ // Print the description of addr (useful when debugging in gdb).
+ void __asan_describe_address(void *addr);
+
+ // Useful for calling from a debugger to get information about an ASan error.
+ // Returns 1 if an error has been (or is being) reported, otherwise returns 0.
+ int __asan_report_present();
+
+ // Useful for calling from a debugger to get information about an ASan error.
+ // If an error has been (or is being) reported, the following functions return
+ // the pc, bp, sp, address, access type (0 = read, 1 = write), access size and
+ // bug description (e.g. "heap-use-after-free"). Otherwise they return 0.
+ void *__asan_get_report_pc();
+ void *__asan_get_report_bp();
+ void *__asan_get_report_sp();
+ void *__asan_get_report_address();
+ int __asan_get_report_access_type();
+ size_t __asan_get_report_access_size();
+ const char *__asan_get_report_description();
+
+ // Useful for calling from the debugger to get information about a pointer.
+ // Returns the category of the given pointer as a constant string.
+ // Possible return values are "global", "stack", "stack-fake", "heap",
+ // "heap-invalid", "shadow-low", "shadow-gap", "shadow-high", "unknown".
+ // If global or stack, tries to also return the variable name, address and
+ // size. If heap, tries to return the chunk address and size. 'name' should
+ // point to an allocated buffer of size 'name_size'.
+ const char *__asan_locate_address(void *addr, char *name, size_t name_size,
+ void **region_address, size_t *region_size);
+
+ // Useful for calling from the debugger to get the allocation stack trace
+ // and thread ID for a heap address. Stores up to 'size' frames into 'trace',
+ // returns the number of stored frames or 0 on error.
+ size_t __asan_get_alloc_stack(void *addr, void **trace, size_t size,
+ int *thread_id);
+
+ // Useful for calling from the debugger to get the free stack trace
+ // and thread ID for a heap address. Stores up to 'size' frames into 'trace',
+ // returns the number of stored frames or 0 on error.
+ size_t __asan_get_free_stack(void *addr, void **trace, size_t size,
+ int *thread_id);
+
+ // Useful for calling from the debugger to get the current shadow memory
+ // mapping.
+ void __asan_get_shadow_mapping(size_t *shadow_scale, size_t *shadow_offset);
+
+ // This is an internal function that is called to report an error.
+ // However it is still a part of the interface because users may want to
+ // set a breakpoint on this function in a debugger.
+ void __asan_report_error(void *pc, void *bp, void *sp,
+ void *addr, int is_write, size_t access_size);
+
+ // Deprecated. Call __sanitizer_set_death_callback instead.
+ void __asan_set_death_callback(void (*callback)(void));
+
+ void __asan_set_error_report_callback(void (*callback)(const char*));
+
+  // The user may provide a function that is called right when ASan detects
+  // an error. This can be used to notice cases when ASan detects an error but
+  // the program crashes before the ASan report is printed.
+ void __asan_on_error();
+
+ // Prints accumulated stats to stderr. Used for debugging.
+ void __asan_print_accumulated_stats();
+
+  // This function may optionally be provided by the user and should return
+ // a string containing ASan runtime options. See asan_flags.h for details.
+ const char* __asan_default_options();
+
+ // The following 2 functions facilitate garbage collection in presence of
+ // asan's fake stack.
+
+  // Returns an opaque handle to be used later in __asan_addr_is_in_fake_stack.
+ // Returns NULL if the current thread does not have a fake stack.
+ void *__asan_get_current_fake_stack();
+
+ // If fake_stack is non-NULL and addr belongs to a fake frame in
+ // fake_stack, returns the address on real stack that corresponds to
+ // the fake frame and sets beg/end to the boundaries of this fake frame.
+ // Otherwise returns NULL and does not touch beg/end.
+ // If beg/end are NULL, they are not touched.
+ // This function may be called from a thread other than the owner of
+  // fake_stack, but the owner thread needs to be alive.
+ void *__asan_addr_is_in_fake_stack(void *fake_stack, void *addr, void **beg,
+ void **end);
+
+#ifdef __cplusplus
+} // extern "C"
+#endif
+
+#endif // SANITIZER_ASAN_INTERFACE_H
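
A hedged sketch of manual poisoning in a pool allocator (an assumption of this edit, not from the header): free pool space stays poisoned so stray accesses are reported, and the macros compile to no-ops when ASan is disabled.

    #include <sanitizer/asan_interface.h>

    static char pool[4096];
    static size_t used;

    void pool_init(void) {
        used = 0;
        ASAN_POISON_MEMORY_REGION(pool, sizeof(pool));   /* nothing handed out yet */
    }

    void *pool_alloc(size_t n) {
        if (used + n > sizeof(pool))
            return 0;
        void *p = pool + used;
        used += n;
        ASAN_UNPOISON_MEMORY_REGION(p, n);               /* this slice is now live */
        return p;
    }
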
diff --git a/clang-2690385/lib64/clang/3.8/include/sanitizer/common_interface_defs.h b/clang-2690385/lib64/clang/3.8/include/sanitizer/common_interface_defs.h
new file mode 100644
index 00000000..b736ed9e
--- /dev/null
+++ b/clang-2690385/lib64/clang/3.8/include/sanitizer/common_interface_defs.h
@@ -0,0 +1,135 @@
+//===-- sanitizer/common_interface_defs.h -----------------------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// Common part of the public sanitizer interface.
+//===----------------------------------------------------------------------===//
+
+#ifndef SANITIZER_COMMON_INTERFACE_DEFS_H
+#define SANITIZER_COMMON_INTERFACE_DEFS_H
+
+#include <stddef.h>
+#include <stdint.h>
+
+// GCC does not understand __has_feature.
+#if !defined(__has_feature)
+# define __has_feature(x) 0
+#endif
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+ // Arguments for __sanitizer_sandbox_on_notify() below.
+ typedef struct {
+ // Enable sandbox support in sanitizer coverage.
+ int coverage_sandboxed;
+ // File descriptor to write coverage data to. If -1 is passed, a file will
+  // be pre-opened by __sanitizer_sandbox_on_notify(). This field has no
+ // effect if coverage_sandboxed == 0.
+ intptr_t coverage_fd;
+ // If non-zero, split the coverage data into well-formed blocks. This is
+ // useful when coverage_fd is a socket descriptor. Each block will contain
+ // a header, allowing data from multiple processes to be sent over the same
+ // socket.
+ unsigned int coverage_max_block_size;
+ } __sanitizer_sandbox_arguments;
+
+ // Tell the tools to write their reports to "path.<pid>" instead of stderr.
+ void __sanitizer_set_report_path(const char *path);
+
+ // Notify the tools that the sandbox is going to be turned on. The reserved
+ // parameter will be used in the future to hold a structure with functions
+ // that the tools may call to bypass the sandbox.
+ void __sanitizer_sandbox_on_notify(__sanitizer_sandbox_arguments *args);
+
+ // This function is called by the tool when it has just finished reporting
+ // an error. 'error_summary' is a one-line string that summarizes
+ // the error message. This function can be overridden by the client.
+ void __sanitizer_report_error_summary(const char *error_summary);
+
+ // Some of the sanitizers (e.g. asan/tsan) may miss bugs that happen
+ // in unaligned loads/stores. In order to find such bugs reliably one needs
+ // to replace plain unaligned loads/stores with these calls.
+ uint16_t __sanitizer_unaligned_load16(const void *p);
+ uint32_t __sanitizer_unaligned_load32(const void *p);
+ uint64_t __sanitizer_unaligned_load64(const void *p);
+ void __sanitizer_unaligned_store16(void *p, uint16_t x);
+ void __sanitizer_unaligned_store32(void *p, uint32_t x);
+ void __sanitizer_unaligned_store64(void *p, uint64_t x);
+
+ // Annotate the current state of a contiguous container, such as
+ // std::vector, std::string or similar.
+ // A contiguous container is a container that keeps all of its elements
+ // in a contiguous region of memory. The container owns the region of memory
+ // [beg, end); the memory [beg, mid) is used to store the current elements
+ // and the memory [mid, end) is reserved for future elements;
+ // beg <= mid <= end. For example, in "std::vector<> v"
+ // beg = &v[0];
+ // end = beg + v.capacity() * sizeof(v[0]);
+ // mid = beg + v.size() * sizeof(v[0]);
+ //
+ // This annotation tells the Sanitizer tool about the current state of the
+ // container so that the tool can report errors when memory from [mid, end)
+ // is accessed. Insert this annotation into methods like push_back/pop_back.
+ // Supply the old and the new values of mid (old_mid/new_mid).
+ // In the initial state mid == end and so should be the final
+ // state when the container is destroyed or when it reallocates the storage.
+ //
+ // Use with caution and don't use for anything other than vector-like classes.
+ //
+ // For AddressSanitizer, 'beg' should be 8-aligned and 'end' should
+ // be either 8-aligned or it should point to the end of a separate heap-,
+ // stack-, or global- allocated buffer. I.e. the following will not work:
+ // int64_t x[2]; // 16 bytes, 8-aligned.
+ // char *beg = (char *)&x[0];
+ // char *end = beg + 12; // Not 8 aligned, not the end of the buffer.
+ // This however will work fine:
+ // int32_t x[3]; // 12 bytes, but 8-aligned under AddressSanitizer.
+ // char *beg = (char*)&x[0];
+ // char *end = beg + 12; // Not 8-aligned, but is the end of the buffer.
+ void __sanitizer_annotate_contiguous_container(const void *beg,
+ const void *end,
+ const void *old_mid,
+ const void *new_mid);
+ // Returns true if the contiguous container [beg, end) is properly poisoned
+ // (e.g. with __sanitizer_annotate_contiguous_container), i.e. if
+ // - [beg, mid) is addressable,
+ // - [mid, end) is unaddressable.
+ // Full verification requires O(end-beg) time; this function tries to avoid
+ // such complexity by touching only parts of the container around beg/mid/end.
+ int __sanitizer_verify_contiguous_container(const void *beg, const void *mid,
+ const void *end);
+
+  // Similar to __sanitizer_verify_contiguous_container, but returns the
+  // address of the first improperly poisoned byte. Returns null if the area
+  // is poisoned properly.
+ const void *__sanitizer_contiguous_container_find_bad_address(
+ const void *beg, const void *mid, const void *end);
+
+ // Print the stack trace leading to this call. Useful for debugging user code.
+ void __sanitizer_print_stack_trace();
+
+ // Sets the callback to be called right before death on error.
+ // Passing 0 will unset the callback.
+ void __sanitizer_set_death_callback(void (*callback)(void));
+
+ // Interceptor hooks.
+  // Whenever a libc function interceptor is called, it checks whether the
+  // corresponding weak hook is defined and, if so, calls it.
+ // The primary use case is data-flow-guided fuzzing, where the fuzzer needs
+ // to know what is being passed to libc functions, e.g. memcmp.
+ // FIXME: implement more hooks.
+ void __sanitizer_weak_hook_memcmp(void *called_pc, const void *s1,
+ const void *s2, size_t n);
+ void __sanitizer_weak_hook_strncmp(void *called_pc, const char *s1,
+ const char *s2, size_t n);
+#ifdef __cplusplus
+} // extern "C"
+#endif
+
+#endif // SANITIZER_COMMON_INTERFACE_DEFS_H
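
To make the container annotation concrete, a hedged sketch for a hand-rolled vector (the vec layout is hypothetical): after a pop, the vacated tail must be reported as unaddressable.

    #include <sanitizer/common_interface_defs.h>

    typedef struct { char *beg, *end, *mid; } vec;   /* hypothetical layout */

    static void vec_pop(vec *v, size_t elem_size) {
        char *old_mid = v->mid;
        v->mid -= elem_size;
        /* (beg, end, old_mid, new_mid): [new mid, end) becomes unaddressable. */
        __sanitizer_annotate_contiguous_container(v->beg, v->end, old_mid, v->mid);
    }
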
diff --git a/clang-2690385/lib64/clang/3.8/include/sanitizer/coverage_interface.h b/clang-2690385/lib64/clang/3.8/include/sanitizer/coverage_interface.h
new file mode 100644
index 00000000..b93111b8
--- /dev/null
+++ b/clang-2690385/lib64/clang/3.8/include/sanitizer/coverage_interface.h
@@ -0,0 +1,65 @@
+//===-- sanitizer/coverage_interface.h --------------------------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// Public interface for sanitizer coverage.
+//===----------------------------------------------------------------------===//
+
+#ifndef SANITIZER_COVERAGE_INTERFACE_H
+#define SANITIZER_COVERAGE_INTERFACE_H
+
+#include <sanitizer/common_interface_defs.h>
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+ // Initialize coverage.
+ void __sanitizer_cov_init();
+ // Record and dump coverage info.
+ void __sanitizer_cov_dump();
+ // Open <name>.sancov.packed in the coverage directory and return the file
+ // descriptor. Returns -1 on failure, or if coverage dumping is disabled.
+ // This is intended for use by sandboxing code.
+ intptr_t __sanitizer_maybe_open_cov_file(const char *name);
+ // Get the number of unique covered blocks (or edges).
+ // This can be useful for coverage-directed in-process fuzzers.
+ uintptr_t __sanitizer_get_total_unique_coverage();
+ // Get the number of unique indirect caller-callee pairs.
+ uintptr_t __sanitizer_get_total_unique_caller_callee_pairs();
+
+ // Reset the basic-block (edge) coverage to the initial state.
+ // Useful for in-process fuzzing to start collecting coverage from scratch.
+ // Experimental; it will likely not work for a multi-threaded process.
+ void __sanitizer_reset_coverage();
+ // Set *data to the array of covered PCs and return the size of that array.
+ // Some of the entries in *data will be zero.
+ uintptr_t __sanitizer_get_coverage_guards(uintptr_t **data);
+
+ // The coverage instrumentation may optionally provide imprecise counters.
+ // Rather than exposing the counter values to the user, we instead map
+ // the counters to a bitset.
+ // Every counter is associated with 8 bits in the bitset.
+ // We define 8 value ranges: 1, 2, 3, 4-7, 8-15, 16-31, 32-127, 128+
+ // The i-th bit is set to 1 if the counter value is in the i-th range.
+ // This counter-based coverage implementation is *not* thread-safe.
+
+ // Returns the number of registered coverage counters.
+ uintptr_t __sanitizer_get_number_of_counters();
+ // Updates the counter 'bitset', clears the counters and returns the number of
+ // new bits in 'bitset'.
+ // If 'bitset' is nullptr, only clears the counters.
+ // Otherwise 'bitset' should be at least
+ // __sanitizer_get_number_of_counters bytes long and 8-aligned.
+ uintptr_t
+ __sanitizer_update_counter_bitset_and_clear_counters(uint8_t *bitset);
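+
+ // An illustrative sketch (not part of this interface): an in-process fuzzer
+ // can measure the coverage of one input as follows; run_one_input() is a
+ // hypothetical test driver and 'best_cov' a running maximum:
+ //   __sanitizer_reset_coverage();
+ //   run_one_input(data, size);
+ //   uintptr_t cov = __sanitizer_get_total_unique_coverage();
+ //   if (cov > best_cov) { best_cov = cov; /* keep this input */ }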
+#ifdef __cplusplus
+} // extern "C"
+#endif
+
+#endif // SANITIZER_COVERAGE_INTERFACE_H
diff --git a/clang-2690385/lib64/clang/3.8/include/sanitizer/dfsan_interface.h b/clang-2690385/lib64/clang/3.8/include/sanitizer/dfsan_interface.h
new file mode 100644
index 00000000..05666f73
--- /dev/null
+++ b/clang-2690385/lib64/clang/3.8/include/sanitizer/dfsan_interface.h
@@ -0,0 +1,116 @@
+//===-- dfsan_interface.h -------------------------------------------------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file is a part of DataFlowSanitizer.
+//
+// Public interface header.
+//===----------------------------------------------------------------------===//
+#ifndef DFSAN_INTERFACE_H
+#define DFSAN_INTERFACE_H
+
+#include <stddef.h>
+#include <stdint.h>
+#include <sanitizer/common_interface_defs.h>
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+typedef uint16_t dfsan_label;
+
+/// Stores information associated with a specific label identifier. A label
+/// may be a base label created using dfsan_create_label, with associated
+/// text description and user data, or an automatically created union label,
+/// which represents the union of two label identifiers (which may themselves
+/// be base or union labels).
+struct dfsan_label_info {
+ // Fields for union labels, set to 0 for base labels.
+ dfsan_label l1;
+ dfsan_label l2;
+
+ // Fields for base labels.
+ const char *desc;
+ void *userdata;
+};
+
+/// Signature of the callback argument to dfsan_set_write_callback().
+typedef void (*dfsan_write_callback_t)(int fd, const void *buf, size_t count);
+
+/// Computes the union of \c l1 and \c l2, possibly creating a union label in
+/// the process.
+dfsan_label dfsan_union(dfsan_label l1, dfsan_label l2);
+
+/// Creates and returns a base label with the given description and user data.
+dfsan_label dfsan_create_label(const char *desc, void *userdata);
+
+/// Sets the label for each address in [addr,addr+size) to \c label.
+void dfsan_set_label(dfsan_label label, void *addr, size_t size);
+
+/// Sets the label for each address in [addr,addr+size) to the union of the
+/// current label for that address and \c label.
+void dfsan_add_label(dfsan_label label, void *addr, size_t size);
+
+/// Retrieves the label associated with the given data.
+///
+/// The type of 'data' is arbitrary. The function accepts a value of any type,
+/// which can be truncated or extended (implicitly or explicitly) as necessary.
+/// The truncation/extension operations will preserve the label of the original
+/// value.
+dfsan_label dfsan_get_label(long data);
+
+/// Retrieves the label associated with the data at the given address.
+dfsan_label dfsan_read_label(const void *addr, size_t size);
+
+/// Retrieves a pointer to the dfsan_label_info struct for the given label.
+const struct dfsan_label_info *dfsan_get_label_info(dfsan_label label);
+
+/// Returns whether the given label \c label contains the label \c elem.
+int dfsan_has_label(dfsan_label label, dfsan_label elem);
+
+/// If the given label \c label contains a label with the description \c desc,
+/// returns that label; otherwise returns 0.
+dfsan_label dfsan_has_label_with_desc(dfsan_label label, const char *desc);
+
+/// Returns the number of labels allocated.
+size_t dfsan_get_label_count(void);
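+
+/// An illustrative sketch (not part of this header): taint an input buffer
+/// with a base label, derive a value from it, and verify that the taint
+/// propagated ('buf' and 'len' stand for arbitrary user data):
+///   dfsan_label lbl = dfsan_create_label("input", NULL);
+///   dfsan_set_label(lbl, buf, len);
+///   long x = ((const char *)buf)[0] + 1;
+///   assert(dfsan_has_label(dfsan_get_label(x), lbl));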
+
+/// Sets a callback to be invoked on calls to write(). The callback is invoked
+/// before the write is done. The write is not guaranteed to succeed when the
+/// callback executes. Pass in NULL to remove any callback.
+void dfsan_set_write_callback(dfsan_write_callback_t labeled_write_callback);
+
+/// Writes the labels currently used by the program to the given file
+/// descriptor. The lines of the output have the following format:
+///
+/// <label> <parent label 1> <parent label 2> <label description if any>
+void dfsan_dump_labels(int fd);
+
+/// Interceptor hooks.
+/// Whenever one of dfsan's custom functions is called, the corresponding
+/// hook is called if it is non-zero. The hooks should be defined by the user.
+/// The primary use case is taint-guided fuzzing, where the fuzzer
+/// needs to see the parameters of the function and the labels.
+/// FIXME: implement more hooks.
+void dfsan_weak_hook_memcmp(void *caller_pc, const void *s1, const void *s2,
+ size_t n, dfsan_label s1_label,
+ dfsan_label s2_label, dfsan_label n_label);
+void dfsan_weak_hook_strncmp(void *caller_pc, const char *s1, const char *s2,
+ size_t n, dfsan_label s1_label,
+ dfsan_label s2_label, dfsan_label n_label);
+#ifdef __cplusplus
+} // extern "C"
+
+template <typename T>
+void dfsan_set_label(dfsan_label label, T &data) { // NOLINT
+ dfsan_set_label(label, (void *)&data, sizeof(T));
+}
+
+#endif
+
+#endif // DFSAN_INTERFACE_H
diff --git a/clang-2690385/lib64/clang/3.8/include/sanitizer/linux_syscall_hooks.h b/clang-2690385/lib64/clang/3.8/include/sanitizer/linux_syscall_hooks.h
new file mode 100644
index 00000000..89867c15
--- /dev/null
+++ b/clang-2690385/lib64/clang/3.8/include/sanitizer/linux_syscall_hooks.h
@@ -0,0 +1,3070 @@
+//===-- linux_syscall_hooks.h ---------------------------------------------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file is a part of public sanitizer interface.
+//
+// System call handlers.
+//
+// Interface methods declared in this header implement pre- and post- syscall
+// actions for the active sanitizer.
+// Usage:
+// __sanitizer_syscall_pre_getfoo(...args...);
+// long res = syscall(__NR_getfoo, ...args...);
+// __sanitizer_syscall_post_getfoo(res, ...args...);
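+//
+// For example, for gettimeofday(2) (an illustrative instantiation of the
+// pattern above, using the hooks defined below):
+//   struct timeval tv; struct timezone tz;
+//   __sanitizer_syscall_pre_gettimeofday(&tv, &tz);
+//   long res = syscall(__NR_gettimeofday, &tv, &tz);
+//   __sanitizer_syscall_post_gettimeofday(res, &tv, &tz);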
+//===----------------------------------------------------------------------===//
+#ifndef SANITIZER_LINUX_SYSCALL_HOOKS_H
+#define SANITIZER_LINUX_SYSCALL_HOOKS_H
+
+#define __sanitizer_syscall_pre_time(tloc) \
+ __sanitizer_syscall_pre_impl_time((long)(tloc))
+#define __sanitizer_syscall_post_time(res, tloc) \
+ __sanitizer_syscall_post_impl_time(res, (long)(tloc))
+#define __sanitizer_syscall_pre_stime(tptr) \
+ __sanitizer_syscall_pre_impl_stime((long)(tptr))
+#define __sanitizer_syscall_post_stime(res, tptr) \
+ __sanitizer_syscall_post_impl_stime(res, (long)(tptr))
+#define __sanitizer_syscall_pre_gettimeofday(tv, tz) \
+ __sanitizer_syscall_pre_impl_gettimeofday((long)(tv), (long)(tz))
+#define __sanitizer_syscall_post_gettimeofday(res, tv, tz) \
+ __sanitizer_syscall_post_impl_gettimeofday(res, (long)(tv), (long)(tz))
+#define __sanitizer_syscall_pre_settimeofday(tv, tz) \
+ __sanitizer_syscall_pre_impl_settimeofday((long)(tv), (long)(tz))
+#define __sanitizer_syscall_post_settimeofday(res, tv, tz) \
+ __sanitizer_syscall_post_impl_settimeofday(res, (long)(tv), (long)(tz))
+#define __sanitizer_syscall_pre_adjtimex(txc_p) \
+ __sanitizer_syscall_pre_impl_adjtimex((long)(txc_p))
+#define __sanitizer_syscall_post_adjtimex(res, txc_p) \
+ __sanitizer_syscall_post_impl_adjtimex(res, (long)(txc_p))
+#define __sanitizer_syscall_pre_times(tbuf) \
+ __sanitizer_syscall_pre_impl_times((long)(tbuf))
+#define __sanitizer_syscall_post_times(res, tbuf) \
+ __sanitizer_syscall_post_impl_times(res, (long)(tbuf))
+#define __sanitizer_syscall_pre_gettid() __sanitizer_syscall_pre_impl_gettid()
+#define __sanitizer_syscall_post_gettid(res) \
+ __sanitizer_syscall_post_impl_gettid(res)
+#define __sanitizer_syscall_pre_nanosleep(rqtp, rmtp) \
+ __sanitizer_syscall_pre_impl_nanosleep((long)(rqtp), (long)(rmtp))
+#define __sanitizer_syscall_post_nanosleep(res, rqtp, rmtp) \
+ __sanitizer_syscall_post_impl_nanosleep(res, (long)(rqtp), (long)(rmtp))
+#define __sanitizer_syscall_pre_alarm(seconds) \
+ __sanitizer_syscall_pre_impl_alarm((long)(seconds))
+#define __sanitizer_syscall_post_alarm(res, seconds) \
+ __sanitizer_syscall_post_impl_alarm(res, (long)(seconds))
+#define __sanitizer_syscall_pre_getpid() __sanitizer_syscall_pre_impl_getpid()
+#define __sanitizer_syscall_post_getpid(res) \
+ __sanitizer_syscall_post_impl_getpid(res)
+#define __sanitizer_syscall_pre_getppid() __sanitizer_syscall_pre_impl_getppid()
+#define __sanitizer_syscall_post_getppid(res) \
+ __sanitizer_syscall_post_impl_getppid(res)
+#define __sanitizer_syscall_pre_getuid() __sanitizer_syscall_pre_impl_getuid()
+#define __sanitizer_syscall_post_getuid(res) \
+ __sanitizer_syscall_post_impl_getuid(res)
+#define __sanitizer_syscall_pre_geteuid() __sanitizer_syscall_pre_impl_geteuid()
+#define __sanitizer_syscall_post_geteuid(res) \
+ __sanitizer_syscall_post_impl_geteuid(res)
+#define __sanitizer_syscall_pre_getgid() __sanitizer_syscall_pre_impl_getgid()
+#define __sanitizer_syscall_post_getgid(res) \
+ __sanitizer_syscall_post_impl_getgid(res)
+#define __sanitizer_syscall_pre_getegid() __sanitizer_syscall_pre_impl_getegid()
+#define __sanitizer_syscall_post_getegid(res) \
+ __sanitizer_syscall_post_impl_getegid(res)
+#define __sanitizer_syscall_pre_getresuid(ruid, euid, suid) \
+ __sanitizer_syscall_pre_impl_getresuid((long)(ruid), (long)(euid), \
+ (long)(suid))
+#define __sanitizer_syscall_post_getresuid(res, ruid, euid, suid) \
+ __sanitizer_syscall_post_impl_getresuid(res, (long)(ruid), (long)(euid), \
+ (long)(suid))
+#define __sanitizer_syscall_pre_getresgid(rgid, egid, sgid) \
+ __sanitizer_syscall_pre_impl_getresgid((long)(rgid), (long)(egid), \
+ (long)(sgid))
+#define __sanitizer_syscall_post_getresgid(res, rgid, egid, sgid) \
+ __sanitizer_syscall_post_impl_getresgid(res, (long)(rgid), (long)(egid), \
+ (long)(sgid))
+#define __sanitizer_syscall_pre_getpgid(pid) \
+ __sanitizer_syscall_pre_impl_getpgid((long)(pid))
+#define __sanitizer_syscall_post_getpgid(res, pid) \
+ __sanitizer_syscall_post_impl_getpgid(res, (long)(pid))
+#define __sanitizer_syscall_pre_getpgrp() __sanitizer_syscall_pre_impl_getpgrp()
+#define __sanitizer_syscall_post_getpgrp(res) \
+ __sanitizer_syscall_post_impl_getpgrp(res)
+#define __sanitizer_syscall_pre_getsid(pid) \
+ __sanitizer_syscall_pre_impl_getsid((long)(pid))
+#define __sanitizer_syscall_post_getsid(res, pid) \
+ __sanitizer_syscall_post_impl_getsid(res, (long)(pid))
+#define __sanitizer_syscall_pre_getgroups(gidsetsize, grouplist) \
+ __sanitizer_syscall_pre_impl_getgroups((long)(gidsetsize), (long)(grouplist))
+#define __sanitizer_syscall_post_getgroups(res, gidsetsize, grouplist) \
+ __sanitizer_syscall_post_impl_getgroups(res, (long)(gidsetsize), \
+ (long)(grouplist))
+#define __sanitizer_syscall_pre_setregid(rgid, egid) \
+ __sanitizer_syscall_pre_impl_setregid((long)(rgid), (long)(egid))
+#define __sanitizer_syscall_post_setregid(res, rgid, egid) \
+ __sanitizer_syscall_post_impl_setregid(res, (long)(rgid), (long)(egid))
+#define __sanitizer_syscall_pre_setgid(gid) \
+ __sanitizer_syscall_pre_impl_setgid((long)(gid))
+#define __sanitizer_syscall_post_setgid(res, gid) \
+ __sanitizer_syscall_post_impl_setgid(res, (long)(gid))
+#define __sanitizer_syscall_pre_setreuid(ruid, euid) \
+ __sanitizer_syscall_pre_impl_setreuid((long)(ruid), (long)(euid))
+#define __sanitizer_syscall_post_setreuid(res, ruid, euid) \
+ __sanitizer_syscall_post_impl_setreuid(res, (long)(ruid), (long)(euid))
+#define __sanitizer_syscall_pre_setuid(uid) \
+ __sanitizer_syscall_pre_impl_setuid((long)(uid))
+#define __sanitizer_syscall_post_setuid(res, uid) \
+ __sanitizer_syscall_post_impl_setuid(res, (long)(uid))
+#define __sanitizer_syscall_pre_setresuid(ruid, euid, suid) \
+ __sanitizer_syscall_pre_impl_setresuid((long)(ruid), (long)(euid), \
+ (long)(suid))
+#define __sanitizer_syscall_post_setresuid(res, ruid, euid, suid) \
+ __sanitizer_syscall_post_impl_setresuid(res, (long)(ruid), (long)(euid), \
+ (long)(suid))
+#define __sanitizer_syscall_pre_setresgid(rgid, egid, sgid) \
+ __sanitizer_syscall_pre_impl_setresgid((long)(rgid), (long)(egid), \
+ (long)(sgid))
+#define __sanitizer_syscall_post_setresgid(res, rgid, egid, sgid) \
+ __sanitizer_syscall_post_impl_setresgid(res, (long)(rgid), (long)(egid), \
+ (long)(sgid))
+#define __sanitizer_syscall_pre_setfsuid(uid) \
+ __sanitizer_syscall_pre_impl_setfsuid((long)(uid))
+#define __sanitizer_syscall_post_setfsuid(res, uid) \
+ __sanitizer_syscall_post_impl_setfsuid(res, (long)(uid))
+#define __sanitizer_syscall_pre_setfsgid(gid) \
+ __sanitizer_syscall_pre_impl_setfsgid((long)(gid))
+#define __sanitizer_syscall_post_setfsgid(res, gid) \
+ __sanitizer_syscall_post_impl_setfsgid(res, (long)(gid))
+#define __sanitizer_syscall_pre_setpgid(pid, pgid) \
+ __sanitizer_syscall_pre_impl_setpgid((long)(pid), (long)(pgid))
+#define __sanitizer_syscall_post_setpgid(res, pid, pgid) \
+ __sanitizer_syscall_post_impl_setpgid(res, (long)(pid), (long)(pgid))
+#define __sanitizer_syscall_pre_setsid() __sanitizer_syscall_pre_impl_setsid()
+#define __sanitizer_syscall_post_setsid(res) \
+ __sanitizer_syscall_post_impl_setsid(res)
+#define __sanitizer_syscall_pre_setgroups(gidsetsize, grouplist) \
+ __sanitizer_syscall_pre_impl_setgroups((long)(gidsetsize), (long)(grouplist))
+#define __sanitizer_syscall_post_setgroups(res, gidsetsize, grouplist) \
+ __sanitizer_syscall_post_impl_setgroups(res, (long)(gidsetsize), \
+ (long)(grouplist))
+#define __sanitizer_syscall_pre_acct(name) \
+ __sanitizer_syscall_pre_impl_acct((long)(name))
+#define __sanitizer_syscall_post_acct(res, name) \
+ __sanitizer_syscall_post_impl_acct(res, (long)(name))
+#define __sanitizer_syscall_pre_capget(header, dataptr) \
+ __sanitizer_syscall_pre_impl_capget((long)(header), (long)(dataptr))
+#define __sanitizer_syscall_post_capget(res, header, dataptr) \
+ __sanitizer_syscall_post_impl_capget(res, (long)(header), (long)(dataptr))
+#define __sanitizer_syscall_pre_capset(header, data) \
+ __sanitizer_syscall_pre_impl_capset((long)(header), (long)(data))
+#define __sanitizer_syscall_post_capset(res, header, data) \
+ __sanitizer_syscall_post_impl_capset(res, (long)(header), (long)(data))
+#define __sanitizer_syscall_pre_personality(personality) \
+ __sanitizer_syscall_pre_impl_personality((long)(personality))
+#define __sanitizer_syscall_post_personality(res, personality) \
+ __sanitizer_syscall_post_impl_personality(res, (long)(personality))
+#define __sanitizer_syscall_pre_sigpending(set) \
+ __sanitizer_syscall_pre_impl_sigpending((long)(set))
+#define __sanitizer_syscall_post_sigpending(res, set) \
+ __sanitizer_syscall_post_impl_sigpending(res, (long)(set))
+#define __sanitizer_syscall_pre_sigprocmask(how, set, oset) \
+ __sanitizer_syscall_pre_impl_sigprocmask((long)(how), (long)(set), \
+ (long)(oset))
+#define __sanitizer_syscall_post_sigprocmask(res, how, set, oset) \
+ __sanitizer_syscall_post_impl_sigprocmask(res, (long)(how), (long)(set), \
+ (long)(oset))
+#define __sanitizer_syscall_pre_getitimer(which, value) \
+ __sanitizer_syscall_pre_impl_getitimer((long)(which), (long)(value))
+#define __sanitizer_syscall_post_getitimer(res, which, value) \
+ __sanitizer_syscall_post_impl_getitimer(res, (long)(which), (long)(value))
+#define __sanitizer_syscall_pre_setitimer(which, value, ovalue) \
+ __sanitizer_syscall_pre_impl_setitimer((long)(which), (long)(value), \
+ (long)(ovalue))
+#define __sanitizer_syscall_post_setitimer(res, which, value, ovalue) \
+ __sanitizer_syscall_post_impl_setitimer(res, (long)(which), (long)(value), \
+ (long)(ovalue))
+#define __sanitizer_syscall_pre_timer_create(which_clock, timer_event_spec, \
+ created_timer_id) \
+ __sanitizer_syscall_pre_impl_timer_create( \
+ (long)(which_clock), (long)(timer_event_spec), (long)(created_timer_id))
+#define __sanitizer_syscall_post_timer_create( \
+ res, which_clock, timer_event_spec, created_timer_id) \
+ __sanitizer_syscall_post_impl_timer_create(res, (long)(which_clock), \
+ (long)(timer_event_spec), \
+ (long)(created_timer_id))
+#define __sanitizer_syscall_pre_timer_gettime(timer_id, setting) \
+ __sanitizer_syscall_pre_impl_timer_gettime((long)(timer_id), (long)(setting))
+#define __sanitizer_syscall_post_timer_gettime(res, timer_id, setting) \
+ __sanitizer_syscall_post_impl_timer_gettime(res, (long)(timer_id), \
+ (long)(setting))
+#define __sanitizer_syscall_pre_timer_getoverrun(timer_id) \
+ __sanitizer_syscall_pre_impl_timer_getoverrun((long)(timer_id))
+#define __sanitizer_syscall_post_timer_getoverrun(res, timer_id) \
+ __sanitizer_syscall_post_impl_timer_getoverrun(res, (long)(timer_id))
+#define __sanitizer_syscall_pre_timer_settime(timer_id, flags, new_setting, \
+ old_setting) \
+ __sanitizer_syscall_pre_impl_timer_settime((long)(timer_id), (long)(flags), \
+ (long)(new_setting), \
+ (long)(old_setting))
+#define __sanitizer_syscall_post_timer_settime(res, timer_id, flags, \
+ new_setting, old_setting) \
+ __sanitizer_syscall_post_impl_timer_settime( \
+ res, (long)(timer_id), (long)(flags), (long)(new_setting), \
+ (long)(old_setting))
+#define __sanitizer_syscall_pre_timer_delete(timer_id) \
+ __sanitizer_syscall_pre_impl_timer_delete((long)(timer_id))
+#define __sanitizer_syscall_post_timer_delete(res, timer_id) \
+ __sanitizer_syscall_post_impl_timer_delete(res, (long)(timer_id))
+#define __sanitizer_syscall_pre_clock_settime(which_clock, tp) \
+ __sanitizer_syscall_pre_impl_clock_settime((long)(which_clock), (long)(tp))
+#define __sanitizer_syscall_post_clock_settime(res, which_clock, tp) \
+ __sanitizer_syscall_post_impl_clock_settime(res, (long)(which_clock), \
+ (long)(tp))
+#define __sanitizer_syscall_pre_clock_gettime(which_clock, tp) \
+ __sanitizer_syscall_pre_impl_clock_gettime((long)(which_clock), (long)(tp))
+#define __sanitizer_syscall_post_clock_gettime(res, which_clock, tp) \
+ __sanitizer_syscall_post_impl_clock_gettime(res, (long)(which_clock), \
+ (long)(tp))
+#define __sanitizer_syscall_pre_clock_adjtime(which_clock, tx) \
+ __sanitizer_syscall_pre_impl_clock_adjtime((long)(which_clock), (long)(tx))
+#define __sanitizer_syscall_post_clock_adjtime(res, which_clock, tx) \
+ __sanitizer_syscall_post_impl_clock_adjtime(res, (long)(which_clock), \
+ (long)(tx))
+#define __sanitizer_syscall_pre_clock_getres(which_clock, tp) \
+ __sanitizer_syscall_pre_impl_clock_getres((long)(which_clock), (long)(tp))
+#define __sanitizer_syscall_post_clock_getres(res, which_clock, tp) \
+ __sanitizer_syscall_post_impl_clock_getres(res, (long)(which_clock), \
+ (long)(tp))
+#define __sanitizer_syscall_pre_clock_nanosleep(which_clock, flags, rqtp, \
+ rmtp) \
+ __sanitizer_syscall_pre_impl_clock_nanosleep( \
+ (long)(which_clock), (long)(flags), (long)(rqtp), (long)(rmtp))
+#define __sanitizer_syscall_post_clock_nanosleep(res, which_clock, flags, \
+ rqtp, rmtp) \
+ __sanitizer_syscall_post_impl_clock_nanosleep( \
+ res, (long)(which_clock), (long)(flags), (long)(rqtp), (long)(rmtp))
+#define __sanitizer_syscall_pre_nice(increment) \
+ __sanitizer_syscall_pre_impl_nice((long)(increment))
+#define __sanitizer_syscall_post_nice(res, increment) \
+ __sanitizer_syscall_post_impl_nice(res, (long)(increment))
+#define __sanitizer_syscall_pre_sched_setscheduler(pid, policy, param) \
+ __sanitizer_syscall_pre_impl_sched_setscheduler((long)(pid), (long)(policy), \
+ (long)(param))
+#define __sanitizer_syscall_post_sched_setscheduler(res, pid, policy, param) \
+ __sanitizer_syscall_post_impl_sched_setscheduler( \
+ res, (long)(pid), (long)(policy), (long)(param))
+#define __sanitizer_syscall_pre_sched_setparam(pid, param) \
+ __sanitizer_syscall_pre_impl_sched_setparam((long)(pid), (long)(param))
+#define __sanitizer_syscall_post_sched_setparam(res, pid, param) \
+ __sanitizer_syscall_post_impl_sched_setparam(res, (long)(pid), (long)(param))
+#define __sanitizer_syscall_pre_sched_getscheduler(pid) \
+ __sanitizer_syscall_pre_impl_sched_getscheduler((long)(pid))
+#define __sanitizer_syscall_post_sched_getscheduler(res, pid) \
+ __sanitizer_syscall_post_impl_sched_getscheduler(res, (long)(pid))
+#define __sanitizer_syscall_pre_sched_getparam(pid, param) \
+ __sanitizer_syscall_pre_impl_sched_getparam((long)(pid), (long)(param))
+#define __sanitizer_syscall_post_sched_getparam(res, pid, param) \
+ __sanitizer_syscall_post_impl_sched_getparam(res, (long)(pid), (long)(param))
+#define __sanitizer_syscall_pre_sched_setaffinity(pid, len, user_mask_ptr) \
+ __sanitizer_syscall_pre_impl_sched_setaffinity((long)(pid), (long)(len), \
+ (long)(user_mask_ptr))
+#define __sanitizer_syscall_post_sched_setaffinity(res, pid, len, \
+ user_mask_ptr) \
+ __sanitizer_syscall_post_impl_sched_setaffinity( \
+ res, (long)(pid), (long)(len), (long)(user_mask_ptr))
+#define __sanitizer_syscall_pre_sched_getaffinity(pid, len, user_mask_ptr) \
+ __sanitizer_syscall_pre_impl_sched_getaffinity((long)(pid), (long)(len), \
+ (long)(user_mask_ptr))
+#define __sanitizer_syscall_post_sched_getaffinity(res, pid, len, \
+ user_mask_ptr) \
+ __sanitizer_syscall_post_impl_sched_getaffinity( \
+ res, (long)(pid), (long)(len), (long)(user_mask_ptr))
+#define __sanitizer_syscall_pre_sched_yield() \
+ __sanitizer_syscall_pre_impl_sched_yield()
+#define __sanitizer_syscall_post_sched_yield(res) \
+ __sanitizer_syscall_post_impl_sched_yield(res)
+#define __sanitizer_syscall_pre_sched_get_priority_max(policy) \
+ __sanitizer_syscall_pre_impl_sched_get_priority_max((long)(policy))
+#define __sanitizer_syscall_post_sched_get_priority_max(res, policy) \
+ __sanitizer_syscall_post_impl_sched_get_priority_max(res, (long)(policy))
+#define __sanitizer_syscall_pre_sched_get_priority_min(policy) \
+ __sanitizer_syscall_pre_impl_sched_get_priority_min((long)(policy))
+#define __sanitizer_syscall_post_sched_get_priority_min(res, policy) \
+ __sanitizer_syscall_post_impl_sched_get_priority_min(res, (long)(policy))
+#define __sanitizer_syscall_pre_sched_rr_get_interval(pid, interval) \
+ __sanitizer_syscall_pre_impl_sched_rr_get_interval((long)(pid), \
+ (long)(interval))
+#define __sanitizer_syscall_post_sched_rr_get_interval(res, pid, interval) \
+ __sanitizer_syscall_post_impl_sched_rr_get_interval(res, (long)(pid), \
+ (long)(interval))
+#define __sanitizer_syscall_pre_setpriority(which, who, niceval) \
+ __sanitizer_syscall_pre_impl_setpriority((long)(which), (long)(who), \
+ (long)(niceval))
+#define __sanitizer_syscall_post_setpriority(res, which, who, niceval) \
+ __sanitizer_syscall_post_impl_setpriority(res, (long)(which), (long)(who), \
+ (long)(niceval))
+#define __sanitizer_syscall_pre_getpriority(which, who) \
+ __sanitizer_syscall_pre_impl_getpriority((long)(which), (long)(who))
+#define __sanitizer_syscall_post_getpriority(res, which, who) \
+ __sanitizer_syscall_post_impl_getpriority(res, (long)(which), (long)(who))
+#define __sanitizer_syscall_pre_shutdown(arg0, arg1) \
+ __sanitizer_syscall_pre_impl_shutdown((long)(arg0), (long)(arg1))
+#define __sanitizer_syscall_post_shutdown(res, arg0, arg1) \
+ __sanitizer_syscall_post_impl_shutdown(res, (long)(arg0), (long)(arg1))
+#define __sanitizer_syscall_pre_reboot(magic1, magic2, cmd, arg) \
+ __sanitizer_syscall_pre_impl_reboot((long)(magic1), (long)(magic2), \
+ (long)(cmd), (long)(arg))
+#define __sanitizer_syscall_post_reboot(res, magic1, magic2, cmd, arg) \
+ __sanitizer_syscall_post_impl_reboot(res, (long)(magic1), (long)(magic2), \
+ (long)(cmd), (long)(arg))
+#define __sanitizer_syscall_pre_restart_syscall() \
+ __sanitizer_syscall_pre_impl_restart_syscall()
+#define __sanitizer_syscall_post_restart_syscall(res) \
+ __sanitizer_syscall_post_impl_restart_syscall(res)
+#define __sanitizer_syscall_pre_kexec_load(entry, nr_segments, segments, \
+ flags) \
+ __sanitizer_syscall_pre_impl_kexec_load((long)(entry), (long)(nr_segments), \
+ (long)(segments), (long)(flags))
+#define __sanitizer_syscall_post_kexec_load(res, entry, nr_segments, segments, \
+ flags) \
+ __sanitizer_syscall_post_impl_kexec_load(res, (long)(entry), \
+ (long)(nr_segments), \
+ (long)(segments), (long)(flags))
+#define __sanitizer_syscall_pre_exit(error_code) \
+ __sanitizer_syscall_pre_impl_exit((long)(error_code))
+#define __sanitizer_syscall_post_exit(res, error_code) \
+ __sanitizer_syscall_post_impl_exit(res, (long)(error_code))
+#define __sanitizer_syscall_pre_exit_group(error_code) \
+ __sanitizer_syscall_pre_impl_exit_group((long)(error_code))
+#define __sanitizer_syscall_post_exit_group(res, error_code) \
+ __sanitizer_syscall_post_impl_exit_group(res, (long)(error_code))
+#define __sanitizer_syscall_pre_wait4(pid, stat_addr, options, ru) \
+ __sanitizer_syscall_pre_impl_wait4((long)(pid), (long)(stat_addr), \
+ (long)(options), (long)(ru))
+#define __sanitizer_syscall_post_wait4(res, pid, stat_addr, options, ru) \
+ __sanitizer_syscall_post_impl_wait4(res, (long)(pid), (long)(stat_addr), \
+ (long)(options), (long)(ru))
+#define __sanitizer_syscall_pre_waitid(which, pid, infop, options, ru) \
+ __sanitizer_syscall_pre_impl_waitid( \
+ (long)(which), (long)(pid), (long)(infop), (long)(options), (long)(ru))
+#define __sanitizer_syscall_post_waitid(res, which, pid, infop, options, ru) \
+ __sanitizer_syscall_post_impl_waitid(res, (long)(which), (long)(pid), \
+ (long)(infop), (long)(options), \
+ (long)(ru))
+#define __sanitizer_syscall_pre_waitpid(pid, stat_addr, options) \
+ __sanitizer_syscall_pre_impl_waitpid((long)(pid), (long)(stat_addr), \
+ (long)(options))
+#define __sanitizer_syscall_post_waitpid(res, pid, stat_addr, options) \
+ __sanitizer_syscall_post_impl_waitpid(res, (long)(pid), (long)(stat_addr), \
+ (long)(options))
+#define __sanitizer_syscall_pre_set_tid_address(tidptr) \
+ __sanitizer_syscall_pre_impl_set_tid_address((long)(tidptr))
+#define __sanitizer_syscall_post_set_tid_address(res, tidptr) \
+ __sanitizer_syscall_post_impl_set_tid_address(res, (long)(tidptr))
+#define __sanitizer_syscall_pre_init_module(umod, len, uargs) \
+ __sanitizer_syscall_pre_impl_init_module((long)(umod), (long)(len), \
+ (long)(uargs))
+#define __sanitizer_syscall_post_init_module(res, umod, len, uargs) \
+ __sanitizer_syscall_post_impl_init_module(res, (long)(umod), (long)(len), \
+ (long)(uargs))
+#define __sanitizer_syscall_pre_delete_module(name_user, flags) \
+ __sanitizer_syscall_pre_impl_delete_module((long)(name_user), (long)(flags))
+#define __sanitizer_syscall_post_delete_module(res, name_user, flags) \
+ __sanitizer_syscall_post_impl_delete_module(res, (long)(name_user), \
+ (long)(flags))
+#define __sanitizer_syscall_pre_rt_sigprocmask(how, set, oset, sigsetsize) \
+ __sanitizer_syscall_pre_impl_rt_sigprocmask( \
+ (long)(how), (long)(set), (long)(oset), (long)(sigsetsize))
+#define __sanitizer_syscall_post_rt_sigprocmask(res, how, set, oset, \
+ sigsetsize) \
+ __sanitizer_syscall_post_impl_rt_sigprocmask( \
+ res, (long)(how), (long)(set), (long)(oset), (long)(sigsetsize))
+#define __sanitizer_syscall_pre_rt_sigpending(set, sigsetsize) \
+ __sanitizer_syscall_pre_impl_rt_sigpending((long)(set), (long)(sigsetsize))
+#define __sanitizer_syscall_post_rt_sigpending(res, set, sigsetsize) \
+ __sanitizer_syscall_post_impl_rt_sigpending(res, (long)(set), \
+ (long)(sigsetsize))
+#define __sanitizer_syscall_pre_rt_sigtimedwait(uthese, uinfo, uts, \
+ sigsetsize) \
+ __sanitizer_syscall_pre_impl_rt_sigtimedwait( \
+ (long)(uthese), (long)(uinfo), (long)(uts), (long)(sigsetsize))
+#define __sanitizer_syscall_post_rt_sigtimedwait(res, uthese, uinfo, uts, \
+ sigsetsize) \
+ __sanitizer_syscall_post_impl_rt_sigtimedwait( \
+ res, (long)(uthese), (long)(uinfo), (long)(uts), (long)(sigsetsize))
+#define __sanitizer_syscall_pre_rt_tgsigqueueinfo(tgid, pid, sig, uinfo) \
+ __sanitizer_syscall_pre_impl_rt_tgsigqueueinfo((long)(tgid), (long)(pid), \
+ (long)(sig), (long)(uinfo))
+#define __sanitizer_syscall_post_rt_tgsigqueueinfo(res, tgid, pid, sig, uinfo) \
+ __sanitizer_syscall_post_impl_rt_tgsigqueueinfo( \
+ res, (long)(tgid), (long)(pid), (long)(sig), (long)(uinfo))
+#define __sanitizer_syscall_pre_kill(pid, sig) \
+ __sanitizer_syscall_pre_impl_kill((long)(pid), (long)(sig))
+#define __sanitizer_syscall_post_kill(res, pid, sig) \
+ __sanitizer_syscall_post_impl_kill(res, (long)(pid), (long)(sig))
+#define __sanitizer_syscall_pre_tgkill(tgid, pid, sig) \
+ __sanitizer_syscall_pre_impl_tgkill((long)(tgid), (long)(pid), (long)(sig))
+#define __sanitizer_syscall_post_tgkill(res, tgid, pid, sig) \
+ __sanitizer_syscall_post_impl_tgkill(res, (long)(tgid), (long)(pid), \
+ (long)(sig))
+#define __sanitizer_syscall_pre_tkill(pid, sig) \
+ __sanitizer_syscall_pre_impl_tkill((long)(pid), (long)(sig))
+#define __sanitizer_syscall_post_tkill(res, pid, sig) \
+ __sanitizer_syscall_post_impl_tkill(res, (long)(pid), (long)(sig))
+#define __sanitizer_syscall_pre_rt_sigqueueinfo(pid, sig, uinfo) \
+ __sanitizer_syscall_pre_impl_rt_sigqueueinfo((long)(pid), (long)(sig), \
+ (long)(uinfo))
+#define __sanitizer_syscall_post_rt_sigqueueinfo(res, pid, sig, uinfo) \
+ __sanitizer_syscall_post_impl_rt_sigqueueinfo(res, (long)(pid), (long)(sig), \
+ (long)(uinfo))
+#define __sanitizer_syscall_pre_sgetmask() \
+ __sanitizer_syscall_pre_impl_sgetmask()
+#define __sanitizer_syscall_post_sgetmask(res) \
+ __sanitizer_syscall_post_impl_sgetmask(res)
+#define __sanitizer_syscall_pre_ssetmask(newmask) \
+ __sanitizer_syscall_pre_impl_ssetmask((long)(newmask))
+#define __sanitizer_syscall_post_ssetmask(res, newmask) \
+ __sanitizer_syscall_post_impl_ssetmask(res, (long)(newmask))
+#define __sanitizer_syscall_pre_signal(sig, handler) \
+ __sanitizer_syscall_pre_impl_signal((long)(sig), (long)(handler))
+#define __sanitizer_syscall_post_signal(res, sig, handler) \
+ __sanitizer_syscall_post_impl_signal(res, (long)(sig), (long)(handler))
+#define __sanitizer_syscall_pre_pause() __sanitizer_syscall_pre_impl_pause()
+#define __sanitizer_syscall_post_pause(res) \
+ __sanitizer_syscall_post_impl_pause(res)
+#define __sanitizer_syscall_pre_sync() __sanitizer_syscall_pre_impl_sync()
+#define __sanitizer_syscall_post_sync(res) \
+ __sanitizer_syscall_post_impl_sync(res)
+#define __sanitizer_syscall_pre_fsync(fd) \
+ __sanitizer_syscall_pre_impl_fsync((long)(fd))
+#define __sanitizer_syscall_post_fsync(res, fd) \
+ __sanitizer_syscall_post_impl_fsync(res, (long)(fd))
+#define __sanitizer_syscall_pre_fdatasync(fd) \
+ __sanitizer_syscall_pre_impl_fdatasync((long)(fd))
+#define __sanitizer_syscall_post_fdatasync(res, fd) \
+ __sanitizer_syscall_post_impl_fdatasync(res, (long)(fd))
+#define __sanitizer_syscall_pre_bdflush(func, data) \
+ __sanitizer_syscall_pre_impl_bdflush((long)(func), (long)(data))
+#define __sanitizer_syscall_post_bdflush(res, func, data) \
+ __sanitizer_syscall_post_impl_bdflush(res, (long)(func), (long)(data))
+#define __sanitizer_syscall_pre_mount(dev_name, dir_name, type, flags, data) \
+ __sanitizer_syscall_pre_impl_mount((long)(dev_name), (long)(dir_name), \
+ (long)(type), (long)(flags), \
+ (long)(data))
+#define __sanitizer_syscall_post_mount(res, dev_name, dir_name, type, flags, \
+ data) \
+ __sanitizer_syscall_post_impl_mount(res, (long)(dev_name), (long)(dir_name), \
+ (long)(type), (long)(flags), \
+ (long)(data))
+#define __sanitizer_syscall_pre_umount(name, flags) \
+ __sanitizer_syscall_pre_impl_umount((long)(name), (long)(flags))
+#define __sanitizer_syscall_post_umount(res, name, flags) \
+ __sanitizer_syscall_post_impl_umount(res, (long)(name), (long)(flags))
+#define __sanitizer_syscall_pre_oldumount(name) \
+ __sanitizer_syscall_pre_impl_oldumount((long)(name))
+#define __sanitizer_syscall_post_oldumount(res, name) \
+ __sanitizer_syscall_post_impl_oldumount(res, (long)(name))
+#define __sanitizer_syscall_pre_truncate(path, length) \
+ __sanitizer_syscall_pre_impl_truncate((long)(path), (long)(length))
+#define __sanitizer_syscall_post_truncate(res, path, length) \
+ __sanitizer_syscall_post_impl_truncate(res, (long)(path), (long)(length))
+#define __sanitizer_syscall_pre_ftruncate(fd, length) \
+ __sanitizer_syscall_pre_impl_ftruncate((long)(fd), (long)(length))
+#define __sanitizer_syscall_post_ftruncate(res, fd, length) \
+ __sanitizer_syscall_post_impl_ftruncate(res, (long)(fd), (long)(length))
+#define __sanitizer_syscall_pre_stat(filename, statbuf) \
+ __sanitizer_syscall_pre_impl_stat((long)(filename), (long)(statbuf))
+#define __sanitizer_syscall_post_stat(res, filename, statbuf) \
+ __sanitizer_syscall_post_impl_stat(res, (long)(filename), (long)(statbuf))
+#define __sanitizer_syscall_pre_statfs(path, buf) \
+ __sanitizer_syscall_pre_impl_statfs((long)(path), (long)(buf))
+#define __sanitizer_syscall_post_statfs(res, path, buf) \
+ __sanitizer_syscall_post_impl_statfs(res, (long)(path), (long)(buf))
+#define __sanitizer_syscall_pre_statfs64(path, sz, buf) \
+ __sanitizer_syscall_pre_impl_statfs64((long)(path), (long)(sz), (long)(buf))
+#define __sanitizer_syscall_post_statfs64(res, path, sz, buf) \
+ __sanitizer_syscall_post_impl_statfs64(res, (long)(path), (long)(sz), \
+ (long)(buf))
+#define __sanitizer_syscall_pre_fstatfs(fd, buf) \
+ __sanitizer_syscall_pre_impl_fstatfs((long)(fd), (long)(buf))
+#define __sanitizer_syscall_post_fstatfs(res, fd, buf) \
+ __sanitizer_syscall_post_impl_fstatfs(res, (long)(fd), (long)(buf))
+#define __sanitizer_syscall_pre_fstatfs64(fd, sz, buf) \
+ __sanitizer_syscall_pre_impl_fstatfs64((long)(fd), (long)(sz), (long)(buf))
+#define __sanitizer_syscall_post_fstatfs64(res, fd, sz, buf) \
+ __sanitizer_syscall_post_impl_fstatfs64(res, (long)(fd), (long)(sz), \
+ (long)(buf))
+#define __sanitizer_syscall_pre_lstat(filename, statbuf) \
+ __sanitizer_syscall_pre_impl_lstat((long)(filename), (long)(statbuf))
+#define __sanitizer_syscall_post_lstat(res, filename, statbuf) \
+ __sanitizer_syscall_post_impl_lstat(res, (long)(filename), (long)(statbuf))
+#define __sanitizer_syscall_pre_fstat(fd, statbuf) \
+ __sanitizer_syscall_pre_impl_fstat((long)(fd), (long)(statbuf))
+#define __sanitizer_syscall_post_fstat(res, fd, statbuf) \
+ __sanitizer_syscall_post_impl_fstat(res, (long)(fd), (long)(statbuf))
+#define __sanitizer_syscall_pre_newstat(filename, statbuf) \
+ __sanitizer_syscall_pre_impl_newstat((long)(filename), (long)(statbuf))
+#define __sanitizer_syscall_post_newstat(res, filename, statbuf) \
+ __sanitizer_syscall_post_impl_newstat(res, (long)(filename), (long)(statbuf))
+#define __sanitizer_syscall_pre_newlstat(filename, statbuf) \
+ __sanitizer_syscall_pre_impl_newlstat((long)(filename), (long)(statbuf))
+#define __sanitizer_syscall_post_newlstat(res, filename, statbuf) \
+ __sanitizer_syscall_post_impl_newlstat(res, (long)(filename), (long)(statbuf))
+#define __sanitizer_syscall_pre_newfstat(fd, statbuf) \
+ __sanitizer_syscall_pre_impl_newfstat((long)(fd), (long)(statbuf))
+#define __sanitizer_syscall_post_newfstat(res, fd, statbuf) \
+ __sanitizer_syscall_post_impl_newfstat(res, (long)(fd), (long)(statbuf))
+#define __sanitizer_syscall_pre_ustat(dev, ubuf) \
+ __sanitizer_syscall_pre_impl_ustat((long)(dev), (long)(ubuf))
+#define __sanitizer_syscall_post_ustat(res, dev, ubuf) \
+ __sanitizer_syscall_post_impl_ustat(res, (long)(dev), (long)(ubuf))
+#define __sanitizer_syscall_pre_stat64(filename, statbuf) \
+ __sanitizer_syscall_pre_impl_stat64((long)(filename), (long)(statbuf))
+#define __sanitizer_syscall_post_stat64(res, filename, statbuf) \
+ __sanitizer_syscall_post_impl_stat64(res, (long)(filename), (long)(statbuf))
+#define __sanitizer_syscall_pre_fstat64(fd, statbuf) \
+ __sanitizer_syscall_pre_impl_fstat64((long)(fd), (long)(statbuf))
+#define __sanitizer_syscall_post_fstat64(res, fd, statbuf) \
+ __sanitizer_syscall_post_impl_fstat64(res, (long)(fd), (long)(statbuf))
+#define __sanitizer_syscall_pre_lstat64(filename, statbuf) \
+ __sanitizer_syscall_pre_impl_lstat64((long)(filename), (long)(statbuf))
+#define __sanitizer_syscall_post_lstat64(res, filename, statbuf) \
+ __sanitizer_syscall_post_impl_lstat64(res, (long)(filename), (long)(statbuf))
+#define __sanitizer_syscall_pre_setxattr(path, name, value, size, flags) \
+ __sanitizer_syscall_pre_impl_setxattr( \
+ (long)(path), (long)(name), (long)(value), (long)(size), (long)(flags))
+#define __sanitizer_syscall_post_setxattr(res, path, name, value, size, flags) \
+ __sanitizer_syscall_post_impl_setxattr(res, (long)(path), (long)(name), \
+ (long)(value), (long)(size), \
+ (long)(flags))
+#define __sanitizer_syscall_pre_lsetxattr(path, name, value, size, flags) \
+ __sanitizer_syscall_pre_impl_lsetxattr( \
+ (long)(path), (long)(name), (long)(value), (long)(size), (long)(flags))
+#define __sanitizer_syscall_post_lsetxattr(res, path, name, value, size, \
+ flags) \
+ __sanitizer_syscall_post_impl_lsetxattr(res, (long)(path), (long)(name), \
+ (long)(value), (long)(size), \
+ (long)(flags))
+#define __sanitizer_syscall_pre_fsetxattr(fd, name, value, size, flags) \
+ __sanitizer_syscall_pre_impl_fsetxattr( \
+ (long)(fd), (long)(name), (long)(value), (long)(size), (long)(flags))
+#define __sanitizer_syscall_post_fsetxattr(res, fd, name, value, size, flags) \
+ __sanitizer_syscall_post_impl_fsetxattr(res, (long)(fd), (long)(name), \
+ (long)(value), (long)(size), \
+ (long)(flags))
+#define __sanitizer_syscall_pre_getxattr(path, name, value, size) \
+ __sanitizer_syscall_pre_impl_getxattr((long)(path), (long)(name), \
+ (long)(value), (long)(size))
+#define __sanitizer_syscall_post_getxattr(res, path, name, value, size) \
+ __sanitizer_syscall_post_impl_getxattr(res, (long)(path), (long)(name), \
+ (long)(value), (long)(size))
+#define __sanitizer_syscall_pre_lgetxattr(path, name, value, size) \
+ __sanitizer_syscall_pre_impl_lgetxattr((long)(path), (long)(name), \
+ (long)(value), (long)(size))
+#define __sanitizer_syscall_post_lgetxattr(res, path, name, value, size) \
+ __sanitizer_syscall_post_impl_lgetxattr(res, (long)(path), (long)(name), \
+ (long)(value), (long)(size))
+#define __sanitizer_syscall_pre_fgetxattr(fd, name, value, size) \
+ __sanitizer_syscall_pre_impl_fgetxattr((long)(fd), (long)(name), \
+ (long)(value), (long)(size))
+#define __sanitizer_syscall_post_fgetxattr(res, fd, name, value, size) \
+ __sanitizer_syscall_post_impl_fgetxattr(res, (long)(fd), (long)(name), \
+ (long)(value), (long)(size))
+#define __sanitizer_syscall_pre_listxattr(path, list, size) \
+ __sanitizer_syscall_pre_impl_listxattr((long)(path), (long)(list), \
+ (long)(size))
+#define __sanitizer_syscall_post_listxattr(res, path, list, size) \
+ __sanitizer_syscall_post_impl_listxattr(res, (long)(path), (long)(list), \
+ (long)(size))
+#define __sanitizer_syscall_pre_llistxattr(path, list, size) \
+ __sanitizer_syscall_pre_impl_llistxattr((long)(path), (long)(list), \
+ (long)(size))
+#define __sanitizer_syscall_post_llistxattr(res, path, list, size) \
+ __sanitizer_syscall_post_impl_llistxattr(res, (long)(path), (long)(list), \
+ (long)(size))
+#define __sanitizer_syscall_pre_flistxattr(fd, list, size) \
+ __sanitizer_syscall_pre_impl_flistxattr((long)(fd), (long)(list), \
+ (long)(size))
+#define __sanitizer_syscall_post_flistxattr(res, fd, list, size) \
+ __sanitizer_syscall_post_impl_flistxattr(res, (long)(fd), (long)(list), \
+ (long)(size))
+#define __sanitizer_syscall_pre_removexattr(path, name) \
+ __sanitizer_syscall_pre_impl_removexattr((long)(path), (long)(name))
+#define __sanitizer_syscall_post_removexattr(res, path, name) \
+ __sanitizer_syscall_post_impl_removexattr(res, (long)(path), (long)(name))
+#define __sanitizer_syscall_pre_lremovexattr(path, name) \
+ __sanitizer_syscall_pre_impl_lremovexattr((long)(path), (long)(name))
+#define __sanitizer_syscall_post_lremovexattr(res, path, name) \
+ __sanitizer_syscall_post_impl_lremovexattr(res, (long)(path), (long)(name))
+#define __sanitizer_syscall_pre_fremovexattr(fd, name) \
+ __sanitizer_syscall_pre_impl_fremovexattr((long)(fd), (long)(name))
+#define __sanitizer_syscall_post_fremovexattr(res, fd, name) \
+ __sanitizer_syscall_post_impl_fremovexattr(res, (long)(fd), (long)(name))
+#define __sanitizer_syscall_pre_brk(brk) \
+ __sanitizer_syscall_pre_impl_brk((long)(brk))
+#define __sanitizer_syscall_post_brk(res, brk) \
+ __sanitizer_syscall_post_impl_brk(res, (long)(brk))
+#define __sanitizer_syscall_pre_mprotect(start, len, prot) \
+ __sanitizer_syscall_pre_impl_mprotect((long)(start), (long)(len), \
+ (long)(prot))
+#define __sanitizer_syscall_post_mprotect(res, start, len, prot) \
+ __sanitizer_syscall_post_impl_mprotect(res, (long)(start), (long)(len), \
+ (long)(prot))
+#define __sanitizer_syscall_pre_mremap(addr, old_len, new_len, flags, \
+ new_addr) \
+ __sanitizer_syscall_pre_impl_mremap((long)(addr), (long)(old_len), \
+ (long)(new_len), (long)(flags), \
+ (long)(new_addr))
+#define __sanitizer_syscall_post_mremap(res, addr, old_len, new_len, flags, \
+ new_addr) \
+ __sanitizer_syscall_post_impl_mremap(res, (long)(addr), (long)(old_len), \
+ (long)(new_len), (long)(flags), \
+ (long)(new_addr))
+#define __sanitizer_syscall_pre_remap_file_pages(start, size, prot, pgoff, \
+ flags) \
+ __sanitizer_syscall_pre_impl_remap_file_pages( \
+ (long)(start), (long)(size), (long)(prot), (long)(pgoff), (long)(flags))
+#define __sanitizer_syscall_post_remap_file_pages(res, start, size, prot, \
+ pgoff, flags) \
+ __sanitizer_syscall_post_impl_remap_file_pages(res, (long)(start), \
+ (long)(size), (long)(prot), \
+ (long)(pgoff), (long)(flags))
+#define __sanitizer_syscall_pre_msync(start, len, flags) \
+ __sanitizer_syscall_pre_impl_msync((long)(start), (long)(len), (long)(flags))
+#define __sanitizer_syscall_post_msync(res, start, len, flags) \
+ __sanitizer_syscall_post_impl_msync(res, (long)(start), (long)(len), \
+ (long)(flags))
+#define __sanitizer_syscall_pre_munmap(addr, len) \
+ __sanitizer_syscall_pre_impl_munmap((long)(addr), (long)(len))
+#define __sanitizer_syscall_post_munmap(res, addr, len) \
+ __sanitizer_syscall_post_impl_munmap(res, (long)(addr), (long)(len))
+#define __sanitizer_syscall_pre_mlock(start, len) \
+ __sanitizer_syscall_pre_impl_mlock((long)(start), (long)(len))
+#define __sanitizer_syscall_post_mlock(res, start, len) \
+ __sanitizer_syscall_post_impl_mlock(res, (long)(start), (long)(len))
+#define __sanitizer_syscall_pre_munlock(start, len) \
+ __sanitizer_syscall_pre_impl_munlock((long)(start), (long)(len))
+#define __sanitizer_syscall_post_munlock(res, start, len) \
+ __sanitizer_syscall_post_impl_munlock(res, (long)(start), (long)(len))
+#define __sanitizer_syscall_pre_mlockall(flags) \
+ __sanitizer_syscall_pre_impl_mlockall((long)(flags))
+#define __sanitizer_syscall_post_mlockall(res, flags) \
+ __sanitizer_syscall_post_impl_mlockall(res, (long)(flags))
+#define __sanitizer_syscall_pre_munlockall() \
+ __sanitizer_syscall_pre_impl_munlockall()
+#define __sanitizer_syscall_post_munlockall(res) \
+ __sanitizer_syscall_post_impl_munlockall(res)
+#define __sanitizer_syscall_pre_madvise(start, len, behavior) \
+ __sanitizer_syscall_pre_impl_madvise((long)(start), (long)(len), \
+ (long)(behavior))
+#define __sanitizer_syscall_post_madvise(res, start, len, behavior) \
+ __sanitizer_syscall_post_impl_madvise(res, (long)(start), (long)(len), \
+ (long)(behavior))
+#define __sanitizer_syscall_pre_mincore(start, len, vec) \
+ __sanitizer_syscall_pre_impl_mincore((long)(start), (long)(len), (long)(vec))
+#define __sanitizer_syscall_post_mincore(res, start, len, vec) \
+ __sanitizer_syscall_post_impl_mincore(res, (long)(start), (long)(len), \
+ (long)(vec))
+#define __sanitizer_syscall_pre_pivot_root(new_root, put_old) \
+ __sanitizer_syscall_pre_impl_pivot_root((long)(new_root), (long)(put_old))
+#define __sanitizer_syscall_post_pivot_root(res, new_root, put_old) \
+ __sanitizer_syscall_post_impl_pivot_root(res, (long)(new_root), \
+ (long)(put_old))
+#define __sanitizer_syscall_pre_chroot(filename) \
+ __sanitizer_syscall_pre_impl_chroot((long)(filename))
+#define __sanitizer_syscall_post_chroot(res, filename) \
+ __sanitizer_syscall_post_impl_chroot(res, (long)(filename))
+#define __sanitizer_syscall_pre_mknod(filename, mode, dev) \
+ __sanitizer_syscall_pre_impl_mknod((long)(filename), (long)(mode), \
+ (long)(dev))
+#define __sanitizer_syscall_post_mknod(res, filename, mode, dev) \
+ __sanitizer_syscall_post_impl_mknod(res, (long)(filename), (long)(mode), \
+ (long)(dev))
+#define __sanitizer_syscall_pre_link(oldname, newname) \
+ __sanitizer_syscall_pre_impl_link((long)(oldname), (long)(newname))
+#define __sanitizer_syscall_post_link(res, oldname, newname) \
+ __sanitizer_syscall_post_impl_link(res, (long)(oldname), (long)(newname))
+#define __sanitizer_syscall_pre_symlink(old, new_) \
+ __sanitizer_syscall_pre_impl_symlink((long)(old), (long)(new_))
+#define __sanitizer_syscall_post_symlink(res, old, new_) \
+ __sanitizer_syscall_post_impl_symlink(res, (long)(old), (long)(new_))
+#define __sanitizer_syscall_pre_unlink(pathname) \
+ __sanitizer_syscall_pre_impl_unlink((long)(pathname))
+#define __sanitizer_syscall_post_unlink(res, pathname) \
+ __sanitizer_syscall_post_impl_unlink(res, (long)(pathname))
+#define __sanitizer_syscall_pre_rename(oldname, newname) \
+ __sanitizer_syscall_pre_impl_rename((long)(oldname), (long)(newname))
+#define __sanitizer_syscall_post_rename(res, oldname, newname) \
+ __sanitizer_syscall_post_impl_rename(res, (long)(oldname), (long)(newname))
+#define __sanitizer_syscall_pre_chmod(filename, mode) \
+ __sanitizer_syscall_pre_impl_chmod((long)(filename), (long)(mode))
+#define __sanitizer_syscall_post_chmod(res, filename, mode) \
+ __sanitizer_syscall_post_impl_chmod(res, (long)(filename), (long)(mode))
+#define __sanitizer_syscall_pre_fchmod(fd, mode) \
+ __sanitizer_syscall_pre_impl_fchmod((long)(fd), (long)(mode))
+#define __sanitizer_syscall_post_fchmod(res, fd, mode) \
+ __sanitizer_syscall_post_impl_fchmod(res, (long)(fd), (long)(mode))
+#define __sanitizer_syscall_pre_fcntl(fd, cmd, arg) \
+ __sanitizer_syscall_pre_impl_fcntl((long)(fd), (long)(cmd), (long)(arg))
+#define __sanitizer_syscall_post_fcntl(res, fd, cmd, arg) \
+ __sanitizer_syscall_post_impl_fcntl(res, (long)(fd), (long)(cmd), (long)(arg))
+#define __sanitizer_syscall_pre_fcntl64(fd, cmd, arg) \
+ __sanitizer_syscall_pre_impl_fcntl64((long)(fd), (long)(cmd), (long)(arg))
+#define __sanitizer_syscall_post_fcntl64(res, fd, cmd, arg) \
+ __sanitizer_syscall_post_impl_fcntl64(res, (long)(fd), (long)(cmd), \
+ (long)(arg))
+#define __sanitizer_syscall_pre_pipe(fildes) \
+ __sanitizer_syscall_pre_impl_pipe((long)(fildes))
+#define __sanitizer_syscall_post_pipe(res, fildes) \
+ __sanitizer_syscall_post_impl_pipe(res, (long)(fildes))
+#define __sanitizer_syscall_pre_pipe2(fildes, flags) \
+ __sanitizer_syscall_pre_impl_pipe2((long)(fildes), (long)(flags))
+#define __sanitizer_syscall_post_pipe2(res, fildes, flags) \
+ __sanitizer_syscall_post_impl_pipe2(res, (long)(fildes), (long)(flags))
+#define __sanitizer_syscall_pre_dup(fildes) \
+ __sanitizer_syscall_pre_impl_dup((long)(fildes))
+#define __sanitizer_syscall_post_dup(res, fildes) \
+ __sanitizer_syscall_post_impl_dup(res, (long)(fildes))
+#define __sanitizer_syscall_pre_dup2(oldfd, newfd) \
+ __sanitizer_syscall_pre_impl_dup2((long)(oldfd), (long)(newfd))
+#define __sanitizer_syscall_post_dup2(res, oldfd, newfd) \
+ __sanitizer_syscall_post_impl_dup2(res, (long)(oldfd), (long)(newfd))
+#define __sanitizer_syscall_pre_dup3(oldfd, newfd, flags) \
+ __sanitizer_syscall_pre_impl_dup3((long)(oldfd), (long)(newfd), (long)(flags))
+#define __sanitizer_syscall_post_dup3(res, oldfd, newfd, flags) \
+ __sanitizer_syscall_post_impl_dup3(res, (long)(oldfd), (long)(newfd), \
+ (long)(flags))
+#define __sanitizer_syscall_pre_ioperm(from, num, on) \
+ __sanitizer_syscall_pre_impl_ioperm((long)(from), (long)(num), (long)(on))
+#define __sanitizer_syscall_post_ioperm(res, from, num, on) \
+ __sanitizer_syscall_post_impl_ioperm(res, (long)(from), (long)(num), \
+ (long)(on))
+#define __sanitizer_syscall_pre_ioctl(fd, cmd, arg) \
+ __sanitizer_syscall_pre_impl_ioctl((long)(fd), (long)(cmd), (long)(arg))
+#define __sanitizer_syscall_post_ioctl(res, fd, cmd, arg) \
+ __sanitizer_syscall_post_impl_ioctl(res, (long)(fd), (long)(cmd), (long)(arg))
+#define __sanitizer_syscall_pre_flock(fd, cmd) \
+ __sanitizer_syscall_pre_impl_flock((long)(fd), (long)(cmd))
+#define __sanitizer_syscall_post_flock(res, fd, cmd) \
+ __sanitizer_syscall_post_impl_flock(res, (long)(fd), (long)(cmd))
+#define __sanitizer_syscall_pre_io_setup(nr_reqs, ctx) \
+ __sanitizer_syscall_pre_impl_io_setup((long)(nr_reqs), (long)(ctx))
+#define __sanitizer_syscall_post_io_setup(res, nr_reqs, ctx) \
+ __sanitizer_syscall_post_impl_io_setup(res, (long)(nr_reqs), (long)(ctx))
+#define __sanitizer_syscall_pre_io_destroy(ctx) \
+ __sanitizer_syscall_pre_impl_io_destroy((long)(ctx))
+#define __sanitizer_syscall_post_io_destroy(res, ctx) \
+ __sanitizer_syscall_post_impl_io_destroy(res, (long)(ctx))
+#define __sanitizer_syscall_pre_io_getevents(ctx_id, min_nr, nr, events, \
+ timeout) \
+ __sanitizer_syscall_pre_impl_io_getevents((long)(ctx_id), (long)(min_nr), \
+ (long)(nr), (long)(events), \
+ (long)(timeout))
+#define __sanitizer_syscall_post_io_getevents(res, ctx_id, min_nr, nr, events, \
+ timeout) \
+ __sanitizer_syscall_post_impl_io_getevents(res, (long)(ctx_id), \
+ (long)(min_nr), (long)(nr), \
+ (long)(events), (long)(timeout))
+#define __sanitizer_syscall_pre_io_submit(ctx_id, arg1, arg2) \
+ __sanitizer_syscall_pre_impl_io_submit((long)(ctx_id), (long)(arg1), \
+ (long)(arg2))
+#define __sanitizer_syscall_post_io_submit(res, ctx_id, arg1, arg2) \
+ __sanitizer_syscall_post_impl_io_submit(res, (long)(ctx_id), (long)(arg1), \
+ (long)(arg2))
+#define __sanitizer_syscall_pre_io_cancel(ctx_id, iocb, result) \
+ __sanitizer_syscall_pre_impl_io_cancel((long)(ctx_id), (long)(iocb), \
+ (long)(result))
+#define __sanitizer_syscall_post_io_cancel(res, ctx_id, iocb, result) \
+ __sanitizer_syscall_post_impl_io_cancel(res, (long)(ctx_id), (long)(iocb), \
+ (long)(result))
+#define __sanitizer_syscall_pre_sendfile(out_fd, in_fd, offset, count) \
+ __sanitizer_syscall_pre_impl_sendfile((long)(out_fd), (long)(in_fd), \
+ (long)(offset), (long)(count))
+#define __sanitizer_syscall_post_sendfile(res, out_fd, in_fd, offset, count) \
+ __sanitizer_syscall_post_impl_sendfile(res, (long)(out_fd), (long)(in_fd), \
+ (long)(offset), (long)(count))
+#define __sanitizer_syscall_pre_sendfile64(out_fd, in_fd, offset, count) \
+ __sanitizer_syscall_pre_impl_sendfile64((long)(out_fd), (long)(in_fd), \
+ (long)(offset), (long)(count))
+#define __sanitizer_syscall_post_sendfile64(res, out_fd, in_fd, offset, count) \
+ __sanitizer_syscall_post_impl_sendfile64(res, (long)(out_fd), (long)(in_fd), \
+ (long)(offset), (long)(count))
+#define __sanitizer_syscall_pre_readlink(path, buf, bufsiz) \
+ __sanitizer_syscall_pre_impl_readlink((long)(path), (long)(buf), \
+ (long)(bufsiz))
+#define __sanitizer_syscall_post_readlink(res, path, buf, bufsiz) \
+ __sanitizer_syscall_post_impl_readlink(res, (long)(path), (long)(buf), \
+ (long)(bufsiz))
+#define __sanitizer_syscall_pre_creat(pathname, mode) \
+ __sanitizer_syscall_pre_impl_creat((long)(pathname), (long)(mode))
+#define __sanitizer_syscall_post_creat(res, pathname, mode) \
+ __sanitizer_syscall_post_impl_creat(res, (long)(pathname), (long)(mode))
+#define __sanitizer_syscall_pre_open(filename, flags, mode) \
+ __sanitizer_syscall_pre_impl_open((long)(filename), (long)(flags), \
+ (long)(mode))
+#define __sanitizer_syscall_post_open(res, filename, flags, mode) \
+ __sanitizer_syscall_post_impl_open(res, (long)(filename), (long)(flags), \
+ (long)(mode))
+#define __sanitizer_syscall_pre_close(fd) \
+ __sanitizer_syscall_pre_impl_close((long)(fd))
+#define __sanitizer_syscall_post_close(res, fd) \
+ __sanitizer_syscall_post_impl_close(res, (long)(fd))
+#define __sanitizer_syscall_pre_access(filename, mode) \
+ __sanitizer_syscall_pre_impl_access((long)(filename), (long)(mode))
+#define __sanitizer_syscall_post_access(res, filename, mode) \
+ __sanitizer_syscall_post_impl_access(res, (long)(filename), (long)(mode))
+#define __sanitizer_syscall_pre_vhangup() __sanitizer_syscall_pre_impl_vhangup()
+#define __sanitizer_syscall_post_vhangup(res) \
+ __sanitizer_syscall_post_impl_vhangup(res)
+#define __sanitizer_syscall_pre_chown(filename, user, group) \
+ __sanitizer_syscall_pre_impl_chown((long)(filename), (long)(user), \
+ (long)(group))
+#define __sanitizer_syscall_post_chown(res, filename, user, group) \
+ __sanitizer_syscall_post_impl_chown(res, (long)(filename), (long)(user), \
+ (long)(group))
+#define __sanitizer_syscall_pre_lchown(filename, user, group) \
+ __sanitizer_syscall_pre_impl_lchown((long)(filename), (long)(user), \
+ (long)(group))
+#define __sanitizer_syscall_post_lchown(res, filename, user, group) \
+ __sanitizer_syscall_post_impl_lchown(res, (long)(filename), (long)(user), \
+ (long)(group))
+#define __sanitizer_syscall_pre_fchown(fd, user, group) \
+ __sanitizer_syscall_pre_impl_fchown((long)(fd), (long)(user), (long)(group))
+#define __sanitizer_syscall_post_fchown(res, fd, user, group) \
+ __sanitizer_syscall_post_impl_fchown(res, (long)(fd), (long)(user), \
+ (long)(group))
+#define __sanitizer_syscall_pre_chown16(filename, user, group) \
+ __sanitizer_syscall_pre_impl_chown16((long)(filename), (long)(user), \
+ (long)(group))
+#define __sanitizer_syscall_post_chown16(res, filename, user, group) \
+ __sanitizer_syscall_post_impl_chown16(res, (long)(filename), (long)(user), \
+ (long)(group))
+#define __sanitizer_syscall_pre_lchown16(filename, user, group) \
+ __sanitizer_syscall_pre_impl_lchown16((long)(filename), (long)(user), \
+ (long)(group))
+#define __sanitizer_syscall_post_lchown16(res, filename, user, group) \
+ __sanitizer_syscall_post_impl_lchown16(res, (long)(filename), (long)(user), \
+ (long)(group))
+#define __sanitizer_syscall_pre_fchown16(fd, user, group) \
+ __sanitizer_syscall_pre_impl_fchown16((long)(fd), (long)(user), \
+ (long)(group))
+#define __sanitizer_syscall_post_fchown16(res, fd, user, group) \
+ __sanitizer_syscall_post_impl_fchown16(res, (long)(fd), (long)(user), \
+ (long)(group))
+#define __sanitizer_syscall_pre_setregid16(rgid, egid) \
+ __sanitizer_syscall_pre_impl_setregid16((long)(rgid), (long)(egid))
+#define __sanitizer_syscall_post_setregid16(res, rgid, egid) \
+ __sanitizer_syscall_post_impl_setregid16(res, (long)(rgid), (long)(egid))
+#define __sanitizer_syscall_pre_setgid16(gid) \
+ __sanitizer_syscall_pre_impl_setgid16((long)(gid))
+#define __sanitizer_syscall_post_setgid16(res, gid) \
+ __sanitizer_syscall_post_impl_setgid16(res, (long)(gid))
+#define __sanitizer_syscall_pre_setreuid16(ruid, euid) \
+ __sanitizer_syscall_pre_impl_setreuid16((long)(ruid), (long)(euid))
+#define __sanitizer_syscall_post_setreuid16(res, ruid, euid) \
+ __sanitizer_syscall_post_impl_setreuid16(res, (long)(ruid), (long)(euid))
+#define __sanitizer_syscall_pre_setuid16(uid) \
+ __sanitizer_syscall_pre_impl_setuid16((long)(uid))
+#define __sanitizer_syscall_post_setuid16(res, uid) \
+ __sanitizer_syscall_post_impl_setuid16(res, (long)(uid))
+#define __sanitizer_syscall_pre_setresuid16(ruid, euid, suid) \
+ __sanitizer_syscall_pre_impl_setresuid16((long)(ruid), (long)(euid), \
+ (long)(suid))
+#define __sanitizer_syscall_post_setresuid16(res, ruid, euid, suid) \
+ __sanitizer_syscall_post_impl_setresuid16(res, (long)(ruid), (long)(euid), \
+ (long)(suid))
+#define __sanitizer_syscall_pre_getresuid16(ruid, euid, suid) \
+ __sanitizer_syscall_pre_impl_getresuid16((long)(ruid), (long)(euid), \
+ (long)(suid))
+#define __sanitizer_syscall_post_getresuid16(res, ruid, euid, suid) \
+ __sanitizer_syscall_post_impl_getresuid16(res, (long)(ruid), (long)(euid), \
+ (long)(suid))
+#define __sanitizer_syscall_pre_setresgid16(rgid, egid, sgid) \
+ __sanitizer_syscall_pre_impl_setresgid16((long)(rgid), (long)(egid), \
+ (long)(sgid))
+#define __sanitizer_syscall_post_setresgid16(res, rgid, egid, sgid) \
+ __sanitizer_syscall_post_impl_setresgid16(res, (long)(rgid), (long)(egid), \
+ (long)(sgid))
+#define __sanitizer_syscall_pre_getresgid16(rgid, egid, sgid) \
+ __sanitizer_syscall_pre_impl_getresgid16((long)(rgid), (long)(egid), \
+ (long)(sgid))
+#define __sanitizer_syscall_post_getresgid16(res, rgid, egid, sgid) \
+ __sanitizer_syscall_post_impl_getresgid16(res, (long)(rgid), (long)(egid), \
+ (long)(sgid))
+#define __sanitizer_syscall_pre_setfsuid16(uid) \
+ __sanitizer_syscall_pre_impl_setfsuid16((long)(uid))
+#define __sanitizer_syscall_post_setfsuid16(res, uid) \
+ __sanitizer_syscall_post_impl_setfsuid16(res, (long)(uid))
+#define __sanitizer_syscall_pre_setfsgid16(gid) \
+ __sanitizer_syscall_pre_impl_setfsgid16((long)(gid))
+#define __sanitizer_syscall_post_setfsgid16(res, gid) \
+ __sanitizer_syscall_post_impl_setfsgid16(res, (long)(gid))
+#define __sanitizer_syscall_pre_getgroups16(gidsetsize, grouplist) \
+ __sanitizer_syscall_pre_impl_getgroups16((long)(gidsetsize), \
+ (long)(grouplist))
+#define __sanitizer_syscall_post_getgroups16(res, gidsetsize, grouplist) \
+ __sanitizer_syscall_post_impl_getgroups16(res, (long)(gidsetsize), \
+ (long)(grouplist))
+#define __sanitizer_syscall_pre_setgroups16(gidsetsize, grouplist) \
+ __sanitizer_syscall_pre_impl_setgroups16((long)(gidsetsize), \
+ (long)(grouplist))
+#define __sanitizer_syscall_post_setgroups16(res, gidsetsize, grouplist) \
+ __sanitizer_syscall_post_impl_setgroups16(res, (long)(gidsetsize), \
+ (long)(grouplist))
+#define __sanitizer_syscall_pre_getuid16() \
+ __sanitizer_syscall_pre_impl_getuid16()
+#define __sanitizer_syscall_post_getuid16(res) \
+ __sanitizer_syscall_post_impl_getuid16(res)
+#define __sanitizer_syscall_pre_geteuid16() \
+ __sanitizer_syscall_pre_impl_geteuid16()
+#define __sanitizer_syscall_post_geteuid16(res) \
+ __sanitizer_syscall_post_impl_geteuid16(res)
+#define __sanitizer_syscall_pre_getgid16() \
+ __sanitizer_syscall_pre_impl_getgid16()
+#define __sanitizer_syscall_post_getgid16(res) \
+ __sanitizer_syscall_post_impl_getgid16(res)
+#define __sanitizer_syscall_pre_getegid16() \
+ __sanitizer_syscall_pre_impl_getegid16()
+#define __sanitizer_syscall_post_getegid16(res) \
+ __sanitizer_syscall_post_impl_getegid16(res)
+#define __sanitizer_syscall_pre_utime(filename, times) \
+ __sanitizer_syscall_pre_impl_utime((long)(filename), (long)(times))
+#define __sanitizer_syscall_post_utime(res, filename, times) \
+ __sanitizer_syscall_post_impl_utime(res, (long)(filename), (long)(times))
+#define __sanitizer_syscall_pre_utimes(filename, utimes) \
+ __sanitizer_syscall_pre_impl_utimes((long)(filename), (long)(utimes))
+#define __sanitizer_syscall_post_utimes(res, filename, utimes) \
+ __sanitizer_syscall_post_impl_utimes(res, (long)(filename), (long)(utimes))
+#define __sanitizer_syscall_pre_lseek(fd, offset, origin) \
+ __sanitizer_syscall_pre_impl_lseek((long)(fd), (long)(offset), (long)(origin))
+#define __sanitizer_syscall_post_lseek(res, fd, offset, origin) \
+ __sanitizer_syscall_post_impl_lseek(res, (long)(fd), (long)(offset), \
+ (long)(origin))
+#define __sanitizer_syscall_pre_llseek(fd, offset_high, offset_low, result, \
+ origin) \
+ __sanitizer_syscall_pre_impl_llseek((long)(fd), (long)(offset_high), \
+ (long)(offset_low), (long)(result), \
+ (long)(origin))
+#define __sanitizer_syscall_post_llseek(res, fd, offset_high, offset_low, \
+ result, origin) \
+ __sanitizer_syscall_post_impl_llseek(res, (long)(fd), (long)(offset_high), \
+ (long)(offset_low), (long)(result), \
+ (long)(origin))
+#define __sanitizer_syscall_pre_read(fd, buf, count) \
+ __sanitizer_syscall_pre_impl_read((long)(fd), (long)(buf), (long)(count))
+#define __sanitizer_syscall_post_read(res, fd, buf, count) \
+ __sanitizer_syscall_post_impl_read(res, (long)(fd), (long)(buf), \
+ (long)(count))
+#define __sanitizer_syscall_pre_readv(fd, vec, vlen) \
+ __sanitizer_syscall_pre_impl_readv((long)(fd), (long)(vec), (long)(vlen))
+#define __sanitizer_syscall_post_readv(res, fd, vec, vlen) \
+ __sanitizer_syscall_post_impl_readv(res, (long)(fd), (long)(vec), \
+ (long)(vlen))
+#define __sanitizer_syscall_pre_write(fd, buf, count) \
+ __sanitizer_syscall_pre_impl_write((long)(fd), (long)(buf), (long)(count))
+#define __sanitizer_syscall_post_write(res, fd, buf, count) \
+ __sanitizer_syscall_post_impl_write(res, (long)(fd), (long)(buf), \
+ (long)(count))
+#define __sanitizer_syscall_pre_writev(fd, vec, vlen) \
+ __sanitizer_syscall_pre_impl_writev((long)(fd), (long)(vec), (long)(vlen))
+#define __sanitizer_syscall_post_writev(res, fd, vec, vlen) \
+ __sanitizer_syscall_post_impl_writev(res, (long)(fd), (long)(vec), \
+ (long)(vlen))
+
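+// A minimal usage sketch (illustrative, not part of the original header):
+// code that issues raw syscalls itself brackets each call with the matching
+// pre/post hook so the sanitizer can inspect the arguments before the call
+// and account for the data written afterwards. Assuming <unistd.h> and
+// <sys/syscall.h> are available and `fd` is an open descriptor:
+//
+//   char buf[128];
+//   __sanitizer_syscall_pre_read(fd, buf, sizeof(buf));
+//   long res = syscall(SYS_read, fd, buf, sizeof(buf));
+//   __sanitizer_syscall_post_read(res, fd, buf, sizeof(buf));
+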
+#ifdef _LP64
+#define __sanitizer_syscall_pre_pread64(fd, buf, count, pos) \
+ __sanitizer_syscall_pre_impl_pread64((long)(fd), (long)(buf), (long)(count), \
+ (long)(pos))
+#define __sanitizer_syscall_post_pread64(res, fd, buf, count, pos) \
+ __sanitizer_syscall_post_impl_pread64(res, (long)(fd), (long)(buf), \
+ (long)(count), (long)(pos))
+#define __sanitizer_syscall_pre_pwrite64(fd, buf, count, pos) \
+ __sanitizer_syscall_pre_impl_pwrite64((long)(fd), (long)(buf), \
+ (long)(count), (long)(pos))
+#define __sanitizer_syscall_post_pwrite64(res, fd, buf, count, pos) \
+ __sanitizer_syscall_post_impl_pwrite64(res, (long)(fd), (long)(buf), \
+ (long)(count), (long)(pos))
+#else
+#define __sanitizer_syscall_pre_pread64(fd, buf, count, pos0, pos1) \
+ __sanitizer_syscall_pre_impl_pread64((long)(fd), (long)(buf), (long)(count), \
+ (long)(pos0), (long)(pos1))
+#define __sanitizer_syscall_post_pread64(res, fd, buf, count, pos0, pos1) \
+ __sanitizer_syscall_post_impl_pread64(res, (long)(fd), (long)(buf), \
+ (long)(count), (long)(pos0), \
+ (long)(pos1))
+#define __sanitizer_syscall_pre_pwrite64(fd, buf, count, pos0, pos1) \
+ __sanitizer_syscall_pre_impl_pwrite64( \
+ (long)(fd), (long)(buf), (long)(count), (long)(pos0), (long)(pos1))
+#define __sanitizer_syscall_post_pwrite64(res, fd, buf, count, pos0, pos1) \
+ __sanitizer_syscall_post_impl_pwrite64( \
+ res, (long)(fd), (long)(buf), (long)(count), (long)(pos0), (long)(pos1))
+#endif
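+
+// Note: on non-_LP64 targets the 64-bit file offset is split across two
+// long arguments. A hedged sketch, assuming low word first (the exact
+// order follows the target's kernel ABI):
+//
+//   unsigned long long off = /* 64-bit offset */;
+//   __sanitizer_syscall_pre_pread64(fd, buf, count,
+//                                   (long)(off & 0xffffffffu),
+//                                   (long)(off >> 32));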
+
+#define __sanitizer_syscall_pre_preadv(fd, vec, vlen, pos_l, pos_h) \
+ __sanitizer_syscall_pre_impl_preadv((long)(fd), (long)(vec), (long)(vlen), \
+ (long)(pos_l), (long)(pos_h))
+#define __sanitizer_syscall_post_preadv(res, fd, vec, vlen, pos_l, pos_h) \
+ __sanitizer_syscall_post_impl_preadv(res, (long)(fd), (long)(vec), \
+ (long)(vlen), (long)(pos_l), \
+ (long)(pos_h))
+#define __sanitizer_syscall_pre_pwritev(fd, vec, vlen, pos_l, pos_h) \
+ __sanitizer_syscall_pre_impl_pwritev((long)(fd), (long)(vec), (long)(vlen), \
+ (long)(pos_l), (long)(pos_h))
+#define __sanitizer_syscall_post_pwritev(res, fd, vec, vlen, pos_l, pos_h) \
+ __sanitizer_syscall_post_impl_pwritev(res, (long)(fd), (long)(vec), \
+ (long)(vlen), (long)(pos_l), \
+ (long)(pos_h))
+#define __sanitizer_syscall_pre_getcwd(buf, size) \
+ __sanitizer_syscall_pre_impl_getcwd((long)(buf), (long)(size))
+#define __sanitizer_syscall_post_getcwd(res, buf, size) \
+ __sanitizer_syscall_post_impl_getcwd(res, (long)(buf), (long)(size))
+#define __sanitizer_syscall_pre_mkdir(pathname, mode) \
+ __sanitizer_syscall_pre_impl_mkdir((long)(pathname), (long)(mode))
+#define __sanitizer_syscall_post_mkdir(res, pathname, mode) \
+ __sanitizer_syscall_post_impl_mkdir(res, (long)(pathname), (long)(mode))
+#define __sanitizer_syscall_pre_chdir(filename) \
+ __sanitizer_syscall_pre_impl_chdir((long)(filename))
+#define __sanitizer_syscall_post_chdir(res, filename) \
+ __sanitizer_syscall_post_impl_chdir(res, (long)(filename))
+#define __sanitizer_syscall_pre_fchdir(fd) \
+ __sanitizer_syscall_pre_impl_fchdir((long)(fd))
+#define __sanitizer_syscall_post_fchdir(res, fd) \
+ __sanitizer_syscall_post_impl_fchdir(res, (long)(fd))
+#define __sanitizer_syscall_pre_rmdir(pathname) \
+ __sanitizer_syscall_pre_impl_rmdir((long)(pathname))
+#define __sanitizer_syscall_post_rmdir(res, pathname) \
+ __sanitizer_syscall_post_impl_rmdir(res, (long)(pathname))
+#define __sanitizer_syscall_pre_lookup_dcookie(cookie64, buf, len) \
+ __sanitizer_syscall_pre_impl_lookup_dcookie((long)(cookie64), (long)(buf), \
+ (long)(len))
+#define __sanitizer_syscall_post_lookup_dcookie(res, cookie64, buf, len) \
+ __sanitizer_syscall_post_impl_lookup_dcookie(res, (long)(cookie64), \
+ (long)(buf), (long)(len))
+#define __sanitizer_syscall_pre_quotactl(cmd, special, id, addr) \
+ __sanitizer_syscall_pre_impl_quotactl((long)(cmd), (long)(special), \
+ (long)(id), (long)(addr))
+#define __sanitizer_syscall_post_quotactl(res, cmd, special, id, addr) \
+ __sanitizer_syscall_post_impl_quotactl(res, (long)(cmd), (long)(special), \
+ (long)(id), (long)(addr))
+#define __sanitizer_syscall_pre_getdents(fd, dirent, count) \
+ __sanitizer_syscall_pre_impl_getdents((long)(fd), (long)(dirent), \
+ (long)(count))
+#define __sanitizer_syscall_post_getdents(res, fd, dirent, count) \
+ __sanitizer_syscall_post_impl_getdents(res, (long)(fd), (long)(dirent), \
+ (long)(count))
+#define __sanitizer_syscall_pre_getdents64(fd, dirent, count) \
+ __sanitizer_syscall_pre_impl_getdents64((long)(fd), (long)(dirent), \
+ (long)(count))
+#define __sanitizer_syscall_post_getdents64(res, fd, dirent, count) \
+ __sanitizer_syscall_post_impl_getdents64(res, (long)(fd), (long)(dirent), \
+ (long)(count))
+#define __sanitizer_syscall_pre_setsockopt(fd, level, optname, optval, optlen) \
+ __sanitizer_syscall_pre_impl_setsockopt((long)(fd), (long)(level), \
+ (long)(optname), (long)(optval), \
+ (long)(optlen))
+#define __sanitizer_syscall_post_setsockopt(res, fd, level, optname, optval, \
+ optlen) \
+ __sanitizer_syscall_post_impl_setsockopt(res, (long)(fd), (long)(level), \
+ (long)(optname), (long)(optval), \
+ (long)(optlen))
+#define __sanitizer_syscall_pre_getsockopt(fd, level, optname, optval, optlen) \
+ __sanitizer_syscall_pre_impl_getsockopt((long)(fd), (long)(level), \
+ (long)(optname), (long)(optval), \
+ (long)(optlen))
+#define __sanitizer_syscall_post_getsockopt(res, fd, level, optname, optval, \
+ optlen) \
+ __sanitizer_syscall_post_impl_getsockopt(res, (long)(fd), (long)(level), \
+ (long)(optname), (long)(optval), \
+ (long)(optlen))
+#define __sanitizer_syscall_pre_bind(arg0, arg1, arg2) \
+ __sanitizer_syscall_pre_impl_bind((long)(arg0), (long)(arg1), (long)(arg2))
+#define __sanitizer_syscall_post_bind(res, arg0, arg1, arg2) \
+ __sanitizer_syscall_post_impl_bind(res, (long)(arg0), (long)(arg1), \
+ (long)(arg2))
+#define __sanitizer_syscall_pre_connect(arg0, arg1, arg2) \
+ __sanitizer_syscall_pre_impl_connect((long)(arg0), (long)(arg1), (long)(arg2))
+#define __sanitizer_syscall_post_connect(res, arg0, arg1, arg2) \
+ __sanitizer_syscall_post_impl_connect(res, (long)(arg0), (long)(arg1), \
+ (long)(arg2))
+#define __sanitizer_syscall_pre_accept(arg0, arg1, arg2) \
+ __sanitizer_syscall_pre_impl_accept((long)(arg0), (long)(arg1), (long)(arg2))
+#define __sanitizer_syscall_post_accept(res, arg0, arg1, arg2) \
+ __sanitizer_syscall_post_impl_accept(res, (long)(arg0), (long)(arg1), \
+ (long)(arg2))
+#define __sanitizer_syscall_pre_accept4(arg0, arg1, arg2, arg3) \
+ __sanitizer_syscall_pre_impl_accept4((long)(arg0), (long)(arg1), \
+ (long)(arg2), (long)(arg3))
+#define __sanitizer_syscall_post_accept4(res, arg0, arg1, arg2, arg3) \
+ __sanitizer_syscall_post_impl_accept4(res, (long)(arg0), (long)(arg1), \
+ (long)(arg2), (long)(arg3))
+#define __sanitizer_syscall_pre_getsockname(arg0, arg1, arg2) \
+ __sanitizer_syscall_pre_impl_getsockname((long)(arg0), (long)(arg1), \
+ (long)(arg2))
+#define __sanitizer_syscall_post_getsockname(res, arg0, arg1, arg2) \
+ __sanitizer_syscall_post_impl_getsockname(res, (long)(arg0), (long)(arg1), \
+ (long)(arg2))
+#define __sanitizer_syscall_pre_getpeername(arg0, arg1, arg2) \
+ __sanitizer_syscall_pre_impl_getpeername((long)(arg0), (long)(arg1), \
+ (long)(arg2))
+#define __sanitizer_syscall_post_getpeername(res, arg0, arg1, arg2) \
+ __sanitizer_syscall_post_impl_getpeername(res, (long)(arg0), (long)(arg1), \
+ (long)(arg2))
+#define __sanitizer_syscall_pre_send(arg0, arg1, arg2, arg3) \
+ __sanitizer_syscall_pre_impl_send((long)(arg0), (long)(arg1), (long)(arg2), \
+ (long)(arg3))
+#define __sanitizer_syscall_post_send(res, arg0, arg1, arg2, arg3) \
+ __sanitizer_syscall_post_impl_send(res, (long)(arg0), (long)(arg1), \
+ (long)(arg2), (long)(arg3))
+#define __sanitizer_syscall_pre_sendto(arg0, arg1, arg2, arg3, arg4, arg5) \
+ __sanitizer_syscall_pre_impl_sendto((long)(arg0), (long)(arg1), \
+ (long)(arg2), (long)(arg3), \
+ (long)(arg4), (long)(arg5))
+#define __sanitizer_syscall_post_sendto(res, arg0, arg1, arg2, arg3, arg4, \
+ arg5) \
+ __sanitizer_syscall_post_impl_sendto(res, (long)(arg0), (long)(arg1), \
+ (long)(arg2), (long)(arg3), \
+ (long)(arg4), (long)(arg5))
+#define __sanitizer_syscall_pre_sendmsg(fd, msg, flags) \
+ __sanitizer_syscall_pre_impl_sendmsg((long)(fd), (long)(msg), (long)(flags))
+#define __sanitizer_syscall_post_sendmsg(res, fd, msg, flags) \
+ __sanitizer_syscall_post_impl_sendmsg(res, (long)(fd), (long)(msg), \
+ (long)(flags))
+#define __sanitizer_syscall_pre_sendmmsg(fd, msg, vlen, flags) \
+ __sanitizer_syscall_pre_impl_sendmmsg((long)(fd), (long)(msg), (long)(vlen), \
+ (long)(flags))
+#define __sanitizer_syscall_post_sendmmsg(res, fd, msg, vlen, flags) \
+ __sanitizer_syscall_post_impl_sendmmsg(res, (long)(fd), (long)(msg), \
+ (long)(vlen), (long)(flags))
+#define __sanitizer_syscall_pre_recv(arg0, arg1, arg2, arg3) \
+ __sanitizer_syscall_pre_impl_recv((long)(arg0), (long)(arg1), (long)(arg2), \
+ (long)(arg3))
+#define __sanitizer_syscall_post_recv(res, arg0, arg1, arg2, arg3) \
+ __sanitizer_syscall_post_impl_recv(res, (long)(arg0), (long)(arg1), \
+ (long)(arg2), (long)(arg3))
+#define __sanitizer_syscall_pre_recvfrom(arg0, arg1, arg2, arg3, arg4, arg5) \
+ __sanitizer_syscall_pre_impl_recvfrom((long)(arg0), (long)(arg1), \
+ (long)(arg2), (long)(arg3), \
+ (long)(arg4), (long)(arg5))
+#define __sanitizer_syscall_post_recvfrom(res, arg0, arg1, arg2, arg3, arg4, \
+ arg5) \
+ __sanitizer_syscall_post_impl_recvfrom(res, (long)(arg0), (long)(arg1), \
+ (long)(arg2), (long)(arg3), \
+ (long)(arg4), (long)(arg5))
+#define __sanitizer_syscall_pre_recvmsg(fd, msg, flags) \
+ __sanitizer_syscall_pre_impl_recvmsg((long)(fd), (long)(msg), (long)(flags))
+#define __sanitizer_syscall_post_recvmsg(res, fd, msg, flags) \
+ __sanitizer_syscall_post_impl_recvmsg(res, (long)(fd), (long)(msg), \
+ (long)(flags))
+#define __sanitizer_syscall_pre_recvmmsg(fd, msg, vlen, flags, timeout) \
+ __sanitizer_syscall_pre_impl_recvmmsg((long)(fd), (long)(msg), (long)(vlen), \
+ (long)(flags), (long)(timeout))
+#define __sanitizer_syscall_post_recvmmsg(res, fd, msg, vlen, flags, timeout) \
+ __sanitizer_syscall_post_impl_recvmmsg(res, (long)(fd), (long)(msg), \
+ (long)(vlen), (long)(flags), \
+ (long)(timeout))
+#define __sanitizer_syscall_pre_socket(arg0, arg1, arg2) \
+ __sanitizer_syscall_pre_impl_socket((long)(arg0), (long)(arg1), (long)(arg2))
+#define __sanitizer_syscall_post_socket(res, arg0, arg1, arg2) \
+ __sanitizer_syscall_post_impl_socket(res, (long)(arg0), (long)(arg1), \
+ (long)(arg2))
+#define __sanitizer_syscall_pre_socketpair(arg0, arg1, arg2, arg3) \
+ __sanitizer_syscall_pre_impl_socketpair((long)(arg0), (long)(arg1), \
+ (long)(arg2), (long)(arg3))
+#define __sanitizer_syscall_post_socketpair(res, arg0, arg1, arg2, arg3) \
+ __sanitizer_syscall_post_impl_socketpair(res, (long)(arg0), (long)(arg1), \
+ (long)(arg2), (long)(arg3))
+#define __sanitizer_syscall_pre_socketcall(call, args) \
+ __sanitizer_syscall_pre_impl_socketcall((long)(call), (long)(args))
+#define __sanitizer_syscall_post_socketcall(res, call, args) \
+ __sanitizer_syscall_post_impl_socketcall(res, (long)(call), (long)(args))
+#define __sanitizer_syscall_pre_listen(arg0, arg1) \
+ __sanitizer_syscall_pre_impl_listen((long)(arg0), (long)(arg1))
+#define __sanitizer_syscall_post_listen(res, arg0, arg1) \
+ __sanitizer_syscall_post_impl_listen(res, (long)(arg0), (long)(arg1))
+#define __sanitizer_syscall_pre_poll(ufds, nfds, timeout) \
+ __sanitizer_syscall_pre_impl_poll((long)(ufds), (long)(nfds), (long)(timeout))
+#define __sanitizer_syscall_post_poll(res, ufds, nfds, timeout) \
+ __sanitizer_syscall_post_impl_poll(res, (long)(ufds), (long)(nfds), \
+ (long)(timeout))
+#define __sanitizer_syscall_pre_select(n, inp, outp, exp, tvp) \
+ __sanitizer_syscall_pre_impl_select((long)(n), (long)(inp), (long)(outp), \
+ (long)(exp), (long)(tvp))
+#define __sanitizer_syscall_post_select(res, n, inp, outp, exp, tvp) \
+ __sanitizer_syscall_post_impl_select(res, (long)(n), (long)(inp), \
+ (long)(outp), (long)(exp), (long)(tvp))
+#define __sanitizer_syscall_pre_old_select(arg) \
+ __sanitizer_syscall_pre_impl_old_select((long)(arg))
+#define __sanitizer_syscall_post_old_select(res, arg) \
+ __sanitizer_syscall_post_impl_old_select(res, (long)(arg))
+#define __sanitizer_syscall_pre_epoll_create(size) \
+ __sanitizer_syscall_pre_impl_epoll_create((long)(size))
+#define __sanitizer_syscall_post_epoll_create(res, size) \
+ __sanitizer_syscall_post_impl_epoll_create(res, (long)(size))
+#define __sanitizer_syscall_pre_epoll_create1(flags) \
+ __sanitizer_syscall_pre_impl_epoll_create1((long)(flags))
+#define __sanitizer_syscall_post_epoll_create1(res, flags) \
+ __sanitizer_syscall_post_impl_epoll_create1(res, (long)(flags))
+#define __sanitizer_syscall_pre_epoll_ctl(epfd, op, fd, event) \
+ __sanitizer_syscall_pre_impl_epoll_ctl((long)(epfd), (long)(op), (long)(fd), \
+ (long)(event))
+#define __sanitizer_syscall_post_epoll_ctl(res, epfd, op, fd, event) \
+ __sanitizer_syscall_post_impl_epoll_ctl(res, (long)(epfd), (long)(op), \
+ (long)(fd), (long)(event))
+#define __sanitizer_syscall_pre_epoll_wait(epfd, events, maxevents, timeout) \
+ __sanitizer_syscall_pre_impl_epoll_wait((long)(epfd), (long)(events), \
+ (long)(maxevents), (long)(timeout))
+#define __sanitizer_syscall_post_epoll_wait(res, epfd, events, maxevents, \
+ timeout) \
+ __sanitizer_syscall_post_impl_epoll_wait(res, (long)(epfd), (long)(events), \
+ (long)(maxevents), (long)(timeout))
+#define __sanitizer_syscall_pre_epoll_pwait(epfd, events, maxevents, timeout, \
+ sigmask, sigsetsize) \
+ __sanitizer_syscall_pre_impl_epoll_pwait( \
+ (long)(epfd), (long)(events), (long)(maxevents), (long)(timeout), \
+ (long)(sigmask), (long)(sigsetsize))
+#define __sanitizer_syscall_post_epoll_pwait(res, epfd, events, maxevents, \
+ timeout, sigmask, sigsetsize) \
+ __sanitizer_syscall_post_impl_epoll_pwait( \
+ res, (long)(epfd), (long)(events), (long)(maxevents), (long)(timeout), \
+ (long)(sigmask), (long)(sigsetsize))
+#define __sanitizer_syscall_pre_gethostname(name, len) \
+ __sanitizer_syscall_pre_impl_gethostname((long)(name), (long)(len))
+#define __sanitizer_syscall_post_gethostname(res, name, len) \
+ __sanitizer_syscall_post_impl_gethostname(res, (long)(name), (long)(len))
+#define __sanitizer_syscall_pre_sethostname(name, len) \
+ __sanitizer_syscall_pre_impl_sethostname((long)(name), (long)(len))
+#define __sanitizer_syscall_post_sethostname(res, name, len) \
+ __sanitizer_syscall_post_impl_sethostname(res, (long)(name), (long)(len))
+#define __sanitizer_syscall_pre_setdomainname(name, len) \
+ __sanitizer_syscall_pre_impl_setdomainname((long)(name), (long)(len))
+#define __sanitizer_syscall_post_setdomainname(res, name, len) \
+ __sanitizer_syscall_post_impl_setdomainname(res, (long)(name), (long)(len))
+#define __sanitizer_syscall_pre_newuname(name) \
+ __sanitizer_syscall_pre_impl_newuname((long)(name))
+#define __sanitizer_syscall_post_newuname(res, name) \
+ __sanitizer_syscall_post_impl_newuname(res, (long)(name))
+#define __sanitizer_syscall_pre_uname(arg0) \
+ __sanitizer_syscall_pre_impl_uname((long)(arg0))
+#define __sanitizer_syscall_post_uname(res, arg0) \
+ __sanitizer_syscall_post_impl_uname(res, (long)(arg0))
+#define __sanitizer_syscall_pre_olduname(arg0) \
+ __sanitizer_syscall_pre_impl_olduname((long)(arg0))
+#define __sanitizer_syscall_post_olduname(res, arg0) \
+ __sanitizer_syscall_post_impl_olduname(res, (long)(arg0))
+#define __sanitizer_syscall_pre_getrlimit(resource, rlim) \
+ __sanitizer_syscall_pre_impl_getrlimit((long)(resource), (long)(rlim))
+#define __sanitizer_syscall_post_getrlimit(res, resource, rlim) \
+ __sanitizer_syscall_post_impl_getrlimit(res, (long)(resource), (long)(rlim))
+#define __sanitizer_syscall_pre_old_getrlimit(resource, rlim) \
+ __sanitizer_syscall_pre_impl_old_getrlimit((long)(resource), (long)(rlim))
+#define __sanitizer_syscall_post_old_getrlimit(res, resource, rlim) \
+ __sanitizer_syscall_post_impl_old_getrlimit(res, (long)(resource), \
+ (long)(rlim))
+#define __sanitizer_syscall_pre_setrlimit(resource, rlim) \
+ __sanitizer_syscall_pre_impl_setrlimit((long)(resource), (long)(rlim))
+#define __sanitizer_syscall_post_setrlimit(res, resource, rlim) \
+ __sanitizer_syscall_post_impl_setrlimit(res, (long)(resource), (long)(rlim))
+#define __sanitizer_syscall_pre_prlimit64(pid, resource, new_rlim, old_rlim) \
+ __sanitizer_syscall_pre_impl_prlimit64((long)(pid), (long)(resource), \
+ (long)(new_rlim), (long)(old_rlim))
+#define __sanitizer_syscall_post_prlimit64(res, pid, resource, new_rlim, \
+ old_rlim) \
+ __sanitizer_syscall_post_impl_prlimit64(res, (long)(pid), (long)(resource), \
+ (long)(new_rlim), (long)(old_rlim))
+#define __sanitizer_syscall_pre_getrusage(who, ru) \
+ __sanitizer_syscall_pre_impl_getrusage((long)(who), (long)(ru))
+#define __sanitizer_syscall_post_getrusage(res, who, ru) \
+ __sanitizer_syscall_post_impl_getrusage(res, (long)(who), (long)(ru))
+#define __sanitizer_syscall_pre_umask(mask) \
+ __sanitizer_syscall_pre_impl_umask((long)(mask))
+#define __sanitizer_syscall_post_umask(res, mask) \
+ __sanitizer_syscall_post_impl_umask(res, (long)(mask))
+#define __sanitizer_syscall_pre_msgget(key, msgflg) \
+ __sanitizer_syscall_pre_impl_msgget((long)(key), (long)(msgflg))
+#define __sanitizer_syscall_post_msgget(res, key, msgflg) \
+ __sanitizer_syscall_post_impl_msgget(res, (long)(key), (long)(msgflg))
+#define __sanitizer_syscall_pre_msgsnd(msqid, msgp, msgsz, msgflg) \
+ __sanitizer_syscall_pre_impl_msgsnd((long)(msqid), (long)(msgp), \
+ (long)(msgsz), (long)(msgflg))
+#define __sanitizer_syscall_post_msgsnd(res, msqid, msgp, msgsz, msgflg) \
+ __sanitizer_syscall_post_impl_msgsnd(res, (long)(msqid), (long)(msgp), \
+ (long)(msgsz), (long)(msgflg))
+#define __sanitizer_syscall_pre_msgrcv(msqid, msgp, msgsz, msgtyp, msgflg) \
+ __sanitizer_syscall_pre_impl_msgrcv((long)(msqid), (long)(msgp), \
+ (long)(msgsz), (long)(msgtyp), \
+ (long)(msgflg))
+#define __sanitizer_syscall_post_msgrcv(res, msqid, msgp, msgsz, msgtyp, \
+ msgflg) \
+ __sanitizer_syscall_post_impl_msgrcv(res, (long)(msqid), (long)(msgp), \
+ (long)(msgsz), (long)(msgtyp), \
+ (long)(msgflg))
+#define __sanitizer_syscall_pre_msgctl(msqid, cmd, buf) \
+ __sanitizer_syscall_pre_impl_msgctl((long)(msqid), (long)(cmd), (long)(buf))
+#define __sanitizer_syscall_post_msgctl(res, msqid, cmd, buf) \
+ __sanitizer_syscall_post_impl_msgctl(res, (long)(msqid), (long)(cmd), \
+ (long)(buf))
+#define __sanitizer_syscall_pre_semget(key, nsems, semflg) \
+ __sanitizer_syscall_pre_impl_semget((long)(key), (long)(nsems), \
+ (long)(semflg))
+#define __sanitizer_syscall_post_semget(res, key, nsems, semflg) \
+ __sanitizer_syscall_post_impl_semget(res, (long)(key), (long)(nsems), \
+ (long)(semflg))
+#define __sanitizer_syscall_pre_semop(semid, sops, nsops) \
+ __sanitizer_syscall_pre_impl_semop((long)(semid), (long)(sops), (long)(nsops))
+#define __sanitizer_syscall_post_semop(res, semid, sops, nsops) \
+ __sanitizer_syscall_post_impl_semop(res, (long)(semid), (long)(sops), \
+ (long)(nsops))
+#define __sanitizer_syscall_pre_semctl(semid, semnum, cmd, arg) \
+ __sanitizer_syscall_pre_impl_semctl((long)(semid), (long)(semnum), \
+ (long)(cmd), (long)(arg))
+#define __sanitizer_syscall_post_semctl(res, semid, semnum, cmd, arg) \
+ __sanitizer_syscall_post_impl_semctl(res, (long)(semid), (long)(semnum), \
+ (long)(cmd), (long)(arg))
+#define __sanitizer_syscall_pre_semtimedop(semid, sops, nsops, timeout) \
+ __sanitizer_syscall_pre_impl_semtimedop((long)(semid), (long)(sops), \
+ (long)(nsops), (long)(timeout))
+#define __sanitizer_syscall_post_semtimedop(res, semid, sops, nsops, timeout) \
+ __sanitizer_syscall_post_impl_semtimedop(res, (long)(semid), (long)(sops), \
+ (long)(nsops), (long)(timeout))
+#define __sanitizer_syscall_pre_shmat(shmid, shmaddr, shmflg) \
+ __sanitizer_syscall_pre_impl_shmat((long)(shmid), (long)(shmaddr), \
+ (long)(shmflg))
+#define __sanitizer_syscall_post_shmat(res, shmid, shmaddr, shmflg) \
+ __sanitizer_syscall_post_impl_shmat(res, (long)(shmid), (long)(shmaddr), \
+ (long)(shmflg))
+#define __sanitizer_syscall_pre_shmget(key, size, flag) \
+ __sanitizer_syscall_pre_impl_shmget((long)(key), (long)(size), (long)(flag))
+#define __sanitizer_syscall_post_shmget(res, key, size, flag) \
+ __sanitizer_syscall_post_impl_shmget(res, (long)(key), (long)(size), \
+ (long)(flag))
+#define __sanitizer_syscall_pre_shmdt(shmaddr) \
+ __sanitizer_syscall_pre_impl_shmdt((long)(shmaddr))
+#define __sanitizer_syscall_post_shmdt(res, shmaddr) \
+ __sanitizer_syscall_post_impl_shmdt(res, (long)(shmaddr))
+#define __sanitizer_syscall_pre_shmctl(shmid, cmd, buf) \
+ __sanitizer_syscall_pre_impl_shmctl((long)(shmid), (long)(cmd), (long)(buf))
+#define __sanitizer_syscall_post_shmctl(res, shmid, cmd, buf) \
+ __sanitizer_syscall_post_impl_shmctl(res, (long)(shmid), (long)(cmd), \
+ (long)(buf))
+#define __sanitizer_syscall_pre_ipc(call, first, second, third, ptr, fifth) \
+ __sanitizer_syscall_pre_impl_ipc((long)(call), (long)(first), \
+ (long)(second), (long)(third), (long)(ptr), \
+ (long)(fifth))
+#define __sanitizer_syscall_post_ipc(res, call, first, second, third, ptr, \
+ fifth) \
+ __sanitizer_syscall_post_impl_ipc(res, (long)(call), (long)(first), \
+ (long)(second), (long)(third), \
+ (long)(ptr), (long)(fifth))
+#define __sanitizer_syscall_pre_mq_open(name, oflag, mode, attr) \
+ __sanitizer_syscall_pre_impl_mq_open((long)(name), (long)(oflag), \
+ (long)(mode), (long)(attr))
+#define __sanitizer_syscall_post_mq_open(res, name, oflag, mode, attr) \
+ __sanitizer_syscall_post_impl_mq_open(res, (long)(name), (long)(oflag), \
+ (long)(mode), (long)(attr))
+#define __sanitizer_syscall_pre_mq_unlink(name) \
+ __sanitizer_syscall_pre_impl_mq_unlink((long)(name))
+#define __sanitizer_syscall_post_mq_unlink(res, name) \
+ __sanitizer_syscall_post_impl_mq_unlink(res, (long)(name))
+#define __sanitizer_syscall_pre_mq_timedsend(mqdes, msg_ptr, msg_len, \
+ msg_prio, abs_timeout) \
+ __sanitizer_syscall_pre_impl_mq_timedsend((long)(mqdes), (long)(msg_ptr), \
+ (long)(msg_len), (long)(msg_prio), \
+ (long)(abs_timeout))
+#define __sanitizer_syscall_post_mq_timedsend(res, mqdes, msg_ptr, msg_len, \
+ msg_prio, abs_timeout) \
+ __sanitizer_syscall_post_impl_mq_timedsend( \
+ res, (long)(mqdes), (long)(msg_ptr), (long)(msg_len), (long)(msg_prio), \
+ (long)(abs_timeout))
+#define __sanitizer_syscall_pre_mq_timedreceive(mqdes, msg_ptr, msg_len, \
+ msg_prio, abs_timeout) \
+ __sanitizer_syscall_pre_impl_mq_timedreceive( \
+ (long)(mqdes), (long)(msg_ptr), (long)(msg_len), (long)(msg_prio), \
+ (long)(abs_timeout))
+#define __sanitizer_syscall_post_mq_timedreceive(res, mqdes, msg_ptr, msg_len, \
+ msg_prio, abs_timeout) \
+ __sanitizer_syscall_post_impl_mq_timedreceive( \
+ res, (long)(mqdes), (long)(msg_ptr), (long)(msg_len), (long)(msg_prio), \
+ (long)(abs_timeout))
+#define __sanitizer_syscall_pre_mq_notify(mqdes, notification) \
+ __sanitizer_syscall_pre_impl_mq_notify((long)(mqdes), (long)(notification))
+#define __sanitizer_syscall_post_mq_notify(res, mqdes, notification) \
+ __sanitizer_syscall_post_impl_mq_notify(res, (long)(mqdes), \
+ (long)(notification))
+#define __sanitizer_syscall_pre_mq_getsetattr(mqdes, mqstat, omqstat) \
+ __sanitizer_syscall_pre_impl_mq_getsetattr((long)(mqdes), (long)(mqstat), \
+ (long)(omqstat))
+#define __sanitizer_syscall_post_mq_getsetattr(res, mqdes, mqstat, omqstat) \
+ __sanitizer_syscall_post_impl_mq_getsetattr(res, (long)(mqdes), \
+ (long)(mqstat), (long)(omqstat))
+#define __sanitizer_syscall_pre_pciconfig_iobase(which, bus, devfn) \
+ __sanitizer_syscall_pre_impl_pciconfig_iobase((long)(which), (long)(bus), \
+ (long)(devfn))
+#define __sanitizer_syscall_post_pciconfig_iobase(res, which, bus, devfn) \
+ __sanitizer_syscall_post_impl_pciconfig_iobase(res, (long)(which), \
+ (long)(bus), (long)(devfn))
+#define __sanitizer_syscall_pre_pciconfig_read(bus, dfn, off, len, buf) \
+ __sanitizer_syscall_pre_impl_pciconfig_read( \
+ (long)(bus), (long)(dfn), (long)(off), (long)(len), (long)(buf))
+#define __sanitizer_syscall_post_pciconfig_read(res, bus, dfn, off, len, buf) \
+ __sanitizer_syscall_post_impl_pciconfig_read( \
+ res, (long)(bus), (long)(dfn), (long)(off), (long)(len), (long)(buf))
+#define __sanitizer_syscall_pre_pciconfig_write(bus, dfn, off, len, buf) \
+ __sanitizer_syscall_pre_impl_pciconfig_write( \
+ (long)(bus), (long)(dfn), (long)(off), (long)(len), (long)(buf))
+#define __sanitizer_syscall_post_pciconfig_write(res, bus, dfn, off, len, buf) \
+ __sanitizer_syscall_post_impl_pciconfig_write( \
+ res, (long)(bus), (long)(dfn), (long)(off), (long)(len), (long)(buf))
+#define __sanitizer_syscall_pre_swapon(specialfile, swap_flags) \
+ __sanitizer_syscall_pre_impl_swapon((long)(specialfile), (long)(swap_flags))
+#define __sanitizer_syscall_post_swapon(res, specialfile, swap_flags) \
+ __sanitizer_syscall_post_impl_swapon(res, (long)(specialfile), \
+ (long)(swap_flags))
+#define __sanitizer_syscall_pre_swapoff(specialfile) \
+ __sanitizer_syscall_pre_impl_swapoff((long)(specialfile))
+#define __sanitizer_syscall_post_swapoff(res, specialfile) \
+ __sanitizer_syscall_post_impl_swapoff(res, (long)(specialfile))
+#define __sanitizer_syscall_pre_sysctl(args) \
+ __sanitizer_syscall_pre_impl_sysctl((long)(args))
+#define __sanitizer_syscall_post_sysctl(res, args) \
+ __sanitizer_syscall_post_impl_sysctl(res, (long)(args))
+#define __sanitizer_syscall_pre_sysinfo(info) \
+ __sanitizer_syscall_pre_impl_sysinfo((long)(info))
+#define __sanitizer_syscall_post_sysinfo(res, info) \
+ __sanitizer_syscall_post_impl_sysinfo(res, (long)(info))
+#define __sanitizer_syscall_pre_sysfs(option, arg1, arg2) \
+ __sanitizer_syscall_pre_impl_sysfs((long)(option), (long)(arg1), (long)(arg2))
+#define __sanitizer_syscall_post_sysfs(res, option, arg1, arg2) \
+ __sanitizer_syscall_post_impl_sysfs(res, (long)(option), (long)(arg1), \
+ (long)(arg2))
+#define __sanitizer_syscall_pre_syslog(type, buf, len) \
+ __sanitizer_syscall_pre_impl_syslog((long)(type), (long)(buf), (long)(len))
+#define __sanitizer_syscall_post_syslog(res, type, buf, len) \
+ __sanitizer_syscall_post_impl_syslog(res, (long)(type), (long)(buf), \
+ (long)(len))
+#define __sanitizer_syscall_pre_uselib(library) \
+ __sanitizer_syscall_pre_impl_uselib((long)(library))
+#define __sanitizer_syscall_post_uselib(res, library) \
+ __sanitizer_syscall_post_impl_uselib(res, (long)(library))
+#define __sanitizer_syscall_pre_ni_syscall() \
+ __sanitizer_syscall_pre_impl_ni_syscall()
+#define __sanitizer_syscall_post_ni_syscall(res) \
+ __sanitizer_syscall_post_impl_ni_syscall(res)
+#define __sanitizer_syscall_pre_ptrace(request, pid, addr, data) \
+ __sanitizer_syscall_pre_impl_ptrace((long)(request), (long)(pid), \
+ (long)(addr), (long)(data))
+#define __sanitizer_syscall_post_ptrace(res, request, pid, addr, data) \
+ __sanitizer_syscall_post_impl_ptrace(res, (long)(request), (long)(pid), \
+ (long)(addr), (long)(data))
+#define __sanitizer_syscall_pre_add_key(_type, _description, _payload, plen, \
+ destringid) \
+ __sanitizer_syscall_pre_impl_add_key((long)(_type), (long)(_description), \
+ (long)(_payload), (long)(plen), \
+ (long)(destringid))
+#define __sanitizer_syscall_post_add_key(res, _type, _description, _payload, \
+ plen, destringid) \
+ __sanitizer_syscall_post_impl_add_key( \
+ res, (long)(_type), (long)(_description), (long)(_payload), \
+ (long)(plen), (long)(destringid))
+#define __sanitizer_syscall_pre_request_key(_type, _description, \
+ _callout_info, destringid) \
+ __sanitizer_syscall_pre_impl_request_key( \
+ (long)(_type), (long)(_description), (long)(_callout_info), \
+ (long)(destringid))
+#define __sanitizer_syscall_post_request_key(res, _type, _description, \
+ _callout_info, destringid) \
+ __sanitizer_syscall_post_impl_request_key( \
+ res, (long)(_type), (long)(_description), (long)(_callout_info), \
+ (long)(destringid))
+#define __sanitizer_syscall_pre_keyctl(cmd, arg2, arg3, arg4, arg5) \
+ __sanitizer_syscall_pre_impl_keyctl((long)(cmd), (long)(arg2), (long)(arg3), \
+ (long)(arg4), (long)(arg5))
+#define __sanitizer_syscall_post_keyctl(res, cmd, arg2, arg3, arg4, arg5) \
+ __sanitizer_syscall_post_impl_keyctl(res, (long)(cmd), (long)(arg2), \
+ (long)(arg3), (long)(arg4), \
+ (long)(arg5))
+#define __sanitizer_syscall_pre_ioprio_set(which, who, ioprio) \
+ __sanitizer_syscall_pre_impl_ioprio_set((long)(which), (long)(who), \
+ (long)(ioprio))
+#define __sanitizer_syscall_post_ioprio_set(res, which, who, ioprio) \
+ __sanitizer_syscall_post_impl_ioprio_set(res, (long)(which), (long)(who), \
+ (long)(ioprio))
+#define __sanitizer_syscall_pre_ioprio_get(which, who) \
+ __sanitizer_syscall_pre_impl_ioprio_get((long)(which), (long)(who))
+#define __sanitizer_syscall_post_ioprio_get(res, which, who) \
+ __sanitizer_syscall_post_impl_ioprio_get(res, (long)(which), (long)(who))
+#define __sanitizer_syscall_pre_set_mempolicy(mode, nmask, maxnode) \
+ __sanitizer_syscall_pre_impl_set_mempolicy((long)(mode), (long)(nmask), \
+ (long)(maxnode))
+#define __sanitizer_syscall_post_set_mempolicy(res, mode, nmask, maxnode) \
+ __sanitizer_syscall_post_impl_set_mempolicy(res, (long)(mode), \
+ (long)(nmask), (long)(maxnode))
+#define __sanitizer_syscall_pre_migrate_pages(pid, maxnode, from, to) \
+ __sanitizer_syscall_pre_impl_migrate_pages((long)(pid), (long)(maxnode), \
+ (long)(from), (long)(to))
+#define __sanitizer_syscall_post_migrate_pages(res, pid, maxnode, from, to) \
+ __sanitizer_syscall_post_impl_migrate_pages( \
+ res, (long)(pid), (long)(maxnode), (long)(from), (long)(to))
+#define __sanitizer_syscall_pre_move_pages(pid, nr_pages, pages, nodes, \
+ status, flags) \
+ __sanitizer_syscall_pre_impl_move_pages((long)(pid), (long)(nr_pages), \
+ (long)(pages), (long)(nodes), \
+ (long)(status), (long)(flags))
+#define __sanitizer_syscall_post_move_pages(res, pid, nr_pages, pages, nodes, \
+ status, flags) \
+ __sanitizer_syscall_post_impl_move_pages(res, (long)(pid), (long)(nr_pages), \
+ (long)(pages), (long)(nodes), \
+ (long)(status), (long)(flags))
+#define __sanitizer_syscall_pre_mbind(start, len, mode, nmask, maxnode, flags) \
+ __sanitizer_syscall_pre_impl_mbind((long)(start), (long)(len), (long)(mode), \
+ (long)(nmask), (long)(maxnode), \
+ (long)(flags))
+#define __sanitizer_syscall_post_mbind(res, start, len, mode, nmask, maxnode, \
+ flags) \
+ __sanitizer_syscall_post_impl_mbind(res, (long)(start), (long)(len), \
+ (long)(mode), (long)(nmask), \
+ (long)(maxnode), (long)(flags))
+#define __sanitizer_syscall_pre_get_mempolicy(policy, nmask, maxnode, addr, \
+ flags) \
+ __sanitizer_syscall_pre_impl_get_mempolicy((long)(policy), (long)(nmask), \
+ (long)(maxnode), (long)(addr), \
+ (long)(flags))
+#define __sanitizer_syscall_post_get_mempolicy(res, policy, nmask, maxnode, \
+ addr, flags) \
+ __sanitizer_syscall_post_impl_get_mempolicy(res, (long)(policy), \
+ (long)(nmask), (long)(maxnode), \
+ (long)(addr), (long)(flags))
+#define __sanitizer_syscall_pre_inotify_init() \
+ __sanitizer_syscall_pre_impl_inotify_init()
+#define __sanitizer_syscall_post_inotify_init(res) \
+ __sanitizer_syscall_post_impl_inotify_init(res)
+#define __sanitizer_syscall_pre_inotify_init1(flags) \
+ __sanitizer_syscall_pre_impl_inotify_init1((long)(flags))
+#define __sanitizer_syscall_post_inotify_init1(res, flags) \
+ __sanitizer_syscall_post_impl_inotify_init1(res, (long)(flags))
+#define __sanitizer_syscall_pre_inotify_add_watch(fd, path, mask) \
+ __sanitizer_syscall_pre_impl_inotify_add_watch((long)(fd), (long)(path), \
+ (long)(mask))
+#define __sanitizer_syscall_post_inotify_add_watch(res, fd, path, mask) \
+ __sanitizer_syscall_post_impl_inotify_add_watch(res, (long)(fd), \
+ (long)(path), (long)(mask))
+#define __sanitizer_syscall_pre_inotify_rm_watch(fd, wd) \
+ __sanitizer_syscall_pre_impl_inotify_rm_watch((long)(fd), (long)(wd))
+#define __sanitizer_syscall_post_inotify_rm_watch(res, fd, wd) \
+ __sanitizer_syscall_post_impl_inotify_rm_watch(res, (long)(fd), (long)(wd))
+#define __sanitizer_syscall_pre_spu_run(fd, unpc, ustatus) \
+ __sanitizer_syscall_pre_impl_spu_run((long)(fd), (long)(unpc), \
+ (long)(ustatus))
+#define __sanitizer_syscall_post_spu_run(res, fd, unpc, ustatus) \
+ __sanitizer_syscall_post_impl_spu_run(res, (long)(fd), (long)(unpc), \
+ (long)(ustatus))
+#define __sanitizer_syscall_pre_spu_create(name, flags, mode, fd) \
+ __sanitizer_syscall_pre_impl_spu_create((long)(name), (long)(flags), \
+ (long)(mode), (long)(fd))
+#define __sanitizer_syscall_post_spu_create(res, name, flags, mode, fd) \
+ __sanitizer_syscall_post_impl_spu_create(res, (long)(name), (long)(flags), \
+ (long)(mode), (long)(fd))
+#define __sanitizer_syscall_pre_mknodat(dfd, filename, mode, dev) \
+ __sanitizer_syscall_pre_impl_mknodat((long)(dfd), (long)(filename), \
+ (long)(mode), (long)(dev))
+#define __sanitizer_syscall_post_mknodat(res, dfd, filename, mode, dev) \
+ __sanitizer_syscall_post_impl_mknodat(res, (long)(dfd), (long)(filename), \
+ (long)(mode), (long)(dev))
+#define __sanitizer_syscall_pre_mkdirat(dfd, pathname, mode) \
+ __sanitizer_syscall_pre_impl_mkdirat((long)(dfd), (long)(pathname), \
+ (long)(mode))
+#define __sanitizer_syscall_post_mkdirat(res, dfd, pathname, mode) \
+ __sanitizer_syscall_post_impl_mkdirat(res, (long)(dfd), (long)(pathname), \
+ (long)(mode))
+#define __sanitizer_syscall_pre_unlinkat(dfd, pathname, flag) \
+ __sanitizer_syscall_pre_impl_unlinkat((long)(dfd), (long)(pathname), \
+ (long)(flag))
+#define __sanitizer_syscall_post_unlinkat(res, dfd, pathname, flag) \
+ __sanitizer_syscall_post_impl_unlinkat(res, (long)(dfd), (long)(pathname), \
+ (long)(flag))
+#define __sanitizer_syscall_pre_symlinkat(oldname, newdfd, newname) \
+ __sanitizer_syscall_pre_impl_symlinkat((long)(oldname), (long)(newdfd), \
+ (long)(newname))
+#define __sanitizer_syscall_post_symlinkat(res, oldname, newdfd, newname) \
+ __sanitizer_syscall_post_impl_symlinkat(res, (long)(oldname), \
+ (long)(newdfd), (long)(newname))
+#define __sanitizer_syscall_pre_linkat(olddfd, oldname, newdfd, newname, \
+ flags) \
+ __sanitizer_syscall_pre_impl_linkat((long)(olddfd), (long)(oldname), \
+ (long)(newdfd), (long)(newname), \
+ (long)(flags))
+#define __sanitizer_syscall_post_linkat(res, olddfd, oldname, newdfd, newname, \
+ flags) \
+ __sanitizer_syscall_post_impl_linkat(res, (long)(olddfd), (long)(oldname), \
+ (long)(newdfd), (long)(newname), \
+ (long)(flags))
+#define __sanitizer_syscall_pre_renameat(olddfd, oldname, newdfd, newname) \
+ __sanitizer_syscall_pre_impl_renameat((long)(olddfd), (long)(oldname), \
+ (long)(newdfd), (long)(newname))
+#define __sanitizer_syscall_post_renameat(res, olddfd, oldname, newdfd, \
+ newname) \
+ __sanitizer_syscall_post_impl_renameat(res, (long)(olddfd), (long)(oldname), \
+ (long)(newdfd), (long)(newname))
+#define __sanitizer_syscall_pre_futimesat(dfd, filename, utimes) \
+ __sanitizer_syscall_pre_impl_futimesat((long)(dfd), (long)(filename), \
+ (long)(utimes))
+#define __sanitizer_syscall_post_futimesat(res, dfd, filename, utimes) \
+ __sanitizer_syscall_post_impl_futimesat(res, (long)(dfd), (long)(filename), \
+ (long)(utimes))
+#define __sanitizer_syscall_pre_faccessat(dfd, filename, mode) \
+ __sanitizer_syscall_pre_impl_faccessat((long)(dfd), (long)(filename), \
+ (long)(mode))
+#define __sanitizer_syscall_post_faccessat(res, dfd, filename, mode) \
+ __sanitizer_syscall_post_impl_faccessat(res, (long)(dfd), (long)(filename), \
+ (long)(mode))
+#define __sanitizer_syscall_pre_fchmodat(dfd, filename, mode) \
+ __sanitizer_syscall_pre_impl_fchmodat((long)(dfd), (long)(filename), \
+ (long)(mode))
+#define __sanitizer_syscall_post_fchmodat(res, dfd, filename, mode) \
+ __sanitizer_syscall_post_impl_fchmodat(res, (long)(dfd), (long)(filename), \
+ (long)(mode))
+#define __sanitizer_syscall_pre_fchownat(dfd, filename, user, group, flag) \
+ __sanitizer_syscall_pre_impl_fchownat((long)(dfd), (long)(filename), \
+ (long)(user), (long)(group), \
+ (long)(flag))
+#define __sanitizer_syscall_post_fchownat(res, dfd, filename, user, group, \
+ flag) \
+ __sanitizer_syscall_post_impl_fchownat(res, (long)(dfd), (long)(filename), \
+ (long)(user), (long)(group), \
+ (long)(flag))
+#define __sanitizer_syscall_pre_openat(dfd, filename, flags, mode) \
+ __sanitizer_syscall_pre_impl_openat((long)(dfd), (long)(filename), \
+ (long)(flags), (long)(mode))
+#define __sanitizer_syscall_post_openat(res, dfd, filename, flags, mode) \
+ __sanitizer_syscall_post_impl_openat(res, (long)(dfd), (long)(filename), \
+ (long)(flags), (long)(mode))
+#define __sanitizer_syscall_pre_newfstatat(dfd, filename, statbuf, flag) \
+ __sanitizer_syscall_pre_impl_newfstatat((long)(dfd), (long)(filename), \
+ (long)(statbuf), (long)(flag))
+#define __sanitizer_syscall_post_newfstatat(res, dfd, filename, statbuf, flag) \
+ __sanitizer_syscall_post_impl_newfstatat(res, (long)(dfd), (long)(filename), \
+ (long)(statbuf), (long)(flag))
+#define __sanitizer_syscall_pre_fstatat64(dfd, filename, statbuf, flag) \
+ __sanitizer_syscall_pre_impl_fstatat64((long)(dfd), (long)(filename), \
+ (long)(statbuf), (long)(flag))
+#define __sanitizer_syscall_post_fstatat64(res, dfd, filename, statbuf, flag) \
+ __sanitizer_syscall_post_impl_fstatat64(res, (long)(dfd), (long)(filename), \
+ (long)(statbuf), (long)(flag))
+#define __sanitizer_syscall_pre_readlinkat(dfd, path, buf, bufsiz) \
+ __sanitizer_syscall_pre_impl_readlinkat((long)(dfd), (long)(path), \
+ (long)(buf), (long)(bufsiz))
+#define __sanitizer_syscall_post_readlinkat(res, dfd, path, buf, bufsiz) \
+ __sanitizer_syscall_post_impl_readlinkat(res, (long)(dfd), (long)(path), \
+ (long)(buf), (long)(bufsiz))
+#define __sanitizer_syscall_pre_utimensat(dfd, filename, utimes, flags) \
+ __sanitizer_syscall_pre_impl_utimensat((long)(dfd), (long)(filename), \
+ (long)(utimes), (long)(flags))
+#define __sanitizer_syscall_post_utimensat(res, dfd, filename, utimes, flags) \
+ __sanitizer_syscall_post_impl_utimensat(res, (long)(dfd), (long)(filename), \
+ (long)(utimes), (long)(flags))
+#define __sanitizer_syscall_pre_unshare(unshare_flags) \
+ __sanitizer_syscall_pre_impl_unshare((long)(unshare_flags))
+#define __sanitizer_syscall_post_unshare(res, unshare_flags) \
+ __sanitizer_syscall_post_impl_unshare(res, (long)(unshare_flags))
+#define __sanitizer_syscall_pre_splice(fd_in, off_in, fd_out, off_out, len, \
+ flags) \
+ __sanitizer_syscall_pre_impl_splice((long)(fd_in), (long)(off_in), \
+ (long)(fd_out), (long)(off_out), \
+ (long)(len), (long)(flags))
+#define __sanitizer_syscall_post_splice(res, fd_in, off_in, fd_out, off_out, \
+ len, flags) \
+ __sanitizer_syscall_post_impl_splice(res, (long)(fd_in), (long)(off_in), \
+ (long)(fd_out), (long)(off_out), \
+ (long)(len), (long)(flags))
+#define __sanitizer_syscall_pre_vmsplice(fd, iov, nr_segs, flags) \
+ __sanitizer_syscall_pre_impl_vmsplice((long)(fd), (long)(iov), \
+ (long)(nr_segs), (long)(flags))
+#define __sanitizer_syscall_post_vmsplice(res, fd, iov, nr_segs, flags) \
+ __sanitizer_syscall_post_impl_vmsplice(res, (long)(fd), (long)(iov), \
+ (long)(nr_segs), (long)(flags))
+#define __sanitizer_syscall_pre_tee(fdin, fdout, len, flags) \
+ __sanitizer_syscall_pre_impl_tee((long)(fdin), (long)(fdout), (long)(len), \
+ (long)(flags))
+#define __sanitizer_syscall_post_tee(res, fdin, fdout, len, flags) \
+ __sanitizer_syscall_post_impl_tee(res, (long)(fdin), (long)(fdout), \
+ (long)(len), (long)(flags))
+#define __sanitizer_syscall_pre_get_robust_list(pid, head_ptr, len_ptr) \
+ __sanitizer_syscall_pre_impl_get_robust_list((long)(pid), (long)(head_ptr), \
+ (long)(len_ptr))
+#define __sanitizer_syscall_post_get_robust_list(res, pid, head_ptr, len_ptr) \
+ __sanitizer_syscall_post_impl_get_robust_list( \
+ res, (long)(pid), (long)(head_ptr), (long)(len_ptr))
+#define __sanitizer_syscall_pre_set_robust_list(head, len) \
+ __sanitizer_syscall_pre_impl_set_robust_list((long)(head), (long)(len))
+#define __sanitizer_syscall_post_set_robust_list(res, head, len) \
+ __sanitizer_syscall_post_impl_set_robust_list(res, (long)(head), (long)(len))
+#define __sanitizer_syscall_pre_getcpu(cpu, node, cache) \
+ __sanitizer_syscall_pre_impl_getcpu((long)(cpu), (long)(node), (long)(cache))
+#define __sanitizer_syscall_post_getcpu(res, cpu, node, cache) \
+ __sanitizer_syscall_post_impl_getcpu(res, (long)(cpu), (long)(node), \
+ (long)(cache))
+#define __sanitizer_syscall_pre_signalfd(ufd, user_mask, sizemask) \
+ __sanitizer_syscall_pre_impl_signalfd((long)(ufd), (long)(user_mask), \
+ (long)(sizemask))
+#define __sanitizer_syscall_post_signalfd(res, ufd, user_mask, sizemask) \
+ __sanitizer_syscall_post_impl_signalfd(res, (long)(ufd), (long)(user_mask), \
+ (long)(sizemask))
+#define __sanitizer_syscall_pre_signalfd4(ufd, user_mask, sizemask, flags) \
+ __sanitizer_syscall_pre_impl_signalfd4((long)(ufd), (long)(user_mask), \
+ (long)(sizemask), (long)(flags))
+#define __sanitizer_syscall_post_signalfd4(res, ufd, user_mask, sizemask, \
+ flags) \
+ __sanitizer_syscall_post_impl_signalfd4(res, (long)(ufd), (long)(user_mask), \
+ (long)(sizemask), (long)(flags))
+#define __sanitizer_syscall_pre_timerfd_create(clockid, flags) \
+ __sanitizer_syscall_pre_impl_timerfd_create((long)(clockid), (long)(flags))
+#define __sanitizer_syscall_post_timerfd_create(res, clockid, flags) \
+ __sanitizer_syscall_post_impl_timerfd_create(res, (long)(clockid), \
+ (long)(flags))
+#define __sanitizer_syscall_pre_timerfd_settime(ufd, flags, utmr, otmr) \
+ __sanitizer_syscall_pre_impl_timerfd_settime((long)(ufd), (long)(flags), \
+ (long)(utmr), (long)(otmr))
+#define __sanitizer_syscall_post_timerfd_settime(res, ufd, flags, utmr, otmr) \
+ __sanitizer_syscall_post_impl_timerfd_settime( \
+ res, (long)(ufd), (long)(flags), (long)(utmr), (long)(otmr))
+#define __sanitizer_syscall_pre_timerfd_gettime(ufd, otmr) \
+ __sanitizer_syscall_pre_impl_timerfd_gettime((long)(ufd), (long)(otmr))
+#define __sanitizer_syscall_post_timerfd_gettime(res, ufd, otmr) \
+ __sanitizer_syscall_post_impl_timerfd_gettime(res, (long)(ufd), (long)(otmr))
+#define __sanitizer_syscall_pre_eventfd(count) \
+ __sanitizer_syscall_pre_impl_eventfd((long)(count))
+#define __sanitizer_syscall_post_eventfd(res, count) \
+ __sanitizer_syscall_post_impl_eventfd(res, (long)(count))
+#define __sanitizer_syscall_pre_eventfd2(count, flags) \
+ __sanitizer_syscall_pre_impl_eventfd2((long)(count), (long)(flags))
+#define __sanitizer_syscall_post_eventfd2(res, count, flags) \
+ __sanitizer_syscall_post_impl_eventfd2(res, (long)(count), (long)(flags))
+#define __sanitizer_syscall_pre_old_readdir(arg0, arg1, arg2) \
+ __sanitizer_syscall_pre_impl_old_readdir((long)(arg0), (long)(arg1), \
+ (long)(arg2))
+#define __sanitizer_syscall_post_old_readdir(res, arg0, arg1, arg2) \
+ __sanitizer_syscall_post_impl_old_readdir(res, (long)(arg0), (long)(arg1), \
+ (long)(arg2))
+#define __sanitizer_syscall_pre_pselect6(arg0, arg1, arg2, arg3, arg4, arg5) \
+ __sanitizer_syscall_pre_impl_pselect6((long)(arg0), (long)(arg1), \
+ (long)(arg2), (long)(arg3), \
+ (long)(arg4), (long)(arg5))
+#define __sanitizer_syscall_post_pselect6(res, arg0, arg1, arg2, arg3, arg4, \
+ arg5) \
+ __sanitizer_syscall_post_impl_pselect6(res, (long)(arg0), (long)(arg1), \
+ (long)(arg2), (long)(arg3), \
+ (long)(arg4), (long)(arg5))
+#define __sanitizer_syscall_pre_ppoll(arg0, arg1, arg2, arg3, arg4) \
+ __sanitizer_syscall_pre_impl_ppoll((long)(arg0), (long)(arg1), (long)(arg2), \
+ (long)(arg3), (long)(arg4))
+#define __sanitizer_syscall_post_ppoll(res, arg0, arg1, arg2, arg3, arg4) \
+ __sanitizer_syscall_post_impl_ppoll(res, (long)(arg0), (long)(arg1), \
+ (long)(arg2), (long)(arg3), \
+ (long)(arg4))
+#define __sanitizer_syscall_pre_syncfs(fd) \
+ __sanitizer_syscall_pre_impl_syncfs((long)(fd))
+#define __sanitizer_syscall_post_syncfs(res, fd) \
+ __sanitizer_syscall_post_impl_syncfs(res, (long)(fd))
+#define __sanitizer_syscall_pre_perf_event_open(attr_uptr, pid, cpu, group_fd, \
+ flags) \
+ __sanitizer_syscall_pre_impl_perf_event_open((long)(attr_uptr), (long)(pid), \
+ (long)(cpu), (long)(group_fd), \
+ (long)(flags))
+#define __sanitizer_syscall_post_perf_event_open(res, attr_uptr, pid, cpu, \
+ group_fd, flags) \
+ __sanitizer_syscall_post_impl_perf_event_open( \
+ res, (long)(attr_uptr), (long)(pid), (long)(cpu), (long)(group_fd), \
+ (long)(flags))
+#define __sanitizer_syscall_pre_mmap_pgoff(addr, len, prot, flags, fd, pgoff) \
+ __sanitizer_syscall_pre_impl_mmap_pgoff((long)(addr), (long)(len), \
+ (long)(prot), (long)(flags), \
+ (long)(fd), (long)(pgoff))
+#define __sanitizer_syscall_post_mmap_pgoff(res, addr, len, prot, flags, fd, \
+ pgoff) \
+ __sanitizer_syscall_post_impl_mmap_pgoff(res, (long)(addr), (long)(len), \
+ (long)(prot), (long)(flags), \
+ (long)(fd), (long)(pgoff))
+#define __sanitizer_syscall_pre_old_mmap(arg) \
+ __sanitizer_syscall_pre_impl_old_mmap((long)(arg))
+#define __sanitizer_syscall_post_old_mmap(res, arg) \
+ __sanitizer_syscall_post_impl_old_mmap(res, (long)(arg))
+#define __sanitizer_syscall_pre_name_to_handle_at(dfd, name, handle, mnt_id, \
+ flag) \
+ __sanitizer_syscall_pre_impl_name_to_handle_at( \
+ (long)(dfd), (long)(name), (long)(handle), (long)(mnt_id), (long)(flag))
+#define __sanitizer_syscall_post_name_to_handle_at(res, dfd, name, handle, \
+ mnt_id, flag) \
+ __sanitizer_syscall_post_impl_name_to_handle_at( \
+ res, (long)(dfd), (long)(name), (long)(handle), (long)(mnt_id), \
+ (long)(flag))
+#define __sanitizer_syscall_pre_open_by_handle_at(mountdirfd, handle, flags) \
+ __sanitizer_syscall_pre_impl_open_by_handle_at( \
+ (long)(mountdirfd), (long)(handle), (long)(flags))
+#define __sanitizer_syscall_post_open_by_handle_at(res, mountdirfd, handle, \
+ flags) \
+ __sanitizer_syscall_post_impl_open_by_handle_at( \
+ res, (long)(mountdirfd), (long)(handle), (long)(flags))
+#define __sanitizer_syscall_pre_setns(fd, nstype) \
+ __sanitizer_syscall_pre_impl_setns((long)(fd), (long)(nstype))
+#define __sanitizer_syscall_post_setns(res, fd, nstype) \
+ __sanitizer_syscall_post_impl_setns(res, (long)(fd), (long)(nstype))
+#define __sanitizer_syscall_pre_process_vm_readv(pid, lvec, liovcnt, rvec, \
+ riovcnt, flags) \
+ __sanitizer_syscall_pre_impl_process_vm_readv( \
+ (long)(pid), (long)(lvec), (long)(liovcnt), (long)(rvec), \
+ (long)(riovcnt), (long)(flags))
+#define __sanitizer_syscall_post_process_vm_readv(res, pid, lvec, liovcnt, \
+ rvec, riovcnt, flags) \
+ __sanitizer_syscall_post_impl_process_vm_readv( \
+ res, (long)(pid), (long)(lvec), (long)(liovcnt), (long)(rvec), \
+ (long)(riovcnt), (long)(flags))
+#define __sanitizer_syscall_pre_process_vm_writev(pid, lvec, liovcnt, rvec, \
+ riovcnt, flags) \
+ __sanitizer_syscall_pre_impl_process_vm_writev( \
+ (long)(pid), (long)(lvec), (long)(liovcnt), (long)(rvec), \
+ (long)(riovcnt), (long)(flags))
+#define __sanitizer_syscall_post_process_vm_writev(res, pid, lvec, liovcnt, \
+ rvec, riovcnt, flags) \
+ __sanitizer_syscall_post_impl_process_vm_writev( \
+ res, (long)(pid), (long)(lvec), (long)(liovcnt), (long)(rvec), \
+ (long)(riovcnt), (long)(flags))
+#define __sanitizer_syscall_pre_fork() \
+ __sanitizer_syscall_pre_impl_fork()
+#define __sanitizer_syscall_post_fork(res) \
+ __sanitizer_syscall_post_impl_fork(res)
+#define __sanitizer_syscall_pre_vfork() \
+ __sanitizer_syscall_pre_impl_vfork()
+#define __sanitizer_syscall_post_vfork(res) \
+ __sanitizer_syscall_post_impl_vfork(res)
+
+// And now a few syscalls we don't handle yet.
+#define __sanitizer_syscall_pre_afs_syscall(...)
+#define __sanitizer_syscall_pre_arch_prctl(...)
+#define __sanitizer_syscall_pre_break(...)
+#define __sanitizer_syscall_pre_chown32(...)
+#define __sanitizer_syscall_pre_clone(...)
+#define __sanitizer_syscall_pre_create_module(...)
+#define __sanitizer_syscall_pre_epoll_ctl_old(...)
+#define __sanitizer_syscall_pre_epoll_wait_old(...)
+#define __sanitizer_syscall_pre_execve(...)
+#define __sanitizer_syscall_pre_fadvise64(...)
+#define __sanitizer_syscall_pre_fadvise64_64(...)
+#define __sanitizer_syscall_pre_fallocate(...)
+#define __sanitizer_syscall_pre_fanotify_init(...)
+#define __sanitizer_syscall_pre_fanotify_mark(...)
+#define __sanitizer_syscall_pre_fchown32(...)
+#define __sanitizer_syscall_pre_ftime(...)
+#define __sanitizer_syscall_pre_ftruncate64(...)
+#define __sanitizer_syscall_pre_futex(...)
+#define __sanitizer_syscall_pre_getegid32(...)
+#define __sanitizer_syscall_pre_geteuid32(...)
+#define __sanitizer_syscall_pre_getgid32(...)
+#define __sanitizer_syscall_pre_getgroups32(...)
+#define __sanitizer_syscall_pre_get_kernel_syms(...)
+#define __sanitizer_syscall_pre_getpmsg(...)
+#define __sanitizer_syscall_pre_getresgid32(...)
+#define __sanitizer_syscall_pre_getresuid32(...)
+#define __sanitizer_syscall_pre_get_thread_area(...)
+#define __sanitizer_syscall_pre_getuid32(...)
+#define __sanitizer_syscall_pre_gtty(...)
+#define __sanitizer_syscall_pre_idle(...)
+#define __sanitizer_syscall_pre_iopl(...)
+#define __sanitizer_syscall_pre_lchown32(...)
+#define __sanitizer_syscall_pre__llseek(...)
+#define __sanitizer_syscall_pre_lock(...)
+#define __sanitizer_syscall_pre_madvise1(...)
+#define __sanitizer_syscall_pre_mmap(...)
+#define __sanitizer_syscall_pre_mmap2(...)
+#define __sanitizer_syscall_pre_modify_ldt(...)
+#define __sanitizer_syscall_pre_mpx(...)
+#define __sanitizer_syscall_pre__newselect(...)
+#define __sanitizer_syscall_pre_nfsservctl(...)
+#define __sanitizer_syscall_pre_oldfstat(...)
+#define __sanitizer_syscall_pre_oldlstat(...)
+#define __sanitizer_syscall_pre_oldolduname(...)
+#define __sanitizer_syscall_pre_oldstat(...)
+#define __sanitizer_syscall_pre_prctl(...)
+#define __sanitizer_syscall_pre_prof(...)
+#define __sanitizer_syscall_pre_profil(...)
+#define __sanitizer_syscall_pre_putpmsg(...)
+#define __sanitizer_syscall_pre_query_module(...)
+#define __sanitizer_syscall_pre_readahead(...)
+#define __sanitizer_syscall_pre_readdir(...)
+#define __sanitizer_syscall_pre_rt_sigaction(...)
+#define __sanitizer_syscall_pre_rt_sigreturn(...)
+#define __sanitizer_syscall_pre_rt_sigsuspend(...)
+#define __sanitizer_syscall_pre_security(...)
+#define __sanitizer_syscall_pre_setfsgid32(...)
+#define __sanitizer_syscall_pre_setfsuid32(...)
+#define __sanitizer_syscall_pre_setgid32(...)
+#define __sanitizer_syscall_pre_setgroups32(...)
+#define __sanitizer_syscall_pre_setregid32(...)
+#define __sanitizer_syscall_pre_setresgid32(...)
+#define __sanitizer_syscall_pre_setresuid32(...)
+#define __sanitizer_syscall_pre_setreuid32(...)
+#define __sanitizer_syscall_pre_set_thread_area(...)
+#define __sanitizer_syscall_pre_setuid32(...)
+#define __sanitizer_syscall_pre_sigaction(...)
+#define __sanitizer_syscall_pre_sigaltstack(...)
+#define __sanitizer_syscall_pre_sigreturn(...)
+#define __sanitizer_syscall_pre_sigsuspend(...)
+#define __sanitizer_syscall_pre_stty(...)
+#define __sanitizer_syscall_pre_sync_file_range(...)
+#define __sanitizer_syscall_pre__sysctl(...)
+#define __sanitizer_syscall_pre_truncate64(...)
+#define __sanitizer_syscall_pre_tuxcall(...)
+#define __sanitizer_syscall_pre_ugetrlimit(...)
+#define __sanitizer_syscall_pre_ulimit(...)
+#define __sanitizer_syscall_pre_umount2(...)
+#define __sanitizer_syscall_pre_vm86(...)
+#define __sanitizer_syscall_pre_vm86old(...)
+#define __sanitizer_syscall_pre_vserver(...)
+
+#define __sanitizer_syscall_post_afs_syscall(res, ...)
+#define __sanitizer_syscall_post_arch_prctl(res, ...)
+#define __sanitizer_syscall_post_break(res, ...)
+#define __sanitizer_syscall_post_chown32(res, ...)
+#define __sanitizer_syscall_post_clone(res, ...)
+#define __sanitizer_syscall_post_create_module(res, ...)
+#define __sanitizer_syscall_post_epoll_ctl_old(res, ...)
+#define __sanitizer_syscall_post_epoll_wait_old(res, ...)
+#define __sanitizer_syscall_post_execve(res, ...)
+#define __sanitizer_syscall_post_fadvise64(res, ...)
+#define __sanitizer_syscall_post_fadvise64_64(res, ...)
+#define __sanitizer_syscall_post_fallocate(res, ...)
+#define __sanitizer_syscall_post_fanotify_init(res, ...)
+#define __sanitizer_syscall_post_fanotify_mark(res, ...)
+#define __sanitizer_syscall_post_fchown32(res, ...)
+#define __sanitizer_syscall_post_ftime(res, ...)
+#define __sanitizer_syscall_post_ftruncate64(res, ...)
+#define __sanitizer_syscall_post_futex(res, ...)
+#define __sanitizer_syscall_post_getegid32(res, ...)
+#define __sanitizer_syscall_post_geteuid32(res, ...)
+#define __sanitizer_syscall_post_getgid32(res, ...)
+#define __sanitizer_syscall_post_getgroups32(res, ...)
+#define __sanitizer_syscall_post_get_kernel_syms(res, ...)
+#define __sanitizer_syscall_post_getpmsg(res, ...)
+#define __sanitizer_syscall_post_getresgid32(res, ...)
+#define __sanitizer_syscall_post_getresuid32(res, ...)
+#define __sanitizer_syscall_post_get_thread_area(res, ...)
+#define __sanitizer_syscall_post_getuid32(res, ...)
+#define __sanitizer_syscall_post_gtty(res, ...)
+#define __sanitizer_syscall_post_idle(res, ...)
+#define __sanitizer_syscall_post_iopl(res, ...)
+#define __sanitizer_syscall_post_lchown32(res, ...)
+#define __sanitizer_syscall_post__llseek(res, ...)
+#define __sanitizer_syscall_post_lock(res, ...)
+#define __sanitizer_syscall_post_madvise1(res, ...)
+#define __sanitizer_syscall_post_mmap2(res, ...)
+#define __sanitizer_syscall_post_mmap(res, ...)
+#define __sanitizer_syscall_post_modify_ldt(res, ...)
+#define __sanitizer_syscall_post_mpx(res, ...)
+#define __sanitizer_syscall_post__newselect(res, ...)
+#define __sanitizer_syscall_post_nfsservctl(res, ...)
+#define __sanitizer_syscall_post_oldfstat(res, ...)
+#define __sanitizer_syscall_post_oldlstat(res, ...)
+#define __sanitizer_syscall_post_oldolduname(res, ...)
+#define __sanitizer_syscall_post_oldstat(res, ...)
+#define __sanitizer_syscall_post_prctl(res, ...)
+#define __sanitizer_syscall_post_profil(res, ...)
+#define __sanitizer_syscall_post_prof(res, ...)
+#define __sanitizer_syscall_post_putpmsg(res, ...)
+#define __sanitizer_syscall_post_query_module(res, ...)
+#define __sanitizer_syscall_post_readahead(res, ...)
+#define __sanitizer_syscall_post_readdir(res, ...)
+#define __sanitizer_syscall_post_rt_sigaction(res, ...)
+#define __sanitizer_syscall_post_rt_sigreturn(res, ...)
+#define __sanitizer_syscall_post_rt_sigsuspend(res, ...)
+#define __sanitizer_syscall_post_security(res, ...)
+#define __sanitizer_syscall_post_setfsgid32(res, ...)
+#define __sanitizer_syscall_post_setfsuid32(res, ...)
+#define __sanitizer_syscall_post_setgid32(res, ...)
+#define __sanitizer_syscall_post_setgroups32(res, ...)
+#define __sanitizer_syscall_post_setregid32(res, ...)
+#define __sanitizer_syscall_post_setresgid32(res, ...)
+#define __sanitizer_syscall_post_setresuid32(res, ...)
+#define __sanitizer_syscall_post_setreuid32(res, ...)
+#define __sanitizer_syscall_post_set_thread_area(res, ...)
+#define __sanitizer_syscall_post_setuid32(res, ...)
+#define __sanitizer_syscall_post_sigaction(res, ...)
+#define __sanitizer_syscall_post_sigaltstack(res, ...)
+#define __sanitizer_syscall_post_sigreturn(res, ...)
+#define __sanitizer_syscall_post_sigsuspend(res, ...)
+#define __sanitizer_syscall_post_stty(res, ...)
+#define __sanitizer_syscall_post_sync_file_range(res, ...)
+#define __sanitizer_syscall_post__sysctl(res, ...)
+#define __sanitizer_syscall_post_truncate64(res, ...)
+#define __sanitizer_syscall_post_tuxcall(res, ...)
+#define __sanitizer_syscall_post_ugetrlimit(res, ...)
+#define __sanitizer_syscall_post_ulimit(res, ...)
+#define __sanitizer_syscall_post_umount2(res, ...)
+#define __sanitizer_syscall_post_vm86old(res, ...)
+#define __sanitizer_syscall_post_vm86(res, ...)
+#define __sanitizer_syscall_post_vserver(res, ...)
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+// Private declarations. Do not call directly from user code. Use macros above.
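+//
+// For example (illustrative only), the unlink hooks: the
+// __sanitizer_syscall_pre_unlink(path) macro, defined earlier in this header
+// (outside this excerpt), performs the (long) casts itself, which is why it
+// is preferred over calling the impl function declared below by hand:
+//
+//   __sanitizer_syscall_pre_unlink(path);               // preferred
+//   __sanitizer_syscall_pre_impl_unlink((long)(path));  // manual equivalent
+//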
+void __sanitizer_syscall_pre_impl_time(long tloc);
+void __sanitizer_syscall_post_impl_time(long res, long tloc);
+void __sanitizer_syscall_pre_impl_stime(long tptr);
+void __sanitizer_syscall_post_impl_stime(long res, long tptr);
+void __sanitizer_syscall_pre_impl_gettimeofday(long tv, long tz);
+void __sanitizer_syscall_post_impl_gettimeofday(long res, long tv, long tz);
+void __sanitizer_syscall_pre_impl_settimeofday(long tv, long tz);
+void __sanitizer_syscall_post_impl_settimeofday(long res, long tv, long tz);
+void __sanitizer_syscall_pre_impl_adjtimex(long txc_p);
+void __sanitizer_syscall_post_impl_adjtimex(long res, long txc_p);
+void __sanitizer_syscall_pre_impl_times(long tbuf);
+void __sanitizer_syscall_post_impl_times(long res, long tbuf);
+void __sanitizer_syscall_pre_impl_gettid();
+void __sanitizer_syscall_post_impl_gettid(long res);
+void __sanitizer_syscall_pre_impl_nanosleep(long rqtp, long rmtp);
+void __sanitizer_syscall_post_impl_nanosleep(long res, long rqtp, long rmtp);
+void __sanitizer_syscall_pre_impl_alarm(long seconds);
+void __sanitizer_syscall_post_impl_alarm(long res, long seconds);
+void __sanitizer_syscall_pre_impl_getpid();
+void __sanitizer_syscall_post_impl_getpid(long res);
+void __sanitizer_syscall_pre_impl_getppid();
+void __sanitizer_syscall_post_impl_getppid(long res);
+void __sanitizer_syscall_pre_impl_getuid();
+void __sanitizer_syscall_post_impl_getuid(long res);
+void __sanitizer_syscall_pre_impl_geteuid();
+void __sanitizer_syscall_post_impl_geteuid(long res);
+void __sanitizer_syscall_pre_impl_getgid();
+void __sanitizer_syscall_post_impl_getgid(long res);
+void __sanitizer_syscall_pre_impl_getegid();
+void __sanitizer_syscall_post_impl_getegid(long res);
+void __sanitizer_syscall_pre_impl_getresuid(long ruid, long euid, long suid);
+void __sanitizer_syscall_post_impl_getresuid(long res, long ruid, long euid,
+ long suid);
+void __sanitizer_syscall_pre_impl_getresgid(long rgid, long egid, long sgid);
+void __sanitizer_syscall_post_impl_getresgid(long res, long rgid, long egid,
+ long sgid);
+void __sanitizer_syscall_pre_impl_getpgid(long pid);
+void __sanitizer_syscall_post_impl_getpgid(long res, long pid);
+void __sanitizer_syscall_pre_impl_getpgrp();
+void __sanitizer_syscall_post_impl_getpgrp(long res);
+void __sanitizer_syscall_pre_impl_getsid(long pid);
+void __sanitizer_syscall_post_impl_getsid(long res, long pid);
+void __sanitizer_syscall_pre_impl_getgroups(long gidsetsize, long grouplist);
+void __sanitizer_syscall_post_impl_getgroups(long res, long gidsetsize,
+ long grouplist);
+void __sanitizer_syscall_pre_impl_setregid(long rgid, long egid);
+void __sanitizer_syscall_post_impl_setregid(long res, long rgid, long egid);
+void __sanitizer_syscall_pre_impl_setgid(long gid);
+void __sanitizer_syscall_post_impl_setgid(long res, long gid);
+void __sanitizer_syscall_pre_impl_setreuid(long ruid, long euid);
+void __sanitizer_syscall_post_impl_setreuid(long res, long ruid, long euid);
+void __sanitizer_syscall_pre_impl_setuid(long uid);
+void __sanitizer_syscall_post_impl_setuid(long res, long uid);
+void __sanitizer_syscall_pre_impl_setresuid(long ruid, long euid, long suid);
+void __sanitizer_syscall_post_impl_setresuid(long res, long ruid, long euid,
+ long suid);
+void __sanitizer_syscall_pre_impl_setresgid(long rgid, long egid, long sgid);
+void __sanitizer_syscall_post_impl_setresgid(long res, long rgid, long egid,
+ long sgid);
+void __sanitizer_syscall_pre_impl_setfsuid(long uid);
+void __sanitizer_syscall_post_impl_setfsuid(long res, long uid);
+void __sanitizer_syscall_pre_impl_setfsgid(long gid);
+void __sanitizer_syscall_post_impl_setfsgid(long res, long gid);
+void __sanitizer_syscall_pre_impl_setpgid(long pid, long pgid);
+void __sanitizer_syscall_post_impl_setpgid(long res, long pid, long pgid);
+void __sanitizer_syscall_pre_impl_setsid();
+void __sanitizer_syscall_post_impl_setsid(long res);
+void __sanitizer_syscall_pre_impl_setgroups(long gidsetsize, long grouplist);
+void __sanitizer_syscall_post_impl_setgroups(long res, long gidsetsize,
+ long grouplist);
+void __sanitizer_syscall_pre_impl_acct(long name);
+void __sanitizer_syscall_post_impl_acct(long res, long name);
+void __sanitizer_syscall_pre_impl_capget(long header, long dataptr);
+void __sanitizer_syscall_post_impl_capget(long res, long header, long dataptr);
+void __sanitizer_syscall_pre_impl_capset(long header, long data);
+void __sanitizer_syscall_post_impl_capset(long res, long header, long data);
+void __sanitizer_syscall_pre_impl_personality(long personality);
+void __sanitizer_syscall_post_impl_personality(long res, long personality);
+void __sanitizer_syscall_pre_impl_sigpending(long set);
+void __sanitizer_syscall_post_impl_sigpending(long res, long set);
+void __sanitizer_syscall_pre_impl_sigprocmask(long how, long set, long oset);
+void __sanitizer_syscall_post_impl_sigprocmask(long res, long how, long set,
+ long oset);
+void __sanitizer_syscall_pre_impl_getitimer(long which, long value);
+void __sanitizer_syscall_post_impl_getitimer(long res, long which, long value);
+void __sanitizer_syscall_pre_impl_setitimer(long which, long value,
+ long ovalue);
+void __sanitizer_syscall_post_impl_setitimer(long res, long which, long value,
+ long ovalue);
+void __sanitizer_syscall_pre_impl_timer_create(long which_clock,
+ long timer_event_spec,
+ long created_timer_id);
+void __sanitizer_syscall_post_impl_timer_create(long res, long which_clock,
+ long timer_event_spec,
+ long created_timer_id);
+void __sanitizer_syscall_pre_impl_timer_gettime(long timer_id, long setting);
+void __sanitizer_syscall_post_impl_timer_gettime(long res, long timer_id,
+ long setting);
+void __sanitizer_syscall_pre_impl_timer_getoverrun(long timer_id);
+void __sanitizer_syscall_post_impl_timer_getoverrun(long res, long timer_id);
+void __sanitizer_syscall_pre_impl_timer_settime(long timer_id, long flags,
+ long new_setting,
+ long old_setting);
+void __sanitizer_syscall_post_impl_timer_settime(long res, long timer_id,
+ long flags, long new_setting,
+ long old_setting);
+void __sanitizer_syscall_pre_impl_timer_delete(long timer_id);
+void __sanitizer_syscall_post_impl_timer_delete(long res, long timer_id);
+void __sanitizer_syscall_pre_impl_clock_settime(long which_clock, long tp);
+void __sanitizer_syscall_post_impl_clock_settime(long res, long which_clock,
+ long tp);
+void __sanitizer_syscall_pre_impl_clock_gettime(long which_clock, long tp);
+void __sanitizer_syscall_post_impl_clock_gettime(long res, long which_clock,
+ long tp);
+void __sanitizer_syscall_pre_impl_clock_adjtime(long which_clock, long tx);
+void __sanitizer_syscall_post_impl_clock_adjtime(long res, long which_clock,
+ long tx);
+void __sanitizer_syscall_pre_impl_clock_getres(long which_clock, long tp);
+void __sanitizer_syscall_post_impl_clock_getres(long res, long which_clock,
+ long tp);
+void __sanitizer_syscall_pre_impl_clock_nanosleep(long which_clock, long flags,
+ long rqtp, long rmtp);
+void __sanitizer_syscall_post_impl_clock_nanosleep(long res, long which_clock,
+ long flags, long rqtp,
+ long rmtp);
+void __sanitizer_syscall_pre_impl_nice(long increment);
+void __sanitizer_syscall_post_impl_nice(long res, long increment);
+void __sanitizer_syscall_pre_impl_sched_setscheduler(long pid, long policy,
+ long param);
+void __sanitizer_syscall_post_impl_sched_setscheduler(long res, long pid,
+ long policy, long param);
+void __sanitizer_syscall_pre_impl_sched_setparam(long pid, long param);
+void __sanitizer_syscall_post_impl_sched_setparam(long res, long pid,
+ long param);
+void __sanitizer_syscall_pre_impl_sched_getscheduler(long pid);
+void __sanitizer_syscall_post_impl_sched_getscheduler(long res, long pid);
+void __sanitizer_syscall_pre_impl_sched_getparam(long pid, long param);
+void __sanitizer_syscall_post_impl_sched_getparam(long res, long pid,
+ long param);
+void __sanitizer_syscall_pre_impl_sched_setaffinity(long pid, long len,
+ long user_mask_ptr);
+void __sanitizer_syscall_post_impl_sched_setaffinity(long res, long pid,
+ long len,
+ long user_mask_ptr);
+void __sanitizer_syscall_pre_impl_sched_getaffinity(long pid, long len,
+ long user_mask_ptr);
+void __sanitizer_syscall_post_impl_sched_getaffinity(long res, long pid,
+ long len,
+ long user_mask_ptr);
+void __sanitizer_syscall_pre_impl_sched_yield();
+void __sanitizer_syscall_post_impl_sched_yield(long res);
+void __sanitizer_syscall_pre_impl_sched_get_priority_max(long policy);
+void __sanitizer_syscall_post_impl_sched_get_priority_max(long res,
+ long policy);
+void __sanitizer_syscall_pre_impl_sched_get_priority_min(long policy);
+void __sanitizer_syscall_post_impl_sched_get_priority_min(long res,
+ long policy);
+void __sanitizer_syscall_pre_impl_sched_rr_get_interval(long pid,
+ long interval);
+void __sanitizer_syscall_post_impl_sched_rr_get_interval(long res, long pid,
+ long interval);
+void __sanitizer_syscall_pre_impl_setpriority(long which, long who,
+ long niceval);
+void __sanitizer_syscall_post_impl_setpriority(long res, long which, long who,
+ long niceval);
+void __sanitizer_syscall_pre_impl_getpriority(long which, long who);
+void __sanitizer_syscall_post_impl_getpriority(long res, long which, long who);
+void __sanitizer_syscall_pre_impl_shutdown(long arg0, long arg1);
+void __sanitizer_syscall_post_impl_shutdown(long res, long arg0, long arg1);
+void __sanitizer_syscall_pre_impl_reboot(long magic1, long magic2, long cmd,
+ long arg);
+void __sanitizer_syscall_post_impl_reboot(long res, long magic1, long magic2,
+ long cmd, long arg);
+void __sanitizer_syscall_pre_impl_restart_syscall();
+void __sanitizer_syscall_post_impl_restart_syscall(long res);
+void __sanitizer_syscall_pre_impl_kexec_load(long entry, long nr_segments,
+ long segments, long flags);
+void __sanitizer_syscall_post_impl_kexec_load(long res, long entry,
+ long nr_segments, long segments,
+ long flags);
+void __sanitizer_syscall_pre_impl_exit(long error_code);
+void __sanitizer_syscall_post_impl_exit(long res, long error_code);
+void __sanitizer_syscall_pre_impl_exit_group(long error_code);
+void __sanitizer_syscall_post_impl_exit_group(long res, long error_code);
+void __sanitizer_syscall_pre_impl_wait4(long pid, long stat_addr, long options,
+ long ru);
+void __sanitizer_syscall_post_impl_wait4(long res, long pid, long stat_addr,
+ long options, long ru);
+void __sanitizer_syscall_pre_impl_waitid(long which, long pid, long infop,
+ long options, long ru);
+void __sanitizer_syscall_post_impl_waitid(long res, long which, long pid,
+ long infop, long options, long ru);
+void __sanitizer_syscall_pre_impl_waitpid(long pid, long stat_addr,
+ long options);
+void __sanitizer_syscall_post_impl_waitpid(long res, long pid, long stat_addr,
+ long options);
+void __sanitizer_syscall_pre_impl_set_tid_address(long tidptr);
+void __sanitizer_syscall_post_impl_set_tid_address(long res, long tidptr);
+void __sanitizer_syscall_pre_impl_init_module(long umod, long len, long uargs);
+void __sanitizer_syscall_post_impl_init_module(long res, long umod, long len,
+ long uargs);
+void __sanitizer_syscall_pre_impl_delete_module(long name_user, long flags);
+void __sanitizer_syscall_post_impl_delete_module(long res, long name_user,
+ long flags);
+void __sanitizer_syscall_pre_impl_rt_sigprocmask(long how, long set, long oset,
+ long sigsetsize);
+void __sanitizer_syscall_post_impl_rt_sigprocmask(long res, long how, long set,
+ long oset, long sigsetsize);
+void __sanitizer_syscall_pre_impl_rt_sigpending(long set, long sigsetsize);
+void __sanitizer_syscall_post_impl_rt_sigpending(long res, long set,
+ long sigsetsize);
+void __sanitizer_syscall_pre_impl_rt_sigtimedwait(long uthese, long uinfo,
+ long uts, long sigsetsize);
+void __sanitizer_syscall_post_impl_rt_sigtimedwait(long res, long uthese,
+ long uinfo, long uts,
+ long sigsetsize);
+void __sanitizer_syscall_pre_impl_rt_tgsigqueueinfo(long tgid, long pid,
+ long sig, long uinfo);
+void __sanitizer_syscall_post_impl_rt_tgsigqueueinfo(long res, long tgid,
+ long pid, long sig,
+ long uinfo);
+void __sanitizer_syscall_pre_impl_kill(long pid, long sig);
+void __sanitizer_syscall_post_impl_kill(long res, long pid, long sig);
+void __sanitizer_syscall_pre_impl_tgkill(long tgid, long pid, long sig);
+void __sanitizer_syscall_post_impl_tgkill(long res, long tgid, long pid,
+ long sig);
+void __sanitizer_syscall_pre_impl_tkill(long pid, long sig);
+void __sanitizer_syscall_post_impl_tkill(long res, long pid, long sig);
+void __sanitizer_syscall_pre_impl_rt_sigqueueinfo(long pid, long sig,
+ long uinfo);
+void __sanitizer_syscall_post_impl_rt_sigqueueinfo(long res, long pid, long sig,
+ long uinfo);
+void __sanitizer_syscall_pre_impl_sgetmask();
+void __sanitizer_syscall_post_impl_sgetmask(long res);
+void __sanitizer_syscall_pre_impl_ssetmask(long newmask);
+void __sanitizer_syscall_post_impl_ssetmask(long res, long newmask);
+void __sanitizer_syscall_pre_impl_signal(long sig, long handler);
+void __sanitizer_syscall_post_impl_signal(long res, long sig, long handler);
+void __sanitizer_syscall_pre_impl_pause();
+void __sanitizer_syscall_post_impl_pause(long res);
+void __sanitizer_syscall_pre_impl_sync();
+void __sanitizer_syscall_post_impl_sync(long res);
+void __sanitizer_syscall_pre_impl_fsync(long fd);
+void __sanitizer_syscall_post_impl_fsync(long res, long fd);
+void __sanitizer_syscall_pre_impl_fdatasync(long fd);
+void __sanitizer_syscall_post_impl_fdatasync(long res, long fd);
+void __sanitizer_syscall_pre_impl_bdflush(long func, long data);
+void __sanitizer_syscall_post_impl_bdflush(long res, long func, long data);
+void __sanitizer_syscall_pre_impl_mount(long dev_name, long dir_name, long type,
+ long flags, long data);
+void __sanitizer_syscall_post_impl_mount(long res, long dev_name, long dir_name,
+ long type, long flags, long data);
+void __sanitizer_syscall_pre_impl_umount(long name, long flags);
+void __sanitizer_syscall_post_impl_umount(long res, long name, long flags);
+void __sanitizer_syscall_pre_impl_oldumount(long name);
+void __sanitizer_syscall_post_impl_oldumount(long res, long name);
+void __sanitizer_syscall_pre_impl_truncate(long path, long length);
+void __sanitizer_syscall_post_impl_truncate(long res, long path, long length);
+void __sanitizer_syscall_pre_impl_ftruncate(long fd, long length);
+void __sanitizer_syscall_post_impl_ftruncate(long res, long fd, long length);
+void __sanitizer_syscall_pre_impl_stat(long filename, long statbuf);
+void __sanitizer_syscall_post_impl_stat(long res, long filename, long statbuf);
+void __sanitizer_syscall_pre_impl_statfs(long path, long buf);
+void __sanitizer_syscall_post_impl_statfs(long res, long path, long buf);
+void __sanitizer_syscall_pre_impl_statfs64(long path, long sz, long buf);
+void __sanitizer_syscall_post_impl_statfs64(long res, long path, long sz,
+ long buf);
+void __sanitizer_syscall_pre_impl_fstatfs(long fd, long buf);
+void __sanitizer_syscall_post_impl_fstatfs(long res, long fd, long buf);
+void __sanitizer_syscall_pre_impl_fstatfs64(long fd, long sz, long buf);
+void __sanitizer_syscall_post_impl_fstatfs64(long res, long fd, long sz,
+ long buf);
+void __sanitizer_syscall_pre_impl_lstat(long filename, long statbuf);
+void __sanitizer_syscall_post_impl_lstat(long res, long filename, long statbuf);
+void __sanitizer_syscall_pre_impl_fstat(long fd, long statbuf);
+void __sanitizer_syscall_post_impl_fstat(long res, long fd, long statbuf);
+void __sanitizer_syscall_pre_impl_newstat(long filename, long statbuf);
+void __sanitizer_syscall_post_impl_newstat(long res, long filename,
+ long statbuf);
+void __sanitizer_syscall_pre_impl_newlstat(long filename, long statbuf);
+void __sanitizer_syscall_post_impl_newlstat(long res, long filename,
+ long statbuf);
+void __sanitizer_syscall_pre_impl_newfstat(long fd, long statbuf);
+void __sanitizer_syscall_post_impl_newfstat(long res, long fd, long statbuf);
+void __sanitizer_syscall_pre_impl_ustat(long dev, long ubuf);
+void __sanitizer_syscall_post_impl_ustat(long res, long dev, long ubuf);
+void __sanitizer_syscall_pre_impl_stat64(long filename, long statbuf);
+void __sanitizer_syscall_post_impl_stat64(long res, long filename,
+ long statbuf);
+void __sanitizer_syscall_pre_impl_fstat64(long fd, long statbuf);
+void __sanitizer_syscall_post_impl_fstat64(long res, long fd, long statbuf);
+void __sanitizer_syscall_pre_impl_lstat64(long filename, long statbuf);
+void __sanitizer_syscall_post_impl_lstat64(long res, long filename,
+ long statbuf);
+void __sanitizer_syscall_pre_impl_setxattr(long path, long name, long value,
+ long size, long flags);
+void __sanitizer_syscall_post_impl_setxattr(long res, long path, long name,
+ long value, long size, long flags);
+void __sanitizer_syscall_pre_impl_lsetxattr(long path, long name, long value,
+ long size, long flags);
+void __sanitizer_syscall_post_impl_lsetxattr(long res, long path, long name,
+ long value, long size, long flags);
+void __sanitizer_syscall_pre_impl_fsetxattr(long fd, long name, long value,
+ long size, long flags);
+void __sanitizer_syscall_post_impl_fsetxattr(long res, long fd, long name,
+ long value, long size, long flags);
+void __sanitizer_syscall_pre_impl_getxattr(long path, long name, long value,
+ long size);
+void __sanitizer_syscall_post_impl_getxattr(long res, long path, long name,
+ long value, long size);
+void __sanitizer_syscall_pre_impl_lgetxattr(long path, long name, long value,
+ long size);
+void __sanitizer_syscall_post_impl_lgetxattr(long res, long path, long name,
+ long value, long size);
+void __sanitizer_syscall_pre_impl_fgetxattr(long fd, long name, long value,
+ long size);
+void __sanitizer_syscall_post_impl_fgetxattr(long res, long fd, long name,
+ long value, long size);
+void __sanitizer_syscall_pre_impl_listxattr(long path, long list, long size);
+void __sanitizer_syscall_post_impl_listxattr(long res, long path, long list,
+ long size);
+void __sanitizer_syscall_pre_impl_llistxattr(long path, long list, long size);
+void __sanitizer_syscall_post_impl_llistxattr(long res, long path, long list,
+ long size);
+void __sanitizer_syscall_pre_impl_flistxattr(long fd, long list, long size);
+void __sanitizer_syscall_post_impl_flistxattr(long res, long fd, long list,
+ long size);
+void __sanitizer_syscall_pre_impl_removexattr(long path, long name);
+void __sanitizer_syscall_post_impl_removexattr(long res, long path, long name);
+void __sanitizer_syscall_pre_impl_lremovexattr(long path, long name);
+void __sanitizer_syscall_post_impl_lremovexattr(long res, long path, long name);
+void __sanitizer_syscall_pre_impl_fremovexattr(long fd, long name);
+void __sanitizer_syscall_post_impl_fremovexattr(long res, long fd, long name);
+void __sanitizer_syscall_pre_impl_brk(long brk);
+void __sanitizer_syscall_post_impl_brk(long res, long brk);
+void __sanitizer_syscall_pre_impl_mprotect(long start, long len, long prot);
+void __sanitizer_syscall_post_impl_mprotect(long res, long start, long len,
+ long prot);
+void __sanitizer_syscall_pre_impl_mremap(long addr, long old_len, long new_len,
+ long flags, long new_addr);
+void __sanitizer_syscall_post_impl_mremap(long res, long addr, long old_len,
+ long new_len, long flags,
+ long new_addr);
+void __sanitizer_syscall_pre_impl_remap_file_pages(long start, long size,
+ long prot, long pgoff,
+ long flags);
+void __sanitizer_syscall_post_impl_remap_file_pages(long res, long start,
+ long size, long prot,
+ long pgoff, long flags);
+void __sanitizer_syscall_pre_impl_msync(long start, long len, long flags);
+void __sanitizer_syscall_post_impl_msync(long res, long start, long len,
+ long flags);
+void __sanitizer_syscall_pre_impl_munmap(long addr, long len);
+void __sanitizer_syscall_post_impl_munmap(long res, long addr, long len);
+void __sanitizer_syscall_pre_impl_mlock(long start, long len);
+void __sanitizer_syscall_post_impl_mlock(long res, long start, long len);
+void __sanitizer_syscall_pre_impl_munlock(long start, long len);
+void __sanitizer_syscall_post_impl_munlock(long res, long start, long len);
+void __sanitizer_syscall_pre_impl_mlockall(long flags);
+void __sanitizer_syscall_post_impl_mlockall(long res, long flags);
+void __sanitizer_syscall_pre_impl_munlockall();
+void __sanitizer_syscall_post_impl_munlockall(long res);
+void __sanitizer_syscall_pre_impl_madvise(long start, long len, long behavior);
+void __sanitizer_syscall_post_impl_madvise(long res, long start, long len,
+ long behavior);
+void __sanitizer_syscall_pre_impl_mincore(long start, long len, long vec);
+void __sanitizer_syscall_post_impl_mincore(long res, long start, long len,
+ long vec);
+void __sanitizer_syscall_pre_impl_pivot_root(long new_root, long put_old);
+void __sanitizer_syscall_post_impl_pivot_root(long res, long new_root,
+ long put_old);
+void __sanitizer_syscall_pre_impl_chroot(long filename);
+void __sanitizer_syscall_post_impl_chroot(long res, long filename);
+void __sanitizer_syscall_pre_impl_mknod(long filename, long mode, long dev);
+void __sanitizer_syscall_post_impl_mknod(long res, long filename, long mode,
+ long dev);
+void __sanitizer_syscall_pre_impl_link(long oldname, long newname);
+void __sanitizer_syscall_post_impl_link(long res, long oldname, long newname);
+void __sanitizer_syscall_pre_impl_symlink(long old, long new_);
+void __sanitizer_syscall_post_impl_symlink(long res, long old, long new_);
+void __sanitizer_syscall_pre_impl_unlink(long pathname);
+void __sanitizer_syscall_post_impl_unlink(long res, long pathname);
+void __sanitizer_syscall_pre_impl_rename(long oldname, long newname);
+void __sanitizer_syscall_post_impl_rename(long res, long oldname, long newname);
+void __sanitizer_syscall_pre_impl_chmod(long filename, long mode);
+void __sanitizer_syscall_post_impl_chmod(long res, long filename, long mode);
+void __sanitizer_syscall_pre_impl_fchmod(long fd, long mode);
+void __sanitizer_syscall_post_impl_fchmod(long res, long fd, long mode);
+void __sanitizer_syscall_pre_impl_fcntl(long fd, long cmd, long arg);
+void __sanitizer_syscall_post_impl_fcntl(long res, long fd, long cmd, long arg);
+void __sanitizer_syscall_pre_impl_fcntl64(long fd, long cmd, long arg);
+void __sanitizer_syscall_post_impl_fcntl64(long res, long fd, long cmd,
+ long arg);
+void __sanitizer_syscall_pre_impl_pipe(long fildes);
+void __sanitizer_syscall_post_impl_pipe(long res, long fildes);
+void __sanitizer_syscall_pre_impl_pipe2(long fildes, long flags);
+void __sanitizer_syscall_post_impl_pipe2(long res, long fildes, long flags);
+void __sanitizer_syscall_pre_impl_dup(long fildes);
+void __sanitizer_syscall_post_impl_dup(long res, long fildes);
+void __sanitizer_syscall_pre_impl_dup2(long oldfd, long newfd);
+void __sanitizer_syscall_post_impl_dup2(long res, long oldfd, long newfd);
+void __sanitizer_syscall_pre_impl_dup3(long oldfd, long newfd, long flags);
+void __sanitizer_syscall_post_impl_dup3(long res, long oldfd, long newfd,
+ long flags);
+void __sanitizer_syscall_pre_impl_ioperm(long from, long num, long on);
+void __sanitizer_syscall_post_impl_ioperm(long res, long from, long num,
+ long on);
+void __sanitizer_syscall_pre_impl_ioctl(long fd, long cmd, long arg);
+void __sanitizer_syscall_post_impl_ioctl(long res, long fd, long cmd, long arg);
+void __sanitizer_syscall_pre_impl_flock(long fd, long cmd);
+void __sanitizer_syscall_post_impl_flock(long res, long fd, long cmd);
+void __sanitizer_syscall_pre_impl_io_setup(long nr_reqs, long ctx);
+void __sanitizer_syscall_post_impl_io_setup(long res, long nr_reqs, long ctx);
+void __sanitizer_syscall_pre_impl_io_destroy(long ctx);
+void __sanitizer_syscall_post_impl_io_destroy(long res, long ctx);
+void __sanitizer_syscall_pre_impl_io_getevents(long ctx_id, long min_nr,
+ long nr, long events,
+ long timeout);
+void __sanitizer_syscall_post_impl_io_getevents(long res, long ctx_id,
+ long min_nr, long nr,
+ long events, long timeout);
+void __sanitizer_syscall_pre_impl_io_submit(long ctx_id, long arg1, long arg2);
+void __sanitizer_syscall_post_impl_io_submit(long res, long ctx_id, long arg1,
+ long arg2);
+void __sanitizer_syscall_pre_impl_io_cancel(long ctx_id, long iocb,
+ long result);
+void __sanitizer_syscall_post_impl_io_cancel(long res, long ctx_id, long iocb,
+ long result);
+void __sanitizer_syscall_pre_impl_sendfile(long out_fd, long in_fd, long offset,
+ long count);
+void __sanitizer_syscall_post_impl_sendfile(long res, long out_fd, long in_fd,
+ long offset, long count);
+void __sanitizer_syscall_pre_impl_sendfile64(long out_fd, long in_fd,
+ long offset, long count);
+void __sanitizer_syscall_post_impl_sendfile64(long res, long out_fd, long in_fd,
+ long offset, long count);
+void __sanitizer_syscall_pre_impl_readlink(long path, long buf, long bufsiz);
+void __sanitizer_syscall_post_impl_readlink(long res, long path, long buf,
+ long bufsiz);
+void __sanitizer_syscall_pre_impl_creat(long pathname, long mode);
+void __sanitizer_syscall_post_impl_creat(long res, long pathname, long mode);
+void __sanitizer_syscall_pre_impl_open(long filename, long flags, long mode);
+void __sanitizer_syscall_post_impl_open(long res, long filename, long flags,
+ long mode);
+void __sanitizer_syscall_pre_impl_close(long fd);
+void __sanitizer_syscall_post_impl_close(long res, long fd);
+void __sanitizer_syscall_pre_impl_access(long filename, long mode);
+void __sanitizer_syscall_post_impl_access(long res, long filename, long mode);
+void __sanitizer_syscall_pre_impl_vhangup();
+void __sanitizer_syscall_post_impl_vhangup(long res);
+void __sanitizer_syscall_pre_impl_chown(long filename, long user, long group);
+void __sanitizer_syscall_post_impl_chown(long res, long filename, long user,
+ long group);
+void __sanitizer_syscall_pre_impl_lchown(long filename, long user, long group);
+void __sanitizer_syscall_post_impl_lchown(long res, long filename, long user,
+ long group);
+void __sanitizer_syscall_pre_impl_fchown(long fd, long user, long group);
+void __sanitizer_syscall_post_impl_fchown(long res, long fd, long user,
+ long group);
+void __sanitizer_syscall_pre_impl_chown16(long filename, long user, long group);
+void __sanitizer_syscall_post_impl_chown16(long res, long filename, long user,
+ long group);
+void __sanitizer_syscall_pre_impl_lchown16(long filename, long user,
+ long group);
+void __sanitizer_syscall_post_impl_lchown16(long res, long filename, long user,
+ long group);
+void __sanitizer_syscall_pre_impl_fchown16(long fd, long user, long group);
+void __sanitizer_syscall_post_impl_fchown16(long res, long fd, long user,
+ long group);
+void __sanitizer_syscall_pre_impl_setregid16(long rgid, long egid);
+void __sanitizer_syscall_post_impl_setregid16(long res, long rgid, long egid);
+void __sanitizer_syscall_pre_impl_setgid16(long gid);
+void __sanitizer_syscall_post_impl_setgid16(long res, long gid);
+void __sanitizer_syscall_pre_impl_setreuid16(long ruid, long euid);
+void __sanitizer_syscall_post_impl_setreuid16(long res, long ruid, long euid);
+void __sanitizer_syscall_pre_impl_setuid16(long uid);
+void __sanitizer_syscall_post_impl_setuid16(long res, long uid);
+void __sanitizer_syscall_pre_impl_setresuid16(long ruid, long euid, long suid);
+void __sanitizer_syscall_post_impl_setresuid16(long res, long ruid, long euid,
+ long suid);
+void __sanitizer_syscall_pre_impl_getresuid16(long ruid, long euid, long suid);
+void __sanitizer_syscall_post_impl_getresuid16(long res, long ruid, long euid,
+ long suid);
+void __sanitizer_syscall_pre_impl_setresgid16(long rgid, long egid, long sgid);
+void __sanitizer_syscall_post_impl_setresgid16(long res, long rgid, long egid,
+ long sgid);
+void __sanitizer_syscall_pre_impl_getresgid16(long rgid, long egid, long sgid);
+void __sanitizer_syscall_post_impl_getresgid16(long res, long rgid, long egid,
+ long sgid);
+void __sanitizer_syscall_pre_impl_setfsuid16(long uid);
+void __sanitizer_syscall_post_impl_setfsuid16(long res, long uid);
+void __sanitizer_syscall_pre_impl_setfsgid16(long gid);
+void __sanitizer_syscall_post_impl_setfsgid16(long res, long gid);
+void __sanitizer_syscall_pre_impl_getgroups16(long gidsetsize, long grouplist);
+void __sanitizer_syscall_post_impl_getgroups16(long res, long gidsetsize,
+ long grouplist);
+void __sanitizer_syscall_pre_impl_setgroups16(long gidsetsize, long grouplist);
+void __sanitizer_syscall_post_impl_setgroups16(long res, long gidsetsize,
+ long grouplist);
+void __sanitizer_syscall_pre_impl_getuid16();
+void __sanitizer_syscall_post_impl_getuid16(long res);
+void __sanitizer_syscall_pre_impl_geteuid16();
+void __sanitizer_syscall_post_impl_geteuid16(long res);
+void __sanitizer_syscall_pre_impl_getgid16();
+void __sanitizer_syscall_post_impl_getgid16(long res);
+void __sanitizer_syscall_pre_impl_getegid16();
+void __sanitizer_syscall_post_impl_getegid16(long res);
+void __sanitizer_syscall_pre_impl_utime(long filename, long times);
+void __sanitizer_syscall_post_impl_utime(long res, long filename, long times);
+void __sanitizer_syscall_pre_impl_utimes(long filename, long utimes);
+void __sanitizer_syscall_post_impl_utimes(long res, long filename, long utimes);
+void __sanitizer_syscall_pre_impl_lseek(long fd, long offset, long origin);
+void __sanitizer_syscall_post_impl_lseek(long res, long fd, long offset,
+ long origin);
+void __sanitizer_syscall_pre_impl_llseek(long fd, long offset_high,
+ long offset_low, long result,
+ long origin);
+void __sanitizer_syscall_post_impl_llseek(long res, long fd, long offset_high,
+ long offset_low, long result,
+ long origin);
+void __sanitizer_syscall_pre_impl_read(long fd, long buf, long count);
+void __sanitizer_syscall_post_impl_read(long res, long fd, long buf,
+ long count);
+void __sanitizer_syscall_pre_impl_readv(long fd, long vec, long vlen);
+void __sanitizer_syscall_post_impl_readv(long res, long fd, long vec,
+ long vlen);
+void __sanitizer_syscall_pre_impl_write(long fd, long buf, long count);
+void __sanitizer_syscall_post_impl_write(long res, long fd, long buf,
+ long count);
+void __sanitizer_syscall_pre_impl_writev(long fd, long vec, long vlen);
+void __sanitizer_syscall_post_impl_writev(long res, long fd, long vec,
+ long vlen);
+
+#ifdef _LP64
+void __sanitizer_syscall_pre_impl_pread64(long fd, long buf, long count,
+ long pos);
+void __sanitizer_syscall_post_impl_pread64(long res, long fd, long buf,
+ long count, long pos);
+void __sanitizer_syscall_pre_impl_pwrite64(long fd, long buf, long count,
+ long pos);
+void __sanitizer_syscall_post_impl_pwrite64(long res, long fd, long buf,
+ long count, long pos);
+#else
+void __sanitizer_syscall_pre_impl_pread64(long fd, long buf, long count,
+ long pos0, long pos1);
+void __sanitizer_syscall_post_impl_pread64(long res, long fd, long buf,
+ long count, long pos0, long pos1);
+void __sanitizer_syscall_pre_impl_pwrite64(long fd, long buf, long count,
+ long pos0, long pos1);
+void __sanitizer_syscall_post_impl_pwrite64(long res, long fd, long buf,
+ long count, long pos0, long pos1);
+#endif
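+
+// Note: when _LP64 is not defined, the 64-bit file offset for pread64 and
+// pwrite64 is split across two long arguments (pos0, pos1), mirroring how a
+// 32-bit kernel ABI receives it. Which half goes in which slot is
+// ABI-specific; an illustrative call for a low/high split of a 64-bit `pos`
+// might be:
+//
+//   __sanitizer_syscall_pre_pread64(fd, buf, count,
+//                                   (long)(pos & 0xffffffffu),
+//                                   (long)(pos >> 32));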
+
+void __sanitizer_syscall_pre_impl_preadv(long fd, long vec, long vlen,
+ long pos_l, long pos_h);
+void __sanitizer_syscall_post_impl_preadv(long res, long fd, long vec,
+ long vlen, long pos_l, long pos_h);
+void __sanitizer_syscall_pre_impl_pwritev(long fd, long vec, long vlen,
+ long pos_l, long pos_h);
+void __sanitizer_syscall_post_impl_pwritev(long res, long fd, long vec,
+ long vlen, long pos_l, long pos_h);
+void __sanitizer_syscall_pre_impl_getcwd(long buf, long size);
+void __sanitizer_syscall_post_impl_getcwd(long res, long buf, long size);
+void __sanitizer_syscall_pre_impl_mkdir(long pathname, long mode);
+void __sanitizer_syscall_post_impl_mkdir(long res, long pathname, long mode);
+void __sanitizer_syscall_pre_impl_chdir(long filename);
+void __sanitizer_syscall_post_impl_chdir(long res, long filename);
+void __sanitizer_syscall_pre_impl_fchdir(long fd);
+void __sanitizer_syscall_post_impl_fchdir(long res, long fd);
+void __sanitizer_syscall_pre_impl_rmdir(long pathname);
+void __sanitizer_syscall_post_impl_rmdir(long res, long pathname);
+void __sanitizer_syscall_pre_impl_lookup_dcookie(long cookie64, long buf,
+ long len);
+void __sanitizer_syscall_post_impl_lookup_dcookie(long res, long cookie64,
+ long buf, long len);
+void __sanitizer_syscall_pre_impl_quotactl(long cmd, long special, long id,
+ long addr);
+void __sanitizer_syscall_post_impl_quotactl(long res, long cmd, long special,
+ long id, long addr);
+void __sanitizer_syscall_pre_impl_getdents(long fd, long dirent, long count);
+void __sanitizer_syscall_post_impl_getdents(long res, long fd, long dirent,
+ long count);
+void __sanitizer_syscall_pre_impl_getdents64(long fd, long dirent, long count);
+void __sanitizer_syscall_post_impl_getdents64(long res, long fd, long dirent,
+ long count);
+void __sanitizer_syscall_pre_impl_setsockopt(long fd, long level, long optname,
+ long optval, long optlen);
+void __sanitizer_syscall_post_impl_setsockopt(long res, long fd, long level,
+ long optname, long optval,
+ long optlen);
+void __sanitizer_syscall_pre_impl_getsockopt(long fd, long level, long optname,
+ long optval, long optlen);
+void __sanitizer_syscall_post_impl_getsockopt(long res, long fd, long level,
+ long optname, long optval,
+ long optlen);
+void __sanitizer_syscall_pre_impl_bind(long arg0, long arg1, long arg2);
+void __sanitizer_syscall_post_impl_bind(long res, long arg0, long arg1,
+ long arg2);
+void __sanitizer_syscall_pre_impl_connect(long arg0, long arg1, long arg2);
+void __sanitizer_syscall_post_impl_connect(long res, long arg0, long arg1,
+ long arg2);
+void __sanitizer_syscall_pre_impl_accept(long arg0, long arg1, long arg2);
+void __sanitizer_syscall_post_impl_accept(long res, long arg0, long arg1,
+ long arg2);
+void __sanitizer_syscall_pre_impl_accept4(long arg0, long arg1, long arg2,
+ long arg3);
+void __sanitizer_syscall_post_impl_accept4(long res, long arg0, long arg1,
+ long arg2, long arg3);
+void __sanitizer_syscall_pre_impl_getsockname(long arg0, long arg1, long arg2);
+void __sanitizer_syscall_post_impl_getsockname(long res, long arg0, long arg1,
+ long arg2);
+void __sanitizer_syscall_pre_impl_getpeername(long arg0, long arg1, long arg2);
+void __sanitizer_syscall_post_impl_getpeername(long res, long arg0, long arg1,
+ long arg2);
+void __sanitizer_syscall_pre_impl_send(long arg0, long arg1, long arg2,
+ long arg3);
+void __sanitizer_syscall_post_impl_send(long res, long arg0, long arg1,
+ long arg2, long arg3);
+void __sanitizer_syscall_pre_impl_sendto(long arg0, long arg1, long arg2,
+ long arg3, long arg4, long arg5);
+void __sanitizer_syscall_post_impl_sendto(long res, long arg0, long arg1,
+ long arg2, long arg3, long arg4,
+ long arg5);
+void __sanitizer_syscall_pre_impl_sendmsg(long fd, long msg, long flags);
+void __sanitizer_syscall_post_impl_sendmsg(long res, long fd, long msg,
+ long flags);
+void __sanitizer_syscall_pre_impl_sendmmsg(long fd, long msg, long vlen,
+ long flags);
+void __sanitizer_syscall_post_impl_sendmmsg(long res, long fd, long msg,
+ long vlen, long flags);
+void __sanitizer_syscall_pre_impl_recv(long arg0, long arg1, long arg2,
+ long arg3);
+void __sanitizer_syscall_post_impl_recv(long res, long arg0, long arg1,
+ long arg2, long arg3);
+void __sanitizer_syscall_pre_impl_recvfrom(long arg0, long arg1, long arg2,
+ long arg3, long arg4, long arg5);
+void __sanitizer_syscall_post_impl_recvfrom(long res, long arg0, long arg1,
+ long arg2, long arg3, long arg4,
+ long arg5);
+void __sanitizer_syscall_pre_impl_recvmsg(long fd, long msg, long flags);
+void __sanitizer_syscall_post_impl_recvmsg(long res, long fd, long msg,
+ long flags);
+void __sanitizer_syscall_pre_impl_recvmmsg(long fd, long msg, long vlen,
+ long flags, long timeout);
+void __sanitizer_syscall_post_impl_recvmmsg(long res, long fd, long msg,
+ long vlen, long flags,
+ long timeout);
+void __sanitizer_syscall_pre_impl_socket(long arg0, long arg1, long arg2);
+void __sanitizer_syscall_post_impl_socket(long res, long arg0, long arg1,
+ long arg2);
+void __sanitizer_syscall_pre_impl_socketpair(long arg0, long arg1, long arg2,
+ long arg3);
+void __sanitizer_syscall_post_impl_socketpair(long res, long arg0, long arg1,
+ long arg2, long arg3);
+void __sanitizer_syscall_pre_impl_socketcall(long call, long args);
+void __sanitizer_syscall_post_impl_socketcall(long res, long call, long args);
+void __sanitizer_syscall_pre_impl_listen(long arg0, long arg1);
+void __sanitizer_syscall_post_impl_listen(long res, long arg0, long arg1);
+void __sanitizer_syscall_pre_impl_poll(long ufds, long nfds, long timeout);
+void __sanitizer_syscall_post_impl_poll(long res, long ufds, long nfds,
+ long timeout);
+void __sanitizer_syscall_pre_impl_select(long n, long inp, long outp, long exp,
+ long tvp);
+void __sanitizer_syscall_post_impl_select(long res, long n, long inp, long outp,
+ long exp, long tvp);
+void __sanitizer_syscall_pre_impl_old_select(long arg);
+void __sanitizer_syscall_post_impl_old_select(long res, long arg);
+void __sanitizer_syscall_pre_impl_epoll_create(long size);
+void __sanitizer_syscall_post_impl_epoll_create(long res, long size);
+void __sanitizer_syscall_pre_impl_epoll_create1(long flags);
+void __sanitizer_syscall_post_impl_epoll_create1(long res, long flags);
+void __sanitizer_syscall_pre_impl_epoll_ctl(long epfd, long op, long fd,
+ long event);
+void __sanitizer_syscall_post_impl_epoll_ctl(long res, long epfd, long op,
+ long fd, long event);
+void __sanitizer_syscall_pre_impl_epoll_wait(long epfd, long events,
+ long maxevents, long timeout);
+void __sanitizer_syscall_post_impl_epoll_wait(long res, long epfd, long events,
+ long maxevents, long timeout);
+void __sanitizer_syscall_pre_impl_epoll_pwait(long epfd, long events,
+ long maxevents, long timeout,
+ long sigmask, long sigsetsize);
+void __sanitizer_syscall_post_impl_epoll_pwait(long res, long epfd, long events,
+ long maxevents, long timeout,
+ long sigmask, long sigsetsize);
+void __sanitizer_syscall_pre_impl_gethostname(long name, long len);
+void __sanitizer_syscall_post_impl_gethostname(long res, long name, long len);
+void __sanitizer_syscall_pre_impl_sethostname(long name, long len);
+void __sanitizer_syscall_post_impl_sethostname(long res, long name, long len);
+void __sanitizer_syscall_pre_impl_setdomainname(long name, long len);
+void __sanitizer_syscall_post_impl_setdomainname(long res, long name, long len);
+void __sanitizer_syscall_pre_impl_newuname(long name);
+void __sanitizer_syscall_post_impl_newuname(long res, long name);
+void __sanitizer_syscall_pre_impl_uname(long arg0);
+void __sanitizer_syscall_post_impl_uname(long res, long arg0);
+void __sanitizer_syscall_pre_impl_olduname(long arg0);
+void __sanitizer_syscall_post_impl_olduname(long res, long arg0);
+void __sanitizer_syscall_pre_impl_getrlimit(long resource, long rlim);
+void __sanitizer_syscall_post_impl_getrlimit(long res, long resource,
+ long rlim);
+void __sanitizer_syscall_pre_impl_old_getrlimit(long resource, long rlim);
+void __sanitizer_syscall_post_impl_old_getrlimit(long res, long resource,
+ long rlim);
+void __sanitizer_syscall_pre_impl_setrlimit(long resource, long rlim);
+void __sanitizer_syscall_post_impl_setrlimit(long res, long resource,
+ long rlim);
+void __sanitizer_syscall_pre_impl_prlimit64(long pid, long resource,
+ long new_rlim, long old_rlim);
+void __sanitizer_syscall_post_impl_prlimit64(long res, long pid, long resource,
+ long new_rlim, long old_rlim);
+void __sanitizer_syscall_pre_impl_getrusage(long who, long ru);
+void __sanitizer_syscall_post_impl_getrusage(long res, long who, long ru);
+void __sanitizer_syscall_pre_impl_umask(long mask);
+void __sanitizer_syscall_post_impl_umask(long res, long mask);
+void __sanitizer_syscall_pre_impl_msgget(long key, long msgflg);
+void __sanitizer_syscall_post_impl_msgget(long res, long key, long msgflg);
+void __sanitizer_syscall_pre_impl_msgsnd(long msqid, long msgp, long msgsz,
+ long msgflg);
+void __sanitizer_syscall_post_impl_msgsnd(long res, long msqid, long msgp,
+ long msgsz, long msgflg);
+void __sanitizer_syscall_pre_impl_msgrcv(long msqid, long msgp, long msgsz,
+ long msgtyp, long msgflg);
+void __sanitizer_syscall_post_impl_msgrcv(long res, long msqid, long msgp,
+ long msgsz, long msgtyp, long msgflg);
+void __sanitizer_syscall_pre_impl_msgctl(long msqid, long cmd, long buf);
+void __sanitizer_syscall_post_impl_msgctl(long res, long msqid, long cmd,
+ long buf);
+void __sanitizer_syscall_pre_impl_semget(long key, long nsems, long semflg);
+void __sanitizer_syscall_post_impl_semget(long res, long key, long nsems,
+ long semflg);
+void __sanitizer_syscall_pre_impl_semop(long semid, long sops, long nsops);
+void __sanitizer_syscall_post_impl_semop(long res, long semid, long sops,
+ long nsops);
+void __sanitizer_syscall_pre_impl_semctl(long semid, long semnum, long cmd,
+ long arg);
+void __sanitizer_syscall_post_impl_semctl(long res, long semid, long semnum,
+ long cmd, long arg);
+void __sanitizer_syscall_pre_impl_semtimedop(long semid, long sops, long nsops,
+ long timeout);
+void __sanitizer_syscall_post_impl_semtimedop(long res, long semid, long sops,
+ long nsops, long timeout);
+void __sanitizer_syscall_pre_impl_shmat(long shmid, long shmaddr, long shmflg);
+void __sanitizer_syscall_post_impl_shmat(long res, long shmid, long shmaddr,
+ long shmflg);
+void __sanitizer_syscall_pre_impl_shmget(long key, long size, long flag);
+void __sanitizer_syscall_post_impl_shmget(long res, long key, long size,
+ long flag);
+void __sanitizer_syscall_pre_impl_shmdt(long shmaddr);
+void __sanitizer_syscall_post_impl_shmdt(long res, long shmaddr);
+void __sanitizer_syscall_pre_impl_shmctl(long shmid, long cmd, long buf);
+void __sanitizer_syscall_post_impl_shmctl(long res, long shmid, long cmd,
+ long buf);
+void __sanitizer_syscall_pre_impl_ipc(long call, long first, long second,
+ long third, long ptr, long fifth);
+void __sanitizer_syscall_post_impl_ipc(long res, long call, long first,
+ long second, long third, long ptr,
+ long fifth);
+void __sanitizer_syscall_pre_impl_mq_open(long name, long oflag, long mode,
+ long attr);
+void __sanitizer_syscall_post_impl_mq_open(long res, long name, long oflag,
+ long mode, long attr);
+void __sanitizer_syscall_pre_impl_mq_unlink(long name);
+void __sanitizer_syscall_post_impl_mq_unlink(long res, long name);
+void __sanitizer_syscall_pre_impl_mq_timedsend(long mqdes, long msg_ptr,
+ long msg_len, long msg_prio,
+ long abs_timeout);
+void __sanitizer_syscall_post_impl_mq_timedsend(long res, long mqdes,
+ long msg_ptr, long msg_len,
+ long msg_prio,
+ long abs_timeout);
+void __sanitizer_syscall_pre_impl_mq_timedreceive(long mqdes, long msg_ptr,
+ long msg_len, long msg_prio,
+ long abs_timeout);
+void __sanitizer_syscall_post_impl_mq_timedreceive(long res, long mqdes,
+ long msg_ptr, long msg_len,
+ long msg_prio,
+ long abs_timeout);
+void __sanitizer_syscall_pre_impl_mq_notify(long mqdes, long notification);
+void __sanitizer_syscall_post_impl_mq_notify(long res, long mqdes,
+ long notification);
+void __sanitizer_syscall_pre_impl_mq_getsetattr(long mqdes, long mqstat,
+ long omqstat);
+void __sanitizer_syscall_post_impl_mq_getsetattr(long res, long mqdes,
+ long mqstat, long omqstat);
+void __sanitizer_syscall_pre_impl_pciconfig_iobase(long which, long bus,
+ long devfn);
+void __sanitizer_syscall_post_impl_pciconfig_iobase(long res, long which,
+ long bus, long devfn);
+void __sanitizer_syscall_pre_impl_pciconfig_read(long bus, long dfn, long off,
+ long len, long buf);
+void __sanitizer_syscall_post_impl_pciconfig_read(long res, long bus, long dfn,
+ long off, long len, long buf);
+void __sanitizer_syscall_pre_impl_pciconfig_write(long bus, long dfn, long off,
+ long len, long buf);
+void __sanitizer_syscall_post_impl_pciconfig_write(long res, long bus, long dfn,
+ long off, long len,
+ long buf);
+void __sanitizer_syscall_pre_impl_swapon(long specialfile, long swap_flags);
+void __sanitizer_syscall_post_impl_swapon(long res, long specialfile,
+ long swap_flags);
+void __sanitizer_syscall_pre_impl_swapoff(long specialfile);
+void __sanitizer_syscall_post_impl_swapoff(long res, long specialfile);
+void __sanitizer_syscall_pre_impl_sysctl(long args);
+void __sanitizer_syscall_post_impl_sysctl(long res, long args);
+void __sanitizer_syscall_pre_impl_sysinfo(long info);
+void __sanitizer_syscall_post_impl_sysinfo(long res, long info);
+void __sanitizer_syscall_pre_impl_sysfs(long option, long arg1, long arg2);
+void __sanitizer_syscall_post_impl_sysfs(long res, long option, long arg1,
+ long arg2);
+void __sanitizer_syscall_pre_impl_syslog(long type, long buf, long len);
+void __sanitizer_syscall_post_impl_syslog(long res, long type, long buf,
+ long len);
+void __sanitizer_syscall_pre_impl_uselib(long library);
+void __sanitizer_syscall_post_impl_uselib(long res, long library);
+void __sanitizer_syscall_pre_impl_ni_syscall();
+void __sanitizer_syscall_post_impl_ni_syscall(long res);
+void __sanitizer_syscall_pre_impl_ptrace(long request, long pid, long addr,
+ long data);
+void __sanitizer_syscall_post_impl_ptrace(long res, long request, long pid,
+ long addr, long data);
+void __sanitizer_syscall_pre_impl_add_key(long _type, long _description,
+ long _payload, long plen,
+ long destringid);
+void __sanitizer_syscall_post_impl_add_key(long res, long _type,
+ long _description, long _payload,
+ long plen, long destringid);
+void __sanitizer_syscall_pre_impl_request_key(long _type, long _description,
+ long _callout_info,
+ long destringid);
+void __sanitizer_syscall_post_impl_request_key(long res, long _type,
+ long _description,
+ long _callout_info,
+ long destringid);
+void __sanitizer_syscall_pre_impl_keyctl(long cmd, long arg2, long arg3,
+ long arg4, long arg5);
+void __sanitizer_syscall_post_impl_keyctl(long res, long cmd, long arg2,
+ long arg3, long arg4, long arg5);
+void __sanitizer_syscall_pre_impl_ioprio_set(long which, long who, long ioprio);
+void __sanitizer_syscall_post_impl_ioprio_set(long res, long which, long who,
+ long ioprio);
+void __sanitizer_syscall_pre_impl_ioprio_get(long which, long who);
+void __sanitizer_syscall_post_impl_ioprio_get(long res, long which, long who);
+void __sanitizer_syscall_pre_impl_set_mempolicy(long mode, long nmask,
+ long maxnode);
+void __sanitizer_syscall_post_impl_set_mempolicy(long res, long mode,
+ long nmask, long maxnode);
+void __sanitizer_syscall_pre_impl_migrate_pages(long pid, long maxnode,
+ long from, long to);
+void __sanitizer_syscall_post_impl_migrate_pages(long res, long pid,
+ long maxnode, long from,
+ long to);
+void __sanitizer_syscall_pre_impl_move_pages(long pid, long nr_pages,
+ long pages, long nodes,
+ long status, long flags);
+void __sanitizer_syscall_post_impl_move_pages(long res, long pid, long nr_pages,
+ long pages, long nodes,
+ long status, long flags);
+void __sanitizer_syscall_pre_impl_mbind(long start, long len, long mode,
+ long nmask, long maxnode, long flags);
+void __sanitizer_syscall_post_impl_mbind(long res, long start, long len,
+ long mode, long nmask, long maxnode,
+ long flags);
+void __sanitizer_syscall_pre_impl_get_mempolicy(long policy, long nmask,
+ long maxnode, long addr,
+ long flags);
+void __sanitizer_syscall_post_impl_get_mempolicy(long res, long policy,
+ long nmask, long maxnode,
+ long addr, long flags);
+void __sanitizer_syscall_pre_impl_inotify_init();
+void __sanitizer_syscall_post_impl_inotify_init(long res);
+void __sanitizer_syscall_pre_impl_inotify_init1(long flags);
+void __sanitizer_syscall_post_impl_inotify_init1(long res, long flags);
+void __sanitizer_syscall_pre_impl_inotify_add_watch(long fd, long path,
+ long mask);
+void __sanitizer_syscall_post_impl_inotify_add_watch(long res, long fd,
+ long path, long mask);
+void __sanitizer_syscall_pre_impl_inotify_rm_watch(long fd, long wd);
+void __sanitizer_syscall_post_impl_inotify_rm_watch(long res, long fd, long wd);
+void __sanitizer_syscall_pre_impl_spu_run(long fd, long unpc, long ustatus);
+void __sanitizer_syscall_post_impl_spu_run(long res, long fd, long unpc,
+ long ustatus);
+void __sanitizer_syscall_pre_impl_spu_create(long name, long flags, long mode,
+ long fd);
+void __sanitizer_syscall_post_impl_spu_create(long res, long name, long flags,
+ long mode, long fd);
+void __sanitizer_syscall_pre_impl_mknodat(long dfd, long filename, long mode,
+ long dev);
+void __sanitizer_syscall_post_impl_mknodat(long res, long dfd, long filename,
+ long mode, long dev);
+void __sanitizer_syscall_pre_impl_mkdirat(long dfd, long pathname, long mode);
+void __sanitizer_syscall_post_impl_mkdirat(long res, long dfd, long pathname,
+ long mode);
+void __sanitizer_syscall_pre_impl_unlinkat(long dfd, long pathname, long flag);
+void __sanitizer_syscall_post_impl_unlinkat(long res, long dfd, long pathname,
+ long flag);
+void __sanitizer_syscall_pre_impl_symlinkat(long oldname, long newdfd,
+ long newname);
+void __sanitizer_syscall_post_impl_symlinkat(long res, long oldname,
+ long newdfd, long newname);
+void __sanitizer_syscall_pre_impl_linkat(long olddfd, long oldname, long newdfd,
+ long newname, long flags);
+void __sanitizer_syscall_post_impl_linkat(long res, long olddfd, long oldname,
+ long newdfd, long newname,
+ long flags);
+void __sanitizer_syscall_pre_impl_renameat(long olddfd, long oldname,
+ long newdfd, long newname);
+void __sanitizer_syscall_post_impl_renameat(long res, long olddfd, long oldname,
+ long newdfd, long newname);
+void __sanitizer_syscall_pre_impl_futimesat(long dfd, long filename,
+ long utimes);
+void __sanitizer_syscall_post_impl_futimesat(long res, long dfd, long filename,
+ long utimes);
+void __sanitizer_syscall_pre_impl_faccessat(long dfd, long filename, long mode);
+void __sanitizer_syscall_post_impl_faccessat(long res, long dfd, long filename,
+ long mode);
+void __sanitizer_syscall_pre_impl_fchmodat(long dfd, long filename, long mode);
+void __sanitizer_syscall_post_impl_fchmodat(long res, long dfd, long filename,
+ long mode);
+void __sanitizer_syscall_pre_impl_fchownat(long dfd, long filename, long user,
+ long group, long flag);
+void __sanitizer_syscall_post_impl_fchownat(long res, long dfd, long filename,
+ long user, long group, long flag);
+void __sanitizer_syscall_pre_impl_openat(long dfd, long filename, long flags,
+ long mode);
+void __sanitizer_syscall_post_impl_openat(long res, long dfd, long filename,
+ long flags, long mode);
+void __sanitizer_syscall_pre_impl_newfstatat(long dfd, long filename,
+ long statbuf, long flag);
+void __sanitizer_syscall_post_impl_newfstatat(long res, long dfd, long filename,
+ long statbuf, long flag);
+void __sanitizer_syscall_pre_impl_fstatat64(long dfd, long filename,
+ long statbuf, long flag);
+void __sanitizer_syscall_post_impl_fstatat64(long res, long dfd, long filename,
+ long statbuf, long flag);
+void __sanitizer_syscall_pre_impl_readlinkat(long dfd, long path, long buf,
+ long bufsiz);
+void __sanitizer_syscall_post_impl_readlinkat(long res, long dfd, long path,
+ long buf, long bufsiz);
+void __sanitizer_syscall_pre_impl_utimensat(long dfd, long filename,
+ long utimes, long flags);
+void __sanitizer_syscall_post_impl_utimensat(long res, long dfd, long filename,
+ long utimes, long flags);
+void __sanitizer_syscall_pre_impl_unshare(long unshare_flags);
+void __sanitizer_syscall_post_impl_unshare(long res, long unshare_flags);
+void __sanitizer_syscall_pre_impl_splice(long fd_in, long off_in, long fd_out,
+ long off_out, long len, long flags);
+void __sanitizer_syscall_post_impl_splice(long res, long fd_in, long off_in,
+ long fd_out, long off_out, long len,
+ long flags);
+void __sanitizer_syscall_pre_impl_vmsplice(long fd, long iov, long nr_segs,
+ long flags);
+void __sanitizer_syscall_post_impl_vmsplice(long res, long fd, long iov,
+ long nr_segs, long flags);
+void __sanitizer_syscall_pre_impl_tee(long fdin, long fdout, long len,
+ long flags);
+void __sanitizer_syscall_post_impl_tee(long res, long fdin, long fdout,
+ long len, long flags);
+void __sanitizer_syscall_pre_impl_get_robust_list(long pid, long head_ptr,
+ long len_ptr);
+void __sanitizer_syscall_post_impl_get_robust_list(long res, long pid,
+ long head_ptr, long len_ptr);
+void __sanitizer_syscall_pre_impl_set_robust_list(long head, long len);
+void __sanitizer_syscall_post_impl_set_robust_list(long res, long head,
+ long len);
+void __sanitizer_syscall_pre_impl_getcpu(long cpu, long node, long cache);
+void __sanitizer_syscall_post_impl_getcpu(long res, long cpu, long node,
+ long cache);
+void __sanitizer_syscall_pre_impl_signalfd(long ufd, long user_mask,
+ long sizemask);
+void __sanitizer_syscall_post_impl_signalfd(long res, long ufd, long user_mask,
+ long sizemask);
+void __sanitizer_syscall_pre_impl_signalfd4(long ufd, long user_mask,
+ long sizemask, long flags);
+void __sanitizer_syscall_post_impl_signalfd4(long res, long ufd, long user_mask,
+ long sizemask, long flags);
+void __sanitizer_syscall_pre_impl_timerfd_create(long clockid, long flags);
+void __sanitizer_syscall_post_impl_timerfd_create(long res, long clockid,
+ long flags);
+void __sanitizer_syscall_pre_impl_timerfd_settime(long ufd, long flags,
+ long utmr, long otmr);
+void __sanitizer_syscall_post_impl_timerfd_settime(long res, long ufd,
+ long flags, long utmr,
+ long otmr);
+void __sanitizer_syscall_pre_impl_timerfd_gettime(long ufd, long otmr);
+void __sanitizer_syscall_post_impl_timerfd_gettime(long res, long ufd,
+ long otmr);
+void __sanitizer_syscall_pre_impl_eventfd(long count);
+void __sanitizer_syscall_post_impl_eventfd(long res, long count);
+void __sanitizer_syscall_pre_impl_eventfd2(long count, long flags);
+void __sanitizer_syscall_post_impl_eventfd2(long res, long count, long flags);
+void __sanitizer_syscall_pre_impl_old_readdir(long arg0, long arg1, long arg2);
+void __sanitizer_syscall_post_impl_old_readdir(long res, long arg0, long arg1,
+ long arg2);
+void __sanitizer_syscall_pre_impl_pselect6(long arg0, long arg1, long arg2,
+ long arg3, long arg4, long arg5);
+void __sanitizer_syscall_post_impl_pselect6(long res, long arg0, long arg1,
+ long arg2, long arg3, long arg4,
+ long arg5);
+void __sanitizer_syscall_pre_impl_ppoll(long arg0, long arg1, long arg2,
+ long arg3, long arg4);
+void __sanitizer_syscall_post_impl_ppoll(long res, long arg0, long arg1,
+ long arg2, long arg3, long arg4);
+void __sanitizer_syscall_pre_impl_fanotify_init(long flags, long event_f_flags);
+void __sanitizer_syscall_post_impl_fanotify_init(long res, long flags,
+ long event_f_flags);
+void __sanitizer_syscall_pre_impl_fanotify_mark(long fanotify_fd, long flags,
+ long mask, long fd,
+ long pathname);
+void __sanitizer_syscall_post_impl_fanotify_mark(long res, long fanotify_fd,
+ long flags, long mask, long fd,
+ long pathname);
+void __sanitizer_syscall_pre_impl_syncfs(long fd);
+void __sanitizer_syscall_post_impl_syncfs(long res, long fd);
+void __sanitizer_syscall_pre_impl_perf_event_open(long attr_uptr, long pid,
+ long cpu, long group_fd,
+ long flags);
+void __sanitizer_syscall_post_impl_perf_event_open(long res, long attr_uptr,
+ long pid, long cpu,
+ long group_fd, long flags);
+void __sanitizer_syscall_pre_impl_mmap_pgoff(long addr, long len, long prot,
+ long flags, long fd, long pgoff);
+void __sanitizer_syscall_post_impl_mmap_pgoff(long res, long addr, long len,
+ long prot, long flags, long fd,
+ long pgoff);
+void __sanitizer_syscall_pre_impl_old_mmap(long arg);
+void __sanitizer_syscall_post_impl_old_mmap(long res, long arg);
+void __sanitizer_syscall_pre_impl_name_to_handle_at(long dfd, long name,
+ long handle, long mnt_id,
+ long flag);
+void __sanitizer_syscall_post_impl_name_to_handle_at(long res, long dfd,
+ long name, long handle,
+ long mnt_id, long flag);
+void __sanitizer_syscall_pre_impl_open_by_handle_at(long mountdirfd,
+ long handle, long flags);
+void __sanitizer_syscall_post_impl_open_by_handle_at(long res, long mountdirfd,
+ long handle, long flags);
+void __sanitizer_syscall_pre_impl_setns(long fd, long nstype);
+void __sanitizer_syscall_post_impl_setns(long res, long fd, long nstype);
+void __sanitizer_syscall_pre_impl_process_vm_readv(long pid, long lvec,
+ long liovcnt, long rvec,
+ long riovcnt, long flags);
+void __sanitizer_syscall_post_impl_process_vm_readv(long res, long pid,
+ long lvec, long liovcnt,
+ long rvec, long riovcnt,
+ long flags);
+void __sanitizer_syscall_pre_impl_process_vm_writev(long pid, long lvec,
+ long liovcnt, long rvec,
+ long riovcnt, long flags);
+void __sanitizer_syscall_post_impl_process_vm_writev(long res, long pid,
+ long lvec, long liovcnt,
+ long rvec, long riovcnt,
+ long flags);
+void __sanitizer_syscall_pre_impl_fork();
+void __sanitizer_syscall_post_impl_fork(long res);
+void __sanitizer_syscall_pre_impl_vfork();
+void __sanitizer_syscall_post_impl_vfork(long res);
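+
+/* Example (sketch): code that issues raw syscalls can bracket them with the
+   pre/post hooks so the sanitizer observes the call's side effects. The
+   macro wrappers defined earlier in this header forward to the _impl
+   functions declared above; "dfd", "path", and "buf" are placeholders.
+
+     __sanitizer_syscall_pre_readlinkat(dfd, path, buf, sizeof(buf));
+     long res = syscall(__NR_readlinkat, dfd, path, buf, sizeof(buf));
+     __sanitizer_syscall_post_readlinkat(res, dfd, path, buf, sizeof(buf));
+*/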
+
+#ifdef __cplusplus
+} // extern "C"
+#endif
+
+#endif // SANITIZER_LINUX_SYSCALL_HOOKS_H
diff --git a/clang-2690385/lib64/clang/3.8/include/sanitizer/lsan_interface.h b/clang-2690385/lib64/clang/3.8/include/sanitizer/lsan_interface.h
new file mode 100644
index 00000000..8fb8e756
--- /dev/null
+++ b/clang-2690385/lib64/clang/3.8/include/sanitizer/lsan_interface.h
@@ -0,0 +1,84 @@
+//===-- sanitizer/lsan_interface.h ------------------------------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file is a part of LeakSanitizer.
+//
+// Public interface header.
+//===----------------------------------------------------------------------===//
+#ifndef SANITIZER_LSAN_INTERFACE_H
+#define SANITIZER_LSAN_INTERFACE_H
+
+#include <sanitizer/common_interface_defs.h>
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+ // Allocations made between calls to __lsan_disable() and __lsan_enable() will
+ // be treated as non-leaks. Disable/enable pairs may be nested.
+ void __lsan_disable();
+ void __lsan_enable();
+
+ // The heap object into which p points will be treated as a non-leak.
+ void __lsan_ignore_object(const void *p);
+
+ // Memory regions registered through this interface will be treated as sources
+ // of live pointers during leak checking. Useful if you store pointers in
+ // mapped memory.
+ // Points of note:
+ // - __lsan_unregister_root_region() must be called with the same pointer and
+ // size that have earlier been passed to __lsan_register_root_region()
+ // - LSan will skip any inaccessible memory when scanning a root region. E.g.,
+ // if you map memory within a larger region that you have mprotect'ed, you can
+ // register the entire large region.
+ // - the implementation is not optimized for performance. This interface is
+ // intended to be used for a small number of relatively static regions.
+ void __lsan_register_root_region(const void *p, size_t size);
+ void __lsan_unregister_root_region(const void *p, size_t size);
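+
+  // Example (sketch): a mmap'ed arena that stores heap pointers can be
+  // registered as a root region so LSan scans it; "arena" and "kArenaSize"
+  // are placeholders.
+  //
+  //   void *arena = mmap(0, kArenaSize, PROT_READ | PROT_WRITE,
+  //                      MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
+  //   __lsan_register_root_region(arena, kArenaSize);
+  //   /* ... store pointers to heap objects in the arena ... */
+  //   __lsan_unregister_root_region(arena, kArenaSize);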
+
+ // Check for leaks now. This function behaves identically to the default
+ // end-of-process leak check. In particular, it will terminate the process if
+ // leaks are found and the exitcode runtime flag is non-zero.
+ // Subsequent calls to this function will have no effect and end-of-process
+ // leak check will not run. Effectively, end-of-process leak check is moved to
+ // the time of first invocation of this function.
+ // By calling this function early during process shutdown, you can instruct
+ // LSan to ignore shutdown-only leaks which happen later on.
+ void __lsan_do_leak_check();
+
+ // Check for leaks now. Returns zero if no leaks have been found or if leak
+ // detection is disabled, non-zero otherwise.
+ // This function may be called repeatedly, e.g. to periodically check a
+ // long-running process. It prints a leak report if appropriate, but does not
+ // terminate the process. It does not affect the behavior of
+ // __lsan_do_leak_check() or the end-of-process leak check, and is not
+ // affected by them.
+ int __lsan_do_recoverable_leak_check();
+
+ // The user may optionally provide this function to disallow leak checking
+ // for the program it is linked into (if the return value is non-zero). This
+ // function must be defined as returning a constant value; any behavior beyond
+ // that is unsupported.
+ int __lsan_is_turned_off();
+
+ // This function may be optionally provided by the user and should return
+ // a string containing LSan suppressions.
+ const char *__lsan_default_suppressions();
+#ifdef __cplusplus
+} // extern "C"
+
+namespace __lsan {
+class ScopedDisabler {
+ public:
+ ScopedDisabler() { __lsan_disable(); }
+ ~ScopedDisabler() { __lsan_enable(); }
+};
+} // namespace __lsan
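+
+// Example (sketch): ScopedDisabler brackets a scope with
+// __lsan_disable()/__lsan_enable(), so allocations made inside it are
+// treated as non-leaks; "CreateIntentionalLeak" is a placeholder.
+//
+//   {
+//     __lsan::ScopedDisabler disabler;
+//     CreateIntentionalLeak();
+//   }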
+#endif
+
+#endif // SANITIZER_LSAN_INTERFACE_H
diff --git a/clang-2690385/lib64/clang/3.8/include/sanitizer/msan_interface.h b/clang-2690385/lib64/clang/3.8/include/sanitizer/msan_interface.h
new file mode 100644
index 00000000..6d6a3765
--- /dev/null
+++ b/clang-2690385/lib64/clang/3.8/include/sanitizer/msan_interface.h
@@ -0,0 +1,111 @@
+//===-- msan_interface.h --------------------------------------------------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file is a part of MemorySanitizer.
+//
+// Public interface header.
+//===----------------------------------------------------------------------===//
+#ifndef MSAN_INTERFACE_H
+#define MSAN_INTERFACE_H
+
+#include <sanitizer/common_interface_defs.h>
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+ /* Set raw origin for the memory range. */
+ void __msan_set_origin(const volatile void *a, size_t size, uint32_t origin);
+
+ /* Get raw origin for an address. */
+ uint32_t __msan_get_origin(const volatile void *a);
+
+ /* Test that this_id is a descendant of prev_id (or they are simply equal).
+ * "descendant" here means they are part of the same chain, created with
+ * __msan_chain_origin. */
+ int __msan_origin_is_descendant_or_same(uint32_t this_id, uint32_t prev_id);
+
+ /* Returns non-zero if tracking origins. */
+ int __msan_get_track_origins();
+
+ /* Returns the origin id of the latest UMR in the calling thread. */
+ uint32_t __msan_get_umr_origin();
+
+ /* Make memory region fully initialized (without changing its contents). */
+ void __msan_unpoison(const volatile void *a, size_t size);
+
+ /* Make a null-terminated string fully initialized (without changing its
+ contents). */
+ void __msan_unpoison_string(const volatile char *a);
+
+ /* Make memory region fully uninitialized (without changing its contents).
+ This is a legacy interface that does not update origin information. Use
+ __msan_allocated_memory() instead. */
+ void __msan_poison(const volatile void *a, size_t size);
+
+  /* Make memory region partially uninitialized (without changing its
+     contents). */
+ void __msan_partial_poison(const volatile void *data, void *shadow,
+ size_t size);
+
+ /* Returns the offset of the first (at least partially) poisoned byte in the
+ memory range, or -1 if the whole range is good. */
+ intptr_t __msan_test_shadow(const volatile void *x, size_t size);
+
+ /* Checks that memory range is fully initialized, and reports an error if it
+ * is not. */
+ void __msan_check_mem_is_initialized(const volatile void *x, size_t size);
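+
+  /* Example (sketch): assert that a buffer filled from an external source is
+     fully initialized before acting on it; "fd" and "buf" are placeholders.
+
+       ssize_t n = read(fd, buf, sizeof(buf));
+       if (n > 0) __msan_check_mem_is_initialized(buf, (size_t)n);
+  */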
+
+ /* For testing:
+ __msan_set_expect_umr(1);
+ ... some buggy code ...
+ __msan_set_expect_umr(0);
+ The last line will verify that a UMR happened. */
+ void __msan_set_expect_umr(int expect_umr);
+
+  /* Change the value of the keep_going flag. A non-zero value means don't
+     terminate program execution when an error is detected. This will not
+     affect errors in modules that were compiled without the corresponding
+     compiler flag. */
+ void __msan_set_keep_going(int keep_going);
+
+ /* Print shadow and origin for the memory range to stderr in a human-readable
+ format. */
+ void __msan_print_shadow(const volatile void *x, size_t size);
+
+ /* Print shadow for the memory range to stderr in a minimalistic
+ human-readable format. */
+ void __msan_dump_shadow(const volatile void *x, size_t size);
+
+  /* Returns true if running under a dynamic tool (DynamoRIO-based). */
+ int __msan_has_dynamic_component();
+
+  /* Tell MSan about newly allocated memory (e.g., from a custom allocator).
+     Memory will be marked uninitialized, with origin at the call site. */
+ void __msan_allocated_memory(const volatile void* data, size_t size);
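+
+  /* Example (sketch): a custom pool allocator marking a freshly carved-out
+     block as uninitialized; "pool_alloc" and "carve_from_pool" are
+     placeholders.
+
+       void *pool_alloc(size_t size) {
+         void *p = carve_from_pool(size);
+         __msan_allocated_memory(p, size);
+         return p;
+       }
+  */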
+
+ /* Tell MSan about newly destroyed memory. Mark memory as uninitialized. */
+ void __sanitizer_dtor_callback(const volatile void* data, size_t size);
+
+  /* This function may be optionally provided by the user and should return
+     a string containing MSan runtime options. See msan_flags.h for details. */
+ const char* __msan_default_options();
+
+ /* Deprecated. Call __sanitizer_set_death_callback instead. */
+ void __msan_set_death_callback(void (*callback)(void));
+
+ /* Update shadow for the application copy of size bytes from src to dst.
+ Src and dst are application addresses. This function does not copy the
+ actual application memory, it only updates shadow and origin for such
+ copy. Source and destination regions can overlap. */
+ void __msan_copy_shadow(const volatile void *dst, const volatile void *src,
+ size_t size);
+#ifdef __cplusplus
+} // extern "C"
+#endif
+
+#endif
diff --git a/clang-2690385/lib64/clang/3.8/include/sanitizer/tsan_interface_atomic.h b/clang-2690385/lib64/clang/3.8/include/sanitizer/tsan_interface_atomic.h
new file mode 100644
index 00000000..4ea77d22
--- /dev/null
+++ b/clang-2690385/lib64/clang/3.8/include/sanitizer/tsan_interface_atomic.h
@@ -0,0 +1,222 @@
+//===-- tsan_interface_atomic.h ---------------------------------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file is a part of ThreadSanitizer (TSan), a race detector.
+//
+// Public interface header for TSan atomics.
+//===----------------------------------------------------------------------===//
+#ifndef TSAN_INTERFACE_ATOMIC_H
+#define TSAN_INTERFACE_ATOMIC_H
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+typedef char __tsan_atomic8;
+typedef short __tsan_atomic16; // NOLINT
+typedef int __tsan_atomic32;
+typedef long __tsan_atomic64; // NOLINT
+#if defined(__SIZEOF_INT128__) \
+ || (__clang_major__ * 100 + __clang_minor__ >= 302)
+__extension__ typedef __int128 __tsan_atomic128;
+# define __TSAN_HAS_INT128 1
+#else
+# define __TSAN_HAS_INT128 0
+#endif
+
+// Part of ABI, do not change.
+// http://llvm.org/viewvc/llvm-project/libcxx/trunk/include/atomic?view=markup
+typedef enum {
+ __tsan_memory_order_relaxed,
+ __tsan_memory_order_consume,
+ __tsan_memory_order_acquire,
+ __tsan_memory_order_release,
+ __tsan_memory_order_acq_rel,
+ __tsan_memory_order_seq_cst
+} __tsan_memory_order;
+
+__tsan_atomic8 __tsan_atomic8_load(const volatile __tsan_atomic8 *a,
+ __tsan_memory_order mo);
+__tsan_atomic16 __tsan_atomic16_load(const volatile __tsan_atomic16 *a,
+ __tsan_memory_order mo);
+__tsan_atomic32 __tsan_atomic32_load(const volatile __tsan_atomic32 *a,
+ __tsan_memory_order mo);
+__tsan_atomic64 __tsan_atomic64_load(const volatile __tsan_atomic64 *a,
+ __tsan_memory_order mo);
+#if __TSAN_HAS_INT128
+__tsan_atomic128 __tsan_atomic128_load(const volatile __tsan_atomic128 *a,
+ __tsan_memory_order mo);
+#endif
+
+void __tsan_atomic8_store(volatile __tsan_atomic8 *a, __tsan_atomic8 v,
+ __tsan_memory_order mo);
+void __tsan_atomic16_store(volatile __tsan_atomic16 *a, __tsan_atomic16 v,
+ __tsan_memory_order mo);
+void __tsan_atomic32_store(volatile __tsan_atomic32 *a, __tsan_atomic32 v,
+ __tsan_memory_order mo);
+void __tsan_atomic64_store(volatile __tsan_atomic64 *a, __tsan_atomic64 v,
+ __tsan_memory_order mo);
+#if __TSAN_HAS_INT128
+void __tsan_atomic128_store(volatile __tsan_atomic128 *a, __tsan_atomic128 v,
+ __tsan_memory_order mo);
+#endif
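+
+// Example (sketch): a seq_cst store/load pair through this ABI; these calls
+// correspond to C11 atomic_store/atomic_load on an _Atomic(int).
+//
+//   __tsan_atomic32 flag = 0;
+//   __tsan_atomic32_store(&flag, 1, __tsan_memory_order_seq_cst);
+//   __tsan_atomic32 v =
+//       __tsan_atomic32_load(&flag, __tsan_memory_order_seq_cst);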
+
+__tsan_atomic8 __tsan_atomic8_exchange(volatile __tsan_atomic8 *a,
+ __tsan_atomic8 v, __tsan_memory_order mo);
+__tsan_atomic16 __tsan_atomic16_exchange(volatile __tsan_atomic16 *a,
+ __tsan_atomic16 v, __tsan_memory_order mo);
+__tsan_atomic32 __tsan_atomic32_exchange(volatile __tsan_atomic32 *a,
+ __tsan_atomic32 v, __tsan_memory_order mo);
+__tsan_atomic64 __tsan_atomic64_exchange(volatile __tsan_atomic64 *a,
+ __tsan_atomic64 v, __tsan_memory_order mo);
+#if __TSAN_HAS_INT128
+__tsan_atomic128 __tsan_atomic128_exchange(volatile __tsan_atomic128 *a,
+ __tsan_atomic128 v, __tsan_memory_order mo);
+#endif
+
+__tsan_atomic8 __tsan_atomic8_fetch_add(volatile __tsan_atomic8 *a,
+ __tsan_atomic8 v, __tsan_memory_order mo);
+__tsan_atomic16 __tsan_atomic16_fetch_add(volatile __tsan_atomic16 *a,
+ __tsan_atomic16 v, __tsan_memory_order mo);
+__tsan_atomic32 __tsan_atomic32_fetch_add(volatile __tsan_atomic32 *a,
+ __tsan_atomic32 v, __tsan_memory_order mo);
+__tsan_atomic64 __tsan_atomic64_fetch_add(volatile __tsan_atomic64 *a,
+ __tsan_atomic64 v, __tsan_memory_order mo);
+#if __TSAN_HAS_INT128
+__tsan_atomic128 __tsan_atomic128_fetch_add(volatile __tsan_atomic128 *a,
+ __tsan_atomic128 v, __tsan_memory_order mo);
+#endif
+
+__tsan_atomic8 __tsan_atomic8_fetch_sub(volatile __tsan_atomic8 *a,
+ __tsan_atomic8 v, __tsan_memory_order mo);
+__tsan_atomic16 __tsan_atomic16_fetch_sub(volatile __tsan_atomic16 *a,
+ __tsan_atomic16 v, __tsan_memory_order mo);
+__tsan_atomic32 __tsan_atomic32_fetch_sub(volatile __tsan_atomic32 *a,
+ __tsan_atomic32 v, __tsan_memory_order mo);
+__tsan_atomic64 __tsan_atomic64_fetch_sub(volatile __tsan_atomic64 *a,
+ __tsan_atomic64 v, __tsan_memory_order mo);
+#if __TSAN_HAS_INT128
+__tsan_atomic128 __tsan_atomic128_fetch_sub(volatile __tsan_atomic128 *a,
+ __tsan_atomic128 v, __tsan_memory_order mo);
+#endif
+
+__tsan_atomic8 __tsan_atomic8_fetch_and(volatile __tsan_atomic8 *a,
+ __tsan_atomic8 v, __tsan_memory_order mo);
+__tsan_atomic16 __tsan_atomic16_fetch_and(volatile __tsan_atomic16 *a,
+ __tsan_atomic16 v, __tsan_memory_order mo);
+__tsan_atomic32 __tsan_atomic32_fetch_and(volatile __tsan_atomic32 *a,
+ __tsan_atomic32 v, __tsan_memory_order mo);
+__tsan_atomic64 __tsan_atomic64_fetch_and(volatile __tsan_atomic64 *a,
+ __tsan_atomic64 v, __tsan_memory_order mo);
+#if __TSAN_HAS_INT128
+__tsan_atomic128 __tsan_atomic128_fetch_and(volatile __tsan_atomic128 *a,
+ __tsan_atomic128 v, __tsan_memory_order mo);
+#endif
+
+__tsan_atomic8 __tsan_atomic8_fetch_or(volatile __tsan_atomic8 *a,
+ __tsan_atomic8 v, __tsan_memory_order mo);
+__tsan_atomic16 __tsan_atomic16_fetch_or(volatile __tsan_atomic16 *a,
+ __tsan_atomic16 v, __tsan_memory_order mo);
+__tsan_atomic32 __tsan_atomic32_fetch_or(volatile __tsan_atomic32 *a,
+ __tsan_atomic32 v, __tsan_memory_order mo);
+__tsan_atomic64 __tsan_atomic64_fetch_or(volatile __tsan_atomic64 *a,
+ __tsan_atomic64 v, __tsan_memory_order mo);
+#if __TSAN_HAS_INT128
+__tsan_atomic128 __tsan_atomic128_fetch_or(volatile __tsan_atomic128 *a,
+ __tsan_atomic128 v, __tsan_memory_order mo);
+#endif
+
+__tsan_atomic8 __tsan_atomic8_fetch_xor(volatile __tsan_atomic8 *a,
+ __tsan_atomic8 v, __tsan_memory_order mo);
+__tsan_atomic16 __tsan_atomic16_fetch_xor(volatile __tsan_atomic16 *a,
+ __tsan_atomic16 v, __tsan_memory_order mo);
+__tsan_atomic32 __tsan_atomic32_fetch_xor(volatile __tsan_atomic32 *a,
+ __tsan_atomic32 v, __tsan_memory_order mo);
+__tsan_atomic64 __tsan_atomic64_fetch_xor(volatile __tsan_atomic64 *a,
+ __tsan_atomic64 v, __tsan_memory_order mo);
+#if __TSAN_HAS_INT128
+__tsan_atomic128 __tsan_atomic128_fetch_xor(volatile __tsan_atomic128 *a,
+ __tsan_atomic128 v, __tsan_memory_order mo);
+#endif
+
+__tsan_atomic8 __tsan_atomic8_fetch_nand(volatile __tsan_atomic8 *a,
+ __tsan_atomic8 v, __tsan_memory_order mo);
+__tsan_atomic16 __tsan_atomic16_fetch_nand(volatile __tsan_atomic16 *a,
+ __tsan_atomic16 v, __tsan_memory_order mo);
+__tsan_atomic32 __tsan_atomic32_fetch_nand(volatile __tsan_atomic32 *a,
+ __tsan_atomic32 v, __tsan_memory_order mo);
+__tsan_atomic64 __tsan_atomic64_fetch_nand(volatile __tsan_atomic64 *a,
+ __tsan_atomic64 v, __tsan_memory_order mo);
+#if __TSAN_HAS_INT128
+__tsan_atomic128 __tsan_atomic128_fetch_nand(volatile __tsan_atomic128 *a,
+ __tsan_atomic128 v, __tsan_memory_order mo);
+#endif
+
+int __tsan_atomic8_compare_exchange_weak(volatile __tsan_atomic8 *a,
+ __tsan_atomic8 *c, __tsan_atomic8 v, __tsan_memory_order mo,
+ __tsan_memory_order fail_mo);
+int __tsan_atomic16_compare_exchange_weak(volatile __tsan_atomic16 *a,
+ __tsan_atomic16 *c, __tsan_atomic16 v, __tsan_memory_order mo,
+ __tsan_memory_order fail_mo);
+int __tsan_atomic32_compare_exchange_weak(volatile __tsan_atomic32 *a,
+ __tsan_atomic32 *c, __tsan_atomic32 v, __tsan_memory_order mo,
+ __tsan_memory_order fail_mo);
+int __tsan_atomic64_compare_exchange_weak(volatile __tsan_atomic64 *a,
+ __tsan_atomic64 *c, __tsan_atomic64 v, __tsan_memory_order mo,
+ __tsan_memory_order fail_mo);
+#if __TSAN_HAS_INT128
+int __tsan_atomic128_compare_exchange_weak(volatile __tsan_atomic128 *a,
+ __tsan_atomic128 *c, __tsan_atomic128 v, __tsan_memory_order mo,
+ __tsan_memory_order fail_mo);
+#endif
+
+int __tsan_atomic8_compare_exchange_strong(volatile __tsan_atomic8 *a,
+ __tsan_atomic8 *c, __tsan_atomic8 v, __tsan_memory_order mo,
+ __tsan_memory_order fail_mo);
+int __tsan_atomic16_compare_exchange_strong(volatile __tsan_atomic16 *a,
+ __tsan_atomic16 *c, __tsan_atomic16 v, __tsan_memory_order mo,
+ __tsan_memory_order fail_mo);
+int __tsan_atomic32_compare_exchange_strong(volatile __tsan_atomic32 *a,
+ __tsan_atomic32 *c, __tsan_atomic32 v, __tsan_memory_order mo,
+ __tsan_memory_order fail_mo);
+int __tsan_atomic64_compare_exchange_strong(volatile __tsan_atomic64 *a,
+ __tsan_atomic64 *c, __tsan_atomic64 v, __tsan_memory_order mo,
+ __tsan_memory_order fail_mo);
+#if __TSAN_HAS_INT128
+int __tsan_atomic128_compare_exchange_strong(volatile __tsan_atomic128 *a,
+ __tsan_atomic128 *c, __tsan_atomic128 v, __tsan_memory_order mo,
+ __tsan_memory_order fail_mo);
+#endif
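+
+// Example (sketch): a CAS loop; like C11 atomic_compare_exchange_strong, a
+// failed exchange writes the observed value back into "expected".
+//
+//   __tsan_atomic32 expected = 0;
+//   while (!__tsan_atomic32_compare_exchange_strong(
+//              &flag, &expected, 1, __tsan_memory_order_acq_rel,
+//              __tsan_memory_order_acquire)) {
+//     // "expected" now holds the value observed in "flag"; retry or bail.
+//   }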
+
+__tsan_atomic8 __tsan_atomic8_compare_exchange_val(
+ volatile __tsan_atomic8 *a, __tsan_atomic8 c, __tsan_atomic8 v,
+ __tsan_memory_order mo, __tsan_memory_order fail_mo);
+__tsan_atomic16 __tsan_atomic16_compare_exchange_val(
+ volatile __tsan_atomic16 *a, __tsan_atomic16 c, __tsan_atomic16 v,
+ __tsan_memory_order mo, __tsan_memory_order fail_mo);
+__tsan_atomic32 __tsan_atomic32_compare_exchange_val(
+ volatile __tsan_atomic32 *a, __tsan_atomic32 c, __tsan_atomic32 v,
+ __tsan_memory_order mo, __tsan_memory_order fail_mo);
+__tsan_atomic64 __tsan_atomic64_compare_exchange_val(
+ volatile __tsan_atomic64 *a, __tsan_atomic64 c, __tsan_atomic64 v,
+ __tsan_memory_order mo, __tsan_memory_order fail_mo);
+#if __TSAN_HAS_INT128
+__tsan_atomic128 __tsan_atomic128_compare_exchange_val(
+ volatile __tsan_atomic128 *a, __tsan_atomic128 c, __tsan_atomic128 v,
+ __tsan_memory_order mo, __tsan_memory_order fail_mo);
+#endif
+
+void __tsan_atomic_thread_fence(__tsan_memory_order mo);
+void __tsan_atomic_signal_fence(__tsan_memory_order mo);
+
+#ifdef __cplusplus
+} // extern "C"
+#endif
+
+#endif // TSAN_INTERFACE_ATOMIC_H
diff --git a/clang-2690385/lib64/clang/3.8/include/shaintrin.h b/clang-2690385/lib64/clang/3.8/include/shaintrin.h
new file mode 100644
index 00000000..9b5d2180
--- /dev/null
+++ b/clang-2690385/lib64/clang/3.8/include/shaintrin.h
@@ -0,0 +1,75 @@
+/*===---- shaintrin.h - SHA intrinsics -------------------------------------===
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+ * THE SOFTWARE.
+ *
+ *===-----------------------------------------------------------------------===
+ */
+
+#ifndef __IMMINTRIN_H
+#error "Never use <shaintrin.h> directly; include <immintrin.h> instead."
+#endif
+
+#ifndef __SHAINTRIN_H
+#define __SHAINTRIN_H
+
+/* Define the default attributes for the functions in this file. */
+#define __DEFAULT_FN_ATTRS __attribute__((__always_inline__, __nodebug__, __target__("sha")))
+
+#define _mm_sha1rnds4_epu32(V1, V2, M) __extension__ ({ \
+ __builtin_ia32_sha1rnds4((__v4si)(__m128i)(V1), (__v4si)(__m128i)(V2), (M)); })
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_sha1nexte_epu32(__m128i __X, __m128i __Y)
+{
+ return (__m128i)__builtin_ia32_sha1nexte((__v4si)__X, (__v4si)__Y);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_sha1msg1_epu32(__m128i __X, __m128i __Y)
+{
+ return (__m128i)__builtin_ia32_sha1msg1((__v4si)__X, (__v4si)__Y);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_sha1msg2_epu32(__m128i __X, __m128i __Y)
+{
+ return (__m128i)__builtin_ia32_sha1msg2((__v4si)__X, (__v4si)__Y);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_sha256rnds2_epu32(__m128i __X, __m128i __Y, __m128i __Z)
+{
+ return (__m128i)__builtin_ia32_sha256rnds2((__v4si)__X, (__v4si)__Y, (__v4si)__Z);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_sha256msg1_epu32(__m128i __X, __m128i __Y)
+{
+ return (__m128i)__builtin_ia32_sha256msg1((__v4si)__X, (__v4si)__Y);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_sha256msg2_epu32(__m128i __X, __m128i __Y)
+{
+ return (__m128i)__builtin_ia32_sha256msg2((__v4si)__X, (__v4si)__Y);
+}
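+
+/* Example (sketch): one step of the SHA-256 message schedule built from the
+   helpers above; W0..W3 hold four message dwords each and are placeholders
+   (_mm_add_epi32 and _mm_alignr_epi8 come from the SSE2/SSSE3 headers).
+
+     __m128i t  = _mm_sha256msg1_epu32(W0, W1);
+     t          = _mm_add_epi32(t, _mm_alignr_epi8(W3, W2, 4));
+     __m128i W4 = _mm_sha256msg2_epu32(t, W3);  // next four schedule words
+*/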
+
+#undef __DEFAULT_FN_ATTRS
+
+#endif /* __SHAINTRIN_H */
diff --git a/clang-2690385/lib64/clang/3.8/include/smmintrin.h b/clang-2690385/lib64/clang/3.8/include/smmintrin.h
new file mode 100644
index 00000000..69ad07f4
--- /dev/null
+++ b/clang-2690385/lib64/clang/3.8/include/smmintrin.h
@@ -0,0 +1,508 @@
+/*===---- smmintrin.h - SSE4 intrinsics ------------------------------------===
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+ * THE SOFTWARE.
+ *
+ *===-----------------------------------------------------------------------===
+ */
+
+#ifndef _SMMINTRIN_H
+#define _SMMINTRIN_H
+
+#include <tmmintrin.h>
+
+/* Define the default attributes for the functions in this file. */
+#define __DEFAULT_FN_ATTRS __attribute__((__always_inline__, __nodebug__, __target__("sse4.1")))
+
+/* SSE4 Rounding macros. */
+#define _MM_FROUND_TO_NEAREST_INT 0x00
+#define _MM_FROUND_TO_NEG_INF 0x01
+#define _MM_FROUND_TO_POS_INF 0x02
+#define _MM_FROUND_TO_ZERO 0x03
+#define _MM_FROUND_CUR_DIRECTION 0x04
+
+#define _MM_FROUND_RAISE_EXC 0x00
+#define _MM_FROUND_NO_EXC 0x08
+
+#define _MM_FROUND_NINT (_MM_FROUND_RAISE_EXC | _MM_FROUND_TO_NEAREST_INT)
+#define _MM_FROUND_FLOOR (_MM_FROUND_RAISE_EXC | _MM_FROUND_TO_NEG_INF)
+#define _MM_FROUND_CEIL (_MM_FROUND_RAISE_EXC | _MM_FROUND_TO_POS_INF)
+#define _MM_FROUND_TRUNC (_MM_FROUND_RAISE_EXC | _MM_FROUND_TO_ZERO)
+#define _MM_FROUND_RINT (_MM_FROUND_RAISE_EXC | _MM_FROUND_CUR_DIRECTION)
+#define _MM_FROUND_NEARBYINT (_MM_FROUND_NO_EXC | _MM_FROUND_CUR_DIRECTION)
+
+#define _mm_ceil_ps(X) _mm_round_ps((X), _MM_FROUND_CEIL)
+#define _mm_ceil_pd(X) _mm_round_pd((X), _MM_FROUND_CEIL)
+#define _mm_ceil_ss(X, Y) _mm_round_ss((X), (Y), _MM_FROUND_CEIL)
+#define _mm_ceil_sd(X, Y) _mm_round_sd((X), (Y), _MM_FROUND_CEIL)
+
+#define _mm_floor_ps(X) _mm_round_ps((X), _MM_FROUND_FLOOR)
+#define _mm_floor_pd(X) _mm_round_pd((X), _MM_FROUND_FLOOR)
+#define _mm_floor_ss(X, Y) _mm_round_ss((X), (Y), _MM_FROUND_FLOOR)
+#define _mm_floor_sd(X, Y) _mm_round_sd((X), (Y), _MM_FROUND_FLOOR)
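+
+/* Example (sketch): per-lane rounding built on _mm_round_ps.
+
+     __m128 v  = _mm_set_ps(2.5f, -1.5f, 0.5f, -0.5f);
+     __m128 dn = _mm_floor_ps(v);  // each lane rounded toward -infinity
+     __m128 up = _mm_ceil_ps(v);   // each lane rounded toward +infinity
+*/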
+
+#define _mm_round_ps(X, M) __extension__ ({ \
+ (__m128)__builtin_ia32_roundps((__v4sf)(__m128)(X), (M)); })
+
+#define _mm_round_ss(X, Y, M) __extension__ ({ \
+ (__m128)__builtin_ia32_roundss((__v4sf)(__m128)(X), \
+ (__v4sf)(__m128)(Y), (M)); })
+
+#define _mm_round_pd(X, M) __extension__ ({ \
+ (__m128d)__builtin_ia32_roundpd((__v2df)(__m128d)(X), (M)); })
+
+#define _mm_round_sd(X, Y, M) __extension__ ({ \
+ (__m128d)__builtin_ia32_roundsd((__v2df)(__m128d)(X), \
+ (__v2df)(__m128d)(Y), (M)); })
+
+/* SSE4 Packed Blending Intrinsics. */
+#define _mm_blend_pd(V1, V2, M) __extension__ ({ \
+ (__m128d)__builtin_shufflevector((__v2df)(__m128d)(V1), \
+ (__v2df)(__m128d)(V2), \
+ (((M) & 0x01) ? 2 : 0), \
+ (((M) & 0x02) ? 3 : 1)); })
+
+#define _mm_blend_ps(V1, V2, M) __extension__ ({ \
+ (__m128)__builtin_shufflevector((__v4sf)(__m128)(V1), (__v4sf)(__m128)(V2), \
+ (((M) & 0x01) ? 4 : 0), \
+ (((M) & 0x02) ? 5 : 1), \
+ (((M) & 0x04) ? 6 : 2), \
+ (((M) & 0x08) ? 7 : 3)); })
+
+static __inline__ __m128d __DEFAULT_FN_ATTRS
+_mm_blendv_pd (__m128d __V1, __m128d __V2, __m128d __M)
+{
+ return (__m128d) __builtin_ia32_blendvpd ((__v2df)__V1, (__v2df)__V2,
+ (__v2df)__M);
+}
+
+static __inline__ __m128 __DEFAULT_FN_ATTRS
+_mm_blendv_ps (__m128 __V1, __m128 __V2, __m128 __M)
+{
+ return (__m128) __builtin_ia32_blendvps ((__v4sf)__V1, (__v4sf)__V2,
+ (__v4sf)__M);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_blendv_epi8 (__m128i __V1, __m128i __V2, __m128i __M)
+{
+ return (__m128i) __builtin_ia32_pblendvb128 ((__v16qi)__V1, (__v16qi)__V2,
+ (__v16qi)__M);
+}
+
+#define _mm_blend_epi16(V1, V2, M) __extension__ ({ \
+ (__m128i)__builtin_shufflevector((__v8hi)(__m128i)(V1), \
+ (__v8hi)(__m128i)(V2), \
+ (((M) & 0x01) ? 8 : 0), \
+ (((M) & 0x02) ? 9 : 1), \
+ (((M) & 0x04) ? 10 : 2), \
+ (((M) & 0x08) ? 11 : 3), \
+ (((M) & 0x10) ? 12 : 4), \
+ (((M) & 0x20) ? 13 : 5), \
+ (((M) & 0x40) ? 14 : 6), \
+ (((M) & 0x80) ? 15 : 7)); })
+
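+/* Example (sketch): a set bit in the immediate mask selects the lane from
+   the second operand, a clear bit keeps the lane from the first; "a" and
+   "b" are placeholders.
+
+     __m128 r = _mm_blend_ps(a, b, 0x5);  // lanes 0, 2 from b; 1, 3 from a
+*/
+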
+/* SSE4 Dword Multiply Instructions. */
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_mullo_epi32 (__m128i __V1, __m128i __V2)
+{
+ return (__m128i) ((__v4si)__V1 * (__v4si)__V2);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_mul_epi32 (__m128i __V1, __m128i __V2)
+{
+ return (__m128i) __builtin_ia32_pmuldq128 ((__v4si)__V1, (__v4si)__V2);
+}
+
+/* SSE4 Floating Point Dot Product Instructions. */
+#define _mm_dp_ps(X, Y, M) __extension__ ({ \
+ (__m128) __builtin_ia32_dpps((__v4sf)(__m128)(X), \
+ (__v4sf)(__m128)(Y), (M)); })
+
+#define _mm_dp_pd(X, Y, M) __extension__ ({\
+ (__m128d) __builtin_ia32_dppd((__v2df)(__m128d)(X), \
+ (__v2df)(__m128d)(Y), (M)); })
+
+/* SSE4 Streaming Load Hint Instruction. */
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_stream_load_si128 (__m128i const *__V)
+{
+ return (__m128i) __builtin_ia32_movntdqa ((const __v2di *) __V);
+}
+
+/* SSE4 Packed Integer Min/Max Instructions. */
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_min_epi8 (__m128i __V1, __m128i __V2)
+{
+ return (__m128i) __builtin_ia32_pminsb128 ((__v16qi) __V1, (__v16qi) __V2);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_max_epi8 (__m128i __V1, __m128i __V2)
+{
+ return (__m128i) __builtin_ia32_pmaxsb128 ((__v16qi) __V1, (__v16qi) __V2);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_min_epu16 (__m128i __V1, __m128i __V2)
+{
+ return (__m128i) __builtin_ia32_pminuw128 ((__v8hi) __V1, (__v8hi) __V2);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_max_epu16 (__m128i __V1, __m128i __V2)
+{
+ return (__m128i) __builtin_ia32_pmaxuw128 ((__v8hi) __V1, (__v8hi) __V2);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_min_epi32 (__m128i __V1, __m128i __V2)
+{
+ return (__m128i) __builtin_ia32_pminsd128 ((__v4si) __V1, (__v4si) __V2);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_max_epi32 (__m128i __V1, __m128i __V2)
+{
+ return (__m128i) __builtin_ia32_pmaxsd128 ((__v4si) __V1, (__v4si) __V2);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_min_epu32 (__m128i __V1, __m128i __V2)
+{
+ return (__m128i) __builtin_ia32_pminud128((__v4si) __V1, (__v4si) __V2);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_max_epu32 (__m128i __V1, __m128i __V2)
+{
+ return (__m128i) __builtin_ia32_pmaxud128((__v4si) __V1, (__v4si) __V2);
+}
+
+/* SSE4 Insertion and Extraction from XMM Register Instructions. */
+#define _mm_insert_ps(X, Y, N) __builtin_ia32_insertps128((X), (Y), (N))
+#define _mm_extract_ps(X, N) (__extension__ \
+ ({ union { int __i; float __f; } __t; \
+ __v4sf __a = (__v4sf)(__m128)(X); \
+ __t.__f = __a[(N) & 3]; \
+ __t.__i;}))
+
+/* Miscellaneous insert and extract macros. */
+/* Extract a single-precision float from X at index N into D. */
+#define _MM_EXTRACT_FLOAT(D, X, N) (__extension__ ({ __v4sf __a = (__v4sf)(X); \
+ (D) = __a[N]; }))
+
+/* Or together 2 sets of indexes (X and Y) with the zeroing bits (Z) to create
+ an index suitable for _mm_insert_ps. */
+#define _MM_MK_INSERTPS_NDX(X, Y, Z) (((X) << 6) | ((Y) << 4) | (Z))
+
+/* Extract a float from X at index N into the first index of the return. */
+#define _MM_PICK_OUT_PS(X, N) _mm_insert_ps (_mm_setzero_ps(), (X), \
+ _MM_MK_INSERTPS_NDX((N), 0, 0x0e))
+
+/* Insert int into packed integer array at index. */
+#define _mm_insert_epi8(X, I, N) (__extension__ \
+ ({ __v16qi __a = (__v16qi)(__m128i)(X); \
+ __a[(N) & 15] = (I); \
+ __a;}))
+#define _mm_insert_epi32(X, I, N) (__extension__ \
+ ({ __v4si __a = (__v4si)(__m128i)(X); \
+ __a[(N) & 3] = (I); \
+ __a;}))
+#ifdef __x86_64__
+#define _mm_insert_epi64(X, I, N) (__extension__ \
+ ({ __v2di __a = (__v2di)(__m128i)(X); \
+ __a[(N) & 1] = (I); \
+ __a;}))
+#endif /* __x86_64__ */
+
+/* Extract an int from a packed integer array at the given index. This returns
+ * the element as a zero-extended value, so it is unsigned.
+ */
+#define _mm_extract_epi8(X, N) (__extension__ \
+ ({ __v16qi __a = (__v16qi)(__m128i)(X); \
+ (int)(unsigned char) __a[(N) & 15];}))
+#define _mm_extract_epi32(X, N) (__extension__ \
+ ({ __v4si __a = (__v4si)(__m128i)(X); \
+ (int)__a[(N) & 3];}))
+#ifdef __x86_64__
+#define _mm_extract_epi64(X, N) (__extension__ \
+ ({ __v2di __a = (__v2di)(__m128i)(X); \
+ (long long)__a[(N) & 1];}))
+#endif /* __x86_64 */
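+
+/* Example (sketch): read one lane of a packed-int vector; note the macros
+   mask the index to the lane count instead of range-checking it.
+
+     __m128i v = _mm_set_epi32(40, 30, 20, 10);
+     int lane2 = _mm_extract_epi32(v, 2);  // 30
+*/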
+
+/* SSE4 128-bit Packed Integer Comparisons. */
+static __inline__ int __DEFAULT_FN_ATTRS
+_mm_testz_si128(__m128i __M, __m128i __V)
+{
+ return __builtin_ia32_ptestz128((__v2di)__M, (__v2di)__V);
+}
+
+static __inline__ int __DEFAULT_FN_ATTRS
+_mm_testc_si128(__m128i __M, __m128i __V)
+{
+ return __builtin_ia32_ptestc128((__v2di)__M, (__v2di)__V);
+}
+
+static __inline__ int __DEFAULT_FN_ATTRS
+_mm_testnzc_si128(__m128i __M, __m128i __V)
+{
+ return __builtin_ia32_ptestnzc128((__v2di)__M, (__v2di)__V);
+}
+
+#define _mm_test_all_ones(V) _mm_testc_si128((V), _mm_cmpeq_epi32((V), (V)))
+#define _mm_test_mix_ones_zeros(M, V) _mm_testnzc_si128((M), (V))
+#define _mm_test_all_zeros(M, V) _mm_testz_si128 ((M), (V))
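+
+/* Example (sketch): branch on whether any bit of v selected by mask is set.
+
+     if (_mm_test_all_zeros(mask, v))
+       handle_disjoint();  // (v & mask) == 0; "handle_disjoint" is a placeholder
+*/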
+
+/* SSE4 64-bit Packed Integer Comparisons. */
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_cmpeq_epi64(__m128i __V1, __m128i __V2)
+{
+ return (__m128i)((__v2di)__V1 == (__v2di)__V2);
+}
+
+/* SSE4 Packed Integer Sign-Extension. */
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_cvtepi8_epi16(__m128i __V)
+{
+ /* This function always performs a signed extension, but __v16qi is a char
+ which may be signed or unsigned, so use __v16qs. */
+ return (__m128i)__builtin_convertvector(__builtin_shufflevector((__v16qs)__V, (__v16qs)__V, 0, 1, 2, 3, 4, 5, 6, 7), __v8hi);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_cvtepi8_epi32(__m128i __V)
+{
+ /* This function always performs a signed extension, but __v16qi is a char
+ which may be signed or unsigned, so use __v16qs. */
+ return (__m128i)__builtin_convertvector(__builtin_shufflevector((__v16qs)__V, (__v16qs)__V, 0, 1, 2, 3), __v4si);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_cvtepi8_epi64(__m128i __V)
+{
+ /* This function always performs a signed extension, but __v16qi is a char
+ which may be signed or unsigned, so use __v16qs. */
+ typedef signed char __v16qs __attribute__((__vector_size__(16)));
+ return (__m128i)__builtin_convertvector(__builtin_shufflevector((__v16qs)__V, (__v16qs)__V, 0, 1), __v2di);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_cvtepi16_epi32(__m128i __V)
+{
+ return (__m128i)__builtin_convertvector(__builtin_shufflevector((__v8hi)__V, (__v8hi)__V, 0, 1, 2, 3), __v4si);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_cvtepi16_epi64(__m128i __V)
+{
+ return (__m128i)__builtin_convertvector(__builtin_shufflevector((__v8hi)__V, (__v8hi)__V, 0, 1), __v2di);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_cvtepi32_epi64(__m128i __V)
+{
+ return (__m128i)__builtin_convertvector(__builtin_shufflevector((__v4si)__V, (__v4si)__V, 0, 1), __v2di);
+}
+
+/* SSE4 Packed Integer Zero-Extension. */
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_cvtepu8_epi16(__m128i __V)
+{
+ return (__m128i) __builtin_ia32_pmovzxbw128((__v16qi) __V);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_cvtepu8_epi32(__m128i __V)
+{
+ return (__m128i) __builtin_ia32_pmovzxbd128((__v16qi)__V);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_cvtepu8_epi64(__m128i __V)
+{
+ return (__m128i) __builtin_ia32_pmovzxbq128((__v16qi)__V);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_cvtepu16_epi32(__m128i __V)
+{
+ return (__m128i) __builtin_ia32_pmovzxwd128((__v8hi)__V);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_cvtepu16_epi64(__m128i __V)
+{
+ return (__m128i) __builtin_ia32_pmovzxwq128((__v8hi)__V);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_cvtepu32_epi64(__m128i __V)
+{
+ return (__m128i) __builtin_ia32_pmovzxdq128((__v4si)__V);
+}
+
+/* SSE4 Pack with Unsigned Saturation. */
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_packus_epi32(__m128i __V1, __m128i __V2)
+{
+ return (__m128i) __builtin_ia32_packusdw128((__v4si)__V1, (__v4si)__V2);
+}
+
+/* SSE4 Multiple Packed Sums of Absolute Difference. */
+#define _mm_mpsadbw_epu8(X, Y, M) __extension__ ({ \
+ (__m128i) __builtin_ia32_mpsadbw128((__v16qi)(__m128i)(X), \
+ (__v16qi)(__m128i)(Y), (M)); })
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_minpos_epu16(__m128i __V)
+{
+ return (__m128i) __builtin_ia32_phminposuw128((__v8hi)__V);
+}
+
+/* Handle the sse4.2 definitions here. */
+
+/* These definitions are normally in nmmintrin.h, but gcc puts them here,
+   so we'll do the same. */
+
+#undef __DEFAULT_FN_ATTRS
+#define __DEFAULT_FN_ATTRS __attribute__((__always_inline__, __nodebug__, __target__("sse4.2")))
+
+/* These specify the type of data that we're comparing. */
+#define _SIDD_UBYTE_OPS 0x00
+#define _SIDD_UWORD_OPS 0x01
+#define _SIDD_SBYTE_OPS 0x02
+#define _SIDD_SWORD_OPS 0x03
+
+/* These specify the type of comparison operation. */
+#define _SIDD_CMP_EQUAL_ANY 0x00
+#define _SIDD_CMP_RANGES 0x04
+#define _SIDD_CMP_EQUAL_EACH 0x08
+#define _SIDD_CMP_EQUAL_ORDERED 0x0c
+
+/* These macros specify the polarity of the operation. */
+#define _SIDD_POSITIVE_POLARITY 0x00
+#define _SIDD_NEGATIVE_POLARITY 0x10
+#define _SIDD_MASKED_POSITIVE_POLARITY 0x20
+#define _SIDD_MASKED_NEGATIVE_POLARITY 0x30
+
+/* These macros are used in _mm_cmpXstri() to specify the return. */
+#define _SIDD_LEAST_SIGNIFICANT 0x00
+#define _SIDD_MOST_SIGNIFICANT 0x40
+
+/* These macros are used in _mm_cmpXstrm() to specify the return. */
+#define _SIDD_BIT_MASK 0x00
+#define _SIDD_UNIT_MASK 0x40
+
+/* SSE4.2 Packed Comparison Intrinsics. */
+#define _mm_cmpistrm(A, B, M) \
+ (__m128i)__builtin_ia32_pcmpistrm128((__v16qi)(__m128i)(A), \
+ (__v16qi)(__m128i)(B), (int)(M))
+#define _mm_cmpistri(A, B, M) \
+ (int)__builtin_ia32_pcmpistri128((__v16qi)(__m128i)(A), \
+ (__v16qi)(__m128i)(B), (int)(M))
+
+#define _mm_cmpestrm(A, LA, B, LB, M) \
+ (__m128i)__builtin_ia32_pcmpestrm128((__v16qi)(__m128i)(A), (int)(LA), \
+ (__v16qi)(__m128i)(B), (int)(LB), \
+ (int)(M))
+#define _mm_cmpestri(A, LA, B, LB, M) \
+ (int)__builtin_ia32_pcmpestri128((__v16qi)(__m128i)(A), (int)(LA), \
+ (__v16qi)(__m128i)(B), (int)(LB), \
+ (int)(M))
+
+/* SSE4.2 Packed Comparison Intrinsics and EFlag Reading. */
+#define _mm_cmpistra(A, B, M) \
+ (int)__builtin_ia32_pcmpistria128((__v16qi)(__m128i)(A), \
+ (__v16qi)(__m128i)(B), (int)(M))
+#define _mm_cmpistrc(A, B, M) \
+ (int)__builtin_ia32_pcmpistric128((__v16qi)(__m128i)(A), \
+ (__v16qi)(__m128i)(B), (int)(M))
+#define _mm_cmpistro(A, B, M) \
+ (int)__builtin_ia32_pcmpistrio128((__v16qi)(__m128i)(A), \
+ (__v16qi)(__m128i)(B), (int)(M))
+#define _mm_cmpistrs(A, B, M) \
+ (int)__builtin_ia32_pcmpistris128((__v16qi)(__m128i)(A), \
+ (__v16qi)(__m128i)(B), (int)(M))
+#define _mm_cmpistrz(A, B, M) \
+ (int)__builtin_ia32_pcmpistriz128((__v16qi)(__m128i)(A), \
+ (__v16qi)(__m128i)(B), (int)(M))
+
+#define _mm_cmpestra(A, LA, B, LB, M) \
+ (int)__builtin_ia32_pcmpestria128((__v16qi)(__m128i)(A), (int)(LA), \
+ (__v16qi)(__m128i)(B), (int)(LB), \
+ (int)(M))
+#define _mm_cmpestrc(A, LA, B, LB, M) \
+ (int)__builtin_ia32_pcmpestric128((__v16qi)(__m128i)(A), (int)(LA), \
+ (__v16qi)(__m128i)(B), (int)(LB), \
+ (int)(M))
+#define _mm_cmpestro(A, LA, B, LB, M) \
+ (int)__builtin_ia32_pcmpestrio128((__v16qi)(__m128i)(A), (int)(LA), \
+ (__v16qi)(__m128i)(B), (int)(LB), \
+ (int)(M))
+#define _mm_cmpestrs(A, LA, B, LB, M) \
+ (int)__builtin_ia32_pcmpestris128((__v16qi)(__m128i)(A), (int)(LA), \
+ (__v16qi)(__m128i)(B), (int)(LB), \
+ (int)(M))
+#define _mm_cmpestrz(A, LA, B, LB, M) \
+ (int)__builtin_ia32_pcmpestriz128((__v16qi)(__m128i)(A), (int)(LA), \
+ (__v16qi)(__m128i)(B), (int)(LB), \
+ (int)(M))
+
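+/* Example (sketch): index of the first non-digit byte in a 16-byte chunk,
+   using a ranges comparison with negated polarity; the result is 16 when
+   every byte matches. "chunk" is a placeholder __m128i holding the text.
+
+     __m128i range = _mm_setr_epi8('0', '9', 0, 0, 0, 0, 0, 0,
+                                   0, 0, 0, 0, 0, 0, 0, 0);
+     int idx = _mm_cmpistri(range, chunk,
+                            _SIDD_UBYTE_OPS | _SIDD_CMP_RANGES |
+                            _SIDD_NEGATIVE_POLARITY);
+*/
+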
+/* SSE4.2 Compare Packed Data -- Greater Than. */
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_cmpgt_epi64(__m128i __V1, __m128i __V2)
+{
+ return (__m128i)((__v2di)__V1 > (__v2di)__V2);
+}
+
+/* SSE4.2 Accumulate CRC32. */
+static __inline__ unsigned int __DEFAULT_FN_ATTRS
+_mm_crc32_u8(unsigned int __C, unsigned char __D)
+{
+ return __builtin_ia32_crc32qi(__C, __D);
+}
+
+static __inline__ unsigned int __DEFAULT_FN_ATTRS
+_mm_crc32_u16(unsigned int __C, unsigned short __D)
+{
+ return __builtin_ia32_crc32hi(__C, __D);
+}
+
+static __inline__ unsigned int __DEFAULT_FN_ATTRS
+_mm_crc32_u32(unsigned int __C, unsigned int __D)
+{
+ return __builtin_ia32_crc32si(__C, __D);
+}
+
+#ifdef __x86_64__
+static __inline__ unsigned long long __DEFAULT_FN_ATTRS
+_mm_crc32_u64(unsigned long long __C, unsigned long long __D)
+{
+ return __builtin_ia32_crc32di(__C, __D);
+}
+#endif /* __x86_64__ */
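+
+/* Example (sketch): these intrinsics implement the CRC-32C (Castagnoli)
+   polynomial; a whole-buffer checksum accumulates byte by byte.
+
+     unsigned int crc32c(const unsigned char *p, size_t n) {
+       unsigned int crc = 0xFFFFFFFFu;
+       for (size_t i = 0; i < n; ++i)
+         crc = _mm_crc32_u8(crc, p[i]);
+       return crc ^ 0xFFFFFFFFu;
+     }
+*/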
+
+#undef __DEFAULT_FN_ATTRS
+
+#ifdef __POPCNT__
+#include <popcntintrin.h>
+#endif
+
+#endif /* _SMMINTRIN_H */
diff --git a/clang-2690385/lib64/clang/3.8/include/stdalign.h b/clang-2690385/lib64/clang/3.8/include/stdalign.h
new file mode 100644
index 00000000..3738d128
--- /dev/null
+++ b/clang-2690385/lib64/clang/3.8/include/stdalign.h
@@ -0,0 +1,35 @@
+/*===---- stdalign.h - Standard header for alignment ------------------------===
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+ * THE SOFTWARE.
+ *
+ *===-----------------------------------------------------------------------===
+ */
+
+#ifndef __STDALIGN_H
+#define __STDALIGN_H
+
+#ifndef __cplusplus
+#define alignas _Alignas
+#define alignof _Alignof
+#endif
+
+#define __alignas_is_defined 1
+#define __alignof_is_defined 1
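+
+/* Example (sketch): C11 code can then use the C++-style spellings directly.
+
+     alignas(16) static char buf[64];        // 16-byte aligned storage
+     _Static_assert(alignof(long long) >= 4, "unexpected alignment");
+*/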
+
+#endif /* __STDALIGN_H */
diff --git a/clang-2690385/lib64/clang/3.8/include/stdarg.h b/clang-2690385/lib64/clang/3.8/include/stdarg.h
new file mode 100644
index 00000000..a57e1836
--- /dev/null
+++ b/clang-2690385/lib64/clang/3.8/include/stdarg.h
@@ -0,0 +1,52 @@
+/*===---- stdarg.h - Variable argument handling ----------------------------===
+ *
+ * Copyright (c) 2008 Eli Friedman
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+ * THE SOFTWARE.
+ *
+ *===-----------------------------------------------------------------------===
+ */
+
+#ifndef __STDARG_H
+#define __STDARG_H
+
+#ifndef _VA_LIST
+typedef __builtin_va_list va_list;
+#define _VA_LIST
+#endif
+#define va_start(ap, param) __builtin_va_start(ap, param)
+#define va_end(ap) __builtin_va_end(ap)
+#define va_arg(ap, type) __builtin_va_arg(ap, type)
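+
+/* Example (sketch): a variadic sum using the macros above.
+
+     int sum(int count, ...) {
+       va_list ap;
+       int total = 0;
+       va_start(ap, count);
+       while (count-- > 0)
+         total += va_arg(ap, int);
+       va_end(ap);
+       return total;
+     }
+*/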
+
+/* GCC always defines __va_copy, but only defines va_copy when compiling in
+ * C99 mode or when -ansi is not specified, since va_copy was not part of C90.
+ */
+#define __va_copy(d,s) __builtin_va_copy(d,s)
+
+#if __STDC_VERSION__ >= 199901L || __cplusplus >= 201103L || !defined(__STRICT_ANSI__)
+#define va_copy(dest, src) __builtin_va_copy(dest, src)
+#endif
+
+/* Hack required to make standard headers work, at least on Ubuntu */
+#ifndef __GNUC_VA_LIST
+#define __GNUC_VA_LIST 1
+#endif
+typedef __builtin_va_list __gnuc_va_list;
+
+#endif /* __STDARG_H */
diff --git a/clang-2690385/lib64/clang/3.8/include/stdatomic.h b/clang-2690385/lib64/clang/3.8/include/stdatomic.h
new file mode 100644
index 00000000..2c4f1ce3
--- /dev/null
+++ b/clang-2690385/lib64/clang/3.8/include/stdatomic.h
@@ -0,0 +1,582 @@
+/*-
+ * Copyright (c) 2011 Ed Schouten <ed@FreeBSD.org>
+ * David Chisnall <theraven@FreeBSD.org>
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * $FreeBSD$
+ */
+
+#ifndef _STDATOMIC_H_
+#define _STDATOMIC_H_
+
+#include <sys/cdefs.h>
+
+#if defined(__GNUC__) && !defined(__GNUC_PREREQ)
+/* Duplicate the definition here, since the mingw sys/cdefs.h omits it, and */
+/* this header needs to be usable there. */
+#define __GNUC_PREREQ(x, y) \
+ ((__GNUC__ == (x) && __GNUC_MINOR__ >= (y)) || (__GNUC__ > (x)))
+#endif /* __GNUC__ && ... */
+
+#if defined(__cplusplus) && __cplusplus >= 201103L && defined(_USING_LIBCXX)
+# ifdef __clang__
+# if __has_feature(cxx_atomic)
+# define _STDATOMIC_HAVE_ATOMIC
+# endif
+# else /* gcc */
+# if __GNUC_PREREQ(4, 7)
+# define _STDATOMIC_HAVE_ATOMIC
+# endif
+# endif
+#endif
+
+#ifdef _STDATOMIC_HAVE_ATOMIC
+
+/* We have a usable C++ <atomic>; use it instead. */
+
+#include <atomic>
+
+#undef _Atomic
+ /* Also defined by <atomic> for gcc. But not used in macros. */
+ /* Also a clang intrinsic. */
+ /* Should not be used by client code before this file is */
+ /* included. The definitions in <atomic> themselves see */
+ /* the old definition, as they should. */
+ /* Client code sees the following definition. */
+
+#define _Atomic(t) std::atomic<t>
+
+using std::atomic_is_lock_free;
+using std::atomic_init;
+using std::atomic_store;
+using std::atomic_store_explicit;
+using std::atomic_load;
+using std::atomic_load_explicit;
+using std::atomic_exchange;
+using std::atomic_exchange_explicit;
+using std::atomic_compare_exchange_strong;
+using std::atomic_compare_exchange_strong_explicit;
+using std::atomic_compare_exchange_weak;
+using std::atomic_compare_exchange_weak_explicit;
+using std::atomic_fetch_add;
+using std::atomic_fetch_add_explicit;
+using std::atomic_fetch_sub;
+using std::atomic_fetch_sub_explicit;
+using std::atomic_fetch_or;
+using std::atomic_fetch_or_explicit;
+using std::atomic_fetch_xor;
+using std::atomic_fetch_xor_explicit;
+using std::atomic_fetch_and;
+using std::atomic_fetch_and_explicit;
+using std::atomic_thread_fence;
+using std::atomic_signal_fence;
+
+using std::memory_order;
+using std::memory_order_relaxed;
+using std::memory_order_consume;
+using std::memory_order_acquire;
+using std::memory_order_release;
+using std::memory_order_acq_rel;
+using std::memory_order_seq_cst;
+
+using std::atomic_bool;
+using std::atomic_char;
+using std::atomic_schar;
+using std::atomic_uchar;
+using std::atomic_short;
+using std::atomic_ushort;
+using std::atomic_int;
+using std::atomic_uint;
+using std::atomic_long;
+using std::atomic_ulong;
+using std::atomic_llong;
+using std::atomic_ullong;
+using std::atomic_char16_t;
+using std::atomic_char32_t;
+using std::atomic_wchar_t;
+using std::atomic_int_least8_t;
+using std::atomic_uint_least8_t;
+using std::atomic_int_least16_t;
+using std::atomic_uint_least16_t;
+using std::atomic_int_least32_t;
+using std::atomic_uint_least32_t;
+using std::atomic_int_least64_t;
+using std::atomic_uint_least64_t;
+using std::atomic_int_fast8_t;
+using std::atomic_uint_fast8_t;
+using std::atomic_int_fast16_t;
+using std::atomic_uint_fast16_t;
+using std::atomic_int_fast32_t;
+using std::atomic_uint_fast32_t;
+using std::atomic_int_fast64_t;
+using std::atomic_uint_fast64_t;
+using std::atomic_intptr_t;
+using std::atomic_uintptr_t;
+using std::atomic_size_t;
+using std::atomic_ptrdiff_t;
+using std::atomic_intmax_t;
+using std::atomic_uintmax_t;
+
+#else /* <atomic> unavailable, possibly because this is C, not C++ */
+
+#include <sys/types.h>
+#include <stdbool.h>
+
+/*
+ * C: Do it ourselves.
+ * Note that the runtime representation defined here should be compatible
+ * with the C++ one, i.e. an _Atomic(T) needs to contain the same
+ * bits as a T.
+ */
+
+#include <stddef.h> /* For ptrdiff_t. */
+#include <stdint.h> /* TODO: Should pollute namespace less. */
+#if __STDC_VERSION__ >= 201112L
+# include <uchar.h> /* For char16_t and char32_t. */
+#endif
+
+#ifdef __clang__
+# if __has_extension(c_atomic) || __has_extension(cxx_atomic)
+# define __CLANG_ATOMICS
+# else
+# error "stdatomic.h does not support your compiler"
+# endif
+# if __has_builtin(__sync_swap)
+# define __HAS_BUILTIN_SYNC_SWAP
+# endif
+#else
+# if __GNUC_PREREQ(4, 7)
+# define __GNUC_ATOMICS
+# else
+# define __SYNC_ATOMICS
+# ifdef __cplusplus
+# define __ATOMICS_AVOID_DOT_INIT
+# endif
+# endif
+#endif
+
+/*
+ * 7.17.1 Atomic lock-free macros.
+ */
+
+#ifdef __GCC_ATOMIC_BOOL_LOCK_FREE
+#define ATOMIC_BOOL_LOCK_FREE __GCC_ATOMIC_BOOL_LOCK_FREE
+#elif defined(__SYNC_ATOMICS)
+#define ATOMIC_BOOL_LOCK_FREE 2 /* For all modern platforms */
+#endif
+#ifdef __GCC_ATOMIC_CHAR_LOCK_FREE
+#define ATOMIC_CHAR_LOCK_FREE __GCC_ATOMIC_CHAR_LOCK_FREE
+#elif defined(__SYNC_ATOMICS)
+#define ATOMIC_CHAR_LOCK_FREE 2
+#endif
+#ifdef __GCC_ATOMIC_CHAR16_T_LOCK_FREE
+#define ATOMIC_CHAR16_T_LOCK_FREE __GCC_ATOMIC_CHAR16_T_LOCK_FREE
+#elif defined(__SYNC_ATOMICS)
+#define ATOMIC_CHAR16_T_LOCK_FREE 2
+#endif
+#ifdef __GCC_ATOMIC_CHAR32_T_LOCK_FREE
+#define ATOMIC_CHAR32_T_LOCK_FREE __GCC_ATOMIC_CHAR32_T_LOCK_FREE
+#elif defined(__SYNC_ATOMICS)
+#define ATOMIC_CHAR32_T_LOCK_FREE 2
+#endif
+#ifdef __GCC_ATOMIC_WCHAR_T_LOCK_FREE
+#define ATOMIC_WCHAR_T_LOCK_FREE __GCC_ATOMIC_WCHAR_T_LOCK_FREE
+#elif defined(__SYNC_ATOMICS)
+#define ATOMIC_WCHAR_T_LOCK_FREE 2
+#endif
+#ifdef __GCC_ATOMIC_SHORT_LOCK_FREE
+#define ATOMIC_SHORT_LOCK_FREE __GCC_ATOMIC_SHORT_LOCK_FREE
+#elif defined(__SYNC_ATOMICS)
+#define ATOMIC_SHORT_LOCK_FREE 2
+#endif
+#ifdef __GCC_ATOMIC_INT_LOCK_FREE
+#define ATOMIC_INT_LOCK_FREE __GCC_ATOMIC_INT_LOCK_FREE
+#elif defined(__SYNC_ATOMICS)
+#define ATOMIC_INT_LOCK_FREE 2
+#endif
+#ifdef __GCC_ATOMIC_LONG_LOCK_FREE
+#define ATOMIC_LONG_LOCK_FREE __GCC_ATOMIC_LONG_LOCK_FREE
+#elif defined(__SYNC_ATOMICS)
+#define ATOMIC_LONG_LOCK_FREE 2
+#endif
+#ifdef __GCC_ATOMIC_LLONG_LOCK_FREE
+#define ATOMIC_LLONG_LOCK_FREE __GCC_ATOMIC_LLONG_LOCK_FREE
+#elif defined(__SYNC_ATOMICS)
+#define ATOMIC_LLONG_LOCK_FREE 1 /* maybe */
+#endif
+#ifdef __GCC_ATOMIC_POINTER_LOCK_FREE
+#define ATOMIC_POINTER_LOCK_FREE __GCC_ATOMIC_POINTER_LOCK_FREE
+#elif defined(__SYNC_ATOMICS)
+#define ATOMIC_POINTER_LOCK_FREE 2
+#endif
+
+/*
+ * 7.17.2 Initialization.
+ */
+
+#if defined(__CLANG_ATOMICS)
+#define ATOMIC_VAR_INIT(value) (value)
+#define atomic_init(obj, value) __c11_atomic_init(obj, value)
+#else
+#ifdef __ATOMICS_AVOID_DOT_INIT
+#define ATOMIC_VAR_INIT(value) { value }
+#else
+#define ATOMIC_VAR_INIT(value) { .__val = (value) }
+#endif
+#define atomic_init(obj, value) ((void)((obj)->__val = (value)))
+#endif
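+
+/*
+ * Illustrative usage (an addition for exposition, not part of the original
+ * header): ATOMIC_VAR_INIT is for static initializers, atomic_init() for
+ * storage that already exists. Note that per C11, initialization itself is
+ * not an atomic operation.
+ *
+ *	static atomic_int counter = ATOMIC_VAR_INIT(0);
+ *
+ *	void reset(atomic_int *c)	(hypothetical helper)
+ *	{
+ *		atomic_init(c, 0);
+ *	}
+ */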
+
+/*
+ * Clang and recent GCC both provide predefined macros for the memory
+ * orderings. If we are using a compiler that doesn't define them, use the
+ * Clang values; these will be ignored in the fallback path.
+ */
+
+#ifndef __ATOMIC_RELAXED
+#define __ATOMIC_RELAXED 0
+#endif
+#ifndef __ATOMIC_CONSUME
+#define __ATOMIC_CONSUME 1
+#endif
+#ifndef __ATOMIC_ACQUIRE
+#define __ATOMIC_ACQUIRE 2
+#endif
+#ifndef __ATOMIC_RELEASE
+#define __ATOMIC_RELEASE 3
+#endif
+#ifndef __ATOMIC_ACQ_REL
+#define __ATOMIC_ACQ_REL 4
+#endif
+#ifndef __ATOMIC_SEQ_CST
+#define __ATOMIC_SEQ_CST 5
+#endif
+
+/*
+ * 7.17.3 Order and consistency.
+ *
+ * The memory_order_* constants that denote the barrier behaviour of the
+ * atomic operations.
+ * The enum values must be identical to those used by the
+ * C++ <atomic> header.
+ */
+
+typedef enum {
+ memory_order_relaxed = __ATOMIC_RELAXED,
+ memory_order_consume = __ATOMIC_CONSUME,
+ memory_order_acquire = __ATOMIC_ACQUIRE,
+ memory_order_release = __ATOMIC_RELEASE,
+ memory_order_acq_rel = __ATOMIC_ACQ_REL,
+ memory_order_seq_cst = __ATOMIC_SEQ_CST
+} memory_order;
+
+/*
+ * 7.17.4 Fences.
+ */
+
+static __inline void
+atomic_thread_fence(memory_order __order __attribute__((unused)))
+{
+
+#ifdef __CLANG_ATOMICS
+ __c11_atomic_thread_fence(__order);
+#elif defined(__GNUC_ATOMICS)
+ __atomic_thread_fence(__order);
+#else
+ __sync_synchronize();
+#endif
+}
+
+static __inline void
+atomic_signal_fence(memory_order __order __attribute__((unused)))
+{
+
+#ifdef __CLANG_ATOMICS
+ __c11_atomic_signal_fence(__order);
+#elif defined(__GNUC_ATOMICS)
+ __atomic_signal_fence(__order);
+#else
+ __asm volatile ("" ::: "memory");
+#endif
+}
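+
+/*
+ * Illustrative usage (an addition for exposition, not part of the original
+ * header): given "int data;" and "atomic_int flag;" both initially zero, a
+ * release fence before a relaxed store pairs with a relaxed load followed
+ * by an acquire fence in another thread:
+ *
+ *	Thread 1:
+ *		data = 42;
+ *		atomic_thread_fence(memory_order_release);
+ *		atomic_store_explicit(&flag, 1, memory_order_relaxed);
+ *
+ *	Thread 2:
+ *		while (!atomic_load_explicit(&flag, memory_order_relaxed))
+ *			;
+ *		atomic_thread_fence(memory_order_acquire);
+ *		assert(data == 42);	(guaranteed to observe the store)
+ */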
+
+/*
+ * 7.17.5 Lock-free property.
+ */
+
+#if defined(_KERNEL)
+/* Atomics in kernelspace are always lock-free. */
+#define atomic_is_lock_free(obj) \
+ ((void)(obj), (_Bool)1)
+#elif defined(__CLANG_ATOMICS)
+#define atomic_is_lock_free(obj) \
+ __c11_atomic_is_lock_free(sizeof(*(obj)))
+#elif defined(__GNUC_ATOMICS)
+#define atomic_is_lock_free(obj) \
+ __atomic_is_lock_free(sizeof((obj)->__val), &(obj)->__val)
+#else
+#define atomic_is_lock_free(obj) \
+ ((void)(obj), sizeof((obj)->__val) <= sizeof(void *))
+#endif
+
+/*
+ * 7.17.6 Atomic integer types.
+ */
+
+#ifndef __CLANG_ATOMICS
+/*
+ * No native support for _Atomic(). Place object in structure to prevent
+ * most forms of direct non-atomic access.
+ */
+#define _Atomic(T) struct { T volatile __val; }
+#endif
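+
+/*
+ * Illustrative expansion (an addition for exposition, not part of the
+ * original header): with the fallback definition above,
+ *
+ *	_Atomic(int) x;
+ *
+ * declares x as an anonymous struct { int volatile __val; }, so plain
+ * accesses such as "x = 1" or "x + 1" fail to compile, steering callers
+ * toward the atomic_*() macros, which reach the payload through __val.
+ */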
+
+typedef _Atomic(bool) atomic_bool;
+typedef _Atomic(char) atomic_char;
+typedef _Atomic(signed char) atomic_schar;
+typedef _Atomic(unsigned char) atomic_uchar;
+typedef _Atomic(short) atomic_short;
+typedef _Atomic(unsigned short) atomic_ushort;
+typedef _Atomic(int) atomic_int;
+typedef _Atomic(unsigned int) atomic_uint;
+typedef _Atomic(long) atomic_long;
+typedef _Atomic(unsigned long) atomic_ulong;
+typedef _Atomic(long long) atomic_llong;
+typedef _Atomic(unsigned long long) atomic_ullong;
+#if __STDC_VERSION__ >= 201112L || __cplusplus >= 201103L
+ typedef _Atomic(char16_t) atomic_char16_t;
+ typedef _Atomic(char32_t) atomic_char32_t;
+#endif
+typedef _Atomic(wchar_t) atomic_wchar_t;
+typedef _Atomic(int_least8_t) atomic_int_least8_t;
+typedef _Atomic(uint_least8_t) atomic_uint_least8_t;
+typedef _Atomic(int_least16_t) atomic_int_least16_t;
+typedef _Atomic(uint_least16_t) atomic_uint_least16_t;
+typedef _Atomic(int_least32_t) atomic_int_least32_t;
+typedef _Atomic(uint_least32_t) atomic_uint_least32_t;
+typedef _Atomic(int_least64_t) atomic_int_least64_t;
+typedef _Atomic(uint_least64_t) atomic_uint_least64_t;
+typedef _Atomic(int_fast8_t) atomic_int_fast8_t;
+typedef _Atomic(uint_fast8_t) atomic_uint_fast8_t;
+typedef _Atomic(int_fast16_t) atomic_int_fast16_t;
+typedef _Atomic(uint_fast16_t) atomic_uint_fast16_t;
+typedef _Atomic(int_fast32_t) atomic_int_fast32_t;
+typedef _Atomic(uint_fast32_t) atomic_uint_fast32_t;
+typedef _Atomic(int_fast64_t) atomic_int_fast64_t;
+typedef _Atomic(uint_fast64_t) atomic_uint_fast64_t;
+typedef _Atomic(intptr_t) atomic_intptr_t;
+typedef _Atomic(uintptr_t) atomic_uintptr_t;
+typedef _Atomic(size_t) atomic_size_t;
+typedef _Atomic(ptrdiff_t) atomic_ptrdiff_t;
+typedef _Atomic(intmax_t) atomic_intmax_t;
+typedef _Atomic(uintmax_t) atomic_uintmax_t;
+
+/*
+ * 7.17.7 Operations on atomic types.
+ */
+
+/*
+ * Compiler-specific operations.
+ */
+
+#if defined(__CLANG_ATOMICS)
+#define atomic_compare_exchange_strong_explicit(object, expected, \
+ desired, success, failure) \
+ __c11_atomic_compare_exchange_strong(object, expected, desired, \
+ success, failure)
+#define atomic_compare_exchange_weak_explicit(object, expected, \
+ desired, success, failure) \
+ __c11_atomic_compare_exchange_weak(object, expected, desired, \
+ success, failure)
+#define atomic_exchange_explicit(object, desired, order) \
+ __c11_atomic_exchange(object, desired, order)
+#define atomic_fetch_add_explicit(object, operand, order) \
+ __c11_atomic_fetch_add(object, operand, order)
+#define atomic_fetch_and_explicit(object, operand, order) \
+ __c11_atomic_fetch_and(object, operand, order)
+#define atomic_fetch_or_explicit(object, operand, order) \
+ __c11_atomic_fetch_or(object, operand, order)
+#define atomic_fetch_sub_explicit(object, operand, order) \
+ __c11_atomic_fetch_sub(object, operand, order)
+#define atomic_fetch_xor_explicit(object, operand, order) \
+ __c11_atomic_fetch_xor(object, operand, order)
+#define atomic_load_explicit(object, order) \
+ __c11_atomic_load(object, order)
+#define atomic_store_explicit(object, desired, order) \
+ __c11_atomic_store(object, desired, order)
+#elif defined(__GNUC_ATOMICS)
+#define atomic_compare_exchange_strong_explicit(object, expected, \
+ desired, success, failure) \
+ __atomic_compare_exchange_n(&(object)->__val, expected, \
+ desired, 0, success, failure)
+#define atomic_compare_exchange_weak_explicit(object, expected, \
+ desired, success, failure) \
+ __atomic_compare_exchange_n(&(object)->__val, expected, \
+ desired, 1, success, failure)
+#define atomic_exchange_explicit(object, desired, order) \
+ __atomic_exchange_n(&(object)->__val, desired, order)
+#define atomic_fetch_add_explicit(object, operand, order) \
+ __atomic_fetch_add(&(object)->__val, operand, order)
+#define atomic_fetch_and_explicit(object, operand, order) \
+ __atomic_fetch_and(&(object)->__val, operand, order)
+#define atomic_fetch_or_explicit(object, operand, order) \
+ __atomic_fetch_or(&(object)->__val, operand, order)
+#define atomic_fetch_sub_explicit(object, operand, order) \
+ __atomic_fetch_sub(&(object)->__val, operand, order)
+#define atomic_fetch_xor_explicit(object, operand, order) \
+ __atomic_fetch_xor(&(object)->__val, operand, order)
+#define atomic_load_explicit(object, order) \
+ __atomic_load_n(&(object)->__val, order)
+#define atomic_store_explicit(object, desired, order) \
+ __atomic_store_n(&(object)->__val, desired, order)
+#else
+#define __atomic_apply_stride(object, operand) \
+ (((__typeof__((object)->__val))0) + (operand))
+#define atomic_compare_exchange_strong_explicit(object, expected, \
+ desired, success, failure) __extension__ ({ \
+ __typeof__(expected) __ep = (expected); \
+ __typeof__(*__ep) __e = *__ep; \
+ (void)(success); (void)(failure); \
+ (bool)((*__ep = __sync_val_compare_and_swap(&(object)->__val, \
+ __e, desired)) == __e); \
+})
+#define atomic_compare_exchange_weak_explicit(object, expected, \
+ desired, success, failure) \
+ atomic_compare_exchange_strong_explicit(object, expected, \
+ desired, success, failure)
+#ifdef __HAS_BUILTIN_SYNC_SWAP
+/* Clang provides a full-barrier atomic exchange - use it if available. */
+#define atomic_exchange_explicit(object, desired, order) \
+ ((void)(order), __sync_swap(&(object)->__val, desired))
+#else
+/*
+ * __sync_lock_test_and_set() is only an acquire barrier in theory (although
+ * in practice it is usually a full barrier), so we need an explicit full
+ * barrier before it.
+ */
+#define atomic_exchange_explicit(object, desired, order) \
+__extension__ ({ \
+ __typeof__(object) __o = (object); \
+ __typeof__(desired) __d = (desired); \
+ (void)(order); \
+ __sync_synchronize(); \
+ __sync_lock_test_and_set(&(__o)->__val, __d); \
+})
+#endif
+#define atomic_fetch_add_explicit(object, operand, order) \
+ ((void)(order), __sync_fetch_and_add(&(object)->__val, \
+ __atomic_apply_stride(object, operand)))
+#define atomic_fetch_and_explicit(object, operand, order) \
+ ((void)(order), __sync_fetch_and_and(&(object)->__val, operand))
+#define atomic_fetch_or_explicit(object, operand, order) \
+ ((void)(order), __sync_fetch_and_or(&(object)->__val, operand))
+#define atomic_fetch_sub_explicit(object, operand, order) \
+ ((void)(order), __sync_fetch_and_sub(&(object)->__val, \
+ __atomic_apply_stride(object, operand)))
+#define atomic_fetch_xor_explicit(object, operand, order) \
+ ((void)(order), __sync_fetch_and_xor(&(object)->__val, operand))
+#define atomic_load_explicit(object, order) \
+ ((void)(order), __sync_fetch_and_add(&(object)->__val, 0))
+#define atomic_store_explicit(object, desired, order) \
+ ((void)atomic_exchange_explicit(object, desired, order))
+#endif
+
+/*
+ * Convenience functions.
+ *
+ * Don't provide these in kernel space. In kernel space, we should be
+ * disciplined enough to always provide explicit barriers.
+ */
+
+#ifndef _KERNEL
+#define atomic_compare_exchange_strong(object, expected, desired) \
+ atomic_compare_exchange_strong_explicit(object, expected, \
+ desired, memory_order_seq_cst, memory_order_seq_cst)
+#define atomic_compare_exchange_weak(object, expected, desired) \
+ atomic_compare_exchange_weak_explicit(object, expected, \
+ desired, memory_order_seq_cst, memory_order_seq_cst)
+#define atomic_exchange(object, desired) \
+ atomic_exchange_explicit(object, desired, memory_order_seq_cst)
+#define atomic_fetch_add(object, operand) \
+ atomic_fetch_add_explicit(object, operand, memory_order_seq_cst)
+#define atomic_fetch_and(object, operand) \
+ atomic_fetch_and_explicit(object, operand, memory_order_seq_cst)
+#define atomic_fetch_or(object, operand) \
+ atomic_fetch_or_explicit(object, operand, memory_order_seq_cst)
+#define atomic_fetch_sub(object, operand) \
+ atomic_fetch_sub_explicit(object, operand, memory_order_seq_cst)
+#define atomic_fetch_xor(object, operand) \
+ atomic_fetch_xor_explicit(object, operand, memory_order_seq_cst)
+#define atomic_load(object) \
+ atomic_load_explicit(object, memory_order_seq_cst)
+#define atomic_store(object, desired) \
+ atomic_store_explicit(object, desired, memory_order_seq_cst)
+#endif /* !_KERNEL */
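+
+/*
+ * Illustrative usage (an addition for exposition, not part of the original
+ * header): the convenience forms above default every operation to
+ * memory_order_seq_cst:
+ *
+ *	atomic_int hits = ATOMIC_VAR_INIT(0);	(hypothetical counter)
+ *
+ *	atomic_fetch_add(&hits, 1);	(seq_cst read-modify-write)
+ *	int n = atomic_load(&hits);	(seq_cst load)
+ */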
+
+/*
+ * 7.17.8 Atomic flag type and operations.
+ *
+ * XXX: Assume atomic_bool can be used as an atomic_flag. Is there some
+ * kind of compiler built-in type we could use?
+ */
+
+typedef struct {
+ atomic_bool __flag;
+} atomic_flag;
+
+#define ATOMIC_FLAG_INIT { ATOMIC_VAR_INIT(false) }
+
+static __inline bool
+atomic_flag_test_and_set_explicit(volatile atomic_flag *__object,
+ memory_order __order)
+{
+ return (atomic_exchange_explicit(&__object->__flag, 1, __order));
+}
+
+static __inline void
+atomic_flag_clear_explicit(volatile atomic_flag *__object, memory_order __order)
+{
+
+ atomic_store_explicit(&__object->__flag, 0, __order);
+}
+
+#ifndef _KERNEL
+static __inline bool
+atomic_flag_test_and_set(volatile atomic_flag *__object)
+{
+
+ return (atomic_flag_test_and_set_explicit(__object,
+ memory_order_seq_cst));
+}
+
+static __inline void
+atomic_flag_clear(volatile atomic_flag *__object)
+{
+
+ atomic_flag_clear_explicit(__object, memory_order_seq_cst);
+}
+#endif /* !_KERNEL */
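+
+/*
+ * Illustrative usage (an addition for exposition, not part of the original
+ * header): atomic_flag is the minimal building block for a test-and-set
+ * spinlock:
+ *
+ *	static atomic_flag lock = ATOMIC_FLAG_INIT;	(hypothetical lock)
+ *
+ *	while (atomic_flag_test_and_set_explicit(&lock, memory_order_acquire))
+ *		;	(spin until the previous value was clear)
+ *	... critical section ...
+ *	atomic_flag_clear_explicit(&lock, memory_order_release);
+ */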
+
+#endif /* <atomic> unavailable */
+
+#endif /* !_STDATOMIC_H_ */
diff --git a/clang-2690385/lib64/clang/3.8/include/stdbool.h b/clang-2690385/lib64/clang/3.8/include/stdbool.h
new file mode 100644
index 00000000..0467893f
--- /dev/null
+++ b/clang-2690385/lib64/clang/3.8/include/stdbool.h
@@ -0,0 +1,44 @@
+/*===---- stdbool.h - Standard header for booleans -------------------------===
+ *
+ * Copyright (c) 2008 Eli Friedman
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+ * THE SOFTWARE.
+ *
+ *===-----------------------------------------------------------------------===
+ */
+
+#ifndef __STDBOOL_H
+#define __STDBOOL_H
+
+/* Don't define bool, true, and false in C++, except as a GNU extension. */
+#ifndef __cplusplus
+#define bool _Bool
+#define true 1
+#define false 0
+#elif defined(__GNUC__) && !defined(__STRICT_ANSI__)
+/* Define _Bool, bool, false, true as a GNU extension. */
+#define _Bool bool
+#define bool bool
+#define false false
+#define true true
+#endif
+
+#define __bool_true_false_are_defined 1
+
+#endif /* __STDBOOL_H */
diff --git a/clang-2690385/lib64/clang/3.8/include/stddef.h b/clang-2690385/lib64/clang/3.8/include/stddef.h
new file mode 100644
index 00000000..73549967
--- /dev/null
+++ b/clang-2690385/lib64/clang/3.8/include/stddef.h
@@ -0,0 +1,137 @@
+/*===---- stddef.h - Basic type definitions --------------------------------===
+ *
+ * Copyright (c) 2008 Eli Friedman
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+ * THE SOFTWARE.
+ *
+ *===-----------------------------------------------------------------------===
+ */
+
+#if !defined(__STDDEF_H) || defined(__need_ptrdiff_t) || \
+ defined(__need_size_t) || defined(__need_wchar_t) || \
+ defined(__need_NULL) || defined(__need_wint_t)
+
+#if !defined(__need_ptrdiff_t) && !defined(__need_size_t) && \
+ !defined(__need_wchar_t) && !defined(__need_NULL) && \
+ !defined(__need_wint_t)
+/* Always define miscellaneous pieces when modules are available. */
+#if !__has_feature(modules)
+#define __STDDEF_H
+#endif
+#define __need_ptrdiff_t
+#define __need_size_t
+#define __need_wchar_t
+#define __need_NULL
+#define __need_STDDEF_H_misc
+/* __need_wint_t is intentionally not defined here. */
+#endif
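+
+/* Illustrative usage (an addition for exposition, not part of the original
+ * header): a libc header that needs only size_t can request just that piece:
+ *
+ *	#define __need_size_t
+ *	#include <stddef.h>	(defines size_t and nothing else)
+ *
+ * Because __STDDEF_H is only defined on a full inclusion, a later plain
+ * #include <stddef.h> still provides the remaining definitions. */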
+
+#if defined(__need_ptrdiff_t)
+#if !defined(_PTRDIFF_T) || __has_feature(modules)
+/* Always define ptrdiff_t when modules are available. */
+#if !__has_feature(modules)
+#define _PTRDIFF_T
+#endif
+typedef __PTRDIFF_TYPE__ ptrdiff_t;
+#endif
+#undef __need_ptrdiff_t
+#endif /* defined(__need_ptrdiff_t) */
+
+#if defined(__need_size_t)
+#if !defined(_SIZE_T) || __has_feature(modules)
+/* Always define size_t when modules are available. */
+#if !__has_feature(modules)
+#define _SIZE_T
+#endif
+typedef __SIZE_TYPE__ size_t;
+#endif
+#undef __need_size_t
+#endif /* defined(__need_size_t) */
+
+#if defined(__need_STDDEF_H_misc)
+/* ISO9899:2011 7.20 (C11 Annex K): Define rsize_t if __STDC_WANT_LIB_EXT1__ is
+ * enabled. */
+#if (defined(__STDC_WANT_LIB_EXT1__) && __STDC_WANT_LIB_EXT1__ >= 1 && \
+ !defined(_RSIZE_T)) || __has_feature(modules)
+/* Always define rsize_t when modules are available. */
+#if !__has_feature(modules)
+#define _RSIZE_T
+#endif
+typedef __SIZE_TYPE__ rsize_t;
+#endif
+#endif /* defined(__need_STDDEF_H_misc) */
+
+#if defined(__need_wchar_t)
+#ifndef __cplusplus
+/* Always define wchar_t when modules are available. */
+#if !defined(_WCHAR_T) || __has_feature(modules)
+#if !__has_feature(modules)
+#define _WCHAR_T
+#if defined(_MSC_EXTENSIONS)
+#define _WCHAR_T_DEFINED
+#endif
+#endif
+typedef __WCHAR_TYPE__ wchar_t;
+#endif
+#endif
+#undef __need_wchar_t
+#endif /* defined(__need_wchar_t) */
+
+#if defined(__need_NULL)
+#undef NULL
+#ifdef __cplusplus
+# if !defined(__MINGW32__) && !defined(_MSC_VER)
+# define NULL __null
+# else
+# define NULL 0
+# endif
+#else
+# define NULL ((void*)0)
+#endif
+#ifdef __cplusplus
+#if defined(_MSC_EXTENSIONS) && defined(_NATIVE_NULLPTR_SUPPORTED)
+namespace std { typedef decltype(nullptr) nullptr_t; }
+using ::std::nullptr_t;
+#endif
+#endif
+#undef __need_NULL
+#endif /* defined(__need_NULL) */
+
+#if defined(__need_STDDEF_H_misc)
+#if __STDC_VERSION__ >= 201112L || __cplusplus >= 201103L
+#include "__stddef_max_align_t.h"
+#endif
+#define offsetof(t, d) __builtin_offsetof(t, d)
+#undef __need_STDDEF_H_misc
+#endif /* defined(__need_STDDEF_H_misc) */
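+
+/* Illustrative usage (an addition for exposition, not part of the original
+ * header): offsetof() yields the byte offset of a member as an integer
+ * constant expression, here with a hypothetical struct:
+ *
+ *	struct hdr { char tag; int len; };
+ *	size_t off = offsetof(struct hdr, len);	(typically 4)
+ */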
+
+/* Some C libraries expect to see a wint_t here. Others (notably MinGW) will
+ * use __WINT_TYPE__ directly; accommodate both by requiring __need_wint_t. */
+#if defined(__need_wint_t)
+/* Always define wint_t when modules are available. */
+#if !defined(_WINT_T) || __has_feature(modules)
+#if !__has_feature(modules)
+#define _WINT_T
+#endif
+typedef __WINT_TYPE__ wint_t;
+#endif
+#undef __need_wint_t
+#endif /* __need_wint_t */
+
+#endif
diff --git a/clang-2690385/lib64/clang/3.8/include/stdint.h b/clang-2690385/lib64/clang/3.8/include/stdint.h
new file mode 100644
index 00000000..3f2fcbc5
--- /dev/null
+++ b/clang-2690385/lib64/clang/3.8/include/stdint.h
@@ -0,0 +1,707 @@
+/*===---- stdint.h - Standard header for sized integer types --------------===*\
+ *
+ * Copyright (c) 2009 Chris Lattner
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+ * THE SOFTWARE.
+ *
+\*===----------------------------------------------------------------------===*/
+
+#ifndef __CLANG_STDINT_H
+#define __CLANG_STDINT_H
+
+/* If we're hosted, fall back to the system's stdint.h, which might have
+ * additional definitions.
+ */
+#if __STDC_HOSTED__ && __has_include_next(<stdint.h>)
+
+// C99 7.18.3 Limits of other integer types
+//
+// Footnote 219, 220: C++ implementations should define these macros only when
+// __STDC_LIMIT_MACROS is defined before <stdint.h> is included.
+//
+// Footnote 222: C++ implementations should define these macros only when
+// __STDC_CONSTANT_MACROS is defined before <stdint.h> is included.
+//
+// C++11 [cstdint.syn]p2:
+//
+// The macros defined by <cstdint> are provided unconditionally. In particular,
+// the symbols __STDC_LIMIT_MACROS and __STDC_CONSTANT_MACROS (mentioned in
+// footnotes 219, 220, and 222 in the C standard) play no role in C++.
+//
+// C11 removed the problematic footnotes.
+//
+// Work around this inconsistency by always defining those macros in C++ mode,
+// so that a C library implementation which follows the C99 standard can be
+// used in C++.
+# ifdef __cplusplus
+# if !defined(__STDC_LIMIT_MACROS)
+# define __STDC_LIMIT_MACROS
+# define __STDC_LIMIT_MACROS_DEFINED_BY_CLANG
+# endif
+# if !defined(__STDC_CONSTANT_MACROS)
+# define __STDC_CONSTANT_MACROS
+# define __STDC_CONSTANT_MACROS_DEFINED_BY_CLANG
+# endif
+# endif
+
+# include_next <stdint.h>
+
+# ifdef __STDC_LIMIT_MACROS_DEFINED_BY_CLANG
+# undef __STDC_LIMIT_MACROS
+# undef __STDC_LIMIT_MACROS_DEFINED_BY_CLANG
+# endif
+# ifdef __STDC_CONSTANT_MACROS_DEFINED_BY_CLANG
+# undef __STDC_CONSTANT_MACROS
+# undef __STDC_CONSTANT_MACROS_DEFINED_BY_CLANG
+# endif
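+
+// Illustrative effect (an addition for exposition, not part of the original
+// header): because the guard macros are force-defined above, a C++11
+// translation unit gets the limit and constant macros unconditionally:
+//
+//	#include <stdint.h>
+//	int64_t big = INT64_C(1) << 40;
+//	static_assert(INT32_MAX == 2147483647, "");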
+
+#else
+
+/* C99 7.18.1.1 Exact-width integer types.
+ * C99 7.18.1.2 Minimum-width integer types.
+ * C99 7.18.1.3 Fastest minimum-width integer types.
+ *
+ * The standard requires that exact-width types be defined for 8-, 16-, 32-, and
+ * 64-bit types if they are implemented. Other exact-width types are optional.
+ * This implementation defines an exact-width type for every integer width
+ * that is represented in the standard integer types.
+ *
+ * The standard also requires minimum-width types be defined for 8-, 16-, 32-,
+ * and 64-bit widths regardless of whether there are corresponding exact-width
+ * types.
+ *
+ * To accommodate targets that are missing types that are exactly 8, 16, 32, or
+ * 64 bits wide, this implementation takes an approach of cascading
+ * redefinitions, redefining __int_leastN_t to successively smaller exact-width
+ * types. It is therefore important that the types are defined in order of
+ * descending widths.
+ *
+ * We currently assume that the minimum-width types and the fastest
+ * minimum-width types are the same. This is allowed by the standard, but is
+ * suboptimal.
+ *
+ * In violation of the standard, some targets do not implement a type that is
+ * wide enough to represent all of the required widths (8-, 16-, 32-, 64-bit).
+ * To accommodate these targets, a required minimum-width type is only
+ * defined if there exists an exact-width type of equal or greater width.
+ */
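+
+/* Illustrative consequence (an addition for exposition, not part of the
+ * original header): on a hypothetical target whose narrowest integer type is
+ * 32 bits, __INT16_TYPE__ and __INT8_TYPE__ are not predefined, so the
+ * cascade below leaves __int_least16_t and __int_least8_t defined as int32_t:
+ *
+ *	int_least16_t v;	(really an int32_t on that target)
+ *	int16_t w;		(error: int16_t does not exist there)
+ */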
+
+#ifdef __INT64_TYPE__
+# ifndef __int8_t_defined /* glibc sys/types.h also defines int64_t */
+typedef __INT64_TYPE__ int64_t;
+# endif /* __int8_t_defined */
+typedef __UINT64_TYPE__ uint64_t;
+# define __int_least64_t int64_t
+# define __uint_least64_t uint64_t
+# define __int_least32_t int64_t
+# define __uint_least32_t uint64_t
+# define __int_least16_t int64_t
+# define __uint_least16_t uint64_t
+# define __int_least8_t int64_t
+# define __uint_least8_t uint64_t
+#endif /* __INT64_TYPE__ */
+
+#ifdef __int_least64_t
+typedef __int_least64_t int_least64_t;
+typedef __uint_least64_t uint_least64_t;
+typedef __int_least64_t int_fast64_t;
+typedef __uint_least64_t uint_fast64_t;
+#endif /* __int_least64_t */
+
+#ifdef __INT56_TYPE__
+typedef __INT56_TYPE__ int56_t;
+typedef __UINT56_TYPE__ uint56_t;
+typedef int56_t int_least56_t;
+typedef uint56_t uint_least56_t;
+typedef int56_t int_fast56_t;
+typedef uint56_t uint_fast56_t;
+# define __int_least32_t int56_t
+# define __uint_least32_t uint56_t
+# define __int_least16_t int56_t
+# define __uint_least16_t uint56_t
+# define __int_least8_t int56_t
+# define __uint_least8_t uint56_t
+#endif /* __INT56_TYPE__ */
+
+
+#ifdef __INT48_TYPE__
+typedef __INT48_TYPE__ int48_t;
+typedef __UINT48_TYPE__ uint48_t;
+typedef int48_t int_least48_t;
+typedef uint48_t uint_least48_t;
+typedef int48_t int_fast48_t;
+typedef uint48_t uint_fast48_t;
+# define __int_least32_t int48_t
+# define __uint_least32_t uint48_t
+# define __int_least16_t int48_t
+# define __uint_least16_t uint48_t
+# define __int_least8_t int48_t
+# define __uint_least8_t uint48_t
+#endif /* __INT48_TYPE__ */
+
+
+#ifdef __INT40_TYPE__
+typedef __INT40_TYPE__ int40_t;
+typedef __UINT40_TYPE__ uint40_t;
+typedef int40_t int_least40_t;
+typedef uint40_t uint_least40_t;
+typedef int40_t int_fast40_t;
+typedef uint40_t uint_fast40_t;
+# define __int_least32_t int40_t
+# define __uint_least32_t uint40_t
+# define __int_least16_t int40_t
+# define __uint_least16_t uint40_t
+# define __int_least8_t int40_t
+# define __uint_least8_t uint40_t
+#endif /* __INT40_TYPE__ */
+
+
+#ifdef __INT32_TYPE__
+
+# ifndef __int8_t_defined /* glibc sys/types.h also defines int32_t */
+typedef __INT32_TYPE__ int32_t;
+# endif /* __int8_t_defined */
+
+# ifndef __uint32_t_defined /* more glibc compatibility */
+# define __uint32_t_defined
+typedef __UINT32_TYPE__ uint32_t;
+# endif /* __uint32_t_defined */
+
+# define __int_least32_t int32_t
+# define __uint_least32_t uint32_t
+# define __int_least16_t int32_t
+# define __uint_least16_t uint32_t
+# define __int_least8_t int32_t
+# define __uint_least8_t uint32_t
+#endif /* __INT32_TYPE__ */
+
+#ifdef __int_least32_t
+typedef __int_least32_t int_least32_t;
+typedef __uint_least32_t uint_least32_t;
+typedef __int_least32_t int_fast32_t;
+typedef __uint_least32_t uint_fast32_t;
+#endif /* __int_least32_t */
+
+#ifdef __INT24_TYPE__
+typedef __INT24_TYPE__ int24_t;
+typedef __UINT24_TYPE__ uint24_t;
+typedef int24_t int_least24_t;
+typedef uint24_t uint_least24_t;
+typedef int24_t int_fast24_t;
+typedef uint24_t uint_fast24_t;
+# define __int_least16_t int24_t
+# define __uint_least16_t uint24_t
+# define __int_least8_t int24_t
+# define __uint_least8_t uint24_t
+#endif /* __INT24_TYPE__ */
+
+#ifdef __INT16_TYPE__
+#ifndef __int8_t_defined /* glibc sys/types.h also defines int16_t */
+typedef __INT16_TYPE__ int16_t;
+#endif /* __int8_t_defined */
+typedef __UINT16_TYPE__ uint16_t;
+# define __int_least16_t int16_t
+# define __uint_least16_t uint16_t
+# define __int_least8_t int16_t
+# define __uint_least8_t uint16_t
+#endif /* __INT16_TYPE__ */
+
+#ifdef __int_least16_t
+typedef __int_least16_t int_least16_t;
+typedef __uint_least16_t uint_least16_t;
+typedef __int_least16_t int_fast16_t;
+typedef __uint_least16_t uint_fast16_t;
+#endif /* __int_least16_t */
+
+
+#ifdef __INT8_TYPE__
+#ifndef __int8_t_defined /* glibc sys/types.h also defines int8_t */
+typedef __INT8_TYPE__ int8_t;
+#endif /* __int8_t_defined */
+typedef __UINT8_TYPE__ uint8_t;
+# define __int_least8_t int8_t
+# define __uint_least8_t uint8_t
+#endif /* __INT8_TYPE__ */
+
+#ifdef __int_least8_t
+typedef __int_least8_t int_least8_t;
+typedef __uint_least8_t uint_least8_t;
+typedef __int_least8_t int_fast8_t;
+typedef __uint_least8_t uint_fast8_t;
+#endif /* __int_least8_t */
+
+/* prevent glibc sys/types.h from defining conflicting types */
+#ifndef __int8_t_defined
+# define __int8_t_defined
+#endif /* __int8_t_defined */
+
+/* C99 7.18.1.4 Integer types capable of holding object pointers.
+ */
+#define __stdint_join3(a,b,c) a ## b ## c
+
+#define __intn_t(n) __stdint_join3( int, n, _t)
+#define __uintn_t(n) __stdint_join3(uint, n, _t)
+
+#ifndef _INTPTR_T
+#ifndef __intptr_t_defined
+typedef __intn_t(__INTPTR_WIDTH__) intptr_t;
+#define __intptr_t_defined
+#define _INTPTR_T
+#endif
+#endif
+
+#ifndef _UINTPTR_T
+typedef __uintn_t(__INTPTR_WIDTH__) uintptr_t;
+#define _UINTPTR_T
+#endif
+
+/* C99 7.18.1.5 Greatest-width integer types.
+ */
+typedef __INTMAX_TYPE__ intmax_t;
+typedef __UINTMAX_TYPE__ uintmax_t;
+
+/* C99 7.18.4 Macros for minimum-width integer constants.
+ *
+ * The standard requires that integer constant macros be defined for all the
+ * minimum-width types defined above. As 8-, 16-, 32-, and 64-bit minimum-width
+ * types are required, the corresponding integer constant macros are defined
+ * here. This implementation also defines minimum-width types for every other
+ * integer width that the target implements, so corresponding macros are
+ * defined below, too.
+ *
+ * These macros are defined using the same successive-shrinking approach as
+ * the type definitions above. It is likewise important that macros are defined
+ * in order of descending width.
+ *
+ * Note that C++ should not check __STDC_CONSTANT_MACROS here, contrary to the
+ * claims of the C standard (see C++ 18.3.1p2, [cstdint.syn]).
+ */
+
+#define __int_c_join(a, b) a ## b
+#define __int_c(v, suffix) __int_c_join(v, suffix)
+#define __uint_c(v, suffix) __int_c_join(v##U, suffix)
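+
+/* Illustrative expansion (an addition for exposition, not part of the
+ * original header): if __INT64_C_SUFFIX__ is LL, the helpers above paste
+ *
+ *	__int_c(100, LL)	->  100LL
+ *	__uint_c(100, LL)	->  100ULL	(the U lands before the suffix)
+ */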
+
+
+#ifdef __INT64_TYPE__
+# ifdef __INT64_C_SUFFIX__
+# define __int64_c_suffix __INT64_C_SUFFIX__
+# define __int32_c_suffix __INT64_C_SUFFIX__
+# define __int16_c_suffix __INT64_C_SUFFIX__
+# define __int8_c_suffix __INT64_C_SUFFIX__
+# else
+# undef __int64_c_suffix
+# undef __int32_c_suffix
+# undef __int16_c_suffix
+# undef __int8_c_suffix
+# endif /* __INT64_C_SUFFIX__ */
+#endif /* __INT64_TYPE__ */
+
+#ifdef __int_least64_t
+# ifdef __int64_c_suffix
+# define INT64_C(v) __int_c(v, __int64_c_suffix)
+# define UINT64_C(v) __uint_c(v, __int64_c_suffix)
+# else
+# define INT64_C(v) v
+# define UINT64_C(v) v ## U
+# endif /* __int64_c_suffix */
+#endif /* __int_least64_t */
+
+
+#ifdef __INT56_TYPE__
+# ifdef __INT56_C_SUFFIX__
+# define INT56_C(v) __int_c(v, __INT56_C_SUFFIX__)
+# define UINT56_C(v) __uint_c(v, __INT56_C_SUFFIX__)
+# define __int32_c_suffix __INT56_C_SUFFIX__
+# define __int16_c_suffix __INT56_C_SUFFIX__
+# define __int8_c_suffix __INT56_C_SUFFIX__
+# else
+# define INT56_C(v) v
+# define UINT56_C(v) v ## U
+# undef __int32_c_suffix
+# undef __int16_c_suffix
+# undef __int8_c_suffix
+# endif /* __INT56_C_SUFFIX__ */
+#endif /* __INT56_TYPE__ */
+
+
+#ifdef __INT48_TYPE__
+# ifdef __INT48_C_SUFFIX__
+# define INT48_C(v) __int_c(v, __INT48_C_SUFFIX__)
+# define UINT48_C(v) __uint_c(v, __INT48_C_SUFFIX__)
+# define __int32_c_suffix __INT48_C_SUFFIX__
+# define __int16_c_suffix __INT48_C_SUFFIX__
+# define __int8_c_suffix __INT48_C_SUFFIX__
+# else
+# define INT48_C(v) v
+# define UINT48_C(v) v ## U
+# undef __int32_c_suffix
+# undef __int16_c_suffix
+# undef __int8_c_suffix
+# endif /* __INT48_C_SUFFIX__ */
+#endif /* __INT48_TYPE__ */
+
+
+#ifdef __INT40_TYPE__
+# ifdef __INT40_C_SUFFIX__
+# define INT40_C(v) __int_c(v, __INT40_C_SUFFIX__)
+# define UINT40_C(v) __uint_c(v, __INT40_C_SUFFIX__)
+# define __int32_c_suffix __INT40_C_SUFFIX__
+# define __int16_c_suffix __INT40_C_SUFFIX__
+# define __int8_c_suffix __INT40_C_SUFFIX__
+# else
+# define INT40_C(v) v
+# define UINT40_C(v) v ## U
+# undef __int32_c_suffix
+# undef __int16_c_suffix
+# undef __int8_c_suffix
+# endif /* __INT40_C_SUFFIX__ */
+#endif /* __INT40_TYPE__ */
+
+
+#ifdef __INT32_TYPE__
+# ifdef __INT32_C_SUFFIX__
+# define __int32_c_suffix __INT32_C_SUFFIX__
+# define __int16_c_suffix __INT32_C_SUFFIX__
+# define __int8_c_suffix __INT32_C_SUFFIX__
+# else
+# undef __int32_c_suffix
+# undef __int16_c_suffix
+# undef __int8_c_suffix
+# endif /* __INT32_C_SUFFIX__ */
+#endif /* __INT32_TYPE__ */
+
+#ifdef __int_least32_t
+# ifdef __int32_c_suffix
+# define INT32_C(v) __int_c(v, __int32_c_suffix)
+# define UINT32_C(v) __uint_c(v, __int32_c_suffix)
+# else
+# define INT32_C(v) v
+# define UINT32_C(v) v ## U
+# endif /* __int32_c_suffix */
+#endif /* __int_least32_t */
+
+
+#ifdef __INT24_TYPE__
+# ifdef __INT24_C_SUFFIX__
+# define INT24_C(v) __int_c(v, __INT24_C_SUFFIX__)
+# define UINT24_C(v) __uint_c(v, __INT24_C_SUFFIX__)
+# define __int16_c_suffix __INT24_C_SUFFIX__
+# define __int8_c_suffix __INT24_C_SUFFIX__
+# else
+# define INT24_C(v) v
+# define UINT24_C(v) v ## U
+# undef __int16_c_suffix
+# undef __int8_c_suffix
+# endif /* __INT24_C_SUFFIX__ */
+#endif /* __INT24_TYPE__ */
+
+
+#ifdef __INT16_TYPE__
+# ifdef __INT16_C_SUFFIX__
+# define __int16_c_suffix __INT16_C_SUFFIX__
+# define __int8_c_suffix __INT16_C_SUFFIX__
+# else
+# undef __int16_c_suffix
+# undef __int8_c_suffix
+# endif /* __INT16_C_SUFFIX__ */
+#endif /* __INT16_TYPE__ */
+
+#ifdef __int_least16_t
+# ifdef __int16_c_suffix
+# define INT16_C(v) __int_c(v, __int16_c_suffix)
+# define UINT16_C(v) __uint_c(v, __int16_c_suffix)
+# else
+# define INT16_C(v) v
+# define UINT16_C(v) v ## U
+# endif /* __int16_c_suffix */
+#endif /* __int_least16_t */
+
+
+#ifdef __INT8_TYPE__
+# ifdef __INT8_C_SUFFIX__
+# define __int8_c_suffix __INT8_C_SUFFIX__
+# else
+# undef __int8_c_suffix
+# endif /* __INT8_C_SUFFIX__ */
+#endif /* __INT8_TYPE__ */
+
+#ifdef __int_least8_t
+# ifdef __int8_c_suffix
+# define INT8_C(v) __int_c(v, __int8_c_suffix)
+# define UINT8_C(v) __uint_c(v, __int8_c_suffix)
+# else
+# define INT8_C(v) v
+# define UINT8_C(v) v ## U
+# endif /* __int8_c_suffix */
+#endif /* __int_least8_t */
+
+
+/* C99 7.18.2.1 Limits of exact-width integer types.
+ * C99 7.18.2.2 Limits of minimum-width integer types.
+ * C99 7.18.2.3 Limits of fastest minimum-width integer types.
+ *
+ * The presence of limit macros is completely optional in C99. This
+ * implementation defines limits for all of the types (exact- and
+ * minimum-width) that it defines above, using the limits of the minimum-width
+ * type for any types that do not have exact-width representations.
+ *
+ * As in the type definitions, this section takes an approach of
+ * successive-shrinking to determine which limits to use for the standard (8,
+ * 16, 32, 64) bit widths when they don't have exact representations. It is
+ * therefore important that the definitions be kept in order of descending
+ * widths.
+ *
+ * Note that C++ should not check __STDC_LIMIT_MACROS here, contrary to the
+ * claims of the C standard (see C++ 18.3.1p2, [cstdint.syn]).
+ */
+
+#ifdef __INT64_TYPE__
+# define INT64_MAX INT64_C( 9223372036854775807)
+# define INT64_MIN (-INT64_C( 9223372036854775807)-1)
+# define UINT64_MAX UINT64_C(18446744073709551615)
+# define __INT_LEAST64_MIN INT64_MIN
+# define __INT_LEAST64_MAX INT64_MAX
+# define __UINT_LEAST64_MAX UINT64_MAX
+# define __INT_LEAST32_MIN INT64_MIN
+# define __INT_LEAST32_MAX INT64_MAX
+# define __UINT_LEAST32_MAX UINT64_MAX
+# define __INT_LEAST16_MIN INT64_MIN
+# define __INT_LEAST16_MAX INT64_MAX
+# define __UINT_LEAST16_MAX UINT64_MAX
+# define __INT_LEAST8_MIN INT64_MIN
+# define __INT_LEAST8_MAX INT64_MAX
+# define __UINT_LEAST8_MAX UINT64_MAX
+#endif /* __INT64_TYPE__ */
+
+#ifdef __INT_LEAST64_MIN
+# define INT_LEAST64_MIN __INT_LEAST64_MIN
+# define INT_LEAST64_MAX __INT_LEAST64_MAX
+# define UINT_LEAST64_MAX __UINT_LEAST64_MAX
+# define INT_FAST64_MIN __INT_LEAST64_MIN
+# define INT_FAST64_MAX __INT_LEAST64_MAX
+# define UINT_FAST64_MAX __UINT_LEAST64_MAX
+#endif /* __INT_LEAST64_MIN */
+
+
+#ifdef __INT56_TYPE__
+# define INT56_MAX INT56_C(36028797018963967)
+# define INT56_MIN (-INT56_C(36028797018963967)-1)
+# define UINT56_MAX UINT56_C(72057594037927935)
+# define INT_LEAST56_MIN INT56_MIN
+# define INT_LEAST56_MAX INT56_MAX
+# define UINT_LEAST56_MAX UINT56_MAX
+# define INT_FAST56_MIN INT56_MIN
+# define INT_FAST56_MAX INT56_MAX
+# define UINT_FAST56_MAX UINT56_MAX
+# define __INT_LEAST32_MIN INT56_MIN
+# define __INT_LEAST32_MAX INT56_MAX
+# define __UINT_LEAST32_MAX UINT56_MAX
+# define __INT_LEAST16_MIN INT56_MIN
+# define __INT_LEAST16_MAX INT56_MAX
+# define __UINT_LEAST16_MAX UINT56_MAX
+# define __INT_LEAST8_MIN INT56_MIN
+# define __INT_LEAST8_MAX INT56_MAX
+# define __UINT_LEAST8_MAX UINT56_MAX
+#endif /* __INT56_TYPE__ */
+
+
+#ifdef __INT48_TYPE__
+# define INT48_MAX INT48_C(140737488355327)
+# define INT48_MIN (-INT48_C(140737488355327)-1)
+# define UINT48_MAX UINT48_C(281474976710655)
+# define INT_LEAST48_MIN INT48_MIN
+# define INT_LEAST48_MAX INT48_MAX
+# define UINT_LEAST48_MAX UINT48_MAX
+# define INT_FAST48_MIN INT48_MIN
+# define INT_FAST48_MAX INT48_MAX
+# define UINT_FAST48_MAX UINT48_MAX
+# define __INT_LEAST32_MIN INT48_MIN
+# define __INT_LEAST32_MAX INT48_MAX
+# define __UINT_LEAST32_MAX UINT48_MAX
+# define __INT_LEAST16_MIN INT48_MIN
+# define __INT_LEAST16_MAX INT48_MAX
+# define __UINT_LEAST16_MAX UINT48_MAX
+# define __INT_LEAST8_MIN INT48_MIN
+# define __INT_LEAST8_MAX INT48_MAX
+# define __UINT_LEAST8_MAX UINT48_MAX
+#endif /* __INT48_TYPE__ */
+
+
+#ifdef __INT40_TYPE__
+# define INT40_MAX INT40_C(549755813887)
+# define INT40_MIN (-INT40_C(549755813887)-1)
+# define UINT40_MAX UINT40_C(1099511627775)
+# define INT_LEAST40_MIN INT40_MIN
+# define INT_LEAST40_MAX INT40_MAX
+# define UINT_LEAST40_MAX UINT40_MAX
+# define INT_FAST40_MIN INT40_MIN
+# define INT_FAST40_MAX INT40_MAX
+# define UINT_FAST40_MAX UINT40_MAX
+# define __INT_LEAST32_MIN INT40_MIN
+# define __INT_LEAST32_MAX INT40_MAX
+# define __UINT_LEAST32_MAX UINT40_MAX
+# define __INT_LEAST16_MIN INT40_MIN
+# define __INT_LEAST16_MAX INT40_MAX
+# define __UINT_LEAST16_MAX UINT40_MAX
+# define __INT_LEAST8_MIN INT40_MIN
+# define __INT_LEAST8_MAX INT40_MAX
+# define __UINT_LEAST8_MAX UINT40_MAX
+#endif /* __INT40_TYPE__ */
+
+
+#ifdef __INT32_TYPE__
+# define INT32_MAX INT32_C(2147483647)
+# define INT32_MIN (-INT32_C(2147483647)-1)
+# define UINT32_MAX UINT32_C(4294967295)
+# define __INT_LEAST32_MIN INT32_MIN
+# define __INT_LEAST32_MAX INT32_MAX
+# define __UINT_LEAST32_MAX UINT32_MAX
+# define __INT_LEAST16_MIN INT32_MIN
+# define __INT_LEAST16_MAX INT32_MAX
+# define __UINT_LEAST16_MAX UINT32_MAX
+# define __INT_LEAST8_MIN INT32_MIN
+# define __INT_LEAST8_MAX INT32_MAX
+# define __UINT_LEAST8_MAX UINT32_MAX
+#endif /* __INT32_TYPE__ */
+
+#ifdef __INT_LEAST32_MIN
+# define INT_LEAST32_MIN __INT_LEAST32_MIN
+# define INT_LEAST32_MAX __INT_LEAST32_MAX
+# define UINT_LEAST32_MAX __UINT_LEAST32_MAX
+# define INT_FAST32_MIN __INT_LEAST32_MIN
+# define INT_FAST32_MAX __INT_LEAST32_MAX
+# define UINT_FAST32_MAX __UINT_LEAST32_MAX
+#endif /* __INT_LEAST32_MIN */
+
+
+#ifdef __INT24_TYPE__
+# define INT24_MAX INT24_C(8388607)
+# define INT24_MIN (-INT24_C(8388607)-1)
+# define UINT24_MAX UINT24_C(16777215)
+# define INT_LEAST24_MIN INT24_MIN
+# define INT_LEAST24_MAX INT24_MAX
+# define UINT_LEAST24_MAX UINT24_MAX
+# define INT_FAST24_MIN INT24_MIN
+# define INT_FAST24_MAX INT24_MAX
+# define UINT_FAST24_MAX UINT24_MAX
+# define __INT_LEAST16_MIN INT24_MIN
+# define __INT_LEAST16_MAX INT24_MAX
+# define __UINT_LEAST16_MAX UINT24_MAX
+# define __INT_LEAST8_MIN INT24_MIN
+# define __INT_LEAST8_MAX INT24_MAX
+# define __UINT_LEAST8_MAX UINT24_MAX
+#endif /* __INT24_TYPE__ */
+
+
+#ifdef __INT16_TYPE__
+#define INT16_MAX INT16_C(32767)
+#define INT16_MIN (-INT16_C(32767)-1)
+#define UINT16_MAX UINT16_C(65535)
+# define __INT_LEAST16_MIN INT16_MIN
+# define __INT_LEAST16_MAX INT16_MAX
+# define __UINT_LEAST16_MAX UINT16_MAX
+# define __INT_LEAST8_MIN INT16_MIN
+# define __INT_LEAST8_MAX INT16_MAX
+# define __UINT_LEAST8_MAX UINT16_MAX
+#endif /* __INT16_TYPE__ */
+
+#ifdef __INT_LEAST16_MIN
+# define INT_LEAST16_MIN __INT_LEAST16_MIN
+# define INT_LEAST16_MAX __INT_LEAST16_MAX
+# define UINT_LEAST16_MAX __UINT_LEAST16_MAX
+# define INT_FAST16_MIN __INT_LEAST16_MIN
+# define INT_FAST16_MAX __INT_LEAST16_MAX
+# define UINT_FAST16_MAX __UINT_LEAST16_MAX
+#endif /* __INT_LEAST16_MIN */
+
+
+#ifdef __INT8_TYPE__
+# define INT8_MAX INT8_C(127)
+# define INT8_MIN (-INT8_C(127)-1)
+# define UINT8_MAX UINT8_C(255)
+# define __INT_LEAST8_MIN INT8_MIN
+# define __INT_LEAST8_MAX INT8_MAX
+# define __UINT_LEAST8_MAX UINT8_MAX
+#endif /* __INT8_TYPE__ */
+
+#ifdef __INT_LEAST8_MIN
+# define INT_LEAST8_MIN __INT_LEAST8_MIN
+# define INT_LEAST8_MAX __INT_LEAST8_MAX
+# define UINT_LEAST8_MAX __UINT_LEAST8_MAX
+# define INT_FAST8_MIN __INT_LEAST8_MIN
+# define INT_FAST8_MAX __INT_LEAST8_MAX
+# define UINT_FAST8_MAX __UINT_LEAST8_MAX
+#endif /* __INT_LEAST8_MIN */
+
+/* Some utility macros */
+#define __INTN_MIN(n) __stdint_join3( INT, n, _MIN)
+#define __INTN_MAX(n) __stdint_join3( INT, n, _MAX)
+#define __UINTN_MAX(n) __stdint_join3(UINT, n, _MAX)
+#define __INTN_C(n, v) __stdint_join3( INT, n, _C(v))
+#define __UINTN_C(n, v) __stdint_join3(UINT, n, _C(v))
+
+/* C99 7.18.2.4 Limits of integer types capable of holding object pointers. */
+/* C99 7.18.3 Limits of other integer types. */
+
+#define INTPTR_MIN __INTN_MIN(__INTPTR_WIDTH__)
+#define INTPTR_MAX __INTN_MAX(__INTPTR_WIDTH__)
+#define UINTPTR_MAX __UINTN_MAX(__INTPTR_WIDTH__)
+#define PTRDIFF_MIN __INTN_MIN(__PTRDIFF_WIDTH__)
+#define PTRDIFF_MAX __INTN_MAX(__PTRDIFF_WIDTH__)
+#define SIZE_MAX __UINTN_MAX(__SIZE_WIDTH__)
+
+/* ISO9899:2011 7.20 (C11 Annex K): Define RSIZE_MAX if __STDC_WANT_LIB_EXT1__
+ * is enabled. */
+#if defined(__STDC_WANT_LIB_EXT1__) && __STDC_WANT_LIB_EXT1__ >= 1
+#define RSIZE_MAX (SIZE_MAX >> 1)
+#endif
+
+/* C99 7.18.2.5 Limits of greatest-width integer types. */
+#define INTMAX_MIN __INTN_MIN(__INTMAX_WIDTH__)
+#define INTMAX_MAX __INTN_MAX(__INTMAX_WIDTH__)
+#define UINTMAX_MAX __UINTN_MAX(__INTMAX_WIDTH__)
+
+/* C99 7.18.3 Limits of other integer types. */
+#define SIG_ATOMIC_MIN __INTN_MIN(__SIG_ATOMIC_WIDTH__)
+#define SIG_ATOMIC_MAX __INTN_MAX(__SIG_ATOMIC_WIDTH__)
+#ifdef __WINT_UNSIGNED__
+# define WINT_MIN __UINTN_C(__WINT_WIDTH__, 0)
+# define WINT_MAX __UINTN_MAX(__WINT_WIDTH__)
+#else
+# define WINT_MIN __INTN_MIN(__WINT_WIDTH__)
+# define WINT_MAX __INTN_MAX(__WINT_WIDTH__)
+#endif
+
+#ifndef WCHAR_MAX
+# define WCHAR_MAX __WCHAR_MAX__
+#endif
+#ifndef WCHAR_MIN
+# if __WCHAR_MAX__ == __INTN_MAX(__WCHAR_WIDTH__)
+# define WCHAR_MIN __INTN_MIN(__WCHAR_WIDTH__)
+# else
+# define WCHAR_MIN __UINTN_C(__WCHAR_WIDTH__, 0)
+# endif
+#endif
+
+/* 7.18.4.2 Macros for greatest-width integer constants. */
+#define INTMAX_C(v) __INTN_C(__INTMAX_WIDTH__, v)
+#define UINTMAX_C(v) __UINTN_C(__INTMAX_WIDTH__, v)
+
+#endif /* __STDC_HOSTED__ */
+#endif /* __CLANG_STDINT_H */
diff --git a/clang-2690385/lib64/clang/3.8/include/stdnoreturn.h b/clang-2690385/lib64/clang/3.8/include/stdnoreturn.h
new file mode 100644
index 00000000..a7a301d7
--- /dev/null
+++ b/clang-2690385/lib64/clang/3.8/include/stdnoreturn.h
@@ -0,0 +1,30 @@
+/*===---- stdnoreturn.h - Standard header for noreturn macro ---------------===
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+ * THE SOFTWARE.
+ *
+ *===-----------------------------------------------------------------------===
+ */
+
+#ifndef __STDNORETURN_H
+#define __STDNORETURN_H
+
+#define noreturn _Noreturn
+#define __noreturn_is_defined 1
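+
+/* Illustrative usage (an addition for exposition, not part of the original
+ * header), with a hypothetical function:
+ *
+ *	#include <stdnoreturn.h>
+ *	noreturn void die(const char *msg);	(expands to _Noreturn void ...)
+ */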
+
+#endif /* __STDNORETURN_H */
diff --git a/clang-2690385/lib64/clang/3.8/include/tbmintrin.h b/clang-2690385/lib64/clang/3.8/include/tbmintrin.h
new file mode 100644
index 00000000..785961c6
--- /dev/null
+++ b/clang-2690385/lib64/clang/3.8/include/tbmintrin.h
@@ -0,0 +1,154 @@
+/*===---- tbmintrin.h - TBM intrinsics -------------------------------------===
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+ * THE SOFTWARE.
+ *
+ *===-----------------------------------------------------------------------===
+ */
+
+#ifndef __X86INTRIN_H
+#error "Never use <tbmintrin.h> directly; include <x86intrin.h> instead."
+#endif
+
+#ifndef __TBMINTRIN_H
+#define __TBMINTRIN_H
+
+/* Define the default attributes for the functions in this file. */
+#define __DEFAULT_FN_ATTRS __attribute__((__always_inline__, __nodebug__, __target__("tbm")))
+
+#define __bextri_u32(a, b) \
+ ((unsigned int)__builtin_ia32_bextri_u32((unsigned int)(a), \
+ (unsigned int)(b)))
+
+static __inline__ unsigned int __DEFAULT_FN_ATTRS
+__blcfill_u32(unsigned int a)
+{
+ return a & (a + 1);
+}
+
+static __inline__ unsigned int __DEFAULT_FN_ATTRS
+__blci_u32(unsigned int a)
+{
+ return a | ~(a + 1);
+}
+
+static __inline__ unsigned int __DEFAULT_FN_ATTRS
+__blcic_u32(unsigned int a)
+{
+ return ~a & (a + 1);
+}
+
+static __inline__ unsigned int __DEFAULT_FN_ATTRS
+__blcmsk_u32(unsigned int a)
+{
+ return a ^ (a + 1);
+}
+
+static __inline__ unsigned int __DEFAULT_FN_ATTRS
+__blcs_u32(unsigned int a)
+{
+ return a | (a + 1);
+}
+
+static __inline__ unsigned int __DEFAULT_FN_ATTRS
+__blsfill_u32(unsigned int a)
+{
+ return a | (a - 1);
+}
+
+static __inline__ unsigned int __DEFAULT_FN_ATTRS
+__blsic_u32(unsigned int a)
+{
+ return ~a | (a - 1);
+}
+
+static __inline__ unsigned int __DEFAULT_FN_ATTRS
+__t1mskc_u32(unsigned int a)
+{
+ return ~a | (a + 1);
+}
+
+static __inline__ unsigned int __DEFAULT_FN_ATTRS
+__tzmsk_u32(unsigned int a)
+{
+ return ~a & (a - 1);
+}
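+
+/* Illustrative values (an addition for exposition, not part of the original
+ * header): with a = 0x58 (binary 01011000), whose lowest set bit is bit 3
+ * and lowest clear bit is bit 0:
+ *
+ *	__blsfill_u32(0x58) == 0x5f	(fill below the lowest set bit)
+ *	__blcmsk_u32(0x58) == 0x01	(mask through the lowest clear bit)
+ *	__tzmsk_u32(0x58) == 0x07	(mask of the trailing zero bits)
+ */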
+
+#ifdef __x86_64__
+#define __bextri_u64(a, b) \
+ ((unsigned long long)__builtin_ia32_bextri_u64((unsigned long long)(a), \
+ (unsigned long long)(b)))
+
+static __inline__ unsigned long long __DEFAULT_FN_ATTRS
+__blcfill_u64(unsigned long long a)
+{
+ return a & (a + 1);
+}
+
+static __inline__ unsigned long long __DEFAULT_FN_ATTRS
+__blci_u64(unsigned long long a)
+{
+ return a | ~(a + 1);
+}
+
+static __inline__ unsigned long long __DEFAULT_FN_ATTRS
+__blcic_u64(unsigned long long a)
+{
+ return ~a & (a + 1);
+}
+
+static __inline__ unsigned long long __DEFAULT_FN_ATTRS
+__blcmsk_u64(unsigned long long a)
+{
+ return a ^ (a + 1);
+}
+
+static __inline__ unsigned long long __DEFAULT_FN_ATTRS
+__blcs_u64(unsigned long long a)
+{
+ return a | (a + 1);
+}
+
+static __inline__ unsigned long long __DEFAULT_FN_ATTRS
+__blsfill_u64(unsigned long long a)
+{
+ return a | (a - 1);
+}
+
+static __inline__ unsigned long long __DEFAULT_FN_ATTRS
+__blsic_u64(unsigned long long a)
+{
+ return ~a | (a - 1);
+}
+
+static __inline__ unsigned long long __DEFAULT_FN_ATTRS
+__t1mskc_u64(unsigned long long a)
+{
+ return ~a | (a + 1);
+}
+
+static __inline__ unsigned long long __DEFAULT_FN_ATTRS
+__tzmsk_u64(unsigned long long a)
+{
+ return ~a & (a - 1);
+}
+#endif
+
+#undef __DEFAULT_FN_ATTRS
+
+#endif /* __TBMINTRIN_H */
diff --git a/clang-2690385/lib64/clang/3.8/include/tgmath.h b/clang-2690385/lib64/clang/3.8/include/tgmath.h
new file mode 100644
index 00000000..318e1185
--- /dev/null
+++ b/clang-2690385/lib64/clang/3.8/include/tgmath.h
@@ -0,0 +1,1374 @@
+/*===---- tgmath.h - Standard header for type generic math ----------------===*\
+ *
+ * Copyright (c) 2009 Howard Hinnant
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+ * THE SOFTWARE.
+ *
+\*===----------------------------------------------------------------------===*/
+
+#ifndef __TGMATH_H
+#define __TGMATH_H
+
+/* C99 7.22 Type-generic math <tgmath.h>. */
+#include <math.h>
+
+/* C++ handles type genericity with overloading in math.h. */
+#ifndef __cplusplus
+#include <complex.h>
+
+#define _TG_ATTRSp __attribute__((__overloadable__))
+#define _TG_ATTRS __attribute__((__overloadable__, __always_inline__))
+
+// promotion
+
+typedef void _Argument_type_is_not_arithmetic;
+static _Argument_type_is_not_arithmetic __tg_promote(...)
+ __attribute__((__unavailable__,__overloadable__));
+static double _TG_ATTRSp __tg_promote(int);
+static double _TG_ATTRSp __tg_promote(unsigned int);
+static double _TG_ATTRSp __tg_promote(long);
+static double _TG_ATTRSp __tg_promote(unsigned long);
+static double _TG_ATTRSp __tg_promote(long long);
+static double _TG_ATTRSp __tg_promote(unsigned long long);
+static float _TG_ATTRSp __tg_promote(float);
+static double _TG_ATTRSp __tg_promote(double);
+static long double _TG_ATTRSp __tg_promote(long double);
+static float _Complex _TG_ATTRSp __tg_promote(float _Complex);
+static double _Complex _TG_ATTRSp __tg_promote(double _Complex);
+static long double _Complex _TG_ATTRSp __tg_promote(long double _Complex);
+
+#define __tg_promote1(__x) (__typeof__(__tg_promote(__x)))
+#define __tg_promote2(__x, __y) (__typeof__(__tg_promote(__x) + \
+ __tg_promote(__y)))
+#define __tg_promote3(__x, __y, __z) (__typeof__(__tg_promote(__x) + \
+ __tg_promote(__y) + \
+ __tg_promote(__z)))
+
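+// Illustrative resolution (an addition for exposition, not part of the
+// original header): __tg_promote is never actually called; only the return
+// type of the selected overload matters, so
+//
+//	__tg_promote1(1)	->	(double)	(int promotes to double)
+//	__tg_promote2(1.0f, 1)	->	(double)	(float + double)
+//	__tg_promote1(1.0f)	->	(float)
+//
+// which is why sin(1) below dispatches to sin(double) while sin(1.0f)
+// dispatches to sinf(float).
+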
+// acos
+
+static float
+ _TG_ATTRS
+ __tg_acos(float __x) {return acosf(__x);}
+
+static double
+ _TG_ATTRS
+ __tg_acos(double __x) {return acos(__x);}
+
+static long double
+ _TG_ATTRS
+ __tg_acos(long double __x) {return acosl(__x);}
+
+static float _Complex
+ _TG_ATTRS
+ __tg_acos(float _Complex __x) {return cacosf(__x);}
+
+static double _Complex
+ _TG_ATTRS
+ __tg_acos(double _Complex __x) {return cacos(__x);}
+
+static long double _Complex
+ _TG_ATTRS
+ __tg_acos(long double _Complex __x) {return cacosl(__x);}
+
+#undef acos
+#define acos(__x) __tg_acos(__tg_promote1((__x))(__x))
+
+// asin
+
+static float
+ _TG_ATTRS
+ __tg_asin(float __x) {return asinf(__x);}
+
+static double
+ _TG_ATTRS
+ __tg_asin(double __x) {return asin(__x);}
+
+static long double
+ _TG_ATTRS
+ __tg_asin(long double __x) {return asinl(__x);}
+
+static float _Complex
+ _TG_ATTRS
+ __tg_asin(float _Complex __x) {return casinf(__x);}
+
+static double _Complex
+ _TG_ATTRS
+ __tg_asin(double _Complex __x) {return casin(__x);}
+
+static long double _Complex
+ _TG_ATTRS
+ __tg_asin(long double _Complex __x) {return casinl(__x);}
+
+#undef asin
+#define asin(__x) __tg_asin(__tg_promote1((__x))(__x))
+
+// atan
+
+static float
+ _TG_ATTRS
+ __tg_atan(float __x) {return atanf(__x);}
+
+static double
+ _TG_ATTRS
+ __tg_atan(double __x) {return atan(__x);}
+
+static long double
+ _TG_ATTRS
+ __tg_atan(long double __x) {return atanl(__x);}
+
+static float _Complex
+ _TG_ATTRS
+ __tg_atan(float _Complex __x) {return catanf(__x);}
+
+static double _Complex
+ _TG_ATTRS
+ __tg_atan(double _Complex __x) {return catan(__x);}
+
+static long double _Complex
+ _TG_ATTRS
+ __tg_atan(long double _Complex __x) {return catanl(__x);}
+
+#undef atan
+#define atan(__x) __tg_atan(__tg_promote1((__x))(__x))
+
+// acosh
+
+static float
+ _TG_ATTRS
+ __tg_acosh(float __x) {return acoshf(__x);}
+
+static double
+ _TG_ATTRS
+ __tg_acosh(double __x) {return acosh(__x);}
+
+static long double
+ _TG_ATTRS
+ __tg_acosh(long double __x) {return acoshl(__x);}
+
+static float _Complex
+ _TG_ATTRS
+ __tg_acosh(float _Complex __x) {return cacoshf(__x);}
+
+static double _Complex
+ _TG_ATTRS
+ __tg_acosh(double _Complex __x) {return cacosh(__x);}
+
+static long double _Complex
+ _TG_ATTRS
+ __tg_acosh(long double _Complex __x) {return cacoshl(__x);}
+
+#undef acosh
+#define acosh(__x) __tg_acosh(__tg_promote1((__x))(__x))
+
+// asinh
+
+static float
+ _TG_ATTRS
+ __tg_asinh(float __x) {return asinhf(__x);}
+
+static double
+ _TG_ATTRS
+ __tg_asinh(double __x) {return asinh(__x);}
+
+static long double
+ _TG_ATTRS
+ __tg_asinh(long double __x) {return asinhl(__x);}
+
+static float _Complex
+ _TG_ATTRS
+ __tg_asinh(float _Complex __x) {return casinhf(__x);}
+
+static double _Complex
+ _TG_ATTRS
+ __tg_asinh(double _Complex __x) {return casinh(__x);}
+
+static long double _Complex
+ _TG_ATTRS
+ __tg_asinh(long double _Complex __x) {return casinhl(__x);}
+
+#undef asinh
+#define asinh(__x) __tg_asinh(__tg_promote1((__x))(__x))
+
+// atanh
+
+static float
+ _TG_ATTRS
+ __tg_atanh(float __x) {return atanhf(__x);}
+
+static double
+ _TG_ATTRS
+ __tg_atanh(double __x) {return atanh(__x);}
+
+static long double
+ _TG_ATTRS
+ __tg_atanh(long double __x) {return atanhl(__x);}
+
+static float _Complex
+ _TG_ATTRS
+ __tg_atanh(float _Complex __x) {return catanhf(__x);}
+
+static double _Complex
+ _TG_ATTRS
+ __tg_atanh(double _Complex __x) {return catanh(__x);}
+
+static long double _Complex
+ _TG_ATTRS
+ __tg_atanh(long double _Complex __x) {return catanhl(__x);}
+
+#undef atanh
+#define atanh(__x) __tg_atanh(__tg_promote1((__x))(__x))
+
+// cos
+
+static float
+ _TG_ATTRS
+ __tg_cos(float __x) {return cosf(__x);}
+
+static double
+ _TG_ATTRS
+ __tg_cos(double __x) {return cos(__x);}
+
+static long double
+ _TG_ATTRS
+ __tg_cos(long double __x) {return cosl(__x);}
+
+static float _Complex
+ _TG_ATTRS
+ __tg_cos(float _Complex __x) {return ccosf(__x);}
+
+static double _Complex
+ _TG_ATTRS
+ __tg_cos(double _Complex __x) {return ccos(__x);}
+
+static long double _Complex
+ _TG_ATTRS
+ __tg_cos(long double _Complex __x) {return ccosl(__x);}
+
+#undef cos
+#define cos(__x) __tg_cos(__tg_promote1((__x))(__x))
+
+// sin
+
+static float
+ _TG_ATTRS
+ __tg_sin(float __x) {return sinf(__x);}
+
+static double
+ _TG_ATTRS
+ __tg_sin(double __x) {return sin(__x);}
+
+static long double
+ _TG_ATTRS
+ __tg_sin(long double __x) {return sinl(__x);}
+
+static float _Complex
+ _TG_ATTRS
+ __tg_sin(float _Complex __x) {return csinf(__x);}
+
+static double _Complex
+ _TG_ATTRS
+ __tg_sin(double _Complex __x) {return csin(__x);}
+
+static long double _Complex
+ _TG_ATTRS
+ __tg_sin(long double _Complex __x) {return csinl(__x);}
+
+#undef sin
+#define sin(__x) __tg_sin(__tg_promote1((__x))(__x))
+
+// tan
+
+static float
+ _TG_ATTRS
+ __tg_tan(float __x) {return tanf(__x);}
+
+static double
+ _TG_ATTRS
+ __tg_tan(double __x) {return tan(__x);}
+
+static long double
+ _TG_ATTRS
+ __tg_tan(long double __x) {return tanl(__x);}
+
+static float _Complex
+ _TG_ATTRS
+ __tg_tan(float _Complex __x) {return ctanf(__x);}
+
+static double _Complex
+ _TG_ATTRS
+ __tg_tan(double _Complex __x) {return ctan(__x);}
+
+static long double _Complex
+ _TG_ATTRS
+ __tg_tan(long double _Complex __x) {return ctanl(__x);}
+
+#undef tan
+#define tan(__x) __tg_tan(__tg_promote1((__x))(__x))
+
+// cosh
+
+static float
+ _TG_ATTRS
+ __tg_cosh(float __x) {return coshf(__x);}
+
+static double
+ _TG_ATTRS
+ __tg_cosh(double __x) {return cosh(__x);}
+
+static long double
+ _TG_ATTRS
+ __tg_cosh(long double __x) {return coshl(__x);}
+
+static float _Complex
+ _TG_ATTRS
+ __tg_cosh(float _Complex __x) {return ccoshf(__x);}
+
+static double _Complex
+ _TG_ATTRS
+ __tg_cosh(double _Complex __x) {return ccosh(__x);}
+
+static long double _Complex
+ _TG_ATTRS
+ __tg_cosh(long double _Complex __x) {return ccoshl(__x);}
+
+#undef cosh
+#define cosh(__x) __tg_cosh(__tg_promote1((__x))(__x))
+
+// sinh
+
+static float
+ _TG_ATTRS
+ __tg_sinh(float __x) {return sinhf(__x);}
+
+static double
+ _TG_ATTRS
+ __tg_sinh(double __x) {return sinh(__x);}
+
+static long double
+ _TG_ATTRS
+ __tg_sinh(long double __x) {return sinhl(__x);}
+
+static float _Complex
+ _TG_ATTRS
+ __tg_sinh(float _Complex __x) {return csinhf(__x);}
+
+static double _Complex
+ _TG_ATTRS
+ __tg_sinh(double _Complex __x) {return csinh(__x);}
+
+static long double _Complex
+ _TG_ATTRS
+ __tg_sinh(long double _Complex __x) {return csinhl(__x);}
+
+#undef sinh
+#define sinh(__x) __tg_sinh(__tg_promote1((__x))(__x))
+
+// tanh
+
+static float
+ _TG_ATTRS
+ __tg_tanh(float __x) {return tanhf(__x);}
+
+static double
+ _TG_ATTRS
+ __tg_tanh(double __x) {return tanh(__x);}
+
+static long double
+ _TG_ATTRS
+ __tg_tanh(long double __x) {return tanhl(__x);}
+
+static float _Complex
+ _TG_ATTRS
+ __tg_tanh(float _Complex __x) {return ctanhf(__x);}
+
+static double _Complex
+ _TG_ATTRS
+ __tg_tanh(double _Complex __x) {return ctanh(__x);}
+
+static long double _Complex
+ _TG_ATTRS
+ __tg_tanh(long double _Complex __x) {return ctanhl(__x);}
+
+#undef tanh
+#define tanh(__x) __tg_tanh(__tg_promote1((__x))(__x))
+
+// exp
+
+static float
+ _TG_ATTRS
+ __tg_exp(float __x) {return expf(__x);}
+
+static double
+ _TG_ATTRS
+ __tg_exp(double __x) {return exp(__x);}
+
+static long double
+ _TG_ATTRS
+ __tg_exp(long double __x) {return expl(__x);}
+
+static float _Complex
+ _TG_ATTRS
+ __tg_exp(float _Complex __x) {return cexpf(__x);}
+
+static double _Complex
+ _TG_ATTRS
+ __tg_exp(double _Complex __x) {return cexp(__x);}
+
+static long double _Complex
+ _TG_ATTRS
+ __tg_exp(long double _Complex __x) {return cexpl(__x);}
+
+#undef exp
+#define exp(__x) __tg_exp(__tg_promote1((__x))(__x))
+
+// log
+
+static float
+ _TG_ATTRS
+ __tg_log(float __x) {return logf(__x);}
+
+static double
+ _TG_ATTRS
+ __tg_log(double __x) {return log(__x);}
+
+static long double
+ _TG_ATTRS
+ __tg_log(long double __x) {return logl(__x);}
+
+static float _Complex
+ _TG_ATTRS
+ __tg_log(float _Complex __x) {return clogf(__x);}
+
+static double _Complex
+ _TG_ATTRS
+ __tg_log(double _Complex __x) {return clog(__x);}
+
+static long double _Complex
+ _TG_ATTRS
+ __tg_log(long double _Complex __x) {return clogl(__x);}
+
+#undef log
+#define log(__x) __tg_log(__tg_promote1((__x))(__x))
+
+// pow
+
+static float
+ _TG_ATTRS
+ __tg_pow(float __x, float __y) {return powf(__x, __y);}
+
+static double
+ _TG_ATTRS
+ __tg_pow(double __x, double __y) {return pow(__x, __y);}
+
+static long double
+ _TG_ATTRS
+ __tg_pow(long double __x, long double __y) {return powl(__x, __y);}
+
+static float _Complex
+ _TG_ATTRS
+ __tg_pow(float _Complex __x, float _Complex __y) {return cpowf(__x, __y);}
+
+static double _Complex
+ _TG_ATTRS
+ __tg_pow(double _Complex __x, double _Complex __y) {return cpow(__x, __y);}
+
+static long double _Complex
+ _TG_ATTRS
+ __tg_pow(long double _Complex __x, long double _Complex __y)
+ {return cpowl(__x, __y);}
+
+#undef pow
+#define pow(__x, __y) __tg_pow(__tg_promote2((__x), (__y))(__x), \
+ __tg_promote2((__x), (__y))(__y))
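+
+/* Two-argument macros such as pow promote both operands to a common type
+ * via __tg_promote2 before dispatching; e.g. pow(2.0f, 3.0L) converts both
+ * arguments to long double and resolves to powl. (Illustrative note; the
+ * promotion helpers are defined earlier in this header.) */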
+
+// sqrt
+
+static float
+ _TG_ATTRS
+ __tg_sqrt(float __x) {return sqrtf(__x);}
+
+static double
+ _TG_ATTRS
+ __tg_sqrt(double __x) {return sqrt(__x);}
+
+static long double
+ _TG_ATTRS
+ __tg_sqrt(long double __x) {return sqrtl(__x);}
+
+static float _Complex
+ _TG_ATTRS
+ __tg_sqrt(float _Complex __x) {return csqrtf(__x);}
+
+static double _Complex
+ _TG_ATTRS
+ __tg_sqrt(double _Complex __x) {return csqrt(__x);}
+
+static long double _Complex
+ _TG_ATTRS
+ __tg_sqrt(long double _Complex __x) {return csqrtl(__x);}
+
+#undef sqrt
+#define sqrt(__x) __tg_sqrt(__tg_promote1((__x))(__x))
+
+// fabs
+
+static float
+ _TG_ATTRS
+ __tg_fabs(float __x) {return fabsf(__x);}
+
+static double
+ _TG_ATTRS
+ __tg_fabs(double __x) {return fabs(__x);}
+
+static long double
+ _TG_ATTRS
+ __tg_fabs(long double __x) {return fabsl(__x);}
+
+static float
+ _TG_ATTRS
+ __tg_fabs(float _Complex __x) {return cabsf(__x);}
+
+static double
+ _TG_ATTRS
+ __tg_fabs(double _Complex __x) {return cabs(__x);}
+
+static long double
+ _TG_ATTRS
+ __tg_fabs(long double _Complex __x) {return cabsl(__x);}
+
+#undef fabs
+#define fabs(__x) __tg_fabs(__tg_promote1((__x))(__x))
+
+// atan2
+
+static float
+ _TG_ATTRS
+ __tg_atan2(float __x, float __y) {return atan2f(__x, __y);}
+
+static double
+ _TG_ATTRS
+ __tg_atan2(double __x, double __y) {return atan2(__x, __y);}
+
+static long double
+ _TG_ATTRS
+ __tg_atan2(long double __x, long double __y) {return atan2l(__x, __y);}
+
+#undef atan2
+#define atan2(__x, __y) __tg_atan2(__tg_promote2((__x), (__y))(__x), \
+ __tg_promote2((__x), (__y))(__y))
+
+// cbrt
+
+static float
+ _TG_ATTRS
+ __tg_cbrt(float __x) {return cbrtf(__x);}
+
+static double
+ _TG_ATTRS
+ __tg_cbrt(double __x) {return cbrt(__x);}
+
+static long double
+ _TG_ATTRS
+ __tg_cbrt(long double __x) {return cbrtl(__x);}
+
+#undef cbrt
+#define cbrt(__x) __tg_cbrt(__tg_promote1((__x))(__x))
+
+// ceil
+
+static float
+ _TG_ATTRS
+ __tg_ceil(float __x) {return ceilf(__x);}
+
+static double
+ _TG_ATTRS
+ __tg_ceil(double __x) {return ceil(__x);}
+
+static long double
+ _TG_ATTRS
+ __tg_ceil(long double __x) {return ceill(__x);}
+
+#undef ceil
+#define ceil(__x) __tg_ceil(__tg_promote1((__x))(__x))
+
+// copysign
+
+static float
+ _TG_ATTRS
+ __tg_copysign(float __x, float __y) {return copysignf(__x, __y);}
+
+static double
+ _TG_ATTRS
+ __tg_copysign(double __x, double __y) {return copysign(__x, __y);}
+
+static long double
+ _TG_ATTRS
+ __tg_copysign(long double __x, long double __y) {return copysignl(__x, __y);}
+
+#undef copysign
+#define copysign(__x, __y) __tg_copysign(__tg_promote2((__x), (__y))(__x), \
+ __tg_promote2((__x), (__y))(__y))
+
+// erf
+
+static float
+ _TG_ATTRS
+ __tg_erf(float __x) {return erff(__x);}
+
+static double
+ _TG_ATTRS
+ __tg_erf(double __x) {return erf(__x);}
+
+static long double
+ _TG_ATTRS
+ __tg_erf(long double __x) {return erfl(__x);}
+
+#undef erf
+#define erf(__x) __tg_erf(__tg_promote1((__x))(__x))
+
+// erfc
+
+static float
+ _TG_ATTRS
+ __tg_erfc(float __x) {return erfcf(__x);}
+
+static double
+ _TG_ATTRS
+ __tg_erfc(double __x) {return erfc(__x);}
+
+static long double
+ _TG_ATTRS
+ __tg_erfc(long double __x) {return erfcl(__x);}
+
+#undef erfc
+#define erfc(__x) __tg_erfc(__tg_promote1((__x))(__x))
+
+// exp2
+
+static float
+ _TG_ATTRS
+ __tg_exp2(float __x) {return exp2f(__x);}
+
+static double
+ _TG_ATTRS
+ __tg_exp2(double __x) {return exp2(__x);}
+
+static long double
+ _TG_ATTRS
+ __tg_exp2(long double __x) {return exp2l(__x);}
+
+#undef exp2
+#define exp2(__x) __tg_exp2(__tg_promote1((__x))(__x))
+
+// expm1
+
+static float
+ _TG_ATTRS
+ __tg_expm1(float __x) {return expm1f(__x);}
+
+static double
+ _TG_ATTRS
+ __tg_expm1(double __x) {return expm1(__x);}
+
+static long double
+ _TG_ATTRS
+ __tg_expm1(long double __x) {return expm1l(__x);}
+
+#undef expm1
+#define expm1(__x) __tg_expm1(__tg_promote1((__x))(__x))
+
+// fdim
+
+static float
+ _TG_ATTRS
+ __tg_fdim(float __x, float __y) {return fdimf(__x, __y);}
+
+static double
+ _TG_ATTRS
+ __tg_fdim(double __x, double __y) {return fdim(__x, __y);}
+
+static long double
+ _TG_ATTRS
+ __tg_fdim(long double __x, long double __y) {return fdiml(__x, __y);}
+
+#undef fdim
+#define fdim(__x, __y) __tg_fdim(__tg_promote2((__x), (__y))(__x), \
+ __tg_promote2((__x), (__y))(__y))
+
+// floor
+
+static float
+ _TG_ATTRS
+ __tg_floor(float __x) {return floorf(__x);}
+
+static double
+ _TG_ATTRS
+ __tg_floor(double __x) {return floor(__x);}
+
+static long double
+ _TG_ATTRS
+ __tg_floor(long double __x) {return floorl(__x);}
+
+#undef floor
+#define floor(__x) __tg_floor(__tg_promote1((__x))(__x))
+
+// fma
+
+static float
+ _TG_ATTRS
+ __tg_fma(float __x, float __y, float __z)
+ {return fmaf(__x, __y, __z);}
+
+static double
+ _TG_ATTRS
+ __tg_fma(double __x, double __y, double __z)
+ {return fma(__x, __y, __z);}
+
+static long double
+ _TG_ATTRS
+    __tg_fma(long double __x, long double __y, long double __z)
+ {return fmal(__x, __y, __z);}
+
+#undef fma
+#define fma(__x, __y, __z) \
+ __tg_fma(__tg_promote3((__x), (__y), (__z))(__x), \
+ __tg_promote3((__x), (__y), (__z))(__y), \
+ __tg_promote3((__x), (__y), (__z))(__z))
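+
+/* __tg_promote3 extends the same idea to three operands, so a mixed call
+ * like fma(1.0f, 2.0, 3.0L) dispatches to fmal with all three arguments
+ * converted to long double. */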
+
+// fmax
+
+static float
+ _TG_ATTRS
+ __tg_fmax(float __x, float __y) {return fmaxf(__x, __y);}
+
+static double
+ _TG_ATTRS
+ __tg_fmax(double __x, double __y) {return fmax(__x, __y);}
+
+static long double
+ _TG_ATTRS
+ __tg_fmax(long double __x, long double __y) {return fmaxl(__x, __y);}
+
+#undef fmax
+#define fmax(__x, __y) __tg_fmax(__tg_promote2((__x), (__y))(__x), \
+ __tg_promote2((__x), (__y))(__y))
+
+// fmin
+
+static float
+ _TG_ATTRS
+ __tg_fmin(float __x, float __y) {return fminf(__x, __y);}
+
+static double
+ _TG_ATTRS
+ __tg_fmin(double __x, double __y) {return fmin(__x, __y);}
+
+static long double
+ _TG_ATTRS
+ __tg_fmin(long double __x, long double __y) {return fminl(__x, __y);}
+
+#undef fmin
+#define fmin(__x, __y) __tg_fmin(__tg_promote2((__x), (__y))(__x), \
+ __tg_promote2((__x), (__y))(__y))
+
+// fmod
+
+static float
+ _TG_ATTRS
+ __tg_fmod(float __x, float __y) {return fmodf(__x, __y);}
+
+static double
+ _TG_ATTRS
+ __tg_fmod(double __x, double __y) {return fmod(__x, __y);}
+
+static long double
+ _TG_ATTRS
+ __tg_fmod(long double __x, long double __y) {return fmodl(__x, __y);}
+
+#undef fmod
+#define fmod(__x, __y) __tg_fmod(__tg_promote2((__x), (__y))(__x), \
+ __tg_promote2((__x), (__y))(__y))
+
+// frexp
+
+static float
+ _TG_ATTRS
+ __tg_frexp(float __x, int* __y) {return frexpf(__x, __y);}
+
+static double
+ _TG_ATTRS
+ __tg_frexp(double __x, int* __y) {return frexp(__x, __y);}
+
+static long double
+ _TG_ATTRS
+ __tg_frexp(long double __x, int* __y) {return frexpl(__x, __y);}
+
+#undef frexp
+#define frexp(__x, __y) __tg_frexp(__tg_promote1((__x))(__x), __y)
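+
+/* Only the floating argument of frexp is promoted; the int* out-parameter
+ * passes through untouched. Illustrative use:
+ *
+ *   int e;
+ *   double m = frexp(8, &e);  // 8 promotes to 8.0; m == 0.5, e == 4
+ */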
+
+// hypot
+
+static float
+ _TG_ATTRS
+ __tg_hypot(float __x, float __y) {return hypotf(__x, __y);}
+
+static double
+ _TG_ATTRS
+ __tg_hypot(double __x, double __y) {return hypot(__x, __y);}
+
+static long double
+ _TG_ATTRS
+ __tg_hypot(long double __x, long double __y) {return hypotl(__x, __y);}
+
+#undef hypot
+#define hypot(__x, __y) __tg_hypot(__tg_promote2((__x), (__y))(__x), \
+ __tg_promote2((__x), (__y))(__y))
+
+// ilogb
+
+static int
+ _TG_ATTRS
+ __tg_ilogb(float __x) {return ilogbf(__x);}
+
+static int
+ _TG_ATTRS
+ __tg_ilogb(double __x) {return ilogb(__x);}
+
+static int
+ _TG_ATTRS
+ __tg_ilogb(long double __x) {return ilogbl(__x);}
+
+#undef ilogb
+#define ilogb(__x) __tg_ilogb(__tg_promote1((__x))(__x))
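+
+/* ilogb is unusual among these macros: every overload returns plain int,
+ * and only the argument is promoted (ilogb(1024.0) == 10, for instance). */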
+
+// ldexp
+
+static float
+ _TG_ATTRS
+ __tg_ldexp(float __x, int __y) {return ldexpf(__x, __y);}
+
+static double
+ _TG_ATTRS
+ __tg_ldexp(double __x, int __y) {return ldexp(__x, __y);}
+
+static long double
+ _TG_ATTRS
+ __tg_ldexp(long double __x, int __y) {return ldexpl(__x, __y);}
+
+#undef ldexp
+#define ldexp(__x, __y) __tg_ldexp(__tg_promote1((__x))(__x), __y)
+
+// lgamma
+
+static float
+ _TG_ATTRS
+ __tg_lgamma(float __x) {return lgammaf(__x);}
+
+static double
+ _TG_ATTRS
+ __tg_lgamma(double __x) {return lgamma(__x);}
+
+static long double
+ _TG_ATTRS
+ __tg_lgamma(long double __x) {return lgammal(__x);}
+
+#undef lgamma
+#define lgamma(__x) __tg_lgamma(__tg_promote1((__x))(__x))
+
+// llrint
+
+static long long
+ _TG_ATTRS
+ __tg_llrint(float __x) {return llrintf(__x);}
+
+static long long
+ _TG_ATTRS
+ __tg_llrint(double __x) {return llrint(__x);}
+
+static long long
+ _TG_ATTRS
+ __tg_llrint(long double __x) {return llrintl(__x);}
+
+#undef llrint
+#define llrint(__x) __tg_llrint(__tg_promote1((__x))(__x))
+
+// llround
+
+static long long
+ _TG_ATTRS
+ __tg_llround(float __x) {return llroundf(__x);}
+
+static long long
+ _TG_ATTRS
+ __tg_llround(double __x) {return llround(__x);}
+
+static long long
+ _TG_ATTRS
+ __tg_llround(long double __x) {return llroundl(__x);}
+
+#undef llround
+#define llround(__x) __tg_llround(__tg_promote1((__x))(__x))
+
+// log10
+
+static float
+ _TG_ATTRS
+ __tg_log10(float __x) {return log10f(__x);}
+
+static double
+ _TG_ATTRS
+ __tg_log10(double __x) {return log10(__x);}
+
+static long double
+ _TG_ATTRS
+ __tg_log10(long double __x) {return log10l(__x);}
+
+#undef log10
+#define log10(__x) __tg_log10(__tg_promote1((__x))(__x))
+
+// log1p
+
+static float
+ _TG_ATTRS
+ __tg_log1p(float __x) {return log1pf(__x);}
+
+static double
+ _TG_ATTRS
+ __tg_log1p(double __x) {return log1p(__x);}
+
+static long double
+ _TG_ATTRS
+ __tg_log1p(long double __x) {return log1pl(__x);}
+
+#undef log1p
+#define log1p(__x) __tg_log1p(__tg_promote1((__x))(__x))
+
+// log2
+
+static float
+ _TG_ATTRS
+ __tg_log2(float __x) {return log2f(__x);}
+
+static double
+ _TG_ATTRS
+ __tg_log2(double __x) {return log2(__x);}
+
+static long double
+ _TG_ATTRS
+ __tg_log2(long double __x) {return log2l(__x);}
+
+#undef log2
+#define log2(__x) __tg_log2(__tg_promote1((__x))(__x))
+
+// logb
+
+static float
+ _TG_ATTRS
+ __tg_logb(float __x) {return logbf(__x);}
+
+static double
+ _TG_ATTRS
+ __tg_logb(double __x) {return logb(__x);}
+
+static long double
+ _TG_ATTRS
+ __tg_logb(long double __x) {return logbl(__x);}
+
+#undef logb
+#define logb(__x) __tg_logb(__tg_promote1((__x))(__x))
+
+// lrint
+
+static long
+ _TG_ATTRS
+ __tg_lrint(float __x) {return lrintf(__x);}
+
+static long
+ _TG_ATTRS
+ __tg_lrint(double __x) {return lrint(__x);}
+
+static long
+ _TG_ATTRS
+ __tg_lrint(long double __x) {return lrintl(__x);}
+
+#undef lrint
+#define lrint(__x) __tg_lrint(__tg_promote1((__x))(__x))
+
+// lround
+
+static long
+ _TG_ATTRS
+ __tg_lround(float __x) {return lroundf(__x);}
+
+static long
+ _TG_ATTRS
+ __tg_lround(double __x) {return lround(__x);}
+
+static long
+ _TG_ATTRS
+ __tg_lround(long double __x) {return lroundl(__x);}
+
+#undef lround
+#define lround(__x) __tg_lround(__tg_promote1((__x))(__x))
+
+// nearbyint
+
+static float
+ _TG_ATTRS
+ __tg_nearbyint(float __x) {return nearbyintf(__x);}
+
+static double
+ _TG_ATTRS
+ __tg_nearbyint(double __x) {return nearbyint(__x);}
+
+static long double
+ _TG_ATTRS
+ __tg_nearbyint(long double __x) {return nearbyintl(__x);}
+
+#undef nearbyint
+#define nearbyint(__x) __tg_nearbyint(__tg_promote1((__x))(__x))
+
+// nextafter
+
+static float
+ _TG_ATTRS
+ __tg_nextafter(float __x, float __y) {return nextafterf(__x, __y);}
+
+static double
+ _TG_ATTRS
+ __tg_nextafter(double __x, double __y) {return nextafter(__x, __y);}
+
+static long double
+ _TG_ATTRS
+ __tg_nextafter(long double __x, long double __y) {return nextafterl(__x, __y);}
+
+#undef nextafter
+#define nextafter(__x, __y) __tg_nextafter(__tg_promote2((__x), (__y))(__x), \
+ __tg_promote2((__x), (__y))(__y))
+
+// nexttoward
+
+static float
+ _TG_ATTRS
+ __tg_nexttoward(float __x, long double __y) {return nexttowardf(__x, __y);}
+
+static double
+ _TG_ATTRS
+ __tg_nexttoward(double __x, long double __y) {return nexttoward(__x, __y);}
+
+static long double
+ _TG_ATTRS
+ __tg_nexttoward(long double __x, long double __y) {return nexttowardl(__x, __y);}
+
+#undef nexttoward
+#define nexttoward(__x, __y) __tg_nexttoward(__tg_promote1((__x))(__x), (__y))
+
+// remainder
+
+static float
+ _TG_ATTRS
+ __tg_remainder(float __x, float __y) {return remainderf(__x, __y);}
+
+static double
+ _TG_ATTRS
+ __tg_remainder(double __x, double __y) {return remainder(__x, __y);}
+
+static long double
+ _TG_ATTRS
+ __tg_remainder(long double __x, long double __y) {return remainderl(__x, __y);}
+
+#undef remainder
+#define remainder(__x, __y) __tg_remainder(__tg_promote2((__x), (__y))(__x), \
+ __tg_promote2((__x), (__y))(__y))
+
+// remquo
+
+static float
+ _TG_ATTRS
+ __tg_remquo(float __x, float __y, int* __z)
+ {return remquof(__x, __y, __z);}
+
+static double
+ _TG_ATTRS
+ __tg_remquo(double __x, double __y, int* __z)
+ {return remquo(__x, __y, __z);}
+
+static long double
+ _TG_ATTRS
+    __tg_remquo(long double __x, long double __y, int* __z)
+ {return remquol(__x, __y, __z);}
+
+#undef remquo
+#define remquo(__x, __y, __z) \
+ __tg_remquo(__tg_promote2((__x), (__y))(__x), \
+ __tg_promote2((__x), (__y))(__y), \
+ (__z))
+
+// rint
+
+static float
+ _TG_ATTRS
+ __tg_rint(float __x) {return rintf(__x);}
+
+static double
+ _TG_ATTRS
+ __tg_rint(double __x) {return rint(__x);}
+
+static long double
+ _TG_ATTRS
+ __tg_rint(long double __x) {return rintl(__x);}
+
+#undef rint
+#define rint(__x) __tg_rint(__tg_promote1((__x))(__x))
+
+// round
+
+static float
+ _TG_ATTRS
+ __tg_round(float __x) {return roundf(__x);}
+
+static double
+ _TG_ATTRS
+ __tg_round(double __x) {return round(__x);}
+
+static long double
+ _TG_ATTRS
+ __tg_round(long double __x) {return roundl(__x);}
+
+#undef round
+#define round(__x) __tg_round(__tg_promote1((__x))(__x))
+
+// scalbn
+
+static float
+ _TG_ATTRS
+ __tg_scalbn(float __x, int __y) {return scalbnf(__x, __y);}
+
+static double
+ _TG_ATTRS
+ __tg_scalbn(double __x, int __y) {return scalbn(__x, __y);}
+
+static long double
+ _TG_ATTRS
+ __tg_scalbn(long double __x, int __y) {return scalbnl(__x, __y);}
+
+#undef scalbn
+#define scalbn(__x, __y) __tg_scalbn(__tg_promote1((__x))(__x), __y)
+
+// scalbln
+
+static float
+ _TG_ATTRS
+ __tg_scalbln(float __x, long __y) {return scalblnf(__x, __y);}
+
+static double
+ _TG_ATTRS
+ __tg_scalbln(double __x, long __y) {return scalbln(__x, __y);}
+
+static long double
+ _TG_ATTRS
+ __tg_scalbln(long double __x, long __y) {return scalblnl(__x, __y);}
+
+#undef scalbln
+#define scalbln(__x, __y) __tg_scalbln(__tg_promote1((__x))(__x), __y)
+
+// tgamma
+
+static float
+ _TG_ATTRS
+ __tg_tgamma(float __x) {return tgammaf(__x);}
+
+static double
+ _TG_ATTRS
+ __tg_tgamma(double __x) {return tgamma(__x);}
+
+static long double
+ _TG_ATTRS
+ __tg_tgamma(long double __x) {return tgammal(__x);}
+
+#undef tgamma
+#define tgamma(__x) __tg_tgamma(__tg_promote1((__x))(__x))
+
+// trunc
+
+static float
+ _TG_ATTRS
+ __tg_trunc(float __x) {return truncf(__x);}
+
+static double
+ _TG_ATTRS
+ __tg_trunc(double __x) {return trunc(__x);}
+
+static long double
+ _TG_ATTRS
+ __tg_trunc(long double __x) {return truncl(__x);}
+
+#undef trunc
+#define trunc(__x) __tg_trunc(__tg_promote1((__x))(__x))
+
+// carg
+
+static float
+ _TG_ATTRS
+ __tg_carg(float __x) {return atan2f(0.F, __x);}
+
+static double
+ _TG_ATTRS
+ __tg_carg(double __x) {return atan2(0., __x);}
+
+static long double
+ _TG_ATTRS
+ __tg_carg(long double __x) {return atan2l(0.L, __x);}
+
+static float
+ _TG_ATTRS
+ __tg_carg(float _Complex __x) {return cargf(__x);}
+
+static double
+ _TG_ATTRS
+ __tg_carg(double _Complex __x) {return carg(__x);}
+
+static long double
+ _TG_ATTRS
+ __tg_carg(long double _Complex __x) {return cargl(__x);}
+
+#undef carg
+#define carg(__x) __tg_carg(__tg_promote1((__x))(__x))
+
+// cimag
+
+static float
+ _TG_ATTRS
+ __tg_cimag(float __x) {return 0;}
+
+static double
+ _TG_ATTRS
+ __tg_cimag(double __x) {return 0;}
+
+static long double
+ _TG_ATTRS
+ __tg_cimag(long double __x) {return 0;}
+
+static float
+ _TG_ATTRS
+ __tg_cimag(float _Complex __x) {return cimagf(__x);}
+
+static double
+ _TG_ATTRS
+ __tg_cimag(double _Complex __x) {return cimag(__x);}
+
+static long double
+ _TG_ATTRS
+ __tg_cimag(long double _Complex __x) {return cimagl(__x);}
+
+#undef cimag
+#define cimag(__x) __tg_cimag(__tg_promote1((__x))(__x))
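+
+/* For real arguments the imaginary part is identically zero, so
+ * cimag(3.0) == 0.0 while cimag(3.0 + 4.0*I) == 4.0. */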
+
+// conj
+
+static float _Complex
+ _TG_ATTRS
+ __tg_conj(float __x) {return __x;}
+
+static double _Complex
+ _TG_ATTRS
+ __tg_conj(double __x) {return __x;}
+
+static long double _Complex
+ _TG_ATTRS
+ __tg_conj(long double __x) {return __x;}
+
+static float _Complex
+ _TG_ATTRS
+ __tg_conj(float _Complex __x) {return conjf(__x);}
+
+static double _Complex
+ _TG_ATTRS
+ __tg_conj(double _Complex __x) {return conj(__x);}
+
+static long double _Complex
+ _TG_ATTRS
+ __tg_conj(long double _Complex __x) {return conjl(__x);}
+
+#undef conj
+#define conj(__x) __tg_conj(__tg_promote1((__x))(__x))
+
+// cproj
+
+static float _Complex
+ _TG_ATTRS
+ __tg_cproj(float __x) {return cprojf(__x);}
+
+static double _Complex
+ _TG_ATTRS
+ __tg_cproj(double __x) {return cproj(__x);}
+
+static long double _Complex
+ _TG_ATTRS
+ __tg_cproj(long double __x) {return cprojl(__x);}
+
+static float _Complex
+ _TG_ATTRS
+ __tg_cproj(float _Complex __x) {return cprojf(__x);}
+
+static double _Complex
+ _TG_ATTRS
+ __tg_cproj(double _Complex __x) {return cproj(__x);}
+
+static long double _Complex
+ _TG_ATTRS
+ __tg_cproj(long double _Complex __x) {return cprojl(__x);}
+
+#undef cproj
+#define cproj(__x) __tg_cproj(__tg_promote1((__x))(__x))
+
+// creal
+
+static float
+ _TG_ATTRS
+ __tg_creal(float __x) {return __x;}
+
+static double
+ _TG_ATTRS
+ __tg_creal(double __x) {return __x;}
+
+static long double
+ _TG_ATTRS
+ __tg_creal(long double __x) {return __x;}
+
+static float
+ _TG_ATTRS
+ __tg_creal(float _Complex __x) {return crealf(__x);}
+
+static double
+ _TG_ATTRS
+ __tg_creal(double _Complex __x) {return creal(__x);}
+
+static long double
+ _TG_ATTRS
+ __tg_creal(long double _Complex __x) {return creall(__x);}
+
+#undef creal
+#define creal(__x) __tg_creal(__tg_promote1((__x))(__x))
+
+#undef _TG_ATTRSp
+#undef _TG_ATTRS
+
+#endif /* __cplusplus */
+#endif /* __TGMATH_H */
diff --git a/clang-2690385/lib64/clang/3.8/include/tmmintrin.h b/clang-2690385/lib64/clang/3.8/include/tmmintrin.h
new file mode 100644
index 00000000..0002890c
--- /dev/null
+++ b/clang-2690385/lib64/clang/3.8/include/tmmintrin.h
@@ -0,0 +1,221 @@
+/*===---- tmmintrin.h - SSSE3 intrinsics -----------------------------------===
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+ * THE SOFTWARE.
+ *
+ *===-----------------------------------------------------------------------===
+ */
+
+#ifndef __TMMINTRIN_H
+#define __TMMINTRIN_H
+
+#include <pmmintrin.h>
+
+/* Define the default attributes for the functions in this file. */
+#define __DEFAULT_FN_ATTRS __attribute__((__always_inline__, __nodebug__, __target__("ssse3")))
+
+static __inline__ __m64 __DEFAULT_FN_ATTRS
+_mm_abs_pi8(__m64 __a)
+{
+ return (__m64)__builtin_ia32_pabsb((__v8qi)__a);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_abs_epi8(__m128i __a)
+{
+ return (__m128i)__builtin_ia32_pabsb128((__v16qi)__a);
+}
+
+static __inline__ __m64 __DEFAULT_FN_ATTRS
+_mm_abs_pi16(__m64 __a)
+{
+ return (__m64)__builtin_ia32_pabsw((__v4hi)__a);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_abs_epi16(__m128i __a)
+{
+ return (__m128i)__builtin_ia32_pabsw128((__v8hi)__a);
+}
+
+static __inline__ __m64 __DEFAULT_FN_ATTRS
+_mm_abs_pi32(__m64 __a)
+{
+ return (__m64)__builtin_ia32_pabsd((__v2si)__a);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_abs_epi32(__m128i __a)
+{
+ return (__m128i)__builtin_ia32_pabsd128((__v4si)__a);
+}
+
+#define _mm_alignr_epi8(a, b, n) __extension__ ({ \
+ (__m128i)__builtin_ia32_palignr128((__v16qi)(__m128i)(a), \
+ (__v16qi)(__m128i)(b), (n)); })
+
+#define _mm_alignr_pi8(a, b, n) __extension__ ({ \
+ (__m64)__builtin_ia32_palignr((__v8qi)(__m64)(a), (__v8qi)(__m64)(b), (n)); })
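+
+/* These two stay macros rather than inline functions because PALIGNR
+ * encodes the shift count as an immediate, so (n) must be a compile-time
+ * constant. Illustrative use:
+ *
+ *   __m128i r = _mm_alignr_epi8(hi, lo, 4); // bytes 4..19 of the
+ *                                           // 32-byte concat lo:hi
+ */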
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_hadd_epi16(__m128i __a, __m128i __b)
+{
+ return (__m128i)__builtin_ia32_phaddw128((__v8hi)__a, (__v8hi)__b);
+}
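+
+/* The horizontal adds sum adjacent pairs; for _mm_hadd_epi16 the result is
+ * { a0+a1, a2+a3, a4+a5, a6+a7, b0+b1, b2+b3, b4+b5, b6+b7 }. */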
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_hadd_epi32(__m128i __a, __m128i __b)
+{
+ return (__m128i)__builtin_ia32_phaddd128((__v4si)__a, (__v4si)__b);
+}
+
+static __inline__ __m64 __DEFAULT_FN_ATTRS
+_mm_hadd_pi16(__m64 __a, __m64 __b)
+{
+ return (__m64)__builtin_ia32_phaddw((__v4hi)__a, (__v4hi)__b);
+}
+
+static __inline__ __m64 __DEFAULT_FN_ATTRS
+_mm_hadd_pi32(__m64 __a, __m64 __b)
+{
+ return (__m64)__builtin_ia32_phaddd((__v2si)__a, (__v2si)__b);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_hadds_epi16(__m128i __a, __m128i __b)
+{
+ return (__m128i)__builtin_ia32_phaddsw128((__v8hi)__a, (__v8hi)__b);
+}
+
+static __inline__ __m64 __DEFAULT_FN_ATTRS
+_mm_hadds_pi16(__m64 __a, __m64 __b)
+{
+ return (__m64)__builtin_ia32_phaddsw((__v4hi)__a, (__v4hi)__b);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_hsub_epi16(__m128i __a, __m128i __b)
+{
+ return (__m128i)__builtin_ia32_phsubw128((__v8hi)__a, (__v8hi)__b);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_hsub_epi32(__m128i __a, __m128i __b)
+{
+ return (__m128i)__builtin_ia32_phsubd128((__v4si)__a, (__v4si)__b);
+}
+
+static __inline__ __m64 __DEFAULT_FN_ATTRS
+_mm_hsub_pi16(__m64 __a, __m64 __b)
+{
+ return (__m64)__builtin_ia32_phsubw((__v4hi)__a, (__v4hi)__b);
+}
+
+static __inline__ __m64 __DEFAULT_FN_ATTRS
+_mm_hsub_pi32(__m64 __a, __m64 __b)
+{
+ return (__m64)__builtin_ia32_phsubd((__v2si)__a, (__v2si)__b);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_hsubs_epi16(__m128i __a, __m128i __b)
+{
+ return (__m128i)__builtin_ia32_phsubsw128((__v8hi)__a, (__v8hi)__b);
+}
+
+static __inline__ __m64 __DEFAULT_FN_ATTRS
+_mm_hsubs_pi16(__m64 __a, __m64 __b)
+{
+ return (__m64)__builtin_ia32_phsubsw((__v4hi)__a, (__v4hi)__b);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_maddubs_epi16(__m128i __a, __m128i __b)
+{
+ return (__m128i)__builtin_ia32_pmaddubsw128((__v16qi)__a, (__v16qi)__b);
+}
+
+static __inline__ __m64 __DEFAULT_FN_ATTRS
+_mm_maddubs_pi16(__m64 __a, __m64 __b)
+{
+ return (__m64)__builtin_ia32_pmaddubsw((__v8qi)__a, (__v8qi)__b);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_mulhrs_epi16(__m128i __a, __m128i __b)
+{
+ return (__m128i)__builtin_ia32_pmulhrsw128((__v8hi)__a, (__v8hi)__b);
+}
+
+static __inline__ __m64 __DEFAULT_FN_ATTRS
+_mm_mulhrs_pi16(__m64 __a, __m64 __b)
+{
+ return (__m64)__builtin_ia32_pmulhrsw((__v4hi)__a, (__v4hi)__b);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_shuffle_epi8(__m128i __a, __m128i __b)
+{
+ return (__m128i)__builtin_ia32_pshufb128((__v16qi)__a, (__v16qi)__b);
+}
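+
+/* PSHUFB treats each byte of __b as a selector: the low four bits pick a
+ * byte of __a, and a set high bit zeroes that result byte, which lets this
+ * intrinsic double as a byte-wise table lookup or masked permute. */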
+
+static __inline__ __m64 __DEFAULT_FN_ATTRS
+_mm_shuffle_pi8(__m64 __a, __m64 __b)
+{
+ return (__m64)__builtin_ia32_pshufb((__v8qi)__a, (__v8qi)__b);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_sign_epi8(__m128i __a, __m128i __b)
+{
+ return (__m128i)__builtin_ia32_psignb128((__v16qi)__a, (__v16qi)__b);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_sign_epi16(__m128i __a, __m128i __b)
+{
+ return (__m128i)__builtin_ia32_psignw128((__v8hi)__a, (__v8hi)__b);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_sign_epi32(__m128i __a, __m128i __b)
+{
+ return (__m128i)__builtin_ia32_psignd128((__v4si)__a, (__v4si)__b);
+}
+
+static __inline__ __m64 __DEFAULT_FN_ATTRS
+_mm_sign_pi8(__m64 __a, __m64 __b)
+{
+ return (__m64)__builtin_ia32_psignb((__v8qi)__a, (__v8qi)__b);
+}
+
+static __inline__ __m64 __DEFAULT_FN_ATTRS
+_mm_sign_pi16(__m64 __a, __m64 __b)
+{
+ return (__m64)__builtin_ia32_psignw((__v4hi)__a, (__v4hi)__b);
+}
+
+static __inline__ __m64 __DEFAULT_FN_ATTRS
+_mm_sign_pi32(__m64 __a, __m64 __b)
+{
+ return (__m64)__builtin_ia32_psignd((__v2si)__a, (__v2si)__b);
+}
+
+#undef __DEFAULT_FN_ATTRS
+
+#endif /* __TMMINTRIN_H */
diff --git a/clang-2690385/lib64/clang/3.8/include/unwind.h b/clang-2690385/lib64/clang/3.8/include/unwind.h
new file mode 100644
index 00000000..303d7928
--- /dev/null
+++ b/clang-2690385/lib64/clang/3.8/include/unwind.h
@@ -0,0 +1,282 @@
+/*===---- unwind.h - Stack unwinding ----------------------------------------===
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+ * THE SOFTWARE.
+ *
+ *===-----------------------------------------------------------------------===
+ */
+
+/* See "Data Definitions for libgcc_s" in the Linux Standard Base.*/
+
+#ifndef __CLANG_UNWIND_H
+#define __CLANG_UNWIND_H
+
+#if defined(__APPLE__) && __has_include_next(<unwind.h>)
+/* Darwin (from 11.x on) provides an unwind.h. If that's available,
+ * use it. libunwind wraps some of its definitions in #ifdef _GNU_SOURCE,
+ * so define that around the include. */
+# ifndef _GNU_SOURCE
+# define _SHOULD_UNDEFINE_GNU_SOURCE
+# define _GNU_SOURCE
+# endif
+// libunwind's unwind.h reflects the current visibility. However, Mozilla
+// builds with -fvisibility=hidden and relies on gcc's unwind.h to reset the
+// visibility to default and export its contents. gcc also allows users to
+// override its override by #defining HIDE_EXPORTS (but note, this only obeys
+// the user's -fvisibility setting; it doesn't hide any exports on its own). We
+// imitate gcc's header here:
+# ifdef HIDE_EXPORTS
+# include_next <unwind.h>
+# else
+# pragma GCC visibility push(default)
+# include_next <unwind.h>
+# pragma GCC visibility pop
+# endif
+# ifdef _SHOULD_UNDEFINE_GNU_SOURCE
+# undef _GNU_SOURCE
+# undef _SHOULD_UNDEFINE_GNU_SOURCE
+# endif
+#else
+
+#include <stdint.h>
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+/* It is a bit strange for a header to play with the visibility of the
+ symbols it declares, but this matches gcc's behavior and some programs
+ depend on it. */
+#ifndef HIDE_EXPORTS
+#pragma GCC visibility push(default)
+#endif
+
+typedef uintptr_t _Unwind_Word;
+typedef intptr_t _Unwind_Sword;
+typedef uintptr_t _Unwind_Ptr;
+typedef uintptr_t _Unwind_Internal_Ptr;
+typedef uint64_t _Unwind_Exception_Class;
+
+typedef intptr_t _sleb128_t;
+typedef uintptr_t _uleb128_t;
+
+struct _Unwind_Context;
+struct _Unwind_Exception;
+typedef enum {
+ _URC_NO_REASON = 0,
+ _URC_FOREIGN_EXCEPTION_CAUGHT = 1,
+
+ _URC_FATAL_PHASE2_ERROR = 2,
+ _URC_FATAL_PHASE1_ERROR = 3,
+ _URC_NORMAL_STOP = 4,
+
+ _URC_END_OF_STACK = 5,
+ _URC_HANDLER_FOUND = 6,
+ _URC_INSTALL_CONTEXT = 7,
+ _URC_CONTINUE_UNWIND = 8
+} _Unwind_Reason_Code;
+
+typedef enum {
+ _UA_SEARCH_PHASE = 1,
+ _UA_CLEANUP_PHASE = 2,
+
+ _UA_HANDLER_FRAME = 4,
+ _UA_FORCE_UNWIND = 8,
+ _UA_END_OF_STACK = 16 /* gcc extension to C++ ABI */
+} _Unwind_Action;
+
+typedef void (*_Unwind_Exception_Cleanup_Fn)(_Unwind_Reason_Code,
+ struct _Unwind_Exception *);
+
+struct _Unwind_Exception {
+ _Unwind_Exception_Class exception_class;
+ _Unwind_Exception_Cleanup_Fn exception_cleanup;
+ _Unwind_Word private_1;
+ _Unwind_Word private_2;
+ /* The Itanium ABI requires that _Unwind_Exception objects are "double-word
+ * aligned". GCC has interpreted this to mean "use the maximum useful
+ * alignment for the target"; so do we. */
+} __attribute__((__aligned__));
+
+typedef _Unwind_Reason_Code (*_Unwind_Stop_Fn)(int, _Unwind_Action,
+ _Unwind_Exception_Class,
+ struct _Unwind_Exception *,
+ struct _Unwind_Context *,
+ void *);
+
+typedef _Unwind_Reason_Code (*_Unwind_Personality_Fn)(
+ int, _Unwind_Action, _Unwind_Exception_Class, struct _Unwind_Exception *,
+ struct _Unwind_Context *);
+typedef _Unwind_Personality_Fn __personality_routine;
+
+typedef _Unwind_Reason_Code (*_Unwind_Trace_Fn)(struct _Unwind_Context *,
+ void *);
+
+#if defined(__arm__) && !defined(__APPLE__)
+
+typedef enum {
+ _UVRSC_CORE = 0, /* integer register */
+ _UVRSC_VFP = 1, /* vfp */
+ _UVRSC_WMMXD = 3, /* Intel WMMX data register */
+ _UVRSC_WMMXC = 4 /* Intel WMMX control register */
+} _Unwind_VRS_RegClass;
+
+typedef enum {
+ _UVRSD_UINT32 = 0,
+ _UVRSD_VFPX = 1,
+ _UVRSD_UINT64 = 3,
+ _UVRSD_FLOAT = 4,
+ _UVRSD_DOUBLE = 5
+} _Unwind_VRS_DataRepresentation;
+
+typedef enum {
+ _UVRSR_OK = 0,
+ _UVRSR_NOT_IMPLEMENTED = 1,
+ _UVRSR_FAILED = 2
+} _Unwind_VRS_Result;
+
+_Unwind_VRS_Result _Unwind_VRS_Get(struct _Unwind_Context *__context,
+ _Unwind_VRS_RegClass __regclass,
+ uint32_t __regno,
+ _Unwind_VRS_DataRepresentation __representation,
+ void *__valuep);
+
+_Unwind_VRS_Result _Unwind_VRS_Set(struct _Unwind_Context *__context,
+ _Unwind_VRS_RegClass __regclass,
+ uint32_t __regno,
+ _Unwind_VRS_DataRepresentation __representation,
+ void *__valuep);
+
+static __inline__
+_Unwind_Word _Unwind_GetGR(struct _Unwind_Context *__context, int __index) {
+ _Unwind_Word __value;
+ _Unwind_VRS_Get(__context, _UVRSC_CORE, __index, _UVRSD_UINT32, &__value);
+ return __value;
+}
+
+static __inline__
+void _Unwind_SetGR(struct _Unwind_Context *__context, int __index,
+ _Unwind_Word __value) {
+ _Unwind_VRS_Set(__context, _UVRSC_CORE, __index, _UVRSD_UINT32, &__value);
+}
+
+static __inline__
+_Unwind_Word _Unwind_GetIP(struct _Unwind_Context *__context) {
+ _Unwind_Word __ip = _Unwind_GetGR(__context, 15);
+ return __ip & ~(_Unwind_Word)(0x1); /* Remove thumb mode bit. */
+}
+
+static __inline__
+void _Unwind_SetIP(struct _Unwind_Context *__context, _Unwind_Word __value) {
+ _Unwind_Word __thumb_mode_bit = _Unwind_GetGR(__context, 15) & 0x1;
+ _Unwind_SetGR(__context, 15, __value | __thumb_mode_bit);
+}
+#else
+_Unwind_Word _Unwind_GetGR(struct _Unwind_Context *, int);
+void _Unwind_SetGR(struct _Unwind_Context *, int, _Unwind_Word);
+
+_Unwind_Word _Unwind_GetIP(struct _Unwind_Context *);
+void _Unwind_SetIP(struct _Unwind_Context *, _Unwind_Word);
+#endif
+
+
+_Unwind_Word _Unwind_GetIPInfo(struct _Unwind_Context *, int *);
+
+_Unwind_Word _Unwind_GetCFA(struct _Unwind_Context *);
+
+_Unwind_Word _Unwind_GetBSP(struct _Unwind_Context *);
+
+void *_Unwind_GetLanguageSpecificData(struct _Unwind_Context *);
+
+_Unwind_Ptr _Unwind_GetRegionStart(struct _Unwind_Context *);
+
+/* DWARF EH functions; currently not available on Darwin/ARM */
+#if !defined(__APPLE__) || !defined(__arm__)
+
+_Unwind_Reason_Code _Unwind_RaiseException(struct _Unwind_Exception *);
+_Unwind_Reason_Code _Unwind_ForcedUnwind(struct _Unwind_Exception *,
+ _Unwind_Stop_Fn, void *);
+void _Unwind_DeleteException(struct _Unwind_Exception *);
+void _Unwind_Resume(struct _Unwind_Exception *);
+_Unwind_Reason_Code _Unwind_Resume_or_Rethrow(struct _Unwind_Exception *);
+
+#endif
+
+_Unwind_Reason_Code _Unwind_Backtrace(_Unwind_Trace_Fn, void *);
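+
+/* Minimal backtrace sketch (illustrative only; frame_cb is a hypothetical
+ * user callback, not part of this header, and printf needs <stdio.h>):
+ *
+ *   static _Unwind_Reason_Code frame_cb(struct _Unwind_Context *ctx,
+ *                                       void *arg) {
+ *     printf("ip = %p\n", (void *)_Unwind_GetIP(ctx));
+ *     return _URC_NO_REASON;   // keep walking; any other code stops
+ *   }
+ *   ...
+ *   _Unwind_Backtrace(frame_cb, NULL);
+ */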
+
+/* setjmp(3)/longjmp(3) stuff */
+typedef struct SjLj_Function_Context *_Unwind_FunctionContext_t;
+
+void _Unwind_SjLj_Register(_Unwind_FunctionContext_t);
+void _Unwind_SjLj_Unregister(_Unwind_FunctionContext_t);
+_Unwind_Reason_Code _Unwind_SjLj_RaiseException(struct _Unwind_Exception *);
+_Unwind_Reason_Code _Unwind_SjLj_ForcedUnwind(struct _Unwind_Exception *,
+ _Unwind_Stop_Fn, void *);
+void _Unwind_SjLj_Resume(struct _Unwind_Exception *);
+_Unwind_Reason_Code _Unwind_SjLj_Resume_or_Rethrow(struct _Unwind_Exception *);
+
+void *_Unwind_FindEnclosingFunction(void *);
+
+#ifdef __APPLE__
+
+_Unwind_Ptr _Unwind_GetDataRelBase(struct _Unwind_Context *)
+ __attribute__((__unavailable__));
+_Unwind_Ptr _Unwind_GetTextRelBase(struct _Unwind_Context *)
+ __attribute__((__unavailable__));
+
+/* Darwin-specific functions */
+void __register_frame(const void *);
+void __deregister_frame(const void *);
+
+struct dwarf_eh_bases {
+ uintptr_t tbase;
+ uintptr_t dbase;
+ uintptr_t func;
+};
+void *_Unwind_Find_FDE(const void *, struct dwarf_eh_bases *);
+
+void __register_frame_info_bases(const void *, void *, void *, void *)
+ __attribute__((__unavailable__));
+void __register_frame_info(const void *, void *) __attribute__((__unavailable__));
+void __register_frame_info_table_bases(const void *, void*, void *, void *)
+ __attribute__((__unavailable__));
+void __register_frame_info_table(const void *, void *)
+ __attribute__((__unavailable__));
+void __register_frame_table(const void *) __attribute__((__unavailable__));
+void __deregister_frame_info(const void *) __attribute__((__unavailable__));
+void __deregister_frame_info_bases(const void *) __attribute__((__unavailable__));
+
+#else
+
+_Unwind_Ptr _Unwind_GetDataRelBase(struct _Unwind_Context *);
+_Unwind_Ptr _Unwind_GetTextRelBase(struct _Unwind_Context *);
+
+#endif
+
+
+#ifndef HIDE_EXPORTS
+#pragma GCC visibility pop
+#endif
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif
+
+#endif /* __CLANG_UNWIND_H */
diff --git a/clang-2690385/lib64/clang/3.8/include/vadefs.h b/clang-2690385/lib64/clang/3.8/include/vadefs.h
new file mode 100644
index 00000000..7fe9a74e
--- /dev/null
+++ b/clang-2690385/lib64/clang/3.8/include/vadefs.h
@@ -0,0 +1,65 @@
+/* ===-------- vadefs.h ---------------------------------------------------===
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+ * THE SOFTWARE.
+ *
+ *===-----------------------------------------------------------------------===
+ */
+
+/* Only include this if we are aiming for MSVC compatibility. */
+#ifndef _MSC_VER
+#include_next <vadefs.h>
+#else
+
+#ifndef __clang_vadefs_h
+#define __clang_vadefs_h
+
+#include_next <vadefs.h>
+
+/* Override macros from vadefs.h with definitions that work with Clang. */
+#ifdef _crt_va_start
+#undef _crt_va_start
+#define _crt_va_start(ap, param) __builtin_va_start(ap, param)
+#endif
+#ifdef _crt_va_end
+#undef _crt_va_end
+#define _crt_va_end(ap) __builtin_va_end(ap)
+#endif
+#ifdef _crt_va_arg
+#undef _crt_va_arg
+#define _crt_va_arg(ap, type) __builtin_va_arg(ap, type)
+#endif
+
+/* VS 2015 switched to double underscore names, which is an improvement, but now
+ * we have to intercept those names too.
+ */
+#ifdef __crt_va_start
+#undef __crt_va_start
+#define __crt_va_start(ap, param) __builtin_va_start(ap, param)
+#endif
+#ifdef __crt_va_end
+#undef __crt_va_end
+#define __crt_va_end(ap) __builtin_va_end(ap)
+#endif
+#ifdef __crt_va_arg
+#undef __crt_va_arg
+#define __crt_va_arg(ap, type) __builtin_va_arg(ap, type)
+#endif
+
+#endif
+#endif
diff --git a/clang-2690385/lib64/clang/3.8/include/varargs.h b/clang-2690385/lib64/clang/3.8/include/varargs.h
new file mode 100644
index 00000000..b5477d0a
--- /dev/null
+++ b/clang-2690385/lib64/clang/3.8/include/varargs.h
@@ -0,0 +1,26 @@
+/*===---- varargs.h - Variable argument handling --------------------------===
+*
+* Permission is hereby granted, free of charge, to any person obtaining a copy
+* of this software and associated documentation files (the "Software"), to deal
+* in the Software without restriction, including without limitation the rights
+* to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+* copies of the Software, and to permit persons to whom the Software is
+* furnished to do so, subject to the following conditions:
+*
+* The above copyright notice and this permission notice shall be included in
+* all copies or substantial portions of the Software.
+*
+* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+* THE SOFTWARE.
+*
+*===-----------------------------------------------------------------------===
+*/
+#ifndef __VARARGS_H
+#define __VARARGS_H
+ #error "Please use <stdarg.h> instead of <varargs.h>"
+#endif
diff --git a/clang-2690385/lib64/clang/3.8/include/vecintrin.h b/clang-2690385/lib64/clang/3.8/include/vecintrin.h
new file mode 100644
index 00000000..ca7acb47
--- /dev/null
+++ b/clang-2690385/lib64/clang/3.8/include/vecintrin.h
@@ -0,0 +1,8946 @@
+/*===---- vecintrin.h - Vector intrinsics ----------------------------------===
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+ * THE SOFTWARE.
+ *
+ *===-----------------------------------------------------------------------===
+ */
+
+#if defined(__s390x__) && defined(__VEC__)
+
+#define __ATTRS_ai __attribute__((__always_inline__))
+#define __ATTRS_o __attribute__((__overloadable__))
+#define __ATTRS_o_ai __attribute__((__overloadable__, __always_inline__))
+
+#define __constant(PARM) \
+ __attribute__((__enable_if__ ((PARM) == (PARM), \
+ "argument must be a constant integer")))
+#define __constant_range(PARM, LOW, HIGH) \
+ __attribute__((__enable_if__ ((PARM) >= (LOW) && (PARM) <= (HIGH), \
+ "argument must be a constant integer from " #LOW " to " #HIGH)))
+#define __constant_pow2_range(PARM, LOW, HIGH) \
+ __attribute__((__enable_if__ ((PARM) >= (LOW) && (PARM) <= (HIGH) && \
+ ((PARM) & ((PARM) - 1)) == 0, \
+ "argument must be a constant power of 2 from " #LOW " to " #HIGH)))
+
+/*-- __lcbb -----------------------------------------------------------------*/
+
+extern __ATTRS_o unsigned int
+__lcbb(const void *__ptr, unsigned short __len)
+ __constant_pow2_range(__len, 64, 4096);
+
+#define __lcbb(X, Y) ((__typeof__((__lcbb)((X), (Y)))) \
+                      __builtin_s390_lcbb((X), __builtin_constant_p((Y)) ? \
+ ((Y) == 64 ? 0 : \
+ (Y) == 128 ? 1 : \
+ (Y) == 256 ? 2 : \
+ (Y) == 512 ? 3 : \
+ (Y) == 1024 ? 4 : \
+ (Y) == 2048 ? 5 : \
+ (Y) == 4096 ? 6 : 0) : 0))
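+
+/* __lcbb ("load count to block boundary") reports how many bytes can be
+ * loaded from __ptr without crossing the given power-of-two boundary,
+ * capped at 16; the macro folds the boundary argument (64..4096) into the
+ * 3-bit encoding the LCBB instruction expects. */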
+
+/*-- vec_extract ------------------------------------------------------------*/
+
+static inline __ATTRS_o_ai signed char
+vec_extract(vector signed char __vec, int __index) {
+ return __vec[__index & 15];
+}
+
+static inline __ATTRS_o_ai unsigned char
+vec_extract(vector bool char __vec, int __index) {
+ return __vec[__index & 15];
+}
+
+static inline __ATTRS_o_ai unsigned char
+vec_extract(vector unsigned char __vec, int __index) {
+ return __vec[__index & 15];
+}
+
+static inline __ATTRS_o_ai signed short
+vec_extract(vector signed short __vec, int __index) {
+ return __vec[__index & 7];
+}
+
+static inline __ATTRS_o_ai unsigned short
+vec_extract(vector bool short __vec, int __index) {
+ return __vec[__index & 7];
+}
+
+static inline __ATTRS_o_ai unsigned short
+vec_extract(vector unsigned short __vec, int __index) {
+ return __vec[__index & 7];
+}
+
+static inline __ATTRS_o_ai signed int
+vec_extract(vector signed int __vec, int __index) {
+ return __vec[__index & 3];
+}
+
+static inline __ATTRS_o_ai unsigned int
+vec_extract(vector bool int __vec, int __index) {
+ return __vec[__index & 3];
+}
+
+static inline __ATTRS_o_ai unsigned int
+vec_extract(vector unsigned int __vec, int __index) {
+ return __vec[__index & 3];
+}
+
+static inline __ATTRS_o_ai signed long long
+vec_extract(vector signed long long __vec, int __index) {
+ return __vec[__index & 1];
+}
+
+static inline __ATTRS_o_ai unsigned long long
+vec_extract(vector bool long long __vec, int __index) {
+ return __vec[__index & 1];
+}
+
+static inline __ATTRS_o_ai unsigned long long
+vec_extract(vector unsigned long long __vec, int __index) {
+ return __vec[__index & 1];
+}
+
+static inline __ATTRS_o_ai double
+vec_extract(vector double __vec, int __index) {
+ return __vec[__index & 1];
+}
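+
+/* In every overload above the index is masked to the lane count (16, 8,
+ * 4 or 2), so vec_extract(__vec, 17) on a char vector reads lane 1 rather
+ * than indexing out of bounds. */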
+
+/*-- vec_insert -------------------------------------------------------------*/
+
+static inline __ATTRS_o_ai vector signed char
+vec_insert(signed char __scalar, vector signed char __vec, int __index) {
+ __vec[__index & 15] = __scalar;
+ return __vec;
+}
+
+static inline __ATTRS_o_ai vector unsigned char
+vec_insert(unsigned char __scalar, vector bool char __vec, int __index) {
+ vector unsigned char __newvec = (vector unsigned char)__vec;
+ __newvec[__index & 15] = (unsigned char)__scalar;
+ return __newvec;
+}
+
+static inline __ATTRS_o_ai vector unsigned char
+vec_insert(unsigned char __scalar, vector unsigned char __vec, int __index) {
+ __vec[__index & 15] = __scalar;
+ return __vec;
+}
+
+static inline __ATTRS_o_ai vector signed short
+vec_insert(signed short __scalar, vector signed short __vec, int __index) {
+ __vec[__index & 7] = __scalar;
+ return __vec;
+}
+
+static inline __ATTRS_o_ai vector unsigned short
+vec_insert(unsigned short __scalar, vector bool short __vec, int __index) {
+ vector unsigned short __newvec = (vector unsigned short)__vec;
+ __newvec[__index & 7] = (unsigned short)__scalar;
+ return __newvec;
+}
+
+static inline __ATTRS_o_ai vector unsigned short
+vec_insert(unsigned short __scalar, vector unsigned short __vec, int __index) {
+ __vec[__index & 7] = __scalar;
+ return __vec;
+}
+
+static inline __ATTRS_o_ai vector signed int
+vec_insert(signed int __scalar, vector signed int __vec, int __index) {
+ __vec[__index & 3] = __scalar;
+ return __vec;
+}
+
+static inline __ATTRS_o_ai vector unsigned int
+vec_insert(unsigned int __scalar, vector bool int __vec, int __index) {
+ vector unsigned int __newvec = (vector unsigned int)__vec;
+ __newvec[__index & 3] = __scalar;
+ return __newvec;
+}
+
+static inline __ATTRS_o_ai vector unsigned int
+vec_insert(unsigned int __scalar, vector unsigned int __vec, int __index) {
+ __vec[__index & 3] = __scalar;
+ return __vec;
+}
+
+static inline __ATTRS_o_ai vector signed long long
+vec_insert(signed long long __scalar, vector signed long long __vec,
+ int __index) {
+ __vec[__index & 1] = __scalar;
+ return __vec;
+}
+
+static inline __ATTRS_o_ai vector unsigned long long
+vec_insert(unsigned long long __scalar, vector bool long long __vec,
+ int __index) {
+ vector unsigned long long __newvec = (vector unsigned long long)__vec;
+ __newvec[__index & 1] = __scalar;
+ return __newvec;
+}
+
+static inline __ATTRS_o_ai vector unsigned long long
+vec_insert(unsigned long long __scalar, vector unsigned long long __vec,
+ int __index) {
+ __vec[__index & 1] = __scalar;
+ return __vec;
+}
+
+static inline __ATTRS_o_ai vector double
+vec_insert(double __scalar, vector double __vec, int __index) {
+ __vec[__index & 1] = __scalar;
+ return __vec;
+}
+
+/*-- vec_promote ------------------------------------------------------------*/
+
+static inline __ATTRS_o_ai vector signed char
+vec_promote(signed char __scalar, int __index) {
+ const vector signed char __zero = (vector signed char)0;
+ vector signed char __vec = __builtin_shufflevector(__zero, __zero,
+ -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1);
+ __vec[__index & 15] = __scalar;
+ return __vec;
+}
+
+static inline __ATTRS_o_ai vector unsigned char
+vec_promote(unsigned char __scalar, int __index) {
+ const vector unsigned char __zero = (vector unsigned char)0;
+ vector unsigned char __vec = __builtin_shufflevector(__zero, __zero,
+ -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1);
+ __vec[__index & 15] = __scalar;
+ return __vec;
+}
+
+static inline __ATTRS_o_ai vector signed short
+vec_promote(signed short __scalar, int __index) {
+ const vector signed short __zero = (vector signed short)0;
+ vector signed short __vec = __builtin_shufflevector(__zero, __zero,
+ -1, -1, -1, -1, -1, -1, -1, -1);
+ __vec[__index & 7] = __scalar;
+ return __vec;
+}
+
+static inline __ATTRS_o_ai vector unsigned short
+vec_promote(unsigned short __scalar, int __index) {
+ const vector unsigned short __zero = (vector unsigned short)0;
+ vector unsigned short __vec = __builtin_shufflevector(__zero, __zero,
+ -1, -1, -1, -1, -1, -1, -1, -1);
+ __vec[__index & 7] = __scalar;
+ return __vec;
+}
+
+static inline __ATTRS_o_ai vector signed int
+vec_promote(signed int __scalar, int __index) {
+ const vector signed int __zero = (vector signed int)0;
+ vector signed int __vec = __builtin_shufflevector(__zero, __zero,
+ -1, -1, -1, -1);
+ __vec[__index & 3] = __scalar;
+ return __vec;
+}
+
+static inline __ATTRS_o_ai vector unsigned int
+vec_promote(unsigned int __scalar, int __index) {
+ const vector unsigned int __zero = (vector unsigned int)0;
+ vector unsigned int __vec = __builtin_shufflevector(__zero, __zero,
+ -1, -1, -1, -1);
+ __vec[__index & 3] = __scalar;
+ return __vec;
+}
+
+static inline __ATTRS_o_ai vector signed long long
+vec_promote(signed long long __scalar, int __index) {
+ const vector signed long long __zero = (vector signed long long)0;
+ vector signed long long __vec = __builtin_shufflevector(__zero, __zero,
+ -1, -1);
+ __vec[__index & 1] = __scalar;
+ return __vec;
+}
+
+static inline __ATTRS_o_ai vector unsigned long long
+vec_promote(unsigned long long __scalar, int __index) {
+ const vector unsigned long long __zero = (vector unsigned long long)0;
+ vector unsigned long long __vec = __builtin_shufflevector(__zero, __zero,
+ -1, -1);
+ __vec[__index & 1] = __scalar;
+ return __vec;
+}
+
+static inline __ATTRS_o_ai vector double
+vec_promote(double __scalar, int __index) {
+ const vector double __zero = (vector double)0;
+ vector double __vec = __builtin_shufflevector(__zero, __zero, -1, -1);
+ __vec[__index & 1] = __scalar;
+ return __vec;
+}
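+
+/* Unlike vec_insert, vec_promote leaves every other lane undefined: the
+ * -1 indices in the __builtin_shufflevector calls above deliberately mark
+ * those elements as "don't care", giving the optimizer free rein. */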
+
+/*-- vec_insert_and_zero ----------------------------------------------------*/
+
+static inline __ATTRS_o_ai vector signed char
+vec_insert_and_zero(const signed char *__ptr) {
+ vector signed char __vec = (vector signed char)0;
+ __vec[7] = *__ptr;
+ return __vec;
+}
+
+static inline __ATTRS_o_ai vector unsigned char
+vec_insert_and_zero(const unsigned char *__ptr) {
+ vector unsigned char __vec = (vector unsigned char)0;
+ __vec[7] = *__ptr;
+ return __vec;
+}
+
+static inline __ATTRS_o_ai vector signed short
+vec_insert_and_zero(const signed short *__ptr) {
+ vector signed short __vec = (vector signed short)0;
+ __vec[3] = *__ptr;
+ return __vec;
+}
+
+static inline __ATTRS_o_ai vector unsigned short
+vec_insert_and_zero(const unsigned short *__ptr) {
+ vector unsigned short __vec = (vector unsigned short)0;
+ __vec[3] = *__ptr;
+ return __vec;
+}
+
+static inline __ATTRS_o_ai vector signed int
+vec_insert_and_zero(const signed int *__ptr) {
+ vector signed int __vec = (vector signed int)0;
+ __vec[1] = *__ptr;
+ return __vec;
+}
+
+static inline __ATTRS_o_ai vector unsigned int
+vec_insert_and_zero(const unsigned int *__ptr) {
+ vector unsigned int __vec = (vector unsigned int)0;
+ __vec[1] = *__ptr;
+ return __vec;
+}
+
+static inline __ATTRS_o_ai vector signed long long
+vec_insert_and_zero(const signed long long *__ptr) {
+ vector signed long long __vec = (vector signed long long)0;
+ __vec[0] = *__ptr;
+ return __vec;
+}
+
+static inline __ATTRS_o_ai vector unsigned long long
+vec_insert_and_zero(const unsigned long long *__ptr) {
+ vector unsigned long long __vec = (vector unsigned long long)0;
+ __vec[0] = *__ptr;
+ return __vec;
+}
+
+static inline __ATTRS_o_ai vector double
+vec_insert_and_zero(const double *__ptr) {
+ vector double __vec = (vector double)0;
+ __vec[0] = *__ptr;
+ return __vec;
+}
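+
+/* The target lane varies with the element size (byte 7, halfword 3,
+ * word 1, doubleword 0) because these map to the VLLEZ instruction, which
+ * loads into the rightmost element of the left doubleword and zeroes the
+ * rest of the vector. */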
+
+/*-- vec_perm ---------------------------------------------------------------*/
+
+static inline __ATTRS_o_ai vector signed char
+vec_perm(vector signed char __a, vector signed char __b,
+ vector unsigned char __c) {
+ return (vector signed char)__builtin_s390_vperm(
+ (vector unsigned char)__a, (vector unsigned char)__b, __c);
+}
+
+static inline __ATTRS_o_ai vector unsigned char
+vec_perm(vector unsigned char __a, vector unsigned char __b,
+ vector unsigned char __c) {
+ return (vector unsigned char)__builtin_s390_vperm(
+ (vector unsigned char)__a, (vector unsigned char)__b, __c);
+}
+
+static inline __ATTRS_o_ai vector bool char
+vec_perm(vector bool char __a, vector bool char __b,
+ vector unsigned char __c) {
+ return (vector bool char)__builtin_s390_vperm(
+ (vector unsigned char)__a, (vector unsigned char)__b, __c);
+}
+
+static inline __ATTRS_o_ai vector signed short
+vec_perm(vector signed short __a, vector signed short __b,
+ vector unsigned char __c) {
+ return (vector signed short)__builtin_s390_vperm(
+ (vector unsigned char)__a, (vector unsigned char)__b, __c);
+}
+
+static inline __ATTRS_o_ai vector unsigned short
+vec_perm(vector unsigned short __a, vector unsigned short __b,
+ vector unsigned char __c) {
+ return (vector unsigned short)__builtin_s390_vperm(
+ (vector unsigned char)__a, (vector unsigned char)__b, __c);
+}
+
+static inline __ATTRS_o_ai vector bool short
+vec_perm(vector bool short __a, vector bool short __b,
+ vector unsigned char __c) {
+ return (vector bool short)__builtin_s390_vperm(
+ (vector unsigned char)__a, (vector unsigned char)__b, __c);
+}
+
+static inline __ATTRS_o_ai vector signed int
+vec_perm(vector signed int __a, vector signed int __b,
+ vector unsigned char __c) {
+ return (vector signed int)__builtin_s390_vperm(
+ (vector unsigned char)__a, (vector unsigned char)__b, __c);
+}
+
+static inline __ATTRS_o_ai vector unsigned int
+vec_perm(vector unsigned int __a, vector unsigned int __b,
+ vector unsigned char __c) {
+ return (vector unsigned int)__builtin_s390_vperm(
+ (vector unsigned char)__a, (vector unsigned char)__b, __c);
+}
+
+static inline __ATTRS_o_ai vector bool int
+vec_perm(vector bool int __a, vector bool int __b,
+ vector unsigned char __c) {
+ return (vector bool int)__builtin_s390_vperm(
+ (vector unsigned char)__a, (vector unsigned char)__b, __c);
+}
+
+static inline __ATTRS_o_ai vector signed long long
+vec_perm(vector signed long long __a, vector signed long long __b,
+ vector unsigned char __c) {
+ return (vector signed long long)__builtin_s390_vperm(
+ (vector unsigned char)__a, (vector unsigned char)__b, __c);
+}
+
+static inline __ATTRS_o_ai vector unsigned long long
+vec_perm(vector unsigned long long __a, vector unsigned long long __b,
+ vector unsigned char __c) {
+ return (vector unsigned long long)__builtin_s390_vperm(
+ (vector unsigned char)__a, (vector unsigned char)__b, __c);
+}
+
+static inline __ATTRS_o_ai vector bool long long
+vec_perm(vector bool long long __a, vector bool long long __b,
+ vector unsigned char __c) {
+ return (vector bool long long)__builtin_s390_vperm(
+ (vector unsigned char)__a, (vector unsigned char)__b, __c);
+}
+
+static inline __ATTRS_o_ai vector double
+vec_perm(vector double __a, vector double __b,
+ vector unsigned char __c) {
+ return (vector double)__builtin_s390_vperm(
+ (vector unsigned char)__a, (vector unsigned char)__b, __c);
+}
+
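+/* Usage sketch (illustrative): each byte of the control vector __c picks,
+ * through its low five bits, one byte of the 32-byte concatenation of
+ * __a (bytes 0-15) and __b (bytes 16-31).  A byte-reversal helper built
+ * on top of it could look like:
+ *
+ *   static inline vector unsigned char
+ *   reverse_bytes(vector unsigned char __v) {
+ *     vector unsigned char __ctl =
+ *       (vector unsigned char)(15, 14, 13, 12, 11, 10, 9, 8,
+ *                              7, 6, 5, 4, 3, 2, 1, 0);
+ *     return vec_perm(__v, __v, __ctl);
+ *   }
+ */
+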
+/*-- vec_permi --------------------------------------------------------------*/
+
+extern __ATTRS_o vector signed long long
+vec_permi(vector signed long long __a, vector signed long long __b, int __c)
+ __constant_range(__c, 0, 3);
+
+extern __ATTRS_o vector unsigned long long
+vec_permi(vector unsigned long long __a, vector unsigned long long __b, int __c)
+ __constant_range(__c, 0, 3);
+
+extern __ATTRS_o vector bool long long
+vec_permi(vector bool long long __a, vector bool long long __b, int __c)
+ __constant_range(__c, 0, 3);
+
+extern __ATTRS_o vector double
+vec_permi(vector double __a, vector double __b, int __c)
+ __constant_range(__c, 0, 3);
+
+#define vec_permi(X, Y, Z) ((__typeof__((vec_permi)((X), (Y), (Z)))) \
+ __builtin_s390_vpdi((vector unsigned long long)(X), \
+ (vector unsigned long long)(Y), \
+ (((Z) & 2) << 1) | ((Z) & 1)))
+
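+/* Usage sketch (illustrative): per the VPDI encoding in the macro above,
+ * bit 1 of the constant __c selects the doubleword taken from __a and
+ * bit 0 the doubleword taken from __b, so swapping the halves of a
+ * vector is:
+ *
+ *   static inline vector double
+ *   swap_halves(vector double __v) {
+ *     return vec_permi(__v, __v, 2);   result (__v[1], __v[0])
+ *   }
+ */
+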
+/*-- vec_sel ----------------------------------------------------------------*/
+
+static inline __ATTRS_o_ai vector signed char
+vec_sel(vector signed char __a, vector signed char __b,
+ vector unsigned char __c) {
+ return ((vector signed char)__c & __b) | (~(vector signed char)__c & __a);
+}
+
+static inline __ATTRS_o_ai vector signed char
+vec_sel(vector signed char __a, vector signed char __b, vector bool char __c) {
+ return ((vector signed char)__c & __b) | (~(vector signed char)__c & __a);
+}
+
+static inline __ATTRS_o_ai vector bool char
+vec_sel(vector bool char __a, vector bool char __b, vector unsigned char __c) {
+ return ((vector bool char)__c & __b) | (~(vector bool char)__c & __a);
+}
+
+static inline __ATTRS_o_ai vector bool char
+vec_sel(vector bool char __a, vector bool char __b, vector bool char __c) {
+ return (__c & __b) | (~__c & __a);
+}
+
+static inline __ATTRS_o_ai vector unsigned char
+vec_sel(vector unsigned char __a, vector unsigned char __b,
+ vector unsigned char __c) {
+ return (__c & __b) | (~__c & __a);
+}
+
+static inline __ATTRS_o_ai vector unsigned char
+vec_sel(vector unsigned char __a, vector unsigned char __b,
+ vector bool char __c) {
+ return ((vector unsigned char)__c & __b) | (~(vector unsigned char)__c & __a);
+}
+
+static inline __ATTRS_o_ai vector signed short
+vec_sel(vector signed short __a, vector signed short __b,
+ vector unsigned short __c) {
+ return ((vector signed short)__c & __b) | (~(vector signed short)__c & __a);
+}
+
+static inline __ATTRS_o_ai vector signed short
+vec_sel(vector signed short __a, vector signed short __b,
+ vector bool short __c) {
+ return ((vector signed short)__c & __b) | (~(vector signed short)__c & __a);
+}
+
+static inline __ATTRS_o_ai vector bool short
+vec_sel(vector bool short __a, vector bool short __b,
+ vector unsigned short __c) {
+ return ((vector bool short)__c & __b) | (~(vector bool short)__c & __a);
+}
+
+static inline __ATTRS_o_ai vector bool short
+vec_sel(vector bool short __a, vector bool short __b, vector bool short __c) {
+ return (__c & __b) | (~__c & __a);
+}
+
+static inline __ATTRS_o_ai vector unsigned short
+vec_sel(vector unsigned short __a, vector unsigned short __b,
+ vector unsigned short __c) {
+ return (__c & __b) | (~__c & __a);
+}
+
+static inline __ATTRS_o_ai vector unsigned short
+vec_sel(vector unsigned short __a, vector unsigned short __b,
+ vector bool short __c) {
+ return (((vector unsigned short)__c & __b) |
+ (~(vector unsigned short)__c & __a));
+}
+
+static inline __ATTRS_o_ai vector signed int
+vec_sel(vector signed int __a, vector signed int __b,
+ vector unsigned int __c) {
+ return ((vector signed int)__c & __b) | (~(vector signed int)__c & __a);
+}
+
+static inline __ATTRS_o_ai vector signed int
+vec_sel(vector signed int __a, vector signed int __b, vector bool int __c) {
+ return ((vector signed int)__c & __b) | (~(vector signed int)__c & __a);
+}
+
+static inline __ATTRS_o_ai vector bool int
+vec_sel(vector bool int __a, vector bool int __b, vector unsigned int __c) {
+ return ((vector bool int)__c & __b) | (~(vector bool int)__c & __a);
+}
+
+static inline __ATTRS_o_ai vector bool int
+vec_sel(vector bool int __a, vector bool int __b, vector bool int __c) {
+ return (__c & __b) | (~__c & __a);
+}
+
+static inline __ATTRS_o_ai vector unsigned int
+vec_sel(vector unsigned int __a, vector unsigned int __b,
+ vector unsigned int __c) {
+ return (__c & __b) | (~__c & __a);
+}
+
+static inline __ATTRS_o_ai vector unsigned int
+vec_sel(vector unsigned int __a, vector unsigned int __b, vector bool int __c) {
+ return ((vector unsigned int)__c & __b) | (~(vector unsigned int)__c & __a);
+}
+
+static inline __ATTRS_o_ai vector signed long long
+vec_sel(vector signed long long __a, vector signed long long __b,
+ vector unsigned long long __c) {
+ return (((vector signed long long)__c & __b) |
+ (~(vector signed long long)__c & __a));
+}
+
+static inline __ATTRS_o_ai vector signed long long
+vec_sel(vector signed long long __a, vector signed long long __b,
+ vector bool long long __c) {
+ return (((vector signed long long)__c & __b) |
+ (~(vector signed long long)__c & __a));
+}
+
+static inline __ATTRS_o_ai vector bool long long
+vec_sel(vector bool long long __a, vector bool long long __b,
+ vector unsigned long long __c) {
+ return (((vector bool long long)__c & __b) |
+ (~(vector bool long long)__c & __a));
+}
+
+static inline __ATTRS_o_ai vector bool long long
+vec_sel(vector bool long long __a, vector bool long long __b,
+ vector bool long long __c) {
+ return (__c & __b) | (~__c & __a);
+}
+
+static inline __ATTRS_o_ai vector unsigned long long
+vec_sel(vector unsigned long long __a, vector unsigned long long __b,
+ vector unsigned long long __c) {
+ return (__c & __b) | (~__c & __a);
+}
+
+static inline __ATTRS_o_ai vector unsigned long long
+vec_sel(vector unsigned long long __a, vector unsigned long long __b,
+ vector bool long long __c) {
+ return (((vector unsigned long long)__c & __b) |
+ (~(vector unsigned long long)__c & __a));
+}
+
+static inline __ATTRS_o_ai vector double
+vec_sel(vector double __a, vector double __b, vector unsigned long long __c) {
+ return (vector double)((__c & (vector unsigned long long)__b) |
+ (~__c & (vector unsigned long long)__a));
+}
+
+static inline __ATTRS_o_ai vector double
+vec_sel(vector double __a, vector double __b, vector bool long long __c) {
+ vector unsigned long long __ac = (vector unsigned long long)__a;
+ vector unsigned long long __bc = (vector unsigned long long)__b;
+ vector unsigned long long __cc = (vector unsigned long long)__c;
+ return (vector double)((__cc & __bc) | (~__cc & __ac));
+}
+
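+/* Usage sketch (illustrative): vec_sel is a pure bitwise merge; every 1
+ * bit in the mask __c takes the corresponding bit from __b and every 0
+ * bit takes it from __a.  Paired with a comparison mask this gives a
+ * branch-free per-element maximum:
+ *
+ *   static inline vector signed int
+ *   vmax(vector signed int __a, vector signed int __b) {
+ *     return vec_sel(__a, __b, vec_cmpgt(__b, __a));
+ *   }
+ */
+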
+/*-- vec_gather_element -----------------------------------------------------*/
+
+static inline __ATTRS_o_ai vector signed int
+vec_gather_element(vector signed int __vec, vector unsigned int __offset,
+ const signed int *__ptr, int __index)
+ __constant_range(__index, 0, 3) {
+ __vec[__index] = *(const signed int *)(
+ (__INTPTR_TYPE__)__ptr + (__INTPTR_TYPE__)__offset[__index]);
+ return __vec;
+}
+
+static inline __ATTRS_o_ai vector bool int
+vec_gather_element(vector bool int __vec, vector unsigned int __offset,
+ const unsigned int *__ptr, int __index)
+ __constant_range(__index, 0, 3) {
+ __vec[__index] = *(const unsigned int *)(
+ (__INTPTR_TYPE__)__ptr + (__INTPTR_TYPE__)__offset[__index]);
+ return __vec;
+}
+
+static inline __ATTRS_o_ai vector unsigned int
+vec_gather_element(vector unsigned int __vec, vector unsigned int __offset,
+ const unsigned int *__ptr, int __index)
+ __constant_range(__index, 0, 3) {
+ __vec[__index] = *(const unsigned int *)(
+ (__INTPTR_TYPE__)__ptr + (__INTPTR_TYPE__)__offset[__index]);
+ return __vec;
+}
+
+static inline __ATTRS_o_ai vector signed long long
+vec_gather_element(vector signed long long __vec,
+ vector unsigned long long __offset,
+ const signed long long *__ptr, int __index)
+ __constant_range(__index, 0, 1) {
+ __vec[__index] = *(const signed long long *)(
+ (__INTPTR_TYPE__)__ptr + (__INTPTR_TYPE__)__offset[__index]);
+ return __vec;
+}
+
+static inline __ATTRS_o_ai vector bool long long
+vec_gather_element(vector bool long long __vec,
+ vector unsigned long long __offset,
+ const unsigned long long *__ptr, int __index)
+ __constant_range(__index, 0, 1) {
+ __vec[__index] = *(const unsigned long long *)(
+ (__INTPTR_TYPE__)__ptr + (__INTPTR_TYPE__)__offset[__index]);
+ return __vec;
+}
+
+static inline __ATTRS_o_ai vector unsigned long long
+vec_gather_element(vector unsigned long long __vec,
+ vector unsigned long long __offset,
+ const unsigned long long *__ptr, int __index)
+ __constant_range(__index, 0, 1) {
+ __vec[__index] = *(const unsigned long long *)(
+ (__INTPTR_TYPE__)__ptr + (__INTPTR_TYPE__)__offset[__index]);
+ return __vec;
+}
+
+static inline __ATTRS_o_ai vector double
+vec_gather_element(vector double __vec, vector unsigned long long __offset,
+ const double *__ptr, int __index)
+ __constant_range(__index, 0, 1) {
+ __vec[__index] = *(const double *)(
+ (__INTPTR_TYPE__)__ptr + (__INTPTR_TYPE__)__offset[__index]);
+ return __vec;
+}
+
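+/* Usage sketch (illustrative): these overloads load a single element into
+ * lane __index of an existing vector; __offset holds byte offsets (not
+ * element indices) relative to __ptr, and only __offset[__index] is read.
+ * Assuming __base is a const double * with at least two elements:
+ *
+ *   vector double __v = (vector double)0;
+ *   vector unsigned long long __offs = (vector unsigned long long)(0, 8);
+ *   __v = vec_gather_element(__v, __offs, __base, 0);
+ *   __v = vec_gather_element(__v, __offs, __base, 1);
+ */
+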
+/*-- vec_scatter_element ----------------------------------------------------*/
+
+static inline __ATTRS_o_ai void
+vec_scatter_element(vector signed int __vec, vector unsigned int __offset,
+ signed int *__ptr, int __index)
+ __constant_range(__index, 0, 3) {
+ *(signed int *)((__INTPTR_TYPE__)__ptr + __offset[__index]) =
+ __vec[__index];
+}
+
+static inline __ATTRS_o_ai void
+vec_scatter_element(vector bool int __vec, vector unsigned int __offset,
+ unsigned int *__ptr, int __index)
+ __constant_range(__index, 0, 3) {
+ *(unsigned int *)((__INTPTR_TYPE__)__ptr + __offset[__index]) =
+ __vec[__index];
+}
+
+static inline __ATTRS_o_ai void
+vec_scatter_element(vector unsigned int __vec, vector unsigned int __offset,
+ unsigned int *__ptr, int __index)
+ __constant_range(__index, 0, 3) {
+ *(unsigned int *)((__INTPTR_TYPE__)__ptr + __offset[__index]) =
+ __vec[__index];
+}
+
+static inline __ATTRS_o_ai void
+vec_scatter_element(vector signed long long __vec,
+ vector unsigned long long __offset,
+ signed long long *__ptr, int __index)
+ __constant_range(__index, 0, 1) {
+ *(signed long long *)((__INTPTR_TYPE__)__ptr + __offset[__index]) =
+ __vec[__index];
+}
+
+static inline __ATTRS_o_ai void
+vec_scatter_element(vector bool long long __vec,
+ vector unsigned long long __offset,
+ unsigned long long *__ptr, int __index)
+ __constant_range(__index, 0, 1) {
+ *(unsigned long long *)((__INTPTR_TYPE__)__ptr + __offset[__index]) =
+ __vec[__index];
+}
+
+static inline __ATTRS_o_ai void
+vec_scatter_element(vector unsigned long long __vec,
+ vector unsigned long long __offset,
+ unsigned long long *__ptr, int __index)
+ __constant_range(__index, 0, 1) {
+ *(unsigned long long *)((__INTPTR_TYPE__)__ptr + __offset[__index]) =
+ __vec[__index];
+}
+
+static inline __ATTRS_o_ai void
+vec_scatter_element(vector double __vec, vector unsigned long long __offset,
+ double *__ptr, int __index)
+ __constant_range(__index, 0, 1) {
+ *(double *)((__INTPTR_TYPE__)__ptr + __offset[__index]) =
+ __vec[__index];
+}
+
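+/* Usage sketch (illustrative mirror of the gather case): element __index
+ * of __vec is stored to __ptr plus the byte offset __offset[__index]; the
+ * other offsets are ignored.  Continuing the example above, assuming
+ * __base is now a writable double *:
+ *
+ *   vec_scatter_element(__v, __offs, __base, 0);   stores __v[0]
+ *   vec_scatter_element(__v, __offs, __base, 1);   stores __v[1]
+ */
+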
+/*-- vec_xld2 ---------------------------------------------------------------*/
+
+static inline __ATTRS_o_ai vector signed char
+vec_xld2(long __offset, const signed char *__ptr) {
+ return *(const vector signed char *)((__INTPTR_TYPE__)__ptr + __offset);
+}
+
+static inline __ATTRS_o_ai vector unsigned char
+vec_xld2(long __offset, const unsigned char *__ptr) {
+ return *(const vector unsigned char *)((__INTPTR_TYPE__)__ptr + __offset);
+}
+
+static inline __ATTRS_o_ai vector signed short
+vec_xld2(long __offset, const signed short *__ptr) {
+ return *(const vector signed short *)((__INTPTR_TYPE__)__ptr + __offset);
+}
+
+static inline __ATTRS_o_ai vector unsigned short
+vec_xld2(long __offset, const unsigned short *__ptr) {
+ return *(const vector unsigned short *)((__INTPTR_TYPE__)__ptr + __offset);
+}
+
+static inline __ATTRS_o_ai vector signed int
+vec_xld2(long __offset, const signed int *__ptr) {
+ return *(const vector signed int *)((__INTPTR_TYPE__)__ptr + __offset);
+}
+
+static inline __ATTRS_o_ai vector unsigned int
+vec_xld2(long __offset, const unsigned int *__ptr) {
+ return *(const vector unsigned int *)((__INTPTR_TYPE__)__ptr + __offset);
+}
+
+static inline __ATTRS_o_ai vector signed long long
+vec_xld2(long __offset, const signed long long *__ptr) {
+ return *(const vector signed long long *)((__INTPTR_TYPE__)__ptr + __offset);
+}
+
+static inline __ATTRS_o_ai vector unsigned long long
+vec_xld2(long __offset, const unsigned long long *__ptr) {
+ return *(const vector unsigned long long *)
+     ((__INTPTR_TYPE__)__ptr + __offset);
+}
+
+static inline __ATTRS_o_ai vector double
+vec_xld2(long __offset, const double *__ptr) {
+ return *(const vector double *)((__INTPTR_TYPE__)__ptr + __offset);
+}
+
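+/* Usage sketch (illustrative): every vec_xld2 overload loads the 16 bytes
+ * at (char *)__ptr + __offset as one vector; __offset is a byte count,
+ * not an element count.  A simple strided sum, assuming __data holds
+ * __n ints with __n a multiple of 4:
+ *
+ *   vector signed int __acc = (vector signed int)0;
+ *   for (long __i = 0; __i < __n * 4; __i += 16)
+ *     __acc += vec_xld2(__i, __data);
+ */
+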
+/*-- vec_xlw4 ---------------------------------------------------------------*/
+
+static inline __ATTRS_o_ai vector signed char
+vec_xlw4(long __offset, const signed char *__ptr) {
+ return *(const vector signed char *)((__INTPTR_TYPE__)__ptr + __offset);
+}
+
+static inline __ATTRS_o_ai vector unsigned char
+vec_xlw4(long __offset, const unsigned char *__ptr) {
+ return *(const vector unsigned char *)((__INTPTR_TYPE__)__ptr + __offset);
+}
+
+static inline __ATTRS_o_ai vector signed short
+vec_xlw4(long __offset, const signed short *__ptr) {
+ return *(const vector signed short *)((__INTPTR_TYPE__)__ptr + __offset);
+}
+
+static inline __ATTRS_o_ai vector unsigned short
+vec_xlw4(long __offset, const unsigned short *__ptr) {
+ return *(const vector unsigned short *)((__INTPTR_TYPE__)__ptr + __offset);
+}
+
+static inline __ATTRS_o_ai vector signed int
+vec_xlw4(long __offset, const signed int *__ptr) {
+ return *(const vector signed int *)((__INTPTR_TYPE__)__ptr + __offset);
+}
+
+static inline __ATTRS_o_ai vector unsigned int
+vec_xlw4(long __offset, const unsigned int *__ptr) {
+ return *(const vector unsigned int *)((__INTPTR_TYPE__)__ptr + __offset);
+}
+
+/*-- vec_xstd2 --------------------------------------------------------------*/
+
+static inline __ATTRS_o_ai void
+vec_xstd2(vector signed char __vec, long __offset, signed char *__ptr) {
+ *(vector signed char *)((__INTPTR_TYPE__)__ptr + __offset) = __vec;
+}
+
+static inline __ATTRS_o_ai void
+vec_xstd2(vector unsigned char __vec, long __offset, unsigned char *__ptr) {
+ *(vector unsigned char *)((__INTPTR_TYPE__)__ptr + __offset) = __vec;
+}
+
+static inline __ATTRS_o_ai void
+vec_xstd2(vector signed short __vec, long __offset, signed short *__ptr) {
+ *(vector signed short *)((__INTPTR_TYPE__)__ptr + __offset) = __vec;
+}
+
+static inline __ATTRS_o_ai void
+vec_xstd2(vector unsigned short __vec, long __offset, unsigned short *__ptr) {
+ *(vector unsigned short *)((__INTPTR_TYPE__)__ptr + __offset) = __vec;
+}
+
+static inline __ATTRS_o_ai void
+vec_xstd2(vector signed int __vec, long __offset, signed int *__ptr) {
+ *(vector signed int *)((__INTPTR_TYPE__)__ptr + __offset) = __vec;
+}
+
+static inline __ATTRS_o_ai void
+vec_xstd2(vector unsigned int __vec, long __offset, unsigned int *__ptr) {
+ *(vector unsigned int *)((__INTPTR_TYPE__)__ptr + __offset) = __vec;
+}
+
+static inline __ATTRS_o_ai void
+vec_xstd2(vector signed long long __vec, long __offset,
+ signed long long *__ptr) {
+ *(vector signed long long *)((__INTPTR_TYPE__)__ptr + __offset) = __vec;
+}
+
+static inline __ATTRS_o_ai void
+vec_xstd2(vector unsigned long long __vec, long __offset,
+ unsigned long long *__ptr) {
+ *(vector unsigned long long *)((__INTPTR_TYPE__)__ptr + __offset) =
+ __vec;
+}
+
+static inline __ATTRS_o_ai void
+vec_xstd2(vector double __vec, long __offset, double *__ptr) {
+ *(vector double *)((__INTPTR_TYPE__)__ptr + __offset) = __vec;
+}
+
+/*-- vec_xstw4 --------------------------------------------------------------*/
+
+static inline __ATTRS_o_ai void
+vec_xstw4(vector signed char __vec, long __offset, signed char *__ptr) {
+ *(vector signed char *)((__INTPTR_TYPE__)__ptr + __offset) = __vec;
+}
+
+static inline __ATTRS_o_ai void
+vec_xstw4(vector unsigned char __vec, long __offset, unsigned char *__ptr) {
+ *(vector unsigned char *)((__INTPTR_TYPE__)__ptr + __offset) = __vec;
+}
+
+static inline __ATTRS_o_ai void
+vec_xstw4(vector signed short __vec, long __offset, signed short *__ptr) {
+ *(vector signed short *)((__INTPTR_TYPE__)__ptr + __offset) = __vec;
+}
+
+static inline __ATTRS_o_ai void
+vec_xstw4(vector unsigned short __vec, long __offset, unsigned short *__ptr) {
+ *(vector unsigned short *)((__INTPTR_TYPE__)__ptr + __offset) = __vec;
+}
+
+static inline __ATTRS_o_ai void
+vec_xstw4(vector signed int __vec, long __offset, signed int *__ptr) {
+ *(vector signed int *)((__INTPTR_TYPE__)__ptr + __offset) = __vec;
+}
+
+static inline __ATTRS_o_ai void
+vec_xstw4(vector unsigned int __vec, long __offset, unsigned int *__ptr) {
+ *(vector unsigned int *)((__INTPTR_TYPE__)__ptr + __offset) = __vec;
+}
+
+/*-- vec_load_bndry ---------------------------------------------------------*/
+
+extern __ATTRS_o vector signed char
+vec_load_bndry(const signed char *__ptr, unsigned short __len)
+ __constant_pow2_range(__len, 64, 4096);
+
+extern __ATTRS_o vector unsigned char
+vec_load_bndry(const unsigned char *__ptr, unsigned short __len)
+ __constant_pow2_range(__len, 64, 4096);
+
+extern __ATTRS_o vector signed short
+vec_load_bndry(const signed short *__ptr, unsigned short __len)
+ __constant_pow2_range(__len, 64, 4096);
+
+extern __ATTRS_o vector unsigned short
+vec_load_bndry(const unsigned short *__ptr, unsigned short __len)
+ __constant_pow2_range(__len, 64, 4096);
+
+extern __ATTRS_o vector signed int
+vec_load_bndry(const signed int *__ptr, unsigned short __len)
+ __constant_pow2_range(__len, 64, 4096);
+
+extern __ATTRS_o vector unsigned int
+vec_load_bndry(const unsigned int *__ptr, unsigned short __len)
+ __constant_pow2_range(__len, 64, 4096);
+
+extern __ATTRS_o vector signed long long
+vec_load_bndry(const signed long long *__ptr, unsigned short __len)
+ __constant_pow2_range(__len, 64, 4096);
+
+extern __ATTRS_o vector unsigned long long
+vec_load_bndry(const unsigned long long *__ptr, unsigned short __len)
+ __constant_pow2_range(__len, 64, 4096);
+
+extern __ATTRS_o vector double
+vec_load_bndry(const double *__ptr, unsigned short __len)
+ __constant_pow2_range(__len, 64, 4096);
+
+#define vec_load_bndry(X, Y) ((__typeof__((vec_load_bndry)((X), (Y)))) \
+ __builtin_s390_vlbb((X), ((Y) == 64 ? 0 : \
+ (Y) == 128 ? 1 : \
+ (Y) == 256 ? 2 : \
+ (Y) == 512 ? 3 : \
+ (Y) == 1024 ? 4 : \
+ (Y) == 2048 ? 5 : \
+ (Y) == 4096 ? 6 : -1)))
+
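+/* Usage sketch (hedged): __len must be a constant power of two between 64
+ * and 4096; the macro above maps it onto the boundary code of the vlbb
+ * builtin.  The load reads only up to the next __len-byte boundary (lanes
+ * past it are unspecified), which makes it a safe way to start scanning a
+ * NUL-terminated string without touching the next page:
+ *
+ *   vector unsigned char __head = vec_load_bndry(__s, 4096);
+ */
+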
+/*-- vec_load_len -----------------------------------------------------------*/
+
+static inline __ATTRS_o_ai vector signed char
+vec_load_len(const signed char *__ptr, unsigned int __len) {
+ return (vector signed char)__builtin_s390_vll(__len, __ptr);
+}
+
+static inline __ATTRS_o_ai vector unsigned char
+vec_load_len(const unsigned char *__ptr, unsigned int __len) {
+ return (vector unsigned char)__builtin_s390_vll(__len, __ptr);
+}
+
+static inline __ATTRS_o_ai vector signed short
+vec_load_len(const signed short *__ptr, unsigned int __len) {
+ return (vector signed short)__builtin_s390_vll(__len, __ptr);
+}
+
+static inline __ATTRS_o_ai vector unsigned short
+vec_load_len(const unsigned short *__ptr, unsigned int __len) {
+ return (vector unsigned short)__builtin_s390_vll(__len, __ptr);
+}
+
+static inline __ATTRS_o_ai vector signed int
+vec_load_len(const signed int *__ptr, unsigned int __len) {
+ return (vector signed int)__builtin_s390_vll(__len, __ptr);
+}
+
+static inline __ATTRS_o_ai vector unsigned int
+vec_load_len(const unsigned int *__ptr, unsigned int __len) {
+ return (vector unsigned int)__builtin_s390_vll(__len, __ptr);
+}
+
+static inline __ATTRS_o_ai vector signed long long
+vec_load_len(const signed long long *__ptr, unsigned int __len) {
+ return (vector signed long long)__builtin_s390_vll(__len, __ptr);
+}
+
+static inline __ATTRS_o_ai vector unsigned long long
+vec_load_len(const unsigned long long *__ptr, unsigned int __len) {
+ return (vector unsigned long long)__builtin_s390_vll(__len, __ptr);
+}
+
+static inline __ATTRS_o_ai vector double
+vec_load_len(const double *__ptr, unsigned int __len) {
+ return (vector double)__builtin_s390_vll(__len, __ptr);
+}
+
+/*-- vec_store_len ----------------------------------------------------------*/
+
+static inline __ATTRS_o_ai void
+vec_store_len(vector signed char __vec, signed char *__ptr,
+ unsigned int __len) {
+ __builtin_s390_vstl((vector signed char)__vec, __len, __ptr);
+}
+
+static inline __ATTRS_o_ai void
+vec_store_len(vector unsigned char __vec, unsigned char *__ptr,
+ unsigned int __len) {
+ __builtin_s390_vstl((vector signed char)__vec, __len, __ptr);
+}
+
+static inline __ATTRS_o_ai void
+vec_store_len(vector signed short __vec, signed short *__ptr,
+ unsigned int __len) {
+ __builtin_s390_vstl((vector signed char)__vec, __len, __ptr);
+}
+
+static inline __ATTRS_o_ai void
+vec_store_len(vector unsigned short __vec, unsigned short *__ptr,
+ unsigned int __len) {
+ __builtin_s390_vstl((vector signed char)__vec, __len, __ptr);
+}
+
+static inline __ATTRS_o_ai void
+vec_store_len(vector signed int __vec, signed int *__ptr,
+ unsigned int __len) {
+ __builtin_s390_vstl((vector signed char)__vec, __len, __ptr);
+}
+
+static inline __ATTRS_o_ai void
+vec_store_len(vector unsigned int __vec, unsigned int *__ptr,
+ unsigned int __len) {
+ __builtin_s390_vstl((vector signed char)__vec, __len, __ptr);
+}
+
+static inline __ATTRS_o_ai void
+vec_store_len(vector signed long long __vec, signed long long *__ptr,
+ unsigned int __len) {
+ __builtin_s390_vstl((vector signed char)__vec, __len, __ptr);
+}
+
+static inline __ATTRS_o_ai void
+vec_store_len(vector unsigned long long __vec, unsigned long long *__ptr,
+ unsigned int __len) {
+ __builtin_s390_vstl((vector signed char)__vec, __len, __ptr);
+}
+
+static inline __ATTRS_o_ai void
+vec_store_len(vector double __vec, double *__ptr,
+ unsigned int __len) {
+ __builtin_s390_vstl((vector signed char)__vec, __len, __ptr);
+}
+
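+/* Usage sketch (hedged): with the underlying vll/vstl builtins, __len is
+ * treated as the index of the highest byte transferred, capped at 15, so
+ * passing __n - 1 moves __n bytes.  That makes these the natural loop
+ * epilogue for sub-16-byte tails:
+ *
+ *   static inline void
+ *   copy_tail(unsigned char *__dst, const unsigned char *__src,
+ *             unsigned int __n) {   assuming 0 < __n <= 16
+ *     vector unsigned char __v = vec_load_len(__src, __n - 1);
+ *     vec_store_len(__v, __dst, __n - 1);
+ *   }
+ */
+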
+/*-- vec_load_pair ----------------------------------------------------------*/
+
+static inline __ATTRS_o_ai vector signed long long
+vec_load_pair(signed long long __a, signed long long __b) {
+ return (vector signed long long)(__a, __b);
+}
+
+static inline __ATTRS_o_ai vector unsigned long long
+vec_load_pair(unsigned long long __a, unsigned long long __b) {
+ return (vector unsigned long long)(__a, __b);
+}
+
+/*-- vec_genmask ------------------------------------------------------------*/
+
+static inline __ATTRS_o_ai vector unsigned char
+vec_genmask(unsigned short __mask)
+ __constant(__mask) {
+ return (vector unsigned char)(
+ __mask & 0x8000 ? 0xff : 0,
+ __mask & 0x4000 ? 0xff : 0,
+ __mask & 0x2000 ? 0xff : 0,
+ __mask & 0x1000 ? 0xff : 0,
+ __mask & 0x0800 ? 0xff : 0,
+ __mask & 0x0400 ? 0xff : 0,
+ __mask & 0x0200 ? 0xff : 0,
+ __mask & 0x0100 ? 0xff : 0,
+ __mask & 0x0080 ? 0xff : 0,
+ __mask & 0x0040 ? 0xff : 0,
+ __mask & 0x0020 ? 0xff : 0,
+ __mask & 0x0010 ? 0xff : 0,
+ __mask & 0x0008 ? 0xff : 0,
+ __mask & 0x0004 ? 0xff : 0,
+ __mask & 0x0002 ? 0xff : 0,
+ __mask & 0x0001 ? 0xff : 0);
+}
+
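+/* Usage sketch: each of the 16 bits of the constant __mask, most
+ * significant first, expands to one 0xff or 0x00 byte, exactly as the
+ * element-by-element expansion above spells out; vec_genmask(0xff00)
+ * therefore yields eight 0xff bytes followed by eight 0x00 bytes.
+ */
+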
+/*-- vec_genmasks_* ---------------------------------------------------------*/
+
+static inline __ATTRS_o_ai vector unsigned char
+vec_genmasks_8(unsigned char __first, unsigned char __last)
+ __constant(__first) __constant(__last) {
+ unsigned char __bit1 = __first & 7;
+ unsigned char __bit2 = __last & 7;
+ unsigned char __mask1 = (unsigned char)(1U << (7 - __bit1) << 1) - 1;
+ unsigned char __mask2 = (unsigned char)(1U << (7 - __bit2)) - 1;
+ unsigned char __value = (__bit1 <= __bit2 ?
+ __mask1 & ~__mask2 :
+ __mask1 | ~__mask2);
+ return (vector unsigned char)__value;
+}
+
+static inline __ATTRS_o_ai vector unsigned short
+vec_genmasks_16(unsigned char __first, unsigned char __last)
+ __constant(__first) __constant(__last) {
+ unsigned char __bit1 = __first & 15;
+ unsigned char __bit2 = __last & 15;
+ unsigned short __mask1 = (unsigned short)(1U << (15 - __bit1) << 1) - 1;
+ unsigned short __mask2 = (unsigned short)(1U << (15 - __bit2)) - 1;
+ unsigned short __value = (__bit1 <= __bit2 ?
+ __mask1 & ~__mask2 :
+ __mask1 | ~__mask2);
+ return (vector unsigned short)__value;
+}
+
+static inline __ATTRS_o_ai vector unsigned int
+vec_genmasks_32(unsigned char __first, unsigned char __last)
+ __constant(__first) __constant(__last) {
+ unsigned char __bit1 = __first & 31;
+ unsigned char __bit2 = __last & 31;
+ unsigned int __mask1 = (1U << (31 - __bit1) << 1) - 1;
+ unsigned int __mask2 = (1U << (31 - __bit2)) - 1;
+ unsigned int __value = (__bit1 <= __bit2 ?
+ __mask1 & ~__mask2 :
+ __mask1 | ~__mask2);
+ return (vector unsigned int)__value;
+}
+
+static inline __ATTRS_o_ai vector unsigned long long
+vec_genmasks_64(unsigned char __first, unsigned char __last)
+ __constant(__first) __constant(__last) {
+ unsigned char __bit1 = __first & 63;
+ unsigned char __bit2 = __last & 63;
+ unsigned long long __mask1 = (1ULL << (63 - __bit1) << 1) - 1;
+ unsigned long long __mask2 = (1ULL << (63 - __bit2)) - 1;
+ unsigned long long __value = (__bit1 <= __bit2 ?
+ __mask1 & ~__mask2 :
+ __mask1 | ~__mask2);
+ return (vector unsigned long long)__value;
+}
+
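+/* Usage sketch: with bit 0 naming the most significant bit of an element,
+ * each lane gets 1 bits from position __first through __last inclusive;
+ * when __first > __last the run wraps around through the LSB and MSB,
+ * which is what the __mask1 | ~__mask2 branch computes.  For example,
+ * vec_genmasks_32(0, 7) splats 0xff000000 into every word.
+ */
+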
+/*-- vec_splat --------------------------------------------------------------*/
+
+static inline __ATTRS_o_ai vector signed char
+vec_splat(vector signed char __vec, int __index)
+ __constant_range(__index, 0, 15) {
+ return (vector signed char)__vec[__index];
+}
+
+static inline __ATTRS_o_ai vector bool char
+vec_splat(vector bool char __vec, int __index)
+ __constant_range(__index, 0, 15) {
+ return (vector bool char)(vector unsigned char)__vec[__index];
+}
+
+static inline __ATTRS_o_ai vector unsigned char
+vec_splat(vector unsigned char __vec, int __index)
+ __constant_range(__index, 0, 15) {
+ return (vector unsigned char)__vec[__index];
+}
+
+static inline __ATTRS_o_ai vector signed short
+vec_splat(vector signed short __vec, int __index)
+ __constant_range(__index, 0, 7) {
+ return (vector signed short)__vec[__index];
+}
+
+static inline __ATTRS_o_ai vector bool short
+vec_splat(vector bool short __vec, int __index)
+ __constant_range(__index, 0, 7) {
+ return (vector bool short)(vector unsigned short)__vec[__index];
+}
+
+static inline __ATTRS_o_ai vector unsigned short
+vec_splat(vector unsigned short __vec, int __index)
+ __constant_range(__index, 0, 7) {
+ return (vector unsigned short)__vec[__index];
+}
+
+static inline __ATTRS_o_ai vector signed int
+vec_splat(vector signed int __vec, int __index)
+ __constant_range(__index, 0, 3) {
+ return (vector signed int)__vec[__index];
+}
+
+static inline __ATTRS_o_ai vector bool int
+vec_splat(vector bool int __vec, int __index)
+ __constant_range(__index, 0, 3) {
+ return (vector bool int)(vector unsigned int)__vec[__index];
+}
+
+static inline __ATTRS_o_ai vector unsigned int
+vec_splat(vector unsigned int __vec, int __index)
+ __constant_range(__index, 0, 3) {
+ return (vector unsigned int)__vec[__index];
+}
+
+static inline __ATTRS_o_ai vector signed long long
+vec_splat(vector signed long long __vec, int __index)
+ __constant_range(__index, 0, 1) {
+ return (vector signed long long)__vec[__index];
+}
+
+static inline __ATTRS_o_ai vector bool long long
+vec_splat(vector bool long long __vec, int __index)
+ __constant_range(__index, 0, 1) {
+ return (vector bool long long)(vector unsigned long long)__vec[__index];
+}
+
+static inline __ATTRS_o_ai vector unsigned long long
+vec_splat(vector unsigned long long __vec, int __index)
+ __constant_range(__index, 0, 1) {
+ return (vector unsigned long long)__vec[__index];
+}
+
+static inline __ATTRS_o_ai vector double
+vec_splat(vector double __vec, int __index)
+ __constant_range(__index, 0, 1) {
+ return (vector double)__vec[__index];
+}
+
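+/* Usage sketch: vec_splat broadcasts one existing lane across the whole
+ * vector (the scalar-to-vector cast in each body is the vector language
+ * extension's splat idiom), e.g.:
+ *
+ *   vector signed int __v = (vector signed int)(1, 2, 3, 4);
+ *   vector signed int __s = vec_splat(__v, 2);   now (3, 3, 3, 3)
+ */
+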
+/*-- vec_splat_s* -----------------------------------------------------------*/
+
+static inline __ATTRS_ai vector signed char
+vec_splat_s8(signed char __scalar)
+ __constant(__scalar) {
+ return (vector signed char)__scalar;
+}
+
+static inline __ATTRS_ai vector signed short
+vec_splat_s16(signed short __scalar)
+ __constant(__scalar) {
+ return (vector signed short)__scalar;
+}
+
+static inline __ATTRS_ai vector signed int
+vec_splat_s32(signed short __scalar)
+ __constant(__scalar) {
+ return (vector signed int)(signed int)__scalar;
+}
+
+static inline __ATTRS_ai vector signed long long
+vec_splat_s64(signed short __scalar)
+ __constant(__scalar) {
+ return (vector signed long long)(signed long)__scalar;
+}
+
+/*-- vec_splat_u* -----------------------------------------------------------*/
+
+static inline __ATTRS_ai vector unsigned char
+vec_splat_u8(unsigned char __scalar)
+ __constant(__scalar) {
+ return (vector unsigned char)__scalar;
+}
+
+static inline __ATTRS_ai vector unsigned short
+vec_splat_u16(unsigned short __scalar)
+ __constant(__scalar) {
+ return (vector unsigned short)__scalar;
+}
+
+static inline __ATTRS_ai vector unsigned int
+vec_splat_u32(signed short __scalar)
+ __constant(__scalar) {
+ return (vector unsigned int)(signed int)__scalar;
+}
+
+static inline __ATTRS_ai vector unsigned long long
+vec_splat_u64(signed short __scalar)
+ __constant(__scalar) {
+ return (vector unsigned long long)(signed long long)__scalar;
+}
+
+/*-- vec_splats -------------------------------------------------------------*/
+
+static inline __ATTRS_o_ai vector signed char
+vec_splats(signed char __scalar) {
+ return (vector signed char)__scalar;
+}
+
+static inline __ATTRS_o_ai vector unsigned char
+vec_splats(unsigned char __scalar) {
+ return (vector unsigned char)__scalar;
+}
+
+static inline __ATTRS_o_ai vector signed short
+vec_splats(signed short __scalar) {
+ return (vector signed short)__scalar;
+}
+
+static inline __ATTRS_o_ai vector unsigned short
+vec_splats(unsigned short __scalar) {
+ return (vector unsigned short)__scalar;
+}
+
+static inline __ATTRS_o_ai vector signed int
+vec_splats(signed int __scalar) {
+ return (vector signed int)__scalar;
+}
+
+static inline __ATTRS_o_ai vector unsigned int
+vec_splats(unsigned int __scalar) {
+ return (vector unsigned int)__scalar;
+}
+
+static inline __ATTRS_o_ai vector signed long long
+vec_splats(signed long long __scalar) {
+ return (vector signed long long)__scalar;
+}
+
+static inline __ATTRS_o_ai vector unsigned long long
+vec_splats(unsigned long long __scalar) {
+ return (vector unsigned long long)__scalar;
+}
+
+static inline __ATTRS_o_ai vector double
+vec_splats(double __scalar) {
+ return (vector double)__scalar;
+}
+
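+/* Usage sketch: vec_splats is the scalar-argument counterpart of
+ * vec_splat and replicates its operand into every lane, which keeps
+ * vector constants readable:
+ *
+ *   vector double __ones = vec_splats(1.0);
+ *   vector unsigned char __all_0x80 = vec_splats((unsigned char)0x80);
+ */
+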
+/*-- vec_extend_s64 ---------------------------------------------------------*/
+
+static inline __ATTRS_o_ai vector signed long long
+vec_extend_s64(vector signed char __a) {
+ return (vector signed long long)(__a[7], __a[15]);
+}
+
+static inline __ATTRS_o_ai vector signed long long
+vec_extend_s64(vector signed short __a) {
+ return (vector signed long long)(__a[3], __a[7]);
+}
+
+static inline __ATTRS_o_ai vector signed long long
+vec_extend_s64(vector signed int __a) {
+ return (vector signed long long)(__a[1], __a[3]);
+}
+
+/*-- vec_mergeh -------------------------------------------------------------*/
+
+static inline __ATTRS_o_ai vector signed char
+vec_mergeh(vector signed char __a, vector signed char __b) {
+ return (vector signed char)(
+ __a[0], __b[0], __a[1], __b[1], __a[2], __b[2], __a[3], __b[3],
+ __a[4], __b[4], __a[5], __b[5], __a[6], __b[6], __a[7], __b[7]);
+}
+
+static inline __ATTRS_o_ai vector bool char
+vec_mergeh(vector bool char __a, vector bool char __b) {
+ return (vector bool char)(
+ __a[0], __b[0], __a[1], __b[1], __a[2], __b[2], __a[3], __b[3],
+ __a[4], __b[4], __a[5], __b[5], __a[6], __b[6], __a[7], __b[7]);
+}
+
+static inline __ATTRS_o_ai vector unsigned char
+vec_mergeh(vector unsigned char __a, vector unsigned char __b) {
+ return (vector unsigned char)(
+ __a[0], __b[0], __a[1], __b[1], __a[2], __b[2], __a[3], __b[3],
+ __a[4], __b[4], __a[5], __b[5], __a[6], __b[6], __a[7], __b[7]);
+}
+
+static inline __ATTRS_o_ai vector signed short
+vec_mergeh(vector signed short __a, vector signed short __b) {
+ return (vector signed short)(
+ __a[0], __b[0], __a[1], __b[1], __a[2], __b[2], __a[3], __b[3]);
+}
+
+static inline __ATTRS_o_ai vector bool short
+vec_mergeh(vector bool short __a, vector bool short __b) {
+ return (vector bool short)(
+ __a[0], __b[0], __a[1], __b[1], __a[2], __b[2], __a[3], __b[3]);
+}
+
+static inline __ATTRS_o_ai vector unsigned short
+vec_mergeh(vector unsigned short __a, vector unsigned short __b) {
+ return (vector unsigned short)(
+ __a[0], __b[0], __a[1], __b[1], __a[2], __b[2], __a[3], __b[3]);
+}
+
+static inline __ATTRS_o_ai vector signed int
+vec_mergeh(vector signed int __a, vector signed int __b) {
+ return (vector signed int)(__a[0], __b[0], __a[1], __b[1]);
+}
+
+static inline __ATTRS_o_ai vector bool int
+vec_mergeh(vector bool int __a, vector bool int __b) {
+ return (vector bool int)(__a[0], __b[0], __a[1], __b[1]);
+}
+
+static inline __ATTRS_o_ai vector unsigned int
+vec_mergeh(vector unsigned int __a, vector unsigned int __b) {
+ return (vector unsigned int)(__a[0], __b[0], __a[1], __b[1]);
+}
+
+static inline __ATTRS_o_ai vector signed long long
+vec_mergeh(vector signed long long __a, vector signed long long __b) {
+ return (vector signed long long)(__a[0], __b[0]);
+}
+
+static inline __ATTRS_o_ai vector bool long long
+vec_mergeh(vector bool long long __a, vector bool long long __b) {
+ return (vector bool long long)(__a[0], __b[0]);
+}
+
+static inline __ATTRS_o_ai vector unsigned long long
+vec_mergeh(vector unsigned long long __a, vector unsigned long long __b) {
+ return (vector unsigned long long)(__a[0], __b[0]);
+}
+
+static inline __ATTRS_o_ai vector double
+vec_mergeh(vector double __a, vector double __b) {
+ return (vector double)(__a[0], __b[0]);
+}
+
+/*-- vec_mergel -------------------------------------------------------------*/
+
+static inline __ATTRS_o_ai vector signed char
+vec_mergel(vector signed char __a, vector signed char __b) {
+ return (vector signed char)(
+ __a[8], __b[8], __a[9], __b[9], __a[10], __b[10], __a[11], __b[11],
+ __a[12], __b[12], __a[13], __b[13], __a[14], __b[14], __a[15], __b[15]);
+}
+
+static inline __ATTRS_o_ai vector bool char
+vec_mergel(vector bool char __a, vector bool char __b) {
+ return (vector bool char)(
+ __a[8], __b[8], __a[9], __b[9], __a[10], __b[10], __a[11], __b[11],
+ __a[12], __b[12], __a[13], __b[13], __a[14], __b[14], __a[15], __b[15]);
+}
+
+static inline __ATTRS_o_ai vector unsigned char
+vec_mergel(vector unsigned char __a, vector unsigned char __b) {
+ return (vector unsigned char)(
+ __a[8], __b[8], __a[9], __b[9], __a[10], __b[10], __a[11], __b[11],
+ __a[12], __b[12], __a[13], __b[13], __a[14], __b[14], __a[15], __b[15]);
+}
+
+static inline __ATTRS_o_ai vector signed short
+vec_mergel(vector signed short __a, vector signed short __b) {
+ return (vector signed short)(
+ __a[4], __b[4], __a[5], __b[5], __a[6], __b[6], __a[7], __b[7]);
+}
+
+static inline __ATTRS_o_ai vector bool short
+vec_mergel(vector bool short __a, vector bool short __b) {
+ return (vector bool short)(
+ __a[4], __b[4], __a[5], __b[5], __a[6], __b[6], __a[7], __b[7]);
+}
+
+static inline __ATTRS_o_ai vector unsigned short
+vec_mergel(vector unsigned short __a, vector unsigned short __b) {
+ return (vector unsigned short)(
+ __a[4], __b[4], __a[5], __b[5], __a[6], __b[6], __a[7], __b[7]);
+}
+
+static inline __ATTRS_o_ai vector signed int
+vec_mergel(vector signed int __a, vector signed int __b) {
+ return (vector signed int)(__a[2], __b[2], __a[3], __b[3]);
+}
+
+static inline __ATTRS_o_ai vector bool int
+vec_mergel(vector bool int __a, vector bool int __b) {
+ return (vector bool int)(__a[2], __b[2], __a[3], __b[3]);
+}
+
+static inline __ATTRS_o_ai vector unsigned int
+vec_mergel(vector unsigned int __a, vector unsigned int __b) {
+ return (vector unsigned int)(__a[2], __b[2], __a[3], __b[3]);
+}
+
+static inline __ATTRS_o_ai vector signed long long
+vec_mergel(vector signed long long __a, vector signed long long __b) {
+ return (vector signed long long)(__a[1], __b[1]);
+}
+
+static inline __ATTRS_o_ai vector bool long long
+vec_mergel(vector bool long long __a, vector bool long long __b) {
+ return (vector bool long long)(__a[1], __b[1]);
+}
+
+static inline __ATTRS_o_ai vector unsigned long long
+vec_mergel(vector unsigned long long __a, vector unsigned long long __b) {
+ return (vector unsigned long long)(__a[1], __b[1]);
+}
+
+static inline __ATTRS_o_ai vector double
+vec_mergel(vector double __a, vector double __b) {
+ return (vector double)(__a[1], __b[1]);
+}
+
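+/* Usage sketch: vec_mergeh interleaves the high (leftmost) halves of its
+ * operands and vec_mergel the low halves, so together they form the
+ * classic zip of two vectors:
+ *
+ *   vector signed int __a = (vector signed int)(0, 1, 2, 3);
+ *   vector signed int __b = (vector signed int)(4, 5, 6, 7);
+ *   vec_mergeh(__a, __b);   yields (0, 4, 1, 5)
+ *   vec_mergel(__a, __b);   yields (2, 6, 3, 7)
+ */
+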
+/*-- vec_pack ---------------------------------------------------------------*/
+
+static inline __ATTRS_o_ai vector signed char
+vec_pack(vector signed short __a, vector signed short __b) {
+ vector signed char __ac = (vector signed char)__a;
+ vector signed char __bc = (vector signed char)__b;
+ return (vector signed char)(
+ __ac[1], __ac[3], __ac[5], __ac[7], __ac[9], __ac[11], __ac[13], __ac[15],
+ __bc[1], __bc[3], __bc[5], __bc[7], __bc[9], __bc[11], __bc[13], __bc[15]);
+}
+
+static inline __ATTRS_o_ai vector bool char
+vec_pack(vector bool short __a, vector bool short __b) {
+ vector bool char __ac = (vector bool char)__a;
+ vector bool char __bc = (vector bool char)__b;
+ return (vector bool char)(
+ __ac[1], __ac[3], __ac[5], __ac[7], __ac[9], __ac[11], __ac[13], __ac[15],
+ __bc[1], __bc[3], __bc[5], __bc[7], __bc[9], __bc[11], __bc[13], __bc[15]);
+}
+
+static inline __ATTRS_o_ai vector unsigned char
+vec_pack(vector unsigned short __a, vector unsigned short __b) {
+ vector unsigned char __ac = (vector unsigned char)__a;
+ vector unsigned char __bc = (vector unsigned char)__b;
+ return (vector unsigned char)(
+ __ac[1], __ac[3], __ac[5], __ac[7], __ac[9], __ac[11], __ac[13], __ac[15],
+ __bc[1], __bc[3], __bc[5], __bc[7], __bc[9], __bc[11], __bc[13], __bc[15]);
+}
+
+static inline __ATTRS_o_ai vector signed short
+vec_pack(vector signed int __a, vector signed int __b) {
+ vector signed short __ac = (vector signed short)__a;
+ vector signed short __bc = (vector signed short)__b;
+ return (vector signed short)(
+ __ac[1], __ac[3], __ac[5], __ac[7],
+ __bc[1], __bc[3], __bc[5], __bc[7]);
+}
+
+static inline __ATTRS_o_ai vector bool short
+vec_pack(vector bool int __a, vector bool int __b) {
+ vector bool short __ac = (vector bool short)__a;
+ vector bool short __bc = (vector bool short)__b;
+ return (vector bool short)(
+ __ac[1], __ac[3], __ac[5], __ac[7],
+ __bc[1], __bc[3], __bc[5], __bc[7]);
+}
+
+static inline __ATTRS_o_ai vector unsigned short
+vec_pack(vector unsigned int __a, vector unsigned int __b) {
+ vector unsigned short __ac = (vector unsigned short)__a;
+ vector unsigned short __bc = (vector unsigned short)__b;
+ return (vector unsigned short)(
+ __ac[1], __ac[3], __ac[5], __ac[7],
+ __bc[1], __bc[3], __bc[5], __bc[7]);
+}
+
+static inline __ATTRS_o_ai vector signed int
+vec_pack(vector signed long long __a, vector signed long long __b) {
+ vector signed int __ac = (vector signed int)__a;
+ vector signed int __bc = (vector signed int)__b;
+ return (vector signed int)(__ac[1], __ac[3], __bc[1], __bc[3]);
+}
+
+static inline __ATTRS_o_ai vector bool int
+vec_pack(vector bool long long __a, vector bool long long __b) {
+ vector bool int __ac = (vector bool int)__a;
+ vector bool int __bc = (vector bool int)__b;
+ return (vector bool int)(__ac[1], __ac[3], __bc[1], __bc[3]);
+}
+
+static inline __ATTRS_o_ai vector unsigned int
+vec_pack(vector unsigned long long __a, vector unsigned long long __b) {
+ vector unsigned int __ac = (vector unsigned int)__a;
+ vector unsigned int __bc = (vector unsigned int)__b;
+ return (vector unsigned int)(__ac[1], __ac[3], __bc[1], __bc[3]);
+}
+
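+/* Usage sketch: vec_pack narrows by plain truncation; the odd lane
+ * indices above pick the low-order half of each element (the rightmost
+ * half, as this is a big-endian target), so out-of-range values simply
+ * lose their high bits:
+ *
+ *   vector unsigned int __a = (vector unsigned int)(0x10002, 1, 2, 3);
+ *   vector unsigned int __b = (vector unsigned int)(4, 5, 6, 7);
+ *   vec_pack(__a, __b);   yields shorts (2, 1, 2, 3, 4, 5, 6, 7)
+ */
+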
+/*-- vec_packs --------------------------------------------------------------*/
+
+static inline __ATTRS_o_ai vector signed char
+vec_packs(vector signed short __a, vector signed short __b) {
+ return __builtin_s390_vpksh(__a, __b);
+}
+
+static inline __ATTRS_o_ai vector unsigned char
+vec_packs(vector unsigned short __a, vector unsigned short __b) {
+ return __builtin_s390_vpklsh(__a, __b);
+}
+
+static inline __ATTRS_o_ai vector signed short
+vec_packs(vector signed int __a, vector signed int __b) {
+ return __builtin_s390_vpksf(__a, __b);
+}
+
+static inline __ATTRS_o_ai vector unsigned short
+vec_packs(vector unsigned int __a, vector unsigned int __b) {
+ return __builtin_s390_vpklsf(__a, __b);
+}
+
+static inline __ATTRS_o_ai vector signed int
+vec_packs(vector signed long long __a, vector signed long long __b) {
+ return __builtin_s390_vpksg(__a, __b);
+}
+
+static inline __ATTRS_o_ai vector unsigned int
+vec_packs(vector unsigned long long __a, vector unsigned long long __b) {
+ return __builtin_s390_vpklsg(__a, __b);
+}
+
+/*-- vec_packs_cc -----------------------------------------------------------*/
+
+static inline __ATTRS_o_ai vector signed char
+vec_packs_cc(vector signed short __a, vector signed short __b, int *__cc) {
+ return __builtin_s390_vpkshs(__a, __b, __cc);
+}
+
+static inline __ATTRS_o_ai vector unsigned char
+vec_packs_cc(vector unsigned short __a, vector unsigned short __b, int *__cc) {
+ return __builtin_s390_vpklshs(__a, __b, __cc);
+}
+
+static inline __ATTRS_o_ai vector signed short
+vec_packs_cc(vector signed int __a, vector signed int __b, int *__cc) {
+ return __builtin_s390_vpksfs(__a, __b, __cc);
+}
+
+static inline __ATTRS_o_ai vector unsigned short
+vec_packs_cc(vector unsigned int __a, vector unsigned int __b, int *__cc) {
+ return __builtin_s390_vpklsfs(__a, __b, __cc);
+}
+
+static inline __ATTRS_o_ai vector signed int
+vec_packs_cc(vector signed long long __a, vector signed long long __b,
+ int *__cc) {
+ return __builtin_s390_vpksgs(__a, __b, __cc);
+}
+
+static inline __ATTRS_o_ai vector unsigned int
+vec_packs_cc(vector unsigned long long __a, vector unsigned long long __b,
+ int *__cc) {
+ return __builtin_s390_vpklsgs(__a, __b, __cc);
+}
+
+/*-- vec_packsu -------------------------------------------------------------*/
+
+static inline __ATTRS_o_ai vector unsigned char
+vec_packsu(vector signed short __a, vector signed short __b) {
+ const vector signed short __zero = (vector signed short)0;
+ return __builtin_s390_vpklsh(
+ (vector unsigned short)(__a >= __zero) & (vector unsigned short)__a,
+ (vector unsigned short)(__b >= __zero) & (vector unsigned short)__b);
+}
+
+static inline __ATTRS_o_ai vector unsigned char
+vec_packsu(vector unsigned short __a, vector unsigned short __b) {
+ return __builtin_s390_vpklsh(__a, __b);
+}
+
+static inline __ATTRS_o_ai vector unsigned short
+vec_packsu(vector signed int __a, vector signed int __b) {
+ const vector signed int __zero = (vector signed int)0;
+ return __builtin_s390_vpklsf(
+ (vector unsigned int)(__a >= __zero) & (vector unsigned int)__a,
+ (vector unsigned int)(__b >= __zero) & (vector unsigned int)__b);
+}
+
+static inline __ATTRS_o_ai vector unsigned short
+vec_packsu(vector unsigned int __a, vector unsigned int __b) {
+ return __builtin_s390_vpklsf(__a, __b);
+}
+
+static inline __ATTRS_o_ai vector unsigned int
+vec_packsu(vector signed long long __a, vector signed long long __b) {
+ const vector signed long long __zero = (vector signed long long)0;
+ return __builtin_s390_vpklsg(
+ (vector unsigned long long)(__a >= __zero) &
+ (vector unsigned long long)__a,
+ (vector unsigned long long)(__b >= __zero) &
+ (vector unsigned long long)__b);
+}
+
+static inline __ATTRS_o_ai vector unsigned int
+vec_packsu(vector unsigned long long __a, vector unsigned long long __b) {
+ return __builtin_s390_vpklsg(__a, __b);
+}
+
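+/* Usage sketch (hedged): unlike the truncating vec_pack, vec_packs and
+ * vec_packsu saturate on overflow.  For signed inputs, vec_packsu first
+ * clamps negatives to zero via the (__a >= __zero) mask above and then
+ * packs with unsigned saturation, so packing the ints (-1, 70000, 1, 2)
+ * into unsigned shorts gives (0, 0xffff, 1, 2) rather than wrapped bits.
+ */
+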
+/*-- vec_packsu_cc ----------------------------------------------------------*/
+
+static inline __ATTRS_o_ai vector unsigned char
+vec_packsu_cc(vector unsigned short __a, vector unsigned short __b, int *__cc) {
+ return __builtin_s390_vpklshs(__a, __b, __cc);
+}
+
+static inline __ATTRS_o_ai vector unsigned short
+vec_packsu_cc(vector unsigned int __a, vector unsigned int __b, int *__cc) {
+ return __builtin_s390_vpklsfs(__a, __b, __cc);
+}
+
+static inline __ATTRS_o_ai vector unsigned int
+vec_packsu_cc(vector unsigned long long __a, vector unsigned long long __b,
+ int *__cc) {
+ return __builtin_s390_vpklsgs(__a, __b, __cc);
+}
+
+/*-- vec_unpackh ------------------------------------------------------------*/
+
+static inline __ATTRS_o_ai vector signed short
+vec_unpackh(vector signed char __a) {
+ return __builtin_s390_vuphb(__a);
+}
+
+static inline __ATTRS_o_ai vector bool short
+vec_unpackh(vector bool char __a) {
+ return (vector bool short)__builtin_s390_vuphb((vector signed char)__a);
+}
+
+static inline __ATTRS_o_ai vector unsigned short
+vec_unpackh(vector unsigned char __a) {
+ return __builtin_s390_vuplhb(__a);
+}
+
+static inline __ATTRS_o_ai vector signed int
+vec_unpackh(vector signed short __a) {
+ return __builtin_s390_vuphh(__a);
+}
+
+static inline __ATTRS_o_ai vector bool int
+vec_unpackh(vector bool short __a) {
+ return (vector bool int)__builtin_s390_vuphh((vector signed short)__a);
+}
+
+static inline __ATTRS_o_ai vector unsigned int
+vec_unpackh(vector unsigned short __a) {
+ return __builtin_s390_vuplhh(__a);
+}
+
+static inline __ATTRS_o_ai vector signed long long
+vec_unpackh(vector signed int __a) {
+ return __builtin_s390_vuphf(__a);
+}
+
+static inline __ATTRS_o_ai vector bool long long
+vec_unpackh(vector bool int __a) {
+ return (vector bool long long)__builtin_s390_vuphf((vector signed int)__a);
+}
+
+static inline __ATTRS_o_ai vector unsigned long long
+vec_unpackh(vector unsigned int __a) {
+ return __builtin_s390_vuplhf(__a);
+}
+
+/*-- vec_unpackl ------------------------------------------------------------*/
+
+static inline __ATTRS_o_ai vector signed short
+vec_unpackl(vector signed char __a) {
+ return __builtin_s390_vuplb(__a);
+}
+
+static inline __ATTRS_o_ai vector bool short
+vec_unpackl(vector bool char __a) {
+ return (vector bool short)__builtin_s390_vuplb((vector signed char)__a);
+}
+
+static inline __ATTRS_o_ai vector unsigned short
+vec_unpackl(vector unsigned char __a) {
+ return __builtin_s390_vupllb(__a);
+}
+
+static inline __ATTRS_o_ai vector signed int
+vec_unpackl(vector signed short __a) {
+ return __builtin_s390_vuplhw(__a);
+}
+
+static inline __ATTRS_o_ai vector bool int
+vec_unpackl(vector bool short __a) {
+ return (vector bool int)__builtin_s390_vuplhw((vector signed short)__a);
+}
+
+static inline __ATTRS_o_ai vector unsigned int
+vec_unpackl(vector unsigned short __a) {
+ return __builtin_s390_vupllh(__a);
+}
+
+static inline __ATTRS_o_ai vector signed long long
+vec_unpackl(vector signed int __a) {
+ return __builtin_s390_vuplf(__a);
+}
+
+static inline __ATTRS_o_ai vector bool long long
+vec_unpackl(vector bool int __a) {
+ return (vector bool long long)__builtin_s390_vuplf((vector signed int)__a);
+}
+
+static inline __ATTRS_o_ai vector unsigned long long
+vec_unpackl(vector unsigned int __a) {
+ return __builtin_s390_vupllf(__a);
+}
+
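+/* Usage sketch: the unpack family widens half of a vector to the next
+ * element size, the high (left) half for vec_unpackh and the low half
+ * for vec_unpackl; signed lanes sign-extend, unsigned lanes zero-extend,
+ * and the bool overloads sign-extend so all-ones masks stay all-ones.
+ * With __bytes a vector signed char:
+ *
+ *   vector signed short __h = vec_unpackh(__bytes);   lanes 0..7 widened
+ *   vector signed short __l = vec_unpackl(__bytes);   lanes 8..15 widened
+ */
+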
+/*-- vec_cmpeq --------------------------------------------------------------*/
+
+static inline __ATTRS_o_ai vector bool char
+vec_cmpeq(vector bool char __a, vector bool char __b) {
+ return (vector bool char)(__a == __b);
+}
+
+static inline __ATTRS_o_ai vector bool char
+vec_cmpeq(vector signed char __a, vector signed char __b) {
+ return (vector bool char)(__a == __b);
+}
+
+static inline __ATTRS_o_ai vector bool char
+vec_cmpeq(vector unsigned char __a, vector unsigned char __b) {
+ return (vector bool char)(__a == __b);
+}
+
+static inline __ATTRS_o_ai vector bool short
+vec_cmpeq(vector bool short __a, vector bool short __b) {
+ return (vector bool short)(__a == __b);
+}
+
+static inline __ATTRS_o_ai vector bool short
+vec_cmpeq(vector signed short __a, vector signed short __b) {
+ return (vector bool short)(__a == __b);
+}
+
+static inline __ATTRS_o_ai vector bool short
+vec_cmpeq(vector unsigned short __a, vector unsigned short __b) {
+ return (vector bool short)(__a == __b);
+}
+
+static inline __ATTRS_o_ai vector bool int
+vec_cmpeq(vector bool int __a, vector bool int __b) {
+ return (vector bool int)(__a == __b);
+}
+
+static inline __ATTRS_o_ai vector bool int
+vec_cmpeq(vector signed int __a, vector signed int __b) {
+ return (vector bool int)(__a == __b);
+}
+
+static inline __ATTRS_o_ai vector bool int
+vec_cmpeq(vector unsigned int __a, vector unsigned int __b) {
+ return (vector bool int)(__a == __b);
+}
+
+static inline __ATTRS_o_ai vector bool long long
+vec_cmpeq(vector bool long long __a, vector bool long long __b) {
+ return (vector bool long long)(__a == __b);
+}
+
+static inline __ATTRS_o_ai vector bool long long
+vec_cmpeq(vector signed long long __a, vector signed long long __b) {
+ return (vector bool long long)(__a == __b);
+}
+
+static inline __ATTRS_o_ai vector bool long long
+vec_cmpeq(vector unsigned long long __a, vector unsigned long long __b) {
+ return (vector bool long long)(__a == __b);
+}
+
+static inline __ATTRS_o_ai vector bool long long
+vec_cmpeq(vector double __a, vector double __b) {
+ return (vector bool long long)(__a == __b);
+}
+
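+/* Usage sketch: the element comparisons in this and the following
+ * sections return bool vectors that are all-ones in each lane where the
+ * predicate holds, which feeds straight into vec_sel, e.g. a branch-free
+ * absolute value:
+ *
+ *   static inline vector signed int
+ *   vabs(vector signed int __a) {
+ *     return vec_sel(__a, -__a, vec_cmplt(__a, (vector signed int)0));
+ *   }
+ */
+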
+/*-- vec_cmpge --------------------------------------------------------------*/
+
+static inline __ATTRS_o_ai vector bool char
+vec_cmpge(vector signed char __a, vector signed char __b) {
+ return (vector bool char)(__a >= __b);
+}
+
+static inline __ATTRS_o_ai vector bool char
+vec_cmpge(vector unsigned char __a, vector unsigned char __b) {
+ return (vector bool char)(__a >= __b);
+}
+
+static inline __ATTRS_o_ai vector bool short
+vec_cmpge(vector signed short __a, vector signed short __b) {
+ return (vector bool short)(__a >= __b);
+}
+
+static inline __ATTRS_o_ai vector bool short
+vec_cmpge(vector unsigned short __a, vector unsigned short __b) {
+ return (vector bool short)(__a >= __b);
+}
+
+static inline __ATTRS_o_ai vector bool int
+vec_cmpge(vector signed int __a, vector signed int __b) {
+ return (vector bool int)(__a >= __b);
+}
+
+static inline __ATTRS_o_ai vector bool int
+vec_cmpge(vector unsigned int __a, vector unsigned int __b) {
+ return (vector bool int)(__a >= __b);
+}
+
+static inline __ATTRS_o_ai vector bool long long
+vec_cmpge(vector signed long long __a, vector signed long long __b) {
+ return (vector bool long long)(__a >= __b);
+}
+
+static inline __ATTRS_o_ai vector bool long long
+vec_cmpge(vector unsigned long long __a, vector unsigned long long __b) {
+ return (vector bool long long)(__a >= __b);
+}
+
+static inline __ATTRS_o_ai vector bool long long
+vec_cmpge(vector double __a, vector double __b) {
+ return (vector bool long long)(__a >= __b);
+}
+
+/*-- vec_cmpgt --------------------------------------------------------------*/
+
+static inline __ATTRS_o_ai vector bool char
+vec_cmpgt(vector signed char __a, vector signed char __b) {
+ return (vector bool char)(__a > __b);
+}
+
+static inline __ATTRS_o_ai vector bool char
+vec_cmpgt(vector unsigned char __a, vector unsigned char __b) {
+ return (vector bool char)(__a > __b);
+}
+
+static inline __ATTRS_o_ai vector bool short
+vec_cmpgt(vector signed short __a, vector signed short __b) {
+ return (vector bool short)(__a > __b);
+}
+
+static inline __ATTRS_o_ai vector bool short
+vec_cmpgt(vector unsigned short __a, vector unsigned short __b) {
+ return (vector bool short)(__a > __b);
+}
+
+static inline __ATTRS_o_ai vector bool int
+vec_cmpgt(vector signed int __a, vector signed int __b) {
+ return (vector bool int)(__a > __b);
+}
+
+static inline __ATTRS_o_ai vector bool int
+vec_cmpgt(vector unsigned int __a, vector unsigned int __b) {
+ return (vector bool int)(__a > __b);
+}
+
+static inline __ATTRS_o_ai vector bool long long
+vec_cmpgt(vector signed long long __a, vector signed long long __b) {
+ return (vector bool long long)(__a > __b);
+}
+
+static inline __ATTRS_o_ai vector bool long long
+vec_cmpgt(vector unsigned long long __a, vector unsigned long long __b) {
+ return (vector bool long long)(__a > __b);
+}
+
+static inline __ATTRS_o_ai vector bool long long
+vec_cmpgt(vector double __a, vector double __b) {
+ return (vector bool long long)(__a > __b);
+}
+
+/*-- vec_cmple --------------------------------------------------------------*/
+
+static inline __ATTRS_o_ai vector bool char
+vec_cmple(vector signed char __a, vector signed char __b) {
+ return (vector bool char)(__a <= __b);
+}
+
+static inline __ATTRS_o_ai vector bool char
+vec_cmple(vector unsigned char __a, vector unsigned char __b) {
+ return (vector bool char)(__a <= __b);
+}
+
+static inline __ATTRS_o_ai vector bool short
+vec_cmple(vector signed short __a, vector signed short __b) {
+ return (vector bool short)(__a <= __b);
+}
+
+static inline __ATTRS_o_ai vector bool short
+vec_cmple(vector unsigned short __a, vector unsigned short __b) {
+ return (vector bool short)(__a <= __b);
+}
+
+static inline __ATTRS_o_ai vector bool int
+vec_cmple(vector signed int __a, vector signed int __b) {
+ return (vector bool int)(__a <= __b);
+}
+
+static inline __ATTRS_o_ai vector bool int
+vec_cmple(vector unsigned int __a, vector unsigned int __b) {
+ return (vector bool int)(__a <= __b);
+}
+
+static inline __ATTRS_o_ai vector bool long long
+vec_cmple(vector signed long long __a, vector signed long long __b) {
+ return (vector bool long long)(__a <= __b);
+}
+
+static inline __ATTRS_o_ai vector bool long long
+vec_cmple(vector unsigned long long __a, vector unsigned long long __b) {
+ return (vector bool long long)(__a <= __b);
+}
+
+static inline __ATTRS_o_ai vector bool long long
+vec_cmple(vector double __a, vector double __b) {
+ return (vector bool long long)(__a <= __b);
+}
+
+/*-- vec_cmplt --------------------------------------------------------------*/
+
+static inline __ATTRS_o_ai vector bool char
+vec_cmplt(vector signed char __a, vector signed char __b) {
+ return (vector bool char)(__a < __b);
+}
+
+static inline __ATTRS_o_ai vector bool char
+vec_cmplt(vector unsigned char __a, vector unsigned char __b) {
+ return (vector bool char)(__a < __b);
+}
+
+static inline __ATTRS_o_ai vector bool short
+vec_cmplt(vector signed short __a, vector signed short __b) {
+ return (vector bool short)(__a < __b);
+}
+
+static inline __ATTRS_o_ai vector bool short
+vec_cmplt(vector unsigned short __a, vector unsigned short __b) {
+ return (vector bool short)(__a < __b);
+}
+
+static inline __ATTRS_o_ai vector bool int
+vec_cmplt(vector signed int __a, vector signed int __b) {
+ return (vector bool int)(__a < __b);
+}
+
+static inline __ATTRS_o_ai vector bool int
+vec_cmplt(vector unsigned int __a, vector unsigned int __b) {
+ return (vector bool int)(__a < __b);
+}
+
+static inline __ATTRS_o_ai vector bool long long
+vec_cmplt(vector signed long long __a, vector signed long long __b) {
+ return (vector bool long long)(__a < __b);
+}
+
+static inline __ATTRS_o_ai vector bool long long
+vec_cmplt(vector unsigned long long __a, vector unsigned long long __b) {
+ return (vector bool long long)(__a < __b);
+}
+
+static inline __ATTRS_o_ai vector bool long long
+vec_cmplt(vector double __a, vector double __b) {
+ return (vector bool long long)(__a < __b);
+}
+
+/*-- vec_all_eq -------------------------------------------------------------*/
+
+static inline __ATTRS_o_ai int
+vec_all_eq(vector signed char __a, vector signed char __b) {
+ int __cc;
+ __builtin_s390_vceqbs(__a, __b, &__cc);
+ return __cc == 0;
+}
+
+static inline __ATTRS_o_ai int
+vec_all_eq(vector signed char __a, vector bool char __b) {
+ int __cc;
+ __builtin_s390_vceqbs(__a, (vector signed char)__b, &__cc);
+ return __cc == 0;
+}
+
+static inline __ATTRS_o_ai int
+vec_all_eq(vector bool char __a, vector signed char __b) {
+ int __cc;
+ __builtin_s390_vceqbs((vector signed char)__a, __b, &__cc);
+ return __cc == 0;
+}
+
+static inline __ATTRS_o_ai int
+vec_all_eq(vector unsigned char __a, vector unsigned char __b) {
+ int __cc;
+ __builtin_s390_vceqbs((vector signed char)__a,
+ (vector signed char)__b, &__cc);
+ return __cc == 0;
+}
+
+static inline __ATTRS_o_ai int
+vec_all_eq(vector unsigned char __a, vector bool char __b) {
+ int __cc;
+ __builtin_s390_vceqbs((vector signed char)__a,
+ (vector signed char)__b, &__cc);
+ return __cc == 0;
+}
+
+static inline __ATTRS_o_ai int
+vec_all_eq(vector bool char __a, vector unsigned char __b) {
+ int __cc;
+ __builtin_s390_vceqbs((vector signed char)__a,
+ (vector signed char)__b, &__cc);
+ return __cc == 0;
+}
+
+static inline __ATTRS_o_ai int
+vec_all_eq(vector bool char __a, vector bool char __b) {
+ int __cc;
+ __builtin_s390_vceqbs((vector signed char)__a,
+ (vector signed char)__b, &__cc);
+ return __cc == 0;
+}
+
+static inline __ATTRS_o_ai int
+vec_all_eq(vector signed short __a, vector signed short __b) {
+ int __cc;
+ __builtin_s390_vceqhs(__a, __b, &__cc);
+ return __cc == 0;
+}
+
+static inline __ATTRS_o_ai int
+vec_all_eq(vector signed short __a, vector bool short __b) {
+ int __cc;
+ __builtin_s390_vceqhs(__a, (vector signed short)__b, &__cc);
+ return __cc == 0;
+}
+
+static inline __ATTRS_o_ai int
+vec_all_eq(vector bool short __a, vector signed short __b) {
+ int __cc;
+ __builtin_s390_vceqhs((vector signed short)__a, __b, &__cc);
+ return __cc == 0;
+}
+
+static inline __ATTRS_o_ai int
+vec_all_eq(vector unsigned short __a, vector unsigned short __b) {
+ int __cc;
+ __builtin_s390_vceqhs((vector signed short)__a,
+ (vector signed short)__b, &__cc);
+ return __cc == 0;
+}
+
+static inline __ATTRS_o_ai int
+vec_all_eq(vector unsigned short __a, vector bool short __b) {
+ int __cc;
+ __builtin_s390_vceqhs((vector signed short)__a,
+ (vector signed short)__b, &__cc);
+ return __cc == 0;
+}
+
+static inline __ATTRS_o_ai int
+vec_all_eq(vector bool short __a, vector unsigned short __b) {
+ int __cc;
+ __builtin_s390_vceqhs((vector signed short)__a,
+ (vector signed short)__b, &__cc);
+ return __cc == 0;
+}
+
+static inline __ATTRS_o_ai int
+vec_all_eq(vector bool short __a, vector bool short __b) {
+ int __cc;
+ __builtin_s390_vceqhs((vector signed short)__a,
+ (vector signed short)__b, &__cc);
+ return __cc == 0;
+}
+
+static inline __ATTRS_o_ai int
+vec_all_eq(vector signed int __a, vector signed int __b) {
+ int __cc;
+ __builtin_s390_vceqfs(__a, __b, &__cc);
+ return __cc == 0;
+}
+
+static inline __ATTRS_o_ai int
+vec_all_eq(vector signed int __a, vector bool int __b) {
+ int __cc;
+ __builtin_s390_vceqfs(__a, (vector signed int)__b, &__cc);
+ return __cc == 0;
+}
+
+static inline __ATTRS_o_ai int
+vec_all_eq(vector bool int __a, vector signed int __b) {
+ int __cc;
+ __builtin_s390_vceqfs((vector signed int)__a, __b, &__cc);
+ return __cc == 0;
+}
+
+static inline __ATTRS_o_ai int
+vec_all_eq(vector unsigned int __a, vector unsigned int __b) {
+ int __cc;
+ __builtin_s390_vceqfs((vector signed int)__a,
+ (vector signed int)__b, &__cc);
+ return __cc == 0;
+}
+
+static inline __ATTRS_o_ai int
+vec_all_eq(vector unsigned int __a, vector bool int __b) {
+ int __cc;
+ __builtin_s390_vceqfs((vector signed int)__a,
+ (vector signed int)__b, &__cc);
+ return __cc == 0;
+}
+
+static inline __ATTRS_o_ai int
+vec_all_eq(vector bool int __a, vector unsigned int __b) {
+ int __cc;
+ __builtin_s390_vceqfs((vector signed int)__a,
+ (vector signed int)__b, &__cc);
+ return __cc == 0;
+}
+
+static inline __ATTRS_o_ai int
+vec_all_eq(vector bool int __a, vector bool int __b) {
+ int __cc;
+ __builtin_s390_vceqfs((vector signed int)__a,
+ (vector signed int)__b, &__cc);
+ return __cc == 0;
+}
+
+static inline __ATTRS_o_ai int
+vec_all_eq(vector signed long long __a, vector signed long long __b) {
+ int __cc;
+ __builtin_s390_vceqgs(__a, __b, &__cc);
+ return __cc == 0;
+}
+
+static inline __ATTRS_o_ai int
+vec_all_eq(vector signed long long __a, vector bool long long __b) {
+ int __cc;
+ __builtin_s390_vceqgs(__a, (vector signed long long)__b, &__cc);
+ return __cc == 0;
+}
+
+static inline __ATTRS_o_ai int
+vec_all_eq(vector bool long long __a, vector signed long long __b) {
+ int __cc;
+ __builtin_s390_vceqgs((vector signed long long)__a, __b, &__cc);
+ return __cc == 0;
+}
+
+static inline __ATTRS_o_ai int
+vec_all_eq(vector unsigned long long __a, vector unsigned long long __b) {
+ int __cc;
+ __builtin_s390_vceqgs((vector signed long long)__a,
+ (vector signed long long)__b, &__cc);
+ return __cc == 0;
+}
+
+static inline __ATTRS_o_ai int
+vec_all_eq(vector unsigned long long __a, vector bool long long __b) {
+ int __cc;
+ __builtin_s390_vceqgs((vector signed long long)__a,
+ (vector signed long long)__b, &__cc);
+ return __cc == 0;
+}
+
+static inline __ATTRS_o_ai int
+vec_all_eq(vector bool long long __a, vector unsigned long long __b) {
+ int __cc;
+ __builtin_s390_vceqgs((vector signed long long)__a,
+ (vector signed long long)__b, &__cc);
+ return __cc == 0;
+}
+
+static inline __ATTRS_o_ai int
+vec_all_eq(vector bool long long __a, vector bool long long __b) {
+ int __cc;
+ __builtin_s390_vceqgs((vector signed long long)__a,
+ (vector signed long long)__b, &__cc);
+ return __cc == 0;
+}
+
+static inline __ATTRS_o_ai int
+vec_all_eq(vector double __a, vector double __b) {
+ int __cc;
+ __builtin_s390_vfcedbs(__a, __b, &__cc);
+ return __cc == 0;
+}
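+
+/* Besides the lane mask, the *s compare builtins set the s390 condition
+ * code: as used throughout this file, __cc == 0 means the relation held in
+ * every lane, __cc == 1 in some but not all lanes, and __cc == 3 in none.
+ * vec_all_eq() therefore tests __cc == 0.  Sketch:
+ *
+ *   if (vec_all_eq(a, b))   // every lane of a equals the same lane of b
+ *     ...
+ */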
+
+/*-- vec_all_ne -------------------------------------------------------------*/
+
+static inline __ATTRS_o_ai int
+vec_all_ne(vector signed char __a, vector signed char __b) {
+ int __cc;
+ __builtin_s390_vceqbs(__a, __b, &__cc);
+ return __cc == 3;
+}
+
+static inline __ATTRS_o_ai int
+vec_all_ne(vector signed char __a, vector bool char __b) {
+ int __cc;
+ __builtin_s390_vceqbs(__a, (vector signed char)__b, &__cc);
+ return __cc == 3;
+}
+
+static inline __ATTRS_o_ai int
+vec_all_ne(vector bool char __a, vector signed char __b) {
+ int __cc;
+ __builtin_s390_vceqbs((vector signed char)__a, __b, &__cc);
+ return __cc == 3;
+}
+
+static inline __ATTRS_o_ai int
+vec_all_ne(vector unsigned char __a, vector unsigned char __b) {
+ int __cc;
+ __builtin_s390_vceqbs((vector signed char)__a,
+ (vector signed char)__b, &__cc);
+ return __cc == 3;
+}
+
+static inline __ATTRS_o_ai int
+vec_all_ne(vector unsigned char __a, vector bool char __b) {
+ int __cc;
+ __builtin_s390_vceqbs((vector signed char)__a,
+ (vector signed char)__b, &__cc);
+ return __cc == 3;
+}
+
+static inline __ATTRS_o_ai int
+vec_all_ne(vector bool char __a, vector unsigned char __b) {
+ int __cc;
+ __builtin_s390_vceqbs((vector signed char)__a,
+ (vector signed char)__b, &__cc);
+ return __cc == 3;
+}
+
+static inline __ATTRS_o_ai int
+vec_all_ne(vector bool char __a, vector bool char __b) {
+ int __cc;
+ __builtin_s390_vceqbs((vector signed char)__a,
+ (vector signed char)__b, &__cc);
+ return __cc == 3;
+}
+
+static inline __ATTRS_o_ai int
+vec_all_ne(vector signed short __a, vector signed short __b) {
+ int __cc;
+ __builtin_s390_vceqhs(__a, __b, &__cc);
+ return __cc == 3;
+}
+
+static inline __ATTRS_o_ai int
+vec_all_ne(vector signed short __a, vector bool short __b) {
+ int __cc;
+ __builtin_s390_vceqhs(__a, (vector signed short)__b, &__cc);
+ return __cc == 3;
+}
+
+static inline __ATTRS_o_ai int
+vec_all_ne(vector bool short __a, vector signed short __b) {
+ int __cc;
+ __builtin_s390_vceqhs((vector signed short)__a, __b, &__cc);
+ return __cc == 3;
+}
+
+static inline __ATTRS_o_ai int
+vec_all_ne(vector unsigned short __a, vector unsigned short __b) {
+ int __cc;
+ __builtin_s390_vceqhs((vector signed short)__a,
+ (vector signed short)__b, &__cc);
+ return __cc == 3;
+}
+
+static inline __ATTRS_o_ai int
+vec_all_ne(vector unsigned short __a, vector bool short __b) {
+ int __cc;
+ __builtin_s390_vceqhs((vector signed short)__a,
+ (vector signed short)__b, &__cc);
+ return __cc == 3;
+}
+
+static inline __ATTRS_o_ai int
+vec_all_ne(vector bool short __a, vector unsigned short __b) {
+ int __cc;
+ __builtin_s390_vceqhs((vector signed short)__a,
+ (vector signed short)__b, &__cc);
+ return __cc == 3;
+}
+
+static inline __ATTRS_o_ai int
+vec_all_ne(vector bool short __a, vector bool short __b) {
+ int __cc;
+ __builtin_s390_vceqhs((vector signed short)__a,
+ (vector signed short)__b, &__cc);
+ return __cc == 3;
+}
+
+static inline __ATTRS_o_ai int
+vec_all_ne(vector signed int __a, vector signed int __b) {
+ int __cc;
+ __builtin_s390_vceqfs(__a, __b, &__cc);
+ return __cc == 3;
+}
+
+static inline __ATTRS_o_ai int
+vec_all_ne(vector signed int __a, vector bool int __b) {
+ int __cc;
+ __builtin_s390_vceqfs(__a, (vector signed int)__b, &__cc);
+ return __cc == 3;
+}
+
+static inline __ATTRS_o_ai int
+vec_all_ne(vector bool int __a, vector signed int __b) {
+ int __cc;
+ __builtin_s390_vceqfs((vector signed int)__a, __b, &__cc);
+ return __cc == 3;
+}
+
+static inline __ATTRS_o_ai int
+vec_all_ne(vector unsigned int __a, vector unsigned int __b) {
+ int __cc;
+ __builtin_s390_vceqfs((vector signed int)__a,
+ (vector signed int)__b, &__cc);
+ return __cc == 3;
+}
+
+static inline __ATTRS_o_ai int
+vec_all_ne(vector unsigned int __a, vector bool int __b) {
+ int __cc;
+ __builtin_s390_vceqfs((vector signed int)__a,
+ (vector signed int)__b, &__cc);
+ return __cc == 3;
+}
+
+static inline __ATTRS_o_ai int
+vec_all_ne(vector bool int __a, vector unsigned int __b) {
+ int __cc;
+ __builtin_s390_vceqfs((vector signed int)__a,
+ (vector signed int)__b, &__cc);
+ return __cc == 3;
+}
+
+static inline __ATTRS_o_ai int
+vec_all_ne(vector bool int __a, vector bool int __b) {
+ int __cc;
+ __builtin_s390_vceqfs((vector signed int)__a,
+ (vector signed int)__b, &__cc);
+ return __cc == 3;
+}
+
+static inline __ATTRS_o_ai int
+vec_all_ne(vector signed long long __a, vector signed long long __b) {
+ int __cc;
+ __builtin_s390_vceqgs(__a, __b, &__cc);
+ return __cc == 3;
+}
+
+static inline __ATTRS_o_ai int
+vec_all_ne(vector signed long long __a, vector bool long long __b) {
+ int __cc;
+ __builtin_s390_vceqgs(__a, (vector signed long long)__b, &__cc);
+ return __cc == 3;
+}
+
+static inline __ATTRS_o_ai int
+vec_all_ne(vector bool long long __a, vector signed long long __b) {
+ int __cc;
+ __builtin_s390_vceqgs((vector signed long long)__a, __b, &__cc);
+ return __cc == 3;
+}
+
+static inline __ATTRS_o_ai int
+vec_all_ne(vector unsigned long long __a, vector unsigned long long __b) {
+ int __cc;
+ __builtin_s390_vceqgs((vector signed long long)__a,
+ (vector signed long long)__b, &__cc);
+ return __cc == 3;
+}
+
+static inline __ATTRS_o_ai int
+vec_all_ne(vector unsigned long long __a, vector bool long long __b) {
+ int __cc;
+ __builtin_s390_vceqgs((vector signed long long)__a,
+ (vector signed long long)__b, &__cc);
+ return __cc == 3;
+}
+
+static inline __ATTRS_o_ai int
+vec_all_ne(vector bool long long __a, vector unsigned long long __b) {
+ int __cc;
+ __builtin_s390_vceqgs((vector signed long long)__a,
+ (vector signed long long)__b, &__cc);
+ return __cc == 3;
+}
+
+static inline __ATTRS_o_ai int
+vec_all_ne(vector bool long long __a, vector bool long long __b) {
+ int __cc;
+ __builtin_s390_vceqgs((vector signed long long)__a,
+ (vector signed long long)__b, &__cc);
+ return __cc == 3;
+}
+
+static inline __ATTRS_o_ai int
+vec_all_ne(vector double __a, vector double __b) {
+ int __cc;
+ __builtin_s390_vfcedbs(__a, __b, &__cc);
+ return __cc == 3;
+}
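+
+/* vec_all_ne() runs the same equality compares and tests the opposite
+ * condition code, __cc == 3: equal in no lane. */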
+
+/*-- vec_all_ge -------------------------------------------------------------*/
+
+static inline __ATTRS_o_ai int
+vec_all_ge(vector signed char __a, vector signed char __b) {
+ int __cc;
+ __builtin_s390_vchbs(__b, __a, &__cc);
+ return __cc == 3;
+}
+
+static inline __ATTRS_o_ai int
+vec_all_ge(vector signed char __a, vector bool char __b) {
+ int __cc;
+ __builtin_s390_vchbs((vector signed char)__b, __a, &__cc);
+ return __cc == 3;
+}
+
+static inline __ATTRS_o_ai int
+vec_all_ge(vector bool char __a, vector signed char __b) {
+ int __cc;
+ __builtin_s390_vchbs(__b, (vector signed char)__a, &__cc);
+ return __cc == 3;
+}
+
+static inline __ATTRS_o_ai int
+vec_all_ge(vector unsigned char __a, vector unsigned char __b) {
+ int __cc;
+ __builtin_s390_vchlbs(__b, __a, &__cc);
+ return __cc == 3;
+}
+
+static inline __ATTRS_o_ai int
+vec_all_ge(vector unsigned char __a, vector bool char __b) {
+ int __cc;
+ __builtin_s390_vchlbs((vector unsigned char)__b, __a, &__cc);
+ return __cc == 3;
+}
+
+static inline __ATTRS_o_ai int
+vec_all_ge(vector bool char __a, vector unsigned char __b) {
+ int __cc;
+ __builtin_s390_vchlbs(__b, (vector unsigned char)__a, &__cc);
+ return __cc == 3;
+}
+
+static inline __ATTRS_o_ai int
+vec_all_ge(vector bool char __a, vector bool char __b) {
+ int __cc;
+ __builtin_s390_vchlbs((vector unsigned char)__b,
+ (vector unsigned char)__a, &__cc);
+ return __cc == 3;
+}
+
+static inline __ATTRS_o_ai int
+vec_all_ge(vector signed short __a, vector signed short __b) {
+ int __cc;
+ __builtin_s390_vchhs(__b, __a, &__cc);
+ return __cc == 3;
+}
+
+static inline __ATTRS_o_ai int
+vec_all_ge(vector signed short __a, vector bool short __b) {
+ int __cc;
+ __builtin_s390_vchhs((vector signed short)__b, __a, &__cc);
+ return __cc == 3;
+}
+
+static inline __ATTRS_o_ai int
+vec_all_ge(vector bool short __a, vector signed short __b) {
+ int __cc;
+ __builtin_s390_vchhs(__b, (vector signed short)__a, &__cc);
+ return __cc == 3;
+}
+
+static inline __ATTRS_o_ai int
+vec_all_ge(vector unsigned short __a, vector unsigned short __b) {
+ int __cc;
+ __builtin_s390_vchlhs(__b, __a, &__cc);
+ return __cc == 3;
+}
+
+static inline __ATTRS_o_ai int
+vec_all_ge(vector unsigned short __a, vector bool short __b) {
+ int __cc;
+ __builtin_s390_vchlhs((vector unsigned short)__b, __a, &__cc);
+ return __cc == 3;
+}
+
+static inline __ATTRS_o_ai int
+vec_all_ge(vector bool short __a, vector unsigned short __b) {
+ int __cc;
+ __builtin_s390_vchlhs(__b, (vector unsigned short)__a, &__cc);
+ return __cc == 3;
+}
+
+static inline __ATTRS_o_ai int
+vec_all_ge(vector bool short __a, vector bool short __b) {
+ int __cc;
+ __builtin_s390_vchlhs((vector unsigned short)__b,
+ (vector unsigned short)__a, &__cc);
+ return __cc == 3;
+}
+
+static inline __ATTRS_o_ai int
+vec_all_ge(vector signed int __a, vector signed int __b) {
+ int __cc;
+ __builtin_s390_vchfs(__b, __a, &__cc);
+ return __cc == 3;
+}
+
+static inline __ATTRS_o_ai int
+vec_all_ge(vector signed int __a, vector bool int __b) {
+ int __cc;
+ __builtin_s390_vchfs((vector signed int)__b, __a, &__cc);
+ return __cc == 3;
+}
+
+static inline __ATTRS_o_ai int
+vec_all_ge(vector bool int __a, vector signed int __b) {
+ int __cc;
+ __builtin_s390_vchfs(__b, (vector signed int)__a, &__cc);
+ return __cc == 3;
+}
+
+static inline __ATTRS_o_ai int
+vec_all_ge(vector unsigned int __a, vector unsigned int __b) {
+ int __cc;
+ __builtin_s390_vchlfs(__b, __a, &__cc);
+ return __cc == 3;
+}
+
+static inline __ATTRS_o_ai int
+vec_all_ge(vector unsigned int __a, vector bool int __b) {
+ int __cc;
+ __builtin_s390_vchlfs((vector unsigned int)__b, __a, &__cc);
+ return __cc == 3;
+}
+
+static inline __ATTRS_o_ai int
+vec_all_ge(vector bool int __a, vector unsigned int __b) {
+ int __cc;
+ __builtin_s390_vchlfs(__b, (vector unsigned int)__a, &__cc);
+ return __cc == 3;
+}
+
+static inline __ATTRS_o_ai int
+vec_all_ge(vector bool int __a, vector bool int __b) {
+ int __cc;
+ __builtin_s390_vchlfs((vector unsigned int)__b,
+ (vector unsigned int)__a, &__cc);
+ return __cc == 3;
+}
+
+static inline __ATTRS_o_ai int
+vec_all_ge(vector signed long long __a, vector signed long long __b) {
+ int __cc;
+ __builtin_s390_vchgs(__b, __a, &__cc);
+ return __cc == 3;
+}
+
+static inline __ATTRS_o_ai int
+vec_all_ge(vector signed long long __a, vector bool long long __b) {
+ int __cc;
+ __builtin_s390_vchgs((vector signed long long)__b, __a, &__cc);
+ return __cc == 3;
+}
+
+static inline __ATTRS_o_ai int
+vec_all_ge(vector bool long long __a, vector signed long long __b) {
+ int __cc;
+ __builtin_s390_vchgs(__b, (vector signed long long)__a, &__cc);
+ return __cc == 3;
+}
+
+static inline __ATTRS_o_ai int
+vec_all_ge(vector unsigned long long __a, vector unsigned long long __b) {
+ int __cc;
+ __builtin_s390_vchlgs(__b, __a, &__cc);
+ return __cc == 3;
+}
+
+static inline __ATTRS_o_ai int
+vec_all_ge(vector unsigned long long __a, vector bool long long __b) {
+ int __cc;
+ __builtin_s390_vchlgs((vector unsigned long long)__b, __a, &__cc);
+ return __cc == 3;
+}
+
+static inline __ATTRS_o_ai int
+vec_all_ge(vector bool long long __a, vector unsigned long long __b) {
+ int __cc;
+ __builtin_s390_vchlgs(__b, (vector unsigned long long)__a, &__cc);
+ return __cc == 3;
+}
+
+static inline __ATTRS_o_ai int
+vec_all_ge(vector bool long long __a, vector bool long long __b) {
+ int __cc;
+ __builtin_s390_vchlgs((vector unsigned long long)__b,
+ (vector unsigned long long)__a, &__cc);
+ return __cc == 3;
+}
+
+static inline __ATTRS_o_ai int
+vec_all_ge(vector double __a, vector double __b) {
+ int __cc;
+ __builtin_s390_vfchedbs(__a, __b, &__cc);
+ return __cc == 0;
+}
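+
+/* The integer element types have no native >= compare, so vec_all_ge(a, b)
+ * issues the greater-than compare with swapped operands: "b > a" holding in
+ * no lane (__cc == 3) is the same as a >= b holding in every lane.  double
+ * does have a native >= compare (vfchedbs), so that overload tests
+ * __cc == 0 directly. */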
+
+/*-- vec_all_gt -------------------------------------------------------------*/
+
+static inline __ATTRS_o_ai int
+vec_all_gt(vector signed char __a, vector signed char __b) {
+ int __cc;
+ __builtin_s390_vchbs(__a, __b, &__cc);
+ return __cc == 0;
+}
+
+static inline __ATTRS_o_ai int
+vec_all_gt(vector signed char __a, vector bool char __b) {
+ int __cc;
+ __builtin_s390_vchbs(__a, (vector signed char)__b, &__cc);
+ return __cc == 0;
+}
+
+static inline __ATTRS_o_ai int
+vec_all_gt(vector bool char __a, vector signed char __b) {
+ int __cc;
+ __builtin_s390_vchbs((vector signed char)__a, __b, &__cc);
+ return __cc == 0;
+}
+
+static inline __ATTRS_o_ai int
+vec_all_gt(vector unsigned char __a, vector unsigned char __b) {
+ int __cc;
+ __builtin_s390_vchlbs(__a, __b, &__cc);
+ return __cc == 0;
+}
+
+static inline __ATTRS_o_ai int
+vec_all_gt(vector unsigned char __a, vector bool char __b) {
+ int __cc;
+ __builtin_s390_vchlbs(__a, (vector unsigned char)__b, &__cc);
+ return __cc == 0;
+}
+
+static inline __ATTRS_o_ai int
+vec_all_gt(vector bool char __a, vector unsigned char __b) {
+ int __cc;
+ __builtin_s390_vchlbs((vector unsigned char)__a, __b, &__cc);
+ return __cc == 0;
+}
+
+static inline __ATTRS_o_ai int
+vec_all_gt(vector bool char __a, vector bool char __b) {
+ int __cc;
+ __builtin_s390_vchlbs((vector unsigned char)__a,
+ (vector unsigned char)__b, &__cc);
+ return __cc == 0;
+}
+
+static inline __ATTRS_o_ai int
+vec_all_gt(vector signed short __a, vector signed short __b) {
+ int __cc;
+ __builtin_s390_vchhs(__a, __b, &__cc);
+ return __cc == 0;
+}
+
+static inline __ATTRS_o_ai int
+vec_all_gt(vector signed short __a, vector bool short __b) {
+ int __cc;
+ __builtin_s390_vchhs(__a, (vector signed short)__b, &__cc);
+ return __cc == 0;
+}
+
+static inline __ATTRS_o_ai int
+vec_all_gt(vector bool short __a, vector signed short __b) {
+ int __cc;
+ __builtin_s390_vchhs((vector signed short)__a, __b, &__cc);
+ return __cc == 0;
+}
+
+static inline __ATTRS_o_ai int
+vec_all_gt(vector unsigned short __a, vector unsigned short __b) {
+ int __cc;
+ __builtin_s390_vchlhs(__a, __b, &__cc);
+ return __cc == 0;
+}
+
+static inline __ATTRS_o_ai int
+vec_all_gt(vector unsigned short __a, vector bool short __b) {
+ int __cc;
+ __builtin_s390_vchlhs(__a, (vector unsigned short)__b, &__cc);
+ return __cc == 0;
+}
+
+static inline __ATTRS_o_ai int
+vec_all_gt(vector bool short __a, vector unsigned short __b) {
+ int __cc;
+ __builtin_s390_vchlhs((vector unsigned short)__a, __b, &__cc);
+ return __cc == 0;
+}
+
+static inline __ATTRS_o_ai int
+vec_all_gt(vector bool short __a, vector bool short __b) {
+ int __cc;
+ __builtin_s390_vchlhs((vector unsigned short)__a,
+ (vector unsigned short)__b, &__cc);
+ return __cc == 0;
+}
+
+static inline __ATTRS_o_ai int
+vec_all_gt(vector signed int __a, vector signed int __b) {
+ int __cc;
+ __builtin_s390_vchfs(__a, __b, &__cc);
+ return __cc == 0;
+}
+
+static inline __ATTRS_o_ai int
+vec_all_gt(vector signed int __a, vector bool int __b) {
+ int __cc;
+ __builtin_s390_vchfs(__a, (vector signed int)__b, &__cc);
+ return __cc == 0;
+}
+
+static inline __ATTRS_o_ai int
+vec_all_gt(vector bool int __a, vector signed int __b) {
+ int __cc;
+ __builtin_s390_vchfs((vector signed int)__a, __b, &__cc);
+ return __cc == 0;
+}
+
+static inline __ATTRS_o_ai int
+vec_all_gt(vector unsigned int __a, vector unsigned int __b) {
+ int __cc;
+ __builtin_s390_vchlfs(__a, __b, &__cc);
+ return __cc == 0;
+}
+
+static inline __ATTRS_o_ai int
+vec_all_gt(vector unsigned int __a, vector bool int __b) {
+ int __cc;
+ __builtin_s390_vchlfs(__a, (vector unsigned int)__b, &__cc);
+ return __cc == 0;
+}
+
+static inline __ATTRS_o_ai int
+vec_all_gt(vector bool int __a, vector unsigned int __b) {
+ int __cc;
+ __builtin_s390_vchlfs((vector unsigned int)__a, __b, &__cc);
+ return __cc == 0;
+}
+
+static inline __ATTRS_o_ai int
+vec_all_gt(vector bool int __a, vector bool int __b) {
+ int __cc;
+ __builtin_s390_vchlfs((vector unsigned int)__a,
+ (vector unsigned int)__b, &__cc);
+ return __cc == 0;
+}
+
+static inline __ATTRS_o_ai int
+vec_all_gt(vector signed long long __a, vector signed long long __b) {
+ int __cc;
+ __builtin_s390_vchgs(__a, __b, &__cc);
+ return __cc == 0;
+}
+
+static inline __ATTRS_o_ai int
+vec_all_gt(vector signed long long __a, vector bool long long __b) {
+ int __cc;
+ __builtin_s390_vchgs(__a, (vector signed long long)__b, &__cc);
+ return __cc == 0;
+}
+
+static inline __ATTRS_o_ai int
+vec_all_gt(vector bool long long __a, vector signed long long __b) {
+ int __cc;
+ __builtin_s390_vchgs((vector signed long long)__a, __b, &__cc);
+ return __cc == 0;
+}
+
+static inline __ATTRS_o_ai int
+vec_all_gt(vector unsigned long long __a, vector unsigned long long __b) {
+ int __cc;
+ __builtin_s390_vchlgs(__a, __b, &__cc);
+ return __cc == 0;
+}
+
+static inline __ATTRS_o_ai int
+vec_all_gt(vector unsigned long long __a, vector bool long long __b) {
+ int __cc;
+ __builtin_s390_vchlgs(__a, (vector unsigned long long)__b, &__cc);
+ return __cc == 0;
+}
+
+static inline __ATTRS_o_ai int
+vec_all_gt(vector bool long long __a, vector unsigned long long __b) {
+ int __cc;
+ __builtin_s390_vchlgs((vector unsigned long long)__a, __b, &__cc);
+ return __cc == 0;
+}
+
+static inline __ATTRS_o_ai int
+vec_all_gt(vector bool long long __a, vector bool long long __b) {
+ int __cc;
+ __builtin_s390_vchlgs((vector unsigned long long)__a,
+ (vector unsigned long long)__b, &__cc);
+ return __cc == 0;
+}
+
+static inline __ATTRS_o_ai int
+vec_all_gt(vector double __a, vector double __b) {
+ int __cc;
+ __builtin_s390_vfchdbs(__a, __b, &__cc);
+ return __cc == 0;
+}
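+
+/* vec_all_gt() is the direct form: vch*s (signed), vchl*s (unsigned) and
+ * vfchdbs (double) compare __a > __b, and __cc == 0 reports "greater in
+ * every lane".  vector bool operands are cast to the signedness of the
+ * other operand, and to the unsigned compare when both are bool. */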
+
+/*-- vec_all_le -------------------------------------------------------------*/
+
+static inline __ATTRS_o_ai int
+vec_all_le(vector signed char __a, vector signed char __b) {
+ int __cc;
+ __builtin_s390_vchbs(__a, __b, &__cc);
+ return __cc == 3;
+}
+
+static inline __ATTRS_o_ai int
+vec_all_le(vector signed char __a, vector bool char __b) {
+ int __cc;
+ __builtin_s390_vchbs(__a, (vector signed char)__b, &__cc);
+ return __cc == 3;
+}
+
+static inline __ATTRS_o_ai int
+vec_all_le(vector bool char __a, vector signed char __b) {
+ int __cc;
+ __builtin_s390_vchbs((vector signed char)__a, __b, &__cc);
+ return __cc == 3;
+}
+
+static inline __ATTRS_o_ai int
+vec_all_le(vector unsigned char __a, vector unsigned char __b) {
+ int __cc;
+ __builtin_s390_vchlbs(__a, __b, &__cc);
+ return __cc == 3;
+}
+
+static inline __ATTRS_o_ai int
+vec_all_le(vector unsigned char __a, vector bool char __b) {
+ int __cc;
+ __builtin_s390_vchlbs(__a, (vector unsigned char)__b, &__cc);
+ return __cc == 3;
+}
+
+static inline __ATTRS_o_ai int
+vec_all_le(vector bool char __a, vector unsigned char __b) {
+ int __cc;
+ __builtin_s390_vchlbs((vector unsigned char)__a, __b, &__cc);
+ return __cc == 3;
+}
+
+static inline __ATTRS_o_ai int
+vec_all_le(vector bool char __a, vector bool char __b) {
+ int __cc;
+ __builtin_s390_vchlbs((vector unsigned char)__a,
+ (vector unsigned char)__b, &__cc);
+ return __cc == 3;
+}
+
+static inline __ATTRS_o_ai int
+vec_all_le(vector signed short __a, vector signed short __b) {
+ int __cc;
+ __builtin_s390_vchhs(__a, __b, &__cc);
+ return __cc == 3;
+}
+
+static inline __ATTRS_o_ai int
+vec_all_le(vector signed short __a, vector bool short __b) {
+ int __cc;
+ __builtin_s390_vchhs(__a, (vector signed short)__b, &__cc);
+ return __cc == 3;
+}
+
+static inline __ATTRS_o_ai int
+vec_all_le(vector bool short __a, vector signed short __b) {
+ int __cc;
+ __builtin_s390_vchhs((vector signed short)__a, __b, &__cc);
+ return __cc == 3;
+}
+
+static inline __ATTRS_o_ai int
+vec_all_le(vector unsigned short __a, vector unsigned short __b) {
+ int __cc;
+ __builtin_s390_vchlhs(__a, __b, &__cc);
+ return __cc == 3;
+}
+
+static inline __ATTRS_o_ai int
+vec_all_le(vector unsigned short __a, vector bool short __b) {
+ int __cc;
+ __builtin_s390_vchlhs(__a, (vector unsigned short)__b, &__cc);
+ return __cc == 3;
+}
+
+static inline __ATTRS_o_ai int
+vec_all_le(vector bool short __a, vector unsigned short __b) {
+ int __cc;
+ __builtin_s390_vchlhs((vector unsigned short)__a, __b, &__cc);
+ return __cc == 3;
+}
+
+static inline __ATTRS_o_ai int
+vec_all_le(vector bool short __a, vector bool short __b) {
+ int __cc;
+ __builtin_s390_vchlhs((vector unsigned short)__a,
+ (vector unsigned short)__b, &__cc);
+ return __cc == 3;
+}
+
+static inline __ATTRS_o_ai int
+vec_all_le(vector signed int __a, vector signed int __b) {
+ int __cc;
+ __builtin_s390_vchfs(__a, __b, &__cc);
+ return __cc == 3;
+}
+
+static inline __ATTRS_o_ai int
+vec_all_le(vector signed int __a, vector bool int __b) {
+ int __cc;
+ __builtin_s390_vchfs(__a, (vector signed int)__b, &__cc);
+ return __cc == 3;
+}
+
+static inline __ATTRS_o_ai int
+vec_all_le(vector bool int __a, vector signed int __b) {
+ int __cc;
+ __builtin_s390_vchfs((vector signed int)__a, __b, &__cc);
+ return __cc == 3;
+}
+
+static inline __ATTRS_o_ai int
+vec_all_le(vector unsigned int __a, vector unsigned int __b) {
+ int __cc;
+ __builtin_s390_vchlfs(__a, __b, &__cc);
+ return __cc == 3;
+}
+
+static inline __ATTRS_o_ai int
+vec_all_le(vector unsigned int __a, vector bool int __b) {
+ int __cc;
+ __builtin_s390_vchlfs(__a, (vector unsigned int)__b, &__cc);
+ return __cc == 3;
+}
+
+static inline __ATTRS_o_ai int
+vec_all_le(vector bool int __a, vector unsigned int __b) {
+ int __cc;
+ __builtin_s390_vchlfs((vector unsigned int)__a, __b, &__cc);
+ return __cc == 3;
+}
+
+static inline __ATTRS_o_ai int
+vec_all_le(vector bool int __a, vector bool int __b) {
+ int __cc;
+ __builtin_s390_vchlfs((vector unsigned int)__a,
+ (vector unsigned int)__b, &__cc);
+ return __cc == 3;
+}
+
+static inline __ATTRS_o_ai int
+vec_all_le(vector signed long long __a, vector signed long long __b) {
+ int __cc;
+ __builtin_s390_vchgs(__a, __b, &__cc);
+ return __cc == 3;
+}
+
+static inline __ATTRS_o_ai int
+vec_all_le(vector signed long long __a, vector bool long long __b) {
+ int __cc;
+ __builtin_s390_vchgs(__a, (vector signed long long)__b, &__cc);
+ return __cc == 3;
+}
+
+static inline __ATTRS_o_ai int
+vec_all_le(vector bool long long __a, vector signed long long __b) {
+ int __cc;
+ __builtin_s390_vchgs((vector signed long long)__a, __b, &__cc);
+ return __cc == 3;
+}
+
+static inline __ATTRS_o_ai int
+vec_all_le(vector unsigned long long __a, vector unsigned long long __b) {
+ int __cc;
+ __builtin_s390_vchlgs(__a, __b, &__cc);
+ return __cc == 3;
+}
+
+static inline __ATTRS_o_ai int
+vec_all_le(vector unsigned long long __a, vector bool long long __b) {
+ int __cc;
+ __builtin_s390_vchlgs(__a, (vector unsigned long long)__b, &__cc);
+ return __cc == 3;
+}
+
+static inline __ATTRS_o_ai int
+vec_all_le(vector bool long long __a, vector unsigned long long __b) {
+ int __cc;
+ __builtin_s390_vchlgs((vector unsigned long long)__a, __b, &__cc);
+ return __cc == 3;
+}
+
+static inline __ATTRS_o_ai int
+vec_all_le(vector bool long long __a, vector bool long long __b) {
+ int __cc;
+ __builtin_s390_vchlgs((vector unsigned long long)__a,
+ (vector unsigned long long)__b, &__cc);
+ return __cc == 3;
+}
+
+static inline __ATTRS_o_ai int
+vec_all_le(vector double __a, vector double __b) {
+ int __cc;
+ __builtin_s390_vfchedbs(__b, __a, &__cc);
+ return __cc == 0;
+}
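+
+/* vec_all_le(a, b) is again phrased through >: "a > b" in no lane
+ * (__cc == 3) means a <= b everywhere.  The double overload instead uses
+ * vfchedbs(__b, __a), i.e. b >= a in every lane (__cc == 0). */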
+
+/*-- vec_all_lt -------------------------------------------------------------*/
+
+static inline __ATTRS_o_ai int
+vec_all_lt(vector signed char __a, vector signed char __b) {
+ int __cc;
+ __builtin_s390_vchbs(__b, __a, &__cc);
+ return __cc == 0;
+}
+
+static inline __ATTRS_o_ai int
+vec_all_lt(vector signed char __a, vector bool char __b) {
+ int __cc;
+ __builtin_s390_vchbs((vector signed char)__b, __a, &__cc);
+ return __cc == 0;
+}
+
+static inline __ATTRS_o_ai int
+vec_all_lt(vector bool char __a, vector signed char __b) {
+ int __cc;
+ __builtin_s390_vchbs(__b, (vector signed char)__a, &__cc);
+ return __cc == 0;
+}
+
+static inline __ATTRS_o_ai int
+vec_all_lt(vector unsigned char __a, vector unsigned char __b) {
+ int __cc;
+ __builtin_s390_vchlbs(__b, __a, &__cc);
+ return __cc == 0;
+}
+
+static inline __ATTRS_o_ai int
+vec_all_lt(vector unsigned char __a, vector bool char __b) {
+ int __cc;
+ __builtin_s390_vchlbs((vector unsigned char)__b, __a, &__cc);
+ return __cc == 0;
+}
+
+static inline __ATTRS_o_ai int
+vec_all_lt(vector bool char __a, vector unsigned char __b) {
+ int __cc;
+ __builtin_s390_vchlbs(__b, (vector unsigned char)__a, &__cc);
+ return __cc == 0;
+}
+
+static inline __ATTRS_o_ai int
+vec_all_lt(vector bool char __a, vector bool char __b) {
+ int __cc;
+ __builtin_s390_vchlbs((vector unsigned char)__b,
+ (vector unsigned char)__a, &__cc);
+ return __cc == 0;
+}
+
+static inline __ATTRS_o_ai int
+vec_all_lt(vector signed short __a, vector signed short __b) {
+ int __cc;
+ __builtin_s390_vchhs(__b, __a, &__cc);
+ return __cc == 0;
+}
+
+static inline __ATTRS_o_ai int
+vec_all_lt(vector signed short __a, vector bool short __b) {
+ int __cc;
+ __builtin_s390_vchhs((vector signed short)__b, __a, &__cc);
+ return __cc == 0;
+}
+
+static inline __ATTRS_o_ai int
+vec_all_lt(vector bool short __a, vector signed short __b) {
+ int __cc;
+ __builtin_s390_vchhs(__b, (vector signed short)__a, &__cc);
+ return __cc == 0;
+}
+
+static inline __ATTRS_o_ai int
+vec_all_lt(vector unsigned short __a, vector unsigned short __b) {
+ int __cc;
+ __builtin_s390_vchlhs(__b, __a, &__cc);
+ return __cc == 0;
+}
+
+static inline __ATTRS_o_ai int
+vec_all_lt(vector unsigned short __a, vector bool short __b) {
+ int __cc;
+ __builtin_s390_vchlhs((vector unsigned short)__b, __a, &__cc);
+ return __cc == 0;
+}
+
+static inline __ATTRS_o_ai int
+vec_all_lt(vector bool short __a, vector unsigned short __b) {
+ int __cc;
+ __builtin_s390_vchlhs(__b, (vector unsigned short)__a, &__cc);
+ return __cc == 0;
+}
+
+static inline __ATTRS_o_ai int
+vec_all_lt(vector bool short __a, vector bool short __b) {
+ int __cc;
+ __builtin_s390_vchlhs((vector unsigned short)__b,
+ (vector unsigned short)__a, &__cc);
+ return __cc == 0;
+}
+
+static inline __ATTRS_o_ai int
+vec_all_lt(vector signed int __a, vector signed int __b) {
+ int __cc;
+ __builtin_s390_vchfs(__b, __a, &__cc);
+ return __cc == 0;
+}
+
+static inline __ATTRS_o_ai int
+vec_all_lt(vector signed int __a, vector bool int __b) {
+ int __cc;
+ __builtin_s390_vchfs((vector signed int)__b, __a, &__cc);
+ return __cc == 0;
+}
+
+static inline __ATTRS_o_ai int
+vec_all_lt(vector bool int __a, vector signed int __b) {
+ int __cc;
+ __builtin_s390_vchfs(__b, (vector signed int)__a, &__cc);
+ return __cc == 0;
+}
+
+static inline __ATTRS_o_ai int
+vec_all_lt(vector unsigned int __a, vector unsigned int __b) {
+ int __cc;
+ __builtin_s390_vchlfs(__b, __a, &__cc);
+ return __cc == 0;
+}
+
+static inline __ATTRS_o_ai int
+vec_all_lt(vector unsigned int __a, vector bool int __b) {
+ int __cc;
+ __builtin_s390_vchlfs((vector unsigned int)__b, __a, &__cc);
+ return __cc == 0;
+}
+
+static inline __ATTRS_o_ai int
+vec_all_lt(vector bool int __a, vector unsigned int __b) {
+ int __cc;
+ __builtin_s390_vchlfs(__b, (vector unsigned int)__a, &__cc);
+ return __cc == 0;
+}
+
+static inline __ATTRS_o_ai int
+vec_all_lt(vector bool int __a, vector bool int __b) {
+ int __cc;
+ __builtin_s390_vchlfs((vector unsigned int)__b,
+ (vector unsigned int)__a, &__cc);
+ return __cc == 0;
+}
+
+static inline __ATTRS_o_ai int
+vec_all_lt(vector signed long long __a, vector signed long long __b) {
+ int __cc;
+ __builtin_s390_vchgs(__b, __a, &__cc);
+ return __cc == 0;
+}
+
+static inline __ATTRS_o_ai int
+vec_all_lt(vector signed long long __a, vector bool long long __b) {
+ int __cc;
+ __builtin_s390_vchgs((vector signed long long)__b, __a, &__cc);
+ return __cc == 0;
+}
+
+static inline __ATTRS_o_ai int
+vec_all_lt(vector bool long long __a, vector signed long long __b) {
+ int __cc;
+ __builtin_s390_vchgs(__b, (vector signed long long)__a, &__cc);
+ return __cc == 0;
+}
+
+static inline __ATTRS_o_ai int
+vec_all_lt(vector unsigned long long __a, vector unsigned long long __b) {
+ int __cc;
+ __builtin_s390_vchlgs(__b, __a, &__cc);
+ return __cc == 0;
+}
+
+static inline __ATTRS_o_ai int
+vec_all_lt(vector unsigned long long __a, vector bool long long __b) {
+ int __cc;
+ __builtin_s390_vchlgs((vector unsigned long long)__b, __a, &__cc);
+ return __cc == 0;
+}
+
+static inline __ATTRS_o_ai int
+vec_all_lt(vector bool long long __a, vector unsigned long long __b) {
+ int __cc;
+ __builtin_s390_vchlgs(__b, (vector unsigned long long)__a, &__cc);
+ return __cc == 0;
+}
+
+static inline __ATTRS_o_ai int
+vec_all_lt(vector bool long long __a, vector bool long long __b) {
+ int __cc;
+ __builtin_s390_vchlgs((vector unsigned long long)__b,
+ (vector unsigned long long)__a, &__cc);
+ return __cc == 0;
+}
+
+static inline __ATTRS_o_ai int
+vec_all_lt(vector double __a, vector double __b) {
+ int __cc;
+ __builtin_s390_vfchdbs(__b, __a, &__cc);
+ return __cc == 0;
+}
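+
+/* vec_all_lt(a, b) swaps the operands of the > compare: __cc == 0 of
+ * "b > a" means a < b in every lane. */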
+
+/*-- vec_all_nge ------------------------------------------------------------*/
+
+static inline __ATTRS_ai int
+vec_all_nge(vector double __a, vector double __b) {
+ int __cc;
+ __builtin_s390_vfchedbs(__a, __b, &__cc);
+ return __cc == 3;
+}
+
+/*-- vec_all_ngt ------------------------------------------------------------*/
+
+static inline __ATTRS_ai int
+vec_all_ngt(vector double __a, vector double __b) {
+ int __cc;
+ __builtin_s390_vfchdbs(__a, __b, &__cc);
+ return __cc == 3;
+}
+
+/*-- vec_all_nle ------------------------------------------------------------*/
+
+static inline __ATTRS_ai int
+vec_all_nle(vector double __a, vector double __b) {
+ int __cc;
+ __builtin_s390_vfchedbs(__b, __a, &__cc);
+ return __cc == 3;
+}
+
+/*-- vec_all_nlt ------------------------------------------------------------*/
+
+static inline __ATTRS_ai int
+vec_all_nlt(vector double __a, vector double __b) {
+ int __cc;
+ __builtin_s390_vfchdbs(__b, __a, &__cc);
+ return __cc == 3;
+}
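+
+/* vec_all_nge/ngt/nle/nlt exist only for double: with NaN inputs "not
+ * greater or equal" is not the same as "less than", so each predicate
+ * reruns the corresponding ordered compare and tests __cc == 3, i.e. the
+ * relation held in no lane (a NaN lane satisfies no ordered relation). */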
+
+/*-- vec_all_nan ------------------------------------------------------------*/
+
+static inline __ATTRS_ai int
+vec_all_nan(vector double __a) {
+ int __cc;
+ __builtin_s390_vftcidb(__a, 15, &__cc);
+ return __cc == 0;
+}
+
+/*-- vec_all_numeric --------------------------------------------------------*/
+
+static inline __ATTRS_ai int
+vec_all_numeric(vector double __a) {
+ int __cc;
+ __builtin_s390_vftcidb(__a, 15, &__cc);
+ return __cc == 3;
+}
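+
+/* vec_all_nan()/vec_all_numeric() use the test-data-class builtin; the mask
+ * value 15 evidently selects the four NaN classes (quiet/signaling, both
+ * signs), so __cc == 0 means every lane matched (all NaN) and __cc == 3
+ * means no lane matched (all numeric).  Sketch:
+ *
+ *   if (vec_all_numeric(v))   // no lane of v is a NaN
+ *     ...
+ */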
+
+/*-- vec_any_eq -------------------------------------------------------------*/
+
+static inline __ATTRS_o_ai int
+vec_any_eq(vector signed char __a, vector signed char __b) {
+ int __cc;
+ __builtin_s390_vceqbs(__a, __b, &__cc);
+ return __cc <= 1;
+}
+
+static inline __ATTRS_o_ai int
+vec_any_eq(vector signed char __a, vector bool char __b) {
+ int __cc;
+ __builtin_s390_vceqbs(__a, (vector signed char)__b, &__cc);
+ return __cc <= 1;
+}
+
+static inline __ATTRS_o_ai int
+vec_any_eq(vector bool char __a, vector signed char __b) {
+ int __cc;
+ __builtin_s390_vceqbs((vector signed char)__a, __b, &__cc);
+ return __cc <= 1;
+}
+
+static inline __ATTRS_o_ai int
+vec_any_eq(vector unsigned char __a, vector unsigned char __b) {
+ int __cc;
+ __builtin_s390_vceqbs((vector signed char)__a,
+ (vector signed char)__b, &__cc);
+ return __cc <= 1;
+}
+
+static inline __ATTRS_o_ai int
+vec_any_eq(vector unsigned char __a, vector bool char __b) {
+ int __cc;
+ __builtin_s390_vceqbs((vector signed char)__a,
+ (vector signed char)__b, &__cc);
+ return __cc <= 1;
+}
+
+static inline __ATTRS_o_ai int
+vec_any_eq(vector bool char __a, vector unsigned char __b) {
+ int __cc;
+ __builtin_s390_vceqbs((vector signed char)__a,
+ (vector signed char)__b, &__cc);
+ return __cc <= 1;
+}
+
+static inline __ATTRS_o_ai int
+vec_any_eq(vector bool char __a, vector bool char __b) {
+ int __cc;
+ __builtin_s390_vceqbs((vector signed char)__a,
+ (vector signed char)__b, &__cc);
+ return __cc <= 1;
+}
+
+static inline __ATTRS_o_ai int
+vec_any_eq(vector signed short __a, vector signed short __b) {
+ int __cc;
+ __builtin_s390_vceqhs(__a, __b, &__cc);
+ return __cc <= 1;
+}
+
+static inline __ATTRS_o_ai int
+vec_any_eq(vector signed short __a, vector bool short __b) {
+ int __cc;
+ __builtin_s390_vceqhs(__a, (vector signed short)__b, &__cc);
+ return __cc <= 1;
+}
+
+static inline __ATTRS_o_ai int
+vec_any_eq(vector bool short __a, vector signed short __b) {
+ int __cc;
+ __builtin_s390_vceqhs((vector signed short)__a, __b, &__cc);
+ return __cc <= 1;
+}
+
+static inline __ATTRS_o_ai int
+vec_any_eq(vector unsigned short __a, vector unsigned short __b) {
+ int __cc;
+ __builtin_s390_vceqhs((vector signed short)__a,
+ (vector signed short)__b, &__cc);
+ return __cc <= 1;
+}
+
+static inline __ATTRS_o_ai int
+vec_any_eq(vector unsigned short __a, vector bool short __b) {
+ int __cc;
+ __builtin_s390_vceqhs((vector signed short)__a,
+ (vector signed short)__b, &__cc);
+ return __cc <= 1;
+}
+
+static inline __ATTRS_o_ai int
+vec_any_eq(vector bool short __a, vector unsigned short __b) {
+ int __cc;
+ __builtin_s390_vceqhs((vector signed short)__a,
+ (vector signed short)__b, &__cc);
+ return __cc <= 1;
+}
+
+static inline __ATTRS_o_ai int
+vec_any_eq(vector bool short __a, vector bool short __b) {
+ int __cc;
+ __builtin_s390_vceqhs((vector signed short)__a,
+ (vector signed short)__b, &__cc);
+ return __cc <= 1;
+}
+
+static inline __ATTRS_o_ai int
+vec_any_eq(vector signed int __a, vector signed int __b) {
+ int __cc;
+ __builtin_s390_vceqfs(__a, __b, &__cc);
+ return __cc <= 1;
+}
+
+static inline __ATTRS_o_ai int
+vec_any_eq(vector signed int __a, vector bool int __b) {
+ int __cc;
+ __builtin_s390_vceqfs(__a, (vector signed int)__b, &__cc);
+ return __cc <= 1;
+}
+
+static inline __ATTRS_o_ai int
+vec_any_eq(vector bool int __a, vector signed int __b) {
+ int __cc;
+ __builtin_s390_vceqfs((vector signed int)__a, __b, &__cc);
+ return __cc <= 1;
+}
+
+static inline __ATTRS_o_ai int
+vec_any_eq(vector unsigned int __a, vector unsigned int __b) {
+ int __cc;
+ __builtin_s390_vceqfs((vector signed int)__a,
+ (vector signed int)__b, &__cc);
+ return __cc <= 1;
+}
+
+static inline __ATTRS_o_ai int
+vec_any_eq(vector unsigned int __a, vector bool int __b) {
+ int __cc;
+ __builtin_s390_vceqfs((vector signed int)__a,
+ (vector signed int)__b, &__cc);
+ return __cc <= 1;
+}
+
+static inline __ATTRS_o_ai int
+vec_any_eq(vector bool int __a, vector unsigned int __b) {
+ int __cc;
+ __builtin_s390_vceqfs((vector signed int)__a,
+ (vector signed int)__b, &__cc);
+ return __cc <= 1;
+}
+
+static inline __ATTRS_o_ai int
+vec_any_eq(vector bool int __a, vector bool int __b) {
+ int __cc;
+ __builtin_s390_vceqfs((vector signed int)__a,
+ (vector signed int)__b, &__cc);
+ return __cc <= 1;
+}
+
+static inline __ATTRS_o_ai int
+vec_any_eq(vector signed long long __a, vector signed long long __b) {
+ int __cc;
+ __builtin_s390_vceqgs(__a, __b, &__cc);
+ return __cc <= 1;
+}
+
+static inline __ATTRS_o_ai int
+vec_any_eq(vector signed long long __a, vector bool long long __b) {
+ int __cc;
+ __builtin_s390_vceqgs(__a, (vector signed long long)__b, &__cc);
+ return __cc <= 1;
+}
+
+static inline __ATTRS_o_ai int
+vec_any_eq(vector bool long long __a, vector signed long long __b) {
+ int __cc;
+ __builtin_s390_vceqgs((vector signed long long)__a, __b, &__cc);
+ return __cc <= 1;
+}
+
+static inline __ATTRS_o_ai int
+vec_any_eq(vector unsigned long long __a, vector unsigned long long __b) {
+ int __cc;
+ __builtin_s390_vceqgs((vector signed long long)__a,
+ (vector signed long long)__b, &__cc);
+ return __cc <= 1;
+}
+
+static inline __ATTRS_o_ai int
+vec_any_eq(vector unsigned long long __a, vector bool long long __b) {
+ int __cc;
+ __builtin_s390_vceqgs((vector signed long long)__a,
+ (vector signed long long)__b, &__cc);
+ return __cc <= 1;
+}
+
+static inline __ATTRS_o_ai int
+vec_any_eq(vector bool long long __a, vector unsigned long long __b) {
+ int __cc;
+ __builtin_s390_vceqgs((vector signed long long)__a,
+ (vector signed long long)__b, &__cc);
+ return __cc <= 1;
+}
+
+static inline __ATTRS_o_ai int
+vec_any_eq(vector bool long long __a, vector bool long long __b) {
+ int __cc;
+ __builtin_s390_vceqgs((vector signed long long)__a,
+ (vector signed long long)__b, &__cc);
+ return __cc <= 1;
+}
+
+static inline __ATTRS_o_ai int
+vec_any_eq(vector double __a, vector double __b) {
+ int __cc;
+ __builtin_s390_vfcedbs(__a, __b, &__cc);
+ return __cc <= 1;
+}
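+
+/* The vec_any_* predicates test the other end of the condition-code range.
+ * For vec_any_eq(), __cc <= 1 (equal in all lanes, or in some) means at
+ * least one lane compared equal.  Sketch:
+ *
+ *   if (vec_any_eq(a, b))   // some lane of a equals that lane of b
+ *     ...
+ */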
+
+/*-- vec_any_ne -------------------------------------------------------------*/
+
+static inline __ATTRS_o_ai int
+vec_any_ne(vector signed char __a, vector signed char __b) {
+ int __cc;
+ __builtin_s390_vceqbs(__a, __b, &__cc);
+ return __cc != 0;
+}
+
+static inline __ATTRS_o_ai int
+vec_any_ne(vector signed char __a, vector bool char __b) {
+ int __cc;
+ __builtin_s390_vceqbs(__a, (vector signed char)__b, &__cc);
+ return __cc != 0;
+}
+
+static inline __ATTRS_o_ai int
+vec_any_ne(vector bool char __a, vector signed char __b) {
+ int __cc;
+ __builtin_s390_vceqbs((vector signed char)__a, __b, &__cc);
+ return __cc != 0;
+}
+
+static inline __ATTRS_o_ai int
+vec_any_ne(vector unsigned char __a, vector unsigned char __b) {
+ int __cc;
+ __builtin_s390_vceqbs((vector signed char)__a,
+ (vector signed char)__b, &__cc);
+ return __cc != 0;
+}
+
+static inline __ATTRS_o_ai int
+vec_any_ne(vector unsigned char __a, vector bool char __b) {
+ int __cc;
+ __builtin_s390_vceqbs((vector signed char)__a,
+ (vector signed char)__b, &__cc);
+ return __cc != 0;
+}
+
+static inline __ATTRS_o_ai int
+vec_any_ne(vector bool char __a, vector unsigned char __b) {
+ int __cc;
+ __builtin_s390_vceqbs((vector signed char)__a,
+ (vector signed char)__b, &__cc);
+ return __cc != 0;
+}
+
+static inline __ATTRS_o_ai int
+vec_any_ne(vector bool char __a, vector bool char __b) {
+ int __cc;
+ __builtin_s390_vceqbs((vector signed char)__a,
+ (vector signed char)__b, &__cc);
+ return __cc != 0;
+}
+
+static inline __ATTRS_o_ai int
+vec_any_ne(vector signed short __a, vector signed short __b) {
+ int __cc;
+ __builtin_s390_vceqhs(__a, __b, &__cc);
+ return __cc != 0;
+}
+
+static inline __ATTRS_o_ai int
+vec_any_ne(vector signed short __a, vector bool short __b) {
+ int __cc;
+ __builtin_s390_vceqhs(__a, (vector signed short)__b, &__cc);
+ return __cc != 0;
+}
+
+static inline __ATTRS_o_ai int
+vec_any_ne(vector bool short __a, vector signed short __b) {
+ int __cc;
+ __builtin_s390_vceqhs((vector signed short)__a, __b, &__cc);
+ return __cc != 0;
+}
+
+static inline __ATTRS_o_ai int
+vec_any_ne(vector unsigned short __a, vector unsigned short __b) {
+ int __cc;
+ __builtin_s390_vceqhs((vector signed short)__a,
+ (vector signed short)__b, &__cc);
+ return __cc != 0;
+}
+
+static inline __ATTRS_o_ai int
+vec_any_ne(vector unsigned short __a, vector bool short __b) {
+ int __cc;
+ __builtin_s390_vceqhs((vector signed short)__a,
+ (vector signed short)__b, &__cc);
+ return __cc != 0;
+}
+
+static inline __ATTRS_o_ai int
+vec_any_ne(vector bool short __a, vector unsigned short __b) {
+ int __cc;
+ __builtin_s390_vceqhs((vector signed short)__a,
+ (vector signed short)__b, &__cc);
+ return __cc != 0;
+}
+
+static inline __ATTRS_o_ai int
+vec_any_ne(vector bool short __a, vector bool short __b) {
+ int __cc;
+ __builtin_s390_vceqhs((vector signed short)__a,
+ (vector signed short)__b, &__cc);
+ return __cc != 0;
+}
+
+static inline __ATTRS_o_ai int
+vec_any_ne(vector signed int __a, vector signed int __b) {
+ int __cc;
+ __builtin_s390_vceqfs(__a, __b, &__cc);
+ return __cc != 0;
+}
+
+static inline __ATTRS_o_ai int
+vec_any_ne(vector signed int __a, vector bool int __b) {
+ int __cc;
+ __builtin_s390_vceqfs(__a, (vector signed int)__b, &__cc);
+ return __cc != 0;
+}
+
+static inline __ATTRS_o_ai int
+vec_any_ne(vector bool int __a, vector signed int __b) {
+ int __cc;
+ __builtin_s390_vceqfs((vector signed int)__a, __b, &__cc);
+ return __cc != 0;
+}
+
+static inline __ATTRS_o_ai int
+vec_any_ne(vector unsigned int __a, vector unsigned int __b) {
+ int __cc;
+ __builtin_s390_vceqfs((vector signed int)__a,
+ (vector signed int)__b, &__cc);
+ return __cc != 0;
+}
+
+static inline __ATTRS_o_ai int
+vec_any_ne(vector unsigned int __a, vector bool int __b) {
+ int __cc;
+ __builtin_s390_vceqfs((vector signed int)__a,
+ (vector signed int)__b, &__cc);
+ return __cc != 0;
+}
+
+static inline __ATTRS_o_ai int
+vec_any_ne(vector bool int __a, vector unsigned int __b) {
+ int __cc;
+ __builtin_s390_vceqfs((vector signed int)__a,
+ (vector signed int)__b, &__cc);
+ return __cc != 0;
+}
+
+static inline __ATTRS_o_ai int
+vec_any_ne(vector bool int __a, vector bool int __b) {
+ int __cc;
+ __builtin_s390_vceqfs((vector signed int)__a,
+ (vector signed int)__b, &__cc);
+ return __cc != 0;
+}
+
+static inline __ATTRS_o_ai int
+vec_any_ne(vector signed long long __a, vector signed long long __b) {
+ int __cc;
+ __builtin_s390_vceqgs(__a, __b, &__cc);
+ return __cc != 0;
+}
+
+static inline __ATTRS_o_ai int
+vec_any_ne(vector signed long long __a, vector bool long long __b) {
+ int __cc;
+ __builtin_s390_vceqgs(__a, (vector signed long long)__b, &__cc);
+ return __cc != 0;
+}
+
+static inline __ATTRS_o_ai int
+vec_any_ne(vector bool long long __a, vector signed long long __b) {
+ int __cc;
+ __builtin_s390_vceqgs((vector signed long long)__a, __b, &__cc);
+ return __cc != 0;
+}
+
+static inline __ATTRS_o_ai int
+vec_any_ne(vector unsigned long long __a, vector unsigned long long __b) {
+ int __cc;
+ __builtin_s390_vceqgs((vector signed long long)__a,
+ (vector signed long long)__b, &__cc);
+ return __cc != 0;
+}
+
+static inline __ATTRS_o_ai int
+vec_any_ne(vector unsigned long long __a, vector bool long long __b) {
+ int __cc;
+ __builtin_s390_vceqgs((vector signed long long)__a,
+ (vector signed long long)__b, &__cc);
+ return __cc != 0;
+}
+
+static inline __ATTRS_o_ai int
+vec_any_ne(vector bool long long __a, vector unsigned long long __b) {
+ int __cc;
+ __builtin_s390_vceqgs((vector signed long long)__a,
+ (vector signed long long)__b, &__cc);
+ return __cc != 0;
+}
+
+static inline __ATTRS_o_ai int
+vec_any_ne(vector bool long long __a, vector bool long long __b) {
+ int __cc;
+ __builtin_s390_vceqgs((vector signed long long)__a,
+ (vector signed long long)__b, &__cc);
+ return __cc != 0;
+}
+
+static inline __ATTRS_o_ai int
+vec_any_ne(vector double __a, vector double __b) {
+ int __cc;
+ __builtin_s390_vfcedbs(__a, __b, &__cc);
+ return __cc != 0;
+}
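+
+/* vec_any_ne() tests __cc != 0: if not every lane compared equal, at least
+ * one lane differs. */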
+
+/*-- vec_any_ge -------------------------------------------------------------*/
+
+static inline __ATTRS_o_ai int
+vec_any_ge(vector signed char __a, vector signed char __b) {
+ int __cc;
+ __builtin_s390_vchbs(__b, __a, &__cc);
+ return __cc != 0;
+}
+
+static inline __ATTRS_o_ai int
+vec_any_ge(vector signed char __a, vector bool char __b) {
+ int __cc;
+ __builtin_s390_vchbs((vector signed char)__b, __a, &__cc);
+ return __cc != 0;
+}
+
+static inline __ATTRS_o_ai int
+vec_any_ge(vector bool char __a, vector signed char __b) {
+ int __cc;
+ __builtin_s390_vchbs(__b, (vector signed char)__a, &__cc);
+ return __cc != 0;
+}
+
+static inline __ATTRS_o_ai int
+vec_any_ge(vector unsigned char __a, vector unsigned char __b) {
+ int __cc;
+ __builtin_s390_vchlbs(__b, __a, &__cc);
+ return __cc != 0;
+}
+
+static inline __ATTRS_o_ai int
+vec_any_ge(vector unsigned char __a, vector bool char __b) {
+ int __cc;
+ __builtin_s390_vchlbs((vector unsigned char)__b, __a, &__cc);
+ return __cc != 0;
+}
+
+static inline __ATTRS_o_ai int
+vec_any_ge(vector bool char __a, vector unsigned char __b) {
+ int __cc;
+ __builtin_s390_vchlbs(__b, (vector unsigned char)__a, &__cc);
+ return __cc != 0;
+}
+
+static inline __ATTRS_o_ai int
+vec_any_ge(vector bool char __a, vector bool char __b) {
+ int __cc;
+ __builtin_s390_vchlbs((vector unsigned char)__b,
+ (vector unsigned char)__a, &__cc);
+ return __cc != 0;
+}
+
+static inline __ATTRS_o_ai int
+vec_any_ge(vector signed short __a, vector signed short __b) {
+ int __cc;
+ __builtin_s390_vchhs(__b, __a, &__cc);
+ return __cc != 0;
+}
+
+static inline __ATTRS_o_ai int
+vec_any_ge(vector signed short __a, vector bool short __b) {
+ int __cc;
+ __builtin_s390_vchhs((vector signed short)__b, __a, &__cc);
+ return __cc != 0;
+}
+
+static inline __ATTRS_o_ai int
+vec_any_ge(vector bool short __a, vector signed short __b) {
+ int __cc;
+ __builtin_s390_vchhs(__b, (vector signed short)__a, &__cc);
+ return __cc != 0;
+}
+
+static inline __ATTRS_o_ai int
+vec_any_ge(vector unsigned short __a, vector unsigned short __b) {
+ int __cc;
+ __builtin_s390_vchlhs(__b, __a, &__cc);
+ return __cc != 0;
+}
+
+static inline __ATTRS_o_ai int
+vec_any_ge(vector unsigned short __a, vector bool short __b) {
+ int __cc;
+ __builtin_s390_vchlhs((vector unsigned short)__b, __a, &__cc);
+ return __cc != 0;
+}
+
+static inline __ATTRS_o_ai int
+vec_any_ge(vector bool short __a, vector unsigned short __b) {
+ int __cc;
+ __builtin_s390_vchlhs(__b, (vector unsigned short)__a, &__cc);
+ return __cc != 0;
+}
+
+static inline __ATTRS_o_ai int
+vec_any_ge(vector bool short __a, vector bool short __b) {
+ int __cc;
+ __builtin_s390_vchlhs((vector unsigned short)__b,
+ (vector unsigned short)__a, &__cc);
+ return __cc != 0;
+}
+
+static inline __ATTRS_o_ai int
+vec_any_ge(vector signed int __a, vector signed int __b) {
+ int __cc;
+ __builtin_s390_vchfs(__b, __a, &__cc);
+ return __cc != 0;
+}
+
+static inline __ATTRS_o_ai int
+vec_any_ge(vector signed int __a, vector bool int __b) {
+ int __cc;
+ __builtin_s390_vchfs((vector signed int)__b, __a, &__cc);
+ return __cc != 0;
+}
+
+static inline __ATTRS_o_ai int
+vec_any_ge(vector bool int __a, vector signed int __b) {
+ int __cc;
+ __builtin_s390_vchfs(__b, (vector signed int)__a, &__cc);
+ return __cc != 0;
+}
+
+static inline __ATTRS_o_ai int
+vec_any_ge(vector unsigned int __a, vector unsigned int __b) {
+ int __cc;
+ __builtin_s390_vchlfs(__b, __a, &__cc);
+ return __cc != 0;
+}
+
+static inline __ATTRS_o_ai int
+vec_any_ge(vector unsigned int __a, vector bool int __b) {
+ int __cc;
+ __builtin_s390_vchlfs((vector unsigned int)__b, __a, &__cc);
+ return __cc != 0;
+}
+
+static inline __ATTRS_o_ai int
+vec_any_ge(vector bool int __a, vector unsigned int __b) {
+ int __cc;
+ __builtin_s390_vchlfs(__b, (vector unsigned int)__a, &__cc);
+ return __cc != 0;
+}
+
+static inline __ATTRS_o_ai int
+vec_any_ge(vector bool int __a, vector bool int __b) {
+ int __cc;
+ __builtin_s390_vchlfs((vector unsigned int)__b,
+ (vector unsigned int)__a, &__cc);
+ return __cc != 0;
+}
+
+static inline __ATTRS_o_ai int
+vec_any_ge(vector signed long long __a, vector signed long long __b) {
+ int __cc;
+ __builtin_s390_vchgs(__b, __a, &__cc);
+ return __cc != 0;
+}
+
+static inline __ATTRS_o_ai int
+vec_any_ge(vector signed long long __a, vector bool long long __b) {
+ int __cc;
+ __builtin_s390_vchgs((vector signed long long)__b, __a, &__cc);
+ return __cc != 0;
+}
+
+static inline __ATTRS_o_ai int
+vec_any_ge(vector bool long long __a, vector signed long long __b) {
+ int __cc;
+ __builtin_s390_vchgs(__b, (vector signed long long)__a, &__cc);
+ return __cc != 0;
+}
+
+static inline __ATTRS_o_ai int
+vec_any_ge(vector unsigned long long __a, vector unsigned long long __b) {
+ int __cc;
+ __builtin_s390_vchlgs(__b, __a, &__cc);
+ return __cc != 0;
+}
+
+static inline __ATTRS_o_ai int
+vec_any_ge(vector unsigned long long __a, vector bool long long __b) {
+ int __cc;
+ __builtin_s390_vchlgs((vector unsigned long long)__b, __a, &__cc);
+ return __cc != 0;
+}
+
+static inline __ATTRS_o_ai int
+vec_any_ge(vector bool long long __a, vector unsigned long long __b) {
+ int __cc;
+ __builtin_s390_vchlgs(__b, (vector unsigned long long)__a, &__cc);
+ return __cc != 0;
+}
+
+static inline __ATTRS_o_ai int
+vec_any_ge(vector bool long long __a, vector bool long long __b) {
+ int __cc;
+ __builtin_s390_vchlgs((vector unsigned long long)__b,
+ (vector unsigned long long)__a, &__cc);
+ return __cc != 0;
+}
+
+static inline __ATTRS_o_ai int
+vec_any_ge(vector double __a, vector double __b) {
+ int __cc;
+ __builtin_s390_vfchedbs(__a, __b, &__cc);
+ return __cc <= 1;
+}
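+
+/* vec_any_ge(a, b) swaps the > compare once more: __cc != 0 of "b > a"
+ * means the compare failed in at least one lane, i.e. a >= b somewhere.
+ * The double overload uses the native >= compare and tests __cc <= 1. */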
+
+/*-- vec_any_gt -------------------------------------------------------------*/
+
+static inline __ATTRS_o_ai int
+vec_any_gt(vector signed char __a, vector signed char __b) {
+ int __cc;
+ __builtin_s390_vchbs(__a, __b, &__cc);
+ return __cc <= 1;
+}
+
+static inline __ATTRS_o_ai int
+vec_any_gt(vector signed char __a, vector bool char __b) {
+ int __cc;
+ __builtin_s390_vchbs(__a, (vector signed char)__b, &__cc);
+ return __cc <= 1;
+}
+
+static inline __ATTRS_o_ai int
+vec_any_gt(vector bool char __a, vector signed char __b) {
+ int __cc;
+ __builtin_s390_vchbs((vector signed char)__a, __b, &__cc);
+ return __cc <= 1;
+}
+
+static inline __ATTRS_o_ai int
+vec_any_gt(vector unsigned char __a, vector unsigned char __b) {
+ int __cc;
+ __builtin_s390_vchlbs(__a, __b, &__cc);
+ return __cc <= 1;
+}
+
+static inline __ATTRS_o_ai int
+vec_any_gt(vector unsigned char __a, vector bool char __b) {
+ int __cc;
+ __builtin_s390_vchlbs(__a, (vector unsigned char)__b, &__cc);
+ return __cc <= 1;
+}
+
+static inline __ATTRS_o_ai int
+vec_any_gt(vector bool char __a, vector unsigned char __b) {
+ int __cc;
+ __builtin_s390_vchlbs((vector unsigned char)__a, __b, &__cc);
+ return __cc <= 1;
+}
+
+static inline __ATTRS_o_ai int
+vec_any_gt(vector bool char __a, vector bool char __b) {
+ int __cc;
+ __builtin_s390_vchlbs((vector unsigned char)__a,
+ (vector unsigned char)__b, &__cc);
+ return __cc <= 1;
+}
+
+static inline __ATTRS_o_ai int
+vec_any_gt(vector signed short __a, vector signed short __b) {
+ int __cc;
+ __builtin_s390_vchhs(__a, __b, &__cc);
+ return __cc <= 1;
+}
+
+static inline __ATTRS_o_ai int
+vec_any_gt(vector signed short __a, vector bool short __b) {
+ int __cc;
+ __builtin_s390_vchhs(__a, (vector signed short)__b, &__cc);
+ return __cc <= 1;
+}
+
+static inline __ATTRS_o_ai int
+vec_any_gt(vector bool short __a, vector signed short __b) {
+ int __cc;
+ __builtin_s390_vchhs((vector signed short)__a, __b, &__cc);
+ return __cc <= 1;
+}
+
+static inline __ATTRS_o_ai int
+vec_any_gt(vector unsigned short __a, vector unsigned short __b) {
+ int __cc;
+ __builtin_s390_vchlhs(__a, __b, &__cc);
+ return __cc <= 1;
+}
+
+static inline __ATTRS_o_ai int
+vec_any_gt(vector unsigned short __a, vector bool short __b) {
+ int __cc;
+ __builtin_s390_vchlhs(__a, (vector unsigned short)__b, &__cc);
+ return __cc <= 1;
+}
+
+static inline __ATTRS_o_ai int
+vec_any_gt(vector bool short __a, vector unsigned short __b) {
+ int __cc;
+ __builtin_s390_vchlhs((vector unsigned short)__a, __b, &__cc);
+ return __cc <= 1;
+}
+
+static inline __ATTRS_o_ai int
+vec_any_gt(vector bool short __a, vector bool short __b) {
+ int __cc;
+ __builtin_s390_vchlhs((vector unsigned short)__a,
+ (vector unsigned short)__b, &__cc);
+ return __cc <= 1;
+}
+
+static inline __ATTRS_o_ai int
+vec_any_gt(vector signed int __a, vector signed int __b) {
+ int __cc;
+ __builtin_s390_vchfs(__a, __b, &__cc);
+ return __cc <= 1;
+}
+
+static inline __ATTRS_o_ai int
+vec_any_gt(vector signed int __a, vector bool int __b) {
+ int __cc;
+ __builtin_s390_vchfs(__a, (vector signed int)__b, &__cc);
+ return __cc <= 1;
+}
+
+static inline __ATTRS_o_ai int
+vec_any_gt(vector bool int __a, vector signed int __b) {
+ int __cc;
+ __builtin_s390_vchfs((vector signed int)__a, __b, &__cc);
+ return __cc <= 1;
+}
+
+static inline __ATTRS_o_ai int
+vec_any_gt(vector unsigned int __a, vector unsigned int __b) {
+ int __cc;
+ __builtin_s390_vchlfs(__a, __b, &__cc);
+ return __cc <= 1;
+}
+
+static inline __ATTRS_o_ai int
+vec_any_gt(vector unsigned int __a, vector bool int __b) {
+ int __cc;
+ __builtin_s390_vchlfs(__a, (vector unsigned int)__b, &__cc);
+ return __cc <= 1;
+}
+
+static inline __ATTRS_o_ai int
+vec_any_gt(vector bool int __a, vector unsigned int __b) {
+ int __cc;
+ __builtin_s390_vchlfs((vector unsigned int)__a, __b, &__cc);
+ return __cc <= 1;
+}
+
+static inline __ATTRS_o_ai int
+vec_any_gt(vector bool int __a, vector bool int __b) {
+ int __cc;
+ __builtin_s390_vchlfs((vector unsigned int)__a,
+ (vector unsigned int)__b, &__cc);
+ return __cc <= 1;
+}
+
+static inline __ATTRS_o_ai int
+vec_any_gt(vector signed long long __a, vector signed long long __b) {
+ int __cc;
+ __builtin_s390_vchgs(__a, __b, &__cc);
+ return __cc <= 1;
+}
+
+static inline __ATTRS_o_ai int
+vec_any_gt(vector signed long long __a, vector bool long long __b) {
+ int __cc;
+ __builtin_s390_vchgs(__a, (vector signed long long)__b, &__cc);
+ return __cc <= 1;
+}
+
+static inline __ATTRS_o_ai int
+vec_any_gt(vector bool long long __a, vector signed long long __b) {
+ int __cc;
+ __builtin_s390_vchgs((vector signed long long)__a, __b, &__cc);
+ return __cc <= 1;
+}
+
+static inline __ATTRS_o_ai int
+vec_any_gt(vector unsigned long long __a, vector unsigned long long __b) {
+ int __cc;
+ __builtin_s390_vchlgs(__a, __b, &__cc);
+ return __cc <= 1;
+}
+
+static inline __ATTRS_o_ai int
+vec_any_gt(vector unsigned long long __a, vector bool long long __b) {
+ int __cc;
+ __builtin_s390_vchlgs(__a, (vector unsigned long long)__b, &__cc);
+ return __cc <= 1;
+}
+
+static inline __ATTRS_o_ai int
+vec_any_gt(vector bool long long __a, vector unsigned long long __b) {
+ int __cc;
+ __builtin_s390_vchlgs((vector unsigned long long)__a, __b, &__cc);
+ return __cc <= 1;
+}
+
+static inline __ATTRS_o_ai int
+vec_any_gt(vector bool long long __a, vector bool long long __b) {
+ int __cc;
+ __builtin_s390_vchlgs((vector unsigned long long)__a,
+ (vector unsigned long long)__b, &__cc);
+ return __cc <= 1;
+}
+
+static inline __ATTRS_o_ai int
+vec_any_gt(vector double __a, vector double __b) {
+ int __cc;
+ __builtin_s390_vfchdbs(__a, __b, &__cc);
+ return __cc <= 1;
+}
+
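+/* Illustrative usage sketch (editorial, not part of the upstream header):
+ * the vec_any_* predicates above reduce an element-wise compare to a scalar
+ * via the condition code; for the "greater than" family, __cc <= 1 means at
+ * least one element compared true.  Assuming a z13 target with the vector
+ * language extensions enabled:
+ *
+ *   vector signed int x = {1, 5, 3, 7};
+ *   vector signed int y = {2, 2, 2, 2};
+ *   int any = vec_any_gt(x, y);   // nonzero: lanes 1, 2 and 3 exceed y
+ */
+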
+/*-- vec_any_le -------------------------------------------------------------*/
+
+static inline __ATTRS_o_ai int
+vec_any_le(vector signed char __a, vector signed char __b) {
+ int __cc;
+ __builtin_s390_vchbs(__a, __b, &__cc);
+ return __cc != 0;
+}
+
+static inline __ATTRS_o_ai int
+vec_any_le(vector signed char __a, vector bool char __b) {
+ int __cc;
+ __builtin_s390_vchbs(__a, (vector signed char)__b, &__cc);
+ return __cc != 0;
+}
+
+static inline __ATTRS_o_ai int
+vec_any_le(vector bool char __a, vector signed char __b) {
+ int __cc;
+ __builtin_s390_vchbs((vector signed char)__a, __b, &__cc);
+ return __cc != 0;
+}
+
+static inline __ATTRS_o_ai int
+vec_any_le(vector unsigned char __a, vector unsigned char __b) {
+ int __cc;
+ __builtin_s390_vchlbs(__a, __b, &__cc);
+ return __cc != 0;
+}
+
+static inline __ATTRS_o_ai int
+vec_any_le(vector unsigned char __a, vector bool char __b) {
+ int __cc;
+ __builtin_s390_vchlbs(__a, (vector unsigned char)__b, &__cc);
+ return __cc != 0;
+}
+
+static inline __ATTRS_o_ai int
+vec_any_le(vector bool char __a, vector unsigned char __b) {
+ int __cc;
+ __builtin_s390_vchlbs((vector unsigned char)__a, __b, &__cc);
+ return __cc != 0;
+}
+
+static inline __ATTRS_o_ai int
+vec_any_le(vector bool char __a, vector bool char __b) {
+ int __cc;
+ __builtin_s390_vchlbs((vector unsigned char)__a,
+ (vector unsigned char)__b, &__cc);
+ return __cc != 0;
+}
+
+static inline __ATTRS_o_ai int
+vec_any_le(vector signed short __a, vector signed short __b) {
+ int __cc;
+ __builtin_s390_vchhs(__a, __b, &__cc);
+ return __cc != 0;
+}
+
+static inline __ATTRS_o_ai int
+vec_any_le(vector signed short __a, vector bool short __b) {
+ int __cc;
+ __builtin_s390_vchhs(__a, (vector signed short)__b, &__cc);
+ return __cc != 0;
+}
+
+static inline __ATTRS_o_ai int
+vec_any_le(vector bool short __a, vector signed short __b) {
+ int __cc;
+ __builtin_s390_vchhs((vector signed short)__a, __b, &__cc);
+ return __cc != 0;
+}
+
+static inline __ATTRS_o_ai int
+vec_any_le(vector unsigned short __a, vector unsigned short __b) {
+ int __cc;
+ __builtin_s390_vchlhs(__a, __b, &__cc);
+ return __cc != 0;
+}
+
+static inline __ATTRS_o_ai int
+vec_any_le(vector unsigned short __a, vector bool short __b) {
+ int __cc;
+ __builtin_s390_vchlhs(__a, (vector unsigned short)__b, &__cc);
+ return __cc != 0;
+}
+
+static inline __ATTRS_o_ai int
+vec_any_le(vector bool short __a, vector unsigned short __b) {
+ int __cc;
+ __builtin_s390_vchlhs((vector unsigned short)__a, __b, &__cc);
+ return __cc != 0;
+}
+
+static inline __ATTRS_o_ai int
+vec_any_le(vector bool short __a, vector bool short __b) {
+ int __cc;
+ __builtin_s390_vchlhs((vector unsigned short)__a,
+ (vector unsigned short)__b, &__cc);
+ return __cc != 0;
+}
+
+static inline __ATTRS_o_ai int
+vec_any_le(vector signed int __a, vector signed int __b) {
+ int __cc;
+ __builtin_s390_vchfs(__a, __b, &__cc);
+ return __cc != 0;
+}
+
+static inline __ATTRS_o_ai int
+vec_any_le(vector signed int __a, vector bool int __b) {
+ int __cc;
+ __builtin_s390_vchfs(__a, (vector signed int)__b, &__cc);
+ return __cc != 0;
+}
+
+static inline __ATTRS_o_ai int
+vec_any_le(vector bool int __a, vector signed int __b) {
+ int __cc;
+ __builtin_s390_vchfs((vector signed int)__a, __b, &__cc);
+ return __cc != 0;
+}
+
+static inline __ATTRS_o_ai int
+vec_any_le(vector unsigned int __a, vector unsigned int __b) {
+ int __cc;
+ __builtin_s390_vchlfs(__a, __b, &__cc);
+ return __cc != 0;
+}
+
+static inline __ATTRS_o_ai int
+vec_any_le(vector unsigned int __a, vector bool int __b) {
+ int __cc;
+ __builtin_s390_vchlfs(__a, (vector unsigned int)__b, &__cc);
+ return __cc != 0;
+}
+
+static inline __ATTRS_o_ai int
+vec_any_le(vector bool int __a, vector unsigned int __b) {
+ int __cc;
+ __builtin_s390_vchlfs((vector unsigned int)__a, __b, &__cc);
+ return __cc != 0;
+}
+
+static inline __ATTRS_o_ai int
+vec_any_le(vector bool int __a, vector bool int __b) {
+ int __cc;
+ __builtin_s390_vchlfs((vector unsigned int)__a,
+ (vector unsigned int)__b, &__cc);
+ return __cc != 0;
+}
+
+static inline __ATTRS_o_ai int
+vec_any_le(vector signed long long __a, vector signed long long __b) {
+ int __cc;
+ __builtin_s390_vchgs(__a, __b, &__cc);
+ return __cc != 0;
+}
+
+static inline __ATTRS_o_ai int
+vec_any_le(vector signed long long __a, vector bool long long __b) {
+ int __cc;
+ __builtin_s390_vchgs(__a, (vector signed long long)__b, &__cc);
+ return __cc != 0;
+}
+
+static inline __ATTRS_o_ai int
+vec_any_le(vector bool long long __a, vector signed long long __b) {
+ int __cc;
+ __builtin_s390_vchgs((vector signed long long)__a, __b, &__cc);
+ return __cc != 0;
+}
+
+static inline __ATTRS_o_ai int
+vec_any_le(vector unsigned long long __a, vector unsigned long long __b) {
+ int __cc;
+ __builtin_s390_vchlgs(__a, __b, &__cc);
+ return __cc != 0;
+}
+
+static inline __ATTRS_o_ai int
+vec_any_le(vector unsigned long long __a, vector bool long long __b) {
+ int __cc;
+ __builtin_s390_vchlgs(__a, (vector unsigned long long)__b, &__cc);
+ return __cc != 0;
+}
+
+static inline __ATTRS_o_ai int
+vec_any_le(vector bool long long __a, vector unsigned long long __b) {
+ int __cc;
+ __builtin_s390_vchlgs((vector unsigned long long)__a, __b, &__cc);
+ return __cc != 0;
+}
+
+static inline __ATTRS_o_ai int
+vec_any_le(vector bool long long __a, vector bool long long __b) {
+ int __cc;
+ __builtin_s390_vchlgs((vector unsigned long long)__a,
+ (vector unsigned long long)__b, &__cc);
+ return __cc != 0;
+}
+
+static inline __ATTRS_o_ai int
+vec_any_le(vector double __a, vector double __b) {
+ int __cc;
+ __builtin_s390_vfchedbs(__b, __a, &__cc);
+ return __cc <= 1;
+}
+
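+/* Editorial note: vec_any_le is synthesized from the same "greater than"
+ * compares used by vec_all_gt rather than from a dedicated instruction: at
+ * least one a <= b holds exactly when NOT all a > b, i.e. when the condition
+ * code is nonzero.  A scalar sketch of that reduction over 4-lane ints:
+ *
+ *   int any_le(const int *a, const int *b) {
+ *     for (int i = 0; i < 4; ++i)
+ *       if (a[i] <= b[i]) return 1;   // mirrors __cc != 0
+ *     return 0;
+ *   }
+ */
+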
+/*-- vec_any_lt -------------------------------------------------------------*/
+
+static inline __ATTRS_o_ai int
+vec_any_lt(vector signed char __a, vector signed char __b) {
+ int __cc;
+ __builtin_s390_vchbs(__b, __a, &__cc);
+ return __cc <= 1;
+}
+
+static inline __ATTRS_o_ai int
+vec_any_lt(vector signed char __a, vector bool char __b) {
+ int __cc;
+ __builtin_s390_vchbs((vector signed char)__b, __a, &__cc);
+ return __cc <= 1;
+}
+
+static inline __ATTRS_o_ai int
+vec_any_lt(vector bool char __a, vector signed char __b) {
+ int __cc;
+ __builtin_s390_vchbs(__b, (vector signed char)__a, &__cc);
+ return __cc <= 1;
+}
+
+static inline __ATTRS_o_ai int
+vec_any_lt(vector unsigned char __a, vector unsigned char __b) {
+ int __cc;
+ __builtin_s390_vchlbs(__b, __a, &__cc);
+ return __cc <= 1;
+}
+
+static inline __ATTRS_o_ai int
+vec_any_lt(vector unsigned char __a, vector bool char __b) {
+ int __cc;
+ __builtin_s390_vchlbs((vector unsigned char)__b, __a, &__cc);
+ return __cc <= 1;
+}
+
+static inline __ATTRS_o_ai int
+vec_any_lt(vector bool char __a, vector unsigned char __b) {
+ int __cc;
+ __builtin_s390_vchlbs(__b, (vector unsigned char)__a, &__cc);
+ return __cc <= 1;
+}
+
+static inline __ATTRS_o_ai int
+vec_any_lt(vector bool char __a, vector bool char __b) {
+ int __cc;
+ __builtin_s390_vchlbs((vector unsigned char)__b,
+ (vector unsigned char)__a, &__cc);
+ return __cc <= 1;
+}
+
+static inline __ATTRS_o_ai int
+vec_any_lt(vector signed short __a, vector signed short __b) {
+ int __cc;
+ __builtin_s390_vchhs(__b, __a, &__cc);
+ return __cc <= 1;
+}
+
+static inline __ATTRS_o_ai int
+vec_any_lt(vector signed short __a, vector bool short __b) {
+ int __cc;
+ __builtin_s390_vchhs((vector signed short)__b, __a, &__cc);
+ return __cc <= 1;
+}
+
+static inline __ATTRS_o_ai int
+vec_any_lt(vector bool short __a, vector signed short __b) {
+ int __cc;
+ __builtin_s390_vchhs(__b, (vector signed short)__a, &__cc);
+ return __cc <= 1;
+}
+
+static inline __ATTRS_o_ai int
+vec_any_lt(vector unsigned short __a, vector unsigned short __b) {
+ int __cc;
+ __builtin_s390_vchlhs(__b, __a, &__cc);
+ return __cc <= 1;
+}
+
+static inline __ATTRS_o_ai int
+vec_any_lt(vector unsigned short __a, vector bool short __b) {
+ int __cc;
+ __builtin_s390_vchlhs((vector unsigned short)__b, __a, &__cc);
+ return __cc <= 1;
+}
+
+static inline __ATTRS_o_ai int
+vec_any_lt(vector bool short __a, vector unsigned short __b) {
+ int __cc;
+ __builtin_s390_vchlhs(__b, (vector unsigned short)__a, &__cc);
+ return __cc <= 1;
+}
+
+static inline __ATTRS_o_ai int
+vec_any_lt(vector bool short __a, vector bool short __b) {
+ int __cc;
+ __builtin_s390_vchlhs((vector unsigned short)__b,
+ (vector unsigned short)__a, &__cc);
+ return __cc <= 1;
+}
+
+static inline __ATTRS_o_ai int
+vec_any_lt(vector signed int __a, vector signed int __b) {
+ int __cc;
+ __builtin_s390_vchfs(__b, __a, &__cc);
+ return __cc <= 1;
+}
+
+static inline __ATTRS_o_ai int
+vec_any_lt(vector signed int __a, vector bool int __b) {
+ int __cc;
+ __builtin_s390_vchfs((vector signed int)__b, __a, &__cc);
+ return __cc <= 1;
+}
+
+static inline __ATTRS_o_ai int
+vec_any_lt(vector bool int __a, vector signed int __b) {
+ int __cc;
+ __builtin_s390_vchfs(__b, (vector signed int)__a, &__cc);
+ return __cc <= 1;
+}
+
+static inline __ATTRS_o_ai int
+vec_any_lt(vector unsigned int __a, vector unsigned int __b) {
+ int __cc;
+ __builtin_s390_vchlfs(__b, __a, &__cc);
+ return __cc <= 1;
+}
+
+static inline __ATTRS_o_ai int
+vec_any_lt(vector unsigned int __a, vector bool int __b) {
+ int __cc;
+ __builtin_s390_vchlfs((vector unsigned int)__b, __a, &__cc);
+ return __cc <= 1;
+}
+
+static inline __ATTRS_o_ai int
+vec_any_lt(vector bool int __a, vector unsigned int __b) {
+ int __cc;
+ __builtin_s390_vchlfs(__b, (vector unsigned int)__a, &__cc);
+ return __cc <= 1;
+}
+
+static inline __ATTRS_o_ai int
+vec_any_lt(vector bool int __a, vector bool int __b) {
+ int __cc;
+ __builtin_s390_vchlfs((vector unsigned int)__b,
+ (vector unsigned int)__a, &__cc);
+ return __cc <= 1;
+}
+
+static inline __ATTRS_o_ai int
+vec_any_lt(vector signed long long __a, vector signed long long __b) {
+ int __cc;
+ __builtin_s390_vchgs(__b, __a, &__cc);
+ return __cc <= 1;
+}
+
+static inline __ATTRS_o_ai int
+vec_any_lt(vector signed long long __a, vector bool long long __b) {
+ int __cc;
+ __builtin_s390_vchgs((vector signed long long)__b, __a, &__cc);
+ return __cc <= 1;
+}
+
+static inline __ATTRS_o_ai int
+vec_any_lt(vector bool long long __a, vector signed long long __b) {
+ int __cc;
+ __builtin_s390_vchgs(__b, (vector signed long long)__a, &__cc);
+ return __cc <= 1;
+}
+
+static inline __ATTRS_o_ai int
+vec_any_lt(vector unsigned long long __a, vector unsigned long long __b) {
+ int __cc;
+ __builtin_s390_vchlgs(__b, __a, &__cc);
+ return __cc <= 1;
+}
+
+static inline __ATTRS_o_ai int
+vec_any_lt(vector unsigned long long __a, vector bool long long __b) {
+ int __cc;
+ __builtin_s390_vchlgs((vector unsigned long long)__b, __a, &__cc);
+ return __cc <= 1;
+}
+
+static inline __ATTRS_o_ai int
+vec_any_lt(vector bool long long __a, vector unsigned long long __b) {
+ int __cc;
+ __builtin_s390_vchlgs(__b, (vector unsigned long long)__a, &__cc);
+ return __cc <= 1;
+}
+
+static inline __ATTRS_o_ai int
+vec_any_lt(vector bool long long __a, vector bool long long __b) {
+ int __cc;
+ __builtin_s390_vchlgs((vector unsigned long long)__b,
+ (vector unsigned long long)__a, &__cc);
+ return __cc <= 1;
+}
+
+static inline __ATTRS_o_ai int
+vec_any_lt(vector double __a, vector double __b) {
+ int __cc;
+ __builtin_s390_vfchdbs(__b, __a, &__cc);
+ return __cc <= 1;
+}
+
+/*-- vec_any_nge ------------------------------------------------------------*/
+
+static inline __ATTRS_ai int
+vec_any_nge(vector double __a, vector double __b) {
+ int __cc;
+ __builtin_s390_vfchedbs(__a, __b, &__cc);
+ return __cc != 0;
+}
+
+/*-- vec_any_ngt ------------------------------------------------------------*/
+
+static inline __ATTRS_ai int
+vec_any_ngt(vector double __a, vector double __b) {
+ int __cc;
+ __builtin_s390_vfchdbs(__a, __b, &__cc);
+ return __cc != 0;
+}
+
+/*-- vec_any_nle ------------------------------------------------------------*/
+
+static inline __ATTRS_ai int
+vec_any_nle(vector double __a, vector double __b) {
+ int __cc;
+ __builtin_s390_vfchedbs(__b, __a, &__cc);
+ return __cc != 0;
+}
+
+/*-- vec_any_nlt ------------------------------------------------------------*/
+
+static inline __ATTRS_ai int
+vec_any_nlt(vector double __a, vector double __b) {
+ int __cc;
+ __builtin_s390_vfchdbs(__b, __a, &__cc);
+ return __cc != 0;
+}
+
+/*-- vec_any_nan ------------------------------------------------------------*/
+
+static inline __ATTRS_ai int
+vec_any_nan(vector double __a) {
+ int __cc;
+ __builtin_s390_vftcidb(__a, 15, &__cc);
+ return __cc != 3;
+}
+
+/*-- vec_any_numeric --------------------------------------------------------*/
+
+static inline __ATTRS_ai int
+vec_any_numeric(vector double __a) {
+ int __cc;
+ __builtin_s390_vftcidb(__a, 15, &__cc);
+ return __cc != 0;
+}
+
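+/* Illustrative sketch (editorial): the class mask 15 passed to vftcidb
+ * selects the NaN data classes; condition code 0 means every element
+ * matched and 3 means none did, which yields the two predicates above:
+ *
+ *   vector double v = {0.0, __builtin_nan("")};
+ *   int has_nan = vec_any_nan(v);       // 1: the second lane is a NaN
+ *   int has_num = vec_any_numeric(v);   // 1: the first lane is numeric
+ */
+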
+/*-- vec_andc ---------------------------------------------------------------*/
+
+static inline __ATTRS_o_ai vector bool char
+vec_andc(vector bool char __a, vector bool char __b) {
+ return __a & ~__b;
+}
+
+static inline __ATTRS_o_ai vector signed char
+vec_andc(vector signed char __a, vector signed char __b) {
+ return __a & ~__b;
+}
+
+static inline __ATTRS_o_ai vector signed char
+vec_andc(vector bool char __a, vector signed char __b) {
+ return __a & ~__b;
+}
+
+static inline __ATTRS_o_ai vector signed char
+vec_andc(vector signed char __a, vector bool char __b) {
+ return __a & ~__b;
+}
+
+static inline __ATTRS_o_ai vector unsigned char
+vec_andc(vector unsigned char __a, vector unsigned char __b) {
+ return __a & ~__b;
+}
+
+static inline __ATTRS_o_ai vector unsigned char
+vec_andc(vector bool char __a, vector unsigned char __b) {
+ return __a & ~__b;
+}
+
+static inline __ATTRS_o_ai vector unsigned char
+vec_andc(vector unsigned char __a, vector bool char __b) {
+ return __a & ~__b;
+}
+
+static inline __ATTRS_o_ai vector bool short
+vec_andc(vector bool short __a, vector bool short __b) {
+ return __a & ~__b;
+}
+
+static inline __ATTRS_o_ai vector signed short
+vec_andc(vector signed short __a, vector signed short __b) {
+ return __a & ~__b;
+}
+
+static inline __ATTRS_o_ai vector signed short
+vec_andc(vector bool short __a, vector signed short __b) {
+ return __a & ~__b;
+}
+
+static inline __ATTRS_o_ai vector signed short
+vec_andc(vector signed short __a, vector bool short __b) {
+ return __a & ~__b;
+}
+
+static inline __ATTRS_o_ai vector unsigned short
+vec_andc(vector unsigned short __a, vector unsigned short __b) {
+ return __a & ~__b;
+}
+
+static inline __ATTRS_o_ai vector unsigned short
+vec_andc(vector bool short __a, vector unsigned short __b) {
+ return __a & ~__b;
+}
+
+static inline __ATTRS_o_ai vector unsigned short
+vec_andc(vector unsigned short __a, vector bool short __b) {
+ return __a & ~__b;
+}
+
+static inline __ATTRS_o_ai vector bool int
+vec_andc(vector bool int __a, vector bool int __b) {
+ return __a & ~__b;
+}
+
+static inline __ATTRS_o_ai vector signed int
+vec_andc(vector signed int __a, vector signed int __b) {
+ return __a & ~__b;
+}
+
+static inline __ATTRS_o_ai vector signed int
+vec_andc(vector bool int __a, vector signed int __b) {
+ return __a & ~__b;
+}
+
+static inline __ATTRS_o_ai vector signed int
+vec_andc(vector signed int __a, vector bool int __b) {
+ return __a & ~__b;
+}
+
+static inline __ATTRS_o_ai vector unsigned int
+vec_andc(vector unsigned int __a, vector unsigned int __b) {
+ return __a & ~__b;
+}
+
+static inline __ATTRS_o_ai vector unsigned int
+vec_andc(vector bool int __a, vector unsigned int __b) {
+ return __a & ~__b;
+}
+
+static inline __ATTRS_o_ai vector unsigned int
+vec_andc(vector unsigned int __a, vector bool int __b) {
+ return __a & ~__b;
+}
+
+static inline __ATTRS_o_ai vector bool long long
+vec_andc(vector bool long long __a, vector bool long long __b) {
+ return __a & ~__b;
+}
+
+static inline __ATTRS_o_ai vector signed long long
+vec_andc(vector signed long long __a, vector signed long long __b) {
+ return __a & ~__b;
+}
+
+static inline __ATTRS_o_ai vector signed long long
+vec_andc(vector bool long long __a, vector signed long long __b) {
+ return __a & ~__b;
+}
+
+static inline __ATTRS_o_ai vector signed long long
+vec_andc(vector signed long long __a, vector bool long long __b) {
+ return __a & ~__b;
+}
+
+static inline __ATTRS_o_ai vector unsigned long long
+vec_andc(vector unsigned long long __a, vector unsigned long long __b) {
+ return __a & ~__b;
+}
+
+static inline __ATTRS_o_ai vector unsigned long long
+vec_andc(vector bool long long __a, vector unsigned long long __b) {
+ return __a & ~__b;
+}
+
+static inline __ATTRS_o_ai vector unsigned long long
+vec_andc(vector unsigned long long __a, vector bool long long __b) {
+ return __a & ~__b;
+}
+
+static inline __ATTRS_o_ai vector double
+vec_andc(vector double __a, vector double __b) {
+ return (vector double)((vector unsigned long long)__a &
+ ~(vector unsigned long long)__b);
+}
+
+static inline __ATTRS_o_ai vector double
+vec_andc(vector bool long long __a, vector double __b) {
+ return (vector double)((vector unsigned long long)__a &
+ ~(vector unsigned long long)__b);
+}
+
+static inline __ATTRS_o_ai vector double
+vec_andc(vector double __a, vector bool long long __b) {
+ return (vector double)((vector unsigned long long)__a &
+ ~(vector unsigned long long)__b);
+}
+
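+/* Illustrative sketch (editorial): vec_andc(a, b) computes a & ~b per bit,
+ * the usual "clear the bits selected by b" idiom:
+ *
+ *   vector unsigned int flags = {0xFF, 0xFF, 0xFF, 0xFF};
+ *   vector unsigned int drop  = {0x0F, 0x0F, 0x0F, 0x0F};
+ *   vector unsigned int kept  = vec_andc(flags, drop);   // 0xF0 per lane
+ */
+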
+/*-- vec_nor ----------------------------------------------------------------*/
+
+static inline __ATTRS_o_ai vector bool char
+vec_nor(vector bool char __a, vector bool char __b) {
+ return ~(__a | __b);
+}
+
+static inline __ATTRS_o_ai vector signed char
+vec_nor(vector signed char __a, vector signed char __b) {
+ return ~(__a | __b);
+}
+
+static inline __ATTRS_o_ai vector signed char
+vec_nor(vector bool char __a, vector signed char __b) {
+ return ~(__a | __b);
+}
+
+static inline __ATTRS_o_ai vector signed char
+vec_nor(vector signed char __a, vector bool char __b) {
+ return ~(__a | __b);
+}
+
+static inline __ATTRS_o_ai vector unsigned char
+vec_nor(vector unsigned char __a, vector unsigned char __b) {
+ return ~(__a | __b);
+}
+
+static inline __ATTRS_o_ai vector unsigned char
+vec_nor(vector bool char __a, vector unsigned char __b) {
+ return ~(__a | __b);
+}
+
+static inline __ATTRS_o_ai vector unsigned char
+vec_nor(vector unsigned char __a, vector bool char __b) {
+ return ~(__a | __b);
+}
+
+static inline __ATTRS_o_ai vector bool short
+vec_nor(vector bool short __a, vector bool short __b) {
+ return ~(__a | __b);
+}
+
+static inline __ATTRS_o_ai vector signed short
+vec_nor(vector signed short __a, vector signed short __b) {
+ return ~(__a | __b);
+}
+
+static inline __ATTRS_o_ai vector signed short
+vec_nor(vector bool short __a, vector signed short __b) {
+ return ~(__a | __b);
+}
+
+static inline __ATTRS_o_ai vector signed short
+vec_nor(vector signed short __a, vector bool short __b) {
+ return ~(__a | __b);
+}
+
+static inline __ATTRS_o_ai vector unsigned short
+vec_nor(vector unsigned short __a, vector unsigned short __b) {
+ return ~(__a | __b);
+}
+
+static inline __ATTRS_o_ai vector unsigned short
+vec_nor(vector bool short __a, vector unsigned short __b) {
+ return ~(__a | __b);
+}
+
+static inline __ATTRS_o_ai vector unsigned short
+vec_nor(vector unsigned short __a, vector bool short __b) {
+ return ~(__a | __b);
+}
+
+static inline __ATTRS_o_ai vector bool int
+vec_nor(vector bool int __a, vector bool int __b) {
+ return ~(__a | __b);
+}
+
+static inline __ATTRS_o_ai vector signed int
+vec_nor(vector signed int __a, vector signed int __b) {
+ return ~(__a | __b);
+}
+
+static inline __ATTRS_o_ai vector signed int
+vec_nor(vector bool int __a, vector signed int __b) {
+ return ~(__a | __b);
+}
+
+static inline __ATTRS_o_ai vector signed int
+vec_nor(vector signed int __a, vector bool int __b) {
+ return ~(__a | __b);
+}
+
+static inline __ATTRS_o_ai vector unsigned int
+vec_nor(vector unsigned int __a, vector unsigned int __b) {
+ return ~(__a | __b);
+}
+
+static inline __ATTRS_o_ai vector unsigned int
+vec_nor(vector bool int __a, vector unsigned int __b) {
+ return ~(__a | __b);
+}
+
+static inline __ATTRS_o_ai vector unsigned int
+vec_nor(vector unsigned int __a, vector bool int __b) {
+ return ~(__a | __b);
+}
+
+static inline __ATTRS_o_ai vector bool long long
+vec_nor(vector bool long long __a, vector bool long long __b) {
+ return ~(__a | __b);
+}
+
+static inline __ATTRS_o_ai vector signed long long
+vec_nor(vector signed long long __a, vector signed long long __b) {
+ return ~(__a | __b);
+}
+
+static inline __ATTRS_o_ai vector signed long long
+vec_nor(vector bool long long __a, vector signed long long __b) {
+ return ~(__a | __b);
+}
+
+static inline __ATTRS_o_ai vector signed long long
+vec_nor(vector signed long long __a, vector bool long long __b) {
+ return ~(__a | __b);
+}
+
+static inline __ATTRS_o_ai vector unsigned long long
+vec_nor(vector unsigned long long __a, vector unsigned long long __b) {
+ return ~(__a | __b);
+}
+
+static inline __ATTRS_o_ai vector unsigned long long
+vec_nor(vector bool long long __a, vector unsigned long long __b) {
+ return ~(__a | __b);
+}
+
+static inline __ATTRS_o_ai vector unsigned long long
+vec_nor(vector unsigned long long __a, vector bool long long __b) {
+ return ~(__a | __b);
+}
+
+static inline __ATTRS_o_ai vector double
+vec_nor(vector double __a, vector double __b) {
+ return (vector double)~((vector unsigned long long)__a |
+ (vector unsigned long long)__b);
+}
+
+static inline __ATTRS_o_ai vector double
+vec_nor(vector bool long long __a, vector double __b) {
+ return (vector double)~((vector unsigned long long)__a |
+ (vector unsigned long long)__b);
+}
+
+static inline __ATTRS_o_ai vector double
+vec_nor(vector double __a, vector bool long long __b) {
+ return (vector double)~((vector unsigned long long)__a |
+ (vector unsigned long long)__b);
+}
+
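+/* Illustrative sketch (editorial): vec_nor(a, b) computes ~(a | b); passing
+ * the same operand twice is a common way to spell bitwise NOT:
+ *
+ *   vector unsigned int a = {0x0F, 0x0F, 0x0F, 0x0F};
+ *   vector unsigned int not_a = vec_nor(a, a);   // 0xFFFFFFF0 per lane
+ */
+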
+/*-- vec_cntlz --------------------------------------------------------------*/
+
+static inline __ATTRS_o_ai vector unsigned char
+vec_cntlz(vector signed char __a) {
+ return __builtin_s390_vclzb((vector unsigned char)__a);
+}
+
+static inline __ATTRS_o_ai vector unsigned char
+vec_cntlz(vector unsigned char __a) {
+ return __builtin_s390_vclzb(__a);
+}
+
+static inline __ATTRS_o_ai vector unsigned short
+vec_cntlz(vector signed short __a) {
+ return __builtin_s390_vclzh((vector unsigned short)__a);
+}
+
+static inline __ATTRS_o_ai vector unsigned short
+vec_cntlz(vector unsigned short __a) {
+ return __builtin_s390_vclzh(__a);
+}
+
+static inline __ATTRS_o_ai vector unsigned int
+vec_cntlz(vector signed int __a) {
+ return __builtin_s390_vclzf((vector unsigned int)__a);
+}
+
+static inline __ATTRS_o_ai vector unsigned int
+vec_cntlz(vector unsigned int __a) {
+ return __builtin_s390_vclzf(__a);
+}
+
+static inline __ATTRS_o_ai vector unsigned long long
+vec_cntlz(vector signed long long __a) {
+ return __builtin_s390_vclzg((vector unsigned long long)__a);
+}
+
+static inline __ATTRS_o_ai vector unsigned long long
+vec_cntlz(vector unsigned long long __a) {
+ return __builtin_s390_vclzg(__a);
+}
+
+/*-- vec_cnttz --------------------------------------------------------------*/
+
+static inline __ATTRS_o_ai vector unsigned char
+vec_cnttz(vector signed char __a) {
+ return __builtin_s390_vctzb((vector unsigned char)__a);
+}
+
+static inline __ATTRS_o_ai vector unsigned char
+vec_cnttz(vector unsigned char __a) {
+ return __builtin_s390_vctzb(__a);
+}
+
+static inline __ATTRS_o_ai vector unsigned short
+vec_cnttz(vector signed short __a) {
+ return __builtin_s390_vctzh((vector unsigned short)__a);
+}
+
+static inline __ATTRS_o_ai vector unsigned short
+vec_cnttz(vector unsigned short __a) {
+ return __builtin_s390_vctzh(__a);
+}
+
+static inline __ATTRS_o_ai vector unsigned int
+vec_cnttz(vector signed int __a) {
+ return __builtin_s390_vctzf((vector unsigned int)__a);
+}
+
+static inline __ATTRS_o_ai vector unsigned int
+vec_cnttz(vector unsigned int __a) {
+ return __builtin_s390_vctzf(__a);
+}
+
+static inline __ATTRS_o_ai vector unsigned long long
+vec_cnttz(vector signed long long __a) {
+ return __builtin_s390_vctzg((vector unsigned long long)__a);
+}
+
+static inline __ATTRS_o_ai vector unsigned long long
+vec_cnttz(vector unsigned long long __a) {
+ return __builtin_s390_vctzg(__a);
+}
+
+/*-- vec_popcnt -------------------------------------------------------------*/
+
+static inline __ATTRS_o_ai vector unsigned char
+vec_popcnt(vector signed char __a) {
+ return __builtin_s390_vpopctb((vector unsigned char)__a);
+}
+
+static inline __ATTRS_o_ai vector unsigned char
+vec_popcnt(vector unsigned char __a) {
+ return __builtin_s390_vpopctb(__a);
+}
+
+static inline __ATTRS_o_ai vector unsigned short
+vec_popcnt(vector signed short __a) {
+ return __builtin_s390_vpopcth((vector unsigned short)__a);
+}
+
+static inline __ATTRS_o_ai vector unsigned short
+vec_popcnt(vector unsigned short __a) {
+ return __builtin_s390_vpopcth(__a);
+}
+
+static inline __ATTRS_o_ai vector unsigned int
+vec_popcnt(vector signed int __a) {
+ return __builtin_s390_vpopctf((vector unsigned int)__a);
+}
+
+static inline __ATTRS_o_ai vector unsigned int
+vec_popcnt(vector unsigned int __a) {
+ return __builtin_s390_vpopctf(__a);
+}
+
+static inline __ATTRS_o_ai vector unsigned long long
+vec_popcnt(vector signed long long __a) {
+ return __builtin_s390_vpopctg((vector unsigned long long)__a);
+}
+
+static inline __ATTRS_o_ai vector unsigned long long
+vec_popcnt(vector unsigned long long __a) {
+ return __builtin_s390_vpopctg(__a);
+}
+
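+/* Illustrative sketch (editorial) of the three per-element bit counts above:
+ *
+ *   vector unsigned int v = {1, 8, 0x80000000, 0};
+ *   vec_cntlz(v);    // {31, 28,  0, 32}  leading zeros
+ *   vec_cnttz(v);    // { 0,  3, 31, 32}  trailing zeros
+ *   vec_popcnt(v);   // { 1,  1,  1,  0}  set bits
+ */
+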
+/*-- vec_rl -----------------------------------------------------------------*/
+
+static inline __ATTRS_o_ai vector signed char
+vec_rl(vector signed char __a, vector unsigned char __b) {
+ return (vector signed char)__builtin_s390_verllvb(
+ (vector unsigned char)__a, __b);
+}
+
+static inline __ATTRS_o_ai vector unsigned char
+vec_rl(vector unsigned char __a, vector unsigned char __b) {
+ return __builtin_s390_verllvb(__a, __b);
+}
+
+static inline __ATTRS_o_ai vector signed short
+vec_rl(vector signed short __a, vector unsigned short __b) {
+ return (vector signed short)__builtin_s390_verllvh(
+ (vector unsigned short)__a, __b);
+}
+
+static inline __ATTRS_o_ai vector unsigned short
+vec_rl(vector unsigned short __a, vector unsigned short __b) {
+ return __builtin_s390_verllvh(__a, __b);
+}
+
+static inline __ATTRS_o_ai vector signed int
+vec_rl(vector signed int __a, vector unsigned int __b) {
+ return (vector signed int)__builtin_s390_verllvf(
+ (vector unsigned int)__a, __b);
+}
+
+static inline __ATTRS_o_ai vector unsigned int
+vec_rl(vector unsigned int __a, vector unsigned int __b) {
+ return __builtin_s390_verllvf(__a, __b);
+}
+
+static inline __ATTRS_o_ai vector signed long long
+vec_rl(vector signed long long __a, vector unsigned long long __b) {
+ return (vector signed long long)__builtin_s390_verllvg(
+ (vector unsigned long long)__a, __b);
+}
+
+static inline __ATTRS_o_ai vector unsigned long long
+vec_rl(vector unsigned long long __a, vector unsigned long long __b) {
+ return __builtin_s390_verllvg(__a, __b);
+}
+
+/*-- vec_rli ----------------------------------------------------------------*/
+
+static inline __ATTRS_o_ai vector signed char
+vec_rli(vector signed char __a, unsigned long __b) {
+ return (vector signed char)__builtin_s390_verllb(
+ (vector unsigned char)__a, (int)__b);
+}
+
+static inline __ATTRS_o_ai vector unsigned char
+vec_rli(vector unsigned char __a, unsigned long __b) {
+ return __builtin_s390_verllb(__a, (int)__b);
+}
+
+static inline __ATTRS_o_ai vector signed short
+vec_rli(vector signed short __a, unsigned long __b) {
+ return (vector signed short)__builtin_s390_verllh(
+ (vector unsigned short)__a, (int)__b);
+}
+
+static inline __ATTRS_o_ai vector unsigned short
+vec_rli(vector unsigned short __a, unsigned long __b) {
+ return __builtin_s390_verllh(__a, (int)__b);
+}
+
+static inline __ATTRS_o_ai vector signed int
+vec_rli(vector signed int __a, unsigned long __b) {
+ return (vector signed int)__builtin_s390_verllf(
+ (vector unsigned int)__a, (int)__b);
+}
+
+static inline __ATTRS_o_ai vector unsigned int
+vec_rli(vector unsigned int __a, unsigned long __b) {
+ return __builtin_s390_verllf(__a, (int)__b);
+}
+
+static inline __ATTRS_o_ai vector signed long long
+vec_rli(vector signed long long __a, unsigned long __b) {
+ return (vector signed long long)__builtin_s390_verllg(
+ (vector unsigned long long)__a, (int)__b);
+}
+
+static inline __ATTRS_o_ai vector unsigned long long
+vec_rli(vector unsigned long long __a, unsigned long __b) {
+ return __builtin_s390_verllg(__a, (int)__b);
+}
+
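+/* Illustrative sketch (editorial): vec_rl rotates each element left by the
+ * matching element of the second operand; vec_rli uses one scalar count for
+ * every element.  Rotated-out bits wrap around:
+ *
+ *   vector unsigned int v = {0x80000001, 0x80000001, 0x80000001, 0x80000001};
+ *   vec_rli(v, 1);   // 0x00000003 per lane: bit 31 wraps to bit 0
+ */
+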
+/*-- vec_rl_mask ------------------------------------------------------------*/
+
+extern __ATTRS_o vector signed char
+vec_rl_mask(vector signed char __a, vector unsigned char __b,
+ unsigned char __c) __constant(__c);
+
+extern __ATTRS_o vector unsigned char
+vec_rl_mask(vector unsigned char __a, vector unsigned char __b,
+ unsigned char __c) __constant(__c);
+
+extern __ATTRS_o vector signed short
+vec_rl_mask(vector signed short __a, vector unsigned short __b,
+ unsigned char __c) __constant(__c);
+
+extern __ATTRS_o vector unsigned short
+vec_rl_mask(vector unsigned short __a, vector unsigned short __b,
+ unsigned char __c) __constant(__c);
+
+extern __ATTRS_o vector signed int
+vec_rl_mask(vector signed int __a, vector unsigned int __b,
+ unsigned char __c) __constant(__c);
+
+extern __ATTRS_o vector unsigned int
+vec_rl_mask(vector unsigned int __a, vector unsigned int __b,
+ unsigned char __c) __constant(__c);
+
+extern __ATTRS_o vector signed long long
+vec_rl_mask(vector signed long long __a, vector unsigned long long __b,
+ unsigned char __c) __constant(__c);
+
+extern __ATTRS_o vector unsigned long long
+vec_rl_mask(vector unsigned long long __a, vector unsigned long long __b,
+ unsigned char __c) __constant(__c);
+
+#define vec_rl_mask(X, Y, Z) ((__typeof__((vec_rl_mask)((X), (Y), (Z)))) \
+ __extension__ ({ \
+ vector unsigned char __res; \
+ vector unsigned char __x = (vector unsigned char)(X); \
+ vector unsigned char __y = (vector unsigned char)(Y); \
+ switch (sizeof ((X)[0])) { \
+ case 1: __res = (vector unsigned char) __builtin_s390_verimb( \
+ (vector unsigned char)__x, (vector unsigned char)__x, \
+ (vector unsigned char)__y, (Z)); break; \
+ case 2: __res = (vector unsigned char) __builtin_s390_verimh( \
+ (vector unsigned short)__x, (vector unsigned short)__x, \
+ (vector unsigned short)__y, (Z)); break; \
+ case 4: __res = (vector unsigned char) __builtin_s390_verimf( \
+ (vector unsigned int)__x, (vector unsigned int)__x, \
+ (vector unsigned int)__y, (Z)); break; \
+ default: __res = (vector unsigned char) __builtin_s390_verimg( \
+ (vector unsigned long long)__x, (vector unsigned long long)__x, \
+ (vector unsigned long long)__y, (Z)); break; \
+ } __res; }))
+
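+/* Editorial note on the pattern above: the extern declarations exist only to
+ * give vec_rl_mask a signature for overload resolution; the macro shadows
+ * them, dispatches on the element size via sizeof((X)[0]), and forwards Z
+ * textually so the verim* builtins receive the compile-time constant they
+ * require.  Writing (vec_rl_mask)((X), (Y), (Z)) inside __typeof__ bypasses
+ * the function-like macro and refers back to those declarations:
+ *
+ *   vector unsigned int r = vec_rl_mask(v, m, 3);   // 3 must be a constant
+ */
+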
+/*-- vec_sll ----------------------------------------------------------------*/
+
+static inline __ATTRS_o_ai vector signed char
+vec_sll(vector signed char __a, vector unsigned char __b) {
+ return (vector signed char)__builtin_s390_vsl(
+ (vector unsigned char)__a, __b);
+}
+
+static inline __ATTRS_o_ai vector signed char
+vec_sll(vector signed char __a, vector unsigned short __b) {
+ return (vector signed char)__builtin_s390_vsl(
+ (vector unsigned char)__a, (vector unsigned char)__b);
+}
+
+static inline __ATTRS_o_ai vector signed char
+vec_sll(vector signed char __a, vector unsigned int __b) {
+ return (vector signed char)__builtin_s390_vsl(
+ (vector unsigned char)__a, (vector unsigned char)__b);
+}
+
+static inline __ATTRS_o_ai vector bool char
+vec_sll(vector bool char __a, vector unsigned char __b) {
+ return (vector bool char)__builtin_s390_vsl(
+ (vector unsigned char)__a, __b);
+}
+
+static inline __ATTRS_o_ai vector bool char
+vec_sll(vector bool char __a, vector unsigned short __b) {
+ return (vector bool char)__builtin_s390_vsl(
+ (vector unsigned char)__a, (vector unsigned char)__b);
+}
+
+static inline __ATTRS_o_ai vector bool char
+vec_sll(vector bool char __a, vector unsigned int __b) {
+ return (vector bool char)__builtin_s390_vsl(
+ (vector unsigned char)__a, (vector unsigned char)__b);
+}
+
+static inline __ATTRS_o_ai vector unsigned char
+vec_sll(vector unsigned char __a, vector unsigned char __b) {
+ return __builtin_s390_vsl(__a, __b);
+}
+
+static inline __ATTRS_o_ai vector unsigned char
+vec_sll(vector unsigned char __a, vector unsigned short __b) {
+ return __builtin_s390_vsl(__a, (vector unsigned char)__b);
+}
+
+static inline __ATTRS_o_ai vector unsigned char
+vec_sll(vector unsigned char __a, vector unsigned int __b) {
+ return __builtin_s390_vsl(__a, (vector unsigned char)__b);
+}
+
+static inline __ATTRS_o_ai vector signed short
+vec_sll(vector signed short __a, vector unsigned char __b) {
+ return (vector signed short)__builtin_s390_vsl(
+ (vector unsigned char)__a, __b);
+}
+
+static inline __ATTRS_o_ai vector signed short
+vec_sll(vector signed short __a, vector unsigned short __b) {
+ return (vector signed short)__builtin_s390_vsl(
+ (vector unsigned char)__a, (vector unsigned char)__b);
+}
+
+static inline __ATTRS_o_ai vector signed short
+vec_sll(vector signed short __a, vector unsigned int __b) {
+ return (vector signed short)__builtin_s390_vsl(
+ (vector unsigned char)__a, (vector unsigned char)__b);
+}
+
+static inline __ATTRS_o_ai vector bool short
+vec_sll(vector bool short __a, vector unsigned char __b) {
+ return (vector bool short)__builtin_s390_vsl(
+ (vector unsigned char)__a, __b);
+}
+
+static inline __ATTRS_o_ai vector bool short
+vec_sll(vector bool short __a, vector unsigned short __b) {
+ return (vector bool short)__builtin_s390_vsl(
+ (vector unsigned char)__a, (vector unsigned char)__b);
+}
+
+static inline __ATTRS_o_ai vector bool short
+vec_sll(vector bool short __a, vector unsigned int __b) {
+ return (vector bool short)__builtin_s390_vsl(
+ (vector unsigned char)__a, (vector unsigned char)__b);
+}
+
+static inline __ATTRS_o_ai vector unsigned short
+vec_sll(vector unsigned short __a, vector unsigned char __b) {
+ return (vector unsigned short)__builtin_s390_vsl(
+ (vector unsigned char)__a, __b);
+}
+
+static inline __ATTRS_o_ai vector unsigned short
+vec_sll(vector unsigned short __a, vector unsigned short __b) {
+ return (vector unsigned short)__builtin_s390_vsl(
+ (vector unsigned char)__a, (vector unsigned char)__b);
+}
+
+static inline __ATTRS_o_ai vector unsigned short
+vec_sll(vector unsigned short __a, vector unsigned int __b) {
+ return (vector unsigned short)__builtin_s390_vsl(
+ (vector unsigned char)__a, (vector unsigned char)__b);
+}
+
+static inline __ATTRS_o_ai vector signed int
+vec_sll(vector signed int __a, vector unsigned char __b) {
+ return (vector signed int)__builtin_s390_vsl(
+ (vector unsigned char)__a, __b);
+}
+
+static inline __ATTRS_o_ai vector signed int
+vec_sll(vector signed int __a, vector unsigned short __b) {
+ return (vector signed int)__builtin_s390_vsl(
+ (vector unsigned char)__a, (vector unsigned char)__b);
+}
+
+static inline __ATTRS_o_ai vector signed int
+vec_sll(vector signed int __a, vector unsigned int __b) {
+ return (vector signed int)__builtin_s390_vsl(
+ (vector unsigned char)__a, (vector unsigned char)__b);
+}
+
+static inline __ATTRS_o_ai vector bool int
+vec_sll(vector bool int __a, vector unsigned char __b) {
+ return (vector bool int)__builtin_s390_vsl(
+ (vector unsigned char)__a, __b);
+}
+
+static inline __ATTRS_o_ai vector bool int
+vec_sll(vector bool int __a, vector unsigned short __b) {
+ return (vector bool int)__builtin_s390_vsl(
+ (vector unsigned char)__a, (vector unsigned char)__b);
+}
+
+static inline __ATTRS_o_ai vector bool int
+vec_sll(vector bool int __a, vector unsigned int __b) {
+ return (vector bool int)__builtin_s390_vsl(
+ (vector unsigned char)__a, (vector unsigned char)__b);
+}
+
+static inline __ATTRS_o_ai vector unsigned int
+vec_sll(vector unsigned int __a, vector unsigned char __b) {
+ return (vector unsigned int)__builtin_s390_vsl(
+ (vector unsigned char)__a, __b);
+}
+
+static inline __ATTRS_o_ai vector unsigned int
+vec_sll(vector unsigned int __a, vector unsigned short __b) {
+ return (vector unsigned int)__builtin_s390_vsl(
+ (vector unsigned char)__a, (vector unsigned char)__b);
+}
+
+static inline __ATTRS_o_ai vector unsigned int
+vec_sll(vector unsigned int __a, vector unsigned int __b) {
+ return (vector unsigned int)__builtin_s390_vsl(
+ (vector unsigned char)__a, (vector unsigned char)__b);
+}
+
+static inline __ATTRS_o_ai vector signed long long
+vec_sll(vector signed long long __a, vector unsigned char __b) {
+ return (vector signed long long)__builtin_s390_vsl(
+ (vector unsigned char)__a, __b);
+}
+
+static inline __ATTRS_o_ai vector signed long long
+vec_sll(vector signed long long __a, vector unsigned short __b) {
+ return (vector signed long long)__builtin_s390_vsl(
+ (vector unsigned char)__a, (vector unsigned char)__b);
+}
+
+static inline __ATTRS_o_ai vector signed long long
+vec_sll(vector signed long long __a, vector unsigned int __b) {
+ return (vector signed long long)__builtin_s390_vsl(
+ (vector unsigned char)__a, (vector unsigned char)__b);
+}
+
+static inline __ATTRS_o_ai vector bool long long
+vec_sll(vector bool long long __a, vector unsigned char __b) {
+ return (vector bool long long)__builtin_s390_vsl(
+ (vector unsigned char)__a, __b);
+}
+
+static inline __ATTRS_o_ai vector bool long long
+vec_sll(vector bool long long __a, vector unsigned short __b) {
+ return (vector bool long long)__builtin_s390_vsl(
+ (vector unsigned char)__a, (vector unsigned char)__b);
+}
+
+static inline __ATTRS_o_ai vector bool long long
+vec_sll(vector bool long long __a, vector unsigned int __b) {
+ return (vector bool long long)__builtin_s390_vsl(
+ (vector unsigned char)__a, (vector unsigned char)__b);
+}
+
+static inline __ATTRS_o_ai vector unsigned long long
+vec_sll(vector unsigned long long __a, vector unsigned char __b) {
+ return (vector unsigned long long)__builtin_s390_vsl(
+ (vector unsigned char)__a, __b);
+}
+
+static inline __ATTRS_o_ai vector unsigned long long
+vec_sll(vector unsigned long long __a, vector unsigned short __b) {
+ return (vector unsigned long long)__builtin_s390_vsl(
+ (vector unsigned char)__a, (vector unsigned char)__b);
+}
+
+static inline __ATTRS_o_ai vector unsigned long long
+vec_sll(vector unsigned long long __a, vector unsigned int __b) {
+ return (vector unsigned long long)__builtin_s390_vsl(
+ (vector unsigned char)__a, (vector unsigned char)__b);
+}
+
+/*-- vec_slb ----------------------------------------------------------------*/
+
+static inline __ATTRS_o_ai vector signed char
+vec_slb(vector signed char __a, vector signed char __b) {
+ return (vector signed char)__builtin_s390_vslb(
+ (vector unsigned char)__a, (vector unsigned char)__b);
+}
+
+static inline __ATTRS_o_ai vector signed char
+vec_slb(vector signed char __a, vector unsigned char __b) {
+ return (vector signed char)__builtin_s390_vslb(
+ (vector unsigned char)__a, __b);
+}
+
+static inline __ATTRS_o_ai vector unsigned char
+vec_slb(vector unsigned char __a, vector signed char __b) {
+ return __builtin_s390_vslb(__a, (vector unsigned char)__b);
+}
+
+static inline __ATTRS_o_ai vector unsigned char
+vec_slb(vector unsigned char __a, vector unsigned char __b) {
+ return __builtin_s390_vslb(__a, __b);
+}
+
+static inline __ATTRS_o_ai vector signed short
+vec_slb(vector signed short __a, vector signed short __b) {
+ return (vector signed short)__builtin_s390_vslb(
+ (vector unsigned char)__a, (vector unsigned char)__b);
+}
+
+static inline __ATTRS_o_ai vector signed short
+vec_slb(vector signed short __a, vector unsigned short __b) {
+ return (vector signed short)__builtin_s390_vslb(
+ (vector unsigned char)__a, (vector unsigned char)__b);
+}
+
+static inline __ATTRS_o_ai vector unsigned short
+vec_slb(vector unsigned short __a, vector signed short __b) {
+ return (vector unsigned short)__builtin_s390_vslb(
+ (vector unsigned char)__a, (vector unsigned char)__b);
+}
+
+static inline __ATTRS_o_ai vector unsigned short
+vec_slb(vector unsigned short __a, vector unsigned short __b) {
+ return (vector unsigned short)__builtin_s390_vslb(
+ (vector unsigned char)__a, (vector unsigned char)__b);
+}
+
+static inline __ATTRS_o_ai vector signed int
+vec_slb(vector signed int __a, vector signed int __b) {
+ return (vector signed int)__builtin_s390_vslb(
+ (vector unsigned char)__a, (vector unsigned char)__b);
+}
+
+static inline __ATTRS_o_ai vector signed int
+vec_slb(vector signed int __a, vector unsigned int __b) {
+ return (vector signed int)__builtin_s390_vslb(
+ (vector unsigned char)__a, (vector unsigned char)__b);
+}
+
+static inline __ATTRS_o_ai vector unsigned int
+vec_slb(vector unsigned int __a, vector signed int __b) {
+ return (vector unsigned int)__builtin_s390_vslb(
+ (vector unsigned char)__a, (vector unsigned char)__b);
+}
+
+static inline __ATTRS_o_ai vector unsigned int
+vec_slb(vector unsigned int __a, vector unsigned int __b) {
+ return (vector unsigned int)__builtin_s390_vslb(
+ (vector unsigned char)__a, (vector unsigned char)__b);
+}
+
+static inline __ATTRS_o_ai vector signed long long
+vec_slb(vector signed long long __a, vector signed long long __b) {
+ return (vector signed long long)__builtin_s390_vslb(
+ (vector unsigned char)__a, (vector unsigned char)__b);
+}
+
+static inline __ATTRS_o_ai vector signed long long
+vec_slb(vector signed long long __a, vector unsigned long long __b) {
+ return (vector signed long long)__builtin_s390_vslb(
+ (vector unsigned char)__a, (vector unsigned char)__b);
+}
+
+static inline __ATTRS_o_ai vector unsigned long long
+vec_slb(vector unsigned long long __a, vector signed long long __b) {
+ return (vector unsigned long long)__builtin_s390_vslb(
+ (vector unsigned char)__a, (vector unsigned char)__b);
+}
+
+static inline __ATTRS_o_ai vector unsigned long long
+vec_slb(vector unsigned long long __a, vector unsigned long long __b) {
+ return (vector unsigned long long)__builtin_s390_vslb(
+ (vector unsigned char)__a, (vector unsigned char)__b);
+}
+
+static inline __ATTRS_o_ai vector double
+vec_slb(vector double __a, vector signed long long __b) {
+ return (vector double)__builtin_s390_vslb(
+ (vector unsigned char)__a, (vector unsigned char)__b);
+}
+
+static inline __ATTRS_o_ai vector double
+vec_slb(vector double __a, vector unsigned long long __b) {
+ return (vector double)__builtin_s390_vslb(
+ (vector unsigned char)__a, (vector unsigned char)__b);
+}
+
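+/* Editorial note: unlike the element-wise shifts, vec_sll and vec_slb shift
+ * the entire 128-bit register, vec_sll by a bit count (0-7) and vec_slb by
+ * whole bytes, both taken from the second operand.  Chaining them is the
+ * classic idiom for a full 0-127 bit shift; a hypothetical helper, assuming
+ * every byte of the shift operand holds the same bit count n:
+ *
+ *   static inline vector unsigned char
+ *   shift_left_bits(vector unsigned char v, unsigned char n) {
+ *     vector unsigned char amt = vec_splats(n);
+ *     return vec_sll(vec_slb(v, amt), amt);   // bytes first, then bits
+ *   }
+ */
+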
+/*-- vec_sld ----------------------------------------------------------------*/
+
+extern __ATTRS_o vector signed char
+vec_sld(vector signed char __a, vector signed char __b, int __c)
+ __constant_range(__c, 0, 15);
+
+extern __ATTRS_o vector unsigned char
+vec_sld(vector unsigned char __a, vector unsigned char __b, int __c)
+ __constant_range(__c, 0, 15);
+
+extern __ATTRS_o vector signed short
+vec_sld(vector signed short __a, vector signed short __b, int __c)
+ __constant_range(__c, 0, 15);
+
+extern __ATTRS_o vector unsigned short
+vec_sld(vector unsigned short __a, vector unsigned short __b, int __c)
+ __constant_range(__c, 0, 15);
+
+extern __ATTRS_o vector signed int
+vec_sld(vector signed int __a, vector signed int __b, int __c)
+ __constant_range(__c, 0, 15);
+
+extern __ATTRS_o vector unsigned int
+vec_sld(vector unsigned int __a, vector unsigned int __b, int __c)
+ __constant_range(__c, 0, 15);
+
+extern __ATTRS_o vector signed long long
+vec_sld(vector signed long long __a, vector signed long long __b, int __c)
+ __constant_range(__c, 0, 15);
+
+extern __ATTRS_o vector unsigned long long
+vec_sld(vector unsigned long long __a, vector unsigned long long __b, int __c)
+ __constant_range(__c, 0, 15);
+
+extern __ATTRS_o vector double
+vec_sld(vector double __a, vector double __b, int __c)
+ __constant_range(__c, 0, 15);
+
+#define vec_sld(X, Y, Z) ((__typeof__((vec_sld)((X), (Y), (Z)))) \
+ __builtin_s390_vsldb((vector unsigned char)(X), \
+ (vector unsigned char)(Y), (Z)))
+
+/*-- vec_sldw ---------------------------------------------------------------*/
+
+extern __ATTRS_o vector signed char
+vec_sldw(vector signed char __a, vector signed char __b, int __c)
+ __constant_range(__c, 0, 3);
+
+extern __ATTRS_o vector unsigned char
+vec_sldw(vector unsigned char __a, vector unsigned char __b, int __c)
+ __constant_range(__c, 0, 3);
+
+extern __ATTRS_o vector signed short
+vec_sldw(vector signed short __a, vector signed short __b, int __c)
+ __constant_range(__c, 0, 3);
+
+extern __ATTRS_o vector unsigned short
+vec_sldw(vector unsigned short __a, vector unsigned short __b, int __c)
+ __constant_range(__c, 0, 3);
+
+extern __ATTRS_o vector signed int
+vec_sldw(vector signed int __a, vector signed int __b, int __c)
+ __constant_range(__c, 0, 3);
+
+extern __ATTRS_o vector unsigned int
+vec_sldw(vector unsigned int __a, vector unsigned int __b, int __c)
+ __constant_range(__c, 0, 3);
+
+extern __ATTRS_o vector signed long long
+vec_sldw(vector signed long long __a, vector signed long long __b, int __c)
+ __constant_range(__c, 0, 3);
+
+extern __ATTRS_o vector unsigned long long
+vec_sldw(vector unsigned long long __a, vector unsigned long long __b, int __c)
+ __constant_range(__c, 0, 3);
+
+extern __ATTRS_o vector double
+vec_sldw(vector double __a, vector double __b, int __c)
+ __constant_range(__c, 0, 3);
+
+#define vec_sldw(X, Y, Z) ((__typeof__((vec_sldw)((X), (Y), (Z)))) \
+ __builtin_s390_vsldb((vector unsigned char)(X), \
+ (vector unsigned char)(Y), (Z) * 4))
+
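+/* Illustrative sketch (editorial): vec_sld concatenates its operands and
+ * takes 16 bytes starting Z bytes in; vec_sldw counts in 4-byte words,
+ * hence the (Z) * 4 in its macro:
+ *
+ *   // with a = bytes 0..15 and b = bytes 16..31
+ *   vec_sld(a, b, 3);    // bytes 3..18 of the concatenation
+ *   vec_sldw(a, b, 1);   // same as vec_sld(a, b, 4)
+ */
+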
+/*-- vec_sral ---------------------------------------------------------------*/
+
+static inline __ATTRS_o_ai vector signed char
+vec_sral(vector signed char __a, vector unsigned char __b) {
+ return (vector signed char)__builtin_s390_vsra(
+ (vector unsigned char)__a, __b);
+}
+
+static inline __ATTRS_o_ai vector signed char
+vec_sral(vector signed char __a, vector unsigned short __b) {
+ return (vector signed char)__builtin_s390_vsra(
+ (vector unsigned char)__a, (vector unsigned char)__b);
+}
+
+static inline __ATTRS_o_ai vector signed char
+vec_sral(vector signed char __a, vector unsigned int __b) {
+ return (vector signed char)__builtin_s390_vsra(
+ (vector unsigned char)__a, (vector unsigned char)__b);
+}
+
+static inline __ATTRS_o_ai vector bool char
+vec_sral(vector bool char __a, vector unsigned char __b) {
+ return (vector bool char)__builtin_s390_vsra(
+ (vector unsigned char)__a, __b);
+}
+
+static inline __ATTRS_o_ai vector bool char
+vec_sral(vector bool char __a, vector unsigned short __b) {
+ return (vector bool char)__builtin_s390_vsra(
+ (vector unsigned char)__a, (vector unsigned char)__b);
+}
+
+static inline __ATTRS_o_ai vector bool char
+vec_sral(vector bool char __a, vector unsigned int __b) {
+ return (vector bool char)__builtin_s390_vsra(
+ (vector unsigned char)__a, (vector unsigned char)__b);
+}
+
+static inline __ATTRS_o_ai vector unsigned char
+vec_sral(vector unsigned char __a, vector unsigned char __b) {
+ return __builtin_s390_vsra(__a, __b);
+}
+
+static inline __ATTRS_o_ai vector unsigned char
+vec_sral(vector unsigned char __a, vector unsigned short __b) {
+ return __builtin_s390_vsra(__a, (vector unsigned char)__b);
+}
+
+static inline __ATTRS_o_ai vector unsigned char
+vec_sral(vector unsigned char __a, vector unsigned int __b) {
+ return __builtin_s390_vsra(__a, (vector unsigned char)__b);
+}
+
+static inline __ATTRS_o_ai vector signed short
+vec_sral(vector signed short __a, vector unsigned char __b) {
+ return (vector signed short)__builtin_s390_vsra(
+ (vector unsigned char)__a, __b);
+}
+
+static inline __ATTRS_o_ai vector signed short
+vec_sral(vector signed short __a, vector unsigned short __b) {
+ return (vector signed short)__builtin_s390_vsra(
+ (vector unsigned char)__a, (vector unsigned char)__b);
+}
+
+static inline __ATTRS_o_ai vector signed short
+vec_sral(vector signed short __a, vector unsigned int __b) {
+ return (vector signed short)__builtin_s390_vsra(
+ (vector unsigned char)__a, (vector unsigned char)__b);
+}
+
+static inline __ATTRS_o_ai vector bool short
+vec_sral(vector bool short __a, vector unsigned char __b) {
+ return (vector bool short)__builtin_s390_vsra(
+ (vector unsigned char)__a, __b);
+}
+
+static inline __ATTRS_o_ai vector bool short
+vec_sral(vector bool short __a, vector unsigned short __b) {
+ return (vector bool short)__builtin_s390_vsra(
+ (vector unsigned char)__a, (vector unsigned char)__b);
+}
+
+static inline __ATTRS_o_ai vector bool short
+vec_sral(vector bool short __a, vector unsigned int __b) {
+ return (vector bool short)__builtin_s390_vsra(
+ (vector unsigned char)__a, (vector unsigned char)__b);
+}
+
+static inline __ATTRS_o_ai vector unsigned short
+vec_sral(vector unsigned short __a, vector unsigned char __b) {
+ return (vector unsigned short)__builtin_s390_vsra(
+ (vector unsigned char)__a, __b);
+}
+
+static inline __ATTRS_o_ai vector unsigned short
+vec_sral(vector unsigned short __a, vector unsigned short __b) {
+ return (vector unsigned short)__builtin_s390_vsra(
+ (vector unsigned char)__a, (vector unsigned char)__b);
+}
+
+static inline __ATTRS_o_ai vector unsigned short
+vec_sral(vector unsigned short __a, vector unsigned int __b) {
+ return (vector unsigned short)__builtin_s390_vsra(
+ (vector unsigned char)__a, (vector unsigned char)__b);
+}
+
+static inline __ATTRS_o_ai vector signed int
+vec_sral(vector signed int __a, vector unsigned char __b) {
+ return (vector signed int)__builtin_s390_vsra(
+ (vector unsigned char)__a, __b);
+}
+
+static inline __ATTRS_o_ai vector signed int
+vec_sral(vector signed int __a, vector unsigned short __b) {
+ return (vector signed int)__builtin_s390_vsra(
+ (vector unsigned char)__a, (vector unsigned char)__b);
+}
+
+static inline __ATTRS_o_ai vector signed int
+vec_sral(vector signed int __a, vector unsigned int __b) {
+ return (vector signed int)__builtin_s390_vsra(
+ (vector unsigned char)__a, (vector unsigned char)__b);
+}
+
+static inline __ATTRS_o_ai vector bool int
+vec_sral(vector bool int __a, vector unsigned char __b) {
+ return (vector bool int)__builtin_s390_vsra(
+ (vector unsigned char)__a, __b);
+}
+
+static inline __ATTRS_o_ai vector bool int
+vec_sral(vector bool int __a, vector unsigned short __b) {
+ return (vector bool int)__builtin_s390_vsra(
+ (vector unsigned char)__a, (vector unsigned char)__b);
+}
+
+static inline __ATTRS_o_ai vector bool int
+vec_sral(vector bool int __a, vector unsigned int __b) {
+ return (vector bool int)__builtin_s390_vsra(
+ (vector unsigned char)__a, (vector unsigned char)__b);
+}
+
+static inline __ATTRS_o_ai vector unsigned int
+vec_sral(vector unsigned int __a, vector unsigned char __b) {
+ return (vector unsigned int)__builtin_s390_vsra(
+ (vector unsigned char)__a, __b);
+}
+
+static inline __ATTRS_o_ai vector unsigned int
+vec_sral(vector unsigned int __a, vector unsigned short __b) {
+ return (vector unsigned int)__builtin_s390_vsra(
+ (vector unsigned char)__a, (vector unsigned char)__b);
+}
+
+static inline __ATTRS_o_ai vector unsigned int
+vec_sral(vector unsigned int __a, vector unsigned int __b) {
+ return (vector unsigned int)__builtin_s390_vsra(
+ (vector unsigned char)__a, (vector unsigned char)__b);
+}
+
+static inline __ATTRS_o_ai vector signed long long
+vec_sral(vector signed long long __a, vector unsigned char __b) {
+ return (vector signed long long)__builtin_s390_vsra(
+ (vector unsigned char)__a, __b);
+}
+
+static inline __ATTRS_o_ai vector signed long long
+vec_sral(vector signed long long __a, vector unsigned short __b) {
+ return (vector signed long long)__builtin_s390_vsra(
+ (vector unsigned char)__a, (vector unsigned char)__b);
+}
+
+static inline __ATTRS_o_ai vector signed long long
+vec_sral(vector signed long long __a, vector unsigned int __b) {
+ return (vector signed long long)__builtin_s390_vsra(
+ (vector unsigned char)__a, (vector unsigned char)__b);
+}
+
+static inline __ATTRS_o_ai vector bool long long
+vec_sral(vector bool long long __a, vector unsigned char __b) {
+ return (vector bool long long)__builtin_s390_vsra(
+ (vector unsigned char)__a, __b);
+}
+
+static inline __ATTRS_o_ai vector bool long long
+vec_sral(vector bool long long __a, vector unsigned short __b) {
+ return (vector bool long long)__builtin_s390_vsra(
+ (vector unsigned char)__a, (vector unsigned char)__b);
+}
+
+static inline __ATTRS_o_ai vector bool long long
+vec_sral(vector bool long long __a, vector unsigned int __b) {
+ return (vector bool long long)__builtin_s390_vsra(
+ (vector unsigned char)__a, (vector unsigned char)__b);
+}
+
+static inline __ATTRS_o_ai vector unsigned long long
+vec_sral(vector unsigned long long __a, vector unsigned char __b) {
+ return (vector unsigned long long)__builtin_s390_vsra(
+ (vector unsigned char)__a, __b);
+}
+
+static inline __ATTRS_o_ai vector unsigned long long
+vec_sral(vector unsigned long long __a, vector unsigned short __b) {
+ return (vector unsigned long long)__builtin_s390_vsra(
+ (vector unsigned char)__a, (vector unsigned char)__b);
+}
+
+static inline __ATTRS_o_ai vector unsigned long long
+vec_sral(vector unsigned long long __a, vector unsigned int __b) {
+ return (vector unsigned long long)__builtin_s390_vsra(
+ (vector unsigned char)__a, (vector unsigned char)__b);
+}
+
+/*-- vec_srab ---------------------------------------------------------------*/
+
+static inline __ATTRS_o_ai vector signed char
+vec_srab(vector signed char __a, vector signed char __b) {
+ return (vector signed char)__builtin_s390_vsrab(
+ (vector unsigned char)__a, (vector unsigned char)__b);
+}
+
+static inline __ATTRS_o_ai vector signed char
+vec_srab(vector signed char __a, vector unsigned char __b) {
+ return (vector signed char)__builtin_s390_vsrab(
+ (vector unsigned char)__a, __b);
+}
+
+static inline __ATTRS_o_ai vector unsigned char
+vec_srab(vector unsigned char __a, vector signed char __b) {
+ return __builtin_s390_vsrab(__a, (vector unsigned char)__b);
+}
+
+static inline __ATTRS_o_ai vector unsigned char
+vec_srab(vector unsigned char __a, vector unsigned char __b) {
+ return __builtin_s390_vsrab(__a, __b);
+}
+
+static inline __ATTRS_o_ai vector signed short
+vec_srab(vector signed short __a, vector signed short __b) {
+ return (vector signed short)__builtin_s390_vsrab(
+ (vector unsigned char)__a, (vector unsigned char)__b);
+}
+
+static inline __ATTRS_o_ai vector signed short
+vec_srab(vector signed short __a, vector unsigned short __b) {
+ return (vector signed short)__builtin_s390_vsrab(
+ (vector unsigned char)__a, (vector unsigned char)__b);
+}
+
+static inline __ATTRS_o_ai vector unsigned short
+vec_srab(vector unsigned short __a, vector signed short __b) {
+ return (vector unsigned short)__builtin_s390_vsrab(
+ (vector unsigned char)__a, (vector unsigned char)__b);
+}
+
+static inline __ATTRS_o_ai vector unsigned short
+vec_srab(vector unsigned short __a, vector unsigned short __b) {
+ return (vector unsigned short)__builtin_s390_vsrab(
+ (vector unsigned char)__a, (vector unsigned char)__b);
+}
+
+static inline __ATTRS_o_ai vector signed int
+vec_srab(vector signed int __a, vector signed int __b) {
+ return (vector signed int)__builtin_s390_vsrab(
+ (vector unsigned char)__a, (vector unsigned char)__b);
+}
+
+static inline __ATTRS_o_ai vector signed int
+vec_srab(vector signed int __a, vector unsigned int __b) {
+ return (vector signed int)__builtin_s390_vsrab(
+ (vector unsigned char)__a, (vector unsigned char)__b);
+}
+
+static inline __ATTRS_o_ai vector unsigned int
+vec_srab(vector unsigned int __a, vector signed int __b) {
+ return (vector unsigned int)__builtin_s390_vsrab(
+ (vector unsigned char)__a, (vector unsigned char)__b);
+}
+
+static inline __ATTRS_o_ai vector unsigned int
+vec_srab(vector unsigned int __a, vector unsigned int __b) {
+ return (vector unsigned int)__builtin_s390_vsrab(
+ (vector unsigned char)__a, (vector unsigned char)__b);
+}
+
+static inline __ATTRS_o_ai vector signed long long
+vec_srab(vector signed long long __a, vector signed long long __b) {
+ return (vector signed long long)__builtin_s390_vsrab(
+ (vector unsigned char)__a, (vector unsigned char)__b);
+}
+
+static inline __ATTRS_o_ai vector signed long long
+vec_srab(vector signed long long __a, vector unsigned long long __b) {
+ return (vector signed long long)__builtin_s390_vsrab(
+ (vector unsigned char)__a, (vector unsigned char)__b);
+}
+
+static inline __ATTRS_o_ai vector unsigned long long
+vec_srab(vector unsigned long long __a, vector signed long long __b) {
+ return (vector unsigned long long)__builtin_s390_vsrab(
+ (vector unsigned char)__a, (vector unsigned char)__b);
+}
+
+static inline __ATTRS_o_ai vector unsigned long long
+vec_srab(vector unsigned long long __a, vector unsigned long long __b) {
+ return (vector unsigned long long)__builtin_s390_vsrab(
+ (vector unsigned char)__a, (vector unsigned char)__b);
+}
+
+static inline __ATTRS_o_ai vector double
+vec_srab(vector double __a, vector signed long long __b) {
+ return (vector double)__builtin_s390_vsrab(
+ (vector unsigned char)__a, (vector unsigned char)__b);
+}
+
+static inline __ATTRS_o_ai vector double
+vec_srab(vector double __a, vector unsigned long long __b) {
+ return (vector double)__builtin_s390_vsrab(
+ (vector unsigned char)__a, (vector unsigned char)__b);
+}
+
+/*-- vec_srl ----------------------------------------------------------------*/
+
+static inline __ATTRS_o_ai vector signed char
+vec_srl(vector signed char __a, vector unsigned char __b) {
+ return (vector signed char)__builtin_s390_vsrl(
+ (vector unsigned char)__a, __b);
+}
+
+static inline __ATTRS_o_ai vector signed char
+vec_srl(vector signed char __a, vector unsigned short __b) {
+ return (vector signed char)__builtin_s390_vsrl(
+ (vector unsigned char)__a, (vector unsigned char)__b);
+}
+
+static inline __ATTRS_o_ai vector signed char
+vec_srl(vector signed char __a, vector unsigned int __b) {
+ return (vector signed char)__builtin_s390_vsrl(
+ (vector unsigned char)__a, (vector unsigned char)__b);
+}
+
+static inline __ATTRS_o_ai vector bool char
+vec_srl(vector bool char __a, vector unsigned char __b) {
+ return (vector bool char)__builtin_s390_vsrl(
+ (vector unsigned char)__a, __b);
+}
+
+static inline __ATTRS_o_ai vector bool char
+vec_srl(vector bool char __a, vector unsigned short __b) {
+ return (vector bool char)__builtin_s390_vsrl(
+ (vector unsigned char)__a, (vector unsigned char)__b);
+}
+
+static inline __ATTRS_o_ai vector bool char
+vec_srl(vector bool char __a, vector unsigned int __b) {
+ return (vector bool char)__builtin_s390_vsrl(
+ (vector unsigned char)__a, (vector unsigned char)__b);
+}
+
+static inline __ATTRS_o_ai vector unsigned char
+vec_srl(vector unsigned char __a, vector unsigned char __b) {
+ return __builtin_s390_vsrl(__a, __b);
+}
+
+static inline __ATTRS_o_ai vector unsigned char
+vec_srl(vector unsigned char __a, vector unsigned short __b) {
+ return __builtin_s390_vsrl(__a, (vector unsigned char)__b);
+}
+
+static inline __ATTRS_o_ai vector unsigned char
+vec_srl(vector unsigned char __a, vector unsigned int __b) {
+ return __builtin_s390_vsrl(__a, (vector unsigned char)__b);
+}
+
+static inline __ATTRS_o_ai vector signed short
+vec_srl(vector signed short __a, vector unsigned char __b) {
+ return (vector signed short)__builtin_s390_vsrl(
+ (vector unsigned char)__a, __b);
+}
+
+static inline __ATTRS_o_ai vector signed short
+vec_srl(vector signed short __a, vector unsigned short __b) {
+ return (vector signed short)__builtin_s390_vsrl(
+ (vector unsigned char)__a, (vector unsigned char)__b);
+}
+
+static inline __ATTRS_o_ai vector signed short
+vec_srl(vector signed short __a, vector unsigned int __b) {
+ return (vector signed short)__builtin_s390_vsrl(
+ (vector unsigned char)__a, (vector unsigned char)__b);
+}
+
+static inline __ATTRS_o_ai vector bool short
+vec_srl(vector bool short __a, vector unsigned char __b) {
+ return (vector bool short)__builtin_s390_vsrl(
+ (vector unsigned char)__a, __b);
+}
+
+static inline __ATTRS_o_ai vector bool short
+vec_srl(vector bool short __a, vector unsigned short __b) {
+ return (vector bool short)__builtin_s390_vsrl(
+ (vector unsigned char)__a, (vector unsigned char)__b);
+}
+
+static inline __ATTRS_o_ai vector bool short
+vec_srl(vector bool short __a, vector unsigned int __b) {
+ return (vector bool short)__builtin_s390_vsrl(
+ (vector unsigned char)__a, (vector unsigned char)__b);
+}
+
+static inline __ATTRS_o_ai vector unsigned short
+vec_srl(vector unsigned short __a, vector unsigned char __b) {
+ return (vector unsigned short)__builtin_s390_vsrl(
+ (vector unsigned char)__a, __b);
+}
+
+static inline __ATTRS_o_ai vector unsigned short
+vec_srl(vector unsigned short __a, vector unsigned short __b) {
+ return (vector unsigned short)__builtin_s390_vsrl(
+ (vector unsigned char)__a, (vector unsigned char)__b);
+}
+
+static inline __ATTRS_o_ai vector unsigned short
+vec_srl(vector unsigned short __a, vector unsigned int __b) {
+ return (vector unsigned short)__builtin_s390_vsrl(
+ (vector unsigned char)__a, (vector unsigned char)__b);
+}
+
+static inline __ATTRS_o_ai vector signed int
+vec_srl(vector signed int __a, vector unsigned char __b) {
+ return (vector signed int)__builtin_s390_vsrl(
+ (vector unsigned char)__a, __b);
+}
+
+static inline __ATTRS_o_ai vector signed int
+vec_srl(vector signed int __a, vector unsigned short __b) {
+ return (vector signed int)__builtin_s390_vsrl(
+ (vector unsigned char)__a, (vector unsigned char)__b);
+}
+
+static inline __ATTRS_o_ai vector signed int
+vec_srl(vector signed int __a, vector unsigned int __b) {
+ return (vector signed int)__builtin_s390_vsrl(
+ (vector unsigned char)__a, (vector unsigned char)__b);
+}
+
+static inline __ATTRS_o_ai vector bool int
+vec_srl(vector bool int __a, vector unsigned char __b) {
+ return (vector bool int)__builtin_s390_vsrl(
+ (vector unsigned char)__a, __b);
+}
+
+static inline __ATTRS_o_ai vector bool int
+vec_srl(vector bool int __a, vector unsigned short __b) {
+ return (vector bool int)__builtin_s390_vsrl(
+ (vector unsigned char)__a, (vector unsigned char)__b);
+}
+
+static inline __ATTRS_o_ai vector bool int
+vec_srl(vector bool int __a, vector unsigned int __b) {
+ return (vector bool int)__builtin_s390_vsrl(
+ (vector unsigned char)__a, (vector unsigned char)__b);
+}
+
+static inline __ATTRS_o_ai vector unsigned int
+vec_srl(vector unsigned int __a, vector unsigned char __b) {
+ return (vector unsigned int)__builtin_s390_vsrl(
+ (vector unsigned char)__a, __b);
+}
+
+static inline __ATTRS_o_ai vector unsigned int
+vec_srl(vector unsigned int __a, vector unsigned short __b) {
+ return (vector unsigned int)__builtin_s390_vsrl(
+ (vector unsigned char)__a, (vector unsigned char)__b);
+}
+
+static inline __ATTRS_o_ai vector unsigned int
+vec_srl(vector unsigned int __a, vector unsigned int __b) {
+ return (vector unsigned int)__builtin_s390_vsrl(
+ (vector unsigned char)__a, (vector unsigned char)__b);
+}
+
+static inline __ATTRS_o_ai vector signed long long
+vec_srl(vector signed long long __a, vector unsigned char __b) {
+ return (vector signed long long)__builtin_s390_vsrl(
+ (vector unsigned char)__a, __b);
+}
+
+static inline __ATTRS_o_ai vector signed long long
+vec_srl(vector signed long long __a, vector unsigned short __b) {
+ return (vector signed long long)__builtin_s390_vsrl(
+ (vector unsigned char)__a, (vector unsigned char)__b);
+}
+
+static inline __ATTRS_o_ai vector signed long long
+vec_srl(vector signed long long __a, vector unsigned int __b) {
+ return (vector signed long long)__builtin_s390_vsrl(
+ (vector unsigned char)__a, (vector unsigned char)__b);
+}
+
+static inline __ATTRS_o_ai vector bool long long
+vec_srl(vector bool long long __a, vector unsigned char __b) {
+ return (vector bool long long)__builtin_s390_vsrl(
+ (vector unsigned char)__a, __b);
+}
+
+static inline __ATTRS_o_ai vector bool long long
+vec_srl(vector bool long long __a, vector unsigned short __b) {
+ return (vector bool long long)__builtin_s390_vsrl(
+ (vector unsigned char)__a, (vector unsigned char)__b);
+}
+
+static inline __ATTRS_o_ai vector bool long long
+vec_srl(vector bool long long __a, vector unsigned int __b) {
+ return (vector bool long long)__builtin_s390_vsrl(
+ (vector unsigned char)__a, (vector unsigned char)__b);
+}
+
+static inline __ATTRS_o_ai vector unsigned long long
+vec_srl(vector unsigned long long __a, vector unsigned char __b) {
+ return (vector unsigned long long)__builtin_s390_vsrl(
+ (vector unsigned char)__a, __b);
+}
+
+static inline __ATTRS_o_ai vector unsigned long long
+vec_srl(vector unsigned long long __a, vector unsigned short __b) {
+ return (vector unsigned long long)__builtin_s390_vsrl(
+ (vector unsigned char)__a, (vector unsigned char)__b);
+}
+
+static inline __ATTRS_o_ai vector unsigned long long
+vec_srl(vector unsigned long long __a, vector unsigned int __b) {
+ return (vector unsigned long long)__builtin_s390_vsrl(
+ (vector unsigned char)__a, (vector unsigned char)__b);
+}
+
+/*-- vec_srb ----------------------------------------------------------------*/
+
+static inline __ATTRS_o_ai vector signed char
+vec_srb(vector signed char __a, vector signed char __b) {
+ return (vector signed char)__builtin_s390_vsrlb(
+ (vector unsigned char)__a, (vector unsigned char)__b);
+}
+
+static inline __ATTRS_o_ai vector signed char
+vec_srb(vector signed char __a, vector unsigned char __b) {
+ return (vector signed char)__builtin_s390_vsrlb(
+ (vector unsigned char)__a, __b);
+}
+
+static inline __ATTRS_o_ai vector unsigned char
+vec_srb(vector unsigned char __a, vector signed char __b) {
+ return __builtin_s390_vsrlb(__a, (vector unsigned char)__b);
+}
+
+static inline __ATTRS_o_ai vector unsigned char
+vec_srb(vector unsigned char __a, vector unsigned char __b) {
+ return __builtin_s390_vsrlb(__a, __b);
+}
+
+static inline __ATTRS_o_ai vector signed short
+vec_srb(vector signed short __a, vector signed short __b) {
+ return (vector signed short)__builtin_s390_vsrlb(
+ (vector unsigned char)__a, (vector unsigned char)__b);
+}
+
+static inline __ATTRS_o_ai vector signed short
+vec_srb(vector signed short __a, vector unsigned short __b) {
+ return (vector signed short)__builtin_s390_vsrlb(
+ (vector unsigned char)__a, (vector unsigned char)__b);
+}
+
+static inline __ATTRS_o_ai vector unsigned short
+vec_srb(vector unsigned short __a, vector signed short __b) {
+ return (vector unsigned short)__builtin_s390_vsrlb(
+ (vector unsigned char)__a, (vector unsigned char)__b);
+}
+
+static inline __ATTRS_o_ai vector unsigned short
+vec_srb(vector unsigned short __a, vector unsigned short __b) {
+ return (vector unsigned short)__builtin_s390_vsrlb(
+ (vector unsigned char)__a, (vector unsigned char)__b);
+}
+
+static inline __ATTRS_o_ai vector signed int
+vec_srb(vector signed int __a, vector signed int __b) {
+ return (vector signed int)__builtin_s390_vsrlb(
+ (vector unsigned char)__a, (vector unsigned char)__b);
+}
+
+static inline __ATTRS_o_ai vector signed int
+vec_srb(vector signed int __a, vector unsigned int __b) {
+ return (vector signed int)__builtin_s390_vsrlb(
+ (vector unsigned char)__a, (vector unsigned char)__b);
+}
+
+static inline __ATTRS_o_ai vector unsigned int
+vec_srb(vector unsigned int __a, vector signed int __b) {
+ return (vector unsigned int)__builtin_s390_vsrlb(
+ (vector unsigned char)__a, (vector unsigned char)__b);
+}
+
+static inline __ATTRS_o_ai vector unsigned int
+vec_srb(vector unsigned int __a, vector unsigned int __b) {
+ return (vector unsigned int)__builtin_s390_vsrlb(
+ (vector unsigned char)__a, (vector unsigned char)__b);
+}
+
+static inline __ATTRS_o_ai vector signed long long
+vec_srb(vector signed long long __a, vector signed long long __b) {
+ return (vector signed long long)__builtin_s390_vsrlb(
+ (vector unsigned char)__a, (vector unsigned char)__b);
+}
+
+static inline __ATTRS_o_ai vector signed long long
+vec_srb(vector signed long long __a, vector unsigned long long __b) {
+ return (vector signed long long)__builtin_s390_vsrlb(
+ (vector unsigned char)__a, (vector unsigned char)__b);
+}
+
+static inline __ATTRS_o_ai vector unsigned long long
+vec_srb(vector unsigned long long __a, vector signed long long __b) {
+ return (vector unsigned long long)__builtin_s390_vsrlb(
+ (vector unsigned char)__a, (vector unsigned char)__b);
+}
+
+static inline __ATTRS_o_ai vector unsigned long long
+vec_srb(vector unsigned long long __a, vector unsigned long long __b) {
+ return (vector unsigned long long)__builtin_s390_vsrlb(
+ (vector unsigned char)__a, (vector unsigned char)__b);
+}
+
+static inline __ATTRS_o_ai vector double
+vec_srb(vector double __a, vector signed long long __b) {
+ return (vector double)__builtin_s390_vsrlb(
+ (vector unsigned char)__a, (vector unsigned char)__b);
+}
+
+static inline __ATTRS_o_ai vector double
+vec_srb(vector double __a, vector unsigned long long __b) {
+ return (vector double)__builtin_s390_vsrlb(
+ (vector unsigned char)__a, (vector unsigned char)__b);
+}
+
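+/* Usage sketch (illustrative): vec_srl shifts the whole 128-bit vector
+ * right by a bit count and vec_srb by a byte count; by the instruction
+ * encodings (an assumption to verify against the z/Architecture POP) the
+ * same splatted count serves both, giving a full 0..127-bit shift:
+ *
+ *   static vector unsigned char
+ *   srl128(vector unsigned char __x, unsigned char __n) {
+ *     vector unsigned char __cnt = vec_splats(__n);
+ *     return vec_srl(vec_srb(__x, __cnt), __cnt);
+ *   }
+ */
+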
+/*-- vec_abs ----------------------------------------------------------------*/
+
+static inline __ATTRS_o_ai vector signed char
+vec_abs(vector signed char __a) {
+ return vec_sel(__a, -__a, vec_cmplt(__a, (vector signed char)0));
+}
+
+static inline __ATTRS_o_ai vector signed short
+vec_abs(vector signed short __a) {
+ return vec_sel(__a, -__a, vec_cmplt(__a, (vector signed short)0));
+}
+
+static inline __ATTRS_o_ai vector signed int
+vec_abs(vector signed int __a) {
+ return vec_sel(__a, -__a, vec_cmplt(__a, (vector signed int)0));
+}
+
+static inline __ATTRS_o_ai vector signed long long
+vec_abs(vector signed long long __a) {
+ return vec_sel(__a, -__a, vec_cmplt(__a, (vector signed long long)0));
+}
+
+static inline __ATTRS_o_ai vector double
+vec_abs(vector double __a) {
+ return __builtin_s390_vflpdb(__a);
+}
+
+/*-- vec_nabs ---------------------------------------------------------------*/
+
+static inline __ATTRS_ai vector double
+vec_nabs(vector double __a) {
+ return __builtin_s390_vflndb(__a);
+}
+
+/*-- vec_max ----------------------------------------------------------------*/
+
+static inline __ATTRS_o_ai vector signed char
+vec_max(vector signed char __a, vector signed char __b) {
+ return vec_sel(__b, __a, vec_cmpgt(__a, __b));
+}
+
+static inline __ATTRS_o_ai vector signed char
+vec_max(vector signed char __a, vector bool char __b) {
+ vector signed char __bc = (vector signed char)__b;
+ return vec_sel(__bc, __a, vec_cmpgt(__a, __bc));
+}
+
+static inline __ATTRS_o_ai vector signed char
+vec_max(vector bool char __a, vector signed char __b) {
+ vector signed char __ac = (vector signed char)__a;
+ return vec_sel(__b, __ac, vec_cmpgt(__ac, __b));
+}
+
+static inline __ATTRS_o_ai vector unsigned char
+vec_max(vector unsigned char __a, vector unsigned char __b) {
+ return vec_sel(__b, __a, vec_cmpgt(__a, __b));
+}
+
+static inline __ATTRS_o_ai vector unsigned char
+vec_max(vector unsigned char __a, vector bool char __b) {
+ vector unsigned char __bc = (vector unsigned char)__b;
+ return vec_sel(__bc, __a, vec_cmpgt(__a, __bc));
+}
+
+static inline __ATTRS_o_ai vector unsigned char
+vec_max(vector bool char __a, vector unsigned char __b) {
+ vector unsigned char __ac = (vector unsigned char)__a;
+ return vec_sel(__b, __ac, vec_cmpgt(__ac, __b));
+}
+
+static inline __ATTRS_o_ai vector signed short
+vec_max(vector signed short __a, vector signed short __b) {
+ return vec_sel(__b, __a, vec_cmpgt(__a, __b));
+}
+
+static inline __ATTRS_o_ai vector signed short
+vec_max(vector signed short __a, vector bool short __b) {
+ vector signed short __bc = (vector signed short)__b;
+ return vec_sel(__bc, __a, vec_cmpgt(__a, __bc));
+}
+
+static inline __ATTRS_o_ai vector signed short
+vec_max(vector bool short __a, vector signed short __b) {
+ vector signed short __ac = (vector signed short)__a;
+ return vec_sel(__b, __ac, vec_cmpgt(__ac, __b));
+}
+
+static inline __ATTRS_o_ai vector unsigned short
+vec_max(vector unsigned short __a, vector unsigned short __b) {
+ return vec_sel(__b, __a, vec_cmpgt(__a, __b));
+}
+
+static inline __ATTRS_o_ai vector unsigned short
+vec_max(vector unsigned short __a, vector bool short __b) {
+ vector unsigned short __bc = (vector unsigned short)__b;
+ return vec_sel(__bc, __a, vec_cmpgt(__a, __bc));
+}
+
+static inline __ATTRS_o_ai vector unsigned short
+vec_max(vector bool short __a, vector unsigned short __b) {
+ vector unsigned short __ac = (vector unsigned short)__a;
+ return vec_sel(__b, __ac, vec_cmpgt(__ac, __b));
+}
+
+static inline __ATTRS_o_ai vector signed int
+vec_max(vector signed int __a, vector signed int __b) {
+ return vec_sel(__b, __a, vec_cmpgt(__a, __b));
+}
+
+static inline __ATTRS_o_ai vector signed int
+vec_max(vector signed int __a, vector bool int __b) {
+ vector signed int __bc = (vector signed int)__b;
+ return vec_sel(__bc, __a, vec_cmpgt(__a, __bc));
+}
+
+static inline __ATTRS_o_ai vector signed int
+vec_max(vector bool int __a, vector signed int __b) {
+ vector signed int __ac = (vector signed int)__a;
+ return vec_sel(__b, __ac, vec_cmpgt(__ac, __b));
+}
+
+static inline __ATTRS_o_ai vector unsigned int
+vec_max(vector unsigned int __a, vector unsigned int __b) {
+ return vec_sel(__b, __a, vec_cmpgt(__a, __b));
+}
+
+static inline __ATTRS_o_ai vector unsigned int
+vec_max(vector unsigned int __a, vector bool int __b) {
+ vector unsigned int __bc = (vector unsigned int)__b;
+ return vec_sel(__bc, __a, vec_cmpgt(__a, __bc));
+}
+
+static inline __ATTRS_o_ai vector unsigned int
+vec_max(vector bool int __a, vector unsigned int __b) {
+ vector unsigned int __ac = (vector unsigned int)__a;
+ return vec_sel(__b, __ac, vec_cmpgt(__ac, __b));
+}
+
+static inline __ATTRS_o_ai vector signed long long
+vec_max(vector signed long long __a, vector signed long long __b) {
+ return vec_sel(__b, __a, vec_cmpgt(__a, __b));
+}
+
+static inline __ATTRS_o_ai vector signed long long
+vec_max(vector signed long long __a, vector bool long long __b) {
+ vector signed long long __bc = (vector signed long long)__b;
+ return vec_sel(__bc, __a, vec_cmpgt(__a, __bc));
+}
+
+static inline __ATTRS_o_ai vector signed long long
+vec_max(vector bool long long __a, vector signed long long __b) {
+ vector signed long long __ac = (vector signed long long)__a;
+ return vec_sel(__b, __ac, vec_cmpgt(__ac, __b));
+}
+
+static inline __ATTRS_o_ai vector unsigned long long
+vec_max(vector unsigned long long __a, vector unsigned long long __b) {
+ return vec_sel(__b, __a, vec_cmpgt(__a, __b));
+}
+
+static inline __ATTRS_o_ai vector unsigned long long
+vec_max(vector unsigned long long __a, vector bool long long __b) {
+ vector unsigned long long __bc = (vector unsigned long long)__b;
+ return vec_sel(__bc, __a, vec_cmpgt(__a, __bc));
+}
+
+static inline __ATTRS_o_ai vector unsigned long long
+vec_max(vector bool long long __a, vector unsigned long long __b) {
+ vector unsigned long long __ac = (vector unsigned long long)__a;
+ return vec_sel(__b, __ac, vec_cmpgt(__ac, __b));
+}
+
+static inline __ATTRS_o_ai vector double
+vec_max(vector double __a, vector double __b) {
+ return vec_sel(__b, __a, vec_cmpgt(__a, __b));
+}
+
+/*-- vec_min ----------------------------------------------------------------*/
+
+static inline __ATTRS_o_ai vector signed char
+vec_min(vector signed char __a, vector signed char __b) {
+ return vec_sel(__a, __b, vec_cmpgt(__a, __b));
+}
+
+static inline __ATTRS_o_ai vector signed char
+vec_min(vector signed char __a, vector bool char __b) {
+ vector signed char __bc = (vector signed char)__b;
+ return vec_sel(__a, __bc, vec_cmpgt(__a, __bc));
+}
+
+static inline __ATTRS_o_ai vector signed char
+vec_min(vector bool char __a, vector signed char __b) {
+ vector signed char __ac = (vector signed char)__a;
+ return vec_sel(__ac, __b, vec_cmpgt(__ac, __b));
+}
+
+static inline __ATTRS_o_ai vector unsigned char
+vec_min(vector unsigned char __a, vector unsigned char __b) {
+ return vec_sel(__a, __b, vec_cmpgt(__a, __b));
+}
+
+static inline __ATTRS_o_ai vector unsigned char
+vec_min(vector unsigned char __a, vector bool char __b) {
+ vector unsigned char __bc = (vector unsigned char)__b;
+ return vec_sel(__a, __bc, vec_cmpgt(__a, __bc));
+}
+
+static inline __ATTRS_o_ai vector unsigned char
+vec_min(vector bool char __a, vector unsigned char __b) {
+ vector unsigned char __ac = (vector unsigned char)__a;
+ return vec_sel(__ac, __b, vec_cmpgt(__ac, __b));
+}
+
+static inline __ATTRS_o_ai vector signed short
+vec_min(vector signed short __a, vector signed short __b) {
+ return vec_sel(__a, __b, vec_cmpgt(__a, __b));
+}
+
+static inline __ATTRS_o_ai vector signed short
+vec_min(vector signed short __a, vector bool short __b) {
+ vector signed short __bc = (vector signed short)__b;
+ return vec_sel(__a, __bc, vec_cmpgt(__a, __bc));
+}
+
+static inline __ATTRS_o_ai vector signed short
+vec_min(vector bool short __a, vector signed short __b) {
+ vector signed short __ac = (vector signed short)__a;
+ return vec_sel(__ac, __b, vec_cmpgt(__ac, __b));
+}
+
+static inline __ATTRS_o_ai vector unsigned short
+vec_min(vector unsigned short __a, vector unsigned short __b) {
+ return vec_sel(__a, __b, vec_cmpgt(__a, __b));
+}
+
+static inline __ATTRS_o_ai vector unsigned short
+vec_min(vector unsigned short __a, vector bool short __b) {
+ vector unsigned short __bc = (vector unsigned short)__b;
+ return vec_sel(__a, __bc, vec_cmpgt(__a, __bc));
+}
+
+static inline __ATTRS_o_ai vector unsigned short
+vec_min(vector bool short __a, vector unsigned short __b) {
+ vector unsigned short __ac = (vector unsigned short)__a;
+ return vec_sel(__ac, __b, vec_cmpgt(__ac, __b));
+}
+
+static inline __ATTRS_o_ai vector signed int
+vec_min(vector signed int __a, vector signed int __b) {
+ return vec_sel(__a, __b, vec_cmpgt(__a, __b));
+}
+
+static inline __ATTRS_o_ai vector signed int
+vec_min(vector signed int __a, vector bool int __b) {
+ vector signed int __bc = (vector signed int)__b;
+ return vec_sel(__a, __bc, vec_cmpgt(__a, __bc));
+}
+
+static inline __ATTRS_o_ai vector signed int
+vec_min(vector bool int __a, vector signed int __b) {
+ vector signed int __ac = (vector signed int)__a;
+ return vec_sel(__ac, __b, vec_cmpgt(__ac, __b));
+}
+
+static inline __ATTRS_o_ai vector unsigned int
+vec_min(vector unsigned int __a, vector unsigned int __b) {
+ return vec_sel(__a, __b, vec_cmpgt(__a, __b));
+}
+
+static inline __ATTRS_o_ai vector unsigned int
+vec_min(vector unsigned int __a, vector bool int __b) {
+ vector unsigned int __bc = (vector unsigned int)__b;
+ return vec_sel(__a, __bc, vec_cmpgt(__a, __bc));
+}
+
+static inline __ATTRS_o_ai vector unsigned int
+vec_min(vector bool int __a, vector unsigned int __b) {
+ vector unsigned int __ac = (vector unsigned int)__a;
+ return vec_sel(__ac, __b, vec_cmpgt(__ac, __b));
+}
+
+static inline __ATTRS_o_ai vector signed long long
+vec_min(vector signed long long __a, vector signed long long __b) {
+ return vec_sel(__a, __b, vec_cmpgt(__a, __b));
+}
+
+static inline __ATTRS_o_ai vector signed long long
+vec_min(vector signed long long __a, vector bool long long __b) {
+ vector signed long long __bc = (vector signed long long)__b;
+ return vec_sel(__a, __bc, vec_cmpgt(__a, __bc));
+}
+
+static inline __ATTRS_o_ai vector signed long long
+vec_min(vector bool long long __a, vector signed long long __b) {
+ vector signed long long __ac = (vector signed long long)__a;
+ return vec_sel(__ac, __b, vec_cmpgt(__ac, __b));
+}
+
+static inline __ATTRS_o_ai vector unsigned long long
+vec_min(vector unsigned long long __a, vector unsigned long long __b) {
+ return vec_sel(__a, __b, vec_cmpgt(__a, __b));
+}
+
+static inline __ATTRS_o_ai vector unsigned long long
+vec_min(vector unsigned long long __a, vector bool long long __b) {
+ vector unsigned long long __bc = (vector unsigned long long)__b;
+ return vec_sel(__a, __bc, vec_cmpgt(__a, __bc));
+}
+
+static inline __ATTRS_o_ai vector unsigned long long
+vec_min(vector bool long long __a, vector unsigned long long __b) {
+ vector unsigned long long __ac = (vector unsigned long long)__a;
+ return vec_sel(__ac, __b, vec_cmpgt(__ac, __b));
+}
+
+static inline __ATTRS_o_ai vector double
+vec_min(vector double __a, vector double __b) {
+ return vec_sel(__a, __b, vec_cmpgt(__a, __b));
+}
+
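+/* Note (illustrative): vec_max, vec_min and vec_abs above are all built
+ * from the same branch-free compare-and-select idiom, which composes for
+ * derived operations such as clamping to a range:
+ *
+ *   static vector signed int
+ *   clamp(vector signed int __x, vector signed int __lo,
+ *         vector signed int __hi) {
+ *     return vec_min(vec_max(__x, __lo), __hi);
+ *   }
+ */
+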
+/*-- vec_add_u128 -----------------------------------------------------------*/
+
+static inline __ATTRS_ai vector unsigned char
+vec_add_u128(vector unsigned char __a, vector unsigned char __b) {
+ return __builtin_s390_vaq(__a, __b);
+}
+
+/*-- vec_addc ---------------------------------------------------------------*/
+
+static inline __ATTRS_o_ai vector unsigned char
+vec_addc(vector unsigned char __a, vector unsigned char __b) {
+ return __builtin_s390_vaccb(__a, __b);
+}
+
+static inline __ATTRS_o_ai vector unsigned short
+vec_addc(vector unsigned short __a, vector unsigned short __b) {
+ return __builtin_s390_vacch(__a, __b);
+}
+
+static inline __ATTRS_o_ai vector unsigned int
+vec_addc(vector unsigned int __a, vector unsigned int __b) {
+ return __builtin_s390_vaccf(__a, __b);
+}
+
+static inline __ATTRS_o_ai vector unsigned long long
+vec_addc(vector unsigned long long __a, vector unsigned long long __b) {
+ return __builtin_s390_vaccg(__a, __b);
+}
+
+/*-- vec_addc_u128 ----------------------------------------------------------*/
+
+static inline __ATTRS_ai vector unsigned char
+vec_addc_u128(vector unsigned char __a, vector unsigned char __b) {
+ return __builtin_s390_vaccq(__a, __b);
+}
+
+/*-- vec_adde_u128 ----------------------------------------------------------*/
+
+static inline __ATTRS_ai vector unsigned char
+vec_adde_u128(vector unsigned char __a, vector unsigned char __b,
+ vector unsigned char __c) {
+ return __builtin_s390_vacq(__a, __b, __c);
+}
+
+/*-- vec_addec_u128 ---------------------------------------------------------*/
+
+static inline __ATTRS_ai vector unsigned char
+vec_addec_u128(vector unsigned char __a, vector unsigned char __b,
+ vector unsigned char __c) {
+ return __builtin_s390_vacccq(__a, __b, __c);
+}
+
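+/* Usage sketch (illustrative): the *_u128 carry intrinsics chain into
+ * wider arithmetic. A 256-bit add, each half held in a vector unsigned
+ * char, might look like (names hypothetical):
+ *
+ *   static void
+ *   add256(vector unsigned char __alo, vector unsigned char __ahi,
+ *          vector unsigned char __blo, vector unsigned char __bhi,
+ *          vector unsigned char *__lo, vector unsigned char *__hi) {
+ *     vector unsigned char __carry = vec_addc_u128(__alo, __blo);
+ *     *__lo = vec_add_u128(__alo, __blo);
+ *     *__hi = vec_adde_u128(__ahi, __bhi, __carry);
+ *   }
+ */
+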
+/*-- vec_avg ----------------------------------------------------------------*/
+
+static inline __ATTRS_o_ai vector signed char
+vec_avg(vector signed char __a, vector signed char __b) {
+ return __builtin_s390_vavgb(__a, __b);
+}
+
+static inline __ATTRS_o_ai vector signed short
+vec_avg(vector signed short __a, vector signed short __b) {
+ return __builtin_s390_vavgh(__a, __b);
+}
+
+static inline __ATTRS_o_ai vector signed int
+vec_avg(vector signed int __a, vector signed int __b) {
+ return __builtin_s390_vavgf(__a, __b);
+}
+
+static inline __ATTRS_o_ai vector signed long long
+vec_avg(vector signed long long __a, vector signed long long __b) {
+ return __builtin_s390_vavgg(__a, __b);
+}
+
+static inline __ATTRS_o_ai vector unsigned char
+vec_avg(vector unsigned char __a, vector unsigned char __b) {
+ return __builtin_s390_vavglb(__a, __b);
+}
+
+static inline __ATTRS_o_ai vector unsigned short
+vec_avg(vector unsigned short __a, vector unsigned short __b) {
+ return __builtin_s390_vavglh(__a, __b);
+}
+
+static inline __ATTRS_o_ai vector unsigned int
+vec_avg(vector unsigned int __a, vector unsigned int __b) {
+ return __builtin_s390_vavglf(__a, __b);
+}
+
+static inline __ATTRS_o_ai vector unsigned long long
+vec_avg(vector unsigned long long __a, vector unsigned long long __b) {
+ return __builtin_s390_vavglg(__a, __b);
+}
+
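+/* Note (an assumption, per the VAVG/VAVGL definitions): each element is
+ * computed as (__a + __b + 1) >> 1 in double width, so the rounded
+ * average cannot overflow: for unsigned char, vec_avg of 255 and 255 is
+ * 255, and of 1 and 2 is 2.
+ */
+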
+/*-- vec_checksum -----------------------------------------------------------*/
+
+static inline __ATTRS_ai vector unsigned int
+vec_checksum(vector unsigned int __a, vector unsigned int __b) {
+ return __builtin_s390_vcksm(__a, __b);
+}
+
+/*-- vec_gfmsum -------------------------------------------------------------*/
+
+static inline __ATTRS_o_ai vector unsigned short
+vec_gfmsum(vector unsigned char __a, vector unsigned char __b) {
+ return __builtin_s390_vgfmb(__a, __b);
+}
+
+static inline __ATTRS_o_ai vector unsigned int
+vec_gfmsum(vector unsigned short __a, vector unsigned short __b) {
+ return __builtin_s390_vgfmh(__a, __b);
+}
+
+static inline __ATTRS_o_ai vector unsigned long long
+vec_gfmsum(vector unsigned int __a, vector unsigned int __b) {
+ return __builtin_s390_vgfmf(__a, __b);
+}
+
+/*-- vec_gfmsum_128 ---------------------------------------------------------*/
+
+static inline __ATTRS_o_ai vector unsigned char
+vec_gfmsum_128(vector unsigned long long __a, vector unsigned long long __b) {
+ return __builtin_s390_vgfmg(__a, __b);
+}
+
+/*-- vec_gfmsum_accum -------------------------------------------------------*/
+
+static inline __ATTRS_o_ai vector unsigned short
+vec_gfmsum_accum(vector unsigned char __a, vector unsigned char __b,
+ vector unsigned short __c) {
+ return __builtin_s390_vgfmab(__a, __b, __c);
+}
+
+static inline __ATTRS_o_ai vector unsigned int
+vec_gfmsum_accum(vector unsigned short __a, vector unsigned short __b,
+ vector unsigned int __c) {
+ return __builtin_s390_vgfmah(__a, __b, __c);
+}
+
+static inline __ATTRS_o_ai vector unsigned long long
+vec_gfmsum_accum(vector unsigned int __a, vector unsigned int __b,
+ vector unsigned long long __c) {
+ return __builtin_s390_vgfmaf(__a, __b, __c);
+}
+
+/*-- vec_gfmsum_accum_128 ---------------------------------------------------*/
+
+static inline __ATTRS_o_ai vector unsigned char
+vec_gfmsum_accum_128(vector unsigned long long __a,
+ vector unsigned long long __b,
+ vector unsigned char __c) {
+ return __builtin_s390_vgfmag(__a, __b, __c);
+}
+
+/*-- vec_mladd --------------------------------------------------------------*/
+
+static inline __ATTRS_o_ai vector signed char
+vec_mladd(vector signed char __a, vector signed char __b,
+ vector signed char __c) {
+ return __a * __b + __c;
+}
+
+static inline __ATTRS_o_ai vector signed char
+vec_mladd(vector unsigned char __a, vector signed char __b,
+ vector signed char __c) {
+ return (vector signed char)__a * __b + __c;
+}
+
+static inline __ATTRS_o_ai vector signed char
+vec_mladd(vector signed char __a, vector unsigned char __b,
+ vector unsigned char __c) {
+ return __a * (vector signed char)__b + (vector signed char)__c;
+}
+
+static inline __ATTRS_o_ai vector unsigned char
+vec_mladd(vector unsigned char __a, vector unsigned char __b,
+ vector unsigned char __c) {
+ return __a * __b + __c;
+}
+
+static inline __ATTRS_o_ai vector signed short
+vec_mladd(vector signed short __a, vector signed short __b,
+ vector signed short __c) {
+ return __a * __b + __c;
+}
+
+static inline __ATTRS_o_ai vector signed short
+vec_mladd(vector unsigned short __a, vector signed short __b,
+ vector signed short __c) {
+ return (vector signed short)__a * __b + __c;
+}
+
+static inline __ATTRS_o_ai vector signed short
+vec_mladd(vector signed short __a, vector unsigned short __b,
+ vector unsigned short __c) {
+ return __a * (vector signed short)__b + (vector signed short)__c;
+}
+
+static inline __ATTRS_o_ai vector unsigned short
+vec_mladd(vector unsigned short __a, vector unsigned short __b,
+ vector unsigned short __c) {
+ return __a * __b + __c;
+}
+
+static inline __ATTRS_o_ai vector signed int
+vec_mladd(vector signed int __a, vector signed int __b,
+ vector signed int __c) {
+ return __a * __b + __c;
+}
+
+static inline __ATTRS_o_ai vector signed int
+vec_mladd(vector unsigned int __a, vector signed int __b,
+ vector signed int __c) {
+ return (vector signed int)__a * __b + __c;
+}
+
+static inline __ATTRS_o_ai vector signed int
+vec_mladd(vector signed int __a, vector unsigned int __b,
+ vector unsigned int __c) {
+ return __a * (vector signed int)__b + (vector signed int)__c;
+}
+
+static inline __ATTRS_o_ai vector unsigned int
+vec_mladd(vector unsigned int __a, vector unsigned int __b,
+ vector unsigned int __c) {
+ return __a * __b + __c;
+}
+
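+/* Usage sketch (illustrative): vec_mladd is the element-wise low-half
+ * multiply-add, i.e. plain C vector arithmetic as the definitions above
+ * show; vec_mhadd below returns the high half of the product instead:
+ *
+ *   vector signed short __a = vec_splats((signed short)300);
+ *   vector signed short __r = vec_mladd(__a, __a, __a);
+ *   // each element: low 16 bits of 300 * 300 + 300 = 90300, i.e. 24764
+ */
+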
+/*-- vec_mhadd --------------------------------------------------------------*/
+
+static inline __ATTRS_o_ai vector signed char
+vec_mhadd(vector signed char __a, vector signed char __b,
+ vector signed char __c) {
+ return __builtin_s390_vmahb(__a, __b, __c);
+}
+
+static inline __ATTRS_o_ai vector unsigned char
+vec_mhadd(vector unsigned char __a, vector unsigned char __b,
+ vector unsigned char __c) {
+ return __builtin_s390_vmalhb(__a, __b, __c);
+}
+
+static inline __ATTRS_o_ai vector signed short
+vec_mhadd(vector signed short __a, vector signed short __b,
+ vector signed short __c) {
+ return __builtin_s390_vmahh(__a, __b, __c);
+}
+
+static inline __ATTRS_o_ai vector unsigned short
+vec_mhadd(vector unsigned short __a, vector unsigned short __b,
+ vector unsigned short __c) {
+ return __builtin_s390_vmalhh(__a, __b, __c);
+}
+
+static inline __ATTRS_o_ai vector signed int
+vec_mhadd(vector signed int __a, vector signed int __b,
+ vector signed int __c) {
+ return __builtin_s390_vmahf(__a, __b, __c);
+}
+
+static inline __ATTRS_o_ai vector unsigned int
+vec_mhadd(vector unsigned int __a, vector unsigned int __b,
+ vector unsigned int __c) {
+ return __builtin_s390_vmalhf(__a, __b, __c);
+}
+
+/*-- vec_meadd --------------------------------------------------------------*/
+
+static inline __ATTRS_o_ai vector signed short
+vec_meadd(vector signed char __a, vector signed char __b,
+ vector signed short __c) {
+ return __builtin_s390_vmaeb(__a, __b, __c);
+}
+
+static inline __ATTRS_o_ai vector unsigned short
+vec_meadd(vector unsigned char __a, vector unsigned char __b,
+ vector unsigned short __c) {
+ return __builtin_s390_vmaleb(__a, __b, __c);
+}
+
+static inline __ATTRS_o_ai vector signed int
+vec_meadd(vector signed short __a, vector signed short __b,
+ vector signed int __c) {
+ return __builtin_s390_vmaeh(__a, __b, __c);
+}
+
+static inline __ATTRS_o_ai vector unsigned int
+vec_meadd(vector unsigned short __a, vector unsigned short __b,
+ vector unsigned int __c) {
+ return __builtin_s390_vmaleh(__a, __b, __c);
+}
+
+static inline __ATTRS_o_ai vector signed long long
+vec_meadd(vector signed int __a, vector signed int __b,
+ vector signed long long __c) {
+ return __builtin_s390_vmaef(__a, __b, __c);
+}
+
+static inline __ATTRS_o_ai vector unsigned long long
+vec_meadd(vector unsigned int __a, vector unsigned int __b,
+ vector unsigned long long __c) {
+ return __builtin_s390_vmalef(__a, __b, __c);
+}
+
+/*-- vec_moadd --------------------------------------------------------------*/
+
+static inline __ATTRS_o_ai vector signed short
+vec_moadd(vector signed char __a, vector signed char __b,
+ vector signed short __c) {
+ return __builtin_s390_vmaob(__a, __b, __c);
+}
+
+static inline __ATTRS_o_ai vector unsigned short
+vec_moadd(vector unsigned char __a, vector unsigned char __b,
+ vector unsigned short __c) {
+ return __builtin_s390_vmalob(__a, __b, __c);
+}
+
+static inline __ATTRS_o_ai vector signed int
+vec_moadd(vector signed short __a, vector signed short __b,
+ vector signed int __c) {
+ return __builtin_s390_vmaoh(__a, __b, __c);
+}
+
+static inline __ATTRS_o_ai vector unsigned int
+vec_moadd(vector unsigned short __a, vector unsigned short __b,
+ vector unsigned int __c) {
+ return __builtin_s390_vmaloh(__a, __b, __c);
+}
+
+static inline __ATTRS_o_ai vector signed long long
+vec_moadd(vector signed int __a, vector signed int __b,
+ vector signed long long __c) {
+ return __builtin_s390_vmaof(__a, __b, __c);
+}
+
+static inline __ATTRS_o_ai vector unsigned long long
+vec_moadd(vector unsigned int __a, vector unsigned int __b,
+ vector unsigned long long __c) {
+ return __builtin_s390_vmalof(__a, __b, __c);
+}
+
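+/* Usage sketch (illustrative): vec_meadd/vec_moadd widen the even/odd
+ * elements, so together they fold a full element-wise product into a
+ * double-width accumulator:
+ *
+ *   static vector signed int
+ *   mac16(vector signed short __a, vector signed short __b,
+ *         vector signed int __acc) {
+ *     return vec_moadd(__a, __b, vec_meadd(__a, __b, __acc));
+ *   }
+ */
+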
+/*-- vec_mulh ---------------------------------------------------------------*/
+
+static inline __ATTRS_o_ai vector signed char
+vec_mulh(vector signed char __a, vector signed char __b) {
+ return __builtin_s390_vmhb(__a, __b);
+}
+
+static inline __ATTRS_o_ai vector unsigned char
+vec_mulh(vector unsigned char __a, vector unsigned char __b) {
+ return __builtin_s390_vmlhb(__a, __b);
+}
+
+static inline __ATTRS_o_ai vector signed short
+vec_mulh(vector signed short __a, vector signed short __b) {
+ return __builtin_s390_vmhh(__a, __b);
+}
+
+static inline __ATTRS_o_ai vector unsigned short
+vec_mulh(vector unsigned short __a, vector unsigned short __b) {
+ return __builtin_s390_vmlhh(__a, __b);
+}
+
+static inline __ATTRS_o_ai vector signed int
+vec_mulh(vector signed int __a, vector signed int __b) {
+ return __builtin_s390_vmhf(__a, __b);
+}
+
+static inline __ATTRS_o_ai vector unsigned int
+vec_mulh(vector unsigned int __a, vector unsigned int __b) {
+ return __builtin_s390_vmlhf(__a, __b);
+}
+
+/*-- vec_mule ---------------------------------------------------------------*/
+
+static inline __ATTRS_o_ai vector signed short
+vec_mule(vector signed char __a, vector signed char __b) {
+ return __builtin_s390_vmeb(__a, __b);
+}
+
+static inline __ATTRS_o_ai vector unsigned short
+vec_mule(vector unsigned char __a, vector unsigned char __b) {
+ return __builtin_s390_vmleb(__a, __b);
+}
+
+static inline __ATTRS_o_ai vector signed int
+vec_mule(vector signed short __a, vector signed short __b) {
+ return __builtin_s390_vmeh(__a, __b);
+}
+
+static inline __ATTRS_o_ai vector unsigned int
+vec_mule(vector unsigned short __a, vector unsigned short __b) {
+ return __builtin_s390_vmleh(__a, __b);
+}
+
+static inline __ATTRS_o_ai vector signed long long
+vec_mule(vector signed int __a, vector signed int __b) {
+ return __builtin_s390_vmef(__a, __b);
+}
+
+static inline __ATTRS_o_ai vector unsigned long long
+vec_mule(vector unsigned int __a, vector unsigned int __b) {
+ return __builtin_s390_vmlef(__a, __b);
+}
+
+/*-- vec_mulo ---------------------------------------------------------------*/
+
+static inline __ATTRS_o_ai vector signed short
+vec_mulo(vector signed char __a, vector signed char __b) {
+ return __builtin_s390_vmob(__a, __b);
+}
+
+static inline __ATTRS_o_ai vector unsigned short
+vec_mulo(vector unsigned char __a, vector unsigned char __b) {
+ return __builtin_s390_vmlob(__a, __b);
+}
+
+static inline __ATTRS_o_ai vector signed int
+vec_mulo(vector signed short __a, vector signed short __b) {
+ return __builtin_s390_vmoh(__a, __b);
+}
+
+static inline __ATTRS_o_ai vector unsigned int
+vec_mulo(vector unsigned short __a, vector unsigned short __b) {
+ return __builtin_s390_vmloh(__a, __b);
+}
+
+static inline __ATTRS_o_ai vector signed long long
+vec_mulo(vector signed int __a, vector signed int __b) {
+ return __builtin_s390_vmof(__a, __b);
+}
+
+static inline __ATTRS_o_ai vector unsigned long long
+vec_mulo(vector unsigned int __a, vector unsigned int __b) {
+ return __builtin_s390_vmlof(__a, __b);
+}
+
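+/* Note (illustrative): vec_mule/vec_mulo are the non-accumulating
+ * counterparts of vec_meadd/vec_moadd above: each returns the products
+ * of the even- or odd-indexed elements, widened to the next larger
+ * element type.
+ */
+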
+/*-- vec_sub_u128 -----------------------------------------------------------*/
+
+static inline __ATTRS_ai vector unsigned char
+vec_sub_u128(vector unsigned char __a, vector unsigned char __b) {
+ return __builtin_s390_vsq(__a, __b);
+}
+
+/*-- vec_subc ---------------------------------------------------------------*/
+
+static inline __ATTRS_o_ai vector unsigned char
+vec_subc(vector unsigned char __a, vector unsigned char __b) {
+ return __builtin_s390_vscbib(__a, __b);
+}
+
+static inline __ATTRS_o_ai vector unsigned short
+vec_subc(vector unsigned short __a, vector unsigned short __b) {
+ return __builtin_s390_vscbih(__a, __b);
+}
+
+static inline __ATTRS_o_ai vector unsigned int
+vec_subc(vector unsigned int __a, vector unsigned int __b) {
+ return __builtin_s390_vscbif(__a, __b);
+}
+
+static inline __ATTRS_o_ai vector unsigned long long
+vec_subc(vector unsigned long long __a, vector unsigned long long __b) {
+ return __builtin_s390_vscbig(__a, __b);
+}
+
+/*-- vec_subc_u128 ----------------------------------------------------------*/
+
+static inline __ATTRS_ai vector unsigned char
+vec_subc_u128(vector unsigned char __a, vector unsigned char __b) {
+ return __builtin_s390_vscbiq(__a, __b);
+}
+
+/*-- vec_sube_u128 ----------------------------------------------------------*/
+
+static inline __ATTRS_ai vector unsigned char
+vec_sube_u128(vector unsigned char __a, vector unsigned char __b,
+ vector unsigned char __c) {
+ return __builtin_s390_vsbiq(__a, __b, __c);
+}
+
+/*-- vec_subec_u128 ---------------------------------------------------------*/
+
+static inline __ATTRS_ai vector unsigned char
+vec_subec_u128(vector unsigned char __a, vector unsigned char __b,
+ vector unsigned char __c) {
+ return __builtin_s390_vsbcbiq(__a, __b, __c);
+}
+
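+/* Usage sketch (illustrative): the subtract intrinsics mirror the add
+ * family, so a 256-bit subtract chains the borrow indication the same
+ * way (names hypothetical):
+ *
+ *   static void
+ *   sub256(vector unsigned char __alo, vector unsigned char __ahi,
+ *          vector unsigned char __blo, vector unsigned char __bhi,
+ *          vector unsigned char *__lo, vector unsigned char *__hi) {
+ *     vector unsigned char __borrow = vec_subc_u128(__alo, __blo);
+ *     *__lo = vec_sub_u128(__alo, __blo);
+ *     *__hi = vec_sube_u128(__ahi, __bhi, __borrow);
+ *   }
+ */
+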
+/*-- vec_sum2 ---------------------------------------------------------------*/
+
+static inline __ATTRS_o_ai vector unsigned long long
+vec_sum2(vector unsigned short __a, vector unsigned short __b) {
+ return __builtin_s390_vsumgh(__a, __b);
+}
+
+static inline __ATTRS_o_ai vector unsigned long long
+vec_sum2(vector unsigned int __a, vector unsigned int __b) {
+ return __builtin_s390_vsumgf(__a, __b);
+}
+
+/*-- vec_sum_u128 -----------------------------------------------------------*/
+
+static inline __ATTRS_o_ai vector unsigned char
+vec_sum_u128(vector unsigned int __a, vector unsigned int __b) {
+ return __builtin_s390_vsumqf(__a, __b);
+}
+
+static inline __ATTRS_o_ai vector unsigned char
+vec_sum_u128(vector unsigned long long __a, vector unsigned long long __b) {
+ return __builtin_s390_vsumqg(__a, __b);
+}
+
+/*-- vec_sum4 ---------------------------------------------------------------*/
+
+static inline __ATTRS_o_ai vector unsigned int
+vec_sum4(vector unsigned char __a, vector unsigned char __b) {
+ return __builtin_s390_vsumb(__a, __b);
+}
+
+static inline __ATTRS_o_ai vector unsigned int
+vec_sum4(vector unsigned short __a, vector unsigned short __b) {
+ return __builtin_s390_vsumh(__a, __b);
+}
+
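+/* Usage sketch (illustrative): the vec_sum* family performs horizontal
+ * adds; e.g. totalling all 16 bytes of a vector (__bytes hypothetical):
+ *
+ *   vector unsigned int __s4 =
+ *       vec_sum4(__bytes, vec_splats((unsigned char)0)); // 4 partial sums
+ *   vector unsigned char __s =
+ *       vec_sum_u128(__s4, vec_splats(0u));              // 128-bit total
+ */
+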
+/*-- vec_test_mask ----------------------------------------------------------*/
+
+static inline __ATTRS_o_ai int
+vec_test_mask(vector signed char __a, vector unsigned char __b) {
+ return __builtin_s390_vtm((vector unsigned char)__a,
+ (vector unsigned char)__b);
+}
+
+static inline __ATTRS_o_ai int
+vec_test_mask(vector unsigned char __a, vector unsigned char __b) {
+ return __builtin_s390_vtm(__a, __b);
+}
+
+static inline __ATTRS_o_ai int
+vec_test_mask(vector signed short __a, vector unsigned short __b) {
+ return __builtin_s390_vtm((vector unsigned char)__a,
+ (vector unsigned char)__b);
+}
+
+static inline __ATTRS_o_ai int
+vec_test_mask(vector unsigned short __a, vector unsigned short __b) {
+ return __builtin_s390_vtm((vector unsigned char)__a,
+ (vector unsigned char)__b);
+}
+
+static inline __ATTRS_o_ai int
+vec_test_mask(vector signed int __a, vector unsigned int __b) {
+ return __builtin_s390_vtm((vector unsigned char)__a,
+ (vector unsigned char)__b);
+}
+
+static inline __ATTRS_o_ai int
+vec_test_mask(vector unsigned int __a, vector unsigned int __b) {
+ return __builtin_s390_vtm((vector unsigned char)__a,
+ (vector unsigned char)__b);
+}
+
+static inline __ATTRS_o_ai int
+vec_test_mask(vector signed long long __a, vector unsigned long long __b) {
+ return __builtin_s390_vtm((vector unsigned char)__a,
+ (vector unsigned char)__b);
+}
+
+static inline __ATTRS_o_ai int
+vec_test_mask(vector unsigned long long __a, vector unsigned long long __b) {
+ return __builtin_s390_vtm((vector unsigned char)__a,
+ (vector unsigned char)__b);
+}
+
+static inline __ATTRS_o_ai int
+vec_test_mask(vector double __a, vector unsigned long long __b) {
+ return __builtin_s390_vtm((vector unsigned char)__a,
+ (vector unsigned char)__b);
+}
+
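+/* Usage sketch (an assumption, per VECTOR TEST UNDER MASK): the returned
+ * condition code is 0 when all mask-selected bits of __a are zero, 3 when
+ * all are one, and 1 for a mix, so a sign-bit scan might read:
+ *
+ *   vector unsigned int __signs = vec_splats(0x80000000u);
+ *   int __any_negative = (vec_test_mask(__x, __signs) != 0);  // __x hypothetical
+ */
+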
+/*-- vec_madd ---------------------------------------------------------------*/
+
+static inline __ATTRS_ai vector double
+vec_madd(vector double __a, vector double __b, vector double __c) {
+ return __builtin_s390_vfmadb(__a, __b, __c);
+}
+
+/*-- vec_msub ---------------------------------------------------------------*/
+
+static inline __ATTRS_ai vector double
+vec_msub(vector double __a, vector double __b, vector double __c) {
+ return __builtin_s390_vfmsdb(__a, __b, __c);
+}
+
+/*-- vec_sqrt ---------------------------------------------------------------*/
+
+static inline __ATTRS_ai vector double
+vec_sqrt(vector double __a) {
+ return __builtin_s390_vfsqdb(__a);
+}
+
+/*-- vec_ld2f ---------------------------------------------------------------*/
+
+static inline __ATTRS_ai vector double
+vec_ld2f(const float *__ptr) {
+ typedef float __v2f32 __attribute__((__vector_size__(8)));
+ return __builtin_convertvector(*(const __v2f32 *)__ptr, vector double);
+}
+
+/*-- vec_st2f ---------------------------------------------------------------*/
+
+static inline __ATTRS_ai void
+vec_st2f(vector double __a, float *__ptr) {
+ typedef float __v2f32 __attribute__((__vector_size__(8)));
+ *(__v2f32 *)__ptr = __builtin_convertvector(__a, __v2f32);
+}
+
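+/* Usage sketch (illustrative): vec_ld2f/vec_st2f convert between a pair
+ * of floats in memory and a vector double, so float data can be processed
+ * at double precision (names hypothetical):
+ *
+ *   static void
+ *   scale2f(float *__p, double __s) {
+ *     vec_st2f(vec_ld2f(__p) * vec_splats(__s), __p);
+ *   }
+ */
+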
+/*-- vec_ctd ----------------------------------------------------------------*/
+
+static inline __ATTRS_o_ai vector double
+vec_ctd(vector signed long long __a, int __b)
+ __constant_range(__b, 0, 31) {
+ vector double __conv = __builtin_convertvector(__a, vector double);
+ // Scale by 2^-__b: (0x3ffULL - __b) << 52 is the raw IEEE-754 bit
+ // pattern of the double 2.0^-__b (biased exponent 1023 - __b).
+ __conv *= (vector double)(vector unsigned long long)((0x3ffULL - __b) << 52);
+ return __conv;
+}
+
+static inline __ATTRS_o_ai vector double
+vec_ctd(vector unsigned long long __a, int __b)
+ __constant_range(__b, 0, 31) {
+ vector double __conv = __builtin_convertvector(__a, vector double);
+ // Same 2^-__b scaling as in the signed overload above.
+ __conv *= (vector double)(vector unsigned long long)((0x3ffULL - __b) << 52);
+ return __conv;
+}
+
+/*-- vec_ctsl ---------------------------------------------------------------*/
+
+static inline __ATTRS_o_ai vector signed long long
+vec_ctsl(vector double __a, int __b)
+ __constant_range(__b, 0, 31) {
+ // Pre-scale by 2^__b: (0x3ffULL + __b) << 52 is the bit pattern of the
+ // double 2.0^__b, applied before truncating to a fixed-point integer.
+ __a *= (vector double)(vector unsigned long long)((0x3ffULL + __b) << 52);
+ return __builtin_convertvector(__a, vector signed long long);
+}
+
+/*-- vec_ctul ---------------------------------------------------------------*/
+
+static inline __ATTRS_o_ai vector unsigned long long
+vec_ctul(vector double __a, int __b)
+ __constant_range(__b, 0, 31) {
+ // Pre-scale by 2.0^__b, as in vec_ctsl above.
+ __a *= (vector double)(vector unsigned long long)((0x3ffULL + __b) << 52);
+ return __builtin_convertvector(__a, vector unsigned long long);
+}
+
+/*-- vec_roundp -------------------------------------------------------------*/
+
+static inline __ATTRS_ai vector double
+vec_roundp(vector double __a) {
+ // Operand 4 sets the inexact-suppression (XxC) bit; mode 6 rounds
+ // toward +infinity.
+ return __builtin_s390_vfidb(__a, 4, 6);
+}
+
+/*-- vec_ceil ---------------------------------------------------------------*/
+
+static inline __ATTRS_ai vector double
+vec_ceil(vector double __a) {
+ // On this platform, vec_ceil never triggers the IEEE-inexact exception.
+ return __builtin_s390_vfidb(__a, 4, 6);
+}
+
+/*-- vec_roundm -------------------------------------------------------------*/
+
+static inline __ATTRS_ai vector double
+vec_roundm(vector double __a) {
+ // Mode 7 rounds toward -infinity; 4 again suppresses IEEE-inexact.
+ return __builtin_s390_vfidb(__a, 4, 7);
+}
+
+/*-- vec_floor --------------------------------------------------------------*/
+
+static inline __ATTRS_ai vector double
+vec_floor(vector double __a) {
+ // On this platform, vec_floor never triggers the IEEE-inexact exception.
+ return __builtin_s390_vfidb(__a, 4, 7);
+}
+
+/*-- vec_roundz -------------------------------------------------------------*/
+
+static inline __ATTRS_ai vector double
+vec_roundz(vector double __a) {
+ // Mode 5 rounds toward zero (truncation).
+ return __builtin_s390_vfidb(__a, 4, 5);
+}
+
+/*-- vec_trunc --------------------------------------------------------------*/
+
+static inline __ATTRS_ai vector double
+vec_trunc(vector double __a) {
+ // On this platform, vec_trunc never triggers the IEEE-inexact exception.
+ return __builtin_s390_vfidb(__a, 4, 5);
+}
+
+/*-- vec_roundc -------------------------------------------------------------*/
+
+static inline __ATTRS_ai vector double
+vec_roundc(vector double __a) {
+ // Mode 0 rounds according to the current mode in the FPC register.
+ return __builtin_s390_vfidb(__a, 4, 0);
+}
+
+/*-- vec_round --------------------------------------------------------------*/
+
+static inline __ATTRS_ai vector double
+vec_round(vector double __a) {
+ // Mode 4 rounds to nearest with ties to even.
+ return __builtin_s390_vfidb(__a, 4, 4);
+}
+
+/*-- vec_fp_test_data_class -------------------------------------------------*/
+
+// (X) is the vector double to classify, (Y) a constant class-selection
+// mask, and (Z) an int * receiving the condition code (an assumption
+// based on the __builtin_s390_vftcidb signature).
+#define vec_fp_test_data_class(X, Y, Z) \
+  ((vector bool long long)__builtin_s390_vftcidb((X), (Y), (Z)))
+
+/*-- vec_cp_until_zero ------------------------------------------------------*/
+
+static inline __ATTRS_o_ai vector signed char
+vec_cp_until_zero(vector signed char __a) {
+ return (vector signed char)__builtin_s390_vistrb((vector unsigned char)__a);
+}
+
+static inline __ATTRS_o_ai vector bool char
+vec_cp_until_zero(vector bool char __a) {
+ return (vector bool char)__builtin_s390_vistrb((vector unsigned char)__a);
+}
+
+static inline __ATTRS_o_ai vector unsigned char
+vec_cp_until_zero(vector unsigned char __a) {
+ return __builtin_s390_vistrb(__a);
+}
+
+static inline __ATTRS_o_ai vector signed short
+vec_cp_until_zero(vector signed short __a) {
+ return (vector signed short)__builtin_s390_vistrh((vector unsigned short)__a);
+}
+
+static inline __ATTRS_o_ai vector bool short
+vec_cp_until_zero(vector bool short __a) {
+ return (vector bool short)__builtin_s390_vistrh((vector unsigned short)__a);
+}
+
+static inline __ATTRS_o_ai vector unsigned short
+vec_cp_until_zero(vector unsigned short __a) {
+ return __builtin_s390_vistrh(__a);
+}
+
+static inline __ATTRS_o_ai vector signed int
+vec_cp_until_zero(vector signed int __a) {
+ return (vector signed int)__builtin_s390_vistrf((vector unsigned int)__a);
+}
+
+static inline __ATTRS_o_ai vector bool int
+vec_cp_until_zero(vector bool int __a) {
+ return (vector bool int)__builtin_s390_vistrf((vector unsigned int)__a);
+}
+
+static inline __ATTRS_o_ai vector unsigned int
+vec_cp_until_zero(vector unsigned int __a) {
+ return __builtin_s390_vistrf(__a);
+}
+
+/*-- vec_cp_until_zero_cc ---------------------------------------------------*/
+
+static inline __ATTRS_o_ai vector signed char
+vec_cp_until_zero_cc(vector signed char __a, int *__cc) {
+ return (vector signed char)
+ __builtin_s390_vistrbs((vector unsigned char)__a, __cc);
+}
+
+static inline __ATTRS_o_ai vector bool char
+vec_cp_until_zero_cc(vector bool char __a, int *__cc) {
+ return (vector bool char)
+ __builtin_s390_vistrbs((vector unsigned char)__a, __cc);
+}
+
+static inline __ATTRS_o_ai vector unsigned char
+vec_cp_until_zero_cc(vector unsigned char __a, int *__cc) {
+ return __builtin_s390_vistrbs(__a, __cc);
+}
+
+static inline __ATTRS_o_ai vector signed short
+vec_cp_until_zero_cc(vector signed short __a, int *__cc) {
+ return (vector signed short)
+ __builtin_s390_vistrhs((vector unsigned short)__a, __cc);
+}
+
+static inline __ATTRS_o_ai vector bool short
+vec_cp_until_zero_cc(vector bool short __a, int *__cc) {
+ return (vector bool short)
+ __builtin_s390_vistrhs((vector unsigned short)__a, __cc);
+}
+
+static inline __ATTRS_o_ai vector unsigned short
+vec_cp_until_zero_cc(vector unsigned short __a, int *__cc) {
+ return __builtin_s390_vistrhs(__a, __cc);
+}
+
+static inline __ATTRS_o_ai vector signed int
+vec_cp_until_zero_cc(vector signed int __a, int *__cc) {
+ return (vector signed int)
+ __builtin_s390_vistrfs((vector unsigned int)__a, __cc);
+}
+
+static inline __ATTRS_o_ai vector bool int
+vec_cp_until_zero_cc(vector bool int __a, int *__cc) {
+ return (vector bool int)__builtin_s390_vistrfs((vector unsigned int)__a,
+ __cc);
+}
+
+static inline __ATTRS_o_ai vector unsigned int
+vec_cp_until_zero_cc(vector unsigned int __a, int *__cc) {
+ return __builtin_s390_vistrfs(__a, __cc);
+}
+
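+/* Note (an assumption, per VECTOR ISOLATE STRING): the result keeps every
+ * element up to and including the first zero element and clears all
+ * elements after it, which isolates a NUL-terminated fragment held in a
+ * single 16-byte register; the _cc variants above additionally report via
+ * *__cc whether a zero element was found.
+ */
+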
+/*-- vec_cmpeq_idx ----------------------------------------------------------*/
+
+static inline __ATTRS_o_ai vector signed char
+vec_cmpeq_idx(vector signed char __a, vector signed char __b) {
+ return (vector signed char)
+ __builtin_s390_vfeeb((vector unsigned char)__a,
+ (vector unsigned char)__b);
+}
+
+static inline __ATTRS_o_ai vector unsigned char
+vec_cmpeq_idx(vector bool char __a, vector bool char __b) {
+ return __builtin_s390_vfeeb((vector unsigned char)__a,
+ (vector unsigned char)__b);
+}
+
+static inline __ATTRS_o_ai vector unsigned char
+vec_cmpeq_idx(vector unsigned char __a, vector unsigned char __b) {
+ return __builtin_s390_vfeeb(__a, __b);
+}
+
+static inline __ATTRS_o_ai vector signed short
+vec_cmpeq_idx(vector signed short __a, vector signed short __b) {
+ return (vector signed short)
+ __builtin_s390_vfeeh((vector unsigned short)__a,
+ (vector unsigned short)__b);
+}
+
+static inline __ATTRS_o_ai vector unsigned short
+vec_cmpeq_idx(vector bool short __a, vector bool short __b) {
+ return __builtin_s390_vfeeh((vector unsigned short)__a,
+ (vector unsigned short)__b);
+}
+
+static inline __ATTRS_o_ai vector unsigned short
+vec_cmpeq_idx(vector unsigned short __a, vector unsigned short __b) {
+ return __builtin_s390_vfeeh(__a, __b);
+}
+
+static inline __ATTRS_o_ai vector signed int
+vec_cmpeq_idx(vector signed int __a, vector signed int __b) {
+ return (vector signed int)
+ __builtin_s390_vfeef((vector unsigned int)__a,
+ (vector unsigned int)__b);
+}
+
+static inline __ATTRS_o_ai vector unsigned int
+vec_cmpeq_idx(vector bool int __a, vector bool int __b) {
+ return __builtin_s390_vfeef((vector unsigned int)__a,
+ (vector unsigned int)__b);
+}
+
+static inline __ATTRS_o_ai vector unsigned int
+vec_cmpeq_idx(vector unsigned int __a, vector unsigned int __b) {
+ return __builtin_s390_vfeef(__a, __b);
+}
+
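+/* Usage sketch (an assumption, per VECTOR FIND ELEMENT EQUAL): byte
+ * element 7 of the result holds the byte index of the first equal
+ * element, or 16 if none matched, so a strchr-style scan over one
+ * register might read (__chunk and __ch hypothetical):
+ *
+ *   vector unsigned char __idx = vec_cmpeq_idx(__chunk, vec_splats(__ch));
+ *   int __pos = __idx[7];   // 16 means "not found in this chunk"
+ */
+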
+/*-- vec_cmpeq_idx_cc -------------------------------------------------------*/
+
+static inline __ATTRS_o_ai vector signed char
+vec_cmpeq_idx_cc(vector signed char __a, vector signed char __b, int *__cc) {
+ return (vector signed char)
+ __builtin_s390_vfeebs((vector unsigned char)__a,
+ (vector unsigned char)__b, __cc);
+}
+
+static inline __ATTRS_o_ai vector unsigned char
+vec_cmpeq_idx_cc(vector bool char __a, vector bool char __b, int *__cc) {
+ return __builtin_s390_vfeebs((vector unsigned char)__a,
+ (vector unsigned char)__b, __cc);
+}
+
+static inline __ATTRS_o_ai vector unsigned char
+vec_cmpeq_idx_cc(vector unsigned char __a, vector unsigned char __b,
+ int *__cc) {
+ return __builtin_s390_vfeebs(__a, __b, __cc);
+}
+
+static inline __ATTRS_o_ai vector signed short
+vec_cmpeq_idx_cc(vector signed short __a, vector signed short __b, int *__cc) {
+ return (vector signed short)
+ __builtin_s390_vfeehs((vector unsigned short)__a,
+ (vector unsigned short)__b, __cc);
+}
+
+static inline __ATTRS_o_ai vector unsigned short
+vec_cmpeq_idx_cc(vector bool short __a, vector bool short __b, int *__cc) {
+ return __builtin_s390_vfeehs((vector unsigned short)__a,
+ (vector unsigned short)__b, __cc);
+}
+
+static inline __ATTRS_o_ai vector unsigned short
+vec_cmpeq_idx_cc(vector unsigned short __a, vector unsigned short __b,
+ int *__cc) {
+ return __builtin_s390_vfeehs(__a, __b, __cc);
+}
+
+static inline __ATTRS_o_ai vector signed int
+vec_cmpeq_idx_cc(vector signed int __a, vector signed int __b, int *__cc) {
+ return (vector signed int)
+ __builtin_s390_vfeefs((vector unsigned int)__a,
+ (vector unsigned int)__b, __cc);
+}
+
+static inline __ATTRS_o_ai vector unsigned int
+vec_cmpeq_idx_cc(vector bool int __a, vector bool int __b, int *__cc) {
+ return __builtin_s390_vfeefs((vector unsigned int)__a,
+ (vector unsigned int)__b, __cc);
+}
+
+static inline __ATTRS_o_ai vector unsigned int
+vec_cmpeq_idx_cc(vector unsigned int __a, vector unsigned int __b, int *__cc) {
+ return __builtin_s390_vfeefs(__a, __b, __cc);
+}
+
+/*-- vec_cmpeq_or_0_idx -----------------------------------------------------*/
+
+static inline __ATTRS_o_ai vector signed char
+vec_cmpeq_or_0_idx(vector signed char __a, vector signed char __b) {
+ return (vector signed char)
+ __builtin_s390_vfeezb((vector unsigned char)__a,
+ (vector unsigned char)__b);
+}
+
+static inline __ATTRS_o_ai vector unsigned char
+vec_cmpeq_or_0_idx(vector bool char __a, vector bool char __b) {
+ return __builtin_s390_vfeezb((vector unsigned char)__a,
+ (vector unsigned char)__b);
+}
+
+static inline __ATTRS_o_ai vector unsigned char
+vec_cmpeq_or_0_idx(vector unsigned char __a, vector unsigned char __b) {
+ return __builtin_s390_vfeezb(__a, __b);
+}
+
+static inline __ATTRS_o_ai vector signed short
+vec_cmpeq_or_0_idx(vector signed short __a, vector signed short __b) {
+ return (vector signed short)
+ __builtin_s390_vfeezh((vector unsigned short)__a,
+ (vector unsigned short)__b);
+}
+
+static inline __ATTRS_o_ai vector unsigned short
+vec_cmpeq_or_0_idx(vector bool short __a, vector bool short __b) {
+ return __builtin_s390_vfeezh((vector unsigned short)__a,
+ (vector unsigned short)__b);
+}
+
+static inline __ATTRS_o_ai vector unsigned short
+vec_cmpeq_or_0_idx(vector unsigned short __a, vector unsigned short __b) {
+ return __builtin_s390_vfeezh(__a, __b);
+}
+
+static inline __ATTRS_o_ai vector signed int
+vec_cmpeq_or_0_idx(vector signed int __a, vector signed int __b) {
+ return (vector signed int)
+ __builtin_s390_vfeezf((vector unsigned int)__a,
+ (vector unsigned int)__b);
+}
+
+static inline __ATTRS_o_ai vector unsigned int
+vec_cmpeq_or_0_idx(vector bool int __a, vector bool int __b) {
+ return __builtin_s390_vfeezf((vector unsigned int)__a,
+ (vector unsigned int)__b);
+}
+
+static inline __ATTRS_o_ai vector unsigned int
+vec_cmpeq_or_0_idx(vector unsigned int __a, vector unsigned int __b) {
+ return __builtin_s390_vfeezf(__a, __b);
+}
+
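+/*
+ * Editorial note (added; hedged): the _or_0_ variants use the VFEEZ*
+ * builtins, which additionally search for a zero element, so the scan
+ * stops at a NUL terminator as well as at a match. A strchr-style sketch,
+ * assuming byte 7 carries the index as above:
+ *
+ *   vector unsigned char r = vec_cmpeq_or_0_idx(chunk, needles);
+ *   unsigned pos = r[7];   // first match or terminator, 16 = neither
+ */
+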
+/*-- vec_cmpeq_or_0_idx_cc --------------------------------------------------*/
+
+static inline __ATTRS_o_ai vector signed char
+vec_cmpeq_or_0_idx_cc(vector signed char __a, vector signed char __b,
+ int *__cc) {
+ return (vector signed char)
+ __builtin_s390_vfeezbs((vector unsigned char)__a,
+ (vector unsigned char)__b, __cc);
+}
+
+static inline __ATTRS_o_ai vector unsigned char
+vec_cmpeq_or_0_idx_cc(vector bool char __a, vector bool char __b, int *__cc) {
+ return __builtin_s390_vfeezbs((vector unsigned char)__a,
+ (vector unsigned char)__b, __cc);
+}
+
+static inline __ATTRS_o_ai vector unsigned char
+vec_cmpeq_or_0_idx_cc(vector unsigned char __a, vector unsigned char __b,
+ int *__cc) {
+ return __builtin_s390_vfeezbs(__a, __b, __cc);
+}
+
+static inline __ATTRS_o_ai vector signed short
+vec_cmpeq_or_0_idx_cc(vector signed short __a, vector signed short __b,
+ int *__cc) {
+ return (vector signed short)
+ __builtin_s390_vfeezhs((vector unsigned short)__a,
+ (vector unsigned short)__b, __cc);
+}
+
+static inline __ATTRS_o_ai vector unsigned short
+vec_cmpeq_or_0_idx_cc(vector bool short __a, vector bool short __b, int *__cc) {
+ return __builtin_s390_vfeezhs((vector unsigned short)__a,
+ (vector unsigned short)__b, __cc);
+}
+
+static inline __ATTRS_o_ai vector unsigned short
+vec_cmpeq_or_0_idx_cc(vector unsigned short __a, vector unsigned short __b,
+ int *__cc) {
+ return __builtin_s390_vfeezhs(__a, __b, __cc);
+}
+
+static inline __ATTRS_o_ai vector signed int
+vec_cmpeq_or_0_idx_cc(vector signed int __a, vector signed int __b, int *__cc) {
+ return (vector signed int)
+ __builtin_s390_vfeezfs((vector unsigned int)__a,
+ (vector unsigned int)__b, __cc);
+}
+
+static inline __ATTRS_o_ai vector unsigned int
+vec_cmpeq_or_0_idx_cc(vector bool int __a, vector bool int __b, int *__cc) {
+ return __builtin_s390_vfeezfs((vector unsigned int)__a,
+ (vector unsigned int)__b, __cc);
+}
+
+static inline __ATTRS_o_ai vector unsigned int
+vec_cmpeq_or_0_idx_cc(vector unsigned int __a, vector unsigned int __b,
+ int *__cc) {
+ return __builtin_s390_vfeezfs(__a, __b, __cc);
+}
+
+/*-- vec_cmpne_idx ----------------------------------------------------------*/
+
+static inline __ATTRS_o_ai vector signed char
+vec_cmpne_idx(vector signed char __a, vector signed char __b) {
+ return (vector signed char)
+ __builtin_s390_vfeneb((vector unsigned char)__a,
+ (vector unsigned char)__b);
+}
+
+static inline __ATTRS_o_ai vector unsigned char
+vec_cmpne_idx(vector bool char __a, vector bool char __b) {
+ return __builtin_s390_vfeneb((vector unsigned char)__a,
+ (vector unsigned char)__b);
+}
+
+static inline __ATTRS_o_ai vector unsigned char
+vec_cmpne_idx(vector unsigned char __a, vector unsigned char __b) {
+ return __builtin_s390_vfeneb(__a, __b);
+}
+
+static inline __ATTRS_o_ai vector signed short
+vec_cmpne_idx(vector signed short __a, vector signed short __b) {
+ return (vector signed short)
+ __builtin_s390_vfeneh((vector unsigned short)__a,
+ (vector unsigned short)__b);
+}
+
+static inline __ATTRS_o_ai vector unsigned short
+vec_cmpne_idx(vector bool short __a, vector bool short __b) {
+ return __builtin_s390_vfeneh((vector unsigned short)__a,
+ (vector unsigned short)__b);
+}
+
+static inline __ATTRS_o_ai vector unsigned short
+vec_cmpne_idx(vector unsigned short __a, vector unsigned short __b) {
+ return __builtin_s390_vfeneh(__a, __b);
+}
+
+static inline __ATTRS_o_ai vector signed int
+vec_cmpne_idx(vector signed int __a, vector signed int __b) {
+ return (vector signed int)
+ __builtin_s390_vfenef((vector unsigned int)__a,
+ (vector unsigned int)__b);
+}
+
+static inline __ATTRS_o_ai vector unsigned int
+vec_cmpne_idx(vector bool int __a, vector bool int __b) {
+ return __builtin_s390_vfenef((vector unsigned int)__a,
+ (vector unsigned int)__b);
+}
+
+static inline __ATTRS_o_ai vector unsigned int
+vec_cmpne_idx(vector unsigned int __a, vector unsigned int __b) {
+ return __builtin_s390_vfenef(__a, __b);
+}
+
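+/*
+ * Editorial note (added; hedged): vec_cmpne_idx mirrors vec_cmpeq_idx with
+ * the predicate inverted (VFENE, "Vector Find Element Not Equal"): byte 7
+ * is expected to hold the index of the first position where __a and __b
+ * differ, making it a natural building block for a vectorized memcmp-style
+ * first-difference search.
+ */
+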
+/*-- vec_cmpne_idx_cc -------------------------------------------------------*/
+
+static inline __ATTRS_o_ai vector signed char
+vec_cmpne_idx_cc(vector signed char __a, vector signed char __b, int *__cc) {
+ return (vector signed char)
+ __builtin_s390_vfenebs((vector unsigned char)__a,
+ (vector unsigned char)__b, __cc);
+}
+
+static inline __ATTRS_o_ai vector unsigned char
+vec_cmpne_idx_cc(vector bool char __a, vector bool char __b, int *__cc) {
+ return __builtin_s390_vfenebs((vector unsigned char)__a,
+ (vector unsigned char)__b, __cc);
+}
+
+static inline __ATTRS_o_ai vector unsigned char
+vec_cmpne_idx_cc(vector unsigned char __a, vector unsigned char __b,
+ int *__cc) {
+ return __builtin_s390_vfenebs(__a, __b, __cc);
+}
+
+static inline __ATTRS_o_ai vector signed short
+vec_cmpne_idx_cc(vector signed short __a, vector signed short __b, int *__cc) {
+ return (vector signed short)
+ __builtin_s390_vfenehs((vector unsigned short)__a,
+ (vector unsigned short)__b, __cc);
+}
+
+static inline __ATTRS_o_ai vector unsigned short
+vec_cmpne_idx_cc(vector bool short __a, vector bool short __b, int *__cc) {
+ return __builtin_s390_vfenehs((vector unsigned short)__a,
+ (vector unsigned short)__b, __cc);
+}
+
+static inline __ATTRS_o_ai vector unsigned short
+vec_cmpne_idx_cc(vector unsigned short __a, vector unsigned short __b,
+ int *__cc) {
+ return __builtin_s390_vfenehs(__a, __b, __cc);
+}
+
+static inline __ATTRS_o_ai vector signed int
+vec_cmpne_idx_cc(vector signed int __a, vector signed int __b, int *__cc) {
+ return (vector signed int)
+ __builtin_s390_vfenefs((vector unsigned int)__a,
+ (vector unsigned int)__b, __cc);
+}
+
+static inline __ATTRS_o_ai vector unsigned int
+vec_cmpne_idx_cc(vector bool int __a, vector bool int __b, int *__cc) {
+ return __builtin_s390_vfenefs((vector unsigned int)__a,
+ (vector unsigned int)__b, __cc);
+}
+
+static inline __ATTRS_o_ai vector unsigned int
+vec_cmpne_idx_cc(vector unsigned int __a, vector unsigned int __b, int *__cc) {
+ return __builtin_s390_vfenefs(__a, __b, __cc);
+}
+
+/*-- vec_cmpne_or_0_idx -----------------------------------------------------*/
+
+static inline __ATTRS_o_ai vector signed char
+vec_cmpne_or_0_idx(vector signed char __a, vector signed char __b) {
+ return (vector signed char)
+ __builtin_s390_vfenezb((vector unsigned char)__a,
+ (vector unsigned char)__b);
+}
+
+static inline __ATTRS_o_ai vector unsigned char
+vec_cmpne_or_0_idx(vector bool char __a, vector bool char __b) {
+ return __builtin_s390_vfenezb((vector unsigned char)__a,
+ (vector unsigned char)__b);
+}
+
+static inline __ATTRS_o_ai vector unsigned char
+vec_cmpne_or_0_idx(vector unsigned char __a, vector unsigned char __b) {
+ return __builtin_s390_vfenezb(__a, __b);
+}
+
+static inline __ATTRS_o_ai vector signed short
+vec_cmpne_or_0_idx(vector signed short __a, vector signed short __b) {
+ return (vector signed short)
+ __builtin_s390_vfenezh((vector unsigned short)__a,
+ (vector unsigned short)__b);
+}
+
+static inline __ATTRS_o_ai vector unsigned short
+vec_cmpne_or_0_idx(vector bool short __a, vector bool short __b) {
+ return __builtin_s390_vfenezh((vector unsigned short)__a,
+ (vector unsigned short)__b);
+}
+
+static inline __ATTRS_o_ai vector unsigned short
+vec_cmpne_or_0_idx(vector unsigned short __a, vector unsigned short __b) {
+ return __builtin_s390_vfenezh(__a, __b);
+}
+
+static inline __ATTRS_o_ai vector signed int
+vec_cmpne_or_0_idx(vector signed int __a, vector signed int __b) {
+ return (vector signed int)
+ __builtin_s390_vfenezf((vector unsigned int)__a,
+ (vector unsigned int)__b);
+}
+
+static inline __ATTRS_o_ai vector unsigned int
+vec_cmpne_or_0_idx(vector bool int __a, vector bool int __b) {
+ return __builtin_s390_vfenezf((vector unsigned int)__a,
+ (vector unsigned int)__b);
+}
+
+static inline __ATTRS_o_ai vector unsigned int
+vec_cmpne_or_0_idx(vector unsigned int __a, vector unsigned int __b) {
+ return __builtin_s390_vfenezf(__a, __b);
+}
+
+/*-- vec_cmpne_or_0_idx_cc --------------------------------------------------*/
+
+static inline __ATTRS_o_ai vector signed char
+vec_cmpne_or_0_idx_cc(vector signed char __a, vector signed char __b,
+ int *__cc) {
+ return (vector signed char)
+ __builtin_s390_vfenezbs((vector unsigned char)__a,
+ (vector unsigned char)__b, __cc);
+}
+
+static inline __ATTRS_o_ai vector unsigned char
+vec_cmpne_or_0_idx_cc(vector bool char __a, vector bool char __b, int *__cc) {
+ return __builtin_s390_vfenezbs((vector unsigned char)__a,
+ (vector unsigned char)__b, __cc);
+}
+
+static inline __ATTRS_o_ai vector unsigned char
+vec_cmpne_or_0_idx_cc(vector unsigned char __a, vector unsigned char __b,
+ int *__cc) {
+ return __builtin_s390_vfenezbs(__a, __b, __cc);
+}
+
+static inline __ATTRS_o_ai vector signed short
+vec_cmpne_or_0_idx_cc(vector signed short __a, vector signed short __b,
+ int *__cc) {
+ return (vector signed short)
+ __builtin_s390_vfenezhs((vector unsigned short)__a,
+ (vector unsigned short)__b, __cc);
+}
+
+static inline __ATTRS_o_ai vector unsigned short
+vec_cmpne_or_0_idx_cc(vector bool short __a, vector bool short __b, int *__cc) {
+ return __builtin_s390_vfenezhs((vector unsigned short)__a,
+ (vector unsigned short)__b, __cc);
+}
+
+static inline __ATTRS_o_ai vector unsigned short
+vec_cmpne_or_0_idx_cc(vector unsigned short __a, vector unsigned short __b,
+ int *__cc) {
+ return __builtin_s390_vfenezhs(__a, __b, __cc);
+}
+
+static inline __ATTRS_o_ai vector signed int
+vec_cmpne_or_0_idx_cc(vector signed int __a, vector signed int __b, int *__cc) {
+ return (vector signed int)
+ __builtin_s390_vfenezfs((vector unsigned int)__a,
+ (vector unsigned int)__b, __cc);
+}
+
+static inline __ATTRS_o_ai vector unsigned int
+vec_cmpne_or_0_idx_cc(vector bool int __a, vector bool int __b, int *__cc) {
+ return __builtin_s390_vfenezfs((vector unsigned int)__a,
+ (vector unsigned int)__b, __cc);
+}
+
+static inline __ATTRS_o_ai vector unsigned int
+vec_cmpne_or_0_idx_cc(vector unsigned int __a, vector unsigned int __b,
+ int *__cc) {
+ return __builtin_s390_vfenezfs(__a, __b, __cc);
+}
+
+/*-- vec_cmprg --------------------------------------------------------------*/
+
+static inline __ATTRS_o_ai vector bool char
+vec_cmprg(vector unsigned char __a, vector unsigned char __b,
+ vector unsigned char __c) {
+ return (vector bool char)__builtin_s390_vstrcb(__a, __b, __c, 4);
+}
+
+static inline __ATTRS_o_ai vector bool short
+vec_cmprg(vector unsigned short __a, vector unsigned short __b,
+ vector unsigned short __c) {
+ return (vector bool short)__builtin_s390_vstrch(__a, __b, __c, 4);
+}
+
+static inline __ATTRS_o_ai vector bool int
+vec_cmprg(vector unsigned int __a, vector unsigned int __b,
+ vector unsigned int __c) {
+ return (vector bool int)__builtin_s390_vstrcf(__a, __b, __c, 4);
+}
+
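+/*
+ * Editorial note (added; hedged): vec_cmprg and the related overloads below
+ * wrap the VSTRC ("Vector String Range Compare") builtins, which test each
+ * element of __a against range boundaries in __b under per-element
+ * comparison controls in __c. The trailing integer literal (0, 4, 8, or 12
+ * in this file) plausibly encodes the instruction's flag bits: 4 selects a
+ * boolean-mask result instead of an index, and 8 inverts the ranges (the
+ * "nrg" forms), while the _or_0_ forms use separate zero-searching
+ * builtins. Treat that bit assignment as an assumption; only the overload
+ * names are authoritative here.
+ */
+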
+/*-- vec_cmprg_cc -----------------------------------------------------------*/
+
+static inline __ATTRS_o_ai vector bool char
+vec_cmprg_cc(vector unsigned char __a, vector unsigned char __b,
+ vector unsigned char __c, int *__cc) {
+ return (vector bool char)__builtin_s390_vstrcbs(__a, __b, __c, 4, __cc);
+}
+
+static inline __ATTRS_o_ai vector bool short
+vec_cmprg_cc(vector unsigned short __a, vector unsigned short __b,
+ vector unsigned short __c, int *__cc) {
+ return (vector bool short)__builtin_s390_vstrchs(__a, __b, __c, 4, __cc);
+}
+
+static inline __ATTRS_o_ai vector bool int
+vec_cmprg_cc(vector unsigned int __a, vector unsigned int __b,
+ vector unsigned int __c, int *__cc) {
+ return (vector bool int)__builtin_s390_vstrcfs(__a, __b, __c, 4, __cc);
+}
+
+/*-- vec_cmprg_idx ----------------------------------------------------------*/
+
+static inline __ATTRS_o_ai vector unsigned char
+vec_cmprg_idx(vector unsigned char __a, vector unsigned char __b,
+ vector unsigned char __c) {
+ return __builtin_s390_vstrcb(__a, __b, __c, 0);
+}
+
+static inline __ATTRS_o_ai vector unsigned short
+vec_cmprg_idx(vector unsigned short __a, vector unsigned short __b,
+ vector unsigned short __c) {
+ return __builtin_s390_vstrch(__a, __b, __c, 0);
+}
+
+static inline __ATTRS_o_ai vector unsigned int
+vec_cmprg_idx(vector unsigned int __a, vector unsigned int __b,
+ vector unsigned int __c) {
+ return __builtin_s390_vstrcf(__a, __b, __c, 0);
+}
+
+/*-- vec_cmprg_idx_cc -------------------------------------------------------*/
+
+static inline __ATTRS_o_ai vector unsigned char
+vec_cmprg_idx_cc(vector unsigned char __a, vector unsigned char __b,
+ vector unsigned char __c, int *__cc) {
+ return __builtin_s390_vstrcbs(__a, __b, __c, 0, __cc);
+}
+
+static inline __ATTRS_o_ai vector unsigned short
+vec_cmprg_idx_cc(vector unsigned short __a, vector unsigned short __b,
+ vector unsigned short __c, int *__cc) {
+ return __builtin_s390_vstrchs(__a, __b, __c, 0, __cc);
+}
+
+static inline __ATTRS_o_ai vector unsigned int
+vec_cmprg_idx_cc(vector unsigned int __a, vector unsigned int __b,
+ vector unsigned int __c, int *__cc) {
+ return __builtin_s390_vstrcfs(__a, __b, __c, 0, __cc);
+}
+
+/*-- vec_cmprg_or_0_idx -----------------------------------------------------*/
+
+static inline __ATTRS_o_ai vector unsigned char
+vec_cmprg_or_0_idx(vector unsigned char __a, vector unsigned char __b,
+ vector unsigned char __c) {
+ return __builtin_s390_vstrczb(__a, __b, __c, 0);
+}
+
+static inline __ATTRS_o_ai vector unsigned short
+vec_cmprg_or_0_idx(vector unsigned short __a, vector unsigned short __b,
+ vector unsigned short __c) {
+ return __builtin_s390_vstrczh(__a, __b, __c, 0);
+}
+
+static inline __ATTRS_o_ai vector unsigned int
+vec_cmprg_or_0_idx(vector unsigned int __a, vector unsigned int __b,
+ vector unsigned int __c) {
+ return __builtin_s390_vstrczf(__a, __b, __c, 0);
+}
+
+/*-- vec_cmprg_or_0_idx_cc --------------------------------------------------*/
+
+static inline __ATTRS_o_ai vector unsigned char
+vec_cmprg_or_0_idx_cc(vector unsigned char __a, vector unsigned char __b,
+ vector unsigned char __c, int *__cc) {
+ return __builtin_s390_vstrczbs(__a, __b, __c, 0, __cc);
+}
+
+static inline __ATTRS_o_ai vector unsigned short
+vec_cmprg_or_0_idx_cc(vector unsigned short __a, vector unsigned short __b,
+ vector unsigned short __c, int *__cc) {
+ return __builtin_s390_vstrczhs(__a, __b, __c, 0, __cc);
+}
+
+static inline __ATTRS_o_ai vector unsigned int
+vec_cmprg_or_0_idx_cc(vector unsigned int __a, vector unsigned int __b,
+ vector unsigned int __c, int *__cc) {
+ return __builtin_s390_vstrczfs(__a, __b, __c, 0, __cc);
+}
+
+/*-- vec_cmpnrg -------------------------------------------------------------*/
+
+static inline __ATTRS_o_ai vector bool char
+vec_cmpnrg(vector unsigned char __a, vector unsigned char __b,
+ vector unsigned char __c) {
+ return (vector bool char)__builtin_s390_vstrcb(__a, __b, __c, 12);
+}
+
+static inline __ATTRS_o_ai vector bool short
+vec_cmpnrg(vector unsigned short __a, vector unsigned short __b,
+ vector unsigned short __c) {
+ return (vector bool short)__builtin_s390_vstrch(__a, __b, __c, 12);
+}
+
+static inline __ATTRS_o_ai vector bool int
+vec_cmpnrg(vector unsigned int __a, vector unsigned int __b,
+ vector unsigned int __c) {
+ return (vector bool int)__builtin_s390_vstrcf(__a, __b, __c, 12);
+}
+
+/*-- vec_cmpnrg_cc ----------------------------------------------------------*/
+
+static inline __ATTRS_o_ai vector bool char
+vec_cmpnrg_cc(vector unsigned char __a, vector unsigned char __b,
+ vector unsigned char __c, int *__cc) {
+ return (vector bool char)__builtin_s390_vstrcbs(__a, __b, __c, 12, __cc);
+}
+
+static inline __ATTRS_o_ai vector bool short
+vec_cmpnrg_cc(vector unsigned short __a, vector unsigned short __b,
+ vector unsigned short __c, int *__cc) {
+ return (vector bool short)__builtin_s390_vstrchs(__a, __b, __c, 12, __cc);
+}
+
+static inline __ATTRS_o_ai vector bool int
+vec_cmpnrg_cc(vector unsigned int __a, vector unsigned int __b,
+ vector unsigned int __c, int *__cc) {
+ return (vector bool int)__builtin_s390_vstrcfs(__a, __b, __c, 12, __cc);
+}
+
+/*-- vec_cmpnrg_idx ---------------------------------------------------------*/
+
+static inline __ATTRS_o_ai vector unsigned char
+vec_cmpnrg_idx(vector unsigned char __a, vector unsigned char __b,
+ vector unsigned char __c) {
+ return __builtin_s390_vstrcb(__a, __b, __c, 8);
+}
+
+static inline __ATTRS_o_ai vector unsigned short
+vec_cmpnrg_idx(vector unsigned short __a, vector unsigned short __b,
+ vector unsigned short __c) {
+ return __builtin_s390_vstrch(__a, __b, __c, 8);
+}
+
+static inline __ATTRS_o_ai vector unsigned int
+vec_cmpnrg_idx(vector unsigned int __a, vector unsigned int __b,
+ vector unsigned int __c) {
+ return __builtin_s390_vstrcf(__a, __b, __c, 8);
+}
+
+/*-- vec_cmpnrg_idx_cc ------------------------------------------------------*/
+
+static inline __ATTRS_o_ai vector unsigned char
+vec_cmpnrg_idx_cc(vector unsigned char __a, vector unsigned char __b,
+ vector unsigned char __c, int *__cc) {
+ return __builtin_s390_vstrcbs(__a, __b, __c, 8, __cc);
+}
+
+static inline __ATTRS_o_ai vector unsigned short
+vec_cmpnrg_idx_cc(vector unsigned short __a, vector unsigned short __b,
+ vector unsigned short __c, int *__cc) {
+ return __builtin_s390_vstrchs(__a, __b, __c, 8, __cc);
+}
+
+static inline __ATTRS_o_ai vector unsigned int
+vec_cmpnrg_idx_cc(vector unsigned int __a, vector unsigned int __b,
+ vector unsigned int __c, int *__cc) {
+ return __builtin_s390_vstrcfs(__a, __b, __c, 8, __cc);
+}
+
+/*-- vec_cmpnrg_or_0_idx ----------------------------------------------------*/
+
+static inline __ATTRS_o_ai vector unsigned char
+vec_cmpnrg_or_0_idx(vector unsigned char __a, vector unsigned char __b,
+ vector unsigned char __c) {
+ return __builtin_s390_vstrczb(__a, __b, __c, 8);
+}
+
+static inline __ATTRS_o_ai vector unsigned short
+vec_cmpnrg_or_0_idx(vector unsigned short __a, vector unsigned short __b,
+ vector unsigned short __c) {
+ return __builtin_s390_vstrczh(__a, __b, __c, 8);
+}
+
+static inline __ATTRS_o_ai vector unsigned int
+vec_cmpnrg_or_0_idx(vector unsigned int __a, vector unsigned int __b,
+ vector unsigned int __c) {
+ return __builtin_s390_vstrczf(__a, __b, __c, 8);
+}
+
+/*-- vec_cmpnrg_or_0_idx_cc -------------------------------------------------*/
+
+static inline __ATTRS_o_ai vector unsigned char
+vec_cmpnrg_or_0_idx_cc(vector unsigned char __a, vector unsigned char __b,
+ vector unsigned char __c, int *__cc) {
+ return __builtin_s390_vstrczbs(__a, __b, __c, 8, __cc);
+}
+
+static inline __ATTRS_o_ai vector unsigned short
+vec_cmpnrg_or_0_idx_cc(vector unsigned short __a, vector unsigned short __b,
+ vector unsigned short __c, int *__cc) {
+ return __builtin_s390_vstrczhs(__a, __b, __c, 8, __cc);
+}
+
+static inline __ATTRS_o_ai vector unsigned int
+vec_cmpnrg_or_0_idx_cc(vector unsigned int __a, vector unsigned int __b,
+ vector unsigned int __c, int *__cc) {
+ return __builtin_s390_vstrczfs(__a, __b, __c, 8, __cc);
+}
+
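+/*
+ * Editorial note (added; hedged): the vec_find_any_* overloads below wrap
+ * the VFAE ("Vector Find Any Element Equal") builtins. Each element of __a
+ * is compared against every element of __b, so the result marks (or
+ * indexes) elements of __a equal to *any* element of __b -- e.g. finding
+ * the first of several delimiter bytes in one pass. The same 0/4/8/12
+ * literal flag pattern as in the vec_cmprg group appears to select
+ * mask-vs-index results and the inverted ("ne") predicate.
+ */
+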
+/*-- vec_find_any_eq --------------------------------------------------------*/
+
+static inline __ATTRS_o_ai vector bool char
+vec_find_any_eq(vector signed char __a, vector signed char __b) {
+ return (vector bool char)
+ __builtin_s390_vfaeb((vector unsigned char)__a,
+ (vector unsigned char)__b, 4);
+}
+
+static inline __ATTRS_o_ai vector bool char
+vec_find_any_eq(vector bool char __a, vector bool char __b) {
+ return (vector bool char)
+ __builtin_s390_vfaeb((vector unsigned char)__a,
+ (vector unsigned char)__b, 4);
+}
+
+static inline __ATTRS_o_ai vector bool char
+vec_find_any_eq(vector unsigned char __a, vector unsigned char __b) {
+ return (vector bool char)__builtin_s390_vfaeb(__a, __b, 4);
+}
+
+static inline __ATTRS_o_ai vector bool short
+vec_find_any_eq(vector signed short __a, vector signed short __b) {
+ return (vector bool short)
+ __builtin_s390_vfaeh((vector unsigned short)__a,
+ (vector unsigned short)__b, 4);
+}
+
+static inline __ATTRS_o_ai vector bool short
+vec_find_any_eq(vector bool short __a, vector bool short __b) {
+ return (vector bool short)
+ __builtin_s390_vfaeh((vector unsigned short)__a,
+ (vector unsigned short)__b, 4);
+}
+
+static inline __ATTRS_o_ai vector bool short
+vec_find_any_eq(vector unsigned short __a, vector unsigned short __b) {
+ return (vector bool short)__builtin_s390_vfaeh(__a, __b, 4);
+}
+
+static inline __ATTRS_o_ai vector bool int
+vec_find_any_eq(vector signed int __a, vector signed int __b) {
+ return (vector bool int)
+ __builtin_s390_vfaef((vector unsigned int)__a,
+ (vector unsigned int)__b, 4);
+}
+
+static inline __ATTRS_o_ai vector bool int
+vec_find_any_eq(vector bool int __a, vector bool int __b) {
+ return (vector bool int)
+ __builtin_s390_vfaef((vector unsigned int)__a,
+ (vector unsigned int)__b, 4);
+}
+
+static inline __ATTRS_o_ai vector bool int
+vec_find_any_eq(vector unsigned int __a, vector unsigned int __b) {
+ return (vector bool int)__builtin_s390_vfaef(__a, __b, 4);
+}
+
+/*-- vec_find_any_eq_cc -----------------------------------------------------*/
+
+static inline __ATTRS_o_ai vector bool char
+vec_find_any_eq_cc(vector signed char __a, vector signed char __b, int *__cc) {
+ return (vector bool char)
+ __builtin_s390_vfaebs((vector unsigned char)__a,
+ (vector unsigned char)__b, 4, __cc);
+}
+
+static inline __ATTRS_o_ai vector bool char
+vec_find_any_eq_cc(vector bool char __a, vector bool char __b, int *__cc) {
+ return (vector bool char)
+ __builtin_s390_vfaebs((vector unsigned char)__a,
+ (vector unsigned char)__b, 4, __cc);
+}
+
+static inline __ATTRS_o_ai vector bool char
+vec_find_any_eq_cc(vector unsigned char __a, vector unsigned char __b,
+ int *__cc) {
+ return (vector bool char)__builtin_s390_vfaebs(__a, __b, 4, __cc);
+}
+
+static inline __ATTRS_o_ai vector bool short
+vec_find_any_eq_cc(vector signed short __a, vector signed short __b,
+ int *__cc) {
+ return (vector bool short)
+ __builtin_s390_vfaehs((vector unsigned short)__a,
+ (vector unsigned short)__b, 4, __cc);
+}
+
+static inline __ATTRS_o_ai vector bool short
+vec_find_any_eq_cc(vector bool short __a, vector bool short __b, int *__cc) {
+ return (vector bool short)
+ __builtin_s390_vfaehs((vector unsigned short)__a,
+ (vector unsigned short)__b, 4, __cc);
+}
+
+static inline __ATTRS_o_ai vector bool short
+vec_find_any_eq_cc(vector unsigned short __a, vector unsigned short __b,
+ int *__cc) {
+ return (vector bool short)__builtin_s390_vfaehs(__a, __b, 4, __cc);
+}
+
+static inline __ATTRS_o_ai vector bool int
+vec_find_any_eq_cc(vector signed int __a, vector signed int __b, int *__cc) {
+ return (vector bool int)
+ __builtin_s390_vfaefs((vector unsigned int)__a,
+ (vector unsigned int)__b, 4, __cc);
+}
+
+static inline __ATTRS_o_ai vector bool int
+vec_find_any_eq_cc(vector bool int __a, vector bool int __b, int *__cc) {
+ return (vector bool int)
+ __builtin_s390_vfaefs((vector unsigned int)__a,
+ (vector unsigned int)__b, 4, __cc);
+}
+
+static inline __ATTRS_o_ai vector bool int
+vec_find_any_eq_cc(vector unsigned int __a, vector unsigned int __b,
+ int *__cc) {
+ return (vector bool int)__builtin_s390_vfaefs(__a, __b, 4, __cc);
+}
+
+/*-- vec_find_any_eq_idx ----------------------------------------------------*/
+
+static inline __ATTRS_o_ai vector signed char
+vec_find_any_eq_idx(vector signed char __a, vector signed char __b) {
+ return (vector signed char)
+ __builtin_s390_vfaeb((vector unsigned char)__a,
+ (vector unsigned char)__b, 0);
+}
+
+static inline __ATTRS_o_ai vector unsigned char
+vec_find_any_eq_idx(vector bool char __a, vector bool char __b) {
+ return __builtin_s390_vfaeb((vector unsigned char)__a,
+ (vector unsigned char)__b, 0);
+}
+
+static inline __ATTRS_o_ai vector unsigned char
+vec_find_any_eq_idx(vector unsigned char __a, vector unsigned char __b) {
+ return __builtin_s390_vfaeb(__a, __b, 0);
+}
+
+static inline __ATTRS_o_ai vector signed short
+vec_find_any_eq_idx(vector signed short __a, vector signed short __b) {
+ return (vector signed short)
+ __builtin_s390_vfaeh((vector unsigned short)__a,
+ (vector unsigned short)__b, 0);
+}
+
+static inline __ATTRS_o_ai vector unsigned short
+vec_find_any_eq_idx(vector bool short __a, vector bool short __b) {
+ return __builtin_s390_vfaeh((vector unsigned short)__a,
+ (vector unsigned short)__b, 0);
+}
+
+static inline __ATTRS_o_ai vector unsigned short
+vec_find_any_eq_idx(vector unsigned short __a, vector unsigned short __b) {
+ return __builtin_s390_vfaeh(__a, __b, 0);
+}
+
+static inline __ATTRS_o_ai vector signed int
+vec_find_any_eq_idx(vector signed int __a, vector signed int __b) {
+ return (vector signed int)
+ __builtin_s390_vfaef((vector unsigned int)__a,
+ (vector unsigned int)__b, 0);
+}
+
+static inline __ATTRS_o_ai vector unsigned int
+vec_find_any_eq_idx(vector bool int __a, vector bool int __b) {
+ return __builtin_s390_vfaef((vector unsigned int)__a,
+ (vector unsigned int)__b, 0);
+}
+
+static inline __ATTRS_o_ai vector unsigned int
+vec_find_any_eq_idx(vector unsigned int __a, vector unsigned int __b) {
+ return __builtin_s390_vfaef(__a, __b, 0);
+}
+
+/*-- vec_find_any_eq_idx_cc -------------------------------------------------*/
+
+static inline __ATTRS_o_ai vector signed char
+vec_find_any_eq_idx_cc(vector signed char __a, vector signed char __b,
+ int *__cc) {
+ return (vector signed char)
+ __builtin_s390_vfaebs((vector unsigned char)__a,
+ (vector unsigned char)__b, 0, __cc);
+}
+
+static inline __ATTRS_o_ai vector unsigned char
+vec_find_any_eq_idx_cc(vector bool char __a, vector bool char __b, int *__cc) {
+ return __builtin_s390_vfaebs((vector unsigned char)__a,
+ (vector unsigned char)__b, 0, __cc);
+}
+
+static inline __ATTRS_o_ai vector unsigned char
+vec_find_any_eq_idx_cc(vector unsigned char __a, vector unsigned char __b,
+ int *__cc) {
+ return __builtin_s390_vfaebs(__a, __b, 0, __cc);
+}
+
+static inline __ATTRS_o_ai vector signed short
+vec_find_any_eq_idx_cc(vector signed short __a, vector signed short __b,
+ int *__cc) {
+ return (vector signed short)
+ __builtin_s390_vfaehs((vector unsigned short)__a,
+ (vector unsigned short)__b, 0, __cc);
+}
+
+static inline __ATTRS_o_ai vector unsigned short
+vec_find_any_eq_idx_cc(vector bool short __a, vector bool short __b,
+ int *__cc) {
+ return __builtin_s390_vfaehs((vector unsigned short)__a,
+ (vector unsigned short)__b, 0, __cc);
+}
+
+static inline __ATTRS_o_ai vector unsigned short
+vec_find_any_eq_idx_cc(vector unsigned short __a, vector unsigned short __b,
+ int *__cc) {
+ return __builtin_s390_vfaehs(__a, __b, 0, __cc);
+}
+
+static inline __ATTRS_o_ai vector signed int
+vec_find_any_eq_idx_cc(vector signed int __a, vector signed int __b,
+ int *__cc) {
+ return (vector signed int)
+ __builtin_s390_vfaefs((vector unsigned int)__a,
+ (vector unsigned int)__b, 0, __cc);
+}
+
+static inline __ATTRS_o_ai vector unsigned int
+vec_find_any_eq_idx_cc(vector bool int __a, vector bool int __b, int *__cc) {
+ return __builtin_s390_vfaefs((vector unsigned int)__a,
+ (vector unsigned int)__b, 0, __cc);
+}
+
+static inline __ATTRS_o_ai vector unsigned int
+vec_find_any_eq_idx_cc(vector unsigned int __a, vector unsigned int __b,
+ int *__cc) {
+ return __builtin_s390_vfaefs(__a, __b, 0, __cc);
+}
+
+/*-- vec_find_any_eq_or_0_idx -----------------------------------------------*/
+
+static inline __ATTRS_o_ai vector signed char
+vec_find_any_eq_or_0_idx(vector signed char __a, vector signed char __b) {
+ return (vector signed char)
+ __builtin_s390_vfaezb((vector unsigned char)__a,
+ (vector unsigned char)__b, 0);
+}
+
+static inline __ATTRS_o_ai vector unsigned char
+vec_find_any_eq_or_0_idx(vector bool char __a, vector bool char __b) {
+ return __builtin_s390_vfaezb((vector unsigned char)__a,
+ (vector unsigned char)__b, 0);
+}
+
+static inline __ATTRS_o_ai vector unsigned char
+vec_find_any_eq_or_0_idx(vector unsigned char __a, vector unsigned char __b) {
+ return __builtin_s390_vfaezb(__a, __b, 0);
+}
+
+static inline __ATTRS_o_ai vector signed short
+vec_find_any_eq_or_0_idx(vector signed short __a, vector signed short __b) {
+ return (vector signed short)
+ __builtin_s390_vfaezh((vector unsigned short)__a,
+ (vector unsigned short)__b, 0);
+}
+
+static inline __ATTRS_o_ai vector unsigned short
+vec_find_any_eq_or_0_idx(vector bool short __a, vector bool short __b) {
+ return __builtin_s390_vfaezh((vector unsigned short)__a,
+ (vector unsigned short)__b, 0);
+}
+
+static inline __ATTRS_o_ai vector unsigned short
+vec_find_any_eq_or_0_idx(vector unsigned short __a, vector unsigned short __b) {
+ return __builtin_s390_vfaezh(__a, __b, 0);
+}
+
+static inline __ATTRS_o_ai vector signed int
+vec_find_any_eq_or_0_idx(vector signed int __a, vector signed int __b) {
+ return (vector signed int)
+ __builtin_s390_vfaezf((vector unsigned int)__a,
+ (vector unsigned int)__b, 0);
+}
+
+static inline __ATTRS_o_ai vector unsigned int
+vec_find_any_eq_or_0_idx(vector bool int __a, vector bool int __b) {
+ return __builtin_s390_vfaezf((vector unsigned int)__a,
+ (vector unsigned int)__b, 0);
+}
+
+static inline __ATTRS_o_ai vector unsigned int
+vec_find_any_eq_or_0_idx(vector unsigned int __a, vector unsigned int __b) {
+ return __builtin_s390_vfaezf(__a, __b, 0);
+}
+
+/*-- vec_find_any_eq_or_0_idx_cc --------------------------------------------*/
+
+static inline __ATTRS_o_ai vector signed char
+vec_find_any_eq_or_0_idx_cc(vector signed char __a, vector signed char __b,
+ int *__cc) {
+ return (vector signed char)
+ __builtin_s390_vfaezbs((vector unsigned char)__a,
+ (vector unsigned char)__b, 0, __cc);
+}
+
+static inline __ATTRS_o_ai vector unsigned char
+vec_find_any_eq_or_0_idx_cc(vector bool char __a, vector bool char __b,
+ int *__cc) {
+ return __builtin_s390_vfaezbs((vector unsigned char)__a,
+ (vector unsigned char)__b, 0, __cc);
+}
+
+static inline __ATTRS_o_ai vector unsigned char
+vec_find_any_eq_or_0_idx_cc(vector unsigned char __a, vector unsigned char __b,
+ int *__cc) {
+ return __builtin_s390_vfaezbs(__a, __b, 0, __cc);
+}
+
+static inline __ATTRS_o_ai vector signed short
+vec_find_any_eq_or_0_idx_cc(vector signed short __a, vector signed short __b,
+ int *__cc) {
+ return (vector signed short)
+ __builtin_s390_vfaezhs((vector unsigned short)__a,
+ (vector unsigned short)__b, 0, __cc);
+}
+
+static inline __ATTRS_o_ai vector unsigned short
+vec_find_any_eq_or_0_idx_cc(vector bool short __a, vector bool short __b,
+ int *__cc) {
+ return __builtin_s390_vfaezhs((vector unsigned short)__a,
+ (vector unsigned short)__b, 0, __cc);
+}
+
+static inline __ATTRS_o_ai vector unsigned short
+vec_find_any_eq_or_0_idx_cc(vector unsigned short __a,
+ vector unsigned short __b, int *__cc) {
+ return __builtin_s390_vfaezhs(__a, __b, 0, __cc);
+}
+
+static inline __ATTRS_o_ai vector signed int
+vec_find_any_eq_or_0_idx_cc(vector signed int __a, vector signed int __b,
+ int *__cc) {
+ return (vector signed int)
+ __builtin_s390_vfaezfs((vector unsigned int)__a,
+ (vector unsigned int)__b, 0, __cc);
+}
+
+static inline __ATTRS_o_ai vector unsigned int
+vec_find_any_eq_or_0_idx_cc(vector bool int __a, vector bool int __b,
+ int *__cc) {
+ return __builtin_s390_vfaezfs((vector unsigned int)__a,
+ (vector unsigned int)__b, 0, __cc);
+}
+
+static inline __ATTRS_o_ai vector unsigned int
+vec_find_any_eq_or_0_idx_cc(vector unsigned int __a, vector unsigned int __b,
+ int *__cc) {
+ return __builtin_s390_vfaezfs(__a, __b, 0, __cc);
+}
+
+/*-- vec_find_any_ne --------------------------------------------------------*/
+
+static inline __ATTRS_o_ai vector bool char
+vec_find_any_ne(vector signed char __a, vector signed char __b) {
+ return (vector bool char)
+ __builtin_s390_vfaeb((vector unsigned char)__a,
+ (vector unsigned char)__b, 12);
+}
+
+static inline __ATTRS_o_ai vector bool char
+vec_find_any_ne(vector bool char __a, vector bool char __b) {
+ return (vector bool char)
+ __builtin_s390_vfaeb((vector unsigned char)__a,
+ (vector unsigned char)__b, 12);
+}
+
+static inline __ATTRS_o_ai vector bool char
+vec_find_any_ne(vector unsigned char __a, vector unsigned char __b) {
+ return (vector bool char)__builtin_s390_vfaeb(__a, __b, 12);
+}
+
+static inline __ATTRS_o_ai vector bool short
+vec_find_any_ne(vector signed short __a, vector signed short __b) {
+ return (vector bool short)
+ __builtin_s390_vfaeh((vector unsigned short)__a,
+ (vector unsigned short)__b, 12);
+}
+
+static inline __ATTRS_o_ai vector bool short
+vec_find_any_ne(vector bool short __a, vector bool short __b) {
+ return (vector bool short)
+ __builtin_s390_vfaeh((vector unsigned short)__a,
+ (vector unsigned short)__b, 12);
+}
+
+static inline __ATTRS_o_ai vector bool short
+vec_find_any_ne(vector unsigned short __a, vector unsigned short __b) {
+ return (vector bool short)__builtin_s390_vfaeh(__a, __b, 12);
+}
+
+static inline __ATTRS_o_ai vector bool int
+vec_find_any_ne(vector signed int __a, vector signed int __b) {
+ return (vector bool int)
+ __builtin_s390_vfaef((vector unsigned int)__a,
+ (vector unsigned int)__b, 12);
+}
+
+static inline __ATTRS_o_ai vector bool int
+vec_find_any_ne(vector bool int __a, vector bool int __b) {
+ return (vector bool int)
+ __builtin_s390_vfaef((vector unsigned int)__a,
+ (vector unsigned int)__b, 12);
+}
+
+static inline __ATTRS_o_ai vector bool int
+vec_find_any_ne(vector unsigned int __a, vector unsigned int __b) {
+ return (vector bool int)__builtin_s390_vfaef(__a, __b, 12);
+}
+
+/*-- vec_find_any_ne_cc -----------------------------------------------------*/
+
+static inline __ATTRS_o_ai vector bool char
+vec_find_any_ne_cc(vector signed char __a, vector signed char __b, int *__cc) {
+ return (vector bool char)
+ __builtin_s390_vfaebs((vector unsigned char)__a,
+ (vector unsigned char)__b, 12, __cc);
+}
+
+static inline __ATTRS_o_ai vector bool char
+vec_find_any_ne_cc(vector bool char __a, vector bool char __b, int *__cc) {
+ return (vector bool char)
+ __builtin_s390_vfaebs((vector unsigned char)__a,
+ (vector unsigned char)__b, 12, __cc);
+}
+
+static inline __ATTRS_o_ai vector bool char
+vec_find_any_ne_cc(vector unsigned char __a, vector unsigned char __b,
+ int *__cc) {
+ return (vector bool char)__builtin_s390_vfaebs(__a, __b, 12, __cc);
+}
+
+static inline __ATTRS_o_ai vector bool short
+vec_find_any_ne_cc(vector signed short __a, vector signed short __b,
+ int *__cc) {
+ return (vector bool short)
+ __builtin_s390_vfaehs((vector unsigned short)__a,
+ (vector unsigned short)__b, 12, __cc);
+}
+
+static inline __ATTRS_o_ai vector bool short
+vec_find_any_ne_cc(vector bool short __a, vector bool short __b, int *__cc) {
+ return (vector bool short)
+ __builtin_s390_vfaehs((vector unsigned short)__a,
+ (vector unsigned short)__b, 12, __cc);
+}
+
+static inline __ATTRS_o_ai vector bool short
+vec_find_any_ne_cc(vector unsigned short __a, vector unsigned short __b,
+ int *__cc) {
+ return (vector bool short)__builtin_s390_vfaehs(__a, __b, 12, __cc);
+}
+
+static inline __ATTRS_o_ai vector bool int
+vec_find_any_ne_cc(vector signed int __a, vector signed int __b, int *__cc) {
+ return (vector bool int)
+ __builtin_s390_vfaefs((vector unsigned int)__a,
+ (vector unsigned int)__b, 12, __cc);
+}
+
+static inline __ATTRS_o_ai vector bool int
+vec_find_any_ne_cc(vector bool int __a, vector bool int __b, int *__cc) {
+ return (vector bool int)
+ __builtin_s390_vfaefs((vector unsigned int)__a,
+ (vector unsigned int)__b, 12, __cc);
+}
+
+static inline __ATTRS_o_ai vector bool int
+vec_find_any_ne_cc(vector unsigned int __a, vector unsigned int __b,
+ int *__cc) {
+ return (vector bool int)__builtin_s390_vfaefs(__a, __b, 12, __cc);
+}
+
+/*-- vec_find_any_ne_idx ----------------------------------------------------*/
+
+static inline __ATTRS_o_ai vector signed char
+vec_find_any_ne_idx(vector signed char __a, vector signed char __b) {
+ return (vector signed char)
+ __builtin_s390_vfaeb((vector unsigned char)__a,
+ (vector unsigned char)__b, 8);
+}
+
+static inline __ATTRS_o_ai vector unsigned char
+vec_find_any_ne_idx(vector bool char __a, vector bool char __b) {
+ return __builtin_s390_vfaeb((vector unsigned char)__a,
+ (vector unsigned char)__b, 8);
+}
+
+static inline __ATTRS_o_ai vector unsigned char
+vec_find_any_ne_idx(vector unsigned char __a, vector unsigned char __b) {
+ return __builtin_s390_vfaeb(__a, __b, 8);
+}
+
+static inline __ATTRS_o_ai vector signed short
+vec_find_any_ne_idx(vector signed short __a, vector signed short __b) {
+ return (vector signed short)
+ __builtin_s390_vfaeh((vector unsigned short)__a,
+ (vector unsigned short)__b, 8);
+}
+
+static inline __ATTRS_o_ai vector unsigned short
+vec_find_any_ne_idx(vector bool short __a, vector bool short __b) {
+ return __builtin_s390_vfaeh((vector unsigned short)__a,
+ (vector unsigned short)__b, 8);
+}
+
+static inline __ATTRS_o_ai vector unsigned short
+vec_find_any_ne_idx(vector unsigned short __a, vector unsigned short __b) {
+ return __builtin_s390_vfaeh(__a, __b, 8);
+}
+
+static inline __ATTRS_o_ai vector signed int
+vec_find_any_ne_idx(vector signed int __a, vector signed int __b) {
+ return (vector signed int)
+ __builtin_s390_vfaef((vector unsigned int)__a,
+ (vector unsigned int)__b, 8);
+}
+
+static inline __ATTRS_o_ai vector unsigned int
+vec_find_any_ne_idx(vector bool int __a, vector bool int __b) {
+ return __builtin_s390_vfaef((vector unsigned int)__a,
+ (vector unsigned int)__b, 8);
+}
+
+static inline __ATTRS_o_ai vector unsigned int
+vec_find_any_ne_idx(vector unsigned int __a, vector unsigned int __b) {
+ return __builtin_s390_vfaef(__a, __b, 8);
+}
+
+/*-- vec_find_any_ne_idx_cc -------------------------------------------------*/
+
+static inline __ATTRS_o_ai vector signed char
+vec_find_any_ne_idx_cc(vector signed char __a, vector signed char __b,
+ int *__cc) {
+ return (vector signed char)
+ __builtin_s390_vfaebs((vector unsigned char)__a,
+ (vector unsigned char)__b, 8, __cc);
+}
+
+static inline __ATTRS_o_ai vector unsigned char
+vec_find_any_ne_idx_cc(vector bool char __a, vector bool char __b, int *__cc) {
+ return __builtin_s390_vfaebs((vector unsigned char)__a,
+ (vector unsigned char)__b, 8, __cc);
+}
+
+static inline __ATTRS_o_ai vector unsigned char
+vec_find_any_ne_idx_cc(vector unsigned char __a, vector unsigned char __b,
+ int *__cc) {
+ return __builtin_s390_vfaebs(__a, __b, 8, __cc);
+}
+
+static inline __ATTRS_o_ai vector signed short
+vec_find_any_ne_idx_cc(vector signed short __a, vector signed short __b,
+ int *__cc) {
+ return (vector signed short)
+ __builtin_s390_vfaehs((vector unsigned short)__a,
+ (vector unsigned short)__b, 8, __cc);
+}
+
+static inline __ATTRS_o_ai vector unsigned short
+vec_find_any_ne_idx_cc(vector bool short __a, vector bool short __b,
+ int *__cc) {
+ return __builtin_s390_vfaehs((vector unsigned short)__a,
+ (vector unsigned short)__b, 8, __cc);
+}
+
+static inline __ATTRS_o_ai vector unsigned short
+vec_find_any_ne_idx_cc(vector unsigned short __a, vector unsigned short __b,
+ int *__cc) {
+ return __builtin_s390_vfaehs(__a, __b, 8, __cc);
+}
+
+static inline __ATTRS_o_ai vector signed int
+vec_find_any_ne_idx_cc(vector signed int __a, vector signed int __b,
+ int *__cc) {
+ return (vector signed int)
+ __builtin_s390_vfaefs((vector unsigned int)__a,
+ (vector unsigned int)__b, 8, __cc);
+}
+
+static inline __ATTRS_o_ai vector unsigned int
+vec_find_any_ne_idx_cc(vector bool int __a, vector bool int __b, int *__cc) {
+ return __builtin_s390_vfaefs((vector unsigned int)__a,
+ (vector unsigned int)__b, 8, __cc);
+}
+
+static inline __ATTRS_o_ai vector unsigned int
+vec_find_any_ne_idx_cc(vector unsigned int __a, vector unsigned int __b,
+ int *__cc) {
+ return __builtin_s390_vfaefs(__a, __b, 8, __cc);
+}
+
+/*-- vec_find_any_ne_or_0_idx -----------------------------------------------*/
+
+static inline __ATTRS_o_ai vector signed char
+vec_find_any_ne_or_0_idx(vector signed char __a, vector signed char __b) {
+ return (vector signed char)
+ __builtin_s390_vfaezb((vector unsigned char)__a,
+ (vector unsigned char)__b, 8);
+}
+
+static inline __ATTRS_o_ai vector unsigned char
+vec_find_any_ne_or_0_idx(vector bool char __a, vector bool char __b) {
+ return __builtin_s390_vfaezb((vector unsigned char)__a,
+ (vector unsigned char)__b, 8);
+}
+
+static inline __ATTRS_o_ai vector unsigned char
+vec_find_any_ne_or_0_idx(vector unsigned char __a, vector unsigned char __b) {
+ return __builtin_s390_vfaezb(__a, __b, 8);
+}
+
+static inline __ATTRS_o_ai vector signed short
+vec_find_any_ne_or_0_idx(vector signed short __a, vector signed short __b) {
+ return (vector signed short)
+ __builtin_s390_vfaezh((vector unsigned short)__a,
+ (vector unsigned short)__b, 8);
+}
+
+static inline __ATTRS_o_ai vector unsigned short
+vec_find_any_ne_or_0_idx(vector bool short __a, vector bool short __b) {
+ return __builtin_s390_vfaezh((vector unsigned short)__a,
+ (vector unsigned short)__b, 8);
+}
+
+static inline __ATTRS_o_ai vector unsigned short
+vec_find_any_ne_or_0_idx(vector unsigned short __a, vector unsigned short __b) {
+ return __builtin_s390_vfaezh(__a, __b, 8);
+}
+
+static inline __ATTRS_o_ai vector signed int
+vec_find_any_ne_or_0_idx(vector signed int __a, vector signed int __b) {
+ return (vector signed int)
+ __builtin_s390_vfaezf((vector unsigned int)__a,
+ (vector unsigned int)__b, 8);
+}
+
+static inline __ATTRS_o_ai vector unsigned int
+vec_find_any_ne_or_0_idx(vector bool int __a, vector bool int __b) {
+ return __builtin_s390_vfaezf((vector unsigned int)__a,
+ (vector unsigned int)__b, 8);
+}
+
+static inline __ATTRS_o_ai vector unsigned int
+vec_find_any_ne_or_0_idx(vector unsigned int __a, vector unsigned int __b) {
+ return __builtin_s390_vfaezf(__a, __b, 8);
+}
+
+/*-- vec_find_any_ne_or_0_idx_cc --------------------------------------------*/
+
+static inline __ATTRS_o_ai vector signed char
+vec_find_any_ne_or_0_idx_cc(vector signed char __a, vector signed char __b,
+ int *__cc) {
+ return (vector signed char)
+ __builtin_s390_vfaezbs((vector unsigned char)__a,
+ (vector unsigned char)__b, 8, __cc);
+}
+
+static inline __ATTRS_o_ai vector unsigned char
+vec_find_any_ne_or_0_idx_cc(vector bool char __a, vector bool char __b,
+ int *__cc) {
+ return __builtin_s390_vfaezbs((vector unsigned char)__a,
+ (vector unsigned char)__b, 8, __cc);
+}
+
+static inline __ATTRS_o_ai vector unsigned char
+vec_find_any_ne_or_0_idx_cc(vector unsigned char __a, vector unsigned char __b,
+ int *__cc) {
+ return __builtin_s390_vfaezbs(__a, __b, 8, __cc);
+}
+
+static inline __ATTRS_o_ai vector signed short
+vec_find_any_ne_or_0_idx_cc(vector signed short __a, vector signed short __b,
+ int *__cc) {
+ return (vector signed short)
+ __builtin_s390_vfaezhs((vector unsigned short)__a,
+ (vector unsigned short)__b, 8, __cc);
+}
+
+static inline __ATTRS_o_ai vector unsigned short
+vec_find_any_ne_or_0_idx_cc(vector bool short __a, vector bool short __b,
+ int *__cc) {
+ return __builtin_s390_vfaezhs((vector unsigned short)__a,
+ (vector unsigned short)__b, 8, __cc);
+}
+
+static inline __ATTRS_o_ai vector unsigned short
+vec_find_any_ne_or_0_idx_cc(vector unsigned short __a,
+ vector unsigned short __b, int *__cc) {
+ return __builtin_s390_vfaezhs(__a, __b, 8, __cc);
+}
+
+static inline __ATTRS_o_ai vector signed int
+vec_find_any_ne_or_0_idx_cc(vector signed int __a, vector signed int __b,
+ int *__cc) {
+ return (vector signed int)
+ __builtin_s390_vfaezfs((vector unsigned int)__a,
+ (vector unsigned int)__b, 8, __cc);
+}
+
+static inline __ATTRS_o_ai vector unsigned int
+vec_find_any_ne_or_0_idx_cc(vector bool int __a, vector bool int __b,
+ int *__cc) {
+ return __builtin_s390_vfaezfs((vector unsigned int)__a,
+ (vector unsigned int)__b, 8, __cc);
+}
+
+static inline __ATTRS_o_ai vector unsigned int
+vec_find_any_ne_or_0_idx_cc(vector unsigned int __a, vector unsigned int __b,
+ int *__cc) {
+ return __builtin_s390_vfaezfs(__a, __b, 8, __cc);
+}
+
+#undef __constant_pow2_range
+#undef __constant_range
+#undef __constant
+#undef __ATTRS_o
+#undef __ATTRS_o_ai
+#undef __ATTRS_ai
+
+#else
+
+#error "Use -fzvector to enable vector extensions"
+
+#endif
diff --git a/clang-2690385/lib64/clang/3.8/include/wmmintrin.h b/clang-2690385/lib64/clang/3.8/include/wmmintrin.h
new file mode 100644
index 00000000..a2d93101
--- /dev/null
+++ b/clang-2690385/lib64/clang/3.8/include/wmmintrin.h
@@ -0,0 +1,33 @@
+/*===---- wmmintrin.h - AES/PCLMUL intrinsics -------------------------------===
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+ * THE SOFTWARE.
+ *
+ *===-----------------------------------------------------------------------===
+ */
+
+#ifndef _WMMINTRIN_H
+#define _WMMINTRIN_H
+
+#include <emmintrin.h>
+
+#include <__wmmintrin_aes.h>
+
+#include <__wmmintrin_pclmul.h>
+
+#endif /* _WMMINTRIN_H */
diff --git a/clang-2690385/lib64/clang/3.8/include/x86intrin.h b/clang-2690385/lib64/clang/3.8/include/x86intrin.h
new file mode 100644
index 00000000..4d8077e3
--- /dev/null
+++ b/clang-2690385/lib64/clang/3.8/include/x86intrin.h
@@ -0,0 +1,57 @@
+/*===---- x86intrin.h - X86 intrinsics -------------------------------------===
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+ * THE SOFTWARE.
+ *
+ *===-----------------------------------------------------------------------===
+ */
+
+#ifndef __X86INTRIN_H
+#define __X86INTRIN_H
+
+#include <ia32intrin.h>
+
+#include <immintrin.h>
+
+#include <mm3dnow.h>
+
+#include <bmiintrin.h>
+
+#include <bmi2intrin.h>
+
+#include <lzcntintrin.h>
+
+#include <popcntintrin.h>
+
+#include <rdseedintrin.h>
+
+#include <prfchwintrin.h>
+
+#include <ammintrin.h>
+
+#include <fma4intrin.h>
+
+#include <xopintrin.h>
+
+#include <tbmintrin.h>
+
+#include <f16cintrin.h>
+
+/* FIXME: LWP */
+
+#endif /* __X86INTRIN_H */
diff --git a/clang-2690385/lib64/clang/3.8/include/xmmintrin.h b/clang-2690385/lib64/clang/3.8/include/xmmintrin.h
new file mode 100644
index 00000000..ae0b2cd1
--- /dev/null
+++ b/clang-2690385/lib64/clang/3.8/include/xmmintrin.h
@@ -0,0 +1,1010 @@
+/*===---- xmmintrin.h - SSE intrinsics -------------------------------------===
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+ * THE SOFTWARE.
+ *
+ *===-----------------------------------------------------------------------===
+ */
+
+#ifndef __XMMINTRIN_H
+#define __XMMINTRIN_H
+
+#include <mmintrin.h>
+
+typedef int __v4si __attribute__((__vector_size__(16)));
+typedef float __v4sf __attribute__((__vector_size__(16)));
+typedef float __m128 __attribute__((__vector_size__(16)));
+
+/* This header should only be included in a hosted environment as it depends on
+ * a standard library to provide allocation routines. */
+#if __STDC_HOSTED__
+#include <mm_malloc.h>
+#endif
+
+/* Define the default attributes for the functions in this file. */
+#define __DEFAULT_FN_ATTRS __attribute__((__always_inline__, __nodebug__, __target__("sse")))
+
+static __inline__ __m128 __DEFAULT_FN_ATTRS
+_mm_add_ss(__m128 __a, __m128 __b)
+{
+ __a[0] += __b[0];
+ return __a;
+}
+
+static __inline__ __m128 __DEFAULT_FN_ATTRS
+_mm_add_ps(__m128 __a, __m128 __b)
+{
+ return __a + __b;
+}
+
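+/*
+ * Editorial comment (added, not in the original header): the _ss forms
+ * operate on scalar element 0 only and pass elements 1-3 of __a through
+ * unchanged, while the _ps forms operate on all four lanes. For example:
+ *
+ *   __m128 x = {1, 2, 3, 4}, y = {10, 20, 30, 40};
+ *   _mm_add_ss(x, y);   // {11,  2,  3,  4}
+ *   _mm_add_ps(x, y);   // {11, 22, 33, 44}
+ */
+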
+static __inline__ __m128 __DEFAULT_FN_ATTRS
+_mm_sub_ss(__m128 __a, __m128 __b)
+{
+ __a[0] -= __b[0];
+ return __a;
+}
+
+static __inline__ __m128 __DEFAULT_FN_ATTRS
+_mm_sub_ps(__m128 __a, __m128 __b)
+{
+ return __a - __b;
+}
+
+static __inline__ __m128 __DEFAULT_FN_ATTRS
+_mm_mul_ss(__m128 __a, __m128 __b)
+{
+ __a[0] *= __b[0];
+ return __a;
+}
+
+static __inline__ __m128 __DEFAULT_FN_ATTRS
+_mm_mul_ps(__m128 __a, __m128 __b)
+{
+ return __a * __b;
+}
+
+static __inline__ __m128 __DEFAULT_FN_ATTRS
+_mm_div_ss(__m128 __a, __m128 __b)
+{
+ __a[0] /= __b[0];
+ return __a;
+}
+
+static __inline__ __m128 __DEFAULT_FN_ATTRS
+_mm_div_ps(__m128 __a, __m128 __b)
+{
+ return __a / __b;
+}
+
+static __inline__ __m128 __DEFAULT_FN_ATTRS
+_mm_sqrt_ss(__m128 __a)
+{
+ __m128 __c = __builtin_ia32_sqrtss(__a);
+ return (__m128) { __c[0], __a[1], __a[2], __a[3] };
+}
+
+static __inline__ __m128 __DEFAULT_FN_ATTRS
+_mm_sqrt_ps(__m128 __a)
+{
+ return __builtin_ia32_sqrtps(__a);
+}
+
+static __inline__ __m128 __DEFAULT_FN_ATTRS
+_mm_rcp_ss(__m128 __a)
+{
+ __m128 __c = __builtin_ia32_rcpss(__a);
+ return (__m128) { __c[0], __a[1], __a[2], __a[3] };
+}
+
+static __inline__ __m128 __DEFAULT_FN_ATTRS
+_mm_rcp_ps(__m128 __a)
+{
+ return __builtin_ia32_rcpps(__a);
+}
+
+static __inline__ __m128 __DEFAULT_FN_ATTRS
+_mm_rsqrt_ss(__m128 __a)
+{
+ __m128 __c = __builtin_ia32_rsqrtss(__a);
+ return (__m128) { __c[0], __a[1], __a[2], __a[3] };
+}
+
+static __inline__ __m128 __DEFAULT_FN_ATTRS
+_mm_rsqrt_ps(__m128 __a)
+{
+ return __builtin_ia32_rsqrtps(__a);
+}
+
+static __inline__ __m128 __DEFAULT_FN_ATTRS
+_mm_min_ss(__m128 __a, __m128 __b)
+{
+ return __builtin_ia32_minss(__a, __b);
+}
+
+static __inline__ __m128 __DEFAULT_FN_ATTRS
+_mm_min_ps(__m128 __a, __m128 __b)
+{
+ return __builtin_ia32_minps(__a, __b);
+}
+
+static __inline__ __m128 __DEFAULT_FN_ATTRS
+_mm_max_ss(__m128 __a, __m128 __b)
+{
+ return __builtin_ia32_maxss(__a, __b);
+}
+
+static __inline__ __m128 __DEFAULT_FN_ATTRS
+_mm_max_ps(__m128 __a, __m128 __b)
+{
+ return __builtin_ia32_maxps(__a, __b);
+}
+
+static __inline__ __m128 __DEFAULT_FN_ATTRS
+_mm_and_ps(__m128 __a, __m128 __b)
+{
+ return (__m128)((__v4si)__a & (__v4si)__b);
+}
+
+static __inline__ __m128 __DEFAULT_FN_ATTRS
+_mm_andnot_ps(__m128 __a, __m128 __b)
+{
+ return (__m128)(~(__v4si)__a & (__v4si)__b);
+}
+
+static __inline__ __m128 __DEFAULT_FN_ATTRS
+_mm_or_ps(__m128 __a, __m128 __b)
+{
+ return (__m128)((__v4si)__a | (__v4si)__b);
+}
+
+static __inline__ __m128 __DEFAULT_FN_ATTRS
+_mm_xor_ps(__m128 __a, __m128 __b)
+{
+ return (__m128)((__v4si)__a ^ (__v4si)__b);
+}
+
+static __inline__ __m128 __DEFAULT_FN_ATTRS
+_mm_cmpeq_ss(__m128 __a, __m128 __b)
+{
+ return (__m128)__builtin_ia32_cmpeqss(__a, __b);
+}
+
+static __inline__ __m128 __DEFAULT_FN_ATTRS
+_mm_cmpeq_ps(__m128 __a, __m128 __b)
+{
+ return (__m128)__builtin_ia32_cmpeqps(__a, __b);
+}
+
+static __inline__ __m128 __DEFAULT_FN_ATTRS
+_mm_cmplt_ss(__m128 __a, __m128 __b)
+{
+ return (__m128)__builtin_ia32_cmpltss(__a, __b);
+}
+
+static __inline__ __m128 __DEFAULT_FN_ATTRS
+_mm_cmplt_ps(__m128 __a, __m128 __b)
+{
+ return (__m128)__builtin_ia32_cmpltps(__a, __b);
+}
+
+static __inline__ __m128 __DEFAULT_FN_ATTRS
+_mm_cmple_ss(__m128 __a, __m128 __b)
+{
+ return (__m128)__builtin_ia32_cmpless(__a, __b);
+}
+
+static __inline__ __m128 __DEFAULT_FN_ATTRS
+_mm_cmple_ps(__m128 __a, __m128 __b)
+{
+ return (__m128)__builtin_ia32_cmpleps(__a, __b);
+}
+
+static __inline__ __m128 __DEFAULT_FN_ATTRS
+_mm_cmpgt_ss(__m128 __a, __m128 __b)
+{
+ return (__m128)__builtin_shufflevector(__a,
+ __builtin_ia32_cmpltss(__b, __a),
+ 4, 1, 2, 3);
+}
+
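+/*
+ * Editorial comment (added): there is no cmpgtss builtin, so _mm_cmpgt_ss
+ * above is synthesized: the operands are swapped into cmpltss, then
+ * __builtin_shufflevector(__a, cmp, 4, 1, 2, 3) takes lane 0 of the
+ * compare result (index 4 refers to the second argument's first lane) and
+ * lanes 1-3 of __a, preserving the scalar-op contract that the upper lanes
+ * come from __a. _mm_cmpge_ss, _mm_cmpngt_ss, and _mm_cmpnge_ss below use
+ * the same trick.
+ */
+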
+static __inline__ __m128 __DEFAULT_FN_ATTRS
+_mm_cmpgt_ps(__m128 __a, __m128 __b)
+{
+ return (__m128)__builtin_ia32_cmpltps(__b, __a);
+}
+
+static __inline__ __m128 __DEFAULT_FN_ATTRS
+_mm_cmpge_ss(__m128 __a, __m128 __b)
+{
+ return (__m128)__builtin_shufflevector(__a,
+ __builtin_ia32_cmpless(__b, __a),
+ 4, 1, 2, 3);
+}
+
+static __inline__ __m128 __DEFAULT_FN_ATTRS
+_mm_cmpge_ps(__m128 __a, __m128 __b)
+{
+ return (__m128)__builtin_ia32_cmpleps(__b, __a);
+}
+
+static __inline__ __m128 __DEFAULT_FN_ATTRS
+_mm_cmpneq_ss(__m128 __a, __m128 __b)
+{
+ return (__m128)__builtin_ia32_cmpneqss(__a, __b);
+}
+
+static __inline__ __m128 __DEFAULT_FN_ATTRS
+_mm_cmpneq_ps(__m128 __a, __m128 __b)
+{
+ return (__m128)__builtin_ia32_cmpneqps(__a, __b);
+}
+
+static __inline__ __m128 __DEFAULT_FN_ATTRS
+_mm_cmpnlt_ss(__m128 __a, __m128 __b)
+{
+ return (__m128)__builtin_ia32_cmpnltss(__a, __b);
+}
+
+static __inline__ __m128 __DEFAULT_FN_ATTRS
+_mm_cmpnlt_ps(__m128 __a, __m128 __b)
+{
+ return (__m128)__builtin_ia32_cmpnltps(__a, __b);
+}
+
+static __inline__ __m128 __DEFAULT_FN_ATTRS
+_mm_cmpnle_ss(__m128 __a, __m128 __b)
+{
+ return (__m128)__builtin_ia32_cmpnless(__a, __b);
+}
+
+static __inline__ __m128 __DEFAULT_FN_ATTRS
+_mm_cmpnle_ps(__m128 __a, __m128 __b)
+{
+ return (__m128)__builtin_ia32_cmpnleps(__a, __b);
+}
+
+static __inline__ __m128 __DEFAULT_FN_ATTRS
+_mm_cmpngt_ss(__m128 __a, __m128 __b)
+{
+ return (__m128)__builtin_shufflevector(__a,
+ __builtin_ia32_cmpnltss(__b, __a),
+ 4, 1, 2, 3);
+}
+
+static __inline__ __m128 __DEFAULT_FN_ATTRS
+_mm_cmpngt_ps(__m128 __a, __m128 __b)
+{
+ return (__m128)__builtin_ia32_cmpnltps(__b, __a);
+}
+
+static __inline__ __m128 __DEFAULT_FN_ATTRS
+_mm_cmpnge_ss(__m128 __a, __m128 __b)
+{
+ return (__m128)__builtin_shufflevector(__a,
+ __builtin_ia32_cmpnless(__b, __a),
+ 4, 1, 2, 3);
+}
+
+static __inline__ __m128 __DEFAULT_FN_ATTRS
+_mm_cmpnge_ps(__m128 __a, __m128 __b)
+{
+ return (__m128)__builtin_ia32_cmpnleps(__b, __a);
+}
+
+static __inline__ __m128 __DEFAULT_FN_ATTRS
+_mm_cmpord_ss(__m128 __a, __m128 __b)
+{
+ return (__m128)__builtin_ia32_cmpordss(__a, __b);
+}
+
+static __inline__ __m128 __DEFAULT_FN_ATTRS
+_mm_cmpord_ps(__m128 __a, __m128 __b)
+{
+ return (__m128)__builtin_ia32_cmpordps(__a, __b);
+}
+
+static __inline__ __m128 __DEFAULT_FN_ATTRS
+_mm_cmpunord_ss(__m128 __a, __m128 __b)
+{
+ return (__m128)__builtin_ia32_cmpunordss(__a, __b);
+}
+
+static __inline__ __m128 __DEFAULT_FN_ATTRS
+_mm_cmpunord_ps(__m128 __a, __m128 __b)
+{
+ return (__m128)__builtin_ia32_cmpunordps(__a, __b);
+}
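+
+/* Editor's note: every packed compare above yields an all-ones or all-zero
+ * 32-bit lane, so results combine with the bitwise ops into branchless
+ * selects; a sketch computing a lanewise minimum by hand:
+ *
+ *   __m128 m = _mm_cmplt_ps(a, b);                    // mask: a < b
+ *   __m128 r = _mm_or_ps(_mm_and_ps(m, a),            // a where mask set
+ *                        _mm_andnot_ps(m, b));        // b elsewhere
+ */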
+
+static __inline__ int __DEFAULT_FN_ATTRS
+_mm_comieq_ss(__m128 __a, __m128 __b)
+{
+ return __builtin_ia32_comieq(__a, __b);
+}
+
+static __inline__ int __DEFAULT_FN_ATTRS
+_mm_comilt_ss(__m128 __a, __m128 __b)
+{
+ return __builtin_ia32_comilt(__a, __b);
+}
+
+static __inline__ int __DEFAULT_FN_ATTRS
+_mm_comile_ss(__m128 __a, __m128 __b)
+{
+ return __builtin_ia32_comile(__a, __b);
+}
+
+static __inline__ int __DEFAULT_FN_ATTRS
+_mm_comigt_ss(__m128 __a, __m128 __b)
+{
+ return __builtin_ia32_comigt(__a, __b);
+}
+
+static __inline__ int __DEFAULT_FN_ATTRS
+_mm_comige_ss(__m128 __a, __m128 __b)
+{
+ return __builtin_ia32_comige(__a, __b);
+}
+
+static __inline__ int __DEFAULT_FN_ATTRS
+_mm_comineq_ss(__m128 __a, __m128 __b)
+{
+ return __builtin_ia32_comineq(__a, __b);
+}
+
+static __inline__ int __DEFAULT_FN_ATTRS
+_mm_ucomieq_ss(__m128 __a, __m128 __b)
+{
+ return __builtin_ia32_ucomieq(__a, __b);
+}
+
+static __inline__ int __DEFAULT_FN_ATTRS
+_mm_ucomilt_ss(__m128 __a, __m128 __b)
+{
+ return __builtin_ia32_ucomilt(__a, __b);
+}
+
+static __inline__ int __DEFAULT_FN_ATTRS
+_mm_ucomile_ss(__m128 __a, __m128 __b)
+{
+ return __builtin_ia32_ucomile(__a, __b);
+}
+
+static __inline__ int __DEFAULT_FN_ATTRS
+_mm_ucomigt_ss(__m128 __a, __m128 __b)
+{
+ return __builtin_ia32_ucomigt(__a, __b);
+}
+
+static __inline__ int __DEFAULT_FN_ATTRS
+_mm_ucomige_ss(__m128 __a, __m128 __b)
+{
+ return __builtin_ia32_ucomige(__a, __b);
+}
+
+static __inline__ int __DEFAULT_FN_ATTRS
+_mm_ucomineq_ss(__m128 __a, __m128 __b)
+{
+ return __builtin_ia32_ucomineq(__a, __b);
+}
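+
+/* Editor's note: the comi/ucomi family compares only the low lane and
+ * returns an int; the ucomi variants differ only in not raising an
+ * invalid-operation exception on quiet NaNs. A hypothetical scalar branch:
+ *
+ *   if (_mm_comilt_ss(x, y))
+ *     handle_less();                          // hypothetical handler
+ */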
+
+static __inline__ int __DEFAULT_FN_ATTRS
+_mm_cvtss_si32(__m128 __a)
+{
+ return __builtin_ia32_cvtss2si(__a);
+}
+
+static __inline__ int __DEFAULT_FN_ATTRS
+_mm_cvt_ss2si(__m128 __a)
+{
+ return _mm_cvtss_si32(__a);
+}
+
+#ifdef __x86_64__
+
+static __inline__ long long __DEFAULT_FN_ATTRS
+_mm_cvtss_si64(__m128 __a)
+{
+ return __builtin_ia32_cvtss2si64(__a);
+}
+
+#endif
+
+static __inline__ __m64 __DEFAULT_FN_ATTRS
+_mm_cvtps_pi32(__m128 __a)
+{
+ return (__m64)__builtin_ia32_cvtps2pi(__a);
+}
+
+static __inline__ __m64 __DEFAULT_FN_ATTRS
+_mm_cvt_ps2pi(__m128 __a)
+{
+ return _mm_cvtps_pi32(__a);
+}
+
+static __inline__ int __DEFAULT_FN_ATTRS
+_mm_cvttss_si32(__m128 __a)
+{
+ return __a[0];
+}
+
+static __inline__ int __DEFAULT_FN_ATTRS
+_mm_cvtt_ss2si(__m128 __a)
+{
+ return _mm_cvttss_si32(__a);
+}
+
+static __inline__ long long __DEFAULT_FN_ATTRS
+_mm_cvttss_si64(__m128 __a)
+{
+ return __a[0];
+}
+
+static __inline__ __m64 __DEFAULT_FN_ATTRS
+_mm_cvttps_pi32(__m128 __a)
+{
+ return (__m64)__builtin_ia32_cvttps2pi(__a);
+}
+
+static __inline__ __m64 __DEFAULT_FN_ATTRS
+_mm_cvtt_ps2pi(__m128 __a)
+{
+ return _mm_cvttps_pi32(__a);
+}
+
+static __inline__ __m128 __DEFAULT_FN_ATTRS
+_mm_cvtsi32_ss(__m128 __a, int __b)
+{
+ __a[0] = __b;
+ return __a;
+}
+
+static __inline__ __m128 __DEFAULT_FN_ATTRS
+_mm_cvt_si2ss(__m128 __a, int __b)
+{
+ return _mm_cvtsi32_ss(__a, __b);
+}
+
+#ifdef __x86_64__
+
+static __inline__ __m128 __DEFAULT_FN_ATTRS
+_mm_cvtsi64_ss(__m128 __a, long long __b)
+{
+ __a[0] = __b;
+ return __a;
+}
+
+#endif
+
+static __inline__ __m128 __DEFAULT_FN_ATTRS
+_mm_cvtpi32_ps(__m128 __a, __m64 __b)
+{
+ return __builtin_ia32_cvtpi2ps(__a, (__v2si)__b);
+}
+
+static __inline__ __m128 __DEFAULT_FN_ATTRS
+_mm_cvt_pi2ps(__m128 __a, __m64 __b)
+{
+ return _mm_cvtpi32_ps(__a, __b);
+}
+
+static __inline__ float __DEFAULT_FN_ATTRS
+_mm_cvtss_f32(__m128 __a)
+{
+ return __a[0];
+}
+
+static __inline__ __m128 __DEFAULT_FN_ATTRS
+_mm_loadh_pi(__m128 __a, const __m64 *__p)
+{
+ typedef float __mm_loadh_pi_v2f32 __attribute__((__vector_size__(8)));
+ struct __mm_loadh_pi_struct {
+ __mm_loadh_pi_v2f32 __u;
+ } __attribute__((__packed__, __may_alias__));
+ __mm_loadh_pi_v2f32 __b = ((struct __mm_loadh_pi_struct*)__p)->__u;
+ __m128 __bb = __builtin_shufflevector(__b, __b, 0, 1, 0, 1);
+ return __builtin_shufflevector(__a, __bb, 0, 1, 4, 5);
+}
+
+static __inline__ __m128 __DEFAULT_FN_ATTRS
+_mm_loadl_pi(__m128 __a, const __m64 *__p)
+{
+ typedef float __mm_loadl_pi_v2f32 __attribute__((__vector_size__(8)));
+ struct __mm_loadl_pi_struct {
+ __mm_loadl_pi_v2f32 __u;
+ } __attribute__((__packed__, __may_alias__));
+ __mm_loadl_pi_v2f32 __b = ((struct __mm_loadl_pi_struct*)__p)->__u;
+ __m128 __bb = __builtin_shufflevector(__b, __b, 0, 1, 0, 1);
+ return __builtin_shufflevector(__a, __bb, 4, 5, 2, 3);
+}
+
+static __inline__ __m128 __DEFAULT_FN_ATTRS
+_mm_load_ss(const float *__p)
+{
+ struct __mm_load_ss_struct {
+ float __u;
+ } __attribute__((__packed__, __may_alias__));
+ float __u = ((struct __mm_load_ss_struct*)__p)->__u;
+ return (__m128){ __u, 0, 0, 0 };
+}
+
+static __inline__ __m128 __DEFAULT_FN_ATTRS
+_mm_load1_ps(const float *__p)
+{
+ struct __mm_load1_ps_struct {
+ float __u;
+ } __attribute__((__packed__, __may_alias__));
+ float __u = ((struct __mm_load1_ps_struct*)__p)->__u;
+ return (__m128){ __u, __u, __u, __u };
+}
+
+#define _mm_load_ps1(p) _mm_load1_ps(p)
+
+static __inline__ __m128 __DEFAULT_FN_ATTRS
+_mm_load_ps(const float *__p)
+{
+ return *(__m128*)__p;
+}
+
+static __inline__ __m128 __DEFAULT_FN_ATTRS
+_mm_loadu_ps(const float *__p)
+{
+ struct __loadu_ps {
+ __m128 __v;
+ } __attribute__((__packed__, __may_alias__));
+ return ((struct __loadu_ps*)__p)->__v;
+}
+
+static __inline__ __m128 __DEFAULT_FN_ATTRS
+_mm_loadr_ps(const float *__p)
+{
+ __m128 __a = _mm_load_ps(__p);
+ return __builtin_shufflevector(__a, __a, 3, 2, 1, 0);
+}
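+
+/* Editor's note: _mm_load_ps assumes a 16-byte-aligned pointer, while
+ * _mm_loadu_ps tolerates any alignment via the packed may_alias struct
+ * above. A sketch (buffer names are hypothetical):
+ *
+ *   float buf[8] = {0};
+ *   __m128 u = _mm_loadu_ps(buf + 1);         // unaligned: fine
+ *   _Alignas(16) float abuf[4] = {0};
+ *   __m128 v = _mm_load_ps(abuf);             // needs 16-byte alignment
+ */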
+
+static __inline__ __m128 __DEFAULT_FN_ATTRS
+_mm_undefined_ps(void)
+{
+ return (__m128)__builtin_ia32_undef128();
+}
+
+static __inline__ __m128 __DEFAULT_FN_ATTRS
+_mm_set_ss(float __w)
+{
+ return (__m128){ __w, 0, 0, 0 };
+}
+
+static __inline__ __m128 __DEFAULT_FN_ATTRS
+_mm_set1_ps(float __w)
+{
+ return (__m128){ __w, __w, __w, __w };
+}
+
+/* Microsoft specific. */
+static __inline__ __m128 __DEFAULT_FN_ATTRS
+_mm_set_ps1(float __w)
+{
+ return _mm_set1_ps(__w);
+}
+
+static __inline__ __m128 __DEFAULT_FN_ATTRS
+_mm_set_ps(float __z, float __y, float __x, float __w)
+{
+ return (__m128){ __w, __x, __y, __z };
+}
+
+static __inline__ __m128 __DEFAULT_FN_ATTRS
+_mm_setr_ps(float __z, float __y, float __x, float __w)
+{
+ return (__m128){ __z, __y, __x, __w };
+}
+
+static __inline__ __m128 __DEFAULT_FN_ATTRS
+_mm_setzero_ps(void)
+{
+ return (__m128){ 0, 0, 0, 0 };
+}
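+
+/* Editor's note: _mm_set_ps takes lanes high-to-low while _mm_setr_ps takes
+ * them in memory (low-to-high) order, so these build the same vector:
+ *
+ *   __m128 a = _mm_set_ps (3.f, 2.f, 1.f, 0.f);   // lane3 .. lane0
+ *   __m128 b = _mm_setr_ps(0.f, 1.f, 2.f, 3.f);   // lane0 .. lane3
+ */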
+
+static __inline__ void __DEFAULT_FN_ATTRS
+_mm_storeh_pi(__m64 *__p, __m128 __a)
+{
+ __builtin_ia32_storehps((__v2si *)__p, __a);
+}
+
+static __inline__ void __DEFAULT_FN_ATTRS
+_mm_storel_pi(__m64 *__p, __m128 __a)
+{
+ __builtin_ia32_storelps((__v2si *)__p, __a);
+}
+
+static __inline__ void __DEFAULT_FN_ATTRS
+_mm_store_ss(float *__p, __m128 __a)
+{
+ struct __mm_store_ss_struct {
+ float __u;
+ } __attribute__((__packed__, __may_alias__));
+ ((struct __mm_store_ss_struct*)__p)->__u = __a[0];
+}
+
+static __inline__ void __DEFAULT_FN_ATTRS
+_mm_storeu_ps(float *__p, __m128 __a)
+{
+ __builtin_ia32_storeups(__p, __a);
+}
+
+static __inline__ void __DEFAULT_FN_ATTRS
+_mm_store1_ps(float *__p, __m128 __a)
+{
+ __a = __builtin_shufflevector(__a, __a, 0, 0, 0, 0);
+ _mm_storeu_ps(__p, __a);
+}
+
+static __inline__ void __DEFAULT_FN_ATTRS
+_mm_store_ps1(float *__p, __m128 __a)
+{
+ return _mm_store1_ps(__p, __a);
+}
+
+static __inline__ void __DEFAULT_FN_ATTRS
+_mm_store_ps(float *__p, __m128 __a)
+{
+ *(__m128 *)__p = __a;
+}
+
+static __inline__ void __DEFAULT_FN_ATTRS
+_mm_storer_ps(float *__p, __m128 __a)
+{
+ __a = __builtin_shufflevector(__a, __a, 3, 2, 1, 0);
+ _mm_store_ps(__p, __a);
+}
+
+#define _MM_HINT_T0 3
+#define _MM_HINT_T1 2
+#define _MM_HINT_T2 1
+#define _MM_HINT_NTA 0
+
+#ifndef _MSC_VER
+/* FIXME: We have to #define this because "sel" must be a constant integer, and
+ Sema doesn't do any form of constant propagation yet. */
+
+#define _mm_prefetch(a, sel) (__builtin_prefetch((void *)(a), 0, (sel)))
+#endif
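+
+/* Editor's note: sel feeds __builtin_prefetch's locality argument directly;
+ * a hypothetical read-ahead of a later cache line:
+ *
+ *   _mm_prefetch((const char *)(p + 64), _MM_HINT_T0);
+ */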
+
+static __inline__ void __DEFAULT_FN_ATTRS
+_mm_stream_pi(__m64 *__p, __m64 __a)
+{
+ __builtin_ia32_movntq(__p, __a);
+}
+
+static __inline__ void __DEFAULT_FN_ATTRS
+_mm_stream_ps(float *__p, __m128 __a)
+{
+ __builtin_ia32_movntps(__p, __a);
+}
+
+static __inline__ void __DEFAULT_FN_ATTRS
+_mm_sfence(void)
+{
+ __builtin_ia32_sfence();
+}
+
+static __inline__ int __DEFAULT_FN_ATTRS
+_mm_extract_pi16(__m64 __a, int __n)
+{
+ __v4hi __b = (__v4hi)__a;
+ return (unsigned short)__b[__n & 3];
+}
+
+static __inline__ __m64 __DEFAULT_FN_ATTRS
+_mm_insert_pi16(__m64 __a, int __d, int __n)
+{
+ __v4hi __b = (__v4hi)__a;
+ __b[__n & 3] = __d;
+ return (__m64)__b;
+}
+
+static __inline__ __m64 __DEFAULT_FN_ATTRS
+_mm_max_pi16(__m64 __a, __m64 __b)
+{
+ return (__m64)__builtin_ia32_pmaxsw((__v4hi)__a, (__v4hi)__b);
+}
+
+static __inline__ __m64 __DEFAULT_FN_ATTRS
+_mm_max_pu8(__m64 __a, __m64 __b)
+{
+ return (__m64)__builtin_ia32_pmaxub((__v8qi)__a, (__v8qi)__b);
+}
+
+static __inline__ __m64 __DEFAULT_FN_ATTRS
+_mm_min_pi16(__m64 __a, __m64 __b)
+{
+ return (__m64)__builtin_ia32_pminsw((__v4hi)__a, (__v4hi)__b);
+}
+
+static __inline__ __m64 __DEFAULT_FN_ATTRS
+_mm_min_pu8(__m64 __a, __m64 __b)
+{
+ return (__m64)__builtin_ia32_pminub((__v8qi)__a, (__v8qi)__b);
+}
+
+static __inline__ int __DEFAULT_FN_ATTRS
+_mm_movemask_pi8(__m64 __a)
+{
+ return __builtin_ia32_pmovmskb((__v8qi)__a);
+}
+
+static __inline__ __m64 __DEFAULT_FN_ATTRS
+_mm_mulhi_pu16(__m64 __a, __m64 __b)
+{
+ return (__m64)__builtin_ia32_pmulhuw((__v4hi)__a, (__v4hi)__b);
+}
+
+#define _mm_shuffle_pi16(a, n) __extension__ ({ \
+ (__m64)__builtin_ia32_pshufw((__v4hi)(__m64)(a), (n)); })
+
+static __inline__ void __DEFAULT_FN_ATTRS
+_mm_maskmove_si64(__m64 __d, __m64 __n, char *__p)
+{
+ __builtin_ia32_maskmovq((__v8qi)__d, (__v8qi)__n, __p);
+}
+
+static __inline__ __m64 __DEFAULT_FN_ATTRS
+_mm_avg_pu8(__m64 __a, __m64 __b)
+{
+ return (__m64)__builtin_ia32_pavgb((__v8qi)__a, (__v8qi)__b);
+}
+
+static __inline__ __m64 __DEFAULT_FN_ATTRS
+_mm_avg_pu16(__m64 __a, __m64 __b)
+{
+ return (__m64)__builtin_ia32_pavgw((__v4hi)__a, (__v4hi)__b);
+}
+
+static __inline__ __m64 __DEFAULT_FN_ATTRS
+_mm_sad_pu8(__m64 __a, __m64 __b)
+{
+ return (__m64)__builtin_ia32_psadbw((__v8qi)__a, (__v8qi)__b);
+}
+
+static __inline__ unsigned int __DEFAULT_FN_ATTRS
+_mm_getcsr(void)
+{
+ return __builtin_ia32_stmxcsr();
+}
+
+static __inline__ void __DEFAULT_FN_ATTRS
+_mm_setcsr(unsigned int __i)
+{
+ __builtin_ia32_ldmxcsr(__i);
+}
+
+#define _mm_shuffle_ps(a, b, mask) __extension__ ({ \
+ (__m128)__builtin_shufflevector((__v4sf)(__m128)(a), (__v4sf)(__m128)(b), \
+ (mask) & 0x3, ((mask) & 0xc) >> 2, \
+ (((mask) & 0x30) >> 4) + 4, \
+ (((mask) & 0xc0) >> 6) + 4); })
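+
+/* Editor's note (sketch): the two low mask pairs pick lanes of a, the two
+ * high pairs pick lanes of b; 0x1B (== _MM_SHUFFLE(0, 1, 2, 3), defined
+ * later in this header) therefore reverses a vector when both operands are
+ * the same:
+ *
+ *   __m128 rev = _mm_shuffle_ps(v, v, 0x1B);   // lanes 3, 2, 1, 0 of v
+ */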
+
+static __inline__ __m128 __DEFAULT_FN_ATTRS
+_mm_unpackhi_ps(__m128 __a, __m128 __b)
+{
+ return __builtin_shufflevector(__a, __b, 2, 6, 3, 7);
+}
+
+static __inline__ __m128 __DEFAULT_FN_ATTRS
+_mm_unpacklo_ps(__m128 __a, __m128 __b)
+{
+ return __builtin_shufflevector(__a, __b, 0, 4, 1, 5);
+}
+
+static __inline__ __m128 __DEFAULT_FN_ATTRS
+_mm_move_ss(__m128 __a, __m128 __b)
+{
+ return __builtin_shufflevector(__a, __b, 4, 1, 2, 3);
+}
+
+static __inline__ __m128 __DEFAULT_FN_ATTRS
+_mm_movehl_ps(__m128 __a, __m128 __b)
+{
+ return __builtin_shufflevector(__a, __b, 6, 7, 2, 3);
+}
+
+static __inline__ __m128 __DEFAULT_FN_ATTRS
+_mm_movelh_ps(__m128 __a, __m128 __b)
+{
+ return __builtin_shufflevector(__a, __b, 0, 1, 4, 5);
+}
+
+static __inline__ __m128 __DEFAULT_FN_ATTRS
+_mm_cvtpi16_ps(__m64 __a)
+{
+ __m64 __b, __c;
+ __m128 __r;
+
+ __b = _mm_setzero_si64();
+ __b = _mm_cmpgt_pi16(__b, __a);
+ __c = _mm_unpackhi_pi16(__a, __b);
+ __r = _mm_setzero_ps();
+ __r = _mm_cvtpi32_ps(__r, __c);
+ __r = _mm_movelh_ps(__r, __r);
+ __c = _mm_unpacklo_pi16(__a, __b);
+ __r = _mm_cvtpi32_ps(__r, __c);
+
+ return __r;
+}
+
+static __inline__ __m128 __DEFAULT_FN_ATTRS
+_mm_cvtpu16_ps(__m64 __a)
+{
+ __m64 __b, __c;
+ __m128 __r;
+
+ __b = _mm_setzero_si64();
+ __c = _mm_unpackhi_pi16(__a, __b);
+ __r = _mm_setzero_ps();
+ __r = _mm_cvtpi32_ps(__r, __c);
+ __r = _mm_movelh_ps(__r, __r);
+ __c = _mm_unpacklo_pi16(__a, __b);
+ __r = _mm_cvtpi32_ps(__r, __c);
+
+ return __r;
+}
+
+static __inline__ __m128 __DEFAULT_FN_ATTRS
+_mm_cvtpi8_ps(__m64 __a)
+{
+ __m64 __b;
+
+ __b = _mm_setzero_si64();
+ __b = _mm_cmpgt_pi8(__b, __a);
+ __b = _mm_unpacklo_pi8(__a, __b);
+
+ return _mm_cvtpi16_ps(__b);
+}
+
+static __inline__ __m128 __DEFAULT_FN_ATTRS
+_mm_cvtpu8_ps(__m64 __a)
+{
+ __m64 __b;
+
+ __b = _mm_setzero_si64();
+ __b = _mm_unpacklo_pi8(__a, __b);
+
+ return _mm_cvtpi16_ps(__b);
+}
+
+static __inline__ __m128 __DEFAULT_FN_ATTRS
+_mm_cvtpi32x2_ps(__m64 __a, __m64 __b)
+{
+ __m128 __c;
+
+ __c = _mm_setzero_ps();
+ __c = _mm_cvtpi32_ps(__c, __b);
+ __c = _mm_movelh_ps(__c, __c);
+
+ return _mm_cvtpi32_ps(__c, __a);
+}
+
+static __inline__ __m64 __DEFAULT_FN_ATTRS
+_mm_cvtps_pi16(__m128 __a)
+{
+ __m64 __b, __c;
+
+ __b = _mm_cvtps_pi32(__a);
+ __a = _mm_movehl_ps(__a, __a);
+ __c = _mm_cvtps_pi32(__a);
+
+ return _mm_packs_pi32(__b, __c);
+}
+
+static __inline__ __m64 __DEFAULT_FN_ATTRS
+_mm_cvtps_pi8(__m128 __a)
+{
+ __m64 __b, __c;
+
+ __b = _mm_cvtps_pi16(__a);
+ __c = _mm_setzero_si64();
+
+ return _mm_packs_pi16(__b, __c);
+}
+
+static __inline__ int __DEFAULT_FN_ATTRS
+_mm_movemask_ps(__m128 __a)
+{
+ return __builtin_ia32_movmskps(__a);
+}
+
+#ifdef _MSC_VER
+#define _MM_ALIGN16 __declspec(align(16))
+#endif
+
+#define _MM_SHUFFLE(z, y, x, w) (((z) << 6) | ((y) << 4) | ((x) << 2) | (w))
+
+#define _MM_EXCEPT_INVALID (0x0001)
+#define _MM_EXCEPT_DENORM (0x0002)
+#define _MM_EXCEPT_DIV_ZERO (0x0004)
+#define _MM_EXCEPT_OVERFLOW (0x0008)
+#define _MM_EXCEPT_UNDERFLOW (0x0010)
+#define _MM_EXCEPT_INEXACT (0x0020)
+#define _MM_EXCEPT_MASK (0x003f)
+
+#define _MM_MASK_INVALID (0x0080)
+#define _MM_MASK_DENORM (0x0100)
+#define _MM_MASK_DIV_ZERO (0x0200)
+#define _MM_MASK_OVERFLOW (0x0400)
+#define _MM_MASK_UNDERFLOW (0x0800)
+#define _MM_MASK_INEXACT (0x1000)
+#define _MM_MASK_MASK (0x1f80)
+
+#define _MM_ROUND_NEAREST (0x0000)
+#define _MM_ROUND_DOWN (0x2000)
+#define _MM_ROUND_UP (0x4000)
+#define _MM_ROUND_TOWARD_ZERO (0x6000)
+#define _MM_ROUND_MASK (0x6000)
+
+#define _MM_FLUSH_ZERO_MASK (0x8000)
+#define _MM_FLUSH_ZERO_ON (0x8000)
+#define _MM_FLUSH_ZERO_OFF (0x0000)
+
+#define _MM_GET_EXCEPTION_MASK() (_mm_getcsr() & _MM_MASK_MASK)
+#define _MM_GET_EXCEPTION_STATE() (_mm_getcsr() & _MM_EXCEPT_MASK)
+#define _MM_GET_FLUSH_ZERO_MODE() (_mm_getcsr() & _MM_FLUSH_ZERO_MASK)
+#define _MM_GET_ROUNDING_MODE() (_mm_getcsr() & _MM_ROUND_MASK)
+
+#define _MM_SET_EXCEPTION_MASK(x) (_mm_setcsr((_mm_getcsr() & ~_MM_MASK_MASK) | (x)))
+#define _MM_SET_EXCEPTION_STATE(x) (_mm_setcsr((_mm_getcsr() & ~_MM_EXCEPT_MASK) | (x)))
+#define _MM_SET_FLUSH_ZERO_MODE(x) (_mm_setcsr((_mm_getcsr() & ~_MM_FLUSH_ZERO_MASK) | (x)))
+#define _MM_SET_ROUNDING_MODE(x) (_mm_setcsr((_mm_getcsr() & ~_MM_ROUND_MASK) | (x)))
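+
+/* Editor's note (illustrative sketch): saving, changing, and restoring the
+ * flush-to-zero mode around a denormal-heavy section:
+ *
+ *   unsigned int saved = _MM_GET_FLUSH_ZERO_MODE();
+ *   _MM_SET_FLUSH_ZERO_MODE(_MM_FLUSH_ZERO_ON);
+ *   // ... hot loop; denormal results are flushed to zero ...
+ *   _MM_SET_FLUSH_ZERO_MODE(saved);
+ */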
+
+#define _MM_TRANSPOSE4_PS(row0, row1, row2, row3) \
+do { \
+ __m128 tmp3, tmp2, tmp1, tmp0; \
+ tmp0 = _mm_unpacklo_ps((row0), (row1)); \
+ tmp2 = _mm_unpacklo_ps((row2), (row3)); \
+ tmp1 = _mm_unpackhi_ps((row0), (row1)); \
+ tmp3 = _mm_unpackhi_ps((row2), (row3)); \
+ (row0) = _mm_movelh_ps(tmp0, tmp2); \
+ (row1) = _mm_movehl_ps(tmp2, tmp0); \
+ (row2) = _mm_movelh_ps(tmp1, tmp3); \
+ (row3) = _mm_movehl_ps(tmp3, tmp1); \
+} while (0)
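+
+/* Editor's note: a minimal usage sketch (not in the original header) for a
+ * hypothetical row-major 4x4 matrix m of floats:
+ *
+ *   __m128 r0 = _mm_loadu_ps(m +  0), r1 = _mm_loadu_ps(m +  4);
+ *   __m128 r2 = _mm_loadu_ps(m +  8), r3 = _mm_loadu_ps(m + 12);
+ *   _MM_TRANSPOSE4_PS(r0, r1, r2, r3);
+ *   _mm_storeu_ps(m +  0, r0); _mm_storeu_ps(m +  4, r1);
+ *   _mm_storeu_ps(m +  8, r2); _mm_storeu_ps(m + 12, r3);
+ */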
+
+/* Aliases for compatibility. */
+#define _m_pextrw _mm_extract_pi16
+#define _m_pinsrw _mm_insert_pi16
+#define _m_pmaxsw _mm_max_pi16
+#define _m_pmaxub _mm_max_pu8
+#define _m_pminsw _mm_min_pi16
+#define _m_pminub _mm_min_pu8
+#define _m_pmovmskb _mm_movemask_pi8
+#define _m_pmulhuw _mm_mulhi_pu16
+#define _m_pshufw _mm_shuffle_pi16
+#define _m_maskmovq _mm_maskmove_si64
+#define _m_pavgb _mm_avg_pu8
+#define _m_pavgw _mm_avg_pu16
+#define _m_psadbw _mm_sad_pu8
+#define _m_ _mm_
+
+#undef __DEFAULT_FN_ATTRS
+
+/* Ugly hack for backwards-compatibility (compatible with gcc) */
+#if defined(__SSE2__) && !__has_feature(modules)
+#include <emmintrin.h>
+#endif
+
+#endif /* __XMMINTRIN_H */
diff --git a/clang-2690385/lib64/clang/3.8/include/xopintrin.h b/clang-2690385/lib64/clang/3.8/include/xopintrin.h
new file mode 100644
index 00000000..f07f51c2
--- /dev/null
+++ b/clang-2690385/lib64/clang/3.8/include/xopintrin.h
@@ -0,0 +1,782 @@
+/*===---- xopintrin.h - XOP intrinsics -------------------------------------===
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+ * THE SOFTWARE.
+ *
+ *===-----------------------------------------------------------------------===
+ */
+
+#ifndef __X86INTRIN_H
+#error "Never use <xopintrin.h> directly; include <x86intrin.h> instead."
+#endif
+
+#ifndef __XOPINTRIN_H
+#define __XOPINTRIN_H
+
+#include <fma4intrin.h>
+
+/* Define the default attributes for the functions in this file. */
+#define __DEFAULT_FN_ATTRS __attribute__((__always_inline__, __nodebug__, __target__("xop")))
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_maccs_epi16(__m128i __A, __m128i __B, __m128i __C)
+{
+ return (__m128i)__builtin_ia32_vpmacssww((__v8hi)__A, (__v8hi)__B, (__v8hi)__C);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_macc_epi16(__m128i __A, __m128i __B, __m128i __C)
+{
+ return (__m128i)__builtin_ia32_vpmacsww((__v8hi)__A, (__v8hi)__B, (__v8hi)__C);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_maccsd_epi16(__m128i __A, __m128i __B, __m128i __C)
+{
+ return (__m128i)__builtin_ia32_vpmacsswd((__v8hi)__A, (__v8hi)__B, (__v4si)__C);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_maccd_epi16(__m128i __A, __m128i __B, __m128i __C)
+{
+ return (__m128i)__builtin_ia32_vpmacswd((__v8hi)__A, (__v8hi)__B, (__v4si)__C);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_maccs_epi32(__m128i __A, __m128i __B, __m128i __C)
+{
+ return (__m128i)__builtin_ia32_vpmacssdd((__v4si)__A, (__v4si)__B, (__v4si)__C);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_macc_epi32(__m128i __A, __m128i __B, __m128i __C)
+{
+ return (__m128i)__builtin_ia32_vpmacsdd((__v4si)__A, (__v4si)__B, (__v4si)__C);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_maccslo_epi32(__m128i __A, __m128i __B, __m128i __C)
+{
+ return (__m128i)__builtin_ia32_vpmacssdql((__v4si)__A, (__v4si)__B, (__v2di)__C);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_macclo_epi32(__m128i __A, __m128i __B, __m128i __C)
+{
+ return (__m128i)__builtin_ia32_vpmacsdql((__v4si)__A, (__v4si)__B, (__v2di)__C);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_maccshi_epi32(__m128i __A, __m128i __B, __m128i __C)
+{
+ return (__m128i)__builtin_ia32_vpmacssdqh((__v4si)__A, (__v4si)__B, (__v2di)__C);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_macchi_epi32(__m128i __A, __m128i __B, __m128i __C)
+{
+ return (__m128i)__builtin_ia32_vpmacsdqh((__v4si)__A, (__v4si)__B, (__v2di)__C);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_maddsd_epi16(__m128i __A, __m128i __B, __m128i __C)
+{
+ return (__m128i)__builtin_ia32_vpmadcsswd((__v8hi)__A, (__v8hi)__B, (__v4si)__C);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_maddd_epi16(__m128i __A, __m128i __B, __m128i __C)
+{
+ return (__m128i)__builtin_ia32_vpmadcswd((__v8hi)__A, (__v8hi)__B, (__v4si)__C);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_haddw_epi8(__m128i __A)
+{
+ return (__m128i)__builtin_ia32_vphaddbw((__v16qi)__A);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_haddd_epi8(__m128i __A)
+{
+ return (__m128i)__builtin_ia32_vphaddbd((__v16qi)__A);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_haddq_epi8(__m128i __A)
+{
+ return (__m128i)__builtin_ia32_vphaddbq((__v16qi)__A);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_haddd_epi16(__m128i __A)
+{
+ return (__m128i)__builtin_ia32_vphaddwd((__v8hi)__A);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_haddq_epi16(__m128i __A)
+{
+ return (__m128i)__builtin_ia32_vphaddwq((__v8hi)__A);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_haddq_epi32(__m128i __A)
+{
+ return (__m128i)__builtin_ia32_vphadddq((__v4si)__A);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_haddw_epu8(__m128i __A)
+{
+ return (__m128i)__builtin_ia32_vphaddubw((__v16qi)__A);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_haddd_epu8(__m128i __A)
+{
+ return (__m128i)__builtin_ia32_vphaddubd((__v16qi)__A);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_haddq_epu8(__m128i __A)
+{
+ return (__m128i)__builtin_ia32_vphaddubq((__v16qi)__A);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_haddd_epu16(__m128i __A)
+{
+ return (__m128i)__builtin_ia32_vphadduwd((__v8hi)__A);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_haddq_epu16(__m128i __A)
+{
+ return (__m128i)__builtin_ia32_vphadduwq((__v8hi)__A);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_haddq_epu32(__m128i __A)
+{
+ return (__m128i)__builtin_ia32_vphaddudq((__v4si)__A);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_hsubw_epi8(__m128i __A)
+{
+ return (__m128i)__builtin_ia32_vphsubbw((__v16qi)__A);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_hsubd_epi16(__m128i __A)
+{
+ return (__m128i)__builtin_ia32_vphsubwd((__v8hi)__A);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_hsubq_epi32(__m128i __A)
+{
+ return (__m128i)__builtin_ia32_vphsubdq((__v4si)__A);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_cmov_si128(__m128i __A, __m128i __B, __m128i __C)
+{
+ return (__m128i)__builtin_ia32_vpcmov(__A, __B, __C);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS
+_mm256_cmov_si256(__m256i __A, __m256i __B, __m256i __C)
+{
+ return (__m256i)__builtin_ia32_vpcmov_256(__A, __B, __C);
+}
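+
+/* Editor's note (sketch, not in the original source): vpcmov is a bitwise
+ * select, r = (A & C) | (B & ~C), so set bits of C take bits from A:
+ *
+ *   __m128i r = _mm_cmov_si128(a, b, mask);   // mask 1-bits -> a, 0-bits -> b
+ */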
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_perm_epi8(__m128i __A, __m128i __B, __m128i __C)
+{
+ return (__m128i)__builtin_ia32_vpperm((__v16qi)__A, (__v16qi)__B, (__v16qi)__C);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_rot_epi8(__m128i __A, __m128i __B)
+{
+ return (__m128i)__builtin_ia32_vprotb((__v16qi)__A, (__v16qi)__B);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_rot_epi16(__m128i __A, __m128i __B)
+{
+ return (__m128i)__builtin_ia32_vprotw((__v8hi)__A, (__v8hi)__B);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_rot_epi32(__m128i __A, __m128i __B)
+{
+ return (__m128i)__builtin_ia32_vprotd((__v4si)__A, (__v4si)__B);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_rot_epi64(__m128i __A, __m128i __B)
+{
+ return (__m128i)__builtin_ia32_vprotq((__v2di)__A, (__v2di)__B);
+}
+
+#define _mm_roti_epi8(A, N) __extension__ ({ \
+ (__m128i)__builtin_ia32_vprotbi((__v16qi)(__m128i)(A), (N)); })
+
+#define _mm_roti_epi16(A, N) __extension__ ({ \
+ (__m128i)__builtin_ia32_vprotwi((__v8hi)(__m128i)(A), (N)); })
+
+#define _mm_roti_epi32(A, N) __extension__ ({ \
+ (__m128i)__builtin_ia32_vprotdi((__v4si)(__m128i)(A), (N)); })
+
+#define _mm_roti_epi64(A, N) __extension__ ({ \
+ (__m128i)__builtin_ia32_vprotqi((__v2di)(__m128i)(A), (N)); })
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_shl_epi8(__m128i __A, __m128i __B)
+{
+ return (__m128i)__builtin_ia32_vpshlb((__v16qi)__A, (__v16qi)__B);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_shl_epi16(__m128i __A, __m128i __B)
+{
+ return (__m128i)__builtin_ia32_vpshlw((__v8hi)__A, (__v8hi)__B);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_shl_epi32(__m128i __A, __m128i __B)
+{
+ return (__m128i)__builtin_ia32_vpshld((__v4si)__A, (__v4si)__B);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_shl_epi64(__m128i __A, __m128i __B)
+{
+ return (__m128i)__builtin_ia32_vpshlq((__v2di)__A, (__v2di)__B);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_sha_epi8(__m128i __A, __m128i __B)
+{
+ return (__m128i)__builtin_ia32_vpshab((__v16qi)__A, (__v16qi)__B);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_sha_epi16(__m128i __A, __m128i __B)
+{
+ return (__m128i)__builtin_ia32_vpshaw((__v8hi)__A, (__v8hi)__B);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_sha_epi32(__m128i __A, __m128i __B)
+{
+ return (__m128i)__builtin_ia32_vpshad((__v4si)__A, (__v4si)__B);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_sha_epi64(__m128i __A, __m128i __B)
+{
+ return (__m128i)__builtin_ia32_vpshaq((__v2di)__A, (__v2di)__B);
+}
+
+#define _mm_com_epu8(A, B, N) __extension__ ({ \
+ (__m128i)__builtin_ia32_vpcomub((__v16qi)(__m128i)(A), \
+ (__v16qi)(__m128i)(B), (N)); })
+
+#define _mm_com_epu16(A, B, N) __extension__ ({ \
+ (__m128i)__builtin_ia32_vpcomuw((__v8hi)(__m128i)(A), \
+ (__v8hi)(__m128i)(B), (N)); })
+
+#define _mm_com_epu32(A, B, N) __extension__ ({ \
+ (__m128i)__builtin_ia32_vpcomud((__v4si)(__m128i)(A), \
+ (__v4si)(__m128i)(B), (N)); })
+
+#define _mm_com_epu64(A, B, N) __extension__ ({ \
+ (__m128i)__builtin_ia32_vpcomuq((__v2di)(__m128i)(A), \
+ (__v2di)(__m128i)(B), (N)); })
+
+#define _mm_com_epi8(A, B, N) __extension__ ({ \
+ (__m128i)__builtin_ia32_vpcomb((__v16qi)(__m128i)(A), \
+ (__v16qi)(__m128i)(B), (N)); })
+
+#define _mm_com_epi16(A, B, N) __extension__ ({ \
+ (__m128i)__builtin_ia32_vpcomw((__v8hi)(__m128i)(A), \
+ (__v8hi)(__m128i)(B), (N)); })
+
+#define _mm_com_epi32(A, B, N) __extension__ ({ \
+ (__m128i)__builtin_ia32_vpcomd((__v4si)(__m128i)(A), \
+ (__v4si)(__m128i)(B), (N)); })
+
+#define _mm_com_epi64(A, B, N) __extension__ ({ \
+ (__m128i)__builtin_ia32_vpcomq((__v2di)(__m128i)(A), \
+ (__v2di)(__m128i)(B), (N)); })
+
+#define _MM_PCOMCTRL_LT 0
+#define _MM_PCOMCTRL_LE 1
+#define _MM_PCOMCTRL_GT 2
+#define _MM_PCOMCTRL_GE 3
+#define _MM_PCOMCTRL_EQ 4
+#define _MM_PCOMCTRL_NEQ 5
+#define _MM_PCOMCTRL_FALSE 6
+#define _MM_PCOMCTRL_TRUE 7
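+
+/* Editor's note: a sketch of the raw form; the named wrappers below expand
+ * to exactly these calls:
+ *
+ *   __m128i lt = _mm_com_epu8(a, b, _MM_PCOMCTRL_LT);  // 0xFF where a < b
+ */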
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_comlt_epu8(__m128i __A, __m128i __B)
+{
+ return _mm_com_epu8(__A, __B, _MM_PCOMCTRL_LT);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_comle_epu8(__m128i __A, __m128i __B)
+{
+ return _mm_com_epu8(__A, __B, _MM_PCOMCTRL_LE);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_comgt_epu8(__m128i __A, __m128i __B)
+{
+ return _mm_com_epu8(__A, __B, _MM_PCOMCTRL_GT);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_comge_epu8(__m128i __A, __m128i __B)
+{
+ return _mm_com_epu8(__A, __B, _MM_PCOMCTRL_GE);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_comeq_epu8(__m128i __A, __m128i __B)
+{
+ return _mm_com_epu8(__A, __B, _MM_PCOMCTRL_EQ);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_comneq_epu8(__m128i __A, __m128i __B)
+{
+ return _mm_com_epu8(__A, __B, _MM_PCOMCTRL_NEQ);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_comfalse_epu8(__m128i __A, __m128i __B)
+{
+ return _mm_com_epu8(__A, __B, _MM_PCOMCTRL_FALSE);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_comtrue_epu8(__m128i __A, __m128i __B)
+{
+ return _mm_com_epu8(__A, __B, _MM_PCOMCTRL_TRUE);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_comlt_epu16(__m128i __A, __m128i __B)
+{
+ return _mm_com_epu16(__A, __B, _MM_PCOMCTRL_LT);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_comle_epu16(__m128i __A, __m128i __B)
+{
+ return _mm_com_epu16(__A, __B, _MM_PCOMCTRL_LE);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_comgt_epu16(__m128i __A, __m128i __B)
+{
+ return _mm_com_epu16(__A, __B, _MM_PCOMCTRL_GT);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_comge_epu16(__m128i __A, __m128i __B)
+{
+ return _mm_com_epu16(__A, __B, _MM_PCOMCTRL_GE);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_comeq_epu16(__m128i __A, __m128i __B)
+{
+ return _mm_com_epu16(__A, __B, _MM_PCOMCTRL_EQ);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_comneq_epu16(__m128i __A, __m128i __B)
+{
+ return _mm_com_epu16(__A, __B, _MM_PCOMCTRL_NEQ);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_comfalse_epu16(__m128i __A, __m128i __B)
+{
+ return _mm_com_epu16(__A, __B, _MM_PCOMCTRL_FALSE);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_comtrue_epu16(__m128i __A, __m128i __B)
+{
+ return _mm_com_epu16(__A, __B, _MM_PCOMCTRL_TRUE);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_comlt_epu32(__m128i __A, __m128i __B)
+{
+ return _mm_com_epu32(__A, __B, _MM_PCOMCTRL_LT);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_comle_epu32(__m128i __A, __m128i __B)
+{
+ return _mm_com_epu32(__A, __B, _MM_PCOMCTRL_LE);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_comgt_epu32(__m128i __A, __m128i __B)
+{
+ return _mm_com_epu32(__A, __B, _MM_PCOMCTRL_GT);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_comge_epu32(__m128i __A, __m128i __B)
+{
+ return _mm_com_epu32(__A, __B, _MM_PCOMCTRL_GE);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_comeq_epu32(__m128i __A, __m128i __B)
+{
+ return _mm_com_epu32(__A, __B, _MM_PCOMCTRL_EQ);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_comneq_epu32(__m128i __A, __m128i __B)
+{
+ return _mm_com_epu32(__A, __B, _MM_PCOMCTRL_NEQ);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_comfalse_epu32(__m128i __A, __m128i __B)
+{
+ return _mm_com_epu32(__A, __B, _MM_PCOMCTRL_FALSE);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_comtrue_epu32(__m128i __A, __m128i __B)
+{
+ return _mm_com_epu32(__A, __B, _MM_PCOMCTRL_TRUE);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_comlt_epu64(__m128i __A, __m128i __B)
+{
+ return _mm_com_epu64(__A, __B, _MM_PCOMCTRL_LT);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_comle_epu64(__m128i __A, __m128i __B)
+{
+ return _mm_com_epu64(__A, __B, _MM_PCOMCTRL_LE);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_comgt_epu64(__m128i __A, __m128i __B)
+{
+ return _mm_com_epu64(__A, __B, _MM_PCOMCTRL_GT);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_comge_epu64(__m128i __A, __m128i __B)
+{
+ return _mm_com_epu64(__A, __B, _MM_PCOMCTRL_GE);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_comeq_epu64(__m128i __A, __m128i __B)
+{
+ return _mm_com_epu64(__A, __B, _MM_PCOMCTRL_EQ);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_comneq_epu64(__m128i __A, __m128i __B)
+{
+ return _mm_com_epu64(__A, __B, _MM_PCOMCTRL_NEQ);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_comfalse_epu64(__m128i __A, __m128i __B)
+{
+ return _mm_com_epu64(__A, __B, _MM_PCOMCTRL_FALSE);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_comtrue_epu64(__m128i __A, __m128i __B)
+{
+ return _mm_com_epu64(__A, __B, _MM_PCOMCTRL_TRUE);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_comlt_epi8(__m128i __A, __m128i __B)
+{
+ return _mm_com_epi8(__A, __B, _MM_PCOMCTRL_LT);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_comle_epi8(__m128i __A, __m128i __B)
+{
+ return _mm_com_epi8(__A, __B, _MM_PCOMCTRL_LE);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_comgt_epi8(__m128i __A, __m128i __B)
+{
+ return _mm_com_epi8(__A, __B, _MM_PCOMCTRL_GT);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_comge_epi8(__m128i __A, __m128i __B)
+{
+ return _mm_com_epi8(__A, __B, _MM_PCOMCTRL_GE);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_comeq_epi8(__m128i __A, __m128i __B)
+{
+ return _mm_com_epi8(__A, __B, _MM_PCOMCTRL_EQ);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_comneq_epi8(__m128i __A, __m128i __B)
+{
+ return _mm_com_epi8(__A, __B, _MM_PCOMCTRL_NEQ);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_comfalse_epi8(__m128i __A, __m128i __B)
+{
+ return _mm_com_epi8(__A, __B, _MM_PCOMCTRL_FALSE);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_comtrue_epi8(__m128i __A, __m128i __B)
+{
+ return _mm_com_epi8(__A, __B, _MM_PCOMCTRL_TRUE);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_comlt_epi16(__m128i __A, __m128i __B)
+{
+ return _mm_com_epi16(__A, __B, _MM_PCOMCTRL_LT);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_comle_epi16(__m128i __A, __m128i __B)
+{
+ return _mm_com_epi16(__A, __B, _MM_PCOMCTRL_LE);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_comgt_epi16(__m128i __A, __m128i __B)
+{
+ return _mm_com_epi16(__A, __B, _MM_PCOMCTRL_GT);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_comge_epi16(__m128i __A, __m128i __B)
+{
+ return _mm_com_epi16(__A, __B, _MM_PCOMCTRL_GE);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_comeq_epi16(__m128i __A, __m128i __B)
+{
+ return _mm_com_epi16(__A, __B, _MM_PCOMCTRL_EQ);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_comneq_epi16(__m128i __A, __m128i __B)
+{
+ return _mm_com_epi16(__A, __B, _MM_PCOMCTRL_NEQ);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_comfalse_epi16(__m128i __A, __m128i __B)
+{
+ return _mm_com_epi16(__A, __B, _MM_PCOMCTRL_FALSE);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_comtrue_epi16(__m128i __A, __m128i __B)
+{
+ return _mm_com_epi16(__A, __B, _MM_PCOMCTRL_TRUE);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_comlt_epi32(__m128i __A, __m128i __B)
+{
+ return _mm_com_epi32(__A, __B, _MM_PCOMCTRL_LT);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_comle_epi32(__m128i __A, __m128i __B)
+{
+ return _mm_com_epi32(__A, __B, _MM_PCOMCTRL_LE);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_comgt_epi32(__m128i __A, __m128i __B)
+{
+ return _mm_com_epi32(__A, __B, _MM_PCOMCTRL_GT);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_comge_epi32(__m128i __A, __m128i __B)
+{
+ return _mm_com_epi32(__A, __B, _MM_PCOMCTRL_GE);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_comeq_epi32(__m128i __A, __m128i __B)
+{
+ return _mm_com_epi32(__A, __B, _MM_PCOMCTRL_EQ);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_comneq_epi32(__m128i __A, __m128i __B)
+{
+ return _mm_com_epi32(__A, __B, _MM_PCOMCTRL_NEQ);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_comfalse_epi32(__m128i __A, __m128i __B)
+{
+ return _mm_com_epi32(__A, __B, _MM_PCOMCTRL_FALSE);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_comtrue_epi32(__m128i __A, __m128i __B)
+{
+ return _mm_com_epi32(__A, __B, _MM_PCOMCTRL_TRUE);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_comlt_epi64(__m128i __A, __m128i __B)
+{
+ return _mm_com_epi64(__A, __B, _MM_PCOMCTRL_LT);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_comle_epi64(__m128i __A, __m128i __B)
+{
+ return _mm_com_epi64(__A, __B, _MM_PCOMCTRL_LE);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_comgt_epi64(__m128i __A, __m128i __B)
+{
+ return _mm_com_epi64(__A, __B, _MM_PCOMCTRL_GT);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_comge_epi64(__m128i __A, __m128i __B)
+{
+ return _mm_com_epi64(__A, __B, _MM_PCOMCTRL_GE);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_comeq_epi64(__m128i __A, __m128i __B)
+{
+ return _mm_com_epi64(__A, __B, _MM_PCOMCTRL_EQ);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_comneq_epi64(__m128i __A, __m128i __B)
+{
+ return _mm_com_epi64(__A, __B, _MM_PCOMCTRL_NEQ);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_comfalse_epi64(__m128i __A, __m128i __B)
+{
+ return _mm_com_epi64(__A, __B, _MM_PCOMCTRL_FALSE);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_comtrue_epi64(__m128i __A, __m128i __B)
+{
+ return _mm_com_epi64(__A, __B, _MM_PCOMCTRL_TRUE);
+}
+
+#define _mm_permute2_pd(X, Y, C, I) __extension__ ({ \
+ (__m128d)__builtin_ia32_vpermil2pd((__v2df)(__m128d)(X), \
+ (__v2df)(__m128d)(Y), \
+ (__v2di)(__m128i)(C), (I)); })
+
+#define _mm256_permute2_pd(X, Y, C, I) __extension__ ({ \
+ (__m256d)__builtin_ia32_vpermil2pd256((__v4df)(__m256d)(X), \
+ (__v4df)(__m256d)(Y), \
+ (__v4di)(__m256i)(C), (I)); })
+
+#define _mm_permute2_ps(X, Y, C, I) __extension__ ({ \
+ (__m128)__builtin_ia32_vpermil2ps((__v4sf)(__m128)(X), (__v4sf)(__m128)(Y), \
+ (__v4si)(__m128i)(C), (I)); })
+
+#define _mm256_permute2_ps(X, Y, C, I) __extension__ ({ \
+ (__m256)__builtin_ia32_vpermil2ps256((__v8sf)(__m256)(X), \
+ (__v8sf)(__m256)(Y), \
+ (__v8si)(__m256i)(C), (I)); })
+
+static __inline__ __m128 __DEFAULT_FN_ATTRS
+_mm_frcz_ss(__m128 __A)
+{
+ return (__m128)__builtin_ia32_vfrczss((__v4sf)__A);
+}
+
+static __inline__ __m128d __DEFAULT_FN_ATTRS
+_mm_frcz_sd(__m128d __A)
+{
+ return (__m128d)__builtin_ia32_vfrczsd((__v2df)__A);
+}
+
+static __inline__ __m128 __DEFAULT_FN_ATTRS
+_mm_frcz_ps(__m128 __A)
+{
+ return (__m128)__builtin_ia32_vfrczps((__v4sf)__A);
+}
+
+static __inline__ __m128d __DEFAULT_FN_ATTRS
+_mm_frcz_pd(__m128d __A)
+{
+ return (__m128d)__builtin_ia32_vfrczpd((__v2df)__A);
+}
+
+static __inline__ __m256 __DEFAULT_FN_ATTRS
+_mm256_frcz_ps(__m256 __A)
+{
+ return (__m256)__builtin_ia32_vfrczps256((__v8sf)__A);
+}
+
+static __inline__ __m256d __DEFAULT_FN_ATTRS
+_mm256_frcz_pd(__m256d __A)
+{
+ return (__m256d)__builtin_ia32_vfrczpd256((__v4df)__A);
+}
+
+#undef __DEFAULT_FN_ATTRS
+
+#endif /* __XOPINTRIN_H */
diff --git a/clang-2690385/lib64/clang/3.8/include/xsavecintrin.h b/clang-2690385/lib64/clang/3.8/include/xsavecintrin.h
new file mode 100644
index 00000000..598470a6
--- /dev/null
+++ b/clang-2690385/lib64/clang/3.8/include/xsavecintrin.h
@@ -0,0 +1,48 @@
+/*===---- xsavecintrin.h - XSAVEC intrinsic ------------------------------------===
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+ * THE SOFTWARE.
+ *
+ *===-----------------------------------------------------------------------===
+ */
+
+#ifndef __IMMINTRIN_H
+#error "Never use <xsavecintrin.h> directly; include <immintrin.h> instead."
+#endif
+
+#ifndef __XSAVECINTRIN_H
+#define __XSAVECINTRIN_H
+
+/* Define the default attributes for the functions in this file. */
+#define __DEFAULT_FN_ATTRS __attribute__((__always_inline__, __nodebug__, __target__("xsavec")))
+
+static __inline__ void __DEFAULT_FN_ATTRS
+_xsavec(void *__p, unsigned long long __m) {
+ __builtin_ia32_xsavec(__p, __m);
+}
+
+#ifdef __x86_64__
+static __inline__ void __DEFAULT_FN_ATTRS
+_xsavec64(void *__p, unsigned long long __m) {
+ __builtin_ia32_xsavec64(__p, __m);
+}
+#endif
+
+#undef __DEFAULT_FN_ATTRS
+
+#endif
diff --git a/clang-2690385/lib64/clang/3.8/include/xsaveintrin.h b/clang-2690385/lib64/clang/3.8/include/xsaveintrin.h
new file mode 100644
index 00000000..a2e6b2e7
--- /dev/null
+++ b/clang-2690385/lib64/clang/3.8/include/xsaveintrin.h
@@ -0,0 +1,58 @@
+/*===---- xsaveintrin.h - XSAVE intrinsic ------------------------------------===
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+ * THE SOFTWARE.
+ *
+ *===-----------------------------------------------------------------------===
+ */
+
+#ifndef __IMMINTRIN_H
+#error "Never use <xsaveintrin.h> directly; include <immintrin.h> instead."
+#endif
+
+#ifndef __XSAVEINTRIN_H
+#define __XSAVEINTRIN_H
+
+/* Define the default attributes for the functions in this file. */
+#define __DEFAULT_FN_ATTRS __attribute__((__always_inline__, __nodebug__, __target__("xsave")))
+
+static __inline__ void __DEFAULT_FN_ATTRS
+_xsave(void *__p, unsigned long long __m) {
+ __builtin_ia32_xsave(__p, __m);
+}
+
+static __inline__ void __DEFAULT_FN_ATTRS
+_xrstor(void *__p, unsigned long long __m) {
+ __builtin_ia32_xrstor(__p, __m);
+}
+
+#ifdef __x86_64__
+static __inline__ void __DEFAULT_FN_ATTRS
+_xsave64(void *__p, unsigned long long __m) {
+ __builtin_ia32_xsave64(__p, __m);
+}
+
+static __inline__ void __DEFAULT_FN_ATTRS
+_xrstor64(void *__p, unsigned long long __m) {
+ __builtin_ia32_xrstor64(__p, __m);
+}
+#endif
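+
+/* Editor's note (sketch): the save area must be 64-byte aligned and sized
+ * per CPUID leaf 0xD; the buffer size below is an assumption, not derived:
+ *
+ *   _Alignas(64) static char area[4096];
+ *   _xsave(area, 0x7ULL);                     // x87, SSE, AVX components
+ *   _xrstor(area, 0x7ULL);
+ */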
+
+#undef __DEFAULT_FN_ATTRS
+
+#endif
diff --git a/clang-2690385/lib64/clang/3.8/include/xsaveoptintrin.h b/clang-2690385/lib64/clang/3.8/include/xsaveoptintrin.h
new file mode 100644
index 00000000..d3faae78
--- /dev/null
+++ b/clang-2690385/lib64/clang/3.8/include/xsaveoptintrin.h
@@ -0,0 +1,48 @@
+/*===---- xsaveoptintrin.h - XSAVEOPT intrinsic ------------------------------------===
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+ * THE SOFTWARE.
+ *
+ *===-----------------------------------------------------------------------===
+ */
+
+#ifndef __IMMINTRIN_H
+#error "Never use <xsaveoptintrin.h> directly; include <immintrin.h> instead."
+#endif
+
+#ifndef __XSAVEOPTINTRIN_H
+#define __XSAVEOPTINTRIN_H
+
+/* Define the default attributes for the functions in this file. */
+#define __DEFAULT_FN_ATTRS __attribute__((__always_inline__, __nodebug__, __target__("xsaveopt")))
+
+static __inline__ void __DEFAULT_FN_ATTRS
+_xsaveopt(void *__p, unsigned long long __m) {
+ __builtin_ia32_xsaveopt(__p, __m);
+}
+
+#ifdef __x86_64__
+static __inline__ void __DEFAULT_FN_ATTRS
+_xsaveopt64(void *__p, unsigned long long __m) {
+ __builtin_ia32_xsaveopt64(__p, __m);
+}
+#endif
+
+#undef __DEFAULT_FN_ATTRS
+
+#endif
diff --git a/clang-2690385/lib64/clang/3.8/include/xsavesintrin.h b/clang-2690385/lib64/clang/3.8/include/xsavesintrin.h
new file mode 100644
index 00000000..c5e540a8
--- /dev/null
+++ b/clang-2690385/lib64/clang/3.8/include/xsavesintrin.h
@@ -0,0 +1,58 @@
+/*===---- xsavesintrin.h - XSAVES intrinsic ------------------------------------===
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+ * THE SOFTWARE.
+ *
+ *===-----------------------------------------------------------------------===
+ */
+
+#ifndef __IMMINTRIN_H
+#error "Never use <xsavesintrin.h> directly; include <immintrin.h> instead."
+#endif
+
+#ifndef __XSAVESINTRIN_H
+#define __XSAVESINTRIN_H
+
+/* Define the default attributes for the functions in this file. */
+#define __DEFAULT_FN_ATTRS __attribute__((__always_inline__, __nodebug__, __target__("xsaves")))
+
+static __inline__ void __DEFAULT_FN_ATTRS
+_xsaves(void *__p, unsigned long long __m) {
+ __builtin_ia32_xsaves(__p, __m);
+}
+
+static __inline__ void __DEFAULT_FN_ATTRS
+_xrstors(void *__p, unsigned long long __m) {
+ __builtin_ia32_xrstors(__p, __m);
+}
+
+#ifdef __x86_64__
+static __inline__ void __DEFAULT_FN_ATTRS
+_xrstors64(void *__p, unsigned long long __m) {
+ __builtin_ia32_xrstors64(__p, __m);
+}
+
+static __inline__ void __DEFAULT_FN_ATTRS
+_xsaves64(void *__p, unsigned long long __m) {
+ __builtin_ia32_xsaves64(__p, __m);
+}
+#endif
+
+#undef __DEFAULT_FN_ATTRS
+
+#endif
diff --git a/clang-2690385/lib64/clang/3.8/include/xtestintrin.h b/clang-2690385/lib64/clang/3.8/include/xtestintrin.h
new file mode 100644
index 00000000..9d3378fd
--- /dev/null
+++ b/clang-2690385/lib64/clang/3.8/include/xtestintrin.h
@@ -0,0 +1,41 @@
+/*===---- xtestintrin.h - XTEST intrinsic ---------------------------------===
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+ * THE SOFTWARE.
+ *
+ *===-----------------------------------------------------------------------===
+ */
+
+#ifndef __IMMINTRIN_H
+#error "Never use <xtestintrin.h> directly; include <immintrin.h> instead."
+#endif
+
+#ifndef __XTESTINTRIN_H
+#define __XTESTINTRIN_H
+
+/* xtest returns non-zero if the instruction is executed within an RTM
+ * transaction or an active HLE region. */
+/* FIXME: The target feature could be either RTM or HLE; revisit this once
+ * HLE is supported. */
+static __inline__ int
+ __attribute__((__always_inline__, __nodebug__, __target__("rtm")))
+ _xtest(void) {
+ return __builtin_ia32_xtest();
+}
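+
+/* Editor's note: a hypothetical use inside an RTM transaction (using
+ * _xbegin/_xend and _XBEGIN_STARTED from rtmintrin.h):
+ *
+ *   if (_xbegin() == _XBEGIN_STARTED) {
+ *     int in_txn = _xtest();                  // non-zero here
+ *     _xend();
+ *   }
+ */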
+
+#endif
diff --git a/clang-2690385/lib64/clang/3.8/lib/linux/libclang_rt.asan-aarch64-android.so b/clang-2690385/lib64/clang/3.8/lib/linux/libclang_rt.asan-aarch64-android.so
new file mode 100755
index 00000000..c919e878
--- /dev/null
+++ b/clang-2690385/lib64/clang/3.8/lib/linux/libclang_rt.asan-aarch64-android.so
Binary files differ
diff --git a/clang-2690385/lib64/clang/3.8/lib/linux/libclang_rt.asan-arm-android.so b/clang-2690385/lib64/clang/3.8/lib/linux/libclang_rt.asan-arm-android.so
new file mode 100755
index 00000000..4d49eb9b
--- /dev/null
+++ b/clang-2690385/lib64/clang/3.8/lib/linux/libclang_rt.asan-arm-android.so
Binary files differ
diff --git a/clang-2690385/lib64/clang/3.8/lib/linux/libclang_rt.asan-i686-android.so b/clang-2690385/lib64/clang/3.8/lib/linux/libclang_rt.asan-i686-android.so
new file mode 100755
index 00000000..f592c263
--- /dev/null
+++ b/clang-2690385/lib64/clang/3.8/lib/linux/libclang_rt.asan-i686-android.so
Binary files differ
diff --git a/clang-2690385/lib64/clang/3.8/lib/linux/libclang_rt.profile-aarch64-android.a b/clang-2690385/lib64/clang/3.8/lib/linux/libclang_rt.profile-aarch64-android.a
new file mode 100644
index 00000000..12c57e1a
--- /dev/null
+++ b/clang-2690385/lib64/clang/3.8/lib/linux/libclang_rt.profile-aarch64-android.a
Binary files differ
diff --git a/clang-2690385/lib64/clang/3.8/lib/linux/libclang_rt.profile-arm-android.a b/clang-2690385/lib64/clang/3.8/lib/linux/libclang_rt.profile-arm-android.a
new file mode 100644
index 00000000..c12bb72a
--- /dev/null
+++ b/clang-2690385/lib64/clang/3.8/lib/linux/libclang_rt.profile-arm-android.a
Binary files differ
diff --git a/clang-2690385/lib64/clang/3.8/lib/linux/libclang_rt.profile-i686-android.a b/clang-2690385/lib64/clang/3.8/lib/linux/libclang_rt.profile-i686-android.a
new file mode 100644
index 00000000..7d3bd7b1
--- /dev/null
+++ b/clang-2690385/lib64/clang/3.8/lib/linux/libclang_rt.profile-i686-android.a
Binary files differ
diff --git a/clang-2690385/lib64/clang/3.8/lib/linux/libclang_rt.profile-mips64el-android.a b/clang-2690385/lib64/clang/3.8/lib/linux/libclang_rt.profile-mips64el-android.a
new file mode 100644
index 00000000..6bb59a00
--- /dev/null
+++ b/clang-2690385/lib64/clang/3.8/lib/linux/libclang_rt.profile-mips64el-android.a
Binary files differ
diff --git a/clang-2690385/lib64/clang/3.8/lib/linux/libclang_rt.profile-mipsel-android.a b/clang-2690385/lib64/clang/3.8/lib/linux/libclang_rt.profile-mipsel-android.a
new file mode 100644
index 00000000..e764a34a
--- /dev/null
+++ b/clang-2690385/lib64/clang/3.8/lib/linux/libclang_rt.profile-mipsel-android.a
Binary files differ
diff --git a/clang-2690385/lib64/clang/3.8/lib/linux/libclang_rt.profile-x86_64-android.a b/clang-2690385/lib64/clang/3.8/lib/linux/libclang_rt.profile-x86_64-android.a
new file mode 100644
index 00000000..7501074a
--- /dev/null
+++ b/clang-2690385/lib64/clang/3.8/lib/linux/libclang_rt.profile-x86_64-android.a
Binary files differ
diff --git a/clang-2690385/lib64/libLLVM.dylib b/clang-2690385/lib64/libLLVM.dylib
new file mode 100755
index 00000000..f009ed98
--- /dev/null
+++ b/clang-2690385/lib64/libLLVM.dylib
Binary files differ
diff --git a/clang-2690385/lib64/libc++.dylib b/clang-2690385/lib64/libc++.dylib
new file mode 100755
index 00000000..b49d8a4c
--- /dev/null
+++ b/clang-2690385/lib64/libc++.dylib
Binary files differ
diff --git a/clang-2690385/repo.prop b/clang-2690385/repo.prop
new file mode 100644
index 00000000..f6ed60d1
--- /dev/null
+++ b/clang-2690385/repo.prop
@@ -0,0 +1,40 @@
+platform/bionic d057f449a0a3ad2f574d11ffce060d69db9fc019
+platform/build 9d5fb14b2d551e522c51f78361b77653191c8c91
+platform/build/blueprint 3a89d1ce9fe7a5160ee87ee11f99edbf540020fc
+platform/build/kati 158867849c8a55d1db95ead7cbeca4aacd47350f
+platform/build/soong dc5d28ad1736d31787da910627a3711c4df2a588
+platform/external/clang d26af7c37f2f30ac32f3c568bff186678850634a
+platform/external/compiler-rt 7723e4df95a487c2a8a33d850318877c4efacf7d
+platform/external/jemalloc ec006d884d5d219a070fec8df56a233ed524ff94
+platform/external/libcxx ff162f079ead2995956f6c9594fe24fc772df8a9
+platform/external/libcxxabi fb11cb31ea214386a681d2f28b126b310ac53657
+platform/external/libunwind a5ab7da8aeed49860493fef8a61d4ffe9141c3a7
+platform/external/libunwind_llvm 7daa018e23400c80c8cde834207fe14f036b2bf7
+platform/external/llvm 0b9e5445ca68d305072188e20f6c1eef22b0620e
+platform/manifest 7a37dd810f341d953b007d4422da37e0bafb95f3
+platform/prebuilts/clang/host/darwin-x86 95b185af6709631b1d2c6e3193b57133a4463b99
+platform/prebuilts/clang/host/linux-x86 812a6b5d772cab78761d335f3c7f5876461e3b3b
+platform/prebuilts/clang/host/windows-x86 295f01bf703ff0d65a7aa58b8030dbabb8acd2ed
+platform/prebuilts/gcc/darwin-x86/aarch64/aarch64-linux-android-4.9 e5dd6d374a4ec26d9fec0673dc3e12c96cd4995c
+platform/prebuilts/gcc/darwin-x86/arm/arm-eabi-4.8 6d08ca9f45ff685648fd13c75bf5cac4b11c19bb
+platform/prebuilts/gcc/darwin-x86/arm/arm-linux-androideabi-4.9 3342d2d373fd4b8e6e1838a57f55e064b683d47c
+platform/prebuilts/gcc/darwin-x86/host/headers 4ac4f7cc41cf3c9e36fc3d6cf37fd1cfa9587a68
+platform/prebuilts/gcc/darwin-x86/host/i686-apple-darwin-4.2.1 ec5aa66aaa4964c27564d0ec84dc1f18a2d72b7e
+platform/prebuilts/gcc/darwin-x86/mips/mips64el-linux-android-4.9 49e6b0916359a26ffe4a5ecb8d50ea2d6290f4fa
+platform/prebuilts/gcc/darwin-x86/x86/x86_64-linux-android-4.9 1fe9a13be8177f3d8c15f39edf146afbe0289507
+platform/prebuilts/gcc/linux-x86/aarch64/aarch64-linux-android-4.9 52c01c4a62fd891f4c517ef9d097833639fdea12
+platform/prebuilts/gcc/linux-x86/arm/arm-eabi-4.8 26e93f6af47f7bd3a9beb5c102a5f45e19bfa38a
+platform/prebuilts/gcc/linux-x86/arm/arm-linux-androideabi-4.9 c09e1b3a55153d1ba142d5bf548c90487ea71f9e
+platform/prebuilts/gcc/linux-x86/host/x86_64-linux-glibc2.11-4.8 1273431a189717842f033573eb8c777e13dd88b7
+platform/prebuilts/gcc/linux-x86/host/x86_64-linux-glibc2.15-4.8 73ca99196723f810dad42390d154654354f57c16
+platform/prebuilts/gcc/linux-x86/host/x86_64-w64-mingw32-4.8 c795eed743bc6cee4ead5407cc237c43abf6fa26
+platform/prebuilts/gcc/linux-x86/mips/mips64el-linux-android-4.9 425b0db29e4af76ad2cc099304d8209558be142f
+platform/prebuilts/gcc/linux-x86/x86/x86_64-linux-android-4.9 c33513f9de95fcdf4ec832db5e3ebd612382f541
+platform/prebuilts/go/darwin-x86 09eb3b159acf6783eb554f767d0d8a137d0c41d9
+platform/prebuilts/go/linux-x86 6ff23253f8283d0c81c9db51c2d7803e086c93ad
+platform/prebuilts/ndk e99ac48ba4ed5ffbc8cd20c7a320c2baa02f1cd7
+platform/prebuilts/ninja/darwin-x86 7ca9262b06ba99ba2270e0928dba87c1eb8d8ca7
+platform/prebuilts/ninja/linux-x86 0b514c3a1dc18d1c9ed59972eba3b65af4a72e37
+platform/prebuilts/sdk b58e1855feb6640e00c1eba1ca5b90d1f94ff0dc
+platform/system/core 371e7ea170d749489a0eb7085347f58b7be63734
+toolchain/binutils 066607388945f542727bd5035fb8d84bfd798034
diff --git a/clang-2690385/tools/scan-build/CMakeLists.txt b/clang-2690385/tools/scan-build/CMakeLists.txt
new file mode 100644
index 00000000..78c243d8
--- /dev/null
+++ b/clang-2690385/tools/scan-build/CMakeLists.txt
@@ -0,0 +1,82 @@
+option(CLANG_INSTALL_SCANBUILD "Install the scan-build tool" ON)
+
+include(GNUInstallDirs)
+
+if (WIN32 AND NOT CYGWIN)
+ set(BinFiles
+ scan-build.bat)
+ set(LibexecFiles
+ ccc-analyzer.bat
+ c++-analyzer.bat)
+else()
+ set(BinFiles
+ scan-build)
+ set(LibexecFiles
+ ccc-analyzer
+ c++-analyzer)
+ if (APPLE)
+ list(APPEND BinFiles
+ set-xcode-analyzer)
+ endif()
+endif()
+
+set(ManPages
+ scan-build.1)
+
+set(ShareFiles
+ scanview.css
+ sorttable.js)
+
+
+if(CLANG_INSTALL_SCANBUILD)
+ foreach(BinFile ${BinFiles})
+ add_custom_command(OUTPUT ${CMAKE_BINARY_DIR}/bin/${BinFile}
+ COMMAND ${CMAKE_COMMAND} -E make_directory
+ ${CMAKE_BINARY_DIR}/bin
+ COMMAND ${CMAKE_COMMAND} -E copy
+ ${CMAKE_CURRENT_SOURCE_DIR}/bin/${BinFile}
+ ${CMAKE_BINARY_DIR}/bin/
+ DEPENDS ${CMAKE_CURRENT_SOURCE_DIR}/bin/${BinFile})
+ list(APPEND Depends ${CMAKE_BINARY_DIR}/bin/${BinFile})
+ install(PROGRAMS bin/${BinFile} DESTINATION bin)
+ endforeach()
+
+ foreach(LibexecFile ${LibexecFiles})
+ add_custom_command(OUTPUT ${CMAKE_BINARY_DIR}/libexec/${LibexecFile}
+ COMMAND ${CMAKE_COMMAND} -E make_directory
+ ${CMAKE_BINARY_DIR}/libexec
+ COMMAND ${CMAKE_COMMAND} -E copy
+ ${CMAKE_CURRENT_SOURCE_DIR}/libexec/${LibexecFile}
+ ${CMAKE_BINARY_DIR}/libexec/
+ DEPENDS ${CMAKE_CURRENT_SOURCE_DIR}/libexec/${LibexecFile})
+ list(APPEND Depends ${CMAKE_BINARY_DIR}/libexec/${LibexecFile})
+ install(PROGRAMS libexec/${LibexecFile} DESTINATION libexec)
+ endforeach()
+
+ foreach(ManPage ${ManPages})
+ add_custom_command(OUTPUT ${CMAKE_BINARY_DIR}/${CMAKE_INSTALL_MANDIR}/man1/${ManPage}
+ COMMAND ${CMAKE_COMMAND} -E make_directory
+ ${CMAKE_BINARY_DIR}/${CMAKE_INSTALL_MANDIR}/man1
+ COMMAND ${CMAKE_COMMAND} -E copy
+ ${CMAKE_CURRENT_SOURCE_DIR}/man/${ManPage}
+ ${CMAKE_BINARY_DIR}/${CMAKE_INSTALL_MANDIR}/man1/
+ DEPENDS ${CMAKE_CURRENT_SOURCE_DIR}/man/${ManPage})
+ list(APPEND Depends ${CMAKE_BINARY_DIR}/${CMAKE_INSTALL_MANDIR}/man1/${ManPage})
+ install(PROGRAMS man/${ManPage} DESTINATION ${CMAKE_INSTALL_MANDIR}/man1)
+ endforeach()
+
+ foreach(ShareFile ${ShareFiles})
+ add_custom_command(OUTPUT ${CMAKE_BINARY_DIR}/share/scan-build/${ShareFile}
+ COMMAND ${CMAKE_COMMAND} -E make_directory
+ ${CMAKE_BINARY_DIR}/share/scan-build
+ COMMAND ${CMAKE_COMMAND} -E copy
+ ${CMAKE_CURRENT_SOURCE_DIR}/share/scan-build/${ShareFile}
+ ${CMAKE_BINARY_DIR}/share/scan-build/
+ DEPENDS ${CMAKE_CURRENT_SOURCE_DIR}/share/scan-build/${ShareFile})
+ list(APPEND Depends ${CMAKE_BINARY_DIR}/share/scan-build/${ShareFile})
+ install(FILES share/scan-build/${ShareFile} DESTINATION share/scan-build)
+ endforeach()
+
+ add_custom_target(scan-build ALL DEPENDS ${Depends})
+ set_target_properties(scan-build PROPERTIES FOLDER "Misc")
+endif()
diff --git a/clang-2690385/tools/scan-build/Makefile b/clang-2690385/tools/scan-build/Makefile
new file mode 100644
index 00000000..23aa1988
--- /dev/null
+++ b/clang-2690385/tools/scan-build/Makefile
@@ -0,0 +1,53 @@
+##===- tools/scan-build/Makefile ---------------------------*- Makefile -*-===##
+#
+# The LLVM Compiler Infrastructure
+#
+# This file is distributed under the University of Illinois Open Source
+# License. See LICENSE.TXT for details.
+#
+##===----------------------------------------------------------------------===##
+
+CLANG_LEVEL := ../..
+
+include $(CLANG_LEVEL)/../../Makefile.config
+include $(CLANG_LEVEL)/Makefile
+
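+# On MinGW hosts the wrapper scripts are installed as their .bat variants.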
+ifeq ($(HOST_OS),MingW)
+ Suffix := .bat
+endif
+
+CLANG_INSTALL_SCANBUILD ?= 1
+
+ifeq ($(CLANG_INSTALL_SCANBUILD), 1)
+ InstallTargets := $(ToolDir)/scan-build$(Suffix) \
+ $(LibexecDir)/c++-analyzer$(Suffix) \
+ $(LibexecDir)/ccc-analyzer$(Suffix) \
+ $(ShareDir)/scan-build/scanview.css \
+ $(ShareDir)/scan-build/sorttable.js \
+ $(ShareDir)/man/man1/scan-build.1
+
+ ifeq ($(HOST_OS),Darwin)
+ InstallTargets := $(InstallTargets) $(ToolDir)/set-xcode-analyzer
+ endif
+endif
+
+all:: $(InstallTargets)
+
+$(ToolDir)/%: bin/% Makefile $(ToolDir)/.dir
+ $(Echo) "Copying $(notdir $<) to the 'bin' directory..."
+ $(Verb)cp $< $@
+ $(Verb)chmod +x $@
+
+$(LibexecDir)/%: libexec/% Makefile $(LibexecDir)/.dir
+ $(Echo) "Copying $(notdir $<) to the 'libexec' directory..."
+ $(Verb)cp $< $@
+ $(Verb)chmod +x $@
+
+$(ShareDir)/man/man1/%: man/% Makefile $(ShareDir)/man/man1/.dir
+ $(Echo) "Copying $(notdir $<) to the 'man' directory..."
+ $(Verb)cp $< $@
+
+$(ShareDir)/scan-build/%: share/scan-build/% Makefile $(ShareDir)/scan-build/.dir
+ $(Echo) "Copying $(notdir $<) to the 'share' directory..."
+ $(Verb)cp $< $@
+
diff --git a/clang-2690385/tools/scan-build/bin/scan-build b/clang-2690385/tools/scan-build/bin/scan-build
new file mode 100755
index 00000000..6a144849
--- /dev/null
+++ b/clang-2690385/tools/scan-build/bin/scan-build
@@ -0,0 +1,1832 @@
+#!/usr/bin/env perl
+#
+# The LLVM Compiler Infrastructure
+#
+# This file is distributed under the University of Illinois Open Source
+# License. See LICENSE.TXT for details.
+#
+##===----------------------------------------------------------------------===##
+#
+# A script designed to wrap a build so that all calls to gcc are intercepted
+# and piped to the static analyzer.
+#
+##===----------------------------------------------------------------------===##
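+#
+# Typical invocation (illustrative; see DisplayHelp below for full usage):
+#
+#   scan-build -o /tmp/myhtmldir make -j4
+#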
+
+use strict;
+use warnings;
+use FindBin qw($RealBin);
+use Digest::MD5;
+use File::Basename;
+use File::Find;
+use File::Copy qw(copy);
+use File::Path qw( rmtree mkpath );
+use Term::ANSIColor;
+use Term::ANSIColor qw(:constants);
+use Cwd qw/ getcwd abs_path /;
+use Sys::Hostname;
+use Hash::Util qw(lock_keys);
+
+my $Prog = "scan-build";
+my $BuildName;
+my $BuildDate;
+
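+# Colorized diagnostics are opt-in: they require a color-capable xterm TERM,
+# a tty on stdout, and SCAN_BUILD_COLOR set in the environment.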
+my $TERM = $ENV{'TERM'};
+my $UseColor = (defined $TERM and $TERM =~ 'xterm-.*color' and -t STDOUT
+ and defined $ENV{'SCAN_BUILD_COLOR'});
+
+# Portability: getpwuid is not implemented for Win32 (see Perl language
+# reference, perlport); use getlogin instead.
+my $UserName = HtmlEscape(getlogin() || getpwuid($<) || 'unknown');
+my $HostName = HtmlEscape(hostname() || 'unknown');
+my $CurrentDir = HtmlEscape(getcwd());
+
+my $CmdArgs;
+
+my $Date = localtime();
+
+# Command-line/config arguments.
+my %Options = (
+ Verbose => 0, # Verbose output from this script.
+ AnalyzeHeaders => 0,
+ OutputDir => undef, # Parent directory to store HTML files.
+ HtmlTitle => basename($CurrentDir)." - scan-build results",
+ IgnoreErrors => 0, # Ignore build errors.
+ ViewResults => 0, # View results when the build terminates.
+ ExitStatusFoundBugs => 0, # Exit status reflects whether bugs were found
+ KeepEmpty => 0, # Don't remove output directory even with 0 results.
+ EnableCheckers => {},
+ DisableCheckers => {},
+ UseCC => undef, # C compiler to use for compilation.
+ UseCXX => undef, # C++ compiler to use for compilation.
+ AnalyzerTarget => undef,
+ StoreModel => undef,
+ ConstraintsModel => undef,
+ InternalStats => undef,
+ OutputFormat => "html",
+ ConfigOptions => [], # Options to pass through to the analyzer's -analyzer-config flag.
+ ReportFailures => undef,
+ AnalyzerStats => 0,
+ MaxLoop => 0,
+ PluginsToLoad => [],
+ AnalyzerDiscoveryMethod => undef,
+ OverrideCompiler => 0 # The flag corresponding to the --override-compiler command line option.
+);
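+# Freeze the option keys: a typo in an option name becomes a runtime error
+# instead of silently creating a new hash entry.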
+lock_keys(%Options);
+
+##----------------------------------------------------------------------------##
+# Diagnostics
+##----------------------------------------------------------------------------##
+
+sub Diag {
+ if ($UseColor) {
+ print BOLD, MAGENTA "$Prog: @_";
+ print RESET;
+ }
+ else {
+ print "$Prog: @_";
+ }
+}
+
+sub ErrorDiag {
+ if ($UseColor) {
+ print STDERR BOLD, RED "$Prog: ";
+ print STDERR RESET, RED @_;
+ print STDERR RESET;
+ } else {
+ print STDERR "$Prog: @_";
+ }
+}
+
+sub DiagCrashes {
+ my $Dir = shift;
+ Diag ("The analyzer encountered problems on some source files.\n");
+ Diag ("Preprocessed versions of these sources were deposited in '$Dir/failures'.\n");
+ Diag ("Please consider submitting a bug report using these files:\n");
+ Diag (" http://clang-analyzer.llvm.org/filing_bugs.html\n")
+}
+
+sub DieDiag {
+ if ($UseColor) {
+ print STDERR BOLD, RED "$Prog: ";
+ print STDERR RESET, RED @_;
+ print STDERR RESET;
+ }
+ else {
+ print STDERR "$Prog: ", @_;
+ }
+ exit 1;
+}
+
+##----------------------------------------------------------------------------##
+# Print default checker names
+##----------------------------------------------------------------------------##
+
+if (grep /^--help-checkers$/, @ARGV) {
+ my @options = qx($0 -h);
+ foreach (@options) {
+ next unless /^ \+/;
+ s/^\s*//;
+ my ($sign, $name, @text) = split ' ', $_;
+ print $name, $/ if $sign eq '+';
+ }
+ exit 0;
+}
+
+##----------------------------------------------------------------------------##
+# Declaration of Clang options. Populated later.
+##----------------------------------------------------------------------------##
+
+my $Clang;
+my $ClangSB;
+my $ClangCXX;
+my $ClangVersion;
+
+##----------------------------------------------------------------------------##
+# GetHTMLRunDir - Construct an HTML directory name for the current sub-run.
+##----------------------------------------------------------------------------##
+
+sub GetHTMLRunDir {
+ die "Not enough arguments." if (@_ == 0);
+ my $Dir = shift @_;
+ my $TmpMode = 0;
+ if (!defined $Dir) {
+ $Dir = $ENV{'TMPDIR'} || $ENV{'TEMP'} || $ENV{'TMP'} || "/tmp";
+ $TmpMode = 1;
+ }
+
+ # Chop off any trailing '/' characters.
+ while ($Dir =~ /\/$/) { chop $Dir; }
+
+ # Get current date and time.
+ my @CurrentTime = localtime();
+ my $year = $CurrentTime[5] + 1900;
+ my $day = $CurrentTime[3];
+ my $month = $CurrentTime[4] + 1;
+ my $hour = $CurrentTime[2];
+ my $min = $CurrentTime[1];
+ my $sec = $CurrentTime[0];
+
+ my $TimeString = sprintf("%02d%02d%02d", $hour, $min, $sec);
+ my $DateString = sprintf("%d-%02d-%02d-%s-$$",
+ $year, $month, $day, $TimeString);
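+  # The final directory name is "$DateString-<run number>", i.e. six
+  # '-'-separated fields: YYYY-MM-DD-HHMMSS-<pid>-<run number>.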
+
+ # Determine the run number.
+ my $RunNumber;
+
+ if (-d $Dir) {
+ if (! -r $Dir) {
+ DieDiag("directory '$Dir' exists but is not readable.\n");
+ }
+ # Iterate over all files in the specified directory.
+ my $max = 0;
+ opendir(DIR, $Dir);
+ my @FILES = grep { -d "$Dir/$_" } readdir(DIR);
+ closedir(DIR);
+
+ foreach my $f (@FILES) {
+ # Strip the prefix '$Prog-' if we are dumping files to /tmp.
+ if ($TmpMode) {
+ next if (!($f =~ /^$Prog-(.+)/));
+ $f = $1;
+ }
+
+ my @x = split/-/, $f;
+      next if (scalar(@x) != 6);
+ next if ($x[0] != $year);
+ next if ($x[1] != $month);
+ next if ($x[2] != $day);
+ next if ($x[3] != $TimeString);
+ next if ($x[4] != $$);
+
+ if ($x[5] > $max) {
+ $max = $x[5];
+ }
+ }
+
+ $RunNumber = $max + 1;
+ }
+ else {
+
+    if (-e $Dir) {
+ DieDiag("'$Dir' exists but is not a directory.\n");
+ }
+
+ if ($TmpMode) {
+      DieDiag("The directory '$Dir' does not exist or cannot be accessed.\n");
+ }
+
+ # $Dir does not exist. It will be automatically created by the
+ # clang driver. Set the run number to 1.
+
+ $RunNumber = 1;
+ }
+
+ die "RunNumber must be defined!" if (!defined $RunNumber);
+
+ # Append the run number.
+ my $NewDir;
+ if ($TmpMode) {
+ $NewDir = "$Dir/$Prog-$DateString-$RunNumber";
+ }
+ else {
+ $NewDir = "$Dir/$DateString-$RunNumber";
+ }
+
+  # Make sure that the directory does not already exist, to avoid hijacking.
+ if (-e $NewDir) {
+ DieDiag("The directory '$NewDir' already exists.\n");
+ }
+
+ mkpath($NewDir);
+ return $NewDir;
+}
+
+sub SetHtmlEnv {
+
+ die "Wrong number of arguments." if (scalar(@_) != 2);
+
+ my $Args = shift;
+ my $Dir = shift;
+
+ die "No build command." if (scalar(@$Args) == 0);
+
+ my $Cmd = $$Args[0];
+
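+  # Skip configure-style commands: they only probe the toolchain, and
+  # redirecting their output would pollute the report directory.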
+ if ($Cmd =~ /configure/ || $Cmd =~ /autogen/) {
+ return;
+ }
+
+ if ($Options{Verbose}) {
+ Diag("Emitting reports for this run to '$Dir'.\n");
+ }
+
+ $ENV{'CCC_ANALYZER_HTML'} = $Dir;
+}
+
+##----------------------------------------------------------------------------##
+# ComputeDigest - Compute a digest of the specified file.
+##----------------------------------------------------------------------------##
+
+sub ComputeDigest {
+ my $FName = shift;
+ DieDiag("Cannot read $FName to compute Digest.\n") if (! -r $FName);
+
+ # Use Digest::MD5. We don't have to be cryptographically secure. We're
+ # just looking for duplicate files that come from a non-malicious source.
+ # We use Digest::MD5 because it is a standard Perl module that should
+ # come bundled on most systems.
+ open(FILE, $FName) or DieDiag("Cannot open $FName when computing Digest.\n");
+ binmode FILE;
+ my $Result = Digest::MD5->new->addfile(*FILE)->hexdigest;
+ close(FILE);
+
+ # Return the digest.
+ return $Result;
+}
+
+##----------------------------------------------------------------------------##
+# UpdatePrefix - Compute the common prefix of files.
+##----------------------------------------------------------------------------##
+
+my $Prefix;
+
+sub UpdatePrefix {
+ my $x = shift;
+ my $y = basename($x);
+ $x =~ s/\Q$y\E$//;
+
+ if (!defined $Prefix) {
+ $Prefix = $x;
+ return;
+ }
+
+ chop $Prefix while (!($x =~ /^\Q$Prefix/));
+}
+
+sub GetPrefix {
+ return $Prefix;
+}
+
+##----------------------------------------------------------------------------##
+# UpdateInFilePath - Update the path in the report file.
+##----------------------------------------------------------------------------##
+
+sub UpdateInFilePath {
+ my $fname = shift;
+ my $regex = shift;
+ my $newtext = shift;
+
+ open (RIN, $fname) or die "cannot open $fname";
+ open (ROUT, ">", "$fname.tmp") or die "cannot open $fname.tmp";
+
+ while (<RIN>) {
+ s/$regex/$newtext/;
+ print ROUT $_;
+ }
+
+ close (ROUT);
+ close (RIN);
+ rename("$fname.tmp", $fname)
+}
+
+##----------------------------------------------------------------------------##
+# AddStatLine - Decode and insert a statistics line into the database.
+##----------------------------------------------------------------------------##
+
+sub AddStatLine {
+ my $Line = shift;
+ my $Stats = shift;
+ my $File = shift;
+
+ print $Line . "\n";
+
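+  # Statistics lines emitted by the analyzer look like (values illustrative):
+  #   main -> Total CFGBlocks: 12 | Unreachable CFGBlocks: 1 | Exhausted Block: no | Empty WorkList: yes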
+ my $Regex = qr/(.*?)\ ->\ Total\ CFGBlocks:\ (\d+)\ \|\ Unreachable
+ \ CFGBlocks:\ (\d+)\ \|\ Exhausted\ Block:\ (yes|no)\ \|\ Empty\ WorkList:
+ \ (yes|no)/x;
+
+ if ($Line !~ $Regex) {
+ return;
+ }
+
+ # Create a hash of the interesting fields
+ my $Row = {
+ Filename => $File,
+ Function => $1,
+ Total => $2,
+ Unreachable => $3,
+ Aborted => $4,
+ Empty => $5
+ };
+
+ # Add them to the stats array
+ push @$Stats, $Row;
+}
+
+##----------------------------------------------------------------------------##
+# ScanFile - Scan a report file for various identifying attributes.
+##----------------------------------------------------------------------------##
+
+# Sometimes a source file is scanned more than once, and thus produces
+# multiple error reports. We use a cache to solve this problem.
+
+my %AlreadyScanned;
+
+sub ScanFile {
+
+ my $Index = shift;
+ my $Dir = shift;
+ my $FName = shift;
+ my $Stats = shift;
+
+ # Compute a digest for the report file. Determine if we have already
+ # scanned a file that looks just like it.
+
+ my $digest = ComputeDigest("$Dir/$FName");
+
+ if (defined $AlreadyScanned{$digest}) {
+ # Redundant file. Remove it.
+ unlink("$Dir/$FName");
+ return;
+ }
+
+ $AlreadyScanned{$digest} = 1;
+
+  # At this point the report file is not world readable; fix the permissions.
+ chmod(0644, "$Dir/$FName");
+
+ # Scan the report file for tags.
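+  # HTML reports embed their metadata as comments of the form
+  # <!-- BUGTYPE ... -->; everything we need precedes <!-- BUGMETAEND -->.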
+ open(IN, "$Dir/$FName") or DieDiag("Cannot open '$Dir/$FName'\n");
+
+ my $BugType = "";
+ my $BugFile = "";
+ my $BugFunction = "";
+ my $BugCategory = "";
+ my $BugDescription = "";
+ my $BugPathLength = 1;
+ my $BugLine = 0;
+
+ while (<IN>) {
+ last if (/<!-- BUGMETAEND -->/);
+
+ if (/<!-- BUGTYPE (.*) -->$/) {
+ $BugType = $1;
+ }
+ elsif (/<!-- BUGFILE (.*) -->$/) {
+ $BugFile = abs_path($1);
+ if (!defined $BugFile) {
+ # The file no longer exists: use the original path.
+ $BugFile = $1;
+ }
+ UpdatePrefix($BugFile);
+ }
+ elsif (/<!-- BUGPATHLENGTH (.*) -->$/) {
+ $BugPathLength = $1;
+ }
+ elsif (/<!-- BUGLINE (.*) -->$/) {
+ $BugLine = $1;
+ }
+ elsif (/<!-- BUGCATEGORY (.*) -->$/) {
+ $BugCategory = $1;
+ }
+ elsif (/<!-- BUGDESC (.*) -->$/) {
+ $BugDescription = $1;
+ }
+ elsif (/<!-- FUNCTIONNAME (.*) -->$/) {
+ $BugFunction = $1;
+ }
+
+ }
+
+
+ close(IN);
+
+  # $BugCategory is initialized to "", so test for emptiness, not definedness.
+  if (!$BugCategory) {
+ $BugCategory = "Other";
+ }
+
+ # Don't add internal statistics to the bug reports
+ if ($BugCategory =~ /statistics/i) {
+ AddStatLine($BugDescription, $Stats, $BugFile);
+ return;
+ }
+
+ push @$Index,[ $FName, $BugCategory, $BugType, $BugFile, $BugFunction, $BugLine,
+ $BugPathLength ];
+}
+
+##----------------------------------------------------------------------------##
+# CopyFiles - Copy resource files to target directory.
+##----------------------------------------------------------------------------##
+
+sub CopyFiles {
+
+ my $Dir = shift;
+
+ my $JS = Cwd::realpath("$RealBin/../share/scan-build/sorttable.js");
+
+ DieDiag("Cannot find 'sorttable.js'.\n")
+ if (! -r $JS);
+
+ copy($JS, "$Dir");
+
+ DieDiag("Could not copy 'sorttable.js' to '$Dir'.\n")
+ if (! -r "$Dir/sorttable.js");
+
+ my $CSS = Cwd::realpath("$RealBin/../share/scan-build/scanview.css");
+
+ DieDiag("Cannot find 'scanview.css'.\n")
+ if (! -r $CSS);
+
+ copy($CSS, "$Dir");
+
+ DieDiag("Could not copy 'scanview.css' to '$Dir'.\n")
+    if (! -r "$Dir/scanview.css");
+}
+
+##----------------------------------------------------------------------------##
+# CalcStats - Calculates visitation statistics and returns the string.
+##----------------------------------------------------------------------------##
+
+sub CalcStats {
+ my $Stats = shift;
+
+ my $TotalBlocks = 0;
+ my $UnreachedBlocks = 0;
+ my $TotalFunctions = scalar(@$Stats);
+ my $BlockAborted = 0;
+ my $WorkListAborted = 0;
+ my $Aborted = 0;
+
+ # Calculate the unique files
+ my $FilesHash = {};
+
+ foreach my $Row (@$Stats) {
+ $FilesHash->{$Row->{Filename}} = 1;
+ $TotalBlocks += $Row->{Total};
+ $UnreachedBlocks += $Row->{Unreachable};
+ $BlockAborted++ if $Row->{Aborted} eq 'yes';
+ $WorkListAborted++ if $Row->{Empty} eq 'no';
+ $Aborted++ if $Row->{Aborted} eq 'yes' || $Row->{Empty} eq 'no';
+ }
+
+  my $TotalFiles = scalar(keys(%$FilesHash));
+
+  # Guard against division by zero when no statistics lines were parsed.
+  return "No analyzer statistics were collected.\n"
+    if ($TotalFunctions == 0 or $TotalBlocks == 0);
+
+  # Calculations
+ my $PercentAborted = sprintf("%.2f", $Aborted / $TotalFunctions * 100);
+ my $PercentBlockAborted = sprintf("%.2f", $BlockAborted / $TotalFunctions
+ * 100);
+ my $PercentWorkListAborted = sprintf("%.2f", $WorkListAborted /
+ $TotalFunctions * 100);
+ my $PercentBlocksUnreached = sprintf("%.2f", $UnreachedBlocks / $TotalBlocks
+ * 100);
+
+ my $StatsString = "Analyzed $TotalBlocks blocks in $TotalFunctions functions"
+ . " in $TotalFiles files\n"
+ . "$Aborted functions aborted early ($PercentAborted%)\n"
+ . "$BlockAborted had aborted blocks ($PercentBlockAborted%)\n"
+ . "$WorkListAborted had unfinished worklists ($PercentWorkListAborted%)\n"
+ . "$UnreachedBlocks blocks were never reached ($PercentBlocksUnreached%)\n";
+
+ return $StatsString;
+}
+
+##----------------------------------------------------------------------------##
+# Postprocess - Postprocess the results of an analysis scan.
+##----------------------------------------------------------------------------##
+
+my @filesFound;
+my $baseDir;
+sub FileWanted {
+ my $baseDirRegEx = quotemeta $baseDir;
+ my $file = $File::Find::name;
+
+ # The name of the file is generated by clang binary (HTMLDiagnostics.cpp)
+ if ($file =~ /report-.*\.html$/) {
+ my $relative_file = $file;
+ $relative_file =~ s/$baseDirRegEx//g;
+ push @filesFound, $relative_file;
+ }
+}
+
+sub Postprocess {
+
+ my $Dir = shift;
+ my $BaseDir = shift;
+ my $AnalyzerStats = shift;
+ my $KeepEmpty = shift;
+
+ die "No directory specified." if (!defined $Dir);
+
+ if (! -d $Dir) {
+ Diag("No bugs found.\n");
+ return 0;
+ }
+
+ $baseDir = $Dir . "/";
+ find({ wanted => \&FileWanted, follow => 0}, $Dir);
+
+ if (scalar(@filesFound) == 0 and ! -e "$Dir/failures") {
+ if (! $KeepEmpty) {
+ Diag("Removing directory '$Dir' because it contains no reports.\n");
+ rmtree($Dir) or die "Cannot rmtree '$Dir' : $!";
+ }
+ Diag("No bugs found.\n");
+ return 0;
+ }
+
+ # Scan each report file and build an index.
+ my @Index;
+ my @Stats;
+ foreach my $file (@filesFound) { ScanFile(\@Index, $Dir, $file, \@Stats); }
+
+ # Scan the failures directory and use the information in the .info files
+ # to update the common prefix directory.
+ my @failures;
+ my @attributes_ignored;
+ if (-d "$Dir/failures") {
+ opendir(DIR, "$Dir/failures");
+ @failures = grep { /[.]info.txt$/ && !/attribute_ignored/; } readdir(DIR);
+ closedir(DIR);
+ opendir(DIR, "$Dir/failures");
+ @attributes_ignored = grep { /^attribute_ignored/; } readdir(DIR);
+ closedir(DIR);
+ foreach my $file (@failures) {
+ open IN, "$Dir/failures/$file" or DieDiag("cannot open $file\n");
+ my $Path = <IN>;
+ if (defined $Path) { UpdatePrefix($Path); }
+ close IN;
+ }
+ }
+
+ # Generate an index.html file.
+ my $FName = "$Dir/index.html";
+ open(OUT, ">", $FName) or DieDiag("Cannot create file '$FName'\n");
+
+ # Print out the header.
+
+print OUT <<ENDTEXT;
+<html>
+<head>
+<title>${Options{HtmlTitle}}</title>
+<link type="text/css" rel="stylesheet" href="scanview.css"/>
+<script src="sorttable.js"></script>
+<script language='javascript' type="text/javascript">
+function SetDisplay(RowClass, DisplayVal)
+{
+ var Rows = document.getElementsByTagName("tr");
+ for ( var i = 0 ; i < Rows.length; ++i ) {
+ if (Rows[i].className == RowClass) {
+ Rows[i].style.display = DisplayVal;
+ }
+ }
+}
+
+function CopyCheckedStateToCheckButtons(SummaryCheckButton) {
+ var Inputs = document.getElementsByTagName("input");
+ for ( var i = 0 ; i < Inputs.length; ++i ) {
+ if (Inputs[i].type == "checkbox") {
+ if(Inputs[i] != SummaryCheckButton) {
+ Inputs[i].checked = SummaryCheckButton.checked;
+ Inputs[i].onclick();
+ }
+ }
+ }
+}
+
+function returnObjById( id ) {
+ if (document.getElementById)
+ var returnVar = document.getElementById(id);
+ else if (document.all)
+ var returnVar = document.all[id];
+ else if (document.layers)
+ var returnVar = document.layers[id];
+ return returnVar;
+}
+
+var NumUnchecked = 0;
+
+function ToggleDisplay(CheckButton, ClassName) {
+ if (CheckButton.checked) {
+ SetDisplay(ClassName, "");
+ if (--NumUnchecked == 0) {
+ returnObjById("AllBugsCheck").checked = true;
+ }
+ }
+ else {
+ SetDisplay(ClassName, "none");
+ NumUnchecked++;
+ returnObjById("AllBugsCheck").checked = false;
+ }
+}
+</script>
+<!-- SUMMARYENDHEAD -->
+</head>
+<body>
+<h1>${Options{HtmlTitle}}</h1>
+
+<table>
+<tr><th>User:</th><td>${UserName}\@${HostName}</td></tr>
+<tr><th>Working Directory:</th><td>${CurrentDir}</td></tr>
+<tr><th>Command Line:</th><td>${CmdArgs}</td></tr>
+<tr><th>Clang Version:</th><td>${ClangVersion}</td></tr>
+<tr><th>Date:</th><td>${Date}</td></tr>
+ENDTEXT
+
+print OUT "<tr><th>Version:</th><td>${BuildName} (${BuildDate})</td></tr>\n"
+ if (defined($BuildName) && defined($BuildDate));
+
+print OUT <<ENDTEXT;
+</table>
+ENDTEXT
+
+ if (scalar(@filesFound)) {
+ # Print out the summary table.
+ my %Totals;
+
+ for my $row ( @Index ) {
+ my $bug_type = ($row->[2]);
+ my $bug_category = ($row->[1]);
+ my $key = "$bug_category:$bug_type";
+
+ if (!defined $Totals{$key}) { $Totals{$key} = [1,$bug_category,$bug_type]; }
+ else { $Totals{$key}->[0]++; }
+ }
+
+ print OUT "<h2>Bug Summary</h2>";
+
+ if (defined $BuildName) {
+ print OUT "\n<p>Results in this analysis run are based on analyzer build <b>$BuildName</b>.</p>\n"
+ }
+
+ my $TotalBugs = scalar(@Index);
+print OUT <<ENDTEXT;
+<table>
+<thead><tr><td>Bug Type</td><td>Quantity</td><td class="sorttable_nosort">Display?</td></tr></thead>
+<tr style="font-weight:bold"><td class="SUMM_DESC">All Bugs</td><td class="Q">$TotalBugs</td><td><center><input type="checkbox" id="AllBugsCheck" onClick="CopyCheckedStateToCheckButtons(this);" checked/></center></td></tr>
+ENDTEXT
+
+ my $last_category;
+
+ for my $key (
+ sort {
+ my $x = $Totals{$a};
+ my $y = $Totals{$b};
+ my $res = $x->[1] cmp $y->[1];
+ $res = $x->[2] cmp $y->[2] if ($res == 0);
+ $res
+ } keys %Totals )
+ {
+ my $val = $Totals{$key};
+ my $category = $val->[1];
+ if (!defined $last_category or $last_category ne $category) {
+ $last_category = $category;
+ print OUT "<tr><th>$category</th><th colspan=2></th></tr>\n";
+ }
+ my $x = lc $key;
+ $x =~ s/[ ,'":\/()]+/_/g;
+ print OUT "<tr><td class=\"SUMM_DESC\">";
+ print OUT $val->[2];
+ print OUT "</td><td class=\"Q\">";
+ print OUT $val->[0];
+ print OUT "</td><td><center><input type=\"checkbox\" onClick=\"ToggleDisplay(this,'bt_$x');\" checked/></center></td></tr>\n";
+ }
+
+ # Print out the table of errors.
+
+print OUT <<ENDTEXT;
+</table>
+<h2>Reports</h2>
+
+<table class="sortable" style="table-layout:automatic">
+<thead><tr>
+ <td>Bug Group</td>
+ <td class="sorttable_sorted">Bug Type<span id="sorttable_sortfwdind">&nbsp;&#x25BE;</span></td>
+ <td>File</td>
+ <td>Function/Method</td>
+ <td class="Q">Line</td>
+ <td class="Q">Path Length</td>
+ <td class="sorttable_nosort"></td>
+ <!-- REPORTBUGCOL -->
+</tr></thead>
+<tbody>
+ENDTEXT
+
+ my $prefix = GetPrefix();
+ my $regex;
+ my $InFileRegex;
+ my $InFilePrefix = "File:</td><td>";
+
+ if (defined $prefix) {
+ $regex = qr/^\Q$prefix\E/is;
+ $InFileRegex = qr/\Q$InFilePrefix$prefix\E/is;
+ }
+
+ for my $row ( sort { $a->[2] cmp $b->[2] } @Index ) {
+ my $x = "$row->[1]:$row->[2]";
+ $x = lc $x;
+ $x =~ s/[ ,'":\/()]+/_/g;
+
+ my $ReportFile = $row->[0];
+
+ print OUT "<tr class=\"bt_$x\">";
+ print OUT "<td class=\"DESC\">";
+ print OUT $row->[1];
+ print OUT "</td>";
+ print OUT "<td class=\"DESC\">";
+ print OUT $row->[2];
+ print OUT "</td>";
+
+ # Update the file prefix.
+ my $fname = $row->[3];
+
+ if (defined $regex) {
+ $fname =~ s/$regex//;
+ UpdateInFilePath("$Dir/$ReportFile", $InFileRegex, $InFilePrefix)
+ }
+
+ print OUT "<td>";
+ my @fname = split /\//,$fname;
+ if ($#fname > 0) {
+ while ($#fname >= 0) {
+ my $x = shift @fname;
+ print OUT $x;
+ if ($#fname >= 0) {
+ print OUT "/";
+ }
+ }
+ }
+ else {
+ print OUT $fname;
+ }
+ print OUT "</td>";
+
+ print OUT "<td class=\"DESC\">";
+ print OUT $row->[4];
+ print OUT "</td>";
+
+ # Print out the quantities.
+ for my $j ( 5 .. 6 ) {
+ print OUT "<td class=\"Q\">$row->[$j]</td>";
+ }
+
+ # Print the rest of the columns.
+ for (my $j = 7; $j <= $#{$row}; ++$j) {
+ print OUT "<td>$row->[$j]</td>"
+ }
+
+ # Emit the "View" link.
+ print OUT "<td><a href=\"$ReportFile#EndPath\">View Report</a></td>";
+
+ # Emit REPORTBUG markers.
+ print OUT "\n<!-- REPORTBUG id=\"$ReportFile\" -->\n";
+
+ # End the row.
+ print OUT "</tr>\n";
+ }
+
+ print OUT "</tbody>\n</table>\n\n";
+ }
+
+ if (scalar (@failures) || scalar(@attributes_ignored)) {
+ print OUT "<h2>Analyzer Failures</h2>\n";
+
+ if (scalar @attributes_ignored) {
+ print OUT "The analyzer's parser ignored the following attributes:<p>\n";
+ print OUT "<table>\n";
+ print OUT "<thead><tr><td>Attribute</td><td>Source File</td><td>Preprocessed File</td><td>STDERR Output</td></tr></thead>\n";
+ foreach my $file (sort @attributes_ignored) {
+ die "cannot demangle attribute name\n" if (! ($file =~ /^attribute_ignored_(.+).txt/));
+ my $attribute = $1;
+ # Open the attribute file to get the first file that failed.
+ next if (!open (ATTR, "$Dir/failures/$file"));
+ my $ppfile = <ATTR>;
+ chomp $ppfile;
+ close ATTR;
+ next if (! -e "$Dir/failures/$ppfile");
+ # Open the info file and get the name of the source file.
+ open (INFO, "$Dir/failures/$ppfile.info.txt") or
+ die "Cannot open $Dir/failures/$ppfile.info.txt\n";
+ my $srcfile = <INFO>;
+ chomp $srcfile;
+ close (INFO);
+ # Print the information in the table.
+ my $prefix = GetPrefix();
+ if (defined $prefix) { $srcfile =~ s/^\Q$prefix//; }
+ print OUT "<tr><td>$attribute</td><td>$srcfile</td><td><a href=\"failures/$ppfile\">$ppfile</a></td><td><a href=\"failures/$ppfile.stderr.txt\">$ppfile.stderr.txt</a></td></tr>\n";
+ my $ppfile_clang = $ppfile;
+ $ppfile_clang =~ s/[.](.+)$/.clang.$1/;
+ print OUT " <!-- REPORTPROBLEM src=\"$srcfile\" file=\"failures/$ppfile\" clangfile=\"failures/$ppfile_clang\" stderr=\"failures/$ppfile.stderr.txt\" info=\"failures/$ppfile.info.txt\" -->\n";
+ }
+ print OUT "</table>\n";
+ }
+
+ if (scalar @failures) {
+ print OUT "<p>The analyzer had problems processing the following files:</p>\n";
+ print OUT "<table>\n";
+ print OUT "<thead><tr><td>Problem</td><td>Source File</td><td>Preprocessed File</td><td>STDERR Output</td></tr></thead>\n";
+ foreach my $file (sort @failures) {
+ $file =~ /(.+).info.txt$/;
+ # Get the preprocessed file.
+ my $ppfile = $1;
+ # Open the info file and get the name of the source file.
+ open (INFO, "$Dir/failures/$file") or
+ die "Cannot open $Dir/failures/$file\n";
+ my $srcfile = <INFO>;
+ chomp $srcfile;
+ my $problem = <INFO>;
+ chomp $problem;
+ close (INFO);
+ # Print the information in the table.
+ my $prefix = GetPrefix();
+ if (defined $prefix) { $srcfile =~ s/^\Q$prefix//; }
+ print OUT "<tr><td>$problem</td><td>$srcfile</td><td><a href=\"failures/$ppfile\">$ppfile</a></td><td><a href=\"failures/$ppfile.stderr.txt\">$ppfile.stderr.txt</a></td></tr>\n";
+ my $ppfile_clang = $ppfile;
+ $ppfile_clang =~ s/[.](.+)$/.clang.$1/;
+ print OUT " <!-- REPORTPROBLEM src=\"$srcfile\" file=\"failures/$ppfile\" clangfile=\"failures/$ppfile_clang\" stderr=\"failures/$ppfile.stderr.txt\" info=\"failures/$ppfile.info.txt\" -->\n";
+ }
+ print OUT "</table>\n";
+ }
+ print OUT "<p>Please consider submitting preprocessed files as <a href=\"http://clang-analyzer.llvm.org/filing_bugs.html\">bug reports</a>. <!-- REPORTCRASHES --> </p>\n";
+ }
+
+ print OUT "</body></html>\n";
+ close(OUT);
+ CopyFiles($Dir);
+
+ # Make sure $Dir and $BaseDir are world readable/executable.
+ chmod(0755, $Dir);
+ if (defined $BaseDir) { chmod(0755, $BaseDir); }
+
+ # Print statistics
+ print CalcStats(\@Stats) if $AnalyzerStats;
+
+ my $Num = scalar(@Index);
+ if ($Num == 1) {
+ Diag("$Num bug found.\n");
+ } else {
+ Diag("$Num bugs found.\n");
+ }
+ if ($Num > 0 && -r "$Dir/index.html") {
+ Diag("Run 'scan-view $Dir' to examine bug reports.\n");
+ }
+
+ DiagCrashes($Dir) if (scalar @failures || scalar @attributes_ignored);
+
+ return $Num;
+}
+
+##----------------------------------------------------------------------------##
+# RunBuildCommand - Run the build command.
+##----------------------------------------------------------------------------##
+
+sub AddIfNotPresent {
+ my $Args = shift;
+ my $Arg = shift;
+ my $found = 0;
+
+ foreach my $k (@$Args) {
+ if ($k eq $Arg) {
+ $found = 1;
+ last;
+ }
+ }
+
+ if ($found == 0) {
+ push @$Args, $Arg;
+ }
+}
+
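+# SetEnv - Export the interposition settings so the ccc-analyzer and
+# c++-analyzer subprocesses can pick them up. The first group of variables
+# is mandatory; the second is optional.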
+sub SetEnv {
+ my $EnvVars = shift @_;
+ foreach my $var ('CC', 'CXX', 'CLANG', 'CLANG_CXX',
+ 'CCC_ANALYZER_ANALYSIS', 'CCC_ANALYZER_PLUGINS',
+ 'CCC_ANALYZER_CONFIG') {
+    die "$var is undefined\n" if (!defined $EnvVars->{$var});
+ $ENV{$var} = $EnvVars->{$var};
+ }
+ foreach my $var ('CCC_ANALYZER_STORE_MODEL',
+ 'CCC_ANALYZER_CONSTRAINTS_MODEL',
+ 'CCC_ANALYZER_INTERNAL_STATS',
+ 'CCC_ANALYZER_OUTPUT_FORMAT',
+ 'CCC_CC',
+ 'CCC_CXX',
+ 'CCC_REPORT_FAILURES',
+ 'CLANG_ANALYZER_TARGET') {
+ my $x = $EnvVars->{$var};
+ if (defined $x) { $ENV{$var} = $x }
+ }
+ my $Verbose = $EnvVars->{'VERBOSE'};
+ if ($Verbose >= 2) {
+ $ENV{'CCC_ANALYZER_VERBOSE'} = 1;
+ }
+ if ($Verbose >= 3) {
+ $ENV{'CCC_ANALYZER_LOG'} = 1;
+ }
+}
+
+sub RunXcodebuild {
+ my $Args = shift;
+ my $IgnoreErrors = shift;
+ my $CCAnalyzer = shift;
+ my $CXXAnalyzer = shift;
+ my $EnvVars = shift;
+
+ if ($IgnoreErrors) {
+ AddIfNotPresent($Args,"-PBXBuildsContinueAfterErrors=YES");
+ }
+
+  # Detect the version of Xcode. If Xcode 4.6 or higher, use its new
+  # in situ support for analyzer interposition without needing to override
+  # the compiler.
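+  # "xcodebuild -version" prints, e.g., "Xcode 7.2" on its first line
+  # (illustrative output); parse the major.minor version from it.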
+ open(DETECT_XCODE, "-|", $Args->[0], "-version") or
+ die "error: cannot detect version of xcodebuild\n";
+
+ my $oldBehavior = 1;
+
+ while(<DETECT_XCODE>) {
+ if (/^Xcode (.+)$/) {
+ my $ver = $1;
+ if ($ver =~ /^([0-9]+[.][0-9]+)[^0-9]?/) {
+ if ($1 >= 4.6) {
+ $oldBehavior = 0;
+ last;
+ }
+ }
+ }
+ }
+ close(DETECT_XCODE);
+
+  # If --override-compiler is explicitly requested, resort to the old
+ # behavior regardless of Xcode version.
+ if ($Options{OverrideCompiler}) {
+ $oldBehavior = 1;
+ }
+
+ if ($oldBehavior == 0) {
+ my $OutputDir = $EnvVars->{"OUTPUT_DIR"};
+ my $CLANG = $EnvVars->{"CLANG"};
+ my $OtherFlags = $EnvVars->{"CCC_ANALYZER_ANALYSIS"};
+ push @$Args,
+ "RUN_CLANG_STATIC_ANALYZER=YES",
+ "CLANG_ANALYZER_OUTPUT=plist-html",
+ "CLANG_ANALYZER_EXEC=$CLANG",
+ "CLANG_ANALYZER_OUTPUT_DIR=$OutputDir",
+ "CLANG_ANALYZER_OTHER_FLAGS=$OtherFlags";
+
+ return (system(@$Args) >> 8);
+ }
+
+ # Default to old behavior where we insert a bogus compiler.
+ SetEnv($EnvVars);
+
+ # Check if using iPhone SDK 3.0 (simulator). If so the compiler being
+ # used should be gcc-4.2.
+ if (!defined $ENV{"CCC_CC"}) {
+ for (my $i = 0 ; $i < scalar(@$Args); ++$i) {
+ if ($Args->[$i] eq "-sdk" && $i + 1 < scalar(@$Args)) {
+ if (@$Args[$i+1] =~ /^iphonesimulator3/) {
+ $ENV{"CCC_CC"} = "gcc-4.2";
+ $ENV{"CCC_CXX"} = "g++-4.2";
+ }
+ }
+ }
+ }
+
+ # Disable PCH files until clang supports them.
+ AddIfNotPresent($Args,"GCC_PRECOMPILE_PREFIX_HEADER=NO");
+
+ # When 'CC' is set, xcodebuild uses it to do all linking, even if we are
+ # linking C++ object files. Set 'LDPLUSPLUS' so that xcodebuild uses 'g++'
+ # (via c++-analyzer) when linking such files.
+ $ENV{"LDPLUSPLUS"} = $CXXAnalyzer;
+
+ return (system(@$Args) >> 8);
+}
+
+sub RunBuildCommand {
+ my $Args = shift;
+ my $IgnoreErrors = shift;
+ my $Cmd = $Args->[0];
+ my $CCAnalyzer = shift;
+ my $CXXAnalyzer = shift;
+ my $EnvVars = shift;
+
+ if ($Cmd =~ /\bxcodebuild$/) {
+ return RunXcodebuild($Args, $IgnoreErrors, $CCAnalyzer, $CXXAnalyzer, $EnvVars);
+ }
+
+ # Setup the environment.
+ SetEnv($EnvVars);
+
+ if ($Cmd =~ /(.*\/?gcc[^\/]*$)/ or
+ $Cmd =~ /(.*\/?cc[^\/]*$)/ or
+ $Cmd =~ /(.*\/?llvm-gcc[^\/]*$)/ or
+ $Cmd =~ /(.*\/?clang$)/ or
+ $Cmd =~ /(.*\/?ccc-analyzer[^\/]*$)/) {
+
+ if (!($Cmd =~ /ccc-analyzer/) and !defined $ENV{"CCC_CC"}) {
+ $ENV{"CCC_CC"} = $1;
+ }
+
+ shift @$Args;
+ unshift @$Args, $CCAnalyzer;
+ }
+ elsif ($Cmd =~ /(.*\/?g\+\+[^\/]*$)/ or
+ $Cmd =~ /(.*\/?c\+\+[^\/]*$)/ or
+ $Cmd =~ /(.*\/?llvm-g\+\+[^\/]*$)/ or
+ $Cmd =~ /(.*\/?clang\+\+$)/ or
+ $Cmd =~ /(.*\/?c\+\+-analyzer[^\/]*$)/) {
+ if (!($Cmd =~ /c\+\+-analyzer/) and !defined $ENV{"CCC_CXX"}) {
+ $ENV{"CCC_CXX"} = $1;
+ }
+ shift @$Args;
+ unshift @$Args, $CXXAnalyzer;
+ }
+ elsif ($Cmd eq "make" or $Cmd eq "gmake" or $Cmd eq "mingw32-make") {
+ AddIfNotPresent($Args, "CC=$CCAnalyzer");
+ AddIfNotPresent($Args, "CXX=$CXXAnalyzer");
+ if ($IgnoreErrors) {
+ AddIfNotPresent($Args,"-k");
+ AddIfNotPresent($Args,"-i");
+ }
+ }
+
+ return (system(@$Args) >> 8);
+}
+
+##----------------------------------------------------------------------------##
+# DisplayHelp - Utility function to display all help options.
+##----------------------------------------------------------------------------##
+
+sub DisplayHelp {
+
+ my $ArgClangNotFoundErrMsg = shift;
+print <<ENDTEXT;
+USAGE: $Prog [options] <build command> [build options]
+
+ENDTEXT
+
+ if (defined $BuildName) {
+ print "ANALYZER BUILD: $BuildName ($BuildDate)\n\n";
+ }
+
+print <<ENDTEXT;
+OPTIONS:
+
+ -analyze-headers
+
+ Also analyze functions in #included files. By default, such functions
+ are skipped unless they are called by functions within the main source file.
+
+ -o <output location>
+
+ Specifies the output directory for analyzer reports. Subdirectories will be
+ created as needed to represent separate "runs" of the analyzer. If this
+ option is not specified, a directory is created in /tmp (TMPDIR on Mac OS X)
+ to store the reports.
+
+ -h
+ --help
+
+ Display this message.
+
+ -k
+ --keep-going
+
+ Add a "keep on going" option to the specified build command. This option
+ currently supports make and xcodebuild. This is a convenience option; one
+ can specify this behavior directly using build options.
+
+ --html-title [title]
+ --html-title=[title]
+
+ Specify the title used on generated HTML pages. If not specified, a default
+ title will be used.
+
+ -plist
+
+ By default the output of scan-build is a set of HTML files. This option
+ outputs the results as a set of .plist files.
+
+ -plist-html
+
+ By default the output of scan-build is a set of HTML files. This option
+ outputs the results as a set of HTML and .plist files.
+
+ --status-bugs
+
+ By default, the exit status of scan-build is the same as the executed build
+ command. Specifying this option causes the exit status of scan-build to be 1
+ if it found potential bugs and 0 otherwise.
+
+ --use-cc [compiler path]
+ --use-cc=[compiler path]
+
+ scan-build analyzes a project by interposing a "fake compiler", which
+ executes a real compiler for compilation and the static analyzer for analysis.
+ Because of the current implementation of interposition, scan-build does not
+ know what compiler your project normally uses. Instead, it simply overrides
+ the CC environment variable, and guesses your default compiler.
+
+  In the future, this interposition mechanism may be improved, but if you need
+ scan-build to use a specific compiler for *compilation* then you can use
+ this option to specify a path to that compiler.
+
+  If the given compiler is a cross compiler, you may also need to provide the
+  --analyzer-target option to properly analyze the source code, because the
+  static analyzer runs by default as if the code were compiled for the host machine.
+
+ --use-c++ [compiler path]
+ --use-c++=[compiler path]
+
+ This is the same as "--use-cc" but for C++ code.
+
+ --analyzer-target [target triple name for analysis]
+ --analyzer-target=[target triple name for analysis]
+
+  This provides the target triple to the clang static analyzer.
+  It changes only the target used for analysis; it does not change the target
+  of the real compiler given by the --use-cc and --use-c++ options.
+
+ -v
+
+ Enable verbose output from scan-build. A second and third '-v' increases
+ verbosity.
+
+ -V
+ --view
+
+ View analysis results in a web browser when the build completes.
+
+ADVANCED OPTIONS:
+
+ -no-failure-reports
+
+ Do not create a 'failures' subdirectory that includes analyzer crash reports
+ and preprocessed source files.
+
+ -stats
+
+ Generates visitation statistics for the project being analyzed.
+
+ -maxloop <loop count>
+
+    Specify the number of times a block can be visited before giving up.
+    Default is 4. Increase for more comprehensive coverage at a cost of speed.
+
+ -internal-stats
+
+ Generate internal analyzer statistics.
+
+ --use-analyzer [Xcode|path to clang]
+ --use-analyzer=[Xcode|path to clang]
+
+ scan-build uses the 'clang' executable relative to itself for static
+ analysis. One can override this behavior with this option by using the
+ 'clang' packaged with Xcode (on OS X) or from the PATH.
+
+ --keep-empty
+
+ Don't remove the build results directory even if no issues were reported.
+
+  --override-compiler
+
+    Always resort to ccc-analyzer even when better interposition methods
+    are available.
+
+ -analyzer-config <options>
+
+ Provide options to pass through to the analyzer's -analyzer-config flag.
+    Multiple options are separated with commas: 'key1=val1,key2=val2'.
+
+ Available options:
+ * stable-report-filename=true or false (default)
+ Switch the page naming to:
+ report-<filename>-<function/method name>-<id>.html
+ instead of report-XXXXXX.html
+
+CONTROLLING CHECKERS:
+
+  A default group of checkers is always run unless explicitly disabled.
+ Checkers may be enabled/disabled using the following options:
+
+ -enable-checker [checker name]
+ -disable-checker [checker name]
+
+LOADING CHECKERS:
+
+ Loading external checkers using the clang plugin interface:
+
+ -load-plugin [plugin library]
+ENDTEXT
+
+ if (defined $Clang && -x $Clang) {
+ # Query clang for list of checkers that are enabled.
+
+ # create a list to load the plugins via the 'Xclang' command line
+ # argument
+ my @PluginLoadCommandline_xclang;
+ foreach my $param ( @{$Options{PluginsToLoad}} ) {
+ push ( @PluginLoadCommandline_xclang, "-Xclang" );
+ push ( @PluginLoadCommandline_xclang, "-load" );
+ push ( @PluginLoadCommandline_xclang, "-Xclang" );
+ push ( @PluginLoadCommandline_xclang, $param );
+ }
+
+ my %EnabledCheckers;
+ foreach my $lang ("c", "objective-c", "objective-c++", "c++") {
+ my $ExecLine = join(' ', qq/"$Clang"/, @PluginLoadCommandline_xclang, "--analyze", "-x", $lang, "-", "-###", "2>&1", "|");
+ open(PS, $ExecLine);
+ while (<PS>) {
+ foreach my $val (split /\s+/) {
+ $val =~ s/\"//g;
+ if ($val =~ /-analyzer-checker\=([^\s]+)/) {
+ $EnabledCheckers{$1} = 1;
+ }
+ }
+ }
+ }
+
+ # Query clang for complete list of checkers.
+ my @PluginLoadCommandline;
+ foreach my $param ( @{$Options{PluginsToLoad}} ) {
+ push ( @PluginLoadCommandline, "-load" );
+ push ( @PluginLoadCommandline, $param );
+ }
+
+ my $ExecLine = join(' ', qq/"$Clang"/, "-cc1", @PluginLoadCommandline, "-analyzer-checker-help", "2>&1", "|");
+ open(PS, $ExecLine);
+ my $foundCheckers = 0;
+ while (<PS>) {
+ if (/CHECKERS:/) {
+ $foundCheckers = 1;
+ last;
+ }
+ }
+ if (!$foundCheckers) {
+      print " *** Could not query Clang for the list of available checkers.\n";
+ }
+ else {
+ print("\nAVAILABLE CHECKERS:\n\n");
+ my $skip = 0;
+ while(<PS>) {
+ if (/experimental/) {
+ $skip = 1;
+ next;
+ }
+ if ($skip) {
+ next if (!/^\s\s[^\s]/);
+ $skip = 0;
+ }
+ s/^\s\s//;
+ if (/^([^\s]+)/) {
+ # Is the checker enabled?
+ my $checker = $1;
+ my $enabled = 0;
+ my $aggregate = "";
+ foreach my $domain (split /\./, $checker) {
+ $aggregate .= $domain;
+ if ($EnabledCheckers{$aggregate}) {
+              $enabled = 1;
+ last;
+ }
+ # append a dot, if an additional domain is added in the next iteration
+ $aggregate .= ".";
+ }
+
+ if ($enabled) {
+ print " + ";
+ }
+ else {
+ print " ";
+ }
+ }
+ else {
+ print " ";
+ }
+ print $_;
+ }
+ print "\nNOTE: \"+\" indicates that an analysis is enabled by default.\n";
+ }
+ close PS;
+ }
+ else {
+ print " *** Could not query Clang for the list of available checkers.\n";
+ if (defined $ArgClangNotFoundErrMsg) {
+ print " *** Reason: $ArgClangNotFoundErrMsg\n";
+ }
+ }
+
+print <<ENDTEXT
+
+BUILD OPTIONS
+
+ You can specify any build option acceptable to the build command.
+
+EXAMPLE
+
+ scan-build -o /tmp/myhtmldir make -j4
+
+The above example causes analysis reports to be deposited into a subdirectory
+of "/tmp/myhtmldir" and runs "make" with the "-j4" option. A different
+subdirectory is created each time scan-build analyzes a project. The analyzer
+should support most parallel builds, but not distributed builds.
+
+ENDTEXT
+}
+
+##----------------------------------------------------------------------------##
+# HtmlEscape - HTML entity encode characters that are special in HTML
+##----------------------------------------------------------------------------##
+
+sub HtmlEscape {
+ # copy argument to new variable so we don't clobber the original
+ my $arg = shift || '';
+ my $tmp = $arg;
+ $tmp =~ s/&/&amp;/g;
+ $tmp =~ s/</&lt;/g;
+ $tmp =~ s/>/&gt;/g;
+ return $tmp;
+}
+
+##----------------------------------------------------------------------------##
+# ShellEscape - quote arguments containing characters that are special to the shell
+##----------------------------------------------------------------------------##
+
+sub ShellEscape {
+ # copy argument to new variable so we don't clobber the original
+ my $arg = shift || '';
+ if ($arg =~ /["\s]/) { return "'" . $arg . "'"; }
+ return $arg;
+}
+
+##----------------------------------------------------------------------------##
+# FindClang - searches for 'clang' executable.
+##----------------------------------------------------------------------------##
+
+sub FindClang {
+ if (!defined $Options{AnalyzerDiscoveryMethod}) {
+ $Clang = Cwd::realpath("$RealBin/bin/clang") if (-f "$RealBin/bin/clang");
+ if (!defined $Clang || ! -x $Clang) {
+ $Clang = Cwd::realpath("$RealBin/clang") if (-f "$RealBin/clang");
+ }
+ if (!defined $Clang || ! -x $Clang) {
+ return "error: Cannot find an executable 'clang' relative to" .
+ " scan-build. Consider using --use-analyzer to pick a version of" .
+ " 'clang' to use for static analysis.\n";
+ }
+ }
+ else {
+ if ($Options{AnalyzerDiscoveryMethod} =~ /^[Xx]code$/) {
+ my $xcrun = `which xcrun`;
+ chomp $xcrun;
+ if ($xcrun eq "") {
+ return "Cannot find 'xcrun' to find 'clang' for analysis.\n";
+ }
+ $Clang = `$xcrun -toolchain XcodeDefault -find clang`;
+ chomp $Clang;
+ if ($Clang eq "") {
+ return "No 'clang' executable found by 'xcrun'\n";
+ }
+ }
+ else {
+ $Clang = $Options{AnalyzerDiscoveryMethod};
+ if (!defined $Clang or not -x $Clang) {
+ return "Cannot find an executable clang at '$Options{AnalyzerDiscoveryMethod}'\n";
+ }
+ }
+ }
+ return undef;
+}
+
+##----------------------------------------------------------------------------##
+# Process command-line arguments.
+##----------------------------------------------------------------------------##
+
+my $RequestDisplayHelp = 0;
+my $ForceDisplayHelp = 0;
+
+sub ProcessArgs {
+ my $Args = shift;
+ my $NumArgs = 0;
+
+ while (@$Args) {
+
+ $NumArgs++;
+
+ # Scan for options we recognize.
+
+ my $arg = $Args->[0];
+
+ if ($arg eq "-h" or $arg eq "--help") {
+ $RequestDisplayHelp = 1;
+ shift @$Args;
+ next;
+ }
+
+ if ($arg eq '-analyze-headers') {
+ shift @$Args;
+ $Options{AnalyzeHeaders} = 1;
+ next;
+ }
+
+ if ($arg eq "-o") {
+ shift @$Args;
+
+ if (!@$Args) {
+ DieDiag("'-o' option requires a target directory name.\n");
+ }
+
+ # Construct an absolute path. Uses the current working directory
+ # as a base if the original path was not absolute.
+ my $OutDir = shift @$Args;
+ mkpath($OutDir) unless (-e $OutDir); # abs_path wants existing dir
+ $Options{OutputDir} = abs_path($OutDir);
+
+ next;
+ }
+
+ if ($arg =~ /^--html-title(=(.+))?$/) {
+ shift @$Args;
+
+ if (!defined $2 || $2 eq '') {
+ if (!@$Args) {
+ DieDiag("'--html-title' option requires a string.\n");
+ }
+
+ $Options{HtmlTitle} = shift @$Args;
+ } else {
+ $Options{HtmlTitle} = $2;
+ }
+
+ next;
+ }
+
+ if ($arg eq "-k" or $arg eq "--keep-going") {
+ shift @$Args;
+ $Options{IgnoreErrors} = 1;
+ next;
+ }
+
+ if ($arg =~ /^--use-cc(=(.+))?$/) {
+ shift @$Args;
+ my $cc;
+
+ if (!defined $2 || $2 eq "") {
+ if (!@$Args) {
+ DieDiag("'--use-cc' option requires a compiler executable name.\n");
+ }
+ $cc = shift @$Args;
+ }
+ else {
+ $cc = $2;
+ }
+
+ $Options{UseCC} = $cc;
+ next;
+ }
+
+ if ($arg =~ /^--use-c\+\+(=(.+))?$/) {
+ shift @$Args;
+ my $cxx;
+
+ if (!defined $2 || $2 eq "") {
+ if (!@$Args) {
+ DieDiag("'--use-c++' option requires a compiler executable name.\n");
+ }
+ $cxx = shift @$Args;
+ }
+ else {
+ $cxx = $2;
+ }
+
+ $Options{UseCXX} = $cxx;
+ next;
+ }
+
+    if ($arg =~ /^--analyzer-target(=(.+))?$/) {
+      # Operate on @$Args like the other options (ProcessArgs receives \@ARGV).
+      shift @$Args;
+      my $AnalyzerTarget;
+
+      if (!defined $2 || $2 eq "") {
+        if (!@$Args) {
+          DieDiag("'--analyzer-target' option requires a target triple name.\n");
+        }
+        $AnalyzerTarget = shift @$Args;
+ }
+ else {
+ $AnalyzerTarget = $2;
+ }
+
+ $Options{AnalyzerTarget} = $AnalyzerTarget;
+ next;
+ }
+
+ if ($arg eq "-v") {
+ shift @$Args;
+ $Options{Verbose}++;
+ next;
+ }
+
+ if ($arg eq "-V" or $arg eq "--view") {
+ shift @$Args;
+ $Options{ViewResults} = 1;
+ next;
+ }
+
+ if ($arg eq "--status-bugs") {
+ shift @$Args;
+ $Options{ExitStatusFoundBugs} = 1;
+ next;
+ }
+
+ if ($arg eq "-store") {
+ shift @$Args;
+ $Options{StoreModel} = shift @$Args;
+ next;
+ }
+
+ if ($arg eq "-constraints") {
+ shift @$Args;
+ $Options{ConstraintsModel} = shift @$Args;
+ next;
+ }
+
+ if ($arg eq "-internal-stats") {
+ shift @$Args;
+ $Options{InternalStats} = 1;
+ next;
+ }
+
+ if ($arg eq "-plist") {
+ shift @$Args;
+ $Options{OutputFormat} = "plist";
+ next;
+ }
+
+ if ($arg eq "-plist-html") {
+ shift @$Args;
+ $Options{OutputFormat} = "plist-html";
+ next;
+ }
+
+ if ($arg eq "-analyzer-config") {
+ shift @$Args;
+ push @{$Options{ConfigOptions}}, shift @$Args;
+ next;
+ }
+
+ if ($arg eq "-no-failure-reports") {
+ shift @$Args;
+ $Options{ReportFailures} = 0;
+ next;
+ }
+
+ if ($arg eq "-stats") {
+ shift @$Args;
+ $Options{AnalyzerStats} = 1;
+ next;
+ }
+
+ if ($arg eq "-maxloop") {
+ shift @$Args;
+ $Options{MaxLoop} = shift @$Args;
+ next;
+ }
+
+ if ($arg eq "-enable-checker") {
+ shift @$Args;
+ my $Checker = shift @$Args;
+ # Store $NumArgs to preserve the order the checkers were enabled.
+ $Options{EnableCheckers}{$Checker} = $NumArgs;
+ delete $Options{DisableCheckers}{$Checker};
+ next;
+ }
+
+ if ($arg eq "-disable-checker") {
+ shift @$Args;
+ my $Checker = shift @$Args;
+ # Store $NumArgs to preserve the order the checkers were disabled.
+ $Options{DisableCheckers}{$Checker} = $NumArgs;
+ delete $Options{EnableCheckers}{$Checker};
+ next;
+ }
+
+ if ($arg eq "-load-plugin") {
+ shift @$Args;
+ push @{$Options{PluginsToLoad}}, shift @$Args;
+ next;
+ }
+
+ if ($arg eq "--use-analyzer") {
+ shift @$Args;
+ $Options{AnalyzerDiscoveryMethod} = shift @$Args;
+ next;
+ }
+
+ if ($arg =~ /^--use-analyzer=(.+)$/) {
+ shift @$Args;
+ $Options{AnalyzerDiscoveryMethod} = $1;
+ next;
+ }
+
+ if ($arg eq "--keep-empty") {
+ shift @$Args;
+ $Options{KeepEmpty} = 1;
+ next;
+ }
+
+ if ($arg eq "--override-compiler") {
+ shift @$Args;
+ $Options{OverrideCompiler} = 1;
+ next;
+ }
+
+ DieDiag("unrecognized option '$arg'\n") if ($arg =~ /^-/);
+
+ $NumArgs--;
+ last;
+ }
+ return $NumArgs;
+}
+
+if (!@ARGV) {
+  $ForceDisplayHelp = 1;
+}
+
+ProcessArgs(\@ARGV);
+# All arguments are now shifted from @ARGV. The rest is a build command, if any.
+
+if (!@ARGV and !$RequestDisplayHelp) {
+ ErrorDiag("No build command specified.\n\n");
+ $ForceDisplayHelp = 1;
+}
+
+my $ClangNotFoundErrMsg = FindClang();
+
+if ($ForceDisplayHelp || $RequestDisplayHelp) {
+ DisplayHelp($ClangNotFoundErrMsg);
+ exit $ForceDisplayHelp;
+}
+
+DieDiag($ClangNotFoundErrMsg) if (defined $ClangNotFoundErrMsg);
+
+$ClangCXX = $Clang;
+if ($Clang !~ /\+\+(\.exe)?$/) {
+ # If $Clang holds the name of the clang++ executable then we leave
+ # $ClangCXX and $Clang equal, otherwise construct the name of the clang++
+ # executable from the clang executable name.
+
+ # Determine operating system under which this copy of Perl was built.
+ my $IsWinBuild = ($^O =~/msys|cygwin|MSWin32/);
+ if($IsWinBuild) {
+ $ClangCXX =~ s/.exe$/++.exe/;
+ }
+ else {
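+    # Strip any trailing "-<major>.<minor>" version suffix (e.g. "clang-3.8"
+    # becomes "clang") before appending "++".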
+ $ClangCXX =~ s/\-\d+\.\d+$//;
+ $ClangCXX .= "++";
+ }
+}
+
+# Make sure to use "" to handle paths with spaces.
+$ClangVersion = HtmlEscape(`"$Clang" --version`);
+
+# Determine where results go.
+$CmdArgs = HtmlEscape(join(' ', map(ShellEscape($_), @ARGV)));
+
+# Determine the output directory for the HTML reports.
+my $BaseDir = $Options{OutputDir};
+$Options{OutputDir} = GetHTMLRunDir($Options{OutputDir});
+
+# Determine the location of ccc-analyzer.
+my $AbsRealBin = Cwd::realpath($RealBin);
+my $Cmd = "$AbsRealBin/../libexec/ccc-analyzer";
+my $CmdCXX = "$AbsRealBin/../libexec/c++-analyzer";
+
+# Portability: use the less strict but portable check -e (file exists) instead
+# of the non-portable -x (file is executable). On some Windows ports, -x just
+# checks the file extension to determine whether a file is executable (see
+# Perl language reference, perlport).
+if (!defined $Cmd || ! -e $Cmd) {
+ $Cmd = "$AbsRealBin/ccc-analyzer";
+ DieDiag("'ccc-analyzer' does not exist at '$Cmd'\n") if(! -e $Cmd);
+}
+if (!defined $CmdCXX || ! -e $CmdCXX) {
+ $CmdCXX = "$AbsRealBin/c++-analyzer";
+ DieDiag("'c++-analyzer' does not exist at '$CmdCXX'\n") if(! -e $CmdCXX);
+}
+
+Diag("Using '$Clang' for static analysis\n");
+
+SetHtmlEnv(\@ARGV, $Options{OutputDir});
+
+my @AnalysesToRun;
+foreach (sort { $Options{EnableCheckers}{$a} <=> $Options{EnableCheckers}{$b} }
+ keys %{$Options{EnableCheckers}}) {
+ # Push checkers in order they were enabled.
+ push @AnalysesToRun, "-analyzer-checker", $_;
+}
+foreach (sort { $Options{DisableCheckers}{$a} <=> $Options{DisableCheckers}{$b} }
+ keys %{$Options{DisableCheckers}}) {
+ # Push checkers in order they were disabled.
+ push @AnalysesToRun, "-analyzer-disable-checker", $_;
+}
+if ($Options{AnalyzeHeaders}) { push @AnalysesToRun, "-analyzer-opt-analyze-headers"; }
+if ($Options{AnalyzerStats}) { push @AnalysesToRun, '-analyzer-checker=debug.Stats'; }
+if ($Options{MaxLoop} > 0) { push @AnalysesToRun, "-analyzer-max-loop $Options{MaxLoop}"; }
+
+# Delay setting up other environment variables in case we can do true
+# interposition.
+my $CCC_ANALYZER_ANALYSIS = join ' ', @AnalysesToRun;
+my $CCC_ANALYZER_PLUGINS = join ' ', map { "-load ".$_ } @{$Options{PluginsToLoad}};
+my $CCC_ANALYZER_CONFIG = join ' ', map { "-analyzer-config ".$_ } @{$Options{ConfigOptions}};
+my %EnvVars = (
+ 'CC' => $Cmd,
+ 'CXX' => $CmdCXX,
+ 'CLANG' => $Clang,
+ 'CLANG_CXX' => $ClangCXX,
+ 'VERBOSE' => $Options{Verbose},
+ 'CCC_ANALYZER_ANALYSIS' => $CCC_ANALYZER_ANALYSIS,
+ 'CCC_ANALYZER_PLUGINS' => $CCC_ANALYZER_PLUGINS,
+ 'CCC_ANALYZER_CONFIG' => $CCC_ANALYZER_CONFIG,
+ 'OUTPUT_DIR' => $Options{OutputDir},
+ 'CCC_CC' => $Options{UseCC},
+ 'CCC_CXX' => $Options{UseCXX},
+ 'CCC_REPORT_FAILURES' => $Options{ReportFailures},
+ 'CCC_ANALYZER_STORE_MODEL' => $Options{StoreModel},
+ 'CCC_ANALYZER_CONSTRAINTS_MODEL' => $Options{ConstraintsModel},
+ 'CCC_ANALYZER_INTERNAL_STATS' => $Options{InternalStats},
+ 'CCC_ANALYZER_OUTPUT_FORMAT' => $Options{OutputFormat},
+ 'CLANG_ANALYZER_TARGET' => $Options{AnalyzerTarget}
+);
+
+# Run the build.
+my $ExitStatus = RunBuildCommand(\@ARGV, $Options{IgnoreErrors}, $Cmd, $CmdCXX,
+ \%EnvVars);
+
+if (defined $Options{OutputFormat}) {
+ if ($Options{OutputFormat} =~ /plist/) {
+ Diag "Analysis run complete.\n";
+ Diag "Analysis results (plist files) deposited in '$Options{OutputDir}'\n";
+ }
+ if ($Options{OutputFormat} =~ /html/) {
+ # Postprocess the HTML directory.
+ my $NumBugs = Postprocess($Options{OutputDir}, $BaseDir,
+ $Options{AnalyzerStats}, $Options{KeepEmpty});
+
+ if ($Options{ViewResults} and -r "$Options{OutputDir}/index.html") {
+ Diag "Analysis run complete.\n";
+ Diag "Viewing analysis results in '$Options{OutputDir}' using scan-view.\n";
+ my $ScanView = Cwd::realpath("$RealBin/scan-view");
+ if (! -x $ScanView) { $ScanView = "scan-view"; }
+ if (! -x $ScanView) { $ScanView = Cwd::realpath("$RealBin/../../scan-view/bin/scan-view"); }
+ exec $ScanView, "$Options{OutputDir}";
+ }
+
+ if ($Options{ExitStatusFoundBugs}) {
+ exit 1 if ($NumBugs > 0);
+ exit 0;
+ }
+ }
+}
+
+exit $ExitStatus;
diff --git a/clang-2690385/tools/scan-build/bin/scan-build.bat b/clang-2690385/tools/scan-build/bin/scan-build.bat
new file mode 100644
index 00000000..77be6746
--- /dev/null
+++ b/clang-2690385/tools/scan-build/bin/scan-build.bat
@@ -0,0 +1 @@
+perl -S scan-build %*
diff --git a/clang-2690385/tools/scan-build/bin/set-xcode-analyzer b/clang-2690385/tools/scan-build/bin/set-xcode-analyzer
new file mode 100755
index 00000000..8e674823
--- /dev/null
+++ b/clang-2690385/tools/scan-build/bin/set-xcode-analyzer
@@ -0,0 +1,114 @@
+#!/usr/bin/python
+
+# [PR 11661] Note that we hardwire to /usr/bin/python because we
+# want to use the system version of Python on Mac OS X.
+# This one has the scripting bridge enabled.
+
+import sys
+if sys.version_info < (2, 7):
+ print "set-xcode-analyzer requires Python 2.7 or later"
+ sys.exit(1)
+
+import os
+import subprocess
+import re
+import tempfile
+import shutil
+import stat
+from AppKit import *
+
+def FindClangSpecs(path):
+ print "(+) Searching for xcspec file in: ", path
+ for root, dirs, files in os.walk(path):
+ for f in files:
+ if f.endswith(".xcspec") and f.startswith("Clang LLVM"):
+ yield os.path.join(root, f)
+
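+
+# Rewrite the ExecPath entry that follows the "Static Analyzer" marker in a
+# matching .xcspec file so it points at the requested clang binary.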
+def ModifySpec(path, isBuiltinAnalyzer, pathToChecker):
+ t = tempfile.NamedTemporaryFile(delete=False)
+ foundAnalyzer = False
+ with open(path) as f:
+ if isBuiltinAnalyzer:
+ # First search for CLANG_ANALYZER_EXEC. Newer
+ # versions of Xcode set EXEC_PATH to be CLANG_ANALYZER_EXEC.
+ with open(path) as f2:
+ for line in f2:
+ if line.find("CLANG_ANALYZER_EXEC") >= 0:
+ pathToChecker = "$(CLANG_ANALYZER_EXEC)"
+ break
+ # Now create a new file.
+ for line in f:
+ if not foundAnalyzer:
+ if line.find("Static Analyzer") >= 0:
+ foundAnalyzer = True
+ else:
+ m = re.search('^(\s*ExecPath\s*=\s*")', line)
+ if m:
+ line = "".join([m.group(0), pathToChecker, '";\n'])
+ # Do not modify further ExecPath's later in the xcspec.
+ foundAnalyzer = False
+ t.write(line)
+ t.close()
+ print "(+) processing:", path
+ try:
+ shutil.copy(t.name, path)
+ os.chmod(path, stat.S_IRUSR | stat.S_IWUSR | stat.S_IRGRP | stat.S_IROTH)
+ except IOError, why:
+ print " (-) Cannot update file:", why, "\n"
+ except OSError, why:
+ print " (-) Cannot update file:", why, "\n"
+ os.unlink(t.name)
+
+def main():
+ from optparse import OptionParser
+ parser = OptionParser('usage: %prog [options]')
+ parser.set_description(__doc__)
+ parser.add_option("--use-checker-build", dest="path",
+ help="Use the Clang located at the provided absolute path, e.g. /Users/foo/checker-1")
+ parser.add_option("--use-xcode-clang", action="store_const",
+ const="$(CLANG)", dest="default",
+ help="Use the Clang bundled with Xcode")
+ (options, args) = parser.parse_args()
+ if options.path is None and options.default is None:
+ parser.error("You must specify a version of Clang to use for static analysis. Specify '-h' for details")
+
+ # determine if Xcode is running
+ for x in NSWorkspace.sharedWorkspace().runningApplications():
+ if x.localizedName().find("Xcode") >= 0:
+ print "(-) You must quit Xcode first before modifying its configuration files."
+ sys.exit(1)
+
+ isBuiltinAnalyzer = False
+ if options.path:
+ # Expand tildes.
+ path = os.path.expanduser(options.path)
+ if not path.endswith("clang"):
+ print "(+) Using Clang bundled with checker build:", path
+ path = os.path.join(path, "bin", "clang");
+ else:
+ print "(+) Using Clang located at:", path
+ else:
+ print "(+) Using the Clang bundled with Xcode"
+ path = options.default
+ isBuiltinAnalyzer = True
+
+ try:
+ xcode_path = subprocess.check_output(["xcode-select", "-print-path"])
+ except AttributeError:
+ # Fall back to the default install location when using Python < 2.7.0
+ xcode_path = "/Developer"
+ if (xcode_path.find(".app/") != -1):
+ # Cut off the 'Developer' dir, as the xcspec lies in another part
+ # of the Xcode.app subtree.
+ xcode_path = xcode_path.rsplit('/Developer', 1)[0]
+
+ foundSpec = False
+ for x in FindClangSpecs(xcode_path):
+ foundSpec = True
+ ModifySpec(x, isBuiltinAnalyzer, path)
+
+ if foundSpec == False:
+ print "(-) No compiler configuration file was found. Xcode's analyzer has not been updated."
+
+if __name__ == '__main__':
+ main()
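The heart of ModifySpec is a line-oriented rewrite: the first ExecPath assignment following the "Static Analyzer" marker is pointed at the chosen checker. A rough Python illustration of that substitution, using a made-up xcspec line:

    import re

    path_to_checker = "/Users/foo/checker-1/bin/clang"   # hypothetical path
    line = '        ExecPath = "$(CLANG)";\n'            # made-up xcspec line

    m = re.search(r'^(\s*ExecPath\s*=\s*")', line)
    if m:
        line = m.group(0) + path_to_checker + '";\n'
    print(line)  # ExecPath = "/Users/foo/checker-1/bin/clang";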
diff --git a/clang-2690385/tools/scan-build/libexec/c++-analyzer b/clang-2690385/tools/scan-build/libexec/c++-analyzer
new file mode 100755
index 00000000..dda5db9c
--- /dev/null
+++ b/clang-2690385/tools/scan-build/libexec/c++-analyzer
@@ -0,0 +1,8 @@
+#!/usr/bin/env perl
+
+use Cwd qw/ abs_path /;
+use File::Basename qw/ dirname /;
+# Add scan-build dir to the list of places where perl looks for modules.
+use lib dirname(abs_path($0));
+
+do 'ccc-analyzer';
diff --git a/clang-2690385/tools/scan-build/libexec/c++-analyzer.bat b/clang-2690385/tools/scan-build/libexec/c++-analyzer.bat
new file mode 100644
index 00000000..69f048a9
--- /dev/null
+++ b/clang-2690385/tools/scan-build/libexec/c++-analyzer.bat
@@ -0,0 +1 @@
+perl -S c++-analyzer %*
diff --git a/clang-2690385/tools/scan-build/libexec/ccc-analyzer b/clang-2690385/tools/scan-build/libexec/ccc-analyzer
new file mode 100755
index 00000000..831dd42e
--- /dev/null
+++ b/clang-2690385/tools/scan-build/libexec/ccc-analyzer
@@ -0,0 +1,777 @@
+#!/usr/bin/env perl
+#
+# The LLVM Compiler Infrastructure
+#
+# This file is distributed under the University of Illinois Open Source
+# License. See LICENSE.TXT for details.
+#
+##===----------------------------------------------------------------------===##
+#
+# A script designed to interpose between the build system and gcc. It invokes
+# both gcc and the static analyzer.
+#
+##===----------------------------------------------------------------------===##
+
+use strict;
+use warnings;
+use FindBin;
+use Cwd qw/ getcwd abs_path /;
+use File::Temp qw/ tempfile /;
+use File::Path qw / mkpath /;
+use File::Basename;
+use Text::ParseWords;
+
+##===----------------------------------------------------------------------===##
+# List-form 'system' with STDOUT and STDERR captured.
+##===----------------------------------------------------------------------===##
+
+sub silent_system {
+ my $HtmlDir = shift;
+ my $Command = shift;
+
+ # Save STDOUT and STDERR and redirect to a temporary file.
+ open OLDOUT, ">&", \*STDOUT;
+ open OLDERR, ">&", \*STDERR;
+ my ($TmpFH, $TmpFile) = tempfile("temp_buf_XXXXXX",
+ DIR => $HtmlDir,
+ UNLINK => 1);
+ open(STDOUT, ">$TmpFile");
+ open(STDERR, ">&", \*STDOUT);
+
+ # Invoke 'system', STDOUT and STDERR are output to a temporary file.
+ system $Command, @_;
+
+ # Restore STDOUT and STDERR.
+ open STDOUT, ">&", \*OLDOUT;
+ open STDERR, ">&", \*OLDERR;
+
+ return $TmpFH;
+}
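silent_system saves the real STDOUT/STDERR, points both at a temporary file, runs the command, and returns a handle on the combined output. The equivalent idea in Python, as a standard-library sketch rather than a translation of the tool:

    import subprocess
    import tempfile

    def silent_system(html_dir, *command):
        """Run a command with stdout and stderr captured in a temp file."""
        tmp = tempfile.NamedTemporaryFile(prefix="temp_buf_", dir=html_dir,
                                          mode="w+")
        subprocess.call(command, stdout=tmp, stderr=subprocess.STDOUT)
        tmp.seek(0)  # rewind so the caller can read the captured output
        return tmp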
+
+##===----------------------------------------------------------------------===##
+# Compiler command setup.
+##===----------------------------------------------------------------------===##
+
+# Search the PATH to determine whether the compiler exists.
+sub SearchInPath {
+ my $file = shift;
+ foreach my $dir (split (':', $ENV{PATH})) {
+ if (-x "$dir/$file") {
+ return 1;
+ }
+ }
+ return 0;
+}
+
+my $Compiler;
+my $Clang;
+my $DefaultCCompiler;
+my $DefaultCXXCompiler;
+my $IsCXX;
+my $AnalyzerTarget;
+
+# If on OSX, use xcrun to determine the SDK root.
+my $UseXCRUN = 0;
+
+if (`uname -a` =~ m/Darwin/) {
+ $DefaultCCompiler = 'clang';
+ $DefaultCXXCompiler = 'clang++';
+ # Older versions of OSX do not have xcrun to
+ # query the SDK location.
+ if (-x "/usr/bin/xcrun") {
+ $UseXCRUN = 1;
+ }
+} else {
+ $DefaultCCompiler = 'gcc';
+ $DefaultCXXCompiler = 'g++';
+}
+
+if ($FindBin::Script =~ /c\+\+-analyzer/) {
+ $Compiler = $ENV{'CCC_CXX'};
+ if (!defined $Compiler || (! -x $Compiler && ! SearchInPath($Compiler))) { $Compiler = $DefaultCXXCompiler; }
+
+ $Clang = $ENV{'CLANG_CXX'};
+ if (!defined $Clang || ! -x $Clang) { $Clang = 'clang++'; }
+
+  $IsCXX = 1;
+}
+else {
+ $Compiler = $ENV{'CCC_CC'};
+ if (!defined $Compiler || (! -x $Compiler && ! SearchInPath($Compiler))) { $Compiler = $DefaultCCompiler; }
+
+ $Clang = $ENV{'CLANG'};
+ if (!defined $Clang || ! -x $Clang) { $Clang = 'clang'; }
+
+  $IsCXX = 0;
+}
+
+$AnalyzerTarget = $ENV{'CLANG_ANALYZER_TARGET'};
+
+##===----------------------------------------------------------------------===##
+# Cleanup.
+##===----------------------------------------------------------------------===##
+
+my $ReportFailures = $ENV{'CCC_REPORT_FAILURES'};
+if (!defined $ReportFailures) { $ReportFailures = 1; }
+
+my $CleanupFile;
+my $ResultFile;
+
+# Remove any stale files at exit.
+END {
+ if (defined $ResultFile && -z $ResultFile) {
+ unlink($ResultFile);
+ }
+ if (defined $CleanupFile) {
+ unlink($CleanupFile);
+ }
+}
+
+##----------------------------------------------------------------------------##
+# Process Clang Crashes.
+##----------------------------------------------------------------------------##
+
+sub GetPPExt {
+ my $Lang = shift;
+ if ($Lang =~ /objective-c\+\+/) { return ".mii" };
+ if ($Lang =~ /objective-c/) { return ".mi"; }
+ if ($Lang =~ /c\+\+/) { return ".ii"; }
+ return ".i";
+}
+
+# Set this to 1 if we want to include 'parser rejects' files.
+my $IncludeParserRejects = 0;
+my $ParserRejects = "Parser Rejects";
+my $AttributeIgnored = "Attribute Ignored";
+my $OtherError = "Other Error";
+
+sub ProcessClangFailure {
+ my ($Clang, $Lang, $file, $Args, $HtmlDir, $ErrorType, $ofile) = @_;
+ my $Dir = "$HtmlDir/failures";
+ mkpath $Dir;
+
+ my $prefix = "clang_crash";
+ if ($ErrorType eq $ParserRejects) {
+ $prefix = "clang_parser_rejects";
+ }
+ elsif ($ErrorType eq $AttributeIgnored) {
+ $prefix = "clang_attribute_ignored";
+ }
+ elsif ($ErrorType eq $OtherError) {
+ $prefix = "clang_other_error";
+ }
+
+ # Generate the preprocessed file with Clang.
+ my ($PPH, $PPFile) = tempfile( $prefix . "_XXXXXX",
+ SUFFIX => GetPPExt($Lang),
+ DIR => $Dir);
+ close ($PPH);
+ system $Clang, @$Args, "-E", "-o", $PPFile;
+
+ # Create the info file.
+ open (OUT, ">", "$PPFile.info.txt") or die "Cannot open $PPFile.info.txt\n";
+ print OUT abs_path($file), "\n";
+ print OUT "$ErrorType\n";
+ print OUT "@$Args\n";
+ close OUT;
+ `uname -a >> $PPFile.info.txt 2>&1`;
+ `"$Compiler" -v >> $PPFile.info.txt 2>&1`;
+ rename($ofile, "$PPFile.stderr.txt");
+ return (basename $PPFile);
+}
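ProcessClangFailure preserves a self-contained reproducer: the failing file is run through clang -E so the preprocessed source survives, alongside an info file recording the original path, the error type, and the full argument list. A hedged Python sketch of the same recipe (the output file name is illustrative):

    import os
    import subprocess

    def save_reproducer(clang, args, src, error_type, out_dir):
        """Preprocess the failing file so the crash can be replayed later."""
        os.makedirs(out_dir, exist_ok=True)
        pp_file = os.path.join(out_dir, "clang_crash_example.i")  # illustrative
        subprocess.call([clang] + args + ["-E", "-o", pp_file])
        with open(pp_file + ".info.txt", "w") as info:
            info.write(os.path.abspath(src) + "\n")
            info.write(error_type + "\n")
            info.write(" ".join(args) + "\n")
        return pp_file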
+
+##----------------------------------------------------------------------------##
+# Running the analyzer.
+##----------------------------------------------------------------------------##
+
+sub GetCCArgs {
+ my $HtmlDir = shift;
+ my $mode = shift;
+ my $Args = shift;
+ my $line;
+ my $OutputStream = silent_system($HtmlDir, $Clang, "-###", $mode, @$Args);
+ while (<$OutputStream>) {
+ next if (!/\s"?-cc1"?\s/);
+ $line = $_;
+ }
+ die "could not find clang line\n" if (!defined $line);
+ # Strip leading and trailing whitespace characters.
+ $line =~ s/^\s+|\s+$//g;
+ my @items = quotewords('\s+', 0, $line);
+ my $cmd = shift @items;
+ die "cannot find 'clang' in 'clang' command\n" if (!($cmd =~ /clang/));
+ return \@items;
+}
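GetCCArgs leans on clang's -### flag, which prints the driver's planned subcommands instead of running them; the wrapper keeps the last line mentioning -cc1 and splits it with shell-style quoting. An equivalent probe in Python (a sketch that assumes clang is on PATH; note -### writes to stderr):

    import shlex
    import subprocess

    def get_cc1_args(clang, mode, args):
        """Ask the clang driver for its -cc1 job line and tokenize it."""
        proc = subprocess.run([clang, "-###", mode] + args,
                              capture_output=True, text=True)
        cc1_line = None
        for line in (proc.stdout + proc.stderr).splitlines():
            if '"-cc1"' in line or " -cc1 " in line:
                cc1_line = line.strip()  # keep the last matching line
        if cc1_line is None:
            raise RuntimeError("could not find clang -cc1 line")
        return shlex.split(cc1_line)[1:]  # drop the clang binary itself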
+
+sub Analyze {
+ my ($Clang, $OriginalArgs, $AnalyzeArgs, $Lang, $Output, $Verbose, $HtmlDir,
+ $file) = @_;
+
+ my @Args = @$OriginalArgs;
+ my $Cmd;
+ my @CmdArgs;
+ my @CmdArgsSansAnalyses;
+
+ if ($Lang =~ /header/) {
+ exit 0 if (!defined ($Output));
+ $Cmd = 'cp';
+ push @CmdArgs, $file;
+ # Remove the PCH extension.
+ $Output =~ s/[.]gch$//;
+ push @CmdArgs, $Output;
+ @CmdArgsSansAnalyses = @CmdArgs;
+ }
+ else {
+ $Cmd = $Clang;
+
+ # Create arguments for doing regular parsing.
+ my $SyntaxArgs = GetCCArgs($HtmlDir, "-fsyntax-only", \@Args);
+ @CmdArgsSansAnalyses = @$SyntaxArgs;
+
+ # Create arguments for doing static analysis.
+ if (defined $ResultFile) {
+ push @Args, '-o', $ResultFile;
+ }
+ elsif (defined $HtmlDir) {
+ push @Args, '-o', $HtmlDir;
+ }
+ if ($Verbose) {
+ push @Args, "-Xclang", "-analyzer-display-progress";
+ }
+
+ foreach my $arg (@$AnalyzeArgs) {
+ push @Args, "-Xclang", $arg;
+ }
+
+    # Display the exploded graph with Ubigraph?
+ if (defined $ENV{'CCC_UBI'}) {
+ push @Args, "-Xclang", "-analyzer-viz-egraph-ubigraph";
+ }
+
+ if (defined $AnalyzerTarget) {
+ push @Args, "-target", $AnalyzerTarget;
+ }
+
+ my $AnalysisArgs = GetCCArgs($HtmlDir, "--analyze", \@Args);
+ @CmdArgs = @$AnalysisArgs;
+ }
+
+ my @PrintArgs;
+ my $dir;
+
+ if ($Verbose) {
+ $dir = getcwd();
+ print STDERR "\n[LOCATION]: $dir\n";
+ push @PrintArgs,"'$Cmd'";
+ foreach my $arg (@CmdArgs) {
+ push @PrintArgs,"\'$arg\'";
+ }
+ }
+ if ($Verbose == 1) {
+ # We MUST print to stderr. Some clients use the stdout output of
+ # gcc for various purposes.
+ print STDERR join(' ', @PrintArgs);
+ print STDERR "\n";
+ }
+ elsif ($Verbose == 2) {
+ print STDERR "#SHELL (cd '$dir' && @PrintArgs)\n";
+ }
+
+ # Save STDOUT and STDERR of clang to a temporary file and reroute
+ # all clang output to ccc-analyzer's STDERR.
+  # We save the output file in the 'failures' directory if clang encounters
+  # any problems with the file.
+ my ($ofh, $ofile) = tempfile("clang_output_XXXXXX", DIR => $HtmlDir);
+
+ my $OutputStream = silent_system($HtmlDir, $Cmd, @CmdArgs);
+ while ( <$OutputStream> ) {
+ print $ofh $_;
+ print STDERR $_;
+ }
+ my $Result = $?;
+ close $ofh;
+
+ # Did the command die because of a signal?
+ if ($ReportFailures) {
+ if ($Result & 127 and $Cmd eq $Clang and defined $HtmlDir) {
+ ProcessClangFailure($Clang, $Lang, $file, \@CmdArgsSansAnalyses,
+ $HtmlDir, "Crash", $ofile);
+ }
+ elsif ($Result) {
+ if ($IncludeParserRejects && !($file =~/conftest/)) {
+ ProcessClangFailure($Clang, $Lang, $file, \@CmdArgsSansAnalyses,
+ $HtmlDir, $ParserRejects, $ofile);
+ } else {
+ ProcessClangFailure($Clang, $Lang, $file, \@CmdArgsSansAnalyses,
+ $HtmlDir, $OtherError, $ofile);
+ }
+ }
+ else {
+ # Check if there were any unhandled attributes.
+ if (open(CHILD, $ofile)) {
+ my %attributes_not_handled;
+
+ # Don't flag warnings about the following attributes that we
+ # know are currently not supported by Clang.
+ $attributes_not_handled{"cdecl"} = 1;
+
+ my $ppfile;
+ while (<CHILD>) {
+ next if (! /warning: '([^\']+)' attribute ignored/);
+
+ # Have we already spotted this unhandled attribute?
+ next if (defined $attributes_not_handled{$1});
+ $attributes_not_handled{$1} = 1;
+
+ # Get the name of the attribute file.
+ my $dir = "$HtmlDir/failures";
+ my $afile = "$dir/attribute_ignored_$1.txt";
+
+ # Only create another preprocessed file if the attribute file
+ # doesn't exist yet.
+ next if (-e $afile);
+
+ # Add this file to the list of files that contained this attribute.
+ # Generate a preprocessed file if we haven't already.
+ if (!(defined $ppfile)) {
+ $ppfile = ProcessClangFailure($Clang, $Lang, $file,
+ \@CmdArgsSansAnalyses,
+ $HtmlDir, $AttributeIgnored, $ofile);
+ }
+
+ mkpath $dir;
+ open(AFILE, ">$afile");
+ print AFILE "$ppfile\n";
+ close(AFILE);
+ }
+ close CHILD;
+ }
+ }
+ }
+
+ unlink($ofile);
+}
+
+##----------------------------------------------------------------------------##
+# Lookup tables.
+##----------------------------------------------------------------------------##
+
+my %CompileOptionMap = (
+ '-nostdinc' => 0,
+ '-include' => 1,
+ '-idirafter' => 1,
+ '-imacros' => 1,
+ '-iprefix' => 1,
+ '-iquote' => 1,
+ '-iwithprefix' => 1,
+ '-iwithprefixbefore' => 1
+);
+
+my %LinkerOptionMap = (
+ '-framework' => 1,
+ '-fobjc-link-runtime' => 0
+);
+
+my %CompilerLinkerOptionMap = (
+  '-Wwrite-strings' => 0,
+  '-ftrapv-handler' => 1, # specifically call out separated -f flag
+  '-mios-simulator-version-min' => 0, # This really takes one argument, but it always has '='
+  '-isysroot' => 1,
+  '-arch' => 1,
+  '-m32' => 0,
+  '-m64' => 0,
+  '-stdlib' => 0, # This really takes one argument, but it always has '='
+  '--sysroot' => 1,
+  '-target' => 1,
+  '-v' => 0,
+  '-mmacosx-version-min' => 0, # This really takes one argument, but it always has '='
+  '-miphoneos-version-min' => 0 # This really takes one argument, but it always has '='
+);
+
+my %IgnoredOptionMap = (
+ '-MT' => 1, # Ignore these preprocessor options.
+ '-MF' => 1,
+
+ '-fsyntax-only' => 0,
+ '-save-temps' => 0,
+ '-install_name' => 1,
+ '-exported_symbols_list' => 1,
+ '-current_version' => 1,
+ '-compatibility_version' => 1,
+ '-init' => 1,
+ '-e' => 1,
+ '-seg1addr' => 1,
+ '-bundle_loader' => 1,
+ '-multiply_defined' => 1,
+ '-sectorder' => 3,
+ '--param' => 1,
+ '-u' => 1,
+ '--serialize-diagnostics' => 1
+);
+
+my %LangMap = (
+ 'c' => $IsCXX ? 'c++' : 'c',
+ 'cp' => 'c++',
+ 'cpp' => 'c++',
+ 'cxx' => 'c++',
+ 'txx' => 'c++',
+ 'cc' => 'c++',
+ 'C' => 'c++',
+ 'ii' => 'c++-cpp-output',
+ 'i' => $IsCXX ? 'c++-cpp-output' : 'c-cpp-output',
+ 'm' => 'objective-c',
+ 'mi' => 'objective-c-cpp-output',
+ 'mm' => 'objective-c++',
+ 'mii' => 'objective-c++-cpp-output',
+);
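Note the twist encoded in %LangMap: when the wrapper runs as the C++ analyzer ($IsCXX), a plain .c file is deliberately analyzed as C++, since some builds feed C sources to the C++ compiler. A sketch of the same lookup in Python:

    import os

    def infer_language(path, is_cxx):
        """Map a file extension to the language passed to clang via -x."""
        lang_map = {
            'c':   'c++' if is_cxx else 'c',
            'cpp': 'c++', 'cc': 'c++', 'cxx': 'c++',
            'i':   'c++-cpp-output' if is_cxx else 'c-cpp-output',
            'm':   'objective-c', 'mm': 'objective-c++',
        }
        ext = os.path.splitext(path)[1].lstrip('.')
        return lang_map.get(ext)

    print(infer_language("foo.c", is_cxx=True))  # prints: c++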
+
+my %UniqueOptions = (
+ '-isysroot' => 0
+);
+
+##----------------------------------------------------------------------------##
+# Languages accepted.
+##----------------------------------------------------------------------------##
+
+my %LangsAccepted = (
+ "objective-c" => 1,
+ "c" => 1,
+ "c++" => 1,
+ "objective-c++" => 1,
+ "c-cpp-output" => 1,
+ "objective-c-cpp-output" => 1,
+ "c++-cpp-output" => 1
+);
+
+##----------------------------------------------------------------------------##
+# Main Logic.
+##----------------------------------------------------------------------------##
+
+my $Action = 'link';
+my @CompileOpts;
+my @LinkOpts;
+my @Files;
+my $Lang;
+my $Output;
+my %Uniqued;
+
+# Forward arguments to gcc.
+my $Status = system($Compiler,@ARGV);
+if (defined $ENV{'CCC_ANALYZER_LOG'}) {
+ print STDERR "$Compiler @ARGV\n";
+}
+if ($Status) { exit($Status >> 8); }
+
+# Get the analysis options.
+my $Analyses = $ENV{'CCC_ANALYZER_ANALYSIS'};
+
+# Get the plugins to load.
+my $Plugins = $ENV{'CCC_ANALYZER_PLUGINS'};
+
+# Get the store model.
+my $StoreModel = $ENV{'CCC_ANALYZER_STORE_MODEL'};
+
+# Get the constraints engine.
+my $ConstraintsModel = $ENV{'CCC_ANALYZER_CONSTRAINTS_MODEL'};
+
+# Get the internal stats setting.
+my $InternalStats = $ENV{'CCC_ANALYZER_INTERNAL_STATS'};
+
+# Get the output format.
+my $OutputFormat = $ENV{'CCC_ANALYZER_OUTPUT_FORMAT'};
+if (!defined $OutputFormat) { $OutputFormat = "html"; }
+
+# Get the config options.
+my $ConfigOptions = $ENV{'CCC_ANALYZER_CONFIG'};
+
+# Determine the level of verbosity.
+my $Verbose = 0;
+if (defined $ENV{'CCC_ANALYZER_VERBOSE'}) { $Verbose = 1; }
+if (defined $ENV{'CCC_ANALYZER_LOG'}) { $Verbose = 2; }
+
+# Get the HTML output directory.
+my $HtmlDir = $ENV{'CCC_ANALYZER_HTML'};
+
+my %DisabledArchs = ('ppc' => 1, 'ppc64' => 1);
+my %ArchsSeen;
+my $HadArch = 0;
+my $HasSDK = 0;
+
+# Process the arguments.
+foreach (my $i = 0; $i < scalar(@ARGV); ++$i) {
+ my $Arg = $ARGV[$i];
+ my ($ArgKey) = split /=/,$Arg,2;
+
+ # Be friendly to "" in the argument list.
+ if (!defined($ArgKey)) {
+ next;
+ }
+
+ # Modes ccc-analyzer supports
+ if ($Arg =~ /^-(E|MM?)$/) { $Action = 'preprocess'; }
+ elsif ($Arg eq '-c') { $Action = 'compile'; }
+ elsif ($Arg =~ /^-print-prog-name/) { exit 0; }
+
+ # Specially handle duplicate cases of -arch
+ if ($Arg eq "-arch") {
+ my $arch = $ARGV[$i+1];
+    # We don't want to process 'ppc' because of Clang's lack of support
+    # for Altivec (also some #defines likely won't be defined correctly, etc.)
+ if (!(defined $DisabledArchs{$arch})) { $ArchsSeen{$arch} = 1; }
+ $HadArch = 1;
+ ++$i;
+ next;
+ }
+
+  # On OSX/iOS, record whether an SDK path was specified. This
+  # is innocuous on other platforms, so the check happens unconditionally.
+ if ($Arg =~ /^-isysroot/) {
+ $HasSDK = 1;
+ }
+
+ # Options with possible arguments that should pass through to compiler.
+ if (defined $CompileOptionMap{$ArgKey}) {
+ my $Cnt = $CompileOptionMap{$ArgKey};
+ push @CompileOpts,$Arg;
+ while ($Cnt > 0) { ++$i; --$Cnt; push @CompileOpts, $ARGV[$i]; }
+ next;
+ }
+ # Handle the case where there isn't a space after -iquote
+ if ($Arg =~ /^-iquote.*/) {
+ push @CompileOpts,$Arg;
+ next;
+ }
+
+ # Options with possible arguments that should pass through to linker.
+ if (defined $LinkerOptionMap{$ArgKey}) {
+ my $Cnt = $LinkerOptionMap{$ArgKey};
+ push @LinkOpts,$Arg;
+ while ($Cnt > 0) { ++$i; --$Cnt; push @LinkOpts, $ARGV[$i]; }
+ next;
+ }
+
+ # Options with possible arguments that should pass through to both compiler
+ # and the linker.
+ if (defined $CompilerLinkerOptionMap{$ArgKey}) {
+ my $Cnt = $CompilerLinkerOptionMap{$ArgKey};
+
+ # Check if this is an option that should have a unique value, and if so
+ # determine if the value was checked before.
+ if ($UniqueOptions{$Arg}) {
+ if (defined $Uniqued{$Arg}) {
+ $i += $Cnt;
+ next;
+ }
+ $Uniqued{$Arg} = 1;
+ }
+
+ push @CompileOpts,$Arg;
+ push @LinkOpts,$Arg;
+
+ while ($Cnt > 0) {
+ ++$i; --$Cnt;
+ push @CompileOpts, $ARGV[$i];
+ push @LinkOpts, $ARGV[$i];
+ }
+ next;
+ }
+
+ # Ignored options.
+ if (defined $IgnoredOptionMap{$ArgKey}) {
+ my $Cnt = $IgnoredOptionMap{$ArgKey};
+ while ($Cnt > 0) {
+ ++$i; --$Cnt;
+ }
+ next;
+ }
+
+ # Compile mode flags.
+ if ($Arg =~ /^-(?:[DIU]|isystem)(.*)$/) {
+ my $Tmp = $Arg;
+ if ($1 eq '') {
+ # FIXME: Check if we are going off the end.
+ ++$i;
+ $Tmp = $Arg . $ARGV[$i];
+ }
+ push @CompileOpts,$Tmp;
+ next;
+ }
+
+ if ($Arg =~ /^-m.*/) {
+ push @CompileOpts,$Arg;
+ next;
+ }
+
+ # Language.
+ if ($Arg eq '-x') {
+ $Lang = $ARGV[$i+1];
+ ++$i; next;
+ }
+
+ # Output file.
+ if ($Arg eq '-o') {
+ ++$i;
+ $Output = $ARGV[$i];
+ next;
+ }
+
+ # Get the link mode.
+  if ($Arg =~ /^-[lLO]/) {
+ if ($Arg eq '-O') { push @LinkOpts,'-O1'; }
+ elsif ($Arg eq '-Os') { push @LinkOpts,'-O2'; }
+ else { push @LinkOpts,$Arg; }
+
+ # Must pass this along for the __OPTIMIZE__ macro
+ if ($Arg =~ /^-O/) { push @CompileOpts,$Arg; }
+ next;
+ }
+
+ if ($Arg =~ /^-std=/) {
+ push @CompileOpts,$Arg;
+ next;
+ }
+
+ # Get the compiler/link mode.
+ if ($Arg =~ /^-F(.+)$/) {
+ my $Tmp = $Arg;
+ if ($1 eq '') {
+ # FIXME: Check if we are going off the end.
+ ++$i;
+ $Tmp = $Arg . $ARGV[$i];
+ }
+ push @CompileOpts,$Tmp;
+ push @LinkOpts,$Tmp;
+ next;
+ }
+
+ # Input files.
+ if ($Arg eq '-filelist') {
+ # FIXME: Make sure we aren't walking off the end.
+ open(IN, $ARGV[$i+1]);
+ while (<IN>) { s/\015?\012//; push @Files,$_; }
+ close(IN);
+ ++$i;
+ next;
+ }
+
+ if ($Arg =~ /^-f/) {
+ push @CompileOpts,$Arg;
+ push @LinkOpts,$Arg;
+ next;
+ }
+
+  # Handle -Wno- flags. We drop flags that enable extra warnings, but pass
+  # through the flags that suppress warnings we don't want to see.
+ if ($Arg =~ /^-Wno-/) {
+ push @CompileOpts, $Arg;
+ next;
+ }
+
+ # Handle -Xclang some-arg. Add both arguments to the compiler options.
+ if ($Arg =~ /^-Xclang$/) {
+ # FIXME: Check if we are going off the end.
+ ++$i;
+ push @CompileOpts, $Arg;
+ push @CompileOpts, $ARGV[$i];
+ next;
+ }
+
+ if (!($Arg =~ /^-/)) {
+ push @Files, $Arg;
+ next;
+ }
+}
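The loop above is table-driven: each option map records how many following argv entries belong to a flag, so the wrapper can copy the flag plus its operands into the right bucket (compile, link, both, or dropped). The same pattern in Python, with a deliberately truncated table:

    def collect_options(argv, option_map):
        """Gather flags plus their trailing operands, per the arity table."""
        kept, i = [], 0
        while i < len(argv):
            key = argv[i].split('=', 1)[0]
            if key in option_map:
                count = option_map[key]
                kept.extend(argv[i:i + 1 + count])
                i += count
            i += 1
        return kept

    compile_option_map = {'-include': 1, '-nostdinc': 0}  # truncated table
    print(collect_options(['-include', 'config.h', '-g'], compile_option_map))
    # prints: ['-include', 'config.h']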
+
+# If we are on OSX and have an installation where the
+# default SDK is inferred by xcrun, use xcrun to infer
+# the SDK.
+if (not $HasSDK and $UseXCRUN) {
+ my $sdk = `/usr/bin/xcrun --show-sdk-path -sdk macosx`;
+ chomp $sdk;
+ push @CompileOpts, "-isysroot", $sdk;
+}
+
+if ($Action eq 'compile' or $Action eq 'link') {
+ my @Archs = keys %ArchsSeen;
+ # Skip the file if we don't support the architectures specified.
+ exit 0 if ($HadArch && scalar(@Archs) == 0);
+
+ foreach my $file (@Files) {
+ # Determine the language for the file.
+ my $FileLang = $Lang;
+
+ if (!defined($FileLang)) {
+ # Infer the language from the extension.
+ if ($file =~ /[.]([^.]+)$/) {
+ $FileLang = $LangMap{$1};
+ }
+ }
+
+ # FileLang still not defined? Skip the file.
+ next if (!defined $FileLang);
+
+ # Language not accepted?
+ next if (!defined $LangsAccepted{$FileLang});
+
+ my @CmdArgs;
+ my @AnalyzeArgs;
+
+ if ($FileLang ne 'unknown') {
+ push @CmdArgs, '-x', $FileLang;
+ }
+
+ if (defined $StoreModel) {
+ push @AnalyzeArgs, "-analyzer-store=$StoreModel";
+ }
+
+ if (defined $ConstraintsModel) {
+ push @AnalyzeArgs, "-analyzer-constraints=$ConstraintsModel";
+ }
+
+ if (defined $InternalStats) {
+ push @AnalyzeArgs, "-analyzer-stats";
+ }
+
+ if (defined $Analyses) {
+ push @AnalyzeArgs, split '\s+', $Analyses;
+ }
+
+ if (defined $Plugins) {
+ push @AnalyzeArgs, split '\s+', $Plugins;
+ }
+
+ if (defined $OutputFormat) {
+ push @AnalyzeArgs, "-analyzer-output=" . $OutputFormat;
+ if ($OutputFormat =~ /plist/) {
+ # Change "Output" to be a file.
+ my ($h, $f) = tempfile("report-XXXXXX", SUFFIX => ".plist",
+ DIR => $HtmlDir);
+ $ResultFile = $f;
+ # If the HtmlDir is not set, we should clean up the plist files.
+ if (!defined $HtmlDir || -z $HtmlDir) {
+ $CleanupFile = $f;
+ }
+ }
+ }
+ if (defined $ConfigOptions) {
+ push @AnalyzeArgs, split '\s+', $ConfigOptions;
+ }
+
+ push @CmdArgs, @CompileOpts;
+ push @CmdArgs, $file;
+
+ if (scalar @Archs) {
+ foreach my $arch (@Archs) {
+ my @NewArgs;
+ push @NewArgs, '-arch', $arch;
+ push @NewArgs, @CmdArgs;
+ Analyze($Clang, \@NewArgs, \@AnalyzeArgs, $FileLang, $Output,
+ $Verbose, $HtmlDir, $file);
+ }
+ }
+ else {
+ Analyze($Clang, \@CmdArgs, \@AnalyzeArgs, $FileLang, $Output,
+ $Verbose, $HtmlDir, $file);
+ }
+ }
+}
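The tail of the driver fans out: every accepted source file is analyzed once per surviving -arch value (ppc and ppc64 having been filtered out earlier), or exactly once when no -arch was given. Sketched in Python, with a stand-in for the Analyze subroutine:

    def analyze_all(files, archs, cmd_args, analyze):
        """Invoke the analyzer once per file, fanned out over architectures."""
        for path in files:
            if archs:
                for arch in archs:
                    analyze(['-arch', arch] + cmd_args + [path])
            else:
                analyze(cmd_args + [path])

    # Stand-in callback; the real script builds a clang invocation here.
    analyze_all(['a.c', 'b.c'], ['arm64', 'x86_64'], ['-x', 'c'],
                analyze=lambda args: print(args))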
diff --git a/clang-2690385/tools/scan-build/libexec/ccc-analyzer.bat b/clang-2690385/tools/scan-build/libexec/ccc-analyzer.bat
new file mode 100644
index 00000000..2a85376e
--- /dev/null
+++ b/clang-2690385/tools/scan-build/libexec/ccc-analyzer.bat
@@ -0,0 +1 @@
+perl -S ccc-analyzer %*
diff --git a/clang-2690385/tools/scan-build/man/scan-build.1 b/clang-2690385/tools/scan-build/man/scan-build.1
new file mode 100644
index 00000000..3d3a9f8e
--- /dev/null
+++ b/clang-2690385/tools/scan-build/man/scan-build.1
@@ -0,0 +1,349 @@
+.\" This file is distributed under the University of Illinois Open Source
+.\" License. See LICENSE.TXT for details.
+.\" $Id$
+.Dd May 25, 2012
+.Dt SCAN-BUILD 1
+.Os "clang" "3.5"
+.Sh NAME
+.Nm scan-build
+.Nd Clang static analyzer
+.Sh SYNOPSIS
+.Nm
+.Op Fl ohkvV
+.Op Fl analyze-headers
+.Op Fl enable-checker Op Ar checker_name
+.Op Fl disable-checker Op Ar checker_name
+.Op Fl Fl help
+.Op Fl Fl help-checkers
+.Op Fl Fl html-title Op Ar =title
+.Op Fl Fl keep-going
+.Op Fl plist
+.Op Fl plist-html
+.Op Fl Fl status-bugs
+.Op Fl Fl use-c++ Op Ar =compiler_path
+.Op Fl Fl use-cc Op Ar =compiler_path
+.Op Fl Fl view
+.Op Fl constraints Op Ar model
+.Op Fl maxloop Ar N
+.Op Fl no-failure-reports
+.Op Fl stats
+.Op Fl store Op Ar model
+.Ar build_command
+.Op build_options
+.\"
+.\" Sh DESCRIPTION
+.Sh DESCRIPTION
+.Nm
+is a Perl script that invokes the Clang static analyzer. Options used by
+.Nm
+or by the analyzer appear first, followed by the
+.Ar build_command
+and any
+.Ar build_options
+normally used to build the target system.
+.Pp
+The static analyzer employs a long list of checking algorithms, see
+.Sx CHECKERS .
+Output can be written in standard
+.Li .plist
+and/or HTML format.
+.Pp
+The following options are supported:
+.Bl -tag -width indent
+.It Fl analyze-headers
+Also analyze functions in #included files.
+.It Fl enable-checker Ar checker_name , Fl disable-checker Ar checker_name
+Enable/disable
+.Ar checker_name .
+See
+.Sx CHECKERS .
+.It Fl h , Fl Fl help
+Display this message.
+.It Fl Fl help-checkers
+List default checkers, see
+.Sx CHECKERS .
+.It Fl Fl html-title Ns Op = Ns Ar title
+Specify the title used on generated HTML pages.
+A default title is generated if
+.Ar title
+is not specified.
+.It Fl k , Fl Fl keep-going
+Add a
+.Dq keep on going
+option to
+.Ar build_command .
+Currently supports make and xcodebuild. This is a convenience option;
+one can specify this behavior directly using build options.
+.It Fl o
+Target directory for HTML report files. Subdirectories will be
+created as needed to represent separate invocations
+of the analyzer. If this option is not specified, a directory is
+created in /tmp (TMPDIR on Mac OS X) to store the reports.
+.It Fl plist
+Output the results as a set of
+.Li .plist
+files. (By default the output of
+.Nm
+is a set of HTML files.)
+.It Fl plist-html
+Output the results as a set of HTML and .plist files.
+.It Fl Fl status-bugs
+Set the exit status to 1 if potential bugs were found and 0 otherwise. By
+default the exit status of
+.Nm
+is that returned by
+.Ar build_command .
+.It Fl Fl use-c++ Ns Op = Ns Ar compiler_path
+Guess the default compiler for your C++ and Objective-C++ code. Use this
+option to specify an alternate compiler.
+.It Fl Fl use-cc Ns Op = Ns Ar compiler_path
+Guess the default compiler for your C and Objective-C code. Use this
+option to specify an alternate compiler.
+.It Fl v
+Verbose output from
+.Nm
+and the analyzer. A second and
+third
+.Ar v
+increase verbosity.
+.It Fl V , Fl Fl view
+View analysis results in a web browser when the build completes.
+.It Fl constraints Op Ar model
+Specify the constraint engine used by the analyzer. By default the
+.Ql range
+model is used. Specifying
+.Ql basic
+uses a simpler, less powerful constraint model used by checker-0.160
+and earlier.
+.It Fl maxloop Ar N
+Specify the number of times a block can be visited before giving
+up. The default is 4. Increase for more comprehensive coverage at a
+cost of speed.
+.It Fl no-failure-reports
+Do not create a
+.Ql failures
+subdirectory that includes analyzer crash reports and preprocessed
+source files.
+.It Fl stats
+Generates visitation statistics for the project being analyzed.
+.It Fl store Op Ar model
+Specify the store model used by the analyzer. By default, the
+.Ql region
+store model is used.
+.Ql region
+specifies a field-sensitive store model. Users can also specify
+.Ql basic
+which is far less precise but can analyze code more quickly.
+.Ql basic
+was the default store model for checker-0.221 and earlier.
+.\"
+.El
+.Sh EXIT STATUS
+.Nm
+returns the value returned by
+.Ar build_command
+unless
+.Fl Fl status-bugs
+or
+.Fl Fl keep-going
+is used.
+.\"
+.\" Other sections not yet used ...
+.\" .Sh ENVIRONMENT
+.\" .Sh FILES
+.\" .Sh DIAGNOSTICS
+.\" .Sh COMPATIBILITY
+.\" .Sh HISTORY
+.\" .Sh BUGS
+.\"
+.Sh CHECKERS
+The checkers listed below may be enabled/disabled using the
+.Fl enable-checker
+and
+.Fl disable-checker
+options.
+A default group of checkers is run unless explicitly disabled.
+Exactly which checkers constitute the default group is a function
+of the operating system in use; they are listed with
+.Fl Fl help-checkers .
+.Bl -tag -width indent
+.It core.AdjustedReturnValue
+Check to see if the return value of a function call is different from
+what the caller expects (e.g., from calls through function pointers).
+.It core.AttributeNonNull
+Check for null pointers passed as arguments to a function whose arguments are marked with the
+.Ql nonnull
+attribute.
+.It core.CallAndMessage
+Check for logical errors for function calls and Objective-C message expressions (e.g., uninitialized arguments, null function pointers).
+.It core.DivideZero
+Check for division by zero.
+.It core.NullDereference
+Check for dereferences of null pointers.
+.It core.StackAddressEscape
+Check that addresses to stack memory do not escape the function.
+.It core.UndefinedBinaryOperatorResult
+Check for undefined results of binary operators.
+.It core.VLASize
+Check for declarations of VLA of undefined or zero size.
+.It core.builtin.BuiltinFunctions
+Evaluate compiler builtin functions, e.g.
+.Fn alloca .
+.It core.builtin.NoReturnFunctions
+Evaluate
+.Ql panic
+functions that are known to not return to the caller.
+.It core.uninitialized.ArraySubscript
+Check for uninitialized values used as array subscripts.
+.It core.uninitialized.Assign
+Check for assigning uninitialized values.
+.It core.uninitialized.Branch
+Check for uninitialized values used as branch conditions.
+.It core.uninitialized.CapturedBlockVariable
+Check for blocks that capture uninitialized values.
+.It core.uninitialized.UndefReturn
+Check for uninitialized values being returned to the caller.
+.It deadcode.DeadStores
+Check for values stored to variables that are never read afterwards.
+.It debug.DumpCFG
+Display Control-Flow Graphs.
+.It debug.DumpCallGraph
+Display Call Graph.
+.It debug.DumpDominators
+Print the dominance tree for a given Control-Flow Graph.
+.It debug.DumpLiveVars
+Print results of live variable analysis.
+.It debug.Stats
+Emit warnings with analyzer statistics.
+.It debug.TaintTest
+Mark tainted symbols as such.
+.It debug.ViewCFG
+View Control-Flow Graphs using
+.Ic GraphViz .
+.It debug.ViewCallGraph
+View Call Graph using
+.Ic GraphViz .
+.It llvm.Conventions
+Check code for LLVM codebase conventions.
+.It osx.API
+Check for proper uses of various Mac OS X APIs.
+.It osx.AtomicCAS
+Evaluate calls to
+.Vt OSAtomic
+functions.
+.It osx.SecKeychainAPI
+Check for proper uses of Secure Keychain APIs.
+.It osx.cocoa.AtSync
+Check for null pointers used as mutexes for @synchronized.
+.It osx.cocoa.ClassRelease
+Check for sending
+.Ql retain ,
+.Ql release ,
+or
+.Ql autorelease
+directly to a Class.
+.It osx.cocoa.IncompatibleMethodTypes
+Warn about Objective-C method signatures with type incompatibilities.
+.It osx.cocoa.NSAutoreleasePool
+Warn for suboptimal uses of
+.Vt NSAutoreleasePool
+in Objective-C GC mode.
+.It osx.cocoa.NSError
+Check usage of NSError** parameters.
+.It osx.cocoa.NilArg
+Check for prohibited nil arguments to Objective-C method calls.
+.It osx.cocoa.RetainCount
+Check for leaks and improper reference count management.
+.It osx.cocoa.SelfInit
+Check that
+.Ql self
+is properly initialized inside an initializer method.
+.It osx.cocoa.UnusedIvars
+Warn about private ivars that are never used.
+.It osx.cocoa.VariadicMethodTypes
+Check for passing non-Objective-C types to variadic methods that expect only Objective-C types.
+.It osx.coreFoundation.CFError
+Check usage of CFErrorRef* parameters.
+.It osx.coreFoundation.CFNumber
+Check for proper uses of
+.Fn CFNumberCreate .
+.It osx.coreFoundation.CFRetainRelease
+Check for null arguments to
+.Fn CFRetain ,
+.Fn CFRelease ,
+and
+.Fn CFMakeCollectable .
+.It osx.coreFoundation.containers.OutOfBounds
+Check for out-of-bounds indexing when using the
+.Vt CFArray
+API.
+.It osx.coreFoundation.containers.PointerSizedValues
+Warns if
+.Vt CFArray ,
+.Vt CFDictionary ,
+or
+.Vt CFSet
+are created with non-pointer-size values.
+.It security.FloatLoopCounter
+Warn on using a floating point value as a loop counter (CERT: FLP30-C, FLP30-CPP).
+.It security.insecureAPI.UncheckedReturn
+Warn on uses of functions whose return values must always be checked.
+.It security.insecureAPI.getpw
+Warn on uses of
+.Fn getpw .
+.It security.insecureAPI.gets
+Warn on uses of
+.Fn gets .
+.It security.insecureAPI.mkstemp
+Warn when
+.Fn mkstemp
+is passed fewer than 6 X's in the format string.
+.It security.insecureAPI.mktemp
+Warn on uses of
+.Fn mktemp .
+.It security.insecureAPI.rand
+Warn on uses of
+.Fn rand ,
+.Fn random ,
+and related functions.
+.It security.insecureAPI.strcpy
+Warn on uses of
+.Fn strcpy
+and
+.Fn strcat .
+.It security.insecureAPI.vfork
+Warn on uses of
+.Fn vfork .
+.It unix.API
+Check calls to various UNIX/Posix functions.
+.It unix.Malloc
+Check for memory leaks, double free, and use-after-free.
+.It unix.cstring.BadSizeArg
+Check the size argument passed into C string functions for common
+erroneous patterns.
+.It unix.cstring.NullArg
+Check for null pointers being passed as arguments to C string functions.
+.El
+.\"
+.Sh EXAMPLE
+.Ic scan-build -o /tmp/myhtmldir make -j4
+.Pp
+The above example causes analysis reports to be deposited into
+a subdirectory of
+.Pa /tmp/myhtmldir
+and to run
+.Ic make
+with the
+.Fl j4
+option.
+A different subdirectory is created each time
+.Nm
+analyzes a project.
+The analyzer should support most parallel builds, but not distributed builds.
+.Sh AUTHORS
+.Nm
+was written by
+.An "Ted Kremenek" .
+Documentation contributed by
+.An "James K. Lowden" Aq jklowden@schemamania.org .
diff --git a/clang-2690385/tools/scan-build/share/scan-build/scanview.css b/clang-2690385/tools/scan-build/share/scan-build/scanview.css
new file mode 100644
index 00000000..cf8a5a6a
--- /dev/null
+++ b/clang-2690385/tools/scan-build/share/scan-build/scanview.css
@@ -0,0 +1,62 @@
+body { color:#000000; background-color:#ffffff }
+body { font-family: Helvetica, sans-serif; font-size:9pt }
+h1 { font-size: 14pt; }
+h2 { font-size: 12pt; }
+table { font-size:9pt }
+table { border-spacing: 0px; border: 1px solid black }
+th, table thead {
+ background-color:#eee; color:#666666;
+ font-weight: bold; cursor: default;
+ text-align:center;
+ font-weight: bold; font-family: Verdana;
+ white-space:nowrap;
+}
+.W { font-size:0px }
+th, td { padding:5px; padding-left:8px; text-align:left }
+td.SUMM_DESC { padding-left:12px }
+td.DESC { white-space:pre }
+td.Q { text-align:right }
+td { text-align:left }
+tbody.scrollContent { overflow:auto }
+
+table.form_group {
+ background-color: #ccc;
+ border: 1px solid #333;
+ padding: 2px;
+}
+
+table.form_inner_group {
+ background-color: #ccc;
+ border: 1px solid #333;
+ padding: 0px;
+}
+
+table.form {
+ background-color: #999;
+ border: 1px solid #333;
+ padding: 2px;
+}
+
+td.form_label {
+ text-align: right;
+ vertical-align: top;
+}
+/* For one-line entries */
+td.form_clabel {
+ text-align: right;
+  vertical-align: middle;
+}
+td.form_value {
+ text-align: left;
+ vertical-align: top;
+}
+td.form_submit {
+ text-align: right;
+ vertical-align: top;
+}
+
+h1.SubmitFail {
+ color: #f00;
+}
+h1.SubmitOk {
+}
diff --git a/clang-2690385/tools/scan-build/share/scan-build/sorttable.js b/clang-2690385/tools/scan-build/share/scan-build/sorttable.js
new file mode 100644
index 00000000..32faa078
--- /dev/null
+++ b/clang-2690385/tools/scan-build/share/scan-build/sorttable.js
@@ -0,0 +1,492 @@
+/*
+ SortTable
+ version 2
+ 7th April 2007
+ Stuart Langridge, http://www.kryogenix.org/code/browser/sorttable/
+
+ Instructions:
+ Download this file
+ Add <script src="sorttable.js"></script> to your HTML
+ Add class="sortable" to any table you'd like to make sortable
+ Click on the headers to sort
+
+ Thanks to many, many people for contributions and suggestions.
+ Licenced as X11: http://www.kryogenix.org/code/browser/licence.html
+ This basically means: do what you want with it.
+*/
+
+
+var stIsIE = /*@cc_on!@*/false;
+
+sorttable = {
+ init: function() {
+ // quit if this function has already been called
+ if (arguments.callee.done) return;
+ // flag this function so we don't do the same thing twice
+ arguments.callee.done = true;
+ // kill the timer
+ if (_timer) clearInterval(_timer);
+
+ if (!document.createElement || !document.getElementsByTagName) return;
+
+ sorttable.DATE_RE = /^(\d\d?)[\/\.-](\d\d?)[\/\.-]((\d\d)?\d\d)$/;
+
+ forEach(document.getElementsByTagName('table'), function(table) {
+ if (table.className.search(/\bsortable\b/) != -1) {
+ sorttable.makeSortable(table);
+ }
+ });
+
+ },
+
+ makeSortable: function(table) {
+ if (table.getElementsByTagName('thead').length == 0) {
+ // table doesn't have a tHead. Since it should have, create one and
+ // put the first table row in it.
+ the = document.createElement('thead');
+ the.appendChild(table.rows[0]);
+ table.insertBefore(the,table.firstChild);
+ }
+ // Safari doesn't support table.tHead, sigh
+ if (table.tHead == null) table.tHead = table.getElementsByTagName('thead')[0];
+
+ if (table.tHead.rows.length != 1) return; // can't cope with two header rows
+
+ // Sorttable v1 put rows with a class of "sortbottom" at the bottom (as
+ // "total" rows, for example). This is B&R, since what you're supposed
+ // to do is put them in a tfoot. So, if there are sortbottom rows,
+ // for backward compatibility, move them to tfoot (creating it if needed).
+ sortbottomrows = [];
+ for (var i=0; i<table.rows.length; i++) {
+ if (table.rows[i].className.search(/\bsortbottom\b/) != -1) {
+ sortbottomrows[sortbottomrows.length] = table.rows[i];
+ }
+ }
+    if (sortbottomrows.length) {
+ if (table.tFoot == null) {
+ // table doesn't have a tfoot. Create one.
+ tfo = document.createElement('tfoot');
+ table.appendChild(tfo);
+ }
+ for (var i=0; i<sortbottomrows.length; i++) {
+ tfo.appendChild(sortbottomrows[i]);
+ }
+ delete sortbottomrows;
+ }
+
+ // work through each column and calculate its type
+ headrow = table.tHead.rows[0].cells;
+ for (var i=0; i<headrow.length; i++) {
+ // manually override the type with a sorttable_type attribute
+ if (!headrow[i].className.match(/\bsorttable_nosort\b/)) { // skip this col
+ mtch = headrow[i].className.match(/\bsorttable_([a-z0-9]+)\b/);
+ if (mtch) { override = mtch[1]; }
+ if (mtch && typeof sorttable["sort_"+override] == 'function') {
+ headrow[i].sorttable_sortfunction = sorttable["sort_"+override];
+ } else {
+ headrow[i].sorttable_sortfunction = sorttable.guessType(table,i);
+ }
+ // make it clickable to sort
+ headrow[i].sorttable_columnindex = i;
+ headrow[i].sorttable_tbody = table.tBodies[0];
+ dean_addEvent(headrow[i],"click", function(e) {
+
+ if (this.className.search(/\bsorttable_sorted\b/) != -1) {
+ // if we're already sorted by this column, just
+ // reverse the table, which is quicker
+ sorttable.reverse(this.sorttable_tbody);
+ this.className = this.className.replace('sorttable_sorted',
+ 'sorttable_sorted_reverse');
+ this.removeChild(document.getElementById('sorttable_sortfwdind'));
+ sortrevind = document.createElement('span');
+ sortrevind.id = "sorttable_sortrevind";
+          sortrevind.innerHTML = stIsIE ? '&nbsp;<font face="webdings">5</font>' : '&nbsp;&#x25B4;';
+ this.appendChild(sortrevind);
+ return;
+ }
+ if (this.className.search(/\bsorttable_sorted_reverse\b/) != -1) {
+ // if we're already sorted by this column in reverse, just
+ // re-reverse the table, which is quicker
+ sorttable.reverse(this.sorttable_tbody);
+ this.className = this.className.replace('sorttable_sorted_reverse',
+ 'sorttable_sorted');
+ this.removeChild(document.getElementById('sorttable_sortrevind'));
+ sortfwdind = document.createElement('span');
+ sortfwdind.id = "sorttable_sortfwdind";
+          sortfwdind.innerHTML = stIsIE ? '&nbsp;<font face="webdings">6</font>' : '&nbsp;&#x25BE;';
+ this.appendChild(sortfwdind);
+ return;
+ }
+
+ // remove sorttable_sorted classes
+ theadrow = this.parentNode;
+ forEach(theadrow.childNodes, function(cell) {
+ if (cell.nodeType == 1) { // an element
+ cell.className = cell.className.replace('sorttable_sorted_reverse','');
+ cell.className = cell.className.replace('sorttable_sorted','');
+ }
+ });
+ sortfwdind = document.getElementById('sorttable_sortfwdind');
+ if (sortfwdind) { sortfwdind.parentNode.removeChild(sortfwdind); }
+ sortrevind = document.getElementById('sorttable_sortrevind');
+ if (sortrevind) { sortrevind.parentNode.removeChild(sortrevind); }
+
+ this.className += ' sorttable_sorted';
+ sortfwdind = document.createElement('span');
+ sortfwdind.id = "sorttable_sortfwdind";
+      sortfwdind.innerHTML = stIsIE ? '&nbsp;<font face="webdings">6</font>' : '&nbsp;&#x25BE;';
+ this.appendChild(sortfwdind);
+
+ // build an array to sort. This is a Schwartzian transform thing,
+ // i.e., we "decorate" each row with the actual sort key,
+ // sort based on the sort keys, and then put the rows back in order
+ // which is a lot faster because you only do getInnerText once per row
+ row_array = [];
+ col = this.sorttable_columnindex;
+ rows = this.sorttable_tbody.rows;
+ for (var j=0; j<rows.length; j++) {
+ row_array[row_array.length] = [sorttable.getInnerText(rows[j].cells[col]), rows[j]];
+ }
+ /* If you want a stable sort, uncomment the following line */
+ sorttable.shaker_sort(row_array, this.sorttable_sortfunction);
+ /* and comment out this one */
+ //row_array.sort(this.sorttable_sortfunction);
+
+ tb = this.sorttable_tbody;
+ for (var j=0; j<row_array.length; j++) {
+ tb.appendChild(row_array[j][1]);
+ }
+
+ delete row_array;
+ });
+ }
+ }
+ },
+
+ guessType: function(table, column) {
+ // guess the type of a column based on its first non-blank row
+ sortfn = sorttable.sort_alpha;
+ for (var i=0; i<table.tBodies[0].rows.length; i++) {
+ text = sorttable.getInnerText(table.tBodies[0].rows[i].cells[column]);
+ if (text != '') {
+ if (text.match(/^-?[£$¤]?[\d,.]+%?$/)) {
+ return sorttable.sort_numeric;
+ }
+ // check for a date: dd/mm/yyyy or dd/mm/yy
+ // can have / or . or - as separator
+ // can be mm/dd as well
+      possdate = text.match(sorttable.DATE_RE);
+ if (possdate) {
+ // looks like a date
+ first = parseInt(possdate[1]);
+ second = parseInt(possdate[2]);
+ if (first > 12) {
+ // definitely dd/mm
+ return sorttable.sort_ddmm;
+ } else if (second > 12) {
+ return sorttable.sort_mmdd;
+ } else {
+ // looks like a date, but we can't tell which, so assume
+ // that it's dd/mm (English imperialism!) and keep looking
+ sortfn = sorttable.sort_ddmm;
+ }
+ }
+ }
+ }
+ return sortfn;
+ },
+
+ getInnerText: function(node) {
+ // gets the text we want to use for sorting for a cell.
+ // strips leading and trailing whitespace.
+ // this is *not* a generic getInnerText function; it's special to sorttable.
+ // for example, you can override the cell text with a customkey attribute.
+ // it also gets .value for <input> fields.
+
+ hasInputs = (typeof node.getElementsByTagName == 'function') &&
+ node.getElementsByTagName('input').length;
+
+ if (node.getAttribute("sorttable_customkey") != null) {
+ return node.getAttribute("sorttable_customkey");
+ }
+ else if (typeof node.textContent != 'undefined' && !hasInputs) {
+ return node.textContent.replace(/^\s+|\s+$/g, '');
+ }
+ else if (typeof node.innerText != 'undefined' && !hasInputs) {
+ return node.innerText.replace(/^\s+|\s+$/g, '');
+ }
+ else if (typeof node.text != 'undefined' && !hasInputs) {
+ return node.text.replace(/^\s+|\s+$/g, '');
+ }
+ else {
+ switch (node.nodeType) {
+ case 3:
+ if (node.nodeName.toLowerCase() == 'input') {
+ return node.value.replace(/^\s+|\s+$/g, '');
+ }
+ case 4:
+ return node.nodeValue.replace(/^\s+|\s+$/g, '');
+ break;
+ case 1:
+ case 11:
+ var innerText = '';
+ for (var i = 0; i < node.childNodes.length; i++) {
+ innerText += sorttable.getInnerText(node.childNodes[i]);
+ }
+ return innerText.replace(/^\s+|\s+$/g, '');
+ break;
+ default:
+ return '';
+ }
+ }
+ },
+
+ reverse: function(tbody) {
+ // reverse the rows in a tbody
+ newrows = [];
+ for (var i=0; i<tbody.rows.length; i++) {
+ newrows[newrows.length] = tbody.rows[i];
+ }
+ for (var i=newrows.length-1; i>=0; i--) {
+ tbody.appendChild(newrows[i]);
+ }
+ delete newrows;
+ },
+
+ /* sort functions
+ each sort function takes two parameters, a and b
+ you are comparing a[0] and b[0] */
+ sort_numeric: function(a,b) {
+ aa = parseFloat(a[0].replace(/[^0-9.-]/g,''));
+ if (isNaN(aa)) aa = 0;
+ bb = parseFloat(b[0].replace(/[^0-9.-]/g,''));
+ if (isNaN(bb)) bb = 0;
+ return aa-bb;
+ },
+ sort_alpha: function(a,b) {
+ if (a[0]==b[0]) return 0;
+ if (a[0]<b[0]) return -1;
+ return 1;
+ },
+ sort_ddmm: function(a,b) {
+ mtch = a[0].match(sorttable.DATE_RE);
+ y = mtch[3]; m = mtch[2]; d = mtch[1];
+ if (m.length == 1) m = '0'+m;
+ if (d.length == 1) d = '0'+d;
+ dt1 = y+m+d;
+ mtch = b[0].match(sorttable.DATE_RE);
+ y = mtch[3]; m = mtch[2]; d = mtch[1];
+ if (m.length == 1) m = '0'+m;
+ if (d.length == 1) d = '0'+d;
+ dt2 = y+m+d;
+ if (dt1==dt2) return 0;
+ if (dt1<dt2) return -1;
+ return 1;
+ },
+ sort_mmdd: function(a,b) {
+ mtch = a[0].match(sorttable.DATE_RE);
+ y = mtch[3]; d = mtch[2]; m = mtch[1];
+ if (m.length == 1) m = '0'+m;
+ if (d.length == 1) d = '0'+d;
+ dt1 = y+m+d;
+ mtch = b[0].match(sorttable.DATE_RE);
+ y = mtch[3]; d = mtch[2]; m = mtch[1];
+ if (m.length == 1) m = '0'+m;
+ if (d.length == 1) d = '0'+d;
+ dt2 = y+m+d;
+ if (dt1==dt2) return 0;
+ if (dt1<dt2) return -1;
+ return 1;
+ },
+
+ shaker_sort: function(list, comp_func) {
+ // A stable sort function to allow multi-level sorting of data
+ // see: http://en.wikipedia.org/wiki/Cocktail_sort
+ // thanks to Joseph Nahmias
+ var b = 0;
+ var t = list.length - 1;
+ var swap = true;
+
+ while(swap) {
+ swap = false;
+ for(var i = b; i < t; ++i) {
+ if ( comp_func(list[i], list[i+1]) > 0 ) {
+ var q = list[i]; list[i] = list[i+1]; list[i+1] = q;
+ swap = true;
+ }
+ } // for
+ t--;
+
+ if (!swap) break;
+
+ for(var i = t; i > b; --i) {
+ if ( comp_func(list[i], list[i-1]) < 0 ) {
+ var q = list[i]; list[i] = list[i-1]; list[i-1] = q;
+ swap = true;
+ }
+ } // for
+ b++;
+
+ } // while(swap)
+ }
+}
+
+/* ******************************************************************
+ Supporting functions: bundled here to avoid depending on a library
+ ****************************************************************** */
+
+// Dean Edwards/Matthias Miller/John Resig
+
+/* for Mozilla/Opera9 */
+if (document.addEventListener) {
+ document.addEventListener("DOMContentLoaded", sorttable.init, false);
+}
+
+/* for Internet Explorer */
+/*@cc_on @*/
+/*@if (@_win32)
+ document.write("<script id=__ie_onload defer src=javascript:void(0)><\/script>");
+ var script = document.getElementById("__ie_onload");
+ script.onreadystatechange = function() {
+ if (this.readyState == "complete") {
+ sorttable.init(); // call the onload handler
+ }
+ };
+/*@end @*/
+
+/* for Safari */
+if (/WebKit/i.test(navigator.userAgent)) { // sniff
+ var _timer = setInterval(function() {
+ if (/loaded|complete/.test(document.readyState)) {
+ sorttable.init(); // call the onload handler
+ }
+ }, 10);
+}
+
+/* for other browsers */
+window.onload = sorttable.init;
+
+// written by Dean Edwards, 2005
+// with input from Tino Zijdel, Matthias Miller, Diego Perini
+
+// http://dean.edwards.name/weblog/2005/10/add-event/
+
+function dean_addEvent(element, type, handler) {
+ if (element.addEventListener) {
+ element.addEventListener(type, handler, false);
+ } else {
+ // assign each event handler a unique ID
+ if (!handler.$$guid) handler.$$guid = dean_addEvent.guid++;
+ // create a hash table of event types for the element
+ if (!element.events) element.events = {};
+ // create a hash table of event handlers for each element/event pair
+ var handlers = element.events[type];
+ if (!handlers) {
+ handlers = element.events[type] = {};
+ // store the existing event handler (if there is one)
+ if (element["on" + type]) {
+ handlers[0] = element["on" + type];
+ }
+ }
+ // store the event handler in the hash table
+ handlers[handler.$$guid] = handler;
+ // assign a global event handler to do all the work
+ element["on" + type] = handleEvent;
+ }
+};
+// a counter used to create unique IDs
+dean_addEvent.guid = 1;
+
+function removeEvent(element, type, handler) {
+ if (element.removeEventListener) {
+ element.removeEventListener(type, handler, false);
+ } else {
+ // delete the event handler from the hash table
+ if (element.events && element.events[type]) {
+ delete element.events[type][handler.$$guid];
+ }
+ }
+};
+
+function handleEvent(event) {
+ var returnValue = true;
+ // grab the event object (IE uses a global event object)
+ event = event || fixEvent(((this.ownerDocument || this.document || this).parentWindow || window).event);
+ // get a reference to the hash table of event handlers
+ var handlers = this.events[event.type];
+ // execute each event handler
+ for (var i in handlers) {
+ this.$$handleEvent = handlers[i];
+ if (this.$$handleEvent(event) === false) {
+ returnValue = false;
+ }
+ }
+ return returnValue;
+};
+
+function fixEvent(event) {
+ // add W3C standard event methods
+ event.preventDefault = fixEvent.preventDefault;
+ event.stopPropagation = fixEvent.stopPropagation;
+ return event;
+};
+fixEvent.preventDefault = function() {
+ this.returnValue = false;
+};
+fixEvent.stopPropagation = function() {
+ this.cancelBubble = true;
+}
+
+// Dean's forEach: http://dean.edwards.name/base/forEach.js
+/*
+ forEach, version 1.0
+ Copyright 2006, Dean Edwards
+ License: http://www.opensource.org/licenses/mit-license.php
+*/
+
+// array-like enumeration
+if (!Array.forEach) { // mozilla already supports this
+ Array.forEach = function(array, block, context) {
+ for (var i = 0; i < array.length; i++) {
+ block.call(context, array[i], i, array);
+ }
+ };
+}
+
+// generic enumeration
+Function.prototype.forEach = function(object, block, context) {
+ for (var key in object) {
+ if (typeof this.prototype[key] == "undefined") {
+ block.call(context, object[key], key, object);
+ }
+ }
+};
+
+// character enumeration
+String.forEach = function(string, block, context) {
+ Array.forEach(string.split(""), function(chr, index) {
+ block.call(context, chr, index, string);
+ });
+};
+
+// globally resolve forEach enumeration
+var forEach = function(object, block, context) {
+ if (object) {
+ var resolve = Object; // default
+ if (object instanceof Function) {
+ // functions have a "length" property
+ resolve = Function;
+ } else if (object.forEach instanceof Function) {
+ // the object implements a custom forEach method so use that
+ object.forEach(block, context);
+ return;
+ } else if (typeof object == "string") {
+ // the object is a string
+ resolve = String;
+ } else if (typeof object.length == "number") {
+ // the object is array-like
+ resolve = Array;
+ }
+ resolve.forEach(object, block, context);
+ }
+};
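The comment in makeSortable names the trick: a Schwartzian transform, where each row is decorated with its extracted key so the relatively expensive getInnerText runs once per row rather than once per comparison. The decorate/sort/undecorate idea in Python (a sketch, not part of the page script):

    def sort_rows(rows, key_of):
        """Decorate each row with its key, sort on the key, then strip it."""
        decorated = [(key_of(row), row) for row in rows]  # one key_of per row
        decorated.sort(key=lambda pair: pair[0])
        return [row for _key, row in decorated]

    rows = [{'name': 'b'}, {'name': 'a'}]
    print(sort_rows(rows, key_of=lambda r: r['name']))
    # prints: [{'name': 'a'}, {'name': 'b'}]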
diff --git a/clang-2690385/tools/scan-view/CMakeLists.txt b/clang-2690385/tools/scan-view/CMakeLists.txt
new file mode 100644
index 00000000..b305ca56
--- /dev/null
+++ b/clang-2690385/tools/scan-view/CMakeLists.txt
@@ -0,0 +1,41 @@
+option(CLANG_INSTALL_SCANVIEW "Install the scan-view tool" ON)
+
+set(BinFiles
+ scan-view)
+
+set(ShareFiles
+ ScanView.py
+ Reporter.py
+ startfile.py
+ FileRadar.scpt
+ GetRadarVersion.scpt
+ bugcatcher.ico)
+
+if(CLANG_INSTALL_SCANVIEW)
+ foreach(BinFile ${BinFiles})
+ add_custom_command(OUTPUT ${CMAKE_BINARY_DIR}/bin/${BinFile}
+ COMMAND ${CMAKE_COMMAND} -E make_directory
+ ${CMAKE_BINARY_DIR}/bin
+ COMMAND ${CMAKE_COMMAND} -E copy
+ ${CMAKE_CURRENT_SOURCE_DIR}/bin/${BinFile}
+ ${CMAKE_BINARY_DIR}/bin/
+ DEPENDS ${CMAKE_CURRENT_SOURCE_DIR}/bin/${BinFile})
+ list(APPEND Depends ${CMAKE_BINARY_DIR}/bin/${BinFile})
+ install(PROGRAMS bin/${BinFile} DESTINATION bin)
+ endforeach()
+
+ foreach(ShareFile ${ShareFiles})
+ add_custom_command(OUTPUT ${CMAKE_BINARY_DIR}/share/scan-view/${ShareFile}
+ COMMAND ${CMAKE_COMMAND} -E make_directory
+ ${CMAKE_BINARY_DIR}/share/scan-view
+ COMMAND ${CMAKE_COMMAND} -E copy
+ ${CMAKE_CURRENT_SOURCE_DIR}/share/${ShareFile}
+ ${CMAKE_BINARY_DIR}/share/scan-view/
+ DEPENDS ${CMAKE_CURRENT_SOURCE_DIR}/share/${ShareFile})
+ list(APPEND Depends ${CMAKE_BINARY_DIR}/share/scan-view/${ShareFile})
+ install(FILES share/${ShareFile} DESTINATION share/scan-view)
+ endforeach()
+
+ add_custom_target(scan-view ALL DEPENDS ${Depends})
+ set_target_properties(scan-view PROPERTIES FOLDER "Misc")
+endif()
diff --git a/clang-2690385/tools/scan-view/Makefile b/clang-2690385/tools/scan-view/Makefile
new file mode 100644
index 00000000..37e4404d
--- /dev/null
+++ b/clang-2690385/tools/scan-view/Makefile
@@ -0,0 +1,37 @@
+##===- tools/scan-view/Makefile ----------------------------*- Makefile -*-===##
+#
+# The LLVM Compiler Infrastructure
+#
+# This file is distributed under the University of Illinois Open Source
+# License. See LICENSE.TXT for details.
+#
+##===----------------------------------------------------------------------===##
+
+CLANG_LEVEL := ../..
+
+include $(CLANG_LEVEL)/../../Makefile.config
+include $(CLANG_LEVEL)/Makefile
+
+CLANG_INSTALL_SCANVIEW ?= 1
+
+ifeq ($(CLANG_INSTALL_SCANVIEW), 1)
+ InstallTargets := $(ToolDir)/scan-view \
+ $(ShareDir)/scan-view/Reporter.py \
+ $(ShareDir)/scan-view/ScanView.py \
+ $(ShareDir)/scan-view/startfile.py \
+ $(ShareDir)/scan-view/FileRadar.scpt \
+ $(ShareDir)/scan-view/GetRadarVersion.scpt \
+ $(ShareDir)/scan-view/bugcatcher.ico
+endif
+
+all:: $(InstallTargets)
+
+$(ToolDir)/%: bin/% Makefile $(ToolDir)/.dir
+ $(Echo) "Copying $(notdir $<) to the 'bin' directory..."
+ $(Verb)cp $< $@
+ $(Verb)chmod +x $@
+
+$(ShareDir)/scan-view/%: share/% Makefile $(ShareDir)/scan-view/.dir
+ $(Echo) "Copying $(notdir $<) to the 'share' directory..."
+ $(Verb)cp $< $@
+
diff --git a/clang-2690385/tools/scan-view/bin/scan-view b/clang-2690385/tools/scan-view/bin/scan-view
new file mode 100755
index 00000000..1b6e8ba9
--- /dev/null
+++ b/clang-2690385/tools/scan-view/bin/scan-view
@@ -0,0 +1,143 @@
+#!/usr/bin/env python
+
+"""The clang static analyzer results viewer.
+"""
+
+import sys
+import imp
+import os
+import posixpath
+import thread
+import time
+import urllib
+import webbrowser
+
+# How long to wait for server to start.
+kSleepTimeout = .05
+kMaxSleeps = int(60 / kSleepTimeout)
+
+# Default server parameters
+
+kDefaultHost = '127.0.0.1'
+kDefaultPort = 8181
+kMaxPortsToTry = 100
+
+###
+
+
+def url_is_up(url):
+ try:
+ o = urllib.urlopen(url)
+ except IOError:
+ return False
+ o.close()
+ return True
+
+
+def start_browser(port, options):
+ import urllib
+ import webbrowser
+
+ url = 'http://%s:%d' % (options.host, port)
+
+ # Wait for server to start...
+ if options.debug:
+ sys.stderr.write('%s: Waiting for server.' % sys.argv[0])
+ sys.stderr.flush()
+ for i in range(kMaxSleeps):
+ if url_is_up(url):
+ break
+ if options.debug:
+ sys.stderr.write('.')
+ sys.stderr.flush()
+ time.sleep(kSleepTimeout)
+ else:
+ print >> sys.stderr, 'WARNING: Unable to detect that server started.'
+
+ if options.debug:
+ print >> sys.stderr, '%s: Starting webbrowser...' % sys.argv[0]
+ webbrowser.open(url)
+
+
+def run(port, options, root):
+ # Prefer to look relative to the installed binary
+ share = os.path.dirname(__file__) + "/../share/scan-view"
+ if not os.path.isdir(share):
+ # Otherwise look relative to the source
+ share = os.path.dirname(__file__) + "/../../scan-view/share"
+ sys.path.append(share)
+
+ import ScanView
+ try:
+ print 'Starting scan-view at: http://%s:%d' % (options.host,
+ port)
+ print ' Use Ctrl-C to exit.'
+ httpd = ScanView.create_server((options.host, port),
+ options, root)
+ httpd.serve_forever()
+ except KeyboardInterrupt:
+ pass
+
+
+def port_is_open(port):
+ import SocketServer
+ try:
+ t = SocketServer.TCPServer((kDefaultHost, port), None)
+ except:
+ return False
+ t.server_close()
+ return True
+
+
+def main():
+ import argparse
+ parser = argparse.ArgumentParser(description="The clang static analyzer "
+ "results viewer.")
+ parser.add_argument("root", metavar="<results directory>", type=str)
+ parser.add_argument(
+ '--host', dest="host", default=kDefaultHost, type=str,
+ help="Host interface to listen on. (default=%s)" % kDefaultHost)
+ parser.add_argument('--port', dest="port", default=None, type=int,
+ help="Port to listen on. (default=%s)" % kDefaultPort)
+ parser.add_argument("--debug", dest="debug", default=0,
+ action="count",
+ help="Print additional debugging information.")
+ parser.add_argument("--auto-reload", dest="autoReload", default=False,
+ action="store_true",
+ help="Automatically update module for each request.")
+ parser.add_argument("--no-browser", dest="startBrowser", default=True,
+ action="store_false",
+ help="Don't open a webbrowser on startup.")
+ parser.add_argument("--allow-all-hosts", dest="onlyServeLocal",
+ default=True, action="store_false",
+ help='Allow connections from any host (access '
+ 'restricted to "127.0.0.1" by default)')
+ args = parser.parse_args()
+
+ # Make sure this directory is in a reasonable state to view.
+ if not posixpath.exists(posixpath.join(args.root, 'index.html')):
+ parser.error('Invalid directory, analysis results not found!')
+
+ # Find an open port. We aren't particularly worried about race
+ # conditions here. Note that if the user specified a port we only
+ # use that one.
+ if args.port is not None:
+ port = args.port
+ else:
+ for i in range(kMaxPortsToTry):
+ if port_is_open(kDefaultPort + i):
+ port = kDefaultPort + i
+ break
+ else:
+ parser.error('Unable to find usable port in [%d,%d)' %
+ (kDefaultPort, kDefaultPort+kMaxPortsToTry))
+
+ # Kick off thread to wait for server and start web browser, if
+ # requested.
+ if args.startBrowser:
+ thread.start_new_thread(start_browser, (port, args))
+
+ run(port, args, args.root)
+
+if __name__ == '__main__':
+ main()
diff --git a/clang-2690385/tools/scan-view/share/FileRadar.scpt b/clang-2690385/tools/scan-view/share/FileRadar.scpt
new file mode 100644
index 00000000..1c745528
--- /dev/null
+++ b/clang-2690385/tools/scan-view/share/FileRadar.scpt
Binary files differ
diff --git a/clang-2690385/tools/scan-view/share/GetRadarVersion.scpt b/clang-2690385/tools/scan-view/share/GetRadarVersion.scpt
new file mode 100644
index 00000000..e69de29b
--- /dev/null
+++ b/clang-2690385/tools/scan-view/share/GetRadarVersion.scpt
diff --git a/clang-2690385/tools/scan-view/share/Reporter.py b/clang-2690385/tools/scan-view/share/Reporter.py
new file mode 100644
index 00000000..294e05b4
--- /dev/null
+++ b/clang-2690385/tools/scan-view/share/Reporter.py
@@ -0,0 +1,248 @@
+"""Methods for reporting bugs."""
+
+import subprocess, sys, os
+
+__all__ = ['ReportFailure', 'BugReport', 'getReporters']
+
+#
+
+class ReportFailure(Exception):
+ """Generic exception for failures in bug reporting."""
+ def __init__(self, value):
+ self.value = value
+
+# Collect information about a bug.
+
+class BugReport:
+ def __init__(self, title, description, files):
+ self.title = title
+ self.description = description
+ self.files = files
+
+# Reporter interfaces.
+
+import email, mimetypes, smtplib
+from email import encoders
+from email.message import Message
+from email.mime.base import MIMEBase
+from email.mime.multipart import MIMEMultipart
+from email.mime.text import MIMEText
+
+#===------------------------------------------------------------------------===#
+# ReporterParameter
+#===------------------------------------------------------------------------===#
+
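+# One configurable field of a reporter; by default its value is looked up in
+# the saved configuration.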
+class ReporterParameter:
+ def __init__(self, n):
+ self.name = n
+ def getName(self):
+ return self.name
+ def getValue(self,r,bugtype,getConfigOption):
+ return getConfigOption(r.getName(),self.getName())
+ def saveConfigValue(self):
+ return True
+
+class TextParameter (ReporterParameter):
+ def getHTML(self,r,bugtype,getConfigOption):
+ return """\
+<tr>
+<td class="form_clabel">%s:</td>
+<td class="form_value"><input type="text" name="%s_%s" value="%s"></td>
+</tr>"""%(self.getName(),r.getName(),self.getName(),self.getValue(r,bugtype,getConfigOption))
+
+class SelectionParameter (ReporterParameter):
+ def __init__(self, n, values):
+ ReporterParameter.__init__(self,n)
+ self.values = values
+
+ def getHTML(self,r,bugtype,getConfigOption):
+ default = self.getValue(r,bugtype,getConfigOption)
+ return """\
+<tr>
+<td class="form_clabel">%s:</td><td class="form_value"><select name="%s_%s">
+%s
+</select></td>"""%(self.getName(),r.getName(),self.getName(),'\n'.join(["""\
+<option value="%s"%s>%s</option>"""%(o[0],
+ o[0] == default and ' selected="selected"' or '',
+ o[1]) for o in self.values]))
+
+#===------------------------------------------------------------------------===#
+# Reporters
+#===------------------------------------------------------------------------===#
+
+class EmailReporter:
+ def getName(self):
+ return 'Email'
+
+ def getParameters(self):
+ return [TextParameter(x) for x in ['To', 'From', 'SMTP Server', 'SMTP Port']]
+
+ # Lifted from python email module examples.
+ def attachFile(self, outer, path):
+ # Guess the content type based on the file's extension. Encoding
+ # will be ignored, although we should check for simple things like
+ # gzip'd or compressed files.
+ ctype, encoding = mimetypes.guess_type(path)
+ if ctype is None or encoding is not None:
+ # No guess could be made, or the file is encoded (compressed), so
+ # use a generic bag-of-bits type.
+ ctype = 'application/octet-stream'
+ maintype, subtype = ctype.split('/', 1)
+ if maintype == 'text':
+ fp = open(path)
+ # Note: we should handle calculating the charset
+ msg = MIMEText(fp.read(), _subtype=subtype)
+ fp.close()
+ else:
+ fp = open(path, 'rb')
+ msg = MIMEBase(maintype, subtype)
+ msg.set_payload(fp.read())
+ fp.close()
+ # Encode the payload using Base64
+ encoders.encode_base64(msg)
+ # Set the filename parameter
+ msg.add_header('Content-Disposition', 'attachment', filename=os.path.basename(path))
+ outer.attach(msg)
+
+ def fileReport(self, report, parameters):
+ mainMsg = """\
+BUG REPORT
+---
+Title: %s
+Description: %s
+"""%(report.title, report.description)
+
+ if not parameters.get('To'):
+ raise ReportFailure('No "To" address specified.')
+ if not parameters.get('From'):
+ raise ReportFailure('No "From" address specified.')
+
+ msg = MIMEMultipart()
+ msg['Subject'] = 'BUG REPORT: %s'%(report.title)
+ # FIXME: Get config parameters
+ msg['To'] = parameters.get('To')
+ msg['From'] = parameters.get('From')
+ msg.preamble = mainMsg
+
+ msg.attach(MIMEText(mainMsg, _subtype='plain'))
+ for file in report.files:
+ self.attachFile(msg, file)
+
+ try:
+ s = smtplib.SMTP(host=parameters.get('SMTP Server'),
+ port=parameters.get('SMTP Port'))
+ s.sendmail(msg['From'], msg['To'], msg.as_string())
+ s.close()
+ except:
+ raise ReportFailure('Unable to send message via SMTP.')
+
+ return "Message sent!"
+
+class BugzillaReporter:
+ def getName(self):
+ return 'Bugzilla'
+
+ def getParameters(self):
+ return [TextParameter(x) for x in ['URL', 'Product']]
+
+ def fileReport(self, report, parameters):
+ raise NotImplementedError
+
+
+class RadarClassificationParameter(SelectionParameter):
+ def __init__(self):
+ SelectionParameter.__init__(self,"Classification",
+ [['1', 'Security'], ['2', 'Crash/Hang/Data Loss'],
+ ['3', 'Performance'], ['4', 'UI/Usability'],
+ ['6', 'Serious Bug'], ['7', 'Other']])
+
+ def saveConfigValue(self):
+ return False
+
+ def getValue(self,r,bugtype,getConfigOption):
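+ # Guess a Radar classification code from the bug type; the codes match
+ # the selection values listed above.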
+ if bugtype.find("leak") != -1:
+ return '3'
+ elif bugtype.find("dereference") != -1:
+ return '2'
+ elif bugtype.find("missing ivar release") != -1:
+ return '3'
+ else:
+ return '7'
+
+class RadarReporter:
+ @staticmethod
+ def isAvailable():
+ # FIXME: Find this .scpt better
+ path = os.path.join(os.path.dirname(__file__),'../share/scan-view/GetRadarVersion.scpt')
+ try:
+ p = subprocess.Popen(['osascript',path],
+ stdout=subprocess.PIPE, stderr=subprocess.PIPE)
+ except:
+ return False
+ data,err = p.communicate()
+ res = p.wait()
+ # FIXME: Check version? Check for no errors?
+ return res == 0
+
+ def getName(self):
+ return 'Radar'
+
+ def getParameters(self):
+ return [ TextParameter('Component'), TextParameter('Component Version'),
+ RadarClassificationParameter() ]
+
+ def fileReport(self, report, parameters):
+ component = parameters.get('Component', '')
+ componentVersion = parameters.get('Component Version', '')
+ classification = parameters.get('Classification', '')
+ personID = ""
+ diagnosis = ""
+ config = ""
+
+ if not component.strip():
+ component = 'Bugs found by clang Analyzer'
+ if not componentVersion.strip():
+ componentVersion = 'X'
+
+ script = os.path.join(os.path.dirname(__file__),'../share/scan-view/FileRadar.scpt')
+ args = ['osascript', script, component, componentVersion, classification, personID, report.title,
+ report.description, diagnosis, config] + map(os.path.abspath, report.files)
+# print >>sys.stderr, args
+ try:
+ p = subprocess.Popen(args, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
+ except:
+ raise ReportFailure("Unable to file radar (AppleScript failure).")
+ data, err = p.communicate()
+ res = p.wait()
+
+ if res:
+ raise ReportFailure("Unable to file radar (AppleScript failure).")
+
+ try:
+ values = eval(data)
+ except:
+ raise ReportFailure("Unable to process radar results.")
+
+ # We expect (int: bugID, str: message)
+ if len(values) != 2 or not isinstance(values[0], int):
+ raise ReportFailure("Unable to process radar results.")
+
+ bugID,message = values
+ bugID = int(bugID)
+
+ if not bugID:
+ raise ReportFailure(message)
+
+ return "Filed: <a href=\"rdar://%d/\">%d</a>"%(bugID,bugID)
+
+###
+
+def getReporters():
+ reporters = []
+ if RadarReporter.isAvailable():
+ reporters.append(RadarReporter())
+ reporters.append(EmailReporter())
+ return reporters
+
diff --git a/clang-2690385/tools/scan-view/share/ScanView.py b/clang-2690385/tools/scan-view/share/ScanView.py
new file mode 100644
index 00000000..7dc0351e
--- /dev/null
+++ b/clang-2690385/tools/scan-view/share/ScanView.py
@@ -0,0 +1,767 @@
+import BaseHTTPServer
+import SimpleHTTPServer
+import os
+import sys
+import urllib, urlparse
+import posixpath
+import StringIO
+import re
+import shutil
+import threading
+import time
+import socket
+import itertools
+
+import Reporter
+import ConfigParser
+
+###
+# Various patterns matched or replaced by server.
+
+kReportFileRE = re.compile('(.*/)?report-(.*)\\.html')
+
+kBugKeyValueRE = re.compile('<!-- BUG([^ ]*) (.*) -->')
+
+# <!-- REPORTPROBLEM file="crashes/clang_crash_ndSGF9.mi" stderr="crashes/clang_crash_ndSGF9.mi.stderr.txt" info="crashes/clang_crash_ndSGF9.mi.info" -->
+
+kReportCrashEntryRE = re.compile('<!-- REPORTPROBLEM (.*?)-->')
+kReportCrashEntryKeyValueRE = re.compile(' ?([^=]+)="(.*?)"')
+
+kReportReplacements = []
+
+# Add custom javascript.
+kReportReplacements.append((re.compile('<!-- SUMMARYENDHEAD -->'), """\
+<script language="javascript" type="text/javascript">
+function load(url) {
+ if (window.XMLHttpRequest) {
+ req = new XMLHttpRequest();
+ } else if (window.ActiveXObject) {
+ req = new ActiveXObject("Microsoft.XMLHTTP");
+ }
+ if (req != undefined) {
+ req.open("GET", url, true);
+ req.send("");
+ }
+}
+</script>"""))
+
+# Insert additional columns.
+kReportReplacements.append((re.compile('<!-- REPORTBUGCOL -->'),
+ '<td></td><td></td>'))
+
+# Insert report bug and open file links.
+kReportReplacements.append((re.compile('<!-- REPORTBUG id="report-(.*)\\.html" -->'),
+ ('<td class="Button"><a href="report/\\1">Report Bug</a></td>' +
+ '<td class="Button"><a href="javascript:load(\'open/\\1\')">Open File</a></td>')))
+
+kReportReplacements.append((re.compile('<!-- REPORTHEADER -->'),
+ '<h3><a href="/">Summary</a> > Report %(report)s</h3>'))
+
+kReportReplacements.append((re.compile('<!-- REPORTSUMMARYEXTRA -->'),
+ '<td class="Button"><a href="report/%(report)s">Report Bug</a></td>'))
+
+# Insert report crashes link.
+
+# Disabled for the time being until we decide exactly when this should
+# be enabled. Also the radar reporter needs to be fixed to report
+# multiple files.
+
+#kReportReplacements.append((re.compile('<!-- REPORTCRASHES -->'),
+# '<br>These files will automatically be attached to ' +
+# 'reports filed here: <a href="report_crashes">Report Crashes</a>.'))
+
+###
+# Other simple parameters
+
+kShare = posixpath.join(posixpath.dirname(__file__), '../share/scan-view')
+kConfigPath = os.path.expanduser('~/.scanview.cfg')
+
+###
+
+__version__ = "0.1"
+
+__all__ = ["create_server"]
+
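+# Files a bug on a worker thread so the request handler can poll for success
+# and status.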
+class ReporterThread(threading.Thread):
+ def __init__(self, report, reporter, parameters, server):
+ threading.Thread.__init__(self)
+ self.report = report
+ self.server = server
+ self.reporter = reporter
+ self.parameters = parameters
+ self.success = False
+ self.status = None
+
+ def run(self):
+ result = None
+ try:
+ if self.server.options.debug:
+ print >>sys.stderr, "%s: SERVER: submitting bug."%(sys.argv[0],)
+ self.status = self.reporter.fileReport(self.report, self.parameters)
+ self.success = True
+ time.sleep(3)
+ if self.server.options.debug:
+ print >>sys.stderr, "%s: SERVER: submission complete."%(sys.argv[0],)
+ except Reporter.ReportFailure,e:
+ self.status = e.value
+ except Exception,e:
+ s = StringIO.StringIO()
+ import traceback
+ print >>s,'<b>Unhandled Exception</b><br><pre>'
+ traceback.print_exc(file=s)
+ print >>s,'</pre>'
+ self.status = s.getvalue()
+
+class ScanViewServer(BaseHTTPServer.HTTPServer):
+ def __init__(self, address, handler, root, reporters, options):
+ BaseHTTPServer.HTTPServer.__init__(self, address, handler)
+ self.root = root
+ self.reporters = reporters
+ self.options = options
+ self.halted = False
+ self.config = None
+ self.load_config()
+
+ def load_config(self):
+ self.config = ConfigParser.RawConfigParser()
+
+ # Add defaults
+ self.config.add_section('ScanView')
+ for r in self.reporters:
+ self.config.add_section(r.getName())
+ for p in r.getParameters():
+ if p.saveConfigValue():
+ self.config.set(r.getName(), p.getName(), '')
+
+ # Ignore parse errors
+ try:
+ self.config.read([kConfigPath])
+ except:
+ pass
+
+ # Save on exit
+ import atexit
+ atexit.register(lambda: self.save_config())
+
+ def save_config(self):
+ # Ignore errors (only called on exit).
+ try:
+ f = open(kConfigPath,'w')
+ self.config.write(f)
+ f.close()
+ except:
+ pass
+
+ def halt(self):
+ self.halted = True
+ if self.options.debug:
+ print >>sys.stderr, "%s: SERVER: halting." % (sys.argv[0],)
+
+ def serve_forever(self):
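+ # Unlike the base class, poll the halted flag between requests so a
+ # /quit request can stop the loop.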
+ while not self.halted:
+ if self.options.debug > 1:
+ print >>sys.stderr, "%s: SERVER: waiting..." % (sys.argv[0],)
+ try:
+ self.handle_request()
+ except OSError,e:
+ print 'OSError',e.errno
+
+ def finish_request(self, request, client_address):
+ if self.options.autoReload:
+ import ScanView
+ self.RequestHandlerClass = reload(ScanView).ScanViewRequestHandler
+ BaseHTTPServer.HTTPServer.finish_request(self, request, client_address)
+
+ def handle_error(self, request, client_address):
+ # Ignore socket errors
+ info = sys.exc_info()
+ if info and isinstance(info[1], socket.error):
+ if self.options.debug > 1:
+ print >>sys.stderr, "%s: SERVER: ignored socket error." % (sys.argv[0],)
+ return
+ BaseHTTPServer.HTTPServer.handle_error(self, request, client_address)
+
+# Borrowed from Quixote, with simplifications.
+def parse_query(qs, fields=None):
+ if fields is None:
+ fields = {}
+ for chunk in filter(None, qs.split('&')):
+ if '=' not in chunk:
+ name = chunk
+ value = ''
+ else:
+ name, value = chunk.split('=', 1)
+ name = urllib.unquote(name.replace('+', ' '))
+ value = urllib.unquote(value.replace('+', ' '))
+ item = fields.get(name)
+ if item is None:
+ fields[name] = [value]
+ else:
+ item.append(value)
+ return fields
+
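+# Serves files from the results directory, patching the generated HTML on the
+# fly to add the report/open links.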
+class ScanViewRequestHandler(SimpleHTTPServer.SimpleHTTPRequestHandler):
+ server_version = "ScanViewServer/" + __version__
+ dynamic_mtime = time.time()
+
+ def do_HEAD(self):
+ try:
+ SimpleHTTPServer.SimpleHTTPRequestHandler.do_HEAD(self)
+ except Exception,e:
+ self.handle_exception(e)
+
+ def do_GET(self):
+ try:
+ SimpleHTTPServer.SimpleHTTPRequestHandler.do_GET(self)
+ except Exception,e:
+ self.handle_exception(e)
+
+ def do_POST(self):
+ """Serve a POST request."""
+ try:
+ length = self.headers.getheader('content-length') or "0"
+ try:
+ length = int(length)
+ except:
+ length = 0
+ content = self.rfile.read(length)
+ fields = parse_query(content)
+ f = self.send_head(fields)
+ if f:
+ self.copyfile(f, self.wfile)
+ f.close()
+ except Exception,e:
+ self.handle_exception(e)
+
+ def log_message(self, format, *args):
+ if self.server.options.debug:
+ sys.stderr.write("%s: SERVER: %s - - [%s] %s\n" %
+ (sys.argv[0],
+ self.address_string(),
+ self.log_date_time_string(),
+ format%args))
+
+ def load_report(self, report):
+ path = os.path.join(self.server.root, 'report-%s.html'%report)
+ data = open(path).read()
+ keys = {}
+ for item in kBugKeyValueRE.finditer(data):
+ k,v = item.groups()
+ keys[k] = v
+ return keys
+
+ def load_crashes(self):
+ path = posixpath.join(self.server.root, 'index.html')
+ data = open(path).read()
+ problems = []
+ for item in kReportCrashEntryRE.finditer(data):
+ fieldData = item.group(1)
+ fields = dict([i.groups() for i in
+ kReportCrashEntryKeyValueRE.finditer(fieldData)])
+ problems.append(fields)
+ return problems
+
+ def handle_exception(self, exc):
+ import traceback
+ s = StringIO.StringIO()
+ print >>s, "INTERNAL ERROR\n"
+ traceback.print_exc(file=s)
+ f = self.send_string(s.getvalue(), 'text/plain')
+ if f:
+ self.copyfile(f, self.wfile)
+ f.close()
+
+ def get_scalar_field(self, name):
+ if name in self.fields:
+ return self.fields[name][0]
+ else:
+ return None
+
+ def submit_bug(self, c):
+ title = self.get_scalar_field('title')
+ description = self.get_scalar_field('description')
+ report = self.get_scalar_field('report')
+ reporterIndex = self.get_scalar_field('reporter')
+ files = []
+ for fileID in self.fields.get('files',[]):
+ try:
+ i = int(fileID)
+ except:
+ i = None
+ if i is None or i<0 or i>=len(c.files):
+ return (False, 'Invalid file ID')
+ files.append(c.files[i])
+
+ if not title:
+ return (False, "Missing title.")
+ if not description:
+ return (False, "Missing description.")
+ try:
+ reporterIndex = int(reporterIndex)
+ except:
+ return (False, "Invalid report method.")
+
+ # Get the reporter and parameters.
+ reporter = self.server.reporters[reporterIndex]
+ parameters = {}
+ for o in reporter.getParameters():
+ name = '%s_%s'%(reporter.getName(),o.getName())
+ if name not in self.fields:
+ return (False,
+ 'Missing field "%s" for %s report method.'%(name,
+ reporter.getName()))
+ parameters[o.getName()] = self.get_scalar_field(name)
+
+ # Update config defaults.
+ if report != 'None':
+ self.server.config.set('ScanView', 'reporter', reporterIndex)
+ for o in reporter.getParameters():
+ if o.saveConfigValue():
+ name = o.getName()
+ self.server.config.set(reporter.getName(), name, parameters[name])
+
+ # Create the report.
+ bug = Reporter.BugReport(title, description, files)
+
+ # Kick off a reporting thread.
+ t = ReporterThread(bug, reporter, parameters, self.server)
+ t.start()
+
+ # Wait for thread to die...
+ while t.isAlive():
+ time.sleep(.25)
+
+ return (t.success, t.status)
+
+ def send_report_submit(self):
+ report = self.get_scalar_field('report')
+ c = self.get_report_context(report)
+ if c.reportSource is None:
+ reportingFor = "Report Crashes > "
+ fileBug = """\
+<a href="/report_crashes">File Bug</a> > """
+ else:
+ reportingFor = '<a href="/%s">Report %s</a> > ' % (c.reportSource,
+ report)
+ fileBug = '<a href="/report/%s">File Bug</a> > ' % report
+ title = self.get_scalar_field('title')
+ description = self.get_scalar_field('description')
+
+ res,message = self.submit_bug(c)
+
+ if res:
+ statusClass = 'SubmitOk'
+ statusName = 'Succeeded'
+ else:
+ statusClass = 'SubmitFail'
+ statusName = 'Failed'
+
+ result = """<html>
+<head>
+ <title>Bug Submission</title>
+ <link rel="stylesheet" type="text/css" href="/scanview.css" />
+</head>
+<body>
+<h3>
+<a href="/">Summary</a> >
+%(reportingFor)s
+%(fileBug)s
+Submit</h3>
+<form name="form" action="">
+<table class="form">
+<tr><td>
+<table class="form_group">
+<tr>
+ <td class="form_clabel">Title:</td>
+ <td class="form_value">
+ <input type="text" name="title" size="50" value="%(title)s" disabled>
+ </td>
+</tr>
+<tr>
+ <td class="form_label">Description:</td>
+ <td class="form_value">
+<textarea rows="10" cols="80" name="description" disabled>
+%(description)s
+</textarea>
+ </td>
+</table>
+</td></tr>
+</table>
+</form>
+<h1 class="%(statusClass)s">Submission %(statusName)s</h1>
+%(message)s
+<p>
+<hr>
+<a href="/">Return to Summary</a>
+</body>
+</html>"""%locals()
+ return self.send_string(result)
+
+ def send_open_report(self, report):
+ try:
+ keys = self.load_report(report)
+ except IOError:
+ return self.send_error(400, 'Invalid report.')
+
+ file = keys.get('FILE')
+ if not file or not posixpath.exists(file):
+ return self.send_error(400, 'File does not exist: "%s"' % file)
+
+ import startfile
+ if self.server.options.debug:
+ print >>sys.stderr, '%s: SERVER: opening "%s"'%(sys.argv[0],
+ file)
+
+ status = startfile.open(file)
+ if status:
+ res = 'Opened: "%s"' % file
+ else:
+ res = 'Open failed: "%s"' % file
+
+ return self.send_string(res, 'text/plain')
+
+ def get_report_context(self, report):
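+ # Build a small context object (title, description, files, nav markup)
+ # for a single report or for the crash summary.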
+ class Context:
+ pass
+ if report is None or report == 'None':
+ data = self.load_crashes()
+ # Don't allow empty reports.
+ if not data:
+ raise ValueError, 'No crashes detected!'
+ c = Context()
+ c.title = 'clang static analyzer failures'
+
+ stderrSummary = ""
+ for item in data:
+ if 'stderr' in item:
+ path = posixpath.join(self.server.root, item['stderr'])
+ if os.path.exists(path):
+ lns = itertools.islice(open(path), 0, 10)
+ stderrSummary += '%s\n--\n%s' % (item.get('src',
+ '<unknown>'),
+ ''.join(lns))
+
+ c.description = """\
+The clang static analyzer failed on these inputs:
+%s
+
+STDERR Summary
+--------------
+%s
+""" % ('\n'.join([item.get('src','<unknown>') for item in data]),
+ stderrSummary)
+ c.reportSource = None
+ c.navMarkup = "Report Crashes > "
+ c.files = []
+ for item in data:
+ c.files.append(item.get('src',''))
+ c.files.append(posixpath.join(self.server.root,
+ item.get('file','')))
+ c.files.append(posixpath.join(self.server.root,
+ item.get('clangfile','')))
+ c.files.append(posixpath.join(self.server.root,
+ item.get('stderr','')))
+ c.files.append(posixpath.join(self.server.root,
+ item.get('info','')))
+ # Just in case something failed, ignore files which don't
+ # exist.
+ c.files = [f for f in c.files
+ if os.path.exists(f) and os.path.isfile(f)]
+ else:
+ # Check that this is a valid report.
+ path = posixpath.join(self.server.root, 'report-%s.html' % report)
+ if not posixpath.exists(path):
+ raise ValueError, 'Invalid report ID'
+ keys = self.load_report(report)
+ c = Context()
+ c.title = keys.get('DESC','clang error (unrecognized)')
+ c.description = """\
+Bug reported by the clang static analyzer.
+
+Description: %s
+File: %s
+Line: %s
+"""%(c.title, keys.get('FILE','<unknown>'), keys.get('LINE', '<unknown>'))
+ c.reportSource = 'report-%s.html' % report
+ c.navMarkup = """<a href="/%s">Report %s</a> > """ % (c.reportSource,
+ report)
+
+ c.files = [path]
+ return c
+
+ def send_report(self, report, configOverrides=None):
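+ # Config overrides (used by the report_crashes page) take precedence
+ # over saved settings.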
+ def getConfigOption(section, field):
+ if (configOverrides is not None and
+ section in configOverrides and
+ field in configOverrides[section]):
+ return configOverrides[section][field]
+ return self.server.config.get(section, field)
+
+ # A report of None is used for crashes.
+ try:
+ c = self.get_report_context(report)
+ except ValueError, e:
+ return self.send_error(400, e.message)
+
+ title = c.title
+ description= c.description
+ reportingFor = c.navMarkup
+ if c.reportSource is None:
+ extraIFrame = ""
+ else:
+ extraIFrame = """\
+<iframe src="/%s" width="100%%" height="40%%"
+ scrolling="auto" frameborder="1">
+ <a href="/%s">View Bug Report</a>
+</iframe>""" % (c.reportSource, c.reportSource)
+
+ reporterSelections = []
+ reporterOptions = []
+
+ try:
+ active = int(getConfigOption('ScanView','reporter'))
+ except:
+ active = 0
+ for i,r in enumerate(self.server.reporters):
+ selected = (i == active)
+ if selected:
+ selectedStr = ' selected'
+ else:
+ selectedStr = ''
+ reporterSelections.append('<option value="%d"%s>%s</option>'%(i,selectedStr,r.getName()))
+ options = '\n'.join([ o.getHTML(r,title,getConfigOption) for o in r.getParameters()])
+ display = ('none','')[selected]
+ reporterOptions.append("""\
+<tr id="%sReporterOptions" style="display:%s">
+ <td class="form_label">%s Options</td>
+ <td class="form_value">
+ <table class="form_inner_group">
+%s
+ </table>
+ </td>
+</tr>
+"""%(r.getName(),display,r.getName(),options))
+ reporterSelections = '\n'.join(reporterSelections)
+ reporterOptionsDivs = '\n'.join(reporterOptions)
+ reportersArray = '[%s]'%(','.join([repr(r.getName()) for r in self.server.reporters]))
+
+ if c.files:
+ fieldSize = min(5, len(c.files))
+ attachFileOptions = '\n'.join(["""\
+<option value="%d" selected>%s</option>""" % (i,v) for i,v in enumerate(c.files)])
+ attachFileRow = """\
+<tr>
+ <td class="form_label">Attach:</td>
+ <td class="form_value">
+<select style="width:100%%" name="files" multiple size=%d>
+%s
+</select>
+ </td>
+</tr>
+""" % (min(5, len(c.files)), attachFileOptions)
+ else:
+ attachFileRow = ""
+
+ result = """<html>
+<head>
+ <title>File Bug</title>
+ <link rel="stylesheet" type="text/css" href="/scanview.css" />
+</head>
+<script language="javascript" type="text/javascript">
+var reporters = %(reportersArray)s;
+function updateReporterOptions() {
+ index = document.getElementById('reporter').selectedIndex;
+ for (var i=0; i < reporters.length; ++i) {
+ o = document.getElementById(reporters[i] + "ReporterOptions");
+ if (i == index) {
+ o.style.display = "";
+ } else {
+ o.style.display = "none";
+ }
+ }
+}
+</script>
+<body onLoad="updateReporterOptions()">
+<h3>
+<a href="/">Summary</a> >
+%(reportingFor)s
+File Bug</h3>
+<form name="form" action="/report_submit" method="post">
+<input type="hidden" name="report" value="%(report)s">
+
+<table class="form">
+<tr><td>
+<table class="form_group">
+<tr>
+ <td class="form_clabel">Title:</td>
+ <td class="form_value">
+ <input type="text" name="title" size="50" value="%(title)s">
+ </td>
+</tr>
+<tr>
+ <td class="form_label">Description:</td>
+ <td class="form_value">
+<textarea rows="10" cols="80" name="description">
+%(description)s
+</textarea>
+ </td>
+</tr>
+
+%(attachFileRow)s
+
+</table>
+<br>
+<table class="form_group">
+<tr>
+ <td class="form_clabel">Method:</td>
+ <td class="form_value">
+ <select id="reporter" name="reporter" onChange="updateReporterOptions()">
+ %(reporterSelections)s
+ </select>
+ </td>
+</tr>
+%(reporterOptionsDivs)s
+</table>
+<br>
+</td></tr>
+<tr><td class="form_submit">
+ <input align="right" type="submit" name="Submit" value="Submit">
+</td></tr>
+</table>
+</form>
+
+%(extraIFrame)s
+
+</body>
+</html>"""%locals()
+
+ return self.send_string(result)
+
+ def send_head(self, fields=None):
+ if (self.server.options.onlyServeLocal and
+ self.client_address[0] != '127.0.0.1'):
+ return self.send_error(401, 'Unauthorized host.')
+
+ if fields is None:
+ fields = {}
+ self.fields = fields
+
+ o = urlparse.urlparse(self.path)
+ self.fields = parse_query(o.query, fields)
+ path = posixpath.normpath(urllib.unquote(o.path))
+
+ # Split the components and strip the root prefix.
+ components = path.split('/')[1:]
+
+ # Special case some top-level entries.
+ if components:
+ name = components[0]
+ if len(components)==2:
+ if name=='report':
+ return self.send_report(components[1])
+ elif name=='open':
+ return self.send_open_report(components[1])
+ elif len(components)==1:
+ if name=='quit':
+ self.server.halt()
+ return self.send_string('Goodbye.', 'text/plain')
+ elif name=='report_submit':
+ return self.send_report_submit()
+ elif name=='report_crashes':
+ overrides = { 'ScanView' : {},
+ 'Radar' : {},
+ 'Email' : {} }
+ for i,r in enumerate(self.server.reporters):
+ if r.getName() == 'Radar':
+ overrides['ScanView']['reporter'] = i
+ break
+ overrides['Radar']['Component'] = 'llvm - checker'
+ overrides['Radar']['Component Version'] = 'X'
+ return self.send_report(None, overrides)
+ elif name=='favicon.ico':
+ return self.send_path(posixpath.join(kShare,'bugcatcher.ico'))
+
+ # Match directory entries.
+ if components[-1] == '':
+ components[-1] = 'index.html'
+
+ relpath = '/'.join(components)
+ path = posixpath.join(self.server.root, relpath)
+
+ if self.server.options.debug > 1:
+ print >>sys.stderr, '%s: SERVER: sending path "%s"'%(sys.argv[0],
+ path)
+ return self.send_path(path)
+
+ def send_404(self):
+ self.send_error(404, "File not found")
+ return None
+
+ def send_path(self, path):
+ # If the requested path is outside the root directory, do not open it
+ rel = os.path.abspath(path)
+ if not rel.startswith(os.path.abspath(self.server.root)):
+ return self.send_404()
+
+ ctype = self.guess_type(path)
+ if ctype.startswith('text/'):
+ # Patch file instead
+ return self.send_patched_file(path, ctype)
+ else:
+ mode = 'rb'
+ try:
+ f = open(path, mode)
+ except IOError:
+ return self.send_404()
+ return self.send_file(f, ctype)
+
+ def send_file(self, f, ctype):
+ # Send the raw file; text files are patched separately in send_patched_file.
+ self.send_response(200)
+ self.send_header("Content-type", ctype)
+ fs = os.fstat(f.fileno())
+ self.send_header("Content-Length", str(fs[6]))
+ self.send_header("Last-Modified", self.date_time_string(fs.st_mtime))
+ self.end_headers()
+ return f
+
+ def send_string(self, s, ctype='text/html', headers=True, mtime=None):
+ if headers:
+ self.send_response(200)
+ self.send_header("Content-type", ctype)
+ self.send_header("Content-Length", str(len(s)))
+ if mtime is None:
+ mtime = self.dynamic_mtime
+ self.send_header("Last-Modified", self.date_time_string(mtime))
+ self.end_headers()
+ return StringIO.StringIO(s)
+
+ def send_patched_file(self, path, ctype):
+ # Allow a very limited set of variables. This is pretty gross.
+ variables = {}
+ variables['report'] = ''
+ m = kReportFileRE.match(path)
+ if m:
+ variables['report'] = m.group(2)
+
+ try:
+ f = open(path,'r')
+ except IOError:
+ return self.send_404()
+ fs = os.fstat(f.fileno())
+ data = f.read()
+ for a,b in kReportReplacements:
+ data = a.sub(b % variables, data)
+ return self.send_string(data, ctype, mtime=fs.st_mtime)
+
+
+def create_server(address, options, root):
+ import Reporter
+
+ reporters = Reporter.getReporters()
+
+ return ScanViewServer(address, ScanViewRequestHandler,
+ root,
+ reporters,
+ options)
diff --git a/clang-2690385/tools/scan-view/share/bugcatcher.ico b/clang-2690385/tools/scan-view/share/bugcatcher.ico
new file mode 100644
index 00000000..22d39b59
--- /dev/null
+++ b/clang-2690385/tools/scan-view/share/bugcatcher.ico
Binary files differ
diff --git a/clang-2690385/tools/scan-view/share/startfile.py b/clang-2690385/tools/scan-view/share/startfile.py
new file mode 100644
index 00000000..e8fbe2d5
--- /dev/null
+++ b/clang-2690385/tools/scan-view/share/startfile.py
@@ -0,0 +1,203 @@
+"""Utility for opening a file using the default application in a cross-platform
+manner. Modified from http://code.activestate.com/recipes/511443/.
+"""
+
+__version__ = '1.1x'
+__all__ = ['open']
+
+import os
+import sys
+import webbrowser
+import subprocess
+
+_controllers = {}
+_open = None
+
+
+class BaseController(object):
+ '''Base class for open program controllers.'''
+
+ def __init__(self, name):
+ self.name = name
+
+ def open(self, filename):
+ raise NotImplementedError
+
+
+class Controller(BaseController):
+ '''Controller for a generic open program.'''
+
+ def __init__(self, *args):
+ super(Controller, self).__init__(os.path.basename(args[0]))
+ self.args = list(args)
+
+ def _invoke(self, cmdline):
+ if sys.platform[:3] == 'win':
+ closefds = False
+ startupinfo = subprocess.STARTUPINFO()
+ startupinfo.dwFlags |= subprocess.STARTF_USESHOWWINDOW
+ else:
+ closefds = True
+ startupinfo = None
+
+ if (os.environ.get('DISPLAY') or sys.platform[:3] == 'win' or
+ sys.platform == 'darwin'):
+ inout = file(os.devnull, 'r+')
+ else:
+ # for TTY programs, we need stdin/out
+ inout = None
+
+ # If possible, put the child process in a separate process group so
+ # keyboard interrupts don't affect the child process as well as
+ # Python.
+ setsid = getattr(os, 'setsid', None)
+ if not setsid:
+ setsid = getattr(os, 'setpgrp', None)
+
+ pipe = subprocess.Popen(cmdline, stdin=inout, stdout=inout,
+ stderr=inout, close_fds=closefds,
+ preexec_fn=setsid, startupinfo=startupinfo)
+
+ # It is assumed that these kinds of tools (gnome-open, kfmclient,
+ # exo-open, xdg-open and open for OS X) exit immediately after
+ # launching the target application.
+ returncode = pipe.wait()
+ if hasattr(self, 'fixreturncode'):
+ returncode = self.fixreturncode(returncode)
+ return not returncode
+
+ def open(self, filename):
+ if isinstance(filename, basestring):
+ cmdline = self.args + [filename]
+ else:
+ # assume it is a sequence
+ cmdline = self.args + filename
+ try:
+ return self._invoke(cmdline)
+ except OSError:
+ return False
+
+
+# Platform support for Windows
+if sys.platform[:3] == 'win':
+
+ class Start(BaseController):
+ '''Controller for the win32 start program through os.startfile.'''
+
+ def open(self, filename):
+ try:
+ os.startfile(filename)
+ except WindowsError:
+ # [Error 22] No application is associated with the specified
+ # file for this operation: '<URL>'
+ return False
+ else:
+ return True
+
+ _controllers['windows-default'] = Start('start')
+ _open = _controllers['windows-default'].open
+
+
+# Platform support for MacOS
+elif sys.platform == 'darwin':
+ _controllers['open']= Controller('open')
+ _open = _controllers['open'].open
+
+
+# Platform support for Unix
+else:
+
+ import commands
+
+ # @WARNING: uses the private API of the webbrowser module
+ from webbrowser import _iscommand
+
+ class KfmClient(Controller):
+ '''Controller for the KDE kfmclient program.'''
+
+ def __init__(self, kfmclient='kfmclient'):
+ super(KfmClient, self).__init__(kfmclient, 'exec')
+ self.kde_version = self.detect_kde_version()
+
+ def detect_kde_version(self):
+ kde_version = None
+ try:
+ info = commands.getoutput('kde-config --version')
+
+ for line in info.splitlines():
+ if line.startswith('KDE'):
+ kde_version = line.split(':')[-1].strip()
+ break
+ except (OSError, RuntimeError):
+ pass
+
+ return kde_version
+
+ def fixreturncode(self, returncode):
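+ # kfmclient before KDE 3.5.4 reportedly returns unreliable exit
+ # codes, so treat those as success.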
+ if returncode is not None and self.kde_version > '3.5.4':
+ return returncode
+ else:
+ return os.EX_OK
+
+ def detect_desktop_environment():
+ '''Check for known desktop environments.
+
+ Return the desktop environment's name, lowercase (kde, gnome, xfce),
+ or "generic".
+
+ '''
+
+ desktop_environment = 'generic'
+
+ if os.environ.get('KDE_FULL_SESSION') == 'true':
+ desktop_environment = 'kde'
+ elif os.environ.get('GNOME_DESKTOP_SESSION_ID'):
+ desktop_environment = 'gnome'
+ else:
+ try:
+ info = commands.getoutput('xprop -root _DT_SAVE_MODE')
+ if ' = "xfce4"' in info:
+ desktop_environment = 'xfce'
+ except (OSError, RuntimeError):
+ pass
+
+ return desktop_environment
+
+
+ def register_X_controllers():
+ if _iscommand('kfmclient'):
+ _controllers['kde-open'] = KfmClient()
+
+ for command in ('gnome-open', 'exo-open', 'xdg-open'):
+ if _iscommand(command):
+ _controllers[command] = Controller(command)
+
+ def get():
+ controllers_map = {
+ 'gnome': 'gnome-open',
+ 'kde': 'kde-open',
+ 'xfce': 'exo-open',
+ }
+
+ desktop_environment = detect_desktop_environment()
+
+ try:
+ controller_name = controllers_map[desktop_environment]
+ return _controllers[controller_name].open
+
+ except KeyError:
+ if 'xdg-open' in _controllers:
+ return _controllers['xdg-open'].open
+ else:
+ return webbrowser.open
+
+
+ if os.environ.get("DISPLAY"):
+ register_X_controllers()
+ _open = get()
+
+
+def open(filename):
+ '''Open a file or a URL in the registered default application.'''
+
+ return _open(filename)