author    Miao Wang <miaowang@google.com>  2016-11-23 12:42:46 -0800
committer Miao Wang <miaowang@google.com>  2016-11-28 19:57:27 -0800
commit    52dc95db19940892624a8a19061e3f6f764758fc (patch)
tree      8796e56902ec6f0e9d5b50f9189064cffc01ddb5
parent    841d9474a8b1951a282870105f739cdc13398e23 (diff)
download  windows-x86-52dc95db19940892624a8a19061e3f6f764758fc.tar.gz
Update prebuilt RenderScript to build 3518219.
Built from aosp-master, build 3518219.

Bug: http://b/22377128
Change-Id: I6313c459f1c70e0beeec43c29817ed73f3ef2b88
-rw-r--r--  current/MODULE_LICENSE_APACHE2  0
-rw-r--r--  current/MODULE_LICENSE_BSD_LIKE  0
-rw-r--r--  current/MODULE_LICENSE_MIT  0
-rw-r--r--  current/NOTICE  889
-rwxr-xr-x  current/bin/bcc_compat.exe  bin 0 -> 152064 bytes
-rwxr-xr-x  current/bin/llvm-rs-cc.exe  bin 0 -> 1255936 bytes
-rw-r--r--  current/clang-include/__clang_cuda_cmath.h  148
-rw-r--r--  current/clang-include/__clang_cuda_intrinsics.h  322
-rw-r--r--  current/clang-include/__clang_cuda_math_forward_declares.h  263
-rw-r--r--  current/clang-include/__clang_cuda_runtime_wrapper.h  316
-rw-r--r--  current/clang-include/__stddef_max_align_t.h  43
-rw-r--r--  current/clang-include/__wmmintrin_aes.h  151
-rw-r--r--  current/clang-include/__wmmintrin_pclmul.h  58
-rw-r--r--  current/clang-include/adxintrin.h  86
-rw-r--r--  current/clang-include/altivec.h  14458
-rw-r--r--  current/clang-include/ammintrin.h  194
-rw-r--r--  current/clang-include/arm_acle.h  312
-rw-r--r--  current/clang-include/avx2intrin.h  1299
-rw-r--r--  current/clang-include/avx512bwintrin.h  2410
-rw-r--r--  current/clang-include/avx512cdintrin.h  144
-rw-r--r--  current/clang-include/avx512dqintrin.h  1331
-rw-r--r--  current/clang-include/avx512erintrin.h  285
-rw-r--r--  current/clang-include/avx512fintrin.h  9543
-rw-r--r--  current/clang-include/avx512ifmaintrin.h  92
-rw-r--r--  current/clang-include/avx512ifmavlintrin.h  149
-rw-r--r--  current/clang-include/avx512pfintrin.h  111
-rw-r--r--  current/clang-include/avx512vbmiintrin.h  137
-rw-r--r--  current/clang-include/avx512vbmivlintrin.h  247
-rw-r--r--  current/clang-include/avx512vlbwintrin.h  3406
-rw-r--r--  current/clang-include/avx512vlcdintrin.h  263
-rw-r--r--  current/clang-include/avx512vldqintrin.h  1265
-rw-r--r--  current/clang-include/avx512vlintrin.h  9170
-rw-r--r--  current/clang-include/avxintrin.h  2928
-rw-r--r--  current/clang-include/bmi2intrin.h  95
-rw-r--r--  current/clang-include/bmiintrin.h  548
-rw-r--r--  current/clang-include/clflushoptintrin.h  41
-rw-r--r--  current/clang-include/cpuid.h  209
-rw-r--r--  current/clang-include/cuda_builtin_vars.h  126
-rw-r--r--  current/clang-include/emmintrin.h  2458
-rw-r--r--  current/clang-include/f16cintrin.h  124
-rw-r--r--  current/clang-include/float.h  134
-rw-r--r--  current/clang-include/fma4intrin.h  230
-rw-r--r--  current/clang-include/fmaintrin.h  228
-rw-r--r--  current/clang-include/fxsrintrin.h  55
-rw-r--r--  current/clang-include/htmintrin.h  226
-rw-r--r--  current/clang-include/htmxlintrin.h  363
-rw-r--r--  current/clang-include/ia32intrin.h  79
-rw-r--r--  current/clang-include/immintrin.h  283
-rw-r--r--  current/clang-include/intrin.h  957
-rw-r--r--  current/clang-include/inttypes.h  106
-rw-r--r--  current/clang-include/iso646.h  43
-rw-r--r--  current/clang-include/limits.h  118
-rw-r--r--  current/clang-include/lzcntintrin.h  68
-rw-r--r--  current/clang-include/mm3dnow.h  171
-rw-r--r--  current/clang-include/mm_malloc.h  75
-rw-r--r--  current/clang-include/mmintrin.h  1545
-rw-r--r--  current/clang-include/module.modulemap  164
-rw-r--r--  current/clang-include/mwaitxintrin.h  47
-rw-r--r--  current/clang-include/nmmintrin.h  30
-rw-r--r--  current/clang-include/opencl-c.h  16962
-rw-r--r--  current/clang-include/pkuintrin.h  48
-rw-r--r--  current/clang-include/pmmintrin.h  311
-rw-r--r--  current/clang-include/popcntintrin.h  98
-rw-r--r--  current/clang-include/prfchwintrin.h  45
-rw-r--r--  current/clang-include/rdseedintrin.h  56
-rw-r--r--  current/clang-include/rtmintrin.h  59
-rw-r--r--  current/clang-include/s390intrin.h  39
-rw-r--r--  current/clang-include/shaintrin.h  75
-rw-r--r--  current/clang-include/smmintrin.h  507
-rw-r--r--  current/clang-include/stdalign.h  35
-rw-r--r--  current/clang-include/stdarg.h  52
-rw-r--r--  current/clang-include/stdatomic.h  546
-rw-r--r--  current/clang-include/stdbool.h  44
-rw-r--r--  current/clang-include/stddef.h  137
-rw-r--r--  current/clang-include/stdint.h  707
-rw-r--r--  current/clang-include/stdnoreturn.h  30
-rw-r--r--  current/clang-include/tbmintrin.h  154
-rw-r--r--  current/clang-include/tgmath.h  1374
-rw-r--r--  current/clang-include/tmmintrin.h  773
-rw-r--r--  current/clang-include/unwind.h  299
-rw-r--r--  current/clang-include/vadefs.h  65
-rw-r--r--  current/clang-include/varargs.h  26
-rw-r--r--  current/clang-include/vecintrin.h  8946
-rw-r--r--  current/clang-include/wmmintrin.h  33
-rw-r--r--  current/clang-include/x86intrin.h  85
-rw-r--r--  current/clang-include/xmmintrin.h  2893
-rw-r--r--  current/clang-include/xopintrin.h  782
-rw-r--r--  current/clang-include/xsavecintrin.h  48
-rw-r--r--  current/clang-include/xsaveintrin.h  58
-rw-r--r--  current/clang-include/xsaveoptintrin.h  48
-rw-r--r--  current/clang-include/xsavesintrin.h  58
-rw-r--r--  current/clang-include/xtestintrin.h  41
-rwxr-xr-x  current/lib/libLLVM.dll  bin 0 -> 30570496 bytes
-rwxr-xr-x  current/lib/libbcc.dll  bin 0 -> 807424 bytes
-rwxr-xr-x  current/lib/libbcinfo.dll  bin 0 -> 456704 bytes
-rwxr-xr-x  current/lib/libclang.dll  bin 0 -> 18552832 bytes
-rw-r--r--  current/manifest_3518219.xml  540
-rwxr-xr-x  current/platform/arm/libRSSupport.so  bin 0 -> 1013504 bytes
-rwxr-xr-x  current/platform/arm/libRSSupportIO.so  bin 0 -> 154432 bytes
-rw-r--r--  current/platform/arm/libRScpp_static.a  bin 0 -> 1739340 bytes
-rwxr-xr-x  current/platform/arm/libblasV8.so  bin 0 -> 1081924 bytes
-rw-r--r--  current/platform/arm/libcompiler_rt.a  bin 0 -> 527684 bytes
-rw-r--r--  current/platform/arm/librsrt.bc  bin 0 -> 297820 bytes
-rwxr-xr-x  current/platform/arm64/libRSSupport.so  bin 0 -> 1496320 bytes
-rwxr-xr-x  current/platform/arm64/libRSSupportIO.so  bin 0 -> 246904 bytes
-rw-r--r--  current/platform/arm64/libRScpp_static.a  bin 0 -> 2866966 bytes
-rwxr-xr-x  current/platform/arm64/libblasV8.so  bin 0 -> 1597584 bytes
-rw-r--r--  current/platform/arm64/libcompiler_rt.a  bin 0 -> 825590 bytes
-rw-r--r--  current/platform/arm64/librsrt.bc  bin 0 -> 328568 bytes
-rwxr-xr-x  current/platform/mips/libRSSupport.so  bin 0 -> 1607172 bytes
-rwxr-xr-x  current/platform/mips/libRSSupportIO.so  bin 0 -> 408176 bytes
-rw-r--r--  current/platform/mips/libRScpp_static.a  bin 0 -> 1912352 bytes
-rwxr-xr-x  current/platform/mips/libblasV8.so  bin 0 -> 1921120 bytes
-rw-r--r--  current/platform/mips/libcompiler_rt.a  bin 0 -> 512680 bytes
-rw-r--r--  current/platform/mips/librsrt.bc  bin 0 -> 297820 bytes
-rw-r--r--  current/platform/renderscript-v8.jar  bin 0 -> 156675 bytes
-rw-r--r--  current/platform/rs/cpp/RenderScript.h  37
-rw-r--r--  current/platform/rs/cpp/rsCppStructs.h  4358
-rw-r--r--  current/platform/rs/cpp/util/RefBase.h  529
-rw-r--r--  current/platform/rs/cpp/util/StrongPointer.h  222
-rw-r--r--  current/platform/rs/cpp/util/TypeHelpers.h  302
-rw-r--r--  current/platform/rs/rsDefines.h  510
-rw-r--r--  current/platform/rs/scriptc/rs_allocation_create.rsh  1345
-rw-r--r--  current/platform/rs/scriptc/rs_allocation_data.rsh  3365
-rw-r--r--  current/platform/rs/scriptc/rs_atomic.rsh  257
-rw-r--r--  current/platform/rs/scriptc/rs_convert.rsh  1623
-rw-r--r--  current/platform/rs/scriptc/rs_core.rsh  61
-rw-r--r--  current/platform/rs/scriptc/rs_debug.rsh  269
-rw-r--r--  current/platform/rs/scriptc/rs_for_each.rsh  434
-rw-r--r--  current/platform/rs/scriptc/rs_graphics.rsh  1522
-rw-r--r--  current/platform/rs/scriptc/rs_io.rsh  107
-rw-r--r--  current/platform/rs/scriptc/rs_math.rsh  6550
-rw-r--r--  current/platform/rs/scriptc/rs_matrix.rsh  612
-rw-r--r--  current/platform/rs/scriptc/rs_object_info.rsh  462
-rw-r--r--  current/platform/rs/scriptc/rs_object_types.rsh  232
-rw-r--r--  current/platform/rs/scriptc/rs_quaternion.rsh  374
-rw-r--r--  current/platform/rs/scriptc/rs_time.rsh  126
-rw-r--r--  current/platform/rs/scriptc/rs_value_types.rsh  543
-rw-r--r--  current/platform/rs/scriptc/rs_vector_math.rsh  453
-rwxr-xr-x  current/platform/x86/libRSSupport.so  bin 0 -> 1450868 bytes
-rwxr-xr-x  current/platform/x86/libRSSupportIO.so  bin 0 -> 287652 bytes
-rw-r--r--  current/platform/x86/libRScpp_static.a  bin 0 -> 1781568 bytes
-rwxr-xr-x  current/platform/x86/libblasV8.so  bin 0 -> 1835432 bytes
-rw-r--r--  current/platform/x86/libcompiler_rt.a  bin 0 -> 407084 bytes
-rw-r--r--  current/platform/x86/librsrt.bc  bin 0 -> 296752 bytes
-rwxr-xr-x  current/platform/x86_64/libRSSupport.so  bin 0 -> 1454672 bytes
-rwxr-xr-x  current/platform/x86_64/libRSSupportIO.so  bin 0 -> 283768 bytes
-rw-r--r--  current/platform/x86_64/libRScpp_static.a  bin 0 -> 3034822 bytes
-rwxr-xr-x  current/platform/x86_64/libblasV8.so  bin 0 -> 2035440 bytes
-rw-r--r--  current/platform/x86_64/libcompiler_rt.a  bin 0 -> 762634 bytes
-rw-r--r--  current/platform/x86_64/librsrt.bc  bin 0 -> 327644 bytes
151 files changed, 118810 insertions, 0 deletions
diff --git a/current/MODULE_LICENSE_APACHE2 b/current/MODULE_LICENSE_APACHE2
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/current/MODULE_LICENSE_APACHE2
diff --git a/current/MODULE_LICENSE_BSD_LIKE b/current/MODULE_LICENSE_BSD_LIKE
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/current/MODULE_LICENSE_BSD_LIKE
diff --git a/current/MODULE_LICENSE_MIT b/current/MODULE_LICENSE_MIT
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/current/MODULE_LICENSE_MIT
diff --git a/current/NOTICE b/current/NOTICE
new file mode 100644
index 0000000..01bf07b
--- /dev/null
+++ b/current/NOTICE
@@ -0,0 +1,889 @@
+==============================================================================
+LLVM Release License
+==============================================================================
+University of Illinois/NCSA
+Open Source License
+
+Copyright (c) 2007-2014 University of Illinois at Urbana-Champaign.
+All rights reserved.
+
+Developed by:
+
+ LLVM Team
+
+ University of Illinois at Urbana-Champaign
+
+ http://llvm.org
+
+Permission is hereby granted, free of charge, to any person obtaining a copy of
+this software and associated documentation files (the "Software"), to deal with
+the Software without restriction, including without limitation the rights to
+use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies
+of the Software, and to permit persons to whom the Software is furnished to do
+so, subject to the following conditions:
+
+ * Redistributions of source code must retain the above copyright notice,
+ this list of conditions and the following disclaimers.
+
+ * Redistributions in binary form must reproduce the above copyright notice,
+ this list of conditions and the following disclaimers in the
+ documentation and/or other materials provided with the distribution.
+
+ * Neither the names of the LLVM Team, University of Illinois at
+ Urbana-Champaign, nor the names of its contributors may be used to
+ endorse or promote products derived from this Software without specific
+ prior written permission.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS
+FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+CONTRIBUTORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS WITH THE
+SOFTWARE.
+
+==============================================================================
+The LLVM software contains code written by third parties. Such software will
+have its own individual LICENSE.TXT file in the directory in which it appears.
+This file will describe the copyrights, license, and restrictions which apply
+to that code.
+
+The disclaimer of warranty in the University of Illinois Open Source License
+applies to all code in the LLVM Distribution, and nothing in any of the
+other licenses gives permission to use the names of the LLVM Team or the
+University of Illinois to endorse or promote products derived from this
+Software.
+
+The following pieces of software have additional or alternate copyrights,
+licenses, and/or restrictions:
+
+Program             Directory
+-------             ---------
+<none yet>
+
+
+==============================================================================
+compiler_rt License
+==============================================================================
+
+The compiler_rt library is dual licensed under both the University of Illinois
+"BSD-Like" license and the MIT license. As a user of this code you may choose
+to use it under either license. As a contributor, you agree to allow your code
+to be used under both.
+
+Full text of the relevant licenses is included below.
+
+==============================================================================
+
+University of Illinois/NCSA
+Open Source License
+
+Copyright (c) 2009-2014 by the contributors listed in CREDITS.TXT
+
+All rights reserved.
+
+Developed by:
+
+ LLVM Team
+
+ University of Illinois at Urbana-Champaign
+
+ http://llvm.org
+
+Permission is hereby granted, free of charge, to any person obtaining a copy of
+this software and associated documentation files (the "Software"), to deal with
+the Software without restriction, including without limitation the rights to
+use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies
+of the Software, and to permit persons to whom the Software is furnished to do
+so, subject to the following conditions:
+
+ * Redistributions of source code must retain the above copyright notice,
+ this list of conditions and the following disclaimers.
+
+ * Redistributions in binary form must reproduce the above copyright notice,
+ this list of conditions and the following disclaimers in the
+ documentation and/or other materials provided with the distribution.
+
+ * Neither the names of the LLVM Team, University of Illinois at
+ Urbana-Champaign, nor the names of its contributors may be used to
+ endorse or promote products derived from this Software without specific
+ prior written permission.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS
+FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+CONTRIBUTORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS WITH THE
+SOFTWARE.
+
+==============================================================================
+
+Copyright (c) 2009-2014 by the contributors listed in CREDITS.TXT
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+THE SOFTWARE.
+
+==============================================================================
+Copyrights and Licenses for Third Party Software Distributed with LLVM:
+==============================================================================
+The LLVM software contains code written by third parties. Such software will
+have its own individual LICENSE.TXT file in the directory in which it appears.
+This file will describe the copyrights, license, and restrictions which apply
+to that code.
+
+The disclaimer of warranty in the University of Illinois Open Source License
+applies to all code in the LLVM Distribution, and nothing in any of the
+other licenses gives permission to use the names of the LLVM Team or the
+University of Illinois to endorse or promote products derived from this
+Software.
+
+
+==============================================================================
+LLVM Release License
+==============================================================================
+University of Illinois/NCSA
+Open Source License
+
+Copyright (c) 2003-2014 University of Illinois at Urbana-Champaign.
+All rights reserved.
+
+Developed by:
+
+ LLVM Team
+
+ University of Illinois at Urbana-Champaign
+
+ http://llvm.org
+
+Permission is hereby granted, free of charge, to any person obtaining a copy of
+this software and associated documentation files (the "Software"), to deal with
+the Software without restriction, including without limitation the rights to
+use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies
+of the Software, and to permit persons to whom the Software is furnished to do
+so, subject to the following conditions:
+
+ * Redistributions of source code must retain the above copyright notice,
+ this list of conditions and the following disclaimers.
+
+ * Redistributions in binary form must reproduce the above copyright notice,
+ this list of conditions and the following disclaimers in the
+ documentation and/or other materials provided with the distribution.
+
+ * Neither the names of the LLVM Team, University of Illinois at
+ Urbana-Champaign, nor the names of its contributors may be used to
+ endorse or promote products derived from this Software without specific
+ prior written permission.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS
+FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+CONTRIBUTORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS WITH THE
+SOFTWARE.
+
+==============================================================================
+Copyrights and Licenses for Third Party Software Distributed with LLVM:
+==============================================================================
+The LLVM software contains code written by third parties. Such software will
+have its own individual LICENSE.TXT file in the directory in which it appears.
+This file will describe the copyrights, license, and restrictions which apply
+to that code.
+
+The disclaimer of warranty in the University of Illinois Open Source License
+applies to all code in the LLVM Distribution, and nothing in any of the
+other licenses gives permission to use the names of the LLVM Team or the
+University of Illinois to endorse or promote products derived from this
+Software.
+
+The following pieces of software have additional or alternate copyrights,
+licenses, and/or restrictions:
+
+Program             Directory
+-------             ---------
+Autoconf            llvm/autoconf
+                    llvm/projects/ModuleMaker/autoconf
+Google Test         llvm/utils/unittest/googletest
+OpenBSD regex       llvm/lib/Support/{reg*, COPYRIGHT.regex}
+pyyaml tests        llvm/test/YAMLParser/{*.data, LICENSE.TXT}
+ARM contributions   llvm/lib/Target/ARM/LICENSE.TXT
+md5 contributions   llvm/lib/Support/MD5.cpp llvm/include/llvm/Support/MD5.h
+
+=========================
+NOTICE file for slang.git
+=========================
+
+ Copyright (c) 2005-2011, The Android Open Source Project
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+
+
+ Apache License
+ Version 2.0, January 2004
+ http://www.apache.org/licenses/
+
+ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+ 1. Definitions.
+
+ "License" shall mean the terms and conditions for use, reproduction,
+ and distribution as defined by Sections 1 through 9 of this document.
+
+ "Licensor" shall mean the copyright owner or entity authorized by
+ the copyright owner that is granting the License.
+
+ "Legal Entity" shall mean the union of the acting entity and all
+ other entities that control, are controlled by, or are under common
+ control with that entity. For the purposes of this definition,
+ "control" means (i) the power, direct or indirect, to cause the
+ direction or management of such entity, whether by contract or
+ otherwise, or (ii) ownership of fifty percent (50%) or more of the
+ outstanding shares, or (iii) beneficial ownership of such entity.
+
+ "You" (or "Your") shall mean an individual or Legal Entity
+ exercising permissions granted by this License.
+
+ "Source" form shall mean the preferred form for making modifications,
+ including but not limited to software source code, documentation
+ source, and configuration files.
+
+ "Object" form shall mean any form resulting from mechanical
+ transformation or translation of a Source form, including but
+ not limited to compiled object code, generated documentation,
+ and conversions to other media types.
+
+ "Work" shall mean the work of authorship, whether in Source or
+ Object form, made available under the License, as indicated by a
+ copyright notice that is included in or attached to the work
+ (an example is provided in the Appendix below).
+
+ "Derivative Works" shall mean any work, whether in Source or Object
+ form, that is based on (or derived from) the Work and for which the
+ editorial revisions, annotations, elaborations, or other modifications
+ represent, as a whole, an original work of authorship. For the purposes
+ of this License, Derivative Works shall not include works that remain
+ separable from, or merely link (or bind by name) to the interfaces of,
+ the Work and Derivative Works thereof.
+
+ "Contribution" shall mean any work of authorship, including
+ the original version of the Work and any modifications or additions
+ to that Work or Derivative Works thereof, that is intentionally
+ submitted to Licensor for inclusion in the Work by the copyright owner
+ or by an individual or Legal Entity authorized to submit on behalf of
+ the copyright owner. For the purposes of this definition, "submitted"
+ means any form of electronic, verbal, or written communication sent
+ to the Licensor or its representatives, including but not limited to
+ communication on electronic mailing lists, source code control systems,
+ and issue tracking systems that are managed by, or on behalf of, the
+ Licensor for the purpose of discussing and improving the Work, but
+ excluding communication that is conspicuously marked or otherwise
+ designated in writing by the copyright owner as "Not a Contribution."
+
+ "Contributor" shall mean Licensor and any individual or Legal Entity
+ on behalf of whom a Contribution has been received by Licensor and
+ subsequently incorporated within the Work.
+
+ 2. Grant of Copyright License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ copyright license to reproduce, prepare Derivative Works of,
+ publicly display, publicly perform, sublicense, and distribute the
+ Work and such Derivative Works in Source or Object form.
+
+ 3. Grant of Patent License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ (except as stated in this section) patent license to make, have made,
+ use, offer to sell, sell, import, and otherwise transfer the Work,
+ where such license applies only to those patent claims licensable
+ by such Contributor that are necessarily infringed by their
+ Contribution(s) alone or by combination of their Contribution(s)
+ with the Work to which such Contribution(s) was submitted. If You
+ institute patent litigation against any entity (including a
+ cross-claim or counterclaim in a lawsuit) alleging that the Work
+ or a Contribution incorporated within the Work constitutes direct
+ or contributory patent infringement, then any patent licenses
+ granted to You under this License for that Work shall terminate
+ as of the date such litigation is filed.
+
+ 4. Redistribution. You may reproduce and distribute copies of the
+ Work or Derivative Works thereof in any medium, with or without
+ modifications, and in Source or Object form, provided that You
+ meet the following conditions:
+
+ (a) You must give any other recipients of the Work or
+ Derivative Works a copy of this License; and
+
+ (b) You must cause any modified files to carry prominent notices
+ stating that You changed the files; and
+
+ (c) You must retain, in the Source form of any Derivative Works
+ that You distribute, all copyright, patent, trademark, and
+ attribution notices from the Source form of the Work,
+ excluding those notices that do not pertain to any part of
+ the Derivative Works; and
+
+ (d) If the Work includes a "NOTICE" text file as part of its
+ distribution, then any Derivative Works that You distribute must
+ include a readable copy of the attribution notices contained
+ within such NOTICE file, excluding those notices that do not
+ pertain to any part of the Derivative Works, in at least one
+ of the following places: within a NOTICE text file distributed
+ as part of the Derivative Works; within the Source form or
+ documentation, if provided along with the Derivative Works; or,
+ within a display generated by the Derivative Works, if and
+ wherever such third-party notices normally appear. The contents
+ of the NOTICE file are for informational purposes only and
+ do not modify the License. You may add Your own attribution
+ notices within Derivative Works that You distribute, alongside
+ or as an addendum to the NOTICE text from the Work, provided
+ that such additional attribution notices cannot be construed
+ as modifying the License.
+
+ You may add Your own copyright statement to Your modifications and
+ may provide additional or different license terms and conditions
+ for use, reproduction, or distribution of Your modifications, or
+ for any such Derivative Works as a whole, provided Your use,
+ reproduction, and distribution of the Work otherwise complies with
+ the conditions stated in this License.
+
+ 5. Submission of Contributions. Unless You explicitly state otherwise,
+ any Contribution intentionally submitted for inclusion in the Work
+ by You to the Licensor shall be under the terms and conditions of
+ this License, without any additional terms or conditions.
+ Notwithstanding the above, nothing herein shall supersede or modify
+ the terms of any separate license agreement you may have executed
+ with Licensor regarding such Contributions.
+
+ 6. Trademarks. This License does not grant permission to use the trade
+ names, trademarks, service marks, or product names of the Licensor,
+ except as required for reasonable and customary use in describing the
+ origin of the Work and reproducing the content of the NOTICE file.
+
+ 7. Disclaimer of Warranty. Unless required by applicable law or
+ agreed to in writing, Licensor provides the Work (and each
+ Contributor provides its Contributions) on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+ implied, including, without limitation, any warranties or conditions
+ of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+ PARTICULAR PURPOSE. You are solely responsible for determining the
+ appropriateness of using or redistributing the Work and assume any
+ risks associated with Your exercise of permissions under this License.
+
+ 8. Limitation of Liability. In no event and under no legal theory,
+ whether in tort (including negligence), contract, or otherwise,
+ unless required by applicable law (such as deliberate and grossly
+ negligent acts) or agreed to in writing, shall any Contributor be
+ liable to You for damages, including any direct, indirect, special,
+ incidental, or consequential damages of any character arising as a
+ result of this License or out of the use or inability to use the
+ Work (including but not limited to damages for loss of goodwill,
+ work stoppage, computer failure or malfunction, or any and all
+ other commercial damages or losses), even if such Contributor
+ has been advised of the possibility of such damages.
+
+ 9. Accepting Warranty or Additional Liability. While redistributing
+ the Work or Derivative Works thereof, You may choose to offer,
+ and charge a fee for, acceptance of support, warranty, indemnity,
+ or other liability obligations and/or rights consistent with this
+ License. However, in accepting such obligations, You may act only
+ on Your own behalf and on Your sole responsibility, not on behalf
+ of any other Contributor, and only if You agree to indemnify,
+ defend, and hold each Contributor harmless for any liability
+ incurred by, or claims asserted against, such Contributor by reason
+ of your accepting any such warranty or additional liability.
+
+ END OF TERMS AND CONDITIONS
+
+
+
+===========================================
+NOTICE file for external/clang (clang.git).
+Note: libclang*.a are statically linked.
+===========================================
+
+==============================================================================
+LLVM Release License
+==============================================================================
+University of Illinois/NCSA
+Open Source License
+
+Copyright (c) 2007-2011 University of Illinois at Urbana-Champaign.
+All rights reserved.
+
+Developed by:
+
+ LLVM Team
+
+ University of Illinois at Urbana-Champaign
+
+ http://llvm.org
+
+Permission is hereby granted, free of charge, to any person obtaining a copy of
+this software and associated documentation files (the "Software"), to deal with
+the Software without restriction, including without limitation the rights to
+use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies
+of the Software, and to permit persons to whom the Software is furnished to do
+so, subject to the following conditions:
+
+ * Redistributions of source code must retain the above copyright notice,
+ this list of conditions and the following disclaimers.
+
+ * Redistributions in binary form must reproduce the above copyright notice,
+ this list of conditions and the following disclaimers in the
+ documentation and/or other materials provided with the distribution.
+
+ * Neither the names of the LLVM Team, University of Illinois at
+ Urbana-Champaign, nor the names of its contributors may be used to
+ endorse or promote products derived from this Software without specific
+ prior written permission.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS
+FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+CONTRIBUTORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS WITH THE
+SOFTWARE.
+
+==============================================================================
+The LLVM software contains code written by third parties. Such software will
+have its own individual LICENSE.TXT file in the directory in which it appears.
+This file will describe the copyrights, license, and restrictions which apply
+to that code.
+
+The disclaimer of warranty in the University of Illinois Open Source License
+applies to all code in the LLVM Distribution, and nothing in any of the
+other licenses gives permission to use the names of the LLVM Team or the
+University of Illinois to endorse or promote products derived from this
+Software.
+
+The following pieces of software have additional or alternate copyrights,
+licenses, and/or restrictions:
+
+Program             Directory
+-------             ---------
+<none yet>
+
+
+
+=========================================
+NOTICE file for external/llvm (llvm.git).
+Note: libLLVM*.a are statically linked.
+=========================================
+
+==============================================================================
+LLVM Release License
+==============================================================================
+University of Illinois/NCSA
+Open Source License
+
+Copyright (c) 2003-2011 University of Illinois at Urbana-Champaign.
+All rights reserved.
+
+Developed by:
+
+ LLVM Team
+
+ University of Illinois at Urbana-Champaign
+
+ http://llvm.org
+
+Permission is hereby granted, free of charge, to any person obtaining a copy of
+this software and associated documentation files (the "Software"), to deal with
+the Software without restriction, including without limitation the rights to
+use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies
+of the Software, and to permit persons to whom the Software is furnished to do
+so, subject to the following conditions:
+
+ * Redistributions of source code must retain the above copyright notice,
+ this list of conditions and the following disclaimers.
+
+ * Redistributions in binary form must reproduce the above copyright notice,
+ this list of conditions and the following disclaimers in the
+ documentation and/or other materials provided with the distribution.
+
+ * Neither the names of the LLVM Team, University of Illinois at
+ Urbana-Champaign, nor the names of its contributors may be used to
+ endorse or promote products derived from this Software without specific
+ prior written permission.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS
+FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+CONTRIBUTORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS WITH THE
+SOFTWARE.
+
+==============================================================================
+Copyrights and Licenses for Third Party Software Distributed with LLVM:
+==============================================================================
+The LLVM software contains code written by third parties. Such software will
+have its own individual LICENSE.TXT file in the directory in which it appears.
+This file will describe the copyrights, license, and restrictions which apply
+to that code.
+
+The disclaimer of warranty in the University of Illinois Open Source License
+applies to all code in the LLVM Distribution, and nothing in any of the
+other licenses gives permission to use the names of the LLVM Team or the
+University of Illinois to endorse or promote products derived from this
+Software.
+
+The following pieces of software have additional or alternate copyrights,
+licenses, and/or restrictions:
+
+Program             Directory
+-------             ---------
+Autoconf            llvm/autoconf
+                    llvm/projects/ModuleMaker/autoconf
+                    llvm/projects/sample/autoconf
+CellSPU backend     llvm/lib/Target/CellSPU/README.txt
+Google Test         llvm/utils/unittest/googletest
+OpenBSD regex       llvm/lib/Support/{reg*, COPYRIGHT.regex}
+
+==========================
+NOTICE file for libbcc.git
+==========================
+
+* NOTICE for lib/ExecutionEngine/, lib/ScriptCRT/, include and helper/.
+
+ Copyright (c) 2005-2011, The Android Open Source Project
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+
+
+ Apache License
+ Version 2.0, January 2004
+ http://www.apache.org/licenses/
+
+ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+ 1. Definitions.
+
+ "License" shall mean the terms and conditions for use, reproduction,
+ and distribution as defined by Sections 1 through 9 of this document.
+
+ "Licensor" shall mean the copyright owner or entity authorized by
+ the copyright owner that is granting the License.
+
+ "Legal Entity" shall mean the union of the acting entity and all
+ other entities that control, are controlled by, or are under common
+ control with that entity. For the purposes of this definition,
+ "control" means (i) the power, direct or indirect, to cause the
+ direction or management of such entity, whether by contract or
+ otherwise, or (ii) ownership of fifty percent (50%) or more of the
+ outstanding shares, or (iii) beneficial ownership of such entity.
+
+ "You" (or "Your") shall mean an individual or Legal Entity
+ exercising permissions granted by this License.
+
+ "Source" form shall mean the preferred form for making modifications,
+ including but not limited to software source code, documentation
+ source, and configuration files.
+
+ "Object" form shall mean any form resulting from mechanical
+ transformation or translation of a Source form, including but
+ not limited to compiled object code, generated documentation,
+ and conversions to other media types.
+
+ "Work" shall mean the work of authorship, whether in Source or
+ Object form, made available under the License, as indicated by a
+ copyright notice that is included in or attached to the work
+ (an example is provided in the Appendix below).
+
+ "Derivative Works" shall mean any work, whether in Source or Object
+ form, that is based on (or derived from) the Work and for which the
+ editorial revisions, annotations, elaborations, or other modifications
+ represent, as a whole, an original work of authorship. For the purposes
+ of this License, Derivative Works shall not include works that remain
+ separable from, or merely link (or bind by name) to the interfaces of,
+ the Work and Derivative Works thereof.
+
+ "Contribution" shall mean any work of authorship, including
+ the original version of the Work and any modifications or additions
+ to that Work or Derivative Works thereof, that is intentionally
+ submitted to Licensor for inclusion in the Work by the copyright owner
+ or by an individual or Legal Entity authorized to submit on behalf of
+ the copyright owner. For the purposes of this definition, "submitted"
+ means any form of electronic, verbal, or written communication sent
+ to the Licensor or its representatives, including but not limited to
+ communication on electronic mailing lists, source code control systems,
+ and issue tracking systems that are managed by, or on behalf of, the
+ Licensor for the purpose of discussing and improving the Work, but
+ excluding communication that is conspicuously marked or otherwise
+ designated in writing by the copyright owner as "Not a Contribution."
+
+ "Contributor" shall mean Licensor and any individual or Legal Entity
+ on behalf of whom a Contribution has been received by Licensor and
+ subsequently incorporated within the Work.
+
+ 2. Grant of Copyright License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ copyright license to reproduce, prepare Derivative Works of,
+ publicly display, publicly perform, sublicense, and distribute the
+ Work and such Derivative Works in Source or Object form.
+
+ 3. Grant of Patent License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ (except as stated in this section) patent license to make, have made,
+ use, offer to sell, sell, import, and otherwise transfer the Work,
+ where such license applies only to those patent claims licensable
+ by such Contributor that are necessarily infringed by their
+ Contribution(s) alone or by combination of their Contribution(s)
+ with the Work to which such Contribution(s) was submitted. If You
+ institute patent litigation against any entity (including a
+ cross-claim or counterclaim in a lawsuit) alleging that the Work
+ or a Contribution incorporated within the Work constitutes direct
+ or contributory patent infringement, then any patent licenses
+ granted to You under this License for that Work shall terminate
+ as of the date such litigation is filed.
+
+ 4. Redistribution. You may reproduce and distribute copies of the
+ Work or Derivative Works thereof in any medium, with or without
+ modifications, and in Source or Object form, provided that You
+ meet the following conditions:
+
+ (a) You must give any other recipients of the Work or
+ Derivative Works a copy of this License; and
+
+ (b) You must cause any modified files to carry prominent notices
+ stating that You changed the files; and
+
+ (c) You must retain, in the Source form of any Derivative Works
+ that You distribute, all copyright, patent, trademark, and
+ attribution notices from the Source form of the Work,
+ excluding those notices that do not pertain to any part of
+ the Derivative Works; and
+
+ (d) If the Work includes a "NOTICE" text file as part of its
+ distribution, then any Derivative Works that You distribute must
+ include a readable copy of the attribution notices contained
+ within such NOTICE file, excluding those notices that do not
+ pertain to any part of the Derivative Works, in at least one
+ of the following places: within a NOTICE text file distributed
+ as part of the Derivative Works; within the Source form or
+ documentation, if provided along with the Derivative Works; or,
+ within a display generated by the Derivative Works, if and
+ wherever such third-party notices normally appear. The contents
+ of the NOTICE file are for informational purposes only and
+ do not modify the License. You may add Your own attribution
+ notices within Derivative Works that You distribute, alongside
+ or as an addendum to the NOTICE text from the Work, provided
+ that such additional attribution notices cannot be construed
+ as modifying the License.
+
+ You may add Your own copyright statement to Your modifications and
+ may provide additional or different license terms and conditions
+ for use, reproduction, or distribution of Your modifications, or
+ for any such Derivative Works as a whole, provided Your use,
+ reproduction, and distribution of the Work otherwise complies with
+ the conditions stated in this License.
+
+ 5. Submission of Contributions. Unless You explicitly state otherwise,
+ any Contribution intentionally submitted for inclusion in the Work
+ by You to the Licensor shall be under the terms and conditions of
+ this License, without any additional terms or conditions.
+ Notwithstanding the above, nothing herein shall supersede or modify
+ the terms of any separate license agreement you may have executed
+ with Licensor regarding such Contributions.
+
+ 6. Trademarks. This License does not grant permission to use the trade
+ names, trademarks, service marks, or product names of the Licensor,
+ except as required for reasonable and customary use in describing the
+ origin of the Work and reproducing the content of the NOTICE file.
+
+ 7. Disclaimer of Warranty. Unless required by applicable law or
+ agreed to in writing, Licensor provides the Work (and each
+ Contributor provides its Contributions) on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+ implied, including, without limitation, any warranties or conditions
+ of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+ PARTICULAR PURPOSE. You are solely responsible for determining the
+ appropriateness of using or redistributing the Work and assume any
+ risks associated with Your exercise of permissions under this License.
+
+ 8. Limitation of Liability. In no event and under no legal theory,
+ whether in tort (including negligence), contract, or otherwise,
+ unless required by applicable law (such as deliberate and grossly
+ negligent acts) or agreed to in writing, shall any Contributor be
+ liable to You for damages, including any direct, indirect, special,
+ incidental, or consequential damages of any character arising as a
+ result of this License or out of the use or inability to use the
+ Work (including but not limited to damages for loss of goodwill,
+ work stoppage, computer failure or malfunction, or any and all
+ other commercial damages or losses), even if such Contributor
+ has been advised of the possibility of such damages.
+
+ 9. Accepting Warranty or Additional Liability. While redistributing
+ the Work or Derivative Works thereof, You may choose to offer,
+ and charge a fee for, acceptance of support, warranty, indemnity,
+ or other liability obligations and/or rights consistent with this
+ License. However, in accepting such obligations, You may act only
+ on Your own behalf and on Your sole responsibility, not on behalf
+ of any other Contributor, and only if You agree to indemnify,
+ defend, and hold each Contributor harmless for any liability
+ incurred by, or claims asserted against, such Contributor by reason
+ of your accepting any such warranty or additional liability.
+
+ END OF TERMS AND CONDITIONS
+
+
+
+* NOTICE for runtime/ and lib/CodeGen/.
+ Note: The NOTICE is the same for another git project, external/llvm.git.
+
+==============================================================================
+LLVM Release License
+==============================================================================
+University of Illinois/NCSA
+Open Source License
+
+Copyright (c) 2003-2011 University of Illinois at Urbana-Champaign.
+All rights reserved.
+
+Developed by:
+
+ LLVM Team
+
+ University of Illinois at Urbana-Champaign
+
+ http://llvm.org
+
+Permission is hereby granted, free of charge, to any person obtaining a copy of
+this software and associated documentation files (the "Software"), to deal with
+the Software without restriction, including without limitation the rights to
+use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies
+of the Software, and to permit persons to whom the Software is furnished to do
+so, subject to the following conditions:
+
+ * Redistributions of source code must retain the above copyright notice,
+ this list of conditions and the following disclaimers.
+
+ * Redistributions in binary form must reproduce the above copyright notice,
+ this list of conditions and the following disclaimers in the
+ documentation and/or other materials provided with the distribution.
+
+ * Neither the names of the LLVM Team, University of Illinois at
+ Urbana-Champaign, nor the names of its contributors may be used to
+ endorse or promote products derived from this Software without specific
+ prior written permission.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS
+FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+CONTRIBUTORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS WITH THE
+SOFTWARE.
+
+==============================================================================
+Copyrights and Licenses for Third Party Software Distributed with LLVM:
+==============================================================================
+The LLVM software contains code written by third parties. Such software will
+have its own individual LICENSE.TXT file in the directory in which it appears.
+This file will describe the copyrights, license, and restrictions which apply
+to that code.
+
+The disclaimer of warranty in the University of Illinois Open Source License
+applies to all code in the LLVM Distribution, and nothing in any of the
+other licenses gives permission to use the names of the LLVM Team or the
+University of Illinois to endorse or promote products derived from this
+Software.
+
+The following pieces of software have additional or alternate copyrights,
+licenses, and/or restrictions:
+
+Program             Directory
+-------             ---------
+Autoconf            llvm/autoconf
+                    llvm/projects/ModuleMaker/autoconf
+                    llvm/projects/sample/autoconf
+CellSPU backend     llvm/lib/Target/CellSPU/README.txt
+Google Test         llvm/utils/unittest/googletest
+OpenBSD regex       llvm/lib/Support/{reg*, COPYRIGHT.regex}
+
+
+
+* NOTICE for tests/disassem.cpp and tests/disassem.h.
+
+/* $NetBSD: disassem.c,v 1.14 2003/03/27 16:58:36 mycroft Exp $ */
+
+/*-
+ * Copyright (c) 1996 Mark Brinicombe.
+ * Copyright (c) 1996 Brini.
+ *
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. All advertising materials mentioning features or use of this software
+ * must display the following acknowledgement:
+ * This product includes software developed by Brini.
+ * 4. The name of the company nor the name of the author may be used to
+ * endorse or promote products derived from this software without specific
+ * prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY BRINI ``AS IS'' AND ANY EXPRESS OR IMPLIED
+ * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ * IN NO EVENT SHALL BRINI OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT,
+ * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+ * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * RiscBSD kernel project
+ *
+ * db_disasm.c
+ *
+ * Kernel disassembler
+ *
+ * Created : 10/02/96
+ *
+ * Structured after the sparc/sparc/db_disasm.c by David S. Miller &
+ * Paul Kranenburg
+ *
+ * This code is not complete. Not all instructions are disassembled.
+ */
diff --git a/current/bin/bcc_compat.exe b/current/bin/bcc_compat.exe
new file mode 100755
index 0000000..5ec833c
--- /dev/null
+++ b/current/bin/bcc_compat.exe
Binary files differ
diff --git a/current/bin/llvm-rs-cc.exe b/current/bin/llvm-rs-cc.exe
new file mode 100755
index 0000000..222582c
--- /dev/null
+++ b/current/bin/llvm-rs-cc.exe
Binary files differ
diff --git a/current/clang-include/__clang_cuda_cmath.h b/current/clang-include/__clang_cuda_cmath.h
new file mode 100644
index 0000000..ae7ff2f
--- /dev/null
+++ b/current/clang-include/__clang_cuda_cmath.h
@@ -0,0 +1,148 @@
+/*===---- __clang_cuda_cmath.h - Device-side CUDA cmath support ------------===
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+ * THE SOFTWARE.
+ *
+ *===-----------------------------------------------------------------------===
+ */
+#ifndef __CLANG_CUDA_CMATH_H__
+#define __CLANG_CUDA_CMATH_H__
+#ifndef __CUDA__
+#error "This file is for CUDA compilation only."
+#endif
+
+// CUDA lets us use various std math functions on the device side. This file
+// works in concert with __clang_cuda_math_forward_declares.h to make this work.
+//
+// Specifically, the forward-declares header declares __device__ overloads for
+// these functions in the global namespace, then pulls them into namespace std
+// with 'using' statements. Then this file implements those functions, after
+// the implementations have been pulled in.
+//
+// It's important that we declare the functions in the global namespace and pull
+// them into namespace std with using statements, as opposed to simply declaring
+// these functions in namespace std, because our device functions need to
+// overload the standard library functions, which may be declared in the global
+// namespace or in std, depending on the degree of conformance of the stdlib
+// implementation. Declaring in the global namespace and pulling into namespace
+// std covers all of the known knowns.
+
+#define __DEVICE__ static __device__ __inline__ __attribute__((always_inline))
+
+__DEVICE__ long long abs(long long __n) { return ::llabs(__n); }
+__DEVICE__ long abs(long __n) { return ::labs(__n); }
+__DEVICE__ float abs(float __x) { return ::fabsf(__x); }
+__DEVICE__ double abs(double __x) { return ::fabs(__x); }
+__DEVICE__ float acos(float __x) { return ::acosf(__x); }
+__DEVICE__ float asin(float __x) { return ::asinf(__x); }
+__DEVICE__ float atan(float __x) { return ::atanf(__x); }
+__DEVICE__ float atan2(float __x, float __y) { return ::atan2f(__x, __y); }
+__DEVICE__ float ceil(float __x) { return ::ceilf(__x); }
+__DEVICE__ float cos(float __x) { return ::cosf(__x); }
+__DEVICE__ float cosh(float __x) { return ::coshf(__x); }
+__DEVICE__ float exp(float __x) { return ::expf(__x); }
+__DEVICE__ float fabs(float __x) { return ::fabsf(__x); }
+__DEVICE__ float floor(float __x) { return ::floorf(__x); }
+__DEVICE__ float fmod(float __x, float __y) { return ::fmodf(__x, __y); }
+__DEVICE__ int fpclassify(float __x) {
+ return __builtin_fpclassify(FP_NAN, FP_INFINITE, FP_NORMAL, FP_SUBNORMAL,
+ FP_ZERO, __x);
+}
+__DEVICE__ int fpclassify(double __x) {
+ return __builtin_fpclassify(FP_NAN, FP_INFINITE, FP_NORMAL, FP_SUBNORMAL,
+ FP_ZERO, __x);
+}
+__DEVICE__ float frexp(float __arg, int *__exp) {
+ return ::frexpf(__arg, __exp);
+}
+__DEVICE__ bool isinf(float __x) { return ::__isinff(__x); }
+__DEVICE__ bool isinf(double __x) { return ::__isinf(__x); }
+__DEVICE__ bool isfinite(float __x) { return ::__finitef(__x); }
+__DEVICE__ bool isfinite(double __x) { return ::__finite(__x); }
+__DEVICE__ bool isgreater(float __x, float __y) {
+ return __builtin_isgreater(__x, __y);
+}
+__DEVICE__ bool isgreater(double __x, double __y) {
+ return __builtin_isgreater(__x, __y);
+}
+__DEVICE__ bool isgreaterequal(float __x, float __y) {
+ return __builtin_isgreaterequal(__x, __y);
+}
+__DEVICE__ bool isgreaterequal(double __x, double __y) {
+ return __builtin_isgreaterequal(__x, __y);
+}
+__DEVICE__ bool isless(float __x, float __y) {
+ return __builtin_isless(__x, __y);
+}
+__DEVICE__ bool isless(double __x, double __y) {
+ return __builtin_isless(__x, __y);
+}
+__DEVICE__ bool islessequal(float __x, float __y) {
+ return __builtin_islessequal(__x, __y);
+}
+__DEVICE__ bool islessequal(double __x, double __y) {
+ return __builtin_islessequal(__x, __y);
+}
+__DEVICE__ bool islessgreater(float __x, float __y) {
+ return __builtin_islessgreater(__x, __y);
+}
+__DEVICE__ bool islessgreater(double __x, double __y) {
+ return __builtin_islessgreater(__x, __y);
+}
+__DEVICE__ bool isnan(float __x) { return ::__isnanf(__x); }
+__DEVICE__ bool isnan(double __x) { return ::__isnan(__x); }
+__DEVICE__ bool isnormal(float __x) { return __builtin_isnormal(__x); }
+__DEVICE__ bool isnormal(double __x) { return __builtin_isnormal(__x); }
+__DEVICE__ bool isunordered(float __x, float __y) {
+ return __builtin_isunordered(__x, __y);
+}
+__DEVICE__ bool isunordered(double __x, double __y) {
+ return __builtin_isunordered(__x, __y);
+}
+__DEVICE__ float ldexp(float __arg, int __exp) {
+ return ::ldexpf(__arg, __exp);
+}
+__DEVICE__ float log(float __x) { return ::logf(__x); }
+__DEVICE__ float log10(float __x) { return ::log10f(__x); }
+__DEVICE__ float modf(float __x, float *__iptr) { return ::modff(__x, __iptr); }
+__DEVICE__ float nexttoward(float __from, float __to) {
+ return __builtin_nexttowardf(__from, __to);
+}
+__DEVICE__ double nexttoward(double __from, double __to) {
+ return __builtin_nexttoward(__from, __to);
+}
+__DEVICE__ float pow(float __base, float __exp) {
+ return ::powf(__base, __exp);
+}
+__DEVICE__ float pow(float __base, int __iexp) {
+ return ::powif(__base, __iexp);
+}
+__DEVICE__ double pow(double __base, int __iexp) {
+ return ::powi(__base, __iexp);
+}
+__DEVICE__ bool signbit(float __x) { return ::__signbitf(__x); }
+__DEVICE__ bool signbit(double __x) { return ::__signbit(__x); }
+__DEVICE__ float sin(float __x) { return ::sinf(__x); }
+__DEVICE__ float sinh(float __x) { return ::sinhf(__x); }
+__DEVICE__ float sqrt(float __x) { return ::sqrtf(__x); }
+__DEVICE__ float tan(float __x) { return ::tanf(__x); }
+__DEVICE__ float tanh(float __x) { return ::tanhf(__x); }
+
+#undef __DEVICE__
+
+#endif
diff --git a/current/clang-include/__clang_cuda_intrinsics.h b/current/clang-include/__clang_cuda_intrinsics.h
new file mode 100644
index 0000000..3df41fa
--- /dev/null
+++ b/current/clang-include/__clang_cuda_intrinsics.h
@@ -0,0 +1,322 @@
+/*===--- __clang_cuda_intrinsics.h - Device-side CUDA intrinsic wrappers ---===
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+ * THE SOFTWARE.
+ *
+ *===-----------------------------------------------------------------------===
+ */
+#ifndef __CLANG_CUDA_INTRINSICS_H__
+#define __CLANG_CUDA_INTRINSICS_H__
+#ifndef __CUDA__
+#error "This file is for CUDA compilation only."
+#endif
+
+// sm_30 intrinsics: __shfl and __shfl_{up,down,xor}.
+
+#define __SM_30_INTRINSICS_H__
+#define __SM_30_INTRINSICS_HPP__
+
+#if !defined(__CUDA_ARCH__) || __CUDA_ARCH__ >= 300
+
+#pragma push_macro("__MAKE_SHUFFLES")
+#define __MAKE_SHUFFLES(__FnName, __IntIntrinsic, __FloatIntrinsic, __Mask) \
+ inline __device__ int __FnName(int __in, int __offset, \
+ int __width = warpSize) { \
+ return __IntIntrinsic(__in, __offset, \
+ ((warpSize - __width) << 8) | (__Mask)); \
+ } \
+ inline __device__ float __FnName(float __in, int __offset, \
+ int __width = warpSize) { \
+ return __FloatIntrinsic(__in, __offset, \
+ ((warpSize - __width) << 8) | (__Mask)); \
+ } \
+ inline __device__ unsigned int __FnName(unsigned int __in, int __offset, \
+ int __width = warpSize) { \
+ return static_cast<unsigned int>( \
+ ::__FnName(static_cast<int>(__in), __offset, __width)); \
+ } \
+ inline __device__ long long __FnName(long long __in, int __offset, \
+ int __width = warpSize) { \
+ struct __Bits { \
+ int __a, __b; \
+ }; \
+ _Static_assert(sizeof(__in) == sizeof(__Bits)); \
+ _Static_assert(sizeof(__Bits) == 2 * sizeof(int)); \
+ __Bits __tmp; \
+    memcpy(&__tmp, &__in, sizeof(__in));                                      \
+ __tmp.__a = ::__FnName(__tmp.__a, __offset, __width); \
+ __tmp.__b = ::__FnName(__tmp.__b, __offset, __width); \
+ long long __out; \
+ memcpy(&__out, &__tmp, sizeof(__tmp)); \
+ return __out; \
+ } \
+ inline __device__ unsigned long long __FnName( \
+ unsigned long long __in, int __offset, int __width = warpSize) { \
+    /* Cast to long long so the 64-bit overload above is selected rather     \
+       than recursing into this one. */                                       \
+    return static_cast<unsigned long long>(                                   \
+        ::__FnName(static_cast<long long>(__in), __offset, __width));         \
+ } \
+ inline __device__ double __FnName(double __in, int __offset, \
+ int __width = warpSize) { \
+ long long __tmp; \
+ _Static_assert(sizeof(__tmp) == sizeof(__in)); \
+ memcpy(&__tmp, &__in, sizeof(__in)); \
+ __tmp = ::__FnName(__tmp, __offset, __width); \
+ double __out; \
+ memcpy(&__out, &__tmp, sizeof(__out)); \
+ return __out; \
+ }
+
+__MAKE_SHUFFLES(__shfl, __nvvm_shfl_idx_i32, __nvvm_shfl_idx_f32, 0x1f);
+// We use 0 rather than 31 as our mask, because shfl.up applies to lanes >=
+// maxLane.
+__MAKE_SHUFFLES(__shfl_up, __nvvm_shfl_up_i32, __nvvm_shfl_up_f32, 0);
+__MAKE_SHUFFLES(__shfl_down, __nvvm_shfl_down_i32, __nvvm_shfl_down_f32, 0x1f);
+__MAKE_SHUFFLES(__shfl_xor, __nvvm_shfl_bfly_i32, __nvvm_shfl_bfly_f32, 0x1f);
+
+#pragma pop_macro("__MAKE_SHUFFLES")
+
+#endif // !defined(__CUDA_ARCH__) || __CUDA_ARCH__ >= 300
+
+// sm_32 intrinsics: __ldg and __funnelshift_{l,lc,r,rc}.
+
+// Prevent the vanilla sm_32 intrinsics header from being included.
+#define __SM_32_INTRINSICS_H__
+#define __SM_32_INTRINSICS_HPP__
+
+#if !defined(__CUDA_ARCH__) || __CUDA_ARCH__ >= 320
+
+inline __device__ char __ldg(const char *ptr) { return __nvvm_ldg_c(ptr); }
+inline __device__ short __ldg(const short *ptr) { return __nvvm_ldg_s(ptr); }
+inline __device__ int __ldg(const int *ptr) { return __nvvm_ldg_i(ptr); }
+inline __device__ long __ldg(const long *ptr) { return __nvvm_ldg_l(ptr); }
+inline __device__ long long __ldg(const long long *ptr) {
+ return __nvvm_ldg_ll(ptr);
+}
+inline __device__ unsigned char __ldg(const unsigned char *ptr) {
+ return __nvvm_ldg_uc(ptr);
+}
+inline __device__ unsigned short __ldg(const unsigned short *ptr) {
+ return __nvvm_ldg_us(ptr);
+}
+inline __device__ unsigned int __ldg(const unsigned int *ptr) {
+ return __nvvm_ldg_ui(ptr);
+}
+inline __device__ unsigned long __ldg(const unsigned long *ptr) {
+ return __nvvm_ldg_ul(ptr);
+}
+inline __device__ unsigned long long __ldg(const unsigned long long *ptr) {
+ return __nvvm_ldg_ull(ptr);
+}
+inline __device__ float __ldg(const float *ptr) { return __nvvm_ldg_f(ptr); }
+inline __device__ double __ldg(const double *ptr) { return __nvvm_ldg_d(ptr); }
+
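+// Hypothetical usage sketch (not part of this header): __ldg routes
+// read-only global loads through the non-coherent (texture) cache path.
+//
+//   __global__ void __scale(float *__out, const float *__in, float __k) {
+//     __out[threadIdx.x] = __k * __ldg(&__in[threadIdx.x]);
+//   }
+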
+inline __device__ char2 __ldg(const char2 *ptr) {
+ typedef char c2 __attribute__((ext_vector_type(2)));
+ // We can assume that ptr is aligned at least to char2's alignment, but the
+  // load will assume that ptr is aligned to c2's alignment. This is only
+ // safe if alignof(c2) <= alignof(char2).
+ c2 rv = __nvvm_ldg_c2(reinterpret_cast<const c2 *>(ptr));
+ char2 ret;
+ ret.x = rv[0];
+ ret.y = rv[1];
+ return ret;
+}
+inline __device__ char4 __ldg(const char4 *ptr) {
+ typedef char c4 __attribute__((ext_vector_type(4)));
+ c4 rv = __nvvm_ldg_c4(reinterpret_cast<const c4 *>(ptr));
+ char4 ret;
+ ret.x = rv[0];
+ ret.y = rv[1];
+ ret.z = rv[2];
+ ret.w = rv[3];
+ return ret;
+}
+inline __device__ short2 __ldg(const short2 *ptr) {
+ typedef short s2 __attribute__((ext_vector_type(2)));
+ s2 rv = __nvvm_ldg_s2(reinterpret_cast<const s2 *>(ptr));
+ short2 ret;
+ ret.x = rv[0];
+ ret.y = rv[1];
+ return ret;
+}
+inline __device__ short4 __ldg(const short4 *ptr) {
+ typedef short s4 __attribute__((ext_vector_type(4)));
+ s4 rv = __nvvm_ldg_s4(reinterpret_cast<const s4 *>(ptr));
+ short4 ret;
+ ret.x = rv[0];
+ ret.y = rv[1];
+ ret.z = rv[2];
+ ret.w = rv[3];
+ return ret;
+}
+inline __device__ int2 __ldg(const int2 *ptr) {
+ typedef int i2 __attribute__((ext_vector_type(2)));
+ i2 rv = __nvvm_ldg_i2(reinterpret_cast<const i2 *>(ptr));
+ int2 ret;
+ ret.x = rv[0];
+ ret.y = rv[1];
+ return ret;
+}
+inline __device__ int4 __ldg(const int4 *ptr) {
+ typedef int i4 __attribute__((ext_vector_type(4)));
+ i4 rv = __nvvm_ldg_i4(reinterpret_cast<const i4 *>(ptr));
+ int4 ret;
+ ret.x = rv[0];
+ ret.y = rv[1];
+ ret.z = rv[2];
+ ret.w = rv[3];
+ return ret;
+}
+inline __device__ longlong2 __ldg(const longlong2 *ptr) {
+ typedef long long ll2 __attribute__((ext_vector_type(2)));
+ ll2 rv = __nvvm_ldg_ll2(reinterpret_cast<const ll2 *>(ptr));
+ longlong2 ret;
+ ret.x = rv[0];
+ ret.y = rv[1];
+ return ret;
+}
+
+inline __device__ uchar2 __ldg(const uchar2 *ptr) {
+ typedef unsigned char uc2 __attribute__((ext_vector_type(2)));
+ uc2 rv = __nvvm_ldg_uc2(reinterpret_cast<const uc2 *>(ptr));
+ uchar2 ret;
+ ret.x = rv[0];
+ ret.y = rv[1];
+ return ret;
+}
+inline __device__ uchar4 __ldg(const uchar4 *ptr) {
+ typedef unsigned char uc4 __attribute__((ext_vector_type(4)));
+ uc4 rv = __nvvm_ldg_uc4(reinterpret_cast<const uc4 *>(ptr));
+ uchar4 ret;
+ ret.x = rv[0];
+ ret.y = rv[1];
+ ret.z = rv[2];
+ ret.w = rv[3];
+ return ret;
+}
+inline __device__ ushort2 __ldg(const ushort2 *ptr) {
+ typedef unsigned short us2 __attribute__((ext_vector_type(2)));
+ us2 rv = __nvvm_ldg_us2(reinterpret_cast<const us2 *>(ptr));
+ ushort2 ret;
+ ret.x = rv[0];
+ ret.y = rv[1];
+ return ret;
+}
+inline __device__ ushort4 __ldg(const ushort4 *ptr) {
+ typedef unsigned short us4 __attribute__((ext_vector_type(4)));
+ us4 rv = __nvvm_ldg_us4(reinterpret_cast<const us4 *>(ptr));
+ ushort4 ret;
+ ret.x = rv[0];
+ ret.y = rv[1];
+ ret.z = rv[2];
+ ret.w = rv[3];
+ return ret;
+}
+inline __device__ uint2 __ldg(const uint2 *ptr) {
+ typedef unsigned int ui2 __attribute__((ext_vector_type(2)));
+ ui2 rv = __nvvm_ldg_ui2(reinterpret_cast<const ui2 *>(ptr));
+ uint2 ret;
+ ret.x = rv[0];
+ ret.y = rv[1];
+ return ret;
+}
+inline __device__ uint4 __ldg(const uint4 *ptr) {
+ typedef unsigned int ui4 __attribute__((ext_vector_type(4)));
+ ui4 rv = __nvvm_ldg_ui4(reinterpret_cast<const ui4 *>(ptr));
+ uint4 ret;
+ ret.x = rv[0];
+ ret.y = rv[1];
+ ret.z = rv[2];
+ ret.w = rv[3];
+ return ret;
+}
+inline __device__ ulonglong2 __ldg(const ulonglong2 *ptr) {
+ typedef unsigned long long ull2 __attribute__((ext_vector_type(2)));
+ ull2 rv = __nvvm_ldg_ull2(reinterpret_cast<const ull2 *>(ptr));
+ ulonglong2 ret;
+ ret.x = rv[0];
+ ret.y = rv[1];
+ return ret;
+}
+
+inline __device__ float2 __ldg(const float2 *ptr) {
+ typedef float f2 __attribute__((ext_vector_type(2)));
+ f2 rv = __nvvm_ldg_f2(reinterpret_cast<const f2 *>(ptr));
+ float2 ret;
+ ret.x = rv[0];
+ ret.y = rv[1];
+ return ret;
+}
+inline __device__ float4 __ldg(const float4 *ptr) {
+ typedef float f4 __attribute__((ext_vector_type(4)));
+ f4 rv = __nvvm_ldg_f4(reinterpret_cast<const f4 *>(ptr));
+ float4 ret;
+ ret.x = rv[0];
+ ret.y = rv[1];
+ ret.z = rv[2];
+ ret.w = rv[3];
+ return ret;
+}
+inline __device__ double2 __ldg(const double2 *ptr) {
+ typedef double d2 __attribute__((ext_vector_type(2)));
+ d2 rv = __nvvm_ldg_d2(reinterpret_cast<const d2 *>(ptr));
+ double2 ret;
+ ret.x = rv[0];
+ ret.y = rv[1];
+ return ret;
+}
+
+// TODO: Implement these as intrinsics, so the backend can work its magic on
+// these. Alternatively, we could implement these as plain C and try to get
+// llvm to recognize the relevant patterns.
+inline __device__ unsigned __funnelshift_l(unsigned low32, unsigned high32,
+ unsigned shiftWidth) {
+ unsigned result;
+ asm("shf.l.wrap.b32 %0, %1, %2, %3;"
+ : "=r"(result)
+ : "r"(low32), "r"(high32), "r"(shiftWidth));
+ return result;
+}
+inline __device__ unsigned __funnelshift_lc(unsigned low32, unsigned high32,
+ unsigned shiftWidth) {
+ unsigned result;
+ asm("shf.l.clamp.b32 %0, %1, %2, %3;"
+ : "=r"(result)
+ : "r"(low32), "r"(high32), "r"(shiftWidth));
+ return result;
+}
+inline __device__ unsigned __funnelshift_r(unsigned low32, unsigned high32,
+ unsigned shiftWidth) {
+ unsigned result;
+ asm("shf.r.wrap.b32 %0, %1, %2, %3;"
+ : "=r"(result)
+ : "r"(low32), "r"(high32), "r"(shiftWidth));
+ return result;
+}
+inline __device__ unsigned __funnelshift_rc(unsigned low32, unsigned high32,
+ unsigned shiftWidth) {
+ unsigned ret;
+ asm("shf.r.clamp.b32 %0, %1, %2, %3;"
+ : "=r"(ret)
+ : "r"(low32), "r"(high32), "r"(shiftWidth));
+ return ret;
+}
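+
+// A hypothetical sketch (not part of this header): with both operands equal,
+// the wrapping left funnel shift degenerates into a 32-bit rotate.
+//
+//   inline __device__ unsigned __rotl32(unsigned __x, unsigned __n) {
+//     return __funnelshift_l(__x, __x, __n);
+//   }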
+
+#endif // !defined(__CUDA_ARCH__) || __CUDA_ARCH__ >= 320
+
+#endif // defined(__CLANG_CUDA_INTRINSICS_H__)
diff --git a/current/clang-include/__clang_cuda_math_forward_declares.h b/current/clang-include/__clang_cuda_math_forward_declares.h
new file mode 100644
index 0000000..3f2834d
--- /dev/null
+++ b/current/clang-include/__clang_cuda_math_forward_declares.h
@@ -0,0 +1,263 @@
+/*===- __clang_cuda_math_forward_declares.h - Prototypes of __device__ math fns -===
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+ * THE SOFTWARE.
+ *
+ *===-----------------------------------------------------------------------===
+ */
+#ifndef __CLANG_CUDA_MATH_FORWARD_DECLARES_H__
+#define __CLANG_CUDA_MATH_FORWARD_DECLARES_H__
+#ifndef __CUDA__
+#error "This file is for CUDA compilation only."
+#endif
+
+// This file forward-declares some of the math functions we (or the CUDA
+// headers) will define later. We need to do this before cmath is included,
+// because the standard library may have constexpr math functions. In the
+// absence of a prior __device__ decl, those constexpr functions may become
+// implicitly host+device. host+device functions can't be overloaded, so that
+// would preclude the use of our own __device__ overloads for these functions.
+
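+// For example (hypothetical): if the C++ standard library declared
+//
+//   constexpr float fabs(float);
+//
+// before any __device__ fabs had been seen, that function would be treated
+// as implicitly host+device, and the __device__ overload declared below
+// could then no longer coexist with it.
+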
+#pragma push_macro("__DEVICE__")
+#define __DEVICE__ \
+ static __inline__ __attribute__((always_inline)) __attribute__((device))
+
+__DEVICE__ double abs(double);
+__DEVICE__ float abs(float);
+__DEVICE__ int abs(int);
+__DEVICE__ long abs(long);
+__DEVICE__ long long abs(long long);
+__DEVICE__ double acos(double);
+__DEVICE__ float acos(float);
+__DEVICE__ double acosh(double);
+__DEVICE__ float acosh(float);
+__DEVICE__ double asin(double);
+__DEVICE__ float asin(float);
+__DEVICE__ double asinh(double);
+__DEVICE__ float asinh(float);
+__DEVICE__ double atan2(double, double);
+__DEVICE__ float atan2(float, float);
+__DEVICE__ double atan(double);
+__DEVICE__ float atan(float);
+__DEVICE__ double atanh(double);
+__DEVICE__ float atanh(float);
+__DEVICE__ double cbrt(double);
+__DEVICE__ float cbrt(float);
+__DEVICE__ double ceil(double);
+__DEVICE__ float ceil(float);
+__DEVICE__ double copysign(double, double);
+__DEVICE__ float copysign(float, float);
+__DEVICE__ double cos(double);
+__DEVICE__ float cos(float);
+__DEVICE__ double cosh(double);
+__DEVICE__ float cosh(float);
+__DEVICE__ double erfc(double);
+__DEVICE__ float erfc(float);
+__DEVICE__ double erf(double);
+__DEVICE__ float erf(float);
+__DEVICE__ double exp2(double);
+__DEVICE__ float exp2(float);
+__DEVICE__ double exp(double);
+__DEVICE__ float exp(float);
+__DEVICE__ double expm1(double);
+__DEVICE__ float expm1(float);
+__DEVICE__ double fabs(double);
+__DEVICE__ float fabs(float);
+__DEVICE__ double fdim(double, double);
+__DEVICE__ float fdim(float, float);
+__DEVICE__ double floor(double);
+__DEVICE__ float floor(float);
+__DEVICE__ double fma(double, double, double);
+__DEVICE__ float fma(float, float, float);
+__DEVICE__ double fmax(double, double);
+__DEVICE__ float fmax(float, float);
+__DEVICE__ double fmin(double, double);
+__DEVICE__ float fmin(float, float);
+__DEVICE__ double fmod(double, double);
+__DEVICE__ float fmod(float, float);
+__DEVICE__ int fpclassify(double);
+__DEVICE__ int fpclassify(float);
+__DEVICE__ double frexp(double, int *);
+__DEVICE__ float frexp(float, int *);
+__DEVICE__ double hypot(double, double);
+__DEVICE__ float hypot(float, float);
+__DEVICE__ int ilogb(double);
+__DEVICE__ int ilogb(float);
+__DEVICE__ bool isfinite(double);
+__DEVICE__ bool isfinite(float);
+__DEVICE__ bool isgreater(double, double);
+__DEVICE__ bool isgreaterequal(double, double);
+__DEVICE__ bool isgreaterequal(float, float);
+__DEVICE__ bool isgreater(float, float);
+__DEVICE__ bool isinf(double);
+__DEVICE__ bool isinf(float);
+__DEVICE__ bool isless(double, double);
+__DEVICE__ bool islessequal(double, double);
+__DEVICE__ bool islessequal(float, float);
+__DEVICE__ bool isless(float, float);
+__DEVICE__ bool islessgreater(double, double);
+__DEVICE__ bool islessgreater(float, float);
+__DEVICE__ bool isnan(double);
+__DEVICE__ bool isnan(float);
+__DEVICE__ bool isnormal(double);
+__DEVICE__ bool isnormal(float);
+__DEVICE__ bool isunordered(double, double);
+__DEVICE__ bool isunordered(float, float);
+__DEVICE__ long labs(long);
+__DEVICE__ double ldexp(double, int);
+__DEVICE__ float ldexp(float, int);
+__DEVICE__ double lgamma(double);
+__DEVICE__ float lgamma(float);
+__DEVICE__ long long llabs(long long);
+__DEVICE__ long long llrint(double);
+__DEVICE__ long long llrint(float);
+__DEVICE__ double log10(double);
+__DEVICE__ float log10(float);
+__DEVICE__ double log1p(double);
+__DEVICE__ float log1p(float);
+__DEVICE__ double log2(double);
+__DEVICE__ float log2(float);
+__DEVICE__ double logb(double);
+__DEVICE__ float logb(float);
+__DEVICE__ double log(double);
+__DEVICE__ float log(float);
+__DEVICE__ long lrint(double);
+__DEVICE__ long lrint(float);
+__DEVICE__ long lround(double);
+__DEVICE__ long lround(float);
+__DEVICE__ double modf(double, double *);
+__DEVICE__ float modf(float, float *);
+__DEVICE__ double nan(const char *);
+__DEVICE__ float nanf(const char *);
+__DEVICE__ double nearbyint(double);
+__DEVICE__ float nearbyint(float);
+__DEVICE__ double nextafter(double, double);
+__DEVICE__ float nextafter(float, float);
+__DEVICE__ double nexttoward(double, double);
+__DEVICE__ float nexttoward(float, float);
+__DEVICE__ double pow(double, double);
+__DEVICE__ double pow(double, int);
+__DEVICE__ float pow(float, float);
+__DEVICE__ float pow(float, int);
+__DEVICE__ double remainder(double, double);
+__DEVICE__ float remainder(float, float);
+__DEVICE__ double remquo(double, double, int *);
+__DEVICE__ float remquo(float, float, int *);
+__DEVICE__ double rint(double);
+__DEVICE__ float rint(float);
+__DEVICE__ double round(double);
+__DEVICE__ float round(float);
+__DEVICE__ double scalbln(double, long);
+__DEVICE__ float scalbln(float, long);
+__DEVICE__ double scalbn(double, int);
+__DEVICE__ float scalbn(float, int);
+__DEVICE__ bool signbit(double);
+__DEVICE__ bool signbit(float);
+__DEVICE__ double sin(double);
+__DEVICE__ float sin(float);
+__DEVICE__ double sinh(double);
+__DEVICE__ float sinh(float);
+__DEVICE__ double sqrt(double);
+__DEVICE__ float sqrt(float);
+__DEVICE__ double tan(double);
+__DEVICE__ float tan(float);
+__DEVICE__ double tanh(double);
+__DEVICE__ float tanh(float);
+__DEVICE__ double tgamma(double);
+__DEVICE__ float tgamma(float);
+__DEVICE__ double trunc(double);
+__DEVICE__ float trunc(float);
+
+namespace std {
+using ::abs;
+using ::acos;
+using ::acosh;
+using ::asin;
+using ::asinh;
+using ::atan;
+using ::atan2;
+using ::atanh;
+using ::cbrt;
+using ::ceil;
+using ::copysign;
+using ::cos;
+using ::cosh;
+using ::erf;
+using ::erfc;
+using ::exp;
+using ::exp2;
+using ::expm1;
+using ::fabs;
+using ::fdim;
+using ::floor;
+using ::fma;
+using ::fmax;
+using ::fmin;
+using ::fmod;
+using ::fpclassify;
+using ::frexp;
+using ::hypot;
+using ::ilogb;
+using ::isfinite;
+using ::isgreater;
+using ::isgreaterequal;
+using ::isinf;
+using ::isless;
+using ::islessequal;
+using ::islessgreater;
+using ::isnan;
+using ::isnormal;
+using ::isunordered;
+using ::labs;
+using ::ldexp;
+using ::lgamma;
+using ::llabs;
+using ::llrint;
+using ::log;
+using ::log10;
+using ::log1p;
+using ::log2;
+using ::logb;
+using ::lrint;
+using ::lround;
+using ::modf;
+using ::nan;
+using ::nanf;
+using ::nearbyint;
+using ::nextafter;
+using ::nexttoward;
+using ::pow;
+using ::remainder;
+using ::remquo;
+using ::rint;
+using ::round;
+using ::scalbln;
+using ::scalbn;
+using ::signbit;
+using ::sin;
+using ::sinh;
+using ::sqrt;
+using ::tan;
+using ::tanh;
+using ::tgamma;
+using ::trunc;
+} // namespace std
+
+#pragma pop_macro("__DEVICE__")
+
+#endif
diff --git a/current/clang-include/__clang_cuda_runtime_wrapper.h b/current/clang-include/__clang_cuda_runtime_wrapper.h
new file mode 100644
index 0000000..6445f9b
--- /dev/null
+++ b/current/clang-include/__clang_cuda_runtime_wrapper.h
@@ -0,0 +1,316 @@
+/*===---- __clang_cuda_runtime_wrapper.h - CUDA runtime support -------------===
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+ * THE SOFTWARE.
+ *
+ *===-----------------------------------------------------------------------===
+ */
+
+/*
+ * WARNING: This header is intended to be directly -include'd by
+ * the compiler and is not supposed to be included by users.
+ *
+ * CUDA headers are implemented in a way that currently makes it
+ * impossible for user code to #include them directly when compiling
+ * with Clang. They present a different view of CUDA-supplied functions
+ * depending on where in NVCC's compilation pipeline the headers are
+ * included. Neither of these modes provides function definitions with
+ * correct attributes, so we use the preprocessor to force the headers
+ * into a form that Clang can use.
+ *
+ * Similarly to NVCC which -include's cuda_runtime.h, Clang -include's
+ * this file during every CUDA compilation.
+ */
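+
+/*
+ * For instance (hypothetical invocation; the CUDA path varies by system):
+ *
+ *   clang++ -x cuda --cuda-path=/usr/local/cuda-7.5 -c axpy.cu
+ *
+ * behaves as if this wrapper had been passed via -include.
+ */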
+
+#ifndef __CLANG_CUDA_RUNTIME_WRAPPER_H__
+#define __CLANG_CUDA_RUNTIME_WRAPPER_H__
+
+#if defined(__CUDA__) && defined(__clang__)
+
+// Include some forward declares that must come before cmath.
+#include <__clang_cuda_math_forward_declares.h>
+
+// Include some standard headers to avoid CUDA headers including them
+// while some required macros (like __THROW) are in a weird state.
+#include <cmath>
+#include <cstdlib>
+#include <stdlib.h>
+
+// Preserve common macros that will be changed below by us or by CUDA
+// headers.
+#pragma push_macro("__THROW")
+#pragma push_macro("__CUDA_ARCH__")
+
+// WARNING: Preprocessor hacks below are based on specific details of
+// CUDA-7.x headers and are not expected to work with any other
+// version of CUDA headers.
+#include "cuda.h"
+#if !defined(CUDA_VERSION)
+#error "cuda.h did not define CUDA_VERSION"
+#elif CUDA_VERSION < 7000 || CUDA_VERSION > 7050
+#error "Unsupported CUDA version!"
+#endif
+
+// Make the largest subset of device functions available during host
+// compilation -- SM_35 for the time being.
+#ifndef __CUDA_ARCH__
+#define __CUDA_ARCH__ 350
+#endif
+
+#include "cuda_builtin_vars.h"
+
+// No need for device_launch_parameters.h as cuda_builtin_vars.h above
+// has taken care of builtin variables declared in the file.
+#define __DEVICE_LAUNCH_PARAMETERS_H__
+
+// {math,device}_functions.h only have declarations of the
+// functions. We don't need them as we're going to pull in their
+// definitions from .hpp files.
+#define __DEVICE_FUNCTIONS_H__
+#define __MATH_FUNCTIONS_H__
+#define __COMMON_FUNCTIONS_H__
+
+#undef __CUDACC__
+#define __CUDABE__
+// Disables definitions of device-side runtime support stubs in
+// cuda_device_runtime_api.h
+#include "driver_types.h"
+#include "host_config.h"
+#include "host_defines.h"
+
+#undef __CUDABE__
+#define __CUDACC__
+#include "cuda_runtime.h"
+
+#undef __CUDACC__
+#define __CUDABE__
+
+// CUDA headers use __nvvm_memcpy and __nvvm_memset which Clang does
+// not have at the moment. Emulate them with a builtin memcpy/memset.
+#define __nvvm_memcpy(s, d, n, a) __builtin_memcpy(s, d, n)
+#define __nvvm_memset(d, c, n, a) __builtin_memset(d, c, n)
+
+#include "crt/device_runtime.h"
+#include "crt/host_runtime.h"
+// device_runtime.h defines __cxa_* macros that will conflict with
+// cxxabi.h.
+// FIXME: redefine these as __device__ functions.
+#undef __cxa_vec_ctor
+#undef __cxa_vec_cctor
+#undef __cxa_vec_dtor
+#undef __cxa_vec_new2
+#undef __cxa_vec_new3
+#undef __cxa_vec_delete2
+#undef __cxa_vec_delete
+#undef __cxa_vec_delete3
+#undef __cxa_pure_virtual
+
+// We need decls for functions in CUDA's libdevice with the __device__
+// attribute only. Alas, they come either as __host__ __device__ or with
+// no attributes at all. To work around that, define __CUDACC_RTC__, which
+// produces the HD variant, and undefine __host__, which leaves us the
+// desired decls with just the __device__ attribute.
+#pragma push_macro("__host__")
+#define __host__
+#define __CUDACC_RTC__
+#include "device_functions_decls.h"
+#undef __CUDACC_RTC__
+
+// Temporarily poison __host__ macro to ensure it's not used by any of
+// the headers we're about to include.
+#define __host__ UNEXPECTED_HOST_ATTRIBUTE
+
+// device_functions.hpp and math_functions*.hpp use 'static
+// __forceinline__' (with no __device__) for definitions of device
+// functions. Temporarily redefine __forceinline__ to include
+// __device__.
+#pragma push_macro("__forceinline__")
+#define __forceinline__ __device__ __inline__ __attribute__((always_inline))
+#include "device_functions.hpp"
+
+// math_functions.hpp uses the __USE_FAST_MATH__ macro to determine whether we
+// get the slow-but-accurate or fast-but-inaccurate versions of functions like
+// sin and exp. This is controlled in clang by -fcuda-approx-transcendentals.
+//
+// device_functions.hpp uses __USE_FAST_MATH__ for a different purpose (fast vs.
+// slow divides), so we need to scope our define carefully here.
+#pragma push_macro("__USE_FAST_MATH__")
+#if defined(__CLANG_CUDA_APPROX_TRANSCENDENTALS__)
+#define __USE_FAST_MATH__
+#endif
+#include "math_functions.hpp"
+#pragma pop_macro("__USE_FAST_MATH__")
+
+#include "math_functions_dbl_ptx3.hpp"
+#pragma pop_macro("__forceinline__")
+
+// Pull in host-only functions that are only available when neither
+// __CUDACC__ nor __CUDABE__ is defined.
+#undef __MATH_FUNCTIONS_HPP__
+#undef __CUDABE__
+#include "math_functions.hpp"
+// Alas, additional overloads for these functions are hard to get to.
+// Considering that we only need these overloads for a few functions,
+// we can provide them here.
+static inline float rsqrt(float __a) { return rsqrtf(__a); }
+static inline float rcbrt(float __a) { return rcbrtf(__a); }
+static inline float sinpi(float __a) { return sinpif(__a); }
+static inline float cospi(float __a) { return cospif(__a); }
+static inline void sincospi(float __a, float *__b, float *__c) {
+ return sincospif(__a, __b, __c);
+}
+static inline float erfcinv(float __a) { return erfcinvf(__a); }
+static inline float normcdfinv(float __a) { return normcdfinvf(__a); }
+static inline float normcdf(float __a) { return normcdff(__a); }
+static inline float erfcx(float __a) { return erfcxf(__a); }
+
+// For some reason the single-argument variant is not always declared by
+// the CUDA headers. Alas, device_functions.hpp included below needs it.
+static inline __device__ void __brkpt(int __c) { __brkpt(); }
+
+// Now include *.hpp files with definitions of various GPU functions. Alas,
+// a lot of things get declared/defined with the __host__ attribute, which
+// we don't want, so we have to define it away. We also have to include
+// {device,math}_functions.hpp again in order to extract the other
+// branch of the #if/else inside.
+
+#define __host__
+#undef __CUDABE__
+#define __CUDACC__
+#undef __DEVICE_FUNCTIONS_HPP__
+#include "device_atomic_functions.hpp"
+#include "device_functions.hpp"
+#include "sm_20_atomic_functions.hpp"
+#include "sm_20_intrinsics.hpp"
+#include "sm_32_atomic_functions.hpp"
+
+// Don't include sm_30_intrinsics.h and sm_32_intrinsics.h. These define the
+// __shfl and __ldg intrinsics using inline (volatile) asm, but we want to
+// define them using builtins so that the optimizer can reason about and across
+// these instructions. In particular, using intrinsics for ldg gets us the
+// [addr+imm] addressing mode, which, although it doesn't actually exist in the
+// hardware, seems to generate faster machine code because ptxas can more easily
+// reason about our code.
+
+#undef __MATH_FUNCTIONS_HPP__
+
+// math_functions.hpp defines ::signbit as a __host__ __device__ function. This
+// conflicts with libstdc++'s constexpr ::signbit, so we have to rename
+// math_functions.hpp's ::signbit. It's guarded by #undef signbit, but that's
+// conditional on __GNUC__. :)
+#pragma push_macro("signbit")
+#pragma push_macro("__GNUC__")
+#undef __GNUC__
+#define signbit __ignored_cuda_signbit
+#include "math_functions.hpp"
+#pragma pop_macro("__GNUC__")
+#pragma pop_macro("signbit")
+
+#pragma pop_macro("__host__")
+
+#include "texture_indirect_functions.h"
+
+// Restore state of __CUDA_ARCH__ and __THROW we had on entry.
+#pragma pop_macro("__CUDA_ARCH__")
+#pragma pop_macro("__THROW")
+
+// Set up compiler macros expected to be seen during compilation.
+#undef __CUDABE__
+#define __CUDACC__
+
+extern "C" {
+// Device-side CUDA system calls.
+// http://docs.nvidia.com/cuda/ptx-writers-guide-to-interoperability/index.html#system-calls
+// We need these declarations and wrappers for device-side
+// malloc/free/printf calls to work without relying on
+// -fcuda-disable-target-call-checks option.
+__device__ int vprintf(const char *, const char *);
+__device__ void free(void *) __attribute((nothrow));
+__device__ void *malloc(size_t) __attribute((nothrow)) __attribute__((malloc));
+__device__ void __assertfail(const char *__message, const char *__file,
+ unsigned __line, const char *__function,
+ size_t __charSize) __attribute__((noreturn));
+
+// In order for the standard assert() macro on Linux to work, we need to
+// provide a device-side __assert_fail().
+__device__ static inline void __assert_fail(const char *__message,
+ const char *__file, unsigned __line,
+ const char *__function) {
+ __assertfail(__message, __file, __line, __function, sizeof(char));
+}
+
+// Clang will convert printf into vprintf, but we still need a device-side
+// declaration for it.
+__device__ int printf(const char *, ...);
+} // extern "C"
+
+// We also need device-side std::malloc and std::free.
+namespace std {
+__device__ static inline void free(void *__ptr) { ::free(__ptr); }
+__device__ static inline void *malloc(size_t __size) {
+ return ::malloc(__size);
+}
+} // namespace std
+
+// Out-of-line implementations from cuda_builtin_vars.h. These need to come
+// after we've pulled in the definition of uint3 and dim3.
+
+__device__ inline __cuda_builtin_threadIdx_t::operator uint3() const {
+ uint3 ret;
+ ret.x = x;
+ ret.y = y;
+ ret.z = z;
+ return ret;
+}
+
+__device__ inline __cuda_builtin_blockIdx_t::operator uint3() const {
+ uint3 ret;
+ ret.x = x;
+ ret.y = y;
+ ret.z = z;
+ return ret;
+}
+
+__device__ inline __cuda_builtin_blockDim_t::operator dim3() const {
+ return dim3(x, y, z);
+}
+
+__device__ inline __cuda_builtin_gridDim_t::operator dim3() const {
+ return dim3(x, y, z);
+}
+
+#include <__clang_cuda_cmath.h>
+#include <__clang_cuda_intrinsics.h>
+
+// curand_mtgp32_kernel helpfully redeclares blockDim and threadIdx in host
+// mode, giving them their "proper" types of dim3 and uint3. This is
+// incompatible with the types we give in cuda_builtin_vars.h. As a hack,
+// force-include the header (nvcc doesn't include it by default) but redefine
+// dim3 and uint3 to our builtin types. (Thankfully dim3 and uint3 are only
+// used here for the redeclarations of blockDim and threadIdx.)
+#pragma push_macro("dim3")
+#pragma push_macro("uint3")
+#define dim3 __cuda_builtin_blockDim_t
+#define uint3 __cuda_builtin_threadIdx_t
+#include "curand_mtgp32_kernel.h"
+#pragma pop_macro("dim3")
+#pragma pop_macro("uint3")
+#pragma pop_macro("__USE_FAST_MATH__")
+
+#endif // __CUDA__
+#endif // __CLANG_CUDA_RUNTIME_WRAPPER_H__
diff --git a/current/clang-include/__stddef_max_align_t.h b/current/clang-include/__stddef_max_align_t.h
new file mode 100644
index 0000000..1e10ca9
--- /dev/null
+++ b/current/clang-include/__stddef_max_align_t.h
@@ -0,0 +1,43 @@
+/*===---- __stddef_max_align_t.h - Definition of max_align_t for modules ---===
+ *
+ * Copyright (c) 2014 Chandler Carruth
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+ * THE SOFTWARE.
+ *
+ *===-----------------------------------------------------------------------===
+ */
+
+#ifndef __CLANG_MAX_ALIGN_T_DEFINED
+#define __CLANG_MAX_ALIGN_T_DEFINED
+
+#if defined(_MSC_VER)
+typedef double max_align_t;
+#elif defined(__APPLE__)
+typedef long double max_align_t;
+#else
+// Define 'max_align_t' to match the GCC definition.
+typedef struct {
+ long long __clang_max_align_nonce1
+ __attribute__((__aligned__(__alignof__(long long))));
+ long double __clang_max_align_nonce2
+ __attribute__((__aligned__(__alignof__(long double))));
+} max_align_t;
+#endif
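+
+/* A hypothetical sanity check (not part of this header): whichever branch
+ * was taken, max_align_t should be at least as aligned as long double.
+ *
+ *   _Static_assert(_Alignof(max_align_t) >= _Alignof(long double), "");
+ */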
+
+#endif
diff --git a/current/clang-include/__wmmintrin_aes.h b/current/clang-include/__wmmintrin_aes.h
new file mode 100644
index 0000000..211518e
--- /dev/null
+++ b/current/clang-include/__wmmintrin_aes.h
@@ -0,0 +1,151 @@
+/*===---- __wmmintrin_aes.h - AES intrinsics -------------------------------===
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+ * THE SOFTWARE.
+ *
+ *===-----------------------------------------------------------------------===
+ */
+#ifndef _WMMINTRIN_AES_H
+#define _WMMINTRIN_AES_H
+
+#include <emmintrin.h>
+
+/* Define the default attributes for the functions in this file. */
+#define __DEFAULT_FN_ATTRS __attribute__((__always_inline__, __nodebug__, __target__("aes")))
+
+/// \brief Performs a single round of AES encryption, transforming the state
+///    value from the first source operand using a 128-bit round key value
+///    contained in the second source operand, and writes the result to the
+///    destination.
+///
+/// \headerfile <x86intrin.h>
+///
+/// This intrinsic corresponds to the \c VAESENC instruction.
+///
+/// \param __V
+/// A 128-bit integer vector containing the state value.
+/// \param __R
+/// A 128-bit integer vector containing the round key value.
+/// \returns A 128-bit integer vector containing the encrypted value.
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_aesenc_si128(__m128i __V, __m128i __R)
+{
+ return (__m128i)__builtin_ia32_aesenc128((__v2di)__V, (__v2di)__R);
+}
+
+/// \brief Performs the final round of AES encryption, transforming the state
+///    value from the first source operand using a 128-bit round key value
+///    contained in the second source operand, and writes the result to the
+///    destination.
+///
+/// \headerfile <x86intrin.h>
+///
+/// This intrinsic corresponds to the \c VAESENCLAST instruction.
+///
+/// \param __V
+/// A 128-bit integer vector containing the state value.
+/// \param __R
+/// A 128-bit integer vector containing the round key value.
+/// \returns A 128-bit integer vector containing the encrypted value.
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_aesenclast_si128(__m128i __V, __m128i __R)
+{
+ return (__m128i)__builtin_ia32_aesenclast128((__v2di)__V, (__v2di)__R);
+}
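+
+/* A hypothetical usage sketch (not part of this header): encrypting one
+ * AES-128 block given 11 pre-expanded round keys.
+ *
+ *   static inline __m128i __aes128_encrypt_block(__m128i __b,
+ *                                                const __m128i *__rk) {
+ *     __b = _mm_xor_si128(__b, __rk[0]);          // initial AddRoundKey
+ *     for (int __i = 1; __i < 10; ++__i)
+ *       __b = _mm_aesenc_si128(__b, __rk[__i]);   // rounds 1..9
+ *     return _mm_aesenclast_si128(__b, __rk[10]); // final round
+ *   }
+ */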
+
+/// \brief Performs a single round of AES decryption using the Equivalent
+/// Inverse Cipher, transforming the state value from the first source
+/// operand using a 128-bit round key value contained in the second source
+/// operand, and writes the result to the destination.
+///
+/// \headerfile <x86intrin.h>
+///
+/// This intrinsic corresponds to the \c VAESDEC instruction.
+///
+/// \param __V
+/// A 128-bit integer vector containing the state value.
+/// \param __R
+/// A 128-bit integer vector containing the round key value.
+/// \returns A 128-bit integer vector containing the decrypted value.
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_aesdec_si128(__m128i __V, __m128i __R)
+{
+ return (__m128i)__builtin_ia32_aesdec128((__v2di)__V, (__v2di)__R);
+}
+
+/// \brief Performs the final round of AES decryption using the Equivalent
+/// Inverse Cipher, transforming the state value from the first source
+/// operand using a 128-bit round key value contained in the second source
+/// operand, and writes the result to the destination.
+///
+/// \headerfile <x86intrin.h>
+///
+/// This intrinsic corresponds to the \c VAESDECLAST instruction.
+///
+/// \param __V
+/// A 128-bit integer vector containing the state value.
+/// \param __R
+/// A 128-bit integer vector containing the round key value.
+/// \returns A 128-bit integer vector containing the decrypted value.
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_aesdeclast_si128(__m128i __V, __m128i __R)
+{
+ return (__m128i)__builtin_ia32_aesdeclast128((__v2di)__V, (__v2di)__R);
+}
+
+/// \brief Applies the AES InvMixColumns() transformation to an expanded key
+/// contained in the source operand, and writes the result to the
+/// destination.
+///
+/// \headerfile <x86intrin.h>
+///
+/// This intrinsic corresponds to the \c VAESIMC instruction.
+///
+/// \param __V
+/// A 128-bit integer vector containing the expanded key.
+/// \returns A 128-bit integer vector containing the transformed value.
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_aesimc_si128(__m128i __V)
+{
+ return (__m128i)__builtin_ia32_aesimc128((__v2di)__V);
+}
+
+/// \brief Generates a round key for AES encryption, operating on 128-bit data
+/// specified in the first source operand and using an 8-bit round constant
+/// specified by the second source operand, and writes the result to the
+/// destination.
+///
+/// \headerfile <x86intrin.h>
+///
+/// \code
+/// __m128i _mm_aeskeygenassist_si128(__m128i C, const int R);
+/// \endcode
+///
+/// This intrinsic corresponds to the \c AESKEYGENASSIST instruction.
+///
+/// \param C
+/// A 128-bit integer vector that is used to generate the AES encryption key.
+/// \param R
+/// An 8-bit round constant used to generate the AES encryption key.
+/// \returns A 128-bit round key for AES encryption.
+#define _mm_aeskeygenassist_si128(C, R) \
+ (__m128i)__builtin_ia32_aeskeygenassist128((__v2di)(__m128i)(C), (int)(R))
+
+#undef __DEFAULT_FN_ATTRS
+
+#endif /* _WMMINTRIN_AES_H */
diff --git a/current/clang-include/__wmmintrin_pclmul.h b/current/clang-include/__wmmintrin_pclmul.h
new file mode 100644
index 0000000..d4e073f
--- /dev/null
+++ b/current/clang-include/__wmmintrin_pclmul.h
@@ -0,0 +1,58 @@
+/*===---- __wmmintrin_pclmul.h - PCLMUL intrinsics --------------------------===
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+ * THE SOFTWARE.
+ *
+ *===-----------------------------------------------------------------------===
+ */
+#ifndef _WMMINTRIN_PCLMUL_H
+#define _WMMINTRIN_PCLMUL_H
+
+/// \brief Multiplies two 64-bit integer values, which are selected from source
+/// operands using the immediate-value operand. The multiplication is a
+/// carry-less multiplication, and the 128-bit integer product is stored in
+/// the destination.
+///
+/// \headerfile <x86intrin.h>
+///
+/// \code
+/// __m128i _mm_clmulepi64_si128(__m128i __X, __m128i __Y, const int __I);
+/// \endcode
+///
+/// This intrinsic corresponds to the \c VPCLMULQDQ instruction.
+///
+/// \param __X
+/// A 128-bit vector of [2 x i64] containing one of the source operands.
+/// \param __Y
+/// A 128-bit vector of [2 x i64] containing one of the source operands.
+/// \param __I
+/// An immediate value specifying which 64-bit values to select from the
+/// operands.
+/// Bit 0 is used to select a value from operand __X,
+/// and bit 4 is used to select a value from operand __Y:
+/// Bit[0]=0 indicates that bits[63:0] of operand __X are used.
+/// Bit[0]=1 indicates that bits[127:64] of operand __X are used.
+/// Bit[4]=0 indicates that bits[63:0] of operand __Y are used.
+/// Bit[4]=1 indicates that bits[127:64] of operand __Y are used.
+/// \returns The 128-bit integer vector containing the result of the carry-less
+/// multiplication of the selected 64-bit values.
+#define _mm_clmulepi64_si128(__X, __Y, __I) \
+ ((__m128i)__builtin_ia32_pclmulqdq128((__v2di)(__m128i)(__X), \
+ (__v2di)(__m128i)(__Y), (char)(__I)))
+
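+/* Hypothetical usage sketch (not part of this header): immediate 0x00
+ * multiplies the two low 64-bit halves; 0x11 multiplies the two high halves.
+ *
+ *   __m128i __lo_prod = _mm_clmulepi64_si128(__x, __y, 0x00);
+ *   __m128i __hi_prod = _mm_clmulepi64_si128(__x, __y, 0x11);
+ */
+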
+#endif /* _WMMINTRIN_PCLMUL_H */
diff --git a/current/clang-include/adxintrin.h b/current/clang-include/adxintrin.h
new file mode 100644
index 0000000..ee34728
--- /dev/null
+++ b/current/clang-include/adxintrin.h
@@ -0,0 +1,86 @@
+/*===---- adxintrin.h - ADX intrinsics -------------------------------------===
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+ * THE SOFTWARE.
+ *
+ *===-----------------------------------------------------------------------===
+ */
+
+#ifndef __IMMINTRIN_H
+#error "Never use <adxintrin.h> directly; include <immintrin.h> instead."
+#endif
+
+#ifndef __ADXINTRIN_H
+#define __ADXINTRIN_H
+
+/* Define the default attributes for the functions in this file. */
+#define __DEFAULT_FN_ATTRS __attribute__((__always_inline__, __nodebug__))
+
+/* Intrinsics that are available only if __ADX__ is defined. */
+static __inline unsigned char __attribute__((__always_inline__, __nodebug__, __target__("adx")))
+_addcarryx_u32(unsigned char __cf, unsigned int __x, unsigned int __y,
+ unsigned int *__p)
+{
+ return __builtin_ia32_addcarryx_u32(__cf, __x, __y, __p);
+}
+
+#ifdef __x86_64__
+static __inline unsigned char __attribute__((__always_inline__, __nodebug__, __target__("adx")))
+_addcarryx_u64(unsigned char __cf, unsigned long long __x,
+ unsigned long long __y, unsigned long long *__p)
+{
+ return __builtin_ia32_addcarryx_u64(__cf, __x, __y, __p);
+}
+#endif
+
+/* Intrinsics that are available even if __ADX__ is undefined. */
+static __inline unsigned char __DEFAULT_FN_ATTRS
+_addcarry_u32(unsigned char __cf, unsigned int __x, unsigned int __y,
+ unsigned int *__p)
+{
+ return __builtin_ia32_addcarry_u32(__cf, __x, __y, __p);
+}
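+
+/* A hypothetical sketch (not part of this header): 64-bit addition on a
+ * 32-bit target, chaining the carry flag between the two limbs.
+ *
+ *   unsigned int __lo, __hi;
+ *   unsigned char __c = _addcarry_u32(0, __a_lo, __b_lo, &__lo);
+ *   (void)_addcarry_u32(__c, __a_hi, __b_hi, &__hi);
+ */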
+
+#ifdef __x86_64__
+static __inline unsigned char __DEFAULT_FN_ATTRS
+_addcarry_u64(unsigned char __cf, unsigned long long __x,
+ unsigned long long __y, unsigned long long *__p)
+{
+ return __builtin_ia32_addcarry_u64(__cf, __x, __y, __p);
+}
+#endif
+
+static __inline unsigned char __DEFAULT_FN_ATTRS
+_subborrow_u32(unsigned char __cf, unsigned int __x, unsigned int __y,
+ unsigned int *__p)
+{
+ return __builtin_ia32_subborrow_u32(__cf, __x, __y, __p);
+}
+
+#ifdef __x86_64__
+static __inline unsigned char __DEFAULT_FN_ATTRS
+_subborrow_u64(unsigned char __cf, unsigned long long __x,
+ unsigned long long __y, unsigned long long *__p)
+{
+ return __builtin_ia32_subborrow_u64(__cf, __x, __y, __p);
+}
+#endif
+
+#undef __DEFAULT_FN_ATTRS
+
+#endif /* __ADXINTRIN_H */
diff --git a/current/clang-include/altivec.h b/current/clang-include/altivec.h
new file mode 100644
index 0000000..74a1914
--- /dev/null
+++ b/current/clang-include/altivec.h
@@ -0,0 +1,14458 @@
+/*===---- altivec.h - Standard header for PowerPC AltiVec intrinsics ------===*\
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+ * THE SOFTWARE.
+ *
+\*===----------------------------------------------------------------------===*/
+
+#ifndef __ALTIVEC_H
+#define __ALTIVEC_H
+
+#ifndef __ALTIVEC__
+#error "AltiVec support not enabled"
+#endif
+
+/* Constants for mapping CR6 bits to predicate result. */
+
+#define __CR6_EQ 0
+#define __CR6_EQ_REV 1
+#define __CR6_LT 2
+#define __CR6_LT_REV 3
+
+#define __ATTRS_o_ai __attribute__((__overloadable__, __always_inline__))
+
+static __inline__ vector signed char __ATTRS_o_ai vec_perm(
+ vector signed char __a, vector signed char __b, vector unsigned char __c);
+
+static __inline__ vector unsigned char __ATTRS_o_ai
+vec_perm(vector unsigned char __a, vector unsigned char __b,
+ vector unsigned char __c);
+
+static __inline__ vector bool char __ATTRS_o_ai
+vec_perm(vector bool char __a, vector bool char __b, vector unsigned char __c);
+
+static __inline__ vector short __ATTRS_o_ai vec_perm(vector signed short __a,
+ vector signed short __b,
+ vector unsigned char __c);
+
+static __inline__ vector unsigned short __ATTRS_o_ai
+vec_perm(vector unsigned short __a, vector unsigned short __b,
+ vector unsigned char __c);
+
+static __inline__ vector bool short __ATTRS_o_ai vec_perm(
+ vector bool short __a, vector bool short __b, vector unsigned char __c);
+
+static __inline__ vector pixel __ATTRS_o_ai vec_perm(vector pixel __a,
+ vector pixel __b,
+ vector unsigned char __c);
+
+static __inline__ vector int __ATTRS_o_ai vec_perm(vector signed int __a,
+ vector signed int __b,
+ vector unsigned char __c);
+
+static __inline__ vector unsigned int __ATTRS_o_ai vec_perm(
+ vector unsigned int __a, vector unsigned int __b, vector unsigned char __c);
+
+static __inline__ vector bool int __ATTRS_o_ai
+vec_perm(vector bool int __a, vector bool int __b, vector unsigned char __c);
+
+static __inline__ vector float __ATTRS_o_ai vec_perm(vector float __a,
+ vector float __b,
+ vector unsigned char __c);
+
+#ifdef __VSX__
+static __inline__ vector long long __ATTRS_o_ai
+vec_perm(vector signed long long __a, vector signed long long __b,
+ vector unsigned char __c);
+
+static __inline__ vector unsigned long long __ATTRS_o_ai
+vec_perm(vector unsigned long long __a, vector unsigned long long __b,
+ vector unsigned char __c);
+
+static __inline__ vector bool long long __ATTRS_o_ai
+vec_perm(vector bool long long __a, vector bool long long __b,
+ vector unsigned char __c);
+
+static __inline__ vector double __ATTRS_o_ai vec_perm(vector double __a,
+ vector double __b,
+ vector unsigned char __c);
+#endif
+
+static __inline__ vector unsigned char __ATTRS_o_ai
+vec_xor(vector unsigned char __a, vector unsigned char __b);
+
+/* vec_abs */
+
+#define __builtin_altivec_abs_v16qi vec_abs
+#define __builtin_altivec_abs_v8hi vec_abs
+#define __builtin_altivec_abs_v4si vec_abs
+
+static __inline__ vector signed char __ATTRS_o_ai
+vec_abs(vector signed char __a) {
+ return __builtin_altivec_vmaxsb(__a, -__a);
+}
+
+static __inline__ vector signed short __ATTRS_o_ai
+vec_abs(vector signed short __a) {
+ return __builtin_altivec_vmaxsh(__a, -__a);
+}
+
+static __inline__ vector signed int __ATTRS_o_ai
+vec_abs(vector signed int __a) {
+ return __builtin_altivec_vmaxsw(__a, -__a);
+}
+
+#if defined(__POWER8_VECTOR__) && defined(__powerpc64__)
+static __inline__ vector signed long long __ATTRS_o_ai
+vec_abs(vector signed long long __a) {
+ return __builtin_altivec_vmaxsd(__a, -__a);
+}
+#endif
+
+static __inline__ vector float __ATTRS_o_ai vec_abs(vector float __a) {
+#ifdef __VSX__
+ return __builtin_vsx_xvabssp(__a);
+#else
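+  /* No VSX here: compute |a| by clearing the IEEE-754 sign bit instead. */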
+ vector unsigned int __res =
+ (vector unsigned int)__a & (vector unsigned int)(0x7FFFFFFF);
+ return (vector float)__res;
+#endif
+}
+
+#if defined(__POWER8_VECTOR__) && defined(__powerpc64__)
+static __inline__ vector double __ATTRS_o_ai vec_abs(vector double __a) {
+ return __builtin_vsx_xvabsdp(__a);
+}
+#endif
+
+/* vec_abss */
+#define __builtin_altivec_abss_v16qi vec_abss
+#define __builtin_altivec_abss_v8hi vec_abss
+#define __builtin_altivec_abss_v4si vec_abss
+
+static __inline__ vector signed char __ATTRS_o_ai
+vec_abss(vector signed char __a) {
+ return __builtin_altivec_vmaxsb(
+ __a, __builtin_altivec_vsubsbs((vector signed char)(0), __a));
+}
+
+static __inline__ vector signed short __ATTRS_o_ai
+vec_abss(vector signed short __a) {
+ return __builtin_altivec_vmaxsh(
+ __a, __builtin_altivec_vsubshs((vector signed short)(0), __a));
+}
+
+static __inline__ vector signed int __ATTRS_o_ai
+vec_abss(vector signed int __a) {
+ return __builtin_altivec_vmaxsw(
+ __a, __builtin_altivec_vsubsws((vector signed int)(0), __a));
+}
+
+/* vec_add */
+
+static __inline__ vector signed char __ATTRS_o_ai
+vec_add(vector signed char __a, vector signed char __b) {
+ return __a + __b;
+}
+
+static __inline__ vector signed char __ATTRS_o_ai
+vec_add(vector bool char __a, vector signed char __b) {
+ return (vector signed char)__a + __b;
+}
+
+static __inline__ vector signed char __ATTRS_o_ai
+vec_add(vector signed char __a, vector bool char __b) {
+ return __a + (vector signed char)__b;
+}
+
+static __inline__ vector unsigned char __ATTRS_o_ai
+vec_add(vector unsigned char __a, vector unsigned char __b) {
+ return __a + __b;
+}
+
+static __inline__ vector unsigned char __ATTRS_o_ai
+vec_add(vector bool char __a, vector unsigned char __b) {
+ return (vector unsigned char)__a + __b;
+}
+
+static __inline__ vector unsigned char __ATTRS_o_ai
+vec_add(vector unsigned char __a, vector bool char __b) {
+ return __a + (vector unsigned char)__b;
+}
+
+static __inline__ vector short __ATTRS_o_ai vec_add(vector short __a,
+ vector short __b) {
+ return __a + __b;
+}
+
+static __inline__ vector short __ATTRS_o_ai vec_add(vector bool short __a,
+ vector short __b) {
+ return (vector short)__a + __b;
+}
+
+static __inline__ vector short __ATTRS_o_ai vec_add(vector short __a,
+ vector bool short __b) {
+ return __a + (vector short)__b;
+}
+
+static __inline__ vector unsigned short __ATTRS_o_ai
+vec_add(vector unsigned short __a, vector unsigned short __b) {
+ return __a + __b;
+}
+
+static __inline__ vector unsigned short __ATTRS_o_ai
+vec_add(vector bool short __a, vector unsigned short __b) {
+ return (vector unsigned short)__a + __b;
+}
+
+static __inline__ vector unsigned short __ATTRS_o_ai
+vec_add(vector unsigned short __a, vector bool short __b) {
+ return __a + (vector unsigned short)__b;
+}
+
+static __inline__ vector int __ATTRS_o_ai vec_add(vector int __a,
+ vector int __b) {
+ return __a + __b;
+}
+
+static __inline__ vector int __ATTRS_o_ai vec_add(vector bool int __a,
+ vector int __b) {
+ return (vector int)__a + __b;
+}
+
+static __inline__ vector int __ATTRS_o_ai vec_add(vector int __a,
+ vector bool int __b) {
+ return __a + (vector int)__b;
+}
+
+static __inline__ vector unsigned int __ATTRS_o_ai
+vec_add(vector unsigned int __a, vector unsigned int __b) {
+ return __a + __b;
+}
+
+static __inline__ vector unsigned int __ATTRS_o_ai
+vec_add(vector bool int __a, vector unsigned int __b) {
+ return (vector unsigned int)__a + __b;
+}
+
+static __inline__ vector unsigned int __ATTRS_o_ai
+vec_add(vector unsigned int __a, vector bool int __b) {
+ return __a + (vector unsigned int)__b;
+}
+
+#if defined(__POWER8_VECTOR__) && defined(__powerpc64__)
+static __inline__ vector signed long long __ATTRS_o_ai
+vec_add(vector signed long long __a, vector signed long long __b) {
+ return __a + __b;
+}
+
+static __inline__ vector unsigned long long __ATTRS_o_ai
+vec_add(vector unsigned long long __a, vector unsigned long long __b) {
+ return __a + __b;
+}
+
+static __inline__ vector signed __int128 __ATTRS_o_ai
+vec_add(vector signed __int128 __a, vector signed __int128 __b) {
+ return __a + __b;
+}
+
+static __inline__ vector unsigned __int128 __ATTRS_o_ai
+vec_add(vector unsigned __int128 __a, vector unsigned __int128 __b) {
+ return __a + __b;
+}
+#endif // defined(__POWER8_VECTOR__) && defined(__powerpc64__)
+
+static __inline__ vector float __ATTRS_o_ai vec_add(vector float __a,
+ vector float __b) {
+ return __a + __b;
+}
+
+#ifdef __VSX__
+static __inline__ vector double __ATTRS_o_ai vec_add(vector double __a,
+ vector double __b) {
+ return __a + __b;
+}
+#endif // __VSX__
+
+/* vec_adde */
+
+#if defined(__POWER8_VECTOR__) && defined(__powerpc64__)
+static __inline__ vector signed __int128 __ATTRS_o_ai
+vec_adde(vector signed __int128 __a, vector signed __int128 __b,
+ vector signed __int128 __c) {
+ return __builtin_altivec_vaddeuqm(__a, __b, __c);
+}
+
+static __inline__ vector unsigned __int128 __ATTRS_o_ai
+vec_adde(vector unsigned __int128 __a, vector unsigned __int128 __b,
+ vector unsigned __int128 __c) {
+ return __builtin_altivec_vaddeuqm(__a, __b, __c);
+}
+#endif
+
+/* vec_addec */
+
+#if defined(__POWER8_VECTOR__) && defined(__powerpc64__)
+static __inline__ vector signed __int128 __ATTRS_o_ai
+vec_addec(vector signed __int128 __a, vector signed __int128 __b,
+ vector signed __int128 __c) {
+ return __builtin_altivec_vaddecuq(__a, __b, __c);
+}
+
+static __inline__ vector unsigned __int128 __ATTRS_o_ai
+vec_addec(vector unsigned __int128 __a, vector unsigned __int128 __b,
+ vector unsigned __int128 __c) {
+ return __builtin_altivec_vaddecuq(__a, __b, __c);
+}
+#endif
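+
+/* A minimal usage sketch (not part of this header; the variable names are
+   illustrative only) chaining the quadword carry/extend forms into a 256-bit
+   add from two unsigned __int128 lanes:
+
+     vector unsigned __int128 lo = vec_add(a_lo, b_lo);
+     vector unsigned __int128 c  = vec_addc(a_lo, b_lo);    // carry out, 0 or 1
+     vector unsigned __int128 hi = vec_adde(a_hi, b_hi, c); // add with carry in
+*/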
+
+/* vec_vaddubm */
+
+#define __builtin_altivec_vaddubm vec_vaddubm
+
+static __inline__ vector signed char __ATTRS_o_ai
+vec_vaddubm(vector signed char __a, vector signed char __b) {
+ return __a + __b;
+}
+
+static __inline__ vector signed char __ATTRS_o_ai
+vec_vaddubm(vector bool char __a, vector signed char __b) {
+ return (vector signed char)__a + __b;
+}
+
+static __inline__ vector signed char __ATTRS_o_ai
+vec_vaddubm(vector signed char __a, vector bool char __b) {
+ return __a + (vector signed char)__b;
+}
+
+static __inline__ vector unsigned char __ATTRS_o_ai
+vec_vaddubm(vector unsigned char __a, vector unsigned char __b) {
+ return __a + __b;
+}
+
+static __inline__ vector unsigned char __ATTRS_o_ai
+vec_vaddubm(vector bool char __a, vector unsigned char __b) {
+ return (vector unsigned char)__a + __b;
+}
+
+static __inline__ vector unsigned char __ATTRS_o_ai
+vec_vaddubm(vector unsigned char __a, vector bool char __b) {
+ return __a + (vector unsigned char)__b;
+}
+
+/* vec_vadduhm */
+
+#define __builtin_altivec_vadduhm vec_vadduhm
+
+static __inline__ vector short __ATTRS_o_ai vec_vadduhm(vector short __a,
+ vector short __b) {
+ return __a + __b;
+}
+
+static __inline__ vector short __ATTRS_o_ai vec_vadduhm(vector bool short __a,
+ vector short __b) {
+ return (vector short)__a + __b;
+}
+
+static __inline__ vector short __ATTRS_o_ai vec_vadduhm(vector short __a,
+ vector bool short __b) {
+ return __a + (vector short)__b;
+}
+
+static __inline__ vector unsigned short __ATTRS_o_ai
+vec_vadduhm(vector unsigned short __a, vector unsigned short __b) {
+ return __a + __b;
+}
+
+static __inline__ vector unsigned short __ATTRS_o_ai
+vec_vadduhm(vector bool short __a, vector unsigned short __b) {
+ return (vector unsigned short)__a + __b;
+}
+
+static __inline__ vector unsigned short __ATTRS_o_ai
+vec_vadduhm(vector unsigned short __a, vector bool short __b) {
+ return __a + (vector unsigned short)__b;
+}
+
+/* vec_vadduwm */
+
+#define __builtin_altivec_vadduwm vec_vadduwm
+
+static __inline__ vector int __ATTRS_o_ai vec_vadduwm(vector int __a,
+ vector int __b) {
+ return __a + __b;
+}
+
+static __inline__ vector int __ATTRS_o_ai vec_vadduwm(vector bool int __a,
+ vector int __b) {
+ return (vector int)__a + __b;
+}
+
+static __inline__ vector int __ATTRS_o_ai vec_vadduwm(vector int __a,
+ vector bool int __b) {
+ return __a + (vector int)__b;
+}
+
+static __inline__ vector unsigned int __ATTRS_o_ai
+vec_vadduwm(vector unsigned int __a, vector unsigned int __b) {
+ return __a + __b;
+}
+
+static __inline__ vector unsigned int __ATTRS_o_ai
+vec_vadduwm(vector bool int __a, vector unsigned int __b) {
+ return (vector unsigned int)__a + __b;
+}
+
+static __inline__ vector unsigned int __ATTRS_o_ai
+vec_vadduwm(vector unsigned int __a, vector bool int __b) {
+ return __a + (vector unsigned int)__b;
+}
+
+/* vec_vaddfp */
+
+#define __builtin_altivec_vaddfp vec_vaddfp
+
+static __inline__ vector float __attribute__((__always_inline__))
+vec_vaddfp(vector float __a, vector float __b) {
+ return __a + __b;
+}
+
+/* vec_addc */
+
+static __inline__ vector signed int __ATTRS_o_ai
+vec_addc(vector signed int __a, vector signed int __b) {
+ return (vector signed int)__builtin_altivec_vaddcuw((vector unsigned int)__a,
+ (vector unsigned int)__b);
+}
+
+static __inline__ vector unsigned int __ATTRS_o_ai
+vec_addc(vector unsigned int __a, vector unsigned int __b) {
+ return __builtin_altivec_vaddcuw(__a, __b);
+}
+
+#if defined(__POWER8_VECTOR__) && defined(__powerpc64__)
+static __inline__ vector signed __int128 __ATTRS_o_ai
+vec_addc(vector signed __int128 __a, vector signed __int128 __b) {
+ return (vector signed __int128)__builtin_altivec_vaddcuq(
+ (vector unsigned __int128)__a, (vector unsigned __int128)__b);
+}
+
+static __inline__ vector unsigned __int128 __ATTRS_o_ai
+vec_addc(vector unsigned __int128 __a, vector unsigned __int128 __b) {
+ return __builtin_altivec_vaddcuq(__a, __b);
+}
+#endif // defined(__POWER8_VECTOR__) && defined(__powerpc64__)
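+
+/* vec_addc produces, per lane, only the carry-out bit (0 or 1) of the
+   corresponding unsigned addition; the sum itself comes from vec_add. */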
+
+/* vec_vaddcuw */
+
+static __inline__ vector unsigned int __attribute__((__always_inline__))
+vec_vaddcuw(vector unsigned int __a, vector unsigned int __b) {
+ return __builtin_altivec_vaddcuw(__a, __b);
+}
+
+/* vec_adds */
+
+static __inline__ vector signed char __ATTRS_o_ai
+vec_adds(vector signed char __a, vector signed char __b) {
+ return __builtin_altivec_vaddsbs(__a, __b);
+}
+
+static __inline__ vector signed char __ATTRS_o_ai
+vec_adds(vector bool char __a, vector signed char __b) {
+ return __builtin_altivec_vaddsbs((vector signed char)__a, __b);
+}
+
+static __inline__ vector signed char __ATTRS_o_ai
+vec_adds(vector signed char __a, vector bool char __b) {
+ return __builtin_altivec_vaddsbs(__a, (vector signed char)__b);
+}
+
+static __inline__ vector unsigned char __ATTRS_o_ai
+vec_adds(vector unsigned char __a, vector unsigned char __b) {
+ return __builtin_altivec_vaddubs(__a, __b);
+}
+
+static __inline__ vector unsigned char __ATTRS_o_ai
+vec_adds(vector bool char __a, vector unsigned char __b) {
+ return __builtin_altivec_vaddubs((vector unsigned char)__a, __b);
+}
+
+static __inline__ vector unsigned char __ATTRS_o_ai
+vec_adds(vector unsigned char __a, vector bool char __b) {
+ return __builtin_altivec_vaddubs(__a, (vector unsigned char)__b);
+}
+
+static __inline__ vector short __ATTRS_o_ai vec_adds(vector short __a,
+ vector short __b) {
+ return __builtin_altivec_vaddshs(__a, __b);
+}
+
+static __inline__ vector short __ATTRS_o_ai vec_adds(vector bool short __a,
+ vector short __b) {
+ return __builtin_altivec_vaddshs((vector short)__a, __b);
+}
+
+static __inline__ vector short __ATTRS_o_ai vec_adds(vector short __a,
+ vector bool short __b) {
+ return __builtin_altivec_vaddshs(__a, (vector short)__b);
+}
+
+static __inline__ vector unsigned short __ATTRS_o_ai
+vec_adds(vector unsigned short __a, vector unsigned short __b) {
+ return __builtin_altivec_vadduhs(__a, __b);
+}
+
+static __inline__ vector unsigned short __ATTRS_o_ai
+vec_adds(vector bool short __a, vector unsigned short __b) {
+ return __builtin_altivec_vadduhs((vector unsigned short)__a, __b);
+}
+
+static __inline__ vector unsigned short __ATTRS_o_ai
+vec_adds(vector unsigned short __a, vector bool short __b) {
+ return __builtin_altivec_vadduhs(__a, (vector unsigned short)__b);
+}
+
+static __inline__ vector int __ATTRS_o_ai vec_adds(vector int __a,
+ vector int __b) {
+ return __builtin_altivec_vaddsws(__a, __b);
+}
+
+static __inline__ vector int __ATTRS_o_ai vec_adds(vector bool int __a,
+ vector int __b) {
+ return __builtin_altivec_vaddsws((vector int)__a, __b);
+}
+
+static __inline__ vector int __ATTRS_o_ai vec_adds(vector int __a,
+ vector bool int __b) {
+ return __builtin_altivec_vaddsws(__a, (vector int)__b);
+}
+
+static __inline__ vector unsigned int __ATTRS_o_ai
+vec_adds(vector unsigned int __a, vector unsigned int __b) {
+ return __builtin_altivec_vadduws(__a, __b);
+}
+
+static __inline__ vector unsigned int __ATTRS_o_ai
+vec_adds(vector bool int __a, vector unsigned int __b) {
+ return __builtin_altivec_vadduws((vector unsigned int)__a, __b);
+}
+
+static __inline__ vector unsigned int __ATTRS_o_ai
+vec_adds(vector unsigned int __a, vector bool int __b) {
+ return __builtin_altivec_vadduws(__a, (vector unsigned int)__b);
+}
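+
+/* Unlike vec_add, the vec_adds overloads saturate: signed lanes clamp to the
+   lane type's minimum or maximum and unsigned lanes clamp to the maximum,
+   instead of wrapping modulo 2^n. */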
+
+/* vec_vaddsbs */
+
+static __inline__ vector signed char __ATTRS_o_ai
+vec_vaddsbs(vector signed char __a, vector signed char __b) {
+ return __builtin_altivec_vaddsbs(__a, __b);
+}
+
+static __inline__ vector signed char __ATTRS_o_ai
+vec_vaddsbs(vector bool char __a, vector signed char __b) {
+ return __builtin_altivec_vaddsbs((vector signed char)__a, __b);
+}
+
+static __inline__ vector signed char __ATTRS_o_ai
+vec_vaddsbs(vector signed char __a, vector bool char __b) {
+ return __builtin_altivec_vaddsbs(__a, (vector signed char)__b);
+}
+
+/* vec_vaddubs */
+
+static __inline__ vector unsigned char __ATTRS_o_ai
+vec_vaddubs(vector unsigned char __a, vector unsigned char __b) {
+ return __builtin_altivec_vaddubs(__a, __b);
+}
+
+static __inline__ vector unsigned char __ATTRS_o_ai
+vec_vaddubs(vector bool char __a, vector unsigned char __b) {
+ return __builtin_altivec_vaddubs((vector unsigned char)__a, __b);
+}
+
+static __inline__ vector unsigned char __ATTRS_o_ai
+vec_vaddubs(vector unsigned char __a, vector bool char __b) {
+ return __builtin_altivec_vaddubs(__a, (vector unsigned char)__b);
+}
+
+/* vec_vaddshs */
+
+static __inline__ vector short __ATTRS_o_ai vec_vaddshs(vector short __a,
+ vector short __b) {
+ return __builtin_altivec_vaddshs(__a, __b);
+}
+
+static __inline__ vector short __ATTRS_o_ai vec_vaddshs(vector bool short __a,
+ vector short __b) {
+ return __builtin_altivec_vaddshs((vector short)__a, __b);
+}
+
+static __inline__ vector short __ATTRS_o_ai vec_vaddshs(vector short __a,
+ vector bool short __b) {
+ return __builtin_altivec_vaddshs(__a, (vector short)__b);
+}
+
+/* vec_vadduhs */
+
+static __inline__ vector unsigned short __ATTRS_o_ai
+vec_vadduhs(vector unsigned short __a, vector unsigned short __b) {
+ return __builtin_altivec_vadduhs(__a, __b);
+}
+
+static __inline__ vector unsigned short __ATTRS_o_ai
+vec_vadduhs(vector bool short __a, vector unsigned short __b) {
+ return __builtin_altivec_vadduhs((vector unsigned short)__a, __b);
+}
+
+static __inline__ vector unsigned short __ATTRS_o_ai
+vec_vadduhs(vector unsigned short __a, vector bool short __b) {
+ return __builtin_altivec_vadduhs(__a, (vector unsigned short)__b);
+}
+
+/* vec_vaddsws */
+
+static __inline__ vector int __ATTRS_o_ai vec_vaddsws(vector int __a,
+ vector int __b) {
+ return __builtin_altivec_vaddsws(__a, __b);
+}
+
+static __inline__ vector int __ATTRS_o_ai vec_vaddsws(vector bool int __a,
+ vector int __b) {
+ return __builtin_altivec_vaddsws((vector int)__a, __b);
+}
+
+static __inline__ vector int __ATTRS_o_ai vec_vaddsws(vector int __a,
+ vector bool int __b) {
+ return __builtin_altivec_vaddsws(__a, (vector int)__b);
+}
+
+/* vec_vadduws */
+
+static __inline__ vector unsigned int __ATTRS_o_ai
+vec_vadduws(vector unsigned int __a, vector unsigned int __b) {
+ return __builtin_altivec_vadduws(__a, __b);
+}
+
+static __inline__ vector unsigned int __ATTRS_o_ai
+vec_vadduws(vector bool int __a, vector unsigned int __b) {
+ return __builtin_altivec_vadduws((vector unsigned int)__a, __b);
+}
+
+static __inline__ vector unsigned int __ATTRS_o_ai
+vec_vadduws(vector unsigned int __a, vector bool int __b) {
+ return __builtin_altivec_vadduws(__a, (vector unsigned int)__b);
+}
+
+#if defined(__POWER8_VECTOR__) && defined(__powerpc64__)
+/* vec_vadduqm */
+
+static __inline__ vector signed __int128 __ATTRS_o_ai
+vec_vadduqm(vector signed __int128 __a, vector signed __int128 __b) {
+ return __a + __b;
+}
+
+static __inline__ vector unsigned __int128 __ATTRS_o_ai
+vec_vadduqm(vector unsigned __int128 __a, vector unsigned __int128 __b) {
+ return __a + __b;
+}
+
+/* vec_vaddeuqm */
+
+static __inline__ vector signed __int128 __ATTRS_o_ai
+vec_vaddeuqm(vector signed __int128 __a, vector signed __int128 __b,
+ vector signed __int128 __c) {
+ return __builtin_altivec_vaddeuqm(__a, __b, __c);
+}
+
+static __inline__ vector unsigned __int128 __ATTRS_o_ai
+vec_vaddeuqm(vector unsigned __int128 __a, vector unsigned __int128 __b,
+ vector unsigned __int128 __c) {
+ return __builtin_altivec_vaddeuqm(__a, __b, __c);
+}
+
+/* vec_vaddcuq */
+
+static __inline__ vector signed __int128 __ATTRS_o_ai
+vec_vaddcuq(vector signed __int128 __a, vector signed __int128 __b) {
+ return __builtin_altivec_vaddcuq(__a, __b);
+}
+
+static __inline__ vector unsigned __int128 __ATTRS_o_ai
+vec_vaddcuq(vector unsigned __int128 __a, vector unsigned __int128 __b) {
+ return __builtin_altivec_vaddcuq(__a, __b);
+}
+
+/* vec_vaddecuq */
+
+static __inline__ vector signed __int128 __ATTRS_o_ai
+vec_vaddecuq(vector signed __int128 __a, vector signed __int128 __b,
+ vector signed __int128 __c) {
+ return __builtin_altivec_vaddecuq(__a, __b, __c);
+}
+
+static __inline__ vector unsigned __int128 __ATTRS_o_ai
+vec_vaddecuq(vector unsigned __int128 __a, vector unsigned __int128 __b,
+ vector unsigned __int128 __c) {
+ return __builtin_altivec_vaddecuq(__a, __b, __c);
+}
+#endif // defined(__POWER8_VECTOR__) && defined(__powerpc64__)
+
+/* vec_and */
+
+#define __builtin_altivec_vand vec_and
+
+static __inline__ vector signed char __ATTRS_o_ai
+vec_and(vector signed char __a, vector signed char __b) {
+ return __a & __b;
+}
+
+static __inline__ vector signed char __ATTRS_o_ai
+vec_and(vector bool char __a, vector signed char __b) {
+ return (vector signed char)__a & __b;
+}
+
+static __inline__ vector signed char __ATTRS_o_ai
+vec_and(vector signed char __a, vector bool char __b) {
+ return __a & (vector signed char)__b;
+}
+
+static __inline__ vector unsigned char __ATTRS_o_ai
+vec_and(vector unsigned char __a, vector unsigned char __b) {
+ return __a & __b;
+}
+
+static __inline__ vector unsigned char __ATTRS_o_ai
+vec_and(vector bool char __a, vector unsigned char __b) {
+ return (vector unsigned char)__a & __b;
+}
+
+static __inline__ vector unsigned char __ATTRS_o_ai
+vec_and(vector unsigned char __a, vector bool char __b) {
+ return __a & (vector unsigned char)__b;
+}
+
+static __inline__ vector bool char __ATTRS_o_ai vec_and(vector bool char __a,
+ vector bool char __b) {
+ return __a & __b;
+}
+
+static __inline__ vector short __ATTRS_o_ai vec_and(vector short __a,
+ vector short __b) {
+ return __a & __b;
+}
+
+static __inline__ vector short __ATTRS_o_ai vec_and(vector bool short __a,
+ vector short __b) {
+ return (vector short)__a & __b;
+}
+
+static __inline__ vector short __ATTRS_o_ai vec_and(vector short __a,
+ vector bool short __b) {
+ return __a & (vector short)__b;
+}
+
+static __inline__ vector unsigned short __ATTRS_o_ai
+vec_and(vector unsigned short __a, vector unsigned short __b) {
+ return __a & __b;
+}
+
+static __inline__ vector unsigned short __ATTRS_o_ai
+vec_and(vector bool short __a, vector unsigned short __b) {
+ return (vector unsigned short)__a & __b;
+}
+
+static __inline__ vector unsigned short __ATTRS_o_ai
+vec_and(vector unsigned short __a, vector bool short __b) {
+ return __a & (vector unsigned short)__b;
+}
+
+static __inline__ vector bool short __ATTRS_o_ai
+vec_and(vector bool short __a, vector bool short __b) {
+ return __a & __b;
+}
+
+static __inline__ vector int __ATTRS_o_ai vec_and(vector int __a,
+ vector int __b) {
+ return __a & __b;
+}
+
+static __inline__ vector int __ATTRS_o_ai vec_and(vector bool int __a,
+ vector int __b) {
+ return (vector int)__a & __b;
+}
+
+static __inline__ vector int __ATTRS_o_ai vec_and(vector int __a,
+ vector bool int __b) {
+ return __a & (vector int)__b;
+}
+
+static __inline__ vector unsigned int __ATTRS_o_ai
+vec_and(vector unsigned int __a, vector unsigned int __b) {
+ return __a & __b;
+}
+
+static __inline__ vector unsigned int __ATTRS_o_ai
+vec_and(vector bool int __a, vector unsigned int __b) {
+ return (vector unsigned int)__a & __b;
+}
+
+static __inline__ vector unsigned int __ATTRS_o_ai
+vec_and(vector unsigned int __a, vector bool int __b) {
+ return __a & (vector unsigned int)__b;
+}
+
+static __inline__ vector bool int __ATTRS_o_ai vec_and(vector bool int __a,
+ vector bool int __b) {
+ return __a & __b;
+}
+
+static __inline__ vector float __ATTRS_o_ai vec_and(vector float __a,
+ vector float __b) {
+ vector unsigned int __res =
+ (vector unsigned int)__a & (vector unsigned int)__b;
+ return (vector float)__res;
+}
+
+static __inline__ vector float __ATTRS_o_ai vec_and(vector bool int __a,
+ vector float __b) {
+ vector unsigned int __res =
+ (vector unsigned int)__a & (vector unsigned int)__b;
+ return (vector float)__res;
+}
+
+static __inline__ vector float __ATTRS_o_ai vec_and(vector float __a,
+ vector bool int __b) {
+ vector unsigned int __res =
+ (vector unsigned int)__a & (vector unsigned int)__b;
+ return (vector float)__res;
+}
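+
+/* The floating-point vec_and forms reinterpret the lanes as integers, AND
+   them, and reinterpret back; this is the usual idiom for applying sign or
+   selection masks to floating-point data. */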
+
+#ifdef __VSX__
+static __inline__ vector double __ATTRS_o_ai vec_and(vector bool long long __a,
+ vector double __b) {
+ vector unsigned long long __res =
+ (vector unsigned long long)__a & (vector unsigned long long)__b;
+ return (vector double)__res;
+}
+
+static __inline__ vector double __ATTRS_o_ai
+vec_and(vector double __a, vector bool long long __b) {
+ vector unsigned long long __res =
+ (vector unsigned long long)__a & (vector unsigned long long)__b;
+ return (vector double)__res;
+}
+
+static __inline__ vector double __ATTRS_o_ai vec_and(vector double __a,
+ vector double __b) {
+ vector unsigned long long __res =
+ (vector unsigned long long)__a & (vector unsigned long long)__b;
+ return (vector double)__res;
+}
+
+static __inline__ vector signed long long __ATTRS_o_ai
+vec_and(vector signed long long __a, vector signed long long __b) {
+ return __a & __b;
+}
+
+static __inline__ vector signed long long __ATTRS_o_ai
+vec_and(vector bool long long __a, vector signed long long __b) {
+ return (vector signed long long)__a & __b;
+}
+
+static __inline__ vector signed long long __ATTRS_o_ai
+vec_and(vector signed long long __a, vector bool long long __b) {
+ return __a & (vector signed long long)__b;
+}
+
+static __inline__ vector unsigned long long __ATTRS_o_ai
+vec_and(vector unsigned long long __a, vector unsigned long long __b) {
+ return __a & __b;
+}
+
+static __inline__ vector unsigned long long __ATTRS_o_ai
+vec_and(vector bool long long __a, vector unsigned long long __b) {
+ return (vector unsigned long long)__a & __b;
+}
+
+static __inline__ vector unsigned long long __ATTRS_o_ai
+vec_and(vector unsigned long long __a, vector bool long long __b) {
+ return __a & (vector unsigned long long)__b;
+}
+
+static __inline__ vector bool long long __ATTRS_o_ai
+vec_and(vector bool long long __a, vector bool long long __b) {
+ return __a & __b;
+}
+#endif
+
+/* vec_vand */
+
+static __inline__ vector signed char __ATTRS_o_ai
+vec_vand(vector signed char __a, vector signed char __b) {
+ return __a & __b;
+}
+
+static __inline__ vector signed char __ATTRS_o_ai
+vec_vand(vector bool char __a, vector signed char __b) {
+ return (vector signed char)__a & __b;
+}
+
+static __inline__ vector signed char __ATTRS_o_ai
+vec_vand(vector signed char __a, vector bool char __b) {
+ return __a & (vector signed char)__b;
+}
+
+static __inline__ vector unsigned char __ATTRS_o_ai
+vec_vand(vector unsigned char __a, vector unsigned char __b) {
+ return __a & __b;
+}
+
+static __inline__ vector unsigned char __ATTRS_o_ai
+vec_vand(vector bool char __a, vector unsigned char __b) {
+ return (vector unsigned char)__a & __b;
+}
+
+static __inline__ vector unsigned char __ATTRS_o_ai
+vec_vand(vector unsigned char __a, vector bool char __b) {
+ return __a & (vector unsigned char)__b;
+}
+
+static __inline__ vector bool char __ATTRS_o_ai vec_vand(vector bool char __a,
+ vector bool char __b) {
+ return __a & __b;
+}
+
+static __inline__ vector short __ATTRS_o_ai vec_vand(vector short __a,
+ vector short __b) {
+ return __a & __b;
+}
+
+static __inline__ vector short __ATTRS_o_ai vec_vand(vector bool short __a,
+ vector short __b) {
+ return (vector short)__a & __b;
+}
+
+static __inline__ vector short __ATTRS_o_ai vec_vand(vector short __a,
+ vector bool short __b) {
+ return __a & (vector short)__b;
+}
+
+static __inline__ vector unsigned short __ATTRS_o_ai
+vec_vand(vector unsigned short __a, vector unsigned short __b) {
+ return __a & __b;
+}
+
+static __inline__ vector unsigned short __ATTRS_o_ai
+vec_vand(vector bool short __a, vector unsigned short __b) {
+ return (vector unsigned short)__a & __b;
+}
+
+static __inline__ vector unsigned short __ATTRS_o_ai
+vec_vand(vector unsigned short __a, vector bool short __b) {
+ return __a & (vector unsigned short)__b;
+}
+
+static __inline__ vector bool short __ATTRS_o_ai
+vec_vand(vector bool short __a, vector bool short __b) {
+ return __a & __b;
+}
+
+static __inline__ vector int __ATTRS_o_ai vec_vand(vector int __a,
+ vector int __b) {
+ return __a & __b;
+}
+
+static __inline__ vector int __ATTRS_o_ai vec_vand(vector bool int __a,
+ vector int __b) {
+ return (vector int)__a & __b;
+}
+
+static __inline__ vector int __ATTRS_o_ai vec_vand(vector int __a,
+ vector bool int __b) {
+ return __a & (vector int)__b;
+}
+
+static __inline__ vector unsigned int __ATTRS_o_ai
+vec_vand(vector unsigned int __a, vector unsigned int __b) {
+ return __a & __b;
+}
+
+static __inline__ vector unsigned int __ATTRS_o_ai
+vec_vand(vector bool int __a, vector unsigned int __b) {
+ return (vector unsigned int)__a & __b;
+}
+
+static __inline__ vector unsigned int __ATTRS_o_ai
+vec_vand(vector unsigned int __a, vector bool int __b) {
+ return __a & (vector unsigned int)__b;
+}
+
+static __inline__ vector bool int __ATTRS_o_ai vec_vand(vector bool int __a,
+ vector bool int __b) {
+ return __a & __b;
+}
+
+static __inline__ vector float __ATTRS_o_ai vec_vand(vector float __a,
+ vector float __b) {
+ vector unsigned int __res =
+ (vector unsigned int)__a & (vector unsigned int)__b;
+ return (vector float)__res;
+}
+
+static __inline__ vector float __ATTRS_o_ai vec_vand(vector bool int __a,
+ vector float __b) {
+ vector unsigned int __res =
+ (vector unsigned int)__a & (vector unsigned int)__b;
+ return (vector float)__res;
+}
+
+static __inline__ vector float __ATTRS_o_ai vec_vand(vector float __a,
+ vector bool int __b) {
+ vector unsigned int __res =
+ (vector unsigned int)__a & (vector unsigned int)__b;
+ return (vector float)__res;
+}
+
+#ifdef __VSX__
+static __inline__ vector signed long long __ATTRS_o_ai
+vec_vand(vector signed long long __a, vector signed long long __b) {
+ return __a & __b;
+}
+
+static __inline__ vector signed long long __ATTRS_o_ai
+vec_vand(vector bool long long __a, vector signed long long __b) {
+ return (vector signed long long)__a & __b;
+}
+
+static __inline__ vector signed long long __ATTRS_o_ai
+vec_vand(vector signed long long __a, vector bool long long __b) {
+ return __a & (vector signed long long)__b;
+}
+
+static __inline__ vector unsigned long long __ATTRS_o_ai
+vec_vand(vector unsigned long long __a, vector unsigned long long __b) {
+ return __a & __b;
+}
+
+static __inline__ vector unsigned long long __ATTRS_o_ai
+vec_vand(vector bool long long __a, vector unsigned long long __b) {
+ return (vector unsigned long long)__a & __b;
+}
+
+static __inline__ vector unsigned long long __ATTRS_o_ai
+vec_vand(vector unsigned long long __a, vector bool long long __b) {
+ return __a & (vector unsigned long long)__b;
+}
+
+static __inline__ vector bool long long __ATTRS_o_ai
+vec_vand(vector bool long long __a, vector bool long long __b) {
+ return __a & __b;
+}
+#endif
+
+/* vec_andc */
+
+#define __builtin_altivec_vandc vec_andc
+
+static __inline__ vector signed char __ATTRS_o_ai
+vec_andc(vector signed char __a, vector signed char __b) {
+ return __a & ~__b;
+}
+
+static __inline__ vector signed char __ATTRS_o_ai
+vec_andc(vector bool char __a, vector signed char __b) {
+ return (vector signed char)__a & ~__b;
+}
+
+static __inline__ vector signed char __ATTRS_o_ai
+vec_andc(vector signed char __a, vector bool char __b) {
+ return __a & ~(vector signed char)__b;
+}
+
+static __inline__ vector unsigned char __ATTRS_o_ai
+vec_andc(vector unsigned char __a, vector unsigned char __b) {
+ return __a & ~__b;
+}
+
+static __inline__ vector unsigned char __ATTRS_o_ai
+vec_andc(vector bool char __a, vector unsigned char __b) {
+ return (vector unsigned char)__a & ~__b;
+}
+
+static __inline__ vector unsigned char __ATTRS_o_ai
+vec_andc(vector unsigned char __a, vector bool char __b) {
+ return __a & ~(vector unsigned char)__b;
+}
+
+static __inline__ vector bool char __ATTRS_o_ai vec_andc(vector bool char __a,
+ vector bool char __b) {
+ return __a & ~__b;
+}
+
+static __inline__ vector short __ATTRS_o_ai vec_andc(vector short __a,
+ vector short __b) {
+ return __a & ~__b;
+}
+
+static __inline__ vector short __ATTRS_o_ai vec_andc(vector bool short __a,
+ vector short __b) {
+ return (vector short)__a & ~__b;
+}
+
+static __inline__ vector short __ATTRS_o_ai vec_andc(vector short __a,
+ vector bool short __b) {
+ return __a & ~(vector short)__b;
+}
+
+static __inline__ vector unsigned short __ATTRS_o_ai
+vec_andc(vector unsigned short __a, vector unsigned short __b) {
+ return __a & ~__b;
+}
+
+static __inline__ vector unsigned short __ATTRS_o_ai
+vec_andc(vector bool short __a, vector unsigned short __b) {
+ return (vector unsigned short)__a & ~__b;
+}
+
+static __inline__ vector unsigned short __ATTRS_o_ai
+vec_andc(vector unsigned short __a, vector bool short __b) {
+ return __a & ~(vector unsigned short)__b;
+}
+
+static __inline__ vector bool short __ATTRS_o_ai
+vec_andc(vector bool short __a, vector bool short __b) {
+ return __a & ~__b;
+}
+
+static __inline__ vector int __ATTRS_o_ai vec_andc(vector int __a,
+ vector int __b) {
+ return __a & ~__b;
+}
+
+static __inline__ vector int __ATTRS_o_ai vec_andc(vector bool int __a,
+ vector int __b) {
+ return (vector int)__a & ~__b;
+}
+
+static __inline__ vector int __ATTRS_o_ai vec_andc(vector int __a,
+ vector bool int __b) {
+ return __a & ~(vector int)__b;
+}
+
+static __inline__ vector unsigned int __ATTRS_o_ai
+vec_andc(vector unsigned int __a, vector unsigned int __b) {
+ return __a & ~__b;
+}
+
+static __inline__ vector unsigned int __ATTRS_o_ai
+vec_andc(vector bool int __a, vector unsigned int __b) {
+ return (vector unsigned int)__a & ~__b;
+}
+
+static __inline__ vector unsigned int __ATTRS_o_ai
+vec_andc(vector unsigned int __a, vector bool int __b) {
+ return __a & ~(vector unsigned int)__b;
+}
+
+static __inline__ vector bool int __ATTRS_o_ai vec_andc(vector bool int __a,
+ vector bool int __b) {
+ return __a & ~__b;
+}
+
+static __inline__ vector float __ATTRS_o_ai vec_andc(vector float __a,
+ vector float __b) {
+ vector unsigned int __res =
+ (vector unsigned int)__a & ~(vector unsigned int)__b;
+ return (vector float)__res;
+}
+
+static __inline__ vector float __ATTRS_o_ai vec_andc(vector bool int __a,
+ vector float __b) {
+ vector unsigned int __res =
+ (vector unsigned int)__a & ~(vector unsigned int)__b;
+ return (vector float)__res;
+}
+
+static __inline__ vector float __ATTRS_o_ai vec_andc(vector float __a,
+ vector bool int __b) {
+ vector unsigned int __res =
+ (vector unsigned int)__a & ~(vector unsigned int)__b;
+ return (vector float)__res;
+}
+
+#ifdef __VSX__
+static __inline__ vector double __ATTRS_o_ai vec_andc(vector bool long long __a,
+ vector double __b) {
+ vector unsigned long long __res =
+ (vector unsigned long long)__a & ~(vector unsigned long long)__b;
+ return (vector double)__res;
+}
+
+static __inline__ vector double __ATTRS_o_ai
+vec_andc(vector double __a, vector bool long long __b) {
+ vector unsigned long long __res =
+ (vector unsigned long long)__a & ~(vector unsigned long long)__b;
+ return (vector double)__res;
+}
+
+static __inline__ vector double __ATTRS_o_ai vec_andc(vector double __a,
+ vector double __b) {
+ vector unsigned long long __res =
+ (vector unsigned long long)__a & ~(vector unsigned long long)__b;
+ return (vector double)__res;
+}
+
+static __inline__ vector signed long long __ATTRS_o_ai
+vec_andc(vector signed long long __a, vector signed long long __b) {
+ return __a & ~__b;
+}
+
+static __inline__ vector signed long long __ATTRS_o_ai
+vec_andc(vector bool long long __a, vector signed long long __b) {
+ return (vector signed long long)__a & ~__b;
+}
+
+static __inline__ vector signed long long __ATTRS_o_ai
+vec_andc(vector signed long long __a, vector bool long long __b) {
+ return __a & ~(vector signed long long)__b;
+}
+
+static __inline__ vector unsigned long long __ATTRS_o_ai
+vec_andc(vector unsigned long long __a, vector unsigned long long __b) {
+ return __a & ~__b;
+}
+
+static __inline__ vector unsigned long long __ATTRS_o_ai
+vec_andc(vector bool long long __a, vector unsigned long long __b) {
+ return (vector unsigned long long)__a & ~__b;
+}
+
+static __inline__ vector unsigned long long __ATTRS_o_ai
+vec_andc(vector unsigned long long __a, vector bool long long __b) {
+ return __a & ~(vector unsigned long long)__b;
+}
+
+static __inline__ vector bool long long __ATTRS_o_ai
+vec_andc(vector bool long long __a, vector bool long long __b) {
+ return __a & ~__b;
+}
+#endif
+
+/* vec_vandc */
+
+static __inline__ vector signed char __ATTRS_o_ai
+vec_vandc(vector signed char __a, vector signed char __b) {
+ return __a & ~__b;
+}
+
+static __inline__ vector signed char __ATTRS_o_ai
+vec_vandc(vector bool char __a, vector signed char __b) {
+ return (vector signed char)__a & ~__b;
+}
+
+static __inline__ vector signed char __ATTRS_o_ai
+vec_vandc(vector signed char __a, vector bool char __b) {
+ return __a & ~(vector signed char)__b;
+}
+
+static __inline__ vector unsigned char __ATTRS_o_ai
+vec_vandc(vector unsigned char __a, vector unsigned char __b) {
+ return __a & ~__b;
+}
+
+static __inline__ vector unsigned char __ATTRS_o_ai
+vec_vandc(vector bool char __a, vector unsigned char __b) {
+ return (vector unsigned char)__a & ~__b;
+}
+
+static __inline__ vector unsigned char __ATTRS_o_ai
+vec_vandc(vector unsigned char __a, vector bool char __b) {
+ return __a & ~(vector unsigned char)__b;
+}
+
+static __inline__ vector bool char __ATTRS_o_ai
+vec_vandc(vector bool char __a, vector bool char __b) {
+ return __a & ~__b;
+}
+
+static __inline__ vector short __ATTRS_o_ai vec_vandc(vector short __a,
+ vector short __b) {
+ return __a & ~__b;
+}
+
+static __inline__ vector short __ATTRS_o_ai vec_vandc(vector bool short __a,
+ vector short __b) {
+ return (vector short)__a & ~__b;
+}
+
+static __inline__ vector short __ATTRS_o_ai vec_vandc(vector short __a,
+ vector bool short __b) {
+ return __a & ~(vector short)__b;
+}
+
+static __inline__ vector unsigned short __ATTRS_o_ai
+vec_vandc(vector unsigned short __a, vector unsigned short __b) {
+ return __a & ~__b;
+}
+
+static __inline__ vector unsigned short __ATTRS_o_ai
+vec_vandc(vector bool short __a, vector unsigned short __b) {
+ return (vector unsigned short)__a & ~__b;
+}
+
+static __inline__ vector unsigned short __ATTRS_o_ai
+vec_vandc(vector unsigned short __a, vector bool short __b) {
+ return __a & ~(vector unsigned short)__b;
+}
+
+static __inline__ vector bool short __ATTRS_o_ai
+vec_vandc(vector bool short __a, vector bool short __b) {
+ return __a & ~__b;
+}
+
+static __inline__ vector int __ATTRS_o_ai vec_vandc(vector int __a,
+ vector int __b) {
+ return __a & ~__b;
+}
+
+static __inline__ vector int __ATTRS_o_ai vec_vandc(vector bool int __a,
+ vector int __b) {
+ return (vector int)__a & ~__b;
+}
+
+static __inline__ vector int __ATTRS_o_ai vec_vandc(vector int __a,
+ vector bool int __b) {
+ return __a & ~(vector int)__b;
+}
+
+static __inline__ vector unsigned int __ATTRS_o_ai
+vec_vandc(vector unsigned int __a, vector unsigned int __b) {
+ return __a & ~__b;
+}
+
+static __inline__ vector unsigned int __ATTRS_o_ai
+vec_vandc(vector bool int __a, vector unsigned int __b) {
+ return (vector unsigned int)__a & ~__b;
+}
+
+static __inline__ vector unsigned int __ATTRS_o_ai
+vec_vandc(vector unsigned int __a, vector bool int __b) {
+ return __a & ~(vector unsigned int)__b;
+}
+
+static __inline__ vector bool int __ATTRS_o_ai vec_vandc(vector bool int __a,
+ vector bool int __b) {
+ return __a & ~__b;
+}
+
+static __inline__ vector float __ATTRS_o_ai vec_vandc(vector float __a,
+ vector float __b) {
+ vector unsigned int __res =
+ (vector unsigned int)__a & ~(vector unsigned int)__b;
+ return (vector float)__res;
+}
+
+static __inline__ vector float __ATTRS_o_ai vec_vandc(vector bool int __a,
+ vector float __b) {
+ vector unsigned int __res =
+ (vector unsigned int)__a & ~(vector unsigned int)__b;
+ return (vector float)__res;
+}
+
+static __inline__ vector float __ATTRS_o_ai vec_vandc(vector float __a,
+ vector bool int __b) {
+ vector unsigned int __res =
+ (vector unsigned int)__a & ~(vector unsigned int)__b;
+ return (vector float)__res;
+}
+
+#ifdef __VSX__
+static __inline__ vector signed long long __ATTRS_o_ai
+vec_vandc(vector signed long long __a, vector signed long long __b) {
+ return __a & ~__b;
+}
+
+static __inline__ vector signed long long __ATTRS_o_ai
+vec_vandc(vector bool long long __a, vector signed long long __b) {
+ return (vector signed long long)__a & ~__b;
+}
+
+static __inline__ vector signed long long __ATTRS_o_ai
+vec_vandc(vector signed long long __a, vector bool long long __b) {
+ return __a & ~(vector signed long long)__b;
+}
+
+static __inline__ vector unsigned long long __ATTRS_o_ai
+vec_vandc(vector unsigned long long __a, vector unsigned long long __b) {
+ return __a & ~__b;
+}
+
+static __inline__ vector unsigned long long __ATTRS_o_ai
+vec_vandc(vector bool long long __a, vector unsigned long long __b) {
+ return (vector unsigned long long)__a & ~__b;
+}
+
+static __inline__ vector unsigned long long __ATTRS_o_ai
+vec_vandc(vector unsigned long long __a, vector bool long long __b) {
+ return __a & ~(vector unsigned long long)__b;
+}
+
+static __inline__ vector bool long long __ATTRS_o_ai
+vec_vandc(vector bool long long __a, vector bool long long __b) {
+ return __a & ~__b;
+}
+#endif
+
+/* vec_avg */
+
+static __inline__ vector signed char __ATTRS_o_ai
+vec_avg(vector signed char __a, vector signed char __b) {
+ return __builtin_altivec_vavgsb(__a, __b);
+}
+
+static __inline__ vector unsigned char __ATTRS_o_ai
+vec_avg(vector unsigned char __a, vector unsigned char __b) {
+ return __builtin_altivec_vavgub(__a, __b);
+}
+
+static __inline__ vector short __ATTRS_o_ai vec_avg(vector short __a,
+ vector short __b) {
+ return __builtin_altivec_vavgsh(__a, __b);
+}
+
+static __inline__ vector unsigned short __ATTRS_o_ai
+vec_avg(vector unsigned short __a, vector unsigned short __b) {
+ return __builtin_altivec_vavguh(__a, __b);
+}
+
+static __inline__ vector int __ATTRS_o_ai vec_avg(vector int __a,
+ vector int __b) {
+ return __builtin_altivec_vavgsw(__a, __b);
+}
+
+static __inline__ vector unsigned int __ATTRS_o_ai
+vec_avg(vector unsigned int __a, vector unsigned int __b) {
+ return __builtin_altivec_vavguw(__a, __b);
+}
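+
+/* The vavg* instructions compute (__a + __b + 1) >> 1 per lane using a
+   widened intermediate, so the rounding-up average cannot overflow. */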
+
+/* vec_vavgsb */
+
+static __inline__ vector signed char __attribute__((__always_inline__))
+vec_vavgsb(vector signed char __a, vector signed char __b) {
+ return __builtin_altivec_vavgsb(__a, __b);
+}
+
+/* vec_vavgub */
+
+static __inline__ vector unsigned char __attribute__((__always_inline__))
+vec_vavgub(vector unsigned char __a, vector unsigned char __b) {
+ return __builtin_altivec_vavgub(__a, __b);
+}
+
+/* vec_vavgsh */
+
+static __inline__ vector short __attribute__((__always_inline__))
+vec_vavgsh(vector short __a, vector short __b) {
+ return __builtin_altivec_vavgsh(__a, __b);
+}
+
+/* vec_vavguh */
+
+static __inline__ vector unsigned short __attribute__((__always_inline__))
+vec_vavguh(vector unsigned short __a, vector unsigned short __b) {
+ return __builtin_altivec_vavguh(__a, __b);
+}
+
+/* vec_vavgsw */
+
+static __inline__ vector int __attribute__((__always_inline__))
+vec_vavgsw(vector int __a, vector int __b) {
+ return __builtin_altivec_vavgsw(__a, __b);
+}
+
+/* vec_vavguw */
+
+static __inline__ vector unsigned int __attribute__((__always_inline__))
+vec_vavguw(vector unsigned int __a, vector unsigned int __b) {
+ return __builtin_altivec_vavguw(__a, __b);
+}
+
+/* vec_ceil */
+
+static __inline__ vector float __ATTRS_o_ai vec_ceil(vector float __a) {
+#ifdef __VSX__
+ return __builtin_vsx_xvrspip(__a);
+#else
+ return __builtin_altivec_vrfip(__a);
+#endif
+}
+
+#ifdef __VSX__
+static __inline__ vector double __ATTRS_o_ai vec_ceil(vector double __a) {
+ return __builtin_vsx_xvrdpip(__a);
+}
+#endif
+
+/* vec_vrfip */
+
+static __inline__ vector float __attribute__((__always_inline__))
+vec_vrfip(vector float __a) {
+ return __builtin_altivec_vrfip(__a);
+}
+
+/* vec_cmpb */
+
+static __inline__ vector int __attribute__((__always_inline__))
+vec_cmpb(vector float __a, vector float __b) {
+ return __builtin_altivec_vcmpbfp(__a, __b);
+}
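+
+/* vec_cmpb is a bounds test: per word, vcmpbfp reports whether __a[i] lies
+   within [-__b[i], +__b[i]], with an all-zero result lane meaning the
+   element is in bounds. */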
+
+/* vec_vcmpbfp */
+
+static __inline__ vector int __attribute__((__always_inline__))
+vec_vcmpbfp(vector float __a, vector float __b) {
+ return __builtin_altivec_vcmpbfp(__a, __b);
+}
+
+/* vec_cmpeq */
+
+static __inline__ vector bool char __ATTRS_o_ai
+vec_cmpeq(vector signed char __a, vector signed char __b) {
+ return (vector bool char)__builtin_altivec_vcmpequb((vector char)__a,
+ (vector char)__b);
+}
+
+static __inline__ vector bool char __ATTRS_o_ai
+vec_cmpeq(vector unsigned char __a, vector unsigned char __b) {
+ return (vector bool char)__builtin_altivec_vcmpequb((vector char)__a,
+ (vector char)__b);
+}
+
+static __inline__ vector bool short __ATTRS_o_ai vec_cmpeq(vector short __a,
+ vector short __b) {
+ return (vector bool short)__builtin_altivec_vcmpequh(__a, __b);
+}
+
+static __inline__ vector bool short __ATTRS_o_ai
+vec_cmpeq(vector unsigned short __a, vector unsigned short __b) {
+ return (vector bool short)__builtin_altivec_vcmpequh((vector short)__a,
+ (vector short)__b);
+}
+
+static __inline__ vector bool int __ATTRS_o_ai vec_cmpeq(vector int __a,
+ vector int __b) {
+ return (vector bool int)__builtin_altivec_vcmpequw(__a, __b);
+}
+
+static __inline__ vector bool int __ATTRS_o_ai
+vec_cmpeq(vector unsigned int __a, vector unsigned int __b) {
+ return (vector bool int)__builtin_altivec_vcmpequw((vector int)__a,
+ (vector int)__b);
+}
+
+#ifdef __POWER8_VECTOR__
+static __inline__ vector bool long long __ATTRS_o_ai
+vec_cmpeq(vector signed long long __a, vector signed long long __b) {
+ return (vector bool long long)__builtin_altivec_vcmpequd(__a, __b);
+}
+
+static __inline__ vector bool long long __ATTRS_o_ai
+vec_cmpeq(vector unsigned long long __a, vector unsigned long long __b) {
+ return (vector bool long long)__builtin_altivec_vcmpequd(
+ (vector long long)__a, (vector long long)__b);
+}
+#endif
+
+static __inline__ vector bool int __ATTRS_o_ai vec_cmpeq(vector float __a,
+ vector float __b) {
+#ifdef __VSX__
+ return (vector bool int)__builtin_vsx_xvcmpeqsp(__a, __b);
+#else
+ return (vector bool int)__builtin_altivec_vcmpeqfp(__a, __b);
+#endif
+}
+
+#ifdef __VSX__
+static __inline__ vector bool long long __ATTRS_o_ai
+vec_cmpeq(vector double __a, vector double __b) {
+ return (vector bool long long)__builtin_vsx_xvcmpeqdp(__a, __b);
+}
+#endif
+
+/* vec_cmpgt */
+
+static __inline__ vector bool char __ATTRS_o_ai
+vec_cmpgt(vector signed char __a, vector signed char __b) {
+ return (vector bool char)__builtin_altivec_vcmpgtsb(__a, __b);
+}
+
+static __inline__ vector bool char __ATTRS_o_ai
+vec_cmpgt(vector unsigned char __a, vector unsigned char __b) {
+ return (vector bool char)__builtin_altivec_vcmpgtub(__a, __b);
+}
+
+static __inline__ vector bool short __ATTRS_o_ai vec_cmpgt(vector short __a,
+ vector short __b) {
+ return (vector bool short)__builtin_altivec_vcmpgtsh(__a, __b);
+}
+
+static __inline__ vector bool short __ATTRS_o_ai
+vec_cmpgt(vector unsigned short __a, vector unsigned short __b) {
+ return (vector bool short)__builtin_altivec_vcmpgtuh(__a, __b);
+}
+
+static __inline__ vector bool int __ATTRS_o_ai vec_cmpgt(vector int __a,
+ vector int __b) {
+ return (vector bool int)__builtin_altivec_vcmpgtsw(__a, __b);
+}
+
+static __inline__ vector bool int __ATTRS_o_ai
+vec_cmpgt(vector unsigned int __a, vector unsigned int __b) {
+ return (vector bool int)__builtin_altivec_vcmpgtuw(__a, __b);
+}
+
+#ifdef __POWER8_VECTOR__
+static __inline__ vector bool long long __ATTRS_o_ai
+vec_cmpgt(vector signed long long __a, vector signed long long __b) {
+ return (vector bool long long)__builtin_altivec_vcmpgtsd(__a, __b);
+}
+
+static __inline__ vector bool long long __ATTRS_o_ai
+vec_cmpgt(vector unsigned long long __a, vector unsigned long long __b) {
+ return (vector bool long long)__builtin_altivec_vcmpgtud(__a, __b);
+}
+#endif
+
+static __inline__ vector bool int __ATTRS_o_ai vec_cmpgt(vector float __a,
+ vector float __b) {
+#ifdef __VSX__
+ return (vector bool int)__builtin_vsx_xvcmpgtsp(__a, __b);
+#else
+ return (vector bool int)__builtin_altivec_vcmpgtfp(__a, __b);
+#endif
+}
+
+#ifdef __VSX__
+static __inline__ vector bool long long __ATTRS_o_ai
+vec_cmpgt(vector double __a, vector double __b) {
+ return (vector bool long long)__builtin_vsx_xvcmpgtdp(__a, __b);
+}
+#endif
+
+/* vec_cmpge */
+
+static __inline__ vector bool char __ATTRS_o_ai
+vec_cmpge(vector signed char __a, vector signed char __b) {
+ return ~(vec_cmpgt(__b, __a));
+}
+
+static __inline__ vector bool char __ATTRS_o_ai
+vec_cmpge(vector unsigned char __a, vector unsigned char __b) {
+ return ~(vec_cmpgt(__b, __a));
+}
+
+static __inline__ vector bool short __ATTRS_o_ai
+vec_cmpge(vector signed short __a, vector signed short __b) {
+ return ~(vec_cmpgt(__b, __a));
+}
+
+static __inline__ vector bool short __ATTRS_o_ai
+vec_cmpge(vector unsigned short __a, vector unsigned short __b) {
+ return ~(vec_cmpgt(__b, __a));
+}
+
+static __inline__ vector bool int __ATTRS_o_ai
+vec_cmpge(vector signed int __a, vector signed int __b) {
+ return ~(vec_cmpgt(__b, __a));
+}
+
+static __inline__ vector bool int __ATTRS_o_ai
+vec_cmpge(vector unsigned int __a, vector unsigned int __b) {
+ return ~(vec_cmpgt(__b, __a));
+}
+
+static __inline__ vector bool int __ATTRS_o_ai vec_cmpge(vector float __a,
+ vector float __b) {
+#ifdef __VSX__
+ return (vector bool int)__builtin_vsx_xvcmpgesp(__a, __b);
+#else
+ return (vector bool int)__builtin_altivec_vcmpgefp(__a, __b);
+#endif
+}
+
+#ifdef __VSX__
+static __inline__ vector bool long long __ATTRS_o_ai
+vec_cmpge(vector double __a, vector double __b) {
+ return (vector bool long long)__builtin_vsx_xvcmpgedp(__a, __b);
+}
+#endif
+
+#ifdef __POWER8_VECTOR__
+static __inline__ vector bool long long __ATTRS_o_ai
+vec_cmpge(vector signed long long __a, vector signed long long __b) {
+ return ~(vec_cmpgt(__b, __a));
+}
+
+static __inline__ vector bool long long __ATTRS_o_ai
+vec_cmpge(vector unsigned long long __a, vector unsigned long long __b) {
+ return ~(vec_cmpgt(__b, __a));
+}
+#endif
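+
+/* The integer vec_cmpge overloads use the identity a >= b == ~(b > a), which
+   is exact on a total order; the float and double overloads use the native
+   ge compare instead, so unordered (NaN) lanes come out false rather than
+   following the complement identity. */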
+
+/* vec_vcmpgefp */
+
+static __inline__ vector bool int __attribute__((__always_inline__))
+vec_vcmpgefp(vector float __a, vector float __b) {
+ return (vector bool int)__builtin_altivec_vcmpgefp(__a, __b);
+}
+
+/* vec_vcmpgtsb */
+
+static __inline__ vector bool char __attribute__((__always_inline__))
+vec_vcmpgtsb(vector signed char __a, vector signed char __b) {
+ return (vector bool char)__builtin_altivec_vcmpgtsb(__a, __b);
+}
+
+/* vec_vcmpgtub */
+
+static __inline__ vector bool char __attribute__((__always_inline__))
+vec_vcmpgtub(vector unsigned char __a, vector unsigned char __b) {
+ return (vector bool char)__builtin_altivec_vcmpgtub(__a, __b);
+}
+
+/* vec_vcmpgtsh */
+
+static __inline__ vector bool short __attribute__((__always_inline__))
+vec_vcmpgtsh(vector short __a, vector short __b) {
+ return (vector bool short)__builtin_altivec_vcmpgtsh(__a, __b);
+}
+
+/* vec_vcmpgtuh */
+
+static __inline__ vector bool short __attribute__((__always_inline__))
+vec_vcmpgtuh(vector unsigned short __a, vector unsigned short __b) {
+ return (vector bool short)__builtin_altivec_vcmpgtuh(__a, __b);
+}
+
+/* vec_vcmpgtsw */
+
+static __inline__ vector bool int __attribute__((__always_inline__))
+vec_vcmpgtsw(vector int __a, vector int __b) {
+ return (vector bool int)__builtin_altivec_vcmpgtsw(__a, __b);
+}
+
+/* vec_vcmpgtuw */
+
+static __inline__ vector bool int __attribute__((__always_inline__))
+vec_vcmpgtuw(vector unsigned int __a, vector unsigned int __b) {
+ return (vector bool int)__builtin_altivec_vcmpgtuw(__a, __b);
+}
+
+/* vec_vcmpgtfp */
+
+static __inline__ vector bool int __attribute__((__always_inline__))
+vec_vcmpgtfp(vector float __a, vector float __b) {
+ return (vector bool int)__builtin_altivec_vcmpgtfp(__a, __b);
+}
+
+/* vec_cmple */
+
+static __inline__ vector bool char __ATTRS_o_ai
+vec_cmple(vector signed char __a, vector signed char __b) {
+ return vec_cmpge(__b, __a);
+}
+
+static __inline__ vector bool char __ATTRS_o_ai
+vec_cmple(vector unsigned char __a, vector unsigned char __b) {
+ return vec_cmpge(__b, __a);
+}
+
+static __inline__ vector bool short __ATTRS_o_ai
+vec_cmple(vector signed short __a, vector signed short __b) {
+ return vec_cmpge(__b, __a);
+}
+
+static __inline__ vector bool short __ATTRS_o_ai
+vec_cmple(vector unsigned short __a, vector unsigned short __b) {
+ return vec_cmpge(__b, __a);
+}
+
+static __inline__ vector bool int __ATTRS_o_ai
+vec_cmple(vector signed int __a, vector signed int __b) {
+ return vec_cmpge(__b, __a);
+}
+
+static __inline__ vector bool int __ATTRS_o_ai
+vec_cmple(vector unsigned int __a, vector unsigned int __b) {
+ return vec_cmpge(__b, __a);
+}
+
+static __inline__ vector bool int __ATTRS_o_ai vec_cmple(vector float __a,
+ vector float __b) {
+ return vec_cmpge(__b, __a);
+}
+
+#ifdef __VSX__
+static __inline__ vector bool long long __ATTRS_o_ai
+vec_cmple(vector double __a, vector double __b) {
+ return vec_cmpge(__b, __a);
+}
+#endif
+
+#ifdef __POWER8_VECTOR__
+static __inline__ vector bool long long __ATTRS_o_ai
+vec_cmple(vector signed long long __a, vector signed long long __b) {
+ return vec_cmpge(__b, __a);
+}
+
+static __inline__ vector bool long long __ATTRS_o_ai
+vec_cmple(vector unsigned long long __a, vector unsigned long long __b) {
+ return vec_cmpge(__b, __a);
+}
+#endif
+
+/* vec_cmplt */
+
+static __inline__ vector bool char __ATTRS_o_ai
+vec_cmplt(vector signed char __a, vector signed char __b) {
+ return vec_cmpgt(__b, __a);
+}
+
+static __inline__ vector bool char __ATTRS_o_ai
+vec_cmplt(vector unsigned char __a, vector unsigned char __b) {
+ return vec_cmpgt(__b, __a);
+}
+
+static __inline__ vector bool short __ATTRS_o_ai vec_cmplt(vector short __a,
+ vector short __b) {
+ return vec_cmpgt(__b, __a);
+}
+
+static __inline__ vector bool short __ATTRS_o_ai
+vec_cmplt(vector unsigned short __a, vector unsigned short __b) {
+ return vec_cmpgt(__b, __a);
+}
+
+static __inline__ vector bool int __ATTRS_o_ai vec_cmplt(vector int __a,
+ vector int __b) {
+ return vec_cmpgt(__b, __a);
+}
+
+static __inline__ vector bool int __ATTRS_o_ai
+vec_cmplt(vector unsigned int __a, vector unsigned int __b) {
+ return vec_cmpgt(__b, __a);
+}
+
+static __inline__ vector bool int __ATTRS_o_ai vec_cmplt(vector float __a,
+ vector float __b) {
+ return vec_cmpgt(__b, __a);
+}
+
+#ifdef __VSX__
+static __inline__ vector bool long long __ATTRS_o_ai
+vec_cmplt(vector double __a, vector double __b) {
+ return vec_cmpgt(__b, __a);
+}
+#endif
+
+#ifdef __POWER8_VECTOR__
+static __inline__ vector bool long long __ATTRS_o_ai
+vec_cmplt(vector signed long long __a, vector signed long long __b) {
+ return vec_cmpgt(__b, __a);
+}
+
+static __inline__ vector bool long long __ATTRS_o_ai
+vec_cmplt(vector unsigned long long __a, vector unsigned long long __b) {
+ return vec_cmpgt(__b, __a);
+}
+
+/* vec_cntlz */
+
+static __inline__ vector signed char __ATTRS_o_ai
+vec_cntlz(vector signed char __a) {
+ return __builtin_altivec_vclzb(__a);
+}
+static __inline__ vector unsigned char __ATTRS_o_ai
+vec_cntlz(vector unsigned char __a) {
+ return __builtin_altivec_vclzb(__a);
+}
+static __inline__ vector signed short __ATTRS_o_ai
+vec_cntlz(vector signed short __a) {
+ return __builtin_altivec_vclzh(__a);
+}
+static __inline__ vector unsigned short __ATTRS_o_ai
+vec_cntlz(vector unsigned short __a) {
+ return __builtin_altivec_vclzh(__a);
+}
+static __inline__ vector signed int __ATTRS_o_ai
+vec_cntlz(vector signed int __a) {
+ return __builtin_altivec_vclzw(__a);
+}
+static __inline__ vector unsigned int __ATTRS_o_ai
+vec_cntlz(vector unsigned int __a) {
+ return __builtin_altivec_vclzw(__a);
+}
+static __inline__ vector signed long long __ATTRS_o_ai
+vec_cntlz(vector signed long long __a) {
+ return __builtin_altivec_vclzd(__a);
+}
+static __inline__ vector unsigned long long __ATTRS_o_ai
+vec_cntlz(vector unsigned long long __a) {
+ return __builtin_altivec_vclzd(__a);
+}
+#endif
+
+/* vec_cpsgn */
+
+#ifdef __VSX__
+static __inline__ vector float __ATTRS_o_ai vec_cpsgn(vector float __a,
+ vector float __b) {
+ return __builtin_vsx_xvcpsgnsp(__a, __b);
+}
+
+static __inline__ vector double __ATTRS_o_ai vec_cpsgn(vector double __a,
+ vector double __b) {
+ return __builtin_vsx_xvcpsgndp(__a, __b);
+}
+#endif
+
+/* vec_ctf */
+
+static __inline__ vector float __ATTRS_o_ai vec_ctf(vector int __a, int __b) {
+ return __builtin_altivec_vcfsx(__a, __b);
+}
+
+static __inline__ vector float __ATTRS_o_ai vec_ctf(vector unsigned int __a,
+ int __b) {
+ return __builtin_altivec_vcfux((vector int)__a, __b);
+}
+
+#ifdef __VSX__
+static __inline__ vector double __ATTRS_o_ai
+vec_ctf(vector unsigned long long __a, int __b) {
+ vector double __ret = __builtin_convertvector(__a, vector double);
+ __ret *= (vector double)(vector unsigned long long)((0x3ffULL - __b) << 52);
+ return __ret;
+}
+
+static __inline__ vector double __ATTRS_o_ai
+vec_ctf(vector signed long long __a, int __b) {
+ vector double __ret = __builtin_convertvector(__a, vector double);
+ __ret *= (vector double)(vector unsigned long long)((0x3ffULL - __b) << 52);
+ return __ret;
+}
+#endif
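+
+/* In the VSX vec_ctf paths above, (0x3ffULL - __b) << 52 places a biased
+   IEEE-754 double exponent (bias 0x3ff) over a zero mantissa, i.e. the
+   constant 2**-__b; the multiply rescales the converted integers as
+   fixed-point values with __b fraction bits. */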
+
+/* vec_vcfsx */
+
+static __inline__ vector float __attribute__((__always_inline__))
+vec_vcfsx(vector int __a, int __b) {
+ return __builtin_altivec_vcfsx(__a, __b);
+}
+
+/* vec_vcfux */
+
+static __inline__ vector float __attribute__((__always_inline__))
+vec_vcfux(vector unsigned int __a, int __b) {
+ return __builtin_altivec_vcfux((vector int)__a, __b);
+}
+
+/* vec_cts */
+
+static __inline__ vector int __ATTRS_o_ai vec_cts(vector float __a, int __b) {
+ return __builtin_altivec_vctsxs(__a, __b);
+}
+
+#ifdef __VSX__
+static __inline__ vector signed long long __ATTRS_o_ai
+vec_cts(vector double __a, int __b) {
+ __a *= (vector double)(vector unsigned long long)((0x3ffULL + __b) << 52);
+ return __builtin_convertvector(__a, vector signed long long);
+}
+#endif
+
+/* vec_vctsxs */
+
+static __inline__ vector int __attribute__((__always_inline__))
+vec_vctsxs(vector float __a, int __b) {
+ return __builtin_altivec_vctsxs(__a, __b);
+}
+
+/* vec_ctu */
+
+static __inline__ vector unsigned int __ATTRS_o_ai vec_ctu(vector float __a,
+ int __b) {
+ return __builtin_altivec_vctuxs(__a, __b);
+}
+
+#ifdef __VSX__
+static __inline__ vector unsigned long long __ATTRS_o_ai
+vec_ctu(vector double __a, int __b) {
+ __a *= (vector double)(vector unsigned long long)((0x3ffULL + __b) << 52);
+ return __builtin_convertvector(__a, vector unsigned long long);
+}
+#endif
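+
+/* vec_cts and vec_ctu mirror vec_ctf: multiplying by 2**__b (built the same
+   way, with exponent 0x3ff + __b) shifts __b fraction bits into the integer
+   part before the truncating conversion. */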
+
+/* vec_vctuxs */
+
+static __inline__ vector unsigned int __attribute__((__always_inline__))
+vec_vctuxs(vector float __a, int __b) {
+ return __builtin_altivec_vctuxs(__a, __b);
+}
+
+/* vec_double */
+
+#ifdef __VSX__
+static __inline__ vector double __ATTRS_o_ai
+vec_double(vector signed long long __a) {
+ vector double __ret = {__a[0], __a[1]};
+ return __ret;
+}
+
+static __inline__ vector double __ATTRS_o_ai
+vec_double(vector unsigned long long __a) {
+ vector double __ret = {__a[0], __a[1]};
+ return __ret;
+}
+#endif
+
+/* vec_div */
+
+/* Integer vector divides (the vectors are scalarized, the elements divided,
+   and the vectors reassembled). */
+static __inline__ vector signed char __ATTRS_o_ai
+vec_div(vector signed char __a, vector signed char __b) {
+ return __a / __b;
+}
+
+static __inline__ vector unsigned char __ATTRS_o_ai
+vec_div(vector unsigned char __a, vector unsigned char __b) {
+ return __a / __b;
+}
+
+static __inline__ vector signed short __ATTRS_o_ai
+vec_div(vector signed short __a, vector signed short __b) {
+ return __a / __b;
+}
+
+static __inline__ vector unsigned short __ATTRS_o_ai
+vec_div(vector unsigned short __a, vector unsigned short __b) {
+ return __a / __b;
+}
+
+static __inline__ vector signed int __ATTRS_o_ai
+vec_div(vector signed int __a, vector signed int __b) {
+ return __a / __b;
+}
+
+static __inline__ vector unsigned int __ATTRS_o_ai
+vec_div(vector unsigned int __a, vector unsigned int __b) {
+ return __a / __b;
+}
+
+#ifdef __VSX__
+static __inline__ vector signed long long __ATTRS_o_ai
+vec_div(vector signed long long __a, vector signed long long __b) {
+ return __a / __b;
+}
+
+static __inline__ vector unsigned long long __ATTRS_o_ai
+vec_div(vector unsigned long long __a, vector unsigned long long __b) {
+ return __a / __b;
+}
+
+static __inline__ vector float __ATTRS_o_ai vec_div(vector float __a,
+ vector float __b) {
+ return __a / __b;
+}
+
+static __inline__ vector double __ATTRS_o_ai vec_div(vector double __a,
+ vector double __b) {
+ return __a / __b;
+}
+#endif
+
+/* vec_dss */
+
+static __inline__ void __attribute__((__always_inline__)) vec_dss(int __a) {
+ __builtin_altivec_dss(__a);
+}
+
+/* vec_dssall */
+
+static __inline__ void __attribute__((__always_inline__)) vec_dssall(void) {
+ __builtin_altivec_dssall();
+}
+
+/* vec_dst */
+#define vec_dst(__PTR, __CW, __STR) \
+ __extension__( \
+ { __builtin_altivec_dst((const void *)(__PTR), (__CW), (__STR)); })
+
+/* vec_dstst */
+#define vec_dstst(__PTR, __CW, __STR) \
+ __extension__( \
+ { __builtin_altivec_dstst((const void *)(__PTR), (__CW), (__STR)); })
+
+/* vec_dststt */
+#define vec_dststt(__PTR, __CW, __STR) \
+ __extension__( \
+ { __builtin_altivec_dststt((const void *)(__PTR), (__CW), (__STR)); })
+
+/* vec_dstt */
+#define vec_dstt(__PTR, __CW, __STR) \
+ __extension__( \
+ { __builtin_altivec_dstt((const void *)(__PTR), (__CW), (__STR)); })
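+
+/* The vec_dst* macros emit data-stream-touch prefetch hints: __PTR is the
+   stream base address, __CW a control word packing block size, block count,
+   and stride, and __STR the stream tag (0-3) that vec_dss(__STR) or
+   vec_dssall() later cancels. */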
+
+/* vec_eqv */
+
+#ifdef __POWER8_VECTOR__
+static __inline__ vector signed char __ATTRS_o_ai
+vec_eqv(vector signed char __a, vector signed char __b) {
+ return (vector signed char)__builtin_vsx_xxleqv((vector unsigned int)__a,
+ (vector unsigned int)__b);
+}
+
+static __inline__ vector unsigned char __ATTRS_o_ai
+vec_eqv(vector unsigned char __a, vector unsigned char __b) {
+ return (vector unsigned char)__builtin_vsx_xxleqv((vector unsigned int)__a,
+ (vector unsigned int)__b);
+}
+
+static __inline__ vector bool char __ATTRS_o_ai vec_eqv(vector bool char __a,
+ vector bool char __b) {
+ return (vector bool char)__builtin_vsx_xxleqv((vector unsigned int)__a,
+ (vector unsigned int)__b);
+}
+
+static __inline__ vector signed short __ATTRS_o_ai
+vec_eqv(vector signed short __a, vector signed short __b) {
+ return (vector signed short)__builtin_vsx_xxleqv((vector unsigned int)__a,
+ (vector unsigned int)__b);
+}
+
+static __inline__ vector unsigned short __ATTRS_o_ai
+vec_eqv(vector unsigned short __a, vector unsigned short __b) {
+ return (vector unsigned short)__builtin_vsx_xxleqv((vector unsigned int)__a,
+ (vector unsigned int)__b);
+}
+
+static __inline__ vector bool short __ATTRS_o_ai
+vec_eqv(vector bool short __a, vector bool short __b) {
+ return (vector bool short)__builtin_vsx_xxleqv((vector unsigned int)__a,
+ (vector unsigned int)__b);
+}
+
+static __inline__ vector signed int __ATTRS_o_ai
+vec_eqv(vector signed int __a, vector signed int __b) {
+ return (vector signed int)__builtin_vsx_xxleqv((vector unsigned int)__a,
+ (vector unsigned int)__b);
+}
+
+static __inline__ vector unsigned int __ATTRS_o_ai
+vec_eqv(vector unsigned int __a, vector unsigned int __b) {
+ return __builtin_vsx_xxleqv(__a, __b);
+}
+
+static __inline__ vector bool int __ATTRS_o_ai vec_eqv(vector bool int __a,
+ vector bool int __b) {
+ return (vector bool int)__builtin_vsx_xxleqv((vector unsigned int)__a,
+ (vector unsigned int)__b);
+}
+
+static __inline__ vector signed long long __ATTRS_o_ai
+vec_eqv(vector signed long long __a, vector signed long long __b) {
+ return (vector signed long long)__builtin_vsx_xxleqv(
+ (vector unsigned int)__a, (vector unsigned int)__b);
+}
+
+static __inline__ vector unsigned long long __ATTRS_o_ai
+vec_eqv(vector unsigned long long __a, vector unsigned long long __b) {
+ return (vector unsigned long long)__builtin_vsx_xxleqv(
+ (vector unsigned int)__a, (vector unsigned int)__b);
+}
+
+static __inline__ vector bool long long __ATTRS_o_ai
+vec_eqv(vector bool long long __a, vector bool long long __b) {
+ return (vector bool long long)__builtin_vsx_xxleqv((vector unsigned int)__a,
+ (vector unsigned int)__b);
+}
+
+static __inline__ vector float __ATTRS_o_ai vec_eqv(vector float __a,
+ vector float __b) {
+ return (vector float)__builtin_vsx_xxleqv((vector unsigned int)__a,
+ (vector unsigned int)__b);
+}
+
+static __inline__ vector double __ATTRS_o_ai vec_eqv(vector double __a,
+ vector double __b) {
+ return (vector double)__builtin_vsx_xxleqv((vector unsigned int)__a,
+ (vector unsigned int)__b);
+}
+#endif
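+
+/* vec_eqv computes bitwise equivalence, i.e. ~(__a ^ __b); every overload
+ * above funnels through the single word-sized xxleqv builtin and casts back.
+ * Illustrative sketch, not part of the header:
+ *
+ *   vector unsigned int a = {0xF0F0F0F0, 0, ~0u, 42};
+ *   vector unsigned int b = {0xFF00FF00, 0, 0, 42};
+ *   vector unsigned int e = vec_eqv(a, b);   // lane-wise ~(a ^ b)
+ */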
+
+/* vec_expte */
+
+static __inline__ vector float __attribute__((__always_inline__))
+vec_expte(vector float __a) {
+ return __builtin_altivec_vexptefp(__a);
+}
+
+/* vec_vexptefp */
+
+static __inline__ vector float __attribute__((__always_inline__))
+vec_vexptefp(vector float __a) {
+ return __builtin_altivec_vexptefp(__a);
+}
+
+/* vec_floor */
+
+static __inline__ vector float __ATTRS_o_ai vec_floor(vector float __a) {
+#ifdef __VSX__
+ return __builtin_vsx_xvrspim(__a);
+#else
+ return __builtin_altivec_vrfim(__a);
+#endif
+}
+
+#ifdef __VSX__
+static __inline__ vector double __ATTRS_o_ai vec_floor(vector double __a) {
+ return __builtin_vsx_xvrdpim(__a);
+}
+#endif
+
+/* vec_vrfim */
+
+static __inline__ vector float __attribute__((__always_inline__))
+vec_vrfim(vector float __a) {
+ return __builtin_altivec_vrfim(__a);
+}
+
+/* vec_ld */
+
+static __inline__ vector signed char __ATTRS_o_ai
+vec_ld(int __a, const vector signed char *__b) {
+ return (vector signed char)__builtin_altivec_lvx(__a, __b);
+}
+
+static __inline__ vector signed char __ATTRS_o_ai
+vec_ld(int __a, const signed char *__b) {
+ return (vector signed char)__builtin_altivec_lvx(__a, __b);
+}
+
+static __inline__ vector unsigned char __ATTRS_o_ai
+vec_ld(int __a, const vector unsigned char *__b) {
+ return (vector unsigned char)__builtin_altivec_lvx(__a, __b);
+}
+
+static __inline__ vector unsigned char __ATTRS_o_ai
+vec_ld(int __a, const unsigned char *__b) {
+ return (vector unsigned char)__builtin_altivec_lvx(__a, __b);
+}
+
+static __inline__ vector bool char __ATTRS_o_ai
+vec_ld(int __a, const vector bool char *__b) {
+ return (vector bool char)__builtin_altivec_lvx(__a, __b);
+}
+
+static __inline__ vector short __ATTRS_o_ai vec_ld(int __a,
+ const vector short *__b) {
+ return (vector short)__builtin_altivec_lvx(__a, __b);
+}
+
+static __inline__ vector short __ATTRS_o_ai vec_ld(int __a, const short *__b) {
+ return (vector short)__builtin_altivec_lvx(__a, __b);
+}
+
+static __inline__ vector unsigned short __ATTRS_o_ai
+vec_ld(int __a, const vector unsigned short *__b) {
+ return (vector unsigned short)__builtin_altivec_lvx(__a, __b);
+}
+
+static __inline__ vector unsigned short __ATTRS_o_ai
+vec_ld(int __a, const unsigned short *__b) {
+ return (vector unsigned short)__builtin_altivec_lvx(__a, __b);
+}
+
+static __inline__ vector bool short __ATTRS_o_ai
+vec_ld(int __a, const vector bool short *__b) {
+ return (vector bool short)__builtin_altivec_lvx(__a, __b);
+}
+
+static __inline__ vector pixel __ATTRS_o_ai vec_ld(int __a,
+ const vector pixel *__b) {
+ return (vector pixel)__builtin_altivec_lvx(__a, __b);
+}
+
+static __inline__ vector int __ATTRS_o_ai vec_ld(int __a,
+ const vector int *__b) {
+ return (vector int)__builtin_altivec_lvx(__a, __b);
+}
+
+static __inline__ vector int __ATTRS_o_ai vec_ld(int __a, const int *__b) {
+ return (vector int)__builtin_altivec_lvx(__a, __b);
+}
+
+static __inline__ vector unsigned int __ATTRS_o_ai
+vec_ld(int __a, const vector unsigned int *__b) {
+ return (vector unsigned int)__builtin_altivec_lvx(__a, __b);
+}
+
+static __inline__ vector unsigned int __ATTRS_o_ai
+vec_ld(int __a, const unsigned int *__b) {
+ return (vector unsigned int)__builtin_altivec_lvx(__a, __b);
+}
+
+static __inline__ vector bool int __ATTRS_o_ai
+vec_ld(int __a, const vector bool int *__b) {
+ return (vector bool int)__builtin_altivec_lvx(__a, __b);
+}
+
+static __inline__ vector float __ATTRS_o_ai vec_ld(int __a,
+ const vector float *__b) {
+ return (vector float)__builtin_altivec_lvx(__a, __b);
+}
+
+static __inline__ vector float __ATTRS_o_ai vec_ld(int __a, const float *__b) {
+ return (vector float)__builtin_altivec_lvx(__a, __b);
+}
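+
+/* vec_ld is a 16-byte *aligned* load: the effective address __a + __b is
+ * truncated to a multiple of 16, so it cannot fetch unaligned data directly
+ * (see the vec_lvsl/vec_perm idiom further down). Illustrative sketch, not
+ * part of the header:
+ *
+ *   float buf[8] __attribute__((aligned(16)));
+ *   vector float v = vec_ld(16, buf);   // loads buf[4..7]
+ */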
+
+/* vec_lvx */
+
+static __inline__ vector signed char __ATTRS_o_ai
+vec_lvx(int __a, const vector signed char *__b) {
+ return (vector signed char)__builtin_altivec_lvx(__a, __b);
+}
+
+static __inline__ vector signed char __ATTRS_o_ai
+vec_lvx(int __a, const signed char *__b) {
+ return (vector signed char)__builtin_altivec_lvx(__a, __b);
+}
+
+static __inline__ vector unsigned char __ATTRS_o_ai
+vec_lvx(int __a, const vector unsigned char *__b) {
+ return (vector unsigned char)__builtin_altivec_lvx(__a, __b);
+}
+
+static __inline__ vector unsigned char __ATTRS_o_ai
+vec_lvx(int __a, const unsigned char *__b) {
+ return (vector unsigned char)__builtin_altivec_lvx(__a, __b);
+}
+
+static __inline__ vector bool char __ATTRS_o_ai
+vec_lvx(int __a, const vector bool char *__b) {
+ return (vector bool char)__builtin_altivec_lvx(__a, __b);
+}
+
+static __inline__ vector short __ATTRS_o_ai vec_lvx(int __a,
+ const vector short *__b) {
+ return (vector short)__builtin_altivec_lvx(__a, __b);
+}
+
+static __inline__ vector short __ATTRS_o_ai vec_lvx(int __a, const short *__b) {
+ return (vector short)__builtin_altivec_lvx(__a, __b);
+}
+
+static __inline__ vector unsigned short __ATTRS_o_ai
+vec_lvx(int __a, const vector unsigned short *__b) {
+ return (vector unsigned short)__builtin_altivec_lvx(__a, __b);
+}
+
+static __inline__ vector unsigned short __ATTRS_o_ai
+vec_lvx(int __a, const unsigned short *__b) {
+ return (vector unsigned short)__builtin_altivec_lvx(__a, __b);
+}
+
+static __inline__ vector bool short __ATTRS_o_ai
+vec_lvx(int __a, const vector bool short *__b) {
+ return (vector bool short)__builtin_altivec_lvx(__a, __b);
+}
+
+static __inline__ vector pixel __ATTRS_o_ai vec_lvx(int __a,
+ const vector pixel *__b) {
+ return (vector pixel)__builtin_altivec_lvx(__a, __b);
+}
+
+static __inline__ vector int __ATTRS_o_ai vec_lvx(int __a,
+ const vector int *__b) {
+ return (vector int)__builtin_altivec_lvx(__a, __b);
+}
+
+static __inline__ vector int __ATTRS_o_ai vec_lvx(int __a, const int *__b) {
+ return (vector int)__builtin_altivec_lvx(__a, __b);
+}
+
+static __inline__ vector unsigned int __ATTRS_o_ai
+vec_lvx(int __a, const vector unsigned int *__b) {
+ return (vector unsigned int)__builtin_altivec_lvx(__a, __b);
+}
+
+static __inline__ vector unsigned int __ATTRS_o_ai
+vec_lvx(int __a, const unsigned int *__b) {
+ return (vector unsigned int)__builtin_altivec_lvx(__a, __b);
+}
+
+static __inline__ vector bool int __ATTRS_o_ai
+vec_lvx(int __a, const vector bool int *__b) {
+ return (vector bool int)__builtin_altivec_lvx(__a, __b);
+}
+
+static __inline__ vector float __ATTRS_o_ai vec_lvx(int __a,
+ const vector float *__b) {
+ return (vector float)__builtin_altivec_lvx(__a, __b);
+}
+
+static __inline__ vector float __ATTRS_o_ai vec_lvx(int __a, const float *__b) {
+ return (vector float)__builtin_altivec_lvx(__a, __b);
+}
+
+/* vec_lde */
+
+static __inline__ vector signed char __ATTRS_o_ai
+vec_lde(int __a, const signed char *__b) {
+ return (vector signed char)__builtin_altivec_lvebx(__a, __b);
+}
+
+static __inline__ vector unsigned char __ATTRS_o_ai
+vec_lde(int __a, const unsigned char *__b) {
+ return (vector unsigned char)__builtin_altivec_lvebx(__a, __b);
+}
+
+static __inline__ vector short __ATTRS_o_ai vec_lde(int __a, const short *__b) {
+ return (vector short)__builtin_altivec_lvehx(__a, __b);
+}
+
+static __inline__ vector unsigned short __ATTRS_o_ai
+vec_lde(int __a, const unsigned short *__b) {
+ return (vector unsigned short)__builtin_altivec_lvehx(__a, __b);
+}
+
+static __inline__ vector int __ATTRS_o_ai vec_lde(int __a, const int *__b) {
+ return (vector int)__builtin_altivec_lvewx(__a, __b);
+}
+
+static __inline__ vector unsigned int __ATTRS_o_ai
+vec_lde(int __a, const unsigned int *__b) {
+ return (vector unsigned int)__builtin_altivec_lvewx(__a, __b);
+}
+
+static __inline__ vector float __ATTRS_o_ai vec_lde(int __a, const float *__b) {
+ return (vector float)__builtin_altivec_lvewx(__a, __b);
+}
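+
+/* vec_lde loads a single element into the lane selected by the element's
+ * address modulo 16; all other lanes are undefined. Illustrative sketch,
+ * not part of the header:
+ *
+ *   float f = 3.0f;
+ *   vector float v = vec_lde(0, &f);   // lane ((uintptr_t)&f % 16) / 4
+ */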
+
+/* vec_lvebx */
+
+static __inline__ vector signed char __ATTRS_o_ai
+vec_lvebx(int __a, const signed char *__b) {
+ return (vector signed char)__builtin_altivec_lvebx(__a, __b);
+}
+
+static __inline__ vector unsigned char __ATTRS_o_ai
+vec_lvebx(int __a, const unsigned char *__b) {
+ return (vector unsigned char)__builtin_altivec_lvebx(__a, __b);
+}
+
+/* vec_lvehx */
+
+static __inline__ vector short __ATTRS_o_ai vec_lvehx(int __a,
+ const short *__b) {
+ return (vector short)__builtin_altivec_lvehx(__a, __b);
+}
+
+static __inline__ vector unsigned short __ATTRS_o_ai
+vec_lvehx(int __a, const unsigned short *__b) {
+ return (vector unsigned short)__builtin_altivec_lvehx(__a, __b);
+}
+
+/* vec_lvewx */
+
+static __inline__ vector int __ATTRS_o_ai vec_lvewx(int __a, const int *__b) {
+ return (vector int)__builtin_altivec_lvewx(__a, __b);
+}
+
+static __inline__ vector unsigned int __ATTRS_o_ai
+vec_lvewx(int __a, const unsigned int *__b) {
+ return (vector unsigned int)__builtin_altivec_lvewx(__a, __b);
+}
+
+static __inline__ vector float __ATTRS_o_ai vec_lvewx(int __a,
+ const float *__b) {
+ return (vector float)__builtin_altivec_lvewx(__a, __b);
+}
+
+/* vec_ldl */
+
+static __inline__ vector signed char __ATTRS_o_ai
+vec_ldl(int __a, const vector signed char *__b) {
+ return (vector signed char)__builtin_altivec_lvxl(__a, __b);
+}
+
+static __inline__ vector signed char __ATTRS_o_ai
+vec_ldl(int __a, const signed char *__b) {
+ return (vector signed char)__builtin_altivec_lvxl(__a, __b);
+}
+
+static __inline__ vector unsigned char __ATTRS_o_ai
+vec_ldl(int __a, const vector unsigned char *__b) {
+ return (vector unsigned char)__builtin_altivec_lvxl(__a, __b);
+}
+
+static __inline__ vector unsigned char __ATTRS_o_ai
+vec_ldl(int __a, const unsigned char *__b) {
+ return (vector unsigned char)__builtin_altivec_lvxl(__a, __b);
+}
+
+static __inline__ vector bool char __ATTRS_o_ai
+vec_ldl(int __a, const vector bool char *__b) {
+ return (vector bool char)__builtin_altivec_lvxl(__a, __b);
+}
+
+static __inline__ vector short __ATTRS_o_ai vec_ldl(int __a,
+ const vector short *__b) {
+ return (vector short)__builtin_altivec_lvxl(__a, __b);
+}
+
+static __inline__ vector short __ATTRS_o_ai vec_ldl(int __a, const short *__b) {
+ return (vector short)__builtin_altivec_lvxl(__a, __b);
+}
+
+static __inline__ vector unsigned short __ATTRS_o_ai
+vec_ldl(int __a, const vector unsigned short *__b) {
+ return (vector unsigned short)__builtin_altivec_lvxl(__a, __b);
+}
+
+static __inline__ vector unsigned short __ATTRS_o_ai
+vec_ldl(int __a, const unsigned short *__b) {
+ return (vector unsigned short)__builtin_altivec_lvxl(__a, __b);
+}
+
+static __inline__ vector bool short __ATTRS_o_ai
+vec_ldl(int __a, const vector bool short *__b) {
+ return (vector bool short)__builtin_altivec_lvxl(__a, __b);
+}
+
+static __inline__ vector pixel __ATTRS_o_ai vec_ldl(int __a,
+ const vector pixel *__b) {
+ return (vector pixel)__builtin_altivec_lvxl(__a, __b);
+}
+
+static __inline__ vector int __ATTRS_o_ai vec_ldl(int __a,
+ const vector int *__b) {
+ return (vector int)__builtin_altivec_lvxl(__a, __b);
+}
+
+static __inline__ vector int __ATTRS_o_ai vec_ldl(int __a, const int *__b) {
+ return (vector int)__builtin_altivec_lvxl(__a, __b);
+}
+
+static __inline__ vector unsigned int __ATTRS_o_ai
+vec_ldl(int __a, const vector unsigned int *__b) {
+ return (vector unsigned int)__builtin_altivec_lvxl(__a, __b);
+}
+
+static __inline__ vector unsigned int __ATTRS_o_ai
+vec_ldl(int __a, const unsigned int *__b) {
+ return (vector unsigned int)__builtin_altivec_lvxl(__a, __b);
+}
+
+static __inline__ vector bool int __ATTRS_o_ai
+vec_ldl(int __a, const vector bool int *__b) {
+ return (vector bool int)__builtin_altivec_lvxl(__a, __b);
+}
+
+static __inline__ vector float __ATTRS_o_ai vec_ldl(int __a,
+ const vector float *__b) {
+ return (vector float)__builtin_altivec_lvxl(__a, __b);
+}
+
+static __inline__ vector float __ATTRS_o_ai vec_ldl(int __a, const float *__b) {
+ return (vector float)__builtin_altivec_lvxl(__a, __b);
+}
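+
+/* vec_ldl behaves like vec_ld but marks the touched cache line as least
+ * recently used, hinting that the data is transient and a good eviction
+ * candidate. */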
+
+/* vec_lvxl */
+
+static __inline__ vector signed char __ATTRS_o_ai
+vec_lvxl(int __a, const vector signed char *__b) {
+ return (vector signed char)__builtin_altivec_lvxl(__a, __b);
+}
+
+static __inline__ vector signed char __ATTRS_o_ai
+vec_lvxl(int __a, const signed char *__b) {
+ return (vector signed char)__builtin_altivec_lvxl(__a, __b);
+}
+
+static __inline__ vector unsigned char __ATTRS_o_ai
+vec_lvxl(int __a, const vector unsigned char *__b) {
+ return (vector unsigned char)__builtin_altivec_lvxl(__a, __b);
+}
+
+static __inline__ vector unsigned char __ATTRS_o_ai
+vec_lvxl(int __a, const unsigned char *__b) {
+ return (vector unsigned char)__builtin_altivec_lvxl(__a, __b);
+}
+
+static __inline__ vector bool char __ATTRS_o_ai
+vec_lvxl(int __a, const vector bool char *__b) {
+ return (vector bool char)__builtin_altivec_lvxl(__a, __b);
+}
+
+static __inline__ vector short __ATTRS_o_ai vec_lvxl(int __a,
+ const vector short *__b) {
+ return (vector short)__builtin_altivec_lvxl(__a, __b);
+}
+
+static __inline__ vector short __ATTRS_o_ai vec_lvxl(int __a,
+ const short *__b) {
+ return (vector short)__builtin_altivec_lvxl(__a, __b);
+}
+
+static __inline__ vector unsigned short __ATTRS_o_ai
+vec_lvxl(int __a, const vector unsigned short *__b) {
+ return (vector unsigned short)__builtin_altivec_lvxl(__a, __b);
+}
+
+static __inline__ vector unsigned short __ATTRS_o_ai
+vec_lvxl(int __a, const unsigned short *__b) {
+ return (vector unsigned short)__builtin_altivec_lvxl(__a, __b);
+}
+
+static __inline__ vector bool short __ATTRS_o_ai
+vec_lvxl(int __a, const vector bool short *__b) {
+ return (vector bool short)__builtin_altivec_lvxl(__a, __b);
+}
+
+static __inline__ vector pixel __ATTRS_o_ai vec_lvxl(int __a,
+ const vector pixel *__b) {
+ return (vector pixel)__builtin_altivec_lvxl(__a, __b);
+}
+
+static __inline__ vector int __ATTRS_o_ai vec_lvxl(int __a,
+ const vector int *__b) {
+ return (vector int)__builtin_altivec_lvxl(__a, __b);
+}
+
+static __inline__ vector int __ATTRS_o_ai vec_lvxl(int __a, const int *__b) {
+ return (vector int)__builtin_altivec_lvxl(__a, __b);
+}
+
+static __inline__ vector unsigned int __ATTRS_o_ai
+vec_lvxl(int __a, const vector unsigned int *__b) {
+ return (vector unsigned int)__builtin_altivec_lvxl(__a, __b);
+}
+
+static __inline__ vector unsigned int __ATTRS_o_ai
+vec_lvxl(int __a, const unsigned int *__b) {
+ return (vector unsigned int)__builtin_altivec_lvxl(__a, __b);
+}
+
+static __inline__ vector bool int __ATTRS_o_ai
+vec_lvxl(int __a, const vector bool int *__b) {
+ return (vector bool int)__builtin_altivec_lvxl(__a, __b);
+}
+
+static __inline__ vector float __ATTRS_o_ai vec_lvxl(int __a,
+ const vector float *__b) {
+ return (vector float)__builtin_altivec_lvxl(__a, __b);
+}
+
+static __inline__ vector float __ATTRS_o_ai vec_lvxl(int __a,
+ const float *__b) {
+ return (vector float)__builtin_altivec_lvxl(__a, __b);
+}
+
+/* vec_loge */
+
+static __inline__ vector float __attribute__((__always_inline__))
+vec_loge(vector float __a) {
+ return __builtin_altivec_vlogefp(__a);
+}
+
+/* vec_vlogefp */
+
+static __inline__ vector float __attribute__((__always_inline__))
+vec_vlogefp(vector float __a) {
+ return __builtin_altivec_vlogefp(__a);
+}
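+
+/* Note: vec_expte and vec_loge map to the 2**x and log2(x) *estimate*
+ * instructions; the results are low-precision approximations, not correctly
+ * rounded values (see the AltiVec PEM for the guaranteed error bounds).
+ * Illustrative sketch, not part of the header:
+ *
+ *   vector float x = {1.0f, 2.0f, 4.0f, 8.0f};
+ *   vector float l = vec_loge(x);   // approximately {0, 1, 2, 3}
+ */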
+
+/* vec_lvsl */
+
+#ifdef __LITTLE_ENDIAN__
+static __inline__ vector unsigned char __ATTRS_o_ai
+ __attribute__((__deprecated__("use assignment for unaligned little endian \
+loads/stores"))) vec_lvsl(int __a, const signed char *__b) {
+ vector unsigned char mask =
+ (vector unsigned char)__builtin_altivec_lvsl(__a, __b);
+ vector unsigned char reverse = {15, 14, 13, 12, 11, 10, 9, 8,
+ 7, 6, 5, 4, 3, 2, 1, 0};
+ return vec_perm(mask, mask, reverse);
+}
+#else
+static __inline__ vector unsigned char __ATTRS_o_ai
+vec_lvsl(int __a, const signed char *__b) {
+ return (vector unsigned char)__builtin_altivec_lvsl(__a, __b);
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+static __inline__ vector unsigned char __ATTRS_o_ai
+ __attribute__((__deprecated__("use assignment for unaligned little endian \
+loads/stores"))) vec_lvsl(int __a, const unsigned char *__b) {
+ vector unsigned char mask =
+ (vector unsigned char)__builtin_altivec_lvsl(__a, __b);
+ vector unsigned char reverse = {15, 14, 13, 12, 11, 10, 9, 8,
+ 7, 6, 5, 4, 3, 2, 1, 0};
+ return vec_perm(mask, mask, reverse);
+}
+#else
+static __inline__ vector unsigned char __ATTRS_o_ai
+vec_lvsl(int __a, const unsigned char *__b) {
+ return (vector unsigned char)__builtin_altivec_lvsl(__a, __b);
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+static __inline__ vector unsigned char __ATTRS_o_ai
+ __attribute__((__deprecated__("use assignment for unaligned little endian \
+loads/stores"))) vec_lvsl(int __a, const short *__b) {
+ vector unsigned char mask =
+ (vector unsigned char)__builtin_altivec_lvsl(__a, __b);
+ vector unsigned char reverse = {15, 14, 13, 12, 11, 10, 9, 8,
+ 7, 6, 5, 4, 3, 2, 1, 0};
+ return vec_perm(mask, mask, reverse);
+}
+#else
+static __inline__ vector unsigned char __ATTRS_o_ai vec_lvsl(int __a,
+ const short *__b) {
+ return (vector unsigned char)__builtin_altivec_lvsl(__a, __b);
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+static __inline__ vector unsigned char __ATTRS_o_ai
+ __attribute__((__deprecated__("use assignment for unaligned little endian \
+loads/stores"))) vec_lvsl(int __a, const unsigned short *__b) {
+ vector unsigned char mask =
+ (vector unsigned char)__builtin_altivec_lvsl(__a, __b);
+ vector unsigned char reverse = {15, 14, 13, 12, 11, 10, 9, 8,
+ 7, 6, 5, 4, 3, 2, 1, 0};
+ return vec_perm(mask, mask, reverse);
+}
+#else
+static __inline__ vector unsigned char __ATTRS_o_ai
+vec_lvsl(int __a, const unsigned short *__b) {
+ return (vector unsigned char)__builtin_altivec_lvsl(__a, __b);
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+static __inline__ vector unsigned char __ATTRS_o_ai
+ __attribute__((__deprecated__("use assignment for unaligned little endian \
+loads/stores"))) vec_lvsl(int __a, const int *__b) {
+ vector unsigned char mask =
+ (vector unsigned char)__builtin_altivec_lvsl(__a, __b);
+ vector unsigned char reverse = {15, 14, 13, 12, 11, 10, 9, 8,
+ 7, 6, 5, 4, 3, 2, 1, 0};
+ return vec_perm(mask, mask, reverse);
+}
+#else
+static __inline__ vector unsigned char __ATTRS_o_ai vec_lvsl(int __a,
+ const int *__b) {
+ return (vector unsigned char)__builtin_altivec_lvsl(__a, __b);
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+static __inline__ vector unsigned char __ATTRS_o_ai
+ __attribute__((__deprecated__("use assignment for unaligned little endian \
+loads/stores"))) vec_lvsl(int __a, const unsigned int *__b) {
+ vector unsigned char mask =
+ (vector unsigned char)__builtin_altivec_lvsl(__a, __b);
+ vector unsigned char reverse = {15, 14, 13, 12, 11, 10, 9, 8,
+ 7, 6, 5, 4, 3, 2, 1, 0};
+ return vec_perm(mask, mask, reverse);
+}
+#else
+static __inline__ vector unsigned char __ATTRS_o_ai
+vec_lvsl(int __a, const unsigned int *__b) {
+ return (vector unsigned char)__builtin_altivec_lvsl(__a, __b);
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+static __inline__ vector unsigned char __ATTRS_o_ai
+ __attribute__((__deprecated__("use assignment for unaligned little endian \
+loads/stores"))) vec_lvsl(int __a, const float *__b) {
+ vector unsigned char mask =
+ (vector unsigned char)__builtin_altivec_lvsl(__a, __b);
+ vector unsigned char reverse = {15, 14, 13, 12, 11, 10, 9, 8,
+ 7, 6, 5, 4, 3, 2, 1, 0};
+ return vec_perm(mask, mask, reverse);
+}
+#else
+static __inline__ vector unsigned char __ATTRS_o_ai vec_lvsl(int __a,
+ const float *__b) {
+ return (vector unsigned char)__builtin_altivec_lvsl(__a, __b);
+}
+#endif
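+
+/* The classic big-endian unaligned-load idiom built on vec_lvsl (a sketch,
+ * not part of the header; on little endian use plain assignment instead, as
+ * the deprecation message above says):
+ *
+ *   const unsigned char *p = ...;              // possibly unaligned
+ *   vector unsigned char lo = vec_ld(0, p);    // block containing p[0]
+ *   vector unsigned char hi = vec_ld(15, p);   // next aligned block
+ *   vector unsigned char v = vec_perm(lo, hi, vec_lvsl(0, p));
+ */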
+
+/* vec_lvsr */
+
+#ifdef __LITTLE_ENDIAN__
+static __inline__ vector unsigned char __ATTRS_o_ai
+ __attribute__((__deprecated__("use assignment for unaligned little endian \
+loads/stores"))) vec_lvsr(int __a, const signed char *__b) {
+ vector unsigned char mask =
+ (vector unsigned char)__builtin_altivec_lvsr(__a, __b);
+ vector unsigned char reverse = {15, 14, 13, 12, 11, 10, 9, 8,
+ 7, 6, 5, 4, 3, 2, 1, 0};
+ return vec_perm(mask, mask, reverse);
+}
+#else
+static __inline__ vector unsigned char __ATTRS_o_ai
+vec_lvsr(int __a, const signed char *__b) {
+ return (vector unsigned char)__builtin_altivec_lvsr(__a, __b);
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+static __inline__ vector unsigned char __ATTRS_o_ai
+ __attribute__((__deprecated__("use assignment for unaligned little endian \
+loads/stores"))) vec_lvsr(int __a, const unsigned char *__b) {
+ vector unsigned char mask =
+ (vector unsigned char)__builtin_altivec_lvsr(__a, __b);
+ vector unsigned char reverse = {15, 14, 13, 12, 11, 10, 9, 8,
+ 7, 6, 5, 4, 3, 2, 1, 0};
+ return vec_perm(mask, mask, reverse);
+}
+#else
+static __inline__ vector unsigned char __ATTRS_o_ai
+vec_lvsr(int __a, const unsigned char *__b) {
+ return (vector unsigned char)__builtin_altivec_lvsr(__a, __b);
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+static __inline__ vector unsigned char __ATTRS_o_ai
+ __attribute__((__deprecated__("use assignment for unaligned little endian \
+loads/stores"))) vec_lvsr(int __a, const short *__b) {
+ vector unsigned char mask =
+ (vector unsigned char)__builtin_altivec_lvsr(__a, __b);
+ vector unsigned char reverse = {15, 14, 13, 12, 11, 10, 9, 8,
+ 7, 6, 5, 4, 3, 2, 1, 0};
+ return vec_perm(mask, mask, reverse);
+}
+#else
+static __inline__ vector unsigned char __ATTRS_o_ai vec_lvsr(int __a,
+ const short *__b) {
+ return (vector unsigned char)__builtin_altivec_lvsr(__a, __b);
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+static __inline__ vector unsigned char __ATTRS_o_ai
+ __attribute__((__deprecated__("use assignment for unaligned little endian \
+loads/stores"))) vec_lvsr(int __a, const unsigned short *__b) {
+ vector unsigned char mask =
+ (vector unsigned char)__builtin_altivec_lvsr(__a, __b);
+ vector unsigned char reverse = {15, 14, 13, 12, 11, 10, 9, 8,
+ 7, 6, 5, 4, 3, 2, 1, 0};
+ return vec_perm(mask, mask, reverse);
+}
+#else
+static __inline__ vector unsigned char __ATTRS_o_ai
+vec_lvsr(int __a, const unsigned short *__b) {
+ return (vector unsigned char)__builtin_altivec_lvsr(__a, __b);
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+static __inline__ vector unsigned char __ATTRS_o_ai
+ __attribute__((__deprecated__("use assignment for unaligned little endian \
+loads/stores"))) vec_lvsr(int __a, const int *__b) {
+ vector unsigned char mask =
+ (vector unsigned char)__builtin_altivec_lvsr(__a, __b);
+ vector unsigned char reverse = {15, 14, 13, 12, 11, 10, 9, 8,
+ 7, 6, 5, 4, 3, 2, 1, 0};
+ return vec_perm(mask, mask, reverse);
+}
+#else
+static __inline__ vector unsigned char __ATTRS_o_ai vec_lvsr(int __a,
+ const int *__b) {
+ return (vector unsigned char)__builtin_altivec_lvsr(__a, __b);
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+static __inline__ vector unsigned char __ATTRS_o_ai
+ __attribute__((__deprecated__("use assignment for unaligned little endian \
+loads/stores"))) vec_lvsr(int __a, const unsigned int *__b) {
+ vector unsigned char mask =
+ (vector unsigned char)__builtin_altivec_lvsr(__a, __b);
+ vector unsigned char reverse = {15, 14, 13, 12, 11, 10, 9, 8,
+ 7, 6, 5, 4, 3, 2, 1, 0};
+ return vec_perm(mask, mask, reverse);
+}
+#else
+static __inline__ vector unsigned char __ATTRS_o_ai
+vec_lvsr(int __a, const unsigned int *__b) {
+ return (vector unsigned char)__builtin_altivec_lvsr(__a, __b);
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+static __inline__ vector unsigned char __ATTRS_o_ai
+ __attribute__((__deprecated__("use assignment for unaligned little endian \
+loads/stores"))) vec_lvsr(int __a, const float *__b) {
+ vector unsigned char mask =
+ (vector unsigned char)__builtin_altivec_lvsr(__a, __b);
+ vector unsigned char reverse = {15, 14, 13, 12, 11, 10, 9, 8,
+ 7, 6, 5, 4, 3, 2, 1, 0};
+ return vec_perm(mask, mask, reverse);
+}
+#else
+static __inline__ vector unsigned char __ATTRS_o_ai vec_lvsr(int __a,
+ const float *__b) {
+ return (vector unsigned char)__builtin_altivec_lvsr(__a, __b);
+}
+#endif
+
+/* vec_madd */
+
+static __inline__ vector signed short __ATTRS_o_ai
+vec_mladd(vector signed short, vector signed short, vector signed short);
+static __inline__ vector signed short __ATTRS_o_ai
+vec_mladd(vector signed short, vector unsigned short, vector unsigned short);
+static __inline__ vector signed short __ATTRS_o_ai
+vec_mladd(vector unsigned short, vector signed short, vector signed short);
+static __inline__ vector unsigned short __ATTRS_o_ai
+vec_mladd(vector unsigned short, vector unsigned short, vector unsigned short);
+
+static __inline__ vector signed short __ATTRS_o_ai vec_madd(
+ vector signed short __a, vector signed short __b, vector signed short __c) {
+ return vec_mladd(__a, __b, __c);
+}
+
+static __inline__ vector signed short __ATTRS_o_ai
+vec_madd(vector signed short __a, vector unsigned short __b,
+ vector unsigned short __c) {
+ return vec_mladd(__a, __b, __c);
+}
+
+static __inline__ vector signed short __ATTRS_o_ai
+vec_madd(vector unsigned short __a, vector signed short __b,
+ vector signed short __c) {
+ return vec_mladd(__a, __b, __c);
+}
+
+static __inline__ vector unsigned short __ATTRS_o_ai
+vec_madd(vector unsigned short __a, vector unsigned short __b,
+ vector unsigned short __c) {
+ return vec_mladd(__a, __b, __c);
+}
+
+static __inline__ vector float __ATTRS_o_ai vec_madd(vector float __a,
+ vector float __b,
+ vector float __c) {
+#ifdef __VSX__
+ return __builtin_vsx_xvmaddasp(__a, __b, __c);
+#else
+ return __builtin_altivec_vmaddfp(__a, __b, __c);
+#endif
+}
+
+#ifdef __VSX__
+static __inline__ vector double __ATTRS_o_ai vec_madd(vector double __a,
+ vector double __b,
+ vector double __c) {
+ return __builtin_vsx_xvmaddadp(__a, __b, __c);
+}
+#endif
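+
+/* The floating-point vec_madd overloads are fused multiply-adds: each lane
+ * computes __a * __b + __c with a single rounding. The integer overloads
+ * instead forward to vec_mladd, a modular multiply-low-and-add.
+ * Illustrative sketch, not part of the header:
+ *
+ *   vector float a = {1.0f, 2.0f, 3.0f, 4.0f};
+ *   vector float b = {0.5f, 0.5f, 0.5f, 0.5f};
+ *   vector float c = {1.0f, 1.0f, 1.0f, 1.0f};
+ *   vector float r = vec_madd(a, b, c);   // {1.5f, 2.0f, 2.5f, 3.0f}
+ */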
+
+/* vec_vmaddfp */
+
+static __inline__ vector float __attribute__((__always_inline__))
+vec_vmaddfp(vector float __a, vector float __b, vector float __c) {
+ return __builtin_altivec_vmaddfp(__a, __b, __c);
+}
+
+/* vec_madds */
+
+static __inline__ vector signed short __attribute__((__always_inline__))
+vec_madds(vector signed short __a, vector signed short __b,
+ vector signed short __c) {
+ return __builtin_altivec_vmhaddshs(__a, __b, __c);
+}
+
+/* vec_vmhaddshs */
+
+static __inline__ vector signed short __attribute__((__always_inline__))
+vec_vmhaddshs(vector signed short __a, vector signed short __b,
+ vector signed short __c) {
+ return __builtin_altivec_vmhaddshs(__a, __b, __c);
+}
+
+/* vec_msub */
+
+#ifdef __VSX__
+static __inline__ vector float __ATTRS_o_ai vec_msub(vector float __a,
+ vector float __b,
+ vector float __c) {
+ return __builtin_vsx_xvmsubasp(__a, __b, __c);
+}
+
+static __inline__ vector double __ATTRS_o_ai vec_msub(vector double __a,
+ vector double __b,
+ vector double __c) {
+ return __builtin_vsx_xvmsubadp(__a, __b, __c);
+}
+#endif
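+
+/* vec_msub is the fused counterpart with a subtraction: each lane computes
+ * __a * __b - __c, again with a single rounding. */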
+
+/* vec_max */
+
+static __inline__ vector signed char __ATTRS_o_ai
+vec_max(vector signed char __a, vector signed char __b) {
+ return __builtin_altivec_vmaxsb(__a, __b);
+}
+
+static __inline__ vector signed char __ATTRS_o_ai
+vec_max(vector bool char __a, vector signed char __b) {
+ return __builtin_altivec_vmaxsb((vector signed char)__a, __b);
+}
+
+static __inline__ vector signed char __ATTRS_o_ai
+vec_max(vector signed char __a, vector bool char __b) {
+ return __builtin_altivec_vmaxsb(__a, (vector signed char)__b);
+}
+
+static __inline__ vector unsigned char __ATTRS_o_ai
+vec_max(vector unsigned char __a, vector unsigned char __b) {
+ return __builtin_altivec_vmaxub(__a, __b);
+}
+
+static __inline__ vector unsigned char __ATTRS_o_ai
+vec_max(vector bool char __a, vector unsigned char __b) {
+ return __builtin_altivec_vmaxub((vector unsigned char)__a, __b);
+}
+
+static __inline__ vector unsigned char __ATTRS_o_ai
+vec_max(vector unsigned char __a, vector bool char __b) {
+ return __builtin_altivec_vmaxub(__a, (vector unsigned char)__b);
+}
+
+static __inline__ vector short __ATTRS_o_ai vec_max(vector short __a,
+ vector short __b) {
+ return __builtin_altivec_vmaxsh(__a, __b);
+}
+
+static __inline__ vector short __ATTRS_o_ai vec_max(vector bool short __a,
+ vector short __b) {
+ return __builtin_altivec_vmaxsh((vector short)__a, __b);
+}
+
+static __inline__ vector short __ATTRS_o_ai vec_max(vector short __a,
+ vector bool short __b) {
+ return __builtin_altivec_vmaxsh(__a, (vector short)__b);
+}
+
+static __inline__ vector unsigned short __ATTRS_o_ai
+vec_max(vector unsigned short __a, vector unsigned short __b) {
+ return __builtin_altivec_vmaxuh(__a, __b);
+}
+
+static __inline__ vector unsigned short __ATTRS_o_ai
+vec_max(vector bool short __a, vector unsigned short __b) {
+ return __builtin_altivec_vmaxuh((vector unsigned short)__a, __b);
+}
+
+static __inline__ vector unsigned short __ATTRS_o_ai
+vec_max(vector unsigned short __a, vector bool short __b) {
+ return __builtin_altivec_vmaxuh(__a, (vector unsigned short)__b);
+}
+
+static __inline__ vector int __ATTRS_o_ai vec_max(vector int __a,
+ vector int __b) {
+ return __builtin_altivec_vmaxsw(__a, __b);
+}
+
+static __inline__ vector int __ATTRS_o_ai vec_max(vector bool int __a,
+ vector int __b) {
+ return __builtin_altivec_vmaxsw((vector int)__a, __b);
+}
+
+static __inline__ vector int __ATTRS_o_ai vec_max(vector int __a,
+ vector bool int __b) {
+ return __builtin_altivec_vmaxsw(__a, (vector int)__b);
+}
+
+static __inline__ vector unsigned int __ATTRS_o_ai
+vec_max(vector unsigned int __a, vector unsigned int __b) {
+ return __builtin_altivec_vmaxuw(__a, __b);
+}
+
+static __inline__ vector unsigned int __ATTRS_o_ai
+vec_max(vector bool int __a, vector unsigned int __b) {
+ return __builtin_altivec_vmaxuw((vector unsigned int)__a, __b);
+}
+
+static __inline__ vector unsigned int __ATTRS_o_ai
+vec_max(vector unsigned int __a, vector bool int __b) {
+ return __builtin_altivec_vmaxuw(__a, (vector unsigned int)__b);
+}
+
+#ifdef __POWER8_VECTOR__
+static __inline__ vector signed long long __ATTRS_o_ai
+vec_max(vector signed long long __a, vector signed long long __b) {
+ return __builtin_altivec_vmaxsd(__a, __b);
+}
+
+static __inline__ vector signed long long __ATTRS_o_ai
+vec_max(vector bool long long __a, vector signed long long __b) {
+ return __builtin_altivec_vmaxsd((vector signed long long)__a, __b);
+}
+
+static __inline__ vector signed long long __ATTRS_o_ai
+vec_max(vector signed long long __a, vector bool long long __b) {
+ return __builtin_altivec_vmaxsd(__a, (vector signed long long)__b);
+}
+
+static __inline__ vector unsigned long long __ATTRS_o_ai
+vec_max(vector unsigned long long __a, vector unsigned long long __b) {
+ return __builtin_altivec_vmaxud(__a, __b);
+}
+
+static __inline__ vector unsigned long long __ATTRS_o_ai
+vec_max(vector bool long long __a, vector unsigned long long __b) {
+ return __builtin_altivec_vmaxud((vector unsigned long long)__a, __b);
+}
+
+static __inline__ vector unsigned long long __ATTRS_o_ai
+vec_max(vector unsigned long long __a, vector bool long long __b) {
+ return __builtin_altivec_vmaxud(__a, (vector unsigned long long)__b);
+}
+#endif
+
+static __inline__ vector float __ATTRS_o_ai vec_max(vector float __a,
+ vector float __b) {
+#ifdef __VSX__
+ return __builtin_vsx_xvmaxsp(__a, __b);
+#else
+ return __builtin_altivec_vmaxfp(__a, __b);
+#endif
+}
+
+#ifdef __VSX__
+static __inline__ vector double __ATTRS_o_ai vec_max(vector double __a,
+ vector double __b) {
+ return __builtin_vsx_xvmaxdp(__a, __b);
+}
+#endif
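+
+/* The mixed bool/element vec_max overloads simply reinterpret the bool mask
+ * (all-ones or all-zeros lanes) as the element type before comparing.
+ * Illustrative sketch, not part of the header:
+ *
+ *   vector signed int x = {-5, 3, -2, 7};
+ *   vector bool int m = (vector bool int){0, -1, 0, -1};
+ *   vector signed int r = vec_max(x, m);   // {0, 3, 0, 7}
+ */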
+
+/* vec_vmaxsb */
+
+static __inline__ vector signed char __ATTRS_o_ai
+vec_vmaxsb(vector signed char __a, vector signed char __b) {
+ return __builtin_altivec_vmaxsb(__a, __b);
+}
+
+static __inline__ vector signed char __ATTRS_o_ai
+vec_vmaxsb(vector bool char __a, vector signed char __b) {
+ return __builtin_altivec_vmaxsb((vector signed char)__a, __b);
+}
+
+static __inline__ vector signed char __ATTRS_o_ai
+vec_vmaxsb(vector signed char __a, vector bool char __b) {
+ return __builtin_altivec_vmaxsb(__a, (vector signed char)__b);
+}
+
+/* vec_vmaxub */
+
+static __inline__ vector unsigned char __ATTRS_o_ai
+vec_vmaxub(vector unsigned char __a, vector unsigned char __b) {
+ return __builtin_altivec_vmaxub(__a, __b);
+}
+
+static __inline__ vector unsigned char __ATTRS_o_ai
+vec_vmaxub(vector bool char __a, vector unsigned char __b) {
+ return __builtin_altivec_vmaxub((vector unsigned char)__a, __b);
+}
+
+static __inline__ vector unsigned char __ATTRS_o_ai
+vec_vmaxub(vector unsigned char __a, vector bool char __b) {
+ return __builtin_altivec_vmaxub(__a, (vector unsigned char)__b);
+}
+
+/* vec_vmaxsh */
+
+static __inline__ vector short __ATTRS_o_ai vec_vmaxsh(vector short __a,
+ vector short __b) {
+ return __builtin_altivec_vmaxsh(__a, __b);
+}
+
+static __inline__ vector short __ATTRS_o_ai vec_vmaxsh(vector bool short __a,
+ vector short __b) {
+ return __builtin_altivec_vmaxsh((vector short)__a, __b);
+}
+
+static __inline__ vector short __ATTRS_o_ai vec_vmaxsh(vector short __a,
+ vector bool short __b) {
+ return __builtin_altivec_vmaxsh(__a, (vector short)__b);
+}
+
+/* vec_vmaxuh */
+
+static __inline__ vector unsigned short __ATTRS_o_ai
+vec_vmaxuh(vector unsigned short __a, vector unsigned short __b) {
+ return __builtin_altivec_vmaxuh(__a, __b);
+}
+
+static __inline__ vector unsigned short __ATTRS_o_ai
+vec_vmaxuh(vector bool short __a, vector unsigned short __b) {
+ return __builtin_altivec_vmaxuh((vector unsigned short)__a, __b);
+}
+
+static __inline__ vector unsigned short __ATTRS_o_ai
+vec_vmaxuh(vector unsigned short __a, vector bool short __b) {
+ return __builtin_altivec_vmaxuh(__a, (vector unsigned short)__b);
+}
+
+/* vec_vmaxsw */
+
+static __inline__ vector int __ATTRS_o_ai vec_vmaxsw(vector int __a,
+ vector int __b) {
+ return __builtin_altivec_vmaxsw(__a, __b);
+}
+
+static __inline__ vector int __ATTRS_o_ai vec_vmaxsw(vector bool int __a,
+ vector int __b) {
+ return __builtin_altivec_vmaxsw((vector int)__a, __b);
+}
+
+static __inline__ vector int __ATTRS_o_ai vec_vmaxsw(vector int __a,
+ vector bool int __b) {
+ return __builtin_altivec_vmaxsw(__a, (vector int)__b);
+}
+
+/* vec_vmaxuw */
+
+static __inline__ vector unsigned int __ATTRS_o_ai
+vec_vmaxuw(vector unsigned int __a, vector unsigned int __b) {
+ return __builtin_altivec_vmaxuw(__a, __b);
+}
+
+static __inline__ vector unsigned int __ATTRS_o_ai
+vec_vmaxuw(vector bool int __a, vector unsigned int __b) {
+ return __builtin_altivec_vmaxuw((vector unsigned int)__a, __b);
+}
+
+static __inline__ vector unsigned int __ATTRS_o_ai
+vec_vmaxuw(vector unsigned int __a, vector bool int __b) {
+ return __builtin_altivec_vmaxuw(__a, (vector unsigned int)__b);
+}
+
+/* vec_vmaxfp */
+
+static __inline__ vector float __attribute__((__always_inline__))
+vec_vmaxfp(vector float __a, vector float __b) {
+#ifdef __VSX__
+ return __builtin_vsx_xvmaxsp(__a, __b);
+#else
+ return __builtin_altivec_vmaxfp(__a, __b);
+#endif
+}
+
+/* vec_mergeh */
+
+static __inline__ vector signed char __ATTRS_o_ai
+vec_mergeh(vector signed char __a, vector signed char __b) {
+ return vec_perm(__a, __b,
+ (vector unsigned char)(0x00, 0x10, 0x01, 0x11, 0x02, 0x12,
+ 0x03, 0x13, 0x04, 0x14, 0x05, 0x15,
+ 0x06, 0x16, 0x07, 0x17));
+}
+
+static __inline__ vector unsigned char __ATTRS_o_ai
+vec_mergeh(vector unsigned char __a, vector unsigned char __b) {
+ return vec_perm(__a, __b,
+ (vector unsigned char)(0x00, 0x10, 0x01, 0x11, 0x02, 0x12,
+ 0x03, 0x13, 0x04, 0x14, 0x05, 0x15,
+ 0x06, 0x16, 0x07, 0x17));
+}
+
+static __inline__ vector bool char __ATTRS_o_ai
+vec_mergeh(vector bool char __a, vector bool char __b) {
+ return vec_perm(__a, __b,
+ (vector unsigned char)(0x00, 0x10, 0x01, 0x11, 0x02, 0x12,
+ 0x03, 0x13, 0x04, 0x14, 0x05, 0x15,
+ 0x06, 0x16, 0x07, 0x17));
+}
+
+static __inline__ vector short __ATTRS_o_ai vec_mergeh(vector short __a,
+ vector short __b) {
+ return vec_perm(__a, __b,
+ (vector unsigned char)(0x00, 0x01, 0x10, 0x11, 0x02, 0x03,
+ 0x12, 0x13, 0x04, 0x05, 0x14, 0x15,
+ 0x06, 0x07, 0x16, 0x17));
+}
+
+static __inline__ vector unsigned short __ATTRS_o_ai
+vec_mergeh(vector unsigned short __a, vector unsigned short __b) {
+ return vec_perm(__a, __b,
+ (vector unsigned char)(0x00, 0x01, 0x10, 0x11, 0x02, 0x03,
+ 0x12, 0x13, 0x04, 0x05, 0x14, 0x15,
+ 0x06, 0x07, 0x16, 0x17));
+}
+
+static __inline__ vector bool short __ATTRS_o_ai
+vec_mergeh(vector bool short __a, vector bool short __b) {
+ return vec_perm(__a, __b,
+ (vector unsigned char)(0x00, 0x01, 0x10, 0x11, 0x02, 0x03,
+ 0x12, 0x13, 0x04, 0x05, 0x14, 0x15,
+ 0x06, 0x07, 0x16, 0x17));
+}
+
+static __inline__ vector pixel __ATTRS_o_ai vec_mergeh(vector pixel __a,
+ vector pixel __b) {
+ return vec_perm(__a, __b,
+ (vector unsigned char)(0x00, 0x01, 0x10, 0x11, 0x02, 0x03,
+ 0x12, 0x13, 0x04, 0x05, 0x14, 0x15,
+ 0x06, 0x07, 0x16, 0x17));
+}
+
+static __inline__ vector int __ATTRS_o_ai vec_mergeh(vector int __a,
+ vector int __b) {
+ return vec_perm(__a, __b,
+ (vector unsigned char)(0x00, 0x01, 0x02, 0x03, 0x10, 0x11,
+ 0x12, 0x13, 0x04, 0x05, 0x06, 0x07,
+ 0x14, 0x15, 0x16, 0x17));
+}
+
+static __inline__ vector unsigned int __ATTRS_o_ai
+vec_mergeh(vector unsigned int __a, vector unsigned int __b) {
+ return vec_perm(__a, __b,
+ (vector unsigned char)(0x00, 0x01, 0x02, 0x03, 0x10, 0x11,
+ 0x12, 0x13, 0x04, 0x05, 0x06, 0x07,
+ 0x14, 0x15, 0x16, 0x17));
+}
+
+static __inline__ vector bool int __ATTRS_o_ai vec_mergeh(vector bool int __a,
+ vector bool int __b) {
+ return vec_perm(__a, __b,
+ (vector unsigned char)(0x00, 0x01, 0x02, 0x03, 0x10, 0x11,
+ 0x12, 0x13, 0x04, 0x05, 0x06, 0x07,
+ 0x14, 0x15, 0x16, 0x17));
+}
+
+static __inline__ vector float __ATTRS_o_ai vec_mergeh(vector float __a,
+ vector float __b) {
+ return vec_perm(__a, __b,
+ (vector unsigned char)(0x00, 0x01, 0x02, 0x03, 0x10, 0x11,
+ 0x12, 0x13, 0x04, 0x05, 0x06, 0x07,
+ 0x14, 0x15, 0x16, 0x17));
+}
+
+#ifdef __VSX__
+static __inline__ vector signed long long __ATTRS_o_ai
+vec_mergeh(vector signed long long __a, vector signed long long __b) {
+ return vec_perm(__a, __b,
+ (vector unsigned char)(0x00, 0x01, 0x02, 0x03, 0x04, 0x05,
+ 0x06, 0x07, 0x10, 0x11, 0x12, 0x13,
+ 0x14, 0x15, 0x16, 0x17));
+}
+
+static __inline__ vector signed long long __ATTRS_o_ai
+vec_mergeh(vector signed long long __a, vector bool long long __b) {
+ return vec_perm(__a, (vector signed long long)__b,
+ (vector unsigned char)(0x00, 0x01, 0x02, 0x03, 0x04, 0x05,
+ 0x06, 0x07, 0x10, 0x11, 0x12, 0x13,
+ 0x14, 0x15, 0x16, 0x17));
+}
+
+static __inline__ vector signed long long __ATTRS_o_ai
+vec_mergeh(vector bool long long __a, vector signed long long __b) {
+ return vec_perm((vector signed long long)__a, __b,
+ (vector unsigned char)(0x00, 0x01, 0x02, 0x03, 0x04, 0x05,
+ 0x06, 0x07, 0x10, 0x11, 0x12, 0x13,
+ 0x14, 0x15, 0x16, 0x17));
+}
+
+static __inline__ vector unsigned long long __ATTRS_o_ai
+vec_mergeh(vector unsigned long long __a, vector unsigned long long __b) {
+ return vec_perm(__a, __b,
+ (vector unsigned char)(0x00, 0x01, 0x02, 0x03, 0x04, 0x05,
+ 0x06, 0x07, 0x10, 0x11, 0x12, 0x13,
+ 0x14, 0x15, 0x16, 0x17));
+}
+
+static __inline__ vector unsigned long long __ATTRS_o_ai
+vec_mergeh(vector unsigned long long __a, vector bool long long __b) {
+ return vec_perm(__a, (vector unsigned long long)__b,
+ (vector unsigned char)(0x00, 0x01, 0x02, 0x03, 0x04, 0x05,
+ 0x06, 0x07, 0x10, 0x11, 0x12, 0x13,
+ 0x14, 0x15, 0x16, 0x17));
+}
+
+static __inline__ vector unsigned long long __ATTRS_o_ai
+vec_mergeh(vector bool long long __a, vector unsigned long long __b) {
+ return vec_perm((vector unsigned long long)__a, __b,
+ (vector unsigned char)(0x00, 0x01, 0x02, 0x03, 0x04, 0x05,
+ 0x06, 0x07, 0x10, 0x11, 0x12, 0x13,
+ 0x14, 0x15, 0x16, 0x17));
+}
+
+static __inline__ vector bool long long __ATTRS_o_ai
+vec_mergeh(vector bool long long __a, vector bool long long __b) {
+ return vec_perm(__a, __b,
+ (vector unsigned char)(0x00, 0x01, 0x02, 0x03, 0x04, 0x05,
+ 0x06, 0x07, 0x10, 0x11, 0x12, 0x13,
+ 0x14, 0x15, 0x16, 0x17));
+}
+
+static __inline__ vector double __ATTRS_o_ai vec_mergeh(vector double __a,
+ vector double __b) {
+ return vec_perm(__a, __b,
+ (vector unsigned char)(0x00, 0x01, 0x02, 0x03, 0x04, 0x05,
+ 0x06, 0x07, 0x10, 0x11, 0x12, 0x13,
+ 0x14, 0x15, 0x16, 0x17));
+}
+static __inline__ vector double __ATTRS_o_ai
+vec_mergeh(vector double __a, vector bool long long __b) {
+ return vec_perm(__a, (vector double)__b,
+ (vector unsigned char)(0x00, 0x01, 0x02, 0x03, 0x04, 0x05,
+ 0x06, 0x07, 0x10, 0x11, 0x12, 0x13,
+ 0x14, 0x15, 0x16, 0x17));
+}
+static __inline__ vector double __ATTRS_o_ai
+vec_mergeh(vector bool long long __a, vector double __b) {
+ return vec_perm((vector double)__a, __b,
+ (vector unsigned char)(0x00, 0x01, 0x02, 0x03, 0x04, 0x05,
+ 0x06, 0x07, 0x10, 0x11, 0x12, 0x13,
+ 0x14, 0x15, 0x16, 0x17));
+}
+#endif
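+
+/* vec_mergeh interleaves the elements from the high (most-significant)
+ * halves of the two operands; the permute masks above spell this out byte
+ * by byte. Illustrative sketch, not part of the header (element order as on
+ * a big-endian target):
+ *
+ *   vector signed int a = {0, 1, 2, 3};
+ *   vector signed int b = {10, 11, 12, 13};
+ *   vector signed int h = vec_mergeh(a, b);   // {0, 10, 1, 11}
+ */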
+
+/* vec_vmrghb */
+
+#define __builtin_altivec_vmrghb vec_vmrghb
+
+static __inline__ vector signed char __ATTRS_o_ai
+vec_vmrghb(vector signed char __a, vector signed char __b) {
+ return vec_perm(__a, __b,
+ (vector unsigned char)(0x00, 0x10, 0x01, 0x11, 0x02, 0x12,
+ 0x03, 0x13, 0x04, 0x14, 0x05, 0x15,
+ 0x06, 0x16, 0x07, 0x17));
+}
+
+static __inline__ vector unsigned char __ATTRS_o_ai
+vec_vmrghb(vector unsigned char __a, vector unsigned char __b) {
+ return vec_perm(__a, __b,
+ (vector unsigned char)(0x00, 0x10, 0x01, 0x11, 0x02, 0x12,
+ 0x03, 0x13, 0x04, 0x14, 0x05, 0x15,
+ 0x06, 0x16, 0x07, 0x17));
+}
+
+static __inline__ vector bool char __ATTRS_o_ai
+vec_vmrghb(vector bool char __a, vector bool char __b) {
+ return vec_perm(__a, __b,
+ (vector unsigned char)(0x00, 0x10, 0x01, 0x11, 0x02, 0x12,
+ 0x03, 0x13, 0x04, 0x14, 0x05, 0x15,
+ 0x06, 0x16, 0x07, 0x17));
+}
+
+/* vec_vmrghh */
+
+#define __builtin_altivec_vmrghh vec_vmrghh
+
+static __inline__ vector short __ATTRS_o_ai vec_vmrghh(vector short __a,
+ vector short __b) {
+ return vec_perm(__a, __b,
+ (vector unsigned char)(0x00, 0x01, 0x10, 0x11, 0x02, 0x03,
+ 0x12, 0x13, 0x04, 0x05, 0x14, 0x15,
+ 0x06, 0x07, 0x16, 0x17));
+}
+
+static __inline__ vector unsigned short __ATTRS_o_ai
+vec_vmrghh(vector unsigned short __a, vector unsigned short __b) {
+ return vec_perm(__a, __b,
+ (vector unsigned char)(0x00, 0x01, 0x10, 0x11, 0x02, 0x03,
+ 0x12, 0x13, 0x04, 0x05, 0x14, 0x15,
+ 0x06, 0x07, 0x16, 0x17));
+}
+
+static __inline__ vector bool short __ATTRS_o_ai
+vec_vmrghh(vector bool short __a, vector bool short __b) {
+ return vec_perm(__a, __b,
+ (vector unsigned char)(0x00, 0x01, 0x10, 0x11, 0x02, 0x03,
+ 0x12, 0x13, 0x04, 0x05, 0x14, 0x15,
+ 0x06, 0x07, 0x16, 0x17));
+}
+
+static __inline__ vector pixel __ATTRS_o_ai vec_vmrghh(vector pixel __a,
+ vector pixel __b) {
+ return vec_perm(__a, __b,
+ (vector unsigned char)(0x00, 0x01, 0x10, 0x11, 0x02, 0x03,
+ 0x12, 0x13, 0x04, 0x05, 0x14, 0x15,
+ 0x06, 0x07, 0x16, 0x17));
+}
+
+/* vec_vmrghw */
+
+#define __builtin_altivec_vmrghw vec_vmrghw
+
+static __inline__ vector int __ATTRS_o_ai vec_vmrghw(vector int __a,
+ vector int __b) {
+ return vec_perm(__a, __b,
+ (vector unsigned char)(0x00, 0x01, 0x02, 0x03, 0x10, 0x11,
+ 0x12, 0x13, 0x04, 0x05, 0x06, 0x07,
+ 0x14, 0x15, 0x16, 0x17));
+}
+
+static __inline__ vector unsigned int __ATTRS_o_ai
+vec_vmrghw(vector unsigned int __a, vector unsigned int __b) {
+ return vec_perm(__a, __b,
+ (vector unsigned char)(0x00, 0x01, 0x02, 0x03, 0x10, 0x11,
+ 0x12, 0x13, 0x04, 0x05, 0x06, 0x07,
+ 0x14, 0x15, 0x16, 0x17));
+}
+
+static __inline__ vector bool int __ATTRS_o_ai vec_vmrghw(vector bool int __a,
+ vector bool int __b) {
+ return vec_perm(__a, __b,
+ (vector unsigned char)(0x00, 0x01, 0x02, 0x03, 0x10, 0x11,
+ 0x12, 0x13, 0x04, 0x05, 0x06, 0x07,
+ 0x14, 0x15, 0x16, 0x17));
+}
+
+static __inline__ vector float __ATTRS_o_ai vec_vmrghw(vector float __a,
+ vector float __b) {
+ return vec_perm(__a, __b,
+ (vector unsigned char)(0x00, 0x01, 0x02, 0x03, 0x10, 0x11,
+ 0x12, 0x13, 0x04, 0x05, 0x06, 0x07,
+ 0x14, 0x15, 0x16, 0x17));
+}
+
+/* vec_mergel */
+
+static __inline__ vector signed char __ATTRS_o_ai
+vec_mergel(vector signed char __a, vector signed char __b) {
+ return vec_perm(__a, __b,
+ (vector unsigned char)(0x08, 0x18, 0x09, 0x19, 0x0A, 0x1A,
+ 0x0B, 0x1B, 0x0C, 0x1C, 0x0D, 0x1D,
+ 0x0E, 0x1E, 0x0F, 0x1F));
+}
+
+static __inline__ vector unsigned char __ATTRS_o_ai
+vec_mergel(vector unsigned char __a, vector unsigned char __b) {
+ return vec_perm(__a, __b,
+ (vector unsigned char)(0x08, 0x18, 0x09, 0x19, 0x0A, 0x1A,
+ 0x0B, 0x1B, 0x0C, 0x1C, 0x0D, 0x1D,
+ 0x0E, 0x1E, 0x0F, 0x1F));
+}
+
+static __inline__ vector bool char __ATTRS_o_ai
+vec_mergel(vector bool char __a, vector bool char __b) {
+ return vec_perm(__a, __b,
+ (vector unsigned char)(0x08, 0x18, 0x09, 0x19, 0x0A, 0x1A,
+ 0x0B, 0x1B, 0x0C, 0x1C, 0x0D, 0x1D,
+ 0x0E, 0x1E, 0x0F, 0x1F));
+}
+
+static __inline__ vector short __ATTRS_o_ai vec_mergel(vector short __a,
+ vector short __b) {
+ return vec_perm(__a, __b,
+ (vector unsigned char)(0x08, 0x09, 0x18, 0x19, 0x0A, 0x0B,
+ 0x1A, 0x1B, 0x0C, 0x0D, 0x1C, 0x1D,
+ 0x0E, 0x0F, 0x1E, 0x1F));
+}
+
+static __inline__ vector unsigned short __ATTRS_o_ai
+vec_mergel(vector unsigned short __a, vector unsigned short __b) {
+ return vec_perm(__a, __b,
+ (vector unsigned char)(0x08, 0x09, 0x18, 0x19, 0x0A, 0x0B,
+ 0x1A, 0x1B, 0x0C, 0x0D, 0x1C, 0x1D,
+ 0x0E, 0x0F, 0x1E, 0x1F));
+}
+
+static __inline__ vector bool short __ATTRS_o_ai
+vec_mergel(vector bool short __a, vector bool short __b) {
+ return vec_perm(__a, __b,
+ (vector unsigned char)(0x08, 0x09, 0x18, 0x19, 0x0A, 0x0B,
+ 0x1A, 0x1B, 0x0C, 0x0D, 0x1C, 0x1D,
+ 0x0E, 0x0F, 0x1E, 0x1F));
+}
+
+static __inline__ vector pixel __ATTRS_o_ai vec_mergel(vector pixel __a,
+ vector pixel __b) {
+ return vec_perm(__a, __b,
+ (vector unsigned char)(0x08, 0x09, 0x18, 0x19, 0x0A, 0x0B,
+ 0x1A, 0x1B, 0x0C, 0x0D, 0x1C, 0x1D,
+ 0x0E, 0x0F, 0x1E, 0x1F));
+}
+
+static __inline__ vector int __ATTRS_o_ai vec_mergel(vector int __a,
+ vector int __b) {
+ return vec_perm(__a, __b,
+ (vector unsigned char)(0x08, 0x09, 0x0A, 0x0B, 0x18, 0x19,
+ 0x1A, 0x1B, 0x0C, 0x0D, 0x0E, 0x0F,
+ 0x1C, 0x1D, 0x1E, 0x1F));
+}
+
+static __inline__ vector unsigned int __ATTRS_o_ai
+vec_mergel(vector unsigned int __a, vector unsigned int __b) {
+ return vec_perm(__a, __b,
+ (vector unsigned char)(0x08, 0x09, 0x0A, 0x0B, 0x18, 0x19,
+ 0x1A, 0x1B, 0x0C, 0x0D, 0x0E, 0x0F,
+ 0x1C, 0x1D, 0x1E, 0x1F));
+}
+
+static __inline__ vector bool int __ATTRS_o_ai vec_mergel(vector bool int __a,
+ vector bool int __b) {
+ return vec_perm(__a, __b,
+ (vector unsigned char)(0x08, 0x09, 0x0A, 0x0B, 0x18, 0x19,
+ 0x1A, 0x1B, 0x0C, 0x0D, 0x0E, 0x0F,
+ 0x1C, 0x1D, 0x1E, 0x1F));
+}
+
+static __inline__ vector float __ATTRS_o_ai vec_mergel(vector float __a,
+ vector float __b) {
+ return vec_perm(__a, __b,
+ (vector unsigned char)(0x08, 0x09, 0x0A, 0x0B, 0x18, 0x19,
+ 0x1A, 0x1B, 0x0C, 0x0D, 0x0E, 0x0F,
+ 0x1C, 0x1D, 0x1E, 0x1F));
+}
+
+#ifdef __VSX__
+static __inline__ vector signed long long __ATTRS_o_ai
+vec_mergel(vector signed long long __a, vector signed long long __b) {
+ return vec_perm(__a, __b,
+ (vector unsigned char)(0x08, 0x09, 0x0A, 0x0B, 0x0C, 0x0D,
+ 0x0E, 0x0F, 0x18, 0x19, 0x1A, 0x1B,
+ 0x1C, 0x1D, 0x1E, 0x1F));
+}
+static __inline__ vector signed long long __ATTRS_o_ai
+vec_mergel(vector signed long long __a, vector bool long long __b) {
+ return vec_perm(__a, (vector signed long long)__b,
+ (vector unsigned char)(0x08, 0x09, 0x0A, 0x0B, 0x0C, 0x0D,
+ 0x0E, 0x0F, 0x18, 0x19, 0x1A, 0x1B,
+ 0x1C, 0x1D, 0x1E, 0x1F));
+}
+static __inline__ vector signed long long __ATTRS_o_ai
+vec_mergel(vector bool long long __a, vector signed long long __b) {
+ return vec_perm((vector signed long long)__a, __b,
+ (vector unsigned char)(0x08, 0x09, 0x0A, 0x0B, 0x0C, 0x0D,
+ 0x0E, 0x0F, 0x18, 0x19, 0x1A, 0x1B,
+ 0x1C, 0x1D, 0x1E, 0x1F));
+}
+static __inline__ vector unsigned long long __ATTRS_o_ai
+vec_mergel(vector unsigned long long __a, vector unsigned long long __b) {
+ return vec_perm(__a, __b,
+ (vector unsigned char)(0x08, 0x09, 0x0A, 0x0B, 0x0C, 0x0D,
+ 0x0E, 0x0F, 0x18, 0x19, 0x1A, 0x1B,
+ 0x1C, 0x1D, 0x1E, 0x1F));
+}
+static __inline__ vector unsigned long long __ATTRS_o_ai
+vec_mergel(vector unsigned long long __a, vector bool long long __b) {
+ return vec_perm(__a, (vector unsigned long long)__b,
+ (vector unsigned char)(0x08, 0x09, 0x0A, 0x0B, 0x0C, 0x0D,
+ 0x0E, 0x0F, 0x18, 0x19, 0x1A, 0x1B,
+ 0x1C, 0x1D, 0x1E, 0x1F));
+}
+static __inline__ vector unsigned long long __ATTRS_o_ai
+vec_mergel(vector bool long long __a, vector unsigned long long __b) {
+ return vec_perm((vector unsigned long long)__a, __b,
+ (vector unsigned char)(0x08, 0x09, 0x0A, 0x0B, 0x0C, 0x0D,
+ 0x0E, 0x0F, 0x18, 0x19, 0x1A, 0x1B,
+ 0x1C, 0x1D, 0x1E, 0x1F));
+}
+static __inline__ vector bool long long __ATTRS_o_ai
+vec_mergel(vector bool long long __a, vector bool long long __b) {
+ return vec_perm(__a, __b,
+ (vector unsigned char)(0x08, 0x09, 0x0A, 0x0B, 0x0C, 0x0D,
+ 0x0E, 0x0F, 0x18, 0x19, 0x1A, 0x1B,
+ 0x1C, 0x1D, 0x1E, 0x1F));
+}
+static __inline__ vector double __ATTRS_o_ai vec_mergel(vector double __a,
+ vector double __b) {
+ return vec_perm(__a, __b,
+ (vector unsigned char)(0x08, 0x09, 0x0A, 0x0B, 0x0C, 0x0D,
+ 0x0E, 0x0F, 0x18, 0x19, 0x1A, 0x1B,
+ 0x1C, 0x1D, 0x1E, 0x1F));
+}
+static __inline__ vector double __ATTRS_o_ai
+vec_mergel(vector double __a, vector bool long long __b) {
+ return vec_perm(__a, (vector double)__b,
+ (vector unsigned char)(0x08, 0x09, 0x0A, 0x0B, 0x0C, 0x0D,
+ 0x0E, 0x0F, 0x18, 0x19, 0x1A, 0x1B,
+ 0x1C, 0x1D, 0x1E, 0x1F));
+}
+static __inline__ vector double __ATTRS_o_ai
+vec_mergel(vector bool long long __a, vector double __b) {
+ return vec_perm((vector double)__a, __b,
+ (vector unsigned char)(0x08, 0x09, 0x0A, 0x0B, 0x0C, 0x0D,
+ 0x0E, 0x0F, 0x18, 0x19, 0x1A, 0x1B,
+ 0x1C, 0x1D, 0x1E, 0x1F));
+}
+#endif
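+
+/* vec_mergel is the mirror image of vec_mergeh: it interleaves the elements
+ * from the low halves, so (in big-endian element order) the int overload
+ * above gives vec_mergel({0,1,2,3}, {10,11,12,13}) == {2, 12, 3, 13}. */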
+
+/* vec_vmrglb */
+
+#define __builtin_altivec_vmrglb vec_vmrglb
+
+static __inline__ vector signed char __ATTRS_o_ai
+vec_vmrglb(vector signed char __a, vector signed char __b) {
+ return vec_perm(__a, __b,
+ (vector unsigned char)(0x08, 0x18, 0x09, 0x19, 0x0A, 0x1A,
+ 0x0B, 0x1B, 0x0C, 0x1C, 0x0D, 0x1D,
+ 0x0E, 0x1E, 0x0F, 0x1F));
+}
+
+static __inline__ vector unsigned char __ATTRS_o_ai
+vec_vmrglb(vector unsigned char __a, vector unsigned char __b) {
+ return vec_perm(__a, __b,
+ (vector unsigned char)(0x08, 0x18, 0x09, 0x19, 0x0A, 0x1A,
+ 0x0B, 0x1B, 0x0C, 0x1C, 0x0D, 0x1D,
+ 0x0E, 0x1E, 0x0F, 0x1F));
+}
+
+static __inline__ vector bool char __ATTRS_o_ai
+vec_vmrglb(vector bool char __a, vector bool char __b) {
+ return vec_perm(__a, __b,
+ (vector unsigned char)(0x08, 0x18, 0x09, 0x19, 0x0A, 0x1A,
+ 0x0B, 0x1B, 0x0C, 0x1C, 0x0D, 0x1D,
+ 0x0E, 0x1E, 0x0F, 0x1F));
+}
+
+/* vec_vmrglh */
+
+#define __builtin_altivec_vmrglh vec_vmrglh
+
+static __inline__ vector short __ATTRS_o_ai vec_vmrglh(vector short __a,
+ vector short __b) {
+ return vec_perm(__a, __b,
+ (vector unsigned char)(0x08, 0x09, 0x18, 0x19, 0x0A, 0x0B,
+ 0x1A, 0x1B, 0x0C, 0x0D, 0x1C, 0x1D,
+ 0x0E, 0x0F, 0x1E, 0x1F));
+}
+
+static __inline__ vector unsigned short __ATTRS_o_ai
+vec_vmrglh(vector unsigned short __a, vector unsigned short __b) {
+ return vec_perm(__a, __b,
+ (vector unsigned char)(0x08, 0x09, 0x18, 0x19, 0x0A, 0x0B,
+ 0x1A, 0x1B, 0x0C, 0x0D, 0x1C, 0x1D,
+ 0x0E, 0x0F, 0x1E, 0x1F));
+}
+
+static __inline__ vector bool short __ATTRS_o_ai
+vec_vmrglh(vector bool short __a, vector bool short __b) {
+ return vec_perm(__a, __b,
+ (vector unsigned char)(0x08, 0x09, 0x18, 0x19, 0x0A, 0x0B,
+ 0x1A, 0x1B, 0x0C, 0x0D, 0x1C, 0x1D,
+ 0x0E, 0x0F, 0x1E, 0x1F));
+}
+
+static __inline__ vector pixel __ATTRS_o_ai vec_vmrglh(vector pixel __a,
+ vector pixel __b) {
+ return vec_perm(__a, __b,
+ (vector unsigned char)(0x08, 0x09, 0x18, 0x19, 0x0A, 0x0B,
+ 0x1A, 0x1B, 0x0C, 0x0D, 0x1C, 0x1D,
+ 0x0E, 0x0F, 0x1E, 0x1F));
+}
+
+/* vec_vmrglw */
+
+#define __builtin_altivec_vmrglw vec_vmrglw
+
+static __inline__ vector int __ATTRS_o_ai vec_vmrglw(vector int __a,
+ vector int __b) {
+ return vec_perm(__a, __b,
+ (vector unsigned char)(0x08, 0x09, 0x0A, 0x0B, 0x18, 0x19,
+ 0x1A, 0x1B, 0x0C, 0x0D, 0x0E, 0x0F,
+ 0x1C, 0x1D, 0x1E, 0x1F));
+}
+
+static __inline__ vector unsigned int __ATTRS_o_ai
+vec_vmrglw(vector unsigned int __a, vector unsigned int __b) {
+ return vec_perm(__a, __b,
+ (vector unsigned char)(0x08, 0x09, 0x0A, 0x0B, 0x18, 0x19,
+ 0x1A, 0x1B, 0x0C, 0x0D, 0x0E, 0x0F,
+ 0x1C, 0x1D, 0x1E, 0x1F));
+}
+
+static __inline__ vector bool int __ATTRS_o_ai vec_vmrglw(vector bool int __a,
+ vector bool int __b) {
+ return vec_perm(__a, __b,
+ (vector unsigned char)(0x08, 0x09, 0x0A, 0x0B, 0x18, 0x19,
+ 0x1A, 0x1B, 0x0C, 0x0D, 0x0E, 0x0F,
+ 0x1C, 0x1D, 0x1E, 0x1F));
+}
+
+static __inline__ vector float __ATTRS_o_ai vec_vmrglw(vector float __a,
+ vector float __b) {
+ return vec_perm(__a, __b,
+ (vector unsigned char)(0x08, 0x09, 0x0A, 0x0B, 0x18, 0x19,
+ 0x1A, 0x1B, 0x0C, 0x0D, 0x0E, 0x0F,
+ 0x1C, 0x1D, 0x1E, 0x1F));
+}
+
+#ifdef __POWER8_VECTOR__
+/* vec_mergee */
+
+static __inline__ vector bool int __ATTRS_o_ai vec_mergee(vector bool int __a,
+ vector bool int __b) {
+ return vec_perm(__a, __b,
+ (vector unsigned char)(0x00, 0x01, 0x02, 0x03, 0x10, 0x11,
+ 0x12, 0x13, 0x08, 0x09, 0x0A, 0x0B,
+ 0x18, 0x19, 0x1A, 0x1B));
+}
+
+static __inline__ vector signed int __ATTRS_o_ai
+vec_mergee(vector signed int __a, vector signed int __b) {
+ return vec_perm(__a, __b,
+ (vector unsigned char)(0x00, 0x01, 0x02, 0x03, 0x10, 0x11,
+ 0x12, 0x13, 0x08, 0x09, 0x0A, 0x0B,
+ 0x18, 0x19, 0x1A, 0x1B));
+}
+
+static __inline__ vector unsigned int __ATTRS_o_ai
+vec_mergee(vector unsigned int __a, vector unsigned int __b) {
+ return vec_perm(__a, __b,
+ (vector unsigned char)(0x00, 0x01, 0x02, 0x03, 0x10, 0x11,
+ 0x12, 0x13, 0x08, 0x09, 0x0A, 0x0B,
+ 0x18, 0x19, 0x1A, 0x1B));
+}
+
+/* vec_mergeo */
+
+static __inline__ vector bool int __ATTRS_o_ai vec_mergeo(vector bool int __a,
+ vector bool int __b) {
+ return vec_perm(__a, __b,
+ (vector unsigned char)(0x04, 0x05, 0x06, 0x07, 0x14, 0x15,
+ 0x16, 0x17, 0x0C, 0x0D, 0x0E, 0x0F,
+ 0x1C, 0x1D, 0x1E, 0x1F));
+}
+
+static __inline__ vector signed int __ATTRS_o_ai
+vec_mergeo(vector signed int __a, vector signed int __b) {
+ return vec_perm(__a, __b,
+ (vector unsigned char)(0x04, 0x05, 0x06, 0x07, 0x14, 0x15,
+ 0x16, 0x17, 0x0C, 0x0D, 0x0E, 0x0F,
+ 0x1C, 0x1D, 0x1E, 0x1F));
+}
+
+static __inline__ vector unsigned int __ATTRS_o_ai
+vec_mergeo(vector unsigned int __a, vector unsigned int __b) {
+ return vec_perm(__a, __b,
+ (vector unsigned char)(0x04, 0x05, 0x06, 0x07, 0x14, 0x15,
+ 0x16, 0x17, 0x0C, 0x0D, 0x0E, 0x0F,
+ 0x1C, 0x1D, 0x1E, 0x1F));
+}
+
+#endif
+
+/* vec_mfvscr */
+
+static __inline__ vector unsigned short __attribute__((__always_inline__))
+vec_mfvscr(void) {
+ return __builtin_altivec_mfvscr();
+}
+
+/* vec_min */
+
+static __inline__ vector signed char __ATTRS_o_ai
+vec_min(vector signed char __a, vector signed char __b) {
+ return __builtin_altivec_vminsb(__a, __b);
+}
+
+static __inline__ vector signed char __ATTRS_o_ai
+vec_min(vector bool char __a, vector signed char __b) {
+ return __builtin_altivec_vminsb((vector signed char)__a, __b);
+}
+
+static __inline__ vector signed char __ATTRS_o_ai
+vec_min(vector signed char __a, vector bool char __b) {
+ return __builtin_altivec_vminsb(__a, (vector signed char)__b);
+}
+
+static __inline__ vector unsigned char __ATTRS_o_ai
+vec_min(vector unsigned char __a, vector unsigned char __b) {
+ return __builtin_altivec_vminub(__a, __b);
+}
+
+static __inline__ vector unsigned char __ATTRS_o_ai
+vec_min(vector bool char __a, vector unsigned char __b) {
+ return __builtin_altivec_vminub((vector unsigned char)__a, __b);
+}
+
+static __inline__ vector unsigned char __ATTRS_o_ai
+vec_min(vector unsigned char __a, vector bool char __b) {
+ return __builtin_altivec_vminub(__a, (vector unsigned char)__b);
+}
+
+static __inline__ vector short __ATTRS_o_ai vec_min(vector short __a,
+ vector short __b) {
+ return __builtin_altivec_vminsh(__a, __b);
+}
+
+static __inline__ vector short __ATTRS_o_ai vec_min(vector bool short __a,
+ vector short __b) {
+ return __builtin_altivec_vminsh((vector short)__a, __b);
+}
+
+static __inline__ vector short __ATTRS_o_ai vec_min(vector short __a,
+ vector bool short __b) {
+ return __builtin_altivec_vminsh(__a, (vector short)__b);
+}
+
+static __inline__ vector unsigned short __ATTRS_o_ai
+vec_min(vector unsigned short __a, vector unsigned short __b) {
+ return __builtin_altivec_vminuh(__a, __b);
+}
+
+static __inline__ vector unsigned short __ATTRS_o_ai
+vec_min(vector bool short __a, vector unsigned short __b) {
+ return __builtin_altivec_vminuh((vector unsigned short)__a, __b);
+}
+
+static __inline__ vector unsigned short __ATTRS_o_ai
+vec_min(vector unsigned short __a, vector bool short __b) {
+ return __builtin_altivec_vminuh(__a, (vector unsigned short)__b);
+}
+
+static __inline__ vector int __ATTRS_o_ai vec_min(vector int __a,
+ vector int __b) {
+ return __builtin_altivec_vminsw(__a, __b);
+}
+
+static __inline__ vector int __ATTRS_o_ai vec_min(vector bool int __a,
+ vector int __b) {
+ return __builtin_altivec_vminsw((vector int)__a, __b);
+}
+
+static __inline__ vector int __ATTRS_o_ai vec_min(vector int __a,
+ vector bool int __b) {
+ return __builtin_altivec_vminsw(__a, (vector int)__b);
+}
+
+static __inline__ vector unsigned int __ATTRS_o_ai
+vec_min(vector unsigned int __a, vector unsigned int __b) {
+ return __builtin_altivec_vminuw(__a, __b);
+}
+
+static __inline__ vector unsigned int __ATTRS_o_ai
+vec_min(vector bool int __a, vector unsigned int __b) {
+ return __builtin_altivec_vminuw((vector unsigned int)__a, __b);
+}
+
+static __inline__ vector unsigned int __ATTRS_o_ai
+vec_min(vector unsigned int __a, vector bool int __b) {
+ return __builtin_altivec_vminuw(__a, (vector unsigned int)__b);
+}
+
+#ifdef __POWER8_VECTOR__
+static __inline__ vector signed long long __ATTRS_o_ai
+vec_min(vector signed long long __a, vector signed long long __b) {
+ return __builtin_altivec_vminsd(__a, __b);
+}
+
+static __inline__ vector signed long long __ATTRS_o_ai
+vec_min(vector bool long long __a, vector signed long long __b) {
+ return __builtin_altivec_vminsd((vector signed long long)__a, __b);
+}
+
+static __inline__ vector signed long long __ATTRS_o_ai
+vec_min(vector signed long long __a, vector bool long long __b) {
+ return __builtin_altivec_vminsd(__a, (vector signed long long)__b);
+}
+
+static __inline__ vector unsigned long long __ATTRS_o_ai
+vec_min(vector unsigned long long __a, vector unsigned long long __b) {
+ return __builtin_altivec_vminud(__a, __b);
+}
+
+static __inline__ vector unsigned long long __ATTRS_o_ai
+vec_min(vector bool long long __a, vector unsigned long long __b) {
+ return __builtin_altivec_vminud((vector unsigned long long)__a, __b);
+}
+
+static __inline__ vector unsigned long long __ATTRS_o_ai
+vec_min(vector unsigned long long __a, vector bool long long __b) {
+ return __builtin_altivec_vminud(__a, (vector unsigned long long)__b);
+}
+#endif
+
+static __inline__ vector float __ATTRS_o_ai vec_min(vector float __a,
+ vector float __b) {
+#ifdef __VSX__
+ return __builtin_vsx_xvminsp(__a, __b);
+#else
+ return __builtin_altivec_vminfp(__a, __b);
+#endif
+}
+
+#ifdef __VSX__
+static __inline__ vector double __ATTRS_o_ai vec_min(vector double __a,
+ vector double __b) {
+ return __builtin_vsx_xvmindp(__a, __b);
+}
+#endif
+
+/* vec_vminsb */
+
+static __inline__ vector signed char __ATTRS_o_ai
+vec_vminsb(vector signed char __a, vector signed char __b) {
+ return __builtin_altivec_vminsb(__a, __b);
+}
+
+static __inline__ vector signed char __ATTRS_o_ai
+vec_vminsb(vector bool char __a, vector signed char __b) {
+ return __builtin_altivec_vminsb((vector signed char)__a, __b);
+}
+
+static __inline__ vector signed char __ATTRS_o_ai
+vec_vminsb(vector signed char __a, vector bool char __b) {
+ return __builtin_altivec_vminsb(__a, (vector signed char)__b);
+}
+
+/* vec_vminub */
+
+static __inline__ vector unsigned char __ATTRS_o_ai
+vec_vminub(vector unsigned char __a, vector unsigned char __b) {
+ return __builtin_altivec_vminub(__a, __b);
+}
+
+static __inline__ vector unsigned char __ATTRS_o_ai
+vec_vminub(vector bool char __a, vector unsigned char __b) {
+ return __builtin_altivec_vminub((vector unsigned char)__a, __b);
+}
+
+static __inline__ vector unsigned char __ATTRS_o_ai
+vec_vminub(vector unsigned char __a, vector bool char __b) {
+ return __builtin_altivec_vminub(__a, (vector unsigned char)__b);
+}
+
+/* vec_vminsh */
+
+static __inline__ vector short __ATTRS_o_ai vec_vminsh(vector short __a,
+ vector short __b) {
+ return __builtin_altivec_vminsh(__a, __b);
+}
+
+static __inline__ vector short __ATTRS_o_ai vec_vminsh(vector bool short __a,
+ vector short __b) {
+ return __builtin_altivec_vminsh((vector short)__a, __b);
+}
+
+static __inline__ vector short __ATTRS_o_ai vec_vminsh(vector short __a,
+ vector bool short __b) {
+ return __builtin_altivec_vminsh(__a, (vector short)__b);
+}
+
+/* vec_vminuh */
+
+static __inline__ vector unsigned short __ATTRS_o_ai
+vec_vminuh(vector unsigned short __a, vector unsigned short __b) {
+ return __builtin_altivec_vminuh(__a, __b);
+}
+
+static __inline__ vector unsigned short __ATTRS_o_ai
+vec_vminuh(vector bool short __a, vector unsigned short __b) {
+ return __builtin_altivec_vminuh((vector unsigned short)__a, __b);
+}
+
+static __inline__ vector unsigned short __ATTRS_o_ai
+vec_vminuh(vector unsigned short __a, vector bool short __b) {
+ return __builtin_altivec_vminuh(__a, (vector unsigned short)__b);
+}
+
+/* vec_vminsw */
+
+static __inline__ vector int __ATTRS_o_ai vec_vminsw(vector int __a,
+ vector int __b) {
+ return __builtin_altivec_vminsw(__a, __b);
+}
+
+static __inline__ vector int __ATTRS_o_ai vec_vminsw(vector bool int __a,
+ vector int __b) {
+ return __builtin_altivec_vminsw((vector int)__a, __b);
+}
+
+static __inline__ vector int __ATTRS_o_ai vec_vminsw(vector int __a,
+ vector bool int __b) {
+ return __builtin_altivec_vminsw(__a, (vector int)__b);
+}
+
+/* vec_vminuw */
+
+static __inline__ vector unsigned int __ATTRS_o_ai
+vec_vminuw(vector unsigned int __a, vector unsigned int __b) {
+ return __builtin_altivec_vminuw(__a, __b);
+}
+
+static __inline__ vector unsigned int __ATTRS_o_ai
+vec_vminuw(vector bool int __a, vector unsigned int __b) {
+ return __builtin_altivec_vminuw((vector unsigned int)__a, __b);
+}
+
+static __inline__ vector unsigned int __ATTRS_o_ai
+vec_vminuw(vector unsigned int __a, vector bool int __b) {
+ return __builtin_altivec_vminuw(__a, (vector unsigned int)__b);
+}
+
+/* vec_vminfp */
+
+static __inline__ vector float __attribute__((__always_inline__))
+vec_vminfp(vector float __a, vector float __b) {
+#ifdef __VSX__
+ return __builtin_vsx_xvminsp(__a, __b);
+#else
+ return __builtin_altivec_vminfp(__a, __b);
+#endif
+}
+
+/* vec_mladd */
+
+#define __builtin_altivec_vmladduhm vec_mladd
+
+static __inline__ vector short __ATTRS_o_ai vec_mladd(vector short __a,
+ vector short __b,
+ vector short __c) {
+ return __a * __b + __c;
+}
+
+static __inline__ vector short __ATTRS_o_ai vec_mladd(
+ vector short __a, vector unsigned short __b, vector unsigned short __c) {
+ return __a * (vector short)__b + (vector short)__c;
+}
+
+static __inline__ vector short __ATTRS_o_ai vec_mladd(vector unsigned short __a,
+ vector short __b,
+ vector short __c) {
+ return (vector short)__a * __b + __c;
+}
+
+static __inline__ vector unsigned short __ATTRS_o_ai
+vec_mladd(vector unsigned short __a, vector unsigned short __b,
+ vector unsigned short __c) {
+ return __a * __b + __c;
+}
+
+/* vec_vmladduhm */
+
+static __inline__ vector short __ATTRS_o_ai vec_vmladduhm(vector short __a,
+ vector short __b,
+ vector short __c) {
+ return __a * __b + __c;
+}
+
+static __inline__ vector short __ATTRS_o_ai vec_vmladduhm(
+ vector short __a, vector unsigned short __b, vector unsigned short __c) {
+ return __a * (vector short)__b + (vector short)__c;
+}
+
+static __inline__ vector short __ATTRS_o_ai
+vec_vmladduhm(vector unsigned short __a, vector short __b, vector short __c) {
+ return (vector short)__a * __b + __c;
+}
+
+static __inline__ vector unsigned short __ATTRS_o_ai
+vec_vmladduhm(vector unsigned short __a, vector unsigned short __b,
+ vector unsigned short __c) {
+ return __a * __b + __c;
+}
+
+/* vec_mradds */
+
+static __inline__ vector short __attribute__((__always_inline__))
+vec_mradds(vector short __a, vector short __b, vector short __c) {
+ return __builtin_altivec_vmhraddshs(__a, __b, __c);
+}
+
+/* vec_vmhraddshs */
+
+static __inline__ vector short __attribute__((__always_inline__))
+vec_vmhraddshs(vector short __a, vector short __b, vector short __c) {
+ return __builtin_altivec_vmhraddshs(__a, __b, __c);
+}
+
+/* vec_msum */
+
+static __inline__ vector int __ATTRS_o_ai vec_msum(vector signed char __a,
+ vector unsigned char __b,
+ vector int __c) {
+ return __builtin_altivec_vmsummbm(__a, __b, __c);
+}
+
+static __inline__ vector unsigned int __ATTRS_o_ai
+vec_msum(vector unsigned char __a, vector unsigned char __b,
+ vector unsigned int __c) {
+ return __builtin_altivec_vmsumubm(__a, __b, __c);
+}
+
+static __inline__ vector int __ATTRS_o_ai vec_msum(vector short __a,
+ vector short __b,
+ vector int __c) {
+ return __builtin_altivec_vmsumshm(__a, __b, __c);
+}
+
+static __inline__ vector unsigned int __ATTRS_o_ai
+vec_msum(vector unsigned short __a, vector unsigned short __b,
+ vector unsigned int __c) {
+ return __builtin_altivec_vmsumuhm(__a, __b, __c);
+}
+
+/* vec_vmsummbm */
+
+static __inline__ vector int __attribute__((__always_inline__))
+vec_vmsummbm(vector signed char __a, vector unsigned char __b, vector int __c) {
+ return __builtin_altivec_vmsummbm(__a, __b, __c);
+}
+
+/* vec_vmsumubm */
+
+static __inline__ vector unsigned int __attribute__((__always_inline__))
+vec_vmsumubm(vector unsigned char __a, vector unsigned char __b,
+ vector unsigned int __c) {
+ return __builtin_altivec_vmsumubm(__a, __b, __c);
+}
+
+/* vec_vmsumshm */
+
+static __inline__ vector int __attribute__((__always_inline__))
+vec_vmsumshm(vector short __a, vector short __b, vector int __c) {
+ return __builtin_altivec_vmsumshm(__a, __b, __c);
+}
+
+/* vec_vmsumuhm */
+
+static __inline__ vector unsigned int __attribute__((__always_inline__))
+vec_vmsumuhm(vector unsigned short __a, vector unsigned short __b,
+ vector unsigned int __c) {
+ return __builtin_altivec_vmsumuhm(__a, __b, __c);
+}
+
+/* vec_msums */
+
+static __inline__ vector int __ATTRS_o_ai vec_msums(vector short __a,
+ vector short __b,
+ vector int __c) {
+ return __builtin_altivec_vmsumshs(__a, __b, __c);
+}
+
+static __inline__ vector unsigned int __ATTRS_o_ai
+vec_msums(vector unsigned short __a, vector unsigned short __b,
+ vector unsigned int __c) {
+ return __builtin_altivec_vmsumuhs(__a, __b, __c);
+}
+
+/* vec_vmsumshs */
+
+static __inline__ vector int __attribute__((__always_inline__))
+vec_vmsumshs(vector short __a, vector short __b, vector int __c) {
+ return __builtin_altivec_vmsumshs(__a, __b, __c);
+}
+
+/* vec_vmsumuhs */
+
+static __inline__ vector unsigned int __attribute__((__always_inline__))
+vec_vmsumuhs(vector unsigned short __a, vector unsigned short __b,
+ vector unsigned int __c) {
+ return __builtin_altivec_vmsumuhs(__a, __b, __c);
+}
+
+/* vec_mtvscr */
+
+static __inline__ void __ATTRS_o_ai vec_mtvscr(vector signed char __a) {
+ __builtin_altivec_mtvscr((vector int)__a);
+}
+
+static __inline__ void __ATTRS_o_ai vec_mtvscr(vector unsigned char __a) {
+ __builtin_altivec_mtvscr((vector int)__a);
+}
+
+static __inline__ void __ATTRS_o_ai vec_mtvscr(vector bool char __a) {
+ __builtin_altivec_mtvscr((vector int)__a);
+}
+
+static __inline__ void __ATTRS_o_ai vec_mtvscr(vector short __a) {
+ __builtin_altivec_mtvscr((vector int)__a);
+}
+
+static __inline__ void __ATTRS_o_ai vec_mtvscr(vector unsigned short __a) {
+ __builtin_altivec_mtvscr((vector int)__a);
+}
+
+static __inline__ void __ATTRS_o_ai vec_mtvscr(vector bool short __a) {
+ __builtin_altivec_mtvscr((vector int)__a);
+}
+
+static __inline__ void __ATTRS_o_ai vec_mtvscr(vector pixel __a) {
+ __builtin_altivec_mtvscr((vector int)__a);
+}
+
+static __inline__ void __ATTRS_o_ai vec_mtvscr(vector int __a) {
+ __builtin_altivec_mtvscr((vector int)__a);
+}
+
+static __inline__ void __ATTRS_o_ai vec_mtvscr(vector unsigned int __a) {
+ __builtin_altivec_mtvscr((vector int)__a);
+}
+
+static __inline__ void __ATTRS_o_ai vec_mtvscr(vector bool int __a) {
+ __builtin_altivec_mtvscr((vector int)__a);
+}
+
+static __inline__ void __ATTRS_o_ai vec_mtvscr(vector float __a) {
+ __builtin_altivec_mtvscr((vector int)__a);
+}
+
+/* vec_mul */
+
+/* Integer vector multiplication is carried out by multiplying the even
+   and odd elements separately, truncating each product to the element
+   width, and merging the truncated results back into the result vector.
+*/
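+/* Illustrative sketch (values assumed for this example, not part of the
+   header): each lane keeps only the low-order bits of its product, so
+   the multiply is modular per element:
+
+     vector signed short x = {3, -2, 100, 0x4000, 1, 1, 1, 1};
+     vector signed short y = {4,  5,   7,      4, 1, 1, 1, 1};
+     vector signed short z = vec_mul(x, y);
+     // z = {12, -10, 700, 0, 1, 1, 1, 1}; 0x4000 * 4 wraps to 0.
+*/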
+static __inline__ vector signed char __ATTRS_o_ai
+vec_mul(vector signed char __a, vector signed char __b) {
+ return __a * __b;
+}
+
+static __inline__ vector unsigned char __ATTRS_o_ai
+vec_mul(vector unsigned char __a, vector unsigned char __b) {
+ return __a * __b;
+}
+
+static __inline__ vector signed short __ATTRS_o_ai
+vec_mul(vector signed short __a, vector signed short __b) {
+ return __a * __b;
+}
+
+static __inline__ vector unsigned short __ATTRS_o_ai
+vec_mul(vector unsigned short __a, vector unsigned short __b) {
+ return __a * __b;
+}
+
+static __inline__ vector signed int __ATTRS_o_ai
+vec_mul(vector signed int __a, vector signed int __b) {
+ return __a * __b;
+}
+
+static __inline__ vector unsigned int __ATTRS_o_ai
+vec_mul(vector unsigned int __a, vector unsigned int __b) {
+ return __a * __b;
+}
+
+#ifdef __VSX__
+static __inline__ vector signed long long __ATTRS_o_ai
+vec_mul(vector signed long long __a, vector signed long long __b) {
+ return __a * __b;
+}
+
+static __inline__ vector unsigned long long __ATTRS_o_ai
+vec_mul(vector unsigned long long __a, vector unsigned long long __b) {
+ return __a * __b;
+}
+#endif
+
+static __inline__ vector float __ATTRS_o_ai vec_mul(vector float __a,
+ vector float __b) {
+ return __a * __b;
+}
+
+#ifdef __VSX__
+static __inline__ vector double __ATTRS_o_ai vec_mul(vector double __a,
+ vector double __b) {
+ return __a * __b;
+}
+#endif
+
+/* The vmulos* and vmules* instructions have a big-endian bias, so
+   we must reverse the meaning of "even" and "odd" for little endian. */
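+
+/* Illustrative sketch (values assumed, not part of the header): because
+   the definitions below swap the builtins on little endian, vec_mule and
+   vec_mulo index elements in source order on both endiannesses:
+
+     vector signed short a = {1, 2, 3, 4, 5, 6, 7, 8};
+     vector signed short b = {9, 10, 11, 12, 13, 14, 15, 16};
+     vector signed int e = vec_mule(a, b);  // {1*9, 3*11, 5*13, 7*15}
+     vector signed int o = vec_mulo(a, b);  // {2*10, 4*12, 6*14, 8*16}
+*/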
+
+/* vec_mule */
+
+static __inline__ vector short __ATTRS_o_ai vec_mule(vector signed char __a,
+ vector signed char __b) {
+#ifdef __LITTLE_ENDIAN__
+ return __builtin_altivec_vmulosb(__a, __b);
+#else
+ return __builtin_altivec_vmulesb(__a, __b);
+#endif
+}
+
+static __inline__ vector unsigned short __ATTRS_o_ai
+vec_mule(vector unsigned char __a, vector unsigned char __b) {
+#ifdef __LITTLE_ENDIAN__
+ return __builtin_altivec_vmuloub(__a, __b);
+#else
+ return __builtin_altivec_vmuleub(__a, __b);
+#endif
+}
+
+static __inline__ vector int __ATTRS_o_ai vec_mule(vector short __a,
+ vector short __b) {
+#ifdef __LITTLE_ENDIAN__
+ return __builtin_altivec_vmulosh(__a, __b);
+#else
+ return __builtin_altivec_vmulesh(__a, __b);
+#endif
+}
+
+static __inline__ vector unsigned int __ATTRS_o_ai
+vec_mule(vector unsigned short __a, vector unsigned short __b) {
+#ifdef __LITTLE_ENDIAN__
+ return __builtin_altivec_vmulouh(__a, __b);
+#else
+ return __builtin_altivec_vmuleuh(__a, __b);
+#endif
+}
+
+#ifdef __POWER8_VECTOR__
+static __inline__ vector signed long long __ATTRS_o_ai
+vec_mule(vector signed int __a, vector signed int __b) {
+#ifdef __LITTLE_ENDIAN__
+ return __builtin_altivec_vmulosw(__a, __b);
+#else
+ return __builtin_altivec_vmulesw(__a, __b);
+#endif
+}
+
+static __inline__ vector unsigned long long __ATTRS_o_ai
+vec_mule(vector unsigned int __a, vector unsigned int __b) {
+#ifdef __LITTLE_ENDIAN__
+ return __builtin_altivec_vmulouw(__a, __b);
+#else
+ return __builtin_altivec_vmuleuw(__a, __b);
+#endif
+}
+#endif
+
+/* vec_vmulesb */
+
+static __inline__ vector short __attribute__((__always_inline__))
+vec_vmulesb(vector signed char __a, vector signed char __b) {
+#ifdef __LITTLE_ENDIAN__
+ return __builtin_altivec_vmulosb(__a, __b);
+#else
+ return __builtin_altivec_vmulesb(__a, __b);
+#endif
+}
+
+/* vec_vmuleub */
+
+static __inline__ vector unsigned short __attribute__((__always_inline__))
+vec_vmuleub(vector unsigned char __a, vector unsigned char __b) {
+#ifdef __LITTLE_ENDIAN__
+ return __builtin_altivec_vmuloub(__a, __b);
+#else
+ return __builtin_altivec_vmuleub(__a, __b);
+#endif
+}
+
+/* vec_vmulesh */
+
+static __inline__ vector int __attribute__((__always_inline__))
+vec_vmulesh(vector short __a, vector short __b) {
+#ifdef __LITTLE_ENDIAN__
+ return __builtin_altivec_vmulosh(__a, __b);
+#else
+ return __builtin_altivec_vmulesh(__a, __b);
+#endif
+}
+
+/* vec_vmuleuh */
+
+static __inline__ vector unsigned int __attribute__((__always_inline__))
+vec_vmuleuh(vector unsigned short __a, vector unsigned short __b) {
+#ifdef __LITTLE_ENDIAN__
+ return __builtin_altivec_vmulouh(__a, __b);
+#else
+ return __builtin_altivec_vmuleuh(__a, __b);
+#endif
+}
+
+/* vec_mulo */
+
+static __inline__ vector short __ATTRS_o_ai vec_mulo(vector signed char __a,
+ vector signed char __b) {
+#ifdef __LITTLE_ENDIAN__
+ return __builtin_altivec_vmulesb(__a, __b);
+#else
+ return __builtin_altivec_vmulosb(__a, __b);
+#endif
+}
+
+static __inline__ vector unsigned short __ATTRS_o_ai
+vec_mulo(vector unsigned char __a, vector unsigned char __b) {
+#ifdef __LITTLE_ENDIAN__
+ return __builtin_altivec_vmuleub(__a, __b);
+#else
+ return __builtin_altivec_vmuloub(__a, __b);
+#endif
+}
+
+static __inline__ vector int __ATTRS_o_ai vec_mulo(vector short __a,
+ vector short __b) {
+#ifdef __LITTLE_ENDIAN__
+ return __builtin_altivec_vmulesh(__a, __b);
+#else
+ return __builtin_altivec_vmulosh(__a, __b);
+#endif
+}
+
+static __inline__ vector unsigned int __ATTRS_o_ai
+vec_mulo(vector unsigned short __a, vector unsigned short __b) {
+#ifdef __LITTLE_ENDIAN__
+ return __builtin_altivec_vmuleuh(__a, __b);
+#else
+ return __builtin_altivec_vmulouh(__a, __b);
+#endif
+}
+
+#ifdef __POWER8_VECTOR__
+static __inline__ vector signed long long __ATTRS_o_ai
+vec_mulo(vector signed int __a, vector signed int __b) {
+#ifdef __LITTLE_ENDIAN__
+ return __builtin_altivec_vmulesw(__a, __b);
+#else
+ return __builtin_altivec_vmulosw(__a, __b);
+#endif
+}
+
+static __inline__ vector unsigned long long __ATTRS_o_ai
+vec_mulo(vector unsigned int __a, vector unsigned int __b) {
+#ifdef __LITTLE_ENDIAN__
+ return __builtin_altivec_vmuleuw(__a, __b);
+#else
+ return __builtin_altivec_vmulouw(__a, __b);
+#endif
+}
+#endif
+
+/* vec_vmulosb */
+
+static __inline__ vector short __attribute__((__always_inline__))
+vec_vmulosb(vector signed char __a, vector signed char __b) {
+#ifdef __LITTLE_ENDIAN__
+ return __builtin_altivec_vmulesb(__a, __b);
+#else
+ return __builtin_altivec_vmulosb(__a, __b);
+#endif
+}
+
+/* vec_vmuloub */
+
+static __inline__ vector unsigned short __attribute__((__always_inline__))
+vec_vmuloub(vector unsigned char __a, vector unsigned char __b) {
+#ifdef __LITTLE_ENDIAN__
+ return __builtin_altivec_vmuleub(__a, __b);
+#else
+ return __builtin_altivec_vmuloub(__a, __b);
+#endif
+}
+
+/* vec_vmulosh */
+
+static __inline__ vector int __attribute__((__always_inline__))
+vec_vmulosh(vector short __a, vector short __b) {
+#ifdef __LITTLE_ENDIAN__
+ return __builtin_altivec_vmulesh(__a, __b);
+#else
+ return __builtin_altivec_vmulosh(__a, __b);
+#endif
+}
+
+/* vec_vmulouh */
+
+static __inline__ vector unsigned int __attribute__((__always_inline__))
+vec_vmulouh(vector unsigned short __a, vector unsigned short __b) {
+#ifdef __LITTLE_ENDIAN__
+ return __builtin_altivec_vmuleuh(__a, __b);
+#else
+ return __builtin_altivec_vmulouh(__a, __b);
+#endif
+}
+
+/* vec_nand */
+
+#ifdef __POWER8_VECTOR__
+static __inline__ vector signed char __ATTRS_o_ai
+vec_nand(vector signed char __a, vector signed char __b) {
+ return ~(__a & __b);
+}
+
+static __inline__ vector signed char __ATTRS_o_ai
+vec_nand(vector signed char __a, vector bool char __b) {
+ return ~(__a & __b);
+}
+
+static __inline__ vector signed char __ATTRS_o_ai
+vec_nand(vector bool char __a, vector signed char __b) {
+ return ~(__a & __b);
+}
+
+static __inline__ vector unsigned char __ATTRS_o_ai
+vec_nand(vector unsigned char __a, vector unsigned char __b) {
+ return ~(__a & __b);
+}
+
+static __inline__ vector unsigned char __ATTRS_o_ai
+vec_nand(vector unsigned char __a, vector bool char __b) {
+ return ~(__a & __b);
+}
+
+static __inline__ vector unsigned char __ATTRS_o_ai
+vec_nand(vector bool char __a, vector unsigned char __b) {
+ return ~(__a & __b);
+}
+
+static __inline__ vector bool char __ATTRS_o_ai vec_nand(vector bool char __a,
+ vector bool char __b) {
+ return ~(__a & __b);
+}
+
+static __inline__ vector signed short __ATTRS_o_ai
+vec_nand(vector signed short __a, vector signed short __b) {
+ return ~(__a & __b);
+}
+
+static __inline__ vector signed short __ATTRS_o_ai
+vec_nand(vector signed short __a, vector bool short __b) {
+ return ~(__a & __b);
+}
+
+static __inline__ vector signed short __ATTRS_o_ai
+vec_nand(vector bool short __a, vector signed short __b) {
+ return ~(__a & __b);
+}
+
+static __inline__ vector unsigned short __ATTRS_o_ai
+vec_nand(vector unsigned short __a, vector unsigned short __b) {
+ return ~(__a & __b);
+}
+
+static __inline__ vector unsigned short __ATTRS_o_ai
+vec_nand(vector unsigned short __a, vector bool short __b) {
+ return ~(__a & __b);
+}
+
+static __inline__ vector bool short __ATTRS_o_ai
+vec_nand(vector bool short __a, vector bool short __b) {
+ return ~(__a & __b);
+}
+
+static __inline__ vector signed int __ATTRS_o_ai
+vec_nand(vector signed int __a, vector signed int __b) {
+ return ~(__a & __b);
+}
+
+static __inline__ vector signed int __ATTRS_o_ai vec_nand(vector signed int __a,
+ vector bool int __b) {
+ return ~(__a & __b);
+}
+
+static __inline__ vector signed int __ATTRS_o_ai
+vec_nand(vector bool int __a, vector signed int __b) {
+ return ~(__a & __b);
+}
+
+static __inline__ vector unsigned int __ATTRS_o_ai
+vec_nand(vector unsigned int __a, vector unsigned int __b) {
+ return ~(__a & __b);
+}
+
+static __inline__ vector unsigned int __ATTRS_o_ai
+vec_nand(vector unsigned int __a, vector bool int __b) {
+ return ~(__a & __b);
+}
+
+static __inline__ vector unsigned int __ATTRS_o_ai
+vec_nand(vector bool int __a, vector unsigned int __b) {
+ return ~(__a & __b);
+}
+
+static __inline__ vector bool int __ATTRS_o_ai vec_nand(vector bool int __a,
+ vector bool int __b) {
+ return ~(__a & __b);
+}
+
+static __inline__ vector signed long long __ATTRS_o_ai
+vec_nand(vector signed long long __a, vector signed long long __b) {
+ return ~(__a & __b);
+}
+
+static __inline__ vector signed long long __ATTRS_o_ai
+vec_nand(vector signed long long __a, vector bool long long __b) {
+ return ~(__a & __b);
+}
+
+static __inline__ vector signed long long __ATTRS_o_ai
+vec_nand(vector bool long long __a, vector signed long long __b) {
+ return ~(__a & __b);
+}
+
+static __inline__ vector unsigned long long __ATTRS_o_ai
+vec_nand(vector unsigned long long __a, vector unsigned long long __b) {
+ return ~(__a & __b);
+}
+
+static __inline__ vector unsigned long long __ATTRS_o_ai
+vec_nand(vector unsigned long long __a, vector bool long long __b) {
+ return ~(__a & __b);
+}
+
+static __inline__ vector unsigned long long __ATTRS_o_ai
+vec_nand(vector bool long long __a, vector unsigned long long __b) {
+ return ~(__a & __b);
+}
+
+static __inline__ vector bool long long __ATTRS_o_ai
+vec_nand(vector bool long long __a, vector bool long long __b) {
+ return ~(__a & __b);
+}
+
+#endif
+
+/* vec_nmadd */
+
+#ifdef __VSX__
+static __inline__ vector float __ATTRS_o_ai vec_nmadd(vector float __a,
+ vector float __b,
+ vector float __c) {
+ return __builtin_vsx_xvnmaddasp(__a, __b, __c);
+}
+
+static __inline__ vector double __ATTRS_o_ai vec_nmadd(vector double __a,
+ vector double __b,
+ vector double __c) {
+ return __builtin_vsx_xvnmaddadp(__a, __b, __c);
+}
+#endif
+
+/* vec_nmsub */
+
+static __inline__ vector float __ATTRS_o_ai vec_nmsub(vector float __a,
+ vector float __b,
+ vector float __c) {
+#ifdef __VSX__
+ return __builtin_vsx_xvnmsubasp(__a, __b, __c);
+#else
+ return __builtin_altivec_vnmsubfp(__a, __b, __c);
+#endif
+}
+
+#ifdef __VSX__
+static __inline__ vector double __ATTRS_o_ai vec_nmsub(vector double __a,
+ vector double __b,
+ vector double __c) {
+ return __builtin_vsx_xvnmsubadp(__a, __b, __c);
+}
+#endif
+
+/* vec_vnmsubfp */
+
+static __inline__ vector float __attribute__((__always_inline__))
+vec_vnmsubfp(vector float __a, vector float __b, vector float __c) {
+ return __builtin_altivec_vnmsubfp(__a, __b, __c);
+}
+
+/* vec_nor */
+
+#define __builtin_altivec_vnor vec_nor
+
+static __inline__ vector signed char __ATTRS_o_ai
+vec_nor(vector signed char __a, vector signed char __b) {
+ return ~(__a | __b);
+}
+
+static __inline__ vector unsigned char __ATTRS_o_ai
+vec_nor(vector unsigned char __a, vector unsigned char __b) {
+ return ~(__a | __b);
+}
+
+static __inline__ vector bool char __ATTRS_o_ai vec_nor(vector bool char __a,
+ vector bool char __b) {
+ return ~(__a | __b);
+}
+
+static __inline__ vector short __ATTRS_o_ai vec_nor(vector short __a,
+ vector short __b) {
+ return ~(__a | __b);
+}
+
+static __inline__ vector unsigned short __ATTRS_o_ai
+vec_nor(vector unsigned short __a, vector unsigned short __b) {
+ return ~(__a | __b);
+}
+
+static __inline__ vector bool short __ATTRS_o_ai
+vec_nor(vector bool short __a, vector bool short __b) {
+ return ~(__a | __b);
+}
+
+static __inline__ vector int __ATTRS_o_ai vec_nor(vector int __a,
+ vector int __b) {
+ return ~(__a | __b);
+}
+
+static __inline__ vector unsigned int __ATTRS_o_ai
+vec_nor(vector unsigned int __a, vector unsigned int __b) {
+ return ~(__a | __b);
+}
+
+static __inline__ vector bool int __ATTRS_o_ai vec_nor(vector bool int __a,
+ vector bool int __b) {
+ return ~(__a | __b);
+}
+
+static __inline__ vector float __ATTRS_o_ai vec_nor(vector float __a,
+ vector float __b) {
+ vector unsigned int __res =
+ ~((vector unsigned int)__a | (vector unsigned int)__b);
+ return (vector float)__res;
+}
+
+#ifdef __VSX__
+static __inline__ vector double __ATTRS_o_ai vec_nor(vector double __a,
+ vector double __b) {
+ vector unsigned long long __res =
+ ~((vector unsigned long long)__a | (vector unsigned long long)__b);
+ return (vector double)__res;
+}
+#endif
+
+/* vec_vnor */
+
+static __inline__ vector signed char __ATTRS_o_ai
+vec_vnor(vector signed char __a, vector signed char __b) {
+ return ~(__a | __b);
+}
+
+static __inline__ vector unsigned char __ATTRS_o_ai
+vec_vnor(vector unsigned char __a, vector unsigned char __b) {
+ return ~(__a | __b);
+}
+
+static __inline__ vector bool char __ATTRS_o_ai vec_vnor(vector bool char __a,
+ vector bool char __b) {
+ return ~(__a | __b);
+}
+
+static __inline__ vector short __ATTRS_o_ai vec_vnor(vector short __a,
+ vector short __b) {
+ return ~(__a | __b);
+}
+
+static __inline__ vector unsigned short __ATTRS_o_ai
+vec_vnor(vector unsigned short __a, vector unsigned short __b) {
+ return ~(__a | __b);
+}
+
+static __inline__ vector bool short __ATTRS_o_ai
+vec_vnor(vector bool short __a, vector bool short __b) {
+ return ~(__a | __b);
+}
+
+static __inline__ vector int __ATTRS_o_ai vec_vnor(vector int __a,
+ vector int __b) {
+ return ~(__a | __b);
+}
+
+static __inline__ vector unsigned int __ATTRS_o_ai
+vec_vnor(vector unsigned int __a, vector unsigned int __b) {
+ return ~(__a | __b);
+}
+
+static __inline__ vector bool int __ATTRS_o_ai vec_vnor(vector bool int __a,
+ vector bool int __b) {
+ return ~(__a | __b);
+}
+
+static __inline__ vector float __ATTRS_o_ai vec_vnor(vector float __a,
+ vector float __b) {
+ vector unsigned int __res =
+ ~((vector unsigned int)__a | (vector unsigned int)__b);
+ return (vector float)__res;
+}
+
+#ifdef __VSX__
+static __inline__ vector signed long long __ATTRS_o_ai
+vec_nor(vector signed long long __a, vector signed long long __b) {
+ return ~(__a | __b);
+}
+
+static __inline__ vector unsigned long long __ATTRS_o_ai
+vec_nor(vector unsigned long long __a, vector unsigned long long __b) {
+ return ~(__a | __b);
+}
+
+static __inline__ vector bool long long __ATTRS_o_ai
+vec_nor(vector bool long long __a, vector bool long long __b) {
+ return ~(__a | __b);
+}
+#endif
+
+/* vec_or */
+
+#define __builtin_altivec_vor vec_or
+
+static __inline__ vector signed char __ATTRS_o_ai
+vec_or(vector signed char __a, vector signed char __b) {
+ return __a | __b;
+}
+
+static __inline__ vector signed char __ATTRS_o_ai
+vec_or(vector bool char __a, vector signed char __b) {
+ return (vector signed char)__a | __b;
+}
+
+static __inline__ vector signed char __ATTRS_o_ai vec_or(vector signed char __a,
+ vector bool char __b) {
+ return __a | (vector signed char)__b;
+}
+
+static __inline__ vector unsigned char __ATTRS_o_ai
+vec_or(vector unsigned char __a, vector unsigned char __b) {
+ return __a | __b;
+}
+
+static __inline__ vector unsigned char __ATTRS_o_ai
+vec_or(vector bool char __a, vector unsigned char __b) {
+ return (vector unsigned char)__a | __b;
+}
+
+static __inline__ vector unsigned char __ATTRS_o_ai
+vec_or(vector unsigned char __a, vector bool char __b) {
+ return __a | (vector unsigned char)__b;
+}
+
+static __inline__ vector bool char __ATTRS_o_ai vec_or(vector bool char __a,
+ vector bool char __b) {
+ return __a | __b;
+}
+
+static __inline__ vector short __ATTRS_o_ai vec_or(vector short __a,
+ vector short __b) {
+ return __a | __b;
+}
+
+static __inline__ vector short __ATTRS_o_ai vec_or(vector bool short __a,
+ vector short __b) {
+ return (vector short)__a | __b;
+}
+
+static __inline__ vector short __ATTRS_o_ai vec_or(vector short __a,
+ vector bool short __b) {
+ return __a | (vector short)__b;
+}
+
+static __inline__ vector unsigned short __ATTRS_o_ai
+vec_or(vector unsigned short __a, vector unsigned short __b) {
+ return __a | __b;
+}
+
+static __inline__ vector unsigned short __ATTRS_o_ai
+vec_or(vector bool short __a, vector unsigned short __b) {
+ return (vector unsigned short)__a | __b;
+}
+
+static __inline__ vector unsigned short __ATTRS_o_ai
+vec_or(vector unsigned short __a, vector bool short __b) {
+ return __a | (vector unsigned short)__b;
+}
+
+static __inline__ vector bool short __ATTRS_o_ai vec_or(vector bool short __a,
+ vector bool short __b) {
+ return __a | __b;
+}
+
+static __inline__ vector int __ATTRS_o_ai vec_or(vector int __a,
+ vector int __b) {
+ return __a | __b;
+}
+
+static __inline__ vector int __ATTRS_o_ai vec_or(vector bool int __a,
+ vector int __b) {
+ return (vector int)__a | __b;
+}
+
+static __inline__ vector int __ATTRS_o_ai vec_or(vector int __a,
+ vector bool int __b) {
+ return __a | (vector int)__b;
+}
+
+static __inline__ vector unsigned int __ATTRS_o_ai
+vec_or(vector unsigned int __a, vector unsigned int __b) {
+ return __a | __b;
+}
+
+static __inline__ vector unsigned int __ATTRS_o_ai
+vec_or(vector bool int __a, vector unsigned int __b) {
+ return (vector unsigned int)__a | __b;
+}
+
+static __inline__ vector unsigned int __ATTRS_o_ai
+vec_or(vector unsigned int __a, vector bool int __b) {
+ return __a | (vector unsigned int)__b;
+}
+
+static __inline__ vector bool int __ATTRS_o_ai vec_or(vector bool int __a,
+ vector bool int __b) {
+ return __a | __b;
+}
+
+static __inline__ vector float __ATTRS_o_ai vec_or(vector float __a,
+ vector float __b) {
+ vector unsigned int __res =
+ (vector unsigned int)__a | (vector unsigned int)__b;
+ return (vector float)__res;
+}
+
+static __inline__ vector float __ATTRS_o_ai vec_or(vector bool int __a,
+ vector float __b) {
+ vector unsigned int __res =
+ (vector unsigned int)__a | (vector unsigned int)__b;
+ return (vector float)__res;
+}
+
+static __inline__ vector float __ATTRS_o_ai vec_or(vector float __a,
+ vector bool int __b) {
+ vector unsigned int __res =
+ (vector unsigned int)__a | (vector unsigned int)__b;
+ return (vector float)__res;
+}
+
+#ifdef __VSX__
+static __inline__ vector double __ATTRS_o_ai vec_or(vector bool long long __a,
+ vector double __b) {
+ return (vector unsigned long long)__a | (vector unsigned long long)__b;
+}
+
+static __inline__ vector double __ATTRS_o_ai vec_or(vector double __a,
+ vector bool long long __b) {
+ return (vector unsigned long long)__a | (vector unsigned long long)__b;
+}
+
+static __inline__ vector double __ATTRS_o_ai vec_or(vector double __a,
+ vector double __b) {
+ vector unsigned long long __res =
+ (vector unsigned long long)__a | (vector unsigned long long)__b;
+ return (vector double)__res;
+}
+
+static __inline__ vector signed long long __ATTRS_o_ai
+vec_or(vector signed long long __a, vector signed long long __b) {
+ return __a | __b;
+}
+
+static __inline__ vector signed long long __ATTRS_o_ai
+vec_or(vector bool long long __a, vector signed long long __b) {
+ return (vector signed long long)__a | __b;
+}
+
+static __inline__ vector signed long long __ATTRS_o_ai
+vec_or(vector signed long long __a, vector bool long long __b) {
+ return __a | (vector signed long long)__b;
+}
+
+static __inline__ vector unsigned long long __ATTRS_o_ai
+vec_or(vector unsigned long long __a, vector unsigned long long __b) {
+ return __a | __b;
+}
+
+static __inline__ vector unsigned long long __ATTRS_o_ai
+vec_or(vector bool long long __a, vector unsigned long long __b) {
+ return (vector unsigned long long)__a | __b;
+}
+
+static __inline__ vector unsigned long long __ATTRS_o_ai
+vec_or(vector unsigned long long __a, vector bool long long __b) {
+ return __a | (vector unsigned long long)__b;
+}
+
+static __inline__ vector bool long long __ATTRS_o_ai
+vec_or(vector bool long long __a, vector bool long long __b) {
+ return __a | __b;
+}
+#endif
+
+#ifdef __POWER8_VECTOR__
+static __inline__ vector signed char __ATTRS_o_ai
+vec_orc(vector signed char __a, vector signed char __b) {
+ return __a | ~__b;
+}
+
+static __inline__ vector signed char __ATTRS_o_ai
+vec_orc(vector signed char __a, vector bool char __b) {
+ return __a | ~__b;
+}
+
+static __inline__ vector signed char __ATTRS_o_ai
+vec_orc(vector bool char __a, vector signed char __b) {
+ return __a | ~__b;
+}
+
+static __inline__ vector unsigned char __ATTRS_o_ai
+vec_orc(vector unsigned char __a, vector unsigned char __b) {
+ return __a | ~__b;
+}
+
+static __inline__ vector unsigned char __ATTRS_o_ai
+vec_orc(vector unsigned char __a, vector bool char __b) {
+ return __a | ~__b;
+}
+
+static __inline__ vector unsigned char __ATTRS_o_ai
+vec_orc(vector bool char __a, vector unsigned char __b) {
+ return __a | ~__b;
+}
+
+static __inline__ vector bool char __ATTRS_o_ai vec_orc(vector bool char __a,
+ vector bool char __b) {
+ return __a | ~__b;
+}
+
+static __inline__ vector signed short __ATTRS_o_ai
+vec_orc(vector signed short __a, vector signed short __b) {
+ return __a | ~__b;
+}
+
+static __inline__ vector signed short __ATTRS_o_ai
+vec_orc(vector signed short __a, vector bool short __b) {
+ return __a | ~__b;
+}
+
+static __inline__ vector signed short __ATTRS_o_ai
+vec_orc(vector bool short __a, vector signed short __b) {
+ return __a | ~__b;
+}
+
+static __inline__ vector unsigned short __ATTRS_o_ai
+vec_orc(vector unsigned short __a, vector unsigned short __b) {
+ return __a | ~__b;
+}
+
+static __inline__ vector unsigned short __ATTRS_o_ai
+vec_orc(vector unsigned short __a, vector bool short __b) {
+ return __a | ~__b;
+}
+
+static __inline__ vector unsigned short __ATTRS_o_ai
+vec_orc(vector bool short __a, vector unsigned short __b) {
+ return __a | ~__b;
+}
+
+static __inline__ vector bool short __ATTRS_o_ai
+vec_orc(vector bool short __a, vector bool short __b) {
+ return __a | ~__b;
+}
+
+static __inline__ vector signed int __ATTRS_o_ai
+vec_orc(vector signed int __a, vector signed int __b) {
+ return __a | ~__b;
+}
+
+static __inline__ vector signed int __ATTRS_o_ai vec_orc(vector signed int __a,
+ vector bool int __b) {
+ return __a | ~__b;
+}
+
+static __inline__ vector signed int __ATTRS_o_ai
+vec_orc(vector bool int __a, vector signed int __b) {
+ return __a | ~__b;
+}
+
+static __inline__ vector unsigned int __ATTRS_o_ai
+vec_orc(vector unsigned int __a, vector unsigned int __b) {
+ return __a | ~__b;
+}
+
+static __inline__ vector unsigned int __ATTRS_o_ai
+vec_orc(vector unsigned int __a, vector bool int __b) {
+ return __a | ~__b;
+}
+
+static __inline__ vector unsigned int __ATTRS_o_ai
+vec_orc(vector bool int __a, vector unsigned int __b) {
+ return __a | ~__b;
+}
+
+static __inline__ vector bool int __ATTRS_o_ai vec_orc(vector bool int __a,
+ vector bool int __b) {
+ return __a | ~__b;
+}
+
+static __inline__ vector signed long long __ATTRS_o_ai
+vec_orc(vector signed long long __a, vector signed long long __b) {
+ return __a | ~__b;
+}
+
+static __inline__ vector signed long long __ATTRS_o_ai
+vec_orc(vector signed long long __a, vector bool long long __b) {
+ return __a | ~__b;
+}
+
+static __inline__ vector signed long long __ATTRS_o_ai
+vec_orc(vector bool long long __a, vector signed long long __b) {
+ return __a | ~__b;
+}
+
+static __inline__ vector unsigned long long __ATTRS_o_ai
+vec_orc(vector unsigned long long __a, vector unsigned long long __b) {
+ return __a | ~__b;
+}
+
+static __inline__ vector unsigned long long __ATTRS_o_ai
+vec_orc(vector unsigned long long __a, vector bool long long __b) {
+ return __a | ~__b;
+}
+
+static __inline__ vector unsigned long long __ATTRS_o_ai
+vec_orc(vector bool long long __a, vector unsigned long long __b) {
+ return __a | ~__b;
+}
+
+static __inline__ vector bool long long __ATTRS_o_ai
+vec_orc(vector bool long long __a, vector bool long long __b) {
+ return __a | ~__b;
+}
+#endif
+
+/* vec_vor */
+
+static __inline__ vector signed char __ATTRS_o_ai
+vec_vor(vector signed char __a, vector signed char __b) {
+ return __a | __b;
+}
+
+static __inline__ vector signed char __ATTRS_o_ai
+vec_vor(vector bool char __a, vector signed char __b) {
+ return (vector signed char)__a | __b;
+}
+
+static __inline__ vector signed char __ATTRS_o_ai
+vec_vor(vector signed char __a, vector bool char __b) {
+ return __a | (vector signed char)__b;
+}
+
+static __inline__ vector unsigned char __ATTRS_o_ai
+vec_vor(vector unsigned char __a, vector unsigned char __b) {
+ return __a | __b;
+}
+
+static __inline__ vector unsigned char __ATTRS_o_ai
+vec_vor(vector bool char __a, vector unsigned char __b) {
+ return (vector unsigned char)__a | __b;
+}
+
+static __inline__ vector unsigned char __ATTRS_o_ai
+vec_vor(vector unsigned char __a, vector bool char __b) {
+ return __a | (vector unsigned char)__b;
+}
+
+static __inline__ vector bool char __ATTRS_o_ai vec_vor(vector bool char __a,
+ vector bool char __b) {
+ return __a | __b;
+}
+
+static __inline__ vector short __ATTRS_o_ai vec_vor(vector short __a,
+ vector short __b) {
+ return __a | __b;
+}
+
+static __inline__ vector short __ATTRS_o_ai vec_vor(vector bool short __a,
+ vector short __b) {
+ return (vector short)__a | __b;
+}
+
+static __inline__ vector short __ATTRS_o_ai vec_vor(vector short __a,
+ vector bool short __b) {
+ return __a | (vector short)__b;
+}
+
+static __inline__ vector unsigned short __ATTRS_o_ai
+vec_vor(vector unsigned short __a, vector unsigned short __b) {
+ return __a | __b;
+}
+
+static __inline__ vector unsigned short __ATTRS_o_ai
+vec_vor(vector bool short __a, vector unsigned short __b) {
+ return (vector unsigned short)__a | __b;
+}
+
+static __inline__ vector unsigned short __ATTRS_o_ai
+vec_vor(vector unsigned short __a, vector bool short __b) {
+ return __a | (vector unsigned short)__b;
+}
+
+static __inline__ vector bool short __ATTRS_o_ai
+vec_vor(vector bool short __a, vector bool short __b) {
+ return __a | __b;
+}
+
+static __inline__ vector int __ATTRS_o_ai vec_vor(vector int __a,
+ vector int __b) {
+ return __a | __b;
+}
+
+static __inline__ vector int __ATTRS_o_ai vec_vor(vector bool int __a,
+ vector int __b) {
+ return (vector int)__a | __b;
+}
+
+static __inline__ vector int __ATTRS_o_ai vec_vor(vector int __a,
+ vector bool int __b) {
+ return __a | (vector int)__b;
+}
+
+static __inline__ vector unsigned int __ATTRS_o_ai
+vec_vor(vector unsigned int __a, vector unsigned int __b) {
+ return __a | __b;
+}
+
+static __inline__ vector unsigned int __ATTRS_o_ai
+vec_vor(vector bool int __a, vector unsigned int __b) {
+ return (vector unsigned int)__a | __b;
+}
+
+static __inline__ vector unsigned int __ATTRS_o_ai
+vec_vor(vector unsigned int __a, vector bool int __b) {
+ return __a | (vector unsigned int)__b;
+}
+
+static __inline__ vector bool int __ATTRS_o_ai vec_vor(vector bool int __a,
+ vector bool int __b) {
+ return __a | __b;
+}
+
+static __inline__ vector float __ATTRS_o_ai vec_vor(vector float __a,
+ vector float __b) {
+ vector unsigned int __res =
+ (vector unsigned int)__a | (vector unsigned int)__b;
+ return (vector float)__res;
+}
+
+static __inline__ vector float __ATTRS_o_ai vec_vor(vector bool int __a,
+ vector float __b) {
+ vector unsigned int __res =
+ (vector unsigned int)__a | (vector unsigned int)__b;
+ return (vector float)__res;
+}
+
+static __inline__ vector float __ATTRS_o_ai vec_vor(vector float __a,
+ vector bool int __b) {
+ vector unsigned int __res =
+ (vector unsigned int)__a | (vector unsigned int)__b;
+ return (vector float)__res;
+}
+
+#ifdef __VSX__
+static __inline__ vector signed long long __ATTRS_o_ai
+vec_vor(vector signed long long __a, vector signed long long __b) {
+ return __a | __b;
+}
+
+static __inline__ vector signed long long __ATTRS_o_ai
+vec_vor(vector bool long long __a, vector signed long long __b) {
+ return (vector signed long long)__a | __b;
+}
+
+static __inline__ vector signed long long __ATTRS_o_ai
+vec_vor(vector signed long long __a, vector bool long long __b) {
+ return __a | (vector signed long long)__b;
+}
+
+static __inline__ vector unsigned long long __ATTRS_o_ai
+vec_vor(vector unsigned long long __a, vector unsigned long long __b) {
+ return __a | __b;
+}
+
+static __inline__ vector unsigned long long __ATTRS_o_ai
+vec_vor(vector bool long long __a, vector unsigned long long __b) {
+ return (vector unsigned long long)__a | __b;
+}
+
+static __inline__ vector unsigned long long __ATTRS_o_ai
+vec_vor(vector unsigned long long __a, vector bool long long __b) {
+ return __a | (vector unsigned long long)__b;
+}
+
+static __inline__ vector bool long long __ATTRS_o_ai
+vec_vor(vector bool long long __a, vector bool long long __b) {
+ return __a | __b;
+}
+#endif
+
+/* vec_pack */
+
+/* The various vector pack instructions have a big-endian bias, so for
+ little endian we must handle reversed element numbering. */
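+
+/* Worked example (values assumed, not part of the header): vec_pack is a
+   modular (truncating) narrowing that keeps the low half of each element,
+   with __a filling the low-numbered result lanes:
+
+     vector signed int a = {0x00010002, 0x00030004, 5, 6};
+     vector signed int b = {7, 8, 9, 0x7FFFFFFF};
+     vector signed short r = vec_pack(a, b);
+     // r = {2, 4, 5, 6, 7, 8, 9, -1}; each lane is the low 16 bits.
+*/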
+
+static __inline__ vector signed char __ATTRS_o_ai
+vec_pack(vector signed short __a, vector signed short __b) {
+#ifdef __LITTLE_ENDIAN__
+ return (vector signed char)vec_perm(
+ __a, __b,
+ (vector unsigned char)(0x00, 0x02, 0x04, 0x06, 0x08, 0x0A, 0x0C, 0x0E,
+ 0x10, 0x12, 0x14, 0x16, 0x18, 0x1A, 0x1C, 0x1E));
+#else
+ return (vector signed char)vec_perm(
+ __a, __b,
+ (vector unsigned char)(0x01, 0x03, 0x05, 0x07, 0x09, 0x0B, 0x0D, 0x0F,
+ 0x11, 0x13, 0x15, 0x17, 0x19, 0x1B, 0x1D, 0x1F));
+#endif
+}
+
+static __inline__ vector unsigned char __ATTRS_o_ai
+vec_pack(vector unsigned short __a, vector unsigned short __b) {
+#ifdef __LITTLE_ENDIAN__
+ return (vector unsigned char)vec_perm(
+ __a, __b,
+ (vector unsigned char)(0x00, 0x02, 0x04, 0x06, 0x08, 0x0A, 0x0C, 0x0E,
+ 0x10, 0x12, 0x14, 0x16, 0x18, 0x1A, 0x1C, 0x1E));
+#else
+ return (vector unsigned char)vec_perm(
+ __a, __b,
+ (vector unsigned char)(0x01, 0x03, 0x05, 0x07, 0x09, 0x0B, 0x0D, 0x0F,
+ 0x11, 0x13, 0x15, 0x17, 0x19, 0x1B, 0x1D, 0x1F));
+#endif
+}
+
+static __inline__ vector bool char __ATTRS_o_ai
+vec_pack(vector bool short __a, vector bool short __b) {
+#ifdef __LITTLE_ENDIAN__
+ return (vector bool char)vec_perm(
+ __a, __b,
+ (vector unsigned char)(0x00, 0x02, 0x04, 0x06, 0x08, 0x0A, 0x0C, 0x0E,
+ 0x10, 0x12, 0x14, 0x16, 0x18, 0x1A, 0x1C, 0x1E));
+#else
+ return (vector bool char)vec_perm(
+ __a, __b,
+ (vector unsigned char)(0x01, 0x03, 0x05, 0x07, 0x09, 0x0B, 0x0D, 0x0F,
+ 0x11, 0x13, 0x15, 0x17, 0x19, 0x1B, 0x1D, 0x1F));
+#endif
+}
+
+static __inline__ vector short __ATTRS_o_ai vec_pack(vector int __a,
+ vector int __b) {
+#ifdef __LITTLE_ENDIAN__
+ return (vector short)vec_perm(
+ __a, __b,
+ (vector unsigned char)(0x00, 0x01, 0x04, 0x05, 0x08, 0x09, 0x0C, 0x0D,
+ 0x10, 0x11, 0x14, 0x15, 0x18, 0x19, 0x1C, 0x1D));
+#else
+ return (vector short)vec_perm(
+ __a, __b,
+ (vector unsigned char)(0x02, 0x03, 0x06, 0x07, 0x0A, 0x0B, 0x0E, 0x0F,
+ 0x12, 0x13, 0x16, 0x17, 0x1A, 0x1B, 0x1E, 0x1F));
+#endif
+}
+
+static __inline__ vector unsigned short __ATTRS_o_ai
+vec_pack(vector unsigned int __a, vector unsigned int __b) {
+#ifdef __LITTLE_ENDIAN__
+ return (vector unsigned short)vec_perm(
+ __a, __b,
+ (vector unsigned char)(0x00, 0x01, 0x04, 0x05, 0x08, 0x09, 0x0C, 0x0D,
+ 0x10, 0x11, 0x14, 0x15, 0x18, 0x19, 0x1C, 0x1D));
+#else
+ return (vector unsigned short)vec_perm(
+ __a, __b,
+ (vector unsigned char)(0x02, 0x03, 0x06, 0x07, 0x0A, 0x0B, 0x0E, 0x0F,
+ 0x12, 0x13, 0x16, 0x17, 0x1A, 0x1B, 0x1E, 0x1F));
+#endif
+}
+
+static __inline__ vector bool short __ATTRS_o_ai vec_pack(vector bool int __a,
+ vector bool int __b) {
+#ifdef __LITTLE_ENDIAN__
+ return (vector bool short)vec_perm(
+ __a, __b,
+ (vector unsigned char)(0x00, 0x01, 0x04, 0x05, 0x08, 0x09, 0x0C, 0x0D,
+ 0x10, 0x11, 0x14, 0x15, 0x18, 0x19, 0x1C, 0x1D));
+#else
+ return (vector bool short)vec_perm(
+ __a, __b,
+ (vector unsigned char)(0x02, 0x03, 0x06, 0x07, 0x0A, 0x0B, 0x0E, 0x0F,
+ 0x12, 0x13, 0x16, 0x17, 0x1A, 0x1B, 0x1E, 0x1F));
+#endif
+}
+
+#ifdef __VSX__
+static __inline__ vector signed int __ATTRS_o_ai
+vec_pack(vector signed long long __a, vector signed long long __b) {
+#ifdef __LITTLE_ENDIAN__
+ return (vector signed int)vec_perm(
+ __a, __b,
+ (vector unsigned char)(0x00, 0x01, 0x02, 0x03, 0x08, 0x09, 0x0A, 0x0B,
+ 0x10, 0x11, 0x12, 0x13, 0x18, 0x19, 0x1A, 0x1B));
+#else
+ return (vector signed int)vec_perm(
+ __a, __b,
+ (vector unsigned char)(0x04, 0x05, 0x06, 0x07, 0x0C, 0x0D, 0x0E, 0x0F,
+ 0x14, 0x15, 0x16, 0x17, 0x1C, 0x1D, 0x1E, 0x1F));
+#endif
+}
+static __inline__ vector unsigned int __ATTRS_o_ai
+vec_pack(vector unsigned long long __a, vector unsigned long long __b) {
+#ifdef __LITTLE_ENDIAN__
+ return (vector unsigned int)vec_perm(
+ __a, __b,
+ (vector unsigned char)(0x00, 0x01, 0x02, 0x03, 0x08, 0x09, 0x0A, 0x0B,
+ 0x10, 0x11, 0x12, 0x13, 0x18, 0x19, 0x1A, 0x1B));
+#else
+ return (vector unsigned int)vec_perm(
+ __a, __b,
+ (vector unsigned char)(0x04, 0x05, 0x06, 0x07, 0x0C, 0x0D, 0x0E, 0x0F,
+ 0x14, 0x15, 0x16, 0x17, 0x1C, 0x1D, 0x1E, 0x1F));
+#endif
+}
+
+static __inline__ vector bool int __ATTRS_o_ai
+vec_pack(vector bool long long __a, vector bool long long __b) {
+#ifdef __LITTLE_ENDIAN__
+ return (vector bool int)vec_perm(
+ __a, __b,
+ (vector unsigned char)(0x00, 0x01, 0x02, 0x03, 0x08, 0x09, 0x0A, 0x0B,
+ 0x10, 0x11, 0x12, 0x13, 0x18, 0x19, 0x1A, 0x1B));
+#else
+ return (vector bool int)vec_perm(
+ __a, __b,
+ (vector unsigned char)(0x04, 0x05, 0x06, 0x07, 0x0C, 0x0D, 0x0E, 0x0F,
+ 0x14, 0x15, 0x16, 0x17, 0x1C, 0x1D, 0x1E, 0x1F));
+#endif
+}
+
+#endif
+
+/* vec_vpkuhum */
+
+#define __builtin_altivec_vpkuhum vec_vpkuhum
+
+static __inline__ vector signed char __ATTRS_o_ai
+vec_vpkuhum(vector signed short __a, vector signed short __b) {
+#ifdef __LITTLE_ENDIAN__
+ return (vector signed char)vec_perm(
+ __a, __b,
+ (vector unsigned char)(0x00, 0x02, 0x04, 0x06, 0x08, 0x0A, 0x0C, 0x0E,
+ 0x10, 0x12, 0x14, 0x16, 0x18, 0x1A, 0x1C, 0x1E));
+#else
+ return (vector signed char)vec_perm(
+ __a, __b,
+ (vector unsigned char)(0x01, 0x03, 0x05, 0x07, 0x09, 0x0B, 0x0D, 0x0F,
+ 0x11, 0x13, 0x15, 0x17, 0x19, 0x1B, 0x1D, 0x1F));
+#endif
+}
+
+static __inline__ vector unsigned char __ATTRS_o_ai
+vec_vpkuhum(vector unsigned short __a, vector unsigned short __b) {
+#ifdef __LITTLE_ENDIAN__
+ return (vector unsigned char)vec_perm(
+ __a, __b,
+ (vector unsigned char)(0x00, 0x02, 0x04, 0x06, 0x08, 0x0A, 0x0C, 0x0E,
+ 0x10, 0x12, 0x14, 0x16, 0x18, 0x1A, 0x1C, 0x1E));
+#else
+ return (vector unsigned char)vec_perm(
+ __a, __b,
+ (vector unsigned char)(0x01, 0x03, 0x05, 0x07, 0x09, 0x0B, 0x0D, 0x0F,
+ 0x11, 0x13, 0x15, 0x17, 0x19, 0x1B, 0x1D, 0x1F));
+#endif
+}
+
+static __inline__ vector bool char __ATTRS_o_ai
+vec_vpkuhum(vector bool short __a, vector bool short __b) {
+#ifdef __LITTLE_ENDIAN__
+ return (vector bool char)vec_perm(
+ __a, __b,
+ (vector unsigned char)(0x00, 0x02, 0x04, 0x06, 0x08, 0x0A, 0x0C, 0x0E,
+ 0x10, 0x12, 0x14, 0x16, 0x18, 0x1A, 0x1C, 0x1E));
+#else
+ return (vector bool char)vec_perm(
+ __a, __b,
+ (vector unsigned char)(0x01, 0x03, 0x05, 0x07, 0x09, 0x0B, 0x0D, 0x0F,
+ 0x11, 0x13, 0x15, 0x17, 0x19, 0x1B, 0x1D, 0x1F));
+#endif
+}
+
+/* vec_vpkuwum */
+
+#define __builtin_altivec_vpkuwum vec_vpkuwum
+
+static __inline__ vector short __ATTRS_o_ai vec_vpkuwum(vector int __a,
+ vector int __b) {
+#ifdef __LITTLE_ENDIAN__
+ return (vector short)vec_perm(
+ __a, __b,
+ (vector unsigned char)(0x00, 0x01, 0x04, 0x05, 0x08, 0x09, 0x0C, 0x0D,
+ 0x10, 0x11, 0x14, 0x15, 0x18, 0x19, 0x1C, 0x1D));
+#else
+ return (vector short)vec_perm(
+ __a, __b,
+ (vector unsigned char)(0x02, 0x03, 0x06, 0x07, 0x0A, 0x0B, 0x0E, 0x0F,
+ 0x12, 0x13, 0x16, 0x17, 0x1A, 0x1B, 0x1E, 0x1F));
+#endif
+}
+
+static __inline__ vector unsigned short __ATTRS_o_ai
+vec_vpkuwum(vector unsigned int __a, vector unsigned int __b) {
+#ifdef __LITTLE_ENDIAN__
+ return (vector unsigned short)vec_perm(
+ __a, __b,
+ (vector unsigned char)(0x00, 0x01, 0x04, 0x05, 0x08, 0x09, 0x0C, 0x0D,
+ 0x10, 0x11, 0x14, 0x15, 0x18, 0x19, 0x1C, 0x1D));
+#else
+ return (vector unsigned short)vec_perm(
+ __a, __b,
+ (vector unsigned char)(0x02, 0x03, 0x06, 0x07, 0x0A, 0x0B, 0x0E, 0x0F,
+ 0x12, 0x13, 0x16, 0x17, 0x1A, 0x1B, 0x1E, 0x1F));
+#endif
+}
+
+static __inline__ vector bool short __ATTRS_o_ai
+vec_vpkuwum(vector bool int __a, vector bool int __b) {
+#ifdef __LITTLE_ENDIAN__
+ return (vector bool short)vec_perm(
+ __a, __b,
+ (vector unsigned char)(0x00, 0x01, 0x04, 0x05, 0x08, 0x09, 0x0C, 0x0D,
+ 0x10, 0x11, 0x14, 0x15, 0x18, 0x19, 0x1C, 0x1D));
+#else
+ return (vector bool short)vec_perm(
+ __a, __b,
+ (vector unsigned char)(0x02, 0x03, 0x06, 0x07, 0x0A, 0x0B, 0x0E, 0x0F,
+ 0x12, 0x13, 0x16, 0x17, 0x1A, 0x1B, 0x1E, 0x1F));
+#endif
+}
+
+/* vec_vpkudum */
+
+#ifdef __POWER8_VECTOR__
+#define __builtin_altivec_vpkudum vec_vpkudum
+
+static __inline__ vector int __ATTRS_o_ai vec_vpkudum(vector long long __a,
+ vector long long __b) {
+#ifdef __LITTLE_ENDIAN__
+ return (vector int)vec_perm(
+ __a, __b,
+ (vector unsigned char)(0x00, 0x01, 0x02, 0x03, 0x08, 0x09, 0x0A, 0x0B,
+ 0x10, 0x11, 0x12, 0x13, 0x18, 0x19, 0x1A, 0x1B));
+#else
+ return (vector int)vec_perm(
+ __a, __b,
+ (vector unsigned char)(0x04, 0x05, 0x06, 0x07, 0x0C, 0x0D, 0x0E, 0x0F,
+ 0x14, 0x15, 0x16, 0x17, 0x1C, 0x1D, 0x1E, 0x1F));
+#endif
+}
+
+static __inline__ vector unsigned int __ATTRS_o_ai
+vec_vpkudum(vector unsigned long long __a, vector unsigned long long __b) {
+#ifdef __LITTLE_ENDIAN__
+ return (vector unsigned int)vec_perm(
+ __a, __b,
+ (vector unsigned char)(0x00, 0x01, 0x02, 0x03, 0x08, 0x09, 0x0A, 0x0B,
+ 0x10, 0x11, 0x12, 0x13, 0x18, 0x19, 0x1A, 0x1B));
+#else
+ return (vector unsigned int)vec_perm(
+ __a, __b,
+ (vector unsigned char)(0x04, 0x05, 0x06, 0x07, 0x0C, 0x0D, 0x0E, 0x0F,
+ 0x14, 0x15, 0x16, 0x17, 0x1C, 0x1D, 0x1E, 0x1F));
+#endif
+}
+
+static __inline__ vector bool int __ATTRS_o_ai
+vec_vpkudum(vector bool long long __a, vector bool long long __b) {
+#ifdef __LITTLE_ENDIAN__
+ return (vector bool int)vec_perm(
+ (vector long long)__a, (vector long long)__b,
+ (vector unsigned char)(0x00, 0x01, 0x02, 0x03, 0x08, 0x09, 0x0A, 0x0B,
+ 0x10, 0x11, 0x12, 0x13, 0x18, 0x19, 0x1A, 0x1B));
+#else
+ return (vector bool int)vec_perm(
+ (vector long long)__a, (vector long long)__b,
+ (vector unsigned char)(0x04, 0x05, 0x06, 0x07, 0x0C, 0x0D, 0x0E, 0x0F,
+ 0x14, 0x15, 0x16, 0x17, 0x1C, 0x1D, 0x1E, 0x1F));
+#endif
+}
+#endif
+
+/* vec_packpx */
+
+static __inline__ vector pixel __attribute__((__always_inline__))
+vec_packpx(vector unsigned int __a, vector unsigned int __b) {
+#ifdef __LITTLE_ENDIAN__
+ return (vector pixel)__builtin_altivec_vpkpx(__b, __a);
+#else
+ return (vector pixel)__builtin_altivec_vpkpx(__a, __b);
+#endif
+}
+
+/* vec_vpkpx */
+
+static __inline__ vector pixel __attribute__((__always_inline__))
+vec_vpkpx(vector unsigned int __a, vector unsigned int __b) {
+#ifdef __LITTLE_ENDIAN__
+ return (vector pixel)__builtin_altivec_vpkpx(__b, __a);
+#else
+ return (vector pixel)__builtin_altivec_vpkpx(__a, __b);
+#endif
+}
+
+/* vec_packs */
+
+static __inline__ vector signed char __ATTRS_o_ai vec_packs(vector short __a,
+ vector short __b) {
+#ifdef __LITTLE_ENDIAN__
+ return __builtin_altivec_vpkshss(__b, __a);
+#else
+ return __builtin_altivec_vpkshss(__a, __b);
+#endif
+}
+
+static __inline__ vector unsigned char __ATTRS_o_ai
+vec_packs(vector unsigned short __a, vector unsigned short __b) {
+#ifdef __LITTLE_ENDIAN__
+ return __builtin_altivec_vpkuhus(__b, __a);
+#else
+ return __builtin_altivec_vpkuhus(__a, __b);
+#endif
+}
+
+static __inline__ vector signed short __ATTRS_o_ai vec_packs(vector int __a,
+ vector int __b) {
+#ifdef __LITTLE_ENDIAN__
+ return __builtin_altivec_vpkswss(__b, __a);
+#else
+ return __builtin_altivec_vpkswss(__a, __b);
+#endif
+}
+
+static __inline__ vector unsigned short __ATTRS_o_ai
+vec_packs(vector unsigned int __a, vector unsigned int __b) {
+#ifdef __LITTLE_ENDIAN__
+ return __builtin_altivec_vpkuwus(__b, __a);
+#else
+ return __builtin_altivec_vpkuwus(__a, __b);
+#endif
+}
+
+#ifdef __POWER8_VECTOR__
+static __inline__ vector int __ATTRS_o_ai vec_packs(vector long long __a,
+ vector long long __b) {
+#ifdef __LITTLE_ENDIAN__
+ return __builtin_altivec_vpksdss(__b, __a);
+#else
+ return __builtin_altivec_vpksdss(__a, __b);
+#endif
+}
+
+static __inline__ vector unsigned int __ATTRS_o_ai
+vec_packs(vector unsigned long long __a, vector unsigned long long __b) {
+#ifdef __LITTLE_ENDIAN__
+ return __builtin_altivec_vpkudus(__b, __a);
+#else
+ return __builtin_altivec_vpkudus(__a, __b);
+#endif
+}
+#endif
+
+/* vec_vpkshss */
+
+static __inline__ vector signed char __attribute__((__always_inline__))
+vec_vpkshss(vector short __a, vector short __b) {
+#ifdef __LITTLE_ENDIAN__
+ return __builtin_altivec_vpkshss(__b, __a);
+#else
+ return __builtin_altivec_vpkshss(__a, __b);
+#endif
+}
+
+/* vec_vpksdss */
+
+#ifdef __POWER8_VECTOR__
+static __inline__ vector int __ATTRS_o_ai vec_vpksdss(vector long long __a,
+ vector long long __b) {
+#ifdef __LITTLE_ENDIAN__
+ return __builtin_altivec_vpksdss(__b, __a);
+#else
+ return __builtin_altivec_vpksdss(__a, __b);
+#endif
+}
+#endif
+
+/* vec_vpkuhus */
+
+static __inline__ vector unsigned char __attribute__((__always_inline__))
+vec_vpkuhus(vector unsigned short __a, vector unsigned short __b) {
+#ifdef __LITTLE_ENDIAN__
+ return __builtin_altivec_vpkuhus(__b, __a);
+#else
+ return __builtin_altivec_vpkuhus(__a, __b);
+#endif
+}
+
+/* vec_vpkudus */
+
+#ifdef __POWER8_VECTOR__
+static __inline__ vector unsigned int __attribute__((__always_inline__))
+vec_vpkudus(vector unsigned long long __a, vector unsigned long long __b) {
+#ifdef __LITTLE_ENDIAN__
+ return __builtin_altivec_vpkudus(__b, __a);
+#else
+ return __builtin_altivec_vpkudus(__a, __b);
+#endif
+}
+#endif
+
+/* vec_vpkswss */
+
+static __inline__ vector signed short __attribute__((__always_inline__))
+vec_vpkswss(vector int __a, vector int __b) {
+#ifdef __LITTLE_ENDIAN__
+ return __builtin_altivec_vpkswss(__b, __a);
+#else
+ return __builtin_altivec_vpkswss(__a, __b);
+#endif
+}
+
+/* vec_vpkuwus */
+
+static __inline__ vector unsigned short __attribute__((__always_inline__))
+vec_vpkuwus(vector unsigned int __a, vector unsigned int __b) {
+#ifdef __LITTLE_ENDIAN__
+ return __builtin_altivec_vpkuwus(__b, __a);
+#else
+ return __builtin_altivec_vpkuwus(__a, __b);
+#endif
+}
+
+/* vec_packsu */
+
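+// Note: vec_packsu always produces unsigned results with unsigned
+// saturation, so negative elements of signed inputs clamp to zero
+// (vpkshus/vpkswus/vpksdus).
+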
+static __inline__ vector unsigned char __ATTRS_o_ai
+vec_packsu(vector short __a, vector short __b) {
+#ifdef __LITTLE_ENDIAN__
+ return __builtin_altivec_vpkshus(__b, __a);
+#else
+ return __builtin_altivec_vpkshus(__a, __b);
+#endif
+}
+
+static __inline__ vector unsigned char __ATTRS_o_ai
+vec_packsu(vector unsigned short __a, vector unsigned short __b) {
+#ifdef __LITTLE_ENDIAN__
+ return __builtin_altivec_vpkuhus(__b, __a);
+#else
+ return __builtin_altivec_vpkuhus(__a, __b);
+#endif
+}
+
+static __inline__ vector unsigned short __ATTRS_o_ai
+vec_packsu(vector int __a, vector int __b) {
+#ifdef __LITTLE_ENDIAN__
+ return __builtin_altivec_vpkswus(__b, __a);
+#else
+ return __builtin_altivec_vpkswus(__a, __b);
+#endif
+}
+
+static __inline__ vector unsigned short __ATTRS_o_ai
+vec_packsu(vector unsigned int __a, vector unsigned int __b) {
+#ifdef __LITTLE_ENDIAN__
+ return __builtin_altivec_vpkuwus(__b, __a);
+#else
+ return __builtin_altivec_vpkuwus(__a, __b);
+#endif
+}
+
+#ifdef __POWER8_VECTOR__
+static __inline__ vector unsigned int __ATTRS_o_ai
+vec_packsu(vector long long __a, vector long long __b) {
+#ifdef __LITTLE_ENDIAN__
+ return __builtin_altivec_vpksdus(__b, __a);
+#else
+ return __builtin_altivec_vpksdus(__a, __b);
+#endif
+}
+
+static __inline__ vector unsigned int __ATTRS_o_ai
+vec_packsu(vector unsigned long long __a, vector unsigned long long __b) {
+#ifdef __LITTLE_ENDIAN__
+ return __builtin_altivec_vpkudus(__b, __a);
+#else
+ return __builtin_altivec_vpkudus(__a, __b);
+#endif
+}
+#endif
+
+/* vec_vpkshus */
+
+static __inline__ vector unsigned char __ATTRS_o_ai
+vec_vpkshus(vector short __a, vector short __b) {
+#ifdef __LITTLE_ENDIAN__
+ return __builtin_altivec_vpkshus(__b, __a);
+#else
+ return __builtin_altivec_vpkshus(__a, __b);
+#endif
+}
+
+static __inline__ vector unsigned char __ATTRS_o_ai
+vec_vpkshus(vector unsigned short __a, vector unsigned short __b) {
+#ifdef __LITTLE_ENDIAN__
+ return __builtin_altivec_vpkuhus(__b, __a);
+#else
+ return __builtin_altivec_vpkuhus(__a, __b);
+#endif
+}
+
+/* vec_vpkswus */
+
+static __inline__ vector unsigned short __ATTRS_o_ai
+vec_vpkswus(vector int __a, vector int __b) {
+#ifdef __LITTLE_ENDIAN__
+ return __builtin_altivec_vpkswus(__b, __a);
+#else
+ return __builtin_altivec_vpkswus(__a, __b);
+#endif
+}
+
+static __inline__ vector unsigned short __ATTRS_o_ai
+vec_vpkswus(vector unsigned int __a, vector unsigned int __b) {
+#ifdef __LITTLE_ENDIAN__
+ return __builtin_altivec_vpkuwus(__b, __a);
+#else
+ return __builtin_altivec_vpkuwus(__a, __b);
+#endif
+}
+
+/* vec_vpksdus */
+
+#ifdef __POWER8_VECTOR__
+static __inline__ vector unsigned int __ATTRS_o_ai
+vec_vpksdus(vector long long __a, vector long long __b) {
+#ifdef __LITTLE_ENDIAN__
+ return __builtin_altivec_vpksdus(__b, __a);
+#else
+ return __builtin_altivec_vpksdus(__a, __b);
+#endif
+}
+#endif
+
+/* vec_perm */
+
+// The vperm instruction is defined architecturally with a big-endian bias.
+// For little endian, we swap the input operands and invert the permute
+// control vector (PCV).  Only the rightmost 5 bits of each PCV byte matter,
+// so we could use a vector of all 31s instead of all 255s to perform the
+// inversion.  However, when the PCV is not a constant, using 255 has an
+// advantage in that the vec_xor can be recognized as a vec_nor (and for P8
+// and later, possibly a vec_nand).
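+//
+// For example (illustrative): a PCV byte with value c in [0,31] selects
+// byte c of the big-endian concatenation a:b.  After the xor, its low
+// 5 bits hold 31 - c, which selects byte 31 - c of the swapped
+// concatenation b:a -- the same data element for little-endian layout.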
+
+static __inline__ vector signed char __ATTRS_o_ai vec_perm(
+ vector signed char __a, vector signed char __b, vector unsigned char __c) {
+#ifdef __LITTLE_ENDIAN__
+ vector unsigned char __d = {255, 255, 255, 255, 255, 255, 255, 255,
+ 255, 255, 255, 255, 255, 255, 255, 255};
+ __d = vec_xor(__c, __d);
+ return (vector signed char)__builtin_altivec_vperm_4si((vector int)__b,
+ (vector int)__a, __d);
+#else
+ return (vector signed char)__builtin_altivec_vperm_4si((vector int)__a,
+ (vector int)__b, __c);
+#endif
+}
+
+static __inline__ vector unsigned char __ATTRS_o_ai
+vec_perm(vector unsigned char __a, vector unsigned char __b,
+ vector unsigned char __c) {
+#ifdef __LITTLE_ENDIAN__
+ vector unsigned char __d = {255, 255, 255, 255, 255, 255, 255, 255,
+ 255, 255, 255, 255, 255, 255, 255, 255};
+ __d = vec_xor(__c, __d);
+ return (vector unsigned char)__builtin_altivec_vperm_4si(
+ (vector int)__b, (vector int)__a, __d);
+#else
+ return (vector unsigned char)__builtin_altivec_vperm_4si(
+ (vector int)__a, (vector int)__b, __c);
+#endif
+}
+
+static __inline__ vector bool char __ATTRS_o_ai
+vec_perm(vector bool char __a, vector bool char __b, vector unsigned char __c) {
+#ifdef __LITTLE_ENDIAN__
+ vector unsigned char __d = {255, 255, 255, 255, 255, 255, 255, 255,
+ 255, 255, 255, 255, 255, 255, 255, 255};
+ __d = vec_xor(__c, __d);
+ return (vector bool char)__builtin_altivec_vperm_4si((vector int)__b,
+ (vector int)__a, __d);
+#else
+ return (vector bool char)__builtin_altivec_vperm_4si((vector int)__a,
+ (vector int)__b, __c);
+#endif
+}
+
+static __inline__ vector short __ATTRS_o_ai vec_perm(vector signed short __a,
+ vector signed short __b,
+ vector unsigned char __c) {
+#ifdef __LITTLE_ENDIAN__
+ vector unsigned char __d = {255, 255, 255, 255, 255, 255, 255, 255,
+ 255, 255, 255, 255, 255, 255, 255, 255};
+ __d = vec_xor(__c, __d);
+ return (vector signed short)__builtin_altivec_vperm_4si((vector int)__b,
+ (vector int)__a, __d);
+#else
+ return (vector signed short)__builtin_altivec_vperm_4si((vector int)__a,
+ (vector int)__b, __c);
+#endif
+}
+
+static __inline__ vector unsigned short __ATTRS_o_ai
+vec_perm(vector unsigned short __a, vector unsigned short __b,
+ vector unsigned char __c) {
+#ifdef __LITTLE_ENDIAN__
+ vector unsigned char __d = {255, 255, 255, 255, 255, 255, 255, 255,
+ 255, 255, 255, 255, 255, 255, 255, 255};
+ __d = vec_xor(__c, __d);
+ return (vector unsigned short)__builtin_altivec_vperm_4si(
+ (vector int)__b, (vector int)__a, __d);
+#else
+ return (vector unsigned short)__builtin_altivec_vperm_4si(
+ (vector int)__a, (vector int)__b, __c);
+#endif
+}
+
+static __inline__ vector bool short __ATTRS_o_ai vec_perm(
+ vector bool short __a, vector bool short __b, vector unsigned char __c) {
+#ifdef __LITTLE_ENDIAN__
+ vector unsigned char __d = {255, 255, 255, 255, 255, 255, 255, 255,
+ 255, 255, 255, 255, 255, 255, 255, 255};
+ __d = vec_xor(__c, __d);
+ return (vector bool short)__builtin_altivec_vperm_4si((vector int)__b,
+ (vector int)__a, __d);
+#else
+ return (vector bool short)__builtin_altivec_vperm_4si((vector int)__a,
+ (vector int)__b, __c);
+#endif
+}
+
+static __inline__ vector pixel __ATTRS_o_ai vec_perm(vector pixel __a,
+ vector pixel __b,
+ vector unsigned char __c) {
+#ifdef __LITTLE_ENDIAN__
+ vector unsigned char __d = {255, 255, 255, 255, 255, 255, 255, 255,
+ 255, 255, 255, 255, 255, 255, 255, 255};
+ __d = vec_xor(__c, __d);
+ return (vector pixel)__builtin_altivec_vperm_4si((vector int)__b,
+ (vector int)__a, __d);
+#else
+ return (vector pixel)__builtin_altivec_vperm_4si((vector int)__a,
+ (vector int)__b, __c);
+#endif
+}
+
+static __inline__ vector int __ATTRS_o_ai vec_perm(vector signed int __a,
+ vector signed int __b,
+ vector unsigned char __c) {
+#ifdef __LITTLE_ENDIAN__
+ vector unsigned char __d = {255, 255, 255, 255, 255, 255, 255, 255,
+ 255, 255, 255, 255, 255, 255, 255, 255};
+ __d = vec_xor(__c, __d);
+ return (vector signed int)__builtin_altivec_vperm_4si(__b, __a, __d);
+#else
+ return (vector signed int)__builtin_altivec_vperm_4si(__a, __b, __c);
+#endif
+}
+
+static __inline__ vector unsigned int __ATTRS_o_ai
+vec_perm(vector unsigned int __a, vector unsigned int __b,
+ vector unsigned char __c) {
+#ifdef __LITTLE_ENDIAN__
+ vector unsigned char __d = {255, 255, 255, 255, 255, 255, 255, 255,
+ 255, 255, 255, 255, 255, 255, 255, 255};
+ __d = vec_xor(__c, __d);
+ return (vector unsigned int)__builtin_altivec_vperm_4si((vector int)__b,
+ (vector int)__a, __d);
+#else
+ return (vector unsigned int)__builtin_altivec_vperm_4si((vector int)__a,
+ (vector int)__b, __c);
+#endif
+}
+
+static __inline__ vector bool int __ATTRS_o_ai
+vec_perm(vector bool int __a, vector bool int __b, vector unsigned char __c) {
+#ifdef __LITTLE_ENDIAN__
+ vector unsigned char __d = {255, 255, 255, 255, 255, 255, 255, 255,
+ 255, 255, 255, 255, 255, 255, 255, 255};
+ __d = vec_xor(__c, __d);
+ return (vector bool int)__builtin_altivec_vperm_4si((vector int)__b,
+ (vector int)__a, __d);
+#else
+ return (vector bool int)__builtin_altivec_vperm_4si((vector int)__a,
+ (vector int)__b, __c);
+#endif
+}
+
+static __inline__ vector float __ATTRS_o_ai vec_perm(vector float __a,
+ vector float __b,
+ vector unsigned char __c) {
+#ifdef __LITTLE_ENDIAN__
+ vector unsigned char __d = {255, 255, 255, 255, 255, 255, 255, 255,
+ 255, 255, 255, 255, 255, 255, 255, 255};
+ __d = vec_xor(__c, __d);
+ return (vector float)__builtin_altivec_vperm_4si((vector int)__b,
+ (vector int)__a, __d);
+#else
+ return (vector float)__builtin_altivec_vperm_4si((vector int)__a,
+ (vector int)__b, __c);
+#endif
+}
+
+#ifdef __VSX__
+static __inline__ vector long long __ATTRS_o_ai
+vec_perm(vector signed long long __a, vector signed long long __b,
+ vector unsigned char __c) {
+#ifdef __LITTLE_ENDIAN__
+ vector unsigned char __d = {255, 255, 255, 255, 255, 255, 255, 255,
+ 255, 255, 255, 255, 255, 255, 255, 255};
+ __d = vec_xor(__c, __d);
+ return (vector signed long long)__builtin_altivec_vperm_4si(
+ (vector int)__b, (vector int)__a, __d);
+#else
+ return (vector signed long long)__builtin_altivec_vperm_4si(
+ (vector int)__a, (vector int)__b, __c);
+#endif
+}
+
+static __inline__ vector unsigned long long __ATTRS_o_ai
+vec_perm(vector unsigned long long __a, vector unsigned long long __b,
+ vector unsigned char __c) {
+#ifdef __LITTLE_ENDIAN__
+ vector unsigned char __d = {255, 255, 255, 255, 255, 255, 255, 255,
+ 255, 255, 255, 255, 255, 255, 255, 255};
+ __d = vec_xor(__c, __d);
+ return (vector unsigned long long)__builtin_altivec_vperm_4si(
+ (vector int)__b, (vector int)__a, __d);
+#else
+ return (vector unsigned long long)__builtin_altivec_vperm_4si(
+ (vector int)__a, (vector int)__b, __c);
+#endif
+}
+
+static __inline__ vector bool long long __ATTRS_o_ai
+vec_perm(vector bool long long __a, vector bool long long __b,
+ vector unsigned char __c) {
+#ifdef __LITTLE_ENDIAN__
+ vector unsigned char __d = {255, 255, 255, 255, 255, 255, 255, 255,
+ 255, 255, 255, 255, 255, 255, 255, 255};
+ __d = vec_xor(__c, __d);
+ return (vector bool long long)__builtin_altivec_vperm_4si(
+ (vector int)__b, (vector int)__a, __d);
+#else
+ return (vector bool long long)__builtin_altivec_vperm_4si(
+ (vector int)__a, (vector int)__b, __c);
+#endif
+}
+
+static __inline__ vector double __ATTRS_o_ai
+vec_perm(vector double __a, vector double __b, vector unsigned char __c) {
+#ifdef __LITTLE_ENDIAN__
+ vector unsigned char __d = {255, 255, 255, 255, 255, 255, 255, 255,
+ 255, 255, 255, 255, 255, 255, 255, 255};
+ __d = vec_xor(__c, __d);
+ return (vector double)__builtin_altivec_vperm_4si((vector int)__b,
+ (vector int)__a, __d);
+#else
+ return (vector double)__builtin_altivec_vperm_4si((vector int)__a,
+ (vector int)__b, __c);
+#endif
+}
+#endif
+
+/* vec_vperm */
+
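+// Note: the vec_v* spellings are the instruction-specific names; they
+// simply forward to the generic vec_perm overloads above.
+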
+static __inline__ vector signed char __ATTRS_o_ai vec_vperm(
+ vector signed char __a, vector signed char __b, vector unsigned char __c) {
+ return vec_perm(__a, __b, __c);
+}
+
+static __inline__ vector unsigned char __ATTRS_o_ai
+vec_vperm(vector unsigned char __a, vector unsigned char __b,
+ vector unsigned char __c) {
+ return vec_perm(__a, __b, __c);
+}
+
+static __inline__ vector bool char __ATTRS_o_ai vec_vperm(
+ vector bool char __a, vector bool char __b, vector unsigned char __c) {
+ return vec_perm(__a, __b, __c);
+}
+
+static __inline__ vector short __ATTRS_o_ai
+vec_vperm(vector short __a, vector short __b, vector unsigned char __c) {
+ return vec_perm(__a, __b, __c);
+}
+
+static __inline__ vector unsigned short __ATTRS_o_ai
+vec_vperm(vector unsigned short __a, vector unsigned short __b,
+ vector unsigned char __c) {
+ return vec_perm(__a, __b, __c);
+}
+
+static __inline__ vector bool short __ATTRS_o_ai vec_vperm(
+ vector bool short __a, vector bool short __b, vector unsigned char __c) {
+ return vec_perm(__a, __b, __c);
+}
+
+static __inline__ vector pixel __ATTRS_o_ai
+vec_vperm(vector pixel __a, vector pixel __b, vector unsigned char __c) {
+ return vec_perm(__a, __b, __c);
+}
+
+static __inline__ vector int __ATTRS_o_ai vec_vperm(vector int __a,
+ vector int __b,
+ vector unsigned char __c) {
+ return vec_perm(__a, __b, __c);
+}
+
+static __inline__ vector unsigned int __ATTRS_o_ai
+vec_vperm(vector unsigned int __a, vector unsigned int __b,
+ vector unsigned char __c) {
+ return vec_perm(__a, __b, __c);
+}
+
+static __inline__ vector bool int __ATTRS_o_ai
+vec_vperm(vector bool int __a, vector bool int __b, vector unsigned char __c) {
+ return vec_perm(__a, __b, __c);
+}
+
+static __inline__ vector float __ATTRS_o_ai
+vec_vperm(vector float __a, vector float __b, vector unsigned char __c) {
+ return vec_perm(__a, __b, __c);
+}
+
+#ifdef __VSX__
+static __inline__ vector long long __ATTRS_o_ai vec_vperm(
+ vector long long __a, vector long long __b, vector unsigned char __c) {
+ return vec_perm(__a, __b, __c);
+}
+
+static __inline__ vector unsigned long long __ATTRS_o_ai
+vec_vperm(vector unsigned long long __a, vector unsigned long long __b,
+ vector unsigned char __c) {
+ return vec_perm(__a, __b, __c);
+}
+
+static __inline__ vector double __ATTRS_o_ai
+vec_vperm(vector double __a, vector double __b, vector unsigned char __c) {
+ return vec_perm(__a, __b, __c);
+}
+#endif
+
+/* vec_re */
+
+static __inline__ vector float __ATTRS_o_ai vec_re(vector float __a) {
+#ifdef __VSX__
+ return __builtin_vsx_xvresp(__a);
+#else
+ return __builtin_altivec_vrefp(__a);
+#endif
+}
+
+#ifdef __VSX__
+static __inline__ vector double __ATTRS_o_ai vec_re(vector double __a) {
+ return __builtin_vsx_xvredp(__a);
+}
+#endif
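+
+// Note: vec_re returns only an estimate of the reciprocal (roughly 12 bits
+// of precision).  When more accuracy is needed, one Newton-Raphson step,
+// r1 = r0 * (2 - __a * r0), roughly doubles the number of correct bits.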
+
+/* vec_vrefp */
+
+static __inline__ vector float __attribute__((__always_inline__))
+vec_vrefp(vector float __a) {
+ return __builtin_altivec_vrefp(__a);
+}
+
+/* vec_rl */
+
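+// Note: each element of __a is rotated left by the corresponding element of
+// __b; the hardware uses only the low-order log2(width) bits of each
+// rotation count.
+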
+static __inline__ vector signed char __ATTRS_o_ai
+vec_rl(vector signed char __a, vector unsigned char __b) {
+ return (vector signed char)__builtin_altivec_vrlb((vector char)__a, __b);
+}
+
+static __inline__ vector unsigned char __ATTRS_o_ai
+vec_rl(vector unsigned char __a, vector unsigned char __b) {
+ return (vector unsigned char)__builtin_altivec_vrlb((vector char)__a, __b);
+}
+
+static __inline__ vector short __ATTRS_o_ai vec_rl(vector short __a,
+ vector unsigned short __b) {
+ return __builtin_altivec_vrlh(__a, __b);
+}
+
+static __inline__ vector unsigned short __ATTRS_o_ai
+vec_rl(vector unsigned short __a, vector unsigned short __b) {
+ return (vector unsigned short)__builtin_altivec_vrlh((vector short)__a, __b);
+}
+
+static __inline__ vector int __ATTRS_o_ai vec_rl(vector int __a,
+ vector unsigned int __b) {
+ return __builtin_altivec_vrlw(__a, __b);
+}
+
+static __inline__ vector unsigned int __ATTRS_o_ai
+vec_rl(vector unsigned int __a, vector unsigned int __b) {
+ return (vector unsigned int)__builtin_altivec_vrlw((vector int)__a, __b);
+}
+
+#ifdef __POWER8_VECTOR__
+static __inline__ vector signed long long __ATTRS_o_ai
+vec_rl(vector signed long long __a, vector unsigned long long __b) {
+ return __builtin_altivec_vrld(__a, __b);
+}
+
+static __inline__ vector unsigned long long __ATTRS_o_ai
+vec_rl(vector unsigned long long __a, vector unsigned long long __b) {
+ return __builtin_altivec_vrld(__a, __b);
+}
+#endif
+
+/* vec_vrlb */
+
+static __inline__ vector signed char __ATTRS_o_ai
+vec_vrlb(vector signed char __a, vector unsigned char __b) {
+ return (vector signed char)__builtin_altivec_vrlb((vector char)__a, __b);
+}
+
+static __inline__ vector unsigned char __ATTRS_o_ai
+vec_vrlb(vector unsigned char __a, vector unsigned char __b) {
+ return (vector unsigned char)__builtin_altivec_vrlb((vector char)__a, __b);
+}
+
+/* vec_vrlh */
+
+static __inline__ vector short __ATTRS_o_ai
+vec_vrlh(vector short __a, vector unsigned short __b) {
+ return __builtin_altivec_vrlh(__a, __b);
+}
+
+static __inline__ vector unsigned short __ATTRS_o_ai
+vec_vrlh(vector unsigned short __a, vector unsigned short __b) {
+ return (vector unsigned short)__builtin_altivec_vrlh((vector short)__a, __b);
+}
+
+/* vec_vrlw */
+
+static __inline__ vector int __ATTRS_o_ai vec_vrlw(vector int __a,
+ vector unsigned int __b) {
+ return __builtin_altivec_vrlw(__a, __b);
+}
+
+static __inline__ vector unsigned int __ATTRS_o_ai
+vec_vrlw(vector unsigned int __a, vector unsigned int __b) {
+ return (vector unsigned int)__builtin_altivec_vrlw((vector int)__a, __b);
+}
+
+/* vec_round */
+
+static __inline__ vector float __ATTRS_o_ai vec_round(vector float __a) {
+#ifdef __VSX__
+ return __builtin_vsx_xvrspi(__a);
+#else
+ return __builtin_altivec_vrfin(__a);
+#endif
+}
+
+#ifdef __VSX__
+static __inline__ vector double __ATTRS_o_ai vec_round(vector double __a) {
+ return __builtin_vsx_xvrdpi(__a);
+}
+
+/* vec_rint */
+
+static __inline__ vector float __ATTRS_o_ai vec_rint(vector float __a) {
+ return __builtin_vsx_xvrspic(__a);
+}
+
+static __inline__ vector double __ATTRS_o_ai vec_rint(vector double __a) {
+ return __builtin_vsx_xvrdpic(__a);
+}
+
+/* vec_nearbyint */
+
+static __inline__ vector float __ATTRS_o_ai vec_nearbyint(vector float __a) {
+ return __builtin_vsx_xvrspi(__a);
+}
+
+static __inline__ vector double __ATTRS_o_ai vec_nearbyint(vector double __a) {
+ return __builtin_vsx_xvrdpi(__a);
+}
+#endif
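+
+// Note: vec_round rounds each element to the nearest integral value, while
+// vec_rint uses the current floating-point rounding mode (the trailing 'c'
+// in xvrspic/xvrdpic stands for "current").  vec_nearbyint maps to the same
+// round-to-nearest instructions as vec_round here.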
+
+/* vec_vrfin */
+
+static __inline__ vector float __attribute__((__always_inline__))
+vec_vrfin(vector float __a) {
+ return __builtin_altivec_vrfin(__a);
+}
+
+/* vec_sqrt */
+
+#ifdef __VSX__
+static __inline__ vector float __ATTRS_o_ai vec_sqrt(vector float __a) {
+ return __builtin_vsx_xvsqrtsp(__a);
+}
+
+static __inline__ vector double __ATTRS_o_ai vec_sqrt(vector double __a) {
+ return __builtin_vsx_xvsqrtdp(__a);
+}
+#endif
+
+/* vec_rsqrte */
+
+static __inline__ vector float __ATTRS_o_ai vec_rsqrte(vector float __a) {
+#ifdef __VSX__
+ return __builtin_vsx_xvrsqrtesp(__a);
+#else
+ return __builtin_altivec_vrsqrtefp(__a);
+#endif
+}
+
+#ifdef __VSX__
+static __inline__ vector double __ATTRS_o_ai vec_rsqrte(vector double __a) {
+ return __builtin_vsx_xvrsqrtedp(__a);
+}
+#endif
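+
+// Note: vec_rsqrte is only an estimate (roughly 12 bits of precision).  A
+// Newton-Raphson step, x1 = x0 * (1.5 - 0.5 * __a * x0 * x0), can refine it
+// toward full single precision.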
+
+/* vec_vrsqrtefp */
+
+static __inline__ vector float __attribute__((__always_inline__))
+vec_vrsqrtefp(vector float __a) {
+ return __builtin_altivec_vrsqrtefp(__a);
+}
+
+/* vec_sel */
+
+#define __builtin_altivec_vsel_4si vec_sel
+
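+// Note: vec_sel is a bitwise select: wherever a bit of __c is 1 the result
+// takes that bit from __b, otherwise from __a -- hence (a & ~c) | (b & c).
+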
+static __inline__ vector signed char __ATTRS_o_ai vec_sel(
+ vector signed char __a, vector signed char __b, vector unsigned char __c) {
+ return (__a & ~(vector signed char)__c) | (__b & (vector signed char)__c);
+}
+
+static __inline__ vector signed char __ATTRS_o_ai
+vec_sel(vector signed char __a, vector signed char __b, vector bool char __c) {
+ return (__a & ~(vector signed char)__c) | (__b & (vector signed char)__c);
+}
+
+static __inline__ vector unsigned char __ATTRS_o_ai
+vec_sel(vector unsigned char __a, vector unsigned char __b,
+ vector unsigned char __c) {
+ return (__a & ~__c) | (__b & __c);
+}
+
+static __inline__ vector unsigned char __ATTRS_o_ai vec_sel(
+ vector unsigned char __a, vector unsigned char __b, vector bool char __c) {
+ return (__a & ~(vector unsigned char)__c) | (__b & (vector unsigned char)__c);
+}
+
+static __inline__ vector bool char __ATTRS_o_ai
+vec_sel(vector bool char __a, vector bool char __b, vector unsigned char __c) {
+ return (__a & ~(vector bool char)__c) | (__b & (vector bool char)__c);
+}
+
+static __inline__ vector bool char __ATTRS_o_ai vec_sel(vector bool char __a,
+ vector bool char __b,
+ vector bool char __c) {
+ return (__a & ~__c) | (__b & __c);
+}
+
+static __inline__ vector short __ATTRS_o_ai vec_sel(vector short __a,
+ vector short __b,
+ vector unsigned short __c) {
+ return (__a & ~(vector short)__c) | (__b & (vector short)__c);
+}
+
+static __inline__ vector short __ATTRS_o_ai vec_sel(vector short __a,
+ vector short __b,
+ vector bool short __c) {
+ return (__a & ~(vector short)__c) | (__b & (vector short)__c);
+}
+
+static __inline__ vector unsigned short __ATTRS_o_ai
+vec_sel(vector unsigned short __a, vector unsigned short __b,
+ vector unsigned short __c) {
+ return (__a & ~__c) | (__b & __c);
+}
+
+static __inline__ vector unsigned short __ATTRS_o_ai
+vec_sel(vector unsigned short __a, vector unsigned short __b,
+ vector bool short __c) {
+ return (__a & ~(vector unsigned short)__c) |
+ (__b & (vector unsigned short)__c);
+}
+
+static __inline__ vector bool short __ATTRS_o_ai vec_sel(
+ vector bool short __a, vector bool short __b, vector unsigned short __c) {
+ return (__a & ~(vector bool short)__c) | (__b & (vector bool short)__c);
+}
+
+static __inline__ vector bool short __ATTRS_o_ai
+vec_sel(vector bool short __a, vector bool short __b, vector bool short __c) {
+ return (__a & ~__c) | (__b & __c);
+}
+
+static __inline__ vector int __ATTRS_o_ai vec_sel(vector int __a,
+ vector int __b,
+ vector unsigned int __c) {
+ return (__a & ~(vector int)__c) | (__b & (vector int)__c);
+}
+
+static __inline__ vector int __ATTRS_o_ai vec_sel(vector int __a,
+ vector int __b,
+ vector bool int __c) {
+ return (__a & ~(vector int)__c) | (__b & (vector int)__c);
+}
+
+static __inline__ vector unsigned int __ATTRS_o_ai vec_sel(
+ vector unsigned int __a, vector unsigned int __b, vector unsigned int __c) {
+ return (__a & ~__c) | (__b & __c);
+}
+
+static __inline__ vector unsigned int __ATTRS_o_ai
+vec_sel(vector unsigned int __a, vector unsigned int __b, vector bool int __c) {
+ return (__a & ~(vector unsigned int)__c) | (__b & (vector unsigned int)__c);
+}
+
+static __inline__ vector bool int __ATTRS_o_ai
+vec_sel(vector bool int __a, vector bool int __b, vector unsigned int __c) {
+ return (__a & ~(vector bool int)__c) | (__b & (vector bool int)__c);
+}
+
+static __inline__ vector bool int __ATTRS_o_ai vec_sel(vector bool int __a,
+ vector bool int __b,
+ vector bool int __c) {
+ return (__a & ~__c) | (__b & __c);
+}
+
+static __inline__ vector float __ATTRS_o_ai vec_sel(vector float __a,
+ vector float __b,
+ vector unsigned int __c) {
+ vector int __res = ((vector int)__a & ~(vector int)__c) |
+ ((vector int)__b & (vector int)__c);
+ return (vector float)__res;
+}
+
+static __inline__ vector float __ATTRS_o_ai vec_sel(vector float __a,
+ vector float __b,
+ vector bool int __c) {
+ vector int __res = ((vector int)__a & ~(vector int)__c) |
+ ((vector int)__b & (vector int)__c);
+ return (vector float)__res;
+}
+
+#ifdef __VSX__
+static __inline__ vector double __ATTRS_o_ai
+vec_sel(vector double __a, vector double __b, vector bool long long __c) {
+ vector long long __res = ((vector long long)__a & ~(vector long long)__c) |
+ ((vector long long)__b & (vector long long)__c);
+ return (vector double)__res;
+}
+
+static __inline__ vector double __ATTRS_o_ai
+vec_sel(vector double __a, vector double __b, vector unsigned long long __c) {
+ vector long long __res = ((vector long long)__a & ~(vector long long)__c) |
+ ((vector long long)__b & (vector long long)__c);
+ return (vector double)__res;
+}
+#endif
+
+/* vec_vsel */
+
+static __inline__ vector signed char __ATTRS_o_ai vec_vsel(
+ vector signed char __a, vector signed char __b, vector unsigned char __c) {
+ return (__a & ~(vector signed char)__c) | (__b & (vector signed char)__c);
+}
+
+static __inline__ vector signed char __ATTRS_o_ai
+vec_vsel(vector signed char __a, vector signed char __b, vector bool char __c) {
+ return (__a & ~(vector signed char)__c) | (__b & (vector signed char)__c);
+}
+
+static __inline__ vector unsigned char __ATTRS_o_ai
+vec_vsel(vector unsigned char __a, vector unsigned char __b,
+ vector unsigned char __c) {
+ return (__a & ~__c) | (__b & __c);
+}
+
+static __inline__ vector unsigned char __ATTRS_o_ai vec_vsel(
+ vector unsigned char __a, vector unsigned char __b, vector bool char __c) {
+ return (__a & ~(vector unsigned char)__c) | (__b & (vector unsigned char)__c);
+}
+
+static __inline__ vector bool char __ATTRS_o_ai
+vec_vsel(vector bool char __a, vector bool char __b, vector unsigned char __c) {
+ return (__a & ~(vector bool char)__c) | (__b & (vector bool char)__c);
+}
+
+static __inline__ vector bool char __ATTRS_o_ai vec_vsel(vector bool char __a,
+ vector bool char __b,
+ vector bool char __c) {
+ return (__a & ~__c) | (__b & __c);
+}
+
+static __inline__ vector short __ATTRS_o_ai
+vec_vsel(vector short __a, vector short __b, vector unsigned short __c) {
+ return (__a & ~(vector short)__c) | (__b & (vector short)__c);
+}
+
+static __inline__ vector short __ATTRS_o_ai vec_vsel(vector short __a,
+ vector short __b,
+ vector bool short __c) {
+ return (__a & ~(vector short)__c) | (__b & (vector short)__c);
+}
+
+static __inline__ vector unsigned short __ATTRS_o_ai
+vec_vsel(vector unsigned short __a, vector unsigned short __b,
+ vector unsigned short __c) {
+ return (__a & ~__c) | (__b & __c);
+}
+
+static __inline__ vector unsigned short __ATTRS_o_ai
+vec_vsel(vector unsigned short __a, vector unsigned short __b,
+ vector bool short __c) {
+ return (__a & ~(vector unsigned short)__c) |
+ (__b & (vector unsigned short)__c);
+}
+
+static __inline__ vector bool short __ATTRS_o_ai vec_vsel(
+ vector bool short __a, vector bool short __b, vector unsigned short __c) {
+ return (__a & ~(vector bool short)__c) | (__b & (vector bool short)__c);
+}
+
+static __inline__ vector bool short __ATTRS_o_ai
+vec_vsel(vector bool short __a, vector bool short __b, vector bool short __c) {
+ return (__a & ~__c) | (__b & __c);
+}
+
+static __inline__ vector int __ATTRS_o_ai vec_vsel(vector int __a,
+ vector int __b,
+ vector unsigned int __c) {
+ return (__a & ~(vector int)__c) | (__b & (vector int)__c);
+}
+
+static __inline__ vector int __ATTRS_o_ai vec_vsel(vector int __a,
+ vector int __b,
+ vector bool int __c) {
+ return (__a & ~(vector int)__c) | (__b & (vector int)__c);
+}
+
+static __inline__ vector unsigned int __ATTRS_o_ai vec_vsel(
+ vector unsigned int __a, vector unsigned int __b, vector unsigned int __c) {
+ return (__a & ~__c) | (__b & __c);
+}
+
+static __inline__ vector unsigned int __ATTRS_o_ai vec_vsel(
+ vector unsigned int __a, vector unsigned int __b, vector bool int __c) {
+ return (__a & ~(vector unsigned int)__c) | (__b & (vector unsigned int)__c);
+}
+
+static __inline__ vector bool int __ATTRS_o_ai
+vec_vsel(vector bool int __a, vector bool int __b, vector unsigned int __c) {
+ return (__a & ~(vector bool int)__c) | (__b & (vector bool int)__c);
+}
+
+static __inline__ vector bool int __ATTRS_o_ai vec_vsel(vector bool int __a,
+ vector bool int __b,
+ vector bool int __c) {
+ return (__a & ~__c) | (__b & __c);
+}
+
+static __inline__ vector float __ATTRS_o_ai vec_vsel(vector float __a,
+ vector float __b,
+ vector unsigned int __c) {
+ vector int __res = ((vector int)__a & ~(vector int)__c) |
+ ((vector int)__b & (vector int)__c);
+ return (vector float)__res;
+}
+
+static __inline__ vector float __ATTRS_o_ai vec_vsel(vector float __a,
+ vector float __b,
+ vector bool int __c) {
+ vector int __res = ((vector int)__a & ~(vector int)__c) |
+ ((vector int)__b & (vector int)__c);
+ return (vector float)__res;
+}
+
+/* vec_sl */
+
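+// Note: each element of __a is shifted left by the corresponding element of
+// __b; the underlying vsl{b,h,w,d} instructions take the shift count modulo
+// the element width.
+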
+static __inline__ vector signed char __ATTRS_o_ai
+vec_sl(vector signed char __a, vector unsigned char __b) {
+ return __a << (vector signed char)__b;
+}
+
+static __inline__ vector unsigned char __ATTRS_o_ai
+vec_sl(vector unsigned char __a, vector unsigned char __b) {
+ return __a << __b;
+}
+
+static __inline__ vector short __ATTRS_o_ai vec_sl(vector short __a,
+ vector unsigned short __b) {
+ return __a << (vector short)__b;
+}
+
+static __inline__ vector unsigned short __ATTRS_o_ai
+vec_sl(vector unsigned short __a, vector unsigned short __b) {
+ return __a << __b;
+}
+
+static __inline__ vector int __ATTRS_o_ai vec_sl(vector int __a,
+ vector unsigned int __b) {
+ return __a << (vector int)__b;
+}
+
+static __inline__ vector unsigned int __ATTRS_o_ai
+vec_sl(vector unsigned int __a, vector unsigned int __b) {
+ return __a << __b;
+}
+
+#ifdef __POWER8_VECTOR__
+static __inline__ vector signed long long __ATTRS_o_ai
+vec_sl(vector signed long long __a, vector unsigned long long __b) {
+ return __a << (vector long long)__b;
+}
+
+static __inline__ vector unsigned long long __ATTRS_o_ai
+vec_sl(vector unsigned long long __a, vector unsigned long long __b) {
+ return __a << __b;
+}
+#endif
+
+/* vec_vslb */
+
+#define __builtin_altivec_vslb vec_vslb
+
+static __inline__ vector signed char __ATTRS_o_ai
+vec_vslb(vector signed char __a, vector unsigned char __b) {
+ return vec_sl(__a, __b);
+}
+
+static __inline__ vector unsigned char __ATTRS_o_ai
+vec_vslb(vector unsigned char __a, vector unsigned char __b) {
+ return vec_sl(__a, __b);
+}
+
+/* vec_vslh */
+
+#define __builtin_altivec_vslh vec_vslh
+
+static __inline__ vector short __ATTRS_o_ai
+vec_vslh(vector short __a, vector unsigned short __b) {
+ return vec_sl(__a, __b);
+}
+
+static __inline__ vector unsigned short __ATTRS_o_ai
+vec_vslh(vector unsigned short __a, vector unsigned short __b) {
+ return vec_sl(__a, __b);
+}
+
+/* vec_vslw */
+
+#define __builtin_altivec_vslw vec_vslw
+
+static __inline__ vector int __ATTRS_o_ai vec_vslw(vector int __a,
+ vector unsigned int __b) {
+ return vec_sl(__a, __b);
+}
+
+static __inline__ vector unsigned int __ATTRS_o_ai
+vec_vslw(vector unsigned int __a, vector unsigned int __b) {
+ return vec_sl(__a, __b);
+}
+
+/* vec_sld */
+
+#define __builtin_altivec_vsldoi_4si vec_sld
+
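+// Note: vec_sld returns bytes __c .. __c + 15 of the 32-byte concatenation
+// of __a and __b (__c is masked to 0-15); the endian-specific vec_perm
+// control vectors below express that same selection on either byte order.
+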
+static __inline__ vector signed char __ATTRS_o_ai vec_sld(
+ vector signed char __a, vector signed char __b, unsigned const int __c) {
+ unsigned char __d = __c & 0x0F;
+#ifdef __LITTLE_ENDIAN__
+ return vec_perm(
+ __b, __a, (vector unsigned char)(16 - __d, 17 - __d, 18 - __d, 19 - __d,
+ 20 - __d, 21 - __d, 22 - __d, 23 - __d,
+ 24 - __d, 25 - __d, 26 - __d, 27 - __d,
+ 28 - __d, 29 - __d, 30 - __d, 31 - __d));
+#else
+ return vec_perm(
+ __a, __b,
+ (vector unsigned char)(__d, __d + 1, __d + 2, __d + 3, __d + 4, __d + 5,
+ __d + 6, __d + 7, __d + 8, __d + 9, __d + 10,
+ __d + 11, __d + 12, __d + 13, __d + 14, __d + 15));
+#endif
+}
+
+static __inline__ vector unsigned char __ATTRS_o_ai
+vec_sld(vector unsigned char __a, vector unsigned char __b,
+ unsigned const int __c) {
+ unsigned char __d = __c & 0x0F;
+#ifdef __LITTLE_ENDIAN__
+ return vec_perm(
+ __b, __a, (vector unsigned char)(16 - __d, 17 - __d, 18 - __d, 19 - __d,
+ 20 - __d, 21 - __d, 22 - __d, 23 - __d,
+ 24 - __d, 25 - __d, 26 - __d, 27 - __d,
+ 28 - __d, 29 - __d, 30 - __d, 31 - __d));
+#else
+ return vec_perm(
+ __a, __b,
+ (vector unsigned char)(__d, __d + 1, __d + 2, __d + 3, __d + 4, __d + 5,
+ __d + 6, __d + 7, __d + 8, __d + 9, __d + 10,
+ __d + 11, __d + 12, __d + 13, __d + 14, __d + 15));
+#endif
+}
+
+static __inline__ vector bool char __ATTRS_o_ai
+vec_sld(vector bool char __a, vector bool char __b, unsigned const int __c) {
+ unsigned char __d = __c & 0x0F;
+#ifdef __LITTLE_ENDIAN__
+ return vec_perm(
+ __b, __a, (vector unsigned char)(16 - __d, 17 - __d, 18 - __d, 19 - __d,
+ 20 - __d, 21 - __d, 22 - __d, 23 - __d,
+ 24 - __d, 25 - __d, 26 - __d, 27 - __d,
+ 28 - __d, 29 - __d, 30 - __d, 31 - __d));
+#else
+ return vec_perm(
+ __a, __b,
+ (vector unsigned char)(__d, __d + 1, __d + 2, __d + 3, __d + 4, __d + 5,
+ __d + 6, __d + 7, __d + 8, __d + 9, __d + 10,
+ __d + 11, __d + 12, __d + 13, __d + 14, __d + 15));
+#endif
+}
+
+static __inline__ vector signed short __ATTRS_o_ai vec_sld(
+ vector signed short __a, vector signed short __b, unsigned const int __c) {
+ unsigned char __d = __c & 0x0F;
+#ifdef __LITTLE_ENDIAN__
+ return vec_perm(
+ __b, __a, (vector unsigned char)(16 - __d, 17 - __d, 18 - __d, 19 - __d,
+ 20 - __d, 21 - __d, 22 - __d, 23 - __d,
+ 24 - __d, 25 - __d, 26 - __d, 27 - __d,
+ 28 - __d, 29 - __d, 30 - __d, 31 - __d));
+#else
+ return vec_perm(
+ __a, __b,
+ (vector unsigned char)(__d, __d + 1, __d + 2, __d + 3, __d + 4, __d + 5,
+ __d + 6, __d + 7, __d + 8, __d + 9, __d + 10,
+ __d + 11, __d + 12, __d + 13, __d + 14, __d + 15));
+#endif
+}
+
+static __inline__ vector unsigned short __ATTRS_o_ai
+vec_sld(vector unsigned short __a, vector unsigned short __b,
+ unsigned const int __c) {
+ unsigned char __d = __c & 0x0F;
+#ifdef __LITTLE_ENDIAN__
+ return vec_perm(
+ __b, __a, (vector unsigned char)(16 - __d, 17 - __d, 18 - __d, 19 - __d,
+ 20 - __d, 21 - __d, 22 - __d, 23 - __d,
+ 24 - __d, 25 - __d, 26 - __d, 27 - __d,
+ 28 - __d, 29 - __d, 30 - __d, 31 - __d));
+#else
+ return vec_perm(
+ __a, __b,
+ (vector unsigned char)(__d, __d + 1, __d + 2, __d + 3, __d + 4, __d + 5,
+ __d + 6, __d + 7, __d + 8, __d + 9, __d + 10,
+ __d + 11, __d + 12, __d + 13, __d + 14, __d + 15));
+#endif
+}
+
+static __inline__ vector bool short __ATTRS_o_ai
+vec_sld(vector bool short __a, vector bool short __b, unsigned const int __c) {
+ unsigned char __d = __c & 0x0F;
+#ifdef __LITTLE_ENDIAN__
+ return vec_perm(
+ __b, __a, (vector unsigned char)(16 - __d, 17 - __d, 18 - __d, 19 - __d,
+ 20 - __d, 21 - __d, 22 - __d, 23 - __d,
+ 24 - __d, 25 - __d, 26 - __d, 27 - __d,
+ 28 - __d, 29 - __d, 30 - __d, 31 - __d));
+#else
+ return vec_perm(
+ __a, __b,
+ (vector unsigned char)(__d, __d + 1, __d + 2, __d + 3, __d + 4, __d + 5,
+ __d + 6, __d + 7, __d + 8, __d + 9, __d + 10,
+ __d + 11, __d + 12, __d + 13, __d + 14, __d + 15));
+#endif
+}
+
+static __inline__ vector pixel __ATTRS_o_ai vec_sld(vector pixel __a,
+ vector pixel __b,
+ unsigned const int __c) {
+ unsigned char __d = __c & 0x0F;
+#ifdef __LITTLE_ENDIAN__
+ return vec_perm(
+ __b, __a, (vector unsigned char)(16 - __d, 17 - __d, 18 - __d, 19 - __d,
+ 20 - __d, 21 - __d, 22 - __d, 23 - __d,
+ 24 - __d, 25 - __d, 26 - __d, 27 - __d,
+ 28 - __d, 29 - __d, 30 - __d, 31 - __d));
+#else
+ return vec_perm(
+ __a, __b,
+ (vector unsigned char)(__d, __d + 1, __d + 2, __d + 3, __d + 4, __d + 5,
+ __d + 6, __d + 7, __d + 8, __d + 9, __d + 10,
+ __d + 11, __d + 12, __d + 13, __d + 14, __d + 15));
+#endif
+}
+
+static __inline__ vector signed int __ATTRS_o_ai
+vec_sld(vector signed int __a, vector signed int __b, unsigned const int __c) {
+ unsigned char __d = __c & 0x0F;
+#ifdef __LITTLE_ENDIAN__
+ return vec_perm(
+ __b, __a, (vector unsigned char)(16 - __d, 17 - __d, 18 - __d, 19 - __d,
+ 20 - __d, 21 - __d, 22 - __d, 23 - __d,
+ 24 - __d, 25 - __d, 26 - __d, 27 - __d,
+ 28 - __d, 29 - __d, 30 - __d, 31 - __d));
+#else
+ return vec_perm(
+ __a, __b,
+ (vector unsigned char)(__d, __d + 1, __d + 2, __d + 3, __d + 4, __d + 5,
+ __d + 6, __d + 7, __d + 8, __d + 9, __d + 10,
+ __d + 11, __d + 12, __d + 13, __d + 14, __d + 15));
+#endif
+}
+
+static __inline__ vector unsigned int __ATTRS_o_ai vec_sld(
+ vector unsigned int __a, vector unsigned int __b, unsigned const int __c) {
+ unsigned char __d = __c & 0x0F;
+#ifdef __LITTLE_ENDIAN__
+ return vec_perm(
+ __b, __a, (vector unsigned char)(16 - __d, 17 - __d, 18 - __d, 19 - __d,
+ 20 - __d, 21 - __d, 22 - __d, 23 - __d,
+ 24 - __d, 25 - __d, 26 - __d, 27 - __d,
+ 28 - __d, 29 - __d, 30 - __d, 31 - __d));
+#else
+ return vec_perm(
+ __a, __b,
+ (vector unsigned char)(__d, __d + 1, __d + 2, __d + 3, __d + 4, __d + 5,
+ __d + 6, __d + 7, __d + 8, __d + 9, __d + 10,
+ __d + 11, __d + 12, __d + 13, __d + 14, __d + 15));
+#endif
+}
+
+static __inline__ vector bool int __ATTRS_o_ai vec_sld(vector bool int __a,
+ vector bool int __b,
+ unsigned const int __c) {
+ unsigned char __d = __c & 0x0F;
+#ifdef __LITTLE_ENDIAN__
+ return vec_perm(
+ __b, __a, (vector unsigned char)(16 - __d, 17 - __d, 18 - __d, 19 - __d,
+ 20 - __d, 21 - __d, 22 - __d, 23 - __d,
+ 24 - __d, 25 - __d, 26 - __d, 27 - __d,
+ 28 - __d, 29 - __d, 30 - __d, 31 - __d));
+#else
+ return vec_perm(
+ __a, __b,
+ (vector unsigned char)(__d, __d + 1, __d + 2, __d + 3, __d + 4, __d + 5,
+ __d + 6, __d + 7, __d + 8, __d + 9, __d + 10,
+ __d + 11, __d + 12, __d + 13, __d + 14, __d + 15));
+#endif
+}
+
+static __inline__ vector float __ATTRS_o_ai vec_sld(vector float __a,
+ vector float __b,
+ unsigned const int __c) {
+ unsigned char __d = __c & 0x0F;
+#ifdef __LITTLE_ENDIAN__
+ return vec_perm(
+ __b, __a, (vector unsigned char)(16 - __d, 17 - __d, 18 - __d, 19 - __d,
+ 20 - __d, 21 - __d, 22 - __d, 23 - __d,
+ 24 - __d, 25 - __d, 26 - __d, 27 - __d,
+ 28 - __d, 29 - __d, 30 - __d, 31 - __d));
+#else
+ return vec_perm(
+ __a, __b,
+ (vector unsigned char)(__d, __d + 1, __d + 2, __d + 3, __d + 4, __d + 5,
+ __d + 6, __d + 7, __d + 8, __d + 9, __d + 10,
+ __d + 11, __d + 12, __d + 13, __d + 14, __d + 15));
+#endif
+}
+
+/* vec_vsldoi */
+
+static __inline__ vector signed char __ATTRS_o_ai
+vec_vsldoi(vector signed char __a, vector signed char __b, unsigned char __c) {
+ unsigned char __d = __c & 0x0F;
+#ifdef __LITTLE_ENDIAN__
+ return vec_perm(
+ __b, __a, (vector unsigned char)(16 - __d, 17 - __d, 18 - __d, 19 - __d,
+ 20 - __d, 21 - __d, 22 - __d, 23 - __d,
+ 24 - __d, 25 - __d, 26 - __d, 27 - __d,
+ 28 - __d, 29 - __d, 30 - __d, 31 - __d));
+#else
+ return vec_perm(
+ __a, __b,
+ (vector unsigned char)(__d, __d + 1, __d + 2, __d + 3, __d + 4, __d + 5,
+ __d + 6, __d + 7, __d + 8, __d + 9, __d + 10,
+ __d + 11, __d + 12, __d + 13, __d + 14, __d + 15));
+#endif
+}
+
+static __inline__ vector unsigned char __ATTRS_o_ai vec_vsldoi(
+ vector unsigned char __a, vector unsigned char __b, unsigned char __c) {
+ unsigned char __d = __c & 0x0F;
+#ifdef __LITTLE_ENDIAN__
+ return vec_perm(
+ __b, __a, (vector unsigned char)(16 - __d, 17 - __d, 18 - __d, 19 - __d,
+ 20 - __d, 21 - __d, 22 - __d, 23 - __d,
+ 24 - __d, 25 - __d, 26 - __d, 27 - __d,
+ 28 - __d, 29 - __d, 30 - __d, 31 - __d));
+#else
+ return vec_perm(
+ __a, __b,
+ (vector unsigned char)(__d, __d + 1, __d + 2, __d + 3, __d + 4, __d + 5,
+ __d + 6, __d + 7, __d + 8, __d + 9, __d + 10,
+ __d + 11, __d + 12, __d + 13, __d + 14, __d + 15));
+#endif
+}
+
+static __inline__ vector short __ATTRS_o_ai vec_vsldoi(vector short __a,
+ vector short __b,
+ unsigned char __c) {
+ unsigned char __d = __c & 0x0F;
+#ifdef __LITTLE_ENDIAN__
+ return vec_perm(
+ __b, __a, (vector unsigned char)(16 - __d, 17 - __d, 18 - __d, 19 - __d,
+ 20 - __d, 21 - __d, 22 - __d, 23 - __d,
+ 24 - __d, 25 - __d, 26 - __d, 27 - __d,
+ 28 - __d, 29 - __d, 30 - __d, 31 - __d));
+#else
+ return vec_perm(
+ __a, __b,
+ (vector unsigned char)(__d, __d + 1, __d + 2, __d + 3, __d + 4, __d + 5,
+ __d + 6, __d + 7, __d + 8, __d + 9, __d + 10,
+ __d + 11, __d + 12, __d + 13, __d + 14, __d + 15));
+#endif
+}
+
+static __inline__ vector unsigned short __ATTRS_o_ai vec_vsldoi(
+ vector unsigned short __a, vector unsigned short __b, unsigned char __c) {
+ unsigned char __d = __c & 0x0F;
+#ifdef __LITTLE_ENDIAN__
+ return vec_perm(
+ __b, __a, (vector unsigned char)(16 - __d, 17 - __d, 18 - __d, 19 - __d,
+ 20 - __d, 21 - __d, 22 - __d, 23 - __d,
+ 24 - __d, 25 - __d, 26 - __d, 27 - __d,
+ 28 - __d, 29 - __d, 30 - __d, 31 - __d));
+#else
+ return vec_perm(
+ __a, __b,
+ (vector unsigned char)(__d, __d + 1, __d + 2, __d + 3, __d + 4, __d + 5,
+ __d + 6, __d + 7, __d + 8, __d + 9, __d + 10,
+ __d + 11, __d + 12, __d + 13, __d + 14, __d + 15));
+#endif
+}
+
+static __inline__ vector pixel __ATTRS_o_ai vec_vsldoi(vector pixel __a,
+ vector pixel __b,
+ unsigned char __c) {
+ unsigned char __d = __c & 0x0F;
+#ifdef __LITTLE_ENDIAN__
+ return vec_perm(
+ __b, __a, (vector unsigned char)(16 - __d, 17 - __d, 18 - __d, 19 - __d,
+ 20 - __d, 21 - __d, 22 - __d, 23 - __d,
+ 24 - __d, 25 - __d, 26 - __d, 27 - __d,
+ 28 - __d, 29 - __d, 30 - __d, 31 - __d));
+#else
+ return vec_perm(
+ __a, __b,
+ (vector unsigned char)(__d, __d + 1, __d + 2, __d + 3, __d + 4, __d + 5,
+ __d + 6, __d + 7, __d + 8, __d + 9, __d + 10,
+ __d + 11, __d + 12, __d + 13, __d + 14, __d + 15));
+#endif
+}
+
+static __inline__ vector int __ATTRS_o_ai vec_vsldoi(vector int __a,
+ vector int __b,
+ unsigned char __c) {
+ unsigned char __d = __c & 0x0F;
+#ifdef __LITTLE_ENDIAN__
+ return vec_perm(
+ __b, __a, (vector unsigned char)(16 - __d, 17 - __d, 18 - __d, 19 - __d,
+ 20 - __d, 21 - __d, 22 - __d, 23 - __d,
+ 24 - __d, 25 - __d, 26 - __d, 27 - __d,
+ 28 - __d, 29 - __d, 30 - __d, 31 - __d));
+#else
+ return vec_perm(
+ __a, __b,
+ (vector unsigned char)(__d, __d + 1, __d + 2, __d + 3, __d + 4, __d + 5,
+ __d + 6, __d + 7, __d + 8, __d + 9, __d + 10,
+ __d + 11, __d + 12, __d + 13, __d + 14, __d + 15));
+#endif
+}
+
+static __inline__ vector unsigned int __ATTRS_o_ai vec_vsldoi(
+ vector unsigned int __a, vector unsigned int __b, unsigned char __c) {
+ unsigned char __d = __c & 0x0F;
+#ifdef __LITTLE_ENDIAN__
+ return vec_perm(
+ __b, __a, (vector unsigned char)(16 - __d, 17 - __d, 18 - __d, 19 - __d,
+ 20 - __d, 21 - __d, 22 - __d, 23 - __d,
+ 24 - __d, 25 - __d, 26 - __d, 27 - __d,
+ 28 - __d, 29 - __d, 30 - __d, 31 - __d));
+#else
+ return vec_perm(
+ __a, __b,
+ (vector unsigned char)(__d, __d + 1, __d + 2, __d + 3, __d + 4, __d + 5,
+ __d + 6, __d + 7, __d + 8, __d + 9, __d + 10,
+ __d + 11, __d + 12, __d + 13, __d + 14, __d + 15));
+#endif
+}
+
+static __inline__ vector float __ATTRS_o_ai vec_vsldoi(vector float __a,
+ vector float __b,
+ unsigned char __c) {
+ unsigned char __d = __c & 0x0F;
+#ifdef __LITTLE_ENDIAN__
+ return vec_perm(
+ __b, __a, (vector unsigned char)(16 - __d, 17 - __d, 18 - __d, 19 - __d,
+ 20 - __d, 21 - __d, 22 - __d, 23 - __d,
+ 24 - __d, 25 - __d, 26 - __d, 27 - __d,
+ 28 - __d, 29 - __d, 30 - __d, 31 - __d));
+#else
+ return vec_perm(
+ __a, __b,
+ (vector unsigned char)(__d, __d + 1, __d + 2, __d + 3, __d + 4, __d + 5,
+ __d + 6, __d + 7, __d + 8, __d + 9, __d + 10,
+ __d + 11, __d + 12, __d + 13, __d + 14, __d + 15));
+#endif
+}
+
+/* vec_sll */
+
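+// Note: vec_sll shifts the entire 128-bit contents of __a left by 0-7 bits;
+// per the ISA the shift count must be replicated in the low 3 bits of every
+// byte of __b (splatting the count across __b is the usual idiom).
+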
+static __inline__ vector signed char __ATTRS_o_ai
+vec_sll(vector signed char __a, vector unsigned char __b) {
+ return (vector signed char)__builtin_altivec_vsl((vector int)__a,
+ (vector int)__b);
+}
+
+static __inline__ vector signed char __ATTRS_o_ai
+vec_sll(vector signed char __a, vector unsigned short __b) {
+ return (vector signed char)__builtin_altivec_vsl((vector int)__a,
+ (vector int)__b);
+}
+
+static __inline__ vector signed char __ATTRS_o_ai
+vec_sll(vector signed char __a, vector unsigned int __b) {
+ return (vector signed char)__builtin_altivec_vsl((vector int)__a,
+ (vector int)__b);
+}
+
+static __inline__ vector unsigned char __ATTRS_o_ai
+vec_sll(vector unsigned char __a, vector unsigned char __b) {
+ return (vector unsigned char)__builtin_altivec_vsl((vector int)__a,
+ (vector int)__b);
+}
+
+static __inline__ vector unsigned char __ATTRS_o_ai
+vec_sll(vector unsigned char __a, vector unsigned short __b) {
+ return (vector unsigned char)__builtin_altivec_vsl((vector int)__a,
+ (vector int)__b);
+}
+
+static __inline__ vector unsigned char __ATTRS_o_ai
+vec_sll(vector unsigned char __a, vector unsigned int __b) {
+ return (vector unsigned char)__builtin_altivec_vsl((vector int)__a,
+ (vector int)__b);
+}
+
+static __inline__ vector bool char __ATTRS_o_ai
+vec_sll(vector bool char __a, vector unsigned char __b) {
+ return (vector bool char)__builtin_altivec_vsl((vector int)__a,
+ (vector int)__b);
+}
+
+static __inline__ vector bool char __ATTRS_o_ai
+vec_sll(vector bool char __a, vector unsigned short __b) {
+ return (vector bool char)__builtin_altivec_vsl((vector int)__a,
+ (vector int)__b);
+}
+
+static __inline__ vector bool char __ATTRS_o_ai
+vec_sll(vector bool char __a, vector unsigned int __b) {
+ return (vector bool char)__builtin_altivec_vsl((vector int)__a,
+ (vector int)__b);
+}
+
+static __inline__ vector short __ATTRS_o_ai vec_sll(vector short __a,
+ vector unsigned char __b) {
+ return (vector short)__builtin_altivec_vsl((vector int)__a, (vector int)__b);
+}
+
+static __inline__ vector short __ATTRS_o_ai vec_sll(vector short __a,
+ vector unsigned short __b) {
+ return (vector short)__builtin_altivec_vsl((vector int)__a, (vector int)__b);
+}
+
+static __inline__ vector short __ATTRS_o_ai vec_sll(vector short __a,
+ vector unsigned int __b) {
+ return (vector short)__builtin_altivec_vsl((vector int)__a, (vector int)__b);
+}
+
+static __inline__ vector unsigned short __ATTRS_o_ai
+vec_sll(vector unsigned short __a, vector unsigned char __b) {
+ return (vector unsigned short)__builtin_altivec_vsl((vector int)__a,
+ (vector int)__b);
+}
+
+static __inline__ vector unsigned short __ATTRS_o_ai
+vec_sll(vector unsigned short __a, vector unsigned short __b) {
+ return (vector unsigned short)__builtin_altivec_vsl((vector int)__a,
+ (vector int)__b);
+}
+
+static __inline__ vector unsigned short __ATTRS_o_ai
+vec_sll(vector unsigned short __a, vector unsigned int __b) {
+ return (vector unsigned short)__builtin_altivec_vsl((vector int)__a,
+ (vector int)__b);
+}
+
+static __inline__ vector bool short __ATTRS_o_ai
+vec_sll(vector bool short __a, vector unsigned char __b) {
+ return (vector bool short)__builtin_altivec_vsl((vector int)__a,
+ (vector int)__b);
+}
+
+static __inline__ vector bool short __ATTRS_o_ai
+vec_sll(vector bool short __a, vector unsigned short __b) {
+ return (vector bool short)__builtin_altivec_vsl((vector int)__a,
+ (vector int)__b);
+}
+
+static __inline__ vector bool short __ATTRS_o_ai
+vec_sll(vector bool short __a, vector unsigned int __b) {
+ return (vector bool short)__builtin_altivec_vsl((vector int)__a,
+ (vector int)__b);
+}
+
+static __inline__ vector pixel __ATTRS_o_ai vec_sll(vector pixel __a,
+ vector unsigned char __b) {
+ return (vector pixel)__builtin_altivec_vsl((vector int)__a, (vector int)__b);
+}
+
+static __inline__ vector pixel __ATTRS_o_ai vec_sll(vector pixel __a,
+ vector unsigned short __b) {
+ return (vector pixel)__builtin_altivec_vsl((vector int)__a, (vector int)__b);
+}
+
+static __inline__ vector pixel __ATTRS_o_ai vec_sll(vector pixel __a,
+ vector unsigned int __b) {
+ return (vector pixel)__builtin_altivec_vsl((vector int)__a, (vector int)__b);
+}
+
+static __inline__ vector int __ATTRS_o_ai vec_sll(vector int __a,
+ vector unsigned char __b) {
+ return (vector int)__builtin_altivec_vsl(__a, (vector int)__b);
+}
+
+static __inline__ vector int __ATTRS_o_ai vec_sll(vector int __a,
+ vector unsigned short __b) {
+ return (vector int)__builtin_altivec_vsl(__a, (vector int)__b);
+}
+
+static __inline__ vector int __ATTRS_o_ai vec_sll(vector int __a,
+ vector unsigned int __b) {
+ return (vector int)__builtin_altivec_vsl(__a, (vector int)__b);
+}
+
+static __inline__ vector unsigned int __ATTRS_o_ai
+vec_sll(vector unsigned int __a, vector unsigned char __b) {
+ return (vector unsigned int)__builtin_altivec_vsl((vector int)__a,
+ (vector int)__b);
+}
+
+static __inline__ vector unsigned int __ATTRS_o_ai
+vec_sll(vector unsigned int __a, vector unsigned short __b) {
+ return (vector unsigned int)__builtin_altivec_vsl((vector int)__a,
+ (vector int)__b);
+}
+
+static __inline__ vector unsigned int __ATTRS_o_ai
+vec_sll(vector unsigned int __a, vector unsigned int __b) {
+ return (vector unsigned int)__builtin_altivec_vsl((vector int)__a,
+ (vector int)__b);
+}
+
+static __inline__ vector bool int __ATTRS_o_ai
+vec_sll(vector bool int __a, vector unsigned char __b) {
+ return (vector bool int)__builtin_altivec_vsl((vector int)__a,
+ (vector int)__b);
+}
+
+static __inline__ vector bool int __ATTRS_o_ai
+vec_sll(vector bool int __a, vector unsigned short __b) {
+ return (vector bool int)__builtin_altivec_vsl((vector int)__a,
+ (vector int)__b);
+}
+
+static __inline__ vector bool int __ATTRS_o_ai
+vec_sll(vector bool int __a, vector unsigned int __b) {
+ return (vector bool int)__builtin_altivec_vsl((vector int)__a,
+ (vector int)__b);
+}
+
+/* vec_vsl */
+
+static __inline__ vector signed char __ATTRS_o_ai
+vec_vsl(vector signed char __a, vector unsigned char __b) {
+ return (vector signed char)__builtin_altivec_vsl((vector int)__a,
+ (vector int)__b);
+}
+
+static __inline__ vector signed char __ATTRS_o_ai
+vec_vsl(vector signed char __a, vector unsigned short __b) {
+ return (vector signed char)__builtin_altivec_vsl((vector int)__a,
+ (vector int)__b);
+}
+
+static __inline__ vector signed char __ATTRS_o_ai
+vec_vsl(vector signed char __a, vector unsigned int __b) {
+ return (vector signed char)__builtin_altivec_vsl((vector int)__a,
+ (vector int)__b);
+}
+
+static __inline__ vector unsigned char __ATTRS_o_ai
+vec_vsl(vector unsigned char __a, vector unsigned char __b) {
+ return (vector unsigned char)__builtin_altivec_vsl((vector int)__a,
+ (vector int)__b);
+}
+
+static __inline__ vector unsigned char __ATTRS_o_ai
+vec_vsl(vector unsigned char __a, vector unsigned short __b) {
+ return (vector unsigned char)__builtin_altivec_vsl((vector int)__a,
+ (vector int)__b);
+}
+
+static __inline__ vector unsigned char __ATTRS_o_ai
+vec_vsl(vector unsigned char __a, vector unsigned int __b) {
+ return (vector unsigned char)__builtin_altivec_vsl((vector int)__a,
+ (vector int)__b);
+}
+
+static __inline__ vector bool char __ATTRS_o_ai
+vec_vsl(vector bool char __a, vector unsigned char __b) {
+ return (vector bool char)__builtin_altivec_vsl((vector int)__a,
+ (vector int)__b);
+}
+
+static __inline__ vector bool char __ATTRS_o_ai
+vec_vsl(vector bool char __a, vector unsigned short __b) {
+ return (vector bool char)__builtin_altivec_vsl((vector int)__a,
+ (vector int)__b);
+}
+
+static __inline__ vector bool char __ATTRS_o_ai
+vec_vsl(vector bool char __a, vector unsigned int __b) {
+ return (vector bool char)__builtin_altivec_vsl((vector int)__a,
+ (vector int)__b);
+}
+
+static __inline__ vector short __ATTRS_o_ai vec_vsl(vector short __a,
+ vector unsigned char __b) {
+ return (vector short)__builtin_altivec_vsl((vector int)__a, (vector int)__b);
+}
+
+static __inline__ vector short __ATTRS_o_ai vec_vsl(vector short __a,
+ vector unsigned short __b) {
+ return (vector short)__builtin_altivec_vsl((vector int)__a, (vector int)__b);
+}
+
+static __inline__ vector short __ATTRS_o_ai vec_vsl(vector short __a,
+ vector unsigned int __b) {
+ return (vector short)__builtin_altivec_vsl((vector int)__a, (vector int)__b);
+}
+
+static __inline__ vector unsigned short __ATTRS_o_ai
+vec_vsl(vector unsigned short __a, vector unsigned char __b) {
+ return (vector unsigned short)__builtin_altivec_vsl((vector int)__a,
+ (vector int)__b);
+}
+
+static __inline__ vector unsigned short __ATTRS_o_ai
+vec_vsl(vector unsigned short __a, vector unsigned short __b) {
+ return (vector unsigned short)__builtin_altivec_vsl((vector int)__a,
+ (vector int)__b);
+}
+
+static __inline__ vector unsigned short __ATTRS_o_ai
+vec_vsl(vector unsigned short __a, vector unsigned int __b) {
+ return (vector unsigned short)__builtin_altivec_vsl((vector int)__a,
+ (vector int)__b);
+}
+
+static __inline__ vector bool short __ATTRS_o_ai
+vec_vsl(vector bool short __a, vector unsigned char __b) {
+ return (vector bool short)__builtin_altivec_vsl((vector int)__a,
+ (vector int)__b);
+}
+
+static __inline__ vector bool short __ATTRS_o_ai
+vec_vsl(vector bool short __a, vector unsigned short __b) {
+ return (vector bool short)__builtin_altivec_vsl((vector int)__a,
+ (vector int)__b);
+}
+
+static __inline__ vector bool short __ATTRS_o_ai
+vec_vsl(vector bool short __a, vector unsigned int __b) {
+ return (vector bool short)__builtin_altivec_vsl((vector int)__a,
+ (vector int)__b);
+}
+
+static __inline__ vector pixel __ATTRS_o_ai vec_vsl(vector pixel __a,
+ vector unsigned char __b) {
+ return (vector pixel)__builtin_altivec_vsl((vector int)__a, (vector int)__b);
+}
+
+static __inline__ vector pixel __ATTRS_o_ai vec_vsl(vector pixel __a,
+ vector unsigned short __b) {
+ return (vector pixel)__builtin_altivec_vsl((vector int)__a, (vector int)__b);
+}
+
+static __inline__ vector pixel __ATTRS_o_ai vec_vsl(vector pixel __a,
+ vector unsigned int __b) {
+ return (vector pixel)__builtin_altivec_vsl((vector int)__a, (vector int)__b);
+}
+
+static __inline__ vector int __ATTRS_o_ai vec_vsl(vector int __a,
+ vector unsigned char __b) {
+ return (vector int)__builtin_altivec_vsl(__a, (vector int)__b);
+}
+
+static __inline__ vector int __ATTRS_o_ai vec_vsl(vector int __a,
+ vector unsigned short __b) {
+ return (vector int)__builtin_altivec_vsl(__a, (vector int)__b);
+}
+
+static __inline__ vector int __ATTRS_o_ai vec_vsl(vector int __a,
+ vector unsigned int __b) {
+ return (vector int)__builtin_altivec_vsl(__a, (vector int)__b);
+}
+
+static __inline__ vector unsigned int __ATTRS_o_ai
+vec_vsl(vector unsigned int __a, vector unsigned char __b) {
+ return (vector unsigned int)__builtin_altivec_vsl((vector int)__a,
+ (vector int)__b);
+}
+
+static __inline__ vector unsigned int __ATTRS_o_ai
+vec_vsl(vector unsigned int __a, vector unsigned short __b) {
+ return (vector unsigned int)__builtin_altivec_vsl((vector int)__a,
+ (vector int)__b);
+}
+
+static __inline__ vector unsigned int __ATTRS_o_ai
+vec_vsl(vector unsigned int __a, vector unsigned int __b) {
+ return (vector unsigned int)__builtin_altivec_vsl((vector int)__a,
+ (vector int)__b);
+}
+
+static __inline__ vector bool int __ATTRS_o_ai
+vec_vsl(vector bool int __a, vector unsigned char __b) {
+ return (vector bool int)__builtin_altivec_vsl((vector int)__a,
+ (vector int)__b);
+}
+
+static __inline__ vector bool int __ATTRS_o_ai
+vec_vsl(vector bool int __a, vector unsigned short __b) {
+ return (vector bool int)__builtin_altivec_vsl((vector int)__a,
+ (vector int)__b);
+}
+
+static __inline__ vector bool int __ATTRS_o_ai
+vec_vsl(vector bool int __a, vector unsigned int __b) {
+ return (vector bool int)__builtin_altivec_vsl((vector int)__a,
+ (vector int)__b);
+}
+
+/* vec_slo */
+
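+// Note: vec_slo shifts the entire vector left by 0-15 bytes; the count is
+// taken from bits 121:124 of __b.  Pairing vec_slo (byte shift) with
+// vec_sll (bit shift) yields a full 0-127 bit shift.
+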
+static __inline__ vector signed char __ATTRS_o_ai
+vec_slo(vector signed char __a, vector signed char __b) {
+ return (vector signed char)__builtin_altivec_vslo((vector int)__a,
+ (vector int)__b);
+}
+
+static __inline__ vector signed char __ATTRS_o_ai
+vec_slo(vector signed char __a, vector unsigned char __b) {
+ return (vector signed char)__builtin_altivec_vslo((vector int)__a,
+ (vector int)__b);
+}
+
+static __inline__ vector unsigned char __ATTRS_o_ai
+vec_slo(vector unsigned char __a, vector signed char __b) {
+ return (vector unsigned char)__builtin_altivec_vslo((vector int)__a,
+ (vector int)__b);
+}
+
+static __inline__ vector unsigned char __ATTRS_o_ai
+vec_slo(vector unsigned char __a, vector unsigned char __b) {
+ return (vector unsigned char)__builtin_altivec_vslo((vector int)__a,
+ (vector int)__b);
+}
+
+static __inline__ vector short __ATTRS_o_ai vec_slo(vector short __a,
+ vector signed char __b) {
+ return (vector short)__builtin_altivec_vslo((vector int)__a, (vector int)__b);
+}
+
+static __inline__ vector short __ATTRS_o_ai vec_slo(vector short __a,
+ vector unsigned char __b) {
+ return (vector short)__builtin_altivec_vslo((vector int)__a, (vector int)__b);
+}
+
+static __inline__ vector unsigned short __ATTRS_o_ai
+vec_slo(vector unsigned short __a, vector signed char __b) {
+ return (vector unsigned short)__builtin_altivec_vslo((vector int)__a,
+ (vector int)__b);
+}
+
+static __inline__ vector unsigned short __ATTRS_o_ai
+vec_slo(vector unsigned short __a, vector unsigned char __b) {
+ return (vector unsigned short)__builtin_altivec_vslo((vector int)__a,
+ (vector int)__b);
+}
+
+static __inline__ vector pixel __ATTRS_o_ai vec_slo(vector pixel __a,
+ vector signed char __b) {
+ return (vector pixel)__builtin_altivec_vslo((vector int)__a, (vector int)__b);
+}
+
+static __inline__ vector pixel __ATTRS_o_ai vec_slo(vector pixel __a,
+ vector unsigned char __b) {
+ return (vector pixel)__builtin_altivec_vslo((vector int)__a, (vector int)__b);
+}
+
+static __inline__ vector int __ATTRS_o_ai vec_slo(vector int __a,
+ vector signed char __b) {
+ return (vector int)__builtin_altivec_vslo(__a, (vector int)__b);
+}
+
+static __inline__ vector int __ATTRS_o_ai vec_slo(vector int __a,
+ vector unsigned char __b) {
+ return (vector int)__builtin_altivec_vslo(__a, (vector int)__b);
+}
+
+static __inline__ vector unsigned int __ATTRS_o_ai
+vec_slo(vector unsigned int __a, vector signed char __b) {
+ return (vector unsigned int)__builtin_altivec_vslo((vector int)__a,
+ (vector int)__b);
+}
+
+static __inline__ vector unsigned int __ATTRS_o_ai
+vec_slo(vector unsigned int __a, vector unsigned char __b) {
+ return (vector unsigned int)__builtin_altivec_vslo((vector int)__a,
+ (vector int)__b);
+}
+
+static __inline__ vector float __ATTRS_o_ai vec_slo(vector float __a,
+ vector signed char __b) {
+ return (vector float)__builtin_altivec_vslo((vector int)__a, (vector int)__b);
+}
+
+static __inline__ vector float __ATTRS_o_ai vec_slo(vector float __a,
+ vector unsigned char __b) {
+ return (vector float)__builtin_altivec_vslo((vector int)__a, (vector int)__b);
+}
+
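+/* Editor's note (illustrative, not part of the upstream header): vec_slo
+ * (instruction vslo) shifts the whole 128-bit register left by 0-15
+ * bytes; the octet count is read from bits 121:124 of __b, so splatting
+ * 8*n into every byte shifts by n bytes. Pairing vec_slo (whole octets)
+ * with the bitwise vec_vsl above covers any 0-127 bit left shift. A
+ * minimal sketch, shifting left by one byte:
+ *
+ *   vector unsigned char v = vec_splat_u8(1);
+ *   vector unsigned char r = vec_slo(v, vec_splat_u8(8));
+ */
+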
+/* vec_vslo */
+
+static __inline__ vector signed char __ATTRS_o_ai
+vec_vslo(vector signed char __a, vector signed char __b) {
+ return (vector signed char)__builtin_altivec_vslo((vector int)__a,
+ (vector int)__b);
+}
+
+static __inline__ vector signed char __ATTRS_o_ai
+vec_vslo(vector signed char __a, vector unsigned char __b) {
+ return (vector signed char)__builtin_altivec_vslo((vector int)__a,
+ (vector int)__b);
+}
+
+static __inline__ vector unsigned char __ATTRS_o_ai
+vec_vslo(vector unsigned char __a, vector signed char __b) {
+ return (vector unsigned char)__builtin_altivec_vslo((vector int)__a,
+ (vector int)__b);
+}
+
+static __inline__ vector unsigned char __ATTRS_o_ai
+vec_vslo(vector unsigned char __a, vector unsigned char __b) {
+ return (vector unsigned char)__builtin_altivec_vslo((vector int)__a,
+ (vector int)__b);
+}
+
+static __inline__ vector short __ATTRS_o_ai vec_vslo(vector short __a,
+ vector signed char __b) {
+ return (vector short)__builtin_altivec_vslo((vector int)__a, (vector int)__b);
+}
+
+static __inline__ vector short __ATTRS_o_ai vec_vslo(vector short __a,
+ vector unsigned char __b) {
+ return (vector short)__builtin_altivec_vslo((vector int)__a, (vector int)__b);
+}
+
+static __inline__ vector unsigned short __ATTRS_o_ai
+vec_vslo(vector unsigned short __a, vector signed char __b) {
+ return (vector unsigned short)__builtin_altivec_vslo((vector int)__a,
+ (vector int)__b);
+}
+
+static __inline__ vector unsigned short __ATTRS_o_ai
+vec_vslo(vector unsigned short __a, vector unsigned char __b) {
+ return (vector unsigned short)__builtin_altivec_vslo((vector int)__a,
+ (vector int)__b);
+}
+
+static __inline__ vector pixel __ATTRS_o_ai vec_vslo(vector pixel __a,
+ vector signed char __b) {
+ return (vector pixel)__builtin_altivec_vslo((vector int)__a, (vector int)__b);
+}
+
+static __inline__ vector pixel __ATTRS_o_ai vec_vslo(vector pixel __a,
+ vector unsigned char __b) {
+ return (vector pixel)__builtin_altivec_vslo((vector int)__a, (vector int)__b);
+}
+
+static __inline__ vector int __ATTRS_o_ai vec_vslo(vector int __a,
+ vector signed char __b) {
+ return (vector int)__builtin_altivec_vslo(__a, (vector int)__b);
+}
+
+static __inline__ vector int __ATTRS_o_ai vec_vslo(vector int __a,
+ vector unsigned char __b) {
+ return (vector int)__builtin_altivec_vslo(__a, (vector int)__b);
+}
+
+static __inline__ vector unsigned int __ATTRS_o_ai
+vec_vslo(vector unsigned int __a, vector signed char __b) {
+ return (vector unsigned int)__builtin_altivec_vslo((vector int)__a,
+ (vector int)__b);
+}
+
+static __inline__ vector unsigned int __ATTRS_o_ai
+vec_vslo(vector unsigned int __a, vector unsigned char __b) {
+ return (vector unsigned int)__builtin_altivec_vslo((vector int)__a,
+ (vector int)__b);
+}
+
+static __inline__ vector float __ATTRS_o_ai vec_vslo(vector float __a,
+ vector signed char __b) {
+ return (vector float)__builtin_altivec_vslo((vector int)__a, (vector int)__b);
+}
+
+static __inline__ vector float __ATTRS_o_ai vec_vslo(vector float __a,
+ vector unsigned char __b) {
+ return (vector float)__builtin_altivec_vslo((vector int)__a, (vector int)__b);
+}
+
+/* vec_splat */
+
+static __inline__ vector signed char __ATTRS_o_ai
+vec_splat(vector signed char __a, unsigned const int __b) {
+ return vec_perm(__a, __a, (vector unsigned char)(__b & 0x0F));
+}
+
+static __inline__ vector unsigned char __ATTRS_o_ai
+vec_splat(vector unsigned char __a, unsigned const int __b) {
+ return vec_perm(__a, __a, (vector unsigned char)(__b & 0x0F));
+}
+
+static __inline__ vector bool char __ATTRS_o_ai
+vec_splat(vector bool char __a, unsigned const int __b) {
+ return vec_perm(__a, __a, (vector unsigned char)(__b & 0x0F));
+}
+
+static __inline__ vector signed short __ATTRS_o_ai
+vec_splat(vector signed short __a, unsigned const int __b) {
+ unsigned char b0 = (__b & 0x07) * 2;
+ unsigned char b1 = b0 + 1;
+ return vec_perm(__a, __a,
+ (vector unsigned char)(b0, b1, b0, b1, b0, b1, b0, b1, b0, b1,
+ b0, b1, b0, b1, b0, b1));
+}
+
+static __inline__ vector unsigned short __ATTRS_o_ai
+vec_splat(vector unsigned short __a, unsigned const int __b) {
+ unsigned char b0 = (__b & 0x07) * 2;
+ unsigned char b1 = b0 + 1;
+ return vec_perm(__a, __a,
+ (vector unsigned char)(b0, b1, b0, b1, b0, b1, b0, b1, b0, b1,
+ b0, b1, b0, b1, b0, b1));
+}
+
+static __inline__ vector bool short __ATTRS_o_ai
+vec_splat(vector bool short __a, unsigned const int __b) {
+ unsigned char b0 = (__b & 0x07) * 2;
+ unsigned char b1 = b0 + 1;
+ return vec_perm(__a, __a,
+ (vector unsigned char)(b0, b1, b0, b1, b0, b1, b0, b1, b0, b1,
+ b0, b1, b0, b1, b0, b1));
+}
+
+static __inline__ vector pixel __ATTRS_o_ai vec_splat(vector pixel __a,
+ unsigned const int __b) {
+ unsigned char b0 = (__b & 0x07) * 2;
+ unsigned char b1 = b0 + 1;
+ return vec_perm(__a, __a,
+ (vector unsigned char)(b0, b1, b0, b1, b0, b1, b0, b1, b0, b1,
+ b0, b1, b0, b1, b0, b1));
+}
+
+static __inline__ vector signed int __ATTRS_o_ai
+vec_splat(vector signed int __a, unsigned const int __b) {
+ unsigned char b0 = (__b & 0x03) * 4;
+ unsigned char b1 = b0 + 1, b2 = b0 + 2, b3 = b0 + 3;
+ return vec_perm(__a, __a,
+ (vector unsigned char)(b0, b1, b2, b3, b0, b1, b2, b3, b0, b1,
+ b2, b3, b0, b1, b2, b3));
+}
+
+static __inline__ vector unsigned int __ATTRS_o_ai
+vec_splat(vector unsigned int __a, unsigned const int __b) {
+ unsigned char b0 = (__b & 0x03) * 4;
+ unsigned char b1 = b0 + 1, b2 = b0 + 2, b3 = b0 + 3;
+ return vec_perm(__a, __a,
+ (vector unsigned char)(b0, b1, b2, b3, b0, b1, b2, b3, b0, b1,
+ b2, b3, b0, b1, b2, b3));
+}
+
+static __inline__ vector bool int __ATTRS_o_ai
+vec_splat(vector bool int __a, unsigned const int __b) {
+ unsigned char b0 = (__b & 0x03) * 4;
+ unsigned char b1 = b0 + 1, b2 = b0 + 2, b3 = b0 + 3;
+ return vec_perm(__a, __a,
+ (vector unsigned char)(b0, b1, b2, b3, b0, b1, b2, b3, b0, b1,
+ b2, b3, b0, b1, b2, b3));
+}
+
+static __inline__ vector float __ATTRS_o_ai vec_splat(vector float __a,
+ unsigned const int __b) {
+ unsigned char b0 = (__b & 0x03) * 4;
+ unsigned char b1 = b0 + 1, b2 = b0 + 2, b3 = b0 + 3;
+ return vec_perm(__a, __a,
+ (vector unsigned char)(b0, b1, b2, b3, b0, b1, b2, b3, b0, b1,
+ b2, b3, b0, b1, b2, b3));
+}
+
+#ifdef __VSX__
+static __inline__ vector double __ATTRS_o_ai vec_splat(vector double __a,
+ unsigned const int __b) {
+ unsigned char b0 = (__b & 0x01) * 8;
+ unsigned char b1 = b0 + 1, b2 = b0 + 2, b3 = b0 + 3, b4 = b0 + 4, b5 = b0 + 5,
+ b6 = b0 + 6, b7 = b0 + 7;
+ return vec_perm(__a, __a,
+ (vector unsigned char)(b0, b1, b2, b3, b4, b5, b6, b7, b0, b1,
+ b2, b3, b4, b5, b6, b7));
+}
+static __inline__ vector bool long long __ATTRS_o_ai
+vec_splat(vector bool long long __a, unsigned const int __b) {
+ unsigned char b0 = (__b & 0x01) * 8;
+ unsigned char b1 = b0 + 1, b2 = b0 + 2, b3 = b0 + 3, b4 = b0 + 4, b5 = b0 + 5,
+ b6 = b0 + 6, b7 = b0 + 7;
+ return vec_perm(__a, __a,
+ (vector unsigned char)(b0, b1, b2, b3, b4, b5, b6, b7, b0, b1,
+ b2, b3, b4, b5, b6, b7));
+}
+static __inline__ vector signed long long __ATTRS_o_ai
+vec_splat(vector signed long long __a, unsigned const int __b) {
+ unsigned char b0 = (__b & 0x01) * 8;
+ unsigned char b1 = b0 + 1, b2 = b0 + 2, b3 = b0 + 3, b4 = b0 + 4, b5 = b0 + 5,
+ b6 = b0 + 6, b7 = b0 + 7;
+ return vec_perm(__a, __a,
+ (vector unsigned char)(b0, b1, b2, b3, b4, b5, b6, b7, b0, b1,
+ b2, b3, b4, b5, b6, b7));
+}
+static __inline__ vector unsigned long long __ATTRS_o_ai
+vec_splat(vector unsigned long long __a, unsigned const int __b) {
+ unsigned char b0 = (__b & 0x01) * 8;
+ unsigned char b1 = b0 + 1, b2 = b0 + 2, b3 = b0 + 3, b4 = b0 + 4, b5 = b0 + 5,
+ b6 = b0 + 6, b7 = b0 + 7;
+ return vec_perm(__a, __a,
+ (vector unsigned char)(b0, b1, b2, b3, b4, b5, b6, b7, b0, b1,
+ b2, b3, b4, b5, b6, b7));
+}
+#endif
+
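+/* Editor's note (illustrative, not part of the upstream header):
+ * vec_splat replicates element __b of __a (taken modulo the element
+ * count, hence the masks above) into every lane, implemented here as a
+ * vec_perm with a repeating index pattern. A minimal sketch:
+ *
+ *   vector signed int v = {10, 20, 30, 40};
+ *   vector signed int s = vec_splat(v, 2);  // s = {30, 30, 30, 30}
+ */
+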
+/* vec_vspltb */
+
+#define __builtin_altivec_vspltb vec_vspltb
+
+static __inline__ vector signed char __ATTRS_o_ai
+vec_vspltb(vector signed char __a, unsigned char __b) {
+ return vec_perm(__a, __a, (vector unsigned char)(__b));
+}
+
+static __inline__ vector unsigned char __ATTRS_o_ai
+vec_vspltb(vector unsigned char __a, unsigned char __b) {
+ return vec_perm(__a, __a, (vector unsigned char)(__b));
+}
+
+static __inline__ vector bool char __ATTRS_o_ai vec_vspltb(vector bool char __a,
+ unsigned char __b) {
+ return vec_perm(__a, __a, (vector unsigned char)(__b));
+}
+
+/* vec_vsplth */
+
+#define __builtin_altivec_vsplth vec_vsplth
+
+static __inline__ vector short __ATTRS_o_ai vec_vsplth(vector short __a,
+ unsigned char __b) {
+ __b *= 2;
+ unsigned char b1 = __b + 1;
+ return vec_perm(__a, __a,
+ (vector unsigned char)(__b, b1, __b, b1, __b, b1, __b, b1,
+ __b, b1, __b, b1, __b, b1, __b, b1));
+}
+
+static __inline__ vector unsigned short __ATTRS_o_ai
+vec_vsplth(vector unsigned short __a, unsigned char __b) {
+ __b *= 2;
+ unsigned char b1 = __b + 1;
+ return vec_perm(__a, __a,
+ (vector unsigned char)(__b, b1, __b, b1, __b, b1, __b, b1,
+ __b, b1, __b, b1, __b, b1, __b, b1));
+}
+
+static __inline__ vector bool short __ATTRS_o_ai
+vec_vsplth(vector bool short __a, unsigned char __b) {
+ __b *= 2;
+ unsigned char b1 = __b + 1;
+ return vec_perm(__a, __a,
+ (vector unsigned char)(__b, b1, __b, b1, __b, b1, __b, b1,
+ __b, b1, __b, b1, __b, b1, __b, b1));
+}
+
+static __inline__ vector pixel __ATTRS_o_ai vec_vsplth(vector pixel __a,
+ unsigned char __b) {
+ __b *= 2;
+ unsigned char b1 = __b + 1;
+ return vec_perm(__a, __a,
+ (vector unsigned char)(__b, b1, __b, b1, __b, b1, __b, b1,
+ __b, b1, __b, b1, __b, b1, __b, b1));
+}
+
+/* vec_vspltw */
+
+#define __builtin_altivec_vspltw vec_vspltw
+
+static __inline__ vector int __ATTRS_o_ai vec_vspltw(vector int __a,
+ unsigned char __b) {
+ __b *= 4;
+ unsigned char b1 = __b + 1, b2 = __b + 2, b3 = __b + 3;
+ return vec_perm(__a, __a,
+ (vector unsigned char)(__b, b1, b2, b3, __b, b1, b2, b3, __b,
+ b1, b2, b3, __b, b1, b2, b3));
+}
+
+static __inline__ vector unsigned int __ATTRS_o_ai
+vec_vspltw(vector unsigned int __a, unsigned char __b) {
+ __b *= 4;
+ unsigned char b1 = __b + 1, b2 = __b + 2, b3 = __b + 3;
+ return vec_perm(__a, __a,
+ (vector unsigned char)(__b, b1, b2, b3, __b, b1, b2, b3, __b,
+ b1, b2, b3, __b, b1, b2, b3));
+}
+
+static __inline__ vector bool int __ATTRS_o_ai vec_vspltw(vector bool int __a,
+ unsigned char __b) {
+ __b *= 4;
+ unsigned char b1 = __b + 1, b2 = __b + 2, b3 = __b + 3;
+ return vec_perm(__a, __a,
+ (vector unsigned char)(__b, b1, b2, b3, __b, b1, b2, b3, __b,
+ b1, b2, b3, __b, b1, b2, b3));
+}
+
+static __inline__ vector float __ATTRS_o_ai vec_vspltw(vector float __a,
+ unsigned char __b) {
+ __b *= 4;
+ unsigned char b1 = __b + 1, b2 = __b + 2, b3 = __b + 3;
+ return vec_perm(__a, __a,
+ (vector unsigned char)(__b, b1, b2, b3, __b, b1, b2, b3, __b,
+ b1, b2, b3, __b, b1, b2, b3));
+}
+
+/* vec_splat_s8 */
+
+#define __builtin_altivec_vspltisb vec_splat_s8
+
+// FIXME: parameter should be treated as a 5-bit signed literal
+static __inline__ vector signed char __ATTRS_o_ai
+vec_splat_s8(signed char __a) {
+ return (vector signed char)(__a);
+}
+
+/* vec_vspltisb */
+
+// FIXME: parameter should be treated as a 5-bit signed literal
+static __inline__ vector signed char __ATTRS_o_ai
+vec_vspltisb(signed char __a) {
+ return (vector signed char)(__a);
+}
+
+/* vec_splat_s16 */
+
+#define __builtin_altivec_vspltish vec_splat_s16
+
+// FIXME: parameter should be treated as a 5-bit signed literal
+static __inline__ vector short __ATTRS_o_ai vec_splat_s16(signed char __a) {
+ return (vector short)(__a);
+}
+
+/* vec_vspltish */
+
+// FIXME: parameter should be treated as a 5-bit signed literal
+static __inline__ vector short __ATTRS_o_ai vec_vspltish(signed char __a) {
+ return (vector short)(__a);
+}
+
+/* vec_splat_s32 */
+
+#define __builtin_altivec_vspltisw vec_splat_s32
+
+// FIXME: parameter should be treated as a 5-bit signed literal
+static __inline__ vector int __ATTRS_o_ai vec_splat_s32(signed char __a) {
+ return (vector int)(__a);
+}
+
+/* vec_vspltisw */
+
+// FIXME: parameter should be treated as a 5-bit signed literal
+static __inline__ vector int __ATTRS_o_ai vec_vspltisw(signed char __a) {
+ return (vector int)(__a);
+}
+
+/* vec_splat_u8 */
+
+// FIXME: parameter should be treated as a 5-bit signed literal
+static __inline__ vector unsigned char __ATTRS_o_ai
+vec_splat_u8(unsigned char __a) {
+ return (vector unsigned char)(__a);
+}
+
+/* vec_splat_u16 */
+
+// FIXME: parameter should be treated as a 5-bit signed literal
+static __inline__ vector unsigned short __ATTRS_o_ai
+vec_splat_u16(signed char __a) {
+ return (vector unsigned short)(__a);
+}
+
+/* vec_splat_u32 */
+
+// FIXME: parameter should be treated as a 5-bit signed literal
+static __inline__ vector unsigned int __ATTRS_o_ai
+vec_splat_u32(signed char __a) {
+ return (vector unsigned int)(__a);
+}
+
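+/* Editor's note (illustrative, not part of the upstream header): the
+ * vec_splat_s8/s16/s32 and vec_splat_u8/u16/u32 forms splat a small
+ * immediate rather than a vector element; as the FIXME comments say,
+ * the argument is meant to be a 5-bit signed literal (-16..15), even
+ * though these inline versions accept any char-sized value. A minimal
+ * sketch:
+ *
+ *   vector signed short h = vec_splat_s16(-7); // all eight elements -7
+ *   vector unsigned char b = vec_splat_u8(3);  // all sixteen bytes 3
+ */
+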
+/* vec_sr */
+
+static __inline__ vector signed char __ATTRS_o_ai
+vec_sr(vector signed char __a, vector unsigned char __b) {
+ vector unsigned char __res = (vector unsigned char)__a >> __b;
+ return (vector signed char)__res;
+}
+
+static __inline__ vector unsigned char __ATTRS_o_ai
+vec_sr(vector unsigned char __a, vector unsigned char __b) {
+ return __a >> __b;
+}
+
+static __inline__ vector signed short __ATTRS_o_ai
+vec_sr(vector signed short __a, vector unsigned short __b) {
+ vector unsigned short __res = (vector unsigned short)__a >> __b;
+ return (vector signed short)__res;
+}
+
+static __inline__ vector unsigned short __ATTRS_o_ai
+vec_sr(vector unsigned short __a, vector unsigned short __b) {
+ return __a >> __b;
+}
+
+static __inline__ vector signed int __ATTRS_o_ai
+vec_sr(vector signed int __a, vector unsigned int __b) {
+ vector unsigned int __res = (vector unsigned int)__a >> __b;
+ return (vector signed int)__res;
+}
+
+static __inline__ vector unsigned int __ATTRS_o_ai
+vec_sr(vector unsigned int __a, vector unsigned int __b) {
+ return __a >> __b;
+}
+
+#ifdef __POWER8_VECTOR__
+static __inline__ vector signed long long __ATTRS_o_ai
+vec_sr(vector signed long long __a, vector unsigned long long __b) {
+ vector unsigned long long __res = (vector unsigned long long)__a >> __b;
+ return (vector signed long long)__res;
+}
+
+static __inline__ vector unsigned long long __ATTRS_o_ai
+vec_sr(vector unsigned long long __a, vector unsigned long long __b) {
+ return __a >> __b;
+}
+#endif
+
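+/* Editor's note (illustrative, not part of the upstream header): vec_sr
+ * is an element-wise logical shift right; the signed overloads above
+ * detour through the unsigned type so zeros, not sign bits, are shifted
+ * in, matching the vsrb/vsrh/vsrw instructions (which also take the
+ * count modulo the element width). A minimal sketch:
+ *
+ *   vector signed int v = {-16, -16, -16, -16};
+ *   vector unsigned int sh = {2, 2, 2, 2};
+ *   vector signed int r = vec_sr(v, sh);  // zeros enter from the left
+ */
+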
+/* vec_vsrb */
+
+#define __builtin_altivec_vsrb vec_vsrb
+
+static __inline__ vector signed char __ATTRS_o_ai
+vec_vsrb(vector signed char __a, vector unsigned char __b) {
+ return __a >> (vector signed char)__b;
+}
+
+static __inline__ vector unsigned char __ATTRS_o_ai
+vec_vsrb(vector unsigned char __a, vector unsigned char __b) {
+ return __a >> __b;
+}
+
+/* vec_vsrh */
+
+#define __builtin_altivec_vsrh vec_vsrh
+
+static __inline__ vector short __ATTRS_o_ai
+vec_vsrh(vector short __a, vector unsigned short __b) {
+ return __a >> (vector short)__b;
+}
+
+static __inline__ vector unsigned short __ATTRS_o_ai
+vec_vsrh(vector unsigned short __a, vector unsigned short __b) {
+ return __a >> __b;
+}
+
+/* vec_vsrw */
+
+#define __builtin_altivec_vsrw vec_vsrw
+
+static __inline__ vector int __ATTRS_o_ai vec_vsrw(vector int __a,
+ vector unsigned int __b) {
+ return __a >> (vector int)__b;
+}
+
+static __inline__ vector unsigned int __ATTRS_o_ai
+vec_vsrw(vector unsigned int __a, vector unsigned int __b) {
+ return __a >> __b;
+}
+
+/* vec_sra */
+
+static __inline__ vector signed char __ATTRS_o_ai
+vec_sra(vector signed char __a, vector unsigned char __b) {
+ return (vector signed char)__builtin_altivec_vsrab((vector char)__a, __b);
+}
+
+static __inline__ vector unsigned char __ATTRS_o_ai
+vec_sra(vector unsigned char __a, vector unsigned char __b) {
+ return (vector unsigned char)__builtin_altivec_vsrab((vector char)__a, __b);
+}
+
+static __inline__ vector short __ATTRS_o_ai vec_sra(vector short __a,
+ vector unsigned short __b) {
+ return __builtin_altivec_vsrah(__a, (vector unsigned short)__b);
+}
+
+static __inline__ vector unsigned short __ATTRS_o_ai
+vec_sra(vector unsigned short __a, vector unsigned short __b) {
+ return (vector unsigned short)__builtin_altivec_vsrah((vector short)__a, __b);
+}
+
+static __inline__ vector int __ATTRS_o_ai vec_sra(vector int __a,
+ vector unsigned int __b) {
+ return __builtin_altivec_vsraw(__a, __b);
+}
+
+static __inline__ vector unsigned int __ATTRS_o_ai
+vec_sra(vector unsigned int __a, vector unsigned int __b) {
+ return (vector unsigned int)__builtin_altivec_vsraw((vector int)__a, __b);
+}
+
+#ifdef __POWER8_VECTOR__
+static __inline__ vector signed long long __ATTRS_o_ai
+vec_sra(vector signed long long __a, vector unsigned long long __b) {
+ return __a >> __b;
+}
+
+static __inline__ vector unsigned long long __ATTRS_o_ai
+vec_sra(vector unsigned long long __a, vector unsigned long long __b) {
+ return (vector unsigned long long)((vector signed long long)__a >> __b);
+}
+#endif
+
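+/* Editor's note (illustrative, not part of the upstream header): vec_sra
+ * is the algebraic counterpart of vec_sr: vsrab/vsrah/vsraw shift copies
+ * of the sign bit in from the left. A minimal sketch:
+ *
+ *   vector signed int v = {-16, -16, -16, -16};
+ *   vector unsigned int sh = {2, 2, 2, 2};
+ *   vector signed int r = vec_sra(v, sh);  // r = {-4, -4, -4, -4}
+ */
+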
+/* vec_vsrab */
+
+static __inline__ vector signed char __ATTRS_o_ai
+vec_vsrab(vector signed char __a, vector unsigned char __b) {
+ return (vector signed char)__builtin_altivec_vsrab((vector char)__a, __b);
+}
+
+static __inline__ vector unsigned char __ATTRS_o_ai
+vec_vsrab(vector unsigned char __a, vector unsigned char __b) {
+ return (vector unsigned char)__builtin_altivec_vsrab((vector char)__a, __b);
+}
+
+/* vec_vsrah */
+
+static __inline__ vector short __ATTRS_o_ai
+vec_vsrah(vector short __a, vector unsigned short __b) {
+ return __builtin_altivec_vsrah(__a, (vector unsigned short)__b);
+}
+
+static __inline__ vector unsigned short __ATTRS_o_ai
+vec_vsrah(vector unsigned short __a, vector unsigned short __b) {
+ return (vector unsigned short)__builtin_altivec_vsrah((vector short)__a, __b);
+}
+
+/* vec_vsraw */
+
+static __inline__ vector int __ATTRS_o_ai vec_vsraw(vector int __a,
+ vector unsigned int __b) {
+ return __builtin_altivec_vsraw(__a, __b);
+}
+
+static __inline__ vector unsigned int __ATTRS_o_ai
+vec_vsraw(vector unsigned int __a, vector unsigned int __b) {
+ return (vector unsigned int)__builtin_altivec_vsraw((vector int)__a, __b);
+}
+
+/* vec_srl */
+
+static __inline__ vector signed char __ATTRS_o_ai
+vec_srl(vector signed char __a, vector unsigned char __b) {
+ return (vector signed char)__builtin_altivec_vsr((vector int)__a,
+ (vector int)__b);
+}
+
+static __inline__ vector signed char __ATTRS_o_ai
+vec_srl(vector signed char __a, vector unsigned short __b) {
+ return (vector signed char)__builtin_altivec_vsr((vector int)__a,
+ (vector int)__b);
+}
+
+static __inline__ vector signed char __ATTRS_o_ai
+vec_srl(vector signed char __a, vector unsigned int __b) {
+ return (vector signed char)__builtin_altivec_vsr((vector int)__a,
+ (vector int)__b);
+}
+
+static __inline__ vector unsigned char __ATTRS_o_ai
+vec_srl(vector unsigned char __a, vector unsigned char __b) {
+ return (vector unsigned char)__builtin_altivec_vsr((vector int)__a,
+ (vector int)__b);
+}
+
+static __inline__ vector unsigned char __ATTRS_o_ai
+vec_srl(vector unsigned char __a, vector unsigned short __b) {
+ return (vector unsigned char)__builtin_altivec_vsr((vector int)__a,
+ (vector int)__b);
+}
+
+static __inline__ vector unsigned char __ATTRS_o_ai
+vec_srl(vector unsigned char __a, vector unsigned int __b) {
+ return (vector unsigned char)__builtin_altivec_vsr((vector int)__a,
+ (vector int)__b);
+}
+
+static __inline__ vector bool char __ATTRS_o_ai
+vec_srl(vector bool char __a, vector unsigned char __b) {
+ return (vector bool char)__builtin_altivec_vsr((vector int)__a,
+ (vector int)__b);
+}
+
+static __inline__ vector bool char __ATTRS_o_ai
+vec_srl(vector bool char __a, vector unsigned short __b) {
+ return (vector bool char)__builtin_altivec_vsr((vector int)__a,
+ (vector int)__b);
+}
+
+static __inline__ vector bool char __ATTRS_o_ai
+vec_srl(vector bool char __a, vector unsigned int __b) {
+ return (vector bool char)__builtin_altivec_vsr((vector int)__a,
+ (vector int)__b);
+}
+
+static __inline__ vector short __ATTRS_o_ai vec_srl(vector short __a,
+ vector unsigned char __b) {
+ return (vector short)__builtin_altivec_vsr((vector int)__a, (vector int)__b);
+}
+
+static __inline__ vector short __ATTRS_o_ai vec_srl(vector short __a,
+ vector unsigned short __b) {
+ return (vector short)__builtin_altivec_vsr((vector int)__a, (vector int)__b);
+}
+
+static __inline__ vector short __ATTRS_o_ai vec_srl(vector short __a,
+ vector unsigned int __b) {
+ return (vector short)__builtin_altivec_vsr((vector int)__a, (vector int)__b);
+}
+
+static __inline__ vector unsigned short __ATTRS_o_ai
+vec_srl(vector unsigned short __a, vector unsigned char __b) {
+ return (vector unsigned short)__builtin_altivec_vsr((vector int)__a,
+ (vector int)__b);
+}
+
+static __inline__ vector unsigned short __ATTRS_o_ai
+vec_srl(vector unsigned short __a, vector unsigned short __b) {
+ return (vector unsigned short)__builtin_altivec_vsr((vector int)__a,
+ (vector int)__b);
+}
+
+static __inline__ vector unsigned short __ATTRS_o_ai
+vec_srl(vector unsigned short __a, vector unsigned int __b) {
+ return (vector unsigned short)__builtin_altivec_vsr((vector int)__a,
+ (vector int)__b);
+}
+
+static __inline__ vector bool short __ATTRS_o_ai
+vec_srl(vector bool short __a, vector unsigned char __b) {
+ return (vector bool short)__builtin_altivec_vsr((vector int)__a,
+ (vector int)__b);
+}
+
+static __inline__ vector bool short __ATTRS_o_ai
+vec_srl(vector bool short __a, vector unsigned short __b) {
+ return (vector bool short)__builtin_altivec_vsr((vector int)__a,
+ (vector int)__b);
+}
+
+static __inline__ vector bool short __ATTRS_o_ai
+vec_srl(vector bool short __a, vector unsigned int __b) {
+ return (vector bool short)__builtin_altivec_vsr((vector int)__a,
+ (vector int)__b);
+}
+
+static __inline__ vector pixel __ATTRS_o_ai vec_srl(vector pixel __a,
+ vector unsigned char __b) {
+ return (vector pixel)__builtin_altivec_vsr((vector int)__a, (vector int)__b);
+}
+
+static __inline__ vector pixel __ATTRS_o_ai vec_srl(vector pixel __a,
+ vector unsigned short __b) {
+ return (vector pixel)__builtin_altivec_vsr((vector int)__a, (vector int)__b);
+}
+
+static __inline__ vector pixel __ATTRS_o_ai vec_srl(vector pixel __a,
+ vector unsigned int __b) {
+ return (vector pixel)__builtin_altivec_vsr((vector int)__a, (vector int)__b);
+}
+
+static __inline__ vector int __ATTRS_o_ai vec_srl(vector int __a,
+ vector unsigned char __b) {
+ return (vector int)__builtin_altivec_vsr(__a, (vector int)__b);
+}
+
+static __inline__ vector int __ATTRS_o_ai vec_srl(vector int __a,
+ vector unsigned short __b) {
+ return (vector int)__builtin_altivec_vsr(__a, (vector int)__b);
+}
+
+static __inline__ vector int __ATTRS_o_ai vec_srl(vector int __a,
+ vector unsigned int __b) {
+ return (vector int)__builtin_altivec_vsr(__a, (vector int)__b);
+}
+
+static __inline__ vector unsigned int __ATTRS_o_ai
+vec_srl(vector unsigned int __a, vector unsigned char __b) {
+ return (vector unsigned int)__builtin_altivec_vsr((vector int)__a,
+ (vector int)__b);
+}
+
+static __inline__ vector unsigned int __ATTRS_o_ai
+vec_srl(vector unsigned int __a, vector unsigned short __b) {
+ return (vector unsigned int)__builtin_altivec_vsr((vector int)__a,
+ (vector int)__b);
+}
+
+static __inline__ vector unsigned int __ATTRS_o_ai
+vec_srl(vector unsigned int __a, vector unsigned int __b) {
+ return (vector unsigned int)__builtin_altivec_vsr((vector int)__a,
+ (vector int)__b);
+}
+
+static __inline__ vector bool int __ATTRS_o_ai
+vec_srl(vector bool int __a, vector unsigned char __b) {
+ return (vector bool int)__builtin_altivec_vsr((vector int)__a,
+ (vector int)__b);
+}
+
+static __inline__ vector bool int __ATTRS_o_ai
+vec_srl(vector bool int __a, vector unsigned short __b) {
+ return (vector bool int)__builtin_altivec_vsr((vector int)__a,
+ (vector int)__b);
+}
+
+static __inline__ vector bool int __ATTRS_o_ai
+vec_srl(vector bool int __a, vector unsigned int __b) {
+ return (vector bool int)__builtin_altivec_vsr((vector int)__a,
+ (vector int)__b);
+}
+
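+/* Editor's note (illustrative, not part of the upstream header): unlike
+ * the element-wise vec_sr, vec_srl (instruction vsr) shifts the whole
+ * 128-bit register right by 0-7 bits; the count sits in the low-order
+ * three bits of __b, and the ISA expects every byte of __b to hold the
+ * same count. A minimal sketch:
+ *
+ *   vector unsigned int v = {256, 0, 0, 0};
+ *   vector unsigned char sh = vec_splat_u8(4);
+ *   vector unsigned int r = vec_srl(v, sh);  // whole register >> 4 bits
+ */
+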
+/* vec_vsr */
+
+static __inline__ vector signed char __ATTRS_o_ai
+vec_vsr(vector signed char __a, vector unsigned char __b) {
+ return (vector signed char)__builtin_altivec_vsr((vector int)__a,
+ (vector int)__b);
+}
+
+static __inline__ vector signed char __ATTRS_o_ai
+vec_vsr(vector signed char __a, vector unsigned short __b) {
+ return (vector signed char)__builtin_altivec_vsr((vector int)__a,
+ (vector int)__b);
+}
+
+static __inline__ vector signed char __ATTRS_o_ai
+vec_vsr(vector signed char __a, vector unsigned int __b) {
+ return (vector signed char)__builtin_altivec_vsr((vector int)__a,
+ (vector int)__b);
+}
+
+static __inline__ vector unsigned char __ATTRS_o_ai
+vec_vsr(vector unsigned char __a, vector unsigned char __b) {
+ return (vector unsigned char)__builtin_altivec_vsr((vector int)__a,
+ (vector int)__b);
+}
+
+static __inline__ vector unsigned char __ATTRS_o_ai
+vec_vsr(vector unsigned char __a, vector unsigned short __b) {
+ return (vector unsigned char)__builtin_altivec_vsr((vector int)__a,
+ (vector int)__b);
+}
+
+static __inline__ vector unsigned char __ATTRS_o_ai
+vec_vsr(vector unsigned char __a, vector unsigned int __b) {
+ return (vector unsigned char)__builtin_altivec_vsr((vector int)__a,
+ (vector int)__b);
+}
+
+static __inline__ vector bool char __ATTRS_o_ai
+vec_vsr(vector bool char __a, vector unsigned char __b) {
+ return (vector bool char)__builtin_altivec_vsr((vector int)__a,
+ (vector int)__b);
+}
+
+static __inline__ vector bool char __ATTRS_o_ai
+vec_vsr(vector bool char __a, vector unsigned short __b) {
+ return (vector bool char)__builtin_altivec_vsr((vector int)__a,
+ (vector int)__b);
+}
+
+static __inline__ vector bool char __ATTRS_o_ai
+vec_vsr(vector bool char __a, vector unsigned int __b) {
+ return (vector bool char)__builtin_altivec_vsr((vector int)__a,
+ (vector int)__b);
+}
+
+static __inline__ vector short __ATTRS_o_ai vec_vsr(vector short __a,
+ vector unsigned char __b) {
+ return (vector short)__builtin_altivec_vsr((vector int)__a, (vector int)__b);
+}
+
+static __inline__ vector short __ATTRS_o_ai vec_vsr(vector short __a,
+ vector unsigned short __b) {
+ return (vector short)__builtin_altivec_vsr((vector int)__a, (vector int)__b);
+}
+
+static __inline__ vector short __ATTRS_o_ai vec_vsr(vector short __a,
+ vector unsigned int __b) {
+ return (vector short)__builtin_altivec_vsr((vector int)__a, (vector int)__b);
+}
+
+static __inline__ vector unsigned short __ATTRS_o_ai
+vec_vsr(vector unsigned short __a, vector unsigned char __b) {
+ return (vector unsigned short)__builtin_altivec_vsr((vector int)__a,
+ (vector int)__b);
+}
+
+static __inline__ vector unsigned short __ATTRS_o_ai
+vec_vsr(vector unsigned short __a, vector unsigned short __b) {
+ return (vector unsigned short)__builtin_altivec_vsr((vector int)__a,
+ (vector int)__b);
+}
+
+static __inline__ vector unsigned short __ATTRS_o_ai
+vec_vsr(vector unsigned short __a, vector unsigned int __b) {
+ return (vector unsigned short)__builtin_altivec_vsr((vector int)__a,
+ (vector int)__b);
+}
+
+static __inline__ vector bool short __ATTRS_o_ai
+vec_vsr(vector bool short __a, vector unsigned char __b) {
+ return (vector bool short)__builtin_altivec_vsr((vector int)__a,
+ (vector int)__b);
+}
+
+static __inline__ vector bool short __ATTRS_o_ai
+vec_vsr(vector bool short __a, vector unsigned short __b) {
+ return (vector bool short)__builtin_altivec_vsr((vector int)__a,
+ (vector int)__b);
+}
+
+static __inline__ vector bool short __ATTRS_o_ai
+vec_vsr(vector bool short __a, vector unsigned int __b) {
+ return (vector bool short)__builtin_altivec_vsr((vector int)__a,
+ (vector int)__b);
+}
+
+static __inline__ vector pixel __ATTRS_o_ai vec_vsr(vector pixel __a,
+ vector unsigned char __b) {
+ return (vector pixel)__builtin_altivec_vsr((vector int)__a, (vector int)__b);
+}
+
+static __inline__ vector pixel __ATTRS_o_ai vec_vsr(vector pixel __a,
+ vector unsigned short __b) {
+ return (vector pixel)__builtin_altivec_vsr((vector int)__a, (vector int)__b);
+}
+
+static __inline__ vector pixel __ATTRS_o_ai vec_vsr(vector pixel __a,
+ vector unsigned int __b) {
+ return (vector pixel)__builtin_altivec_vsr((vector int)__a, (vector int)__b);
+}
+
+static __inline__ vector int __ATTRS_o_ai vec_vsr(vector int __a,
+ vector unsigned char __b) {
+ return (vector int)__builtin_altivec_vsr(__a, (vector int)__b);
+}
+
+static __inline__ vector int __ATTRS_o_ai vec_vsr(vector int __a,
+ vector unsigned short __b) {
+ return (vector int)__builtin_altivec_vsr(__a, (vector int)__b);
+}
+
+static __inline__ vector int __ATTRS_o_ai vec_vsr(vector int __a,
+ vector unsigned int __b) {
+ return (vector int)__builtin_altivec_vsr(__a, (vector int)__b);
+}
+
+static __inline__ vector unsigned int __ATTRS_o_ai
+vec_vsr(vector unsigned int __a, vector unsigned char __b) {
+ return (vector unsigned int)__builtin_altivec_vsr((vector int)__a,
+ (vector int)__b);
+}
+
+static __inline__ vector unsigned int __ATTRS_o_ai
+vec_vsr(vector unsigned int __a, vector unsigned short __b) {
+ return (vector unsigned int)__builtin_altivec_vsr((vector int)__a,
+ (vector int)__b);
+}
+
+static __inline__ vector unsigned int __ATTRS_o_ai
+vec_vsr(vector unsigned int __a, vector unsigned int __b) {
+ return (vector unsigned int)__builtin_altivec_vsr((vector int)__a,
+ (vector int)__b);
+}
+
+static __inline__ vector bool int __ATTRS_o_ai
+vec_vsr(vector bool int __a, vector unsigned char __b) {
+ return (vector bool int)__builtin_altivec_vsr((vector int)__a,
+ (vector int)__b);
+}
+
+static __inline__ vector bool int __ATTRS_o_ai
+vec_vsr(vector bool int __a, vector unsigned short __b) {
+ return (vector bool int)__builtin_altivec_vsr((vector int)__a,
+ (vector int)__b);
+}
+
+static __inline__ vector bool int __ATTRS_o_ai
+vec_vsr(vector bool int __a, vector unsigned int __b) {
+ return (vector bool int)__builtin_altivec_vsr((vector int)__a,
+ (vector int)__b);
+}
+
+/* vec_sro */
+
+static __inline__ vector signed char __ATTRS_o_ai
+vec_sro(vector signed char __a, vector signed char __b) {
+ return (vector signed char)__builtin_altivec_vsro((vector int)__a,
+ (vector int)__b);
+}
+
+static __inline__ vector signed char __ATTRS_o_ai
+vec_sro(vector signed char __a, vector unsigned char __b) {
+ return (vector signed char)__builtin_altivec_vsro((vector int)__a,
+ (vector int)__b);
+}
+
+static __inline__ vector unsigned char __ATTRS_o_ai
+vec_sro(vector unsigned char __a, vector signed char __b) {
+ return (vector unsigned char)__builtin_altivec_vsro((vector int)__a,
+ (vector int)__b);
+}
+
+static __inline__ vector unsigned char __ATTRS_o_ai
+vec_sro(vector unsigned char __a, vector unsigned char __b) {
+ return (vector unsigned char)__builtin_altivec_vsro((vector int)__a,
+ (vector int)__b);
+}
+
+static __inline__ vector short __ATTRS_o_ai vec_sro(vector short __a,
+ vector signed char __b) {
+ return (vector short)__builtin_altivec_vsro((vector int)__a, (vector int)__b);
+}
+
+static __inline__ vector short __ATTRS_o_ai vec_sro(vector short __a,
+ vector unsigned char __b) {
+ return (vector short)__builtin_altivec_vsro((vector int)__a, (vector int)__b);
+}
+
+static __inline__ vector unsigned short __ATTRS_o_ai
+vec_sro(vector unsigned short __a, vector signed char __b) {
+ return (vector unsigned short)__builtin_altivec_vsro((vector int)__a,
+ (vector int)__b);
+}
+
+static __inline__ vector unsigned short __ATTRS_o_ai
+vec_sro(vector unsigned short __a, vector unsigned char __b) {
+ return (vector unsigned short)__builtin_altivec_vsro((vector int)__a,
+ (vector int)__b);
+}
+
+static __inline__ vector pixel __ATTRS_o_ai vec_sro(vector pixel __a,
+ vector signed char __b) {
+ return (vector pixel)__builtin_altivec_vsro((vector int)__a, (vector int)__b);
+}
+
+static __inline__ vector pixel __ATTRS_o_ai vec_sro(vector pixel __a,
+ vector unsigned char __b) {
+ return (vector pixel)__builtin_altivec_vsro((vector int)__a, (vector int)__b);
+}
+
+static __inline__ vector int __ATTRS_o_ai vec_sro(vector int __a,
+ vector signed char __b) {
+ return (vector int)__builtin_altivec_vsro(__a, (vector int)__b);
+}
+
+static __inline__ vector int __ATTRS_o_ai vec_sro(vector int __a,
+ vector unsigned char __b) {
+ return (vector int)__builtin_altivec_vsro(__a, (vector int)__b);
+}
+
+static __inline__ vector unsigned int __ATTRS_o_ai
+vec_sro(vector unsigned int __a, vector signed char __b) {
+ return (vector unsigned int)__builtin_altivec_vsro((vector int)__a,
+ (vector int)__b);
+}
+
+static __inline__ vector unsigned int __ATTRS_o_ai
+vec_sro(vector unsigned int __a, vector unsigned char __b) {
+ return (vector unsigned int)__builtin_altivec_vsro((vector int)__a,
+ (vector int)__b);
+}
+
+static __inline__ vector float __ATTRS_o_ai vec_sro(vector float __a,
+ vector signed char __b) {
+ return (vector float)__builtin_altivec_vsro((vector int)__a, (vector int)__b);
+}
+
+static __inline__ vector float __ATTRS_o_ai vec_sro(vector float __a,
+ vector unsigned char __b) {
+ return (vector float)__builtin_altivec_vsro((vector int)__a, (vector int)__b);
+}
+
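+/* Editor's note (illustrative, not part of the upstream header): vec_sro
+ * (instruction vsro) shifts the whole register right by 0-15 bytes,
+ * reading the octet count from bits 121:124 of __b; splatting 8*n into
+ * every byte therefore shifts by n bytes, and combining vec_sro with the
+ * bitwise vec_srl covers any 0-127 bit right shift. A minimal sketch,
+ * shifting right by one byte:
+ *
+ *   vector unsigned char v = vec_splat_u8(1);
+ *   vector unsigned char r = vec_sro(v, vec_splat_u8(8));
+ */
+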
+/* vec_vsro */
+
+static __inline__ vector signed char __ATTRS_o_ai
+vec_vsro(vector signed char __a, vector signed char __b) {
+ return (vector signed char)__builtin_altivec_vsro((vector int)__a,
+ (vector int)__b);
+}
+
+static __inline__ vector signed char __ATTRS_o_ai
+vec_vsro(vector signed char __a, vector unsigned char __b) {
+ return (vector signed char)__builtin_altivec_vsro((vector int)__a,
+ (vector int)__b);
+}
+
+static __inline__ vector unsigned char __ATTRS_o_ai
+vec_vsro(vector unsigned char __a, vector signed char __b) {
+ return (vector unsigned char)__builtin_altivec_vsro((vector int)__a,
+ (vector int)__b);
+}
+
+static __inline__ vector unsigned char __ATTRS_o_ai
+vec_vsro(vector unsigned char __a, vector unsigned char __b) {
+ return (vector unsigned char)__builtin_altivec_vsro((vector int)__a,
+ (vector int)__b);
+}
+
+static __inline__ vector short __ATTRS_o_ai vec_vsro(vector short __a,
+ vector signed char __b) {
+ return (vector short)__builtin_altivec_vsro((vector int)__a, (vector int)__b);
+}
+
+static __inline__ vector short __ATTRS_o_ai vec_vsro(vector short __a,
+ vector unsigned char __b) {
+ return (vector short)__builtin_altivec_vsro((vector int)__a, (vector int)__b);
+}
+
+static __inline__ vector unsigned short __ATTRS_o_ai
+vec_vsro(vector unsigned short __a, vector signed char __b) {
+ return (vector unsigned short)__builtin_altivec_vsro((vector int)__a,
+ (vector int)__b);
+}
+
+static __inline__ vector unsigned short __ATTRS_o_ai
+vec_vsro(vector unsigned short __a, vector unsigned char __b) {
+ return (vector unsigned short)__builtin_altivec_vsro((vector int)__a,
+ (vector int)__b);
+}
+
+static __inline__ vector pixel __ATTRS_o_ai vec_vsro(vector pixel __a,
+ vector signed char __b) {
+ return (vector pixel)__builtin_altivec_vsro((vector int)__a, (vector int)__b);
+}
+
+static __inline__ vector pixel __ATTRS_o_ai vec_vsro(vector pixel __a,
+ vector unsigned char __b) {
+ return (vector pixel)__builtin_altivec_vsro((vector int)__a, (vector int)__b);
+}
+
+static __inline__ vector int __ATTRS_o_ai vec_vsro(vector int __a,
+ vector signed char __b) {
+ return (vector int)__builtin_altivec_vsro(__a, (vector int)__b);
+}
+
+static __inline__ vector int __ATTRS_o_ai vec_vsro(vector int __a,
+ vector unsigned char __b) {
+ return (vector int)__builtin_altivec_vsro(__a, (vector int)__b);
+}
+
+static __inline__ vector unsigned int __ATTRS_o_ai
+vec_vsro(vector unsigned int __a, vector signed char __b) {
+ return (vector unsigned int)__builtin_altivec_vsro((vector int)__a,
+ (vector int)__b);
+}
+
+static __inline__ vector unsigned int __ATTRS_o_ai
+vec_vsro(vector unsigned int __a, vector unsigned char __b) {
+ return (vector unsigned int)__builtin_altivec_vsro((vector int)__a,
+ (vector int)__b);
+}
+
+static __inline__ vector float __ATTRS_o_ai vec_vsro(vector float __a,
+ vector signed char __b) {
+ return (vector float)__builtin_altivec_vsro((vector int)__a, (vector int)__b);
+}
+
+static __inline__ vector float __ATTRS_o_ai vec_vsro(vector float __a,
+ vector unsigned char __b) {
+ return (vector float)__builtin_altivec_vsro((vector int)__a, (vector int)__b);
+}
+
+/* vec_st */
+
+static __inline__ void __ATTRS_o_ai vec_st(vector signed char __a, int __b,
+ vector signed char *__c) {
+ __builtin_altivec_stvx((vector int)__a, __b, __c);
+}
+
+static __inline__ void __ATTRS_o_ai vec_st(vector signed char __a, int __b,
+ signed char *__c) {
+ __builtin_altivec_stvx((vector int)__a, __b, __c);
+}
+
+static __inline__ void __ATTRS_o_ai vec_st(vector unsigned char __a, int __b,
+ vector unsigned char *__c) {
+ __builtin_altivec_stvx((vector int)__a, __b, __c);
+}
+
+static __inline__ void __ATTRS_o_ai vec_st(vector unsigned char __a, int __b,
+ unsigned char *__c) {
+ __builtin_altivec_stvx((vector int)__a, __b, __c);
+}
+
+static __inline__ void __ATTRS_o_ai vec_st(vector bool char __a, int __b,
+ signed char *__c) {
+ __builtin_altivec_stvx((vector int)__a, __b, __c);
+}
+
+static __inline__ void __ATTRS_o_ai vec_st(vector bool char __a, int __b,
+ unsigned char *__c) {
+ __builtin_altivec_stvx((vector int)__a, __b, __c);
+}
+
+static __inline__ void __ATTRS_o_ai vec_st(vector bool char __a, int __b,
+ vector bool char *__c) {
+ __builtin_altivec_stvx((vector int)__a, __b, __c);
+}
+
+static __inline__ void __ATTRS_o_ai vec_st(vector short __a, int __b,
+ vector short *__c) {
+ __builtin_altivec_stvx((vector int)__a, __b, __c);
+}
+
+static __inline__ void __ATTRS_o_ai vec_st(vector short __a, int __b,
+ short *__c) {
+ __builtin_altivec_stvx((vector int)__a, __b, __c);
+}
+
+static __inline__ void __ATTRS_o_ai vec_st(vector unsigned short __a, int __b,
+ vector unsigned short *__c) {
+ __builtin_altivec_stvx((vector int)__a, __b, __c);
+}
+
+static __inline__ void __ATTRS_o_ai vec_st(vector unsigned short __a, int __b,
+ unsigned short *__c) {
+ __builtin_altivec_stvx((vector int)__a, __b, __c);
+}
+
+static __inline__ void __ATTRS_o_ai vec_st(vector bool short __a, int __b,
+ short *__c) {
+ __builtin_altivec_stvx((vector int)__a, __b, __c);
+}
+
+static __inline__ void __ATTRS_o_ai vec_st(vector bool short __a, int __b,
+ unsigned short *__c) {
+ __builtin_altivec_stvx((vector int)__a, __b, __c);
+}
+
+static __inline__ void __ATTRS_o_ai vec_st(vector bool short __a, int __b,
+ vector bool short *__c) {
+ __builtin_altivec_stvx((vector int)__a, __b, __c);
+}
+
+static __inline__ void __ATTRS_o_ai vec_st(vector pixel __a, int __b,
+ short *__c) {
+ __builtin_altivec_stvx((vector int)__a, __b, __c);
+}
+
+static __inline__ void __ATTRS_o_ai vec_st(vector pixel __a, int __b,
+ unsigned short *__c) {
+ __builtin_altivec_stvx((vector int)__a, __b, __c);
+}
+
+static __inline__ void __ATTRS_o_ai vec_st(vector pixel __a, int __b,
+ vector pixel *__c) {
+ __builtin_altivec_stvx((vector int)__a, __b, __c);
+}
+
+static __inline__ void __ATTRS_o_ai vec_st(vector int __a, int __b,
+ vector int *__c) {
+ __builtin_altivec_stvx(__a, __b, __c);
+}
+
+static __inline__ void __ATTRS_o_ai vec_st(vector int __a, int __b, int *__c) {
+ __builtin_altivec_stvx(__a, __b, __c);
+}
+
+static __inline__ void __ATTRS_o_ai vec_st(vector unsigned int __a, int __b,
+ vector unsigned int *__c) {
+ __builtin_altivec_stvx((vector int)__a, __b, __c);
+}
+
+static __inline__ void __ATTRS_o_ai vec_st(vector unsigned int __a, int __b,
+ unsigned int *__c) {
+ __builtin_altivec_stvx((vector int)__a, __b, __c);
+}
+
+static __inline__ void __ATTRS_o_ai vec_st(vector bool int __a, int __b,
+ int *__c) {
+ __builtin_altivec_stvx((vector int)__a, __b, __c);
+}
+
+static __inline__ void __ATTRS_o_ai vec_st(vector bool int __a, int __b,
+ unsigned int *__c) {
+ __builtin_altivec_stvx((vector int)__a, __b, __c);
+}
+
+static __inline__ void __ATTRS_o_ai vec_st(vector bool int __a, int __b,
+ vector bool int *__c) {
+ __builtin_altivec_stvx((vector int)__a, __b, __c);
+}
+
+static __inline__ void __ATTRS_o_ai vec_st(vector float __a, int __b,
+ vector float *__c) {
+ __builtin_altivec_stvx((vector int)__a, __b, __c);
+}
+
+static __inline__ void __ATTRS_o_ai vec_st(vector float __a, int __b,
+ float *__c) {
+ __builtin_altivec_stvx((vector int)__a, __b, __c);
+}
+
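+/* Editor's note (illustrative, not part of the upstream header): vec_st
+ * maps to stvx, which stores all 16 bytes of __a at the effective
+ * address __b + __c with the low four address bits cleared, so the
+ * destination is expected to be 16-byte aligned. A minimal sketch:
+ *
+ *   __attribute__((aligned(16))) int buf[4];
+ *   vector int v = {1, 2, 3, 4};
+ *   vec_st(v, 0, buf);  // buf now holds {1, 2, 3, 4}
+ */
+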
+/* vec_stvx */
+
+static __inline__ void __ATTRS_o_ai vec_stvx(vector signed char __a, int __b,
+ vector signed char *__c) {
+ __builtin_altivec_stvx((vector int)__a, __b, __c);
+}
+
+static __inline__ void __ATTRS_o_ai vec_stvx(vector signed char __a, int __b,
+ signed char *__c) {
+ __builtin_altivec_stvx((vector int)__a, __b, __c);
+}
+
+static __inline__ void __ATTRS_o_ai vec_stvx(vector unsigned char __a, int __b,
+ vector unsigned char *__c) {
+ __builtin_altivec_stvx((vector int)__a, __b, __c);
+}
+
+static __inline__ void __ATTRS_o_ai vec_stvx(vector unsigned char __a, int __b,
+ unsigned char *__c) {
+ __builtin_altivec_stvx((vector int)__a, __b, __c);
+}
+
+static __inline__ void __ATTRS_o_ai vec_stvx(vector bool char __a, int __b,
+ signed char *__c) {
+ __builtin_altivec_stvx((vector int)__a, __b, __c);
+}
+
+static __inline__ void __ATTRS_o_ai vec_stvx(vector bool char __a, int __b,
+ unsigned char *__c) {
+ __builtin_altivec_stvx((vector int)__a, __b, __c);
+}
+
+static __inline__ void __ATTRS_o_ai vec_stvx(vector bool char __a, int __b,
+ vector bool char *__c) {
+ __builtin_altivec_stvx((vector int)__a, __b, __c);
+}
+
+static __inline__ void __ATTRS_o_ai vec_stvx(vector short __a, int __b,
+ vector short *__c) {
+ __builtin_altivec_stvx((vector int)__a, __b, __c);
+}
+
+static __inline__ void __ATTRS_o_ai vec_stvx(vector short __a, int __b,
+ short *__c) {
+ __builtin_altivec_stvx((vector int)__a, __b, __c);
+}
+
+static __inline__ void __ATTRS_o_ai vec_stvx(vector unsigned short __a, int __b,
+ vector unsigned short *__c) {
+ __builtin_altivec_stvx((vector int)__a, __b, __c);
+}
+
+static __inline__ void __ATTRS_o_ai vec_stvx(vector unsigned short __a, int __b,
+ unsigned short *__c) {
+ __builtin_altivec_stvx((vector int)__a, __b, __c);
+}
+
+static __inline__ void __ATTRS_o_ai vec_stvx(vector bool short __a, int __b,
+ short *__c) {
+ __builtin_altivec_stvx((vector int)__a, __b, __c);
+}
+
+static __inline__ void __ATTRS_o_ai vec_stvx(vector bool short __a, int __b,
+ unsigned short *__c) {
+ __builtin_altivec_stvx((vector int)__a, __b, __c);
+}
+
+static __inline__ void __ATTRS_o_ai vec_stvx(vector bool short __a, int __b,
+ vector bool short *__c) {
+ __builtin_altivec_stvx((vector int)__a, __b, __c);
+}
+
+static __inline__ void __ATTRS_o_ai vec_stvx(vector pixel __a, int __b,
+ short *__c) {
+ __builtin_altivec_stvx((vector int)__a, __b, __c);
+}
+
+static __inline__ void __ATTRS_o_ai vec_stvx(vector pixel __a, int __b,
+ unsigned short *__c) {
+ __builtin_altivec_stvx((vector int)__a, __b, __c);
+}
+
+static __inline__ void __ATTRS_o_ai vec_stvx(vector pixel __a, int __b,
+ vector pixel *__c) {
+ __builtin_altivec_stvx((vector int)__a, __b, __c);
+}
+
+static __inline__ void __ATTRS_o_ai vec_stvx(vector int __a, int __b,
+ vector int *__c) {
+ __builtin_altivec_stvx(__a, __b, __c);
+}
+
+static __inline__ void __ATTRS_o_ai vec_stvx(vector int __a, int __b,
+ int *__c) {
+ __builtin_altivec_stvx(__a, __b, __c);
+}
+
+static __inline__ void __ATTRS_o_ai vec_stvx(vector unsigned int __a, int __b,
+ vector unsigned int *__c) {
+ __builtin_altivec_stvx((vector int)__a, __b, __c);
+}
+
+static __inline__ void __ATTRS_o_ai vec_stvx(vector unsigned int __a, int __b,
+ unsigned int *__c) {
+ __builtin_altivec_stvx((vector int)__a, __b, __c);
+}
+
+static __inline__ void __ATTRS_o_ai vec_stvx(vector bool int __a, int __b,
+ int *__c) {
+ __builtin_altivec_stvx((vector int)__a, __b, __c);
+}
+
+static __inline__ void __ATTRS_o_ai vec_stvx(vector bool int __a, int __b,
+ unsigned int *__c) {
+ __builtin_altivec_stvx((vector int)__a, __b, __c);
+}
+
+static __inline__ void __ATTRS_o_ai vec_stvx(vector bool int __a, int __b,
+ vector bool int *__c) {
+ __builtin_altivec_stvx((vector int)__a, __b, __c);
+}
+
+static __inline__ void __ATTRS_o_ai vec_stvx(vector float __a, int __b,
+ vector float *__c) {
+ __builtin_altivec_stvx((vector int)__a, __b, __c);
+}
+
+static __inline__ void __ATTRS_o_ai vec_stvx(vector float __a, int __b,
+ float *__c) {
+ __builtin_altivec_stvx((vector int)__a, __b, __c);
+}
+
+/* vec_ste */
+
+static __inline__ void __ATTRS_o_ai vec_ste(vector signed char __a, int __b,
+ signed char *__c) {
+ __builtin_altivec_stvebx((vector char)__a, __b, __c);
+}
+
+static __inline__ void __ATTRS_o_ai vec_ste(vector unsigned char __a, int __b,
+ unsigned char *__c) {
+ __builtin_altivec_stvebx((vector char)__a, __b, __c);
+}
+
+static __inline__ void __ATTRS_o_ai vec_ste(vector bool char __a, int __b,
+ signed char *__c) {
+ __builtin_altivec_stvebx((vector char)__a, __b, __c);
+}
+
+static __inline__ void __ATTRS_o_ai vec_ste(vector bool char __a, int __b,
+ unsigned char *__c) {
+ __builtin_altivec_stvebx((vector char)__a, __b, __c);
+}
+
+static __inline__ void __ATTRS_o_ai vec_ste(vector short __a, int __b,
+ short *__c) {
+ __builtin_altivec_stvehx(__a, __b, __c);
+}
+
+static __inline__ void __ATTRS_o_ai vec_ste(vector unsigned short __a, int __b,
+ unsigned short *__c) {
+ __builtin_altivec_stvehx((vector short)__a, __b, __c);
+}
+
+static __inline__ void __ATTRS_o_ai vec_ste(vector bool short __a, int __b,
+ short *__c) {
+ __builtin_altivec_stvehx((vector short)__a, __b, __c);
+}
+
+static __inline__ void __ATTRS_o_ai vec_ste(vector bool short __a, int __b,
+ unsigned short *__c) {
+ __builtin_altivec_stvehx((vector short)__a, __b, __c);
+}
+
+static __inline__ void __ATTRS_o_ai vec_ste(vector pixel __a, int __b,
+ short *__c) {
+ __builtin_altivec_stvehx((vector short)__a, __b, __c);
+}
+
+static __inline__ void __ATTRS_o_ai vec_ste(vector pixel __a, int __b,
+ unsigned short *__c) {
+ __builtin_altivec_stvehx((vector short)__a, __b, __c);
+}
+
+static __inline__ void __ATTRS_o_ai vec_ste(vector int __a, int __b, int *__c) {
+ __builtin_altivec_stvewx(__a, __b, __c);
+}
+
+static __inline__ void __ATTRS_o_ai vec_ste(vector unsigned int __a, int __b,
+ unsigned int *__c) {
+ __builtin_altivec_stvewx((vector int)__a, __b, __c);
+}
+
+static __inline__ void __ATTRS_o_ai vec_ste(vector bool int __a, int __b,
+ int *__c) {
+ __builtin_altivec_stvewx((vector int)__a, __b, __c);
+}
+
+static __inline__ void __ATTRS_o_ai vec_ste(vector bool int __a, int __b,
+ unsigned int *__c) {
+ __builtin_altivec_stvewx((vector int)__a, __b, __c);
+}
+
+static __inline__ void __ATTRS_o_ai vec_ste(vector float __a, int __b,
+ float *__c) {
+ __builtin_altivec_stvewx((vector int)__a, __b, __c);
+}
+
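+/* Editor's note (illustrative, not part of the upstream header): vec_ste
+ * stores a single element rather than the whole vector: stvebx, stvehx,
+ * and stvewx write the one byte, halfword, or word of __a whose lane
+ * corresponds to the (naturally aligned) effective address __b + __c.
+ * A minimal sketch:
+ *
+ *   __attribute__((aligned(16))) int buf[4] = {0, 0, 0, 0};
+ *   vector int v = {1, 2, 3, 4};
+ *   vec_ste(v, 8, buf);  // stores the lane that maps to offset 8
+ */
+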
+/* vec_stvebx */
+
+static __inline__ void __ATTRS_o_ai vec_stvebx(vector signed char __a, int __b,
+ signed char *__c) {
+ __builtin_altivec_stvebx((vector char)__a, __b, __c);
+}
+
+static __inline__ void __ATTRS_o_ai vec_stvebx(vector unsigned char __a,
+ int __b, unsigned char *__c) {
+ __builtin_altivec_stvebx((vector char)__a, __b, __c);
+}
+
+static __inline__ void __ATTRS_o_ai vec_stvebx(vector bool char __a, int __b,
+ signed char *__c) {
+ __builtin_altivec_stvebx((vector char)__a, __b, __c);
+}
+
+static __inline__ void __ATTRS_o_ai vec_stvebx(vector bool char __a, int __b,
+ unsigned char *__c) {
+ __builtin_altivec_stvebx((vector char)__a, __b, __c);
+}
+
+/* vec_stvehx */
+
+static __inline__ void __ATTRS_o_ai vec_stvehx(vector short __a, int __b,
+ short *__c) {
+ __builtin_altivec_stvehx(__a, __b, __c);
+}
+
+static __inline__ void __ATTRS_o_ai vec_stvehx(vector unsigned short __a,
+ int __b, unsigned short *__c) {
+ __builtin_altivec_stvehx((vector short)__a, __b, __c);
+}
+
+static __inline__ void __ATTRS_o_ai vec_stvehx(vector bool short __a, int __b,
+ short *__c) {
+ __builtin_altivec_stvehx((vector short)__a, __b, __c);
+}
+
+static __inline__ void __ATTRS_o_ai vec_stvehx(vector bool short __a, int __b,
+ unsigned short *__c) {
+ __builtin_altivec_stvehx((vector short)__a, __b, __c);
+}
+
+static __inline__ void __ATTRS_o_ai vec_stvehx(vector pixel __a, int __b,
+ short *__c) {
+ __builtin_altivec_stvehx((vector short)__a, __b, __c);
+}
+
+static __inline__ void __ATTRS_o_ai vec_stvehx(vector pixel __a, int __b,
+ unsigned short *__c) {
+ __builtin_altivec_stvehx((vector short)__a, __b, __c);
+}
+
+/* vec_stvewx */
+
+static __inline__ void __ATTRS_o_ai vec_stvewx(vector int __a, int __b,
+ int *__c) {
+ __builtin_altivec_stvewx(__a, __b, __c);
+}
+
+static __inline__ void __ATTRS_o_ai vec_stvewx(vector unsigned int __a, int __b,
+ unsigned int *__c) {
+ __builtin_altivec_stvewx((vector int)__a, __b, __c);
+}
+
+static __inline__ void __ATTRS_o_ai vec_stvewx(vector bool int __a, int __b,
+ int *__c) {
+ __builtin_altivec_stvewx((vector int)__a, __b, __c);
+}
+
+static __inline__ void __ATTRS_o_ai vec_stvewx(vector bool int __a, int __b,
+ unsigned int *__c) {
+ __builtin_altivec_stvewx((vector int)__a, __b, __c);
+}
+
+static __inline__ void __ATTRS_o_ai vec_stvewx(vector float __a, int __b,
+ float *__c) {
+ __builtin_altivec_stvewx((vector int)__a, __b, __c);
+}
+
+/* vec_stl */
+
+static __inline__ void __ATTRS_o_ai vec_stl(vector signed char __a, int __b,
+ vector signed char *__c) {
+ __builtin_altivec_stvxl((vector int)__a, __b, __c);
+}
+
+static __inline__ void __ATTRS_o_ai vec_stl(vector signed char __a, int __b,
+ signed char *__c) {
+ __builtin_altivec_stvxl((vector int)__a, __b, __c);
+}
+
+static __inline__ void __ATTRS_o_ai vec_stl(vector unsigned char __a, int __b,
+ vector unsigned char *__c) {
+ __builtin_altivec_stvxl((vector int)__a, __b, __c);
+}
+
+static __inline__ void __ATTRS_o_ai vec_stl(vector unsigned char __a, int __b,
+ unsigned char *__c) {
+ __builtin_altivec_stvxl((vector int)__a, __b, __c);
+}
+
+static __inline__ void __ATTRS_o_ai vec_stl(vector bool char __a, int __b,
+ signed char *__c) {
+ __builtin_altivec_stvxl((vector int)__a, __b, __c);
+}
+
+static __inline__ void __ATTRS_o_ai vec_stl(vector bool char __a, int __b,
+ unsigned char *__c) {
+ __builtin_altivec_stvxl((vector int)__a, __b, __c);
+}
+
+static __inline__ void __ATTRS_o_ai vec_stl(vector bool char __a, int __b,
+ vector bool char *__c) {
+ __builtin_altivec_stvxl((vector int)__a, __b, __c);
+}
+
+static __inline__ void __ATTRS_o_ai vec_stl(vector short __a, int __b,
+ vector short *__c) {
+ __builtin_altivec_stvxl((vector int)__a, __b, __c);
+}
+
+static __inline__ void __ATTRS_o_ai vec_stl(vector short __a, int __b,
+ short *__c) {
+ __builtin_altivec_stvxl((vector int)__a, __b, __c);
+}
+
+static __inline__ void __ATTRS_o_ai vec_stl(vector unsigned short __a, int __b,
+ vector unsigned short *__c) {
+ __builtin_altivec_stvxl((vector int)__a, __b, __c);
+}
+
+static __inline__ void __ATTRS_o_ai vec_stl(vector unsigned short __a, int __b,
+ unsigned short *__c) {
+ __builtin_altivec_stvxl((vector int)__a, __b, __c);
+}
+
+static __inline__ void __ATTRS_o_ai vec_stl(vector bool short __a, int __b,
+ short *__c) {
+ __builtin_altivec_stvxl((vector int)__a, __b, __c);
+}
+
+static __inline__ void __ATTRS_o_ai vec_stl(vector bool short __a, int __b,
+ unsigned short *__c) {
+ __builtin_altivec_stvxl((vector int)__a, __b, __c);
+}
+
+static __inline__ void __ATTRS_o_ai vec_stl(vector bool short __a, int __b,
+ vector bool short *__c) {
+ __builtin_altivec_stvxl((vector int)__a, __b, __c);
+}
+
+static __inline__ void __ATTRS_o_ai vec_stl(vector pixel __a, int __b,
+ short *__c) {
+ __builtin_altivec_stvxl((vector int)__a, __b, __c);
+}
+
+static __inline__ void __ATTRS_o_ai vec_stl(vector pixel __a, int __b,
+ unsigned short *__c) {
+ __builtin_altivec_stvxl((vector int)__a, __b, __c);
+}
+
+static __inline__ void __ATTRS_o_ai vec_stl(vector pixel __a, int __b,
+ vector pixel *__c) {
+ __builtin_altivec_stvxl((vector int)__a, __b, __c);
+}
+
+static __inline__ void __ATTRS_o_ai vec_stl(vector int __a, int __b,
+ vector int *__c) {
+ __builtin_altivec_stvxl(__a, __b, __c);
+}
+
+static __inline__ void __ATTRS_o_ai vec_stl(vector int __a, int __b, int *__c) {
+ __builtin_altivec_stvxl(__a, __b, __c);
+}
+
+static __inline__ void __ATTRS_o_ai vec_stl(vector unsigned int __a, int __b,
+ vector unsigned int *__c) {
+ __builtin_altivec_stvxl((vector int)__a, __b, __c);
+}
+
+static __inline__ void __ATTRS_o_ai vec_stl(vector unsigned int __a, int __b,
+ unsigned int *__c) {
+ __builtin_altivec_stvxl((vector int)__a, __b, __c);
+}
+
+static __inline__ void __ATTRS_o_ai vec_stl(vector bool int __a, int __b,
+ int *__c) {
+ __builtin_altivec_stvxl((vector int)__a, __b, __c);
+}
+
+static __inline__ void __ATTRS_o_ai vec_stl(vector bool int __a, int __b,
+ unsigned int *__c) {
+ __builtin_altivec_stvxl((vector int)__a, __b, __c);
+}
+
+static __inline__ void __ATTRS_o_ai vec_stl(vector bool int __a, int __b,
+ vector bool int *__c) {
+ __builtin_altivec_stvxl((vector int)__a, __b, __c);
+}
+
+static __inline__ void __ATTRS_o_ai vec_stl(vector float __a, int __b,
+ vector float *__c) {
+ __builtin_altivec_stvxl((vector int)__a, __b, __c);
+}
+
+static __inline__ void __ATTRS_o_ai vec_stl(vector float __a, int __b,
+ float *__c) {
+ __builtin_altivec_stvxl((vector int)__a, __b, __c);
+}
+
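+/* Editor's note (illustrative, not part of the upstream header): vec_stl
+ * maps to stvxl, which behaves like stvx but marks the touched cache
+ * line least-recently-used, hinting that the stored data is transient.
+ * Usage is otherwise identical to vec_st:
+ *
+ *   __attribute__((aligned(16))) float out[4];
+ *   vector float v = {0.0f, 1.0f, 2.0f, 3.0f};
+ *   vec_stl(v, 0, out);  // streaming-style store of all 16 bytes
+ */
+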
+/* vec_stvxl */
+
+static __inline__ void __ATTRS_o_ai vec_stvxl(vector signed char __a, int __b,
+ vector signed char *__c) {
+ __builtin_altivec_stvxl((vector int)__a, __b, __c);
+}
+
+static __inline__ void __ATTRS_o_ai vec_stvxl(vector signed char __a, int __b,
+ signed char *__c) {
+ __builtin_altivec_stvxl((vector int)__a, __b, __c);
+}
+
+static __inline__ void __ATTRS_o_ai vec_stvxl(vector unsigned char __a, int __b,
+ vector unsigned char *__c) {
+ __builtin_altivec_stvxl((vector int)__a, __b, __c);
+}
+
+static __inline__ void __ATTRS_o_ai vec_stvxl(vector unsigned char __a, int __b,
+ unsigned char *__c) {
+ __builtin_altivec_stvxl((vector int)__a, __b, __c);
+}
+
+static __inline__ void __ATTRS_o_ai vec_stvxl(vector bool char __a, int __b,
+ signed char *__c) {
+ __builtin_altivec_stvxl((vector int)__a, __b, __c);
+}
+
+static __inline__ void __ATTRS_o_ai vec_stvxl(vector bool char __a, int __b,
+ unsigned char *__c) {
+ __builtin_altivec_stvxl((vector int)__a, __b, __c);
+}
+
+static __inline__ void __ATTRS_o_ai vec_stvxl(vector bool char __a, int __b,
+ vector bool char *__c) {
+ __builtin_altivec_stvxl((vector int)__a, __b, __c);
+}
+
+static __inline__ void __ATTRS_o_ai vec_stvxl(vector short __a, int __b,
+ vector short *__c) {
+ __builtin_altivec_stvxl((vector int)__a, __b, __c);
+}
+
+static __inline__ void __ATTRS_o_ai vec_stvxl(vector short __a, int __b,
+ short *__c) {
+ __builtin_altivec_stvxl((vector int)__a, __b, __c);
+}
+
+static __inline__ void __ATTRS_o_ai vec_stvxl(vector unsigned short __a,
+ int __b,
+ vector unsigned short *__c) {
+ __builtin_altivec_stvxl((vector int)__a, __b, __c);
+}
+
+static __inline__ void __ATTRS_o_ai vec_stvxl(vector unsigned short __a,
+ int __b, unsigned short *__c) {
+ __builtin_altivec_stvxl((vector int)__a, __b, __c);
+}
+
+static __inline__ void __ATTRS_o_ai vec_stvxl(vector bool short __a, int __b,
+ short *__c) {
+ __builtin_altivec_stvxl((vector int)__a, __b, __c);
+}
+
+static __inline__ void __ATTRS_o_ai vec_stvxl(vector bool short __a, int __b,
+ unsigned short *__c) {
+ __builtin_altivec_stvxl((vector int)__a, __b, __c);
+}
+
+static __inline__ void __ATTRS_o_ai vec_stvxl(vector bool short __a, int __b,
+ vector bool short *__c) {
+ __builtin_altivec_stvxl((vector int)__a, __b, __c);
+}
+
+static __inline__ void __ATTRS_o_ai vec_stvxl(vector pixel __a, int __b,
+ short *__c) {
+ __builtin_altivec_stvxl((vector int)__a, __b, __c);
+}
+
+static __inline__ void __ATTRS_o_ai vec_stvxl(vector pixel __a, int __b,
+ unsigned short *__c) {
+ __builtin_altivec_stvxl((vector int)__a, __b, __c);
+}
+
+static __inline__ void __ATTRS_o_ai vec_stvxl(vector pixel __a, int __b,
+ vector pixel *__c) {
+ __builtin_altivec_stvxl((vector int)__a, __b, __c);
+}
+
+static __inline__ void __ATTRS_o_ai vec_stvxl(vector int __a, int __b,
+ vector int *__c) {
+ __builtin_altivec_stvxl(__a, __b, __c);
+}
+
+static __inline__ void __ATTRS_o_ai vec_stvxl(vector int __a, int __b,
+ int *__c) {
+ __builtin_altivec_stvxl(__a, __b, __c);
+}
+
+static __inline__ void __ATTRS_o_ai vec_stvxl(vector unsigned int __a, int __b,
+ vector unsigned int *__c) {
+ __builtin_altivec_stvxl((vector int)__a, __b, __c);
+}
+
+static __inline__ void __ATTRS_o_ai vec_stvxl(vector unsigned int __a, int __b,
+ unsigned int *__c) {
+ __builtin_altivec_stvxl((vector int)__a, __b, __c);
+}
+
+static __inline__ void __ATTRS_o_ai vec_stvxl(vector bool int __a, int __b,
+ int *__c) {
+ __builtin_altivec_stvxl((vector int)__a, __b, __c);
+}
+
+static __inline__ void __ATTRS_o_ai vec_stvxl(vector bool int __a, int __b,
+ unsigned int *__c) {
+ __builtin_altivec_stvxl((vector int)__a, __b, __c);
+}
+
+static __inline__ void __ATTRS_o_ai vec_stvxl(vector bool int __a, int __b,
+ vector bool int *__c) {
+ __builtin_altivec_stvxl((vector int)__a, __b, __c);
+}
+
+static __inline__ void __ATTRS_o_ai vec_stvxl(vector float __a, int __b,
+ vector float *__c) {
+ __builtin_altivec_stvxl((vector int)__a, __b, __c);
+}
+
+static __inline__ void __ATTRS_o_ai vec_stvxl(vector float __a, int __b,
+ float *__c) {
+ __builtin_altivec_stvxl((vector int)__a, __b, __c);
+}
+
+/* vec_sub */
+
+static __inline__ vector signed char __ATTRS_o_ai
+vec_sub(vector signed char __a, vector signed char __b) {
+ return __a - __b;
+}
+
+static __inline__ vector signed char __ATTRS_o_ai
+vec_sub(vector bool char __a, vector signed char __b) {
+ return (vector signed char)__a - __b;
+}
+
+static __inline__ vector signed char __ATTRS_o_ai
+vec_sub(vector signed char __a, vector bool char __b) {
+ return __a - (vector signed char)__b;
+}
+
+static __inline__ vector unsigned char __ATTRS_o_ai
+vec_sub(vector unsigned char __a, vector unsigned char __b) {
+ return __a - __b;
+}
+
+static __inline__ vector unsigned char __ATTRS_o_ai
+vec_sub(vector bool char __a, vector unsigned char __b) {
+ return (vector unsigned char)__a - __b;
+}
+
+static __inline__ vector unsigned char __ATTRS_o_ai
+vec_sub(vector unsigned char __a, vector bool char __b) {
+ return __a - (vector unsigned char)__b;
+}
+
+static __inline__ vector short __ATTRS_o_ai vec_sub(vector short __a,
+ vector short __b) {
+ return __a - __b;
+}
+
+static __inline__ vector short __ATTRS_o_ai vec_sub(vector bool short __a,
+ vector short __b) {
+ return (vector short)__a - __b;
+}
+
+static __inline__ vector short __ATTRS_o_ai vec_sub(vector short __a,
+ vector bool short __b) {
+ return __a - (vector short)__b;
+}
+
+static __inline__ vector unsigned short __ATTRS_o_ai
+vec_sub(vector unsigned short __a, vector unsigned short __b) {
+ return __a - __b;
+}
+
+static __inline__ vector unsigned short __ATTRS_o_ai
+vec_sub(vector bool short __a, vector unsigned short __b) {
+ return (vector unsigned short)__a - __b;
+}
+
+static __inline__ vector unsigned short __ATTRS_o_ai
+vec_sub(vector unsigned short __a, vector bool short __b) {
+ return __a - (vector unsigned short)__b;
+}
+
+static __inline__ vector int __ATTRS_o_ai vec_sub(vector int __a,
+ vector int __b) {
+ return __a - __b;
+}
+
+static __inline__ vector int __ATTRS_o_ai vec_sub(vector bool int __a,
+ vector int __b) {
+ return (vector int)__a - __b;
+}
+
+static __inline__ vector int __ATTRS_o_ai vec_sub(vector int __a,
+ vector bool int __b) {
+ return __a - (vector int)__b;
+}
+
+static __inline__ vector unsigned int __ATTRS_o_ai
+vec_sub(vector unsigned int __a, vector unsigned int __b) {
+ return __a - __b;
+}
+
+static __inline__ vector unsigned int __ATTRS_o_ai
+vec_sub(vector bool int __a, vector unsigned int __b) {
+ return (vector unsigned int)__a - __b;
+}
+
+static __inline__ vector unsigned int __ATTRS_o_ai
+vec_sub(vector unsigned int __a, vector bool int __b) {
+ return __a - (vector unsigned int)__b;
+}
+
+#if defined(__POWER8_VECTOR__) && defined(__powerpc64__)
+static __inline__ vector signed __int128 __ATTRS_o_ai
+vec_sub(vector signed __int128 __a, vector signed __int128 __b) {
+ return __a - __b;
+}
+
+static __inline__ vector unsigned __int128 __ATTRS_o_ai
+vec_sub(vector unsigned __int128 __a, vector unsigned __int128 __b) {
+ return __a - __b;
+}
+#endif // defined(__POWER8_VECTOR__) && defined(__powerpc64__)
+
+#ifdef __VSX__
+static __inline__ vector signed long long __ATTRS_o_ai
+vec_sub(vector signed long long __a, vector signed long long __b) {
+ return __a - __b;
+}
+
+static __inline__ vector unsigned long long __ATTRS_o_ai
+vec_sub(vector unsigned long long __a, vector unsigned long long __b) {
+ return __a - __b;
+}
+
+static __inline__ vector double __ATTRS_o_ai vec_sub(vector double __a,
+ vector double __b) {
+ return __a - __b;
+}
+#endif
+
+static __inline__ vector float __ATTRS_o_ai vec_sub(vector float __a,
+ vector float __b) {
+ return __a - __b;
+}
+
+/* vec_vsububm */
+
+#define __builtin_altivec_vsububm vec_vsububm
+
+static __inline__ vector signed char __ATTRS_o_ai
+vec_vsububm(vector signed char __a, vector signed char __b) {
+ return __a - __b;
+}
+
+static __inline__ vector signed char __ATTRS_o_ai
+vec_vsububm(vector bool char __a, vector signed char __b) {
+ return (vector signed char)__a - __b;
+}
+
+static __inline__ vector signed char __ATTRS_o_ai
+vec_vsububm(vector signed char __a, vector bool char __b) {
+ return __a - (vector signed char)__b;
+}
+
+static __inline__ vector unsigned char __ATTRS_o_ai
+vec_vsububm(vector unsigned char __a, vector unsigned char __b) {
+ return __a - __b;
+}
+
+static __inline__ vector unsigned char __ATTRS_o_ai
+vec_vsububm(vector bool char __a, vector unsigned char __b) {
+ return (vector unsigned char)__a - __b;
+}
+
+static __inline__ vector unsigned char __ATTRS_o_ai
+vec_vsububm(vector unsigned char __a, vector bool char __b) {
+ return __a - (vector unsigned char)__b;
+}
+
+/* vec_vsubuhm */
+
+#define __builtin_altivec_vsubuhm vec_vsubuhm
+
+static __inline__ vector short __ATTRS_o_ai vec_vsubuhm(vector short __a,
+ vector short __b) {
+ return __a - __b;
+}
+
+static __inline__ vector short __ATTRS_o_ai vec_vsubuhm(vector bool short __a,
+ vector short __b) {
+ return (vector short)__a - __b;
+}
+
+static __inline__ vector short __ATTRS_o_ai vec_vsubuhm(vector short __a,
+ vector bool short __b) {
+ return __a - (vector short)__b;
+}
+
+static __inline__ vector unsigned short __ATTRS_o_ai
+vec_vsubuhm(vector unsigned short __a, vector unsigned short __b) {
+ return __a - __b;
+}
+
+static __inline__ vector unsigned short __ATTRS_o_ai
+vec_vsubuhm(vector bool short __a, vector unsigned short __b) {
+ return (vector unsigned short)__a - __b;
+}
+
+static __inline__ vector unsigned short __ATTRS_o_ai
+vec_vsubuhm(vector unsigned short __a, vector bool short __b) {
+ return __a - (vector unsigned short)__b;
+}
+
+/* vec_vsubuwm */
+
+#define __builtin_altivec_vsubuwm vec_vsubuwm
+
+static __inline__ vector int __ATTRS_o_ai vec_vsubuwm(vector int __a,
+ vector int __b) {
+ return __a - __b;
+}
+
+static __inline__ vector int __ATTRS_o_ai vec_vsubuwm(vector bool int __a,
+ vector int __b) {
+ return (vector int)__a - __b;
+}
+
+static __inline__ vector int __ATTRS_o_ai vec_vsubuwm(vector int __a,
+ vector bool int __b) {
+ return __a - (vector int)__b;
+}
+
+static __inline__ vector unsigned int __ATTRS_o_ai
+vec_vsubuwm(vector unsigned int __a, vector unsigned int __b) {
+ return __a - __b;
+}
+
+static __inline__ vector unsigned int __ATTRS_o_ai
+vec_vsubuwm(vector bool int __a, vector unsigned int __b) {
+ return (vector unsigned int)__a - __b;
+}
+
+static __inline__ vector unsigned int __ATTRS_o_ai
+vec_vsubuwm(vector unsigned int __a, vector bool int __b) {
+ return __a - (vector unsigned int)__b;
+}
+
+/* vec_vsubfp */
+
+#define __builtin_altivec_vsubfp vec_vsubfp
+
+static __inline__ vector float __attribute__((__always_inline__))
+vec_vsubfp(vector float __a, vector float __b) {
+ return __a - __b;
+}
+
+/* vec_subc */
+
+static __inline__ vector unsigned int __ATTRS_o_ai
+vec_subc(vector unsigned int __a, vector unsigned int __b) {
+ return __builtin_altivec_vsubcuw(__a, __b);
+}
+
+#if defined(__POWER8_VECTOR__) && defined(__powerpc64__)
+static __inline__ vector unsigned __int128 __ATTRS_o_ai
+vec_subc(vector unsigned __int128 __a, vector unsigned __int128 __b) {
+ return __builtin_altivec_vsubcuq(__a, __b);
+}
+
+static __inline__ vector signed __int128 __ATTRS_o_ai
+vec_subc(vector signed __int128 __a, vector signed __int128 __b) {
+ return __builtin_altivec_vsubcuq(__a, __b);
+}
+#endif // defined(__POWER8_VECTOR__) && defined(__powerpc64__)
+
+/* vec_vsubcuw */
+
+static __inline__ vector unsigned int __attribute__((__always_inline__))
+vec_vsubcuw(vector unsigned int __a, vector unsigned int __b) {
+ return __builtin_altivec_vsubcuw(__a, __b);
+}
+
+/* vec_subs */
+
+static __inline__ vector signed char __ATTRS_o_ai
+vec_subs(vector signed char __a, vector signed char __b) {
+ return __builtin_altivec_vsubsbs(__a, __b);
+}
+
+static __inline__ vector signed char __ATTRS_o_ai
+vec_subs(vector bool char __a, vector signed char __b) {
+ return __builtin_altivec_vsubsbs((vector signed char)__a, __b);
+}
+
+static __inline__ vector signed char __ATTRS_o_ai
+vec_subs(vector signed char __a, vector bool char __b) {
+ return __builtin_altivec_vsubsbs(__a, (vector signed char)__b);
+}
+
+static __inline__ vector unsigned char __ATTRS_o_ai
+vec_subs(vector unsigned char __a, vector unsigned char __b) {
+ return __builtin_altivec_vsububs(__a, __b);
+}
+
+static __inline__ vector unsigned char __ATTRS_o_ai
+vec_subs(vector bool char __a, vector unsigned char __b) {
+ return __builtin_altivec_vsububs((vector unsigned char)__a, __b);
+}
+
+static __inline__ vector unsigned char __ATTRS_o_ai
+vec_subs(vector unsigned char __a, vector bool char __b) {
+ return __builtin_altivec_vsububs(__a, (vector unsigned char)__b);
+}
+
+static __inline__ vector short __ATTRS_o_ai vec_subs(vector short __a,
+ vector short __b) {
+ return __builtin_altivec_vsubshs(__a, __b);
+}
+
+static __inline__ vector short __ATTRS_o_ai vec_subs(vector bool short __a,
+ vector short __b) {
+ return __builtin_altivec_vsubshs((vector short)__a, __b);
+}
+
+static __inline__ vector short __ATTRS_o_ai vec_subs(vector short __a,
+ vector bool short __b) {
+ return __builtin_altivec_vsubshs(__a, (vector short)__b);
+}
+
+static __inline__ vector unsigned short __ATTRS_o_ai
+vec_subs(vector unsigned short __a, vector unsigned short __b) {
+ return __builtin_altivec_vsubuhs(__a, __b);
+}
+
+static __inline__ vector unsigned short __ATTRS_o_ai
+vec_subs(vector bool short __a, vector unsigned short __b) {
+ return __builtin_altivec_vsubuhs((vector unsigned short)__a, __b);
+}
+
+static __inline__ vector unsigned short __ATTRS_o_ai
+vec_subs(vector unsigned short __a, vector bool short __b) {
+ return __builtin_altivec_vsubuhs(__a, (vector unsigned short)__b);
+}
+
+static __inline__ vector int __ATTRS_o_ai vec_subs(vector int __a,
+ vector int __b) {
+ return __builtin_altivec_vsubsws(__a, __b);
+}
+
+static __inline__ vector int __ATTRS_o_ai vec_subs(vector bool int __a,
+ vector int __b) {
+ return __builtin_altivec_vsubsws((vector int)__a, __b);
+}
+
+static __inline__ vector int __ATTRS_o_ai vec_subs(vector int __a,
+ vector bool int __b) {
+ return __builtin_altivec_vsubsws(__a, (vector int)__b);
+}
+
+static __inline__ vector unsigned int __ATTRS_o_ai
+vec_subs(vector unsigned int __a, vector unsigned int __b) {
+ return __builtin_altivec_vsubuws(__a, __b);
+}
+
+static __inline__ vector unsigned int __ATTRS_o_ai
+vec_subs(vector bool int __a, vector unsigned int __b) {
+ return __builtin_altivec_vsubuws((vector unsigned int)__a, __b);
+}
+
+static __inline__ vector unsigned int __ATTRS_o_ai
+vec_subs(vector unsigned int __a, vector bool int __b) {
+ return __builtin_altivec_vsubuws(__a, (vector unsigned int)__b);
+}
+
+/* vec_vsubsbs */
+
+static __inline__ vector signed char __ATTRS_o_ai
+vec_vsubsbs(vector signed char __a, vector signed char __b) {
+ return __builtin_altivec_vsubsbs(__a, __b);
+}
+
+static __inline__ vector signed char __ATTRS_o_ai
+vec_vsubsbs(vector bool char __a, vector signed char __b) {
+ return __builtin_altivec_vsubsbs((vector signed char)__a, __b);
+}
+
+static __inline__ vector signed char __ATTRS_o_ai
+vec_vsubsbs(vector signed char __a, vector bool char __b) {
+ return __builtin_altivec_vsubsbs(__a, (vector signed char)__b);
+}
+
+/* vec_vsububs */
+
+static __inline__ vector unsigned char __ATTRS_o_ai
+vec_vsububs(vector unsigned char __a, vector unsigned char __b) {
+ return __builtin_altivec_vsububs(__a, __b);
+}
+
+static __inline__ vector unsigned char __ATTRS_o_ai
+vec_vsububs(vector bool char __a, vector unsigned char __b) {
+ return __builtin_altivec_vsububs((vector unsigned char)__a, __b);
+}
+
+static __inline__ vector unsigned char __ATTRS_o_ai
+vec_vsububs(vector unsigned char __a, vector bool char __b) {
+ return __builtin_altivec_vsububs(__a, (vector unsigned char)__b);
+}
+
+/* vec_vsubshs */
+
+static __inline__ vector short __ATTRS_o_ai vec_vsubshs(vector short __a,
+ vector short __b) {
+ return __builtin_altivec_vsubshs(__a, __b);
+}
+
+static __inline__ vector short __ATTRS_o_ai vec_vsubshs(vector bool short __a,
+ vector short __b) {
+ return __builtin_altivec_vsubshs((vector short)__a, __b);
+}
+
+static __inline__ vector short __ATTRS_o_ai vec_vsubshs(vector short __a,
+ vector bool short __b) {
+ return __builtin_altivec_vsubshs(__a, (vector short)__b);
+}
+
+/* vec_vsubuhs */
+
+static __inline__ vector unsigned short __ATTRS_o_ai
+vec_vsubuhs(vector unsigned short __a, vector unsigned short __b) {
+ return __builtin_altivec_vsubuhs(__a, __b);
+}
+
+static __inline__ vector unsigned short __ATTRS_o_ai
+vec_vsubuhs(vector bool short __a, vector unsigned short __b) {
+ return __builtin_altivec_vsubuhs((vector unsigned short)__a, __b);
+}
+
+static __inline__ vector unsigned short __ATTRS_o_ai
+vec_vsubuhs(vector unsigned short __a, vector bool short __b) {
+ return __builtin_altivec_vsubuhs(__a, (vector unsigned short)__b);
+}
+
+/* vec_vsubsws */
+
+static __inline__ vector int __ATTRS_o_ai vec_vsubsws(vector int __a,
+ vector int __b) {
+ return __builtin_altivec_vsubsws(__a, __b);
+}
+
+static __inline__ vector int __ATTRS_o_ai vec_vsubsws(vector bool int __a,
+ vector int __b) {
+ return __builtin_altivec_vsubsws((vector int)__a, __b);
+}
+
+static __inline__ vector int __ATTRS_o_ai vec_vsubsws(vector int __a,
+ vector bool int __b) {
+ return __builtin_altivec_vsubsws(__a, (vector int)__b);
+}
+
+/* vec_vsubuws */
+
+static __inline__ vector unsigned int __ATTRS_o_ai
+vec_vsubuws(vector unsigned int __a, vector unsigned int __b) {
+ return __builtin_altivec_vsubuws(__a, __b);
+}
+
+static __inline__ vector unsigned int __ATTRS_o_ai
+vec_vsubuws(vector bool int __a, vector unsigned int __b) {
+ return __builtin_altivec_vsubuws((vector unsigned int)__a, __b);
+}
+
+static __inline__ vector unsigned int __ATTRS_o_ai
+vec_vsubuws(vector unsigned int __a, vector bool int __b) {
+ return __builtin_altivec_vsubuws(__a, (vector unsigned int)__b);
+}
+
+#if defined(__POWER8_VECTOR__) && defined(__powerpc64__)
+/* vec_vsubuqm */
+
+static __inline__ vector signed __int128 __ATTRS_o_ai
+vec_vsubuqm(vector signed __int128 __a, vector signed __int128 __b) {
+ return __a - __b;
+}
+
+static __inline__ vector unsigned __int128 __ATTRS_o_ai
+vec_vsubuqm(vector unsigned __int128 __a, vector unsigned __int128 __b) {
+ return __a - __b;
+}
+
+/* vec_vsubeuqm */
+
+static __inline__ vector signed __int128 __ATTRS_o_ai
+vec_vsubeuqm(vector signed __int128 __a, vector signed __int128 __b,
+ vector signed __int128 __c) {
+ return __builtin_altivec_vsubeuqm(__a, __b, __c);
+}
+
+static __inline__ vector unsigned __int128 __ATTRS_o_ai
+vec_vsubeuqm(vector unsigned __int128 __a, vector unsigned __int128 __b,
+ vector unsigned __int128 __c) {
+ return __builtin_altivec_vsubeuqm(__a, __b, __c);
+}
+
+/* vec_vsubcuq */
+
+static __inline__ vector signed __int128 __ATTRS_o_ai
+vec_vsubcuq(vector signed __int128 __a, vector signed __int128 __b) {
+ return __builtin_altivec_vsubcuq(__a, __b);
+}
+
+static __inline__ vector unsigned __int128 __ATTRS_o_ai
+vec_vsubcuq(vector unsigned __int128 __a, vector unsigned __int128 __b) {
+ return __builtin_altivec_vsubcuq(__a, __b);
+}
+
+/* vec_vsubecuq */
+
+static __inline__ vector signed __int128 __ATTRS_o_ai
+vec_vsubecuq(vector signed __int128 __a, vector signed __int128 __b,
+ vector signed __int128 __c) {
+ return __builtin_altivec_vsubecuq(__a, __b, __c);
+}
+
+static __inline__ vector unsigned __int128 __ATTRS_o_ai
+vec_vsubecuq(vector unsigned __int128 __a, vector unsigned __int128 __b,
+ vector unsigned __int128 __c) {
+ return __builtin_altivec_vsubecuq(__a, __b, __c);
+}
+#endif // defined(__POWER8_VECTOR__) && defined(__powerpc64__)
+
+/* vec_sum4s */
+
+static __inline__ vector int __ATTRS_o_ai vec_sum4s(vector signed char __a,
+ vector int __b) {
+ return __builtin_altivec_vsum4sbs(__a, __b);
+}
+
+static __inline__ vector unsigned int __ATTRS_o_ai
+vec_sum4s(vector unsigned char __a, vector unsigned int __b) {
+ return __builtin_altivec_vsum4ubs(__a, __b);
+}
+
+static __inline__ vector int __ATTRS_o_ai vec_sum4s(vector signed short __a,
+ vector int __b) {
+ return __builtin_altivec_vsum4shs(__a, __b);
+}
+
+/* vec_vsum4sbs */
+
+static __inline__ vector int __attribute__((__always_inline__))
+vec_vsum4sbs(vector signed char __a, vector int __b) {
+ return __builtin_altivec_vsum4sbs(__a, __b);
+}
+
+/* vec_vsum4ubs */
+
+static __inline__ vector unsigned int __attribute__((__always_inline__))
+vec_vsum4ubs(vector unsigned char __a, vector unsigned int __b) {
+ return __builtin_altivec_vsum4ubs(__a, __b);
+}
+
+/* vec_vsum4shs */
+
+static __inline__ vector int __attribute__((__always_inline__))
+vec_vsum4shs(vector signed short __a, vector int __b) {
+ return __builtin_altivec_vsum4shs(__a, __b);
+}
+
+/* vec_sum2s */
+
+/* The vsum2sws instruction has a big-endian bias, so that the second
+   input vector and the result always reference big-endian elements
+   1 and 3 (little-endian elements 0 and 2).  For ease of porting, the
+   programmer wants elements 1 and 3 in both cases, so for little
+   endian we must perform some permutes. */
+
+static __inline__ vector signed int __attribute__((__always_inline__))
+vec_sum2s(vector int __a, vector int __b) {
+#ifdef __LITTLE_ENDIAN__
+ vector int __c = (vector signed int)vec_perm(
+ __b, __b, (vector unsigned char)(4, 5, 6, 7, 0, 1, 2, 3, 12, 13, 14, 15,
+ 8, 9, 10, 11));
+ __c = __builtin_altivec_vsum2sws(__a, __c);
+ return (vector signed int)vec_perm(
+ __c, __c, (vector unsigned char)(4, 5, 6, 7, 0, 1, 2, 3, 12, 13, 14, 15,
+ 8, 9, 10, 11));
+#else
+ return __builtin_altivec_vsum2sws(__a, __b);
+#endif
+}
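+
+/* Illustrative sketch, not part of this header: with the permutes above,
+   vec_sum2s places its results identically on both endiannesses.  The
+   function name and values below are hypothetical, chosen only to make
+   the semantics concrete. */
+#if 0
+static vector signed int __example_vec_sum2s(void) {
+  vector signed int __va = (vector signed int)(1, 2, 3, 4);
+  vector signed int __vb = (vector signed int)(0, 10, 0, 20);
+  /* Hypothetical example: element 1 holds 1 + 2 + 10 = 13, element 3
+     holds 3 + 4 + 20 = 27, and elements 0 and 2 are zero, regardless
+     of endianness. */
+  return vec_sum2s(__va, __vb);
+}
+#endif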
+
+/* vec_vsum2sws */
+
+static __inline__ vector signed int __attribute__((__always_inline__))
+vec_vsum2sws(vector int __a, vector int __b) {
+#ifdef __LITTLE_ENDIAN__
+ vector int __c = (vector signed int)vec_perm(
+ __b, __b, (vector unsigned char)(4, 5, 6, 7, 0, 1, 2, 3, 12, 13, 14, 15,
+ 8, 9, 10, 11));
+ __c = __builtin_altivec_vsum2sws(__a, __c);
+ return (vector signed int)vec_perm(
+ __c, __c, (vector unsigned char)(4, 5, 6, 7, 0, 1, 2, 3, 12, 13, 14, 15,
+ 8, 9, 10, 11));
+#else
+ return __builtin_altivec_vsum2sws(__a, __b);
+#endif
+}
+
+/* vec_sums */
+
+/* The vsumsws instruction has a big-endian bias, so that the second
+   input vector and the result always reference big-endian element 3
+   (little-endian element 0).  For ease of porting, the programmer
+   wants element 3 in both cases, so for little endian we must perform
+   some permutes. */
+
+static __inline__ vector signed int __attribute__((__always_inline__))
+vec_sums(vector signed int __a, vector signed int __b) {
+#ifdef __LITTLE_ENDIAN__
+ __b = (vector signed int)vec_splat(__b, 3);
+ __b = __builtin_altivec_vsumsws(__a, __b);
+ return (vector signed int)(0, 0, 0, __b[0]);
+#else
+ return __builtin_altivec_vsumsws(__a, __b);
+#endif
+}
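+
+/* Illustrative sketch, not part of this header: the splat and rebuild
+   above keep the programmer-visible contract identical on both
+   endiannesses.  The function name and values below are hypothetical. */
+#if 0
+static vector signed int __example_vec_sums(void) {
+  vector signed int __va = (vector signed int)(1, 2, 3, 4);
+  vector signed int __vb = (vector signed int)(0, 0, 0, 100);
+  /* Hypothetical example: element 3 holds the saturated total
+     1 + 2 + 3 + 4 + 100 = 110; elements 0 through 2 are zero. */
+  return vec_sums(__va, __vb);
+}
+#endif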
+
+/* vec_vsumsws */
+
+static __inline__ vector signed int __attribute__((__always_inline__))
+vec_vsumsws(vector signed int __a, vector signed int __b) {
+#ifdef __LITTLE_ENDIAN__
+ __b = (vector signed int)vec_splat(__b, 3);
+ __b = __builtin_altivec_vsumsws(__a, __b);
+ return (vector signed int)(0, 0, 0, __b[0]);
+#else
+ return __builtin_altivec_vsumsws(__a, __b);
+#endif
+}
+
+/* vec_trunc */
+
+static __inline__ vector float __ATTRS_o_ai vec_trunc(vector float __a) {
+#ifdef __VSX__
+ return __builtin_vsx_xvrspiz(__a);
+#else
+ return __builtin_altivec_vrfiz(__a);
+#endif
+}
+
+#ifdef __VSX__
+static __inline__ vector double __ATTRS_o_ai vec_trunc(vector double __a) {
+ return __builtin_vsx_xvrdpiz(__a);
+}
+#endif
+
+/* vec_vrfiz */
+
+static __inline__ vector float __attribute__((__always_inline__))
+vec_vrfiz(vector float __a) {
+ return __builtin_altivec_vrfiz(__a);
+}
+
+/* vec_unpackh */
+
+/* The vector unpack instructions all have a big-endian bias, so for
+ little endian we must reverse the meanings of "high" and "low." */
+
+static __inline__ vector short __ATTRS_o_ai
+vec_unpackh(vector signed char __a) {
+#ifdef __LITTLE_ENDIAN__
+ return __builtin_altivec_vupklsb((vector char)__a);
+#else
+ return __builtin_altivec_vupkhsb((vector char)__a);
+#endif
+}
+
+static __inline__ vector bool short __ATTRS_o_ai
+vec_unpackh(vector bool char __a) {
+#ifdef __LITTLE_ENDIAN__
+ return (vector bool short)__builtin_altivec_vupklsb((vector char)__a);
+#else
+ return (vector bool short)__builtin_altivec_vupkhsb((vector char)__a);
+#endif
+}
+
+static __inline__ vector int __ATTRS_o_ai vec_unpackh(vector short __a) {
+#ifdef __LITTLE_ENDIAN__
+ return __builtin_altivec_vupklsh(__a);
+#else
+ return __builtin_altivec_vupkhsh(__a);
+#endif
+}
+
+static __inline__ vector bool int __ATTRS_o_ai
+vec_unpackh(vector bool short __a) {
+#ifdef __LITTLE_ENDIAN__
+ return (vector bool int)__builtin_altivec_vupklsh((vector short)__a);
+#else
+ return (vector bool int)__builtin_altivec_vupkhsh((vector short)__a);
+#endif
+}
+
+static __inline__ vector unsigned int __ATTRS_o_ai
+vec_unpackh(vector pixel __a) {
+#ifdef __LITTLE_ENDIAN__
+ return (vector unsigned int)__builtin_altivec_vupklpx((vector short)__a);
+#else
+ return (vector unsigned int)__builtin_altivec_vupkhpx((vector short)__a);
+#endif
+}
+
+#ifdef __POWER8_VECTOR__
+static __inline__ vector long long __ATTRS_o_ai vec_unpackh(vector int __a) {
+#ifdef __LITTLE_ENDIAN__
+ return __builtin_altivec_vupklsw(__a);
+#else
+ return __builtin_altivec_vupkhsw(__a);
+#endif
+}
+
+static __inline__ vector bool long long __ATTRS_o_ai
+vec_unpackh(vector bool int __a) {
+#ifdef __LITTLE_ENDIAN__
+ return (vector bool long long)__builtin_altivec_vupklsw((vector int)__a);
+#else
+ return (vector bool long long)__builtin_altivec_vupkhsw((vector int)__a);
+#endif
+}
+#endif
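+
+/* Illustrative sketch, not part of this header: because of the
+   vupkhsb/vupklsb swap above, vec_unpackh always widens the first half
+   of the input in the programmer's element order.  The function name
+   and values below are hypothetical. */
+#if 0
+static vector short __example_vec_unpackh(void) {
+  vector signed char __vc =
+      (vector signed char)(1, -2, 3, -4, 5, -6, 7, -8,
+                           9, 10, 11, 12, 13, 14, 15, 16);
+  /* Hypothetical example: yields (1, -2, 3, -4, 5, -6, 7, -8)
+     sign-extended to shorts on either endianness. */
+  return vec_unpackh(__vc);
+}
+#endif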
+
+/* vec_vupkhsb */
+
+static __inline__ vector short __ATTRS_o_ai
+vec_vupkhsb(vector signed char __a) {
+#ifdef __LITTLE_ENDIAN__
+ return __builtin_altivec_vupklsb((vector char)__a);
+#else
+ return __builtin_altivec_vupkhsb((vector char)__a);
+#endif
+}
+
+static __inline__ vector bool short __ATTRS_o_ai
+vec_vupkhsb(vector bool char __a) {
+#ifdef __LITTLE_ENDIAN__
+ return (vector bool short)__builtin_altivec_vupklsb((vector char)__a);
+#else
+ return (vector bool short)__builtin_altivec_vupkhsb((vector char)__a);
+#endif
+}
+
+/* vec_vupkhsh */
+
+static __inline__ vector int __ATTRS_o_ai vec_vupkhsh(vector short __a) {
+#ifdef __LITTLE_ENDIAN__
+ return __builtin_altivec_vupklsh(__a);
+#else
+ return __builtin_altivec_vupkhsh(__a);
+#endif
+}
+
+static __inline__ vector bool int __ATTRS_o_ai
+vec_vupkhsh(vector bool short __a) {
+#ifdef __LITTLE_ENDIAN__
+ return (vector bool int)__builtin_altivec_vupklsh((vector short)__a);
+#else
+ return (vector bool int)__builtin_altivec_vupkhsh((vector short)__a);
+#endif
+}
+
+static __inline__ vector unsigned int __ATTRS_o_ai
+vec_vupkhsh(vector pixel __a) {
+#ifdef __LITTLE_ENDIAN__
+ return (vector unsigned int)__builtin_altivec_vupklpx((vector short)__a);
+#else
+ return (vector unsigned int)__builtin_altivec_vupkhpx((vector short)__a);
+#endif
+}
+
+/* vec_vupkhsw */
+
+#ifdef __POWER8_VECTOR__
+static __inline__ vector long long __ATTRS_o_ai vec_vupkhsw(vector int __a) {
+#ifdef __LITTLE_ENDIAN__
+ return __builtin_altivec_vupklsw(__a);
+#else
+ return __builtin_altivec_vupkhsw(__a);
+#endif
+}
+
+static __inline__ vector bool long long __ATTRS_o_ai
+vec_vupkhsw(vector bool int __a) {
+#ifdef __LITTLE_ENDIAN__
+ return (vector bool long long)__builtin_altivec_vupklsw((vector int)__a);
+#else
+ return (vector bool long long)__builtin_altivec_vupkhsw((vector int)__a);
+#endif
+}
+#endif
+
+/* vec_unpackl */
+
+static __inline__ vector short __ATTRS_o_ai
+vec_unpackl(vector signed char __a) {
+#ifdef __LITTLE_ENDIAN__
+ return __builtin_altivec_vupkhsb((vector char)__a);
+#else
+ return __builtin_altivec_vupklsb((vector char)__a);
+#endif
+}
+
+static __inline__ vector bool short __ATTRS_o_ai
+vec_unpackl(vector bool char __a) {
+#ifdef __LITTLE_ENDIAN__
+ return (vector bool short)__builtin_altivec_vupkhsb((vector char)__a);
+#else
+ return (vector bool short)__builtin_altivec_vupklsb((vector char)__a);
+#endif
+}
+
+static __inline__ vector int __ATTRS_o_ai vec_unpackl(vector short __a) {
+#ifdef __LITTLE_ENDIAN__
+ return __builtin_altivec_vupkhsh(__a);
+#else
+ return __builtin_altivec_vupklsh(__a);
+#endif
+}
+
+static __inline__ vector bool int __ATTRS_o_ai
+vec_unpackl(vector bool short __a) {
+#ifdef __LITTLE_ENDIAN__
+ return (vector bool int)__builtin_altivec_vupkhsh((vector short)__a);
+#else
+ return (vector bool int)__builtin_altivec_vupklsh((vector short)__a);
+#endif
+}
+
+static __inline__ vector unsigned int __ATTRS_o_ai
+vec_unpackl(vector pixel __a) {
+#ifdef __LITTLE_ENDIAN__
+ return (vector unsigned int)__builtin_altivec_vupkhpx((vector short)__a);
+#else
+ return (vector unsigned int)__builtin_altivec_vupklpx((vector short)__a);
+#endif
+}
+
+#ifdef __POWER8_VECTOR__
+static __inline__ vector long long __ATTRS_o_ai vec_unpackl(vector int __a) {
+#ifdef __LITTLE_ENDIAN__
+ return __builtin_altivec_vupkhsw(__a);
+#else
+ return __builtin_altivec_vupklsw(__a);
+#endif
+}
+
+static __inline__ vector bool long long __ATTRS_o_ai
+vec_unpackl(vector bool int __a) {
+#ifdef __LITTLE_ENDIAN__
+ return (vector bool long long)__builtin_altivec_vupkhsw((vector int)__a);
+#else
+ return (vector bool long long)__builtin_altivec_vupklsw((vector int)__a);
+#endif
+}
+#endif
+
+/* vec_vupklsb */
+
+static __inline__ vector short __ATTRS_o_ai
+vec_vupklsb(vector signed char __a) {
+#ifdef __LITTLE_ENDIAN__
+ return __builtin_altivec_vupkhsb((vector char)__a);
+#else
+ return __builtin_altivec_vupklsb((vector char)__a);
+#endif
+}
+
+static __inline__ vector bool short __ATTRS_o_ai
+vec_vupklsb(vector bool char __a) {
+#ifdef __LITTLE_ENDIAN__
+ return (vector bool short)__builtin_altivec_vupkhsb((vector char)__a);
+#else
+ return (vector bool short)__builtin_altivec_vupklsb((vector char)__a);
+#endif
+}
+
+/* vec_vupklsh */
+
+static __inline__ vector int __ATTRS_o_ai vec_vupklsh(vector short __a) {
+#ifdef __LITTLE_ENDIAN__
+ return __builtin_altivec_vupkhsh(__a);
+#else
+ return __builtin_altivec_vupklsh(__a);
+#endif
+}
+
+static __inline__ vector bool int __ATTRS_o_ai
+vec_vupklsh(vector bool short __a) {
+#ifdef __LITTLE_ENDIAN__
+ return (vector bool int)__builtin_altivec_vupkhsh((vector short)__a);
+#else
+ return (vector bool int)__builtin_altivec_vupklsh((vector short)__a);
+#endif
+}
+
+static __inline__ vector unsigned int __ATTRS_o_ai
+vec_vupklsh(vector pixel __a) {
+#ifdef __LITTLE_ENDIAN__
+ return (vector unsigned int)__builtin_altivec_vupkhpx((vector short)__a);
+#else
+ return (vector unsigned int)__builtin_altivec_vupklpx((vector short)__a);
+#endif
+}
+
+/* vec_vupklsw */
+
+#ifdef __POWER8_VECTOR__
+static __inline__ vector long long __ATTRS_o_ai vec_vupklsw(vector int __a) {
+#ifdef __LITTLE_ENDIAN__
+ return __builtin_altivec_vupkhsw(__a);
+#else
+ return __builtin_altivec_vupklsw(__a);
+#endif
+}
+
+static __inline__ vector bool long long __ATTRS_o_ai
+vec_vupklsw(vector bool int __a) {
+#ifdef __LITTLE_ENDIAN__
+ return (vector bool long long)__builtin_altivec_vupkhsw((vector int)__a);
+#else
+ return (vector bool long long)__builtin_altivec_vupklsw((vector int)__a);
+#endif
+}
+#endif
+
+/* vec_vsx_ld */
+
+#ifdef __VSX__
+
+static __inline__ vector bool int __ATTRS_o_ai
+vec_vsx_ld(int __a, const vector bool int *__b) {
+ return (vector bool int)__builtin_vsx_lxvw4x(__a, __b);
+}
+
+static __inline__ vector signed int __ATTRS_o_ai
+vec_vsx_ld(int __a, const vector signed int *__b) {
+ return (vector signed int)__builtin_vsx_lxvw4x(__a, __b);
+}
+
+static __inline__ vector signed int __ATTRS_o_ai
+vec_vsx_ld(int __a, const signed int *__b) {
+ return (vector signed int)__builtin_vsx_lxvw4x(__a, __b);
+}
+
+static __inline__ vector unsigned int __ATTRS_o_ai
+vec_vsx_ld(int __a, const vector unsigned int *__b) {
+ return (vector unsigned int)__builtin_vsx_lxvw4x(__a, __b);
+}
+
+static __inline__ vector unsigned int __ATTRS_o_ai
+vec_vsx_ld(int __a, const unsigned int *__b) {
+ return (vector unsigned int)__builtin_vsx_lxvw4x(__a, __b);
+}
+
+static __inline__ vector float __ATTRS_o_ai
+vec_vsx_ld(int __a, const vector float *__b) {
+ return (vector float)__builtin_vsx_lxvw4x(__a, __b);
+}
+
+static __inline__ vector float __ATTRS_o_ai vec_vsx_ld(int __a,
+ const float *__b) {
+ return (vector float)__builtin_vsx_lxvw4x(__a, __b);
+}
+
+static __inline__ vector signed long long __ATTRS_o_ai
+vec_vsx_ld(int __a, const vector signed long long *__b) {
+ return (vector signed long long)__builtin_vsx_lxvd2x(__a, __b);
+}
+
+static __inline__ vector unsigned long long __ATTRS_o_ai
+vec_vsx_ld(int __a, const vector unsigned long long *__b) {
+ return (vector unsigned long long)__builtin_vsx_lxvd2x(__a, __b);
+}
+
+static __inline__ vector double __ATTRS_o_ai
+vec_vsx_ld(int __a, const vector double *__b) {
+ return (vector double)__builtin_vsx_lxvd2x(__a, __b);
+}
+
+static __inline__ vector double __ATTRS_o_ai
+vec_vsx_ld(int __a, const double *__b) {
+ return (vector double)__builtin_vsx_lxvd2x(__a, __b);
+}
+
+static __inline__ vector bool short __ATTRS_o_ai
+vec_vsx_ld(int __a, const vector bool short *__b) {
+ return (vector bool short)__builtin_vsx_lxvw4x(__a, __b);
+}
+
+static __inline__ vector signed short __ATTRS_o_ai
+vec_vsx_ld(int __a, const vector signed short *__b) {
+ return (vector signed short)__builtin_vsx_lxvw4x(__a, __b);
+}
+
+static __inline__ vector signed short __ATTRS_o_ai
+vec_vsx_ld(int __a, const signed short *__b) {
+ return (vector signed short)__builtin_vsx_lxvw4x(__a, __b);
+}
+
+static __inline__ vector unsigned short __ATTRS_o_ai
+vec_vsx_ld(int __a, const vector unsigned short *__b) {
+ return (vector unsigned short)__builtin_vsx_lxvw4x(__a, __b);
+}
+
+static __inline__ vector unsigned short __ATTRS_o_ai
+vec_vsx_ld(int __a, const unsigned short *__b) {
+ return (vector unsigned short)__builtin_vsx_lxvw4x(__a, __b);
+}
+
+static __inline__ vector bool char __ATTRS_o_ai
+vec_vsx_ld(int __a, const vector bool char *__b) {
+ return (vector bool char)__builtin_vsx_lxvw4x(__a, __b);
+}
+
+static __inline__ vector signed char __ATTRS_o_ai
+vec_vsx_ld(int __a, const vector signed char *__b) {
+ return (vector signed char)__builtin_vsx_lxvw4x(__a, __b);
+}
+
+static __inline__ vector signed char __ATTRS_o_ai
+vec_vsx_ld(int __a, const signed char *__b) {
+ return (vector signed char)__builtin_vsx_lxvw4x(__a, __b);
+}
+
+static __inline__ vector unsigned char __ATTRS_o_ai
+vec_vsx_ld(int __a, const vector unsigned char *__b) {
+ return (vector unsigned char)__builtin_vsx_lxvw4x(__a, __b);
+}
+
+static __inline__ vector unsigned char __ATTRS_o_ai
+vec_vsx_ld(int __a, const unsigned char *__b) {
+ return (vector unsigned char)__builtin_vsx_lxvw4x(__a, __b);
+}
+
+#endif
+
+/* vec_vsx_st */
+
+#ifdef __VSX__
+
+static __inline__ void __ATTRS_o_ai vec_vsx_st(vector bool int __a, int __b,
+ vector bool int *__c) {
+ __builtin_vsx_stxvw4x((vector int)__a, __b, __c);
+}
+
+static __inline__ void __ATTRS_o_ai vec_vsx_st(vector bool int __a, int __b,
+ signed int *__c) {
+ __builtin_vsx_stxvw4x((vector int)__a, __b, __c);
+}
+
+static __inline__ void __ATTRS_o_ai vec_vsx_st(vector bool int __a, int __b,
+ unsigned int *__c) {
+ __builtin_vsx_stxvw4x((vector int)__a, __b, __c);
+}
+
+static __inline__ void __ATTRS_o_ai vec_vsx_st(vector signed int __a, int __b,
+ vector signed int *__c) {
+ __builtin_vsx_stxvw4x((vector int)__a, __b, __c);
+}
+
+static __inline__ void __ATTRS_o_ai vec_vsx_st(vector signed int __a, int __b,
+ signed int *__c) {
+ __builtin_vsx_stxvw4x((vector int)__a, __b, __c);
+}
+
+static __inline__ void __ATTRS_o_ai vec_vsx_st(vector unsigned int __a, int __b,
+ vector unsigned int *__c) {
+ __builtin_vsx_stxvw4x((vector int)__a, __b, __c);
+}
+
+static __inline__ void __ATTRS_o_ai vec_vsx_st(vector unsigned int __a, int __b,
+ unsigned int *__c) {
+ __builtin_vsx_stxvw4x((vector int)__a, __b, __c);
+}
+
+static __inline__ void __ATTRS_o_ai vec_vsx_st(vector float __a, int __b,
+ vector float *__c) {
+ __builtin_vsx_stxvw4x((vector int)__a, __b, __c);
+}
+
+static __inline__ void __ATTRS_o_ai vec_vsx_st(vector float __a, int __b,
+ float *__c) {
+ __builtin_vsx_stxvw4x((vector int)__a, __b, __c);
+}
+
+static __inline__ void __ATTRS_o_ai vec_vsx_st(vector signed long long __a,
+ int __b,
+ vector signed long long *__c) {
+ __builtin_vsx_stxvd2x((vector double)__a, __b, __c);
+}
+
+static __inline__ void __ATTRS_o_ai vec_vsx_st(vector unsigned long long __a,
+ int __b,
+ vector unsigned long long *__c) {
+ __builtin_vsx_stxvd2x((vector double)__a, __b, __c);
+}
+
+static __inline__ void __ATTRS_o_ai vec_vsx_st(vector double __a, int __b,
+ vector double *__c) {
+ __builtin_vsx_stxvd2x((vector double)__a, __b, __c);
+}
+
+static __inline__ void __ATTRS_o_ai vec_vsx_st(vector double __a, int __b,
+ double *__c) {
+ __builtin_vsx_stxvd2x((vector double)__a, __b, __c);
+}
+
+static __inline__ void __ATTRS_o_ai vec_vsx_st(vector bool short __a, int __b,
+ vector bool short *__c) {
+ __builtin_vsx_stxvw4x((vector int)__a, __b, __c);
+}
+
+static __inline__ void __ATTRS_o_ai vec_vsx_st(vector bool short __a, int __b,
+ signed short *__c) {
+ __builtin_vsx_stxvw4x((vector int)__a, __b, __c);
+}
+
+static __inline__ void __ATTRS_o_ai vec_vsx_st(vector bool short __a, int __b,
+ unsigned short *__c) {
+ __builtin_vsx_stxvw4x((vector int)__a, __b, __c);
+}
+
+static __inline__ void __ATTRS_o_ai vec_vsx_st(vector signed short __a, int __b,
+ vector signed short *__c) {
+ __builtin_vsx_stxvw4x((vector int)__a, __b, __c);
+}
+
+static __inline__ void __ATTRS_o_ai vec_vsx_st(vector signed short __a, int __b,
+ signed short *__c) {
+ __builtin_vsx_stxvw4x((vector int)__a, __b, __c);
+}
+
+static __inline__ void __ATTRS_o_ai vec_vsx_st(vector unsigned short __a,
+ int __b,
+ vector unsigned short *__c) {
+ __builtin_vsx_stxvw4x((vector int)__a, __b, __c);
+}
+
+static __inline__ void __ATTRS_o_ai vec_vsx_st(vector unsigned short __a,
+ int __b, unsigned short *__c) {
+ __builtin_vsx_stxvw4x((vector int)__a, __b, __c);
+}
+
+static __inline__ void __ATTRS_o_ai vec_vsx_st(vector bool char __a, int __b,
+ vector bool char *__c) {
+ __builtin_vsx_stxvw4x((vector int)__a, __b, __c);
+}
+
+static __inline__ void __ATTRS_o_ai vec_vsx_st(vector bool char __a, int __b,
+ signed char *__c) {
+ __builtin_vsx_stxvw4x((vector int)__a, __b, __c);
+}
+
+static __inline__ void __ATTRS_o_ai vec_vsx_st(vector bool char __a, int __b,
+ unsigned char *__c) {
+ __builtin_vsx_stxvw4x((vector int)__a, __b, __c);
+}
+
+static __inline__ void __ATTRS_o_ai vec_vsx_st(vector signed char __a, int __b,
+ vector signed char *__c) {
+ __builtin_vsx_stxvw4x((vector int)__a, __b, __c);
+}
+
+static __inline__ void __ATTRS_o_ai vec_vsx_st(vector signed char __a, int __b,
+ signed char *__c) {
+ __builtin_vsx_stxvw4x((vector int)__a, __b, __c);
+}
+
+static __inline__ void __ATTRS_o_ai vec_vsx_st(vector unsigned char __a,
+ int __b,
+ vector unsigned char *__c) {
+ __builtin_vsx_stxvw4x((vector int)__a, __b, __c);
+}
+
+static __inline__ void __ATTRS_o_ai vec_vsx_st(vector unsigned char __a,
+ int __b, unsigned char *__c) {
+ __builtin_vsx_stxvw4x((vector int)__a, __b, __c);
+}
+
+#endif
+
+/* vec_xor */
+
+#define __builtin_altivec_vxor vec_xor
+
+static __inline__ vector signed char __ATTRS_o_ai
+vec_xor(vector signed char __a, vector signed char __b) {
+ return __a ^ __b;
+}
+
+static __inline__ vector signed char __ATTRS_o_ai
+vec_xor(vector bool char __a, vector signed char __b) {
+ return (vector signed char)__a ^ __b;
+}
+
+static __inline__ vector signed char __ATTRS_o_ai
+vec_xor(vector signed char __a, vector bool char __b) {
+ return __a ^ (vector signed char)__b;
+}
+
+static __inline__ vector unsigned char __ATTRS_o_ai
+vec_xor(vector unsigned char __a, vector unsigned char __b) {
+ return __a ^ __b;
+}
+
+static __inline__ vector unsigned char __ATTRS_o_ai
+vec_xor(vector bool char __a, vector unsigned char __b) {
+ return (vector unsigned char)__a ^ __b;
+}
+
+static __inline__ vector unsigned char __ATTRS_o_ai
+vec_xor(vector unsigned char __a, vector bool char __b) {
+ return __a ^ (vector unsigned char)__b;
+}
+
+static __inline__ vector bool char __ATTRS_o_ai vec_xor(vector bool char __a,
+ vector bool char __b) {
+ return __a ^ __b;
+}
+
+static __inline__ vector short __ATTRS_o_ai vec_xor(vector short __a,
+ vector short __b) {
+ return __a ^ __b;
+}
+
+static __inline__ vector short __ATTRS_o_ai vec_xor(vector bool short __a,
+ vector short __b) {
+ return (vector short)__a ^ __b;
+}
+
+static __inline__ vector short __ATTRS_o_ai vec_xor(vector short __a,
+ vector bool short __b) {
+ return __a ^ (vector short)__b;
+}
+
+static __inline__ vector unsigned short __ATTRS_o_ai
+vec_xor(vector unsigned short __a, vector unsigned short __b) {
+ return __a ^ __b;
+}
+
+static __inline__ vector unsigned short __ATTRS_o_ai
+vec_xor(vector bool short __a, vector unsigned short __b) {
+ return (vector unsigned short)__a ^ __b;
+}
+
+static __inline__ vector unsigned short __ATTRS_o_ai
+vec_xor(vector unsigned short __a, vector bool short __b) {
+ return __a ^ (vector unsigned short)__b;
+}
+
+static __inline__ vector bool short __ATTRS_o_ai
+vec_xor(vector bool short __a, vector bool short __b) {
+ return __a ^ __b;
+}
+
+static __inline__ vector int __ATTRS_o_ai vec_xor(vector int __a,
+ vector int __b) {
+ return __a ^ __b;
+}
+
+static __inline__ vector int __ATTRS_o_ai vec_xor(vector bool int __a,
+ vector int __b) {
+ return (vector int)__a ^ __b;
+}
+
+static __inline__ vector int __ATTRS_o_ai vec_xor(vector int __a,
+ vector bool int __b) {
+ return __a ^ (vector int)__b;
+}
+
+static __inline__ vector unsigned int __ATTRS_o_ai
+vec_xor(vector unsigned int __a, vector unsigned int __b) {
+ return __a ^ __b;
+}
+
+static __inline__ vector unsigned int __ATTRS_o_ai
+vec_xor(vector bool int __a, vector unsigned int __b) {
+ return (vector unsigned int)__a ^ __b;
+}
+
+static __inline__ vector unsigned int __ATTRS_o_ai
+vec_xor(vector unsigned int __a, vector bool int __b) {
+ return __a ^ (vector unsigned int)__b;
+}
+
+static __inline__ vector bool int __ATTRS_o_ai vec_xor(vector bool int __a,
+ vector bool int __b) {
+ return __a ^ __b;
+}
+
+static __inline__ vector float __ATTRS_o_ai vec_xor(vector float __a,
+ vector float __b) {
+ vector unsigned int __res =
+ (vector unsigned int)__a ^ (vector unsigned int)__b;
+ return (vector float)__res;
+}
+
+static __inline__ vector float __ATTRS_o_ai vec_xor(vector bool int __a,
+ vector float __b) {
+ vector unsigned int __res =
+ (vector unsigned int)__a ^ (vector unsigned int)__b;
+ return (vector float)__res;
+}
+
+static __inline__ vector float __ATTRS_o_ai vec_xor(vector float __a,
+ vector bool int __b) {
+ vector unsigned int __res =
+ (vector unsigned int)__a ^ (vector unsigned int)__b;
+ return (vector float)__res;
+}
+
+#ifdef __VSX__
+static __inline__ vector signed long long __ATTRS_o_ai
+vec_xor(vector signed long long __a, vector signed long long __b) {
+ return __a ^ __b;
+}
+
+static __inline__ vector signed long long __ATTRS_o_ai
+vec_xor(vector bool long long __a, vector signed long long __b) {
+ return (vector signed long long)__a ^ __b;
+}
+
+static __inline__ vector signed long long __ATTRS_o_ai
+vec_xor(vector signed long long __a, vector bool long long __b) {
+ return __a ^ (vector signed long long)__b;
+}
+
+static __inline__ vector unsigned long long __ATTRS_o_ai
+vec_xor(vector unsigned long long __a, vector unsigned long long __b) {
+ return __a ^ __b;
+}
+
+static __inline__ vector unsigned long long __ATTRS_o_ai
+vec_xor(vector bool long long __a, vector unsigned long long __b) {
+ return (vector unsigned long long)__a ^ __b;
+}
+
+static __inline__ vector unsigned long long __ATTRS_o_ai
+vec_xor(vector unsigned long long __a, vector bool long long __b) {
+ return __a ^ (vector unsigned long long)__b;
+}
+
+static __inline__ vector bool long long __ATTRS_o_ai
+vec_xor(vector bool long long __a, vector bool long long __b) {
+ return __a ^ __b;
+}
+
+static __inline__ vector double __ATTRS_o_ai vec_xor(vector double __a,
+ vector double __b) {
+ return (vector double)((vector unsigned long long)__a ^
+ (vector unsigned long long)__b);
+}
+
+static __inline__ vector double __ATTRS_o_ai
+vec_xor(vector double __a, vector bool long long __b) {
+ return (vector double)((vector unsigned long long)__a ^
+ (vector unsigned long long)__b);
+}
+
+static __inline__ vector double __ATTRS_o_ai vec_xor(vector bool long long __a,
+ vector double __b) {
+ return (vector double)((vector unsigned long long)__a ^
+ (vector unsigned long long)__b);
+}
+#endif
+
+/* vec_vxor */
+
+static __inline__ vector signed char __ATTRS_o_ai
+vec_vxor(vector signed char __a, vector signed char __b) {
+ return __a ^ __b;
+}
+
+static __inline__ vector signed char __ATTRS_o_ai
+vec_vxor(vector bool char __a, vector signed char __b) {
+ return (vector signed char)__a ^ __b;
+}
+
+static __inline__ vector signed char __ATTRS_o_ai
+vec_vxor(vector signed char __a, vector bool char __b) {
+ return __a ^ (vector signed char)__b;
+}
+
+static __inline__ vector unsigned char __ATTRS_o_ai
+vec_vxor(vector unsigned char __a, vector unsigned char __b) {
+ return __a ^ __b;
+}
+
+static __inline__ vector unsigned char __ATTRS_o_ai
+vec_vxor(vector bool char __a, vector unsigned char __b) {
+ return (vector unsigned char)__a ^ __b;
+}
+
+static __inline__ vector unsigned char __ATTRS_o_ai
+vec_vxor(vector unsigned char __a, vector bool char __b) {
+ return __a ^ (vector unsigned char)__b;
+}
+
+static __inline__ vector bool char __ATTRS_o_ai vec_vxor(vector bool char __a,
+ vector bool char __b) {
+ return __a ^ __b;
+}
+
+static __inline__ vector short __ATTRS_o_ai vec_vxor(vector short __a,
+ vector short __b) {
+ return __a ^ __b;
+}
+
+static __inline__ vector short __ATTRS_o_ai vec_vxor(vector bool short __a,
+ vector short __b) {
+ return (vector short)__a ^ __b;
+}
+
+static __inline__ vector short __ATTRS_o_ai vec_vxor(vector short __a,
+ vector bool short __b) {
+ return __a ^ (vector short)__b;
+}
+
+static __inline__ vector unsigned short __ATTRS_o_ai
+vec_vxor(vector unsigned short __a, vector unsigned short __b) {
+ return __a ^ __b;
+}
+
+static __inline__ vector unsigned short __ATTRS_o_ai
+vec_vxor(vector bool short __a, vector unsigned short __b) {
+ return (vector unsigned short)__a ^ __b;
+}
+
+static __inline__ vector unsigned short __ATTRS_o_ai
+vec_vxor(vector unsigned short __a, vector bool short __b) {
+ return __a ^ (vector unsigned short)__b;
+}
+
+static __inline__ vector bool short __ATTRS_o_ai
+vec_vxor(vector bool short __a, vector bool short __b) {
+ return __a ^ __b;
+}
+
+static __inline__ vector int __ATTRS_o_ai vec_vxor(vector int __a,
+ vector int __b) {
+ return __a ^ __b;
+}
+
+static __inline__ vector int __ATTRS_o_ai vec_vxor(vector bool int __a,
+ vector int __b) {
+ return (vector int)__a ^ __b;
+}
+
+static __inline__ vector int __ATTRS_o_ai vec_vxor(vector int __a,
+ vector bool int __b) {
+ return __a ^ (vector int)__b;
+}
+
+static __inline__ vector unsigned int __ATTRS_o_ai
+vec_vxor(vector unsigned int __a, vector unsigned int __b) {
+ return __a ^ __b;
+}
+
+static __inline__ vector unsigned int __ATTRS_o_ai
+vec_vxor(vector bool int __a, vector unsigned int __b) {
+ return (vector unsigned int)__a ^ __b;
+}
+
+static __inline__ vector unsigned int __ATTRS_o_ai
+vec_vxor(vector unsigned int __a, vector bool int __b) {
+ return __a ^ (vector unsigned int)__b;
+}
+
+static __inline__ vector bool int __ATTRS_o_ai vec_vxor(vector bool int __a,
+ vector bool int __b) {
+ return __a ^ __b;
+}
+
+static __inline__ vector float __ATTRS_o_ai vec_vxor(vector float __a,
+ vector float __b) {
+ vector unsigned int __res =
+ (vector unsigned int)__a ^ (vector unsigned int)__b;
+ return (vector float)__res;
+}
+
+static __inline__ vector float __ATTRS_o_ai vec_vxor(vector bool int __a,
+ vector float __b) {
+ vector unsigned int __res =
+ (vector unsigned int)__a ^ (vector unsigned int)__b;
+ return (vector float)__res;
+}
+
+static __inline__ vector float __ATTRS_o_ai vec_vxor(vector float __a,
+ vector bool int __b) {
+ vector unsigned int __res =
+ (vector unsigned int)__a ^ (vector unsigned int)__b;
+ return (vector float)__res;
+}
+
+#ifdef __VSX__
+static __inline__ vector signed long long __ATTRS_o_ai
+vec_vxor(vector signed long long __a, vector signed long long __b) {
+ return __a ^ __b;
+}
+
+static __inline__ vector signed long long __ATTRS_o_ai
+vec_vxor(vector bool long long __a, vector signed long long __b) {
+ return (vector signed long long)__a ^ __b;
+}
+
+static __inline__ vector signed long long __ATTRS_o_ai
+vec_vxor(vector signed long long __a, vector bool long long __b) {
+ return __a ^ (vector signed long long)__b;
+}
+
+static __inline__ vector unsigned long long __ATTRS_o_ai
+vec_vxor(vector unsigned long long __a, vector unsigned long long __b) {
+ return __a ^ __b;
+}
+
+static __inline__ vector unsigned long long __ATTRS_o_ai
+vec_vxor(vector bool long long __a, vector unsigned long long __b) {
+ return (vector unsigned long long)__a ^ __b;
+}
+
+static __inline__ vector unsigned long long __ATTRS_o_ai
+vec_vxor(vector unsigned long long __a, vector bool long long __b) {
+ return __a ^ (vector unsigned long long)__b;
+}
+
+static __inline__ vector bool long long __ATTRS_o_ai
+vec_vxor(vector bool long long __a, vector bool long long __b) {
+ return __a ^ __b;
+}
+#endif
+
+/* ------ extensions for the Cell Broadband Engine Architecture (CBEA) ------ */
+
+/* vec_extract */
+
+static __inline__ signed char __ATTRS_o_ai vec_extract(vector signed char __a,
+ int __b) {
+ return __a[__b];
+}
+
+static __inline__ unsigned char __ATTRS_o_ai
+vec_extract(vector unsigned char __a, int __b) {
+ return __a[__b];
+}
+
+static __inline__ unsigned char __ATTRS_o_ai vec_extract(vector bool char __a,
+ int __b) {
+ return __a[__b];
+}
+
+static __inline__ signed short __ATTRS_o_ai vec_extract(vector signed short __a,
+ int __b) {
+ return __a[__b];
+}
+
+static __inline__ unsigned short __ATTRS_o_ai
+vec_extract(vector unsigned short __a, int __b) {
+ return __a[__b];
+}
+
+static __inline__ unsigned short __ATTRS_o_ai vec_extract(vector bool short __a,
+ int __b) {
+ return __a[__b];
+}
+
+static __inline__ signed int __ATTRS_o_ai vec_extract(vector signed int __a,
+ int __b) {
+ return __a[__b];
+}
+
+static __inline__ unsigned int __ATTRS_o_ai vec_extract(vector unsigned int __a,
+ int __b) {
+ return __a[__b];
+}
+
+static __inline__ unsigned int __ATTRS_o_ai vec_extract(vector bool int __a,
+ int __b) {
+ return __a[__b];
+}
+
+#ifdef __VSX__
+static __inline__ signed long long __ATTRS_o_ai
+vec_extract(vector signed long long __a, int __b) {
+ return __a[__b];
+}
+
+static __inline__ unsigned long long __ATTRS_o_ai
+vec_extract(vector unsigned long long __a, int __b) {
+ return __a[__b];
+}
+
+static __inline__ unsigned long long __ATTRS_o_ai
+vec_extract(vector bool long long __a, int __b) {
+ return __a[__b];
+}
+
+static __inline__ double __ATTRS_o_ai vec_extract(vector double __a, int __b) {
+ return __a[__b];
+}
+#endif
+
+static __inline__ float __ATTRS_o_ai vec_extract(vector float __a, int __b) {
+ return __a[__b];
+}
+
+/* vec_insert */
+
+static __inline__ vector signed char __ATTRS_o_ai
+vec_insert(signed char __a, vector signed char __b, int __c) {
+ __b[__c] = __a;
+ return __b;
+}
+
+static __inline__ vector unsigned char __ATTRS_o_ai
+vec_insert(unsigned char __a, vector unsigned char __b, int __c) {
+ __b[__c] = __a;
+ return __b;
+}
+
+static __inline__ vector bool char __ATTRS_o_ai vec_insert(unsigned char __a,
+ vector bool char __b,
+ int __c) {
+ __b[__c] = __a;
+ return __b;
+}
+
+static __inline__ vector signed short __ATTRS_o_ai
+vec_insert(signed short __a, vector signed short __b, int __c) {
+ __b[__c] = __a;
+ return __b;
+}
+
+static __inline__ vector unsigned short __ATTRS_o_ai
+vec_insert(unsigned short __a, vector unsigned short __b, int __c) {
+ __b[__c] = __a;
+ return __b;
+}
+
+static __inline__ vector bool short __ATTRS_o_ai
+vec_insert(unsigned short __a, vector bool short __b, int __c) {
+ __b[__c] = __a;
+ return __b;
+}
+
+static __inline__ vector signed int __ATTRS_o_ai
+vec_insert(signed int __a, vector signed int __b, int __c) {
+ __b[__c] = __a;
+ return __b;
+}
+
+static __inline__ vector unsigned int __ATTRS_o_ai
+vec_insert(unsigned int __a, vector unsigned int __b, int __c) {
+ __b[__c] = __a;
+ return __b;
+}
+
+static __inline__ vector bool int __ATTRS_o_ai vec_insert(unsigned int __a,
+ vector bool int __b,
+ int __c) {
+ __b[__c] = __a;
+ return __b;
+}
+
+#ifdef __VSX__
+static __inline__ vector signed long long __ATTRS_o_ai
+vec_insert(signed long long __a, vector signed long long __b, int __c) {
+ __b[__c] = __a;
+ return __b;
+}
+
+static __inline__ vector unsigned long long __ATTRS_o_ai
+vec_insert(unsigned long long __a, vector unsigned long long __b, int __c) {
+ __b[__c] = __a;
+ return __b;
+}
+
+static __inline__ vector bool long long __ATTRS_o_ai
+vec_insert(unsigned long long __a, vector bool long long __b, int __c) {
+ __b[__c] = __a;
+ return __b;
+}
+
+static __inline__ vector double __ATTRS_o_ai vec_insert(double __a,
+ vector double __b,
+ int __c) {
+ __b[__c] = __a;
+ return __b;
+}
+#endif
+
+static __inline__ vector float __ATTRS_o_ai vec_insert(float __a,
+ vector float __b,
+ int __c) {
+ __b[__c] = __a;
+ return __b;
+}
+
+/* vec_lvlx */
+
+static __inline__ vector signed char __ATTRS_o_ai
+vec_lvlx(int __a, const signed char *__b) {
+ return vec_perm(vec_ld(__a, __b), (vector signed char)(0),
+ vec_lvsl(__a, __b));
+}
+
+static __inline__ vector signed char __ATTRS_o_ai
+vec_lvlx(int __a, const vector signed char *__b) {
+ return vec_perm(vec_ld(__a, __b), (vector signed char)(0),
+ vec_lvsl(__a, (unsigned char *)__b));
+}
+
+static __inline__ vector unsigned char __ATTRS_o_ai
+vec_lvlx(int __a, const unsigned char *__b) {
+ return vec_perm(vec_ld(__a, __b), (vector unsigned char)(0),
+ vec_lvsl(__a, __b));
+}
+
+static __inline__ vector unsigned char __ATTRS_o_ai
+vec_lvlx(int __a, const vector unsigned char *__b) {
+ return vec_perm(vec_ld(__a, __b), (vector unsigned char)(0),
+ vec_lvsl(__a, (unsigned char *)__b));
+}
+
+static __inline__ vector bool char __ATTRS_o_ai
+vec_lvlx(int __a, const vector bool char *__b) {
+ return vec_perm(vec_ld(__a, __b), (vector bool char)(0),
+ vec_lvsl(__a, (unsigned char *)__b));
+}
+
+static __inline__ vector short __ATTRS_o_ai vec_lvlx(int __a,
+ const short *__b) {
+ return vec_perm(vec_ld(__a, __b), (vector short)(0), vec_lvsl(__a, __b));
+}
+
+static __inline__ vector short __ATTRS_o_ai vec_lvlx(int __a,
+ const vector short *__b) {
+ return vec_perm(vec_ld(__a, __b), (vector short)(0),
+ vec_lvsl(__a, (unsigned char *)__b));
+}
+
+static __inline__ vector unsigned short __ATTRS_o_ai
+vec_lvlx(int __a, const unsigned short *__b) {
+ return vec_perm(vec_ld(__a, __b), (vector unsigned short)(0),
+ vec_lvsl(__a, __b));
+}
+
+static __inline__ vector unsigned short __ATTRS_o_ai
+vec_lvlx(int __a, const vector unsigned short *__b) {
+ return vec_perm(vec_ld(__a, __b), (vector unsigned short)(0),
+ vec_lvsl(__a, (unsigned char *)__b));
+}
+
+static __inline__ vector bool short __ATTRS_o_ai
+vec_lvlx(int __a, const vector bool short *__b) {
+ return vec_perm(vec_ld(__a, __b), (vector bool short)(0),
+ vec_lvsl(__a, (unsigned char *)__b));
+}
+
+static __inline__ vector pixel __ATTRS_o_ai vec_lvlx(int __a,
+ const vector pixel *__b) {
+ return vec_perm(vec_ld(__a, __b), (vector pixel)(0),
+ vec_lvsl(__a, (unsigned char *)__b));
+}
+
+static __inline__ vector int __ATTRS_o_ai vec_lvlx(int __a, const int *__b) {
+ return vec_perm(vec_ld(__a, __b), (vector int)(0), vec_lvsl(__a, __b));
+}
+
+static __inline__ vector int __ATTRS_o_ai vec_lvlx(int __a,
+ const vector int *__b) {
+ return vec_perm(vec_ld(__a, __b), (vector int)(0),
+ vec_lvsl(__a, (unsigned char *)__b));
+}
+
+static __inline__ vector unsigned int __ATTRS_o_ai
+vec_lvlx(int __a, const unsigned int *__b) {
+ return vec_perm(vec_ld(__a, __b), (vector unsigned int)(0),
+ vec_lvsl(__a, __b));
+}
+
+static __inline__ vector unsigned int __ATTRS_o_ai
+vec_lvlx(int __a, const vector unsigned int *__b) {
+ return vec_perm(vec_ld(__a, __b), (vector unsigned int)(0),
+ vec_lvsl(__a, (unsigned char *)__b));
+}
+
+static __inline__ vector bool int __ATTRS_o_ai
+vec_lvlx(int __a, const vector bool int *__b) {
+ return vec_perm(vec_ld(__a, __b), (vector bool int)(0),
+ vec_lvsl(__a, (unsigned char *)__b));
+}
+
+static __inline__ vector float __ATTRS_o_ai vec_lvlx(int __a,
+ const float *__b) {
+ return vec_perm(vec_ld(__a, __b), (vector float)(0), vec_lvsl(__a, __b));
+}
+
+static __inline__ vector float __ATTRS_o_ai vec_lvlx(int __a,
+ const vector float *__b) {
+ return vec_perm(vec_ld(__a, __b), (vector float)(0),
+ vec_lvsl(__a, (unsigned char *)__b));
+}
+
+/* vec_lvlxl */
+
+static __inline__ vector signed char __ATTRS_o_ai
+vec_lvlxl(int __a, const signed char *__b) {
+ return vec_perm(vec_ldl(__a, __b), (vector signed char)(0),
+ vec_lvsl(__a, __b));
+}
+
+static __inline__ vector signed char __ATTRS_o_ai
+vec_lvlxl(int __a, const vector signed char *__b) {
+ return vec_perm(vec_ldl(__a, __b), (vector signed char)(0),
+ vec_lvsl(__a, (unsigned char *)__b));
+}
+
+static __inline__ vector unsigned char __ATTRS_o_ai
+vec_lvlxl(int __a, const unsigned char *__b) {
+ return vec_perm(vec_ldl(__a, __b), (vector unsigned char)(0),
+ vec_lvsl(__a, __b));
+}
+
+static __inline__ vector unsigned char __ATTRS_o_ai
+vec_lvlxl(int __a, const vector unsigned char *__b) {
+ return vec_perm(vec_ldl(__a, __b), (vector unsigned char)(0),
+ vec_lvsl(__a, (unsigned char *)__b));
+}
+
+static __inline__ vector bool char __ATTRS_o_ai
+vec_lvlxl(int __a, const vector bool char *__b) {
+ return vec_perm(vec_ldl(__a, __b), (vector bool char)(0),
+ vec_lvsl(__a, (unsigned char *)__b));
+}
+
+static __inline__ vector short __ATTRS_o_ai vec_lvlxl(int __a,
+ const short *__b) {
+ return vec_perm(vec_ldl(__a, __b), (vector short)(0), vec_lvsl(__a, __b));
+}
+
+static __inline__ vector short __ATTRS_o_ai vec_lvlxl(int __a,
+ const vector short *__b) {
+ return vec_perm(vec_ldl(__a, __b), (vector short)(0),
+ vec_lvsl(__a, (unsigned char *)__b));
+}
+
+static __inline__ vector unsigned short __ATTRS_o_ai
+vec_lvlxl(int __a, const unsigned short *__b) {
+ return vec_perm(vec_ldl(__a, __b), (vector unsigned short)(0),
+ vec_lvsl(__a, __b));
+}
+
+static __inline__ vector unsigned short __ATTRS_o_ai
+vec_lvlxl(int __a, const vector unsigned short *__b) {
+ return vec_perm(vec_ldl(__a, __b), (vector unsigned short)(0),
+ vec_lvsl(__a, (unsigned char *)__b));
+}
+
+static __inline__ vector bool short __ATTRS_o_ai
+vec_lvlxl(int __a, const vector bool short *__b) {
+ return vec_perm(vec_ldl(__a, __b), (vector bool short)(0),
+ vec_lvsl(__a, (unsigned char *)__b));
+}
+
+static __inline__ vector pixel __ATTRS_o_ai vec_lvlxl(int __a,
+ const vector pixel *__b) {
+ return vec_perm(vec_ldl(__a, __b), (vector pixel)(0),
+ vec_lvsl(__a, (unsigned char *)__b));
+}
+
+static __inline__ vector int __ATTRS_o_ai vec_lvlxl(int __a, const int *__b) {
+ return vec_perm(vec_ldl(__a, __b), (vector int)(0), vec_lvsl(__a, __b));
+}
+
+static __inline__ vector int __ATTRS_o_ai vec_lvlxl(int __a,
+ const vector int *__b) {
+ return vec_perm(vec_ldl(__a, __b), (vector int)(0),
+ vec_lvsl(__a, (unsigned char *)__b));
+}
+
+static __inline__ vector unsigned int __ATTRS_o_ai
+vec_lvlxl(int __a, const unsigned int *__b) {
+ return vec_perm(vec_ldl(__a, __b), (vector unsigned int)(0),
+ vec_lvsl(__a, __b));
+}
+
+static __inline__ vector unsigned int __ATTRS_o_ai
+vec_lvlxl(int __a, const vector unsigned int *__b) {
+ return vec_perm(vec_ldl(__a, __b), (vector unsigned int)(0),
+ vec_lvsl(__a, (unsigned char *)__b));
+}
+
+static __inline__ vector bool int __ATTRS_o_ai
+vec_lvlxl(int __a, const vector bool int *__b) {
+ return vec_perm(vec_ldl(__a, __b), (vector bool int)(0),
+ vec_lvsl(__a, (unsigned char *)__b));
+}
+
+static __inline__ vector float __ATTRS_o_ai vec_lvlxl(int __a,
+ const float *__b) {
+ return vec_perm(vec_ldl(__a, __b), (vector float)(0), vec_lvsl(__a, __b));
+}
+
+static __inline__ vector float __ATTRS_o_ai vec_lvlxl(int __a,
+                                                      const vector float *__b) {
+ return vec_perm(vec_ldl(__a, __b), (vector float)(0),
+ vec_lvsl(__a, (unsigned char *)__b));
+}
+
+/* vec_lvrx */
+
+static __inline__ vector signed char __ATTRS_o_ai
+vec_lvrx(int __a, const signed char *__b) {
+ return vec_perm((vector signed char)(0), vec_ld(__a, __b),
+ vec_lvsl(__a, __b));
+}
+
+static __inline__ vector signed char __ATTRS_o_ai
+vec_lvrx(int __a, const vector signed char *__b) {
+ return vec_perm((vector signed char)(0), vec_ld(__a, __b),
+ vec_lvsl(__a, (unsigned char *)__b));
+}
+
+static __inline__ vector unsigned char __ATTRS_o_ai
+vec_lvrx(int __a, const unsigned char *__b) {
+ return vec_perm((vector unsigned char)(0), vec_ld(__a, __b),
+ vec_lvsl(__a, __b));
+}
+
+static __inline__ vector unsigned char __ATTRS_o_ai
+vec_lvrx(int __a, const vector unsigned char *__b) {
+ return vec_perm((vector unsigned char)(0), vec_ld(__a, __b),
+ vec_lvsl(__a, (unsigned char *)__b));
+}
+
+static __inline__ vector bool char __ATTRS_o_ai
+vec_lvrx(int __a, const vector bool char *__b) {
+ return vec_perm((vector bool char)(0), vec_ld(__a, __b),
+ vec_lvsl(__a, (unsigned char *)__b));
+}
+
+static __inline__ vector short __ATTRS_o_ai vec_lvrx(int __a,
+ const short *__b) {
+ return vec_perm((vector short)(0), vec_ld(__a, __b), vec_lvsl(__a, __b));
+}
+
+static __inline__ vector short __ATTRS_o_ai vec_lvrx(int __a,
+ const vector short *__b) {
+ return vec_perm((vector short)(0), vec_ld(__a, __b),
+ vec_lvsl(__a, (unsigned char *)__b));
+}
+
+static __inline__ vector unsigned short __ATTRS_o_ai
+vec_lvrx(int __a, const unsigned short *__b) {
+ return vec_perm((vector unsigned short)(0), vec_ld(__a, __b),
+ vec_lvsl(__a, __b));
+}
+
+static __inline__ vector unsigned short __ATTRS_o_ai
+vec_lvrx(int __a, const vector unsigned short *__b) {
+ return vec_perm((vector unsigned short)(0), vec_ld(__a, __b),
+ vec_lvsl(__a, (unsigned char *)__b));
+}
+
+static __inline__ vector bool short __ATTRS_o_ai
+vec_lvrx(int __a, const vector bool short *__b) {
+ return vec_perm((vector bool short)(0), vec_ld(__a, __b),
+ vec_lvsl(__a, (unsigned char *)__b));
+}
+
+static __inline__ vector pixel __ATTRS_o_ai vec_lvrx(int __a,
+ const vector pixel *__b) {
+ return vec_perm((vector pixel)(0), vec_ld(__a, __b),
+ vec_lvsl(__a, (unsigned char *)__b));
+}
+
+static __inline__ vector int __ATTRS_o_ai vec_lvrx(int __a, const int *__b) {
+ return vec_perm((vector int)(0), vec_ld(__a, __b), vec_lvsl(__a, __b));
+}
+
+static __inline__ vector int __ATTRS_o_ai vec_lvrx(int __a,
+ const vector int *__b) {
+ return vec_perm((vector int)(0), vec_ld(__a, __b),
+ vec_lvsl(__a, (unsigned char *)__b));
+}
+
+static __inline__ vector unsigned int __ATTRS_o_ai
+vec_lvrx(int __a, const unsigned int *__b) {
+ return vec_perm((vector unsigned int)(0), vec_ld(__a, __b),
+ vec_lvsl(__a, __b));
+}
+
+static __inline__ vector unsigned int __ATTRS_o_ai
+vec_lvrx(int __a, const vector unsigned int *__b) {
+ return vec_perm((vector unsigned int)(0), vec_ld(__a, __b),
+ vec_lvsl(__a, (unsigned char *)__b));
+}
+
+static __inline__ vector bool int __ATTRS_o_ai
+vec_lvrx(int __a, const vector bool int *__b) {
+ return vec_perm((vector bool int)(0), vec_ld(__a, __b),
+ vec_lvsl(__a, (unsigned char *)__b));
+}
+
+static __inline__ vector float __ATTRS_o_ai vec_lvrx(int __a,
+ const float *__b) {
+ return vec_perm((vector float)(0), vec_ld(__a, __b), vec_lvsl(__a, __b));
+}
+
+static __inline__ vector float __ATTRS_o_ai vec_lvrx(int __a,
+ const vector float *__b) {
+ return vec_perm((vector float)(0), vec_ld(__a, __b),
+ vec_lvsl(__a, (unsigned char *)__b));
+}
+
+/* vec_lvrxl */
+
+static __inline__ vector signed char __ATTRS_o_ai
+vec_lvrxl(int __a, const signed char *__b) {
+ return vec_perm((vector signed char)(0), vec_ldl(__a, __b),
+ vec_lvsl(__a, __b));
+}
+
+static __inline__ vector signed char __ATTRS_o_ai
+vec_lvrxl(int __a, const vector signed char *__b) {
+ return vec_perm((vector signed char)(0), vec_ldl(__a, __b),
+ vec_lvsl(__a, (unsigned char *)__b));
+}
+
+static __inline__ vector unsigned char __ATTRS_o_ai
+vec_lvrxl(int __a, const unsigned char *__b) {
+ return vec_perm((vector unsigned char)(0), vec_ldl(__a, __b),
+ vec_lvsl(__a, __b));
+}
+
+static __inline__ vector unsigned char __ATTRS_o_ai
+vec_lvrxl(int __a, const vector unsigned char *__b) {
+ return vec_perm((vector unsigned char)(0), vec_ldl(__a, __b),
+ vec_lvsl(__a, (unsigned char *)__b));
+}
+
+static __inline__ vector bool char __ATTRS_o_ai
+vec_lvrxl(int __a, const vector bool char *__b) {
+ return vec_perm((vector bool char)(0), vec_ldl(__a, __b),
+ vec_lvsl(__a, (unsigned char *)__b));
+}
+
+static __inline__ vector short __ATTRS_o_ai vec_lvrxl(int __a,
+ const short *__b) {
+ return vec_perm((vector short)(0), vec_ldl(__a, __b), vec_lvsl(__a, __b));
+}
+
+static __inline__ vector short __ATTRS_o_ai vec_lvrxl(int __a,
+ const vector short *__b) {
+ return vec_perm((vector short)(0), vec_ldl(__a, __b),
+ vec_lvsl(__a, (unsigned char *)__b));
+}
+
+static __inline__ vector unsigned short __ATTRS_o_ai
+vec_lvrxl(int __a, const unsigned short *__b) {
+ return vec_perm((vector unsigned short)(0), vec_ldl(__a, __b),
+ vec_lvsl(__a, __b));
+}
+
+static __inline__ vector unsigned short __ATTRS_o_ai
+vec_lvrxl(int __a, const vector unsigned short *__b) {
+ return vec_perm((vector unsigned short)(0), vec_ldl(__a, __b),
+ vec_lvsl(__a, (unsigned char *)__b));
+}
+
+static __inline__ vector bool short __ATTRS_o_ai
+vec_lvrxl(int __a, const vector bool short *__b) {
+ return vec_perm((vector bool short)(0), vec_ldl(__a, __b),
+ vec_lvsl(__a, (unsigned char *)__b));
+}
+
+static __inline__ vector pixel __ATTRS_o_ai vec_lvrxl(int __a,
+ const vector pixel *__b) {
+ return vec_perm((vector pixel)(0), vec_ldl(__a, __b),
+ vec_lvsl(__a, (unsigned char *)__b));
+}
+
+static __inline__ vector int __ATTRS_o_ai vec_lvrxl(int __a, const int *__b) {
+ return vec_perm((vector int)(0), vec_ldl(__a, __b), vec_lvsl(__a, __b));
+}
+
+static __inline__ vector int __ATTRS_o_ai vec_lvrxl(int __a,
+ const vector int *__b) {
+ return vec_perm((vector int)(0), vec_ldl(__a, __b),
+ vec_lvsl(__a, (unsigned char *)__b));
+}
+
+static __inline__ vector unsigned int __ATTRS_o_ai
+vec_lvrxl(int __a, const unsigned int *__b) {
+ return vec_perm((vector unsigned int)(0), vec_ldl(__a, __b),
+ vec_lvsl(__a, __b));
+}
+
+static __inline__ vector unsigned int __ATTRS_o_ai
+vec_lvrxl(int __a, const vector unsigned int *__b) {
+ return vec_perm((vector unsigned int)(0), vec_ldl(__a, __b),
+ vec_lvsl(__a, (unsigned char *)__b));
+}
+
+static __inline__ vector bool int __ATTRS_o_ai
+vec_lvrxl(int __a, const vector bool int *__b) {
+ return vec_perm((vector bool int)(0), vec_ldl(__a, __b),
+ vec_lvsl(__a, (unsigned char *)__b));
+}
+
+static __inline__ vector float __ATTRS_o_ai vec_lvrxl(int __a,
+ const float *__b) {
+ return vec_perm((vector float)(0), vec_ldl(__a, __b), vec_lvsl(__a, __b));
+}
+
+static __inline__ vector float __ATTRS_o_ai vec_lvrxl(int __a,
+ const vector float *__b) {
+ return vec_perm((vector float)(0), vec_ldl(__a, __b),
+ vec_lvsl(__a, (unsigned char *)__b));
+}
+
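+/* Illustrative sketch only, not part of the upstream header: combining
+   vec_lvlx (the bytes from the address to the end of its 16-byte block,
+   zero-filled on the right) with vec_lvrx (the complementary bytes of the
+   next block) is the classic unaligned-load idiom.  Note that both halves
+   perform a real load; the __demo_* name is hypothetical. */
+static __inline__ vector signed char
+__demo_load_unaligned(const signed char *__p) {
+  /* Left part of the straddled 16 bytes ORed with the right part. */
+  return vec_or(vec_lvlx(0, __p), vec_lvrx(16, __p));
+}
+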
+/* vec_stvlx */
+
+static __inline__ void __ATTRS_o_ai vec_stvlx(vector signed char __a, int __b,
+ signed char *__c) {
+ return vec_st(vec_perm(vec_lvrx(__b, __c), __a, vec_lvsr(__b, __c)), __b,
+ __c);
+}
+
+static __inline__ void __ATTRS_o_ai vec_stvlx(vector signed char __a, int __b,
+ vector signed char *__c) {
+ return vec_st(
+ vec_perm(vec_lvrx(__b, __c), __a, vec_lvsr(__b, (unsigned char *)__c)),
+ __b, __c);
+}
+
+static __inline__ void __ATTRS_o_ai vec_stvlx(vector unsigned char __a, int __b,
+ unsigned char *__c) {
+ return vec_st(vec_perm(vec_lvrx(__b, __c), __a, vec_lvsr(__b, __c)), __b,
+ __c);
+}
+
+static __inline__ void __ATTRS_o_ai vec_stvlx(vector unsigned char __a, int __b,
+ vector unsigned char *__c) {
+ return vec_st(
+ vec_perm(vec_lvrx(__b, __c), __a, vec_lvsr(__b, (unsigned char *)__c)),
+ __b, __c);
+}
+
+static __inline__ void __ATTRS_o_ai vec_stvlx(vector bool char __a, int __b,
+ vector bool char *__c) {
+ return vec_st(
+ vec_perm(vec_lvrx(__b, __c), __a, vec_lvsr(__b, (unsigned char *)__c)),
+ __b, __c);
+}
+
+static __inline__ void __ATTRS_o_ai vec_stvlx(vector short __a, int __b,
+ short *__c) {
+ return vec_st(vec_perm(vec_lvrx(__b, __c), __a, vec_lvsr(__b, __c)), __b,
+ __c);
+}
+
+static __inline__ void __ATTRS_o_ai vec_stvlx(vector short __a, int __b,
+ vector short *__c) {
+ return vec_st(
+ vec_perm(vec_lvrx(__b, __c), __a, vec_lvsr(__b, (unsigned char *)__c)),
+ __b, __c);
+}
+
+static __inline__ void __ATTRS_o_ai vec_stvlx(vector unsigned short __a,
+ int __b, unsigned short *__c) {
+ return vec_st(vec_perm(vec_lvrx(__b, __c), __a, vec_lvsr(__b, __c)), __b,
+ __c);
+}
+
+static __inline__ void __ATTRS_o_ai vec_stvlx(vector unsigned short __a,
+ int __b,
+ vector unsigned short *__c) {
+ return vec_st(
+ vec_perm(vec_lvrx(__b, __c), __a, vec_lvsr(__b, (unsigned char *)__c)),
+ __b, __c);
+}
+
+static __inline__ void __ATTRS_o_ai vec_stvlx(vector bool short __a, int __b,
+ vector bool short *__c) {
+ return vec_st(
+ vec_perm(vec_lvrx(__b, __c), __a, vec_lvsr(__b, (unsigned char *)__c)),
+ __b, __c);
+}
+
+static __inline__ void __ATTRS_o_ai vec_stvlx(vector pixel __a, int __b,
+ vector pixel *__c) {
+ return vec_st(
+ vec_perm(vec_lvrx(__b, __c), __a, vec_lvsr(__b, (unsigned char *)__c)),
+ __b, __c);
+}
+
+static __inline__ void __ATTRS_o_ai vec_stvlx(vector int __a, int __b,
+ int *__c) {
+ return vec_st(vec_perm(vec_lvrx(__b, __c), __a, vec_lvsr(__b, __c)), __b,
+ __c);
+}
+
+static __inline__ void __ATTRS_o_ai vec_stvlx(vector int __a, int __b,
+ vector int *__c) {
+ return vec_st(
+ vec_perm(vec_lvrx(__b, __c), __a, vec_lvsr(__b, (unsigned char *)__c)),
+ __b, __c);
+}
+
+static __inline__ void __ATTRS_o_ai vec_stvlx(vector unsigned int __a, int __b,
+ unsigned int *__c) {
+ return vec_st(vec_perm(vec_lvrx(__b, __c), __a, vec_lvsr(__b, __c)), __b,
+ __c);
+}
+
+static __inline__ void __ATTRS_o_ai vec_stvlx(vector unsigned int __a, int __b,
+ vector unsigned int *__c) {
+ return vec_st(
+ vec_perm(vec_lvrx(__b, __c), __a, vec_lvsr(__b, (unsigned char *)__c)),
+ __b, __c);
+}
+
+static __inline__ void __ATTRS_o_ai vec_stvlx(vector bool int __a, int __b,
+ vector bool int *__c) {
+ return vec_st(
+ vec_perm(vec_lvrx(__b, __c), __a, vec_lvsr(__b, (unsigned char *)__c)),
+ __b, __c);
+}
+
+static __inline__ void __ATTRS_o_ai vec_stvlx(vector float __a, int __b,
+ vector float *__c) {
+ return vec_st(
+ vec_perm(vec_lvrx(__b, __c), __a, vec_lvsr(__b, (unsigned char *)__c)),
+ __b, __c);
+}
+
+/* vec_stvlxl */
+
+static __inline__ void __ATTRS_o_ai vec_stvlxl(vector signed char __a, int __b,
+ signed char *__c) {
+ return vec_stl(vec_perm(vec_lvrx(__b, __c), __a, vec_lvsr(__b, __c)), __b,
+ __c);
+}
+
+static __inline__ void __ATTRS_o_ai vec_stvlxl(vector signed char __a, int __b,
+ vector signed char *__c) {
+ return vec_stl(
+ vec_perm(vec_lvrx(__b, __c), __a, vec_lvsr(__b, (unsigned char *)__c)),
+ __b, __c);
+}
+
+static __inline__ void __ATTRS_o_ai vec_stvlxl(vector unsigned char __a,
+ int __b, unsigned char *__c) {
+ return vec_stl(vec_perm(vec_lvrx(__b, __c), __a, vec_lvsr(__b, __c)), __b,
+ __c);
+}
+
+static __inline__ void __ATTRS_o_ai vec_stvlxl(vector unsigned char __a,
+ int __b,
+ vector unsigned char *__c) {
+ return vec_stl(
+ vec_perm(vec_lvrx(__b, __c), __a, vec_lvsr(__b, (unsigned char *)__c)),
+ __b, __c);
+}
+
+static __inline__ void __ATTRS_o_ai vec_stvlxl(vector bool char __a, int __b,
+ vector bool char *__c) {
+ return vec_stl(
+ vec_perm(vec_lvrx(__b, __c), __a, vec_lvsr(__b, (unsigned char *)__c)),
+ __b, __c);
+}
+
+static __inline__ void __ATTRS_o_ai vec_stvlxl(vector short __a, int __b,
+ short *__c) {
+ return vec_stl(vec_perm(vec_lvrx(__b, __c), __a, vec_lvsr(__b, __c)), __b,
+ __c);
+}
+
+static __inline__ void __ATTRS_o_ai vec_stvlxl(vector short __a, int __b,
+ vector short *__c) {
+ return vec_stl(
+ vec_perm(vec_lvrx(__b, __c), __a, vec_lvsr(__b, (unsigned char *)__c)),
+ __b, __c);
+}
+
+static __inline__ void __ATTRS_o_ai vec_stvlxl(vector unsigned short __a,
+ int __b, unsigned short *__c) {
+ return vec_stl(vec_perm(vec_lvrx(__b, __c), __a, vec_lvsr(__b, __c)), __b,
+ __c);
+}
+
+static __inline__ void __ATTRS_o_ai vec_stvlxl(vector unsigned short __a,
+ int __b,
+ vector unsigned short *__c) {
+ return vec_stl(
+ vec_perm(vec_lvrx(__b, __c), __a, vec_lvsr(__b, (unsigned char *)__c)),
+ __b, __c);
+}
+
+static __inline__ void __ATTRS_o_ai vec_stvlxl(vector bool short __a, int __b,
+ vector bool short *__c) {
+ return vec_stl(
+ vec_perm(vec_lvrx(__b, __c), __a, vec_lvsr(__b, (unsigned char *)__c)),
+ __b, __c);
+}
+
+static __inline__ void __ATTRS_o_ai vec_stvlxl(vector pixel __a, int __b,
+ vector pixel *__c) {
+ return vec_stl(
+ vec_perm(vec_lvrx(__b, __c), __a, vec_lvsr(__b, (unsigned char *)__c)),
+ __b, __c);
+}
+
+static __inline__ void __ATTRS_o_ai vec_stvlxl(vector int __a, int __b,
+ int *__c) {
+ return vec_stl(vec_perm(vec_lvrx(__b, __c), __a, vec_lvsr(__b, __c)), __b,
+ __c);
+}
+
+static __inline__ void __ATTRS_o_ai vec_stvlxl(vector int __a, int __b,
+ vector int *__c) {
+ return vec_stl(
+ vec_perm(vec_lvrx(__b, __c), __a, vec_lvsr(__b, (unsigned char *)__c)),
+ __b, __c);
+}
+
+static __inline__ void __ATTRS_o_ai vec_stvlxl(vector unsigned int __a, int __b,
+ unsigned int *__c) {
+ return vec_stl(vec_perm(vec_lvrx(__b, __c), __a, vec_lvsr(__b, __c)), __b,
+ __c);
+}
+
+static __inline__ void __ATTRS_o_ai vec_stvlxl(vector unsigned int __a, int __b,
+ vector unsigned int *__c) {
+ return vec_stl(
+ vec_perm(vec_lvrx(__b, __c), __a, vec_lvsr(__b, (unsigned char *)__c)),
+ __b, __c);
+}
+
+static __inline__ void __ATTRS_o_ai vec_stvlxl(vector bool int __a, int __b,
+ vector bool int *__c) {
+ return vec_stl(
+ vec_perm(vec_lvrx(__b, __c), __a, vec_lvsr(__b, (unsigned char *)__c)),
+ __b, __c);
+}
+
+static __inline__ void __ATTRS_o_ai vec_stvlxl(vector float __a, int __b,
+ vector float *__c) {
+ return vec_stl(
+ vec_perm(vec_lvrx(__b, __c), __a, vec_lvsr(__b, (unsigned char *)__c)),
+ __b, __c);
+}
+
+/* vec_stvrx */
+
+static __inline__ void __ATTRS_o_ai vec_stvrx(vector signed char __a, int __b,
+ signed char *__c) {
+ return vec_st(vec_perm(__a, vec_lvlx(__b, __c), vec_lvsr(__b, __c)), __b,
+ __c);
+}
+
+static __inline__ void __ATTRS_o_ai vec_stvrx(vector signed char __a, int __b,
+ vector signed char *__c) {
+ return vec_st(
+ vec_perm(__a, vec_lvlx(__b, __c), vec_lvsr(__b, (unsigned char *)__c)),
+ __b, __c);
+}
+
+static __inline__ void __ATTRS_o_ai vec_stvrx(vector unsigned char __a, int __b,
+ unsigned char *__c) {
+ return vec_st(vec_perm(__a, vec_lvlx(__b, __c), vec_lvsr(__b, __c)), __b,
+ __c);
+}
+
+static __inline__ void __ATTRS_o_ai vec_stvrx(vector unsigned char __a, int __b,
+ vector unsigned char *__c) {
+ return vec_st(
+ vec_perm(__a, vec_lvlx(__b, __c), vec_lvsr(__b, (unsigned char *)__c)),
+ __b, __c);
+}
+
+static __inline__ void __ATTRS_o_ai vec_stvrx(vector bool char __a, int __b,
+ vector bool char *__c) {
+ return vec_st(
+ vec_perm(__a, vec_lvlx(__b, __c), vec_lvsr(__b, (unsigned char *)__c)),
+ __b, __c);
+}
+
+static __inline__ void __ATTRS_o_ai vec_stvrx(vector short __a, int __b,
+ short *__c) {
+ return vec_st(vec_perm(__a, vec_lvlx(__b, __c), vec_lvsr(__b, __c)), __b,
+ __c);
+}
+
+static __inline__ void __ATTRS_o_ai vec_stvrx(vector short __a, int __b,
+ vector short *__c) {
+ return vec_st(
+ vec_perm(__a, vec_lvlx(__b, __c), vec_lvsr(__b, (unsigned char *)__c)),
+ __b, __c);
+}
+
+static __inline__ void __ATTRS_o_ai vec_stvrx(vector unsigned short __a,
+ int __b, unsigned short *__c) {
+ return vec_st(vec_perm(__a, vec_lvlx(__b, __c), vec_lvsr(__b, __c)), __b,
+ __c);
+}
+
+static __inline__ void __ATTRS_o_ai vec_stvrx(vector unsigned short __a,
+ int __b,
+ vector unsigned short *__c) {
+ return vec_st(
+ vec_perm(__a, vec_lvlx(__b, __c), vec_lvsr(__b, (unsigned char *)__c)),
+ __b, __c);
+}
+
+static __inline__ void __ATTRS_o_ai vec_stvrx(vector bool short __a, int __b,
+ vector bool short *__c) {
+ return vec_st(
+ vec_perm(__a, vec_lvlx(__b, __c), vec_lvsr(__b, (unsigned char *)__c)),
+ __b, __c);
+}
+
+static __inline__ void __ATTRS_o_ai vec_stvrx(vector pixel __a, int __b,
+ vector pixel *__c) {
+ return vec_st(
+ vec_perm(__a, vec_lvlx(__b, __c), vec_lvsr(__b, (unsigned char *)__c)),
+ __b, __c);
+}
+
+static __inline__ void __ATTRS_o_ai vec_stvrx(vector int __a, int __b,
+ int *__c) {
+ return vec_st(vec_perm(__a, vec_lvlx(__b, __c), vec_lvsr(__b, __c)), __b,
+ __c);
+}
+
+static __inline__ void __ATTRS_o_ai vec_stvrx(vector int __a, int __b,
+ vector int *__c) {
+ return vec_st(
+ vec_perm(__a, vec_lvlx(__b, __c), vec_lvsr(__b, (unsigned char *)__c)),
+ __b, __c);
+}
+
+static __inline__ void __ATTRS_o_ai vec_stvrx(vector unsigned int __a, int __b,
+ unsigned int *__c) {
+ return vec_st(vec_perm(__a, vec_lvlx(__b, __c), vec_lvsr(__b, __c)), __b,
+ __c);
+}
+
+static __inline__ void __ATTRS_o_ai vec_stvrx(vector unsigned int __a, int __b,
+ vector unsigned int *__c) {
+ return vec_st(
+ vec_perm(__a, vec_lvlx(__b, __c), vec_lvsr(__b, (unsigned char *)__c)),
+ __b, __c);
+}
+
+static __inline__ void __ATTRS_o_ai vec_stvrx(vector bool int __a, int __b,
+ vector bool int *__c) {
+ return vec_st(
+ vec_perm(__a, vec_lvlx(__b, __c), vec_lvsr(__b, (unsigned char *)__c)),
+ __b, __c);
+}
+
+static __inline__ void __ATTRS_o_ai vec_stvrx(vector float __a, int __b,
+ vector float *__c) {
+ return vec_st(
+ vec_perm(__a, vec_lvlx(__b, __c), vec_lvsr(__b, (unsigned char *)__c)),
+ __b, __c);
+}
+
+/* vec_stvrxl */
+
+static __inline__ void __ATTRS_o_ai vec_stvrxl(vector signed char __a, int __b,
+ signed char *__c) {
+ return vec_stl(vec_perm(__a, vec_lvlx(__b, __c), vec_lvsr(__b, __c)), __b,
+ __c);
+}
+
+static __inline__ void __ATTRS_o_ai vec_stvrxl(vector signed char __a, int __b,
+ vector signed char *__c) {
+ return vec_stl(
+ vec_perm(__a, vec_lvlx(__b, __c), vec_lvsr(__b, (unsigned char *)__c)),
+ __b, __c);
+}
+
+static __inline__ void __ATTRS_o_ai vec_stvrxl(vector unsigned char __a,
+ int __b, unsigned char *__c) {
+ return vec_stl(vec_perm(__a, vec_lvlx(__b, __c), vec_lvsr(__b, __c)), __b,
+ __c);
+}
+
+static __inline__ void __ATTRS_o_ai vec_stvrxl(vector unsigned char __a,
+ int __b,
+ vector unsigned char *__c) {
+ return vec_stl(
+ vec_perm(__a, vec_lvlx(__b, __c), vec_lvsr(__b, (unsigned char *)__c)),
+ __b, __c);
+}
+
+static __inline__ void __ATTRS_o_ai vec_stvrxl(vector bool char __a, int __b,
+ vector bool char *__c) {
+ return vec_stl(
+ vec_perm(__a, vec_lvlx(__b, __c), vec_lvsr(__b, (unsigned char *)__c)),
+ __b, __c);
+}
+
+static __inline__ void __ATTRS_o_ai vec_stvrxl(vector short __a, int __b,
+ short *__c) {
+ return vec_stl(vec_perm(__a, vec_lvlx(__b, __c), vec_lvsr(__b, __c)), __b,
+ __c);
+}
+
+static __inline__ void __ATTRS_o_ai vec_stvrxl(vector short __a, int __b,
+ vector short *__c) {
+ return vec_stl(
+ vec_perm(__a, vec_lvlx(__b, __c), vec_lvsr(__b, (unsigned char *)__c)),
+ __b, __c);
+}
+
+static __inline__ void __ATTRS_o_ai vec_stvrxl(vector unsigned short __a,
+ int __b, unsigned short *__c) {
+ return vec_stl(vec_perm(__a, vec_lvlx(__b, __c), vec_lvsr(__b, __c)), __b,
+ __c);
+}
+
+static __inline__ void __ATTRS_o_ai vec_stvrxl(vector unsigned short __a,
+ int __b,
+ vector unsigned short *__c) {
+ return vec_stl(
+ vec_perm(__a, vec_lvlx(__b, __c), vec_lvsr(__b, (unsigned char *)__c)),
+ __b, __c);
+}
+
+static __inline__ void __ATTRS_o_ai vec_stvrxl(vector bool short __a, int __b,
+ vector bool short *__c) {
+ return vec_stl(
+ vec_perm(__a, vec_lvlx(__b, __c), vec_lvsr(__b, (unsigned char *)__c)),
+ __b, __c);
+}
+
+static __inline__ void __ATTRS_o_ai vec_stvrxl(vector pixel __a, int __b,
+ vector pixel *__c) {
+ return vec_stl(
+ vec_perm(__a, vec_lvlx(__b, __c), vec_lvsr(__b, (unsigned char *)__c)),
+ __b, __c);
+}
+
+static __inline__ void __ATTRS_o_ai vec_stvrxl(vector int __a, int __b,
+ int *__c) {
+ return vec_stl(vec_perm(__a, vec_lvlx(__b, __c), vec_lvsr(__b, __c)), __b,
+ __c);
+}
+
+static __inline__ void __ATTRS_o_ai vec_stvrxl(vector int __a, int __b,
+ vector int *__c) {
+ return vec_stl(
+ vec_perm(__a, vec_lvlx(__b, __c), vec_lvsr(__b, (unsigned char *)__c)),
+ __b, __c);
+}
+
+static __inline__ void __ATTRS_o_ai vec_stvrxl(vector unsigned int __a, int __b,
+ unsigned int *__c) {
+ return vec_stl(vec_perm(__a, vec_lvlx(__b, __c), vec_lvsr(__b, __c)), __b,
+ __c);
+}
+
+static __inline__ void __ATTRS_o_ai vec_stvrxl(vector unsigned int __a, int __b,
+ vector unsigned int *__c) {
+ return vec_stl(
+ vec_perm(__a, vec_lvlx(__b, __c), vec_lvsr(__b, (unsigned char *)__c)),
+ __b, __c);
+}
+
+static __inline__ void __ATTRS_o_ai vec_stvrxl(vector bool int __a, int __b,
+ vector bool int *__c) {
+ return vec_stl(
+ vec_perm(__a, vec_lvlx(__b, __c), vec_lvsr(__b, (unsigned char *)__c)),
+ __b, __c);
+}
+
+static __inline__ void __ATTRS_o_ai vec_stvrxl(vector float __a, int __b,
+ vector float *__c) {
+ return vec_stl(
+ vec_perm(__a, vec_lvlx(__b, __c), vec_lvsr(__b, (unsigned char *)__c)),
+ __b, __c);
+}
+
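+/* Illustrative sketch only, not part of the upstream header: vec_stvlx and
+   vec_stvrx form the matching unaligned-store idiom.  Each call is a
+   read-modify-write of one 16-byte block, so the pair stores all 16 bytes
+   of __v across the block boundary; the __demo_* name is hypothetical. */
+static __inline__ void __demo_store_unaligned(vector signed char __v,
+                                              signed char *__p) {
+  vec_stvlx(__v, 0, __p);  /* bytes that land in the first block */
+  vec_stvrx(__v, 16, __p); /* bytes that spill into the next block */
+}
+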
+/* vec_promote */
+
+static __inline__ vector signed char __ATTRS_o_ai vec_promote(signed char __a,
+ int __b) {
+ vector signed char __res = (vector signed char)(0);
+ __res[__b] = __a;
+ return __res;
+}
+
+static __inline__ vector unsigned char __ATTRS_o_ai
+vec_promote(unsigned char __a, int __b) {
+ vector unsigned char __res = (vector unsigned char)(0);
+ __res[__b] = __a;
+ return __res;
+}
+
+static __inline__ vector short __ATTRS_o_ai vec_promote(short __a, int __b) {
+ vector short __res = (vector short)(0);
+ __res[__b] = __a;
+ return __res;
+}
+
+static __inline__ vector unsigned short __ATTRS_o_ai
+vec_promote(unsigned short __a, int __b) {
+ vector unsigned short __res = (vector unsigned short)(0);
+ __res[__b] = __a;
+ return __res;
+}
+
+static __inline__ vector int __ATTRS_o_ai vec_promote(int __a, int __b) {
+ vector int __res = (vector int)(0);
+ __res[__b] = __a;
+ return __res;
+}
+
+static __inline__ vector unsigned int __ATTRS_o_ai vec_promote(unsigned int __a,
+ int __b) {
+ vector unsigned int __res = (vector unsigned int)(0);
+ __res[__b] = __a;
+ return __res;
+}
+
+static __inline__ vector float __ATTRS_o_ai vec_promote(float __a, int __b) {
+ vector float __res = (vector float)(0);
+ __res[__b] = __a;
+ return __res;
+}
+
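+/* Illustrative sketch only, not part of the upstream header: vec_promote
+   zero-initializes a vector and writes the scalar into element __b, so the
+   call below yields {0, 0, 0, 42}; the __demo_* name is hypothetical. */
+static __inline__ vector int __demo_promote(void) {
+  return vec_promote(42, 3); /* element 3 becomes 42, the rest stay 0 */
+}
+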
+/* vec_splats */
+
+static __inline__ vector signed char __ATTRS_o_ai vec_splats(signed char __a) {
+ return (vector signed char)(__a);
+}
+
+static __inline__ vector unsigned char __ATTRS_o_ai
+vec_splats(unsigned char __a) {
+ return (vector unsigned char)(__a);
+}
+
+static __inline__ vector short __ATTRS_o_ai vec_splats(short __a) {
+ return (vector short)(__a);
+}
+
+static __inline__ vector unsigned short __ATTRS_o_ai
+vec_splats(unsigned short __a) {
+ return (vector unsigned short)(__a);
+}
+
+static __inline__ vector int __ATTRS_o_ai vec_splats(int __a) {
+ return (vector int)(__a);
+}
+
+static __inline__ vector unsigned int __ATTRS_o_ai
+vec_splats(unsigned int __a) {
+ return (vector unsigned int)(__a);
+}
+
+#ifdef __VSX__
+static __inline__ vector signed long long __ATTRS_o_ai
+vec_splats(signed long long __a) {
+ return (vector signed long long)(__a);
+}
+
+static __inline__ vector unsigned long long __ATTRS_o_ai
+vec_splats(unsigned long long __a) {
+ return (vector unsigned long long)(__a);
+}
+
+#if defined(__POWER8_VECTOR__) && defined(__powerpc64__)
+static __inline__ vector signed __int128 __ATTRS_o_ai
+vec_splats(signed __int128 __a) {
+ return (vector signed __int128)(__a);
+}
+
+static __inline__ vector unsigned __int128 __ATTRS_o_ai
+vec_splats(unsigned __int128 __a) {
+ return (vector unsigned __int128)(__a);
+}
+
+#endif
+
+static __inline__ vector double __ATTRS_o_ai vec_splats(double __a) {
+ return (vector double)(__a);
+}
+#endif
+
+static __inline__ vector float __ATTRS_o_ai vec_splats(float __a) {
+ return (vector float)(__a);
+}
+
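+/* Illustrative sketch only, not part of the upstream header: vec_splats
+   relies on the AltiVec-style cast of a single scalar, which replicates it
+   into every element; the __demo_* name is hypothetical. */
+static __inline__ vector float __demo_splat_ones(void) {
+  return vec_splats(1.0f); /* {1.0f, 1.0f, 1.0f, 1.0f} */
+}
+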
+/* ----------------------------- predicates --------------------------------- */
+
+/* vec_all_eq */
+
+static __inline__ int __ATTRS_o_ai vec_all_eq(vector signed char __a,
+ vector signed char __b) {
+ return __builtin_altivec_vcmpequb_p(__CR6_LT, (vector char)__a,
+ (vector char)__b);
+}
+
+static __inline__ int __ATTRS_o_ai vec_all_eq(vector signed char __a,
+ vector bool char __b) {
+ return __builtin_altivec_vcmpequb_p(__CR6_LT, (vector char)__a,
+ (vector char)__b);
+}
+
+static __inline__ int __ATTRS_o_ai vec_all_eq(vector unsigned char __a,
+ vector unsigned char __b) {
+ return __builtin_altivec_vcmpequb_p(__CR6_LT, (vector char)__a,
+ (vector char)__b);
+}
+
+static __inline__ int __ATTRS_o_ai vec_all_eq(vector unsigned char __a,
+ vector bool char __b) {
+ return __builtin_altivec_vcmpequb_p(__CR6_LT, (vector char)__a,
+ (vector char)__b);
+}
+
+static __inline__ int __ATTRS_o_ai vec_all_eq(vector bool char __a,
+ vector signed char __b) {
+ return __builtin_altivec_vcmpequb_p(__CR6_LT, (vector char)__a,
+ (vector char)__b);
+}
+
+static __inline__ int __ATTRS_o_ai vec_all_eq(vector bool char __a,
+ vector unsigned char __b) {
+ return __builtin_altivec_vcmpequb_p(__CR6_LT, (vector char)__a,
+ (vector char)__b);
+}
+
+static __inline__ int __ATTRS_o_ai vec_all_eq(vector bool char __a,
+ vector bool char __b) {
+ return __builtin_altivec_vcmpequb_p(__CR6_LT, (vector char)__a,
+ (vector char)__b);
+}
+
+static __inline__ int __ATTRS_o_ai vec_all_eq(vector short __a,
+ vector short __b) {
+ return __builtin_altivec_vcmpequh_p(__CR6_LT, __a, __b);
+}
+
+static __inline__ int __ATTRS_o_ai vec_all_eq(vector short __a,
+ vector bool short __b) {
+ return __builtin_altivec_vcmpequh_p(__CR6_LT, __a, (vector short)__b);
+}
+
+static __inline__ int __ATTRS_o_ai vec_all_eq(vector unsigned short __a,
+ vector unsigned short __b) {
+ return __builtin_altivec_vcmpequh_p(__CR6_LT, (vector short)__a,
+ (vector short)__b);
+}
+
+static __inline__ int __ATTRS_o_ai vec_all_eq(vector unsigned short __a,
+ vector bool short __b) {
+ return __builtin_altivec_vcmpequh_p(__CR6_LT, (vector short)__a,
+ (vector short)__b);
+}
+
+static __inline__ int __ATTRS_o_ai vec_all_eq(vector bool short __a,
+ vector short __b) {
+ return __builtin_altivec_vcmpequh_p(__CR6_LT, (vector short)__a,
+ (vector short)__b);
+}
+
+static __inline__ int __ATTRS_o_ai vec_all_eq(vector bool short __a,
+ vector unsigned short __b) {
+ return __builtin_altivec_vcmpequh_p(__CR6_LT, (vector short)__a,
+ (vector short)__b);
+}
+
+static __inline__ int __ATTRS_o_ai vec_all_eq(vector bool short __a,
+ vector bool short __b) {
+ return __builtin_altivec_vcmpequh_p(__CR6_LT, (vector short)__a,
+ (vector short)__b);
+}
+
+static __inline__ int __ATTRS_o_ai vec_all_eq(vector pixel __a,
+ vector pixel __b) {
+ return __builtin_altivec_vcmpequh_p(__CR6_LT, (vector short)__a,
+ (vector short)__b);
+}
+
+static __inline__ int __ATTRS_o_ai vec_all_eq(vector int __a, vector int __b) {
+ return __builtin_altivec_vcmpequw_p(__CR6_LT, __a, __b);
+}
+
+static __inline__ int __ATTRS_o_ai vec_all_eq(vector int __a,
+ vector bool int __b) {
+ return __builtin_altivec_vcmpequw_p(__CR6_LT, __a, (vector int)__b);
+}
+
+static __inline__ int __ATTRS_o_ai vec_all_eq(vector unsigned int __a,
+ vector unsigned int __b) {
+ return __builtin_altivec_vcmpequw_p(__CR6_LT, (vector int)__a,
+ (vector int)__b);
+}
+
+static __inline__ int __ATTRS_o_ai vec_all_eq(vector unsigned int __a,
+ vector bool int __b) {
+ return __builtin_altivec_vcmpequw_p(__CR6_LT, (vector int)__a,
+ (vector int)__b);
+}
+
+static __inline__ int __ATTRS_o_ai vec_all_eq(vector bool int __a,
+ vector int __b) {
+ return __builtin_altivec_vcmpequw_p(__CR6_LT, (vector int)__a,
+ (vector int)__b);
+}
+
+static __inline__ int __ATTRS_o_ai vec_all_eq(vector bool int __a,
+ vector unsigned int __b) {
+ return __builtin_altivec_vcmpequw_p(__CR6_LT, (vector int)__a,
+ (vector int)__b);
+}
+
+static __inline__ int __ATTRS_o_ai vec_all_eq(vector bool int __a,
+ vector bool int __b) {
+ return __builtin_altivec_vcmpequw_p(__CR6_LT, (vector int)__a,
+ (vector int)__b);
+}
+
+#ifdef __POWER8_VECTOR__
+static __inline__ int __ATTRS_o_ai vec_all_eq(vector signed long long __a,
+ vector signed long long __b) {
+ return __builtin_altivec_vcmpequd_p(__CR6_LT, __a, __b);
+}
+
+static __inline__ int __ATTRS_o_ai vec_all_eq(vector long long __a,
+ vector bool long long __b) {
+ return __builtin_altivec_vcmpequd_p(__CR6_LT, __a, (vector long long)__b);
+}
+
+static __inline__ int __ATTRS_o_ai vec_all_eq(vector unsigned long long __a,
+ vector unsigned long long __b) {
+ return __builtin_altivec_vcmpequd_p(__CR6_LT, (vector long long)__a,
+ (vector long long)__b);
+}
+
+static __inline__ int __ATTRS_o_ai vec_all_eq(vector unsigned long long __a,
+ vector bool long long __b) {
+ return __builtin_altivec_vcmpequd_p(__CR6_LT, (vector long long)__a,
+ (vector long long)__b);
+}
+
+static __inline__ int __ATTRS_o_ai vec_all_eq(vector bool long long __a,
+ vector long long __b) {
+ return __builtin_altivec_vcmpequd_p(__CR6_LT, (vector long long)__a,
+ (vector long long)__b);
+}
+
+static __inline__ int __ATTRS_o_ai vec_all_eq(vector bool long long __a,
+ vector unsigned long long __b) {
+ return __builtin_altivec_vcmpequd_p(__CR6_LT, (vector long long)__a,
+ (vector long long)__b);
+}
+
+static __inline__ int __ATTRS_o_ai vec_all_eq(vector bool long long __a,
+ vector bool long long __b) {
+ return __builtin_altivec_vcmpequd_p(__CR6_LT, (vector long long)__a,
+ (vector long long)__b);
+}
+#endif
+
+static __inline__ int __ATTRS_o_ai vec_all_eq(vector float __a,
+ vector float __b) {
+#ifdef __VSX__
+ return __builtin_vsx_xvcmpeqsp_p(__CR6_LT, __a, __b);
+#else
+ return __builtin_altivec_vcmpeqfp_p(__CR6_LT, __a, __b);
+#endif
+}
+
+#ifdef __VSX__
+static __inline__ int __ATTRS_o_ai vec_all_eq(vector double __a,
+ vector double __b) {
+ return __builtin_vsx_xvcmpeqdp_p(__CR6_LT, __a, __b);
+}
+#endif
+
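+/* Illustrative sketch only, not part of the upstream header: the *_p
+   builtins return the CR6 bit named by their first argument, so __CR6_LT
+   above reads "the compare was true in every element"; the __demo_* name
+   is hypothetical. */
+static __inline__ int __demo_vectors_identical(vector int __a,
+                                               vector int __b) {
+  return vec_all_eq(__a, __b); /* 1 iff every element of __a equals __b */
+}
+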
+/* vec_all_ge */
+
+static __inline__ int __ATTRS_o_ai vec_all_ge(vector signed char __a,
+ vector signed char __b) {
+ return __builtin_altivec_vcmpgtsb_p(__CR6_EQ, __b, __a);
+}
+
+static __inline__ int __ATTRS_o_ai vec_all_ge(vector signed char __a,
+ vector bool char __b) {
+ return __builtin_altivec_vcmpgtsb_p(__CR6_EQ, (vector signed char)__b, __a);
+}
+
+static __inline__ int __ATTRS_o_ai vec_all_ge(vector unsigned char __a,
+ vector unsigned char __b) {
+ return __builtin_altivec_vcmpgtub_p(__CR6_EQ, __b, __a);
+}
+
+static __inline__ int __ATTRS_o_ai vec_all_ge(vector unsigned char __a,
+ vector bool char __b) {
+ return __builtin_altivec_vcmpgtub_p(__CR6_EQ, (vector unsigned char)__b, __a);
+}
+
+static __inline__ int __ATTRS_o_ai vec_all_ge(vector bool char __a,
+ vector signed char __b) {
+ return __builtin_altivec_vcmpgtub_p(__CR6_EQ, (vector unsigned char)__b,
+ (vector unsigned char)__a);
+}
+
+static __inline__ int __ATTRS_o_ai vec_all_ge(vector bool char __a,
+ vector unsigned char __b) {
+ return __builtin_altivec_vcmpgtub_p(__CR6_EQ, __b, (vector unsigned char)__a);
+}
+
+static __inline__ int __ATTRS_o_ai vec_all_ge(vector bool char __a,
+ vector bool char __b) {
+ return __builtin_altivec_vcmpgtub_p(__CR6_EQ, (vector unsigned char)__b,
+ (vector unsigned char)__a);
+}
+
+static __inline__ int __ATTRS_o_ai vec_all_ge(vector short __a,
+ vector short __b) {
+ return __builtin_altivec_vcmpgtsh_p(__CR6_EQ, __b, __a);
+}
+
+static __inline__ int __ATTRS_o_ai vec_all_ge(vector short __a,
+ vector bool short __b) {
+ return __builtin_altivec_vcmpgtsh_p(__CR6_EQ, (vector short)__b, __a);
+}
+
+static __inline__ int __ATTRS_o_ai vec_all_ge(vector unsigned short __a,
+ vector unsigned short __b) {
+ return __builtin_altivec_vcmpgtuh_p(__CR6_EQ, __b, __a);
+}
+
+static __inline__ int __ATTRS_o_ai vec_all_ge(vector unsigned short __a,
+ vector bool short __b) {
+ return __builtin_altivec_vcmpgtuh_p(__CR6_EQ, (vector unsigned short)__b,
+ __a);
+}
+
+static __inline__ int __ATTRS_o_ai vec_all_ge(vector bool short __a,
+ vector short __b) {
+ return __builtin_altivec_vcmpgtuh_p(__CR6_EQ, (vector unsigned short)__b,
+ (vector unsigned short)__a);
+}
+
+static __inline__ int __ATTRS_o_ai vec_all_ge(vector bool short __a,
+ vector unsigned short __b) {
+ return __builtin_altivec_vcmpgtuh_p(__CR6_EQ, __b,
+ (vector unsigned short)__a);
+}
+
+static __inline__ int __ATTRS_o_ai vec_all_ge(vector bool short __a,
+ vector bool short __b) {
+ return __builtin_altivec_vcmpgtuh_p(__CR6_EQ, (vector unsigned short)__b,
+ (vector unsigned short)__a);
+}
+
+static __inline__ int __ATTRS_o_ai vec_all_ge(vector int __a, vector int __b) {
+ return __builtin_altivec_vcmpgtsw_p(__CR6_EQ, __b, __a);
+}
+
+static __inline__ int __ATTRS_o_ai vec_all_ge(vector int __a,
+ vector bool int __b) {
+ return __builtin_altivec_vcmpgtsw_p(__CR6_EQ, (vector int)__b, __a);
+}
+
+static __inline__ int __ATTRS_o_ai vec_all_ge(vector unsigned int __a,
+ vector unsigned int __b) {
+ return __builtin_altivec_vcmpgtuw_p(__CR6_EQ, __b, __a);
+}
+
+static __inline__ int __ATTRS_o_ai vec_all_ge(vector unsigned int __a,
+ vector bool int __b) {
+ return __builtin_altivec_vcmpgtuw_p(__CR6_EQ, (vector unsigned int)__b, __a);
+}
+
+static __inline__ int __ATTRS_o_ai vec_all_ge(vector bool int __a,
+ vector int __b) {
+ return __builtin_altivec_vcmpgtuw_p(__CR6_EQ, (vector unsigned int)__b,
+ (vector unsigned int)__a);
+}
+
+static __inline__ int __ATTRS_o_ai vec_all_ge(vector bool int __a,
+ vector unsigned int __b) {
+ return __builtin_altivec_vcmpgtuw_p(__CR6_EQ, __b, (vector unsigned int)__a);
+}
+
+static __inline__ int __ATTRS_o_ai vec_all_ge(vector bool int __a,
+ vector bool int __b) {
+ return __builtin_altivec_vcmpgtuw_p(__CR6_EQ, (vector unsigned int)__b,
+ (vector unsigned int)__a);
+}
+
+#ifdef __POWER8_VECTOR__
+static __inline__ int __ATTRS_o_ai vec_all_ge(vector signed long long __a,
+ vector signed long long __b) {
+ return __builtin_altivec_vcmpgtsd_p(__CR6_EQ, __b, __a);
+}
+static __inline__ int __ATTRS_o_ai vec_all_ge(vector signed long long __a,
+ vector bool long long __b) {
+ return __builtin_altivec_vcmpgtsd_p(__CR6_EQ, (vector signed long long)__b,
+ __a);
+}
+
+static __inline__ int __ATTRS_o_ai vec_all_ge(vector unsigned long long __a,
+ vector unsigned long long __b) {
+ return __builtin_altivec_vcmpgtud_p(__CR6_EQ, __b, __a);
+}
+
+static __inline__ int __ATTRS_o_ai vec_all_ge(vector unsigned long long __a,
+ vector bool long long __b) {
+ return __builtin_altivec_vcmpgtud_p(__CR6_EQ, (vector unsigned long long)__b,
+ __a);
+}
+
+static __inline__ int __ATTRS_o_ai vec_all_ge(vector bool long long __a,
+ vector signed long long __b) {
+ return __builtin_altivec_vcmpgtud_p(__CR6_EQ, (vector unsigned long long)__b,
+ (vector unsigned long long)__a);
+}
+
+static __inline__ int __ATTRS_o_ai vec_all_ge(vector bool long long __a,
+ vector unsigned long long __b) {
+ return __builtin_altivec_vcmpgtud_p(__CR6_EQ, __b,
+ (vector unsigned long long)__a);
+}
+
+static __inline__ int __ATTRS_o_ai vec_all_ge(vector bool long long __a,
+ vector bool long long __b) {
+ return __builtin_altivec_vcmpgtud_p(__CR6_EQ, (vector unsigned long long)__b,
+ (vector unsigned long long)__a);
+}
+#endif
+
+static __inline__ int __ATTRS_o_ai vec_all_ge(vector float __a,
+ vector float __b) {
+#ifdef __VSX__
+ return __builtin_vsx_xvcmpgesp_p(__CR6_LT, __a, __b);
+#else
+ return __builtin_altivec_vcmpgefp_p(__CR6_LT, __a, __b);
+#endif
+}
+
+#ifdef __VSX__
+static __inline__ int __ATTRS_o_ai vec_all_ge(vector double __a,
+ vector double __b) {
+ return __builtin_vsx_xvcmpgedp_p(__CR6_LT, __a, __b);
+}
+#endif
+
+/* vec_all_gt */
+
+static __inline__ int __ATTRS_o_ai vec_all_gt(vector signed char __a,
+ vector signed char __b) {
+ return __builtin_altivec_vcmpgtsb_p(__CR6_LT, __a, __b);
+}
+
+static __inline__ int __ATTRS_o_ai vec_all_gt(vector signed char __a,
+ vector bool char __b) {
+ return __builtin_altivec_vcmpgtsb_p(__CR6_LT, __a, (vector signed char)__b);
+}
+
+static __inline__ int __ATTRS_o_ai vec_all_gt(vector unsigned char __a,
+ vector unsigned char __b) {
+ return __builtin_altivec_vcmpgtub_p(__CR6_LT, __a, __b);
+}
+
+static __inline__ int __ATTRS_o_ai vec_all_gt(vector unsigned char __a,
+ vector bool char __b) {
+ return __builtin_altivec_vcmpgtub_p(__CR6_LT, __a, (vector unsigned char)__b);
+}
+
+static __inline__ int __ATTRS_o_ai vec_all_gt(vector bool char __a,
+ vector signed char __b) {
+ return __builtin_altivec_vcmpgtub_p(__CR6_LT, (vector unsigned char)__a,
+ (vector unsigned char)__b);
+}
+
+static __inline__ int __ATTRS_o_ai vec_all_gt(vector bool char __a,
+ vector unsigned char __b) {
+ return __builtin_altivec_vcmpgtub_p(__CR6_LT, (vector unsigned char)__a, __b);
+}
+
+static __inline__ int __ATTRS_o_ai vec_all_gt(vector bool char __a,
+ vector bool char __b) {
+ return __builtin_altivec_vcmpgtub_p(__CR6_LT, (vector unsigned char)__a,
+ (vector unsigned char)__b);
+}
+
+static __inline__ int __ATTRS_o_ai vec_all_gt(vector short __a,
+ vector short __b) {
+ return __builtin_altivec_vcmpgtsh_p(__CR6_LT, __a, __b);
+}
+
+static __inline__ int __ATTRS_o_ai vec_all_gt(vector short __a,
+ vector bool short __b) {
+ return __builtin_altivec_vcmpgtsh_p(__CR6_LT, __a, (vector short)__b);
+}
+
+static __inline__ int __ATTRS_o_ai vec_all_gt(vector unsigned short __a,
+ vector unsigned short __b) {
+ return __builtin_altivec_vcmpgtuh_p(__CR6_LT, __a, __b);
+}
+
+static __inline__ int __ATTRS_o_ai vec_all_gt(vector unsigned short __a,
+ vector bool short __b) {
+ return __builtin_altivec_vcmpgtuh_p(__CR6_LT, __a,
+ (vector unsigned short)__b);
+}
+
+static __inline__ int __ATTRS_o_ai vec_all_gt(vector bool short __a,
+ vector short __b) {
+ return __builtin_altivec_vcmpgtuh_p(__CR6_LT, (vector unsigned short)__a,
+ (vector unsigned short)__b);
+}
+
+static __inline__ int __ATTRS_o_ai vec_all_gt(vector bool short __a,
+ vector unsigned short __b) {
+ return __builtin_altivec_vcmpgtuh_p(__CR6_LT, (vector unsigned short)__a,
+ __b);
+}
+
+static __inline__ int __ATTRS_o_ai vec_all_gt(vector bool short __a,
+ vector bool short __b) {
+ return __builtin_altivec_vcmpgtuh_p(__CR6_LT, (vector unsigned short)__a,
+ (vector unsigned short)__b);
+}
+
+static __inline__ int __ATTRS_o_ai vec_all_gt(vector int __a, vector int __b) {
+ return __builtin_altivec_vcmpgtsw_p(__CR6_LT, __a, __b);
+}
+
+static __inline__ int __ATTRS_o_ai vec_all_gt(vector int __a,
+ vector bool int __b) {
+ return __builtin_altivec_vcmpgtsw_p(__CR6_LT, __a, (vector int)__b);
+}
+
+static __inline__ int __ATTRS_o_ai vec_all_gt(vector unsigned int __a,
+ vector unsigned int __b) {
+ return __builtin_altivec_vcmpgtuw_p(__CR6_LT, __a, __b);
+}
+
+static __inline__ int __ATTRS_o_ai vec_all_gt(vector unsigned int __a,
+ vector bool int __b) {
+ return __builtin_altivec_vcmpgtuw_p(__CR6_LT, __a, (vector unsigned int)__b);
+}
+
+static __inline__ int __ATTRS_o_ai vec_all_gt(vector bool int __a,
+ vector int __b) {
+ return __builtin_altivec_vcmpgtuw_p(__CR6_LT, (vector unsigned int)__a,
+ (vector unsigned int)__b);
+}
+
+static __inline__ int __ATTRS_o_ai vec_all_gt(vector bool int __a,
+ vector unsigned int __b) {
+ return __builtin_altivec_vcmpgtuw_p(__CR6_LT, (vector unsigned int)__a, __b);
+}
+
+static __inline__ int __ATTRS_o_ai vec_all_gt(vector bool int __a,
+ vector bool int __b) {
+ return __builtin_altivec_vcmpgtuw_p(__CR6_LT, (vector unsigned int)__a,
+ (vector unsigned int)__b);
+}
+
+#ifdef __POWER8_VECTOR__
+static __inline__ int __ATTRS_o_ai vec_all_gt(vector signed long long __a,
+ vector signed long long __b) {
+ return __builtin_altivec_vcmpgtsd_p(__CR6_LT, __a, __b);
+}
+static __inline__ int __ATTRS_o_ai vec_all_gt(vector signed long long __a,
+ vector bool long long __b) {
+ return __builtin_altivec_vcmpgtsd_p(__CR6_LT, __a,
+ (vector signed long long)__b);
+}
+
+static __inline__ int __ATTRS_o_ai vec_all_gt(vector unsigned long long __a,
+ vector unsigned long long __b) {
+ return __builtin_altivec_vcmpgtud_p(__CR6_LT, __a, __b);
+}
+
+static __inline__ int __ATTRS_o_ai vec_all_gt(vector unsigned long long __a,
+ vector bool long long __b) {
+ return __builtin_altivec_vcmpgtud_p(__CR6_LT, __a,
+ (vector unsigned long long)__b);
+}
+
+static __inline__ int __ATTRS_o_ai vec_all_gt(vector bool long long __a,
+ vector signed long long __b) {
+ return __builtin_altivec_vcmpgtud_p(__CR6_LT, (vector unsigned long long)__a,
+ (vector unsigned long long)__b);
+}
+
+static __inline__ int __ATTRS_o_ai vec_all_gt(vector bool long long __a,
+ vector unsigned long long __b) {
+ return __builtin_altivec_vcmpgtud_p(__CR6_LT, (vector unsigned long long)__a,
+ __b);
+}
+
+static __inline__ int __ATTRS_o_ai vec_all_gt(vector bool long long __a,
+ vector bool long long __b) {
+ return __builtin_altivec_vcmpgtud_p(__CR6_LT, (vector unsigned long long)__a,
+ (vector unsigned long long)__b);
+}
+#endif
+
+static __inline__ int __ATTRS_o_ai vec_all_gt(vector float __a,
+ vector float __b) {
+#ifdef __VSX__
+ return __builtin_vsx_xvcmpgtsp_p(__CR6_LT, __a, __b);
+#else
+ return __builtin_altivec_vcmpgtfp_p(__CR6_LT, __a, __b);
+#endif
+}
+
+#ifdef __VSX__
+static __inline__ int __ATTRS_o_ai vec_all_gt(vector double __a,
+ vector double __b) {
+ return __builtin_vsx_xvcmpgtdp_p(__CR6_LT, __a, __b);
+}
+#endif
+
+/* vec_all_in */
+
+static __inline__ int __attribute__((__always_inline__))
+vec_all_in(vector float __a, vector float __b) {
+ return __builtin_altivec_vcmpbfp_p(__CR6_EQ, __a, __b);
+}
+
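+/* Illustrative sketch only, not part of the upstream header: vec_all_in
+   wraps the bounds compare vcmpbfp and returns 1 when every element
+   satisfies -__b[i] <= __a[i] <= __b[i] (with __b non-negative); the
+   __demo_* name is hypothetical. */
+static __inline__ int __demo_all_in_unit_range(vector float __a) {
+  return vec_all_in(__a, vec_splats(1.0f)); /* every lane within [-1, 1] */
+}
+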
+/* vec_all_le */
+
+static __inline__ int __ATTRS_o_ai vec_all_le(vector signed char __a,
+ vector signed char __b) {
+ return __builtin_altivec_vcmpgtsb_p(__CR6_EQ, __a, __b);
+}
+
+static __inline__ int __ATTRS_o_ai vec_all_le(vector signed char __a,
+ vector bool char __b) {
+ return __builtin_altivec_vcmpgtsb_p(__CR6_EQ, __a, (vector signed char)__b);
+}
+
+static __inline__ int __ATTRS_o_ai vec_all_le(vector unsigned char __a,
+ vector unsigned char __b) {
+ return __builtin_altivec_vcmpgtub_p(__CR6_EQ, __a, __b);
+}
+
+static __inline__ int __ATTRS_o_ai vec_all_le(vector unsigned char __a,
+ vector bool char __b) {
+ return __builtin_altivec_vcmpgtub_p(__CR6_EQ, __a, (vector unsigned char)__b);
+}
+
+static __inline__ int __ATTRS_o_ai vec_all_le(vector bool char __a,
+ vector signed char __b) {
+ return __builtin_altivec_vcmpgtub_p(__CR6_EQ, (vector unsigned char)__a,
+ (vector unsigned char)__b);
+}
+
+static __inline__ int __ATTRS_o_ai vec_all_le(vector bool char __a,
+ vector unsigned char __b) {
+ return __builtin_altivec_vcmpgtub_p(__CR6_EQ, (vector unsigned char)__a, __b);
+}
+
+static __inline__ int __ATTRS_o_ai vec_all_le(vector bool char __a,
+ vector bool char __b) {
+ return __builtin_altivec_vcmpgtub_p(__CR6_EQ, (vector unsigned char)__a,
+ (vector unsigned char)__b);
+}
+
+static __inline__ int __ATTRS_o_ai vec_all_le(vector short __a,
+ vector short __b) {
+ return __builtin_altivec_vcmpgtsh_p(__CR6_EQ, __a, __b);
+}
+
+static __inline__ int __ATTRS_o_ai vec_all_le(vector short __a,
+ vector bool short __b) {
+ return __builtin_altivec_vcmpgtsh_p(__CR6_EQ, __a, (vector short)__b);
+}
+
+static __inline__ int __ATTRS_o_ai vec_all_le(vector unsigned short __a,
+ vector unsigned short __b) {
+ return __builtin_altivec_vcmpgtuh_p(__CR6_EQ, __a, __b);
+}
+
+static __inline__ int __ATTRS_o_ai vec_all_le(vector unsigned short __a,
+ vector bool short __b) {
+ return __builtin_altivec_vcmpgtuh_p(__CR6_EQ, __a,
+ (vector unsigned short)__b);
+}
+
+static __inline__ int __ATTRS_o_ai vec_all_le(vector bool short __a,
+ vector short __b) {
+ return __builtin_altivec_vcmpgtuh_p(__CR6_EQ, (vector unsigned short)__a,
+ (vector unsigned short)__b);
+}
+
+static __inline__ int __ATTRS_o_ai vec_all_le(vector bool short __a,
+ vector unsigned short __b) {
+ return __builtin_altivec_vcmpgtuh_p(__CR6_EQ, (vector unsigned short)__a,
+ __b);
+}
+
+static __inline__ int __ATTRS_o_ai vec_all_le(vector bool short __a,
+ vector bool short __b) {
+ return __builtin_altivec_vcmpgtuh_p(__CR6_EQ, (vector unsigned short)__a,
+ (vector unsigned short)__b);
+}
+
+static __inline__ int __ATTRS_o_ai vec_all_le(vector int __a, vector int __b) {
+ return __builtin_altivec_vcmpgtsw_p(__CR6_EQ, __a, __b);
+}
+
+static __inline__ int __ATTRS_o_ai vec_all_le(vector int __a,
+ vector bool int __b) {
+ return __builtin_altivec_vcmpgtsw_p(__CR6_EQ, __a, (vector int)__b);
+}
+
+static __inline__ int __ATTRS_o_ai vec_all_le(vector unsigned int __a,
+ vector unsigned int __b) {
+ return __builtin_altivec_vcmpgtuw_p(__CR6_EQ, __a, __b);
+}
+
+static __inline__ int __ATTRS_o_ai vec_all_le(vector unsigned int __a,
+ vector bool int __b) {
+ return __builtin_altivec_vcmpgtuw_p(__CR6_EQ, __a, (vector unsigned int)__b);
+}
+
+static __inline__ int __ATTRS_o_ai vec_all_le(vector bool int __a,
+ vector int __b) {
+ return __builtin_altivec_vcmpgtuw_p(__CR6_EQ, (vector unsigned int)__a,
+ (vector unsigned int)__b);
+}
+
+static __inline__ int __ATTRS_o_ai vec_all_le(vector bool int __a,
+ vector unsigned int __b) {
+ return __builtin_altivec_vcmpgtuw_p(__CR6_EQ, (vector unsigned int)__a, __b);
+}
+
+static __inline__ int __ATTRS_o_ai vec_all_le(vector bool int __a,
+ vector bool int __b) {
+ return __builtin_altivec_vcmpgtuw_p(__CR6_EQ, (vector unsigned int)__a,
+ (vector unsigned int)__b);
+}
+
+#ifdef __POWER8_VECTOR__
+static __inline__ int __ATTRS_o_ai vec_all_le(vector signed long long __a,
+ vector signed long long __b) {
+ return __builtin_altivec_vcmpgtsd_p(__CR6_EQ, __a, __b);
+}
+
+static __inline__ int __ATTRS_o_ai vec_all_le(vector unsigned long long __a,
+ vector unsigned long long __b) {
+ return __builtin_altivec_vcmpgtud_p(__CR6_EQ, __a, __b);
+}
+
+static __inline__ int __ATTRS_o_ai vec_all_le(vector signed long long __a,
+ vector bool long long __b) {
+ return __builtin_altivec_vcmpgtsd_p(__CR6_EQ, __a,
+ (vector signed long long)__b);
+}
+
+static __inline__ int __ATTRS_o_ai vec_all_le(vector unsigned long long __a,
+ vector bool long long __b) {
+ return __builtin_altivec_vcmpgtud_p(__CR6_EQ, __a,
+ (vector unsigned long long)__b);
+}
+
+static __inline__ int __ATTRS_o_ai vec_all_le(vector bool long long __a,
+ vector signed long long __b) {
+ return __builtin_altivec_vcmpgtud_p(__CR6_EQ, (vector unsigned long long)__a,
+ (vector unsigned long long)__b);
+}
+
+static __inline__ int __ATTRS_o_ai vec_all_le(vector bool long long __a,
+ vector unsigned long long __b) {
+ return __builtin_altivec_vcmpgtud_p(__CR6_EQ, (vector unsigned long long)__a,
+ __b);
+}
+
+static __inline__ int __ATTRS_o_ai vec_all_le(vector bool long long __a,
+ vector bool long long __b) {
+ return __builtin_altivec_vcmpgtud_p(__CR6_EQ, (vector unsigned long long)__a,
+ (vector unsigned long long)__b);
+}
+#endif
+
+static __inline__ int __ATTRS_o_ai vec_all_le(vector float __a,
+ vector float __b) {
+#ifdef __VSX__
+ return __builtin_vsx_xvcmpgesp_p(__CR6_LT, __b, __a);
+#else
+ return __builtin_altivec_vcmpgefp_p(__CR6_LT, __b, __a);
+#endif
+}
+
+#ifdef __VSX__
+static __inline__ int __ATTRS_o_ai vec_all_le(vector double __a,
+ vector double __b) {
+ return __builtin_vsx_xvcmpgedp_p(__CR6_LT, __b, __a);
+}
+#endif
+
+/* vec_all_lt */
+
+static __inline__ int __ATTRS_o_ai vec_all_lt(vector signed char __a,
+ vector signed char __b) {
+ return __builtin_altivec_vcmpgtsb_p(__CR6_LT, __b, __a);
+}
+
+static __inline__ int __ATTRS_o_ai vec_all_lt(vector signed char __a,
+ vector bool char __b) {
+ return __builtin_altivec_vcmpgtsb_p(__CR6_LT, (vector signed char)__b, __a);
+}
+
+static __inline__ int __ATTRS_o_ai vec_all_lt(vector unsigned char __a,
+ vector unsigned char __b) {
+ return __builtin_altivec_vcmpgtub_p(__CR6_LT, __b, __a);
+}
+
+static __inline__ int __ATTRS_o_ai vec_all_lt(vector unsigned char __a,
+ vector bool char __b) {
+ return __builtin_altivec_vcmpgtub_p(__CR6_LT, (vector unsigned char)__b, __a);
+}
+
+static __inline__ int __ATTRS_o_ai vec_all_lt(vector bool char __a,
+ vector signed char __b) {
+ return __builtin_altivec_vcmpgtub_p(__CR6_LT, (vector unsigned char)__b,
+ (vector unsigned char)__a);
+}
+
+static __inline__ int __ATTRS_o_ai vec_all_lt(vector bool char __a,
+ vector unsigned char __b) {
+ return __builtin_altivec_vcmpgtub_p(__CR6_LT, __b, (vector unsigned char)__a);
+}
+
+static __inline__ int __ATTRS_o_ai vec_all_lt(vector bool char __a,
+ vector bool char __b) {
+ return __builtin_altivec_vcmpgtub_p(__CR6_LT, (vector unsigned char)__b,
+ (vector unsigned char)__a);
+}
+
+static __inline__ int __ATTRS_o_ai vec_all_lt(vector short __a,
+ vector short __b) {
+ return __builtin_altivec_vcmpgtsh_p(__CR6_LT, __b, __a);
+}
+
+static __inline__ int __ATTRS_o_ai vec_all_lt(vector short __a,
+ vector bool short __b) {
+ return __builtin_altivec_vcmpgtsh_p(__CR6_LT, (vector short)__b, __a);
+}
+
+static __inline__ int __ATTRS_o_ai vec_all_lt(vector unsigned short __a,
+ vector unsigned short __b) {
+ return __builtin_altivec_vcmpgtuh_p(__CR6_LT, __b, __a);
+}
+
+static __inline__ int __ATTRS_o_ai vec_all_lt(vector unsigned short __a,
+ vector bool short __b) {
+ return __builtin_altivec_vcmpgtuh_p(__CR6_LT, (vector unsigned short)__b,
+ __a);
+}
+
+static __inline__ int __ATTRS_o_ai vec_all_lt(vector bool short __a,
+ vector short __b) {
+ return __builtin_altivec_vcmpgtuh_p(__CR6_LT, (vector unsigned short)__b,
+ (vector unsigned short)__a);
+}
+
+static __inline__ int __ATTRS_o_ai vec_all_lt(vector bool short __a,
+ vector unsigned short __b) {
+ return __builtin_altivec_vcmpgtuh_p(__CR6_LT, __b,
+ (vector unsigned short)__a);
+}
+
+static __inline__ int __ATTRS_o_ai vec_all_lt(vector bool short __a,
+ vector bool short __b) {
+ return __builtin_altivec_vcmpgtuh_p(__CR6_LT, (vector unsigned short)__b,
+ (vector unsigned short)__a);
+}
+
+static __inline__ int __ATTRS_o_ai vec_all_lt(vector int __a, vector int __b) {
+ return __builtin_altivec_vcmpgtsw_p(__CR6_LT, __b, __a);
+}
+
+static __inline__ int __ATTRS_o_ai vec_all_lt(vector int __a,
+ vector bool int __b) {
+ return __builtin_altivec_vcmpgtsw_p(__CR6_LT, (vector int)__b, __a);
+}
+
+static __inline__ int __ATTRS_o_ai vec_all_lt(vector unsigned int __a,
+ vector unsigned int __b) {
+ return __builtin_altivec_vcmpgtuw_p(__CR6_LT, __b, __a);
+}
+
+static __inline__ int __ATTRS_o_ai vec_all_lt(vector unsigned int __a,
+ vector bool int __b) {
+ return __builtin_altivec_vcmpgtuw_p(__CR6_LT, (vector unsigned int)__b, __a);
+}
+
+static __inline__ int __ATTRS_o_ai vec_all_lt(vector bool int __a,
+ vector int __b) {
+ return __builtin_altivec_vcmpgtuw_p(__CR6_LT, (vector unsigned int)__b,
+ (vector unsigned int)__a);
+}
+
+static __inline__ int __ATTRS_o_ai vec_all_lt(vector bool int __a,
+ vector unsigned int __b) {
+ return __builtin_altivec_vcmpgtuw_p(__CR6_LT, __b, (vector unsigned int)__a);
+}
+
+static __inline__ int __ATTRS_o_ai vec_all_lt(vector bool int __a,
+ vector bool int __b) {
+ return __builtin_altivec_vcmpgtuw_p(__CR6_LT, (vector unsigned int)__b,
+ (vector unsigned int)__a);
+}
+
+#ifdef __POWER8_VECTOR__
+static __inline__ int __ATTRS_o_ai vec_all_lt(vector signed long long __a,
+ vector signed long long __b) {
+ return __builtin_altivec_vcmpgtsd_p(__CR6_LT, __b, __a);
+}
+
+static __inline__ int __ATTRS_o_ai vec_all_lt(vector unsigned long long __a,
+ vector unsigned long long __b) {
+ return __builtin_altivec_vcmpgtud_p(__CR6_LT, __b, __a);
+}
+
+static __inline__ int __ATTRS_o_ai vec_all_lt(vector signed long long __a,
+ vector bool long long __b) {
+ return __builtin_altivec_vcmpgtsd_p(__CR6_LT, (vector signed long long)__b,
+ __a);
+}
+
+static __inline__ int __ATTRS_o_ai vec_all_lt(vector unsigned long long __a,
+ vector bool long long __b) {
+ return __builtin_altivec_vcmpgtud_p(__CR6_LT, (vector unsigned long long)__b,
+ __a);
+}
+
+static __inline__ int __ATTRS_o_ai vec_all_lt(vector bool long long __a,
+ vector signed long long __b) {
+ return __builtin_altivec_vcmpgtud_p(__CR6_LT, (vector unsigned long long)__b,
+ (vector unsigned long long)__a);
+}
+
+static __inline__ int __ATTRS_o_ai vec_all_lt(vector bool long long __a,
+ vector unsigned long long __b) {
+ return __builtin_altivec_vcmpgtud_p(__CR6_LT, __b,
+ (vector unsigned long long)__a);
+}
+
+static __inline__ int __ATTRS_o_ai vec_all_lt(vector bool long long __a,
+ vector bool long long __b) {
+ return __builtin_altivec_vcmpgtud_p(__CR6_LT, (vector unsigned long long)__b,
+ (vector unsigned long long)__a);
+}
+#endif
+
+static __inline__ int __ATTRS_o_ai vec_all_lt(vector float __a,
+ vector float __b) {
+#ifdef __VSX__
+ return __builtin_vsx_xvcmpgtsp_p(__CR6_LT, __b, __a);
+#else
+ return __builtin_altivec_vcmpgtfp_p(__CR6_LT, __b, __a);
+#endif
+}
+
+#ifdef __VSX__
+static __inline__ int __ATTRS_o_ai vec_all_lt(vector double __a,
+ vector double __b) {
+ return __builtin_vsx_xvcmpgtdp_p(__CR6_LT, __b, __a);
+}
+#endif
+
+/* vec_all_nan */
+
+static __inline__ int __ATTRS_o_ai vec_all_nan(vector float __a) {
+#ifdef __VSX__
+ return __builtin_vsx_xvcmpeqsp_p(__CR6_EQ, __a, __a);
+#else
+ return __builtin_altivec_vcmpeqfp_p(__CR6_EQ, __a, __a);
+#endif
+}
+
+#ifdef __VSX__
+static __inline__ int __ATTRS_o_ai vec_all_nan(vector double __a) {
+ return __builtin_vsx_xvcmpeqdp_p(__CR6_EQ, __a, __a);
+}
+#endif
+
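+/* Illustrative sketch only, not part of the upstream header: vec_all_nan
+   exploits the IEEE rule that NaN compares unequal to itself, so a
+   self-compare that is false in every element (__CR6_EQ) means every lane
+   is NaN; the __demo_* name is hypothetical. */
+static __inline__ int __demo_any_ordered_lane(vector float __a) {
+  return !vec_all_nan(__a); /* 1 iff at least one lane is not NaN */
+}
+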
+/* vec_all_ne */
+
+static __inline__ int __ATTRS_o_ai vec_all_ne(vector signed char __a,
+ vector signed char __b) {
+ return __builtin_altivec_vcmpequb_p(__CR6_EQ, (vector char)__a,
+ (vector char)__b);
+}
+
+static __inline__ int __ATTRS_o_ai vec_all_ne(vector signed char __a,
+ vector bool char __b) {
+ return __builtin_altivec_vcmpequb_p(__CR6_EQ, (vector char)__a,
+ (vector char)__b);
+}
+
+static __inline__ int __ATTRS_o_ai vec_all_ne(vector unsigned char __a,
+ vector unsigned char __b) {
+ return __builtin_altivec_vcmpequb_p(__CR6_EQ, (vector char)__a,
+ (vector char)__b);
+}
+
+static __inline__ int __ATTRS_o_ai vec_all_ne(vector unsigned char __a,
+ vector bool char __b) {
+ return __builtin_altivec_vcmpequb_p(__CR6_EQ, (vector char)__a,
+ (vector char)__b);
+}
+
+static __inline__ int __ATTRS_o_ai vec_all_ne(vector bool char __a,
+ vector signed char __b) {
+ return __builtin_altivec_vcmpequb_p(__CR6_EQ, (vector char)__a,
+ (vector char)__b);
+}
+
+static __inline__ int __ATTRS_o_ai vec_all_ne(vector bool char __a,
+ vector unsigned char __b) {
+ return __builtin_altivec_vcmpequb_p(__CR6_EQ, (vector char)__a,
+ (vector char)__b);
+}
+
+static __inline__ int __ATTRS_o_ai vec_all_ne(vector bool char __a,
+ vector bool char __b) {
+ return __builtin_altivec_vcmpequb_p(__CR6_EQ, (vector char)__a,
+ (vector char)__b);
+}
+
+static __inline__ int __ATTRS_o_ai vec_all_ne(vector short __a,
+ vector short __b) {
+ return __builtin_altivec_vcmpequh_p(__CR6_EQ, __a, __b);
+}
+
+static __inline__ int __ATTRS_o_ai vec_all_ne(vector short __a,
+ vector bool short __b) {
+ return __builtin_altivec_vcmpequh_p(__CR6_EQ, __a, (vector short)__b);
+}
+
+static __inline__ int __ATTRS_o_ai vec_all_ne(vector unsigned short __a,
+ vector unsigned short __b) {
+ return __builtin_altivec_vcmpequh_p(__CR6_EQ, (vector short)__a,
+ (vector short)__b);
+}
+
+static __inline__ int __ATTRS_o_ai vec_all_ne(vector unsigned short __a,
+ vector bool short __b) {
+ return __builtin_altivec_vcmpequh_p(__CR6_EQ, (vector short)__a,
+ (vector short)__b);
+}
+
+static __inline__ int __ATTRS_o_ai vec_all_ne(vector bool short __a,
+ vector short __b) {
+ return __builtin_altivec_vcmpequh_p(__CR6_EQ, (vector short)__a,
+ (vector short)__b);
+}
+
+static __inline__ int __ATTRS_o_ai vec_all_ne(vector bool short __a,
+ vector unsigned short __b) {
+ return __builtin_altivec_vcmpequh_p(__CR6_EQ, (vector short)__a,
+ (vector short)__b);
+}
+
+static __inline__ int __ATTRS_o_ai vec_all_ne(vector bool short __a,
+ vector bool short __b) {
+ return __builtin_altivec_vcmpequh_p(__CR6_EQ, (vector short)__a,
+ (vector short)__b);
+}
+
+static __inline__ int __ATTRS_o_ai vec_all_ne(vector pixel __a,
+ vector pixel __b) {
+ return __builtin_altivec_vcmpequh_p(__CR6_EQ, (vector short)__a,
+ (vector short)__b);
+}
+
+static __inline__ int __ATTRS_o_ai vec_all_ne(vector int __a, vector int __b) {
+ return __builtin_altivec_vcmpequw_p(__CR6_EQ, __a, __b);
+}
+
+static __inline__ int __ATTRS_o_ai vec_all_ne(vector int __a,
+ vector bool int __b) {
+ return __builtin_altivec_vcmpequw_p(__CR6_EQ, __a, (vector int)__b);
+}
+
+static __inline__ int __ATTRS_o_ai vec_all_ne(vector unsigned int __a,
+ vector unsigned int __b) {
+ return __builtin_altivec_vcmpequw_p(__CR6_EQ, (vector int)__a,
+ (vector int)__b);
+}
+
+static __inline__ int __ATTRS_o_ai vec_all_ne(vector unsigned int __a,
+ vector bool int __b) {
+ return __builtin_altivec_vcmpequw_p(__CR6_EQ, (vector int)__a,
+ (vector int)__b);
+}
+
+static __inline__ int __ATTRS_o_ai vec_all_ne(vector bool int __a,
+ vector int __b) {
+ return __builtin_altivec_vcmpequw_p(__CR6_EQ, (vector int)__a,
+ (vector int)__b);
+}
+
+static __inline__ int __ATTRS_o_ai vec_all_ne(vector bool int __a,
+ vector unsigned int __b) {
+ return __builtin_altivec_vcmpequw_p(__CR6_EQ, (vector int)__a,
+ (vector int)__b);
+}
+
+static __inline__ int __ATTRS_o_ai vec_all_ne(vector bool int __a,
+ vector bool int __b) {
+ return __builtin_altivec_vcmpequw_p(__CR6_EQ, (vector int)__a,
+ (vector int)__b);
+}
+
+#ifdef __POWER8_VECTOR__
+static __inline__ int __ATTRS_o_ai vec_all_ne(vector signed long long __a,
+ vector signed long long __b) {
+ return __builtin_altivec_vcmpequd_p(__CR6_EQ, __a, __b);
+}
+
+static __inline__ int __ATTRS_o_ai vec_all_ne(vector unsigned long long __a,
+ vector unsigned long long __b) {
+ return __builtin_altivec_vcmpequd_p(__CR6_EQ, (vector long long)__a,
+ (vector long long)__b);
+}
+
+static __inline__ int __ATTRS_o_ai vec_all_ne(vector signed long long __a,
+ vector bool long long __b) {
+ return __builtin_altivec_vcmpequd_p(__CR6_EQ, __a,
+ (vector signed long long)__b);
+}
+
+static __inline__ int __ATTRS_o_ai vec_all_ne(vector unsigned long long __a,
+ vector bool long long __b) {
+ return __builtin_altivec_vcmpequd_p(__CR6_EQ, (vector signed long long)__a,
+ (vector signed long long)__b);
+}
+
+static __inline__ int __ATTRS_o_ai vec_all_ne(vector bool long long __a,
+ vector signed long long __b) {
+ return __builtin_altivec_vcmpequd_p(__CR6_EQ, (vector signed long long)__a,
+ (vector signed long long)__b);
+}
+
+static __inline__ int __ATTRS_o_ai vec_all_ne(vector bool long long __a,
+ vector unsigned long long __b) {
+ return __builtin_altivec_vcmpequd_p(__CR6_EQ, (vector signed long long)__a,
+ (vector signed long long)__b);
+}
+
+static __inline__ int __ATTRS_o_ai vec_all_ne(vector bool long long __a,
+ vector bool long long __b) {
+ return __builtin_altivec_vcmpequd_p(__CR6_EQ, (vector signed long long)__a,
+ (vector signed long long)__b);
+}
+#endif
+
+static __inline__ int __ATTRS_o_ai vec_all_ne(vector float __a,
+ vector float __b) {
+#ifdef __VSX__
+  return __builtin_vsx_xvcmpeqsp_p(__CR6_EQ, __a, __b);
+#else
+ return __builtin_altivec_vcmpeqfp_p(__CR6_EQ, __a, __b);
+#endif
+}
+
+#ifdef __VSX__
+static __inline__ int __ATTRS_o_ai vec_all_ne(vector double __a,
+ vector double __b) {
+ return __builtin_vsx_xvcmpeqdp_p(__CR6_EQ, __a, __b);
+}
+#endif
+
+/* vec_all_nge */
+
+static __inline__ int __ATTRS_o_ai vec_all_nge(vector float __a,
+ vector float __b) {
+#ifdef __VSX__
+ return __builtin_vsx_xvcmpgesp_p(__CR6_EQ, __a, __b);
+#else
+ return __builtin_altivec_vcmpgefp_p(__CR6_EQ, __a, __b);
+#endif
+}
+
+#ifdef __VSX__
+static __inline__ int __ATTRS_o_ai vec_all_nge(vector double __a,
+ vector double __b) {
+ return __builtin_vsx_xvcmpgedp_p(__CR6_EQ, __a, __b);
+}
+#endif
+
+/* vec_all_ngt */
+
+static __inline__ int __ATTRS_o_ai vec_all_ngt(vector float __a,
+ vector float __b) {
+#ifdef __VSX__
+ return __builtin_vsx_xvcmpgtsp_p(__CR6_EQ, __a, __b);
+#else
+ return __builtin_altivec_vcmpgtfp_p(__CR6_EQ, __a, __b);
+#endif
+}
+
+#ifdef __VSX__
+static __inline__ int __ATTRS_o_ai vec_all_ngt(vector double __a,
+ vector double __b) {
+ return __builtin_vsx_xvcmpgtdp_p(__CR6_EQ, __a, __b);
+}
+#endif
+
+/* vec_all_nle */
+
+static __inline__ int __attribute__((__always_inline__))
+vec_all_nle(vector float __a, vector float __b) {
+ return __builtin_altivec_vcmpgefp_p(__CR6_EQ, __b, __a);
+}
+
+/* vec_all_nlt */
+
+static __inline__ int __attribute__((__always_inline__))
+vec_all_nlt(vector float __a, vector float __b) {
+ return __builtin_altivec_vcmpgtfp_p(__CR6_EQ, __b, __a);
+}
+
+/* vec_all_numeric */
+
+static __inline__ int __attribute__((__always_inline__))
+vec_all_numeric(vector float __a) {
+ return __builtin_altivec_vcmpeqfp_p(__CR6_LT, __a, __a);
+}
+
+/* vec_any_eq */
+
+static __inline__ int __ATTRS_o_ai vec_any_eq(vector signed char __a,
+ vector signed char __b) {
+ return __builtin_altivec_vcmpequb_p(__CR6_EQ_REV, (vector char)__a,
+ (vector char)__b);
+}
+
+static __inline__ int __ATTRS_o_ai vec_any_eq(vector signed char __a,
+ vector bool char __b) {
+ return __builtin_altivec_vcmpequb_p(__CR6_EQ_REV, (vector char)__a,
+ (vector char)__b);
+}
+
+static __inline__ int __ATTRS_o_ai vec_any_eq(vector unsigned char __a,
+ vector unsigned char __b) {
+ return __builtin_altivec_vcmpequb_p(__CR6_EQ_REV, (vector char)__a,
+ (vector char)__b);
+}
+
+static __inline__ int __ATTRS_o_ai vec_any_eq(vector unsigned char __a,
+ vector bool char __b) {
+ return __builtin_altivec_vcmpequb_p(__CR6_EQ_REV, (vector char)__a,
+ (vector char)__b);
+}
+
+static __inline__ int __ATTRS_o_ai vec_any_eq(vector bool char __a,
+ vector signed char __b) {
+ return __builtin_altivec_vcmpequb_p(__CR6_EQ_REV, (vector char)__a,
+ (vector char)__b);
+}
+
+static __inline__ int __ATTRS_o_ai vec_any_eq(vector bool char __a,
+ vector unsigned char __b) {
+ return __builtin_altivec_vcmpequb_p(__CR6_EQ_REV, (vector char)__a,
+ (vector char)__b);
+}
+
+static __inline__ int __ATTRS_o_ai vec_any_eq(vector bool char __a,
+ vector bool char __b) {
+ return __builtin_altivec_vcmpequb_p(__CR6_EQ_REV, (vector char)__a,
+ (vector char)__b);
+}
+
+static __inline__ int __ATTRS_o_ai vec_any_eq(vector short __a,
+ vector short __b) {
+ return __builtin_altivec_vcmpequh_p(__CR6_EQ_REV, __a, __b);
+}
+
+static __inline__ int __ATTRS_o_ai vec_any_eq(vector short __a,
+ vector bool short __b) {
+ return __builtin_altivec_vcmpequh_p(__CR6_EQ_REV, __a, (vector short)__b);
+}
+
+static __inline__ int __ATTRS_o_ai vec_any_eq(vector unsigned short __a,
+ vector unsigned short __b) {
+ return __builtin_altivec_vcmpequh_p(__CR6_EQ_REV, (vector short)__a,
+ (vector short)__b);
+}
+
+static __inline__ int __ATTRS_o_ai vec_any_eq(vector unsigned short __a,
+ vector bool short __b) {
+ return __builtin_altivec_vcmpequh_p(__CR6_EQ_REV, (vector short)__a,
+ (vector short)__b);
+}
+
+static __inline__ int __ATTRS_o_ai vec_any_eq(vector bool short __a,
+ vector short __b) {
+ return __builtin_altivec_vcmpequh_p(__CR6_EQ_REV, (vector short)__a,
+ (vector short)__b);
+}
+
+static __inline__ int __ATTRS_o_ai vec_any_eq(vector bool short __a,
+ vector unsigned short __b) {
+ return __builtin_altivec_vcmpequh_p(__CR6_EQ_REV, (vector short)__a,
+ (vector short)__b);
+}
+
+static __inline__ int __ATTRS_o_ai vec_any_eq(vector bool short __a,
+ vector bool short __b) {
+ return __builtin_altivec_vcmpequh_p(__CR6_EQ_REV, (vector short)__a,
+ (vector short)__b);
+}
+
+static __inline__ int __ATTRS_o_ai vec_any_eq(vector pixel __a,
+ vector pixel __b) {
+ return __builtin_altivec_vcmpequh_p(__CR6_EQ_REV, (vector short)__a,
+ (vector short)__b);
+}
+
+static __inline__ int __ATTRS_o_ai vec_any_eq(vector int __a, vector int __b) {
+ return __builtin_altivec_vcmpequw_p(__CR6_EQ_REV, __a, __b);
+}
+
+static __inline__ int __ATTRS_o_ai vec_any_eq(vector int __a,
+ vector bool int __b) {
+ return __builtin_altivec_vcmpequw_p(__CR6_EQ_REV, __a, (vector int)__b);
+}
+
+static __inline__ int __ATTRS_o_ai vec_any_eq(vector unsigned int __a,
+ vector unsigned int __b) {
+ return __builtin_altivec_vcmpequw_p(__CR6_EQ_REV, (vector int)__a,
+ (vector int)__b);
+}
+
+static __inline__ int __ATTRS_o_ai vec_any_eq(vector unsigned int __a,
+ vector bool int __b) {
+ return __builtin_altivec_vcmpequw_p(__CR6_EQ_REV, (vector int)__a,
+ (vector int)__b);
+}
+
+static __inline__ int __ATTRS_o_ai vec_any_eq(vector bool int __a,
+ vector int __b) {
+ return __builtin_altivec_vcmpequw_p(__CR6_EQ_REV, (vector int)__a,
+ (vector int)__b);
+}
+
+static __inline__ int __ATTRS_o_ai vec_any_eq(vector bool int __a,
+ vector unsigned int __b) {
+ return __builtin_altivec_vcmpequw_p(__CR6_EQ_REV, (vector int)__a,
+ (vector int)__b);
+}
+
+static __inline__ int __ATTRS_o_ai vec_any_eq(vector bool int __a,
+ vector bool int __b) {
+ return __builtin_altivec_vcmpequw_p(__CR6_EQ_REV, (vector int)__a,
+ (vector int)__b);
+}
+
+#ifdef __POWER8_VECTOR__
+static __inline__ int __ATTRS_o_ai vec_any_eq(vector signed long long __a,
+ vector signed long long __b) {
+ return __builtin_altivec_vcmpequd_p(__CR6_EQ_REV, __a, __b);
+}
+
+static __inline__ int __ATTRS_o_ai vec_any_eq(vector unsigned long long __a,
+ vector unsigned long long __b) {
+ return __builtin_altivec_vcmpequd_p(__CR6_EQ_REV, (vector long long)__a,
+ (vector long long)__b);
+}
+
+static __inline__ int __ATTRS_o_ai vec_any_eq(vector signed long long __a,
+ vector bool long long __b) {
+ return __builtin_altivec_vcmpequd_p(__CR6_EQ_REV, __a,
+ (vector signed long long)__b);
+}
+
+static __inline__ int __ATTRS_o_ai vec_any_eq(vector unsigned long long __a,
+ vector bool long long __b) {
+ return __builtin_altivec_vcmpequd_p(
+ __CR6_EQ_REV, (vector signed long long)__a, (vector signed long long)__b);
+}
+
+static __inline__ int __ATTRS_o_ai vec_any_eq(vector bool long long __a,
+ vector signed long long __b) {
+ return __builtin_altivec_vcmpequd_p(
+ __CR6_EQ_REV, (vector signed long long)__a, (vector signed long long)__b);
+}
+
+static __inline__ int __ATTRS_o_ai vec_any_eq(vector bool long long __a,
+ vector unsigned long long __b) {
+ return __builtin_altivec_vcmpequd_p(
+ __CR6_EQ_REV, (vector signed long long)__a, (vector signed long long)__b);
+}
+
+static __inline__ int __ATTRS_o_ai vec_any_eq(vector bool long long __a,
+ vector bool long long __b) {
+ return __builtin_altivec_vcmpequd_p(
+ __CR6_EQ_REV, (vector signed long long)__a, (vector signed long long)__b);
+}
+#endif
+
+static __inline__ int __ATTRS_o_ai vec_any_eq(vector float __a,
+ vector float __b) {
+#ifdef __VSX__
+ return __builtin_vsx_xvcmpeqsp_p(__CR6_EQ_REV, __a, __b);
+#else
+ return __builtin_altivec_vcmpeqfp_p(__CR6_EQ_REV, __a, __b);
+#endif
+}
+
+#ifdef __VSX__
+static __inline__ int __ATTRS_o_ai vec_any_eq(vector double __a,
+ vector double __b) {
+ return __builtin_vsx_xvcmpeqdp_p(__CR6_EQ_REV, __a, __b);
+}
+#endif
+
+/* vec_any_ge */
+
+static __inline__ int __ATTRS_o_ai vec_any_ge(vector signed char __a,
+ vector signed char __b) {
+ return __builtin_altivec_vcmpgtsb_p(__CR6_LT_REV, __b, __a);
+}
+
+static __inline__ int __ATTRS_o_ai vec_any_ge(vector signed char __a,
+ vector bool char __b) {
+ return __builtin_altivec_vcmpgtsb_p(__CR6_LT_REV, (vector signed char)__b,
+ __a);
+}
+
+static __inline__ int __ATTRS_o_ai vec_any_ge(vector unsigned char __a,
+ vector unsigned char __b) {
+ return __builtin_altivec_vcmpgtub_p(__CR6_LT_REV, __b, __a);
+}
+
+static __inline__ int __ATTRS_o_ai vec_any_ge(vector unsigned char __a,
+ vector bool char __b) {
+ return __builtin_altivec_vcmpgtub_p(__CR6_LT_REV, (vector unsigned char)__b,
+ __a);
+}
+
+static __inline__ int __ATTRS_o_ai vec_any_ge(vector bool char __a,
+ vector signed char __b) {
+ return __builtin_altivec_vcmpgtub_p(__CR6_LT_REV, (vector unsigned char)__b,
+ (vector unsigned char)__a);
+}
+
+static __inline__ int __ATTRS_o_ai vec_any_ge(vector bool char __a,
+ vector unsigned char __b) {
+ return __builtin_altivec_vcmpgtub_p(__CR6_LT_REV, __b,
+ (vector unsigned char)__a);
+}
+
+static __inline__ int __ATTRS_o_ai vec_any_ge(vector bool char __a,
+ vector bool char __b) {
+ return __builtin_altivec_vcmpgtub_p(__CR6_LT_REV, (vector unsigned char)__b,
+ (vector unsigned char)__a);
+}
+
+static __inline__ int __ATTRS_o_ai vec_any_ge(vector short __a,
+ vector short __b) {
+ return __builtin_altivec_vcmpgtsh_p(__CR6_LT_REV, __b, __a);
+}
+
+static __inline__ int __ATTRS_o_ai vec_any_ge(vector short __a,
+ vector bool short __b) {
+ return __builtin_altivec_vcmpgtsh_p(__CR6_LT_REV, (vector short)__b, __a);
+}
+
+static __inline__ int __ATTRS_o_ai vec_any_ge(vector unsigned short __a,
+ vector unsigned short __b) {
+ return __builtin_altivec_vcmpgtuh_p(__CR6_LT_REV, __b, __a);
+}
+
+static __inline__ int __ATTRS_o_ai vec_any_ge(vector unsigned short __a,
+ vector bool short __b) {
+ return __builtin_altivec_vcmpgtuh_p(__CR6_LT_REV, (vector unsigned short)__b,
+ __a);
+}
+
+static __inline__ int __ATTRS_o_ai vec_any_ge(vector bool short __a,
+ vector short __b) {
+ return __builtin_altivec_vcmpgtuh_p(__CR6_LT_REV, (vector unsigned short)__b,
+ (vector unsigned short)__a);
+}
+
+static __inline__ int __ATTRS_o_ai vec_any_ge(vector bool short __a,
+ vector unsigned short __b) {
+ return __builtin_altivec_vcmpgtuh_p(__CR6_LT_REV, __b,
+ (vector unsigned short)__a);
+}
+
+static __inline__ int __ATTRS_o_ai vec_any_ge(vector bool short __a,
+ vector bool short __b) {
+ return __builtin_altivec_vcmpgtuh_p(__CR6_LT_REV, (vector unsigned short)__b,
+ (vector unsigned short)__a);
+}
+
+static __inline__ int __ATTRS_o_ai vec_any_ge(vector int __a, vector int __b) {
+ return __builtin_altivec_vcmpgtsw_p(__CR6_LT_REV, __b, __a);
+}
+
+static __inline__ int __ATTRS_o_ai vec_any_ge(vector int __a,
+ vector bool int __b) {
+ return __builtin_altivec_vcmpgtsw_p(__CR6_LT_REV, (vector int)__b, __a);
+}
+
+static __inline__ int __ATTRS_o_ai vec_any_ge(vector unsigned int __a,
+ vector unsigned int __b) {
+ return __builtin_altivec_vcmpgtuw_p(__CR6_LT_REV, __b, __a);
+}
+
+static __inline__ int __ATTRS_o_ai vec_any_ge(vector unsigned int __a,
+ vector bool int __b) {
+ return __builtin_altivec_vcmpgtuw_p(__CR6_LT_REV, (vector unsigned int)__b,
+ __a);
+}
+
+static __inline__ int __ATTRS_o_ai vec_any_ge(vector bool int __a,
+ vector int __b) {
+ return __builtin_altivec_vcmpgtuw_p(__CR6_LT_REV, (vector unsigned int)__b,
+ (vector unsigned int)__a);
+}
+
+static __inline__ int __ATTRS_o_ai vec_any_ge(vector bool int __a,
+ vector unsigned int __b) {
+ return __builtin_altivec_vcmpgtuw_p(__CR6_LT_REV, __b,
+ (vector unsigned int)__a);
+}
+
+static __inline__ int __ATTRS_o_ai vec_any_ge(vector bool int __a,
+ vector bool int __b) {
+ return __builtin_altivec_vcmpgtuw_p(__CR6_LT_REV, (vector unsigned int)__b,
+ (vector unsigned int)__a);
+}
+
+#ifdef __POWER8_VECTOR__
+static __inline__ int __ATTRS_o_ai vec_any_ge(vector signed long long __a,
+ vector signed long long __b) {
+ return __builtin_altivec_vcmpgtsd_p(__CR6_LT_REV, __b, __a);
+}
+
+static __inline__ int __ATTRS_o_ai vec_any_ge(vector unsigned long long __a,
+ vector unsigned long long __b) {
+ return __builtin_altivec_vcmpgtud_p(__CR6_LT_REV, __b, __a);
+}
+
+static __inline__ int __ATTRS_o_ai vec_any_ge(vector signed long long __a,
+ vector bool long long __b) {
+ return __builtin_altivec_vcmpgtsd_p(__CR6_LT_REV,
+ (vector signed long long)__b, __a);
+}
+
+static __inline__ int __ATTRS_o_ai vec_any_ge(vector unsigned long long __a,
+ vector bool long long __b) {
+ return __builtin_altivec_vcmpgtud_p(__CR6_LT_REV,
+ (vector unsigned long long)__b, __a);
+}
+
+static __inline__ int __ATTRS_o_ai vec_any_ge(vector bool long long __a,
+ vector signed long long __b) {
+ return __builtin_altivec_vcmpgtud_p(__CR6_LT_REV,
+ (vector unsigned long long)__b,
+ (vector unsigned long long)__a);
+}
+
+static __inline__ int __ATTRS_o_ai vec_any_ge(vector bool long long __a,
+ vector unsigned long long __b) {
+ return __builtin_altivec_vcmpgtud_p(__CR6_LT_REV, __b,
+ (vector unsigned long long)__a);
+}
+
+static __inline__ int __ATTRS_o_ai vec_any_ge(vector bool long long __a,
+ vector bool long long __b) {
+ return __builtin_altivec_vcmpgtud_p(__CR6_LT_REV,
+ (vector unsigned long long)__b,
+ (vector unsigned long long)__a);
+}
+#endif
+
+static __inline__ int __ATTRS_o_ai vec_any_ge(vector float __a,
+ vector float __b) {
+#ifdef __VSX__
+ return __builtin_vsx_xvcmpgesp_p(__CR6_EQ_REV, __a, __b);
+#else
+ return __builtin_altivec_vcmpgefp_p(__CR6_EQ_REV, __a, __b);
+#endif
+}
+
+#ifdef __VSX__
+static __inline__ int __ATTRS_o_ai vec_any_ge(vector double __a,
+ vector double __b) {
+ return __builtin_vsx_xvcmpgedp_p(__CR6_EQ_REV, __a, __b);
+}
+#endif
+
+/* vec_any_gt */
+
+static __inline__ int __ATTRS_o_ai vec_any_gt(vector signed char __a,
+ vector signed char __b) {
+ return __builtin_altivec_vcmpgtsb_p(__CR6_EQ_REV, __a, __b);
+}
+
+static __inline__ int __ATTRS_o_ai vec_any_gt(vector signed char __a,
+ vector bool char __b) {
+ return __builtin_altivec_vcmpgtsb_p(__CR6_EQ_REV, __a,
+ (vector signed char)__b);
+}
+
+static __inline__ int __ATTRS_o_ai vec_any_gt(vector unsigned char __a,
+ vector unsigned char __b) {
+ return __builtin_altivec_vcmpgtub_p(__CR6_EQ_REV, __a, __b);
+}
+
+static __inline__ int __ATTRS_o_ai vec_any_gt(vector unsigned char __a,
+ vector bool char __b) {
+ return __builtin_altivec_vcmpgtub_p(__CR6_EQ_REV, __a,
+ (vector unsigned char)__b);
+}
+
+static __inline__ int __ATTRS_o_ai vec_any_gt(vector bool char __a,
+ vector signed char __b) {
+ return __builtin_altivec_vcmpgtub_p(__CR6_EQ_REV, (vector unsigned char)__a,
+ (vector unsigned char)__b);
+}
+
+static __inline__ int __ATTRS_o_ai vec_any_gt(vector bool char __a,
+ vector unsigned char __b) {
+ return __builtin_altivec_vcmpgtub_p(__CR6_EQ_REV, (vector unsigned char)__a,
+ __b);
+}
+
+static __inline__ int __ATTRS_o_ai vec_any_gt(vector bool char __a,
+ vector bool char __b) {
+ return __builtin_altivec_vcmpgtub_p(__CR6_EQ_REV, (vector unsigned char)__a,
+ (vector unsigned char)__b);
+}
+
+static __inline__ int __ATTRS_o_ai vec_any_gt(vector short __a,
+ vector short __b) {
+ return __builtin_altivec_vcmpgtsh_p(__CR6_EQ_REV, __a, __b);
+}
+
+static __inline__ int __ATTRS_o_ai vec_any_gt(vector short __a,
+ vector bool short __b) {
+ return __builtin_altivec_vcmpgtsh_p(__CR6_EQ_REV, __a, (vector short)__b);
+}
+
+static __inline__ int __ATTRS_o_ai vec_any_gt(vector unsigned short __a,
+ vector unsigned short __b) {
+ return __builtin_altivec_vcmpgtuh_p(__CR6_EQ_REV, __a, __b);
+}
+
+static __inline__ int __ATTRS_o_ai vec_any_gt(vector unsigned short __a,
+ vector bool short __b) {
+ return __builtin_altivec_vcmpgtuh_p(__CR6_EQ_REV, __a,
+ (vector unsigned short)__b);
+}
+
+static __inline__ int __ATTRS_o_ai vec_any_gt(vector bool short __a,
+ vector short __b) {
+ return __builtin_altivec_vcmpgtuh_p(__CR6_EQ_REV, (vector unsigned short)__a,
+ (vector unsigned short)__b);
+}
+
+static __inline__ int __ATTRS_o_ai vec_any_gt(vector bool short __a,
+ vector unsigned short __b) {
+ return __builtin_altivec_vcmpgtuh_p(__CR6_EQ_REV, (vector unsigned short)__a,
+ __b);
+}
+
+static __inline__ int __ATTRS_o_ai vec_any_gt(vector bool short __a,
+ vector bool short __b) {
+ return __builtin_altivec_vcmpgtuh_p(__CR6_EQ_REV, (vector unsigned short)__a,
+ (vector unsigned short)__b);
+}
+
+static __inline__ int __ATTRS_o_ai vec_any_gt(vector int __a, vector int __b) {
+ return __builtin_altivec_vcmpgtsw_p(__CR6_EQ_REV, __a, __b);
+}
+
+static __inline__ int __ATTRS_o_ai vec_any_gt(vector int __a,
+ vector bool int __b) {
+ return __builtin_altivec_vcmpgtsw_p(__CR6_EQ_REV, __a, (vector int)__b);
+}
+
+static __inline__ int __ATTRS_o_ai vec_any_gt(vector unsigned int __a,
+ vector unsigned int __b) {
+ return __builtin_altivec_vcmpgtuw_p(__CR6_EQ_REV, __a, __b);
+}
+
+static __inline__ int __ATTRS_o_ai vec_any_gt(vector unsigned int __a,
+ vector bool int __b) {
+ return __builtin_altivec_vcmpgtuw_p(__CR6_EQ_REV, __a,
+ (vector unsigned int)__b);
+}
+
+static __inline__ int __ATTRS_o_ai vec_any_gt(vector bool int __a,
+ vector int __b) {
+ return __builtin_altivec_vcmpgtuw_p(__CR6_EQ_REV, (vector unsigned int)__a,
+ (vector unsigned int)__b);
+}
+
+static __inline__ int __ATTRS_o_ai vec_any_gt(vector bool int __a,
+ vector unsigned int __b) {
+ return __builtin_altivec_vcmpgtuw_p(__CR6_EQ_REV, (vector unsigned int)__a,
+ __b);
+}
+
+static __inline__ int __ATTRS_o_ai vec_any_gt(vector bool int __a,
+ vector bool int __b) {
+ return __builtin_altivec_vcmpgtuw_p(__CR6_EQ_REV, (vector unsigned int)__a,
+ (vector unsigned int)__b);
+}
+
+#ifdef __POWER8_VECTOR__
+static __inline__ int __ATTRS_o_ai vec_any_gt(vector signed long long __a,
+ vector signed long long __b) {
+ return __builtin_altivec_vcmpgtsd_p(__CR6_EQ_REV, __a, __b);
+}
+
+static __inline__ int __ATTRS_o_ai vec_any_gt(vector unsigned long long __a,
+ vector unsigned long long __b) {
+ return __builtin_altivec_vcmpgtud_p(__CR6_EQ_REV, __a, __b);
+}
+
+static __inline__ int __ATTRS_o_ai vec_any_gt(vector signed long long __a,
+ vector bool long long __b) {
+ return __builtin_altivec_vcmpgtsd_p(__CR6_EQ_REV, __a,
+ (vector signed long long)__b);
+}
+
+static __inline__ int __ATTRS_o_ai vec_any_gt(vector unsigned long long __a,
+ vector bool long long __b) {
+ return __builtin_altivec_vcmpgtud_p(__CR6_EQ_REV, __a,
+ (vector unsigned long long)__b);
+}
+
+static __inline__ int __ATTRS_o_ai vec_any_gt(vector bool long long __a,
+ vector signed long long __b) {
+ return __builtin_altivec_vcmpgtud_p(__CR6_EQ_REV,
+ (vector unsigned long long)__a,
+ (vector unsigned long long)__b);
+}
+
+static __inline__ int __ATTRS_o_ai vec_any_gt(vector bool long long __a,
+ vector unsigned long long __b) {
+ return __builtin_altivec_vcmpgtud_p(__CR6_EQ_REV,
+ (vector unsigned long long)__a, __b);
+}
+
+static __inline__ int __ATTRS_o_ai vec_any_gt(vector bool long long __a,
+ vector bool long long __b) {
+ return __builtin_altivec_vcmpgtud_p(__CR6_EQ_REV,
+ (vector unsigned long long)__a,
+ (vector unsigned long long)__b);
+}
+#endif
+
+static __inline__ int __ATTRS_o_ai vec_any_gt(vector float __a,
+ vector float __b) {
+#ifdef __VSX__
+ return __builtin_vsx_xvcmpgtsp_p(__CR6_EQ_REV, __a, __b);
+#else
+ return __builtin_altivec_vcmpgtfp_p(__CR6_EQ_REV, __a, __b);
+#endif
+}
+
+#ifdef __VSX__
+static __inline__ int __ATTRS_o_ai vec_any_gt(vector double __a,
+ vector double __b) {
+ return __builtin_vsx_xvcmpgtdp_p(__CR6_EQ_REV, __a, __b);
+}
+#endif
+
+/* vec_any_le */
+
+static __inline__ int __ATTRS_o_ai vec_any_le(vector signed char __a,
+ vector signed char __b) {
+ return __builtin_altivec_vcmpgtsb_p(__CR6_LT_REV, __a, __b);
+}
+
+static __inline__ int __ATTRS_o_ai vec_any_le(vector signed char __a,
+ vector bool char __b) {
+ return __builtin_altivec_vcmpgtsb_p(__CR6_LT_REV, __a,
+ (vector signed char)__b);
+}
+
+static __inline__ int __ATTRS_o_ai vec_any_le(vector unsigned char __a,
+ vector unsigned char __b) {
+ return __builtin_altivec_vcmpgtub_p(__CR6_LT_REV, __a, __b);
+}
+
+static __inline__ int __ATTRS_o_ai vec_any_le(vector unsigned char __a,
+ vector bool char __b) {
+ return __builtin_altivec_vcmpgtub_p(__CR6_LT_REV, __a,
+ (vector unsigned char)__b);
+}
+
+static __inline__ int __ATTRS_o_ai vec_any_le(vector bool char __a,
+ vector signed char __b) {
+ return __builtin_altivec_vcmpgtub_p(__CR6_LT_REV, (vector unsigned char)__a,
+ (vector unsigned char)__b);
+}
+
+static __inline__ int __ATTRS_o_ai vec_any_le(vector bool char __a,
+ vector unsigned char __b) {
+ return __builtin_altivec_vcmpgtub_p(__CR6_LT_REV, (vector unsigned char)__a,
+ __b);
+}
+
+static __inline__ int __ATTRS_o_ai vec_any_le(vector bool char __a,
+ vector bool char __b) {
+ return __builtin_altivec_vcmpgtub_p(__CR6_LT_REV, (vector unsigned char)__a,
+ (vector unsigned char)__b);
+}
+
+static __inline__ int __ATTRS_o_ai vec_any_le(vector short __a,
+ vector short __b) {
+ return __builtin_altivec_vcmpgtsh_p(__CR6_LT_REV, __a, __b);
+}
+
+static __inline__ int __ATTRS_o_ai vec_any_le(vector short __a,
+ vector bool short __b) {
+ return __builtin_altivec_vcmpgtsh_p(__CR6_LT_REV, __a, (vector short)__b);
+}
+
+static __inline__ int __ATTRS_o_ai vec_any_le(vector unsigned short __a,
+ vector unsigned short __b) {
+ return __builtin_altivec_vcmpgtuh_p(__CR6_LT_REV, __a, __b);
+}
+
+static __inline__ int __ATTRS_o_ai vec_any_le(vector unsigned short __a,
+ vector bool short __b) {
+ return __builtin_altivec_vcmpgtuh_p(__CR6_LT_REV, __a,
+ (vector unsigned short)__b);
+}
+
+static __inline__ int __ATTRS_o_ai vec_any_le(vector bool short __a,
+ vector short __b) {
+ return __builtin_altivec_vcmpgtuh_p(__CR6_LT_REV, (vector unsigned short)__a,
+ (vector unsigned short)__b);
+}
+
+static __inline__ int __ATTRS_o_ai vec_any_le(vector bool short __a,
+ vector unsigned short __b) {
+ return __builtin_altivec_vcmpgtuh_p(__CR6_LT_REV, (vector unsigned short)__a,
+ __b);
+}
+
+static __inline__ int __ATTRS_o_ai vec_any_le(vector bool short __a,
+ vector bool short __b) {
+ return __builtin_altivec_vcmpgtuh_p(__CR6_LT_REV, (vector unsigned short)__a,
+ (vector unsigned short)__b);
+}
+
+static __inline__ int __ATTRS_o_ai vec_any_le(vector int __a, vector int __b) {
+ return __builtin_altivec_vcmpgtsw_p(__CR6_LT_REV, __a, __b);
+}
+
+static __inline__ int __ATTRS_o_ai vec_any_le(vector int __a,
+ vector bool int __b) {
+ return __builtin_altivec_vcmpgtsw_p(__CR6_LT_REV, __a, (vector int)__b);
+}
+
+static __inline__ int __ATTRS_o_ai vec_any_le(vector unsigned int __a,
+ vector unsigned int __b) {
+ return __builtin_altivec_vcmpgtuw_p(__CR6_LT_REV, __a, __b);
+}
+
+static __inline__ int __ATTRS_o_ai vec_any_le(vector unsigned int __a,
+ vector bool int __b) {
+ return __builtin_altivec_vcmpgtuw_p(__CR6_LT_REV, __a,
+ (vector unsigned int)__b);
+}
+
+static __inline__ int __ATTRS_o_ai vec_any_le(vector bool int __a,
+ vector int __b) {
+ return __builtin_altivec_vcmpgtuw_p(__CR6_LT_REV, (vector unsigned int)__a,
+ (vector unsigned int)__b);
+}
+
+static __inline__ int __ATTRS_o_ai vec_any_le(vector bool int __a,
+ vector unsigned int __b) {
+ return __builtin_altivec_vcmpgtuw_p(__CR6_LT_REV, (vector unsigned int)__a,
+ __b);
+}
+
+static __inline__ int __ATTRS_o_ai vec_any_le(vector bool int __a,
+ vector bool int __b) {
+ return __builtin_altivec_vcmpgtuw_p(__CR6_LT_REV, (vector unsigned int)__a,
+ (vector unsigned int)__b);
+}
+
+#ifdef __POWER8_VECTOR__
+static __inline__ int __ATTRS_o_ai vec_any_le(vector signed long long __a,
+ vector signed long long __b) {
+ return __builtin_altivec_vcmpgtsd_p(__CR6_LT_REV, __a, __b);
+}
+
+static __inline__ int __ATTRS_o_ai vec_any_le(vector unsigned long long __a,
+ vector unsigned long long __b) {
+ return __builtin_altivec_vcmpgtud_p(__CR6_LT_REV, __a, __b);
+}
+
+static __inline__ int __ATTRS_o_ai vec_any_le(vector signed long long __a,
+ vector bool long long __b) {
+ return __builtin_altivec_vcmpgtsd_p(__CR6_LT_REV, __a,
+ (vector signed long long)__b);
+}
+
+static __inline__ int __ATTRS_o_ai vec_any_le(vector unsigned long long __a,
+ vector bool long long __b) {
+ return __builtin_altivec_vcmpgtud_p(__CR6_LT_REV, __a,
+ (vector unsigned long long)__b);
+}
+
+static __inline__ int __ATTRS_o_ai vec_any_le(vector bool long long __a,
+ vector signed long long __b) {
+ return __builtin_altivec_vcmpgtud_p(__CR6_LT_REV,
+ (vector unsigned long long)__a,
+ (vector unsigned long long)__b);
+}
+
+static __inline__ int __ATTRS_o_ai vec_any_le(vector bool long long __a,
+ vector unsigned long long __b) {
+ return __builtin_altivec_vcmpgtud_p(__CR6_LT_REV,
+ (vector unsigned long long)__a, __b);
+}
+
+static __inline__ int __ATTRS_o_ai vec_any_le(vector bool long long __a,
+ vector bool long long __b) {
+ return __builtin_altivec_vcmpgtud_p(__CR6_LT_REV,
+ (vector unsigned long long)__a,
+ (vector unsigned long long)__b);
+}
+#endif
+
+static __inline__ int __ATTRS_o_ai vec_any_le(vector float __a,
+ vector float __b) {
+#ifdef __VSX__
+ return __builtin_vsx_xvcmpgesp_p(__CR6_EQ_REV, __b, __a);
+#else
+ return __builtin_altivec_vcmpgefp_p(__CR6_EQ_REV, __b, __a);
+#endif
+}
+
+#ifdef __VSX__
+static __inline__ int __ATTRS_o_ai vec_any_le(vector double __a,
+ vector double __b) {
+ return __builtin_vsx_xvcmpgedp_p(__CR6_EQ_REV, __b, __a);
+}
+#endif
+
+/* vec_any_lt */
+
+static __inline__ int __ATTRS_o_ai vec_any_lt(vector signed char __a,
+ vector signed char __b) {
+ return __builtin_altivec_vcmpgtsb_p(__CR6_EQ_REV, __b, __a);
+}
+
+static __inline__ int __ATTRS_o_ai vec_any_lt(vector signed char __a,
+ vector bool char __b) {
+ return __builtin_altivec_vcmpgtsb_p(__CR6_EQ_REV, (vector signed char)__b,
+ __a);
+}
+
+static __inline__ int __ATTRS_o_ai vec_any_lt(vector unsigned char __a,
+ vector unsigned char __b) {
+ return __builtin_altivec_vcmpgtub_p(__CR6_EQ_REV, __b, __a);
+}
+
+static __inline__ int __ATTRS_o_ai vec_any_lt(vector unsigned char __a,
+ vector bool char __b) {
+ return __builtin_altivec_vcmpgtub_p(__CR6_EQ_REV, (vector unsigned char)__b,
+ __a);
+}
+
+static __inline__ int __ATTRS_o_ai vec_any_lt(vector bool char __a,
+ vector signed char __b) {
+ return __builtin_altivec_vcmpgtub_p(__CR6_EQ_REV, (vector unsigned char)__b,
+ (vector unsigned char)__a);
+}
+
+static __inline__ int __ATTRS_o_ai vec_any_lt(vector bool char __a,
+ vector unsigned char __b) {
+ return __builtin_altivec_vcmpgtub_p(__CR6_EQ_REV, __b,
+ (vector unsigned char)__a);
+}
+
+static __inline__ int __ATTRS_o_ai vec_any_lt(vector bool char __a,
+ vector bool char __b) {
+ return __builtin_altivec_vcmpgtub_p(__CR6_EQ_REV, (vector unsigned char)__b,
+ (vector unsigned char)__a);
+}
+
+static __inline__ int __ATTRS_o_ai vec_any_lt(vector short __a,
+ vector short __b) {
+ return __builtin_altivec_vcmpgtsh_p(__CR6_EQ_REV, __b, __a);
+}
+
+static __inline__ int __ATTRS_o_ai vec_any_lt(vector short __a,
+ vector bool short __b) {
+ return __builtin_altivec_vcmpgtsh_p(__CR6_EQ_REV, (vector short)__b, __a);
+}
+
+static __inline__ int __ATTRS_o_ai vec_any_lt(vector unsigned short __a,
+ vector unsigned short __b) {
+ return __builtin_altivec_vcmpgtuh_p(__CR6_EQ_REV, __b, __a);
+}
+
+static __inline__ int __ATTRS_o_ai vec_any_lt(vector unsigned short __a,
+ vector bool short __b) {
+ return __builtin_altivec_vcmpgtuh_p(__CR6_EQ_REV, (vector unsigned short)__b,
+ __a);
+}
+
+static __inline__ int __ATTRS_o_ai vec_any_lt(vector bool short __a,
+ vector short __b) {
+ return __builtin_altivec_vcmpgtuh_p(__CR6_EQ_REV, (vector unsigned short)__b,
+ (vector unsigned short)__a);
+}
+
+static __inline__ int __ATTRS_o_ai vec_any_lt(vector bool short __a,
+ vector unsigned short __b) {
+ return __builtin_altivec_vcmpgtuh_p(__CR6_EQ_REV, __b,
+ (vector unsigned short)__a);
+}
+
+static __inline__ int __ATTRS_o_ai vec_any_lt(vector bool short __a,
+ vector bool short __b) {
+ return __builtin_altivec_vcmpgtuh_p(__CR6_EQ_REV, (vector unsigned short)__b,
+ (vector unsigned short)__a);
+}
+
+static __inline__ int __ATTRS_o_ai vec_any_lt(vector int __a, vector int __b) {
+ return __builtin_altivec_vcmpgtsw_p(__CR6_EQ_REV, __b, __a);
+}
+
+static __inline__ int __ATTRS_o_ai vec_any_lt(vector int __a,
+ vector bool int __b) {
+ return __builtin_altivec_vcmpgtsw_p(__CR6_EQ_REV, (vector int)__b, __a);
+}
+
+static __inline__ int __ATTRS_o_ai vec_any_lt(vector unsigned int __a,
+ vector unsigned int __b) {
+ return __builtin_altivec_vcmpgtuw_p(__CR6_EQ_REV, __b, __a);
+}
+
+static __inline__ int __ATTRS_o_ai vec_any_lt(vector unsigned int __a,
+ vector bool int __b) {
+ return __builtin_altivec_vcmpgtuw_p(__CR6_EQ_REV, (vector unsigned int)__b,
+ __a);
+}
+
+static __inline__ int __ATTRS_o_ai vec_any_lt(vector bool int __a,
+ vector int __b) {
+ return __builtin_altivec_vcmpgtuw_p(__CR6_EQ_REV, (vector unsigned int)__b,
+ (vector unsigned int)__a);
+}
+
+static __inline__ int __ATTRS_o_ai vec_any_lt(vector bool int __a,
+ vector unsigned int __b) {
+ return __builtin_altivec_vcmpgtuw_p(__CR6_EQ_REV, __b,
+ (vector unsigned int)__a);
+}
+
+static __inline__ int __ATTRS_o_ai vec_any_lt(vector bool int __a,
+ vector bool int __b) {
+ return __builtin_altivec_vcmpgtuw_p(__CR6_EQ_REV, (vector unsigned int)__b,
+ (vector unsigned int)__a);
+}
+
+#ifdef __POWER8_VECTOR__
+static __inline__ int __ATTRS_o_ai vec_any_lt(vector signed long long __a,
+ vector signed long long __b) {
+ return __builtin_altivec_vcmpgtsd_p(__CR6_EQ_REV, __b, __a);
+}
+
+static __inline__ int __ATTRS_o_ai vec_any_lt(vector unsigned long long __a,
+ vector unsigned long long __b) {
+ return __builtin_altivec_vcmpgtud_p(__CR6_EQ_REV, __b, __a);
+}
+
+static __inline__ int __ATTRS_o_ai vec_any_lt(vector signed long long __a,
+ vector bool long long __b) {
+ return __builtin_altivec_vcmpgtsd_p(__CR6_EQ_REV,
+ (vector signed long long)__b, __a);
+}
+
+static __inline__ int __ATTRS_o_ai vec_any_lt(vector unsigned long long __a,
+ vector bool long long __b) {
+ return __builtin_altivec_vcmpgtud_p(__CR6_EQ_REV,
+ (vector unsigned long long)__b, __a);
+}
+
+static __inline__ int __ATTRS_o_ai vec_any_lt(vector bool long long __a,
+ vector signed long long __b) {
+ return __builtin_altivec_vcmpgtud_p(__CR6_EQ_REV,
+ (vector unsigned long long)__b,
+ (vector unsigned long long)__a);
+}
+
+static __inline__ int __ATTRS_o_ai vec_any_lt(vector bool long long __a,
+ vector unsigned long long __b) {
+ return __builtin_altivec_vcmpgtud_p(__CR6_EQ_REV, __b,
+ (vector unsigned long long)__a);
+}
+
+static __inline__ int __ATTRS_o_ai vec_any_lt(vector bool long long __a,
+ vector bool long long __b) {
+ return __builtin_altivec_vcmpgtud_p(__CR6_EQ_REV,
+ (vector unsigned long long)__b,
+ (vector unsigned long long)__a);
+}
+#endif
+
+static __inline__ int __ATTRS_o_ai vec_any_lt(vector float __a,
+ vector float __b) {
+#ifdef __VSX__
+ return __builtin_vsx_xvcmpgtsp_p(__CR6_EQ_REV, __b, __a);
+#else
+ return __builtin_altivec_vcmpgtfp_p(__CR6_EQ_REV, __b, __a);
+#endif
+}
+
+#ifdef __VSX__
+static __inline__ int __ATTRS_o_ai vec_any_lt(vector double __a,
+ vector double __b) {
+ return __builtin_vsx_xvcmpgtdp_p(__CR6_EQ_REV, __b, __a);
+}
+#endif
+
+/* vec_any_nan */
+
+static __inline__ int __attribute__((__always_inline__))
+vec_any_nan(vector float __a) {
+ return __builtin_altivec_vcmpeqfp_p(__CR6_LT_REV, __a, __a);
+}
+
+/* vec_any_ne */
+
+static __inline__ int __ATTRS_o_ai vec_any_ne(vector signed char __a,
+ vector signed char __b) {
+ return __builtin_altivec_vcmpequb_p(__CR6_LT_REV, (vector char)__a,
+ (vector char)__b);
+}
+
+static __inline__ int __ATTRS_o_ai vec_any_ne(vector signed char __a,
+ vector bool char __b) {
+ return __builtin_altivec_vcmpequb_p(__CR6_LT_REV, (vector char)__a,
+ (vector char)__b);
+}
+
+static __inline__ int __ATTRS_o_ai vec_any_ne(vector unsigned char __a,
+ vector unsigned char __b) {
+ return __builtin_altivec_vcmpequb_p(__CR6_LT_REV, (vector char)__a,
+ (vector char)__b);
+}
+
+static __inline__ int __ATTRS_o_ai vec_any_ne(vector unsigned char __a,
+ vector bool char __b) {
+ return __builtin_altivec_vcmpequb_p(__CR6_LT_REV, (vector char)__a,
+ (vector char)__b);
+}
+
+static __inline__ int __ATTRS_o_ai vec_any_ne(vector bool char __a,
+ vector signed char __b) {
+ return __builtin_altivec_vcmpequb_p(__CR6_LT_REV, (vector char)__a,
+ (vector char)__b);
+}
+
+static __inline__ int __ATTRS_o_ai vec_any_ne(vector bool char __a,
+ vector unsigned char __b) {
+ return __builtin_altivec_vcmpequb_p(__CR6_LT_REV, (vector char)__a,
+ (vector char)__b);
+}
+
+static __inline__ int __ATTRS_o_ai vec_any_ne(vector bool char __a,
+ vector bool char __b) {
+ return __builtin_altivec_vcmpequb_p(__CR6_LT_REV, (vector char)__a,
+ (vector char)__b);
+}
+
+static __inline__ int __ATTRS_o_ai vec_any_ne(vector short __a,
+ vector short __b) {
+ return __builtin_altivec_vcmpequh_p(__CR6_LT_REV, __a, __b);
+}
+
+static __inline__ int __ATTRS_o_ai vec_any_ne(vector short __a,
+ vector bool short __b) {
+ return __builtin_altivec_vcmpequh_p(__CR6_LT_REV, __a, (vector short)__b);
+}
+
+static __inline__ int __ATTRS_o_ai vec_any_ne(vector unsigned short __a,
+ vector unsigned short __b) {
+ return __builtin_altivec_vcmpequh_p(__CR6_LT_REV, (vector short)__a,
+ (vector short)__b);
+}
+
+static __inline__ int __ATTRS_o_ai vec_any_ne(vector unsigned short __a,
+ vector bool short __b) {
+ return __builtin_altivec_vcmpequh_p(__CR6_LT_REV, (vector short)__a,
+ (vector short)__b);
+}
+
+static __inline__ int __ATTRS_o_ai vec_any_ne(vector bool short __a,
+ vector short __b) {
+ return __builtin_altivec_vcmpequh_p(__CR6_LT_REV, (vector short)__a,
+ (vector short)__b);
+}
+
+static __inline__ int __ATTRS_o_ai vec_any_ne(vector bool short __a,
+ vector unsigned short __b) {
+ return __builtin_altivec_vcmpequh_p(__CR6_LT_REV, (vector short)__a,
+ (vector short)__b);
+}
+
+static __inline__ int __ATTRS_o_ai vec_any_ne(vector bool short __a,
+ vector bool short __b) {
+ return __builtin_altivec_vcmpequh_p(__CR6_LT_REV, (vector short)__a,
+ (vector short)__b);
+}
+
+static __inline__ int __ATTRS_o_ai vec_any_ne(vector pixel __a,
+ vector pixel __b) {
+ return __builtin_altivec_vcmpequh_p(__CR6_LT_REV, (vector short)__a,
+ (vector short)__b);
+}
+
+static __inline__ int __ATTRS_o_ai vec_any_ne(vector int __a, vector int __b) {
+ return __builtin_altivec_vcmpequw_p(__CR6_LT_REV, __a, __b);
+}
+
+static __inline__ int __ATTRS_o_ai vec_any_ne(vector int __a,
+ vector bool int __b) {
+ return __builtin_altivec_vcmpequw_p(__CR6_LT_REV, __a, (vector int)__b);
+}
+
+static __inline__ int __ATTRS_o_ai vec_any_ne(vector unsigned int __a,
+ vector unsigned int __b) {
+ return __builtin_altivec_vcmpequw_p(__CR6_LT_REV, (vector int)__a,
+ (vector int)__b);
+}
+
+static __inline__ int __ATTRS_o_ai vec_any_ne(vector unsigned int __a,
+ vector bool int __b) {
+ return __builtin_altivec_vcmpequw_p(__CR6_LT_REV, (vector int)__a,
+ (vector int)__b);
+}
+
+static __inline__ int __ATTRS_o_ai vec_any_ne(vector bool int __a,
+ vector int __b) {
+ return __builtin_altivec_vcmpequw_p(__CR6_LT_REV, (vector int)__a,
+ (vector int)__b);
+}
+
+static __inline__ int __ATTRS_o_ai vec_any_ne(vector bool int __a,
+ vector unsigned int __b) {
+ return __builtin_altivec_vcmpequw_p(__CR6_LT_REV, (vector int)__a,
+ (vector int)__b);
+}
+
+static __inline__ int __ATTRS_o_ai vec_any_ne(vector bool int __a,
+ vector bool int __b) {
+ return __builtin_altivec_vcmpequw_p(__CR6_LT_REV, (vector int)__a,
+ (vector int)__b);
+}
+
+#ifdef __POWER8_VECTOR__
+static __inline__ int __ATTRS_o_ai vec_any_ne(vector signed long long __a,
+ vector signed long long __b) {
+ return __builtin_altivec_vcmpequd_p(__CR6_LT_REV, __a, __b);
+}
+
+static __inline__ int __ATTRS_o_ai vec_any_ne(vector unsigned long long __a,
+ vector unsigned long long __b) {
+ return __builtin_altivec_vcmpequd_p(__CR6_LT_REV, (vector long long)__a,
+ (vector long long)__b);
+}
+
+static __inline__ int __ATTRS_o_ai vec_any_ne(vector signed long long __a,
+ vector bool long long __b) {
+ return __builtin_altivec_vcmpequd_p(__CR6_LT_REV, __a,
+ (vector signed long long)__b);
+}
+
+static __inline__ int __ATTRS_o_ai vec_any_ne(vector unsigned long long __a,
+ vector bool long long __b) {
+ return __builtin_altivec_vcmpequd_p(
+ __CR6_LT_REV, (vector signed long long)__a, (vector signed long long)__b);
+}
+
+static __inline__ int __ATTRS_o_ai vec_any_ne(vector bool long long __a,
+ vector signed long long __b) {
+ return __builtin_altivec_vcmpequd_p(
+ __CR6_LT_REV, (vector signed long long)__a, (vector signed long long)__b);
+}
+
+static __inline__ int __ATTRS_o_ai vec_any_ne(vector bool long long __a,
+ vector unsigned long long __b) {
+ return __builtin_altivec_vcmpequd_p(
+ __CR6_LT_REV, (vector signed long long)__a, (vector signed long long)__b);
+}
+
+static __inline__ int __ATTRS_o_ai vec_any_ne(vector bool long long __a,
+ vector bool long long __b) {
+ return __builtin_altivec_vcmpequd_p(
+ __CR6_LT_REV, (vector signed long long)__a, (vector signed long long)__b);
+}
+#endif
+
+static __inline__ int __ATTRS_o_ai vec_any_ne(vector float __a,
+ vector float __b) {
+#ifdef __VSX__
+ return __builtin_vsx_xvcmpeqsp_p(__CR6_LT_REV, __a, __b);
+#else
+ return __builtin_altivec_vcmpeqfp_p(__CR6_LT_REV, __a, __b);
+#endif
+}
+
+#ifdef __VSX__
+static __inline__ int __ATTRS_o_ai vec_any_ne(vector double __a,
+ vector double __b) {
+ return __builtin_vsx_xvcmpeqdp_p(__CR6_LT_REV, __a, __b);
+}
+#endif
+
+/* vec_any_nge */
+
+static __inline__ int __attribute__((__always_inline__))
+vec_any_nge(vector float __a, vector float __b) {
+ return __builtin_altivec_vcmpgefp_p(__CR6_LT_REV, __a, __b);
+}
+
+/* vec_any_ngt */
+
+static __inline__ int __attribute__((__always_inline__))
+vec_any_ngt(vector float __a, vector float __b) {
+ return __builtin_altivec_vcmpgtfp_p(__CR6_LT_REV, __a, __b);
+}
+
+/* vec_any_nle */
+
+static __inline__ int __attribute__((__always_inline__))
+vec_any_nle(vector float __a, vector float __b) {
+ return __builtin_altivec_vcmpgefp_p(__CR6_LT_REV, __b, __a);
+}
+
+/* vec_any_nlt */
+
+static __inline__ int __attribute__((__always_inline__))
+vec_any_nlt(vector float __a, vector float __b) {
+ return __builtin_altivec_vcmpgtfp_p(__CR6_LT_REV, __b, __a);
+}
+
+/* vec_any_numeric */
+
+static __inline__ int __attribute__((__always_inline__))
+vec_any_numeric(vector float __a) {
+ return __builtin_altivec_vcmpeqfp_p(__CR6_EQ_REV, __a, __a);
+}
+
+/* vec_any_out */
+
+static __inline__ int __attribute__((__always_inline__))
+vec_any_out(vector float __a, vector float __b) {
+ return __builtin_altivec_vcmpbfp_p(__CR6_EQ_REV, __a, __b);
+}
+
+/* Power8 Crypto functions
+Note: We diverge from the current GCC implementation with regard
+to cryptography and related functions as follows:
+- Only the SHA and AES instructions and builtins are disabled by -mno-crypto.
+- The remaining ones are only available on Power8 and up, so they
+  require -mpower8-vector.
+The justification is that export restrictions require Category:Vector.Crypto
+to be optional (i.e. compliant hardware may not provide support), so we need
+to be able to turn off support for those instructions. The remaining ones
+(currently controlled by -mcrypto for GCC) must still be provided on
+compliant hardware even if Vector.Crypto is not. An illustrative usage
+sketch follows this block.
+*/
+#ifdef __CRYPTO__
+#define vec_sbox_be __builtin_altivec_crypto_vsbox
+#define vec_cipher_be __builtin_altivec_crypto_vcipher
+#define vec_cipherlast_be __builtin_altivec_crypto_vcipherlast
+#define vec_ncipher_be __builtin_altivec_crypto_vncipher
+#define vec_ncipherlast_be __builtin_altivec_crypto_vncipherlast
+
+static __inline__ vector unsigned long long __attribute__((__always_inline__))
+__builtin_crypto_vsbox(vector unsigned long long __a) {
+ return __builtin_altivec_crypto_vsbox(__a);
+}
+
+static __inline__ vector unsigned long long __attribute__((__always_inline__))
+__builtin_crypto_vcipher(vector unsigned long long __a,
+ vector unsigned long long __b) {
+ return __builtin_altivec_crypto_vcipher(__a, __b);
+}
+
+static __inline__ vector unsigned long long __attribute__((__always_inline__))
+__builtin_crypto_vcipherlast(vector unsigned long long __a,
+ vector unsigned long long __b) {
+ return __builtin_altivec_crypto_vcipherlast(__a, __b);
+}
+
+static __inline__ vector unsigned long long __attribute__((__always_inline__))
+__builtin_crypto_vncipher(vector unsigned long long __a,
+ vector unsigned long long __b) {
+ return __builtin_altivec_crypto_vncipher(__a, __b);
+}
+
+static __inline__ vector unsigned long long __attribute__((__always_inline__))
+__builtin_crypto_vncipherlast(vector unsigned long long __a,
+ vector unsigned long long __b) {
+ return __builtin_altivec_crypto_vncipherlast(__a, __b);
+}
+
+#define __builtin_crypto_vshasigmad __builtin_altivec_crypto_vshasigmad
+#define __builtin_crypto_vshasigmaw __builtin_altivec_crypto_vshasigmaw
+
+#define vec_shasigma_be(X, Y, Z) \
+ _Generic((X), vector unsigned int \
+ : __builtin_crypto_vshasigmaw, vector unsigned long long \
+ : __builtin_crypto_vshasigmad)((X), (Y), (Z))
+#endif
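+
+/* Editor's note: a minimal usage sketch, assuming a Power8 target built
+   with crypto support so that __CRYPTO__ is defined. The function and
+   variable names below are hypothetical and not part of this header. */
+#if 0 /* illustrative example only */
+static vector unsigned long long
+example_aes_round(vector unsigned long long __state,
+                  vector unsigned long long __key) {
+  /* vec_cipher_be performs one full round of AES encryption on the
+     128-bit state, combining it with the supplied round key. */
+  return vec_cipher_be(__state, __key);
+}
+#endif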
+
+#ifdef __POWER8_VECTOR__
+static __inline__ vector unsigned char __ATTRS_o_ai
+__builtin_crypto_vpermxor(vector unsigned char __a, vector unsigned char __b,
+ vector unsigned char __c) {
+ return __builtin_altivec_crypto_vpermxor(__a, __b, __c);
+}
+
+static __inline__ vector unsigned short __ATTRS_o_ai
+__builtin_crypto_vpermxor(vector unsigned short __a, vector unsigned short __b,
+ vector unsigned short __c) {
+ return (vector unsigned short)__builtin_altivec_crypto_vpermxor(
+ (vector unsigned char)__a, (vector unsigned char)__b,
+ (vector unsigned char)__c);
+}
+
+static __inline__ vector unsigned int __ATTRS_o_ai __builtin_crypto_vpermxor(
+ vector unsigned int __a, vector unsigned int __b, vector unsigned int __c) {
+ return (vector unsigned int)__builtin_altivec_crypto_vpermxor(
+ (vector unsigned char)__a, (vector unsigned char)__b,
+ (vector unsigned char)__c);
+}
+
+static __inline__ vector unsigned long long __ATTRS_o_ai
+__builtin_crypto_vpermxor(vector unsigned long long __a,
+ vector unsigned long long __b,
+ vector unsigned long long __c) {
+ return (vector unsigned long long)__builtin_altivec_crypto_vpermxor(
+ (vector unsigned char)__a, (vector unsigned char)__b,
+ (vector unsigned char)__c);
+}
+
+static __inline__ vector unsigned char __ATTRS_o_ai
+__builtin_crypto_vpmsumb(vector unsigned char __a, vector unsigned char __b) {
+ return __builtin_altivec_crypto_vpmsumb(__a, __b);
+}
+
+static __inline__ vector unsigned short __ATTRS_o_ai
+__builtin_crypto_vpmsumb(vector unsigned short __a, vector unsigned short __b) {
+ return __builtin_altivec_crypto_vpmsumh(__a, __b);
+}
+
+static __inline__ vector unsigned int __ATTRS_o_ai
+__builtin_crypto_vpmsumb(vector unsigned int __a, vector unsigned int __b) {
+ return __builtin_altivec_crypto_vpmsumw(__a, __b);
+}
+
+static __inline__ vector unsigned long long __ATTRS_o_ai
+__builtin_crypto_vpmsumb(vector unsigned long long __a,
+ vector unsigned long long __b) {
+ return __builtin_altivec_crypto_vpmsumd(__a, __b);
+}
+
+static __inline__ vector signed char __ATTRS_o_ai
+vec_vgbbd(vector signed char __a) {
+ return __builtin_altivec_vgbbd((vector unsigned char)__a);
+}
+
+#define vec_pmsum_be __builtin_crypto_vpmsumb
+#define vec_gb __builtin_altivec_vgbbd
+
+static __inline__ vector unsigned char __ATTRS_o_ai
+vec_vgbbd(vector unsigned char __a) {
+ return __builtin_altivec_vgbbd(__a);
+}
+
+static __inline__ vector long long __ATTRS_o_ai
+vec_vbpermq(vector signed char __a, vector signed char __b) {
+ return __builtin_altivec_vbpermq((vector unsigned char)__a,
+ (vector unsigned char)__b);
+}
+
+static __inline__ vector long long __ATTRS_o_ai
+vec_vbpermq(vector unsigned char __a, vector unsigned char __b) {
+ return __builtin_altivec_vbpermq(__a, __b);
+}
+
+#ifdef __powerpc64__
+static __inline__ vector unsigned long long __attribute__((__always_inline__))
+vec_bperm(vector unsigned __int128 __a, vector unsigned char __b) {
+ return __builtin_altivec_vbpermq((vector unsigned char)__a,
+ (vector unsigned char)__b);
+}
+#endif
+#endif
+
+#undef __ATTRS_o_ai
+
+#endif /* __ALTIVEC_H */
diff --git a/current/clang-include/ammintrin.h b/current/clang-include/ammintrin.h
new file mode 100644
index 0000000..8985bb4
--- /dev/null
+++ b/current/clang-include/ammintrin.h
@@ -0,0 +1,194 @@
+/*===---- ammintrin.h - SSE4a intrinsics -----------------------------------===
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+ * THE SOFTWARE.
+ *
+ *===-----------------------------------------------------------------------===
+ */
+
+#ifndef __AMMINTRIN_H
+#define __AMMINTRIN_H
+
+#include <pmmintrin.h>
+
+/* Define the default attributes for the functions in this file. */
+#define __DEFAULT_FN_ATTRS __attribute__((__always_inline__, __nodebug__, __target__("sse4a")))
+
+/// \brief Extracts a bit field of length len, starting at bit index idx,
+///    from the lower 64 bits of the 128-bit integer vector operand.
+///
+/// \headerfile <x86intrin.h>
+///
+/// \code
+/// __m128i _mm_extracti_si64(__m128i x, const int len, const int idx);
+/// \endcode
+///
+/// This intrinsic corresponds to the \c EXTRQ instruction.
+///
+/// \param x
+/// The value from which bits are extracted.
+/// \param len
+/// Bits [5:0] specify the length; the other bits are ignored. If bits [5:0]
+/// are zero, the length is interpreted as 64.
+/// \param idx
+/// Bits [5:0] specify the index of the least significant bit; the other
+/// bits are ignored. If the sum of the index and length is greater than 64,
+/// the result is undefined. If the length and index are both zero, bits
+/// [63:0] of parameter x are extracted. If the length is zero but the index
+/// is non-zero, the result is undefined.
+/// \returns A 128-bit integer vector whose lower 64 bits contain the bits
+/// extracted from the source operand.
+#define _mm_extracti_si64(x, len, idx) \
+ ((__m128i)__builtin_ia32_extrqi((__v2di)(__m128i)(x), \
+ (char)(len), (char)(idx)))
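+
+/* Editor's note: a worked example, not part of this header. If the low
+   quadword of x is 0x0000000000000ABC, extracting len = 8 bits starting at
+   idx = 4 selects bits [11:4] (0xAB), so the low 64 bits of the result are
+   0x00000000000000AB. */
+#if 0 /* illustrative example only */
+__m128i example_extracti(__m128i x) {
+  return _mm_extracti_si64(x, 8, 4); /* len = 8, idx = 4 */
+}
+#endif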
+
+/// \brief Extracts the specified bits from the lower 64 bits of the 128-bit
+/// integer vector operand at the index and of the length specified by __y.
+///
+/// \headerfile <x86intrin.h>
+///
+/// This intrinsic corresponds to the \c EXTRQ instruction.
+///
+/// \param __x
+/// The value from which bits are extracted.
+/// \param __y
+/// Specifies the index of the least significant bit at [13:8] and the
+/// length at [5:0]; all other bits are ignored. If bits [5:0] are zero, the
+/// length is interpreted as 64. If the sum of the index and length is
+/// greater than 64, the result is undefined. If the length and index are
+/// both zero, bits [63:0] of parameter __x are extracted. If the length is
+/// zero but the index is non-zero, the result is undefined.
+/// \returns A 128-bit vector whose lower 64 bits contain the bits extracted
+/// from the source operand.
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_extract_si64(__m128i __x, __m128i __y)
+{
+ return (__m128i)__builtin_ia32_extrq((__v2di)__x, (__v16qi)__y);
+}
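+
+/* Editor's note: a hypothetical sketch of the register form, not part of
+   this header. Per the documentation above, the length goes in bits [5:0]
+   and the index in bits [13:8] of the low quadword of the second operand,
+   so (idx << 8) | len encodes the same extraction as the immediate form.
+   _mm_set_epi64x comes from <emmintrin.h>, pulled in via <pmmintrin.h>. */
+#if 0 /* illustrative example only */
+__m128i example_extract_reg(__m128i x) {
+  __m128i y = _mm_set_epi64x(0, (4 << 8) | 8); /* idx = 4, len = 8 */
+  return _mm_extract_si64(x, y);
+}
+#endif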
+
+/// \brief Inserts the len least significant bits of the source integer
+///    vector y into the lower 64 bits of the destination integer vector x,
+///    starting at bit index idx.
+///
+/// \headerfile <x86intrin.h>
+///
+/// \code
+/// __m128i _mm_inserti_si64(__m128i x, __m128i y, const int len,
+/// const int idx);
+/// \endcode
+///
+/// This intrinsic corresponds to the \c INSERTQ instruction.
+///
+/// \param x
+/// The destination operand where bits will be inserted. The inserted bits
+/// are defined by the length len and by the index idx specifying the least
+/// significant bit.
+/// \param y
+/// The source operand containing the bits to be extracted. The extracted
+/// bits are the least significant bits of operand y of length len.
+/// \param len
+/// Bits [5:0] specify the length; the other bits are ignored. If bits [5:0]
+/// are zero, the length is interpreted as 64.
+/// \param idx
+/// Bits [5:0] specify the index of the least significant bit; the other
+/// bits are ignored. If the sum of the index and length is greater than 64,
+/// the result is undefined. If the length and index are both zero, bits
+/// [63:0] of parameter y are inserted into parameter x. If the length is
+/// zero but the index is non-zero, the result is undefined.
+/// \returns A 128-bit integer vector containing the original lower 64 bits of
+/// destination operand x with the specified bitfields replaced by the lower
+/// bits of source operand y. The upper 64 bits of the return value are
+/// undefined.
+#define _mm_inserti_si64(x, y, len, idx) \
+ ((__m128i)__builtin_ia32_insertqi((__v2di)(__m128i)(x), \
+ (__v2di)(__m128i)(y), \
+ (char)(len), (char)(idx)))
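+
+/* Editor's note: a worked example, not part of this header. With the low
+   quadwords x = 0x000000000000000F and y = 0x00000000000000AB, inserting
+   len = 8 bits at idx = 4 replaces bits [11:4] of x with 0xAB, giving
+   0x0000000000000ABF in the low 64 bits of the result. */
+#if 0 /* illustrative example only */
+__m128i example_inserti(__m128i x, __m128i y) {
+  return _mm_inserti_si64(x, y, 8, 4); /* len = 8, idx = 4 */
+}
+#endif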
+
+/// \brief Inserts bits of a specified length from the source integer vector
+/// __y into the lower 64 bits of the destination integer vector __x at the
+/// index and of the length specified by __y.
+///
+/// \headerfile <x86intrin.h>
+///
+/// This intrinsic corresponds to the \c INSERTQ instruction.
+///
+/// \param __x
+/// The destination operand where bits will be inserted. The inserted bits
+/// are defined by the length and by the index of the least significant bit
+/// specified by operand __y.
+/// \param __y
+/// The source operand containing the bits to be extracted. The extracted
+/// bits are the least significant bits of operand __y with length specified
+/// by bits [69:64]. These are inserted into the destination at the index
+/// specified by bits [77:72]; all other bits are ignored. If bits [69:64]
+/// are zero, the length is interpreted as 64. If the sum of the index and
+/// length is greater than 64, the result is undefined. If the length and
+/// index are both zero, bits [63:0] of parameter __y are inserted into
+/// parameter __x. If the length is zero but the index is non-zero, the
+/// result is undefined.
+/// \returns A 128-bit integer vector containing the original lower 64 bits of
+/// destination operand __x with the specified bitfields replaced by the
+/// lower bits of source operand __y. The upper 64 bits of the return value
+/// are undefined.
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_insert_si64(__m128i __x, __m128i __y)
+{
+ return (__m128i)__builtin_ia32_insertq((__v2di)__x, (__v2di)__y);
+}
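+
+/* Editor's note: a hypothetical sketch of the register form, not part of
+   this header. Per the documentation above, the bits to insert travel in
+   the low quadword of the second operand, while the length (bits [69:64])
+   and index (bits [77:72]) live in its upper quadword, i.e.
+   (idx << 8) | len. */
+#if 0 /* illustrative example only */
+__m128i example_insert_reg(__m128i x, long long bits) {
+  __m128i y = _mm_set_epi64x((4 << 8) | 8, bits); /* idx = 4, len = 8 */
+  return _mm_insert_si64(x, y);
+}
+#endif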
+
+/// \brief Stores a 64-bit double-precision value in a 64-bit memory location.
+/// To minimize caching, the data is flagged as non-temporal (unlikely to be
+/// used again soon).
+///
+/// \headerfile <x86intrin.h>
+///
+/// This intrinsic corresponds to the \c MOVNTSD instruction.
+///
+/// \param __p
+/// The 64-bit memory location used to store the register value.
+/// \param __a
+/// The 64-bit double-precision floating-point register value to be stored.
+static __inline__ void __DEFAULT_FN_ATTRS
+_mm_stream_sd(double *__p, __m128d __a)
+{
+ __builtin_ia32_movntsd(__p, (__v2df)__a);
+}
+
+/// \brief Stores a 32-bit single-precision floating-point value in a 32-bit
+/// memory location. To minimize caching, the data is flagged as
+/// non-temporal (unlikely to be used again soon).
+///
+/// \headerfile <x86intrin.h>
+///
+/// This intrinsic corresponds to the \c MOVNTSS instruction.
+///
+/// \param __p
+/// The 32-bit memory location used to store the register value.
+/// \param __a
+/// The 32-bit single-precision floating-point register value to be stored.
+static __inline__ void __DEFAULT_FN_ATTRS
+_mm_stream_ss(float *__p, __m128 __a)
+{
+ __builtin_ia32_movntss(__p, (__v4sf)__a);
+}
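+
+/* Editor's note: a minimal sketch of why the streaming stores exist, not
+   part of this header. Writing out a large buffer that will not be re-read
+   soon with non-temporal stores avoids evicting useful data from the
+   cache. The function name is hypothetical. */
+#if 0 /* illustrative example only */
+static void example_fill(double *dst, __m128d value, int n) {
+  for (int i = 0; i < n; ++i)
+    _mm_stream_sd(&dst[i], value); /* hint: bypass the cache hierarchy */
+}
+#endif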
+
+#undef __DEFAULT_FN_ATTRS
+
+#endif /* __AMMINTRIN_H */
diff --git a/current/clang-include/arm_acle.h b/current/clang-include/arm_acle.h
new file mode 100644
index 0000000..8423e62
--- /dev/null
+++ b/current/clang-include/arm_acle.h
@@ -0,0 +1,312 @@
+/*===---- arm_acle.h - ARM Non-Neon intrinsics -----------------------------===
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+ * THE SOFTWARE.
+ *
+ *===-----------------------------------------------------------------------===
+ */
+
+#ifndef __ARM_ACLE_H
+#define __ARM_ACLE_H
+
+#ifndef __ARM_ACLE
+#error "ACLE intrinsics support not enabled."
+#endif
+
+#include <stdint.h>
+
+#if defined(__cplusplus)
+extern "C" {
+#endif
+
+/* 8 SYNCHRONIZATION, BARRIER AND HINT INTRINSICS */
+/* 8.3 Memory barriers */
+#if !defined(_MSC_VER)
+#define __dmb(i) __builtin_arm_dmb(i)
+#define __dsb(i) __builtin_arm_dsb(i)
+#define __isb(i) __builtin_arm_isb(i)
+#endif
+
+/* 8.4 Hints */
+
+#if !defined(_MSC_VER)
+static __inline__ void __attribute__((__always_inline__, __nodebug__)) __wfi(void) {
+ __builtin_arm_wfi();
+}
+
+static __inline__ void __attribute__((__always_inline__, __nodebug__)) __wfe(void) {
+ __builtin_arm_wfe();
+}
+
+static __inline__ void __attribute__((__always_inline__, __nodebug__)) __sev(void) {
+ __builtin_arm_sev();
+}
+
+static __inline__ void __attribute__((__always_inline__, __nodebug__)) __sevl(void) {
+ __builtin_arm_sevl();
+}
+
+static __inline__ void __attribute__((__always_inline__, __nodebug__)) __yield(void) {
+ __builtin_arm_yield();
+}
+#endif
+
+#if __ARM_32BIT_STATE
+#define __dbg(t) __builtin_arm_dbg(t)
+#endif
+
+/* 8.5 Swap */
+static __inline__ uint32_t __attribute__((__always_inline__, __nodebug__))
+__swp(uint32_t __x, volatile uint32_t *__p) {
+ uint32_t v;
+ do
+ v = __builtin_arm_ldrex(__p);
+ while (__builtin_arm_strex(__x, __p));
+ return v;
+}
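+/* Usage sketch (illustrative): the exclusive load/store loop above makes
+ * __swp an atomic exchange, which is enough for a toy spinlock; `lock` is a
+ * hypothetical variable:
+ *
+ *   volatile uint32_t lock = 0;
+ *   while (__swp(1, &lock) != 0)   // spin until the previous value was 0
+ *     ;
+ *   ...critical section...
+ *   lock = 0;
+ */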
+
+/* 8.6 Memory prefetch intrinsics */
+/* 8.6.1 Data prefetch */
+#define __pld(addr) __pldx(0, 0, 0, addr)
+
+#if __ARM_32BIT_STATE
+#define __pldx(access_kind, cache_level, retention_policy, addr) \
+ __builtin_arm_prefetch(addr, access_kind, 1)
+#else
+#define __pldx(access_kind, cache_level, retention_policy, addr) \
+ __builtin_arm_prefetch(addr, access_kind, cache_level, retention_policy, 1)
+#endif
+
+/* 8.6.2 Instruction prefetch */
+#define __pli(addr) __plix(0, 0, addr)
+
+#if __ARM_32BIT_STATE
+#define __plix(cache_level, retention_policy, addr) \
+ __builtin_arm_prefetch(addr, 0, 0)
+#else
+#define __plix(cache_level, retention_policy, addr) \
+ __builtin_arm_prefetch(addr, 0, cache_level, retention_policy, 0)
+#endif
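+/* Usage sketch (illustrative; `p` is a hypothetical pointer): prefetch data
+ * for a read with __pld, or pass access_kind 1 to __pldx when the line is
+ * about to be written:
+ *
+ *   __pld(p);            // plain data prefetch, read access
+ *   __pldx(1, 0, 0, p);  // prefetch in anticipation of a store
+ */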
+
+/* 8.7 NOP */
+static __inline__ void __attribute__((__always_inline__, __nodebug__)) __nop(void) {
+ __builtin_arm_nop();
+}
+
+/* 9 DATA-PROCESSING INTRINSICS */
+/* 9.2 Miscellaneous data-processing intrinsics */
+/* ROR */
+static __inline__ uint32_t __attribute__((__always_inline__, __nodebug__))
+__ror(uint32_t __x, uint32_t __y) {
+ __y %= 32;
+ if (__y == 0)
+ return __x;
+ return (__x >> __y) | (__x << (32 - __y));
+}
+
+static __inline__ uint64_t __attribute__((__always_inline__, __nodebug__))
+__rorll(uint64_t __x, uint32_t __y) {
+ __y %= 64;
+ if (__y == 0)
+ return __x;
+ return (__x >> __y) | (__x << (64 - __y));
+}
+
+static __inline__ unsigned long __attribute__((__always_inline__, __nodebug__))
+__rorl(unsigned long __x, uint32_t __y) {
+#if __SIZEOF_LONG__ == 4
+ return __ror(__x, __y);
+#else
+ return __rorll(__x, __y);
+#endif
+}
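+/* Usage sketch (illustrative): the rotate count is reduced modulo the
+ * operand width, so rotating a 32-bit value right by 8 moves its low byte to
+ * the top:
+ *
+ *   uint32_t r = __ror(0x000000AAu, 8);   // r == 0xAA000000
+ */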
+
+/* CLZ */
+static __inline__ uint32_t __attribute__((__always_inline__, __nodebug__))
+__clz(uint32_t __t) {
+ return __builtin_clz(__t);
+}
+
+static __inline__ unsigned long __attribute__((__always_inline__, __nodebug__))
+__clzl(unsigned long __t) {
+ return __builtin_clzl(__t);
+}
+
+static __inline__ uint64_t __attribute__((__always_inline__, __nodebug__))
+__clzll(uint64_t __t) {
+ return __builtin_clzll(__t);
+}
+
+/* REV */
+static __inline__ uint32_t __attribute__((__always_inline__, __nodebug__))
+__rev(uint32_t __t) {
+ return __builtin_bswap32(__t);
+}
+
+static __inline__ unsigned long __attribute__((__always_inline__, __nodebug__))
+__revl(unsigned long __t) {
+#if __SIZEOF_LONG__ == 4
+ return __builtin_bswap32(__t);
+#else
+ return __builtin_bswap64(__t);
+#endif
+}
+
+static __inline__ uint64_t __attribute__((__always_inline__, __nodebug__))
+__revll(uint64_t __t) {
+ return __builtin_bswap64(__t);
+}
+
+/* REV16 */
+static __inline__ uint32_t __attribute__((__always_inline__, __nodebug__))
+__rev16(uint32_t __t) {
+ return __ror(__rev(__t), 16);
+}
+
+static __inline__ uint64_t __attribute__((__always_inline__, __nodebug__))
+__rev16ll(uint64_t __t) {
+ return (((uint64_t)__rev16(__t >> 32)) << 32) | __rev16(__t);
+}
+
+static __inline__ unsigned long __attribute__((__always_inline__, __nodebug__))
+__rev16l(unsigned long __t) {
+#if __SIZEOF_LONG__ == 4
+ return __rev16(__t);
+#else
+ return __rev16ll(__t);
+#endif
+}
+
+/* REVSH */
+static __inline__ int16_t __attribute__((__always_inline__, __nodebug__))
+__revsh(int16_t __t) {
+ return __builtin_bswap16(__t);
+}
+
+/* RBIT */
+static __inline__ uint32_t __attribute__((__always_inline__, __nodebug__))
+__rbit(uint32_t __t) {
+ return __builtin_arm_rbit(__t);
+}
+
+static __inline__ uint64_t __attribute__((__always_inline__, __nodebug__))
+__rbitll(uint64_t __t) {
+#if __ARM_32BIT_STATE
+ return (((uint64_t)__builtin_arm_rbit(__t)) << 32) |
+ __builtin_arm_rbit(__t >> 32);
+#else
+ return __builtin_arm_rbit64(__t);
+#endif
+}
+
+static __inline__ unsigned long __attribute__((__always_inline__, __nodebug__))
+__rbitl(unsigned long __t) {
+#if __SIZEOF_LONG__ == 4
+ return __rbit(__t);
+#else
+ return __rbitll(__t);
+#endif
+}
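+/* Usage sketch (illustrative): __rbit mirrors the bit order, so bit 0 lands
+ * in bit 31; the 32-bit-state path above builds the 64-bit mirror from two
+ * 32-bit RBITs with the halves swapped:
+ *
+ *   uint32_t r = __rbit(0x00000001u);   // r == 0x80000000
+ */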
+
+/*
+ * 9.4 Saturating intrinsics
+ *
+ * FIXME: Change the guards to their corresponding __ARM_FEATURE flags when
+ * the Q-flag intrinsics are implemented and the flag is enabled.
+ */
+/* 9.4.1 Width-specified saturation intrinsics */
+#if __ARM_32BIT_STATE
+#define __ssat(x, y) __builtin_arm_ssat(x, y)
+#define __usat(x, y) __builtin_arm_usat(x, y)
+#endif
+
+/* 9.4.2 Saturating addition and subtraction intrinsics */
+#if __ARM_32BIT_STATE
+static __inline__ int32_t __attribute__((__always_inline__, __nodebug__))
+__qadd(int32_t __t, int32_t __v) {
+ return __builtin_arm_qadd(__t, __v);
+}
+
+static __inline__ int32_t __attribute__((__always_inline__, __nodebug__))
+__qsub(int32_t __t, int32_t __v) {
+ return __builtin_arm_qsub(__t, __v);
+}
+
+static __inline__ int32_t __attribute__((__always_inline__, __nodebug__))
+__qdbl(int32_t __t) {
+ return __builtin_arm_qadd(__t, __t);
+}
+#endif
+
+/* 9.7 CRC32 intrinsics */
+#if __ARM_FEATURE_CRC32
+static __inline__ uint32_t __attribute__((__always_inline__, __nodebug__))
+__crc32b(uint32_t __a, uint8_t __b) {
+ return __builtin_arm_crc32b(__a, __b);
+}
+
+static __inline__ uint32_t __attribute__((__always_inline__, __nodebug__))
+__crc32h(uint32_t __a, uint16_t __b) {
+ return __builtin_arm_crc32h(__a, __b);
+}
+
+static __inline__ uint32_t __attribute__((__always_inline__, __nodebug__))
+__crc32w(uint32_t __a, uint32_t __b) {
+ return __builtin_arm_crc32w(__a, __b);
+}
+
+static __inline__ uint32_t __attribute__((__always_inline__, __nodebug__))
+__crc32d(uint32_t __a, uint64_t __b) {
+ return __builtin_arm_crc32d(__a, __b);
+}
+
+static __inline__ uint32_t __attribute__((__always_inline__, __nodebug__))
+__crc32cb(uint32_t __a, uint8_t __b) {
+ return __builtin_arm_crc32cb(__a, __b);
+}
+
+static __inline__ uint32_t __attribute__((__always_inline__, __nodebug__))
+__crc32ch(uint32_t __a, uint16_t __b) {
+ return __builtin_arm_crc32ch(__a, __b);
+}
+
+static __inline__ uint32_t __attribute__((__always_inline__, __nodebug__))
+__crc32cw(uint32_t __a, uint32_t __b) {
+ return __builtin_arm_crc32cw(__a, __b);
+}
+
+static __inline__ uint32_t __attribute__((__always_inline__, __nodebug__))
+__crc32cd(uint32_t __a, uint64_t __b) {
+ return __builtin_arm_crc32cd(__a, __b);
+}
+#endif
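+/* Usage sketch (illustrative; requires __ARM_FEATURE_CRC32, and `buf`/`len`
+ * are hypothetical): the usual byte-at-a-time CRC-32, folding each byte into
+ * a running accumulator:
+ *
+ *   uint32_t crc = 0xFFFFFFFFu;
+ *   for (size_t i = 0; i != len; ++i)
+ *     crc = __crc32b(crc, buf[i]);
+ *   crc = ~crc;
+ */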
+
+/* 10.1 Special register intrinsics */
+#define __arm_rsr(sysreg) __builtin_arm_rsr(sysreg)
+#define __arm_rsr64(sysreg) __builtin_arm_rsr64(sysreg)
+#define __arm_rsrp(sysreg) __builtin_arm_rsrp(sysreg)
+#define __arm_wsr(sysreg, v) __builtin_arm_wsr(sysreg, v)
+#define __arm_wsr64(sysreg, v) __builtin_arm_wsr64(sysreg, v)
+#define __arm_wsrp(sysreg, v) __builtin_arm_wsrp(sysreg, v)
+
+#if defined(__cplusplus)
+}
+#endif
+
+#endif /* __ARM_ACLE_H */
diff --git a/current/clang-include/avx2intrin.h b/current/clang-include/avx2intrin.h
new file mode 100644
index 0000000..13bcbef
--- /dev/null
+++ b/current/clang-include/avx2intrin.h
@@ -0,0 +1,1299 @@
+/*===---- avx2intrin.h - AVX2 intrinsics -----------------------------------===
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+ * THE SOFTWARE.
+ *
+ *===-----------------------------------------------------------------------===
+ */
+
+#ifndef __IMMINTRIN_H
+#error "Never use <avx2intrin.h> directly; include <immintrin.h> instead."
+#endif
+
+#ifndef __AVX2INTRIN_H
+#define __AVX2INTRIN_H
+
+/* Define the default attributes for the functions in this file. */
+#define __DEFAULT_FN_ATTRS __attribute__((__always_inline__, __nodebug__, __target__("avx2")))
+
+/* SSE4 Multiple Packed Sums of Absolute Difference. */
+#define _mm256_mpsadbw_epu8(X, Y, M) \
+ (__m256i)__builtin_ia32_mpsadbw256((__v32qi)(__m256i)(X), \
+ (__v32qi)(__m256i)(Y), (int)(M))
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS
+_mm256_abs_epi8(__m256i __a)
+{
+ return (__m256i)__builtin_ia32_pabsb256((__v32qi)__a);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS
+_mm256_abs_epi16(__m256i __a)
+{
+ return (__m256i)__builtin_ia32_pabsw256((__v16hi)__a);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS
+_mm256_abs_epi32(__m256i __a)
+{
+ return (__m256i)__builtin_ia32_pabsd256((__v8si)__a);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS
+_mm256_packs_epi16(__m256i __a, __m256i __b)
+{
+ return (__m256i)__builtin_ia32_packsswb256((__v16hi)__a, (__v16hi)__b);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS
+_mm256_packs_epi32(__m256i __a, __m256i __b)
+{
+ return (__m256i)__builtin_ia32_packssdw256((__v8si)__a, (__v8si)__b);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS
+_mm256_packus_epi16(__m256i __a, __m256i __b)
+{
+ return (__m256i)__builtin_ia32_packuswb256((__v16hi)__a, (__v16hi)__b);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS
+_mm256_packus_epi32(__m256i __V1, __m256i __V2)
+{
+ return (__m256i) __builtin_ia32_packusdw256((__v8si)__V1, (__v8si)__V2);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS
+_mm256_add_epi8(__m256i __a, __m256i __b)
+{
+ return (__m256i)((__v32qu)__a + (__v32qu)__b);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS
+_mm256_add_epi16(__m256i __a, __m256i __b)
+{
+ return (__m256i)((__v16hu)__a + (__v16hu)__b);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS
+_mm256_add_epi32(__m256i __a, __m256i __b)
+{
+ return (__m256i)((__v8su)__a + (__v8su)__b);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS
+_mm256_add_epi64(__m256i __a, __m256i __b)
+{
+ return (__m256i)((__v4du)__a + (__v4du)__b);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS
+_mm256_adds_epi8(__m256i __a, __m256i __b)
+{
+ return (__m256i)__builtin_ia32_paddsb256((__v32qi)__a, (__v32qi)__b);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS
+_mm256_adds_epi16(__m256i __a, __m256i __b)
+{
+ return (__m256i)__builtin_ia32_paddsw256((__v16hi)__a, (__v16hi)__b);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS
+_mm256_adds_epu8(__m256i __a, __m256i __b)
+{
+ return (__m256i)__builtin_ia32_paddusb256((__v32qi)__a, (__v32qi)__b);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS
+_mm256_adds_epu16(__m256i __a, __m256i __b)
+{
+ return (__m256i)__builtin_ia32_paddusw256((__v16hi)__a, (__v16hi)__b);
+}
+
+#define _mm256_alignr_epi8(a, b, n) __extension__ ({ \
+ (__m256i)__builtin_ia32_palignr256((__v32qi)(__m256i)(a), \
+ (__v32qi)(__m256i)(b), (n)); })
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS
+_mm256_and_si256(__m256i __a, __m256i __b)
+{
+ return (__m256i)((__v4du)__a & (__v4du)__b);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS
+_mm256_andnot_si256(__m256i __a, __m256i __b)
+{
+ return (__m256i)(~(__v4du)__a & (__v4du)__b);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS
+_mm256_avg_epu8(__m256i __a, __m256i __b)
+{
+ return (__m256i)__builtin_ia32_pavgb256((__v32qi)__a, (__v32qi)__b);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS
+_mm256_avg_epu16(__m256i __a, __m256i __b)
+{
+ return (__m256i)__builtin_ia32_pavgw256((__v16hi)__a, (__v16hi)__b);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS
+_mm256_blendv_epi8(__m256i __V1, __m256i __V2, __m256i __M)
+{
+ return (__m256i)__builtin_ia32_pblendvb256((__v32qi)__V1, (__v32qi)__V2,
+ (__v32qi)__M);
+}
+
+#define _mm256_blend_epi16(V1, V2, M) __extension__ ({ \
+ (__m256i)__builtin_shufflevector((__v16hi)(__m256i)(V1), \
+ (__v16hi)(__m256i)(V2), \
+ (((M) & 0x01) ? 16 : 0), \
+ (((M) & 0x02) ? 17 : 1), \
+ (((M) & 0x04) ? 18 : 2), \
+ (((M) & 0x08) ? 19 : 3), \
+ (((M) & 0x10) ? 20 : 4), \
+ (((M) & 0x20) ? 21 : 5), \
+ (((M) & 0x40) ? 22 : 6), \
+ (((M) & 0x80) ? 23 : 7), \
+ (((M) & 0x01) ? 24 : 8), \
+ (((M) & 0x02) ? 25 : 9), \
+ (((M) & 0x04) ? 26 : 10), \
+ (((M) & 0x08) ? 27 : 11), \
+ (((M) & 0x10) ? 28 : 12), \
+ (((M) & 0x20) ? 29 : 13), \
+ (((M) & 0x40) ? 30 : 14), \
+ (((M) & 0x80) ? 31 : 15)); })
+
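+/* Usage sketch (illustrative): the 8-bit immediate is applied to each
+ * 128-bit half, so mask bit i selects 16-bit lanes i and i+8 from the second
+ * operand:
+ *
+ *   __m256i a = _mm256_set1_epi16(1);
+ *   __m256i b = _mm256_set1_epi16(2);
+ *   __m256i r = _mm256_blend_epi16(a, b, 0x01);  // lanes 0 and 8 taken from b
+ */
+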
+static __inline__ __m256i __DEFAULT_FN_ATTRS
+_mm256_cmpeq_epi8(__m256i __a, __m256i __b)
+{
+ return (__m256i)((__v32qi)__a == (__v32qi)__b);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS
+_mm256_cmpeq_epi16(__m256i __a, __m256i __b)
+{
+ return (__m256i)((__v16hi)__a == (__v16hi)__b);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS
+_mm256_cmpeq_epi32(__m256i __a, __m256i __b)
+{
+ return (__m256i)((__v8si)__a == (__v8si)__b);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS
+_mm256_cmpeq_epi64(__m256i __a, __m256i __b)
+{
+ return (__m256i)((__v4di)__a == (__v4di)__b);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS
+_mm256_cmpgt_epi8(__m256i __a, __m256i __b)
+{
+ /* This function always performs a signed comparison, but __v32qi is a char
+ which may be signed or unsigned, so use __v32qs. */
+ return (__m256i)((__v32qs)__a > (__v32qs)__b);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS
+_mm256_cmpgt_epi16(__m256i __a, __m256i __b)
+{
+ return (__m256i)((__v16hi)__a > (__v16hi)__b);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS
+_mm256_cmpgt_epi32(__m256i __a, __m256i __b)
+{
+ return (__m256i)((__v8si)__a > (__v8si)__b);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS
+_mm256_cmpgt_epi64(__m256i __a, __m256i __b)
+{
+ return (__m256i)((__v4di)__a > (__v4di)__b);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS
+_mm256_hadd_epi16(__m256i __a, __m256i __b)
+{
+ return (__m256i)__builtin_ia32_phaddw256((__v16hi)__a, (__v16hi)__b);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS
+_mm256_hadd_epi32(__m256i __a, __m256i __b)
+{
+ return (__m256i)__builtin_ia32_phaddd256((__v8si)__a, (__v8si)__b);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS
+_mm256_hadds_epi16(__m256i __a, __m256i __b)
+{
+ return (__m256i)__builtin_ia32_phaddsw256((__v16hi)__a, (__v16hi)__b);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS
+_mm256_hsub_epi16(__m256i __a, __m256i __b)
+{
+ return (__m256i)__builtin_ia32_phsubw256((__v16hi)__a, (__v16hi)__b);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS
+_mm256_hsub_epi32(__m256i __a, __m256i __b)
+{
+ return (__m256i)__builtin_ia32_phsubd256((__v8si)__a, (__v8si)__b);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS
+_mm256_hsubs_epi16(__m256i __a, __m256i __b)
+{
+ return (__m256i)__builtin_ia32_phsubsw256((__v16hi)__a, (__v16hi)__b);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS
+_mm256_maddubs_epi16(__m256i __a, __m256i __b)
+{
+ return (__m256i)__builtin_ia32_pmaddubsw256((__v32qi)__a, (__v32qi)__b);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS
+_mm256_madd_epi16(__m256i __a, __m256i __b)
+{
+ return (__m256i)__builtin_ia32_pmaddwd256((__v16hi)__a, (__v16hi)__b);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS
+_mm256_max_epi8(__m256i __a, __m256i __b)
+{
+ return (__m256i)__builtin_ia32_pmaxsb256((__v32qi)__a, (__v32qi)__b);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS
+_mm256_max_epi16(__m256i __a, __m256i __b)
+{
+ return (__m256i)__builtin_ia32_pmaxsw256((__v16hi)__a, (__v16hi)__b);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS
+_mm256_max_epi32(__m256i __a, __m256i __b)
+{
+ return (__m256i)__builtin_ia32_pmaxsd256((__v8si)__a, (__v8si)__b);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS
+_mm256_max_epu8(__m256i __a, __m256i __b)
+{
+ return (__m256i)__builtin_ia32_pmaxub256((__v32qi)__a, (__v32qi)__b);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS
+_mm256_max_epu16(__m256i __a, __m256i __b)
+{
+ return (__m256i)__builtin_ia32_pmaxuw256((__v16hi)__a, (__v16hi)__b);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS
+_mm256_max_epu32(__m256i __a, __m256i __b)
+{
+ return (__m256i)__builtin_ia32_pmaxud256((__v8si)__a, (__v8si)__b);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS
+_mm256_min_epi8(__m256i __a, __m256i __b)
+{
+ return (__m256i)__builtin_ia32_pminsb256((__v32qi)__a, (__v32qi)__b);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS
+_mm256_min_epi16(__m256i __a, __m256i __b)
+{
+ return (__m256i)__builtin_ia32_pminsw256((__v16hi)__a, (__v16hi)__b);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS
+_mm256_min_epi32(__m256i __a, __m256i __b)
+{
+ return (__m256i)__builtin_ia32_pminsd256((__v8si)__a, (__v8si)__b);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS
+_mm256_min_epu8(__m256i __a, __m256i __b)
+{
+ return (__m256i)__builtin_ia32_pminub256((__v32qi)__a, (__v32qi)__b);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS
+_mm256_min_epu16(__m256i __a, __m256i __b)
+{
+ return (__m256i)__builtin_ia32_pminuw256 ((__v16hi)__a, (__v16hi)__b);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS
+_mm256_min_epu32(__m256i __a, __m256i __b)
+{
+ return (__m256i)__builtin_ia32_pminud256((__v8si)__a, (__v8si)__b);
+}
+
+static __inline__ int __DEFAULT_FN_ATTRS
+_mm256_movemask_epi8(__m256i __a)
+{
+ return __builtin_ia32_pmovmskb256((__v32qi)__a);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS
+_mm256_cvtepi8_epi16(__m128i __V)
+{
+ /* This function always performs a signed extension, but __v16qi is a char
+ which may be signed or unsigned, so use __v16qs. */
+ return (__m256i)__builtin_convertvector((__v16qs)__V, __v16hi);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS
+_mm256_cvtepi8_epi32(__m128i __V)
+{
+ /* This function always performs a signed extension, but __v16qi is a char
+ which may be signed or unsigned, so use __v16qs. */
+ return (__m256i)__builtin_convertvector(__builtin_shufflevector((__v16qs)__V, (__v16qs)__V, 0, 1, 2, 3, 4, 5, 6, 7), __v8si);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS
+_mm256_cvtepi8_epi64(__m128i __V)
+{
+ /* This function always performs a signed extension, but __v16qi is a char
+ which may be signed or unsigned, so use __v16qs. */
+ return (__m256i)__builtin_convertvector(__builtin_shufflevector((__v16qs)__V, (__v16qs)__V, 0, 1, 2, 3), __v4di);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS
+_mm256_cvtepi16_epi32(__m128i __V)
+{
+ return (__m256i)__builtin_convertvector((__v8hi)__V, __v8si);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS
+_mm256_cvtepi16_epi64(__m128i __V)
+{
+ return (__m256i)__builtin_convertvector(__builtin_shufflevector((__v8hi)__V, (__v8hi)__V, 0, 1, 2, 3), __v4di);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS
+_mm256_cvtepi32_epi64(__m128i __V)
+{
+ return (__m256i)__builtin_convertvector((__v4si)__V, __v4di);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS
+_mm256_cvtepu8_epi16(__m128i __V)
+{
+ return (__m256i)__builtin_convertvector((__v16qu)__V, __v16hi);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS
+_mm256_cvtepu8_epi32(__m128i __V)
+{
+ return (__m256i)__builtin_convertvector(__builtin_shufflevector((__v16qu)__V, (__v16qu)__V, 0, 1, 2, 3, 4, 5, 6, 7), __v8si);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS
+_mm256_cvtepu8_epi64(__m128i __V)
+{
+ return (__m256i)__builtin_convertvector(__builtin_shufflevector((__v16qu)__V, (__v16qu)__V, 0, 1, 2, 3), __v4di);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS
+_mm256_cvtepu16_epi32(__m128i __V)
+{
+ return (__m256i)__builtin_convertvector((__v8hu)__V, __v8si);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS
+_mm256_cvtepu16_epi64(__m128i __V)
+{
+ return (__m256i)__builtin_convertvector(__builtin_shufflevector((__v8hu)__V, (__v8hu)__V, 0, 1, 2, 3), __v4di);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS
+_mm256_cvtepu32_epi64(__m128i __V)
+{
+ return (__m256i)__builtin_convertvector((__v4su)__V, __v4di);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS
+_mm256_mul_epi32(__m256i __a, __m256i __b)
+{
+ return (__m256i)__builtin_ia32_pmuldq256((__v8si)__a, (__v8si)__b);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS
+_mm256_mulhrs_epi16(__m256i __a, __m256i __b)
+{
+ return (__m256i)__builtin_ia32_pmulhrsw256((__v16hi)__a, (__v16hi)__b);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS
+_mm256_mulhi_epu16(__m256i __a, __m256i __b)
+{
+ return (__m256i)__builtin_ia32_pmulhuw256((__v16hi)__a, (__v16hi)__b);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS
+_mm256_mulhi_epi16(__m256i __a, __m256i __b)
+{
+ return (__m256i)__builtin_ia32_pmulhw256((__v16hi)__a, (__v16hi)__b);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS
+_mm256_mullo_epi16(__m256i __a, __m256i __b)
+{
+ return (__m256i)((__v16hu)__a * (__v16hu)__b);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS
+_mm256_mullo_epi32 (__m256i __a, __m256i __b)
+{
+ return (__m256i)((__v8su)__a * (__v8su)__b);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS
+_mm256_mul_epu32(__m256i __a, __m256i __b)
+{
+ return __builtin_ia32_pmuludq256((__v8si)__a, (__v8si)__b);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS
+_mm256_or_si256(__m256i __a, __m256i __b)
+{
+ return (__m256i)((__v4du)__a | (__v4du)__b);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS
+_mm256_sad_epu8(__m256i __a, __m256i __b)
+{
+ return __builtin_ia32_psadbw256((__v32qi)__a, (__v32qi)__b);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS
+_mm256_shuffle_epi8(__m256i __a, __m256i __b)
+{
+ return (__m256i)__builtin_ia32_pshufb256((__v32qi)__a, (__v32qi)__b);
+}
+
+#define _mm256_shuffle_epi32(a, imm) __extension__ ({ \
+ (__m256i)__builtin_shufflevector((__v8si)(__m256i)(a), \
+ (__v8si)_mm256_undefined_si256(), \
+ 0 + (((imm) >> 0) & 0x3), \
+ 0 + (((imm) >> 2) & 0x3), \
+ 0 + (((imm) >> 4) & 0x3), \
+ 0 + (((imm) >> 6) & 0x3), \
+ 4 + (((imm) >> 0) & 0x3), \
+ 4 + (((imm) >> 2) & 0x3), \
+ 4 + (((imm) >> 4) & 0x3), \
+ 4 + (((imm) >> 6) & 0x3)); })
+
+#define _mm256_shufflehi_epi16(a, imm) __extension__ ({ \
+ (__m256i)__builtin_shufflevector((__v16hi)(__m256i)(a), \
+ (__v16hi)_mm256_undefined_si256(), \
+ 0, 1, 2, 3, \
+ 4 + (((imm) >> 0) & 0x3), \
+ 4 + (((imm) >> 2) & 0x3), \
+ 4 + (((imm) >> 4) & 0x3), \
+ 4 + (((imm) >> 6) & 0x3), \
+ 8, 9, 10, 11, \
+ 12 + (((imm) >> 0) & 0x3), \
+ 12 + (((imm) >> 2) & 0x3), \
+ 12 + (((imm) >> 4) & 0x3), \
+ 12 + (((imm) >> 6) & 0x3)); })
+
+#define _mm256_shufflelo_epi16(a, imm) __extension__ ({ \
+ (__m256i)__builtin_shufflevector((__v16hi)(__m256i)(a), \
+ (__v16hi)_mm256_undefined_si256(), \
+ 0 + (((imm) >> 0) & 0x3), \
+ 0 + (((imm) >> 2) & 0x3), \
+ 0 + (((imm) >> 4) & 0x3), \
+ 0 + (((imm) >> 6) & 0x3), \
+ 4, 5, 6, 7, \
+ 8 + (((imm) >> 0) & 0x3), \
+ 8 + (((imm) >> 2) & 0x3), \
+ 8 + (((imm) >> 4) & 0x3), \
+ 8 + (((imm) >> 6) & 0x3), \
+ 12, 13, 14, 15); })
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS
+_mm256_sign_epi8(__m256i __a, __m256i __b)
+{
+ return (__m256i)__builtin_ia32_psignb256((__v32qi)__a, (__v32qi)__b);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS
+_mm256_sign_epi16(__m256i __a, __m256i __b)
+{
+ return (__m256i)__builtin_ia32_psignw256((__v16hi)__a, (__v16hi)__b);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS
+_mm256_sign_epi32(__m256i __a, __m256i __b)
+{
+ return (__m256i)__builtin_ia32_psignd256((__v8si)__a, (__v8si)__b);
+}
+
+#define _mm256_slli_si256(a, imm) __extension__ ({ \
+ (__m256i)__builtin_shufflevector( \
+ (__v32qi)_mm256_setzero_si256(), \
+ (__v32qi)(__m256i)(a), \
+ ((char)(imm)&0xF0) ? 0 : ((char)(imm)>0x0 ? 16 : 32) - (char)(imm), \
+ ((char)(imm)&0xF0) ? 1 : ((char)(imm)>0x1 ? 17 : 33) - (char)(imm), \
+ ((char)(imm)&0xF0) ? 2 : ((char)(imm)>0x2 ? 18 : 34) - (char)(imm), \
+ ((char)(imm)&0xF0) ? 3 : ((char)(imm)>0x3 ? 19 : 35) - (char)(imm), \
+ ((char)(imm)&0xF0) ? 4 : ((char)(imm)>0x4 ? 20 : 36) - (char)(imm), \
+ ((char)(imm)&0xF0) ? 5 : ((char)(imm)>0x5 ? 21 : 37) - (char)(imm), \
+ ((char)(imm)&0xF0) ? 6 : ((char)(imm)>0x6 ? 22 : 38) - (char)(imm), \
+ ((char)(imm)&0xF0) ? 7 : ((char)(imm)>0x7 ? 23 : 39) - (char)(imm), \
+ ((char)(imm)&0xF0) ? 8 : ((char)(imm)>0x8 ? 24 : 40) - (char)(imm), \
+ ((char)(imm)&0xF0) ? 9 : ((char)(imm)>0x9 ? 25 : 41) - (char)(imm), \
+ ((char)(imm)&0xF0) ? 10 : ((char)(imm)>0xA ? 26 : 42) - (char)(imm), \
+ ((char)(imm)&0xF0) ? 11 : ((char)(imm)>0xB ? 27 : 43) - (char)(imm), \
+ ((char)(imm)&0xF0) ? 12 : ((char)(imm)>0xC ? 28 : 44) - (char)(imm), \
+ ((char)(imm)&0xF0) ? 13 : ((char)(imm)>0xD ? 29 : 45) - (char)(imm), \
+ ((char)(imm)&0xF0) ? 14 : ((char)(imm)>0xE ? 30 : 46) - (char)(imm), \
+ ((char)(imm)&0xF0) ? 15 : ((char)(imm)>0xF ? 31 : 47) - (char)(imm), \
+ ((char)(imm)&0xF0) ? 16 : ((char)(imm)>0x0 ? 32 : 48) - (char)(imm), \
+ ((char)(imm)&0xF0) ? 17 : ((char)(imm)>0x1 ? 33 : 49) - (char)(imm), \
+ ((char)(imm)&0xF0) ? 18 : ((char)(imm)>0x2 ? 34 : 50) - (char)(imm), \
+ ((char)(imm)&0xF0) ? 19 : ((char)(imm)>0x3 ? 35 : 51) - (char)(imm), \
+ ((char)(imm)&0xF0) ? 20 : ((char)(imm)>0x4 ? 36 : 52) - (char)(imm), \
+ ((char)(imm)&0xF0) ? 21 : ((char)(imm)>0x5 ? 37 : 53) - (char)(imm), \
+ ((char)(imm)&0xF0) ? 22 : ((char)(imm)>0x6 ? 38 : 54) - (char)(imm), \
+ ((char)(imm)&0xF0) ? 23 : ((char)(imm)>0x7 ? 39 : 55) - (char)(imm), \
+ ((char)(imm)&0xF0) ? 24 : ((char)(imm)>0x8 ? 40 : 56) - (char)(imm), \
+ ((char)(imm)&0xF0) ? 25 : ((char)(imm)>0x9 ? 41 : 57) - (char)(imm), \
+ ((char)(imm)&0xF0) ? 26 : ((char)(imm)>0xA ? 42 : 58) - (char)(imm), \
+ ((char)(imm)&0xF0) ? 27 : ((char)(imm)>0xB ? 43 : 59) - (char)(imm), \
+ ((char)(imm)&0xF0) ? 28 : ((char)(imm)>0xC ? 44 : 60) - (char)(imm), \
+ ((char)(imm)&0xF0) ? 29 : ((char)(imm)>0xD ? 45 : 61) - (char)(imm), \
+ ((char)(imm)&0xF0) ? 30 : ((char)(imm)>0xE ? 46 : 62) - (char)(imm), \
+ ((char)(imm)&0xF0) ? 31 : ((char)(imm)>0xF ? 47 : 63) - (char)(imm)); })
+
+#define _mm256_bslli_epi128(a, count) _mm256_slli_si256((a), (count))
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS
+_mm256_slli_epi16(__m256i __a, int __count)
+{
+ return (__m256i)__builtin_ia32_psllwi256((__v16hi)__a, __count);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS
+_mm256_sll_epi16(__m256i __a, __m128i __count)
+{
+ return (__m256i)__builtin_ia32_psllw256((__v16hi)__a, (__v8hi)__count);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS
+_mm256_slli_epi32(__m256i __a, int __count)
+{
+ return (__m256i)__builtin_ia32_pslldi256((__v8si)__a, __count);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS
+_mm256_sll_epi32(__m256i __a, __m128i __count)
+{
+ return (__m256i)__builtin_ia32_pslld256((__v8si)__a, (__v4si)__count);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS
+_mm256_slli_epi64(__m256i __a, int __count)
+{
+ return __builtin_ia32_psllqi256((__v4di)__a, __count);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS
+_mm256_sll_epi64(__m256i __a, __m128i __count)
+{
+ return __builtin_ia32_psllq256((__v4di)__a, __count);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS
+_mm256_srai_epi16(__m256i __a, int __count)
+{
+ return (__m256i)__builtin_ia32_psrawi256((__v16hi)__a, __count);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS
+_mm256_sra_epi16(__m256i __a, __m128i __count)
+{
+ return (__m256i)__builtin_ia32_psraw256((__v16hi)__a, (__v8hi)__count);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS
+_mm256_srai_epi32(__m256i __a, int __count)
+{
+ return (__m256i)__builtin_ia32_psradi256((__v8si)__a, __count);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS
+_mm256_sra_epi32(__m256i __a, __m128i __count)
+{
+ return (__m256i)__builtin_ia32_psrad256((__v8si)__a, (__v4si)__count);
+}
+
+#define _mm256_srli_si256(a, imm) __extension__ ({ \
+ (__m256i)__builtin_shufflevector( \
+ (__v32qi)(__m256i)(a), \
+ (__v32qi)_mm256_setzero_si256(), \
+ ((char)(imm)&0xF0) ? 32 : (char)(imm) + ((char)(imm)>0xF ? 16 : 0), \
+ ((char)(imm)&0xF0) ? 33 : (char)(imm) + ((char)(imm)>0xE ? 17 : 1), \
+ ((char)(imm)&0xF0) ? 34 : (char)(imm) + ((char)(imm)>0xD ? 18 : 2), \
+ ((char)(imm)&0xF0) ? 35 : (char)(imm) + ((char)(imm)>0xC ? 19 : 3), \
+ ((char)(imm)&0xF0) ? 36 : (char)(imm) + ((char)(imm)>0xB ? 20 : 4), \
+ ((char)(imm)&0xF0) ? 37 : (char)(imm) + ((char)(imm)>0xA ? 21 : 5), \
+ ((char)(imm)&0xF0) ? 38 : (char)(imm) + ((char)(imm)>0x9 ? 22 : 6), \
+ ((char)(imm)&0xF0) ? 39 : (char)(imm) + ((char)(imm)>0x8 ? 23 : 7), \
+ ((char)(imm)&0xF0) ? 40 : (char)(imm) + ((char)(imm)>0x7 ? 24 : 8), \
+ ((char)(imm)&0xF0) ? 41 : (char)(imm) + ((char)(imm)>0x6 ? 25 : 9), \
+ ((char)(imm)&0xF0) ? 42 : (char)(imm) + ((char)(imm)>0x5 ? 26 : 10), \
+ ((char)(imm)&0xF0) ? 43 : (char)(imm) + ((char)(imm)>0x4 ? 27 : 11), \
+ ((char)(imm)&0xF0) ? 44 : (char)(imm) + ((char)(imm)>0x3 ? 28 : 12), \
+ ((char)(imm)&0xF0) ? 45 : (char)(imm) + ((char)(imm)>0x2 ? 29 : 13), \
+ ((char)(imm)&0xF0) ? 46 : (char)(imm) + ((char)(imm)>0x1 ? 30 : 14), \
+ ((char)(imm)&0xF0) ? 47 : (char)(imm) + ((char)(imm)>0x0 ? 31 : 15), \
+ ((char)(imm)&0xF0) ? 48 : (char)(imm) + ((char)(imm)>0xF ? 32 : 16), \
+ ((char)(imm)&0xF0) ? 49 : (char)(imm) + ((char)(imm)>0xE ? 33 : 17), \
+ ((char)(imm)&0xF0) ? 50 : (char)(imm) + ((char)(imm)>0xD ? 34 : 18), \
+ ((char)(imm)&0xF0) ? 51 : (char)(imm) + ((char)(imm)>0xC ? 35 : 19), \
+ ((char)(imm)&0xF0) ? 52 : (char)(imm) + ((char)(imm)>0xB ? 36 : 20), \
+ ((char)(imm)&0xF0) ? 53 : (char)(imm) + ((char)(imm)>0xA ? 37 : 21), \
+ ((char)(imm)&0xF0) ? 54 : (char)(imm) + ((char)(imm)>0x9 ? 38 : 22), \
+ ((char)(imm)&0xF0) ? 55 : (char)(imm) + ((char)(imm)>0x8 ? 39 : 23), \
+ ((char)(imm)&0xF0) ? 56 : (char)(imm) + ((char)(imm)>0x7 ? 40 : 24), \
+ ((char)(imm)&0xF0) ? 57 : (char)(imm) + ((char)(imm)>0x6 ? 41 : 25), \
+ ((char)(imm)&0xF0) ? 58 : (char)(imm) + ((char)(imm)>0x5 ? 42 : 26), \
+ ((char)(imm)&0xF0) ? 59 : (char)(imm) + ((char)(imm)>0x4 ? 43 : 27), \
+ ((char)(imm)&0xF0) ? 60 : (char)(imm) + ((char)(imm)>0x3 ? 44 : 28), \
+ ((char)(imm)&0xF0) ? 61 : (char)(imm) + ((char)(imm)>0x2 ? 45 : 29), \
+ ((char)(imm)&0xF0) ? 62 : (char)(imm) + ((char)(imm)>0x1 ? 46 : 30), \
+ ((char)(imm)&0xF0) ? 63 : (char)(imm) + ((char)(imm)>0x0 ? 47 : 31)); })
+
+#define _mm256_bsrli_epi128(a, count) _mm256_srli_si256((a), (count))
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS
+_mm256_srli_epi16(__m256i __a, int __count)
+{
+ return (__m256i)__builtin_ia32_psrlwi256((__v16hi)__a, __count);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS
+_mm256_srl_epi16(__m256i __a, __m128i __count)
+{
+ return (__m256i)__builtin_ia32_psrlw256((__v16hi)__a, (__v8hi)__count);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS
+_mm256_srli_epi32(__m256i __a, int __count)
+{
+ return (__m256i)__builtin_ia32_psrldi256((__v8si)__a, __count);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS
+_mm256_srl_epi32(__m256i __a, __m128i __count)
+{
+ return (__m256i)__builtin_ia32_psrld256((__v8si)__a, (__v4si)__count);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS
+_mm256_srli_epi64(__m256i __a, int __count)
+{
+ return __builtin_ia32_psrlqi256((__v4di)__a, __count);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS
+_mm256_srl_epi64(__m256i __a, __m128i __count)
+{
+ return __builtin_ia32_psrlq256((__v4di)__a, __count);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS
+_mm256_sub_epi8(__m256i __a, __m256i __b)
+{
+ return (__m256i)((__v32qu)__a - (__v32qu)__b);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS
+_mm256_sub_epi16(__m256i __a, __m256i __b)
+{
+ return (__m256i)((__v16hu)__a - (__v16hu)__b);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS
+_mm256_sub_epi32(__m256i __a, __m256i __b)
+{
+ return (__m256i)((__v8su)__a - (__v8su)__b);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS
+_mm256_sub_epi64(__m256i __a, __m256i __b)
+{
+ return (__m256i)((__v4du)__a - (__v4du)__b);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS
+_mm256_subs_epi8(__m256i __a, __m256i __b)
+{
+ return (__m256i)__builtin_ia32_psubsb256((__v32qi)__a, (__v32qi)__b);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS
+_mm256_subs_epi16(__m256i __a, __m256i __b)
+{
+ return (__m256i)__builtin_ia32_psubsw256((__v16hi)__a, (__v16hi)__b);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS
+_mm256_subs_epu8(__m256i __a, __m256i __b)
+{
+ return (__m256i)__builtin_ia32_psubusb256((__v32qi)__a, (__v32qi)__b);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS
+_mm256_subs_epu16(__m256i __a, __m256i __b)
+{
+ return (__m256i)__builtin_ia32_psubusw256((__v16hi)__a, (__v16hi)__b);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS
+_mm256_unpackhi_epi8(__m256i __a, __m256i __b)
+{
+ return (__m256i)__builtin_shufflevector((__v32qi)__a, (__v32qi)__b, 8, 32+8, 9, 32+9, 10, 32+10, 11, 32+11, 12, 32+12, 13, 32+13, 14, 32+14, 15, 32+15, 24, 32+24, 25, 32+25, 26, 32+26, 27, 32+27, 28, 32+28, 29, 32+29, 30, 32+30, 31, 32+31);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS
+_mm256_unpackhi_epi16(__m256i __a, __m256i __b)
+{
+ return (__m256i)__builtin_shufflevector((__v16hi)__a, (__v16hi)__b, 4, 16+4, 5, 16+5, 6, 16+6, 7, 16+7, 12, 16+12, 13, 16+13, 14, 16+14, 15, 16+15);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS
+_mm256_unpackhi_epi32(__m256i __a, __m256i __b)
+{
+ return (__m256i)__builtin_shufflevector((__v8si)__a, (__v8si)__b, 2, 8+2, 3, 8+3, 6, 8+6, 7, 8+7);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS
+_mm256_unpackhi_epi64(__m256i __a, __m256i __b)
+{
+ return (__m256i)__builtin_shufflevector((__v4di)__a, (__v4di)__b, 1, 4+1, 3, 4+3);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS
+_mm256_unpacklo_epi8(__m256i __a, __m256i __b)
+{
+ return (__m256i)__builtin_shufflevector((__v32qi)__a, (__v32qi)__b, 0, 32+0, 1, 32+1, 2, 32+2, 3, 32+3, 4, 32+4, 5, 32+5, 6, 32+6, 7, 32+7, 16, 32+16, 17, 32+17, 18, 32+18, 19, 32+19, 20, 32+20, 21, 32+21, 22, 32+22, 23, 32+23);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS
+_mm256_unpacklo_epi16(__m256i __a, __m256i __b)
+{
+ return (__m256i)__builtin_shufflevector((__v16hi)__a, (__v16hi)__b, 0, 16+0, 1, 16+1, 2, 16+2, 3, 16+3, 8, 16+8, 9, 16+9, 10, 16+10, 11, 16+11);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS
+_mm256_unpacklo_epi32(__m256i __a, __m256i __b)
+{
+ return (__m256i)__builtin_shufflevector((__v8si)__a, (__v8si)__b, 0, 8+0, 1, 8+1, 4, 8+4, 5, 8+5);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS
+_mm256_unpacklo_epi64(__m256i __a, __m256i __b)
+{
+ return (__m256i)__builtin_shufflevector((__v4di)__a, (__v4di)__b, 0, 4+0, 2, 4+2);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS
+_mm256_xor_si256(__m256i __a, __m256i __b)
+{
+ return (__m256i)((__v4du)__a ^ (__v4du)__b);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS
+_mm256_stream_load_si256(__m256i const *__V)
+{
+ return (__m256i)__builtin_ia32_movntdqa256((const __v4di *)__V);
+}
+
+static __inline__ __m128 __DEFAULT_FN_ATTRS
+_mm_broadcastss_ps(__m128 __X)
+{
+ return (__m128)__builtin_shufflevector((__v4sf)__X, (__v4sf)__X, 0, 0, 0, 0);
+}
+
+static __inline__ __m128d __DEFAULT_FN_ATTRS
+_mm_broadcastsd_pd(__m128d __a)
+{
+ return __builtin_shufflevector((__v2df)__a, (__v2df)__a, 0, 0);
+}
+
+static __inline__ __m256 __DEFAULT_FN_ATTRS
+_mm256_broadcastss_ps(__m128 __X)
+{
+ return (__m256)__builtin_shufflevector((__v4sf)__X, (__v4sf)__X, 0, 0, 0, 0, 0, 0, 0, 0);
+}
+
+static __inline__ __m256d __DEFAULT_FN_ATTRS
+_mm256_broadcastsd_pd(__m128d __X)
+{
+ return (__m256d)__builtin_shufflevector((__v2df)__X, (__v2df)__X, 0, 0, 0, 0);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS
+_mm256_broadcastsi128_si256(__m128i __X)
+{
+ return (__m256i)__builtin_shufflevector((__v2di)__X, (__v2di)__X, 0, 1, 0, 1);
+}
+
+#define _mm_blend_epi32(V1, V2, M) __extension__ ({ \
+ (__m128i)__builtin_shufflevector((__v4si)(__m128i)(V1), \
+ (__v4si)(__m128i)(V2), \
+ (((M) & 0x01) ? 4 : 0), \
+ (((M) & 0x02) ? 5 : 1), \
+ (((M) & 0x04) ? 6 : 2), \
+ (((M) & 0x08) ? 7 : 3)); })
+
+#define _mm256_blend_epi32(V1, V2, M) __extension__ ({ \
+ (__m256i)__builtin_shufflevector((__v8si)(__m256i)(V1), \
+ (__v8si)(__m256i)(V2), \
+ (((M) & 0x01) ? 8 : 0), \
+ (((M) & 0x02) ? 9 : 1), \
+ (((M) & 0x04) ? 10 : 2), \
+ (((M) & 0x08) ? 11 : 3), \
+ (((M) & 0x10) ? 12 : 4), \
+ (((M) & 0x20) ? 13 : 5), \
+ (((M) & 0x40) ? 14 : 6), \
+ (((M) & 0x80) ? 15 : 7)); })
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS
+_mm256_broadcastb_epi8(__m128i __X)
+{
+ return (__m256i)__builtin_shufflevector((__v16qi)__X, (__v16qi)__X, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS
+_mm256_broadcastw_epi16(__m128i __X)
+{
+ return (__m256i)__builtin_shufflevector((__v8hi)__X, (__v8hi)__X, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS
+_mm256_broadcastd_epi32(__m128i __X)
+{
+ return (__m256i)__builtin_shufflevector((__v4si)__X, (__v4si)__X, 0, 0, 0, 0, 0, 0, 0, 0);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS
+_mm256_broadcastq_epi64(__m128i __X)
+{
+ return (__m256i)__builtin_shufflevector((__v2di)__X, (__v2di)__X, 0, 0, 0, 0);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_broadcastb_epi8(__m128i __X)
+{
+ return (__m128i)__builtin_shufflevector((__v16qi)__X, (__v16qi)__X, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_broadcastw_epi16(__m128i __X)
+{
+ return (__m128i)__builtin_shufflevector((__v8hi)__X, (__v8hi)__X, 0, 0, 0, 0, 0, 0, 0, 0);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_broadcastd_epi32(__m128i __X)
+{
+ return (__m128i)__builtin_shufflevector((__v4si)__X, (__v4si)__X, 0, 0, 0, 0);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_broadcastq_epi64(__m128i __X)
+{
+ return (__m128i)__builtin_shufflevector((__v2di)__X, (__v2di)__X, 0, 0);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS
+_mm256_permutevar8x32_epi32(__m256i __a, __m256i __b)
+{
+ return (__m256i)__builtin_ia32_permvarsi256((__v8si)__a, (__v8si)__b);
+}
+
+#define _mm256_permute4x64_pd(V, M) __extension__ ({ \
+ (__m256d)__builtin_shufflevector((__v4df)(__m256d)(V), \
+ (__v4df)_mm256_undefined_pd(), \
+ ((M) >> 0) & 0x3, \
+ ((M) >> 2) & 0x3, \
+ ((M) >> 4) & 0x3, \
+ ((M) >> 6) & 0x3); })
+
+static __inline__ __m256 __DEFAULT_FN_ATTRS
+_mm256_permutevar8x32_ps(__m256 __a, __m256i __b)
+{
+ return (__m256)__builtin_ia32_permvarsf256((__v8sf)__a, (__v8si)__b);
+}
+
+#define _mm256_permute4x64_epi64(V, M) __extension__ ({ \
+ (__m256i)__builtin_shufflevector((__v4di)(__m256i)(V), \
+ (__v4di)_mm256_undefined_si256(), \
+ ((M) >> 0) & 0x3, \
+ ((M) >> 2) & 0x3, \
+ ((M) >> 4) & 0x3, \
+ ((M) >> 6) & 0x3); })
+
+#define _mm256_permute2x128_si256(V1, V2, M) __extension__ ({ \
+ (__m256i)__builtin_ia32_permti256((__m256i)(V1), (__m256i)(V2), (M)); })
+
+#define _mm256_extracti128_si256(V, M) __extension__ ({ \
+ (__m128i)__builtin_shufflevector((__v4di)(__m256i)(V), \
+ (__v4di)_mm256_undefined_si256(), \
+ (((M) & 1) ? 2 : 0), \
+ (((M) & 1) ? 3 : 1) ); })
+
+#define _mm256_inserti128_si256(V1, V2, M) __extension__ ({ \
+ (__m256i)__builtin_shufflevector((__v4di)(__m256i)(V1), \
+ (__v4di)_mm256_castsi128_si256((__m128i)(V2)), \
+ (((M) & 1) ? 0 : 4), \
+ (((M) & 1) ? 1 : 5), \
+ (((M) & 1) ? 4 : 2), \
+ (((M) & 1) ? 5 : 3) ); })
+
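+/* Usage sketch (illustrative): M selects the 128-bit half in both macros;
+ * extract the upper half, then write a 128-bit value back into the lower
+ * half:
+ *
+ *   __m256i v = _mm256_set1_epi32(7);
+ *   __m128i hi = _mm256_extracti128_si256(v, 1);    // upper half
+ *   __m256i w = _mm256_inserti128_si256(v, hi, 0);  // replace lower half
+ */
+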
+static __inline__ __m256i __DEFAULT_FN_ATTRS
+_mm256_maskload_epi32(int const *__X, __m256i __M)
+{
+ return (__m256i)__builtin_ia32_maskloadd256((const __v8si *)__X, (__v8si)__M);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS
+_mm256_maskload_epi64(long long const *__X, __m256i __M)
+{
+ return (__m256i)__builtin_ia32_maskloadq256((const __v4di *)__X, (__v4di)__M);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_maskload_epi32(int const *__X, __m128i __M)
+{
+ return (__m128i)__builtin_ia32_maskloadd((const __v4si *)__X, (__v4si)__M);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_maskload_epi64(long long const *__X, __m128i __M)
+{
+ return (__m128i)__builtin_ia32_maskloadq((const __v2di *)__X, (__v2di)__M);
+}
+
+static __inline__ void __DEFAULT_FN_ATTRS
+_mm256_maskstore_epi32(int *__X, __m256i __M, __m256i __Y)
+{
+ __builtin_ia32_maskstored256((__v8si *)__X, (__v8si)__M, (__v8si)__Y);
+}
+
+static __inline__ void __DEFAULT_FN_ATTRS
+_mm256_maskstore_epi64(long long *__X, __m256i __M, __m256i __Y)
+{
+ __builtin_ia32_maskstoreq256((__v4di *)__X, (__v4di)__M, (__v4di)__Y);
+}
+
+static __inline__ void __DEFAULT_FN_ATTRS
+_mm_maskstore_epi32(int *__X, __m128i __M, __m128i __Y)
+{
+ __builtin_ia32_maskstored((__v4si *)__X, (__v4si)__M, (__v4si)__Y);
+}
+
+static __inline__ void __DEFAULT_FN_ATTRS
+_mm_maskstore_epi64(long long *__X, __m128i __M, __m128i __Y)
+{
+ __builtin_ia32_maskstoreq(( __v2di *)__X, (__v2di)__M, (__v2di)__Y);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS
+_mm256_sllv_epi32(__m256i __X, __m256i __Y)
+{
+ return (__m256i)__builtin_ia32_psllv8si((__v8si)__X, (__v8si)__Y);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_sllv_epi32(__m128i __X, __m128i __Y)
+{
+ return (__m128i)__builtin_ia32_psllv4si((__v4si)__X, (__v4si)__Y);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS
+_mm256_sllv_epi64(__m256i __X, __m256i __Y)
+{
+ return (__m256i)__builtin_ia32_psllv4di((__v4di)__X, (__v4di)__Y);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_sllv_epi64(__m128i __X, __m128i __Y)
+{
+ return (__m128i)__builtin_ia32_psllv2di((__v2di)__X, (__v2di)__Y);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS
+_mm256_srav_epi32(__m256i __X, __m256i __Y)
+{
+ return (__m256i)__builtin_ia32_psrav8si((__v8si)__X, (__v8si)__Y);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_srav_epi32(__m128i __X, __m128i __Y)
+{
+ return (__m128i)__builtin_ia32_psrav4si((__v4si)__X, (__v4si)__Y);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS
+_mm256_srlv_epi32(__m256i __X, __m256i __Y)
+{
+ return (__m256i)__builtin_ia32_psrlv8si((__v8si)__X, (__v8si)__Y);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_srlv_epi32(__m128i __X, __m128i __Y)
+{
+ return (__m128i)__builtin_ia32_psrlv4si((__v4si)__X, (__v4si)__Y);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS
+_mm256_srlv_epi64(__m256i __X, __m256i __Y)
+{
+ return (__m256i)__builtin_ia32_psrlv4di((__v4di)__X, (__v4di)__Y);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_srlv_epi64(__m128i __X, __m128i __Y)
+{
+ return (__m128i)__builtin_ia32_psrlv2di((__v2di)__X, (__v2di)__Y);
+}
+
+#define _mm_mask_i32gather_pd(a, m, i, mask, s) __extension__ ({ \
+ (__m128d)__builtin_ia32_gatherd_pd((__v2df)(__m128i)(a), \
+ (double const *)(m), \
+ (__v4si)(__m128i)(i), \
+ (__v2df)(__m128d)(mask), (s)); })
+
+#define _mm256_mask_i32gather_pd(a, m, i, mask, s) __extension__ ({ \
+ (__m256d)__builtin_ia32_gatherd_pd256((__v4df)(__m256d)(a), \
+ (double const *)(m), \
+ (__v4si)(__m128i)(i), \
+ (__v4df)(__m256d)(mask), (s)); })
+
+#define _mm_mask_i64gather_pd(a, m, i, mask, s) __extension__ ({ \
+ (__m128d)__builtin_ia32_gatherq_pd((__v2df)(__m128d)(a), \
+ (double const *)(m), \
+ (__v2di)(__m128i)(i), \
+ (__v2df)(__m128d)(mask), (s)); })
+
+#define _mm256_mask_i64gather_pd(a, m, i, mask, s) __extension__ ({ \
+ (__m256d)__builtin_ia32_gatherq_pd256((__v4df)(__m256d)(a), \
+ (double const *)(m), \
+ (__v4di)(__m256i)(i), \
+ (__v4df)(__m256d)(mask), (s)); })
+
+#define _mm_mask_i32gather_ps(a, m, i, mask, s) __extension__ ({ \
+ (__m128)__builtin_ia32_gatherd_ps((__v4sf)(__m128)(a), \
+ (float const *)(m), \
+ (__v4si)(__m128i)(i), \
+ (__v4sf)(__m128)(mask), (s)); })
+
+#define _mm256_mask_i32gather_ps(a, m, i, mask, s) __extension__ ({ \
+ (__m256)__builtin_ia32_gatherd_ps256((__v8sf)(__m256)(a), \
+ (float const *)(m), \
+ (__v8si)(__m256i)(i), \
+ (__v8sf)(__m256)(mask), (s)); })
+
+#define _mm_mask_i64gather_ps(a, m, i, mask, s) __extension__ ({ \
+ (__m128)__builtin_ia32_gatherq_ps((__v4sf)(__m128)(a), \
+ (float const *)(m), \
+ (__v2di)(__m128i)(i), \
+ (__v4sf)(__m128)(mask), (s)); })
+
+#define _mm256_mask_i64gather_ps(a, m, i, mask, s) __extension__ ({ \
+ (__m128)__builtin_ia32_gatherq_ps256((__v4sf)(__m128)(a), \
+ (float const *)(m), \
+ (__v4di)(__m256i)(i), \
+ (__v4sf)(__m128)(mask), (s)); })
+
+#define _mm_mask_i32gather_epi32(a, m, i, mask, s) __extension__ ({ \
+ (__m128i)__builtin_ia32_gatherd_d((__v4si)(__m128i)(a), \
+ (int const *)(m), \
+ (__v4si)(__m128i)(i), \
+ (__v4si)(__m128i)(mask), (s)); })
+
+#define _mm256_mask_i32gather_epi32(a, m, i, mask, s) __extension__ ({ \
+ (__m256i)__builtin_ia32_gatherd_d256((__v8si)(__m256i)(a), \
+ (int const *)(m), \
+ (__v8si)(__m256i)(i), \
+ (__v8si)(__m256i)(mask), (s)); })
+
+#define _mm_mask_i64gather_epi32(a, m, i, mask, s) __extension__ ({ \
+ (__m128i)__builtin_ia32_gatherq_d((__v4si)(__m128i)(a), \
+ (int const *)(m), \
+ (__v2di)(__m128i)(i), \
+ (__v4si)(__m128i)(mask), (s)); })
+
+#define _mm256_mask_i64gather_epi32(a, m, i, mask, s) __extension__ ({ \
+ (__m128i)__builtin_ia32_gatherq_d256((__v4si)(__m128i)(a), \
+ (int const *)(m), \
+ (__v4di)(__m256i)(i), \
+ (__v4si)(__m128i)(mask), (s)); })
+
+#define _mm_mask_i32gather_epi64(a, m, i, mask, s) __extension__ ({ \
+ (__m128i)__builtin_ia32_gatherd_q((__v2di)(__m128i)(a), \
+ (long long const *)(m), \
+ (__v4si)(__m128i)(i), \
+ (__v2di)(__m128i)(mask), (s)); })
+
+#define _mm256_mask_i32gather_epi64(a, m, i, mask, s) __extension__ ({ \
+ (__m256i)__builtin_ia32_gatherd_q256((__v4di)(__m256i)(a), \
+ (long long const *)(m), \
+ (__v4si)(__m128i)(i), \
+ (__v4di)(__m256i)(mask), (s)); })
+
+#define _mm_mask_i64gather_epi64(a, m, i, mask, s) __extension__ ({ \
+ (__m128i)__builtin_ia32_gatherq_q((__v2di)(__m128i)(a), \
+ (long long const *)(m), \
+ (__v2di)(__m128i)(i), \
+ (__v2di)(__m128i)(mask), (s)); })
+
+#define _mm256_mask_i64gather_epi64(a, m, i, mask, s) __extension__ ({ \
+ (__m256i)__builtin_ia32_gatherq_q256((__v4di)(__m256i)(a), \
+ (long long const *)(m), \
+ (__v4di)(__m256i)(i), \
+ (__v4di)(__m256i)(mask), (s)); })
+
+#define _mm_i32gather_pd(m, i, s) __extension__ ({ \
+ (__m128d)__builtin_ia32_gatherd_pd((__v2df)_mm_undefined_pd(), \
+ (double const *)(m), \
+ (__v4si)(__m128i)(i), \
+ (__v2df)_mm_cmpeq_pd(_mm_setzero_pd(), \
+ _mm_setzero_pd()), \
+ (s)); })
+
+#define _mm256_i32gather_pd(m, i, s) __extension__ ({ \
+ (__m256d)__builtin_ia32_gatherd_pd256((__v4df)_mm256_undefined_pd(), \
+ (double const *)(m), \
+ (__v4si)(__m128i)(i), \
+ (__v4df)_mm256_cmp_pd(_mm256_setzero_pd(), \
+ _mm256_setzero_pd(), \
+ _CMP_EQ_OQ), \
+ (s)); })
+
+#define _mm_i64gather_pd(m, i, s) __extension__ ({ \
+ (__m128d)__builtin_ia32_gatherq_pd((__v2df)_mm_undefined_pd(), \
+ (double const *)(m), \
+ (__v2di)(__m128i)(i), \
+ (__v2df)_mm_cmpeq_pd(_mm_setzero_pd(), \
+ _mm_setzero_pd()), \
+ (s)); })
+
+#define _mm256_i64gather_pd(m, i, s) __extension__ ({ \
+ (__m256d)__builtin_ia32_gatherq_pd256((__v4df)_mm256_undefined_pd(), \
+ (double const *)(m), \
+ (__v4di)(__m256i)(i), \
+ (__v4df)_mm256_cmp_pd(_mm256_setzero_pd(), \
+ _mm256_setzero_pd(), \
+ _CMP_EQ_OQ), \
+ (s)); })
+
+#define _mm_i32gather_ps(m, i, s) __extension__ ({ \
+ (__m128)__builtin_ia32_gatherd_ps((__v4sf)_mm_undefined_ps(), \
+ (float const *)(m), \
+ (__v4si)(__m128i)(i), \
+ (__v4sf)_mm_cmpeq_ps(_mm_setzero_ps(), \
+ _mm_setzero_ps()), \
+ (s)); })
+
+#define _mm256_i32gather_ps(m, i, s) __extension__ ({ \
+ (__m256)__builtin_ia32_gatherd_ps256((__v8sf)_mm256_undefined_ps(), \
+ (float const *)(m), \
+ (__v8si)(__m256i)(i), \
+ (__v8sf)_mm256_cmp_ps(_mm256_setzero_ps(), \
+ _mm256_setzero_ps(), \
+ _CMP_EQ_OQ), \
+ (s)); })
+
+#define _mm_i64gather_ps(m, i, s) __extension__ ({ \
+ (__m128)__builtin_ia32_gatherq_ps((__v4sf)_mm_undefined_ps(), \
+ (float const *)(m), \
+ (__v2di)(__m128i)(i), \
+ (__v4sf)_mm_cmpeq_ps(_mm_setzero_ps(), \
+ _mm_setzero_ps()), \
+ (s)); })
+
+#define _mm256_i64gather_ps(m, i, s) __extension__ ({ \
+ (__m128)__builtin_ia32_gatherq_ps256((__v4sf)_mm_undefined_ps(), \
+ (float const *)(m), \
+ (__v4di)(__m256i)(i), \
+ (__v4sf)_mm_cmpeq_ps(_mm_setzero_ps(), \
+ _mm_setzero_ps()), \
+ (s)); })
+
+#define _mm_i32gather_epi32(m, i, s) __extension__ ({ \
+ (__m128i)__builtin_ia32_gatherd_d((__v4si)_mm_undefined_si128(), \
+ (int const *)(m), (__v4si)(__m128i)(i), \
+ (__v4si)_mm_set1_epi32(-1), (s)); })
+
+#define _mm256_i32gather_epi32(m, i, s) __extension__ ({ \
+ (__m256i)__builtin_ia32_gatherd_d256((__v8si)_mm256_undefined_si256(), \
+ (int const *)(m), (__v8si)(__m256i)(i), \
+ (__v8si)_mm256_set1_epi32(-1), (s)); })
+
+#define _mm_i64gather_epi32(m, i, s) __extension__ ({ \
+ (__m128i)__builtin_ia32_gatherq_d((__v4si)_mm_undefined_si128(), \
+ (int const *)(m), (__v2di)(__m128i)(i), \
+ (__v4si)_mm_set1_epi32(-1), (s)); })
+
+#define _mm256_i64gather_epi32(m, i, s) __extension__ ({ \
+ (__m128i)__builtin_ia32_gatherq_d256((__v4si)_mm_undefined_si128(), \
+ (int const *)(m), (__v4di)(__m256i)(i), \
+ (__v4si)_mm_set1_epi32(-1), (s)); })
+
+#define _mm_i32gather_epi64(m, i, s) __extension__ ({ \
+ (__m128i)__builtin_ia32_gatherd_q((__v2di)_mm_undefined_si128(), \
+ (long long const *)(m), \
+ (__v4si)(__m128i)(i), \
+ (__v2di)_mm_set1_epi64x(-1), (s)); })
+
+#define _mm256_i32gather_epi64(m, i, s) __extension__ ({ \
+ (__m256i)__builtin_ia32_gatherd_q256((__v4di)_mm256_undefined_si256(), \
+ (long long const *)(m), \
+ (__v4si)(__m128i)(i), \
+ (__v4di)_mm256_set1_epi64x(-1), (s)); })
+
+#define _mm_i64gather_epi64(m, i, s) __extension__ ({ \
+ (__m128i)__builtin_ia32_gatherq_q((__v2di)_mm_undefined_si128(), \
+ (long long const *)(m), \
+ (__v2di)(__m128i)(i), \
+ (__v2di)_mm_set1_epi64x(-1), (s)); })
+
+#define _mm256_i64gather_epi64(m, i, s) __extension__ ({ \
+ (__m256i)__builtin_ia32_gatherq_q256((__v4di)_mm256_undefined_si256(), \
+ (long long const *)(m), \
+ (__v4di)(__m256i)(i), \
+ (__v4di)_mm256_set1_epi64x(-1), (s)); })
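+/* Usage sketch (illustrative; `table` is a hypothetical array): gather four
+ * ints through 32-bit indices; the scale is in bytes, so a scale of 4 makes
+ * the indices element indices:
+ *
+ *   int table[16];
+ *   __m128i idx = _mm_set_epi32(3, 2, 1, 0);
+ *   __m128i r = _mm_i32gather_epi32(table, idx, 4);  // table[0]..table[3]
+ */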
+
+#undef __DEFAULT_FN_ATTRS
+
+#endif /* __AVX2INTRIN_H */
diff --git a/current/clang-include/avx512bwintrin.h b/current/clang-include/avx512bwintrin.h
new file mode 100644
index 0000000..d3c5a6c
--- /dev/null
+++ b/current/clang-include/avx512bwintrin.h
@@ -0,0 +1,2410 @@
+/*===------------- avx512bwintrin.h - AVX512BW intrinsics ------------------===
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+ * THE SOFTWARE.
+ *
+ *===-----------------------------------------------------------------------===
+ */
+#ifndef __IMMINTRIN_H
+#error "Never use <avx512bwintrin.h> directly; include <immintrin.h> instead."
+#endif
+
+#ifndef __AVX512BWINTRIN_H
+#define __AVX512BWINTRIN_H
+
+typedef unsigned int __mmask32;
+typedef unsigned long long __mmask64;
+
+/* Define the default attributes for the functions in this file. */
+#define __DEFAULT_FN_ATTRS __attribute__((__always_inline__, __nodebug__, __target__("avx512bw")))
+
+static __inline __m512i __DEFAULT_FN_ATTRS
+_mm512_setzero_qi(void) {
+ return (__m512i)(__v64qi){ 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0 };
+}
+
+static __inline __m512i __DEFAULT_FN_ATTRS
+_mm512_setzero_hi(void) {
+ return (__m512i)(__v32hi){ 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0 };
+}
+
+/* Integer compare */
+
+static __inline__ __mmask64 __DEFAULT_FN_ATTRS
+_mm512_cmpeq_epi8_mask(__m512i __a, __m512i __b) {
+ return (__mmask64)__builtin_ia32_pcmpeqb512_mask((__v64qi)__a, (__v64qi)__b,
+ (__mmask64)-1);
+}
+
+static __inline__ __mmask64 __DEFAULT_FN_ATTRS
+_mm512_mask_cmpeq_epi8_mask(__mmask64 __u, __m512i __a, __m512i __b) {
+ return (__mmask64)__builtin_ia32_pcmpeqb512_mask((__v64qi)__a, (__v64qi)__b,
+ __u);
+}
+
+static __inline__ __mmask64 __DEFAULT_FN_ATTRS
+_mm512_cmpeq_epu8_mask(__m512i __a, __m512i __b) {
+ return (__mmask64)__builtin_ia32_ucmpb512_mask((__v64qi)__a, (__v64qi)__b, 0,
+ (__mmask64)-1);
+}
+
+static __inline__ __mmask64 __DEFAULT_FN_ATTRS
+_mm512_mask_cmpeq_epu8_mask(__mmask64 __u, __m512i __a, __m512i __b) {
+ return (__mmask64)__builtin_ia32_ucmpb512_mask((__v64qi)__a, (__v64qi)__b, 0,
+ __u);
+}
+
+static __inline__ __mmask32 __DEFAULT_FN_ATTRS
+_mm512_cmpeq_epi16_mask(__m512i __a, __m512i __b) {
+ return (__mmask32)__builtin_ia32_pcmpeqw512_mask((__v32hi)__a, (__v32hi)__b,
+ (__mmask32)-1);
+}
+
+static __inline__ __mmask32 __DEFAULT_FN_ATTRS
+_mm512_mask_cmpeq_epi16_mask(__mmask32 __u, __m512i __a, __m512i __b) {
+ return (__mmask32)__builtin_ia32_pcmpeqw512_mask((__v32hi)__a, (__v32hi)__b,
+ __u);
+}
+
+static __inline__ __mmask32 __DEFAULT_FN_ATTRS
+_mm512_cmpeq_epu16_mask(__m512i __a, __m512i __b) {
+ return (__mmask32)__builtin_ia32_ucmpw512_mask((__v32hi)__a, (__v32hi)__b, 0,
+ (__mmask32)-1);
+}
+
+static __inline__ __mmask32 __DEFAULT_FN_ATTRS
+_mm512_mask_cmpeq_epu16_mask(__mmask32 __u, __m512i __a, __m512i __b) {
+ return (__mmask32)__builtin_ia32_ucmpw512_mask((__v32hi)__a, (__v32hi)__b, 0,
+ __u);
+}
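+/* Usage sketch (illustrative): unlike the SSE/AVX compares, these return a
+ * bitmask with one bit per element rather than a vector of lane masks:
+ *
+ *   __m512i a = _mm512_set1_epi8(1);
+ *   __m512i b = _mm512_set1_epi8(1);
+ *   __mmask64 k = _mm512_cmpeq_epi8_mask(a, b);  // k == ~0ULL: all 64 equal
+ */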
+
+static __inline__ __mmask64 __DEFAULT_FN_ATTRS
+_mm512_cmpge_epi8_mask(__m512i __a, __m512i __b) {
+ return (__mmask64)__builtin_ia32_cmpb512_mask((__v64qi)__a, (__v64qi)__b, 5,
+ (__mmask64)-1);
+}
+
+static __inline__ __mmask64 __DEFAULT_FN_ATTRS
+_mm512_mask_cmpge_epi8_mask(__mmask64 __u, __m512i __a, __m512i __b) {
+ return (__mmask64)__builtin_ia32_cmpb512_mask((__v64qi)__a, (__v64qi)__b, 5,
+ __u);
+}
+
+static __inline__ __mmask64 __DEFAULT_FN_ATTRS
+_mm512_cmpge_epu8_mask(__m512i __a, __m512i __b) {
+ return (__mmask64)__builtin_ia32_ucmpb512_mask((__v64qi)__a, (__v64qi)__b, 5,
+ (__mmask64)-1);
+}
+
+static __inline__ __mmask64 __DEFAULT_FN_ATTRS
+_mm512_mask_cmpge_epu8_mask(__mmask64 __u, __m512i __a, __m512i __b) {
+ return (__mmask64)__builtin_ia32_ucmpb512_mask((__v64qi)__a, (__v64qi)__b, 5,
+ __u);
+}
+
+static __inline__ __mmask32 __DEFAULT_FN_ATTRS
+_mm512_cmpge_epi16_mask(__m512i __a, __m512i __b) {
+ return (__mmask32)__builtin_ia32_cmpw512_mask((__v32hi)__a, (__v32hi)__b, 5,
+ (__mmask32)-1);
+}
+
+static __inline__ __mmask32 __DEFAULT_FN_ATTRS
+_mm512_mask_cmpge_epi16_mask(__mmask32 __u, __m512i __a, __m512i __b) {
+ return (__mmask32)__builtin_ia32_cmpw512_mask((__v32hi)__a, (__v32hi)__b, 5,
+ __u);
+}
+
+static __inline__ __mmask32 __DEFAULT_FN_ATTRS
+_mm512_cmpge_epu16_mask(__m512i __a, __m512i __b) {
+ return (__mmask32)__builtin_ia32_ucmpw512_mask((__v32hi)__a, (__v32hi)__b, 5,
+ (__mmask32)-1);
+}
+
+static __inline__ __mmask32 __DEFAULT_FN_ATTRS
+_mm512_mask_cmpge_epu16_mask(__mmask32 __u, __m512i __a, __m512i __b) {
+ return (__mmask32)__builtin_ia32_ucmpw512_mask((__v32hi)__a, (__v32hi)__b, 5,
+ __u);
+}
+
+static __inline__ __mmask64 __DEFAULT_FN_ATTRS
+_mm512_cmpgt_epi8_mask(__m512i __a, __m512i __b) {
+ return (__mmask64)__builtin_ia32_pcmpgtb512_mask((__v64qi)__a, (__v64qi)__b,
+ (__mmask64)-1);
+}
+
+static __inline__ __mmask64 __DEFAULT_FN_ATTRS
+_mm512_mask_cmpgt_epi8_mask(__mmask64 __u, __m512i __a, __m512i __b) {
+ return (__mmask64)__builtin_ia32_pcmpgtb512_mask((__v64qi)__a, (__v64qi)__b,
+ __u);
+}
+
+static __inline__ __mmask64 __DEFAULT_FN_ATTRS
+_mm512_cmpgt_epu8_mask(__m512i __a, __m512i __b) {
+ return (__mmask64)__builtin_ia32_ucmpb512_mask((__v64qi)__a, (__v64qi)__b, 6,
+ (__mmask64)-1);
+}
+
+static __inline__ __mmask64 __DEFAULT_FN_ATTRS
+_mm512_mask_cmpgt_epu8_mask(__mmask64 __u, __m512i __a, __m512i __b) {
+ return (__mmask64)__builtin_ia32_ucmpb512_mask((__v64qi)__a, (__v64qi)__b, 6,
+ __u);
+}
+
+static __inline__ __mmask32 __DEFAULT_FN_ATTRS
+_mm512_cmpgt_epi16_mask(__m512i __a, __m512i __b) {
+ return (__mmask32)__builtin_ia32_pcmpgtw512_mask((__v32hi)__a, (__v32hi)__b,
+ (__mmask32)-1);
+}
+
+static __inline__ __mmask32 __DEFAULT_FN_ATTRS
+_mm512_mask_cmpgt_epi16_mask(__mmask32 __u, __m512i __a, __m512i __b) {
+ return (__mmask32)__builtin_ia32_pcmpgtw512_mask((__v32hi)__a, (__v32hi)__b,
+ __u);
+}
+
+static __inline__ __mmask32 __DEFAULT_FN_ATTRS
+_mm512_cmpgt_epu16_mask(__m512i __a, __m512i __b) {
+ return (__mmask32)__builtin_ia32_ucmpw512_mask((__v32hi)__a, (__v32hi)__b, 6,
+ (__mmask32)-1);
+}
+
+static __inline__ __mmask32 __DEFAULT_FN_ATTRS
+_mm512_mask_cmpgt_epu16_mask(__mmask32 __u, __m512i __a, __m512i __b) {
+ return (__mmask32)__builtin_ia32_ucmpw512_mask((__v32hi)__a, (__v32hi)__b, 6,
+ __u);
+}
+
+static __inline__ __mmask64 __DEFAULT_FN_ATTRS
+_mm512_cmple_epi8_mask(__m512i __a, __m512i __b) {
+ return (__mmask64)__builtin_ia32_cmpb512_mask((__v64qi)__a, (__v64qi)__b, 2,
+ (__mmask64)-1);
+}
+
+static __inline__ __mmask64 __DEFAULT_FN_ATTRS
+_mm512_mask_cmple_epi8_mask(__mmask64 __u, __m512i __a, __m512i __b) {
+ return (__mmask64)__builtin_ia32_cmpb512_mask((__v64qi)__a, (__v64qi)__b, 2,
+ __u);
+}
+
+static __inline__ __mmask64 __DEFAULT_FN_ATTRS
+_mm512_cmple_epu8_mask(__m512i __a, __m512i __b) {
+ return (__mmask64)__builtin_ia32_ucmpb512_mask((__v64qi)__a, (__v64qi)__b, 2,
+ (__mmask64)-1);
+}
+
+static __inline__ __mmask64 __DEFAULT_FN_ATTRS
+_mm512_mask_cmple_epu8_mask(__mmask64 __u, __m512i __a, __m512i __b) {
+ return (__mmask64)__builtin_ia32_ucmpb512_mask((__v64qi)__a, (__v64qi)__b, 2,
+ __u);
+}
+
+static __inline__ __mmask32 __DEFAULT_FN_ATTRS
+_mm512_cmple_epi16_mask(__m512i __a, __m512i __b) {
+ return (__mmask32)__builtin_ia32_cmpw512_mask((__v32hi)__a, (__v32hi)__b, 2,
+ (__mmask32)-1);
+}
+
+static __inline__ __mmask32 __DEFAULT_FN_ATTRS
+_mm512_mask_cmple_epi16_mask(__mmask32 __u, __m512i __a, __m512i __b) {
+ return (__mmask32)__builtin_ia32_cmpw512_mask((__v32hi)__a, (__v32hi)__b, 2,
+ __u);
+}
+
+static __inline__ __mmask32 __DEFAULT_FN_ATTRS
+_mm512_cmple_epu16_mask(__m512i __a, __m512i __b) {
+ return (__mmask32)__builtin_ia32_ucmpw512_mask((__v32hi)__a, (__v32hi)__b, 2,
+ (__mmask32)-1);
+}
+
+static __inline__ __mmask32 __DEFAULT_FN_ATTRS
+_mm512_mask_cmple_epu16_mask(__mmask32 __u, __m512i __a, __m512i __b) {
+ return (__mmask32)__builtin_ia32_ucmpw512_mask((__v32hi)__a, (__v32hi)__b, 2,
+ __u);
+}
+
+static __inline__ __mmask64 __DEFAULT_FN_ATTRS
+_mm512_cmplt_epi8_mask(__m512i __a, __m512i __b) {
+ return (__mmask64)__builtin_ia32_cmpb512_mask((__v64qi)__a, (__v64qi)__b, 1,
+ (__mmask64)-1);
+}
+
+static __inline__ __mmask64 __DEFAULT_FN_ATTRS
+_mm512_mask_cmplt_epi8_mask(__mmask64 __u, __m512i __a, __m512i __b) {
+ return (__mmask64)__builtin_ia32_cmpb512_mask((__v64qi)__a, (__v64qi)__b, 1,
+ __u);
+}
+
+static __inline__ __mmask64 __DEFAULT_FN_ATTRS
+_mm512_cmplt_epu8_mask(__m512i __a, __m512i __b) {
+ return (__mmask64)__builtin_ia32_ucmpb512_mask((__v64qi)__a, (__v64qi)__b, 1,
+ (__mmask64)-1);
+}
+
+static __inline__ __mmask64 __DEFAULT_FN_ATTRS
+_mm512_mask_cmplt_epu8_mask(__mmask64 __u, __m512i __a, __m512i __b) {
+ return (__mmask64)__builtin_ia32_ucmpb512_mask((__v64qi)__a, (__v64qi)__b, 1,
+ __u);
+}
+
+static __inline__ __mmask32 __DEFAULT_FN_ATTRS
+_mm512_cmplt_epi16_mask(__m512i __a, __m512i __b) {
+ return (__mmask32)__builtin_ia32_cmpw512_mask((__v32hi)__a, (__v32hi)__b, 1,
+ (__mmask32)-1);
+}
+
+static __inline__ __mmask32 __DEFAULT_FN_ATTRS
+_mm512_mask_cmplt_epi16_mask(__mmask32 __u, __m512i __a, __m512i __b) {
+ return (__mmask32)__builtin_ia32_cmpw512_mask((__v32hi)__a, (__v32hi)__b, 1,
+ __u);
+}
+
+static __inline__ __mmask32 __DEFAULT_FN_ATTRS
+_mm512_cmplt_epu16_mask(__m512i __a, __m512i __b) {
+ return (__mmask32)__builtin_ia32_ucmpw512_mask((__v32hi)__a, (__v32hi)__b, 1,
+ (__mmask32)-1);
+}
+
+static __inline__ __mmask32 __DEFAULT_FN_ATTRS
+_mm512_mask_cmplt_epu16_mask(__mmask32 __u, __m512i __a, __m512i __b) {
+ return (__mmask32)__builtin_ia32_ucmpw512_mask((__v32hi)__a, (__v32hi)__b, 1,
+ __u);
+}
+
+static __inline__ __mmask64 __DEFAULT_FN_ATTRS
+_mm512_cmpneq_epi8_mask(__m512i __a, __m512i __b) {
+ return (__mmask64)__builtin_ia32_cmpb512_mask((__v64qi)__a, (__v64qi)__b, 4,
+ (__mmask64)-1);
+}
+
+static __inline__ __mmask64 __DEFAULT_FN_ATTRS
+_mm512_mask_cmpneq_epi8_mask(__mmask64 __u, __m512i __a, __m512i __b) {
+ return (__mmask64)__builtin_ia32_cmpb512_mask((__v64qi)__a, (__v64qi)__b, 4,
+ __u);
+}
+
+static __inline__ __mmask64 __DEFAULT_FN_ATTRS
+_mm512_cmpneq_epu8_mask(__m512i __a, __m512i __b) {
+ return (__mmask64)__builtin_ia32_ucmpb512_mask((__v64qi)__a, (__v64qi)__b, 4,
+ (__mmask64)-1);
+}
+
+static __inline__ __mmask64 __DEFAULT_FN_ATTRS
+_mm512_mask_cmpneq_epu8_mask(__mmask64 __u, __m512i __a, __m512i __b) {
+ return (__mmask64)__builtin_ia32_ucmpb512_mask((__v64qi)__a, (__v64qi)__b, 4,
+ __u);
+}
+
+static __inline__ __mmask32 __DEFAULT_FN_ATTRS
+_mm512_cmpneq_epi16_mask(__m512i __a, __m512i __b) {
+ return (__mmask32)__builtin_ia32_cmpw512_mask((__v32hi)__a, (__v32hi)__b, 4,
+ (__mmask32)-1);
+}
+
+static __inline__ __mmask32 __DEFAULT_FN_ATTRS
+_mm512_mask_cmpneq_epi16_mask(__mmask32 __u, __m512i __a, __m512i __b) {
+ return (__mmask32)__builtin_ia32_cmpw512_mask((__v32hi)__a, (__v32hi)__b, 4,
+ __u);
+}
+
+static __inline__ __mmask32 __DEFAULT_FN_ATTRS
+_mm512_cmpneq_epu16_mask(__m512i __a, __m512i __b) {
+ return (__mmask32)__builtin_ia32_ucmpw512_mask((__v32hi)__a, (__v32hi)__b, 4,
+ (__mmask32)-1);
+}
+
+static __inline__ __mmask32 __DEFAULT_FN_ATTRS
+_mm512_mask_cmpneq_epu16_mask(__mmask32 __u, __m512i __a, __m512i __b) {
+ return (__mmask32)__builtin_ia32_ucmpw512_mask((__v32hi)__a, (__v32hi)__b, 4,
+ __u);
+}
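+
+/* Note: the third argument to the __builtin_ia32_[u]cmp{b,w}512_mask builtins
+   above is the comparison predicate: 0 = EQ, 1 = LT, 2 = LE, 4 = NE,
+   5 = GE (NLT), 6 = GT (NLE). The cmp (signed) and ucmp (unsigned) variants
+   differ only in how elements are ordered. */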
+
+static __inline__ __m512i __DEFAULT_FN_ATTRS
+_mm512_add_epi8 (__m512i __A, __m512i __B) {
+ return (__m512i) ((__v64qu) __A + (__v64qu) __B);
+}
+
+static __inline__ __m512i __DEFAULT_FN_ATTRS
+_mm512_mask_add_epi8 (__m512i __W, __mmask64 __U, __m512i __A, __m512i __B) {
+ return (__m512i) __builtin_ia32_paddb512_mask ((__v64qi) __A,
+ (__v64qi) __B,
+ (__v64qi) __W,
+ (__mmask64) __U);
+}
+
+static __inline__ __m512i __DEFAULT_FN_ATTRS
+_mm512_maskz_add_epi8 (__mmask64 __U, __m512i __A, __m512i __B) {
+ return (__m512i) __builtin_ia32_paddb512_mask ((__v64qi) __A,
+ (__v64qi) __B,
+ (__v64qi) _mm512_setzero_qi(),
+ (__mmask64) __U);
+}
+
+static __inline__ __m512i __DEFAULT_FN_ATTRS
+_mm512_sub_epi8 (__m512i __A, __m512i __B) {
+ return (__m512i) ((__v64qu) __A - (__v64qu) __B);
+}
+
+static __inline__ __m512i __DEFAULT_FN_ATTRS
+_mm512_mask_sub_epi8 (__m512i __W, __mmask64 __U, __m512i __A, __m512i __B) {
+ return (__m512i) __builtin_ia32_psubb512_mask ((__v64qi) __A,
+ (__v64qi) __B,
+ (__v64qi) __W,
+ (__mmask64) __U);
+}
+
+static __inline__ __m512i __DEFAULT_FN_ATTRS
+_mm512_maskz_sub_epi8 (__mmask64 __U, __m512i __A, __m512i __B) {
+ return (__m512i) __builtin_ia32_psubb512_mask ((__v64qi) __A,
+ (__v64qi) __B,
+ (__v64qi) _mm512_setzero_qi(),
+ (__mmask64) __U);
+}
+
+static __inline__ __m512i __DEFAULT_FN_ATTRS
+_mm512_add_epi16 (__m512i __A, __m512i __B) {
+ return (__m512i) ((__v32hu) __A + (__v32hu) __B);
+}
+
+static __inline__ __m512i __DEFAULT_FN_ATTRS
+_mm512_mask_add_epi16 (__m512i __W, __mmask32 __U, __m512i __A, __m512i __B) {
+ return (__m512i) __builtin_ia32_paddw512_mask ((__v32hi) __A,
+ (__v32hi) __B,
+ (__v32hi) __W,
+ (__mmask32) __U);
+}
+
+static __inline__ __m512i __DEFAULT_FN_ATTRS
+_mm512_maskz_add_epi16 (__mmask32 __U, __m512i __A, __m512i __B) {
+ return (__m512i) __builtin_ia32_paddw512_mask ((__v32hi) __A,
+ (__v32hi) __B,
+ (__v32hi) _mm512_setzero_hi(),
+ (__mmask32) __U);
+}
+
+static __inline__ __m512i __DEFAULT_FN_ATTRS
+_mm512_sub_epi16 (__m512i __A, __m512i __B) {
+ return (__m512i) ((__v32hu) __A - (__v32hu) __B);
+}
+
+static __inline__ __m512i __DEFAULT_FN_ATTRS
+_mm512_mask_sub_epi16 (__m512i __W, __mmask32 __U, __m512i __A, __m512i __B) {
+ return (__m512i) __builtin_ia32_psubw512_mask ((__v32hi) __A,
+ (__v32hi) __B,
+ (__v32hi) __W,
+ (__mmask32) __U);
+}
+
+static __inline__ __m512i __DEFAULT_FN_ATTRS
+_mm512_maskz_sub_epi16 (__mmask32 __U, __m512i __A, __m512i __B) {
+ return (__m512i) __builtin_ia32_psubw512_mask ((__v32hi) __A,
+ (__v32hi) __B,
+ (__v32hi) _mm512_setzero_hi(),
+ (__mmask32) __U);
+}
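+
+/* Usage sketch (illustrative only; the __example_* helper is hypothetical):
+   the mask variants take result elements from the operation where the mask
+   bit is set and pass __W through elsewhere (maskz substitutes zero). */
+static __inline__ __m512i __DEFAULT_FN_ATTRS
+__example_add_even_bytes(__m512i __a, __m512i __b) {
+  /* 0x5555... has every even bit set, so only even-indexed bytes are summed;
+     odd-indexed bytes pass through from __a unchanged. */
+  return _mm512_mask_add_epi8(__a, 0x5555555555555555ULL, __a, __b);
+}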
+
+static __inline__ __m512i __DEFAULT_FN_ATTRS
+_mm512_mullo_epi16 (__m512i __A, __m512i __B) {
+ return (__m512i) ((__v32hu) __A * (__v32hu) __B);
+}
+
+static __inline__ __m512i __DEFAULT_FN_ATTRS
+_mm512_mask_mullo_epi16 (__m512i __W, __mmask32 __U, __m512i __A, __m512i __B) {
+ return (__m512i) __builtin_ia32_pmullw512_mask ((__v32hi) __A,
+ (__v32hi) __B,
+ (__v32hi) __W,
+ (__mmask32) __U);
+}
+
+static __inline__ __m512i __DEFAULT_FN_ATTRS
+_mm512_maskz_mullo_epi16 (__mmask32 __U, __m512i __A, __m512i __B) {
+ return (__m512i) __builtin_ia32_pmullw512_mask ((__v32hi) __A,
+ (__v32hi) __B,
+ (__v32hi) _mm512_setzero_hi(),
+ (__mmask32) __U);
+}
+
+static __inline__ __m512i __DEFAULT_FN_ATTRS
+_mm512_mask_blend_epi8 (__mmask64 __U, __m512i __A, __m512i __W)
+{
+ return (__m512i) __builtin_ia32_selectb_512 ((__mmask64) __U,
+ (__v64qi) __W,
+ (__v64qi) __A);
+}
+
+static __inline__ __m512i __DEFAULT_FN_ATTRS
+_mm512_mask_blend_epi16 (__mmask32 __U, __m512i __A, __m512i __W)
+{
+ return (__m512i) __builtin_ia32_selectw_512 ((__mmask32) __U,
+ (__v32hi) __W,
+ (__v32hi) __A);
+}
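+
+/* Note: for the blends above, each result element is taken from __W where the
+   corresponding bit of __U is set and from __A where it is clear. */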
+
+static __inline__ __m512i __DEFAULT_FN_ATTRS
+_mm512_abs_epi8 (__m512i __A)
+{
+ return (__m512i) __builtin_ia32_pabsb512_mask ((__v64qi) __A,
+ (__v64qi) _mm512_setzero_qi(),
+ (__mmask64) -1);
+}
+
+static __inline__ __m512i __DEFAULT_FN_ATTRS
+_mm512_mask_abs_epi8 (__m512i __W, __mmask64 __U, __m512i __A)
+{
+ return (__m512i) __builtin_ia32_pabsb512_mask ((__v64qi) __A,
+ (__v64qi) __W,
+ (__mmask64) __U);
+}
+
+static __inline__ __m512i __DEFAULT_FN_ATTRS
+_mm512_maskz_abs_epi8 (__mmask64 __U, __m512i __A)
+{
+ return (__m512i) __builtin_ia32_pabsb512_mask ((__v64qi) __A,
+ (__v64qi) _mm512_setzero_qi(),
+ (__mmask64) __U);
+}
+
+static __inline__ __m512i __DEFAULT_FN_ATTRS
+_mm512_abs_epi16 (__m512i __A)
+{
+ return (__m512i) __builtin_ia32_pabsw512_mask ((__v32hi) __A,
+ (__v32hi) _mm512_setzero_hi(),
+ (__mmask32) -1);
+}
+
+static __inline__ __m512i __DEFAULT_FN_ATTRS
+_mm512_mask_abs_epi16 (__m512i __W, __mmask32 __U, __m512i __A)
+{
+ return (__m512i) __builtin_ia32_pabsw512_mask ((__v32hi) __A,
+ (__v32hi) __W,
+ (__mmask32) __U);
+}
+
+static __inline__ __m512i __DEFAULT_FN_ATTRS
+_mm512_maskz_abs_epi16 (__mmask32 __U, __m512i __A)
+{
+ return (__m512i) __builtin_ia32_pabsw512_mask ((__v32hi) __A,
+ (__v32hi) _mm512_setzero_hi(),
+ (__mmask32) __U);
+}
+
+static __inline__ __m512i __DEFAULT_FN_ATTRS
+_mm512_packs_epi32 (__m512i __A, __m512i __B)
+{
+ return (__m512i) __builtin_ia32_packssdw512_mask ((__v16si) __A,
+ (__v16si) __B,
+ (__v32hi) _mm512_setzero_hi(),
+ (__mmask32) -1);
+}
+
+static __inline__ __m512i __DEFAULT_FN_ATTRS
+_mm512_maskz_packs_epi32 (__mmask32 __M, __m512i __A, __m512i __B)
+{
+ return (__m512i) __builtin_ia32_packssdw512_mask ((__v16si) __A,
+ (__v16si) __B,
+ (__v32hi) _mm512_setzero_hi(),
+ __M);
+}
+
+static __inline__ __m512i __DEFAULT_FN_ATTRS
+_mm512_mask_packs_epi32 (__m512i __W, __mmask32 __M, __m512i __A,
+ __m512i __B)
+{
+ return (__m512i) __builtin_ia32_packssdw512_mask ((__v16si) __A,
+ (__v16si) __B,
+ (__v32hi) __W,
+ __M);
+}
+
+static __inline__ __m512i __DEFAULT_FN_ATTRS
+_mm512_packs_epi16 (__m512i __A, __m512i __B)
+{
+ return (__m512i) __builtin_ia32_packsswb512_mask ((__v32hi) __A,
+ (__v32hi) __B,
+ (__v64qi) _mm512_setzero_qi(),
+ (__mmask64) -1);
+}
+
+static __inline__ __m512i __DEFAULT_FN_ATTRS
+_mm512_mask_packs_epi16 (__m512i __W, __mmask64 __M, __m512i __A,
+ __m512i __B)
+{
+ return (__m512i) __builtin_ia32_packsswb512_mask ((__v32hi) __A,
+ (__v32hi) __B,
+ (__v64qi) __W,
+ (__mmask64) __M);
+}
+
+static __inline__ __m512i __DEFAULT_FN_ATTRS
+_mm512_maskz_packs_epi16 (__mmask64 __M, __m512i __A, __m512i __B)
+{
+ return (__m512i) __builtin_ia32_packsswb512_mask ((__v32hi) __A,
+ (__v32hi) __B,
+ (__v64qi) _mm512_setzero_qi(),
+ __M);
+}
+
+static __inline__ __m512i __DEFAULT_FN_ATTRS
+_mm512_packus_epi32 (__m512i __A, __m512i __B)
+{
+ return (__m512i) __builtin_ia32_packusdw512_mask ((__v16si) __A,
+ (__v16si) __B,
+ (__v32hi) _mm512_setzero_hi(),
+ (__mmask32) -1);
+}
+
+static __inline__ __m512i __DEFAULT_FN_ATTRS
+_mm512_maskz_packus_epi32 (__mmask32 __M, __m512i __A, __m512i __B)
+{
+ return (__m512i) __builtin_ia32_packusdw512_mask ((__v16si) __A,
+ (__v16si) __B,
+ (__v32hi) _mm512_setzero_hi(),
+ __M);
+}
+
+static __inline__ __m512i __DEFAULT_FN_ATTRS
+_mm512_mask_packus_epi32 (__m512i __W, __mmask32 __M, __m512i __A,
+ __m512i __B)
+{
+ return (__m512i) __builtin_ia32_packusdw512_mask ((__v16si) __A,
+ (__v16si) __B,
+ (__v32hi) __W,
+ __M);
+}
+
+static __inline__ __m512i __DEFAULT_FN_ATTRS
+_mm512_packus_epi16 (__m512i __A, __m512i __B)
+{
+ return (__m512i) __builtin_ia32_packuswb512_mask ((__v32hi) __A,
+ (__v32hi) __B,
+ (__v64qi) _mm512_setzero_qi(),
+ (__mmask64) -1);
+}
+
+static __inline__ __m512i __DEFAULT_FN_ATTRS
+_mm512_mask_packus_epi16 (__m512i __W, __mmask64 __M, __m512i __A,
+ __m512i __B)
+{
+ return (__m512i) __builtin_ia32_packuswb512_mask ((__v32hi) __A,
+ (__v32hi) __B,
+ (__v64qi) __W,
+ (__mmask64) __M);
+}
+
+static __inline__ __m512i __DEFAULT_FN_ATTRS
+_mm512_maskz_packus_epi16 (__mmask64 __M, __m512i __A, __m512i __B)
+{
+ return (__m512i) __builtin_ia32_packuswb512_mask ((__v32hi) __A,
+ (__v32hi) __B,
+ (__v64qi) _mm512_setzero_qi(),
+ (__mmask64) __M);
+}
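+
+/* Note: the pack operations above narrow with saturation -- packs_* to the
+   signed range of the narrower type, packus_* to its unsigned range -- and
+   interleave the two sources within each 128-bit lane. */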
+
+static __inline__ __m512i __DEFAULT_FN_ATTRS
+_mm512_adds_epi8 (__m512i __A, __m512i __B)
+{
+ return (__m512i) __builtin_ia32_paddsb512_mask ((__v64qi) __A,
+ (__v64qi) __B,
+ (__v64qi) _mm512_setzero_qi(),
+ (__mmask64) -1);
+}
+
+static __inline__ __m512i __DEFAULT_FN_ATTRS
+_mm512_mask_adds_epi8 (__m512i __W, __mmask64 __U, __m512i __A,
+ __m512i __B)
+{
+ return (__m512i) __builtin_ia32_paddsb512_mask ((__v64qi) __A,
+ (__v64qi) __B,
+ (__v64qi) __W,
+ (__mmask64) __U);
+}
+
+static __inline__ __m512i __DEFAULT_FN_ATTRS
+_mm512_maskz_adds_epi8 (__mmask64 __U, __m512i __A, __m512i __B)
+{
+ return (__m512i) __builtin_ia32_paddsb512_mask ((__v64qi) __A,
+ (__v64qi) __B,
+ (__v64qi) _mm512_setzero_qi(),
+ (__mmask64) __U);
+}
+
+static __inline__ __m512i __DEFAULT_FN_ATTRS
+_mm512_adds_epi16 (__m512i __A, __m512i __B)
+{
+ return (__m512i) __builtin_ia32_paddsw512_mask ((__v32hi) __A,
+ (__v32hi) __B,
+ (__v32hi) _mm512_setzero_hi(),
+ (__mmask32) -1);
+}
+
+static __inline__ __m512i __DEFAULT_FN_ATTRS
+_mm512_mask_adds_epi16 (__m512i __W, __mmask32 __U, __m512i __A,
+ __m512i __B)
+{
+ return (__m512i) __builtin_ia32_paddsw512_mask ((__v32hi) __A,
+ (__v32hi) __B,
+ (__v32hi) __W,
+ (__mmask32) __U);
+}
+
+static __inline__ __m512i __DEFAULT_FN_ATTRS
+_mm512_maskz_adds_epi16 (__mmask32 __U, __m512i __A, __m512i __B)
+{
+ return (__m512i) __builtin_ia32_paddsw512_mask ((__v32hi) __A,
+ (__v32hi) __B,
+ (__v32hi) _mm512_setzero_hi(),
+ (__mmask32) __U);
+}
+
+static __inline__ __m512i __DEFAULT_FN_ATTRS
+_mm512_adds_epu8 (__m512i __A, __m512i __B)
+{
+ return (__m512i) __builtin_ia32_paddusb512_mask ((__v64qi) __A,
+ (__v64qi) __B,
+ (__v64qi) _mm512_setzero_qi(),
+ (__mmask64) -1);
+}
+
+static __inline__ __m512i __DEFAULT_FN_ATTRS
+_mm512_mask_adds_epu8 (__m512i __W, __mmask64 __U, __m512i __A,
+ __m512i __B)
+{
+ return (__m512i) __builtin_ia32_paddusb512_mask ((__v64qi) __A,
+ (__v64qi) __B,
+ (__v64qi) __W,
+ (__mmask64) __U);
+}
+
+static __inline__ __m512i __DEFAULT_FN_ATTRS
+_mm512_maskz_adds_epu8 (__mmask64 __U, __m512i __A, __m512i __B)
+{
+ return (__m512i) __builtin_ia32_paddusb512_mask ((__v64qi) __A,
+ (__v64qi) __B,
+ (__v64qi) _mm512_setzero_qi(),
+ (__mmask64) __U);
+}
+
+static __inline__ __m512i __DEFAULT_FN_ATTRS
+_mm512_adds_epu16 (__m512i __A, __m512i __B)
+{
+ return (__m512i) __builtin_ia32_paddusw512_mask ((__v32hi) __A,
+ (__v32hi) __B,
+ (__v32hi) _mm512_setzero_hi(),
+ (__mmask32) -1);
+}
+
+static __inline__ __m512i __DEFAULT_FN_ATTRS
+_mm512_mask_adds_epu16 (__m512i __W, __mmask32 __U, __m512i __A,
+ __m512i __B)
+{
+ return (__m512i) __builtin_ia32_paddusw512_mask ((__v32hi) __A,
+ (__v32hi) __B,
+ (__v32hi) __W,
+ (__mmask32) __U);
+}
+
+static __inline__ __m512i __DEFAULT_FN_ATTRS
+_mm512_maskz_adds_epu16 (__mmask32 __U, __m512i __A, __m512i __B)
+{
+ return (__m512i) __builtin_ia32_paddusw512_mask ((__v32hi) __A,
+ (__v32hi) __B,
+ (__v32hi) _mm512_setzero_hi(),
+ (__mmask32) __U);
+}
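+
+/* Usage sketch (illustrative only; the __example_* helper is hypothetical):
+   the adds_* operations saturate instead of wrapping, e.g. 200 + 100 yields
+   255 for unsigned bytes. */
+static __inline__ __m512i __DEFAULT_FN_ATTRS
+__example_brighten_pixels(__m512i __pixels, __m512i __delta) {
+  return _mm512_adds_epu8(__pixels, __delta); /* per-byte add, clamped at 255 */
+}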
+
+static __inline__ __m512i __DEFAULT_FN_ATTRS
+_mm512_avg_epu8 (__m512i __A, __m512i __B)
+{
+ return (__m512i) __builtin_ia32_pavgb512_mask ((__v64qi) __A,
+ (__v64qi) __B,
+ (__v64qi) _mm512_setzero_qi(),
+ (__mmask64) -1);
+}
+
+static __inline__ __m512i __DEFAULT_FN_ATTRS
+_mm512_mask_avg_epu8 (__m512i __W, __mmask64 __U, __m512i __A,
+ __m512i __B)
+{
+ return (__m512i) __builtin_ia32_pavgb512_mask ((__v64qi) __A,
+ (__v64qi) __B,
+ (__v64qi) __W,
+ (__mmask64) __U);
+}
+
+static __inline__ __m512i __DEFAULT_FN_ATTRS
+_mm512_maskz_avg_epu8 (__mmask64 __U, __m512i __A, __m512i __B)
+{
+ return (__m512i) __builtin_ia32_pavgb512_mask ((__v64qi) __A,
+ (__v64qi) __B,
+ (__v64qi) _mm512_setzero_qi(),
+ (__mmask64) __U);
+}
+
+static __inline__ __m512i __DEFAULT_FN_ATTRS
+_mm512_avg_epu16 (__m512i __A, __m512i __B)
+{
+ return (__m512i) __builtin_ia32_pavgw512_mask ((__v32hi) __A,
+ (__v32hi) __B,
+ (__v32hi) _mm512_setzero_hi(),
+ (__mmask32) -1);
+}
+
+static __inline__ __m512i __DEFAULT_FN_ATTRS
+_mm512_mask_avg_epu16 (__m512i __W, __mmask32 __U, __m512i __A,
+ __m512i __B)
+{
+ return (__m512i) __builtin_ia32_pavgw512_mask ((__v32hi) __A,
+ (__v32hi) __B,
+ (__v32hi) __W,
+ (__mmask32) __U);
+}
+
+static __inline__ __m512i __DEFAULT_FN_ATTRS
+_mm512_maskz_avg_epu16 (__mmask32 __U, __m512i __A, __m512i __B)
+{
+ return (__m512i) __builtin_ia32_pavgw512_mask ((__v32hi) __A,
+ (__v32hi) __B,
+ (__v32hi) _mm512_setzero_hi(),
+ (__mmask32) __U);
+}
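+
+/* Note: avg_epu8/avg_epu16 compute the rounded average (__a + __b + 1) >> 1
+   of each unsigned element pair, widening internally so the sum cannot
+   overflow. */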
+
+static __inline__ __m512i __DEFAULT_FN_ATTRS
+_mm512_max_epi8 (__m512i __A, __m512i __B)
+{
+ return (__m512i) __builtin_ia32_pmaxsb512_mask ((__v64qi) __A,
+ (__v64qi) __B,
+ (__v64qi) _mm512_setzero_qi(),
+ (__mmask64) -1);
+}
+
+static __inline__ __m512i __DEFAULT_FN_ATTRS
+_mm512_maskz_max_epi8 (__mmask64 __M, __m512i __A, __m512i __B)
+{
+ return (__m512i) __builtin_ia32_pmaxsb512_mask ((__v64qi) __A,
+ (__v64qi) __B,
+ (__v64qi) _mm512_setzero_qi(),
+ (__mmask64) __M);
+}
+
+static __inline__ __m512i __DEFAULT_FN_ATTRS
+_mm512_mask_max_epi8 (__m512i __W, __mmask64 __M, __m512i __A,
+ __m512i __B)
+{
+ return (__m512i) __builtin_ia32_pmaxsb512_mask ((__v64qi) __A,
+ (__v64qi) __B,
+ (__v64qi) __W,
+ (__mmask64) __M);
+}
+
+static __inline__ __m512i __DEFAULT_FN_ATTRS
+_mm512_max_epi16 (__m512i __A, __m512i __B)
+{
+ return (__m512i) __builtin_ia32_pmaxsw512_mask ((__v32hi) __A,
+ (__v32hi) __B,
+ (__v32hi) _mm512_setzero_hi(),
+ (__mmask32) -1);
+}
+
+static __inline__ __m512i __DEFAULT_FN_ATTRS
+_mm512_maskz_max_epi16 (__mmask32 __M, __m512i __A, __m512i __B)
+{
+ return (__m512i) __builtin_ia32_pmaxsw512_mask ((__v32hi) __A,
+ (__v32hi) __B,
+ (__v32hi) _mm512_setzero_hi(),
+ (__mmask32) __M);
+}
+
+static __inline__ __m512i __DEFAULT_FN_ATTRS
+_mm512_mask_max_epi16 (__m512i __W, __mmask32 __M, __m512i __A,
+ __m512i __B)
+{
+ return (__m512i) __builtin_ia32_pmaxsw512_mask ((__v32hi) __A,
+ (__v32hi) __B,
+ (__v32hi) __W,
+ (__mmask32) __M);
+}
+
+static __inline__ __m512i __DEFAULT_FN_ATTRS
+_mm512_max_epu8 (__m512i __A, __m512i __B)
+{
+ return (__m512i) __builtin_ia32_pmaxub512_mask ((__v64qi) __A,
+ (__v64qi) __B,
+ (__v64qi) _mm512_setzero_qi(),
+ (__mmask64) -1);
+}
+
+static __inline__ __m512i __DEFAULT_FN_ATTRS
+_mm512_maskz_max_epu8 (__mmask64 __M, __m512i __A, __m512i __B)
+{
+ return (__m512i) __builtin_ia32_pmaxub512_mask ((__v64qi) __A,
+ (__v64qi) __B,
+ (__v64qi) _mm512_setzero_qi(),
+ (__mmask64) __M);
+}
+
+static __inline__ __m512i __DEFAULT_FN_ATTRS
+_mm512_mask_max_epu8 (__m512i __W, __mmask64 __M, __m512i __A,
+ __m512i __B)
+{
+ return (__m512i) __builtin_ia32_pmaxub512_mask ((__v64qi) __A,
+ (__v64qi) __B,
+ (__v64qi) __W,
+ (__mmask64) __M);
+}
+
+static __inline__ __m512i __DEFAULT_FN_ATTRS
+_mm512_max_epu16 (__m512i __A, __m512i __B)
+{
+ return (__m512i) __builtin_ia32_pmaxuw512_mask ((__v32hi) __A,
+ (__v32hi) __B,
+ (__v32hi) _mm512_setzero_hi(),
+ (__mmask32) -1);
+}
+
+static __inline__ __m512i __DEFAULT_FN_ATTRS
+_mm512_maskz_max_epu16 (__mmask32 __M, __m512i __A, __m512i __B)
+{
+ return (__m512i) __builtin_ia32_pmaxuw512_mask ((__v32hi) __A,
+ (__v32hi) __B,
+ (__v32hi) _mm512_setzero_hi(),
+ (__mmask32) __M);
+}
+
+static __inline__ __m512i __DEFAULT_FN_ATTRS
+_mm512_mask_max_epu16 (__m512i __W, __mmask32 __M, __m512i __A,
+ __m512i __B)
+{
+ return (__m512i) __builtin_ia32_pmaxuw512_mask ((__v32hi) __A,
+ (__v32hi) __B,
+ (__v32hi) __W,
+ (__mmask32) __M);
+}
+
+static __inline__ __m512i __DEFAULT_FN_ATTRS
+_mm512_min_epi8 (__m512i __A, __m512i __B)
+{
+ return (__m512i) __builtin_ia32_pminsb512_mask ((__v64qi) __A,
+ (__v64qi) __B,
+ (__v64qi) _mm512_setzero_qi(),
+ (__mmask64) -1);
+}
+
+static __inline__ __m512i __DEFAULT_FN_ATTRS
+_mm512_maskz_min_epi8 (__mmask64 __M, __m512i __A, __m512i __B)
+{
+ return (__m512i) __builtin_ia32_pminsb512_mask ((__v64qi) __A,
+ (__v64qi) __B,
+ (__v64qi) _mm512_setzero_qi(),
+ (__mmask64) __M);
+}
+
+static __inline__ __m512i __DEFAULT_FN_ATTRS
+_mm512_mask_min_epi8 (__m512i __W, __mmask64 __M, __m512i __A,
+ __m512i __B)
+{
+ return (__m512i) __builtin_ia32_pminsb512_mask ((__v64qi) __A,
+ (__v64qi) __B,
+ (__v64qi) __W,
+ (__mmask64) __M);
+}
+
+static __inline__ __m512i __DEFAULT_FN_ATTRS
+_mm512_min_epi16 (__m512i __A, __m512i __B)
+{
+ return (__m512i) __builtin_ia32_pminsw512_mask ((__v32hi) __A,
+ (__v32hi) __B,
+ (__v32hi) _mm512_setzero_hi(),
+ (__mmask32) -1);
+}
+
+static __inline__ __m512i __DEFAULT_FN_ATTRS
+_mm512_maskz_min_epi16 (__mmask32 __M, __m512i __A, __m512i __B)
+{
+ return (__m512i) __builtin_ia32_pminsw512_mask ((__v32hi) __A,
+ (__v32hi) __B,
+ (__v32hi) _mm512_setzero_hi(),
+ (__mmask32) __M);
+}
+
+static __inline__ __m512i __DEFAULT_FN_ATTRS
+_mm512_mask_min_epi16 (__m512i __W, __mmask32 __M, __m512i __A,
+ __m512i __B)
+{
+ return (__m512i) __builtin_ia32_pminsw512_mask ((__v32hi) __A,
+ (__v32hi) __B,
+ (__v32hi) __W,
+ (__mmask32) __M);
+}
+
+static __inline__ __m512i __DEFAULT_FN_ATTRS
+_mm512_min_epu8 (__m512i __A, __m512i __B)
+{
+ return (__m512i) __builtin_ia32_pminub512_mask ((__v64qi) __A,
+ (__v64qi) __B,
+ (__v64qi) _mm512_setzero_qi(),
+ (__mmask64) -1);
+}
+
+static __inline__ __m512i __DEFAULT_FN_ATTRS
+_mm512_maskz_min_epu8 (__mmask64 __M, __m512i __A, __m512i __B)
+{
+ return (__m512i) __builtin_ia32_pminub512_mask ((__v64qi) __A,
+ (__v64qi) __B,
+ (__v64qi) _mm512_setzero_qi(),
+ (__mmask64) __M);
+}
+
+static __inline__ __m512i __DEFAULT_FN_ATTRS
+_mm512_mask_min_epu8 (__m512i __W, __mmask64 __M, __m512i __A,
+ __m512i __B)
+{
+ return (__m512i) __builtin_ia32_pminub512_mask ((__v64qi) __A,
+ (__v64qi) __B,
+ (__v64qi) __W,
+ (__mmask64) __M);
+}
+
+static __inline__ __m512i __DEFAULT_FN_ATTRS
+_mm512_min_epu16 (__m512i __A, __m512i __B)
+{
+ return (__m512i) __builtin_ia32_pminuw512_mask ((__v32hi) __A,
+ (__v32hi) __B,
+ (__v32hi) _mm512_setzero_hi(),
+ (__mmask32) -1);
+}
+
+static __inline__ __m512i __DEFAULT_FN_ATTRS
+_mm512_maskz_min_epu16 (__mmask32 __M, __m512i __A, __m512i __B)
+{
+ return (__m512i) __builtin_ia32_pminuw512_mask ((__v32hi) __A,
+ (__v32hi) __B,
+ (__v32hi) _mm512_setzero_hi(),
+ (__mmask32) __M);
+}
+
+static __inline__ __m512i __DEFAULT_FN_ATTRS
+_mm512_mask_min_epu16 (__m512i __W, __mmask32 __M, __m512i __A,
+ __m512i __B)
+{
+ return (__m512i) __builtin_ia32_pminuw512_mask ((__v32hi) __A,
+ (__v32hi) __B,
+ (__v32hi) __W,
+ (__mmask32) __M);
+}
+
+static __inline__ __m512i __DEFAULT_FN_ATTRS
+_mm512_shuffle_epi8 (__m512i __A, __m512i __B)
+{
+ return (__m512i) __builtin_ia32_pshufb512_mask ((__v64qi) __A,
+ (__v64qi) __B,
+ (__v64qi) _mm512_setzero_qi(),
+ (__mmask64) -1);
+}
+
+static __inline__ __m512i __DEFAULT_FN_ATTRS
+_mm512_mask_shuffle_epi8 (__m512i __W, __mmask64 __U, __m512i __A,
+ __m512i __B)
+{
+ return (__m512i) __builtin_ia32_pshufb512_mask ((__v64qi) __A,
+ (__v64qi) __B,
+ (__v64qi) __W,
+ (__mmask64) __U);
+}
+
+static __inline__ __m512i __DEFAULT_FN_ATTRS
+_mm512_maskz_shuffle_epi8 (__mmask64 __U, __m512i __A, __m512i __B)
+{
+ return (__m512i) __builtin_ia32_pshufb512_mask ((__v64qi) __A,
+ (__v64qi) __B,
+ (__v64qi) _mm512_setzero_qi(),
+ (__mmask64) __U);
+}
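+
+/* Note: shuffle_epi8 permutes bytes within each 128-bit lane: result byte i
+   is __A[(i & ~15) | (__B[i] & 15)], or zero when the top bit of __B[i] is
+   set. */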
+
+static __inline__ __m512i __DEFAULT_FN_ATTRS
+_mm512_subs_epi8 (__m512i __A, __m512i __B)
+{
+ return (__m512i) __builtin_ia32_psubsb512_mask ((__v64qi) __A,
+ (__v64qi) __B,
+ (__v64qi) _mm512_setzero_qi(),
+ (__mmask64) -1);
+}
+
+static __inline__ __m512i __DEFAULT_FN_ATTRS
+_mm512_mask_subs_epi8 (__m512i __W, __mmask64 __U, __m512i __A,
+ __m512i __B)
+{
+ return (__m512i) __builtin_ia32_psubsb512_mask ((__v64qi) __A,
+ (__v64qi) __B,
+ (__v64qi) __W,
+ (__mmask64) __U);
+}
+
+static __inline__ __m512i __DEFAULT_FN_ATTRS
+_mm512_maskz_subs_epi8 (__mmask64 __U, __m512i __A, __m512i __B)
+{
+ return (__m512i) __builtin_ia32_psubsb512_mask ((__v64qi) __A,
+ (__v64qi) __B,
+ (__v64qi) _mm512_setzero_qi(),
+ (__mmask64) __U);
+}
+
+static __inline__ __m512i __DEFAULT_FN_ATTRS
+_mm512_subs_epi16 (__m512i __A, __m512i __B)
+{
+ return (__m512i) __builtin_ia32_psubsw512_mask ((__v32hi) __A,
+ (__v32hi) __B,
+ (__v32hi) _mm512_setzero_hi(),
+ (__mmask32) -1);
+}
+
+static __inline__ __m512i __DEFAULT_FN_ATTRS
+_mm512_mask_subs_epi16 (__m512i __W, __mmask32 __U, __m512i __A,
+ __m512i __B)
+{
+ return (__m512i) __builtin_ia32_psubsw512_mask ((__v32hi) __A,
+ (__v32hi) __B,
+ (__v32hi) __W,
+ (__mmask32) __U);
+}
+
+static __inline__ __m512i __DEFAULT_FN_ATTRS
+_mm512_maskz_subs_epi16 (__mmask32 __U, __m512i __A, __m512i __B)
+{
+ return (__m512i) __builtin_ia32_psubsw512_mask ((__v32hi) __A,
+ (__v32hi) __B,
+ (__v32hi) _mm512_setzero_hi(),
+ (__mmask32) __U);
+}
+
+static __inline__ __m512i __DEFAULT_FN_ATTRS
+_mm512_subs_epu8 (__m512i __A, __m512i __B)
+{
+ return (__m512i) __builtin_ia32_psubusb512_mask ((__v64qi) __A,
+ (__v64qi) __B,
+ (__v64qi) _mm512_setzero_qi(),
+ (__mmask64) -1);
+}
+
+static __inline__ __m512i __DEFAULT_FN_ATTRS
+_mm512_mask_subs_epu8 (__m512i __W, __mmask64 __U, __m512i __A,
+ __m512i __B)
+{
+ return (__m512i) __builtin_ia32_psubusb512_mask ((__v64qi) __A,
+ (__v64qi) __B,
+ (__v64qi) __W,
+ (__mmask64) __U);
+}
+
+static __inline__ __m512i __DEFAULT_FN_ATTRS
+_mm512_maskz_subs_epu8 (__mmask64 __U, __m512i __A, __m512i __B)
+{
+ return (__m512i) __builtin_ia32_psubusb512_mask ((__v64qi) __A,
+ (__v64qi) __B,
+ (__v64qi) _mm512_setzero_qi(),
+ (__mmask64) __U);
+}
+
+static __inline__ __m512i __DEFAULT_FN_ATTRS
+_mm512_subs_epu16 (__m512i __A, __m512i __B)
+{
+ return (__m512i) __builtin_ia32_psubusw512_mask ((__v32hi) __A,
+ (__v32hi) __B,
+ (__v32hi) _mm512_setzero_hi(),
+ (__mmask32) -1);
+}
+
+static __inline__ __m512i __DEFAULT_FN_ATTRS
+_mm512_mask_subs_epu16 (__m512i __W, __mmask32 __U, __m512i __A,
+ __m512i __B)
+{
+ return (__m512i) __builtin_ia32_psubusw512_mask ((__v32hi) __A,
+ (__v32hi) __B,
+ (__v32hi) __W,
+ (__mmask32) __U);
+}
+
+static __inline__ __m512i __DEFAULT_FN_ATTRS
+_mm512_maskz_subs_epu16 (__mmask32 __U, __m512i __A, __m512i __B)
+{
+ return (__m512i) __builtin_ia32_psubusw512_mask ((__v32hi) __A,
+ (__v32hi) __B,
+ (__v32hi) _mm512_setzero_hi(),
+ (__mmask32) __U);
+}
+
+static __inline__ __m512i __DEFAULT_FN_ATTRS
+_mm512_mask2_permutex2var_epi16 (__m512i __A, __m512i __I,
+ __mmask32 __U, __m512i __B)
+{
+  return (__m512i) __builtin_ia32_vpermi2varhi512_mask ((__v32hi) __A,
+                                                        (__v32hi) __I /* idx */,
+                                                        (__v32hi) __B,
+                                                        (__mmask32) __U);
+}
+
+static __inline__ __m512i __DEFAULT_FN_ATTRS
+_mm512_permutex2var_epi16 (__m512i __A, __m512i __I, __m512i __B)
+{
+ return (__m512i) __builtin_ia32_vpermt2varhi512_mask ((__v32hi) __I /* idx */,
+ (__v32hi) __A,
+ (__v32hi) __B,
+ (__mmask32) -1);
+}
+
+static __inline__ __m512i __DEFAULT_FN_ATTRS
+_mm512_mask_permutex2var_epi16 (__m512i __A, __mmask32 __U,
+ __m512i __I, __m512i __B)
+{
+ return (__m512i) __builtin_ia32_vpermt2varhi512_mask ((__v32hi) __I /* idx */,
+ (__v32hi) __A,
+ (__v32hi) __B,
+ (__mmask32) __U);
+}
+
+static __inline__ __m512i __DEFAULT_FN_ATTRS
+_mm512_maskz_permutex2var_epi16 (__mmask32 __U, __m512i __A,
+ __m512i __I, __m512i __B)
+{
+  return (__m512i) __builtin_ia32_vpermt2varhi512_maskz ((__v32hi) __I /* idx */,
+                                                         (__v32hi) __A,
+                                                         (__v32hi) __B,
+                                                         (__mmask32) __U);
+}
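+
+/* Note: the permutex2var operations above index into the 64-element
+   concatenation of __A and __B: the low 5 bits of each index in __I select a
+   position and bit 5 selects the source (__A clear, __B set), so elements may
+   move across 128-bit lane boundaries. */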
+
+static __inline__ __m512i __DEFAULT_FN_ATTRS
+_mm512_mulhrs_epi16 (__m512i __A, __m512i __B)
+{
+ return (__m512i) __builtin_ia32_pmulhrsw512_mask ((__v32hi) __A,
+ (__v32hi) __B,
+ (__v32hi) _mm512_setzero_hi(),
+ (__mmask32) -1);
+}
+
+static __inline__ __m512i __DEFAULT_FN_ATTRS
+_mm512_mask_mulhrs_epi16 (__m512i __W, __mmask32 __U, __m512i __A,
+ __m512i __B)
+{
+ return (__m512i) __builtin_ia32_pmulhrsw512_mask ((__v32hi) __A,
+ (__v32hi) __B,
+ (__v32hi) __W,
+ (__mmask32) __U);
+}
+
+static __inline__ __m512i __DEFAULT_FN_ATTRS
+_mm512_maskz_mulhrs_epi16 (__mmask32 __U, __m512i __A, __m512i __B)
+{
+ return (__m512i) __builtin_ia32_pmulhrsw512_mask ((__v32hi) __A,
+ (__v32hi) __B,
+ (__v32hi) _mm512_setzero_hi(),
+ (__mmask32) __U);
+}
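+
+/* Note: mulhrs_epi16 treats its inputs as Q15 fixed-point values and returns
+   the rounded high half of each product: (((__a * __b) >> 14) + 1) >> 1. */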
+
+static __inline__ __m512i __DEFAULT_FN_ATTRS
+_mm512_mulhi_epi16 (__m512i __A, __m512i __B)
+{
+ return (__m512i) __builtin_ia32_pmulhw512_mask ((__v32hi) __A,
+ (__v32hi) __B,
+ (__v32hi) _mm512_setzero_hi(),
+ (__mmask32) -1);
+}
+
+static __inline__ __m512i __DEFAULT_FN_ATTRS
+_mm512_mask_mulhi_epi16 (__m512i __W, __mmask32 __U, __m512i __A,
+ __m512i __B)
+{
+ return (__m512i) __builtin_ia32_pmulhw512_mask ((__v32hi) __A,
+ (__v32hi) __B,
+ (__v32hi) __W,
+ (__mmask32) __U);
+}
+
+static __inline__ __m512i __DEFAULT_FN_ATTRS
+_mm512_maskz_mulhi_epi16 (__mmask32 __U, __m512i __A, __m512i __B)
+{
+ return (__m512i) __builtin_ia32_pmulhw512_mask ((__v32hi) __A,
+ (__v32hi) __B,
+ (__v32hi) _mm512_setzero_hi(),
+ (__mmask32) __U);
+}
+
+static __inline__ __m512i __DEFAULT_FN_ATTRS
+_mm512_mulhi_epu16 (__m512i __A, __m512i __B)
+{
+ return (__m512i) __builtin_ia32_pmulhuw512_mask ((__v32hi) __A,
+ (__v32hi) __B,
+ (__v32hi) _mm512_setzero_hi(),
+ (__mmask32) -1);
+}
+
+static __inline__ __m512i __DEFAULT_FN_ATTRS
+_mm512_mask_mulhi_epu16 (__m512i __W, __mmask32 __U, __m512i __A,
+ __m512i __B)
+{
+ return (__m512i) __builtin_ia32_pmulhuw512_mask ((__v32hi) __A,
+ (__v32hi) __B,
+ (__v32hi) __W,
+ (__mmask32) __U);
+}
+
+static __inline__ __m512i __DEFAULT_FN_ATTRS
+_mm512_maskz_mulhi_epu16 (__mmask32 __U, __m512i __A, __m512i __B)
+{
+ return (__m512i) __builtin_ia32_pmulhuw512_mask ((__v32hi) __A,
+ (__v32hi) __B,
+ (__v32hi) _mm512_setzero_hi(),
+ (__mmask32) __U);
+}
+
+static __inline__ __m512i __DEFAULT_FN_ATTRS
+_mm512_maddubs_epi16 (__m512i __X, __m512i __Y) {
+ return (__m512i) __builtin_ia32_pmaddubsw512_mask ((__v64qi) __X,
+ (__v64qi) __Y,
+ (__v32hi) _mm512_setzero_hi(),
+ (__mmask32) -1);
+}
+
+static __inline__ __m512i __DEFAULT_FN_ATTRS
+_mm512_mask_maddubs_epi16 (__m512i __W, __mmask32 __U, __m512i __X,
+ __m512i __Y) {
+ return (__m512i) __builtin_ia32_pmaddubsw512_mask ((__v64qi) __X,
+ (__v64qi) __Y,
+ (__v32hi) __W,
+ (__mmask32) __U);
+}
+
+static __inline__ __m512i __DEFAULT_FN_ATTRS
+_mm512_maskz_maddubs_epi16 (__mmask32 __U, __m512i __X, __m512i __Y) {
+ return (__m512i) __builtin_ia32_pmaddubsw512_mask ((__v64qi) __X,
+ (__v64qi) __Y,
+ (__v32hi) _mm512_setzero_hi(),
+ (__mmask32) __U);
+}
+
+static __inline__ __m512i __DEFAULT_FN_ATTRS
+_mm512_madd_epi16 (__m512i __A, __m512i __B) {
+ return (__m512i) __builtin_ia32_pmaddwd512_mask ((__v32hi) __A,
+ (__v32hi) __B,
+ (__v16si) _mm512_setzero_si512(),
+ (__mmask16) -1);
+}
+
+static __inline__ __m512i __DEFAULT_FN_ATTRS
+_mm512_mask_madd_epi16 (__m512i __W, __mmask16 __U, __m512i __A,
+ __m512i __B) {
+ return (__m512i) __builtin_ia32_pmaddwd512_mask ((__v32hi) __A,
+ (__v32hi) __B,
+ (__v16si) __W,
+ (__mmask16) __U);
+}
+
+static __inline__ __m512i __DEFAULT_FN_ATTRS
+_mm512_maskz_madd_epi16 (__mmask16 __U, __m512i __A, __m512i __B) {
+ return (__m512i) __builtin_ia32_pmaddwd512_mask ((__v32hi) __A,
+ (__v32hi) __B,
+ (__v16si) _mm512_setzero_si512(),
+ (__mmask16) __U);
+}
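+
+/* Usage sketch (illustrative only; the __example_* helper is hypothetical):
+   madd_epi16 is a dot-product building block that multiplies adjacent 16-bit
+   pairs and sums each pair into a 32-bit element. */
+static __inline__ __m512i __DEFAULT_FN_ATTRS
+__example_pair_products(__m512i __x, __m512i __y) {
+  /* Each 32-bit result element is __x[2i]*__y[2i] + __x[2i+1]*__y[2i+1]. */
+  return _mm512_madd_epi16(__x, __y);
+}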
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS
+_mm512_cvtsepi16_epi8 (__m512i __A) {
+ return (__m256i) __builtin_ia32_pmovswb512_mask ((__v32hi) __A,
+ (__v32qi)_mm256_setzero_si256(),
+ (__mmask32) -1);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS
+_mm512_mask_cvtsepi16_epi8 (__m256i __O, __mmask32 __M, __m512i __A) {
+ return (__m256i) __builtin_ia32_pmovswb512_mask ((__v32hi) __A,
+ (__v32qi)__O,
+ __M);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS
+_mm512_maskz_cvtsepi16_epi8 (__mmask32 __M, __m512i __A) {
+ return (__m256i) __builtin_ia32_pmovswb512_mask ((__v32hi) __A,
+ (__v32qi) _mm256_setzero_si256(),
+ __M);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS
+_mm512_cvtusepi16_epi8 (__m512i __A) {
+ return (__m256i) __builtin_ia32_pmovuswb512_mask ((__v32hi) __A,
+ (__v32qi) _mm256_setzero_si256(),
+ (__mmask32) -1);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS
+_mm512_mask_cvtusepi16_epi8 (__m256i __O, __mmask32 __M, __m512i __A) {
+ return (__m256i) __builtin_ia32_pmovuswb512_mask ((__v32hi) __A,
+ (__v32qi) __O,
+ __M);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS
+_mm512_maskz_cvtusepi16_epi8 (__mmask32 __M, __m512i __A) {
+ return (__m256i) __builtin_ia32_pmovuswb512_mask ((__v32hi) __A,
+ (__v32qi) _mm256_setzero_si256(),
+ __M);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS
+_mm512_cvtepi16_epi8 (__m512i __A) {
+ return (__m256i) __builtin_ia32_pmovwb512_mask ((__v32hi) __A,
+ (__v32qi) _mm256_setzero_si256(),
+ (__mmask32) -1);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS
+_mm512_mask_cvtepi16_epi8 (__m256i __O, __mmask32 __M, __m512i __A) {
+ return (__m256i) __builtin_ia32_pmovwb512_mask ((__v32hi) __A,
+ (__v32qi) __O,
+ __M);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS
+_mm512_maskz_cvtepi16_epi8 (__mmask32 __M, __m512i __A) {
+ return (__m256i) __builtin_ia32_pmovwb512_mask ((__v32hi) __A,
+ (__v32qi) _mm256_setzero_si256(),
+ __M);
+}
+
+static __inline__ void __DEFAULT_FN_ATTRS
+_mm512_mask_cvtepi16_storeu_epi8 (void * __P, __mmask32 __M, __m512i __A)
+{
+ __builtin_ia32_pmovwb512mem_mask ((__v32qi *) __P, (__v32hi) __A, __M);
+}
+
+static __inline__ void __DEFAULT_FN_ATTRS
+_mm512_mask_cvtsepi16_storeu_epi8 (void * __P, __mmask32 __M, __m512i __A)
+{
+ __builtin_ia32_pmovswb512mem_mask ((__v32qi *) __P, (__v32hi) __A, __M);
+}
+
+static __inline__ void __DEFAULT_FN_ATTRS
+_mm512_mask_cvtusepi16_storeu_epi8 (void * __P, __mmask32 __M, __m512i __A)
+{
+ __builtin_ia32_pmovuswb512mem_mask ((__v32qi *) __P, (__v32hi) __A, __M);
+}
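+
+/* Usage sketch (illustrative only; the __example_* helper is hypothetical):
+   the masked truncating stores write only the destination bytes whose mask
+   bit is set, so a partial store needs no separate blend or load. */
+static __inline__ void __DEFAULT_FN_ATTRS
+__example_store_low_16_words_as_bytes(void *__p, __m512i __v) {
+  _mm512_mask_cvtepi16_storeu_epi8(__p, (__mmask32)0xFFFF, __v);
+}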
+
+static __inline__ __m512i __DEFAULT_FN_ATTRS
+_mm512_unpackhi_epi8(__m512i __A, __m512i __B) {
+ return (__m512i)__builtin_shufflevector((__v64qi)__A, (__v64qi)__B,
+ 8, 64+8, 9, 64+9,
+ 10, 64+10, 11, 64+11,
+ 12, 64+12, 13, 64+13,
+ 14, 64+14, 15, 64+15,
+ 24, 64+24, 25, 64+25,
+ 26, 64+26, 27, 64+27,
+ 28, 64+28, 29, 64+29,
+ 30, 64+30, 31, 64+31,
+ 40, 64+40, 41, 64+41,
+ 42, 64+42, 43, 64+43,
+ 44, 64+44, 45, 64+45,
+ 46, 64+46, 47, 64+47,
+ 56, 64+56, 57, 64+57,
+ 58, 64+58, 59, 64+59,
+ 60, 64+60, 61, 64+61,
+ 62, 64+62, 63, 64+63);
+}
+
+static __inline__ __m512i __DEFAULT_FN_ATTRS
+_mm512_mask_unpackhi_epi8(__m512i __W, __mmask64 __U, __m512i __A, __m512i __B) {
+ return (__m512i)__builtin_ia32_selectb_512((__mmask64)__U,
+ (__v64qi)_mm512_unpackhi_epi8(__A, __B),
+ (__v64qi)__W);
+}
+
+static __inline__ __m512i __DEFAULT_FN_ATTRS
+_mm512_maskz_unpackhi_epi8(__mmask64 __U, __m512i __A, __m512i __B) {
+ return (__m512i)__builtin_ia32_selectb_512((__mmask64)__U,
+ (__v64qi)_mm512_unpackhi_epi8(__A, __B),
+ (__v64qi)_mm512_setzero_qi());
+}
+
+static __inline__ __m512i __DEFAULT_FN_ATTRS
+_mm512_unpackhi_epi16(__m512i __A, __m512i __B) {
+ return (__m512i)__builtin_shufflevector((__v32hi)__A, (__v32hi)__B,
+ 4, 32+4, 5, 32+5,
+ 6, 32+6, 7, 32+7,
+ 12, 32+12, 13, 32+13,
+ 14, 32+14, 15, 32+15,
+ 20, 32+20, 21, 32+21,
+ 22, 32+22, 23, 32+23,
+ 28, 32+28, 29, 32+29,
+ 30, 32+30, 31, 32+31);
+}
+
+static __inline__ __m512i __DEFAULT_FN_ATTRS
+_mm512_mask_unpackhi_epi16(__m512i __W, __mmask32 __U, __m512i __A, __m512i __B) {
+ return (__m512i)__builtin_ia32_selectw_512((__mmask32)__U,
+ (__v32hi)_mm512_unpackhi_epi16(__A, __B),
+ (__v32hi)__W);
+}
+
+static __inline__ __m512i __DEFAULT_FN_ATTRS
+_mm512_maskz_unpackhi_epi16(__mmask32 __U, __m512i __A, __m512i __B) {
+ return (__m512i)__builtin_ia32_selectw_512((__mmask32)__U,
+ (__v32hi)_mm512_unpackhi_epi16(__A, __B),
+ (__v32hi)_mm512_setzero_hi());
+}
+
+static __inline__ __m512i __DEFAULT_FN_ATTRS
+_mm512_unpacklo_epi8(__m512i __A, __m512i __B) {
+ return (__m512i)__builtin_shufflevector((__v64qi)__A, (__v64qi)__B,
+ 0, 64+0, 1, 64+1,
+ 2, 64+2, 3, 64+3,
+ 4, 64+4, 5, 64+5,
+ 6, 64+6, 7, 64+7,
+ 16, 64+16, 17, 64+17,
+ 18, 64+18, 19, 64+19,
+ 20, 64+20, 21, 64+21,
+ 22, 64+22, 23, 64+23,
+ 32, 64+32, 33, 64+33,
+ 34, 64+34, 35, 64+35,
+ 36, 64+36, 37, 64+37,
+ 38, 64+38, 39, 64+39,
+ 48, 64+48, 49, 64+49,
+ 50, 64+50, 51, 64+51,
+ 52, 64+52, 53, 64+53,
+ 54, 64+54, 55, 64+55);
+}
+
+static __inline__ __m512i __DEFAULT_FN_ATTRS
+_mm512_mask_unpacklo_epi8(__m512i __W, __mmask64 __U, __m512i __A, __m512i __B) {
+ return (__m512i)__builtin_ia32_selectb_512((__mmask64)__U,
+ (__v64qi)_mm512_unpacklo_epi8(__A, __B),
+ (__v64qi)__W);
+}
+
+static __inline__ __m512i __DEFAULT_FN_ATTRS
+_mm512_maskz_unpacklo_epi8(__mmask64 __U, __m512i __A, __m512i __B) {
+ return (__m512i)__builtin_ia32_selectb_512((__mmask64)__U,
+ (__v64qi)_mm512_unpacklo_epi8(__A, __B),
+ (__v64qi)_mm512_setzero_qi());
+}
+
+static __inline__ __m512i __DEFAULT_FN_ATTRS
+_mm512_unpacklo_epi16(__m512i __A, __m512i __B) {
+ return (__m512i)__builtin_shufflevector((__v32hi)__A, (__v32hi)__B,
+ 0, 32+0, 1, 32+1,
+ 2, 32+2, 3, 32+3,
+ 8, 32+8, 9, 32+9,
+ 10, 32+10, 11, 32+11,
+ 16, 32+16, 17, 32+17,
+ 18, 32+18, 19, 32+19,
+ 24, 32+24, 25, 32+25,
+ 26, 32+26, 27, 32+27);
+}
+
+static __inline__ __m512i __DEFAULT_FN_ATTRS
+_mm512_mask_unpacklo_epi16(__m512i __W, __mmask32 __U, __m512i __A, __m512i __B) {
+ return (__m512i)__builtin_ia32_selectw_512((__mmask32)__U,
+ (__v32hi)_mm512_unpacklo_epi16(__A, __B),
+ (__v32hi)__W);
+}
+
+static __inline__ __m512i __DEFAULT_FN_ATTRS
+_mm512_maskz_unpacklo_epi16(__mmask32 __U, __m512i __A, __m512i __B) {
+ return (__m512i)__builtin_ia32_selectw_512((__mmask32)__U,
+ (__v32hi)_mm512_unpacklo_epi16(__A, __B),
+ (__v32hi)_mm512_setzero_hi());
+}
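+
+/* Note: the unpack operations above interleave elements from the high
+   (unpackhi) or low (unpacklo) half of each 128-bit lane of __A and __B, as
+   the shuffle indices spell out. */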
+
+static __inline__ __m512i __DEFAULT_FN_ATTRS
+_mm512_cvtepi8_epi16 (__m256i __A)
+{
+ return (__m512i) __builtin_ia32_pmovsxbw512_mask ((__v32qi) __A,
+ (__v32hi)
+ _mm512_setzero_hi (),
+ (__mmask32) -1);
+}
+
+static __inline__ __m512i __DEFAULT_FN_ATTRS
+_mm512_mask_cvtepi8_epi16 (__m512i __W, __mmask32 __U, __m256i __A)
+{
+ return (__m512i) __builtin_ia32_pmovsxbw512_mask ((__v32qi) __A,
+ (__v32hi) __W,
+ (__mmask32) __U);
+}
+
+static __inline__ __m512i __DEFAULT_FN_ATTRS
+_mm512_maskz_cvtepi8_epi16 (__mmask32 __U, __m256i __A)
+{
+ return (__m512i) __builtin_ia32_pmovsxbw512_mask ((__v32qi) __A,
+ (__v32hi)
+ _mm512_setzero_hi(),
+ (__mmask32) __U);
+}
+
+static __inline__ __m512i __DEFAULT_FN_ATTRS
+_mm512_cvtepu8_epi16 (__m256i __A)
+{
+ return (__m512i) __builtin_ia32_pmovzxbw512_mask ((__v32qi) __A,
+ (__v32hi)
+ _mm512_setzero_hi (),
+ (__mmask32) -1);
+}
+
+static __inline__ __m512i __DEFAULT_FN_ATTRS
+_mm512_mask_cvtepu8_epi16 (__m512i __W, __mmask32 __U, __m256i __A)
+{
+ return (__m512i) __builtin_ia32_pmovzxbw512_mask ((__v32qi) __A,
+ (__v32hi) __W,
+ (__mmask32) __U);
+}
+
+static __inline__ __m512i __DEFAULT_FN_ATTRS
+_mm512_maskz_cvtepu8_epi16 (__mmask32 __U, __m256i __A)
+{
+ return (__m512i) __builtin_ia32_pmovzxbw512_mask ((__v32qi) __A,
+ (__v32hi)
+ _mm512_setzero_hi(),
+ (__mmask32) __U);
+}
+
+
+#define _mm512_cmp_epi8_mask(a, b, p) __extension__ ({ \
+ (__mmask64)__builtin_ia32_cmpb512_mask((__v64qi)(__m512i)(a), \
+ (__v64qi)(__m512i)(b), (int)(p), \
+ (__mmask64)-1); })
+
+#define _mm512_mask_cmp_epi8_mask(m, a, b, p) __extension__ ({ \
+ (__mmask64)__builtin_ia32_cmpb512_mask((__v64qi)(__m512i)(a), \
+ (__v64qi)(__m512i)(b), (int)(p), \
+ (__mmask64)(m)); })
+
+#define _mm512_cmp_epu8_mask(a, b, p) __extension__ ({ \
+ (__mmask64)__builtin_ia32_ucmpb512_mask((__v64qi)(__m512i)(a), \
+ (__v64qi)(__m512i)(b), (int)(p), \
+ (__mmask64)-1); })
+
+#define _mm512_mask_cmp_epu8_mask(m, a, b, p) __extension__ ({ \
+ (__mmask64)__builtin_ia32_ucmpb512_mask((__v64qi)(__m512i)(a), \
+ (__v64qi)(__m512i)(b), (int)(p), \
+ (__mmask64)(m)); })
+
+#define _mm512_cmp_epi16_mask(a, b, p) __extension__ ({ \
+ (__mmask32)__builtin_ia32_cmpw512_mask((__v32hi)(__m512i)(a), \
+ (__v32hi)(__m512i)(b), (int)(p), \
+ (__mmask32)-1); })
+
+#define _mm512_mask_cmp_epi16_mask(m, a, b, p) __extension__ ({ \
+ (__mmask32)__builtin_ia32_cmpw512_mask((__v32hi)(__m512i)(a), \
+ (__v32hi)(__m512i)(b), (int)(p), \
+ (__mmask32)(m)); })
+
+#define _mm512_cmp_epu16_mask(a, b, p) __extension__ ({ \
+ (__mmask32)__builtin_ia32_ucmpw512_mask((__v32hi)(__m512i)(a), \
+ (__v32hi)(__m512i)(b), (int)(p), \
+ (__mmask32)-1); })
+
+#define _mm512_mask_cmp_epu16_mask(m, a, b, p) __extension__ ({ \
+ (__mmask32)__builtin_ia32_ucmpw512_mask((__v32hi)(__m512i)(a), \
+ (__v32hi)(__m512i)(b), (int)(p), \
+ (__mmask32)(m)); })
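+
+/* Usage sketch (illustrative): the cmp macros take the same predicate
+   encoding as the named comparisons above, so
+     __mmask32 __k = _mm512_cmp_epi16_mask(__a, __b, 5);
+   is equivalent to _mm512_cmpge_epi16_mask(__a, __b). */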
+
+#define _mm512_shufflehi_epi16(A, imm) __extension__ ({ \
+ (__m512i)__builtin_shufflevector((__v32hi)(__m512i)(A), \
+ (__v32hi)_mm512_undefined_epi32(), \
+ 0, 1, 2, 3, \
+ 4 + (((imm) >> 0) & 0x3), \
+ 4 + (((imm) >> 2) & 0x3), \
+ 4 + (((imm) >> 4) & 0x3), \
+ 4 + (((imm) >> 6) & 0x3), \
+ 8, 9, 10, 11, \
+ 12 + (((imm) >> 0) & 0x3), \
+ 12 + (((imm) >> 2) & 0x3), \
+ 12 + (((imm) >> 4) & 0x3), \
+ 12 + (((imm) >> 6) & 0x3), \
+ 16, 17, 18, 19, \
+ 20 + (((imm) >> 0) & 0x3), \
+ 20 + (((imm) >> 2) & 0x3), \
+ 20 + (((imm) >> 4) & 0x3), \
+ 20 + (((imm) >> 6) & 0x3), \
+ 24, 25, 26, 27, \
+ 28 + (((imm) >> 0) & 0x3), \
+ 28 + (((imm) >> 2) & 0x3), \
+ 28 + (((imm) >> 4) & 0x3), \
+ 28 + (((imm) >> 6) & 0x3)); })
+
+#define _mm512_mask_shufflehi_epi16(W, U, A, imm) __extension__ ({ \
+ (__m512i)__builtin_ia32_selectw_512((__mmask32)(U), \
+ (__v32hi)_mm512_shufflehi_epi16((A), \
+ (imm)), \
+ (__v32hi)(__m512i)(W)); })
+
+#define _mm512_maskz_shufflehi_epi16(U, A, imm) __extension__ ({ \
+ (__m512i)__builtin_ia32_selectw_512((__mmask32)(U), \
+ (__v32hi)_mm512_shufflehi_epi16((A), \
+ (imm)), \
+ (__v32hi)_mm512_setzero_hi()); })
+
+#define _mm512_shufflelo_epi16(A, imm) __extension__ ({ \
+ (__m512i)__builtin_shufflevector((__v32hi)(__m512i)(A), \
+ (__v32hi)_mm512_undefined_epi32(), \
+ 0 + (((imm) >> 0) & 0x3), \
+ 0 + (((imm) >> 2) & 0x3), \
+ 0 + (((imm) >> 4) & 0x3), \
+ 0 + (((imm) >> 6) & 0x3), \
+ 4, 5, 6, 7, \
+ 8 + (((imm) >> 0) & 0x3), \
+ 8 + (((imm) >> 2) & 0x3), \
+ 8 + (((imm) >> 4) & 0x3), \
+ 8 + (((imm) >> 6) & 0x3), \
+ 12, 13, 14, 15, \
+ 16 + (((imm) >> 0) & 0x3), \
+ 16 + (((imm) >> 2) & 0x3), \
+ 16 + (((imm) >> 4) & 0x3), \
+ 16 + (((imm) >> 6) & 0x3), \
+ 20, 21, 22, 23, \
+ 24 + (((imm) >> 0) & 0x3), \
+ 24 + (((imm) >> 2) & 0x3), \
+ 24 + (((imm) >> 4) & 0x3), \
+ 24 + (((imm) >> 6) & 0x3), \
+ 28, 29, 30, 31); })
+
+
+#define _mm512_mask_shufflelo_epi16(W, U, A, imm) __extension__ ({ \
+ (__m512i)__builtin_ia32_selectw_512((__mmask32)(U), \
+ (__v32hi)_mm512_shufflelo_epi16((A), \
+ (imm)), \
+ (__v32hi)(__m512i)(W)); })
+
+
+#define _mm512_maskz_shufflelo_epi16(U, A, imm) __extension__ ({ \
+ (__m512i)__builtin_ia32_selectw_512((__mmask32)(U), \
+ (__v32hi)_mm512_shufflelo_epi16((A), \
+ (imm)), \
+ (__v32hi)_mm512_setzero_hi()); })
+
+static __inline__ __m512i __DEFAULT_FN_ATTRS
+_mm512_sllv_epi16 (__m512i __A, __m512i __B)
+{
+ return (__m512i) __builtin_ia32_psllv32hi_mask ((__v32hi) __A,
+ (__v32hi) __B,
+ (__v32hi)
+ _mm512_setzero_hi (),
+ (__mmask32) -1);
+}
+
+static __inline__ __m512i __DEFAULT_FN_ATTRS
+_mm512_mask_sllv_epi16 (__m512i __W, __mmask32 __U, __m512i __A,
+ __m512i __B)
+{
+ return (__m512i) __builtin_ia32_psllv32hi_mask ((__v32hi) __A,
+ (__v32hi) __B,
+ (__v32hi) __W,
+ (__mmask32) __U);
+}
+
+static __inline__ __m512i __DEFAULT_FN_ATTRS
+_mm512_maskz_sllv_epi16 (__mmask32 __U, __m512i __A, __m512i __B)
+{
+ return (__m512i) __builtin_ia32_psllv32hi_mask ((__v32hi) __A,
+ (__v32hi) __B,
+ (__v32hi)
+ _mm512_setzero_hi (),
+ (__mmask32) __U);
+}
+
+static __inline__ __m512i __DEFAULT_FN_ATTRS
+_mm512_sll_epi16 (__m512i __A, __m128i __B)
+{
+ return (__m512i) __builtin_ia32_psllw512_mask ((__v32hi) __A,
+ (__v8hi) __B,
+ (__v32hi)
+ _mm512_setzero_hi (),
+ (__mmask32) -1);
+}
+
+static __inline__ __m512i __DEFAULT_FN_ATTRS
+_mm512_mask_sll_epi16 (__m512i __W, __mmask32 __U, __m512i __A,
+ __m128i __B)
+{
+ return (__m512i) __builtin_ia32_psllw512_mask ((__v32hi) __A,
+ (__v8hi) __B,
+ (__v32hi) __W,
+ (__mmask32) __U);
+}
+
+static __inline__ __m512i __DEFAULT_FN_ATTRS
+_mm512_maskz_sll_epi16 (__mmask32 __U, __m512i __A, __m128i __B)
+{
+ return (__m512i) __builtin_ia32_psllw512_mask ((__v32hi) __A,
+ (__v8hi) __B,
+ (__v32hi)
+ _mm512_setzero_hi (),
+ (__mmask32) __U);
+}
+
+#define _mm512_slli_epi16(A, B) __extension__ ({ \
+ (__m512i)__builtin_ia32_psllwi512_mask((__v32hi)(__m512i)(A), (int)(B), \
+ (__v32hi)_mm512_setzero_hi(), \
+ (__mmask32)-1); })
+
+#define _mm512_mask_slli_epi16(W, U, A, B) __extension__ ({ \
+ (__m512i)__builtin_ia32_psllwi512_mask((__v32hi)(__m512i)(A), (int)(B), \
+ (__v32hi)(__m512i)(W), \
+ (__mmask32)(U)); })
+
+#define _mm512_maskz_slli_epi16(U, A, B) __extension__ ({ \
+ (__m512i)__builtin_ia32_psllwi512_mask((__v32hi)(__m512i)(A), (int)(B), \
+ (__v32hi)_mm512_setzero_hi(), \
+ (__mmask32)(U)); })
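+
+/* Note: sll_epi16 shifts every element by the scalar count in __B,
+   sllv_epi16 by a per-element count, and slli_epi16 by an immediate; logical
+   shift counts of 16 or more produce zero. */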
+
+#define _mm512_bslli_epi128(a, imm) __extension__ ({ \
+ (__m512i)__builtin_shufflevector( \
+ (__v64qi)_mm512_setzero_si512(), \
+ (__v64qi)(__m512i)(a), \
+ ((char)(imm)&0xF0) ? 0 : ((char)(imm)>0x0 ? 16 : 64) - (char)(imm), \
+ ((char)(imm)&0xF0) ? 1 : ((char)(imm)>0x1 ? 17 : 65) - (char)(imm), \
+ ((char)(imm)&0xF0) ? 2 : ((char)(imm)>0x2 ? 18 : 66) - (char)(imm), \
+ ((char)(imm)&0xF0) ? 3 : ((char)(imm)>0x3 ? 19 : 67) - (char)(imm), \
+ ((char)(imm)&0xF0) ? 4 : ((char)(imm)>0x4 ? 20 : 68) - (char)(imm), \
+ ((char)(imm)&0xF0) ? 5 : ((char)(imm)>0x5 ? 21 : 69) - (char)(imm), \
+ ((char)(imm)&0xF0) ? 6 : ((char)(imm)>0x6 ? 22 : 70) - (char)(imm), \
+ ((char)(imm)&0xF0) ? 7 : ((char)(imm)>0x7 ? 23 : 71) - (char)(imm), \
+ ((char)(imm)&0xF0) ? 8 : ((char)(imm)>0x8 ? 24 : 72) - (char)(imm), \
+ ((char)(imm)&0xF0) ? 9 : ((char)(imm)>0x9 ? 25 : 73) - (char)(imm), \
+ ((char)(imm)&0xF0) ? 10 : ((char)(imm)>0xA ? 26 : 74) - (char)(imm), \
+ ((char)(imm)&0xF0) ? 11 : ((char)(imm)>0xB ? 27 : 75) - (char)(imm), \
+ ((char)(imm)&0xF0) ? 12 : ((char)(imm)>0xC ? 28 : 76) - (char)(imm), \
+ ((char)(imm)&0xF0) ? 13 : ((char)(imm)>0xD ? 29 : 77) - (char)(imm), \
+ ((char)(imm)&0xF0) ? 14 : ((char)(imm)>0xE ? 30 : 78) - (char)(imm), \
+ ((char)(imm)&0xF0) ? 15 : ((char)(imm)>0xF ? 31 : 79) - (char)(imm), \
+ ((char)(imm)&0xF0) ? 16 : ((char)(imm)>0x0 ? 32 : 80) - (char)(imm), \
+ ((char)(imm)&0xF0) ? 17 : ((char)(imm)>0x1 ? 33 : 81) - (char)(imm), \
+ ((char)(imm)&0xF0) ? 18 : ((char)(imm)>0x2 ? 34 : 82) - (char)(imm), \
+ ((char)(imm)&0xF0) ? 19 : ((char)(imm)>0x3 ? 35 : 83) - (char)(imm), \
+ ((char)(imm)&0xF0) ? 20 : ((char)(imm)>0x4 ? 36 : 84) - (char)(imm), \
+ ((char)(imm)&0xF0) ? 21 : ((char)(imm)>0x5 ? 37 : 85) - (char)(imm), \
+ ((char)(imm)&0xF0) ? 22 : ((char)(imm)>0x6 ? 38 : 86) - (char)(imm), \
+ ((char)(imm)&0xF0) ? 23 : ((char)(imm)>0x7 ? 39 : 87) - (char)(imm), \
+ ((char)(imm)&0xF0) ? 24 : ((char)(imm)>0x8 ? 40 : 88) - (char)(imm), \
+ ((char)(imm)&0xF0) ? 25 : ((char)(imm)>0x9 ? 41 : 89) - (char)(imm), \
+ ((char)(imm)&0xF0) ? 26 : ((char)(imm)>0xA ? 42 : 90) - (char)(imm), \
+ ((char)(imm)&0xF0) ? 27 : ((char)(imm)>0xB ? 43 : 91) - (char)(imm), \
+ ((char)(imm)&0xF0) ? 28 : ((char)(imm)>0xC ? 44 : 92) - (char)(imm), \
+ ((char)(imm)&0xF0) ? 29 : ((char)(imm)>0xD ? 45 : 93) - (char)(imm), \
+ ((char)(imm)&0xF0) ? 30 : ((char)(imm)>0xE ? 46 : 94) - (char)(imm), \
+ ((char)(imm)&0xF0) ? 31 : ((char)(imm)>0xF ? 47 : 95) - (char)(imm), \
+ ((char)(imm)&0xF0) ? 32 : ((char)(imm)>0x0 ? 48 : 96) - (char)(imm), \
+ ((char)(imm)&0xF0) ? 33 : ((char)(imm)>0x1 ? 49 : 97) - (char)(imm), \
+ ((char)(imm)&0xF0) ? 34 : ((char)(imm)>0x2 ? 50 : 98) - (char)(imm), \
+ ((char)(imm)&0xF0) ? 35 : ((char)(imm)>0x3 ? 51 : 99) - (char)(imm), \
+ ((char)(imm)&0xF0) ? 36 : ((char)(imm)>0x4 ? 52 : 100) - (char)(imm), \
+ ((char)(imm)&0xF0) ? 37 : ((char)(imm)>0x5 ? 53 : 101) - (char)(imm), \
+ ((char)(imm)&0xF0) ? 38 : ((char)(imm)>0x6 ? 54 : 102) - (char)(imm), \
+ ((char)(imm)&0xF0) ? 39 : ((char)(imm)>0x7 ? 55 : 103) - (char)(imm), \
+ ((char)(imm)&0xF0) ? 40 : ((char)(imm)>0x8 ? 56 : 104) - (char)(imm), \
+ ((char)(imm)&0xF0) ? 41 : ((char)(imm)>0x9 ? 57 : 105) - (char)(imm), \
+ ((char)(imm)&0xF0) ? 42 : ((char)(imm)>0xA ? 58 : 106) - (char)(imm), \
+ ((char)(imm)&0xF0) ? 43 : ((char)(imm)>0xB ? 59 : 107) - (char)(imm), \
+ ((char)(imm)&0xF0) ? 44 : ((char)(imm)>0xC ? 60 : 108) - (char)(imm), \
+ ((char)(imm)&0xF0) ? 45 : ((char)(imm)>0xD ? 61 : 109) - (char)(imm), \
+ ((char)(imm)&0xF0) ? 46 : ((char)(imm)>0xE ? 62 : 110) - (char)(imm), \
+ ((char)(imm)&0xF0) ? 47 : ((char)(imm)>0xF ? 63 : 111) - (char)(imm), \
+ ((char)(imm)&0xF0) ? 48 : ((char)(imm)>0x0 ? 64 : 112) - (char)(imm), \
+ ((char)(imm)&0xF0) ? 49 : ((char)(imm)>0x1 ? 65 : 113) - (char)(imm), \
+ ((char)(imm)&0xF0) ? 50 : ((char)(imm)>0x2 ? 66 : 114) - (char)(imm), \
+ ((char)(imm)&0xF0) ? 51 : ((char)(imm)>0x3 ? 67 : 115) - (char)(imm), \
+ ((char)(imm)&0xF0) ? 52 : ((char)(imm)>0x4 ? 68 : 116) - (char)(imm), \
+ ((char)(imm)&0xF0) ? 53 : ((char)(imm)>0x5 ? 69 : 117) - (char)(imm), \
+ ((char)(imm)&0xF0) ? 54 : ((char)(imm)>0x6 ? 70 : 118) - (char)(imm), \
+ ((char)(imm)&0xF0) ? 55 : ((char)(imm)>0x7 ? 71 : 119) - (char)(imm), \
+ ((char)(imm)&0xF0) ? 56 : ((char)(imm)>0x8 ? 72 : 120) - (char)(imm), \
+ ((char)(imm)&0xF0) ? 57 : ((char)(imm)>0x9 ? 73 : 121) - (char)(imm), \
+ ((char)(imm)&0xF0) ? 58 : ((char)(imm)>0xA ? 74 : 122) - (char)(imm), \
+ ((char)(imm)&0xF0) ? 59 : ((char)(imm)>0xB ? 75 : 123) - (char)(imm), \
+ ((char)(imm)&0xF0) ? 60 : ((char)(imm)>0xC ? 76 : 124) - (char)(imm), \
+ ((char)(imm)&0xF0) ? 61 : ((char)(imm)>0xD ? 77 : 125) - (char)(imm), \
+ ((char)(imm)&0xF0) ? 62 : ((char)(imm)>0xE ? 78 : 126) - (char)(imm), \
+ ((char)(imm)&0xF0) ? 63 : ((char)(imm)>0xF ? 79 : 127) - (char)(imm)); })
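+
+/* Note: bslli_epi128 shifts each 128-bit lane left by (imm) bytes, shifting
+   in zeros; the (imm)&0xF0 guard above zeroes a lane whenever the count is
+   16 or more. */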
+
+static __inline__ __m512i __DEFAULT_FN_ATTRS
+_mm512_srlv_epi16 (__m512i __A, __m512i __B)
+{
+ return (__m512i) __builtin_ia32_psrlv32hi_mask ((__v32hi) __A,
+ (__v32hi) __B,
+ (__v32hi)
+ _mm512_setzero_hi (),
+ (__mmask32) -1);
+}
+
+static __inline__ __m512i __DEFAULT_FN_ATTRS
+_mm512_mask_srlv_epi16 (__m512i __W, __mmask32 __U, __m512i __A,
+ __m512i __B)
+{
+ return (__m512i) __builtin_ia32_psrlv32hi_mask ((__v32hi) __A,
+ (__v32hi) __B,
+ (__v32hi) __W,
+ (__mmask32) __U);
+}
+
+static __inline__ __m512i __DEFAULT_FN_ATTRS
+_mm512_maskz_srlv_epi16 (__mmask32 __U, __m512i __A, __m512i __B)
+{
+ return (__m512i) __builtin_ia32_psrlv32hi_mask ((__v32hi) __A,
+ (__v32hi) __B,
+ (__v32hi)
+ _mm512_setzero_hi (),
+ (__mmask32) __U);
+}
+
+static __inline__ __m512i __DEFAULT_FN_ATTRS
+_mm512_srav_epi16 (__m512i __A, __m512i __B)
+{
+ return (__m512i) __builtin_ia32_psrav32hi_mask ((__v32hi) __A,
+ (__v32hi) __B,
+ (__v32hi)
+ _mm512_setzero_hi (),
+ (__mmask32) -1);
+}
+
+static __inline__ __m512i __DEFAULT_FN_ATTRS
+_mm512_mask_srav_epi16 (__m512i __W, __mmask32 __U, __m512i __A,
+ __m512i __B)
+{
+ return (__m512i) __builtin_ia32_psrav32hi_mask ((__v32hi) __A,
+ (__v32hi) __B,
+ (__v32hi) __W,
+ (__mmask32) __U);
+}
+
+static __inline__ __m512i __DEFAULT_FN_ATTRS
+_mm512_maskz_srav_epi16 (__mmask32 __U, __m512i __A, __m512i __B)
+{
+ return (__m512i) __builtin_ia32_psrav32hi_mask ((__v32hi) __A,
+ (__v32hi) __B,
+ (__v32hi)
+ _mm512_setzero_hi (),
+ (__mmask32) __U);
+}
+
+static __inline__ __m512i __DEFAULT_FN_ATTRS
+_mm512_sra_epi16 (__m512i __A, __m128i __B)
+{
+ return (__m512i) __builtin_ia32_psraw512_mask ((__v32hi) __A,
+ (__v8hi) __B,
+ (__v32hi)
+ _mm512_setzero_hi (),
+ (__mmask32) -1);
+}
+
+static __inline__ __m512i __DEFAULT_FN_ATTRS
+_mm512_mask_sra_epi16 (__m512i __W, __mmask32 __U, __m512i __A,
+ __m128i __B)
+{
+ return (__m512i) __builtin_ia32_psraw512_mask ((__v32hi) __A,
+ (__v8hi) __B,
+ (__v32hi) __W,
+ (__mmask32) __U);
+}
+
+static __inline__ __m512i __DEFAULT_FN_ATTRS
+_mm512_maskz_sra_epi16 (__mmask32 __U, __m512i __A, __m128i __B)
+{
+ return (__m512i) __builtin_ia32_psraw512_mask ((__v32hi) __A,
+ (__v8hi) __B,
+ (__v32hi)
+ _mm512_setzero_hi (),
+ (__mmask32) __U);
+}
+
+#define _mm512_srai_epi16(A, B) __extension__ ({ \
+ (__m512i)__builtin_ia32_psrawi512_mask((__v32hi)(__m512i)(A), (int)(B), \
+ (__v32hi)_mm512_setzero_hi(), \
+ (__mmask32)-1); })
+
+#define _mm512_mask_srai_epi16(W, U, A, B) __extension__ ({ \
+ (__m512i)__builtin_ia32_psrawi512_mask((__v32hi)(__m512i)(A), (int)(B), \
+ (__v32hi)(__m512i)(W), \
+ (__mmask32)(U)); })
+
+#define _mm512_maskz_srai_epi16(U, A, B) __extension__ ({ \
+ (__m512i)__builtin_ia32_psrawi512_mask((__v32hi)(__m512i)(A), (int)(B), \
+ (__v32hi)_mm512_setzero_hi(), \
+ (__mmask32)(U)); })
+
+
+static __inline__ __m512i __DEFAULT_FN_ATTRS
+_mm512_srl_epi16 (__m512i __A, __m128i __B)
+{
+ return (__m512i) __builtin_ia32_psrlw512_mask ((__v32hi) __A,
+ (__v8hi) __B,
+ (__v32hi)
+ _mm512_setzero_hi (),
+ (__mmask32) -1);
+}
+
+static __inline__ __m512i __DEFAULT_FN_ATTRS
+_mm512_mask_srl_epi16 (__m512i __W, __mmask32 __U, __m512i __A,
+ __m128i __B)
+{
+ return (__m512i) __builtin_ia32_psrlw512_mask ((__v32hi) __A,
+ (__v8hi) __B,
+ (__v32hi) __W,
+ (__mmask32) __U);
+}
+
+static __inline__ __m512i __DEFAULT_FN_ATTRS
+_mm512_maskz_srl_epi16 (__mmask32 __U, __m512i __A, __m128i __B)
+{
+ return (__m512i) __builtin_ia32_psrlw512_mask ((__v32hi) __A,
+ (__v8hi) __B,
+ (__v32hi)
+ _mm512_setzero_hi (),
+ (__mmask32) __U);
+}
+
+#define _mm512_srli_epi16(A, imm) __extension__ ({ \
+ (__m512i)__builtin_ia32_psrlwi512_mask((__v32hi)(__m512i)(A), (int)(imm), \
+ (__v32hi)_mm512_setzero_hi(), \
+ (__mmask32)-1); })
+
+#define _mm512_mask_srli_epi16(W, U, A, imm) __extension__ ({ \
+ (__m512i)__builtin_ia32_psrlwi512_mask((__v32hi)(__m512i)(A), (int)(imm), \
+ (__v32hi)(__m512i)(W), \
+ (__mmask32)(U)); })
+
+#define _mm512_maskz_srli_epi16(U, A, imm) __extension__ ({ \
+ (__m512i)__builtin_ia32_psrlwi512_mask((__v32hi)(__m512i)(A), (int)(imm), \
+ (__v32hi)_mm512_setzero_hi(), \
+ (__mmask32)(U)); })
+
+#define _mm512_bsrli_epi128(a, imm) __extension__ ({ \
+ (__m512i)__builtin_shufflevector( \
+ (__v64qi)(__m512i)(a), \
+ (__v64qi)_mm512_setzero_si512(), \
+ ((char)(imm)&0xF0) ? 64 : (char)(imm) + ((char)(imm)>0xF ? 48 : 0), \
+ ((char)(imm)&0xF0) ? 65 : (char)(imm) + ((char)(imm)>0xE ? 49 : 1), \
+ ((char)(imm)&0xF0) ? 66 : (char)(imm) + ((char)(imm)>0xD ? 50 : 2), \
+ ((char)(imm)&0xF0) ? 67 : (char)(imm) + ((char)(imm)>0xC ? 51 : 3), \
+ ((char)(imm)&0xF0) ? 68 : (char)(imm) + ((char)(imm)>0xB ? 52 : 4), \
+ ((char)(imm)&0xF0) ? 69 : (char)(imm) + ((char)(imm)>0xA ? 53 : 5), \
+ ((char)(imm)&0xF0) ? 70 : (char)(imm) + ((char)(imm)>0x9 ? 54 : 6), \
+ ((char)(imm)&0xF0) ? 71 : (char)(imm) + ((char)(imm)>0x8 ? 55 : 7), \
+ ((char)(imm)&0xF0) ? 72 : (char)(imm) + ((char)(imm)>0x7 ? 56 : 8), \
+ ((char)(imm)&0xF0) ? 73 : (char)(imm) + ((char)(imm)>0x6 ? 57 : 9), \
+ ((char)(imm)&0xF0) ? 74 : (char)(imm) + ((char)(imm)>0x5 ? 58 : 10), \
+ ((char)(imm)&0xF0) ? 75 : (char)(imm) + ((char)(imm)>0x4 ? 59 : 11), \
+ ((char)(imm)&0xF0) ? 76 : (char)(imm) + ((char)(imm)>0x3 ? 60 : 12), \
+ ((char)(imm)&0xF0) ? 77 : (char)(imm) + ((char)(imm)>0x2 ? 61 : 13), \
+ ((char)(imm)&0xF0) ? 78 : (char)(imm) + ((char)(imm)>0x1 ? 62 : 14), \
+ ((char)(imm)&0xF0) ? 79 : (char)(imm) + ((char)(imm)>0x0 ? 63 : 15), \
+ ((char)(imm)&0xF0) ? 80 : (char)(imm) + ((char)(imm)>0xF ? 64 : 16), \
+ ((char)(imm)&0xF0) ? 81 : (char)(imm) + ((char)(imm)>0xE ? 65 : 17), \
+ ((char)(imm)&0xF0) ? 82 : (char)(imm) + ((char)(imm)>0xD ? 66 : 18), \
+ ((char)(imm)&0xF0) ? 83 : (char)(imm) + ((char)(imm)>0xC ? 67 : 19), \
+ ((char)(imm)&0xF0) ? 84 : (char)(imm) + ((char)(imm)>0xB ? 68 : 20), \
+ ((char)(imm)&0xF0) ? 85 : (char)(imm) + ((char)(imm)>0xA ? 69 : 21), \
+ ((char)(imm)&0xF0) ? 86 : (char)(imm) + ((char)(imm)>0x9 ? 70 : 22), \
+ ((char)(imm)&0xF0) ? 87 : (char)(imm) + ((char)(imm)>0x8 ? 71 : 23), \
+ ((char)(imm)&0xF0) ? 88 : (char)(imm) + ((char)(imm)>0x7 ? 72 : 24), \
+ ((char)(imm)&0xF0) ? 89 : (char)(imm) + ((char)(imm)>0x6 ? 73 : 25), \
+ ((char)(imm)&0xF0) ? 90 : (char)(imm) + ((char)(imm)>0x5 ? 74 : 26), \
+ ((char)(imm)&0xF0) ? 91 : (char)(imm) + ((char)(imm)>0x4 ? 75 : 27), \
+ ((char)(imm)&0xF0) ? 92 : (char)(imm) + ((char)(imm)>0x3 ? 76 : 28), \
+ ((char)(imm)&0xF0) ? 93 : (char)(imm) + ((char)(imm)>0x2 ? 77 : 29), \
+ ((char)(imm)&0xF0) ? 94 : (char)(imm) + ((char)(imm)>0x1 ? 78 : 30), \
+ ((char)(imm)&0xF0) ? 95 : (char)(imm) + ((char)(imm)>0x0 ? 79 : 31), \
+ ((char)(imm)&0xF0) ? 96 : (char)(imm) + ((char)(imm)>0xF ? 80 : 32), \
+ ((char)(imm)&0xF0) ? 97 : (char)(imm) + ((char)(imm)>0xE ? 81 : 33), \
+ ((char)(imm)&0xF0) ? 98 : (char)(imm) + ((char)(imm)>0xD ? 82 : 34), \
+ ((char)(imm)&0xF0) ? 99 : (char)(imm) + ((char)(imm)>0xC ? 83 : 35), \
+ ((char)(imm)&0xF0) ? 100 : (char)(imm) + ((char)(imm)>0xB ? 84 : 36), \
+ ((char)(imm)&0xF0) ? 101 : (char)(imm) + ((char)(imm)>0xA ? 85 : 37), \
+ ((char)(imm)&0xF0) ? 102 : (char)(imm) + ((char)(imm)>0x9 ? 86 : 38), \
+ ((char)(imm)&0xF0) ? 103 : (char)(imm) + ((char)(imm)>0x8 ? 87 : 39), \
+ ((char)(imm)&0xF0) ? 104 : (char)(imm) + ((char)(imm)>0x7 ? 88 : 40), \
+ ((char)(imm)&0xF0) ? 105 : (char)(imm) + ((char)(imm)>0x6 ? 89 : 41), \
+ ((char)(imm)&0xF0) ? 106 : (char)(imm) + ((char)(imm)>0x5 ? 90 : 42), \
+ ((char)(imm)&0xF0) ? 107 : (char)(imm) + ((char)(imm)>0x4 ? 91 : 43), \
+ ((char)(imm)&0xF0) ? 108 : (char)(imm) + ((char)(imm)>0x3 ? 92 : 44), \
+ ((char)(imm)&0xF0) ? 109 : (char)(imm) + ((char)(imm)>0x2 ? 93 : 45), \
+ ((char)(imm)&0xF0) ? 110 : (char)(imm) + ((char)(imm)>0x1 ? 94 : 46), \
+ ((char)(imm)&0xF0) ? 111 : (char)(imm) + ((char)(imm)>0x0 ? 95 : 47), \
+ ((char)(imm)&0xF0) ? 112 : (char)(imm) + ((char)(imm)>0xF ? 96 : 48), \
+ ((char)(imm)&0xF0) ? 113 : (char)(imm) + ((char)(imm)>0xE ? 97 : 49), \
+ ((char)(imm)&0xF0) ? 114 : (char)(imm) + ((char)(imm)>0xD ? 98 : 50), \
+ ((char)(imm)&0xF0) ? 115 : (char)(imm) + ((char)(imm)>0xC ? 99 : 51), \
+ ((char)(imm)&0xF0) ? 116 : (char)(imm) + ((char)(imm)>0xB ? 100 : 52), \
+ ((char)(imm)&0xF0) ? 117 : (char)(imm) + ((char)(imm)>0xA ? 101 : 53), \
+ ((char)(imm)&0xF0) ? 118 : (char)(imm) + ((char)(imm)>0x9 ? 102 : 54), \
+ ((char)(imm)&0xF0) ? 119 : (char)(imm) + ((char)(imm)>0x8 ? 103 : 55), \
+ ((char)(imm)&0xF0) ? 120 : (char)(imm) + ((char)(imm)>0x7 ? 104 : 56), \
+ ((char)(imm)&0xF0) ? 121 : (char)(imm) + ((char)(imm)>0x6 ? 105 : 57), \
+ ((char)(imm)&0xF0) ? 122 : (char)(imm) + ((char)(imm)>0x5 ? 106 : 58), \
+ ((char)(imm)&0xF0) ? 123 : (char)(imm) + ((char)(imm)>0x4 ? 107 : 59), \
+ ((char)(imm)&0xF0) ? 124 : (char)(imm) + ((char)(imm)>0x3 ? 108 : 60), \
+ ((char)(imm)&0xF0) ? 125 : (char)(imm) + ((char)(imm)>0x2 ? 109 : 61), \
+ ((char)(imm)&0xF0) ? 126 : (char)(imm) + ((char)(imm)>0x1 ? 110 : 62), \
+ ((char)(imm)&0xF0) ? 127 : (char)(imm) + ((char)(imm)>0x0 ? 111 : 63)); })
+
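+/* Note (editorial, not part of the upstream header): the shuffle above
+ * implements VPSRLDQ semantics -- each 128-bit lane of `a` is shifted
+ * right by `imm` bytes with zero fill.  Shuffle indices of 64 and above
+ * select from the zero vector, so bytes shifted in from beyond a lane,
+ * or any imm of 16 or more, come out as zero.
+ */
+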
+static __inline__ __m512i __DEFAULT_FN_ATTRS
+_mm512_mask_mov_epi16 (__m512i __W, __mmask32 __U, __m512i __A)
+{
+ return (__m512i) __builtin_ia32_selectw_512 ((__mmask32) __U,
+ (__v32hi) __A,
+ (__v32hi) __W);
+}
+
+static __inline__ __m512i __DEFAULT_FN_ATTRS
+_mm512_maskz_mov_epi16 (__mmask32 __U, __m512i __A)
+{
+ return (__m512i) __builtin_ia32_selectw_512 ((__mmask32) __U,
+ (__v32hi) __A,
+ (__v32hi) _mm512_setzero_hi ());
+}
+
+static __inline__ __m512i __DEFAULT_FN_ATTRS
+_mm512_mask_mov_epi8 (__m512i __W, __mmask64 __U, __m512i __A)
+{
+ return (__m512i) __builtin_ia32_selectb_512 ((__mmask64) __U,
+ (__v64qi) __A,
+ (__v64qi) __W);
+}
+
+static __inline__ __m512i __DEFAULT_FN_ATTRS
+_mm512_maskz_mov_epi8 (__mmask64 __U, __m512i __A)
+{
+ return (__m512i) __builtin_ia32_selectb_512 ((__mmask64) __U,
+ (__v64qi) __A,
+ (__v64qi) _mm512_setzero_hi ());
+}
+
+static __inline__ __m512i __DEFAULT_FN_ATTRS
+_mm512_mask_set1_epi8 (__m512i __O, __mmask64 __M, char __A)
+{
+ return (__m512i) __builtin_ia32_pbroadcastb512_gpr_mask (__A,
+ (__v64qi) __O,
+ __M);
+}
+
+static __inline__ __m512i __DEFAULT_FN_ATTRS
+_mm512_maskz_set1_epi8 (__mmask64 __M, char __A)
+{
+ return (__m512i) __builtin_ia32_pbroadcastb512_gpr_mask (__A,
+ (__v64qi)
+ _mm512_setzero_qi(),
+ __M);
+}
+
+static __inline__ __mmask64 __DEFAULT_FN_ATTRS
+_mm512_kunpackd (__mmask64 __A, __mmask64 __B)
+{
+ return (__mmask64) __builtin_ia32_kunpckdi ((__mmask64) __A,
+ (__mmask64) __B);
+}
+
+static __inline__ __mmask32 __DEFAULT_FN_ATTRS
+_mm512_kunpackw (__mmask32 __A, __mmask32 __B)
+{
+ return (__mmask32) __builtin_ia32_kunpcksi ((__mmask32) __A,
+ (__mmask32) __B);
+}
+
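+/* Note (editorial, not part of the upstream header): the kunpack forms
+ * concatenate the low halves of the two source masks -- the low half of
+ * the result comes from __B and the high half from __A:
+ *
+ *   _mm512_kunpackw (a, b) == ((a & 0xFFFF) << 16) | (b & 0xFFFF)
+ *   _mm512_kunpackd (a, b) == ((a & 0xFFFFFFFFull) << 32) | (b & 0xFFFFFFFFull)
+ */
+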
+static __inline__ __m512i __DEFAULT_FN_ATTRS
+_mm512_mask_loadu_epi16 (__m512i __W, __mmask32 __U, void const *__P)
+{
+ return (__m512i) __builtin_ia32_loaddquhi512_mask ((__v32hi *) __P,
+ (__v32hi) __W,
+ (__mmask32) __U);
+}
+
+static __inline__ __m512i __DEFAULT_FN_ATTRS
+_mm512_maskz_loadu_epi16 (__mmask32 __U, void const *__P)
+{
+ return (__m512i) __builtin_ia32_loaddquhi512_mask ((__v32hi *) __P,
+ (__v32hi)
+ _mm512_setzero_hi (),
+ (__mmask32) __U);
+}
+
+static __inline__ __m512i __DEFAULT_FN_ATTRS
+_mm512_mask_loadu_epi8 (__m512i __W, __mmask64 __U, void const *__P)
+{
+ return (__m512i) __builtin_ia32_loaddquqi512_mask ((__v64qi *) __P,
+ (__v64qi) __W,
+ (__mmask64) __U);
+}
+
+static __inline__ __m512i __DEFAULT_FN_ATTRS
+_mm512_maskz_loadu_epi8 (__mmask64 __U, void const *__P)
+{
+ return (__m512i) __builtin_ia32_loaddquqi512_mask ((__v64qi *) __P,
+ (__v64qi)
+ _mm512_setzero_hi (),
+ (__mmask64) __U);
+}
+static __inline__ void __DEFAULT_FN_ATTRS
+
+_mm512_mask_storeu_epi16 (void *__P, __mmask32 __U, __m512i __A)
+{
+ __builtin_ia32_storedquhi512_mask ((__v32hi *) __P,
+ (__v32hi) __A,
+ (__mmask32) __U);
+}
+
+static __inline__ void __DEFAULT_FN_ATTRS
+_mm512_mask_storeu_epi8 (void *__P, __mmask64 __U, __m512i __A)
+{
+ __builtin_ia32_storedquqi512_mask ((__v64qi *) __P,
+ (__v64qi) __A,
+ (__mmask64) __U);
+}
+
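+/* Illustrative sketch (editorial; the __example_* name is hypothetical,
+ * not an Intel API): a masked copy of the first n 16-bit elements
+ * (0 <= n <= 32) built from the unaligned masked load/store above. */
+static __inline__ void __DEFAULT_FN_ATTRS
+__example_copy_first_n_epi16 (void *__dst, void const *__src, int __n)
+{
+  /* Build a mask with the low __n bits set; 1U << 32 would be undefined
+     behavior, so saturate to a full mask instead. */
+  __mmask32 __m = (__n >= 32) ? (__mmask32) -1
+                              : (__mmask32) ((1U << __n) - 1);
+  __m512i __v = _mm512_maskz_loadu_epi16 (__m, __src);
+  _mm512_mask_storeu_epi16 (__dst, __m, __v);
+}
+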
+static __inline__ __mmask64 __DEFAULT_FN_ATTRS
+_mm512_test_epi8_mask (__m512i __A, __m512i __B)
+{
+ return (__mmask64) __builtin_ia32_ptestmb512 ((__v64qi) __A,
+ (__v64qi) __B,
+ (__mmask64) -1);
+}
+
+static __inline__ __mmask64 __DEFAULT_FN_ATTRS
+_mm512_mask_test_epi8_mask (__mmask64 __U, __m512i __A, __m512i __B)
+{
+ return (__mmask64) __builtin_ia32_ptestmb512 ((__v64qi) __A,
+ (__v64qi) __B, __U);
+}
+
+static __inline__ __mmask32 __DEFAULT_FN_ATTRS
+_mm512_test_epi16_mask (__m512i __A, __m512i __B)
+{
+ return (__mmask32) __builtin_ia32_ptestmw512 ((__v32hi) __A,
+ (__v32hi) __B,
+ (__mmask32) -1);
+}
+
+static __inline__ __mmask32 __DEFAULT_FN_ATTRS
+_mm512_mask_test_epi16_mask (__mmask32 __U, __m512i __A, __m512i __B)
+{
+ return (__mmask32) __builtin_ia32_ptestmw512 ((__v32hi) __A,
+ (__v32hi) __B, __U);
+}
+
+static __inline__ __mmask64 __DEFAULT_FN_ATTRS
+_mm512_testn_epi8_mask (__m512i __A, __m512i __B)
+{
+ return (__mmask64) __builtin_ia32_ptestnmb512 ((__v64qi) __A,
+ (__v64qi) __B,
+ (__mmask64) -1);
+}
+
+static __inline__ __mmask64 __DEFAULT_FN_ATTRS
+_mm512_mask_testn_epi8_mask (__mmask64 __U, __m512i __A, __m512i __B)
+{
+ return (__mmask64) __builtin_ia32_ptestnmb512 ((__v64qi) __A,
+ (__v64qi) __B, __U);
+}
+
+static __inline__ __mmask32 __DEFAULT_FN_ATTRS
+_mm512_testn_epi16_mask (__m512i __A, __m512i __B)
+{
+ return (__mmask32) __builtin_ia32_ptestnmw512 ((__v32hi) __A,
+ (__v32hi) __B,
+ (__mmask32) -1);
+}
+
+static __inline__ __mmask32 __DEFAULT_FN_ATTRS
+_mm512_mask_testn_epi16_mask (__mmask32 __U, __m512i __A, __m512i __B)
+{
+ return (__mmask32) __builtin_ia32_ptestnmw512 ((__v32hi) __A,
+ (__v32hi) __B, __U);
+}
+
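+/* Note (editorial, not part of the upstream header): the `test` forms set
+ * a mask bit where (__A & __B) is non-zero in that element, and the
+ * `testn` forms set it where (__A & __B) is zero; for the same inputs the
+ * two results are bitwise complements within the active lanes.
+ */
+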
+static __inline__ __mmask64 __DEFAULT_FN_ATTRS
+_mm512_movepi8_mask (__m512i __A)
+{
+ return (__mmask64) __builtin_ia32_cvtb2mask512 ((__v64qi) __A);
+}
+
+static __inline__ __mmask32 __DEFAULT_FN_ATTRS
+_mm512_movepi16_mask (__m512i __A)
+{
+ return (__mmask32) __builtin_ia32_cvtw2mask512 ((__v32hi) __A);
+}
+
+static __inline__ __m512i __DEFAULT_FN_ATTRS
+_mm512_movm_epi8 (__mmask64 __A)
+{
+ return (__m512i) __builtin_ia32_cvtmask2b512 (__A);
+}
+
+static __inline__ __m512i __DEFAULT_FN_ATTRS
+_mm512_movm_epi16 (__mmask32 __A)
+{
+ return (__m512i) __builtin_ia32_cvtmask2w512 (__A);
+}
+
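+/* Note (editorial, not part of the upstream header): movepi*_mask gathers
+ * the sign (most significant) bit of each element into a mask register,
+ * and movm_epi* is its inverse, expanding each mask bit into an all-ones
+ * or all-zeros element; _mm512_movepi8_mask (_mm512_movm_epi8 (k)) == k.
+ */
+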
+static __inline__ __m512i __DEFAULT_FN_ATTRS
+_mm512_broadcastb_epi8 (__m128i __A)
+{
+ return (__m512i)__builtin_shufflevector((__v16qi) __A,
+ (__v16qi)_mm_undefined_si128(),
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0);
+}
+
+static __inline__ __m512i __DEFAULT_FN_ATTRS
+_mm512_mask_broadcastb_epi8 (__m512i __O, __mmask64 __M, __m128i __A)
+{
+ return (__m512i)__builtin_ia32_selectb_512(__M,
+ (__v64qi) _mm512_broadcastb_epi8(__A),
+ (__v64qi) __O);
+}
+
+static __inline__ __m512i __DEFAULT_FN_ATTRS
+_mm512_maskz_broadcastb_epi8 (__mmask64 __M, __m128i __A)
+{
+ return (__m512i)__builtin_ia32_selectb_512(__M,
+ (__v64qi) _mm512_broadcastb_epi8(__A),
+ (__v64qi) _mm512_setzero_si512());
+}
+
+static __inline__ __m512i __DEFAULT_FN_ATTRS
+_mm512_mask_set1_epi16 (__m512i __O, __mmask32 __M, short __A)
+{
+ return (__m512i) __builtin_ia32_pbroadcastw512_gpr_mask (__A,
+ (__v32hi) __O,
+ __M);
+}
+
+static __inline__ __m512i __DEFAULT_FN_ATTRS
+_mm512_maskz_set1_epi16 (__mmask32 __M, short __A)
+{
+ return (__m512i) __builtin_ia32_pbroadcastw512_gpr_mask (__A,
+ (__v32hi) _mm512_setzero_hi(),
+ __M);
+}
+
+static __inline__ __m512i __DEFAULT_FN_ATTRS
+_mm512_broadcastw_epi16 (__m128i __A)
+{
+ return (__m512i)__builtin_shufflevector((__v8hi) __A,
+ (__v8hi)_mm_undefined_si128(),
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0);
+}
+
+static __inline__ __m512i __DEFAULT_FN_ATTRS
+_mm512_mask_broadcastw_epi16 (__m512i __O, __mmask32 __M, __m128i __A)
+{
+ return (__m512i)__builtin_ia32_selectw_512(__M,
+ (__v32hi) _mm512_broadcastw_epi16(__A),
+ (__v32hi) __O);
+}
+
+static __inline__ __m512i __DEFAULT_FN_ATTRS
+_mm512_maskz_broadcastw_epi16 (__mmask32 __M, __m128i __A)
+{
+ return (__m512i)__builtin_ia32_selectw_512(__M,
+ (__v32hi) _mm512_broadcastw_epi16(__A),
+ (__v32hi) _mm512_setzero_si512());
+}
+
+static __inline__ __m512i __DEFAULT_FN_ATTRS
+_mm512_permutexvar_epi16 (__m512i __A, __m512i __B)
+{
+ return (__m512i) __builtin_ia32_permvarhi512_mask ((__v32hi) __B,
+ (__v32hi) __A,
+ (__v32hi) _mm512_undefined_epi32 (),
+ (__mmask32) -1);
+}
+
+static __inline__ __m512i __DEFAULT_FN_ATTRS
+_mm512_maskz_permutexvar_epi16 (__mmask32 __M, __m512i __A,
+ __m512i __B)
+{
+ return (__m512i) __builtin_ia32_permvarhi512_mask ((__v32hi) __B,
+ (__v32hi) __A,
+ (__v32hi) _mm512_setzero_hi(),
+ (__mmask32) __M);
+}
+
+static __inline__ __m512i __DEFAULT_FN_ATTRS
+_mm512_mask_permutexvar_epi16 (__m512i __W, __mmask32 __M, __m512i __A,
+ __m512i __B)
+{
+ return (__m512i) __builtin_ia32_permvarhi512_mask ((__v32hi) __B,
+ (__v32hi) __A,
+ (__v32hi) __W,
+ (__mmask32) __M);
+}
+
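+/* Note (editorial, not part of the upstream header): in the permutexvar
+ * forms the first vector operand supplies the word indices and the second
+ * the data, so result word i is __B[__A[i] & 31].  The builtin takes its
+ * operands in the opposite (data, index) order, hence the swap above.
+ */
+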
+#define _mm512_alignr_epi8(A, B, N) __extension__ ({\
+ (__m512i)__builtin_ia32_palignr512_mask((__v64qi)(__m512i)(A), \
+ (__v64qi)(__m512i)(B), (int)(N), \
+                                          (__v64qi)_mm512_undefined_epi32(), \
+ (__mmask64)-1); })
+
+#define _mm512_mask_alignr_epi8(W, U, A, B, N) __extension__({\
+ (__m512i)__builtin_ia32_palignr512_mask((__v64qi)(__m512i)(A), \
+ (__v64qi)(__m512i)(B), (int)(N), \
+ (__v64qi)(__m512i)(W), \
+ (__mmask64)(U)); })
+
+#define _mm512_maskz_alignr_epi8(U, A, B, N) __extension__({\
+ (__m512i)__builtin_ia32_palignr512_mask((__v64qi)(__m512i)(A), \
+ (__v64qi)(__m512i)(B), (int)(N), \
+ (__v64qi)_mm512_setzero_si512(), \
+ (__mmask64)(U)); })
+
+#define _mm512_dbsad_epu8(A, B, imm) __extension__ ({\
+ (__m512i)__builtin_ia32_dbpsadbw512_mask((__v64qi)(__m512i)(A), \
+ (__v64qi)(__m512i)(B), (int)(imm), \
+ (__v32hi)_mm512_undefined_epi32(), \
+ (__mmask32)-1); })
+
+#define _mm512_mask_dbsad_epu8(W, U, A, B, imm) __extension__ ({\
+ (__m512i)__builtin_ia32_dbpsadbw512_mask((__v64qi)(__m512i)(A), \
+ (__v64qi)(__m512i)(B), (int)(imm), \
+ (__v32hi)(__m512i)(W), \
+ (__mmask32)(U)); })
+
+#define _mm512_maskz_dbsad_epu8(U, A, B, imm) __extension__ ({\
+ (__m512i)__builtin_ia32_dbpsadbw512_mask((__v64qi)(__m512i)(A), \
+ (__v64qi)(__m512i)(B), (int)(imm), \
+ (__v32hi)_mm512_setzero_hi(), \
+ (__mmask32)(U)); })
+
+static __inline__ __m512i __DEFAULT_FN_ATTRS
+_mm512_sad_epu8 (__m512i __A, __m512i __B)
+{
+ return (__m512i) __builtin_ia32_psadbw512 ((__v64qi) __A,
+ (__v64qi) __B);
+}
+
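+/* Note (editorial, not part of the upstream header): _mm512_sad_epu8
+ * computes, for each aligned group of eight bytes, the sum of absolute
+ * differences against the corresponding bytes of the other operand,
+ * leaving one 16-bit total in the low bits of each 64-bit lane.  The
+ * dbsad forms compute SADs of 4-byte blocks after `imm` selects a dword
+ * shuffle of __B within each 128-bit lane; see the Intel SDM entry for
+ * VDBPSADBW for the exact layout.
+ */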
+
+
+#undef __DEFAULT_FN_ATTRS
+
+#endif
diff --git a/current/clang-include/avx512cdintrin.h b/current/clang-include/avx512cdintrin.h
new file mode 100644
index 0000000..23c4235
--- /dev/null
+++ b/current/clang-include/avx512cdintrin.h
@@ -0,0 +1,144 @@
+/*===------------- avx512cdintrin.h - AVX512CD intrinsics ------------------===
+ *
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+ * THE SOFTWARE.
+ *
+ *===-----------------------------------------------------------------------===
+ */
+#ifndef __IMMINTRIN_H
+#error "Never use <avx512cdintrin.h> directly; include <immintrin.h> instead."
+#endif
+
+#ifndef __AVX512CDINTRIN_H
+#define __AVX512CDINTRIN_H
+
+/* Define the default attributes for the functions in this file. */
+#define __DEFAULT_FN_ATTRS __attribute__((__always_inline__, __nodebug__, __target__("avx512cd")))
+
+static __inline__ __m512i __DEFAULT_FN_ATTRS
+_mm512_conflict_epi64 (__m512i __A)
+{
+ return (__m512i) __builtin_ia32_vpconflictdi_512_mask ((__v8di) __A,
+ (__v8di) _mm512_setzero_si512 (),
+ (__mmask8) -1);
+}
+
+static __inline__ __m512i __DEFAULT_FN_ATTRS
+_mm512_mask_conflict_epi64 (__m512i __W, __mmask8 __U, __m512i __A)
+{
+ return (__m512i) __builtin_ia32_vpconflictdi_512_mask ((__v8di) __A,
+ (__v8di) __W,
+ (__mmask8) __U);
+}
+
+static __inline__ __m512i __DEFAULT_FN_ATTRS
+_mm512_maskz_conflict_epi64 (__mmask8 __U, __m512i __A)
+{
+ return (__m512i) __builtin_ia32_vpconflictdi_512_mask ((__v8di) __A,
+ (__v8di) _mm512_setzero_si512 (),
+ (__mmask8) __U);
+}
+
+static __inline__ __m512i __DEFAULT_FN_ATTRS
+_mm512_conflict_epi32 (__m512i __A)
+{
+ return (__m512i) __builtin_ia32_vpconflictsi_512_mask ((__v16si) __A,
+ (__v16si) _mm512_setzero_si512 (),
+ (__mmask16) -1);
+}
+
+static __inline__ __m512i __DEFAULT_FN_ATTRS
+_mm512_mask_conflict_epi32 (__m512i __W, __mmask16 __U, __m512i __A)
+{
+ return (__m512i) __builtin_ia32_vpconflictsi_512_mask ((__v16si) __A,
+ (__v16si) __W,
+ (__mmask16) __U);
+}
+
+static __inline__ __m512i __DEFAULT_FN_ATTRS
+_mm512_maskz_conflict_epi32 (__mmask16 __U, __m512i __A)
+{
+ return (__m512i) __builtin_ia32_vpconflictsi_512_mask ((__v16si) __A,
+ (__v16si) _mm512_setzero_si512 (),
+ (__mmask16) __U);
+}
+
+static __inline__ __m512i __DEFAULT_FN_ATTRS
+_mm512_lzcnt_epi32 (__m512i __A)
+{
+ return (__m512i) __builtin_ia32_vplzcntd_512_mask ((__v16si) __A,
+ (__v16si) _mm512_setzero_si512 (),
+ (__mmask16) -1);
+}
+
+static __inline__ __m512i __DEFAULT_FN_ATTRS
+_mm512_mask_lzcnt_epi32 (__m512i __W, __mmask16 __U, __m512i __A)
+{
+ return (__m512i) __builtin_ia32_vplzcntd_512_mask ((__v16si) __A,
+ (__v16si) __W,
+ (__mmask16) __U);
+}
+
+static __inline__ __m512i __DEFAULT_FN_ATTRS
+_mm512_maskz_lzcnt_epi32 (__mmask16 __U, __m512i __A)
+{
+ return (__m512i) __builtin_ia32_vplzcntd_512_mask ((__v16si) __A,
+ (__v16si) _mm512_setzero_si512 (),
+ (__mmask16) __U);
+}
+
+static __inline__ __m512i __DEFAULT_FN_ATTRS
+_mm512_lzcnt_epi64 (__m512i __A)
+{
+ return (__m512i) __builtin_ia32_vplzcntq_512_mask ((__v8di) __A,
+ (__v8di) _mm512_setzero_si512 (),
+ (__mmask8) -1);
+}
+
+static __inline__ __m512i __DEFAULT_FN_ATTRS
+_mm512_mask_lzcnt_epi64 (__m512i __W, __mmask8 __U, __m512i __A)
+{
+ return (__m512i) __builtin_ia32_vplzcntq_512_mask ((__v8di) __A,
+ (__v8di) __W,
+ (__mmask8) __U);
+}
+
+static __inline__ __m512i __DEFAULT_FN_ATTRS
+_mm512_maskz_lzcnt_epi64 (__mmask8 __U, __m512i __A)
+{
+ return (__m512i) __builtin_ia32_vplzcntq_512_mask ((__v8di) __A,
+ (__v8di) _mm512_setzero_si512 (),
+ (__mmask8) __U);
+}
+
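+/* Illustrative sketch (editorial; the __example_* name is hypothetical,
+ * not an Intel API): floor(log2 x) for non-zero 32-bit lanes via the
+ * leading-zero count.  _mm512_set1_epi32 and _mm512_sub_epi32 come from
+ * <avx512fintrin.h>, which the AVX512CD target implies. */
+static __inline__ __m512i __DEFAULT_FN_ATTRS
+__example_ilog2_epu32 (__m512i __v)
+{
+  /* For v != 0: floor(log2 v) == 31 - lzcnt(v). */
+  return _mm512_sub_epi32 (_mm512_set1_epi32 (31),
+                           _mm512_lzcnt_epi32 (__v));
+}
+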
+static __inline__ __m512i __DEFAULT_FN_ATTRS
+_mm512_broadcastmb_epi64 (__mmask8 __A)
+{
+ return (__m512i) __builtin_ia32_broadcastmb512 (__A);
+}
+
+static __inline__ __m512i __DEFAULT_FN_ATTRS
+_mm512_broadcastmw_epi32 (__mmask16 __A)
+{
+ return (__m512i) __builtin_ia32_broadcastmw512 (__A);
+}
+
+#undef __DEFAULT_FN_ATTRS
+
+#endif
diff --git a/current/clang-include/avx512dqintrin.h b/current/clang-include/avx512dqintrin.h
new file mode 100644
index 0000000..13665e4
--- /dev/null
+++ b/current/clang-include/avx512dqintrin.h
@@ -0,0 +1,1331 @@
+/*===---- avx512dqintrin.h - AVX512DQ intrinsics ---------------------------===
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+ * THE SOFTWARE.
+ *
+ *===-----------------------------------------------------------------------===
+ */
+
+#ifndef __IMMINTRIN_H
+#error "Never use <avx512dqintrin.h> directly; include <immintrin.h> instead."
+#endif
+
+#ifndef __AVX512DQINTRIN_H
+#define __AVX512DQINTRIN_H
+
+/* Define the default attributes for the functions in this file. */
+#define __DEFAULT_FN_ATTRS __attribute__((__always_inline__, __nodebug__, __target__("avx512dq")))
+
+static __inline__ __m512i __DEFAULT_FN_ATTRS
+_mm512_mullo_epi64 (__m512i __A, __m512i __B) {
+ return (__m512i) ((__v8du) __A * (__v8du) __B);
+}
+
+static __inline__ __m512i __DEFAULT_FN_ATTRS
+_mm512_mask_mullo_epi64 (__m512i __W, __mmask8 __U, __m512i __A, __m512i __B) {
+ return (__m512i) __builtin_ia32_pmullq512_mask ((__v8di) __A,
+ (__v8di) __B,
+ (__v8di) __W,
+ (__mmask8) __U);
+}
+
+static __inline__ __m512i __DEFAULT_FN_ATTRS
+_mm512_maskz_mullo_epi64 (__mmask8 __U, __m512i __A, __m512i __B) {
+ return (__m512i) __builtin_ia32_pmullq512_mask ((__v8di) __A,
+ (__v8di) __B,
+ (__v8di)
+ _mm512_setzero_si512 (),
+ (__mmask8) __U);
+}
+
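+/* Illustrative sketch (editorial; the __example_* name is hypothetical,
+ * not an Intel API): AVX512DQ adds a true 64-bit low multiply (VPMULLQ),
+ * so squaring eight packed 64-bit integers is a single operation rather
+ * than a sequence of 32-bit partial products. */
+static __inline__ __m512i __DEFAULT_FN_ATTRS
+__example_square_epi64 (__m512i __v)
+{
+  return _mm512_mullo_epi64 (__v, __v);  /* low 64 bits of v*v per lane */
+}
+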
+static __inline__ __m512d __DEFAULT_FN_ATTRS
+_mm512_xor_pd (__m512d __A, __m512d __B) {
+ return (__m512d) ((__v8du) __A ^ (__v8du) __B);
+}
+
+static __inline__ __m512d __DEFAULT_FN_ATTRS
+_mm512_mask_xor_pd (__m512d __W, __mmask8 __U, __m512d __A, __m512d __B) {
+ return (__m512d) __builtin_ia32_xorpd512_mask ((__v8df) __A,
+ (__v8df) __B,
+ (__v8df) __W,
+ (__mmask8) __U);
+}
+
+static __inline__ __m512d __DEFAULT_FN_ATTRS
+_mm512_maskz_xor_pd (__mmask8 __U, __m512d __A, __m512d __B) {
+ return (__m512d) __builtin_ia32_xorpd512_mask ((__v8df) __A,
+ (__v8df) __B,
+ (__v8df)
+ _mm512_setzero_pd (),
+ (__mmask8) __U);
+}
+
+static __inline__ __m512 __DEFAULT_FN_ATTRS
+_mm512_xor_ps (__m512 __A, __m512 __B) {
+ return (__m512) ((__v16su) __A ^ (__v16su) __B);
+}
+
+static __inline__ __m512 __DEFAULT_FN_ATTRS
+_mm512_mask_xor_ps (__m512 __W, __mmask16 __U, __m512 __A, __m512 __B) {
+ return (__m512) __builtin_ia32_xorps512_mask ((__v16sf) __A,
+ (__v16sf) __B,
+ (__v16sf) __W,
+ (__mmask16) __U);
+}
+
+static __inline__ __m512 __DEFAULT_FN_ATTRS
+_mm512_maskz_xor_ps (__mmask16 __U, __m512 __A, __m512 __B) {
+ return (__m512) __builtin_ia32_xorps512_mask ((__v16sf) __A,
+ (__v16sf) __B,
+ (__v16sf)
+ _mm512_setzero_ps (),
+ (__mmask16) __U);
+}
+
+static __inline__ __m512d __DEFAULT_FN_ATTRS
+_mm512_or_pd (__m512d __A, __m512d __B) {
+ return (__m512d) ((__v8du) __A | (__v8du) __B);
+}
+
+static __inline__ __m512d __DEFAULT_FN_ATTRS
+_mm512_mask_or_pd (__m512d __W, __mmask8 __U, __m512d __A, __m512d __B) {
+ return (__m512d) __builtin_ia32_orpd512_mask ((__v8df) __A,
+ (__v8df) __B,
+ (__v8df) __W,
+ (__mmask8) __U);
+}
+
+static __inline__ __m512d __DEFAULT_FN_ATTRS
+_mm512_maskz_or_pd (__mmask8 __U, __m512d __A, __m512d __B) {
+ return (__m512d) __builtin_ia32_orpd512_mask ((__v8df) __A,
+ (__v8df) __B,
+ (__v8df)
+ _mm512_setzero_pd (),
+ (__mmask8) __U);
+}
+
+static __inline__ __m512 __DEFAULT_FN_ATTRS
+_mm512_or_ps (__m512 __A, __m512 __B) {
+ return (__m512) ((__v16su) __A | (__v16su) __B);
+}
+
+static __inline__ __m512 __DEFAULT_FN_ATTRS
+_mm512_mask_or_ps (__m512 __W, __mmask16 __U, __m512 __A, __m512 __B) {
+ return (__m512) __builtin_ia32_orps512_mask ((__v16sf) __A,
+ (__v16sf) __B,
+ (__v16sf) __W,
+ (__mmask16) __U);
+}
+
+static __inline__ __m512 __DEFAULT_FN_ATTRS
+_mm512_maskz_or_ps (__mmask16 __U, __m512 __A, __m512 __B) {
+ return (__m512) __builtin_ia32_orps512_mask ((__v16sf) __A,
+ (__v16sf) __B,
+ (__v16sf)
+ _mm512_setzero_ps (),
+ (__mmask16) __U);
+}
+
+static __inline__ __m512d __DEFAULT_FN_ATTRS
+_mm512_and_pd (__m512d __A, __m512d __B) {
+ return (__m512d) ((__v8du) __A & (__v8du) __B);
+}
+
+static __inline__ __m512d __DEFAULT_FN_ATTRS
+_mm512_mask_and_pd (__m512d __W, __mmask8 __U, __m512d __A, __m512d __B) {
+ return (__m512d) __builtin_ia32_andpd512_mask ((__v8df) __A,
+ (__v8df) __B,
+ (__v8df) __W,
+ (__mmask8) __U);
+}
+
+static __inline__ __m512d __DEFAULT_FN_ATTRS
+_mm512_maskz_and_pd (__mmask8 __U, __m512d __A, __m512d __B) {
+ return (__m512d) __builtin_ia32_andpd512_mask ((__v8df) __A,
+ (__v8df) __B,
+ (__v8df)
+ _mm512_setzero_pd (),
+ (__mmask8) __U);
+}
+
+static __inline__ __m512 __DEFAULT_FN_ATTRS
+_mm512_and_ps (__m512 __A, __m512 __B) {
+ return (__m512) ((__v16su) __A & (__v16su) __B);
+}
+
+static __inline__ __m512 __DEFAULT_FN_ATTRS
+_mm512_mask_and_ps (__m512 __W, __mmask16 __U, __m512 __A, __m512 __B) {
+ return (__m512) __builtin_ia32_andps512_mask ((__v16sf) __A,
+ (__v16sf) __B,
+ (__v16sf) __W,
+ (__mmask16) __U);
+}
+
+static __inline__ __m512 __DEFAULT_FN_ATTRS
+_mm512_maskz_and_ps (__mmask16 __U, __m512 __A, __m512 __B) {
+ return (__m512) __builtin_ia32_andps512_mask ((__v16sf) __A,
+ (__v16sf) __B,
+ (__v16sf)
+ _mm512_setzero_ps (),
+ (__mmask16) __U);
+}
+
+static __inline__ __m512d __DEFAULT_FN_ATTRS
+_mm512_andnot_pd (__m512d __A, __m512d __B) {
+ return (__m512d) __builtin_ia32_andnpd512_mask ((__v8df) __A,
+ (__v8df) __B,
+ (__v8df)
+ _mm512_setzero_pd (),
+ (__mmask8) -1);
+}
+
+static __inline__ __m512d __DEFAULT_FN_ATTRS
+_mm512_mask_andnot_pd (__m512d __W, __mmask8 __U, __m512d __A, __m512d __B) {
+ return (__m512d) __builtin_ia32_andnpd512_mask ((__v8df) __A,
+ (__v8df) __B,
+ (__v8df) __W,
+ (__mmask8) __U);
+}
+
+static __inline__ __m512d __DEFAULT_FN_ATTRS
+_mm512_maskz_andnot_pd (__mmask8 __U, __m512d __A, __m512d __B) {
+ return (__m512d) __builtin_ia32_andnpd512_mask ((__v8df) __A,
+ (__v8df) __B,
+ (__v8df)
+ _mm512_setzero_pd (),
+ (__mmask8) __U);
+}
+
+static __inline__ __m512 __DEFAULT_FN_ATTRS
+_mm512_andnot_ps (__m512 __A, __m512 __B) {
+ return (__m512) __builtin_ia32_andnps512_mask ((__v16sf) __A,
+ (__v16sf) __B,
+ (__v16sf)
+ _mm512_setzero_ps (),
+ (__mmask16) -1);
+}
+
+static __inline__ __m512 __DEFAULT_FN_ATTRS
+_mm512_mask_andnot_ps (__m512 __W, __mmask16 __U, __m512 __A, __m512 __B) {
+ return (__m512) __builtin_ia32_andnps512_mask ((__v16sf) __A,
+ (__v16sf) __B,
+ (__v16sf) __W,
+ (__mmask16) __U);
+}
+
+static __inline__ __m512 __DEFAULT_FN_ATTRS
+_mm512_maskz_andnot_ps (__mmask16 __U, __m512 __A, __m512 __B) {
+ return (__m512) __builtin_ia32_andnps512_mask ((__v16sf) __A,
+ (__v16sf) __B,
+ (__v16sf)
+ _mm512_setzero_ps (),
+ (__mmask16) __U);
+}
+
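+/* Note (editorial, not part of the upstream header): the andnot forms
+ * compute (~__A) & __B -- the first operand is the one that is inverted.
+ */
+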
+static __inline__ __m512i __DEFAULT_FN_ATTRS
+_mm512_cvtpd_epi64 (__m512d __A) {
+ return (__m512i) __builtin_ia32_cvtpd2qq512_mask ((__v8df) __A,
+ (__v8di) _mm512_setzero_si512(),
+ (__mmask8) -1,
+ _MM_FROUND_CUR_DIRECTION);
+}
+
+static __inline__ __m512i __DEFAULT_FN_ATTRS
+_mm512_mask_cvtpd_epi64 (__m512i __W, __mmask8 __U, __m512d __A) {
+ return (__m512i) __builtin_ia32_cvtpd2qq512_mask ((__v8df) __A,
+ (__v8di) __W,
+ (__mmask8) __U,
+ _MM_FROUND_CUR_DIRECTION);
+}
+
+static __inline__ __m512i __DEFAULT_FN_ATTRS
+_mm512_maskz_cvtpd_epi64 (__mmask8 __U, __m512d __A) {
+ return (__m512i) __builtin_ia32_cvtpd2qq512_mask ((__v8df) __A,
+ (__v8di) _mm512_setzero_si512(),
+ (__mmask8) __U,
+ _MM_FROUND_CUR_DIRECTION);
+}
+
+#define _mm512_cvt_roundpd_epi64(A, R) __extension__ ({ \
+ (__m512i)__builtin_ia32_cvtpd2qq512_mask((__v8df)(__m512d)(A), \
+ (__v8di)_mm512_setzero_si512(), \
+ (__mmask8)-1, (int)(R)); })
+
+#define _mm512_mask_cvt_roundpd_epi64(W, U, A, R) __extension__ ({ \
+ (__m512i)__builtin_ia32_cvtpd2qq512_mask((__v8df)(__m512d)(A), \
+ (__v8di)(__m512i)(W), \
+ (__mmask8)(U), (int)(R)); })
+
+#define _mm512_maskz_cvt_roundpd_epi64(U, A, R) __extension__ ({ \
+ (__m512i)__builtin_ia32_cvtpd2qq512_mask((__v8df)(__m512d)(A), \
+ (__v8di)_mm512_setzero_si512(), \
+ (__mmask8)(U), (int)(R)); })
+
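+/* Illustrative sketch (editorial; the __example_* name is hypothetical,
+ * not an Intel API): the _cvt_round* forms take a compile-time rounding
+ * mode; a static mode is normally combined with _MM_FROUND_NO_EXC to
+ * select the embedded-rounding, suppress-all-exceptions encoding. */
+static __inline__ __m512i __DEFAULT_FN_ATTRS
+__example_cvt_nearest_epi64 (__m512d __v)
+{
+  return _mm512_cvt_roundpd_epi64 (__v, _MM_FROUND_TO_NEAREST_INT |
+                                        _MM_FROUND_NO_EXC);
+}
+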
+static __inline__ __m512i __DEFAULT_FN_ATTRS
+_mm512_cvtpd_epu64 (__m512d __A) {
+ return (__m512i) __builtin_ia32_cvtpd2uqq512_mask ((__v8df) __A,
+ (__v8di) _mm512_setzero_si512(),
+ (__mmask8) -1,
+ _MM_FROUND_CUR_DIRECTION);
+}
+
+static __inline__ __m512i __DEFAULT_FN_ATTRS
+_mm512_mask_cvtpd_epu64 (__m512i __W, __mmask8 __U, __m512d __A) {
+ return (__m512i) __builtin_ia32_cvtpd2uqq512_mask ((__v8df) __A,
+ (__v8di) __W,
+ (__mmask8) __U,
+ _MM_FROUND_CUR_DIRECTION);
+}
+
+static __inline__ __m512i __DEFAULT_FN_ATTRS
+_mm512_maskz_cvtpd_epu64 (__mmask8 __U, __m512d __A) {
+ return (__m512i) __builtin_ia32_cvtpd2uqq512_mask ((__v8df) __A,
+ (__v8di) _mm512_setzero_si512(),
+ (__mmask8) __U,
+ _MM_FROUND_CUR_DIRECTION);
+}
+
+#define _mm512_cvt_roundpd_epu64(A, R) __extension__ ({ \
+ (__m512i)__builtin_ia32_cvtpd2uqq512_mask((__v8df)(__m512d)(A), \
+ (__v8di)_mm512_setzero_si512(), \
+ (__mmask8)-1, (int)(R)); })
+
+#define _mm512_mask_cvt_roundpd_epu64(W, U, A, R) __extension__ ({ \
+ (__m512i)__builtin_ia32_cvtpd2uqq512_mask((__v8df)(__m512d)(A), \
+ (__v8di)(__m512i)(W), \
+ (__mmask8)(U), (int)(R)); })
+
+#define _mm512_maskz_cvt_roundpd_epu64(U, A, R) __extension__ ({ \
+ (__m512i)__builtin_ia32_cvtpd2uqq512_mask((__v8df)(__m512d)(A), \
+ (__v8di)_mm512_setzero_si512(), \
+ (__mmask8)(U), (int)(R)); })
+
+static __inline__ __m512i __DEFAULT_FN_ATTRS
+_mm512_cvtps_epi64 (__m256 __A) {
+ return (__m512i) __builtin_ia32_cvtps2qq512_mask ((__v8sf) __A,
+ (__v8di) _mm512_setzero_si512(),
+ (__mmask8) -1,
+ _MM_FROUND_CUR_DIRECTION);
+}
+
+static __inline__ __m512i __DEFAULT_FN_ATTRS
+_mm512_mask_cvtps_epi64 (__m512i __W, __mmask8 __U, __m256 __A) {
+ return (__m512i) __builtin_ia32_cvtps2qq512_mask ((__v8sf) __A,
+ (__v8di) __W,
+ (__mmask8) __U,
+ _MM_FROUND_CUR_DIRECTION);
+}
+
+static __inline__ __m512i __DEFAULT_FN_ATTRS
+_mm512_maskz_cvtps_epi64 (__mmask8 __U, __m256 __A) {
+ return (__m512i) __builtin_ia32_cvtps2qq512_mask ((__v8sf) __A,
+ (__v8di) _mm512_setzero_si512(),
+ (__mmask8) __U,
+ _MM_FROUND_CUR_DIRECTION);
+}
+
+#define _mm512_cvt_roundps_epi64(A, R) __extension__ ({ \
+ (__m512i)__builtin_ia32_cvtps2qq512_mask((__v8sf)(__m256)(A), \
+ (__v8di)_mm512_setzero_si512(), \
+ (__mmask8)-1, (int)(R)); })
+
+#define _mm512_mask_cvt_roundps_epi64(W, U, A, R) __extension__ ({ \
+ (__m512i)__builtin_ia32_cvtps2qq512_mask((__v8sf)(__m256)(A), \
+ (__v8di)(__m512i)(W), \
+ (__mmask8)(U), (int)(R)); })
+
+#define _mm512_maskz_cvt_roundps_epi64(U, A, R) __extension__ ({ \
+ (__m512i)__builtin_ia32_cvtps2qq512_mask((__v8sf)(__m256)(A), \
+ (__v8di)_mm512_setzero_si512(), \
+ (__mmask8)(U), (int)(R)); })
+
+static __inline__ __m512i __DEFAULT_FN_ATTRS
+_mm512_cvtps_epu64 (__m256 __A) {
+ return (__m512i) __builtin_ia32_cvtps2uqq512_mask ((__v8sf) __A,
+ (__v8di) _mm512_setzero_si512(),
+ (__mmask8) -1,
+ _MM_FROUND_CUR_DIRECTION);
+}
+
+static __inline__ __m512i __DEFAULT_FN_ATTRS
+_mm512_mask_cvtps_epu64 (__m512i __W, __mmask8 __U, __m256 __A) {
+ return (__m512i) __builtin_ia32_cvtps2uqq512_mask ((__v8sf) __A,
+ (__v8di) __W,
+ (__mmask8) __U,
+ _MM_FROUND_CUR_DIRECTION);
+}
+
+static __inline__ __m512i __DEFAULT_FN_ATTRS
+_mm512_maskz_cvtps_epu64 (__mmask8 __U, __m256 __A) {
+ return (__m512i) __builtin_ia32_cvtps2uqq512_mask ((__v8sf) __A,
+ (__v8di) _mm512_setzero_si512(),
+ (__mmask8) __U,
+ _MM_FROUND_CUR_DIRECTION);
+}
+
+#define _mm512_cvt_roundps_epu64(A, R) __extension__ ({ \
+ (__m512i)__builtin_ia32_cvtps2uqq512_mask((__v8sf)(__m256)(A), \
+ (__v8di)_mm512_setzero_si512(), \
+ (__mmask8)-1, (int)(R)); })
+
+#define _mm512_mask_cvt_roundps_epu64(W, U, A, R) __extension__ ({ \
+ (__m512i)__builtin_ia32_cvtps2uqq512_mask((__v8sf)(__m256)(A), \
+ (__v8di)(__m512i)(W), \
+ (__mmask8)(U), (int)(R)); })
+
+#define _mm512_maskz_cvt_roundps_epu64(U, A, R) __extension__ ({ \
+ (__m512i)__builtin_ia32_cvtps2uqq512_mask((__v8sf)(__m256)(A), \
+ (__v8di)_mm512_setzero_si512(), \
+ (__mmask8)(U), (int)(R)); })
+
+
+static __inline__ __m512d __DEFAULT_FN_ATTRS
+_mm512_cvtepi64_pd (__m512i __A) {
+ return (__m512d) __builtin_ia32_cvtqq2pd512_mask ((__v8di) __A,
+ (__v8df) _mm512_setzero_pd(),
+ (__mmask8) -1,
+ _MM_FROUND_CUR_DIRECTION);
+}
+
+static __inline__ __m512d __DEFAULT_FN_ATTRS
+_mm512_mask_cvtepi64_pd (__m512d __W, __mmask8 __U, __m512i __A) {
+ return (__m512d) __builtin_ia32_cvtqq2pd512_mask ((__v8di) __A,
+ (__v8df) __W,
+ (__mmask8) __U,
+ _MM_FROUND_CUR_DIRECTION);
+}
+
+static __inline__ __m512d __DEFAULT_FN_ATTRS
+_mm512_maskz_cvtepi64_pd (__mmask8 __U, __m512i __A) {
+ return (__m512d) __builtin_ia32_cvtqq2pd512_mask ((__v8di) __A,
+ (__v8df) _mm512_setzero_pd(),
+ (__mmask8) __U,
+ _MM_FROUND_CUR_DIRECTION);
+}
+
+#define _mm512_cvt_roundepi64_pd(A, R) __extension__ ({ \
+ (__m512d)__builtin_ia32_cvtqq2pd512_mask((__v8di)(__m512i)(A), \
+ (__v8df)_mm512_setzero_pd(), \
+ (__mmask8)-1, (int)(R)); })
+
+#define _mm512_mask_cvt_roundepi64_pd(W, U, A, R) __extension__ ({ \
+ (__m512d)__builtin_ia32_cvtqq2pd512_mask((__v8di)(__m512i)(A), \
+ (__v8df)(__m512d)(W), \
+ (__mmask8)(U), (int)(R)); })
+
+#define _mm512_maskz_cvt_roundepi64_pd(U, A, R) __extension__ ({ \
+ (__m512d)__builtin_ia32_cvtqq2pd512_mask((__v8di)(__m512i)(A), \
+ (__v8df)_mm512_setzero_pd(), \
+ (__mmask8)(U), (int)(R)); })
+
+static __inline__ __m256 __DEFAULT_FN_ATTRS
+_mm512_cvtepi64_ps (__m512i __A) {
+ return (__m256) __builtin_ia32_cvtqq2ps512_mask ((__v8di) __A,
+ (__v8sf) _mm256_setzero_ps(),
+ (__mmask8) -1,
+ _MM_FROUND_CUR_DIRECTION);
+}
+
+static __inline__ __m256 __DEFAULT_FN_ATTRS
+_mm512_mask_cvtepi64_ps (__m256 __W, __mmask8 __U, __m512i __A) {
+ return (__m256) __builtin_ia32_cvtqq2ps512_mask ((__v8di) __A,
+ (__v8sf) __W,
+ (__mmask8) __U,
+ _MM_FROUND_CUR_DIRECTION);
+}
+
+static __inline__ __m256 __DEFAULT_FN_ATTRS
+_mm512_maskz_cvtepi64_ps (__mmask8 __U, __m512i __A) {
+ return (__m256) __builtin_ia32_cvtqq2ps512_mask ((__v8di) __A,
+ (__v8sf) _mm256_setzero_ps(),
+ (__mmask8) __U,
+ _MM_FROUND_CUR_DIRECTION);
+}
+
+#define _mm512_cvt_roundepi64_ps(A, R) __extension__ ({ \
+ (__m256)__builtin_ia32_cvtqq2ps512_mask((__v8di)(__m512i)(A), \
+ (__v8sf)_mm256_setzero_ps(), \
+ (__mmask8)-1, (int)(R)); })
+
+#define _mm512_mask_cvt_roundepi64_ps(W, U, A, R) __extension__ ({ \
+ (__m256)__builtin_ia32_cvtqq2ps512_mask((__v8di)(__m512i)(A), \
+ (__v8sf)(__m256)(W), (__mmask8)(U), \
+ (int)(R)); })
+
+#define _mm512_maskz_cvt_roundepi64_ps(U, A, R) __extension__ ({ \
+ (__m256)__builtin_ia32_cvtqq2ps512_mask((__v8di)(__m512i)(A), \
+ (__v8sf)_mm256_setzero_ps(), \
+ (__mmask8)(U), (int)(R)); })
+
+
+static __inline__ __m512i __DEFAULT_FN_ATTRS
+_mm512_cvttpd_epi64 (__m512d __A) {
+ return (__m512i) __builtin_ia32_cvttpd2qq512_mask ((__v8df) __A,
+ (__v8di) _mm512_setzero_si512(),
+ (__mmask8) -1,
+ _MM_FROUND_CUR_DIRECTION);
+}
+
+static __inline__ __m512i __DEFAULT_FN_ATTRS
+_mm512_mask_cvttpd_epi64 (__m512i __W, __mmask8 __U, __m512d __A) {
+ return (__m512i) __builtin_ia32_cvttpd2qq512_mask ((__v8df) __A,
+ (__v8di) __W,
+ (__mmask8) __U,
+ _MM_FROUND_CUR_DIRECTION);
+}
+
+static __inline__ __m512i __DEFAULT_FN_ATTRS
+_mm512_maskz_cvttpd_epi64 (__mmask8 __U, __m512d __A) {
+ return (__m512i) __builtin_ia32_cvttpd2qq512_mask ((__v8df) __A,
+ (__v8di) _mm512_setzero_si512(),
+ (__mmask8) __U,
+ _MM_FROUND_CUR_DIRECTION);
+}
+
+#define _mm512_cvtt_roundpd_epi64(A, R) __extension__ ({ \
+ (__m512i)__builtin_ia32_cvttpd2qq512_mask((__v8df)(__m512d)(A), \
+ (__v8di)_mm512_setzero_si512(), \
+ (__mmask8)-1, (int)(R)); })
+
+#define _mm512_mask_cvtt_roundpd_epi64(W, U, A, R) __extension__ ({ \
+ (__m512i)__builtin_ia32_cvttpd2qq512_mask((__v8df)(__m512d)(A), \
+ (__v8di)(__m512i)(W), \
+ (__mmask8)(U), (int)(R)); })
+
+#define _mm512_maskz_cvtt_roundpd_epi64(U, A, R) __extension__ ({ \
+ (__m512i)__builtin_ia32_cvttpd2qq512_mask((__v8df)(__m512d)(A), \
+ (__v8di)_mm512_setzero_si512(), \
+ (__mmask8)(U), (int)(R)); })
+
+static __inline__ __m512i __DEFAULT_FN_ATTRS
+_mm512_cvttpd_epu64 (__m512d __A) {
+ return (__m512i) __builtin_ia32_cvttpd2uqq512_mask ((__v8df) __A,
+ (__v8di) _mm512_setzero_si512(),
+ (__mmask8) -1,
+ _MM_FROUND_CUR_DIRECTION);
+}
+
+static __inline__ __m512i __DEFAULT_FN_ATTRS
+_mm512_mask_cvttpd_epu64 (__m512i __W, __mmask8 __U, __m512d __A) {
+ return (__m512i) __builtin_ia32_cvttpd2uqq512_mask ((__v8df) __A,
+ (__v8di) __W,
+ (__mmask8) __U,
+ _MM_FROUND_CUR_DIRECTION);
+}
+
+static __inline__ __m512i __DEFAULT_FN_ATTRS
+_mm512_maskz_cvttpd_epu64 (__mmask8 __U, __m512d __A) {
+ return (__m512i) __builtin_ia32_cvttpd2uqq512_mask ((__v8df) __A,
+ (__v8di) _mm512_setzero_si512(),
+ (__mmask8) __U,
+ _MM_FROUND_CUR_DIRECTION);
+}
+
+#define _mm512_cvtt_roundpd_epu64(A, R) __extension__ ({ \
+ (__m512i)__builtin_ia32_cvttpd2uqq512_mask((__v8df)(__m512d)(A), \
+ (__v8di)_mm512_setzero_si512(), \
+ (__mmask8)-1, (int)(R)); })
+
+#define _mm512_mask_cvtt_roundpd_epu64(W, U, A, R) __extension__ ({ \
+ (__m512i)__builtin_ia32_cvttpd2uqq512_mask((__v8df)(__m512d)(A), \
+ (__v8di)(__m512i)(W), \
+ (__mmask8)(U), (int)(R)); })
+
+#define _mm512_maskz_cvtt_roundpd_epu64(U, A, R) __extension__ ({ \
+ (__m512i)__builtin_ia32_cvttpd2uqq512_mask((__v8df)(__m512d)(A), \
+ (__v8di)_mm512_setzero_si512(), \
+ (__mmask8)(U), (int)(R)); })
+
+static __inline__ __m512i __DEFAULT_FN_ATTRS
+_mm512_cvttps_epi64 (__m256 __A) {
+ return (__m512i) __builtin_ia32_cvttps2qq512_mask ((__v8sf) __A,
+ (__v8di) _mm512_setzero_si512(),
+ (__mmask8) -1,
+ _MM_FROUND_CUR_DIRECTION);
+}
+
+static __inline__ __m512i __DEFAULT_FN_ATTRS
+_mm512_mask_cvttps_epi64 (__m512i __W, __mmask8 __U, __m256 __A) {
+ return (__m512i) __builtin_ia32_cvttps2qq512_mask ((__v8sf) __A,
+ (__v8di) __W,
+ (__mmask8) __U,
+ _MM_FROUND_CUR_DIRECTION);
+}
+
+static __inline__ __m512i __DEFAULT_FN_ATTRS
+_mm512_maskz_cvttps_epi64 (__mmask8 __U, __m256 __A) {
+ return (__m512i) __builtin_ia32_cvttps2qq512_mask ((__v8sf) __A,
+ (__v8di) _mm512_setzero_si512(),
+ (__mmask8) __U,
+ _MM_FROUND_CUR_DIRECTION);
+}
+
+#define _mm512_cvtt_roundps_epi64(A, R) __extension__ ({ \
+ (__m512i)__builtin_ia32_cvttps2qq512_mask((__v8sf)(__m256)(A), \
+ (__v8di)_mm512_setzero_si512(), \
+ (__mmask8)-1, (int)(R)); })
+
+#define _mm512_mask_cvtt_roundps_epi64(W, U, A, R) __extension__ ({ \
+ (__m512i)__builtin_ia32_cvttps2qq512_mask((__v8sf)(__m256)(A), \
+ (__v8di)(__m512i)(W), \
+ (__mmask8)(U), (int)(R)); })
+
+#define _mm512_maskz_cvtt_roundps_epi64(U, A, R) __extension__ ({ \
+ (__m512i)__builtin_ia32_cvttps2qq512_mask((__v8sf)(__m256)(A), \
+ (__v8di)_mm512_setzero_si512(), \
+ (__mmask8)(U), (int)(R)); })
+
+static __inline__ __m512i __DEFAULT_FN_ATTRS
+_mm512_cvttps_epu64 (__m256 __A) {
+ return (__m512i) __builtin_ia32_cvttps2uqq512_mask ((__v8sf) __A,
+ (__v8di) _mm512_setzero_si512(),
+ (__mmask8) -1,
+ _MM_FROUND_CUR_DIRECTION);
+}
+
+static __inline__ __m512i __DEFAULT_FN_ATTRS
+_mm512_mask_cvttps_epu64 (__m512i __W, __mmask8 __U, __m256 __A) {
+ return (__m512i) __builtin_ia32_cvttps2uqq512_mask ((__v8sf) __A,
+ (__v8di) __W,
+ (__mmask8) __U,
+ _MM_FROUND_CUR_DIRECTION);
+}
+
+static __inline__ __m512i __DEFAULT_FN_ATTRS
+_mm512_maskz_cvttps_epu64 (__mmask8 __U, __m256 __A) {
+ return (__m512i) __builtin_ia32_cvttps2uqq512_mask ((__v8sf) __A,
+ (__v8di) _mm512_setzero_si512(),
+ (__mmask8) __U,
+ _MM_FROUND_CUR_DIRECTION);
+}
+
+#define _mm512_cvtt_roundps_epu64(A, R) __extension__ ({ \
+ (__m512i)__builtin_ia32_cvttps2uqq512_mask((__v8sf)(__m256)(A), \
+ (__v8di)_mm512_setzero_si512(), \
+ (__mmask8)-1, (int)(R)); })
+
+#define _mm512_mask_cvtt_roundps_epu64(W, U, A, R) __extension__ ({ \
+ (__m512i)__builtin_ia32_cvttps2uqq512_mask((__v8sf)(__m256)(A), \
+ (__v8di)(__m512i)(W), \
+ (__mmask8)(U), (int)(R)); })
+
+#define _mm512_maskz_cvtt_roundps_epu64(U, A, R) __extension__ ({ \
+ (__m512i)__builtin_ia32_cvttps2uqq512_mask((__v8sf)(__m256)(A), \
+ (__v8di)_mm512_setzero_si512(), \
+ (__mmask8)(U), (int)(R)); })
+
+static __inline__ __m512d __DEFAULT_FN_ATTRS
+_mm512_cvtepu64_pd (__m512i __A) {
+ return (__m512d) __builtin_ia32_cvtuqq2pd512_mask ((__v8di) __A,
+ (__v8df) _mm512_setzero_pd(),
+ (__mmask8) -1,
+ _MM_FROUND_CUR_DIRECTION);
+}
+
+static __inline__ __m512d __DEFAULT_FN_ATTRS
+_mm512_mask_cvtepu64_pd (__m512d __W, __mmask8 __U, __m512i __A) {
+ return (__m512d) __builtin_ia32_cvtuqq2pd512_mask ((__v8di) __A,
+ (__v8df) __W,
+ (__mmask8) __U,
+ _MM_FROUND_CUR_DIRECTION);
+}
+
+static __inline__ __m512d __DEFAULT_FN_ATTRS
+_mm512_maskz_cvtepu64_pd (__mmask8 __U, __m512i __A) {
+ return (__m512d) __builtin_ia32_cvtuqq2pd512_mask ((__v8di) __A,
+ (__v8df) _mm512_setzero_pd(),
+ (__mmask8) __U,
+ _MM_FROUND_CUR_DIRECTION);
+}
+
+#define _mm512_cvt_roundepu64_pd(A, R) __extension__ ({ \
+ (__m512d)__builtin_ia32_cvtuqq2pd512_mask((__v8di)(__m512i)(A), \
+ (__v8df)_mm512_setzero_pd(), \
+ (__mmask8)-1, (int)(R)); })
+
+#define _mm512_mask_cvt_roundepu64_pd(W, U, A, R) __extension__ ({ \
+ (__m512d)__builtin_ia32_cvtuqq2pd512_mask((__v8di)(__m512i)(A), \
+ (__v8df)(__m512d)(W), \
+ (__mmask8)(U), (int)(R)); })
+
+
+#define _mm512_maskz_cvt_roundepu64_pd(U, A, R) __extension__ ({ \
+ (__m512d)__builtin_ia32_cvtuqq2pd512_mask((__v8di)(__m512i)(A), \
+ (__v8df)_mm512_setzero_pd(), \
+ (__mmask8)(U), (int)(R)); })
+
+
+static __inline__ __m256 __DEFAULT_FN_ATTRS
+_mm512_cvtepu64_ps (__m512i __A) {
+ return (__m256) __builtin_ia32_cvtuqq2ps512_mask ((__v8di) __A,
+ (__v8sf) _mm256_setzero_ps(),
+ (__mmask8) -1,
+ _MM_FROUND_CUR_DIRECTION);
+}
+
+static __inline__ __m256 __DEFAULT_FN_ATTRS
+_mm512_mask_cvtepu64_ps (__m256 __W, __mmask8 __U, __m512i __A) {
+ return (__m256) __builtin_ia32_cvtuqq2ps512_mask ((__v8di) __A,
+ (__v8sf) __W,
+ (__mmask8) __U,
+ _MM_FROUND_CUR_DIRECTION);
+}
+
+static __inline__ __m256 __DEFAULT_FN_ATTRS
+_mm512_maskz_cvtepu64_ps (__mmask8 __U, __m512i __A) {
+ return (__m256) __builtin_ia32_cvtuqq2ps512_mask ((__v8di) __A,
+ (__v8sf) _mm256_setzero_ps(),
+ (__mmask8) __U,
+ _MM_FROUND_CUR_DIRECTION);
+}
+
+#define _mm512_cvt_roundepu64_ps(A, R) __extension__ ({ \
+ (__m256)__builtin_ia32_cvtuqq2ps512_mask((__v8di)(__m512i)(A), \
+ (__v8sf)_mm256_setzero_ps(), \
+ (__mmask8)-1, (int)(R)); })
+
+#define _mm512_mask_cvt_roundepu64_ps(W, U, A, R) __extension__ ({ \
+ (__m256)__builtin_ia32_cvtuqq2ps512_mask((__v8di)(__m512i)(A), \
+ (__v8sf)(__m256)(W), (__mmask8)(U), \
+ (int)(R)); })
+
+#define _mm512_maskz_cvt_roundepu64_ps(U, A, R) __extension__ ({ \
+ (__m256)__builtin_ia32_cvtuqq2ps512_mask((__v8di)(__m512i)(A), \
+ (__v8sf)_mm256_setzero_ps(), \
+ (__mmask8)(U), (int)(R)); })
+
+#define _mm512_range_pd(A, B, C) __extension__ ({ \
+ (__m512d)__builtin_ia32_rangepd512_mask((__v8df)(__m512d)(A), \
+ (__v8df)(__m512d)(B), (int)(C), \
+ (__v8df)_mm512_setzero_pd(), \
+ (__mmask8)-1, \
+ _MM_FROUND_CUR_DIRECTION); })
+
+#define _mm512_mask_range_pd(W, U, A, B, C) __extension__ ({ \
+ (__m512d)__builtin_ia32_rangepd512_mask((__v8df)(__m512d)(A), \
+ (__v8df)(__m512d)(B), (int)(C), \
+ (__v8df)(__m512d)(W), (__mmask8)(U), \
+ _MM_FROUND_CUR_DIRECTION); })
+
+#define _mm512_maskz_range_pd(U, A, B, C) __extension__ ({ \
+ (__m512d)__builtin_ia32_rangepd512_mask((__v8df)(__m512d)(A), \
+ (__v8df)(__m512d)(B), (int)(C), \
+ (__v8df)_mm512_setzero_pd(), \
+ (__mmask8)(U), \
+ _MM_FROUND_CUR_DIRECTION); })
+
+#define _mm512_range_round_pd(A, B, C, R) __extension__ ({ \
+ (__m512d)__builtin_ia32_rangepd512_mask((__v8df)(__m512d)(A), \
+ (__v8df)(__m512d)(B), (int)(C), \
+ (__v8df)_mm512_setzero_pd(), \
+ (__mmask8)-1, (int)(R)); })
+
+#define _mm512_mask_range_round_pd(W, U, A, B, C, R) __extension__ ({ \
+ (__m512d)__builtin_ia32_rangepd512_mask((__v8df)(__m512d)(A), \
+ (__v8df)(__m512d)(B), (int)(C), \
+ (__v8df)(__m512d)(W), (__mmask8)(U), \
+ (int)(R)); })
+
+#define _mm512_maskz_range_round_pd(U, A, B, C, R) __extension__ ({ \
+ (__m512d)__builtin_ia32_rangepd512_mask((__v8df)(__m512d)(A), \
+ (__v8df)(__m512d)(B), (int)(C), \
+ (__v8df)_mm512_setzero_pd(), \
+ (__mmask8)(U), (int)(R)); })
+
+#define _mm512_range_ps(A, B, C) __extension__ ({ \
+ (__m512)__builtin_ia32_rangeps512_mask((__v16sf)(__m512)(A), \
+ (__v16sf)(__m512)(B), (int)(C), \
+ (__v16sf)_mm512_setzero_ps(), \
+ (__mmask16)-1, \
+ _MM_FROUND_CUR_DIRECTION); })
+
+#define _mm512_mask_range_ps(W, U, A, B, C) __extension__ ({ \
+ (__m512)__builtin_ia32_rangeps512_mask((__v16sf)(__m512)(A), \
+ (__v16sf)(__m512)(B), (int)(C), \
+ (__v16sf)(__m512)(W), (__mmask16)(U), \
+ _MM_FROUND_CUR_DIRECTION); })
+
+#define _mm512_maskz_range_ps(U, A, B, C) __extension__ ({ \
+ (__m512)__builtin_ia32_rangeps512_mask((__v16sf)(__m512)(A), \
+ (__v16sf)(__m512)(B), (int)(C), \
+ (__v16sf)_mm512_setzero_ps(), \
+ (__mmask16)(U), \
+ _MM_FROUND_CUR_DIRECTION); })
+
+#define _mm512_range_round_ps(A, B, C, R) __extension__ ({ \
+ (__m512)__builtin_ia32_rangeps512_mask((__v16sf)(__m512)(A), \
+ (__v16sf)(__m512)(B), (int)(C), \
+ (__v16sf)_mm512_setzero_ps(), \
+ (__mmask16)-1, (int)(R)); })
+
+#define _mm512_mask_range_round_ps(W, U, A, B, C, R) __extension__ ({ \
+ (__m512)__builtin_ia32_rangeps512_mask((__v16sf)(__m512)(A), \
+ (__v16sf)(__m512)(B), (int)(C), \
+ (__v16sf)(__m512)(W), (__mmask16)(U), \
+ (int)(R)); })
+
+#define _mm512_maskz_range_round_ps(U, A, B, C, R) __extension__ ({ \
+ (__m512)__builtin_ia32_rangeps512_mask((__v16sf)(__m512)(A), \
+ (__v16sf)(__m512)(B), (int)(C), \
+ (__v16sf)_mm512_setzero_ps(), \
+ (__mmask16)(U), (int)(R)); })
+
+#define _mm_range_round_ss(A, B, C, R) __extension__ ({ \
+ (__m128)__builtin_ia32_rangess128_round_mask((__v4sf)(__m128)(A), \
+ (__v4sf)(__m128)(B), \
+ (__v4sf)_mm_setzero_ps(), \
+ (__mmask8) -1, (int)(C),\
+ (int)(R)); })
+
+#define _mm_range_ss(A, B, C) _mm_range_round_ss(A, B, C, _MM_FROUND_CUR_DIRECTION)
+
+#define _mm_mask_range_round_ss(W, U, A, B, C, R) __extension__ ({ \
+ (__m128)__builtin_ia32_rangess128_round_mask((__v4sf)(__m128)(A), \
+ (__v4sf)(__m128)(B), \
+ (__v4sf)(__m128)(W),\
+ (__mmask8)(U), (int)(C),\
+ (int)(R)); })
+
+#define _mm_mask_range_ss(W, U, A, B, C) _mm_mask_range_round_ss(W, U, A, B, C, _MM_FROUND_CUR_DIRECTION)
+
+#define _mm_maskz_range_round_ss(U, A, B, C, R) __extension__ ({ \
+ (__m128)__builtin_ia32_rangess128_round_mask((__v4sf)(__m128)(A), \
+ (__v4sf)(__m128)(B), \
+ (__v4sf)_mm_setzero_ps(), \
+ (__mmask8)(U), (int)(C),\
+ (int)(R)); })
+
+#define _mm_maskz_range_ss(U, A, B, C) _mm_maskz_range_round_ss(U, A, B, C, _MM_FROUND_CUR_DIRECTION)
+
+#define _mm_range_round_sd(A, B, C, R) __extension__ ({ \
+ (__m128d)__builtin_ia32_rangesd128_round_mask((__v2df)(__m128d)(A), \
+ (__v2df)(__m128d)(B), \
+ (__v2df)_mm_setzero_pd(), \
+ (__mmask8) -1, (int)(C),\
+ (int)(R)); })
+
+#define _mm_range_sd(A, B, C) _mm_range_round_sd(A, B, C, _MM_FROUND_CUR_DIRECTION)
+
+#define _mm_mask_range_round_sd(W, U, A, B, C, R) __extension__ ({ \
+ (__m128d)__builtin_ia32_rangesd128_round_mask((__v2df)(__m128d)(A), \
+ (__v2df)(__m128d)(B), \
+ (__v2df)(__m128d)(W),\
+ (__mmask8)(U), (int)(C),\
+ (int)(R)); })
+
+#define _mm_mask_range_sd(W, U, A, B, C) _mm_mask_range_round_sd(W, U, A, B, C, _MM_FROUND_CUR_DIRECTION)
+
+#define _mm_maskz_range_round_sd(U, A, B, C, R) __extension__ ({ \
+ (__m128d)__builtin_ia32_rangesd128_round_mask((__v2df)(__m128d)(A), \
+ (__v2df)(__m128d)(B), \
+ (__v2df)_mm_setzero_pd(), \
+ (__mmask8)(U), (int)(C),\
+ (int)(R)); })
+
+#define _mm_maskz_range_sd(U, A, B, C) _mm_maskz_range_round_sd(U, A, B, C, _MM_FROUND_CUR_DIRECTION)
+
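+/* Note (editorial, not part of the upstream header): in the range forms
+ * the C/imm8 operand encodes which value VRANGEPD/VRANGEPS/VRANGESD/
+ * VRANGESS returns (min, max, absolute min, or absolute max) together
+ * with a sign-control field; see the Intel SDM for the exact encoding.
+ */
+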
+#define _mm512_reduce_pd(A, B) __extension__ ({ \
+ (__m512d)__builtin_ia32_reducepd512_mask((__v8df)(__m512d)(A), (int)(B), \
+ (__v8df)_mm512_setzero_pd(), \
+ (__mmask8)-1, \
+ _MM_FROUND_CUR_DIRECTION); })
+
+#define _mm512_mask_reduce_pd(W, U, A, B) __extension__ ({ \
+ (__m512d)__builtin_ia32_reducepd512_mask((__v8df)(__m512d)(A), (int)(B), \
+ (__v8df)(__m512d)(W), \
+ (__mmask8)(U), \
+ _MM_FROUND_CUR_DIRECTION); })
+
+#define _mm512_maskz_reduce_pd(U, A, B) __extension__ ({ \
+ (__m512d)__builtin_ia32_reducepd512_mask((__v8df)(__m512d)(A), (int)(B), \
+ (__v8df)_mm512_setzero_pd(), \
+ (__mmask8)(U), \
+ _MM_FROUND_CUR_DIRECTION); })
+
+#define _mm512_reduce_ps(A, B) __extension__ ({ \
+ (__m512)__builtin_ia32_reduceps512_mask((__v16sf)(__m512)(A), (int)(B), \
+ (__v16sf)_mm512_setzero_ps(), \
+ (__mmask16)-1, \
+ _MM_FROUND_CUR_DIRECTION); })
+
+#define _mm512_mask_reduce_ps(W, U, A, B) __extension__ ({ \
+ (__m512)__builtin_ia32_reduceps512_mask((__v16sf)(__m512)(A), (int)(B), \
+ (__v16sf)(__m512)(W), \
+ (__mmask16)(U), \
+ _MM_FROUND_CUR_DIRECTION); })
+
+#define _mm512_maskz_reduce_ps(U, A, B) __extension__ ({ \
+ (__m512)__builtin_ia32_reduceps512_mask((__v16sf)(__m512)(A), (int)(B), \
+ (__v16sf)_mm512_setzero_ps(), \
+ (__mmask16)(U), \
+ _MM_FROUND_CUR_DIRECTION); })
+
+#define _mm512_reduce_round_pd(A, B, R) __extension__ ({\
+ (__m512d)__builtin_ia32_reducepd512_mask((__v8df)(__m512d)(A), (int)(B), \
+ (__v8df)_mm512_setzero_pd(), \
+ (__mmask8)-1, (int)(R)); })
+
+#define _mm512_mask_reduce_round_pd(W, U, A, B, R) __extension__ ({\
+ (__m512d)__builtin_ia32_reducepd512_mask((__v8df)(__m512d)(A), (int)(B), \
+ (__v8df)(__m512d)(W), \
+ (__mmask8)(U), (int)(R)); })
+
+#define _mm512_maskz_reduce_round_pd(U, A, B, R) __extension__ ({\
+ (__m512d)__builtin_ia32_reducepd512_mask((__v8df)(__m512d)(A), (int)(B), \
+ (__v8df)_mm512_setzero_pd(), \
+ (__mmask8)(U), (int)(R)); })
+
+#define _mm512_reduce_round_ps(A, B, R) __extension__ ({\
+ (__m512)__builtin_ia32_reduceps512_mask((__v16sf)(__m512)(A), (int)(B), \
+ (__v16sf)_mm512_setzero_ps(), \
+ (__mmask16)-1, (int)(R)); })
+
+#define _mm512_mask_reduce_round_ps(W, U, A, B, R) __extension__ ({\
+ (__m512)__builtin_ia32_reduceps512_mask((__v16sf)(__m512)(A), (int)(B), \
+ (__v16sf)(__m512)(W), \
+ (__mmask16)(U), (int)(R)); })
+
+#define _mm512_maskz_reduce_round_ps(U, A, B, R) __extension__ ({\
+ (__m512)__builtin_ia32_reduceps512_mask((__v16sf)(__m512)(A), (int)(B), \
+ (__v16sf)_mm512_setzero_ps(), \
+ (__mmask16)(U), (int)(R)); })
+
+#define _mm_reduce_ss(A, B, C) __extension__ ({ \
+ (__m128)__builtin_ia32_reducess_mask((__v4sf)(__m128)(A), \
+ (__v4sf)(__m128)(B), \
+ (__v4sf)_mm_setzero_ps(), (__mmask8)-1, \
+ (int)(C), _MM_FROUND_CUR_DIRECTION); })
+
+#define _mm_mask_reduce_ss(W, U, A, B, C) __extension__ ({ \
+ (__m128)__builtin_ia32_reducess_mask((__v4sf)(__m128)(A), \
+ (__v4sf)(__m128)(B), \
+ (__v4sf)(__m128)(W), (__mmask8)(U), \
+ (int)(C), _MM_FROUND_CUR_DIRECTION); })
+
+#define _mm_maskz_reduce_ss(U, A, B, C) __extension__ ({ \
+ (__m128)__builtin_ia32_reducess_mask((__v4sf)(__m128)(A), \
+ (__v4sf)(__m128)(B), \
+ (__v4sf)_mm_setzero_ps(), \
+ (__mmask8)(U), (int)(C), \
+ _MM_FROUND_CUR_DIRECTION); })
+
+#define _mm_reduce_round_ss(A, B, C, R) __extension__ ({ \
+ (__m128)__builtin_ia32_reducess_mask((__v4sf)(__m128)(A), \
+ (__v4sf)(__m128)(B), \
+ (__v4sf)_mm_setzero_ps(), (__mmask8)-1, \
+ (int)(C), (int)(R)); })
+
+#define _mm_mask_reduce_round_ss(W, U, A, B, C, R) __extension__ ({ \
+ (__m128)__builtin_ia32_reducess_mask((__v4sf)(__m128)(A), \
+ (__v4sf)(__m128)(B), \
+ (__v4sf)(__m128)(W), (__mmask8)(U), \
+ (int)(C), (int)(R)); })
+
+#define _mm_maskz_reduce_round_ss(U, A, B, C, R) __extension__ ({ \
+ (__m128)__builtin_ia32_reducess_mask((__v4sf)(__m128)(A), \
+ (__v4sf)(__m128)(B), \
+ (__v4sf)_mm_setzero_ps(), \
+ (__mmask8)(U), (int)(C), (int)(R)); })
+
+#define _mm_reduce_sd(A, B, C) __extension__ ({ \
+ (__m128d)__builtin_ia32_reducesd_mask((__v2df)(__m128d)(A), \
+ (__v2df)(__m128d)(B), \
+ (__v2df)_mm_setzero_pd(), \
+ (__mmask8)-1, (int)(C), \
+ _MM_FROUND_CUR_DIRECTION); })
+
+#define _mm_mask_reduce_sd(W, U, A, B, C) __extension__ ({ \
+ (__m128d)__builtin_ia32_reducesd_mask((__v2df)(__m128d)(A), \
+ (__v2df)(__m128d)(B), \
+ (__v2df)(__m128d)(W), (__mmask8)(U), \
+ (int)(C), _MM_FROUND_CUR_DIRECTION); })
+
+#define _mm_maskz_reduce_sd(U, A, B, C) __extension__ ({ \
+ (__m128d)__builtin_ia32_reducesd_mask((__v2df)(__m128d)(A), \
+ (__v2df)(__m128d)(B), \
+ (__v2df)_mm_setzero_pd(), \
+ (__mmask8)(U), (int)(C), \
+ _MM_FROUND_CUR_DIRECTION); })
+
+#define _mm_reduce_round_sd(A, B, C, R) __extension__ ({ \
+ (__m128d)__builtin_ia32_reducesd_mask((__v2df)(__m128d)(A), \
+ (__v2df)(__m128d)(B), \
+ (__v2df)_mm_setzero_pd(), \
+ (__mmask8)-1, (int)(C), (int)(R)); })
+
+#define _mm_mask_reduce_round_sd(W, U, A, B, C, R) __extension__ ({ \
+ (__m128d)__builtin_ia32_reducesd_mask((__v2df)(__m128d)(A), \
+ (__v2df)(__m128d)(B), \
+ (__v2df)(__m128d)(W), (__mmask8)(U), \
+ (int)(C), (int)(R)); })
+
+#define _mm_maskz_reduce_round_sd(U, A, B, C, R) __extension__ ({ \
+ (__m128d)__builtin_ia32_reducesd_mask((__v2df)(__m128d)(A), \
+ (__v2df)(__m128d)(B), \
+ (__v2df)_mm_setzero_pd(), \
+ (__mmask8)(U), (int)(C), (int)(R)); })
+
+static __inline__ __mmask16 __DEFAULT_FN_ATTRS
+_mm512_movepi32_mask (__m512i __A)
+{
+ return (__mmask16) __builtin_ia32_cvtd2mask512 ((__v16si) __A);
+}
+
+static __inline__ __m512i __DEFAULT_FN_ATTRS
+_mm512_movm_epi32 (__mmask16 __A)
+{
+ return (__m512i) __builtin_ia32_cvtmask2d512 (__A);
+}
+
+static __inline__ __m512i __DEFAULT_FN_ATTRS
+_mm512_movm_epi64 (__mmask8 __A)
+{
+ return (__m512i) __builtin_ia32_cvtmask2q512 (__A);
+}
+
+static __inline__ __mmask8 __DEFAULT_FN_ATTRS
+_mm512_movepi64_mask (__m512i __A)
+{
+ return (__mmask8) __builtin_ia32_cvtq2mask512 ((__v8di) __A);
+}
+
+
+static __inline__ __m512 __DEFAULT_FN_ATTRS
+_mm512_broadcast_f32x2 (__m128 __A)
+{
+ return (__m512) __builtin_ia32_broadcastf32x2_512_mask ((__v4sf) __A,
+ (__v16sf)_mm512_undefined_ps(),
+ (__mmask16) -1);
+}
+
+static __inline__ __m512 __DEFAULT_FN_ATTRS
+_mm512_mask_broadcast_f32x2 (__m512 __O, __mmask16 __M, __m128 __A)
+{
+ return (__m512) __builtin_ia32_broadcastf32x2_512_mask ((__v4sf) __A,
+ (__v16sf)
+ __O, __M);
+}
+
+static __inline__ __m512 __DEFAULT_FN_ATTRS
+_mm512_maskz_broadcast_f32x2 (__mmask16 __M, __m128 __A)
+{
+ return (__m512) __builtin_ia32_broadcastf32x2_512_mask ((__v4sf) __A,
+ (__v16sf)_mm512_setzero_ps (),
+ __M);
+}
+
+static __inline__ __m512 __DEFAULT_FN_ATTRS
+_mm512_broadcast_f32x8 (__m256 __A)
+{
+ return (__m512) __builtin_ia32_broadcastf32x8_512_mask ((__v8sf) __A,
+               (__v16sf)_mm512_undefined_ps(),
+ (__mmask16) -1);
+}
+
+static __inline__ __m512 __DEFAULT_FN_ATTRS
+_mm512_mask_broadcast_f32x8 (__m512 __O, __mmask16 __M, __m256 __A)
+{
+ return (__m512) __builtin_ia32_broadcastf32x8_512_mask ((__v8sf) __A,
+ (__v16sf)__O,
+ __M);
+}
+
+static __inline__ __m512 __DEFAULT_FN_ATTRS
+_mm512_maskz_broadcast_f32x8 (__mmask16 __M, __m256 __A)
+{
+ return (__m512) __builtin_ia32_broadcastf32x8_512_mask ((__v8sf) __A,
+ (__v16sf)_mm512_setzero_ps (),
+ __M);
+}
+
+static __inline__ __m512d __DEFAULT_FN_ATTRS
+_mm512_broadcast_f64x2 (__m128d __A)
+{
+ return (__m512d) __builtin_ia32_broadcastf64x2_512_mask ((__v2df) __A,
+ (__v8df)_mm512_undefined_pd(),
+ (__mmask8) -1);
+}
+
+static __inline__ __m512d __DEFAULT_FN_ATTRS
+_mm512_mask_broadcast_f64x2 (__m512d __O, __mmask8 __M, __m128d __A)
+{
+ return (__m512d) __builtin_ia32_broadcastf64x2_512_mask ((__v2df) __A,
+ (__v8df)
+ __O, __M);
+}
+
+static __inline__ __m512d __DEFAULT_FN_ATTRS
+_mm512_maskz_broadcast_f64x2 (__mmask8 __M, __m128d __A)
+{
+ return (__m512d) __builtin_ia32_broadcastf64x2_512_mask ((__v2df) __A,
+                 (__v8df)_mm512_setzero_pd (),
+ __M);
+}
+
+static __inline__ __m512i __DEFAULT_FN_ATTRS
+_mm512_broadcast_i32x2 (__m128i __A)
+{
+ return (__m512i) __builtin_ia32_broadcasti32x2_512_mask ((__v4si) __A,
+ (__v16si)_mm512_setzero_si512(),
+ (__mmask16) -1);
+}
+
+static __inline__ __m512i __DEFAULT_FN_ATTRS
+_mm512_mask_broadcast_i32x2 (__m512i __O, __mmask16 __M, __m128i __A)
+{
+ return (__m512i) __builtin_ia32_broadcasti32x2_512_mask ((__v4si) __A,
+ (__v16si)
+ __O, __M);
+}
+
+static __inline__ __m512i __DEFAULT_FN_ATTRS
+_mm512_maskz_broadcast_i32x2 (__mmask16 __M, __m128i __A)
+{
+ return (__m512i) __builtin_ia32_broadcasti32x2_512_mask ((__v4si) __A,
+ (__v16si)_mm512_setzero_si512 (),
+ __M);
+}
+
+static __inline__ __m512i __DEFAULT_FN_ATTRS
+_mm512_broadcast_i32x8 (__m256i __A)
+{
+ return (__m512i) __builtin_ia32_broadcasti32x8_512_mask ((__v8si) __A,
+ (__v16si)_mm512_setzero_si512(),
+ (__mmask16) -1);
+}
+
+static __inline__ __m512i __DEFAULT_FN_ATTRS
+_mm512_mask_broadcast_i32x8 (__m512i __O, __mmask16 __M, __m256i __A)
+{
+ return (__m512i) __builtin_ia32_broadcasti32x8_512_mask ((__v8si) __A,
+ (__v16si)__O,
+ __M);
+}
+
+static __inline__ __m512i __DEFAULT_FN_ATTRS
+_mm512_maskz_broadcast_i32x8 (__mmask16 __M, __m256i __A)
+{
+ return (__m512i) __builtin_ia32_broadcasti32x8_512_mask ((__v8si) __A,
+ (__v16si)
+ _mm512_setzero_si512 (),
+ __M);
+}
+
+static __inline__ __m512i __DEFAULT_FN_ATTRS
+_mm512_broadcast_i64x2 (__m128i __A)
+{
+ return (__m512i) __builtin_ia32_broadcasti64x2_512_mask ((__v2di) __A,
+ (__v8di)_mm512_setzero_si512(),
+ (__mmask8) -1);
+}
+
+static __inline__ __m512i __DEFAULT_FN_ATTRS
+_mm512_mask_broadcast_i64x2 (__m512i __O, __mmask8 __M, __m128i __A)
+{
+ return (__m512i) __builtin_ia32_broadcasti64x2_512_mask ((__v2di) __A,
+ (__v8di)
+ __O, __M);
+}
+
+static __inline__ __m512i __DEFAULT_FN_ATTRS
+_mm512_maskz_broadcast_i64x2 (__mmask8 __M, __m128i __A)
+{
+ return (__m512i) __builtin_ia32_broadcasti64x2_512_mask ((__v2di) __A,
+ (__v8di)_mm512_setzero_si512 (),
+ __M);
+}
+
+#define _mm512_extractf32x8_ps(A, imm) __extension__ ({ \
+ (__m256)__builtin_ia32_extractf32x8_mask((__v16sf)(__m512)(A), (int)(imm), \
+ (__v8sf)_mm256_setzero_ps(), \
+ (__mmask8)-1); })
+
+#define _mm512_mask_extractf32x8_ps(W, U, A, imm) __extension__ ({ \
+ (__m256)__builtin_ia32_extractf32x8_mask((__v16sf)(__m512)(A), (int)(imm), \
+ (__v8sf)(__m256)(W), \
+ (__mmask8)(U)); })
+
+#define _mm512_maskz_extractf32x8_ps(U, A, imm) __extension__ ({ \
+ (__m256)__builtin_ia32_extractf32x8_mask((__v16sf)(__m512)(A), (int)(imm), \
+ (__v8sf)_mm256_setzero_ps(), \
+ (__mmask8)(U)); })
+
+#define _mm512_extractf64x2_pd(A, imm) __extension__ ({ \
+ (__m128d)__builtin_ia32_extractf64x2_512_mask((__v8df)(__m512d)(A), \
+ (int)(imm), \
+ (__v2df)_mm_setzero_pd(), \
+ (__mmask8)-1); })
+
+#define _mm512_mask_extractf64x2_pd(W, U, A, imm) __extension__ ({ \
+ (__m128d)__builtin_ia32_extractf64x2_512_mask((__v8df)(__m512d)(A), \
+ (int)(imm), \
+ (__v2df)(__m128d)(W), \
+ (__mmask8)(U)); })
+
+#define _mm512_maskz_extractf64x2_pd(U, A, imm) __extension__ ({ \
+ (__m128d)__builtin_ia32_extractf64x2_512_mask((__v8df)(__m512d)(A), \
+ (int)(imm), \
+ (__v2df)_mm_setzero_pd(), \
+ (__mmask8)(U)); })
+
+#define _mm512_extracti32x8_epi32(A, imm) __extension__ ({ \
+ (__m256i)__builtin_ia32_extracti32x8_mask((__v16si)(__m512i)(A), (int)(imm), \
+ (__v8si)_mm256_setzero_si256(), \
+ (__mmask8)-1); })
+
+#define _mm512_mask_extracti32x8_epi32(W, U, A, imm) __extension__ ({ \
+ (__m256i)__builtin_ia32_extracti32x8_mask((__v16si)(__m512i)(A), (int)(imm), \
+ (__v8si)(__m256i)(W), \
+ (__mmask8)(U)); })
+
+#define _mm512_maskz_extracti32x8_epi32(U, A, imm) __extension__ ({ \
+ (__m256i)__builtin_ia32_extracti32x8_mask((__v16si)(__m512i)(A), (int)(imm), \
+ (__v8si)_mm256_setzero_si256(), \
+ (__mmask8)(U)); })
+
+#define _mm512_extracti64x2_epi64(A, imm) __extension__ ({ \
+ (__m128i)__builtin_ia32_extracti64x2_512_mask((__v8di)(__m512i)(A), \
+ (int)(imm), \
+ (__v2di)_mm_setzero_di(), \
+ (__mmask8)-1); })
+
+#define _mm512_mask_extracti64x2_epi64(W, U, A, imm) __extension__ ({ \
+ (__m128i)__builtin_ia32_extracti64x2_512_mask((__v8di)(__m512i)(A), \
+ (int)(imm), \
+ (__v2di)(__m128i)(W), \
+ (__mmask8)(U)); })
+
+#define _mm512_maskz_extracti64x2_epi64(U, A, imm) __extension__ ({ \
+ (__m128i)__builtin_ia32_extracti64x2_512_mask((__v8di)(__m512i)(A), \
+ (int)(imm), \
+ (__v2di)_mm_setzero_di(), \
+ (__mmask8)(U)); })
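+
+/* For the 64x2 extracts, `imm` selects one of the four 128-bit lanes of
+ * the source (0..3).  Sketch (uses _mm512_set4_epi64 from avx512fintrin.h):
+ *
+ *   __m512i v   = _mm512_set4_epi64(7, 6, 5, 4);   // elts {4,5,6,7, ...}
+ *   __m128i two = _mm512_extracti64x2_epi64(v, 1); // elts 2..3: {6, 7}
+ */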
+
+#define _mm512_insertf32x8(A, B, imm) __extension__ ({ \
+ (__m512)__builtin_ia32_insertf32x8_mask((__v16sf)(__m512)(A), \
+ (__v8sf)(__m256)(B), (int)(imm), \
+ (__v16sf)_mm512_setzero_ps(), \
+ (__mmask16)-1); })
+
+#define _mm512_mask_insertf32x8(W, U, A, B, imm) __extension__ ({ \
+ (__m512)__builtin_ia32_insertf32x8_mask((__v16sf)(__m512)(A), \
+ (__v8sf)(__m256)(B), (int)(imm), \
+ (__v16sf)(__m512)(W), \
+ (__mmask16)(U)); })
+
+#define _mm512_maskz_insertf32x8(U, A, B, imm) __extension__ ({ \
+ (__m512)__builtin_ia32_insertf32x8_mask((__v16sf)(__m512)(A), \
+ (__v8sf)(__m256)(B), (int)(imm), \
+ (__v16sf)_mm512_setzero_ps(), \
+ (__mmask16)(U)); })
+
+#define _mm512_insertf64x2(A, B, imm) __extension__ ({ \
+ (__m512d)__builtin_ia32_insertf64x2_512_mask((__v8df)(__m512d)(A), \
+ (__v2df)(__m128d)(B), \
+ (int)(imm), \
+ (__v8df)_mm512_setzero_pd(), \
+ (__mmask8)-1); })
+
+#define _mm512_mask_insertf64x2(W, U, A, B, imm) __extension__ ({ \
+ (__m512d)__builtin_ia32_insertf64x2_512_mask((__v8df)(__m512d)(A), \
+ (__v2df)(__m128d)(B), \
+ (int)(imm), \
+ (__v8df)(__m512d)(W), \
+ (__mmask8)(U)); })
+
+#define _mm512_maskz_insertf64x2(U, A, B, imm) __extension__ ({ \
+ (__m512d)__builtin_ia32_insertf64x2_512_mask((__v8df)(__m512d)(A), \
+ (__v2df)(__m128d)(B), \
+ (int)(imm), \
+ (__v8df)_mm512_setzero_pd(), \
+ (__mmask8)(U)); })
+
+#define _mm512_inserti32x8(A, B, imm) __extension__ ({ \
+ (__m512i)__builtin_ia32_inserti32x8_mask((__v16si)(__m512i)(A), \
+ (__v8si)(__m256i)(B), (int)(imm), \
+ (__v16si)_mm512_setzero_si512(), \
+ (__mmask16)-1); })
+
+#define _mm512_mask_inserti32x8(W, U, A, B, imm) __extension__ ({ \
+ (__m512i)__builtin_ia32_inserti32x8_mask((__v16si)(__m512i)(A), \
+ (__v8si)(__m256i)(B), (int)(imm), \
+ (__v16si)(__m512i)(W), \
+ (__mmask16)(U)); })
+
+#define _mm512_maskz_inserti32x8(U, A, B, imm) __extension__ ({ \
+ (__m512i)__builtin_ia32_inserti32x8_mask((__v16si)(__m512i)(A), \
+ (__v8si)(__m256i)(B), (int)(imm), \
+ (__v16si)_mm512_setzero_si512(), \
+ (__mmask16)(U)); })
+
+#define _mm512_inserti64x2(A, B, imm) __extension__ ({ \
+ (__m512i)__builtin_ia32_inserti64x2_512_mask((__v8di)(__m512i)(A), \
+ (__v2di)(__m128i)(B), \
+ (int)(imm), \
+ (__v8di)_mm512_setzero_si512(), \
+ (__mmask8)-1); })
+
+#define _mm512_mask_inserti64x2(W, U, A, B, imm) __extension__ ({ \
+ (__m512i)__builtin_ia32_inserti64x2_512_mask((__v8di)(__m512i)(A), \
+ (__v2di)(__m128i)(B), \
+ (int)(imm), \
+ (__v8di)(__m512i)(W), \
+ (__mmask8)(U)); })
+
+#define _mm512_maskz_inserti64x2(U, A, B, imm) __extension__ ({ \
+ (__m512i)__builtin_ia32_inserti64x2_512_mask((__v8di)(__m512i)(A), \
+ (__v2di)(__m128i)(B), \
+ (int)(imm), \
+ (__v8di)_mm512_setzero_si512(), \
+ (__mmask8)(U)); })
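+
+/* The insert macros overwrite the 256-bit (32x8) or 128-bit (64x2) chunk
+ * of the destination selected by `imm`.  Sketch assembling a 512-bit value
+ * from two placeholder __m256i halves lo256 and hi256:
+ *
+ *   __m512i v = _mm512_castsi256_si512(lo256);  // low half; top undefined
+ *   v = _mm512_inserti32x8(v, hi256, 1);        // write the high 256 bits
+ */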
+
+#define _mm512_mask_fpclass_ps_mask(U, A, imm) __extension__ ({ \
+ (__mmask16)__builtin_ia32_fpclassps512_mask((__v16sf)(__m512)(A), \
+ (int)(imm), (__mmask16)(U)); })
+
+#define _mm512_fpclass_ps_mask(A, imm) __extension__ ({ \
+ (__mmask16)__builtin_ia32_fpclassps512_mask((__v16sf)(__m512)(A), \
+ (int)(imm), (__mmask16)-1); })
+
+#define _mm512_mask_fpclass_pd_mask(U, A, imm) __extension__ ({ \
+ (__mmask8)__builtin_ia32_fpclasspd512_mask((__v8df)(__m512d)(A), (int)(imm), \
+ (__mmask8)(U)); })
+
+#define _mm512_fpclass_pd_mask(A, imm) __extension__ ({ \
+ (__mmask8)__builtin_ia32_fpclasspd512_mask((__v8df)(__m512d)(A), (int)(imm), \
+ (__mmask8)-1); })
+
+#define _mm_fpclass_sd_mask(A, imm) __extension__ ({ \
+ (__mmask8)__builtin_ia32_fpclasssd_mask((__v2df)(__m128d)(A), (int)(imm), \
+ (__mmask8)-1); })
+
+#define _mm_mask_fpclass_sd_mask(U, A, imm) __extension__ ({ \
+ (__mmask8)__builtin_ia32_fpclasssd_mask((__v2df)(__m128d)(A), (int)(imm), \
+ (__mmask8)(U)); })
+
+#define _mm_fpclass_ss_mask(A, imm) __extension__ ({ \
+ (__mmask8)__builtin_ia32_fpclassss_mask((__v4sf)(__m128)(A), (int)(imm), \
+ (__mmask8)-1); })
+
+#define _mm_mask_fpclass_ss_mask(U, A, imm) __extension__ ({ \
+ (__mmask8)__builtin_ia32_fpclassss_mask((__v4sf)(__m128)(A), (int)(imm), \
+ (__mmask8)(U)); })
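+
+/* The fpclass `imm` is a bitmask of categories to test (per the Intel SDM:
+ * 0x01 QNaN, 0x02 +0, 0x04 -0, 0x08 +Inf, 0x10 -Inf, 0x20 denormal,
+ * 0x40 negative finite, 0x80 SNaN).  Sketch, with v a placeholder __m512:
+ *
+ *   __mmask16 nan_lanes = _mm512_fpclass_ps_mask(v, 0x01 | 0x80);
+ *   // a set bit marks a lane of v holding any NaN
+ */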
+
+#undef __DEFAULT_FN_ATTRS
+
+#endif
diff --git a/current/clang-include/avx512erintrin.h b/current/clang-include/avx512erintrin.h
new file mode 100644
index 0000000..8ff212c
--- /dev/null
+++ b/current/clang-include/avx512erintrin.h
@@ -0,0 +1,285 @@
+/*===---- avx512erintrin.h - AVX512ER intrinsics ---------------------------===
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+ * THE SOFTWARE.
+ *
+ *===-----------------------------------------------------------------------===
+ */
+#ifndef __IMMINTRIN_H
+#error "Never use <avx512erintrin.h> directly; include <immintrin.h> instead."
+#endif
+
+#ifndef __AVX512ERINTRIN_H
+#define __AVX512ERINTRIN_H
+
+// exp2a23
+#define _mm512_exp2a23_round_pd(A, R) __extension__ ({ \
+ (__m512d)__builtin_ia32_exp2pd_mask((__v8df)(__m512d)(A), \
+ (__v8df)_mm512_setzero_pd(), \
+ (__mmask8)-1, (int)(R)); })
+
+#define _mm512_mask_exp2a23_round_pd(S, M, A, R) __extension__ ({ \
+ (__m512d)__builtin_ia32_exp2pd_mask((__v8df)(__m512d)(A), \
+ (__v8df)(__m512d)(S), (__mmask8)(M), \
+ (int)(R)); })
+
+#define _mm512_maskz_exp2a23_round_pd(M, A, R) __extension__ ({ \
+ (__m512d)__builtin_ia32_exp2pd_mask((__v8df)(__m512d)(A), \
+ (__v8df)_mm512_setzero_pd(), \
+ (__mmask8)(M), (int)(R)); })
+
+#define _mm512_exp2a23_pd(A) \
+ _mm512_exp2a23_round_pd((A), _MM_FROUND_CUR_DIRECTION)
+
+#define _mm512_mask_exp2a23_pd(S, M, A) \
+ _mm512_mask_exp2a23_round_pd((S), (M), (A), _MM_FROUND_CUR_DIRECTION)
+
+#define _mm512_maskz_exp2a23_pd(M, A) \
+ _mm512_maskz_exp2a23_round_pd((M), (A), _MM_FROUND_CUR_DIRECTION)
+
+#define _mm512_exp2a23_round_ps(A, R) __extension__ ({ \
+ (__m512)__builtin_ia32_exp2ps_mask((__v16sf)(__m512)(A), \
+ (__v16sf)_mm512_setzero_ps(), \
+ (__mmask16)-1, (int)(R)); })
+
+#define _mm512_mask_exp2a23_round_ps(S, M, A, R) __extension__ ({ \
+ (__m512)__builtin_ia32_exp2ps_mask((__v16sf)(__m512)(A), \
+ (__v16sf)(__m512)(S), (__mmask16)(M), \
+ (int)(R)); })
+
+#define _mm512_maskz_exp2a23_round_ps(M, A, R) __extension__ ({ \
+ (__m512)__builtin_ia32_exp2ps_mask((__v16sf)(__m512)(A), \
+ (__v16sf)_mm512_setzero_ps(), \
+ (__mmask16)(M), (int)(R)); })
+
+#define _mm512_exp2a23_ps(A) \
+ _mm512_exp2a23_round_ps((A), _MM_FROUND_CUR_DIRECTION)
+
+#define _mm512_mask_exp2a23_ps(S, M, A) \
+ _mm512_mask_exp2a23_round_ps((S), (M), (A), _MM_FROUND_CUR_DIRECTION)
+
+#define _mm512_maskz_exp2a23_ps(M, A) \
+ _mm512_maskz_exp2a23_round_ps((M), (A), _MM_FROUND_CUR_DIRECTION)
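+
+/* exp2a23 approximates 2^x elementwise with relative error below 2^-23
+ * (hence the name); it requires an AVX512ER target.  Sketch, with x a
+ * placeholder __m512:
+ *
+ *   __m512 p = _mm512_exp2a23_ps(x);   // p[i] ~= exp2(x[i])
+ */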
+
+// rsqrt28
+#define _mm512_rsqrt28_round_pd(A, R) __extension__ ({ \
+ (__m512d)__builtin_ia32_rsqrt28pd_mask((__v8df)(__m512d)(A), \
+ (__v8df)_mm512_setzero_pd(), \
+ (__mmask8)-1, (int)(R)); })
+
+#define _mm512_mask_rsqrt28_round_pd(S, M, A, R) __extension__ ({ \
+ (__m512d)__builtin_ia32_rsqrt28pd_mask((__v8df)(__m512d)(A), \
+ (__v8df)(__m512d)(S), (__mmask8)(M), \
+ (int)(R)); })
+
+#define _mm512_maskz_rsqrt28_round_pd(M, A, R) __extension__ ({ \
+ (__m512d)__builtin_ia32_rsqrt28pd_mask((__v8df)(__m512d)(A), \
+ (__v8df)_mm512_setzero_pd(), \
+ (__mmask8)(M), (int)(R)); })
+
+#define _mm512_rsqrt28_pd(A) \
+ _mm512_rsqrt28_round_pd((A), _MM_FROUND_CUR_DIRECTION)
+
+#define _mm512_mask_rsqrt28_pd(S, M, A) \
+ _mm512_mask_rsqrt28_round_pd((S), (M), (A), _MM_FROUND_CUR_DIRECTION)
+
+#define _mm512_maskz_rsqrt28_pd(M, A) \
+ _mm512_maskz_rsqrt28_round_pd((M), (A), _MM_FROUND_CUR_DIRECTION)
+
+#define _mm512_rsqrt28_round_ps(A, R) __extension__ ({ \
+ (__m512)__builtin_ia32_rsqrt28ps_mask((__v16sf)(__m512)(A), \
+ (__v16sf)_mm512_setzero_ps(), \
+ (__mmask16)-1, (int)(R)); })
+
+#define _mm512_mask_rsqrt28_round_ps(S, M, A, R) __extension__ ({ \
+ (__m512)__builtin_ia32_rsqrt28ps_mask((__v16sf)(__m512)(A), \
+ (__v16sf)(__m512)(S), (__mmask16)(M), \
+ (int)(R)); })
+
+#define _mm512_maskz_rsqrt28_round_ps(M, A, R) __extension__ ({ \
+ (__m512)__builtin_ia32_rsqrt28ps_mask((__v16sf)(__m512)(A), \
+ (__v16sf)_mm512_setzero_ps(), \
+ (__mmask16)(M), (int)(R)); })
+
+#define _mm512_rsqrt28_ps(A) \
+ _mm512_rsqrt28_round_ps((A), _MM_FROUND_CUR_DIRECTION)
+
+#define _mm512_mask_rsqrt28_ps(S, M, A) \
+ _mm512_mask_rsqrt28_round_ps((S), (M), (A), _MM_FROUND_CUR_DIRECTION)
+
+#define _mm512_maskz_rsqrt28_ps(M, A) \
+ _mm512_maskz_rsqrt28_round_ps((M), (A), _MM_FROUND_CUR_DIRECTION)
+
+#define _mm_rsqrt28_round_ss(A, B, R) __extension__ ({ \
+ (__m128)__builtin_ia32_rsqrt28ss_round_mask((__v4sf)(__m128)(A), \
+ (__v4sf)(__m128)(B), \
+ (__v4sf)_mm_setzero_ps(), \
+ (__mmask8)-1, (int)(R)); })
+
+#define _mm_mask_rsqrt28_round_ss(S, M, A, B, R) __extension__ ({ \
+ (__m128)__builtin_ia32_rsqrt28ss_round_mask((__v4sf)(__m128)(A), \
+ (__v4sf)(__m128)(B), \
+ (__v4sf)(__m128)(S), \
+ (__mmask8)(M), (int)(R)); })
+
+#define _mm_maskz_rsqrt28_round_ss(M, A, B, R) __extension__ ({ \
+ (__m128)__builtin_ia32_rsqrt28ss_round_mask((__v4sf)(__m128)(A), \
+ (__v4sf)(__m128)(B), \
+ (__v4sf)_mm_setzero_ps(), \
+ (__mmask8)(M), (int)(R)); })
+
+#define _mm_rsqrt28_ss(A, B) \
+ _mm_rsqrt28_round_ss((A), (B), _MM_FROUND_CUR_DIRECTION)
+
+#define _mm_mask_rsqrt28_ss(S, M, A, B) \
+ _mm_mask_rsqrt28_round_ss((S), (M), (A), (B), _MM_FROUND_CUR_DIRECTION)
+
+#define _mm_maskz_rsqrt28_ss(M, A, B) \
+ _mm_maskz_rsqrt28_round_ss((M), (A), (B), _MM_FROUND_CUR_DIRECTION)
+
+#define _mm_rsqrt28_round_sd(A, B, R) __extension__ ({ \
+ (__m128d)__builtin_ia32_rsqrt28sd_round_mask((__v2df)(__m128d)(A), \
+ (__v2df)(__m128d)(B), \
+ (__v2df)_mm_setzero_pd(), \
+ (__mmask8)-1, (int)(R)); })
+
+#define _mm_mask_rsqrt28_round_sd(S, M, A, B, R) __extension__ ({ \
+ (__m128d)__builtin_ia32_rsqrt28sd_round_mask((__v2df)(__m128d)(A), \
+ (__v2df)(__m128d)(B), \
+ (__v2df)(__m128d)(S), \
+ (__mmask8)(M), (int)(R)); })
+
+#define _mm_maskz_rsqrt28_round_sd(M, A, B, R) __extension__ ({ \
+ (__m128d)__builtin_ia32_rsqrt28sd_round_mask((__v2df)(__m128d)(A), \
+ (__v2df)(__m128d)(B), \
+ (__v2df)_mm_setzero_pd(), \
+ (__mmask8)(M), (int)(R)); })
+
+#define _mm_rsqrt28_sd(A, B) \
+ _mm_rsqrt28_round_sd((A), (B), _MM_FROUND_CUR_DIRECTION)
+
+#define _mm_mask_rsqrt28_sd(S, M, A, B) \
+ _mm_mask_rsqrt28_round_sd((S), (M), (A), (B), _MM_FROUND_CUR_DIRECTION)
+
+#define _mm_maskz_rsqrt28_sd(M, A, B) \
+ _mm_maskz_rsqrt28_round_sd((M), (A), (B), _MM_FROUND_CUR_DIRECTION)
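+
+/* rsqrt28 approximates 1/sqrt(x) with relative error below 2^-28 -- already
+ * finer than float's 24-bit mantissa, so no Newton-Raphson refinement is
+ * needed for single precision.  Sketch, with x a placeholder __m512:
+ *
+ *   __m512 r = _mm512_rsqrt28_ps(x);   // r[i] ~= 1/sqrt(x[i])
+ */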
+
+// rcp28
+#define _mm512_rcp28_round_pd(A, R) __extension__ ({ \
+ (__m512d)__builtin_ia32_rcp28pd_mask((__v8df)(__m512d)(A), \
+ (__v8df)_mm512_setzero_pd(), \
+ (__mmask8)-1, (int)(R)); })
+
+#define _mm512_mask_rcp28_round_pd(S, M, A, R) __extension__ ({ \
+ (__m512d)__builtin_ia32_rcp28pd_mask((__v8df)(__m512d)(A), \
+ (__v8df)(__m512d)(S), (__mmask8)(M), \
+ (int)(R)); })
+
+#define _mm512_maskz_rcp28_round_pd(M, A, R) __extension__ ({ \
+ (__m512d)__builtin_ia32_rcp28pd_mask((__v8df)(__m512d)(A), \
+ (__v8df)_mm512_setzero_pd(), \
+ (__mmask8)(M), (int)(R)); })
+
+#define _mm512_rcp28_pd(A) \
+ _mm512_rcp28_round_pd((A), _MM_FROUND_CUR_DIRECTION)
+
+#define _mm512_mask_rcp28_pd(S, M, A) \
+ _mm512_mask_rcp28_round_pd((S), (M), (A), _MM_FROUND_CUR_DIRECTION)
+
+#define _mm512_maskz_rcp28_pd(M, A) \
+ _mm512_maskz_rcp28_round_pd((M), (A), _MM_FROUND_CUR_DIRECTION)
+
+#define _mm512_rcp28_round_ps(A, R) __extension__ ({ \
+ (__m512)__builtin_ia32_rcp28ps_mask((__v16sf)(__m512)(A), \
+ (__v16sf)_mm512_setzero_ps(), \
+ (__mmask16)-1, (int)(R)); })
+
+#define _mm512_mask_rcp28_round_ps(S, M, A, R) __extension__ ({ \
+ (__m512)__builtin_ia32_rcp28ps_mask((__v16sf)(__m512)(A), \
+ (__v16sf)(__m512)(S), (__mmask16)(M), \
+ (int)(R)); })
+
+#define _mm512_maskz_rcp28_round_ps(M, A, R) __extension__ ({ \
+ (__m512)__builtin_ia32_rcp28ps_mask((__v16sf)(__m512)(A), \
+ (__v16sf)_mm512_setzero_ps(), \
+ (__mmask16)(M), (int)(R)); })
+
+#define _mm512_rcp28_ps(A) \
+ _mm512_rcp28_round_ps((A), _MM_FROUND_CUR_DIRECTION)
+
+#define _mm512_mask_rcp28_ps(S, M, A) \
+ _mm512_mask_rcp28_round_ps((S), (M), (A), _MM_FROUND_CUR_DIRECTION)
+
+#define _mm512_maskz_rcp28_ps(M, A) \
+ _mm512_maskz_rcp28_round_ps((M), (A), _MM_FROUND_CUR_DIRECTION)
+
+#define _mm_rcp28_round_ss(A, B, R) __extension__ ({ \
+ (__m128)__builtin_ia32_rcp28ss_round_mask((__v4sf)(__m128)(A), \
+ (__v4sf)(__m128)(B), \
+ (__v4sf)_mm_setzero_ps(), \
+ (__mmask8)-1, (int)(R)); })
+
+#define _mm_mask_rcp28_round_ss(S, M, A, B, R) __extension__ ({ \
+ (__m128)__builtin_ia32_rcp28ss_round_mask((__v4sf)(__m128)(A), \
+ (__v4sf)(__m128)(B), \
+ (__v4sf)(__m128)(S), \
+ (__mmask8)(M), (int)(R)); })
+
+#define _mm_maskz_rcp28_round_ss(M, A, B, R) __extension__ ({ \
+ (__m128)__builtin_ia32_rcp28ss_round_mask((__v4sf)(__m128)(A), \
+ (__v4sf)(__m128)(B), \
+ (__v4sf)_mm_setzero_ps(), \
+ (__mmask8)(M), (int)(R)); })
+
+#define _mm_rcp28_ss(A, B) \
+ _mm_rcp28_round_ss((A), (B), _MM_FROUND_CUR_DIRECTION)
+
+#define _mm_mask_rcp28_ss(S, M, A, B) \
+ _mm_mask_rcp28_round_ss((S), (M), (A), (B), _MM_FROUND_CUR_DIRECTION)
+
+#define _mm_maskz_rcp28_ss(M, A, B) \
+ _mm_maskz_rcp28_round_ss((M), (A), (B), _MM_FROUND_CUR_DIRECTION)
+
+#define _mm_rcp28_round_sd(A, B, R) __extension__ ({ \
+ (__m128d)__builtin_ia32_rcp28sd_round_mask((__v2df)(__m128d)(A), \
+ (__v2df)(__m128d)(B), \
+ (__v2df)_mm_setzero_pd(), \
+ (__mmask8)-1, (int)(R)); })
+
+#define _mm_mask_rcp28_round_sd(S, M, A, B, R) __extension__ ({ \
+ (__m128d)__builtin_ia32_rcp28sd_round_mask((__v2df)(__m128d)(A), \
+ (__v2df)(__m128d)(B), \
+ (__v2df)(__m128d)(S), \
+ (__mmask8)(M), (int)(R)); })
+
+#define _mm_maskz_rcp28_round_sd(M, A, B, R) __extension__ ({ \
+ (__m128d)__builtin_ia32_rcp28sd_round_mask((__v2df)(__m128d)(A), \
+ (__v2df)(__m128d)(B), \
+ (__v2df)_mm_setzero_pd(), \
+ (__mmask8)(M), (int)(R)); })
+
+#define _mm_rcp28_sd(A, B) \
+ _mm_rcp28_round_sd((A), (B), _MM_FROUND_CUR_DIRECTION)
+
+#define _mm_mask_rcp28_sd(S, M, A, B) \
+ _mm_mask_rcp28_round_sd((S), (M), (A), (B), _MM_FROUND_CUR_DIRECTION)
+
+#define _mm_maskz_rcp28_sd(M, A, B) \
+ _mm_maskz_rcp28_round_sd((M), (A), (B), _MM_FROUND_CUR_DIRECTION)
+
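+/* rcp28 is the matching reciprocal approximation, 1/x with relative error
+ * below 2^-28.  Sketch, with x a placeholder __m512d:
+ *
+ *   __m512d q = _mm512_rcp28_pd(x);    // q[i] ~= 1.0 / x[i]
+ */
+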
+#endif // __AVX512ERINTRIN_H
diff --git a/current/clang-include/avx512fintrin.h b/current/clang-include/avx512fintrin.h
new file mode 100644
index 0000000..badc436
--- /dev/null
+++ b/current/clang-include/avx512fintrin.h
@@ -0,0 +1,9543 @@
+/*===---- avx512fintrin.h - AVX512F intrinsics -----------------------------===
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+ * THE SOFTWARE.
+ *
+ *===-----------------------------------------------------------------------===
+ */
+#ifndef __IMMINTRIN_H
+#error "Never use <avx512fintrin.h> directly; include <immintrin.h> instead."
+#endif
+
+#ifndef __AVX512FINTRIN_H
+#define __AVX512FINTRIN_H
+
+typedef char __v64qi __attribute__((__vector_size__(64)));
+typedef short __v32hi __attribute__((__vector_size__(64)));
+typedef double __v8df __attribute__((__vector_size__(64)));
+typedef float __v16sf __attribute__((__vector_size__(64)));
+typedef long long __v8di __attribute__((__vector_size__(64)));
+typedef int __v16si __attribute__((__vector_size__(64)));
+
+/* Unsigned types */
+typedef unsigned char __v64qu __attribute__((__vector_size__(64)));
+typedef unsigned short __v32hu __attribute__((__vector_size__(64)));
+typedef unsigned long long __v8du __attribute__((__vector_size__(64)));
+typedef unsigned int __v16su __attribute__((__vector_size__(64)));
+
+typedef float __m512 __attribute__((__vector_size__(64)));
+typedef double __m512d __attribute__((__vector_size__(64)));
+typedef long long __m512i __attribute__((__vector_size__(64)));
+
+typedef unsigned char __mmask8;
+typedef unsigned short __mmask16;
+
+/* Rounding mode macros. */
+#define _MM_FROUND_TO_NEAREST_INT 0x00
+#define _MM_FROUND_TO_NEG_INF 0x01
+#define _MM_FROUND_TO_POS_INF 0x02
+#define _MM_FROUND_TO_ZERO 0x03
+#define _MM_FROUND_CUR_DIRECTION 0x04
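+
+/* These select the EVEX static-rounding override accepted by the *_round_*
+ * forms in this header; _MM_FROUND_CUR_DIRECTION means "use the current
+ * MXCSR rounding mode".  Illustrative sketch (assumes _mm512_add_round_pd,
+ * defined further down, and _MM_FROUND_NO_EXC (0x08) from <smmintrin.h>):
+ *
+ *   __m512d s = _mm512_add_round_pd(a, b,
+ *                   _MM_FROUND_TO_ZERO | _MM_FROUND_NO_EXC);
+ */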
+
+typedef enum
+{
+ _MM_PERM_AAAA = 0x00, _MM_PERM_AAAB = 0x01, _MM_PERM_AAAC = 0x02,
+ _MM_PERM_AAAD = 0x03, _MM_PERM_AABA = 0x04, _MM_PERM_AABB = 0x05,
+ _MM_PERM_AABC = 0x06, _MM_PERM_AABD = 0x07, _MM_PERM_AACA = 0x08,
+ _MM_PERM_AACB = 0x09, _MM_PERM_AACC = 0x0A, _MM_PERM_AACD = 0x0B,
+ _MM_PERM_AADA = 0x0C, _MM_PERM_AADB = 0x0D, _MM_PERM_AADC = 0x0E,
+ _MM_PERM_AADD = 0x0F, _MM_PERM_ABAA = 0x10, _MM_PERM_ABAB = 0x11,
+ _MM_PERM_ABAC = 0x12, _MM_PERM_ABAD = 0x13, _MM_PERM_ABBA = 0x14,
+ _MM_PERM_ABBB = 0x15, _MM_PERM_ABBC = 0x16, _MM_PERM_ABBD = 0x17,
+ _MM_PERM_ABCA = 0x18, _MM_PERM_ABCB = 0x19, _MM_PERM_ABCC = 0x1A,
+ _MM_PERM_ABCD = 0x1B, _MM_PERM_ABDA = 0x1C, _MM_PERM_ABDB = 0x1D,
+ _MM_PERM_ABDC = 0x1E, _MM_PERM_ABDD = 0x1F, _MM_PERM_ACAA = 0x20,
+ _MM_PERM_ACAB = 0x21, _MM_PERM_ACAC = 0x22, _MM_PERM_ACAD = 0x23,
+ _MM_PERM_ACBA = 0x24, _MM_PERM_ACBB = 0x25, _MM_PERM_ACBC = 0x26,
+ _MM_PERM_ACBD = 0x27, _MM_PERM_ACCA = 0x28, _MM_PERM_ACCB = 0x29,
+ _MM_PERM_ACCC = 0x2A, _MM_PERM_ACCD = 0x2B, _MM_PERM_ACDA = 0x2C,
+ _MM_PERM_ACDB = 0x2D, _MM_PERM_ACDC = 0x2E, _MM_PERM_ACDD = 0x2F,
+ _MM_PERM_ADAA = 0x30, _MM_PERM_ADAB = 0x31, _MM_PERM_ADAC = 0x32,
+ _MM_PERM_ADAD = 0x33, _MM_PERM_ADBA = 0x34, _MM_PERM_ADBB = 0x35,
+ _MM_PERM_ADBC = 0x36, _MM_PERM_ADBD = 0x37, _MM_PERM_ADCA = 0x38,
+ _MM_PERM_ADCB = 0x39, _MM_PERM_ADCC = 0x3A, _MM_PERM_ADCD = 0x3B,
+ _MM_PERM_ADDA = 0x3C, _MM_PERM_ADDB = 0x3D, _MM_PERM_ADDC = 0x3E,
+ _MM_PERM_ADDD = 0x3F, _MM_PERM_BAAA = 0x40, _MM_PERM_BAAB = 0x41,
+ _MM_PERM_BAAC = 0x42, _MM_PERM_BAAD = 0x43, _MM_PERM_BABA = 0x44,
+ _MM_PERM_BABB = 0x45, _MM_PERM_BABC = 0x46, _MM_PERM_BABD = 0x47,
+ _MM_PERM_BACA = 0x48, _MM_PERM_BACB = 0x49, _MM_PERM_BACC = 0x4A,
+ _MM_PERM_BACD = 0x4B, _MM_PERM_BADA = 0x4C, _MM_PERM_BADB = 0x4D,
+ _MM_PERM_BADC = 0x4E, _MM_PERM_BADD = 0x4F, _MM_PERM_BBAA = 0x50,
+ _MM_PERM_BBAB = 0x51, _MM_PERM_BBAC = 0x52, _MM_PERM_BBAD = 0x53,
+ _MM_PERM_BBBA = 0x54, _MM_PERM_BBBB = 0x55, _MM_PERM_BBBC = 0x56,
+ _MM_PERM_BBBD = 0x57, _MM_PERM_BBCA = 0x58, _MM_PERM_BBCB = 0x59,
+ _MM_PERM_BBCC = 0x5A, _MM_PERM_BBCD = 0x5B, _MM_PERM_BBDA = 0x5C,
+ _MM_PERM_BBDB = 0x5D, _MM_PERM_BBDC = 0x5E, _MM_PERM_BBDD = 0x5F,
+ _MM_PERM_BCAA = 0x60, _MM_PERM_BCAB = 0x61, _MM_PERM_BCAC = 0x62,
+ _MM_PERM_BCAD = 0x63, _MM_PERM_BCBA = 0x64, _MM_PERM_BCBB = 0x65,
+ _MM_PERM_BCBC = 0x66, _MM_PERM_BCBD = 0x67, _MM_PERM_BCCA = 0x68,
+ _MM_PERM_BCCB = 0x69, _MM_PERM_BCCC = 0x6A, _MM_PERM_BCCD = 0x6B,
+ _MM_PERM_BCDA = 0x6C, _MM_PERM_BCDB = 0x6D, _MM_PERM_BCDC = 0x6E,
+ _MM_PERM_BCDD = 0x6F, _MM_PERM_BDAA = 0x70, _MM_PERM_BDAB = 0x71,
+ _MM_PERM_BDAC = 0x72, _MM_PERM_BDAD = 0x73, _MM_PERM_BDBA = 0x74,
+ _MM_PERM_BDBB = 0x75, _MM_PERM_BDBC = 0x76, _MM_PERM_BDBD = 0x77,
+ _MM_PERM_BDCA = 0x78, _MM_PERM_BDCB = 0x79, _MM_PERM_BDCC = 0x7A,
+ _MM_PERM_BDCD = 0x7B, _MM_PERM_BDDA = 0x7C, _MM_PERM_BDDB = 0x7D,
+ _MM_PERM_BDDC = 0x7E, _MM_PERM_BDDD = 0x7F, _MM_PERM_CAAA = 0x80,
+ _MM_PERM_CAAB = 0x81, _MM_PERM_CAAC = 0x82, _MM_PERM_CAAD = 0x83,
+ _MM_PERM_CABA = 0x84, _MM_PERM_CABB = 0x85, _MM_PERM_CABC = 0x86,
+ _MM_PERM_CABD = 0x87, _MM_PERM_CACA = 0x88, _MM_PERM_CACB = 0x89,
+ _MM_PERM_CACC = 0x8A, _MM_PERM_CACD = 0x8B, _MM_PERM_CADA = 0x8C,
+ _MM_PERM_CADB = 0x8D, _MM_PERM_CADC = 0x8E, _MM_PERM_CADD = 0x8F,
+ _MM_PERM_CBAA = 0x90, _MM_PERM_CBAB = 0x91, _MM_PERM_CBAC = 0x92,
+ _MM_PERM_CBAD = 0x93, _MM_PERM_CBBA = 0x94, _MM_PERM_CBBB = 0x95,
+ _MM_PERM_CBBC = 0x96, _MM_PERM_CBBD = 0x97, _MM_PERM_CBCA = 0x98,
+ _MM_PERM_CBCB = 0x99, _MM_PERM_CBCC = 0x9A, _MM_PERM_CBCD = 0x9B,
+ _MM_PERM_CBDA = 0x9C, _MM_PERM_CBDB = 0x9D, _MM_PERM_CBDC = 0x9E,
+ _MM_PERM_CBDD = 0x9F, _MM_PERM_CCAA = 0xA0, _MM_PERM_CCAB = 0xA1,
+ _MM_PERM_CCAC = 0xA2, _MM_PERM_CCAD = 0xA3, _MM_PERM_CCBA = 0xA4,
+ _MM_PERM_CCBB = 0xA5, _MM_PERM_CCBC = 0xA6, _MM_PERM_CCBD = 0xA7,
+ _MM_PERM_CCCA = 0xA8, _MM_PERM_CCCB = 0xA9, _MM_PERM_CCCC = 0xAA,
+ _MM_PERM_CCCD = 0xAB, _MM_PERM_CCDA = 0xAC, _MM_PERM_CCDB = 0xAD,
+ _MM_PERM_CCDC = 0xAE, _MM_PERM_CCDD = 0xAF, _MM_PERM_CDAA = 0xB0,
+ _MM_PERM_CDAB = 0xB1, _MM_PERM_CDAC = 0xB2, _MM_PERM_CDAD = 0xB3,
+ _MM_PERM_CDBA = 0xB4, _MM_PERM_CDBB = 0xB5, _MM_PERM_CDBC = 0xB6,
+ _MM_PERM_CDBD = 0xB7, _MM_PERM_CDCA = 0xB8, _MM_PERM_CDCB = 0xB9,
+ _MM_PERM_CDCC = 0xBA, _MM_PERM_CDCD = 0xBB, _MM_PERM_CDDA = 0xBC,
+ _MM_PERM_CDDB = 0xBD, _MM_PERM_CDDC = 0xBE, _MM_PERM_CDDD = 0xBF,
+ _MM_PERM_DAAA = 0xC0, _MM_PERM_DAAB = 0xC1, _MM_PERM_DAAC = 0xC2,
+ _MM_PERM_DAAD = 0xC3, _MM_PERM_DABA = 0xC4, _MM_PERM_DABB = 0xC5,
+ _MM_PERM_DABC = 0xC6, _MM_PERM_DABD = 0xC7, _MM_PERM_DACA = 0xC8,
+ _MM_PERM_DACB = 0xC9, _MM_PERM_DACC = 0xCA, _MM_PERM_DACD = 0xCB,
+ _MM_PERM_DADA = 0xCC, _MM_PERM_DADB = 0xCD, _MM_PERM_DADC = 0xCE,
+ _MM_PERM_DADD = 0xCF, _MM_PERM_DBAA = 0xD0, _MM_PERM_DBAB = 0xD1,
+ _MM_PERM_DBAC = 0xD2, _MM_PERM_DBAD = 0xD3, _MM_PERM_DBBA = 0xD4,
+ _MM_PERM_DBBB = 0xD5, _MM_PERM_DBBC = 0xD6, _MM_PERM_DBBD = 0xD7,
+ _MM_PERM_DBCA = 0xD8, _MM_PERM_DBCB = 0xD9, _MM_PERM_DBCC = 0xDA,
+ _MM_PERM_DBCD = 0xDB, _MM_PERM_DBDA = 0xDC, _MM_PERM_DBDB = 0xDD,
+ _MM_PERM_DBDC = 0xDE, _MM_PERM_DBDD = 0xDF, _MM_PERM_DCAA = 0xE0,
+ _MM_PERM_DCAB = 0xE1, _MM_PERM_DCAC = 0xE2, _MM_PERM_DCAD = 0xE3,
+ _MM_PERM_DCBA = 0xE4, _MM_PERM_DCBB = 0xE5, _MM_PERM_DCBC = 0xE6,
+ _MM_PERM_DCBD = 0xE7, _MM_PERM_DCCA = 0xE8, _MM_PERM_DCCB = 0xE9,
+ _MM_PERM_DCCC = 0xEA, _MM_PERM_DCCD = 0xEB, _MM_PERM_DCDA = 0xEC,
+ _MM_PERM_DCDB = 0xED, _MM_PERM_DCDC = 0xEE, _MM_PERM_DCDD = 0xEF,
+ _MM_PERM_DDAA = 0xF0, _MM_PERM_DDAB = 0xF1, _MM_PERM_DDAC = 0xF2,
+ _MM_PERM_DDAD = 0xF3, _MM_PERM_DDBA = 0xF4, _MM_PERM_DDBB = 0xF5,
+ _MM_PERM_DDBC = 0xF6, _MM_PERM_DDBD = 0xF7, _MM_PERM_DDCA = 0xF8,
+ _MM_PERM_DDCB = 0xF9, _MM_PERM_DDCC = 0xFA, _MM_PERM_DDCD = 0xFB,
+ _MM_PERM_DDDA = 0xFC, _MM_PERM_DDDB = 0xFD, _MM_PERM_DDDC = 0xFE,
+ _MM_PERM_DDDD = 0xFF
+} _MM_PERM_ENUM;
+
+typedef enum
+{
+ _MM_MANT_NORM_1_2, /* interval [1, 2) */
+ _MM_MANT_NORM_p5_2, /* interval [0.5, 2) */
+ _MM_MANT_NORM_p5_1, /* interval [0.5, 1) */
+ _MM_MANT_NORM_p75_1p5 /* interval [0.75, 1.5) */
+} _MM_MANTISSA_NORM_ENUM;
+
+typedef enum
+{
+ _MM_MANT_SIGN_src, /* sign = sign(SRC) */
+ _MM_MANT_SIGN_zero, /* sign = 0 */
+ _MM_MANT_SIGN_nan /* DEST = NaN if sign(SRC) = 1 */
+} _MM_MANTISSA_SIGN_ENUM;
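+
+/* These two enums parameterize the getmant family declared further down:
+ * NORM picks the interval the extracted mantissa is normalized into, SIGN
+ * picks its sign treatment.  Sketch, with x a placeholder __m512d:
+ *
+ *   __m512d m = _mm512_getmant_pd(x, _MM_MANT_NORM_1_2, _MM_MANT_SIGN_zero);
+ *   // m[i] = |mantissa of x[i]|, scaled into [1, 2)
+ */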
+
+/* Define the default attributes for the functions in this file. */
+#define __DEFAULT_FN_ATTRS __attribute__((__always_inline__, __nodebug__, __target__("avx512f")))
+
+/* Create vectors with repeated elements */
+
+static __inline __m512i __DEFAULT_FN_ATTRS
+_mm512_setzero_si512(void)
+{
+ return (__m512i)(__v8di){ 0, 0, 0, 0, 0, 0, 0, 0 };
+}
+
+#define _mm512_setzero_epi32 _mm512_setzero_si512
+
+static __inline__ __m512d __DEFAULT_FN_ATTRS
+_mm512_undefined_pd(void)
+{
+ return (__m512d)__builtin_ia32_undef512();
+}
+
+static __inline__ __m512 __DEFAULT_FN_ATTRS
+_mm512_undefined(void)
+{
+ return (__m512)__builtin_ia32_undef512();
+}
+
+static __inline__ __m512 __DEFAULT_FN_ATTRS
+_mm512_undefined_ps(void)
+{
+ return (__m512)__builtin_ia32_undef512();
+}
+
+static __inline__ __m512i __DEFAULT_FN_ATTRS
+_mm512_undefined_epi32(void)
+{
+ return (__m512i)__builtin_ia32_undef512();
+}
+
+static __inline__ __m512i __DEFAULT_FN_ATTRS
+_mm512_broadcastd_epi32 (__m128i __A)
+{
+ return (__m512i)__builtin_shufflevector((__v4si) __A,
+ (__v4si)_mm_undefined_si128(),
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0);
+}
+
+static __inline__ __m512i __DEFAULT_FN_ATTRS
+_mm512_mask_broadcastd_epi32 (__m512i __O, __mmask16 __M, __m128i __A)
+{
+ return (__m512i)__builtin_ia32_selectd_512(__M,
+ (__v16si) _mm512_broadcastd_epi32(__A),
+ (__v16si) __O);
+}
+
+static __inline__ __m512i __DEFAULT_FN_ATTRS
+_mm512_maskz_broadcastd_epi32 (__mmask16 __M, __m128i __A)
+{
+ return (__m512i)__builtin_ia32_selectd_512(__M,
+ (__v16si) _mm512_broadcastd_epi32(__A),
+ (__v16si) _mm512_setzero_si512());
+}
+
+static __inline__ __m512i __DEFAULT_FN_ATTRS
+_mm512_broadcastq_epi64 (__m128i __A)
+{
+ return (__m512i)__builtin_shufflevector((__v2di) __A,
+ (__v2di) _mm_undefined_si128(),
+ 0, 0, 0, 0, 0, 0, 0, 0);
+}
+
+static __inline__ __m512i __DEFAULT_FN_ATTRS
+_mm512_mask_broadcastq_epi64 (__m512i __O, __mmask8 __M, __m128i __A)
+{
+ return (__m512i)__builtin_ia32_selectq_512(__M,
+ (__v8di) _mm512_broadcastq_epi64(__A),
+ (__v8di) __O);
+}
+
+static __inline__ __m512i __DEFAULT_FN_ATTRS
+_mm512_maskz_broadcastq_epi64 (__mmask8 __M, __m128i __A)
+{
+ return (__m512i)__builtin_ia32_selectq_512(__M,
+ (__v8di) _mm512_broadcastq_epi64(__A),
+ (__v8di) _mm512_setzero_si512());
+}
+
+static __inline __m512i __DEFAULT_FN_ATTRS
+_mm512_maskz_set1_epi32(__mmask16 __M, int __A)
+{
+ return (__m512i) __builtin_ia32_pbroadcastd512_gpr_mask (__A,
+ (__v16si)
+ _mm512_setzero_si512 (),
+ __M);
+}
+
+static __inline __m512i __DEFAULT_FN_ATTRS
+_mm512_maskz_set1_epi64(__mmask8 __M, long long __A)
+{
+#ifdef __x86_64__
+ return (__m512i) __builtin_ia32_pbroadcastq512_gpr_mask (__A,
+ (__v8di)
+ _mm512_setzero_si512 (),
+ __M);
+#else
+ return (__m512i) __builtin_ia32_pbroadcastq512_mem_mask (__A,
+ (__v8di)
+ _mm512_setzero_si512 (),
+ __M);
+#endif
+}
+
+static __inline __m512 __DEFAULT_FN_ATTRS
+_mm512_setzero_ps(void)
+{
+ return (__m512){ 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0,
+ 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0 };
+}
+
+#define _mm512_setzero _mm512_setzero_ps
+
+static __inline __m512d __DEFAULT_FN_ATTRS
+_mm512_setzero_pd(void)
+{
+ return (__m512d){ 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0 };
+}
+
+static __inline __m512 __DEFAULT_FN_ATTRS
+_mm512_set1_ps(float __w)
+{
+ return (__m512){ __w, __w, __w, __w, __w, __w, __w, __w,
+ __w, __w, __w, __w, __w, __w, __w, __w };
+}
+
+static __inline __m512d __DEFAULT_FN_ATTRS
+_mm512_set1_pd(double __w)
+{
+ return (__m512d){ __w, __w, __w, __w, __w, __w, __w, __w };
+}
+
+static __inline __m512i __DEFAULT_FN_ATTRS
+_mm512_set1_epi8(char __w)
+{
+ return (__m512i)(__v64qi){ __w, __w, __w, __w, __w, __w, __w, __w,
+ __w, __w, __w, __w, __w, __w, __w, __w,
+ __w, __w, __w, __w, __w, __w, __w, __w,
+ __w, __w, __w, __w, __w, __w, __w, __w,
+ __w, __w, __w, __w, __w, __w, __w, __w,
+ __w, __w, __w, __w, __w, __w, __w, __w,
+ __w, __w, __w, __w, __w, __w, __w, __w,
+ __w, __w, __w, __w, __w, __w, __w, __w };
+}
+
+static __inline __m512i __DEFAULT_FN_ATTRS
+_mm512_set1_epi16(short __w)
+{
+ return (__m512i)(__v32hi){ __w, __w, __w, __w, __w, __w, __w, __w,
+ __w, __w, __w, __w, __w, __w, __w, __w,
+ __w, __w, __w, __w, __w, __w, __w, __w,
+ __w, __w, __w, __w, __w, __w, __w, __w };
+}
+
+static __inline __m512i __DEFAULT_FN_ATTRS
+_mm512_set1_epi32(int __s)
+{
+ return (__m512i)(__v16si){ __s, __s, __s, __s, __s, __s, __s, __s,
+ __s, __s, __s, __s, __s, __s, __s, __s };
+}
+
+static __inline __m512i __DEFAULT_FN_ATTRS
+_mm512_set1_epi64(long long __d)
+{
+ return (__m512i)(__v8di){ __d, __d, __d, __d, __d, __d, __d, __d };
+}
+
+static __inline__ __m512 __DEFAULT_FN_ATTRS
+_mm512_broadcastss_ps(__m128 __A)
+{
+ return (__m512)__builtin_shufflevector((__v4sf) __A,
+ (__v4sf)_mm_undefined_ps(),
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0);
+}
+
+static __inline __m512i __DEFAULT_FN_ATTRS
+_mm512_set4_epi32 (int __A, int __B, int __C, int __D)
+{
+ return (__m512i)(__v16si)
+ { __D, __C, __B, __A, __D, __C, __B, __A,
+ __D, __C, __B, __A, __D, __C, __B, __A };
+}
+
+static __inline __m512i __DEFAULT_FN_ATTRS
+_mm512_set4_epi64 (long long __A, long long __B, long long __C,
+ long long __D)
+{
+ return (__m512i) (__v8di)
+ { __D, __C, __B, __A, __D, __C, __B, __A };
+}
+
+static __inline __m512d __DEFAULT_FN_ATTRS
+_mm512_set4_pd (double __A, double __B, double __C, double __D)
+{
+ return (__m512d)
+ { __D, __C, __B, __A, __D, __C, __B, __A };
+}
+
+static __inline __m512 __DEFAULT_FN_ATTRS
+_mm512_set4_ps (float __A, float __B, float __C, float __D)
+{
+ return (__m512)
+ { __D, __C, __B, __A, __D, __C, __B, __A,
+ __D, __C, __B, __A, __D, __C, __B, __A };
+}
+
+#define _mm512_setr4_epi32(e0,e1,e2,e3) \
+ _mm512_set4_epi32((e3),(e2),(e1),(e0))
+
+#define _mm512_setr4_epi64(e0,e1,e2,e3) \
+ _mm512_set4_epi64((e3),(e2),(e1),(e0))
+
+#define _mm512_setr4_pd(e0,e1,e2,e3) \
+ _mm512_set4_pd((e3),(e2),(e1),(e0))
+
+#define _mm512_setr4_ps(e0,e1,e2,e3) \
+ _mm512_set4_ps((e3),(e2),(e1),(e0))
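+
+/* set4 takes its arguments highest-element-first and repeats the quadruple;
+ * the setr4 aliases reverse that, so e0 lands in element 0:
+ *
+ *   __m512i a = _mm512_setr4_epi32(0, 1, 2, 3);
+ *   // elements: {0,1,2,3, 0,1,2,3, 0,1,2,3, 0,1,2,3}
+ */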
+
+static __inline__ __m512d __DEFAULT_FN_ATTRS
+_mm512_broadcastsd_pd(__m128d __A)
+{
+ return (__m512d)__builtin_shufflevector((__v2df) __A,
+ (__v2df) _mm_undefined_pd(),
+ 0, 0, 0, 0, 0, 0, 0, 0);
+}
+
+/* Cast between vector types */
+
+static __inline __m512d __DEFAULT_FN_ATTRS
+_mm512_castpd256_pd512(__m256d __a)
+{
+ return __builtin_shufflevector(__a, __a, 0, 1, 2, 3, -1, -1, -1, -1);
+}
+
+static __inline __m512 __DEFAULT_FN_ATTRS
+_mm512_castps256_ps512(__m256 __a)
+{
+ return __builtin_shufflevector(__a, __a, 0, 1, 2, 3, 4, 5, 6, 7,
+ -1, -1, -1, -1, -1, -1, -1, -1);
+}
+
+static __inline __m128d __DEFAULT_FN_ATTRS
+_mm512_castpd512_pd128(__m512d __a)
+{
+ return __builtin_shufflevector(__a, __a, 0, 1);
+}
+
+static __inline __m256d __DEFAULT_FN_ATTRS
+_mm512_castpd512_pd256 (__m512d __A)
+{
+ return __builtin_shufflevector(__A, __A, 0, 1, 2, 3);
+}
+
+static __inline __m128 __DEFAULT_FN_ATTRS
+_mm512_castps512_ps128(__m512 __a)
+{
+ return __builtin_shufflevector(__a, __a, 0, 1, 2, 3);
+}
+
+static __inline __m256 __DEFAULT_FN_ATTRS
+_mm512_castps512_ps256 (__m512 __A)
+{
+ return __builtin_shufflevector(__A, __A, 0, 1, 2, 3, 4, 5, 6, 7);
+}
+
+static __inline __m512 __DEFAULT_FN_ATTRS
+_mm512_castpd_ps (__m512d __A)
+{
+ return (__m512) (__A);
+}
+
+static __inline __m512i __DEFAULT_FN_ATTRS
+_mm512_castpd_si512 (__m512d __A)
+{
+ return (__m512i) (__A);
+}
+
+static __inline__ __m512d __DEFAULT_FN_ATTRS
+_mm512_castpd128_pd512 (__m128d __A)
+{
+ return __builtin_shufflevector( __A, __A, 0, 1, -1, -1, -1, -1, -1, -1);
+}
+
+static __inline __m512d __DEFAULT_FN_ATTRS
+_mm512_castps_pd (__m512 __A)
+{
+ return (__m512d) (__A);
+}
+
+static __inline __m512i __DEFAULT_FN_ATTRS
+_mm512_castps_si512 (__m512 __A)
+{
+ return (__m512i) (__A);
+}
+
+static __inline__ __m512 __DEFAULT_FN_ATTRS
+_mm512_castps128_ps512 (__m128 __A)
+{
+ return __builtin_shufflevector( __A, __A, 0, 1, 2, 3, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1);
+}
+
+static __inline__ __m512i __DEFAULT_FN_ATTRS
+_mm512_castsi128_si512 (__m128i __A)
+{
+ return __builtin_shufflevector( __A, __A, 0, 1, -1, -1, -1, -1, -1, -1);
+}
+
+static __inline__ __m512i __DEFAULT_FN_ATTRS
+_mm512_castsi256_si512 (__m256i __A)
+{
+ return __builtin_shufflevector( __A, __A, 0, 1, 2, 3, -1, -1, -1, -1);
+}
+
+static __inline __m512 __DEFAULT_FN_ATTRS
+_mm512_castsi512_ps (__m512i __A)
+{
+ return (__m512) (__A);
+}
+
+static __inline __m512d __DEFAULT_FN_ATTRS
+_mm512_castsi512_pd (__m512i __A)
+{
+ return (__m512d) (__A);
+}
+
+static __inline __m128i __DEFAULT_FN_ATTRS
+_mm512_castsi512_si128 (__m512i __A)
+{
+ return (__m128i)__builtin_shufflevector(__A, __A , 0, 1);
+}
+
+static __inline __m256i __DEFAULT_FN_ATTRS
+_mm512_castsi512_si256 (__m512i __A)
+{
+ return (__m256i)__builtin_shufflevector(__A, __A , 0, 1, 2, 3);
+}
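+
+/* The casts above cost no instructions: same-width casts reinterpret bits,
+ * narrowing casts keep the low elements, and the widening casts leave the
+ * upper elements undefined (the -1 shuffle indices).  E.g. after
+ *
+ *   __m512d w = _mm512_castpd128_pd512(x128);   // x128 is any __m128d
+ *
+ * only elements 0..1 of w are meaningful.
+ */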
+
+/* Bitwise operators */
+static __inline__ __m512i __DEFAULT_FN_ATTRS
+_mm512_and_epi32(__m512i __a, __m512i __b)
+{
+ return (__m512i)((__v16su)__a & (__v16su)__b);
+}
+
+static __inline__ __m512i __DEFAULT_FN_ATTRS
+_mm512_mask_and_epi32(__m512i __src, __mmask16 __k, __m512i __a, __m512i __b)
+{
+ return (__m512i)__builtin_ia32_selectd_512((__mmask16)__k,
+ (__v16si) _mm512_and_epi32(__a, __b),
+ (__v16si) __src);
+}
+
+static __inline__ __m512i __DEFAULT_FN_ATTRS
+_mm512_maskz_and_epi32(__mmask16 __k, __m512i __a, __m512i __b)
+{
+ return (__m512i) _mm512_mask_and_epi32(_mm512_setzero_si512 (),
+ __k, __a, __b);
+}
+
+static __inline__ __m512i __DEFAULT_FN_ATTRS
+_mm512_and_epi64(__m512i __a, __m512i __b)
+{
+ return (__m512i)((__v8du)__a & (__v8du)__b);
+}
+
+static __inline__ __m512i __DEFAULT_FN_ATTRS
+_mm512_mask_and_epi64(__m512i __src, __mmask8 __k, __m512i __a, __m512i __b)
+{
+ return (__m512i) __builtin_ia32_selectq_512 ((__mmask8) __k,
+ (__v8di) _mm512_and_epi64(__a, __b),
+ (__v8di) __src);
+}
+
+static __inline__ __m512i __DEFAULT_FN_ATTRS
+_mm512_maskz_and_epi64(__mmask8 __k, __m512i __a, __m512i __b)
+{
+ return (__m512i) _mm512_mask_and_epi64(_mm512_setzero_si512 (),
+ __k, __a, __b);
+}
+
+static __inline__ __m512i __DEFAULT_FN_ATTRS
+_mm512_andnot_si512 (__m512i __A, __m512i __B)
+{
+ return (__m512i)(~(__v8du)(__A) & (__v8du)__B);
+}
+
+static __inline__ __m512i __DEFAULT_FN_ATTRS
+_mm512_andnot_epi32 (__m512i __A, __m512i __B)
+{
+ return (__m512i)(~(__v16su)(__A) & (__v16su)__B);
+}
+
+static __inline__ __m512i __DEFAULT_FN_ATTRS
+_mm512_mask_andnot_epi32(__m512i __W, __mmask16 __U, __m512i __A, __m512i __B)
+{
+ return (__m512i)__builtin_ia32_selectd_512((__mmask16)__U,
+ (__v16si)_mm512_andnot_epi32(__A, __B),
+ (__v16si)__W);
+}
+
+static __inline__ __m512i __DEFAULT_FN_ATTRS
+_mm512_maskz_andnot_epi32(__mmask16 __U, __m512i __A, __m512i __B)
+{
+ return (__m512i)_mm512_mask_andnot_epi32(_mm512_setzero_si512(),
+ __U, __A, __B);
+}
+
+static __inline__ __m512i __DEFAULT_FN_ATTRS
+_mm512_andnot_epi64(__m512i __A, __m512i __B)
+{
+ return (__m512i)(~(__v8du)(__A) & (__v8du)__B);
+}
+
+static __inline__ __m512i __DEFAULT_FN_ATTRS
+_mm512_mask_andnot_epi64(__m512i __W, __mmask8 __U, __m512i __A, __m512i __B)
+{
+ return (__m512i)__builtin_ia32_selectq_512((__mmask8)__U,
+ (__v8di)_mm512_andnot_epi64(__A, __B),
+ (__v8di)__W);
+}
+
+static __inline__ __m512i __DEFAULT_FN_ATTRS
+_mm512_maskz_andnot_epi64(__mmask8 __U, __m512i __A, __m512i __B)
+{
+ return (__m512i)_mm512_mask_andnot_epi64(_mm512_setzero_si512(),
+ __U, __A, __B);
+}
+
+static __inline__ __m512i __DEFAULT_FN_ATTRS
+_mm512_or_epi32(__m512i __a, __m512i __b)
+{
+ return (__m512i)((__v16su)__a | (__v16su)__b);
+}
+
+static __inline__ __m512i __DEFAULT_FN_ATTRS
+_mm512_mask_or_epi32(__m512i __src, __mmask16 __k, __m512i __a, __m512i __b)
+{
+ return (__m512i)__builtin_ia32_selectd_512((__mmask16)__k,
+ (__v16si)_mm512_or_epi32(__a, __b),
+ (__v16si)__src);
+}
+
+static __inline__ __m512i __DEFAULT_FN_ATTRS
+_mm512_maskz_or_epi32(__mmask16 __k, __m512i __a, __m512i __b)
+{
+ return (__m512i)_mm512_mask_or_epi32(_mm512_setzero_si512(), __k, __a, __b);
+}
+
+static __inline__ __m512i __DEFAULT_FN_ATTRS
+_mm512_or_epi64(__m512i __a, __m512i __b)
+{
+ return (__m512i)((__v8du)__a | (__v8du)__b);
+}
+
+static __inline__ __m512i __DEFAULT_FN_ATTRS
+_mm512_mask_or_epi64(__m512i __src, __mmask8 __k, __m512i __a, __m512i __b)
+{
+ return (__m512i)__builtin_ia32_selectq_512((__mmask8)__k,
+ (__v8di)_mm512_or_epi64(__a, __b),
+ (__v8di)__src);
+}
+
+static __inline__ __m512i __DEFAULT_FN_ATTRS
+_mm512_maskz_or_epi64(__mmask8 __k, __m512i __a, __m512i __b)
+{
+ return (__m512i)_mm512_mask_or_epi64(_mm512_setzero_si512(), __k, __a, __b);
+}
+
+static __inline__ __m512i __DEFAULT_FN_ATTRS
+_mm512_xor_epi32(__m512i __a, __m512i __b)
+{
+ return (__m512i)((__v16su)__a ^ (__v16su)__b);
+}
+
+static __inline__ __m512i __DEFAULT_FN_ATTRS
+_mm512_mask_xor_epi32(__m512i __src, __mmask16 __k, __m512i __a, __m512i __b)
+{
+ return (__m512i)__builtin_ia32_selectd_512((__mmask16)__k,
+ (__v16si)_mm512_xor_epi32(__a, __b),
+ (__v16si)__src);
+}
+
+static __inline__ __m512i __DEFAULT_FN_ATTRS
+_mm512_maskz_xor_epi32(__mmask16 __k, __m512i __a, __m512i __b)
+{
+ return (__m512i)_mm512_mask_xor_epi32(_mm512_setzero_si512(), __k, __a, __b);
+}
+
+static __inline__ __m512i __DEFAULT_FN_ATTRS
+_mm512_xor_epi64(__m512i __a, __m512i __b)
+{
+ return (__m512i)((__v8du)__a ^ (__v8du)__b);
+}
+
+static __inline__ __m512i __DEFAULT_FN_ATTRS
+_mm512_mask_xor_epi64(__m512i __src, __mmask8 __k, __m512i __a, __m512i __b)
+{
+ return (__m512i)__builtin_ia32_selectq_512((__mmask8)__k,
+ (__v8di)_mm512_xor_epi64(__a, __b),
+ (__v8di)__src);
+}
+
+static __inline__ __m512i __DEFAULT_FN_ATTRS
+_mm512_maskz_xor_epi64(__mmask8 __k, __m512i __a, __m512i __b)
+{
+ return (__m512i)_mm512_mask_xor_epi64(_mm512_setzero_si512(), __k, __a, __b);
+}
+
+static __inline__ __m512i __DEFAULT_FN_ATTRS
+_mm512_and_si512(__m512i __a, __m512i __b)
+{
+ return (__m512i)((__v8du)__a & (__v8du)__b);
+}
+
+static __inline__ __m512i __DEFAULT_FN_ATTRS
+_mm512_or_si512(__m512i __a, __m512i __b)
+{
+ return (__m512i)((__v8du)__a | (__v8du)__b);
+}
+
+static __inline__ __m512i __DEFAULT_FN_ATTRS
+_mm512_xor_si512(__m512i __a, __m512i __b)
+{
+ return (__m512i)((__v8du)__a ^ (__v8du)__b);
+}
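+
+/* Note the operand order of andnot: it computes (~A) & B, complementing
+ * the *first* operand, as VPANDN does.  Sketch with placeholder vectors:
+ *
+ *   __m512i only_in_b = _mm512_andnot_si512(a, b);  // bits set in b, not a
+ */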
+
+/* Arithmetic */
+
+static __inline __m512d __DEFAULT_FN_ATTRS
+_mm512_add_pd(__m512d __a, __m512d __b)
+{
+ return (__m512d)((__v8df)__a + (__v8df)__b);
+}
+
+static __inline __m512 __DEFAULT_FN_ATTRS
+_mm512_add_ps(__m512 __a, __m512 __b)
+{
+ return (__m512)((__v16sf)__a + (__v16sf)__b);
+}
+
+static __inline __m512d __DEFAULT_FN_ATTRS
+_mm512_mul_pd(__m512d __a, __m512d __b)
+{
+ return (__m512d)((__v8df)__a * (__v8df)__b);
+}
+
+static __inline __m512 __DEFAULT_FN_ATTRS
+_mm512_mul_ps(__m512 __a, __m512 __b)
+{
+ return (__m512)((__v16sf)__a * (__v16sf)__b);
+}
+
+static __inline __m512d __DEFAULT_FN_ATTRS
+_mm512_sub_pd(__m512d __a, __m512d __b)
+{
+ return (__m512d)((__v8df)__a - (__v8df)__b);
+}
+
+static __inline __m512 __DEFAULT_FN_ATTRS
+_mm512_sub_ps(__m512 __a, __m512 __b)
+{
+ return (__m512)((__v16sf)__a - (__v16sf)__b);
+}
+
+static __inline__ __m512i __DEFAULT_FN_ATTRS
+_mm512_add_epi64 (__m512i __A, __m512i __B)
+{
+ return (__m512i) ((__v8du) __A + (__v8du) __B);
+}
+
+static __inline__ __m512i __DEFAULT_FN_ATTRS
+_mm512_mask_add_epi64 (__m512i __W, __mmask8 __U, __m512i __A, __m512i __B)
+{
+ return (__m512i) __builtin_ia32_paddq512_mask ((__v8di) __A,
+ (__v8di) __B,
+ (__v8di) __W,
+ (__mmask8) __U);
+}
+
+static __inline__ __m512i __DEFAULT_FN_ATTRS
+_mm512_maskz_add_epi64 (__mmask8 __U, __m512i __A, __m512i __B)
+{
+ return (__m512i) __builtin_ia32_paddq512_mask ((__v8di) __A,
+ (__v8di) __B,
+ (__v8di)
+ _mm512_setzero_si512 (),
+ (__mmask8) __U);
+}
+
+static __inline__ __m512i __DEFAULT_FN_ATTRS
+_mm512_sub_epi64 (__m512i __A, __m512i __B)
+{
+ return (__m512i) ((__v8du) __A - (__v8du) __B);
+}
+
+static __inline__ __m512i __DEFAULT_FN_ATTRS
+_mm512_mask_sub_epi64 (__m512i __W, __mmask8 __U, __m512i __A, __m512i __B)
+{
+ return (__m512i) __builtin_ia32_psubq512_mask ((__v8di) __A,
+ (__v8di) __B,
+ (__v8di) __W,
+ (__mmask8) __U);
+}
+
+static __inline__ __m512i __DEFAULT_FN_ATTRS
+_mm512_maskz_sub_epi64 (__mmask8 __U, __m512i __A, __m512i __B)
+{
+ return (__m512i) __builtin_ia32_psubq512_mask ((__v8di) __A,
+ (__v8di) __B,
+ (__v8di)
+ _mm512_setzero_si512 (),
+ (__mmask8) __U);
+}
+
+static __inline__ __m512i __DEFAULT_FN_ATTRS
+_mm512_add_epi32 (__m512i __A, __m512i __B)
+{
+ return (__m512i) ((__v16su) __A + (__v16su) __B);
+}
+
+static __inline__ __m512i __DEFAULT_FN_ATTRS
+_mm512_mask_add_epi32 (__m512i __W, __mmask16 __U, __m512i __A, __m512i __B)
+{
+ return (__m512i) __builtin_ia32_paddd512_mask ((__v16si) __A,
+ (__v16si) __B,
+ (__v16si) __W,
+ (__mmask16) __U);
+}
+
+static __inline__ __m512i __DEFAULT_FN_ATTRS
+_mm512_maskz_add_epi32 (__mmask16 __U, __m512i __A, __m512i __B)
+{
+ return (__m512i) __builtin_ia32_paddd512_mask ((__v16si) __A,
+ (__v16si) __B,
+ (__v16si)
+ _mm512_setzero_si512 (),
+ (__mmask16) __U);
+}
+
+static __inline__ __m512i __DEFAULT_FN_ATTRS
+_mm512_sub_epi32 (__m512i __A, __m512i __B)
+{
+ return (__m512i) ((__v16su) __A - (__v16su) __B);
+}
+
+static __inline__ __m512i __DEFAULT_FN_ATTRS
+_mm512_mask_sub_epi32 (__m512i __W, __mmask16 __U, __m512i __A, __m512i __B)
+{
+ return (__m512i) __builtin_ia32_psubd512_mask ((__v16si) __A,
+ (__v16si) __B,
+ (__v16si) __W,
+ (__mmask16) __U);
+}
+
+static __inline__ __m512i __DEFAULT_FN_ATTRS
+_mm512_maskz_sub_epi32 (__mmask16 __U, __m512i __A, __m512i __B)
+{
+ return (__m512i) __builtin_ia32_psubd512_mask ((__v16si) __A,
+ (__v16si) __B,
+ (__v16si)
+ _mm512_setzero_si512 (),
+ (__mmask16) __U);
+}
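+
+/* The masked integer forms follow the usual convention: a set mask bit
+ * computes the lane, a clear bit copies it from __W (mask_) or zeroes it
+ * (maskz_).  Sketch with placeholder vectors w, a, b:
+ *
+ *   __m512i s = _mm512_mask_add_epi32(w, 0xFF00, a, b);
+ *   // lanes 8-15 hold a+b; lanes 0-7 are copied from w
+ */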
+
+#define _mm512_mask_max_round_pd(W, U, A, B, R) __extension__ ({ \
+ (__m512d)__builtin_ia32_maxpd512_mask((__v8df)(__m512d)(A), \
+ (__v8df)(__m512d)(B), \
+ (__v8df)(__m512d)(W), (__mmask8)(U), \
+ (int)(R)); })
+
+#define _mm512_maskz_max_round_pd(U, A, B, R) __extension__ ({ \
+ (__m512d)__builtin_ia32_maxpd512_mask((__v8df)(__m512d)(A), \
+ (__v8df)(__m512d)(B), \
+ (__v8df)_mm512_setzero_pd(), \
+ (__mmask8)(U), (int)(R)); })
+
+#define _mm512_max_round_pd(A, B, R) __extension__ ({ \
+ (__m512d)__builtin_ia32_maxpd512_mask((__v8df)(__m512d)(A), \
+ (__v8df)(__m512d)(B), \
+ (__v8df)_mm512_undefined_pd(), \
+ (__mmask8)-1, (int)(R)); })
+
+static __inline__ __m512d __DEFAULT_FN_ATTRS
+_mm512_max_pd(__m512d __A, __m512d __B)
+{
+ return (__m512d) __builtin_ia32_maxpd512_mask ((__v8df) __A,
+ (__v8df) __B,
+ (__v8df)
+ _mm512_setzero_pd (),
+ (__mmask8) -1,
+ _MM_FROUND_CUR_DIRECTION);
+}
+
+static __inline__ __m512d __DEFAULT_FN_ATTRS
+_mm512_mask_max_pd (__m512d __W, __mmask8 __U, __m512d __A, __m512d __B)
+{
+ return (__m512d) __builtin_ia32_maxpd512_mask ((__v8df) __A,
+ (__v8df) __B,
+ (__v8df) __W,
+ (__mmask8) __U,
+ _MM_FROUND_CUR_DIRECTION);
+}
+
+static __inline__ __m512d __DEFAULT_FN_ATTRS
+_mm512_maskz_max_pd (__mmask8 __U, __m512d __A, __m512d __B)
+{
+ return (__m512d) __builtin_ia32_maxpd512_mask ((__v8df) __A,
+ (__v8df) __B,
+ (__v8df)
+ _mm512_setzero_pd (),
+ (__mmask8) __U,
+ _MM_FROUND_CUR_DIRECTION);
+}
+
+#define _mm512_mask_max_round_ps(W, U, A, B, R) __extension__ ({ \
+ (__m512)__builtin_ia32_maxps512_mask((__v16sf)(__m512)(A), \
+ (__v16sf)(__m512)(B), \
+ (__v16sf)(__m512)(W), (__mmask16)(U), \
+ (int)(R)); })
+
+#define _mm512_maskz_max_round_ps(U, A, B, R) __extension__ ({ \
+ (__m512)__builtin_ia32_maxps512_mask((__v16sf)(__m512)(A), \
+ (__v16sf)(__m512)(B), \
+ (__v16sf)_mm512_setzero_ps(), \
+ (__mmask16)(U), (int)(R)); })
+
+#define _mm512_max_round_ps(A, B, R) __extension__ ({ \
+ (__m512)__builtin_ia32_maxps512_mask((__v16sf)(__m512)(A), \
+ (__v16sf)(__m512)(B), \
+ (__v16sf)_mm512_undefined_ps(), \
+ (__mmask16)-1, (int)(R)); })
+
+static __inline__ __m512 __DEFAULT_FN_ATTRS
+_mm512_max_ps(__m512 __A, __m512 __B)
+{
+ return (__m512) __builtin_ia32_maxps512_mask ((__v16sf) __A,
+ (__v16sf) __B,
+ (__v16sf)
+ _mm512_setzero_ps (),
+ (__mmask16) -1,
+ _MM_FROUND_CUR_DIRECTION);
+}
+
+static __inline__ __m512 __DEFAULT_FN_ATTRS
+_mm512_mask_max_ps (__m512 __W, __mmask16 __U, __m512 __A, __m512 __B)
+{
+ return (__m512) __builtin_ia32_maxps512_mask ((__v16sf) __A,
+ (__v16sf) __B,
+ (__v16sf) __W,
+ (__mmask16) __U,
+ _MM_FROUND_CUR_DIRECTION);
+}
+
+static __inline__ __m512 __DEFAULT_FN_ATTRS
+_mm512_maskz_max_ps (__mmask16 __U, __m512 __A, __m512 __B)
+{
+ return (__m512) __builtin_ia32_maxps512_mask ((__v16sf) __A,
+ (__v16sf) __B,
+ (__v16sf)
+ _mm512_setzero_ps (),
+ (__mmask16) __U,
+ _MM_FROUND_CUR_DIRECTION);
+}
+
+static __inline__ __m128 __DEFAULT_FN_ATTRS
+_mm_mask_max_ss(__m128 __W, __mmask8 __U,__m128 __A, __m128 __B) {
+ return (__m128) __builtin_ia32_maxss_round_mask ((__v4sf) __A,
+ (__v4sf) __B,
+ (__v4sf) __W,
+ (__mmask8) __U,
+ _MM_FROUND_CUR_DIRECTION);
+}
+
+static __inline__ __m128 __DEFAULT_FN_ATTRS
+_mm_maskz_max_ss(__mmask8 __U,__m128 __A, __m128 __B) {
+ return (__m128) __builtin_ia32_maxss_round_mask ((__v4sf) __A,
+ (__v4sf) __B,
+ (__v4sf) _mm_setzero_ps (),
+ (__mmask8) __U,
+ _MM_FROUND_CUR_DIRECTION);
+}
+
+#define _mm_max_round_ss(A, B, R) __extension__ ({ \
+ (__m128)__builtin_ia32_maxss_round_mask((__v4sf)(__m128)(A), \
+ (__v4sf)(__m128)(B), \
+ (__v4sf)_mm_setzero_ps(), \
+ (__mmask8)-1, (int)(R)); })
+
+#define _mm_mask_max_round_ss(W, U, A, B, R) __extension__ ({ \
+ (__m128)__builtin_ia32_maxss_round_mask((__v4sf)(__m128)(A), \
+ (__v4sf)(__m128)(B), \
+ (__v4sf)(__m128)(W), (__mmask8)(U), \
+ (int)(R)); })
+
+#define _mm_maskz_max_round_ss(U, A, B, R) __extension__ ({ \
+ (__m128)__builtin_ia32_maxss_round_mask((__v4sf)(__m128)(A), \
+ (__v4sf)(__m128)(B), \
+ (__v4sf)_mm_setzero_ps(), \
+ (__mmask8)(U), (int)(R)); })
+
+static __inline__ __m128d __DEFAULT_FN_ATTRS
+_mm_mask_max_sd(__m128d __W, __mmask8 __U,__m128d __A, __m128d __B) {
+ return (__m128d) __builtin_ia32_maxsd_round_mask ((__v2df) __A,
+ (__v2df) __B,
+ (__v2df) __W,
+ (__mmask8) __U,
+ _MM_FROUND_CUR_DIRECTION);
+}
+
+static __inline__ __m128d __DEFAULT_FN_ATTRS
+_mm_maskz_max_sd(__mmask8 __U,__m128d __A, __m128d __B) {
+ return (__m128d) __builtin_ia32_maxsd_round_mask ((__v2df) __A,
+ (__v2df) __B,
+ (__v2df) _mm_setzero_pd (),
+ (__mmask8) __U,
+ _MM_FROUND_CUR_DIRECTION);
+}
+
+#define _mm_max_round_sd(A, B, R) __extension__ ({ \
+ (__m128d)__builtin_ia32_maxsd_round_mask((__v2df)(__m128d)(A), \
+ (__v2df)(__m128d)(B), \
+ (__v2df)_mm_setzero_pd(), \
+ (__mmask8)-1, (int)(R)); })
+
+#define _mm_mask_max_round_sd(W, U, A, B, R) __extension__ ({ \
+ (__m128d)__builtin_ia32_maxsd_round_mask((__v2df)(__m128d)(A), \
+ (__v2df)(__m128d)(B), \
+ (__v2df)(__m128d)(W), \
+ (__mmask8)(U), (int)(R)); })
+
+#define _mm_maskz_max_round_sd(U, A, B, R) __extension__ ({ \
+ (__m128d)__builtin_ia32_maxsd_round_mask((__v2df)(__m128d)(A), \
+ (__v2df)(__m128d)(B), \
+ (__v2df)_mm_setzero_pd(), \
+ (__mmask8)(U), (int)(R)); })
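+
+/* The _ss/_sd forms compute only element 0 (gated by bit 0 of the mask);
+ * the remaining result elements are carried over from the first source
+ * operand, per the usual scalar-AVX convention.  Sketch:
+ *
+ *   __m128 m = _mm_maskz_max_ss(0x1, a, b);   // m[0] = max(a[0], b[0])
+ */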
+
+static __inline __m512i
+__DEFAULT_FN_ATTRS
+_mm512_max_epi32(__m512i __A, __m512i __B)
+{
+ return (__m512i) __builtin_ia32_pmaxsd512_mask ((__v16si) __A,
+ (__v16si) __B,
+ (__v16si)
+ _mm512_setzero_si512 (),
+ (__mmask16) -1);
+}
+
+static __inline__ __m512i __DEFAULT_FN_ATTRS
+_mm512_mask_max_epi32 (__m512i __W, __mmask16 __M, __m512i __A, __m512i __B)
+{
+ return (__m512i) __builtin_ia32_pmaxsd512_mask ((__v16si) __A,
+ (__v16si) __B,
+ (__v16si) __W, __M);
+}
+
+static __inline__ __m512i __DEFAULT_FN_ATTRS
+_mm512_maskz_max_epi32 (__mmask16 __M, __m512i __A, __m512i __B)
+{
+ return (__m512i) __builtin_ia32_pmaxsd512_mask ((__v16si) __A,
+ (__v16si) __B,
+ (__v16si)
+ _mm512_setzero_si512 (),
+ __M);
+}
+
+static __inline __m512i __DEFAULT_FN_ATTRS
+_mm512_max_epu32(__m512i __A, __m512i __B)
+{
+ return (__m512i) __builtin_ia32_pmaxud512_mask ((__v16si) __A,
+ (__v16si) __B,
+ (__v16si)
+ _mm512_setzero_si512 (),
+ (__mmask16) -1);
+}
+
+static __inline__ __m512i __DEFAULT_FN_ATTRS
+_mm512_mask_max_epu32 (__m512i __W, __mmask16 __M, __m512i __A, __m512i __B)
+{
+ return (__m512i) __builtin_ia32_pmaxud512_mask ((__v16si) __A,
+ (__v16si) __B,
+ (__v16si) __W, __M);
+}
+
+static __inline__ __m512i __DEFAULT_FN_ATTRS
+_mm512_maskz_max_epu32 (__mmask16 __M, __m512i __A, __m512i __B)
+{
+ return (__m512i) __builtin_ia32_pmaxud512_mask ((__v16si) __A,
+ (__v16si) __B,
+ (__v16si)
+ _mm512_setzero_si512 (),
+ __M);
+}
+
+static __inline __m512i __DEFAULT_FN_ATTRS
+_mm512_max_epi64(__m512i __A, __m512i __B)
+{
+ return (__m512i) __builtin_ia32_pmaxsq512_mask ((__v8di) __A,
+ (__v8di) __B,
+ (__v8di)
+ _mm512_setzero_si512 (),
+ (__mmask8) -1);
+}
+
+static __inline__ __m512i __DEFAULT_FN_ATTRS
+_mm512_mask_max_epi64 (__m512i __W, __mmask8 __M, __m512i __A, __m512i __B)
+{
+ return (__m512i) __builtin_ia32_pmaxsq512_mask ((__v8di) __A,
+ (__v8di) __B,
+ (__v8di) __W, __M);
+}
+
+static __inline__ __m512i __DEFAULT_FN_ATTRS
+_mm512_maskz_max_epi64 (__mmask8 __M, __m512i __A, __m512i __B)
+{
+ return (__m512i) __builtin_ia32_pmaxsq512_mask ((__v8di) __A,
+ (__v8di) __B,
+ (__v8di)
+ _mm512_setzero_si512 (),
+ __M);
+}
+
+static __inline __m512i __DEFAULT_FN_ATTRS
+_mm512_max_epu64(__m512i __A, __m512i __B)
+{
+ return (__m512i) __builtin_ia32_pmaxuq512_mask ((__v8di) __A,
+ (__v8di) __B,
+ (__v8di)
+ _mm512_setzero_si512 (),
+ (__mmask8) -1);
+}
+
+static __inline__ __m512i __DEFAULT_FN_ATTRS
+_mm512_mask_max_epu64 (__m512i __W, __mmask8 __M, __m512i __A, __m512i __B)
+{
+ return (__m512i) __builtin_ia32_pmaxuq512_mask ((__v8di) __A,
+ (__v8di) __B,
+ (__v8di) __W, __M);
+}
+
+static __inline__ __m512i __DEFAULT_FN_ATTRS
+_mm512_maskz_max_epu64 (__mmask8 __M, __m512i __A, __m512i __B)
+{
+ return (__m512i) __builtin_ia32_pmaxuq512_mask ((__v8di) __A,
+ (__v8di) __B,
+ (__v8di)
+ _mm512_setzero_si512 (),
+ __M);
+}
+
+#define _mm512_mask_min_round_pd(W, U, A, B, R) __extension__ ({ \
+ (__m512d)__builtin_ia32_minpd512_mask((__v8df)(__m512d)(A), \
+ (__v8df)(__m512d)(B), \
+ (__v8df)(__m512d)(W), (__mmask8)(U), \
+ (int)(R)); })
+
+#define _mm512_maskz_min_round_pd(U, A, B, R) __extension__ ({ \
+ (__m512d)__builtin_ia32_minpd512_mask((__v8df)(__m512d)(A), \
+ (__v8df)(__m512d)(B), \
+ (__v8df)_mm512_setzero_pd(), \
+ (__mmask8)(U), (int)(R)); })
+
+#define _mm512_min_round_pd(A, B, R) __extension__ ({ \
+ (__m512d)__builtin_ia32_minpd512_mask((__v8df)(__m512d)(A), \
+ (__v8df)(__m512d)(B), \
+ (__v8df)_mm512_undefined_pd(), \
+ (__mmask8)-1, (int)(R)); })
+
+static __inline__ __m512d __DEFAULT_FN_ATTRS
+_mm512_min_pd(__m512d __A, __m512d __B)
+{
+ return (__m512d) __builtin_ia32_minpd512_mask ((__v8df) __A,
+ (__v8df) __B,
+ (__v8df)
+ _mm512_setzero_pd (),
+ (__mmask8) -1,
+ _MM_FROUND_CUR_DIRECTION);
+}
+
+static __inline__ __m512d __DEFAULT_FN_ATTRS
+_mm512_mask_min_pd (__m512d __W, __mmask8 __U, __m512d __A, __m512d __B)
+{
+ return (__m512d) __builtin_ia32_minpd512_mask ((__v8df) __A,
+ (__v8df) __B,
+ (__v8df) __W,
+ (__mmask8) __U,
+ _MM_FROUND_CUR_DIRECTION);
+}
+
+#define _mm512_mask_min_round_ps(W, U, A, B, R) __extension__ ({ \
+ (__m512)__builtin_ia32_minps512_mask((__v16sf)(__m512)(A), \
+ (__v16sf)(__m512)(B), \
+ (__v16sf)(__m512)(W), (__mmask16)(U), \
+ (int)(R)); })
+
+#define _mm512_maskz_min_round_ps(U, A, B, R) __extension__ ({ \
+ (__m512)__builtin_ia32_minps512_mask((__v16sf)(__m512)(A), \
+ (__v16sf)(__m512)(B), \
+ (__v16sf)_mm512_setzero_ps(), \
+ (__mmask16)(U), (int)(R)); })
+
+#define _mm512_min_round_ps(A, B, R) __extension__ ({ \
+ (__m512)__builtin_ia32_minps512_mask((__v16sf)(__m512)(A), \
+ (__v16sf)(__m512)(B), \
+ (__v16sf)_mm512_undefined_ps(), \
+ (__mmask16)-1, (int)(R)); })
+
+static __inline__ __m512d __DEFAULT_FN_ATTRS
+_mm512_maskz_min_pd (__mmask8 __U, __m512d __A, __m512d __B)
+{
+ return (__m512d) __builtin_ia32_minpd512_mask ((__v8df) __A,
+ (__v8df) __B,
+ (__v8df)
+ _mm512_setzero_pd (),
+ (__mmask8) __U,
+ _MM_FROUND_CUR_DIRECTION);
+}
+
+static __inline__ __m512 __DEFAULT_FN_ATTRS
+_mm512_min_ps(__m512 __A, __m512 __B)
+{
+ return (__m512) __builtin_ia32_minps512_mask ((__v16sf) __A,
+ (__v16sf) __B,
+ (__v16sf)
+ _mm512_setzero_ps (),
+ (__mmask16) -1,
+ _MM_FROUND_CUR_DIRECTION);
+}
+
+static __inline__ __m512 __DEFAULT_FN_ATTRS
+_mm512_mask_min_ps (__m512 __W, __mmask16 __U, __m512 __A, __m512 __B)
+{
+ return (__m512) __builtin_ia32_minps512_mask ((__v16sf) __A,
+ (__v16sf) __B,
+ (__v16sf) __W,
+ (__mmask16) __U,
+ _MM_FROUND_CUR_DIRECTION);
+}
+
+static __inline__ __m512 __DEFAULT_FN_ATTRS
+_mm512_maskz_min_ps (__mmask16 __U, __m512 __A, __m512 __B)
+{
+ return (__m512) __builtin_ia32_minps512_mask ((__v16sf) __A,
+ (__v16sf) __B,
+ (__v16sf)
+ _mm512_setzero_ps (),
+ (__mmask16) __U,
+ _MM_FROUND_CUR_DIRECTION);
+}
+
+static __inline__ __m128 __DEFAULT_FN_ATTRS
+_mm_mask_min_ss(__m128 __W, __mmask8 __U,__m128 __A, __m128 __B) {
+ return (__m128) __builtin_ia32_minss_round_mask ((__v4sf) __A,
+ (__v4sf) __B,
+ (__v4sf) __W,
+ (__mmask8) __U,
+ _MM_FROUND_CUR_DIRECTION);
+}
+
+static __inline__ __m128 __DEFAULT_FN_ATTRS
+_mm_maskz_min_ss(__mmask8 __U,__m128 __A, __m128 __B) {
+ return (__m128) __builtin_ia32_minss_round_mask ((__v4sf) __A,
+ (__v4sf) __B,
+ (__v4sf) _mm_setzero_ps (),
+ (__mmask8) __U,
+ _MM_FROUND_CUR_DIRECTION);
+}
+
+#define _mm_min_round_ss(A, B, R) __extension__ ({ \
+ (__m128)__builtin_ia32_minss_round_mask((__v4sf)(__m128)(A), \
+ (__v4sf)(__m128)(B), \
+ (__v4sf)_mm_setzero_ps(), \
+ (__mmask8)-1, (int)(R)); })
+
+#define _mm_mask_min_round_ss(W, U, A, B, R) __extension__ ({ \
+ (__m128)__builtin_ia32_minss_round_mask((__v4sf)(__m128)(A), \
+ (__v4sf)(__m128)(B), \
+ (__v4sf)(__m128)(W), (__mmask8)(U), \
+ (int)(R)); })
+
+#define _mm_maskz_min_round_ss(U, A, B, R) __extension__ ({ \
+ (__m128)__builtin_ia32_minss_round_mask((__v4sf)(__m128)(A), \
+ (__v4sf)(__m128)(B), \
+ (__v4sf)_mm_setzero_ps(), \
+ (__mmask8)(U), (int)(R)); })
+
+static __inline__ __m128d __DEFAULT_FN_ATTRS
+_mm_mask_min_sd(__m128d __W, __mmask8 __U, __m128d __A, __m128d __B) {
+ return (__m128d) __builtin_ia32_minsd_round_mask ((__v2df) __A,
+ (__v2df) __B,
+ (__v2df) __W,
+ (__mmask8) __U,
+ _MM_FROUND_CUR_DIRECTION);
+}
+
+static __inline__ __m128d __DEFAULT_FN_ATTRS
+_mm_maskz_min_sd(__mmask8 __U, __m128d __A, __m128d __B) {
+ return (__m128d) __builtin_ia32_minsd_round_mask ((__v2df) __A,
+ (__v2df) __B,
+ (__v2df) _mm_setzero_pd (),
+ (__mmask8) __U,
+ _MM_FROUND_CUR_DIRECTION);
+}
+
+#define _mm_min_round_sd(A, B, R) __extension__ ({ \
+ (__m128d)__builtin_ia32_minsd_round_mask((__v2df)(__m128d)(A), \
+ (__v2df)(__m128d)(B), \
+ (__v2df)_mm_setzero_pd(), \
+ (__mmask8)-1, (int)(R)); })
+
+#define _mm_mask_min_round_sd(W, U, A, B, R) __extension__ ({ \
+ (__m128d)__builtin_ia32_minsd_round_mask((__v2df)(__m128d)(A), \
+ (__v2df)(__m128d)(B), \
+ (__v2df)(__m128d)(W), \
+ (__mmask8)(U), (int)(R)); })
+
+#define _mm_maskz_min_round_sd(U, A, B, R) __extension__ ({ \
+ (__m128d)__builtin_ia32_minsd_round_mask((__v2df)(__m128d)(A), \
+ (__v2df)(__m128d)(B), \
+ (__v2df)_mm_setzero_pd(), \
+ (__mmask8)(U), (int)(R)); })
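+
+/* Note (illustrative, not upstream): the _ss/_sd variants operate on element
+ * 0 only, so the mask governs that single element; the remaining elements of
+ * the result are copied from the first source operand. */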
+
+static __inline __m512i __DEFAULT_FN_ATTRS
+_mm512_min_epi32(__m512i __A, __m512i __B)
+{
+ return (__m512i) __builtin_ia32_pminsd512_mask ((__v16si) __A,
+ (__v16si) __B,
+ (__v16si)
+ _mm512_setzero_si512 (),
+ (__mmask16) -1);
+}
+
+static __inline__ __m512i __DEFAULT_FN_ATTRS
+_mm512_mask_min_epi32 (__m512i __W, __mmask16 __M, __m512i __A, __m512i __B)
+{
+ return (__m512i) __builtin_ia32_pminsd512_mask ((__v16si) __A,
+ (__v16si) __B,
+ (__v16si) __W, __M);
+}
+
+static __inline__ __m512i __DEFAULT_FN_ATTRS
+_mm512_maskz_min_epi32 (__mmask16 __M, __m512i __A, __m512i __B)
+{
+ return (__m512i) __builtin_ia32_pminsd512_mask ((__v16si) __A,
+ (__v16si) __B,
+ (__v16si)
+ _mm512_setzero_si512 (),
+ __M);
+}
+
+static __inline __m512i __DEFAULT_FN_ATTRS
+_mm512_min_epu32(__m512i __A, __m512i __B)
+{
+ return (__m512i) __builtin_ia32_pminud512_mask ((__v16si) __A,
+ (__v16si) __B,
+ (__v16si)
+ _mm512_setzero_si512 (),
+ (__mmask16) -1);
+}
+
+static __inline__ __m512i __DEFAULT_FN_ATTRS
+_mm512_mask_min_epu32 (__m512i __W, __mmask16 __M, __m512i __A, __m512i __B)
+{
+ return (__m512i) __builtin_ia32_pminud512_mask ((__v16si) __A,
+ (__v16si) __B,
+ (__v16si) __W, __M);
+}
+
+static __inline__ __m512i __DEFAULT_FN_ATTRS
+_mm512_maskz_min_epu32 (__mmask16 __M, __m512i __A, __m512i __B)
+{
+ return (__m512i) __builtin_ia32_pminud512_mask ((__v16si) __A,
+ (__v16si) __B,
+ (__v16si)
+ _mm512_setzero_si512 (),
+ __M);
+}
+
+static __inline __m512i __DEFAULT_FN_ATTRS
+_mm512_min_epi64(__m512i __A, __m512i __B)
+{
+ return (__m512i) __builtin_ia32_pminsq512_mask ((__v8di) __A,
+ (__v8di) __B,
+ (__v8di)
+ _mm512_setzero_si512 (),
+ (__mmask8) -1);
+}
+
+static __inline__ __m512i __DEFAULT_FN_ATTRS
+_mm512_mask_min_epi64 (__m512i __W, __mmask8 __M, __m512i __A, __m512i __B)
+{
+ return (__m512i) __builtin_ia32_pminsq512_mask ((__v8di) __A,
+ (__v8di) __B,
+ (__v8di) __W, __M);
+}
+
+static __inline__ __m512i __DEFAULT_FN_ATTRS
+_mm512_maskz_min_epi64 (__mmask8 __M, __m512i __A, __m512i __B)
+{
+ return (__m512i) __builtin_ia32_pminsq512_mask ((__v8di) __A,
+ (__v8di) __B,
+ (__v8di)
+ _mm512_setzero_si512 (),
+ __M);
+}
+
+static __inline __m512i __DEFAULT_FN_ATTRS
+_mm512_min_epu64(__m512i __A, __m512i __B)
+{
+ return (__m512i) __builtin_ia32_pminuq512_mask ((__v8di) __A,
+ (__v8di) __B,
+ (__v8di)
+ _mm512_setzero_si512 (),
+ (__mmask8) -1);
+}
+
+static __inline__ __m512i __DEFAULT_FN_ATTRS
+_mm512_mask_min_epu64 (__m512i __W, __mmask8 __M, __m512i __A, __m512i __B)
+{
+ return (__m512i) __builtin_ia32_pminuq512_mask ((__v8di) __A,
+ (__v8di) __B,
+ (__v8di) __W, __M);
+}
+
+static __inline__ __m512i __DEFAULT_FN_ATTRS
+_mm512_maskz_min_epu64 (__mmask8 __M, __m512i __A, __m512i __B)
+{
+ return (__m512i) __builtin_ia32_pminuq512_mask ((__v8di) __A,
+ (__v8di) __B,
+ (__v8di)
+ _mm512_setzero_si512 (),
+ __M);
+}
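+
+/* Note (illustrative, not upstream): the epi32/epu32/epi64/epu64 forms above
+ * are the signed/unsigned element-wise minima (VPMINS[D,Q], VPMINU[D,Q]);
+ * integer operations take no rounding argument, so no *_round_* macros
+ * exist for them. */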
+
+static __inline __m512i __DEFAULT_FN_ATTRS
+_mm512_mul_epi32(__m512i __X, __m512i __Y)
+{
+ return (__m512i) __builtin_ia32_pmuldq512_mask ((__v16si) __X,
+ (__v16si) __Y,
+ (__v8di)
+ _mm512_setzero_si512 (),
+ (__mmask8) -1);
+}
+
+static __inline __m512i __DEFAULT_FN_ATTRS
+_mm512_mask_mul_epi32 (__m512i __W, __mmask8 __M, __m512i __X, __m512i __Y)
+{
+ return (__m512i) __builtin_ia32_pmuldq512_mask ((__v16si) __X,
+ (__v16si) __Y,
+ (__v8di) __W, __M);
+}
+
+static __inline __m512i __DEFAULT_FN_ATTRS
+_mm512_maskz_mul_epi32 (__mmask8 __M, __m512i __X, __m512i __Y)
+{
+ return (__m512i) __builtin_ia32_pmuldq512_mask ((__v16si) __X,
+ (__v16si) __Y,
+ (__v8di)
+ _mm512_setzero_si512 (),
+ __M);
+}
+
+static __inline __m512i __DEFAULT_FN_ATTRS
+_mm512_mul_epu32(__m512i __X, __m512i __Y)
+{
+ return (__m512i) __builtin_ia32_pmuludq512_mask ((__v16si) __X,
+ (__v16si) __Y,
+ (__v8di)
+ _mm512_setzero_si512 (),
+ (__mmask8) -1);
+}
+
+static __inline __m512i __DEFAULT_FN_ATTRS
+_mm512_mask_mul_epu32 (__m512i __W, __mmask8 __M, __m512i __X, __m512i __Y)
+{
+ return (__m512i) __builtin_ia32_pmuludq512_mask ((__v16si) __X,
+ (__v16si) __Y,
+ (__v8di) __W, __M);
+}
+
+static __inline __m512i __DEFAULT_FN_ATTRS
+_mm512_maskz_mul_epu32 (__mmask8 __M, __m512i __X, __m512i __Y)
+{
+ return (__m512i) __builtin_ia32_pmuludq512_mask ((__v16si) __X,
+ (__v16si) __Y,
+ (__v8di)
+ _mm512_setzero_si512 (),
+ __M);
+}
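+
+/* Note (illustrative, not upstream): _mm512_mul_epi32/_mm512_mul_epu32
+ * multiply only the even-indexed 32-bit element of each 64-bit lane and
+ * widen to eight 64-bit products (VPMULDQ/VPMULUDQ). Sketch:
+ *   __m512i x = _mm512_set1_epi32(3), y = _mm512_set1_epi32(5);
+ *   __m512i p = _mm512_mul_epi32(x, y);  // eight 64-bit lanes of 15
+ */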
+
+static __inline __m512i __DEFAULT_FN_ATTRS
+_mm512_mullo_epi32 (__m512i __A, __m512i __B)
+{
+ return (__m512i) ((__v16su) __A * (__v16su) __B);
+}
+
+static __inline __m512i __DEFAULT_FN_ATTRS
+_mm512_maskz_mullo_epi32 (__mmask16 __M, __m512i __A, __m512i __B)
+{
+ return (__m512i) __builtin_ia32_pmulld512_mask ((__v16si) __A,
+ (__v16si) __B,
+ (__v16si)
+ _mm512_setzero_si512 (),
+ __M);
+}
+
+static __inline __m512i __DEFAULT_FN_ATTRS
+_mm512_mask_mullo_epi32 (__m512i __W, __mmask16 __M, __m512i __A, __m512i __B)
+{
+ return (__m512i) __builtin_ia32_pmulld512_mask ((__v16si) __A,
+ (__v16si) __B,
+ (__v16si) __W, __M);
+}
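+
+/* Note (illustrative, not upstream): _mm512_mullo_epi32 keeps the low 32
+ * bits of each product and is written as a plain vector multiply, presumably
+ * so the optimizer can fold it; it still lowers to VPMULLD. */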
+
+#define _mm512_mask_sqrt_round_pd(W, U, A, R) __extension__ ({ \
+ (__m512d)__builtin_ia32_sqrtpd512_mask((__v8df)(__m512d)(A), \
+ (__v8df)(__m512d)(W), (__mmask8)(U), \
+ (int)(R)); })
+
+#define _mm512_maskz_sqrt_round_pd(U, A, R) __extension__ ({ \
+ (__m512d)__builtin_ia32_sqrtpd512_mask((__v8df)(__m512d)(A), \
+ (__v8df)_mm512_setzero_pd(), \
+ (__mmask8)(U), (int)(R)); })
+
+#define _mm512_sqrt_round_pd(A, R) __extension__ ({ \
+ (__m512d)__builtin_ia32_sqrtpd512_mask((__v8df)(__m512d)(A), \
+ (__v8df)_mm512_undefined_pd(), \
+ (__mmask8)-1, (int)(R)); })
+
+static __inline__ __m512d __DEFAULT_FN_ATTRS
+_mm512_sqrt_pd(__m512d __a)
+{
+ return (__m512d)__builtin_ia32_sqrtpd512_mask((__v8df)__a,
+ (__v8df) _mm512_setzero_pd (),
+ (__mmask8) -1,
+ _MM_FROUND_CUR_DIRECTION);
+}
+
+static __inline__ __m512d __DEFAULT_FN_ATTRS
+_mm512_mask_sqrt_pd (__m512d __W, __mmask8 __U, __m512d __A)
+{
+ return (__m512d) __builtin_ia32_sqrtpd512_mask ((__v8df) __A,
+ (__v8df) __W,
+ (__mmask8) __U,
+ _MM_FROUND_CUR_DIRECTION);
+}
+
+static __inline__ __m512d __DEFAULT_FN_ATTRS
+_mm512_maskz_sqrt_pd (__mmask8 __U, __m512d __A)
+{
+ return (__m512d) __builtin_ia32_sqrtpd512_mask ((__v8df) __A,
+ (__v8df)
+ _mm512_setzero_pd (),
+ (__mmask8) __U,
+ _MM_FROUND_CUR_DIRECTION);
+}
+
+#define _mm512_mask_sqrt_round_ps(W, U, A, R) __extension__ ({ \
+ (__m512)__builtin_ia32_sqrtps512_mask((__v16sf)(__m512)(A), \
+ (__v16sf)(__m512)(W), (__mmask16)(U), \
+ (int)(R)); })
+
+#define _mm512_maskz_sqrt_round_ps(U, A, R) __extension__ ({ \
+ (__m512)__builtin_ia32_sqrtps512_mask((__v16sf)(__m512)(A), \
+ (__v16sf)_mm512_setzero_ps(), \
+ (__mmask16)(U), (int)(R)); })
+
+#define _mm512_sqrt_round_ps(A, R) __extension__ ({ \
+ (__m512)__builtin_ia32_sqrtps512_mask((__v16sf)(__m512)(A), \
+ (__v16sf)_mm512_undefined_ps(), \
+ (__mmask16)-1, (int)(R)); })
+
+static __inline__ __m512 __DEFAULT_FN_ATTRS
+_mm512_sqrt_ps(__m512 __a)
+{
+ return (__m512)__builtin_ia32_sqrtps512_mask((__v16sf)__a,
+ (__v16sf) _mm512_setzero_ps (),
+ (__mmask16) -1,
+ _MM_FROUND_CUR_DIRECTION);
+}
+
+static __inline__ __m512 __DEFAULT_FN_ATTRS
+_mm512_mask_sqrt_ps(__m512 __W, __mmask16 __U, __m512 __A)
+{
+ return (__m512)__builtin_ia32_sqrtps512_mask((__v16sf)__A,
+ (__v16sf) __W,
+ (__mmask16) __U,
+ _MM_FROUND_CUR_DIRECTION);
+}
+
+static __inline__ __m512 __DEFAULT_FN_ATTRS
+_mm512_maskz_sqrt_ps(__mmask16 __U, __m512 __A)
+{
+ return (__m512)__builtin_ia32_sqrtps512_mask((__v16sf)__A,
+ (__v16sf) _mm512_setzero_ps (),
+ (__mmask16) __U,
+ _MM_FROUND_CUR_DIRECTION);
+}
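+
+/* Illustrative usage (assumption, not upstream): the sqrt *_round_* macros
+ * accept a full embedded-rounding immediate, e.g.
+ *   __m512d r = _mm512_sqrt_round_pd(x, _MM_FROUND_TO_NEAREST_INT |
+ *                                       _MM_FROUND_NO_EXC);
+ * where x is any __m512d value. */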
+
+static __inline__ __m512d __DEFAULT_FN_ATTRS
+_mm512_rsqrt14_pd(__m512d __A)
+{
+ return (__m512d) __builtin_ia32_rsqrt14pd512_mask ((__v8df) __A,
+ (__v8df)
+ _mm512_setzero_pd (),
+ (__mmask8) -1);
+}
+
+static __inline__ __m512d __DEFAULT_FN_ATTRS
+_mm512_mask_rsqrt14_pd (__m512d __W, __mmask8 __U, __m512d __A)
+{
+ return (__m512d) __builtin_ia32_rsqrt14pd512_mask ((__v8df) __A,
+ (__v8df) __W,
+ (__mmask8) __U);
+}
+
+static __inline__ __m512d __DEFAULT_FN_ATTRS
+_mm512_maskz_rsqrt14_pd (__mmask8 __U, __m512d __A)
+{
+ return (__m512d) __builtin_ia32_rsqrt14pd512_mask ((__v8df) __A,
+ (__v8df)
+ _mm512_setzero_pd (),
+ (__mmask8) __U);
+}
+
+static __inline__ __m512 __DEFAULT_FN_ATTRS
+_mm512_rsqrt14_ps(__m512 __A)
+{
+ return (__m512) __builtin_ia32_rsqrt14ps512_mask ((__v16sf) __A,
+ (__v16sf)
+ _mm512_setzero_ps (),
+ (__mmask16) -1);
+}
+
+static __inline__ __m512 __DEFAULT_FN_ATTRS
+_mm512_mask_rsqrt14_ps (__m512 __W, __mmask16 __U, __m512 __A)
+{
+ return (__m512) __builtin_ia32_rsqrt14ps512_mask ((__v16sf) __A,
+ (__v16sf) __W,
+ (__mmask16) __U);
+}
+
+static __inline__ __m512 __DEFAULT_FN_ATTRS
+_mm512_maskz_rsqrt14_ps (__mmask16 __U, __m512 __A)
+{
+ return (__m512) __builtin_ia32_rsqrt14ps512_mask ((__v16sf) __A,
+ (__v16sf)
+ _mm512_setzero_ps (),
+ (__mmask16) __U);
+}
+
+static __inline__ __m128 __DEFAULT_FN_ATTRS
+_mm_rsqrt14_ss(__m128 __A, __m128 __B)
+{
+ return (__m128) __builtin_ia32_rsqrt14ss_mask ((__v4sf) __A,
+ (__v4sf) __B,
+ (__v4sf)
+ _mm_setzero_ps (),
+ (__mmask8) -1);
+}
+
+static __inline__ __m128 __DEFAULT_FN_ATTRS
+_mm_mask_rsqrt14_ss (__m128 __W, __mmask8 __U, __m128 __A, __m128 __B)
+{
+ return (__m128) __builtin_ia32_rsqrt14ss_mask ((__v4sf) __A,
+ (__v4sf) __B,
+ (__v4sf) __W,
+ (__mmask8) __U);
+}
+
+static __inline__ __m128 __DEFAULT_FN_ATTRS
+_mm_maskz_rsqrt14_ss (__mmask8 __U, __m128 __A, __m128 __B)
+{
+ return (__m128) __builtin_ia32_rsqrt14ss_mask ((__v4sf) __A,
+ (__v4sf) __B,
+ (__v4sf) _mm_setzero_ps (),
+ (__mmask8) __U);
+}
+
+static __inline__ __m128d __DEFAULT_FN_ATTRS
+_mm_rsqrt14_sd(__m128d __A, __m128d __B)
+{
+ return (__m128d) __builtin_ia32_rsqrt14sd_mask ((__v2df) __A,
+ (__v2df) __B,
+ (__v2df)
+ _mm_setzero_pd (),
+ (__mmask8) -1);
+}
+
+static __inline__ __m128d __DEFAULT_FN_ATTRS
+_mm_mask_rsqrt14_sd (__m128d __W, __mmask8 __U, __m128d __A, __m128d __B)
+{
+ return (__m128d) __builtin_ia32_rsqrt14sd_mask ((__v2df) __A,
+ (__v2df) __B,
+ (__v2df) __W,
+ (__mmask8) __U);
+}
+
+static __inline__ __m128d __DEFAULT_FN_ATTRS
+_mm_maskz_rsqrt14_sd (__mmask8 __U, __m128d __A, __m128d __B)
+{
+ return (__m128d) __builtin_ia32_rsqrt14sd_mask ((__v2df) __A,
+ (__v2df) __B,
+ (__v2df) _mm_setzero_pd (),
+ (__mmask8) __U);
+}
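+
+/* Note (illustrative, not upstream): rsqrt14 returns an approximate
+ * reciprocal square root; the "14" reflects a maximum relative error of
+ * 2^-14, so callers typically add a Newton-Raphson step when they need
+ * full precision. */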
+
+static __inline__ __m512d __DEFAULT_FN_ATTRS
+_mm512_rcp14_pd(__m512d __A)
+{
+ return (__m512d) __builtin_ia32_rcp14pd512_mask ((__v8df) __A,
+ (__v8df)
+ _mm512_setzero_pd (),
+ (__mmask8) -1);
+}
+
+static __inline__ __m512d __DEFAULT_FN_ATTRS
+_mm512_mask_rcp14_pd (__m512d __W, __mmask8 __U, __m512d __A)
+{
+ return (__m512d) __builtin_ia32_rcp14pd512_mask ((__v8df) __A,
+ (__v8df) __W,
+ (__mmask8) __U);
+}
+
+static __inline__ __m512d __DEFAULT_FN_ATTRS
+_mm512_maskz_rcp14_pd (__mmask8 __U, __m512d __A)
+{
+ return (__m512d) __builtin_ia32_rcp14pd512_mask ((__v8df) __A,
+ (__v8df)
+ _mm512_setzero_pd (),
+ (__mmask8) __U);
+}
+
+static __inline__ __m512 __DEFAULT_FN_ATTRS
+_mm512_rcp14_ps(__m512 __A)
+{
+ return (__m512) __builtin_ia32_rcp14ps512_mask ((__v16sf) __A,
+ (__v16sf)
+ _mm512_setzero_ps (),
+ (__mmask16) -1);
+}
+
+static __inline__ __m512 __DEFAULT_FN_ATTRS
+_mm512_mask_rcp14_ps (__m512 __W, __mmask16 __U, __m512 __A)
+{
+ return (__m512) __builtin_ia32_rcp14ps512_mask ((__v16sf) __A,
+ (__v16sf) __W,
+ (__mmask16) __U);
+}
+
+static __inline__ __m512 __DEFAULT_FN_ATTRS
+_mm512_maskz_rcp14_ps (__mmask16 __U, __m512 __A)
+{
+ return (__m512) __builtin_ia32_rcp14ps512_mask ((__v16sf) __A,
+ (__v16sf)
+ _mm512_setzero_ps (),
+ (__mmask16) __U);
+}
+
+static __inline__ __m128 __DEFAULT_FN_ATTRS
+_mm_rcp14_ss(__m128 __A, __m128 __B)
+{
+ return (__m128) __builtin_ia32_rcp14ss_mask ((__v4sf) __A,
+ (__v4sf) __B,
+ (__v4sf)
+ _mm_setzero_ps (),
+ (__mmask8) -1);
+}
+
+static __inline__ __m128 __DEFAULT_FN_ATTRS
+_mm_mask_rcp14_ss (__m128 __W, __mmask8 __U, __m128 __A, __m128 __B)
+{
+ return (__m128) __builtin_ia32_rcp14ss_mask ((__v4sf) __A,
+ (__v4sf) __B,
+ (__v4sf) __W,
+ (__mmask8) __U);
+}
+
+static __inline__ __m128 __DEFAULT_FN_ATTRS
+_mm_maskz_rcp14_ss (__mmask8 __U, __m128 __A, __m128 __B)
+{
+ return (__m128) __builtin_ia32_rcp14ss_mask ((__v4sf) __A,
+ (__v4sf) __B,
+ (__v4sf) _mm_setzero_ps (),
+ (__mmask8) __U);
+}
+
+static __inline__ __m128d __DEFAULT_FN_ATTRS
+_mm_rcp14_sd(__m128d __A, __m128d __B)
+{
+ return (__m128d) __builtin_ia32_rcp14sd_mask ((__v2df) __A,
+ (__v2df) __B,
+ (__v2df)
+ _mm_setzero_pd (),
+ (__mmask8) -1);
+}
+
+static __inline__ __m128d __DEFAULT_FN_ATTRS
+_mm_mask_rcp14_sd (__m128d __W, __mmask8 __U, __m128d __A, __m128d __B)
+{
+ return (__m128d) __builtin_ia32_rcp14sd_mask ((__v2df) __A,
+ (__v2df) __B,
+ (__v2df) __W,
+ (__mmask8) __U);
+}
+
+static __inline__ __m128d __DEFAULT_FN_ATTRS
+_mm_maskz_rcp14_sd (__mmask8 __U, __m128d __A, __m128d __B)
+{
+ return (__m128d) __builtin_ia32_rcp14sd_mask ((__v2df) __A,
+ (__v2df) __B,
+ (__v2df) _mm_setzero_pd (),
+ (__mmask8) __U);
+}
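+
+/* Note (illustrative, not upstream): rcp14 is the matching approximate
+ * reciprocal, also accurate to 2^-14. One Newton-Raphson refinement,
+ * r' = r * (2 - x * r), as a sketch:
+ *   __m512 r = _mm512_rcp14_ps(x);
+ *   r = _mm512_mul_ps(r, _mm512_fnmadd_ps(x, r, _mm512_set1_ps(2.0f)));
+ */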
+
+static __inline __m512 __DEFAULT_FN_ATTRS
+_mm512_floor_ps(__m512 __A)
+{
+ return (__m512) __builtin_ia32_rndscaleps_mask ((__v16sf) __A,
+ _MM_FROUND_FLOOR,
+ (__v16sf) __A, -1,
+ _MM_FROUND_CUR_DIRECTION);
+}
+
+static __inline__ __m512 __DEFAULT_FN_ATTRS
+_mm512_mask_floor_ps (__m512 __W, __mmask16 __U, __m512 __A)
+{
+ return (__m512) __builtin_ia32_rndscaleps_mask ((__v16sf) __A,
+ _MM_FROUND_FLOOR,
+ (__v16sf) __W, __U,
+ _MM_FROUND_CUR_DIRECTION);
+}
+
+static __inline __m512d __DEFAULT_FN_ATTRS
+_mm512_floor_pd(__m512d __A)
+{
+ return (__m512d) __builtin_ia32_rndscalepd_mask ((__v8df) __A,
+ _MM_FROUND_FLOOR,
+ (__v8df) __A, -1,
+ _MM_FROUND_CUR_DIRECTION);
+}
+
+static __inline__ __m512d __DEFAULT_FN_ATTRS
+_mm512_mask_floor_pd (__m512d __W, __mmask8 __U, __m512d __A)
+{
+ return (__m512d) __builtin_ia32_rndscalepd_mask ((__v8df) __A,
+ _MM_FROUND_FLOOR,
+ (__v8df) __W, __U,
+ _MM_FROUND_CUR_DIRECTION);
+}
+
+static __inline__ __m512 __DEFAULT_FN_ATTRS
+_mm512_mask_ceil_ps (__m512 __W, __mmask16 __U, __m512 __A)
+{
+ return (__m512) __builtin_ia32_rndscaleps_mask ((__v16sf) __A,
+ _MM_FROUND_CEIL,
+ (__v16sf) __W, __U,
+ _MM_FROUND_CUR_DIRECTION);
+}
+
+static __inline __m512 __DEFAULT_FN_ATTRS
+_mm512_ceil_ps(__m512 __A)
+{
+ return (__m512) __builtin_ia32_rndscaleps_mask ((__v16sf) __A,
+ _MM_FROUND_CEIL,
+ (__v16sf) __A, -1,
+ _MM_FROUND_CUR_DIRECTION);
+}
+
+static __inline __m512d __DEFAULT_FN_ATTRS
+_mm512_ceil_pd(__m512d __A)
+{
+ return (__m512d) __builtin_ia32_rndscalepd_mask ((__v8df) __A,
+ _MM_FROUND_CEIL,
+ (__v8df) __A, -1,
+ _MM_FROUND_CUR_DIRECTION);
+}
+
+static __inline__ __m512d __DEFAULT_FN_ATTRS
+_mm512_mask_ceil_pd (__m512d __W, __mmask8 __U, __m512d __A)
+{
+ return (__m512d) __builtin_ia32_rndscalepd_mask ((__v8df) __A,
+ _MM_FROUND_CEIL,
+ (__v8df) __W, __U,
+ _MM_FROUND_CUR_DIRECTION);
+}
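+
+/* Note (illustrative, not upstream): floor/ceil are thin wrappers over the
+ * rndscale builtin with _MM_FROUND_FLOOR/_MM_FROUND_CEIL; the unmasked forms
+ * reuse __A as the pass-through operand because the all-ones mask selects
+ * every lane anyway. */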
+
+static __inline __m512i __DEFAULT_FN_ATTRS
+_mm512_abs_epi64(__m512i __A)
+{
+ return (__m512i) __builtin_ia32_pabsq512_mask ((__v8di) __A,
+ (__v8di)
+ _mm512_setzero_si512 (),
+ (__mmask8) -1);
+}
+
+static __inline__ __m512i __DEFAULT_FN_ATTRS
+_mm512_mask_abs_epi64 (__m512i __W, __mmask8 __U, __m512i __A)
+{
+ return (__m512i) __builtin_ia32_pabsq512_mask ((__v8di) __A,
+ (__v8di) __W,
+ (__mmask8) __U);
+}
+
+static __inline__ __m512i __DEFAULT_FN_ATTRS
+_mm512_maskz_abs_epi64 (__mmask8 __U, __m512i __A)
+{
+ return (__m512i) __builtin_ia32_pabsq512_mask ((__v8di) __A,
+ (__v8di)
+ _mm512_setzero_si512 (),
+ (__mmask8) __U);
+}
+
+static __inline __m512i __DEFAULT_FN_ATTRS
+_mm512_abs_epi32(__m512i __A)
+{
+ return (__m512i) __builtin_ia32_pabsd512_mask ((__v16si) __A,
+ (__v16si)
+ _mm512_setzero_si512 (),
+ (__mmask16) -1);
+}
+
+static __inline__ __m512i __DEFAULT_FN_ATTRS
+_mm512_mask_abs_epi32 (__m512i __W, __mmask16 __U, __m512i __A)
+{
+ return (__m512i) __builtin_ia32_pabsd512_mask ((__v16si) __A,
+ (__v16si) __W,
+ (__mmask16) __U);
+}
+
+static __inline__ __m512i __DEFAULT_FN_ATTRS
+_mm512_maskz_abs_epi32 (__mmask16 __U, __m512i __A)
+{
+ return (__m512i) __builtin_ia32_pabsd512_mask ((__v16si) __A,
+ (__v16si)
+ _mm512_setzero_si512 (),
+ (__mmask16) __U);
+}
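+
+/* Illustrative check (assumption, not upstream):
+ *   __m512i v = _mm512_set1_epi32(-7);
+ *   __m512i a = _mm512_abs_epi32(v);  // every 32-bit lane == 7
+ * As with scalar two's-complement abs, INT_MIN maps to itself. */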
+
+static __inline__ __m128 __DEFAULT_FN_ATTRS
+_mm_mask_add_ss(__m128 __W, __mmask8 __U, __m128 __A, __m128 __B) {
+ return (__m128) __builtin_ia32_addss_round_mask ((__v4sf) __A,
+ (__v4sf) __B,
+ (__v4sf) __W,
+ (__mmask8) __U,
+ _MM_FROUND_CUR_DIRECTION);
+}
+
+static __inline__ __m128 __DEFAULT_FN_ATTRS
+_mm_maskz_add_ss(__mmask8 __U, __m128 __A, __m128 __B) {
+ return (__m128) __builtin_ia32_addss_round_mask ((__v4sf) __A,
+ (__v4sf) __B,
+ (__v4sf) _mm_setzero_ps (),
+ (__mmask8) __U,
+ _MM_FROUND_CUR_DIRECTION);
+}
+
+#define _mm_add_round_ss(A, B, R) __extension__ ({ \
+ (__m128)__builtin_ia32_addss_round_mask((__v4sf)(__m128)(A), \
+ (__v4sf)(__m128)(B), \
+ (__v4sf)_mm_setzero_ps(), \
+ (__mmask8)-1, (int)(R)); })
+
+#define _mm_mask_add_round_ss(W, U, A, B, R) __extension__ ({ \
+ (__m128)__builtin_ia32_addss_round_mask((__v4sf)(__m128)(A), \
+ (__v4sf)(__m128)(B), \
+ (__v4sf)(__m128)(W), (__mmask8)(U), \
+ (int)(R)); })
+
+#define _mm_maskz_add_round_ss(U, A, B, R) __extension__ ({ \
+ (__m128)__builtin_ia32_addss_round_mask((__v4sf)(__m128)(A), \
+ (__v4sf)(__m128)(B), \
+ (__v4sf)_mm_setzero_ps(), \
+ (__mmask8)(U), (int)(R)); })
+
+static __inline__ __m128d __DEFAULT_FN_ATTRS
+_mm_mask_add_sd(__m128d __W, __mmask8 __U, __m128d __A, __m128d __B) {
+ return (__m128d) __builtin_ia32_addsd_round_mask ((__v2df) __A,
+ (__v2df) __B,
+ (__v2df) __W,
+ (__mmask8) __U,
+ _MM_FROUND_CUR_DIRECTION);
+}
+
+static __inline__ __m128d __DEFAULT_FN_ATTRS
+_mm_maskz_add_sd(__mmask8 __U, __m128d __A, __m128d __B) {
+ return (__m128d) __builtin_ia32_addsd_round_mask ((__v2df) __A,
+ (__v2df) __B,
+ (__v2df) _mm_setzero_pd (),
+ (__mmask8) __U,
+ _MM_FROUND_CUR_DIRECTION);
+}
+
+#define _mm_add_round_sd(A, B, R) __extension__ ({ \
+ (__m128d)__builtin_ia32_addsd_round_mask((__v2df)(__m128d)(A), \
+ (__v2df)(__m128d)(B), \
+ (__v2df)_mm_setzero_pd(), \
+ (__mmask8)-1, (int)(R)); })
+
+#define _mm_mask_add_round_sd(W, U, A, B, R) __extension__ ({ \
+ (__m128d)__builtin_ia32_addsd_round_mask((__v2df)(__m128d)(A), \
+ (__v2df)(__m128d)(B), \
+ (__v2df)(__m128d)(W), \
+ (__mmask8)(U), (int)(R)); })
+
+#define _mm_maskz_add_round_sd(U, A, B, R) __extension__ ({ \
+ (__m128d)__builtin_ia32_addsd_round_mask((__v2df)(__m128d)(A), \
+ (__v2df)(__m128d)(B), \
+ (__v2df)_mm_setzero_pd(), \
+ (__mmask8)(U), (int)(R)); })
+
+static __inline__ __m512d __DEFAULT_FN_ATTRS
+_mm512_mask_add_pd(__m512d __W, __mmask8 __U, __m512d __A, __m512d __B) {
+ return (__m512d) __builtin_ia32_addpd512_mask ((__v8df) __A,
+ (__v8df) __B,
+ (__v8df) __W,
+ (__mmask8) __U,
+ _MM_FROUND_CUR_DIRECTION);
+}
+
+static __inline__ __m512d __DEFAULT_FN_ATTRS
+_mm512_maskz_add_pd(__mmask8 __U, __m512d __A, __m512d __B) {
+ return (__m512d) __builtin_ia32_addpd512_mask ((__v8df) __A,
+ (__v8df) __B,
+ (__v8df) _mm512_setzero_pd (),
+ (__mmask8) __U,
+ _MM_FROUND_CUR_DIRECTION);
+}
+
+static __inline__ __m512 __DEFAULT_FN_ATTRS
+_mm512_mask_add_ps(__m512 __W, __mmask16 __U, __m512 __A, __m512 __B) {
+ return (__m512) __builtin_ia32_addps512_mask ((__v16sf) __A,
+ (__v16sf) __B,
+ (__v16sf) __W,
+ (__mmask16) __U,
+ _MM_FROUND_CUR_DIRECTION);
+}
+
+static __inline__ __m512 __DEFAULT_FN_ATTRS
+_mm512_maskz_add_ps(__mmask16 __U, __m512 __A, __m512 __B) {
+ return (__m512) __builtin_ia32_addps512_mask ((__v16sf) __A,
+ (__v16sf) __B,
+ (__v16sf) _mm512_setzero_ps (),
+ (__mmask16) __U,
+ _MM_FROUND_CUR_DIRECTION);
+}
+
+#define _mm512_add_round_pd(A, B, R) __extension__ ({ \
+ (__m512d)__builtin_ia32_addpd512_mask((__v8df)(__m512d)(A), \
+ (__v8df)(__m512d)(B), \
+ (__v8df)_mm512_setzero_pd(), \
+ (__mmask8)-1, (int)(R)); })
+
+#define _mm512_mask_add_round_pd(W, U, A, B, R) __extension__ ({ \
+ (__m512d)__builtin_ia32_addpd512_mask((__v8df)(__m512d)(A), \
+ (__v8df)(__m512d)(B), \
+ (__v8df)(__m512d)(W), (__mmask8)(U), \
+ (int)(R)); })
+
+#define _mm512_maskz_add_round_pd(U, A, B, R) __extension__ ({ \
+ (__m512d)__builtin_ia32_addpd512_mask((__v8df)(__m512d)(A), \
+ (__v8df)(__m512d)(B), \
+ (__v8df)_mm512_setzero_pd(), \
+ (__mmask8)(U), (int)(R)); })
+
+#define _mm512_add_round_ps(A, B, R) __extension__ ({ \
+ (__m512)__builtin_ia32_addps512_mask((__v16sf)(__m512)(A), \
+ (__v16sf)(__m512)(B), \
+ (__v16sf)_mm512_setzero_ps(), \
+ (__mmask16)-1, (int)(R)); })
+
+#define _mm512_mask_add_round_ps(W, U, A, B, R) __extension__ ({ \
+ (__m512)__builtin_ia32_addps512_mask((__v16sf)(__m512)(A), \
+ (__v16sf)(__m512)(B), \
+ (__v16sf)(__m512)(W), (__mmask16)(U), \
+ (int)(R)); })
+
+#define _mm512_maskz_add_round_ps(U, A, B, R) __extension__ ({ \
+ (__m512)__builtin_ia32_addps512_mask((__v16sf)(__m512)(A), \
+ (__v16sf)(__m512)(B), \
+ (__v16sf)_mm512_setzero_ps(), \
+ (__mmask16)(U), (int)(R)); })
+
+static __inline__ __m128 __DEFAULT_FN_ATTRS
+_mm_mask_sub_ss(__m128 __W, __mmask8 __U, __m128 __A, __m128 __B) {
+ return (__m128) __builtin_ia32_subss_round_mask ((__v4sf) __A,
+ (__v4sf) __B,
+ (__v4sf) __W,
+ (__mmask8) __U,
+ _MM_FROUND_CUR_DIRECTION);
+}
+
+static __inline__ __m128 __DEFAULT_FN_ATTRS
+_mm_maskz_sub_ss(__mmask8 __U, __m128 __A, __m128 __B) {
+ return (__m128) __builtin_ia32_subss_round_mask ((__v4sf) __A,
+ (__v4sf) __B,
+ (__v4sf) _mm_setzero_ps (),
+ (__mmask8) __U,
+ _MM_FROUND_CUR_DIRECTION);
+}
+
+#define _mm_sub_round_ss(A, B, R) __extension__ ({ \
+ (__m128)__builtin_ia32_subss_round_mask((__v4sf)(__m128)(A), \
+ (__v4sf)(__m128)(B), \
+ (__v4sf)_mm_setzero_ps(), \
+ (__mmask8)-1, (int)(R)); })
+
+#define _mm_mask_sub_round_ss(W, U, A, B, R) __extension__ ({ \
+ (__m128)__builtin_ia32_subss_round_mask((__v4sf)(__m128)(A), \
+ (__v4sf)(__m128)(B), \
+ (__v4sf)(__m128)(W), (__mmask8)(U), \
+ (int)(R)); })
+
+#define _mm_maskz_sub_round_ss(U, A, B, R) __extension__ ({ \
+ (__m128)__builtin_ia32_subss_round_mask((__v4sf)(__m128)(A), \
+ (__v4sf)(__m128)(B), \
+ (__v4sf)_mm_setzero_ps(), \
+ (__mmask8)(U), (int)(R)); })
+
+static __inline__ __m128d __DEFAULT_FN_ATTRS
+_mm_mask_sub_sd(__m128d __W, __mmask8 __U, __m128d __A, __m128d __B) {
+ return (__m128d) __builtin_ia32_subsd_round_mask ((__v2df) __A,
+ (__v2df) __B,
+ (__v2df) __W,
+ (__mmask8) __U,
+ _MM_FROUND_CUR_DIRECTION);
+}
+
+static __inline__ __m128d __DEFAULT_FN_ATTRS
+_mm_maskz_sub_sd(__mmask8 __U, __m128d __A, __m128d __B) {
+ return (__m128d) __builtin_ia32_subsd_round_mask ((__v2df) __A,
+ (__v2df) __B,
+ (__v2df) _mm_setzero_pd (),
+ (__mmask8) __U,
+ _MM_FROUND_CUR_DIRECTION);
+}
+
+#define _mm_sub_round_sd(A, B, R) __extension__ ({ \
+ (__m128d)__builtin_ia32_subsd_round_mask((__v2df)(__m128d)(A), \
+ (__v2df)(__m128d)(B), \
+ (__v2df)_mm_setzero_pd(), \
+ (__mmask8)-1, (int)(R)); })
+
+#define _mm_mask_sub_round_sd(W, U, A, B, R) __extension__ ({ \
+ (__m128d)__builtin_ia32_subsd_round_mask((__v2df)(__m128d)(A), \
+ (__v2df)(__m128d)(B), \
+ (__v2df)(__m128d)(W), \
+ (__mmask8)(U), (int)(R)); })
+
+#define _mm_maskz_sub_round_sd(U, A, B, R) __extension__ ({ \
+ (__m128d)__builtin_ia32_subsd_round_mask((__v2df)(__m128d)(A), \
+ (__v2df)(__m128d)(B), \
+ (__v2df)_mm_setzero_pd(), \
+ (__mmask8)(U), (int)(R)); })
+
+static __inline__ __m512d __DEFAULT_FN_ATTRS
+_mm512_mask_sub_pd(__m512d __W, __mmask8 __U, __m512d __A, __m512d __B) {
+ return (__m512d) __builtin_ia32_subpd512_mask ((__v8df) __A,
+ (__v8df) __B,
+ (__v8df) __W,
+ (__mmask8) __U,
+ _MM_FROUND_CUR_DIRECTION);
+}
+
+static __inline__ __m512d __DEFAULT_FN_ATTRS
+_mm512_maskz_sub_pd(__mmask8 __U, __m512d __A, __m512d __B) {
+ return (__m512d) __builtin_ia32_subpd512_mask ((__v8df) __A,
+ (__v8df) __B,
+ (__v8df)
+ _mm512_setzero_pd (),
+ (__mmask8) __U,
+ _MM_FROUND_CUR_DIRECTION);
+}
+
+static __inline__ __m512 __DEFAULT_FN_ATTRS
+_mm512_mask_sub_ps(__m512 __W, __mmask16 __U, __m512 __A, __m512 __B) {
+ return (__m512) __builtin_ia32_subps512_mask ((__v16sf) __A,
+ (__v16sf) __B,
+ (__v16sf) __W,
+ (__mmask16) __U,
+ _MM_FROUND_CUR_DIRECTION);
+}
+
+static __inline__ __m512 __DEFAULT_FN_ATTRS
+_mm512_maskz_sub_ps(__mmask16 __U, __m512 __A, __m512 __B) {
+ return (__m512) __builtin_ia32_subps512_mask ((__v16sf) __A,
+ (__v16sf) __B,
+ (__v16sf)
+ _mm512_setzero_ps (),
+ (__mmask16) __U,
+ _MM_FROUND_CUR_DIRECTION);
+}
+
+#define _mm512_sub_round_pd(A, B, R) __extension__ ({ \
+ (__m512d)__builtin_ia32_subpd512_mask((__v8df)(__m512d)(A), \
+ (__v8df)(__m512d)(B), \
+ (__v8df)_mm512_setzero_pd(), \
+ (__mmask8)-1, (int)(R)); })
+
+#define _mm512_mask_sub_round_pd(W, U, A, B, R) __extension__ ({ \
+ (__m512d)__builtin_ia32_subpd512_mask((__v8df)(__m512d)(A), \
+ (__v8df)(__m512d)(B), \
+ (__v8df)(__m512d)(W), (__mmask8)(U), \
+ (int)(R)); })
+
+#define _mm512_maskz_sub_round_pd(U, A, B, R) __extension__ ({ \
+ (__m512d)__builtin_ia32_subpd512_mask((__v8df)(__m512d)(A), \
+ (__v8df)(__m512d)(B), \
+ (__v8df)_mm512_setzero_pd(), \
+ (__mmask8)(U), (int)(R)); })
+
+#define _mm512_sub_round_ps(A, B, R) __extension__ ({ \
+ (__m512)__builtin_ia32_subps512_mask((__v16sf)(__m512)(A), \
+ (__v16sf)(__m512)(B), \
+ (__v16sf)_mm512_setzero_ps(), \
+ (__mmask16)-1, (int)(R)); })
+
+#define _mm512_mask_sub_round_ps(W, U, A, B, R) __extension__ ({ \
+ (__m512)__builtin_ia32_subps512_mask((__v16sf)(__m512)(A), \
+ (__v16sf)(__m512)(B), \
+ (__v16sf)(__m512)(W), (__mmask16)(U), \
+ (int)(R)); })
+
+#define _mm512_maskz_sub_round_ps(U, A, B, R) __extension__ ({ \
+ (__m512)__builtin_ia32_subps512_mask((__v16sf)(__m512)(A), \
+ (__v16sf)(__m512)(B), \
+ (__v16sf)_mm512_setzero_ps(), \
+ (__mmask16)(U), (int)(R)); })
+
+static __inline__ __m128 __DEFAULT_FN_ATTRS
+_mm_mask_mul_ss(__m128 __W, __mmask8 __U, __m128 __A, __m128 __B) {
+ return (__m128) __builtin_ia32_mulss_round_mask ((__v4sf) __A,
+ (__v4sf) __B,
+ (__v4sf) __W,
+ (__mmask8) __U,
+ _MM_FROUND_CUR_DIRECTION);
+}
+
+static __inline__ __m128 __DEFAULT_FN_ATTRS
+_mm_maskz_mul_ss(__mmask8 __U, __m128 __A, __m128 __B) {
+ return (__m128) __builtin_ia32_mulss_round_mask ((__v4sf) __A,
+ (__v4sf) __B,
+ (__v4sf) _mm_setzero_ps (),
+ (__mmask8) __U,
+ _MM_FROUND_CUR_DIRECTION);
+}
+
+#define _mm_mul_round_ss(A, B, R) __extension__ ({ \
+ (__m128)__builtin_ia32_mulss_round_mask((__v4sf)(__m128)(A), \
+ (__v4sf)(__m128)(B), \
+ (__v4sf)_mm_setzero_ps(), \
+ (__mmask8)-1, (int)(R)); })
+
+#define _mm_mask_mul_round_ss(W, U, A, B, R) __extension__ ({ \
+ (__m128)__builtin_ia32_mulss_round_mask((__v4sf)(__m128)(A), \
+ (__v4sf)(__m128)(B), \
+ (__v4sf)(__m128)(W), (__mmask8)(U), \
+ (int)(R)); })
+
+#define _mm_maskz_mul_round_ss(U, A, B, R) __extension__ ({ \
+ (__m128)__builtin_ia32_mulss_round_mask((__v4sf)(__m128)(A), \
+ (__v4sf)(__m128)(B), \
+ (__v4sf)_mm_setzero_ps(), \
+ (__mmask8)(U), (int)(R)); })
+
+static __inline__ __m128d __DEFAULT_FN_ATTRS
+_mm_mask_mul_sd(__m128d __W, __mmask8 __U, __m128d __A, __m128d __B) {
+ return (__m128d) __builtin_ia32_mulsd_round_mask ((__v2df) __A,
+ (__v2df) __B,
+ (__v2df) __W,
+ (__mmask8) __U,
+ _MM_FROUND_CUR_DIRECTION);
+}
+
+static __inline__ __m128d __DEFAULT_FN_ATTRS
+_mm_maskz_mul_sd(__mmask8 __U, __m128d __A, __m128d __B) {
+ return (__m128d) __builtin_ia32_mulsd_round_mask ((__v2df) __A,
+ (__v2df) __B,
+ (__v2df) _mm_setzero_pd (),
+ (__mmask8) __U,
+ _MM_FROUND_CUR_DIRECTION);
+}
+
+#define _mm_mul_round_sd(A, B, R) __extension__ ({ \
+ (__m128d)__builtin_ia32_mulsd_round_mask((__v2df)(__m128d)(A), \
+ (__v2df)(__m128d)(B), \
+ (__v2df)_mm_setzero_pd(), \
+ (__mmask8)-1, (int)(R)); })
+
+#define _mm_mask_mul_round_sd(W, U, A, B, R) __extension__ ({ \
+ (__m128d)__builtin_ia32_mulsd_round_mask((__v2df)(__m128d)(A), \
+ (__v2df)(__m128d)(B), \
+ (__v2df)(__m128d)(W), \
+ (__mmask8)(U), (int)(R)); })
+
+#define _mm_maskz_mul_round_sd(U, A, B, R) __extension__ ({ \
+ (__m128d)__builtin_ia32_mulsd_round_mask((__v2df)(__m128d)(A), \
+ (__v2df)(__m128d)(B), \
+ (__v2df)_mm_setzero_pd(), \
+ (__mmask8)(U), (int)(R)); })
+
+static __inline__ __m512d __DEFAULT_FN_ATTRS
+_mm512_mask_mul_pd(__m512d __W, __mmask8 __U, __m512d __A, __m512d __B) {
+ return (__m512d) __builtin_ia32_mulpd512_mask ((__v8df) __A,
+ (__v8df) __B,
+ (__v8df) __W,
+ (__mmask8) __U,
+ _MM_FROUND_CUR_DIRECTION);
+}
+
+static __inline__ __m512d __DEFAULT_FN_ATTRS
+_mm512_maskz_mul_pd(__mmask8 __U, __m512d __A, __m512d __B) {
+ return (__m512d) __builtin_ia32_mulpd512_mask ((__v8df) __A,
+ (__v8df) __B,
+ (__v8df)
+ _mm512_setzero_pd (),
+ (__mmask8) __U,
+ _MM_FROUND_CUR_DIRECTION);
+}
+
+static __inline__ __m512 __DEFAULT_FN_ATTRS
+_mm512_mask_mul_ps(__m512 __W, __mmask16 __U, __m512 __A, __m512 __B) {
+ return (__m512) __builtin_ia32_mulps512_mask ((__v16sf) __A,
+ (__v16sf) __B,
+ (__v16sf) __W,
+ (__mmask16) __U,
+ _MM_FROUND_CUR_DIRECTION);
+}
+
+static __inline__ __m512 __DEFAULT_FN_ATTRS
+_mm512_maskz_mul_ps(__mmask16 __U, __m512 __A, __m512 __B) {
+ return (__m512) __builtin_ia32_mulps512_mask ((__v16sf) __A,
+ (__v16sf) __B,
+ (__v16sf)
+ _mm512_setzero_ps (),
+ (__mmask16) __U,
+ _MM_FROUND_CUR_DIRECTION);
+}
+
+#define _mm512_mul_round_pd(A, B, R) __extension__ ({ \
+ (__m512d)__builtin_ia32_mulpd512_mask((__v8df)(__m512d)(A), \
+ (__v8df)(__m512d)(B), \
+ (__v8df)_mm512_setzero_pd(), \
+ (__mmask8)-1, (int)(R)); })
+
+#define _mm512_mask_mul_round_pd(W, U, A, B, R) __extension__ ({ \
+ (__m512d)__builtin_ia32_mulpd512_mask((__v8df)(__m512d)(A), \
+ (__v8df)(__m512d)(B), \
+ (__v8df)(__m512d)(W), (__mmask8)(U), \
+ (int)(R)); })
+
+#define _mm512_maskz_mul_round_pd(U, A, B, R) __extension__ ({ \
+ (__m512d)__builtin_ia32_mulpd512_mask((__v8df)(__m512d)(A), \
+ (__v8df)(__m512d)(B), \
+ (__v8df)_mm512_setzero_pd(), \
+ (__mmask8)(U), (int)(R)); })
+
+#define _mm512_mul_round_ps(A, B, R) __extension__ ({ \
+ (__m512)__builtin_ia32_mulps512_mask((__v16sf)(__m512)(A), \
+ (__v16sf)(__m512)(B), \
+ (__v16sf)_mm512_setzero_ps(), \
+ (__mmask16)-1, (int)(R)); })
+
+#define _mm512_mask_mul_round_ps(W, U, A, B, R) __extension__ ({ \
+ (__m512)__builtin_ia32_mulps512_mask((__v16sf)(__m512)(A), \
+ (__v16sf)(__m512)(B), \
+ (__v16sf)(__m512)(W), (__mmask16)(U), \
+ (int)(R)); })
+
+#define _mm512_maskz_mul_round_ps(U, A, B, R) __extension__ ({ \
+ (__m512)__builtin_ia32_mulps512_mask((__v16sf)(__m512)(A), \
+ (__v16sf)(__m512)(B), \
+ (__v16sf)_mm512_setzero_ps(), \
+ (__mmask16)(U), (int)(R)); })
+
+static __inline__ __m128 __DEFAULT_FN_ATTRS
+_mm_mask_div_ss(__m128 __W, __mmask8 __U, __m128 __A, __m128 __B) {
+ return (__m128) __builtin_ia32_divss_round_mask ((__v4sf) __A,
+ (__v4sf) __B,
+ (__v4sf) __W,
+ (__mmask8) __U,
+ _MM_FROUND_CUR_DIRECTION);
+}
+
+static __inline__ __m128 __DEFAULT_FN_ATTRS
+_mm_maskz_div_ss(__mmask8 __U, __m128 __A, __m128 __B) {
+ return (__m128) __builtin_ia32_divss_round_mask ((__v4sf) __A,
+ (__v4sf) __B,
+ (__v4sf) _mm_setzero_ps (),
+ (__mmask8) __U,
+ _MM_FROUND_CUR_DIRECTION);
+}
+
+#define _mm_div_round_ss(A, B, R) __extension__ ({ \
+ (__m128)__builtin_ia32_divss_round_mask((__v4sf)(__m128)(A), \
+ (__v4sf)(__m128)(B), \
+ (__v4sf)_mm_setzero_ps(), \
+ (__mmask8)-1, (int)(R)); })
+
+#define _mm_mask_div_round_ss(W, U, A, B, R) __extension__ ({ \
+ (__m128)__builtin_ia32_divss_round_mask((__v4sf)(__m128)(A), \
+ (__v4sf)(__m128)(B), \
+ (__v4sf)(__m128)(W), (__mmask8)(U), \
+ (int)(R)); })
+
+#define _mm_maskz_div_round_ss(U, A, B, R) __extension__ ({ \
+ (__m128)__builtin_ia32_divss_round_mask((__v4sf)(__m128)(A), \
+ (__v4sf)(__m128)(B), \
+ (__v4sf)_mm_setzero_ps(), \
+ (__mmask8)(U), (int)(R)); })
+
+static __inline__ __m128d __DEFAULT_FN_ATTRS
+_mm_mask_div_sd(__m128d __W, __mmask8 __U, __m128d __A, __m128d __B) {
+ return (__m128d) __builtin_ia32_divsd_round_mask ((__v2df) __A,
+ (__v2df) __B,
+ (__v2df) __W,
+ (__mmask8) __U,
+ _MM_FROUND_CUR_DIRECTION);
+}
+
+static __inline__ __m128d __DEFAULT_FN_ATTRS
+_mm_maskz_div_sd(__mmask8 __U, __m128d __A, __m128d __B) {
+ return (__m128d) __builtin_ia32_divsd_round_mask ((__v2df) __A,
+ (__v2df) __B,
+ (__v2df) _mm_setzero_pd (),
+ (__mmask8) __U,
+ _MM_FROUND_CUR_DIRECTION);
+}
+
+#define _mm_div_round_sd(A, B, R) __extension__ ({ \
+ (__m128d)__builtin_ia32_divsd_round_mask((__v2df)(__m128d)(A), \
+ (__v2df)(__m128d)(B), \
+ (__v2df)_mm_setzero_pd(), \
+ (__mmask8)-1, (int)(R)); })
+
+#define _mm_mask_div_round_sd(W, U, A, B, R) __extension__ ({ \
+ (__m128d)__builtin_ia32_divsd_round_mask((__v2df)(__m128d)(A), \
+ (__v2df)(__m128d)(B), \
+ (__v2df)(__m128d)(W), \
+ (__mmask8)(U), (int)(R)); })
+
+#define _mm_maskz_div_round_sd(U, A, B, R) __extension__ ({ \
+ (__m128d)__builtin_ia32_divsd_round_mask((__v2df)(__m128d)(A), \
+ (__v2df)(__m128d)(B), \
+ (__v2df)_mm_setzero_pd(), \
+ (__mmask8)(U), (int)(R)); })
+
+static __inline __m512d __DEFAULT_FN_ATTRS
+_mm512_div_pd(__m512d __a, __m512d __b)
+{
+ return (__m512d)((__v8df)__a/(__v8df)__b);
+}
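+
+/* Note (illustrative, not upstream): as with _mm512_mullo_epi32, the
+ * unmasked div is the plain vector '/' operator; only the masked and
+ * *_round_* forms need the builtin. */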
+
+static __inline__ __m512d __DEFAULT_FN_ATTRS
+_mm512_mask_div_pd(__m512d __W, __mmask8 __U, __m512d __A, __m512d __B) {
+ return (__m512d) __builtin_ia32_divpd512_mask ((__v8df) __A,
+ (__v8df) __B,
+ (__v8df) __W,
+ (__mmask8) __U,
+ _MM_FROUND_CUR_DIRECTION);
+}
+
+static __inline__ __m512d __DEFAULT_FN_ATTRS
+_mm512_maskz_div_pd(__mmask8 __U, __m512d __A, __m512d __B) {
+ return (__m512d) __builtin_ia32_divpd512_mask ((__v8df) __A,
+ (__v8df) __B,
+ (__v8df)
+ _mm512_setzero_pd (),
+ (__mmask8) __U,
+ _MM_FROUND_CUR_DIRECTION);
+}
+
+static __inline __m512 __DEFAULT_FN_ATTRS
+_mm512_div_ps(__m512 __a, __m512 __b)
+{
+ return (__m512)((__v16sf)__a/(__v16sf)__b);
+}
+
+static __inline__ __m512 __DEFAULT_FN_ATTRS
+_mm512_mask_div_ps(__m512 __W, __mmask16 __U, __m512 __A, __m512 __B) {
+ return (__m512) __builtin_ia32_divps512_mask ((__v16sf) __A,
+ (__v16sf) __B,
+ (__v16sf) __W,
+ (__mmask16) __U,
+ _MM_FROUND_CUR_DIRECTION);
+}
+
+static __inline__ __m512 __DEFAULT_FN_ATTRS
+_mm512_maskz_div_ps(__mmask16 __U, __m512 __A, __m512 __B) {
+ return (__m512) __builtin_ia32_divps512_mask ((__v16sf) __A,
+ (__v16sf) __B,
+ (__v16sf)
+ _mm512_setzero_ps (),
+ (__mmask16) __U,
+ _MM_FROUND_CUR_DIRECTION);
+}
+
+#define _mm512_div_round_pd(A, B, R) __extension__ ({ \
+ (__m512d)__builtin_ia32_divpd512_mask((__v8df)(__m512d)(A), \
+ (__v8df)(__m512d)(B), \
+ (__v8df)_mm512_setzero_pd(), \
+ (__mmask8)-1, (int)(R)); })
+
+#define _mm512_mask_div_round_pd(W, U, A, B, R) __extension__ ({ \
+ (__m512d)__builtin_ia32_divpd512_mask((__v8df)(__m512d)(A), \
+ (__v8df)(__m512d)(B), \
+ (__v8df)(__m512d)(W), (__mmask8)(U), \
+ (int)(R)); })
+
+#define _mm512_maskz_div_round_pd(U, A, B, R) __extension__ ({ \
+ (__m512d)__builtin_ia32_divpd512_mask((__v8df)(__m512d)(A), \
+ (__v8df)(__m512d)(B), \
+ (__v8df)_mm512_setzero_pd(), \
+ (__mmask8)(U), (int)(R)); })
+
+#define _mm512_div_round_ps(A, B, R) __extension__ ({ \
+ (__m512)__builtin_ia32_divps512_mask((__v16sf)(__m512)(A), \
+ (__v16sf)(__m512)(B), \
+ (__v16sf)_mm512_setzero_ps(), \
+ (__mmask16)-1, (int)(R)); })
+
+#define _mm512_mask_div_round_ps(W, U, A, B, R) __extension__ ({ \
+ (__m512)__builtin_ia32_divps512_mask((__v16sf)(__m512)(A), \
+ (__v16sf)(__m512)(B), \
+ (__v16sf)(__m512)(W), (__mmask16)(U), \
+ (int)(R)); })
+
+#define _mm512_maskz_div_round_ps(U, A, B, R) __extension__ ({ \
+ (__m512)__builtin_ia32_divps512_mask((__v16sf)(__m512)(A), \
+ (__v16sf)(__m512)(B), \
+ (__v16sf)_mm512_setzero_ps(), \
+ (__mmask16)(U), (int)(R)); })
+
+#define _mm512_roundscale_ps(A, B) __extension__ ({ \
+ (__m512)__builtin_ia32_rndscaleps_mask((__v16sf)(__m512)(A), (int)(B), \
+ (__v16sf)(__m512)(A), (__mmask16)-1, \
+ _MM_FROUND_CUR_DIRECTION); })
+
+#define _mm512_mask_roundscale_ps(A, B, C, imm) __extension__ ({\
+ (__m512)__builtin_ia32_rndscaleps_mask((__v16sf)(__m512)(C), (int)(imm), \
+ (__v16sf)(__m512)(A), (__mmask16)(B), \
+ _MM_FROUND_CUR_DIRECTION); })
+
+#define _mm512_maskz_roundscale_ps(A, B, imm) __extension__ ({\
+ (__m512)__builtin_ia32_rndscaleps_mask((__v16sf)(__m512)(B), (int)(imm), \
+ (__v16sf)_mm512_setzero_ps(), \
+ (__mmask16)(A), \
+ _MM_FROUND_CUR_DIRECTION); })
+
+#define _mm512_mask_roundscale_round_ps(A, B, C, imm, R) __extension__ ({ \
+ (__m512)__builtin_ia32_rndscaleps_mask((__v16sf)(__m512)(C), (int)(imm), \
+ (__v16sf)(__m512)(A), (__mmask16)(B), \
+ (int)(R)); })
+
+#define _mm512_maskz_roundscale_round_ps(A, B, imm, R) __extension__ ({ \
+ (__m512)__builtin_ia32_rndscaleps_mask((__v16sf)(__m512)(B), (int)(imm), \
+ (__v16sf)_mm512_setzero_ps(), \
+ (__mmask16)(A), (int)(R)); })
+
+#define _mm512_roundscale_round_ps(A, imm, R) __extension__ ({ \
+ (__m512)__builtin_ia32_rndscaleps_mask((__v16sf)(__m512)(A), (int)(imm), \
+ (__v16sf)_mm512_undefined_ps(), \
+ (__mmask16)-1, (int)(R)); })
+
+#define _mm512_roundscale_pd(A, B) __extension__ ({ \
+ (__m512d)__builtin_ia32_rndscalepd_mask((__v8df)(__m512d)(A), (int)(B), \
+ (__v8df)(__m512d)(A), (__mmask8)-1, \
+ _MM_FROUND_CUR_DIRECTION); })
+
+#define _mm512_mask_roundscale_pd(A, B, C, imm) __extension__ ({\
+ (__m512d)__builtin_ia32_rndscalepd_mask((__v8df)(__m512d)(C), (int)(imm), \
+ (__v8df)(__m512d)(A), (__mmask8)(B), \
+ _MM_FROUND_CUR_DIRECTION); })
+
+#define _mm512_maskz_roundscale_pd(A, B, imm) __extension__ ({\
+ (__m512d)__builtin_ia32_rndscalepd_mask((__v8df)(__m512d)(B), (int)(imm), \
+ (__v8df)_mm512_setzero_pd(), \
+ (__mmask8)(A), \
+ _MM_FROUND_CUR_DIRECTION); })
+
+#define _mm512_mask_roundscale_round_pd(A, B, C, imm, R) __extension__ ({ \
+ (__m512d)__builtin_ia32_rndscalepd_mask((__v8df)(__m512d)(C), (int)(imm), \
+ (__v8df)(__m512d)(A), (__mmask8)(B), \
+ (int)(R)); })
+
+#define _mm512_maskz_roundscale_round_pd(A, B, imm, R) __extension__ ({ \
+ (__m512d)__builtin_ia32_rndscalepd_mask((__v8df)(__m512d)(B), (int)(imm), \
+ (__v8df)_mm512_setzero_pd(), \
+ (__mmask8)(A), (int)(R)); })
+
+#define _mm512_roundscale_round_pd(A, imm, R) __extension__ ({ \
+ (__m512d)__builtin_ia32_rndscalepd_mask((__v8df)(__m512d)(A), (int)(imm), \
+ (__v8df)_mm512_undefined_pd(), \
+ (__mmask8)-1, (int)(R)); })
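+
+/* Note (illustrative, not upstream): the roundscale immediate encodes more
+ * than a rounding mode: imm8[7:4] gives the number of fraction bits M
+ * (round to a multiple of 2^-M) while the low nibble selects the rounding
+ * behavior, which is how the floor/ceil wrappers above reuse these
+ * builtins with M = 0. */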
+
+#define _mm512_fmadd_round_pd(A, B, C, R) __extension__ ({ \
+ (__m512d)__builtin_ia32_vfmaddpd512_mask((__v8df)(__m512d)(A), \
+ (__v8df)(__m512d)(B), \
+ (__v8df)(__m512d)(C), (__mmask8)-1, \
+ (int)(R)); })
+
+
+#define _mm512_mask_fmadd_round_pd(A, U, B, C, R) __extension__ ({ \
+ (__m512d)__builtin_ia32_vfmaddpd512_mask((__v8df)(__m512d)(A), \
+ (__v8df)(__m512d)(B), \
+ (__v8df)(__m512d)(C), \
+ (__mmask8)(U), (int)(R)); })
+
+
+#define _mm512_mask3_fmadd_round_pd(A, B, C, U, R) __extension__ ({ \
+ (__m512d)__builtin_ia32_vfmaddpd512_mask3((__v8df)(__m512d)(A), \
+ (__v8df)(__m512d)(B), \
+ (__v8df)(__m512d)(C), \
+ (__mmask8)(U), (int)(R)); })
+
+
+#define _mm512_maskz_fmadd_round_pd(U, A, B, C, R) __extension__ ({ \
+ (__m512d)__builtin_ia32_vfmaddpd512_maskz((__v8df)(__m512d)(A), \
+ (__v8df)(__m512d)(B), \
+ (__v8df)(__m512d)(C), \
+ (__mmask8)(U), (int)(R)); })
+
+
+#define _mm512_fmsub_round_pd(A, B, C, R) __extension__ ({ \
+ (__m512d)__builtin_ia32_vfmaddpd512_mask((__v8df)(__m512d)(A), \
+ (__v8df)(__m512d)(B), \
+ -(__v8df)(__m512d)(C), \
+ (__mmask8)-1, (int)(R)); })
+
+
+#define _mm512_mask_fmsub_round_pd(A, U, B, C, R) __extension__ ({ \
+ (__m512d)__builtin_ia32_vfmaddpd512_mask((__v8df)(__m512d)(A), \
+ (__v8df)(__m512d)(B), \
+ -(__v8df)(__m512d)(C), \
+ (__mmask8)(U), (int)(R)); })
+
+
+#define _mm512_maskz_fmsub_round_pd(U, A, B, C, R) __extension__ ({ \
+ (__m512d)__builtin_ia32_vfmaddpd512_maskz((__v8df)(__m512d)(A), \
+ (__v8df)(__m512d)(B), \
+ -(__v8df)(__m512d)(C), \
+ (__mmask8)(U), (int)(R)); })
+
+
+#define _mm512_fnmadd_round_pd(A, B, C, R) __extension__ ({ \
+ (__m512d)__builtin_ia32_vfmaddpd512_mask(-(__v8df)(__m512d)(A), \
+ (__v8df)(__m512d)(B), \
+ (__v8df)(__m512d)(C), (__mmask8)-1, \
+ (int)(R)); })
+
+
+#define _mm512_mask3_fnmadd_round_pd(A, B, C, U, R) __extension__ ({ \
+ (__m512d)__builtin_ia32_vfmaddpd512_mask3(-(__v8df)(__m512d)(A), \
+ (__v8df)(__m512d)(B), \
+ (__v8df)(__m512d)(C), \
+ (__mmask8)(U), (int)(R)); })
+
+
+#define _mm512_maskz_fnmadd_round_pd(U, A, B, C, R) __extension__ ({ \
+ (__m512d)__builtin_ia32_vfmaddpd512_maskz(-(__v8df)(__m512d)(A), \
+ (__v8df)(__m512d)(B), \
+ (__v8df)(__m512d)(C), \
+ (__mmask8)(U), (int)(R)); })
+
+
+#define _mm512_fnmsub_round_pd(A, B, C, R) __extension__ ({ \
+ (__m512d)__builtin_ia32_vfmaddpd512_mask(-(__v8df)(__m512d)(A), \
+ (__v8df)(__m512d)(B), \
+ -(__v8df)(__m512d)(C), \
+ (__mmask8)-1, (int)(R)); })
+
+
+#define _mm512_maskz_fnmsub_round_pd(U, A, B, C, R) __extension__ ({ \
+ (__m512d)__builtin_ia32_vfmaddpd512_maskz(-(__v8df)(__m512d)(A), \
+ (__v8df)(__m512d)(B), \
+ -(__v8df)(__m512d)(C), \
+ (__mmask8)(U), (int)(R)); })
+
+
+static __inline__ __m512d __DEFAULT_FN_ATTRS
+_mm512_fmadd_pd(__m512d __A, __m512d __B, __m512d __C)
+{
+ return (__m512d) __builtin_ia32_vfmaddpd512_mask ((__v8df) __A,
+ (__v8df) __B,
+ (__v8df) __C,
+ (__mmask8) -1,
+ _MM_FROUND_CUR_DIRECTION);
+}
+
+static __inline__ __m512d __DEFAULT_FN_ATTRS
+_mm512_mask_fmadd_pd(__m512d __A, __mmask8 __U, __m512d __B, __m512d __C)
+{
+ return (__m512d) __builtin_ia32_vfmaddpd512_mask ((__v8df) __A,
+ (__v8df) __B,
+ (__v8df) __C,
+ (__mmask8) __U,
+ _MM_FROUND_CUR_DIRECTION);
+}
+
+static __inline__ __m512d __DEFAULT_FN_ATTRS
+_mm512_mask3_fmadd_pd(__m512d __A, __m512d __B, __m512d __C, __mmask8 __U)
+{
+ return (__m512d) __builtin_ia32_vfmaddpd512_mask3 ((__v8df) __A,
+ (__v8df) __B,
+ (__v8df) __C,
+ (__mmask8) __U,
+ _MM_FROUND_CUR_DIRECTION);
+}
+
+static __inline__ __m512d __DEFAULT_FN_ATTRS
+_mm512_maskz_fmadd_pd(__mmask8 __U, __m512d __A, __m512d __B, __m512d __C)
+{
+ return (__m512d) __builtin_ia32_vfmaddpd512_maskz ((__v8df) __A,
+ (__v8df) __B,
+ (__v8df) __C,
+ (__mmask8) __U,
+ _MM_FROUND_CUR_DIRECTION);
+}
+
+static __inline__ __m512d __DEFAULT_FN_ATTRS
+_mm512_fmsub_pd(__m512d __A, __m512d __B, __m512d __C)
+{
+ return (__m512d) __builtin_ia32_vfmaddpd512_mask ((__v8df) __A,
+ (__v8df) __B,
+ -(__v8df) __C,
+ (__mmask8) -1,
+ _MM_FROUND_CUR_DIRECTION);
+}
+
+static __inline__ __m512d __DEFAULT_FN_ATTRS
+_mm512_mask_fmsub_pd(__m512d __A, __mmask8 __U, __m512d __B, __m512d __C)
+{
+ return (__m512d) __builtin_ia32_vfmaddpd512_mask ((__v8df) __A,
+ (__v8df) __B,
+ -(__v8df) __C,
+ (__mmask8) __U,
+ _MM_FROUND_CUR_DIRECTION);
+}
+
+static __inline__ __m512d __DEFAULT_FN_ATTRS
+_mm512_maskz_fmsub_pd(__mmask8 __U, __m512d __A, __m512d __B, __m512d __C)
+{
+ return (__m512d) __builtin_ia32_vfmaddpd512_maskz ((__v8df) __A,
+ (__v8df) __B,
+ -(__v8df) __C,
+ (__mmask8) __U,
+ _MM_FROUND_CUR_DIRECTION);
+}
+
+static __inline__ __m512d __DEFAULT_FN_ATTRS
+_mm512_fnmadd_pd(__m512d __A, __m512d __B, __m512d __C)
+{
+ return (__m512d) __builtin_ia32_vfmaddpd512_mask (-(__v8df) __A,
+ (__v8df) __B,
+ (__v8df) __C,
+ (__mmask8) -1,
+ _MM_FROUND_CUR_DIRECTION);
+}
+
+static __inline__ __m512d __DEFAULT_FN_ATTRS
+_mm512_mask3_fnmadd_pd(__m512d __A, __m512d __B, __m512d __C, __mmask8 __U)
+{
+ return (__m512d) __builtin_ia32_vfmaddpd512_mask3 (-(__v8df) __A,
+ (__v8df) __B,
+ (__v8df) __C,
+ (__mmask8) __U,
+ _MM_FROUND_CUR_DIRECTION);
+}
+
+static __inline__ __m512d __DEFAULT_FN_ATTRS
+_mm512_maskz_fnmadd_pd(__mmask8 __U, __m512d __A, __m512d __B, __m512d __C)
+{
+ return (__m512d) __builtin_ia32_vfmaddpd512_maskz (-(__v8df) __A,
+ (__v8df) __B,
+ (__v8df) __C,
+ (__mmask8) __U,
+ _MM_FROUND_CUR_DIRECTION);
+}
+
+static __inline__ __m512d __DEFAULT_FN_ATTRS
+_mm512_fnmsub_pd(__m512d __A, __m512d __B, __m512d __C)
+{
+ return (__m512d) __builtin_ia32_vfmaddpd512_mask (-(__v8df) __A,
+ (__v8df) __B,
+ -(__v8df) __C,
+ (__mmask8) -1,
+ _MM_FROUND_CUR_DIRECTION);
+}
+
+static __inline__ __m512d __DEFAULT_FN_ATTRS
+_mm512_maskz_fnmsub_pd(__mmask8 __U, __m512d __A, __m512d __B, __m512d __C)
+{
+ return (__m512d) __builtin_ia32_vfmaddpd512_maskz (-(__v8df) __A,
+ (__v8df) __B,
+ -(__v8df) __C,
+ (__mmask8) __U,
+ _MM_FROUND_CUR_DIRECTION);
+}
+
+#define _mm512_fmadd_round_ps(A, B, C, R) __extension__ ({ \
+ (__m512)__builtin_ia32_vfmaddps512_mask((__v16sf)(__m512)(A), \
+ (__v16sf)(__m512)(B), \
+ (__v16sf)(__m512)(C), (__mmask16)-1, \
+ (int)(R)); })
+
+
+#define _mm512_mask_fmadd_round_ps(A, U, B, C, R) __extension__ ({ \
+ (__m512)__builtin_ia32_vfmaddps512_mask((__v16sf)(__m512)(A), \
+ (__v16sf)(__m512)(B), \
+ (__v16sf)(__m512)(C), \
+ (__mmask16)(U), (int)(R)); })
+
+
+#define _mm512_mask3_fmadd_round_ps(A, B, C, U, R) __extension__ ({ \
+ (__m512)__builtin_ia32_vfmaddps512_mask3((__v16sf)(__m512)(A), \
+ (__v16sf)(__m512)(B), \
+ (__v16sf)(__m512)(C), \
+ (__mmask16)(U), (int)(R)); })
+
+
+#define _mm512_maskz_fmadd_round_ps(U, A, B, C, R) __extension__ ({ \
+ (__m512)__builtin_ia32_vfmaddps512_maskz((__v16sf)(__m512)(A), \
+ (__v16sf)(__m512)(B), \
+ (__v16sf)(__m512)(C), \
+ (__mmask16)(U), (int)(R)); })
+
+
+#define _mm512_fmsub_round_ps(A, B, C, R) __extension__ ({ \
+ (__m512)__builtin_ia32_vfmaddps512_mask((__v16sf)(__m512)(A), \
+ (__v16sf)(__m512)(B), \
+ -(__v16sf)(__m512)(C), \
+ (__mmask16)-1, (int)(R)); })
+
+
+#define _mm512_mask_fmsub_round_ps(A, U, B, C, R) __extension__ ({ \
+ (__m512)__builtin_ia32_vfmaddps512_mask((__v16sf)(__m512)(A), \
+ (__v16sf)(__m512)(B), \
+ -(__v16sf)(__m512)(C), \
+ (__mmask16)(U), (int)(R)); })
+
+
+#define _mm512_maskz_fmsub_round_ps(U, A, B, C, R) __extension__ ({ \
+ (__m512)__builtin_ia32_vfmaddps512_maskz((__v16sf)(__m512)(A), \
+ (__v16sf)(__m512)(B), \
+ -(__v16sf)(__m512)(C), \
+ (__mmask16)(U), (int)(R)); })
+
+
+#define _mm512_fnmadd_round_ps(A, B, C, R) __extension__ ({ \
+ (__m512)__builtin_ia32_vfmaddps512_mask(-(__v16sf)(__m512)(A), \
+ (__v16sf)(__m512)(B), \
+ (__v16sf)(__m512)(C), (__mmask16)-1, \
+ (int)(R)); })
+
+
+#define _mm512_mask3_fnmadd_round_ps(A, B, C, U, R) __extension__ ({ \
+ (__m512)__builtin_ia32_vfmaddps512_mask3(-(__v16sf)(__m512)(A), \
+ (__v16sf)(__m512)(B), \
+ (__v16sf)(__m512)(C), \
+ (__mmask16)(U), (int)(R)); })
+
+
+#define _mm512_maskz_fnmadd_round_ps(U, A, B, C, R) __extension__ ({ \
+ (__m512)__builtin_ia32_vfmaddps512_maskz(-(__v16sf)(__m512)(A), \
+ (__v16sf)(__m512)(B), \
+ (__v16sf)(__m512)(C), \
+ (__mmask16)(U), (int)(R)); })
+
+
+#define _mm512_fnmsub_round_ps(A, B, C, R) __extension__ ({ \
+ (__m512)__builtin_ia32_vfmaddps512_mask(-(__v16sf)(__m512)(A), \
+ (__v16sf)(__m512)(B), \
+ -(__v16sf)(__m512)(C), \
+ (__mmask16)-1, (int)(R)); })
+
+
+#define _mm512_maskz_fnmsub_round_ps(U, A, B, C, R) __extension__ ({ \
+ (__m512)__builtin_ia32_vfmaddps512_maskz(-(__v16sf)(__m512)(A), \
+ (__v16sf)(__m512)(B), \
+ -(__v16sf)(__m512)(C), \
+ (__mmask16)(U), (int)(R)); })
+
+
+static __inline__ __m512 __DEFAULT_FN_ATTRS
+_mm512_fmadd_ps(__m512 __A, __m512 __B, __m512 __C)
+{
+ return (__m512) __builtin_ia32_vfmaddps512_mask ((__v16sf) __A,
+ (__v16sf) __B,
+ (__v16sf) __C,
+ (__mmask16) -1,
+ _MM_FROUND_CUR_DIRECTION);
+}
+
+static __inline__ __m512 __DEFAULT_FN_ATTRS
+_mm512_mask_fmadd_ps(__m512 __A, __mmask16 __U, __m512 __B, __m512 __C)
+{
+ return (__m512) __builtin_ia32_vfmaddps512_mask ((__v16sf) __A,
+ (__v16sf) __B,
+ (__v16sf) __C,
+ (__mmask16) __U,
+ _MM_FROUND_CUR_DIRECTION);
+}
+
+static __inline__ __m512 __DEFAULT_FN_ATTRS
+_mm512_mask3_fmadd_ps(__m512 __A, __m512 __B, __m512 __C, __mmask16 __U)
+{
+ return (__m512) __builtin_ia32_vfmaddps512_mask3 ((__v16sf) __A,
+ (__v16sf) __B,
+ (__v16sf) __C,
+ (__mmask16) __U,
+ _MM_FROUND_CUR_DIRECTION);
+}
+
+static __inline__ __m512 __DEFAULT_FN_ATTRS
+_mm512_maskz_fmadd_ps(__mmask16 __U, __m512 __A, __m512 __B, __m512 __C)
+{
+ return (__m512) __builtin_ia32_vfmaddps512_maskz ((__v16sf) __A,
+ (__v16sf) __B,
+ (__v16sf) __C,
+ (__mmask16) __U,
+ _MM_FROUND_CUR_DIRECTION);
+}
+
+static __inline__ __m512 __DEFAULT_FN_ATTRS
+_mm512_fmsub_ps(__m512 __A, __m512 __B, __m512 __C)
+{
+ return (__m512) __builtin_ia32_vfmaddps512_mask ((__v16sf) __A,
+ (__v16sf) __B,
+ -(__v16sf) __C,
+ (__mmask16) -1,
+ _MM_FROUND_CUR_DIRECTION);
+}
+
+static __inline__ __m512 __DEFAULT_FN_ATTRS
+_mm512_mask_fmsub_ps(__m512 __A, __mmask16 __U, __m512 __B, __m512 __C)
+{
+ return (__m512) __builtin_ia32_vfmaddps512_mask ((__v16sf) __A,
+ (__v16sf) __B,
+ -(__v16sf) __C,
+ (__mmask16) __U,
+ _MM_FROUND_CUR_DIRECTION);
+}
+
+static __inline__ __m512 __DEFAULT_FN_ATTRS
+_mm512_maskz_fmsub_ps(__mmask16 __U, __m512 __A, __m512 __B, __m512 __C)
+{
+ return (__m512) __builtin_ia32_vfmaddps512_maskz ((__v16sf) __A,
+ (__v16sf) __B,
+ -(__v16sf) __C,
+ (__mmask16) __U,
+ _MM_FROUND_CUR_DIRECTION);
+}
+
+static __inline__ __m512 __DEFAULT_FN_ATTRS
+_mm512_fnmadd_ps(__m512 __A, __m512 __B, __m512 __C)
+{
+ return (__m512) __builtin_ia32_vfmaddps512_mask (-(__v16sf) __A,
+ (__v16sf) __B,
+ (__v16sf) __C,
+ (__mmask16) -1,
+ _MM_FROUND_CUR_DIRECTION);
+}
+
+static __inline__ __m512 __DEFAULT_FN_ATTRS
+_mm512_mask3_fnmadd_ps(__m512 __A, __m512 __B, __m512 __C, __mmask16 __U)
+{
+ return (__m512) __builtin_ia32_vfmaddps512_mask3 (-(__v16sf) __A,
+ (__v16sf) __B,
+ (__v16sf) __C,
+ (__mmask16) __U,
+ _MM_FROUND_CUR_DIRECTION);
+}
+
+static __inline__ __m512 __DEFAULT_FN_ATTRS
+_mm512_maskz_fnmadd_ps(__mmask16 __U, __m512 __A, __m512 __B, __m512 __C)
+{
+ return (__m512) __builtin_ia32_vfmaddps512_maskz (-(__v16sf) __A,
+ (__v16sf) __B,
+ (__v16sf) __C,
+ (__mmask16) __U,
+ _MM_FROUND_CUR_DIRECTION);
+}
+
+static __inline__ __m512 __DEFAULT_FN_ATTRS
+_mm512_fnmsub_ps(__m512 __A, __m512 __B, __m512 __C)
+{
+ return (__m512) __builtin_ia32_vfmaddps512_mask (-(__v16sf) __A,
+ (__v16sf) __B,
+ -(__v16sf) __C,
+ (__mmask16) -1,
+ _MM_FROUND_CUR_DIRECTION);
+}
+
+static __inline__ __m512 __DEFAULT_FN_ATTRS
+_mm512_maskz_fnmsub_ps(__mmask16 __U, __m512 __A, __m512 __B, __m512 __C)
+{
+ return (__m512) __builtin_ia32_vfmaddps512_maskz (-(__v16sf) __A,
+ (__v16sf) __B,
+ -(__v16sf) __C,
+ (__mmask16) __U,
+ _MM_FROUND_CUR_DIRECTION);
+}
+
+#define _mm512_fmaddsub_round_pd(A, B, C, R) __extension__ ({ \
+ (__m512d)__builtin_ia32_vfmaddsubpd512_mask((__v8df)(__m512d)(A), \
+ (__v8df)(__m512d)(B), \
+ (__v8df)(__m512d)(C), \
+ (__mmask8)-1, (int)(R)); })
+
+
+#define _mm512_mask_fmaddsub_round_pd(A, U, B, C, R) __extension__ ({ \
+ (__m512d)__builtin_ia32_vfmaddsubpd512_mask((__v8df)(__m512d)(A), \
+ (__v8df)(__m512d)(B), \
+ (__v8df)(__m512d)(C), \
+ (__mmask8)(U), (int)(R)); })
+
+
+#define _mm512_mask3_fmaddsub_round_pd(A, B, C, U, R) __extension__ ({ \
+ (__m512d)__builtin_ia32_vfmaddsubpd512_mask3((__v8df)(__m512d)(A), \
+ (__v8df)(__m512d)(B), \
+ (__v8df)(__m512d)(C), \
+ (__mmask8)(U), (int)(R)); })
+
+
+#define _mm512_maskz_fmaddsub_round_pd(U, A, B, C, R) __extension__ ({ \
+ (__m512d)__builtin_ia32_vfmaddsubpd512_maskz((__v8df)(__m512d)(A), \
+ (__v8df)(__m512d)(B), \
+ (__v8df)(__m512d)(C), \
+ (__mmask8)(U), (int)(R)); })
+
+
+#define _mm512_fmsubadd_round_pd(A, B, C, R) __extension__ ({ \
+ (__m512d)__builtin_ia32_vfmaddsubpd512_mask((__v8df)(__m512d)(A), \
+ (__v8df)(__m512d)(B), \
+ -(__v8df)(__m512d)(C), \
+ (__mmask8)-1, (int)(R)); })
+
+
+#define _mm512_mask_fmsubadd_round_pd(A, U, B, C, R) __extension__ ({ \
+ (__m512d)__builtin_ia32_vfmaddsubpd512_mask((__v8df)(__m512d)(A), \
+ (__v8df)(__m512d)(B), \
+ -(__v8df)(__m512d)(C), \
+ (__mmask8)(U), (int)(R)); })
+
+
+#define _mm512_maskz_fmsubadd_round_pd(U, A, B, C, R) __extension__ ({ \
+ (__m512d)__builtin_ia32_vfmaddsubpd512_maskz((__v8df)(__m512d)(A), \
+ (__v8df)(__m512d)(B), \
+ -(__v8df)(__m512d)(C), \
+ (__mmask8)(U), (int)(R)); })
+
+
+static __inline__ __m512d __DEFAULT_FN_ATTRS
+_mm512_fmaddsub_pd(__m512d __A, __m512d __B, __m512d __C)
+{
+ return (__m512d) __builtin_ia32_vfmaddsubpd512_mask ((__v8df) __A,
+ (__v8df) __B,
+ (__v8df) __C,
+ (__mmask8) -1,
+ _MM_FROUND_CUR_DIRECTION);
+}
+
+static __inline__ __m512d __DEFAULT_FN_ATTRS
+_mm512_mask_fmaddsub_pd(__m512d __A, __mmask8 __U, __m512d __B, __m512d __C)
+{
+ return (__m512d) __builtin_ia32_vfmaddsubpd512_mask ((__v8df) __A,
+ (__v8df) __B,
+ (__v8df) __C,
+ (__mmask8) __U,
+ _MM_FROUND_CUR_DIRECTION);
+}
+
+static __inline__ __m512d __DEFAULT_FN_ATTRS
+_mm512_mask3_fmaddsub_pd(__m512d __A, __m512d __B, __m512d __C, __mmask8 __U)
+{
+ return (__m512d) __builtin_ia32_vfmaddsubpd512_mask3 ((__v8df) __A,
+ (__v8df) __B,
+ (__v8df) __C,
+ (__mmask8) __U,
+ _MM_FROUND_CUR_DIRECTION);
+}
+
+static __inline__ __m512d __DEFAULT_FN_ATTRS
+_mm512_maskz_fmaddsub_pd(__mmask8 __U, __m512d __A, __m512d __B, __m512d __C)
+{
+ return (__m512d) __builtin_ia32_vfmaddsubpd512_maskz ((__v8df) __A,
+ (__v8df) __B,
+ (__v8df) __C,
+ (__mmask8) __U,
+ _MM_FROUND_CUR_DIRECTION);
+}
+
+static __inline__ __m512d __DEFAULT_FN_ATTRS
+_mm512_fmsubadd_pd(__m512d __A, __m512d __B, __m512d __C)
+{
+ return (__m512d) __builtin_ia32_vfmaddsubpd512_mask ((__v8df) __A,
+ (__v8df) __B,
+ -(__v8df) __C,
+ (__mmask8) -1,
+ _MM_FROUND_CUR_DIRECTION);
+}
+
+static __inline__ __m512d __DEFAULT_FN_ATTRS
+_mm512_mask_fmsubadd_pd(__m512d __A, __mmask8 __U, __m512d __B, __m512d __C)
+{
+ return (__m512d) __builtin_ia32_vfmaddsubpd512_mask ((__v8df) __A,
+ (__v8df) __B,
+ -(__v8df) __C,
+ (__mmask8) __U,
+ _MM_FROUND_CUR_DIRECTION);
+}
+
+static __inline__ __m512d __DEFAULT_FN_ATTRS
+_mm512_maskz_fmsubadd_pd(__mmask8 __U, __m512d __A, __m512d __B, __m512d __C)
+{
+ return (__m512d) __builtin_ia32_vfmaddsubpd512_maskz ((__v8df) __A,
+ (__v8df) __B,
+ -(__v8df) __C,
+ (__mmask8) __U,
+ _MM_FROUND_CUR_DIRECTION);
+}
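+
+/* Note (illustrative, not upstream): fmaddsub subtracts C in even-indexed
+ * lanes and adds C in odd-indexed lanes; fmsubadd is the opposite pairing,
+ * so negating C converts one into the other and both share the vfmaddsub
+ * builtin. */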
+
+#define _mm512_fmaddsub_round_ps(A, B, C, R) __extension__ ({ \
+ (__m512)__builtin_ia32_vfmaddsubps512_mask((__v16sf)(__m512)(A), \
+ (__v16sf)(__m512)(B), \
+ (__v16sf)(__m512)(C), \
+ (__mmask16)-1, (int)(R)); })
+
+
+#define _mm512_mask_fmaddsub_round_ps(A, U, B, C, R) __extension__ ({ \
+ (__m512)__builtin_ia32_vfmaddsubps512_mask((__v16sf)(__m512)(A), \
+ (__v16sf)(__m512)(B), \
+ (__v16sf)(__m512)(C), \
+ (__mmask16)(U), (int)(R)); })
+
+
+#define _mm512_mask3_fmaddsub_round_ps(A, B, C, U, R) __extension__ ({ \
+ (__m512)__builtin_ia32_vfmaddsubps512_mask3((__v16sf)(__m512)(A), \
+ (__v16sf)(__m512)(B), \
+ (__v16sf)(__m512)(C), \
+ (__mmask16)(U), (int)(R)); })
+
+
+#define _mm512_maskz_fmaddsub_round_ps(U, A, B, C, R) __extension__ ({ \
+ (__m512)__builtin_ia32_vfmaddsubps512_maskz((__v16sf)(__m512)(A), \
+ (__v16sf)(__m512)(B), \
+ (__v16sf)(__m512)(C), \
+ (__mmask16)(U), (int)(R)); })
+
+
+#define _mm512_fmsubadd_round_ps(A, B, C, R) __extension__ ({ \
+ (__m512)__builtin_ia32_vfmaddsubps512_mask((__v16sf)(__m512)(A), \
+ (__v16sf)(__m512)(B), \
+ -(__v16sf)(__m512)(C), \
+ (__mmask16)-1, (int)(R)); })
+
+
+#define _mm512_mask_fmsubadd_round_ps(A, U, B, C, R) __extension__ ({ \
+ (__m512)__builtin_ia32_vfmaddsubps512_mask((__v16sf)(__m512)(A), \
+ (__v16sf)(__m512)(B), \
+ -(__v16sf)(__m512)(C), \
+ (__mmask16)(U), (int)(R)); })
+
+
+#define _mm512_maskz_fmsubadd_round_ps(U, A, B, C, R) __extension__ ({ \
+ (__m512)__builtin_ia32_vfmaddsubps512_maskz((__v16sf)(__m512)(A), \
+ (__v16sf)(__m512)(B), \
+ -(__v16sf)(__m512)(C), \
+ (__mmask16)(U), (int)(R)); })
+
+
+static __inline__ __m512 __DEFAULT_FN_ATTRS
+_mm512_fmaddsub_ps(__m512 __A, __m512 __B, __m512 __C)
+{
+ return (__m512) __builtin_ia32_vfmaddsubps512_mask ((__v16sf) __A,
+ (__v16sf) __B,
+ (__v16sf) __C,
+ (__mmask16) -1,
+ _MM_FROUND_CUR_DIRECTION);
+}
+
+static __inline__ __m512 __DEFAULT_FN_ATTRS
+_mm512_mask_fmaddsub_ps(__m512 __A, __mmask16 __U, __m512 __B, __m512 __C)
+{
+ return (__m512) __builtin_ia32_vfmaddsubps512_mask ((__v16sf) __A,
+ (__v16sf) __B,
+ (__v16sf) __C,
+ (__mmask16) __U,
+ _MM_FROUND_CUR_DIRECTION);
+}
+
+static __inline__ __m512 __DEFAULT_FN_ATTRS
+_mm512_mask3_fmaddsub_ps(__m512 __A, __m512 __B, __m512 __C, __mmask16 __U)
+{
+ return (__m512) __builtin_ia32_vfmaddsubps512_mask3 ((__v16sf) __A,
+ (__v16sf) __B,
+ (__v16sf) __C,
+ (__mmask16) __U,
+ _MM_FROUND_CUR_DIRECTION);
+}
+
+static __inline__ __m512 __DEFAULT_FN_ATTRS
+_mm512_maskz_fmaddsub_ps(__mmask16 __U, __m512 __A, __m512 __B, __m512 __C)
+{
+ return (__m512) __builtin_ia32_vfmaddsubps512_maskz ((__v16sf) __A,
+ (__v16sf) __B,
+ (__v16sf) __C,
+ (__mmask16) __U,
+ _MM_FROUND_CUR_DIRECTION);
+}
+
+static __inline__ __m512 __DEFAULT_FN_ATTRS
+_mm512_fmsubadd_ps(__m512 __A, __m512 __B, __m512 __C)
+{
+ return (__m512) __builtin_ia32_vfmaddsubps512_mask ((__v16sf) __A,
+ (__v16sf) __B,
+ -(__v16sf) __C,
+ (__mmask16) -1,
+ _MM_FROUND_CUR_DIRECTION);
+}
+
+static __inline__ __m512 __DEFAULT_FN_ATTRS
+_mm512_mask_fmsubadd_ps(__m512 __A, __mmask16 __U, __m512 __B, __m512 __C)
+{
+ return (__m512) __builtin_ia32_vfmaddsubps512_mask ((__v16sf) __A,
+ (__v16sf) __B,
+ -(__v16sf) __C,
+ (__mmask16) __U,
+ _MM_FROUND_CUR_DIRECTION);
+}
+
+static __inline__ __m512 __DEFAULT_FN_ATTRS
+_mm512_maskz_fmsubadd_ps(__mmask16 __U, __m512 __A, __m512 __B, __m512 __C)
+{
+ return (__m512) __builtin_ia32_vfmaddsubps512_maskz ((__v16sf) __A,
+ (__v16sf) __B,
+ -(__v16sf) __C,
+ (__mmask16) __U,
+ _MM_FROUND_CUR_DIRECTION);
+}
+
+#define _mm512_mask3_fmsub_round_pd(A, B, C, U, R) __extension__ ({ \
+ (__m512d)__builtin_ia32_vfmsubpd512_mask3((__v8df)(__m512d)(A), \
+ (__v8df)(__m512d)(B), \
+ (__v8df)(__m512d)(C), \
+ (__mmask8)(U), (int)(R)); })
+
+
+static __inline__ __m512d __DEFAULT_FN_ATTRS
+_mm512_mask3_fmsub_pd(__m512d __A, __m512d __B, __m512d __C, __mmask8 __U)
+{
+ return (__m512d) __builtin_ia32_vfmsubpd512_mask3 ((__v8df) __A,
+ (__v8df) __B,
+ (__v8df) __C,
+ (__mmask8) __U,
+ _MM_FROUND_CUR_DIRECTION);
+}
+
+#define _mm512_mask3_fmsub_round_ps(A, B, C, U, R) __extension__ ({ \
+ (__m512)__builtin_ia32_vfmsubps512_mask3((__v16sf)(__m512)(A), \
+ (__v16sf)(__m512)(B), \
+ (__v16sf)(__m512)(C), \
+ (__mmask16)(U), (int)(R)); })
+
+
+static __inline__ __m512 __DEFAULT_FN_ATTRS
+_mm512_mask3_fmsub_ps(__m512 __A, __m512 __B, __m512 __C, __mmask16 __U)
+{
+ return (__m512) __builtin_ia32_vfmsubps512_mask3 ((__v16sf) __A,
+ (__v16sf) __B,
+ (__v16sf) __C,
+ (__mmask16) __U,
+ _MM_FROUND_CUR_DIRECTION);
+}
+
+#define _mm512_mask3_fmsubadd_round_pd(A, B, C, U, R) __extension__ ({ \
+ (__m512d)__builtin_ia32_vfmsubaddpd512_mask3((__v8df)(__m512d)(A), \
+ (__v8df)(__m512d)(B), \
+ (__v8df)(__m512d)(C), \
+ (__mmask8)(U), (int)(R)); })
+
+
+static __inline__ __m512d __DEFAULT_FN_ATTRS
+_mm512_mask3_fmsubadd_pd(__m512d __A, __m512d __B, __m512d __C, __mmask8 __U)
+{
+ return (__m512d) __builtin_ia32_vfmsubaddpd512_mask3 ((__v8df) __A,
+ (__v8df) __B,
+ (__v8df) __C,
+ (__mmask8) __U,
+ _MM_FROUND_CUR_DIRECTION);
+}
+
+#define _mm512_mask3_fmsubadd_round_ps(A, B, C, U, R) __extension__ ({ \
+ (__m512)__builtin_ia32_vfmsubaddps512_mask3((__v16sf)(__m512)(A), \
+ (__v16sf)(__m512)(B), \
+ (__v16sf)(__m512)(C), \
+ (__mmask16)(U), (int)(R)); })
+
+
+static __inline__ __m512 __DEFAULT_FN_ATTRS
+_mm512_mask3_fmsubadd_ps(__m512 __A, __m512 __B, __m512 __C, __mmask16 __U)
+{
+ return (__m512) __builtin_ia32_vfmsubaddps512_mask3 ((__v16sf) __A,
+ (__v16sf) __B,
+ (__v16sf) __C,
+ (__mmask16) __U,
+ _MM_FROUND_CUR_DIRECTION);
+}
+
+#define _mm512_mask_fnmadd_round_pd(A, U, B, C, R) __extension__ ({ \
+ (__m512d)__builtin_ia32_vfnmaddpd512_mask((__v8df)(__m512d)(A), \
+ (__v8df)(__m512d)(B), \
+ (__v8df)(__m512d)(C), \
+ (__mmask8)(U), (int)(R)); })
+
+
+static __inline__ __m512d __DEFAULT_FN_ATTRS
+_mm512_mask_fnmadd_pd(__m512d __A, __mmask8 __U, __m512d __B, __m512d __C)
+{
+ return (__m512d) __builtin_ia32_vfnmaddpd512_mask ((__v8df) __A,
+ (__v8df) __B,
+ (__v8df) __C,
+ (__mmask8) __U,
+ _MM_FROUND_CUR_DIRECTION);
+}
+
+#define _mm512_mask_fnmadd_round_ps(A, U, B, C, R) __extension__ ({ \
+ (__m512)__builtin_ia32_vfnmaddps512_mask((__v16sf)(__m512)(A), \
+ (__v16sf)(__m512)(B), \
+ (__v16sf)(__m512)(C), \
+ (__mmask16)(U), (int)(R)); })
+
+
+static __inline__ __m512 __DEFAULT_FN_ATTRS
+_mm512_mask_fnmadd_ps(__m512 __A, __mmask16 __U, __m512 __B, __m512 __C)
+{
+ return (__m512) __builtin_ia32_vfnmaddps512_mask ((__v16sf) __A,
+ (__v16sf) __B,
+ (__v16sf) __C,
+ (__mmask16) __U,
+ _MM_FROUND_CUR_DIRECTION);
+}
+
+#define _mm512_mask_fnmsub_round_pd(A, U, B, C, R) __extension__ ({ \
+ (__m512d)__builtin_ia32_vfnmsubpd512_mask((__v8df)(__m512d)(A), \
+ (__v8df)(__m512d)(B), \
+ (__v8df)(__m512d)(C), \
+ (__mmask8)(U), (int)(R)); })
+
+
+#define _mm512_mask3_fnmsub_round_pd(A, B, C, U, R) __extension__ ({ \
+ (__m512d)__builtin_ia32_vfnmsubpd512_mask3((__v8df)(__m512d)(A), \
+ (__v8df)(__m512d)(B), \
+ (__v8df)(__m512d)(C), \
+ (__mmask8)(U), (int)(R)); })
+
+
+static __inline__ __m512d __DEFAULT_FN_ATTRS
+_mm512_mask_fnmsub_pd(__m512d __A, __mmask8 __U, __m512d __B, __m512d __C)
+{
+ return (__m512d) __builtin_ia32_vfnmsubpd512_mask ((__v8df) __A,
+ (__v8df) __B,
+ (__v8df) __C,
+ (__mmask8) __U,
+ _MM_FROUND_CUR_DIRECTION);
+}
+
+static __inline__ __m512d __DEFAULT_FN_ATTRS
+_mm512_mask3_fnmsub_pd(__m512d __A, __m512d __B, __m512d __C, __mmask8 __U)
+{
+ return (__m512d) __builtin_ia32_vfnmsubpd512_mask3 ((__v8df) __A,
+ (__v8df) __B,
+ (__v8df) __C,
+ (__mmask8) __U,
+ _MM_FROUND_CUR_DIRECTION);
+}
+
+#define _mm512_mask_fnmsub_round_ps(A, U, B, C, R) __extension__ ({ \
+ (__m512)__builtin_ia32_vfnmsubps512_mask((__v16sf)(__m512)(A), \
+ (__v16sf)(__m512)(B), \
+ (__v16sf)(__m512)(C), \
+ (__mmask16)(U), (int)(R)); })
+
+
+#define _mm512_mask3_fnmsub_round_ps(A, B, C, U, R) __extension__ ({ \
+ (__m512)__builtin_ia32_vfnmsubps512_mask3((__v16sf)(__m512)(A), \
+ (__v16sf)(__m512)(B), \
+ (__v16sf)(__m512)(C), \
+ (__mmask16)(U), (int)(R)); })
+
+
+static __inline__ __m512 __DEFAULT_FN_ATTRS
+_mm512_mask_fnmsub_ps(__m512 __A, __mmask16 __U, __m512 __B, __m512 __C)
+{
+ return (__m512) __builtin_ia32_vfnmsubps512_mask ((__v16sf) __A,
+ (__v16sf) __B,
+ (__v16sf) __C,
+ (__mmask16) __U,
+ _MM_FROUND_CUR_DIRECTION);
+}
+
+static __inline__ __m512 __DEFAULT_FN_ATTRS
+_mm512_mask3_fnmsub_ps(__m512 __A, __m512 __B, __m512 __C, __mmask16 __U)
+{
+ return (__m512) __builtin_ia32_vfnmsubps512_mask3 ((__v16sf) __A,
+ (__v16sf) __B,
+ (__v16sf) __C,
+ (__mmask16) __U,
+ _MM_FROUND_CUR_DIRECTION);
+}
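+
+/* Usage sketch (illustrative comment, not part of the API): the
+ * addsub/subadd forms alternate the sign of the addend per lane --
+ * fmaddsub subtracts __C in even lanes and adds it in odd lanes, and
+ * fmsubadd does the reverse (implemented above by negating __C).
+ * Assuming _mm512_set1_ps() from earlier in this header:
+ *
+ *   __m512 a = _mm512_set1_ps(2.0f), b = _mm512_set1_ps(3.0f);
+ *   __m512 c = _mm512_set1_ps(1.0f);
+ *   __m512 r = _mm512_fmaddsub_ps(a, b, c);
+ *   // even lanes: 2*3 - 1 = 5.0f; odd lanes: 2*3 + 1 = 7.0f
+ */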
+
+
+
+/* Vector permutations */
+
+static __inline __m512i __DEFAULT_FN_ATTRS
+_mm512_permutex2var_epi32(__m512i __A, __m512i __I, __m512i __B)
+{
+ return (__m512i) __builtin_ia32_vpermt2vard512_mask ((__v16si) __I
+ /* idx */ ,
+ (__v16si) __A,
+ (__v16si) __B,
+ (__mmask16) -1);
+}
+
+static __inline__ __m512i __DEFAULT_FN_ATTRS
+_mm512_mask_permutex2var_epi32 (__m512i __A, __mmask16 __U,
+ __m512i __I, __m512i __B)
+{
+ return (__m512i) __builtin_ia32_vpermt2vard512_mask ((__v16si) __I
+ /* idx */ ,
+ (__v16si) __A,
+ (__v16si) __B,
+ (__mmask16) __U);
+}
+
+static __inline__ __m512i __DEFAULT_FN_ATTRS
+_mm512_maskz_permutex2var_epi32 (__mmask16 __U, __m512i __A,
+ __m512i __I, __m512i __B)
+{
+ return (__m512i) __builtin_ia32_vpermt2vard512_maskz ((__v16si) __I
+ /* idx */ ,
+ (__v16si) __A,
+ (__v16si) __B,
+ (__mmask16) __U);
+}
+
+static __inline __m512i __DEFAULT_FN_ATTRS
+_mm512_permutex2var_epi64(__m512i __A, __m512i __I, __m512i __B)
+{
+ return (__m512i) __builtin_ia32_vpermt2varq512_mask ((__v8di) __I
+ /* idx */ ,
+ (__v8di) __A,
+ (__v8di) __B,
+ (__mmask8) -1);
+}
+
+static __inline__ __m512i __DEFAULT_FN_ATTRS
+_mm512_mask_permutex2var_epi64 (__m512i __A, __mmask8 __U, __m512i __I,
+ __m512i __B)
+{
+ return (__m512i) __builtin_ia32_vpermt2varq512_mask ((__v8di) __I
+ /* idx */ ,
+ (__v8di) __A,
+ (__v8di) __B,
+ (__mmask8) __U);
+}
+
+
+static __inline__ __m512i __DEFAULT_FN_ATTRS
+_mm512_maskz_permutex2var_epi64 (__mmask8 __U, __m512i __A,
+ __m512i __I, __m512i __B)
+{
+ return (__m512i) __builtin_ia32_vpermt2varq512_maskz ((__v8di) __I
+ /* idx */ ,
+ (__v8di) __A,
+ (__v8di) __B,
+ (__mmask8) __U);
+}
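+
+/* Usage sketch (illustrative): each index in __I selects one element
+ * from the concatenation of __A (indices 0..15) and __B (indices
+ * 16..31); bit 4 of the index picks __B.  Assuming _mm512_setr_epi32()
+ * from earlier in this header, an interleave of the low halves is:
+ *
+ *   __m512i idx = _mm512_setr_epi32(0, 16, 1, 17, 2, 18, 3, 19,
+ *                                   4, 20, 5, 21, 6, 22, 7, 23);
+ *   __m512i r   = _mm512_permutex2var_epi32(a, idx, b);
+ */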
+
+#define _mm512_alignr_epi64(A, B, I) __extension__ ({ \
+ (__m512i)__builtin_ia32_alignq512_mask((__v8di)(__m512i)(A), \
+ (__v8di)(__m512i)(B), (int)(I), \
+ (__v8di)_mm512_setzero_si512(), \
+ (__mmask8)-1); })
+
+#define _mm512_mask_alignr_epi64(W, U, A, B, imm) __extension__({\
+ (__m512i)__builtin_ia32_alignq512_mask((__v8di)(__m512i)(A), \
+ (__v8di)(__m512i)(B), (int)(imm), \
+ (__v8di)(__m512i)(W), \
+ (__mmask8)(U)); })
+
+#define _mm512_maskz_alignr_epi64(U, A, B, imm) __extension__({\
+ (__m512i)__builtin_ia32_alignq512_mask((__v8di)(__m512i)(A), \
+ (__v8di)(__m512i)(B), (int)(imm), \
+ (__v8di)_mm512_setzero_si512(), \
+ (__mmask8)(U)); })
+
+#define _mm512_alignr_epi32(A, B, I) __extension__ ({ \
+ (__m512i)__builtin_ia32_alignd512_mask((__v16si)(__m512i)(A), \
+ (__v16si)(__m512i)(B), (int)(I), \
+ (__v16si)_mm512_setzero_si512(), \
+ (__mmask16)-1); })
+
+#define _mm512_mask_alignr_epi32(W, U, A, B, imm) __extension__ ({\
+ (__m512i)__builtin_ia32_alignd512_mask((__v16si)(__m512i)(A), \
+ (__v16si)(__m512i)(B), (int)(imm), \
+ (__v16si)(__m512i)(W), \
+ (__mmask16)(U)); })
+
+#define _mm512_maskz_alignr_epi32(U, A, B, imm) __extension__({\
+ (__m512i)__builtin_ia32_alignd512_mask((__v16si)(__m512i)(A), \
+ (__v16si)(__m512i)(B), (int)(imm), \
+ (__v16si)_mm512_setzero_si512(), \
+ (__mmask16)(U)); })
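+
+/* Usage sketch (illustrative): per the VALIGND/VALIGNQ definition, the
+ * two sources are concatenated with A supplying the upper elements and
+ * B the lower, the whole is shifted right by I elements, and the low
+ * half is returned; e.g. _mm512_alignr_epi64(a, b, 1) yields
+ * { b[1]..b[7], a[0] }.
+ */
+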
+/* Vector Extract */
+
+#define _mm512_extractf64x4_pd(A, I) __extension__ ({ \
+  (__m256d)__builtin_ia32_extractf64x4_mask((__v8df)(__m512d)(A), (int)(I), \
+                                            (__v4df)_mm256_setzero_pd(), \
+                                            (__mmask8)-1); })
+
+#define _mm512_mask_extractf64x4_pd(W, U, A, imm) __extension__ ({\
+ (__m256d)__builtin_ia32_extractf64x4_mask((__v8df)(__m512d)(A), (int)(imm), \
+ (__v4df)(__m256d)(W), \
+ (__mmask8)(U)); })
+
+#define _mm512_maskz_extractf64x4_pd(U, A, imm) __extension__ ({\
+ (__m256d)__builtin_ia32_extractf64x4_mask((__v8df)(__m512d)(A), (int)(imm), \
+ (__v4df)_mm256_setzero_pd(), \
+ (__mmask8)(U)); })
+
+#define _mm512_extractf32x4_ps(A, I) __extension__ ({ \
+ (__m128)__builtin_ia32_extractf32x4_mask((__v16sf)(__m512)(A), (int)(I), \
+ (__v4sf)_mm_setzero_ps(), \
+ (__mmask8)-1); })
+
+#define _mm512_mask_extractf32x4_ps(W, U, A, imm) __extension__ ({\
+ (__m128)__builtin_ia32_extractf32x4_mask((__v16sf)(__m512)(A), (int)(imm), \
+ (__v4sf)(__m128)(W), \
+ (__mmask8)(U)); })
+
+#define _mm512_maskz_extractf32x4_ps(U, A, imm) __extension__ ({\
+ (__m128)__builtin_ia32_extractf32x4_mask((__v16sf)(__m512)(A), (int)(imm), \
+ (__v4sf)_mm_setzero_ps(), \
+ (__mmask8)(U)); })
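+
+/* Usage sketch (illustrative): I selects one 128-bit lane of the
+ * source, so _mm512_extractf32x4_ps(v, 0) returns the low four floats
+ * and _mm512_extractf64x4_pd(v, 1) the upper four doubles.
+ */
+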
+/* Vector Blend */
+
+static __inline __m512d __DEFAULT_FN_ATTRS
+_mm512_mask_blend_pd(__mmask8 __U, __m512d __A, __m512d __W)
+{
+ return (__m512d) __builtin_ia32_selectpd_512 ((__mmask8) __U,
+ (__v8df) __W,
+ (__v8df) __A);
+}
+
+static __inline __m512 __DEFAULT_FN_ATTRS
+_mm512_mask_blend_ps(__mmask16 __U, __m512 __A, __m512 __W)
+{
+ return (__m512) __builtin_ia32_selectps_512 ((__mmask16) __U,
+ (__v16sf) __W,
+ (__v16sf) __A);
+}
+
+static __inline __m512i __DEFAULT_FN_ATTRS
+_mm512_mask_blend_epi64(__mmask8 __U, __m512i __A, __m512i __W)
+{
+ return (__m512i) __builtin_ia32_selectq_512 ((__mmask8) __U,
+ (__v8di) __W,
+ (__v8di) __A);
+}
+
+static __inline __m512i __DEFAULT_FN_ATTRS
+_mm512_mask_blend_epi32(__mmask16 __U, __m512i __A, __m512i __W)
+{
+ return (__m512i) __builtin_ia32_selectd_512 ((__mmask16) __U,
+ (__v16si) __W,
+ (__v16si) __A);
+}
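+
+/* Usage sketch (illustrative): element i of the result comes from __W
+ * where bit i of __U is set and from __A otherwise.  Assuming
+ * _mm512_set1_ps() from earlier in this header:
+ *
+ *   __m512 r = _mm512_mask_blend_ps((__mmask16)0x00FF,
+ *                                   _mm512_set1_ps(1.0f),   // mask clear
+ *                                   _mm512_set1_ps(2.0f));  // mask set
+ *   // low 8 lanes of r are 2.0f, high 8 lanes are 1.0f
+ */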
+
+/* Compare */
+
+#define _mm512_cmp_round_ps_mask(A, B, P, R) __extension__ ({ \
+ (__mmask16)__builtin_ia32_cmpps512_mask((__v16sf)(__m512)(A), \
+ (__v16sf)(__m512)(B), (int)(P), \
+ (__mmask16)-1, (int)(R)); })
+
+#define _mm512_mask_cmp_round_ps_mask(U, A, B, P, R) __extension__ ({ \
+ (__mmask16)__builtin_ia32_cmpps512_mask((__v16sf)(__m512)(A), \
+ (__v16sf)(__m512)(B), (int)(P), \
+ (__mmask16)(U), (int)(R)); })
+
+#define _mm512_cmp_ps_mask(A, B, P) \
+ _mm512_cmp_round_ps_mask((A), (B), (P), _MM_FROUND_CUR_DIRECTION)
+
+#define _mm512_mask_cmp_ps_mask(U, A, B, P) \
+ _mm512_mask_cmp_round_ps_mask((U), (A), (B), (P), _MM_FROUND_CUR_DIRECTION)
+
+#define _mm512_cmp_round_pd_mask(A, B, P, R) __extension__ ({ \
+ (__mmask8)__builtin_ia32_cmppd512_mask((__v8df)(__m512d)(A), \
+ (__v8df)(__m512d)(B), (int)(P), \
+ (__mmask8)-1, (int)(R)); })
+
+#define _mm512_mask_cmp_round_pd_mask(U, A, B, P, R) __extension__ ({ \
+ (__mmask8)__builtin_ia32_cmppd512_mask((__v8df)(__m512d)(A), \
+ (__v8df)(__m512d)(B), (int)(P), \
+ (__mmask8)(U), (int)(R)); })
+
+#define _mm512_cmp_pd_mask(A, B, P) \
+ _mm512_cmp_round_pd_mask((A), (B), (P), _MM_FROUND_CUR_DIRECTION)
+
+#define _mm512_mask_cmp_pd_mask(U, A, B, P) \
+ _mm512_mask_cmp_round_pd_mask((U), (A), (B), (P), _MM_FROUND_CUR_DIRECTION)
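+
+/* Usage sketch (illustrative): P is one of the _CMP_* predicates
+ * (defined in avxintrin.h and reached via immintrin.h -- an assumption
+ * about the include order).  Bit i of the result is set where the
+ * predicate holds:
+ *
+ *   __mmask16 k = _mm512_cmp_ps_mask(a, b, _CMP_LT_OS);  // a[i] < b[i]
+ */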
+
+/* Conversion */
+
+#define _mm512_cvtt_roundps_epu32(A, R) __extension__ ({ \
+ (__m512i)__builtin_ia32_cvttps2udq512_mask((__v16sf)(__m512)(A), \
+ (__v16si)_mm512_undefined_epi32(), \
+ (__mmask16)-1, (int)(R)); })
+
+#define _mm512_mask_cvtt_roundps_epu32(W, U, A, R) __extension__ ({ \
+ (__m512i)__builtin_ia32_cvttps2udq512_mask((__v16sf)(__m512)(A), \
+ (__v16si)(__m512i)(W), \
+ (__mmask16)(U), (int)(R)); })
+
+#define _mm512_maskz_cvtt_roundps_epu32(U, A, R) __extension__ ({ \
+ (__m512i)__builtin_ia32_cvttps2udq512_mask((__v16sf)(__m512)(A), \
+ (__v16si)_mm512_setzero_si512(), \
+ (__mmask16)(U), (int)(R)); })
+
+
+static __inline __m512i __DEFAULT_FN_ATTRS
+_mm512_cvttps_epu32(__m512 __A)
+{
+ return (__m512i) __builtin_ia32_cvttps2udq512_mask ((__v16sf) __A,
+ (__v16si)
+ _mm512_setzero_si512 (),
+ (__mmask16) -1,
+ _MM_FROUND_CUR_DIRECTION);
+}
+
+static __inline__ __m512i __DEFAULT_FN_ATTRS
+_mm512_mask_cvttps_epu32 (__m512i __W, __mmask16 __U, __m512 __A)
+{
+ return (__m512i) __builtin_ia32_cvttps2udq512_mask ((__v16sf) __A,
+ (__v16si) __W,
+ (__mmask16) __U,
+ _MM_FROUND_CUR_DIRECTION);
+}
+
+static __inline__ __m512i __DEFAULT_FN_ATTRS
+_mm512_maskz_cvttps_epu32 (__mmask16 __U, __m512 __A)
+{
+ return (__m512i) __builtin_ia32_cvttps2udq512_mask ((__v16sf) __A,
+ (__v16si) _mm512_setzero_si512 (),
+ (__mmask16) __U,
+ _MM_FROUND_CUR_DIRECTION);
+}
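+
+/* Usage sketch (illustrative): the cvtt forms truncate toward zero,
+ * while the plain cvt forms below honor the rounding mode; e.g. 3.7f
+ * converts to 3 through _mm512_cvttps_epu32() but to 4 through
+ * _mm512_cvtps_epu32() under the default round-to-nearest.
+ */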
+
+#define _mm512_cvt_roundepi32_ps(A, R) __extension__ ({ \
+ (__m512)__builtin_ia32_cvtdq2ps512_mask((__v16si)(__m512i)(A), \
+ (__v16sf)_mm512_setzero_ps(), \
+ (__mmask16)-1, (int)(R)); })
+
+#define _mm512_mask_cvt_roundepi32_ps(W, U, A, R) __extension__ ({ \
+ (__m512)__builtin_ia32_cvtdq2ps512_mask((__v16si)(__m512i)(A), \
+ (__v16sf)(__m512)(W), \
+ (__mmask16)(U), (int)(R)); })
+
+#define _mm512_maskz_cvt_roundepi32_ps(U, A, R) __extension__ ({ \
+ (__m512)__builtin_ia32_cvtdq2ps512_mask((__v16si)(__m512i)(A), \
+ (__v16sf)_mm512_setzero_ps(), \
+ (__mmask16)(U), (int)(R)); })
+
+#define _mm512_cvt_roundepu32_ps(A, R) __extension__ ({ \
+ (__m512)__builtin_ia32_cvtudq2ps512_mask((__v16si)(__m512i)(A), \
+ (__v16sf)_mm512_setzero_ps(), \
+ (__mmask16)-1, (int)(R)); })
+
+#define _mm512_mask_cvt_roundepu32_ps(W, U, A, R) __extension__ ({ \
+ (__m512)__builtin_ia32_cvtudq2ps512_mask((__v16si)(__m512i)(A), \
+ (__v16sf)(__m512)(W), \
+ (__mmask16)(U), (int)(R)); })
+
+#define _mm512_maskz_cvt_roundepu32_ps(U, A, R) __extension__ ({ \
+ (__m512)__builtin_ia32_cvtudq2ps512_mask((__v16si)(__m512i)(A), \
+ (__v16sf)_mm512_setzero_ps(), \
+ (__mmask16)(U), (int)(R)); })
+
+static __inline__ __m512 __DEFAULT_FN_ATTRS
+_mm512_cvtepu32_ps (__m512i __A)
+{
+ return (__m512) __builtin_ia32_cvtudq2ps512_mask ((__v16si) __A,
+ (__v16sf) _mm512_undefined_ps (),
+ (__mmask16) -1,
+ _MM_FROUND_CUR_DIRECTION);
+}
+
+static __inline__ __m512 __DEFAULT_FN_ATTRS
+_mm512_mask_cvtepu32_ps (__m512 __W, __mmask16 __U, __m512i __A)
+{
+ return (__m512) __builtin_ia32_cvtudq2ps512_mask ((__v16si) __A,
+ (__v16sf) __W,
+ (__mmask16) __U,
+ _MM_FROUND_CUR_DIRECTION);
+}
+
+static __inline__ __m512 __DEFAULT_FN_ATTRS
+_mm512_maskz_cvtepu32_ps (__mmask16 __U, __m512i __A)
+{
+ return (__m512) __builtin_ia32_cvtudq2ps512_mask ((__v16si) __A,
+ (__v16sf) _mm512_setzero_ps (),
+ (__mmask16) __U,
+ _MM_FROUND_CUR_DIRECTION);
+}
+
+static __inline __m512d __DEFAULT_FN_ATTRS
+_mm512_cvtepi32_pd(__m256i __A)
+{
+ return (__m512d) __builtin_ia32_cvtdq2pd512_mask ((__v8si) __A,
+ (__v8df)
+ _mm512_setzero_pd (),
+ (__mmask8) -1);
+}
+
+static __inline__ __m512d __DEFAULT_FN_ATTRS
+_mm512_mask_cvtepi32_pd (__m512d __W, __mmask8 __U, __m256i __A)
+{
+ return (__m512d) __builtin_ia32_cvtdq2pd512_mask ((__v8si) __A,
+ (__v8df) __W,
+ (__mmask8) __U);
+}
+
+static __inline__ __m512d __DEFAULT_FN_ATTRS
+_mm512_maskz_cvtepi32_pd (__mmask8 __U, __m256i __A)
+{
+ return (__m512d) __builtin_ia32_cvtdq2pd512_mask ((__v8si) __A,
+ (__v8df) _mm512_setzero_pd (),
+ (__mmask8) __U);
+}
+
+static __inline__ __m512 __DEFAULT_FN_ATTRS
+_mm512_cvtepi32_ps (__m512i __A)
+{
+ return (__m512) __builtin_ia32_cvtdq2ps512_mask ((__v16si) __A,
+ (__v16sf) _mm512_undefined_ps (),
+ (__mmask16) -1,
+ _MM_FROUND_CUR_DIRECTION);
+}
+
+static __inline__ __m512 __DEFAULT_FN_ATTRS
+_mm512_mask_cvtepi32_ps (__m512 __W, __mmask16 __U, __m512i __A)
+{
+ return (__m512) __builtin_ia32_cvtdq2ps512_mask ((__v16si) __A,
+ (__v16sf) __W,
+ (__mmask16) __U,
+ _MM_FROUND_CUR_DIRECTION);
+}
+
+static __inline__ __m512 __DEFAULT_FN_ATTRS
+_mm512_maskz_cvtepi32_ps (__mmask16 __U, __m512i __A)
+{
+ return (__m512) __builtin_ia32_cvtdq2ps512_mask ((__v16si) __A,
+ (__v16sf) _mm512_setzero_ps (),
+ (__mmask16) __U,
+ _MM_FROUND_CUR_DIRECTION);
+}
+
+static __inline __m512d __DEFAULT_FN_ATTRS
+_mm512_cvtepu32_pd(__m256i __A)
+{
+ return (__m512d) __builtin_ia32_cvtudq2pd512_mask ((__v8si) __A,
+ (__v8df)
+ _mm512_setzero_pd (),
+ (__mmask8) -1);
+}
+
+static __inline__ __m512d __DEFAULT_FN_ATTRS
+_mm512_mask_cvtepu32_pd (__m512d __W, __mmask8 __U, __m256i __A)
+{
+ return (__m512d) __builtin_ia32_cvtudq2pd512_mask ((__v8si) __A,
+ (__v8df) __W,
+ (__mmask8) __U);
+}
+
+static __inline__ __m512d __DEFAULT_FN_ATTRS
+_mm512_maskz_cvtepu32_pd (__mmask8 __U, __m256i __A)
+{
+ return (__m512d) __builtin_ia32_cvtudq2pd512_mask ((__v8si) __A,
+ (__v8df) _mm512_setzero_pd (),
+ (__mmask8) __U);
+}
+
+#define _mm512_cvt_roundpd_ps(A, R) __extension__ ({ \
+ (__m256)__builtin_ia32_cvtpd2ps512_mask((__v8df)(__m512d)(A), \
+ (__v8sf)_mm256_setzero_ps(), \
+ (__mmask8)-1, (int)(R)); })
+
+#define _mm512_mask_cvt_roundpd_ps(W, U, A, R) __extension__ ({ \
+ (__m256)__builtin_ia32_cvtpd2ps512_mask((__v8df)(__m512d)(A), \
+ (__v8sf)(__m256)(W), (__mmask8)(U), \
+ (int)(R)); })
+
+#define _mm512_maskz_cvt_roundpd_ps(U, A, R) __extension__ ({ \
+ (__m256)__builtin_ia32_cvtpd2ps512_mask((__v8df)(__m512d)(A), \
+ (__v8sf)_mm256_setzero_ps(), \
+ (__mmask8)(U), (int)(R)); })
+
+static __inline__ __m256 __DEFAULT_FN_ATTRS
+_mm512_cvtpd_ps (__m512d __A)
+{
+ return (__m256) __builtin_ia32_cvtpd2ps512_mask ((__v8df) __A,
+ (__v8sf) _mm256_undefined_ps (),
+ (__mmask8) -1,
+ _MM_FROUND_CUR_DIRECTION);
+}
+
+static __inline__ __m256 __DEFAULT_FN_ATTRS
+_mm512_mask_cvtpd_ps (__m256 __W, __mmask8 __U, __m512d __A)
+{
+ return (__m256) __builtin_ia32_cvtpd2ps512_mask ((__v8df) __A,
+ (__v8sf) __W,
+ (__mmask8) __U,
+ _MM_FROUND_CUR_DIRECTION);
+}
+
+static __inline__ __m256 __DEFAULT_FN_ATTRS
+_mm512_maskz_cvtpd_ps (__mmask8 __U, __m512d __A)
+{
+ return (__m256) __builtin_ia32_cvtpd2ps512_mask ((__v8df) __A,
+ (__v8sf) _mm256_setzero_ps (),
+ (__mmask8) __U,
+ _MM_FROUND_CUR_DIRECTION);
+}
+
+#define _mm512_cvt_roundps_ph(A, I) __extension__ ({ \
+ (__m256i)__builtin_ia32_vcvtps2ph512_mask((__v16sf)(__m512)(A), (int)(I), \
+ (__v16hi)_mm256_undefined_si256(), \
+ (__mmask16)-1); })
+
+#define _mm512_mask_cvt_roundps_ph(W, U, A, I) __extension__ ({ \
+  (__m256i)__builtin_ia32_vcvtps2ph512_mask((__v16sf)(__m512)(A), (int)(I), \
+                                            (__v16hi)(__m256i)(W), \
+                                            (__mmask16)(U)); })
+
+#define _mm512_maskz_cvt_roundps_ph(U, A, I) __extension__ ({ \
+  (__m256i)__builtin_ia32_vcvtps2ph512_mask((__v16sf)(__m512)(A), (int)(I), \
+                                            (__v16hi)_mm256_setzero_si256(), \
+                                            (__mmask16)(U)); })
+
+#define _mm512_cvtps_ph(A, I) __extension__ ({ \
+  (__m256i)__builtin_ia32_vcvtps2ph512_mask((__v16sf)(__m512)(A), (int)(I), \
+                                            (__v16hi)_mm256_setzero_si256(), \
+                                            (__mmask16)-1); })
+
+#define _mm512_mask_cvtps_ph(W, U, A, I) __extension__ ({ \
+  (__m256i)__builtin_ia32_vcvtps2ph512_mask((__v16sf)(__m512)(A), (int)(I), \
+                                            (__v16hi)(__m256i)(W), \
+                                            (__mmask16)(U)); })
+
+#define _mm512_maskz_cvtps_ph(U, A, I) __extension__ ({ \
+  (__m256i)__builtin_ia32_vcvtps2ph512_mask((__v16sf)(__m512)(A), (int)(I), \
+                                            (__v16hi)_mm256_setzero_si256(), \
+                                            (__mmask16)(U)); })
+
+#define _mm512_cvt_roundph_ps(A, R) __extension__ ({ \
+ (__m512)__builtin_ia32_vcvtph2ps512_mask((__v16hi)(__m256i)(A), \
+ (__v16sf)_mm512_undefined_ps(), \
+ (__mmask16)-1, (int)(R)); })
+
+#define _mm512_mask_cvt_roundph_ps(W, U, A, R) __extension__ ({ \
+ (__m512)__builtin_ia32_vcvtph2ps512_mask((__v16hi)(__m256i)(A), \
+ (__v16sf)(__m512)(W), \
+ (__mmask16)(U), (int)(R)); })
+
+#define _mm512_maskz_cvt_roundph_ps(U, A, R) __extension__ ({ \
+ (__m512)__builtin_ia32_vcvtph2ps512_mask((__v16hi)(__m256i)(A), \
+ (__v16sf)_mm512_setzero_ps(), \
+ (__mmask16)(U), (int)(R)); })
+
+
+static __inline __m512 __DEFAULT_FN_ATTRS
+_mm512_cvtph_ps(__m256i __A)
+{
+ return (__m512) __builtin_ia32_vcvtph2ps512_mask ((__v16hi) __A,
+ (__v16sf)
+ _mm512_setzero_ps (),
+ (__mmask16) -1,
+ _MM_FROUND_CUR_DIRECTION);
+}
+
+static __inline__ __m512 __DEFAULT_FN_ATTRS
+_mm512_mask_cvtph_ps (__m512 __W, __mmask16 __U, __m256i __A)
+{
+ return (__m512) __builtin_ia32_vcvtph2ps512_mask ((__v16hi) __A,
+ (__v16sf) __W,
+ (__mmask16) __U,
+ _MM_FROUND_CUR_DIRECTION);
+}
+
+static __inline__ __m512 __DEFAULT_FN_ATTRS
+_mm512_maskz_cvtph_ps (__mmask16 __U, __m256i __A)
+{
+ return (__m512) __builtin_ia32_vcvtph2ps512_mask ((__v16hi) __A,
+ (__v16sf) _mm512_setzero_ps (),
+ (__mmask16) __U,
+ _MM_FROUND_CUR_DIRECTION);
+}
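+
+/* Usage sketch (illustrative): a float -> half -> float round trip,
+ * assuming _MM_FROUND_TO_NEAREST_INT from the SSE4.1 headers; values
+ * outside half precision lose accuracy:
+ *
+ *   __m256i h = _mm512_cvtps_ph(v, _MM_FROUND_TO_NEAREST_INT);
+ *   __m512  w = _mm512_cvtph_ps(h);
+ */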
+
+#define _mm512_cvtt_roundpd_epi32(A, R) __extension__ ({ \
+ (__m256i)__builtin_ia32_cvttpd2dq512_mask((__v8df)(__m512d)(A), \
+ (__v8si)_mm256_setzero_si256(), \
+ (__mmask8)-1, (int)(R)); })
+
+#define _mm512_mask_cvtt_roundpd_epi32(W, U, A, R) __extension__ ({ \
+ (__m256i)__builtin_ia32_cvttpd2dq512_mask((__v8df)(__m512d)(A), \
+ (__v8si)(__m256i)(W), \
+ (__mmask8)(U), (int)(R)); })
+
+#define _mm512_maskz_cvtt_roundpd_epi32(U, A, R) __extension__ ({ \
+ (__m256i)__builtin_ia32_cvttpd2dq512_mask((__v8df)(__m512d)(A), \
+ (__v8si)_mm256_setzero_si256(), \
+ (__mmask8)(U), (int)(R)); })
+
+static __inline __m256i __DEFAULT_FN_ATTRS
+_mm512_cvttpd_epi32(__m512d __a)
+{
+ return (__m256i)__builtin_ia32_cvttpd2dq512_mask((__v8df) __a,
+ (__v8si)_mm256_setzero_si256(),
+ (__mmask8) -1,
+ _MM_FROUND_CUR_DIRECTION);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS
+_mm512_mask_cvttpd_epi32 (__m256i __W, __mmask8 __U, __m512d __A)
+{
+ return (__m256i) __builtin_ia32_cvttpd2dq512_mask ((__v8df) __A,
+ (__v8si) __W,
+ (__mmask8) __U,
+ _MM_FROUND_CUR_DIRECTION);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS
+_mm512_maskz_cvttpd_epi32 (__mmask8 __U, __m512d __A)
+{
+ return (__m256i) __builtin_ia32_cvttpd2dq512_mask ((__v8df) __A,
+ (__v8si) _mm256_setzero_si256 (),
+ (__mmask8) __U,
+ _MM_FROUND_CUR_DIRECTION);
+}
+
+#define _mm512_cvtt_roundps_epi32(A, R) __extension__ ({ \
+ (__m512i)__builtin_ia32_cvttps2dq512_mask((__v16sf)(__m512)(A), \
+ (__v16si)_mm512_setzero_si512(), \
+ (__mmask16)-1, (int)(R)); })
+
+#define _mm512_mask_cvtt_roundps_epi32(W, U, A, R) __extension__ ({ \
+ (__m512i)__builtin_ia32_cvttps2dq512_mask((__v16sf)(__m512)(A), \
+ (__v16si)(__m512i)(W), \
+ (__mmask16)(U), (int)(R)); })
+
+#define _mm512_maskz_cvtt_roundps_epi32(U, A, R) __extension__ ({ \
+ (__m512i)__builtin_ia32_cvttps2dq512_mask((__v16sf)(__m512)(A), \
+ (__v16si)_mm512_setzero_si512(), \
+ (__mmask16)(U), (int)(R)); })
+
+static __inline __m512i __DEFAULT_FN_ATTRS
+_mm512_cvttps_epi32(__m512 __a)
+{
+ return (__m512i)
+ __builtin_ia32_cvttps2dq512_mask((__v16sf) __a,
+ (__v16si) _mm512_setzero_si512 (),
+ (__mmask16) -1, _MM_FROUND_CUR_DIRECTION);
+}
+
+static __inline__ __m512i __DEFAULT_FN_ATTRS
+_mm512_mask_cvttps_epi32 (__m512i __W, __mmask16 __U, __m512 __A)
+{
+ return (__m512i) __builtin_ia32_cvttps2dq512_mask ((__v16sf) __A,
+ (__v16si) __W,
+ (__mmask16) __U,
+ _MM_FROUND_CUR_DIRECTION);
+}
+
+static __inline__ __m512i __DEFAULT_FN_ATTRS
+_mm512_maskz_cvttps_epi32 (__mmask16 __U, __m512 __A)
+{
+ return (__m512i) __builtin_ia32_cvttps2dq512_mask ((__v16sf) __A,
+ (__v16si) _mm512_setzero_si512 (),
+ (__mmask16) __U,
+ _MM_FROUND_CUR_DIRECTION);
+}
+
+#define _mm512_cvt_roundps_epi32(A, R) __extension__ ({ \
+ (__m512i)__builtin_ia32_cvtps2dq512_mask((__v16sf)(__m512)(A), \
+ (__v16si)_mm512_setzero_si512(), \
+ (__mmask16)-1, (int)(R)); })
+
+#define _mm512_mask_cvt_roundps_epi32(W, U, A, R) __extension__ ({ \
+ (__m512i)__builtin_ia32_cvtps2dq512_mask((__v16sf)(__m512)(A), \
+ (__v16si)(__m512i)(W), \
+ (__mmask16)(U), (int)(R)); })
+
+#define _mm512_maskz_cvt_roundps_epi32(U, A, R) __extension__ ({ \
+ (__m512i)__builtin_ia32_cvtps2dq512_mask((__v16sf)(__m512)(A), \
+ (__v16si)_mm512_setzero_si512(), \
+ (__mmask16)(U), (int)(R)); })
+
+static __inline__ __m512i __DEFAULT_FN_ATTRS
+_mm512_cvtps_epi32 (__m512 __A)
+{
+ return (__m512i) __builtin_ia32_cvtps2dq512_mask ((__v16sf) __A,
+ (__v16si) _mm512_undefined_epi32 (),
+ (__mmask16) -1,
+ _MM_FROUND_CUR_DIRECTION);
+}
+
+static __inline__ __m512i __DEFAULT_FN_ATTRS
+_mm512_mask_cvtps_epi32 (__m512i __W, __mmask16 __U, __m512 __A)
+{
+ return (__m512i) __builtin_ia32_cvtps2dq512_mask ((__v16sf) __A,
+ (__v16si) __W,
+ (__mmask16) __U,
+ _MM_FROUND_CUR_DIRECTION);
+}
+
+static __inline__ __m512i __DEFAULT_FN_ATTRS
+_mm512_maskz_cvtps_epi32 (__mmask16 __U, __m512 __A)
+{
+ return (__m512i) __builtin_ia32_cvtps2dq512_mask ((__v16sf) __A,
+ (__v16si)
+ _mm512_setzero_si512 (),
+ (__mmask16) __U,
+ _MM_FROUND_CUR_DIRECTION);
+}
+
+#define _mm512_cvt_roundpd_epi32(A, R) __extension__ ({ \
+ (__m256i)__builtin_ia32_cvtpd2dq512_mask((__v8df)(__m512d)(A), \
+ (__v8si)_mm256_setzero_si256(), \
+ (__mmask8)-1, (int)(R)); })
+
+#define _mm512_mask_cvt_roundpd_epi32(W, U, A, R) __extension__ ({ \
+ (__m256i)__builtin_ia32_cvtpd2dq512_mask((__v8df)(__m512d)(A), \
+ (__v8si)(__m256i)(W), \
+ (__mmask8)(U), (int)(R)); })
+
+#define _mm512_maskz_cvt_roundpd_epi32(U, A, R) __extension__ ({ \
+ (__m256i)__builtin_ia32_cvtpd2dq512_mask((__v8df)(__m512d)(A), \
+ (__v8si)_mm256_setzero_si256(), \
+ (__mmask8)(U), (int)(R)); })
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS
+_mm512_cvtpd_epi32 (__m512d __A)
+{
+ return (__m256i) __builtin_ia32_cvtpd2dq512_mask ((__v8df) __A,
+ (__v8si)
+ _mm256_undefined_si256 (),
+ (__mmask8) -1,
+ _MM_FROUND_CUR_DIRECTION);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS
+_mm512_mask_cvtpd_epi32 (__m256i __W, __mmask8 __U, __m512d __A)
+{
+ return (__m256i) __builtin_ia32_cvtpd2dq512_mask ((__v8df) __A,
+ (__v8si) __W,
+ (__mmask8) __U,
+ _MM_FROUND_CUR_DIRECTION);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS
+_mm512_maskz_cvtpd_epi32 (__mmask8 __U, __m512d __A)
+{
+ return (__m256i) __builtin_ia32_cvtpd2dq512_mask ((__v8df) __A,
+ (__v8si)
+ _mm256_setzero_si256 (),
+ (__mmask8) __U,
+ _MM_FROUND_CUR_DIRECTION);
+}
+
+#define _mm512_cvt_roundps_epu32(A, R) __extension__ ({ \
+ (__m512i)__builtin_ia32_cvtps2udq512_mask((__v16sf)(__m512)(A), \
+ (__v16si)_mm512_setzero_si512(), \
+ (__mmask16)-1, (int)(R)); })
+
+#define _mm512_mask_cvt_roundps_epu32(W, U, A, R) __extension__ ({ \
+ (__m512i)__builtin_ia32_cvtps2udq512_mask((__v16sf)(__m512)(A), \
+ (__v16si)(__m512i)(W), \
+ (__mmask16)(U), (int)(R)); })
+
+#define _mm512_maskz_cvt_roundps_epu32(U, A, R) __extension__ ({ \
+ (__m512i)__builtin_ia32_cvtps2udq512_mask((__v16sf)(__m512)(A), \
+ (__v16si)_mm512_setzero_si512(), \
+ (__mmask16)(U), (int)(R)); })
+
+static __inline__ __m512i __DEFAULT_FN_ATTRS
+_mm512_cvtps_epu32 (__m512 __A)
+{
+  return (__m512i) __builtin_ia32_cvtps2udq512_mask ((__v16sf) __A,
+                  (__v16si)
+                  _mm512_undefined_epi32 (),
+                  (__mmask16) -1,
+                  _MM_FROUND_CUR_DIRECTION);
+}
+
+static __inline__ __m512i __DEFAULT_FN_ATTRS
+_mm512_mask_cvtps_epu32 (__m512i __W, __mmask16 __U, __m512 __A)
+{
+ return (__m512i) __builtin_ia32_cvtps2udq512_mask ((__v16sf) __A,
+ (__v16si) __W,
+ (__mmask16) __U,
+ _MM_FROUND_CUR_DIRECTION);
+}
+
+static __inline__ __m512i __DEFAULT_FN_ATTRS
+_mm512_maskz_cvtps_epu32 (__mmask16 __U, __m512 __A)
+{
+  return (__m512i) __builtin_ia32_cvtps2udq512_mask ((__v16sf) __A,
+                  (__v16si)
+                  _mm512_setzero_si512 (),
+                  (__mmask16) __U,
+                  _MM_FROUND_CUR_DIRECTION);
+}
+
+#define _mm512_cvt_roundpd_epu32(A, R) __extension__ ({ \
+ (__m256i)__builtin_ia32_cvtpd2udq512_mask((__v8df)(__m512d)(A), \
+ (__v8si)_mm256_setzero_si256(), \
+ (__mmask8)-1, (int)(R)); })
+
+#define _mm512_mask_cvt_roundpd_epu32(W, U, A, R) __extension__ ({ \
+  (__m256i)__builtin_ia32_cvtpd2udq512_mask((__v8df)(__m512d)(A), \
+                                            (__v8si)(__m256i)(W), \
+                                            (__mmask8)(U), (int)(R)); })
+
+#define _mm512_maskz_cvt_roundpd_epu32(U, A, R) __extension__ ({ \
+ (__m256i)__builtin_ia32_cvtpd2udq512_mask((__v8df)(__m512d)(A), \
+ (__v8si)_mm256_setzero_si256(), \
+ (__mmask8)(U), (int)(R)); })
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS
+_mm512_cvtpd_epu32 (__m512d __A)
+{
+ return (__m256i) __builtin_ia32_cvtpd2udq512_mask ((__v8df) __A,
+ (__v8si)
+ _mm256_undefined_si256 (),
+ (__mmask8) -1,
+ _MM_FROUND_CUR_DIRECTION);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS
+_mm512_mask_cvtpd_epu32 (__m256i __W, __mmask8 __U, __m512d __A)
+{
+ return (__m256i) __builtin_ia32_cvtpd2udq512_mask ((__v8df) __A,
+ (__v8si) __W,
+ (__mmask8) __U,
+ _MM_FROUND_CUR_DIRECTION);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS
+_mm512_maskz_cvtpd_epu32 (__mmask8 __U, __m512d __A)
+{
+ return (__m256i) __builtin_ia32_cvtpd2udq512_mask ((__v8df) __A,
+ (__v8si)
+ _mm256_setzero_si256 (),
+ (__mmask8) __U,
+ _MM_FROUND_CUR_DIRECTION);
+}
+
+/* Unpack and Interleave */
+
+static __inline __m512d __DEFAULT_FN_ATTRS
+_mm512_unpackhi_pd(__m512d __a, __m512d __b)
+{
+ return (__m512d)__builtin_shufflevector((__v8df)__a, (__v8df)__b,
+ 1, 9, 1+2, 9+2, 1+4, 9+4, 1+6, 9+6);
+}
+
+static __inline__ __m512d __DEFAULT_FN_ATTRS
+_mm512_mask_unpackhi_pd(__m512d __W, __mmask8 __U, __m512d __A, __m512d __B)
+{
+ return (__m512d)__builtin_ia32_selectpd_512((__mmask8) __U,
+ (__v8df)_mm512_unpackhi_pd(__A, __B),
+ (__v8df)__W);
+}
+
+static __inline__ __m512d __DEFAULT_FN_ATTRS
+_mm512_maskz_unpackhi_pd(__mmask8 __U, __m512d __A, __m512d __B)
+{
+ return (__m512d)__builtin_ia32_selectpd_512((__mmask8) __U,
+ (__v8df)_mm512_unpackhi_pd(__A, __B),
+ (__v8df)_mm512_setzero_pd());
+}
+
+static __inline __m512d __DEFAULT_FN_ATTRS
+_mm512_unpacklo_pd(__m512d __a, __m512d __b)
+{
+ return (__m512d)__builtin_shufflevector((__v8df)__a, (__v8df)__b,
+ 0, 8, 0+2, 8+2, 0+4, 8+4, 0+6, 8+6);
+}
+
+static __inline__ __m512d __DEFAULT_FN_ATTRS
+_mm512_mask_unpacklo_pd(__m512d __W, __mmask8 __U, __m512d __A, __m512d __B)
+{
+ return (__m512d)__builtin_ia32_selectpd_512((__mmask8) __U,
+ (__v8df)_mm512_unpacklo_pd(__A, __B),
+ (__v8df)__W);
+}
+
+static __inline__ __m512d __DEFAULT_FN_ATTRS
+_mm512_maskz_unpacklo_pd (__mmask8 __U, __m512d __A, __m512d __B)
+{
+ return (__m512d)__builtin_ia32_selectpd_512((__mmask8) __U,
+ (__v8df)_mm512_unpacklo_pd(__A, __B),
+ (__v8df)_mm512_setzero_pd());
+}
+
+static __inline __m512 __DEFAULT_FN_ATTRS
+_mm512_unpackhi_ps(__m512 __a, __m512 __b)
+{
+ return (__m512)__builtin_shufflevector((__v16sf)__a, (__v16sf)__b,
+ 2, 18, 3, 19,
+ 2+4, 18+4, 3+4, 19+4,
+ 2+8, 18+8, 3+8, 19+8,
+ 2+12, 18+12, 3+12, 19+12);
+}
+
+static __inline__ __m512 __DEFAULT_FN_ATTRS
+_mm512_mask_unpackhi_ps(__m512 __W, __mmask16 __U, __m512 __A, __m512 __B)
+{
+ return (__m512)__builtin_ia32_selectps_512((__mmask16) __U,
+ (__v16sf)_mm512_unpackhi_ps(__A, __B),
+ (__v16sf)__W);
+}
+
+static __inline__ __m512 __DEFAULT_FN_ATTRS
+_mm512_maskz_unpackhi_ps (__mmask16 __U, __m512 __A, __m512 __B)
+{
+ return (__m512)__builtin_ia32_selectps_512((__mmask16) __U,
+ (__v16sf)_mm512_unpackhi_ps(__A, __B),
+ (__v16sf)_mm512_setzero_ps());
+}
+
+static __inline __m512 __DEFAULT_FN_ATTRS
+_mm512_unpacklo_ps(__m512 __a, __m512 __b)
+{
+ return (__m512)__builtin_shufflevector((__v16sf)__a, (__v16sf)__b,
+ 0, 16, 1, 17,
+ 0+4, 16+4, 1+4, 17+4,
+ 0+8, 16+8, 1+8, 17+8,
+ 0+12, 16+12, 1+12, 17+12);
+}
+
+static __inline__ __m512 __DEFAULT_FN_ATTRS
+_mm512_mask_unpacklo_ps(__m512 __W, __mmask16 __U, __m512 __A, __m512 __B)
+{
+ return (__m512)__builtin_ia32_selectps_512((__mmask16) __U,
+ (__v16sf)_mm512_unpacklo_ps(__A, __B),
+ (__v16sf)__W);
+}
+
+static __inline__ __m512 __DEFAULT_FN_ATTRS
+_mm512_maskz_unpacklo_ps (__mmask16 __U, __m512 __A, __m512 __B)
+{
+ return (__m512)__builtin_ia32_selectps_512((__mmask16) __U,
+ (__v16sf)_mm512_unpacklo_ps(__A, __B),
+ (__v16sf)_mm512_setzero_ps());
+}
+
+static __inline__ __m512i __DEFAULT_FN_ATTRS
+_mm512_unpackhi_epi32(__m512i __A, __m512i __B)
+{
+ return (__m512i)__builtin_shufflevector((__v16si)__A, (__v16si)__B,
+ 2, 18, 3, 19,
+ 2+4, 18+4, 3+4, 19+4,
+ 2+8, 18+8, 3+8, 19+8,
+ 2+12, 18+12, 3+12, 19+12);
+}
+
+static __inline__ __m512i __DEFAULT_FN_ATTRS
+_mm512_mask_unpackhi_epi32(__m512i __W, __mmask16 __U, __m512i __A, __m512i __B)
+{
+ return (__m512i)__builtin_ia32_selectd_512((__mmask16) __U,
+ (__v16si)_mm512_unpackhi_epi32(__A, __B),
+ (__v16si)__W);
+}
+
+static __inline__ __m512i __DEFAULT_FN_ATTRS
+_mm512_maskz_unpackhi_epi32(__mmask16 __U, __m512i __A, __m512i __B)
+{
+ return (__m512i)__builtin_ia32_selectd_512((__mmask16) __U,
+ (__v16si)_mm512_unpackhi_epi32(__A, __B),
+ (__v16si)_mm512_setzero_si512());
+}
+
+static __inline__ __m512i __DEFAULT_FN_ATTRS
+_mm512_unpacklo_epi32(__m512i __A, __m512i __B)
+{
+ return (__m512i)__builtin_shufflevector((__v16si)__A, (__v16si)__B,
+ 0, 16, 1, 17,
+ 0+4, 16+4, 1+4, 17+4,
+ 0+8, 16+8, 1+8, 17+8,
+ 0+12, 16+12, 1+12, 17+12);
+}
+
+static __inline__ __m512i __DEFAULT_FN_ATTRS
+_mm512_mask_unpacklo_epi32(__m512i __W, __mmask16 __U, __m512i __A, __m512i __B)
+{
+ return (__m512i)__builtin_ia32_selectd_512((__mmask16) __U,
+ (__v16si)_mm512_unpacklo_epi32(__A, __B),
+ (__v16si)__W);
+}
+
+static __inline__ __m512i __DEFAULT_FN_ATTRS
+_mm512_maskz_unpacklo_epi32(__mmask16 __U, __m512i __A, __m512i __B)
+{
+ return (__m512i)__builtin_ia32_selectd_512((__mmask16) __U,
+ (__v16si)_mm512_unpacklo_epi32(__A, __B),
+ (__v16si)_mm512_setzero_si512());
+}
+
+static __inline__ __m512i __DEFAULT_FN_ATTRS
+_mm512_unpackhi_epi64(__m512i __A, __m512i __B)
+{
+ return (__m512i)__builtin_shufflevector((__v8di)__A, (__v8di)__B,
+ 1, 9, 1+2, 9+2, 1+4, 9+4, 1+6, 9+6);
+}
+
+static __inline__ __m512i __DEFAULT_FN_ATTRS
+_mm512_mask_unpackhi_epi64(__m512i __W, __mmask8 __U, __m512i __A, __m512i __B)
+{
+ return (__m512i)__builtin_ia32_selectq_512((__mmask8) __U,
+ (__v8di)_mm512_unpackhi_epi64(__A, __B),
+ (__v8di)__W);
+}
+
+static __inline__ __m512i __DEFAULT_FN_ATTRS
+_mm512_maskz_unpackhi_epi64(__mmask8 __U, __m512i __A, __m512i __B)
+{
+ return (__m512i)__builtin_ia32_selectq_512((__mmask8) __U,
+ (__v8di)_mm512_unpackhi_epi64(__A, __B),
+ (__v8di)_mm512_setzero_si512());
+}
+
+static __inline__ __m512i __DEFAULT_FN_ATTRS
+_mm512_unpacklo_epi64 (__m512i __A, __m512i __B)
+{
+ return (__m512i)__builtin_shufflevector((__v8di)__A, (__v8di)__B,
+ 0, 8, 0+2, 8+2, 0+4, 8+4, 0+6, 8+6);
+}
+
+static __inline__ __m512i __DEFAULT_FN_ATTRS
+_mm512_mask_unpacklo_epi64 (__m512i __W, __mmask8 __U, __m512i __A, __m512i __B)
+{
+ return (__m512i)__builtin_ia32_selectq_512((__mmask8) __U,
+ (__v8di)_mm512_unpacklo_epi64(__A, __B),
+ (__v8di)__W);
+}
+
+static __inline__ __m512i __DEFAULT_FN_ATTRS
+_mm512_maskz_unpacklo_epi64 (__mmask8 __U, __m512i __A, __m512i __B)
+{
+ return (__m512i)__builtin_ia32_selectq_512((__mmask8) __U,
+ (__v8di)_mm512_unpacklo_epi64(__A, __B),
+ (__v8di)_mm512_setzero_si512());
+}
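+
+/* Usage sketch (illustrative): the unpack forms interleave within each
+ * 128-bit lane, never across lanes; e.g. _mm512_unpackhi_pd(a, b)
+ * yields { a[1],b[1], a[3],b[3], a[5],b[5], a[7],b[7] }.
+ */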
+
+/* Bit Test */
+
+static __inline __mmask16 __DEFAULT_FN_ATTRS
+_mm512_test_epi32_mask(__m512i __A, __m512i __B)
+{
+ return (__mmask16) __builtin_ia32_ptestmd512 ((__v16si) __A,
+ (__v16si) __B,
+ (__mmask16) -1);
+}
+
+static __inline__ __mmask16 __DEFAULT_FN_ATTRS
+_mm512_mask_test_epi32_mask (__mmask16 __U, __m512i __A, __m512i __B)
+{
+ return (__mmask16) __builtin_ia32_ptestmd512 ((__v16si) __A,
+ (__v16si) __B, __U);
+}
+
+static __inline __mmask8 __DEFAULT_FN_ATTRS
+_mm512_test_epi64_mask(__m512i __A, __m512i __B)
+{
+ return (__mmask8) __builtin_ia32_ptestmq512 ((__v8di) __A,
+ (__v8di) __B,
+ (__mmask8) -1);
+}
+
+static __inline__ __mmask8 __DEFAULT_FN_ATTRS
+_mm512_mask_test_epi64_mask (__mmask8 __U, __m512i __A, __m512i __B)
+{
+ return (__mmask8) __builtin_ia32_ptestmq512 ((__v8di) __A, (__v8di) __B, __U);
+}
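+
+/* Usage sketch (illustrative): bit i of the result is set when
+ * __A[i] & __B[i] is nonzero, so testing against a vector of ones
+ * flags the odd elements.  Assuming _mm512_set1_epi32() from earlier
+ * in this header:
+ *
+ *   __mmask16 odd = _mm512_test_epi32_mask(v, _mm512_set1_epi32(1));
+ */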
+
+
+/* SIMD load ops */
+
+static __inline __m512i __DEFAULT_FN_ATTRS
+_mm512_loadu_si512 (void const *__P)
+{
+ return (__m512i) __builtin_ia32_loaddqusi512_mask ((const int *) __P,
+ (__v16si)
+ _mm512_setzero_si512 (),
+ (__mmask16) -1);
+}
+
+static __inline __m512i __DEFAULT_FN_ATTRS
+_mm512_mask_loadu_epi32 (__m512i __W, __mmask16 __U, void const *__P)
+{
+ return (__m512i) __builtin_ia32_loaddqusi512_mask ((const int *) __P,
+ (__v16si) __W,
+ (__mmask16) __U);
+}
+
+
+static __inline __m512i __DEFAULT_FN_ATTRS
+_mm512_maskz_loadu_epi32(__mmask16 __U, void const *__P)
+{
+ return (__m512i) __builtin_ia32_loaddqusi512_mask ((const int *)__P,
+ (__v16si)
+ _mm512_setzero_si512 (),
+ (__mmask16) __U);
+}
+
+static __inline __m512i __DEFAULT_FN_ATTRS
+_mm512_mask_loadu_epi64 (__m512i __W, __mmask8 __U, void const *__P)
+{
+ return (__m512i) __builtin_ia32_loaddqudi512_mask ((const long long *) __P,
+ (__v8di) __W,
+ (__mmask8) __U);
+}
+
+static __inline __m512i __DEFAULT_FN_ATTRS
+_mm512_maskz_loadu_epi64(__mmask8 __U, void const *__P)
+{
+ return (__m512i) __builtin_ia32_loaddqudi512_mask ((const long long *)__P,
+ (__v8di)
+ _mm512_setzero_si512 (),
+ (__mmask8) __U);
+}
+
+static __inline __m512 __DEFAULT_FN_ATTRS
+_mm512_mask_loadu_ps (__m512 __W, __mmask16 __U, void const *__P)
+{
+ return (__m512) __builtin_ia32_loadups512_mask ((const float *) __P,
+ (__v16sf) __W,
+ (__mmask16) __U);
+}
+
+static __inline __m512 __DEFAULT_FN_ATTRS
+_mm512_maskz_loadu_ps(__mmask16 __U, void const *__P)
+{
+ return (__m512) __builtin_ia32_loadups512_mask ((const float *)__P,
+ (__v16sf)
+ _mm512_setzero_ps (),
+ (__mmask16) __U);
+}
+
+static __inline __m512d __DEFAULT_FN_ATTRS
+_mm512_mask_loadu_pd (__m512d __W, __mmask8 __U, void const *__P)
+{
+ return (__m512d) __builtin_ia32_loadupd512_mask ((const double *) __P,
+ (__v8df) __W,
+ (__mmask8) __U);
+}
+
+static __inline __m512d __DEFAULT_FN_ATTRS
+_mm512_maskz_loadu_pd(__mmask8 __U, void const *__P)
+{
+ return (__m512d) __builtin_ia32_loadupd512_mask ((const double *)__P,
+ (__v8df)
+ _mm512_setzero_pd (),
+ (__mmask8) __U);
+}
+
+static __inline __m512d __DEFAULT_FN_ATTRS
+_mm512_loadu_pd(double const *__p)
+{
+ struct __loadu_pd {
+ __m512d __v;
+ } __attribute__((__packed__, __may_alias__));
+ return ((struct __loadu_pd*)__p)->__v;
+}
+
+static __inline __m512 __DEFAULT_FN_ATTRS
+_mm512_loadu_ps(float const *__p)
+{
+ struct __loadu_ps {
+ __m512 __v;
+ } __attribute__((__packed__, __may_alias__));
+ return ((struct __loadu_ps*)__p)->__v;
+}
+
+static __inline __m512 __DEFAULT_FN_ATTRS
+_mm512_load_ps(float const *__p)
+{
+ return (__m512) __builtin_ia32_loadaps512_mask ((const __v16sf *)__p,
+ (__v16sf)
+ _mm512_setzero_ps (),
+ (__mmask16) -1);
+}
+
+static __inline __m512 __DEFAULT_FN_ATTRS
+_mm512_mask_load_ps (__m512 __W, __mmask16 __U, void const *__P)
+{
+ return (__m512) __builtin_ia32_loadaps512_mask ((const __v16sf *) __P,
+ (__v16sf) __W,
+ (__mmask16) __U);
+}
+
+static __inline __m512 __DEFAULT_FN_ATTRS
+_mm512_maskz_load_ps(__mmask16 __U, void const *__P)
+{
+ return (__m512) __builtin_ia32_loadaps512_mask ((const __v16sf *)__P,
+ (__v16sf)
+ _mm512_setzero_ps (),
+ (__mmask16) __U);
+}
+
+static __inline __m512d __DEFAULT_FN_ATTRS
+_mm512_load_pd(double const *__p)
+{
+ return (__m512d) __builtin_ia32_loadapd512_mask ((const __v8df *)__p,
+ (__v8df)
+ _mm512_setzero_pd (),
+ (__mmask8) -1);
+}
+
+static __inline __m512d __DEFAULT_FN_ATTRS
+_mm512_mask_load_pd (__m512d __W, __mmask8 __U, void const *__P)
+{
+ return (__m512d) __builtin_ia32_loadapd512_mask ((const __v8df *) __P,
+ (__v8df) __W,
+ (__mmask8) __U);
+}
+
+static __inline __m512d __DEFAULT_FN_ATTRS
+_mm512_maskz_load_pd(__mmask8 __U, void const *__P)
+{
+ return (__m512d) __builtin_ia32_loadapd512_mask ((const __v8df *)__P,
+ (__v8df)
+ _mm512_setzero_pd (),
+ (__mmask8) __U);
+}
+
+static __inline __m512i __DEFAULT_FN_ATTRS
+_mm512_load_si512 (void const *__P)
+{
+ return *(__m512i *) __P;
+}
+
+static __inline __m512i __DEFAULT_FN_ATTRS
+_mm512_load_epi32 (void const *__P)
+{
+ return *(__m512i *) __P;
+}
+
+static __inline __m512i __DEFAULT_FN_ATTRS
+_mm512_load_epi64 (void const *__P)
+{
+ return *(__m512i *) __P;
+}
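+
+/* Usage sketch (illustrative): the loadu forms accept any alignment,
+ * while the load forms require 64-byte alignment; the maskz forms zero
+ * the lanes whose mask bit is clear and the mask forms take them from
+ * __W instead:
+ *
+ *   __m512d lo4 = _mm512_maskz_loadu_pd((__mmask8)0x0F, p);  // p may be unaligned
+ */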
+
+/* SIMD store ops */
+
+static __inline void __DEFAULT_FN_ATTRS
+_mm512_mask_storeu_epi64(void *__P, __mmask8 __U, __m512i __A)
+{
+ __builtin_ia32_storedqudi512_mask ((long long *)__P, (__v8di) __A,
+ (__mmask8) __U);
+}
+
+static __inline void __DEFAULT_FN_ATTRS
+_mm512_storeu_si512 (void *__P, __m512i __A)
+{
+ __builtin_ia32_storedqusi512_mask ((int *) __P, (__v16si) __A,
+ (__mmask16) -1);
+}
+
+static __inline void __DEFAULT_FN_ATTRS
+_mm512_mask_storeu_epi32(void *__P, __mmask16 __U, __m512i __A)
+{
+ __builtin_ia32_storedqusi512_mask ((int *)__P, (__v16si) __A,
+ (__mmask16) __U);
+}
+
+static __inline void __DEFAULT_FN_ATTRS
+_mm512_mask_storeu_pd(void *__P, __mmask8 __U, __m512d __A)
+{
+ __builtin_ia32_storeupd512_mask ((double *)__P, (__v8df) __A, (__mmask8) __U);
+}
+
+static __inline void __DEFAULT_FN_ATTRS
+_mm512_storeu_pd(void *__P, __m512d __A)
+{
+ __builtin_ia32_storeupd512_mask((double *)__P, (__v8df)__A, (__mmask8)-1);
+}
+
+static __inline void __DEFAULT_FN_ATTRS
+_mm512_mask_storeu_ps(void *__P, __mmask16 __U, __m512 __A)
+{
+ __builtin_ia32_storeups512_mask ((float *)__P, (__v16sf) __A,
+ (__mmask16) __U);
+}
+
+static __inline void __DEFAULT_FN_ATTRS
+_mm512_storeu_ps(void *__P, __m512 __A)
+{
+ __builtin_ia32_storeups512_mask((float *)__P, (__v16sf)__A, (__mmask16)-1);
+}
+
+static __inline void __DEFAULT_FN_ATTRS
+_mm512_mask_store_pd(void *__P, __mmask8 __U, __m512d __A)
+{
+ __builtin_ia32_storeapd512_mask ((__v8df *)__P, (__v8df) __A, (__mmask8) __U);
+}
+
+static __inline void __DEFAULT_FN_ATTRS
+_mm512_store_pd(void *__P, __m512d __A)
+{
+ *(__m512d*)__P = __A;
+}
+
+static __inline void __DEFAULT_FN_ATTRS
+_mm512_mask_store_ps(void *__P, __mmask16 __U, __m512 __A)
+{
+ __builtin_ia32_storeaps512_mask ((__v16sf *)__P, (__v16sf) __A,
+ (__mmask16) __U);
+}
+
+static __inline void __DEFAULT_FN_ATTRS
+_mm512_store_ps(void *__P, __m512 __A)
+{
+ *(__m512*)__P = __A;
+}
+
+static __inline void __DEFAULT_FN_ATTRS
+_mm512_store_si512 (void *__P, __m512i __A)
+{
+ *(__m512i *) __P = __A;
+}
+
+static __inline void __DEFAULT_FN_ATTRS
+_mm512_store_epi32 (void *__P, __m512i __A)
+{
+ *(__m512i *) __P = __A;
+}
+
+static __inline void __DEFAULT_FN_ATTRS
+_mm512_store_epi64 (void *__P, __m512i __A)
+{
+ *(__m512i *) __P = __A;
+}
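+
+/* Usage sketch (illustrative): the mask forms write only the lanes
+ * whose mask bit is set and leave the rest of the destination memory
+ * untouched:
+ *
+ *   _mm512_mask_storeu_ps(p, (__mmask16)0x0003, v);  // stores v[0], v[1] only
+ */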
+
+/* Mask ops */
+
+static __inline __mmask16 __DEFAULT_FN_ATTRS
+_mm512_knot(__mmask16 __M)
+{
+ return __builtin_ia32_knothi(__M);
+}
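+
+/* Usage sketch (illustrative): returns the bitwise complement of the
+ * 16-bit mask, e.g. _mm512_knot((__mmask16)0x00FF) == 0xFF00.
+ */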
+
+/* Integer compare */
+
+static __inline__ __mmask16 __DEFAULT_FN_ATTRS
+_mm512_cmpeq_epi32_mask(__m512i __a, __m512i __b) {
+ return (__mmask16)__builtin_ia32_pcmpeqd512_mask((__v16si)__a, (__v16si)__b,
+ (__mmask16)-1);
+}
+
+static __inline__ __mmask16 __DEFAULT_FN_ATTRS
+_mm512_mask_cmpeq_epi32_mask(__mmask16 __u, __m512i __a, __m512i __b) {
+ return (__mmask16)__builtin_ia32_pcmpeqd512_mask((__v16si)__a, (__v16si)__b,
+ __u);
+}
+
+static __inline__ __mmask16 __DEFAULT_FN_ATTRS
+_mm512_cmpeq_epu32_mask(__m512i __a, __m512i __b) {
+ return (__mmask16)__builtin_ia32_ucmpd512_mask((__v16si)__a, (__v16si)__b, 0,
+ (__mmask16)-1);
+}
+
+static __inline__ __mmask16 __DEFAULT_FN_ATTRS
+_mm512_mask_cmpeq_epu32_mask(__mmask16 __u, __m512i __a, __m512i __b) {
+ return (__mmask16)__builtin_ia32_ucmpd512_mask((__v16si)__a, (__v16si)__b, 0,
+ __u);
+}
+
+static __inline__ __mmask8 __DEFAULT_FN_ATTRS
+_mm512_mask_cmpeq_epi64_mask(__mmask8 __u, __m512i __a, __m512i __b) {
+ return (__mmask8)__builtin_ia32_pcmpeqq512_mask((__v8di)__a, (__v8di)__b,
+ __u);
+}
+
+static __inline__ __mmask8 __DEFAULT_FN_ATTRS
+_mm512_cmpeq_epi64_mask(__m512i __a, __m512i __b) {
+ return (__mmask8)__builtin_ia32_pcmpeqq512_mask((__v8di)__a, (__v8di)__b,
+ (__mmask8)-1);
+}
+
+static __inline__ __mmask8 __DEFAULT_FN_ATTRS
+_mm512_cmpeq_epu64_mask(__m512i __a, __m512i __b) {
+ return (__mmask8)__builtin_ia32_ucmpq512_mask((__v8di)__a, (__v8di)__b, 0,
+ (__mmask8)-1);
+}
+
+static __inline__ __mmask8 __DEFAULT_FN_ATTRS
+_mm512_mask_cmpeq_epu64_mask(__mmask8 __u, __m512i __a, __m512i __b) {
+ return (__mmask8)__builtin_ia32_ucmpq512_mask((__v8di)__a, (__v8di)__b, 0,
+ __u);
+}
+
+static __inline__ __mmask16 __DEFAULT_FN_ATTRS
+_mm512_cmpge_epi32_mask(__m512i __a, __m512i __b) {
+ return (__mmask16)__builtin_ia32_cmpd512_mask((__v16si)__a, (__v16si)__b, 5,
+ (__mmask16)-1);
+}
+
+static __inline__ __mmask16 __DEFAULT_FN_ATTRS
+_mm512_mask_cmpge_epi32_mask(__mmask16 __u, __m512i __a, __m512i __b) {
+ return (__mmask16)__builtin_ia32_cmpd512_mask((__v16si)__a, (__v16si)__b, 5,
+ __u);
+}
+
+static __inline__ __mmask16 __DEFAULT_FN_ATTRS
+_mm512_cmpge_epu32_mask(__m512i __a, __m512i __b) {
+ return (__mmask16)__builtin_ia32_ucmpd512_mask((__v16si)__a, (__v16si)__b, 5,
+ (__mmask16)-1);
+}
+
+static __inline__ __mmask16 __DEFAULT_FN_ATTRS
+_mm512_mask_cmpge_epu32_mask(__mmask16 __u, __m512i __a, __m512i __b) {
+ return (__mmask16)__builtin_ia32_ucmpd512_mask((__v16si)__a, (__v16si)__b, 5,
+ __u);
+}
+
+static __inline__ __mmask8 __DEFAULT_FN_ATTRS
+_mm512_cmpge_epi64_mask(__m512i __a, __m512i __b) {
+ return (__mmask8)__builtin_ia32_cmpq512_mask((__v8di)__a, (__v8di)__b, 5,
+ (__mmask8)-1);
+}
+
+static __inline__ __mmask8 __DEFAULT_FN_ATTRS
+_mm512_mask_cmpge_epi64_mask(__mmask8 __u, __m512i __a, __m512i __b) {
+ return (__mmask8)__builtin_ia32_cmpq512_mask((__v8di)__a, (__v8di)__b, 5,
+ __u);
+}
+
+static __inline__ __mmask8 __DEFAULT_FN_ATTRS
+_mm512_cmpge_epu64_mask(__m512i __a, __m512i __b) {
+ return (__mmask8)__builtin_ia32_ucmpq512_mask((__v8di)__a, (__v8di)__b, 5,
+ (__mmask8)-1);
+}
+
+static __inline__ __mmask8 __DEFAULT_FN_ATTRS
+_mm512_mask_cmpge_epu64_mask(__mmask8 __u, __m512i __a, __m512i __b) {
+ return (__mmask8)__builtin_ia32_ucmpq512_mask((__v8di)__a, (__v8di)__b, 5,
+ __u);
+}
+
+static __inline__ __mmask16 __DEFAULT_FN_ATTRS
+_mm512_cmpgt_epi32_mask(__m512i __a, __m512i __b) {
+ return (__mmask16)__builtin_ia32_pcmpgtd512_mask((__v16si)__a, (__v16si)__b,
+ (__mmask16)-1);
+}
+
+static __inline__ __mmask16 __DEFAULT_FN_ATTRS
+_mm512_mask_cmpgt_epi32_mask(__mmask16 __u, __m512i __a, __m512i __b) {
+ return (__mmask16)__builtin_ia32_pcmpgtd512_mask((__v16si)__a, (__v16si)__b,
+ __u);
+}
+
+static __inline__ __mmask16 __DEFAULT_FN_ATTRS
+_mm512_cmpgt_epu32_mask(__m512i __a, __m512i __b) {
+ return (__mmask16)__builtin_ia32_ucmpd512_mask((__v16si)__a, (__v16si)__b, 6,
+ (__mmask16)-1);
+}
+
+static __inline__ __mmask16 __DEFAULT_FN_ATTRS
+_mm512_mask_cmpgt_epu32_mask(__mmask16 __u, __m512i __a, __m512i __b) {
+ return (__mmask16)__builtin_ia32_ucmpd512_mask((__v16si)__a, (__v16si)__b, 6,
+ __u);
+}
+
+static __inline__ __mmask8 __DEFAULT_FN_ATTRS
+_mm512_mask_cmpgt_epi64_mask(__mmask8 __u, __m512i __a, __m512i __b) {
+ return (__mmask8)__builtin_ia32_pcmpgtq512_mask((__v8di)__a, (__v8di)__b,
+ __u);
+}
+
+static __inline__ __mmask8 __DEFAULT_FN_ATTRS
+_mm512_cmpgt_epi64_mask(__m512i __a, __m512i __b) {
+ return (__mmask8)__builtin_ia32_pcmpgtq512_mask((__v8di)__a, (__v8di)__b,
+ (__mmask8)-1);
+}
+
+static __inline__ __mmask8 __DEFAULT_FN_ATTRS
+_mm512_cmpgt_epu64_mask(__m512i __a, __m512i __b) {
+ return (__mmask8)__builtin_ia32_ucmpq512_mask((__v8di)__a, (__v8di)__b, 6,
+ (__mmask8)-1);
+}
+
+static __inline__ __mmask8 __DEFAULT_FN_ATTRS
+_mm512_mask_cmpgt_epu64_mask(__mmask8 __u, __m512i __a, __m512i __b) {
+ return (__mmask8)__builtin_ia32_ucmpq512_mask((__v8di)__a, (__v8di)__b, 6,
+ __u);
+}
+
+static __inline__ __mmask16 __DEFAULT_FN_ATTRS
+_mm512_cmple_epi32_mask(__m512i __a, __m512i __b) {
+ return (__mmask16)__builtin_ia32_cmpd512_mask((__v16si)__a, (__v16si)__b, 2,
+ (__mmask16)-1);
+}
+
+static __inline__ __mmask16 __DEFAULT_FN_ATTRS
+_mm512_mask_cmple_epi32_mask(__mmask16 __u, __m512i __a, __m512i __b) {
+ return (__mmask16)__builtin_ia32_cmpd512_mask((__v16si)__a, (__v16si)__b, 2,
+ __u);
+}
+
+static __inline__ __mmask16 __DEFAULT_FN_ATTRS
+_mm512_cmple_epu32_mask(__m512i __a, __m512i __b) {
+ return (__mmask16)__builtin_ia32_ucmpd512_mask((__v16si)__a, (__v16si)__b, 2,
+ (__mmask16)-1);
+}
+
+static __inline__ __mmask16 __DEFAULT_FN_ATTRS
+_mm512_mask_cmple_epu32_mask(__mmask16 __u, __m512i __a, __m512i __b) {
+ return (__mmask16)__builtin_ia32_ucmpd512_mask((__v16si)__a, (__v16si)__b, 2,
+ __u);
+}
+
+static __inline__ __mmask8 __DEFAULT_FN_ATTRS
+_mm512_cmple_epi64_mask(__m512i __a, __m512i __b) {
+ return (__mmask8)__builtin_ia32_cmpq512_mask((__v8di)__a, (__v8di)__b, 2,
+ (__mmask8)-1);
+}
+
+static __inline__ __mmask8 __DEFAULT_FN_ATTRS
+_mm512_mask_cmple_epi64_mask(__mmask8 __u, __m512i __a, __m512i __b) {
+ return (__mmask8)__builtin_ia32_cmpq512_mask((__v8di)__a, (__v8di)__b, 2,
+ __u);
+}
+
+static __inline__ __mmask8 __DEFAULT_FN_ATTRS
+_mm512_cmple_epu64_mask(__m512i __a, __m512i __b) {
+ return (__mmask8)__builtin_ia32_ucmpq512_mask((__v8di)__a, (__v8di)__b, 2,
+ (__mmask8)-1);
+}
+
+static __inline__ __mmask8 __DEFAULT_FN_ATTRS
+_mm512_mask_cmple_epu64_mask(__mmask8 __u, __m512i __a, __m512i __b) {
+ return (__mmask8)__builtin_ia32_ucmpq512_mask((__v8di)__a, (__v8di)__b, 2,
+ __u);
+}
+
+static __inline__ __mmask16 __DEFAULT_FN_ATTRS
+_mm512_cmplt_epi32_mask(__m512i __a, __m512i __b) {
+ return (__mmask16)__builtin_ia32_cmpd512_mask((__v16si)__a, (__v16si)__b, 1,
+ (__mmask16)-1);
+}
+
+static __inline__ __mmask16 __DEFAULT_FN_ATTRS
+_mm512_mask_cmplt_epi32_mask(__mmask16 __u, __m512i __a, __m512i __b) {
+ return (__mmask16)__builtin_ia32_cmpd512_mask((__v16si)__a, (__v16si)__b, 1,
+ __u);
+}
+
+static __inline__ __mmask16 __DEFAULT_FN_ATTRS
+_mm512_cmplt_epu32_mask(__m512i __a, __m512i __b) {
+ return (__mmask16)__builtin_ia32_ucmpd512_mask((__v16si)__a, (__v16si)__b, 1,
+ (__mmask16)-1);
+}
+
+static __inline__ __mmask16 __DEFAULT_FN_ATTRS
+_mm512_mask_cmplt_epu32_mask(__mmask16 __u, __m512i __a, __m512i __b) {
+ return (__mmask16)__builtin_ia32_ucmpd512_mask((__v16si)__a, (__v16si)__b, 1,
+ __u);
+}
+
+static __inline__ __mmask8 __DEFAULT_FN_ATTRS
+_mm512_cmplt_epi64_mask(__m512i __a, __m512i __b) {
+ return (__mmask8)__builtin_ia32_cmpq512_mask((__v8di)__a, (__v8di)__b, 1,
+ (__mmask8)-1);
+}
+
+static __inline__ __mmask8 __DEFAULT_FN_ATTRS
+_mm512_mask_cmplt_epi64_mask(__mmask8 __u, __m512i __a, __m512i __b) {
+ return (__mmask8)__builtin_ia32_cmpq512_mask((__v8di)__a, (__v8di)__b, 1,
+ __u);
+}
+
+static __inline__ __mmask8 __DEFAULT_FN_ATTRS
+_mm512_cmplt_epu64_mask(__m512i __a, __m512i __b) {
+ return (__mmask8)__builtin_ia32_ucmpq512_mask((__v8di)__a, (__v8di)__b, 1,
+ (__mmask8)-1);
+}
+
+static __inline__ __mmask8 __DEFAULT_FN_ATTRS
+_mm512_mask_cmplt_epu64_mask(__mmask8 __u, __m512i __a, __m512i __b) {
+ return (__mmask8)__builtin_ia32_ucmpq512_mask((__v8di)__a, (__v8di)__b, 1,
+ __u);
+}
+
+static __inline__ __mmask16 __DEFAULT_FN_ATTRS
+_mm512_cmpneq_epi32_mask(__m512i __a, __m512i __b) {
+ return (__mmask16)__builtin_ia32_cmpd512_mask((__v16si)__a, (__v16si)__b, 4,
+ (__mmask16)-1);
+}
+
+static __inline__ __mmask16 __DEFAULT_FN_ATTRS
+_mm512_mask_cmpneq_epi32_mask(__mmask16 __u, __m512i __a, __m512i __b) {
+ return (__mmask16)__builtin_ia32_cmpd512_mask((__v16si)__a, (__v16si)__b, 4,
+ __u);
+}
+
+static __inline__ __mmask16 __DEFAULT_FN_ATTRS
+_mm512_cmpneq_epu32_mask(__m512i __a, __m512i __b) {
+ return (__mmask16)__builtin_ia32_ucmpd512_mask((__v16si)__a, (__v16si)__b, 4,
+ (__mmask16)-1);
+}
+
+static __inline__ __mmask16 __DEFAULT_FN_ATTRS
+_mm512_mask_cmpneq_epu32_mask(__mmask16 __u, __m512i __a, __m512i __b) {
+ return (__mmask16)__builtin_ia32_ucmpd512_mask((__v16si)__a, (__v16si)__b, 4,
+ __u);
+}
+
+static __inline__ __mmask8 __DEFAULT_FN_ATTRS
+_mm512_cmpneq_epi64_mask(__m512i __a, __m512i __b) {
+ return (__mmask8)__builtin_ia32_cmpq512_mask((__v8di)__a, (__v8di)__b, 4,
+ (__mmask8)-1);
+}
+
+static __inline__ __mmask8 __DEFAULT_FN_ATTRS
+_mm512_mask_cmpneq_epi64_mask(__mmask8 __u, __m512i __a, __m512i __b) {
+ return (__mmask8)__builtin_ia32_cmpq512_mask((__v8di)__a, (__v8di)__b, 4,
+ __u);
+}
+
+static __inline__ __mmask8 __DEFAULT_FN_ATTRS
+_mm512_cmpneq_epu64_mask(__m512i __a, __m512i __b) {
+ return (__mmask8)__builtin_ia32_ucmpq512_mask((__v8di)__a, (__v8di)__b, 4,
+ (__mmask8)-1);
+}
+
+static __inline__ __mmask8 __DEFAULT_FN_ATTRS
+_mm512_mask_cmpneq_epu64_mask(__mmask8 __u, __m512i __a, __m512i __b) {
+ return (__mmask8)__builtin_ia32_ucmpq512_mask((__v8di)__a, (__v8di)__b, 4,
+ __u);
+}
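+
+/* Usage sketch (illustrative): these compares feed naturally into the
+ * mask and blend operations above; an elementwise signed minimum, for
+ * instance, can be written as:
+ *
+ *   __mmask16 k = _mm512_cmplt_epi32_mask(a, b);  // bit i set iff a[i] < b[i]
+ *   __m512i   m = _mm512_mask_blend_epi32(k, b, a);
+ */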
+
+static __inline__ __m512i __DEFAULT_FN_ATTRS
+_mm512_cvtepi8_epi32 (__m128i __A)
+{
+ return (__m512i) __builtin_ia32_pmovsxbd512_mask ((__v16qi) __A,
+ (__v16si)
+ _mm512_setzero_si512 (),
+ (__mmask16) -1);
+}
+
+static __inline__ __m512i __DEFAULT_FN_ATTRS
+_mm512_mask_cvtepi8_epi32 (__m512i __W, __mmask16 __U, __m128i __A)
+{
+ return (__m512i) __builtin_ia32_pmovsxbd512_mask ((__v16qi) __A,
+ (__v16si) __W,
+ (__mmask16) __U);
+}
+
+static __inline__ __m512i __DEFAULT_FN_ATTRS
+_mm512_maskz_cvtepi8_epi32 (__mmask16 __U, __m128i __A)
+{
+ return (__m512i) __builtin_ia32_pmovsxbd512_mask ((__v16qi) __A,
+ (__v16si)
+ _mm512_setzero_si512 (),
+ (__mmask16) __U);
+}
+
+static __inline__ __m512i __DEFAULT_FN_ATTRS
+_mm512_cvtepi8_epi64 (__m128i __A)
+{
+ return (__m512i) __builtin_ia32_pmovsxbq512_mask ((__v16qi) __A,
+ (__v8di)
+ _mm512_setzero_si512 (),
+ (__mmask8) -1);
+}
+
+static __inline__ __m512i __DEFAULT_FN_ATTRS
+_mm512_mask_cvtepi8_epi64 (__m512i __W, __mmask8 __U, __m128i __A)
+{
+ return (__m512i) __builtin_ia32_pmovsxbq512_mask ((__v16qi) __A,
+ (__v8di) __W,
+ (__mmask8) __U);
+}
+
+static __inline__ __m512i __DEFAULT_FN_ATTRS
+_mm512_maskz_cvtepi8_epi64 (__mmask8 __U, __m128i __A)
+{
+ return (__m512i) __builtin_ia32_pmovsxbq512_mask ((__v16qi) __A,
+ (__v8di)
+ _mm512_setzero_si512 (),
+ (__mmask8) __U);
+}
+
+static __inline__ __m512i __DEFAULT_FN_ATTRS
+_mm512_cvtepi32_epi64 (__m256i __X)
+{
+ return (__m512i) __builtin_ia32_pmovsxdq512_mask ((__v8si) __X,
+ (__v8di)
+ _mm512_setzero_si512 (),
+ (__mmask8) -1);
+}
+
+static __inline__ __m512i __DEFAULT_FN_ATTRS
+_mm512_mask_cvtepi32_epi64 (__m512i __W, __mmask8 __U, __m256i __X)
+{
+ return (__m512i) __builtin_ia32_pmovsxdq512_mask ((__v8si) __X,
+ (__v8di) __W,
+ (__mmask8) __U);
+}
+
+static __inline__ __m512i __DEFAULT_FN_ATTRS
+_mm512_maskz_cvtepi32_epi64 (__mmask8 __U, __m256i __X)
+{
+ return (__m512i) __builtin_ia32_pmovsxdq512_mask ((__v8si) __X,
+ (__v8di)
+ _mm512_setzero_si512 (),
+ (__mmask8) __U);
+}
+
+static __inline__ __m512i __DEFAULT_FN_ATTRS
+_mm512_cvtepi16_epi32 (__m256i __A)
+{
+ return (__m512i) __builtin_ia32_pmovsxwd512_mask ((__v16hi) __A,
+ (__v16si)
+ _mm512_setzero_si512 (),
+ (__mmask16) -1);
+}
+
+static __inline__ __m512i __DEFAULT_FN_ATTRS
+_mm512_mask_cvtepi16_epi32 (__m512i __W, __mmask16 __U, __m256i __A)
+{
+ return (__m512i) __builtin_ia32_pmovsxwd512_mask ((__v16hi) __A,
+ (__v16si) __W,
+ (__mmask16) __U);
+}
+
+static __inline__ __m512i __DEFAULT_FN_ATTRS
+_mm512_maskz_cvtepi16_epi32 (__mmask16 __U, __m256i __A)
+{
+ return (__m512i) __builtin_ia32_pmovsxwd512_mask ((__v16hi) __A,
+ (__v16si)
+ _mm512_setzero_si512 (),
+ (__mmask16) __U);
+}
+
+static __inline__ __m512i __DEFAULT_FN_ATTRS
+_mm512_cvtepi16_epi64 (__m128i __A)
+{
+ return (__m512i) __builtin_ia32_pmovsxwq512_mask ((__v8hi) __A,
+ (__v8di)
+ _mm512_setzero_si512 (),
+ (__mmask8) -1);
+}
+
+static __inline__ __m512i __DEFAULT_FN_ATTRS
+_mm512_mask_cvtepi16_epi64 (__m512i __W, __mmask8 __U, __m128i __A)
+{
+ return (__m512i) __builtin_ia32_pmovsxwq512_mask ((__v8hi) __A,
+ (__v8di) __W,
+ (__mmask8) __U);
+}
+
+static __inline__ __m512i __DEFAULT_FN_ATTRS
+_mm512_maskz_cvtepi16_epi64 (__mmask8 __U, __m128i __A)
+{
+ return (__m512i) __builtin_ia32_pmovsxwq512_mask ((__v8hi) __A,
+ (__v8di)
+ _mm512_setzero_si512 (),
+ (__mmask8) __U);
+}
+
+static __inline__ __m512i __DEFAULT_FN_ATTRS
+_mm512_cvtepu8_epi32 (__m128i __A)
+{
+ return (__m512i) __builtin_ia32_pmovzxbd512_mask ((__v16qi) __A,
+ (__v16si)
+ _mm512_setzero_si512 (),
+ (__mmask16) -1);
+}
+
+static __inline__ __m512i __DEFAULT_FN_ATTRS
+_mm512_mask_cvtepu8_epi32 (__m512i __W, __mmask16 __U, __m128i __A)
+{
+ return (__m512i) __builtin_ia32_pmovzxbd512_mask ((__v16qi) __A,
+ (__v16si) __W,
+ (__mmask16) __U);
+}
+
+static __inline__ __m512i __DEFAULT_FN_ATTRS
+_mm512_maskz_cvtepu8_epi32 (__mmask16 __U, __m128i __A)
+{
+ return (__m512i) __builtin_ia32_pmovzxbd512_mask ((__v16qi) __A,
+ (__v16si)
+ _mm512_setzero_si512 (),
+ (__mmask16) __U);
+}
+
+static __inline__ __m512i __DEFAULT_FN_ATTRS
+_mm512_cvtepu8_epi64 (__m128i __A)
+{
+ return (__m512i) __builtin_ia32_pmovzxbq512_mask ((__v16qi) __A,
+ (__v8di)
+ _mm512_setzero_si512 (),
+ (__mmask8) -1);
+}
+
+static __inline__ __m512i __DEFAULT_FN_ATTRS
+_mm512_mask_cvtepu8_epi64 (__m512i __W, __mmask8 __U, __m128i __A)
+{
+ return (__m512i) __builtin_ia32_pmovzxbq512_mask ((__v16qi) __A,
+ (__v8di) __W,
+ (__mmask8) __U);
+}
+
+static __inline__ __m512i __DEFAULT_FN_ATTRS
+_mm512_maskz_cvtepu8_epi64 (__mmask8 __U, __m128i __A)
+{
+ return (__m512i) __builtin_ia32_pmovzxbq512_mask ((__v16qi) __A,
+ (__v8di)
+ _mm512_setzero_si512 (),
+ (__mmask8) __U);
+}
+
+static __inline__ __m512i __DEFAULT_FN_ATTRS
+_mm512_cvtepu32_epi64 (__m256i __X)
+{
+ return (__m512i) __builtin_ia32_pmovzxdq512_mask ((__v8si) __X,
+ (__v8di)
+ _mm512_setzero_si512 (),
+ (__mmask8) -1);
+}
+
+static __inline__ __m512i __DEFAULT_FN_ATTRS
+_mm512_mask_cvtepu32_epi64 (__m512i __W, __mmask8 __U, __m256i __X)
+{
+ return (__m512i) __builtin_ia32_pmovzxdq512_mask ((__v8si) __X,
+ (__v8di) __W,
+ (__mmask8) __U);
+}
+
+static __inline__ __m512i __DEFAULT_FN_ATTRS
+_mm512_maskz_cvtepu32_epi64 (__mmask8 __U, __m256i __X)
+{
+ return (__m512i) __builtin_ia32_pmovzxdq512_mask ((__v8si) __X,
+ (__v8di)
+ _mm512_setzero_si512 (),
+ (__mmask8) __U);
+}
+
+static __inline__ __m512i __DEFAULT_FN_ATTRS
+_mm512_cvtepu16_epi32 (__m256i __A)
+{
+ return (__m512i) __builtin_ia32_pmovzxwd512_mask ((__v16hi) __A,
+ (__v16si)
+ _mm512_setzero_si512 (),
+ (__mmask16) -1);
+}
+
+static __inline__ __m512i __DEFAULT_FN_ATTRS
+_mm512_mask_cvtepu16_epi32 (__m512i __W, __mmask16 __U, __m256i __A)
+{
+ return (__m512i) __builtin_ia32_pmovzxwd512_mask ((__v16hi) __A,
+ (__v16si) __W,
+ (__mmask16) __U);
+}
+
+static __inline__ __m512i __DEFAULT_FN_ATTRS
+_mm512_maskz_cvtepu16_epi32 (__mmask16 __U, __m256i __A)
+{
+ return (__m512i) __builtin_ia32_pmovzxwd512_mask ((__v16hi) __A,
+ (__v16si)
+ _mm512_setzero_si512 (),
+ (__mmask16) __U);
+}
+
+static __inline__ __m512i __DEFAULT_FN_ATTRS
+_mm512_cvtepu16_epi64 (__m128i __A)
+{
+ return (__m512i) __builtin_ia32_pmovzxwq512_mask ((__v8hi) __A,
+ (__v8di)
+ _mm512_setzero_si512 (),
+ (__mmask8) -1);
+}
+
+static __inline__ __m512i __DEFAULT_FN_ATTRS
+_mm512_mask_cvtepu16_epi64 (__m512i __W, __mmask8 __U, __m128i __A)
+{
+ return (__m512i) __builtin_ia32_pmovzxwq512_mask ((__v8hi) __A,
+ (__v8di) __W,
+ (__mmask8) __U);
+}
+
+static __inline__ __m512i __DEFAULT_FN_ATTRS
+_mm512_maskz_cvtepu16_epi64 (__mmask8 __U, __m128i __A)
+{
+ return (__m512i) __builtin_ia32_pmovzxwq512_mask ((__v8hi) __A,
+ (__v8di)
+ _mm512_setzero_si512 (),
+ (__mmask8) __U);
+}
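+
+/* Editorial note (not in the original header): the cvtepi* forms above
+ * sign-extend and the cvtepu* forms zero-extend the narrower source lanes;
+ * the mask/maskz variants blend unselected lanes from __W or zero them. */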
+
+static __inline__ __m512i __DEFAULT_FN_ATTRS
+_mm512_rorv_epi32 (__m512i __A, __m512i __B)
+{
+ return (__m512i) __builtin_ia32_prorvd512_mask ((__v16si) __A,
+ (__v16si) __B,
+ (__v16si)
+ _mm512_setzero_si512 (),
+ (__mmask16) -1);
+}
+
+static __inline__ __m512i __DEFAULT_FN_ATTRS
+_mm512_mask_rorv_epi32 (__m512i __W, __mmask16 __U, __m512i __A, __m512i __B)
+{
+ return (__m512i) __builtin_ia32_prorvd512_mask ((__v16si) __A,
+ (__v16si) __B,
+ (__v16si) __W,
+ (__mmask16) __U);
+}
+
+static __inline__ __m512i __DEFAULT_FN_ATTRS
+_mm512_maskz_rorv_epi32 (__mmask16 __U, __m512i __A, __m512i __B)
+{
+ return (__m512i) __builtin_ia32_prorvd512_mask ((__v16si) __A,
+ (__v16si) __B,
+ (__v16si)
+ _mm512_setzero_si512 (),
+ (__mmask16) __U);
+}
+
+static __inline__ __m512i __DEFAULT_FN_ATTRS
+_mm512_rorv_epi64 (__m512i __A, __m512i __B)
+{
+ return (__m512i) __builtin_ia32_prorvq512_mask ((__v8di) __A,
+ (__v8di) __B,
+ (__v8di)
+ _mm512_setzero_si512 (),
+ (__mmask8) -1);
+}
+
+static __inline__ __m512i __DEFAULT_FN_ATTRS
+_mm512_mask_rorv_epi64 (__m512i __W, __mmask8 __U, __m512i __A, __m512i __B)
+{
+ return (__m512i) __builtin_ia32_prorvq512_mask ((__v8di) __A,
+ (__v8di) __B,
+ (__v8di) __W,
+ (__mmask8) __U);
+}
+
+static __inline__ __m512i __DEFAULT_FN_ATTRS
+_mm512_maskz_rorv_epi64 (__mmask8 __U, __m512i __A, __m512i __B)
+{
+ return (__m512i) __builtin_ia32_prorvq512_mask ((__v8di) __A,
+ (__v8di) __B,
+ (__v8di)
+ _mm512_setzero_si512 (),
+ (__mmask8) __U);
+}
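+
+/* Editorial note (not in the original header): rolv/rorv rotate each lane
+ * left/right by its own per-lane count from __B, taken modulo the element
+ * width; the rol/ror macros below use one immediate count for all lanes. */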
+
+#define _mm512_cmp_epi32_mask(a, b, p) __extension__ ({ \
+ (__mmask16)__builtin_ia32_cmpd512_mask((__v16si)(__m512i)(a), \
+ (__v16si)(__m512i)(b), (int)(p), \
+ (__mmask16)-1); })
+
+#define _mm512_cmp_epu32_mask(a, b, p) __extension__ ({ \
+ (__mmask16)__builtin_ia32_ucmpd512_mask((__v16si)(__m512i)(a), \
+ (__v16si)(__m512i)(b), (int)(p), \
+ (__mmask16)-1); })
+
+#define _mm512_cmp_epi64_mask(a, b, p) __extension__ ({ \
+ (__mmask8)__builtin_ia32_cmpq512_mask((__v8di)(__m512i)(a), \
+ (__v8di)(__m512i)(b), (int)(p), \
+ (__mmask8)-1); })
+
+#define _mm512_cmp_epu64_mask(a, b, p) __extension__ ({ \
+ (__mmask8)__builtin_ia32_ucmpq512_mask((__v8di)(__m512i)(a), \
+ (__v8di)(__m512i)(b), (int)(p), \
+ (__mmask8)-1); })
+
+#define _mm512_mask_cmp_epi32_mask(m, a, b, p) __extension__ ({ \
+ (__mmask16)__builtin_ia32_cmpd512_mask((__v16si)(__m512i)(a), \
+ (__v16si)(__m512i)(b), (int)(p), \
+ (__mmask16)(m)); })
+
+#define _mm512_mask_cmp_epu32_mask(m, a, b, p) __extension__ ({ \
+ (__mmask16)__builtin_ia32_ucmpd512_mask((__v16si)(__m512i)(a), \
+ (__v16si)(__m512i)(b), (int)(p), \
+ (__mmask16)(m)); })
+
+#define _mm512_mask_cmp_epi64_mask(m, a, b, p) __extension__ ({ \
+ (__mmask8)__builtin_ia32_cmpq512_mask((__v8di)(__m512i)(a), \
+ (__v8di)(__m512i)(b), (int)(p), \
+ (__mmask8)(m)); })
+
+#define _mm512_mask_cmp_epu64_mask(m, a, b, p) __extension__ ({ \
+ (__mmask8)__builtin_ia32_ucmpq512_mask((__v8di)(__m512i)(a), \
+ (__v8di)(__m512i)(b), (int)(p), \
+ (__mmask8)(m)); })
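+
+/* Editorial note (not in the original header): the predicate argument `p`
+ * uses the VPCMP encoding 0=EQ, 1=LT, 2=LE, 3=FALSE, 4=NE, 5=NLT, 6=NLE,
+ * 7=TRUE. Usage sketch:
+ *   __mmask16 lt = _mm512_cmp_epi32_mask(a, b, 1);          // a < b per lane
+ *   __mmask16 m2 = _mm512_mask_cmp_epi32_mask(m, a, b, 1);  // cleared where m is 0
+ */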
+
+#define _mm512_rol_epi32(a, b) __extension__ ({ \
+ (__m512i)__builtin_ia32_prold512_mask((__v16si)(__m512i)(a), (int)(b), \
+ (__v16si)_mm512_setzero_si512(), \
+ (__mmask16)-1); })
+
+#define _mm512_mask_rol_epi32(W, U, a, b) __extension__ ({ \
+ (__m512i)__builtin_ia32_prold512_mask((__v16si)(__m512i)(a), (int)(b), \
+ (__v16si)(__m512i)(W), \
+ (__mmask16)(U)); })
+
+#define _mm512_maskz_rol_epi32(U, a, b) __extension__ ({ \
+ (__m512i)__builtin_ia32_prold512_mask((__v16si)(__m512i)(a), (int)(b), \
+ (__v16si)_mm512_setzero_si512(), \
+ (__mmask16)(U)); })
+
+#define _mm512_rol_epi64(a, b) __extension__ ({ \
+ (__m512i)__builtin_ia32_prolq512_mask((__v8di)(__m512i)(a), (int)(b), \
+ (__v8di)_mm512_setzero_si512(), \
+ (__mmask8)-1); })
+
+#define _mm512_mask_rol_epi64(W, U, a, b) __extension__ ({ \
+ (__m512i)__builtin_ia32_prolq512_mask((__v8di)(__m512i)(a), (int)(b), \
+ (__v8di)(__m512i)(W), (__mmask8)(U)); })
+
+#define _mm512_maskz_rol_epi64(U, a, b) __extension__ ({ \
+ (__m512i)__builtin_ia32_prolq512_mask((__v8di)(__m512i)(a), (int)(b), \
+ (__v8di)_mm512_setzero_si512(), \
+ (__mmask8)(U)); })
+
+static __inline__ __m512i __DEFAULT_FN_ATTRS
+_mm512_rolv_epi32 (__m512i __A, __m512i __B)
+{
+ return (__m512i) __builtin_ia32_prolvd512_mask ((__v16si) __A,
+ (__v16si) __B,
+ (__v16si)
+ _mm512_setzero_si512 (),
+ (__mmask16) -1);
+}
+
+static __inline__ __m512i __DEFAULT_FN_ATTRS
+_mm512_mask_rolv_epi32 (__m512i __W, __mmask16 __U, __m512i __A, __m512i __B)
+{
+ return (__m512i) __builtin_ia32_prolvd512_mask ((__v16si) __A,
+ (__v16si) __B,
+ (__v16si) __W,
+ (__mmask16) __U);
+}
+
+static __inline__ __m512i __DEFAULT_FN_ATTRS
+_mm512_maskz_rolv_epi32 (__mmask16 __U, __m512i __A, __m512i __B)
+{
+ return (__m512i) __builtin_ia32_prolvd512_mask ((__v16si) __A,
+ (__v16si) __B,
+ (__v16si)
+ _mm512_setzero_si512 (),
+ (__mmask16) __U);
+}
+
+static __inline__ __m512i __DEFAULT_FN_ATTRS
+_mm512_rolv_epi64 (__m512i __A, __m512i __B)
+{
+ return (__m512i) __builtin_ia32_prolvq512_mask ((__v8di) __A,
+ (__v8di) __B,
+ (__v8di)
+ _mm512_setzero_si512 (),
+ (__mmask8) -1);
+}
+
+static __inline__ __m512i __DEFAULT_FN_ATTRS
+_mm512_mask_rolv_epi64 (__m512i __W, __mmask8 __U, __m512i __A, __m512i __B)
+{
+ return (__m512i) __builtin_ia32_prolvq512_mask ((__v8di) __A,
+ (__v8di) __B,
+ (__v8di) __W,
+ (__mmask8) __U);
+}
+
+static __inline__ __m512i __DEFAULT_FN_ATTRS
+_mm512_maskz_rolv_epi64 (__mmask8 __U, __m512i __A, __m512i __B)
+{
+ return (__m512i) __builtin_ia32_prolvq512_mask ((__v8di) __A,
+ (__v8di) __B,
+ (__v8di)
+ _mm512_setzero_si512 (),
+ (__mmask8) __U);
+}
+
+#define _mm512_ror_epi32(A, B) __extension__ ({ \
+ (__m512i)__builtin_ia32_prord512_mask((__v16si)(__m512i)(A), (int)(B), \
+ (__v16si)_mm512_setzero_si512(), \
+ (__mmask16)-1); })
+
+#define _mm512_mask_ror_epi32(W, U, A, B) __extension__ ({ \
+ (__m512i)__builtin_ia32_prord512_mask((__v16si)(__m512i)(A), (int)(B), \
+ (__v16si)(__m512i)(W), \
+ (__mmask16)(U)); })
+
+#define _mm512_maskz_ror_epi32(U, A, B) __extension__ ({ \
+ (__m512i)__builtin_ia32_prord512_mask((__v16si)(__m512i)(A), (int)(B), \
+ (__v16si)_mm512_setzero_si512(), \
+ (__mmask16)(U)); })
+
+#define _mm512_ror_epi64(A, B) __extension__ ({ \
+ (__m512i)__builtin_ia32_prorq512_mask((__v8di)(__m512i)(A), (int)(B), \
+ (__v8di)_mm512_setzero_si512(), \
+ (__mmask8)-1); })
+
+#define _mm512_mask_ror_epi64(W, U, A, B) __extension__ ({ \
+ (__m512i)__builtin_ia32_prorq512_mask((__v8di)(__m512i)(A), (int)(B), \
+ (__v8di)(__m512i)(W), (__mmask8)(U)); })
+
+#define _mm512_maskz_ror_epi64(U, A, B) __extension__ ({ \
+ (__m512i)__builtin_ia32_prorq512_mask((__v8di)(__m512i)(A), (int)(B), \
+ (__v8di)_mm512_setzero_si512(), \
+ (__mmask8)(U)); })
+
+#define _mm512_slli_epi32(A, B) __extension__ ({ \
+ (__m512i)__builtin_ia32_pslldi512_mask((__v16si)(__m512i)(A), (int)(B), \
+ (__v16si)_mm512_setzero_si512(), \
+ (__mmask16)-1); })
+
+#define _mm512_mask_slli_epi32(W, U, A, B) __extension__ ({ \
+ (__m512i)__builtin_ia32_pslldi512_mask((__v16si)(__m512i)(A), (int)(B), \
+ (__v16si)(__m512i)(W), \
+ (__mmask16)(U)); })
+
+#define _mm512_maskz_slli_epi32(U, A, B) __extension__ ({ \
+ (__m512i)__builtin_ia32_pslldi512_mask((__v16si)(__m512i)(A), (int)(B), \
+ (__v16si)_mm512_setzero_si512(), \
+ (__mmask16)(U)); })
+
+#define _mm512_slli_epi64(A, B) __extension__ ({ \
+ (__m512i)__builtin_ia32_psllqi512_mask((__v8di)(__m512i)(A), (int)(B), \
+ (__v8di)_mm512_setzero_si512(), \
+ (__mmask8)-1); })
+
+#define _mm512_mask_slli_epi64(W, U, A, B) __extension__ ({ \
+ (__m512i)__builtin_ia32_psllqi512_mask((__v8di)(__m512i)(A), (int)(B), \
+ (__v8di)(__m512i)(W), \
+ (__mmask8)(U)); })
+
+#define _mm512_maskz_slli_epi64(U, A, B) __extension__ ({ \
+ (__m512i)__builtin_ia32_psllqi512_mask((__v8di)(__m512i)(A), (int)(B), \
+ (__v8di)_mm512_setzero_si512(), \
+ (__mmask8)(U)); })
+
+#define _mm512_srli_epi32(A, B) __extension__ ({ \
+ (__m512i)__builtin_ia32_psrldi512_mask((__v16si)(__m512i)(A), (int)(B), \
+ (__v16si)_mm512_setzero_si512(), \
+ (__mmask16)-1); })
+
+#define _mm512_mask_srli_epi32(W, U, A, B) __extension__ ({ \
+ (__m512i)__builtin_ia32_psrldi512_mask((__v16si)(__m512i)(A), (int)(B), \
+ (__v16si)(__m512i)(W), \
+ (__mmask16)(U)); })
+
+#define _mm512_maskz_srli_epi32(U, A, B) __extension__ ({ \
+ (__m512i)__builtin_ia32_psrldi512_mask((__v16si)(__m512i)(A), (int)(B), \
+ (__v16si)_mm512_setzero_si512(), \
+ (__mmask16)(U)); })
+
+#define _mm512_srli_epi64(A, B) __extension__ ({ \
+ (__m512i)__builtin_ia32_psrlqi512_mask((__v8di)(__m512i)(A), (int)(B), \
+ (__v8di)_mm512_setzero_si512(), \
+ (__mmask8)-1); })
+
+#define _mm512_mask_srli_epi64(W, U, A, B) __extension__ ({ \
+ (__m512i)__builtin_ia32_psrlqi512_mask((__v8di)(__m512i)(A), (int)(B), \
+ (__v8di)(__m512i)(W), \
+ (__mmask8)(U)); })
+
+#define _mm512_maskz_srli_epi64(U, A, B) __extension__ ({ \
+ (__m512i)__builtin_ia32_psrlqi512_mask((__v8di)(__m512i)(A), (int)(B), \
+ (__v8di)_mm512_setzero_si512(), \
+ (__mmask8)(U)); })
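+
+/* Editorial note (not in the original header): slli/srli shift every lane
+ * by the same immediate count, and a count at or above the element width
+ * produces 0. Usage sketch:
+ *   __m512i d = _mm512_slli_epi32(v, 1);   // doubles each 32-bit lane
+ */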
+
+static __inline__ __m512i __DEFAULT_FN_ATTRS
+_mm512_mask_load_epi32 (__m512i __W, __mmask16 __U, void const *__P)
+{
+ return (__m512i) __builtin_ia32_movdqa32load512_mask ((const __v16si *) __P,
+ (__v16si) __W,
+ (__mmask16) __U);
+}
+
+static __inline__ __m512i __DEFAULT_FN_ATTRS
+_mm512_maskz_load_epi32 (__mmask16 __U, void const *__P)
+{
+ return (__m512i) __builtin_ia32_movdqa32load512_mask ((const __v16si *) __P,
+ (__v16si)
+ _mm512_setzero_si512 (),
+ (__mmask16) __U);
+}
+
+static __inline__ void __DEFAULT_FN_ATTRS
+_mm512_mask_store_epi32 (void *__P, __mmask16 __U, __m512i __A)
+{
+ __builtin_ia32_movdqa32store512_mask ((__v16si *) __P, (__v16si) __A,
+ (__mmask16) __U);
+}
+
+static __inline__ __m512i __DEFAULT_FN_ATTRS
+_mm512_mask_mov_epi32 (__m512i __W, __mmask16 __U, __m512i __A)
+{
+ return (__m512i) __builtin_ia32_selectd_512 ((__mmask16) __U,
+ (__v16si) __A,
+ (__v16si) __W);
+}
+
+static __inline__ __m512i __DEFAULT_FN_ATTRS
+_mm512_maskz_mov_epi32 (__mmask16 __U, __m512i __A)
+{
+ return (__m512i) __builtin_ia32_selectd_512 ((__mmask16) __U,
+ (__v16si) __A,
+ (__v16si) _mm512_setzero_si512 ());
+}
+
+static __inline__ __m512i __DEFAULT_FN_ATTRS
+_mm512_mask_mov_epi64 (__m512i __W, __mmask8 __U, __m512i __A)
+{
+ return (__m512i) __builtin_ia32_selectq_512 ((__mmask8) __U,
+ (__v8di) __A,
+ (__v8di) __W);
+}
+
+static __inline__ __m512i __DEFAULT_FN_ATTRS
+_mm512_maskz_mov_epi64 (__mmask8 __U, __m512i __A)
+{
+ return (__m512i) __builtin_ia32_selectq_512 ((__mmask8) __U,
+ (__v8di) __A,
+ (__v8di) _mm512_setzero_si512 ());
+}
+
+static __inline__ __m512i __DEFAULT_FN_ATTRS
+_mm512_mask_load_epi64 (__m512i __W, __mmask8 __U, void const *__P)
+{
+ return (__m512i) __builtin_ia32_movdqa64load512_mask ((const __v8di *) __P,
+ (__v8di) __W,
+ (__mmask8) __U);
+}
+
+static __inline__ __m512i __DEFAULT_FN_ATTRS
+_mm512_maskz_load_epi64 (__mmask8 __U, void const *__P)
+{
+ return (__m512i) __builtin_ia32_movdqa64load512_mask ((const __v8di *) __P,
+ (__v8di)
+ _mm512_setzero_si512 (),
+ (__mmask8) __U);
+}
+
+static __inline__ void __DEFAULT_FN_ATTRS
+_mm512_mask_store_epi64 (void *__P, __mmask8 __U, __m512i __A)
+{
+ __builtin_ia32_movdqa64store512_mask ((__v8di *) __P, (__v8di) __A,
+ (__mmask8) __U);
+}
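+
+/* Editorial note (not in the original header): these load/store forms lower
+ * to the aligned vmovdqa32/vmovdqa64 instructions, so __P must be 64-byte
+ * aligned; a masked store writes only the selected lanes, e.g.:
+ *   _mm512_mask_store_epi32(buf, (__mmask16)0x00FF, v);  // lanes 0-7 only
+ */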
+
+static __inline__ __m512d __DEFAULT_FN_ATTRS
+_mm512_movedup_pd (__m512d __A)
+{
+ return (__m512d)__builtin_shufflevector((__v8df)__A, (__v8df)__A,
+ 0, 0, 2, 2, 4, 4, 6, 6);
+}
+
+static __inline__ __m512d __DEFAULT_FN_ATTRS
+_mm512_mask_movedup_pd (__m512d __W, __mmask8 __U, __m512d __A)
+{
+ return (__m512d)__builtin_ia32_selectpd_512((__mmask8)__U,
+ (__v8df)_mm512_movedup_pd(__A),
+ (__v8df)__W);
+}
+
+static __inline__ __m512d __DEFAULT_FN_ATTRS
+_mm512_maskz_movedup_pd (__mmask8 __U, __m512d __A)
+{
+ return (__m512d)__builtin_ia32_selectpd_512((__mmask8)__U,
+ (__v8df)_mm512_movedup_pd(__A),
+ (__v8df)_mm512_setzero_pd());
+}
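+
+/* Editorial note (not in the original header): movedup_pd duplicates the
+ * even-indexed doubles, giving lanes {a0,a0,a2,a2,a4,a4,a6,a6}, exactly as
+ * the shufflevector indices above spell out. */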
+
+#define _mm512_fixupimm_round_pd(A, B, C, imm, R) __extension__ ({ \
+ (__m512d)__builtin_ia32_fixupimmpd512_mask((__v8df)(__m512d)(A), \
+ (__v8df)(__m512d)(B), \
+ (__v8di)(__m512i)(C), (int)(imm), \
+ (__mmask8)-1, (int)(R)); })
+
+#define _mm512_mask_fixupimm_round_pd(A, U, B, C, imm, R) __extension__ ({ \
+ (__m512d)__builtin_ia32_fixupimmpd512_mask((__v8df)(__m512d)(A), \
+ (__v8df)(__m512d)(B), \
+ (__v8di)(__m512i)(C), (int)(imm), \
+ (__mmask8)(U), (int)(R)); })
+
+#define _mm512_fixupimm_pd(A, B, C, imm) __extension__ ({ \
+ (__m512d)__builtin_ia32_fixupimmpd512_mask((__v8df)(__m512d)(A), \
+ (__v8df)(__m512d)(B), \
+ (__v8di)(__m512i)(C), (int)(imm), \
+ (__mmask8)-1, \
+ _MM_FROUND_CUR_DIRECTION); })
+
+#define _mm512_mask_fixupimm_pd(A, U, B, C, imm) __extension__ ({ \
+ (__m512d)__builtin_ia32_fixupimmpd512_mask((__v8df)(__m512d)(A), \
+ (__v8df)(__m512d)(B), \
+ (__v8di)(__m512i)(C), (int)(imm), \
+ (__mmask8)(U), \
+ _MM_FROUND_CUR_DIRECTION); })
+
+#define _mm512_maskz_fixupimm_round_pd(U, A, B, C, imm, R) __extension__ ({ \
+ (__m512d)__builtin_ia32_fixupimmpd512_maskz((__v8df)(__m512d)(A), \
+ (__v8df)(__m512d)(B), \
+ (__v8di)(__m512i)(C), \
+ (int)(imm), (__mmask8)(U), \
+ (int)(R)); })
+
+#define _mm512_maskz_fixupimm_pd(U, A, B, C, imm) __extension__ ({ \
+ (__m512d)__builtin_ia32_fixupimmpd512_maskz((__v8df)(__m512d)(A), \
+ (__v8df)(__m512d)(B), \
+ (__v8di)(__m512i)(C), \
+ (int)(imm), (__mmask8)(U), \
+ _MM_FROUND_CUR_DIRECTION); })
+
+#define _mm512_fixupimm_round_ps(A, B, C, imm, R) __extension__ ({ \
+ (__m512)__builtin_ia32_fixupimmps512_mask((__v16sf)(__m512)(A), \
+ (__v16sf)(__m512)(B), \
+ (__v16si)(__m512i)(C), (int)(imm), \
+ (__mmask16)-1, (int)(R)); })
+
+#define _mm512_mask_fixupimm_round_ps(A, U, B, C, imm, R) __extension__ ({ \
+ (__m512)__builtin_ia32_fixupimmps512_mask((__v16sf)(__m512)(A), \
+ (__v16sf)(__m512)(B), \
+ (__v16si)(__m512i)(C), (int)(imm), \
+ (__mmask16)(U), (int)(R)); })
+
+#define _mm512_fixupimm_ps(A, B, C, imm) __extension__ ({ \
+ (__m512)__builtin_ia32_fixupimmps512_mask((__v16sf)(__m512)(A), \
+ (__v16sf)(__m512)(B), \
+ (__v16si)(__m512i)(C), (int)(imm), \
+ (__mmask16)-1, \
+ _MM_FROUND_CUR_DIRECTION); })
+
+#define _mm512_mask_fixupimm_ps(A, U, B, C, imm) __extension__ ({ \
+ (__m512)__builtin_ia32_fixupimmps512_mask((__v16sf)(__m512)(A), \
+ (__v16sf)(__m512)(B), \
+ (__v16si)(__m512i)(C), (int)(imm), \
+ (__mmask16)(U), \
+ _MM_FROUND_CUR_DIRECTION); })
+
+#define _mm512_maskz_fixupimm_round_ps(U, A, B, C, imm, R) __extension__ ({ \
+ (__m512)__builtin_ia32_fixupimmps512_maskz((__v16sf)(__m512)(A), \
+ (__v16sf)(__m512)(B), \
+ (__v16si)(__m512i)(C), \
+ (int)(imm), (__mmask16)(U), \
+ (int)(R)); })
+
+#define _mm512_maskz_fixupimm_ps(U, A, B, C, imm) __extension__ ({ \
+ (__m512)__builtin_ia32_fixupimmps512_maskz((__v16sf)(__m512)(A), \
+ (__v16sf)(__m512)(B), \
+ (__v16si)(__m512i)(C), \
+ (int)(imm), (__mmask16)(U), \
+ _MM_FROUND_CUR_DIRECTION); })
+
+#define _mm_fixupimm_round_sd(A, B, C, imm, R) __extension__ ({ \
+ (__m128d)__builtin_ia32_fixupimmsd_mask((__v2df)(__m128d)(A), \
+ (__v2df)(__m128d)(B), \
+ (__v2di)(__m128i)(C), (int)(imm), \
+ (__mmask8)-1, (int)(R)); })
+
+#define _mm_mask_fixupimm_round_sd(A, U, B, C, imm, R) __extension__ ({ \
+ (__m128d)__builtin_ia32_fixupimmsd_mask((__v2df)(__m128d)(A), \
+ (__v2df)(__m128d)(B), \
+ (__v2di)(__m128i)(C), (int)(imm), \
+ (__mmask8)(U), (int)(R)); })
+
+#define _mm_fixupimm_sd(A, B, C, imm) __extension__ ({ \
+ (__m128d)__builtin_ia32_fixupimmsd_mask((__v2df)(__m128d)(A), \
+ (__v2df)(__m128d)(B), \
+ (__v2di)(__m128i)(C), (int)(imm), \
+ (__mmask8)-1, \
+ _MM_FROUND_CUR_DIRECTION); })
+
+#define _mm_mask_fixupimm_sd(A, U, B, C, imm) __extension__ ({ \
+ (__m128d)__builtin_ia32_fixupimmsd_mask((__v2df)(__m128d)(A), \
+ (__v2df)(__m128d)(B), \
+ (__v2di)(__m128i)(C), (int)(imm), \
+ (__mmask8)(U), \
+ _MM_FROUND_CUR_DIRECTION); })
+
+#define _mm_maskz_fixupimm_round_sd(U, A, B, C, imm, R) __extension__ ({ \
+ (__m128d)__builtin_ia32_fixupimmsd_maskz((__v2df)(__m128d)(A), \
+ (__v2df)(__m128d)(B), \
+ (__v2di)(__m128i)(C), (int)(imm), \
+ (__mmask8)(U), (int)(R)); })
+
+#define _mm_maskz_fixupimm_sd(U, A, B, C, imm) __extension__ ({ \
+ (__m128d)__builtin_ia32_fixupimmsd_maskz((__v2df)(__m128d)(A), \
+ (__v2df)(__m128d)(B), \
+ (__v2di)(__m128i)(C), (int)(imm), \
+ (__mmask8)(U), \
+ _MM_FROUND_CUR_DIRECTION); })
+
+#define _mm_fixupimm_round_ss(A, B, C, imm, R) __extension__ ({ \
+ (__m128)__builtin_ia32_fixupimmss_mask((__v4sf)(__m128)(A), \
+ (__v4sf)(__m128)(B), \
+ (__v4si)(__m128i)(C), (int)(imm), \
+ (__mmask8)-1, (int)(R)); })
+
+#define _mm_mask_fixupimm_round_ss(A, U, B, C, imm, R) __extension__ ({ \
+ (__m128)__builtin_ia32_fixupimmss_mask((__v4sf)(__m128)(A), \
+ (__v4sf)(__m128)(B), \
+ (__v4si)(__m128i)(C), (int)(imm), \
+ (__mmask8)(U), (int)(R)); })
+
+#define _mm_fixupimm_ss(A, B, C, imm) __extension__ ({ \
+ (__m128)__builtin_ia32_fixupimmss_mask((__v4sf)(__m128)(A), \
+ (__v4sf)(__m128)(B), \
+ (__v4si)(__m128i)(C), (int)(imm), \
+ (__mmask8)-1, \
+ _MM_FROUND_CUR_DIRECTION); })
+
+#define _mm_mask_fixupimm_ss(A, U, B, C, imm) __extension__ ({ \
+ (__m128)__builtin_ia32_fixupimmss_mask((__v4sf)(__m128)(A), \
+ (__v4sf)(__m128)(B), \
+ (__v4si)(__m128i)(C), (int)(imm), \
+ (__mmask8)(U), \
+ _MM_FROUND_CUR_DIRECTION); })
+
+#define _mm_maskz_fixupimm_round_ss(U, A, B, C, imm, R) __extension__ ({ \
+ (__m128)__builtin_ia32_fixupimmss_maskz((__v4sf)(__m128)(A), \
+ (__v4sf)(__m128)(B), \
+ (__v4si)(__m128i)(C), (int)(imm), \
+ (__mmask8)(U), (int)(R)); })
+
+#define _mm_maskz_fixupimm_ss(U, A, B, C, imm) __extension__ ({ \
+ (__m128)__builtin_ia32_fixupimmss_maskz((__v4sf)(__m128)(A), \
+ (__v4sf)(__m128)(B), \
+ (__v4si)(__m128i)(C), (int)(imm), \
+ (__mmask8)(U), \
+ _MM_FROUND_CUR_DIRECTION); })
+
+#define _mm_getexp_round_sd(A, B, R) __extension__ ({ \
+ (__m128d)__builtin_ia32_getexpsd128_round_mask((__v2df)(__m128d)(A), \
+ (__v2df)(__m128d)(B), \
+ (__v2df)_mm_setzero_pd(), \
+ (__mmask8)-1, (int)(R)); })
+
+static __inline__ __m128d __DEFAULT_FN_ATTRS
+_mm_getexp_sd (__m128d __A, __m128d __B)
+{
+ return (__m128d) __builtin_ia32_getexpsd128_round_mask ((__v2df) __A,
+ (__v2df) __B,
+ (__v2df) _mm_setzero_pd (),
+ (__mmask8) -1,
+ _MM_FROUND_CUR_DIRECTION);
+}
+
+static __inline__ __m128d __DEFAULT_FN_ATTRS
+_mm_mask_getexp_sd (__m128d __W, __mmask8 __U, __m128d __A, __m128d __B)
+{
+ return (__m128d) __builtin_ia32_getexpsd128_round_mask ( (__v2df) __A,
+ (__v2df) __B,
+ (__v2df) __W,
+ (__mmask8) __U,
+ _MM_FROUND_CUR_DIRECTION);
+}
+
+#define _mm_mask_getexp_round_sd(W, U, A, B, R) __extension__ ({\
+ (__m128d)__builtin_ia32_getexpsd128_round_mask((__v2df)(__m128d)(A), \
+ (__v2df)(__m128d)(B), \
+ (__v2df)(__m128d)(W), \
+ (__mmask8)(U), (int)(R)); })
+
+static __inline__ __m128d __DEFAULT_FN_ATTRS
+_mm_maskz_getexp_sd (__mmask8 __U, __m128d __A, __m128d __B)
+{
+ return (__m128d) __builtin_ia32_getexpsd128_round_mask ( (__v2df) __A,
+ (__v2df) __B,
+ (__v2df) _mm_setzero_pd (),
+ (__mmask8) __U,
+ _MM_FROUND_CUR_DIRECTION);
+}
+
+#define _mm_maskz_getexp_round_sd(U, A, B, R) __extension__ ({\
+ (__m128d)__builtin_ia32_getexpsd128_round_mask((__v2df)(__m128d)(A), \
+ (__v2df)(__m128d)(B), \
+ (__v2df)_mm_setzero_pd(), \
+ (__mmask8)(U), (int)(R)); })
+
+#define _mm_getexp_round_ss(A, B, R) __extension__ ({ \
+ (__m128)__builtin_ia32_getexpss128_round_mask((__v4sf)(__m128)(A), \
+ (__v4sf)(__m128)(B), \
+ (__v4sf)_mm_setzero_ps(), \
+ (__mmask8)-1, (int)(R)); })
+
+static __inline__ __m128 __DEFAULT_FN_ATTRS
+_mm_getexp_ss (__m128 __A, __m128 __B)
+{
+ return (__m128) __builtin_ia32_getexpss128_round_mask ((__v4sf) __A,
+ (__v4sf) __B,
+ (__v4sf) _mm_setzero_ps (),
+ (__mmask8) -1,
+ _MM_FROUND_CUR_DIRECTION);
+}
+
+static __inline__ __m128 __DEFAULT_FN_ATTRS
+_mm_mask_getexp_ss (__m128 __W, __mmask8 __U, __m128 __A, __m128 __B)
+{
+ return (__m128) __builtin_ia32_getexpss128_round_mask ((__v4sf) __A,
+ (__v4sf) __B,
+ (__v4sf) __W,
+ (__mmask8) __U,
+ _MM_FROUND_CUR_DIRECTION);
+}
+
+#define _mm_mask_getexp_round_ss(W, U, A, B, R) __extension__ ({\
+ (__m128)__builtin_ia32_getexpss128_round_mask((__v4sf)(__m128)(A), \
+ (__v4sf)(__m128)(B), \
+ (__v4sf)(__m128)(W), \
+ (__mmask8)(U), (int)(R)); })
+
+static __inline__ __m128 __DEFAULT_FN_ATTRS
+_mm_maskz_getexp_ss (__mmask8 __U, __m128 __A, __m128 __B)
+{
+ return (__m128) __builtin_ia32_getexpss128_round_mask ((__v4sf) __A,
+ (__v4sf) __B,
+ (__v4sf) _mm_setzero_ps (),
+ (__mmask8) __U,
+ _MM_FROUND_CUR_DIRECTION);
+}
+
+#define _mm_maskz_getexp_round_ss(U, A, B, R) __extension__ ({\
+ (__m128)__builtin_ia32_getexpss128_round_mask((__v4sf)(__m128)(A), \
+ (__v4sf)(__m128)(B), \
+ (__v4sf)_mm_setzero_ps(), \
+ (__mmask8)(U), (int)(R)); })
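+
+/* Editorial note (not in the original header): getexp extracts
+ * floor(log2(|x|)) of the low element as a float, and the getmant forms
+ * below return the mantissa normalized by the interval (C) and sign (D)
+ * controls, so together they decompose x into mantissa and exponent. */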
+
+#define _mm_getmant_round_sd(A, B, C, D, R) __extension__ ({ \
+ (__m128d)__builtin_ia32_getmantsd_round_mask((__v2df)(__m128d)(A), \
+ (__v2df)(__m128d)(B), \
+ (int)(((D)<<2) | (C)), \
+ (__v2df)_mm_setzero_pd(), \
+ (__mmask8)-1, (int)(R)); })
+
+#define _mm_getmant_sd(A, B, C, D) __extension__ ({ \
+ (__m128d)__builtin_ia32_getmantsd_round_mask((__v2df)(__m128d)(A), \
+ (__v2df)(__m128d)(B), \
+ (int)(((D)<<2) | (C)), \
+ (__v2df)_mm_setzero_pd(), \
+ (__mmask8)-1, \
+ _MM_FROUND_CUR_DIRECTION); })
+
+#define _mm_mask_getmant_sd(W, U, A, B, C, D) __extension__ ({\
+ (__m128d)__builtin_ia32_getmantsd_round_mask((__v2df)(__m128d)(A), \
+ (__v2df)(__m128d)(B), \
+ (int)(((D)<<2) | (C)), \
+ (__v2df)(__m128d)(W), \
+ (__mmask8)(U), \
+ _MM_FROUND_CUR_DIRECTION); })
+
+#define _mm_mask_getmant_round_sd(W, U, A, B, C, D, R) __extension__ ({\
+ (__m128d)__builtin_ia32_getmantsd_round_mask((__v2df)(__m128d)(A), \
+ (__v2df)(__m128d)(B), \
+ (int)(((D)<<2) | (C)), \
+ (__v2df)(__m128d)(W), \
+ (__mmask8)(U), (int)(R)); })
+
+#define _mm_maskz_getmant_sd(U, A, B, C, D) __extension__ ({\
+ (__m128d)__builtin_ia32_getmantsd_round_mask((__v2df)(__m128d)(A), \
+ (__v2df)(__m128d)(B), \
+ (int)(((D)<<2) | (C)), \
+ (__v2df)_mm_setzero_pd(), \
+ (__mmask8)(U), \
+ _MM_FROUND_CUR_DIRECTION); })
+
+#define _mm_maskz_getmant_round_sd(U, A, B, C, D, R) __extension__ ({\
+ (__m128d)__builtin_ia32_getmantsd_round_mask((__v2df)(__m128d)(A), \
+ (__v2df)(__m128d)(B), \
+ (int)(((D)<<2) | (C)), \
+ (__v2df)_mm_setzero_pd(), \
+ (__mmask8)(U), (int)(R)); })
+
+#define _mm_getmant_round_ss(A, B, C, D, R) __extension__ ({ \
+ (__m128)__builtin_ia32_getmantss_round_mask((__v4sf)(__m128)(A), \
+ (__v4sf)(__m128)(B), \
+ (int)(((D)<<2) | (C)), \
+ (__v4sf)_mm_setzero_ps(), \
+ (__mmask8)-1, (int)(R)); })
+
+#define _mm_getmant_ss(A, B, C, D) __extension__ ({ \
+ (__m128)__builtin_ia32_getmantss_round_mask((__v4sf)(__m128)(A), \
+ (__v4sf)(__m128)(B), \
+ (int)(((D)<<2) | (C)), \
+ (__v4sf)_mm_setzero_ps(), \
+ (__mmask8)-1, \
+ _MM_FROUND_CUR_DIRECTION); })
+
+#define _mm_mask_getmant_ss(W, U, A, B, C, D) __extension__ ({\
+ (__m128)__builtin_ia32_getmantss_round_mask((__v4sf)(__m128)(A), \
+ (__v4sf)(__m128)(B), \
+ (int)(((D)<<2) | (C)), \
+ (__v4sf)(__m128)(W), \
+ (__mmask8)(U), \
+ _MM_FROUND_CUR_DIRECTION); })
+
+#define _mm_mask_getmant_round_ss(W, U, A, B, C, D, R) __extension__ ({\
+ (__m128)__builtin_ia32_getmantss_round_mask((__v4sf)(__m128)(A), \
+ (__v4sf)(__m128)(B), \
+ (int)(((D)<<2) | (C)), \
+ (__v4sf)(__m128)(W), \
+ (__mmask8)(U), (int)(R)); })
+
+#define _mm_maskz_getmant_ss(U, A, B, C, D) __extension__ ({\
+ (__m128)__builtin_ia32_getmantss_round_mask((__v4sf)(__m128)(A), \
+ (__v4sf)(__m128)(B), \
+ (int)(((D)<<2) | (C)), \
+ (__v4sf)_mm_setzero_ps(), \
+ (__mmask8)(U), \
+ _MM_FROUND_CUR_DIRECTION); })
+
+#define _mm_maskz_getmant_round_ss(U, A, B, C, D, R) __extension__ ({\
+ (__m128)__builtin_ia32_getmantss_round_mask((__v4sf)(__m128)(A), \
+ (__v4sf)(__m128)(B), \
+ (int)(((D)<<2) | (C)), \
+ (__v4sf)_mm_setzero_ps(), \
+ (__mmask8)(U), (int)(R)); })
+
+static __inline__ __mmask16 __DEFAULT_FN_ATTRS
+_mm512_kmov (__mmask16 __A)
+{
+ return __A;
+}
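+
+/* Editorial note (not in the original header): _mm512_kmov is an identity
+ * at the C level; it exists so source can request an explicit mask move,
+ * and the compiler materializes a kmovw only where one is needed. */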
+
+#define _mm_comi_round_sd(A, B, P, R) __extension__ ({\
+ (int)__builtin_ia32_vcomisd((__v2df)(__m128d)(A), (__v2df)(__m128d)(B), \
+ (int)(P), (int)(R)); })
+
+#define _mm_comi_round_ss(A, B, P, R) __extension__ ({\
+ (int)__builtin_ia32_vcomiss((__v4sf)(__m128)(A), (__v4sf)(__m128)(B), \
+ (int)(P), (int)(R)); })
+
+#define _mm_cvt_roundsd_si64(A, R) __extension__ ({ \
+ (long long)__builtin_ia32_vcvtsd2si64((__v2df)(__m128d)(A), (int)(R)); })
+
+static __inline__ __m512i __DEFAULT_FN_ATTRS
+_mm512_mask2_permutex2var_epi32 (__m512i __A, __m512i __I,
+ __mmask16 __U, __m512i __B)
+{
+ return (__m512i) __builtin_ia32_vpermi2vard512_mask ((__v16si) __A,
+ (__v16si) __I
+ /* idx */ ,
+ (__v16si) __B,
+ (__mmask16) __U);
+}
+
+static __inline__ __m512i __DEFAULT_FN_ATTRS
+_mm512_sll_epi32 (__m512i __A, __m128i __B)
+{
+ return (__m512i) __builtin_ia32_pslld512_mask ((__v16si) __A,
+ (__v4si) __B,
+ (__v16si)
+ _mm512_setzero_si512 (),
+ (__mmask16) -1);
+}
+
+static __inline__ __m512i __DEFAULT_FN_ATTRS
+_mm512_mask_sll_epi32 (__m512i __W, __mmask16 __U, __m512i __A, __m128i __B)
+{
+ return (__m512i) __builtin_ia32_pslld512_mask ((__v16si) __A,
+ (__v4si) __B,
+ (__v16si) __W,
+ (__mmask16) __U);
+}
+
+static __inline__ __m512i __DEFAULT_FN_ATTRS
+_mm512_maskz_sll_epi32 (__mmask16 __U, __m512i __A, __m128i __B)
+{
+ return (__m512i) __builtin_ia32_pslld512_mask ((__v16si) __A,
+ (__v4si) __B,
+ (__v16si)
+ _mm512_setzero_si512 (),
+ (__mmask16) __U);
+}
+
+static __inline__ __m512i __DEFAULT_FN_ATTRS
+_mm512_sll_epi64 (__m512i __A, __m128i __B)
+{
+ return (__m512i) __builtin_ia32_psllq512_mask ((__v8di) __A,
+ (__v2di) __B,
+ (__v8di)
+ _mm512_setzero_si512 (),
+ (__mmask8) -1);
+}
+
+static __inline__ __m512i __DEFAULT_FN_ATTRS
+_mm512_mask_sll_epi64 (__m512i __W, __mmask8 __U, __m512i __A, __m128i __B)
+{
+ return (__m512i) __builtin_ia32_psllq512_mask ((__v8di) __A,
+ (__v2di) __B,
+ (__v8di) __W,
+ (__mmask8) __U);
+}
+
+static __inline__ __m512i __DEFAULT_FN_ATTRS
+_mm512_maskz_sll_epi64 (__mmask8 __U, __m512i __A, __m128i __B)
+{
+ return (__m512i) __builtin_ia32_psllq512_mask ((__v8di) __A,
+ (__v2di) __B,
+ (__v8di)
+ _mm512_setzero_si512 (),
+ (__mmask8) __U);
+}
+
+static __inline__ __m512i __DEFAULT_FN_ATTRS
+_mm512_sllv_epi32 (__m512i __X, __m512i __Y)
+{
+ return (__m512i) __builtin_ia32_psllv16si_mask ((__v16si) __X,
+ (__v16si) __Y,
+ (__v16si)
+ _mm512_setzero_si512 (),
+ (__mmask16) -1);
+}
+
+static __inline__ __m512i __DEFAULT_FN_ATTRS
+_mm512_mask_sllv_epi32 (__m512i __W, __mmask16 __U, __m512i __X, __m512i __Y)
+{
+ return (__m512i) __builtin_ia32_psllv16si_mask ((__v16si) __X,
+ (__v16si) __Y,
+ (__v16si) __W,
+ (__mmask16) __U);
+}
+
+static __inline__ __m512i __DEFAULT_FN_ATTRS
+_mm512_maskz_sllv_epi32 (__mmask16 __U, __m512i __X, __m512i __Y)
+{
+ return (__m512i) __builtin_ia32_psllv16si_mask ((__v16si) __X,
+ (__v16si) __Y,
+ (__v16si)
+ _mm512_setzero_si512 (),
+ (__mmask16) __U);
+}
+
+static __inline__ __m512i __DEFAULT_FN_ATTRS
+_mm512_sllv_epi64 (__m512i __X, __m512i __Y)
+{
+ return (__m512i) __builtin_ia32_psllv8di_mask ((__v8di) __X,
+ (__v8di) __Y,
+ (__v8di)
+ _mm512_setzero_si512 (),
+ (__mmask8) -1);
+}
+
+static __inline__ __m512i __DEFAULT_FN_ATTRS
+_mm512_mask_sllv_epi64 (__m512i __W, __mmask8 __U, __m512i __X, __m512i __Y)
+{
+ return (__m512i) __builtin_ia32_psllv8di_mask ((__v8di) __X,
+ (__v8di) __Y,
+ (__v8di) __W,
+ (__mmask8) __U);
+}
+
+static __inline__ __m512i __DEFAULT_FN_ATTRS
+_mm512_maskz_sllv_epi64 (__mmask8 __U, __m512i __X, __m512i __Y)
+{
+ return (__m512i) __builtin_ia32_psllv8di_mask ((__v8di) __X,
+ (__v8di) __Y,
+ (__v8di)
+ _mm512_setzero_si512 (),
+ (__mmask8) __U);
+}
+
+static __inline__ __m512i __DEFAULT_FN_ATTRS
+_mm512_sra_epi32 (__m512i __A, __m128i __B)
+{
+ return (__m512i) __builtin_ia32_psrad512_mask ((__v16si) __A,
+ (__v4si) __B,
+ (__v16si)
+ _mm512_setzero_si512 (),
+ (__mmask16) -1);
+}
+
+static __inline__ __m512i __DEFAULT_FN_ATTRS
+_mm512_mask_sra_epi32 (__m512i __W, __mmask16 __U, __m512i __A, __m128i __B)
+{
+ return (__m512i) __builtin_ia32_psrad512_mask ((__v16si) __A,
+ (__v4si) __B,
+ (__v16si) __W,
+ (__mmask16) __U);
+}
+
+static __inline__ __m512i __DEFAULT_FN_ATTRS
+_mm512_maskz_sra_epi32 (__mmask16 __U, __m512i __A, __m128i __B)
+{
+ return (__m512i) __builtin_ia32_psrad512_mask ((__v16si) __A,
+ (__v4si) __B,
+ (__v16si)
+ _mm512_setzero_si512 (),
+ (__mmask16) __U);
+}
+
+static __inline__ __m512i __DEFAULT_FN_ATTRS
+_mm512_sra_epi64 (__m512i __A, __m128i __B)
+{
+ return (__m512i) __builtin_ia32_psraq512_mask ((__v8di) __A,
+ (__v2di) __B,
+ (__v8di)
+ _mm512_setzero_si512 (),
+ (__mmask8) -1);
+}
+
+static __inline__ __m512i __DEFAULT_FN_ATTRS
+_mm512_mask_sra_epi64 (__m512i __W, __mmask8 __U, __m512i __A, __m128i __B)
+{
+ return (__m512i) __builtin_ia32_psraq512_mask ((__v8di) __A,
+ (__v2di) __B,
+ (__v8di) __W,
+ (__mmask8) __U);
+}
+
+static __inline__ __m512i __DEFAULT_FN_ATTRS
+_mm512_maskz_sra_epi64 (__mmask8 __U, __m512i __A, __m128i __B)
+{
+ return (__m512i) __builtin_ia32_psraq512_mask ((__v8di) __A,
+ (__v2di) __B,
+ (__v8di)
+ _mm512_setzero_si512 (),
+ (__mmask8) __U);
+}
+
+static __inline__ __m512i __DEFAULT_FN_ATTRS
+_mm512_srav_epi32 (__m512i __X, __m512i __Y)
+{
+ return (__m512i) __builtin_ia32_psrav16si_mask ((__v16si) __X,
+ (__v16si) __Y,
+ (__v16si)
+ _mm512_setzero_si512 (),
+ (__mmask16) -1);
+}
+
+static __inline__ __m512i __DEFAULT_FN_ATTRS
+_mm512_mask_srav_epi32 (__m512i __W, __mmask16 __U, __m512i __X, __m512i __Y)
+{
+ return (__m512i) __builtin_ia32_psrav16si_mask ((__v16si) __X,
+ (__v16si) __Y,
+ (__v16si) __W,
+ (__mmask16) __U);
+}
+
+static __inline__ __m512i __DEFAULT_FN_ATTRS
+_mm512_maskz_srav_epi32 (__mmask16 __U, __m512i __X, __m512i __Y)
+{
+ return (__m512i) __builtin_ia32_psrav16si_mask ((__v16si) __X,
+ (__v16si) __Y,
+ (__v16si)
+ _mm512_setzero_si512 (),
+ (__mmask16) __U);
+}
+
+static __inline__ __m512i __DEFAULT_FN_ATTRS
+_mm512_srav_epi64 (__m512i __X, __m512i __Y)
+{
+ return (__m512i) __builtin_ia32_psrav8di_mask ((__v8di) __X,
+ (__v8di) __Y,
+ (__v8di)
+ _mm512_setzero_si512 (),
+ (__mmask8) -1);
+}
+
+static __inline__ __m512i __DEFAULT_FN_ATTRS
+_mm512_mask_srav_epi64 (__m512i __W, __mmask8 __U, __m512i __X, __m512i __Y)
+{
+ return (__m512i) __builtin_ia32_psrav8di_mask ((__v8di) __X,
+ (__v8di) __Y,
+ (__v8di) __W,
+ (__mmask8) __U);
+}
+
+static __inline__ __m512i __DEFAULT_FN_ATTRS
+_mm512_maskz_srav_epi64 (__mmask8 __U, __m512i __X, __m512i __Y)
+{
+ return (__m512i) __builtin_ia32_psrav8di_mask ((__v8di) __X,
+ (__v8di) __Y,
+ (__v8di)
+ _mm512_setzero_si512 (),
+ (__mmask8) __U);
+}
+
+static __inline__ __m512i __DEFAULT_FN_ATTRS
+_mm512_srl_epi32 (__m512i __A, __m128i __B)
+{
+ return (__m512i) __builtin_ia32_psrld512_mask ((__v16si) __A,
+ (__v4si) __B,
+ (__v16si)
+ _mm512_setzero_si512 (),
+ (__mmask16) -1);
+}
+
+static __inline__ __m512i __DEFAULT_FN_ATTRS
+_mm512_mask_srl_epi32 (__m512i __W, __mmask16 __U, __m512i __A, __m128i __B)
+{
+ return (__m512i) __builtin_ia32_psrld512_mask ((__v16si) __A,
+ (__v4si) __B,
+ (__v16si) __W,
+ (__mmask16) __U);
+}
+
+static __inline__ __m512i __DEFAULT_FN_ATTRS
+_mm512_maskz_srl_epi32 (__mmask16 __U, __m512i __A, __m128i __B)
+{
+ return (__m512i) __builtin_ia32_psrld512_mask ((__v16si) __A,
+ (__v4si) __B,
+ (__v16si)
+ _mm512_setzero_si512 (),
+ (__mmask16) __U);
+}
+
+static __inline__ __m512i __DEFAULT_FN_ATTRS
+_mm512_srl_epi64 (__m512i __A, __m128i __B)
+{
+ return (__m512i) __builtin_ia32_psrlq512_mask ((__v8di) __A,
+ (__v2di) __B,
+ (__v8di)
+ _mm512_setzero_si512 (),
+ (__mmask8) -1);
+}
+
+static __inline__ __m512i __DEFAULT_FN_ATTRS
+_mm512_mask_srl_epi64 (__m512i __W, __mmask8 __U, __m512i __A, __m128i __B)
+{
+ return (__m512i) __builtin_ia32_psrlq512_mask ((__v8di) __A,
+ (__v2di) __B,
+ (__v8di) __W,
+ (__mmask8) __U);
+}
+
+static __inline__ __m512i __DEFAULT_FN_ATTRS
+_mm512_maskz_srl_epi64 (__mmask8 __U, __m512i __A, __m128i __B)
+{
+ return (__m512i) __builtin_ia32_psrlq512_mask ((__v8di) __A,
+ (__v2di) __B,
+ (__v8di)
+ _mm512_setzero_si512 (),
+ (__mmask8) __U);
+}
+
+static __inline__ __m512i __DEFAULT_FN_ATTRS
+_mm512_srlv_epi32 (__m512i __X, __m512i __Y)
+{
+ return (__m512i) __builtin_ia32_psrlv16si_mask ((__v16si) __X,
+ (__v16si) __Y,
+ (__v16si)
+ _mm512_setzero_si512 (),
+ (__mmask16) -1);
+}
+
+static __inline__ __m512i __DEFAULT_FN_ATTRS
+_mm512_mask_srlv_epi32 (__m512i __W, __mmask16 __U, __m512i __X, __m512i __Y)
+{
+ return (__m512i) __builtin_ia32_psrlv16si_mask ((__v16si) __X,
+ (__v16si) __Y,
+ (__v16si) __W,
+ (__mmask16) __U);
+}
+
+static __inline__ __m512i __DEFAULT_FN_ATTRS
+_mm512_maskz_srlv_epi32 (__mmask16 __U, __m512i __X, __m512i __Y)
+{
+ return (__m512i) __builtin_ia32_psrlv16si_mask ((__v16si) __X,
+ (__v16si) __Y,
+ (__v16si)
+ _mm512_setzero_si512 (),
+ (__mmask16) __U);
+}
+
+static __inline__ __m512i __DEFAULT_FN_ATTRS
+_mm512_srlv_epi64 (__m512i __X, __m512i __Y)
+{
+ return (__m512i) __builtin_ia32_psrlv8di_mask ((__v8di) __X,
+ (__v8di) __Y,
+ (__v8di)
+ _mm512_setzero_si512 (),
+ (__mmask8) -1);
+}
+
+static __inline__ __m512i __DEFAULT_FN_ATTRS
+_mm512_mask_srlv_epi64 (__m512i __W, __mmask8 __U, __m512i __X, __m512i __Y)
+{
+ return (__m512i) __builtin_ia32_psrlv8di_mask ((__v8di) __X,
+ (__v8di) __Y,
+ (__v8di) __W,
+ (__mmask8) __U);
+}
+
+static __inline__ __m512i __DEFAULT_FN_ATTRS
+_mm512_maskz_srlv_epi64 (__mmask8 __U, __m512i __X, __m512i __Y)
+{
+ return (__m512i) __builtin_ia32_psrlv8di_mask ((__v8di) __X,
+ (__v8di) __Y,
+ (__v8di)
+ _mm512_setzero_si512 (),
+ (__mmask8) __U);
+}
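+
+/* Editorial note (not in the original header): in the variable-count
+ * shifts above, each lane shifts by its own count from __Y; counts at or
+ * above the element width give 0 for sllv/srlv and a full sign fill for
+ * srav. */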
+
+#define _mm512_ternarylogic_epi32(A, B, C, imm) __extension__ ({ \
+ (__m512i)__builtin_ia32_pternlogd512_mask((__v16si)(__m512i)(A), \
+ (__v16si)(__m512i)(B), \
+ (__v16si)(__m512i)(C), (int)(imm), \
+ (__mmask16)-1); })
+
+#define _mm512_mask_ternarylogic_epi32(A, U, B, C, imm) __extension__ ({ \
+ (__m512i)__builtin_ia32_pternlogd512_mask((__v16si)(__m512i)(A), \
+ (__v16si)(__m512i)(B), \
+ (__v16si)(__m512i)(C), (int)(imm), \
+ (__mmask16)(U)); })
+
+#define _mm512_maskz_ternarylogic_epi32(U, A, B, C, imm) __extension__ ({ \
+ (__m512i)__builtin_ia32_pternlogd512_maskz((__v16si)(__m512i)(A), \
+ (__v16si)(__m512i)(B), \
+ (__v16si)(__m512i)(C), \
+ (int)(imm), (__mmask16)(U)); })
+
+#define _mm512_ternarylogic_epi64(A, B, C, imm) __extension__ ({ \
+ (__m512i)__builtin_ia32_pternlogq512_mask((__v8di)(__m512i)(A), \
+ (__v8di)(__m512i)(B), \
+ (__v8di)(__m512i)(C), (int)(imm), \
+ (__mmask8)-1); })
+
+#define _mm512_mask_ternarylogic_epi64(A, U, B, C, imm) __extension__ ({ \
+ (__m512i)__builtin_ia32_pternlogq512_mask((__v8di)(__m512i)(A), \
+ (__v8di)(__m512i)(B), \
+ (__v8di)(__m512i)(C), (int)(imm), \
+ (__mmask8)(U)); })
+
+#define _mm512_maskz_ternarylogic_epi64(U, A, B, C, imm) __extension__ ({ \
+ (__m512i)__builtin_ia32_pternlogq512_maskz((__v8di)(__m512i)(A), \
+ (__v8di)(__m512i)(B), \
+ (__v8di)(__m512i)(C), (int)(imm), \
+ (__mmask8)(U)); })
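+
+/* Editorial note (not in the original header): the ternarylogic imm8 is an
+ * 8-entry truth table indexed by (bitA<<2)|(bitB<<1)|bitC at every bit
+ * position. Usage sketch:
+ *   _mm512_ternarylogic_epi32(a, b, c, 0xE8);  // bitwise majority(a, b, c)
+ *   _mm512_ternarylogic_epi32(a, b, c, 0x96);  // a ^ b ^ c
+ */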
+
+#define _mm_cvt_roundsd_i64(A, R) __extension__ ({ \
+ (long long)__builtin_ia32_vcvtsd2si64((__v2df)(__m128d)(A), (int)(R)); })
+
+#define _mm_cvt_roundsd_si32(A, R) __extension__ ({ \
+ (int)__builtin_ia32_vcvtsd2si32((__v2df)(__m128d)(A), (int)(R)); })
+
+#define _mm_cvt_roundsd_i32(A, R) __extension__ ({ \
+ (int)__builtin_ia32_vcvtsd2si32((__v2df)(__m128d)(A), (int)(R)); })
+
+#define _mm_cvt_roundsd_u32(A, R) __extension__ ({ \
+ (unsigned int)__builtin_ia32_vcvtsd2usi32((__v2df)(__m128d)(A), (int)(R)); })
+
+static __inline__ unsigned __DEFAULT_FN_ATTRS
+_mm_cvtsd_u32 (__m128d __A)
+{
+ return (unsigned) __builtin_ia32_vcvtsd2usi32 ((__v2df) __A,
+ _MM_FROUND_CUR_DIRECTION);
+}
+
+#define _mm_cvt_roundsd_u64(A, R) __extension__ ({ \
+ (unsigned long long)__builtin_ia32_vcvtsd2usi64((__v2df)(__m128d)(A), \
+ (int)(R)); })
+
+static __inline__ unsigned long long __DEFAULT_FN_ATTRS
+_mm_cvtsd_u64 (__m128d __A)
+{
+ return (unsigned long long) __builtin_ia32_vcvtsd2usi64 ((__v2df)
+ __A,
+ _MM_FROUND_CUR_DIRECTION);
+}
+
+#define _mm_cvt_roundss_si32(A, R) __extension__ ({ \
+ (int)__builtin_ia32_vcvtss2si32((__v4sf)(__m128)(A), (int)(R)); })
+
+#define _mm_cvt_roundss_i32(A, R) __extension__ ({ \
+ (int)__builtin_ia32_vcvtss2si32((__v4sf)(__m128)(A), (int)(R)); })
+
+#define _mm_cvt_roundss_si64(A, R) __extension__ ({ \
+ (long long)__builtin_ia32_vcvtss2si64((__v4sf)(__m128)(A), (int)(R)); })
+
+#define _mm_cvt_roundss_i64(A, R) __extension__ ({ \
+ (long long)__builtin_ia32_vcvtss2si64((__v4sf)(__m128)(A), (int)(R)); })
+
+#define _mm_cvt_roundss_u32(A, R) __extension__ ({ \
+ (unsigned int)__builtin_ia32_vcvtss2usi32((__v4sf)(__m128)(A), (int)(R)); })
+
+static __inline__ unsigned __DEFAULT_FN_ATTRS
+_mm_cvtss_u32 (__m128 __A)
+{
+ return (unsigned) __builtin_ia32_vcvtss2usi32 ((__v4sf) __A,
+ _MM_FROUND_CUR_DIRECTION);
+}
+
+#define _mm_cvt_roundss_u64(A, R) __extension__ ({ \
+ (unsigned long long)__builtin_ia32_vcvtss2usi64((__v4sf)(__m128)(A), \
+ (int)(R)); })
+
+static __inline__ unsigned long long __DEFAULT_FN_ATTRS
+_mm_cvtss_u64 (__m128 __A)
+{
+ return (unsigned long long) __builtin_ia32_vcvtss2usi64 ((__v4sf)
+ __A,
+ _MM_FROUND_CUR_DIRECTION);
+}
+
+#define _mm_cvtt_roundsd_i32(A, R) __extension__ ({ \
+ (int)__builtin_ia32_vcvttsd2si32((__v2df)(__m128d)(A), (int)(R)); })
+
+#define _mm_cvtt_roundsd_si32(A, R) __extension__ ({ \
+ (int)__builtin_ia32_vcvttsd2si32((__v2df)(__m128d)(A), (int)(R)); })
+
+static __inline__ int __DEFAULT_FN_ATTRS
+_mm_cvttsd_i32 (__m128d __A)
+{
+ return (int) __builtin_ia32_vcvttsd2si32 ((__v2df) __A,
+ _MM_FROUND_CUR_DIRECTION);
+}
+
+#define _mm_cvtt_roundsd_si64(A, R) __extension__ ({ \
+ (long long)__builtin_ia32_vcvttsd2si64((__v2df)(__m128d)(A), (int)(R)); })
+
+#define _mm_cvtt_roundsd_i64(A, R) __extension__ ({ \
+ (long long)__builtin_ia32_vcvttsd2si64((__v2df)(__m128d)(A), (int)(R)); })
+
+static __inline__ long long __DEFAULT_FN_ATTRS
+_mm_cvttsd_i64 (__m128d __A)
+{
+ return (long long) __builtin_ia32_vcvttsd2si64 ((__v2df) __A,
+ _MM_FROUND_CUR_DIRECTION);
+}
+
+#define _mm_cvtt_roundsd_u32(A, R) __extension__ ({ \
+ (unsigned int)__builtin_ia32_vcvttsd2usi32((__v2df)(__m128d)(A), (int)(R)); })
+
+static __inline__ unsigned __DEFAULT_FN_ATTRS
+_mm_cvttsd_u32 (__m128d __A)
+{
+ return (unsigned) __builtin_ia32_vcvttsd2usi32 ((__v2df) __A,
+ _MM_FROUND_CUR_DIRECTION);
+}
+
+#define _mm_cvtt_roundsd_u64(A, R) __extension__ ({ \
+ (unsigned long long)__builtin_ia32_vcvttsd2usi64((__v2df)(__m128d)(A), \
+ (int)(R)); })
+
+static __inline__ unsigned long long __DEFAULT_FN_ATTRS
+_mm_cvttsd_u64 (__m128d __A)
+{
+ return (unsigned long long) __builtin_ia32_vcvttsd2usi64 ((__v2df)
+ __A,
+ _MM_FROUND_CUR_DIRECTION);
+}
+
+#define _mm_cvtt_roundss_i32(A, R) __extension__ ({ \
+ (int)__builtin_ia32_vcvttss2si32((__v4sf)(__m128)(A), (int)(R)); })
+
+#define _mm_cvtt_roundss_si32(A, R) __extension__ ({ \
+ (int)__builtin_ia32_vcvttss2si32((__v4sf)(__m128)(A), (int)(R)); })
+
+static __inline__ int __DEFAULT_FN_ATTRS
+_mm_cvttss_i32 (__m128 __A)
+{
+ return (int) __builtin_ia32_vcvttss2si32 ((__v4sf) __A,
+ _MM_FROUND_CUR_DIRECTION);
+}
+
+#define _mm_cvtt_roundss_i64(A, R) __extension__ ({ \
+ (long long)__builtin_ia32_vcvttss2si64((__v4sf)(__m128)(A), (int)(R)); })
+
+#define _mm_cvtt_roundss_si64(A, R) __extension__ ({ \
+ (long long)__builtin_ia32_vcvttss2si64((__v4sf)(__m128)(A), (int)(R)); })
+
+static __inline__ long long __DEFAULT_FN_ATTRS
+_mm_cvttss_i64 (__m128 __A)
+{
+ return (long long) __builtin_ia32_vcvttss2si64 ((__v4sf) __A,
+ _MM_FROUND_CUR_DIRECTION);
+}
+
+#define _mm_cvtt_roundss_u32(A, R) __extension__ ({ \
+ (unsigned int)__builtin_ia32_vcvttss2usi32((__v4sf)(__m128)(A), (int)(R)); })
+
+static __inline__ unsigned __DEFAULT_FN_ATTRS
+_mm_cvttss_u32 (__m128 __A)
+{
+ return (unsigned) __builtin_ia32_vcvttss2usi32 ((__v4sf) __A,
+ _MM_FROUND_CUR_DIRECTION);
+}
+
+#define _mm_cvtt_roundss_u64(A, R) __extension__ ({ \
+ (unsigned long long)__builtin_ia32_vcvttss2usi64((__v4sf)(__m128)(A), \
+ (int)(R)); })
+
+static __inline__ unsigned long long __DEFAULT_FN_ATTRS
+_mm_cvttss_u64 (__m128 __A)
+{
+ return (unsigned long long) __builtin_ia32_vcvttss2usi64 ((__v4sf)
+ __A,
+ _MM_FROUND_CUR_DIRECTION);
+}
+
+static __inline__ __m512d __DEFAULT_FN_ATTRS
+_mm512_mask2_permutex2var_pd (__m512d __A, __m512i __I, __mmask8 __U,
+ __m512d __B)
+{
+ return (__m512d) __builtin_ia32_vpermi2varpd512_mask ((__v8df) __A,
+ (__v8di) __I
+ /* idx */ ,
+ (__v8df) __B,
+ (__mmask8) __U);
+}
+
+static __inline__ __m512 __DEFAULT_FN_ATTRS
+_mm512_mask2_permutex2var_ps (__m512 __A, __m512i __I, __mmask16 __U,
+ __m512 __B)
+{
+ return (__m512) __builtin_ia32_vpermi2varps512_mask ((__v16sf) __A,
+ (__v16si) __I
+ /* idx */ ,
+ (__v16sf) __B,
+ (__mmask16) __U);
+}
+
+static __inline__ __m512i __DEFAULT_FN_ATTRS
+_mm512_mask2_permutex2var_epi64 (__m512i __A, __m512i __I,
+ __mmask8 __U, __m512i __B)
+{
+ return (__m512i) __builtin_ia32_vpermi2varq512_mask ((__v8di) __A,
+ (__v8di) __I
+ /* idx */ ,
+ (__v8di) __B,
+ (__mmask8) __U);
+}
+
+#define _mm512_permute_pd(X, C) __extension__ ({ \
+ (__m512d)__builtin_shufflevector((__v8df)(__m512d)(X), \
+ (__v8df)_mm512_undefined_pd(), \
+ 0 + (((C) >> 0) & 0x1), \
+ 0 + (((C) >> 1) & 0x1), \
+ 2 + (((C) >> 2) & 0x1), \
+ 2 + (((C) >> 3) & 0x1), \
+ 4 + (((C) >> 4) & 0x1), \
+ 4 + (((C) >> 5) & 0x1), \
+ 6 + (((C) >> 6) & 0x1), \
+ 6 + (((C) >> 7) & 0x1)); })
+
+#define _mm512_mask_permute_pd(W, U, X, C) __extension__ ({ \
+ (__m512d)__builtin_ia32_selectpd_512((__mmask8)(U), \
+ (__v8df)_mm512_permute_pd((X), (C)), \
+ (__v8df)(__m512d)(W)); })
+
+#define _mm512_maskz_permute_pd(U, X, C) __extension__ ({ \
+ (__m512d)__builtin_ia32_selectpd_512((__mmask8)(U), \
+ (__v8df)_mm512_permute_pd((X), (C)), \
+ (__v8df)_mm512_setzero_pd()); })
+
+#define _mm512_permute_ps(X, C) __extension__ ({ \
+ (__m512)__builtin_shufflevector((__v16sf)(__m512)(X), \
+ (__v16sf)_mm512_undefined_ps(), \
+ 0 + (((C) >> 0) & 0x3), \
+ 0 + (((C) >> 2) & 0x3), \
+ 0 + (((C) >> 4) & 0x3), \
+ 0 + (((C) >> 6) & 0x3), \
+ 4 + (((C) >> 0) & 0x3), \
+ 4 + (((C) >> 2) & 0x3), \
+ 4 + (((C) >> 4) & 0x3), \
+ 4 + (((C) >> 6) & 0x3), \
+ 8 + (((C) >> 0) & 0x3), \
+ 8 + (((C) >> 2) & 0x3), \
+ 8 + (((C) >> 4) & 0x3), \
+ 8 + (((C) >> 6) & 0x3), \
+ 12 + (((C) >> 0) & 0x3), \
+ 12 + (((C) >> 2) & 0x3), \
+ 12 + (((C) >> 4) & 0x3), \
+ 12 + (((C) >> 6) & 0x3)); })
+
+#define _mm512_mask_permute_ps(W, U, X, C) __extension__ ({ \
+ (__m512)__builtin_ia32_selectps_512((__mmask16)(U), \
+ (__v16sf)_mm512_permute_ps((X), (C)), \
+ (__v16sf)(__m512)(W)); })
+
+#define _mm512_maskz_permute_ps(U, X, C) __extension__ ({ \
+ (__m512)__builtin_ia32_selectps_512((__mmask16)(U), \
+ (__v16sf)_mm512_permute_ps((X), (C)), \
+ (__v16sf)_mm512_setzero_ps()); })
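+
+/* Editorial note (not in the original header): permute_ps applies the same
+ * 2-bit-per-element selector as _MM_SHUFFLE within each 128-bit lane, and
+ * permute_pd uses one selector bit per double. Usage sketch:
+ *   __m512 r = _mm512_permute_ps(x, _MM_SHUFFLE(0, 1, 2, 3)); // reverse each lane
+ */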
+
+static __inline__ __m512d __DEFAULT_FN_ATTRS
+_mm512_permutevar_pd (__m512d __A, __m512i __C)
+{
+ return (__m512d) __builtin_ia32_vpermilvarpd512_mask ((__v8df) __A,
+ (__v8di) __C,
+ (__v8df)
+ _mm512_undefined_pd (),
+ (__mmask8) -1);
+}
+
+static __inline__ __m512d __DEFAULT_FN_ATTRS
+_mm512_mask_permutevar_pd (__m512d __W, __mmask8 __U, __m512d __A, __m512i __C)
+{
+ return (__m512d) __builtin_ia32_vpermilvarpd512_mask ((__v8df) __A,
+ (__v8di) __C,
+ (__v8df) __W,
+ (__mmask8) __U);
+}
+
+static __inline__ __m512d __DEFAULT_FN_ATTRS
+_mm512_maskz_permutevar_pd (__mmask8 __U, __m512d __A, __m512i __C)
+{
+ return (__m512d) __builtin_ia32_vpermilvarpd512_mask ((__v8df) __A,
+ (__v8di) __C,
+ (__v8df)
+ _mm512_setzero_pd (),
+ (__mmask8) __U);
+}
+
+static __inline__ __m512 __DEFAULT_FN_ATTRS
+_mm512_permutevar_ps (__m512 __A, __m512i __C)
+{
+ return (__m512) __builtin_ia32_vpermilvarps512_mask ((__v16sf) __A,
+ (__v16si) __C,
+ (__v16sf)
+ _mm512_undefined_ps (),
+ (__mmask16) -1);
+}
+
+static __inline__ __m512 __DEFAULT_FN_ATTRS
+_mm512_mask_permutevar_ps (__m512 __W, __mmask16 __U, __m512 __A, __m512i __C)
+{
+ return (__m512) __builtin_ia32_vpermilvarps512_mask ((__v16sf) __A,
+ (__v16si) __C,
+ (__v16sf) __W,
+ (__mmask16) __U);
+}
+
+static __inline__ __m512 __DEFAULT_FN_ATTRS
+_mm512_maskz_permutevar_ps (__mmask16 __U, __m512 __A, __m512i __C)
+{
+ return (__m512) __builtin_ia32_vpermilvarps512_mask ((__v16sf) __A,
+ (__v16si) __C,
+ (__v16sf)
+ _mm512_setzero_ps (),
+ (__mmask16) __U);
+}
+
+static __inline__ __m512d __DEFAULT_FN_ATTRS
+_mm512_permutex2var_pd(__m512d __A, __m512i __I, __m512d __B)
+{
+ return (__m512d) __builtin_ia32_vpermt2varpd512_mask ((__v8di) __I
+ /* idx */ ,
+ (__v8df) __A,
+ (__v8df) __B,
+ (__mmask8) -1);
+}
+
+static __inline__ __m512d __DEFAULT_FN_ATTRS
+_mm512_mask_permutex2var_pd (__m512d __A, __mmask8 __U, __m512i __I, __m512d __B)
+{
+ return (__m512d) __builtin_ia32_vpermt2varpd512_mask ((__v8di) __I
+ /* idx */ ,
+ (__v8df) __A,
+ (__v8df) __B,
+ (__mmask8) __U);
+}
+
+static __inline__ __m512d __DEFAULT_FN_ATTRS
+_mm512_maskz_permutex2var_pd (__mmask8 __U, __m512d __A, __m512i __I,
+ __m512d __B)
+{
+ return (__m512d) __builtin_ia32_vpermt2varpd512_maskz ((__v8di) __I
+ /* idx */ ,
+ (__v8df) __A,
+ (__v8df) __B,
+ (__mmask8) __U);
+}
+
+static __inline__ __m512 __DEFAULT_FN_ATTRS
+_mm512_permutex2var_ps(__m512 __A, __m512i __I, __m512 __B)
+{
+ return (__m512) __builtin_ia32_vpermt2varps512_mask ((__v16si) __I
+ /* idx */ ,
+ (__v16sf) __A,
+ (__v16sf) __B,
+ (__mmask16) -1);
+}
+
+static __inline__ __m512 __DEFAULT_FN_ATTRS
+_mm512_mask_permutex2var_ps (__m512 __A, __mmask16 __U, __m512i __I, __m512 __B)
+{
+ return (__m512) __builtin_ia32_vpermt2varps512_mask ((__v16si) __I
+ /* idx */ ,
+ (__v16sf) __A,
+ (__v16sf) __B,
+ (__mmask16) __U);
+}
+
+static __inline__ __m512 __DEFAULT_FN_ATTRS
+_mm512_maskz_permutex2var_ps (__mmask16 __U, __m512 __A, __m512i __I,
+ __m512 __B)
+{
+ return (__m512) __builtin_ia32_vpermt2varps512_maskz ((__v16si) __I
+ /* idx */ ,
+ (__v16sf) __A,
+ (__v16sf) __B,
+ (__mmask16) __U);
+}
+
+static __inline__ __mmask16 __DEFAULT_FN_ATTRS
+_mm512_testn_epi32_mask (__m512i __A, __m512i __B)
+{
+ return (__mmask16) __builtin_ia32_ptestnmd512 ((__v16si) __A,
+ (__v16si) __B,
+ (__mmask16) -1);
+}
+
+static __inline__ __mmask16 __DEFAULT_FN_ATTRS
+_mm512_mask_testn_epi32_mask (__mmask16 __U, __m512i __A, __m512i __B)
+{
+ return (__mmask16) __builtin_ia32_ptestnmd512 ((__v16si) __A,
+ (__v16si) __B, __U);
+}
+
+static __inline__ __mmask8 __DEFAULT_FN_ATTRS
+_mm512_testn_epi64_mask (__m512i __A, __m512i __B)
+{
+ return (__mmask8) __builtin_ia32_ptestnmq512 ((__v8di) __A,
+ (__v8di) __B,
+ (__mmask8) -1);
+}
+
+static __inline__ __mmask8 __DEFAULT_FN_ATTRS
+_mm512_mask_testn_epi64_mask (__mmask8 __U, __m512i __A, __m512i __B)
+{
+ return (__mmask8) __builtin_ia32_ptestnmq512 ((__v8di) __A,
+ (__v8di) __B, __U);
+}
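+
+/* Editorial note (not in the original header): testn sets a mask bit when
+ * (__A & __B) is all-zero in that lane, so testing a vector against itself
+ * flags its zero lanes:
+ *   __mmask16 z = _mm512_testn_epi32_mask(v, v);
+ */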
+
+#define _mm512_cvtt_roundpd_epu32(A, R) __extension__ ({ \
+ (__m256i)__builtin_ia32_cvttpd2udq512_mask((__v8df)(__m512d)(A), \
+ (__v8si)_mm256_undefined_si256(), \
+ (__mmask8)-1, (int)(R)); })
+
+#define _mm512_mask_cvtt_roundpd_epu32(W, U, A, R) __extension__ ({ \
+ (__m256i)__builtin_ia32_cvttpd2udq512_mask((__v8df)(__m512d)(A), \
+ (__v8si)(__m256i)(W), \
+ (__mmask8)(U), (int)(R)); })
+
+#define _mm512_maskz_cvtt_roundpd_epu32(U, A, R) __extension__ ({ \
+ (__m256i)__builtin_ia32_cvttpd2udq512_mask((__v8df)(__m512d)(A), \
+ (__v8si)_mm256_setzero_si256(), \
+ (__mmask8)(U), (int)(R)); })
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS
+_mm512_cvttpd_epu32 (__m512d __A)
+{
+ return (__m256i) __builtin_ia32_cvttpd2udq512_mask ((__v8df) __A,
+ (__v8si)
+ _mm256_undefined_si256 (),
+ (__mmask8) -1,
+ _MM_FROUND_CUR_DIRECTION);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS
+_mm512_mask_cvttpd_epu32 (__m256i __W, __mmask8 __U, __m512d __A)
+{
+ return (__m256i) __builtin_ia32_cvttpd2udq512_mask ((__v8df) __A,
+ (__v8si) __W,
+ (__mmask8) __U,
+ _MM_FROUND_CUR_DIRECTION);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS
+_mm512_maskz_cvttpd_epu32 (__mmask8 __U, __m512d __A)
+{
+ return (__m256i) __builtin_ia32_cvttpd2udq512_mask ((__v8df) __A,
+ (__v8si)
+ _mm256_setzero_si256 (),
+ (__mmask8) __U,
+ _MM_FROUND_CUR_DIRECTION);
+}
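+
+/* Editorial note (not in the original header): these are truncating
+ * double-to-unsigned conversions; per the ISA, lanes that are negative,
+ * NaN, or too large to represent produce 0xFFFFFFFF. */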
+
+#define _mm_roundscale_round_sd(A, B, imm, R) __extension__ ({ \
+ (__m128d)__builtin_ia32_rndscalesd_round_mask((__v2df)(__m128d)(A), \
+ (__v2df)(__m128d)(B), \
+ (__v2df)_mm_setzero_pd(), \
+ (__mmask8)-1, (int)(imm), \
+ (int)(R)); })
+
+#define _mm_roundscale_sd(A, B, imm) __extension__ ({ \
+ (__m128d)__builtin_ia32_rndscalesd_round_mask((__v2df)(__m128d)(A), \
+ (__v2df)(__m128d)(B), \
+ (__v2df)_mm_setzero_pd(), \
+ (__mmask8)-1, (int)(imm), \
+ _MM_FROUND_CUR_DIRECTION); })
+
+#define _mm_mask_roundscale_sd(W, U, A, B, imm) __extension__ ({ \
+ (__m128d)__builtin_ia32_rndscalesd_round_mask((__v2df)(__m128d)(A), \
+ (__v2df)(__m128d)(B), \
+ (__v2df)(__m128d)(W), \
+ (__mmask8)(U), (int)(imm), \
+ _MM_FROUND_CUR_DIRECTION); })
+
+#define _mm_mask_roundscale_round_sd(W, U, A, B, I, R) __extension__ ({ \
+ (__m128d)__builtin_ia32_rndscalesd_round_mask((__v2df)(__m128d)(A), \
+ (__v2df)(__m128d)(B), \
+ (__v2df)(__m128d)(W), \
+ (__mmask8)(U), (int)(I), \
+ (int)(R)); })
+
+#define _mm_maskz_roundscale_sd(U, A, B, I) __extension__ ({ \
+ (__m128d)__builtin_ia32_rndscalesd_round_mask((__v2df)(__m128d)(A), \
+ (__v2df)(__m128d)(B), \
+ (__v2df)_mm_setzero_pd(), \
+ (__mmask8)(U), (int)(I), \
+ _MM_FROUND_CUR_DIRECTION); })
+
+#define _mm_maskz_roundscale_round_sd(U, A, B, I, R) __extension__ ({ \
+ (__m128d)__builtin_ia32_rndscalesd_round_mask((__v2df)(__m128d)(A), \
+ (__v2df)(__m128d)(B), \
+ (__v2df)_mm_setzero_pd(), \
+ (__mmask8)(U), (int)(I), \
+ (int)(R)); })
+
+#define _mm_roundscale_round_ss(A, B, imm, R) __extension__ ({ \
+ (__m128)__builtin_ia32_rndscaless_round_mask((__v4sf)(__m128)(A), \
+ (__v4sf)(__m128)(B), \
+ (__v4sf)_mm_setzero_ps(), \
+ (__mmask8)-1, (int)(imm), \
+ (int)(R)); })
+
+#define _mm_roundscale_ss(A, B, imm) __extension__ ({ \
+ (__m128)__builtin_ia32_rndscaless_round_mask((__v4sf)(__m128)(A), \
+ (__v4sf)(__m128)(B), \
+ (__v4sf)_mm_setzero_ps(), \
+ (__mmask8)-1, (int)(imm), \
+ _MM_FROUND_CUR_DIRECTION); })
+
+#define _mm_mask_roundscale_ss(W, U, A, B, I) __extension__ ({ \
+ (__m128)__builtin_ia32_rndscaless_round_mask((__v4sf)(__m128)(A), \
+ (__v4sf)(__m128)(B), \
+ (__v4sf)(__m128)(W), \
+ (__mmask8)(U), (int)(I), \
+ _MM_FROUND_CUR_DIRECTION); })
+
+#define _mm_mask_roundscale_round_ss(W, U, A, B, I, R) __extension__ ({ \
+ (__m128)__builtin_ia32_rndscaless_round_mask((__v4sf)(__m128)(A), \
+ (__v4sf)(__m128)(B), \
+ (__v4sf)(__m128)(W), \
+ (__mmask8)(U), (int)(I), \
+ (int)(R)); })
+
+#define _mm_maskz_roundscale_ss(U, A, B, I) __extension__ ({ \
+ (__m128)__builtin_ia32_rndscaless_round_mask((__v4sf)(__m128)(A), \
+ (__v4sf)(__m128)(B), \
+ (__v4sf)_mm_setzero_ps(), \
+ (__mmask8)(U), (int)(I), \
+ _MM_FROUND_CUR_DIRECTION); })
+
+#define _mm_maskz_roundscale_round_ss(U, A, B, I, R) __extension__ ({ \
+ (__m128)__builtin_ia32_rndscaless_round_mask((__v4sf)(__m128)(A), \
+ (__v4sf)(__m128)(B), \
+ (__v4sf)_mm_setzero_ps(), \
+ (__mmask8)(U), (int)(I), \
+ (int)(R)); })
+
+#define _mm512_scalef_round_pd(A, B, R) __extension__ ({ \
+ (__m512d)__builtin_ia32_scalefpd512_mask((__v8df)(__m512d)(A), \
+ (__v8df)(__m512d)(B), \
+ (__v8df)_mm512_undefined_pd(), \
+ (__mmask8)-1, (int)(R)); })
+
+#define _mm512_mask_scalef_round_pd(W, U, A, B, R) __extension__ ({ \
+ (__m512d)__builtin_ia32_scalefpd512_mask((__v8df)(__m512d)(A), \
+ (__v8df)(__m512d)(B), \
+ (__v8df)(__m512d)(W), \
+ (__mmask8)(U), (int)(R)); })
+
+#define _mm512_maskz_scalef_round_pd(U, A, B, R) __extension__ ({ \
+ (__m512d)__builtin_ia32_scalefpd512_mask((__v8df)(__m512d)(A), \
+ (__v8df)(__m512d)(B), \
+ (__v8df)_mm512_setzero_pd(), \
+ (__mmask8)(U), (int)(R)); })
+
+static __inline__ __m512d __DEFAULT_FN_ATTRS
+_mm512_scalef_pd (__m512d __A, __m512d __B)
+{
+ return (__m512d) __builtin_ia32_scalefpd512_mask ((__v8df) __A,
+ (__v8df) __B,
+ (__v8df)
+ _mm512_undefined_pd (),
+ (__mmask8) -1,
+ _MM_FROUND_CUR_DIRECTION);
+}
+
+static __inline__ __m512d __DEFAULT_FN_ATTRS
+_mm512_mask_scalef_pd (__m512d __W, __mmask8 __U, __m512d __A, __m512d __B)
+{
+ return (__m512d) __builtin_ia32_scalefpd512_mask ((__v8df) __A,
+ (__v8df) __B,
+ (__v8df) __W,
+ (__mmask8) __U,
+ _MM_FROUND_CUR_DIRECTION);
+}
+
+static __inline__ __m512d __DEFAULT_FN_ATTRS
+_mm512_maskz_scalef_pd (__mmask8 __U, __m512d __A, __m512d __B)
+{
+ return (__m512d) __builtin_ia32_scalefpd512_mask ((__v8df) __A,
+ (__v8df) __B,
+ (__v8df)
+ _mm512_setzero_pd (),
+ (__mmask8) __U,
+ _MM_FROUND_CUR_DIRECTION);
+}
+
+#define _mm512_scalef_round_ps(A, B, R) __extension__ ({ \
+ (__m512)__builtin_ia32_scalefps512_mask((__v16sf)(__m512)(A), \
+ (__v16sf)(__m512)(B), \
+ (__v16sf)_mm512_undefined_ps(), \
+ (__mmask16)-1, (int)(R)); })
+
+#define _mm512_mask_scalef_round_ps(W, U, A, B, R) __extension__ ({ \
+ (__m512)__builtin_ia32_scalefps512_mask((__v16sf)(__m512)(A), \
+ (__v16sf)(__m512)(B), \
+ (__v16sf)(__m512)(W), \
+ (__mmask16)(U), (int)(R)); })
+
+#define _mm512_maskz_scalef_round_ps(U, A, B, R) __extension__ ({ \
+ (__m512)__builtin_ia32_scalefps512_mask((__v16sf)(__m512)(A), \
+ (__v16sf)(__m512)(B), \
+ (__v16sf)_mm512_setzero_ps(), \
+ (__mmask16)(U), (int)(R)); })
+
+static __inline__ __m512 __DEFAULT_FN_ATTRS
+_mm512_scalef_ps (__m512 __A, __m512 __B)
+{
+ return (__m512) __builtin_ia32_scalefps512_mask ((__v16sf) __A,
+ (__v16sf) __B,
+ (__v16sf)
+ _mm512_undefined_ps (),
+ (__mmask16) -1,
+ _MM_FROUND_CUR_DIRECTION);
+}
+
+static __inline__ __m512 __DEFAULT_FN_ATTRS
+_mm512_mask_scalef_ps (__m512 __W, __mmask16 __U, __m512 __A, __m512 __B)
+{
+ return (__m512) __builtin_ia32_scalefps512_mask ((__v16sf) __A,
+ (__v16sf) __B,
+ (__v16sf) __W,
+ (__mmask16) __U,
+ _MM_FROUND_CUR_DIRECTION);
+}
+
+static __inline__ __m512 __DEFAULT_FN_ATTRS
+_mm512_maskz_scalef_ps (__mmask16 __U, __m512 __A, __m512 __B)
+{
+ return (__m512) __builtin_ia32_scalefps512_mask ((__v16sf) __A,
+ (__v16sf) __B,
+ (__v16sf)
+ _mm512_setzero_ps (),
+ (__mmask16) __U,
+ _MM_FROUND_CUR_DIRECTION);
+}
+
+#define _mm_scalef_round_sd(A, B, R) __extension__ ({ \
+ (__m128d)__builtin_ia32_scalefsd_round_mask((__v2df)(__m128d)(A), \
+ (__v2df)(__m128d)(B), \
+ (__v2df)_mm_setzero_pd(), \
+ (__mmask8)-1, (int)(R)); })
+
+static __inline__ __m128d __DEFAULT_FN_ATTRS
+_mm_scalef_sd (__m128d __A, __m128d __B)
+{
+ return (__m128d) __builtin_ia32_scalefsd_round_mask ((__v2df) __A,
+ (__v2df) __B,
+ (__v2df) _mm_setzero_pd (),
+ (__mmask8) -1,
+ _MM_FROUND_CUR_DIRECTION);
+}
+
+static __inline__ __m128d __DEFAULT_FN_ATTRS
+_mm_mask_scalef_sd (__m128d __W, __mmask8 __U, __m128d __A, __m128d __B)
+{
+ return (__m128d) __builtin_ia32_scalefsd_round_mask ( (__v2df) __A,
+ (__v2df) __B,
+ (__v2df) __W,
+ (__mmask8) __U,
+ _MM_FROUND_CUR_DIRECTION);
+}
+
+#define _mm_mask_scalef_round_sd(W, U, A, B, R) __extension__ ({ \
+ (__m128d)__builtin_ia32_scalefsd_round_mask((__v2df)(__m128d)(A), \
+ (__v2df)(__m128d)(B), \
+ (__v2df)(__m128d)(W), \
+ (__mmask8)(U), (int)(R)); })
+
+static __inline__ __m128d __DEFAULT_FN_ATTRS
+_mm_maskz_scalef_sd (__mmask8 __U, __m128d __A, __m128d __B)
+{
+ return (__m128d) __builtin_ia32_scalefsd_round_mask ( (__v2df) __A,
+ (__v2df) __B,
+ (__v2df) _mm_setzero_pd (),
+ (__mmask8) __U,
+ _MM_FROUND_CUR_DIRECTION);
+}
+
+#define _mm_maskz_scalef_round_sd(U, A, B, R) __extension__ ({ \
+ (__m128d)__builtin_ia32_scalefsd_round_mask((__v2df)(__m128d)(A), \
+ (__v2df)(__m128d)(B), \
+ (__v2df)_mm_setzero_pd(), \
+ (__mmask8)(U), (int)(R)); })
+
+#define _mm_scalef_round_ss(A, B, R) __extension__ ({ \
+ (__m128)__builtin_ia32_scalefss_round_mask((__v4sf)(__m128)(A), \
+ (__v4sf)(__m128)(B), \
+ (__v4sf)_mm_setzero_ps(), \
+ (__mmask8)-1, (int)(R)); })
+
+static __inline__ __m128 __DEFAULT_FN_ATTRS
+_mm_scalef_ss (__m128 __A, __m128 __B)
+{
+ return (__m128) __builtin_ia32_scalefss_round_mask ((__v4sf) __A,
+ (__v4sf) __B,
+ (__v4sf) _mm_setzero_ps (),
+ (__mmask8) -1,
+ _MM_FROUND_CUR_DIRECTION);
+}
+
+static __inline__ __m128 __DEFAULT_FN_ATTRS
+_mm_mask_scalef_ss (__m128 __W, __mmask8 __U, __m128 __A, __m128 __B)
+{
+ return (__m128) __builtin_ia32_scalefss_round_mask ( (__v4sf) __A,
+ (__v4sf) __B,
+ (__v4sf) __W,
+ (__mmask8) __U,
+ _MM_FROUND_CUR_DIRECTION);
+}
+
+#define _mm_mask_scalef_round_ss(W, U, A, B, R) __extension__ ({ \
+ (__m128)__builtin_ia32_scalefss_round_mask((__v4sf)(__m128)(A), \
+ (__v4sf)(__m128)(B), \
+ (__v4sf)(__m128)(W), \
+ (__mmask8)(U), (int)(R)); })
+
+static __inline__ __m128 __DEFAULT_FN_ATTRS
+_mm_maskz_scalef_ss (__mmask8 __U, __m128 __A, __m128 __B)
+{
+ return (__m128) __builtin_ia32_scalefss_round_mask ( (__v4sf) __A,
+ (__v4sf) __B,
+ (__v4sf) _mm_setzero_ps (),
+ (__mmask8) __U,
+ _MM_FROUND_CUR_DIRECTION);
+}
+
+#define _mm_maskz_scalef_round_ss(U, A, B, R) __extension__ ({ \
+ (__m128)__builtin_ia32_scalefss_round_mask((__v4sf)(__m128)(A), \
+ (__v4sf)(__m128)(B), \
+ (__v4sf)_mm_setzero_ps(), \
+ (__mmask8)(U), (int)(R)); })
+
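+/* Arithmetic right shift by the immediate B; sign bits are shifted in,
+ * so counts of the element width or more yield all-sign-bit results. */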
+#define _mm512_srai_epi32(A, B) __extension__ ({ \
+ (__m512i)__builtin_ia32_psradi512_mask((__v16si)(__m512i)(A), (int)(B), \
+ (__v16si)_mm512_setzero_si512(), \
+ (__mmask16)-1); })
+
+#define _mm512_mask_srai_epi32(W, U, A, B) __extension__ ({ \
+ (__m512i)__builtin_ia32_psradi512_mask((__v16si)(__m512i)(A), (int)(B), \
+ (__v16si)(__m512i)(W), \
+ (__mmask16)(U)); })
+
+#define _mm512_maskz_srai_epi32(U, A, B) __extension__ ({ \
+ (__m512i)__builtin_ia32_psradi512_mask((__v16si)(__m512i)(A), (int)(B), \
+ (__v16si)_mm512_setzero_si512(), \
+ (__mmask16)(U)); })
+
+#define _mm512_srai_epi64(A, B) __extension__ ({ \
+ (__m512i)__builtin_ia32_psraqi512_mask((__v8di)(__m512i)(A), (int)(B), \
+ (__v8di)_mm512_setzero_si512(), \
+ (__mmask8)-1); })
+
+#define _mm512_mask_srai_epi64(W, U, A, B) __extension__ ({ \
+ (__m512i)__builtin_ia32_psraqi512_mask((__v8di)(__m512i)(A), (int)(B), \
+ (__v8di)(__m512i)(W), \
+ (__mmask8)(U)); })
+
+#define _mm512_maskz_srai_epi64(U, A, B) __extension__ ({ \
+ (__m512i)__builtin_ia32_psraqi512_mask((__v8di)(__m512i)(A), (int)(B), \
+ (__v8di)_mm512_setzero_si512(), \
+ (__mmask8)(U)); })
+
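+/* The x4/x2 shuffles below move whole 128-bit lanes: each 2-bit field of
+ * imm selects one of the four source lanes, with the low two destination
+ * lanes drawn from A and the high two from B. */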
+#define _mm512_shuffle_f32x4(A, B, imm) __extension__ ({ \
+ (__m512)__builtin_ia32_shuf_f32x4_mask((__v16sf)(__m512)(A), \
+ (__v16sf)(__m512)(B), (int)(imm), \
+ (__v16sf)_mm512_undefined_ps(), \
+ (__mmask16)-1); })
+
+#define _mm512_mask_shuffle_f32x4(W, U, A, B, imm) __extension__ ({ \
+ (__m512)__builtin_ia32_shuf_f32x4_mask((__v16sf)(__m512)(A), \
+ (__v16sf)(__m512)(B), (int)(imm), \
+ (__v16sf)(__m512)(W), \
+ (__mmask16)(U)); })
+
+#define _mm512_maskz_shuffle_f32x4(U, A, B, imm) __extension__ ({ \
+ (__m512)__builtin_ia32_shuf_f32x4_mask((__v16sf)(__m512)(A), \
+ (__v16sf)(__m512)(B), (int)(imm), \
+ (__v16sf)_mm512_setzero_ps(), \
+ (__mmask16)(U)); })
+
+#define _mm512_shuffle_f64x2(A, B, imm) __extension__ ({ \
+ (__m512d)__builtin_ia32_shuf_f64x2_mask((__v8df)(__m512d)(A), \
+ (__v8df)(__m512d)(B), (int)(imm), \
+ (__v8df)_mm512_undefined_pd(), \
+ (__mmask8)-1); })
+
+#define _mm512_mask_shuffle_f64x2(W, U, A, B, imm) __extension__ ({ \
+ (__m512d)__builtin_ia32_shuf_f64x2_mask((__v8df)(__m512d)(A), \
+ (__v8df)(__m512d)(B), (int)(imm), \
+ (__v8df)(__m512d)(W), \
+ (__mmask8)(U)); })
+
+#define _mm512_maskz_shuffle_f64x2(U, A, B, imm) __extension__ ({ \
+ (__m512d)__builtin_ia32_shuf_f64x2_mask((__v8df)(__m512d)(A), \
+ (__v8df)(__m512d)(B), (int)(imm), \
+ (__v8df)_mm512_setzero_pd(), \
+ (__mmask8)(U)); })
+
+#define _mm512_shuffle_i32x4(A, B, imm) __extension__ ({ \
+ (__m512i)__builtin_ia32_shuf_i32x4_mask((__v16si)(__m512i)(A), \
+ (__v16si)(__m512i)(B), (int)(imm), \
+ (__v16si)_mm512_setzero_si512(), \
+ (__mmask16)-1); })
+
+#define _mm512_mask_shuffle_i32x4(W, U, A, B, imm) __extension__ ({ \
+ (__m512i)__builtin_ia32_shuf_i32x4_mask((__v16si)(__m512i)(A), \
+ (__v16si)(__m512i)(B), (int)(imm), \
+ (__v16si)(__m512i)(W), \
+ (__mmask16)(U)); })
+
+#define _mm512_maskz_shuffle_i32x4(U, A, B, imm) __extension__ ({ \
+ (__m512i)__builtin_ia32_shuf_i32x4_mask((__v16si)(__m512i)(A), \
+ (__v16si)(__m512i)(B), (int)(imm), \
+ (__v16si)_mm512_setzero_si512(), \
+ (__mmask16)(U)); })
+
+#define _mm512_shuffle_i64x2(A, B, imm) __extension__ ({ \
+ (__m512i)__builtin_ia32_shuf_i64x2_mask((__v8di)(__m512i)(A), \
+ (__v8di)(__m512i)(B), (int)(imm), \
+ (__v8di)_mm512_setzero_si512(), \
+ (__mmask8)-1); })
+
+#define _mm512_mask_shuffle_i64x2(W, U, A, B, imm) __extension__ ({ \
+ (__m512i)__builtin_ia32_shuf_i64x2_mask((__v8di)(__m512i)(A), \
+ (__v8di)(__m512i)(B), (int)(imm), \
+ (__v8di)(__m512i)(W), \
+ (__mmask8)(U)); })
+
+#define _mm512_maskz_shuffle_i64x2(U, A, B, imm) __extension__ ({ \
+ (__m512i)__builtin_ia32_shuf_i64x2_mask((__v8di)(__m512i)(A), \
+ (__v8di)(__m512i)(B), (int)(imm), \
+ (__v8di)_mm512_setzero_si512(), \
+ (__mmask8)(U)); })
+
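+/* _mm512_shuffle_pd/_ps lower directly to __builtin_shufflevector so the
+ * shuffle constant-folds: the indices below are derived from 1- or 2-bit
+ * fields of M and select elements of A and B within each 128-bit lane.
+ * The mask variants are a select over the unmasked shuffle. */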
+#define _mm512_shuffle_pd(A, B, M) __extension__ ({ \
+ (__m512d)__builtin_shufflevector((__v8df)(__m512d)(A), \
+ (__v8df)(__m512d)(B), \
+ 0 + (((M) >> 0) & 0x1), \
+ 8 + (((M) >> 1) & 0x1), \
+ 2 + (((M) >> 2) & 0x1), \
+ 10 + (((M) >> 3) & 0x1), \
+ 4 + (((M) >> 4) & 0x1), \
+ 12 + (((M) >> 5) & 0x1), \
+ 6 + (((M) >> 6) & 0x1), \
+ 14 + (((M) >> 7) & 0x1)); })
+
+#define _mm512_mask_shuffle_pd(W, U, A, B, M) __extension__ ({ \
+ (__m512d)__builtin_ia32_selectpd_512((__mmask8)(U), \
+ (__v8df)_mm512_shuffle_pd((A), (B), (M)), \
+ (__v8df)(__m512d)(W)); })
+
+#define _mm512_maskz_shuffle_pd(U, A, B, M) __extension__ ({ \
+ (__m512d)__builtin_ia32_selectpd_512((__mmask8)(U), \
+ (__v8df)_mm512_shuffle_pd((A), (B), (M)), \
+ (__v8df)_mm512_setzero_pd()); })
+
+#define _mm512_shuffle_ps(A, B, M) __extension__ ({ \
+ (__m512)__builtin_shufflevector((__v16sf)(__m512)(A), \
+ (__v16sf)(__m512)(B), \
+ 0 + (((M) >> 0) & 0x3), \
+ 0 + (((M) >> 2) & 0x3), \
+ 16 + (((M) >> 4) & 0x3), \
+ 16 + (((M) >> 6) & 0x3), \
+ 4 + (((M) >> 0) & 0x3), \
+ 4 + (((M) >> 2) & 0x3), \
+ 20 + (((M) >> 4) & 0x3), \
+ 20 + (((M) >> 6) & 0x3), \
+ 8 + (((M) >> 0) & 0x3), \
+ 8 + (((M) >> 2) & 0x3), \
+ 24 + (((M) >> 4) & 0x3), \
+ 24 + (((M) >> 6) & 0x3), \
+ 12 + (((M) >> 0) & 0x3), \
+ 12 + (((M) >> 2) & 0x3), \
+ 28 + (((M) >> 4) & 0x3), \
+ 28 + (((M) >> 6) & 0x3)); })
+
+#define _mm512_mask_shuffle_ps(W, U, A, B, M) __extension__ ({ \
+ (__m512)__builtin_ia32_selectps_512((__mmask16)(U), \
+ (__v16sf)_mm512_shuffle_ps((A), (B), (M)), \
+ (__v16sf)(__m512)(W)); })
+
+#define _mm512_maskz_shuffle_ps(U, A, B, M) __extension__ ({ \
+ (__m512)__builtin_ia32_selectps_512((__mmask16)(U), \
+ (__v16sf)_mm512_shuffle_ps((A), (B), (M)), \
+ (__v16sf)_mm512_setzero_ps()); })
+
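+/* Scalar square root: the low element is sqrt(B[0]) (subject to masking),
+ * while the remaining elements are copied from A. */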
+#define _mm_sqrt_round_sd(A, B, R) __extension__ ({ \
+ (__m128d)__builtin_ia32_sqrtsd_round_mask((__v2df)(__m128d)(A), \
+ (__v2df)(__m128d)(B), \
+ (__v2df)_mm_setzero_pd(), \
+ (__mmask8)-1, (int)(R)); })
+
+static __inline__ __m128d __DEFAULT_FN_ATTRS
+_mm_mask_sqrt_sd (__m128d __W, __mmask8 __U, __m128d __A, __m128d __B)
+{
+ return (__m128d) __builtin_ia32_sqrtsd_round_mask ( (__v2df) __A,
+ (__v2df) __B,
+ (__v2df) __W,
+ (__mmask8) __U,
+ _MM_FROUND_CUR_DIRECTION);
+}
+
+#define _mm_mask_sqrt_round_sd(W, U, A, B, R) __extension__ ({ \
+ (__m128d)__builtin_ia32_sqrtsd_round_mask((__v2df)(__m128d)(A), \
+ (__v2df)(__m128d)(B), \
+ (__v2df)(__m128d)(W), \
+ (__mmask8)(U), (int)(R)); })
+
+static __inline__ __m128d __DEFAULT_FN_ATTRS
+_mm_maskz_sqrt_sd (__mmask8 __U, __m128d __A, __m128d __B)
+{
+ return (__m128d) __builtin_ia32_sqrtsd_round_mask ( (__v2df) __A,
+ (__v2df) __B,
+ (__v2df) _mm_setzero_pd (),
+ (__mmask8) __U,
+ _MM_FROUND_CUR_DIRECTION);
+}
+
+#define _mm_maskz_sqrt_round_sd(U, A, B, R) __extension__ ({ \
+ (__m128d)__builtin_ia32_sqrtsd_round_mask((__v2df)(__m128d)(A), \
+ (__v2df)(__m128d)(B), \
+ (__v2df)_mm_setzero_pd(), \
+ (__mmask8)(U), (int)(R)); })
+
+#define _mm_sqrt_round_ss(A, B, R) __extension__ ({ \
+ (__m128)__builtin_ia32_sqrtss_round_mask((__v4sf)(__m128)(A), \
+ (__v4sf)(__m128)(B), \
+ (__v4sf)_mm_setzero_ps(), \
+ (__mmask8)-1, (int)(R)); })
+
+static __inline__ __m128 __DEFAULT_FN_ATTRS
+_mm_mask_sqrt_ss (__m128 __W, __mmask8 __U, __m128 __A, __m128 __B)
+{
+ return (__m128) __builtin_ia32_sqrtss_round_mask ( (__v4sf) __A,
+ (__v4sf) __B,
+ (__v4sf) __W,
+ (__mmask8) __U,
+ _MM_FROUND_CUR_DIRECTION);
+}
+
+#define _mm_mask_sqrt_round_ss(W, U, A, B, R) __extension__ ({ \
+ (__m128)__builtin_ia32_sqrtss_round_mask((__v4sf)(__m128)(A), \
+ (__v4sf)(__m128)(B), \
+ (__v4sf)(__m128)(W), (__mmask8)(U), \
+ (int)(R)); })
+
+static __inline__ __m128 __DEFAULT_FN_ATTRS
+_mm_maskz_sqrt_ss (__mmask8 __U, __m128 __A, __m128 __B)
+{
+ return (__m128) __builtin_ia32_sqrtss_round_mask ( (__v4sf) __A,
+ (__v4sf) __B,
+ (__v4sf) _mm_setzero_ps (),
+ (__mmask8) __U,
+ _MM_FROUND_CUR_DIRECTION);
+}
+
+#define _mm_maskz_sqrt_round_ss(U, A, B, R) __extension__ ({ \
+ (__m128)__builtin_ia32_sqrtss_round_mask((__v4sf)(__m128)(A), \
+ (__v4sf)(__m128)(B), \
+ (__v4sf)_mm_setzero_ps(), \
+ (__mmask8)(U), (int)(R)); })
+
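+/* Block broadcasts: f32x4/i32x4 replicate a 128-bit source four times
+ * across the 512-bit result; f64x4/i64x4 replicate a 256-bit source twice. */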
+static __inline__ __m512 __DEFAULT_FN_ATTRS
+_mm512_broadcast_f32x4 (__m128 __A)
+{
+ return (__m512) __builtin_ia32_broadcastf32x4_512 ((__v4sf) __A,
+ (__v16sf)
+ _mm512_undefined_ps (),
+ (__mmask16) -1);
+}
+
+static __inline__ __m512 __DEFAULT_FN_ATTRS
+_mm512_mask_broadcast_f32x4 (__m512 __O, __mmask16 __M, __m128 __A)
+{
+ return (__m512) __builtin_ia32_broadcastf32x4_512 ((__v4sf) __A,
+ (__v16sf) __O,
+ __M);
+}
+
+static __inline__ __m512 __DEFAULT_FN_ATTRS
+_mm512_maskz_broadcast_f32x4 (__mmask16 __M, __m128 __A)
+{
+ return (__m512) __builtin_ia32_broadcastf32x4_512 ((__v4sf) __A,
+ (__v16sf)
+ _mm512_setzero_ps (),
+ __M);
+}
+
+static __inline__ __m512d __DEFAULT_FN_ATTRS
+_mm512_broadcast_f64x4 (__m256d __A)
+{
+ return (__m512d) __builtin_ia32_broadcastf64x4_512 ((__v4df) __A,
+ (__v8df)
+ _mm512_undefined_pd (),
+ (__mmask8) -1);
+}
+
+static __inline__ __m512d __DEFAULT_FN_ATTRS
+_mm512_mask_broadcast_f64x4 (__m512d __O, __mmask8 __M, __m256d __A)
+{
+ return (__m512d) __builtin_ia32_broadcastf64x4_512 ((__v4df) __A,
+ (__v8df) __O,
+ __M);
+}
+
+static __inline__ __m512d __DEFAULT_FN_ATTRS
+_mm512_maskz_broadcast_f64x4 (__mmask8 __M, __m256d __A)
+{
+ return (__m512d) __builtin_ia32_broadcastf64x4_512 ((__v4df) __A,
+ (__v8df)
+ _mm512_setzero_pd (),
+ __M);
+}
+
+static __inline__ __m512i __DEFAULT_FN_ATTRS
+_mm512_broadcast_i32x4 (__m128i __A)
+{
+ return (__m512i) __builtin_ia32_broadcasti32x4_512 ((__v4si) __A,
+ (__v16si)
+ _mm512_undefined_epi32 (),
+ (__mmask16) -1);
+}
+
+static __inline__ __m512i __DEFAULT_FN_ATTRS
+_mm512_mask_broadcast_i32x4 (__m512i __O, __mmask16 __M, __m128i __A)
+{
+ return (__m512i) __builtin_ia32_broadcasti32x4_512 ((__v4si) __A,
+ (__v16si) __O,
+ __M);
+}
+
+static __inline__ __m512i __DEFAULT_FN_ATTRS
+_mm512_maskz_broadcast_i32x4 (__mmask16 __M, __m128i __A)
+{
+ return (__m512i) __builtin_ia32_broadcasti32x4_512 ((__v4si) __A,
+ (__v16si)
+ _mm512_setzero_si512 (),
+ __M);
+}
+
+static __inline__ __m512i __DEFAULT_FN_ATTRS
+_mm512_broadcast_i64x4 (__m256i __A)
+{
+ return (__m512i) __builtin_ia32_broadcasti64x4_512 ((__v4di) __A,
+ (__v8di)
+ _mm512_undefined_epi32 (),
+ (__mmask8) -1);
+}
+
+static __inline__ __m512i __DEFAULT_FN_ATTRS
+_mm512_mask_broadcast_i64x4 (__m512i __O, __mmask8 __M, __m256i __A)
+{
+ return (__m512i) __builtin_ia32_broadcasti64x4_512 ((__v4di) __A,
+ (__v8di) __O,
+ __M);
+}
+
+static __inline__ __m512i __DEFAULT_FN_ATTRS
+_mm512_maskz_broadcast_i64x4 (__mmask8 __M, __m256i __A)
+{
+ return (__m512i) __builtin_ia32_broadcasti64x4_512 ((__v4di) __A,
+ (__v8di)
+ _mm512_setzero_si512 (),
+ __M);
+}
+
+static __inline__ __m512d __DEFAULT_FN_ATTRS
+_mm512_mask_broadcastsd_pd (__m512d __O, __mmask8 __M, __m128d __A)
+{
+ return (__m512d)__builtin_ia32_selectpd_512(__M,
+ (__v8df) _mm512_broadcastsd_pd(__A),
+ (__v8df) __O);
+}
+
+static __inline__ __m512d __DEFAULT_FN_ATTRS
+_mm512_maskz_broadcastsd_pd (__mmask8 __M, __m128d __A)
+{
+ return (__m512d)__builtin_ia32_selectpd_512(__M,
+ (__v8df) _mm512_broadcastsd_pd(__A),
+ (__v8df) _mm512_setzero_pd());
+}
+
+static __inline__ __m512 __DEFAULT_FN_ATTRS
+_mm512_mask_broadcastss_ps (__m512 __O, __mmask16 __M, __m128 __A)
+{
+ return (__m512)__builtin_ia32_selectps_512(__M,
+ (__v16sf) _mm512_broadcastss_ps(__A),
+ (__v16sf) __O);
+}
+
+static __inline__ __m512 __DEFAULT_FN_ATTRS
+_mm512_maskz_broadcastss_ps (__mmask16 __M, __m128 __A)
+{
+ return (__m512)__builtin_ia32_selectps_512(__M,
+ (__v16sf) _mm512_broadcastss_ps(__A),
+ (__v16sf) _mm512_setzero_ps());
+}
+
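+/* Narrowing conversions (VPMOV*): cvtsepi* saturate as signed, cvtusepi*
+ * saturate as unsigned, and cvtepi* truncate. The *_storeu_* forms write
+ * only the mask-selected converted elements to unaligned memory. */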
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm512_cvtsepi32_epi8 (__m512i __A)
+{
+ return (__m128i) __builtin_ia32_pmovsdb512_mask ((__v16si) __A,
+ (__v16qi) _mm_undefined_si128 (),
+ (__mmask16) -1);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm512_mask_cvtsepi32_epi8 (__m128i __O, __mmask16 __M, __m512i __A)
+{
+ return (__m128i) __builtin_ia32_pmovsdb512_mask ((__v16si) __A,
+ (__v16qi) __O, __M);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm512_maskz_cvtsepi32_epi8 (__mmask16 __M, __m512i __A)
+{
+ return (__m128i) __builtin_ia32_pmovsdb512_mask ((__v16si) __A,
+ (__v16qi) _mm_setzero_si128 (),
+ __M);
+}
+
+static __inline__ void __DEFAULT_FN_ATTRS
+_mm512_mask_cvtsepi32_storeu_epi8 (void * __P, __mmask16 __M, __m512i __A)
+{
+ __builtin_ia32_pmovsdb512mem_mask ((__v16qi *) __P, (__v16si) __A, __M);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS
+_mm512_cvtsepi32_epi16 (__m512i __A)
+{
+ return (__m256i) __builtin_ia32_pmovsdw512_mask ((__v16si) __A,
+ (__v16hi) _mm256_undefined_si256 (),
+ (__mmask16) -1);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS
+_mm512_mask_cvtsepi32_epi16 (__m256i __O, __mmask16 __M, __m512i __A)
+{
+ return (__m256i) __builtin_ia32_pmovsdw512_mask ((__v16si) __A,
+ (__v16hi) __O, __M);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS
+_mm512_maskz_cvtsepi32_epi16 (__mmask16 __M, __m512i __A)
+{
+ return (__m256i) __builtin_ia32_pmovsdw512_mask ((__v16si) __A,
+ (__v16hi) _mm256_setzero_si256 (),
+ __M);
+}
+
+static __inline__ void __DEFAULT_FN_ATTRS
+_mm512_mask_cvtsepi32_storeu_epi16 (void *__P, __mmask16 __M, __m512i __A)
+{
+ __builtin_ia32_pmovsdw512mem_mask ((__v16hi*) __P, (__v16si) __A, __M);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm512_cvtsepi64_epi8 (__m512i __A)
+{
+ return (__m128i) __builtin_ia32_pmovsqb512_mask ((__v8di) __A,
+ (__v16qi) _mm_undefined_si128 (),
+ (__mmask8) -1);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm512_mask_cvtsepi64_epi8 (__m128i __O, __mmask8 __M, __m512i __A)
+{
+ return (__m128i) __builtin_ia32_pmovsqb512_mask ((__v8di) __A,
+ (__v16qi) __O, __M);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm512_maskz_cvtsepi64_epi8 (__mmask8 __M, __m512i __A)
+{
+ return (__m128i) __builtin_ia32_pmovsqb512_mask ((__v8di) __A,
+ (__v16qi) _mm_setzero_si128 (),
+ __M);
+}
+
+static __inline__ void __DEFAULT_FN_ATTRS
+_mm512_mask_cvtsepi64_storeu_epi8 (void * __P, __mmask8 __M, __m512i __A)
+{
+ __builtin_ia32_pmovsqb512mem_mask ((__v16qi *) __P, (__v8di) __A, __M);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS
+_mm512_cvtsepi64_epi32 (__m512i __A)
+{
+ return (__m256i) __builtin_ia32_pmovsqd512_mask ((__v8di) __A,
+ (__v8si) _mm256_undefined_si256 (),
+ (__mmask8) -1);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS
+_mm512_mask_cvtsepi64_epi32 (__m256i __O, __mmask8 __M, __m512i __A)
+{
+ return (__m256i) __builtin_ia32_pmovsqd512_mask ((__v8di) __A,
+ (__v8si) __O, __M);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS
+_mm512_maskz_cvtsepi64_epi32 (__mmask8 __M, __m512i __A)
+{
+ return (__m256i) __builtin_ia32_pmovsqd512_mask ((__v8di) __A,
+ (__v8si) _mm256_setzero_si256 (),
+ __M);
+}
+
+static __inline__ void __DEFAULT_FN_ATTRS
+_mm512_mask_cvtsepi64_storeu_epi32 (void *__P, __mmask8 __M, __m512i __A)
+{
+ __builtin_ia32_pmovsqd512mem_mask ((__v8si *) __P, (__v8di) __A, __M);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm512_cvtsepi64_epi16 (__m512i __A)
+{
+ return (__m128i) __builtin_ia32_pmovsqw512_mask ((__v8di) __A,
+ (__v8hi) _mm_undefined_si128 (),
+ (__mmask8) -1);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm512_mask_cvtsepi64_epi16 (__m128i __O, __mmask8 __M, __m512i __A)
+{
+ return (__m128i) __builtin_ia32_pmovsqw512_mask ((__v8di) __A,
+ (__v8hi) __O, __M);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm512_maskz_cvtsepi64_epi16 (__mmask8 __M, __m512i __A)
+{
+ return (__m128i) __builtin_ia32_pmovsqw512_mask ((__v8di) __A,
+ (__v8hi) _mm_setzero_si128 (),
+ __M);
+}
+
+static __inline__ void __DEFAULT_FN_ATTRS
+_mm512_mask_cvtsepi64_storeu_epi16 (void * __P, __mmask8 __M, __m512i __A)
+{
+ __builtin_ia32_pmovsqw512mem_mask ((__v8hi *) __P, (__v8di) __A, __M);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm512_cvtusepi32_epi8 (__m512i __A)
+{
+ return (__m128i) __builtin_ia32_pmovusdb512_mask ((__v16si) __A,
+ (__v16qi) _mm_undefined_si128 (),
+ (__mmask16) -1);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm512_mask_cvtusepi32_epi8 (__m128i __O, __mmask16 __M, __m512i __A)
+{
+ return (__m128i) __builtin_ia32_pmovusdb512_mask ((__v16si) __A,
+ (__v16qi) __O,
+ __M);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm512_maskz_cvtusepi32_epi8 (__mmask16 __M, __m512i __A)
+{
+ return (__m128i) __builtin_ia32_pmovusdb512_mask ((__v16si) __A,
+ (__v16qi) _mm_setzero_si128 (),
+ __M);
+}
+
+static __inline__ void __DEFAULT_FN_ATTRS
+_mm512_mask_cvtusepi32_storeu_epi8 (void * __P, __mmask16 __M, __m512i __A)
+{
+ __builtin_ia32_pmovusdb512mem_mask ((__v16qi *) __P, (__v16si) __A, __M);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS
+_mm512_cvtusepi32_epi16 (__m512i __A)
+{
+ return (__m256i) __builtin_ia32_pmovusdw512_mask ((__v16si) __A,
+ (__v16hi) _mm256_undefined_si256 (),
+ (__mmask16) -1);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS
+_mm512_mask_cvtusepi32_epi16 (__m256i __O, __mmask16 __M, __m512i __A)
+{
+ return (__m256i) __builtin_ia32_pmovusdw512_mask ((__v16si) __A,
+ (__v16hi) __O,
+ __M);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS
+_mm512_maskz_cvtusepi32_epi16 (__mmask16 __M, __m512i __A)
+{
+ return (__m256i) __builtin_ia32_pmovusdw512_mask ((__v16si) __A,
+ (__v16hi) _mm256_setzero_si256 (),
+ __M);
+}
+
+static __inline__ void __DEFAULT_FN_ATTRS
+_mm512_mask_cvtusepi32_storeu_epi16 (void *__P, __mmask16 __M, __m512i __A)
+{
+ __builtin_ia32_pmovusdw512mem_mask ((__v16hi*) __P, (__v16si) __A, __M);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm512_cvtusepi64_epi8 (__m512i __A)
+{
+ return (__m128i) __builtin_ia32_pmovusqb512_mask ((__v8di) __A,
+ (__v16qi) _mm_undefined_si128 (),
+ (__mmask8) -1);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm512_mask_cvtusepi64_epi8 (__m128i __O, __mmask8 __M, __m512i __A)
+{
+ return (__m128i) __builtin_ia32_pmovusqb512_mask ((__v8di) __A,
+ (__v16qi) __O,
+ __M);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm512_maskz_cvtusepi64_epi8 (__mmask8 __M, __m512i __A)
+{
+ return (__m128i) __builtin_ia32_pmovusqb512_mask ((__v8di) __A,
+ (__v16qi) _mm_setzero_si128 (),
+ __M);
+}
+
+static __inline__ void __DEFAULT_FN_ATTRS
+_mm512_mask_cvtusepi64_storeu_epi8 (void * __P, __mmask8 __M, __m512i __A)
+{
+ __builtin_ia32_pmovusqb512mem_mask ((__v16qi *) __P, (__v8di) __A, __M);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS
+_mm512_cvtusepi64_epi32 (__m512i __A)
+{
+ return (__m256i) __builtin_ia32_pmovusqd512_mask ((__v8di) __A,
+ (__v8si) _mm256_undefined_si256 (),
+ (__mmask8) -1);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS
+_mm512_mask_cvtusepi64_epi32 (__m256i __O, __mmask8 __M, __m512i __A)
+{
+ return (__m256i) __builtin_ia32_pmovusqd512_mask ((__v8di) __A,
+ (__v8si) __O, __M);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS
+_mm512_maskz_cvtusepi64_epi32 (__mmask8 __M, __m512i __A)
+{
+ return (__m256i) __builtin_ia32_pmovusqd512_mask ((__v8di) __A,
+ (__v8si) _mm256_setzero_si256 (),
+ __M);
+}
+
+static __inline__ void __DEFAULT_FN_ATTRS
+_mm512_mask_cvtusepi64_storeu_epi32 (void* __P, __mmask8 __M, __m512i __A)
+{
+ __builtin_ia32_pmovusqd512mem_mask ((__v8si*) __P, (__v8di) __A, __M);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm512_cvtusepi64_epi16 (__m512i __A)
+{
+ return (__m128i) __builtin_ia32_pmovusqw512_mask ((__v8di) __A,
+ (__v8hi) _mm_undefined_si128 (),
+ (__mmask8) -1);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm512_mask_cvtusepi64_epi16 (__m128i __O, __mmask8 __M, __m512i __A)
+{
+ return (__m128i) __builtin_ia32_pmovusqw512_mask ((__v8di) __A,
+ (__v8hi) __O, __M);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm512_maskz_cvtusepi64_epi16 (__mmask8 __M, __m512i __A)
+{
+ return (__m128i) __builtin_ia32_pmovusqw512_mask ((__v8di) __A,
+ (__v8hi) _mm_setzero_si128 (),
+ __M);
+}
+
+static __inline__ void __DEFAULT_FN_ATTRS
+_mm512_mask_cvtusepi64_storeu_epi16 (void *__P, __mmask8 __M, __m512i __A)
+{
+ __builtin_ia32_pmovusqw512mem_mask ((__v8hi*) __P, (__v8di) __A, __M);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm512_cvtepi32_epi8 (__m512i __A)
+{
+ return (__m128i) __builtin_ia32_pmovdb512_mask ((__v16si) __A,
+ (__v16qi) _mm_undefined_si128 (),
+ (__mmask16) -1);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm512_mask_cvtepi32_epi8 (__m128i __O, __mmask16 __M, __m512i __A)
+{
+ return (__m128i) __builtin_ia32_pmovdb512_mask ((__v16si) __A,
+ (__v16qi) __O, __M);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm512_maskz_cvtepi32_epi8 (__mmask16 __M, __m512i __A)
+{
+ return (__m128i) __builtin_ia32_pmovdb512_mask ((__v16si) __A,
+ (__v16qi) _mm_setzero_si128 (),
+ __M);
+}
+
+static __inline__ void __DEFAULT_FN_ATTRS
+_mm512_mask_cvtepi32_storeu_epi8 (void * __P, __mmask16 __M, __m512i __A)
+{
+ __builtin_ia32_pmovdb512mem_mask ((__v16qi *) __P, (__v16si) __A, __M);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS
+_mm512_cvtepi32_epi16 (__m512i __A)
+{
+ return (__m256i) __builtin_ia32_pmovdw512_mask ((__v16si) __A,
+ (__v16hi) _mm256_undefined_si256 (),
+ (__mmask16) -1);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS
+_mm512_mask_cvtepi32_epi16 (__m256i __O, __mmask16 __M, __m512i __A)
+{
+ return (__m256i) __builtin_ia32_pmovdw512_mask ((__v16si) __A,
+ (__v16hi) __O, __M);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS
+_mm512_maskz_cvtepi32_epi16 (__mmask16 __M, __m512i __A)
+{
+ return (__m256i) __builtin_ia32_pmovdw512_mask ((__v16si) __A,
+ (__v16hi) _mm256_setzero_si256 (),
+ __M);
+}
+
+static __inline__ void __DEFAULT_FN_ATTRS
+_mm512_mask_cvtepi32_storeu_epi16 (void * __P, __mmask16 __M, __m512i __A)
+{
+ __builtin_ia32_pmovdw512mem_mask ((__v16hi *) __P, (__v16si) __A, __M);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm512_cvtepi64_epi8 (__m512i __A)
+{
+ return (__m128i) __builtin_ia32_pmovqb512_mask ((__v8di) __A,
+ (__v16qi) _mm_undefined_si128 (),
+ (__mmask8) -1);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm512_mask_cvtepi64_epi8 (__m128i __O, __mmask8 __M, __m512i __A)
+{
+ return (__m128i) __builtin_ia32_pmovqb512_mask ((__v8di) __A,
+ (__v16qi) __O, __M);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm512_maskz_cvtepi64_epi8 (__mmask8 __M, __m512i __A)
+{
+ return (__m128i) __builtin_ia32_pmovqb512_mask ((__v8di) __A,
+ (__v16qi) _mm_setzero_si128 (),
+ __M);
+}
+
+static __inline__ void __DEFAULT_FN_ATTRS
+_mm512_mask_cvtepi64_storeu_epi8 (void * __P, __mmask8 __M, __m512i __A)
+{
+ __builtin_ia32_pmovqb512mem_mask ((__v16qi *) __P, (__v8di) __A, __M);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS
+_mm512_cvtepi64_epi32 (__m512i __A)
+{
+ return (__m256i) __builtin_ia32_pmovqd512_mask ((__v8di) __A,
+ (__v8si) _mm256_undefined_si256 (),
+ (__mmask8) -1);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS
+_mm512_mask_cvtepi64_epi32 (__m256i __O, __mmask8 __M, __m512i __A)
+{
+ return (__m256i) __builtin_ia32_pmovqd512_mask ((__v8di) __A,
+ (__v8si) __O, __M);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS
+_mm512_maskz_cvtepi64_epi32 (__mmask8 __M, __m512i __A)
+{
+ return (__m256i) __builtin_ia32_pmovqd512_mask ((__v8di) __A,
+ (__v8si) _mm256_setzero_si256 (),
+ __M);
+}
+
+static __inline__ void __DEFAULT_FN_ATTRS
+_mm512_mask_cvtepi64_storeu_epi32 (void* __P, __mmask8 __M, __m512i __A)
+{
+ __builtin_ia32_pmovqd512mem_mask ((__v8si *) __P, (__v8di) __A, __M);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm512_cvtepi64_epi16 (__m512i __A)
+{
+ return (__m128i) __builtin_ia32_pmovqw512_mask ((__v8di) __A,
+ (__v8hi) _mm_undefined_si128 (),
+ (__mmask8) -1);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm512_mask_cvtepi64_epi16 (__m128i __O, __mmask8 __M, __m512i __A)
+{
+ return (__m128i) __builtin_ia32_pmovqw512_mask ((__v8di) __A,
+ (__v8hi) __O, __M);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm512_maskz_cvtepi64_epi16 (__mmask8 __M, __m512i __A)
+{
+ return (__m128i) __builtin_ia32_pmovqw512_mask ((__v8di) __A,
+ (__v8hi) _mm_setzero_si128 (),
+ __M);
+}
+
+static __inline__ void __DEFAULT_FN_ATTRS
+_mm512_mask_cvtepi64_storeu_epi16 (void *__P, __mmask8 __M, __m512i __A)
+{
+ __builtin_ia32_pmovqw512mem_mask ((__v8hi *) __P, (__v8di) __A, __M);
+}
+
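+/* extracti32x4/extracti64x4 pull out the 128- or 256-bit block selected by
+ * imm; the insert* macros further down replace that block instead. */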
+#define _mm512_extracti32x4_epi32(A, imm) __extension__ ({ \
+ (__m128i)__builtin_ia32_extracti32x4_mask((__v16si)(__m512i)(A), (int)(imm), \
+ (__v4si)_mm_undefined_si128(), \
+ (__mmask8)-1); })
+
+#define _mm512_mask_extracti32x4_epi32(W, U, A, imm) __extension__ ({ \
+ (__m128i)__builtin_ia32_extracti32x4_mask((__v16si)(__m512i)(A), (int)(imm), \
+ (__v4si)(__m128i)(W), \
+ (__mmask8)(U)); })
+
+#define _mm512_maskz_extracti32x4_epi32(U, A, imm) __extension__ ({ \
+ (__m128i)__builtin_ia32_extracti32x4_mask((__v16si)(__m512i)(A), (int)(imm), \
+ (__v4si)_mm_setzero_si128(), \
+ (__mmask8)(U)); })
+
+#define _mm512_extracti64x4_epi64(A, imm) __extension__ ({ \
+ (__m256i)__builtin_ia32_extracti64x4_mask((__v8di)(__m512i)(A), (int)(imm), \
+ (__v4di)_mm256_undefined_si256(), \
+ (__mmask8)-1); })
+
+#define _mm512_mask_extracti64x4_epi64(W, U, A, imm) __extension__ ({ \
+ (__m256i)__builtin_ia32_extracti64x4_mask((__v8di)(__m512i)(A), (int)(imm), \
+ (__v4di)(__m256i)(W), \
+ (__mmask8)(U)); })
+
+#define _mm512_maskz_extracti64x4_epi64(U, A, imm) __extension__ ({ \
+ (__m256i)__builtin_ia32_extracti64x4_mask((__v8di)(__m512i)(A), (int)(imm), \
+ (__v4di)_mm256_setzero_si256(), \
+ (__mmask8)(U)); })
+
+#define _mm512_insertf64x4(A, B, imm) __extension__ ({ \
+ (__m512d)__builtin_ia32_insertf64x4_mask((__v8df)(__m512d)(A), \
+ (__v4df)(__m256d)(B), (int)(imm), \
+ (__v8df)_mm512_undefined_pd(), \
+ (__mmask8)-1); })
+
+#define _mm512_mask_insertf64x4(W, U, A, B, imm) __extension__ ({ \
+ (__m512d)__builtin_ia32_insertf64x4_mask((__v8df)(__m512d)(A), \
+ (__v4df)(__m256d)(B), (int)(imm), \
+ (__v8df)(__m512d)(W), \
+ (__mmask8)(U)); })
+
+#define _mm512_maskz_insertf64x4(U, A, B, imm) __extension__ ({ \
+ (__m512d)__builtin_ia32_insertf64x4_mask((__v8df)(__m512d)(A), \
+ (__v4df)(__m256d)(B), (int)(imm), \
+ (__v8df)_mm512_setzero_pd(), \
+ (__mmask8)(U)); })
+
+#define _mm512_inserti64x4(A, B, imm) __extension__ ({ \
+ (__m512i)__builtin_ia32_inserti64x4_mask((__v8di)(__m512i)(A), \
+ (__v4di)(__m256i)(B), (int)(imm), \
+ (__v8di)_mm512_setzero_si512(), \
+ (__mmask8)-1); })
+
+#define _mm512_mask_inserti64x4(W, U, A, B, imm) __extension__ ({ \
+ (__m512i)__builtin_ia32_inserti64x4_mask((__v8di)(__m512i)(A), \
+ (__v4di)(__m256i)(B), (int)(imm), \
+ (__v8di)(__m512i)(W), \
+ (__mmask8)(U)); })
+
+#define _mm512_maskz_inserti64x4(U, A, B, imm) __extension__ ({ \
+ (__m512i)__builtin_ia32_inserti64x4_mask((__v8di)(__m512i)(A), \
+ (__v4di)(__m256i)(B), (int)(imm), \
+ (__v8di)_mm512_setzero_si512(), \
+ (__mmask8)(U)); })
+
+#define _mm512_insertf32x4(A, B, imm) __extension__ ({ \
+ (__m512)__builtin_ia32_insertf32x4_mask((__v16sf)(__m512)(A), \
+ (__v4sf)(__m128)(B), (int)(imm), \
+ (__v16sf)_mm512_undefined_ps(), \
+ (__mmask16)-1); })
+
+#define _mm512_mask_insertf32x4(W, U, A, B, imm) __extension__ ({ \
+ (__m512)__builtin_ia32_insertf32x4_mask((__v16sf)(__m512)(A), \
+ (__v4sf)(__m128)(B), (int)(imm), \
+ (__v16sf)(__m512)(W), \
+ (__mmask16)(U)); })
+
+#define _mm512_maskz_insertf32x4(U, A, B, imm) __extension__ ({ \
+ (__m512)__builtin_ia32_insertf32x4_mask((__v16sf)(__m512)(A), \
+ (__v4sf)(__m128)(B), (int)(imm), \
+ (__v16sf)_mm512_setzero_ps(), \
+ (__mmask16)(U)); })
+
+#define _mm512_inserti32x4(A, B, imm) __extension__ ({ \
+ (__m512i)__builtin_ia32_inserti32x4_mask((__v16si)(__m512i)(A), \
+ (__v4si)(__m128i)(B), (int)(imm), \
+ (__v16si)_mm512_setzero_si512(), \
+ (__mmask16)-1); })
+
+#define _mm512_mask_inserti32x4(W, U, A, B, imm) __extension__ ({ \
+ (__m512i)__builtin_ia32_inserti32x4_mask((__v16si)(__m512i)(A), \
+ (__v4si)(__m128i)(B), (int)(imm), \
+ (__v16si)(__m512i)(W), \
+ (__mmask16)(U)); })
+
+#define _mm512_maskz_inserti32x4(U, A, B, imm) __extension__ ({ \
+ (__m512i)__builtin_ia32_inserti32x4_mask((__v16si)(__m512i)(A), \
+ (__v4si)(__m128i)(B), (int)(imm), \
+ (__v16si)_mm512_setzero_si512(), \
+ (__mmask16)(U)); })
+
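+/* VGETMANT returns the normalized mantissa: B picks the normalization
+ * interval (_MM_MANT_NORM_*) and C the sign control (_MM_MANT_SIGN_*);
+ * the builtin packs both into one immediate as ((C)<<2)|(B). */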
+#define _mm512_getmant_round_pd(A, B, C, R) __extension__ ({ \
+ (__m512d)__builtin_ia32_getmantpd512_mask((__v8df)(__m512d)(A), \
+ (int)(((C)<<2) | (B)), \
+ (__v8df)_mm512_undefined_pd(), \
+ (__mmask8)-1, (int)(R)); })
+
+#define _mm512_mask_getmant_round_pd(W, U, A, B, C, R) __extension__ ({ \
+ (__m512d)__builtin_ia32_getmantpd512_mask((__v8df)(__m512d)(A), \
+ (int)(((C)<<2) | (B)), \
+ (__v8df)(__m512d)(W), \
+ (__mmask8)(U), (int)(R)); })
+
+#define _mm512_maskz_getmant_round_pd(U, A, B, C, R) __extension__ ({ \
+ (__m512d)__builtin_ia32_getmantpd512_mask((__v8df)(__m512d)(A), \
+ (int)(((C)<<2) | (B)), \
+ (__v8df)_mm512_setzero_pd(), \
+ (__mmask8)(U), (int)(R)); })
+
+#define _mm512_getmant_pd(A, B, C) __extension__ ({ \
+ (__m512d)__builtin_ia32_getmantpd512_mask((__v8df)(__m512d)(A), \
+ (int)(((C)<<2) | (B)), \
+ (__v8df)_mm512_setzero_pd(), \
+ (__mmask8)-1, \
+ _MM_FROUND_CUR_DIRECTION); })
+
+#define _mm512_mask_getmant_pd(W, U, A, B, C) __extension__ ({ \
+ (__m512d)__builtin_ia32_getmantpd512_mask((__v8df)(__m512d)(A), \
+ (int)(((C)<<2) | (B)), \
+ (__v8df)(__m512d)(W), \
+ (__mmask8)(U), \
+ _MM_FROUND_CUR_DIRECTION); })
+
+#define _mm512_maskz_getmant_pd(U, A, B, C) __extension__ ({ \
+ (__m512d)__builtin_ia32_getmantpd512_mask((__v8df)(__m512d)(A), \
+ (int)(((C)<<2) | (B)), \
+ (__v8df)_mm512_setzero_pd(), \
+ (__mmask8)(U), \
+ _MM_FROUND_CUR_DIRECTION); })
+
+#define _mm512_getmant_round_ps(A, B, C, R) __extension__ ({ \
+ (__m512)__builtin_ia32_getmantps512_mask((__v16sf)(__m512)(A), \
+ (int)(((C)<<2) | (B)), \
+ (__v16sf)_mm512_undefined_ps(), \
+ (__mmask16)-1, (int)(R)); })
+
+#define _mm512_mask_getmant_round_ps(W, U, A, B, C, R) __extension__ ({ \
+ (__m512)__builtin_ia32_getmantps512_mask((__v16sf)(__m512)(A), \
+ (int)(((C)<<2) | (B)), \
+ (__v16sf)(__m512)(W), \
+ (__mmask16)(U), (int)(R)); })
+
+#define _mm512_maskz_getmant_round_ps(U, A, B, C, R) __extension__ ({ \
+ (__m512)__builtin_ia32_getmantps512_mask((__v16sf)(__m512)(A), \
+ (int)(((C)<<2) | (B)), \
+ (__v16sf)_mm512_setzero_ps(), \
+ (__mmask16)(U), (int)(R)); })
+
+#define _mm512_getmant_ps(A, B, C) __extension__ ({ \
+ (__m512)__builtin_ia32_getmantps512_mask((__v16sf)(__m512)(A), \
+ (int)(((C)<<2)|(B)), \
+ (__v16sf)_mm512_undefined_ps(), \
+ (__mmask16)-1, \
+ _MM_FROUND_CUR_DIRECTION); })
+
+#define _mm512_mask_getmant_ps(W, U, A, B, C) __extension__ ({ \
+ (__m512)__builtin_ia32_getmantps512_mask((__v16sf)(__m512)(A), \
+ (int)(((C)<<2)|(B)), \
+ (__v16sf)(__m512)(W), \
+ (__mmask16)(U), \
+ _MM_FROUND_CUR_DIRECTION); })
+
+#define _mm512_maskz_getmant_ps(U, A, B, C) __extension__ ({ \
+ (__m512)__builtin_ia32_getmantps512_mask((__v16sf)(__m512)(A), \
+ (int)(((C)<<2)|(B)), \
+ (__v16sf)_mm512_setzero_ps(), \
+ (__mmask16)(U), \
+ _MM_FROUND_CUR_DIRECTION); })
+
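+/* VGETEXP computes floor(log2(|x|)) of each element, returned as a
+ * floating-point value (the unbiased exponent). */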
+#define _mm512_getexp_round_pd(A, R) __extension__ ({ \
+ (__m512d)__builtin_ia32_getexppd512_mask((__v8df)(__m512d)(A), \
+ (__v8df)_mm512_undefined_pd(), \
+ (__mmask8)-1, (int)(R)); })
+
+#define _mm512_mask_getexp_round_pd(W, U, A, R) __extension__ ({ \
+ (__m512d)__builtin_ia32_getexppd512_mask((__v8df)(__m512d)(A), \
+ (__v8df)(__m512d)(W), \
+ (__mmask8)(U), (int)(R)); })
+
+#define _mm512_maskz_getexp_round_pd(U, A, R) __extension__ ({ \
+ (__m512d)__builtin_ia32_getexppd512_mask((__v8df)(__m512d)(A), \
+ (__v8df)_mm512_setzero_pd(), \
+ (__mmask8)(U), (int)(R)); })
+
+static __inline__ __m512d __DEFAULT_FN_ATTRS
+_mm512_getexp_pd (__m512d __A)
+{
+ return (__m512d) __builtin_ia32_getexppd512_mask ((__v8df) __A,
+ (__v8df) _mm512_undefined_pd (),
+ (__mmask8) -1,
+ _MM_FROUND_CUR_DIRECTION);
+}
+
+static __inline__ __m512d __DEFAULT_FN_ATTRS
+_mm512_mask_getexp_pd (__m512d __W, __mmask8 __U, __m512d __A)
+{
+ return (__m512d) __builtin_ia32_getexppd512_mask ((__v8df) __A,
+ (__v8df) __W,
+ (__mmask8) __U,
+ _MM_FROUND_CUR_DIRECTION);
+}
+
+static __inline__ __m512d __DEFAULT_FN_ATTRS
+_mm512_maskz_getexp_pd (__mmask8 __U, __m512d __A)
+{
+ return (__m512d) __builtin_ia32_getexppd512_mask ((__v8df) __A,
+ (__v8df) _mm512_setzero_pd (),
+ (__mmask8) __U,
+ _MM_FROUND_CUR_DIRECTION);
+}
+
+#define _mm512_getexp_round_ps(A, R) __extension__ ({ \
+ (__m512)__builtin_ia32_getexpps512_mask((__v16sf)(__m512)(A), \
+ (__v16sf)_mm512_undefined_ps(), \
+ (__mmask16)-1, (int)(R)); })
+
+#define _mm512_mask_getexp_round_ps(W, U, A, R) __extension__ ({ \
+ (__m512)__builtin_ia32_getexpps512_mask((__v16sf)(__m512)(A), \
+ (__v16sf)(__m512)(W), \
+ (__mmask16)(U), (int)(R)); })
+
+#define _mm512_maskz_getexp_round_ps(U, A, R) __extension__ ({ \
+ (__m512)__builtin_ia32_getexpps512_mask((__v16sf)(__m512)(A), \
+ (__v16sf)_mm512_setzero_ps(), \
+ (__mmask16)(U), (int)(R)); })
+
+static __inline__ __m512 __DEFAULT_FN_ATTRS
+_mm512_getexp_ps (__m512 __A)
+{
+ return (__m512) __builtin_ia32_getexpps512_mask ((__v16sf) __A,
+ (__v16sf) _mm512_undefined_ps (),
+ (__mmask16) -1,
+ _MM_FROUND_CUR_DIRECTION);
+}
+
+static __inline__ __m512 __DEFAULT_FN_ATTRS
+_mm512_mask_getexp_ps (__m512 __W, __mmask16 __U, __m512 __A)
+{
+ return (__m512) __builtin_ia32_getexpps512_mask ((__v16sf) __A,
+ (__v16sf) __W,
+ (__mmask16) __U,
+ _MM_FROUND_CUR_DIRECTION);
+}
+
+static __inline__ __m512 __DEFAULT_FN_ATTRS
+_mm512_maskz_getexp_ps (__mmask16 __U, __m512 __A)
+{
+ return (__m512) __builtin_ia32_getexpps512_mask ((__v16sf) __A,
+ (__v16sf) _mm512_setzero_ps (),
+ (__mmask16) __U,
+ _MM_FROUND_CUR_DIRECTION);
+}
+
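+/* Gathers load each element from addr + index[i]*scale; scale must be a
+ * compile-time 1, 2, 4 or 8. In the mask forms, lanes whose mask bit is
+ * clear keep the corresponding element of v1_old. */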
+#define _mm512_i64gather_ps(index, addr, scale) __extension__ ({ \
+ (__m256)__builtin_ia32_gatherdiv16sf((__v8sf)_mm256_undefined_ps(), \
+ (float const *)(addr), \
+ (__v8di)(__m512i)(index), (__mmask8)-1, \
+ (int)(scale)); })
+
+#define _mm512_mask_i64gather_ps(v1_old, mask, index, addr, scale) __extension__ ({\
+ (__m256)__builtin_ia32_gatherdiv16sf((__v8sf)(__m256)(v1_old), \
+ (float const *)(addr), \
+ (__v8di)(__m512i)(index), \
+ (__mmask8)(mask), (int)(scale)); })
+
+#define _mm512_i64gather_epi32(index, addr, scale) __extension__ ({\
+ (__m256i)__builtin_ia32_gatherdiv16si((__v8si)_mm256_undefined_si256(), \
+ (int const *)(addr), \
+ (__v8di)(__m512i)(index), \
+ (__mmask8)-1, (int)(scale)); })
+
+#define _mm512_mask_i64gather_epi32(v1_old, mask, index, addr, scale) __extension__ ({\
+ (__m256i)__builtin_ia32_gatherdiv16si((__v8si)(__m256i)(v1_old), \
+ (int const *)(addr), \
+ (__v8di)(__m512i)(index), \
+ (__mmask8)(mask), (int)(scale)); })
+
+#define _mm512_i64gather_pd(index, addr, scale) __extension__ ({\
+ (__m512d)__builtin_ia32_gatherdiv8df((__v8df)_mm512_undefined_pd(), \
+ (double const *)(addr), \
+ (__v8di)(__m512i)(index), (__mmask8)-1, \
+ (int)(scale)); })
+
+#define _mm512_mask_i64gather_pd(v1_old, mask, index, addr, scale) __extension__ ({\
+ (__m512d)__builtin_ia32_gatherdiv8df((__v8df)(__m512d)(v1_old), \
+ (double const *)(addr), \
+ (__v8di)(__m512i)(index), \
+ (__mmask8)(mask), (int)(scale)); })
+
+#define _mm512_i64gather_epi64(index, addr, scale) __extension__ ({\
+ (__m512i)__builtin_ia32_gatherdiv8di((__v8di)_mm512_undefined_epi32(), \
+ (long long const *)(addr), \
+ (__v8di)(__m512i)(index), (__mmask8)-1, \
+ (int)(scale)); })
+
+#define _mm512_mask_i64gather_epi64(v1_old, mask, index, addr, scale) __extension__ ({\
+ (__m512i)__builtin_ia32_gatherdiv8di((__v8di)(__m512i)(v1_old), \
+ (long long const *)(addr), \
+ (__v8di)(__m512i)(index), \
+ (__mmask8)(mask), (int)(scale)); })
+
+#define _mm512_i32gather_ps(index, addr, scale) __extension__ ({\
+ (__m512)__builtin_ia32_gathersiv16sf((__v16sf)_mm512_undefined_ps(), \
+ (float const *)(addr), \
+ (__v16sf)(__m512)(index), \
+ (__mmask16)-1, (int)(scale)); })
+
+#define _mm512_mask_i32gather_ps(v1_old, mask, index, addr, scale) __extension__ ({\
+ (__m512)__builtin_ia32_gathersiv16sf((__v16sf)(__m512)(v1_old), \
+ (float const *)(addr), \
+ (__v16sf)(__m512)(index), \
+ (__mmask16)(mask), (int)(scale)); })
+
+#define _mm512_i32gather_epi32(index, addr, scale) __extension__ ({\
+ (__m512i)__builtin_ia32_gathersiv16si((__v16si)_mm512_undefined_epi32(), \
+ (int const *)(addr), \
+ (__v16si)(__m512i)(index), \
+ (__mmask16)-1, (int)(scale)); })
+
+#define _mm512_mask_i32gather_epi32(v1_old, mask, index, addr, scale) __extension__ ({\
+ (__m512i)__builtin_ia32_gathersiv16si((__v16si)(__m512i)(v1_old), \
+ (int const *)(addr), \
+ (__v16si)(__m512i)(index), \
+ (__mmask16)(mask), (int)(scale)); })
+
+#define _mm512_i32gather_pd(index, addr, scale) __extension__ ({\
+ (__m512d)__builtin_ia32_gathersiv8df((__v8df)_mm512_undefined_pd(), \
+ (double const *)(addr), \
+ (__v8si)(__m256i)(index), (__mmask8)-1, \
+ (int)(scale)); })
+
+#define _mm512_mask_i32gather_pd(v1_old, mask, index, addr, scale) __extension__ ({\
+ (__m512d)__builtin_ia32_gathersiv8df((__v8df)(__m512d)(v1_old), \
+ (double const *)(addr), \
+ (__v8si)(__m256i)(index), \
+ (__mmask8)(mask), (int)(scale)); })
+
+#define _mm512_i32gather_epi64(index, addr, scale) __extension__ ({\
+ (__m512i)__builtin_ia32_gathersiv8di((__v8di)_mm512_undefined_epi32(), \
+ (long long const *)(addr), \
+ (__v8si)(__m256i)(index), (__mmask8)-1, \
+ (int)(scale)); })
+
+#define _mm512_mask_i32gather_epi64(v1_old, mask, index, addr, scale) __extension__ ({\
+ (__m512i)__builtin_ia32_gathersiv8di((__v8di)(__m512i)(v1_old), \
+ (long long const *)(addr), \
+ (__v8si)(__m256i)(index), \
+ (__mmask8)(mask), (int)(scale)); })
+
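+/* Scatters are the store-side counterpart: each mask-selected element of v1
+ * is written to addr + index[i]*scale, again with scale limited to 1, 2, 4
+ * or 8. */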
+#define _mm512_i64scatter_ps(addr, index, v1, scale) __extension__ ({\
+ __builtin_ia32_scatterdiv16sf((float *)(addr), (__mmask8)-1, \
+ (__v8di)(__m512i)(index), \
+ (__v8sf)(__m256)(v1), (int)(scale)); })
+
+#define _mm512_mask_i64scatter_ps(addr, mask, index, v1, scale) __extension__ ({\
+ __builtin_ia32_scatterdiv16sf((float *)(addr), (__mmask8)(mask), \
+ (__v8di)(__m512i)(index), \
+ (__v8sf)(__m256)(v1), (int)(scale)); })
+
+#define _mm512_i64scatter_epi32(addr, index, v1, scale) __extension__ ({\
+ __builtin_ia32_scatterdiv16si((int *)(addr), (__mmask8)-1, \
+ (__v8di)(__m512i)(index), \
+ (__v8si)(__m256i)(v1), (int)(scale)); })
+
+#define _mm512_mask_i64scatter_epi32(addr, mask, index, v1, scale) __extension__ ({\
+ __builtin_ia32_scatterdiv16si((int *)(addr), (__mmask8)(mask), \
+ (__v8di)(__m512i)(index), \
+ (__v8si)(__m256i)(v1), (int)(scale)); })
+
+#define _mm512_i64scatter_pd(addr, index, v1, scale) __extension__ ({\
+ __builtin_ia32_scatterdiv8df((double *)(addr), (__mmask8)-1, \
+ (__v8di)(__m512i)(index), \
+ (__v8df)(__m512d)(v1), (int)(scale)); })
+
+#define _mm512_mask_i64scatter_pd(addr, mask, index, v1, scale) __extension__ ({\
+ __builtin_ia32_scatterdiv8df((double *)(addr), (__mmask8)(mask), \
+ (__v8di)(__m512i)(index), \
+ (__v8df)(__m512d)(v1), (int)(scale)); })
+
+#define _mm512_i64scatter_epi64(addr, index, v1, scale) __extension__ ({\
+ __builtin_ia32_scatterdiv8di((long long *)(addr), (__mmask8)-1, \
+ (__v8di)(__m512i)(index), \
+ (__v8di)(__m512i)(v1), (int)(scale)); })
+
+#define _mm512_mask_i64scatter_epi64(addr, mask, index, v1, scale) __extension__ ({\
+ __builtin_ia32_scatterdiv8di((long long *)(addr), (__mmask8)(mask), \
+ (__v8di)(__m512i)(index), \
+ (__v8di)(__m512i)(v1), (int)(scale)); })
+
+#define _mm512_i32scatter_ps(addr, index, v1, scale) __extension__ ({\
+ __builtin_ia32_scattersiv16sf((float *)(addr), (__mmask16)-1, \
+ (__v16si)(__m512i)(index), \
+ (__v16sf)(__m512)(v1), (int)(scale)); })
+
+#define _mm512_mask_i32scatter_ps(addr, mask, index, v1, scale) __extension__ ({\
+ __builtin_ia32_scattersiv16sf((float *)(addr), (__mmask16)(mask), \
+ (__v16si)(__m512i)(index), \
+ (__v16sf)(__m512)(v1), (int)(scale)); })
+
+#define _mm512_i32scatter_epi32(addr, index, v1, scale) __extension__ ({\
+ __builtin_ia32_scattersiv16si((int *)(addr), (__mmask16)-1, \
+ (__v16si)(__m512i)(index), \
+ (__v16si)(__m512i)(v1), (int)(scale)); })
+
+#define _mm512_mask_i32scatter_epi32(addr, mask, index, v1, scale) __extension__ ({\
+ __builtin_ia32_scattersiv16si((int *)(addr), (__mmask16)(mask), \
+ (__v16si)(__m512i)(index), \
+ (__v16si)(__m512i)(v1), (int)(scale)); })
+
+#define _mm512_i32scatter_pd(addr, index, v1, scale) __extension__ ({\
+ __builtin_ia32_scattersiv8df((double *)(addr), (__mmask8)-1, \
+ (__v8si)(__m256i)(index), \
+ (__v8df)(__m512d)(v1), (int)(scale)); })
+
+#define _mm512_mask_i32scatter_pd(addr, mask, index, v1, scale) __extension__ ({\
+ __builtin_ia32_scattersiv8df((double *)(addr), (__mmask8)(mask), \
+ (__v8si)(__m256i)(index), \
+ (__v8df)(__m512d)(v1), (int)(scale)); })
+
+#define _mm512_i32scatter_epi64(addr, index, v1, scale) __extension__ ({\
+ __builtin_ia32_scattersiv8di((long long *)(addr), (__mmask8)-1, \
+ (__v8si)(__m256i)(index), \
+ (__v8di)(__m512i)(v1), (int)(scale)); })
+
+#define _mm512_mask_i32scatter_epi64(addr, mask, index, v1, scale) __extension__ ({\
+ __builtin_ia32_scattersiv8di((long long *)(addr), (__mmask8)(mask), \
+ (__v8si)(__m256i)(index), \
+ (__v8di)(__m512i)(v1), (int)(scale)); })
+
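+/* Masked scalar FMA family. The mask gates only the low lane: _maskz zeroes
+ * it when unselected, while _mask and _mask3 pass a source operand through.
+ * fmsub/fnmadd/fnmsub are all expressed by negating operands of the
+ * underlying vfmadd builtins. */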
+static __inline__ __m128 __DEFAULT_FN_ATTRS
+_mm_mask_fmadd_ss (__m128 __W, __mmask8 __U, __m128 __A, __m128 __B)
+{
+ return (__m128) __builtin_ia32_vfmaddss3_mask ((__v4sf) __A,
+ (__v4sf) __B,
+ (__v4sf) __W,
+ (__mmask8) __U,
+ _MM_FROUND_CUR_DIRECTION);
+}
+
+#define _mm_mask_fmadd_round_ss(W, U, A, B, R) __extension__({\
+ (__m128)__builtin_ia32_vfmaddss3_mask((__v4sf)(__m128)(A), \
+ (__v4sf)(__m128)(B), \
+ (__v4sf)(__m128)(W), (__mmask8)(U), \
+ (int)(R)); })
+
+static __inline__ __m128 __DEFAULT_FN_ATTRS
+_mm_maskz_fmadd_ss (__mmask8 __U, __m128 __A, __m128 __B, __m128 __C)
+{
+ return (__m128) __builtin_ia32_vfmaddss3_maskz ((__v4sf) __A,
+ (__v4sf) __B,
+ (__v4sf) __C,
+ (__mmask8) __U,
+ _MM_FROUND_CUR_DIRECTION);
+}
+
+#define _mm_maskz_fmadd_round_ss(U, A, B, C, R) __extension__ ({\
+ (__m128)__builtin_ia32_vfmaddss3_maskz((__v4sf)(__m128)(A), \
+ (__v4sf)(__m128)(B), \
+ (__v4sf)(__m128)(C), (__mmask8)(U), \
+ (int)(R)); })
+
+static __inline__ __m128 __DEFAULT_FN_ATTRS
+_mm_mask3_fmadd_ss (__m128 __W, __m128 __X, __m128 __Y, __mmask8 __U)
+{
+ return (__m128) __builtin_ia32_vfmaddss3_mask3 ((__v4sf) __W,
+ (__v4sf) __X,
+ (__v4sf) __Y,
+ (__mmask8) __U,
+ _MM_FROUND_CUR_DIRECTION);
+}
+
+#define _mm_mask3_fmadd_round_ss(W, X, Y, U, R) __extension__ ({\
+ (__m128)__builtin_ia32_vfmaddss3_mask3((__v4sf)(__m128)(W), \
+ (__v4sf)(__m128)(X), \
+ (__v4sf)(__m128)(Y), (__mmask8)(U), \
+ (int)(R)); })
+
+static __inline__ __m128 __DEFAULT_FN_ATTRS
+_mm_mask_fmsub_ss (__m128 __W, __mmask8 __U, __m128 __A, __m128 __B)
+{
+ return (__m128) __builtin_ia32_vfmaddss3_mask ((__v4sf) __A,
+ -(__v4sf) __B,
+ (__v4sf) __W,
+ (__mmask8) __U,
+ _MM_FROUND_CUR_DIRECTION);
+}
+
+#define _mm_mask_fmsub_round_ss(W, U, A, B, R) __extension__ ({\
+ (__m128)__builtin_ia32_vfmaddss3_mask((__v4sf)(__m128)(A), \
+ -(__v4sf)(__m128)(B), \
+ (__v4sf)(__m128)(W), (__mmask8)(U), \
+ (int)(R)); })
+
+static __inline__ __m128 __DEFAULT_FN_ATTRS
+_mm_maskz_fmsub_ss (__mmask8 __U, __m128 __A, __m128 __B, __m128 __C)
+{
+ return (__m128) __builtin_ia32_vfmaddss3_maskz ((__v4sf) __A,
+ (__v4sf) __B,
+ -(__v4sf) __C,
+ (__mmask8) __U,
+ _MM_FROUND_CUR_DIRECTION);
+}
+
+#define _mm_maskz_fmsub_round_ss(U, A, B, C, R) __extension__ ({\
+ (__m128)__builtin_ia32_vfmaddss3_maskz((__v4sf)(__m128)(A), \
+ (__v4sf)(__m128)(B), \
+ -(__v4sf)(__m128)(C), (__mmask8)(U), \
+ (int)(R)); })
+
+static __inline__ __m128 __DEFAULT_FN_ATTRS
+_mm_mask3_fmsub_ss (__m128 __W, __m128 __X, __m128 __Y, __mmask8 __U)
+{
+ return (__m128) __builtin_ia32_vfmaddss3_mask3 ((__v4sf) __W,
+ (__v4sf) __X,
+ -(__v4sf) __Y,
+ (__mmask8) __U,
+ _MM_FROUND_CUR_DIRECTION);
+}
+
+#define _mm_mask3_fmsub_round_ss(W, X, Y, U, R) __extension__ ({\
+ (__m128)__builtin_ia32_vfmaddss3_mask3((__v4sf)(__m128)(W), \
+ (__v4sf)(__m128)(X), \
+ -(__v4sf)(__m128)(Y), (__mmask8)(U), \
+ (int)(R)); })
+
+static __inline__ __m128 __DEFAULT_FN_ATTRS
+_mm_mask_fnmadd_ss (__m128 __W, __mmask8 __U, __m128 __A, __m128 __B)
+{
+ return (__m128) __builtin_ia32_vfmaddss3_mask (-(__v4sf) __A,
+ (__v4sf) __B,
+ (__v4sf) __W,
+ (__mmask8) __U,
+ _MM_FROUND_CUR_DIRECTION);
+}
+
+#define _mm_mask_fnmadd_round_ss(W, U, A, B, R) __extension__ ({\
+ (__m128)__builtin_ia32_vfmaddss3_mask(-(__v4sf)(__m128)(A), \
+ (__v4sf)(__m128)(B), \
+ (__v4sf)(__m128)(W), (__mmask8)(U), \
+ (int)(R)); })
+
+static __inline__ __m128 __DEFAULT_FN_ATTRS
+_mm_maskz_fnmadd_ss (__mmask8 __U, __m128 __A, __m128 __B, __m128 __C)
+{
+ return (__m128) __builtin_ia32_vfmaddss3_maskz (-(__v4sf) __A,
+ (__v4sf) __B,
+ (__v4sf) __C,
+ (__mmask8) __U,
+ _MM_FROUND_CUR_DIRECTION);
+}
+
+#define _mm_maskz_fnmadd_round_ss(U, A, B, C, R) __extension__ ({\
+ (__m128)__builtin_ia32_vfmaddss3_maskz(-(__v4sf)(__m128)(A), \
+ (__v4sf)(__m128)(B), \
+ (__v4sf)(__m128)(C), (__mmask8)(U), \
+ (int)(R)); })
+
+static __inline__ __m128 __DEFAULT_FN_ATTRS
+_mm_mask3_fnmadd_ss (__m128 __W, __m128 __X, __m128 __Y, __mmask8 __U)
+{
+ return (__m128) __builtin_ia32_vfmaddss3_mask3 (-(__v4sf) __W,
+ (__v4sf) __X,
+ (__v4sf) __Y,
+ (__mmask8) __U,
+ _MM_FROUND_CUR_DIRECTION);
+}
+
+#define _mm_mask3_fnmadd_round_ss(W, X, Y, U, R) __extension__({\
+ (__m128)__builtin_ia32_vfmaddss3_mask3(-(__v4sf)(__m128)(W), \
+ (__v4sf)(__m128)(X), \
+ (__v4sf)(__m128)(Y), (__mmask8)(U), \
+ (int)(R)); })
+
+static __inline__ __m128 __DEFAULT_FN_ATTRS
+_mm_mask_fnmsub_ss (__m128 __W, __mmask8 __U, __m128 __A, __m128 __B)
+{
+ return (__m128) __builtin_ia32_vfmaddss3_mask (-(__v4sf) __A,
+ -(__v4sf) __B,
+ (__v4sf) __W,
+ (__mmask8) __U,
+ _MM_FROUND_CUR_DIRECTION);
+}
+
+#define _mm_mask_fnmsub_round_ss(W, U, A, B, R) __extension__ ({\
+ (__m128)__builtin_ia32_vfmaddss3_mask(-(__v4sf)(__m128)(A), \
+ -(__v4sf)(__m128)(B), \
+ (__v4sf)(__m128)(W), (__mmask8)(U), \
+ (int)(R)); })
+
+static __inline__ __m128 __DEFAULT_FN_ATTRS
+_mm_maskz_fnmsub_ss (__mmask8 __U, __m128 __A, __m128 __B, __m128 __C)
+{
+ return (__m128) __builtin_ia32_vfmaddss3_maskz (-(__v4sf) __A,
+ (__v4sf) __B,
+ -(__v4sf) __C,
+ (__mmask8) __U,
+ _MM_FROUND_CUR_DIRECTION);
+}
+
+#define _mm_maskz_fnmsub_round_ss(U, A, B, C, R) __extension__ ({\
+ (__m128)__builtin_ia32_vfmaddss3_maskz(-(__v4sf)(__m128)(A), \
+ (__v4sf)(__m128)(B), \
+ -(__v4sf)(__m128)(C), (__mmask8)(U), \
+ (int)(R)); })
+
+static __inline__ __m128 __DEFAULT_FN_ATTRS
+_mm_mask3_fnmsub_ss (__m128 __W, __m128 __X, __m128 __Y, __mmask8 __U)
+{
+ return (__m128) __builtin_ia32_vfmaddss3_mask3 (-(__v4sf) __W,
+ (__v4sf) __X,
+ -(__v4sf) __Y,
+ (__mmask8) __U,
+ _MM_FROUND_CUR_DIRECTION);
+}
+
+#define _mm_mask3_fnmsub_round_ss(W, X, Y, U, R) __extension__({\
+ (__m128)__builtin_ia32_vfmaddss3_mask3(-(__v4sf)(__m128)(W), \
+ (__v4sf)(__m128)(X), \
+ -(__v4sf)(__m128)(Y), (__mmask8)(U), \
+ (int)(R)); })
+
+static __inline__ __m128d __DEFAULT_FN_ATTRS
+_mm_mask_fmadd_sd (__m128d __W, __mmask8 __U, __m128d __A, __m128d __B)
+{
+ return (__m128d) __builtin_ia32_vfmaddsd3_mask ( (__v2df) __A,
+ (__v2df) __B,
+ (__v2df) __W,
+ (__mmask8) __U,
+ _MM_FROUND_CUR_DIRECTION);
+}
+
+#define _mm_mask_fmadd_round_sd(W, U, A, B, R) __extension__({\
+ (__m128d)__builtin_ia32_vfmaddsd3_mask((__v2df)(__m128d)(A), \
+ (__v2df)(__m128d)(B), \
+ (__v2df)(__m128d)(W), (__mmask8)(U), \
+ (int)(R)); })
+
+static __inline__ __m128d __DEFAULT_FN_ATTRS
+_mm_maskz_fmadd_sd (__mmask8 __U, __m128d __A, __m128d __B, __m128d __C)
+{
+ return (__m128d) __builtin_ia32_vfmaddsd3_maskz ( (__v2df) __A,
+ (__v2df) __B,
+ (__v2df) __C,
+ (__mmask8) __U,
+ _MM_FROUND_CUR_DIRECTION);
+}
+
+#define _mm_maskz_fmadd_round_sd(U, A, B, C, R) __extension__ ({\
+ (__m128d)__builtin_ia32_vfmaddsd3_maskz((__v2df)(__m128d)(A), \
+ (__v2df)(__m128d)(B), \
+ (__v2df)(__m128d)(C), (__mmask8)(U), \
+ (int)(R)); })
+
+static __inline__ __m128d __DEFAULT_FN_ATTRS
+_mm_mask3_fmadd_sd (__m128d __W, __m128d __X, __m128d __Y, __mmask8 __U)
+{
+ return (__m128d) __builtin_ia32_vfmaddsd3_mask3 ((__v2df) __W,
+ (__v2df) __X,
+ (__v2df) __Y,
+ (__mmask8) __U,
+ _MM_FROUND_CUR_DIRECTION);
+}
+
+#define _mm_mask3_fmadd_round_sd(W, X, Y, U, R) __extension__ ({\
+ (__m128d)__builtin_ia32_vfmaddsd3_mask3((__v2df)(__m128d)(W), \
+ (__v2df)(__m128d)(X), \
+ (__v2df)(__m128d)(Y), (__mmask8)(U), \
+ (int)(R)); })
+
+static __inline__ __m128d __DEFAULT_FN_ATTRS
+_mm_mask_fmsub_sd (__m128d __W, __mmask8 __U, __m128d __A, __m128d __B)
+{
+ return (__m128d) __builtin_ia32_vfmaddsd3_mask ( (__v2df) __A,
+ -(__v2df) __B,
+ (__v2df) __W,
+ (__mmask8) __U,
+ _MM_FROUND_CUR_DIRECTION);
+}
+
+#define _mm_mask_fmsub_round_sd(W, U, A, B, R) __extension__ ({\
+ (__m128d)__builtin_ia32_vfmaddsd3_mask((__v2df)(__m128d)(A), \
+ -(__v2df)(__m128d)(B), \
+ (__v2df)(__m128d)(W), (__mmask8)(U), \
+ (int)(R)); })
+
+static __inline__ __m128d __DEFAULT_FN_ATTRS
+_mm_maskz_fmsub_sd (__mmask8 __U, __m128d __A, __m128d __B, __m128d __C)
+{
+ return (__m128d) __builtin_ia32_vfmaddsd3_maskz ( (__v2df) __A,
+ (__v2df) __B,
+ -(__v2df) __C,
+ (__mmask8) __U,
+ _MM_FROUND_CUR_DIRECTION);
+}
+
+#define _mm_maskz_fmsub_round_sd(U, A, B, C, R) __extension__ ({\
+ (__m128d)__builtin_ia32_vfmaddsd3_maskz((__v2df)(__m128d)(A), \
+ (__v2df)(__m128d)(B), \
+ -(__v2df)(__m128d)(C), \
+ (__mmask8)(U), (int)(R)); })
+
+static __inline__ __m128d __DEFAULT_FN_ATTRS
+_mm_mask3_fmsub_sd (__m128d __W, __m128d __X, __m128d __Y, __mmask8 __U)
+{
+ return (__m128d) __builtin_ia32_vfmaddsd3_mask3 ((__v2df) __W,
+ (__v2df) __X,
+ -(__v2df) __Y,
+ (__mmask8) __U,
+ _MM_FROUND_CUR_DIRECTION);
+}
+
+#define _mm_mask3_fmsub_round_sd(W, X, Y, U, R) __extension__ ({\
+ (__m128d)__builtin_ia32_vfmaddsd3_mask3((__v2df)(__m128d)(W), \
+ (__v2df)(__m128d)(X), \
+ -(__v2df)(__m128d)(Y), \
+ (__mmask8)(U), (int)(R)); })
+
+static __inline__ __m128d __DEFAULT_FN_ATTRS
+_mm_mask_fnmadd_sd (__m128d __W, __mmask8 __U, __m128d __A, __m128d __B)
+{
+ return (__m128d) __builtin_ia32_vfmaddsd3_mask ( -(__v2df) __A,
+ (__v2df) __B,
+ (__v2df) __W,
+ (__mmask8) __U,
+ _MM_FROUND_CUR_DIRECTION);
+}
+
+#define _mm_mask_fnmadd_round_sd(W, U, A, B, R) __extension__ ({\
+ (__m128d)__builtin_ia32_vfmaddsd3_mask(-(__v2df)(__m128d)(A), \
+ (__v2df)(__m128d)(B), \
+ (__v2df)(__m128d)(W), (__mmask8)(U), \
+ (int)(R)); })
+
+static __inline__ __m128d __DEFAULT_FN_ATTRS
+_mm_maskz_fnmadd_sd (__mmask8 __U, __m128d __A, __m128d __B, __m128d __C)
+{
+ return (__m128d) __builtin_ia32_vfmaddsd3_maskz ( -(__v2df) __A,
+ (__v2df) __B,
+ (__v2df) __C,
+ (__mmask8) __U,
+ _MM_FROUND_CUR_DIRECTION);
+}
+
+#define _mm_maskz_fnmadd_round_sd(U, A, B, C, R) __extension__ ({\
+ (__m128d)__builtin_ia32_vfmaddsd3_maskz(-(__v2df)(__m128d)(A), \
+ (__v2df)(__m128d)(B), \
+ (__v2df)(__m128d)(C), (__mmask8)(U), \
+ (int)(R)); })
+
+static __inline__ __m128d __DEFAULT_FN_ATTRS
+_mm_mask3_fnmadd_sd (__m128d __W, __m128d __X, __m128d __Y, __mmask8 __U)
+{
+ return (__m128d) __builtin_ia32_vfmaddsd3_mask3 (-(__v2df) __W,
+ (__v2df) __X,
+ (__v2df) __Y,
+ (__mmask8) __U,
+ _MM_FROUND_CUR_DIRECTION);
+}
+
+#define _mm_mask3_fnmadd_round_sd(W, X, Y, U, R) __extension__({\
+ (__m128d)__builtin_ia32_vfmaddsd3_mask3(-(__v2df)(__m128d)(W), \
+ (__v2df)(__m128d)(X), \
+ (__v2df)(__m128d)(Y), (__mmask8)(U), \
+ (int)(R)); })
+
+static __inline__ __m128d __DEFAULT_FN_ATTRS
+_mm_mask_fnmsub_sd (__m128d __W, __mmask8 __U, __m128d __A, __m128d __B)
+{
+ return (__m128d) __builtin_ia32_vfmaddsd3_mask ( -(__v2df) __A,
+ -(__v2df) __B,
+ (__v2df) __W,
+ (__mmask8) __U,
+ _MM_FROUND_CUR_DIRECTION);
+}
+
+#define _mm_mask_fnmsub_round_sd(W, U, A, B, R) __extension__ ({\
+ (__m128d)__builtin_ia32_vfmaddsd3_mask(-(__v2df)(__m128d)(A), \
+ -(__v2df)(__m128d)(B), \
+ (__v2df)(__m128d)(W), (__mmask8)(U), \
+ (int)(R)); })
+
+static __inline__ __m128d __DEFAULT_FN_ATTRS
+_mm_maskz_fnmsub_sd (__mmask8 __U, __m128d __A, __m128d __B, __m128d __C)
+{
+ return (__m128d) __builtin_ia32_vfmaddsd3_maskz ( -(__v2df) __A,
+ (__v2df) __B,
+ -(__v2df) __C,
+ (__mmask8) __U,
+ _MM_FROUND_CUR_DIRECTION);
+}
+
+#define _mm_maskz_fnmsub_round_sd(U, A, B, C, R) __extension__ ({\
+ (__m128d)__builtin_ia32_vfmaddsd3_maskz(-(__v2df)(__m128d)(A), \
+ (__v2df)(__m128d)(B), \
+ -(__v2df)(__m128d)(C), \
+ (__mmask8)(U), (int)(R)); })
+
+static __inline__ __m128d __DEFAULT_FN_ATTRS
+_mm_mask3_fnmsub_sd (__m128d __W, __m128d __X, __m128d __Y, __mmask8 __U)
+{
+ return (__m128d) __builtin_ia32_vfmaddsd3_mask3 (-(__v2df) __W,
+ (__v2df) __X,
+ -(__v2df) __Y,
+ (__mmask8) __U,
+ _MM_FROUND_CUR_DIRECTION);
+}
+
+#define _mm_mask3_fnmsub_round_sd(W, X, Y, U, R) __extension__({\
+ (__m128d)__builtin_ia32_vfmaddsd3_mask3(-(__v2df)(__m128d)(W), \
+ (__v2df)(__m128d)(X), \
+ -(__v2df)(__m128d)(Y), \
+ (__mmask8)(U), (int)(R)); })
+
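+/* _mm512_permutex_* permute 64-bit elements within each 256-bit half: every
+ * 2-bit field of C selects an element of the same half, which is why the
+ * shufflevector indices below are offset by 0 and 4. */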
+#define _mm512_permutex_pd(X, C) __extension__ ({ \
+ (__m512d)__builtin_shufflevector((__v8df)(__m512d)(X), \
+ (__v8df)_mm512_undefined_pd(), \
+ 0 + (((C) >> 0) & 0x3), \
+ 0 + (((C) >> 2) & 0x3), \
+ 0 + (((C) >> 4) & 0x3), \
+ 0 + (((C) >> 6) & 0x3), \
+ 4 + (((C) >> 0) & 0x3), \
+ 4 + (((C) >> 2) & 0x3), \
+ 4 + (((C) >> 4) & 0x3), \
+ 4 + (((C) >> 6) & 0x3)); })
+
+#define _mm512_mask_permutex_pd(W, U, X, C) __extension__ ({ \
+ (__m512d)__builtin_ia32_selectpd_512((__mmask8)(U), \
+ (__v8df)_mm512_permutex_pd((X), (C)), \
+ (__v8df)(__m512d)(W)); })
+
+#define _mm512_maskz_permutex_pd(U, X, C) __extension__ ({ \
+ (__m512d)__builtin_ia32_selectpd_512((__mmask8)(U), \
+ (__v8df)_mm512_permutex_pd((X), (C)), \
+ (__v8df)_mm512_setzero_pd()); })
+
+#define _mm512_permutex_epi64(X, C) __extension__ ({ \
+ (__m512i)__builtin_shufflevector((__v8di)(__m512i)(X), \
+ (__v8di)_mm512_undefined_epi32(), \
+ 0 + (((C) >> 0) & 0x3), \
+ 0 + (((C) >> 2) & 0x3), \
+ 0 + (((C) >> 4) & 0x3), \
+ 0 + (((C) >> 6) & 0x3), \
+ 4 + (((C) >> 0) & 0x3), \
+ 4 + (((C) >> 2) & 0x3), \
+ 4 + (((C) >> 4) & 0x3), \
+ 4 + (((C) >> 6) & 0x3)); })
+
+#define _mm512_mask_permutex_epi64(W, U, X, C) __extension__ ({ \
+ (__m512i)__builtin_ia32_selectq_512((__mmask8)(U), \
+ (__v8di)_mm512_permutex_epi64((X), (C)), \
+ (__v8di)(__m512i)(W)); })
+
+#define _mm512_maskz_permutex_epi64(U, X, C) __extension__ ({ \
+ (__m512i)__builtin_ia32_selectq_512((__mmask8)(U), \
+ (__v8di)_mm512_permutex_epi64((X), (C)), \
+ (__v8di)_mm512_setzero_si512()); })
+
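/* Editor's sketch (hypothetical example_* helper, not part of this patch):
   the immediate permute applies the same four 2-bit selectors to each
   256-bit half, so the constant 0x1B reverses the four doubles within each
   half. */
static __inline__ __m512d example_reverse_halves(__m512d __V)
{
  return _mm512_permutex_pd(__V, 0x1B);
}
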
+static __inline__ __m512d __DEFAULT_FN_ATTRS
+_mm512_permutexvar_pd (__m512i __X, __m512d __Y)
+{
+ return (__m512d) __builtin_ia32_permvardf512_mask ((__v8df) __Y,
+ (__v8di) __X,
+ (__v8df) _mm512_undefined_pd (),
+ (__mmask8) -1);
+}
+
+static __inline__ __m512d __DEFAULT_FN_ATTRS
+_mm512_mask_permutexvar_pd (__m512d __W, __mmask8 __U, __m512i __X, __m512d __Y)
+{
+ return (__m512d) __builtin_ia32_permvardf512_mask ((__v8df) __Y,
+ (__v8di) __X,
+ (__v8df) __W,
+ (__mmask8) __U);
+}
+
+static __inline__ __m512d __DEFAULT_FN_ATTRS
+_mm512_maskz_permutexvar_pd (__mmask8 __U, __m512i __X, __m512d __Y)
+{
+ return (__m512d) __builtin_ia32_permvardf512_mask ((__v8df) __Y,
+ (__v8di) __X,
+ (__v8df) _mm512_setzero_pd (),
+ (__mmask8) __U);
+}
+
+static __inline__ __m512i __DEFAULT_FN_ATTRS
+_mm512_maskz_permutexvar_epi64 (__mmask8 __M, __m512i __X, __m512i __Y)
+{
+ return (__m512i) __builtin_ia32_permvardi512_mask ((__v8di) __Y,
+ (__v8di) __X,
+ (__v8di) _mm512_setzero_si512 (),
+ __M);
+}
+
+static __inline__ __m512i __DEFAULT_FN_ATTRS
+_mm512_permutexvar_epi64 (__m512i __X, __m512i __Y)
+{
+ return (__m512i) __builtin_ia32_permvardi512_mask ((__v8di) __Y,
+ (__v8di) __X,
+ (__v8di) _mm512_undefined_epi32 (),
+ (__mmask8) -1);
+}
+
+static __inline__ __m512i __DEFAULT_FN_ATTRS
+_mm512_mask_permutexvar_epi64 (__m512i __W, __mmask8 __M, __m512i __X,
+ __m512i __Y)
+{
+ return (__m512i) __builtin_ia32_permvardi512_mask ((__v8di) __Y,
+ (__v8di) __X,
+ (__v8di) __W,
+ __M);
+}
+
+static __inline__ __m512 __DEFAULT_FN_ATTRS
+_mm512_permutexvar_ps (__m512i __X, __m512 __Y)
+{
+ return (__m512) __builtin_ia32_permvarsf512_mask ((__v16sf) __Y,
+ (__v16si) __X,
+ (__v16sf) _mm512_undefined_ps (),
+ (__mmask16) -1);
+}
+
+static __inline__ __m512 __DEFAULT_FN_ATTRS
+_mm512_mask_permutexvar_ps (__m512 __W, __mmask16 __U, __m512i __X, __m512 __Y)
+{
+ return (__m512) __builtin_ia32_permvarsf512_mask ((__v16sf) __Y,
+ (__v16si) __X,
+ (__v16sf) __W,
+ (__mmask16) __U);
+}
+
+static __inline__ __m512 __DEFAULT_FN_ATTRS
+_mm512_maskz_permutexvar_ps (__mmask16 __U, __m512i __X, __m512 __Y)
+{
+ return (__m512) __builtin_ia32_permvarsf512_mask ((__v16sf) __Y,
+ (__v16si) __X,
+ (__v16sf) _mm512_setzero_ps (),
+ (__mmask16) __U);
+}
+
+static __inline__ __m512i __DEFAULT_FN_ATTRS
+_mm512_maskz_permutexvar_epi32 (__mmask16 __M, __m512i __X, __m512i __Y)
+{
+ return (__m512i) __builtin_ia32_permvarsi512_mask ((__v16si) __Y,
+ (__v16si) __X,
+ (__v16si) _mm512_setzero_si512 (),
+ __M);
+}
+
+static __inline__ __m512i __DEFAULT_FN_ATTRS
+_mm512_permutexvar_epi32 (__m512i __X, __m512i __Y)
+{
+ return (__m512i) __builtin_ia32_permvarsi512_mask ((__v16si) __Y,
+ (__v16si) __X,
+ (__v16si) _mm512_undefined_epi32 (),
+ (__mmask16) -1);
+}
+
+static __inline__ __m512i __DEFAULT_FN_ATTRS
+_mm512_mask_permutexvar_epi32 (__m512i __W, __mmask16 __M, __m512i __X,
+ __m512i __Y)
+{
+ return (__m512i) __builtin_ia32_permvarsi512_mask ((__v16si) __Y,
+ (__v16si) __X,
+ (__v16si) __W,
+ __M);
+}
+
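/* Editor's sketch (hypothetical example_* helper, not part of this patch):
   unlike the immediate forms above, permutexvar takes its lane selectors
   from a vector, so a full 8-element reversal is one instruction. Result
   lane i receives source lane idx[i]; only the low 3 bits of each index
   element matter. */
static __inline__ __m512d example_reverse_pd(__m512d __V)
{
  const __m512i __Idx = _mm512_set_epi64(0, 1, 2, 3, 4, 5, 6, 7);
  return _mm512_permutexvar_pd(__Idx, __V);
}
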
+static __inline__ __mmask16 __DEFAULT_FN_ATTRS
+_mm512_kand (__mmask16 __A, __mmask16 __B)
+{
+ return (__mmask16) __builtin_ia32_kandhi ((__mmask16) __A, (__mmask16) __B);
+}
+
+static __inline__ __mmask16 __DEFAULT_FN_ATTRS
+_mm512_kandn (__mmask16 __A, __mmask16 __B)
+{
+ return (__mmask16) __builtin_ia32_kandnhi ((__mmask16) __A, (__mmask16) __B);
+}
+
+static __inline__ __mmask16 __DEFAULT_FN_ATTRS
+_mm512_kor (__mmask16 __A, __mmask16 __B)
+{
+ return (__mmask16) __builtin_ia32_korhi ((__mmask16) __A, (__mmask16) __B);
+}
+
+static __inline__ int __DEFAULT_FN_ATTRS
+_mm512_kortestc (__mmask16 __A, __mmask16 __B)
+{
+ return __builtin_ia32_kortestchi ((__mmask16) __A, (__mmask16) __B);
+}
+
+static __inline__ int __DEFAULT_FN_ATTRS
+_mm512_kortestz (__mmask16 __A, __mmask16 __B)
+{
+ return __builtin_ia32_kortestzhi ((__mmask16) __A, (__mmask16) __B);
+}
+
+static __inline__ __mmask16 __DEFAULT_FN_ATTRS
+_mm512_kunpackb (__mmask16 __A, __mmask16 __B)
+{
+ return (__mmask16) __builtin_ia32_kunpckhi ((__mmask16) __A, (__mmask16) __B);
+}
+
+static __inline__ __mmask16 __DEFAULT_FN_ATTRS
+_mm512_kxnor (__mmask16 __A, __mmask16 __B)
+{
+ return (__mmask16) __builtin_ia32_kxnorhi ((__mmask16) __A, (__mmask16) __B);
+}
+
+static __inline__ __mmask16 __DEFAULT_FN_ATTRS
+_mm512_kxor (__mmask16 __A, __mmask16 __B)
+{
+ return (__mmask16) __builtin_ia32_kxorhi ((__mmask16) __A, (__mmask16) __B);
+}
+
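/* Editor's sketch (hypothetical example_* helper, not part of this patch):
   the k-register operations combine compare masks without a round trip
   through a general-purpose register; kortestz returns 1 when the OR of its
   two operands is all zero. */
static __inline__ int example_masks_intersect(__mmask16 __A, __mmask16 __B)
{
  __mmask16 __Both = _mm512_kand(__A, __B);   /* lanes set in both masks */
  return !_mm512_kortestz(__Both, __Both);    /* nonzero if any lane survives */
}
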
+static __inline__ void __DEFAULT_FN_ATTRS
+_mm512_stream_si512 (__m512i * __P, __m512i __A)
+{
+ __builtin_nontemporal_store((__v8di)__A, (__v8di*)__P);
+}
+
+static __inline__ __m512i __DEFAULT_FN_ATTRS
+_mm512_stream_load_si512 (void *__P)
+{
+ return __builtin_ia32_movntdqa512 ((__v8di *)__P);
+}
+
+static __inline__ void __DEFAULT_FN_ATTRS
+_mm512_stream_pd (double *__P, __m512d __A)
+{
+ __builtin_nontemporal_store((__v8df)__A, (__v8df*)__P);
+}
+
+static __inline__ void __DEFAULT_FN_ATTRS
+_mm512_stream_ps (float *__P, __m512 __A)
+{
+ __builtin_nontemporal_store((__v16sf)__A, (__v16sf*)__P);
+}
+
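/* Editor's sketch (hypothetical example_* helper, not part of this patch):
   the stream intrinsics map to non-temporal stores, which bypass the cache;
   the destination is assumed to be 64-byte aligned, as the underlying
   vmovnt instructions require. */
static __inline__ void example_stream_zero(float *__P /* 64-byte aligned */)
{
  _mm512_stream_ps(__P, _mm512_setzero_ps());
}
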
+static __inline__ __m512d __DEFAULT_FN_ATTRS
+_mm512_mask_compress_pd (__m512d __W, __mmask8 __U, __m512d __A)
+{
+ return (__m512d) __builtin_ia32_compressdf512_mask ((__v8df) __A,
+ (__v8df) __W,
+ (__mmask8) __U);
+}
+
+static __inline__ __m512d __DEFAULT_FN_ATTRS
+_mm512_maskz_compress_pd (__mmask8 __U, __m512d __A)
+{
+ return (__m512d) __builtin_ia32_compressdf512_mask ((__v8df) __A,
+ (__v8df)
+ _mm512_setzero_pd (),
+ (__mmask8) __U);
+}
+
+static __inline__ __m512i __DEFAULT_FN_ATTRS
+_mm512_mask_compress_epi64 (__m512i __W, __mmask8 __U, __m512i __A)
+{
+ return (__m512i) __builtin_ia32_compressdi512_mask ((__v8di) __A,
+ (__v8di) __W,
+ (__mmask8) __U);
+}
+
+static __inline__ __m512i __DEFAULT_FN_ATTRS
+_mm512_maskz_compress_epi64 (__mmask8 __U, __m512i __A)
+{
+ return (__m512i) __builtin_ia32_compressdi512_mask ((__v8di) __A,
+ (__v8di)
+ _mm512_setzero_si512 (),
+ (__mmask8) __U);
+}
+
+static __inline__ __m512 __DEFAULT_FN_ATTRS
+_mm512_mask_compress_ps (__m512 __W, __mmask16 __U, __m512 __A)
+{
+ return (__m512) __builtin_ia32_compresssf512_mask ((__v16sf) __A,
+ (__v16sf) __W,
+ (__mmask16) __U);
+}
+
+static __inline__ __m512 __DEFAULT_FN_ATTRS
+_mm512_maskz_compress_ps (__mmask16 __U, __m512 __A)
+{
+ return (__m512) __builtin_ia32_compresssf512_mask ((__v16sf) __A,
+ (__v16sf)
+ _mm512_setzero_ps (),
+ (__mmask16) __U);
+}
+
+static __inline__ __m512i __DEFAULT_FN_ATTRS
+_mm512_mask_compress_epi32 (__m512i __W, __mmask16 __U, __m512i __A)
+{
+ return (__m512i) __builtin_ia32_compresssi512_mask ((__v16si) __A,
+ (__v16si) __W,
+ (__mmask16) __U);
+}
+
+static __inline__ __m512i __DEFAULT_FN_ATTRS
+_mm512_maskz_compress_epi32 (__mmask16 __U, __m512i __A)
+{
+ return (__m512i) __builtin_ia32_compresssi512_mask ((__v16si) __A,
+ (__v16si)
+ _mm512_setzero_si512 (),
+ (__mmask16) __U);
+}
+
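/* Editor's sketch (hypothetical example_* helper, not part of this patch):
   compress packs the lanes selected by the mask contiguously into the low
   end of the result. Here the mask comes from a compare, keeping only the
   non-negative floats and zeroing the remainder. */
static __inline__ __m512 example_keep_nonnegative(__m512 __V)
{
  __mmask16 __Ge0 = _mm512_cmp_ps_mask(__V, _mm512_setzero_ps(), _CMP_GE_OQ);
  return _mm512_maskz_compress_ps(__Ge0, __V);
}
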
+#define _mm_cmp_round_ss_mask(X, Y, P, R) __extension__ ({ \
+ (__mmask8)__builtin_ia32_cmpss_mask((__v4sf)(__m128)(X), \
+ (__v4sf)(__m128)(Y), (int)(P), \
+ (__mmask8)-1, (int)(R)); })
+
+#define _mm_mask_cmp_round_ss_mask(M, X, Y, P, R) __extension__ ({ \
+ (__mmask8)__builtin_ia32_cmpss_mask((__v4sf)(__m128)(X), \
+ (__v4sf)(__m128)(Y), (int)(P), \
+ (__mmask8)(M), (int)(R)); })
+
+#define _mm_cmp_ss_mask(X, Y, P) __extension__ ({ \
+ (__mmask8)__builtin_ia32_cmpss_mask((__v4sf)(__m128)(X), \
+ (__v4sf)(__m128)(Y), (int)(P), \
+ (__mmask8)-1, \
+ _MM_FROUND_CUR_DIRECTION); })
+
+#define _mm_mask_cmp_ss_mask(M, X, Y, P) __extension__ ({ \
+ (__mmask8)__builtin_ia32_cmpss_mask((__v4sf)(__m128)(X), \
+ (__v4sf)(__m128)(Y), (int)(P), \
+ (__mmask8)(M), \
+ _MM_FROUND_CUR_DIRECTION); })
+
+#define _mm_cmp_round_sd_mask(X, Y, P, R) __extension__ ({ \
+ (__mmask8)__builtin_ia32_cmpsd_mask((__v2df)(__m128d)(X), \
+ (__v2df)(__m128d)(Y), (int)(P), \
+ (__mmask8)-1, (int)(R)); })
+
+#define _mm_mask_cmp_round_sd_mask(M, X, Y, P, R) __extension__ ({ \
+ (__mmask8)__builtin_ia32_cmpsd_mask((__v2df)(__m128d)(X), \
+ (__v2df)(__m128d)(Y), (int)(P), \
+ (__mmask8)(M), (int)(R)); })
+
+#define _mm_cmp_sd_mask(X, Y, P) __extension__ ({ \
+ (__mmask8)__builtin_ia32_cmpsd_mask((__v2df)(__m128d)(X), \
+ (__v2df)(__m128d)(Y), (int)(P), \
+ (__mmask8)-1, \
+ _MM_FROUND_CUR_DIRECTION); })
+
+#define _mm_mask_cmp_sd_mask(M, X, Y, P) __extension__ ({ \
+ (__mmask8)__builtin_ia32_cmpsd_mask((__v2df)(__m128d)(X), \
+ (__v2df)(__m128d)(Y), (int)(P), \
+ (__mmask8)(M), \
+ _MM_FROUND_CUR_DIRECTION); })
+
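/* Editor's sketch (hypothetical example_* helper, not part of this patch):
   the scalar compares return a k-mask rather than an all-ones lane; bit 0
   of the mask reflects the comparison of the low elements. */
static __inline__ int example_low_lt(__m128d __A, __m128d __B)
{
  return (int)_mm_cmp_sd_mask(__A, __B, _CMP_LT_OS) & 1;
}
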
+static __inline__ __m512 __DEFAULT_FN_ATTRS
+_mm512_movehdup_ps (__m512 __A)
+{
+ return (__m512)__builtin_shufflevector((__v16sf)__A, (__v16sf)__A,
+ 1, 1, 3, 3, 5, 5, 7, 7, 9, 9, 11, 11, 13, 13, 15, 15);
+}
+
+static __inline__ __m512 __DEFAULT_FN_ATTRS
+_mm512_mask_movehdup_ps (__m512 __W, __mmask16 __U, __m512 __A)
+{
+ return (__m512)__builtin_ia32_selectps_512((__mmask16)__U,
+ (__v16sf)_mm512_movehdup_ps(__A),
+ (__v16sf)__W);
+}
+
+static __inline__ __m512 __DEFAULT_FN_ATTRS
+_mm512_maskz_movehdup_ps (__mmask16 __U, __m512 __A)
+{
+ return (__m512)__builtin_ia32_selectps_512((__mmask16)__U,
+ (__v16sf)_mm512_movehdup_ps(__A),
+ (__v16sf)_mm512_setzero_ps());
+}
+
+static __inline__ __m512 __DEFAULT_FN_ATTRS
+_mm512_moveldup_ps (__m512 __A)
+{
+ return (__m512)__builtin_shufflevector((__v16sf)__A, (__v16sf)__A,
+ 0, 0, 2, 2, 4, 4, 6, 6, 8, 8, 10, 10, 12, 12, 14, 14);
+}
+
+static __inline__ __m512 __DEFAULT_FN_ATTRS
+_mm512_mask_moveldup_ps (__m512 __W, __mmask16 __U, __m512 __A)
+{
+ return (__m512)__builtin_ia32_selectps_512((__mmask16)__U,
+ (__v16sf)_mm512_moveldup_ps(__A),
+ (__v16sf)__W);
+}
+
+static __inline__ __m512 __DEFAULT_FN_ATTRS
+_mm512_maskz_moveldup_ps (__mmask16 __U, __m512 __A)
+{
+ return (__m512)__builtin_ia32_selectps_512((__mmask16)__U,
+ (__v16sf)_mm512_moveldup_ps(__A),
+ (__v16sf)_mm512_setzero_ps());
+}
+
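/* Editor's sketch (hypothetical example_* helper, not part of this patch):
   movehdup copies each odd lane over its even neighbor, so adding the
   result back to the source leaves pairwise sums in the even lanes. */
static __inline__ __m512 example_pairwise_sums(__m512 __V)
{
  return _mm512_add_ps(__V, _mm512_movehdup_ps(__V));
}
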
+#define _mm512_shuffle_epi32(A, I) __extension__ ({ \
+ (__m512i)__builtin_shufflevector((__v16si)(__m512i)(A), \
+ (__v16si)_mm512_undefined_epi32(), \
+ 0 + (((I) >> 0) & 0x3), \
+ 0 + (((I) >> 2) & 0x3), \
+ 0 + (((I) >> 4) & 0x3), \
+ 0 + (((I) >> 6) & 0x3), \
+ 4 + (((I) >> 0) & 0x3), \
+ 4 + (((I) >> 2) & 0x3), \
+ 4 + (((I) >> 4) & 0x3), \
+ 4 + (((I) >> 6) & 0x3), \
+ 8 + (((I) >> 0) & 0x3), \
+ 8 + (((I) >> 2) & 0x3), \
+ 8 + (((I) >> 4) & 0x3), \
+ 8 + (((I) >> 6) & 0x3), \
+ 12 + (((I) >> 0) & 0x3), \
+ 12 + (((I) >> 2) & 0x3), \
+ 12 + (((I) >> 4) & 0x3), \
+ 12 + (((I) >> 6) & 0x3)); })
+
+#define _mm512_mask_shuffle_epi32(W, U, A, I) __extension__ ({ \
+ (__m512i)__builtin_ia32_selectd_512((__mmask16)(U), \
+ (__v16si)_mm512_shuffle_epi32((A), (I)), \
+ (__v16si)(__m512i)(W)); })
+
+#define _mm512_maskz_shuffle_epi32(U, A, I) __extension__ ({ \
+ (__m512i)__builtin_ia32_selectd_512((__mmask16)(U), \
+ (__v16si)_mm512_shuffle_epi32((A), (I)), \
+ (__v16si)_mm512_setzero_si512()); })
+
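/* Editor's sketch (hypothetical example_* helper, not part of this patch):
   like _mm512_permutex_pd above, the shuffle immediate is applied per
   128-bit lane; 0x00 broadcasts element 0 of every lane. */
static __inline__ __m512i example_bcast_lane0(__m512i __V)
{
  return _mm512_shuffle_epi32(__V, 0x00);
}
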
+static __inline__ __m512d __DEFAULT_FN_ATTRS
+_mm512_mask_expand_pd (__m512d __W, __mmask8 __U, __m512d __A)
+{
+ return (__m512d) __builtin_ia32_expanddf512_mask ((__v8df) __A,
+ (__v8df) __W,
+ (__mmask8) __U);
+}
+
+static __inline__ __m512d __DEFAULT_FN_ATTRS
+_mm512_maskz_expand_pd (__mmask8 __U, __m512d __A)
+{
+ return (__m512d) __builtin_ia32_expanddf512_mask ((__v8df) __A,
+ (__v8df) _mm512_setzero_pd (),
+ (__mmask8) __U);
+}
+
+static __inline__ __m512i __DEFAULT_FN_ATTRS
+_mm512_mask_expand_epi64 (__m512i __W, __mmask8 __U, __m512i __A)
+{
+ return (__m512i) __builtin_ia32_expanddi512_mask ((__v8di) __A,
+ (__v8di) __W,
+ (__mmask8) __U);
+}
+
+static __inline__ __m512i __DEFAULT_FN_ATTRS
+_mm512_maskz_expand_epi64 ( __mmask8 __U, __m512i __A)
+{
+ return (__m512i) __builtin_ia32_expanddi512_mask ((__v8di) __A,
+ (__v8di) _mm512_setzero_si512 (),
+ (__mmask8) __U);
+}
+
+static __inline__ __m512d __DEFAULT_FN_ATTRS
+_mm512_mask_expandloadu_pd(__m512d __W, __mmask8 __U, void const *__P)
+{
+ return (__m512d) __builtin_ia32_expandloaddf512_mask ((const __v8df *)__P,
+ (__v8df) __W,
+ (__mmask8) __U);
+}
+
+static __inline__ __m512d __DEFAULT_FN_ATTRS
+_mm512_maskz_expandloadu_pd(__mmask8 __U, void const *__P)
+{
+ return (__m512d) __builtin_ia32_expandloaddf512_mask ((const __v8df *)__P,
+ (__v8df) _mm512_setzero_pd(),
+ (__mmask8) __U);
+}
+
+static __inline__ __m512i __DEFAULT_FN_ATTRS
+_mm512_mask_expandloadu_epi64(__m512i __W, __mmask8 __U, void const *__P)
+{
+ return (__m512i) __builtin_ia32_expandloaddi512_mask ((const __v8di *)__P,
+ (__v8di) __W,
+ (__mmask8) __U);
+}
+
+static __inline__ __m512i __DEFAULT_FN_ATTRS
+_mm512_maskz_expandloadu_epi64(__mmask8 __U, void const *__P)
+{
+ return (__m512i) __builtin_ia32_expandloaddi512_mask ((const __v8di *)__P,
+ (__v8di) _mm512_setzero_si512(),
+ (__mmask8) __U);
+}
+
+static __inline__ __m512 __DEFAULT_FN_ATTRS
+_mm512_mask_expandloadu_ps(__m512 __W, __mmask16 __U, void const *__P)
+{
+ return (__m512) __builtin_ia32_expandloadsf512_mask ((const __v16sf *)__P,
+ (__v16sf) __W,
+ (__mmask16) __U);
+}
+
+static __inline__ __m512 __DEFAULT_FN_ATTRS
+_mm512_maskz_expandloadu_ps(__mmask16 __U, void const *__P)
+{
+ return (__m512) __builtin_ia32_expandloadsf512_mask ((const __v16sf *)__P,
+ (__v16sf) _mm512_setzero_ps(),
+ (__mmask16) __U);
+}
+
+static __inline__ __m512i __DEFAULT_FN_ATTRS
+_mm512_mask_expandloadu_epi32(__m512i __W, __mmask16 __U, void const *__P)
+{
+ return (__m512i) __builtin_ia32_expandloadsi512_mask ((const __v16si *)__P,
+ (__v16si) __W,
+ (__mmask16) __U);
+}
+
+static __inline__ __m512i __DEFAULT_FN_ATTRS
+_mm512_maskz_expandloadu_epi32(__mmask16 __U, void const *__P)
+{
+ return (__m512i) __builtin_ia32_expandloadsi512_mask ((const __v16si *)__P,
+ (__v16si) _mm512_setzero_si512(),
+ (__mmask16) __U);
+}
+
+static __inline__ __m512 __DEFAULT_FN_ATTRS
+_mm512_mask_expand_ps (__m512 __W, __mmask16 __U, __m512 __A)
+{
+ return (__m512) __builtin_ia32_expandsf512_mask ((__v16sf) __A,
+ (__v16sf) __W,
+ (__mmask16) __U);
+}
+
+static __inline__ __m512 __DEFAULT_FN_ATTRS
+_mm512_maskz_expand_ps (__mmask16 __U, __m512 __A)
+{
+ return (__m512) __builtin_ia32_expandsf512_mask ((__v16sf) __A,
+ (__v16sf) _mm512_setzero_ps(),
+ (__mmask16) __U);
+}
+
+static __inline__ __m512i __DEFAULT_FN_ATTRS
+_mm512_mask_expand_epi32 (__m512i __W, __mmask16 __U, __m512i __A)
+{
+ return (__m512i) __builtin_ia32_expandsi512_mask ((__v16si) __A,
+ (__v16si) __W,
+ (__mmask16) __U);
+}
+
+static __inline__ __m512i __DEFAULT_FN_ATTRS
+_mm512_maskz_expand_epi32 (__mmask16 __U, __m512i __A)
+{
+ return (__m512i) __builtin_ia32_expandsi512_mask ((__v16si) __A,
+ (__v16si) _mm512_setzero_si512(),
+ (__mmask16) __U);
+}
+
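/* Editor's sketch (hypothetical example_* helper, not part of this patch):
   expand is the inverse of the compress operations above -- consecutive low
   elements of the source are scattered out to the lane positions named by
   the mask, with unselected lanes zeroed. */
static __inline__ __m512i example_expand_to(__mmask16 __Where, __m512i __Packed)
{
  return _mm512_maskz_expand_epi32(__Where, __Packed);
}
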
+#define _mm512_cvt_roundps_pd(A, R) __extension__ ({ \
+ (__m512d)__builtin_ia32_cvtps2pd512_mask((__v8sf)(__m256)(A), \
+ (__v8df)_mm512_undefined_pd(), \
+ (__mmask8)-1, (int)(R)); })
+
+#define _mm512_mask_cvt_roundps_pd(W, U, A, R) __extension__ ({ \
+ (__m512d)__builtin_ia32_cvtps2pd512_mask((__v8sf)(__m256)(A), \
+ (__v8df)(__m512d)(W), \
+ (__mmask8)(U), (int)(R)); })
+
+#define _mm512_maskz_cvt_roundps_pd(U, A, R) __extension__ ({ \
+ (__m512d)__builtin_ia32_cvtps2pd512_mask((__v8sf)(__m256)(A), \
+ (__v8df)_mm512_setzero_pd(), \
+ (__mmask8)(U), (int)(R)); })
+
+static __inline__ __m512d __DEFAULT_FN_ATTRS
+_mm512_cvtps_pd (__m256 __A)
+{
+ return (__m512d) __builtin_ia32_cvtps2pd512_mask ((__v8sf) __A,
+ (__v8df)
+ _mm512_undefined_pd (),
+ (__mmask8) -1,
+ _MM_FROUND_CUR_DIRECTION);
+}
+
+static __inline__ __m512d __DEFAULT_FN_ATTRS
+_mm512_mask_cvtps_pd (__m512d __W, __mmask8 __U, __m256 __A)
+{
+ return (__m512d) __builtin_ia32_cvtps2pd512_mask ((__v8sf) __A,
+ (__v8df) __W,
+ (__mmask8) __U,
+ _MM_FROUND_CUR_DIRECTION);
+}
+
+static __inline__ __m512d __DEFAULT_FN_ATTRS
+_mm512_maskz_cvtps_pd (__mmask8 __U, __m256 __A)
+{
+ return (__m512d) __builtin_ia32_cvtps2pd512_mask ((__v8sf) __A,
+ (__v8df)
+ _mm512_setzero_pd (),
+ (__mmask8) __U,
+ _MM_FROUND_CUR_DIRECTION);
+}
+
+static __inline__ __m512d __DEFAULT_FN_ATTRS
+_mm512_mask_mov_pd (__m512d __W, __mmask8 __U, __m512d __A)
+{
+ return (__m512d) __builtin_ia32_selectpd_512 ((__mmask8) __U,
+ (__v8df) __A,
+ (__v8df) __W);
+}
+
+static __inline__ __m512d __DEFAULT_FN_ATTRS
+_mm512_maskz_mov_pd (__mmask8 __U, __m512d __A)
+{
+ return (__m512d) __builtin_ia32_selectpd_512 ((__mmask8) __U,
+ (__v8df) __A,
+ (__v8df) _mm512_setzero_pd ());
+}
+
+static __inline__ __m512 __DEFAULT_FN_ATTRS
+_mm512_mask_mov_ps (__m512 __W, __mmask16 __U, __m512 __A)
+{
+ return (__m512) __builtin_ia32_selectps_512 ((__mmask16) __U,
+ (__v16sf) __A,
+ (__v16sf) __W);
+}
+
+static __inline__ __m512 __DEFAULT_FN_ATTRS
+_mm512_maskz_mov_ps (__mmask16 __U, __m512 __A)
+{
+ return (__m512) __builtin_ia32_selectps_512 ((__mmask16) __U,
+ (__v16sf) __A,
+ (__v16sf) _mm512_setzero_ps ());
+}
+
+static __inline__ void __DEFAULT_FN_ATTRS
+_mm512_mask_compressstoreu_pd (void *__P, __mmask8 __U, __m512d __A)
+{
+ __builtin_ia32_compressstoredf512_mask ((__v8df *) __P, (__v8df) __A,
+ (__mmask8) __U);
+}
+
+static __inline__ void __DEFAULT_FN_ATTRS
+_mm512_mask_compressstoreu_epi64 (void *__P, __mmask8 __U, __m512i __A)
+{
+ __builtin_ia32_compressstoredi512_mask ((__v8di *) __P, (__v8di) __A,
+ (__mmask8) __U);
+}
+
+static __inline__ void __DEFAULT_FN_ATTRS
+_mm512_mask_compressstoreu_ps (void *__P, __mmask16 __U, __m512 __A)
+{
+ __builtin_ia32_compressstoresf512_mask ((__v16sf *) __P, (__v16sf) __A,
+ (__mmask16) __U);
+}
+
+static __inline__ void __DEFAULT_FN_ATTRS
+_mm512_mask_compressstoreu_epi32 (void *__P, __mmask16 __U, __m512i __A)
+{
+ __builtin_ia32_compressstoresi512_mask ((__v16si *) __P, (__v16si) __A,
+ (__mmask16) __U);
+}
+
+#define _mm_cvt_roundsd_ss(A, B, R) __extension__ ({ \
+ (__m128)__builtin_ia32_cvtsd2ss_round_mask((__v4sf)(__m128)(A), \
+ (__v2df)(__m128d)(B), \
+ (__v4sf)_mm_undefined_ps(), \
+ (__mmask8)-1, (int)(R)); })
+
+#define _mm_mask_cvt_roundsd_ss(W, U, A, B, R) __extension__ ({ \
+ (__m128)__builtin_ia32_cvtsd2ss_round_mask((__v4sf)(__m128)(A), \
+ (__v2df)(__m128d)(B), \
+ (__v4sf)(__m128)(W), \
+ (__mmask8)(U), (int)(R)); })
+
+#define _mm_maskz_cvt_roundsd_ss(U, A, B, R) __extension__ ({ \
+ (__m128)__builtin_ia32_cvtsd2ss_round_mask((__v4sf)(__m128)(A), \
+ (__v2df)(__m128d)(B), \
+ (__v4sf)_mm_setzero_ps(), \
+ (__mmask8)(U), (int)(R)); })
+
+static __inline__ __m128 __DEFAULT_FN_ATTRS
+_mm_mask_cvtsd_ss (__m128 __W, __mmask8 __U, __m128 __A, __m128d __B)
+{
+ return __builtin_ia32_cvtsd2ss_round_mask ((__v4sf)(__A),
+ (__v2df)(__B),
+ (__v4sf)(__W),
+ (__mmask8)(__U), _MM_FROUND_CUR_DIRECTION);
+}
+
+static __inline__ __m128 __DEFAULT_FN_ATTRS
+_mm_maskz_cvtsd_ss (__mmask8 __U, __m128 __A, __m128d __B)
+{
+ return __builtin_ia32_cvtsd2ss_round_mask ((__v4sf)(__A),
+ (__v2df)(__B),
+ (__v4sf)_mm_setzero_ps(),
+ (__mmask8)(__U), _MM_FROUND_CUR_DIRECTION);
+}
+
+#define _mm_cvtss_i32 _mm_cvtss_si32
+#define _mm_cvtss_i64 _mm_cvtss_si64
+#define _mm_cvtsd_i32 _mm_cvtsd_si32
+#define _mm_cvtsd_i64 _mm_cvtsd_si64
+#define _mm_cvti32_sd _mm_cvtsi32_sd
+#define _mm_cvti64_sd _mm_cvtsi64_sd
+#define _mm_cvti32_ss _mm_cvtsi32_ss
+#define _mm_cvti64_ss _mm_cvtsi64_ss
+
+#define _mm_cvt_roundi64_sd(A, B, R) __extension__ ({ \
+ (__m128d)__builtin_ia32_cvtsi2sd64((__v2df)(__m128d)(A), (long long)(B), \
+ (int)(R)); })
+
+#define _mm_cvt_roundsi64_sd(A, B, R) __extension__ ({ \
+ (__m128d)__builtin_ia32_cvtsi2sd64((__v2df)(__m128d)(A), (long long)(B), \
+ (int)(R)); })
+
+#define _mm_cvt_roundsi32_ss(A, B, R) __extension__ ({ \
+ (__m128)__builtin_ia32_cvtsi2ss32((__v4sf)(__m128)(A), (int)(B), (int)(R)); })
+
+#define _mm_cvt_roundi32_ss(A, B, R) __extension__ ({ \
+ (__m128)__builtin_ia32_cvtsi2ss32((__v4sf)(__m128)(A), (int)(B), (int)(R)); })
+
+#define _mm_cvt_roundsi64_ss(A, B, R) __extension__ ({ \
+ (__m128)__builtin_ia32_cvtsi2ss64((__v4sf)(__m128)(A), (long long)(B), \
+ (int)(R)); })
+
+#define _mm_cvt_roundi64_ss(A, B, R) __extension__ ({ \
+ (__m128)__builtin_ia32_cvtsi2ss64((__v4sf)(__m128)(A), (long long)(B), \
+ (int)(R)); })
+
+#define _mm_cvt_roundss_sd(A, B, R) __extension__ ({ \
+ (__m128d)__builtin_ia32_cvtss2sd_round_mask((__v2df)(__m128d)(A), \
+ (__v4sf)(__m128)(B), \
+ (__v2df)_mm_undefined_pd(), \
+ (__mmask8)-1, (int)(R)); })
+
+#define _mm_mask_cvt_roundss_sd(W, U, A, B, R) __extension__ ({ \
+ (__m128d)__builtin_ia32_cvtss2sd_round_mask((__v2df)(__m128d)(A), \
+ (__v4sf)(__m128)(B), \
+ (__v2df)(__m128d)(W), \
+ (__mmask8)(U), (int)(R)); })
+
+#define _mm_maskz_cvt_roundss_sd(U, A, B, R) __extension__ ({ \
+ (__m128d)__builtin_ia32_cvtss2sd_round_mask((__v2df)(__m128d)(A), \
+ (__v4sf)(__m128)(B), \
+ (__v2df)_mm_setzero_pd(), \
+ (__mmask8)(U), (int)(R)); })
+
+static __inline__ __m128d __DEFAULT_FN_ATTRS
+_mm_mask_cvtss_sd (__m128d __W, __mmask8 __U, __m128d __A, __m128 __B)
+{
+ return __builtin_ia32_cvtss2sd_round_mask((__v2df)(__A),
+ (__v4sf)(__B),
+ (__v2df)(__W),
+ (__mmask8)(__U), _MM_FROUND_CUR_DIRECTION);
+}
+
+static __inline__ __m128d __DEFAULT_FN_ATTRS
+_mm_maskz_cvtss_sd (__mmask8 __U, __m128d __A, __m128 __B)
+{
+ return __builtin_ia32_cvtss2sd_round_mask((__v2df)(__A),
+ (__v4sf)(__B),
+ (__v2df)_mm_setzero_pd(),
+ (__mmask8)(__U), _MM_FROUND_CUR_DIRECTION);
+}
+
+static __inline__ __m128d __DEFAULT_FN_ATTRS
+_mm_cvtu32_sd (__m128d __A, unsigned __B)
+{
+ return (__m128d) __builtin_ia32_cvtusi2sd32 ((__v2df) __A, __B);
+}
+
+#define _mm_cvt_roundu64_sd(A, B, R) __extension__ ({ \
+ (__m128d)__builtin_ia32_cvtusi2sd64((__v2df)(__m128d)(A), \
+ (unsigned long long)(B), (int)(R)); })
+
+static __inline__ __m128d __DEFAULT_FN_ATTRS
+_mm_cvtu64_sd (__m128d __A, unsigned long long __B)
+{
+ return (__m128d) __builtin_ia32_cvtusi2sd64 ((__v2df) __A, __B,
+ _MM_FROUND_CUR_DIRECTION);
+}
+
+#define _mm_cvt_roundu32_ss(A, B, R) __extension__ ({ \
+ (__m128)__builtin_ia32_cvtusi2ss32((__v4sf)(__m128)(A), (unsigned int)(B), \
+ (int)(R)); })
+
+static __inline__ __m128 __DEFAULT_FN_ATTRS
+_mm_cvtu32_ss (__m128 __A, unsigned __B)
+{
+ return (__m128) __builtin_ia32_cvtusi2ss32 ((__v4sf) __A, __B,
+ _MM_FROUND_CUR_DIRECTION);
+}
+
+#define _mm_cvt_roundu64_ss(A, B, R) __extension__ ({ \
+ (__m128)__builtin_ia32_cvtusi2ss64((__v4sf)(__m128)(A), \
+ (unsigned long long)(B), (int)(R)); })
+
+static __inline__ __m128 __DEFAULT_FN_ATTRS
+_mm_cvtu64_ss (__m128 __A, unsigned long long __B)
+{
+ return (__m128) __builtin_ia32_cvtusi2ss64 ((__v4sf) __A, __B,
+ _MM_FROUND_CUR_DIRECTION);
+}
+
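/* Editor's sketch (hypothetical example_* helper, not part of this patch):
   the cvtu* intrinsics are new with AVX-512; earlier ISAs had no direct
   unsigned-to-float conversion. The 64-bit forms are assumed to be compiled
   for an x86-64 target. */
static __inline__ __m128 example_u64_to_ss(unsigned long long __X)
{
  return _mm_cvtu64_ss(_mm_setzero_ps(), __X);
}
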
+static __inline__ __m512i __DEFAULT_FN_ATTRS
+_mm512_mask_set1_epi32 (__m512i __O, __mmask16 __M, int __A)
+{
+ return (__m512i) __builtin_ia32_pbroadcastd512_gpr_mask (__A, (__v16si) __O,
+ __M);
+}
+
+static __inline__ __m512i __DEFAULT_FN_ATTRS
+_mm512_mask_set1_epi64 (__m512i __O, __mmask8 __M, long long __A)
+{
+ return (__m512i) __builtin_ia32_pbroadcastq512_gpr_mask (__A, (__v8di) __O,
+ __M);
+}
+
+static __inline __m512i __DEFAULT_FN_ATTRS
+_mm512_set_epi32 (int __A, int __B, int __C, int __D,
+ int __E, int __F, int __G, int __H,
+ int __I, int __J, int __K, int __L,
+ int __M, int __N, int __O, int __P)
+{
+ return __extension__ (__m512i)(__v16si)
+ { __P, __O, __N, __M, __L, __K, __J, __I,
+ __H, __G, __F, __E, __D, __C, __B, __A };
+}
+
+#define _mm512_setr_epi32(e0,e1,e2,e3,e4,e5,e6,e7, \
+ e8,e9,e10,e11,e12,e13,e14,e15) \
+ _mm512_set_epi32((e15),(e14),(e13),(e12),(e11),(e10),(e9),(e8),(e7),(e6), \
+ (e5),(e4),(e3),(e2),(e1),(e0))
+
+static __inline__ __m512i __DEFAULT_FN_ATTRS
+_mm512_set_epi64 (long long __A, long long __B, long long __C,
+ long long __D, long long __E, long long __F,
+ long long __G, long long __H)
+{
+ return __extension__ (__m512i) (__v8di)
+ { __H, __G, __F, __E, __D, __C, __B, __A };
+}
+
+#define _mm512_setr_epi64(e0,e1,e2,e3,e4,e5,e6,e7) \
+ _mm512_set_epi64((e7),(e6),(e5),(e4),(e3),(e2),(e1),(e0))
+
+static __inline__ __m512d __DEFAULT_FN_ATTRS
+_mm512_set_pd (double __A, double __B, double __C, double __D,
+ double __E, double __F, double __G, double __H)
+{
+ return __extension__ (__m512d)
+ { __H, __G, __F, __E, __D, __C, __B, __A };
+}
+
+#define _mm512_setr_pd(e0,e1,e2,e3,e4,e5,e6,e7) \
+ _mm512_set_pd((e7),(e6),(e5),(e4),(e3),(e2),(e1),(e0))
+
+static __inline__ __m512 __DEFAULT_FN_ATTRS
+_mm512_set_ps (float __A, float __B, float __C, float __D,
+ float __E, float __F, float __G, float __H,
+ float __I, float __J, float __K, float __L,
+ float __M, float __N, float __O, float __P)
+{
+ return __extension__ (__m512)
+ { __P, __O, __N, __M, __L, __K, __J, __I,
+ __H, __G, __F, __E, __D, __C, __B, __A };
+}
+
+#define _mm512_setr_ps(e0,e1,e2,e3,e4,e5,e6,e7,e8,e9,e10,e11,e12,e13,e14,e15) \
+ _mm512_set_ps((e15),(e14),(e13),(e12),(e11),(e10),(e9),(e8),(e7),(e6),(e5), \
+ (e4),(e3),(e2),(e1),(e0))
+
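/* Editor's sketch (hypothetical example_* helper, not part of this patch):
   _mm512_set_* lists elements from the highest lane down, while the
   _mm512_setr_* wrappers take them in memory order (lane 0 first), so both
   vectors below hold {0, 1, ..., 7} and the function returns 1. */
static __inline__ int example_set_vs_setr(void)
{
  __m512i __A = _mm512_set_epi64(7, 6, 5, 4, 3, 2, 1, 0);
  __m512i __B = _mm512_setr_epi64(0, 1, 2, 3, 4, 5, 6, 7);
  return _mm512_cmpeq_epi64_mask(__A, __B) == 0xff; /* all lanes equal */
}
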
+static __inline__ __m512 __DEFAULT_FN_ATTRS
+_mm512_abs_ps(__m512 __A)
+{
+ return (__m512)_mm512_and_epi32(_mm512_set1_epi32(0x7FFFFFFF), (__m512i)__A);
+}
+
+static __inline__ __m512 __DEFAULT_FN_ATTRS
+_mm512_mask_abs_ps(__m512 __W, __mmask16 __K, __m512 __A)
+{
+ return (__m512)_mm512_mask_and_epi32((__m512i)__W, __K, _mm512_set1_epi32(0x7FFFFFFF), (__m512i)__A);
+}
+
+static __inline__ __m512d __DEFAULT_FN_ATTRS
+_mm512_abs_pd(__m512d __A)
+{
+ return (__m512d)_mm512_and_epi64(_mm512_set1_epi64(0x7FFFFFFFFFFFFFFF), (__v8di)__A);
+}
+
+static __inline__ __m512d __DEFAULT_FN_ATTRS
+_mm512_mask_abs_pd(__m512d __W, __mmask8 __K, __m512d __A)
+{
+ return (__m512d)_mm512_mask_and_epi64((__v8di)__W, __K, _mm512_set1_epi64(0x7FFFFFFFFFFFFFFF), (__v8di)__A);
+}
+
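/* Editor's note: the abs helpers above simply clear the sign bit with a
   bitwise AND, so they preserve NaN payloads and map -0.0 to +0.0.
   Hypothetical usage:
     __m512 __R = _mm512_abs_ps(_mm512_set1_ps(-1.5f));  // every lane: 1.5f
*/
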
+#undef __DEFAULT_FN_ATTRS
+
+#endif // __AVX512FINTRIN_H
diff --git a/current/clang-include/avx512ifmaintrin.h b/current/clang-include/avx512ifmaintrin.h
new file mode 100644
index 0000000..5defbae
--- /dev/null
+++ b/current/clang-include/avx512ifmaintrin.h
@@ -0,0 +1,92 @@
+/*===------------- avx512ifmaintrin.h - IFMA intrinsics ------------------===
+ *
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+ * THE SOFTWARE.
+ *
+ *===-----------------------------------------------------------------------===
+ */
+#ifndef __IMMINTRIN_H
+#error "Never use <avx512ifmaintrin.h> directly; include <immintrin.h> instead."
+#endif
+
+#ifndef __IFMAINTRIN_H
+#define __IFMAINTRIN_H
+
+/* Define the default attributes for the functions in this file. */
+#define __DEFAULT_FN_ATTRS __attribute__((__always_inline__, __nodebug__, __target__("avx512ifma")))
+
+static __inline__ __m512i __DEFAULT_FN_ATTRS
+_mm512_madd52hi_epu64 (__m512i __X, __m512i __Y, __m512i __Z)
+{
+ return (__m512i) __builtin_ia32_vpmadd52huq512_mask ((__v8di) __X,
+ (__v8di) __Y,
+ (__v8di) __Z,
+ (__mmask8) -1);
+}
+
+static __inline__ __m512i __DEFAULT_FN_ATTRS
+_mm512_mask_madd52hi_epu64 (__m512i __W, __mmask8 __M, __m512i __X,
+ __m512i __Y)
+{
+ return (__m512i) __builtin_ia32_vpmadd52huq512_mask ((__v8di) __W,
+ (__v8di) __X,
+ (__v8di) __Y,
+ (__mmask8) __M);
+}
+
+static __inline__ __m512i __DEFAULT_FN_ATTRS
+_mm512_maskz_madd52hi_epu64 (__mmask8 __M, __m512i __X, __m512i __Y, __m512i __Z)
+{
+ return (__m512i) __builtin_ia32_vpmadd52huq512_maskz ((__v8di) __X,
+ (__v8di) __Y,
+ (__v8di) __Z,
+ (__mmask8) __M);
+}
+
+static __inline__ __m512i __DEFAULT_FN_ATTRS
+_mm512_madd52lo_epu64 (__m512i __X, __m512i __Y, __m512i __Z)
+{
+ return (__m512i) __builtin_ia32_vpmadd52luq512_mask ((__v8di) __X,
+ (__v8di) __Y,
+ (__v8di) __Z,
+ (__mmask8) -1);
+}
+
+static __inline__ __m512i __DEFAULT_FN_ATTRS
+_mm512_mask_madd52lo_epu64 (__m512i __W, __mmask8 __M, __m512i __X,
+ __m512i __Y)
+{
+ return (__m512i) __builtin_ia32_vpmadd52luq512_mask ((__v8di) __W,
+ (__v8di) __X,
+ (__v8di) __Y,
+ (__mmask8) __M);
+}
+
+static __inline__ __m512i __DEFAULT_FN_ATTRS
+_mm512_maskz_madd52lo_epu64 (__mmask8 __M, __m512i __X, __m512i __Y, __m512i __Z)
+{
+ return (__m512i) __builtin_ia32_vpmadd52luq512_maskz ((__v8di) __X,
+ (__v8di) __Y,
+ (__v8di) __Z,
+ (__mmask8) __M);
+}
+
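/* Editor's sketch (hypothetical example_* helper, not part of this patch):
   each 64-bit lane treats the low 52 bits of __Y and __Z as unsigned
   integers, forms their 104-bit product, and adds the low (madd52lo) or
   high (madd52hi) 52 bits of it to the accumulator -- the building block of
   fast multi-word modular arithmetic. */
static __inline__ __m512i example_acc_lo52(__m512i __Acc, __m512i __Y, __m512i __Z)
{
  return _mm512_madd52lo_epu64(__Acc, __Y, __Z);
}
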
+#undef __DEFAULT_FN_ATTRS
+
+#endif
diff --git a/current/clang-include/avx512ifmavlintrin.h b/current/clang-include/avx512ifmavlintrin.h
new file mode 100644
index 0000000..131ee5c
--- /dev/null
+++ b/current/clang-include/avx512ifmavlintrin.h
@@ -0,0 +1,149 @@
+/*===------------- avx512ifmavlintrin.h - IFMA intrinsics ------------------===
+ *
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+ * THE SOFTWARE.
+ *
+ *===-----------------------------------------------------------------------===
+ */
+#ifndef __IMMINTRIN_H
+#error "Never use <avx512ifmavlintrin.h> directly; include <immintrin.h> instead."
+#endif
+
+#ifndef __IFMAVLINTRIN_H
+#define __IFMAVLINTRIN_H
+
+/* Define the default attributes for the functions in this file. */
+#define __DEFAULT_FN_ATTRS __attribute__((__always_inline__, __nodebug__, __target__("avx512ifma,avx512vl")))
+
+
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_madd52hi_epu64 (__m128i __X, __m128i __Y, __m128i __Z)
+{
+ return (__m128i) __builtin_ia32_vpmadd52huq128_mask ((__v2di) __X,
+ (__v2di) __Y,
+ (__v2di) __Z,
+ (__mmask8) -1);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_mask_madd52hi_epu64 (__m128i __W, __mmask8 __M, __m128i __X, __m128i __Y)
+{
+ return (__m128i) __builtin_ia32_vpmadd52huq128_mask ((__v2di) __W,
+ (__v2di) __X,
+ (__v2di) __Y,
+ (__mmask8) __M);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_maskz_madd52hi_epu64 (__mmask8 __M, __m128i __X, __m128i __Y, __m128i __Z)
+{
+ return (__m128i) __builtin_ia32_vpmadd52huq128_maskz ((__v2di) __X,
+ (__v2di) __Y,
+ (__v2di) __Z,
+ (__mmask8) __M);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS
+_mm256_madd52hi_epu64 (__m256i __X, __m256i __Y, __m256i __Z)
+{
+ return (__m256i) __builtin_ia32_vpmadd52huq256_mask ((__v4di) __X,
+ (__v4di) __Y,
+ (__v4di) __Z,
+ (__mmask8) -1);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS
+_mm256_mask_madd52hi_epu64 (__m256i __W, __mmask8 __M, __m256i __X,
+ __m256i __Y)
+{
+ return (__m256i) __builtin_ia32_vpmadd52huq256_mask ((__v4di) __W,
+ (__v4di) __X,
+ (__v4di) __Y,
+ (__mmask8) __M);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS
+_mm256_maskz_madd52hi_epu64 (__mmask8 __M, __m256i __X, __m256i __Y, __m256i __Z)
+{
+ return (__m256i) __builtin_ia32_vpmadd52huq256_maskz ((__v4di) __X,
+ (__v4di) __Y,
+ (__v4di) __Z,
+ (__mmask8) __M);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_madd52lo_epu64 (__m128i __X, __m128i __Y, __m128i __Z)
+{
+ return (__m128i) __builtin_ia32_vpmadd52luq128_mask ((__v2di) __X,
+ (__v2di) __Y,
+ (__v2di) __Z,
+ (__mmask8) -1);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_mask_madd52lo_epu64 (__m128i __W, __mmask8 __M, __m128i __X, __m128i __Y)
+{
+ return (__m128i) __builtin_ia32_vpmadd52luq128_mask ((__v2di) __W,
+ (__v2di) __X,
+ (__v2di) __Y,
+ (__mmask8) __M);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_maskz_madd52lo_epu64 (__mmask8 __M, __m128i __X, __m128i __Y, __m128i __Z)
+{
+ return (__m128i) __builtin_ia32_vpmadd52luq128_maskz ((__v2di) __X,
+ (__v2di) __Y,
+ (__v2di) __Z,
+ (__mmask8) __M);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS
+_mm256_madd52lo_epu64 (__m256i __X, __m256i __Y, __m256i __Z)
+{
+ return (__m256i) __builtin_ia32_vpmadd52luq256_mask ((__v4di) __X,
+ (__v4di) __Y,
+ (__v4di) __Z,
+ (__mmask8) -1);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS
+_mm256_mask_madd52lo_epu64 (__m256i __W, __mmask8 __M, __m256i __X,
+ __m256i __Y)
+{
+ return (__m256i) __builtin_ia32_vpmadd52luq256_mask ((__v4di) __W,
+ (__v4di) __X,
+ (__v4di) __Y,
+ (__mmask8) __M);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS
+_mm256_maskz_madd52lo_epu64 (__mmask8 __M, __m256i __X, __m256i __Y, __m256i __Z)
+{
+ return (__m256i) __builtin_ia32_vpmadd52luq256_maskz ((__v4di) __X,
+ (__v4di) __Y,
+ (__v4di) __Z,
+ (__mmask8) __M);
+}
+
+
+#undef __DEFAULT_FN_ATTRS
+
+#endif
diff --git a/current/clang-include/avx512pfintrin.h b/current/clang-include/avx512pfintrin.h
new file mode 100644
index 0000000..c7fa3cf
--- /dev/null
+++ b/current/clang-include/avx512pfintrin.h
@@ -0,0 +1,111 @@
+/*===------------- avx512pfintrin.h - PF intrinsics ------------------===
+ *
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+ * THE SOFTWARE.
+ *
+ *===-----------------------------------------------------------------------===
+ */
+#ifndef __IMMINTRIN_H
+#error "Never use <avx512pfintrin.h> directly; include <immintrin.h> instead."
+#endif
+
+#ifndef __AVX512PFINTRIN_H
+#define __AVX512PFINTRIN_H
+
+/* Define the default attributes for the functions in this file. */
+#define __DEFAULT_FN_ATTRS __attribute__((__always_inline__, __nodebug__, __target__("avx512pf")))
+
+#define _mm512_mask_prefetch_i32gather_pd(index, mask, addr, scale, hint) __extension__ ({\
+ __builtin_ia32_gatherpfdpd((__mmask8)(mask), (__v8si)(__m256i)(index), \
+ (long long const *)(addr), (int)(scale), \
+ (int)(hint)); })
+
+#define _mm512_prefetch_i32gather_pd(index, addr, scale, hint) __extension__ ({\
+ __builtin_ia32_gatherpfdpd((__mmask8) -1, (__v8si)(__m256i)(index), \
+ (long long const *)(addr), (int)(scale), \
+ (int)(hint)); })
+
+#define _mm512_mask_prefetch_i32gather_ps(index, mask, addr, scale, hint) ({\
+ __builtin_ia32_gatherpfdps((__mmask16)(mask), \
+ (__v16si)(__m512i)(index), (int const *)(addr), \
+ (int)(scale), (int)(hint)); })
+
+#define _mm512_prefetch_i32gather_ps(index, addr, scale, hint) ({\
+ __builtin_ia32_gatherpfdps((__mmask16) -1, \
+ (__v16si)(__m512i)(index), (int const *)(addr), \
+ (int)(scale), (int)(hint)); })
+
+#define _mm512_mask_prefetch_i64gather_pd(index, mask, addr, scale, hint) __extension__ ({\
+ __builtin_ia32_gatherpfqpd((__mmask8)(mask), (__v8di)(__m512i)(index), \
+ (long long const *)(addr), (int)(scale), \
+ (int)(hint)); })
+
+#define _mm512_prefetch_i64gather_pd(index, addr, scale, hint) __extension__ ({\
+ __builtin_ia32_gatherpfqpd((__mmask8) -1, (__v8di)(__m512i)(index), \
+ (long long const *)(addr), (int)(scale), \
+ (int)(hint)); })
+
+#define _mm512_mask_prefetch_i64gather_ps(index, mask, addr, scale, hint) ({\
+ __builtin_ia32_gatherpfqps((__mmask8)(mask), (__v8di)(__m512i)(index), \
+ (int const *)(addr), (int)(scale), (int)(hint)); })
+
+#define _mm512_prefetch_i64gather_ps(index, addr, scale, hint) ({\
+ __builtin_ia32_gatherpfqps((__mmask8) -1, (__v8di)(__m512i)(index), \
+ (int const *)(addr), (int)(scale), (int)(hint)); })
+
+#define _mm512_prefetch_i32scatter_pd(addr, index, scale, hint) __extension__ ({\
+ __builtin_ia32_scatterpfdpd((__mmask8)-1, (__v8si)(__m256i)(index), \
+ (long long *)(addr), (int)(scale), \
+ (int)(hint)); })
+
+#define _mm512_mask_prefetch_i32scatter_pd(addr, mask, index, scale, hint) __extension__ ({\
+ __builtin_ia32_scatterpfdpd((__mmask8)(mask), (__v8si)(__m256i)(index), \
+ (long long *)(addr), (int)(scale), \
+ (int)(hint)); })
+
+#define _mm512_prefetch_i32scatter_ps(addr, index, scale, hint) __extension__ ({\
+ __builtin_ia32_scatterpfdps((__mmask16)-1, (__v16si)(__m512i)(index), \
+ (int *)(addr), (int)(scale), (int)(hint)); })
+
+#define _mm512_mask_prefetch_i32scatter_ps(addr, mask, index, scale, hint) __extension__ ({\
+ __builtin_ia32_scatterpfdps((__mmask16)(mask), \
+ (__v16si)(__m512i)(index), (int *)(addr), \
+ (int)(scale), (int)(hint)); })
+
+#define _mm512_prefetch_i64scatter_pd(addr, index, scale, hint) __extension__ ({\
+ __builtin_ia32_scatterpfqpd((__mmask8)-1, (__v8di)(__m512i)(index), \
+ (long long *)(addr), (int)(scale), \
+ (int)(hint)); })
+
+#define _mm512_mask_prefetch_i64scatter_pd(addr, mask, index, scale, hint) __extension__ ({\
+ __builtin_ia32_scatterpfqpd((__mmask8)(mask), (__v8di)(__m512i)(index), \
+ (long long *)(addr), (int)(scale), \
+ (int)(hint)); })
+
+#define _mm512_prefetch_i64scatter_ps(addr, index, scale, hint) __extension__ ({\
+ __builtin_ia32_scatterpfqps((__mmask8)-1, (__v8di)(__m512i)(index), \
+ (int *)(addr), (int)(scale), (int)(hint)); })
+
+#define _mm512_mask_prefetch_i64scatter_ps(addr, mask, index, scale, hint) __extension__ ({\
+ __builtin_ia32_scatterpfqps((__mmask8)(mask), (__v8di)(__m512i)(index), \
+ (int *)(addr), (int)(scale), (int)(hint)); })
+
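/* Editor's sketch (hypothetical example_* helper, not part of this patch):
   warm the cache ahead of a later gather over the same index vector. scale
   is the element stride in bytes, and the hint is assumed to be _MM_HINT_T0
   or _MM_HINT_T1. */
static __inline__ void example_warm_gather(double const *__Base, __m256i __Idx)
{
  _mm512_prefetch_i32gather_pd(__Idx, __Base, 8, _MM_HINT_T0);
}
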
+#undef __DEFAULT_FN_ATTRS
+
+#endif
diff --git a/current/clang-include/avx512vbmiintrin.h b/current/clang-include/avx512vbmiintrin.h
new file mode 100644
index 0000000..837238e
--- /dev/null
+++ b/current/clang-include/avx512vbmiintrin.h
@@ -0,0 +1,137 @@
+/*===------------- avx512vbmiintrin.h - VBMI intrinsics ------------------===
+ *
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+ * THE SOFTWARE.
+ *
+ *===-----------------------------------------------------------------------===
+ */
+#ifndef __IMMINTRIN_H
+#error "Never use <avx512vbmiintrin.h> directly; include <immintrin.h> instead."
+#endif
+
+#ifndef __VBMIINTRIN_H
+#define __VBMIINTRIN_H
+
+/* Define the default attributes for the functions in this file. */
+#define __DEFAULT_FN_ATTRS __attribute__((__always_inline__, __nodebug__, __target__("avx512vbmi")))
+
+
+static __inline__ __m512i __DEFAULT_FN_ATTRS
+_mm512_mask2_permutex2var_epi8 (__m512i __A, __m512i __I,
+ __mmask64 __U, __m512i __B)
+{
+ return (__m512i) __builtin_ia32_vpermi2varqi512_mask ((__v64qi) __A,
+ (__v64qi) __I
+ /* idx */ ,
+ (__v64qi) __B,
+ (__mmask64) __U);
+}
+
+static __inline__ __m512i __DEFAULT_FN_ATTRS
+_mm512_permutex2var_epi8 (__m512i __A, __m512i __I, __m512i __B)
+{
+ return (__m512i) __builtin_ia32_vpermt2varqi512_mask ((__v64qi) __I
+ /* idx */ ,
+ (__v64qi) __A,
+ (__v64qi) __B,
+ (__mmask64) -1);
+}
+
+static __inline__ __m512i __DEFAULT_FN_ATTRS
+_mm512_mask_permutex2var_epi8 (__m512i __A, __mmask64 __U,
+ __m512i __I, __m512i __B)
+{
+ return (__m512i) __builtin_ia32_vpermt2varqi512_mask ((__v64qi) __I
+ /* idx */ ,
+ (__v64qi) __A,
+ (__v64qi) __B,
+ (__mmask64) __U);
+}
+
+static __inline__ __m512i __DEFAULT_FN_ATTRS
+_mm512_maskz_permutex2var_epi8 (__mmask64 __U, __m512i __A,
+ __m512i __I, __m512i __B)
+{
+ return (__m512i) __builtin_ia32_vpermt2varqi512_maskz ((__v64qi) __I
+ /* idx */ ,
+ (__v64qi) __A,
+ (__v64qi) __B,
+ (__mmask64) __U);
+}
+
+static __inline__ __m512i __DEFAULT_FN_ATTRS
+_mm512_permutexvar_epi8 (__m512i __A, __m512i __B)
+{
+ return (__m512i) __builtin_ia32_permvarqi512_mask ((__v64qi) __B,
+ (__v64qi) __A,
+ (__v64qi) _mm512_undefined_epi32 (),
+ (__mmask64) -1);
+}
+
+static __inline__ __m512i __DEFAULT_FN_ATTRS
+_mm512_maskz_permutexvar_epi8 (__mmask64 __M, __m512i __A,
+ __m512i __B)
+{
+ return (__m512i) __builtin_ia32_permvarqi512_mask ((__v64qi) __B,
+ (__v64qi) __A,
+ (__v64qi) _mm512_setzero_si512(),
+ (__mmask64) __M);
+}
+
+static __inline__ __m512i __DEFAULT_FN_ATTRS
+_mm512_mask_permutexvar_epi8 (__m512i __W, __mmask64 __M, __m512i __A,
+ __m512i __B)
+{
+ return (__m512i) __builtin_ia32_permvarqi512_mask ((__v64qi) __B,
+ (__v64qi) __A,
+ (__v64qi) __W,
+ (__mmask64) __M);
+}
+
+static __inline__ __m512i __DEFAULT_FN_ATTRS
+_mm512_mask_multishift_epi64_epi8 (__m512i __W, __mmask64 __M, __m512i __X, __m512i __Y)
+{
+ return (__m512i) __builtin_ia32_vpmultishiftqb512_mask ((__v64qi) __X,
+ (__v64qi) __Y,
+ (__v64qi) __W,
+ (__mmask64) __M);
+}
+
+static __inline__ __m512i __DEFAULT_FN_ATTRS
+_mm512_maskz_multishift_epi64_epi8 (__mmask64 __M, __m512i __X, __m512i __Y)
+{
+ return (__m512i) __builtin_ia32_vpmultishiftqb512_mask ((__v64qi) __X,
+ (__v64qi) __Y,
+ (__v64qi) _mm512_setzero_si512 (),
+ (__mmask64) __M);
+}
+
+static __inline__ __m512i __DEFAULT_FN_ATTRS
+_mm512_multishift_epi64_epi8 (__m512i __X, __m512i __Y)
+{
+ return (__m512i) __builtin_ia32_vpmultishiftqb512_mask ((__v64qi) __X,
+ (__v64qi) __Y,
+ (__v64qi) _mm512_undefined_epi32 (),
+ (__mmask64) -1);
+}
+
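/* Editor's sketch (hypothetical example_* helper, not part of this patch):
   with byte-granular lane selection, a full 64-entry byte table lookup is a
   single permute; only the low 6 bits of each index byte are used. */
static __inline__ __m512i example_lut64(__m512i __Table, __m512i __Idx)
{
  return _mm512_permutexvar_epi8(__Idx, __Table);
}
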
+
+#undef __DEFAULT_FN_ATTRS
+
+#endif
diff --git a/current/clang-include/avx512vbmivlintrin.h b/current/clang-include/avx512vbmivlintrin.h
new file mode 100644
index 0000000..105c6d1
--- /dev/null
+++ b/current/clang-include/avx512vbmivlintrin.h
@@ -0,0 +1,247 @@
+/*===------------- avx512vbmivlintrin.h - VBMI intrinsics ------------------===
+ *
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+ * THE SOFTWARE.
+ *
+ *===-----------------------------------------------------------------------===
+ */
+#ifndef __IMMINTRIN_H
+#error "Never use <avx512vbmivlintrin.h> directly; include <immintrin.h> instead."
+#endif
+
+#ifndef __VBMIVLINTRIN_H
+#define __VBMIVLINTRIN_H
+
+/* Define the default attributes for the functions in this file. */
+#define __DEFAULT_FN_ATTRS __attribute__((__always_inline__, __nodebug__, __target__("avx512vbmi,avx512vl")))
+
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_mask2_permutex2var_epi8 (__m128i __A, __m128i __I, __mmask16 __U,
+ __m128i __B)
+{
+ return (__m128i) __builtin_ia32_vpermi2varqi128_mask ((__v16qi) __A,
+ (__v16qi) __I
+ /* idx */ ,
+ (__v16qi) __B,
+ (__mmask16)
+ __U);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS
+_mm256_mask2_permutex2var_epi8 (__m256i __A, __m256i __I,
+ __mmask32 __U, __m256i __B)
+{
+ return (__m256i) __builtin_ia32_vpermi2varqi256_mask ((__v32qi) __A,
+ (__v32qi) __I
+ /* idx */ ,
+ (__v32qi) __B,
+ (__mmask32)
+ __U);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_permutex2var_epi8 (__m128i __A, __m128i __I, __m128i __B)
+{
+ return (__m128i) __builtin_ia32_vpermt2varqi128_mask ((__v16qi) __I
+ /* idx */ ,
+ (__v16qi) __A,
+ (__v16qi) __B,
+ (__mmask16) -1);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_mask_permutex2var_epi8 (__m128i __A, __mmask16 __U, __m128i __I,
+ __m128i __B)
+{
+ return (__m128i) __builtin_ia32_vpermt2varqi128_mask ((__v16qi) __I
+ /* idx */ ,
+ (__v16qi) __A,
+ (__v16qi) __B,
+ (__mmask16)
+ __U);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_maskz_permutex2var_epi8 (__mmask16 __U, __m128i __A, __m128i __I,
+ __m128i __B)
+{
+ return (__m128i) __builtin_ia32_vpermt2varqi128_maskz ((__v16qi) __I
+ /* idx */ ,
+ (__v16qi) __A,
+ (__v16qi) __B,
+ (__mmask16)
+ __U);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS
+_mm256_permutex2var_epi8 (__m256i __A, __m256i __I, __m256i __B)
+{
+ return (__m256i) __builtin_ia32_vpermt2varqi256_mask ((__v32qi) __I
+ /* idx */ ,
+ (__v32qi) __A,
+ (__v32qi) __B,
+ (__mmask32) -1);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS
+_mm256_mask_permutex2var_epi8 (__m256i __A, __mmask32 __U,
+ __m256i __I, __m256i __B)
+{
+ return (__m256i) __builtin_ia32_vpermt2varqi256_mask ((__v32qi) __I
+ /* idx */ ,
+ (__v32qi) __A,
+ (__v32qi) __B,
+ (__mmask32)
+ __U);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS
+_mm256_maskz_permutex2var_epi8 (__mmask32 __U, __m256i __A,
+ __m256i __I, __m256i __B)
+{
+ return (__m256i) __builtin_ia32_vpermt2varqi256_maskz ((__v32qi) __I
+ /* idx */ ,
+ (__v32qi) __A,
+ (__v32qi) __B,
+ (__mmask32)
+ __U);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_permutexvar_epi8 (__m128i __A, __m128i __B)
+{
+ return (__m128i) __builtin_ia32_permvarqi128_mask ((__v16qi) __B,
+ (__v16qi) __A,
+ (__v16qi) _mm_undefined_si128 (),
+ (__mmask16) -1);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_maskz_permutexvar_epi8 (__mmask16 __M, __m128i __A, __m128i __B)
+{
+ return (__m128i) __builtin_ia32_permvarqi128_mask ((__v16qi) __B,
+ (__v16qi) __A,
+ (__v16qi) _mm_setzero_si128 (),
+ (__mmask16) __M);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_mask_permutexvar_epi8 (__m128i __W, __mmask16 __M, __m128i __A,
+ __m128i __B)
+{
+ return (__m128i) __builtin_ia32_permvarqi128_mask ((__v16qi) __B,
+ (__v16qi) __A,
+ (__v16qi) __W,
+ (__mmask16) __M);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS
+_mm256_permutexvar_epi8 (__m256i __A, __m256i __B)
+{
+ return (__m256i) __builtin_ia32_permvarqi256_mask ((__v32qi) __B,
+ (__v32qi) __A,
+ (__v32qi) _mm256_undefined_si256 (),
+ (__mmask32) -1);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS
+_mm256_maskz_permutexvar_epi8 (__mmask32 __M, __m256i __A,
+ __m256i __B)
+{
+ return (__m256i) __builtin_ia32_permvarqi256_mask ((__v32qi) __B,
+ (__v32qi) __A,
+ (__v32qi) _mm256_setzero_si256 (),
+ (__mmask32) __M);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS
+_mm256_mask_permutexvar_epi8 (__m256i __W, __mmask32 __M, __m256i __A,
+ __m256i __B)
+{
+ return (__m256i) __builtin_ia32_permvarqi256_mask ((__v32qi) __B,
+ (__v32qi) __A,
+ (__v32qi) __W,
+ (__mmask32) __M);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_mask_multishift_epi64_epi8 (__m128i __W, __mmask16 __M, __m128i __X, __m128i __Y)
+{
+ return (__m128i) __builtin_ia32_vpmultishiftqb128_mask ((__v16qi) __X,
+ (__v16qi) __Y,
+ (__v16qi) __W,
+ (__mmask16) __M);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_maskz_multishift_epi64_epi8 (__mmask16 __M, __m128i __X, __m128i __Y)
+{
+ return (__m128i) __builtin_ia32_vpmultishiftqb128_mask ((__v16qi) __X,
+ (__v16qi) __Y,
+ (__v16qi)
+ _mm_setzero_si128 (),
+ (__mmask16) __M);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_multishift_epi64_epi8 (__m128i __X, __m128i __Y)
+{
+ return (__m128i) __builtin_ia32_vpmultishiftqb128_mask ((__v16qi) __X,
+ (__v16qi) __Y,
+ (__v16qi)
+ _mm_undefined_si128 (),
+ (__mmask16) -1);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS
+_mm256_mask_multishift_epi64_epi8 (__m256i __W, __mmask32 __M, __m256i __X, __m256i __Y)
+{
+ return (__m256i) __builtin_ia32_vpmultishiftqb256_mask ((__v32qi) __X,
+ (__v32qi) __Y,
+ (__v32qi) __W,
+ (__mmask32) __M);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS
+_mm256_maskz_multishift_epi64_epi8 (__mmask32 __M, __m256i __X, __m256i __Y)
+{
+ return (__m256i) __builtin_ia32_vpmultishiftqb256_mask ((__v32qi) __X,
+ (__v32qi) __Y,
+ (__v32qi)
+ _mm256_setzero_si256 (),
+ (__mmask32) __M);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS
+_mm256_multishift_epi64_epi8 (__m256i __X, __m256i __Y)
+{
+ return (__m256i) __builtin_ia32_vpmultishiftqb256_mask ((__v32qi) __X,
+ (__v32qi) __Y,
+ (__v32qi)
+ _mm256_undefined_si256 (),
+ (__mmask32) -1);
+}
+
+
+#undef __DEFAULT_FN_ATTRS
+
+#endif
diff --git a/current/clang-include/avx512vlbwintrin.h b/current/clang-include/avx512vlbwintrin.h
new file mode 100644
index 0000000..990e992
--- /dev/null
+++ b/current/clang-include/avx512vlbwintrin.h
@@ -0,0 +1,3406 @@
+/*===---- avx512vlbwintrin.h - AVX512VL and AVX512BW intrinsics ------------===
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+ * THE SOFTWARE.
+ *
+ *===-----------------------------------------------------------------------===
+ */
+
+#ifndef __IMMINTRIN_H
+#error "Never use <avx512vlbwintrin.h> directly; include <immintrin.h> instead."
+#endif
+
+#ifndef __AVX512VLBWINTRIN_H
+#define __AVX512VLBWINTRIN_H
+
+/* Define the default attributes for the functions in this file. */
+#define __DEFAULT_FN_ATTRS __attribute__((__always_inline__, __nodebug__, __target__("avx512vl,avx512bw")))
+
+static __inline __m128i __DEFAULT_FN_ATTRS
+_mm_setzero_hi(void) {
+ return (__m128i)(__v8hi){ 0, 0, 0, 0, 0, 0, 0, 0 };
+}
+
+/* Integer compare */
+
+static __inline__ __mmask16 __DEFAULT_FN_ATTRS
+_mm_cmpeq_epi8_mask(__m128i __a, __m128i __b) {
+ return (__mmask16)__builtin_ia32_pcmpeqb128_mask((__v16qi)__a, (__v16qi)__b,
+ (__mmask16)-1);
+}
+
+static __inline__ __mmask16 __DEFAULT_FN_ATTRS
+_mm_mask_cmpeq_epi8_mask(__mmask16 __u, __m128i __a, __m128i __b) {
+ return (__mmask16)__builtin_ia32_pcmpeqb128_mask((__v16qi)__a, (__v16qi)__b,
+ __u);
+}
+
+static __inline__ __mmask16 __DEFAULT_FN_ATTRS
+_mm_cmpeq_epu8_mask(__m128i __a, __m128i __b) {
+ return (__mmask16)__builtin_ia32_ucmpb128_mask((__v16qi)__a, (__v16qi)__b, 0,
+ (__mmask16)-1);
+}
+
+static __inline__ __mmask16 __DEFAULT_FN_ATTRS
+_mm_mask_cmpeq_epu8_mask(__mmask16 __u, __m128i __a, __m128i __b) {
+ return (__mmask16)__builtin_ia32_ucmpb128_mask((__v16qi)__a, (__v16qi)__b, 0,
+ __u);
+}
+
+static __inline__ __mmask32 __DEFAULT_FN_ATTRS
+_mm256_cmpeq_epi8_mask(__m256i __a, __m256i __b) {
+ return (__mmask32)__builtin_ia32_pcmpeqb256_mask((__v32qi)__a, (__v32qi)__b,
+ (__mmask32)-1);
+}
+
+static __inline__ __mmask32 __DEFAULT_FN_ATTRS
+_mm256_mask_cmpeq_epi8_mask(__mmask32 __u, __m256i __a, __m256i __b) {
+ return (__mmask32)__builtin_ia32_pcmpeqb256_mask((__v32qi)__a, (__v32qi)__b,
+ __u);
+}
+
+static __inline__ __mmask32 __DEFAULT_FN_ATTRS
+_mm256_cmpeq_epu8_mask(__m256i __a, __m256i __b) {
+ return (__mmask32)__builtin_ia32_ucmpb256_mask((__v32qi)__a, (__v32qi)__b, 0,
+ (__mmask32)-1);
+}
+
+static __inline__ __mmask32 __DEFAULT_FN_ATTRS
+_mm256_mask_cmpeq_epu8_mask(__mmask32 __u, __m256i __a, __m256i __b) {
+ return (__mmask32)__builtin_ia32_ucmpb256_mask((__v32qi)__a, (__v32qi)__b, 0,
+ __u);
+}
+
+static __inline__ __mmask8 __DEFAULT_FN_ATTRS
+_mm_cmpeq_epi16_mask(__m128i __a, __m128i __b) {
+ return (__mmask8)__builtin_ia32_pcmpeqw128_mask((__v8hi)__a, (__v8hi)__b,
+ (__mmask8)-1);
+}
+
+static __inline__ __mmask8 __DEFAULT_FN_ATTRS
+_mm_mask_cmpeq_epi16_mask(__mmask8 __u, __m128i __a, __m128i __b) {
+ return (__mmask8)__builtin_ia32_pcmpeqw128_mask((__v8hi)__a, (__v8hi)__b,
+ __u);
+}
+
+static __inline__ __mmask8 __DEFAULT_FN_ATTRS
+_mm_cmpeq_epu16_mask(__m128i __a, __m128i __b) {
+ return (__mmask8)__builtin_ia32_ucmpw128_mask((__v8hi)__a, (__v8hi)__b, 0,
+ (__mmask8)-1);
+}
+
+static __inline__ __mmask8 __DEFAULT_FN_ATTRS
+_mm_mask_cmpeq_epu16_mask(__mmask8 __u, __m128i __a, __m128i __b) {
+ return (__mmask8)__builtin_ia32_ucmpw128_mask((__v8hi)__a, (__v8hi)__b, 0,
+ __u);
+}
+
+static __inline__ __mmask16 __DEFAULT_FN_ATTRS
+_mm256_cmpeq_epi16_mask(__m256i __a, __m256i __b) {
+ return (__mmask16)__builtin_ia32_pcmpeqw256_mask((__v16hi)__a, (__v16hi)__b,
+ (__mmask16)-1);
+}
+
+static __inline__ __mmask16 __DEFAULT_FN_ATTRS
+_mm256_mask_cmpeq_epi16_mask(__mmask16 __u, __m256i __a, __m256i __b) {
+ return (__mmask16)__builtin_ia32_pcmpeqw256_mask((__v16hi)__a, (__v16hi)__b,
+ __u);
+}
+
+static __inline__ __mmask16 __DEFAULT_FN_ATTRS
+_mm256_cmpeq_epu16_mask(__m256i __a, __m256i __b) {
+ return (__mmask16)__builtin_ia32_ucmpw256_mask((__v16hi)__a, (__v16hi)__b, 0,
+ (__mmask16)-1);
+}
+
+static __inline__ __mmask16 __DEFAULT_FN_ATTRS
+_mm256_mask_cmpeq_epu16_mask(__mmask16 __u, __m256i __a, __m256i __b) {
+ return (__mmask16)__builtin_ia32_ucmpw256_mask((__v16hi)__a, (__v16hi)__b, 0,
+ __u);
+}
+
+static __inline__ __mmask16 __DEFAULT_FN_ATTRS
+_mm_cmpge_epi8_mask(__m128i __a, __m128i __b) {
+ return (__mmask16)__builtin_ia32_cmpb128_mask((__v16qi)__a, (__v16qi)__b, 5,
+ (__mmask16)-1);
+}
+
+static __inline__ __mmask16 __DEFAULT_FN_ATTRS
+_mm_mask_cmpge_epi8_mask(__mmask16 __u, __m128i __a, __m128i __b) {
+ return (__mmask16)__builtin_ia32_cmpb128_mask((__v16qi)__a, (__v16qi)__b, 5,
+ __u);
+}
+
+static __inline__ __mmask16 __DEFAULT_FN_ATTRS
+_mm_cmpge_epu8_mask(__m128i __a, __m128i __b) {
+ return (__mmask16)__builtin_ia32_ucmpb128_mask((__v16qi)__a, (__v16qi)__b, 5,
+ (__mmask16)-1);
+}
+
+static __inline__ __mmask16 __DEFAULT_FN_ATTRS
+_mm_mask_cmpge_epu8_mask(__mmask16 __u, __m128i __a, __m128i __b) {
+ return (__mmask16)__builtin_ia32_ucmpb128_mask((__v16qi)__a, (__v16qi)__b, 5,
+ __u);
+}
+
+static __inline__ __mmask32 __DEFAULT_FN_ATTRS
+_mm256_cmpge_epi8_mask(__m256i __a, __m256i __b) {
+ return (__mmask32)__builtin_ia32_cmpb256_mask((__v32qi)__a, (__v32qi)__b, 5,
+ (__mmask32)-1);
+}
+
+static __inline__ __mmask32 __DEFAULT_FN_ATTRS
+_mm256_mask_cmpge_epi8_mask(__mmask32 __u, __m256i __a, __m256i __b) {
+ return (__mmask32)__builtin_ia32_cmpb256_mask((__v32qi)__a, (__v32qi)__b, 5,
+ __u);
+}
+
+static __inline__ __mmask32 __DEFAULT_FN_ATTRS
+_mm256_cmpge_epu8_mask(__m256i __a, __m256i __b) {
+ return (__mmask32)__builtin_ia32_ucmpb256_mask((__v32qi)__a, (__v32qi)__b, 5,
+ (__mmask32)-1);
+}
+
+static __inline__ __mmask32 __DEFAULT_FN_ATTRS
+_mm256_mask_cmpge_epu8_mask(__mmask32 __u, __m256i __a, __m256i __b) {
+ return (__mmask32)__builtin_ia32_ucmpb256_mask((__v32qi)__a, (__v32qi)__b, 5,
+ __u);
+}
+
+static __inline__ __mmask8 __DEFAULT_FN_ATTRS
+_mm_cmpge_epi16_mask(__m128i __a, __m128i __b) {
+ return (__mmask8)__builtin_ia32_cmpw128_mask((__v8hi)__a, (__v8hi)__b, 5,
+ (__mmask8)-1);
+}
+
+static __inline__ __mmask8 __DEFAULT_FN_ATTRS
+_mm_mask_cmpge_epi16_mask(__mmask8 __u, __m128i __a, __m128i __b) {
+ return (__mmask8)__builtin_ia32_cmpw128_mask((__v8hi)__a, (__v8hi)__b, 5,
+ __u);
+}
+
+static __inline__ __mmask8 __DEFAULT_FN_ATTRS
+_mm_cmpge_epu16_mask(__m128i __a, __m128i __b) {
+ return (__mmask8)__builtin_ia32_ucmpw128_mask((__v8hi)__a, (__v8hi)__b, 5,
+ (__mmask8)-1);
+}
+
+static __inline__ __mmask8 __DEFAULT_FN_ATTRS
+_mm_mask_cmpge_epu16_mask(__mmask8 __u, __m128i __a, __m128i __b) {
+ return (__mmask8)__builtin_ia32_ucmpw128_mask((__v8hi)__a, (__v8hi)__b, 5,
+ __u);
+}
+
+static __inline__ __mmask16 __DEFAULT_FN_ATTRS
+_mm256_cmpge_epi16_mask(__m256i __a, __m256i __b) {
+ return (__mmask16)__builtin_ia32_cmpw256_mask((__v16hi)__a, (__v16hi)__b, 5,
+ (__mmask16)-1);
+}
+
+static __inline__ __mmask16 __DEFAULT_FN_ATTRS
+_mm256_mask_cmpge_epi16_mask(__mmask16 __u, __m256i __a, __m256i __b) {
+ return (__mmask16)__builtin_ia32_cmpw256_mask((__v16hi)__a, (__v16hi)__b, 5,
+ __u);
+}
+
+static __inline__ __mmask16 __DEFAULT_FN_ATTRS
+_mm256_cmpge_epu16_mask(__m256i __a, __m256i __b) {
+ return (__mmask16)__builtin_ia32_ucmpw256_mask((__v16hi)__a, (__v16hi)__b, 5,
+ (__mmask16)-1);
+}
+
+static __inline__ __mmask16 __DEFAULT_FN_ATTRS
+_mm256_mask_cmpge_epu16_mask(__mmask16 __u, __m256i __a, __m256i __b) {
+ return (__mmask16)__builtin_ia32_ucmpw256_mask((__v16hi)__a, (__v16hi)__b, 5,
+ __u);
+}
+
+static __inline__ __mmask16 __DEFAULT_FN_ATTRS
+_mm_cmpgt_epi8_mask(__m128i __a, __m128i __b) {
+ return (__mmask16)__builtin_ia32_pcmpgtb128_mask((__v16qi)__a, (__v16qi)__b,
+ (__mmask16)-1);
+}
+
+static __inline__ __mmask16 __DEFAULT_FN_ATTRS
+_mm_mask_cmpgt_epi8_mask(__mmask16 __u, __m128i __a, __m128i __b) {
+ return (__mmask16)__builtin_ia32_pcmpgtb128_mask((__v16qi)__a, (__v16qi)__b,
+ __u);
+}
+
+static __inline__ __mmask16 __DEFAULT_FN_ATTRS
+_mm_cmpgt_epu8_mask(__m128i __a, __m128i __b) {
+ return (__mmask16)__builtin_ia32_ucmpb128_mask((__v16qi)__a, (__v16qi)__b, 6,
+ (__mmask16)-1);
+}
+
+static __inline__ __mmask16 __DEFAULT_FN_ATTRS
+_mm_mask_cmpgt_epu8_mask(__mmask16 __u, __m128i __a, __m128i __b) {
+ return (__mmask16)__builtin_ia32_ucmpb128_mask((__v16qi)__a, (__v16qi)__b, 6,
+ __u);
+}
+
+static __inline__ __mmask32 __DEFAULT_FN_ATTRS
+_mm256_cmpgt_epi8_mask(__m256i __a, __m256i __b) {
+ return (__mmask32)__builtin_ia32_pcmpgtb256_mask((__v32qi)__a, (__v32qi)__b,
+ (__mmask32)-1);
+}
+
+static __inline__ __mmask32 __DEFAULT_FN_ATTRS
+_mm256_mask_cmpgt_epi8_mask(__mmask32 __u, __m256i __a, __m256i __b) {
+ return (__mmask32)__builtin_ia32_pcmpgtb256_mask((__v32qi)__a, (__v32qi)__b,
+ __u);
+}
+
+static __inline__ __mmask32 __DEFAULT_FN_ATTRS
+_mm256_cmpgt_epu8_mask(__m256i __a, __m256i __b) {
+ return (__mmask32)__builtin_ia32_ucmpb256_mask((__v32qi)__a, (__v32qi)__b, 6,
+ (__mmask32)-1);
+}
+
+static __inline__ __mmask32 __DEFAULT_FN_ATTRS
+_mm256_mask_cmpgt_epu8_mask(__mmask32 __u, __m256i __a, __m256i __b) {
+ return (__mmask32)__builtin_ia32_ucmpb256_mask((__v32qi)__a, (__v32qi)__b, 6,
+ __u);
+}
+
+static __inline__ __mmask8 __DEFAULT_FN_ATTRS
+_mm_cmpgt_epi16_mask(__m128i __a, __m128i __b) {
+ return (__mmask8)__builtin_ia32_pcmpgtw128_mask((__v8hi)__a, (__v8hi)__b,
+ (__mmask8)-1);
+}
+
+static __inline__ __mmask8 __DEFAULT_FN_ATTRS
+_mm_mask_cmpgt_epi16_mask(__mmask8 __u, __m128i __a, __m128i __b) {
+ return (__mmask8)__builtin_ia32_pcmpgtw128_mask((__v8hi)__a, (__v8hi)__b,
+ __u);
+}
+
+static __inline__ __mmask8 __DEFAULT_FN_ATTRS
+_mm_cmpgt_epu16_mask(__m128i __a, __m128i __b) {
+ return (__mmask8)__builtin_ia32_ucmpw128_mask((__v8hi)__a, (__v8hi)__b, 6,
+ (__mmask8)-1);
+}
+
+static __inline__ __mmask8 __DEFAULT_FN_ATTRS
+_mm_mask_cmpgt_epu16_mask(__mmask8 __u, __m128i __a, __m128i __b) {
+ return (__mmask8)__builtin_ia32_ucmpw128_mask((__v8hi)__a, (__v8hi)__b, 6,
+ __u);
+}
+
+static __inline__ __mmask16 __DEFAULT_FN_ATTRS
+_mm256_cmpgt_epi16_mask(__m256i __a, __m256i __b) {
+ return (__mmask16)__builtin_ia32_pcmpgtw256_mask((__v16hi)__a, (__v16hi)__b,
+ (__mmask16)-1);
+}
+
+static __inline__ __mmask16 __DEFAULT_FN_ATTRS
+_mm256_mask_cmpgt_epi16_mask(__mmask16 __u, __m256i __a, __m256i __b) {
+ return (__mmask16)__builtin_ia32_pcmpgtw256_mask((__v16hi)__a, (__v16hi)__b,
+ __u);
+}
+
+static __inline__ __mmask16 __DEFAULT_FN_ATTRS
+_mm256_cmpgt_epu16_mask(__m256i __a, __m256i __b) {
+ return (__mmask16)__builtin_ia32_ucmpw256_mask((__v16hi)__a, (__v16hi)__b, 6,
+ (__mmask16)-1);
+}
+
+static __inline__ __mmask16 __DEFAULT_FN_ATTRS
+_mm256_mask_cmpgt_epu16_mask(__mmask16 __u, __m256i __a, __m256i __b) {
+ return (__mmask16)__builtin_ia32_ucmpw256_mask((__v16hi)__a, (__v16hi)__b, 6,
+ __u);
+}
+
+static __inline__ __mmask16 __DEFAULT_FN_ATTRS
+_mm_cmple_epi8_mask(__m128i __a, __m128i __b) {
+ return (__mmask16)__builtin_ia32_cmpb128_mask((__v16qi)__a, (__v16qi)__b, 2,
+ (__mmask16)-1);
+}
+
+static __inline__ __mmask16 __DEFAULT_FN_ATTRS
+_mm_mask_cmple_epi8_mask(__mmask16 __u, __m128i __a, __m128i __b) {
+ return (__mmask16)__builtin_ia32_cmpb128_mask((__v16qi)__a, (__v16qi)__b, 2,
+ __u);
+}
+
+static __inline__ __mmask16 __DEFAULT_FN_ATTRS
+_mm_cmple_epu8_mask(__m128i __a, __m128i __b) {
+ return (__mmask16)__builtin_ia32_ucmpb128_mask((__v16qi)__a, (__v16qi)__b, 2,
+ (__mmask16)-1);
+}
+
+static __inline__ __mmask16 __DEFAULT_FN_ATTRS
+_mm_mask_cmple_epu8_mask(__mmask16 __u, __m128i __a, __m128i __b) {
+ return (__mmask16)__builtin_ia32_ucmpb128_mask((__v16qi)__a, (__v16qi)__b, 2,
+ __u);
+}
+
+static __inline__ __mmask32 __DEFAULT_FN_ATTRS
+_mm256_cmple_epi8_mask(__m256i __a, __m256i __b) {
+ return (__mmask32)__builtin_ia32_cmpb256_mask((__v32qi)__a, (__v32qi)__b, 2,
+ (__mmask32)-1);
+}
+
+static __inline__ __mmask32 __DEFAULT_FN_ATTRS
+_mm256_mask_cmple_epi8_mask(__mmask32 __u, __m256i __a, __m256i __b) {
+ return (__mmask32)__builtin_ia32_cmpb256_mask((__v32qi)__a, (__v32qi)__b, 2,
+ __u);
+}
+
+static __inline__ __mmask32 __DEFAULT_FN_ATTRS
+_mm256_cmple_epu8_mask(__m256i __a, __m256i __b) {
+ return (__mmask32)__builtin_ia32_ucmpb256_mask((__v32qi)__a, (__v32qi)__b, 2,
+ (__mmask32)-1);
+}
+
+static __inline__ __mmask32 __DEFAULT_FN_ATTRS
+_mm256_mask_cmple_epu8_mask(__mmask32 __u, __m256i __a, __m256i __b) {
+ return (__mmask32)__builtin_ia32_ucmpb256_mask((__v32qi)__a, (__v32qi)__b, 2,
+ __u);
+}
+
+static __inline__ __mmask8 __DEFAULT_FN_ATTRS
+_mm_cmple_epi16_mask(__m128i __a, __m128i __b) {
+ return (__mmask8)__builtin_ia32_cmpw128_mask((__v8hi)__a, (__v8hi)__b, 2,
+ (__mmask8)-1);
+}
+
+static __inline__ __mmask8 __DEFAULT_FN_ATTRS
+_mm_mask_cmple_epi16_mask(__mmask8 __u, __m128i __a, __m128i __b) {
+ return (__mmask8)__builtin_ia32_cmpw128_mask((__v8hi)__a, (__v8hi)__b, 2,
+ __u);
+}
+
+static __inline__ __mmask8 __DEFAULT_FN_ATTRS
+_mm_cmple_epu16_mask(__m128i __a, __m128i __b) {
+ return (__mmask8)__builtin_ia32_ucmpw128_mask((__v8hi)__a, (__v8hi)__b, 2,
+ (__mmask8)-1);
+}
+
+static __inline__ __mmask8 __DEFAULT_FN_ATTRS
+_mm_mask_cmple_epu16_mask(__mmask8 __u, __m128i __a, __m128i __b) {
+ return (__mmask8)__builtin_ia32_ucmpw128_mask((__v8hi)__a, (__v8hi)__b, 2,
+ __u);
+}
+
+static __inline__ __mmask16 __DEFAULT_FN_ATTRS
+_mm256_cmple_epi16_mask(__m256i __a, __m256i __b) {
+ return (__mmask16)__builtin_ia32_cmpw256_mask((__v16hi)__a, (__v16hi)__b, 2,
+ (__mmask16)-1);
+}
+
+static __inline__ __mmask16 __DEFAULT_FN_ATTRS
+_mm256_mask_cmple_epi16_mask(__mmask16 __u, __m256i __a, __m256i __b) {
+ return (__mmask16)__builtin_ia32_cmpw256_mask((__v16hi)__a, (__v16hi)__b, 2,
+ __u);
+}
+
+static __inline__ __mmask16 __DEFAULT_FN_ATTRS
+_mm256_cmple_epu16_mask(__m256i __a, __m256i __b) {
+ return (__mmask16)__builtin_ia32_ucmpw256_mask((__v16hi)__a, (__v16hi)__b, 2,
+ (__mmask16)-1);
+}
+
+static __inline__ __mmask16 __DEFAULT_FN_ATTRS
+_mm256_mask_cmple_epu16_mask(__mmask16 __u, __m256i __a, __m256i __b) {
+ return (__mmask16)__builtin_ia32_ucmpw256_mask((__v16hi)__a, (__v16hi)__b, 2,
+ __u);
+}
+
+static __inline__ __mmask16 __DEFAULT_FN_ATTRS
+_mm_cmplt_epi8_mask(__m128i __a, __m128i __b) {
+ return (__mmask16)__builtin_ia32_cmpb128_mask((__v16qi)__a, (__v16qi)__b, 1,
+ (__mmask16)-1);
+}
+
+static __inline__ __mmask16 __DEFAULT_FN_ATTRS
+_mm_mask_cmplt_epi8_mask(__mmask16 __u, __m128i __a, __m128i __b) {
+ return (__mmask16)__builtin_ia32_cmpb128_mask((__v16qi)__a, (__v16qi)__b, 1,
+ __u);
+}
+
+static __inline__ __mmask16 __DEFAULT_FN_ATTRS
+_mm_cmplt_epu8_mask(__m128i __a, __m128i __b) {
+ return (__mmask16)__builtin_ia32_ucmpb128_mask((__v16qi)__a, (__v16qi)__b, 1,
+ (__mmask16)-1);
+}
+
+static __inline__ __mmask16 __DEFAULT_FN_ATTRS
+_mm_mask_cmplt_epu8_mask(__mmask16 __u, __m128i __a, __m128i __b) {
+ return (__mmask16)__builtin_ia32_ucmpb128_mask((__v16qi)__a, (__v16qi)__b, 1,
+ __u);
+}
+
+static __inline__ __mmask32 __DEFAULT_FN_ATTRS
+_mm256_cmplt_epi8_mask(__m256i __a, __m256i __b) {
+ return (__mmask32)__builtin_ia32_cmpb256_mask((__v32qi)__a, (__v32qi)__b, 1,
+ (__mmask32)-1);
+}
+
+static __inline__ __mmask32 __DEFAULT_FN_ATTRS
+_mm256_mask_cmplt_epi8_mask(__mmask32 __u, __m256i __a, __m256i __b) {
+ return (__mmask32)__builtin_ia32_cmpb256_mask((__v32qi)__a, (__v32qi)__b, 1,
+ __u);
+}
+
+static __inline__ __mmask32 __DEFAULT_FN_ATTRS
+_mm256_cmplt_epu8_mask(__m256i __a, __m256i __b) {
+ return (__mmask32)__builtin_ia32_ucmpb256_mask((__v32qi)__a, (__v32qi)__b, 1,
+ (__mmask32)-1);
+}
+
+static __inline__ __mmask32 __DEFAULT_FN_ATTRS
+_mm256_mask_cmplt_epu8_mask(__mmask32 __u, __m256i __a, __m256i __b) {
+ return (__mmask32)__builtin_ia32_ucmpb256_mask((__v32qi)__a, (__v32qi)__b, 1,
+ __u);
+}
+
+static __inline__ __mmask8 __DEFAULT_FN_ATTRS
+_mm_cmplt_epi16_mask(__m128i __a, __m128i __b) {
+ return (__mmask8)__builtin_ia32_cmpw128_mask((__v8hi)__a, (__v8hi)__b, 1,
+ (__mmask8)-1);
+}
+
+static __inline__ __mmask8 __DEFAULT_FN_ATTRS
+_mm_mask_cmplt_epi16_mask(__mmask8 __u, __m128i __a, __m128i __b) {
+ return (__mmask8)__builtin_ia32_cmpw128_mask((__v8hi)__a, (__v8hi)__b, 1,
+ __u);
+}
+
+static __inline__ __mmask8 __DEFAULT_FN_ATTRS
+_mm_cmplt_epu16_mask(__m128i __a, __m128i __b) {
+ return (__mmask8)__builtin_ia32_ucmpw128_mask((__v8hi)__a, (__v8hi)__b, 1,
+ (__mmask8)-1);
+}
+
+static __inline__ __mmask8 __DEFAULT_FN_ATTRS
+_mm_mask_cmplt_epu16_mask(__mmask8 __u, __m128i __a, __m128i __b) {
+ return (__mmask8)__builtin_ia32_ucmpw128_mask((__v8hi)__a, (__v8hi)__b, 1,
+ __u);
+}
+
+static __inline__ __mmask16 __DEFAULT_FN_ATTRS
+_mm256_cmplt_epi16_mask(__m256i __a, __m256i __b) {
+ return (__mmask16)__builtin_ia32_cmpw256_mask((__v16hi)__a, (__v16hi)__b, 1,
+ (__mmask16)-1);
+}
+
+static __inline__ __mmask16 __DEFAULT_FN_ATTRS
+_mm256_mask_cmplt_epi16_mask(__mmask16 __u, __m256i __a, __m256i __b) {
+ return (__mmask16)__builtin_ia32_cmpw256_mask((__v16hi)__a, (__v16hi)__b, 1,
+ __u);
+}
+
+static __inline__ __mmask16 __DEFAULT_FN_ATTRS
+_mm256_cmplt_epu16_mask(__m256i __a, __m256i __b) {
+ return (__mmask16)__builtin_ia32_ucmpw256_mask((__v16hi)__a, (__v16hi)__b, 1,
+ (__mmask16)-1);
+}
+
+static __inline__ __mmask16 __DEFAULT_FN_ATTRS
+_mm256_mask_cmplt_epu16_mask(__mmask16 __u, __m256i __a, __m256i __b) {
+ return (__mmask16)__builtin_ia32_ucmpw256_mask((__v16hi)__a, (__v16hi)__b, 1,
+ __u);
+}
+
+static __inline__ __mmask16 __DEFAULT_FN_ATTRS
+_mm_cmpneq_epi8_mask(__m128i __a, __m128i __b) {
+ return (__mmask16)__builtin_ia32_cmpb128_mask((__v16qi)__a, (__v16qi)__b, 4,
+ (__mmask16)-1);
+}
+
+static __inline__ __mmask16 __DEFAULT_FN_ATTRS
+_mm_mask_cmpneq_epi8_mask(__mmask16 __u, __m128i __a, __m128i __b) {
+ return (__mmask16)__builtin_ia32_cmpb128_mask((__v16qi)__a, (__v16qi)__b, 4,
+ __u);
+}
+
+static __inline__ __mmask16 __DEFAULT_FN_ATTRS
+_mm_cmpneq_epu8_mask(__m128i __a, __m128i __b) {
+ return (__mmask16)__builtin_ia32_ucmpb128_mask((__v16qi)__a, (__v16qi)__b, 4,
+ (__mmask16)-1);
+}
+
+static __inline__ __mmask16 __DEFAULT_FN_ATTRS
+_mm_mask_cmpneq_epu8_mask(__mmask16 __u, __m128i __a, __m128i __b) {
+ return (__mmask16)__builtin_ia32_ucmpb128_mask((__v16qi)__a, (__v16qi)__b, 4,
+ __u);
+}
+
+static __inline__ __mmask32 __DEFAULT_FN_ATTRS
+_mm256_cmpneq_epi8_mask(__m256i __a, __m256i __b) {
+ return (__mmask32)__builtin_ia32_cmpb256_mask((__v32qi)__a, (__v32qi)__b, 4,
+ (__mmask32)-1);
+}
+
+static __inline__ __mmask32 __DEFAULT_FN_ATTRS
+_mm256_mask_cmpneq_epi8_mask(__mmask32 __u, __m256i __a, __m256i __b) {
+ return (__mmask32)__builtin_ia32_cmpb256_mask((__v32qi)__a, (__v32qi)__b, 4,
+ __u);
+}
+
+static __inline__ __mmask32 __DEFAULT_FN_ATTRS
+_mm256_cmpneq_epu8_mask(__m256i __a, __m256i __b) {
+ return (__mmask32)__builtin_ia32_ucmpb256_mask((__v32qi)__a, (__v32qi)__b, 4,
+ (__mmask32)-1);
+}
+
+static __inline__ __mmask32 __DEFAULT_FN_ATTRS
+_mm256_mask_cmpneq_epu8_mask(__mmask32 __u, __m256i __a, __m256i __b) {
+ return (__mmask32)__builtin_ia32_ucmpb256_mask((__v32qi)__a, (__v32qi)__b, 4,
+ __u);
+}
+
+static __inline__ __mmask8 __DEFAULT_FN_ATTRS
+_mm_cmpneq_epi16_mask(__m128i __a, __m128i __b) {
+ return (__mmask8)__builtin_ia32_cmpw128_mask((__v8hi)__a, (__v8hi)__b, 4,
+ (__mmask8)-1);
+}
+
+static __inline__ __mmask8 __DEFAULT_FN_ATTRS
+_mm_mask_cmpneq_epi16_mask(__mmask8 __u, __m128i __a, __m128i __b) {
+ return (__mmask8)__builtin_ia32_cmpw128_mask((__v8hi)__a, (__v8hi)__b, 4,
+ __u);
+}
+
+static __inline__ __mmask8 __DEFAULT_FN_ATTRS
+_mm_cmpneq_epu16_mask(__m128i __a, __m128i __b) {
+ return (__mmask8)__builtin_ia32_ucmpw128_mask((__v8hi)__a, (__v8hi)__b, 4,
+ (__mmask8)-1);
+}
+
+static __inline__ __mmask8 __DEFAULT_FN_ATTRS
+_mm_mask_cmpneq_epu16_mask(__mmask8 __u, __m128i __a, __m128i __b) {
+ return (__mmask8)__builtin_ia32_ucmpw128_mask((__v8hi)__a, (__v8hi)__b, 4,
+ __u);
+}
+
+static __inline__ __mmask16 __DEFAULT_FN_ATTRS
+_mm256_cmpneq_epi16_mask(__m256i __a, __m256i __b) {
+ return (__mmask16)__builtin_ia32_cmpw256_mask((__v16hi)__a, (__v16hi)__b, 4,
+ (__mmask16)-1);
+}
+
+static __inline__ __mmask16 __DEFAULT_FN_ATTRS
+_mm256_mask_cmpneq_epi16_mask(__mmask16 __u, __m256i __a, __m256i __b) {
+ return (__mmask16)__builtin_ia32_cmpw256_mask((__v16hi)__a, (__v16hi)__b, 4,
+ __u);
+}
+
+static __inline__ __mmask16 __DEFAULT_FN_ATTRS
+_mm256_cmpneq_epu16_mask(__m256i __a, __m256i __b) {
+ return (__mmask16)__builtin_ia32_ucmpw256_mask((__v16hi)__a, (__v16hi)__b, 4,
+ (__mmask16)-1);
+}
+
+static __inline__ __mmask16 __DEFAULT_FN_ATTRS
+_mm256_mask_cmpneq_epu16_mask(__mmask16 __u, __m256i __a, __m256i __b) {
+ return (__mmask16)__builtin_ia32_ucmpw256_mask((__v16hi)__a, (__v16hi)__b, 4,
+ __u);
+}
+
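+/*
+ * Illustrative usage (a sketch, not part of this header): each compare
+ * returns a bitmask with one bit per lane, which can be tested or
+ * counted directly.
+ *
+ *   __m128i a = _mm_set1_epi8(7), b = _mm_set1_epi8(7);
+ *   __mmask16 k = _mm_cmpeq_epi8_mask(a, b);  // all 16 lanes equal
+ *   int n = __builtin_popcount(k);            // n == 16
+ */
+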
+static __inline__ __m256i __DEFAULT_FN_ATTRS
+_mm256_mask_add_epi8 (__m256i __W, __mmask32 __U, __m256i __A, __m256i __B){
+ return (__m256i) __builtin_ia32_paddb256_mask ((__v32qi) __A,
+ (__v32qi) __B,
+ (__v32qi) __W,
+ (__mmask32) __U);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS
+_mm256_maskz_add_epi8 (__mmask32 __U, __m256i __A, __m256i __B) {
+ return (__m256i) __builtin_ia32_paddb256_mask ((__v32qi) __A,
+ (__v32qi) __B,
+ (__v32qi)
+ _mm256_setzero_si256 (),
+ (__mmask32) __U);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS
+_mm256_mask_add_epi16 (__m256i __W, __mmask16 __U, __m256i __A, __m256i __B) {
+ return (__m256i) __builtin_ia32_paddw256_mask ((__v16hi) __A,
+ (__v16hi) __B,
+ (__v16hi) __W,
+ (__mmask16) __U);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS
+_mm256_maskz_add_epi16 (__mmask16 __U, __m256i __A, __m256i __B) {
+ return (__m256i) __builtin_ia32_paddw256_mask ((__v16hi) __A,
+ (__v16hi) __B,
+ (__v16hi)
+ _mm256_setzero_si256 (),
+ (__mmask16) __U);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS
+_mm256_mask_sub_epi8 (__m256i __W, __mmask32 __U, __m256i __A, __m256i __B) {
+ return (__m256i) __builtin_ia32_psubb256_mask ((__v32qi) __A,
+ (__v32qi) __B,
+ (__v32qi) __W,
+ (__mmask32) __U);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS
+_mm256_maskz_sub_epi8 (__mmask32 __U, __m256i __A, __m256i __B) {
+ return (__m256i) __builtin_ia32_psubb256_mask ((__v32qi) __A,
+ (__v32qi) __B,
+ (__v32qi)
+ _mm256_setzero_si256 (),
+ (__mmask32) __U);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS
+_mm256_mask_sub_epi16 (__m256i __W, __mmask16 __U, __m256i __A, __m256i __B) {
+ return (__m256i) __builtin_ia32_psubw256_mask ((__v16hi) __A,
+ (__v16hi) __B,
+ (__v16hi) __W,
+ (__mmask16) __U);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS
+_mm256_maskz_sub_epi16 (__mmask16 __U, __m256i __A, __m256i __B) {
+ return (__m256i) __builtin_ia32_psubw256_mask ((__v16hi) __A,
+ (__v16hi) __B,
+ (__v16hi)
+ _mm256_setzero_si256 (),
+ (__mmask16) __U);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_mask_add_epi8 (__m128i __W, __mmask16 __U, __m128i __A, __m128i __B) {
+ return (__m128i) __builtin_ia32_paddb128_mask ((__v16qi) __A,
+ (__v16qi) __B,
+ (__v16qi) __W,
+ (__mmask16) __U);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_maskz_add_epi8 (__mmask16 __U, __m128i __A, __m128i __B) {
+ return (__m128i) __builtin_ia32_paddb128_mask ((__v16qi) __A,
+ (__v16qi) __B,
+ (__v16qi)
+ _mm_setzero_si128 (),
+ (__mmask16) __U);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_mask_add_epi16 (__m128i __W, __mmask8 __U, __m128i __A, __m128i __B) {
+ return (__m128i) __builtin_ia32_paddw128_mask ((__v8hi) __A,
+ (__v8hi) __B,
+ (__v8hi) __W,
+ (__mmask8) __U);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_maskz_add_epi16 (__mmask8 __U, __m128i __A, __m128i __B) {
+ return (__m128i) __builtin_ia32_paddw128_mask ((__v8hi) __A,
+ (__v8hi) __B,
+ (__v8hi)
+ _mm_setzero_si128 (),
+ (__mmask8) __U);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_mask_sub_epi8 (__m128i __W, __mmask16 __U, __m128i __A, __m128i __B) {
+ return (__m128i) __builtin_ia32_psubb128_mask ((__v16qi) __A,
+ (__v16qi) __B,
+ (__v16qi) __W,
+ (__mmask16) __U);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_maskz_sub_epi8 (__mmask16 __U, __m128i __A, __m128i __B) {
+ return (__m128i) __builtin_ia32_psubb128_mask ((__v16qi) __A,
+ (__v16qi) __B,
+ (__v16qi)
+ _mm_setzero_si128 (),
+ (__mmask16) __U);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_mask_sub_epi16 (__m128i __W, __mmask8 __U, __m128i __A, __m128i __B) {
+ return (__m128i) __builtin_ia32_psubw128_mask ((__v8hi) __A,
+ (__v8hi) __B,
+ (__v8hi) __W,
+ (__mmask8) __U);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_maskz_sub_epi16 (__mmask8 __U, __m128i __A, __m128i __B) {
+ return (__m128i) __builtin_ia32_psubw128_mask ((__v8hi) __A,
+ (__v8hi) __B,
+ (__v8hi)
+ _mm_setzero_si128 (),
+ (__mmask8) __U);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS
+_mm256_mask_mullo_epi16 (__m256i __W, __mmask16 __U, __m256i __A, __m256i __B) {
+ return (__m256i) __builtin_ia32_pmullw256_mask ((__v16hi) __A,
+ (__v16hi) __B,
+ (__v16hi) __W,
+ (__mmask16) __U);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS
+_mm256_maskz_mullo_epi16 (__mmask16 __U, __m256i __A, __m256i __B) {
+ return (__m256i) __builtin_ia32_pmullw256_mask ((__v16hi) __A,
+ (__v16hi) __B,
+ (__v16hi)
+ _mm256_setzero_si256 (),
+ (__mmask16) __U);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_mask_mullo_epi16 (__m128i __W, __mmask8 __U, __m128i __A, __m128i __B) {
+ return (__m128i) __builtin_ia32_pmullw128_mask ((__v8hi) __A,
+ (__v8hi) __B,
+ (__v8hi) __W,
+ (__mmask8) __U);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_maskz_mullo_epi16 (__mmask8 __U, __m128i __A, __m128i __B) {
+ return (__m128i) __builtin_ia32_pmullw128_mask ((__v8hi) __A,
+ (__v8hi) __B,
+ (__v8hi)
+ _mm_setzero_si128 (),
+ (__mmask8) __U);
+}
+
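+/*
+ * Naming convention for the masked forms below: _mm*_mask_* copies lanes
+ * from __W where the corresponding mask bit is 0 (merge masking), while
+ * _mm*_maskz_* zeroes those lanes (zero masking). mask_blend_* is the
+ * bare select: lanes of __W where the bit is 1, lanes of __A where it is
+ * 0. Illustrative sketch (not part of this header):
+ *
+ *   __m128i r = _mm_maskz_add_epi16(0x0F, x, y);
+ *   // lanes 0..3 hold x + y; lanes 4..7 are zero
+ */
+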
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_mask_blend_epi8 (__mmask16 __U, __m128i __A, __m128i __W)
+{
+ return (__m128i) __builtin_ia32_selectb_128 ((__mmask16) __U,
+ (__v16qi) __W,
+ (__v16qi) __A);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS
+_mm256_mask_blend_epi8 (__mmask32 __U, __m256i __A, __m256i __W)
+{
+ return (__m256i) __builtin_ia32_selectb_256 ((__mmask32) __U,
+ (__v32qi) __W,
+ (__v32qi) __A);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_mask_blend_epi16 (__mmask8 __U, __m128i __A, __m128i __W)
+{
+ return (__m128i) __builtin_ia32_selectw_128 ((__mmask8) __U,
+ (__v8hi) __W,
+ (__v8hi) __A);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS
+_mm256_mask_blend_epi16 (__mmask16 __U, __m256i __A, __m256i __W)
+{
+ return (__m256i) __builtin_ia32_selectw_256 ((__mmask16) __U,
+ (__v16hi) __W,
+ (__v16hi) __A);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_mask_abs_epi8 (__m128i __W, __mmask16 __U, __m128i __A)
+{
+ return (__m128i) __builtin_ia32_pabsb128_mask ((__v16qi) __A,
+ (__v16qi) __W,
+ (__mmask16) __U);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_maskz_abs_epi8 (__mmask16 __U, __m128i __A)
+{
+ return (__m128i) __builtin_ia32_pabsb128_mask ((__v16qi) __A,
+ (__v16qi) _mm_setzero_si128 (),
+ (__mmask16) __U);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS
+_mm256_mask_abs_epi8 (__m256i __W, __mmask32 __U, __m256i __A)
+{
+ return (__m256i) __builtin_ia32_pabsb256_mask ((__v32qi) __A,
+ (__v32qi) __W,
+ (__mmask32) __U);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS
+_mm256_maskz_abs_epi8 (__mmask32 __U, __m256i __A)
+{
+ return (__m256i) __builtin_ia32_pabsb256_mask ((__v32qi) __A,
+ (__v32qi) _mm256_setzero_si256 (),
+ (__mmask32) __U);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_mask_abs_epi16 (__m128i __W, __mmask8 __U, __m128i __A)
+{
+ return (__m128i) __builtin_ia32_pabsw128_mask ((__v8hi) __A,
+ (__v8hi) __W,
+ (__mmask8) __U);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_maskz_abs_epi16 (__mmask8 __U, __m128i __A)
+{
+ return (__m128i) __builtin_ia32_pabsw128_mask ((__v8hi) __A,
+ (__v8hi) _mm_setzero_si128 (),
+ (__mmask8) __U);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS
+_mm256_mask_abs_epi16 (__m256i __W, __mmask16 __U, __m256i __A)
+{
+ return (__m256i) __builtin_ia32_pabsw256_mask ((__v16hi) __A,
+ (__v16hi) __W,
+ (__mmask16) __U);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS
+_mm256_maskz_abs_epi16 (__mmask16 __U, __m256i __A)
+{
+ return (__m256i) __builtin_ia32_pabsw256_mask ((__v16hi) __A,
+ (__v16hi) _mm256_setzero_si256 (),
+ (__mmask16) __U);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_maskz_packs_epi32 (__mmask8 __M, __m128i __A, __m128i __B)
+{
+ return (__m128i) __builtin_ia32_packssdw128_mask ((__v4si) __A,
+ (__v4si) __B,
+ (__v8hi) _mm_setzero_si128 (), __M);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_mask_packs_epi32 (__m128i __W, __mmask8 __M, __m128i __A,
+ __m128i __B)
+{
+ return (__m128i) __builtin_ia32_packssdw128_mask ((__v4si) __A,
+ (__v4si) __B,
+ (__v8hi) __W, __M);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS
+_mm256_maskz_packs_epi32 (__mmask16 __M, __m256i __A, __m256i __B)
+{
+ return (__m256i) __builtin_ia32_packssdw256_mask ((__v8si) __A,
+ (__v8si) __B,
+ (__v16hi) _mm256_setzero_si256 (),
+ __M);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS
+_mm256_mask_packs_epi32 (__m256i __W, __mmask16 __M, __m256i __A,
+ __m256i __B)
+{
+ return (__m256i) __builtin_ia32_packssdw256_mask ((__v8si) __A,
+ (__v8si) __B,
+ (__v16hi) __W, __M);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_maskz_packs_epi16 (__mmask16 __M, __m128i __A, __m128i __B)
+{
+ return (__m128i) __builtin_ia32_packsswb128_mask ((__v8hi) __A,
+ (__v8hi) __B,
+ (__v16qi) _mm_setzero_si128 (),
+ __M);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_mask_packs_epi16 (__m128i __W, __mmask16 __M, __m128i __A,
+ __m128i __B)
+{
+ return (__m128i) __builtin_ia32_packsswb128_mask ((__v8hi) __A,
+ (__v8hi) __B,
+ (__v16qi) __W,
+ __M);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS
+_mm256_maskz_packs_epi16 (__mmask32 __M, __m256i __A, __m256i __B)
+{
+ return (__m256i) __builtin_ia32_packsswb256_mask ((__v16hi) __A,
+ (__v16hi) __B,
+ (__v32qi) _mm256_setzero_si256 (),
+ __M);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS
+_mm256_mask_packs_epi16 (__m256i __W, __mmask32 __M, __m256i __A,
+ __m256i __B)
+{
+ return (__m256i) __builtin_ia32_packsswb256_mask ((__v16hi) __A,
+ (__v16hi) __B,
+ (__v32qi) __W,
+ __M);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_maskz_packus_epi32 (__mmask8 __M, __m128i __A, __m128i __B)
+{
+ return (__m128i) __builtin_ia32_packusdw128_mask ((__v4si) __A,
+ (__v4si) __B,
+ (__v8hi) _mm_setzero_si128 (),
+ __M);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_mask_packus_epi32 (__m128i __W, __mmask8 __M, __m128i __A,
+ __m128i __B)
+{
+ return (__m128i) __builtin_ia32_packusdw128_mask ((__v4si) __A,
+ (__v4si) __B,
+ (__v8hi) __W, __M);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS
+_mm256_maskz_packus_epi32 (__mmask16 __M, __m256i __A, __m256i __B)
+{
+ return (__m256i) __builtin_ia32_packusdw256_mask ((__v8si) __A,
+ (__v8si) __B,
+ (__v16hi) _mm256_setzero_si256 (),
+ __M);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS
+_mm256_mask_packus_epi32 (__m256i __W, __mmask16 __M, __m256i __A,
+ __m256i __B)
+{
+ return (__m256i) __builtin_ia32_packusdw256_mask ((__v8si) __A,
+ (__v8si) __B,
+ (__v16hi) __W,
+ __M);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_maskz_packus_epi16 (__mmask16 __M, __m128i __A, __m128i __B)
+{
+ return (__m128i) __builtin_ia32_packuswb128_mask ((__v8hi) __A,
+ (__v8hi) __B,
+ (__v16qi) _mm_setzero_si128 (),
+ __M);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_mask_packus_epi16 (__m128i __W, __mmask16 __M, __m128i __A,
+ __m128i __B)
+{
+ return (__m128i) __builtin_ia32_packuswb128_mask ((__v8hi) __A,
+ (__v8hi) __B,
+ (__v16qi) __W,
+ __M);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS
+_mm256_maskz_packus_epi16 (__mmask32 __M, __m256i __A, __m256i __B)
+{
+ return (__m256i) __builtin_ia32_packuswb256_mask ((__v16hi) __A,
+ (__v16hi) __B,
+ (__v32qi) _mm256_setzero_si256 (),
+ __M);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS
+_mm256_mask_packus_epi16 (__m256i __W, __mmask32 __M, __m256i __A,
+ __m256i __B)
+{
+ return (__m256i) __builtin_ia32_packuswb256_mask ((__v16hi) __A,
+ (__v16hi) __B,
+ (__v32qi) __W,
+ __M);
+}
+
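+/*
+ * adds/subs are saturating: the epi8/epi16 forms clamp to the signed
+ * range ([-128,127] / [-32768,32767]) and the epu8/epu16 forms to the
+ * unsigned range ([0,255] / [0,65535]), instead of wrapping like the
+ * add/sub forms above.
+ */
+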
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_mask_adds_epi8 (__m128i __W, __mmask16 __U, __m128i __A,
+ __m128i __B)
+{
+ return (__m128i) __builtin_ia32_paddsb128_mask ((__v16qi) __A,
+ (__v16qi) __B,
+ (__v16qi) __W,
+ (__mmask16) __U);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_maskz_adds_epi8 (__mmask16 __U, __m128i __A, __m128i __B)
+{
+ return (__m128i) __builtin_ia32_paddsb128_mask ((__v16qi) __A,
+ (__v16qi) __B,
+ (__v16qi) _mm_setzero_si128 (),
+ (__mmask16) __U);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS
+_mm256_mask_adds_epi8 (__m256i __W, __mmask32 __U, __m256i __A,
+ __m256i __B)
+{
+ return (__m256i) __builtin_ia32_paddsb256_mask ((__v32qi) __A,
+ (__v32qi) __B,
+ (__v32qi) __W,
+ (__mmask32) __U);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS
+_mm256_maskz_adds_epi8 (__mmask32 __U, __m256i __A, __m256i __B)
+{
+ return (__m256i) __builtin_ia32_paddsb256_mask ((__v32qi) __A,
+ (__v32qi) __B,
+ (__v32qi) _mm256_setzero_si256 (),
+ (__mmask32) __U);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_mask_adds_epi16 (__m128i __W, __mmask8 __U, __m128i __A,
+ __m128i __B)
+{
+ return (__m128i) __builtin_ia32_paddsw128_mask ((__v8hi) __A,
+ (__v8hi) __B,
+ (__v8hi) __W,
+ (__mmask8) __U);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_maskz_adds_epi16 (__mmask8 __U, __m128i __A, __m128i __B)
+{
+ return (__m128i) __builtin_ia32_paddsw128_mask ((__v8hi) __A,
+ (__v8hi) __B,
+ (__v8hi) _mm_setzero_si128 (),
+ (__mmask8) __U);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS
+_mm256_mask_adds_epi16 (__m256i __W, __mmask16 __U, __m256i __A,
+ __m256i __B)
+{
+ return (__m256i) __builtin_ia32_paddsw256_mask ((__v16hi) __A,
+ (__v16hi) __B,
+ (__v16hi) __W,
+ (__mmask16) __U);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS
+_mm256_maskz_adds_epi16 (__mmask16 __U, __m256i __A, __m256i __B)
+{
+ return (__m256i) __builtin_ia32_paddsw256_mask ((__v16hi) __A,
+ (__v16hi) __B,
+ (__v16hi) _mm256_setzero_si256 (),
+ (__mmask16) __U);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_mask_adds_epu8 (__m128i __W, __mmask16 __U, __m128i __A,
+ __m128i __B)
+{
+ return (__m128i) __builtin_ia32_paddusb128_mask ((__v16qi) __A,
+ (__v16qi) __B,
+ (__v16qi) __W,
+ (__mmask16) __U);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_maskz_adds_epu8 (__mmask16 __U, __m128i __A, __m128i __B)
+{
+ return (__m128i) __builtin_ia32_paddusb128_mask ((__v16qi) __A,
+ (__v16qi) __B,
+ (__v16qi) _mm_setzero_si128 (),
+ (__mmask16) __U);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS
+_mm256_mask_adds_epu8 (__m256i __W, __mmask32 __U, __m256i __A,
+ __m256i __B)
+{
+ return (__m256i) __builtin_ia32_paddusb256_mask ((__v32qi) __A,
+ (__v32qi) __B,
+ (__v32qi) __W,
+ (__mmask32) __U);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS
+_mm256_maskz_adds_epu8 (__mmask32 __U, __m256i __A, __m256i __B)
+{
+ return (__m256i) __builtin_ia32_paddusb256_mask ((__v32qi) __A,
+ (__v32qi) __B,
+ (__v32qi) _mm256_setzero_si256 (),
+ (__mmask32) __U);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_mask_adds_epu16 (__m128i __W, __mmask8 __U, __m128i __A,
+ __m128i __B)
+{
+ return (__m128i) __builtin_ia32_paddusw128_mask ((__v8hi) __A,
+ (__v8hi) __B,
+ (__v8hi) __W,
+ (__mmask8) __U);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_maskz_adds_epu16 (__mmask8 __U, __m128i __A, __m128i __B)
+{
+ return (__m128i) __builtin_ia32_paddusw128_mask ((__v8hi) __A,
+ (__v8hi) __B,
+ (__v8hi) _mm_setzero_si128 (),
+ (__mmask8) __U);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS
+_mm256_mask_adds_epu16 (__m256i __W, __mmask16 __U, __m256i __A,
+ __m256i __B)
+{
+ return (__m256i) __builtin_ia32_paddusw256_mask ((__v16hi) __A,
+ (__v16hi) __B,
+ (__v16hi) __W,
+ (__mmask16) __U);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS
+_mm256_maskz_adds_epu16 (__mmask16 __U, __m256i __A, __m256i __B)
+{
+ return (__m256i) __builtin_ia32_paddusw256_mask ((__v16hi) __A,
+ (__v16hi) __B,
+ (__v16hi) _mm256_setzero_si256 (),
+ (__mmask16) __U);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_mask_avg_epu8 (__m128i __W, __mmask16 __U, __m128i __A,
+ __m128i __B)
+{
+ return (__m128i) __builtin_ia32_pavgb128_mask ((__v16qi) __A,
+ (__v16qi) __B,
+ (__v16qi) __W,
+ (__mmask16) __U);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_maskz_avg_epu8 (__mmask16 __U, __m128i __A, __m128i __B)
+{
+ return (__m128i) __builtin_ia32_pavgb128_mask ((__v16qi) __A,
+ (__v16qi) __B,
+ (__v16qi) _mm_setzero_si128 (),
+ (__mmask16) __U);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS
+_mm256_mask_avg_epu8 (__m256i __W, __mmask32 __U, __m256i __A,
+ __m256i __B)
+{
+ return (__m256i) __builtin_ia32_pavgb256_mask ((__v32qi) __A,
+ (__v32qi) __B,
+ (__v32qi) __W,
+ (__mmask32) __U);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS
+_mm256_maskz_avg_epu8 (__mmask32 __U, __m256i __A, __m256i __B)
+{
+ return (__m256i) __builtin_ia32_pavgb256_mask ((__v32qi) __A,
+ (__v32qi) __B,
+ (__v32qi) _mm256_setzero_si256 (),
+ (__mmask32) __U);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_mask_avg_epu16 (__m128i __W, __mmask8 __U, __m128i __A,
+ __m128i __B)
+{
+ return (__m128i) __builtin_ia32_pavgw128_mask ((__v8hi) __A,
+ (__v8hi) __B,
+ (__v8hi) __W,
+ (__mmask8) __U);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_maskz_avg_epu16 (__mmask8 __U, __m128i __A, __m128i __B)
+{
+ return (__m128i) __builtin_ia32_pavgw128_mask ((__v8hi) __A,
+ (__v8hi) __B,
+ (__v8hi) _mm_setzero_si128 (),
+ (__mmask8) __U);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS
+_mm256_mask_avg_epu16 (__m256i __W, __mmask16 __U, __m256i __A,
+ __m256i __B)
+{
+ return (__m256i) __builtin_ia32_pavgw256_mask ((__v16hi) __A,
+ (__v16hi) __B,
+ (__v16hi) __W,
+ (__mmask16) __U);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS
+_mm256_maskz_avg_epu16 (__mmask16 __U, __m256i __A, __m256i __B)
+{
+ return (__m256i) __builtin_ia32_pavgw256_mask ((__v16hi) __A,
+ (__v16hi) __B,
+ (__v16hi) _mm256_setzero_si256 (),
+ (__mmask16) __U);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_maskz_max_epi8 (__mmask16 __M, __m128i __A, __m128i __B)
+{
+ return (__m128i) __builtin_ia32_pmaxsb128_mask ((__v16qi) __A,
+ (__v16qi) __B,
+ (__v16qi) _mm_setzero_si128 (),
+ (__mmask16) __M);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_mask_max_epi8 (__m128i __W, __mmask16 __M, __m128i __A,
+ __m128i __B)
+{
+ return (__m128i) __builtin_ia32_pmaxsb128_mask ((__v16qi) __A,
+ (__v16qi) __B,
+ (__v16qi) __W,
+ (__mmask16) __M);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS
+_mm256_maskz_max_epi8 (__mmask32 __M, __m256i __A, __m256i __B)
+{
+ return (__m256i) __builtin_ia32_pmaxsb256_mask ((__v32qi) __A,
+ (__v32qi) __B,
+ (__v32qi) _mm256_setzero_si256 (),
+ (__mmask32) __M);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS
+_mm256_mask_max_epi8 (__m256i __W, __mmask32 __M, __m256i __A,
+ __m256i __B)
+{
+ return (__m256i) __builtin_ia32_pmaxsb256_mask ((__v32qi) __A,
+ (__v32qi) __B,
+ (__v32qi) __W,
+ (__mmask32) __M);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_maskz_max_epi16 (__mmask8 __M, __m128i __A, __m128i __B)
+{
+ return (__m128i) __builtin_ia32_pmaxsw128_mask ((__v8hi) __A,
+ (__v8hi) __B,
+ (__v8hi) _mm_setzero_si128 (),
+ (__mmask8) __M);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_mask_max_epi16 (__m128i __W, __mmask8 __M, __m128i __A,
+ __m128i __B)
+{
+ return (__m128i) __builtin_ia32_pmaxsw128_mask ((__v8hi) __A,
+ (__v8hi) __B,
+ (__v8hi) __W,
+ (__mmask8) __M);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS
+_mm256_maskz_max_epi16 (__mmask16 __M, __m256i __A, __m256i __B)
+{
+ return (__m256i) __builtin_ia32_pmaxsw256_mask ((__v16hi) __A,
+ (__v16hi) __B,
+ (__v16hi) _mm256_setzero_si256 (),
+ (__mmask16) __M);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS
+_mm256_mask_max_epi16 (__m256i __W, __mmask16 __M, __m256i __A,
+ __m256i __B)
+{
+ return (__m256i) __builtin_ia32_pmaxsw256_mask ((__v16hi) __A,
+ (__v16hi) __B,
+ (__v16hi) __W,
+ (__mmask16) __M);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_maskz_max_epu8 (__mmask16 __M, __m128i __A, __m128i __B)
+{
+ return (__m128i) __builtin_ia32_pmaxub128_mask ((__v16qi) __A,
+ (__v16qi) __B,
+ (__v16qi) _mm_setzero_si128 (),
+ (__mmask16) __M);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_mask_max_epu8 (__m128i __W, __mmask16 __M, __m128i __A,
+ __m128i __B)
+{
+ return (__m128i) __builtin_ia32_pmaxub128_mask ((__v16qi) __A,
+ (__v16qi) __B,
+ (__v16qi) __W,
+ (__mmask16) __M);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS
+_mm256_maskz_max_epu8 (__mmask32 __M, __m256i __A, __m256i __B)
+{
+ return (__m256i) __builtin_ia32_pmaxub256_mask ((__v32qi) __A,
+ (__v32qi) __B,
+ (__v32qi) _mm256_setzero_si256 (),
+ (__mmask32) __M);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS
+_mm256_mask_max_epu8 (__m256i __W, __mmask32 __M, __m256i __A,
+ __m256i __B)
+{
+ return (__m256i) __builtin_ia32_pmaxub256_mask ((__v32qi) __A,
+ (__v32qi) __B,
+ (__v32qi) __W,
+ (__mmask32) __M);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_maskz_max_epu16 (__mmask8 __M, __m128i __A, __m128i __B)
+{
+ return (__m128i) __builtin_ia32_pmaxuw128_mask ((__v8hi) __A,
+ (__v8hi) __B,
+ (__v8hi) _mm_setzero_si128 (),
+ (__mmask8) __M);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_mask_max_epu16 (__m128i __W, __mmask8 __M, __m128i __A,
+ __m128i __B)
+{
+ return (__m128i) __builtin_ia32_pmaxuw128_mask ((__v8hi) __A,
+ (__v8hi) __B,
+ (__v8hi) __W,
+ (__mmask8) __M);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS
+_mm256_maskz_max_epu16 (__mmask16 __M, __m256i __A, __m256i __B)
+{
+ return (__m256i) __builtin_ia32_pmaxuw256_mask ((__v16hi) __A,
+ (__v16hi) __B,
+ (__v16hi) _mm256_setzero_si256 (),
+ (__mmask16) __M);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS
+_mm256_mask_max_epu16 (__m256i __W, __mmask16 __M, __m256i __A,
+ __m256i __B)
+{
+ return (__m256i) __builtin_ia32_pmaxuw256_mask ((__v16hi) __A,
+ (__v16hi) __B,
+ (__v16hi) __W,
+ (__mmask16) __M);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_maskz_min_epi8 (__mmask16 __M, __m128i __A, __m128i __B)
+{
+ return (__m128i) __builtin_ia32_pminsb128_mask ((__v16qi) __A,
+ (__v16qi) __B,
+ (__v16qi) _mm_setzero_si128 (),
+ (__mmask16) __M);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_mask_min_epi8 (__m128i __W, __mmask16 __M, __m128i __A,
+ __m128i __B)
+{
+ return (__m128i) __builtin_ia32_pminsb128_mask ((__v16qi) __A,
+ (__v16qi) __B,
+ (__v16qi) __W,
+ (__mmask16) __M);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS
+_mm256_maskz_min_epi8 (__mmask32 __M, __m256i __A, __m256i __B)
+{
+ return (__m256i) __builtin_ia32_pminsb256_mask ((__v32qi) __A,
+ (__v32qi) __B,
+ (__v32qi) _mm256_setzero_si256 (),
+ (__mmask32) __M);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS
+_mm256_mask_min_epi8 (__m256i __W, __mmask32 __M, __m256i __A,
+ __m256i __B)
+{
+ return (__m256i) __builtin_ia32_pminsb256_mask ((__v32qi) __A,
+ (__v32qi) __B,
+ (__v32qi) __W,
+ (__mmask32) __M);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_maskz_min_epi16 (__mmask8 __M, __m128i __A, __m128i __B)
+{
+ return (__m128i) __builtin_ia32_pminsw128_mask ((__v8hi) __A,
+ (__v8hi) __B,
+ (__v8hi) _mm_setzero_si128 (),
+ (__mmask8) __M);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_mask_min_epi16 (__m128i __W, __mmask8 __M, __m128i __A,
+ __m128i __B)
+{
+ return (__m128i) __builtin_ia32_pminsw128_mask ((__v8hi) __A,
+ (__v8hi) __B,
+ (__v8hi) __W,
+ (__mmask8) __M);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS
+_mm256_maskz_min_epi16 (__mmask16 __M, __m256i __A, __m256i __B)
+{
+ return (__m256i) __builtin_ia32_pminsw256_mask ((__v16hi) __A,
+ (__v16hi) __B,
+ (__v16hi) _mm256_setzero_si256 (),
+ (__mmask16) __M);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS
+_mm256_mask_min_epi16 (__m256i __W, __mmask16 __M, __m256i __A,
+ __m256i __B)
+{
+ return (__m256i) __builtin_ia32_pminsw256_mask ((__v16hi) __A,
+ (__v16hi) __B,
+ (__v16hi) __W,
+ (__mmask16) __M);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_maskz_min_epu8 (__mmask16 __M, __m128i __A, __m128i __B)
+{
+ return (__m128i) __builtin_ia32_pminub128_mask ((__v16qi) __A,
+ (__v16qi) __B,
+ (__v16qi) _mm_setzero_si128 (),
+ (__mmask16) __M);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_mask_min_epu8 (__m128i __W, __mmask16 __M, __m128i __A,
+ __m128i __B)
+{
+ return (__m128i) __builtin_ia32_pminub128_mask ((__v16qi) __A,
+ (__v16qi) __B,
+ (__v16qi) __W,
+ (__mmask16) __M);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS
+_mm256_maskz_min_epu8 (__mmask32 __M, __m256i __A, __m256i __B)
+{
+ return (__m256i) __builtin_ia32_pminub256_mask ((__v32qi) __A,
+ (__v32qi) __B,
+ (__v32qi) _mm256_setzero_si256 (),
+ (__mmask32) __M);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS
+_mm256_mask_min_epu8 (__m256i __W, __mmask32 __M, __m256i __A,
+ __m256i __B)
+{
+ return (__m256i) __builtin_ia32_pminub256_mask ((__v32qi) __A,
+ (__v32qi) __B,
+ (__v32qi) __W,
+ (__mmask32) __M);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_maskz_min_epu16 (__mmask8 __M, __m128i __A, __m128i __B)
+{
+ return (__m128i) __builtin_ia32_pminuw128_mask ((__v8hi) __A,
+ (__v8hi) __B,
+ (__v8hi) _mm_setzero_si128 (),
+ (__mmask8) __M);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_mask_min_epu16 (__m128i __W, __mmask8 __M, __m128i __A,
+ __m128i __B)
+{
+ return (__m128i) __builtin_ia32_pminuw128_mask ((__v8hi) __A,
+ (__v8hi) __B,
+ (__v8hi) __W,
+ (__mmask8) __M);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS
+_mm256_maskz_min_epu16 (__mmask16 __M, __m256i __A, __m256i __B)
+{
+ return (__m256i) __builtin_ia32_pminuw256_mask ((__v16hi) __A,
+ (__v16hi) __B,
+ (__v16hi) _mm256_setzero_si256 (),
+ (__mmask16) __M);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS
+_mm256_mask_min_epu16 (__m256i __W, __mmask16 __M, __m256i __A,
+ __m256i __B)
+{
+ return (__m256i) __builtin_ia32_pminuw256_mask ((__v16hi) __A,
+ (__v16hi) __B,
+ (__v16hi) __W,
+ (__mmask16) __M);
+}
+
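+/*
+ * shuffle_epi8 follows PSHUFB semantics: each destination byte takes the
+ * source byte of __A selected by the low 4 bits of the matching control
+ * byte in __B (within each 128-bit lane), or 0 when the control byte's
+ * high bit is set; the write mask is then applied on top.
+ */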
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_mask_shuffle_epi8 (__m128i __W, __mmask16 __U, __m128i __A,
+ __m128i __B)
+{
+ return (__m128i) __builtin_ia32_pshufb128_mask ((__v16qi) __A,
+ (__v16qi) __B,
+ (__v16qi) __W,
+ (__mmask16) __U);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_maskz_shuffle_epi8 (__mmask16 __U, __m128i __A, __m128i __B)
+{
+ return (__m128i) __builtin_ia32_pshufb128_mask ((__v16qi) __A,
+ (__v16qi) __B,
+ (__v16qi) _mm_setzero_si128 (),
+ (__mmask16) __U);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS
+_mm256_mask_shuffle_epi8 (__m256i __W, __mmask32 __U, __m256i __A,
+ __m256i __B)
+{
+ return (__m256i) __builtin_ia32_pshufb256_mask ((__v32qi) __A,
+ (__v32qi) __B,
+ (__v32qi) __W,
+ (__mmask32) __U);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS
+_mm256_maskz_shuffle_epi8 (__mmask32 __U, __m256i __A, __m256i __B)
+{
+ return (__m256i) __builtin_ia32_pshufb256_mask ((__v32qi) __A,
+ (__v32qi) __B,
+ (__v32qi) _mm256_setzero_si256 (),
+ (__mmask32) __U);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_mask_subs_epi8 (__m128i __W, __mmask16 __U, __m128i __A,
+ __m128i __B)
+{
+ return (__m128i) __builtin_ia32_psubsb128_mask ((__v16qi) __A,
+ (__v16qi) __B,
+ (__v16qi) __W,
+ (__mmask16) __U);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_maskz_subs_epi8 (__mmask16 __U, __m128i __A, __m128i __B)
+{
+ return (__m128i) __builtin_ia32_psubsb128_mask ((__v16qi) __A,
+ (__v16qi) __B,
+ (__v16qi) _mm_setzero_si128 (),
+ (__mmask16) __U);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS
+_mm256_mask_subs_epi8 (__m256i __W, __mmask32 __U, __m256i __A,
+ __m256i __B)
+{
+ return (__m256i) __builtin_ia32_psubsb256_mask ((__v32qi) __A,
+ (__v32qi) __B,
+ (__v32qi) __W,
+ (__mmask32) __U);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS
+_mm256_maskz_subs_epi8 (__mmask32 __U, __m256i __A, __m256i __B)
+{
+ return (__m256i) __builtin_ia32_psubsb256_mask ((__v32qi) __A,
+ (__v32qi) __B,
+ (__v32qi) _mm256_setzero_si256 (),
+ (__mmask32) __U);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_mask_subs_epi16 (__m128i __W, __mmask8 __U, __m128i __A,
+ __m128i __B)
+{
+ return (__m128i) __builtin_ia32_psubsw128_mask ((__v8hi) __A,
+ (__v8hi) __B,
+ (__v8hi) __W,
+ (__mmask8) __U);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_maskz_subs_epi16 (__mmask8 __U, __m128i __A, __m128i __B)
+{
+ return (__m128i) __builtin_ia32_psubsw128_mask ((__v8hi) __A,
+ (__v8hi) __B,
+ (__v8hi) _mm_setzero_si128 (),
+ (__mmask8) __U);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS
+_mm256_mask_subs_epi16 (__m256i __W, __mmask16 __U, __m256i __A,
+ __m256i __B)
+{
+ return (__m256i) __builtin_ia32_psubsw256_mask ((__v16hi) __A,
+ (__v16hi) __B,
+ (__v16hi) __W,
+ (__mmask16) __U);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS
+_mm256_maskz_subs_epi16 (__mmask16 __U, __m256i __A, __m256i __B)
+{
+ return (__m256i) __builtin_ia32_psubsw256_mask ((__v16hi) __A,
+ (__v16hi) __B,
+ (__v16hi) _mm256_setzero_si256 (),
+ (__mmask16) __U);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_mask_subs_epu8 (__m128i __W, __mmask16 __U, __m128i __A,
+ __m128i __B)
+{
+ return (__m128i) __builtin_ia32_psubusb128_mask ((__v16qi) __A,
+ (__v16qi) __B,
+ (__v16qi) __W,
+ (__mmask16) __U);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_maskz_subs_epu8 (__mmask16 __U, __m128i __A, __m128i __B)
+{
+ return (__m128i) __builtin_ia32_psubusb128_mask ((__v16qi) __A,
+ (__v16qi) __B,
+ (__v16qi) _mm_setzero_si128 (),
+ (__mmask16) __U);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS
+_mm256_mask_subs_epu8 (__m256i __W, __mmask32 __U, __m256i __A,
+ __m256i __B)
+{
+ return (__m256i) __builtin_ia32_psubusb256_mask ((__v32qi) __A,
+ (__v32qi) __B,
+ (__v32qi) __W,
+ (__mmask32) __U);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS
+_mm256_maskz_subs_epu8 (__mmask32 __U, __m256i __A, __m256i __B)
+{
+ return (__m256i) __builtin_ia32_psubusb256_mask ((__v32qi) __A,
+ (__v32qi) __B,
+ (__v32qi) _mm256_setzero_si256 (),
+ (__mmask32) __U);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_mask_subs_epu16 (__m128i __W, __mmask8 __U, __m128i __A,
+ __m128i __B)
+{
+ return (__m128i) __builtin_ia32_psubusw128_mask ((__v8hi) __A,
+ (__v8hi) __B,
+ (__v8hi) __W,
+ (__mmask8) __U);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_maskz_subs_epu16 (__mmask8 __U, __m128i __A, __m128i __B)
+{
+ return (__m128i) __builtin_ia32_psubusw128_mask ((__v8hi) __A,
+ (__v8hi) __B,
+ (__v8hi) _mm_setzero_si128 (),
+ (__mmask8) __U);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS
+_mm256_mask_subs_epu16 (__m256i __W, __mmask16 __U, __m256i __A,
+ __m256i __B)
+{
+ return (__m256i) __builtin_ia32_psubusw256_mask ((__v16hi) __A,
+ (__v16hi) __B,
+ (__v16hi) __W,
+ (__mmask16) __U);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS
+_mm256_maskz_subs_epu16 (__mmask16 __U, __m256i __A, __m256i __B)
+{
+ return (__m256i) __builtin_ia32_psubusw256_mask ((__v16hi) __A,
+ (__v16hi) __B,
+ (__v16hi) _mm256_setzero_si256 (),
+ (__mmask16) __U);
+}
+
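+/*
+ * permutex2var_epi16 is a two-source permute: each 16-bit result lane is
+ * taken from the concatenation of __A and __B, indexed by the low bits
+ * of the matching lane of __I (bit 3 selects the source table in the
+ * 128-bit form, bit 4 in the 256-bit form). The mask2 variants merge
+ * with __I instead of __A when a mask bit is 0.
+ */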
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_mask2_permutex2var_epi16 (__m128i __A, __m128i __I, __mmask8 __U,
+ __m128i __B)
+{
+ return (__m128i) __builtin_ia32_vpermi2varhi128_mask ((__v8hi) __A,
+ (__v8hi) __I /* idx */ ,
+ (__v8hi) __B,
+ (__mmask8) __U);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS
+_mm256_mask2_permutex2var_epi16 (__m256i __A, __m256i __I,
+ __mmask16 __U, __m256i __B)
+{
+ return (__m256i) __builtin_ia32_vpermi2varhi256_mask ((__v16hi) __A,
+ (__v16hi) __I /* idx */ ,
+ (__v16hi) __B,
+ (__mmask16) __U);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_permutex2var_epi16 (__m128i __A, __m128i __I, __m128i __B)
+{
+ return (__m128i) __builtin_ia32_vpermt2varhi128_mask ((__v8hi) __I/* idx */,
+ (__v8hi) __A,
+ (__v8hi) __B,
+ (__mmask8) -1);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_mask_permutex2var_epi16 (__m128i __A, __mmask8 __U, __m128i __I,
+ __m128i __B)
+{
+ return (__m128i) __builtin_ia32_vpermt2varhi128_mask ((__v8hi) __I/* idx */,
+ (__v8hi) __A,
+ (__v8hi) __B,
+ (__mmask8) __U);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_maskz_permutex2var_epi16 (__mmask8 __U, __m128i __A, __m128i __I,
+ __m128i __B)
+{
+ return (__m128i) __builtin_ia32_vpermt2varhi128_maskz ((__v8hi) __I/* idx */,
+ (__v8hi) __A,
+ (__v8hi) __B,
+ (__mmask8) __U);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS
+_mm256_permutex2var_epi16 (__m256i __A, __m256i __I, __m256i __B)
+{
+ return (__m256i) __builtin_ia32_vpermt2varhi256_mask ((__v16hi) __I/* idx */,
+ (__v16hi) __A,
+ (__v16hi) __B,
+ (__mmask16) -1);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS
+_mm256_mask_permutex2var_epi16 (__m256i __A, __mmask16 __U,
+ __m256i __I, __m256i __B)
+{
+ return (__m256i) __builtin_ia32_vpermt2varhi256_mask ((__v16hi) __I/* idx */,
+ (__v16hi) __A,
+ (__v16hi) __B,
+ (__mmask16) __U);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS
+_mm256_maskz_permutex2var_epi16 (__mmask16 __U, __m256i __A,
+ __m256i __I, __m256i __B)
+{
+ return (__m256i) __builtin_ia32_vpermt2varhi256_maskz ((__v16hi) __I/* idx */,
+ (__v16hi) __A,
+ (__v16hi) __B,
+ (__mmask16) __U);
+}
+
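+/*
+ * maddubs_epi16 (PMADDUBSW) multiplies unsigned bytes of __X by signed
+ * bytes of __Y and sums horizontal pairs with signed saturation into
+ * 16-bit lanes; madd_epi16 (PMADDWD) multiplies signed 16-bit lanes and
+ * sums adjacent products into 32-bit lanes.
+ */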
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_mask_maddubs_epi16 (__m128i __W, __mmask8 __U, __m128i __X, __m128i __Y) {
+ return (__m128i) __builtin_ia32_pmaddubsw128_mask ((__v16qi) __X,
+ (__v16qi) __Y,
+ (__v8hi) __W,
+ (__mmask8) __U);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_maskz_maddubs_epi16 (__mmask8 __U, __m128i __X, __m128i __Y) {
+ return (__m128i) __builtin_ia32_pmaddubsw128_mask ((__v16qi) __X,
+ (__v16qi) __Y,
+ (__v8hi) _mm_setzero_si128(),
+ (__mmask8) __U);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS
+_mm256_mask_maddubs_epi16 (__m256i __W, __mmask16 __U, __m256i __X,
+ __m256i __Y) {
+ return (__m256i) __builtin_ia32_pmaddubsw256_mask ((__v32qi) __X,
+ (__v32qi) __Y,
+ (__v16hi) __W,
+ (__mmask16) __U);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS
+_mm256_maskz_maddubs_epi16 (__mmask16 __U, __m256i __X, __m256i __Y) {
+ return (__m256i) __builtin_ia32_pmaddubsw256_mask ((__v32qi) __X,
+ (__v32qi) __Y,
+ (__v16hi) _mm256_setzero_si256(),
+ (__mmask16) __U);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_mask_madd_epi16 (__m128i __W, __mmask8 __U, __m128i __A,
+ __m128i __B) {
+ return (__m128i) __builtin_ia32_pmaddwd128_mask ((__v8hi) __A,
+ (__v8hi) __B,
+ (__v4si) __W,
+ (__mmask8) __U);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_maskz_madd_epi16 (__mmask8 __U, __m128i __A, __m128i __B) {
+ return (__m128i) __builtin_ia32_pmaddwd128_mask ((__v8hi) __A,
+ (__v8hi) __B,
+ (__v4si) _mm_setzero_si128(),
+ (__mmask8) __U);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS
+_mm256_mask_madd_epi16 (__m256i __W, __mmask8 __U, __m256i __A, __m256i __B) {
+ return (__m256i) __builtin_ia32_pmaddwd256_mask ((__v16hi) __A,
+ (__v16hi) __B,
+ (__v8si) __W,
+ (__mmask8) __U);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS
+_mm256_maskz_madd_epi16 (__mmask8 __U, __m256i __A, __m256i __B) {
+ return (__m256i) __builtin_ia32_pmaddwd256_mask ((__v16hi) __A,
+ (__v16hi) __B,
+ (__v8si) _mm256_setzero_si256(),
+ (__mmask8) __U);
+}
+
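+/*
+ * The cvt*epi16_epi8 family narrows 16-bit lanes to 8 bits: cvtsepi16
+ * saturates as signed, cvtusepi16 saturates as unsigned, and cvtepi16
+ * truncates. The 128-bit forms zero the unused upper 8 bytes of the
+ * result; the *_storeu_* forms write the narrowed bytes to memory under
+ * the mask.
+ */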
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_cvtsepi16_epi8 (__m128i __A) {
+ return (__m128i) __builtin_ia32_pmovswb128_mask ((__v8hi) __A,
+ (__v16qi) _mm_setzero_si128(),
+ (__mmask8) -1);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_mask_cvtsepi16_epi8 (__m128i __O, __mmask8 __M, __m128i __A) {
+ return (__m128i) __builtin_ia32_pmovswb128_mask ((__v8hi) __A,
+ (__v16qi) __O,
+ __M);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_maskz_cvtsepi16_epi8 (__mmask8 __M, __m128i __A) {
+ return (__m128i) __builtin_ia32_pmovswb128_mask ((__v8hi) __A,
+ (__v16qi) _mm_setzero_si128(),
+ __M);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm256_cvtsepi16_epi8 (__m256i __A) {
+ return (__m128i) __builtin_ia32_pmovswb256_mask ((__v16hi) __A,
+ (__v16qi) _mm_setzero_si128(),
+ (__mmask16) -1);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm256_mask_cvtsepi16_epi8 (__m128i __O, __mmask16 __M, __m256i __A) {
+ return (__m128i) __builtin_ia32_pmovswb256_mask ((__v16hi) __A,
+ (__v16qi) __O,
+ __M);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm256_maskz_cvtsepi16_epi8 (__mmask16 __M, __m256i __A) {
+ return (__m128i) __builtin_ia32_pmovswb256_mask ((__v16hi) __A,
+ (__v16qi) _mm_setzero_si128(),
+ __M);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_cvtusepi16_epi8 (__m128i __A) {
+ return (__m128i) __builtin_ia32_pmovuswb128_mask ((__v8hi) __A,
+ (__v16qi) _mm_setzero_si128(),
+ (__mmask8) -1);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_mask_cvtusepi16_epi8 (__m128i __O, __mmask8 __M, __m128i __A) {
+ return (__m128i) __builtin_ia32_pmovuswb128_mask ((__v8hi) __A,
+ (__v16qi) __O,
+ __M);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_maskz_cvtusepi16_epi8 (__mmask8 __M, __m128i __A) {
+ return (__m128i) __builtin_ia32_pmovuswb128_mask ((__v8hi) __A,
+ (__v16qi) _mm_setzero_si128(),
+ __M);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm256_cvtusepi16_epi8 (__m256i __A) {
+ return (__m128i) __builtin_ia32_pmovuswb256_mask ((__v16hi) __A,
+ (__v16qi) _mm_setzero_si128(),
+ (__mmask16) -1);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm256_mask_cvtusepi16_epi8 (__m128i __O, __mmask16 __M, __m256i __A) {
+ return (__m128i) __builtin_ia32_pmovuswb256_mask ((__v16hi) __A,
+ (__v16qi) __O,
+ __M);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm256_maskz_cvtusepi16_epi8 (__mmask16 __M, __m256i __A) {
+ return (__m128i) __builtin_ia32_pmovuswb256_mask ((__v16hi) __A,
+ (__v16qi) _mm_setzero_si128(),
+ __M);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_cvtepi16_epi8 (__m128i __A) {
+ return (__m128i) __builtin_ia32_pmovwb128_mask ((__v8hi) __A,
+ (__v16qi) _mm_setzero_si128(),
+ (__mmask8) -1);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_mask_cvtepi16_epi8 (__m128i __O, __mmask8 __M, __m128i __A) {
+ return (__m128i) __builtin_ia32_pmovwb128_mask ((__v8hi) __A,
+ (__v16qi) __O,
+ __M);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_maskz_cvtepi16_epi8 (__mmask8 __M, __m128i __A) {
+ return (__m128i) __builtin_ia32_pmovwb128_mask ((__v8hi) __A,
+ (__v16qi) _mm_setzero_si128(),
+ __M);
+}
+
+static __inline__ void __DEFAULT_FN_ATTRS
+_mm_mask_cvtepi16_storeu_epi8 (void * __P, __mmask8 __M, __m128i __A)
+{
+ __builtin_ia32_pmovwb128mem_mask ((__v16qi *) __P, (__v8hi) __A, __M);
+}
+
+static __inline__ void __DEFAULT_FN_ATTRS
+_mm_mask_cvtsepi16_storeu_epi8 (void * __P, __mmask8 __M, __m128i __A)
+{
+ __builtin_ia32_pmovswb128mem_mask ((__v16qi *) __P, (__v8hi) __A, __M);
+}
+
+static __inline__ void __DEFAULT_FN_ATTRS
+_mm_mask_cvtusepi16_storeu_epi8 (void * __P, __mmask8 __M, __m128i __A)
+{
+ __builtin_ia32_pmovuswb128mem_mask ((__v16qi *) __P, (__v8hi) __A, __M);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm256_cvtepi16_epi8 (__m256i __A) {
+ return (__m128i) __builtin_ia32_pmovwb256_mask ((__v16hi) __A,
+ (__v16qi) _mm_setzero_si128(),
+ (__mmask16) -1);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm256_mask_cvtepi16_epi8 (__m128i __O, __mmask16 __M, __m256i __A) {
+ return (__m128i) __builtin_ia32_pmovwb256_mask ((__v16hi) __A,
+ (__v16qi) __O,
+ __M);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm256_maskz_cvtepi16_epi8 (__mmask16 __M, __m256i __A) {
+ return (__m128i) __builtin_ia32_pmovwb256_mask ((__v16hi) __A,
+ (__v16qi) _mm_setzero_si128(),
+ __M);
+}
+
+static __inline__ void __DEFAULT_FN_ATTRS
+_mm256_mask_cvtepi16_storeu_epi8 (void * __P, __mmask16 __M, __m256i __A)
+{
+ __builtin_ia32_pmovwb256mem_mask ((__v16qi *) __P, (__v16hi) __A, __M);
+}
+
+static __inline__ void __DEFAULT_FN_ATTRS
+_mm256_mask_cvtsepi16_storeu_epi8 (void * __P, __mmask16 __M, __m256i __A)
+{
+ __builtin_ia32_pmovswb256mem_mask ((__v16qi *) __P, (__v16hi) __A, __M);
+}
+
+static __inline__ void __DEFAULT_FN_ATTRS
+_mm256_mask_cvtusepi16_storeu_epi8 (void * __P, __mmask16 __M, __m256i __A)
+{
+ __builtin_ia32_pmovuswb256mem_mask ((__v16qi*) __P, (__v16hi) __A, __M);
+}
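+
+/*
+ * mulhrs_epi16 (PMULHRSW) computes a rounded, scaled high product per
+ * 16-bit lane: (__X * __Y + 0x4000) >> 15. mulhi_epu16 and mulhi_epi16
+ * below return the plain high 16 bits of the unsigned and signed
+ * products respectively.
+ */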
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_mask_mulhrs_epi16 (__m128i __W, __mmask8 __U, __m128i __X, __m128i __Y) {
+ return (__m128i) __builtin_ia32_pmulhrsw128_mask ((__v8hi) __X,
+ (__v8hi) __Y,
+ (__v8hi) __W,
+ (__mmask8) __U);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_maskz_mulhrs_epi16 (__mmask8 __U, __m128i __X, __m128i __Y) {
+ return (__m128i) __builtin_ia32_pmulhrsw128_mask ((__v8hi) __X,
+ (__v8hi) __Y,
+ (__v8hi) _mm_setzero_si128(),
+ (__mmask8) __U);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS
+_mm256_mask_mulhrs_epi16 (__m256i __W, __mmask16 __U, __m256i __X, __m256i __Y) {
+ return (__m256i) __builtin_ia32_pmulhrsw256_mask ((__v16hi) __X,
+ (__v16hi) __Y,
+ (__v16hi) __W,
+ (__mmask16) __U);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS
+_mm256_maskz_mulhrs_epi16 (__mmask16 __U, __m256i __X, __m256i __Y) {
+ return (__m256i) __builtin_ia32_pmulhrsw256_mask ((__v16hi) __X,
+ (__v16hi) __Y,
+ (__v16hi) _mm256_setzero_si256(),
+ (__mmask16) __U);
+}
+
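+/* mulhi_epu16/mulhi_epi16 keep the upper 16 bits of the 32-bit unsigned or
+   signed product of each word pair. */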
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_mask_mulhi_epu16 (__m128i __W, __mmask8 __U, __m128i __A,
+ __m128i __B) {
+ return (__m128i) __builtin_ia32_pmulhuw128_mask ((__v8hi) __A,
+ (__v8hi) __B,
+ (__v8hi) __W,
+ (__mmask8) __U);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_maskz_mulhi_epu16 (__mmask8 __U, __m128i __A, __m128i __B) {
+ return (__m128i) __builtin_ia32_pmulhuw128_mask ((__v8hi) __A,
+ (__v8hi) __B,
+ (__v8hi) _mm_setzero_si128(),
+ (__mmask8) __U);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS
+_mm256_mask_mulhi_epu16 (__m256i __W, __mmask16 __U, __m256i __A,
+ __m256i __B) {
+ return (__m256i) __builtin_ia32_pmulhuw256_mask ((__v16hi) __A,
+ (__v16hi) __B,
+ (__v16hi) __W,
+ (__mmask16) __U);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS
+_mm256_maskz_mulhi_epu16 (__mmask16 __U, __m256i __A, __m256i __B) {
+ return (__m256i) __builtin_ia32_pmulhuw256_mask ((__v16hi) __A,
+ (__v16hi) __B,
+ (__v16hi) _mm256_setzero_si256(),
+ (__mmask16) __U);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_mask_mulhi_epi16 (__m128i __W, __mmask8 __U, __m128i __A,
+ __m128i __B) {
+ return (__m128i) __builtin_ia32_pmulhw128_mask ((__v8hi) __A,
+ (__v8hi) __B,
+ (__v8hi) __W,
+ (__mmask8) __U);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_maskz_mulhi_epi16 (__mmask8 __U, __m128i __A, __m128i __B) {
+ return (__m128i) __builtin_ia32_pmulhw128_mask ((__v8hi) __A,
+ (__v8hi) __B,
+ (__v8hi) _mm_setzero_si128(),
+ (__mmask8) __U);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS
+_mm256_mask_mulhi_epi16 (__m256i __W, __mmask16 __U, __m256i __A,
+ __m256i __B) {
+ return (__m256i) __builtin_ia32_pmulhw256_mask ((__v16hi) __A,
+ (__v16hi) __B,
+ (__v16hi) __W,
+ (__mmask16) __U);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS
+_mm256_maskz_mulhi_epi16 (__mmask16 __U, __m256i __A, __m256i __B) {
+ return (__m256i) __builtin_ia32_pmulhw256_mask ((__v16hi) __A,
+ (__v16hi) __B,
+ (__v16hi) _mm256_setzero_si256(),
+ (__mmask16) __U);
+}
+
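+/* The masked unpack operations interleave bytes/words from the high
+   (unpackhi) or low (unpacklo) half of each 128-bit lane of __A and __B,
+   then blend with the destination through a mask-driven select. */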
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_mask_unpackhi_epi8(__m128i __W, __mmask16 __U, __m128i __A, __m128i __B) {
+ return (__m128i)__builtin_ia32_selectb_128((__mmask16)__U,
+ (__v16qi)_mm_unpackhi_epi8(__A, __B),
+ (__v16qi)__W);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_maskz_unpackhi_epi8(__mmask16 __U, __m128i __A, __m128i __B) {
+ return (__m128i)__builtin_ia32_selectb_128((__mmask16)__U,
+ (__v16qi)_mm_unpackhi_epi8(__A, __B),
+ (__v16qi)_mm_setzero_si128());
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS
+_mm256_mask_unpackhi_epi8(__m256i __W, __mmask32 __U, __m256i __A, __m256i __B) {
+ return (__m256i)__builtin_ia32_selectb_256((__mmask32)__U,
+ (__v32qi)_mm256_unpackhi_epi8(__A, __B),
+ (__v32qi)__W);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS
+_mm256_maskz_unpackhi_epi8(__mmask32 __U, __m256i __A, __m256i __B) {
+ return (__m256i)__builtin_ia32_selectb_256((__mmask32)__U,
+ (__v32qi)_mm256_unpackhi_epi8(__A, __B),
+ (__v32qi)_mm256_setzero_si256());
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_mask_unpackhi_epi16(__m128i __W, __mmask8 __U, __m128i __A, __m128i __B) {
+ return (__m128i)__builtin_ia32_selectw_128((__mmask8)__U,
+ (__v8hi)_mm_unpackhi_epi16(__A, __B),
+ (__v8hi)__W);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_maskz_unpackhi_epi16(__mmask8 __U, __m128i __A, __m128i __B) {
+ return (__m128i)__builtin_ia32_selectw_128((__mmask8)__U,
+ (__v8hi)_mm_unpackhi_epi16(__A, __B),
+ (__v8hi) _mm_setzero_si128());
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS
+_mm256_mask_unpackhi_epi16(__m256i __W, __mmask16 __U, __m256i __A, __m256i __B) {
+ return (__m256i)__builtin_ia32_selectw_256((__mmask16)__U,
+ (__v16hi)_mm256_unpackhi_epi16(__A, __B),
+ (__v16hi)__W);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS
+_mm256_maskz_unpackhi_epi16(__mmask16 __U, __m256i __A, __m256i __B) {
+ return (__m256i)__builtin_ia32_selectw_256((__mmask16)__U,
+ (__v16hi)_mm256_unpackhi_epi16(__A, __B),
+ (__v16hi)_mm256_setzero_si256());
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_mask_unpacklo_epi8(__m128i __W, __mmask16 __U, __m128i __A, __m128i __B) {
+ return (__m128i)__builtin_ia32_selectb_128((__mmask16)__U,
+ (__v16qi)_mm_unpacklo_epi8(__A, __B),
+ (__v16qi)__W);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_maskz_unpacklo_epi8(__mmask16 __U, __m128i __A, __m128i __B) {
+ return (__m128i)__builtin_ia32_selectb_128((__mmask16)__U,
+ (__v16qi)_mm_unpacklo_epi8(__A, __B),
+ (__v16qi)_mm_setzero_si128());
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS
+_mm256_mask_unpacklo_epi8(__m256i __W, __mmask32 __U, __m256i __A, __m256i __B) {
+ return (__m256i)__builtin_ia32_selectb_256((__mmask32)__U,
+ (__v32qi)_mm256_unpacklo_epi8(__A, __B),
+ (__v32qi)__W);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS
+_mm256_maskz_unpacklo_epi8(__mmask32 __U, __m256i __A, __m256i __B) {
+ return (__m256i)__builtin_ia32_selectb_256((__mmask32)__U,
+ (__v32qi)_mm256_unpacklo_epi8(__A, __B),
+ (__v32qi)_mm256_setzero_si256());
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_mask_unpacklo_epi16(__m128i __W, __mmask8 __U, __m128i __A, __m128i __B) {
+ return (__m128i)__builtin_ia32_selectw_128((__mmask8)__U,
+ (__v8hi)_mm_unpacklo_epi16(__A, __B),
+ (__v8hi)__W);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_maskz_unpacklo_epi16(__mmask8 __U, __m128i __A, __m128i __B) {
+ return (__m128i)__builtin_ia32_selectw_128((__mmask8)__U,
+ (__v8hi)_mm_unpacklo_epi16(__A, __B),
+ (__v8hi) _mm_setzero_si128());
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS
+_mm256_mask_unpacklo_epi16(__m256i __W, __mmask16 __U, __m256i __A, __m256i __B) {
+ return (__m256i)__builtin_ia32_selectw_256((__mmask16)__U,
+ (__v16hi)_mm256_unpacklo_epi16(__A, __B),
+ (__v16hi)__W);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS
+_mm256_maskz_unpacklo_epi16(__mmask16 __U, __m256i __A, __m256i __B) {
+ return (__m256i)__builtin_ia32_selectw_256((__mmask16)__U,
+ (__v16hi)_mm256_unpacklo_epi16(__A, __B),
+ (__v16hi)_mm256_setzero_si256());
+}
+
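+/* cvtepi8_epi16 sign-extends each byte to a word; cvtepu8_epi16 further
+   below zero-extends instead. */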
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_mask_cvtepi8_epi16 (__m128i __W, __mmask8 __U, __m128i __A)
+{
+ return (__m128i) __builtin_ia32_pmovsxbw128_mask ((__v16qi) __A,
+ (__v8hi) __W,
+ (__mmask8) __U);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_maskz_cvtepi8_epi16 (__mmask8 __U, __m128i __A)
+{
+ return (__m128i) __builtin_ia32_pmovsxbw128_mask ((__v16qi) __A,
+ (__v8hi)
+ _mm_setzero_si128 (),
+ (__mmask8) __U);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS
+_mm256_mask_cvtepi8_epi16 (__m256i __W, __mmask16 __U, __m128i __A)
+{
+ return (__m256i) __builtin_ia32_pmovsxbw256_mask ((__v16qi) __A,
+ (__v16hi) __W,
+ (__mmask16) __U);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS
+_mm256_maskz_cvtepi8_epi16 (__mmask16 __U, __m128i __A)
+{
+ return (__m256i) __builtin_ia32_pmovsxbw256_mask ((__v16qi) __A,
+ (__v16hi)
+ _mm256_setzero_si256 (),
+ (__mmask16) __U);
+}
+
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_mask_cvtepu8_epi16 (__m128i __W, __mmask8 __U, __m128i __A)
+{
+ return (__m128i) __builtin_ia32_pmovzxbw128_mask ((__v16qi) __A,
+ (__v8hi) __W,
+ (__mmask8) __U);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_maskz_cvtepu8_epi16 (__mmask8 __U, __m128i __A)
+{
+ return (__m128i) __builtin_ia32_pmovzxbw128_mask ((__v16qi) __A,
+ (__v8hi)
+ _mm_setzero_si128 (),
+ (__mmask8) __U);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS
+_mm256_mask_cvtepu8_epi16 (__m256i __W, __mmask16 __U, __m128i __A)
+{
+ return (__m256i) __builtin_ia32_pmovzxbw256_mask ((__v16qi) __A,
+ (__v16hi) __W,
+ (__mmask16) __U);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS
+_mm256_maskz_cvtepu8_epi16 (__mmask16 __U, __m128i __A)
+{
+ return (__m256i) __builtin_ia32_pmovzxbw256_mask ((__v16qi) __A,
+ (__v16hi)
+ _mm256_setzero_si256 (),
+ (__mmask16) __U);
+}
+
+
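+/* The comparison macros take an immediate predicate: 0 = EQ, 1 = LT, 2 = LE,
+   3 = FALSE, 4 = NE, 5 = NLT (>=), 6 = NLE (>), 7 = TRUE.  The epu forms
+   compare unsigned, the epi forms signed. */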
+#define _mm_cmp_epi8_mask(a, b, p) __extension__ ({ \
+ (__mmask16)__builtin_ia32_cmpb128_mask((__v16qi)(__m128i)(a), \
+ (__v16qi)(__m128i)(b), (int)(p), \
+ (__mmask16)-1); })
+
+#define _mm_mask_cmp_epi8_mask(m, a, b, p) __extension__ ({ \
+ (__mmask16)__builtin_ia32_cmpb128_mask((__v16qi)(__m128i)(a), \
+ (__v16qi)(__m128i)(b), (int)(p), \
+ (__mmask16)(m)); })
+
+#define _mm_cmp_epu8_mask(a, b, p) __extension__ ({ \
+ (__mmask16)__builtin_ia32_ucmpb128_mask((__v16qi)(__m128i)(a), \
+ (__v16qi)(__m128i)(b), (int)(p), \
+ (__mmask16)-1); })
+
+#define _mm_mask_cmp_epu8_mask(m, a, b, p) __extension__ ({ \
+ (__mmask16)__builtin_ia32_ucmpb128_mask((__v16qi)(__m128i)(a), \
+ (__v16qi)(__m128i)(b), (int)(p), \
+ (__mmask16)(m)); })
+
+#define _mm256_cmp_epi8_mask(a, b, p) __extension__ ({ \
+ (__mmask32)__builtin_ia32_cmpb256_mask((__v32qi)(__m256i)(a), \
+ (__v32qi)(__m256i)(b), (int)(p), \
+ (__mmask32)-1); })
+
+#define _mm256_mask_cmp_epi8_mask(m, a, b, p) __extension__ ({ \
+ (__mmask32)__builtin_ia32_cmpb256_mask((__v32qi)(__m256i)(a), \
+ (__v32qi)(__m256i)(b), (int)(p), \
+ (__mmask32)(m)); })
+
+#define _mm256_cmp_epu8_mask(a, b, p) __extension__ ({ \
+ (__mmask32)__builtin_ia32_ucmpb256_mask((__v32qi)(__m256i)(a), \
+ (__v32qi)(__m256i)(b), (int)(p), \
+ (__mmask32)-1); })
+
+#define _mm256_mask_cmp_epu8_mask(m, a, b, p) __extension__ ({ \
+ (__mmask32)__builtin_ia32_ucmpb256_mask((__v32qi)(__m256i)(a), \
+ (__v32qi)(__m256i)(b), (int)(p), \
+ (__mmask32)(m)); })
+
+#define _mm_cmp_epi16_mask(a, b, p) __extension__ ({ \
+ (__mmask8)__builtin_ia32_cmpw128_mask((__v8hi)(__m128i)(a), \
+ (__v8hi)(__m128i)(b), (int)(p), \
+ (__mmask8)-1); })
+
+#define _mm_mask_cmp_epi16_mask(m, a, b, p) __extension__ ({ \
+ (__mmask8)__builtin_ia32_cmpw128_mask((__v8hi)(__m128i)(a), \
+ (__v8hi)(__m128i)(b), (int)(p), \
+ (__mmask8)(m)); })
+
+#define _mm_cmp_epu16_mask(a, b, p) __extension__ ({ \
+ (__mmask8)__builtin_ia32_ucmpw128_mask((__v8hi)(__m128i)(a), \
+ (__v8hi)(__m128i)(b), (int)(p), \
+ (__mmask8)-1); })
+
+#define _mm_mask_cmp_epu16_mask(m, a, b, p) __extension__ ({ \
+ (__mmask8)__builtin_ia32_ucmpw128_mask((__v8hi)(__m128i)(a), \
+ (__v8hi)(__m128i)(b), (int)(p), \
+ (__mmask8)(m)); })
+
+#define _mm256_cmp_epi16_mask(a, b, p) __extension__ ({ \
+ (__mmask16)__builtin_ia32_cmpw256_mask((__v16hi)(__m256i)(a), \
+ (__v16hi)(__m256i)(b), (int)(p), \
+ (__mmask16)-1); })
+
+#define _mm256_mask_cmp_epi16_mask(m, a, b, p) __extension__ ({ \
+ (__mmask16)__builtin_ia32_cmpw256_mask((__v16hi)(__m256i)(a), \
+ (__v16hi)(__m256i)(b), (int)(p), \
+ (__mmask16)(m)); })
+
+#define _mm256_cmp_epu16_mask(a, b, p) __extension__ ({ \
+ (__mmask16)__builtin_ia32_ucmpw256_mask((__v16hi)(__m256i)(a), \
+ (__v16hi)(__m256i)(b), (int)(p), \
+ (__mmask16)-1); })
+
+#define _mm256_mask_cmp_epu16_mask(m, a, b, p) __extension__ ({ \
+ (__mmask16)__builtin_ia32_ucmpw256_mask((__v16hi)(__m256i)(a), \
+ (__v16hi)(__m256i)(b), (int)(p), \
+ (__mmask16)(m)); })
+
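+/* Masked forms of the word shuffles: imm encodes a permutation of the four
+   high (shufflehi) or low (shufflelo) words of each 128-bit lane. */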
+#define _mm_mask_shufflehi_epi16(W, U, A, imm) __extension__ ({ \
+ (__m128i)__builtin_ia32_selectw_128((__mmask8)(U), \
+ (__v8hi)_mm_shufflehi_epi16((A), (imm)), \
+ (__v8hi)(__m128i)(W)); })
+
+#define _mm_maskz_shufflehi_epi16(U, A, imm) __extension__ ({ \
+ (__m128i)__builtin_ia32_selectw_128((__mmask8)(U), \
+ (__v8hi)_mm_shufflehi_epi16((A), (imm)), \
+ (__v8hi)_mm_setzero_hi()); })
+
+#define _mm256_mask_shufflehi_epi16(W, U, A, imm) __extension__ ({ \
+ (__m256i)__builtin_ia32_selectw_256((__mmask16)(U), \
+ (__v16hi)_mm256_shufflehi_epi16((A), (imm)), \
+ (__v16hi)(__m256i)(W)); })
+
+#define _mm256_maskz_shufflehi_epi16(U, A, imm) __extension__ ({ \
+ (__m256i)__builtin_ia32_selectw_256((__mmask16)(U), \
+ (__v16hi)_mm256_shufflehi_epi16((A), (imm)), \
+ (__v16hi)_mm256_setzero_si256()); })
+
+#define _mm_mask_shufflelo_epi16(W, U, A, imm) __extension__ ({ \
+ (__m128i)__builtin_ia32_selectw_128((__mmask8)(U), \
+ (__v8hi)_mm_shufflelo_epi16((A), (imm)), \
+ (__v8hi)(__m128i)(W)); })
+
+#define _mm_maskz_shufflelo_epi16(U, A, imm) __extension__ ({ \
+ (__m128i)__builtin_ia32_selectw_128((__mmask8)(U), \
+ (__v8hi)_mm_shufflelo_epi16((A), (imm)), \
+ (__v8hi)_mm_setzero_hi()); })
+
+#define _mm256_mask_shufflelo_epi16(W, U, A, imm) __extension__ ({ \
+ (__m256i)__builtin_ia32_selectw_256((__mmask16)(U), \
+ (__v16hi)_mm256_shufflelo_epi16((A), \
+ (imm)), \
+ (__v16hi)(__m256i)(W)); })
+
+#define _mm256_maskz_shufflelo_epi16(U, A, imm) __extension__ ({ \
+ (__m256i)__builtin_ia32_selectw_256((__mmask16)(U), \
+ (__v16hi)_mm256_shufflelo_epi16((A), \
+ (imm)), \
+ (__v16hi)_mm256_setzero_si256()); })
+
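+/* sllv/srlv/srav shift each word by the per-element count in __B (logical
+   left, logical right, arithmetic right); a count above 15 produces 0, or
+   the sign fill in the srav case. */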
+static __inline__ __m256i __DEFAULT_FN_ATTRS
+_mm256_sllv_epi16 (__m256i __A, __m256i __B)
+{
+ return (__m256i) __builtin_ia32_psllv16hi_mask ((__v16hi) __A,
+ (__v16hi) __B,
+ (__v16hi)
+ _mm256_setzero_si256 (),
+ (__mmask16) -1);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS
+_mm256_mask_sllv_epi16 (__m256i __W, __mmask16 __U, __m256i __A,
+ __m256i __B)
+{
+ return (__m256i) __builtin_ia32_psllv16hi_mask ((__v16hi) __A,
+ (__v16hi) __B,
+ (__v16hi) __W,
+ (__mmask16) __U);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS
+_mm256_maskz_sllv_epi16 (__mmask16 __U, __m256i __A, __m256i __B)
+{
+ return (__m256i) __builtin_ia32_psllv16hi_mask ((__v16hi) __A,
+ (__v16hi) __B,
+ (__v16hi)
+ _mm256_setzero_si256 (),
+ (__mmask16) __U);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_sllv_epi16 (__m128i __A, __m128i __B)
+{
+ return (__m128i) __builtin_ia32_psllv8hi_mask ((__v8hi) __A,
+ (__v8hi) __B,
+ (__v8hi)
+ _mm_setzero_hi (),
+ (__mmask8) -1);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_mask_sllv_epi16 (__m128i __W, __mmask8 __U, __m128i __A,
+ __m128i __B)
+{
+ return (__m128i) __builtin_ia32_psllv8hi_mask ((__v8hi) __A,
+ (__v8hi) __B,
+ (__v8hi) __W,
+ (__mmask8) __U);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_maskz_sllv_epi16 (__mmask8 __U, __m128i __A, __m128i __B)
+{
+ return (__m128i) __builtin_ia32_psllv8hi_mask ((__v8hi) __A,
+ (__v8hi) __B,
+ (__v8hi)
+ _mm_setzero_si128 (),
+ (__mmask8) __U);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_mask_sll_epi16 (__m128i __W, __mmask8 __U, __m128i __A,
+ __m128i __B)
+{
+ return (__m128i) __builtin_ia32_psllw128_mask ((__v8hi) __A,
+ (__v8hi) __B,
+ (__v8hi) __W,
+ (__mmask8) __U);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_maskz_sll_epi16 (__mmask8 __U, __m128i __A, __m128i __B)
+{
+ return (__m128i) __builtin_ia32_psllw128_mask ((__v8hi) __A,
+ (__v8hi) __B,
+ (__v8hi)
+ _mm_setzero_si128 (),
+ (__mmask8) __U);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS
+_mm256_mask_sll_epi16 (__m256i __W, __mmask16 __U, __m256i __A,
+ __m128i __B)
+{
+ return (__m256i) __builtin_ia32_psllw256_mask ((__v16hi) __A,
+ (__v8hi) __B,
+ (__v16hi) __W,
+ (__mmask16) __U);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS
+_mm256_maskz_sll_epi16 (__mmask16 __U, __m256i __A, __m128i __B)
+{
+ return (__m256i) __builtin_ia32_psllw256_mask ((__v16hi) __A,
+ (__v8hi) __B,
+ (__v16hi)
+ _mm256_setzero_si256 (),
+ (__mmask16) __U);
+}
+
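+/* sll_epi16 shifts every element by the scalar count in the low 64 bits of
+   __B; the slli/srli/srai macros take an immediate count instead. */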
+#define _mm_mask_slli_epi16(W, U, A, B) __extension__ ({ \
+ (__m128i)__builtin_ia32_psllwi128_mask((__v8hi)(__m128i)(A), (int)(B), \
+ (__v8hi)(__m128i)(W), \
+ (__mmask8)(U)); })
+
+#define _mm_maskz_slli_epi16(U, A, B) __extension__ ({ \
+ (__m128i)__builtin_ia32_psllwi128_mask((__v8hi)(__m128i)(A), (int)(B), \
+ (__v8hi)_mm_setzero_si128(), \
+ (__mmask8)(U)); })
+
+#define _mm256_mask_slli_epi16(W, U, A, B) __extension__ ({ \
+ (__m256i)__builtin_ia32_psllwi256_mask((__v16hi)(__m256i)(A), (int)(B), \
+ (__v16hi)(__m256i)(W), \
+ (__mmask16)(U)); })
+
+#define _mm256_maskz_slli_epi16(U, A, B) __extension__ ({ \
+ (__m256i)__builtin_ia32_psllwi256_mask((__v16hi)(__m256i)(A), (int)(B), \
+ (__v16hi)_mm256_setzero_si256(), \
+ (__mmask16)(U)); })
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS
+_mm256_srlv_epi16 (__m256i __A, __m256i __B)
+{
+ return (__m256i) __builtin_ia32_psrlv16hi_mask ((__v16hi) __A,
+ (__v16hi) __B,
+ (__v16hi)
+ _mm256_setzero_si256 (),
+ (__mmask16) -1);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS
+_mm256_mask_srlv_epi16 (__m256i __W, __mmask16 __U, __m256i __A,
+ __m256i __B)
+{
+ return (__m256i) __builtin_ia32_psrlv16hi_mask ((__v16hi) __A,
+ (__v16hi) __B,
+ (__v16hi) __W,
+ (__mmask16) __U);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS
+_mm256_maskz_srlv_epi16 (__mmask16 __U, __m256i __A, __m256i __B)
+{
+ return (__m256i) __builtin_ia32_psrlv16hi_mask ((__v16hi) __A,
+ (__v16hi) __B,
+ (__v16hi)
+ _mm256_setzero_si256 (),
+ (__mmask16) __U);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_srlv_epi16 (__m128i __A, __m128i __B)
+{
+ return (__m128i) __builtin_ia32_psrlv8hi_mask ((__v8hi) __A,
+ (__v8hi) __B,
+ (__v8hi)
+ _mm_setzero_hi (),
+ (__mmask8) -1);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_mask_srlv_epi16 (__m128i __W, __mmask8 __U, __m128i __A,
+ __m128i __B)
+{
+ return (__m128i) __builtin_ia32_psrlv8hi_mask ((__v8hi) __A,
+ (__v8hi) __B,
+ (__v8hi) __W,
+ (__mmask8) __U);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_maskz_srlv_epi16 (__mmask8 __U, __m128i __A, __m128i __B)
+{
+ return (__m128i) __builtin_ia32_psrlv8hi_mask ((__v8hi) __A,
+ (__v8hi) __B,
+ (__v8hi)
+ _mm_setzero_si128 (),
+ (__mmask8) __U);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS
+_mm256_srav_epi16 (__m256i __A, __m256i __B)
+{
+ return (__m256i) __builtin_ia32_psrav16hi_mask ((__v16hi) __A,
+ (__v16hi) __B,
+ (__v16hi)
+ _mm256_setzero_si256 (),
+ (__mmask16) -1);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS
+_mm256_mask_srav_epi16 (__m256i __W, __mmask16 __U, __m256i __A,
+ __m256i __B)
+{
+ return (__m256i) __builtin_ia32_psrav16hi_mask ((__v16hi) __A,
+ (__v16hi) __B,
+ (__v16hi) __W,
+ (__mmask16) __U);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS
+_mm256_maskz_srav_epi16 (__mmask16 __U, __m256i __A, __m256i __B)
+{
+ return (__m256i) __builtin_ia32_psrav16hi_mask ((__v16hi) __A,
+ (__v16hi) __B,
+ (__v16hi)
+ _mm256_setzero_si256 (),
+ (__mmask16) __U);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_srav_epi16 (__m128i __A, __m128i __B)
+{
+ return (__m128i) __builtin_ia32_psrav8hi_mask ((__v8hi) __A,
+ (__v8hi) __B,
+ (__v8hi)
+ _mm_setzero_hi (),
+ (__mmask8) -1);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_mask_srav_epi16 (__m128i __W, __mmask8 __U, __m128i __A,
+ __m128i __B)
+{
+ return (__m128i) __builtin_ia32_psrav8hi_mask ((__v8hi) __A,
+ (__v8hi) __B,
+ (__v8hi) __W,
+ (__mmask8) __U);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_maskz_srav_epi16 (__mmask8 __U, __m128i __A, __m128i __B)
+{
+ return (__m128i) __builtin_ia32_psrav8hi_mask ((__v8hi) __A,
+ (__v8hi) __B,
+ (__v8hi)
+ _mm_setzero_si128 (),
+ (__mmask8) __U);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_mask_sra_epi16 (__m128i __W, __mmask8 __U, __m128i __A,
+ __m128i __B)
+{
+ return (__m128i) __builtin_ia32_psraw128_mask ((__v8hi) __A,
+ (__v8hi) __B,
+ (__v8hi) __W,
+ (__mmask8) __U);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_maskz_sra_epi16 (__mmask8 __U, __m128i __A, __m128i __B)
+{
+ return (__m128i) __builtin_ia32_psraw128_mask ((__v8hi) __A,
+ (__v8hi) __B,
+ (__v8hi)
+ _mm_setzero_si128 (),
+ (__mmask8) __U);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS
+_mm256_mask_sra_epi16 (__m256i __W, __mmask16 __U, __m256i __A,
+ __m128i __B)
+{
+ return (__m256i) __builtin_ia32_psraw256_mask ((__v16hi) __A,
+ (__v8hi) __B,
+ (__v16hi) __W,
+ (__mmask16) __U);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS
+_mm256_maskz_sra_epi16 (__mmask16 __U, __m256i __A, __m128i __B)
+{
+ return (__m256i) __builtin_ia32_psraw256_mask ((__v16hi) __A,
+ (__v8hi) __B,
+ (__v16hi)
+ _mm256_setzero_si256 (),
+ (__mmask16) __U);
+}
+
+#define _mm_mask_srai_epi16(W, U, A, imm) __extension__ ({ \
+ (__m128i)__builtin_ia32_psrawi128_mask((__v8hi)(__m128i)(A), (int)(imm), \
+ (__v8hi)(__m128i)(W), \
+ (__mmask8)(U)); })
+
+#define _mm_maskz_srai_epi16(U, A, imm) __extension__ ({ \
+ (__m128i)__builtin_ia32_psrawi128_mask((__v8hi)(__m128i)(A), (int)(imm), \
+ (__v8hi)_mm_setzero_si128(), \
+ (__mmask8)(U)); })
+
+#define _mm256_mask_srai_epi16(W, U, A, imm) __extension__ ({ \
+ (__m256i)__builtin_ia32_psrawi256_mask((__v16hi)(__m256i)(A), (int)(imm), \
+ (__v16hi)(__m256i)(W), \
+ (__mmask16)(U)); })
+
+#define _mm256_maskz_srai_epi16(U, A, imm) __extension__ ({ \
+ (__m256i)__builtin_ia32_psrawi256_mask((__v16hi)(__m256i)(A), (int)(imm), \
+ (__v16hi)_mm256_setzero_si256(), \
+ (__mmask16)(U)); })
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_mask_srl_epi16 (__m128i __W, __mmask8 __U, __m128i __A,
+ __m128i __B)
+{
+ return (__m128i) __builtin_ia32_psrlw128_mask ((__v8hi) __A,
+ (__v8hi) __B,
+ (__v8hi) __W,
+ (__mmask8) __U);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_maskz_srl_epi16 (__mmask8 __U, __m128i __A, __m128i __B)
+{
+ return (__m128i) __builtin_ia32_psrlw128_mask ((__v8hi) __A,
+ (__v8hi) __B,
+ (__v8hi)
+ _mm_setzero_si128 (),
+ (__mmask8) __U);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS
+_mm256_mask_srl_epi16 (__m256i __W, __mmask16 __U, __m256i __A,
+ __m128i __B)
+{
+ return (__m256i) __builtin_ia32_psrlw256_mask ((__v16hi) __A,
+ (__v8hi) __B,
+ (__v16hi) __W,
+ (__mmask16) __U);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS
+_mm256_maskz_srl_epi16 (__mmask16 __U, __m256i __A, __m128i __B)
+{
+ return (__m256i) __builtin_ia32_psrlw256_mask ((__v16hi) __A,
+ (__v8hi) __B,
+ (__v16hi)
+ _mm256_setzero_si256 (),
+ (__mmask16) __U);
+}
+
+#define _mm_mask_srli_epi16(W, U, A, imm) __extension__ ({ \
+ (__m128i)__builtin_ia32_psrlwi128_mask((__v8hi)(__m128i)(A), (int)(imm), \
+ (__v8hi)(__m128i)(W), \
+ (__mmask8)(U)); })
+
+#define _mm_maskz_srli_epi16(U, A, imm) __extension__ ({ \
+ (__m128i)__builtin_ia32_psrlwi128_mask((__v8hi)(__m128i)(A), (int)(imm), \
+ (__v8hi)_mm_setzero_si128(), \
+ (__mmask8)(U)); })
+
+#define _mm256_mask_srli_epi16(W, U, A, imm) __extension__ ({ \
+ (__m256i)__builtin_ia32_psrlwi256_mask((__v16hi)(__m256i)(A), (int)(imm), \
+ (__v16hi)(__m256i)(W), \
+ (__mmask16)(U)); })
+
+#define _mm256_maskz_srli_epi16(U, A, imm) __extension__ ({ \
+ (__m256i)__builtin_ia32_psrlwi256_mask((__v16hi)(__m256i)(A), (int)(imm), \
+ (__v16hi)_mm256_setzero_si256(), \
+ (__mmask16)(U)); })
+
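+/* mask_mov/maskz_mov are mask-driven blends: each destination element is
+   taken from __A when its mask bit is set, otherwise from __W (or zero). */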
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_mask_mov_epi16 (__m128i __W, __mmask8 __U, __m128i __A)
+{
+ return (__m128i) __builtin_ia32_selectw_128 ((__mmask8) __U,
+ (__v8hi) __A,
+ (__v8hi) __W);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_maskz_mov_epi16 (__mmask8 __U, __m128i __A)
+{
+ return (__m128i) __builtin_ia32_selectw_128 ((__mmask8) __U,
+ (__v8hi) __A,
+ (__v8hi) _mm_setzero_hi ());
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS
+_mm256_mask_mov_epi16 (__m256i __W, __mmask16 __U, __m256i __A)
+{
+ return (__m256i) __builtin_ia32_selectw_256 ((__mmask16) __U,
+ (__v16hi) __A,
+ (__v16hi) __W);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS
+_mm256_maskz_mov_epi16 (__mmask16 __U, __m256i __A)
+{
+ return (__m256i) __builtin_ia32_selectw_256 ((__mmask16) __U,
+ (__v16hi) __A,
+ (__v16hi) _mm256_setzero_si256 ());
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_mask_mov_epi8 (__m128i __W, __mmask16 __U, __m128i __A)
+{
+ return (__m128i) __builtin_ia32_selectb_128 ((__mmask16) __U,
+ (__v16qi) __A,
+ (__v16qi) __W);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_maskz_mov_epi8 (__mmask16 __U, __m128i __A)
+{
+ return (__m128i) __builtin_ia32_selectb_128 ((__mmask16) __U,
+ (__v16qi) __A,
+ (__v16qi) _mm_setzero_hi ());
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS
+_mm256_mask_mov_epi8 (__m256i __W, __mmask32 __U, __m256i __A)
+{
+ return (__m256i) __builtin_ia32_selectb_256 ((__mmask32) __U,
+ (__v32qi) __A,
+ (__v32qi) __W);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS
+_mm256_maskz_mov_epi8 (__mmask32 __U, __m256i __A)
+{
+ return (__m256i) __builtin_ia32_selectb_256 ((__mmask32) __U,
+ (__v32qi) __A,
+ (__v32qi) _mm256_setzero_si256 ());
+}
+
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_mask_set1_epi8 (__m128i __O, __mmask16 __M, char __A)
+{
+ return (__m128i) __builtin_ia32_pbroadcastb128_gpr_mask (__A,
+ (__v16qi) __O,
+ __M);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_maskz_set1_epi8 (__mmask16 __M, char __A)
+{
+ return (__m128i) __builtin_ia32_pbroadcastb128_gpr_mask (__A,
+ (__v16qi)
+ _mm_setzero_si128 (),
+ __M);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS
+_mm256_mask_set1_epi8 (__m256i __O, __mmask32 __M, char __A)
+{
+ return (__m256i) __builtin_ia32_pbroadcastb256_gpr_mask (__A,
+ (__v32qi) __O,
+ __M);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS
+_mm256_maskz_set1_epi8 (__mmask32 __M, char __A)
+{
+ return (__m256i) __builtin_ia32_pbroadcastb256_gpr_mask (__A,
+ (__v32qi)
+ _mm256_setzero_si256 (),
+ __M);
+}
+
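+/* Masked unaligned loads: lanes with a clear mask bit take the passthrough
+   value (or zero), and their memory is not accessed, so faults on the
+   masked-off portion are suppressed. */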
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_mask_loadu_epi16 (__m128i __W, __mmask8 __U, void const *__P)
+{
+ return (__m128i) __builtin_ia32_loaddquhi128_mask ((__v8hi *) __P,
+ (__v8hi) __W,
+ (__mmask8) __U);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_maskz_loadu_epi16 (__mmask8 __U, void const *__P)
+{
+ return (__m128i) __builtin_ia32_loaddquhi128_mask ((__v8hi *) __P,
+ (__v8hi)
+ _mm_setzero_hi (),
+ (__mmask8) __U);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS
+_mm256_mask_loadu_epi16 (__m256i __W, __mmask16 __U, void const *__P)
+{
+ return (__m256i) __builtin_ia32_loaddquhi256_mask ((__v16hi *) __P,
+ (__v16hi) __W,
+ (__mmask16) __U);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS
+_mm256_maskz_loadu_epi16 (__mmask16 __U, void const *__P)
+{
+ return (__m256i) __builtin_ia32_loaddquhi256_mask ((__v16hi *) __P,
+ (__v16hi)
+ _mm256_setzero_si256 (),
+ (__mmask16) __U);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_mask_loadu_epi8 (__m128i __W, __mmask16 __U, void const *__P)
+{
+ return (__m128i) __builtin_ia32_loaddquqi128_mask ((__v16qi *) __P,
+ (__v16qi) __W,
+ (__mmask16) __U);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_maskz_loadu_epi8 (__mmask16 __U, void const *__P)
+{
+ return (__m128i) __builtin_ia32_loaddquqi128_mask ((__v16qi *) __P,
+ (__v16qi)
+ _mm_setzero_si128 (),
+ (__mmask16) __U);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS
+_mm256_mask_loadu_epi8 (__m256i __W, __mmask32 __U, void const *__P)
+{
+ return (__m256i) __builtin_ia32_loaddquqi256_mask ((__v32qi *) __P,
+ (__v32qi) __W,
+ (__mmask32) __U);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS
+_mm256_maskz_loadu_epi8 (__mmask32 __U, void const *__P)
+{
+ return (__m256i) __builtin_ia32_loaddquqi256_mask ((__v32qi *) __P,
+ (__v32qi)
+ _mm256_setzero_si256 (),
+ (__mmask32) __U);
+}
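+
+/* Masked unaligned stores: only elements whose mask bit is set are written
+   to memory. */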
+static __inline__ void __DEFAULT_FN_ATTRS
+_mm_mask_storeu_epi16 (void *__P, __mmask8 __U, __m128i __A)
+{
+ __builtin_ia32_storedquhi128_mask ((__v8hi *) __P,
+ (__v8hi) __A,
+ (__mmask8) __U);
+}
+
+static __inline__ void __DEFAULT_FN_ATTRS
+_mm256_mask_storeu_epi16 (void *__P, __mmask16 __U, __m256i __A)
+{
+ __builtin_ia32_storedquhi256_mask ((__v16hi *) __P,
+ (__v16hi) __A,
+ (__mmask16) __U);
+}
+
+static __inline__ void __DEFAULT_FN_ATTRS
+_mm_mask_storeu_epi8 (void *__P, __mmask16 __U, __m128i __A)
+{
+ __builtin_ia32_storedquqi128_mask ((__v16qi *) __P,
+ (__v16qi) __A,
+ (__mmask16) __U);
+}
+
+static __inline__ void __DEFAULT_FN_ATTRS
+_mm256_mask_storeu_epi8 (void *__P, __mmask32 __U, __m256i __A)
+{
+ __builtin_ia32_storedquqi256_mask ((__v32qi *) __P,
+ (__v32qi) __A,
+ (__mmask32) __U);
+}
+
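+/* test sets mask bit i when element i of (__A & __B) is nonzero; the testn
+   forms further below set it when that AND is zero. */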
+static __inline__ __mmask16 __DEFAULT_FN_ATTRS
+_mm_test_epi8_mask (__m128i __A, __m128i __B)
+{
+ return (__mmask16) __builtin_ia32_ptestmb128 ((__v16qi) __A,
+ (__v16qi) __B,
+ (__mmask16) -1);
+}
+
+static __inline__ __mmask16 __DEFAULT_FN_ATTRS
+_mm_mask_test_epi8_mask (__mmask16 __U, __m128i __A, __m128i __B)
+{
+ return (__mmask16) __builtin_ia32_ptestmb128 ((__v16qi) __A,
+ (__v16qi) __B, __U);
+}
+
+static __inline__ __mmask32 __DEFAULT_FN_ATTRS
+_mm256_test_epi8_mask (__m256i __A, __m256i __B)
+{
+ return (__mmask32) __builtin_ia32_ptestmb256 ((__v32qi) __A,
+ (__v32qi) __B,
+ (__mmask32) -1);
+}
+
+static __inline__ __mmask32 __DEFAULT_FN_ATTRS
+_mm256_mask_test_epi8_mask (__mmask32 __U, __m256i __A, __m256i __B)
+{
+ return (__mmask32) __builtin_ia32_ptestmb256 ((__v32qi) __A,
+ (__v32qi) __B, __U);
+}
+
+static __inline__ __mmask8 __DEFAULT_FN_ATTRS
+_mm_test_epi16_mask (__m128i __A, __m128i __B)
+{
+ return (__mmask8) __builtin_ia32_ptestmw128 ((__v8hi) __A,
+ (__v8hi) __B,
+ (__mmask8) -1);
+}
+
+static __inline__ __mmask8 __DEFAULT_FN_ATTRS
+_mm_mask_test_epi16_mask (__mmask8 __U, __m128i __A, __m128i __B)
+{
+ return (__mmask8) __builtin_ia32_ptestmw128 ((__v8hi) __A,
+ (__v8hi) __B, __U);
+}
+
+static __inline__ __mmask16 __DEFAULT_FN_ATTRS
+_mm256_test_epi16_mask (__m256i __A, __m256i __B)
+{
+ return (__mmask16) __builtin_ia32_ptestmw256 ((__v16hi) __A,
+ (__v16hi) __B,
+ (__mmask16) -1);
+}
+
+static __inline__ __mmask16 __DEFAULT_FN_ATTRS
+_mm256_mask_test_epi16_mask (__mmask16 __U, __m256i __A, __m256i __B)
+{
+ return (__mmask16) __builtin_ia32_ptestmw256 ((__v16hi) __A,
+ (__v16hi) __B, __U);
+}
+
+static __inline__ __mmask16 __DEFAULT_FN_ATTRS
+_mm_testn_epi8_mask (__m128i __A, __m128i __B)
+{
+ return (__mmask16) __builtin_ia32_ptestnmb128 ((__v16qi) __A,
+ (__v16qi) __B,
+ (__mmask16) -1);
+}
+
+static __inline__ __mmask16 __DEFAULT_FN_ATTRS
+_mm_mask_testn_epi8_mask (__mmask16 __U, __m128i __A, __m128i __B)
+{
+ return (__mmask16) __builtin_ia32_ptestnmb128 ((__v16qi) __A,
+ (__v16qi) __B, __U);
+}
+
+static __inline__ __mmask32 __DEFAULT_FN_ATTRS
+_mm256_testn_epi8_mask (__m256i __A, __m256i __B)
+{
+ return (__mmask32) __builtin_ia32_ptestnmb256 ((__v32qi) __A,
+ (__v32qi) __B,
+ (__mmask32) -1);
+}
+
+static __inline__ __mmask32 __DEFAULT_FN_ATTRS
+_mm256_mask_testn_epi8_mask (__mmask32 __U, __m256i __A, __m256i __B)
+{
+ return (__mmask32) __builtin_ia32_ptestnmb256 ((__v32qi) __A,
+ (__v32qi) __B, __U);
+}
+
+static __inline__ __mmask8 __DEFAULT_FN_ATTRS
+_mm_testn_epi16_mask (__m128i __A, __m128i __B)
+{
+ return (__mmask8) __builtin_ia32_ptestnmw128 ((__v8hi) __A,
+ (__v8hi) __B,
+ (__mmask8) -1);
+}
+
+static __inline__ __mmask8 __DEFAULT_FN_ATTRS
+_mm_mask_testn_epi16_mask (__mmask8 __U, __m128i __A, __m128i __B)
+{
+ return (__mmask8) __builtin_ia32_ptestnmw128 ((__v8hi) __A,
+ (__v8hi) __B, __U);
+}
+
+static __inline__ __mmask16 __DEFAULT_FN_ATTRS
+_mm256_testn_epi16_mask (__m256i __A, __m256i __B)
+{
+ return (__mmask16) __builtin_ia32_ptestnmw256 ((__v16hi) __A,
+ (__v16hi) __B,
+ (__mmask16) -1);
+}
+
+static __inline__ __mmask16 __DEFAULT_FN_ATTRS
+_mm256_mask_testn_epi16_mask (__mmask16 __U, __m256i __A, __m256i __B)
+{
+ return (__mmask16) __builtin_ia32_ptestnmw256 ((__v16hi) __A,
+ (__v16hi) __B, __U);
+}
+
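+/* movepi*_mask packs the sign bit of every element into a mask register
+   (vpmovb2m/vpmovw2m); movm_epi* is the inverse, widening each mask bit to
+   an all-ones or all-zeros element. */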
+static __inline__ __mmask16 __DEFAULT_FN_ATTRS
+_mm_movepi8_mask (__m128i __A)
+{
+ return (__mmask16) __builtin_ia32_cvtb2mask128 ((__v16qi) __A);
+}
+
+static __inline__ __mmask32 __DEFAULT_FN_ATTRS
+_mm256_movepi8_mask (__m256i __A)
+{
+ return (__mmask32) __builtin_ia32_cvtb2mask256 ((__v32qi) __A);
+}
+
+static __inline__ __mmask8 __DEFAULT_FN_ATTRS
+_mm_movepi16_mask (__m128i __A)
+{
+ return (__mmask8) __builtin_ia32_cvtw2mask128 ((__v8hi) __A);
+}
+
+static __inline__ __mmask16 __DEFAULT_FN_ATTRS
+_mm256_movepi16_mask (__m256i __A)
+{
+ return (__mmask16) __builtin_ia32_cvtw2mask256 ((__v16hi) __A);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_movm_epi8 (__mmask16 __A)
+{
+ return (__m128i) __builtin_ia32_cvtmask2b128 (__A);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS
+_mm256_movm_epi8 (__mmask32 __A)
+{
+ return (__m256i) __builtin_ia32_cvtmask2b256 (__A);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_movm_epi16 (__mmask8 __A)
+{
+ return (__m128i) __builtin_ia32_cvtmask2w128 (__A);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS
+_mm256_movm_epi16 (__mmask16 __A)
+{
+ return (__m256i) __builtin_ia32_cvtmask2w256 (__A);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_mask_broadcastb_epi8 (__m128i __O, __mmask16 __M, __m128i __A)
+{
+ return (__m128i)__builtin_ia32_selectb_128(__M,
+ (__v16qi) _mm_broadcastb_epi8(__A),
+ (__v16qi) __O);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_maskz_broadcastb_epi8 (__mmask16 __M, __m128i __A)
+{
+ return (__m128i)__builtin_ia32_selectb_128(__M,
+ (__v16qi) _mm_broadcastb_epi8(__A),
+ (__v16qi) _mm_setzero_si128());
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS
+_mm256_mask_broadcastb_epi8 (__m256i __O, __mmask32 __M, __m128i __A)
+{
+ return (__m256i)__builtin_ia32_selectb_256(__M,
+ (__v32qi) _mm256_broadcastb_epi8(__A),
+ (__v32qi) __O);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS
+_mm256_maskz_broadcastb_epi8 (__mmask32 __M, __m128i __A)
+{
+ return (__m256i)__builtin_ia32_selectb_256(__M,
+ (__v32qi) _mm256_broadcastb_epi8(__A),
+ (__v32qi) _mm256_setzero_si256());
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_mask_broadcastw_epi16 (__m128i __O, __mmask8 __M, __m128i __A)
+{
+ return (__m128i)__builtin_ia32_selectw_128(__M,
+ (__v8hi) _mm_broadcastw_epi16(__A),
+ (__v8hi) __O);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_maskz_broadcastw_epi16 (__mmask8 __M, __m128i __A)
+{
+ return (__m128i)__builtin_ia32_selectw_128(__M,
+ (__v8hi) _mm_broadcastw_epi16(__A),
+ (__v8hi) _mm_setzero_si128());
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS
+_mm256_mask_broadcastw_epi16 (__m256i __O, __mmask16 __M, __m128i __A)
+{
+ return (__m256i)__builtin_ia32_selectw_256(__M,
+ (__v16hi) _mm256_broadcastw_epi16(__A),
+ (__v16hi) __O);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS
+_mm256_maskz_broadcastw_epi16 (__mmask16 __M, __m128i __A)
+{
+ return (__m256i)__builtin_ia32_selectw_256(__M,
+ (__v16hi) _mm256_broadcastw_epi16(__A),
+ (__v16hi) _mm256_setzero_si256());
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS
+_mm256_mask_set1_epi16 (__m256i __O, __mmask16 __M, short __A)
+{
+ return (__m256i) __builtin_ia32_pbroadcastw256_gpr_mask (__A,
+ (__v16hi) __O,
+ __M);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS
+_mm256_maskz_set1_epi16 (__mmask16 __M, short __A)
+{
+ return (__m256i) __builtin_ia32_pbroadcastw256_gpr_mask (__A,
+ (__v16hi) _mm256_setzero_si256 (),
+ __M);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_mask_set1_epi16 (__m128i __O, __mmask8 __M, short __A)
+{
+ return (__m128i) __builtin_ia32_pbroadcastw128_gpr_mask (__A,
+ (__v8hi) __O,
+ __M);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_maskz_set1_epi16 (__mmask8 __M, short __A)
+{
+ return (__m128i) __builtin_ia32_pbroadcastw128_gpr_mask (__A,
+ (__v8hi) _mm_setzero_si128 (),
+ __M);
+}
+
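+/* permutexvar_epi16 is a full cross-lane word shuffle: the low bits of each
+   word of the first argument index into the words of the second. */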
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_permutexvar_epi16 (__m128i __A, __m128i __B)
+{
+ return (__m128i) __builtin_ia32_permvarhi128_mask ((__v8hi) __B,
+ (__v8hi) __A,
+ (__v8hi) _mm_undefined_si128 (),
+ (__mmask8) -1);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_maskz_permutexvar_epi16 (__mmask8 __M, __m128i __A, __m128i __B)
+{
+ return (__m128i) __builtin_ia32_permvarhi128_mask ((__v8hi) __B,
+ (__v8hi) __A,
+ (__v8hi) _mm_setzero_si128 (),
+ (__mmask8) __M);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_mask_permutexvar_epi16 (__m128i __W, __mmask8 __M, __m128i __A,
+ __m128i __B)
+{
+ return (__m128i) __builtin_ia32_permvarhi128_mask ((__v8hi) __B,
+ (__v8hi) __A,
+ (__v8hi) __W,
+ (__mmask8) __M);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS
+_mm256_permutexvar_epi16 (__m256i __A, __m256i __B)
+{
+ return (__m256i) __builtin_ia32_permvarhi256_mask ((__v16hi) __B,
+ (__v16hi) __A,
+ (__v16hi) _mm256_undefined_si256 (),
+ (__mmask16) -1);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS
+_mm256_maskz_permutexvar_epi16 (__mmask16 __M, __m256i __A,
+ __m256i __B)
+{
+ return (__m256i) __builtin_ia32_permvarhi256_mask ((__v16hi) __B,
+ (__v16hi) __A,
+ (__v16hi) _mm256_setzero_si256 (),
+ (__mmask16) __M);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS
+_mm256_mask_permutexvar_epi16 (__m256i __W, __mmask16 __M, __m256i __A,
+ __m256i __B)
+{
+ return (__m256i) __builtin_ia32_permvarhi256_mask ((__v16hi) __B,
+ (__v16hi) __A,
+ (__v16hi) __W,
+ (__mmask16) __M);
+}
+
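+/* Masked byte alignment (palignr): each 128-bit lane of __A is concatenated
+   above the matching lane of __B and 16 bytes are extracted starting at
+   byte offset N. */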
+#define _mm_mask_alignr_epi8(W, U, A, B, N) __extension__ ({ \
+ (__m128i)__builtin_ia32_palignr128_mask((__v16qi)(__m128i)(A), \
+ (__v16qi)(__m128i)(B), (int)(N), \
+ (__v16qi)(__m128i)(W), \
+ (__mmask16)(U)); })
+
+#define _mm_maskz_alignr_epi8(U, A, B, N) __extension__ ({ \
+ (__m128i)__builtin_ia32_palignr128_mask((__v16qi)(__m128i)(A), \
+ (__v16qi)(__m128i)(B), (int)(N), \
+ (__v16qi)_mm_setzero_si128(), \
+ (__mmask16)(U)); })
+
+#define _mm256_mask_alignr_epi8(W, U, A, B, N) __extension__ ({ \
+ (__m256i)__builtin_ia32_palignr256_mask((__v32qi)(__m256i)(A), \
+ (__v32qi)(__m256i)(B), (int)(N), \
+ (__v32qi)(__m256i)(W), \
+ (__mmask32)(U)); })
+
+#define _mm256_maskz_alignr_epi8(U, A, B, N) __extension__ ({ \
+ (__m256i)__builtin_ia32_palignr256_mask((__v32qi)(__m256i)(A), \
+ (__v32qi)(__m256i)(B), (int)(N), \
+ (__v32qi)_mm256_setzero_si256(), \
+ (__mmask32)(U)); })
+
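+/* dbsad_epu8 (VDBPSADBW): imm first shuffles the dwords of (B) within each
+   128-bit lane, then sums of absolute differences of byte quadruplets are
+   accumulated into word results. */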
+#define _mm_dbsad_epu8(A, B, imm) __extension__ ({ \
+ (__m128i)__builtin_ia32_dbpsadbw128_mask((__v16qi)(__m128i)(A), \
+ (__v16qi)(__m128i)(B), (int)(imm), \
+ (__v8hi)_mm_setzero_hi(), \
+ (__mmask8)-1); })
+
+#define _mm_mask_dbsad_epu8(W, U, A, B, imm) __extension__ ({ \
+ (__m128i)__builtin_ia32_dbpsadbw128_mask((__v16qi)(__m128i)(A), \
+ (__v16qi)(__m128i)(B), (int)(imm), \
+ (__v8hi)(__m128i)(W), \
+ (__mmask8)(U)); })
+
+#define _mm_maskz_dbsad_epu8(U, A, B, imm) __extension__ ({ \
+ (__m128i)__builtin_ia32_dbpsadbw128_mask((__v16qi)(__m128i)(A), \
+ (__v16qi)(__m128i)(B), (int)(imm), \
+ (__v8hi)_mm_setzero_si128(), \
+ (__mmask8)(U)); })
+
+#define _mm256_dbsad_epu8(A, B, imm) __extension__ ({ \
+ (__m256i)__builtin_ia32_dbpsadbw256_mask((__v32qi)(__m256i)(A), \
+ (__v32qi)(__m256i)(B), (int)(imm), \
+ (__v16hi)_mm256_setzero_si256(), \
+ (__mmask16)-1); })
+
+#define _mm256_mask_dbsad_epu8(W, U, A, B, imm) __extension__ ({ \
+ (__m256i)__builtin_ia32_dbpsadbw256_mask((__v32qi)(__m256i)(A), \
+ (__v32qi)(__m256i)(B), (int)(imm), \
+ (__v16hi)(__m256i)(W), \
+ (__mmask16)(U)); })
+
+#define _mm256_maskz_dbsad_epu8(U, A, B, imm) __extension__ ({ \
+ (__m256i)__builtin_ia32_dbpsadbw256_mask((__v32qi)(__m256i)(A), \
+ (__v32qi)(__m256i)(B), (int)(imm), \
+ (__v16hi)_mm256_setzero_si256(), \
+ (__mmask16)(U)); })
+
+#undef __DEFAULT_FN_ATTRS
+
+#endif /* __AVX512VLBWINTRIN_H */
diff --git a/current/clang-include/avx512vlcdintrin.h b/current/clang-include/avx512vlcdintrin.h
new file mode 100644
index 0000000..7b02e2e
--- /dev/null
+++ b/current/clang-include/avx512vlcdintrin.h
@@ -0,0 +1,263 @@
+/*===---- avx512vlcdintrin.h - AVX512VL and AVX512CD intrinsics ------------===
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+ * THE SOFTWARE.
+ *
+ *===-----------------------------------------------------------------------===
+ */
+#ifndef __IMMINTRIN_H
+#error "Never use <avx512vlcdintrin.h> directly; include <immintrin.h> instead."
+#endif
+
+#ifndef __AVX512VLCDINTRIN_H
+#define __AVX512VLCDINTRIN_H
+
+/* Define the default attributes for the functions in this file. */
+#define __DEFAULT_FN_ATTRS __attribute__((__always_inline__, __nodebug__, __target__("avx512vl,avx512cd")))
+
+
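+/* broadcastmb/broadcastmw broadcast the mask register, zero-extended to 64
+   or 32 bits, into every element of the destination. */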
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_broadcastmb_epi64 (__mmask8 __A)
+{
+ return (__m128i) __builtin_ia32_broadcastmb128 (__A);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS
+_mm256_broadcastmb_epi64 (__mmask8 __A)
+{
+ return (__m256i) __builtin_ia32_broadcastmb256 (__A);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_broadcastmw_epi32 (__mmask16 __A)
+{
+ return (__m128i) __builtin_ia32_broadcastmw128 (__A);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS
+_mm256_broadcastmw_epi32 (__mmask16 __A)
+{
+ return (__m256i) __builtin_ia32_broadcastmw256 (__A);
+}
+
+
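+/* conflict_epi32/64 (VPCONFLICTD/Q): element i receives a bitmap marking
+   which lower-indexed elements compare equal to it, the building block for
+   vectorizing loops with indirect writes. */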
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_conflict_epi64 (__m128i __A)
+{
+ return (__m128i) __builtin_ia32_vpconflictdi_128_mask ((__v2di) __A,
+ (__v2di) _mm_undefined_si128 (),
+ (__mmask8) -1);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_mask_conflict_epi64 (__m128i __W, __mmask8 __U, __m128i __A)
+{
+ return (__m128i) __builtin_ia32_vpconflictdi_128_mask ((__v2di) __A,
+ (__v2di) __W,
+ (__mmask8) __U);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_maskz_conflict_epi64 (__mmask8 __U, __m128i __A)
+{
+ return (__m128i) __builtin_ia32_vpconflictdi_128_mask ((__v2di) __A,
+ (__v2di)
+ _mm_setzero_di (),
+ (__mmask8) __U);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS
+_mm256_conflict_epi64 (__m256i __A)
+{
+ return (__m256i) __builtin_ia32_vpconflictdi_256_mask ((__v4di) __A,
+ (__v4di) _mm256_undefined_si256 (),
+ (__mmask8) -1);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS
+_mm256_mask_conflict_epi64 (__m256i __W, __mmask8 __U, __m256i __A)
+{
+ return (__m256i) __builtin_ia32_vpconflictdi_256_mask ((__v4di) __A,
+ (__v4di) __W,
+ (__mmask8) __U);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS
+_mm256_maskz_conflict_epi64 (__mmask8 __U, __m256i __A)
+{
+ return (__m256i) __builtin_ia32_vpconflictdi_256_mask ((__v4di) __A,
+ (__v4di) _mm256_setzero_si256 (),
+ (__mmask8) __U);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_conflict_epi32 (__m128i __A)
+{
+ return (__m128i) __builtin_ia32_vpconflictsi_128_mask ((__v4si) __A,
+ (__v4si) _mm_undefined_si128 (),
+ (__mmask8) -1);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_mask_conflict_epi32 (__m128i __W, __mmask8 __U, __m128i __A)
+{
+ return (__m128i) __builtin_ia32_vpconflictsi_128_mask ((__v4si) __A,
+ (__v4si) __W,
+ (__mmask8) __U);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_maskz_conflict_epi32 (__mmask8 __U, __m128i __A)
+{
+ return (__m128i) __builtin_ia32_vpconflictsi_128_mask ((__v4si) __A,
+ (__v4si) _mm_setzero_si128 (),
+ (__mmask8) __U);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS
+_mm256_conflict_epi32 (__m256i __A)
+{
+ return (__m256i) __builtin_ia32_vpconflictsi_256_mask ((__v8si) __A,
+ (__v8si) _mm256_undefined_si256 (),
+ (__mmask8) -1);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS
+_mm256_mask_conflict_epi32 (__m256i __W, __mmask8 __U, __m256i __A)
+{
+ return (__m256i) __builtin_ia32_vpconflictsi_256_mask ((__v8si) __A,
+ (__v8si) __W,
+ (__mmask8) __U);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS
+_mm256_maskz_conflict_epi32 (__mmask8 __U, __m256i __A)
+{
+ return (__m256i) __builtin_ia32_vpconflictsi_256_mask ((__v8si) __A,
+ (__v8si)
+ _mm256_setzero_si256 (),
+ (__mmask8) __U);
+}
+
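+/* lzcnt_epi32/64 counts leading zero bits per element; an all-zero element
+   yields the full element width (32 or 64). */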
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_lzcnt_epi32 (__m128i __A)
+{
+ return (__m128i) __builtin_ia32_vplzcntd_128_mask ((__v4si) __A,
+ (__v4si)
+ _mm_setzero_si128 (),
+ (__mmask8) -1);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_mask_lzcnt_epi32 (__m128i __W, __mmask8 __U, __m128i __A)
+{
+ return (__m128i) __builtin_ia32_vplzcntd_128_mask ((__v4si) __A,
+ (__v4si) __W,
+ (__mmask8) __U);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_maskz_lzcnt_epi32 (__mmask8 __U, __m128i __A)
+{
+ return (__m128i) __builtin_ia32_vplzcntd_128_mask ((__v4si) __A,
+ (__v4si)
+ _mm_setzero_si128 (),
+ (__mmask8) __U);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS
+_mm256_lzcnt_epi32 (__m256i __A)
+{
+ return (__m256i) __builtin_ia32_vplzcntd_256_mask ((__v8si) __A,
+ (__v8si)
+ _mm256_setzero_si256 (),
+ (__mmask8) -1);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS
+_mm256_mask_lzcnt_epi32 (__m256i __W, __mmask8 __U, __m256i __A)
+{
+ return (__m256i) __builtin_ia32_vplzcntd_256_mask ((__v8si) __A,
+ (__v8si) __W,
+ (__mmask8) __U);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS
+_mm256_maskz_lzcnt_epi32 (__mmask8 __U, __m256i __A)
+{
+ return (__m256i) __builtin_ia32_vplzcntd_256_mask ((__v8si) __A,
+ (__v8si)
+ _mm256_setzero_si256 (),
+ (__mmask8) __U);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_lzcnt_epi64 (__m128i __A)
+{
+ return (__m128i) __builtin_ia32_vplzcntq_128_mask ((__v2di) __A,
+ (__v2di)
+ _mm_setzero_di (),
+ (__mmask8) -1);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_mask_lzcnt_epi64 (__m128i __W, __mmask8 __U, __m128i __A)
+{
+ return (__m128i) __builtin_ia32_vplzcntq_128_mask ((__v2di) __A,
+ (__v2di) __W,
+ (__mmask8) __U);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_maskz_lzcnt_epi64 (__mmask8 __U, __m128i __A)
+{
+ return (__m128i) __builtin_ia32_vplzcntq_128_mask ((__v2di) __A,
+ (__v2di)
+ _mm_setzero_di (),
+ (__mmask8) __U);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS
+_mm256_lzcnt_epi64 (__m256i __A)
+{
+ return (__m256i) __builtin_ia32_vplzcntq_256_mask ((__v4di) __A,
+ (__v4di)
+ _mm256_setzero_si256 (),
+ (__mmask8) -1);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS
+_mm256_mask_lzcnt_epi64 (__m256i __W, __mmask8 __U, __m256i __A)
+{
+ return (__m256i) __builtin_ia32_vplzcntq_256_mask ((__v4di) __A,
+ (__v4di) __W,
+ (__mmask8) __U);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS
+_mm256_maskz_lzcnt_epi64 (__mmask8 __U, __m256i __A)
+{
+ return (__m256i) __builtin_ia32_vplzcntq_256_mask ((__v4di) __A,
+ (__v4di)
+ _mm256_setzero_si256 (),
+ (__mmask8) __U);
+}
+
+#undef __DEFAULT_FN_ATTRS
+
+#endif /* __AVX512VLCDINTRIN_H */
diff --git a/current/clang-include/avx512vldqintrin.h b/current/clang-include/avx512vldqintrin.h
new file mode 100644
index 0000000..8187bcd
--- /dev/null
+++ b/current/clang-include/avx512vldqintrin.h
@@ -0,0 +1,1265 @@
+/*===---- avx512vldqintrin.h - AVX512VL and AVX512DQ intrinsics ------------===
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+ * THE SOFTWARE.
+ *
+ *===-----------------------------------------------------------------------===
+ */
+
+#ifndef __IMMINTRIN_H
+#error "Never use <avx512vldqintrin.h> directly; include <immintrin.h> instead."
+#endif
+
+#ifndef __AVX512VLDQINTRIN_H
+#define __AVX512VLDQINTRIN_H
+
+/* Define the default attributes for the functions in this file. */
+#define __DEFAULT_FN_ATTRS __attribute__((__always_inline__, __nodebug__, __target__("avx512vl,avx512dq")))
+
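+/* AVX512DQ makes 64-bit element multiplies native (VPMULLQ): mullo_epi64
+   keeps the low 64 bits of each product, so the unmasked form can be
+   expressed as a plain vector multiplication. */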
+static __inline__ __m256i __DEFAULT_FN_ATTRS
+_mm256_mullo_epi64 (__m256i __A, __m256i __B) {
+ return (__m256i) ((__v4du) __A * (__v4du) __B);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS
+_mm256_mask_mullo_epi64 (__m256i __W, __mmask8 __U, __m256i __A, __m256i __B) {
+ return (__m256i) __builtin_ia32_pmullq256_mask ((__v4di) __A,
+ (__v4di) __B,
+ (__v4di) __W,
+ (__mmask8) __U);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS
+_mm256_maskz_mullo_epi64 (__mmask8 __U, __m256i __A, __m256i __B) {
+ return (__m256i) __builtin_ia32_pmullq256_mask ((__v4di) __A,
+ (__v4di) __B,
+ (__v4di)
+ _mm256_setzero_si256 (),
+ (__mmask8) __U);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_mullo_epi64 (__m128i __A, __m128i __B) {
+ return (__m128i) ((__v2du) __A * (__v2du) __B);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_mask_mullo_epi64 (__m128i __W, __mmask8 __U, __m128i __A, __m128i __B) {
+ return (__m128i) __builtin_ia32_pmullq128_mask ((__v2di) __A,
+ (__v2di) __B,
+ (__v2di) __W,
+ (__mmask8) __U);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_maskz_mullo_epi64 (__mmask8 __U, __m128i __A, __m128i __B) {
+ return (__m128i) __builtin_ia32_pmullq128_mask ((__v2di) __A,
+ (__v2di) __B,
+ (__v2di)
+ _mm_setzero_si128 (),
+ (__mmask8) __U);
+}
+
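+/* AVX512DQ supplies the EVEX write-masked forms of the bitwise ANDN/AND/
+   XOR/OR operations on packed float and double vectors. */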
+static __inline__ __m256d __DEFAULT_FN_ATTRS
+_mm256_mask_andnot_pd (__m256d __W, __mmask8 __U, __m256d __A, __m256d __B) {
+ return (__m256d) __builtin_ia32_andnpd256_mask ((__v4df) __A,
+ (__v4df) __B,
+ (__v4df) __W,
+ (__mmask8) __U);
+}
+
+static __inline__ __m256d __DEFAULT_FN_ATTRS
+_mm256_maskz_andnot_pd (__mmask8 __U, __m256d __A, __m256d __B) {
+ return (__m256d) __builtin_ia32_andnpd256_mask ((__v4df) __A,
+ (__v4df) __B,
+ (__v4df)
+ _mm256_setzero_pd (),
+ (__mmask8) __U);
+}
+
+static __inline__ __m128d __DEFAULT_FN_ATTRS
+_mm_mask_andnot_pd (__m128d __W, __mmask8 __U, __m128d __A, __m128d __B) {
+ return (__m128d) __builtin_ia32_andnpd128_mask ((__v2df) __A,
+ (__v2df) __B,
+ (__v2df) __W,
+ (__mmask8) __U);
+}
+
+static __inline__ __m128d __DEFAULT_FN_ATTRS
+_mm_maskz_andnot_pd (__mmask8 __U, __m128d __A, __m128d __B) {
+ return (__m128d) __builtin_ia32_andnpd128_mask ((__v2df) __A,
+ (__v2df) __B,
+ (__v2df)
+ _mm_setzero_pd (),
+ (__mmask8) __U);
+}
+
+static __inline__ __m256 __DEFAULT_FN_ATTRS
+_mm256_mask_andnot_ps (__m256 __W, __mmask8 __U, __m256 __A, __m256 __B) {
+ return (__m256) __builtin_ia32_andnps256_mask ((__v8sf) __A,
+ (__v8sf) __B,
+ (__v8sf) __W,
+ (__mmask8) __U);
+}
+
+static __inline__ __m256 __DEFAULT_FN_ATTRS
+_mm256_maskz_andnot_ps (__mmask8 __U, __m256 __A, __m256 __B) {
+ return (__m256) __builtin_ia32_andnps256_mask ((__v8sf) __A,
+ (__v8sf) __B,
+ (__v8sf)
+ _mm256_setzero_ps (),
+ (__mmask8) __U);
+}
+
+static __inline__ __m128 __DEFAULT_FN_ATTRS
+_mm_mask_andnot_ps (__m128 __W, __mmask8 __U, __m128 __A, __m128 __B) {
+ return (__m128) __builtin_ia32_andnps128_mask ((__v4sf) __A,
+ (__v4sf) __B,
+ (__v4sf) __W,
+ (__mmask8) __U);
+}
+
+static __inline__ __m128 __DEFAULT_FN_ATTRS
+_mm_maskz_andnot_ps (__mmask8 __U, __m128 __A, __m128 __B) {
+ return (__m128) __builtin_ia32_andnps128_mask ((__v4sf) __A,
+ (__v4sf) __B,
+ (__v4sf)
+ _mm_setzero_ps (),
+ (__mmask8) __U);
+}
+
+static __inline__ __m256d __DEFAULT_FN_ATTRS
+_mm256_mask_and_pd (__m256d __W, __mmask8 __U, __m256d __A, __m256d __B) {
+ return (__m256d) __builtin_ia32_andpd256_mask ((__v4df) __A,
+ (__v4df) __B,
+ (__v4df) __W,
+ (__mmask8) __U);
+}
+
+static __inline__ __m256d __DEFAULT_FN_ATTRS
+_mm256_maskz_and_pd (__mmask8 __U, __m256d __A, __m256d __B) {
+ return (__m256d) __builtin_ia32_andpd256_mask ((__v4df) __A,
+ (__v4df) __B,
+ (__v4df)
+ _mm256_setzero_pd (),
+ (__mmask8) __U);
+}
+
+static __inline__ __m128d __DEFAULT_FN_ATTRS
+_mm_mask_and_pd (__m128d __W, __mmask8 __U, __m128d __A, __m128d __B) {
+ return (__m128d) __builtin_ia32_andpd128_mask ((__v2df) __A,
+ (__v2df) __B,
+ (__v2df) __W,
+ (__mmask8) __U);
+}
+
+static __inline__ __m128d __DEFAULT_FN_ATTRS
+_mm_maskz_and_pd (__mmask8 __U, __m128d __A, __m128d __B) {
+ return (__m128d) __builtin_ia32_andpd128_mask ((__v2df) __A,
+ (__v2df) __B,
+ (__v2df)
+ _mm_setzero_pd (),
+ (__mmask8) __U);
+}
+
+static __inline__ __m256 __DEFAULT_FN_ATTRS
+_mm256_mask_and_ps (__m256 __W, __mmask8 __U, __m256 __A, __m256 __B) {
+ return (__m256) __builtin_ia32_andps256_mask ((__v8sf) __A,
+ (__v8sf) __B,
+ (__v8sf) __W,
+ (__mmask8) __U);
+}
+
+static __inline__ __m256 __DEFAULT_FN_ATTRS
+_mm256_maskz_and_ps (__mmask8 __U, __m256 __A, __m256 __B) {
+ return (__m256) __builtin_ia32_andps256_mask ((__v8sf) __A,
+ (__v8sf) __B,
+ (__v8sf)
+ _mm256_setzero_ps (),
+ (__mmask8) __U);
+}
+
+static __inline__ __m128 __DEFAULT_FN_ATTRS
+_mm_mask_and_ps (__m128 __W, __mmask8 __U, __m128 __A, __m128 __B) {
+ return (__m128) __builtin_ia32_andps128_mask ((__v4sf) __A,
+ (__v4sf) __B,
+ (__v4sf) __W,
+ (__mmask8) __U);
+}
+
+static __inline__ __m128 __DEFAULT_FN_ATTRS
+_mm_maskz_and_ps (__mmask8 __U, __m128 __A, __m128 __B) {
+ return (__m128) __builtin_ia32_andps128_mask ((__v4sf) __A,
+ (__v4sf) __B,
+ (__v4sf)
+ _mm_setzero_ps (),
+ (__mmask8) __U);
+}
+
+static __inline__ __m256d __DEFAULT_FN_ATTRS
+_mm256_mask_xor_pd (__m256d __W, __mmask8 __U, __m256d __A,
+ __m256d __B) {
+ return (__m256d) __builtin_ia32_xorpd256_mask ((__v4df) __A,
+ (__v4df) __B,
+ (__v4df) __W,
+ (__mmask8) __U);
+}
+
+static __inline__ __m256d __DEFAULT_FN_ATTRS
+_mm256_maskz_xor_pd (__mmask8 __U, __m256d __A, __m256d __B) {
+ return (__m256d) __builtin_ia32_xorpd256_mask ((__v4df) __A,
+ (__v4df) __B,
+ (__v4df)
+ _mm256_setzero_pd (),
+ (__mmask8) __U);
+}
+
+static __inline__ __m128d __DEFAULT_FN_ATTRS
+_mm_mask_xor_pd (__m128d __W, __mmask8 __U, __m128d __A, __m128d __B) {
+ return (__m128d) __builtin_ia32_xorpd128_mask ((__v2df) __A,
+ (__v2df) __B,
+ (__v2df) __W,
+ (__mmask8) __U);
+}
+
+static __inline__ __m128d __DEFAULT_FN_ATTRS
+_mm_maskz_xor_pd (__mmask8 __U, __m128d __A, __m128d __B) {
+ return (__m128d) __builtin_ia32_xorpd128_mask ((__v2df) __A,
+ (__v2df) __B,
+ (__v2df)
+ _mm_setzero_pd (),
+ (__mmask8) __U);
+}
+
+static __inline__ __m256 __DEFAULT_FN_ATTRS
+_mm256_mask_xor_ps (__m256 __W, __mmask8 __U, __m256 __A, __m256 __B) {
+ return (__m256) __builtin_ia32_xorps256_mask ((__v8sf) __A,
+ (__v8sf) __B,
+ (__v8sf) __W,
+ (__mmask8) __U);
+}
+
+static __inline__ __m256 __DEFAULT_FN_ATTRS
+_mm256_maskz_xor_ps (__mmask8 __U, __m256 __A, __m256 __B) {
+ return (__m256) __builtin_ia32_xorps256_mask ((__v8sf) __A,
+ (__v8sf) __B,
+ (__v8sf)
+ _mm256_setzero_ps (),
+ (__mmask8) __U);
+}
+
+static __inline__ __m128 __DEFAULT_FN_ATTRS
+_mm_mask_xor_ps (__m128 __W, __mmask8 __U, __m128 __A, __m128 __B) {
+ return (__m128) __builtin_ia32_xorps128_mask ((__v4sf) __A,
+ (__v4sf) __B,
+ (__v4sf) __W,
+ (__mmask8) __U);
+}
+
+static __inline__ __m128 __DEFAULT_FN_ATTRS
+_mm_maskz_xor_ps (__mmask8 __U, __m128 __A, __m128 __B) {
+ return (__m128) __builtin_ia32_xorps128_mask ((__v4sf) __A,
+ (__v4sf) __B,
+ (__v4sf)
+ _mm_setzero_ps (),
+ (__mmask8) __U);
+}
+
+static __inline__ __m256d __DEFAULT_FN_ATTRS
+_mm256_mask_or_pd (__m256d __W, __mmask8 __U, __m256d __A, __m256d __B) {
+ return (__m256d) __builtin_ia32_orpd256_mask ((__v4df) __A,
+ (__v4df) __B,
+ (__v4df) __W,
+ (__mmask8) __U);
+}
+
+static __inline__ __m256d __DEFAULT_FN_ATTRS
+_mm256_maskz_or_pd (__mmask8 __U, __m256d __A, __m256d __B) {
+ return (__m256d) __builtin_ia32_orpd256_mask ((__v4df) __A,
+ (__v4df) __B,
+ (__v4df)
+ _mm256_setzero_pd (),
+ (__mmask8) __U);
+}
+
+static __inline__ __m128d __DEFAULT_FN_ATTRS
+_mm_mask_or_pd (__m128d __W, __mmask8 __U, __m128d __A, __m128d __B) {
+ return (__m128d) __builtin_ia32_orpd128_mask ((__v2df) __A,
+ (__v2df) __B,
+ (__v2df) __W,
+ (__mmask8) __U);
+}
+
+static __inline__ __m128d __DEFAULT_FN_ATTRS
+_mm_maskz_or_pd (__mmask8 __U, __m128d __A, __m128d __B) {
+ return (__m128d) __builtin_ia32_orpd128_mask ((__v2df) __A,
+ (__v2df) __B,
+ (__v2df)
+ _mm_setzero_pd (),
+ (__mmask8) __U);
+}
+
+static __inline__ __m256 __DEFAULT_FN_ATTRS
+_mm256_mask_or_ps (__m256 __W, __mmask8 __U, __m256 __A, __m256 __B) {
+ return (__m256) __builtin_ia32_orps256_mask ((__v8sf) __A,
+ (__v8sf) __B,
+ (__v8sf) __W,
+ (__mmask8) __U);
+}
+
+static __inline__ __m256 __DEFAULT_FN_ATTRS
+_mm256_maskz_or_ps (__mmask8 __U, __m256 __A, __m256 __B) {
+ return (__m256) __builtin_ia32_orps256_mask ((__v8sf) __A,
+ (__v8sf) __B,
+ (__v8sf)
+ _mm256_setzero_ps (),
+ (__mmask8) __U);
+}
+
+static __inline__ __m128 __DEFAULT_FN_ATTRS
+_mm_mask_or_ps (__m128 __W, __mmask8 __U, __m128 __A, __m128 __B) {
+ return (__m128) __builtin_ia32_orps128_mask ((__v4sf) __A,
+ (__v4sf) __B,
+ (__v4sf) __W,
+ (__mmask8) __U);
+}
+
+static __inline__ __m128 __DEFAULT_FN_ATTRS
+_mm_maskz_or_ps (__mmask8 __U, __m128 __A, __m128 __B) {
+ return (__m128) __builtin_ia32_orps128_mask ((__v4sf) __A,
+ (__v4sf) __B,
+ (__v4sf)
+ _mm_setzero_ps (),
+ (__mmask8) __U);
+}
+
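+/*
+ * Note on the _mask/_maskz naming used throughout this file: the "mask"
+ * form blends the result with a pass-through vector __W (lanes whose bit
+ * in __U is clear keep the value from __W), while the "maskz" form zeroes
+ * those lanes instead.  Illustrative sketch (not part of the header):
+ *
+ *   __m128 r = _mm_maskz_and_ps(0x1, a, b);    // lanes 1..3 become 0.0f
+ *   __m128 s = _mm_mask_and_ps(w, 0x1, a, b);  // lanes 1..3 come from w
+ */
+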
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_cvtpd_epi64 (__m128d __A) {
+ return (__m128i) __builtin_ia32_cvtpd2qq128_mask ((__v2df) __A,
+ (__v2di) _mm_setzero_si128(),
+ (__mmask8) -1);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_mask_cvtpd_epi64 (__m128i __W, __mmask8 __U, __m128d __A) {
+ return (__m128i) __builtin_ia32_cvtpd2qq128_mask ((__v2df) __A,
+ (__v2di) __W,
+ (__mmask8) __U);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_maskz_cvtpd_epi64 (__mmask8 __U, __m128d __A) {
+ return (__m128i) __builtin_ia32_cvtpd2qq128_mask ((__v2df) __A,
+ (__v2di) _mm_setzero_si128(),
+ (__mmask8) __U);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS
+_mm256_cvtpd_epi64 (__m256d __A) {
+ return (__m256i) __builtin_ia32_cvtpd2qq256_mask ((__v4df) __A,
+ (__v4di) _mm256_setzero_si256(),
+ (__mmask8) -1);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS
+_mm256_mask_cvtpd_epi64 (__m256i __W, __mmask8 __U, __m256d __A) {
+ return (__m256i) __builtin_ia32_cvtpd2qq256_mask ((__v4df) __A,
+ (__v4di) __W,
+ (__mmask8) __U);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS
+_mm256_maskz_cvtpd_epi64 (__mmask8 __U, __m256d __A) {
+ return (__m256i) __builtin_ia32_cvtpd2qq256_mask ((__v4df) __A,
+ (__v4di) _mm256_setzero_si256(),
+ (__mmask8) __U);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_cvtpd_epu64 (__m128d __A) {
+ return (__m128i) __builtin_ia32_cvtpd2uqq128_mask ((__v2df) __A,
+ (__v2di) _mm_setzero_si128(),
+ (__mmask8) -1);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_mask_cvtpd_epu64 (__m128i __W, __mmask8 __U, __m128d __A) {
+ return (__m128i) __builtin_ia32_cvtpd2uqq128_mask ((__v2df) __A,
+ (__v2di) __W,
+ (__mmask8) __U);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_maskz_cvtpd_epu64 (__mmask8 __U, __m128d __A) {
+ return (__m128i) __builtin_ia32_cvtpd2uqq128_mask ((__v2df) __A,
+ (__v2di) _mm_setzero_si128(),
+ (__mmask8) __U);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS
+_mm256_cvtpd_epu64 (__m256d __A) {
+ return (__m256i) __builtin_ia32_cvtpd2uqq256_mask ((__v4df) __A,
+ (__v4di) _mm256_setzero_si256(),
+ (__mmask8) -1);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS
+_mm256_mask_cvtpd_epu64 (__m256i __W, __mmask8 __U, __m256d __A) {
+ return (__m256i) __builtin_ia32_cvtpd2uqq256_mask ((__v4df) __A,
+ (__v4di) __W,
+ (__mmask8) __U);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS
+_mm256_maskz_cvtpd_epu64 (__mmask8 __U, __m256d __A) {
+ return (__m256i) __builtin_ia32_cvtpd2uqq256_mask ((__v4df) __A,
+ (__v4di) _mm256_setzero_si256(),
+ (__mmask8) __U);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_cvtps_epi64 (__m128 __A) {
+ return (__m128i) __builtin_ia32_cvtps2qq128_mask ((__v4sf) __A,
+ (__v2di) _mm_setzero_si128(),
+ (__mmask8) -1);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_mask_cvtps_epi64 (__m128i __W, __mmask8 __U, __m128 __A) {
+ return (__m128i) __builtin_ia32_cvtps2qq128_mask ((__v4sf) __A,
+ (__v2di) __W,
+ (__mmask8) __U);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_maskz_cvtps_epi64 (__mmask8 __U, __m128 __A) {
+ return (__m128i) __builtin_ia32_cvtps2qq128_mask ((__v4sf) __A,
+ (__v2di) _mm_setzero_si128(),
+ (__mmask8) __U);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS
+_mm256_cvtps_epi64 (__m128 __A) {
+ return (__m256i) __builtin_ia32_cvtps2qq256_mask ((__v4sf) __A,
+ (__v4di) _mm256_setzero_si256(),
+ (__mmask8) -1);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS
+_mm256_mask_cvtps_epi64 (__m256i __W, __mmask8 __U, __m128 __A) {
+ return (__m256i) __builtin_ia32_cvtps2qq256_mask ((__v4sf) __A,
+ (__v4di) __W,
+ (__mmask8) __U);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS
+_mm256_maskz_cvtps_epi64 (__mmask8 __U, __m128 __A) {
+ return (__m256i) __builtin_ia32_cvtps2qq256_mask ((__v4sf) __A,
+ (__v4di) _mm256_setzero_si256(),
+ (__mmask8) __U);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_cvtps_epu64 (__m128 __A) {
+ return (__m128i) __builtin_ia32_cvtps2uqq128_mask ((__v4sf) __A,
+ (__v2di) _mm_setzero_si128(),
+ (__mmask8) -1);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_mask_cvtps_epu64 (__m128i __W, __mmask8 __U, __m128 __A) {
+ return (__m128i) __builtin_ia32_cvtps2uqq128_mask ((__v4sf) __A,
+ (__v2di) __W,
+ (__mmask8) __U);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_maskz_cvtps_epu64 (__mmask8 __U, __m128 __A) {
+ return (__m128i) __builtin_ia32_cvtps2uqq128_mask ((__v4sf) __A,
+ (__v2di) _mm_setzero_si128(),
+ (__mmask8) __U);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS
+_mm256_cvtps_epu64 (__m128 __A) {
+ return (__m256i) __builtin_ia32_cvtps2uqq256_mask ((__v4sf) __A,
+ (__v4di) _mm256_setzero_si256(),
+ (__mmask8) -1);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS
+_mm256_mask_cvtps_epu64 (__m256i __W, __mmask8 __U, __m128 __A) {
+ return (__m256i) __builtin_ia32_cvtps2uqq256_mask ((__v4sf) __A,
+ (__v4di) __W,
+ (__mmask8) __U);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS
+_mm256_maskz_cvtps_epu64 (__mmask8 __U, __m128 __A) {
+ return (__m256i) __builtin_ia32_cvtps2uqq256_mask ((__v4sf) __A,
+ (__v4di) _mm256_setzero_si256(),
+ (__mmask8) __U);
+}
+
+static __inline__ __m128d __DEFAULT_FN_ATTRS
+_mm_cvtepi64_pd (__m128i __A) {
+ return (__m128d) __builtin_ia32_cvtqq2pd128_mask ((__v2di) __A,
+ (__v2df) _mm_setzero_pd(),
+ (__mmask8) -1);
+}
+
+static __inline__ __m128d __DEFAULT_FN_ATTRS
+_mm_mask_cvtepi64_pd (__m128d __W, __mmask8 __U, __m128i __A) {
+ return (__m128d) __builtin_ia32_cvtqq2pd128_mask ((__v2di) __A,
+ (__v2df) __W,
+ (__mmask8) __U);
+}
+
+static __inline__ __m128d __DEFAULT_FN_ATTRS
+_mm_maskz_cvtepi64_pd (__mmask8 __U, __m128i __A) {
+ return (__m128d) __builtin_ia32_cvtqq2pd128_mask ((__v2di) __A,
+ (__v2df) _mm_setzero_pd(),
+ (__mmask8) __U);
+}
+
+static __inline__ __m256d __DEFAULT_FN_ATTRS
+_mm256_cvtepi64_pd (__m256i __A) {
+ return (__m256d) __builtin_ia32_cvtqq2pd256_mask ((__v4di) __A,
+ (__v4df) _mm256_setzero_pd(),
+ (__mmask8) -1);
+}
+
+static __inline__ __m256d __DEFAULT_FN_ATTRS
+_mm256_mask_cvtepi64_pd (__m256d __W, __mmask8 __U, __m256i __A) {
+ return (__m256d) __builtin_ia32_cvtqq2pd256_mask ((__v4di) __A,
+ (__v4df) __W,
+ (__mmask8) __U);
+}
+
+static __inline__ __m256d __DEFAULT_FN_ATTRS
+_mm256_maskz_cvtepi64_pd (__mmask8 __U, __m256i __A) {
+ return (__m256d) __builtin_ia32_cvtqq2pd256_mask ((__v4di) __A,
+ (__v4df) _mm256_setzero_pd(),
+ (__mmask8) __U);
+}
+
+static __inline__ __m128 __DEFAULT_FN_ATTRS
+_mm_cvtepi64_ps (__m128i __A) {
+ return (__m128) __builtin_ia32_cvtqq2ps128_mask ((__v2di) __A,
+ (__v4sf) _mm_setzero_ps(),
+ (__mmask8) -1);
+}
+
+static __inline__ __m128 __DEFAULT_FN_ATTRS
+_mm_mask_cvtepi64_ps (__m128 __W, __mmask8 __U, __m128i __A) {
+ return (__m128) __builtin_ia32_cvtqq2ps128_mask ((__v2di) __A,
+ (__v4sf) __W,
+ (__mmask8) __U);
+}
+
+static __inline__ __m128 __DEFAULT_FN_ATTRS
+_mm_maskz_cvtepi64_ps (__mmask8 __U, __m128i __A) {
+ return (__m128) __builtin_ia32_cvtqq2ps128_mask ((__v2di) __A,
+ (__v4sf) _mm_setzero_ps(),
+ (__mmask8) __U);
+}
+
+static __inline__ __m128 __DEFAULT_FN_ATTRS
+_mm256_cvtepi64_ps (__m256i __A) {
+ return (__m128) __builtin_ia32_cvtqq2ps256_mask ((__v4di) __A,
+ (__v4sf) _mm_setzero_ps(),
+ (__mmask8) -1);
+}
+
+static __inline__ __m128 __DEFAULT_FN_ATTRS
+_mm256_mask_cvtepi64_ps (__m128 __W, __mmask8 __U, __m256i __A) {
+ return (__m128) __builtin_ia32_cvtqq2ps256_mask ((__v4di) __A,
+ (__v4sf) __W,
+ (__mmask8) __U);
+}
+
+static __inline__ __m128 __DEFAULT_FN_ATTRS
+_mm256_maskz_cvtepi64_ps (__mmask8 __U, __m256i __A) {
+ return (__m128) __builtin_ia32_cvtqq2ps256_mask ((__v4di) __A,
+ (__v4sf) _mm_setzero_ps(),
+ (__mmask8) __U);
+}
+
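+/*
+ * The cvtt* variants below convert with truncation toward zero (VCVTT*),
+ * whereas the plain cvt* forms above round according to the current
+ * rounding mode in MXCSR.
+ */
+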
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_cvttpd_epi64 (__m128d __A) {
+ return (__m128i) __builtin_ia32_cvttpd2qq128_mask ((__v2df) __A,
+ (__v2di) _mm_setzero_si128(),
+ (__mmask8) -1);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_mask_cvttpd_epi64 (__m128i __W, __mmask8 __U, __m128d __A) {
+ return (__m128i) __builtin_ia32_cvttpd2qq128_mask ((__v2df) __A,
+ (__v2di) __W,
+ (__mmask8) __U);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_maskz_cvttpd_epi64 (__mmask8 __U, __m128d __A) {
+ return (__m128i) __builtin_ia32_cvttpd2qq128_mask ((__v2df) __A,
+ (__v2di) _mm_setzero_si128(),
+ (__mmask8) __U);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS
+_mm256_cvttpd_epi64 (__m256d __A) {
+ return (__m256i) __builtin_ia32_cvttpd2qq256_mask ((__v4df) __A,
+ (__v4di) _mm256_setzero_si256(),
+ (__mmask8) -1);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS
+_mm256_mask_cvttpd_epi64 (__m256i __W, __mmask8 __U, __m256d __A) {
+ return (__m256i) __builtin_ia32_cvttpd2qq256_mask ((__v4df) __A,
+ (__v4di) __W,
+ (__mmask8) __U);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS
+_mm256_maskz_cvttpd_epi64 (__mmask8 __U, __m256d __A) {
+ return (__m256i) __builtin_ia32_cvttpd2qq256_mask ((__v4df) __A,
+ (__v4di) _mm256_setzero_si256(),
+ (__mmask8) __U);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_cvttpd_epu64 (__m128d __A) {
+ return (__m128i) __builtin_ia32_cvttpd2uqq128_mask ((__v2df) __A,
+ (__v2di) _mm_setzero_si128(),
+ (__mmask8) -1);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_mask_cvttpd_epu64 (__m128i __W, __mmask8 __U, __m128d __A) {
+ return (__m128i) __builtin_ia32_cvttpd2uqq128_mask ((__v2df) __A,
+ (__v2di) __W,
+ (__mmask8) __U);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_maskz_cvttpd_epu64 (__mmask8 __U, __m128d __A) {
+ return (__m128i) __builtin_ia32_cvttpd2uqq128_mask ((__v2df) __A,
+ (__v2di) _mm_setzero_si128(),
+ (__mmask8) __U);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS
+_mm256_cvttpd_epu64 (__m256d __A) {
+ return (__m256i) __builtin_ia32_cvttpd2uqq256_mask ((__v4df) __A,
+ (__v4di) _mm256_setzero_si256(),
+ (__mmask8) -1);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS
+_mm256_mask_cvttpd_epu64 (__m256i __W, __mmask8 __U, __m256d __A) {
+ return (__m256i) __builtin_ia32_cvttpd2uqq256_mask ((__v4df) __A,
+ (__v4di) __W,
+ (__mmask8) __U);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS
+_mm256_maskz_cvttpd_epu64 (__mmask8 __U, __m256d __A) {
+ return (__m256i) __builtin_ia32_cvttpd2uqq256_mask ((__v4df) __A,
+ (__v4di) _mm256_setzero_si256(),
+ (__mmask8) __U);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_cvttps_epi64 (__m128 __A) {
+ return (__m128i) __builtin_ia32_cvttps2qq128_mask ((__v4sf) __A,
+ (__v2di) _mm_setzero_si128(),
+ (__mmask8) -1);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_mask_cvttps_epi64 (__m128i __W, __mmask8 __U, __m128 __A) {
+ return (__m128i) __builtin_ia32_cvttps2qq128_mask ((__v4sf) __A,
+ (__v2di) __W,
+ (__mmask8) __U);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_maskz_cvttps_epi64 (__mmask8 __U, __m128 __A) {
+ return (__m128i) __builtin_ia32_cvttps2qq128_mask ((__v4sf) __A,
+ (__v2di) _mm_setzero_si128(),
+ (__mmask8) __U);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS
+_mm256_cvttps_epi64 (__m128 __A) {
+ return (__m256i) __builtin_ia32_cvttps2qq256_mask ((__v4sf) __A,
+ (__v4di) _mm256_setzero_si256(),
+ (__mmask8) -1);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS
+_mm256_mask_cvttps_epi64 (__m256i __W, __mmask8 __U, __m128 __A) {
+ return (__m256i) __builtin_ia32_cvttps2qq256_mask ((__v4sf) __A,
+ (__v4di) __W,
+ (__mmask8) __U);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS
+_mm256_maskz_cvttps_epi64 (__mmask8 __U, __m128 __A) {
+ return (__m256i) __builtin_ia32_cvttps2qq256_mask ((__v4sf) __A,
+ (__v4di) _mm256_setzero_si256(),
+ (__mmask8) __U);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_cvttps_epu64 (__m128 __A) {
+ return (__m128i) __builtin_ia32_cvttps2uqq128_mask ((__v4sf) __A,
+ (__v2di) _mm_setzero_si128(),
+ (__mmask8) -1);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_mask_cvttps_epu64 (__m128i __W, __mmask8 __U, __m128 __A) {
+ return (__m128i) __builtin_ia32_cvttps2uqq128_mask ((__v4sf) __A,
+ (__v2di) __W,
+ (__mmask8) __U);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_maskz_cvttps_epu64 (__mmask8 __U, __m128 __A) {
+ return (__m128i) __builtin_ia32_cvttps2uqq128_mask ((__v4sf) __A,
+ (__v2di) _mm_setzero_si128(),
+ (__mmask8) __U);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS
+_mm256_cvttps_epu64 (__m128 __A) {
+ return (__m256i) __builtin_ia32_cvttps2uqq256_mask ((__v4sf) __A,
+ (__v4di) _mm256_setzero_si256(),
+ (__mmask8) -1);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS
+_mm256_mask_cvttps_epu64 (__m256i __W, __mmask8 __U, __m128 __A) {
+ return (__m256i) __builtin_ia32_cvttps2uqq256_mask ((__v4sf) __A,
+ (__v4di) __W,
+ (__mmask8) __U);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS
+_mm256_maskz_cvttps_epu64 (__mmask8 __U, __m128 __A) {
+ return (__m256i) __builtin_ia32_cvttps2uqq256_mask ((__v4sf) __A,
+ (__v4di) _mm256_setzero_si256(),
+ (__mmask8) __U);
+}
+
+static __inline__ __m128d __DEFAULT_FN_ATTRS
+_mm_cvtepu64_pd (__m128i __A) {
+ return (__m128d) __builtin_ia32_cvtuqq2pd128_mask ((__v2di) __A,
+ (__v2df) _mm_setzero_pd(),
+ (__mmask8) -1);
+}
+
+static __inline__ __m128d __DEFAULT_FN_ATTRS
+_mm_mask_cvtepu64_pd (__m128d __W, __mmask8 __U, __m128i __A) {
+ return (__m128d) __builtin_ia32_cvtuqq2pd128_mask ((__v2di) __A,
+ (__v2df) __W,
+ (__mmask8) __U);
+}
+
+static __inline__ __m128d __DEFAULT_FN_ATTRS
+_mm_maskz_cvtepu64_pd (__mmask8 __U, __m128i __A) {
+ return (__m128d) __builtin_ia32_cvtuqq2pd128_mask ((__v2di) __A,
+ (__v2df) _mm_setzero_pd(),
+ (__mmask8) __U);
+}
+
+static __inline__ __m256d __DEFAULT_FN_ATTRS
+_mm256_cvtepu64_pd (__m256i __A) {
+ return (__m256d) __builtin_ia32_cvtuqq2pd256_mask ((__v4di) __A,
+ (__v4df) _mm256_setzero_pd(),
+ (__mmask8) -1);
+}
+
+static __inline__ __m256d __DEFAULT_FN_ATTRS
+_mm256_mask_cvtepu64_pd (__m256d __W, __mmask8 __U, __m256i __A) {
+ return (__m256d) __builtin_ia32_cvtuqq2pd256_mask ((__v4di) __A,
+ (__v4df) __W,
+ (__mmask8) __U);
+}
+
+static __inline__ __m256d __DEFAULT_FN_ATTRS
+_mm256_maskz_cvtepu64_pd (__mmask8 __U, __m256i __A) {
+ return (__m256d) __builtin_ia32_cvtuqq2pd256_mask ((__v4di) __A,
+ (__v4df) _mm256_setzero_pd(),
+ (__mmask8) __U);
+}
+
+static __inline__ __m128 __DEFAULT_FN_ATTRS
+_mm_cvtepu64_ps (__m128i __A) {
+ return (__m128) __builtin_ia32_cvtuqq2ps128_mask ((__v2di) __A,
+ (__v4sf) _mm_setzero_ps(),
+ (__mmask8) -1);
+}
+
+static __inline__ __m128 __DEFAULT_FN_ATTRS
+_mm_mask_cvtepu64_ps (__m128 __W, __mmask8 __U, __m128i __A) {
+ return (__m128) __builtin_ia32_cvtuqq2ps128_mask ((__v2di) __A,
+ (__v4sf) __W,
+ (__mmask8) __U);
+}
+
+static __inline__ __m128 __DEFAULT_FN_ATTRS
+_mm_maskz_cvtepu64_ps (__mmask8 __U, __m128i __A) {
+ return (__m128) __builtin_ia32_cvtuqq2ps128_mask ((__v2di) __A,
+ (__v4sf) _mm_setzero_ps(),
+ (__mmask8) __U);
+}
+
+static __inline__ __m128 __DEFAULT_FN_ATTRS
+_mm256_cvtepu64_ps (__m256i __A) {
+ return (__m128) __builtin_ia32_cvtuqq2ps256_mask ((__v4di) __A,
+ (__v4sf) _mm_setzero_ps(),
+ (__mmask8) -1);
+}
+
+static __inline__ __m128 __DEFAULT_FN_ATTRS
+_mm256_mask_cvtepu64_ps (__m128 __W, __mmask8 __U, __m256i __A) {
+ return (__m128) __builtin_ia32_cvtuqq2ps256_mask ((__v4di) __A,
+ (__v4sf) __W,
+ (__mmask8) __U);
+}
+
+static __inline__ __m128 __DEFAULT_FN_ATTRS
+_mm256_maskz_cvtepu64_ps (__mmask8 __U, __m256i __A) {
+ return (__m128) __builtin_ia32_cvtuqq2ps256_mask ((__v4di) __A,
+ (__v4sf) _mm_setzero_ps(),
+ (__mmask8) __U);
+}
+
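+/*
+ * _mm*_range_* and _mm*_reduce_* take an 8-bit immediate that must be a
+ * compile-time constant, so they are implemented as macros wrapping the
+ * builtin rather than as inline functions.  Illustrative use (not part
+ * of the header):
+ *
+ *   __m128d m = _mm_range_pd(a, b, 0);  // imm 0 selects the min operation
+ */
+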
+#define _mm_range_pd(A, B, C) __extension__ ({ \
+ (__m128d)__builtin_ia32_rangepd128_mask((__v2df)(__m128d)(A), \
+ (__v2df)(__m128d)(B), (int)(C), \
+ (__v2df)_mm_setzero_pd(), \
+ (__mmask8)-1); })
+
+#define _mm_mask_range_pd(W, U, A, B, C) __extension__ ({ \
+ (__m128d)__builtin_ia32_rangepd128_mask((__v2df)(__m128d)(A), \
+ (__v2df)(__m128d)(B), (int)(C), \
+ (__v2df)(__m128d)(W), \
+ (__mmask8)(U)); })
+
+#define _mm_maskz_range_pd(U, A, B, C) __extension__ ({ \
+ (__m128d)__builtin_ia32_rangepd128_mask((__v2df)(__m128d)(A), \
+ (__v2df)(__m128d)(B), (int)(C), \
+ (__v2df)_mm_setzero_pd(), \
+ (__mmask8)(U)); })
+
+#define _mm256_range_pd(A, B, C) __extension__ ({ \
+ (__m256d)__builtin_ia32_rangepd256_mask((__v4df)(__m256d)(A), \
+ (__v4df)(__m256d)(B), (int)(C), \
+ (__v4df)_mm256_setzero_pd(), \
+ (__mmask8)-1); })
+
+#define _mm256_mask_range_pd(W, U, A, B, C) __extension__ ({ \
+ (__m256d)__builtin_ia32_rangepd256_mask((__v4df)(__m256d)(A), \
+ (__v4df)(__m256d)(B), (int)(C), \
+ (__v4df)(__m256d)(W), \
+ (__mmask8)(U)); })
+
+#define _mm256_maskz_range_pd(U, A, B, C) __extension__ ({ \
+ (__m256d)__builtin_ia32_rangepd256_mask((__v4df)(__m256d)(A), \
+ (__v4df)(__m256d)(B), (int)(C), \
+ (__v4df)_mm256_setzero_pd(), \
+ (__mmask8)(U)); })
+
+#define _mm_range_ps(A, B, C) __extension__ ({ \
+ (__m128)__builtin_ia32_rangeps128_mask((__v4sf)(__m128)(A), \
+ (__v4sf)(__m128)(B), (int)(C), \
+ (__v4sf)_mm_setzero_ps(), \
+ (__mmask8)-1); })
+
+#define _mm_mask_range_ps(W, U, A, B, C) __extension__ ({ \
+ (__m128)__builtin_ia32_rangeps128_mask((__v4sf)(__m128)(A), \
+ (__v4sf)(__m128)(B), (int)(C), \
+ (__v4sf)(__m128)(W), (__mmask8)(U)); })
+
+#define _mm_maskz_range_ps(U, A, B, C) __extension__ ({ \
+ (__m128)__builtin_ia32_rangeps128_mask((__v4sf)(__m128)(A), \
+ (__v4sf)(__m128)(B), (int)(C), \
+ (__v4sf)_mm_setzero_ps(), \
+ (__mmask8)(U)); })
+
+#define _mm256_range_ps(A, B, C) __extension__ ({ \
+ (__m256)__builtin_ia32_rangeps256_mask((__v8sf)(__m256)(A), \
+ (__v8sf)(__m256)(B), (int)(C), \
+ (__v8sf)_mm256_setzero_ps(), \
+ (__mmask8)-1); })
+
+#define _mm256_mask_range_ps(W, U, A, B, C) __extension__ ({ \
+ (__m256)__builtin_ia32_rangeps256_mask((__v8sf)(__m256)(A), \
+ (__v8sf)(__m256)(B), (int)(C), \
+ (__v8sf)(__m256)(W), (__mmask8)(U)); })
+
+#define _mm256_maskz_range_ps(U, A, B, C) __extension__ ({ \
+ (__m256)__builtin_ia32_rangeps256_mask((__v8sf)(__m256)(A), \
+ (__v8sf)(__m256)(B), (int)(C), \
+ (__v8sf)_mm256_setzero_ps(), \
+ (__mmask8)(U)); })
+
+#define _mm_reduce_pd(A, B) __extension__ ({ \
+ (__m128d)__builtin_ia32_reducepd128_mask((__v2df)(__m128d)(A), (int)(B), \
+ (__v2df)_mm_setzero_pd(), \
+ (__mmask8)-1); })
+
+#define _mm_mask_reduce_pd(W, U, A, B) __extension__ ({ \
+ (__m128d)__builtin_ia32_reducepd128_mask((__v2df)(__m128d)(A), (int)(B), \
+ (__v2df)(__m128d)(W), \
+ (__mmask8)(U)); })
+
+#define _mm_maskz_reduce_pd(U, A, B) __extension__ ({ \
+ (__m128d)__builtin_ia32_reducepd128_mask((__v2df)(__m128d)(A), (int)(B), \
+ (__v2df)_mm_setzero_pd(), \
+ (__mmask8)(U)); })
+
+#define _mm256_reduce_pd(A, B) __extension__ ({ \
+ (__m256d)__builtin_ia32_reducepd256_mask((__v4df)(__m256d)(A), (int)(B), \
+ (__v4df)_mm256_setzero_pd(), \
+ (__mmask8)-1); })
+
+#define _mm256_mask_reduce_pd(W, U, A, B) __extension__ ({ \
+ (__m256d)__builtin_ia32_reducepd256_mask((__v4df)(__m256d)(A), (int)(B), \
+ (__v4df)(__m256d)(W), \
+ (__mmask8)(U)); })
+
+#define _mm256_maskz_reduce_pd(U, A, B) __extension__ ({ \
+ (__m256d)__builtin_ia32_reducepd256_mask((__v4df)(__m256d)(A), (int)(B), \
+ (__v4df)_mm256_setzero_pd(), \
+ (__mmask8)(U)); })
+
+#define _mm_reduce_ps(A, B) __extension__ ({ \
+ (__m128)__builtin_ia32_reduceps128_mask((__v4sf)(__m128)(A), (int)(B), \
+ (__v4sf)_mm_setzero_ps(), \
+ (__mmask8)-1); })
+
+#define _mm_mask_reduce_ps(W, U, A, B) __extension__ ({ \
+ (__m128)__builtin_ia32_reduceps128_mask((__v4sf)(__m128)(A), (int)(B), \
+ (__v4sf)(__m128)(W), \
+ (__mmask8)(U)); })
+
+#define _mm_maskz_reduce_ps(U, A, B) __extension__ ({ \
+ (__m128)__builtin_ia32_reduceps128_mask((__v4sf)(__m128)(A), (int)(B), \
+ (__v4sf)_mm_setzero_ps(), \
+ (__mmask8)(U)); })
+
+#define _mm256_reduce_ps(A, B) __extension__ ({ \
+ (__m256)__builtin_ia32_reduceps256_mask((__v8sf)(__m256)(A), (int)(B), \
+ (__v8sf)_mm256_setzero_ps(), \
+ (__mmask8)-1); })
+
+#define _mm256_mask_reduce_ps(W, U, A, B) __extension__ ({ \
+ (__m256)__builtin_ia32_reduceps256_mask((__v8sf)(__m256)(A), (int)(B), \
+ (__v8sf)(__m256)(W), \
+ (__mmask8)(U)); })
+
+#define _mm256_maskz_reduce_ps(U, A, B) __extension__ ({ \
+ (__m256)__builtin_ia32_reduceps256_mask((__v8sf)(__m256)(A), (int)(B), \
+ (__v8sf)_mm256_setzero_ps(), \
+ (__mmask8)(U)); })
+
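+/*
+ * Mask/vector conversions: _mm*_movepi32_mask (VPMOVD2M) packs the sign
+ * bit of each element into a __mmask8, and _mm*_movm_epi32 (VPMOVM2D)
+ * expands each mask bit to an all-ones or all-zeros element; the epi64
+ * forms below do the same per 64-bit lane.
+ */
+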
+static __inline__ __mmask8 __DEFAULT_FN_ATTRS
+_mm_movepi32_mask (__m128i __A)
+{
+ return (__mmask8) __builtin_ia32_cvtd2mask128 ((__v4si) __A);
+}
+
+static __inline__ __mmask8 __DEFAULT_FN_ATTRS
+_mm256_movepi32_mask (__m256i __A)
+{
+ return (__mmask8) __builtin_ia32_cvtd2mask256 ((__v8si) __A);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_movm_epi32 (__mmask8 __A)
+{
+ return (__m128i) __builtin_ia32_cvtmask2d128 (__A);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS
+_mm256_movm_epi32 (__mmask8 __A)
+{
+ return (__m256i) __builtin_ia32_cvtmask2d256 (__A);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_movm_epi64 (__mmask8 __A)
+{
+ return (__m128i) __builtin_ia32_cvtmask2q128 (__A);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS
+_mm256_movm_epi64 (__mmask8 __A)
+{
+ return (__m256i) __builtin_ia32_cvtmask2q256 (__A);
+}
+
+static __inline__ __mmask8 __DEFAULT_FN_ATTRS
+_mm_movepi64_mask (__m128i __A)
+{
+ return (__mmask8) __builtin_ia32_cvtq2mask128 ((__v2di) __A);
+}
+
+static __inline__ __mmask8 __DEFAULT_FN_ATTRS
+_mm256_movepi64_mask (__m256i __A)
+{
+ return (__mmask8) __builtin_ia32_cvtq2mask256 ((__v4di) __A);
+}
+
+static __inline__ __m256 __DEFAULT_FN_ATTRS
+_mm256_broadcast_f32x2 (__m128 __A)
+{
+ return (__m256) __builtin_ia32_broadcastf32x2_256_mask ((__v4sf) __A,
+ (__v8sf)_mm256_undefined_ps(),
+ (__mmask8) -1);
+}
+
+static __inline__ __m256 __DEFAULT_FN_ATTRS
+_mm256_mask_broadcast_f32x2 (__m256 __O, __mmask8 __M, __m128 __A)
+{
+ return (__m256) __builtin_ia32_broadcastf32x2_256_mask ((__v4sf) __A,
+ (__v8sf) __O,
+ __M);
+}
+
+static __inline__ __m256 __DEFAULT_FN_ATTRS
+_mm256_maskz_broadcast_f32x2 (__mmask8 __M, __m128 __A)
+{
+ return (__m256) __builtin_ia32_broadcastf32x2_256_mask ((__v4sf) __A,
+ (__v8sf) _mm256_setzero_ps (),
+ __M);
+}
+
+static __inline__ __m256d __DEFAULT_FN_ATTRS
+_mm256_broadcast_f64x2 (__m128d __A)
+{
+ return (__m256d) __builtin_ia32_broadcastf64x2_256_mask ((__v2df) __A,
+ (__v4df)_mm256_undefined_pd(),
+ (__mmask8) -1);
+}
+
+static __inline__ __m256d __DEFAULT_FN_ATTRS
+_mm256_mask_broadcast_f64x2 (__m256d __O, __mmask8 __M, __m128d __A)
+{
+ return (__m256d) __builtin_ia32_broadcastf64x2_256_mask ((__v2df) __A,
+ (__v4df) __O,
+ __M);
+}
+
+static __inline__ __m256d __DEFAULT_FN_ATTRS
+_mm256_maskz_broadcast_f64x2 (__mmask8 __M, __m128d __A)
+{
+ return (__m256d) __builtin_ia32_broadcastf64x2_256_mask ((__v2df) __A,
+							 (__v4df) _mm256_setzero_pd (),
+ __M);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_broadcast_i32x2 (__m128i __A)
+{
+ return (__m128i) __builtin_ia32_broadcasti32x2_128_mask ((__v4si) __A,
+ (__v4si)_mm_undefined_si128(),
+ (__mmask8) -1);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_mask_broadcast_i32x2 (__m128i __O, __mmask8 __M, __m128i __A)
+{
+ return (__m128i) __builtin_ia32_broadcasti32x2_128_mask ((__v4si) __A,
+ (__v4si) __O,
+ __M);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_maskz_broadcast_i32x2 (__mmask8 __M, __m128i __A)
+{
+ return (__m128i) __builtin_ia32_broadcasti32x2_128_mask ((__v4si) __A,
+ (__v4si) _mm_setzero_si128 (),
+ __M);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS
+_mm256_broadcast_i32x2 (__m128i __A)
+{
+ return (__m256i) __builtin_ia32_broadcasti32x2_256_mask ((__v4si) __A,
+ (__v8si)_mm256_undefined_si256(),
+ (__mmask8) -1);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS
+_mm256_mask_broadcast_i32x2 (__m256i __O, __mmask8 __M, __m128i __A)
+{
+ return (__m256i) __builtin_ia32_broadcasti32x2_256_mask ((__v4si) __A,
+ (__v8si) __O,
+ __M);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS
+_mm256_maskz_broadcast_i32x2 (__mmask8 __M, __m128i __A)
+{
+ return (__m256i) __builtin_ia32_broadcasti32x2_256_mask ((__v4si) __A,
+ (__v8si) _mm256_setzero_si256 (),
+ __M);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS
+_mm256_broadcast_i64x2 (__m128i __A)
+{
+ return (__m256i) __builtin_ia32_broadcasti64x2_256_mask ((__v2di) __A,
+ (__v4di)_mm256_undefined_si256(),
+ (__mmask8) -1);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS
+_mm256_mask_broadcast_i64x2 (__m256i __O, __mmask8 __M, __m128i __A)
+{
+ return (__m256i) __builtin_ia32_broadcasti64x2_256_mask ((__v2di) __A,
+ (__v4di) __O,
+ __M);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS
+_mm256_maskz_broadcast_i64x2 (__mmask8 __M, __m128i __A)
+{
+ return (__m256i) __builtin_ia32_broadcasti64x2_256_mask ((__v2di) __A,
+ (__v4di) _mm256_setzero_si256 (),
+ __M);
+}
+
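+/*
+ * The extract/insert macros below select a 128-bit lane of the 256-bit
+ * operand; imm must be a compile-time constant (0 or 1 for a 256-bit
+ * source), which is why these are macros rather than functions.
+ */
+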
+#define _mm256_extractf64x2_pd(A, imm) __extension__ ({ \
+ (__m128d)__builtin_ia32_extractf64x2_256_mask((__v4df)(__m256d)(A), \
+ (int)(imm), \
+ (__v2df)_mm_setzero_pd(), \
+ (__mmask8)-1); })
+
+#define _mm256_mask_extractf64x2_pd(W, U, A, imm) __extension__ ({ \
+ (__m128d)__builtin_ia32_extractf64x2_256_mask((__v4df)(__m256d)(A), \
+ (int)(imm), \
+ (__v2df)(__m128d)(W), \
+ (__mmask8)(U)); })
+
+#define _mm256_maskz_extractf64x2_pd(U, A, imm) __extension__ ({ \
+ (__m128d)__builtin_ia32_extractf64x2_256_mask((__v4df)(__m256d)(A), \
+ (int)(imm), \
+ (__v2df)_mm_setzero_pd(), \
+ (__mmask8)(U)); })
+
+#define _mm256_extracti64x2_epi64(A, imm) __extension__ ({ \
+ (__m128i)__builtin_ia32_extracti64x2_256_mask((__v4di)(__m256i)(A), \
+ (int)(imm), \
+ (__v2di)_mm_setzero_di(), \
+ (__mmask8)-1); })
+
+#define _mm256_mask_extracti64x2_epi64(W, U, A, imm) __extension__ ({ \
+ (__m128i)__builtin_ia32_extracti64x2_256_mask((__v4di)(__m256i)(A), \
+ (int)(imm), \
+ (__v2di)(__m128i)(W), \
+ (__mmask8)(U)); })
+
+#define _mm256_maskz_extracti64x2_epi64(U, A, imm) __extension__ ({ \
+ (__m128i)__builtin_ia32_extracti64x2_256_mask((__v4di)(__m256i)(A), \
+ (int)(imm), \
+ (__v2di)_mm_setzero_di(), \
+ (__mmask8)(U)); })
+
+#define _mm256_insertf64x2(A, B, imm) __extension__ ({ \
+ (__m256d)__builtin_ia32_insertf64x2_256_mask((__v4df)(__m256d)(A), \
+ (__v2df)(__m128d)(B), \
+ (int)(imm), \
+ (__v4df)_mm256_setzero_pd(), \
+ (__mmask8)-1); })
+
+#define _mm256_mask_insertf64x2(W, U, A, B, imm) __extension__ ({ \
+ (__m256d)__builtin_ia32_insertf64x2_256_mask((__v4df)(__m256d)(A), \
+ (__v2df)(__m128d)(B), \
+ (int)(imm), \
+ (__v4df)(__m256d)(W), \
+ (__mmask8)(U)); })
+
+#define _mm256_maskz_insertf64x2(U, A, B, imm) __extension__ ({ \
+ (__m256d)__builtin_ia32_insertf64x2_256_mask((__v4df)(__m256d)(A), \
+ (__v2df)(__m128d)(B), \
+ (int)(imm), \
+ (__v4df)_mm256_setzero_pd(), \
+ (__mmask8)(U)); })
+
+#define _mm256_inserti64x2(A, B, imm) __extension__ ({ \
+ (__m256i)__builtin_ia32_inserti64x2_256_mask((__v4di)(__m256i)(A), \
+ (__v2di)(__m128i)(B), \
+ (int)(imm), \
+ (__v4di)_mm256_setzero_si256(), \
+ (__mmask8)-1); })
+
+#define _mm256_mask_inserti64x2(W, U, A, B, imm) __extension__ ({ \
+ (__m256i)__builtin_ia32_inserti64x2_256_mask((__v4di)(__m256i)(A), \
+ (__v2di)(__m128i)(B), \
+ (int)(imm), \
+ (__v4di)(__m256i)(W), \
+ (__mmask8)(U)); })
+
+#define _mm256_maskz_inserti64x2(U, A, B, imm) __extension__ ({ \
+ (__m256i)__builtin_ia32_inserti64x2_256_mask((__v4di)(__m256i)(A), \
+ (__v2di)(__m128i)(B), \
+ (int)(imm), \
+ (__v4di)_mm256_setzero_si256(), \
+ (__mmask8)(U)); })
+
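+/*
+ * _mm*_fpclass_* test each element against a set of floating-point
+ * categories selected by imm (quiet/signaling NaN, positive/negative
+ * zero, positive/negative infinity, denormal, negative finite); the
+ * returned mask has a bit set for every element matching any selected
+ * category.
+ */
+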
+#define _mm_mask_fpclass_pd_mask(U, A, imm) __extension__ ({ \
+ (__mmask8)__builtin_ia32_fpclasspd128_mask((__v2df)(__m128d)(A), (int)(imm), \
+ (__mmask8)(U)); })
+
+#define _mm_fpclass_pd_mask(A, imm) __extension__ ({ \
+ (__mmask8)__builtin_ia32_fpclasspd128_mask((__v2df)(__m128d)(A), (int)(imm), \
+ (__mmask8)-1); })
+
+#define _mm256_mask_fpclass_pd_mask(U, A, imm) __extension__ ({ \
+ (__mmask8)__builtin_ia32_fpclasspd256_mask((__v4df)(__m256d)(A), (int)(imm), \
+ (__mmask8)(U)); })
+
+#define _mm256_fpclass_pd_mask(A, imm) __extension__ ({ \
+ (__mmask8)__builtin_ia32_fpclasspd256_mask((__v4df)(__m256d)(A), (int)(imm), \
+ (__mmask8)-1); })
+
+#define _mm_mask_fpclass_ps_mask(U, A, imm) __extension__ ({ \
+ (__mmask8)__builtin_ia32_fpclassps128_mask((__v4sf)(__m128)(A), (int)(imm), \
+ (__mmask8)(U)); })
+
+#define _mm_fpclass_ps_mask(A, imm) __extension__ ({ \
+ (__mmask8)__builtin_ia32_fpclassps128_mask((__v4sf)(__m128)(A), (int)(imm), \
+ (__mmask8)-1); })
+
+#define _mm256_mask_fpclass_ps_mask(U, A, imm) __extension__ ({ \
+ (__mmask8)__builtin_ia32_fpclassps256_mask((__v8sf)(__m256)(A), (int)(imm), \
+ (__mmask8)(U)); })
+
+#define _mm256_fpclass_ps_mask(A, imm) __extension__ ({ \
+ (__mmask8)__builtin_ia32_fpclassps256_mask((__v8sf)(__m256)(A), (int)(imm), \
+ (__mmask8)-1); })
+
+#undef __DEFAULT_FN_ATTRS
+
+#endif
diff --git a/current/clang-include/avx512vlintrin.h b/current/clang-include/avx512vlintrin.h
new file mode 100644
index 0000000..295ce29
--- /dev/null
+++ b/current/clang-include/avx512vlintrin.h
@@ -0,0 +1,9170 @@
+/*===---- avx512vlintrin.h - AVX512VL intrinsics ---------------------------===
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+ * THE SOFTWARE.
+ *
+ *===-----------------------------------------------------------------------===
+ */
+
+#ifndef __IMMINTRIN_H
+#error "Never use <avx512vlintrin.h> directly; include <immintrin.h> instead."
+#endif
+
+#ifndef __AVX512VLINTRIN_H
+#define __AVX512VLINTRIN_H
+
+#define __DEFAULT_FN_ATTRS __attribute__((__always_inline__, __nodebug__, __target__("avx512vl")))
+
+/* Doesn't require avx512vl, used in avx512dqintrin.h */
+static __inline __m128i __attribute__((__always_inline__, __nodebug__, __target__("avx512f")))
+_mm_setzero_di(void) {
+ return (__m128i)(__v2di){ 0LL, 0LL};
+}
+
+/* Integer compare */
+
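+/*
+ * The third argument to the __builtin_ia32_[u]cmp{d,q}*_mask builtins is
+ * the comparison predicate: 0 = eq, 1 = lt, 2 = le, 4 = ne, 5 = not-lt
+ * (ge), 6 = not-le (gt).  The ucmp* builtins compare unsigned, the cmp*
+ * builtins signed; e.g. _mm_cmpge_epi32_mask below passes predicate 5.
+ */
+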
+static __inline__ __mmask8 __DEFAULT_FN_ATTRS
+_mm_cmpeq_epi32_mask(__m128i __a, __m128i __b) {
+ return (__mmask8)__builtin_ia32_pcmpeqd128_mask((__v4si)__a, (__v4si)__b,
+ (__mmask8)-1);
+}
+
+static __inline__ __mmask8 __DEFAULT_FN_ATTRS
+_mm_mask_cmpeq_epi32_mask(__mmask8 __u, __m128i __a, __m128i __b) {
+ return (__mmask8)__builtin_ia32_pcmpeqd128_mask((__v4si)__a, (__v4si)__b,
+ __u);
+}
+
+static __inline__ __mmask8 __DEFAULT_FN_ATTRS
+_mm_cmpeq_epu32_mask(__m128i __a, __m128i __b) {
+ return (__mmask8)__builtin_ia32_ucmpd128_mask((__v4si)__a, (__v4si)__b, 0,
+ (__mmask8)-1);
+}
+
+static __inline__ __mmask8 __DEFAULT_FN_ATTRS
+_mm_mask_cmpeq_epu32_mask(__mmask8 __u, __m128i __a, __m128i __b) {
+ return (__mmask8)__builtin_ia32_ucmpd128_mask((__v4si)__a, (__v4si)__b, 0,
+ __u);
+}
+
+static __inline__ __mmask8 __DEFAULT_FN_ATTRS
+_mm256_cmpeq_epi32_mask(__m256i __a, __m256i __b) {
+ return (__mmask8)__builtin_ia32_pcmpeqd256_mask((__v8si)__a, (__v8si)__b,
+ (__mmask8)-1);
+}
+
+static __inline__ __mmask8 __DEFAULT_FN_ATTRS
+_mm256_mask_cmpeq_epi32_mask(__mmask8 __u, __m256i __a, __m256i __b) {
+ return (__mmask8)__builtin_ia32_pcmpeqd256_mask((__v8si)__a, (__v8si)__b,
+ __u);
+}
+
+static __inline__ __mmask8 __DEFAULT_FN_ATTRS
+_mm256_cmpeq_epu32_mask(__m256i __a, __m256i __b) {
+ return (__mmask8)__builtin_ia32_ucmpd256_mask((__v8si)__a, (__v8si)__b, 0,
+ (__mmask8)-1);
+}
+
+static __inline__ __mmask8 __DEFAULT_FN_ATTRS
+_mm256_mask_cmpeq_epu32_mask(__mmask8 __u, __m256i __a, __m256i __b) {
+ return (__mmask8)__builtin_ia32_ucmpd256_mask((__v8si)__a, (__v8si)__b, 0,
+ __u);
+}
+
+static __inline__ __mmask8 __DEFAULT_FN_ATTRS
+_mm_cmpeq_epi64_mask(__m128i __a, __m128i __b) {
+ return (__mmask8)__builtin_ia32_pcmpeqq128_mask((__v2di)__a, (__v2di)__b,
+ (__mmask8)-1);
+}
+
+static __inline__ __mmask8 __DEFAULT_FN_ATTRS
+_mm_mask_cmpeq_epi64_mask(__mmask8 __u, __m128i __a, __m128i __b) {
+ return (__mmask8)__builtin_ia32_pcmpeqq128_mask((__v2di)__a, (__v2di)__b,
+ __u);
+}
+
+static __inline__ __mmask8 __DEFAULT_FN_ATTRS
+_mm_cmpeq_epu64_mask(__m128i __a, __m128i __b) {
+ return (__mmask8)__builtin_ia32_ucmpq128_mask((__v2di)__a, (__v2di)__b, 0,
+ (__mmask8)-1);
+}
+
+static __inline__ __mmask8 __DEFAULT_FN_ATTRS
+_mm_mask_cmpeq_epu64_mask(__mmask8 __u, __m128i __a, __m128i __b) {
+ return (__mmask8)__builtin_ia32_ucmpq128_mask((__v2di)__a, (__v2di)__b, 0,
+ __u);
+}
+
+static __inline__ __mmask8 __DEFAULT_FN_ATTRS
+_mm256_cmpeq_epi64_mask(__m256i __a, __m256i __b) {
+ return (__mmask8)__builtin_ia32_pcmpeqq256_mask((__v4di)__a, (__v4di)__b,
+ (__mmask8)-1);
+}
+
+static __inline__ __mmask8 __DEFAULT_FN_ATTRS
+_mm256_mask_cmpeq_epi64_mask(__mmask8 __u, __m256i __a, __m256i __b) {
+ return (__mmask8)__builtin_ia32_pcmpeqq256_mask((__v4di)__a, (__v4di)__b,
+ __u);
+}
+
+static __inline__ __mmask8 __DEFAULT_FN_ATTRS
+_mm256_cmpeq_epu64_mask(__m256i __a, __m256i __b) {
+ return (__mmask8)__builtin_ia32_ucmpq256_mask((__v4di)__a, (__v4di)__b, 0,
+ (__mmask8)-1);
+}
+
+static __inline__ __mmask8 __DEFAULT_FN_ATTRS
+_mm256_mask_cmpeq_epu64_mask(__mmask8 __u, __m256i __a, __m256i __b) {
+ return (__mmask8)__builtin_ia32_ucmpq256_mask((__v4di)__a, (__v4di)__b, 0,
+ __u);
+}
+
+static __inline__ __mmask8 __DEFAULT_FN_ATTRS
+_mm_cmpge_epi32_mask(__m128i __a, __m128i __b) {
+ return (__mmask8)__builtin_ia32_cmpd128_mask((__v4si)__a, (__v4si)__b, 5,
+ (__mmask8)-1);
+}
+
+static __inline__ __mmask8 __DEFAULT_FN_ATTRS
+_mm_mask_cmpge_epi32_mask(__mmask8 __u, __m128i __a, __m128i __b) {
+ return (__mmask8)__builtin_ia32_cmpd128_mask((__v4si)__a, (__v4si)__b, 5,
+ __u);
+}
+
+static __inline__ __mmask8 __DEFAULT_FN_ATTRS
+_mm_cmpge_epu32_mask(__m128i __a, __m128i __b) {
+ return (__mmask8)__builtin_ia32_ucmpd128_mask((__v4si)__a, (__v4si)__b, 5,
+ (__mmask8)-1);
+}
+
+static __inline__ __mmask8 __DEFAULT_FN_ATTRS
+_mm_mask_cmpge_epu32_mask(__mmask8 __u, __m128i __a, __m128i __b) {
+ return (__mmask8)__builtin_ia32_ucmpd128_mask((__v4si)__a, (__v4si)__b, 5,
+ __u);
+}
+
+static __inline__ __mmask8 __DEFAULT_FN_ATTRS
+_mm256_cmpge_epi32_mask(__m256i __a, __m256i __b) {
+ return (__mmask8)__builtin_ia32_cmpd256_mask((__v8si)__a, (__v8si)__b, 5,
+ (__mmask8)-1);
+}
+
+static __inline__ __mmask8 __DEFAULT_FN_ATTRS
+_mm256_mask_cmpge_epi32_mask(__mmask8 __u, __m256i __a, __m256i __b) {
+ return (__mmask8)__builtin_ia32_cmpd256_mask((__v8si)__a, (__v8si)__b, 5,
+ __u);
+}
+
+static __inline__ __mmask8 __DEFAULT_FN_ATTRS
+_mm256_cmpge_epu32_mask(__m256i __a, __m256i __b) {
+ return (__mmask8)__builtin_ia32_ucmpd256_mask((__v8si)__a, (__v8si)__b, 5,
+ (__mmask8)-1);
+}
+
+static __inline__ __mmask8 __DEFAULT_FN_ATTRS
+_mm256_mask_cmpge_epu32_mask(__mmask8 __u, __m256i __a, __m256i __b) {
+ return (__mmask8)__builtin_ia32_ucmpd256_mask((__v8si)__a, (__v8si)__b, 5,
+ __u);
+}
+
+static __inline__ __mmask8 __DEFAULT_FN_ATTRS
+_mm_cmpge_epi64_mask(__m128i __a, __m128i __b) {
+ return (__mmask8)__builtin_ia32_cmpq128_mask((__v2di)__a, (__v2di)__b, 5,
+ (__mmask8)-1);
+}
+
+static __inline__ __mmask8 __DEFAULT_FN_ATTRS
+_mm_mask_cmpge_epi64_mask(__mmask8 __u, __m128i __a, __m128i __b) {
+ return (__mmask8)__builtin_ia32_cmpq128_mask((__v2di)__a, (__v2di)__b, 5,
+ __u);
+}
+
+static __inline__ __mmask8 __DEFAULT_FN_ATTRS
+_mm_cmpge_epu64_mask(__m128i __a, __m128i __b) {
+ return (__mmask8)__builtin_ia32_ucmpq128_mask((__v2di)__a, (__v2di)__b, 5,
+ (__mmask8)-1);
+}
+
+static __inline__ __mmask8 __DEFAULT_FN_ATTRS
+_mm_mask_cmpge_epu64_mask(__mmask8 __u, __m128i __a, __m128i __b) {
+ return (__mmask8)__builtin_ia32_ucmpq128_mask((__v2di)__a, (__v2di)__b, 5,
+ __u);
+}
+
+static __inline__ __mmask8 __DEFAULT_FN_ATTRS
+_mm256_cmpge_epi64_mask(__m256i __a, __m256i __b) {
+ return (__mmask8)__builtin_ia32_cmpq256_mask((__v4di)__a, (__v4di)__b, 5,
+ (__mmask8)-1);
+}
+
+static __inline__ __mmask8 __DEFAULT_FN_ATTRS
+_mm256_mask_cmpge_epi64_mask(__mmask8 __u, __m256i __a, __m256i __b) {
+ return (__mmask8)__builtin_ia32_cmpq256_mask((__v4di)__a, (__v4di)__b, 5,
+ __u);
+}
+
+static __inline__ __mmask8 __DEFAULT_FN_ATTRS
+_mm256_cmpge_epu64_mask(__m256i __a, __m256i __b) {
+ return (__mmask8)__builtin_ia32_ucmpq256_mask((__v4di)__a, (__v4di)__b, 5,
+ (__mmask8)-1);
+}
+
+static __inline__ __mmask8 __DEFAULT_FN_ATTRS
+_mm256_mask_cmpge_epu64_mask(__mmask8 __u, __m256i __a, __m256i __b) {
+ return (__mmask8)__builtin_ia32_ucmpq256_mask((__v4di)__a, (__v4di)__b, 5,
+ __u);
+}
+
+static __inline__ __mmask8 __DEFAULT_FN_ATTRS
+_mm_cmpgt_epi32_mask(__m128i __a, __m128i __b) {
+ return (__mmask8)__builtin_ia32_pcmpgtd128_mask((__v4si)__a, (__v4si)__b,
+ (__mmask8)-1);
+}
+
+static __inline__ __mmask8 __DEFAULT_FN_ATTRS
+_mm_mask_cmpgt_epi32_mask(__mmask8 __u, __m128i __a, __m128i __b) {
+ return (__mmask8)__builtin_ia32_pcmpgtd128_mask((__v4si)__a, (__v4si)__b,
+ __u);
+}
+
+static __inline__ __mmask8 __DEFAULT_FN_ATTRS
+_mm_cmpgt_epu32_mask(__m128i __a, __m128i __b) {
+ return (__mmask8)__builtin_ia32_ucmpd128_mask((__v4si)__a, (__v4si)__b, 6,
+ (__mmask8)-1);
+}
+
+static __inline__ __mmask8 __DEFAULT_FN_ATTRS
+_mm_mask_cmpgt_epu32_mask(__mmask8 __u, __m128i __a, __m128i __b) {
+ return (__mmask8)__builtin_ia32_ucmpd128_mask((__v4si)__a, (__v4si)__b, 6,
+ __u);
+}
+
+static __inline__ __mmask8 __DEFAULT_FN_ATTRS
+_mm256_cmpgt_epi32_mask(__m256i __a, __m256i __b) {
+ return (__mmask8)__builtin_ia32_pcmpgtd256_mask((__v8si)__a, (__v8si)__b,
+ (__mmask8)-1);
+}
+
+static __inline__ __mmask8 __DEFAULT_FN_ATTRS
+_mm256_mask_cmpgt_epi32_mask(__mmask8 __u, __m256i __a, __m256i __b) {
+ return (__mmask8)__builtin_ia32_pcmpgtd256_mask((__v8si)__a, (__v8si)__b,
+ __u);
+}
+
+static __inline__ __mmask8 __DEFAULT_FN_ATTRS
+_mm256_cmpgt_epu32_mask(__m256i __a, __m256i __b) {
+ return (__mmask8)__builtin_ia32_ucmpd256_mask((__v8si)__a, (__v8si)__b, 6,
+ (__mmask8)-1);
+}
+
+static __inline__ __mmask8 __DEFAULT_FN_ATTRS
+_mm256_mask_cmpgt_epu32_mask(__mmask8 __u, __m256i __a, __m256i __b) {
+ return (__mmask8)__builtin_ia32_ucmpd256_mask((__v8si)__a, (__v8si)__b, 6,
+ __u);
+}
+
+static __inline__ __mmask8 __DEFAULT_FN_ATTRS
+_mm_cmpgt_epi64_mask(__m128i __a, __m128i __b) {
+ return (__mmask8)__builtin_ia32_pcmpgtq128_mask((__v2di)__a, (__v2di)__b,
+ (__mmask8)-1);
+}
+
+static __inline__ __mmask8 __DEFAULT_FN_ATTRS
+_mm_mask_cmpgt_epi64_mask(__mmask8 __u, __m128i __a, __m128i __b) {
+ return (__mmask8)__builtin_ia32_pcmpgtq128_mask((__v2di)__a, (__v2di)__b,
+ __u);
+}
+
+static __inline__ __mmask8 __DEFAULT_FN_ATTRS
+_mm_cmpgt_epu64_mask(__m128i __a, __m128i __b) {
+ return (__mmask8)__builtin_ia32_ucmpq128_mask((__v2di)__a, (__v2di)__b, 6,
+ (__mmask8)-1);
+}
+
+static __inline__ __mmask8 __DEFAULT_FN_ATTRS
+_mm_mask_cmpgt_epu64_mask(__mmask8 __u, __m128i __a, __m128i __b) {
+ return (__mmask8)__builtin_ia32_ucmpq128_mask((__v2di)__a, (__v2di)__b, 6,
+ __u);
+}
+
+static __inline__ __mmask8 __DEFAULT_FN_ATTRS
+_mm256_cmpgt_epi64_mask(__m256i __a, __m256i __b) {
+ return (__mmask8)__builtin_ia32_pcmpgtq256_mask((__v4di)__a, (__v4di)__b,
+ (__mmask8)-1);
+}
+
+static __inline__ __mmask8 __DEFAULT_FN_ATTRS
+_mm256_mask_cmpgt_epi64_mask(__mmask8 __u, __m256i __a, __m256i __b) {
+ return (__mmask8)__builtin_ia32_pcmpgtq256_mask((__v4di)__a, (__v4di)__b,
+ __u);
+}
+
+static __inline__ __mmask8 __DEFAULT_FN_ATTRS
+_mm256_cmpgt_epu64_mask(__m256i __a, __m256i __b) {
+ return (__mmask8)__builtin_ia32_ucmpq256_mask((__v4di)__a, (__v4di)__b, 6,
+ (__mmask8)-1);
+}
+
+static __inline__ __mmask8 __DEFAULT_FN_ATTRS
+_mm256_mask_cmpgt_epu64_mask(__mmask8 __u, __m256i __a, __m256i __b) {
+ return (__mmask8)__builtin_ia32_ucmpq256_mask((__v4di)__a, (__v4di)__b, 6,
+ __u);
+}
+
+static __inline__ __mmask8 __DEFAULT_FN_ATTRS
+_mm_cmple_epi32_mask(__m128i __a, __m128i __b) {
+ return (__mmask8)__builtin_ia32_cmpd128_mask((__v4si)__a, (__v4si)__b, 2,
+ (__mmask8)-1);
+}
+
+static __inline__ __mmask8 __DEFAULT_FN_ATTRS
+_mm_mask_cmple_epi32_mask(__mmask8 __u, __m128i __a, __m128i __b) {
+ return (__mmask8)__builtin_ia32_cmpd128_mask((__v4si)__a, (__v4si)__b, 2,
+ __u);
+}
+
+static __inline__ __mmask8 __DEFAULT_FN_ATTRS
+_mm_cmple_epu32_mask(__m128i __a, __m128i __b) {
+ return (__mmask8)__builtin_ia32_ucmpd128_mask((__v4si)__a, (__v4si)__b, 2,
+ (__mmask8)-1);
+}
+
+static __inline__ __mmask8 __DEFAULT_FN_ATTRS
+_mm_mask_cmple_epu32_mask(__mmask8 __u, __m128i __a, __m128i __b) {
+ return (__mmask8)__builtin_ia32_ucmpd128_mask((__v4si)__a, (__v4si)__b, 2,
+ __u);
+}
+
+static __inline__ __mmask8 __DEFAULT_FN_ATTRS
+_mm256_cmple_epi32_mask(__m256i __a, __m256i __b) {
+ return (__mmask8)__builtin_ia32_cmpd256_mask((__v8si)__a, (__v8si)__b, 2,
+ (__mmask8)-1);
+}
+
+static __inline__ __mmask8 __DEFAULT_FN_ATTRS
+_mm256_mask_cmple_epi32_mask(__mmask8 __u, __m256i __a, __m256i __b) {
+ return (__mmask8)__builtin_ia32_cmpd256_mask((__v8si)__a, (__v8si)__b, 2,
+ __u);
+}
+
+static __inline__ __mmask8 __DEFAULT_FN_ATTRS
+_mm256_cmple_epu32_mask(__m256i __a, __m256i __b) {
+ return (__mmask8)__builtin_ia32_ucmpd256_mask((__v8si)__a, (__v8si)__b, 2,
+ (__mmask8)-1);
+}
+
+static __inline__ __mmask8 __DEFAULT_FN_ATTRS
+_mm256_mask_cmple_epu32_mask(__mmask8 __u, __m256i __a, __m256i __b) {
+ return (__mmask8)__builtin_ia32_ucmpd256_mask((__v8si)__a, (__v8si)__b, 2,
+ __u);
+}
+
+static __inline__ __mmask8 __DEFAULT_FN_ATTRS
+_mm_cmple_epi64_mask(__m128i __a, __m128i __b) {
+ return (__mmask8)__builtin_ia32_cmpq128_mask((__v2di)__a, (__v2di)__b, 2,
+ (__mmask8)-1);
+}
+
+static __inline__ __mmask8 __DEFAULT_FN_ATTRS
+_mm_mask_cmple_epi64_mask(__mmask8 __u, __m128i __a, __m128i __b) {
+ return (__mmask8)__builtin_ia32_cmpq128_mask((__v2di)__a, (__v2di)__b, 2,
+ __u);
+}
+
+static __inline__ __mmask8 __DEFAULT_FN_ATTRS
+_mm_cmple_epu64_mask(__m128i __a, __m128i __b) {
+ return (__mmask8)__builtin_ia32_ucmpq128_mask((__v2di)__a, (__v2di)__b, 2,
+ (__mmask8)-1);
+}
+
+static __inline__ __mmask8 __DEFAULT_FN_ATTRS
+_mm_mask_cmple_epu64_mask(__mmask8 __u, __m128i __a, __m128i __b) {
+ return (__mmask8)__builtin_ia32_ucmpq128_mask((__v2di)__a, (__v2di)__b, 2,
+ __u);
+}
+
+static __inline__ __mmask8 __DEFAULT_FN_ATTRS
+_mm256_cmple_epi64_mask(__m256i __a, __m256i __b) {
+ return (__mmask8)__builtin_ia32_cmpq256_mask((__v4di)__a, (__v4di)__b, 2,
+ (__mmask8)-1);
+}
+
+static __inline__ __mmask8 __DEFAULT_FN_ATTRS
+_mm256_mask_cmple_epi64_mask(__mmask8 __u, __m256i __a, __m256i __b) {
+ return (__mmask8)__builtin_ia32_cmpq256_mask((__v4di)__a, (__v4di)__b, 2,
+ __u);
+}
+
+static __inline__ __mmask8 __DEFAULT_FN_ATTRS
+_mm256_cmple_epu64_mask(__m256i __a, __m256i __b) {
+ return (__mmask8)__builtin_ia32_ucmpq256_mask((__v4di)__a, (__v4di)__b, 2,
+ (__mmask8)-1);
+}
+
+static __inline__ __mmask8 __DEFAULT_FN_ATTRS
+_mm256_mask_cmple_epu64_mask(__mmask8 __u, __m256i __a, __m256i __b) {
+ return (__mmask8)__builtin_ia32_ucmpq256_mask((__v4di)__a, (__v4di)__b, 2,
+ __u);
+}
+
+static __inline__ __mmask8 __DEFAULT_FN_ATTRS
+_mm_cmplt_epi32_mask(__m128i __a, __m128i __b) {
+ return (__mmask8)__builtin_ia32_cmpd128_mask((__v4si)__a, (__v4si)__b, 1,
+ (__mmask8)-1);
+}
+
+static __inline__ __mmask8 __DEFAULT_FN_ATTRS
+_mm_mask_cmplt_epi32_mask(__mmask8 __u, __m128i __a, __m128i __b) {
+ return (__mmask8)__builtin_ia32_cmpd128_mask((__v4si)__a, (__v4si)__b, 1,
+ __u);
+}
+
+static __inline__ __mmask8 __DEFAULT_FN_ATTRS
+_mm_cmplt_epu32_mask(__m128i __a, __m128i __b) {
+ return (__mmask8)__builtin_ia32_ucmpd128_mask((__v4si)__a, (__v4si)__b, 1,
+ (__mmask8)-1);
+}
+
+static __inline__ __mmask8 __DEFAULT_FN_ATTRS
+_mm_mask_cmplt_epu32_mask(__mmask8 __u, __m128i __a, __m128i __b) {
+ return (__mmask8)__builtin_ia32_ucmpd128_mask((__v4si)__a, (__v4si)__b, 1,
+ __u);
+}
+
+static __inline__ __mmask8 __DEFAULT_FN_ATTRS
+_mm256_cmplt_epi32_mask(__m256i __a, __m256i __b) {
+ return (__mmask8)__builtin_ia32_cmpd256_mask((__v8si)__a, (__v8si)__b, 1,
+ (__mmask8)-1);
+}
+
+static __inline__ __mmask8 __DEFAULT_FN_ATTRS
+_mm256_mask_cmplt_epi32_mask(__mmask8 __u, __m256i __a, __m256i __b) {
+ return (__mmask8)__builtin_ia32_cmpd256_mask((__v8si)__a, (__v8si)__b, 1,
+ __u);
+}
+
+static __inline__ __mmask8 __DEFAULT_FN_ATTRS
+_mm256_cmplt_epu32_mask(__m256i __a, __m256i __b) {
+ return (__mmask8)__builtin_ia32_ucmpd256_mask((__v8si)__a, (__v8si)__b, 1,
+ (__mmask8)-1);
+}
+
+static __inline__ __mmask8 __DEFAULT_FN_ATTRS
+_mm256_mask_cmplt_epu32_mask(__mmask8 __u, __m256i __a, __m256i __b) {
+ return (__mmask8)__builtin_ia32_ucmpd256_mask((__v8si)__a, (__v8si)__b, 1,
+ __u);
+}
+
+static __inline__ __mmask8 __DEFAULT_FN_ATTRS
+_mm_cmplt_epi64_mask(__m128i __a, __m128i __b) {
+ return (__mmask8)__builtin_ia32_cmpq128_mask((__v2di)__a, (__v2di)__b, 1,
+ (__mmask8)-1);
+}
+
+static __inline__ __mmask8 __DEFAULT_FN_ATTRS
+_mm_mask_cmplt_epi64_mask(__mmask8 __u, __m128i __a, __m128i __b) {
+ return (__mmask8)__builtin_ia32_cmpq128_mask((__v2di)__a, (__v2di)__b, 1,
+ __u);
+}
+
+static __inline__ __mmask8 __DEFAULT_FN_ATTRS
+_mm_cmplt_epu64_mask(__m128i __a, __m128i __b) {
+ return (__mmask8)__builtin_ia32_ucmpq128_mask((__v2di)__a, (__v2di)__b, 1,
+ (__mmask8)-1);
+}
+
+static __inline__ __mmask8 __DEFAULT_FN_ATTRS
+_mm_mask_cmplt_epu64_mask(__mmask8 __u, __m128i __a, __m128i __b) {
+ return (__mmask8)__builtin_ia32_ucmpq128_mask((__v2di)__a, (__v2di)__b, 1,
+ __u);
+}
+
+static __inline__ __mmask8 __DEFAULT_FN_ATTRS
+_mm256_cmplt_epi64_mask(__m256i __a, __m256i __b) {
+ return (__mmask8)__builtin_ia32_cmpq256_mask((__v4di)__a, (__v4di)__b, 1,
+ (__mmask8)-1);
+}
+
+static __inline__ __mmask8 __DEFAULT_FN_ATTRS
+_mm256_mask_cmplt_epi64_mask(__mmask8 __u, __m256i __a, __m256i __b) {
+ return (__mmask8)__builtin_ia32_cmpq256_mask((__v4di)__a, (__v4di)__b, 1,
+ __u);
+}
+
+static __inline__ __mmask8 __DEFAULT_FN_ATTRS
+_mm256_cmplt_epu64_mask(__m256i __a, __m256i __b) {
+ return (__mmask8)__builtin_ia32_ucmpq256_mask((__v4di)__a, (__v4di)__b, 1,
+ (__mmask8)-1);
+}
+
+static __inline__ __mmask8 __DEFAULT_FN_ATTRS
+_mm256_mask_cmplt_epu64_mask(__mmask8 __u, __m256i __a, __m256i __b) {
+ return (__mmask8)__builtin_ia32_ucmpq256_mask((__v4di)__a, (__v4di)__b, 1,
+ __u);
+}
+
+static __inline__ __mmask8 __DEFAULT_FN_ATTRS
+_mm_cmpneq_epi32_mask(__m128i __a, __m128i __b) {
+ return (__mmask8)__builtin_ia32_cmpd128_mask((__v4si)__a, (__v4si)__b, 4,
+ (__mmask8)-1);
+}
+
+static __inline__ __mmask8 __DEFAULT_FN_ATTRS
+_mm_mask_cmpneq_epi32_mask(__mmask8 __u, __m128i __a, __m128i __b) {
+ return (__mmask8)__builtin_ia32_cmpd128_mask((__v4si)__a, (__v4si)__b, 4,
+ __u);
+}
+
+static __inline__ __mmask8 __DEFAULT_FN_ATTRS
+_mm_cmpneq_epu32_mask(__m128i __a, __m128i __b) {
+ return (__mmask8)__builtin_ia32_ucmpd128_mask((__v4si)__a, (__v4si)__b, 4,
+ (__mmask8)-1);
+}
+
+static __inline__ __mmask8 __DEFAULT_FN_ATTRS
+_mm_mask_cmpneq_epu32_mask(__mmask8 __u, __m128i __a, __m128i __b) {
+ return (__mmask8)__builtin_ia32_ucmpd128_mask((__v4si)__a, (__v4si)__b, 4,
+ __u);
+}
+
+static __inline__ __mmask8 __DEFAULT_FN_ATTRS
+_mm256_cmpneq_epi32_mask(__m256i __a, __m256i __b) {
+ return (__mmask8)__builtin_ia32_cmpd256_mask((__v8si)__a, (__v8si)__b, 4,
+ (__mmask8)-1);
+}
+
+static __inline__ __mmask8 __DEFAULT_FN_ATTRS
+_mm256_mask_cmpneq_epi32_mask(__mmask8 __u, __m256i __a, __m256i __b) {
+ return (__mmask8)__builtin_ia32_cmpd256_mask((__v8si)__a, (__v8si)__b, 4,
+ __u);
+}
+
+static __inline__ __mmask8 __DEFAULT_FN_ATTRS
+_mm256_cmpneq_epu32_mask(__m256i __a, __m256i __b) {
+ return (__mmask8)__builtin_ia32_ucmpd256_mask((__v8si)__a, (__v8si)__b, 4,
+ (__mmask8)-1);
+}
+
+static __inline__ __mmask8 __DEFAULT_FN_ATTRS
+_mm256_mask_cmpneq_epu32_mask(__mmask8 __u, __m256i __a, __m256i __b) {
+ return (__mmask8)__builtin_ia32_ucmpd256_mask((__v8si)__a, (__v8si)__b, 4,
+ __u);
+}
+
+static __inline__ __mmask8 __DEFAULT_FN_ATTRS
+_mm_cmpneq_epi64_mask(__m128i __a, __m128i __b) {
+ return (__mmask8)__builtin_ia32_cmpq128_mask((__v2di)__a, (__v2di)__b, 4,
+ (__mmask8)-1);
+}
+
+static __inline__ __mmask8 __DEFAULT_FN_ATTRS
+_mm_mask_cmpneq_epi64_mask(__mmask8 __u, __m128i __a, __m128i __b) {
+ return (__mmask8)__builtin_ia32_cmpq128_mask((__v2di)__a, (__v2di)__b, 4,
+ __u);
+}
+
+static __inline__ __mmask8 __DEFAULT_FN_ATTRS
+_mm_cmpneq_epu64_mask(__m128i __a, __m128i __b) {
+ return (__mmask8)__builtin_ia32_ucmpq128_mask((__v2di)__a, (__v2di)__b, 4,
+ (__mmask8)-1);
+}
+
+static __inline__ __mmask8 __DEFAULT_FN_ATTRS
+_mm_mask_cmpneq_epu64_mask(__mmask8 __u, __m128i __a, __m128i __b) {
+ return (__mmask8)__builtin_ia32_ucmpq128_mask((__v2di)__a, (__v2di)__b, 4,
+ __u);
+}
+
+static __inline__ __mmask8 __DEFAULT_FN_ATTRS
+_mm256_cmpneq_epi64_mask(__m256i __a, __m256i __b) {
+ return (__mmask8)__builtin_ia32_cmpq256_mask((__v4di)__a, (__v4di)__b, 4,
+ (__mmask8)-1);
+}
+
+static __inline__ __mmask8 __DEFAULT_FN_ATTRS
+_mm256_mask_cmpneq_epi64_mask(__mmask8 __u, __m256i __a, __m256i __b) {
+ return (__mmask8)__builtin_ia32_cmpq256_mask((__v4di)__a, (__v4di)__b, 4,
+ __u);
+}
+
+static __inline__ __mmask8 __DEFAULT_FN_ATTRS
+_mm256_cmpneq_epu64_mask(__m256i __a, __m256i __b) {
+ return (__mmask8)__builtin_ia32_ucmpq256_mask((__v4di)__a, (__v4di)__b, 4,
+ (__mmask8)-1);
+}
+
+static __inline__ __mmask8 __DEFAULT_FN_ATTRS
+_mm256_mask_cmpneq_epu64_mask(__mmask8 __u, __m256i __a, __m256i __b) {
+ return (__mmask8)__builtin_ia32_ucmpq256_mask((__v4di)__a, (__v4di)__b, 4,
+ __u);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS
+_mm256_mask_add_epi32 (__m256i __W, __mmask8 __U, __m256i __A,
+ __m256i __B)
+{
+ return (__m256i) __builtin_ia32_paddd256_mask ((__v8si) __A,
+ (__v8si) __B,
+ (__v8si) __W,
+ (__mmask8) __U);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS
+_mm256_maskz_add_epi32 (__mmask8 __U, __m256i __A, __m256i __B)
+{
+ return (__m256i) __builtin_ia32_paddd256_mask ((__v8si) __A,
+ (__v8si) __B,
+ (__v8si)
+ _mm256_setzero_si256 (),
+ (__mmask8) __U);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS
+_mm256_mask_add_epi64 (__m256i __W, __mmask8 __U, __m256i __A,
+ __m256i __B)
+{
+ return (__m256i) __builtin_ia32_paddq256_mask ((__v4di) __A,
+ (__v4di) __B,
+ (__v4di) __W,
+ (__mmask8) __U);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS
+_mm256_maskz_add_epi64 (__mmask8 __U, __m256i __A, __m256i __B)
+{
+ return (__m256i) __builtin_ia32_paddq256_mask ((__v4di) __A,
+ (__v4di) __B,
+ (__v4di)
+ _mm256_setzero_si256 (),
+ (__mmask8) __U);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS
+_mm256_mask_sub_epi32 (__m256i __W, __mmask8 __U, __m256i __A,
+ __m256i __B)
+{
+ return (__m256i) __builtin_ia32_psubd256_mask ((__v8si) __A,
+ (__v8si) __B,
+ (__v8si) __W,
+ (__mmask8) __U);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS
+_mm256_maskz_sub_epi32 (__mmask8 __U, __m256i __A, __m256i __B)
+{
+ return (__m256i) __builtin_ia32_psubd256_mask ((__v8si) __A,
+ (__v8si) __B,
+ (__v8si)
+ _mm256_setzero_si256 (),
+ (__mmask8) __U);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS
+_mm256_mask_sub_epi64 (__m256i __W, __mmask8 __U, __m256i __A,
+ __m256i __B)
+{
+ return (__m256i) __builtin_ia32_psubq256_mask ((__v4di) __A,
+ (__v4di) __B,
+ (__v4di) __W,
+ (__mmask8) __U);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS
+_mm256_maskz_sub_epi64 (__mmask8 __U, __m256i __A, __m256i __B)
+{
+ return (__m256i) __builtin_ia32_psubq256_mask ((__v4di) __A,
+ (__v4di) __B,
+ (__v4di)
+ _mm256_setzero_si256 (),
+ (__mmask8) __U);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_mask_add_epi32 (__m128i __W, __mmask8 __U, __m128i __A,
+ __m128i __B)
+{
+ return (__m128i) __builtin_ia32_paddd128_mask ((__v4si) __A,
+ (__v4si) __B,
+ (__v4si) __W,
+ (__mmask8) __U);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_maskz_add_epi32 (__mmask8 __U, __m128i __A, __m128i __B)
+{
+ return (__m128i) __builtin_ia32_paddd128_mask ((__v4si) __A,
+ (__v4si) __B,
+ (__v4si)
+ _mm_setzero_si128 (),
+ (__mmask8) __U);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_mask_add_epi64 (__m128i __W, __mmask8 __U, __m128i __A,
+ __m128i __B)
+{
+ return (__m128i) __builtin_ia32_paddq128_mask ((__v2di) __A,
+ (__v2di) __B,
+ (__v2di) __W,
+ (__mmask8) __U);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_maskz_add_epi64 (__mmask8 __U, __m128i __A, __m128i __B)
+{
+ return (__m128i) __builtin_ia32_paddq128_mask ((__v2di) __A,
+ (__v2di) __B,
+ (__v2di)
+ _mm_setzero_si128 (),
+ (__mmask8) __U);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_mask_sub_epi32 (__m128i __W, __mmask8 __U, __m128i __A,
+ __m128i __B)
+{
+ return (__m128i) __builtin_ia32_psubd128_mask ((__v4si) __A,
+ (__v4si) __B,
+ (__v4si) __W,
+ (__mmask8) __U);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_maskz_sub_epi32 (__mmask8 __U, __m128i __A, __m128i __B)
+{
+ return (__m128i) __builtin_ia32_psubd128_mask ((__v4si) __A,
+ (__v4si) __B,
+ (__v4si)
+ _mm_setzero_si128 (),
+ (__mmask8) __U);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_mask_sub_epi64 (__m128i __W, __mmask8 __U, __m128i __A,
+ __m128i __B)
+{
+ return (__m128i) __builtin_ia32_psubq128_mask ((__v2di) __A,
+ (__v2di) __B,
+ (__v2di) __W,
+ (__mmask8) __U);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_maskz_sub_epi64 (__mmask8 __U, __m128i __A, __m128i __B)
+{
+ return (__m128i) __builtin_ia32_psubq128_mask ((__v2di) __A,
+ (__v2di) __B,
+ (__v2di)
+ _mm_setzero_si128 (),
+ (__mmask8) __U);
+}
+
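+/*
+ * _mm*_mask[z]_mul_epi32 wrap VPMULDQ: the low (even-indexed) 32-bit
+ * element of each 64-bit lane of __X and __Y is sign-extended and
+ * multiplied to a 64-bit product, so the mask __M selects 64-bit result
+ * lanes (at most 2 for the 128-bit form, 4 for the 256-bit form).  The
+ * _epu32 forms use VPMULUDQ with zero extension instead.
+ */
+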
+static __inline__ __m256i __DEFAULT_FN_ATTRS
+_mm256_mask_mul_epi32 (__m256i __W, __mmask8 __M, __m256i __X,
+ __m256i __Y)
+{
+ return (__m256i) __builtin_ia32_pmuldq256_mask ((__v8si) __X,
+ (__v8si) __Y,
+ (__v4di) __W, __M);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS
+_mm256_maskz_mul_epi32 (__mmask8 __M, __m256i __X, __m256i __Y)
+{
+ return (__m256i) __builtin_ia32_pmuldq256_mask ((__v8si) __X,
+ (__v8si) __Y,
+ (__v4di)
+ _mm256_setzero_si256 (),
+ __M);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_mask_mul_epi32 (__m128i __W, __mmask8 __M, __m128i __X,
+ __m128i __Y)
+{
+ return (__m128i) __builtin_ia32_pmuldq128_mask ((__v4si) __X,
+ (__v4si) __Y,
+ (__v2di) __W, __M);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_maskz_mul_epi32 (__mmask8 __M, __m128i __X, __m128i __Y)
+{
+ return (__m128i) __builtin_ia32_pmuldq128_mask ((__v4si) __X,
+ (__v4si) __Y,
+ (__v2di)
+ _mm_setzero_si128 (),
+ __M);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS
+_mm256_mask_mul_epu32 (__m256i __W, __mmask8 __M, __m256i __X,
+ __m256i __Y)
+{
+ return (__m256i) __builtin_ia32_pmuludq256_mask ((__v8si) __X,
+ (__v8si) __Y,
+ (__v4di) __W, __M);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS
+_mm256_maskz_mul_epu32 (__mmask8 __M, __m256i __X, __m256i __Y)
+{
+ return (__m256i) __builtin_ia32_pmuludq256_mask ((__v8si) __X,
+ (__v8si) __Y,
+ (__v4di)
+ _mm256_setzero_si256 (),
+ __M);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_mask_mul_epu32 (__m128i __W, __mmask8 __M, __m128i __X,
+ __m128i __Y)
+{
+ return (__m128i) __builtin_ia32_pmuludq128_mask ((__v4si) __X,
+ (__v4si) __Y,
+ (__v2di) __W, __M);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_maskz_mul_epu32 (__mmask8 __M, __m128i __X, __m128i __Y)
+{
+ return (__m128i) __builtin_ia32_pmuludq128_mask ((__v4si) __X,
+ (__v4si) __Y,
+ (__v2di)
+ _mm_setzero_si128 (),
+ __M);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS
+_mm256_maskz_mullo_epi32 (__mmask8 __M, __m256i __A, __m256i __B)
+{
+ return (__m256i) __builtin_ia32_pmulld256_mask ((__v8si) __A,
+ (__v8si) __B,
+ (__v8si)
+ _mm256_setzero_si256 (),
+ __M);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS
+_mm256_mask_mullo_epi32 (__m256i __W, __mmask8 __M, __m256i __A,
+ __m256i __B)
+{
+ return (__m256i) __builtin_ia32_pmulld256_mask ((__v8si) __A,
+ (__v8si) __B,
+ (__v8si) __W, __M);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_maskz_mullo_epi32 (__mmask8 __M, __m128i __A, __m128i __B)
+{
+ return (__m128i) __builtin_ia32_pmulld128_mask ((__v4si) __A,
+ (__v4si) __B,
+ (__v4si)
+ _mm_setzero_si128 (),
+ __M);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_mask_mullo_epi32 (__m128i __W, __mmask8 __M, __m128i __A,
+ __m128i __B)
+{
+ return (__m128i) __builtin_ia32_pmulld128_mask ((__v4si) __A,
+ (__v4si) __B,
+ (__v4si) __W, __M);
+}
+
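+/* Editorial sketch -- not part of the upstream header.  _mm_mask_mul_epi32
+   multiplies the even-indexed 32-bit elements of __X and __Y, sign-extended
+   to 64-bit products, then blends with __W under the mask; the _epu32 forms
+   zero-extend instead.  The __demo_* name is illustrative only. */
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+__demo_mask_mul_epi32 (void)
+{
+  __m128i __x = _mm_set_epi32 (9, 4, 9, -2);   /* even-indexed lanes: -2, 4 */
+  __m128i __y = _mm_set_epi32 (9, 5, 9, 3);    /* even-indexed lanes:  3, 5 */
+  /* Mask 0x3 selects both 64-bit lanes; result, low to high: -6, 20. */
+  return _mm_mask_mul_epi32 (_mm_setzero_si128 (), 0x3, __x, __y);
+}
+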
+static __inline__ __m256i __DEFAULT_FN_ATTRS
+_mm256_mask_and_epi32(__m256i __W, __mmask8 __U, __m256i __A, __m256i __B)
+{
+ return (__m256i)__builtin_ia32_selectd_256((__mmask8)__U,
+ (__v8si)_mm256_and_si256(__A, __B),
+ (__v8si)__W);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS
+_mm256_maskz_and_epi32(__mmask8 __U, __m256i __A, __m256i __B)
+{
+ return (__m256i)_mm256_mask_and_epi32(_mm256_setzero_si256(), __U, __A, __B);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_mask_and_epi32(__m128i __W, __mmask8 __U, __m128i __A, __m128i __B)
+{
+ return (__m128i)__builtin_ia32_selectd_128((__mmask8)__U,
+ (__v4si)_mm_and_si128(__A, __B),
+ (__v4si)__W);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_maskz_and_epi32(__mmask8 __U, __m128i __A, __m128i __B)
+{
+ return (__m128i)_mm_mask_and_epi32(_mm_setzero_si128(), __U, __A, __B);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS
+_mm256_mask_andnot_epi32(__m256i __W, __mmask8 __U, __m256i __A, __m256i __B)
+{
+ return (__m256i)__builtin_ia32_selectd_256((__mmask8)__U,
+ (__v8si)_mm256_andnot_si256(__A, __B),
+ (__v8si)__W);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS
+_mm256_maskz_andnot_epi32(__mmask8 __U, __m256i __A, __m256i __B)
+{
+ return (__m256i)_mm256_mask_andnot_epi32(_mm256_setzero_si256(),
+ __U, __A, __B);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_mask_andnot_epi32(__m128i __W, __mmask8 __U, __m128i __A, __m128i __B)
+{
+ return (__m128i)__builtin_ia32_selectd_128((__mmask8)__U,
+ (__v4si)_mm_andnot_si128(__A, __B),
+ (__v4si)__W);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_maskz_andnot_epi32 (__mmask8 __U, __m128i __A, __m128i __B)
+{
+ return (__m128i)_mm_mask_andnot_epi32(_mm_setzero_si128(), __U, __A, __B);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS
+_mm256_mask_or_epi32 (__m256i __W, __mmask8 __U, __m256i __A, __m256i __B)
+{
+ return (__m256i)__builtin_ia32_selectd_256((__mmask8)__U,
+ (__v8si)_mm256_or_si256(__A, __B),
+ (__v8si)__W);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS
+_mm256_maskz_or_epi32(__mmask8 __U, __m256i __A, __m256i __B)
+{
+ return (__m256i)_mm256_mask_or_epi32(_mm256_setzero_si256(), __U, __A, __B);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_mask_or_epi32(__m128i __W, __mmask8 __U, __m128i __A, __m128i __B)
+{
+ return (__m128i)__builtin_ia32_selectd_128((__mmask8)__U,
+ (__v4si)_mm_or_si128(__A, __B),
+ (__v4si)__W);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_maskz_or_epi32(__mmask8 __U, __m128i __A, __m128i __B)
+{
+ return (__m128i)_mm_mask_or_epi32(_mm_setzero_si128(), __U, __A, __B);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS
+_mm256_mask_xor_epi32(__m256i __W, __mmask8 __U, __m256i __A, __m256i __B)
+{
+ return (__m256i)__builtin_ia32_selectd_256((__mmask8)__U,
+ (__v8si)_mm256_xor_si256(__A, __B),
+ (__v8si)__W);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS
+_mm256_maskz_xor_epi32(__mmask8 __U, __m256i __A, __m256i __B)
+{
+ return (__m256i)_mm256_mask_xor_epi32(_mm256_setzero_si256(), __U, __A, __B);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_mask_xor_epi32(__m128i __W, __mmask8 __U, __m128i __A,
+ __m128i __B)
+{
+ return (__m128i)__builtin_ia32_selectd_128((__mmask8)__U,
+ (__v4si)_mm_xor_si128(__A, __B),
+ (__v4si)__W);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_maskz_xor_epi32(__mmask8 __U, __m128i __A, __m128i __B)
+{
+ return (__m128i)_mm_mask_xor_epi32(_mm_setzero_si128(), __U, __A, __B);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS
+_mm256_mask_and_epi64(__m256i __W, __mmask8 __U, __m256i __A, __m256i __B)
+{
+ return (__m256i)__builtin_ia32_selectq_256((__mmask8)__U,
+ (__v4di)_mm256_and_si256(__A, __B),
+ (__v4di)__W);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS
+_mm256_maskz_and_epi64(__mmask8 __U, __m256i __A, __m256i __B)
+{
+ return (__m256i)_mm256_mask_and_epi64(_mm256_setzero_si256(), __U, __A, __B);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_mask_and_epi64(__m128i __W, __mmask8 __U, __m128i __A, __m128i __B)
+{
+ return (__m128i)__builtin_ia32_selectq_128((__mmask8)__U,
+ (__v2di)_mm_and_si128(__A, __B),
+ (__v2di)__W);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_maskz_and_epi64(__mmask8 __U, __m128i __A, __m128i __B)
+{
+ return (__m128i)_mm_mask_and_epi64(_mm_setzero_si128(), __U, __A, __B);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS
+_mm256_mask_andnot_epi64(__m256i __W, __mmask8 __U, __m256i __A, __m256i __B)
+{
+ return (__m256i)__builtin_ia32_selectq_256((__mmask8)__U,
+ (__v4di)_mm256_andnot_si256(__A, __B),
+ (__v4di)__W);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS
+_mm256_maskz_andnot_epi64(__mmask8 __U, __m256i __A, __m256i __B)
+{
+ return (__m256i)_mm256_mask_andnot_epi64(_mm256_setzero_si256(),
+ __U, __A, __B);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_mask_andnot_epi64(__m128i __W, __mmask8 __U, __m128i __A, __m128i __B)
+{
+ return (__m128i)__builtin_ia32_selectq_128((__mmask8)__U,
+ (__v2di)_mm_andnot_si128(__A, __B),
+ (__v2di)__W);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_maskz_andnot_epi64(__mmask8 __U, __m128i __A, __m128i __B)
+{
+ return (__m128i)_mm_mask_andnot_epi64(_mm_setzero_si128(), __U, __A, __B);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS
+_mm256_mask_or_epi64(__m256i __W, __mmask8 __U, __m256i __A, __m256i __B)
+{
+ return (__m256i)__builtin_ia32_selectq_256((__mmask8)__U,
+ (__v4di)_mm256_or_si256(__A, __B),
+ (__v4di)__W);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS
+_mm256_maskz_or_epi64(__mmask8 __U, __m256i __A, __m256i __B)
+{
+ return (__m256i)_mm256_mask_or_epi64(_mm256_setzero_si256(), __U, __A, __B);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_mask_or_epi64(__m128i __W, __mmask8 __U, __m128i __A, __m128i __B)
+{
+ return (__m128i)__builtin_ia32_selectq_128((__mmask8)__U,
+ (__v2di)_mm_or_si128(__A, __B),
+ (__v2di)__W);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_maskz_or_epi64(__mmask8 __U, __m128i __A, __m128i __B)
+{
+ return (__m128i)_mm_mask_or_epi64(_mm_setzero_si128(), __U, __A, __B);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS
+_mm256_mask_xor_epi64(__m256i __W, __mmask8 __U, __m256i __A, __m256i __B)
+{
+ return (__m256i)__builtin_ia32_selectq_256((__mmask8)__U,
+ (__v4di)_mm256_xor_si256(__A, __B),
+ (__v4di)__W);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS
+_mm256_maskz_xor_epi64(__mmask8 __U, __m256i __A, __m256i __B)
+{
+ return (__m256i)_mm256_mask_xor_epi64(_mm256_setzero_si256(), __U, __A, __B);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_mask_xor_epi64(__m128i __W, __mmask8 __U, __m128i __A,
+ __m128i __B)
+{
+ return (__m128i)__builtin_ia32_selectq_128((__mmask8)__U,
+ (__v2di)_mm_xor_si128(__A, __B),
+ (__v2di)__W);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_maskz_xor_epi64(__mmask8 __U, __m128i __A, __m128i __B)
+{
+ return (__m128i)_mm_mask_xor_epi64(_mm_setzero_si128(), __U, __A, __B);
+}
+
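+/* Editorial sketch -- not part of the upstream header.  The masked logical
+   ops above are the plain AVX2 operation followed by a per-lane select, so
+   a masked AND of the odd lanes can be written as below; the _maskz forms
+   are the same select with a zero fallback.  The __demo_* name is
+   illustrative only. */
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+__demo_mask_and_epi32 (__m128i __A, __m128i __B)
+{
+  /* Odd lanes hold __A & __B; even lanes fall through to the -1 fill. */
+  return _mm_mask_and_epi32 (_mm_set1_epi32 (-1), 0xA, __A, __B);
+}
+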
+#define _mm_cmp_epi32_mask(a, b, p) __extension__ ({ \
+ (__mmask8)__builtin_ia32_cmpd128_mask((__v4si)(__m128i)(a), \
+ (__v4si)(__m128i)(b), (int)(p), \
+ (__mmask8)-1); })
+
+#define _mm_mask_cmp_epi32_mask(m, a, b, p) __extension__ ({ \
+ (__mmask8)__builtin_ia32_cmpd128_mask((__v4si)(__m128i)(a), \
+ (__v4si)(__m128i)(b), (int)(p), \
+ (__mmask8)(m)); })
+
+#define _mm_cmp_epu32_mask(a, b, p) __extension__ ({ \
+ (__mmask8)__builtin_ia32_ucmpd128_mask((__v4si)(__m128i)(a), \
+ (__v4si)(__m128i)(b), (int)(p), \
+ (__mmask8)-1); })
+
+#define _mm_mask_cmp_epu32_mask(m, a, b, p) __extension__ ({ \
+ (__mmask8)__builtin_ia32_ucmpd128_mask((__v4si)(__m128i)(a), \
+ (__v4si)(__m128i)(b), (int)(p), \
+ (__mmask8)(m)); })
+
+#define _mm256_cmp_epi32_mask(a, b, p) __extension__ ({ \
+ (__mmask8)__builtin_ia32_cmpd256_mask((__v8si)(__m256i)(a), \
+ (__v8si)(__m256i)(b), (int)(p), \
+ (__mmask8)-1); })
+
+#define _mm256_mask_cmp_epi32_mask(m, a, b, p) __extension__ ({ \
+ (__mmask8)__builtin_ia32_cmpd256_mask((__v8si)(__m256i)(a), \
+ (__v8si)(__m256i)(b), (int)(p), \
+ (__mmask8)(m)); })
+
+#define _mm256_cmp_epu32_mask(a, b, p) __extension__ ({ \
+ (__mmask8)__builtin_ia32_ucmpd256_mask((__v8si)(__m256i)(a), \
+ (__v8si)(__m256i)(b), (int)(p), \
+ (__mmask8)-1); })
+
+#define _mm256_mask_cmp_epu32_mask(m, a, b, p) __extension__ ({ \
+ (__mmask8)__builtin_ia32_ucmpd256_mask((__v8si)(__m256i)(a), \
+ (__v8si)(__m256i)(b), (int)(p), \
+ (__mmask8)(m)); })
+
+#define _mm_cmp_epi64_mask(a, b, p) __extension__ ({ \
+ (__mmask8)__builtin_ia32_cmpq128_mask((__v2di)(__m128i)(a), \
+ (__v2di)(__m128i)(b), (int)(p), \
+ (__mmask8)-1); })
+
+#define _mm_mask_cmp_epi64_mask(m, a, b, p) __extension__ ({ \
+ (__mmask8)__builtin_ia32_cmpq128_mask((__v2di)(__m128i)(a), \
+ (__v2di)(__m128i)(b), (int)(p), \
+ (__mmask8)(m)); })
+
+#define _mm_cmp_epu64_mask(a, b, p) __extension__ ({ \
+ (__mmask8)__builtin_ia32_ucmpq128_mask((__v2di)(__m128i)(a), \
+ (__v2di)(__m128i)(b), (int)(p), \
+ (__mmask8)-1); })
+
+#define _mm_mask_cmp_epu64_mask(m, a, b, p) __extension__ ({ \
+ (__mmask8)__builtin_ia32_ucmpq128_mask((__v2di)(__m128i)(a), \
+ (__v2di)(__m128i)(b), (int)(p), \
+ (__mmask8)(m)); })
+
+#define _mm256_cmp_epi64_mask(a, b, p) __extension__ ({ \
+ (__mmask8)__builtin_ia32_cmpq256_mask((__v4di)(__m256i)(a), \
+ (__v4di)(__m256i)(b), (int)(p), \
+ (__mmask8)-1); })
+
+#define _mm256_mask_cmp_epi64_mask(m, a, b, p) __extension__ ({ \
+ (__mmask8)__builtin_ia32_cmpq256_mask((__v4di)(__m256i)(a), \
+ (__v4di)(__m256i)(b), (int)(p), \
+ (__mmask8)(m)); })
+
+#define _mm256_cmp_epu64_mask(a, b, p) __extension__ ({ \
+ (__mmask8)__builtin_ia32_ucmpq256_mask((__v4di)(__m256i)(a), \
+ (__v4di)(__m256i)(b), (int)(p), \
+ (__mmask8)-1); })
+
+#define _mm256_mask_cmp_epu64_mask(m, a, b, p) __extension__ ({ \
+ (__mmask8)__builtin_ia32_ucmpq256_mask((__v4di)(__m256i)(a), \
+ (__v4di)(__m256i)(b), (int)(p), \
+ (__mmask8)(m)); })
+
+#define _mm256_cmp_ps_mask(a, b, p) __extension__ ({ \
+ (__mmask8)__builtin_ia32_cmpps256_mask((__v8sf)(__m256)(a), \
+ (__v8sf)(__m256)(b), (int)(p), \
+ (__mmask8)-1); })
+
+#define _mm256_mask_cmp_ps_mask(m, a, b, p) __extension__ ({ \
+ (__mmask8)__builtin_ia32_cmpps256_mask((__v8sf)(__m256)(a), \
+ (__v8sf)(__m256)(b), (int)(p), \
+ (__mmask8)(m)); })
+
+#define _mm256_cmp_pd_mask(a, b, p) __extension__ ({ \
+ (__mmask8)__builtin_ia32_cmppd256_mask((__v4df)(__m256d)(a), \
+ (__v4df)(__m256d)(b), (int)(p), \
+ (__mmask8)-1); })
+
+#define _mm256_mask_cmp_pd_mask(m, a, b, p) __extension__ ({ \
+ (__mmask8)__builtin_ia32_cmppd256_mask((__v4df)(__m256d)(a), \
+ (__v4df)(__m256d)(b), (int)(p), \
+ (__mmask8)(m)); })
+
+#define _mm_cmp_ps_mask(a, b, p) __extension__ ({ \
+ (__mmask8)__builtin_ia32_cmpps128_mask((__v4sf)(__m128)(a), \
+ (__v4sf)(__m128)(b), (int)(p), \
+ (__mmask8)-1); })
+
+#define _mm_mask_cmp_ps_mask(m, a, b, p) __extension__ ({ \
+ (__mmask8)__builtin_ia32_cmpps128_mask((__v4sf)(__m128)(a), \
+ (__v4sf)(__m128)(b), (int)(p), \
+ (__mmask8)(m)); })
+
+#define _mm_cmp_pd_mask(a, b, p) __extension__ ({ \
+ (__mmask8)__builtin_ia32_cmppd128_mask((__v2df)(__m128d)(a), \
+ (__v2df)(__m128d)(b), (int)(p), \
+ (__mmask8)-1); })
+
+#define _mm_mask_cmp_pd_mask(m, a, b, p) __extension__ ({ \
+ (__mmask8)__builtin_ia32_cmppd128_mask((__v2df)(__m128d)(a), \
+ (__v2df)(__m128d)(b), (int)(p), \
+ (__mmask8)(m)); })
+
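+/* Editorial sketch -- not part of the upstream header.  The predicate
+   argument of the cmp macros must be an immediate in the range 0..7
+   (_MM_CMPINT_EQ, _MM_CMPINT_LT, ..., assumed here to come from
+   avx512fintrin.h); the result packs one bit per lane, and the _mask_ forms
+   AND it with the incoming mask.  The __demo_* name is illustrative only. */
+static __inline__ __mmask8 __DEFAULT_FN_ATTRS
+__demo_cmplt_epi32_mask (__mmask8 __m, __m128i __a, __m128i __b)
+{
+  /* Bits 0..3 of the result flag lanes where __a < __b and __m is set. */
+  return _mm_mask_cmp_epi32_mask (__m, __a, __b, _MM_CMPINT_LT);
+}
+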
+static __inline__ __m128d __DEFAULT_FN_ATTRS
+_mm_mask_fmadd_pd(__m128d __A, __mmask8 __U, __m128d __B, __m128d __C)
+{
+ return (__m128d) __builtin_ia32_vfmaddpd128_mask ((__v2df) __A,
+ (__v2df) __B,
+ (__v2df) __C,
+ (__mmask8) __U);
+}
+
+static __inline__ __m128d __DEFAULT_FN_ATTRS
+_mm_mask3_fmadd_pd(__m128d __A, __m128d __B, __m128d __C, __mmask8 __U)
+{
+ return (__m128d) __builtin_ia32_vfmaddpd128_mask3 ((__v2df) __A,
+ (__v2df) __B,
+ (__v2df) __C,
+ (__mmask8) __U);
+}
+
+static __inline__ __m128d __DEFAULT_FN_ATTRS
+_mm_maskz_fmadd_pd(__mmask8 __U, __m128d __A, __m128d __B, __m128d __C)
+{
+ return (__m128d) __builtin_ia32_vfmaddpd128_maskz ((__v2df) __A,
+ (__v2df) __B,
+ (__v2df) __C,
+ (__mmask8) __U);
+}
+
+static __inline__ __m128d __DEFAULT_FN_ATTRS
+_mm_mask_fmsub_pd(__m128d __A, __mmask8 __U, __m128d __B, __m128d __C)
+{
+ return (__m128d) __builtin_ia32_vfmaddpd128_mask ((__v2df) __A,
+ (__v2df) __B,
+ -(__v2df) __C,
+ (__mmask8) __U);
+}
+
+static __inline__ __m128d __DEFAULT_FN_ATTRS
+_mm_maskz_fmsub_pd(__mmask8 __U, __m128d __A, __m128d __B, __m128d __C)
+{
+ return (__m128d) __builtin_ia32_vfmaddpd128_maskz ((__v2df) __A,
+ (__v2df) __B,
+ -(__v2df) __C,
+ (__mmask8) __U);
+}
+
+static __inline__ __m128d __DEFAULT_FN_ATTRS
+_mm_mask3_fnmadd_pd(__m128d __A, __m128d __B, __m128d __C, __mmask8 __U)
+{
+ return (__m128d) __builtin_ia32_vfmaddpd128_mask3 (-(__v2df) __A,
+ (__v2df) __B,
+ (__v2df) __C,
+ (__mmask8) __U);
+}
+
+static __inline__ __m128d __DEFAULT_FN_ATTRS
+_mm_maskz_fnmadd_pd(__mmask8 __U, __m128d __A, __m128d __B, __m128d __C)
+{
+ return (__m128d) __builtin_ia32_vfmaddpd128_maskz (-(__v2df) __A,
+ (__v2df) __B,
+ (__v2df) __C,
+ (__mmask8) __U);
+}
+
+static __inline__ __m128d __DEFAULT_FN_ATTRS
+_mm_maskz_fnmsub_pd(__mmask8 __U, __m128d __A, __m128d __B, __m128d __C)
+{
+ return (__m128d) __builtin_ia32_vfmaddpd128_maskz (-(__v2df) __A,
+ (__v2df) __B,
+ -(__v2df) __C,
+ (__mmask8) __U);
+}
+
+static __inline__ __m256d __DEFAULT_FN_ATTRS
+_mm256_mask_fmadd_pd(__m256d __A, __mmask8 __U, __m256d __B, __m256d __C)
+{
+ return (__m256d) __builtin_ia32_vfmaddpd256_mask ((__v4df) __A,
+ (__v4df) __B,
+ (__v4df) __C,
+ (__mmask8) __U);
+}
+
+static __inline__ __m256d __DEFAULT_FN_ATTRS
+_mm256_mask3_fmadd_pd(__m256d __A, __m256d __B, __m256d __C, __mmask8 __U)
+{
+ return (__m256d) __builtin_ia32_vfmaddpd256_mask3 ((__v4df) __A,
+ (__v4df) __B,
+ (__v4df) __C,
+ (__mmask8) __U);
+}
+
+static __inline__ __m256d __DEFAULT_FN_ATTRS
+_mm256_maskz_fmadd_pd(__mmask8 __U, __m256d __A, __m256d __B, __m256d __C)
+{
+ return (__m256d) __builtin_ia32_vfmaddpd256_maskz ((__v4df) __A,
+ (__v4df) __B,
+ (__v4df) __C,
+ (__mmask8) __U);
+}
+
+static __inline__ __m256d __DEFAULT_FN_ATTRS
+_mm256_mask_fmsub_pd(__m256d __A, __mmask8 __U, __m256d __B, __m256d __C)
+{
+ return (__m256d) __builtin_ia32_vfmaddpd256_mask ((__v4df) __A,
+ (__v4df) __B,
+ -(__v4df) __C,
+ (__mmask8) __U);
+}
+
+static __inline__ __m256d __DEFAULT_FN_ATTRS
+_mm256_maskz_fmsub_pd(__mmask8 __U, __m256d __A, __m256d __B, __m256d __C)
+{
+ return (__m256d) __builtin_ia32_vfmaddpd256_maskz ((__v4df) __A,
+ (__v4df) __B,
+ -(__v4df) __C,
+ (__mmask8) __U);
+}
+
+static __inline__ __m256d __DEFAULT_FN_ATTRS
+_mm256_mask3_fnmadd_pd(__m256d __A, __m256d __B, __m256d __C, __mmask8 __U)
+{
+ return (__m256d) __builtin_ia32_vfmaddpd256_mask3 (-(__v4df) __A,
+ (__v4df) __B,
+ (__v4df) __C,
+ (__mmask8) __U);
+}
+
+static __inline__ __m256d __DEFAULT_FN_ATTRS
+_mm256_maskz_fnmadd_pd(__mmask8 __U, __m256d __A, __m256d __B, __m256d __C)
+{
+ return (__m256d) __builtin_ia32_vfmaddpd256_maskz (-(__v4df) __A,
+ (__v4df) __B,
+ (__v4df) __C,
+ (__mmask8) __U);
+}
+
+static __inline__ __m256d __DEFAULT_FN_ATTRS
+_mm256_maskz_fnmsub_pd(__mmask8 __U, __m256d __A, __m256d __B, __m256d __C)
+{
+ return (__m256d) __builtin_ia32_vfmaddpd256_maskz (-(__v4df) __A,
+ (__v4df) __B,
+ -(__v4df) __C,
+ (__mmask8) __U);
+}
+
+static __inline__ __m128 __DEFAULT_FN_ATTRS
+_mm_mask_fmadd_ps(__m128 __A, __mmask8 __U, __m128 __B, __m128 __C)
+{
+ return (__m128) __builtin_ia32_vfmaddps128_mask ((__v4sf) __A,
+ (__v4sf) __B,
+ (__v4sf) __C,
+ (__mmask8) __U);
+}
+
+static __inline__ __m128 __DEFAULT_FN_ATTRS
+_mm_mask3_fmadd_ps(__m128 __A, __m128 __B, __m128 __C, __mmask8 __U)
+{
+ return (__m128) __builtin_ia32_vfmaddps128_mask3 ((__v4sf) __A,
+ (__v4sf) __B,
+ (__v4sf) __C,
+ (__mmask8) __U);
+}
+
+static __inline__ __m128 __DEFAULT_FN_ATTRS
+_mm_maskz_fmadd_ps(__mmask8 __U, __m128 __A, __m128 __B, __m128 __C)
+{
+ return (__m128) __builtin_ia32_vfmaddps128_maskz ((__v4sf) __A,
+ (__v4sf) __B,
+ (__v4sf) __C,
+ (__mmask8) __U);
+}
+
+static __inline__ __m128 __DEFAULT_FN_ATTRS
+_mm_mask_fmsub_ps(__m128 __A, __mmask8 __U, __m128 __B, __m128 __C)
+{
+ return (__m128) __builtin_ia32_vfmaddps128_mask ((__v4sf) __A,
+ (__v4sf) __B,
+ -(__v4sf) __C,
+ (__mmask8) __U);
+}
+
+static __inline__ __m128 __DEFAULT_FN_ATTRS
+_mm_maskz_fmsub_ps(__mmask8 __U, __m128 __A, __m128 __B, __m128 __C)
+{
+ return (__m128) __builtin_ia32_vfmaddps128_maskz ((__v4sf) __A,
+ (__v4sf) __B,
+ -(__v4sf) __C,
+ (__mmask8) __U);
+}
+
+static __inline__ __m128 __DEFAULT_FN_ATTRS
+_mm_mask3_fnmadd_ps(__m128 __A, __m128 __B, __m128 __C, __mmask8 __U)
+{
+ return (__m128) __builtin_ia32_vfmaddps128_mask3 (-(__v4sf) __A,
+ (__v4sf) __B,
+ (__v4sf) __C,
+ (__mmask8) __U);
+}
+
+static __inline__ __m128 __DEFAULT_FN_ATTRS
+_mm_maskz_fnmadd_ps(__mmask8 __U, __m128 __A, __m128 __B, __m128 __C)
+{
+ return (__m128) __builtin_ia32_vfmaddps128_maskz (-(__v4sf) __A,
+ (__v4sf) __B,
+ (__v4sf) __C,
+ (__mmask8) __U);
+}
+
+static __inline__ __m128 __DEFAULT_FN_ATTRS
+_mm_maskz_fnmsub_ps(__mmask8 __U, __m128 __A, __m128 __B, __m128 __C)
+{
+ return (__m128) __builtin_ia32_vfmaddps128_maskz (-(__v4sf) __A,
+ (__v4sf) __B,
+ -(__v4sf) __C,
+ (__mmask8) __U);
+}
+
+static __inline__ __m256 __DEFAULT_FN_ATTRS
+_mm256_mask_fmadd_ps(__m256 __A, __mmask8 __U, __m256 __B, __m256 __C)
+{
+ return (__m256) __builtin_ia32_vfmaddps256_mask ((__v8sf) __A,
+ (__v8sf) __B,
+ (__v8sf) __C,
+ (__mmask8) __U);
+}
+
+static __inline__ __m256 __DEFAULT_FN_ATTRS
+_mm256_mask3_fmadd_ps(__m256 __A, __m256 __B, __m256 __C, __mmask8 __U)
+{
+ return (__m256) __builtin_ia32_vfmaddps256_mask3 ((__v8sf) __A,
+ (__v8sf) __B,
+ (__v8sf) __C,
+ (__mmask8) __U);
+}
+
+static __inline__ __m256 __DEFAULT_FN_ATTRS
+_mm256_maskz_fmadd_ps(__mmask8 __U, __m256 __A, __m256 __B, __m256 __C)
+{
+ return (__m256) __builtin_ia32_vfmaddps256_maskz ((__v8sf) __A,
+ (__v8sf) __B,
+ (__v8sf) __C,
+ (__mmask8) __U);
+}
+
+static __inline__ __m256 __DEFAULT_FN_ATTRS
+_mm256_mask_fmsub_ps(__m256 __A, __mmask8 __U, __m256 __B, __m256 __C)
+{
+ return (__m256) __builtin_ia32_vfmaddps256_mask ((__v8sf) __A,
+ (__v8sf) __B,
+ -(__v8sf) __C,
+ (__mmask8) __U);
+}
+
+static __inline__ __m256 __DEFAULT_FN_ATTRS
+_mm256_maskz_fmsub_ps(__mmask8 __U, __m256 __A, __m256 __B, __m256 __C)
+{
+ return (__m256) __builtin_ia32_vfmaddps256_maskz ((__v8sf) __A,
+ (__v8sf) __B,
+ -(__v8sf) __C,
+ (__mmask8) __U);
+}
+
+static __inline__ __m256 __DEFAULT_FN_ATTRS
+_mm256_mask3_fnmadd_ps(__m256 __A, __m256 __B, __m256 __C, __mmask8 __U)
+{
+ return (__m256) __builtin_ia32_vfmaddps256_mask3 (-(__v8sf) __A,
+ (__v8sf) __B,
+ (__v8sf) __C,
+ (__mmask8) __U);
+}
+
+static __inline__ __m256 __DEFAULT_FN_ATTRS
+_mm256_maskz_fnmadd_ps(__mmask8 __U, __m256 __A, __m256 __B, __m256 __C)
+{
+ return (__m256) __builtin_ia32_vfmaddps256_maskz (-(__v8sf) __A,
+ (__v8sf) __B,
+ (__v8sf) __C,
+ (__mmask8) __U);
+}
+
+static __inline__ __m256 __DEFAULT_FN_ATTRS
+_mm256_maskz_fnmsub_ps(__mmask8 __U, __m256 __A, __m256 __B, __m256 __C)
+{
+ return (__m256) __builtin_ia32_vfmaddps256_maskz (-(__v8sf) __A,
+ (__v8sf) __B,
+ -(__v8sf) __C,
+ (__mmask8) __U);
+}
+
+static __inline__ __m128d __DEFAULT_FN_ATTRS
+_mm_mask_fmaddsub_pd(__m128d __A, __mmask8 __U, __m128d __B, __m128d __C)
+{
+ return (__m128d) __builtin_ia32_vfmaddsubpd128_mask ((__v2df) __A,
+ (__v2df) __B,
+ (__v2df) __C,
+ (__mmask8) __U);
+}
+
+static __inline__ __m128d __DEFAULT_FN_ATTRS
+_mm_mask3_fmaddsub_pd(__m128d __A, __m128d __B, __m128d __C, __mmask8 __U)
+{
+ return (__m128d) __builtin_ia32_vfmaddsubpd128_mask3 ((__v2df) __A,
+ (__v2df) __B,
+ (__v2df) __C,
+ (__mmask8)
+ __U);
+}
+
+static __inline__ __m128d __DEFAULT_FN_ATTRS
+_mm_maskz_fmaddsub_pd(__mmask8 __U, __m128d __A, __m128d __B, __m128d __C)
+{
+ return (__m128d) __builtin_ia32_vfmaddsubpd128_maskz ((__v2df) __A,
+ (__v2df) __B,
+ (__v2df) __C,
+ (__mmask8)
+ __U);
+}
+
+static __inline__ __m128d __DEFAULT_FN_ATTRS
+_mm_mask_fmsubadd_pd(__m128d __A, __mmask8 __U, __m128d __B, __m128d __C)
+{
+ return (__m128d) __builtin_ia32_vfmaddsubpd128_mask ((__v2df) __A,
+ (__v2df) __B,
+ -(__v2df) __C,
+ (__mmask8) __U);
+}
+
+static __inline__ __m128d __DEFAULT_FN_ATTRS
+_mm_maskz_fmsubadd_pd(__mmask8 __U, __m128d __A, __m128d __B, __m128d __C)
+{
+ return (__m128d) __builtin_ia32_vfmaddsubpd128_maskz ((__v2df) __A,
+ (__v2df) __B,
+ -(__v2df) __C,
+ (__mmask8)
+ __U);
+}
+
+static __inline__ __m256d __DEFAULT_FN_ATTRS
+_mm256_mask_fmaddsub_pd(__m256d __A, __mmask8 __U, __m256d __B, __m256d __C)
+{
+ return (__m256d) __builtin_ia32_vfmaddsubpd256_mask ((__v4df) __A,
+ (__v4df) __B,
+ (__v4df) __C,
+ (__mmask8) __U);
+}
+
+static __inline__ __m256d __DEFAULT_FN_ATTRS
+_mm256_mask3_fmaddsub_pd(__m256d __A, __m256d __B, __m256d __C, __mmask8 __U)
+{
+ return (__m256d) __builtin_ia32_vfmaddsubpd256_mask3 ((__v4df) __A,
+ (__v4df) __B,
+ (__v4df) __C,
+ (__mmask8)
+ __U);
+}
+
+static __inline__ __m256d __DEFAULT_FN_ATTRS
+_mm256_maskz_fmaddsub_pd(__mmask8 __U, __m256d __A, __m256d __B, __m256d __C)
+{
+ return (__m256d) __builtin_ia32_vfmaddsubpd256_maskz ((__v4df) __A,
+ (__v4df) __B,
+ (__v4df) __C,
+ (__mmask8)
+ __U);
+}
+
+static __inline__ __m256d __DEFAULT_FN_ATTRS
+_mm256_mask_fmsubadd_pd(__m256d __A, __mmask8 __U, __m256d __B, __m256d __C)
+{
+ return (__m256d) __builtin_ia32_vfmaddsubpd256_mask ((__v4df) __A,
+ (__v4df) __B,
+ -(__v4df) __C,
+ (__mmask8) __U);
+}
+
+static __inline__ __m256d __DEFAULT_FN_ATTRS
+_mm256_maskz_fmsubadd_pd(__mmask8 __U, __m256d __A, __m256d __B, __m256d __C)
+{
+ return (__m256d) __builtin_ia32_vfmaddsubpd256_maskz ((__v4df) __A,
+ (__v4df) __B,
+ -(__v4df) __C,
+ (__mmask8)
+ __U);
+}
+
+static __inline__ __m128 __DEFAULT_FN_ATTRS
+_mm_mask_fmaddsub_ps(__m128 __A, __mmask8 __U, __m128 __B, __m128 __C)
+{
+ return (__m128) __builtin_ia32_vfmaddsubps128_mask ((__v4sf) __A,
+ (__v4sf) __B,
+ (__v4sf) __C,
+ (__mmask8) __U);
+}
+
+static __inline__ __m128 __DEFAULT_FN_ATTRS
+_mm_mask3_fmaddsub_ps(__m128 __A, __m128 __B, __m128 __C, __mmask8 __U)
+{
+ return (__m128) __builtin_ia32_vfmaddsubps128_mask3 ((__v4sf) __A,
+ (__v4sf) __B,
+ (__v4sf) __C,
+ (__mmask8) __U);
+}
+
+static __inline__ __m128 __DEFAULT_FN_ATTRS
+_mm_maskz_fmaddsub_ps(__mmask8 __U, __m128 __A, __m128 __B, __m128 __C)
+{
+ return (__m128) __builtin_ia32_vfmaddsubps128_maskz ((__v4sf) __A,
+ (__v4sf) __B,
+ (__v4sf) __C,
+ (__mmask8) __U);
+}
+
+static __inline__ __m128 __DEFAULT_FN_ATTRS
+_mm_mask_fmsubadd_ps(__m128 __A, __mmask8 __U, __m128 __B, __m128 __C)
+{
+ return (__m128) __builtin_ia32_vfmaddsubps128_mask ((__v4sf) __A,
+ (__v4sf) __B,
+ -(__v4sf) __C,
+ (__mmask8) __U);
+}
+
+static __inline__ __m128 __DEFAULT_FN_ATTRS
+_mm_maskz_fmsubadd_ps(__mmask8 __U, __m128 __A, __m128 __B, __m128 __C)
+{
+ return (__m128) __builtin_ia32_vfmaddsubps128_maskz ((__v4sf) __A,
+ (__v4sf) __B,
+ -(__v4sf) __C,
+ (__mmask8) __U);
+}
+
+static __inline__ __m256 __DEFAULT_FN_ATTRS
+_mm256_mask_fmaddsub_ps(__m256 __A, __mmask8 __U, __m256 __B,
+ __m256 __C)
+{
+ return (__m256) __builtin_ia32_vfmaddsubps256_mask ((__v8sf) __A,
+ (__v8sf) __B,
+ (__v8sf) __C,
+ (__mmask8) __U);
+}
+
+static __inline__ __m256 __DEFAULT_FN_ATTRS
+_mm256_mask3_fmaddsub_ps(__m256 __A, __m256 __B, __m256 __C, __mmask8 __U)
+{
+ return (__m256) __builtin_ia32_vfmaddsubps256_mask3 ((__v8sf) __A,
+ (__v8sf) __B,
+ (__v8sf) __C,
+ (__mmask8) __U);
+}
+
+static __inline__ __m256 __DEFAULT_FN_ATTRS
+_mm256_maskz_fmaddsub_ps(__mmask8 __U, __m256 __A, __m256 __B, __m256 __C)
+{
+ return (__m256) __builtin_ia32_vfmaddsubps256_maskz ((__v8sf) __A,
+ (__v8sf) __B,
+ (__v8sf) __C,
+ (__mmask8) __U);
+}
+
+static __inline__ __m256 __DEFAULT_FN_ATTRS
+_mm256_mask_fmsubadd_ps(__m256 __A, __mmask8 __U, __m256 __B, __m256 __C)
+{
+ return (__m256) __builtin_ia32_vfmaddsubps256_mask ((__v8sf) __A,
+ (__v8sf) __B,
+ -(__v8sf) __C,
+ (__mmask8) __U);
+}
+
+static __inline__ __m256 __DEFAULT_FN_ATTRS
+_mm256_maskz_fmsubadd_ps(__mmask8 __U, __m256 __A, __m256 __B, __m256 __C)
+{
+ return (__m256) __builtin_ia32_vfmaddsubps256_maskz ((__v8sf) __A,
+ (__v8sf) __B,
+ -(__v8sf) __C,
+ (__mmask8) __U);
+}
+
+static __inline__ __m128d __DEFAULT_FN_ATTRS
+_mm_mask3_fmsub_pd(__m128d __A, __m128d __B, __m128d __C, __mmask8 __U)
+{
+ return (__m128d) __builtin_ia32_vfmsubpd128_mask3 ((__v2df) __A,
+ (__v2df) __B,
+ (__v2df) __C,
+ (__mmask8) __U);
+}
+
+static __inline__ __m256d __DEFAULT_FN_ATTRS
+_mm256_mask3_fmsub_pd(__m256d __A, __m256d __B, __m256d __C, __mmask8 __U)
+{
+ return (__m256d) __builtin_ia32_vfmsubpd256_mask3 ((__v4df) __A,
+ (__v4df) __B,
+ (__v4df) __C,
+ (__mmask8) __U);
+}
+
+static __inline__ __m128 __DEFAULT_FN_ATTRS
+_mm_mask3_fmsub_ps(__m128 __A, __m128 __B, __m128 __C, __mmask8 __U)
+{
+ return (__m128) __builtin_ia32_vfmsubps128_mask3 ((__v4sf) __A,
+ (__v4sf) __B,
+ (__v4sf) __C,
+ (__mmask8) __U);
+}
+
+static __inline__ __m256 __DEFAULT_FN_ATTRS
+_mm256_mask3_fmsub_ps(__m256 __A, __m256 __B, __m256 __C, __mmask8 __U)
+{
+ return (__m256) __builtin_ia32_vfmsubps256_mask3 ((__v8sf) __A,
+ (__v8sf) __B,
+ (__v8sf) __C,
+ (__mmask8) __U);
+}
+
+static __inline__ __m128d __DEFAULT_FN_ATTRS
+_mm_mask3_fmsubadd_pd(__m128d __A, __m128d __B, __m128d __C, __mmask8 __U)
+{
+ return (__m128d) __builtin_ia32_vfmsubaddpd128_mask3 ((__v2df) __A,
+ (__v2df) __B,
+ (__v2df) __C,
+ (__mmask8)
+ __U);
+}
+
+static __inline__ __m256d __DEFAULT_FN_ATTRS
+_mm256_mask3_fmsubadd_pd(__m256d __A, __m256d __B, __m256d __C, __mmask8 __U)
+{
+ return (__m256d) __builtin_ia32_vfmsubaddpd256_mask3 ((__v4df) __A,
+ (__v4df) __B,
+ (__v4df) __C,
+ (__mmask8)
+ __U);
+}
+
+static __inline__ __m128 __DEFAULT_FN_ATTRS
+_mm_mask3_fmsubadd_ps(__m128 __A, __m128 __B, __m128 __C, __mmask8 __U)
+{
+ return (__m128) __builtin_ia32_vfmsubaddps128_mask3 ((__v4sf) __A,
+ (__v4sf) __B,
+ (__v4sf) __C,
+ (__mmask8) __U);
+}
+
+static __inline__ __m256 __DEFAULT_FN_ATTRS
+_mm256_mask3_fmsubadd_ps(__m256 __A, __m256 __B, __m256 __C, __mmask8 __U)
+{
+ return (__m256) __builtin_ia32_vfmsubaddps256_mask3 ((__v8sf) __A,
+ (__v8sf) __B,
+ (__v8sf) __C,
+ (__mmask8) __U);
+}
+
+static __inline__ __m128d __DEFAULT_FN_ATTRS
+_mm_mask_fnmadd_pd(__m128d __A, __mmask8 __U, __m128d __B, __m128d __C)
+{
+ return (__m128d) __builtin_ia32_vfnmaddpd128_mask ((__v2df) __A,
+ (__v2df) __B,
+ (__v2df) __C,
+ (__mmask8) __U);
+}
+
+static __inline__ __m256d __DEFAULT_FN_ATTRS
+_mm256_mask_fnmadd_pd(__m256d __A, __mmask8 __U, __m256d __B, __m256d __C)
+{
+ return (__m256d) __builtin_ia32_vfnmaddpd256_mask ((__v4df) __A,
+ (__v4df) __B,
+ (__v4df) __C,
+ (__mmask8) __U);
+}
+
+static __inline__ __m128 __DEFAULT_FN_ATTRS
+_mm_mask_fnmadd_ps(__m128 __A, __mmask8 __U, __m128 __B, __m128 __C)
+{
+ return (__m128) __builtin_ia32_vfnmaddps128_mask ((__v4sf) __A,
+ (__v4sf) __B,
+ (__v4sf) __C,
+ (__mmask8) __U);
+}
+
+static __inline__ __m256 __DEFAULT_FN_ATTRS
+_mm256_mask_fnmadd_ps(__m256 __A, __mmask8 __U, __m256 __B, __m256 __C)
+{
+ return (__m256) __builtin_ia32_vfnmaddps256_mask ((__v8sf) __A,
+ (__v8sf) __B,
+ (__v8sf) __C,
+ (__mmask8) __U);
+}
+
+static __inline__ __m128d __DEFAULT_FN_ATTRS
+_mm_mask_fnmsub_pd(__m128d __A, __mmask8 __U, __m128d __B, __m128d __C)
+{
+ return (__m128d) __builtin_ia32_vfnmsubpd128_mask ((__v2df) __A,
+ (__v2df) __B,
+ (__v2df) __C,
+ (__mmask8) __U);
+}
+
+static __inline__ __m128d __DEFAULT_FN_ATTRS
+_mm_mask3_fnmsub_pd(__m128d __A, __m128d __B, __m128d __C, __mmask8 __U)
+{
+ return (__m128d) __builtin_ia32_vfnmsubpd128_mask3 ((__v2df) __A,
+ (__v2df) __B,
+ (__v2df) __C,
+ (__mmask8) __U);
+}
+
+static __inline__ __m256d __DEFAULT_FN_ATTRS
+_mm256_mask_fnmsub_pd(__m256d __A, __mmask8 __U, __m256d __B, __m256d __C)
+{
+ return (__m256d) __builtin_ia32_vfnmsubpd256_mask ((__v4df) __A,
+ (__v4df) __B,
+ (__v4df) __C,
+ (__mmask8) __U);
+}
+
+static __inline__ __m256d __DEFAULT_FN_ATTRS
+_mm256_mask3_fnmsub_pd(__m256d __A, __m256d __B, __m256d __C, __mmask8 __U)
+{
+ return (__m256d) __builtin_ia32_vfnmsubpd256_mask3 ((__v4df) __A,
+ (__v4df) __B,
+ (__v4df) __C,
+ (__mmask8) __U);
+}
+
+static __inline__ __m128 __DEFAULT_FN_ATTRS
+_mm_mask_fnmsub_ps(__m128 __A, __mmask8 __U, __m128 __B, __m128 __C)
+{
+ return (__m128) __builtin_ia32_vfnmsubps128_mask ((__v4sf) __A,
+ (__v4sf) __B,
+ (__v4sf) __C,
+ (__mmask8) __U);
+}
+
+static __inline__ __m128 __DEFAULT_FN_ATTRS
+_mm_mask3_fnmsub_ps(__m128 __A, __m128 __B, __m128 __C, __mmask8 __U)
+{
+ return (__m128) __builtin_ia32_vfnmsubps128_mask3 ((__v4sf) __A,
+ (__v4sf) __B,
+ (__v4sf) __C,
+ (__mmask8) __U);
+}
+
+static __inline__ __m256 __DEFAULT_FN_ATTRS
+_mm256_mask_fnmsub_ps(__m256 __A, __mmask8 __U, __m256 __B, __m256 __C)
+{
+ return (__m256) __builtin_ia32_vfnmsubps256_mask ((__v8sf) __A,
+ (__v8sf) __B,
+ (__v8sf) __C,
+ (__mmask8) __U);
+}
+
+static __inline__ __m256 __DEFAULT_FN_ATTRS
+_mm256_mask3_fnmsub_ps(__m256 __A, __m256 __B, __m256 __C, __mmask8 __U)
+{
+ return (__m256) __builtin_ia32_vfnmsubps256_mask3 ((__v8sf) __A,
+ (__v8sf) __B,
+ (__v8sf) __C,
+ (__mmask8) __U);
+}
+
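+/* Editorial sketch -- not part of the upstream header.  The three masking
+   flavours of the FMA family differ only in what an unselected lane
+   receives: _mask falls back to the first operand, _mask3 to the addend
+   __C, and _maskz zeroes the lane; all selected lanes compute
+   __A * __B + __C (with signs flipped for the fmsub/fnmadd/fnmsub forms).
+   The __demo_* name is illustrative only. */
+static __inline__ __m128d __DEFAULT_FN_ATTRS
+__demo_fma_masking (__m128d __A, __m128d __B, __m128d __C)
+{
+  __m128d __r0 = _mm_mask_fmadd_pd (__A, 0x1, __B, __C);  /* lane 1 = __A[1] */
+  __m128d __r1 = _mm_mask3_fmadd_pd (__A, __B, __C, 0x1); /* lane 1 = __C[1] */
+  __m128d __r2 = _mm_maskz_fmadd_pd (0x1, __A, __B, __C); /* lane 1 = 0.0 */
+  return _mm_add_pd (_mm_add_pd (__r0, __r1), __r2);
+}
+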
+static __inline__ __m128d __DEFAULT_FN_ATTRS
+_mm_mask_add_pd (__m128d __W, __mmask8 __U, __m128d __A, __m128d __B) {
+ return (__m128d) __builtin_ia32_addpd128_mask ((__v2df) __A,
+ (__v2df) __B,
+ (__v2df) __W,
+ (__mmask8) __U);
+}
+
+static __inline__ __m128d __DEFAULT_FN_ATTRS
+_mm_maskz_add_pd (__mmask8 __U, __m128d __A, __m128d __B) {
+ return (__m128d) __builtin_ia32_addpd128_mask ((__v2df) __A,
+ (__v2df) __B,
+ (__v2df)
+ _mm_setzero_pd (),
+ (__mmask8) __U);
+}
+
+static __inline__ __m256d __DEFAULT_FN_ATTRS
+_mm256_mask_add_pd (__m256d __W, __mmask8 __U, __m256d __A, __m256d __B) {
+ return (__m256d) __builtin_ia32_addpd256_mask ((__v4df) __A,
+ (__v4df) __B,
+ (__v4df) __W,
+ (__mmask8) __U);
+}
+
+static __inline__ __m256d __DEFAULT_FN_ATTRS
+_mm256_maskz_add_pd (__mmask8 __U, __m256d __A, __m256d __B) {
+ return (__m256d) __builtin_ia32_addpd256_mask ((__v4df) __A,
+ (__v4df) __B,
+ (__v4df)
+ _mm256_setzero_pd (),
+ (__mmask8) __U);
+}
+
+static __inline__ __m128 __DEFAULT_FN_ATTRS
+_mm_mask_add_ps (__m128 __W, __mmask8 __U, __m128 __A, __m128 __B) {
+ return (__m128) __builtin_ia32_addps128_mask ((__v4sf) __A,
+ (__v4sf) __B,
+ (__v4sf) __W,
+ (__mmask8) __U);
+}
+
+static __inline__ __m128 __DEFAULT_FN_ATTRS
+_mm_maskz_add_ps (__mmask8 __U, __m128 __A, __m128 __B) {
+ return (__m128) __builtin_ia32_addps128_mask ((__v4sf) __A,
+ (__v4sf) __B,
+ (__v4sf)
+ _mm_setzero_ps (),
+ (__mmask8) __U);
+}
+
+static __inline__ __m256 __DEFAULT_FN_ATTRS
+_mm256_mask_add_ps (__m256 __W, __mmask8 __U, __m256 __A, __m256 __B) {
+ return (__m256) __builtin_ia32_addps256_mask ((__v8sf) __A,
+ (__v8sf) __B,
+ (__v8sf) __W,
+ (__mmask8) __U);
+}
+
+static __inline__ __m256 __DEFAULT_FN_ATTRS
+_mm256_maskz_add_ps (__mmask8 __U, __m256 __A, __m256 __B) {
+ return (__m256) __builtin_ia32_addps256_mask ((__v8sf) __A,
+ (__v8sf) __B,
+ (__v8sf)
+ _mm256_setzero_ps (),
+ (__mmask8) __U);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_mask_blend_epi32 (__mmask8 __U, __m128i __A, __m128i __W) {
+ return (__m128i) __builtin_ia32_selectd_128 ((__mmask8) __U,
+ (__v4si) __W,
+ (__v4si) __A);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS
+_mm256_mask_blend_epi32 (__mmask8 __U, __m256i __A, __m256i __W) {
+ return (__m256i) __builtin_ia32_selectd_256 ((__mmask8) __U,
+ (__v8si) __W,
+ (__v8si) __A);
+}
+
+static __inline__ __m128d __DEFAULT_FN_ATTRS
+_mm_mask_blend_pd (__mmask8 __U, __m128d __A, __m128d __W) {
+ return (__m128d) __builtin_ia32_selectpd_128 ((__mmask8) __U,
+ (__v2df) __W,
+ (__v2df) __A);
+}
+
+static __inline__ __m256d __DEFAULT_FN_ATTRS
+_mm256_mask_blend_pd (__mmask8 __U, __m256d __A, __m256d __W) {
+ return (__m256d) __builtin_ia32_selectpd_256 ((__mmask8) __U,
+ (__v4df) __W,
+ (__v4df) __A);
+}
+
+static __inline__ __m128 __DEFAULT_FN_ATTRS
+_mm_mask_blend_ps (__mmask8 __U, __m128 __A, __m128 __W) {
+ return (__m128) __builtin_ia32_selectps_128 ((__mmask8) __U,
+ (__v4sf) __W,
+ (__v4sf) __A);
+}
+
+static __inline__ __m256 __DEFAULT_FN_ATTRS
+_mm256_mask_blend_ps (__mmask8 __U, __m256 __A, __m256 __W) {
+ return (__m256) __builtin_ia32_selectps_256 ((__mmask8) __U,
+ (__v8sf) __W,
+ (__v8sf) __A);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_mask_blend_epi64 (__mmask8 __U, __m128i __A, __m128i __W) {
+ return (__m128i) __builtin_ia32_selectq_128 ((__mmask8) __U,
+ (__v2di) __W,
+ (__v2di) __A);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS
+_mm256_mask_blend_epi64 (__mmask8 __U, __m256i __A, __m256i __W) {
+ return (__m256i) __builtin_ia32_selectq_256 ((__mmask8) __U,
+ (__v4di) __W,
+ (__v4di) __A);
+}
+
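+/* Editorial sketch -- not part of the upstream header.  Note the operand
+   order of the blends above: lanes with a set mask bit come from the second
+   vector operand (__W) and clear bits fall through to the first (__A), as
+   the select builtin's argument order shows.  With mask 0xC (binary 1100)
+   the upper two dwords come from __W.  The __demo_* name is illustrative
+   only. */
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+__demo_mask_blend (__m128i __A, __m128i __W)
+{
+  return _mm_mask_blend_epi32 (0xC, __A, __W);
+}
+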
+static __inline__ __m128d __DEFAULT_FN_ATTRS
+_mm_mask_compress_pd (__m128d __W, __mmask8 __U, __m128d __A) {
+ return (__m128d) __builtin_ia32_compressdf128_mask ((__v2df) __A,
+ (__v2df) __W,
+ (__mmask8) __U);
+}
+
+static __inline__ __m128d __DEFAULT_FN_ATTRS
+_mm_maskz_compress_pd (__mmask8 __U, __m128d __A) {
+ return (__m128d) __builtin_ia32_compressdf128_mask ((__v2df) __A,
+ (__v2df)
+ _mm_setzero_pd (),
+ (__mmask8) __U);
+}
+
+static __inline__ __m256d __DEFAULT_FN_ATTRS
+_mm256_mask_compress_pd (__m256d __W, __mmask8 __U, __m256d __A) {
+ return (__m256d) __builtin_ia32_compressdf256_mask ((__v4df) __A,
+ (__v4df) __W,
+ (__mmask8) __U);
+}
+
+static __inline__ __m256d __DEFAULT_FN_ATTRS
+_mm256_maskz_compress_pd (__mmask8 __U, __m256d __A) {
+ return (__m256d) __builtin_ia32_compressdf256_mask ((__v4df) __A,
+ (__v4df)
+ _mm256_setzero_pd (),
+ (__mmask8) __U);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_mask_compress_epi64 (__m128i __W, __mmask8 __U, __m128i __A) {
+ return (__m128i) __builtin_ia32_compressdi128_mask ((__v2di) __A,
+ (__v2di) __W,
+ (__mmask8) __U);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_maskz_compress_epi64 (__mmask8 __U, __m128i __A) {
+ return (__m128i) __builtin_ia32_compressdi128_mask ((__v2di) __A,
+ (__v2di)
+ _mm_setzero_si128 (),
+ (__mmask8) __U);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS
+_mm256_mask_compress_epi64 (__m256i __W, __mmask8 __U, __m256i __A) {
+ return (__m256i) __builtin_ia32_compressdi256_mask ((__v4di) __A,
+ (__v4di) __W,
+ (__mmask8) __U);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS
+_mm256_maskz_compress_epi64 (__mmask8 __U, __m256i __A) {
+ return (__m256i) __builtin_ia32_compressdi256_mask ((__v4di) __A,
+ (__v4di)
+ _mm256_setzero_si256 (),
+ (__mmask8) __U);
+}
+
+static __inline__ __m128 __DEFAULT_FN_ATTRS
+_mm_mask_compress_ps (__m128 __W, __mmask8 __U, __m128 __A) {
+ return (__m128) __builtin_ia32_compresssf128_mask ((__v4sf) __A,
+ (__v4sf) __W,
+ (__mmask8) __U);
+}
+
+static __inline__ __m128 __DEFAULT_FN_ATTRS
+_mm_maskz_compress_ps (__mmask8 __U, __m128 __A) {
+ return (__m128) __builtin_ia32_compresssf128_mask ((__v4sf) __A,
+ (__v4sf)
+ _mm_setzero_ps (),
+ (__mmask8) __U);
+}
+
+static __inline__ __m256 __DEFAULT_FN_ATTRS
+_mm256_mask_compress_ps (__m256 __W, __mmask8 __U, __m256 __A) {
+ return (__m256) __builtin_ia32_compresssf256_mask ((__v8sf) __A,
+ (__v8sf) __W,
+ (__mmask8) __U);
+}
+
+static __inline__ __m256 __DEFAULT_FN_ATTRS
+_mm256_maskz_compress_ps (__mmask8 __U, __m256 __A) {
+ return (__m256) __builtin_ia32_compresssf256_mask ((__v8sf) __A,
+ (__v8sf)
+ _mm256_setzero_ps (),
+ (__mmask8) __U);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_mask_compress_epi32 (__m128i __W, __mmask8 __U, __m128i __A) {
+ return (__m128i) __builtin_ia32_compresssi128_mask ((__v4si) __A,
+ (__v4si) __W,
+ (__mmask8) __U);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_maskz_compress_epi32 (__mmask8 __U, __m128i __A) {
+ return (__m128i) __builtin_ia32_compresssi128_mask ((__v4si) __A,
+ (__v4si)
+ _mm_setzero_si128 (),
+ (__mmask8) __U);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS
+_mm256_mask_compress_epi32 (__m256i __W, __mmask8 __U, __m256i __A) {
+ return (__m256i) __builtin_ia32_compresssi256_mask ((__v8si) __A,
+ (__v8si) __W,
+ (__mmask8) __U);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS
+_mm256_maskz_compress_epi32 (__mmask8 __U, __m256i __A) {
+ return (__m256i) __builtin_ia32_compresssi256_mask ((__v8si) __A,
+ (__v8si)
+ _mm256_setzero_si256 (),
+ (__mmask8) __U);
+}
+
+static __inline__ void __DEFAULT_FN_ATTRS
+_mm_mask_compressstoreu_pd (void *__P, __mmask8 __U, __m128d __A) {
+ __builtin_ia32_compressstoredf128_mask ((__v2df *) __P,
+ (__v2df) __A,
+ (__mmask8) __U);
+}
+
+static __inline__ void __DEFAULT_FN_ATTRS
+_mm256_mask_compressstoreu_pd (void *__P, __mmask8 __U, __m256d __A) {
+ __builtin_ia32_compressstoredf256_mask ((__v4df *) __P,
+ (__v4df) __A,
+ (__mmask8) __U);
+}
+
+static __inline__ void __DEFAULT_FN_ATTRS
+_mm_mask_compressstoreu_epi64 (void *__P, __mmask8 __U, __m128i __A) {
+ __builtin_ia32_compressstoredi128_mask ((__v2di *) __P,
+ (__v2di) __A,
+ (__mmask8) __U);
+}
+
+static __inline__ void __DEFAULT_FN_ATTRS
+_mm256_mask_compressstoreu_epi64 (void *__P, __mmask8 __U, __m256i __A) {
+ __builtin_ia32_compressstoredi256_mask ((__v4di *) __P,
+ (__v4di) __A,
+ (__mmask8) __U);
+}
+
+static __inline__ void __DEFAULT_FN_ATTRS
+_mm_mask_compressstoreu_ps (void *__P, __mmask8 __U, __m128 __A) {
+ __builtin_ia32_compressstoresf128_mask ((__v4sf *) __P,
+ (__v4sf) __A,
+ (__mmask8) __U);
+}
+
+static __inline__ void __DEFAULT_FN_ATTRS
+_mm256_mask_compressstoreu_ps (void *__P, __mmask8 __U, __m256 __A) {
+ __builtin_ia32_compressstoresf256_mask ((__v8sf *) __P,
+ (__v8sf) __A,
+ (__mmask8) __U);
+}
+
+static __inline__ void __DEFAULT_FN_ATTRS
+_mm_mask_compressstoreu_epi32 (void *__P, __mmask8 __U, __m128i __A) {
+ __builtin_ia32_compressstoresi128_mask ((__v4si *) __P,
+ (__v4si) __A,
+ (__mmask8) __U);
+}
+
+static __inline__ void __DEFAULT_FN_ATTRS
+_mm256_mask_compressstoreu_epi32 (void *__P, __mmask8 __U, __m256i __A) {
+ __builtin_ia32_compressstoresi256_mask ((__v8si *) __P,
+ (__v8si) __A,
+ (__mmask8) __U);
+}
+
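+/* Editorial sketch -- not part of the upstream header.  compress packs the
+   selected lanes contiguously toward the low end, and compressstoreu writes
+   exactly the selected elements, packed, to an unaligned address -- a
+   common stream-filtering pattern.  __P is assumed to have room for up to
+   four ints; the __demo_* name is illustrative only. */
+static __inline__ void __DEFAULT_FN_ATTRS
+__demo_filter_epi32 (void *__P, __mmask8 __U, __m128i __A)
+{
+  /* Writes one packed dword to __P for each set bit in __U. */
+  _mm_mask_compressstoreu_epi32 (__P, __U, __A);
+}
+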
+static __inline__ __m128d __DEFAULT_FN_ATTRS
+_mm_mask_cvtepi32_pd (__m128d __W, __mmask8 __U, __m128i __A) {
+ return (__m128d) __builtin_ia32_cvtdq2pd128_mask ((__v4si) __A,
+ (__v2df) __W,
+ (__mmask8) __U);
+}
+
+static __inline__ __m128d __DEFAULT_FN_ATTRS
+_mm_maskz_cvtepi32_pd (__mmask8 __U, __m128i __A) {
+ return (__m128d) __builtin_ia32_cvtdq2pd128_mask ((__v4si) __A,
+ (__v2df)
+ _mm_setzero_pd (),
+ (__mmask8) __U);
+}
+
+static __inline__ __m256d __DEFAULT_FN_ATTRS
+_mm256_mask_cvtepi32_pd (__m256d __W, __mmask8 __U, __m128i __A) {
+ return (__m256d) __builtin_ia32_cvtdq2pd256_mask ((__v4si) __A,
+ (__v4df) __W,
+ (__mmask8) __U);
+}
+
+static __inline__ __m256d __DEFAULT_FN_ATTRS
+_mm256_maskz_cvtepi32_pd (__mmask8 __U, __m128i __A) {
+ return (__m256d) __builtin_ia32_cvtdq2pd256_mask ((__v4si) __A,
+ (__v4df)
+ _mm256_setzero_pd (),
+ (__mmask8) __U);
+}
+
+static __inline__ __m128 __DEFAULT_FN_ATTRS
+_mm_mask_cvtepi32_ps (__m128 __W, __mmask8 __U, __m128i __A) {
+ return (__m128) __builtin_ia32_cvtdq2ps128_mask ((__v4si) __A,
+ (__v4sf) __W,
+ (__mmask8) __U);
+}
+
+static __inline__ __m128 __DEFAULT_FN_ATTRS
+_mm_maskz_cvtepi32_ps (__mmask8 __U, __m128i __A) {
+ return (__m128) __builtin_ia32_cvtdq2ps128_mask ((__v4si) __A,
+ (__v4sf)
+ _mm_setzero_ps (),
+ (__mmask8) __U);
+}
+
+static __inline__ __m256 __DEFAULT_FN_ATTRS
+_mm256_mask_cvtepi32_ps (__m256 __W, __mmask8 __U, __m256i __A) {
+ return (__m256) __builtin_ia32_cvtdq2ps256_mask ((__v8si) __A,
+ (__v8sf) __W,
+ (__mmask8) __U);
+}
+
+static __inline__ __m256 __DEFAULT_FN_ATTRS
+_mm256_maskz_cvtepi32_ps (__mmask8 __U, __m256i __A) {
+ return (__m256) __builtin_ia32_cvtdq2ps256_mask ((__v8si) __A,
+ (__v8sf)
+ _mm256_setzero_ps (),
+ (__mmask8) __U);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_mask_cvtpd_epi32 (__m128i __W, __mmask8 __U, __m128d __A) {
+ return (__m128i) __builtin_ia32_cvtpd2dq128_mask ((__v2df) __A,
+ (__v4si) __W,
+ (__mmask8) __U);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_maskz_cvtpd_epi32 (__mmask8 __U, __m128d __A) {
+ return (__m128i) __builtin_ia32_cvtpd2dq128_mask ((__v2df) __A,
+ (__v4si)
+ _mm_setzero_si128 (),
+ (__mmask8) __U);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm256_mask_cvtpd_epi32 (__m128i __W, __mmask8 __U, __m256d __A) {
+ return (__m128i) __builtin_ia32_cvtpd2dq256_mask ((__v4df) __A,
+ (__v4si) __W,
+ (__mmask8) __U);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm256_maskz_cvtpd_epi32 (__mmask8 __U, __m256d __A) {
+ return (__m128i) __builtin_ia32_cvtpd2dq256_mask ((__v4df) __A,
+ (__v4si)
+ _mm_setzero_si128 (),
+ (__mmask8) __U);
+}
+
+static __inline__ __m128 __DEFAULT_FN_ATTRS
+_mm_mask_cvtpd_ps (__m128 __W, __mmask8 __U, __m128d __A) {
+ return (__m128) __builtin_ia32_cvtpd2ps_mask ((__v2df) __A,
+ (__v4sf) __W,
+ (__mmask8) __U);
+}
+
+static __inline__ __m128 __DEFAULT_FN_ATTRS
+_mm_maskz_cvtpd_ps (__mmask8 __U, __m128d __A) {
+ return (__m128) __builtin_ia32_cvtpd2ps_mask ((__v2df) __A,
+ (__v4sf)
+ _mm_setzero_ps (),
+ (__mmask8) __U);
+}
+
+static __inline__ __m128 __DEFAULT_FN_ATTRS
+_mm256_mask_cvtpd_ps (__m128 __W, __mmask8 __U, __m256d __A) {
+ return (__m128) __builtin_ia32_cvtpd2ps256_mask ((__v4df) __A,
+ (__v4sf) __W,
+ (__mmask8) __U);
+}
+
+static __inline__ __m128 __DEFAULT_FN_ATTRS
+_mm256_maskz_cvtpd_ps (__mmask8 __U, __m256d __A) {
+ return (__m128) __builtin_ia32_cvtpd2ps256_mask ((__v4df) __A,
+ (__v4sf)
+ _mm_setzero_ps (),
+ (__mmask8) __U);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_cvtpd_epu32 (__m128d __A) {
+ return (__m128i) __builtin_ia32_cvtpd2udq128_mask ((__v2df) __A,
+ (__v4si)
+ _mm_setzero_si128 (),
+ (__mmask8) -1);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_mask_cvtpd_epu32 (__m128i __W, __mmask8 __U, __m128d __A) {
+ return (__m128i) __builtin_ia32_cvtpd2udq128_mask ((__v2df) __A,
+ (__v4si) __W,
+ (__mmask8) __U);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_maskz_cvtpd_epu32 (__mmask8 __U, __m128d __A) {
+ return (__m128i) __builtin_ia32_cvtpd2udq128_mask ((__v2df) __A,
+ (__v4si)
+ _mm_setzero_si128 (),
+ (__mmask8) __U);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm256_cvtpd_epu32 (__m256d __A) {
+ return (__m128i) __builtin_ia32_cvtpd2udq256_mask ((__v4df) __A,
+ (__v4si)
+ _mm_setzero_si128 (),
+ (__mmask8) -1);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm256_mask_cvtpd_epu32 (__m128i __W, __mmask8 __U, __m256d __A) {
+ return (__m128i) __builtin_ia32_cvtpd2udq256_mask ((__v4df) __A,
+ (__v4si) __W,
+ (__mmask8) __U);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm256_maskz_cvtpd_epu32 (__mmask8 __U, __m256d __A) {
+ return (__m128i) __builtin_ia32_cvtpd2udq256_mask ((__v4df) __A,
+ (__v4si)
+ _mm_setzero_si128 (),
+ (__mmask8) __U);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_mask_cvtps_epi32 (__m128i __W, __mmask8 __U, __m128 __A) {
+ return (__m128i) __builtin_ia32_cvtps2dq128_mask ((__v4sf) __A,
+ (__v4si) __W,
+ (__mmask8) __U);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_maskz_cvtps_epi32 (__mmask8 __U, __m128 __A) {
+ return (__m128i) __builtin_ia32_cvtps2dq128_mask ((__v4sf) __A,
+ (__v4si)
+ _mm_setzero_si128 (),
+ (__mmask8) __U);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS
+_mm256_mask_cvtps_epi32 (__m256i __W, __mmask8 __U, __m256 __A) {
+ return (__m256i) __builtin_ia32_cvtps2dq256_mask ((__v8sf) __A,
+ (__v8si) __W,
+ (__mmask8) __U);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS
+_mm256_maskz_cvtps_epi32 (__mmask8 __U, __m256 __A) {
+ return (__m256i) __builtin_ia32_cvtps2dq256_mask ((__v8sf) __A,
+ (__v8si)
+ _mm256_setzero_si256 (),
+ (__mmask8) __U);
+}
+
+static __inline__ __m128d __DEFAULT_FN_ATTRS
+_mm_mask_cvtps_pd (__m128d __W, __mmask8 __U, __m128 __A) {
+ return (__m128d) __builtin_ia32_cvtps2pd128_mask ((__v4sf) __A,
+ (__v2df) __W,
+ (__mmask8) __U);
+}
+
+static __inline__ __m128d __DEFAULT_FN_ATTRS
+_mm_maskz_cvtps_pd (__mmask8 __U, __m128 __A) {
+ return (__m128d) __builtin_ia32_cvtps2pd128_mask ((__v4sf) __A,
+ (__v2df)
+ _mm_setzero_pd (),
+ (__mmask8) __U);
+}
+
+static __inline__ __m256d __DEFAULT_FN_ATTRS
+_mm256_mask_cvtps_pd (__m256d __W, __mmask8 __U, __m128 __A) {
+ return (__m256d) __builtin_ia32_cvtps2pd256_mask ((__v4sf) __A,
+ (__v4df) __W,
+ (__mmask8) __U);
+}
+
+static __inline__ __m256d __DEFAULT_FN_ATTRS
+_mm256_maskz_cvtps_pd (__mmask8 __U, __m128 __A) {
+ return (__m256d) __builtin_ia32_cvtps2pd256_mask ((__v4sf) __A,
+ (__v4df)
+ _mm256_setzero_pd (),
+ (__mmask8) __U);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_cvtps_epu32 (__m128 __A) {
+ return (__m128i) __builtin_ia32_cvtps2udq128_mask ((__v4sf) __A,
+ (__v4si)
+ _mm_setzero_si128 (),
+ (__mmask8) -1);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_mask_cvtps_epu32 (__m128i __W, __mmask8 __U, __m128 __A) {
+ return (__m128i) __builtin_ia32_cvtps2udq128_mask ((__v4sf) __A,
+ (__v4si) __W,
+ (__mmask8) __U);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_maskz_cvtps_epu32 (__mmask8 __U, __m128 __A) {
+ return (__m128i) __builtin_ia32_cvtps2udq128_mask ((__v4sf) __A,
+ (__v4si)
+ _mm_setzero_si128 (),
+ (__mmask8) __U);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS
+_mm256_cvtps_epu32 (__m256 __A) {
+ return (__m256i) __builtin_ia32_cvtps2udq256_mask ((__v8sf) __A,
+ (__v8si)
+ _mm256_setzero_si256 (),
+ (__mmask8) -1);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS
+_mm256_mask_cvtps_epu32 (__m256i __W, __mmask8 __U, __m256 __A) {
+ return (__m256i) __builtin_ia32_cvtps2udq256_mask ((__v8sf) __A,
+ (__v8si) __W,
+ (__mmask8) __U);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS
+_mm256_maskz_cvtps_epu32 (__mmask8 __U, __m256 __A) {
+ return (__m256i) __builtin_ia32_cvtps2udq256_mask ((__v8sf) __A,
+ (__v8si)
+ _mm256_setzero_si256 (),
+ (__mmask8) __U);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_mask_cvttpd_epi32 (__m128i __W, __mmask8 __U, __m128d __A) {
+ return (__m128i) __builtin_ia32_cvttpd2dq128_mask ((__v2df) __A,
+ (__v4si) __W,
+ (__mmask8) __U);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_maskz_cvttpd_epi32 (__mmask8 __U, __m128d __A) {
+ return (__m128i) __builtin_ia32_cvttpd2dq128_mask ((__v2df) __A,
+ (__v4si)
+ _mm_setzero_si128 (),
+ (__mmask8) __U);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm256_mask_cvttpd_epi32 (__m128i __W, __mmask8 __U, __m256d __A) {
+ return (__m128i) __builtin_ia32_cvttpd2dq256_mask ((__v4df) __A,
+ (__v4si) __W,
+ (__mmask8) __U);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm256_maskz_cvttpd_epi32 (__mmask8 __U, __m256d __A) {
+ return (__m128i) __builtin_ia32_cvttpd2dq256_mask ((__v4df) __A,
+ (__v4si)
+ _mm_setzero_si128 (),
+ (__mmask8) __U);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_cvttpd_epu32 (__m128d __A) {
+ return (__m128i) __builtin_ia32_cvttpd2udq128_mask ((__v2df) __A,
+ (__v4si)
+ _mm_setzero_si128 (),
+ (__mmask8) -1);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_mask_cvttpd_epu32 (__m128i __W, __mmask8 __U, __m128d __A) {
+ return (__m128i) __builtin_ia32_cvttpd2udq128_mask ((__v2df) __A,
+ (__v4si) __W,
+ (__mmask8) __U);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_maskz_cvttpd_epu32 (__mmask8 __U, __m128d __A) {
+ return (__m128i) __builtin_ia32_cvttpd2udq128_mask ((__v2df) __A,
+ (__v4si)
+ _mm_setzero_si128 (),
+ (__mmask8) __U);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm256_cvttpd_epu32 (__m256d __A) {
+ return (__m128i) __builtin_ia32_cvttpd2udq256_mask ((__v4df) __A,
+ (__v4si)
+ _mm_setzero_si128 (),
+ (__mmask8) -1);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm256_mask_cvttpd_epu32 (__m128i __W, __mmask8 __U, __m256d __A) {
+ return (__m128i) __builtin_ia32_cvttpd2udq256_mask ((__v4df) __A,
+ (__v4si) __W,
+ (__mmask8) __U);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm256_maskz_cvttpd_epu32 (__mmask8 __U, __m256d __A) {
+ return (__m128i) __builtin_ia32_cvttpd2udq256_mask ((__v4df) __A,
+ (__v4si)
+ _mm_setzero_si128 (),
+ (__mmask8) __U);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_mask_cvttps_epi32 (__m128i __W, __mmask8 __U, __m128 __A) {
+ return (__m128i) __builtin_ia32_cvttps2dq128_mask ((__v4sf) __A,
+ (__v4si) __W,
+ (__mmask8) __U);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_maskz_cvttps_epi32 (__mmask8 __U, __m128 __A) {
+ return (__m128i) __builtin_ia32_cvttps2dq128_mask ((__v4sf) __A,
+ (__v4si)
+ _mm_setzero_si128 (),
+ (__mmask8) __U);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS
+_mm256_mask_cvttps_epi32 (__m256i __W, __mmask8 __U, __m256 __A) {
+ return (__m256i) __builtin_ia32_cvttps2dq256_mask ((__v8sf) __A,
+ (__v8si) __W,
+ (__mmask8) __U);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS
+_mm256_maskz_cvttps_epi32 (__mmask8 __U, __m256 __A) {
+ return (__m256i) __builtin_ia32_cvttps2dq256_mask ((__v8sf) __A,
+ (__v8si)
+ _mm256_setzero_si256 (),
+ (__mmask8) __U);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_cvttps_epu32 (__m128 __A) {
+ return (__m128i) __builtin_ia32_cvttps2udq128_mask ((__v4sf) __A,
+ (__v4si)
+ _mm_setzero_si128 (),
+ (__mmask8) -1);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_mask_cvttps_epu32 (__m128i __W, __mmask8 __U, __m128 __A) {
+ return (__m128i) __builtin_ia32_cvttps2udq128_mask ((__v4sf) __A,
+ (__v4si) __W,
+ (__mmask8) __U);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_maskz_cvttps_epu32 (__mmask8 __U, __m128 __A) {
+ return (__m128i) __builtin_ia32_cvttps2udq128_mask ((__v4sf) __A,
+ (__v4si)
+ _mm_setzero_si128 (),
+ (__mmask8) __U);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS
+_mm256_cvttps_epu32 (__m256 __A) {
+ return (__m256i) __builtin_ia32_cvttps2udq256_mask ((__v8sf) __A,
+ (__v8si)
+ _mm256_setzero_si256 (),
+ (__mmask8) -1);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS
+_mm256_mask_cvttps_epu32 (__m256i __W, __mmask8 __U, __m256 __A) {
+ return (__m256i) __builtin_ia32_cvttps2udq256_mask ((__v8sf) __A,
+ (__v8si) __W,
+ (__mmask8) __U);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS
+_mm256_maskz_cvttps_epu32 (__mmask8 __U, __m256 __A) {
+ return (__m256i) __builtin_ia32_cvttps2udq256_mask ((__v8sf) __A,
+ (__v8si)
+ _mm256_setzero_si256 (),
+ (__mmask8) __U);
+}
+
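+/* Editorial sketch -- not part of the upstream header.  The cvt conversions
+   above round using the current MXCSR rounding mode (round-to-nearest-even
+   by default) while the cvtt forms always truncate toward zero, so the two
+   differ for 2.7f.  The __demo_* name is illustrative only. */
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+__demo_cvt_vs_cvtt (void)
+{
+  __m128 __v = _mm_set1_ps (2.7f);
+  __m128i __rounded = _mm_maskz_cvtps_epu32 (0xF, __v);  /* every lane 3 */
+  __m128i __trunc = _mm_maskz_cvttps_epu32 (0xF, __v);   /* every lane 2 */
+  return _mm_add_epi32 (__rounded, __trunc);
+}
+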
+static __inline__ __m128d __DEFAULT_FN_ATTRS
+_mm_cvtepu32_pd (__m128i __A) {
+ return (__m128d) __builtin_ia32_cvtudq2pd128_mask ((__v4si) __A,
+ (__v2df)
+ _mm_setzero_pd (),
+ (__mmask8) -1);
+}
+
+static __inline__ __m128d __DEFAULT_FN_ATTRS
+_mm_mask_cvtepu32_pd (__m128d __W, __mmask8 __U, __m128i __A) {
+ return (__m128d) __builtin_ia32_cvtudq2pd128_mask ((__v4si) __A,
+ (__v2df) __W,
+ (__mmask8) __U);
+}
+
+static __inline__ __m128d __DEFAULT_FN_ATTRS
+_mm_maskz_cvtepu32_pd (__mmask8 __U, __m128i __A) {
+ return (__m128d) __builtin_ia32_cvtudq2pd128_mask ((__v4si) __A,
+ (__v2df)
+ _mm_setzero_pd (),
+ (__mmask8) __U);
+}
+
+static __inline__ __m256d __DEFAULT_FN_ATTRS
+_mm256_cvtepu32_pd (__m128i __A) {
+ return (__m256d) __builtin_ia32_cvtudq2pd256_mask ((__v4si) __A,
+ (__v4df)
+ _mm256_setzero_pd (),
+ (__mmask8) -1);
+}
+
+static __inline__ __m256d __DEFAULT_FN_ATTRS
+_mm256_mask_cvtepu32_pd (__m256d __W, __mmask8 __U, __m128i __A) {
+ return (__m256d) __builtin_ia32_cvtudq2pd256_mask ((__v4si) __A,
+ (__v4df) __W,
+ (__mmask8) __U);
+}
+
+static __inline__ __m256d __DEFAULT_FN_ATTRS
+_mm256_maskz_cvtepu32_pd (__mmask8 __U, __m128i __A) {
+ return (__m256d) __builtin_ia32_cvtudq2pd256_mask ((__v4si) __A,
+ (__v4df)
+ _mm256_setzero_pd (),
+ (__mmask8) __U);
+}
+
+static __inline__ __m128 __DEFAULT_FN_ATTRS
+_mm_cvtepu32_ps (__m128i __A) {
+ return (__m128) __builtin_ia32_cvtudq2ps128_mask ((__v4si) __A,
+ (__v4sf)
+ _mm_setzero_ps (),
+ (__mmask8) -1);
+}
+
+static __inline__ __m128 __DEFAULT_FN_ATTRS
+_mm_mask_cvtepu32_ps (__m128 __W, __mmask8 __U, __m128i __A) {
+ return (__m128) __builtin_ia32_cvtudq2ps128_mask ((__v4si) __A,
+ (__v4sf) __W,
+ (__mmask8) __U);
+}
+
+static __inline__ __m128 __DEFAULT_FN_ATTRS
+_mm_maskz_cvtepu32_ps (__mmask8 __U, __m128i __A) {
+ return (__m128) __builtin_ia32_cvtudq2ps128_mask ((__v4si) __A,
+ (__v4sf)
+ _mm_setzero_ps (),
+ (__mmask8) __U);
+}
+
+static __inline__ __m256 __DEFAULT_FN_ATTRS
+_mm256_cvtepu32_ps (__m256i __A) {
+ return (__m256) __builtin_ia32_cvtudq2ps256_mask ((__v8si) __A,
+ (__v8sf)
+ _mm256_setzero_ps (),
+ (__mmask8) -1);
+}
+
+static __inline__ __m256 __DEFAULT_FN_ATTRS
+_mm256_mask_cvtepu32_ps (__m256 __W, __mmask8 __U, __m256i __A) {
+ return (__m256) __builtin_ia32_cvtudq2ps256_mask ((__v8si) __A,
+ (__v8sf) __W,
+ (__mmask8) __U);
+}
+
+static __inline__ __m256 __DEFAULT_FN_ATTRS
+_mm256_maskz_cvtepu32_ps (__mmask8 __U, __m256i __A) {
+ return (__m256) __builtin_ia32_cvtudq2ps256_mask ((__v8si) __A,
+ (__v8sf)
+ _mm256_setzero_ps (),
+ (__mmask8) __U);
+}
+
+static __inline__ __m128d __DEFAULT_FN_ATTRS
+_mm_mask_div_pd (__m128d __W, __mmask8 __U, __m128d __A, __m128d __B) {
+ return (__m128d) __builtin_ia32_divpd_mask ((__v2df) __A,
+ (__v2df) __B,
+ (__v2df) __W,
+ (__mmask8) __U);
+}
+
+static __inline__ __m128d __DEFAULT_FN_ATTRS
+_mm_maskz_div_pd (__mmask8 __U, __m128d __A, __m128d __B) {
+ return (__m128d) __builtin_ia32_divpd_mask ((__v2df) __A,
+ (__v2df) __B,
+ (__v2df)
+ _mm_setzero_pd (),
+ (__mmask8) __U);
+}
+
+static __inline__ __m256d __DEFAULT_FN_ATTRS
+_mm256_mask_div_pd (__m256d __W, __mmask8 __U, __m256d __A,
+ __m256d __B) {
+ return (__m256d) __builtin_ia32_divpd256_mask ((__v4df) __A,
+ (__v4df) __B,
+ (__v4df) __W,
+ (__mmask8) __U);
+}
+
+static __inline__ __m256d __DEFAULT_FN_ATTRS
+_mm256_maskz_div_pd (__mmask8 __U, __m256d __A, __m256d __B) {
+ return (__m256d) __builtin_ia32_divpd256_mask ((__v4df) __A,
+ (__v4df) __B,
+ (__v4df)
+ _mm256_setzero_pd (),
+ (__mmask8) __U);
+}
+
+static __inline__ __m128 __DEFAULT_FN_ATTRS
+_mm_mask_div_ps (__m128 __W, __mmask8 __U, __m128 __A, __m128 __B) {
+ return (__m128) __builtin_ia32_divps_mask ((__v4sf) __A,
+ (__v4sf) __B,
+ (__v4sf) __W,
+ (__mmask8) __U);
+}
+
+static __inline__ __m128 __DEFAULT_FN_ATTRS
+_mm_maskz_div_ps (__mmask8 __U, __m128 __A, __m128 __B) {
+ return (__m128) __builtin_ia32_divps_mask ((__v4sf) __A,
+ (__v4sf) __B,
+ (__v4sf)
+ _mm_setzero_ps (),
+ (__mmask8) __U);
+}
+
+static __inline__ __m256 __DEFAULT_FN_ATTRS
+_mm256_mask_div_ps (__m256 __W, __mmask8 __U, __m256 __A, __m256 __B) {
+ return (__m256) __builtin_ia32_divps256_mask ((__v8sf) __A,
+ (__v8sf) __B,
+ (__v8sf) __W,
+ (__mmask8) __U);
+}
+
+static __inline__ __m256 __DEFAULT_FN_ATTRS
+_mm256_maskz_div_ps (__mmask8 __U, __m256 __A, __m256 __B) {
+ return (__m256) __builtin_ia32_divps256_mask ((__v8sf) __A,
+ (__v8sf) __B,
+ (__v8sf)
+ _mm256_setzero_ps (),
+ (__mmask8) __U);
+}
+
+static __inline__ __m128d __DEFAULT_FN_ATTRS
+_mm_mask_expand_pd (__m128d __W, __mmask8 __U, __m128d __A) {
+ return (__m128d) __builtin_ia32_expanddf128_mask ((__v2df) __A,
+ (__v2df) __W,
+ (__mmask8) __U);
+}
+
+static __inline__ __m128d __DEFAULT_FN_ATTRS
+_mm_maskz_expand_pd (__mmask8 __U, __m128d __A) {
+ return (__m128d) __builtin_ia32_expanddf128_mask ((__v2df) __A,
+ (__v2df)
+ _mm_setzero_pd (),
+ (__mmask8) __U);
+}
+
+static __inline__ __m256d __DEFAULT_FN_ATTRS
+_mm256_mask_expand_pd (__m256d __W, __mmask8 __U, __m256d __A) {
+ return (__m256d) __builtin_ia32_expanddf256_mask ((__v4df) __A,
+ (__v4df) __W,
+ (__mmask8) __U);
+}
+
+static __inline__ __m256d __DEFAULT_FN_ATTRS
+_mm256_maskz_expand_pd (__mmask8 __U, __m256d __A) {
+ return (__m256d) __builtin_ia32_expanddf256_mask ((__v4df) __A,
+ (__v4df)
+ _mm256_setzero_pd (),
+ (__mmask8) __U);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_mask_expand_epi64 (__m128i __W, __mmask8 __U, __m128i __A) {
+ return (__m128i) __builtin_ia32_expanddi128_mask ((__v2di) __A,
+ (__v2di) __W,
+ (__mmask8) __U);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_maskz_expand_epi64 (__mmask8 __U, __m128i __A) {
+ return (__m128i) __builtin_ia32_expanddi128_mask ((__v2di) __A,
+ (__v2di)
+ _mm_setzero_si128 (),
+ (__mmask8) __U);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS
+_mm256_mask_expand_epi64 (__m256i __W, __mmask8 __U, __m256i __A) {
+ return (__m256i) __builtin_ia32_expanddi256_mask ((__v4di) __A,
+ (__v4di) __W,
+ (__mmask8) __U);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS
+_mm256_maskz_expand_epi64 (__mmask8 __U, __m256i __A) {
+ return (__m256i) __builtin_ia32_expanddi256_mask ((__v4di) __A,
+ (__v4di)
+ _mm256_setzero_si256 (),
+ (__mmask8) __U);
+}
+
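+/* Usage sketch (editorial): expand is the inverse of compress -- the low
+ * popcount(mask) elements of __A are distributed, in order, to the lanes
+ * whose mask bit is set; the remaining lanes come from __W (or zero).
+ * E.g., assuming an AVX512VL target:
+ *
+ *   // mask 0b0101: a[0] -> lane 0, a[1] -> lane 2, lanes 1 and 3 zeroed
+ *   __m256d e = _mm256_maskz_expand_pd(0x5, a);
+ */
+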
+static __inline__ __m128d __DEFAULT_FN_ATTRS
+_mm_mask_expandloadu_pd (__m128d __W, __mmask8 __U, void const *__P) {
+ return (__m128d) __builtin_ia32_expandloaddf128_mask ((__v2df *) __P,
+ (__v2df) __W,
+ (__mmask8)
+ __U);
+}
+
+static __inline__ __m128d __DEFAULT_FN_ATTRS
+_mm_maskz_expandloadu_pd (__mmask8 __U, void const *__P) {
+ return (__m128d) __builtin_ia32_expandloaddf128_mask ((__v2df *) __P,
+ (__v2df)
+ _mm_setzero_pd (),
+ (__mmask8)
+ __U);
+}
+
+static __inline__ __m256d __DEFAULT_FN_ATTRS
+_mm256_mask_expandloadu_pd (__m256d __W, __mmask8 __U, void const *__P) {
+ return (__m256d) __builtin_ia32_expandloaddf256_mask ((__v4df *) __P,
+ (__v4df) __W,
+ (__mmask8)
+ __U);
+}
+
+static __inline__ __m256d __DEFAULT_FN_ATTRS
+_mm256_maskz_expandloadu_pd (__mmask8 __U, void const *__P) {
+ return (__m256d) __builtin_ia32_expandloaddf256_mask ((__v4df *) __P,
+ (__v4df)
+ _mm256_setzero_pd (),
+ (__mmask8)
+ __U);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_mask_expandloadu_epi64 (__m128i __W, __mmask8 __U, void const *__P) {
+ return (__m128i) __builtin_ia32_expandloaddi128_mask ((__v2di *) __P,
+ (__v2di) __W,
+ (__mmask8)
+ __U);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_maskz_expandloadu_epi64 (__mmask8 __U, void const *__P) {
+ return (__m128i) __builtin_ia32_expandloaddi128_mask ((__v2di *) __P,
+ (__v2di)
+ _mm_setzero_si128 (),
+ (__mmask8)
+ __U);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS
+_mm256_mask_expandloadu_epi64 (__m256i __W, __mmask8 __U,
+ void const *__P) {
+ return (__m256i) __builtin_ia32_expandloaddi256_mask ((__v4di *) __P,
+ (__v4di) __W,
+ (__mmask8)
+ __U);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS
+_mm256_maskz_expandloadu_epi64 (__mmask8 __U, void const *__P) {
+ return (__m256i) __builtin_ia32_expandloaddi256_mask ((__v4di *) __P,
+ (__v4di)
+ _mm256_setzero_si256 (),
+ (__mmask8)
+ __U);
+}
+
+static __inline__ __m128 __DEFAULT_FN_ATTRS
+_mm_mask_expandloadu_ps (__m128 __W, __mmask8 __U, void const *__P) {
+ return (__m128) __builtin_ia32_expandloadsf128_mask ((__v4sf *) __P,
+ (__v4sf) __W,
+ (__mmask8) __U);
+}
+
+static __inline__ __m128 __DEFAULT_FN_ATTRS
+_mm_maskz_expandloadu_ps (__mmask8 __U, void const *__P) {
+ return (__m128) __builtin_ia32_expandloadsf128_mask ((__v4sf *) __P,
+ (__v4sf)
+ _mm_setzero_ps (),
+ (__mmask8)
+ __U);
+}
+
+static __inline__ __m256 __DEFAULT_FN_ATTRS
+_mm256_mask_expandloadu_ps (__m256 __W, __mmask8 __U, void const *__P) {
+ return (__m256) __builtin_ia32_expandloadsf256_mask ((__v8sf *) __P,
+ (__v8sf) __W,
+ (__mmask8) __U);
+}
+
+static __inline__ __m256 __DEFAULT_FN_ATTRS
+_mm256_maskz_expandloadu_ps (__mmask8 __U, void const *__P) {
+ return (__m256) __builtin_ia32_expandloadsf256_mask ((__v8sf *) __P,
+ (__v8sf)
+ _mm256_setzero_ps (),
+ (__mmask8)
+ __U);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_mask_expandloadu_epi32 (__m128i __W, __mmask8 __U, void const *__P) {
+ return (__m128i) __builtin_ia32_expandloadsi128_mask ((__v4si *) __P,
+ (__v4si) __W,
+ (__mmask8)
+ __U);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_maskz_expandloadu_epi32 (__mmask8 __U, void const *__P) {
+ return (__m128i) __builtin_ia32_expandloadsi128_mask ((__v4si *) __P,
+ (__v4si)
+ _mm_setzero_si128 (),
+ (__mmask8) __U);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS
+_mm256_mask_expandloadu_epi32 (__m256i __W, __mmask8 __U,
+ void const *__P) {
+ return (__m256i) __builtin_ia32_expandloadsi256_mask ((__v8si *) __P,
+ (__v8si) __W,
+ (__mmask8)
+ __U);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS
+_mm256_maskz_expandloadu_epi32 (__mmask8 __U, void const *__P) {
+ return (__m256i) __builtin_ia32_expandloadsi256_mask ((__v8si *) __P,
+ (__v8si)
+ _mm256_setzero_si256 (),
+ (__mmask8)
+ __U);
+}
+
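+/* Usage sketch (editorial): the expandloadu forms read only as many
+ * elements from *__P as there are set mask bits (unaligned), then expand
+ * them as above -- useful for consuming a packed stream into selected
+ * lanes.  E.g., assuming an AVX512VL target:
+ *
+ *   double buf[2] = {1.0, 2.0};
+ *   // 1.0 -> lane 1, 2.0 -> lane 3; lanes 0 and 2 zeroed
+ *   __m256d v = _mm256_maskz_expandloadu_pd(0xA, buf);
+ */
+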
+static __inline__ __m128 __DEFAULT_FN_ATTRS
+_mm_mask_expand_ps (__m128 __W, __mmask8 __U, __m128 __A) {
+ return (__m128) __builtin_ia32_expandsf128_mask ((__v4sf) __A,
+ (__v4sf) __W,
+ (__mmask8) __U);
+}
+
+static __inline__ __m128 __DEFAULT_FN_ATTRS
+_mm_maskz_expand_ps (__mmask8 __U, __m128 __A) {
+ return (__m128) __builtin_ia32_expandsf128_mask ((__v4sf) __A,
+ (__v4sf)
+ _mm_setzero_ps (),
+ (__mmask8) __U);
+}
+
+static __inline__ __m256 __DEFAULT_FN_ATTRS
+_mm256_mask_expand_ps (__m256 __W, __mmask8 __U, __m256 __A) {
+ return (__m256) __builtin_ia32_expandsf256_mask ((__v8sf) __A,
+ (__v8sf) __W,
+ (__mmask8) __U);
+}
+
+static __inline__ __m256 __DEFAULT_FN_ATTRS
+_mm256_maskz_expand_ps (__mmask8 __U, __m256 __A) {
+ return (__m256) __builtin_ia32_expandsf256_mask ((__v8sf) __A,
+ (__v8sf)
+ _mm256_setzero_ps (),
+ (__mmask8) __U);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_mask_expand_epi32 (__m128i __W, __mmask8 __U, __m128i __A) {
+ return (__m128i) __builtin_ia32_expandsi128_mask ((__v4si) __A,
+ (__v4si) __W,
+ (__mmask8) __U);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_maskz_expand_epi32 (__mmask8 __U, __m128i __A) {
+ return (__m128i) __builtin_ia32_expandsi128_mask ((__v4si) __A,
+ (__v4si)
+ _mm_setzero_si128 (),
+ (__mmask8) __U);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS
+_mm256_mask_expand_epi32 (__m256i __W, __mmask8 __U, __m256i __A) {
+ return (__m256i) __builtin_ia32_expandsi256_mask ((__v8si) __A,
+ (__v8si) __W,
+ (__mmask8) __U);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS
+_mm256_maskz_expand_epi32 (__mmask8 __U, __m256i __A) {
+ return (__m256i) __builtin_ia32_expandsi256_mask ((__v8si) __A,
+ (__v8si)
+ _mm256_setzero_si256 (),
+ (__mmask8) __U);
+}
+
+static __inline__ __m128d __DEFAULT_FN_ATTRS
+_mm_getexp_pd (__m128d __A) {
+ return (__m128d) __builtin_ia32_getexppd128_mask ((__v2df) __A,
+ (__v2df)
+ _mm_setzero_pd (),
+ (__mmask8) -1);
+}
+
+static __inline__ __m128d __DEFAULT_FN_ATTRS
+_mm_mask_getexp_pd (__m128d __W, __mmask8 __U, __m128d __A) {
+ return (__m128d) __builtin_ia32_getexppd128_mask ((__v2df) __A,
+ (__v2df) __W,
+ (__mmask8) __U);
+}
+
+static __inline__ __m128d __DEFAULT_FN_ATTRS
+_mm_maskz_getexp_pd (__mmask8 __U, __m128d __A) {
+ return (__m128d) __builtin_ia32_getexppd128_mask ((__v2df) __A,
+ (__v2df)
+ _mm_setzero_pd (),
+ (__mmask8) __U);
+}
+
+static __inline__ __m256d __DEFAULT_FN_ATTRS
+_mm256_getexp_pd (__m256d __A) {
+ return (__m256d) __builtin_ia32_getexppd256_mask ((__v4df) __A,
+ (__v4df)
+ _mm256_setzero_pd (),
+ (__mmask8) -1);
+}
+
+static __inline__ __m256d __DEFAULT_FN_ATTRS
+_mm256_mask_getexp_pd (__m256d __W, __mmask8 __U, __m256d __A) {
+ return (__m256d) __builtin_ia32_getexppd256_mask ((__v4df) __A,
+ (__v4df) __W,
+ (__mmask8) __U);
+}
+
+static __inline__ __m256d __DEFAULT_FN_ATTRS
+_mm256_maskz_getexp_pd (__mmask8 __U, __m256d __A) {
+ return (__m256d) __builtin_ia32_getexppd256_mask ((__v4df) __A,
+ (__v4df)
+ _mm256_setzero_pd (),
+ (__mmask8) __U);
+}
+
+static __inline__ __m128 __DEFAULT_FN_ATTRS
+_mm_getexp_ps (__m128 __A) {
+ return (__m128) __builtin_ia32_getexpps128_mask ((__v4sf) __A,
+ (__v4sf)
+ _mm_setzero_ps (),
+ (__mmask8) -1);
+}
+
+static __inline__ __m128 __DEFAULT_FN_ATTRS
+_mm_mask_getexp_ps (__m128 __W, __mmask8 __U, __m128 __A) {
+ return (__m128) __builtin_ia32_getexpps128_mask ((__v4sf) __A,
+ (__v4sf) __W,
+ (__mmask8) __U);
+}
+
+static __inline__ __m128 __DEFAULT_FN_ATTRS
+_mm_maskz_getexp_ps (__mmask8 __U, __m128 __A) {
+ return (__m128) __builtin_ia32_getexpps128_mask ((__v4sf) __A,
+ (__v4sf)
+ _mm_setzero_ps (),
+ (__mmask8) __U);
+}
+
+static __inline__ __m256 __DEFAULT_FN_ATTRS
+_mm256_getexp_ps (__m256 __A) {
+ return (__m256) __builtin_ia32_getexpps256_mask ((__v8sf) __A,
+ (__v8sf)
+ _mm256_setzero_ps (),
+ (__mmask8) -1);
+}
+
+static __inline__ __m256 __DEFAULT_FN_ATTRS
+_mm256_mask_getexp_ps (__m256 __W, __mmask8 __U, __m256 __A) {
+ return (__m256) __builtin_ia32_getexpps256_mask ((__v8sf) __A,
+ (__v8sf) __W,
+ (__mmask8) __U);
+}
+
+static __inline__ __m256 __DEFAULT_FN_ATTRS
+_mm256_maskz_getexp_ps (__mmask8 __U, __m256 __A) {
+ return (__m256) __builtin_ia32_getexpps256_mask ((__v8sf) __A,
+ (__v8sf)
+ _mm256_setzero_ps (),
+ (__mmask8) __U);
+}
+
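+/* Usage sketch (editorial): getexp extracts the unbiased exponent of
+ * each lane as a floating-point value, i.e. roughly floor(log2(|x|)).
+ * A quick check, assuming an AVX512VL target:
+ *
+ *   __m128d x = _mm_set_pd(8.0, 0.5);
+ *   __m128d e = _mm_getexp_pd(x);   // lane1 = 3.0, lane0 = -1.0
+ */
+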
+static __inline__ __m128d __DEFAULT_FN_ATTRS
+_mm_mask_max_pd (__m128d __W, __mmask8 __U, __m128d __A, __m128d __B) {
+ return (__m128d) __builtin_ia32_maxpd_mask ((__v2df) __A,
+ (__v2df) __B,
+ (__v2df) __W,
+ (__mmask8) __U);
+}
+
+static __inline__ __m128d __DEFAULT_FN_ATTRS
+_mm_maskz_max_pd (__mmask8 __U, __m128d __A, __m128d __B) {
+ return (__m128d) __builtin_ia32_maxpd_mask ((__v2df) __A,
+ (__v2df) __B,
+ (__v2df)
+ _mm_setzero_pd (),
+ (__mmask8) __U);
+}
+
+static __inline__ __m256d __DEFAULT_FN_ATTRS
+_mm256_mask_max_pd (__m256d __W, __mmask8 __U, __m256d __A,
+ __m256d __B) {
+ return (__m256d) __builtin_ia32_maxpd256_mask ((__v4df) __A,
+ (__v4df) __B,
+ (__v4df) __W,
+ (__mmask8) __U);
+}
+
+static __inline__ __m256d __DEFAULT_FN_ATTRS
+_mm256_maskz_max_pd (__mmask8 __U, __m256d __A, __m256d __B) {
+ return (__m256d) __builtin_ia32_maxpd256_mask ((__v4df) __A,
+ (__v4df) __B,
+ (__v4df)
+ _mm256_setzero_pd (),
+ (__mmask8) __U);
+}
+
+static __inline__ __m128 __DEFAULT_FN_ATTRS
+_mm_mask_max_ps (__m128 __W, __mmask8 __U, __m128 __A, __m128 __B) {
+ return (__m128) __builtin_ia32_maxps_mask ((__v4sf) __A,
+ (__v4sf) __B,
+ (__v4sf) __W,
+ (__mmask8) __U);
+}
+
+static __inline__ __m128 __DEFAULT_FN_ATTRS
+_mm_maskz_max_ps (__mmask8 __U, __m128 __A, __m128 __B) {
+ return (__m128) __builtin_ia32_maxps_mask ((__v4sf) __A,
+ (__v4sf) __B,
+ (__v4sf)
+ _mm_setzero_ps (),
+ (__mmask8) __U);
+}
+
+static __inline__ __m256 __DEFAULT_FN_ATTRS
+_mm256_mask_max_ps (__m256 __W, __mmask8 __U, __m256 __A, __m256 __B) {
+ return (__m256) __builtin_ia32_maxps256_mask ((__v8sf) __A,
+ (__v8sf) __B,
+ (__v8sf) __W,
+ (__mmask8) __U);
+}
+
+static __inline__ __m256 __DEFAULT_FN_ATTRS
+_mm256_maskz_max_ps (__mmask8 __U, __m256 __A, __m256 __B) {
+ return (__m256) __builtin_ia32_maxps256_mask ((__v8sf) __A,
+ (__v8sf) __B,
+ (__v8sf)
+ _mm256_setzero_ps (),
+ (__mmask8) __U);
+}
+
+static __inline__ __m128d __DEFAULT_FN_ATTRS
+_mm_mask_min_pd (__m128d __W, __mmask8 __U, __m128d __A, __m128d __B) {
+ return (__m128d) __builtin_ia32_minpd_mask ((__v2df) __A,
+ (__v2df) __B,
+ (__v2df) __W,
+ (__mmask8) __U);
+}
+
+static __inline__ __m128d __DEFAULT_FN_ATTRS
+_mm_maskz_min_pd (__mmask8 __U, __m128d __A, __m128d __B) {
+ return (__m128d) __builtin_ia32_minpd_mask ((__v2df) __A,
+ (__v2df) __B,
+ (__v2df)
+ _mm_setzero_pd (),
+ (__mmask8) __U);
+}
+
+static __inline__ __m256d __DEFAULT_FN_ATTRS
+_mm256_mask_min_pd (__m256d __W, __mmask8 __U, __m256d __A,
+ __m256d __B) {
+ return (__m256d) __builtin_ia32_minpd256_mask ((__v4df) __A,
+ (__v4df) __B,
+ (__v4df) __W,
+ (__mmask8) __U);
+}
+
+static __inline__ __m256d __DEFAULT_FN_ATTRS
+_mm256_maskz_min_pd (__mmask8 __U, __m256d __A, __m256d __B) {
+ return (__m256d) __builtin_ia32_minpd256_mask ((__v4df) __A,
+ (__v4df) __B,
+ (__v4df)
+ _mm256_setzero_pd (),
+ (__mmask8) __U);
+}
+
+static __inline__ __m128 __DEFAULT_FN_ATTRS
+_mm_mask_min_ps (__m128 __W, __mmask8 __U, __m128 __A, __m128 __B) {
+ return (__m128) __builtin_ia32_minps_mask ((__v4sf) __A,
+ (__v4sf) __B,
+ (__v4sf) __W,
+ (__mmask8) __U);
+}
+
+static __inline__ __m128 __DEFAULT_FN_ATTRS
+_mm_maskz_min_ps (__mmask8 __U, __m128 __A, __m128 __B) {
+ return (__m128) __builtin_ia32_minps_mask ((__v4sf) __A,
+ (__v4sf) __B,
+ (__v4sf)
+ _mm_setzero_ps (),
+ (__mmask8) __U);
+}
+
+static __inline__ __m256 __DEFAULT_FN_ATTRS
+_mm256_mask_min_ps (__m256 __W, __mmask8 __U, __m256 __A, __m256 __B) {
+ return (__m256) __builtin_ia32_minps256_mask ((__v8sf) __A,
+ (__v8sf) __B,
+ (__v8sf) __W,
+ (__mmask8) __U);
+}
+
+static __inline__ __m256 __DEFAULT_FN_ATTRS
+_mm256_maskz_min_ps (__mmask8 __U, __m256 __A, __m256 __B) {
+ return (__m256) __builtin_ia32_minps256_mask ((__v8sf) __A,
+ (__v8sf) __B,
+ (__v8sf)
+ _mm256_setzero_ps (),
+ (__mmask8) __U);
+}
+
+static __inline__ __m128d __DEFAULT_FN_ATTRS
+_mm_mask_mul_pd (__m128d __W, __mmask8 __U, __m128d __A, __m128d __B) {
+ return (__m128d) __builtin_ia32_mulpd_mask ((__v2df) __A,
+ (__v2df) __B,
+ (__v2df) __W,
+ (__mmask8) __U);
+}
+
+static __inline__ __m128d __DEFAULT_FN_ATTRS
+_mm_maskz_mul_pd (__mmask8 __U, __m128d __A, __m128d __B) {
+ return (__m128d) __builtin_ia32_mulpd_mask ((__v2df) __A,
+ (__v2df) __B,
+ (__v2df)
+ _mm_setzero_pd (),
+ (__mmask8) __U);
+}
+
+static __inline__ __m256d __DEFAULT_FN_ATTRS
+_mm256_mask_mul_pd (__m256d __W, __mmask8 __U, __m256d __A,
+ __m256d __B) {
+ return (__m256d) __builtin_ia32_mulpd256_mask ((__v4df) __A,
+ (__v4df) __B,
+ (__v4df) __W,
+ (__mmask8) __U);
+}
+
+static __inline__ __m256d __DEFAULT_FN_ATTRS
+_mm256_maskz_mul_pd (__mmask8 __U, __m256d __A, __m256d __B) {
+ return (__m256d) __builtin_ia32_mulpd256_mask ((__v4df) __A,
+ (__v4df) __B,
+ (__v4df)
+ _mm256_setzero_pd (),
+ (__mmask8) __U);
+}
+
+static __inline__ __m128 __DEFAULT_FN_ATTRS
+_mm_mask_mul_ps (__m128 __W, __mmask8 __U, __m128 __A, __m128 __B) {
+ return (__m128) __builtin_ia32_mulps_mask ((__v4sf) __A,
+ (__v4sf) __B,
+ (__v4sf) __W,
+ (__mmask8) __U);
+}
+
+static __inline__ __m128 __DEFAULT_FN_ATTRS
+_mm_maskz_mul_ps (__mmask8 __U, __m128 __A, __m128 __B) {
+ return (__m128) __builtin_ia32_mulps_mask ((__v4sf) __A,
+ (__v4sf) __B,
+ (__v4sf)
+ _mm_setzero_ps (),
+ (__mmask8) __U);
+}
+
+static __inline__ __m256 __DEFAULT_FN_ATTRS
+_mm256_mask_mul_ps (__m256 __W, __mmask8 __U, __m256 __A, __m256 __B) {
+ return (__m256) __builtin_ia32_mulps256_mask ((__v8sf) __A,
+ (__v8sf) __B,
+ (__v8sf) __W,
+ (__mmask8) __U);
+}
+
+static __inline__ __m256 __DEFAULT_FN_ATTRS
+_mm256_maskz_mul_ps (__mmask8 __U, __m256 __A, __m256 __B) {
+ return (__m256) __builtin_ia32_mulps256_mask ((__v8sf) __A,
+ (__v8sf) __B,
+ (__v8sf)
+ _mm256_setzero_ps (),
+ (__mmask8) __U);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_mask_abs_epi32 (__m128i __W, __mmask8 __U, __m128i __A) {
+ return (__m128i) __builtin_ia32_pabsd128_mask ((__v4si) __A,
+ (__v4si) __W,
+ (__mmask8) __U);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_maskz_abs_epi32 (__mmask8 __U, __m128i __A) {
+ return (__m128i) __builtin_ia32_pabsd128_mask ((__v4si) __A,
+ (__v4si)
+ _mm_setzero_si128 (),
+ (__mmask8) __U);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS
+_mm256_mask_abs_epi32 (__m256i __W, __mmask8 __U, __m256i __A) {
+ return (__m256i) __builtin_ia32_pabsd256_mask ((__v8si) __A,
+ (__v8si) __W,
+ (__mmask8) __U);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS
+_mm256_maskz_abs_epi32 (__mmask8 __U, __m256i __A) {
+ return (__m256i) __builtin_ia32_pabsd256_mask ((__v8si) __A,
+ (__v8si)
+ _mm256_setzero_si256 (),
+ (__mmask8) __U);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_abs_epi64 (__m128i __A) {
+ return (__m128i) __builtin_ia32_pabsq128_mask ((__v2di) __A,
+ (__v2di)
+ _mm_setzero_si128 (),
+ (__mmask8) -1);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_mask_abs_epi64 (__m128i __W, __mmask8 __U, __m128i __A) {
+ return (__m128i) __builtin_ia32_pabsq128_mask ((__v2di) __A,
+ (__v2di) __W,
+ (__mmask8) __U);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_maskz_abs_epi64 (__mmask8 __U, __m128i __A) {
+ return (__m128i) __builtin_ia32_pabsq128_mask ((__v2di) __A,
+ (__v2di)
+ _mm_setzero_si128 (),
+ (__mmask8) __U);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS
+_mm256_abs_epi64 (__m256i __A) {
+ return (__m256i) __builtin_ia32_pabsq256_mask ((__v4di) __A,
+ (__v4di)
+ _mm256_setzero_si256 (),
+ (__mmask8) -1);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS
+_mm256_mask_abs_epi64 (__m256i __W, __mmask8 __U, __m256i __A) {
+ return (__m256i) __builtin_ia32_pabsq256_mask ((__v4di) __A,
+ (__v4di) __W,
+ (__mmask8) __U);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS
+_mm256_maskz_abs_epi64 (__mmask8 __U, __m256i __A) {
+ return (__m256i) __builtin_ia32_pabsq256_mask ((__v4di) __A,
+ (__v4di)
+ _mm256_setzero_si256 (),
+ (__mmask8) __U);
+}
+
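+/* Editorial note: unlike abs_epi32, the 64-bit absolute value has no
+ * SSE/AVX2 counterpart -- _mm_abs_epi64 and _mm256_abs_epi64 are new
+ * with AVX-512VL, hence the unmasked forms defined here with an all-ones
+ * mask.  A hypothetical use, assuming an AVX512VL target:
+ *
+ *   __m128i m = _mm_abs_epi64(_mm_set_epi64x(-3, 7));  // lanes {7, 3}
+ */
+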
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_maskz_max_epi32 (__mmask8 __M, __m128i __A, __m128i __B) {
+ return (__m128i) __builtin_ia32_pmaxsd128_mask ((__v4si) __A,
+ (__v4si) __B,
+ (__v4si)
+ _mm_setzero_si128 (),
+ __M);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_mask_max_epi32 (__m128i __W, __mmask8 __M, __m128i __A,
+ __m128i __B) {
+ return (__m128i) __builtin_ia32_pmaxsd128_mask ((__v4si) __A,
+ (__v4si) __B,
+ (__v4si) __W, __M);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS
+_mm256_maskz_max_epi32 (__mmask8 __M, __m256i __A, __m256i __B) {
+ return (__m256i) __builtin_ia32_pmaxsd256_mask ((__v8si) __A,
+ (__v8si) __B,
+ (__v8si)
+ _mm256_setzero_si256 (),
+ __M);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS
+_mm256_mask_max_epi32 (__m256i __W, __mmask8 __M, __m256i __A,
+ __m256i __B) {
+ return (__m256i) __builtin_ia32_pmaxsd256_mask ((__v8si) __A,
+ (__v8si) __B,
+ (__v8si) __W, __M);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_maskz_max_epi64 (__mmask8 __M, __m128i __A, __m128i __B) {
+ return (__m128i) __builtin_ia32_pmaxsq128_mask ((__v2di) __A,
+ (__v2di) __B,
+ (__v2di)
+ _mm_setzero_si128 (),
+ __M);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_mask_max_epi64 (__m128i __W, __mmask8 __M, __m128i __A,
+ __m128i __B) {
+ return (__m128i) __builtin_ia32_pmaxsq128_mask ((__v2di) __A,
+ (__v2di) __B,
+ (__v2di) __W, __M);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_max_epi64 (__m128i __A, __m128i __B) {
+ return (__m128i) __builtin_ia32_pmaxsq128_mask ((__v2di) __A,
+ (__v2di) __B,
+ (__v2di)
+ _mm_setzero_si128 (),
+ (__mmask8) -1);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS
+_mm256_maskz_max_epi64 (__mmask8 __M, __m256i __A, __m256i __B) {
+ return (__m256i) __builtin_ia32_pmaxsq256_mask ((__v4di) __A,
+ (__v4di) __B,
+ (__v4di)
+ _mm256_setzero_si256 (),
+ __M);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS
+_mm256_mask_max_epi64 (__m256i __W, __mmask8 __M, __m256i __A,
+ __m256i __B) {
+ return (__m256i) __builtin_ia32_pmaxsq256_mask ((__v4di) __A,
+ (__v4di) __B,
+ (__v4di) __W, __M);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS
+_mm256_max_epi64 (__m256i __A, __m256i __B) {
+ return (__m256i) __builtin_ia32_pmaxsq256_mask ((__v4di) __A,
+ (__v4di) __B,
+ (__v4di)
+ _mm256_setzero_si256 (),
+ (__mmask8) -1);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_maskz_max_epu32 (__mmask8 __M, __m128i __A, __m128i __B) {
+ return (__m128i) __builtin_ia32_pmaxud128_mask ((__v4si) __A,
+ (__v4si) __B,
+ (__v4si)
+ _mm_setzero_si128 (),
+ __M);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_mask_max_epu32 (__m128i __W, __mmask8 __M, __m128i __A,
+ __m128i __B) {
+ return (__m128i) __builtin_ia32_pmaxud128_mask ((__v4si) __A,
+ (__v4si) __B,
+ (__v4si) __W, __M);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS
+_mm256_maskz_max_epu32 (__mmask8 __M, __m256i __A, __m256i __B) {
+ return (__m256i) __builtin_ia32_pmaxud256_mask ((__v8si) __A,
+ (__v8si) __B,
+ (__v8si)
+ _mm256_setzero_si256 (),
+ __M);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS
+_mm256_mask_max_epu32 (__m256i __W, __mmask8 __M, __m256i __A,
+ __m256i __B) {
+ return (__m256i) __builtin_ia32_pmaxud256_mask ((__v8si) __A,
+ (__v8si) __B,
+ (__v8si) __W, __M);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_maskz_max_epu64 (__mmask8 __M, __m128i __A, __m128i __B) {
+ return (__m128i) __builtin_ia32_pmaxuq128_mask ((__v2di) __A,
+ (__v2di) __B,
+ (__v2di)
+ _mm_setzero_si128 (),
+ __M);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_max_epu64 (__m128i __A, __m128i __B) {
+ return (__m128i) __builtin_ia32_pmaxuq128_mask ((__v2di) __A,
+ (__v2di) __B,
+ (__v2di)
+ _mm_setzero_si128 (),
+ (__mmask8) -1);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_mask_max_epu64 (__m128i __W, __mmask8 __M, __m128i __A,
+ __m128i __B) {
+ return (__m128i) __builtin_ia32_pmaxuq128_mask ((__v2di) __A,
+ (__v2di) __B,
+ (__v2di) __W, __M);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS
+_mm256_maskz_max_epu64 (__mmask8 __M, __m256i __A, __m256i __B) {
+ return (__m256i) __builtin_ia32_pmaxuq256_mask ((__v4di) __A,
+ (__v4di) __B,
+ (__v4di)
+ _mm256_setzero_si256 (),
+ __M);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS
+_mm256_max_epu64 (__m256i __A, __m256i __B) {
+ return (__m256i) __builtin_ia32_pmaxuq256_mask ((__v4di) __A,
+ (__v4di) __B,
+ (__v4di)
+ _mm256_setzero_si256 (),
+ (__mmask8) -1);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS
+_mm256_mask_max_epu64 (__m256i __W, __mmask8 __M, __m256i __A,
+ __m256i __B) {
+ return (__m256i) __builtin_ia32_pmaxuq256_mask ((__v4di) __A,
+ (__v4di) __B,
+ (__v4di) __W, __M);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_maskz_min_epi32 (__mmask8 __M, __m128i __A, __m128i __B) {
+ return (__m128i) __builtin_ia32_pminsd128_mask ((__v4si) __A,
+ (__v4si) __B,
+ (__v4si)
+ _mm_setzero_si128 (),
+ __M);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_mask_min_epi32 (__m128i __W, __mmask8 __M, __m128i __A,
+ __m128i __B) {
+ return (__m128i) __builtin_ia32_pminsd128_mask ((__v4si) __A,
+ (__v4si) __B,
+ (__v4si) __W, __M);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS
+_mm256_maskz_min_epi32 (__mmask8 __M, __m256i __A, __m256i __B) {
+ return (__m256i) __builtin_ia32_pminsd256_mask ((__v8si) __A,
+ (__v8si) __B,
+ (__v8si)
+ _mm256_setzero_si256 (),
+ __M);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS
+_mm256_mask_min_epi32 (__m256i __W, __mmask8 __M, __m256i __A,
+ __m256i __B) {
+ return (__m256i) __builtin_ia32_pminsd256_mask ((__v8si) __A,
+ (__v8si) __B,
+ (__v8si) __W, __M);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_min_epi64 (__m128i __A, __m128i __B) {
+ return (__m128i) __builtin_ia32_pminsq128_mask ((__v2di) __A,
+ (__v2di) __B,
+ (__v2di)
+ _mm_setzero_si128 (),
+ (__mmask8) -1);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_mask_min_epi64 (__m128i __W, __mmask8 __M, __m128i __A,
+ __m128i __B) {
+ return (__m128i) __builtin_ia32_pminsq128_mask ((__v2di) __A,
+ (__v2di) __B,
+ (__v2di) __W, __M);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_maskz_min_epi64 (__mmask8 __M, __m128i __A, __m128i __B) {
+ return (__m128i) __builtin_ia32_pminsq128_mask ((__v2di) __A,
+ (__v2di) __B,
+ (__v2di)
+ _mm_setzero_si128 (),
+ __M);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS
+_mm256_min_epi64 (__m256i __A, __m256i __B) {
+ return (__m256i) __builtin_ia32_pminsq256_mask ((__v4di) __A,
+ (__v4di) __B,
+ (__v4di)
+ _mm256_setzero_si256 (),
+ (__mmask8) -1);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS
+_mm256_mask_min_epi64 (__m256i __W, __mmask8 __M, __m256i __A,
+ __m256i __B) {
+ return (__m256i) __builtin_ia32_pminsq256_mask ((__v4di) __A,
+ (__v4di) __B,
+ (__v4di) __W, __M);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS
+_mm256_maskz_min_epi64 (__mmask8 __M, __m256i __A, __m256i __B) {
+ return (__m256i) __builtin_ia32_pminsq256_mask ((__v4di) __A,
+ (__v4di) __B,
+ (__v4di)
+ _mm256_setzero_si256 (),
+ __M);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_maskz_min_epu32 (__mmask8 __M, __m128i __A, __m128i __B) {
+ return (__m128i) __builtin_ia32_pminud128_mask ((__v4si) __A,
+ (__v4si) __B,
+ (__v4si)
+ _mm_setzero_si128 (),
+ __M);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_mask_min_epu32 (__m128i __W, __mmask8 __M, __m128i __A,
+ __m128i __B) {
+ return (__m128i) __builtin_ia32_pminud128_mask ((__v4si) __A,
+ (__v4si) __B,
+ (__v4si) __W, __M);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS
+_mm256_maskz_min_epu32 (__mmask8 __M, __m256i __A, __m256i __B) {
+ return (__m256i) __builtin_ia32_pminud256_mask ((__v8si) __A,
+ (__v8si) __B,
+ (__v8si)
+ _mm256_setzero_si256 (),
+ __M);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS
+_mm256_mask_min_epu32 (__m256i __W, __mmask8 __M, __m256i __A,
+ __m256i __B) {
+ return (__m256i) __builtin_ia32_pminud256_mask ((__v8si) __A,
+ (__v8si) __B,
+ (__v8si) __W, __M);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_min_epu64 (__m128i __A, __m128i __B) {
+ return (__m128i) __builtin_ia32_pminuq128_mask ((__v2di) __A,
+ (__v2di) __B,
+ (__v2di)
+ _mm_setzero_si128 (),
+ (__mmask8) -1);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_mask_min_epu64 (__m128i __W, __mmask8 __M, __m128i __A,
+ __m128i __B) {
+ return (__m128i) __builtin_ia32_pminuq128_mask ((__v2di) __A,
+ (__v2di) __B,
+ (__v2di) __W, __M);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_maskz_min_epu64 (__mmask8 __M, __m128i __A, __m128i __B) {
+ return (__m128i) __builtin_ia32_pminuq128_mask ((__v2di) __A,
+ (__v2di) __B,
+ (__v2di)
+ _mm_setzero_si128 (),
+ __M);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS
+_mm256_min_epu64 (__m256i __A, __m256i __B) {
+ return (__m256i) __builtin_ia32_pminuq256_mask ((__v4di) __A,
+ (__v4di) __B,
+ (__v4di)
+ _mm256_setzero_si256 (),
+ (__mmask8) -1);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS
+_mm256_mask_min_epu64 (__m256i __W, __mmask8 __M, __m256i __A,
+ __m256i __B) {
+ return (__m256i) __builtin_ia32_pminuq256_mask ((__v4di) __A,
+ (__v4di) __B,
+ (__v4di) __W, __M);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS
+_mm256_maskz_min_epu64 (__mmask8 __M, __m256i __A, __m256i __B) {
+ return (__m256i) __builtin_ia32_pminuq256_mask ((__v4di) __A,
+ (__v4di) __B,
+ (__v4di)
+ _mm256_setzero_si256 (),
+ __M);
+}
+
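+/* Editorial note: the epi64/epu64 min/max above are likewise AVX-512
+ * additions (vpmaxsq, vpminuq, ...); the epi32/epu32 forms only add
+ * masking over the existing SSE4.1 instructions.  E.g., assuming an
+ * AVX512VL target:
+ *
+ *   __m256i lo  = _mm256_min_epu64(a, b);               // unsigned 64-bit min
+ *   __m256i sel = _mm256_mask_max_epi64(w, 0x3, a, b);  // lanes 0-1 only
+ */
+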
+#define _mm_roundscale_pd(A, imm) __extension__ ({ \
+ (__m128d)__builtin_ia32_rndscalepd_128_mask((__v2df)(__m128d)(A), \
+ (int)(imm), \
+ (__v2df)_mm_setzero_pd(), \
+ (__mmask8)-1); })
+
+
+#define _mm_mask_roundscale_pd(W, U, A, imm) __extension__ ({ \
+ (__m128d)__builtin_ia32_rndscalepd_128_mask((__v2df)(__m128d)(A), \
+ (int)(imm), \
+ (__v2df)(__m128d)(W), \
+ (__mmask8)(U)); })
+
+
+#define _mm_maskz_roundscale_pd(U, A, imm) __extension__ ({ \
+ (__m128d)__builtin_ia32_rndscalepd_128_mask((__v2df)(__m128d)(A), \
+ (int)(imm), \
+ (__v2df)_mm_setzero_pd(), \
+ (__mmask8)(U)); })
+
+
+#define _mm256_roundscale_pd(A, imm) __extension__ ({ \
+ (__m256d)__builtin_ia32_rndscalepd_256_mask((__v4df)(__m256d)(A), \
+ (int)(imm), \
+ (__v4df)_mm256_setzero_pd(), \
+ (__mmask8)-1); })
+
+
+#define _mm256_mask_roundscale_pd(W, U, A, imm) __extension__ ({ \
+ (__m256d)__builtin_ia32_rndscalepd_256_mask((__v4df)(__m256d)(A), \
+ (int)(imm), \
+ (__v4df)(__m256d)(W), \
+ (__mmask8)(U)); })
+
+
+#define _mm256_maskz_roundscale_pd(U, A, imm) __extension__ ({ \
+ (__m256d)__builtin_ia32_rndscalepd_256_mask((__v4df)(__m256d)(A), \
+ (int)(imm), \
+ (__v4df)_mm256_setzero_pd(), \
+ (__mmask8)(U)); })
+
+#define _mm_roundscale_ps(A, imm) __extension__ ({ \
+ (__m128)__builtin_ia32_rndscaleps_128_mask((__v4sf)(__m128)(A), (int)(imm), \
+ (__v4sf)_mm_setzero_ps(), \
+ (__mmask8)-1); })
+
+
+#define _mm_mask_roundscale_ps(W, U, A, imm) __extension__ ({ \
+ (__m128)__builtin_ia32_rndscaleps_128_mask((__v4sf)(__m128)(A), (int)(imm), \
+ (__v4sf)(__m128)(W), \
+ (__mmask8)(U)); })
+
+
+#define _mm_maskz_roundscale_ps(U, A, imm) __extension__ ({ \
+ (__m128)__builtin_ia32_rndscaleps_128_mask((__v4sf)(__m128)(A), (int)(imm), \
+ (__v4sf)_mm_setzero_ps(), \
+ (__mmask8)(U)); })
+
+#define _mm256_roundscale_ps(A, imm) __extension__ ({ \
+ (__m256)__builtin_ia32_rndscaleps_256_mask((__v8sf)(__m256)(A), (int)(imm), \
+ (__v8sf)_mm256_setzero_ps(), \
+ (__mmask8)-1); })
+
+#define _mm256_mask_roundscale_ps(W, U, A, imm) __extension__ ({ \
+ (__m256)__builtin_ia32_rndscaleps_256_mask((__v8sf)(__m256)(A), (int)(imm), \
+ (__v8sf)(__m256)(W), \
+ (__mmask8)(U)); })
+
+
+#define _mm256_maskz_roundscale_ps(U, A, imm) __extension__ ({ \
+ (__m256)__builtin_ia32_rndscaleps_256_mask((__v8sf)(__m256)(A), (int)(imm), \
+ (__v8sf)_mm256_setzero_ps(), \
+ (__mmask8)(U)); })
+
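+/* Editorial note: roundscale is defined as macros because the immediate
+ * must be a compile-time constant -- bits 7:4 of imm select the scale M
+ * (round to multiples of 2^-M) and bits 1:0 the rounding mode.  A
+ * sketch, assuming an AVX512VL target:
+ *
+ *   // round each lane to the nearest 0.25 (M = 2), round-to-nearest
+ *   __m128d q = _mm_roundscale_pd(x, (2 << 4) | 0x00);
+ */
+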
+static __inline__ __m128d __DEFAULT_FN_ATTRS
+_mm_scalef_pd (__m128d __A, __m128d __B) {
+ return (__m128d) __builtin_ia32_scalefpd128_mask ((__v2df) __A,
+ (__v2df) __B,
+ (__v2df)
+ _mm_setzero_pd (),
+ (__mmask8) -1);
+}
+
+static __inline__ __m128d __DEFAULT_FN_ATTRS
+_mm_mask_scalef_pd (__m128d __W, __mmask8 __U, __m128d __A,
+ __m128d __B) {
+ return (__m128d) __builtin_ia32_scalefpd128_mask ((__v2df) __A,
+ (__v2df) __B,
+ (__v2df) __W,
+ (__mmask8) __U);
+}
+
+static __inline__ __m128d __DEFAULT_FN_ATTRS
+_mm_maskz_scalef_pd (__mmask8 __U, __m128d __A, __m128d __B) {
+ return (__m128d) __builtin_ia32_scalefpd128_mask ((__v2df) __A,
+ (__v2df) __B,
+ (__v2df)
+ _mm_setzero_pd (),
+ (__mmask8) __U);
+}
+
+static __inline__ __m256d __DEFAULT_FN_ATTRS
+_mm256_scalef_pd (__m256d __A, __m256d __B) {
+ return (__m256d) __builtin_ia32_scalefpd256_mask ((__v4df) __A,
+ (__v4df) __B,
+ (__v4df)
+ _mm256_setzero_pd (),
+ (__mmask8) -1);
+}
+
+static __inline__ __m256d __DEFAULT_FN_ATTRS
+_mm256_mask_scalef_pd (__m256d __W, __mmask8 __U, __m256d __A,
+ __m256d __B) {
+ return (__m256d) __builtin_ia32_scalefpd256_mask ((__v4df) __A,
+ (__v4df) __B,
+ (__v4df) __W,
+ (__mmask8) __U);
+}
+
+static __inline__ __m256d __DEFAULT_FN_ATTRS
+_mm256_maskz_scalef_pd (__mmask8 __U, __m256d __A, __m256d __B) {
+ return (__m256d) __builtin_ia32_scalefpd256_mask ((__v4df) __A,
+ (__v4df) __B,
+ (__v4df)
+ _mm256_setzero_pd (),
+ (__mmask8) __U);
+}
+
+static __inline__ __m128 __DEFAULT_FN_ATTRS
+_mm_scalef_ps (__m128 __A, __m128 __B) {
+ return (__m128) __builtin_ia32_scalefps128_mask ((__v4sf) __A,
+ (__v4sf) __B,
+ (__v4sf)
+ _mm_setzero_ps (),
+ (__mmask8) -1);
+}
+
+static __inline__ __m128 __DEFAULT_FN_ATTRS
+_mm_mask_scalef_ps (__m128 __W, __mmask8 __U, __m128 __A, __m128 __B) {
+ return (__m128) __builtin_ia32_scalefps128_mask ((__v4sf) __A,
+ (__v4sf) __B,
+ (__v4sf) __W,
+ (__mmask8) __U);
+}
+
+static __inline__ __m128 __DEFAULT_FN_ATTRS
+_mm_maskz_scalef_ps (__mmask8 __U, __m128 __A, __m128 __B) {
+ return (__m128) __builtin_ia32_scalefps128_mask ((__v4sf) __A,
+ (__v4sf) __B,
+ (__v4sf)
+ _mm_setzero_ps (),
+ (__mmask8) __U);
+}
+
+static __inline__ __m256 __DEFAULT_FN_ATTRS
+_mm256_scalef_ps (__m256 __A, __m256 __B) {
+ return (__m256) __builtin_ia32_scalefps256_mask ((__v8sf) __A,
+ (__v8sf) __B,
+ (__v8sf)
+ _mm256_setzero_ps (),
+ (__mmask8) -1);
+}
+
+static __inline__ __m256 __DEFAULT_FN_ATTRS
+_mm256_mask_scalef_ps (__m256 __W, __mmask8 __U, __m256 __A,
+ __m256 __B) {
+ return (__m256) __builtin_ia32_scalefps256_mask ((__v8sf) __A,
+ (__v8sf) __B,
+ (__v8sf) __W,
+ (__mmask8) __U);
+}
+
+static __inline__ __m256 __DEFAULT_FN_ATTRS
+_mm256_maskz_scalef_ps (__mmask8 __U, __m256 __A, __m256 __B) {
+ return (__m256) __builtin_ia32_scalefps256_mask ((__v8sf) __A,
+ (__v8sf) __B,
+ (__v8sf)
+ _mm256_setzero_ps (),
+ (__mmask8) __U);
+}
+
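+/* Usage sketch (editorial): scalef computes __A * 2^floor(__B) per lane
+ * entirely in the FPU -- handy for ldexp-style scaling without a
+ * float/int round trip.  Assuming an AVX512VL target:
+ *
+ *   __m128d s = _mm_scalef_pd(_mm_set1_pd(3.0), _mm_set1_pd(2.0)); // 12.0
+ */
+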
+#define _mm_i64scatter_pd(addr, index, v1, scale) __extension__ ({ \
+ __builtin_ia32_scatterdiv2df((double *)(addr), (__mmask8)-1, \
+ (__v2di)(__m128i)(index), \
+ (__v2df)(__m128d)(v1), (int)(scale)); })
+
+#define _mm_mask_i64scatter_pd(addr, mask, index, v1, scale) __extension__ ({ \
+ __builtin_ia32_scatterdiv2df((double *)(addr), (__mmask8)(mask), \
+ (__v2di)(__m128i)(index), \
+ (__v2df)(__m128d)(v1), (int)(scale)); })
+
+#define _mm_i64scatter_epi64(addr, index, v1, scale) __extension__ ({ \
+ __builtin_ia32_scatterdiv2di((long long *)(addr), (__mmask8)-1, \
+ (__v2di)(__m128i)(index), \
+ (__v2di)(__m128i)(v1), (int)(scale)); })
+
+#define _mm_mask_i64scatter_epi64(addr, mask, index, v1, scale) __extension__ ({ \
+ __builtin_ia32_scatterdiv2di((long long *)(addr), (__mmask8)(mask), \
+ (__v2di)(__m128i)(index), \
+ (__v2di)(__m128i)(v1), (int)(scale)); })
+
+#define _mm256_i64scatter_pd(addr, index, v1, scale) __extension__ ({ \
+ __builtin_ia32_scatterdiv4df((double *)(addr), (__mmask8)-1, \
+ (__v4di)(__m256i)(index), \
+ (__v4df)(__m256d)(v1), (int)(scale)); })
+
+#define _mm256_mask_i64scatter_pd(addr, mask, index, v1, scale) __extension__ ({ \
+ __builtin_ia32_scatterdiv4df((double *)(addr), (__mmask8)(mask), \
+ (__v4di)(__m256i)(index), \
+ (__v4df)(__m256d)(v1), (int)(scale)); })
+
+#define _mm256_i64scatter_epi64(addr, index, v1, scale) __extension__ ({ \
+ __builtin_ia32_scatterdiv4di((long long *)(addr), (__mmask8)-1, \
+ (__v4di)(__m256i)(index), \
+ (__v4di)(__m256i)(v1), (int)(scale)); })
+
+#define _mm256_mask_i64scatter_epi64(addr, mask, index, v1, scale) __extension__ ({ \
+ __builtin_ia32_scatterdiv4di((long long *)(addr), (__mmask8)(mask), \
+ (__v4di)(__m256i)(index), \
+ (__v4di)(__m256i)(v1), (int)(scale)); })
+
+#define _mm_i64scatter_ps(addr, index, v1, scale) __extension__ ({ \
+ __builtin_ia32_scatterdiv4sf((float *)(addr), (__mmask8)-1, \
+ (__v2di)(__m128i)(index), (__v4sf)(__m128)(v1), \
+ (int)(scale)); })
+
+#define _mm_mask_i64scatter_ps(addr, mask, index, v1, scale) __extension__ ({ \
+ __builtin_ia32_scatterdiv4sf((float *)(addr), (__mmask8)(mask), \
+ (__v2di)(__m128i)(index), (__v4sf)(__m128)(v1), \
+ (int)(scale)); })
+
+#define _mm_i64scatter_epi32(addr, index, v1, scale) __extension__ ({ \
+ __builtin_ia32_scatterdiv4si((int *)(addr), (__mmask8)-1, \
+ (__v2di)(__m128i)(index), \
+ (__v4si)(__m128i)(v1), (int)(scale)); })
+
+#define _mm_mask_i64scatter_epi32(addr, mask, index, v1, scale) __extension__ ({ \
+ __builtin_ia32_scatterdiv4si((int *)(addr), (__mmask8)(mask), \
+ (__v2di)(__m128i)(index), \
+ (__v4si)(__m128i)(v1), (int)(scale)); })
+
+#define _mm256_i64scatter_ps(addr, index, v1, scale) __extension__ ({ \
+ __builtin_ia32_scatterdiv8sf((float *)(addr), (__mmask8)-1, \
+ (__v4di)(__m256i)(index), (__v4sf)(__m128)(v1), \
+ (int)(scale)); })
+
+#define _mm256_mask_i64scatter_ps(addr, mask, index, v1, scale) __extension__ ({ \
+ __builtin_ia32_scatterdiv8sf((float *)(addr), (__mmask8)(mask), \
+ (__v4di)(__m256i)(index), (__v4sf)(__m128)(v1), \
+ (int)(scale)); })
+
+#define _mm256_i64scatter_epi32(addr, index, v1, scale) __extension__ ({ \
+ __builtin_ia32_scatterdiv8si((int *)(addr), (__mmask8)-1, \
+ (__v4di)(__m256i)(index), \
+ (__v4si)(__m128i)(v1), (int)(scale)); })
+
+#define _mm256_mask_i64scatter_epi32(addr, mask, index, v1, scale) __extension__ ({ \
+ __builtin_ia32_scatterdiv8si((int *)(addr), (__mmask8)(mask), \
+ (__v4di)(__m256i)(index), \
+ (__v4si)(__m128i)(v1), (int)(scale)); })
+
+#define _mm_i32scatter_pd(addr, index, v1, scale) __extension__ ({ \
+ __builtin_ia32_scattersiv2df((double *)(addr), (__mmask8)-1, \
+ (__v4si)(__m128i)(index), \
+ (__v2df)(__m128d)(v1), (int)(scale)); })
+
+#define _mm_mask_i32scatter_pd(addr, mask, index, v1, scale) __extension__ ({ \
+ __builtin_ia32_scattersiv2df((double *)(addr), (__mmask8)(mask), \
+ (__v4si)(__m128i)(index), \
+ (__v2df)(__m128d)(v1), (int)(scale)); })
+
+#define _mm_i32scatter_epi64(addr, index, v1, scale) __extension__ ({ \
+ __builtin_ia32_scattersiv2di((long long *)(addr), (__mmask8)-1, \
+ (__v4si)(__m128i)(index), \
+ (__v2di)(__m128i)(v1), (int)(scale)); })
+
+#define _mm_mask_i32scatter_epi64(addr, mask, index, v1, scale) __extension__ ({ \
+ __builtin_ia32_scattersiv2di((long long *)(addr), (__mmask8)(mask), \
+ (__v4si)(__m128i)(index), \
+ (__v2di)(__m128i)(v1), (int)(scale)); })
+
+#define _mm256_i32scatter_pd(addr, index, v1, scale) __extension__ ({ \
+ __builtin_ia32_scattersiv4df((double *)(addr), (__mmask8)-1, \
+ (__v4si)(__m128i)(index), \
+ (__v4df)(__m256d)(v1), (int)(scale)); })
+
+#define _mm256_mask_i32scatter_pd(addr, mask, index, v1, scale) __extension__ ({ \
+ __builtin_ia32_scattersiv4df((double *)(addr), (__mmask8)(mask), \
+ (__v4si)(__m128i)(index), \
+ (__v4df)(__m256d)(v1), (int)(scale)); })
+
+#define _mm256_i32scatter_epi64(addr, index, v1, scale) __extension__ ({ \
+ __builtin_ia32_scattersiv4di((long long *)(addr), (__mmask8)-1, \
+ (__v4si)(__m128i)(index), \
+ (__v4di)(__m256i)(v1), (int)(scale)); })
+
+#define _mm256_mask_i32scatter_epi64(addr, mask, index, v1, scale) __extension__ ({ \
+ __builtin_ia32_scattersiv4di((long long *)(addr), (__mmask8)(mask), \
+ (__v4si)(__m128i)(index), \
+ (__v4di)(__m256i)(v1), (int)(scale)); })
+
+#define _mm_i32scatter_ps(addr, index, v1, scale) __extension__ ({ \
+ __builtin_ia32_scattersiv4sf((float *)(addr), (__mmask8)-1, \
+ (__v4si)(__m128i)(index), (__v4sf)(__m128)(v1), \
+ (int)(scale)); })
+
+#define _mm_mask_i32scatter_ps(addr, mask, index, v1, scale) __extension__ ({ \
+ __builtin_ia32_scattersiv4sf((float *)(addr), (__mmask8)(mask), \
+ (__v4si)(__m128i)(index), (__v4sf)(__m128)(v1), \
+ (int)(scale)); })
+
+#define _mm_i32scatter_epi32(addr, index, v1, scale) __extension__ ({ \
+ __builtin_ia32_scattersiv4si((int *)(addr), (__mmask8)-1, \
+ (__v4si)(__m128i)(index), \
+ (__v4si)(__m128i)(v1), (int)(scale)); })
+
+#define _mm_mask_i32scatter_epi32(addr, mask, index, v1, scale) __extension__ ({ \
+ __builtin_ia32_scattersiv4si((int *)(addr), (__mmask8)(mask), \
+ (__v4si)(__m128i)(index), \
+ (__v4si)(__m128i)(v1), (int)(scale)); })
+
+#define _mm256_i32scatter_ps(addr, index, v1, scale) __extension__ ({ \
+ __builtin_ia32_scattersiv8sf((float *)(addr), (__mmask8)-1, \
+ (__v8si)(__m256i)(index), (__v8sf)(__m256)(v1), \
+ (int)(scale)); })
+
+#define _mm256_mask_i32scatter_ps(addr, mask, index, v1, scale) __extension__ ({ \
+ __builtin_ia32_scattersiv8sf((float *)(addr), (__mmask8)(mask), \
+ (__v8si)(__m256i)(index), (__v8sf)(__m256)(v1), \
+ (int)(scale)); })
+
+#define _mm256_i32scatter_epi32(addr, index, v1, scale) __extension__ ({ \
+ __builtin_ia32_scattersiv8si((int *)(addr), (__mmask8)-1, \
+ (__v8si)(__m256i)(index), \
+ (__v8si)(__m256i)(v1), (int)(scale)); })
+
+#define _mm256_mask_i32scatter_epi32(addr, mask, index, v1, scale) __extension__ ({ \
+ __builtin_ia32_scattersiv8si((int *)(addr), (__mmask8)(mask), \
+ (__v8si)(__m256i)(index), \
+ (__v8si)(__m256i)(v1), (int)(scale)); })
+
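+/* Usage sketch (editorial): the scatter macros above store each selected
+ * lane of v1 to addr + index[i]*scale; scale must be 1, 2, 4 or 8, and
+ * the constant requirement again forces a macro.  A hypothetical
+ * example, assuming an AVX512VL target:
+ *
+ *   double out[16];
+ *   __m128i idx = _mm_set_epi64x(5, 2);
+ *   _mm_i64scatter_pd(out, idx, vals, 8);  // out[2] = vals[0], out[5] = vals[1]
+ */
+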
+static __inline__ __m128d __DEFAULT_FN_ATTRS
+_mm_mask_sqrt_pd (__m128d __W, __mmask8 __U, __m128d __A) {
+ return (__m128d) __builtin_ia32_sqrtpd128_mask ((__v2df) __A,
+ (__v2df) __W,
+ (__mmask8) __U);
+}
+
+static __inline__ __m128d __DEFAULT_FN_ATTRS
+_mm_maskz_sqrt_pd (__mmask8 __U, __m128d __A) {
+ return (__m128d) __builtin_ia32_sqrtpd128_mask ((__v2df) __A,
+ (__v2df)
+ _mm_setzero_pd (),
+ (__mmask8) __U);
+}
+
+static __inline__ __m256d __DEFAULT_FN_ATTRS
+_mm256_mask_sqrt_pd (__m256d __W, __mmask8 __U, __m256d __A) {
+ return (__m256d) __builtin_ia32_sqrtpd256_mask ((__v4df) __A,
+ (__v4df) __W,
+ (__mmask8) __U);
+}
+
+static __inline__ __m256d __DEFAULT_FN_ATTRS
+_mm256_maskz_sqrt_pd (__mmask8 __U, __m256d __A) {
+ return (__m256d) __builtin_ia32_sqrtpd256_mask ((__v4df) __A,
+ (__v4df)
+ _mm256_setzero_pd (),
+ (__mmask8) __U);
+}
+
+static __inline__ __m128 __DEFAULT_FN_ATTRS
+_mm_mask_sqrt_ps (__m128 __W, __mmask8 __U, __m128 __A) {
+ return (__m128) __builtin_ia32_sqrtps128_mask ((__v4sf) __A,
+ (__v4sf) __W,
+ (__mmask8) __U);
+}
+
+static __inline__ __m128 __DEFAULT_FN_ATTRS
+_mm_maskz_sqrt_ps (__mmask8 __U, __m128 __A) {
+ return (__m128) __builtin_ia32_sqrtps128_mask ((__v4sf) __A,
+ (__v4sf)
+ _mm_setzero_ps (),
+ (__mmask8) __U);
+}
+
+static __inline__ __m256 __DEFAULT_FN_ATTRS
+_mm256_mask_sqrt_ps (__m256 __W, __mmask8 __U, __m256 __A) {
+ return (__m256) __builtin_ia32_sqrtps256_mask ((__v8sf) __A,
+ (__v8sf) __W,
+ (__mmask8) __U);
+}
+
+static __inline__ __m256 __DEFAULT_FN_ATTRS
+_mm256_maskz_sqrt_ps (__mmask8 __U, __m256 __A) {
+ return (__m256) __builtin_ia32_sqrtps256_mask ((__v8sf) __A,
+ (__v8sf)
+ _mm256_setzero_ps (),
+ (__mmask8) __U);
+}
+
+static __inline__ __m128d __DEFAULT_FN_ATTRS
+_mm_mask_sub_pd (__m128d __W, __mmask8 __U, __m128d __A, __m128d __B) {
+ return (__m128d) __builtin_ia32_subpd128_mask ((__v2df) __A,
+ (__v2df) __B,
+ (__v2df) __W,
+ (__mmask8) __U);
+}
+
+static __inline__ __m128d __DEFAULT_FN_ATTRS
+_mm_maskz_sub_pd (__mmask8 __U, __m128d __A, __m128d __B) {
+ return (__m128d) __builtin_ia32_subpd128_mask ((__v2df) __A,
+ (__v2df) __B,
+ (__v2df)
+ _mm_setzero_pd (),
+ (__mmask8) __U);
+}
+
+static __inline__ __m256d __DEFAULT_FN_ATTRS
+_mm256_mask_sub_pd (__m256d __W, __mmask8 __U, __m256d __A,
+ __m256d __B) {
+ return (__m256d) __builtin_ia32_subpd256_mask ((__v4df) __A,
+ (__v4df) __B,
+ (__v4df) __W,
+ (__mmask8) __U);
+}
+
+static __inline__ __m256d __DEFAULT_FN_ATTRS
+_mm256_maskz_sub_pd (__mmask8 __U, __m256d __A, __m256d __B) {
+ return (__m256d) __builtin_ia32_subpd256_mask ((__v4df) __A,
+ (__v4df) __B,
+ (__v4df)
+ _mm256_setzero_pd (),
+ (__mmask8) __U);
+}
+
+static __inline__ __m128 __DEFAULT_FN_ATTRS
+_mm_mask_sub_ps (__m128 __W, __mmask8 __U, __m128 __A, __m128 __B) {
+ return (__m128) __builtin_ia32_subps128_mask ((__v4sf) __A,
+ (__v4sf) __B,
+ (__v4sf) __W,
+ (__mmask8) __U);
+}
+
+static __inline__ __m128 __DEFAULT_FN_ATTRS
+_mm_maskz_sub_ps (__mmask8 __U, __m128 __A, __m128 __B) {
+ return (__m128) __builtin_ia32_subps128_mask ((__v4sf) __A,
+ (__v4sf) __B,
+ (__v4sf)
+ _mm_setzero_ps (),
+ (__mmask8) __U);
+}
+
+static __inline__ __m256 __DEFAULT_FN_ATTRS
+_mm256_mask_sub_ps (__m256 __W, __mmask8 __U, __m256 __A, __m256 __B) {
+ return (__m256) __builtin_ia32_subps256_mask ((__v8sf) __A,
+ (__v8sf) __B,
+ (__v8sf) __W,
+ (__mmask8) __U);
+}
+
+static __inline__ __m256 __DEFAULT_FN_ATTRS
+_mm256_maskz_sub_ps (__mmask8 __U, __m256 __A, __m256 __B) {
+ return (__m256) __builtin_ia32_subps256_mask ((__v8sf) __A,
+ (__v8sf) __B,
+ (__v8sf)
+ _mm256_setzero_ps (),
+ (__mmask8) __U);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_mask2_permutex2var_epi32 (__m128i __A, __m128i __I, __mmask8 __U,
+ __m128i __B) {
+ return (__m128i) __builtin_ia32_vpermi2vard128_mask ((__v4si) __A,
+ (__v4si) __I
+ /* idx */ ,
+ (__v4si) __B,
+ (__mmask8) __U);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS
+_mm256_mask2_permutex2var_epi32 (__m256i __A, __m256i __I,
+ __mmask8 __U, __m256i __B) {
+ return (__m256i) __builtin_ia32_vpermi2vard256_mask ((__v8si) __A,
+ (__v8si) __I
+ /* idx */ ,
+ (__v8si) __B,
+ (__mmask8) __U);
+}
+
+static __inline__ __m128d __DEFAULT_FN_ATTRS
+_mm_mask2_permutex2var_pd (__m128d __A, __m128i __I, __mmask8 __U,
+ __m128d __B) {
+ return (__m128d) __builtin_ia32_vpermi2varpd128_mask ((__v2df) __A,
+ (__v2di) __I
+ /* idx */ ,
+ (__v2df) __B,
+ (__mmask8)
+ __U);
+}
+
+static __inline__ __m256d __DEFAULT_FN_ATTRS
+_mm256_mask2_permutex2var_pd (__m256d __A, __m256i __I, __mmask8 __U,
+ __m256d __B) {
+ return (__m256d) __builtin_ia32_vpermi2varpd256_mask ((__v4df) __A,
+ (__v4di) __I
+ /* idx */ ,
+ (__v4df) __B,
+ (__mmask8)
+ __U);
+}
+
+static __inline__ __m128 __DEFAULT_FN_ATTRS
+_mm_mask2_permutex2var_ps (__m128 __A, __m128i __I, __mmask8 __U,
+ __m128 __B) {
+ return (__m128) __builtin_ia32_vpermi2varps128_mask ((__v4sf) __A,
+ (__v4si) __I
+ /* idx */ ,
+ (__v4sf) __B,
+ (__mmask8) __U);
+}
+
+static __inline__ __m256 __DEFAULT_FN_ATTRS
+_mm256_mask2_permutex2var_ps (__m256 __A, __m256i __I, __mmask8 __U,
+ __m256 __B) {
+ return (__m256) __builtin_ia32_vpermi2varps256_mask ((__v8sf) __A,
+ (__v8si) __I
+ /* idx */ ,
+ (__v8sf) __B,
+ (__mmask8) __U);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_mask2_permutex2var_epi64 (__m128i __A, __m128i __I, __mmask8 __U,
+ __m128i __B) {
+ return (__m128i) __builtin_ia32_vpermi2varq128_mask ((__v2di) __A,
+ (__v2di) __I
+ /* idx */ ,
+ (__v2di) __B,
+ (__mmask8) __U);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS
+_mm256_mask2_permutex2var_epi64 (__m256i __A, __m256i __I,
+ __mmask8 __U, __m256i __B) {
+ return (__m256i) __builtin_ia32_vpermi2varq256_mask ((__v4di) __A,
+ (__v4di) __I
+ /* idx */ ,
+ (__v4di) __B,
+ (__mmask8) __U);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_permutex2var_epi32 (__m128i __A, __m128i __I, __m128i __B) {
+ return (__m128i) __builtin_ia32_vpermt2vard128_mask ((__v4si) __I
+ /* idx */ ,
+ (__v4si) __A,
+ (__v4si) __B,
+ (__mmask8) -1);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_mask_permutex2var_epi32 (__m128i __A, __mmask8 __U, __m128i __I,
+ __m128i __B) {
+ return (__m128i) __builtin_ia32_vpermt2vard128_mask ((__v4si) __I
+ /* idx */ ,
+ (__v4si) __A,
+ (__v4si) __B,
+ (__mmask8) __U);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_maskz_permutex2var_epi32 (__mmask8 __U, __m128i __A, __m128i __I,
+ __m128i __B) {
+ return (__m128i) __builtin_ia32_vpermt2vard128_maskz ((__v4si) __I
+ /* idx */ ,
+ (__v4si) __A,
+ (__v4si) __B,
+ (__mmask8)
+ __U);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS
+_mm256_permutex2var_epi32 (__m256i __A, __m256i __I, __m256i __B) {
+ return (__m256i) __builtin_ia32_vpermt2vard256_mask ((__v8si) __I
+ /* idx */ ,
+ (__v8si) __A,
+ (__v8si) __B,
+ (__mmask8) -1);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS
+_mm256_mask_permutex2var_epi32 (__m256i __A, __mmask8 __U, __m256i __I,
+ __m256i __B) {
+ return (__m256i) __builtin_ia32_vpermt2vard256_mask ((__v8si) __I
+ /* idx */ ,
+ (__v8si) __A,
+ (__v8si) __B,
+ (__mmask8) __U);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS
+_mm256_maskz_permutex2var_epi32 (__mmask8 __U, __m256i __A,
+ __m256i __I, __m256i __B) {
+ return (__m256i) __builtin_ia32_vpermt2vard256_maskz ((__v8si) __I
+ /* idx */ ,
+ (__v8si) __A,
+ (__v8si) __B,
+ (__mmask8)
+ __U);
+}
+
+static __inline__ __m128d __DEFAULT_FN_ATTRS
+_mm_permutex2var_pd (__m128d __A, __m128i __I, __m128d __B) {
+ return (__m128d) __builtin_ia32_vpermt2varpd128_mask ((__v2di) __I
+ /* idx */ ,
+ (__v2df) __A,
+ (__v2df) __B,
+                                                         (__mmask8) -1);
+}
+
+static __inline__ __m128d __DEFAULT_FN_ATTRS
+_mm_mask_permutex2var_pd (__m128d __A, __mmask8 __U, __m128i __I,
+ __m128d __B) {
+ return (__m128d) __builtin_ia32_vpermt2varpd128_mask ((__v2di) __I
+ /* idx */ ,
+ (__v2df) __A,
+ (__v2df) __B,
+ (__mmask8)
+ __U);
+}
+
+static __inline__ __m128d __DEFAULT_FN_ATTRS
+_mm_maskz_permutex2var_pd (__mmask8 __U, __m128d __A, __m128i __I,
+ __m128d __B) {
+ return (__m128d) __builtin_ia32_vpermt2varpd128_maskz ((__v2di) __I
+ /* idx */ ,
+ (__v2df) __A,
+ (__v2df) __B,
+ (__mmask8)
+ __U);
+}
+
+static __inline__ __m256d __DEFAULT_FN_ATTRS
+_mm256_permutex2var_pd (__m256d __A, __m256i __I, __m256d __B) {
+ return (__m256d) __builtin_ia32_vpermt2varpd256_mask ((__v4di) __I
+ /* idx */ ,
+ (__v4df) __A,
+ (__v4df) __B,
+                                                         (__mmask8) -1);
+}
+
+static __inline__ __m256d __DEFAULT_FN_ATTRS
+_mm256_mask_permutex2var_pd (__m256d __A, __mmask8 __U, __m256i __I,
+ __m256d __B) {
+ return (__m256d) __builtin_ia32_vpermt2varpd256_mask ((__v4di) __I
+ /* idx */ ,
+ (__v4df) __A,
+ (__v4df) __B,
+ (__mmask8)
+ __U);
+}
+
+static __inline__ __m256d __DEFAULT_FN_ATTRS
+_mm256_maskz_permutex2var_pd (__mmask8 __U, __m256d __A, __m256i __I,
+ __m256d __B) {
+ return (__m256d) __builtin_ia32_vpermt2varpd256_maskz ((__v4di) __I
+ /* idx */ ,
+ (__v4df) __A,
+ (__v4df) __B,
+ (__mmask8)
+ __U);
+}
+
+static __inline__ __m128 __DEFAULT_FN_ATTRS
+_mm_permutex2var_ps (__m128 __A, __m128i __I, __m128 __B) {
+ return (__m128) __builtin_ia32_vpermt2varps128_mask ((__v4si) __I
+ /* idx */ ,
+ (__v4sf) __A,
+ (__v4sf) __B,
+ (__mmask8) -1);
+}
+
+static __inline__ __m128 __DEFAULT_FN_ATTRS
+_mm_mask_permutex2var_ps (__m128 __A, __mmask8 __U, __m128i __I,
+ __m128 __B) {
+ return (__m128) __builtin_ia32_vpermt2varps128_mask ((__v4si) __I
+ /* idx */ ,
+ (__v4sf) __A,
+ (__v4sf) __B,
+ (__mmask8) __U);
+}
+
+static __inline__ __m128 __DEFAULT_FN_ATTRS
+_mm_maskz_permutex2var_ps (__mmask8 __U, __m128 __A, __m128i __I,
+ __m128 __B) {
+ return (__m128) __builtin_ia32_vpermt2varps128_maskz ((__v4si) __I
+ /* idx */ ,
+ (__v4sf) __A,
+ (__v4sf) __B,
+ (__mmask8)
+ __U);
+}
+
+static __inline__ __m256 __DEFAULT_FN_ATTRS
+_mm256_permutex2var_ps (__m256 __A, __m256i __I, __m256 __B) {
+ return (__m256) __builtin_ia32_vpermt2varps256_mask ((__v8si) __I
+ /* idx */ ,
+ (__v8sf) __A,
+ (__v8sf) __B,
+ (__mmask8) -1);
+}
+
+static __inline__ __m256 __DEFAULT_FN_ATTRS
+_mm256_mask_permutex2var_ps (__m256 __A, __mmask8 __U, __m256i __I,
+ __m256 __B) {
+ return (__m256) __builtin_ia32_vpermt2varps256_mask ((__v8si) __I
+ /* idx */ ,
+ (__v8sf) __A,
+ (__v8sf) __B,
+ (__mmask8) __U);
+}
+
+static __inline__ __m256 __DEFAULT_FN_ATTRS
+_mm256_maskz_permutex2var_ps (__mmask8 __U, __m256 __A, __m256i __I,
+ __m256 __B) {
+ return (__m256) __builtin_ia32_vpermt2varps256_maskz ((__v8si) __I
+ /* idx */ ,
+ (__v8sf) __A,
+ (__v8sf) __B,
+ (__mmask8)
+ __U);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_permutex2var_epi64 (__m128i __A, __m128i __I, __m128i __B) {
+ return (__m128i) __builtin_ia32_vpermt2varq128_mask ((__v2di) __I
+ /* idx */ ,
+ (__v2di) __A,
+ (__v2di) __B,
+ (__mmask8) -1);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_mask_permutex2var_epi64 (__m128i __A, __mmask8 __U, __m128i __I,
+ __m128i __B) {
+ return (__m128i) __builtin_ia32_vpermt2varq128_mask ((__v2di) __I
+ /* idx */ ,
+ (__v2di) __A,
+ (__v2di) __B,
+ (__mmask8) __U);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_maskz_permutex2var_epi64 (__mmask8 __U, __m128i __A, __m128i __I,
+ __m128i __B) {
+ return (__m128i) __builtin_ia32_vpermt2varq128_maskz ((__v2di) __I
+ /* idx */ ,
+ (__v2di) __A,
+ (__v2di) __B,
+ (__mmask8)
+ __U);
+}
+
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS
+_mm256_permutex2var_epi64 (__m256i __A, __m256i __I, __m256i __B) {
+ return (__m256i) __builtin_ia32_vpermt2varq256_mask ((__v4di) __I
+ /* idx */ ,
+ (__v4di) __A,
+ (__v4di) __B,
+ (__mmask8) -1);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS
+_mm256_mask_permutex2var_epi64 (__m256i __A, __mmask8 __U, __m256i __I,
+ __m256i __B) {
+ return (__m256i) __builtin_ia32_vpermt2varq256_mask ((__v4di) __I
+ /* idx */ ,
+ (__v4di) __A,
+ (__v4di) __B,
+ (__mmask8) __U);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS
+_mm256_maskz_permutex2var_epi64 (__mmask8 __U, __m256i __A,
+ __m256i __I, __m256i __B) {
+ return (__m256i) __builtin_ia32_vpermt2varq256_maskz ((__v4di) __I
+ /* idx */ ,
+ (__v4di) __A,
+ (__v4di) __B,
+ (__mmask8)
+ __U);
+}
+
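+/* Editorial note: permutex2var selects each destination lane from the
+ * concatenation of the two sources, indexed by __I (vpermt2*); the
+ * mask2 variants above instead keep the index vector __I as the
+ * pass-through for unselected lanes (vpermi2*).  E.g., assuming an
+ * AVX512VL target (4-lane epi32 case: indices 0-3 pick from a, 4-7
+ * from b):
+ *
+ *   __m128i r = _mm_permutex2var_epi32(a, _mm_set_epi32(6, 4, 2, 0), b);
+ */
+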
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_mask_cvtepi8_epi32 (__m128i __W, __mmask8 __U, __m128i __A)
+{
+ return (__m128i) __builtin_ia32_pmovsxbd128_mask ((__v16qi) __A,
+ (__v4si) __W,
+ (__mmask8) __U);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_maskz_cvtepi8_epi32 (__mmask8 __U, __m128i __A)
+{
+ return (__m128i) __builtin_ia32_pmovsxbd128_mask ((__v16qi) __A,
+ (__v4si)
+ _mm_setzero_si128 (),
+ (__mmask8) __U);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS
+_mm256_mask_cvtepi8_epi32 (__m256i __W, __mmask8 __U, __m128i __A)
+{
+ return (__m256i) __builtin_ia32_pmovsxbd256_mask ((__v16qi) __A,
+ (__v8si) __W,
+ (__mmask8) __U);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS
+_mm256_maskz_cvtepi8_epi32 (__mmask8 __U, __m128i __A)
+{
+ return (__m256i) __builtin_ia32_pmovsxbd256_mask ((__v16qi) __A,
+ (__v8si)
+ _mm256_setzero_si256 (),
+ (__mmask8) __U);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_mask_cvtepi8_epi64 (__m128i __W, __mmask8 __U, __m128i __A)
+{
+ return (__m128i) __builtin_ia32_pmovsxbq128_mask ((__v16qi) __A,
+ (__v2di) __W,
+ (__mmask8) __U);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_maskz_cvtepi8_epi64 (__mmask8 __U, __m128i __A)
+{
+ return (__m128i) __builtin_ia32_pmovsxbq128_mask ((__v16qi) __A,
+ (__v2di)
+ _mm_setzero_si128 (),
+ (__mmask8) __U);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS
+_mm256_mask_cvtepi8_epi64 (__m256i __W, __mmask8 __U, __m128i __A)
+{
+ return (__m256i) __builtin_ia32_pmovsxbq256_mask ((__v16qi) __A,
+ (__v4di) __W,
+ (__mmask8) __U);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS
+_mm256_maskz_cvtepi8_epi64 (__mmask8 __U, __m128i __A)
+{
+ return (__m256i) __builtin_ia32_pmovsxbq256_mask ((__v16qi) __A,
+ (__v4di)
+ _mm256_setzero_si256 (),
+ (__mmask8) __U);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_mask_cvtepi32_epi64 (__m128i __W, __mmask8 __U, __m128i __X)
+{
+ return (__m128i) __builtin_ia32_pmovsxdq128_mask ((__v4si) __X,
+ (__v2di) __W,
+ (__mmask8) __U);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_maskz_cvtepi32_epi64 (__mmask8 __U, __m128i __X)
+{
+ return (__m128i) __builtin_ia32_pmovsxdq128_mask ((__v4si) __X,
+ (__v2di)
+ _mm_setzero_si128 (),
+ (__mmask8) __U);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS
+_mm256_mask_cvtepi32_epi64 (__m256i __W, __mmask8 __U, __m128i __X)
+{
+ return (__m256i) __builtin_ia32_pmovsxdq256_mask ((__v4si) __X,
+ (__v4di) __W,
+ (__mmask8) __U);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS
+_mm256_maskz_cvtepi32_epi64 (__mmask8 __U, __m128i __X)
+{
+ return (__m256i) __builtin_ia32_pmovsxdq256_mask ((__v4si) __X,
+ (__v4di)
+ _mm256_setzero_si256 (),
+ (__mmask8) __U);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_mask_cvtepi16_epi32 (__m128i __W, __mmask8 __U, __m128i __A)
+{
+ return (__m128i) __builtin_ia32_pmovsxwd128_mask ((__v8hi) __A,
+ (__v4si) __W,
+ (__mmask8) __U);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_maskz_cvtepi16_epi32 (__mmask8 __U, __m128i __A)
+{
+ return (__m128i) __builtin_ia32_pmovsxwd128_mask ((__v8hi) __A,
+ (__v4si)
+ _mm_setzero_si128 (),
+ (__mmask8) __U);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS
+_mm256_mask_cvtepi16_epi32 (__m256i __W, __mmask8 __U, __m128i __A)
+{
+ return (__m256i) __builtin_ia32_pmovsxwd256_mask ((__v8hi) __A,
+ (__v8si) __W,
+ (__mmask8) __U);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS
+_mm256_maskz_cvtepi16_epi32 (__mmask8 __U, __m128i __A)
+{
+ return (__m256i) __builtin_ia32_pmovsxwd256_mask ((__v8hi) __A,
+ (__v8si)
+ _mm256_setzero_si256 (),
+ (__mmask8) __U);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_mask_cvtepi16_epi64 (__m128i __W, __mmask8 __U, __m128i __A)
+{
+ return (__m128i) __builtin_ia32_pmovsxwq128_mask ((__v8hi) __A,
+ (__v2di) __W,
+ (__mmask8) __U);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_maskz_cvtepi16_epi64 (__mmask8 __U, __m128i __A)
+{
+ return (__m128i) __builtin_ia32_pmovsxwq128_mask ((__v8hi) __A,
+ (__v2di)
+ _mm_setzero_si128 (),
+ (__mmask8) __U);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS
+_mm256_mask_cvtepi16_epi64 (__m256i __W, __mmask8 __U, __m128i __A)
+{
+ return (__m256i) __builtin_ia32_pmovsxwq256_mask ((__v8hi) __A,
+ (__v4di) __W,
+ (__mmask8) __U);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS
+_mm256_maskz_cvtepi16_epi64 (__mmask8 __U, __m128i __A)
+{
+ return (__m256i) __builtin_ia32_pmovsxwq256_mask ((__v8hi) __A,
+ (__v4di)
+ _mm256_setzero_si256 (),
+ (__mmask8) __U);
+}
+
+
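+/* Editorial note: the cvtepi8/16/32 conversions above sign-extend
+ * (vpmovsx*), while the cvtepu* family below zero-extends (vpmovzx*);
+ * both only add masking over the SSE4.1 pmov forms.  Assuming an
+ * AVX512VL target:
+ *
+ *   __m128i bytes = _mm_set1_epi8(-1);
+ *   __m128i s = _mm_maskz_cvtepi8_epi32(0xF, bytes);  // four lanes of -1
+ *   __m128i u = _mm_maskz_cvtepu8_epi32(0xF, bytes);  // four lanes of 255
+ */
+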
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_mask_cvtepu8_epi32 (__m128i __W, __mmask8 __U, __m128i __A)
+{
+ return (__m128i) __builtin_ia32_pmovzxbd128_mask ((__v16qi) __A,
+ (__v4si) __W,
+ (__mmask8) __U);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_maskz_cvtepu8_epi32 (__mmask8 __U, __m128i __A)
+{
+ return (__m128i) __builtin_ia32_pmovzxbd128_mask ((__v16qi) __A,
+ (__v4si)
+ _mm_setzero_si128 (),
+ (__mmask8) __U);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS
+_mm256_mask_cvtepu8_epi32 (__m256i __W, __mmask8 __U, __m128i __A)
+{
+ return (__m256i) __builtin_ia32_pmovzxbd256_mask ((__v16qi) __A,
+ (__v8si) __W,
+ (__mmask8) __U);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS
+_mm256_maskz_cvtepu8_epi32 (__mmask8 __U, __m128i __A)
+{
+ return (__m256i) __builtin_ia32_pmovzxbd256_mask ((__v16qi) __A,
+ (__v8si)
+ _mm256_setzero_si256 (),
+ (__mmask8) __U);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_mask_cvtepu8_epi64 (__m128i __W, __mmask8 __U, __m128i __A)
+{
+ return (__m128i) __builtin_ia32_pmovzxbq128_mask ((__v16qi) __A,
+ (__v2di) __W,
+ (__mmask8) __U);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_maskz_cvtepu8_epi64 (__mmask8 __U, __m128i __A)
+{
+ return (__m128i) __builtin_ia32_pmovzxbq128_mask ((__v16qi) __A,
+ (__v2di)
+ _mm_setzero_si128 (),
+ (__mmask8) __U);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS
+_mm256_mask_cvtepu8_epi64 (__m256i __W, __mmask8 __U, __m128i __A)
+{
+ return (__m256i) __builtin_ia32_pmovzxbq256_mask ((__v16qi) __A,
+ (__v4di) __W,
+ (__mmask8) __U);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS
+_mm256_maskz_cvtepu8_epi64 (__mmask8 __U, __m128i __A)
+{
+ return (__m256i) __builtin_ia32_pmovzxbq256_mask ((__v16qi) __A,
+ (__v4di)
+ _mm256_setzero_si256 (),
+ (__mmask8) __U);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_mask_cvtepu32_epi64 (__m128i __W, __mmask8 __U, __m128i __X)
+{
+ return (__m128i) __builtin_ia32_pmovzxdq128_mask ((__v4si) __X,
+ (__v2di) __W,
+ (__mmask8) __U);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_maskz_cvtepu32_epi64 (__mmask8 __U, __m128i __X)
+{
+ return (__m128i) __builtin_ia32_pmovzxdq128_mask ((__v4si) __X,
+ (__v2di)
+ _mm_setzero_si128 (),
+ (__mmask8) __U);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS
+_mm256_mask_cvtepu32_epi64 (__m256i __W, __mmask8 __U, __m128i __X)
+{
+ return (__m256i) __builtin_ia32_pmovzxdq256_mask ((__v4si) __X,
+ (__v4di) __W,
+ (__mmask8) __U);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS
+_mm256_maskz_cvtepu32_epi64 (__mmask8 __U, __m128i __X)
+{
+ return (__m256i) __builtin_ia32_pmovzxdq256_mask ((__v4si) __X,
+ (__v4di)
+ _mm256_setzero_si256 (),
+ (__mmask8) __U);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_mask_cvtepu16_epi32 (__m128i __W, __mmask8 __U, __m128i __A)
+{
+ return (__m128i) __builtin_ia32_pmovzxwd128_mask ((__v8hi) __A,
+ (__v4si) __W,
+ (__mmask8) __U);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_maskz_cvtepu16_epi32 (__mmask8 __U, __m128i __A)
+{
+ return (__m128i) __builtin_ia32_pmovzxwd128_mask ((__v8hi) __A,
+ (__v4si)
+ _mm_setzero_si128 (),
+ (__mmask8) __U);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS
+_mm256_mask_cvtepu16_epi32 (__m256i __W, __mmask8 __U, __m128i __A)
+{
+ return (__m256i) __builtin_ia32_pmovzxwd256_mask ((__v8hi) __A,
+ (__v8si) __W,
+ (__mmask8) __U);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS
+_mm256_maskz_cvtepu16_epi32 (__mmask8 __U, __m128i __A)
+{
+ return (__m256i) __builtin_ia32_pmovzxwd256_mask ((__v8hi) __A,
+ (__v8si)
+ _mm256_setzero_si256 (),
+ (__mmask8) __U);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_mask_cvtepu16_epi64 (__m128i __W, __mmask8 __U, __m128i __A)
+{
+ return (__m128i) __builtin_ia32_pmovzxwq128_mask ((__v8hi) __A,
+ (__v2di) __W,
+ (__mmask8) __U);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_maskz_cvtepu16_epi64 (__mmask8 __U, __m128i __A)
+{
+ return (__m128i) __builtin_ia32_pmovzxwq128_mask ((__v8hi) __A,
+ (__v2di)
+ _mm_setzero_si128 (),
+ (__mmask8) __U);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS
+_mm256_mask_cvtepu16_epi64 (__m256i __W, __mmask8 __U, __m128i __A)
+{
+ return (__m256i) __builtin_ia32_pmovzxwq256_mask ((__v8hi) __A,
+ (__v4di) __W,
+ (__mmask8) __U);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS
+_mm256_maskz_cvtepu16_epi64 (__mmask8 __U, __m128i __A)
+{
+ return (__m256i) __builtin_ia32_pmovzxwq256_mask ((__v8hi) __A,
+ (__v4di)
+ _mm256_setzero_si256 (),
+ (__mmask8) __U);
+}
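+
+/* Usage sketch (illustrative, not part of the upstream header): the cvtepu
+   group zero-extends rather than sign-extends. Merge vs. zero masking:
+     __m128i m = _mm_mask_cvtepu8_epi32(w, 0x3, a); // lanes 0-1 widened, 2-3 from w
+     __m128i z = _mm_maskz_cvtepu8_epi32(0x3, a);   // lanes 0-1 widened, 2-3 zeroed
+*/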
+
+
+#define _mm_rol_epi32(a, b) __extension__ ({\
+ (__m128i)__builtin_ia32_prold128_mask((__v4si)(__m128i)(a), (int)(b), \
+ (__v4si)_mm_setzero_si128(), \
+ (__mmask8)-1); })
+
+#define _mm_mask_rol_epi32(w, u, a, b) __extension__ ({\
+ (__m128i)__builtin_ia32_prold128_mask((__v4si)(__m128i)(a), (int)(b), \
+ (__v4si)(__m128i)(w), (__mmask8)(u)); })
+
+#define _mm_maskz_rol_epi32(u, a, b) __extension__ ({\
+ (__m128i)__builtin_ia32_prold128_mask((__v4si)(__m128i)(a), (int)(b), \
+ (__v4si)_mm_setzero_si128(), \
+ (__mmask8)(u)); })
+
+#define _mm256_rol_epi32(a, b) __extension__ ({\
+ (__m256i)__builtin_ia32_prold256_mask((__v8si)(__m256i)(a), (int)(b), \
+ (__v8si)_mm256_setzero_si256(), \
+ (__mmask8)-1); })
+
+#define _mm256_mask_rol_epi32(w, u, a, b) __extension__ ({\
+ (__m256i)__builtin_ia32_prold256_mask((__v8si)(__m256i)(a), (int)(b), \
+ (__v8si)(__m256i)(w), (__mmask8)(u)); })
+
+#define _mm256_maskz_rol_epi32(u, a, b) __extension__ ({\
+ (__m256i)__builtin_ia32_prold256_mask((__v8si)(__m256i)(a), (int)(b), \
+ (__v8si)_mm256_setzero_si256(), \
+ (__mmask8)(u)); })
+
+#define _mm_rol_epi64(a, b) __extension__ ({\
+ (__m128i)__builtin_ia32_prolq128_mask((__v2di)(__m128i)(a), (int)(b), \
+ (__v2di)_mm_setzero_di(), \
+ (__mmask8)-1); })
+
+#define _mm_mask_rol_epi64(w, u, a, b) __extension__ ({\
+ (__m128i)__builtin_ia32_prolq128_mask((__v2di)(__m128i)(a), (int)(b), \
+ (__v2di)(__m128i)(w), (__mmask8)(u)); })
+
+#define _mm_maskz_rol_epi64(u, a, b) __extension__ ({\
+ (__m128i)__builtin_ia32_prolq128_mask((__v2di)(__m128i)(a), (int)(b), \
+ (__v2di)_mm_setzero_di(), \
+ (__mmask8)(u)); })
+
+#define _mm256_rol_epi64(a, b) __extension__ ({\
+ (__m256i)__builtin_ia32_prolq256_mask((__v4di)(__m256i)(a), (int)(b), \
+ (__v4di)_mm256_setzero_si256(), \
+ (__mmask8)-1); })
+
+#define _mm256_mask_rol_epi64(w, u, a, b) __extension__ ({\
+ (__m256i)__builtin_ia32_prolq256_mask((__v4di)(__m256i)(a), (int)(b), \
+ (__v4di)(__m256i)(w), (__mmask8)(u)); })
+
+#define _mm256_maskz_rol_epi64(u, a, b) __extension__ ({\
+ (__m256i)__builtin_ia32_prolq256_mask((__v4di)(__m256i)(a), (int)(b), \
+ (__v4di)_mm256_setzero_si256(), \
+ (__mmask8)(u)); })
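+
+/* Note (illustrative, not part of the upstream header): the rotate-by-
+   immediate forms are macros rather than functions because the count must be
+   a compile-time constant encoded in the instruction's imm8; it is taken
+   modulo the element width. For example:
+     __m128i r = _mm_rol_epi32(a, 8); // rotate each 32-bit lane left by 8
+*/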
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_rolv_epi32 (__m128i __A, __m128i __B)
+{
+ return (__m128i) __builtin_ia32_prolvd128_mask ((__v4si) __A,
+ (__v4si) __B,
+ (__v4si)
+ _mm_setzero_si128 (),
+ (__mmask8) -1);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_mask_rolv_epi32 (__m128i __W, __mmask8 __U, __m128i __A,
+ __m128i __B)
+{
+ return (__m128i) __builtin_ia32_prolvd128_mask ((__v4si) __A,
+ (__v4si) __B,
+ (__v4si) __W,
+ (__mmask8) __U);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_maskz_rolv_epi32 (__mmask8 __U, __m128i __A, __m128i __B)
+{
+ return (__m128i) __builtin_ia32_prolvd128_mask ((__v4si) __A,
+ (__v4si) __B,
+ (__v4si)
+ _mm_setzero_si128 (),
+ (__mmask8) __U);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS
+_mm256_rolv_epi32 (__m256i __A, __m256i __B)
+{
+ return (__m256i) __builtin_ia32_prolvd256_mask ((__v8si) __A,
+ (__v8si) __B,
+ (__v8si)
+ _mm256_setzero_si256 (),
+ (__mmask8) -1);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS
+_mm256_mask_rolv_epi32 (__m256i __W, __mmask8 __U, __m256i __A,
+ __m256i __B)
+{
+ return (__m256i) __builtin_ia32_prolvd256_mask ((__v8si) __A,
+ (__v8si) __B,
+ (__v8si) __W,
+ (__mmask8) __U);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS
+_mm256_maskz_rolv_epi32 (__mmask8 __U, __m256i __A, __m256i __B)
+{
+ return (__m256i) __builtin_ia32_prolvd256_mask ((__v8si) __A,
+ (__v8si) __B,
+ (__v8si)
+ _mm256_setzero_si256 (),
+ (__mmask8) __U);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_rolv_epi64 (__m128i __A, __m128i __B)
+{
+ return (__m128i) __builtin_ia32_prolvq128_mask ((__v2di) __A,
+ (__v2di) __B,
+ (__v2di)
+ _mm_setzero_di (),
+ (__mmask8) -1);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_mask_rolv_epi64 (__m128i __W, __mmask8 __U, __m128i __A,
+ __m128i __B)
+{
+ return (__m128i) __builtin_ia32_prolvq128_mask ((__v2di) __A,
+ (__v2di) __B,
+ (__v2di) __W,
+ (__mmask8) __U);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_maskz_rolv_epi64 (__mmask8 __U, __m128i __A, __m128i __B)
+{
+ return (__m128i) __builtin_ia32_prolvq128_mask ((__v2di) __A,
+ (__v2di) __B,
+ (__v2di)
+ _mm_setzero_di (),
+ (__mmask8) __U);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS
+_mm256_rolv_epi64 (__m256i __A, __m256i __B)
+{
+ return (__m256i) __builtin_ia32_prolvq256_mask ((__v4di) __A,
+ (__v4di) __B,
+ (__v4di)
+ _mm256_setzero_si256 (),
+ (__mmask8) -1);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS
+_mm256_mask_rolv_epi64 (__m256i __W, __mmask8 __U, __m256i __A,
+ __m256i __B)
+{
+ return (__m256i) __builtin_ia32_prolvq256_mask ((__v4di) __A,
+ (__v4di) __B,
+ (__v4di) __W,
+ (__mmask8) __U);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS
+_mm256_maskz_rolv_epi64 (__mmask8 __U, __m256i __A, __m256i __B)
+{
+ return (__m256i) __builtin_ia32_prolvq256_mask ((__v4di) __A,
+ (__v4di) __B,
+ (__v4di)
+ _mm256_setzero_si256 (),
+ (__mmask8) __U);
+}
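+
+/* Usage sketch (illustrative, not part of the upstream header): the rolv
+   forms take a per-element rotate count from the second vector operand, so
+   each lane can rotate by a different amount:
+     __m128i r = _mm_rolv_epi32(a, _mm_set_epi32(3, 2, 1, 0));
+*/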
+
+#define _mm_ror_epi32(A, B) __extension__ ({ \
+ (__m128i)__builtin_ia32_prord128_mask((__v4si)(__m128i)(A), (int)(B), \
+ (__v4si)_mm_setzero_si128(), \
+ (__mmask8)-1); })
+
+#define _mm_mask_ror_epi32(W, U, A, B) __extension__ ({ \
+ (__m128i)__builtin_ia32_prord128_mask((__v4si)(__m128i)(A), (int)(B), \
+ (__v4si)(__m128i)(W), (__mmask8)(U)); })
+
+#define _mm_maskz_ror_epi32(U, A, B) __extension__ ({ \
+ (__m128i)__builtin_ia32_prord128_mask((__v4si)(__m128i)(A), (int)(B), \
+ (__v4si)_mm_setzero_si128(), \
+ (__mmask8)(U)); })
+
+#define _mm256_ror_epi32(A, B) __extension__ ({ \
+ (__m256i)__builtin_ia32_prord256_mask((__v8si)(__m256i)(A), (int)(B), \
+ (__v8si)_mm256_setzero_si256(), \
+ (__mmask8)-1); })
+
+#define _mm256_mask_ror_epi32(W, U, A, B) __extension__ ({ \
+ (__m256i)__builtin_ia32_prord256_mask((__v8si)(__m256i)(A), (int)(B), \
+ (__v8si)(__m256i)(W), (__mmask8)(U)); })
+
+#define _mm256_maskz_ror_epi32(U, A, B) __extension__ ({ \
+ (__m256i)__builtin_ia32_prord256_mask((__v8si)(__m256i)(A), (int)(B), \
+ (__v8si)_mm256_setzero_si256(), \
+ (__mmask8)(U)); })
+
+#define _mm_ror_epi64(A, B) __extension__ ({ \
+ (__m128i)__builtin_ia32_prorq128_mask((__v2di)(__m128i)(A), (int)(B), \
+ (__v2di)_mm_setzero_di(), \
+ (__mmask8)-1); })
+
+#define _mm_mask_ror_epi64(W, U, A, B) __extension__ ({ \
+ (__m128i)__builtin_ia32_prorq128_mask((__v2di)(__m128i)(A), (int)(B), \
+ (__v2di)(__m128i)(W), (__mmask8)(U)); })
+
+#define _mm_maskz_ror_epi64(U, A, B) __extension__ ({ \
+ (__m128i)__builtin_ia32_prorq128_mask((__v2di)(__m128i)(A), (int)(B), \
+ (__v2di)_mm_setzero_di(), \
+ (__mmask8)(U)); })
+
+#define _mm256_ror_epi64(A, B) __extension__ ({ \
+ (__m256i)__builtin_ia32_prorq256_mask((__v4di)(__m256i)(A), (int)(B), \
+ (__v4di)_mm256_setzero_si256(), \
+ (__mmask8)-1); })
+
+#define _mm256_mask_ror_epi64(W, U, A, B) __extension__ ({ \
+ (__m256i)__builtin_ia32_prorq256_mask((__v4di)(__m256i)(A), (int)(B), \
+ (__v4di)(__m256i)(W), (__mmask8)(U)); })
+
+#define _mm256_maskz_ror_epi64(U, A, B) __extension__ ({ \
+ (__m256i)__builtin_ia32_prorq256_mask((__v4di)(__m256i)(A), (int)(B), \
+ (__v4di)_mm256_setzero_si256(), \
+ (__mmask8)(U)); })
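+
+/* Note (illustrative, not part of the upstream header): the ror forms mirror
+   the rol forms above but rotate right; _mm_ror_epi32(a, n) is equivalent to
+   _mm_rol_epi32(a, 32 - n) for 0 < n < 32.
+*/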
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_mask_sll_epi32 (__m128i __W, __mmask8 __U, __m128i __A,
+ __m128i __B)
+{
+ return (__m128i) __builtin_ia32_pslld128_mask ((__v4si) __A,
+ (__v4si) __B,
+ (__v4si) __W,
+ (__mmask8) __U);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_maskz_sll_epi32 (__mmask8 __U, __m128i __A, __m128i __B)
+{
+ return (__m128i) __builtin_ia32_pslld128_mask ((__v4si) __A,
+ (__v4si) __B,
+ (__v4si)
+ _mm_setzero_si128 (),
+ (__mmask8) __U);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS
+_mm256_mask_sll_epi32 (__m256i __W, __mmask8 __U, __m256i __A,
+ __m128i __B)
+{
+ return (__m256i) __builtin_ia32_pslld256_mask ((__v8si) __A,
+ (__v4si) __B,
+ (__v8si) __W,
+ (__mmask8) __U);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS
+_mm256_maskz_sll_epi32 (__mmask8 __U, __m256i __A, __m128i __B)
+{
+ return (__m256i) __builtin_ia32_pslld256_mask ((__v8si) __A,
+ (__v4si) __B,
+ (__v8si)
+ _mm256_setzero_si256 (),
+ (__mmask8) __U);
+}
+
+#define _mm_mask_slli_epi32(W, U, A, B) __extension__ ({ \
+ (__m128i)__builtin_ia32_pslldi128_mask((__v4si)(__m128i)(A), (int)(B), \
+ (__v4si)(__m128i)(W), \
+ (__mmask8)(U)); })
+
+#define _mm_maskz_slli_epi32(U, A, B) __extension__ ({ \
+ (__m128i)__builtin_ia32_pslldi128_mask((__v4si)(__m128i)(A), (int)(B), \
+ (__v4si)_mm_setzero_si128(), \
+ (__mmask8)(U)); })
+
+#define _mm256_mask_slli_epi32(W, U, A, B) __extension__ ({ \
+ (__m256i)__builtin_ia32_pslldi256_mask((__v8si)(__m256i)(A), (int)(B), \
+ (__v8si)(__m256i)(W), \
+ (__mmask8)(U)); })
+
+#define _mm256_maskz_slli_epi32(U, A, B) __extension__ ({ \
+ (__m256i)__builtin_ia32_pslldi256_mask((__v8si)(__m256i)(A), (int)(B), \
+ (__v8si)_mm256_setzero_si256(), \
+ (__mmask8)(U)); })
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_mask_sll_epi64 (__m128i __W, __mmask8 __U, __m128i __A,
+ __m128i __B)
+{
+ return (__m128i) __builtin_ia32_psllq128_mask ((__v2di) __A,
+ (__v2di) __B,
+ (__v2di) __W,
+ (__mmask8) __U);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_maskz_sll_epi64 (__mmask8 __U, __m128i __A, __m128i __B)
+{
+ return (__m128i) __builtin_ia32_psllq128_mask ((__v2di) __A,
+ (__v2di) __B,
+ (__v2di)
+ _mm_setzero_di (),
+ (__mmask8) __U);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS
+_mm256_mask_sll_epi64 (__m256i __W, __mmask8 __U, __m256i __A,
+ __m128i __B)
+{
+ return (__m256i) __builtin_ia32_psllq256_mask ((__v4di) __A,
+ (__v2di) __B,
+ (__v4di) __W,
+ (__mmask8) __U);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS
+_mm256_maskz_sll_epi64 (__mmask8 __U, __m256i __A, __m128i __B)
+{
+ return (__m256i) __builtin_ia32_psllq256_mask ((__v4di) __A,
+ (__v2di) __B,
+ (__v4di)
+ _mm256_setzero_si256 (),
+ (__mmask8) __U);
+}
+
+#define _mm_mask_slli_epi64(W, U, A, B) __extension__ ({ \
+ (__m128i)__builtin_ia32_psllqi128_mask((__v2di)(__m128i)(A), (int)(B), \
+ (__v2di)(__m128i)(W), \
+ (__mmask8)(U)); })
+
+#define _mm_maskz_slli_epi64(U, A, B) __extension__ ({ \
+ (__m128i)__builtin_ia32_psllqi128_mask((__v2di)(__m128i)(A), (int)(B), \
+ (__v2di)_mm_setzero_di(), \
+ (__mmask8)(U)); })
+
+#define _mm256_mask_slli_epi64(W, U, A, B) __extension__ ({ \
+ (__m256i)__builtin_ia32_psllqi256_mask((__v4di)(__m256i)(A), (int)(B), \
+ (__v4di)(__m256i)(W), \
+ (__mmask8)(U)); })
+
+#define _mm256_maskz_slli_epi64(U, A, B) __extension__ ({ \
+ (__m256i)__builtin_ia32_psllqi256_mask((__v4di)(__m256i)(A), (int)(B), \
+ (__v4di)_mm256_setzero_si256(), \
+ (__mmask8)(U)); })
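+
+/* Usage sketch (illustrative, not part of the upstream header): the sll
+   forms shift every element by the single count held in the low 64 bits of
+   __B (a count >= the element width yields 0), while the slli macros take an
+   immediate count:
+     __m128i r = _mm_maskz_slli_epi32(0xF, a, 4); // all four lanes << 4
+*/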
+
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_rorv_epi32 (__m128i __A, __m128i __B)
+{
+ return (__m128i) __builtin_ia32_prorvd128_mask ((__v4si) __A,
+ (__v4si) __B,
+ (__v4si)
+ _mm_setzero_si128 (),
+ (__mmask8) -1);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_mask_rorv_epi32 (__m128i __W, __mmask8 __U, __m128i __A,
+ __m128i __B)
+{
+ return (__m128i) __builtin_ia32_prorvd128_mask ((__v4si) __A,
+ (__v4si) __B,
+ (__v4si) __W,
+ (__mmask8) __U);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_maskz_rorv_epi32 (__mmask8 __U, __m128i __A, __m128i __B)
+{
+ return (__m128i) __builtin_ia32_prorvd128_mask ((__v4si) __A,
+ (__v4si) __B,
+ (__v4si)
+ _mm_setzero_si128 (),
+ (__mmask8) __U);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS
+_mm256_rorv_epi32 (__m256i __A, __m256i __B)
+{
+ return (__m256i) __builtin_ia32_prorvd256_mask ((__v8si) __A,
+ (__v8si) __B,
+ (__v8si)
+ _mm256_setzero_si256 (),
+ (__mmask8) -1);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS
+_mm256_mask_rorv_epi32 (__m256i __W, __mmask8 __U, __m256i __A,
+ __m256i __B)
+{
+ return (__m256i) __builtin_ia32_prorvd256_mask ((__v8si) __A,
+ (__v8si) __B,
+ (__v8si) __W,
+ (__mmask8) __U);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS
+_mm256_maskz_rorv_epi32 (__mmask8 __U, __m256i __A, __m256i __B)
+{
+ return (__m256i) __builtin_ia32_prorvd256_mask ((__v8si) __A,
+ (__v8si) __B,
+ (__v8si)
+ _mm256_setzero_si256 (),
+ (__mmask8) __U);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_rorv_epi64 (__m128i __A, __m128i __B)
+{
+ return (__m128i) __builtin_ia32_prorvq128_mask ((__v2di) __A,
+ (__v2di) __B,
+ (__v2di)
+ _mm_setzero_di (),
+ (__mmask8) -1);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_mask_rorv_epi64 (__m128i __W, __mmask8 __U, __m128i __A,
+ __m128i __B)
+{
+ return (__m128i) __builtin_ia32_prorvq128_mask ((__v2di) __A,
+ (__v2di) __B,
+ (__v2di) __W,
+ (__mmask8) __U);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_maskz_rorv_epi64 (__mmask8 __U, __m128i __A, __m128i __B)
+{
+ return (__m128i) __builtin_ia32_prorvq128_mask ((__v2di) __A,
+ (__v2di) __B,
+ (__v2di)
+ _mm_setzero_di (),
+ (__mmask8) __U);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS
+_mm256_rorv_epi64 (__m256i __A, __m256i __B)
+{
+ return (__m256i) __builtin_ia32_prorvq256_mask ((__v4di) __A,
+ (__v4di) __B,
+ (__v4di)
+ _mm256_setzero_si256 (),
+ (__mmask8) -1);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS
+_mm256_mask_rorv_epi64 (__m256i __W, __mmask8 __U, __m256i __A,
+ __m256i __B)
+{
+ return (__m256i) __builtin_ia32_prorvq256_mask ((__v4di) __A,
+ (__v4di) __B,
+ (__v4di) __W,
+ (__mmask8) __U);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS
+_mm256_maskz_rorv_epi64 (__mmask8 __U, __m256i __A, __m256i __B)
+{
+ return (__m256i) __builtin_ia32_prorvq256_mask ((__v4di) __A,
+ (__v4di) __B,
+ (__v4di)
+ _mm256_setzero_si256 (),
+ (__mmask8) __U);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_mask_sllv_epi64 (__m128i __W, __mmask8 __U, __m128i __X,
+ __m128i __Y)
+{
+ return (__m128i) __builtin_ia32_psllv2di_mask ((__v2di) __X,
+ (__v2di) __Y,
+ (__v2di) __W,
+ (__mmask8) __U);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_maskz_sllv_epi64 (__mmask8 __U, __m128i __X, __m128i __Y)
+{
+ return (__m128i) __builtin_ia32_psllv2di_mask ((__v2di) __X,
+ (__v2di) __Y,
+ (__v2di)
+ _mm_setzero_di (),
+ (__mmask8) __U);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS
+_mm256_mask_sllv_epi64 (__m256i __W, __mmask8 __U, __m256i __X,
+ __m256i __Y)
+{
+ return (__m256i) __builtin_ia32_psllv4di_mask ((__v4di) __X,
+ (__v4di) __Y,
+ (__v4di) __W,
+ (__mmask8) __U);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS
+_mm256_maskz_sllv_epi64 (__mmask8 __U, __m256i __X, __m256i __Y)
+{
+ return (__m256i) __builtin_ia32_psllv4di_mask ((__v4di) __X,
+ (__v4di) __Y,
+ (__v4di)
+ _mm256_setzero_si256 (),
+ (__mmask8) __U);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_mask_sllv_epi32 (__m128i __W, __mmask8 __U, __m128i __X,
+ __m128i __Y)
+{
+ return (__m128i) __builtin_ia32_psllv4si_mask ((__v4si) __X,
+ (__v4si) __Y,
+ (__v4si) __W,
+ (__mmask8) __U);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_maskz_sllv_epi32 (__mmask8 __U, __m128i __X, __m128i __Y)
+{
+ return (__m128i) __builtin_ia32_psllv4si_mask ((__v4si) __X,
+ (__v4si) __Y,
+ (__v4si)
+ _mm_setzero_si128 (),
+ (__mmask8) __U);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS
+_mm256_mask_sllv_epi32 (__m256i __W, __mmask8 __U, __m256i __X,
+ __m256i __Y)
+{
+ return (__m256i) __builtin_ia32_psllv8si_mask ((__v8si) __X,
+ (__v8si) __Y,
+ (__v8si) __W,
+ (__mmask8) __U);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS
+_mm256_maskz_sllv_epi32 (__mmask8 __U, __m256i __X, __m256i __Y)
+{
+ return (__m256i) __builtin_ia32_psllv8si_mask ((__v8si) __X,
+ (__v8si) __Y,
+ (__v8si)
+ _mm256_setzero_si256 (),
+ (__mmask8) __U);
+}
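+
+/* Note (illustrative, not part of the upstream header): the sllv forms shift
+   each element independently by the corresponding count in __Y, and the srlv
+   forms below are their logical right-shift counterparts; any count greater
+   than or equal to the element width produces 0 in that lane.
+*/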
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_mask_srlv_epi64 (__m128i __W, __mmask8 __U, __m128i __X,
+ __m128i __Y)
+{
+ return (__m128i) __builtin_ia32_psrlv2di_mask ((__v2di) __X,
+ (__v2di) __Y,
+ (__v2di) __W,
+ (__mmask8) __U);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_maskz_srlv_epi64 (__mmask8 __U, __m128i __X, __m128i __Y)
+{
+ return (__m128i) __builtin_ia32_psrlv2di_mask ((__v2di) __X,
+ (__v2di) __Y,
+ (__v2di)
+ _mm_setzero_di (),
+ (__mmask8) __U);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS
+_mm256_mask_srlv_epi64 (__m256i __W, __mmask8 __U, __m256i __X,
+ __m256i __Y)
+{
+ return (__m256i) __builtin_ia32_psrlv4di_mask ((__v4di) __X,
+ (__v4di) __Y,
+ (__v4di) __W,
+ (__mmask8) __U);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS
+_mm256_maskz_srlv_epi64 (__mmask8 __U, __m256i __X, __m256i __Y)
+{
+ return (__m256i) __builtin_ia32_psrlv4di_mask ((__v4di) __X,
+ (__v4di) __Y,
+ (__v4di)
+ _mm256_setzero_si256 (),
+ (__mmask8) __U);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_mask_srlv_epi32 (__m128i __W, __mmask8 __U, __m128i __X,
+ __m128i __Y)
+{
+ return (__m128i) __builtin_ia32_psrlv4si_mask ((__v4si) __X,
+ (__v4si) __Y,
+ (__v4si) __W,
+ (__mmask8) __U);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_maskz_srlv_epi32 (__mmask8 __U, __m128i __X, __m128i __Y)
+{
+ return (__m128i) __builtin_ia32_psrlv4si_mask ((__v4si) __X,
+ (__v4si) __Y,
+ (__v4si)
+ _mm_setzero_si128 (),
+ (__mmask8) __U);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS
+_mm256_mask_srlv_epi32 (__m256i __W, __mmask8 __U, __m256i __X,
+ __m256i __Y)
+{
+ return (__m256i) __builtin_ia32_psrlv8si_mask ((__v8si) __X,
+ (__v8si) __Y,
+ (__v8si) __W,
+ (__mmask8) __U);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS
+_mm256_maskz_srlv_epi32 (__mmask8 __U, __m256i __X, __m256i __Y)
+{
+ return (__m256i) __builtin_ia32_psrlv8si_mask ((__v8si) __X,
+ (__v8si) __Y,
+ (__v8si)
+ _mm256_setzero_si256 (),
+ (__mmask8) __U);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_mask_srl_epi32 (__m128i __W, __mmask8 __U, __m128i __A,
+ __m128i __B)
+{
+ return (__m128i) __builtin_ia32_psrld128_mask ((__v4si) __A,
+ (__v4si) __B,
+ (__v4si) __W,
+ (__mmask8) __U);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_maskz_srl_epi32 (__mmask8 __U, __m128i __A, __m128i __B)
+{
+ return (__m128i) __builtin_ia32_psrld128_mask ((__v4si) __A,
+ (__v4si) __B,
+ (__v4si)
+ _mm_setzero_si128 (),
+ (__mmask8) __U);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS
+_mm256_mask_srl_epi32 (__m256i __W, __mmask8 __U, __m256i __A,
+ __m128i __B)
+{
+ return (__m256i) __builtin_ia32_psrld256_mask ((__v8si) __A,
+ (__v4si) __B,
+ (__v8si) __W,
+ (__mmask8) __U);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS
+_mm256_maskz_srl_epi32 (__mmask8 __U, __m256i __A, __m128i __B)
+{
+ return (__m256i) __builtin_ia32_psrld256_mask ((__v8si) __A,
+ (__v4si) __B,
+ (__v8si)
+ _mm256_setzero_si256 (),
+ (__mmask8) __U);
+}
+
+#define _mm_mask_srli_epi32(W, U, A, imm) __extension__ ({ \
+ (__m128i)__builtin_ia32_psrldi128_mask((__v4si)(__m128i)(A), (int)(imm), \
+ (__v4si)(__m128i)(W), \
+ (__mmask8)(U)); })
+
+#define _mm_maskz_srli_epi32(U, A, imm) __extension__ ({ \
+ (__m128i)__builtin_ia32_psrldi128_mask((__v4si)(__m128i)(A), (int)(imm), \
+ (__v4si)_mm_setzero_si128(), \
+ (__mmask8)(U)); })
+
+#define _mm256_mask_srli_epi32(W, U, A, imm) __extension__ ({ \
+ (__m256i)__builtin_ia32_psrldi256_mask((__v8si)(__m256i)(A), (int)(imm), \
+ (__v8si)(__m256i)(W), \
+ (__mmask8)(U)); })
+
+#define _mm256_maskz_srli_epi32(U, A, imm) __extension__ ({ \
+ (__m256i)__builtin_ia32_psrldi256_mask((__v8si)(__m256i)(A), (int)(imm), \
+ (__v8si)_mm256_setzero_si256(), \
+ (__mmask8)(U)); })
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_mask_srl_epi64 (__m128i __W, __mmask8 __U, __m128i __A,
+ __m128i __B)
+{
+ return (__m128i) __builtin_ia32_psrlq128_mask ((__v2di) __A,
+ (__v2di) __B,
+ (__v2di) __W,
+ (__mmask8) __U);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_maskz_srl_epi64 (__mmask8 __U, __m128i __A, __m128i __B)
+{
+ return (__m128i) __builtin_ia32_psrlq128_mask ((__v2di) __A,
+ (__v2di) __B,
+ (__v2di)
+ _mm_setzero_di (),
+ (__mmask8) __U);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS
+_mm256_mask_srl_epi64 (__m256i __W, __mmask8 __U, __m256i __A,
+ __m128i __B)
+{
+ return (__m256i) __builtin_ia32_psrlq256_mask ((__v4di) __A,
+ (__v2di) __B,
+ (__v4di) __W,
+ (__mmask8) __U);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS
+_mm256_maskz_srl_epi64 (__mmask8 __U, __m256i __A, __m128i __B)
+{
+ return (__m256i) __builtin_ia32_psrlq256_mask ((__v4di) __A,
+ (__v2di) __B,
+ (__v4di)
+ _mm256_setzero_si256 (),
+ (__mmask8) __U);
+}
+
+#define _mm_mask_srli_epi64(W, U, A, imm) __extension__ ({ \
+ (__m128i)__builtin_ia32_psrlqi128_mask((__v2di)(__m128i)(A), (int)(imm), \
+ (__v2di)(__m128i)(W), \
+ (__mmask8)(U)); })
+
+#define _mm_maskz_srli_epi64(U, A, imm) __extension__ ({ \
+ (__m128i)__builtin_ia32_psrlqi128_mask((__v2di)(__m128i)(A), (int)(imm), \
+ (__v2di)_mm_setzero_si128(), \
+ (__mmask8)(U)); })
+
+#define _mm256_mask_srli_epi64(W, U, A, imm) __extension__ ({ \
+ (__m256i)__builtin_ia32_psrlqi256_mask((__v4di)(__m256i)(A), (int)(imm), \
+ (__v4di)(__m256i)(W), \
+ (__mmask8)(U)); })
+
+#define _mm256_maskz_srli_epi64(U, A, imm) __extension__ ({ \
+ (__m256i)__builtin_ia32_psrlqi256_mask((__v4di)(__m256i)(A), (int)(imm), \
+ (__v4di)_mm256_setzero_si256(), \
+ (__mmask8)(U)); })
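+
+/* Usage sketch (illustrative, not part of the upstream header): srl/srli are
+   the logical right-shift counterparts of sll/slli, shifting in zero bits:
+     __m128i r = _mm_maskz_srli_epi32(0x3, a, 1); // lanes 0-1 >> 1, lanes 2-3 zeroed
+*/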
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_mask_srav_epi32 (__m128i __W, __mmask8 __U, __m128i __X,
+ __m128i __Y)
+{
+ return (__m128i) __builtin_ia32_psrav4si_mask ((__v4si) __X,
+ (__v4si) __Y,
+ (__v4si) __W,
+ (__mmask8) __U);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_maskz_srav_epi32 (__mmask8 __U, __m128i __X, __m128i __Y)
+{
+ return (__m128i) __builtin_ia32_psrav4si_mask ((__v4si) __X,
+ (__v4si) __Y,
+ (__v4si)
+ _mm_setzero_si128 (),
+ (__mmask8) __U);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS
+_mm256_mask_srav_epi32 (__m256i __W, __mmask8 __U, __m256i __X,
+ __m256i __Y)
+{
+ return (__m256i) __builtin_ia32_psrav8si_mask ((__v8si) __X,
+ (__v8si) __Y,
+ (__v8si) __W,
+ (__mmask8) __U);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS
+_mm256_maskz_srav_epi32 (__mmask8 __U, __m256i __X, __m256i __Y)
+{
+ return (__m256i) __builtin_ia32_psrav8si_mask ((__v8si) __X,
+ (__v8si) __Y,
+ (__v8si)
+ _mm256_setzero_si256 (),
+ (__mmask8) __U);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_srav_epi64 (__m128i __X, __m128i __Y)
+{
+ return (__m128i) __builtin_ia32_psravq128_mask ((__v2di) __X,
+ (__v2di) __Y,
+ (__v2di)
+ _mm_setzero_di (),
+ (__mmask8) -1);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_mask_srav_epi64 (__m128i __W, __mmask8 __U, __m128i __X,
+ __m128i __Y)
+{
+ return (__m128i) __builtin_ia32_psravq128_mask ((__v2di) __X,
+ (__v2di) __Y,
+ (__v2di) __W,
+ (__mmask8) __U);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_maskz_srav_epi64 (__mmask8 __U, __m128i __X, __m128i __Y)
+{
+ return (__m128i) __builtin_ia32_psravq128_mask ((__v2di) __X,
+ (__v2di) __Y,
+ (__v2di)
+ _mm_setzero_di (),
+ (__mmask8) __U);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS
+_mm256_srav_epi64 (__m256i __X, __m256i __Y)
+{
+ return (__m256i) __builtin_ia32_psravq256_mask ((__v4di) __X,
+ (__v4di) __Y,
+ (__v4di)
+ _mm256_setzero_si256 (),
+ (__mmask8) -1);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS
+_mm256_mask_srav_epi64 (__m256i __W, __mmask8 __U, __m256i __X,
+ __m256i __Y)
+{
+ return (__m256i) __builtin_ia32_psravq256_mask ((__v4di) __X,
+ (__v4di) __Y,
+ (__v4di) __W,
+ (__mmask8) __U);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS
+_mm256_maskz_srav_epi64 (__mmask8 __U, __m256i __X, __m256i __Y)
+{
+ return (__m256i) __builtin_ia32_psravq256_mask ((__v4di) __X,
+ (__v4di) __Y,
+ (__v4di)
+ _mm256_setzero_si256 (),
+ (__mmask8) __U);
+}
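+
+/* Note (illustrative, not part of the upstream header): srav performs
+   per-element arithmetic right shifts, replicating the sign bit; counts >=
+   the element width fill the lane with the sign bit. The 64-bit variants are
+   new with AVX-512, since AVX2 only provided 32-bit variable arithmetic
+   shifts.
+*/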
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_mask_mov_epi32 (__m128i __W, __mmask8 __U, __m128i __A)
+{
+ return (__m128i) __builtin_ia32_selectd_128 ((__mmask8) __U,
+ (__v4si) __A,
+ (__v4si) __W);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_maskz_mov_epi32 (__mmask8 __U, __m128i __A)
+{
+ return (__m128i) __builtin_ia32_selectd_128 ((__mmask8) __U,
+ (__v4si) __A,
+ (__v4si) _mm_setzero_si128 ());
+}
+
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS
+_mm256_mask_mov_epi32 (__m256i __W, __mmask8 __U, __m256i __A)
+{
+ return (__m256i) __builtin_ia32_selectd_256 ((__mmask8) __U,
+ (__v8si) __A,
+ (__v8si) __W);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS
+_mm256_maskz_mov_epi32 (__mmask8 __U, __m256i __A)
+{
+ return (__m256i) __builtin_ia32_selectd_256 ((__mmask8) __U,
+ (__v8si) __A,
+ (__v8si) _mm256_setzero_si256 ());
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_mask_load_epi32 (__m128i __W, __mmask8 __U, void const *__P)
+{
+ return (__m128i) __builtin_ia32_movdqa32load128_mask ((__v4si *) __P,
+ (__v4si) __W,
+ (__mmask8)
+ __U);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_maskz_load_epi32 (__mmask8 __U, void const *__P)
+{
+ return (__m128i) __builtin_ia32_movdqa32load128_mask ((__v4si *) __P,
+ (__v4si)
+ _mm_setzero_si128 (),
+ (__mmask8)
+ __U);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS
+_mm256_mask_load_epi32 (__m256i __W, __mmask8 __U, void const *__P)
+{
+ return (__m256i) __builtin_ia32_movdqa32load256_mask ((__v8si *) __P,
+ (__v8si) __W,
+ (__mmask8)
+ __U);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS
+_mm256_maskz_load_epi32 (__mmask8 __U, void const *__P)
+{
+ return (__m256i) __builtin_ia32_movdqa32load256_mask ((__v8si *) __P,
+ (__v8si)
+ _mm256_setzero_si256 (),
+ (__mmask8)
+ __U);
+}
+
+static __inline__ void __DEFAULT_FN_ATTRS
+_mm_mask_store_epi32 (void *__P, __mmask8 __U, __m128i __A)
+{
+ __builtin_ia32_movdqa32store128_mask ((__v4si *) __P,
+ (__v4si) __A,
+ (__mmask8) __U);
+}
+
+static __inline__ void __DEFAULT_FN_ATTRS
+_mm256_mask_store_epi32 (void *__P, __mmask8 __U, __m256i __A)
+{
+ __builtin_ia32_movdqa32store256_mask ((__v8si *) __P,
+ (__v8si) __A,
+ (__mmask8) __U);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_mask_mov_epi64 (__m128i __W, __mmask8 __U, __m128i __A)
+{
+ return (__m128i) __builtin_ia32_selectq_128 ((__mmask8) __U,
+ (__v2di) __A,
+ (__v2di) __W);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_maskz_mov_epi64 (__mmask8 __U, __m128i __A)
+{
+ return (__m128i) __builtin_ia32_selectq_128 ((__mmask8) __U,
+ (__v2di) __A,
+ (__v2di) _mm_setzero_di ());
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS
+_mm256_mask_mov_epi64 (__m256i __W, __mmask8 __U, __m256i __A)
+{
+ return (__m256i) __builtin_ia32_selectq_256 ((__mmask8) __U,
+ (__v4di) __A,
+ (__v4di) __W);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS
+_mm256_maskz_mov_epi64 (__mmask8 __U, __m256i __A)
+{
+ return (__m256i) __builtin_ia32_selectq_256 ((__mmask8) __U,
+ (__v4di) __A,
+ (__v4di) _mm256_setzero_si256 ());
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_mask_load_epi64 (__m128i __W, __mmask8 __U, void const *__P)
+{
+ return (__m128i) __builtin_ia32_movdqa64load128_mask ((__v2di *) __P,
+ (__v2di) __W,
+ (__mmask8)
+ __U);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_maskz_load_epi64 (__mmask8 __U, void const *__P)
+{
+ return (__m128i) __builtin_ia32_movdqa64load128_mask ((__v2di *) __P,
+ (__v2di)
+ _mm_setzero_di (),
+ (__mmask8)
+ __U);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS
+_mm256_mask_load_epi64 (__m256i __W, __mmask8 __U, void const *__P)
+{
+ return (__m256i) __builtin_ia32_movdqa64load256_mask ((__v4di *) __P,
+ (__v4di) __W,
+ (__mmask8)
+ __U);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS
+_mm256_maskz_load_epi64 (__mmask8 __U, void const *__P)
+{
+ return (__m256i) __builtin_ia32_movdqa64load256_mask ((__v4di *) __P,
+ (__v4di)
+ _mm256_setzero_si256 (),
+ (__mmask8)
+ __U);
+}
+
+static __inline__ void __DEFAULT_FN_ATTRS
+_mm_mask_store_epi64 (void *__P, __mmask8 __U, __m128i __A)
+{
+ __builtin_ia32_movdqa64store128_mask ((__v2di *) __P,
+ (__v2di) __A,
+ (__mmask8) __U);
+}
+
+static __inline__ void __DEFAULT_FN_ATTRS
+_mm256_mask_store_epi64 (void *__P, __mmask8 __U, __m256i __A)
+{
+ __builtin_ia32_movdqa64store256_mask ((__v4di *) __P,
+ (__v4di) __A,
+ (__mmask8) __U);
+}
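+
+/* Usage sketch (illustrative, not part of the upstream header): the mov
+   forms are masked register-to-register blends, and these load/store forms
+   require naturally aligned addresses (16 bytes for the 128-bit forms, 32
+   bytes for the 256-bit forms); masked-off store lanes leave memory
+   untouched:
+     int buf[4] __attribute__((aligned(16)));
+     _mm_mask_store_epi32(buf, 0x5, v); // writes only buf[0] and buf[2]
+*/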
+
+static __inline__ __m128d __DEFAULT_FN_ATTRS
+_mm_mask_movedup_pd (__m128d __W, __mmask8 __U, __m128d __A)
+{
+ return (__m128d)__builtin_ia32_selectpd_128((__mmask8)__U,
+ (__v2df)_mm_movedup_pd(__A),
+ (__v2df)__W);
+}
+
+static __inline__ __m128d __DEFAULT_FN_ATTRS
+_mm_maskz_movedup_pd (__mmask8 __U, __m128d __A)
+{
+ return (__m128d)__builtin_ia32_selectpd_128((__mmask8)__U,
+ (__v2df)_mm_movedup_pd(__A),
+ (__v2df)_mm_setzero_pd());
+}
+
+static __inline__ __m256d __DEFAULT_FN_ATTRS
+_mm256_mask_movedup_pd (__m256d __W, __mmask8 __U, __m256d __A)
+{
+ return (__m256d)__builtin_ia32_selectpd_256((__mmask8)__U,
+ (__v4df)_mm256_movedup_pd(__A),
+ (__v4df)__W);
+}
+
+static __inline__ __m256d __DEFAULT_FN_ATTRS
+_mm256_maskz_movedup_pd (__mmask8 __U, __m256d __A)
+{
+ return (__m256d)__builtin_ia32_selectpd_256((__mmask8)__U,
+ (__v4df)_mm256_movedup_pd(__A),
+ (__v4df)_mm256_setzero_pd());
+}
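+
+/* Note (illustrative, not part of the upstream header): movedup duplicates
+   the even-indexed double of each 128-bit lane; the masked forms apply merge
+   or zero masking on top via the select builtin.
+*/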
+
+
+#define _mm_mask_set1_epi32(O, M, A) __extension__ ({ \
+ (__m128i)__builtin_ia32_pbroadcastd128_gpr_mask((int)(A), \
+ (__v4si)(__m128i)(O), \
+ (__mmask8)(M)); })
+
+#define _mm_maskz_set1_epi32(M, A) __extension__ ({ \
+ (__m128i)__builtin_ia32_pbroadcastd128_gpr_mask((int)(A), \
+ (__v4si)_mm_setzero_si128(), \
+ (__mmask8)(M)); })
+
+#define _mm256_mask_set1_epi32(O, M, A) __extension__ ({ \
+ (__m256i)__builtin_ia32_pbroadcastd256_gpr_mask((int)(A), \
+ (__v8si)(__m256i)(O), \
+ (__mmask8)(M)); })
+
+#define _mm256_maskz_set1_epi32(M, A) __extension__ ({ \
+ (__m256i)__builtin_ia32_pbroadcastd256_gpr_mask((int)(A), \
+ (__v8si)_mm256_setzero_si256(), \
+ (__mmask8)(M)); })
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_mask_set1_epi64 (__m128i __O, __mmask8 __M, long long __A)
+{
+ return (__m128i) __builtin_ia32_pbroadcastq128_gpr_mask (__A, (__v2di) __O,
+ __M);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_maskz_set1_epi64 (__mmask8 __M, long long __A)
+{
+ return (__m128i) __builtin_ia32_pbroadcastq128_gpr_mask (__A,
+ (__v2di)
+ _mm_setzero_si128 (),
+ __M);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS
+_mm256_mask_set1_epi64 (__m256i __O, __mmask8 __M, long long __A)
+{
+ return (__m256i) __builtin_ia32_pbroadcastq256_gpr_mask (__A, (__v4di) __O,
+ __M);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS
+_mm256_maskz_set1_epi64 (__mmask8 __M, long long __A)
+{
+ return (__m256i) __builtin_ia32_pbroadcastq256_gpr_mask (__A,
+ (__v4di)
+ _mm256_setzero_si256 (),
+ __M);
+}
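+
+/* Usage sketch (illustrative, not part of the upstream header): the set1
+   forms broadcast a scalar from a general-purpose register into the selected
+   lanes:
+     __m128i r = _mm_maskz_set1_epi32(0x9, 42); // lanes 0 and 3 hold 42, rest 0
+*/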
+
+#define _mm_fixupimm_pd(A, B, C, imm) __extension__ ({ \
+ (__m128d)__builtin_ia32_fixupimmpd128_mask((__v2df)(__m128d)(A), \
+ (__v2df)(__m128d)(B), \
+ (__v2di)(__m128i)(C), (int)(imm), \
+ (__mmask8)-1); })
+
+#define _mm_mask_fixupimm_pd(A, U, B, C, imm) __extension__ ({ \
+ (__m128d)__builtin_ia32_fixupimmpd128_mask((__v2df)(__m128d)(A), \
+ (__v2df)(__m128d)(B), \
+ (__v2di)(__m128i)(C), (int)(imm), \
+ (__mmask8)(U)); })
+
+#define _mm_maskz_fixupimm_pd(U, A, B, C, imm) __extension__ ({ \
+ (__m128d)__builtin_ia32_fixupimmpd128_maskz((__v2df)(__m128d)(A), \
+ (__v2df)(__m128d)(B), \
+ (__v2di)(__m128i)(C), \
+ (int)(imm), (__mmask8)(U)); })
+
+#define _mm256_fixupimm_pd(A, B, C, imm) __extension__ ({ \
+ (__m256d)__builtin_ia32_fixupimmpd256_mask((__v4df)(__m256d)(A), \
+ (__v4df)(__m256d)(B), \
+ (__v4di)(__m256i)(C), (int)(imm), \
+ (__mmask8)-1); })
+
+#define _mm256_mask_fixupimm_pd(A, U, B, C, imm) __extension__ ({ \
+ (__m256d)__builtin_ia32_fixupimmpd256_mask((__v4df)(__m256d)(A), \
+ (__v4df)(__m256d)(B), \
+ (__v4di)(__m256i)(C), (int)(imm), \
+ (__mmask8)(U)); })
+
+#define _mm256_maskz_fixupimm_pd(U, A, B, C, imm) __extension__ ({ \
+ (__m256d)__builtin_ia32_fixupimmpd256_maskz((__v4df)(__m256d)(A), \
+ (__v4df)(__m256d)(B), \
+ (__v4di)(__m256i)(C), \
+ (int)(imm), (__mmask8)(U)); })
+
+#define _mm_fixupimm_ps(A, B, C, imm) __extension__ ({ \
+ (__m128)__builtin_ia32_fixupimmps128_mask((__v4sf)(__m128)(A), \
+ (__v4sf)(__m128)(B), \
+ (__v4si)(__m128i)(C), (int)(imm), \
+ (__mmask8)-1); })
+
+#define _mm_mask_fixupimm_ps(A, U, B, C, imm) __extension__ ({ \
+ (__m128)__builtin_ia32_fixupimmps128_mask((__v4sf)(__m128)(A), \
+ (__v4sf)(__m128)(B), \
+ (__v4si)(__m128i)(C), (int)(imm), \
+ (__mmask8)(U)); })
+
+#define _mm_maskz_fixupimm_ps(U, A, B, C, imm) __extension__ ({ \
+ (__m128)__builtin_ia32_fixupimmps128_maskz((__v4sf)(__m128)(A), \
+ (__v4sf)(__m128)(B), \
+ (__v4si)(__m128i)(C), (int)(imm), \
+ (__mmask8)(U)); })
+
+#define _mm256_fixupimm_ps(A, B, C, imm) __extension__ ({ \
+ (__m256)__builtin_ia32_fixupimmps256_mask((__v8sf)(__m256)(A), \
+ (__v8sf)(__m256)(B), \
+ (__v8si)(__m256i)(C), (int)(imm), \
+ (__mmask8)-1); })
+
+#define _mm256_mask_fixupimm_ps(A, U, B, C, imm) __extension__ ({ \
+ (__m256)__builtin_ia32_fixupimmps256_mask((__v8sf)(__m256)(A), \
+ (__v8sf)(__m256)(B), \
+ (__v8si)(__m256i)(C), (int)(imm), \
+ (__mmask8)(U)); })
+
+#define _mm256_maskz_fixupimm_ps(U, A, B, C, imm) __extension__ ({ \
+ (__m256)__builtin_ia32_fixupimmps256_maskz((__v8sf)(__m256)(A), \
+ (__v8sf)(__m256)(B), \
+ (__v8si)(__m256i)(C), (int)(imm), \
+ (__mmask8)(U)); })
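+
+/* Note (illustrative, not part of the upstream header): fixupimm classifies
+   each element of B (zero, NaN, infinity, ...) and substitutes a response
+   chosen by the matching token in C together with the imm8 control, which is
+   mainly useful for patching special values in math-library kernels.
+*/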
+
+static __inline__ __m128d __DEFAULT_FN_ATTRS
+_mm_mask_load_pd (__m128d __W, __mmask8 __U, void const *__P)
+{
+ return (__m128d) __builtin_ia32_loadapd128_mask ((__v2df *) __P,
+ (__v2df) __W,
+ (__mmask8) __U);
+}
+
+static __inline__ __m128d __DEFAULT_FN_ATTRS
+_mm_maskz_load_pd (__mmask8 __U, void const *__P)
+{
+ return (__m128d) __builtin_ia32_loadapd128_mask ((__v2df *) __P,
+ (__v2df)
+ _mm_setzero_pd (),
+ (__mmask8) __U);
+}
+
+static __inline__ __m256d __DEFAULT_FN_ATTRS
+_mm256_mask_load_pd (__m256d __W, __mmask8 __U, void const *__P)
+{
+ return (__m256d) __builtin_ia32_loadapd256_mask ((__v4df *) __P,
+ (__v4df) __W,
+ (__mmask8) __U);
+}
+
+static __inline__ __m256d __DEFAULT_FN_ATTRS
+_mm256_maskz_load_pd (__mmask8 __U, void const *__P)
+{
+ return (__m256d) __builtin_ia32_loadapd256_mask ((__v4df *) __P,
+ (__v4df)
+ _mm256_setzero_pd (),
+ (__mmask8) __U);
+}
+
+static __inline__ __m128 __DEFAULT_FN_ATTRS
+_mm_mask_load_ps (__m128 __W, __mmask8 __U, void const *__P)
+{
+ return (__m128) __builtin_ia32_loadaps128_mask ((__v4sf *) __P,
+ (__v4sf) __W,
+ (__mmask8) __U);
+}
+
+static __inline__ __m128 __DEFAULT_FN_ATTRS
+_mm_maskz_load_ps (__mmask8 __U, void const *__P)
+{
+ return (__m128) __builtin_ia32_loadaps128_mask ((__v4sf *) __P,
+ (__v4sf)
+ _mm_setzero_ps (),
+ (__mmask8) __U);
+}
+
+static __inline__ __m256 __DEFAULT_FN_ATTRS
+_mm256_mask_load_ps (__m256 __W, __mmask8 __U, void const *__P)
+{
+ return (__m256) __builtin_ia32_loadaps256_mask ((__v8sf *) __P,
+ (__v8sf) __W,
+ (__mmask8) __U);
+}
+
+static __inline__ __m256 __DEFAULT_FN_ATTRS
+_mm256_maskz_load_ps (__mmask8 __U, void const *__P)
+{
+ return (__m256) __builtin_ia32_loadaps256_mask ((__v8sf *) __P,
+ (__v8sf)
+ _mm256_setzero_ps (),
+ (__mmask8) __U);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_mask_loadu_epi64 (__m128i __W, __mmask8 __U, void const *__P)
+{
+ return (__m128i) __builtin_ia32_loaddqudi128_mask ((__v2di *) __P,
+ (__v2di) __W,
+ (__mmask8) __U);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_maskz_loadu_epi64 (__mmask8 __U, void const *__P)
+{
+ return (__m128i) __builtin_ia32_loaddqudi128_mask ((__v2di *) __P,
+ (__v2di)
+ _mm_setzero_si128 (),
+ (__mmask8) __U);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS
+_mm256_mask_loadu_epi64 (__m256i __W, __mmask8 __U, void const *__P)
+{
+ return (__m256i) __builtin_ia32_loaddqudi256_mask ((__v4di *) __P,
+ (__v4di) __W,
+ (__mmask8) __U);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS
+_mm256_maskz_loadu_epi64 (__mmask8 __U, void const *__P)
+{
+ return (__m256i) __builtin_ia32_loaddqudi256_mask ((__v4di *) __P,
+ (__v4di)
+ _mm256_setzero_si256 (),
+ (__mmask8) __U);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_mask_loadu_epi32 (__m128i __W, __mmask8 __U, void const *__P)
+{
+ return (__m128i) __builtin_ia32_loaddqusi128_mask ((__v4si *) __P,
+ (__v4si) __W,
+ (__mmask8) __U);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_maskz_loadu_epi32 (__mmask8 __U, void const *__P)
+{
+ return (__m128i) __builtin_ia32_loaddqusi128_mask ((__v4si *) __P,
+ (__v4si)
+ _mm_setzero_si128 (),
+ (__mmask8) __U);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS
+_mm256_mask_loadu_epi32 (__m256i __W, __mmask8 __U, void const *__P)
+{
+ return (__m256i) __builtin_ia32_loaddqusi256_mask ((__v8si *) __P,
+ (__v8si) __W,
+ (__mmask8) __U);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS
+_mm256_maskz_loadu_epi32 (__mmask8 __U, void const *__P)
+{
+ return (__m256i) __builtin_ia32_loaddqusi256_mask ((__v8si *) __P,
+ (__v8si)
+ _mm256_setzero_si256 (),
+ (__mmask8) __U);
+}
+
+static __inline__ __m128d __DEFAULT_FN_ATTRS
+_mm_mask_loadu_pd (__m128d __W, __mmask8 __U, void const *__P)
+{
+ return (__m128d) __builtin_ia32_loadupd128_mask ((__v2df *) __P,
+ (__v2df) __W,
+ (__mmask8) __U);
+}
+
+static __inline__ __m128d __DEFAULT_FN_ATTRS
+_mm_maskz_loadu_pd (__mmask8 __U, void const *__P)
+{
+ return (__m128d) __builtin_ia32_loadupd128_mask ((__v2df *) __P,
+ (__v2df)
+ _mm_setzero_pd (),
+ (__mmask8) __U);
+}
+
+static __inline__ __m256d __DEFAULT_FN_ATTRS
+_mm256_mask_loadu_pd (__m256d __W, __mmask8 __U, void const *__P)
+{
+ return (__m256d) __builtin_ia32_loadupd256_mask ((__v4df *) __P,
+ (__v4df) __W,
+ (__mmask8) __U);
+}
+
+static __inline__ __m256d __DEFAULT_FN_ATTRS
+_mm256_maskz_loadu_pd (__mmask8 __U, void const *__P)
+{
+ return (__m256d) __builtin_ia32_loadupd256_mask ((__v4df *) __P,
+ (__v4df)
+ _mm256_setzero_pd (),
+ (__mmask8) __U);
+}
+
+static __inline__ __m128 __DEFAULT_FN_ATTRS
+_mm_mask_loadu_ps (__m128 __W, __mmask8 __U, void const *__P)
+{
+ return (__m128) __builtin_ia32_loadups128_mask ((__v4sf *) __P,
+ (__v4sf) __W,
+ (__mmask8) __U);
+}
+
+static __inline__ __m128 __DEFAULT_FN_ATTRS
+_mm_maskz_loadu_ps (__mmask8 __U, void const *__P)
+{
+ return (__m128) __builtin_ia32_loadups128_mask ((__v4sf *) __P,
+ (__v4sf)
+ _mm_setzero_ps (),
+ (__mmask8) __U);
+}
+
+static __inline__ __m256 __DEFAULT_FN_ATTRS
+_mm256_mask_loadu_ps (__m256 __W, __mmask8 __U, void const *__P)
+{
+ return (__m256) __builtin_ia32_loadups256_mask ((__v8sf *) __P,
+ (__v8sf) __W,
+ (__mmask8) __U);
+}
+
+static __inline__ __m256 __DEFAULT_FN_ATTRS
+_mm256_maskz_loadu_ps (__mmask8 __U, void const *__P)
+{
+ return (__m256) __builtin_ia32_loadups256_mask ((__v8sf *) __P,
+ (__v8sf)
+ _mm256_setzero_ps (),
+ (__mmask8) __U);
+}
+
+static __inline__ void __DEFAULT_FN_ATTRS
+_mm_mask_store_pd (void *__P, __mmask8 __U, __m128d __A)
+{
+ __builtin_ia32_storeapd128_mask ((__v2df *) __P,
+ (__v2df) __A,
+ (__mmask8) __U);
+}
+
+static __inline__ void __DEFAULT_FN_ATTRS
+_mm256_mask_store_pd (void *__P, __mmask8 __U, __m256d __A)
+{
+ __builtin_ia32_storeapd256_mask ((__v4df *) __P,
+ (__v4df) __A,
+ (__mmask8) __U);
+}
+
+static __inline__ void __DEFAULT_FN_ATTRS
+_mm_mask_store_ps (void *__P, __mmask8 __U, __m128 __A)
+{
+ __builtin_ia32_storeaps128_mask ((__v4sf *) __P,
+ (__v4sf) __A,
+ (__mmask8) __U);
+}
+
+static __inline__ void __DEFAULT_FN_ATTRS
+_mm256_mask_store_ps (void *__P, __mmask8 __U, __m256 __A)
+{
+ __builtin_ia32_storeaps256_mask ((__v8sf *) __P,
+ (__v8sf) __A,
+ (__mmask8) __U);
+}
+
+static __inline__ void __DEFAULT_FN_ATTRS
+_mm_mask_storeu_epi64 (void *__P, __mmask8 __U, __m128i __A)
+{
+ __builtin_ia32_storedqudi128_mask ((__v2di *) __P,
+ (__v2di) __A,
+ (__mmask8) __U);
+}
+
+static __inline__ void __DEFAULT_FN_ATTRS
+_mm256_mask_storeu_epi64 (void *__P, __mmask8 __U, __m256i __A)
+{
+ __builtin_ia32_storedqudi256_mask ((__v4di *) __P,
+ (__v4di) __A,
+ (__mmask8) __U);
+}
+
+static __inline__ void __DEFAULT_FN_ATTRS
+_mm_mask_storeu_epi32 (void *__P, __mmask8 __U, __m128i __A)
+{
+ __builtin_ia32_storedqusi128_mask ((__v4si *) __P,
+ (__v4si) __A,
+ (__mmask8) __U);
+}
+
+static __inline__ void __DEFAULT_FN_ATTRS
+_mm256_mask_storeu_epi32 (void *__P, __mmask8 __U, __m256i __A)
+{
+ __builtin_ia32_storedqusi256_mask ((__v8si *) __P,
+ (__v8si) __A,
+ (__mmask8) __U);
+}
+
+static __inline__ void __DEFAULT_FN_ATTRS
+_mm_mask_storeu_pd (void *__P, __mmask8 __U, __m128d __A)
+{
+ __builtin_ia32_storeupd128_mask ((__v2df *) __P,
+ (__v2df) __A,
+ (__mmask8) __U);
+}
+
+static __inline__ void __DEFAULT_FN_ATTRS
+_mm256_mask_storeu_pd (void *__P, __mmask8 __U, __m256d __A)
+{
+ __builtin_ia32_storeupd256_mask ((__v4df *) __P,
+ (__v4df) __A,
+ (__mmask8) __U);
+}
+
+static __inline__ void __DEFAULT_FN_ATTRS
+_mm_mask_storeu_ps (void *__P, __mmask8 __U, __m128 __A)
+{
+ __builtin_ia32_storeups128_mask ((__v4sf *) __P,
+ (__v4sf) __A,
+ (__mmask8) __U);
+}
+
+static __inline__ void __DEFAULT_FN_ATTRS
+_mm256_mask_storeu_ps (void *__P, __mmask8 __U, __m256 __A)
+{
+ __builtin_ia32_storeups256_mask ((__v8sf *) __P,
+ (__v8sf) __A,
+ (__mmask8) __U);
+}
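+
+/* Usage sketch (illustrative, not part of the upstream header): unlike the
+   aligned load/store forms above, the loadu/storeu forms accept unaligned
+   addresses; masked-off lanes are neither read nor written, which makes them
+   handy for loop tails:
+     __m128 r = _mm_maskz_loadu_ps((__mmask8)((1 << n) - 1), p); // first n floats, n <= 4
+*/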
+
+
+static __inline__ __m128d __DEFAULT_FN_ATTRS
+_mm_mask_unpackhi_pd(__m128d __W, __mmask8 __U, __m128d __A, __m128d __B)
+{
+ return (__m128d)__builtin_ia32_selectpd_128((__mmask8)__U,
+ (__v2df)_mm_unpackhi_pd(__A, __B),
+ (__v2df)__W);
+}
+
+static __inline__ __m128d __DEFAULT_FN_ATTRS
+_mm_maskz_unpackhi_pd(__mmask8 __U, __m128d __A, __m128d __B)
+{
+ return (__m128d)__builtin_ia32_selectpd_128((__mmask8)__U,
+ (__v2df)_mm_unpackhi_pd(__A, __B),
+ (__v2df)_mm_setzero_pd());
+}
+
+static __inline__ __m256d __DEFAULT_FN_ATTRS
+_mm256_mask_unpackhi_pd(__m256d __W, __mmask8 __U, __m256d __A, __m256d __B)
+{
+ return (__m256d)__builtin_ia32_selectpd_256((__mmask8)__U,
+ (__v4df)_mm256_unpackhi_pd(__A, __B),
+ (__v4df)__W);
+}
+
+static __inline__ __m256d __DEFAULT_FN_ATTRS
+_mm256_maskz_unpackhi_pd(__mmask8 __U, __m256d __A, __m256d __B)
+{
+ return (__m256d)__builtin_ia32_selectpd_256((__mmask8)__U,
+ (__v4df)_mm256_unpackhi_pd(__A, __B),
+ (__v4df)_mm256_setzero_pd());
+}
+
+static __inline__ __m128 __DEFAULT_FN_ATTRS
+_mm_mask_unpackhi_ps(__m128 __W, __mmask8 __U, __m128 __A, __m128 __B)
+{
+ return (__m128)__builtin_ia32_selectps_128((__mmask8)__U,
+ (__v4sf)_mm_unpackhi_ps(__A, __B),
+ (__v4sf)__W);
+}
+
+static __inline__ __m128 __DEFAULT_FN_ATTRS
+_mm_maskz_unpackhi_ps(__mmask8 __U, __m128 __A, __m128 __B)
+{
+ return (__m128)__builtin_ia32_selectps_128((__mmask8)__U,
+ (__v4sf)_mm_unpackhi_ps(__A, __B),
+ (__v4sf)_mm_setzero_ps());
+}
+
+static __inline__ __m256 __DEFAULT_FN_ATTRS
+_mm256_mask_unpackhi_ps(__m256 __W, __mmask8 __U, __m256 __A, __m256 __B)
+{
+ return (__m256)__builtin_ia32_selectps_256((__mmask8)__U,
+ (__v8sf)_mm256_unpackhi_ps(__A, __B),
+ (__v8sf)__W);
+}
+
+static __inline__ __m256 __DEFAULT_FN_ATTRS
+_mm256_maskz_unpackhi_ps(__mmask8 __U, __m256 __A, __m256 __B)
+{
+ return (__m256)__builtin_ia32_selectps_256((__mmask8)__U,
+ (__v8sf)_mm256_unpackhi_ps(__A, __B),
+ (__v8sf)_mm256_setzero_ps());
+}
+
+static __inline__ __m128d __DEFAULT_FN_ATTRS
+_mm_mask_unpacklo_pd(__m128d __W, __mmask8 __U, __m128d __A, __m128d __B)
+{
+ return (__m128d)__builtin_ia32_selectpd_128((__mmask8)__U,
+ (__v2df)_mm_unpacklo_pd(__A, __B),
+ (__v2df)__W);
+}
+
+static __inline__ __m128d __DEFAULT_FN_ATTRS
+_mm_maskz_unpacklo_pd(__mmask8 __U, __m128d __A, __m128d __B)
+{
+ return (__m128d)__builtin_ia32_selectpd_128((__mmask8)__U,
+ (__v2df)_mm_unpacklo_pd(__A, __B),
+ (__v2df)_mm_setzero_pd());
+}
+
+static __inline__ __m256d __DEFAULT_FN_ATTRS
+_mm256_mask_unpacklo_pd(__m256d __W, __mmask8 __U, __m256d __A, __m256d __B)
+{
+ return (__m256d)__builtin_ia32_selectpd_256((__mmask8)__U,
+ (__v4df)_mm256_unpacklo_pd(__A, __B),
+ (__v4df)__W);
+}
+
+static __inline__ __m256d __DEFAULT_FN_ATTRS
+_mm256_maskz_unpacklo_pd(__mmask8 __U, __m256d __A, __m256d __B)
+{
+ return (__m256d)__builtin_ia32_selectpd_256((__mmask8)__U,
+ (__v4df)_mm256_unpacklo_pd(__A, __B),
+ (__v4df)_mm256_setzero_pd());
+}
+
+static __inline__ __m128 __DEFAULT_FN_ATTRS
+_mm_mask_unpacklo_ps(__m128 __W, __mmask8 __U, __m128 __A, __m128 __B)
+{
+ return (__m128)__builtin_ia32_selectps_128((__mmask8)__U,
+ (__v4sf)_mm_unpacklo_ps(__A, __B),
+ (__v4sf)__W);
+}
+
+static __inline__ __m128 __DEFAULT_FN_ATTRS
+_mm_maskz_unpacklo_ps(__mmask8 __U, __m128 __A, __m128 __B)
+{
+ return (__m128)__builtin_ia32_selectps_128((__mmask8)__U,
+ (__v4sf)_mm_unpacklo_ps(__A, __B),
+ (__v4sf)_mm_setzero_ps());
+}
+
+static __inline__ __m256 __DEFAULT_FN_ATTRS
+_mm256_mask_unpacklo_ps(__m256 __W, __mmask8 __U, __m256 __A, __m256 __B)
+{
+ return (__m256)__builtin_ia32_selectps_256((__mmask8)__U,
+ (__v8sf)_mm256_unpacklo_ps(__A, __B),
+ (__v8sf)__W);
+}
+
+static __inline__ __m256 __DEFAULT_FN_ATTRS
+_mm256_maskz_unpacklo_ps(__mmask8 __U, __m256 __A, __m256 __B)
+{
+ return (__m256)__builtin_ia32_selectps_256((__mmask8)__U,
+ (__v8sf)_mm256_unpacklo_ps(__A, __B),
+ (__v8sf)_mm256_setzero_ps());
+}
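+
+/* Note (illustrative, not part of the upstream header): these masked unpack
+   forms compose the unmasked AVX intrinsic with a masked select builtin
+   instead of calling a dedicated masked builtin, letting the compiler fold
+   the blend into the instruction's own masking.
+*/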
+
+static __inline__ __m128d __DEFAULT_FN_ATTRS
+_mm_rcp14_pd (__m128d __A)
+{
+ return (__m128d) __builtin_ia32_rcp14pd128_mask ((__v2df) __A,
+ (__v2df)
+ _mm_setzero_pd (),
+ (__mmask8) -1);
+}
+
+static __inline__ __m128d __DEFAULT_FN_ATTRS
+_mm_mask_rcp14_pd (__m128d __W, __mmask8 __U, __m128d __A)
+{
+ return (__m128d) __builtin_ia32_rcp14pd128_mask ((__v2df) __A,
+ (__v2df) __W,
+ (__mmask8) __U);
+}
+
+static __inline__ __m128d __DEFAULT_FN_ATTRS
+_mm_maskz_rcp14_pd (__mmask8 __U, __m128d __A)
+{
+ return (__m128d) __builtin_ia32_rcp14pd128_mask ((__v2df) __A,
+ (__v2df)
+ _mm_setzero_pd (),
+ (__mmask8) __U);
+}
+
+static __inline__ __m256d __DEFAULT_FN_ATTRS
+_mm256_rcp14_pd (__m256d __A)
+{
+ return (__m256d) __builtin_ia32_rcp14pd256_mask ((__v4df) __A,
+ (__v4df)
+ _mm256_setzero_pd (),
+ (__mmask8) -1);
+}
+
+static __inline__ __m256d __DEFAULT_FN_ATTRS
+_mm256_mask_rcp14_pd (__m256d __W, __mmask8 __U, __m256d __A)
+{
+ return (__m256d) __builtin_ia32_rcp14pd256_mask ((__v4df) __A,
+ (__v4df) __W,
+ (__mmask8) __U);
+}
+
+static __inline__ __m256d __DEFAULT_FN_ATTRS
+_mm256_maskz_rcp14_pd (__mmask8 __U, __m256d __A)
+{
+ return (__m256d) __builtin_ia32_rcp14pd256_mask ((__v4df) __A,
+ (__v4df)
+ _mm256_setzero_pd (),
+ (__mmask8) __U);
+}
+
+static __inline__ __m128 __DEFAULT_FN_ATTRS
+_mm_rcp14_ps (__m128 __A)
+{
+ return (__m128) __builtin_ia32_rcp14ps128_mask ((__v4sf) __A,
+ (__v4sf)
+ _mm_setzero_ps (),
+ (__mmask8) -1);
+}
+
+static __inline__ __m128 __DEFAULT_FN_ATTRS
+_mm_mask_rcp14_ps (__m128 __W, __mmask8 __U, __m128 __A)
+{
+ return (__m128) __builtin_ia32_rcp14ps128_mask ((__v4sf) __A,
+ (__v4sf) __W,
+ (__mmask8) __U);
+}
+
+static __inline__ __m128 __DEFAULT_FN_ATTRS
+_mm_maskz_rcp14_ps (__mmask8 __U, __m128 __A)
+{
+ return (__m128) __builtin_ia32_rcp14ps128_mask ((__v4sf) __A,
+ (__v4sf)
+ _mm_setzero_ps (),
+ (__mmask8) __U);
+}
+
+static __inline__ __m256 __DEFAULT_FN_ATTRS
+_mm256_rcp14_ps (__m256 __A)
+{
+ return (__m256) __builtin_ia32_rcp14ps256_mask ((__v8sf) __A,
+ (__v8sf)
+ _mm256_setzero_ps (),
+ (__mmask8) -1);
+}
+
+static __inline__ __m256 __DEFAULT_FN_ATTRS
+_mm256_mask_rcp14_ps (__m256 __W, __mmask8 __U, __m256 __A)
+{
+ return (__m256) __builtin_ia32_rcp14ps256_mask ((__v8sf) __A,
+ (__v8sf) __W,
+ (__mmask8) __U);
+}
+
+static __inline__ __m256 __DEFAULT_FN_ATTRS
+_mm256_maskz_rcp14_ps (__mmask8 __U, __m256 __A)
+{
+ return (__m256) __builtin_ia32_rcp14ps256_mask ((__v8sf) __A,
+ (__v8sf)
+ _mm256_setzero_ps (),
+ (__mmask8) __U);
+}
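+
+/* Usage sketch (illustrative, not part of the upstream header): rcp14
+   computes an approximate reciprocal with relative error at most 2^-14, so
+   a Newton-Raphson step is typically added when more precision is needed:
+     __m128 x = _mm_rcp14_ps(a);
+     x = _mm_mul_ps(x, _mm_sub_ps(_mm_set1_ps(2.0f), _mm_mul_ps(a, x)));
+*/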
+
+#define _mm_mask_permute_pd(W, U, X, C) __extension__ ({ \
+ (__m128d)__builtin_ia32_selectpd_128((__mmask8)(U), \
+ (__v2df)_mm_permute_pd((X), (C)), \
+ (__v2df)(__m128d)(W)); })
+
+#define _mm_maskz_permute_pd(U, X, C) __extension__ ({ \
+ (__m128d)__builtin_ia32_selectpd_128((__mmask8)(U), \
+ (__v2df)_mm_permute_pd((X), (C)), \
+ (__v2df)_mm_setzero_pd()); })
+
+#define _mm256_mask_permute_pd(W, U, X, C) __extension__ ({ \
+ (__m256d)__builtin_ia32_selectpd_256((__mmask8)(U), \
+ (__v4df)_mm256_permute_pd((X), (C)), \
+ (__v4df)(__m256d)(W)); })
+
+#define _mm256_maskz_permute_pd(U, X, C) __extension__ ({ \
+ (__m256d)__builtin_ia32_selectpd_256((__mmask8)(U), \
+ (__v4df)_mm256_permute_pd((X), (C)), \
+ (__v4df)_mm256_setzero_pd()); })
+
+#define _mm_mask_permute_ps(W, U, X, C) __extension__ ({ \
+ (__m128)__builtin_ia32_selectps_128((__mmask8)(U), \
+ (__v4sf)_mm_permute_ps((X), (C)), \
+ (__v4sf)(__m128)(W)); })
+
+#define _mm_maskz_permute_ps(U, X, C) __extension__ ({ \
+ (__m128)__builtin_ia32_selectps_128((__mmask8)(U), \
+ (__v4sf)_mm_permute_ps((X), (C)), \
+ (__v4sf)_mm_setzero_ps()); })
+
+#define _mm256_mask_permute_ps(W, U, X, C) __extension__ ({ \
+ (__m256)__builtin_ia32_selectps_256((__mmask8)(U), \
+ (__v8sf)_mm256_permute_ps((X), (C)), \
+ (__v8sf)(__m256)(W)); })
+
+#define _mm256_maskz_permute_ps(U, X, C) __extension__ ({ \
+ (__m256)__builtin_ia32_selectps_256((__mmask8)(U), \
+ (__v8sf)_mm256_permute_ps((X), (C)), \
+ (__v8sf)_mm256_setzero_ps()); })
+
+static __inline__ __m128d __DEFAULT_FN_ATTRS
+_mm_mask_permutevar_pd (__m128d __W, __mmask8 __U, __m128d __A,
+ __m128i __C)
+{
+ return (__m128d) __builtin_ia32_vpermilvarpd_mask ((__v2df) __A,
+ (__v2di) __C,
+ (__v2df) __W,
+ (__mmask8) __U);
+}
+
+static __inline__ __m128d __DEFAULT_FN_ATTRS
+_mm_maskz_permutevar_pd (__mmask8 __U, __m128d __A, __m128i __C)
+{
+ return (__m128d) __builtin_ia32_vpermilvarpd_mask ((__v2df) __A,
+ (__v2di) __C,
+ (__v2df)
+ _mm_setzero_pd (),
+ (__mmask8) __U);
+}
+
+static __inline__ __m256d __DEFAULT_FN_ATTRS
+_mm256_mask_permutevar_pd (__m256d __W, __mmask8 __U, __m256d __A,
+ __m256i __C)
+{
+ return (__m256d) __builtin_ia32_vpermilvarpd256_mask ((__v4df) __A,
+ (__v4di) __C,
+ (__v4df) __W,
+ (__mmask8)
+ __U);
+}
+
+static __inline__ __m256d __DEFAULT_FN_ATTRS
+_mm256_maskz_permutevar_pd (__mmask8 __U, __m256d __A, __m256i __C)
+{
+ return (__m256d) __builtin_ia32_vpermilvarpd256_mask ((__v4df) __A,
+ (__v4di) __C,
+ (__v4df)
+ _mm256_setzero_pd (),
+ (__mmask8)
+ __U);
+}
+
+static __inline__ __m128 __DEFAULT_FN_ATTRS
+_mm_mask_permutevar_ps (__m128 __W, __mmask8 __U, __m128 __A,
+ __m128i __C)
+{
+ return (__m128) __builtin_ia32_vpermilvarps_mask ((__v4sf) __A,
+ (__v4si) __C,
+ (__v4sf) __W,
+ (__mmask8) __U);
+}
+
+static __inline__ __m128 __DEFAULT_FN_ATTRS
+_mm_maskz_permutevar_ps (__mmask8 __U, __m128 __A, __m128i __C)
+{
+ return (__m128) __builtin_ia32_vpermilvarps_mask ((__v4sf) __A,
+ (__v4si) __C,
+ (__v4sf)
+ _mm_setzero_ps (),
+ (__mmask8) __U);
+}
+
+static __inline__ __m256 __DEFAULT_FN_ATTRS
+_mm256_mask_permutevar_ps (__m256 __W, __mmask8 __U, __m256 __A,
+ __m256i __C)
+{
+ return (__m256) __builtin_ia32_vpermilvarps256_mask ((__v8sf) __A,
+ (__v8si) __C,
+ (__v8sf) __W,
+ (__mmask8) __U);
+}
+
+static __inline__ __m256 __DEFAULT_FN_ATTRS
+_mm256_maskz_permutevar_ps (__mmask8 __U, __m256 __A, __m256i __C)
+{
+ return (__m256) __builtin_ia32_vpermilvarps256_mask ((__v8sf) __A,
+ (__v8si) __C,
+ (__v8sf)
+ _mm256_setzero_ps (),
+ (__mmask8) __U);
+}
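+
+/* Note (illustrative, not part of the upstream header): the permute macros
+   take the shuffle control as an immediate, while the permutevar forms read
+   a per-element control from a vector; both select within each 128-bit lane.
+*/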
+
+static __inline__ __mmask8 __DEFAULT_FN_ATTRS
+_mm_test_epi32_mask (__m128i __A, __m128i __B)
+{
+ return (__mmask8) __builtin_ia32_ptestmd128 ((__v4si) __A,
+ (__v4si) __B,
+ (__mmask8) -1);
+}
+
+static __inline__ __mmask8 __DEFAULT_FN_ATTRS
+_mm_mask_test_epi32_mask (__mmask8 __U, __m128i __A, __m128i __B)
+{
+ return (__mmask8) __builtin_ia32_ptestmd128 ((__v4si) __A,
+ (__v4si) __B, __U);
+}
+
+static __inline__ __mmask8 __DEFAULT_FN_ATTRS
+_mm256_test_epi32_mask (__m256i __A, __m256i __B)
+{
+ return (__mmask8) __builtin_ia32_ptestmd256 ((__v8si) __A,
+ (__v8si) __B,
+ (__mmask8) -1);
+}
+
+static __inline__ __mmask8 __DEFAULT_FN_ATTRS
+_mm256_mask_test_epi32_mask (__mmask8 __U, __m256i __A, __m256i __B)
+{
+ return (__mmask8) __builtin_ia32_ptestmd256 ((__v8si) __A,
+ (__v8si) __B, __U);
+}
+
+static __inline__ __mmask8 __DEFAULT_FN_ATTRS
+_mm_test_epi64_mask (__m128i __A, __m128i __B)
+{
+ return (__mmask8) __builtin_ia32_ptestmq128 ((__v2di) __A,
+ (__v2di) __B,
+ (__mmask8) -1);
+}
+
+static __inline__ __mmask8 __DEFAULT_FN_ATTRS
+_mm_mask_test_epi64_mask (__mmask8 __U, __m128i __A, __m128i __B)
+{
+ return (__mmask8) __builtin_ia32_ptestmq128 ((__v2di) __A,
+ (__v2di) __B, __U);
+}
+
+static __inline__ __mmask8 __DEFAULT_FN_ATTRS
+_mm256_test_epi64_mask (__m256i __A, __m256i __B)
+{
+ return (__mmask8) __builtin_ia32_ptestmq256 ((__v4di) __A,
+ (__v4di) __B,
+ (__mmask8) -1);
+}
+
+static __inline__ __mmask8 __DEFAULT_FN_ATTRS
+_mm256_mask_test_epi64_mask (__mmask8 __U, __m256i __A, __m256i __B)
+{
+ return (__mmask8) __builtin_ia32_ptestmq256 ((__v4di) __A,
+ (__v4di) __B, __U);
+}
+
+static __inline__ __mmask8 __DEFAULT_FN_ATTRS
+_mm_testn_epi32_mask (__m128i __A, __m128i __B)
+{
+ return (__mmask8) __builtin_ia32_ptestnmd128 ((__v4si) __A,
+ (__v4si) __B,
+ (__mmask8) -1);
+}
+
+static __inline__ __mmask8 __DEFAULT_FN_ATTRS
+_mm_mask_testn_epi32_mask (__mmask8 __U, __m128i __A, __m128i __B)
+{
+ return (__mmask8) __builtin_ia32_ptestnmd128 ((__v4si) __A,
+ (__v4si) __B, __U);
+}
+
+static __inline__ __mmask8 __DEFAULT_FN_ATTRS
+_mm256_testn_epi32_mask (__m256i __A, __m256i __B)
+{
+ return (__mmask8) __builtin_ia32_ptestnmd256 ((__v8si) __A,
+ (__v8si) __B,
+ (__mmask8) -1);
+}
+
+static __inline__ __mmask8 __DEFAULT_FN_ATTRS
+_mm256_mask_testn_epi32_mask (__mmask8 __U, __m256i __A, __m256i __B)
+{
+ return (__mmask8) __builtin_ia32_ptestnmd256 ((__v8si) __A,
+ (__v8si) __B, __U);
+}
+
+static __inline__ __mmask8 __DEFAULT_FN_ATTRS
+_mm_testn_epi64_mask (__m128i __A, __m128i __B)
+{
+ return (__mmask8) __builtin_ia32_ptestnmq128 ((__v2di) __A,
+ (__v2di) __B,
+ (__mmask8) -1);
+}
+
+static __inline__ __mmask8 __DEFAULT_FN_ATTRS
+_mm_mask_testn_epi64_mask (__mmask8 __U, __m128i __A, __m128i __B)
+{
+ return (__mmask8) __builtin_ia32_ptestnmq128 ((__v2di) __A,
+ (__v2di) __B, __U);
+}
+
+static __inline__ __mmask8 __DEFAULT_FN_ATTRS
+_mm256_testn_epi64_mask (__m256i __A, __m256i __B)
+{
+ return (__mmask8) __builtin_ia32_ptestnmq256 ((__v4di) __A,
+ (__v4di) __B,
+ (__mmask8) -1);
+}
+
+static __inline__ __mmask8 __DEFAULT_FN_ATTRS
+_mm256_mask_testn_epi64_mask (__mmask8 __U, __m256i __A, __m256i __B)
+{
+ return (__mmask8) __builtin_ia32_ptestnmq256 ((__v4di) __A,
+ (__v4di) __B, __U);
+}
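+
+/* Usage sketch (illustrative, not part of the upstream header): the test
+   forms set mask bit i when (__A[i] & __B[i]) != 0, and the testn forms set
+   it when that AND is zero; both turn a vector predicate into a __mmask8 for
+   later masked operations:
+     __mmask8 nz = _mm_test_epi32_mask(v, v); // which lanes of v are nonzero
+*/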
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_mask_unpackhi_epi32(__m128i __W, __mmask8 __U, __m128i __A, __m128i __B)
+{
+ return (__m128i)__builtin_ia32_selectd_128((__mmask8)__U,
+ (__v4si)_mm_unpackhi_epi32(__A, __B),
+ (__v4si)__W);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_maskz_unpackhi_epi32(__mmask8 __U, __m128i __A, __m128i __B)
+{
+ return (__m128i)__builtin_ia32_selectd_128((__mmask8)__U,
+ (__v4si)_mm_unpackhi_epi32(__A, __B),
+ (__v4si)_mm_setzero_si128());
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS
+_mm256_mask_unpackhi_epi32(__m256i __W, __mmask8 __U, __m256i __A, __m256i __B)
+{
+ return (__m256i)__builtin_ia32_selectd_256((__mmask8)__U,
+ (__v8si)_mm256_unpackhi_epi32(__A, __B),
+ (__v8si)__W);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS
+_mm256_maskz_unpackhi_epi32(__mmask8 __U, __m256i __A, __m256i __B)
+{
+ return (__m256i)__builtin_ia32_selectd_256((__mmask8)__U,
+ (__v8si)_mm256_unpackhi_epi32(__A, __B),
+ (__v8si)_mm256_setzero_si256());
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_mask_unpackhi_epi64(__m128i __W, __mmask8 __U, __m128i __A, __m128i __B)
+{
+ return (__m128i)__builtin_ia32_selectq_128((__mmask8)__U,
+ (__v2di)_mm_unpackhi_epi64(__A, __B),
+ (__v2di)__W);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_maskz_unpackhi_epi64(__mmask8 __U, __m128i __A, __m128i __B)
+{
+ return (__m128i)__builtin_ia32_selectq_128((__mmask8)__U,
+ (__v2di)_mm_unpackhi_epi64(__A, __B),
+ (__v2di)_mm_setzero_di());
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS
+_mm256_mask_unpackhi_epi64(__m256i __W, __mmask8 __U, __m256i __A, __m256i __B)
+{
+ return (__m256i)__builtin_ia32_selectq_256((__mmask8)__U,
+ (__v4di)_mm256_unpackhi_epi64(__A, __B),
+ (__v4di)__W);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS
+_mm256_maskz_unpackhi_epi64(__mmask8 __U, __m256i __A, __m256i __B)
+{
+ return (__m256i)__builtin_ia32_selectq_256((__mmask8)__U,
+ (__v4di)_mm256_unpackhi_epi64(__A, __B),
+ (__v4di)_mm256_setzero_si256());
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_mask_unpacklo_epi32(__m128i __W, __mmask8 __U, __m128i __A, __m128i __B)
+{
+ return (__m128i)__builtin_ia32_selectd_128((__mmask8)__U,
+ (__v4si)_mm_unpacklo_epi32(__A, __B),
+ (__v4si)__W);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_maskz_unpacklo_epi32(__mmask8 __U, __m128i __A, __m128i __B)
+{
+ return (__m128i)__builtin_ia32_selectd_128((__mmask8)__U,
+ (__v4si)_mm_unpacklo_epi32(__A, __B),
+ (__v4si)_mm_setzero_si128());
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS
+_mm256_mask_unpacklo_epi32(__m256i __W, __mmask8 __U, __m256i __A, __m256i __B)
+{
+ return (__m256i)__builtin_ia32_selectd_256((__mmask8)__U,
+ (__v8si)_mm256_unpacklo_epi32(__A, __B),
+ (__v8si)__W);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS
+_mm256_maskz_unpacklo_epi32(__mmask8 __U, __m256i __A, __m256i __B)
+{
+ return (__m256i)__builtin_ia32_selectd_256((__mmask8)__U,
+ (__v8si)_mm256_unpacklo_epi32(__A, __B),
+ (__v8si)_mm256_setzero_si256());
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_mask_unpacklo_epi64(__m128i __W, __mmask8 __U, __m128i __A, __m128i __B)
+{
+ return (__m128i)__builtin_ia32_selectq_128((__mmask8)__U,
+ (__v2di)_mm_unpacklo_epi64(__A, __B),
+ (__v2di)__W);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_maskz_unpacklo_epi64(__mmask8 __U, __m128i __A, __m128i __B)
+{
+ return (__m128i)__builtin_ia32_selectq_128((__mmask8)__U,
+ (__v2di)_mm_unpacklo_epi64(__A, __B),
+ (__v2di)_mm_setzero_di());
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS
+_mm256_mask_unpacklo_epi64(__m256i __W, __mmask8 __U, __m256i __A, __m256i __B)
+{
+ return (__m256i)__builtin_ia32_selectq_256((__mmask8)__U,
+ (__v4di)_mm256_unpacklo_epi64(__A, __B),
+ (__v4di)__W);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS
+_mm256_maskz_unpacklo_epi64(__mmask8 __U, __m256i __A, __m256i __B)
+{
+ return (__m256i)__builtin_ia32_selectq_256((__mmask8)__U,
+ (__v4di)_mm256_unpacklo_epi64(__A, __B),
+ (__v4di)_mm256_setzero_si256());
+}
+
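+/* Usage sketch (illustrative, not part of the upstream header): the masked
+ * unpacks follow the select pattern used throughout this file -- compute
+ * the full unpack, then take lane i from it when mask bit i is set, else
+ * from __W (mask form) or zero (maskz form):
+ *
+ *   __m128i r = _mm_mask_unpackhi_epi32(w, 0x5, a, b);
+ *   // lanes 0 and 2 from unpackhi(a, b); lanes 1 and 3 from w
+ */
+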
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_mask_sra_epi32 (__m128i __W, __mmask8 __U, __m128i __A,
+ __m128i __B)
+{
+ return (__m128i) __builtin_ia32_psrad128_mask ((__v4si) __A,
+ (__v4si) __B,
+ (__v4si) __W,
+ (__mmask8) __U);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_maskz_sra_epi32 (__mmask8 __U, __m128i __A, __m128i __B)
+{
+ return (__m128i) __builtin_ia32_psrad128_mask ((__v4si) __A,
+ (__v4si) __B,
+ (__v4si)
+ _mm_setzero_si128 (),
+ (__mmask8) __U);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS
+_mm256_mask_sra_epi32 (__m256i __W, __mmask8 __U, __m256i __A,
+ __m128i __B)
+{
+ return (__m256i) __builtin_ia32_psrad256_mask ((__v8si) __A,
+ (__v4si) __B,
+ (__v8si) __W,
+ (__mmask8) __U);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS
+_mm256_maskz_sra_epi32 (__mmask8 __U, __m256i __A, __m128i __B)
+{
+ return (__m256i) __builtin_ia32_psrad256_mask ((__v8si) __A,
+ (__v4si) __B,
+ (__v8si)
+ _mm256_setzero_si256 (),
+ (__mmask8) __U);
+}
+
+#define _mm_mask_srai_epi32(W, U, A, imm) __extension__ ({ \
+ (__m128i)__builtin_ia32_psradi128_mask((__v4si)(__m128i)(A), (int)(imm), \
+ (__v4si)(__m128i)(W), \
+ (__mmask8)(U)); })
+
+#define _mm_maskz_srai_epi32(U, A, imm) __extension__ ({ \
+ (__m128i)__builtin_ia32_psradi128_mask((__v4si)(__m128i)(A), (int)(imm), \
+ (__v4si)_mm_setzero_si128(), \
+ (__mmask8)(U)); })
+
+#define _mm256_mask_srai_epi32(W, U, A, imm) __extension__ ({ \
+ (__m256i)__builtin_ia32_psradi256_mask((__v8si)(__m256i)(A), (int)(imm), \
+ (__v8si)(__m256i)(W), \
+ (__mmask8)(U)); })
+
+#define _mm256_maskz_srai_epi32(U, A, imm) __extension__ ({ \
+ (__m256i)__builtin_ia32_psradi256_mask((__v8si)(__m256i)(A), (int)(imm), \
+ (__v8si)_mm256_setzero_si256(), \
+ (__mmask8)(U)); })
+
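+/* Usage sketch (illustrative, not part of the upstream header): srai
+ * shifts each 32-bit lane right arithmetically (sign-filling) by the
+ * immediate before the mask is applied:
+ *
+ *   __m128i r = _mm_maskz_srai_epi32(0x3, a, 2);
+ *   // lanes 0-1 = a[i] >> 2 (arithmetic); lanes 2-3 = 0
+ */
+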
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_sra_epi64 (__m128i __A, __m128i __B)
+{
+ return (__m128i) __builtin_ia32_psraq128_mask ((__v2di) __A,
+ (__v2di) __B,
+ (__v2di)
+ _mm_setzero_di (),
+ (__mmask8) -1);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_mask_sra_epi64 (__m128i __W, __mmask8 __U, __m128i __A,
+ __m128i __B)
+{
+ return (__m128i) __builtin_ia32_psraq128_mask ((__v2di) __A,
+ (__v2di) __B,
+ (__v2di) __W,
+ (__mmask8) __U);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_maskz_sra_epi64 (__mmask8 __U, __m128i __A, __m128i __B)
+{
+ return (__m128i) __builtin_ia32_psraq128_mask ((__v2di) __A,
+ (__v2di) __B,
+ (__v2di)
+ _mm_setzero_di (),
+ (__mmask8) __U);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS
+_mm256_sra_epi64 (__m256i __A, __m128i __B)
+{
+ return (__m256i) __builtin_ia32_psraq256_mask ((__v4di) __A,
+ (__v2di) __B,
+ (__v4di)
+ _mm256_setzero_si256 (),
+ (__mmask8) -1);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS
+_mm256_mask_sra_epi64 (__m256i __W, __mmask8 __U, __m256i __A,
+ __m128i __B)
+{
+ return (__m256i) __builtin_ia32_psraq256_mask ((__v4di) __A,
+ (__v2di) __B,
+ (__v4di) __W,
+ (__mmask8) __U);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS
+_mm256_maskz_sra_epi64 (__mmask8 __U, __m256i __A, __m128i __B)
+{
+ return (__m256i) __builtin_ia32_psraq256_mask ((__v4di) __A,
+ (__v2di) __B,
+ (__v4di)
+ _mm256_setzero_si256 (),
+ (__mmask8) __U);
+}
+
+#define _mm_srai_epi64(A, imm) __extension__ ({ \
+ (__m128i)__builtin_ia32_psraqi128_mask((__v2di)(__m128i)(A), (int)(imm), \
+ (__v2di)_mm_setzero_di(), \
+ (__mmask8)-1); })
+
+#define _mm_mask_srai_epi64(W, U, A, imm) __extension__ ({ \
+ (__m128i)__builtin_ia32_psraqi128_mask((__v2di)(__m128i)(A), (int)(imm), \
+ (__v2di)(__m128i)(W), \
+ (__mmask8)(U)); })
+
+#define _mm_maskz_srai_epi64(U, A, imm) __extension__ ({ \
+ (__m128i)__builtin_ia32_psraqi128_mask((__v2di)(__m128i)(A), (int)(imm), \
+ (__v2di)_mm_setzero_si128(), \
+ (__mmask8)(U)); })
+
+#define _mm256_srai_epi64(A, imm) __extension__ ({ \
+ (__m256i)__builtin_ia32_psraqi256_mask((__v4di)(__m256i)(A), (int)(imm), \
+ (__v4di)_mm256_setzero_si256(), \
+ (__mmask8)-1); })
+
+#define _mm256_mask_srai_epi64(W, U, A, imm) __extension__ ({ \
+ (__m256i)__builtin_ia32_psraqi256_mask((__v4di)(__m256i)(A), (int)(imm), \
+ (__v4di)(__m256i)(W), \
+ (__mmask8)(U)); })
+
+#define _mm256_maskz_srai_epi64(U, A, imm) __extension__ ({ \
+ (__m256i)__builtin_ia32_psraqi256_mask((__v4di)(__m256i)(A), (int)(imm), \
+ (__v4di)_mm256_setzero_si256(), \
+ (__mmask8)(U)); })
+
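+/* Usage sketch (illustrative, not part of the upstream header): the 64-bit
+ * arithmetic right shifts have no SSE/AVX2 counterpart -- VPSRAQ is new
+ * with AVX-512:
+ *
+ *   __m128i s = _mm_srai_epi64(a, 63);  // each lane becomes 0 or -1 (sign)
+ */
+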
+#define _mm_ternarylogic_epi32(A, B, C, imm) __extension__ ({ \
+ (__m128i)__builtin_ia32_pternlogd128_mask((__v4si)(__m128i)(A), \
+ (__v4si)(__m128i)(B), \
+ (__v4si)(__m128i)(C), (int)(imm), \
+ (__mmask8)-1); })
+
+#define _mm_mask_ternarylogic_epi32(A, U, B, C, imm) __extension__ ({ \
+ (__m128i)__builtin_ia32_pternlogd128_mask((__v4si)(__m128i)(A), \
+ (__v4si)(__m128i)(B), \
+ (__v4si)(__m128i)(C), (int)(imm), \
+ (__mmask8)(U)); })
+
+#define _mm_maskz_ternarylogic_epi32(U, A, B, C, imm) __extension__ ({ \
+ (__m128i)__builtin_ia32_pternlogd128_maskz((__v4si)(__m128i)(A), \
+ (__v4si)(__m128i)(B), \
+ (__v4si)(__m128i)(C), (int)(imm), \
+ (__mmask8)(U)); })
+
+#define _mm256_ternarylogic_epi32(A, B, C, imm) __extension__ ({ \
+ (__m256i)__builtin_ia32_pternlogd256_mask((__v8si)(__m256i)(A), \
+ (__v8si)(__m256i)(B), \
+ (__v8si)(__m256i)(C), (int)(imm), \
+ (__mmask8)-1); })
+
+#define _mm256_mask_ternarylogic_epi32(A, U, B, C, imm) __extension__ ({ \
+ (__m256i)__builtin_ia32_pternlogd256_mask((__v8si)(__m256i)(A), \
+ (__v8si)(__m256i)(B), \
+ (__v8si)(__m256i)(C), (int)(imm), \
+ (__mmask8)(U)); })
+
+#define _mm256_maskz_ternarylogic_epi32(U, A, B, C, imm) __extension__ ({ \
+ (__m256i)__builtin_ia32_pternlogd256_maskz((__v8si)(__m256i)(A), \
+ (__v8si)(__m256i)(B), \
+ (__v8si)(__m256i)(C), (int)(imm), \
+ (__mmask8)(U)); })
+
+#define _mm_ternarylogic_epi64(A, B, C, imm) __extension__ ({ \
+ (__m128i)__builtin_ia32_pternlogq128_mask((__v2di)(__m128i)(A), \
+ (__v2di)(__m128i)(B), \
+ (__v2di)(__m128i)(C), (int)(imm), \
+ (__mmask8)-1); })
+
+#define _mm_mask_ternarylogic_epi64(A, U, B, C, imm) __extension__ ({ \
+ (__m128i)__builtin_ia32_pternlogq128_mask((__v2di)(__m128i)(A), \
+ (__v2di)(__m128i)(B), \
+ (__v2di)(__m128i)(C), (int)(imm), \
+ (__mmask8)(U)); })
+
+#define _mm_maskz_ternarylogic_epi64(U, A, B, C, imm) __extension__ ({ \
+ (__m128i)__builtin_ia32_pternlogq128_maskz((__v2di)(__m128i)(A), \
+ (__v2di)(__m128i)(B), \
+ (__v2di)(__m128i)(C), (int)(imm), \
+ (__mmask8)(U)); })
+
+#define _mm256_ternarylogic_epi64(A, B, C, imm) __extension__ ({ \
+ (__m256i)__builtin_ia32_pternlogq256_mask((__v4di)(__m256i)(A), \
+ (__v4di)(__m256i)(B), \
+ (__v4di)(__m256i)(C), (int)(imm), \
+ (__mmask8)-1); })
+
+#define _mm256_mask_ternarylogic_epi64(A, U, B, C, imm) __extension__ ({ \
+ (__m256i)__builtin_ia32_pternlogq256_mask((__v4di)(__m256i)(A), \
+ (__v4di)(__m256i)(B), \
+ (__v4di)(__m256i)(C), (int)(imm), \
+ (__mmask8)(U)); })
+
+#define _mm256_maskz_ternarylogic_epi64(U, A, B, C, imm) __extension__ ({ \
+ (__m256i)__builtin_ia32_pternlogq256_maskz((__v4di)(__m256i)(A), \
+ (__v4di)(__m256i)(B), \
+ (__v4di)(__m256i)(C), (int)(imm), \
+ (__mmask8)(U)); })
+
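+/* Usage sketch (illustrative, not part of the upstream header): the
+ * ternarylogic imm8 is a three-input truth table -- the output bit for
+ * input bits (a,b,c) is bit ((a << 2) | (b << 1) | c) of imm8.  For
+ * example, 0x96 encodes A ^ B ^ C and 0xE8 encodes majority(A, B, C):
+ *
+ *   __m128i x = _mm_ternarylogic_epi32(a, b, c, 0x96);  // a ^ b ^ c
+ */
+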
+#define _mm256_shuffle_f32x4(A, B, imm) __extension__ ({ \
+ (__m256)__builtin_ia32_shuf_f32x4_256_mask((__v8sf)(__m256)(A), \
+ (__v8sf)(__m256)(B), (int)(imm), \
+ (__v8sf)_mm256_setzero_ps(), \
+ (__mmask8)-1); })
+
+#define _mm256_mask_shuffle_f32x4(W, U, A, B, imm) __extension__ ({ \
+ (__m256)__builtin_ia32_shuf_f32x4_256_mask((__v8sf)(__m256)(A), \
+ (__v8sf)(__m256)(B), (int)(imm), \
+ (__v8sf)(__m256)(W), \
+ (__mmask8)(U)); })
+
+#define _mm256_maskz_shuffle_f32x4(U, A, B, imm) __extension__ ({ \
+ (__m256)__builtin_ia32_shuf_f32x4_256_mask((__v8sf)(__m256)(A), \
+ (__v8sf)(__m256)(B), (int)(imm), \
+ (__v8sf)_mm256_setzero_ps(), \
+ (__mmask8)(U)); })
+
+#define _mm256_shuffle_f64x2(A, B, imm) __extension__ ({ \
+ (__m256d)__builtin_ia32_shuf_f64x2_256_mask((__v4df)(__m256d)(A), \
+ (__v4df)(__m256d)(B), \
+ (int)(imm), \
+ (__v4df)_mm256_setzero_pd(), \
+ (__mmask8)-1); })
+
+#define _mm256_mask_shuffle_f64x2(W, U, A, B, imm) __extension__ ({ \
+ (__m256d)__builtin_ia32_shuf_f64x2_256_mask((__v4df)(__m256d)(A), \
+ (__v4df)(__m256d)(B), \
+ (int)(imm), \
+ (__v4df)(__m256d)(W), \
+ (__mmask8)(U)); })
+
+#define _mm256_maskz_shuffle_f64x2(U, A, B, imm) __extension__ ({ \
+ (__m256d)__builtin_ia32_shuf_f64x2_256_mask((__v4df)(__m256d)(A), \
+ (__v4df)(__m256d)(B), \
+ (int)(imm), \
+ (__v4df)_mm256_setzero_pd(), \
+ (__mmask8)(U)); })
+
+#define _mm256_shuffle_i32x4(A, B, imm) __extension__ ({ \
+ (__m256i)__builtin_ia32_shuf_i32x4_256_mask((__v8si)(__m256i)(A), \
+ (__v8si)(__m256i)(B), \
+ (int)(imm), \
+ (__v8si)_mm256_setzero_si256(), \
+ (__mmask8)-1); })
+
+#define _mm256_mask_shuffle_i32x4(W, U, A, B, imm) __extension__ ({ \
+ (__m256i)__builtin_ia32_shuf_i32x4_256_mask((__v8si)(__m256i)(A), \
+ (__v8si)(__m256i)(B), \
+ (int)(imm), \
+ (__v8si)(__m256i)(W), \
+ (__mmask8)(U)); })
+
+#define _mm256_maskz_shuffle_i32x4(U, A, B, imm) __extension__ ({ \
+ (__m256i)__builtin_ia32_shuf_i32x4_256_mask((__v8si)(__m256i)(A), \
+ (__v8si)(__m256i)(B), \
+ (int)(imm), \
+ (__v8si)_mm256_setzero_si256(), \
+ (__mmask8)(U)); })
+
+#define _mm256_shuffle_i64x2(A, B, imm) __extension__ ({ \
+ (__m256i)__builtin_ia32_shuf_i64x2_256_mask((__v4di)(__m256i)(A), \
+ (__v4di)(__m256i)(B), \
+ (int)(imm), \
+ (__v4di)_mm256_setzero_si256(), \
+ (__mmask8)-1); })
+
+#define _mm256_mask_shuffle_i64x2(W, U, A, B, imm) __extension__ ({ \
+ (__m256i)__builtin_ia32_shuf_i64x2_256_mask((__v4di)(__m256i)(A), \
+ (__v4di)(__m256i)(B), \
+ (int)(imm), \
+ (__v4di)(__m256i)(W), \
+ (__mmask8)(U)); })
+
+#define _mm256_maskz_shuffle_i64x2(U, A, B, imm) __extension__ ({ \
+ (__m256i)__builtin_ia32_shuf_i64x2_256_mask((__v4di)(__m256i)(A), \
+ (__v4di)(__m256i)(B), \
+ (int)(imm), \
+ (__v4di)_mm256_setzero_si256(), \
+ (__mmask8)(U)); })
+
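+/* Usage sketch (illustrative, not part of the upstream header): for these
+ * 256-bit forms the immediate selects whole 128-bit lanes -- bit 0 picks
+ * the low result lane from A, bit 1 the high result lane from B:
+ *
+ *   __m256 r = _mm256_shuffle_f32x4(a, b, 0x3);
+ *   // low 128 bits = high lane of a; high 128 bits = high lane of b
+ */
+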
+#define _mm_mask_shuffle_pd(W, U, A, B, M) __extension__ ({ \
+ (__m128d)__builtin_ia32_selectpd_128((__mmask8)(U), \
+ (__v2df)_mm_shuffle_pd((A), (B), (M)), \
+ (__v2df)(__m128d)(W)); })
+
+#define _mm_maskz_shuffle_pd(U, A, B, M) __extension__ ({ \
+ (__m128d)__builtin_ia32_selectpd_128((__mmask8)(U), \
+ (__v2df)_mm_shuffle_pd((A), (B), (M)), \
+ (__v2df)_mm_setzero_pd()); })
+
+#define _mm256_mask_shuffle_pd(W, U, A, B, M) __extension__ ({ \
+ (__m256d)__builtin_ia32_selectpd_256((__mmask8)(U), \
+ (__v4df)_mm256_shuffle_pd((A), (B), (M)), \
+ (__v4df)(__m256d)(W)); })
+
+#define _mm256_maskz_shuffle_pd(U, A, B, M) __extension__ ({ \
+ (__m256d)__builtin_ia32_selectpd_256((__mmask8)(U), \
+ (__v4df)_mm256_shuffle_pd((A), (B), (M)), \
+ (__v4df)_mm256_setzero_pd()); })
+
+#define _mm_mask_shuffle_ps(W, U, A, B, M) __extension__ ({ \
+ (__m128)__builtin_ia32_selectps_128((__mmask8)(U), \
+ (__v4sf)_mm_shuffle_ps((A), (B), (M)), \
+ (__v4sf)(__m128)(W)); })
+
+#define _mm_maskz_shuffle_ps(U, A, B, M) __extension__ ({ \
+ (__m128)__builtin_ia32_selectps_128((__mmask8)(U), \
+ (__v4sf)_mm_shuffle_ps((A), (B), (M)), \
+ (__v4sf)_mm_setzero_ps()); })
+
+#define _mm256_mask_shuffle_ps(W, U, A, B, M) __extension__ ({ \
+ (__m256)__builtin_ia32_selectps_256((__mmask8)(U), \
+ (__v8sf)_mm256_shuffle_ps((A), (B), (M)), \
+ (__v8sf)(__m256)(W)); })
+
+#define _mm256_maskz_shuffle_ps(U, A, B, M) __extension__ ({ \
+ (__m256)__builtin_ia32_selectps_256((__mmask8)(U), \
+ (__v8sf)_mm256_shuffle_ps((A), (B), (M)), \
+ (__v8sf)_mm256_setzero_ps()); })
+
+static __inline__ __m128d __DEFAULT_FN_ATTRS
+_mm_rsqrt14_pd (__m128d __A)
+{
+ return (__m128d) __builtin_ia32_rsqrt14pd128_mask ((__v2df) __A,
+ (__v2df)
+ _mm_setzero_pd (),
+ (__mmask8) -1);
+}
+
+static __inline__ __m128d __DEFAULT_FN_ATTRS
+_mm_mask_rsqrt14_pd (__m128d __W, __mmask8 __U, __m128d __A)
+{
+ return (__m128d) __builtin_ia32_rsqrt14pd128_mask ((__v2df) __A,
+ (__v2df) __W,
+ (__mmask8) __U);
+}
+
+static __inline__ __m128d __DEFAULT_FN_ATTRS
+_mm_maskz_rsqrt14_pd (__mmask8 __U, __m128d __A)
+{
+ return (__m128d) __builtin_ia32_rsqrt14pd128_mask ((__v2df) __A,
+ (__v2df)
+ _mm_setzero_pd (),
+ (__mmask8) __U);
+}
+
+static __inline__ __m256d __DEFAULT_FN_ATTRS
+_mm256_rsqrt14_pd (__m256d __A)
+{
+ return (__m256d) __builtin_ia32_rsqrt14pd256_mask ((__v4df) __A,
+ (__v4df)
+ _mm256_setzero_pd (),
+ (__mmask8) -1);
+}
+
+static __inline__ __m256d __DEFAULT_FN_ATTRS
+_mm256_mask_rsqrt14_pd (__m256d __W, __mmask8 __U, __m256d __A)
+{
+ return (__m256d) __builtin_ia32_rsqrt14pd256_mask ((__v4df) __A,
+ (__v4df) __W,
+ (__mmask8) __U);
+}
+
+static __inline__ __m256d __DEFAULT_FN_ATTRS
+_mm256_maskz_rsqrt14_pd (__mmask8 __U, __m256d __A)
+{
+ return (__m256d) __builtin_ia32_rsqrt14pd256_mask ((__v4df) __A,
+ (__v4df)
+ _mm256_setzero_pd (),
+ (__mmask8) __U);
+}
+
+static __inline__ __m128 __DEFAULT_FN_ATTRS
+_mm_rsqrt14_ps (__m128 __A)
+{
+ return (__m128) __builtin_ia32_rsqrt14ps128_mask ((__v4sf) __A,
+ (__v4sf)
+ _mm_setzero_ps (),
+ (__mmask8) -1);
+}
+
+static __inline__ __m128 __DEFAULT_FN_ATTRS
+_mm_mask_rsqrt14_ps (__m128 __W, __mmask8 __U, __m128 __A)
+{
+ return (__m128) __builtin_ia32_rsqrt14ps128_mask ((__v4sf) __A,
+ (__v4sf) __W,
+ (__mmask8) __U);
+}
+
+static __inline__ __m128 __DEFAULT_FN_ATTRS
+_mm_maskz_rsqrt14_ps (__mmask8 __U, __m128 __A)
+{
+ return (__m128) __builtin_ia32_rsqrt14ps128_mask ((__v4sf) __A,
+ (__v4sf)
+ _mm_setzero_ps (),
+ (__mmask8) __U);
+}
+
+static __inline__ __m256 __DEFAULT_FN_ATTRS
+_mm256_rsqrt14_ps (__m256 __A)
+{
+ return (__m256) __builtin_ia32_rsqrt14ps256_mask ((__v8sf) __A,
+ (__v8sf)
+ _mm256_setzero_ps (),
+ (__mmask8) -1);
+}
+
+static __inline__ __m256 __DEFAULT_FN_ATTRS
+_mm256_mask_rsqrt14_ps (__m256 __W, __mmask8 __U, __m256 __A)
+{
+ return (__m256) __builtin_ia32_rsqrt14ps256_mask ((__v8sf) __A,
+ (__v8sf) __W,
+ (__mmask8) __U);
+}
+
+static __inline__ __m256 __DEFAULT_FN_ATTRS
+_mm256_maskz_rsqrt14_ps (__mmask8 __U, __m256 __A)
+{
+ return (__m256) __builtin_ia32_rsqrt14ps256_mask ((__v8sf) __A,
+ (__v8sf)
+ _mm256_setzero_ps (),
+ (__mmask8) __U);
+}
+
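+/* Usage sketch (illustrative, not part of the upstream header): rsqrt14
+ * approximates 1/sqrt(x) with relative error at most 2^-14, which makes a
+ * good seed for one Newton-Raphson step, y' = y * (1.5 - 0.5*x*y*y):
+ *
+ *   __m128 y = _mm_rsqrt14_ps(x);
+ *   y = _mm_mul_ps(y, _mm_sub_ps(_mm_set1_ps(1.5f),
+ *                    _mm_mul_ps(_mm_mul_ps(_mm_set1_ps(0.5f), x),
+ *                               _mm_mul_ps(y, y))));
+ */
+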
+static __inline__ __m256 __DEFAULT_FN_ATTRS
+_mm256_broadcast_f32x4 (__m128 __A)
+{
+ return (__m256) __builtin_ia32_broadcastf32x4_256_mask ((__v4sf) __A,
+ (__v8sf)_mm256_undefined_ps (),
+ (__mmask8) -1);
+}
+
+static __inline__ __m256 __DEFAULT_FN_ATTRS
+_mm256_mask_broadcast_f32x4 (__m256 __O, __mmask8 __M, __m128 __A)
+{
+ return (__m256) __builtin_ia32_broadcastf32x4_256_mask ((__v4sf) __A,
+ (__v8sf) __O,
+ __M);
+}
+
+static __inline__ __m256 __DEFAULT_FN_ATTRS
+_mm256_maskz_broadcast_f32x4 (__mmask8 __M, __m128 __A)
+{
+ return (__m256) __builtin_ia32_broadcastf32x4_256_mask ((__v4sf) __A,
+ (__v8sf) _mm256_setzero_ps (),
+ __M);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS
+_mm256_broadcast_i32x4 (__m128i __A)
+{
+ return (__m256i) __builtin_ia32_broadcasti32x4_256_mask ((__v4si) __A,
+ (__v8si)_mm256_undefined_si256 (),
+ (__mmask8) -1);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS
+_mm256_mask_broadcast_i32x4 (__m256i __O, __mmask8 __M, __m128i __A)
+{
+ return (__m256i) __builtin_ia32_broadcasti32x4_256_mask ((__v4si) __A,
+ (__v8si) __O, __M);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS
+_mm256_maskz_broadcast_i32x4 (__mmask8 __M, __m128i __A)
+{
+ return (__m256i) __builtin_ia32_broadcasti32x4_256_mask ((__v4si) __A,
+ (__v8si) _mm256_setzero_si256 (),
+ __M);
+}
+
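+/* Usage sketch (illustrative, not part of the upstream header):
+ * broadcast_f32x4/i32x4 replicate a 128-bit vector into both halves of a
+ * 256-bit vector:
+ *
+ *   __m256 r = _mm256_broadcast_f32x4(a);  // r = { a, a }
+ */
+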
+static __inline__ __m256d __DEFAULT_FN_ATTRS
+_mm256_mask_broadcastsd_pd (__m256d __O, __mmask8 __M, __m128d __A)
+{
+ return (__m256d)__builtin_ia32_selectpd_256(__M,
+ (__v4df) _mm256_broadcastsd_pd(__A),
+ (__v4df) __O);
+}
+
+static __inline__ __m256d __DEFAULT_FN_ATTRS
+_mm256_maskz_broadcastsd_pd (__mmask8 __M, __m128d __A)
+{
+ return (__m256d)__builtin_ia32_selectpd_256(__M,
+ (__v4df) _mm256_broadcastsd_pd(__A),
+ (__v4df) _mm256_setzero_pd());
+}
+
+static __inline__ __m128 __DEFAULT_FN_ATTRS
+_mm_mask_broadcastss_ps (__m128 __O, __mmask8 __M, __m128 __A)
+{
+ return (__m128)__builtin_ia32_selectps_128(__M,
+ (__v4sf) _mm_broadcastss_ps(__A),
+ (__v4sf) __O);
+}
+
+static __inline__ __m128 __DEFAULT_FN_ATTRS
+_mm_maskz_broadcastss_ps (__mmask8 __M, __m128 __A)
+{
+ return (__m128)__builtin_ia32_selectps_128(__M,
+ (__v4sf) _mm_broadcastss_ps(__A),
+ (__v4sf) _mm_setzero_ps());
+}
+
+static __inline__ __m256 __DEFAULT_FN_ATTRS
+_mm256_mask_broadcastss_ps (__m256 __O, __mmask8 __M, __m128 __A)
+{
+ return (__m256)__builtin_ia32_selectps_256(__M,
+ (__v8sf) _mm256_broadcastss_ps(__A),
+ (__v8sf) __O);
+}
+
+static __inline__ __m256 __DEFAULT_FN_ATTRS
+_mm256_maskz_broadcastss_ps (__mmask8 __M, __m128 __A)
+{
+ return (__m256)__builtin_ia32_selectps_256(__M,
+ (__v8sf) _mm256_broadcastss_ps(__A),
+ (__v8sf) _mm256_setzero_ps());
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_mask_broadcastd_epi32 (__m128i __O, __mmask8 __M, __m128i __A)
+{
+ return (__m128i)__builtin_ia32_selectd_128(__M,
+ (__v4si) _mm_broadcastd_epi32(__A),
+ (__v4si) __O);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_maskz_broadcastd_epi32 (__mmask8 __M, __m128i __A)
+{
+ return (__m128i)__builtin_ia32_selectd_128(__M,
+ (__v4si) _mm_broadcastd_epi32(__A),
+ (__v4si) _mm_setzero_si128());
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS
+_mm256_mask_broadcastd_epi32 (__m256i __O, __mmask8 __M, __m128i __A)
+{
+ return (__m256i)__builtin_ia32_selectd_256(__M,
+ (__v8si) _mm256_broadcastd_epi32(__A),
+ (__v8si) __O);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS
+_mm256_maskz_broadcastd_epi32 (__mmask8 __M, __m128i __A)
+{
+ return (__m256i)__builtin_ia32_selectd_256(__M,
+ (__v8si) _mm256_broadcastd_epi32(__A),
+ (__v8si) _mm256_setzero_si256());
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_mask_broadcastq_epi64 (__m128i __O, __mmask8 __M, __m128i __A)
+{
+ return (__m128i)__builtin_ia32_selectq_128(__M,
+ (__v2di) _mm_broadcastq_epi64(__A),
+ (__v2di) __O);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_maskz_broadcastq_epi64 (__mmask8 __M, __m128i __A)
+{
+ return (__m128i)__builtin_ia32_selectq_128(__M,
+ (__v2di) _mm_broadcastq_epi64(__A),
+ (__v2di) _mm_setzero_si128());
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS
+_mm256_mask_broadcastq_epi64 (__m256i __O, __mmask8 __M, __m128i __A)
+{
+ return (__m256i)__builtin_ia32_selectq_256(__M,
+ (__v4di) _mm256_broadcastq_epi64(__A),
+ (__v4di) __O);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS
+_mm256_maskz_broadcastq_epi64 (__mmask8 __M, __m128i __A)
+{
+ return (__m256i)__builtin_ia32_selectq_256(__M,
+ (__v4di) _mm256_broadcastq_epi64(__A),
+ (__v4di) _mm256_setzero_si256());
+}
+
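+/* Usage sketch (illustrative, not part of the upstream header): these wrap
+ * the AVX2 element broadcasts (element 0 replicated to every lane) with
+ * the mask/maskz select pattern:
+ *
+ *   __m256i r = _mm256_maskz_broadcastd_epi32(0x0f, a);
+ *   // lanes 0-3 = a[0]; lanes 4-7 = 0
+ */
+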
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_cvtsepi32_epi8 (__m128i __A)
+{
+ return (__m128i) __builtin_ia32_pmovsdb128_mask ((__v4si) __A,
+ (__v16qi)_mm_undefined_si128(),
+ (__mmask8) -1);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_mask_cvtsepi32_epi8 (__m128i __O, __mmask8 __M, __m128i __A)
+{
+ return (__m128i) __builtin_ia32_pmovsdb128_mask ((__v4si) __A,
+ (__v16qi) __O, __M);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_maskz_cvtsepi32_epi8 (__mmask8 __M, __m128i __A)
+{
+ return (__m128i) __builtin_ia32_pmovsdb128_mask ((__v4si) __A,
+ (__v16qi) _mm_setzero_si128 (),
+ __M);
+}
+
+static __inline__ void __DEFAULT_FN_ATTRS
+_mm_mask_cvtsepi32_storeu_epi8 (void * __P, __mmask8 __M, __m128i __A)
+{
+ __builtin_ia32_pmovsdb128mem_mask ((__v16qi *) __P, (__v4si) __A, __M);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm256_cvtsepi32_epi8 (__m256i __A)
+{
+ return (__m128i) __builtin_ia32_pmovsdb256_mask ((__v8si) __A,
+ (__v16qi)_mm_undefined_si128(),
+ (__mmask8) -1);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm256_mask_cvtsepi32_epi8 (__m128i __O, __mmask8 __M, __m256i __A)
+{
+ return (__m128i) __builtin_ia32_pmovsdb256_mask ((__v8si) __A,
+ (__v16qi) __O, __M);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm256_maskz_cvtsepi32_epi8 (__mmask8 __M, __m256i __A)
+{
+ return (__m128i) __builtin_ia32_pmovsdb256_mask ((__v8si) __A,
+ (__v16qi) _mm_setzero_si128 (),
+ __M);
+}
+
+static __inline__ void __DEFAULT_FN_ATTRS
+_mm256_mask_cvtsepi32_storeu_epi8 (void * __P, __mmask8 __M, __m256i __A)
+{
+ __builtin_ia32_pmovsdb256mem_mask ((__v16qi *) __P, (__v8si) __A, __M);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_cvtsepi32_epi16 (__m128i __A)
+{
+ return (__m128i) __builtin_ia32_pmovsdw128_mask ((__v4si) __A,
+ (__v8hi)_mm_setzero_si128 (),
+ (__mmask8) -1);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_mask_cvtsepi32_epi16 (__m128i __O, __mmask8 __M, __m128i __A)
+{
+ return (__m128i) __builtin_ia32_pmovsdw128_mask ((__v4si) __A,
+ (__v8hi)__O,
+ __M);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_maskz_cvtsepi32_epi16 (__mmask8 __M, __m128i __A)
+{
+ return (__m128i) __builtin_ia32_pmovsdw128_mask ((__v4si) __A,
+ (__v8hi) _mm_setzero_si128 (),
+ __M);
+}
+
+static __inline__ void __DEFAULT_FN_ATTRS
+_mm_mask_cvtsepi32_storeu_epi16 (void * __P, __mmask8 __M, __m128i __A)
+{
+ __builtin_ia32_pmovsdw128mem_mask ((__v8hi *) __P, (__v4si) __A, __M);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm256_cvtsepi32_epi16 (__m256i __A)
+{
+ return (__m128i) __builtin_ia32_pmovsdw256_mask ((__v8si) __A,
+ (__v8hi)_mm_undefined_si128(),
+ (__mmask8) -1);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm256_mask_cvtsepi32_epi16 (__m128i __O, __mmask8 __M, __m256i __A)
+{
+ return (__m128i) __builtin_ia32_pmovsdw256_mask ((__v8si) __A,
+ (__v8hi) __O, __M);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm256_maskz_cvtsepi32_epi16 (__mmask8 __M, __m256i __A)
+{
+ return (__m128i) __builtin_ia32_pmovsdw256_mask ((__v8si) __A,
+ (__v8hi) _mm_setzero_si128 (),
+ __M);
+}
+
+static __inline__ void __DEFAULT_FN_ATTRS
+_mm256_mask_cvtsepi32_storeu_epi16 (void * __P, __mmask8 __M, __m256i __A)
+{
+ __builtin_ia32_pmovsdw256mem_mask ((__v8hi *) __P, (__v8si) __A, __M);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_cvtsepi64_epi8 (__m128i __A)
+{
+ return (__m128i) __builtin_ia32_pmovsqb128_mask ((__v2di) __A,
+ (__v16qi)_mm_undefined_si128(),
+ (__mmask8) -1);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_mask_cvtsepi64_epi8 (__m128i __O, __mmask8 __M, __m128i __A)
+{
+ return (__m128i) __builtin_ia32_pmovsqb128_mask ((__v2di) __A,
+ (__v16qi) __O, __M);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_maskz_cvtsepi64_epi8 (__mmask8 __M, __m128i __A)
+{
+ return (__m128i) __builtin_ia32_pmovsqb128_mask ((__v2di) __A,
+ (__v16qi) _mm_setzero_si128 (),
+ __M);
+}
+
+static __inline__ void __DEFAULT_FN_ATTRS
+_mm_mask_cvtsepi64_storeu_epi8 (void * __P, __mmask8 __M, __m128i __A)
+{
+ __builtin_ia32_pmovsqb128mem_mask ((__v16qi *) __P, (__v2di) __A, __M);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm256_cvtsepi64_epi8 (__m256i __A)
+{
+ return (__m128i) __builtin_ia32_pmovsqb256_mask ((__v4di) __A,
+ (__v16qi)_mm_undefined_si128(),
+ (__mmask8) -1);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm256_mask_cvtsepi64_epi8 (__m128i __O, __mmask8 __M, __m256i __A)
+{
+ return (__m128i) __builtin_ia32_pmovsqb256_mask ((__v4di) __A,
+ (__v16qi) __O, __M);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm256_maskz_cvtsepi64_epi8 (__mmask8 __M, __m256i __A)
+{
+ return (__m128i) __builtin_ia32_pmovsqb256_mask ((__v4di) __A,
+ (__v16qi) _mm_setzero_si128 (),
+ __M);
+}
+
+static __inline__ void __DEFAULT_FN_ATTRS
+_mm256_mask_cvtsepi64_storeu_epi8 (void * __P, __mmask8 __M, __m256i __A)
+{
+ __builtin_ia32_pmovsqb256mem_mask ((__v16qi *) __P, (__v4di) __A, __M);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_cvtsepi64_epi32 (__m128i __A)
+{
+ return (__m128i) __builtin_ia32_pmovsqd128_mask ((__v2di) __A,
+ (__v4si)_mm_undefined_si128(),
+ (__mmask8) -1);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_mask_cvtsepi64_epi32 (__m128i __O, __mmask8 __M, __m128i __A)
+{
+ return (__m128i) __builtin_ia32_pmovsqd128_mask ((__v2di) __A,
+ (__v4si) __O, __M);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_maskz_cvtsepi64_epi32 (__mmask8 __M, __m128i __A)
+{
+ return (__m128i) __builtin_ia32_pmovsqd128_mask ((__v2di) __A,
+ (__v4si) _mm_setzero_si128 (),
+ __M);
+}
+
+static __inline__ void __DEFAULT_FN_ATTRS
+_mm_mask_cvtsepi64_storeu_epi32 (void * __P, __mmask8 __M, __m128i __A)
+{
+ __builtin_ia32_pmovsqd128mem_mask ((__v4si *) __P, (__v2di) __A, __M);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm256_cvtsepi64_epi32 (__m256i __A)
+{
+ return (__m128i) __builtin_ia32_pmovsqd256_mask ((__v4di) __A,
+ (__v4si)_mm_undefined_si128(),
+ (__mmask8) -1);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm256_mask_cvtsepi64_epi32 (__m128i __O, __mmask8 __M, __m256i __A)
+{
+ return (__m128i) __builtin_ia32_pmovsqd256_mask ((__v4di) __A,
+ (__v4si)__O,
+ __M);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm256_maskz_cvtsepi64_epi32 (__mmask8 __M, __m256i __A)
+{
+ return (__m128i) __builtin_ia32_pmovsqd256_mask ((__v4di) __A,
+ (__v4si) _mm_setzero_si128 (),
+ __M);
+}
+
+static __inline__ void __DEFAULT_FN_ATTRS
+_mm256_mask_cvtsepi64_storeu_epi32 (void * __P, __mmask8 __M, __m256i __A)
+{
+ __builtin_ia32_pmovsqd256mem_mask ((__v4si *) __P, (__v4di) __A, __M);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_cvtsepi64_epi16 (__m128i __A)
+{
+ return (__m128i) __builtin_ia32_pmovsqw128_mask ((__v2di) __A,
+ (__v8hi)_mm_undefined_si128(),
+ (__mmask8) -1);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_mask_cvtsepi64_epi16 (__m128i __O, __mmask8 __M, __m128i __A)
+{
+ return (__m128i) __builtin_ia32_pmovsqw128_mask ((__v2di) __A,
+ (__v8hi) __O, __M);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_maskz_cvtsepi64_epi16 (__mmask8 __M, __m128i __A)
+{
+ return (__m128i) __builtin_ia32_pmovsqw128_mask ((__v2di) __A,
+ (__v8hi) _mm_setzero_si128 (),
+ __M);
+}
+
+static __inline__ void __DEFAULT_FN_ATTRS
+_mm_mask_cvtsepi64_storeu_epi16 (void * __P, __mmask8 __M, __m128i __A)
+{
+ __builtin_ia32_pmovsqw128mem_mask ((__v8hi *) __P, (__v2di) __A, __M);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm256_cvtsepi64_epi16 (__m256i __A)
+{
+ return (__m128i) __builtin_ia32_pmovsqw256_mask ((__v4di) __A,
+ (__v8hi)_mm_undefined_si128(),
+ (__mmask8) -1);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm256_mask_cvtsepi64_epi16 (__m128i __O, __mmask8 __M, __m256i __A)
+{
+ return (__m128i) __builtin_ia32_pmovsqw256_mask ((__v4di) __A,
+ (__v8hi) __O, __M);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm256_maskz_cvtsepi64_epi16 (__mmask8 __M, __m256i __A)
+{
+ return (__m128i) __builtin_ia32_pmovsqw256_mask ((__v4di) __A,
+ (__v8hi) _mm_setzero_si128 (),
+ __M);
+}
+
+static __inline__ void __DEFAULT_FN_ATTRS
+_mm256_mask_cvtsepi64_storeu_epi16 (void * __P, __mmask8 __M, __m256i __A)
+{
+ __builtin_ia32_pmovsqw256mem_mask ((__v8hi *) __P, (__v4di) __A, __M);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_cvtusepi32_epi8 (__m128i __A)
+{
+ return (__m128i) __builtin_ia32_pmovusdb128_mask ((__v4si) __A,
+ (__v16qi)_mm_undefined_si128(),
+ (__mmask8) -1);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_mask_cvtusepi32_epi8 (__m128i __O, __mmask8 __M, __m128i __A)
+{
+ return (__m128i) __builtin_ia32_pmovusdb128_mask ((__v4si) __A,
+ (__v16qi) __O,
+ __M);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_maskz_cvtusepi32_epi8 (__mmask8 __M, __m128i __A)
+{
+ return (__m128i) __builtin_ia32_pmovusdb128_mask ((__v4si) __A,
+ (__v16qi) _mm_setzero_si128 (),
+ __M);
+}
+
+static __inline__ void __DEFAULT_FN_ATTRS
+_mm_mask_cvtusepi32_storeu_epi8 (void * __P, __mmask8 __M, __m128i __A)
+{
+ __builtin_ia32_pmovusdb128mem_mask ((__v16qi *) __P, (__v4si) __A, __M);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm256_cvtusepi32_epi8 (__m256i __A)
+{
+ return (__m128i) __builtin_ia32_pmovusdb256_mask ((__v8si) __A,
+ (__v16qi)_mm_undefined_si128(),
+ (__mmask8) -1);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm256_mask_cvtusepi32_epi8 (__m128i __O, __mmask8 __M, __m256i __A)
+{
+ return (__m128i) __builtin_ia32_pmovusdb256_mask ((__v8si) __A,
+ (__v16qi) __O,
+ __M);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm256_maskz_cvtusepi32_epi8 (__mmask8 __M, __m256i __A)
+{
+ return (__m128i) __builtin_ia32_pmovusdb256_mask ((__v8si) __A,
+ (__v16qi) _mm_setzero_si128 (),
+ __M);
+}
+
+static __inline__ void __DEFAULT_FN_ATTRS
+_mm256_mask_cvtusepi32_storeu_epi8 (void * __P, __mmask8 __M, __m256i __A)
+{
+ __builtin_ia32_pmovusdb256mem_mask ((__v16qi*) __P, (__v8si) __A, __M);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_cvtusepi32_epi16 (__m128i __A)
+{
+ return (__m128i) __builtin_ia32_pmovusdw128_mask ((__v4si) __A,
+ (__v8hi)_mm_undefined_si128(),
+ (__mmask8) -1);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_mask_cvtusepi32_epi16 (__m128i __O, __mmask8 __M, __m128i __A)
+{
+ return (__m128i) __builtin_ia32_pmovusdw128_mask ((__v4si) __A,
+ (__v8hi) __O, __M);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_maskz_cvtusepi32_epi16 (__mmask8 __M, __m128i __A)
+{
+ return (__m128i) __builtin_ia32_pmovusdw128_mask ((__v4si) __A,
+ (__v8hi) _mm_setzero_si128 (),
+ __M);
+}
+
+static __inline__ void __DEFAULT_FN_ATTRS
+_mm_mask_cvtusepi32_storeu_epi16 (void * __P, __mmask8 __M, __m128i __A)
+{
+ __builtin_ia32_pmovusdw128mem_mask ((__v8hi *) __P, (__v4si) __A, __M);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm256_cvtusepi32_epi16 (__m256i __A)
+{
+ return (__m128i) __builtin_ia32_pmovusdw256_mask ((__v8si) __A,
+ (__v8hi) _mm_undefined_si128(),
+ (__mmask8) -1);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm256_mask_cvtusepi32_epi16 (__m128i __O, __mmask8 __M, __m256i __A)
+{
+ return (__m128i) __builtin_ia32_pmovusdw256_mask ((__v8si) __A,
+ (__v8hi) __O, __M);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm256_maskz_cvtusepi32_epi16 (__mmask8 __M, __m256i __A)
+{
+ return (__m128i) __builtin_ia32_pmovusdw256_mask ((__v8si) __A,
+ (__v8hi) _mm_setzero_si128 (),
+ __M);
+}
+
+static __inline__ void __DEFAULT_FN_ATTRS
+_mm256_mask_cvtusepi32_storeu_epi16 (void * __P, __mmask8 __M, __m256i __A)
+{
+ __builtin_ia32_pmovusdw256mem_mask ((__v8hi *) __P, (__v8si) __A, __M);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_cvtusepi64_epi8 (__m128i __A)
+{
+ return (__m128i) __builtin_ia32_pmovusqb128_mask ((__v2di) __A,
+ (__v16qi)_mm_undefined_si128(),
+ (__mmask8) -1);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_mask_cvtusepi64_epi8 (__m128i __O, __mmask8 __M, __m128i __A)
+{
+ return (__m128i) __builtin_ia32_pmovusqb128_mask ((__v2di) __A,
+ (__v16qi) __O,
+ __M);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_maskz_cvtusepi64_epi8 (__mmask8 __M, __m128i __A)
+{
+ return (__m128i) __builtin_ia32_pmovusqb128_mask ((__v2di) __A,
+ (__v16qi) _mm_setzero_si128 (),
+ __M);
+}
+
+static __inline__ void __DEFAULT_FN_ATTRS
+_mm_mask_cvtusepi64_storeu_epi8 (void * __P, __mmask8 __M, __m128i __A)
+{
+ __builtin_ia32_pmovusqb128mem_mask ((__v16qi *) __P, (__v2di) __A, __M);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm256_cvtusepi64_epi8 (__m256i __A)
+{
+ return (__m128i) __builtin_ia32_pmovusqb256_mask ((__v4di) __A,
+ (__v16qi)_mm_undefined_si128(),
+ (__mmask8) -1);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm256_mask_cvtusepi64_epi8 (__m128i __O, __mmask8 __M, __m256i __A)
+{
+ return (__m128i) __builtin_ia32_pmovusqb256_mask ((__v4di) __A,
+ (__v16qi) __O,
+ __M);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm256_maskz_cvtusepi64_epi8 (__mmask8 __M, __m256i __A)
+{
+ return (__m128i) __builtin_ia32_pmovusqb256_mask ((__v4di) __A,
+ (__v16qi) _mm_setzero_si128 (),
+ __M);
+}
+
+static __inline__ void __DEFAULT_FN_ATTRS
+_mm256_mask_cvtusepi64_storeu_epi8 (void * __P, __mmask8 __M, __m256i __A)
+{
+ __builtin_ia32_pmovusqb256mem_mask ((__v16qi *) __P, (__v4di) __A, __M);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_cvtusepi64_epi32 (__m128i __A)
+{
+ return (__m128i) __builtin_ia32_pmovusqd128_mask ((__v2di) __A,
+ (__v4si)_mm_undefined_si128(),
+ (__mmask8) -1);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_mask_cvtusepi64_epi32 (__m128i __O, __mmask8 __M, __m128i __A)
+{
+ return (__m128i) __builtin_ia32_pmovusqd128_mask ((__v2di) __A,
+ (__v4si) __O, __M);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_maskz_cvtusepi64_epi32 (__mmask8 __M, __m128i __A)
+{
+ return (__m128i) __builtin_ia32_pmovusqd128_mask ((__v2di) __A,
+ (__v4si) _mm_setzero_si128 (),
+ __M);
+}
+
+static __inline__ void __DEFAULT_FN_ATTRS
+_mm_mask_cvtusepi64_storeu_epi32 (void * __P, __mmask8 __M, __m128i __A)
+{
+ __builtin_ia32_pmovusqd128mem_mask ((__v4si *) __P, (__v2di) __A, __M);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm256_cvtusepi64_epi32 (__m256i __A)
+{
+ return (__m128i) __builtin_ia32_pmovusqd256_mask ((__v4di) __A,
+ (__v4si)_mm_undefined_si128(),
+ (__mmask8) -1);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm256_mask_cvtusepi64_epi32 (__m128i __O, __mmask8 __M, __m256i __A)
+{
+ return (__m128i) __builtin_ia32_pmovusqd256_mask ((__v4di) __A,
+ (__v4si) __O, __M);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm256_maskz_cvtusepi64_epi32 (__mmask8 __M, __m256i __A)
+{
+ return (__m128i) __builtin_ia32_pmovusqd256_mask ((__v4di) __A,
+ (__v4si) _mm_setzero_si128 (),
+ __M);
+}
+
+static __inline__ void __DEFAULT_FN_ATTRS
+_mm256_mask_cvtusepi64_storeu_epi32 (void * __P, __mmask8 __M, __m256i __A)
+{
+ __builtin_ia32_pmovusqd256mem_mask ((__v4si *) __P, (__v4di) __A, __M);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_cvtusepi64_epi16 (__m128i __A)
+{
+ return (__m128i) __builtin_ia32_pmovusqw128_mask ((__v2di) __A,
+ (__v8hi)_mm_undefined_si128(),
+ (__mmask8) -1);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_mask_cvtusepi64_epi16 (__m128i __O, __mmask8 __M, __m128i __A)
+{
+ return (__m128i) __builtin_ia32_pmovusqw128_mask ((__v2di) __A,
+ (__v8hi) __O, __M);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_maskz_cvtusepi64_epi16 (__mmask8 __M, __m128i __A)
+{
+ return (__m128i) __builtin_ia32_pmovusqw128_mask ((__v2di) __A,
+ (__v8hi) _mm_setzero_si128 (),
+ __M);
+}
+
+static __inline__ void __DEFAULT_FN_ATTRS
+_mm_mask_cvtusepi64_storeu_epi16 (void * __P, __mmask8 __M, __m128i __A)
+{
+ __builtin_ia32_pmovusqw128mem_mask ((__v8hi *) __P, (__v2di) __A, __M);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm256_cvtusepi64_epi16 (__m256i __A)
+{
+ return (__m128i) __builtin_ia32_pmovusqw256_mask ((__v4di) __A,
+ (__v8hi)_mm_undefined_si128(),
+ (__mmask8) -1);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm256_mask_cvtusepi64_epi16 (__m128i __O, __mmask8 __M, __m256i __A)
+{
+ return (__m128i) __builtin_ia32_pmovusqw256_mask ((__v4di) __A,
+ (__v8hi) __O, __M);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm256_maskz_cvtusepi64_epi16 (__mmask8 __M, __m256i __A)
+{
+ return (__m128i) __builtin_ia32_pmovusqw256_mask ((__v4di) __A,
+ (__v8hi) _mm_setzero_si128 (),
+ __M);
+}
+
+static __inline__ void __DEFAULT_FN_ATTRS
+_mm256_mask_cvtusepi64_storeu_epi16 (void * __P, __mmask8 __M, __m256i __A)
+{
+ __builtin_ia32_pmovusqw256mem_mask ((__v8hi *) __P, (__v4di) __A, __M);
+}
+
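+/* Usage sketch (illustrative, not part of the upstream header): the
+ * cvtsepi/cvtusepi forms above narrow with signed/unsigned saturation; the
+ * plain cvtepi forms below truncate.  With a = _mm_set1_epi32(1000):
+ *
+ *   __m128i s = _mm_cvtsepi32_epi8(a);   // low 4 bytes = 127 (saturated)
+ *   __m128i u = _mm_cvtusepi32_epi8(a);  // low 4 bytes = 255 (saturated)
+ */
+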
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_cvtepi32_epi8 (__m128i __A)
+{
+ return (__m128i) __builtin_ia32_pmovdb128_mask ((__v4si) __A,
+ (__v16qi)_mm_undefined_si128(),
+ (__mmask8) -1);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_mask_cvtepi32_epi8 (__m128i __O, __mmask8 __M, __m128i __A)
+{
+ return (__m128i) __builtin_ia32_pmovdb128_mask ((__v4si) __A,
+ (__v16qi) __O, __M);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_maskz_cvtepi32_epi8 (__mmask8 __M, __m128i __A)
+{
+ return (__m128i) __builtin_ia32_pmovdb128_mask ((__v4si) __A,
+ (__v16qi)
+ _mm_setzero_si128 (),
+ __M);
+}
+
+static __inline__ void __DEFAULT_FN_ATTRS
+_mm_mask_cvtepi32_storeu_epi8 (void * __P, __mmask8 __M, __m128i __A)
+{
+ __builtin_ia32_pmovdb128mem_mask ((__v16qi *) __P, (__v4si) __A, __M);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm256_cvtepi32_epi8 (__m256i __A)
+{
+ return (__m128i) __builtin_ia32_pmovdb256_mask ((__v8si) __A,
+ (__v16qi)_mm_undefined_si128(),
+ (__mmask8) -1);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm256_mask_cvtepi32_epi8 (__m128i __O, __mmask8 __M, __m256i __A)
+{
+ return (__m128i) __builtin_ia32_pmovdb256_mask ((__v8si) __A,
+ (__v16qi) __O, __M);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm256_maskz_cvtepi32_epi8 (__mmask8 __M, __m256i __A)
+{
+ return (__m128i) __builtin_ia32_pmovdb256_mask ((__v8si) __A,
+ (__v16qi) _mm_setzero_si128 (),
+ __M);
+}
+
+static __inline__ void __DEFAULT_FN_ATTRS
+_mm256_mask_cvtepi32_storeu_epi8 (void * __P, __mmask8 __M, __m256i __A)
+{
+ __builtin_ia32_pmovdb256mem_mask ((__v16qi *) __P, (__v8si) __A, __M);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_cvtepi32_epi16 (__m128i __A)
+{
+ return (__m128i) __builtin_ia32_pmovdw128_mask ((__v4si) __A,
+ (__v8hi) _mm_setzero_si128 (),
+ (__mmask8) -1);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_mask_cvtepi32_epi16 (__m128i __O, __mmask8 __M, __m128i __A)
+{
+ return (__m128i) __builtin_ia32_pmovdw128_mask ((__v4si) __A,
+ (__v8hi) __O, __M);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_maskz_cvtepi32_epi16 (__mmask8 __M, __m128i __A)
+{
+ return (__m128i) __builtin_ia32_pmovdw128_mask ((__v4si) __A,
+ (__v8hi) _mm_setzero_si128 (),
+ __M);
+}
+
+static __inline__ void __DEFAULT_FN_ATTRS
+_mm_mask_cvtepi32_storeu_epi16 (void * __P, __mmask8 __M, __m128i __A)
+{
+ __builtin_ia32_pmovdw128mem_mask ((__v8hi *) __P, (__v4si) __A, __M);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm256_cvtepi32_epi16 (__m256i __A)
+{
+ return (__m128i) __builtin_ia32_pmovdw256_mask ((__v8si) __A,
+ (__v8hi)_mm_setzero_si128 (),
+ (__mmask8) -1);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm256_mask_cvtepi32_epi16 (__m128i __O, __mmask8 __M, __m256i __A)
+{
+ return (__m128i) __builtin_ia32_pmovdw256_mask ((__v8si) __A,
+ (__v8hi) __O, __M);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm256_maskz_cvtepi32_epi16 (__mmask8 __M, __m256i __A)
+{
+ return (__m128i) __builtin_ia32_pmovdw256_mask ((__v8si) __A,
+ (__v8hi) _mm_setzero_si128 (),
+ __M);
+}
+
+static __inline__ void __DEFAULT_FN_ATTRS
+_mm256_mask_cvtepi32_storeu_epi16 (void * __P, __mmask8 __M, __m256i __A)
+{
+ __builtin_ia32_pmovdw256mem_mask ((__v8hi *) __P, (__v8si) __A, __M);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_cvtepi64_epi8 (__m128i __A)
+{
+ return (__m128i) __builtin_ia32_pmovqb128_mask ((__v2di) __A,
+ (__v16qi) _mm_undefined_si128(),
+ (__mmask8) -1);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_mask_cvtepi64_epi8 (__m128i __O, __mmask8 __M, __m128i __A)
+{
+ return (__m128i) __builtin_ia32_pmovqb128_mask ((__v2di) __A,
+ (__v16qi) __O, __M);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_maskz_cvtepi64_epi8 (__mmask8 __M, __m128i __A)
+{
+ return (__m128i) __builtin_ia32_pmovqb128_mask ((__v2di) __A,
+ (__v16qi) _mm_setzero_si128 (),
+ __M);
+}
+
+static __inline__ void __DEFAULT_FN_ATTRS
+_mm_mask_cvtepi64_storeu_epi8 (void * __P, __mmask8 __M, __m128i __A)
+{
+ __builtin_ia32_pmovqb128mem_mask ((__v16qi *) __P, (__v2di) __A, __M);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm256_cvtepi64_epi8 (__m256i __A)
+{
+ return (__m128i) __builtin_ia32_pmovqb256_mask ((__v4di) __A,
+ (__v16qi) _mm_undefined_si128(),
+ (__mmask8) -1);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm256_mask_cvtepi64_epi8 (__m128i __O, __mmask8 __M, __m256i __A)
+{
+ return (__m128i) __builtin_ia32_pmovqb256_mask ((__v4di) __A,
+ (__v16qi) __O, __M);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm256_maskz_cvtepi64_epi8 (__mmask8 __M, __m256i __A)
+{
+ return (__m128i) __builtin_ia32_pmovqb256_mask ((__v4di) __A,
+ (__v16qi) _mm_setzero_si128 (),
+ __M);
+}
+
+static __inline__ void __DEFAULT_FN_ATTRS
+_mm256_mask_cvtepi64_storeu_epi8 (void * __P, __mmask8 __M, __m256i __A)
+{
+ __builtin_ia32_pmovqb256mem_mask ((__v16qi *) __P, (__v4di) __A, __M);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_cvtepi64_epi32 (__m128i __A)
+{
+ return (__m128i) __builtin_ia32_pmovqd128_mask ((__v2di) __A,
+ (__v4si)_mm_undefined_si128(),
+ (__mmask8) -1);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_mask_cvtepi64_epi32 (__m128i __O, __mmask8 __M, __m128i __A)
+{
+ return (__m128i) __builtin_ia32_pmovqd128_mask ((__v2di) __A,
+ (__v4si) __O, __M);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_maskz_cvtepi64_epi32 (__mmask8 __M, __m128i __A)
+{
+ return (__m128i) __builtin_ia32_pmovqd128_mask ((__v2di) __A,
+ (__v4si) _mm_setzero_si128 (),
+ __M);
+}
+
+static __inline__ void __DEFAULT_FN_ATTRS
+_mm_mask_cvtepi64_storeu_epi32 (void * __P, __mmask8 __M, __m128i __A)
+{
+ __builtin_ia32_pmovqd128mem_mask ((__v4si *) __P, (__v2di) __A, __M);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm256_cvtepi64_epi32 (__m256i __A)
+{
+ return (__m128i) __builtin_ia32_pmovqd256_mask ((__v4di) __A,
+ (__v4si) _mm_undefined_si128(),
+ (__mmask8) -1);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm256_mask_cvtepi64_epi32 (__m128i __O, __mmask8 __M, __m256i __A)
+{
+ return (__m128i) __builtin_ia32_pmovqd256_mask ((__v4di) __A,
+ (__v4si) __O, __M);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm256_maskz_cvtepi64_epi32 (__mmask8 __M, __m256i __A)
+{
+ return (__m128i) __builtin_ia32_pmovqd256_mask ((__v4di) __A,
+ (__v4si) _mm_setzero_si128 (),
+ __M);
+}
+
+static __inline__ void __DEFAULT_FN_ATTRS
+_mm256_mask_cvtepi64_storeu_epi32 (void * __P, __mmask8 __M, __m256i __A)
+{
+ __builtin_ia32_pmovqd256mem_mask ((__v4si *) __P, (__v4di) __A, __M);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_cvtepi64_epi16 (__m128i __A)
+{
+ return (__m128i) __builtin_ia32_pmovqw128_mask ((__v2di) __A,
+ (__v8hi) _mm_undefined_si128(),
+ (__mmask8) -1);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_mask_cvtepi64_epi16 (__m128i __O, __mmask8 __M, __m128i __A)
+{
+ return (__m128i) __builtin_ia32_pmovqw128_mask ((__v2di) __A,
+ (__v8hi)__O,
+ __M);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_maskz_cvtepi64_epi16 (__mmask8 __M, __m128i __A)
+{
+ return (__m128i) __builtin_ia32_pmovqw128_mask ((__v2di) __A,
+ (__v8hi) _mm_setzero_si128 (),
+ __M);
+}
+
+static __inline__ void __DEFAULT_FN_ATTRS
+_mm_mask_cvtepi64_storeu_epi16 (void * __P, __mmask8 __M, __m128i __A)
+{
+ __builtin_ia32_pmovqw128mem_mask ((__v8hi *) __P, (__v2di) __A, __M);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm256_cvtepi64_epi16 (__m256i __A)
+{
+ return (__m128i) __builtin_ia32_pmovqw256_mask ((__v4di) __A,
+ (__v8hi)_mm_undefined_si128(),
+ (__mmask8) -1);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm256_mask_cvtepi64_epi16 (__m128i __O, __mmask8 __M, __m256i __A)
+{
+ return (__m128i) __builtin_ia32_pmovqw256_mask ((__v4di) __A,
+ (__v8hi) __O, __M);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm256_maskz_cvtepi64_epi16 (__mmask8 __M, __m256i __A)
+{
+ return (__m128i) __builtin_ia32_pmovqw256_mask ((__v4di) __A,
+ (__v8hi) _mm_setzero_si128 (),
+ __M);
+}
+
+static __inline__ void __DEFAULT_FN_ATTRS
+_mm256_mask_cvtepi64_storeu_epi16 (void * __P, __mmask8 __M, __m256i __A)
+{
+ __builtin_ia32_pmovqw256mem_mask ((__v8hi *) __P, (__v4di) __A, __M);
+}
+
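+/* Usage sketch (illustrative, not part of the upstream header): the plain
+ * down-converts keep only the low bits of each element:
+ *
+ *   __m128i t = _mm_cvtepi32_epi8(_mm_set1_epi32(1000));
+ *   // low 4 bytes = (char)1000 = -24; no saturation
+ */
+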
+#define _mm256_extractf32x4_ps(A, imm) __extension__ ({ \
+ (__m128)__builtin_ia32_extractf32x4_256_mask((__v8sf)(__m256)(A), \
+ (int)(imm), \
+ (__v4sf)_mm_setzero_ps(), \
+ (__mmask8)-1); })
+
+#define _mm256_mask_extractf32x4_ps(W, U, A, imm) __extension__ ({ \
+ (__m128)__builtin_ia32_extractf32x4_256_mask((__v8sf)(__m256)(A), \
+ (int)(imm), \
+ (__v4sf)(__m128)(W), \
+ (__mmask8)(U)); })
+
+#define _mm256_maskz_extractf32x4_ps(U, A, imm) __extension__ ({ \
+ (__m128)__builtin_ia32_extractf32x4_256_mask((__v8sf)(__m256)(A), \
+ (int)(imm), \
+ (__v4sf)_mm_setzero_ps(), \
+ (__mmask8)(U)); })
+
+#define _mm256_extracti32x4_epi32(A, imm) __extension__ ({ \
+ (__m128i)__builtin_ia32_extracti32x4_256_mask((__v8si)(__m256i)(A), \
+ (int)(imm), \
+ (__v4si)_mm_setzero_si128(), \
+ (__mmask8)-1); })
+
+#define _mm256_mask_extracti32x4_epi32(W, U, A, imm) __extension__ ({ \
+ (__m128i)__builtin_ia32_extracti32x4_256_mask((__v8si)(__m256i)(A), \
+ (int)(imm), \
+ (__v4si)(__m128i)(W), \
+ (__mmask8)(U)); })
+
+#define _mm256_maskz_extracti32x4_epi32(U, A, imm) __extension__ ({ \
+ (__m128i)__builtin_ia32_extracti32x4_256_mask((__v8si)(__m256i)(A), \
+ (int)(imm), \
+ (__v4si)_mm_setzero_si128(), \
+ (__mmask8)(U)); })
+
+#define _mm256_insertf32x4(A, B, imm) __extension__ ({ \
+ (__m256)__builtin_ia32_insertf32x4_256_mask((__v8sf)(__m256)(A), \
+ (__v4sf)(__m128)(B), (int)(imm), \
+ (__v8sf)_mm256_setzero_ps(), \
+ (__mmask8)-1); })
+
+#define _mm256_mask_insertf32x4(W, U, A, B, imm) __extension__ ({ \
+ (__m256)__builtin_ia32_insertf32x4_256_mask((__v8sf)(__m256)(A), \
+ (__v4sf)(__m128)(B), (int)(imm), \
+ (__v8sf)(__m256)(W), \
+ (__mmask8)(U)); })
+
+#define _mm256_maskz_insertf32x4(U, A, B, imm) __extension__ ({ \
+ (__m256)__builtin_ia32_insertf32x4_256_mask((__v8sf)(__m256)(A), \
+ (__v4sf)(__m128)(B), (int)(imm), \
+ (__v8sf)_mm256_setzero_ps(), \
+ (__mmask8)(U)); })
+
+#define _mm256_inserti32x4(A, B, imm) __extension__ ({ \
+ (__m256i)__builtin_ia32_inserti32x4_256_mask((__v8si)(__m256i)(A), \
+ (__v4si)(__m128i)(B), \
+ (int)(imm), \
+ (__v8si)_mm256_setzero_si256(), \
+ (__mmask8)-1); })
+
+#define _mm256_mask_inserti32x4(W, U, A, B, imm) __extension__ ({ \
+ (__m256i)__builtin_ia32_inserti32x4_256_mask((__v8si)(__m256i)(A), \
+ (__v4si)(__m128i)(B), \
+ (int)(imm), \
+ (__v8si)(__m256i)(W), \
+ (__mmask8)(U)); })
+
+#define _mm256_maskz_inserti32x4(U, A, B, imm) __extension__ ({ \
+ (__m256i)__builtin_ia32_inserti32x4_256_mask((__v8si)(__m256i)(A), \
+ (__v4si)(__m128i)(B), \
+ (int)(imm), \
+ (__v8si)_mm256_setzero_si256(), \
+ (__mmask8)(U)); })
+
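+/* Usage sketch (illustrative, not part of the upstream header): the imm
+ * selects which 128-bit lane is extracted or replaced:
+ *
+ *   __m128 hi = _mm256_extractf32x4_ps(a, 1);   // upper 128 bits of a
+ *   __m256 r  = _mm256_insertf32x4(a, b, 0);    // b replaces the low lane
+ */
+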
+#define _mm_getmant_pd(A, B, C) __extension__({\
+ (__m128d)__builtin_ia32_getmantpd128_mask((__v2df)(__m128d)(A), \
+ (int)(((C)<<2) | (B)), \
+ (__v2df)_mm_setzero_pd(), \
+ (__mmask8)-1); })
+
+#define _mm_mask_getmant_pd(W, U, A, B, C) __extension__({\
+ (__m128d)__builtin_ia32_getmantpd128_mask((__v2df)(__m128d)(A), \
+ (int)(((C)<<2) | (B)), \
+ (__v2df)(__m128d)(W), \
+ (__mmask8)(U)); })
+
+#define _mm_maskz_getmant_pd(U, A, B, C) __extension__({\
+ (__m128d)__builtin_ia32_getmantpd128_mask((__v2df)(__m128d)(A), \
+ (int)(((C)<<2) | (B)), \
+ (__v2df)_mm_setzero_pd(), \
+ (__mmask8)(U)); })
+
+#define _mm256_getmant_pd(A, B, C) __extension__ ({ \
+ (__m256d)__builtin_ia32_getmantpd256_mask((__v4df)(__m256d)(A), \
+ (int)(((C)<<2) | (B)), \
+ (__v4df)_mm256_setzero_pd(), \
+ (__mmask8)-1); })
+
+#define _mm256_mask_getmant_pd(W, U, A, B, C) __extension__ ({ \
+ (__m256d)__builtin_ia32_getmantpd256_mask((__v4df)(__m256d)(A), \
+ (int)(((C)<<2) | (B)), \
+ (__v4df)(__m256d)(W), \
+ (__mmask8)(U)); })
+
+#define _mm256_maskz_getmant_pd(U, A, B, C) __extension__ ({ \
+ (__m256d)__builtin_ia32_getmantpd256_mask((__v4df)(__m256d)(A), \
+ (int)(((C)<<2) | (B)), \
+ (__v4df)_mm256_setzero_pd(), \
+ (__mmask8)(U)); })
+
+#define _mm_getmant_ps(A, B, C) __extension__ ({ \
+ (__m128)__builtin_ia32_getmantps128_mask((__v4sf)(__m128)(A), \
+ (int)(((C)<<2) | (B)), \
+ (__v4sf)_mm_setzero_ps(), \
+ (__mmask8)-1); })
+
+#define _mm_mask_getmant_ps(W, U, A, B, C) __extension__ ({ \
+ (__m128)__builtin_ia32_getmantps128_mask((__v4sf)(__m128)(A), \
+ (int)(((C)<<2) | (B)), \
+ (__v4sf)(__m128)(W), \
+ (__mmask8)(U)); })
+
+#define _mm_maskz_getmant_ps(U, A, B, C) __extension__ ({ \
+ (__m128)__builtin_ia32_getmantps128_mask((__v4sf)(__m128)(A), \
+ (int)(((C)<<2) | (B)), \
+ (__v4sf)_mm_setzero_ps(), \
+ (__mmask8)(U)); })
+
+#define _mm256_getmant_ps(A, B, C) __extension__ ({ \
+ (__m256)__builtin_ia32_getmantps256_mask((__v8sf)(__m256)(A), \
+ (int)(((C)<<2) | (B)), \
+ (__v8sf)_mm256_setzero_ps(), \
+ (__mmask8)-1); })
+
+#define _mm256_mask_getmant_ps(W, U, A, B, C) __extension__ ({ \
+ (__m256)__builtin_ia32_getmantps256_mask((__v8sf)(__m256)(A), \
+ (int)(((C)<<2) | (B)), \
+ (__v8sf)(__m256)(W), \
+ (__mmask8)(U)); })
+
+#define _mm256_maskz_getmant_ps(U, A, B, C) __extension__ ({ \
+ (__m256)__builtin_ia32_getmantps256_mask((__v8sf)(__m256)(A), \
+ (int)(((C)<<2) | (B)), \
+ (__v8sf)_mm256_setzero_ps(), \
+ (__mmask8)(U)); })
+
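+/* Usage sketch (illustrative, not part of the upstream header): getmant
+ * extracts each lane's mantissa, normalized into the interval selected by
+ * B with sign treatment C; the builtin packs them as (C << 2) | B.
+ * Assuming the _MM_MANT_* enums from avx512fintrin.h:
+ *
+ *   __m128d m = _mm_getmant_pd(a, _MM_MANT_NORM_1_2, _MM_MANT_SIGN_src);
+ *   // each lane's mantissa scaled into [1, 2)
+ */
+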
+#define _mm_mmask_i64gather_pd(v1_old, mask, index, addr, scale) __extension__ ({\
+ (__m128d)__builtin_ia32_gather3div2df((__v2df)(__m128d)(v1_old), \
+ (double const *)(addr), \
+ (__v2di)(__m128i)(index), \
+ (__mmask8)(mask), (int)(scale)); })
+
+#define _mm_mmask_i64gather_epi64(v1_old, mask, index, addr, scale) __extension__ ({\
+ (__m128i)__builtin_ia32_gather3div2di((__v2di)(__m128i)(v1_old), \
+ (long long const *)(addr), \
+ (__v2di)(__m128i)(index), \
+ (__mmask8)(mask), (int)(scale)); })
+
+#define _mm256_mmask_i64gather_pd(v1_old, mask, index, addr, scale) __extension__ ({\
+ (__m256d)__builtin_ia32_gather3div4df((__v4df)(__m256d)(v1_old), \
+ (double const *)(addr), \
+ (__v4di)(__m256i)(index), \
+ (__mmask8)(mask), (int)(scale)); })
+
+#define _mm256_mmask_i64gather_epi64(v1_old, mask, index, addr, scale) __extension__ ({\
+ (__m256i)__builtin_ia32_gather3div4di((__v4di)(__m256i)(v1_old), \
+ (long long const *)(addr), \
+ (__v4di)(__m256i)(index), \
+ (__mmask8)(mask), (int)(scale)); })
+
+#define _mm_mmask_i64gather_ps(v1_old, mask, index, addr, scale) __extension__ ({\
+ (__m128)__builtin_ia32_gather3div4sf((__v4sf)(__m128)(v1_old), \
+ (float const *)(addr), \
+ (__v2di)(__m128i)(index), \
+ (__mmask8)(mask), (int)(scale)); })
+
+#define _mm_mmask_i64gather_epi32(v1_old, mask, index, addr, scale) __extension__ ({\
+ (__m128i)__builtin_ia32_gather3div4si((__v4si)(__m128i)(v1_old), \
+ (int const *)(addr), \
+ (__v2di)(__m128i)(index), \
+ (__mmask8)(mask), (int)(scale)); })
+
+#define _mm256_mmask_i64gather_ps(v1_old, mask, index, addr, scale) __extension__ ({\
+ (__m128)__builtin_ia32_gather3div8sf((__v4sf)(__m128)(v1_old), \
+ (float const *)(addr), \
+ (__v4di)(__m256i)(index), \
+ (__mmask8)(mask), (int)(scale)); })
+
+#define _mm256_mmask_i64gather_epi32(v1_old, mask, index, addr, scale) __extension__ ({\
+ (__m128i)__builtin_ia32_gather3div8si((__v4si)(__m128i)(v1_old), \
+ (int const *)(addr), \
+ (__v4di)(__m256i)(index), \
+ (__mmask8)(mask), (int)(scale)); })
+
+#define _mm_mmask_i32gather_pd(v1_old, mask, index, addr, scale) __extension__ ({\
+ (__m128d)__builtin_ia32_gather3siv2df((__v2df)(__m128d)(v1_old), \
+ (double const *)(addr), \
+ (__v4si)(__m128i)(index), \
+ (__mmask8)(mask), (int)(scale)); })
+
+#define _mm_mmask_i32gather_epi64(v1_old, mask, index, addr, scale) __extension__ ({\
+ (__m128i)__builtin_ia32_gather3siv2di((__v2di)(__m128i)(v1_old), \
+ (long long const *)(addr), \
+ (__v4si)(__m128i)(index), \
+ (__mmask8)(mask), (int)(scale)); })
+
+#define _mm256_mmask_i32gather_pd(v1_old, mask, index, addr, scale) __extension__ ({\
+ (__m256d)__builtin_ia32_gather3siv4df((__v4df)(__m256d)(v1_old), \
+ (double const *)(addr), \
+ (__v4si)(__m128i)(index), \
+ (__mmask8)(mask), (int)(scale)); })
+
+#define _mm256_mmask_i32gather_epi64(v1_old, mask, index, addr, scale) __extension__ ({\
+ (__m256i)__builtin_ia32_gather3siv4di((__v4di)(__m256i)(v1_old), \
+ (long long const *)(addr), \
+ (__v4si)(__m128i)(index), \
+ (__mmask8)(mask), (int)(scale)); })
+
+#define _mm_mmask_i32gather_ps(v1_old, mask, index, addr, scale) __extension__ ({\
+ (__m128)__builtin_ia32_gather3siv4sf((__v4sf)(__m128)(v1_old), \
+ (float const *)(addr), \
+ (__v4si)(__m128i)(index), \
+ (__mmask8)(mask), (int)(scale)); })
+
+#define _mm_mmask_i32gather_epi32(v1_old, mask, index, addr, scale) __extension__ ({\
+ (__m128i)__builtin_ia32_gather3siv4si((__v4si)(__m128i)(v1_old), \
+ (int const *)(addr), \
+ (__v4si)(__m128i)(index), \
+ (__mmask8)(mask), (int)(scale)); })
+
+#define _mm256_mmask_i32gather_ps(v1_old, mask, index, addr, scale) __extension__ ({\
+ (__m256)__builtin_ia32_gather3siv8sf((__v8sf)(__m256)(v1_old), \
+ (float const *)(addr), \
+ (__v8si)(__m256i)(index), \
+ (__mmask8)(mask), (int)(scale)); })
+
+#define _mm256_mmask_i32gather_epi32(v1_old, mask, index, addr, scale) __extension__ ({\
+ (__m256i)__builtin_ia32_gather3siv8si((__v8si)(__m256i)(v1_old), \
+ (int const *)(addr), \
+ (__v8si)(__m256i)(index), \
+ (__mmask8)(mask), (int)(scale)); })
+
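+/* Usage sketch (illustrative, not part of the upstream header): masked
+ * gathers load element i from addr + index[i] * scale when mask bit i is
+ * set and keep v1_old[i] otherwise; scale must be 1, 2, 4 or 8.  Assuming
+ * double buf[] and an __m128i index vector idx:
+ *
+ *   __m128d r = _mm_mmask_i64gather_pd(old, 0x3, idx, buf, 8);
+ */
+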
+#define _mm256_permutex_pd(X, C) __extension__ ({ \
+ (__m256d)__builtin_shufflevector((__v4df)(__m256d)(X), \
+ (__v4df)_mm256_undefined_pd(), \
+ ((C) >> 0) & 0x3, ((C) >> 2) & 0x3, \
+ ((C) >> 4) & 0x3, ((C) >> 6) & 0x3); })
+
+#define _mm256_mask_permutex_pd(W, U, X, C) __extension__ ({ \
+ (__m256d)__builtin_ia32_selectpd_256((__mmask8)(U), \
+ (__v4df)_mm256_permutex_pd((X), (C)), \
+ (__v4df)(__m256d)(W)); })
+
+#define _mm256_maskz_permutex_pd(U, X, C) __extension__ ({ \
+ (__m256d)__builtin_ia32_selectpd_256((__mmask8)(U), \
+ (__v4df)_mm256_permutex_pd((X), (C)), \
+ (__v4df)_mm256_setzero_pd()); })
+
+#define _mm256_permutex_epi64(X, C) __extension__ ({ \
+ (__m256i)__builtin_shufflevector((__v4di)(__m256i)(X), \
+ (__v4di)_mm256_undefined_si256(), \
+ ((C) >> 0) & 0x3, ((C) >> 2) & 0x3, \
+ ((C) >> 4) & 0x3, ((C) >> 6) & 0x3); })
+
+#define _mm256_mask_permutex_epi64(W, U, X, C) __extension__ ({ \
+ (__m256i)__builtin_ia32_selectq_256((__mmask8)(U), \
+ (__v4di)_mm256_permutex_epi64((X), (C)), \
+ (__v4di)(__m256i)(W)); })
+
+#define _mm256_maskz_permutex_epi64(U, X, C) __extension__ ({ \
+ (__m256i)__builtin_ia32_selectq_256((__mmask8)(U), \
+ (__v4di)_mm256_permutex_epi64((X), (C)), \
+ (__v4di)_mm256_setzero_si256()); })
+
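+/* Illustrative sketch (not part of this header): the immediate form reorders
+ * 64-bit elements across the whole 256-bit vector. 0x1B encodes the four
+ * 2-bit fields {3,2,1,0}, i.e. a full reversal, and the zero-masking variant
+ * clears any lane whose mask bit is 0. Hypothetical helper; assumes AVX-512VL. */
+static __inline__ __m256d
+example_reverse_pd(__m256d v)
+{
+  return _mm256_maskz_permutex_pd((__mmask8)0xF, v, 0x1B);
+}
+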
+static __inline__ __m256d __DEFAULT_FN_ATTRS
+_mm256_permutexvar_pd (__m256i __X, __m256d __Y)
+{
+ return (__m256d) __builtin_ia32_permvardf256_mask ((__v4df) __Y,
+ (__v4di) __X,
+              (__v4df) _mm256_undefined_pd (),
+ (__mmask8) -1);
+}
+
+static __inline__ __m256d __DEFAULT_FN_ATTRS
+_mm256_mask_permutexvar_pd (__m256d __W, __mmask8 __U, __m256i __X,
+ __m256d __Y)
+{
+ return (__m256d) __builtin_ia32_permvardf256_mask ((__v4df) __Y,
+ (__v4di) __X,
+ (__v4df) __W,
+ (__mmask8) __U);
+}
+
+static __inline__ __m256d __DEFAULT_FN_ATTRS
+_mm256_maskz_permutexvar_pd (__mmask8 __U, __m256i __X, __m256d __Y)
+{
+ return (__m256d) __builtin_ia32_permvardf256_mask ((__v4df) __Y,
+ (__v4di) __X,
+ (__v4df) _mm256_setzero_pd (),
+ (__mmask8) __U);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS
+_mm256_maskz_permutexvar_epi64 (__mmask8 __M, __m256i __X, __m256i __Y)
+{
+ return (__m256i) __builtin_ia32_permvardi256_mask ((__v4di) __Y,
+ (__v4di) __X,
+ (__v4di) _mm256_setzero_si256 (),
+ (__mmask8) __M);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS
+_mm256_permutexvar_epi64 ( __m256i __X, __m256i __Y)
+{
+ return (__m256i) __builtin_ia32_permvardi256_mask ((__v4di) __Y,
+ (__v4di) __X,
+ (__v4di) _mm256_undefined_si256 (),
+ (__mmask8) -1);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS
+_mm256_mask_permutexvar_epi64 (__m256i __W, __mmask8 __M, __m256i __X,
+ __m256i __Y)
+{
+ return (__m256i) __builtin_ia32_permvardi256_mask ((__v4di) __Y,
+ (__v4di) __X,
+ (__v4di) __W,
+              (__mmask8) __M);
+}
+
+static __inline__ __m256 __DEFAULT_FN_ATTRS
+_mm256_mask_permutexvar_ps (__m256 __W, __mmask8 __U, __m256i __X,
+ __m256 __Y)
+{
+ return (__m256) __builtin_ia32_permvarsf256_mask ((__v8sf) __Y,
+ (__v8si) __X,
+ (__v8sf) __W,
+ (__mmask8) __U);
+}
+
+static __inline__ __m256 __DEFAULT_FN_ATTRS
+_mm256_maskz_permutexvar_ps (__mmask8 __U, __m256i __X, __m256 __Y)
+{
+ return (__m256) __builtin_ia32_permvarsf256_mask ((__v8sf) __Y,
+ (__v8si) __X,
+ (__v8sf) _mm256_setzero_ps (),
+ (__mmask8) __U);
+}
+
+static __inline__ __m256 __DEFAULT_FN_ATTRS
+_mm256_permutexvar_ps (__m256i __X, __m256 __Y)
+{
+ return (__m256) __builtin_ia32_permvarsf256_mask ((__v8sf) __Y,
+ (__v8si) __X,
+            (__v8sf) _mm256_undefined_ps (),
+ (__mmask8) -1);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS
+_mm256_maskz_permutexvar_epi32 (__mmask8 __M, __m256i __X, __m256i __Y)
+{
+ return (__m256i) __builtin_ia32_permvarsi256_mask ((__v8si) __Y,
+ (__v8si) __X,
+ (__v8si) _mm256_setzero_si256 (),
+             (__mmask8) __M);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS
+_mm256_mask_permutexvar_epi32 (__m256i __W, __mmask8 __M, __m256i __X,
+ __m256i __Y)
+{
+ return (__m256i) __builtin_ia32_permvarsi256_mask ((__v8si) __Y,
+ (__v8si) __X,
+ (__v8si) __W,
+ (__mmask8) __M);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS
+_mm256_permutexvar_epi32 (__m256i __X, __m256i __Y)
+{
+ return (__m256i) __builtin_ia32_permvarsi256_mask ((__v8si) __Y,
+ (__v8si) __X,
+ (__v8si) _mm256_undefined_si256(),
+ (__mmask8) -1);
+}
+
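+/* Illustrative sketch (not part of this header): the variable form takes the
+ * lane indices from a vector, so data-dependent, cross-lane shuffles need no
+ * immediate. Hypothetical helper rotating the eight 32-bit lanes left by
+ * one; assumes AVX-512VL, and _mm256_setr_epi32 comes via <immintrin.h>. */
+static __inline__ __m256i
+example_rotate_lanes(__m256i v)
+{
+  const __m256i idx = _mm256_setr_epi32(1, 2, 3, 4, 5, 6, 7, 0);
+  return _mm256_permutexvar_epi32(idx, v);
+}
+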
+#define _mm_alignr_epi32(A, B, imm) __extension__ ({ \
+ (__m128i)__builtin_ia32_alignd128_mask((__v4si)(__m128i)(A), \
+ (__v4si)(__m128i)(B), (int)(imm), \
+ (__v4si)_mm_undefined_si128(), \
+ (__mmask8)-1); })
+
+#define _mm_mask_alignr_epi32(W, U, A, B, imm) __extension__ ({ \
+ (__m128i)__builtin_ia32_alignd128_mask((__v4si)(__m128i)(A), \
+ (__v4si)(__m128i)(B), (int)(imm), \
+ (__v4si)(__m128i)(W), \
+ (__mmask8)(U)); })
+
+#define _mm_maskz_alignr_epi32(U, A, B, imm) __extension__ ({ \
+ (__m128i)__builtin_ia32_alignd128_mask((__v4si)(__m128i)(A), \
+ (__v4si)(__m128i)(B), (int)(imm), \
+ (__v4si)_mm_setzero_si128(), \
+ (__mmask8)(U)); })
+
+#define _mm256_alignr_epi32(A, B, imm) __extension__ ({ \
+ (__m256i)__builtin_ia32_alignd256_mask((__v8si)(__m256i)(A), \
+ (__v8si)(__m256i)(B), (int)(imm), \
+ (__v8si)_mm256_undefined_si256(), \
+ (__mmask8)-1); })
+
+#define _mm256_mask_alignr_epi32(W, U, A, B, imm) __extension__ ({ \
+ (__m256i)__builtin_ia32_alignd256_mask((__v8si)(__m256i)(A), \
+ (__v8si)(__m256i)(B), (int)(imm), \
+ (__v8si)(__m256i)(W), \
+ (__mmask8)(U)); })
+
+#define _mm256_maskz_alignr_epi32(U, A, B, imm) __extension__ ({ \
+ (__m256i)__builtin_ia32_alignd256_mask((__v8si)(__m256i)(A), \
+ (__v8si)(__m256i)(B), (int)(imm), \
+ (__v8si)_mm256_setzero_si256(), \
+ (__mmask8)(U)); })
+
+#define _mm_alignr_epi64(A, B, imm) __extension__ ({ \
+ (__m128i)__builtin_ia32_alignq128_mask((__v2di)(__m128i)(A), \
+ (__v2di)(__m128i)(B), (int)(imm), \
+                                         (__v2di)_mm_undefined_si128(), \
+ (__mmask8)-1); })
+
+#define _mm_mask_alignr_epi64(W, U, A, B, imm) __extension__ ({ \
+ (__m128i)__builtin_ia32_alignq128_mask((__v2di)(__m128i)(A), \
+ (__v2di)(__m128i)(B), (int)(imm), \
+ (__v2di)(__m128i)(W), \
+ (__mmask8)(U)); })
+
+#define _mm_maskz_alignr_epi64(U, A, B, imm) __extension__ ({ \
+ (__m128i)__builtin_ia32_alignq128_mask((__v2di)(__m128i)(A), \
+ (__v2di)(__m128i)(B), (int)(imm), \
+ (__v2di)_mm_setzero_di(), \
+ (__mmask8)(U)); })
+
+#define _mm256_alignr_epi64(A, B, imm) __extension__ ({ \
+ (__m256i)__builtin_ia32_alignq256_mask((__v4di)(__m256i)(A), \
+ (__v4di)(__m256i)(B), (int)(imm), \
+                                         (__v4di)_mm256_undefined_si256(), \
+ (__mmask8)-1); })
+
+#define _mm256_mask_alignr_epi64(W, U, A, B, imm) __extension__ ({ \
+ (__m256i)__builtin_ia32_alignq256_mask((__v4di)(__m256i)(A), \
+ (__v4di)(__m256i)(B), (int)(imm), \
+ (__v4di)(__m256i)(W), \
+ (__mmask8)(U)); })
+
+#define _mm256_maskz_alignr_epi64(U, A, B, imm) __extension__ ({ \
+ (__m256i)__builtin_ia32_alignq256_mask((__v4di)(__m256i)(A), \
+ (__v4di)(__m256i)(B), (int)(imm), \
+ (__v4di)_mm256_setzero_si256(), \
+ (__mmask8)(U)); })
+
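+/* Illustrative sketch (not part of this header): valign treats {A:B} as one
+ * concatenated vector (A in the upper half) and shifts it right by `imm`
+ * whole elements, so result lane i is element i+imm of that concatenation.
+ * Hypothetical helper; assumes AVX-512VL. */
+static __inline__ __m256i
+example_shift_pair_right2(__m256i a, __m256i b)
+{
+  return _mm256_alignr_epi32(a, b, 2);
+}
+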
+static __inline__ __m128 __DEFAULT_FN_ATTRS
+_mm_mask_movehdup_ps (__m128 __W, __mmask8 __U, __m128 __A)
+{
+ return (__m128)__builtin_ia32_selectps_128((__mmask8)__U,
+ (__v4sf)_mm_movehdup_ps(__A),
+ (__v4sf)__W);
+}
+
+static __inline__ __m128 __DEFAULT_FN_ATTRS
+_mm_maskz_movehdup_ps (__mmask8 __U, __m128 __A)
+{
+ return (__m128)__builtin_ia32_selectps_128((__mmask8)__U,
+ (__v4sf)_mm_movehdup_ps(__A),
+ (__v4sf)_mm_setzero_ps());
+}
+
+static __inline__ __m256 __DEFAULT_FN_ATTRS
+_mm256_mask_movehdup_ps (__m256 __W, __mmask8 __U, __m256 __A)
+{
+ return (__m256)__builtin_ia32_selectps_256((__mmask8)__U,
+ (__v8sf)_mm256_movehdup_ps(__A),
+ (__v8sf)__W);
+}
+
+static __inline__ __m256 __DEFAULT_FN_ATTRS
+_mm256_maskz_movehdup_ps (__mmask8 __U, __m256 __A)
+{
+ return (__m256)__builtin_ia32_selectps_256((__mmask8)__U,
+ (__v8sf)_mm256_movehdup_ps(__A),
+ (__v8sf)_mm256_setzero_ps());
+}
+
+static __inline__ __m128 __DEFAULT_FN_ATTRS
+_mm_mask_moveldup_ps (__m128 __W, __mmask8 __U, __m128 __A)
+{
+ return (__m128)__builtin_ia32_selectps_128((__mmask8)__U,
+ (__v4sf)_mm_moveldup_ps(__A),
+ (__v4sf)__W);
+}
+
+static __inline__ __m128 __DEFAULT_FN_ATTRS
+_mm_maskz_moveldup_ps (__mmask8 __U, __m128 __A)
+{
+ return (__m128)__builtin_ia32_selectps_128((__mmask8)__U,
+ (__v4sf)_mm_moveldup_ps(__A),
+ (__v4sf)_mm_setzero_ps());
+}
+
+static __inline__ __m256 __DEFAULT_FN_ATTRS
+_mm256_mask_moveldup_ps (__m256 __W, __mmask8 __U, __m256 __A)
+{
+ return (__m256)__builtin_ia32_selectps_256((__mmask8)__U,
+ (__v8sf)_mm256_moveldup_ps(__A),
+ (__v8sf)__W);
+}
+
+static __inline__ __m256 __DEFAULT_FN_ATTRS
+_mm256_maskz_moveldup_ps (__mmask8 __U, __m256 __A)
+{
+ return (__m256)__builtin_ia32_selectps_256((__mmask8)__U,
+ (__v8sf)_mm256_moveldup_ps(__A),
+ (__v8sf)_mm256_setzero_ps());
+}
+
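+/* Illustrative sketch (not part of this header): movehdup copies each
+ * odd-indexed float over its even neighbour, giving {a1, a1, a3, a3}; the
+ * zero-masked form then clears the unselected lanes. Hypothetical helper;
+ * assumes AVX-512VL. */
+static __inline__ __m128
+example_dup_odd_low(__m128 a)
+{
+  /* mask 0x3 keeps lanes 0 and 1, zeroes lanes 2 and 3. */
+  return _mm_maskz_movehdup_ps((__mmask8)0x3, a);
+}
+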
+#define _mm256_mask_shuffle_epi32(W, U, A, I) __extension__({\
+ (__m256i)__builtin_ia32_selectd_256((__mmask8)(U), \
+ (__v8si)_mm256_shuffle_epi32((A), (I)), \
+ (__v8si)(__m256i)(W)); })
+
+#define _mm256_maskz_shuffle_epi32(U, A, I) __extension__({\
+ (__m256i)__builtin_ia32_selectd_256((__mmask8)(U), \
+ (__v8si)_mm256_shuffle_epi32((A), (I)), \
+ (__v8si)_mm256_setzero_si256()); })
+
+#define _mm_mask_shuffle_epi32(W, U, A, I) __extension__({\
+ (__m128i)__builtin_ia32_selectd_128((__mmask8)(U), \
+ (__v4si)_mm_shuffle_epi32((A), (I)), \
+ (__v4si)(__m128i)(W)); })
+
+#define _mm_maskz_shuffle_epi32(U, A, I) __extension__({\
+ (__m128i)__builtin_ia32_selectd_128((__mmask8)(U), \
+ (__v4si)_mm_shuffle_epi32((A), (I)), \
+ (__v4si)_mm_setzero_si128()); })
+
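+/* Illustrative sketch (not part of this header): a masked shuffle broadcasts
+ * within a vector while leaving the other lanes of W untouched; imm 0x00
+ * selects lane 0 four times. Hypothetical helper; assumes AVX-512VL. */
+static __inline__ __m128i
+example_broadcast_low(__m128i w, __m128i a)
+{
+  return _mm_mask_shuffle_epi32(w, (__mmask8)0x3, a, 0x00);
+}
+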
+static __inline__ __m128d __DEFAULT_FN_ATTRS
+_mm_mask_mov_pd (__m128d __W, __mmask8 __U, __m128d __A)
+{
+ return (__m128d) __builtin_ia32_selectpd_128 ((__mmask8) __U,
+ (__v2df) __A,
+ (__v2df) __W);
+}
+
+static __inline__ __m128d __DEFAULT_FN_ATTRS
+_mm_maskz_mov_pd (__mmask8 __U, __m128d __A)
+{
+ return (__m128d) __builtin_ia32_selectpd_128 ((__mmask8) __U,
+ (__v2df) __A,
+ (__v2df) _mm_setzero_pd ());
+}
+
+static __inline__ __m256d __DEFAULT_FN_ATTRS
+_mm256_mask_mov_pd (__m256d __W, __mmask8 __U, __m256d __A)
+{
+ return (__m256d) __builtin_ia32_selectpd_256 ((__mmask8) __U,
+ (__v4df) __A,
+ (__v4df) __W);
+}
+
+static __inline__ __m256d __DEFAULT_FN_ATTRS
+_mm256_maskz_mov_pd (__mmask8 __U, __m256d __A)
+{
+ return (__m256d) __builtin_ia32_selectpd_256 ((__mmask8) __U,
+ (__v4df) __A,
+ (__v4df) _mm256_setzero_pd ());
+}
+
+static __inline__ __m128 __DEFAULT_FN_ATTRS
+_mm_mask_mov_ps (__m128 __W, __mmask8 __U, __m128 __A)
+{
+ return (__m128) __builtin_ia32_selectps_128 ((__mmask8) __U,
+ (__v4sf) __A,
+ (__v4sf) __W);
+}
+
+static __inline__ __m128 __DEFAULT_FN_ATTRS
+_mm_maskz_mov_ps (__mmask8 __U, __m128 __A)
+{
+ return (__m128) __builtin_ia32_selectps_128 ((__mmask8) __U,
+ (__v4sf) __A,
+ (__v4sf) _mm_setzero_ps ());
+}
+
+static __inline__ __m256 __DEFAULT_FN_ATTRS
+_mm256_mask_mov_ps (__m256 __W, __mmask8 __U, __m256 __A)
+{
+ return (__m256) __builtin_ia32_selectps_256 ((__mmask8) __U,
+ (__v8sf) __A,
+ (__v8sf) __W);
+}
+
+static __inline__ __m256 __DEFAULT_FN_ATTRS
+_mm256_maskz_mov_ps (__mmask8 __U, __m256 __A)
+{
+ return (__m256) __builtin_ia32_selectps_256 ((__mmask8) __U,
+ (__v8sf) __A,
+ (__v8sf) _mm256_setzero_ps ());
+}
+
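+/* Illustrative sketch (not part of this header): mask_mov is a per-lane
+ * blend -- where mask bit i is set the result takes __A's lane i, otherwise
+ * __W's lane survives. Hypothetical helper merging the low half of `a` into
+ * `w`; assumes AVX-512VL. */
+static __inline__ __m128
+example_blend_low_half(__m128 w, __m128 a)
+{
+  return _mm_mask_mov_ps(w, (__mmask8)0x3, a);
+}
+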
+static __inline__ __m128 __DEFAULT_FN_ATTRS
+_mm_mask_cvtph_ps (__m128 __W, __mmask8 __U, __m128i __A)
+{
+ return (__m128) __builtin_ia32_vcvtph2ps_mask ((__v8hi) __A,
+ (__v4sf) __W,
+ (__mmask8) __U);
+}
+
+static __inline__ __m128 __DEFAULT_FN_ATTRS
+_mm_maskz_cvtph_ps (__mmask8 __U, __m128i __A)
+{
+ return (__m128) __builtin_ia32_vcvtph2ps_mask ((__v8hi) __A,
+ (__v4sf)
+ _mm_setzero_ps (),
+ (__mmask8) __U);
+}
+
+static __inline__ __m256 __DEFAULT_FN_ATTRS
+_mm256_mask_cvtph_ps (__m256 __W, __mmask8 __U, __m128i __A)
+{
+ return (__m256) __builtin_ia32_vcvtph2ps256_mask ((__v8hi) __A,
+ (__v8sf) __W,
+ (__mmask8) __U);
+}
+
+static __inline__ __m256 __DEFAULT_FN_ATTRS
+_mm256_maskz_cvtph_ps (__mmask8 __U, __m128i __A)
+{
+ return (__m256) __builtin_ia32_vcvtph2ps256_mask ((__v8hi) __A,
+ (__v8sf)
+ _mm256_setzero_ps (),
+ (__mmask8) __U);
+}
+
+static __inline __m128i __DEFAULT_FN_ATTRS
+_mm_mask_cvtps_ph (__m128i __W, __mmask8 __U, __m128 __A)
+{
+ return (__m128i) __builtin_ia32_vcvtps2ph_mask ((__v4sf) __A, _MM_FROUND_CUR_DIRECTION,
+ (__v8hi) __W,
+ (__mmask8) __U);
+}
+
+static __inline __m128i __DEFAULT_FN_ATTRS
+_mm_maskz_cvtps_ph (__mmask8 __U, __m128 __A)
+{
+ return (__m128i) __builtin_ia32_vcvtps2ph_mask ((__v4sf) __A, _MM_FROUND_CUR_DIRECTION,
+ (__v8hi) _mm_setzero_si128 (),
+ (__mmask8) __U);
+}
+
+#define _mm_mask_cvt_roundps_ph(W, U, A, I) __extension__ ({ \
+ (__m128i)__builtin_ia32_vcvtps2ph_mask((__v4sf)(__m128)(A), (int)(I), \
+ (__v8hi)(__m128i)(W), \
+ (__mmask8)(U)); })
+
+#define _mm_maskz_cvt_roundps_ph(U, A, I) __extension__ ({ \
+ (__m128i)__builtin_ia32_vcvtps2ph_mask((__v4sf)(__m128)(A), (int)(I), \
+ (__v8hi)_mm_setzero_si128(), \
+ (__mmask8)(U)); })
+
+static __inline __m128i __DEFAULT_FN_ATTRS
+_mm256_mask_cvtps_ph (__m128i __W, __mmask8 __U, __m256 __A)
+{
+ return (__m128i) __builtin_ia32_vcvtps2ph256_mask ((__v8sf) __A, _MM_FROUND_CUR_DIRECTION,
+ (__v8hi) __W,
+ (__mmask8) __U);
+}
+
+static __inline __m128i __DEFAULT_FN_ATTRS
+_mm256_maskz_cvtps_ph ( __mmask8 __U, __m256 __A)
+{
+ return (__m128i) __builtin_ia32_vcvtps2ph256_mask ((__v8sf) __A, _MM_FROUND_CUR_DIRECTION,
+ (__v8hi) _mm_setzero_si128(),
+ (__mmask8) __U);
+}
+#define _mm256_mask_cvt_roundps_ph(W, U, A, I) __extension__ ({ \
+ (__m128i)__builtin_ia32_vcvtps2ph256_mask((__v8sf)(__m256)(A), (int)(I), \
+ (__v8hi)(__m128i)(W), \
+ (__mmask8)(U)); })
+
+#define _mm256_maskz_cvt_roundps_ph(U, A, I) __extension__ ({ \
+ (__m128i)__builtin_ia32_vcvtps2ph256_mask((__v8sf)(__m256)(A), (int)(I), \
+ (__v8hi)_mm_setzero_si128(), \
+ (__mmask8)(U)); })
+
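+/* Illustrative sketch (not part of this header): a masked round trip through
+ * half precision; lanes not selected by `keep` come back as zero from both
+ * conversions, and both use the current rounding mode. Hypothetical helper;
+ * assumes AVX-512VL. */
+static __inline__ __m128
+example_through_fp16(__m128 v, __mmask8 keep)
+{
+  __m128i h = _mm_maskz_cvtps_ph(keep, v);
+  return _mm_maskz_cvtph_ps(keep, h);
+}
+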
+
+#undef __DEFAULT_FN_ATTRS
+
+#endif /* __AVX512VLINTRIN_H */
diff --git a/current/clang-include/avxintrin.h b/current/clang-include/avxintrin.h
new file mode 100644
index 0000000..86bfdfb
--- /dev/null
+++ b/current/clang-include/avxintrin.h
@@ -0,0 +1,2928 @@
+/*===---- avxintrin.h - AVX intrinsics -------------------------------------===
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+ * THE SOFTWARE.
+ *
+ *===-----------------------------------------------------------------------===
+ */
+
+#ifndef __IMMINTRIN_H
+#error "Never use <avxintrin.h> directly; include <immintrin.h> instead."
+#endif
+
+#ifndef __AVXINTRIN_H
+#define __AVXINTRIN_H
+
+typedef double __v4df __attribute__ ((__vector_size__ (32)));
+typedef float __v8sf __attribute__ ((__vector_size__ (32)));
+typedef long long __v4di __attribute__ ((__vector_size__ (32)));
+typedef int __v8si __attribute__ ((__vector_size__ (32)));
+typedef short __v16hi __attribute__ ((__vector_size__ (32)));
+typedef char __v32qi __attribute__ ((__vector_size__ (32)));
+
+/* Unsigned types */
+typedef unsigned long long __v4du __attribute__ ((__vector_size__ (32)));
+typedef unsigned int __v8su __attribute__ ((__vector_size__ (32)));
+typedef unsigned short __v16hu __attribute__ ((__vector_size__ (32)));
+typedef unsigned char __v32qu __attribute__ ((__vector_size__ (32)));
+
+/* We need an explicitly signed variant for char. Note that this shouldn't
+ * appear in the interface though. */
+typedef signed char __v32qs __attribute__((__vector_size__(32)));
+
+typedef float __m256 __attribute__ ((__vector_size__ (32)));
+typedef double __m256d __attribute__((__vector_size__(32)));
+typedef long long __m256i __attribute__((__vector_size__(32)));
+
+/* Define the default attributes for the functions in this file. */
+#define __DEFAULT_FN_ATTRS __attribute__((__always_inline__, __nodebug__, __target__("avx")))
+
+/* Arithmetic */
+/// \brief Adds two 256-bit vectors of [4 x double].
+///
+/// \headerfile <x86intrin.h>
+///
+/// This intrinsic corresponds to the \c VADDPD / ADDPD instruction.
+///
+/// \param __a
+/// A 256-bit vector of [4 x double] containing one of the source operands.
+/// \param __b
+/// A 256-bit vector of [4 x double] containing one of the source operands.
+/// \returns A 256-bit vector of [4 x double] containing the sums of both
+/// operands.
+static __inline __m256d __DEFAULT_FN_ATTRS
+_mm256_add_pd(__m256d __a, __m256d __b)
+{
+ return (__m256d)((__v4df)__a+(__v4df)__b);
+}
+
+/// \brief Adds two 256-bit vectors of [8 x float].
+///
+/// \headerfile <x86intrin.h>
+///
+/// This intrinsic corresponds to the \c VADDPS / ADDPS instruction.
+///
+/// \param __a
+/// A 256-bit vector of [8 x float] containing one of the source operands.
+/// \param __b
+/// A 256-bit vector of [8 x float] containing one of the source operands.
+/// \returns A 256-bit vector of [8 x float] containing the sums of both
+/// operands.
+static __inline __m256 __DEFAULT_FN_ATTRS
+_mm256_add_ps(__m256 __a, __m256 __b)
+{
+ return (__m256)((__v8sf)__a+(__v8sf)__b);
+}
+
+/// \brief Subtracts two 256-bit vectors of [4 x double].
+///
+/// \headerfile <x86intrin.h>
+///
+/// This intrinsic corresponds to the \c VSUBPD / SUBPD instruction.
+///
+/// \param __a
+/// A 256-bit vector of [4 x double] containing the minuend.
+/// \param __b
+/// A 256-bit vector of [4 x double] containing the subtrahend.
+/// \returns A 256-bit vector of [4 x double] containing the differences between
+/// both operands.
+static __inline __m256d __DEFAULT_FN_ATTRS
+_mm256_sub_pd(__m256d __a, __m256d __b)
+{
+ return (__m256d)((__v4df)__a-(__v4df)__b);
+}
+
+/// \brief Subtracts two 256-bit vectors of [8 x float].
+///
+/// \headerfile <x86intrin.h>
+///
+/// This intrinsic corresponds to the \c VSUBPS / SUBPS instruction.
+///
+/// \param __a
+/// A 256-bit vector of [8 x float] containing the minuend.
+/// \param __b
+/// A 256-bit vector of [8 x float] containing the subtrahend.
+/// \returns A 256-bit vector of [8 x float] containing the differences between
+/// both operands.
+static __inline __m256 __DEFAULT_FN_ATTRS
+_mm256_sub_ps(__m256 __a, __m256 __b)
+{
+ return (__m256)((__v8sf)__a-(__v8sf)__b);
+}
+
+/// \brief Adds the even-indexed values and subtracts the odd-indexed values of
+/// two 256-bit vectors of [4 x double].
+///
+/// \headerfile <x86intrin.h>
+///
+/// This intrinsic corresponds to the \c VADDSUBPD / ADDSUBPD instruction.
+///
+/// \param __a
+/// A 256-bit vector of [4 x double] containing the left source operand.
+/// \param __b
+/// A 256-bit vector of [4 x double] containing the right source operand.
+/// \returns A 256-bit vector of [4 x double] containing the alternating sums
+/// and differences between both operands.
+static __inline __m256d __DEFAULT_FN_ATTRS
+_mm256_addsub_pd(__m256d __a, __m256d __b)
+{
+ return (__m256d)__builtin_ia32_addsubpd256((__v4df)__a, (__v4df)__b);
+}
+
+/// \brief Adds the even-indexed values and subtracts the odd-indexed values of
+/// two 256-bit vectors of [8 x float].
+///
+/// \headerfile <x86intrin.h>
+///
+/// This intrinsic corresponds to the \c VADDSUBPS / ADDSUBPS instruction.
+///
+/// \param __a
+/// A 256-bit vector of [8 x float] containing the left source operand.
+/// \param __b
+/// A 256-bit vector of [8 x float] containing the right source operand.
+/// \returns A 256-bit vector of [8 x float] containing the alternating sums and
+/// differences between both operands.
+static __inline __m256 __DEFAULT_FN_ATTRS
+_mm256_addsub_ps(__m256 __a, __m256 __b)
+{
+ return (__m256)__builtin_ia32_addsubps256((__v8sf)__a, (__v8sf)__b);
+}
+
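+/* Illustrative sketch (not part of this header): addsub's subtract-even /
+ * add-odd pattern is the core of interleaved complex multiplication. With
+ * a = {ar0, ai0, ar1, ai1} and b laid out the same way, the helper below
+ * yields the complex product per pair. Hypothetical user code;
+ * _mm256_permute_pd is defined later in this header and reaches user code
+ * via <immintrin.h>. */
+static __inline __m256d
+example_cmul_pd(__m256d a, __m256d b)
+{
+  __m256d b_re = _mm256_permute_pd(b, 0x0);   /* {br0, br0, br1, br1} */
+  __m256d b_im = _mm256_permute_pd(b, 0xF);   /* {bi0, bi0, bi1, bi1} */
+  __m256d a_sw = _mm256_permute_pd(a, 0x5);   /* {ai0, ar0, ai1, ar1} */
+  return _mm256_addsub_pd(_mm256_mul_pd(a, b_re),
+                          _mm256_mul_pd(a_sw, b_im));
+}
+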
+/// \brief Divides two 256-bit vectors of [4 x double].
+///
+/// \headerfile <x86intrin.h>
+///
+/// This intrinsic corresponds to the \c VDIVPD / DIVPD instruction.
+///
+/// \param __a
+/// A 256-bit vector of [4 x double] containing the dividend.
+/// \param __b
+/// A 256-bit vector of [4 x double] containing the divisor.
+/// \returns A 256-bit vector of [4 x double] containing the quotients of both
+/// operands.
+static __inline __m256d __DEFAULT_FN_ATTRS
+_mm256_div_pd(__m256d __a, __m256d __b)
+{
+ return (__m256d)((__v4df)__a/(__v4df)__b);
+}
+
+/// \brief Divides two 256-bit vectors of [8 x float].
+///
+/// \headerfile <x86intrin.h>
+///
+/// This intrinsic corresponds to the \c VDIVPS / DIVPS instruction.
+///
+/// \param __a
+/// A 256-bit vector of [8 x float] containing the dividend.
+/// \param __b
+/// A 256-bit vector of [8 x float] containing the divisor.
+/// \returns A 256-bit vector of [8 x float] containing the quotients of both
+/// operands.
+static __inline __m256 __DEFAULT_FN_ATTRS
+_mm256_div_ps(__m256 __a, __m256 __b)
+{
+ return (__m256)((__v8sf)__a/(__v8sf)__b);
+}
+
+/// \brief Compares two 256-bit vectors of [4 x double] and returns the greater
+/// of each pair of values.
+///
+/// \headerfile <x86intrin.h>
+///
+/// This intrinsic corresponds to the \c VMAXPD / MAXPD instruction.
+///
+/// \param __a
+/// A 256-bit vector of [4 x double] containing one of the operands.
+/// \param __b
+/// A 256-bit vector of [4 x double] containing one of the operands.
+/// \returns A 256-bit vector of [4 x double] containing the maximum values
+/// between both operands.
+static __inline __m256d __DEFAULT_FN_ATTRS
+_mm256_max_pd(__m256d __a, __m256d __b)
+{
+ return (__m256d)__builtin_ia32_maxpd256((__v4df)__a, (__v4df)__b);
+}
+
+/// \brief Compares two 256-bit vectors of [8 x float] and returns the greater
+/// of each pair of values.
+///
+/// \headerfile <x86intrin.h>
+///
+/// This intrinsic corresponds to the \c VMAXPS / MAXPS instruction.
+///
+/// \param __a
+/// A 256-bit vector of [8 x float] containing one of the operands.
+/// \param __b
+/// A 256-bit vector of [8 x float] containing one of the operands.
+/// \returns A 256-bit vector of [8 x float] containing the maximum values
+/// between both operands.
+static __inline __m256 __DEFAULT_FN_ATTRS
+_mm256_max_ps(__m256 __a, __m256 __b)
+{
+ return (__m256)__builtin_ia32_maxps256((__v8sf)__a, (__v8sf)__b);
+}
+
+/// \brief Compares two 256-bit vectors of [4 x double] and returns the lesser
+/// of each pair of values.
+///
+/// \headerfile <x86intrin.h>
+///
+/// This intrinsic corresponds to the \c VMINPD / MINPD instruction.
+///
+/// \param __a
+/// A 256-bit vector of [4 x double] containing one of the operands.
+/// \param __b
+/// A 256-bit vector of [4 x double] containing one of the operands.
+/// \returns A 256-bit vector of [4 x double] containing the minimum values
+/// between both operands.
+static __inline __m256d __DEFAULT_FN_ATTRS
+_mm256_min_pd(__m256d __a, __m256d __b)
+{
+ return (__m256d)__builtin_ia32_minpd256((__v4df)__a, (__v4df)__b);
+}
+
+/// \brief Compares two 256-bit vectors of [8 x float] and returns the lesser
+/// of each pair of values.
+///
+/// \headerfile <x86intrin.h>
+///
+/// This intrinsic corresponds to the \c VMINPS / MINPS instruction.
+///
+/// \param __a
+/// A 256-bit vector of [8 x float] containing one of the operands.
+/// \param __b
+/// A 256-bit vector of [8 x float] containing one of the operands.
+/// \returns A 256-bit vector of [8 x float] containing the minimum values
+/// between both operands.
+static __inline __m256 __DEFAULT_FN_ATTRS
+_mm256_min_ps(__m256 __a, __m256 __b)
+{
+ return (__m256)__builtin_ia32_minps256((__v8sf)__a, (__v8sf)__b);
+}
+
+/// \brief Multiplies two 256-bit vectors of [4 x double].
+///
+/// \headerfile <x86intrin.h>
+///
+/// This intrinsic corresponds to the \c VMULPD / MULPD instruction.
+///
+/// \param __a
+/// A 256-bit vector of [4 x double] containing one of the operands.
+/// \param __b
+/// A 256-bit vector of [4 x double] containing one of the operands.
+/// \returns A 256-bit vector of [4 x double] containing the products of both
+/// operands.
+static __inline __m256d __DEFAULT_FN_ATTRS
+_mm256_mul_pd(__m256d __a, __m256d __b)
+{
+ return (__m256d)((__v4df)__a * (__v4df)__b);
+}
+
+/// \brief Multiplies two 256-bit vectors of [8 x float].
+///
+/// \headerfile <x86intrin.h>
+///
+/// This intrinsic corresponds to the \c VMULPS / MULPS instruction.
+///
+/// \param __a
+/// A 256-bit vector of [8 x float] containing one of the operands.
+/// \param __b
+/// A 256-bit vector of [8 x float] containing one of the operands.
+/// \returns A 256-bit vector of [8 x float] containing the products of both
+/// operands.
+static __inline __m256 __DEFAULT_FN_ATTRS
+_mm256_mul_ps(__m256 __a, __m256 __b)
+{
+ return (__m256)((__v8sf)__a * (__v8sf)__b);
+}
+
+/// \brief Calculates the square roots of the values in a 256-bit vector of
+/// [4 x double].
+///
+/// \headerfile <x86intrin.h>
+///
+/// This intrinsic corresponds to the \c VSQRTPD / SQRTPD instruction.
+///
+/// \param __a
+/// A 256-bit vector of [4 x double].
+/// \returns A 256-bit vector of [4 x double] containing the square roots of the
+/// values in the operand.
+static __inline __m256d __DEFAULT_FN_ATTRS
+_mm256_sqrt_pd(__m256d __a)
+{
+ return (__m256d)__builtin_ia32_sqrtpd256((__v4df)__a);
+}
+
+/// \brief Calculates the square roots of the values in a 256-bit vector of
+/// [8 x float].
+///
+/// \headerfile <x86intrin.h>
+///
+/// This intrinsic corresponds to the \c VSQRTPS / SQRTPS instruction.
+///
+/// \param __a
+/// A 256-bit vector of [8 x float].
+/// \returns A 256-bit vector of [8 x float] containing the square roots of the
+/// values in the operand.
+static __inline __m256 __DEFAULT_FN_ATTRS
+_mm256_sqrt_ps(__m256 __a)
+{
+ return (__m256)__builtin_ia32_sqrtps256((__v8sf)__a);
+}
+
+/// \brief Calculates the reciprocal square roots of the values in a 256-bit
+/// vector of [8 x float].
+///
+/// \headerfile <x86intrin.h>
+///
+/// This intrinsic corresponds to the \c VRSQRTPS / RSQRTPS instruction.
+///
+/// \param __a
+/// A 256-bit vector of [8 x float].
+/// \returns A 256-bit vector of [8 x float] containing the reciprocal square
+/// roots of the values in the operand.
+static __inline __m256 __DEFAULT_FN_ATTRS
+_mm256_rsqrt_ps(__m256 __a)
+{
+ return (__m256)__builtin_ia32_rsqrtps256((__v8sf)__a);
+}
+
+/// \brief Calculates the reciprocals of the values in a 256-bit vector of
+/// [8 x float].
+///
+/// \headerfile <x86intrin.h>
+///
+/// This intrinsic corresponds to the \c VRCPPS / RCPPS instruction.
+///
+/// \param __a
+/// A 256-bit vector of [8 x float].
+/// \returns A 256-bit vector of [8 x float] containing the reciprocals of the
+/// values in the operand.
+static __inline __m256 __DEFAULT_FN_ATTRS
+_mm256_rcp_ps(__m256 __a)
+{
+ return (__m256)__builtin_ia32_rcpps256((__v8sf)__a);
+}
+
+/// \brief Rounds the values in a 256-bit vector of [4 x double] as specified
+/// by the byte operand. The source values are rounded to integer values and
+/// returned as 64-bit double-precision floating-point values.
+///
+/// \headerfile <x86intrin.h>
+///
+/// \code
+/// __m256d _mm256_round_pd(__m256d V, const int M);
+/// \endcode
+///
+/// This intrinsic corresponds to the \c VROUNDPD / ROUNDPD instruction.
+///
+/// \param V
+/// A 256-bit vector of [4 x double].
+/// \param M
+/// An integer value that specifies the rounding operation.
+/// Bits [7:4] are reserved.
+/// Bit [3] is a precision exception value:
+/// 0: A normal PE exception is used.
+/// 1: The PE field is not updated.
+/// Bit [2] is the rounding control source:
+/// 0: Use bits [1:0] of M.
+/// 1: Use the current MXCSR setting.
+/// Bits [1:0] contain the rounding control definition:
+/// 00: Nearest.
+/// 01: Downward (toward negative infinity).
+/// 10: Upward (toward positive infinity).
+/// 11: Truncated.
+/// \returns A 256-bit vector of [4 x double] containing the rounded values.
+#define _mm256_round_pd(V, M) __extension__ ({ \
+ (__m256d)__builtin_ia32_roundpd256((__v4df)(__m256d)(V), (M)); })
+
+/// \brief Rounds the values stored in a 256-bit vector of [8 x float] as
+/// specified by the byte operand. The source values are rounded to integer
+/// values and returned as floating-point values.
+///
+/// \headerfile <x86intrin.h>
+///
+/// \code
+/// __m256 _mm256_round_ps(__m256 V, const int M);
+/// \endcode
+///
+/// This intrinsic corresponds to the \c VROUNDPS / ROUNDPS instruction.
+///
+/// \param V
+/// A 256-bit vector of [8 x float].
+/// \param M
+/// An integer value that specifies the rounding operation.
+/// Bits [7:4] are reserved.
+/// Bit [3] is a precision exception value:
+/// 0: A normal PE exception is used.
+/// 1: The PE field is not updated.
+/// Bit [2] is the rounding control source:
+/// 0: Use bits [1:0] of M.
+/// 1: Use the current MXCSR setting.
+/// Bits [1:0] contain the rounding control definition:
+/// 00: Nearest.
+/// 01: Downward (toward negative infinity).
+/// 10: Upward (toward positive infinity).
+/// 11: Truncated.
+/// \returns A 256-bit vector of [8 x float] containing the rounded values.
+#define _mm256_round_ps(V, M) __extension__ ({ \
+ (__m256)__builtin_ia32_roundps256((__v8sf)(__m256)(V), (M)); })
+
+/// \brief Rounds up the values stored in a 256-bit vector of [4 x double]. The
+/// source values are rounded up to integer values and returned as 64-bit
+/// double-precision floating-point values.
+///
+/// \headerfile <x86intrin.h>
+///
+/// \code
+/// __m256d _mm256_ceil_pd(__m256d V);
+/// \endcode
+///
+/// This intrinsic corresponds to the \c VROUNDPD / ROUNDPD instruction.
+///
+/// \param V
+/// A 256-bit vector of [4 x double].
+/// \returns A 256-bit vector of [4 x double] containing the rounded up values.
+#define _mm256_ceil_pd(V) _mm256_round_pd((V), _MM_FROUND_CEIL)
+
+/// \brief Rounds down the values stored in a 256-bit vector of [4 x double].
+/// The source values are rounded down to integer values and returned as
+/// 64-bit double-precision floating-point values.
+///
+/// \headerfile <x86intrin.h>
+///
+/// \code
+/// __m256d _mm256_floor_pd(__m256d V);
+/// \endcode
+///
+/// This intrinsic corresponds to the \c VROUNDPD / ROUNDPD instruction.
+///
+/// \param V
+/// A 256-bit vector of [4 x double].
+/// \returns A 256-bit vector of [4 x double] containing the rounded down
+/// values.
+#define _mm256_floor_pd(V) _mm256_round_pd((V), _MM_FROUND_FLOOR)
+
+/// \brief Rounds up the values stored in a 256-bit vector of [8 x float]. The
+/// source values are rounded up to integer values and returned as
+/// floating-point values.
+///
+/// \headerfile <x86intrin.h>
+///
+/// \code
+/// __m256 _mm256_ceil_ps(__m256 V);
+/// \endcode
+///
+/// This intrinsic corresponds to the \c VROUNDPS / ROUNDPS instruction.
+///
+/// \param V
+/// A 256-bit vector of [8 x float].
+/// \returns A 256-bit vector of [8 x float] containing the rounded up values.
+#define _mm256_ceil_ps(V) _mm256_round_ps((V), _MM_FROUND_CEIL)
+
+/// \brief Rounds down the values stored in a 256-bit vector of [8 x float]. The
+/// source values are rounded down to integer values and returned as
+/// floating-point values.
+///
+/// \headerfile <x86intrin.h>
+///
+/// \code
+/// __m256 _mm256_floor_ps(__m256 V);
+/// \endcode
+///
+/// This intrinsic corresponds to the \c VROUNDPS / ROUNDPS instruction.
+///
+/// \param V
+/// A 256-bit vector of [8 x float].
+/// \returns A 256-bit vector of [8 x float] containing the rounded down values.
+#define _mm256_floor_ps(V) _mm256_round_ps((V), _MM_FROUND_FLOOR)
+
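+/* Illustrative sketch (not part of this header): an explicit rounding-control
+ * immediate. _MM_FROUND_TO_NEAREST_INT sets bits [1:0] to 00 and
+ * _MM_FROUND_NO_EXC sets bit [3], suppressing precision exceptions.
+ * Hypothetical helper; the _MM_FROUND_* constants come from <smmintrin.h>. */
+static __inline __m256d
+example_round_nearest(__m256d v)
+{
+  return _mm256_round_pd(v, _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC);
+}
+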
+/* Logical */
+/// \brief Performs a bitwise AND of two 256-bit vectors of [4 x double].
+///
+/// \headerfile <x86intrin.h>
+///
+/// This intrinsic corresponds to the \c VANDPD / ANDPD instruction.
+///
+/// \param __a
+/// A 256-bit vector of [4 x double] containing one of the source operands.
+/// \param __b
+/// A 256-bit vector of [4 x double] containing one of the source operands.
+/// \returns A 256-bit vector of [4 x double] containing the bitwise AND of the
+/// values between both operands.
+static __inline __m256d __DEFAULT_FN_ATTRS
+_mm256_and_pd(__m256d __a, __m256d __b)
+{
+ return (__m256d)((__v4du)__a & (__v4du)__b);
+}
+
+/// \brief Performs a bitwise AND of two 256-bit vectors of [8 x float].
+///
+/// \headerfile <x86intrin.h>
+///
+/// This intrinsic corresponds to the \c VANDPS / ANDPS instruction.
+///
+/// \param __a
+/// A 256-bit vector of [8 x float] containing one of the source operands.
+/// \param __b
+/// A 256-bit vector of [8 x float] containing one of the source operands.
+/// \returns A 256-bit vector of [8 x float] containing the bitwise AND of the
+/// values between both operands.
+static __inline __m256 __DEFAULT_FN_ATTRS
+_mm256_and_ps(__m256 __a, __m256 __b)
+{
+ return (__m256)((__v8su)__a & (__v8su)__b);
+}
+
+/// \brief Performs a bitwise AND of two 256-bit vectors of [4 x double], using
+/// the one's complement of the values contained in the first source operand.
+///
+/// \headerfile <x86intrin.h>
+///
+/// This intrinsic corresponds to the \c VANDNPD / ANDNPD instruction.
+///
+/// \param __a
+/// A 256-bit vector of [4 x double] containing the left source operand. The
+/// one's complement of this value is used in the bitwise AND.
+/// \param __b
+/// A 256-bit vector of [4 x double] containing the right source operand.
+/// \returns A 256-bit vector of [4 x double] containing the bitwise AND of the
+/// values of the second operand and the one's complement of the first
+/// operand.
+static __inline __m256d __DEFAULT_FN_ATTRS
+_mm256_andnot_pd(__m256d __a, __m256d __b)
+{
+ return (__m256d)(~(__v4du)__a & (__v4du)__b);
+}
+
+/// \brief Performs a bitwise AND of two 256-bit vectors of [8 x float], using
+/// the one's complement of the values contained in the first source operand.
+///
+/// \headerfile <x86intrin.h>
+///
+/// This intrinsic corresponds to the \c VANDNPS / ANDNPS instruction.
+///
+/// \param __a
+/// A 256-bit vector of [8 x float] containing the left source operand. The
+/// one's complement of this value is used in the bitwise AND.
+/// \param __b
+/// A 256-bit vector of [8 x float] containing the right source operand.
+/// \returns A 256-bit vector of [8 x float] containing the bitwise AND of the
+/// values of the second operand and the one's complement of the first
+/// operand.
+static __inline __m256 __DEFAULT_FN_ATTRS
+_mm256_andnot_ps(__m256 __a, __m256 __b)
+{
+ return (__m256)(~(__v8su)__a & (__v8su)__b);
+}
+
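+/* Illustrative sketch (not part of this header): andnot with a sign-bit-only
+ * mask computes |x| lane-wise, since -0.0 has only the sign bit set.
+ * Hypothetical helper; _mm256_set1_pd appears later in this header. */
+static __inline __m256d
+example_abs_pd(__m256d v)
+{
+  return _mm256_andnot_pd(_mm256_set1_pd(-0.0), v);
+}
+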
+/// \brief Performs a bitwise OR of two 256-bit vectors of [4 x double].
+///
+/// \headerfile <x86intrin.h>
+///
+/// This intrinsic corresponds to the \c VORPD / ORPD instruction.
+///
+/// \param __a
+/// A 256-bit vector of [4 x double] containing one of the source operands.
+/// \param __b
+/// A 256-bit vector of [4 x double] containing one of the source operands.
+/// \returns A 256-bit vector of [4 x double] containing the bitwise OR of the
+/// values between both operands.
+static __inline __m256d __DEFAULT_FN_ATTRS
+_mm256_or_pd(__m256d __a, __m256d __b)
+{
+ return (__m256d)((__v4du)__a | (__v4du)__b);
+}
+
+/// \brief Performs a bitwise OR of two 256-bit vectors of [8 x float].
+///
+/// \headerfile <x86intrin.h>
+///
+/// This intrinsic corresponds to the \c VORPS / ORPS instruction.
+///
+/// \param __a
+/// A 256-bit vector of [8 x float] containing one of the source operands.
+/// \param __b
+/// A 256-bit vector of [8 x float] containing one of the source operands.
+/// \returns A 256-bit vector of [8 x float] containing the bitwise OR of the
+/// values between both operands.
+static __inline __m256 __DEFAULT_FN_ATTRS
+_mm256_or_ps(__m256 __a, __m256 __b)
+{
+ return (__m256)((__v8su)__a | (__v8su)__b);
+}
+
+/// \brief Performs a bitwise XOR of two 256-bit vectors of [4 x double].
+///
+/// \headerfile <x86intrin.h>
+///
+/// This intrinsic corresponds to the \c VXORPD / XORPD instruction.
+///
+/// \param __a
+/// A 256-bit vector of [4 x double] containing one of the source operands.
+/// \param __b
+/// A 256-bit vector of [4 x double] containing one of the source operands.
+/// \returns A 256-bit vector of [4 x double] containing the bitwise XOR of the
+/// values between both operands.
+static __inline __m256d __DEFAULT_FN_ATTRS
+_mm256_xor_pd(__m256d __a, __m256d __b)
+{
+ return (__m256d)((__v4du)__a ^ (__v4du)__b);
+}
+
+/// \brief Performs a bitwise XOR of two 256-bit vectors of [8 x float].
+///
+/// \headerfile <x86intrin.h>
+///
+/// This intrinsic corresponds to the \c VXORPS / XORPS instruction.
+///
+/// \param __a
+/// A 256-bit vector of [8 x float] containing one of the source operands.
+/// \param __b
+/// A 256-bit vector of [8 x float] containing one of the source operands.
+/// \returns A 256-bit vector of [8 x float] containing the bitwise XOR of the
+/// values between both operands.
+static __inline __m256 __DEFAULT_FN_ATTRS
+_mm256_xor_ps(__m256 __a, __m256 __b)
+{
+ return (__m256)((__v8su)__a ^ (__v8su)__b);
+}
+
+/* Horizontal arithmetic */
+/// \brief Horizontally adds the adjacent pairs of values contained in two
+/// 256-bit vectors of [4 x double].
+///
+/// \headerfile <x86intrin.h>
+///
+/// This intrinsic corresponds to the \c VHADDPD / HADDPD instruction.
+///
+/// \param __a
+/// A 256-bit vector of [4 x double] containing one of the source operands.
+/// The horizontal sums of the values are returned in the even-indexed
+/// elements of a vector of [4 x double].
+/// \param __b
+/// A 256-bit vector of [4 x double] containing one of the source operands.
+/// The horizontal sums of the values are returned in the odd-indexed
+/// elements of a vector of [4 x double].
+/// \returns A 256-bit vector of [4 x double] containing the horizontal sums of
+/// both operands.
+static __inline __m256d __DEFAULT_FN_ATTRS
+_mm256_hadd_pd(__m256d __a, __m256d __b)
+{
+ return (__m256d)__builtin_ia32_haddpd256((__v4df)__a, (__v4df)__b);
+}
+
+/// \brief Horizontally adds the adjacent pairs of values contained in two
+/// 256-bit vectors of [8 x float].
+///
+/// \headerfile <x86intrin.h>
+///
+/// This intrinsic corresponds to the \c VHADDPS / HADDPS instruction.
+///
+/// \param __a
+/// A 256-bit vector of [8 x float] containing one of the source operands.
+/// The horizontal sums of the values are returned in the elements with
+/// index 0, 1, 4, 5 of a vector of [8 x float].
+/// \param __b
+/// A 256-bit vector of [8 x float] containing one of the source operands.
+/// The horizontal sums of the values are returned in the elements with
+/// index 2, 3, 6, 7 of a vector of [8 x float].
+/// \returns A 256-bit vector of [8 x float] containing the horizontal sums of
+/// both operands.
+static __inline __m256 __DEFAULT_FN_ATTRS
+_mm256_hadd_ps(__m256 __a, __m256 __b)
+{
+ return (__m256)__builtin_ia32_haddps256((__v8sf)__a, (__v8sf)__b);
+}
+
+/// \brief Horizontally subtracts the adjacent pairs of values contained in two
+/// 256-bit vectors of [4 x double].
+///
+/// \headerfile <x86intrin.h>
+///
+/// This intrinsic corresponds to the \c VHSUBPD / HSUBPD instruction.
+///
+/// \param __a
+/// A 256-bit vector of [4 x double] containing one of the source operands.
+/// The horizontal differences between the values are returned in the
+/// even-indexed elements of a vector of [4 x double].
+/// \param __b
+/// A 256-bit vector of [4 x double] containing one of the source operands.
+/// The horizontal differences between the values are returned in the
+/// odd-indexed elements of a vector of [4 x double].
+/// \returns A 256-bit vector of [4 x double] containing the horizontal
+/// differences of both operands.
+static __inline __m256d __DEFAULT_FN_ATTRS
+_mm256_hsub_pd(__m256d __a, __m256d __b)
+{
+ return (__m256d)__builtin_ia32_hsubpd256((__v4df)__a, (__v4df)__b);
+}
+
+/// \brief Horizontally subtracts the adjacent pairs of values contained in two
+/// 256-bit vectors of [8 x float].
+///
+/// \headerfile <x86intrin.h>
+///
+/// This intrinsic corresponds to the \c VHSUBPS / HSUBPS instruction.
+///
+/// \param __a
+/// A 256-bit vector of [8 x float] containing one of the source operands.
+/// The horizontal differences between the values are returned in the
+/// elements with index 0, 1, 4, 5 of a vector of [8 x float].
+/// \param __b
+/// A 256-bit vector of [8 x float] containing one of the source operands.
+/// The horizontal differences between the values are returned in the
+/// elements with index 2, 3, 6, 7 of a vector of [8 x float].
+/// \returns A 256-bit vector of [8 x float] containing the horizontal
+/// differences of both operands.
+static __inline __m256 __DEFAULT_FN_ATTRS
+_mm256_hsub_ps(__m256 __a, __m256 __b)
+{
+ return (__m256)__builtin_ia32_hsubps256((__v8sf)__a, (__v8sf)__b);
+}
+
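+/* Illustrative sketch (not part of this header): hadd pairs stay within each
+ * 128-bit lane, so a full horizontal sum still needs one cross-lane step.
+ * Hypothetical helper; the cast/extract helpers appear later in this header
+ * and _mm_add_sd / _mm_cvtsd_f64 come from <emmintrin.h>. */
+static __inline double
+example_sum4_pd(__m256d v)
+{
+  __m256d s = _mm256_hadd_pd(v, v);          /* {v0+v1, v0+v1, v2+v3, v2+v3} */
+  __m128d lo = _mm256_castpd256_pd128(s);
+  __m128d hi = _mm256_extractf128_pd(s, 1);
+  return _mm_cvtsd_f64(_mm_add_sd(lo, hi));
+}
+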
+/* Vector permutations */
+/// \brief Copies the values in a 128-bit vector of [2 x double] as specified
+/// by the 128-bit integer vector operand.
+///
+/// \headerfile <x86intrin.h>
+///
+/// This intrinsic corresponds to the \c VPERMILPD / PERMILPD instruction.
+///
+/// \param __a
+/// A 128-bit vector of [2 x double].
+/// \param __c
+/// A 128-bit integer vector operand specifying how the values are to be
+/// copied.
+/// Bit [1]:
+/// 0: Bits [63:0] of the source are copied to bits [63:0] of the
+/// returned vector.
+/// 1: Bits [127:64] of the source are copied to bits [63:0] of the
+/// returned vector.
+/// Bit [65]:
+/// 0: Bits [63:0] of the source are copied to bits [127:64] of the
+/// returned vector.
+/// 1: Bits [127:64] of the source are copied to bits [127:64] of the
+/// returned vector.
+/// \returns A 128-bit vector of [2 x double] containing the copied values.
+static __inline __m128d __DEFAULT_FN_ATTRS
+_mm_permutevar_pd(__m128d __a, __m128i __c)
+{
+ return (__m128d)__builtin_ia32_vpermilvarpd((__v2df)__a, (__v2di)__c);
+}
+
+/// \brief Copies the values in a 256-bit vector of [4 x double] as
+/// specified by the 256-bit integer vector operand.
+///
+/// \headerfile <x86intrin.h>
+///
+/// This intrinsic corresponds to the \c VPERMILPD / PERMILPD instruction.
+///
+/// \param __a
+/// A 256-bit vector of [4 x double].
+/// \param __c
+/// A 256-bit integer vector operand specifying how the values are to be
+/// copied.
+/// Bit [1]:
+/// 0: Bits [63:0] of the source are copied to bits [63:0] of the
+/// returned vector.
+/// 1: Bits [127:64] of the source are copied to bits [63:0] of the
+/// returned vector.
+/// Bit [65]:
+/// 0: Bits [63:0] of the source are copied to bits [127:64] of the
+/// returned vector.
+/// 1: Bits [127:64] of the source are copied to bits [127:64] of the
+/// returned vector.
+/// Bit [129]:
+/// 0: Bits [191:128] of the source are copied to bits [191:128] of the
+/// returned vector.
+/// 1: Bits [255:192] of the source are copied to bits [191:128] of the
+/// returned vector.
+/// Bit [193]:
+/// 0: Bits [191:128] of the source are copied to bits [255:192] of the
+/// returned vector.
+/// 1: Bits [255:192] of the source are copied to bits [255:192] of the
+/// returned vector.
+/// \returns A 256-bit vector of [4 x double] containing the copied values.
+static __inline __m256d __DEFAULT_FN_ATTRS
+_mm256_permutevar_pd(__m256d __a, __m256i __c)
+{
+ return (__m256d)__builtin_ia32_vpermilvarpd256((__v4df)__a, (__v4di)__c);
+}
+
+/// \brief Copies the values stored in a 128-bit vector of [4 x float] as
+/// specified by the 128-bit integer vector operand.
+///
+/// \headerfile <x86intrin.h>
+///
+/// This intrinsic corresponds to the \c VPERMILPS / PERMILPS instruction.
+///
+/// \param __a
+/// A 128-bit vector of [4 x float].
+/// \param __c
+/// A 128-bit integer vector operand specifying how the values are to be
+/// copied.
+/// Bits [1:0]:
+/// 00: Bits [31:0] of the source are copied to bits [31:0] of the
+/// returned vector.
+/// 01: Bits [63:32] of the source are copied to bits [31:0] of the
+/// returned vector.
+/// 10: Bits [95:64] of the source are copied to bits [31:0] of the
+/// returned vector.
+/// 11: Bits [127:96] of the source are copied to bits [31:0] of the
+/// returned vector.
+/// Bits [33:32]:
+/// 00: Bits [31:0] of the source are copied to bits [63:32] of the
+/// returned vector.
+/// 01: Bits [63:32] of the source are copied to bits [63:32] of the
+/// returned vector.
+/// 10: Bits [95:64] of the source are copied to bits [63:32] of the
+/// returned vector.
+/// 11: Bits [127:96] of the source are copied to bits [63:32] of the
+/// returned vector.
+/// Bits [65:64]:
+/// 00: Bits [31:0] of the source are copied to bits [95:64] of the
+/// returned vector.
+/// 01: Bits [63:32] of the source are copied to bits [95:64] of the
+/// returned vector.
+/// 10: Bits [95:64] of the source are copied to bits [95:64] of the
+/// returned vector.
+/// 11: Bits [127:96] of the source are copied to bits [95:64] of the
+/// returned vector.
+/// Bits [97:96]:
+/// 00: Bits [31:0] of the source are copied to bits [127:96] of the
+/// returned vector.
+/// 01: Bits [63:32] of the source are copied to bits [127:96] of the
+/// returned vector.
+/// 10: Bits [95:64] of the source are copied to bits [127:96] of the
+/// returned vector.
+/// 11: Bits [127:96] of the source are copied to bits [127:96] of the
+/// returned vector.
+/// \returns A 128-bit vector of [4 x float] containing the copied values.
+static __inline __m128 __DEFAULT_FN_ATTRS
+_mm_permutevar_ps(__m128 __a, __m128i __c)
+{
+ return (__m128)__builtin_ia32_vpermilvarps((__v4sf)__a, (__v4si)__c);
+}
+
+/// \brief Copies the values stored in a 256-bit vector of [8 x float] as
+/// specified by the 256-bit integer vector operand.
+///
+/// \headerfile <x86intrin.h>
+///
+/// This intrinsic corresponds to the \c VPERMILPS / PERMILPS instruction.
+///
+/// \param __a
+/// A 256-bit vector of [8 x float].
+/// \param __c
+/// A 256-bit integer vector operand specifying how the values are to be
+/// copied.
+/// Bits [1:0]:
+/// 00: Bits [31:0] of the source are copied to bits [31:0] of the
+/// returned vector.
+/// 01: Bits [63:32] of the source are copied to bits [31:0] of the
+/// returned vector.
+/// 10: Bits [95:64] of the source are copied to bits [31:0] of the
+/// returned vector.
+/// 11: Bits [127:96] of the source are copied to bits [31:0] of the
+/// returned vector.
+/// Bits [33:32]:
+/// 00: Bits [31:0] of the source are copied to bits [63:32] of the
+/// returned vector.
+/// 01: Bits [63:32] of the source are copied to bits [63:32] of the
+/// returned vector.
+/// 10: Bits [95:64] of the source are copied to bits [63:32] of the
+/// returned vector.
+/// 11: Bits [127:96] of the source are copied to bits [63:32] of the
+/// returned vector.
+/// Bits [65:64]:
+/// 00: Bits [31:0] of the source are copied to bits [95:64] of the
+/// returned vector.
+/// 01: Bits [63:32] of the source are copied to bits [95:64] of the
+/// returned vector.
+/// 10: Bits [95:64] of the source are copied to bits [95:64] of the
+/// returned vector.
+/// 11: Bits [127:96] of the source are copied to bits [95:64] of the
+/// returned vector.
+/// Bits [97:96]:
+/// 00: Bits [31:0] of the source are copied to bits [127:96] of the
+/// returned vector.
+/// 01: Bits [63:32] of the source are copied to bits [127:96] of the
+/// returned vector.
+/// 10: Bits [95:64] of the source are copied to bits [127:96] of the
+/// returned vector.
+/// 11: Bits [127:96] of the source are copied to bits [127:96] of the
+/// returned vector.
+/// Bits [129:128]:
+/// 00: Bits [159:128] of the source are copied to bits [159:128] of the
+/// returned vector.
+/// 01: Bits [191:160] of the source are copied to bits [159:128] of the
+/// returned vector.
+/// 10: Bits [223:192] of the source are copied to bits [159:128] of the
+/// returned vector.
+/// 11: Bits [255:224] of the source are copied to bits [159:128] of the
+/// returned vector.
+/// Bits [161:160]:
+/// 00: Bits [159:128] of the source are copied to bits [191:160] of the
+/// returned vector.
+/// 01: Bits [191:160] of the source are copied to bits [191:160] of the
+/// returned vector.
+/// 10: Bits [223:192] of the source are copied to bits [191:160] of the
+/// returned vector.
+/// 11: Bits [255:224] of the source are copied to bits [191:160] of the
+/// returned vector.
+/// Bits [193:192]:
+/// 00: Bits [159:128] of the source are copied to bits [223:192] of the
+/// returned vector.
+/// 01: Bits [191:160] of the source are copied to bits [223:192] of the
+/// returned vector.
+/// 10: Bits [223:192] of the source are copied to bits [223:192] of the
+/// returned vector.
+/// 11: Bits [255:224] of the source are copied to bits [223:192] of the
+/// returned vector.
+/// Bits [225:224]:
+/// 00: Bits [159:128] of the source are copied to bits [255:224] of the
+/// returned vector.
+/// 01: Bits [191:160] of the source are copied to bits [255:224] of the
+/// returned vector.
+/// 10: Bits [223:192] of the source are copied to bits [255:224] of the
+/// returned vector.
+/// 11: Bits [255:224] of the source are copied to bits [255:224] of the
+/// returned vector.
+/// \returns A 256-bit vector of [8 x float] containing the copied values.
+static __inline __m256 __DEFAULT_FN_ATTRS
+_mm256_permutevar_ps(__m256 __a, __m256i __c)
+{
+ return (__m256)__builtin_ia32_vpermilvarps256((__v8sf)__a, (__v8si)__c);
+}
+
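+/* Illustrative sketch (not part of this header): with the variable form each
+ * 32-bit control selects only within its own 128-bit lane, so reversing the
+ * whole vector is not expressible -- but reversing each lane is.
+ * Hypothetical helper; _mm256_setr_epi32 appears later in this header. */
+static __inline __m256
+example_reverse_each_lane(__m256 v)
+{
+  const __m256i idx = _mm256_setr_epi32(3, 2, 1, 0, 3, 2, 1, 0);
+  return _mm256_permutevar_ps(v, idx);
+}
+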
+/// \brief Copies the values in a 128-bit vector of [2 x double] as
+/// specified by the immediate integer operand.
+///
+/// \headerfile <x86intrin.h>
+///
+/// \code
+/// __m128d _mm_permute_pd(__m128d A, const int C);
+/// \endcode
+///
+/// This intrinsic corresponds to the \c VPERMILPD / PERMILPD instruction.
+///
+/// \param A
+/// A 128-bit vector of [2 x double].
+/// \param C
+/// An immediate integer operand specifying how the values are to be copied.
+/// Bit [0]:
+/// 0: Bits [63:0] of the source are copied to bits [63:0] of the
+/// returned vector.
+/// 1: Bits [127:64] of the source are copied to bits [63:0] of the
+/// returned vector.
+/// Bit [1]:
+/// 0: Bits [63:0] of the source are copied to bits [127:64] of the
+/// returned vector.
+/// 1: Bits [127:64] of the source are copied to bits [127:64] of the
+/// returned vector.
+/// \returns A 128-bit vector of [2 x double] containing the copied values.
+#define _mm_permute_pd(A, C) __extension__ ({ \
+ (__m128d)__builtin_shufflevector((__v2df)(__m128d)(A), \
+ (__v2df)_mm_undefined_pd(), \
+ ((C) >> 0) & 0x1, ((C) >> 1) & 0x1); })
+
+/// \brief Copies the values in a 256-bit vector of [4 x double] as
+/// specified by the immediate integer operand.
+///
+/// \headerfile <x86intrin.h>
+///
+/// \code
+/// __m256d _mm256_permute_pd(__m256d A, const int C);
+/// \endcode
+///
+/// This intrinsic corresponds to the \c VPERMILPD / PERMILPD instruction.
+///
+/// \param A
+/// A 256-bit vector of [4 x double].
+/// \param C
+/// An immediate integer operand specifying how the values are to be copied.
+/// Bit [0]:
+/// 0: Bits [63:0] of the source are copied to bits [63:0] of the
+/// returned vector.
+/// 1: Bits [127:64] of the source are copied to bits [63:0] of the
+/// returned vector.
+/// Bit [1]:
+/// 0: Bits [63:0] of the source are copied to bits [127:64] of the
+/// returned vector.
+/// 1: Bits [127:64] of the source are copied to bits [127:64] of the
+/// returned vector.
+/// Bit [2]:
+/// 0: Bits [191:128] of the source are copied to bits [191:128] of the
+/// returned vector.
+/// 1: Bits [255:192] of the source are copied to bits [191:128] of the
+/// returned vector.
+/// Bit [3]:
+/// 0: Bits [191:128] of the source are copied to bits [255:192] of the
+/// returned vector.
+/// 1: Bits [255:192] of the source are copied to bits [255:192] of the
+/// returned vector.
+/// \returns A 256-bit vector of [4 x double] containing the copied values.
+#define _mm256_permute_pd(A, C) __extension__ ({ \
+ (__m256d)__builtin_shufflevector((__v4df)(__m256d)(A), \
+ (__v4df)_mm256_undefined_pd(), \
+ 0 + (((C) >> 0) & 0x1), \
+ 0 + (((C) >> 1) & 0x1), \
+ 2 + (((C) >> 2) & 0x1), \
+ 2 + (((C) >> 3) & 0x1)); })
+
+/// \brief Copies the values in a 128-bit vector of [4 x float] as
+/// specified by the immediate integer operand.
+///
+/// \headerfile <x86intrin.h>
+///
+/// \code
+/// __m128 _mm_permute_ps(__m128 A, const int C);
+/// \endcode
+///
+/// This intrinsic corresponds to the \c VPERMILPS / PERMILPS instruction.
+///
+/// \param A
+/// A 128-bit vector of [4 x float].
+/// \param C
+/// An immediate integer operand specifying how the values are to be copied.
+/// Bits [1:0]:
+/// 00: Bits [31:0] of the source are copied to bits [31:0] of the
+/// returned vector.
+/// 01: Bits [63:32] of the source are copied to bits [31:0] of the
+/// returned vector.
+/// 10: Bits [95:64] of the source are copied to bits [31:0] of the
+/// returned vector.
+/// 11: Bits [127:96] of the source are copied to bits [31:0] of the
+/// returned vector.
+/// Bits [3:2]:
+/// 00: Bits [31:0] of the source are copied to bits [63:32] of the
+/// returned vector.
+/// 01: Bits [63:32] of the source are copied to bits [63:32] of the
+/// returned vector.
+/// 10: Bits [95:64] of the source are copied to bits [63:32] of the
+/// returned vector.
+/// 11: Bits [127:96] of the source are copied to bits [63:32] of the
+/// returned vector.
+/// Bits [5:4]:
+/// 00: Bits [31:0] of the source are copied to bits [95:64] of the
+/// returned vector.
+/// 01: Bits [63:32] of the source are copied to bits [95:64] of the
+/// returned vector.
+/// 10: Bits [95:64] of the source are copied to bits [95:64] of the
+/// returned vector.
+/// 11: Bits [127:96] of the source are copied to bits [95:64] of the
+/// returned vector.
+/// Bits [7:6]:
+/// 00: Bits [31:0] of the source are copied to bits [127:96] of the
+/// returned vector.
+/// 01: Bits [63:32] of the source are copied to bits [127:96] of the
+/// returned vector.
+/// 10: Bits [95:64] of the source are copied to bits [127:96] of the
+/// returned vector.
+/// 11: Bits [127:96] of the source are copied to bits [127:96] of the
+/// returned vector.
+/// \returns A 128-bit vector of [4 x float] containing the copied values.
+#define _mm_permute_ps(A, C) __extension__ ({ \
+ (__m128)__builtin_shufflevector((__v4sf)(__m128)(A), \
+ (__v4sf)_mm_undefined_ps(), \
+ ((C) >> 0) & 0x3, ((C) >> 2) & 0x3, \
+ ((C) >> 4) & 0x3, ((C) >> 6) & 0x3); })
+
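+/* Illustrative sketch (not part of this header): imm 0xAA packs the 2-bit
+ * field value 2 into all four positions, broadcasting lane 2 across the
+ * vector. Hypothetical helper. */
+static __inline __m128
+example_broadcast_lane2(__m128 v)
+{
+  return _mm_permute_ps(v, 0xAA);
+}
+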
+/// \brief Copies the values in a 256-bit vector of [8 x float] as
+/// specified by the immediate integer operand.
+///
+/// \headerfile <x86intrin.h>
+///
+/// \code
+/// __m256 _mm256_permute_ps(__m256 A, const int C);
+/// \endcode
+///
+/// This intrinsic corresponds to the \c VPERMILPS / PERMILPS instruction.
+///
+/// \param A
+/// A 256-bit vector of [8 x float].
+/// \param C
+/// An immediate integer operand specifying how the values are to be copied.
+/// Bits [1:0]:
+/// 00: Bits [31:0] of the source are copied to bits [31:0] of the
+/// returned vector.
+/// 01: Bits [63:32] of the source are copied to bits [31:0] of the
+/// returned vector.
+/// 10: Bits [95:64] of the source are copied to bits [31:0] of the
+/// returned vector.
+/// 11: Bits [127:96] of the source are copied to bits [31:0] of the
+/// returned vector.
+/// Bits [3:2]:
+/// 00: Bits [31:0] of the source are copied to bits [63:32] of the
+/// returned vector.
+/// 01: Bits [63:32] of the source are copied to bits [63:32] of the
+/// returned vector.
+/// 10: Bits [95:64] of the source are copied to bits [63:32] of the
+/// returned vector.
+/// 11: Bits [127:96] of the source are copied to bits [63:32] of the
+/// returned vector.
+/// Bits [5:4]:
+/// 00: Bits [31:0] of the source are copied to bits [95:64] of the
+/// returned vector.
+/// 01: Bits [63:32] of the source are copied to bits [95:64] of the
+/// returned vector.
+/// 10: Bits [95:64] of the source are copied to bits [95:64] of the
+/// returned vector.
+/// 11: Bits [127:96] of the source are copied to bits [95:64] of the
+/// returned vector.
+/// Bits [7:6]:
+/// 00: Bits [31:0] of the source are copied to bits [127:96] of the
+/// returned vector.
+/// 01: Bits [63:32] of the source are copied to bits [127:96] of the
+/// returned vector.
+/// 10: Bits [95:64] of the source are copied to bits [127:96] of the
+/// returned vector.
+/// 11: Bits [127:96] of the source are copied to bits [127:96] of the
+/// returned vector.
+///    Bits [1:0], reused to select within the upper 128 bits:
+/// 00: Bits [159:128] of the source are copied to bits [159:128] of the
+/// returned vector.
+/// 01: Bits [191:160] of the source are copied to bits [159:128] of the
+/// returned vector.
+/// 10: Bits [223:192] of the source are copied to bits [159:128] of the
+/// returned vector.
+/// 11: Bits [255:224] of the source are copied to bits [159:128] of the
+/// returned vector.
+/// Bits [3:2]:
+/// 00: Bits [159:128] of the source are copied to bits [191:160] of the
+/// returned vector.
+/// 01: Bits [191:160] of the source are copied to bits [191:160] of the
+/// returned vector.
+/// 10: Bits [223:192] of the source are copied to bits [191:160] of the
+/// returned vector.
+/// 11: Bits [255:224] of the source are copied to bits [191:160] of the
+/// returned vector.
+/// Bits [5:4]:
+/// 00: Bits [159:128] of the source are copied to bits [223:192] of the
+/// returned vector.
+/// 01: Bits [191:160] of the source are copied to bits [223:192] of the
+/// returned vector.
+/// 10: Bits [223:192] of the source are copied to bits [223:192] of the
+/// returned vector.
+/// 11: Bits [255:224] of the source are copied to bits [223:192] of the
+/// returned vector.
+/// Bits [7:6]:
+/// 00: Bits [159:128] of the source are copied to bits [255:224] of the
+/// returned vector.
+/// 01: Bits [191:160] of the source are copied to bits [255:224] of the
+/// returned vector.
+/// 10: Bits [223:192] of the source are copied to bits [255:224] of the
+/// returned vector.
+/// 11: Bits [255:224] of the source are copied to bits [255:224] of the
+/// returned vector.
+/// \returns A 256-bit vector of [8 x float] containing the copied values.
+#define _mm256_permute_ps(A, C) __extension__ ({ \
+ (__m256)__builtin_shufflevector((__v8sf)(__m256)(A), \
+ (__v8sf)_mm256_undefined_ps(), \
+ 0 + (((C) >> 0) & 0x3), \
+ 0 + (((C) >> 2) & 0x3), \
+ 0 + (((C) >> 4) & 0x3), \
+ 0 + (((C) >> 6) & 0x3), \
+ 4 + (((C) >> 0) & 0x3), \
+ 4 + (((C) >> 2) & 0x3), \
+ 4 + (((C) >> 4) & 0x3), \
+ 4 + (((C) >> 6) & 0x3)); })
+
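+/* Usage sketch (illustrative, with assumed example values): each 2-bit field
+ * of the immediate is applied independently to both 128-bit lanes, so
+ * C = 0x1B (fields 3, 2, 1, 0) reverses each lane:
+ *
+ *   __m256 v = _mm256_setr_ps(0, 1, 2, 3, 4, 5, 6, 7);
+ *   __m256 r = _mm256_permute_ps(v, 0x1B);
+ *   r is {3, 2, 1, 0, 7, 6, 5, 4}
+ */
+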
+/// \brief Permutes 128-bit data values stored in two 256-bit vectors of
+/// [4 x double], as specified by the immediate integer operand.
+///
+/// \headerfile <x86intrin.h>
+///
+/// \code
+/// __m256d _mm256_permute2f128_pd(__m256d V1, __m256d V2, const int M);
+/// \endcode
+///
+/// This intrinsic corresponds to the \c VPERM2F128 / PERM2F128 instruction.
+///
+/// \param V1
+/// A 256-bit vector of [4 x double].
+/// \param V2
+/// A 256-bit vector of [4 x double].
+/// \param M
+/// An immediate integer operand specifying how the values are to be
+/// permuted.
+/// Bits [1:0]:
+/// 00: Bits [127:0] of operand V1 are copied to bits [127:0] of the
+/// destination.
+/// 01: Bits [255:128] of operand V1 are copied to bits [127:0] of the
+/// destination.
+/// 10: Bits [127:0] of operand V2 are copied to bits [127:0] of the
+/// destination.
+/// 11: Bits [255:128] of operand V2 are copied to bits [127:0] of the
+/// destination.
+/// Bits [5:4]:
+/// 00: Bits [127:0] of operand V1 are copied to bits [255:128] of the
+/// destination.
+/// 01: Bits [255:128] of operand V1 are copied to bits [255:128] of the
+/// destination.
+/// 10: Bits [127:0] of operand V2 are copied to bits [255:128] of the
+/// destination.
+/// 11: Bits [255:128] of operand V2 are copied to bits [255:128] of the
+/// destination.
+/// \returns A 256-bit vector of [4 x double] containing the copied values.
+#define _mm256_permute2f128_pd(V1, V2, M) __extension__ ({ \
+ (__m256d)__builtin_ia32_vperm2f128_pd256((__v4df)(__m256d)(V1), \
+ (__v4df)(__m256d)(V2), (M)); })
+
+/// \brief Permutes 128-bit data values stored in two 256-bit vectors of
+/// [8 x float], as specified by the immediate integer operand.
+///
+/// \headerfile <x86intrin.h>
+///
+/// \code
+/// __m256 _mm256_permute2f128_ps(__m256 V1, __m256 V2, const int M);
+/// \endcode
+///
+/// This intrinsic corresponds to the \c VPERM2F128 / PERM2F128 instruction.
+///
+/// \param V1
+/// A 256-bit vector of [8 x float].
+/// \param V2
+/// A 256-bit vector of [8 x float].
+/// \param M
+/// An immediate integer operand specifying how the values are to be
+/// permuted.
+/// Bits [1:0]:
+/// 00: Bits [127:0] of operand V1 are copied to bits [127:0] of the
+/// destination.
+/// 01: Bits [255:128] of operand V1 are copied to bits [127:0] of the
+/// destination.
+/// 10: Bits [127:0] of operand V2 are copied to bits [127:0] of the
+/// destination.
+/// 11: Bits [255:128] of operand V2 are copied to bits [127:0] of the
+/// destination.
+/// Bits [5:4]:
+/// 00: Bits [127:0] of operand V1 are copied to bits [255:128] of the
+/// destination.
+/// 01: Bits [255:128] of operand V1 are copied to bits [255:128] of the
+/// destination.
+/// 10: Bits [127:0] of operand V2 are copied to bits [255:128] of the
+/// destination.
+/// 11: Bits [255:128] of operand V2 are copied to bits [255:128] of the
+/// destination.
+/// \returns A 256-bit vector of [8 x float] containing the copied values.
+#define _mm256_permute2f128_ps(V1, V2, M) __extension__ ({ \
+ (__m256)__builtin_ia32_vperm2f128_ps256((__v8sf)(__m256)(V1), \
+ (__v8sf)(__m256)(V2), (M)); })
+
+/// \brief Permutes 128-bit data values stored in two 256-bit integer vectors,
+/// as specified by the immediate integer operand.
+///
+/// \headerfile <x86intrin.h>
+///
+/// \code
+/// __m256i _mm256_permute2f128_si256(__m256i V1, __m256i V2, const int M);
+/// \endcode
+///
+/// This intrinsic corresponds to the \c VPERM2F128 / PERM2F128 instruction.
+///
+/// \param V1
+/// A 256-bit integer vector.
+/// \param V2
+/// A 256-bit integer vector.
+/// \param M
+/// An immediate integer operand specifying how the values are to be copied.
+/// Bits [1:0]:
+/// 00: Bits [127:0] of operand V1 are copied to bits [127:0] of the
+/// destination.
+/// 01: Bits [255:128] of operand V1 are copied to bits [127:0] of the
+/// destination.
+/// 10: Bits [127:0] of operand V2 are copied to bits [127:0] of the
+/// destination.
+/// 11: Bits [255:128] of operand V2 are copied to bits [127:0] of the
+/// destination.
+/// Bits [5:4]:
+/// 00: Bits [127:0] of operand V1 are copied to bits [255:128] of the
+/// destination.
+/// 01: Bits [255:128] of operand V1 are copied to bits [255:128] of the
+/// destination.
+/// 10: Bits [127:0] of operand V2 are copied to bits [255:128] of the
+/// destination.
+/// 11: Bits [255:128] of operand V2 are copied to bits [255:128] of the
+/// destination.
+/// \returns A 256-bit integer vector containing the copied values.
+#define _mm256_permute2f128_si256(V1, V2, M) __extension__ ({ \
+ (__m256i)__builtin_ia32_vperm2f128_si256((__v8si)(__m256i)(V1), \
+ (__v8si)(__m256i)(V2), (M)); })
+
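+/* Usage sketch (illustrative): passing the same vector as both operands with
+ * M = 0x01 selects the high half of V1 for the low half of the result and,
+ * via bits [5:4] = 00, the low half of V1 for the high half, swapping the
+ * two 128-bit lanes:
+ *
+ *   __m256d v = _mm256_setr_pd(1.0, 2.0, 3.0, 4.0);
+ *   __m256d r = _mm256_permute2f128_pd(v, v, 0x01);
+ *   r is {3.0, 4.0, 1.0, 2.0}
+ */
+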
+/* Vector Blend */
+/// \brief Merges 64-bit double-precision data values stored in either of the
+/// two 256-bit vectors of [4 x double], as specified by the immediate
+/// integer operand.
+///
+/// \headerfile <x86intrin.h>
+///
+/// \code
+/// __m256d _mm256_blend_pd(__m256d V1, __m256d V2, const int M);
+/// \endcode
+///
+/// This intrinsic corresponds to the \c VBLENDPD / BLENDPD instruction.
+///
+/// \param V1
+/// A 256-bit vector of [4 x double].
+/// \param V2
+/// A 256-bit vector of [4 x double].
+/// \param M
+/// An immediate integer operand, with mask bits [3:0] specifying how the
+/// values are to be copied. The position of the mask bit corresponds to the
+/// index of a copied value. When a mask bit is 0, the corresponding 64-bit
+/// element in operand V1 is copied to the same position in the destination.
+/// When a mask bit is 1, the corresponding 64-bit element in operand V2 is
+/// copied to the same position in the destination.
+/// \returns A 256-bit vector of [4 x double] containing the copied values.
+#define _mm256_blend_pd(V1, V2, M) __extension__ ({ \
+ (__m256d)__builtin_shufflevector((__v4df)(__m256d)(V1), \
+ (__v4df)(__m256d)(V2), \
+ (((M) & 0x01) ? 4 : 0), \
+ (((M) & 0x02) ? 5 : 1), \
+ (((M) & 0x04) ? 6 : 2), \
+ (((M) & 0x08) ? 7 : 3)); })
+
+/// \brief Merges 32-bit single-precision data values stored in either of the
+/// two 256-bit vectors of [8 x float], as specified by the immediate
+/// integer operand.
+///
+/// \headerfile <x86intrin.h>
+///
+/// \code
+/// __m256 _mm256_blend_ps(__m256 V1, __m256 V2, const int M);
+/// \endcode
+///
+/// This intrinsic corresponds to the \c VBLENDPS / BLENDPS instruction.
+///
+/// \param V1
+/// A 256-bit vector of [8 x float].
+/// \param V2
+/// A 256-bit vector of [8 x float].
+/// \param M
+/// An immediate integer operand, with mask bits [7:0] specifying how the
+/// values are to be copied. The position of the mask bit corresponds to the
+/// index of a copied value. When a mask bit is 0, the corresponding 32-bit
+/// element in operand V1 is copied to the same position in the destination.
+/// When a mask bit is 1, the corresponding 32-bit element in operand V2 is
+/// copied to the same position in the destination.
+/// \returns A 256-bit vector of [8 x float] containing the copied values.
+#define _mm256_blend_ps(V1, V2, M) __extension__ ({ \
+ (__m256)__builtin_shufflevector((__v8sf)(__m256)(V1), \
+ (__v8sf)(__m256)(V2), \
+ (((M) & 0x01) ? 8 : 0), \
+ (((M) & 0x02) ? 9 : 1), \
+ (((M) & 0x04) ? 10 : 2), \
+ (((M) & 0x08) ? 11 : 3), \
+ (((M) & 0x10) ? 12 : 4), \
+ (((M) & 0x20) ? 13 : 5), \
+ (((M) & 0x40) ? 14 : 6), \
+ (((M) & 0x80) ? 15 : 7)); })
+
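+/* Usage sketch (illustrative): M = 0xAA has mask bits 1, 3, 5, and 7 set, so
+ * odd-indexed elements come from V2 and even-indexed elements from V1:
+ *
+ *   __m256 a = _mm256_setr_ps(0, 1, 2, 3, 4, 5, 6, 7);
+ *   __m256 b = _mm256_setr_ps(10, 11, 12, 13, 14, 15, 16, 17);
+ *   __m256 r = _mm256_blend_ps(a, b, 0xAA);
+ *   r is {0, 11, 2, 13, 4, 15, 6, 17}
+ */
+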
+/// \brief Merges 64-bit double-precision data values stored in either of the
+/// two 256-bit vectors of [4 x double], as specified by the 256-bit vector
+/// operand.
+///
+/// \headerfile <x86intrin.h>
+///
+/// This intrinsic corresponds to the \c VBLENDVPD / BLENDVPD instruction.
+///
+/// \param __a
+/// A 256-bit vector of [4 x double].
+/// \param __b
+/// A 256-bit vector of [4 x double].
+/// \param __c
+/// A 256-bit vector operand, with mask bits 255, 191, 127, and 63 specifying
+/// how the values are to be copied. The position of the mask bit corresponds
+/// to the most significant bit of a copied value. When a mask bit is 0, the
+/// corresponding 64-bit element in operand __a is copied to the same
+/// position in the destination. When a mask bit is 1, the corresponding
+/// 64-bit element in operand __b is copied to the same position in the
+/// destination.
+/// \returns A 256-bit vector of [4 x double] containing the copied values.
+static __inline __m256d __DEFAULT_FN_ATTRS
+_mm256_blendv_pd(__m256d __a, __m256d __b, __m256d __c)
+{
+ return (__m256d)__builtin_ia32_blendvpd256(
+ (__v4df)__a, (__v4df)__b, (__v4df)__c);
+}
+
+/// \brief Merges 32-bit single-precision data values stored in either of the
+/// two 256-bit vectors of [8 x float], as specified by the 256-bit vector
+/// operand.
+///
+/// \headerfile <x86intrin.h>
+///
+/// This intrinsic corresponds to the \c VBLENDVPS / BLENDVPS instruction.
+///
+/// \param __a
+/// A 256-bit vector of [8 x float].
+/// \param __b
+/// A 256-bit vector of [8 x float].
+/// \param __c
+/// A 256-bit vector operand, with mask bits 255, 223, 191, 159, 127, 95, 63,
+/// and 31 specifying how the values are to be copied. The position of the
+/// mask bit corresponds to the most significant bit of a copied value. When
+/// a mask bit is 0, the corresponding 32-bit element in operand __a is
+/// copied to the same position in the destination. When a mask bit is 1, the
+/// corresponding 32-bit element in operand __b is copied to the same
+/// position in the destination.
+/// \returns A 256-bit vector of [8 x float] containing the copied values.
+static __inline __m256 __DEFAULT_FN_ATTRS
+_mm256_blendv_ps(__m256 __a, __m256 __b, __m256 __c)
+{
+ return (__m256)__builtin_ia32_blendvps256(
+ (__v8sf)__a, (__v8sf)__b, (__v8sf)__c);
+}
+
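+/* Usage sketch (illustrative): because only the sign bit of each mask
+ * element is consulted, a vector can serve as its own mask, for example to
+ * replace negative elements with zero:
+ *
+ *   __m256d a = _mm256_setr_pd(-1.0, 2.0, -3.0, 4.0);
+ *   __m256d r = _mm256_blendv_pd(a, _mm256_setzero_pd(), a);
+ *   r is {0.0, 2.0, 0.0, 4.0}
+ */
+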
+/* Vector Dot Product */
+/// \brief Computes two dot products in parallel, using the lower and upper
+/// halves of two [8 x float] vectors as input to the two computations, and
+/// returning the two dot products in the lower and upper halves of the
+/// [8 x float] result. The immediate integer operand controls which
+/// input elements will contribute to the dot product, and where the final
+/// results are returned. In general, for each dot product, the four
+/// corresponding elements of the input vectors are multiplied; the first
+/// two and second two products are summed, then the two sums are added to
+/// form the final result.
+///
+/// \headerfile <x86intrin.h>
+///
+/// \code
+/// __m256 _mm256_dp_ps(__m256 V1, __m256 V2, const int M);
+/// \endcode
+///
+/// This intrinsic corresponds to the \c VDPPS / DPPS instruction.
+///
+/// \param V1
+/// A vector of [8 x float] values, treated as two [4 x float] vectors.
+/// \param V2
+/// A vector of [8 x float] values, treated as two [4 x float] vectors.
+/// \param M
+/// An immediate integer argument. Bits [7:4] determine which elements of
+/// the input vectors are used, with bit [4] corresponding to the lowest
+/// element and bit [7] corresponding to the highest element of each [4 x
+/// float] subvector. If a bit is set, the corresponding elements from the
+/// two input vectors are used as inputs to the dot product; otherwise that
+/// input is treated as zero. Bits [3:0] determine which elements of the
+/// result will receive a copy of the final dot product, with bit [0]
+/// corresponding to the lowest element and bit [3] corresponding to the
+/// highest element of each [4 x float] subvector. If a bit is set, the dot
+/// product is returned in the corresponding element; otherwise that element
+/// is set to zero. The bitmask is applied in the same way to each of the
+/// two parallel dot product computations.
+/// \returns A 256-bit vector of [8 x float] containing the two dot products.
+#define _mm256_dp_ps(V1, V2, M) __extension__ ({ \
+ (__m256)__builtin_ia32_dpps256((__v8sf)(__m256)(V1), \
+ (__v8sf)(__m256)(V2), (M)); })
+
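+/* Worked example (illustrative): M = 0xF1 uses all four inputs of each lane
+ * (bits [7:4] = 1111) and writes each dot product to element 0 of its lane
+ * (bits [3:0] = 0001):
+ *
+ *   __m256 v = _mm256_setr_ps(1, 2, 3, 4, 5, 6, 7, 8);
+ *   __m256 r = _mm256_dp_ps(v, _mm256_set1_ps(1.0f), 0xF1);
+ *   r is {10, 0, 0, 0, 26, 0, 0, 0}   (1+2+3+4 and 5+6+7+8)
+ */
+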
+/* Vector shuffle */
+/// \brief Selects 8 float values from the 256-bit operands of [8 x float], as
+/// specified by the immediate value operand. The four selected elements in
+/// each operand are copied to the destination according to the bits
+/// specified in the immediate operand. The selected elements from the first
+/// 256-bit operand are copied to bits [63:0] and bits [191:128] of the
+/// destination, and the selected elements from the second 256-bit operand
+/// are copied to bits [127:64] and bits [255:192] of the destination. For
+/// example, if bits [7:0] of the immediate operand contain a value of 0xFF,
+/// the 256-bit destination vector would contain the following values: b[7],
+/// b[7], a[7], a[7], b[3], b[3], a[3], a[3].
+///
+/// \headerfile <x86intrin.h>
+///
+/// \code
+/// __m256 _mm256_shuffle_ps(__m256 a, __m256 b, const int mask);
+/// \endcode
+///
+/// This intrinsic corresponds to the \c VSHUFPS / SHUFPS instruction.
+///
+/// \param a
+/// A 256-bit vector of [8 x float]. The four selected elements in this
+/// operand are copied to bits [63:0] and bits [191:128] in the destination,
+/// according to the bits specified in the immediate operand.
+/// \param b
+/// A 256-bit vector of [8 x float]. The four selected elements in this
+/// operand are copied to bits [127:64] and bits [255:192] in the
+/// destination, according to the bits specified in the immediate operand.
+/// \param mask
+/// An immediate value containing an 8-bit value specifying which elements to
+/// copy from a and b. Bits [3:0] specify the values copied from operand a.
+/// Bits [7:4] specify the values copied from operand b.
+/// The destinations within the 256-bit destination are assigned values as
+/// follows, according to the bit value assignments described below:
+/// Bits [1:0] are used to assign values to bits [31:0] and [159:128] in the
+/// destination.
+/// Bits [3:2] are used to assign values to bits [63:32] and [191:160] in the
+/// destination.
+/// Bits [5:4] are used to assign values to bits [95:64] and [223:192] in the
+/// destination.
+/// Bits [7:6] are used to assign values to bits [127:96] and [255:224] in
+/// the destination.
+/// Bit value assignments:
+/// 00: Bits [31:0] and [159:128] are copied from the selected operand.
+/// 01: Bits [63:32] and [191:160] are copied from the selected operand.
+/// 10: Bits [95:64] and [223:192] are copied from the selected operand.
+/// 11: Bits [127:96] and [255:224] are copied from the selected operand.
+/// \returns A 256-bit vector of [8 x float] containing the shuffled values.
+#define _mm256_shuffle_ps(a, b, mask) __extension__ ({ \
+ (__m256)__builtin_shufflevector((__v8sf)(__m256)(a), \
+ (__v8sf)(__m256)(b), \
+ 0 + (((mask) >> 0) & 0x3), \
+ 0 + (((mask) >> 2) & 0x3), \
+ 8 + (((mask) >> 4) & 0x3), \
+ 8 + (((mask) >> 6) & 0x3), \
+ 4 + (((mask) >> 0) & 0x3), \
+ 4 + (((mask) >> 2) & 0x3), \
+ 12 + (((mask) >> 4) & 0x3), \
+ 12 + (((mask) >> 6) & 0x3)); })
+
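+/* Usage sketch (illustrative): mask = 0xE4 selects fields 0, 1, 2, 3, taking
+ * the two low selections from a and the two high selections from b within
+ * each 128-bit lane:
+ *
+ *   __m256 a = _mm256_setr_ps(0, 1, 2, 3, 4, 5, 6, 7);
+ *   __m256 b = _mm256_setr_ps(10, 11, 12, 13, 14, 15, 16, 17);
+ *   __m256 r = _mm256_shuffle_ps(a, b, 0xE4);
+ *   r is {0, 1, 12, 13, 4, 5, 16, 17}
+ */
+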
+/// \brief Selects four double-precision values from the 256-bit operands of
+/// [4 x double], as specified by the immediate value operand. The selected
+/// elements from the first 256-bit operand are copied to bits [63:0] and
+/// bits [191:128] in the destination, and the selected elements from the
+/// second 256-bit operand are copied to bits [127:64] and bits [255:192] in
+/// the destination. For example, if bits [3:0] of the immediate operand
+/// contain a value of 0xF, the 256-bit destination vector would contain the
+/// following values: b[3], a[3], b[1], a[1].
+///
+/// \headerfile <x86intrin.h>
+///
+/// \code
+/// __m256d _mm256_shuffle_pd(__m256d a, __m256d b, const int mask);
+/// \endcode
+///
+/// This intrinsic corresponds to the \c VSHUFPD / SHUFPD instruction.
+///
+/// \param a
+/// A 256-bit vector of [4 x double].
+/// \param b
+/// A 256-bit vector of [4 x double].
+/// \param mask
+/// An immediate value, of which bits [3:0] specify which elements to copy
+/// from a and b:
+/// Bit [0]=0: Bits [63:0] are copied from a to bits [63:0] of the
+/// destination.
+/// Bit [0]=1: Bits [127:64] are copied from a to bits [63:0] of the
+/// destination.
+/// Bit [1]=0: Bits [63:0] are copied from b to bits [127:64] of the
+/// destination.
+/// Bit [1]=1: Bits [127:64] are copied from b to bits [127:64] of the
+/// destination.
+/// Bit [2]=0: Bits [191:128] are copied from a to bits [191:128] of the
+/// destination.
+/// Bit [2]=1: Bits [255:192] are copied from a to bits [191:128] of the
+/// destination.
+/// Bit [3]=0: Bits [191:128] are copied from b to bits [255:192] of the
+/// destination.
+/// Bit [3]=1: Bits [255:192] are copied from b to bits [255:192] of the
+/// destination.
+/// \returns A 256-bit vector of [4 x double] containing the shuffled values.
+#define _mm256_shuffle_pd(a, b, mask) __extension__ ({ \
+ (__m256d)__builtin_shufflevector((__v4df)(__m256d)(a), \
+ (__v4df)(__m256d)(b), \
+ 0 + (((mask) >> 0) & 0x1), \
+ 4 + (((mask) >> 1) & 0x1), \
+ 2 + (((mask) >> 2) & 0x1), \
+ 6 + (((mask) >> 3) & 0x1)); })
+
+/* Compare */
+#define _CMP_EQ_OQ 0x00 /* Equal (ordered, non-signaling) */
+#define _CMP_LT_OS 0x01 /* Less-than (ordered, signaling) */
+#define _CMP_LE_OS 0x02 /* Less-than-or-equal (ordered, signaling) */
+#define _CMP_UNORD_Q 0x03 /* Unordered (non-signaling) */
+#define _CMP_NEQ_UQ 0x04 /* Not-equal (unordered, non-signaling) */
+#define _CMP_NLT_US 0x05 /* Not-less-than (unordered, signaling) */
+#define _CMP_NLE_US 0x06 /* Not-less-than-or-equal (unordered, signaling) */
+#define _CMP_ORD_Q 0x07 /* Ordered (non-signaling) */
+#define _CMP_EQ_UQ 0x08 /* Equal (unordered, non-signaling) */
+#define _CMP_NGE_US 0x09 /* Not-greater-than-or-equal (unord, signaling) */
+#define _CMP_NGT_US 0x0a /* Not-greater-than (unordered, signaling) */
+#define _CMP_FALSE_OQ 0x0b /* False (ordered, non-signaling) */
+#define _CMP_NEQ_OQ 0x0c /* Not-equal (ordered, non-signaling) */
+#define _CMP_GE_OS 0x0d /* Greater-than-or-equal (ordered, signaling) */
+#define _CMP_GT_OS 0x0e /* Greater-than (ordered, signaling) */
+#define _CMP_TRUE_UQ 0x0f /* True (unordered, non-signaling) */
+#define _CMP_EQ_OS 0x10 /* Equal (ordered, signaling) */
+#define _CMP_LT_OQ 0x11 /* Less-than (ordered, non-signaling) */
+#define _CMP_LE_OQ 0x12 /* Less-than-or-equal (ordered, non-signaling) */
+#define _CMP_UNORD_S 0x13 /* Unordered (signaling) */
+#define _CMP_NEQ_US 0x14 /* Not-equal (unordered, signaling) */
+#define _CMP_NLT_UQ 0x15 /* Not-less-than (unordered, non-signaling) */
+#define _CMP_NLE_UQ 0x16 /* Not-less-than-or-equal (unord, non-signaling) */
+#define _CMP_ORD_S 0x17 /* Ordered (signaling) */
+#define _CMP_EQ_US 0x18 /* Equal (unordered, signaling) */
+#define _CMP_NGE_UQ 0x19 /* Not-greater-than-or-equal (unord, non-sign) */
+#define _CMP_NGT_UQ 0x1a /* Not-greater-than (unordered, non-signaling) */
+#define _CMP_FALSE_OS 0x1b /* False (ordered, signaling) */
+#define _CMP_NEQ_OS 0x1c /* Not-equal (ordered, signaling) */
+#define _CMP_GE_OQ 0x1d /* Greater-than-or-equal (ordered, non-signaling) */
+#define _CMP_GT_OQ 0x1e /* Greater-than (ordered, non-signaling) */
+#define _CMP_TRUE_US 0x1f /* True (unordered, signaling) */
+
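+/* Usage sketch (illustrative): a predicate from the table above yields a
+ * per-element all-ones/all-zeros mask, which pairs naturally with the
+ * movemask and blendv operations defined elsewhere in this header:
+ *
+ *   __m256 x = _mm256_setr_ps(1, -2, 3, -4, 5, -6, 7, -8);
+ *   __m256 m = _mm256_cmp_ps(x, _mm256_setzero_ps(), _CMP_LT_OQ);
+ *   int bits = _mm256_movemask_ps(m);
+ *   bits is 0xAA: elements 1, 3, 5, and 7 are negative
+ */
+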
+/// \brief Compares each of the corresponding double-precision values of two
+/// 128-bit vectors of [2 x double], using the operation specified by the
+/// immediate integer operand. Returns a [2 x double] vector consisting of
+/// two doubles corresponding to the two comparison results: zero if the
+/// comparison is false, and all 1's if the comparison is true.
+///
+/// \headerfile <x86intrin.h>
+///
+/// \code
+/// __m128d _mm_cmp_pd(__m128d a, __m128d b, const int c);
+/// \endcode
+///
+/// This intrinsic corresponds to the \c VCMPPD / CMPPD instruction.
+///
+/// \param a
+/// A 128-bit vector of [2 x double].
+/// \param b
+/// A 128-bit vector of [2 x double].
+/// \param c
+/// An immediate integer operand, with bits [4:0] specifying which comparison
+/// operation to use:
+/// 00h, 08h, 10h, 18h: Equal
+/// 01h, 09h, 11h, 19h: Less than
+/// 02h, 0Ah, 12h, 1Ah: Less than or equal / Greater than or equal (swapped
+/// operands)
+/// 03h, 0Bh, 13h, 1Bh: Unordered
+/// 04h, 0Ch, 14h, 1Ch: Not equal
+/// 05h, 0Dh, 15h, 1Dh: Not less than / Not greater than (swapped operands)
+/// 06h, 0Eh, 16h, 1Eh: Not less than or equal / Not greater than or equal
+/// (swapped operands)
+/// 07h, 0Fh, 17h, 1Fh: Ordered
+/// \returns A 128-bit vector of [2 x double] containing the comparison results.
+#define _mm_cmp_pd(a, b, c) __extension__ ({ \
+ (__m128d)__builtin_ia32_cmppd((__v2df)(__m128d)(a), \
+ (__v2df)(__m128d)(b), (c)); })
+
+/// \brief Compares each of the corresponding values of two 128-bit vectors of
+/// [4 x float], using the operation specified by the immediate integer
+/// operand. Returns a [4 x float] vector consisting of four floats
+/// corresponding to the four comparison results: zero if the comparison is
+/// false, and all 1's if the comparison is true.
+///
+/// \headerfile <x86intrin.h>
+///
+/// \code
+/// __m128 _mm_cmp_ps(__m128 a, __m128 b, const int c);
+/// \endcode
+///
+/// This intrinsic corresponds to the \c VCMPPS / CMPPS instruction.
+///
+/// \param a
+/// A 128-bit vector of [4 x float].
+/// \param b
+/// A 128-bit vector of [4 x float].
+/// \param c
+/// An immediate integer operand, with bits [4:0] specifying which comparison
+/// operation to use:
+/// 00h, 08h, 10h, 18h: Equal
+/// 01h, 09h, 11h, 19h: Less than
+/// 02h, 0Ah, 12h, 1Ah: Less than or equal / Greater than or equal (swapped
+/// operands)
+/// 03h, 0Bh, 13h, 1Bh: Unordered
+/// 04h, 0Ch, 14h, 1Ch: Not equal
+/// 05h, 0Dh, 15h, 1Dh: Not less than / Not greater than (swapped operands)
+/// 06h, 0Eh, 16h, 1Eh: Not less than or equal / Not greater than or equal
+/// (swapped operands)
+/// 07h, 0Fh, 17h, 1Fh: Ordered
+/// \returns A 128-bit vector of [4 x float] containing the comparison results.
+#define _mm_cmp_ps(a, b, c) __extension__ ({ \
+ (__m128)__builtin_ia32_cmpps((__v4sf)(__m128)(a), \
+ (__v4sf)(__m128)(b), (c)); })
+
+/// \brief Compares each of the corresponding double-precision values of two
+/// 256-bit vectors of [4 x double], using the operation specified by the
+/// immediate integer operand. Returns a [4 x double] vector consisting of
+/// four doubles corresponding to the four comparison results: zero if the
+/// comparison is false, and all 1's if the comparison is true.
+///
+/// \headerfile <x86intrin.h>
+///
+/// \code
+/// __m256d _mm256_cmp_pd(__m256d a, __m256d b, const int c);
+/// \endcode
+///
+/// This intrinsic corresponds to the \c VCMPPD / CMPPD instruction.
+///
+/// \param a
+/// A 256-bit vector of [4 x double].
+/// \param b
+/// A 256-bit vector of [4 x double].
+/// \param c
+/// An immediate integer operand, with bits [4:0] specifying which comparison
+/// operation to use:
+/// 00h, 08h, 10h, 18h: Equal
+/// 01h, 09h, 11h, 19h: Less than
+/// 02h, 0Ah, 12h, 1Ah: Less than or equal / Greater than or equal (swapped
+/// operands)
+/// 03h, 0Bh, 13h, 1Bh: Unordered
+/// 04h, 0Ch, 14h, 1Ch: Not equal
+/// 05h, 0Dh, 15h, 1Dh: Not less than / Not greater than (swapped operands)
+/// 06h, 0Eh, 16h, 1Eh: Not less than or equal / Not greater than or equal
+/// (swapped operands)
+/// 07h, 0Fh, 17h, 1Fh: Ordered
+/// \returns A 256-bit vector of [4 x double] containing the comparison results.
+#define _mm256_cmp_pd(a, b, c) __extension__ ({ \
+ (__m256d)__builtin_ia32_cmppd256((__v4df)(__m256d)(a), \
+ (__v4df)(__m256d)(b), (c)); })
+
+/// \brief Compares each of the corresponding values of two 256-bit vectors of
+/// [8 x float], using the operation specified by the immediate integer
+/// operand. Returns an [8 x float] vector consisting of eight floats
+/// corresponding to the eight comparison results: zero if the comparison is
+/// false, and all 1's if the comparison is true.
+///
+/// \headerfile <x86intrin.h>
+///
+/// \code
+/// __m256 _mm256_cmp_ps(__m256 a, __m256 b, const int c);
+/// \endcode
+///
+/// This intrinsic corresponds to the \c VCMPPS / CMPPS instruction.
+///
+/// \param a
+/// A 256-bit vector of [8 x float].
+/// \param b
+/// A 256-bit vector of [8 x float].
+/// \param c
+/// An immediate integer operand, with bits [4:0] specifying which comparison
+/// operation to use:
+/// 00h, 08h, 10h, 18h: Equal
+/// 01h, 09h, 11h, 19h: Less than
+/// 02h, 0Ah, 12h, 1Ah: Less than or equal / Greater than or equal (swapped
+/// operands)
+/// 03h, 0Bh, 13h, 1Bh: Unordered
+/// 04h, 0Ch, 14h, 1Ch: Not equal
+/// 05h, 0Dh, 15h, 1Dh: Not less than / Not greater than (swapped operands)
+/// 06h, 0Eh, 16h, 1Eh: Not less than or equal / Not greater than or equal
+/// (swapped operands)
+/// 07h, 0Fh, 17h, 1Fh: Ordered
+/// \returns A 256-bit vector of [8 x float] containing the comparison results.
+#define _mm256_cmp_ps(a, b, c) __extension__ ({ \
+ (__m256)__builtin_ia32_cmpps256((__v8sf)(__m256)(a), \
+ (__v8sf)(__m256)(b), (c)); })
+
+/// \brief Compares the low double-precision values of two 128-bit vectors of
+/// [2 x double], using the operation specified by the immediate integer
+/// operand. If the comparison is true, the low 64 bits of the destination
+/// are set; otherwise they are cleared. The upper 64 bits of the
+/// destination are copied from operand a.
+///
+/// \headerfile <x86intrin.h>
+///
+/// \code
+/// __m128d _mm_cmp_sd(__m128d a, __m128d b, const int c);
+/// \endcode
+///
+/// This intrinsic corresponds to the \c VCMPSD / CMPSD instruction.
+///
+/// \param a
+/// A 128-bit vector of [2 x double].
+/// \param b
+/// A 128-bit vector of [2 x double].
+/// \param c
+/// An immediate integer operand, with bits [4:0] specifying which comparison
+/// operation to use:
+/// 00h, 08h, 10h, 18h: Equal
+/// 01h, 09h, 11h, 19h: Less than
+/// 02h, 0Ah, 12h, 1Ah: Less than or equal / Greater than or equal (swapped
+/// operands)
+/// 03h, 0Bh, 13h, 1Bh: Unordered
+/// 04h, 0Ch, 14h, 1Ch: Not equal
+/// 05h, 0Dh, 15h, 1Dh: Not less than / Not greater than (swapped operands)
+/// 06h, 0Eh, 16h, 1Eh: Not less than or equal / Not greater than or equal
+/// (swapped operands)
+/// 07h, 0Fh, 17h, 1Fh: Ordered
+/// \returns A 128-bit vector of [2 x double] containing the comparison results.
+#define _mm_cmp_sd(a, b, c) __extension__ ({ \
+ (__m128d)__builtin_ia32_cmpsd((__v2df)(__m128d)(a), \
+ (__v2df)(__m128d)(b), (c)); })
+
+/// \brief Compares the low single-precision values of two 128-bit vectors of
+/// [4 x float], using the operation specified by the immediate integer
+/// operand. If the comparison is true, the low 32 bits of the destination
+/// are set; otherwise they are cleared. The upper 96 bits of the
+/// destination are copied from operand a.
+///
+/// \headerfile <x86intrin.h>
+///
+/// \code
+/// __m128 _mm_cmp_ss(__m128 a, __m128 b, const int c);
+/// \endcode
+///
+/// This intrinsic corresponds to the \c VCMPSS / CMPSS instruction.
+///
+/// \param a
+/// A 128-bit vector of [4 x float].
+/// \param b
+/// A 128-bit vector of [4 x float].
+/// \param c
+/// An immediate integer operand, with bits [4:0] specifying which comparison
+/// operation to use:
+/// 00h, 08h, 10h, 18h: Equal
+/// 01h, 09h, 11h, 19h: Less than
+/// 02h, 0Ah, 12h, 1Ah: Less than or equal / Greater than or equal (swapped
+/// operands)
+/// 03h, 0Bh, 13h, 1Bh: Unordered
+/// 04h, 0Ch, 14h, 1Ch: Not equal
+/// 05h, 0Dh, 15h, 1Dh: Not less than / Not greater than (swapped operands)
+/// 06h, 0Eh, 16h, 1Eh: Not less than or equal / Not greater than or equal
+/// (swapped operands)
+/// 07h, 0Fh, 17h, 1Fh: Ordered
+/// \returns A 128-bit vector of [4 x float] containing the comparison results.
+#define _mm_cmp_ss(a, b, c) __extension__ ({ \
+ (__m128)__builtin_ia32_cmpss((__v4sf)(__m128)(a), \
+ (__v4sf)(__m128)(b), (c)); })
+
+/// \brief Takes an [8 x i32] vector and returns the vector element value
+/// indexed by the immediate constant operand.
+///
+/// \headerfile <x86intrin.h>
+///
+/// This intrinsic corresponds to the \c VEXTRACTF128+COMPOSITE /
+/// EXTRACTF128+COMPOSITE instruction.
+///
+/// \param __a
+/// A 256-bit vector of [8 x i32].
+/// \param __imm
+/// An immediate integer operand with bits [2:0] determining which vector
+/// element is extracted and returned.
+/// \returns A 32-bit integer containing the extracted 32 bits of packed
+/// data.
+static __inline int __DEFAULT_FN_ATTRS
+_mm256_extract_epi32(__m256i __a, const int __imm)
+{
+ __v8si __b = (__v8si)__a;
+ return __b[__imm & 7];
+}
+
+/// \brief Takes a [16 x i16] vector and returns the vector element value
+/// indexed by the immediate constant operand.
+///
+/// \headerfile <x86intrin.h>
+///
+/// This intrinsic corresponds to the \c VEXTRACTF128+COMPOSITE /
+/// EXTRACTF128+COMPOSITE instruction.
+///
+/// \param __a
+/// A 256-bit integer vector of [16 x i16].
+/// \param __imm
+/// An immediate integer operand with bits [3:0] determining which vector
+/// element is extracted and returned.
+/// \returns A 32-bit integer containing the extracted 16 bits of zero-extended
+/// packed data.
+static __inline int __DEFAULT_FN_ATTRS
+_mm256_extract_epi16(__m256i __a, const int __imm)
+{
+ __v16hi __b = (__v16hi)__a;
+ return (unsigned short)__b[__imm & 15];
+}
+
+/// \brief Takes a [32 x i8] vector and returns the vector element value
+/// indexed by the immediate constant operand.
+///
+/// \headerfile <x86intrin.h>
+///
+/// This intrinsic corresponds to the \c VEXTRACTF128+COMPOSITE /
+/// EXTRACTF128+COMPOSITE instruction.
+///
+/// \param __a
+/// A 256-bit integer vector of [32 x i8].
+/// \param __imm
+/// An immediate integer operand with bits [4:0] determining which vector
+/// element is extracted and returned.
+/// \returns A 32-bit integer containing the extracted 8 bits of zero-extended
+/// packed data.
+static __inline int __DEFAULT_FN_ATTRS
+_mm256_extract_epi8(__m256i __a, const int __imm)
+{
+ __v32qi __b = (__v32qi)__a;
+ return (unsigned char)__b[__imm & 31];
+}
+
+#ifdef __x86_64__
+/// \brief Takes a [4 x i64] vector and returns the vector element value
+/// indexed by the immediate constant operand.
+///
+/// \headerfile <x86intrin.h>
+///
+/// This intrinsic corresponds to the \c VEXTRACTF128+COMPOSITE /
+/// EXTRACTF128+COMPOSITE instruction.
+///
+/// \param __a
+/// A 256-bit integer vector of [4 x i64].
+/// \param __imm
+/// An immediate integer operand with bits [1:0] determining which vector
+/// element is extracted and returned.
+/// \returns A 64-bit integer containing the extracted 64 bits of packed
+/// data.
+static __inline long long __DEFAULT_FN_ATTRS
+_mm256_extract_epi64(__m256i __a, const int __imm)
+{
+ __v4di __b = (__v4di)__a;
+ return __b[__imm & 3];
+}
+#endif
+
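+/* Usage sketch (illustrative): the index is masked to the element count, so
+ * only the low bits of the immediate matter:
+ *
+ *   __m256i v = _mm256_setr_epi32(10, 11, 12, 13, 14, 15, 16, 17);
+ *   int e = _mm256_extract_epi32(v, 5);
+ *   e is 15
+ */
+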
+/// \brief Takes an [8 x i32] vector and replaces the vector element value
+/// indexed by the immediate constant operand by a new value. Returns the
+/// modified vector.
+///
+/// \headerfile <x86intrin.h>
+///
+/// This intrinsic corresponds to the \c VINSERTF128+COMPOSITE /
+/// INSERTF128+COMPOSITE instruction.
+///
+/// \param __a
+/// A vector of [8 x i32] to be used by the insert operation.
+/// \param __b
+/// An integer value. The replacement value for the insert operation.
+/// \param __imm
+/// An immediate integer specifying the index of the vector element to be
+/// replaced.
+/// \returns A copy of vector __a, after replacing its element indexed by __imm
+/// with __b.
+static __inline __m256i __DEFAULT_FN_ATTRS
+_mm256_insert_epi32(__m256i __a, int __b, int const __imm)
+{
+ __v8si __c = (__v8si)__a;
+ __c[__imm & 7] = __b;
+ return (__m256i)__c;
+}
+
+/// \brief Takes a [16 x i16] vector and replaces the vector element value
+/// indexed by the immediate constant operand with a new value. Returns the
+/// modified vector.
+///
+/// \headerfile <x86intrin.h>
+///
+/// This intrinsic corresponds to the \c VINSERTF128+COMPOSITE /
+/// INSERTF128+COMPOSITE instruction.
+///
+/// \param __a
+/// A vector of [16 x i16] to be used by the insert operation.
+/// \param __b
+/// An i16 integer value. The replacement value for the insert operation.
+/// \param __imm
+/// An immediate integer specifying the index of the vector element to be
+/// replaced.
+/// \returns A copy of vector __a, after replacing its element indexed by __imm
+/// with __b.
+static __inline __m256i __DEFAULT_FN_ATTRS
+_mm256_insert_epi16(__m256i __a, int __b, int const __imm)
+{
+ __v16hi __c = (__v16hi)__a;
+ __c[__imm & 15] = __b;
+ return (__m256i)__c;
+}
+
+/// \brief Takes a [32 x i8] vector and replaces the vector element value
+/// indexed by the immediate constant operand with a new value. Returns the
+/// modified vector.
+///
+/// \headerfile <x86intrin.h>
+///
+/// This intrinsic corresponds to the \c VINSERTF128+COMPOSITE /
+/// INSERTF128+COMPOSITE instruction.
+///
+/// \param __a
+/// A vector of [32 x i8] to be used by the insert operation.
+/// \param __b
+/// An i8 integer value. The replacement value for the insert operation.
+/// \param __imm
+/// An immediate integer specifying the index of the vector element to be
+/// replaced.
+/// \returns A copy of vector __a, after replacing its element indexed by __imm
+/// with __b.
+static __inline __m256i __DEFAULT_FN_ATTRS
+_mm256_insert_epi8(__m256i __a, int __b, int const __imm)
+{
+ __v32qi __c = (__v32qi)__a;
+ __c[__imm & 31] = __b;
+ return (__m256i)__c;
+}
+
+#ifdef __x86_64__
+/// \brief Takes a [4 x i64] vector and replaces the vector element value
+/// indexed by the immediate constant operand with a new value. Returns the
+/// modified vector.
+///
+/// \headerfile <x86intrin.h>
+///
+/// This intrinsic corresponds to the \c VINSERTF128+COMPOSITE /
+/// INSERTF128+COMPOSITE instruction.
+///
+/// \param __a
+/// A vector of [4 x i64] to be used by the insert operation.
+/// \param __b
+/// A 64-bit integer value. The replacement value for the insert operation.
+/// \param __imm
+/// An immediate integer specifying the index of the vector element to be
+/// replaced.
+/// \returns A copy of vector __a, after replacing its element indexed by __imm
+/// with __b.
+static __inline __m256i __DEFAULT_FN_ATTRS
+_mm256_insert_epi64(__m256i __a, long long __b, int const __imm)
+{
+ __v4di __c = (__v4di)__a;
+ __c[__imm & 3] = __b;
+ return (__m256i)__c;
+}
+#endif
+
+/* Conversion */
+/// \brief Converts a vector of [4 x i32] into a vector of [4 x double].
+///
+/// \headerfile <x86intrin.h>
+///
+/// This intrinsic corresponds to the \c VCVTDQ2PD / CVTDQ2PD instruction.
+///
+/// \param __a
+/// A 128-bit integer vector of [4 x i32].
+/// \returns A 256-bit vector of [4 x double] containing the converted values.
+static __inline __m256d __DEFAULT_FN_ATTRS
+_mm256_cvtepi32_pd(__m128i __a)
+{
+ return (__m256d)__builtin_convertvector((__v4si)__a, __v4df);
+}
+
+/// \brief Converts a vector of [8 x i32] into a vector of [8 x float].
+///
+/// \headerfile <x86intrin.h>
+///
+/// This intrinsic corresponds to the \c VCVTDQ2PS / CVTDQ2PS instruction.
+///
+/// \param __a
+/// A 256-bit integer vector.
+/// \returns A 256-bit vector of [8 x float] containing the converted values.
+static __inline __m256 __DEFAULT_FN_ATTRS
+_mm256_cvtepi32_ps(__m256i __a)
+{
+ return (__m256)__builtin_ia32_cvtdq2ps256((__v8si) __a);
+}
+
+/// \brief Converts a 256-bit vector of [4 x double] into a 128-bit vector of
+/// [4 x float].
+///
+/// \headerfile <x86intrin.h>
+///
+/// This intrinsic corresponds to the \c VCVTPD2PS / CVTPD2PS instruction.
+///
+/// \param __a
+/// A 256-bit vector of [4 x double].
+/// \returns A 128-bit vector of [4 x float] containing the converted values.
+static __inline __m128 __DEFAULT_FN_ATTRS
+_mm256_cvtpd_ps(__m256d __a)
+{
+ return (__m128)__builtin_ia32_cvtpd2ps256((__v4df) __a);
+}
+
+/// \brief Converts a vector of [8 x float] into a vector of [8 x i32].
+///
+/// \headerfile <x86intrin.h>
+///
+/// This intrinsic corresponds to the \c VCVTPS2DQ / CVTPS2DQ instruction.
+///
+/// \param __a
+/// A 256-bit vector of [8 x float].
+/// \returns A 256-bit integer vector containing the converted values.
+static __inline __m256i __DEFAULT_FN_ATTRS
+_mm256_cvtps_epi32(__m256 __a)
+{
+ return (__m256i)__builtin_ia32_cvtps2dq256((__v8sf) __a);
+}
+
+static __inline __m256d __DEFAULT_FN_ATTRS
+_mm256_cvtps_pd(__m128 __a)
+{
+ return (__m256d)__builtin_convertvector((__v4sf)__a, __v4df);
+}
+
+static __inline __m128i __DEFAULT_FN_ATTRS
+_mm256_cvttpd_epi32(__m256d __a)
+{
+ return (__m128i)__builtin_convertvector((__v4df) __a, __v4si);
+}
+
+static __inline __m128i __DEFAULT_FN_ATTRS
+_mm256_cvtpd_epi32(__m256d __a)
+{
+ return (__m128i)__builtin_ia32_cvtpd2dq256((__v4df) __a);
+}
+
+static __inline __m256i __DEFAULT_FN_ATTRS
+_mm256_cvttps_epi32(__m256 __a)
+{
+ return (__m256i)__builtin_convertvector((__v8sf) __a, __v8si);
+}
+
+static __inline double __DEFAULT_FN_ATTRS
+_mm256_cvtsd_f64(__m256d __a)
+{
+ return __a[0];
+}
+
+static __inline int __DEFAULT_FN_ATTRS
+_mm256_cvtsi256_si32(__m256i __a)
+{
+ __v8si __b = (__v8si)__a;
+ return __b[0];
+}
+
+static __inline float __DEFAULT_FN_ATTRS
+_mm256_cvtss_f32(__m256 __a)
+{
+ return __a[0];
+}
+
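+/* Usage sketch (illustrative): the cvt forms round according to the current
+ * rounding mode (round-to-nearest by default), while the cvtt forms truncate
+ * toward zero:
+ *
+ *   __m256 v = _mm256_set1_ps(2.7f);
+ *   __m256i r = _mm256_cvtps_epi32(v);    each element is 3
+ *   __m256i t = _mm256_cvttps_epi32(v);   each element is 2
+ */
+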
+/* Vector replicate */
+static __inline __m256 __DEFAULT_FN_ATTRS
+_mm256_movehdup_ps(__m256 __a)
+{
+ return __builtin_shufflevector((__v8sf)__a, (__v8sf)__a, 1, 1, 3, 3, 5, 5, 7, 7);
+}
+
+static __inline __m256 __DEFAULT_FN_ATTRS
+_mm256_moveldup_ps(__m256 __a)
+{
+ return __builtin_shufflevector((__v8sf)__a, (__v8sf)__a, 0, 0, 2, 2, 4, 4, 6, 6);
+}
+
+static __inline __m256d __DEFAULT_FN_ATTRS
+_mm256_movedup_pd(__m256d __a)
+{
+ return __builtin_shufflevector((__v4df)__a, (__v4df)__a, 0, 0, 2, 2);
+}
+
+/* Unpack and Interleave */
+static __inline __m256d __DEFAULT_FN_ATTRS
+_mm256_unpackhi_pd(__m256d __a, __m256d __b)
+{
+ return __builtin_shufflevector((__v4df)__a, (__v4df)__b, 1, 5, 1+2, 5+2);
+}
+
+static __inline __m256d __DEFAULT_FN_ATTRS
+_mm256_unpacklo_pd(__m256d __a, __m256d __b)
+{
+ return __builtin_shufflevector((__v4df)__a, (__v4df)__b, 0, 4, 0+2, 4+2);
+}
+
+static __inline __m256 __DEFAULT_FN_ATTRS
+_mm256_unpackhi_ps(__m256 __a, __m256 __b)
+{
+ return __builtin_shufflevector((__v8sf)__a, (__v8sf)__b, 2, 10, 2+1, 10+1, 6, 14, 6+1, 14+1);
+}
+
+static __inline __m256 __DEFAULT_FN_ATTRS
+_mm256_unpacklo_ps(__m256 __a, __m256 __b)
+{
+ return __builtin_shufflevector((__v8sf)__a, (__v8sf)__b, 0, 8, 0+1, 8+1, 4, 12, 4+1, 12+1);
+}
+
+/* Bit Test */
+static __inline int __DEFAULT_FN_ATTRS
+_mm_testz_pd(__m128d __a, __m128d __b)
+{
+ return __builtin_ia32_vtestzpd((__v2df)__a, (__v2df)__b);
+}
+
+static __inline int __DEFAULT_FN_ATTRS
+_mm_testc_pd(__m128d __a, __m128d __b)
+{
+ return __builtin_ia32_vtestcpd((__v2df)__a, (__v2df)__b);
+}
+
+static __inline int __DEFAULT_FN_ATTRS
+_mm_testnzc_pd(__m128d __a, __m128d __b)
+{
+ return __builtin_ia32_vtestnzcpd((__v2df)__a, (__v2df)__b);
+}
+
+static __inline int __DEFAULT_FN_ATTRS
+_mm_testz_ps(__m128 __a, __m128 __b)
+{
+ return __builtin_ia32_vtestzps((__v4sf)__a, (__v4sf)__b);
+}
+
+static __inline int __DEFAULT_FN_ATTRS
+_mm_testc_ps(__m128 __a, __m128 __b)
+{
+ return __builtin_ia32_vtestcps((__v4sf)__a, (__v4sf)__b);
+}
+
+static __inline int __DEFAULT_FN_ATTRS
+_mm_testnzc_ps(__m128 __a, __m128 __b)
+{
+ return __builtin_ia32_vtestnzcps((__v4sf)__a, (__v4sf)__b);
+}
+
+static __inline int __DEFAULT_FN_ATTRS
+_mm256_testz_pd(__m256d __a, __m256d __b)
+{
+ return __builtin_ia32_vtestzpd256((__v4df)__a, (__v4df)__b);
+}
+
+static __inline int __DEFAULT_FN_ATTRS
+_mm256_testc_pd(__m256d __a, __m256d __b)
+{
+ return __builtin_ia32_vtestcpd256((__v4df)__a, (__v4df)__b);
+}
+
+static __inline int __DEFAULT_FN_ATTRS
+_mm256_testnzc_pd(__m256d __a, __m256d __b)
+{
+ return __builtin_ia32_vtestnzcpd256((__v4df)__a, (__v4df)__b);
+}
+
+static __inline int __DEFAULT_FN_ATTRS
+_mm256_testz_ps(__m256 __a, __m256 __b)
+{
+ return __builtin_ia32_vtestzps256((__v8sf)__a, (__v8sf)__b);
+}
+
+static __inline int __DEFAULT_FN_ATTRS
+_mm256_testc_ps(__m256 __a, __m256 __b)
+{
+ return __builtin_ia32_vtestcps256((__v8sf)__a, (__v8sf)__b);
+}
+
+static __inline int __DEFAULT_FN_ATTRS
+_mm256_testnzc_ps(__m256 __a, __m256 __b)
+{
+ return __builtin_ia32_vtestnzcps256((__v8sf)__a, (__v8sf)__b);
+}
+
+static __inline int __DEFAULT_FN_ATTRS
+_mm256_testz_si256(__m256i __a, __m256i __b)
+{
+ return __builtin_ia32_ptestz256((__v4di)__a, (__v4di)__b);
+}
+
+static __inline int __DEFAULT_FN_ATTRS
+_mm256_testc_si256(__m256i __a, __m256i __b)
+{
+ return __builtin_ia32_ptestc256((__v4di)__a, (__v4di)__b);
+}
+
+static __inline int __DEFAULT_FN_ATTRS
+_mm256_testnzc_si256(__m256i __a, __m256i __b)
+{
+ return __builtin_ia32_ptestnzc256((__v4di)__a, (__v4di)__b);
+}
+
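+/* Semantics sketch (illustrative): testz returns 1 when (a AND b) is all
+ * zeros, and testc returns 1 when (NOT a AND b) is all zeros; the _pd/_ps
+ * variants apply the same tests to the sign bits only:
+ *
+ *   __m256i a = _mm256_set1_epi32(0x0F);
+ *   __m256i b = _mm256_set1_epi32(0xF0);
+ *   _mm256_testz_si256(a, b) is 1   (a and b share no set bits)
+ *   _mm256_testc_si256(a, b) is 0   (b has set bits outside of a)
+ */
+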
+/* Vector extract sign mask */
+static __inline int __DEFAULT_FN_ATTRS
+_mm256_movemask_pd(__m256d __a)
+{
+ return __builtin_ia32_movmskpd256((__v4df)__a);
+}
+
+static __inline int __DEFAULT_FN_ATTRS
+_mm256_movemask_ps(__m256 __a)
+{
+ return __builtin_ia32_movmskps256((__v8sf)__a);
+}
+
+/* Vector zero */
+static __inline void __DEFAULT_FN_ATTRS
+_mm256_zeroall(void)
+{
+ __builtin_ia32_vzeroall();
+}
+
+static __inline void __DEFAULT_FN_ATTRS
+_mm256_zeroupper(void)
+{
+ __builtin_ia32_vzeroupper();
+}
+
+/* Vector load with broadcast */
+static __inline __m128 __DEFAULT_FN_ATTRS
+_mm_broadcast_ss(float const *__a)
+{
+ float __f = *__a;
+ return (__m128)(__v4sf){ __f, __f, __f, __f };
+}
+
+static __inline __m256d __DEFAULT_FN_ATTRS
+_mm256_broadcast_sd(double const *__a)
+{
+ double __d = *__a;
+ return (__m256d)(__v4df){ __d, __d, __d, __d };
+}
+
+static __inline __m256 __DEFAULT_FN_ATTRS
+_mm256_broadcast_ss(float const *__a)
+{
+ float __f = *__a;
+ return (__m256)(__v8sf){ __f, __f, __f, __f, __f, __f, __f, __f };
+}
+
+static __inline __m256d __DEFAULT_FN_ATTRS
+_mm256_broadcast_pd(__m128d const *__a)
+{
+ return (__m256d)__builtin_ia32_vbroadcastf128_pd256((__v2df const *)__a);
+}
+
+static __inline __m256 __DEFAULT_FN_ATTRS
+_mm256_broadcast_ps(__m128 const *__a)
+{
+ return (__m256)__builtin_ia32_vbroadcastf128_ps256((__v4sf const *)__a);
+}
+
+/* SIMD load ops */
+static __inline __m256d __DEFAULT_FN_ATTRS
+_mm256_load_pd(double const *__p)
+{
+ return *(__m256d *)__p;
+}
+
+static __inline __m256 __DEFAULT_FN_ATTRS
+_mm256_load_ps(float const *__p)
+{
+ return *(__m256 *)__p;
+}
+
+static __inline __m256d __DEFAULT_FN_ATTRS
+_mm256_loadu_pd(double const *__p)
+{
+ struct __loadu_pd {
+ __m256d __v;
+ } __attribute__((__packed__, __may_alias__));
+ return ((struct __loadu_pd*)__p)->__v;
+}
+
+static __inline __m256 __DEFAULT_FN_ATTRS
+_mm256_loadu_ps(float const *__p)
+{
+ struct __loadu_ps {
+ __m256 __v;
+ } __attribute__((__packed__, __may_alias__));
+ return ((struct __loadu_ps*)__p)->__v;
+}
+
+static __inline __m256i __DEFAULT_FN_ATTRS
+_mm256_load_si256(__m256i const *__p)
+{
+ return *__p;
+}
+
+static __inline __m256i __DEFAULT_FN_ATTRS
+_mm256_loadu_si256(__m256i const *__p)
+{
+ struct __loadu_si256 {
+ __m256i __v;
+ } __attribute__((__packed__, __may_alias__));
+ return ((struct __loadu_si256*)__p)->__v;
+}
+
+static __inline __m256i __DEFAULT_FN_ATTRS
+_mm256_lddqu_si256(__m256i const *__p)
+{
+ return (__m256i)__builtin_ia32_lddqu256((char const *)__p);
+}
+
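+/* Usage note (illustrative): the load/loadu pairs differ only in alignment
+ * requirements; _mm256_load_ps assumes a 32-byte-aligned address, while
+ * _mm256_loadu_ps accepts any address:
+ *
+ *   float buf[8] = {0, 1, 2, 3, 4, 5, 6, 7};
+ *   __m256 v = _mm256_loadu_ps(buf);
+ */
+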
+/* SIMD store ops */
+static __inline void __DEFAULT_FN_ATTRS
+_mm256_store_pd(double *__p, __m256d __a)
+{
+ *(__m256d *)__p = __a;
+}
+
+static __inline void __DEFAULT_FN_ATTRS
+_mm256_store_ps(float *__p, __m256 __a)
+{
+ *(__m256 *)__p = __a;
+}
+
+static __inline void __DEFAULT_FN_ATTRS
+_mm256_storeu_pd(double *__p, __m256d __a)
+{
+ struct __storeu_pd {
+ __m256d __v;
+ } __attribute__((__packed__, __may_alias__));
+ ((struct __storeu_pd*)__p)->__v = __a;
+}
+
+static __inline void __DEFAULT_FN_ATTRS
+_mm256_storeu_ps(float *__p, __m256 __a)
+{
+ struct __storeu_ps {
+ __m256 __v;
+ } __attribute__((__packed__, __may_alias__));
+ ((struct __storeu_ps*)__p)->__v = __a;
+}
+
+static __inline void __DEFAULT_FN_ATTRS
+_mm256_store_si256(__m256i *__p, __m256i __a)
+{
+ *__p = __a;
+}
+
+static __inline void __DEFAULT_FN_ATTRS
+_mm256_storeu_si256(__m256i *__p, __m256i __a)
+{
+ struct __storeu_si256 {
+ __m256i __v;
+ } __attribute__((__packed__, __may_alias__));
+ ((struct __storeu_si256*)__p)->__v = __a;
+}
+
+/* Conditional load ops */
+static __inline __m128d __DEFAULT_FN_ATTRS
+_mm_maskload_pd(double const *__p, __m128i __m)
+{
+ return (__m128d)__builtin_ia32_maskloadpd((const __v2df *)__p, (__v2di)__m);
+}
+
+static __inline __m256d __DEFAULT_FN_ATTRS
+_mm256_maskload_pd(double const *__p, __m256i __m)
+{
+ return (__m256d)__builtin_ia32_maskloadpd256((const __v4df *)__p,
+ (__v4di)__m);
+}
+
+static __inline __m128 __DEFAULT_FN_ATTRS
+_mm_maskload_ps(float const *__p, __m128i __m)
+{
+ return (__m128)__builtin_ia32_maskloadps((const __v4sf *)__p, (__v4si)__m);
+}
+
+static __inline __m256 __DEFAULT_FN_ATTRS
+_mm256_maskload_ps(float const *__p, __m256i __m)
+{
+ return (__m256)__builtin_ia32_maskloadps256((const __v8sf *)__p, (__v8si)__m);
+}
+
+/* Conditional store ops */
+static __inline void __DEFAULT_FN_ATTRS
+_mm256_maskstore_ps(float *__p, __m256i __m, __m256 __a)
+{
+ __builtin_ia32_maskstoreps256((__v8sf *)__p, (__v8si)__m, (__v8sf)__a);
+}
+
+static __inline void __DEFAULT_FN_ATTRS
+_mm_maskstore_pd(double *__p, __m128i __m, __m128d __a)
+{
+ __builtin_ia32_maskstorepd((__v2df *)__p, (__v2di)__m, (__v2df)__a);
+}
+
+static __inline void __DEFAULT_FN_ATTRS
+_mm256_maskstore_pd(double *__p, __m256i __m, __m256d __a)
+{
+ __builtin_ia32_maskstorepd256((__v4df *)__p, (__v4di)__m, (__v4df)__a);
+}
+
+static __inline void __DEFAULT_FN_ATTRS
+_mm_maskstore_ps(float *__p, __m128i __m, __m128 __a)
+{
+ __builtin_ia32_maskstoreps((__v4sf *)__p, (__v4si)__m, (__v4sf)__a);
+}
+
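+/* Usage sketch (illustrative): only elements whose mask element has its most
+ * significant bit set are loaded; the rest read as zero:
+ *
+ *   double buf[4] = {1.0, 2.0, 3.0, 4.0};
+ *   __m256i m = _mm256_setr_epi64x(-1, 0, -1, 0);
+ *   __m256d v = _mm256_maskload_pd(buf, m);
+ *   v is {1.0, 0.0, 3.0, 0.0}
+ */
+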
+/* Cacheability support ops */
+static __inline void __DEFAULT_FN_ATTRS
+_mm256_stream_si256(__m256i *__a, __m256i __b)
+{
+ __builtin_nontemporal_store((__v4di)__b, (__v4di*)__a);
+}
+
+static __inline void __DEFAULT_FN_ATTRS
+_mm256_stream_pd(double *__a, __m256d __b)
+{
+ __builtin_nontemporal_store((__v4df)__b, (__v4df*)__a);
+}
+
+static __inline void __DEFAULT_FN_ATTRS
+_mm256_stream_ps(float *__p, __m256 __a)
+{
+ __builtin_nontemporal_store((__v8sf)__a, (__v8sf*)__p);
+}
+
+/* Create vectors */
+static __inline__ __m256d __DEFAULT_FN_ATTRS
+_mm256_undefined_pd(void)
+{
+ return (__m256d)__builtin_ia32_undef256();
+}
+
+static __inline__ __m256 __DEFAULT_FN_ATTRS
+_mm256_undefined_ps(void)
+{
+ return (__m256)__builtin_ia32_undef256();
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS
+_mm256_undefined_si256(void)
+{
+ return (__m256i)__builtin_ia32_undef256();
+}
+
+static __inline __m256d __DEFAULT_FN_ATTRS
+_mm256_set_pd(double __a, double __b, double __c, double __d)
+{
+ return (__m256d){ __d, __c, __b, __a };
+}
+
+static __inline __m256 __DEFAULT_FN_ATTRS
+_mm256_set_ps(float __a, float __b, float __c, float __d,
+ float __e, float __f, float __g, float __h)
+{
+ return (__m256){ __h, __g, __f, __e, __d, __c, __b, __a };
+}
+
+static __inline __m256i __DEFAULT_FN_ATTRS
+_mm256_set_epi32(int __i0, int __i1, int __i2, int __i3,
+ int __i4, int __i5, int __i6, int __i7)
+{
+ return (__m256i)(__v8si){ __i7, __i6, __i5, __i4, __i3, __i2, __i1, __i0 };
+}
+
+static __inline __m256i __DEFAULT_FN_ATTRS
+_mm256_set_epi16(short __w15, short __w14, short __w13, short __w12,
+ short __w11, short __w10, short __w09, short __w08,
+ short __w07, short __w06, short __w05, short __w04,
+ short __w03, short __w02, short __w01, short __w00)
+{
+ return (__m256i)(__v16hi){ __w00, __w01, __w02, __w03, __w04, __w05, __w06,
+ __w07, __w08, __w09, __w10, __w11, __w12, __w13, __w14, __w15 };
+}
+
+static __inline __m256i __DEFAULT_FN_ATTRS
+_mm256_set_epi8(char __b31, char __b30, char __b29, char __b28,
+ char __b27, char __b26, char __b25, char __b24,
+ char __b23, char __b22, char __b21, char __b20,
+ char __b19, char __b18, char __b17, char __b16,
+ char __b15, char __b14, char __b13, char __b12,
+ char __b11, char __b10, char __b09, char __b08,
+ char __b07, char __b06, char __b05, char __b04,
+ char __b03, char __b02, char __b01, char __b00)
+{
+ return (__m256i)(__v32qi){
+ __b00, __b01, __b02, __b03, __b04, __b05, __b06, __b07,
+ __b08, __b09, __b10, __b11, __b12, __b13, __b14, __b15,
+ __b16, __b17, __b18, __b19, __b20, __b21, __b22, __b23,
+ __b24, __b25, __b26, __b27, __b28, __b29, __b30, __b31
+ };
+}
+
+static __inline __m256i __DEFAULT_FN_ATTRS
+_mm256_set_epi64x(long long __a, long long __b, long long __c, long long __d)
+{
+ return (__m256i)(__v4di){ __d, __c, __b, __a };
+}
+
+/* Create vectors with elements in reverse order */
+static __inline __m256d __DEFAULT_FN_ATTRS
+_mm256_setr_pd(double __a, double __b, double __c, double __d)
+{
+ return (__m256d){ __a, __b, __c, __d };
+}
+
+static __inline __m256 __DEFAULT_FN_ATTRS
+_mm256_setr_ps(float __a, float __b, float __c, float __d,
+ float __e, float __f, float __g, float __h)
+{
+ return (__m256){ __a, __b, __c, __d, __e, __f, __g, __h };
+}
+
+static __inline __m256i __DEFAULT_FN_ATTRS
+_mm256_setr_epi32(int __i0, int __i1, int __i2, int __i3,
+ int __i4, int __i5, int __i6, int __i7)
+{
+ return (__m256i)(__v8si){ __i0, __i1, __i2, __i3, __i4, __i5, __i6, __i7 };
+}
+
+static __inline __m256i __DEFAULT_FN_ATTRS
+_mm256_setr_epi16(short __w15, short __w14, short __w13, short __w12,
+ short __w11, short __w10, short __w09, short __w08,
+ short __w07, short __w06, short __w05, short __w04,
+ short __w03, short __w02, short __w01, short __w00)
+{
+ return (__m256i)(__v16hi){ __w15, __w14, __w13, __w12, __w11, __w10, __w09,
+ __w08, __w07, __w06, __w05, __w04, __w03, __w02, __w01, __w00 };
+}
+
+static __inline __m256i __DEFAULT_FN_ATTRS
+_mm256_setr_epi8(char __b31, char __b30, char __b29, char __b28,
+ char __b27, char __b26, char __b25, char __b24,
+ char __b23, char __b22, char __b21, char __b20,
+ char __b19, char __b18, char __b17, char __b16,
+ char __b15, char __b14, char __b13, char __b12,
+ char __b11, char __b10, char __b09, char __b08,
+ char __b07, char __b06, char __b05, char __b04,
+ char __b03, char __b02, char __b01, char __b00)
+{
+ return (__m256i)(__v32qi){
+ __b31, __b30, __b29, __b28, __b27, __b26, __b25, __b24,
+ __b23, __b22, __b21, __b20, __b19, __b18, __b17, __b16,
+ __b15, __b14, __b13, __b12, __b11, __b10, __b09, __b08,
+ __b07, __b06, __b05, __b04, __b03, __b02, __b01, __b00 };
+}
+
+static __inline __m256i __DEFAULT_FN_ATTRS
+_mm256_setr_epi64x(long long __a, long long __b, long long __c, long long __d)
+{
+ return (__m256i)(__v4di){ __a, __b, __c, __d };
+}
+
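+/* Usage note (illustrative): set and setr differ only in argument order;
+ * set takes elements from the highest index down, setr from the lowest up:
+ *
+ *   __m256i a = _mm256_set_epi32(7, 6, 5, 4, 3, 2, 1, 0);
+ *   __m256i b = _mm256_setr_epi32(0, 1, 2, 3, 4, 5, 6, 7);
+ *   a and b hold the same value; element 0 is 0 in both
+ */
+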
+/* Create vectors with repeated elements */
+static __inline __m256d __DEFAULT_FN_ATTRS
+_mm256_set1_pd(double __w)
+{
+ return (__m256d){ __w, __w, __w, __w };
+}
+
+static __inline __m256 __DEFAULT_FN_ATTRS
+_mm256_set1_ps(float __w)
+{
+ return (__m256){ __w, __w, __w, __w, __w, __w, __w, __w };
+}
+
+static __inline __m256i __DEFAULT_FN_ATTRS
+_mm256_set1_epi32(int __i)
+{
+ return (__m256i)(__v8si){ __i, __i, __i, __i, __i, __i, __i, __i };
+}
+
+static __inline __m256i __DEFAULT_FN_ATTRS
+_mm256_set1_epi16(short __w)
+{
+ return (__m256i)(__v16hi){ __w, __w, __w, __w, __w, __w, __w, __w, __w, __w,
+ __w, __w, __w, __w, __w, __w };
+}
+
+static __inline __m256i __DEFAULT_FN_ATTRS
+_mm256_set1_epi8(char __b)
+{
+ return (__m256i)(__v32qi){ __b, __b, __b, __b, __b, __b, __b, __b, __b, __b,
+ __b, __b, __b, __b, __b, __b, __b, __b, __b, __b, __b, __b, __b, __b, __b,
+ __b, __b, __b, __b, __b, __b, __b };
+}
+
+static __inline __m256i __DEFAULT_FN_ATTRS
+_mm256_set1_epi64x(long long __q)
+{
+ return (__m256i)(__v4di){ __q, __q, __q, __q };
+}
+
+/* Create zeroed vectors */
+static __inline __m256d __DEFAULT_FN_ATTRS
+_mm256_setzero_pd(void)
+{
+ return (__m256d){ 0, 0, 0, 0 };
+}
+
+static __inline __m256 __DEFAULT_FN_ATTRS
+_mm256_setzero_ps(void)
+{
+ return (__m256){ 0, 0, 0, 0, 0, 0, 0, 0 };
+}
+
+static __inline __m256i __DEFAULT_FN_ATTRS
+_mm256_setzero_si256(void)
+{
+ return (__m256i){ 0LL, 0LL, 0LL, 0LL };
+}
+
+/* Cast between vector types */
+static __inline __m256 __DEFAULT_FN_ATTRS
+_mm256_castpd_ps(__m256d __a)
+{
+ return (__m256)__a;
+}
+
+static __inline __m256i __DEFAULT_FN_ATTRS
+_mm256_castpd_si256(__m256d __a)
+{
+ return (__m256i)__a;
+}
+
+static __inline __m256d __DEFAULT_FN_ATTRS
+_mm256_castps_pd(__m256 __a)
+{
+ return (__m256d)__a;
+}
+
+static __inline __m256i __DEFAULT_FN_ATTRS
+_mm256_castps_si256(__m256 __a)
+{
+ return (__m256i)__a;
+}
+
+static __inline __m256 __DEFAULT_FN_ATTRS
+_mm256_castsi256_ps(__m256i __a)
+{
+ return (__m256)__a;
+}
+
+static __inline __m256d __DEFAULT_FN_ATTRS
+_mm256_castsi256_pd(__m256i __a)
+{
+ return (__m256d)__a;
+}
+
+static __inline __m128d __DEFAULT_FN_ATTRS
+_mm256_castpd256_pd128(__m256d __a)
+{
+ return __builtin_shufflevector((__v4df)__a, (__v4df)__a, 0, 1);
+}
+
+static __inline __m128 __DEFAULT_FN_ATTRS
+_mm256_castps256_ps128(__m256 __a)
+{
+ return __builtin_shufflevector((__v8sf)__a, (__v8sf)__a, 0, 1, 2, 3);
+}
+
+static __inline __m128i __DEFAULT_FN_ATTRS
+_mm256_castsi256_si128(__m256i __a)
+{
+ return __builtin_shufflevector((__v4di)__a, (__v4di)__a, 0, 1);
+}
+
+static __inline __m256d __DEFAULT_FN_ATTRS
+_mm256_castpd128_pd256(__m128d __a)
+{
+ return __builtin_shufflevector((__v2df)__a, (__v2df)__a, 0, 1, -1, -1);
+}
+
+static __inline __m256 __DEFAULT_FN_ATTRS
+_mm256_castps128_ps256(__m128 __a)
+{
+ return __builtin_shufflevector((__v4sf)__a, (__v4sf)__a, 0, 1, 2, 3, -1, -1, -1, -1);
+}
+
+static __inline __m256i __DEFAULT_FN_ATTRS
+_mm256_castsi128_si256(__m128i __a)
+{
+ return __builtin_shufflevector((__v2di)__a, (__v2di)__a, 0, 1, -1, -1);
+}
+
+/*
+ Vector insert.
+ We use macros rather than inlines because we only want to accept
+ invocations where the immediate M is a constant expression.
+*/
+#define _mm256_insertf128_ps(V1, V2, M) __extension__ ({ \
+ (__m256)__builtin_shufflevector( \
+ (__v8sf)(__m256)(V1), \
+ (__v8sf)_mm256_castps128_ps256((__m128)(V2)), \
+ (((M) & 1) ? 0 : 8), \
+ (((M) & 1) ? 1 : 9), \
+ (((M) & 1) ? 2 : 10), \
+ (((M) & 1) ? 3 : 11), \
+ (((M) & 1) ? 8 : 4), \
+ (((M) & 1) ? 9 : 5), \
+ (((M) & 1) ? 10 : 6), \
+ (((M) & 1) ? 11 : 7) );})
+
+#define _mm256_insertf128_pd(V1, V2, M) __extension__ ({ \
+ (__m256d)__builtin_shufflevector( \
+ (__v4df)(__m256d)(V1), \
+ (__v4df)_mm256_castpd128_pd256((__m128d)(V2)), \
+ (((M) & 1) ? 0 : 4), \
+ (((M) & 1) ? 1 : 5), \
+ (((M) & 1) ? 4 : 2), \
+ (((M) & 1) ? 5 : 3) );})
+
+#define _mm256_insertf128_si256(V1, V2, M) __extension__ ({ \
+ (__m256i)__builtin_shufflevector( \
+ (__v4di)(__m256i)(V1), \
+ (__v4di)_mm256_castsi128_si256((__m128i)(V2)), \
+ (((M) & 1) ? 0 : 4), \
+ (((M) & 1) ? 1 : 5), \
+ (((M) & 1) ? 4 : 2), \
+ (((M) & 1) ? 5 : 3) );})
+
+/*
+ Vector extract.
+ We use macros rather than inlines because we only want to accept
+ invocations where the immediate M is a constant expression.
+*/
+#define _mm256_extractf128_ps(V, M) __extension__ ({ \
+ (__m128)__builtin_shufflevector( \
+ (__v8sf)(__m256)(V), \
+ (__v8sf)(_mm256_undefined_ps()), \
+ (((M) & 1) ? 4 : 0), \
+ (((M) & 1) ? 5 : 1), \
+ (((M) & 1) ? 6 : 2), \
+ (((M) & 1) ? 7 : 3) );})
+
+#define _mm256_extractf128_pd(V, M) __extension__ ({ \
+ (__m128d)__builtin_shufflevector( \
+ (__v4df)(__m256d)(V), \
+ (__v4df)(_mm256_undefined_pd()), \
+ (((M) & 1) ? 2 : 0), \
+ (((M) & 1) ? 3 : 1) );})
+
+#define _mm256_extractf128_si256(V, M) __extension__ ({ \
+ (__m128i)__builtin_shufflevector( \
+ (__v4di)(__m256i)(V), \
+ (__v4di)(_mm256_undefined_si256()), \
+ (((M) & 1) ? 2 : 0), \
+ (((M) & 1) ? 3 : 1) );})
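+
+/* Usage sketch (illustrative): pull the upper 128 bits out of a 256-bit
+   vector; again, M must be a constant expression.
+
+     __m256d __v = _mm256_set_pd(4.0, 3.0, 2.0, 1.0); // lanes: 1, 2, 3, 4
+     __m128d __hi = _mm256_extractf128_pd(__v, 1);    // { 3.0, 4.0 }
+*/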
+
+/* SIMD load ops (unaligned) */
+static __inline __m256 __DEFAULT_FN_ATTRS
+_mm256_loadu2_m128(float const *__addr_hi, float const *__addr_lo)
+{
+ __m256 __v256 = _mm256_castps128_ps256(_mm_loadu_ps(__addr_lo));
+ return _mm256_insertf128_ps(__v256, _mm_loadu_ps(__addr_hi), 1);
+}
+
+static __inline __m256d __DEFAULT_FN_ATTRS
+_mm256_loadu2_m128d(double const *__addr_hi, double const *__addr_lo)
+{
+ __m256d __v256 = _mm256_castpd128_pd256(_mm_loadu_pd(__addr_lo));
+ return _mm256_insertf128_pd(__v256, _mm_loadu_pd(__addr_hi), 1);
+}
+
+static __inline __m256i __DEFAULT_FN_ATTRS
+_mm256_loadu2_m128i(__m128i const *__addr_hi, __m128i const *__addr_lo)
+{
+ __m256i __v256 = _mm256_castsi128_si256(_mm_loadu_si128(__addr_lo));
+ return _mm256_insertf128_si256(__v256, _mm_loadu_si128(__addr_hi), 1);
+}
+
+/* SIMD store ops (unaligned) */
+static __inline void __DEFAULT_FN_ATTRS
+_mm256_storeu2_m128(float *__addr_hi, float *__addr_lo, __m256 __a)
+{
+ __m128 __v128;
+
+ __v128 = _mm256_castps256_ps128(__a);
+ _mm_storeu_ps(__addr_lo, __v128);
+ __v128 = _mm256_extractf128_ps(__a, 1);
+ _mm_storeu_ps(__addr_hi, __v128);
+}
+
+static __inline void __DEFAULT_FN_ATTRS
+_mm256_storeu2_m128d(double *__addr_hi, double *__addr_lo, __m256d __a)
+{
+ __m128d __v128;
+
+ __v128 = _mm256_castpd256_pd128(__a);
+ _mm_storeu_pd(__addr_lo, __v128);
+ __v128 = _mm256_extractf128_pd(__a, 1);
+ _mm_storeu_pd(__addr_hi, __v128);
+}
+
+static __inline void __DEFAULT_FN_ATTRS
+_mm256_storeu2_m128i(__m128i *__addr_hi, __m128i *__addr_lo, __m256i __a)
+{
+ __m128i __v128;
+
+ __v128 = _mm256_castsi256_si128(__a);
+ _mm_storeu_si128(__addr_lo, __v128);
+ __v128 = _mm256_extractf128_si256(__a, 1);
+ _mm_storeu_si128(__addr_hi, __v128);
+}
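+
+/* Note: in both the loadu2 and storeu2 helpers the address of the *high* half
+   is the first parameter, mirroring the Intel intrinsics they implement. */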
+
+static __inline __m256 __DEFAULT_FN_ATTRS
+_mm256_set_m128 (__m128 __hi, __m128 __lo) {
+ return (__m256) __builtin_shufflevector((__v4sf)__lo, (__v4sf)__hi, 0, 1, 2, 3, 4, 5, 6, 7);
+}
+
+static __inline __m256d __DEFAULT_FN_ATTRS
+_mm256_set_m128d (__m128d __hi, __m128d __lo) {
+ return (__m256d)_mm256_set_m128((__m128)__hi, (__m128)__lo);
+}
+
+static __inline __m256i __DEFAULT_FN_ATTRS
+_mm256_set_m128i (__m128i __hi, __m128i __lo) {
+ return (__m256i)_mm256_set_m128((__m128)__hi, (__m128)__lo);
+}
+
+static __inline __m256 __DEFAULT_FN_ATTRS
+_mm256_setr_m128 (__m128 __lo, __m128 __hi) {
+ return _mm256_set_m128(__hi, __lo);
+}
+
+static __inline __m256d __DEFAULT_FN_ATTRS
+_mm256_setr_m128d (__m128d __lo, __m128d __hi) {
+ return (__m256d)_mm256_set_m128((__m128)__hi, (__m128)__lo);
+}
+
+static __inline __m256i __DEFAULT_FN_ATTRS
+_mm256_setr_m128i (__m128i __lo, __m128i __hi) {
+ return (__m256i)_mm256_set_m128((__m128)__hi, (__m128)__lo);
+}
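+
+/* Note: _mm256_set_m128* place their first argument in the high 128 bits and
+   their second in the low 128 bits; the _mm256_setr_m128* variants take the
+   two halves in the reverse (memory) order. */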
+
+#undef __DEFAULT_FN_ATTRS
+
+#endif /* __AVXINTRIN_H */
diff --git a/current/clang-include/bmi2intrin.h b/current/clang-include/bmi2intrin.h
new file mode 100644
index 0000000..fdae82c
--- /dev/null
+++ b/current/clang-include/bmi2intrin.h
@@ -0,0 +1,95 @@
+/*===---- bmi2intrin.h - BMI2 intrinsics -----------------------------------===
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+ * THE SOFTWARE.
+ *
+ *===-----------------------------------------------------------------------===
+ */
+
+#if !defined __X86INTRIN_H && !defined __IMMINTRIN_H
+#error "Never use <bmi2intrin.h> directly; include <x86intrin.h> instead."
+#endif
+
+#ifndef __BMI2INTRIN_H
+#define __BMI2INTRIN_H
+
+/* Define the default attributes for the functions in this file. */
+#define __DEFAULT_FN_ATTRS __attribute__((__always_inline__, __nodebug__, __target__("bmi2")))
+
+static __inline__ unsigned int __DEFAULT_FN_ATTRS
+_bzhi_u32(unsigned int __X, unsigned int __Y)
+{
+ return __builtin_ia32_bzhi_si(__X, __Y);
+}
+
+static __inline__ unsigned int __DEFAULT_FN_ATTRS
+_pdep_u32(unsigned int __X, unsigned int __Y)
+{
+ return __builtin_ia32_pdep_si(__X, __Y);
+}
+
+static __inline__ unsigned int __DEFAULT_FN_ATTRS
+_pext_u32(unsigned int __X, unsigned int __Y)
+{
+ return __builtin_ia32_pext_si(__X, __Y);
+}
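+
+/* Worked example (illustrative, not part of the upstream header):
+     _bzhi_u32(0xff, 4)  == 0x0f  - zero the bits from index 4 upward
+     _pdep_u32(0x3, 0xa) == 0xa   - deposit the low source bits at the
+                                    positions of the set mask bits
+     _pext_u32(0xe, 0xa) == 0x3   - gather the bits selected by the mask into
+                                    the low bits of the result */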
+
+#ifdef __x86_64__
+
+static __inline__ unsigned long long __DEFAULT_FN_ATTRS
+_bzhi_u64(unsigned long long __X, unsigned long long __Y)
+{
+ return __builtin_ia32_bzhi_di(__X, __Y);
+}
+
+static __inline__ unsigned long long __DEFAULT_FN_ATTRS
+_pdep_u64(unsigned long long __X, unsigned long long __Y)
+{
+ return __builtin_ia32_pdep_di(__X, __Y);
+}
+
+static __inline__ unsigned long long __DEFAULT_FN_ATTRS
+_pext_u64(unsigned long long __X, unsigned long long __Y)
+{
+ return __builtin_ia32_pext_di(__X, __Y);
+}
+
+static __inline__ unsigned long long __DEFAULT_FN_ATTRS
+_mulx_u64 (unsigned long long __X, unsigned long long __Y,
+ unsigned long long *__P)
+{
+ unsigned __int128 __res = (unsigned __int128) __X * __Y;
+ *__P = (unsigned long long) (__res >> 64);
+ return (unsigned long long) __res;
+}
+
+#else /* !__x86_64__ */
+
+static __inline__ unsigned int __DEFAULT_FN_ATTRS
+_mulx_u32 (unsigned int __X, unsigned int __Y, unsigned int *__P)
+{
+ unsigned long long __res = (unsigned long long) __X * __Y;
+ *__P = (unsigned int) (__res >> 32);
+ return (unsigned int) __res;
+}
+
+#endif /* !__x86_64__ */
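+
+/* Usage sketch (illustrative): on x86-64, _mulx_u64 yields the full 128-bit
+   product without modifying the flags register; _mulx_u32 is the 32-bit
+   analogue.
+
+     unsigned long long __hi;
+     unsigned long long __lo = _mulx_u64(0xffffffffffffffffULL, 2ULL, &__hi);
+     // __lo == 0xfffffffffffffffeULL, __hi == 1ULL
+*/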
+
+#undef __DEFAULT_FN_ATTRS
+
+#endif /* __BMI2INTRIN_H */
diff --git a/current/clang-include/bmiintrin.h b/current/clang-include/bmiintrin.h
new file mode 100644
index 0000000..30acfae
--- /dev/null
+++ b/current/clang-include/bmiintrin.h
@@ -0,0 +1,548 @@
+/*===---- bmiintrin.h - BMI intrinsics -------------------------------------===
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+ * THE SOFTWARE.
+ *
+ *===-----------------------------------------------------------------------===
+ */
+
+#if !defined __X86INTRIN_H && !defined __IMMINTRIN_H
+#error "Never use <bmiintrin.h> directly; include <x86intrin.h> instead."
+#endif
+
+#ifndef __BMIINTRIN_H
+#define __BMIINTRIN_H
+
+/// \brief Counts the number of trailing zero bits in the operand.
+///
+/// \headerfile <x86intrin.h>
+///
+/// \code
+/// unsigned short _tzcnt_u16(unsigned short a);
+/// \endcode
+///
+/// This intrinsic corresponds to the \c TZCNT instruction.
+///
+/// \param a
+/// An unsigned 16-bit integer whose trailing zeros are to be counted.
+/// \returns An unsigned 16-bit integer containing the number of trailing zero
+/// bits in the operand.
+#define _tzcnt_u16(a) (__tzcnt_u16((a)))
+
+/// \brief Performs a bitwise AND of the second operand with the one's
+/// complement of the first operand.
+///
+/// \headerfile <x86intrin.h>
+///
+/// \code
+/// unsigned int _andn_u32(unsigned int a, unsigned int b);
+/// \endcode
+///
+/// This intrinsic corresponds to the \c ANDN instruction.
+///
+/// \param a
+/// An unsigned integer containing one of the operands.
+/// \param b
+/// An unsigned integer containing one of the operands.
+/// \returns An unsigned integer containing the bitwise AND of the second
+/// operand with the one's complement of the first operand.
+#define _andn_u32(a, b) (__andn_u32((a), (b)))
+
+/* _bextr_u32 != __bextr_u32: the Intel (single-underscore) and AMD
+   (double-underscore) forms of BEXTR take different arguments, so no macro
+   mapping is provided; both functions are defined later in this file. */
+/// \brief Clears all bits in the source except for the least significant bit
+/// containing a value of 1 and returns the result.
+///
+/// \headerfile <x86intrin.h>
+///
+/// \code
+/// unsigned int _blsi_u32(unsigned int a);
+/// \endcode
+///
+/// This intrinsic corresponds to the \c BLSI instruction.
+///
+/// \param a
+/// An unsigned integer whose bits are to be cleared.
+/// \returns An unsigned integer containing the result of clearing the bits from
+/// the source operand.
+#define _blsi_u32(a) (__blsi_u32((a)))
+
+/// \brief Creates a mask whose bits are set to 1, using bit 0 up to and
+/// including the least significant bit that is set to 1 in the source
+/// operand and returns the result.
+///
+/// \headerfile <x86intrin.h>
+///
+/// \code
+/// unsigned int _blsmsk_u32(unsigned int a);
+/// \endcode
+///
+/// This intrinsic corresponds to the \c BLSMSK instruction.
+///
+/// \param a
+/// An unsigned integer used to create the mask.
+/// \returns An unsigned integer containing the newly created mask.
+#define _blsmsk_u32(a) (__blsmsk_u32((a)))
+
+/// \brief Clears the least significant bit that is set to 1 in the source
+/// operand and returns the result.
+///
+/// \headerfile <x86intrin.h>
+///
+/// \code
+/// unsigned int _blsr_u32(unsigned int a);
+/// \endcode
+///
+/// This intrinsic corresponds to the \c BLSR instruction.
+///
+/// \param a
+/// An unsigned integer containing the operand to be cleared.
+/// \returns An unsigned integer containing the result of clearing the source
+/// operand.
+#define _blsr_u32(a) (__blsr_u32((a)))
+
+/// \brief Counts the number of trailing zero bits in the operand.
+///
+/// \headerfile <x86intrin.h>
+///
+/// \code
+/// unsigned int _tzcnt_u32(unsigned int a);
+/// \endcode
+///
+/// This intrinsic corresponds to the \c TZCNT instruction.
+///
+/// \param a
+/// An unsigned 32-bit integer whose trailing zeros are to be counted.
+/// \returns An unsigned 32-bit integer containing the number of trailing zero
+/// bits in the operand.
+#define _tzcnt_u32(a) (__tzcnt_u32((a)))
+
+/* Define the default attributes for the functions in this file. */
+#define __DEFAULT_FN_ATTRS __attribute__((__always_inline__, __nodebug__, __target__("bmi")))
+
+/* Allow using the tzcnt intrinsics even for non-BMI targets. Since the TZCNT
+ instruction behaves as BSF on non-BMI targets, there is code that expects
+ to use it as a potentially faster version of BSF. */
+#define __RELAXED_FN_ATTRS __attribute__((__always_inline__, __nodebug__))
+
+/// \brief Counts the number of trailing zero bits in the operand.
+///
+/// \headerfile <x86intrin.h>
+///
+/// This intrinsic corresponds to the \c TZCNT instruction.
+///
+/// \param __X
+/// An unsigned 16-bit integer whose trailing zeros are to be counted.
+/// \returns An unsigned 16-bit integer containing the number of trailing zero
+/// bits in the operand.
+static __inline__ unsigned short __RELAXED_FN_ATTRS
+__tzcnt_u16(unsigned short __X)
+{
+ return __X ? __builtin_ctzs(__X) : 16;
+}
+
+/// \brief Performs a bitwise AND of the second operand with the one's
+/// complement of the first operand.
+///
+/// \headerfile <x86intrin.h>
+///
+/// This intrinsic corresponds to the \c ANDN instruction.
+///
+/// \param __X
+/// An unsigned integer containing one of the operands.
+/// \param __Y
+/// An unsigned integer containing one of the operands.
+/// \returns An unsigned integer containing the bitwise AND of the second
+/// operand with the one's complement of the first operand.
+static __inline__ unsigned int __DEFAULT_FN_ATTRS
+__andn_u32(unsigned int __X, unsigned int __Y)
+{
+ return ~__X & __Y;
+}
+
+/* AMD-specified, double-leading-underscore version of BEXTR */
+/// \brief Extracts the specified bits from the first operand and returns them
+/// in the least significant bits of the result.
+///
+/// \headerfile <x86intrin.h>
+///
+/// This intrinsic corresponds to the \c BEXTR instruction.
+///
+/// \param __X
+/// An unsigned integer whose bits are to be extracted.
+/// \param __Y
+/// An unsigned integer used to specify which bits are extracted. Bits [7:0]
+/// specify the index of the least significant bit. Bits [15:8] specify the
+/// number of bits to be extracted.
+/// \returns An unsigned integer whose least significant bits contain the
+/// extracted bits.
+static __inline__ unsigned int __DEFAULT_FN_ATTRS
+__bextr_u32(unsigned int __X, unsigned int __Y)
+{
+ return __builtin_ia32_bextr_u32(__X, __Y);
+}
+
+/* Intel-specified, single-leading-underscore version of BEXTR */
+/// \brief Extracts the specified bits from the first operand and returns them
+/// in the least significant bits of the result.
+///
+/// \headerfile <x86intrin.h>
+///
+/// This intrinsic corresponds to the \c BEXTR instruction.
+///
+/// \param __X
+/// An unsigned integer whose bits are to be extracted.
+/// \param __Y
+/// An unsigned integer used to specify the index of the least significant
+/// bit for the bits to be extracted. Bits [7:0] specify the index.
+/// \param __Z
+/// An unsigned integer used to specify the number of bits to be extracted.
+/// Bits [7:0] specify the number of bits.
+/// \returns An unsigned integer whose least significant bits contain the
+/// extracted bits.
+static __inline__ unsigned int __DEFAULT_FN_ATTRS
+_bextr_u32(unsigned int __X, unsigned int __Y, unsigned int __Z)
+{
+ return __builtin_ia32_bextr_u32 (__X, ((__Y & 0xff) | ((__Z & 0xff) << 8)));
+}
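+
+/* Example (illustrative): extract the 4-bit field starting at bit 8 of
+   0x12345678:
+     _bextr_u32(0x12345678, 8, 4)           == 0x6  // bits [11:8]
+     __bextr_u32(0x12345678, (4 << 8) | 8)  == 0x6  // same, AMD-style control
+*/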
+
+/// \brief Clears all bits in the source except for the least significant bit
+/// containing a value of 1 and returns the result.
+///
+/// \headerfile <x86intrin.h>
+///
+/// This intrinsic corresponds to the \c BLSI instruction.
+///
+/// \param __X
+/// An unsigned integer whose bits are to be cleared.
+/// \returns An unsigned integer containing the result of clearing the bits from
+/// the source operand.
+static __inline__ unsigned int __DEFAULT_FN_ATTRS
+__blsi_u32(unsigned int __X)
+{
+ return __X & -__X;
+}
+
+/// \brief Creates a mask whose bits are set to 1, using bit 0 up to and
+/// including the least significant bit that is set to 1 in the source
+/// operand and returns the result.
+///
+/// \headerfile <x86intrin.h>
+///
+/// This intrinsic corresponds to the \c BLSMSK instruction.
+///
+/// \param __X
+/// An unsigned integer used to create the mask.
+/// \returns An unsigned integer containing the newly created mask.
+static __inline__ unsigned int __DEFAULT_FN_ATTRS
+__blsmsk_u32(unsigned int __X)
+{
+ return __X ^ (__X - 1);
+}
+
+/// \brief Clears the least significant bit that is set to 1 in the source
+/// operand and returns the result.
+///
+/// \headerfile <x86intrin.h>
+///
+/// This intrinsic corresponds to the \c BLSR instruction.
+///
+/// \param __X
+/// An unsigned integer containing the operand to be cleared.
+/// \returns An unsigned integer containing the result of clearing the source
+/// operand.
+static __inline__ unsigned int __DEFAULT_FN_ATTRS
+__blsr_u32(unsigned int __X)
+{
+ return __X & (__X - 1);
+}
+
+/// \brief Counts the number of trailing zero bits in the operand.
+///
+/// \headerfile <x86intrin.h>
+///
+/// This intrinsic corresponds to the \c TZCNT instruction.
+///
+/// \param __X
+/// An unsigned 32-bit integer whose trailing zeros are to be counted.
+/// \returns An unsigned 32-bit integer containing the number of trailing zero
+/// bits in the operand.
+static __inline__ unsigned int __RELAXED_FN_ATTRS
+__tzcnt_u32(unsigned int __X)
+{
+ return __X ? __builtin_ctz(__X) : 32;
+}
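+
+/* Worked example (illustrative): for an operand of 0x68 (binary 0110 1000):
+     __blsi_u32(0x68)   == 0x08  - isolate the lowest set bit
+     __blsmsk_u32(0x68) == 0x0f  - mask up to and including the lowest set bit
+     __blsr_u32(0x68)   == 0x60  - clear the lowest set bit
+     __tzcnt_u32(0x68)  == 3     - count the trailing zero bits */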
+
+/// \brief Counts the number of trailing zero bits in the operand.
+///
+/// \headerfile <x86intrin.h>
+///
+/// This intrinsic corresponds to the \c TZCNT instruction.
+///
+/// \param __X
+/// An unsigned 32-bit integer whose trailing zeros are to be counted.
+/// \returns A 32-bit integer containing the number of trailing zero
+/// bits in the operand.
+static __inline__ int __RELAXED_FN_ATTRS
+_mm_tzcnt_32(unsigned int __X)
+{
+ return __X ? __builtin_ctz(__X) : 32;
+}
+
+#ifdef __x86_64__
+
+/// \brief Performs a bitwise AND of the second operand with the one's
+/// complement of the first operand.
+///
+/// \headerfile <x86intrin.h>
+///
+/// \code
+/// unsigned long long _andn_u64 (unsigned long long a, unsigned long long b);
+/// \endcode
+///
+/// This intrinsic corresponds to the \c ANDN instruction.
+///
+/// \param a
+/// An unsigned 64-bit integer containing one of the operands.
+/// \param b
+/// An unsigned 64-bit integer containing one of the operands.
+/// \returns An unsigned 64-bit integer containing the bitwise AND of the second
+/// operand with the one's complement of the first operand.
+#define _andn_u64(a, b) (__andn_u64((a), (b)))
+
+/* _bextr_u64 != __bextr_u64: as with the 32-bit forms, the Intel and AMD
+   versions of BEXTR take different arguments, so no macro mapping is
+   provided. */
+/// \brief Clears all bits in the source except for the least significant bit
+/// containing a value of 1 and returns the result.
+///
+/// \headerfile <x86intrin.h>
+///
+/// \code
+/// unsigned long long _blsi_u64(unsigned long long a);
+/// \endcode
+///
+/// This intrinsic corresponds to the \c BLSI instruction.
+///
+/// \param a
+/// An unsigned 64-bit integer whose bits are to be cleared.
+/// \returns An unsigned 64-bit integer containing the result of clearing the
+/// bits from the source operand.
+#define _blsi_u64(a) (__blsi_u64((a)))
+
+/// \brief Creates a mask whose bits are set to 1, using bit 0 up to and
+/// including the least significant bit that is set to 1 in the source
+/// operand and returns the result.
+///
+/// \headerfile <x86intrin.h>
+///
+/// \code
+/// unsigned long long _blsmsk_u64(unsigned long long a);
+/// \endcode
+///
+/// This intrinsic corresponds to the \c BLSMSK instruction.
+///
+/// \param a
+/// An unsigned 64-bit integer used to create the mask.
+/// \returns An unsigned 64-bit integer containing the newly created mask.
+#define _blsmsk_u64(a) (__blsmsk_u64((a)))
+
+/// \brief Clears the least significant bit that is set to 1 in the source
+/// operand and returns the result.
+///
+/// \headerfile <x86intrin.h>
+///
+/// \code
+/// unsigned long long _blsr_u64(unsigned long long a);
+/// \endcode
+///
+/// This intrinsic corresponds to the \c BLSR instruction.
+///
+/// \param a
+/// An unsigned 64-bit integer containing the operand to be cleared.
+/// \returns An unsigned 64-bit integer containing the result of clearing the
+/// source operand.
+#define _blsr_u64(a) (__blsr_u64((a)))
+
+/// \brief Counts the number of trailing zero bits in the operand.
+///
+/// \headerfile <x86intrin.h>
+///
+/// \code
+/// unsigned long long _tzcnt_u64(unsigned long long a);
+/// \endcode
+///
+/// This intrinsic corresponds to the \c TZCNT instruction.
+///
+/// \param a
+/// An unsigned 64-bit integer whose trailing zeros are to be counted.
+/// \returns An unsigned 64-bit integer containing the number of trailing zero
+/// bits in the operand.
+#define _tzcnt_u64(a) (__tzcnt_u64((a)))
+
+/// \brief Performs a bitwise AND of the second operand with the one's
+/// complement of the first operand.
+///
+/// \headerfile <x86intrin.h>
+///
+/// This intrinsic corresponds to the \c ANDN instruction.
+///
+/// \param __X
+/// An unsigned 64-bit integer containing one of the operands.
+/// \param __Y
+/// An unsigned 64-bit integer containing one of the operands.
+/// \returns An unsigned 64-bit integer containing the bitwise AND of the second
+/// operand with the one's complement of the first operand.
+static __inline__ unsigned long long __DEFAULT_FN_ATTRS
+__andn_u64 (unsigned long long __X, unsigned long long __Y)
+{
+ return ~__X & __Y;
+}
+
+/* AMD-specified, double-leading-underscore version of BEXTR */
+/// \brief Extracts the specified bits from the first operand and returns them
+/// in the least significant bits of the result.
+///
+/// \headerfile <x86intrin.h>
+///
+/// This intrinsic corresponds to the \c BEXTR instruction.
+///
+/// \param __X
+/// An unsigned 64-bit integer whose bits are to be extracted.
+/// \param __Y
+/// An unsigned 64-bit integer used to specify which bits are extracted. Bits
+/// [7:0] specify the index of the least significant bit. Bits [15:8] specify
+/// the number of bits to be extracted.
+/// \returns An unsigned 64-bit integer whose least significant bits contain the
+/// extracted bits.
+static __inline__ unsigned long long __DEFAULT_FN_ATTRS
+__bextr_u64(unsigned long long __X, unsigned long long __Y)
+{
+ return __builtin_ia32_bextr_u64(__X, __Y);
+}
+
+/* Intel-specified, single-leading-underscore version of BEXTR */
+/// \brief Extracts the specified bits from the first operand and returns them
+/// in the least significant bits of the result.
+///
+/// \headerfile <x86intrin.h>
+///
+/// This intrinsic corresponds to the \c BEXTR instruction.
+///
+/// \param __X
+/// An unsigned 64-bit integer whose bits are to be extracted.
+/// \param __Y
+/// An unsigned integer used to specify the index of the least significant
+/// bit for the bits to be extracted. Bits [7:0] specify the index.
+/// \param __Z
+/// An unsigned integer used to specify the number of bits to be extracted.
+/// Bits [7:0] specify the number of bits.
+/// \returns An unsigned 64-bit integer whose least significant bits contain the
+/// extracted bits.
+static __inline__ unsigned long long __DEFAULT_FN_ATTRS
+_bextr_u64(unsigned long long __X, unsigned int __Y, unsigned int __Z)
+{
+ return __builtin_ia32_bextr_u64 (__X, ((__Y & 0xff) | ((__Z & 0xff) << 8)));
+}
+
+/// \brief Clears all bits in the source except for the least significant bit
+/// containing a value of 1 and returns the result.
+///
+/// \headerfile <x86intrin.h>
+///
+/// This intrinsic corresponds to the \c BLSI instruction.
+///
+/// \param __X
+/// An unsigned 64-bit integer whose bits are to be cleared.
+/// \returns An unsigned 64-bit integer containing the result of clearing the
+/// bits from the source operand.
+static __inline__ unsigned long long __DEFAULT_FN_ATTRS
+__blsi_u64(unsigned long long __X)
+{
+ return __X & -__X;
+}
+
+/// \brief Creates a mask whose bits are set to 1, using bit 0 up to and
+/// including the least significant bit that is set to 1 in the source
+/// operand and returns the result.
+///
+/// \headerfile <x86intrin.h>
+///
+/// This intrinsic corresponds to the \c BLSMSK instruction.
+///
+/// \param __X
+/// An unsigned 64-bit integer used to create the mask.
+/// \returns An unsigned 64-bit integer containing the newly created mask.
+static __inline__ unsigned long long __DEFAULT_FN_ATTRS
+__blsmsk_u64(unsigned long long __X)
+{
+ return __X ^ (__X - 1);
+}
+
+/// \brief Clears the least significant bit that is set to 1 in the source
+/// operand and returns the result.
+///
+/// \headerfile <x86intrin.h>
+///
+/// This intrinsic corresponds to the \c BLSR instruction.
+///
+/// \param __X
+/// An unsigned 64-bit integer containing the operand to be cleared.
+/// \returns An unsigned 64-bit integer containing the result of clearing the
+/// source operand.
+static __inline__ unsigned long long __DEFAULT_FN_ATTRS
+__blsr_u64(unsigned long long __X)
+{
+ return __X & (__X - 1);
+}
+
+/// \brief Counts the number of trailing zero bits in the operand.
+///
+/// \headerfile <x86intrin.h>
+///
+/// This intrinsic corresponds to the \c TZCNT instruction.
+///
+/// \param __X
+/// An unsigned 64-bit integer whose trailing zeros are to be counted.
+/// \returns An unsigned 64-bit integer containing the number of trailing zero
+/// bits in the operand.
+static __inline__ unsigned long long __RELAXED_FN_ATTRS
+__tzcnt_u64(unsigned long long __X)
+{
+ return __X ? __builtin_ctzll(__X) : 64;
+}
+
+/// \brief Counts the number of trailing zero bits in the operand.
+///
+/// \headerfile <x86intrin.h>
+///
+/// This intrinsic corresponds to the \c TZCNT instruction.
+///
+/// \param __X
+/// An unsigned 64-bit integer whose trailing zeros are to be counted.
+/// \returns A 64-bit integer containing the number of trailing zero
+/// bits in the operand.
+static __inline__ long long __RELAXED_FN_ATTRS
+_mm_tzcnt_64(unsigned long long __X)
+{
+ return __X ? __builtin_ctzll(__X) : 64;
+}
+
+#endif /* __x86_64__ */
+
+#undef __DEFAULT_FN_ATTRS
+#undef __RELAXED_FN_ATTRS
+
+#endif /* __BMIINTRIN_H */
diff --git a/current/clang-include/clflushoptintrin.h b/current/clang-include/clflushoptintrin.h
new file mode 100644
index 0000000..60e0ead
--- /dev/null
+++ b/current/clang-include/clflushoptintrin.h
@@ -0,0 +1,41 @@
+/*===---- clflushoptintrin.h - CLFLUSHOPT intrinsic ------------------------===
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+ * THE SOFTWARE.
+ *
+ *===-----------------------------------------------------------------------===
+ */
+
+#ifndef __IMMINTRIN_H
+#error "Never use <clflushoptintrin.h> directly; include <immintrin.h> instead."
+#endif
+
+#ifndef __CLFLUSHOPTINTRIN_H
+#define __CLFLUSHOPTINTRIN_H
+
+/* Define the default attributes for the functions in this file. */
+#define __DEFAULT_FN_ATTRS __attribute__((__always_inline__, __nodebug__, __target__("clflushopt")))
+
+static __inline__ void __DEFAULT_FN_ATTRS
+_mm_clflushopt(char * __m) {
+ __builtin_ia32_clflushopt(__m);
+}
+
+#undef __DEFAULT_FN_ATTRS
+
+#endif
diff --git a/current/clang-include/cpuid.h b/current/clang-include/cpuid.h
new file mode 100644
index 0000000..5da02e0
--- /dev/null
+++ b/current/clang-include/cpuid.h
@@ -0,0 +1,209 @@
+/*===---- cpuid.h - X86 cpu model detection --------------------------------===
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+ * THE SOFTWARE.
+ *
+ *===-----------------------------------------------------------------------===
+ */
+
+#if !(__x86_64__ || __i386__)
+#error this header is for x86 only
+#endif
+
+/* Responses to the identification request made with %eax set to 0 */
+/* AMD: "AuthenticAMD" */
+#define signature_AMD_ebx 0x68747541
+#define signature_AMD_edx 0x69746e65
+#define signature_AMD_ecx 0x444d4163
+/* CENTAUR: "CentaurHauls" */
+#define signature_CENTAUR_ebx 0x746e6543
+#define signature_CENTAUR_edx 0x48727561
+#define signature_CENTAUR_ecx 0x736c7561
+/* CYRIX: "CyrixInstead" */
+#define signature_CYRIX_ebx 0x69727943
+#define signature_CYRIX_edx 0x736e4978
+#define signature_CYRIX_ecx 0x64616574
+/* INTEL: "GenuineIntel" */
+#define signature_INTEL_ebx 0x756e6547
+#define signature_INTEL_edx 0x49656e69
+#define signature_INTEL_ecx 0x6c65746e
+/* TM1: "TransmetaCPU" */
+#define signature_TM1_ebx 0x6e617254
+#define signature_TM1_edx 0x74656d73
+#define signature_TM1_ecx 0x55504361
+/* TM2: "GenuineTMx86" */
+#define signature_TM2_ebx 0x756e6547
+#define signature_TM2_edx 0x54656e69
+#define signature_TM2_ecx 0x3638784d
+/* NSC: "Geode by NSC" */
+#define signature_NSC_ebx 0x646f6547
+#define signature_NSC_edx 0x43534e20
+#define signature_NSC_ecx 0x79622065
+/* NEXGEN: "NexGenDriven" */
+#define signature_NEXGEN_ebx 0x4778654e
+#define signature_NEXGEN_edx 0x72446e65
+#define signature_NEXGEN_ecx 0x6e657669
+/* RISE: "RiseRiseRise" */
+#define signature_RISE_ebx 0x65736952
+#define signature_RISE_edx 0x65736952
+#define signature_RISE_ecx 0x65736952
+/* SIS: "SiS SiS SiS " */
+#define signature_SIS_ebx 0x20536953
+#define signature_SIS_edx 0x20536953
+#define signature_SIS_ecx 0x20536953
+/* UMC: "UMC UMC UMC " */
+#define signature_UMC_ebx 0x20434d55
+#define signature_UMC_edx 0x20434d55
+#define signature_UMC_ecx 0x20434d55
+/* VIA: "VIA VIA VIA " */
+#define signature_VIA_ebx 0x20414956
+#define signature_VIA_edx 0x20414956
+#define signature_VIA_ecx 0x20414956
+/* VORTEX: "Vortex86 SoC" */
+#define signature_VORTEX_ebx 0x74726f56
+#define signature_VORTEX_edx 0x36387865
+#define signature_VORTEX_ecx 0x436f5320
+
+/* Features in %ecx for level 1 */
+#define bit_SSE3 0x00000001
+#define bit_PCLMULQDQ 0x00000002
+#define bit_DTES64 0x00000004
+#define bit_MONITOR 0x00000008
+#define bit_DSCPL 0x00000010
+#define bit_VMX 0x00000020
+#define bit_SMX 0x00000040
+#define bit_EIST 0x00000080
+#define bit_TM2 0x00000100
+#define bit_SSSE3 0x00000200
+#define bit_CNXTID 0x00000400
+#define bit_FMA 0x00001000
+#define bit_CMPXCHG16B 0x00002000
+#define bit_xTPR 0x00004000
+#define bit_PDCM 0x00008000
+#define bit_PCID 0x00020000
+#define bit_DCA 0x00040000
+#define bit_SSE41 0x00080000
+#define bit_SSE42 0x00100000
+#define bit_x2APIC 0x00200000
+#define bit_MOVBE 0x00400000
+#define bit_POPCNT 0x00800000
+#define bit_TSCDeadline 0x01000000
+#define bit_AESNI 0x02000000
+#define bit_XSAVE 0x04000000
+#define bit_OSXSAVE 0x08000000
+#define bit_AVX 0x10000000
+#define bit_RDRND 0x40000000
+
+/* Features in %edx for level 1 */
+#define bit_FPU 0x00000001
+#define bit_VME 0x00000002
+#define bit_DE 0x00000004
+#define bit_PSE 0x00000008
+#define bit_TSC 0x00000010
+#define bit_MSR 0x00000020
+#define bit_PAE 0x00000040
+#define bit_MCE 0x00000080
+#define bit_CX8 0x00000100
+#define bit_APIC 0x00000200
+#define bit_SEP 0x00000800
+#define bit_MTRR 0x00001000
+#define bit_PGE 0x00002000
+#define bit_MCA 0x00004000
+#define bit_CMOV 0x00008000
+#define bit_PAT 0x00010000
+#define bit_PSE36 0x00020000
+#define bit_PSN 0x00040000
+#define bit_CLFSH 0x00080000
+#define bit_DS 0x00200000
+#define bit_ACPI 0x00400000
+#define bit_MMX 0x00800000
+#define bit_FXSR 0x01000000
+#define bit_FXSAVE bit_FXSR /* for gcc compat */
+#define bit_SSE 0x02000000
+#define bit_SSE2 0x04000000
+#define bit_SS 0x08000000
+#define bit_HTT 0x10000000
+#define bit_TM 0x20000000
+#define bit_PBE 0x80000000
+
+/* Features in %ebx for level 7 sub-leaf 0 */
+#define bit_FSGSBASE 0x00000001
+#define bit_SMEP 0x00000080
+#define bit_ENH_MOVSB 0x00000200
+
+#if __i386__
+#define __cpuid(__level, __eax, __ebx, __ecx, __edx) \
+ __asm("cpuid" : "=a"(__eax), "=b" (__ebx), "=c"(__ecx), "=d"(__edx) \
+ : "0"(__level))
+
+#define __cpuid_count(__level, __count, __eax, __ebx, __ecx, __edx) \
+ __asm("cpuid" : "=a"(__eax), "=b" (__ebx), "=c"(__ecx), "=d"(__edx) \
+ : "0"(__level), "2"(__count))
+#else
+/* x86-64 uses %rbx as the base register, so preserve it. */
+#define __cpuid(__level, __eax, __ebx, __ecx, __edx) \
+ __asm(" xchgq %%rbx,%q1\n" \
+ " cpuid\n" \
+ " xchgq %%rbx,%q1" \
+ : "=a"(__eax), "=r" (__ebx), "=c"(__ecx), "=d"(__edx) \
+ : "0"(__level))
+
+#define __cpuid_count(__level, __count, __eax, __ebx, __ecx, __edx) \
+ __asm(" xchgq %%rbx,%q1\n" \
+ " cpuid\n" \
+ " xchgq %%rbx,%q1" \
+ : "=a"(__eax), "=r" (__ebx), "=c"(__ecx), "=d"(__edx) \
+ : "0"(__level), "2"(__count))
+#endif
+
+static __inline int __get_cpuid (unsigned int __level, unsigned int *__eax,
+ unsigned int *__ebx, unsigned int *__ecx,
+ unsigned int *__edx) {
+ __cpuid(__level, *__eax, *__ebx, *__ecx, *__edx);
+ return 1;
+}
+
+static __inline int __get_cpuid_max (unsigned int __level, unsigned int *__sig)
+{
+ unsigned int __eax, __ebx, __ecx, __edx;
+#if __i386__
+ int __cpuid_supported;
+
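+  /* Toggle bit 21 (the ID flag) in EFLAGS; if the change sticks, this CPU
+     supports the CPUID instruction. */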
+ __asm(" pushfl\n"
+ " popl %%eax\n"
+ " movl %%eax,%%ecx\n"
+ " xorl $0x00200000,%%eax\n"
+ " pushl %%eax\n"
+ " popfl\n"
+ " pushfl\n"
+ " popl %%eax\n"
+ " movl $0,%0\n"
+ " cmpl %%eax,%%ecx\n"
+ " je 1f\n"
+ " movl $1,%0\n"
+ "1:"
+ : "=r" (__cpuid_supported) : : "eax", "ecx");
+ if (!__cpuid_supported)
+ return 0;
+#endif
+
+ __cpuid(__level, __eax, __ebx, __ecx, __edx);
+ if (__sig)
+ *__sig = __ebx;
+ return __eax;
+}
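+
+/* Usage sketch (illustrative, not part of the header): query the CPU vendor
+   and then test a level-1 feature bit.
+
+     unsigned int __sig, __eax, __ebx, __ecx, __edx;
+     if (__get_cpuid_max(0, &__sig) != 0 && __sig == signature_INTEL_ebx) {
+       __get_cpuid(1, &__eax, &__ebx, &__ecx, &__edx);
+       int __has_sse2 = (__edx & bit_SSE2) != 0;
+     }
+*/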
diff --git a/current/clang-include/cuda_builtin_vars.h b/current/clang-include/cuda_builtin_vars.h
new file mode 100644
index 0000000..6f5eb9c
--- /dev/null
+++ b/current/clang-include/cuda_builtin_vars.h
@@ -0,0 +1,126 @@
+/*===---- cuda_builtin_vars.h - CUDA built-in variables ---------------------===
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+ * THE SOFTWARE.
+ *
+ *===-----------------------------------------------------------------------===
+ */
+
+#ifndef __CUDA_BUILTIN_VARS_H
+#define __CUDA_BUILTIN_VARS_H
+
+// Forward declares from vector_types.h.
+struct uint3;
+struct dim3;
+
+// This file implements built-in CUDA variables using __declspec(property).
+// https://msdn.microsoft.com/en-us/library/yhfk0thd.aspx
+// All read accesses of built-in variable fields get converted into calls to a
+// getter function which in turn calls the appropriate builtin to fetch the
+// value.
+//
+// Example:
+// int x = threadIdx.x;
+// IR output:
+// %0 = call i32 @llvm.nvvm.read.ptx.sreg.tid.x() #3
+// PTX output:
+// mov.u32 %r2, %tid.x;
+
+#define __CUDA_DEVICE_BUILTIN(FIELD, INTRINSIC) \
+ __declspec(property(get = __fetch_builtin_##FIELD)) unsigned int FIELD; \
+ static inline __attribute__((always_inline)) \
+ __attribute__((device)) unsigned int __fetch_builtin_##FIELD(void) { \
+ return INTRINSIC; \
+ }
+
+#if __cplusplus >= 201103L
+#define __DELETE =delete
+#else
+#define __DELETE
+#endif
+
+// Make sure nobody can create instances of the special variable types.  nvcc
+// also disallows taking the address of special variables, so we disable the
+// address-of operator as well.
+#define __CUDA_DISALLOW_BUILTINVAR_ACCESS(TypeName) \
+ __attribute__((device)) TypeName() __DELETE; \
+ __attribute__((device)) TypeName(const TypeName &) __DELETE; \
+ __attribute__((device)) void operator=(const TypeName &) const __DELETE; \
+ __attribute__((device)) TypeName *operator&() const __DELETE
+
+struct __cuda_builtin_threadIdx_t {
+ __CUDA_DEVICE_BUILTIN(x,__nvvm_read_ptx_sreg_tid_x());
+ __CUDA_DEVICE_BUILTIN(y,__nvvm_read_ptx_sreg_tid_y());
+ __CUDA_DEVICE_BUILTIN(z,__nvvm_read_ptx_sreg_tid_z());
+ // threadIdx should be convertible to uint3 (in fact in nvcc, it *is* a
+ // uint3). This function is defined after we pull in vector_types.h.
+ __attribute__((device)) operator uint3() const;
+private:
+ __CUDA_DISALLOW_BUILTINVAR_ACCESS(__cuda_builtin_threadIdx_t);
+};
+
+struct __cuda_builtin_blockIdx_t {
+ __CUDA_DEVICE_BUILTIN(x,__nvvm_read_ptx_sreg_ctaid_x());
+ __CUDA_DEVICE_BUILTIN(y,__nvvm_read_ptx_sreg_ctaid_y());
+ __CUDA_DEVICE_BUILTIN(z,__nvvm_read_ptx_sreg_ctaid_z());
+ // blockIdx should be convertible to uint3 (in fact in nvcc, it *is* a
+ // uint3). This function is defined after we pull in vector_types.h.
+ __attribute__((device)) operator uint3() const;
+private:
+ __CUDA_DISALLOW_BUILTINVAR_ACCESS(__cuda_builtin_blockIdx_t);
+};
+
+struct __cuda_builtin_blockDim_t {
+ __CUDA_DEVICE_BUILTIN(x,__nvvm_read_ptx_sreg_ntid_x());
+ __CUDA_DEVICE_BUILTIN(y,__nvvm_read_ptx_sreg_ntid_y());
+ __CUDA_DEVICE_BUILTIN(z,__nvvm_read_ptx_sreg_ntid_z());
+ // blockDim should be convertible to dim3 (in fact in nvcc, it *is* a
+ // dim3). This function is defined after we pull in vector_types.h.
+ __attribute__((device)) operator dim3() const;
+private:
+ __CUDA_DISALLOW_BUILTINVAR_ACCESS(__cuda_builtin_blockDim_t);
+};
+
+struct __cuda_builtin_gridDim_t {
+ __CUDA_DEVICE_BUILTIN(x,__nvvm_read_ptx_sreg_nctaid_x());
+ __CUDA_DEVICE_BUILTIN(y,__nvvm_read_ptx_sreg_nctaid_y());
+ __CUDA_DEVICE_BUILTIN(z,__nvvm_read_ptx_sreg_nctaid_z());
+ // gridDim should be convertible to dim3 (in fact in nvcc, it *is* a
+ // dim3). This function is defined after we pull in vector_types.h.
+ __attribute__((device)) operator dim3() const;
+private:
+ __CUDA_DISALLOW_BUILTINVAR_ACCESS(__cuda_builtin_gridDim_t);
+};
+
+#define __CUDA_BUILTIN_VAR \
+ extern const __attribute__((device)) __attribute__((weak))
+__CUDA_BUILTIN_VAR __cuda_builtin_threadIdx_t threadIdx;
+__CUDA_BUILTIN_VAR __cuda_builtin_blockIdx_t blockIdx;
+__CUDA_BUILTIN_VAR __cuda_builtin_blockDim_t blockDim;
+__CUDA_BUILTIN_VAR __cuda_builtin_gridDim_t gridDim;
+
+// warpSize should translate to a read of %WARP_SZ, but there's currently no
+// builtin to do so. According to PTX v4.2 docs 'to date, all target
+// architectures have a WARP_SZ value of 32'.
+__attribute__((device)) const int warpSize = 32;
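+
+// Usage sketch (illustrative; assumes the remaining CUDA headers are in
+// scope). Each built-in field read below compiles down to the corresponding
+// PTX special-register read, as in the example near the top of this file:
+//
+//   __global__ void fill(int *out) {
+//     int i = blockIdx.x * blockDim.x + threadIdx.x;
+//     out[i] = i;
+//   }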
+
+#undef __CUDA_DEVICE_BUILTIN
+#undef __CUDA_BUILTIN_VAR
+#undef __CUDA_DISALLOW_BUILTINVAR_ACCESS
+
+#endif /* __CUDA_BUILTIN_VARS_H */
diff --git a/current/clang-include/emmintrin.h b/current/clang-include/emmintrin.h
new file mode 100644
index 0000000..c78d059
--- /dev/null
+++ b/current/clang-include/emmintrin.h
@@ -0,0 +1,2458 @@
+/*===---- emmintrin.h - SSE2 intrinsics ------------------------------------===
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+ * THE SOFTWARE.
+ *
+ *===-----------------------------------------------------------------------===
+ */
+
+#ifndef __EMMINTRIN_H
+#define __EMMINTRIN_H
+
+#include <xmmintrin.h>
+
+typedef double __m128d __attribute__((__vector_size__(16)));
+typedef long long __m128i __attribute__((__vector_size__(16)));
+
+/* Type defines. */
+typedef double __v2df __attribute__ ((__vector_size__ (16)));
+typedef long long __v2di __attribute__ ((__vector_size__ (16)));
+typedef short __v8hi __attribute__((__vector_size__(16)));
+typedef char __v16qi __attribute__((__vector_size__(16)));
+
+/* Unsigned types */
+typedef unsigned long long __v2du __attribute__ ((__vector_size__ (16)));
+typedef unsigned short __v8hu __attribute__((__vector_size__(16)));
+typedef unsigned char __v16qu __attribute__((__vector_size__(16)));
+
+/* We need an explicitly signed variant for char. Note that this shouldn't
+ * appear in the interface though. */
+typedef signed char __v16qs __attribute__((__vector_size__(16)));
+
+#include <f16cintrin.h>
+
+/* Define the default attributes for the functions in this file. */
+#define __DEFAULT_FN_ATTRS __attribute__((__always_inline__, __nodebug__, __target__("sse2")))
+
+static __inline__ __m128d __DEFAULT_FN_ATTRS
+_mm_add_sd(__m128d __a, __m128d __b)
+{
+ __a[0] += __b[0];
+ return __a;
+}
+
+static __inline__ __m128d __DEFAULT_FN_ATTRS
+_mm_add_pd(__m128d __a, __m128d __b)
+{
+ return (__m128d)((__v2df)__a + (__v2df)__b);
+}
+
+static __inline__ __m128d __DEFAULT_FN_ATTRS
+_mm_sub_sd(__m128d __a, __m128d __b)
+{
+ __a[0] -= __b[0];
+ return __a;
+}
+
+static __inline__ __m128d __DEFAULT_FN_ATTRS
+_mm_sub_pd(__m128d __a, __m128d __b)
+{
+ return (__m128d)((__v2df)__a - (__v2df)__b);
+}
+
+static __inline__ __m128d __DEFAULT_FN_ATTRS
+_mm_mul_sd(__m128d __a, __m128d __b)
+{
+ __a[0] *= __b[0];
+ return __a;
+}
+
+static __inline__ __m128d __DEFAULT_FN_ATTRS
+_mm_mul_pd(__m128d __a, __m128d __b)
+{
+ return (__m128d)((__v2df)__a * (__v2df)__b);
+}
+
+static __inline__ __m128d __DEFAULT_FN_ATTRS
+_mm_div_sd(__m128d __a, __m128d __b)
+{
+ __a[0] /= __b[0];
+ return __a;
+}
+
+static __inline__ __m128d __DEFAULT_FN_ATTRS
+_mm_div_pd(__m128d __a, __m128d __b)
+{
+ return (__m128d)((__v2df)__a / (__v2df)__b);
+}
+
+static __inline__ __m128d __DEFAULT_FN_ATTRS
+_mm_sqrt_sd(__m128d __a, __m128d __b)
+{
+ __m128d __c = __builtin_ia32_sqrtsd((__v2df)__b);
+ return (__m128d) { __c[0], __a[1] };
+}
+
+static __inline__ __m128d __DEFAULT_FN_ATTRS
+_mm_sqrt_pd(__m128d __a)
+{
+ return __builtin_ia32_sqrtpd((__v2df)__a);
+}
+
+static __inline__ __m128d __DEFAULT_FN_ATTRS
+_mm_min_sd(__m128d __a, __m128d __b)
+{
+ return __builtin_ia32_minsd((__v2df)__a, (__v2df)__b);
+}
+
+static __inline__ __m128d __DEFAULT_FN_ATTRS
+_mm_min_pd(__m128d __a, __m128d __b)
+{
+ return __builtin_ia32_minpd((__v2df)__a, (__v2df)__b);
+}
+
+static __inline__ __m128d __DEFAULT_FN_ATTRS
+_mm_max_sd(__m128d __a, __m128d __b)
+{
+ return __builtin_ia32_maxsd((__v2df)__a, (__v2df)__b);
+}
+
+static __inline__ __m128d __DEFAULT_FN_ATTRS
+_mm_max_pd(__m128d __a, __m128d __b)
+{
+ return __builtin_ia32_maxpd((__v2df)__a, (__v2df)__b);
+}
+
+static __inline__ __m128d __DEFAULT_FN_ATTRS
+_mm_and_pd(__m128d __a, __m128d __b)
+{
+ return (__m128d)((__v4su)__a & (__v4su)__b);
+}
+
+static __inline__ __m128d __DEFAULT_FN_ATTRS
+_mm_andnot_pd(__m128d __a, __m128d __b)
+{
+ return (__m128d)(~(__v4su)__a & (__v4su)__b);
+}
+
+static __inline__ __m128d __DEFAULT_FN_ATTRS
+_mm_or_pd(__m128d __a, __m128d __b)
+{
+ return (__m128d)((__v4su)__a | (__v4su)__b);
+}
+
+static __inline__ __m128d __DEFAULT_FN_ATTRS
+_mm_xor_pd(__m128d __a, __m128d __b)
+{
+ return (__m128d)((__v4su)__a ^ (__v4su)__b);
+}
+
+static __inline__ __m128d __DEFAULT_FN_ATTRS
+_mm_cmpeq_pd(__m128d __a, __m128d __b)
+{
+ return (__m128d)__builtin_ia32_cmpeqpd((__v2df)__a, (__v2df)__b);
+}
+
+static __inline__ __m128d __DEFAULT_FN_ATTRS
+_mm_cmplt_pd(__m128d __a, __m128d __b)
+{
+ return (__m128d)__builtin_ia32_cmpltpd((__v2df)__a, (__v2df)__b);
+}
+
+static __inline__ __m128d __DEFAULT_FN_ATTRS
+_mm_cmple_pd(__m128d __a, __m128d __b)
+{
+ return (__m128d)__builtin_ia32_cmplepd((__v2df)__a, (__v2df)__b);
+}
+
+static __inline__ __m128d __DEFAULT_FN_ATTRS
+_mm_cmpgt_pd(__m128d __a, __m128d __b)
+{
+ return (__m128d)__builtin_ia32_cmpltpd((__v2df)__b, (__v2df)__a);
+}
+
+static __inline__ __m128d __DEFAULT_FN_ATTRS
+_mm_cmpge_pd(__m128d __a, __m128d __b)
+{
+ return (__m128d)__builtin_ia32_cmplepd((__v2df)__b, (__v2df)__a);
+}
+
+static __inline__ __m128d __DEFAULT_FN_ATTRS
+_mm_cmpord_pd(__m128d __a, __m128d __b)
+{
+ return (__m128d)__builtin_ia32_cmpordpd((__v2df)__a, (__v2df)__b);
+}
+
+static __inline__ __m128d __DEFAULT_FN_ATTRS
+_mm_cmpunord_pd(__m128d __a, __m128d __b)
+{
+ return (__m128d)__builtin_ia32_cmpunordpd((__v2df)__a, (__v2df)__b);
+}
+
+static __inline__ __m128d __DEFAULT_FN_ATTRS
+_mm_cmpneq_pd(__m128d __a, __m128d __b)
+{
+ return (__m128d)__builtin_ia32_cmpneqpd((__v2df)__a, (__v2df)__b);
+}
+
+static __inline__ __m128d __DEFAULT_FN_ATTRS
+_mm_cmpnlt_pd(__m128d __a, __m128d __b)
+{
+ return (__m128d)__builtin_ia32_cmpnltpd((__v2df)__a, (__v2df)__b);
+}
+
+static __inline__ __m128d __DEFAULT_FN_ATTRS
+_mm_cmpnle_pd(__m128d __a, __m128d __b)
+{
+ return (__m128d)__builtin_ia32_cmpnlepd((__v2df)__a, (__v2df)__b);
+}
+
+static __inline__ __m128d __DEFAULT_FN_ATTRS
+_mm_cmpngt_pd(__m128d __a, __m128d __b)
+{
+ return (__m128d)__builtin_ia32_cmpnltpd((__v2df)__b, (__v2df)__a);
+}
+
+static __inline__ __m128d __DEFAULT_FN_ATTRS
+_mm_cmpnge_pd(__m128d __a, __m128d __b)
+{
+ return (__m128d)__builtin_ia32_cmpnlepd((__v2df)__b, (__v2df)__a);
+}
+
+static __inline__ __m128d __DEFAULT_FN_ATTRS
+_mm_cmpeq_sd(__m128d __a, __m128d __b)
+{
+ return (__m128d)__builtin_ia32_cmpeqsd((__v2df)__a, (__v2df)__b);
+}
+
+static __inline__ __m128d __DEFAULT_FN_ATTRS
+_mm_cmplt_sd(__m128d __a, __m128d __b)
+{
+ return (__m128d)__builtin_ia32_cmpltsd((__v2df)__a, (__v2df)__b);
+}
+
+static __inline__ __m128d __DEFAULT_FN_ATTRS
+_mm_cmple_sd(__m128d __a, __m128d __b)
+{
+ return (__m128d)__builtin_ia32_cmplesd((__v2df)__a, (__v2df)__b);
+}
+
+static __inline__ __m128d __DEFAULT_FN_ATTRS
+_mm_cmpgt_sd(__m128d __a, __m128d __b)
+{
+ __m128d __c = __builtin_ia32_cmpltsd((__v2df)__b, (__v2df)__a);
+ return (__m128d) { __c[0], __a[1] };
+}
+
+static __inline__ __m128d __DEFAULT_FN_ATTRS
+_mm_cmpge_sd(__m128d __a, __m128d __b)
+{
+ __m128d __c = __builtin_ia32_cmplesd((__v2df)__b, (__v2df)__a);
+ return (__m128d) { __c[0], __a[1] };
+}
+
+static __inline__ __m128d __DEFAULT_FN_ATTRS
+_mm_cmpord_sd(__m128d __a, __m128d __b)
+{
+ return (__m128d)__builtin_ia32_cmpordsd((__v2df)__a, (__v2df)__b);
+}
+
+static __inline__ __m128d __DEFAULT_FN_ATTRS
+_mm_cmpunord_sd(__m128d __a, __m128d __b)
+{
+ return (__m128d)__builtin_ia32_cmpunordsd((__v2df)__a, (__v2df)__b);
+}
+
+static __inline__ __m128d __DEFAULT_FN_ATTRS
+_mm_cmpneq_sd(__m128d __a, __m128d __b)
+{
+ return (__m128d)__builtin_ia32_cmpneqsd((__v2df)__a, (__v2df)__b);
+}
+
+static __inline__ __m128d __DEFAULT_FN_ATTRS
+_mm_cmpnlt_sd(__m128d __a, __m128d __b)
+{
+ return (__m128d)__builtin_ia32_cmpnltsd((__v2df)__a, (__v2df)__b);
+}
+
+static __inline__ __m128d __DEFAULT_FN_ATTRS
+_mm_cmpnle_sd(__m128d __a, __m128d __b)
+{
+ return (__m128d)__builtin_ia32_cmpnlesd((__v2df)__a, (__v2df)__b);
+}
+
+static __inline__ __m128d __DEFAULT_FN_ATTRS
+_mm_cmpngt_sd(__m128d __a, __m128d __b)
+{
+ __m128d __c = __builtin_ia32_cmpnltsd((__v2df)__b, (__v2df)__a);
+ return (__m128d) { __c[0], __a[1] };
+}
+
+static __inline__ __m128d __DEFAULT_FN_ATTRS
+_mm_cmpnge_sd(__m128d __a, __m128d __b)
+{
+ __m128d __c = __builtin_ia32_cmpnlesd((__v2df)__b, (__v2df)__a);
+ return (__m128d) { __c[0], __a[1] };
+}
+
+static __inline__ int __DEFAULT_FN_ATTRS
+_mm_comieq_sd(__m128d __a, __m128d __b)
+{
+ return __builtin_ia32_comisdeq((__v2df)__a, (__v2df)__b);
+}
+
+static __inline__ int __DEFAULT_FN_ATTRS
+_mm_comilt_sd(__m128d __a, __m128d __b)
+{
+ return __builtin_ia32_comisdlt((__v2df)__a, (__v2df)__b);
+}
+
+static __inline__ int __DEFAULT_FN_ATTRS
+_mm_comile_sd(__m128d __a, __m128d __b)
+{
+ return __builtin_ia32_comisdle((__v2df)__a, (__v2df)__b);
+}
+
+static __inline__ int __DEFAULT_FN_ATTRS
+_mm_comigt_sd(__m128d __a, __m128d __b)
+{
+ return __builtin_ia32_comisdgt((__v2df)__a, (__v2df)__b);
+}
+
+static __inline__ int __DEFAULT_FN_ATTRS
+_mm_comige_sd(__m128d __a, __m128d __b)
+{
+ return __builtin_ia32_comisdge((__v2df)__a, (__v2df)__b);
+}
+
+static __inline__ int __DEFAULT_FN_ATTRS
+_mm_comineq_sd(__m128d __a, __m128d __b)
+{
+ return __builtin_ia32_comisdneq((__v2df)__a, (__v2df)__b);
+}
+
+static __inline__ int __DEFAULT_FN_ATTRS
+_mm_ucomieq_sd(__m128d __a, __m128d __b)
+{
+ return __builtin_ia32_ucomisdeq((__v2df)__a, (__v2df)__b);
+}
+
+static __inline__ int __DEFAULT_FN_ATTRS
+_mm_ucomilt_sd(__m128d __a, __m128d __b)
+{
+ return __builtin_ia32_ucomisdlt((__v2df)__a, (__v2df)__b);
+}
+
+static __inline__ int __DEFAULT_FN_ATTRS
+_mm_ucomile_sd(__m128d __a, __m128d __b)
+{
+ return __builtin_ia32_ucomisdle((__v2df)__a, (__v2df)__b);
+}
+
+static __inline__ int __DEFAULT_FN_ATTRS
+_mm_ucomigt_sd(__m128d __a, __m128d __b)
+{
+ return __builtin_ia32_ucomisdgt((__v2df)__a, (__v2df)__b);
+}
+
+static __inline__ int __DEFAULT_FN_ATTRS
+_mm_ucomige_sd(__m128d __a, __m128d __b)
+{
+ return __builtin_ia32_ucomisdge((__v2df)__a, (__v2df)__b);
+}
+
+static __inline__ int __DEFAULT_FN_ATTRS
+_mm_ucomineq_sd(__m128d __a, __m128d __b)
+{
+ return __builtin_ia32_ucomisdneq((__v2df)__a, (__v2df)__b);
+}
+
+static __inline__ __m128 __DEFAULT_FN_ATTRS
+_mm_cvtpd_ps(__m128d __a)
+{
+ return __builtin_ia32_cvtpd2ps((__v2df)__a);
+}
+
+static __inline__ __m128d __DEFAULT_FN_ATTRS
+_mm_cvtps_pd(__m128 __a)
+{
+ return (__m128d) __builtin_convertvector(
+ __builtin_shufflevector((__v4sf)__a, (__v4sf)__a, 0, 1), __v2df);
+}
+
+static __inline__ __m128d __DEFAULT_FN_ATTRS
+_mm_cvtepi32_pd(__m128i __a)
+{
+ return (__m128d) __builtin_convertvector(
+ __builtin_shufflevector((__v4si)__a, (__v4si)__a, 0, 1), __v2df);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_cvtpd_epi32(__m128d __a)
+{
+ return __builtin_ia32_cvtpd2dq((__v2df)__a);
+}
+
+static __inline__ int __DEFAULT_FN_ATTRS
+_mm_cvtsd_si32(__m128d __a)
+{
+ return __builtin_ia32_cvtsd2si((__v2df)__a);
+}
+
+static __inline__ __m128 __DEFAULT_FN_ATTRS
+_mm_cvtsd_ss(__m128 __a, __m128d __b)
+{
+ __a[0] = __b[0];
+ return __a;
+}
+
+static __inline__ __m128d __DEFAULT_FN_ATTRS
+_mm_cvtsi32_sd(__m128d __a, int __b)
+{
+ __a[0] = __b;
+ return __a;
+}
+
+static __inline__ __m128d __DEFAULT_FN_ATTRS
+_mm_cvtss_sd(__m128d __a, __m128 __b)
+{
+ __a[0] = __b[0];
+ return __a;
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_cvttpd_epi32(__m128d __a)
+{
+ return (__m128i)__builtin_ia32_cvttpd2dq((__v2df)__a);
+}
+
+static __inline__ int __DEFAULT_FN_ATTRS
+_mm_cvttsd_si32(__m128d __a)
+{
+ return __a[0];
+}
+
+static __inline__ __m64 __DEFAULT_FN_ATTRS
+_mm_cvtpd_pi32(__m128d __a)
+{
+ return (__m64)__builtin_ia32_cvtpd2pi((__v2df)__a);
+}
+
+static __inline__ __m64 __DEFAULT_FN_ATTRS
+_mm_cvttpd_pi32(__m128d __a)
+{
+ return (__m64)__builtin_ia32_cvttpd2pi((__v2df)__a);
+}
+
+static __inline__ __m128d __DEFAULT_FN_ATTRS
+_mm_cvtpi32_pd(__m64 __a)
+{
+ return __builtin_ia32_cvtpi2pd((__v2si)__a);
+}
+
+static __inline__ double __DEFAULT_FN_ATTRS
+_mm_cvtsd_f64(__m128d __a)
+{
+ return __a[0];
+}
+
+static __inline__ __m128d __DEFAULT_FN_ATTRS
+_mm_load_pd(double const *__dp)
+{
+ return *(__m128d*)__dp;
+}
+
+static __inline__ __m128d __DEFAULT_FN_ATTRS
+_mm_load1_pd(double const *__dp)
+{
+ struct __mm_load1_pd_struct {
+ double __u;
+ } __attribute__((__packed__, __may_alias__));
+ double __u = ((struct __mm_load1_pd_struct*)__dp)->__u;
+ return (__m128d){ __u, __u };
+}
+
+#define _mm_load_pd1(dp) _mm_load1_pd(dp)
+
+static __inline__ __m128d __DEFAULT_FN_ATTRS
+_mm_loadr_pd(double const *__dp)
+{
+ __m128d __u = *(__m128d*)__dp;
+ return __builtin_shufflevector((__v2df)__u, (__v2df)__u, 1, 0);
+}
+
+static __inline__ __m128d __DEFAULT_FN_ATTRS
+_mm_loadu_pd(double const *__dp)
+{
+ struct __loadu_pd {
+ __m128d __v;
+ } __attribute__((__packed__, __may_alias__));
+ return ((struct __loadu_pd*)__dp)->__v;
+}
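+
+/* Note: the packed, may_alias wrapper struct used above (and in the other
+   loadu/storeu helpers in this header) tells the compiler that the access may
+   be unaligned and may alias other types, so it emits an unaligned load
+   instead of assuming 16-byte alignment. */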
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_loadu_si64(void const *__a)
+{
+ struct __loadu_si64 {
+ long long __v;
+ } __attribute__((__packed__, __may_alias__));
+ long long __u = ((struct __loadu_si64*)__a)->__v;
+ return (__m128i){__u, 0L};
+}
+
+static __inline__ __m128d __DEFAULT_FN_ATTRS
+_mm_load_sd(double const *__dp)
+{
+ struct __mm_load_sd_struct {
+ double __u;
+ } __attribute__((__packed__, __may_alias__));
+ double __u = ((struct __mm_load_sd_struct*)__dp)->__u;
+ return (__m128d){ __u, 0 };
+}
+
+static __inline__ __m128d __DEFAULT_FN_ATTRS
+_mm_loadh_pd(__m128d __a, double const *__dp)
+{
+ struct __mm_loadh_pd_struct {
+ double __u;
+ } __attribute__((__packed__, __may_alias__));
+ double __u = ((struct __mm_loadh_pd_struct*)__dp)->__u;
+ return (__m128d){ __a[0], __u };
+}
+
+static __inline__ __m128d __DEFAULT_FN_ATTRS
+_mm_loadl_pd(__m128d __a, double const *__dp)
+{
+ struct __mm_loadl_pd_struct {
+ double __u;
+ } __attribute__((__packed__, __may_alias__));
+ double __u = ((struct __mm_loadl_pd_struct*)__dp)->__u;
+ return (__m128d){ __u, __a[1] };
+}
+
+static __inline__ __m128d __DEFAULT_FN_ATTRS
+_mm_undefined_pd(void)
+{
+ return (__m128d)__builtin_ia32_undef128();
+}
+
+static __inline__ __m128d __DEFAULT_FN_ATTRS
+_mm_set_sd(double __w)
+{
+ return (__m128d){ __w, 0 };
+}
+
+static __inline__ __m128d __DEFAULT_FN_ATTRS
+_mm_set1_pd(double __w)
+{
+ return (__m128d){ __w, __w };
+}
+
+static __inline__ __m128d __DEFAULT_FN_ATTRS
+_mm_set_pd(double __w, double __x)
+{
+ return (__m128d){ __x, __w };
+}
+
+static __inline__ __m128d __DEFAULT_FN_ATTRS
+_mm_setr_pd(double __w, double __x)
+{
+ return (__m128d){ __w, __x };
+}
+
+static __inline__ __m128d __DEFAULT_FN_ATTRS
+_mm_setzero_pd(void)
+{
+ return (__m128d){ 0, 0 };
+}
+
+static __inline__ __m128d __DEFAULT_FN_ATTRS
+_mm_move_sd(__m128d __a, __m128d __b)
+{
+ return (__m128d){ __b[0], __a[1] };
+}
+
+static __inline__ void __DEFAULT_FN_ATTRS
+_mm_store_sd(double *__dp, __m128d __a)
+{
+ struct __mm_store_sd_struct {
+ double __u;
+ } __attribute__((__packed__, __may_alias__));
+ ((struct __mm_store_sd_struct*)__dp)->__u = __a[0];
+}
+
+static __inline__ void __DEFAULT_FN_ATTRS
+_mm_store_pd(double *__dp, __m128d __a)
+{
+ *(__m128d*)__dp = __a;
+}
+
+static __inline__ void __DEFAULT_FN_ATTRS
+_mm_store1_pd(double *__dp, __m128d __a)
+{
+ __a = __builtin_shufflevector((__v2df)__a, (__v2df)__a, 0, 0);
+ _mm_store_pd(__dp, __a);
+}
+
+static __inline__ void __DEFAULT_FN_ATTRS
+_mm_store_pd1(double *__dp, __m128d __a)
+{
+ return _mm_store1_pd(__dp, __a);
+}
+
+static __inline__ void __DEFAULT_FN_ATTRS
+_mm_storeu_pd(double *__dp, __m128d __a)
+{
+ struct __storeu_pd {
+ __m128d __v;
+ } __attribute__((__packed__, __may_alias__));
+ ((struct __storeu_pd*)__dp)->__v = __a;
+}
+
+static __inline__ void __DEFAULT_FN_ATTRS
+_mm_storer_pd(double *__dp, __m128d __a)
+{
+ __a = __builtin_shufflevector((__v2df)__a, (__v2df)__a, 1, 0);
+ *(__m128d *)__dp = __a;
+}
+
+static __inline__ void __DEFAULT_FN_ATTRS
+_mm_storeh_pd(double *__dp, __m128d __a)
+{
+ struct __mm_storeh_pd_struct {
+ double __u;
+ } __attribute__((__packed__, __may_alias__));
+ ((struct __mm_storeh_pd_struct*)__dp)->__u = __a[1];
+}
+
+static __inline__ void __DEFAULT_FN_ATTRS
+_mm_storel_pd(double *__dp, __m128d __a)
+{
+  struct __mm_storel_pd_struct {
+    double __u;
+  } __attribute__((__packed__, __may_alias__));
+  ((struct __mm_storel_pd_struct*)__dp)->__u = __a[0];
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_add_epi8(__m128i __a, __m128i __b)
+{
+ return (__m128i)((__v16qu)__a + (__v16qu)__b);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_add_epi16(__m128i __a, __m128i __b)
+{
+ return (__m128i)((__v8hu)__a + (__v8hu)__b);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_add_epi32(__m128i __a, __m128i __b)
+{
+ return (__m128i)((__v4su)__a + (__v4su)__b);
+}
+
+static __inline__ __m64 __DEFAULT_FN_ATTRS
+_mm_add_si64(__m64 __a, __m64 __b)
+{
+ return (__m64)__builtin_ia32_paddq((__v1di)__a, (__v1di)__b);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_add_epi64(__m128i __a, __m128i __b)
+{
+ return (__m128i)((__v2du)__a + (__v2du)__b);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_adds_epi8(__m128i __a, __m128i __b)
+{
+ return (__m128i)__builtin_ia32_paddsb128((__v16qi)__a, (__v16qi)__b);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_adds_epi16(__m128i __a, __m128i __b)
+{
+ return (__m128i)__builtin_ia32_paddsw128((__v8hi)__a, (__v8hi)__b);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_adds_epu8(__m128i __a, __m128i __b)
+{
+ return (__m128i)__builtin_ia32_paddusb128((__v16qi)__a, (__v16qi)__b);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_adds_epu16(__m128i __a, __m128i __b)
+{
+ return (__m128i)__builtin_ia32_paddusw128((__v8hi)__a, (__v8hi)__b);
+}
+
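+/* _mm_avg_epu8 and _mm_avg_epu16 compute the rounded average of each pair of
+   unsigned elements, (a + b + 1) >> 1, with the sum formed at full width so
+   it cannot overflow. */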
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_avg_epu8(__m128i __a, __m128i __b)
+{
+ return (__m128i)__builtin_ia32_pavgb128((__v16qi)__a, (__v16qi)__b);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_avg_epu16(__m128i __a, __m128i __b)
+{
+ return (__m128i)__builtin_ia32_pavgw128((__v8hi)__a, (__v8hi)__b);
+}
+
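+/* _mm_madd_epi16 multiplies corresponding signed 16-bit elements into 32-bit
+   products, then sums each adjacent pair of products, yielding [4 x i32]. */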
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_madd_epi16(__m128i __a, __m128i __b)
+{
+ return (__m128i)__builtin_ia32_pmaddwd128((__v8hi)__a, (__v8hi)__b);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_max_epi16(__m128i __a, __m128i __b)
+{
+ return (__m128i)__builtin_ia32_pmaxsw128((__v8hi)__a, (__v8hi)__b);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_max_epu8(__m128i __a, __m128i __b)
+{
+ return (__m128i)__builtin_ia32_pmaxub128((__v16qi)__a, (__v16qi)__b);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_min_epi16(__m128i __a, __m128i __b)
+{
+ return (__m128i)__builtin_ia32_pminsw128((__v8hi)__a, (__v8hi)__b);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_min_epu8(__m128i __a, __m128i __b)
+{
+ return (__m128i)__builtin_ia32_pminub128((__v16qi)__a, (__v16qi)__b);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_mulhi_epi16(__m128i __a, __m128i __b)
+{
+ return (__m128i)__builtin_ia32_pmulhw128((__v8hi)__a, (__v8hi)__b);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_mulhi_epu16(__m128i __a, __m128i __b)
+{
+ return (__m128i)__builtin_ia32_pmulhuw128((__v8hi)__a, (__v8hi)__b);
+}
+
+/// \brief Multiplies the corresponding elements of two [8 x i16] vectors and
+/// returns a vector containing the low-order 16 bits of each 32-bit product
+/// in the corresponding element.
+///
+/// \headerfile <x86intrin.h>
+///
+/// This intrinsic corresponds to the \c VPMULLW / PMULLW instruction.
+///
+/// \param __a
+/// A 128-bit integer vector containing one of the source operands.
+/// \param __b
+/// A 128-bit integer vector containing one of the source operands.
+/// \returns A 128-bit integer vector containing the products of both operands.
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_mullo_epi16(__m128i __a, __m128i __b)
+{
+ return (__m128i)((__v8hu)__a * (__v8hu)__b);
+}
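+
+/* A minimal sketch (a, b are arbitrary [8 x i16] vectors): pairing
+   _mm_mullo_epi16 with _mm_mulhi_epi16 recovers the full 32-bit products
+   that each discards half of.
+
+     __m128i lo = _mm_mullo_epi16(a, b);
+     __m128i hi = _mm_mulhi_epi16(a, b);
+     __m128i prod03 = _mm_unpacklo_epi16(lo, hi);  // full products of lanes 0-3
+     __m128i prod47 = _mm_unpackhi_epi16(lo, hi);  // full products of lanes 4-7
+*/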
+
+/// \brief Multiplies 32-bit unsigned integer values contained in the lower bits
+/// of the two 64-bit integer vectors and returns the 64-bit unsigned
+/// product.
+///
+/// \headerfile <x86intrin.h>
+///
+/// This intrinsic corresponds to the \c PMULUDQ instruction.
+///
+/// \param __a
+/// A 64-bit integer containing one of the source operands.
+/// \param __b
+/// A 64-bit integer containing one of the source operands.
+/// \returns A 64-bit integer vector containing the product of both operands.
+static __inline__ __m64 __DEFAULT_FN_ATTRS
+_mm_mul_su32(__m64 __a, __m64 __b)
+{
+ return __builtin_ia32_pmuludq((__v2si)__a, (__v2si)__b);
+}
+
+/// \brief Multiplies 32-bit unsigned integer values contained in the lower
+/// bits of the corresponding elements of two [2 x i64] vectors, and returns
+/// the 64-bit products in the corresponding elements of a [2 x i64] vector.
+///
+/// \headerfile <x86intrin.h>
+///
+/// This intrinsic corresponds to the \c VPMULUDQ / PMULUDQ instruction.
+///
+/// \param __a
+/// A [2 x i64] vector containing one of the source operands.
+/// \param __b
+/// A [2 x i64] vector containing one of the source operands.
+/// \returns A [2 x i64] vector containing the product of both operands.
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_mul_epu32(__m128i __a, __m128i __b)
+{
+ return __builtin_ia32_pmuludq128((__v4si)__a, (__v4si)__b);
+}
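+
+/* A minimal sketch (a, b are arbitrary [4 x i32] vectors): a full four-lane
+   32x32->64-bit unsigned multiply built on _mm_mul_epu32, which only
+   multiplies the even (0 and 2) lanes.
+
+     __m128i even = _mm_mul_epu32(a, b);                  // lanes 0 and 2
+     __m128i odd  = _mm_mul_epu32(_mm_srli_si128(a, 4),
+                                  _mm_srli_si128(b, 4));  // lanes 1 and 3
+*/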
+
+/// \brief Computes the absolute differences of corresponding 8-bit integer
+/// values in two 128-bit vectors. Sums the first 8 absolute differences, and
+///    separately sums the second 8 absolute differences. Packs these two
+/// unsigned 16-bit integer sums into the upper and lower elements of a
+/// [2 x i64] vector.
+///
+/// \headerfile <x86intrin.h>
+///
+/// This intrinsic corresponds to the \c VPSADBW / PSADBW instruction.
+///
+/// \param __a
+/// A 128-bit integer vector containing one of the source operands.
+/// \param __b
+/// A 128-bit integer vector containing one of the source operands.
+/// \returns A [2 x i64] vector containing the sums of the sets of absolute
+/// differences between both operands.
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_sad_epu8(__m128i __a, __m128i __b)
+{
+ return __builtin_ia32_psadbw128((__v16qi)__a, (__v16qi)__b);
+}
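+
+/* A minimal sketch (x, y are arbitrary __m128i values): reducing the two
+   16-bit sums that _mm_sad_epu8 leaves in bits [15:0] and [79:64] to one
+   total.
+
+     __m128i sad = _mm_sad_epu8(x, y);
+     int total = _mm_cvtsi128_si32(sad)
+               + _mm_cvtsi128_si32(_mm_srli_si128(sad, 8));
+*/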
+
+/// \brief Subtracts the corresponding 8-bit integer values in the operands.
+///
+/// \headerfile <x86intrin.h>
+///
+/// This intrinsic corresponds to the \c VPSUBB / PSUBB instruction.
+///
+/// \param __a
+/// A 128-bit integer vector containing the minuends.
+/// \param __b
+/// A 128-bit integer vector containing the subtrahends.
+/// \returns A 128-bit integer vector containing the differences of the values
+/// in the operands.
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_sub_epi8(__m128i __a, __m128i __b)
+{
+ return (__m128i)((__v16qu)__a - (__v16qu)__b);
+}
+
+/// \brief Subtracts the corresponding 16-bit integer values in the operands.
+///
+/// \headerfile <x86intrin.h>
+///
+/// This intrinsic corresponds to the \c VPSUBW / PSUBW instruction.
+///
+/// \param __a
+/// A 128-bit integer vector containing the minuends.
+/// \param __b
+/// A 128-bit integer vector containing the subtrahends.
+/// \returns A 128-bit integer vector containing the differences of the values
+/// in the operands.
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_sub_epi16(__m128i __a, __m128i __b)
+{
+ return (__m128i)((__v8hu)__a - (__v8hu)__b);
+}
+
+/// \brief Subtracts the corresponding 32-bit integer values in the operands.
+///
+/// \headerfile <x86intrin.h>
+///
+/// This intrinsic corresponds to the \c VPSUBD / PSUBD instruction.
+///
+/// \param __a
+/// A 128-bit integer vector containing the minuends.
+/// \param __b
+/// A 128-bit integer vector containing the subtrahends.
+/// \returns A 128-bit integer vector containing the differences of the values
+/// in the operands.
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_sub_epi32(__m128i __a, __m128i __b)
+{
+ return (__m128i)((__v4su)__a - (__v4su)__b);
+}
+
+/// \brief Subtracts signed or unsigned 64-bit integer values and writes the
+/// difference to the corresponding bits in the destination.
+///
+/// \headerfile <x86intrin.h>
+///
+/// This intrinsic corresponds to the \c PSUBQ instruction.
+///
+/// \param __a
+/// A 64-bit integer vector containing the minuend.
+/// \param __b
+/// A 64-bit integer vector containing the subtrahend.
+/// \returns A 64-bit integer vector containing the difference of the values in
+/// the operands.
+static __inline__ __m64 __DEFAULT_FN_ATTRS
+_mm_sub_si64(__m64 __a, __m64 __b)
+{
+ return (__m64)__builtin_ia32_psubq((__v1di)__a, (__v1di)__b);
+}
+
+/// \brief Subtracts the corresponding elements of two [2 x i64] vectors.
+///
+/// \headerfile <x86intrin.h>
+///
+/// This intrinsic corresponds to the \c VPSUBQ / PSUBQ instruction.
+///
+/// \param __a
+/// A 128-bit integer vector containing the minuends.
+/// \param __b
+/// A 128-bit integer vector containing the subtrahends.
+/// \returns A 128-bit integer vector containing the differences of the values
+/// in the operands.
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_sub_epi64(__m128i __a, __m128i __b)
+{
+ return (__m128i)((__v2du)__a - (__v2du)__b);
+}
+
+/// \brief Subtracts corresponding 8-bit signed integer values in the input and
+/// returns the differences in the corresponding bytes in the destination.
+/// Differences greater than 7Fh are saturated to 7Fh, and differences less
+/// than 80h are saturated to 80h.
+///
+/// \headerfile <x86intrin.h>
+///
+/// This intrinsic corresponds to the \c VPSUBSB / PSUBSB instruction.
+///
+/// \param __a
+/// A 128-bit integer vector containing the minuends.
+/// \param __b
+/// A 128-bit integer vector containing the subtrahends.
+/// \returns A 128-bit integer vector containing the differences of the values
+/// in the operands.
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_subs_epi8(__m128i __a, __m128i __b)
+{
+ return (__m128i)__builtin_ia32_psubsb128((__v16qi)__a, (__v16qi)__b);
+}
+
+/// \brief Subtracts corresponding 16-bit signed integer values in the input and
+///    returns the differences in the corresponding elements of the destination.
+///    Differences greater than 7FFFh are saturated to 7FFFh, and differences
+///    less than 8000h are saturated to 8000h.
+///
+/// \headerfile <x86intrin.h>
+///
+/// This intrinsic corresponds to the \c VPSUBSW / PSUBSW instruction.
+///
+/// \param __a
+/// A 128-bit integer vector containing the minuends.
+/// \param __b
+/// A 128-bit integer vector containing the subtrahends.
+/// \returns A 128-bit integer vector containing the differences of the values
+/// in the operands.
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_subs_epi16(__m128i __a, __m128i __b)
+{
+ return (__m128i)__builtin_ia32_psubsw128((__v8hi)__a, (__v8hi)__b);
+}
+
+/// \brief Subtracts corresponding 8-bit unsigned integer values in the input
+/// and returns the differences in the corresponding bytes in the
+/// destination. Differences less than 00h are saturated to 00h.
+///
+/// \headerfile <x86intrin.h>
+///
+/// This intrinsic corresponds to the \c VPSUBUSB / PSUBUSB instruction.
+///
+/// \param __a
+/// A 128-bit integer vector containing the minuends.
+/// \param __b
+/// A 128-bit integer vector containing the subtrahends.
+/// \returns A 128-bit integer vector containing the unsigned integer
+/// differences of the values in the operands.
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_subs_epu8(__m128i __a, __m128i __b)
+{
+ return (__m128i)__builtin_ia32_psubusb128((__v16qi)__a, (__v16qi)__b);
+}
+
+/// \brief Subtracts corresponding 16-bit unsigned integer values in the input
+///    and returns the differences in the corresponding elements of the
+///    destination. Differences less than 0000h are saturated to 0000h.
+///
+/// \headerfile <x86intrin.h>
+///
+/// This intrinsic corresponds to the \c VPSUBUSW / PSUBUSW instruction.
+///
+/// \param __a
+/// A 128-bit integer vector containing the minuends.
+/// \param __b
+/// A 128-bit integer vector containing the subtrahends.
+/// \returns A 128-bit integer vector containing the unsigned integer
+/// differences of the values in the operands.
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_subs_epu16(__m128i __a, __m128i __b)
+{
+ return (__m128i)__builtin_ia32_psubusw128((__v8hi)__a, (__v8hi)__b);
+}
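+
+/* The saturating forms clamp at the type bounds instead of wrapping. For
+   example, _mm_subs_epu8(a, b) is max(a - b, 0) per byte, which gives the
+   classic unsigned absolute-difference idiom:
+
+     __m128i absdiff = _mm_or_si128(_mm_subs_epu8(a, b), _mm_subs_epu8(b, a));
+*/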
+
+/// \brief Performs a bitwise AND of two 128-bit integer vectors.
+///
+/// \headerfile <x86intrin.h>
+///
+/// This intrinsic corresponds to the \c VPAND / PAND instruction.
+///
+/// \param __a
+/// A 128-bit integer vector containing one of the source operands.
+/// \param __b
+/// A 128-bit integer vector containing one of the source operands.
+/// \returns A 128-bit integer vector containing the bitwise AND of the values
+/// in both operands.
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_and_si128(__m128i __a, __m128i __b)
+{
+ return (__m128i)((__v2du)__a & (__v2du)__b);
+}
+
+/// \brief Performs a bitwise AND of two 128-bit integer vectors, using the
+/// one's complement of the values contained in the first source operand.
+///
+/// \headerfile <x86intrin.h>
+///
+/// This intrinsic corresponds to the \c VPANDN / PANDN instruction.
+///
+/// \param __a
+/// A 128-bit vector containing the left source operand. The one's complement
+/// of this value is used in the bitwise AND.
+/// \param __b
+/// A 128-bit vector containing the right source operand.
+/// \returns A 128-bit integer vector containing the bitwise AND of the one's
+/// complement of the first operand and the values in the second operand.
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_andnot_si128(__m128i __a, __m128i __b)
+{
+ return (__m128i)(~(__v2du)__a & (__v2du)__b);
+}
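+
+/* Note the operand order: the FIRST operand is the one complemented, so
+   _mm_andnot_si128(mask, value) computes (~mask) & value. A common blend
+   (mask, a, b are arbitrary __m128i values):
+
+     // r takes a where mask bits are set, b elsewhere
+     __m128i r = _mm_or_si128(_mm_and_si128(mask, a),
+                              _mm_andnot_si128(mask, b));
+*/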
+
+/// \brief Performs a bitwise OR of two 128-bit integer vectors.
+///
+/// \headerfile <x86intrin.h>
+///
+/// This intrinsic corresponds to the \c VPOR / POR instruction.
+///
+/// \param __a
+/// A 128-bit integer vector containing one of the source operands.
+/// \param __b
+/// A 128-bit integer vector containing one of the source operands.
+/// \returns A 128-bit integer vector containing the bitwise OR of the values
+/// in both operands.
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_or_si128(__m128i __a, __m128i __b)
+{
+ return (__m128i)((__v2du)__a | (__v2du)__b);
+}
+
+/// \brief Performs a bitwise exclusive OR of two 128-bit integer vectors.
+///
+/// \headerfile <x86intrin.h>
+///
+/// This intrinsic corresponds to the \c VPXOR / PXOR instruction.
+///
+/// \param __a
+/// A 128-bit integer vector containing one of the source operands.
+/// \param __b
+/// A 128-bit integer vector containing one of the source operands.
+/// \returns A 128-bit integer vector containing the bitwise exclusive OR of the
+/// values in both operands.
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_xor_si128(__m128i __a, __m128i __b)
+{
+ return (__m128i)((__v2du)__a ^ (__v2du)__b);
+}
+
+/// \brief Left-shifts the 128-bit integer vector operand by the specified
+/// number of bytes. Low-order bits are cleared.
+///
+/// \headerfile <x86intrin.h>
+///
+/// \code
+/// __m128i _mm_slli_si128(__m128i a, const int imm);
+/// \endcode
+///
+/// This intrinsic corresponds to the \c VPSLLDQ / PSLLDQ instruction.
+///
+/// \param a
+/// A 128-bit integer vector containing the source operand.
+/// \param imm
+/// An immediate value specifying the number of bytes to left-shift
+/// operand a.
+/// \returns A 128-bit integer vector containing the left-shifted value.
+#define _mm_slli_si128(a, imm) __extension__ ({ \
+ (__m128i)__builtin_shufflevector( \
+ (__v16qi)_mm_setzero_si128(), \
+ (__v16qi)(__m128i)(a), \
+ ((char)(imm)&0xF0) ? 0 : 16 - (char)(imm), \
+ ((char)(imm)&0xF0) ? 1 : 17 - (char)(imm), \
+ ((char)(imm)&0xF0) ? 2 : 18 - (char)(imm), \
+ ((char)(imm)&0xF0) ? 3 : 19 - (char)(imm), \
+ ((char)(imm)&0xF0) ? 4 : 20 - (char)(imm), \
+ ((char)(imm)&0xF0) ? 5 : 21 - (char)(imm), \
+ ((char)(imm)&0xF0) ? 6 : 22 - (char)(imm), \
+ ((char)(imm)&0xF0) ? 7 : 23 - (char)(imm), \
+ ((char)(imm)&0xF0) ? 8 : 24 - (char)(imm), \
+ ((char)(imm)&0xF0) ? 9 : 25 - (char)(imm), \
+ ((char)(imm)&0xF0) ? 10 : 26 - (char)(imm), \
+ ((char)(imm)&0xF0) ? 11 : 27 - (char)(imm), \
+ ((char)(imm)&0xF0) ? 12 : 28 - (char)(imm), \
+ ((char)(imm)&0xF0) ? 13 : 29 - (char)(imm), \
+ ((char)(imm)&0xF0) ? 14 : 30 - (char)(imm), \
+ ((char)(imm)&0xF0) ? 15 : 31 - (char)(imm)); })
+
+#define _mm_bslli_si128(a, imm) \
+ _mm_slli_si128((a), (imm))
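+
+/* The count is a byte count and must be a compile-time immediate; counts of
+   16 or more yield zero. For example, _mm_slli_si128(v, 4) moves element i
+   of a [4 x i32] vector into element i + 1 and clears element 0. */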
+
+/// \brief Left-shifts each 16-bit value in the 128-bit integer vector operand
+/// by the specified number of bits. Low-order bits are cleared.
+///
+/// \headerfile <x86intrin.h>
+///
+/// This intrinsic corresponds to the \c VPSLLW / PSLLW instruction.
+///
+/// \param __a
+/// A 128-bit integer vector containing the source operand.
+/// \param __count
+/// An integer value specifying the number of bits to left-shift each value
+/// in operand __a.
+/// \returns A 128-bit integer vector containing the left-shifted values.
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_slli_epi16(__m128i __a, int __count)
+{
+ return (__m128i)__builtin_ia32_psllwi128((__v8hi)__a, __count);
+}
+
+/// \brief Left-shifts each 16-bit value in the 128-bit integer vector operand
+/// by the specified number of bits. Low-order bits are cleared.
+///
+/// \headerfile <x86intrin.h>
+///
+/// This intrinsic corresponds to the \c VPSLLW / PSLLW instruction.
+///
+/// \param __a
+/// A 128-bit integer vector containing the source operand.
+/// \param __count
+/// A 128-bit integer vector in which bits [63:0] specify the number of bits
+/// to left-shift each value in operand __a.
+/// \returns A 128-bit integer vector containing the left-shifted values.
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_sll_epi16(__m128i __a, __m128i __count)
+{
+ return (__m128i)__builtin_ia32_psllw128((__v8hi)__a, (__v8hi)__count);
+}
+
+/// \brief Left-shifts each 32-bit value in the 128-bit integer vector operand
+/// by the specified number of bits. Low-order bits are cleared.
+///
+/// \headerfile <x86intrin.h>
+///
+/// This intrinsic corresponds to the \c VPSLLD / PSLLD instruction.
+///
+/// \param __a
+/// A 128-bit integer vector containing the source operand.
+/// \param __count
+/// An integer value specifying the number of bits to left-shift each value
+/// in operand __a.
+/// \returns A 128-bit integer vector containing the left-shifted values.
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_slli_epi32(__m128i __a, int __count)
+{
+ return (__m128i)__builtin_ia32_pslldi128((__v4si)__a, __count);
+}
+
+/// \brief Left-shifts each 32-bit value in the 128-bit integer vector operand
+/// by the specified number of bits. Low-order bits are cleared.
+///
+/// \headerfile <x86intrin.h>
+///
+/// This intrinsic corresponds to the \c VPSLLD / PSLLD instruction.
+///
+/// \param __a
+/// A 128-bit integer vector containing the source operand.
+/// \param __count
+/// A 128-bit integer vector in which bits [63:0] specify the number of bits
+/// to left-shift each value in operand __a.
+/// \returns A 128-bit integer vector containing the left-shifted values.
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_sll_epi32(__m128i __a, __m128i __count)
+{
+ return (__m128i)__builtin_ia32_pslld128((__v4si)__a, (__v4si)__count);
+}
+
+/// \brief Left-shifts each 64-bit value in the 128-bit integer vector operand
+/// by the specified number of bits. Low-order bits are cleared.
+///
+/// \headerfile <x86intrin.h>
+///
+/// This intrinsic corresponds to the \c VPSLLQ / PSLLQ instruction.
+///
+/// \param __a
+/// A 128-bit integer vector containing the source operand.
+/// \param __count
+/// An integer value specifying the number of bits to left-shift each value
+/// in operand __a.
+/// \returns A 128-bit integer vector containing the left-shifted values.
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_slli_epi64(__m128i __a, int __count)
+{
+ return __builtin_ia32_psllqi128((__v2di)__a, __count);
+}
+
+/// \brief Left-shifts each 64-bit value in the 128-bit integer vector operand
+/// by the specified number of bits. Low-order bits are cleared.
+///
+/// \headerfile <x86intrin.h>
+///
+/// This intrinsic corresponds to the \c VPSLLQ / PSLLQ instruction.
+///
+/// \param __a
+/// A 128-bit integer vector containing the source operand.
+/// \param __count
+/// A 128-bit integer vector in which bits [63:0] specify the number of bits
+/// to left-shift each value in operand __a.
+/// \returns A 128-bit integer vector containing the left-shifted values.
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_sll_epi64(__m128i __a, __m128i __count)
+{
+ return __builtin_ia32_psllq128((__v2di)__a, (__v2di)__count);
+}
+
+/// \brief Right-shifts each 16-bit value in the 128-bit integer vector operand
+/// by the specified number of bits. High-order bits are filled with the sign
+/// bit of the initial value.
+///
+/// \headerfile <x86intrin.h>
+///
+/// This intrinsic corresponds to the \c VPSRAW / PSRAW instruction.
+///
+/// \param __a
+/// A 128-bit integer vector containing the source operand.
+/// \param __count
+/// An integer value specifying the number of bits to right-shift each value
+/// in operand __a.
+/// \returns A 128-bit integer vector containing the right-shifted values.
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_srai_epi16(__m128i __a, int __count)
+{
+ return (__m128i)__builtin_ia32_psrawi128((__v8hi)__a, __count);
+}
+
+/// \brief Right-shifts each 16-bit value in the 128-bit integer vector operand
+/// by the specified number of bits. High-order bits are filled with the sign
+/// bit of the initial value.
+///
+/// \headerfile <x86intrin.h>
+///
+/// This intrinsic corresponds to the \c VPSRAW / PSRAW instruction.
+///
+/// \param __a
+/// A 128-bit integer vector containing the source operand.
+/// \param __count
+/// A 128-bit integer vector in which bits [63:0] specify the number of bits
+/// to right-shift each value in operand __a.
+/// \returns A 128-bit integer vector containing the right-shifted values.
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_sra_epi16(__m128i __a, __m128i __count)
+{
+ return (__m128i)__builtin_ia32_psraw128((__v8hi)__a, (__v8hi)__count);
+}
+
+/// \brief Right-shifts each 32-bit value in the 128-bit integer vector operand
+/// by the specified number of bits. High-order bits are filled with the sign
+/// bit of the initial value.
+///
+/// \headerfile <x86intrin.h>
+///
+/// This intrinsic corresponds to the \c VPSRAD / PSRAD instruction.
+///
+/// \param __a
+/// A 128-bit integer vector containing the source operand.
+/// \param __count
+/// An integer value specifying the number of bits to right-shift each value
+/// in operand __a.
+/// \returns A 128-bit integer vector containing the right-shifted values.
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_srai_epi32(__m128i __a, int __count)
+{
+ return (__m128i)__builtin_ia32_psradi128((__v4si)__a, __count);
+}
+
+/// \brief Right-shifts each 32-bit value in the 128-bit integer vector operand
+/// by the specified number of bits. High-order bits are filled with the sign
+/// bit of the initial value.
+///
+/// \headerfile <x86intrin.h>
+///
+/// This intrinsic corresponds to the \c VPSRAD / PSRAD instruction.
+///
+/// \param __a
+/// A 128-bit integer vector containing the source operand.
+/// \param __count
+/// A 128-bit integer vector in which bits [63:0] specify the number of bits
+/// to right-shift each value in operand __a.
+/// \returns A 128-bit integer vector containing the right-shifted values.
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_sra_epi32(__m128i __a, __m128i __count)
+{
+ return (__m128i)__builtin_ia32_psrad128((__v4si)__a, (__v4si)__count);
+}
+
+/// \brief Right-shifts the 128-bit integer vector operand by the specified
+/// number of bytes. High-order bits are cleared.
+///
+/// \headerfile <x86intrin.h>
+///
+/// \code
+/// __m128i _mm_srli_si128(__m128i a, const int imm);
+/// \endcode
+///
+/// This intrinsic corresponds to the \c VPSRLDQ / PSRLDQ instruction.
+///
+/// \param a
+/// A 128-bit integer vector containing the source operand.
+/// \param imm
+/// An immediate value specifying the number of bytes to right-shift operand
+/// a.
+/// \returns A 128-bit integer vector containing the right-shifted value.
+#define _mm_srli_si128(a, imm) __extension__ ({ \
+ (__m128i)__builtin_shufflevector( \
+ (__v16qi)(__m128i)(a), \
+ (__v16qi)_mm_setzero_si128(), \
+ ((char)(imm)&0xF0) ? 16 : (char)(imm) + 0, \
+ ((char)(imm)&0xF0) ? 17 : (char)(imm) + 1, \
+ ((char)(imm)&0xF0) ? 18 : (char)(imm) + 2, \
+ ((char)(imm)&0xF0) ? 19 : (char)(imm) + 3, \
+ ((char)(imm)&0xF0) ? 20 : (char)(imm) + 4, \
+ ((char)(imm)&0xF0) ? 21 : (char)(imm) + 5, \
+ ((char)(imm)&0xF0) ? 22 : (char)(imm) + 6, \
+ ((char)(imm)&0xF0) ? 23 : (char)(imm) + 7, \
+ ((char)(imm)&0xF0) ? 24 : (char)(imm) + 8, \
+ ((char)(imm)&0xF0) ? 25 : (char)(imm) + 9, \
+ ((char)(imm)&0xF0) ? 26 : (char)(imm) + 10, \
+ ((char)(imm)&0xF0) ? 27 : (char)(imm) + 11, \
+ ((char)(imm)&0xF0) ? 28 : (char)(imm) + 12, \
+ ((char)(imm)&0xF0) ? 29 : (char)(imm) + 13, \
+ ((char)(imm)&0xF0) ? 30 : (char)(imm) + 14, \
+ ((char)(imm)&0xF0) ? 31 : (char)(imm) + 15); })
+
+#define _mm_bsrli_si128(a, imm) \
+ _mm_srli_si128((a), (imm))
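+
+/* As with _mm_slli_si128, the count is an immediate byte count; for example,
+   _mm_srli_si128(v, 8) moves the upper 64 bits of v into the lower 64 bits,
+   the usual final step of a horizontal reduction. */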
+
+/// \brief Right-shifts each of the 16-bit values in the 128-bit integer vector
+/// operand by the specified number of bits. High-order bits are cleared.
+///
+/// \headerfile <x86intrin.h>
+///
+/// This intrinsic corresponds to the \c VPSRLW / PSRLW instruction.
+///
+/// \param __a
+/// A 128-bit integer vector containing the source operand.
+/// \param __count
+/// An integer value specifying the number of bits to right-shift each value
+/// in operand __a.
+/// \returns A 128-bit integer vector containing the right-shifted values.
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_srli_epi16(__m128i __a, int __count)
+{
+ return (__m128i)__builtin_ia32_psrlwi128((__v8hi)__a, __count);
+}
+
+/// \brief Right-shifts each of the 16-bit values in the 128-bit integer vector
+/// operand by the specified number of bits. High-order bits are cleared.
+///
+/// \headerfile <x86intrin.h>
+///
+/// This intrinsic corresponds to the \c VPSRLW / PSRLW instruction.
+///
+/// \param __a
+/// A 128-bit integer vector containing the source operand.
+/// \param __count
+/// A 128-bit integer vector in which bits [63:0] specify the number of bits
+/// to right-shift each value in operand __a.
+/// \returns A 128-bit integer vector containing the right-shifted values.
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_srl_epi16(__m128i __a, __m128i __count)
+{
+ return (__m128i)__builtin_ia32_psrlw128((__v8hi)__a, (__v8hi)__count);
+}
+
+/// \brief Right-shifts each of the 32-bit values in the 128-bit integer vector
+/// operand by the specified number of bits. High-order bits are cleared.
+///
+/// \headerfile <x86intrin.h>
+///
+/// This intrinsic corresponds to the \c VPSRLD / PSRLD instruction.
+///
+/// \param __a
+/// A 128-bit integer vector containing the source operand.
+/// \param __count
+/// An integer value specifying the number of bits to right-shift each value
+/// in operand __a.
+/// \returns A 128-bit integer vector containing the right-shifted values.
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_srli_epi32(__m128i __a, int __count)
+{
+ return (__m128i)__builtin_ia32_psrldi128((__v4si)__a, __count);
+}
+
+/// \brief Right-shifts each of the 32-bit values in the 128-bit integer vector
+/// operand by the specified number of bits. High-order bits are cleared.
+///
+/// \headerfile <x86intrin.h>
+///
+/// This intrinsic corresponds to the \c VPSRLD / PSRLD instruction.
+///
+/// \param __a
+/// A 128-bit integer vector containing the source operand.
+/// \param __count
+/// A 128-bit integer vector in which bits [63:0] specify the number of bits
+/// to right-shift each value in operand __a.
+/// \returns A 128-bit integer vector containing the right-shifted values.
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_srl_epi32(__m128i __a, __m128i __count)
+{
+ return (__m128i)__builtin_ia32_psrld128((__v4si)__a, (__v4si)__count);
+}
+
+/// \brief Right-shifts each of the 64-bit values in the 128-bit integer vector
+/// operand by the specified number of bits. High-order bits are cleared.
+///
+/// \headerfile <x86intrin.h>
+///
+/// This intrinsic corresponds to the \c VPSRLQ / PSRLQ instruction.
+///
+/// \param __a
+/// A 128-bit integer vector containing the source operand.
+/// \param __count
+/// An integer value specifying the number of bits to right-shift each value
+/// in operand __a.
+/// \returns A 128-bit integer vector containing the right-shifted values.
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_srli_epi64(__m128i __a, int __count)
+{
+ return __builtin_ia32_psrlqi128((__v2di)__a, __count);
+}
+
+/// \brief Right-shifts each of the 64-bit values in the 128-bit integer vector
+/// operand by the specified number of bits. High-order bits are cleared.
+///
+/// \headerfile <x86intrin.h>
+///
+/// This intrinsic corresponds to the \c VPSRLQ / PSRLQ instruction.
+///
+/// \param __a
+/// A 128-bit integer vector containing the source operand.
+/// \param __count
+/// A 128-bit integer vector in which bits [63:0] specify the number of bits
+/// to right-shift each value in operand __a.
+/// \returns A 128-bit integer vector containing the right-shifted values.
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_srl_epi64(__m128i __a, __m128i __count)
+{
+ return __builtin_ia32_psrlq128((__v2di)__a, (__v2di)__count);
+}
+
+/// \brief Compares each of the corresponding 8-bit values of the 128-bit
+/// integer vectors for equality. Each comparison yields 0h for false, FFh
+/// for true.
+///
+/// \headerfile <x86intrin.h>
+///
+/// This intrinsic corresponds to the \c VPCMPEQB / PCMPEQB instruction.
+///
+/// \param __a
+/// A 128-bit integer vector.
+/// \param __b
+/// A 128-bit integer vector.
+/// \returns A 128-bit integer vector containing the comparison results.
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_cmpeq_epi8(__m128i __a, __m128i __b)
+{
+ return (__m128i)((__v16qi)__a == (__v16qi)__b);
+}
+
+/// \brief Compares each of the corresponding 16-bit values of the 128-bit
+/// integer vectors for equality. Each comparison yields 0h for false, FFFFh
+/// for true.
+///
+/// \headerfile <x86intrin.h>
+///
+/// This intrinsic corresponds to the \c VPCMPEQW / PCMPEQW instruction.
+///
+/// \param __a
+/// A 128-bit integer vector.
+/// \param __b
+/// A 128-bit integer vector.
+/// \returns A 128-bit integer vector containing the comparison results.
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_cmpeq_epi16(__m128i __a, __m128i __b)
+{
+ return (__m128i)((__v8hi)__a == (__v8hi)__b);
+}
+
+/// \brief Compares each of the corresponding 32-bit values of the 128-bit
+/// integer vectors for equality. Each comparison yields 0h for false,
+/// FFFFFFFFh for true.
+///
+/// \headerfile <x86intrin.h>
+///
+/// This intrinsic corresponds to the \c VPCMPEQD / PCMPEQD instruction.
+///
+/// \param __a
+/// A 128-bit integer vector.
+/// \param __b
+/// A 128-bit integer vector.
+/// \returns A 128-bit integer vector containing the comparison results.
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_cmpeq_epi32(__m128i __a, __m128i __b)
+{
+ return (__m128i)((__v4si)__a == (__v4si)__b);
+}
+
+/// \brief Compares each of the corresponding signed 8-bit values of the 128-bit
+/// integer vectors to determine if the values in the first operand are
+/// greater than those in the second operand. Each comparison yields 0h for
+/// false, FFh for true.
+///
+/// \headerfile <x86intrin.h>
+///
+/// This intrinsic corresponds to the \c VPCMPGTB / PCMPGTB instruction.
+///
+/// \param __a
+/// A 128-bit integer vector.
+/// \param __b
+/// A 128-bit integer vector.
+/// \returns A 128-bit integer vector containing the comparison results.
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_cmpgt_epi8(__m128i __a, __m128i __b)
+{
+ /* This function always performs a signed comparison, but __v16qi is a char
+ which may be signed or unsigned, so use __v16qs. */
+ return (__m128i)((__v16qs)__a > (__v16qs)__b);
+}
+
+/// \brief Compares each of the corresponding signed 16-bit values of the
+/// 128-bit integer vectors to determine if the values in the first operand
+/// are greater than those in the second operand. Each comparison yields 0h
+/// for false, FFFFh for true.
+///
+/// \headerfile <x86intrin.h>
+///
+/// This intrinsic corresponds to the \c VPCMPGTW / PCMPGTW instruction.
+///
+/// \param __a
+/// A 128-bit integer vector.
+/// \param __b
+/// A 128-bit integer vector.
+/// \returns A 128-bit integer vector containing the comparison results.
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_cmpgt_epi16(__m128i __a, __m128i __b)
+{
+ return (__m128i)((__v8hi)__a > (__v8hi)__b);
+}
+
+/// \brief Compares each of the corresponding signed 32-bit values of the
+/// 128-bit integer vectors to determine if the values in the first operand
+/// are greater than those in the second operand. Each comparison yields 0h
+/// for false, FFFFFFFFh for true.
+///
+/// \headerfile <x86intrin.h>
+///
+/// This intrinsic corresponds to the \c VPCMPGTD / PCMPGTD instruction.
+///
+/// \param __a
+/// A 128-bit integer vector.
+/// \param __b
+/// A 128-bit integer vector.
+/// \returns A 128-bit integer vector containing the comparison results.
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_cmpgt_epi32(__m128i __a, __m128i __b)
+{
+ return (__m128i)((__v4si)__a > (__v4si)__b);
+}
+
+/// \brief Compares each of the corresponding signed 8-bit values of the 128-bit
+/// integer vectors to determine if the values in the first operand are less
+/// than those in the second operand. Each comparison yields 0h for false,
+/// FFh for true.
+///
+/// \headerfile <x86intrin.h>
+///
+/// This intrinsic corresponds to the \c VPCMPGTB / PCMPGTB instruction.
+///
+/// \param __a
+/// A 128-bit integer vector.
+/// \param __b
+/// A 128-bit integer vector.
+/// \returns A 128-bit integer vector containing the comparison results.
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_cmplt_epi8(__m128i __a, __m128i __b)
+{
+ return _mm_cmpgt_epi8(__b, __a);
+}
+
+/// \brief Compares each of the corresponding signed 16-bit values of the
+/// 128-bit integer vectors to determine if the values in the first operand
+/// are less than those in the second operand. Each comparison yields 0h for
+/// false, FFFFh for true.
+///
+/// \headerfile <x86intrin.h>
+///
+/// This intrinsic corresponds to the \c VPCMPGTW / PCMPGTW instruction.
+///
+/// \param __a
+/// A 128-bit integer vector.
+/// \param __b
+/// A 128-bit integer vector.
+/// \returns A 128-bit integer vector containing the comparison results.
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_cmplt_epi16(__m128i __a, __m128i __b)
+{
+ return _mm_cmpgt_epi16(__b, __a);
+}
+
+/// \brief Compares each of the corresponding signed 32-bit values of the
+/// 128-bit integer vectors to determine if the values in the first operand
+/// are less than those in the second operand. Each comparison yields 0h for
+/// false, FFFFFFFFh for true.
+///
+/// \headerfile <x86intrin.h>
+///
+/// This intrinsic corresponds to the \c VPCMPGTD / PCMPGTD instruction.
+///
+/// \param __a
+/// A 128-bit integer vector.
+/// \param __b
+/// A 128-bit integer vector.
+/// \returns A 128-bit integer vector containing the comparison results.
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_cmplt_epi32(__m128i __a, __m128i __b)
+{
+ return _mm_cmpgt_epi32(__b, __a);
+}
+
+#ifdef __x86_64__
+/// \brief Converts a 64-bit signed integer value from the second operand into a
+/// double-precision value and returns it in the lower element of a [2 x
+/// double] vector; the upper element of the returned vector is copied from
+/// the upper element of the first operand.
+///
+/// \headerfile <x86intrin.h>
+///
+/// This intrinsic corresponds to the \c VCVTSI2SD / CVTSI2SD instruction.
+///
+/// \param __a
+/// A 128-bit vector of [2 x double]. The upper 64 bits of this operand are
+/// copied to the upper 64 bits of the destination.
+/// \param __b
+/// A 64-bit signed integer operand containing the value to be converted.
+/// \returns A 128-bit vector of [2 x double] whose lower 64 bits contain the
+/// converted value of the second operand. The upper 64 bits are copied from
+/// the upper 64 bits of the first operand.
+static __inline__ __m128d __DEFAULT_FN_ATTRS
+_mm_cvtsi64_sd(__m128d __a, long long __b)
+{
+ __a[0] = __b;
+ return __a;
+}
+
+/// \brief Converts the first (lower) element of a vector of [2 x double] into a
+/// 64-bit signed integer value, according to the current rounding mode.
+///
+/// \headerfile <x86intrin.h>
+///
+/// This intrinsic corresponds to the \c VCVTSD2SI / CVTSD2SI instruction.
+///
+/// \param __a
+/// A 128-bit vector of [2 x double]. The lower 64 bits are used in the
+/// conversion.
+/// \returns A 64-bit signed integer containing the converted value.
+static __inline__ long long __DEFAULT_FN_ATTRS
+_mm_cvtsd_si64(__m128d __a)
+{
+ return __builtin_ia32_cvtsd2si64((__v2df)__a);
+}
+
+/// \brief Converts the first (lower) element of a vector of [2 x double] into a
+/// 64-bit signed integer value, truncating the result when it is inexact.
+///
+/// \headerfile <x86intrin.h>
+///
+/// This intrinsic corresponds to the \c VCVTTSD2SI / CVTTSD2SI instruction.
+///
+/// \param __a
+/// A 128-bit vector of [2 x double]. The lower 64 bits are used in the
+/// conversion.
+/// \returns A 64-bit signed integer containing the converted value.
+static __inline__ long long __DEFAULT_FN_ATTRS
+_mm_cvttsd_si64(__m128d __a)
+{
+ return __a[0];
+}
+#endif
+
+/// \brief Converts a vector of [4 x i32] into a vector of [4 x float].
+///
+/// \headerfile <x86intrin.h>
+///
+/// This intrinsic corresponds to the \c VCVTDQ2PS / CVTDQ2PS instruction.
+///
+/// \param __a
+/// A 128-bit integer vector.
+/// \returns A 128-bit vector of [4 x float] containing the converted values.
+static __inline__ __m128 __DEFAULT_FN_ATTRS
+_mm_cvtepi32_ps(__m128i __a)
+{
+ return __builtin_ia32_cvtdq2ps((__v4si)__a);
+}
+
+/// \brief Converts a vector of [4 x float] into a vector of [4 x i32].
+///
+/// \headerfile <x86intrin.h>
+///
+/// This intrinsic corresponds to the \c VCVTPS2DQ / CVTPS2DQ instruction.
+///
+/// \param __a
+/// A 128-bit vector of [4 x float].
+/// \returns A 128-bit integer vector of [4 x i32] containing the converted
+/// values.
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_cvtps_epi32(__m128 __a)
+{
+ return (__m128i)__builtin_ia32_cvtps2dq((__v4sf)__a);
+}
+
+/// \brief Converts a vector of [4 x float] into a vector of [4 x i32],
+/// truncating the result when it is inexact.
+///
+/// \headerfile <x86intrin.h>
+///
+/// This intrinsic corresponds to the \c VCVTTPS2DQ / CVTTPS2DQ instruction.
+///
+/// \param __a
+/// A 128-bit vector of [4 x float].
+/// \returns A 128-bit vector of [4 x i32] containing the converted values.
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_cvttps_epi32(__m128 __a)
+{
+ return (__m128i)__builtin_convertvector((__v4sf)__a, __v4si);
+}
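+
+/* _mm_cvtps_epi32 rounds according to the current rounding mode (by default,
+   round-to-nearest-even), while _mm_cvttps_epi32 always truncates toward
+   zero like a C cast, so 1.7f converts to 2 and to 1 respectively. */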
+
+/// \brief Returns a vector of [4 x i32] where the lowest element is the input
+/// operand and the remaining elements are zero.
+///
+/// \headerfile <x86intrin.h>
+///
+/// This intrinsic corresponds to the \c VMOVD / MOVD instruction.
+///
+/// \param __a
+/// A 32-bit signed integer operand.
+/// \returns A 128-bit vector of [4 x i32].
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_cvtsi32_si128(int __a)
+{
+ return (__m128i)(__v4si){ __a, 0, 0, 0 };
+}
+
+#ifdef __x86_64__
+/// \brief Returns a vector of [2 x i64] where the lower element is the input
+/// operand and the upper element is zero.
+///
+/// \headerfile <x86intrin.h>
+///
+/// This intrinsic corresponds to the \c VMOVQ / MOVQ instruction.
+///
+/// \param __a
+/// A 64-bit signed integer operand containing the value to be converted.
+/// \returns A 128-bit vector of [2 x i64] containing the converted value.
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_cvtsi64_si128(long long __a)
+{
+ return (__m128i){ __a, 0 };
+}
+#endif
+
+/// \brief Moves the least significant 32 bits of a vector of [4 x i32] to a
+/// 32-bit signed integer value.
+///
+/// \headerfile <x86intrin.h>
+///
+/// This intrinsic corresponds to the \c VMOVD / MOVD instruction.
+///
+/// \param __a
+/// A vector of [4 x i32]. The least significant 32 bits are moved to the
+/// destination.
+/// \returns A 32-bit signed integer containing the moved value.
+static __inline__ int __DEFAULT_FN_ATTRS
+_mm_cvtsi128_si32(__m128i __a)
+{
+ __v4si __b = (__v4si)__a;
+ return __b[0];
+}
+
+#ifdef __x86_64__
+/// \brief Moves the least significant 64 bits of a vector of [2 x i64] to a
+/// 64-bit signed integer value.
+///
+/// \headerfile <x86intrin.h>
+///
+/// This intrinsic corresponds to the \c VMOVQ / MOVQ instruction.
+///
+/// \param __a
+/// A vector of [2 x i64]. The least significant 64 bits are moved to the
+/// destination.
+/// \returns A 64-bit signed integer containing the moved value.
+static __inline__ long long __DEFAULT_FN_ATTRS
+_mm_cvtsi128_si64(__m128i __a)
+{
+ return __a[0];
+}
+#endif
+
+/// \brief Moves packed integer values from an aligned 128-bit memory location
+/// to elements in a 128-bit integer vector.
+///
+/// \headerfile <x86intrin.h>
+///
+/// This intrinsic corresponds to the \c VMOVDQA / MOVDQA instruction.
+///
+/// \param __p
+/// An aligned pointer to a memory location containing integer values.
+/// \returns A 128-bit integer vector containing the moved values.
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_load_si128(__m128i const *__p)
+{
+ return *__p;
+}
+
+/// \brief Moves packed integer values from an unaligned 128-bit memory location
+/// to elements in a 128-bit integer vector.
+///
+/// \headerfile <x86intrin.h>
+///
+/// This intrinsic corresponds to the \c VMOVDQU / MOVDQU instruction.
+///
+/// \param __p
+/// A pointer to a memory location containing integer values.
+/// \returns A 128-bit integer vector containing the moved values.
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_loadu_si128(__m128i const *__p)
+{
+ struct __loadu_si128 {
+ __m128i __v;
+ } __attribute__((__packed__, __may_alias__));
+ return ((struct __loadu_si128*)__p)->__v;
+}
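+
+/* A minimal sketch (buf, i hypothetical): _mm_load_si128 requires a 16-byte
+   aligned pointer, while _mm_loadu_si128 accepts any address, e.g. when
+   scanning an arbitrary byte buffer:
+
+     __m128i v = _mm_loadu_si128((const __m128i *)(buf + i));
+*/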
+
+/// \brief Returns a vector of [2 x i64] where the lower element is taken from
+/// the lower element of the operand, and the upper element is zero.
+///
+/// \headerfile <x86intrin.h>
+///
+/// This intrinsic corresponds to the \c VMOVQ / MOVQ instruction.
+///
+/// \param __p
+///    A pointer to a 128-bit vector of [2 x i64]. Bits [63:0] of that vector
+///    are written to bits [63:0] of the destination.
+/// \returns A 128-bit vector of [2 x i64]. The lower order bits contain the
+/// moved value. The higher order bits are cleared.
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_loadl_epi64(__m128i const *__p)
+{
+ struct __mm_loadl_epi64_struct {
+ long long __u;
+ } __attribute__((__packed__, __may_alias__));
+ return (__m128i) { ((struct __mm_loadl_epi64_struct*)__p)->__u, 0};
+}
+
+/// \brief Generates a 128-bit vector of [4 x i32] with unspecified content.
+/// This could be used as an argument to another intrinsic function where the
+/// argument is required but the value is not actually used.
+///
+/// \headerfile <x86intrin.h>
+///
+/// This intrinsic has no corresponding instruction.
+///
+/// \returns A 128-bit vector of [4 x i32] with unspecified content.
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_undefined_si128(void)
+{
+ return (__m128i)__builtin_ia32_undef128();
+}
+
+/// \brief Initializes both 64-bit values in a 128-bit vector of [2 x i64] with
+/// the specified 64-bit integer values.
+///
+/// \headerfile <x86intrin.h>
+///
+/// This intrinsic is a utility function and does not correspond to a specific
+/// instruction.
+///
+/// \param __q1
+/// A 64-bit integer value used to initialize the upper 64 bits of the
+/// destination vector of [2 x i64].
+/// \param __q0
+/// A 64-bit integer value used to initialize the lower 64 bits of the
+/// destination vector of [2 x i64].
+/// \returns An initialized 128-bit vector of [2 x i64] containing the values
+/// provided in the operands.
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_set_epi64x(long long __q1, long long __q0)
+{
+ return (__m128i){ __q0, __q1 };
+}
+
+/// \brief Initializes both 64-bit values in a 128-bit vector of [2 x i64] with
+/// the specified 64-bit integer values.
+///
+/// \headerfile <x86intrin.h>
+///
+/// This intrinsic is a utility function and does not correspond to a specific
+/// instruction.
+///
+/// \param __q1
+/// A 64-bit integer value used to initialize the upper 64 bits of the
+/// destination vector of [2 x i64].
+/// \param __q0
+/// A 64-bit integer value used to initialize the lower 64 bits of the
+/// destination vector of [2 x i64].
+/// \returns An initialized 128-bit vector of [2 x i64] containing the values
+/// provided in the operands.
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_set_epi64(__m64 __q1, __m64 __q0)
+{
+ return (__m128i){ (long long)__q0, (long long)__q1 };
+}
+
+/// \brief Initializes the 32-bit values in a 128-bit vector of [4 x i32] with
+/// the specified 32-bit integer values.
+///
+/// \headerfile <x86intrin.h>
+///
+/// This intrinsic is a utility function and does not correspond to a specific
+/// instruction.
+///
+/// \param __i3
+/// A 32-bit integer value used to initialize bits [127:96] of the
+/// destination vector.
+/// \param __i2
+/// A 32-bit integer value used to initialize bits [95:64] of the destination
+/// vector.
+/// \param __i1
+/// A 32-bit integer value used to initialize bits [63:32] of the destination
+/// vector.
+/// \param __i0
+/// A 32-bit integer value used to initialize bits [31:0] of the destination
+/// vector.
+/// \returns An initialized 128-bit vector of [4 x i32] containing the values
+/// provided in the operands.
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_set_epi32(int __i3, int __i2, int __i1, int __i0)
+{
+ return (__m128i)(__v4si){ __i0, __i1, __i2, __i3};
+}
+
+/// \brief Initializes the 16-bit values in a 128-bit vector of [8 x i16] with
+/// the specified 16-bit integer values.
+///
+/// \headerfile <x86intrin.h>
+///
+/// This intrinsic is a utility function and does not correspond to a specific
+/// instruction.
+///
+/// \param __w7
+/// A 16-bit integer value used to initialize bits [127:112] of the
+/// destination vector.
+/// \param __w6
+/// A 16-bit integer value used to initialize bits [111:96] of the
+/// destination vector.
+/// \param __w5
+/// A 16-bit integer value used to initialize bits [95:80] of the destination
+/// vector.
+/// \param __w4
+/// A 16-bit integer value used to initialize bits [79:64] of the destination
+/// vector.
+/// \param __w3
+/// A 16-bit integer value used to initialize bits [63:48] of the destination
+/// vector.
+/// \param __w2
+/// A 16-bit integer value used to initialize bits [47:32] of the destination
+/// vector.
+/// \param __w1
+/// A 16-bit integer value used to initialize bits [31:16] of the destination
+/// vector.
+/// \param __w0
+/// A 16-bit integer value used to initialize bits [15:0] of the destination
+/// vector.
+/// \returns An initialized 128-bit vector of [8 x i16] containing the values
+/// provided in the operands.
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_set_epi16(short __w7, short __w6, short __w5, short __w4, short __w3, short __w2, short __w1, short __w0)
+{
+ return (__m128i)(__v8hi){ __w0, __w1, __w2, __w3, __w4, __w5, __w6, __w7 };
+}
+
+/// \brief Initializes the 8-bit values in a 128-bit vector of [16 x i8] with
+/// the specified 8-bit integer values.
+///
+/// \headerfile <x86intrin.h>
+///
+/// This intrinsic is a utility function and does not correspond to a specific
+/// instruction.
+///
+/// \param __b15
+/// Initializes bits [127:120] of the destination vector.
+/// \param __b14
+/// Initializes bits [119:112] of the destination vector.
+/// \param __b13
+/// Initializes bits [111:104] of the destination vector.
+/// \param __b12
+/// Initializes bits [103:96] of the destination vector.
+/// \param __b11
+/// Initializes bits [95:88] of the destination vector.
+/// \param __b10
+/// Initializes bits [87:80] of the destination vector.
+/// \param __b9
+/// Initializes bits [79:72] of the destination vector.
+/// \param __b8
+/// Initializes bits [71:64] of the destination vector.
+/// \param __b7
+/// Initializes bits [63:56] of the destination vector.
+/// \param __b6
+/// Initializes bits [55:48] of the destination vector.
+/// \param __b5
+/// Initializes bits [47:40] of the destination vector.
+/// \param __b4
+/// Initializes bits [39:32] of the destination vector.
+/// \param __b3
+/// Initializes bits [31:24] of the destination vector.
+/// \param __b2
+/// Initializes bits [23:16] of the destination vector.
+/// \param __b1
+/// Initializes bits [15:8] of the destination vector.
+/// \param __b0
+/// Initializes bits [7:0] of the destination vector.
+/// \returns An initialized 128-bit vector of [16 x i8] containing the values
+/// provided in the operands.
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_set_epi8(char __b15, char __b14, char __b13, char __b12, char __b11, char __b10, char __b9, char __b8, char __b7, char __b6, char __b5, char __b4, char __b3, char __b2, char __b1, char __b0)
+{
+ return (__m128i)(__v16qi){ __b0, __b1, __b2, __b3, __b4, __b5, __b6, __b7, __b8, __b9, __b10, __b11, __b12, __b13, __b14, __b15 };
+}
+
+/// \brief Initializes both values in a 128-bit integer vector with the
+/// specified 64-bit integer value.
+///
+/// \headerfile <x86intrin.h>
+///
+/// This intrinsic is a utility function and does not correspond to a specific
+/// instruction.
+///
+/// \param __q
+/// Integer value used to initialize the elements of the destination integer
+/// vector.
+/// \returns An initialized 128-bit integer vector of [2 x i64] with both
+/// elements containing the value provided in the operand.
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_set1_epi64x(long long __q)
+{
+ return (__m128i){ __q, __q };
+}
+
+/// \brief Initializes both values in a 128-bit vector of [2 x i64] with the
+/// specified 64-bit value.
+///
+/// \headerfile <x86intrin.h>
+///
+/// This intrinsic is a utility function and does not correspond to a specific
+/// instruction.
+///
+/// \param __q
+/// A 64-bit value used to initialize the elements of the destination integer
+/// vector.
+/// \returns An initialized 128-bit vector of [2 x i64] with all elements
+/// containing the value provided in the operand.
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_set1_epi64(__m64 __q)
+{
+ return (__m128i){ (long long)__q, (long long)__q };
+}
+
+/// \brief Initializes all values in a 128-bit vector of [4 x i32] with the
+/// specified 32-bit value.
+///
+/// \headerfile <x86intrin.h>
+///
+/// This intrinsic is a utility function and does not correspond to a specific
+/// instruction.
+///
+/// \param __i
+/// A 32-bit value used to initialize the elements of the destination integer
+/// vector.
+/// \returns An initialized 128-bit vector of [4 x i32] with all elements
+/// containing the value provided in the operand.
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_set1_epi32(int __i)
+{
+ return (__m128i)(__v4si){ __i, __i, __i, __i };
+}
+
+/// \brief Initializes all values in a 128-bit vector of [8 x i16] with the
+/// specified 16-bit value.
+///
+/// \headerfile <x86intrin.h>
+///
+/// This intrinsic is a utility function and does not correspond to a specific
+/// instruction.
+///
+/// \param __w
+/// A 16-bit value used to initialize the elements of the destination integer
+/// vector.
+/// \returns An initialized 128-bit vector of [8 x i16] with all elements
+/// containing the value provided in the operand.
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_set1_epi16(short __w)
+{
+ return (__m128i)(__v8hi){ __w, __w, __w, __w, __w, __w, __w, __w };
+}
+
+/// \brief Initializes all values in a 128-bit vector of [16 x i8] with the
+/// specified 8-bit value.
+///
+/// \headerfile <x86intrin.h>
+///
+/// This intrinsic is a utility function and does not correspond to a specific
+/// instruction.
+///
+/// \param __b
+/// An 8-bit value used to initialize the elements of the destination integer
+/// vector.
+/// \returns An initialized 128-bit vector of [16 x i8] with all elements
+/// containing the value provided in the operand.
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_set1_epi8(char __b)
+{
+ return (__m128i)(__v16qi){ __b, __b, __b, __b, __b, __b, __b, __b, __b, __b, __b, __b, __b, __b, __b, __b };
+}
+
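+/* The _mm_setr_* variants below take their arguments in low-to-high (memory)
+   order, the reverse of the corresponding _mm_set_* functions above. */
+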
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_setr_epi64(__m64 __q0, __m64 __q1)
+{
+ return (__m128i){ (long long)__q0, (long long)__q1 };
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_setr_epi32(int __i0, int __i1, int __i2, int __i3)
+{
+ return (__m128i)(__v4si){ __i0, __i1, __i2, __i3};
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_setr_epi16(short __w0, short __w1, short __w2, short __w3, short __w4, short __w5, short __w6, short __w7)
+{
+ return (__m128i)(__v8hi){ __w0, __w1, __w2, __w3, __w4, __w5, __w6, __w7 };
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_setr_epi8(char __b0, char __b1, char __b2, char __b3, char __b4, char __b5, char __b6, char __b7, char __b8, char __b9, char __b10, char __b11, char __b12, char __b13, char __b14, char __b15)
+{
+ return (__m128i)(__v16qi){ __b0, __b1, __b2, __b3, __b4, __b5, __b6, __b7, __b8, __b9, __b10, __b11, __b12, __b13, __b14, __b15 };
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_setzero_si128(void)
+{
+ return (__m128i){ 0LL, 0LL };
+}
+
+static __inline__ void __DEFAULT_FN_ATTRS
+_mm_store_si128(__m128i *__p, __m128i __b)
+{
+ *__p = __b;
+}
+
+static __inline__ void __DEFAULT_FN_ATTRS
+_mm_storeu_si128(__m128i *__p, __m128i __b)
+{
+ struct __storeu_si128 {
+ __m128i __v;
+ } __attribute__((__packed__, __may_alias__));
+ ((struct __storeu_si128*)__p)->__v = __b;
+}
+
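+/* _mm_maskmoveu_si128 conditionally stores each byte of __d to __p: a byte
+   is written only when the most significant bit of the corresponding byte
+   of __n is set. The store is non-temporal. */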
+static __inline__ void __DEFAULT_FN_ATTRS
+_mm_maskmoveu_si128(__m128i __d, __m128i __n, char *__p)
+{
+ __builtin_ia32_maskmovdqu((__v16qi)__d, (__v16qi)__n, __p);
+}
+
+static __inline__ void __DEFAULT_FN_ATTRS
+_mm_storel_epi64(__m128i *__p, __m128i __a)
+{
+ struct __mm_storel_epi64_struct {
+ long long __u;
+ } __attribute__((__packed__, __may_alias__));
+ ((struct __mm_storel_epi64_struct*)__p)->__u = __a[0];
+}
+
+static __inline__ void __DEFAULT_FN_ATTRS
+_mm_stream_pd(double *__p, __m128d __a)
+{
+ __builtin_nontemporal_store((__v2df)__a, (__v2df*)__p);
+}
+
+static __inline__ void __DEFAULT_FN_ATTRS
+_mm_stream_si128(__m128i *__p, __m128i __a)
+{
+ __builtin_nontemporal_store((__v2di)__a, (__v2di*)__p);
+}
+
+static __inline__ void __DEFAULT_FN_ATTRS
+_mm_stream_si32(int *__p, int __a)
+{
+ __builtin_ia32_movnti(__p, __a);
+}
+
+#ifdef __x86_64__
+static __inline__ void __DEFAULT_FN_ATTRS
+_mm_stream_si64(long long *__p, long long __a)
+{
+ __builtin_ia32_movnti64(__p, __a);
+}
+#endif
+
+static __inline__ void __DEFAULT_FN_ATTRS
+_mm_clflush(void const *__p)
+{
+ __builtin_ia32_clflush(__p);
+}
+
+static __inline__ void __DEFAULT_FN_ATTRS
+_mm_lfence(void)
+{
+ __builtin_ia32_lfence();
+}
+
+static __inline__ void __DEFAULT_FN_ATTRS
+_mm_mfence(void)
+{
+ __builtin_ia32_mfence();
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_packs_epi16(__m128i __a, __m128i __b)
+{
+ return (__m128i)__builtin_ia32_packsswb128((__v8hi)__a, (__v8hi)__b);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_packs_epi32(__m128i __a, __m128i __b)
+{
+ return (__m128i)__builtin_ia32_packssdw128((__v4si)__a, (__v4si)__b);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_packus_epi16(__m128i __a, __m128i __b)
+{
+ return (__m128i)__builtin_ia32_packuswb128((__v8hi)__a, (__v8hi)__b);
+}
+
+static __inline__ int __DEFAULT_FN_ATTRS
+_mm_extract_epi16(__m128i __a, int __imm)
+{
+ __v8hi __b = (__v8hi)__a;
+ return (unsigned short)__b[__imm & 7];
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_insert_epi16(__m128i __a, int __b, int __imm)
+{
+ __v8hi __c = (__v8hi)__a;
+ __c[__imm & 7] = __b;
+ return (__m128i)__c;
+}
+
+static __inline__ int __DEFAULT_FN_ATTRS
+_mm_movemask_epi8(__m128i __a)
+{
+ return __builtin_ia32_pmovmskb128((__v16qi)__a);
+}
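+
+/* A minimal sketch (chunk, c hypothetical): combining a byte compare with
+   _mm_movemask_epi8 to find the first occurrence of byte c in a 16-byte
+   block.
+
+     __m128i eq = _mm_cmpeq_epi8(chunk, _mm_set1_epi8(c));
+     int mask = _mm_movemask_epi8(eq);   // bit i set <=> byte i matched
+     // if (mask != 0), the first match is at index __builtin_ctz(mask)
+*/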
+
+#define _mm_shuffle_epi32(a, imm) __extension__ ({ \
+ (__m128i)__builtin_shufflevector((__v4si)(__m128i)(a), \
+ (__v4si)_mm_undefined_si128(), \
+ ((imm) >> 0) & 0x3, ((imm) >> 2) & 0x3, \
+ ((imm) >> 4) & 0x3, ((imm) >> 6) & 0x3); })
+
+#define _mm_shufflelo_epi16(a, imm) __extension__ ({ \
+ (__m128i)__builtin_shufflevector((__v8hi)(__m128i)(a), \
+ (__v8hi)_mm_undefined_si128(), \
+ ((imm) >> 0) & 0x3, ((imm) >> 2) & 0x3, \
+ ((imm) >> 4) & 0x3, ((imm) >> 6) & 0x3, \
+ 4, 5, 6, 7); })
+
+#define _mm_shufflehi_epi16(a, imm) __extension__ ({ \
+ (__m128i)__builtin_shufflevector((__v8hi)(__m128i)(a), \
+ (__v8hi)_mm_undefined_si128(), \
+ 0, 1, 2, 3, \
+ 4 + (((imm) >> 0) & 0x3), \
+ 4 + (((imm) >> 2) & 0x3), \
+ 4 + (((imm) >> 4) & 0x3), \
+ 4 + (((imm) >> 6) & 0x3)); })
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_unpackhi_epi8(__m128i __a, __m128i __b)
+{
+ return (__m128i)__builtin_shufflevector((__v16qi)__a, (__v16qi)__b, 8, 16+8, 9, 16+9, 10, 16+10, 11, 16+11, 12, 16+12, 13, 16+13, 14, 16+14, 15, 16+15);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_unpackhi_epi16(__m128i __a, __m128i __b)
+{
+ return (__m128i)__builtin_shufflevector((__v8hi)__a, (__v8hi)__b, 4, 8+4, 5, 8+5, 6, 8+6, 7, 8+7);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_unpackhi_epi32(__m128i __a, __m128i __b)
+{
+ return (__m128i)__builtin_shufflevector((__v4si)__a, (__v4si)__b, 2, 4+2, 3, 4+3);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_unpackhi_epi64(__m128i __a, __m128i __b)
+{
+ return (__m128i)__builtin_shufflevector((__v2di)__a, (__v2di)__b, 1, 2+1);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_unpacklo_epi8(__m128i __a, __m128i __b)
+{
+ return (__m128i)__builtin_shufflevector((__v16qi)__a, (__v16qi)__b, 0, 16+0, 1, 16+1, 2, 16+2, 3, 16+3, 4, 16+4, 5, 16+5, 6, 16+6, 7, 16+7);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_unpacklo_epi16(__m128i __a, __m128i __b)
+{
+ return (__m128i)__builtin_shufflevector((__v8hi)__a, (__v8hi)__b, 0, 8+0, 1, 8+1, 2, 8+2, 3, 8+3);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_unpacklo_epi32(__m128i __a, __m128i __b)
+{
+ return (__m128i)__builtin_shufflevector((__v4si)__a, (__v4si)__b, 0, 4+0, 1, 4+1);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_unpacklo_epi64(__m128i __a, __m128i __b)
+{
+ return (__m128i)__builtin_shufflevector((__v2di)__a, (__v2di)__b, 0, 2+0);
+}
+
+static __inline__ __m64 __DEFAULT_FN_ATTRS
+_mm_movepi64_pi64(__m128i __a)
+{
+ return (__m64)__a[0];
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_movpi64_epi64(__m64 __a)
+{
+ return (__m128i){ (long long)__a, 0 };
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_move_epi64(__m128i __a)
+{
+ return __builtin_shufflevector((__v2di)__a, (__m128i){ 0 }, 0, 2);
+}
+
+static __inline__ __m128d __DEFAULT_FN_ATTRS
+_mm_unpackhi_pd(__m128d __a, __m128d __b)
+{
+ return __builtin_shufflevector((__v2df)__a, (__v2df)__b, 1, 2+1);
+}
+
+static __inline__ __m128d __DEFAULT_FN_ATTRS
+_mm_unpacklo_pd(__m128d __a, __m128d __b)
+{
+ return __builtin_shufflevector((__v2df)__a, (__v2df)__b, 0, 2+0);
+}
+
+static __inline__ int __DEFAULT_FN_ATTRS
+_mm_movemask_pd(__m128d __a)
+{
+ return __builtin_ia32_movmskpd((__v2df)__a);
+}
+
+#define _mm_shuffle_pd(a, b, i) __extension__ ({ \
+ (__m128d)__builtin_shufflevector((__v2df)(__m128d)(a), (__v2df)(__m128d)(b), \
+ 0 + (((i) >> 0) & 0x1), \
+ 2 + (((i) >> 1) & 0x1)); })
+
+static __inline__ __m128 __DEFAULT_FN_ATTRS
+_mm_castpd_ps(__m128d __a)
+{
+ return (__m128)__a;
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_castpd_si128(__m128d __a)
+{
+ return (__m128i)__a;
+}
+
+static __inline__ __m128d __DEFAULT_FN_ATTRS
+_mm_castps_pd(__m128 __a)
+{
+ return (__m128d)__a;
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_castps_si128(__m128 __a)
+{
+ return (__m128i)__a;
+}
+
+static __inline__ __m128 __DEFAULT_FN_ATTRS
+_mm_castsi128_ps(__m128i __a)
+{
+ return (__m128)__a;
+}
+
+static __inline__ __m128d __DEFAULT_FN_ATTRS
+_mm_castsi128_pd(__m128i __a)
+{
+ return (__m128d)__a;
+}
+
+static __inline__ void __DEFAULT_FN_ATTRS
+_mm_pause(void)
+{
+ __builtin_ia32_pause();
+}
+
+#undef __DEFAULT_FN_ATTRS
+
+#define _MM_SHUFFLE2(x, y) (((x) << 1) | (y))
+
+#endif /* __EMMINTRIN_H */
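
A minimal usage sketch for the _mm_shuffle_epi32 macro above, which packs four
2-bit source-lane selectors into imm, low bits first (assumes an SSE2-capable
x86 target, e.g. cc -msse2; the sample values are illustrative):

  #include <emmintrin.h>
  #include <stdio.h>

  int main(void) {
    __m128i v = _mm_setr_epi32(10, 20, 30, 40);
    /* imm = 0x1B = binary 00 01 10 11: selectors 3,2,1,0 -> reversed. */
    __m128i r = _mm_shuffle_epi32(v, 0x1B);
    int out[4];
    _mm_storeu_si128((__m128i *)out, r);
    printf("%d %d %d %d\n", out[0], out[1], out[2], out[3]); /* 40 30 20 10 */
    return 0;
  }
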
diff --git a/current/clang-include/f16cintrin.h b/current/clang-include/f16cintrin.h
new file mode 100644
index 0000000..415bf73
--- /dev/null
+++ b/current/clang-include/f16cintrin.h
@@ -0,0 +1,124 @@
+/*===---- f16cintrin.h - F16C intrinsics -----------------------------------===
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+ * THE SOFTWARE.
+ *
+ *===-----------------------------------------------------------------------===
+ */
+
+#if !defined __X86INTRIN_H && !defined __EMMINTRIN_H && !defined __IMMINTRIN_H
+#error "Never use <f16cintrin.h> directly; include <emmintrin.h> instead."
+#endif
+
+#ifndef __F16CINTRIN_H
+#define __F16CINTRIN_H
+
+/* Define the default attributes for the functions in this file. */
+#define __DEFAULT_FN_ATTRS \
+ __attribute__((__always_inline__, __nodebug__, __target__("f16c")))
+
+/// \brief Converts a 16-bit half-precision float value into a 32-bit float
+/// value.
+///
+/// \headerfile <x86intrin.h>
+///
+/// This intrinsic corresponds to the \c VCVTPH2PS instruction.
+///
+/// \param __a
+/// A 16-bit half-precision float value.
+/// \returns The converted 32-bit float value.
+static __inline float __DEFAULT_FN_ATTRS
+_cvtsh_ss(unsigned short __a)
+{
+ __v8hi v = {(short)__a, 0, 0, 0, 0, 0, 0, 0};
+ __v4sf r = __builtin_ia32_vcvtph2ps(v);
+ return r[0];
+}
+
+/// \brief Converts a 32-bit single-precision float value to a 16-bit
+/// half-precision float value.
+///
+/// \headerfile <x86intrin.h>
+///
+/// \code
+/// unsigned short _cvtss_sh(float a, const int imm);
+/// \endcode
+///
+/// This intrinsic corresponds to the \c VCVTPS2PH instruction.
+///
+/// \param a
+/// A 32-bit single-precision float value to be converted to a 16-bit
+/// half-precision float value.
+/// \param imm
+/// An immediate value controlling rounding using bits [2:0]:
+/// 000: Nearest
+/// 001: Down
+/// 010: Up
+/// 011: Truncate
+/// 1XX: Use MXCSR.RC for rounding
+/// \returns The converted 16-bit half-precision float value.
+#define _cvtss_sh(a, imm) \
+ ((unsigned short)(((__v8hi)__builtin_ia32_vcvtps2ph((__v4sf){a, 0, 0, 0}, \
+ (imm)))[0]))
+
+/// \brief Converts a 128-bit vector containing 32-bit float values into a
+/// 128-bit vector containing 16-bit half-precision float values.
+///
+/// \headerfile <x86intrin.h>
+///
+/// \code
+/// __m128i _mm_cvtps_ph(__m128 a, const int imm);
+/// \endcode
+///
+/// This intrinsic corresponds to the \c VCVTPS2PH instruction.
+///
+/// \param a
+/// A 128-bit vector containing 32-bit float values.
+/// \param imm
+/// An immediate value controlling rounding using bits [2:0]:
+/// 000: Nearest
+/// 001: Down
+/// 010: Up
+/// 011: Truncate
+/// 1XX: Use MXCSR.RC for rounding
+/// \returns A 128-bit vector containing converted 16-bit half-precision float
+/// values. The lower 64 bits are used to store the converted 16-bit
+/// half-precision floating-point values.
+#define _mm_cvtps_ph(a, imm) \
+ ((__m128i)__builtin_ia32_vcvtps2ph((__v4sf)(__m128)(a), (imm)))
+
+/// \brief Converts a 128-bit vector containing 16-bit half-precision float
+/// values into a 128-bit vector containing 32-bit float values.
+///
+/// \headerfile <x86intrin.h>
+///
+/// This intrinsic corresponds to the \c VCVTPH2PS instruction.
+///
+/// \param __a
+/// A 128-bit vector containing 16-bit half-precision float values. The lower
+/// 64 bits are used in the conversion.
+/// \returns A 128-bit vector of [4 x float] containing converted float values.
+static __inline __m128 __DEFAULT_FN_ATTRS
+_mm_cvtph_ps(__m128i __a)
+{
+ return (__m128)__builtin_ia32_vcvtph2ps((__v8hi)__a);
+}
+
+#undef __DEFAULT_FN_ATTRS
+
+#endif /* __F16CINTRIN_H */
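
A minimal round-trip sketch for the scalar conversions above, assuming an x86
target built with -mf16c (imm 0 selects round-to-nearest per the table above):

  #include <immintrin.h>
  #include <stdio.h>

  int main(void) {
    unsigned short h = _cvtss_sh(0.333333f, 0); /* float -> half, nearest */
    float back = _cvtsh_ss(h);                  /* half -> float */
    printf("half bits 0x%04x, back to float %f\n", (unsigned)h, back);
    return 0;
  }
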
diff --git a/current/clang-include/float.h b/current/clang-include/float.h
new file mode 100644
index 0000000..a28269e
--- /dev/null
+++ b/current/clang-include/float.h
@@ -0,0 +1,134 @@
+/*===---- float.h - Characteristics of floating point types ----------------===
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+ * THE SOFTWARE.
+ *
+ *===-----------------------------------------------------------------------===
+ */
+
+#ifndef __FLOAT_H
+#define __FLOAT_H
+
+/* If we're on MinGW or MSVC, fall back to the system's float.h, which might
+ * have additional definitions provided for Windows.
+ * For more details see http://msdn.microsoft.com/en-us/library/y0ybw9fy.aspx
+ */
+#if (defined(__MINGW32__) || defined(_MSC_VER)) && __STDC_HOSTED__ && \
+ __has_include_next(<float.h>)
+# include_next <float.h>
+
+/* Undefine anything that we'll be redefining below. */
+# undef FLT_EVAL_METHOD
+# undef FLT_ROUNDS
+# undef FLT_RADIX
+# undef FLT_MANT_DIG
+# undef DBL_MANT_DIG
+# undef LDBL_MANT_DIG
+# if __STDC_VERSION__ >= 199901L || !defined(__STRICT_ANSI__)
+# undef DECIMAL_DIG
+# endif
+# undef FLT_DIG
+# undef DBL_DIG
+# undef LDBL_DIG
+# undef FLT_MIN_EXP
+# undef DBL_MIN_EXP
+# undef LDBL_MIN_EXP
+# undef FLT_MIN_10_EXP
+# undef DBL_MIN_10_EXP
+# undef LDBL_MIN_10_EXP
+# undef FLT_MAX_EXP
+# undef DBL_MAX_EXP
+# undef LDBL_MAX_EXP
+# undef FLT_MAX_10_EXP
+# undef DBL_MAX_10_EXP
+# undef LDBL_MAX_10_EXP
+# undef FLT_MAX
+# undef DBL_MAX
+# undef LDBL_MAX
+# undef FLT_EPSILON
+# undef DBL_EPSILON
+# undef LDBL_EPSILON
+# undef FLT_MIN
+# undef DBL_MIN
+# undef LDBL_MIN
+# if __STDC_VERSION__ >= 201112L || !defined(__STRICT_ANSI__)
+# undef FLT_TRUE_MIN
+# undef DBL_TRUE_MIN
+# undef LDBL_TRUE_MIN
+# undef FLT_DECIMAL_DIG
+# undef DBL_DECIMAL_DIG
+# undef LDBL_DECIMAL_DIG
+# endif
+#endif
+
+/* Characteristics of floating point types, C99 5.2.4.2.2 */
+
+#define FLT_EVAL_METHOD __FLT_EVAL_METHOD__
+#define FLT_ROUNDS (__builtin_flt_rounds())
+#define FLT_RADIX __FLT_RADIX__
+
+#define FLT_MANT_DIG __FLT_MANT_DIG__
+#define DBL_MANT_DIG __DBL_MANT_DIG__
+#define LDBL_MANT_DIG __LDBL_MANT_DIG__
+
+#if __STDC_VERSION__ >= 199901L || !defined(__STRICT_ANSI__)
+# define DECIMAL_DIG __DECIMAL_DIG__
+#endif
+
+#define FLT_DIG __FLT_DIG__
+#define DBL_DIG __DBL_DIG__
+#define LDBL_DIG __LDBL_DIG__
+
+#define FLT_MIN_EXP __FLT_MIN_EXP__
+#define DBL_MIN_EXP __DBL_MIN_EXP__
+#define LDBL_MIN_EXP __LDBL_MIN_EXP__
+
+#define FLT_MIN_10_EXP __FLT_MIN_10_EXP__
+#define DBL_MIN_10_EXP __DBL_MIN_10_EXP__
+#define LDBL_MIN_10_EXP __LDBL_MIN_10_EXP__
+
+#define FLT_MAX_EXP __FLT_MAX_EXP__
+#define DBL_MAX_EXP __DBL_MAX_EXP__
+#define LDBL_MAX_EXP __LDBL_MAX_EXP__
+
+#define FLT_MAX_10_EXP __FLT_MAX_10_EXP__
+#define DBL_MAX_10_EXP __DBL_MAX_10_EXP__
+#define LDBL_MAX_10_EXP __LDBL_MAX_10_EXP__
+
+#define FLT_MAX __FLT_MAX__
+#define DBL_MAX __DBL_MAX__
+#define LDBL_MAX __LDBL_MAX__
+
+#define FLT_EPSILON __FLT_EPSILON__
+#define DBL_EPSILON __DBL_EPSILON__
+#define LDBL_EPSILON __LDBL_EPSILON__
+
+#define FLT_MIN __FLT_MIN__
+#define DBL_MIN __DBL_MIN__
+#define LDBL_MIN __LDBL_MIN__
+
+#if __STDC_VERSION__ >= 201112L || !defined(__STRICT_ANSI__)
+# define FLT_TRUE_MIN __FLT_DENORM_MIN__
+# define DBL_TRUE_MIN __DBL_DENORM_MIN__
+# define LDBL_TRUE_MIN __LDBL_DENORM_MIN__
+# define FLT_DECIMAL_DIG __FLT_DECIMAL_DIG__
+# define DBL_DECIMAL_DIG __DBL_DECIMAL_DIG__
+# define LDBL_DECIMAL_DIG __LDBL_DECIMAL_DIG__
+#endif
+
+#endif /* __FLOAT_H */
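
A short sketch of these limits in use, distinguishing the smallest normalized
value (DBL_MIN) from the smallest subnormal (DBL_TRUE_MIN, C11 and later):

  #include <float.h>
  #include <stdio.h>

  int main(void) {
    printf("DBL_MANT_DIG = %d\n", DBL_MANT_DIG); /* 53 for IEEE double */
    printf("DBL_EPSILON  = %g\n", DBL_EPSILON);  /* gap just above 1.0 */
  #ifdef DBL_TRUE_MIN
    printf("DBL_MIN      = %g (smallest normal)\n", DBL_MIN);
    printf("DBL_TRUE_MIN = %g (smallest subnormal)\n", DBL_TRUE_MIN);
  #endif
    return 0;
  }
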
diff --git a/current/clang-include/fma4intrin.h b/current/clang-include/fma4intrin.h
new file mode 100644
index 0000000..11aa8ce
--- /dev/null
+++ b/current/clang-include/fma4intrin.h
@@ -0,0 +1,230 @@
+/*===---- fma4intrin.h - FMA4 intrinsics -----------------------------------===
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+ * THE SOFTWARE.
+ *
+ *===-----------------------------------------------------------------------===
+ */
+
+#ifndef __X86INTRIN_H
+#error "Never use <fma4intrin.h> directly; include <x86intrin.h> instead."
+#endif
+
+#ifndef __FMA4INTRIN_H
+#define __FMA4INTRIN_H
+
+#include <pmmintrin.h>
+
+/* Define the default attributes for the functions in this file. */
+#define __DEFAULT_FN_ATTRS __attribute__((__always_inline__, __nodebug__, __target__("fma4")))
+
+static __inline__ __m128 __DEFAULT_FN_ATTRS
+_mm_macc_ps(__m128 __A, __m128 __B, __m128 __C)
+{
+ return (__m128)__builtin_ia32_vfmaddps((__v4sf)__A, (__v4sf)__B, (__v4sf)__C);
+}
+
+static __inline__ __m128d __DEFAULT_FN_ATTRS
+_mm_macc_pd(__m128d __A, __m128d __B, __m128d __C)
+{
+ return (__m128d)__builtin_ia32_vfmaddpd((__v2df)__A, (__v2df)__B, (__v2df)__C);
+}
+
+static __inline__ __m128 __DEFAULT_FN_ATTRS
+_mm_macc_ss(__m128 __A, __m128 __B, __m128 __C)
+{
+ return (__m128)__builtin_ia32_vfmaddss((__v4sf)__A, (__v4sf)__B, (__v4sf)__C);
+}
+
+static __inline__ __m128d __DEFAULT_FN_ATTRS
+_mm_macc_sd(__m128d __A, __m128d __B, __m128d __C)
+{
+ return (__m128d)__builtin_ia32_vfmaddsd((__v2df)__A, (__v2df)__B, (__v2df)__C);
+}
+
+static __inline__ __m128 __DEFAULT_FN_ATTRS
+_mm_msub_ps(__m128 __A, __m128 __B, __m128 __C)
+{
+ return (__m128)__builtin_ia32_vfmsubps((__v4sf)__A, (__v4sf)__B, (__v4sf)__C);
+}
+
+static __inline__ __m128d __DEFAULT_FN_ATTRS
+_mm_msub_pd(__m128d __A, __m128d __B, __m128d __C)
+{
+ return (__m128d)__builtin_ia32_vfmsubpd((__v2df)__A, (__v2df)__B, (__v2df)__C);
+}
+
+static __inline__ __m128 __DEFAULT_FN_ATTRS
+_mm_msub_ss(__m128 __A, __m128 __B, __m128 __C)
+{
+ return (__m128)__builtin_ia32_vfmsubss((__v4sf)__A, (__v4sf)__B, (__v4sf)__C);
+}
+
+static __inline__ __m128d __DEFAULT_FN_ATTRS
+_mm_msub_sd(__m128d __A, __m128d __B, __m128d __C)
+{
+ return (__m128d)__builtin_ia32_vfmsubsd((__v2df)__A, (__v2df)__B, (__v2df)__C);
+}
+
+static __inline__ __m128 __DEFAULT_FN_ATTRS
+_mm_nmacc_ps(__m128 __A, __m128 __B, __m128 __C)
+{
+ return (__m128)__builtin_ia32_vfnmaddps((__v4sf)__A, (__v4sf)__B, (__v4sf)__C);
+}
+
+static __inline__ __m128d __DEFAULT_FN_ATTRS
+_mm_nmacc_pd(__m128d __A, __m128d __B, __m128d __C)
+{
+ return (__m128d)__builtin_ia32_vfnmaddpd((__v2df)__A, (__v2df)__B, (__v2df)__C);
+}
+
+static __inline__ __m128 __DEFAULT_FN_ATTRS
+_mm_nmacc_ss(__m128 __A, __m128 __B, __m128 __C)
+{
+ return (__m128)__builtin_ia32_vfnmaddss((__v4sf)__A, (__v4sf)__B, (__v4sf)__C);
+}
+
+static __inline__ __m128d __DEFAULT_FN_ATTRS
+_mm_nmacc_sd(__m128d __A, __m128d __B, __m128d __C)
+{
+ return (__m128d)__builtin_ia32_vfnmaddsd((__v2df)__A, (__v2df)__B, (__v2df)__C);
+}
+
+static __inline__ __m128 __DEFAULT_FN_ATTRS
+_mm_nmsub_ps(__m128 __A, __m128 __B, __m128 __C)
+{
+ return (__m128)__builtin_ia32_vfnmsubps((__v4sf)__A, (__v4sf)__B, (__v4sf)__C);
+}
+
+static __inline__ __m128d __DEFAULT_FN_ATTRS
+_mm_nmsub_pd(__m128d __A, __m128d __B, __m128d __C)
+{
+ return (__m128d)__builtin_ia32_vfnmsubpd((__v2df)__A, (__v2df)__B, (__v2df)__C);
+}
+
+static __inline__ __m128 __DEFAULT_FN_ATTRS
+_mm_nmsub_ss(__m128 __A, __m128 __B, __m128 __C)
+{
+ return (__m128)__builtin_ia32_vfnmsubss((__v4sf)__A, (__v4sf)__B, (__v4sf)__C);
+}
+
+static __inline__ __m128d __DEFAULT_FN_ATTRS
+_mm_nmsub_sd(__m128d __A, __m128d __B, __m128d __C)
+{
+ return (__m128d)__builtin_ia32_vfnmsubsd((__v2df)__A, (__v2df)__B, (__v2df)__C);
+}
+
+static __inline__ __m128 __DEFAULT_FN_ATTRS
+_mm_maddsub_ps(__m128 __A, __m128 __B, __m128 __C)
+{
+ return (__m128)__builtin_ia32_vfmaddsubps((__v4sf)__A, (__v4sf)__B, (__v4sf)__C);
+}
+
+static __inline__ __m128d __DEFAULT_FN_ATTRS
+_mm_maddsub_pd(__m128d __A, __m128d __B, __m128d __C)
+{
+ return (__m128d)__builtin_ia32_vfmaddsubpd((__v2df)__A, (__v2df)__B, (__v2df)__C);
+}
+
+static __inline__ __m128 __DEFAULT_FN_ATTRS
+_mm_msubadd_ps(__m128 __A, __m128 __B, __m128 __C)
+{
+ return (__m128)__builtin_ia32_vfmsubaddps((__v4sf)__A, (__v4sf)__B, (__v4sf)__C);
+}
+
+static __inline__ __m128d __DEFAULT_FN_ATTRS
+_mm_msubadd_pd(__m128d __A, __m128d __B, __m128d __C)
+{
+ return (__m128d)__builtin_ia32_vfmsubaddpd((__v2df)__A, (__v2df)__B, (__v2df)__C);
+}
+
+static __inline__ __m256 __DEFAULT_FN_ATTRS
+_mm256_macc_ps(__m256 __A, __m256 __B, __m256 __C)
+{
+ return (__m256)__builtin_ia32_vfmaddps256((__v8sf)__A, (__v8sf)__B, (__v8sf)__C);
+}
+
+static __inline__ __m256d __DEFAULT_FN_ATTRS
+_mm256_macc_pd(__m256d __A, __m256d __B, __m256d __C)
+{
+ return (__m256d)__builtin_ia32_vfmaddpd256((__v4df)__A, (__v4df)__B, (__v4df)__C);
+}
+
+static __inline__ __m256 __DEFAULT_FN_ATTRS
+_mm256_msub_ps(__m256 __A, __m256 __B, __m256 __C)
+{
+ return (__m256)__builtin_ia32_vfmsubps256((__v8sf)__A, (__v8sf)__B, (__v8sf)__C);
+}
+
+static __inline__ __m256d __DEFAULT_FN_ATTRS
+_mm256_msub_pd(__m256d __A, __m256d __B, __m256d __C)
+{
+ return (__m256d)__builtin_ia32_vfmsubpd256((__v4df)__A, (__v4df)__B, (__v4df)__C);
+}
+
+static __inline__ __m256 __DEFAULT_FN_ATTRS
+_mm256_nmacc_ps(__m256 __A, __m256 __B, __m256 __C)
+{
+ return (__m256)__builtin_ia32_vfnmaddps256((__v8sf)__A, (__v8sf)__B, (__v8sf)__C);
+}
+
+static __inline__ __m256d __DEFAULT_FN_ATTRS
+_mm256_nmacc_pd(__m256d __A, __m256d __B, __m256d __C)
+{
+ return (__m256d)__builtin_ia32_vfnmaddpd256((__v4df)__A, (__v4df)__B, (__v4df)__C);
+}
+
+static __inline__ __m256 __DEFAULT_FN_ATTRS
+_mm256_nmsub_ps(__m256 __A, __m256 __B, __m256 __C)
+{
+ return (__m256)__builtin_ia32_vfnmsubps256((__v8sf)__A, (__v8sf)__B, (__v8sf)__C);
+}
+
+static __inline__ __m256d __DEFAULT_FN_ATTRS
+_mm256_nmsub_pd(__m256d __A, __m256d __B, __m256d __C)
+{
+ return (__m256d)__builtin_ia32_vfnmsubpd256((__v4df)__A, (__v4df)__B, (__v4df)__C);
+}
+
+static __inline__ __m256 __DEFAULT_FN_ATTRS
+_mm256_maddsub_ps(__m256 __A, __m256 __B, __m256 __C)
+{
+ return (__m256)__builtin_ia32_vfmaddsubps256((__v8sf)__A, (__v8sf)__B, (__v8sf)__C);
+}
+
+static __inline__ __m256d __DEFAULT_FN_ATTRS
+_mm256_maddsub_pd(__m256d __A, __m256d __B, __m256d __C)
+{
+ return (__m256d)__builtin_ia32_vfmaddsubpd256((__v4df)__A, (__v4df)__B, (__v4df)__C);
+}
+
+static __inline__ __m256 __DEFAULT_FN_ATTRS
+_mm256_msubadd_ps(__m256 __A, __m256 __B, __m256 __C)
+{
+ return (__m256)__builtin_ia32_vfmsubaddps256((__v8sf)__A, (__v8sf)__B, (__v8sf)__C);
+}
+
+static __inline__ __m256d __DEFAULT_FN_ATTRS
+_mm256_msubadd_pd(__m256d __A, __m256d __B, __m256d __C)
+{
+ return (__m256d)__builtin_ia32_vfmsubaddpd256((__v4df)__A, (__v4df)__B, (__v4df)__C);
+}
+
+#undef __DEFAULT_FN_ATTRS
+
+#endif /* __FMA4INTRIN_H */
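
A minimal sketch of the multiply-accumulate forms above; FMA4 shipped only on
certain AMD processors (Bulldozer family), so this assumes -mfma4 on such a
target (the sample values are illustrative):

  #include <x86intrin.h>
  #include <stdio.h>

  int main(void) {
    __m128 a = _mm_set1_ps(2.0f), b = _mm_set1_ps(3.0f), c = _mm_set1_ps(1.0f);
    __m128 r = _mm_macc_ps(a, b, c); /* per lane: a*b + c, rounded once */
    float out[4];
    _mm_storeu_ps(out, r);
    printf("%f\n", out[0]);          /* 7.000000 */
    return 0;
  }
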
diff --git a/current/clang-include/fmaintrin.h b/current/clang-include/fmaintrin.h
new file mode 100644
index 0000000..0e2ef0b
--- /dev/null
+++ b/current/clang-include/fmaintrin.h
@@ -0,0 +1,228 @@
+/*===---- fmaintrin.h - FMA intrinsics -------------------------------------===
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+ * THE SOFTWARE.
+ *
+ *===-----------------------------------------------------------------------===
+ */
+
+#ifndef __IMMINTRIN_H
+#error "Never use <fmaintrin.h> directly; include <immintrin.h> instead."
+#endif
+
+#ifndef __FMAINTRIN_H
+#define __FMAINTRIN_H
+
+/* Define the default attributes for the functions in this file. */
+#define __DEFAULT_FN_ATTRS __attribute__((__always_inline__, __nodebug__, __target__("fma")))
+
+static __inline__ __m128 __DEFAULT_FN_ATTRS
+_mm_fmadd_ps(__m128 __A, __m128 __B, __m128 __C)
+{
+ return (__m128)__builtin_ia32_vfmaddps((__v4sf)__A, (__v4sf)__B, (__v4sf)__C);
+}
+
+static __inline__ __m128d __DEFAULT_FN_ATTRS
+_mm_fmadd_pd(__m128d __A, __m128d __B, __m128d __C)
+{
+ return (__m128d)__builtin_ia32_vfmaddpd((__v2df)__A, (__v2df)__B, (__v2df)__C);
+}
+
+static __inline__ __m128 __DEFAULT_FN_ATTRS
+_mm_fmadd_ss(__m128 __A, __m128 __B, __m128 __C)
+{
+ return (__m128)__builtin_ia32_vfmaddss((__v4sf)__A, (__v4sf)__B, (__v4sf)__C);
+}
+
+static __inline__ __m128d __DEFAULT_FN_ATTRS
+_mm_fmadd_sd(__m128d __A, __m128d __B, __m128d __C)
+{
+ return (__m128d)__builtin_ia32_vfmaddsd((__v2df)__A, (__v2df)__B, (__v2df)__C);
+}
+
+static __inline__ __m128 __DEFAULT_FN_ATTRS
+_mm_fmsub_ps(__m128 __A, __m128 __B, __m128 __C)
+{
+ return (__m128)__builtin_ia32_vfmsubps((__v4sf)__A, (__v4sf)__B, (__v4sf)__C);
+}
+
+static __inline__ __m128d __DEFAULT_FN_ATTRS
+_mm_fmsub_pd(__m128d __A, __m128d __B, __m128d __C)
+{
+ return (__m128d)__builtin_ia32_vfmsubpd((__v2df)__A, (__v2df)__B, (__v2df)__C);
+}
+
+static __inline__ __m128 __DEFAULT_FN_ATTRS
+_mm_fmsub_ss(__m128 __A, __m128 __B, __m128 __C)
+{
+ return (__m128)__builtin_ia32_vfmsubss((__v4sf)__A, (__v4sf)__B, (__v4sf)__C);
+}
+
+static __inline__ __m128d __DEFAULT_FN_ATTRS
+_mm_fmsub_sd(__m128d __A, __m128d __B, __m128d __C)
+{
+ return (__m128d)__builtin_ia32_vfmsubsd((__v2df)__A, (__v2df)__B, (__v2df)__C);
+}
+
+static __inline__ __m128 __DEFAULT_FN_ATTRS
+_mm_fnmadd_ps(__m128 __A, __m128 __B, __m128 __C)
+{
+ return (__m128)__builtin_ia32_vfnmaddps((__v4sf)__A, (__v4sf)__B, (__v4sf)__C);
+}
+
+static __inline__ __m128d __DEFAULT_FN_ATTRS
+_mm_fnmadd_pd(__m128d __A, __m128d __B, __m128d __C)
+{
+ return (__m128d)__builtin_ia32_vfnmaddpd((__v2df)__A, (__v2df)__B, (__v2df)__C);
+}
+
+static __inline__ __m128 __DEFAULT_FN_ATTRS
+_mm_fnmadd_ss(__m128 __A, __m128 __B, __m128 __C)
+{
+ return (__m128)__builtin_ia32_vfnmaddss((__v4sf)__A, (__v4sf)__B, (__v4sf)__C);
+}
+
+static __inline__ __m128d __DEFAULT_FN_ATTRS
+_mm_fnmadd_sd(__m128d __A, __m128d __B, __m128d __C)
+{
+ return (__m128d)__builtin_ia32_vfnmaddsd((__v2df)__A, (__v2df)__B, (__v2df)__C);
+}
+
+static __inline__ __m128 __DEFAULT_FN_ATTRS
+_mm_fnmsub_ps(__m128 __A, __m128 __B, __m128 __C)
+{
+ return (__m128)__builtin_ia32_vfnmsubps((__v4sf)__A, (__v4sf)__B, (__v4sf)__C);
+}
+
+static __inline__ __m128d __DEFAULT_FN_ATTRS
+_mm_fnmsub_pd(__m128d __A, __m128d __B, __m128d __C)
+{
+ return (__m128d)__builtin_ia32_vfnmsubpd((__v2df)__A, (__v2df)__B, (__v2df)__C);
+}
+
+static __inline__ __m128 __DEFAULT_FN_ATTRS
+_mm_fnmsub_ss(__m128 __A, __m128 __B, __m128 __C)
+{
+ return (__m128)__builtin_ia32_vfnmsubss((__v4sf)__A, (__v4sf)__B, (__v4sf)__C);
+}
+
+static __inline__ __m128d __DEFAULT_FN_ATTRS
+_mm_fnmsub_sd(__m128d __A, __m128d __B, __m128d __C)
+{
+ return (__m128d)__builtin_ia32_vfnmsubsd((__v2df)__A, (__v2df)__B, (__v2df)__C);
+}
+
+static __inline__ __m128 __DEFAULT_FN_ATTRS
+_mm_fmaddsub_ps(__m128 __A, __m128 __B, __m128 __C)
+{
+ return (__m128)__builtin_ia32_vfmaddsubps((__v4sf)__A, (__v4sf)__B, (__v4sf)__C);
+}
+
+static __inline__ __m128d __DEFAULT_FN_ATTRS
+_mm_fmaddsub_pd(__m128d __A, __m128d __B, __m128d __C)
+{
+ return (__m128d)__builtin_ia32_vfmaddsubpd((__v2df)__A, (__v2df)__B, (__v2df)__C);
+}
+
+static __inline__ __m128 __DEFAULT_FN_ATTRS
+_mm_fmsubadd_ps(__m128 __A, __m128 __B, __m128 __C)
+{
+ return (__m128)__builtin_ia32_vfmsubaddps((__v4sf)__A, (__v4sf)__B, (__v4sf)__C);
+}
+
+static __inline__ __m128d __DEFAULT_FN_ATTRS
+_mm_fmsubadd_pd(__m128d __A, __m128d __B, __m128d __C)
+{
+ return (__m128d)__builtin_ia32_vfmsubaddpd((__v2df)__A, (__v2df)__B, (__v2df)__C);
+}
+
+static __inline__ __m256 __DEFAULT_FN_ATTRS
+_mm256_fmadd_ps(__m256 __A, __m256 __B, __m256 __C)
+{
+ return (__m256)__builtin_ia32_vfmaddps256((__v8sf)__A, (__v8sf)__B, (__v8sf)__C);
+}
+
+static __inline__ __m256d __DEFAULT_FN_ATTRS
+_mm256_fmadd_pd(__m256d __A, __m256d __B, __m256d __C)
+{
+ return (__m256d)__builtin_ia32_vfmaddpd256((__v4df)__A, (__v4df)__B, (__v4df)__C);
+}
+
+static __inline__ __m256 __DEFAULT_FN_ATTRS
+_mm256_fmsub_ps(__m256 __A, __m256 __B, __m256 __C)
+{
+ return (__m256)__builtin_ia32_vfmsubps256((__v8sf)__A, (__v8sf)__B, (__v8sf)__C);
+}
+
+static __inline__ __m256d __DEFAULT_FN_ATTRS
+_mm256_fmsub_pd(__m256d __A, __m256d __B, __m256d __C)
+{
+ return (__m256d)__builtin_ia32_vfmsubpd256((__v4df)__A, (__v4df)__B, (__v4df)__C);
+}
+
+static __inline__ __m256 __DEFAULT_FN_ATTRS
+_mm256_fnmadd_ps(__m256 __A, __m256 __B, __m256 __C)
+{
+ return (__m256)__builtin_ia32_vfnmaddps256((__v8sf)__A, (__v8sf)__B, (__v8sf)__C);
+}
+
+static __inline__ __m256d __DEFAULT_FN_ATTRS
+_mm256_fnmadd_pd(__m256d __A, __m256d __B, __m256d __C)
+{
+ return (__m256d)__builtin_ia32_vfnmaddpd256((__v4df)__A, (__v4df)__B, (__v4df)__C);
+}
+
+static __inline__ __m256 __DEFAULT_FN_ATTRS
+_mm256_fnmsub_ps(__m256 __A, __m256 __B, __m256 __C)
+{
+ return (__m256)__builtin_ia32_vfnmsubps256((__v8sf)__A, (__v8sf)__B, (__v8sf)__C);
+}
+
+static __inline__ __m256d __DEFAULT_FN_ATTRS
+_mm256_fnmsub_pd(__m256d __A, __m256d __B, __m256d __C)
+{
+ return (__m256d)__builtin_ia32_vfnmsubpd256((__v4df)__A, (__v4df)__B, (__v4df)__C);
+}
+
+static __inline__ __m256 __DEFAULT_FN_ATTRS
+_mm256_fmaddsub_ps(__m256 __A, __m256 __B, __m256 __C)
+{
+ return (__m256)__builtin_ia32_vfmaddsubps256((__v8sf)__A, (__v8sf)__B, (__v8sf)__C);
+}
+
+static __inline__ __m256d __DEFAULT_FN_ATTRS
+_mm256_fmaddsub_pd(__m256d __A, __m256d __B, __m256d __C)
+{
+ return (__m256d)__builtin_ia32_vfmaddsubpd256((__v4df)__A, (__v4df)__B, (__v4df)__C);
+}
+
+static __inline__ __m256 __DEFAULT_FN_ATTRS
+_mm256_fmsubadd_ps(__m256 __A, __m256 __B, __m256 __C)
+{
+ return (__m256)__builtin_ia32_vfmsubaddps256((__v8sf)__A, (__v8sf)__B, (__v8sf)__C);
+}
+
+static __inline__ __m256d __DEFAULT_FN_ATTRS
+_mm256_fmsubadd_pd(__m256d __A, __m256d __B, __m256d __C)
+{
+ return (__m256d)__builtin_ia32_vfmsubaddpd256((__v4df)__A, (__v4df)__B, (__v4df)__C);
+}
+
+#undef __DEFAULT_FN_ATTRS
+
+#endif /* __FMAINTRIN_H */
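
The FMA3 forms follow the same a*b +/- c pattern, with the result rounded once
rather than after both the multiply and the add; a minimal sketch assuming an
AVX+FMA target (-mfma):

  #include <immintrin.h>
  #include <stdio.h>

  int main(void) {
    __m256d a = _mm256_set1_pd(2.0), b = _mm256_set1_pd(3.0),
            c = _mm256_set1_pd(-5.0);
    __m256d r = _mm256_fmadd_pd(a, b, c); /* per lane: a*b + c, fused */
    double out[4];
    _mm256_storeu_pd(out, r);
    printf("%f\n", out[0]);               /* 1.000000 */
    return 0;
  }
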
diff --git a/current/clang-include/fxsrintrin.h b/current/clang-include/fxsrintrin.h
new file mode 100644
index 0000000..ac6026a
--- /dev/null
+++ b/current/clang-include/fxsrintrin.h
@@ -0,0 +1,55 @@
+/*===---- fxsrintrin.h - FXSR intrinsic ------------------------------------===
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+ * THE SOFTWARE.
+ *
+ *===-----------------------------------------------------------------------===
+ */
+
+#ifndef __IMMINTRIN_H
+#error "Never use <fxsrintrin.h> directly; include <immintrin.h> instead."
+#endif
+
+#ifndef __FXSRINTRIN_H
+#define __FXSRINTRIN_H
+
+#define __DEFAULT_FN_ATTRS __attribute__((__always_inline__, __nodebug__, __target__("fxsr")))
+
+static __inline__ void __DEFAULT_FN_ATTRS
+_fxsave(void *__p) {
+ return __builtin_ia32_fxsave(__p);
+}
+
+static __inline__ void __DEFAULT_FN_ATTRS
+_fxsave64(void *__p) {
+ return __builtin_ia32_fxsave64(__p);
+}
+
+static __inline__ void __DEFAULT_FN_ATTRS
+_fxrstor(void *__p) {
+ return __builtin_ia32_fxrstor(__p);
+}
+
+static __inline__ void __DEFAULT_FN_ATTRS
+_fxrstor64(void *__p) {
+ return __builtin_ia32_fxrstor64(__p);
+}
+
+#undef __DEFAULT_FN_ATTRS
+
+#endif
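
The FXSAVE/FXRSTOR instructions behind these wrappers require a 512-byte save
area aligned on a 16-byte boundary; a minimal sketch assuming -mfxsr:

  #include <immintrin.h>
  #include <stdio.h>

  int main(void) {
    static unsigned char area[512] __attribute__((aligned(16)));
    _fxsave(area);  /* dump x87/MMX/SSE state into the buffer */
    _fxrstor(area); /* restore the state just saved */
    printf("x87 FCW bytes: %02x %02x\n", area[0], area[1]);
    return 0;
  }
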
diff --git a/current/clang-include/htmintrin.h b/current/clang-include/htmintrin.h
new file mode 100644
index 0000000..69c8d7b
--- /dev/null
+++ b/current/clang-include/htmintrin.h
@@ -0,0 +1,226 @@
+/*===---- htmintrin.h - Standard header for PowerPC HTM ---------------===*\
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+ * THE SOFTWARE.
+ *
+\*===----------------------------------------------------------------------===*/
+
+#ifndef __HTMINTRIN_H
+#define __HTMINTRIN_H
+
+#ifndef __HTM__
+#error "HTM instruction set not enabled"
+#endif
+
+#ifdef __powerpc__
+
+#include <stdint.h>
+
+typedef uint64_t texasr_t;
+typedef uint32_t texasru_t;
+typedef uint32_t texasrl_t;
+typedef uintptr_t tfiar_t;
+typedef uintptr_t tfhar_t;
+
+#define _HTM_STATE(CR0) ((CR0 >> 1) & 0x3)
+#define _HTM_NONTRANSACTIONAL 0x0
+#define _HTM_SUSPENDED 0x1
+#define _HTM_TRANSACTIONAL 0x2
+
+#define _TEXASR_EXTRACT_BITS(TEXASR,BITNUM,SIZE) \
+ (((TEXASR) >> (63-(BITNUM))) & ((1<<(SIZE))-1))
+#define _TEXASRU_EXTRACT_BITS(TEXASR,BITNUM,SIZE) \
+ (((TEXASR) >> (31-(BITNUM))) & ((1<<(SIZE))-1))
+
+#define _TEXASR_FAILURE_CODE(TEXASR) \
+ _TEXASR_EXTRACT_BITS(TEXASR, 7, 8)
+#define _TEXASRU_FAILURE_CODE(TEXASRU) \
+ _TEXASRU_EXTRACT_BITS(TEXASRU, 7, 8)
+
+#define _TEXASR_FAILURE_PERSISTENT(TEXASR) \
+ _TEXASR_EXTRACT_BITS(TEXASR, 7, 1)
+#define _TEXASRU_FAILURE_PERSISTENT(TEXASRU) \
+ _TEXASRU_EXTRACT_BITS(TEXASRU, 7, 1)
+
+#define _TEXASR_DISALLOWED(TEXASR) \
+ _TEXASR_EXTRACT_BITS(TEXASR, 8, 1)
+#define _TEXASRU_DISALLOWED(TEXASRU) \
+ _TEXASRU_EXTRACT_BITS(TEXASRU, 8, 1)
+
+#define _TEXASR_NESTING_OVERFLOW(TEXASR) \
+ _TEXASR_EXTRACT_BITS(TEXASR, 9, 1)
+#define _TEXASRU_NESTING_OVERFLOW(TEXASRU) \
+ _TEXASRU_EXTRACT_BITS(TEXASRU, 9, 1)
+
+#define _TEXASR_FOOTPRINT_OVERFLOW(TEXASR) \
+ _TEXASR_EXTRACT_BITS(TEXASR, 10, 1)
+#define _TEXASRU_FOOTPRINT_OVERFLOW(TEXASRU) \
+ _TEXASRU_EXTRACT_BITS(TEXASRU, 10, 1)
+
+#define _TEXASR_SELF_INDUCED_CONFLICT(TEXASR) \
+ _TEXASR_EXTRACT_BITS(TEXASR, 11, 1)
+#define _TEXASRU_SELF_INDUCED_CONFLICT(TEXASRU) \
+ _TEXASRU_EXTRACT_BITS(TEXASRU, 11, 1)
+
+#define _TEXASR_NON_TRANSACTIONAL_CONFLICT(TEXASR) \
+ _TEXASR_EXTRACT_BITS(TEXASR, 12, 1)
+#define _TEXASRU_NON_TRANSACTIONAL_CONFLICT(TEXASRU) \
+ _TEXASRU_EXTRACT_BITS(TEXASRU, 12, 1)
+
+#define _TEXASR_TRANSACTION_CONFLICT(TEXASR) \
+ _TEXASR_EXTRACT_BITS(TEXASR, 13, 1)
+#define _TEXASRU_TRANSACTION_CONFLICT(TEXASRU) \
+ _TEXASRU_EXTRACT_BITS(TEXASRU, 13, 1)
+
+#define _TEXASR_TRANSLATION_INVALIDATION_CONFLICT(TEXASR) \
+ _TEXASR_EXTRACT_BITS(TEXASR, 14, 1)
+#define _TEXASRU_TRANSLATION_INVALIDATION_CONFLICT(TEXASRU) \
+ _TEXASRU_EXTRACT_BITS(TEXASRU, 14, 1)
+
+#define _TEXASR_IMPLEMENTAION_SPECIFIC(TEXASR) \
+ _TEXASR_EXTRACT_BITS(TEXASR, 15, 1)
+#define _TEXASRU_IMPLEMENTAION_SPECIFIC(TEXASRU) \
+ _TEXASRU_EXTRACT_BITS(TEXASRU, 15, 1)
+
+#define _TEXASR_INSTRUCTION_FETCH_CONFLICT(TEXASR) \
+ _TEXASR_EXTRACT_BITS(TEXASR, 16, 1)
+#define _TEXASRU_INSTRUCTION_FETCH_CONFLICT(TEXASRU) \
+ _TEXASRU_EXTRACT_BITS(TEXASRU, 16, 1)
+
+#define _TEXASR_ABORT(TEXASR) \
+ _TEXASR_EXTRACT_BITS(TEXASR, 31, 1)
+#define _TEXASRU_ABORT(TEXASRU) \
+ _TEXASRU_EXTRACT_BITS(TEXASRU, 31, 1)
+
+
+#define _TEXASR_SUSPENDED(TEXASR) \
+ _TEXASR_EXTRACT_BITS(TEXASR, 32, 1)
+
+#define _TEXASR_PRIVILEGE(TEXASR) \
+ _TEXASR_EXTRACT_BITS(TEXASR, 35, 2)
+
+#define _TEXASR_FAILURE_SUMMARY(TEXASR) \
+ _TEXASR_EXTRACT_BITS(TEXASR, 36, 1)
+
+#define _TEXASR_TFIAR_EXACT(TEXASR) \
+ _TEXASR_EXTRACT_BITS(TEXASR, 37, 1)
+
+#define _TEXASR_ROT(TEXASR) \
+ _TEXASR_EXTRACT_BITS(TEXASR, 38, 1)
+
+#define _TEXASR_TRANSACTION_LEVEL(TEXASR) \
+ _TEXASR_EXTRACT_BITS(TEXASR, 63, 12)
+
+#endif /* __powerpc__ */
+
+#ifdef __s390__
+
+/* Condition codes generated by tbegin */
+#define _HTM_TBEGIN_STARTED 0
+#define _HTM_TBEGIN_INDETERMINATE 1
+#define _HTM_TBEGIN_TRANSIENT 2
+#define _HTM_TBEGIN_PERSISTENT 3
+
+/* The abort codes below this threshold are reserved for machine use. */
+#define _HTM_FIRST_USER_ABORT_CODE 256
+
+/* The transaction diagnostic block as it is defined in the Principles
+   of Operation, chapter 5-91. */
+
+struct __htm_tdb {
+ unsigned char format; /* 0 */
+ unsigned char flags;
+ unsigned char reserved1[4];
+ unsigned short nesting_depth;
+ unsigned long long abort_code; /* 8 */
+ unsigned long long conflict_token; /* 16 */
+ unsigned long long atia; /* 24 */
+ unsigned char eaid; /* 32 */
+ unsigned char dxc;
+ unsigned char reserved2[2];
+ unsigned int program_int_id;
+ unsigned long long exception_id; /* 40 */
+ unsigned long long bea; /* 48 */
+ unsigned char reserved3[72]; /* 56 */
+ unsigned long long gprs[16]; /* 128 */
+} __attribute__((__packed__, __aligned__ (8)));
+
+
+/* Helper intrinsics to retry tbegin in case of transient failure. */
+
+static __inline int __attribute__((__always_inline__, __nodebug__))
+__builtin_tbegin_retry_null (int __retry)
+{
+ int cc, i = 0;
+
+ while ((cc = __builtin_tbegin(0)) == _HTM_TBEGIN_TRANSIENT
+ && i++ < __retry)
+ __builtin_tx_assist(i);
+
+ return cc;
+}
+
+static __inline int __attribute__((__always_inline__, __nodebug__))
+__builtin_tbegin_retry_tdb (void *__tdb, int __retry)
+{
+ int cc, i = 0;
+
+ while ((cc = __builtin_tbegin(__tdb)) == _HTM_TBEGIN_TRANSIENT
+ && i++ < __retry)
+ __builtin_tx_assist(i);
+
+ return cc;
+}
+
+#define __builtin_tbegin_retry(tdb, retry) \
+ (__builtin_constant_p(tdb == 0) && tdb == 0 ? \
+ __builtin_tbegin_retry_null(retry) : \
+ __builtin_tbegin_retry_tdb(tdb, retry))
+
+static __inline int __attribute__((__always_inline__, __nodebug__))
+__builtin_tbegin_retry_nofloat_null (int __retry)
+{
+ int cc, i = 0;
+
+ while ((cc = __builtin_tbegin_nofloat(0)) == _HTM_TBEGIN_TRANSIENT
+ && i++ < __retry)
+ __builtin_tx_assist(i);
+
+ return cc;
+}
+
+static __inline int __attribute__((__always_inline__, __nodebug__))
+__builtin_tbegin_retry_nofloat_tdb (void *__tdb, int __retry)
+{
+ int cc, i = 0;
+
+ while ((cc = __builtin_tbegin_nofloat(__tdb)) == _HTM_TBEGIN_TRANSIENT
+ && i++ < __retry)
+ __builtin_tx_assist(i);
+
+ return cc;
+}
+
+#define __builtin_tbegin_retry_nofloat(tdb, retry) \
+ (__builtin_constant_p(tdb == 0) && tdb == 0 ? \
+ __builtin_tbegin_retry_nofloat_null(retry) : \
+ __builtin_tbegin_retry_nofloat_tdb(tdb, retry))
+
+#endif /* __s390__ */
+
+#endif /* __HTMINTRIN_H */
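
A sketch of the s390 retry helpers above, assuming a System z target built
with -mhtm (everything except the builtins and _HTM_TBEGIN_STARTED is
illustrative); note that on s390 success is condition code 0:

  #include <htmintrin.h>

  long counter;

  long add_transactionally(long delta)
  {
    /* Retry up to 5 transient aborts before giving up. */
    if (__builtin_tbegin_retry(0, 5) == _HTM_TBEGIN_STARTED)
      {
        counter += delta;
        __builtin_tend();
        return 1;
      }
    return 0; /* caller takes a fallback path, e.g. a lock */
  }
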
diff --git a/current/clang-include/htmxlintrin.h b/current/clang-include/htmxlintrin.h
new file mode 100644
index 0000000..16dc705
--- /dev/null
+++ b/current/clang-include/htmxlintrin.h
@@ -0,0 +1,363 @@
+/*===---- htmxlintrin.h - XL compiler HTM execution intrinsics-------------===*\
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+ * THE SOFTWARE.
+ *
+\*===----------------------------------------------------------------------===*/
+
+#ifndef __HTMXLINTRIN_H
+#define __HTMXLINTRIN_H
+
+#ifndef __HTM__
+#error "HTM instruction set not enabled"
+#endif
+
+#include <htmintrin.h>
+
+#ifdef __powerpc__
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#define _TEXASR_PTR(TM_BUF) \
+ ((texasr_t *)((TM_BUF)+0))
+#define _TEXASRU_PTR(TM_BUF) \
+ ((texasru_t *)((TM_BUF)+0))
+#define _TEXASRL_PTR(TM_BUF) \
+ ((texasrl_t *)((TM_BUF)+4))
+#define _TFIAR_PTR(TM_BUF) \
+ ((tfiar_t *)((TM_BUF)+8))
+
+typedef char TM_buff_type[16];
+
+/* This macro can be used to determine whether a transaction was successfully
+ started from the __TM_begin() and __TM_simple_begin() intrinsic functions
+ below. */
+#define _HTM_TBEGIN_STARTED 1
+
+extern __inline long
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+__TM_simple_begin (void)
+{
+ if (__builtin_expect (__builtin_tbegin (0), 1))
+ return _HTM_TBEGIN_STARTED;
+ return 0;
+}
+
+extern __inline long
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+__TM_begin (void* const __TM_buff)
+{
+ *_TEXASRL_PTR (__TM_buff) = 0;
+ if (__builtin_expect (__builtin_tbegin (0), 1))
+ return _HTM_TBEGIN_STARTED;
+#ifdef __powerpc64__
+ *_TEXASR_PTR (__TM_buff) = __builtin_get_texasr ();
+#else
+ *_TEXASRU_PTR (__TM_buff) = __builtin_get_texasru ();
+ *_TEXASRL_PTR (__TM_buff) = __builtin_get_texasr ();
+#endif
+ *_TFIAR_PTR (__TM_buff) = __builtin_get_tfiar ();
+ return 0;
+}
+
+extern __inline long
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+__TM_end (void)
+{
+ if (__builtin_expect (__builtin_tend (0), 1))
+ return 1;
+ return 0;
+}
+
+extern __inline void
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+__TM_abort (void)
+{
+ __builtin_tabort (0);
+}
+
+extern __inline void
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+__TM_named_abort (unsigned char const __code)
+{
+ __builtin_tabort (__code);
+}
+
+extern __inline void
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+__TM_resume (void)
+{
+ __builtin_tresume ();
+}
+
+extern __inline void
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+__TM_suspend (void)
+{
+ __builtin_tsuspend ();
+}
+
+extern __inline long
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+__TM_is_user_abort (void* const __TM_buff)
+{
+ texasru_t texasru = *_TEXASRU_PTR (__TM_buff);
+ return _TEXASRU_ABORT (texasru);
+}
+
+extern __inline long
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+__TM_is_named_user_abort (void* const __TM_buff, unsigned char *__code)
+{
+ texasru_t texasru = *_TEXASRU_PTR (__TM_buff);
+
+ *__code = _TEXASRU_FAILURE_CODE (texasru);
+ return _TEXASRU_ABORT (texasru);
+}
+
+extern __inline long
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+__TM_is_illegal (void* const __TM_buff)
+{
+ texasru_t texasru = *_TEXASRU_PTR (__TM_buff);
+ return _TEXASRU_DISALLOWED (texasru);
+}
+
+extern __inline long
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+__TM_is_footprint_exceeded (void* const __TM_buff)
+{
+ texasru_t texasru = *_TEXASRU_PTR (__TM_buff);
+ return _TEXASRU_FOOTPRINT_OVERFLOW (texasru);
+}
+
+extern __inline long
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+__TM_nesting_depth (void* const __TM_buff)
+{
+ texasrl_t texasrl;
+
+ if (_HTM_STATE (__builtin_ttest ()) == _HTM_NONTRANSACTIONAL)
+ {
+ texasrl = *_TEXASRL_PTR (__TM_buff);
+ if (!_TEXASR_FAILURE_SUMMARY (texasrl))
+ texasrl = 0;
+ }
+ else
+ texasrl = (texasrl_t) __builtin_get_texasr ();
+
+ return _TEXASR_TRANSACTION_LEVEL (texasrl);
+}
+
+extern __inline long
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+__TM_is_nested_too_deep(void* const __TM_buff)
+{
+ texasru_t texasru = *_TEXASRU_PTR (__TM_buff);
+ return _TEXASRU_NESTING_OVERFLOW (texasru);
+}
+
+extern __inline long
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+__TM_is_conflict(void* const __TM_buff)
+{
+ texasru_t texasru = *_TEXASRU_PTR (__TM_buff);
+ /* Return TEXASR bits 11 (Self-Induced Conflict) through
+ 14 (Translation Invalidation Conflict). */
+ return (_TEXASRU_EXTRACT_BITS (texasru, 14, 4)) ? 1 : 0;
+}
+
+extern __inline long
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+__TM_is_failure_persistent(void* const __TM_buff)
+{
+ texasru_t texasru = *_TEXASRU_PTR (__TM_buff);
+ return _TEXASRU_FAILURE_PERSISTENT (texasru);
+}
+
+extern __inline long
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+__TM_failure_address(void* const __TM_buff)
+{
+ return *_TFIAR_PTR (__TM_buff);
+}
+
+extern __inline long long
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+__TM_failure_code(void* const __TM_buff)
+{
+ return *_TEXASR_PTR (__TM_buff);
+}
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* __powerpc__ */
+
+#ifdef __s390__
+
+#include <stdint.h>
+
+/* These intrinsics are being made available for compatibility with
+ the IBM XL compiler. For documentation please see the "z/OS XL
+   C/C++ Programming Guide" publicly available on the web. */
+
+static __inline long __attribute__((__always_inline__, __nodebug__))
+__TM_simple_begin ()
+{
+ return __builtin_tbegin_nofloat (0);
+}
+
+static __inline long __attribute__((__always_inline__, __nodebug__))
+__TM_begin (void* const __tdb)
+{
+ return __builtin_tbegin_nofloat (__tdb);
+}
+
+static __inline long __attribute__((__always_inline__, __nodebug__))
+__TM_end ()
+{
+ return __builtin_tend ();
+}
+
+static __inline void __attribute__((__always_inline__))
+__TM_abort ()
+{
+ return __builtin_tabort (_HTM_FIRST_USER_ABORT_CODE);
+}
+
+static __inline void __attribute__((__always_inline__, __nodebug__))
+__TM_named_abort (unsigned char const __code)
+{
+ return __builtin_tabort ((int)_HTM_FIRST_USER_ABORT_CODE + __code);
+}
+
+static __inline void __attribute__((__always_inline__, __nodebug__))
+__TM_non_transactional_store (void* const __addr, long long const __value)
+{
+ __builtin_non_tx_store ((uint64_t*)__addr, (uint64_t)__value);
+}
+
+static __inline long __attribute__((__always_inline__, __nodebug__))
+__TM_nesting_depth (void* const __tdb_ptr)
+{
+ int depth = __builtin_tx_nesting_depth ();
+ struct __htm_tdb *tdb = (struct __htm_tdb*)__tdb_ptr;
+
+ if (depth != 0)
+ return depth;
+
+ if (tdb->format != 1)
+ return 0;
+ return tdb->nesting_depth;
+}
+
+/* Transaction failure diagnostics */
+
+static __inline long __attribute__((__always_inline__, __nodebug__))
+__TM_is_user_abort (void* const __tdb_ptr)
+{
+ struct __htm_tdb *tdb = (struct __htm_tdb*)__tdb_ptr;
+
+ if (tdb->format != 1)
+ return 0;
+
+ return !!(tdb->abort_code >= _HTM_FIRST_USER_ABORT_CODE);
+}
+
+static __inline long __attribute__((__always_inline__, __nodebug__))
+__TM_is_named_user_abort (void* const __tdb_ptr, unsigned char* __code)
+{
+ struct __htm_tdb *tdb = (struct __htm_tdb*)__tdb_ptr;
+
+ if (tdb->format != 1)
+ return 0;
+
+ if (tdb->abort_code >= _HTM_FIRST_USER_ABORT_CODE)
+ {
+ *__code = tdb->abort_code - _HTM_FIRST_USER_ABORT_CODE;
+ return 1;
+ }
+ return 0;
+}
+
+static __inline long __attribute__((__always_inline__, __nodebug__))
+__TM_is_illegal (void* const __tdb_ptr)
+{
+ struct __htm_tdb *tdb = (struct __htm_tdb*)__tdb_ptr;
+
+ return (tdb->format == 1
+ && (tdb->abort_code == 4 /* unfiltered program interruption */
+ || tdb->abort_code == 11 /* restricted instruction */));
+}
+
+static __inline long __attribute__((__always_inline__, __nodebug__))
+__TM_is_footprint_exceeded (void* const __tdb_ptr)
+{
+ struct __htm_tdb *tdb = (struct __htm_tdb*)__tdb_ptr;
+
+ return (tdb->format == 1
+ && (tdb->abort_code == 7 /* fetch overflow */
+ || tdb->abort_code == 8 /* store overflow */));
+}
+
+static __inline long __attribute__((__always_inline__, __nodebug__))
+__TM_is_nested_too_deep (void* const __tdb_ptr)
+{
+ struct __htm_tdb *tdb = (struct __htm_tdb*)__tdb_ptr;
+
+ return tdb->format == 1 && tdb->abort_code == 13; /* depth exceeded */
+}
+
+static __inline long __attribute__((__always_inline__, __nodebug__))
+__TM_is_conflict (void* const __tdb_ptr)
+{
+ struct __htm_tdb *tdb = (struct __htm_tdb*)__tdb_ptr;
+
+ return (tdb->format == 1
+ && (tdb->abort_code == 9 /* fetch conflict */
+ || tdb->abort_code == 10 /* store conflict */));
+}
+
+static __inline long __attribute__((__always_inline__, __nodebug__))
+__TM_is_failure_persistent (long const __result)
+{
+ return __result == _HTM_TBEGIN_PERSISTENT;
+}
+
+static __inline long __attribute__((__always_inline__, __nodebug__))
+__TM_failure_address (void* const __tdb_ptr)
+{
+ struct __htm_tdb *tdb = (struct __htm_tdb*)__tdb_ptr;
+ return tdb->atia;
+}
+
+static __inline long __attribute__((__always_inline__, __nodebug__))
+__TM_failure_code (void* const __tdb_ptr)
+{
+ struct __htm_tdb *tdb = (struct __htm_tdb*)__tdb_ptr;
+
+ return tdb->abort_code;
+}
+
+#endif /* __s390__ */
+
+#endif /* __HTMXLINTRIN_H */
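
A sketch of the XL-style pattern on PowerPC, assuming -mhtm (the function and
variable names are illustrative); on failure, __TM_begin has already captured
TEXASR/TFIAR into the buffer for the diagnostic queries above:

  #include <htmxlintrin.h>

  long counter;

  long add_transactionally(long delta)
  {
    TM_buff_type buff;

    if (__TM_begin(buff) == _HTM_TBEGIN_STARTED)
      {
        counter += delta; /* transactional body */
        __TM_end();
        return 1;
      }
    if (__TM_is_failure_persistent(buff))
      return -1; /* retrying is pointless; take a fallback path */
    return 0;    /* transient failure; the caller may retry */
  }
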
diff --git a/current/clang-include/ia32intrin.h b/current/clang-include/ia32intrin.h
new file mode 100644
index 0000000..397f3fd
--- /dev/null
+++ b/current/clang-include/ia32intrin.h
@@ -0,0 +1,79 @@
+/* ===-------- ia32intrin.h ---------------------------------------------------===
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+ * THE SOFTWARE.
+ *
+ *===-----------------------------------------------------------------------===
+ */
+
+#ifndef __X86INTRIN_H
+#error "Never use <ia32intrin.h> directly; include <x86intrin.h> instead."
+#endif
+
+#ifndef __IA32INTRIN_H
+#define __IA32INTRIN_H
+
+#ifdef __x86_64__
+static __inline__ unsigned long long __attribute__((__always_inline__, __nodebug__))
+__readeflags(void)
+{
+ return __builtin_ia32_readeflags_u64();
+}
+
+static __inline__ void __attribute__((__always_inline__, __nodebug__))
+__writeeflags(unsigned long long __f)
+{
+ __builtin_ia32_writeeflags_u64(__f);
+}
+
+#else /* !__x86_64__ */
+static __inline__ unsigned int __attribute__((__always_inline__, __nodebug__))
+__readeflags(void)
+{
+ return __builtin_ia32_readeflags_u32();
+}
+
+static __inline__ void __attribute__((__always_inline__, __nodebug__))
+__writeeflags(unsigned int __f)
+{
+ __builtin_ia32_writeeflags_u32(__f);
+}
+#endif /* !__x86_64__ */
+
+static __inline__ unsigned long long __attribute__((__always_inline__, __nodebug__))
+__rdpmc(int __A) {
+ return __builtin_ia32_rdpmc(__A);
+}
+
+/* __rdtsc */
+static __inline__ unsigned long long __attribute__((__always_inline__, __nodebug__))
+__rdtsc(void) {
+ return __builtin_ia32_rdtsc();
+}
+
+/* __rdtscp */
+static __inline__ unsigned long long __attribute__((__always_inline__, __nodebug__))
+__rdtscp(unsigned int *__A) {
+ return __builtin_ia32_rdtscp(__A);
+}
+
+#define _rdtsc() __rdtsc()
+
+#define _rdpmc(A) __rdpmc(A)
+
+#endif /* __IA32INTRIN_H */
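
A minimal timing sketch for the TSC wrappers above; note that RDTSC is not a
serializing instruction, so real measurements usually add fencing around it:

  #include <x86intrin.h>
  #include <stdio.h>

  int main(void) {
    unsigned int aux;
    unsigned long long t0 = __rdtsc();
    /* ... code under measurement ... */
    unsigned long long t1 = __rdtscp(&aux); /* also reads IA32_TSC_AUX */
    printf("elapsed: %llu cycles (aux=%u)\n", t1 - t0, aux);
    return 0;
  }
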
diff --git a/current/clang-include/immintrin.h b/current/clang-include/immintrin.h
new file mode 100644
index 0000000..4b27523
--- /dev/null
+++ b/current/clang-include/immintrin.h
@@ -0,0 +1,283 @@
+/*===---- immintrin.h - Intel intrinsics -----------------------------------===
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+ * THE SOFTWARE.
+ *
+ *===-----------------------------------------------------------------------===
+ */
+
+#ifndef __IMMINTRIN_H
+#define __IMMINTRIN_H
+
+#if !defined(_MSC_VER) || __has_feature(modules) || defined(__MMX__)
+#include <mmintrin.h>
+#endif
+
+#if !defined(_MSC_VER) || __has_feature(modules) || defined(__SSE__)
+#include <xmmintrin.h>
+#endif
+
+#if !defined(_MSC_VER) || __has_feature(modules) || defined(__SSE2__)
+#include <emmintrin.h>
+#endif
+
+#if !defined(_MSC_VER) || __has_feature(modules) || defined(__SSE3__)
+#include <pmmintrin.h>
+#endif
+
+#if !defined(_MSC_VER) || __has_feature(modules) || defined(__SSSE3__)
+#include <tmmintrin.h>
+#endif
+
+#if !defined(_MSC_VER) || __has_feature(modules) || \
+ (defined(__SSE4_2__) || defined(__SSE4_1__))
+#include <smmintrin.h>
+#endif
+
+#if !defined(_MSC_VER) || __has_feature(modules) || \
+ (defined(__AES__) || defined(__PCLMUL__))
+#include <wmmintrin.h>
+#endif
+
+#if !defined(_MSC_VER) || __has_feature(modules) || defined(__CLFLUSHOPT__)
+#include <clflushoptintrin.h>
+#endif
+
+#if !defined(_MSC_VER) || __has_feature(modules) || defined(__AVX__)
+#include <avxintrin.h>
+#endif
+
+#if !defined(_MSC_VER) || __has_feature(modules) || defined(__AVX2__)
+#include <avx2intrin.h>
+
+/* The 256-bit versions of functions in f16cintrin.h.
+ Intel documents these as being in immintrin.h, and
+ they depend on typedefs from avxintrin.h. */
+
+#define _mm256_cvtps_ph(a, imm) __extension__ ({ \
+ (__m128i)__builtin_ia32_vcvtps2ph256((__v8sf)(__m256)(a), (imm)); })
+
+static __inline __m256 __attribute__((__always_inline__, __nodebug__, __target__("f16c")))
+_mm256_cvtph_ps(__m128i __a)
+{
+ return (__m256)__builtin_ia32_vcvtph2ps256((__v8hi)__a);
+}
+#endif /* __AVX2__ */
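+
+/* Usage sketch for the 256-bit conversions above (assumes -mavx2 -mf16c;
+ * the values are illustrative): eight floats survive the round trip
+ * through half precision, minus the low mantissa bits.
+ *
+ *   __m256  v    = _mm256_set1_ps(3.14159f);
+ *   __m128i half = _mm256_cvtps_ph(v, 0);  // imm 0 = round to nearest
+ *   __m256  back = _mm256_cvtph_ps(half);  // eight floats again
+ */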
+
+#if !defined(_MSC_VER) || __has_feature(modules) || defined(__BMI__)
+#include <bmiintrin.h>
+#endif
+
+#if !defined(_MSC_VER) || __has_feature(modules) || defined(__BMI2__)
+#include <bmi2intrin.h>
+#endif
+
+#if !defined(_MSC_VER) || __has_feature(modules) || defined(__LZCNT__)
+#include <lzcntintrin.h>
+#endif
+
+#if !defined(_MSC_VER) || __has_feature(modules) || defined(__FMA__)
+#include <fmaintrin.h>
+#endif
+
+#if !defined(_MSC_VER) || __has_feature(modules) || defined(__AVX512F__)
+#include <avx512fintrin.h>
+#endif
+
+#if !defined(_MSC_VER) || __has_feature(modules) || defined(__AVX512VL__)
+#include <avx512vlintrin.h>
+#endif
+
+#if !defined(_MSC_VER) || __has_feature(modules) || defined(__AVX512BW__)
+#include <avx512bwintrin.h>
+#endif
+
+#if !defined(_MSC_VER) || __has_feature(modules) || defined(__AVX512CD__)
+#include <avx512cdintrin.h>
+#endif
+
+#if !defined(_MSC_VER) || __has_feature(modules) || defined(__AVX512DQ__)
+#include <avx512dqintrin.h>
+#endif
+
+#if !defined(_MSC_VER) || __has_feature(modules) || \
+ (defined(__AVX512VL__) && defined(__AVX512BW__))
+#include <avx512vlbwintrin.h>
+#endif
+
+#if !defined(_MSC_VER) || __has_feature(modules) || \
+ (defined(__AVX512VL__) && defined(__AVX512CD__))
+#include <avx512vlcdintrin.h>
+#endif
+
+#if !defined(_MSC_VER) || __has_feature(modules) || \
+ (defined(__AVX512VL__) && defined(__AVX512DQ__))
+#include <avx512vldqintrin.h>
+#endif
+
+#if !defined(_MSC_VER) || __has_feature(modules) || defined(__AVX512ER__)
+#include <avx512erintrin.h>
+#endif
+
+#if !defined(_MSC_VER) || __has_feature(modules) || defined(__AVX512IFMA__)
+#include <avx512ifmaintrin.h>
+#endif
+
+#if !defined(_MSC_VER) || __has_feature(modules) || \
+ (defined(__AVX512IFMA__) && defined(__AVX512VL__))
+#include <avx512ifmavlintrin.h>
+#endif
+
+#if !defined(_MSC_VER) || __has_feature(modules) || defined(__AVX512VBMI__)
+#include <avx512vbmiintrin.h>
+#endif
+
+#if !defined(_MSC_VER) || __has_feature(modules) || \
+ (defined(__AVX512VBMI__) && defined(__AVX512VL__))
+#include <avx512vbmivlintrin.h>
+#endif
+
+#if !defined(_MSC_VER) || __has_feature(modules) || defined(__AVX512PF__)
+#include <avx512pfintrin.h>
+#endif
+
+#if !defined(_MSC_VER) || __has_feature(modules) || defined(__PKU__)
+#include <pkuintrin.h>
+#endif
+
+#if !defined(_MSC_VER) || __has_feature(modules) || defined(__RDRND__)
+static __inline__ int __attribute__((__always_inline__, __nodebug__, __target__("rdrnd")))
+_rdrand16_step(unsigned short *__p)
+{
+ return __builtin_ia32_rdrand16_step(__p);
+}
+
+static __inline__ int __attribute__((__always_inline__, __nodebug__, __target__("rdrnd")))
+_rdrand32_step(unsigned int *__p)
+{
+ return __builtin_ia32_rdrand32_step(__p);
+}
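+
+/* Usage sketch (illustrative): the *_step intrinsics return 1 when a random
+ * value was written through the pointer and 0 when the hardware generator
+ * was not ready, so callers typically retry in a loop.
+ *
+ *   unsigned int __r;
+ *   while (!_rdrand32_step(&__r))
+ *     ;  // retry until the DRNG delivers a value
+ */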
+
+/* _bit_scan_forward */
+static __inline__ int __attribute__((__always_inline__, __nodebug__))
+_bit_scan_forward(int __A) {
+ return __builtin_ctz(__A);
+}
+
+/* _bit_scan_reverse */
+static __inline__ int __attribute__((__always_inline__, __nodebug__))
+_bit_scan_reverse(int __A) {
+ return 31 - __builtin_clz(__A);
+}
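+
+/* Worked example (illustrative): for __A == 0x18 (binary 11000),
+ * _bit_scan_forward(0x18) returns 3 (index of the lowest set bit) and
+ * _bit_scan_reverse(0x18) returns 4 (index of the highest set bit).
+ * Both are undefined for __A == 0, as __builtin_ctz/__builtin_clz are. */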
+
+#ifdef __x86_64__
+static __inline__ int __attribute__((__always_inline__, __nodebug__, __target__("rdrnd")))
+_rdrand64_step(unsigned long long *__p)
+{
+ return __builtin_ia32_rdrand64_step(__p);
+}
+#endif
+#endif /* __RDRND__ */
+
+#if !defined(_MSC_VER) || __has_feature(modules) || defined(__FSGSBASE__)
+#ifdef __x86_64__
+static __inline__ unsigned int __attribute__((__always_inline__, __nodebug__, __target__("fsgsbase")))
+_readfsbase_u32(void)
+{
+ return __builtin_ia32_rdfsbase32();
+}
+
+static __inline__ unsigned long long __attribute__((__always_inline__, __nodebug__, __target__("fsgsbase")))
+_readfsbase_u64(void)
+{
+ return __builtin_ia32_rdfsbase64();
+}
+
+static __inline__ unsigned int __attribute__((__always_inline__, __nodebug__, __target__("fsgsbase")))
+_readgsbase_u32(void)
+{
+ return __builtin_ia32_rdgsbase32();
+}
+
+static __inline__ unsigned long long __attribute__((__always_inline__, __nodebug__, __target__("fsgsbase")))
+_readgsbase_u64(void)
+{
+ return __builtin_ia32_rdgsbase64();
+}
+
+static __inline__ void __attribute__((__always_inline__, __nodebug__, __target__("fsgsbase")))
+_writefsbase_u32(unsigned int __V)
+{
+ return __builtin_ia32_wrfsbase32(__V);
+}
+
+static __inline__ void __attribute__((__always_inline__, __nodebug__, __target__("fsgsbase")))
+_writefsbase_u64(unsigned long long __V)
+{
+ return __builtin_ia32_wrfsbase64(__V);
+}
+
+static __inline__ void __attribute__((__always_inline__, __nodebug__, __target__("fsgsbase")))
+_writegsbase_u32(unsigned int __V)
+{
+ return __builtin_ia32_wrgsbase32(__V);
+}
+
+static __inline__ void __attribute__((__always_inline__, __nodebug__, __target__("fsgsbase")))
+_writegsbase_u64(unsigned long long __V)
+{
+ return __builtin_ia32_wrgsbase64(__V);
+}
+
+#endif
+#endif /* __FSGSBASE__ */
+
+#if !defined(_MSC_VER) || __has_feature(modules) || defined(__RTM__)
+#include <rtmintrin.h>
+#include <xtestintrin.h>
+#endif
+
+#if !defined(_MSC_VER) || __has_feature(modules) || defined(__SHA__)
+#include <shaintrin.h>
+#endif
+
+#if !defined(_MSC_VER) || __has_feature(modules) || defined(__FXSR__)
+#include <fxsrintrin.h>
+#endif
+
+#if !defined(_MSC_VER) || __has_feature(modules) || defined(__XSAVE__)
+#include <xsaveintrin.h>
+#endif
+
+#if !defined(_MSC_VER) || __has_feature(modules) || defined(__XSAVEOPT__)
+#include <xsaveoptintrin.h>
+#endif
+
+#if !defined(_MSC_VER) || __has_feature(modules) || defined(__XSAVEC__)
+#include <xsavecintrin.h>
+#endif
+
+#if !defined(_MSC_VER) || __has_feature(modules) || defined(__XSAVES__)
+#include <xsavesintrin.h>
+#endif
+
+/* Some intrinsics inside adxintrin.h are available only on processors with
+ * ADX, whereas others are available unconditionally. */
+#include <adxintrin.h>
+
+#endif /* __IMMINTRIN_H */
diff --git a/current/clang-include/intrin.h b/current/clang-include/intrin.h
new file mode 100644
index 0000000..f18711a
--- /dev/null
+++ b/current/clang-include/intrin.h
@@ -0,0 +1,957 @@
+/* ===-------- intrin.h ---------------------------------------------------===
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+ * THE SOFTWARE.
+ *
+ *===-----------------------------------------------------------------------===
+ */
+
+/* Only include this if we're compiling for the Windows platform. */
+#ifndef _MSC_VER
+#include_next <intrin.h>
+#else
+
+#ifndef __INTRIN_H
+#define __INTRIN_H
+
+/* First include the standard intrinsics. */
+#if defined(__i386__) || defined(__x86_64__)
+#include <x86intrin.h>
+#endif
+
+/* For the definition of jmp_buf. */
+#if __STDC_HOSTED__
+#include <setjmp.h>
+#endif
+
+/* Define the default attributes for the functions in this file. */
+#define __DEFAULT_FN_ATTRS __attribute__((__always_inline__, __nodebug__))
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#if defined(__MMX__)
+/* Miscellaneous MMX intrinsics that aren't declared in those files. */
+__m64 _m_from_float(float);
+float _m_to_float(__m64);
+#endif
+
+/* Other assorted instruction intrinsics. */
+void __addfsbyte(unsigned long, unsigned char);
+void __addfsdword(unsigned long, unsigned long);
+void __addfsword(unsigned long, unsigned short);
+void __code_seg(const char *);
+static __inline__
+void __cpuid(int[4], int);
+static __inline__
+void __cpuidex(int[4], int, int);
+void __debugbreak(void);
+__int64 __emul(int, int);
+unsigned __int64 __emulu(unsigned int, unsigned int);
+void __cdecl __fastfail(unsigned int);
+unsigned int __getcallerseflags(void);
+static __inline__
+void __halt(void);
+unsigned char __inbyte(unsigned short);
+void __inbytestring(unsigned short, unsigned char *, unsigned long);
+void __incfsbyte(unsigned long);
+void __incfsdword(unsigned long);
+void __incfsword(unsigned long);
+unsigned long __indword(unsigned short);
+void __indwordstring(unsigned short, unsigned long *, unsigned long);
+void __int2c(void);
+void __invlpg(void *);
+unsigned short __inword(unsigned short);
+void __inwordstring(unsigned short, unsigned short *, unsigned long);
+void __lidt(void *);
+unsigned __int64 __ll_lshift(unsigned __int64, int);
+__int64 __ll_rshift(__int64, int);
+void __llwpcb(void *);
+unsigned char __lwpins32(unsigned int, unsigned int, unsigned int);
+void __lwpval32(unsigned int, unsigned int, unsigned int);
+unsigned int __lzcnt(unsigned int);
+unsigned short __lzcnt16(unsigned short);
+static __inline__
+void __movsb(unsigned char *, unsigned char const *, size_t);
+static __inline__
+void __movsd(unsigned long *, unsigned long const *, size_t);
+static __inline__
+void __movsw(unsigned short *, unsigned short const *, size_t);
+void __nop(void);
+void __nvreg_restore_fence(void);
+void __nvreg_save_fence(void);
+void __outbyte(unsigned short, unsigned char);
+void __outbytestring(unsigned short, unsigned char *, unsigned long);
+void __outdword(unsigned short, unsigned long);
+void __outdwordstring(unsigned short, unsigned long *, unsigned long);
+void __outword(unsigned short, unsigned short);
+void __outwordstring(unsigned short, unsigned short *, unsigned long);
+static __inline__
+unsigned int __popcnt(unsigned int);
+static __inline__
+unsigned short __popcnt16(unsigned short);
+unsigned long __readcr0(void);
+unsigned long __readcr2(void);
+static __inline__
+unsigned long __readcr3(void);
+unsigned long __readcr4(void);
+unsigned long __readcr8(void);
+unsigned int __readdr(unsigned int);
+#ifdef __i386__
+static __inline__
+unsigned char __readfsbyte(unsigned long);
+static __inline__
+unsigned long __readfsdword(unsigned long);
+static __inline__
+unsigned __int64 __readfsqword(unsigned long);
+static __inline__
+unsigned short __readfsword(unsigned long);
+#endif
+static __inline__
+unsigned __int64 __readmsr(unsigned long);
+unsigned __int64 __readpmc(unsigned long);
+unsigned long __segmentlimit(unsigned long);
+void __sidt(void *);
+void *__slwpcb(void);
+static __inline__
+void __stosb(unsigned char *, unsigned char, size_t);
+static __inline__
+void __stosd(unsigned long *, unsigned long, size_t);
+static __inline__
+void __stosw(unsigned short *, unsigned short, size_t);
+void __svm_clgi(void);
+void __svm_invlpga(void *, int);
+void __svm_skinit(int);
+void __svm_stgi(void);
+void __svm_vmload(size_t);
+void __svm_vmrun(size_t);
+void __svm_vmsave(size_t);
+void __ud2(void);
+unsigned __int64 __ull_rshift(unsigned __int64, int);
+void __vmx_off(void);
+void __vmx_vmptrst(unsigned __int64 *);
+void __wbinvd(void);
+void __writecr0(unsigned int);
+static __inline__
+void __writecr3(unsigned int);
+void __writecr4(unsigned int);
+void __writecr8(unsigned int);
+void __writedr(unsigned int, unsigned int);
+void __writefsbyte(unsigned long, unsigned char);
+void __writefsdword(unsigned long, unsigned long);
+void __writefsqword(unsigned long, unsigned __int64);
+void __writefsword(unsigned long, unsigned short);
+void __writemsr(unsigned long, unsigned __int64);
+static __inline__
+void *_AddressOfReturnAddress(void);
+static __inline__
+unsigned char _BitScanForward(unsigned long *_Index, unsigned long _Mask);
+static __inline__
+unsigned char _BitScanReverse(unsigned long *_Index, unsigned long _Mask);
+static __inline__
+unsigned char _bittest(long const *, long);
+static __inline__
+unsigned char _bittestandcomplement(long *, long);
+static __inline__
+unsigned char _bittestandreset(long *, long);
+static __inline__
+unsigned char _bittestandset(long *, long);
+unsigned __int64 __cdecl _byteswap_uint64(unsigned __int64);
+unsigned long __cdecl _byteswap_ulong(unsigned long);
+unsigned short __cdecl _byteswap_ushort(unsigned short);
+void __cdecl _disable(void);
+void __cdecl _enable(void);
+long _InterlockedAddLargeStatistic(__int64 volatile *_Addend, long _Value);
+static __inline__
+long _InterlockedAnd(long volatile *_Value, long _Mask);
+static __inline__
+short _InterlockedAnd16(short volatile *_Value, short _Mask);
+static __inline__
+char _InterlockedAnd8(char volatile *_Value, char _Mask);
+unsigned char _interlockedbittestandreset(long volatile *, long);
+static __inline__
+unsigned char _interlockedbittestandset(long volatile *, long);
+static __inline__
+long __cdecl _InterlockedCompareExchange(long volatile *_Destination,
+ long _Exchange, long _Comparand);
+long _InterlockedCompareExchange_HLEAcquire(long volatile *, long, long);
+long _InterlockedCompareExchange_HLERelease(long volatile *, long, long);
+static __inline__
+short _InterlockedCompareExchange16(short volatile *_Destination,
+ short _Exchange, short _Comparand);
+static __inline__
+__int64 _InterlockedCompareExchange64(__int64 volatile *_Destination,
+ __int64 _Exchange, __int64 _Comparand);
+__int64 _InterlockedCompareExchange64_HLEAcquire(__int64 volatile *, __int64,
+ __int64);
+__int64 _InterlockedCompareExchange64_HLERelease(__int64 volatile *, __int64,
+ __int64);
+static __inline__
+char _InterlockedCompareExchange8(char volatile *_Destination, char _Exchange,
+ char _Comparand);
+void *_InterlockedCompareExchangePointer_HLEAcquire(void *volatile *, void *,
+ void *);
+void *_InterlockedCompareExchangePointer_HLERelease(void *volatile *, void *,
+ void *);
+static __inline__
+long __cdecl _InterlockedDecrement(long volatile *_Addend);
+static __inline__
+short _InterlockedDecrement16(short volatile *_Addend);
+long _InterlockedExchange(long volatile *_Target, long _Value);
+static __inline__
+short _InterlockedExchange16(short volatile *_Target, short _Value);
+static __inline__
+char _InterlockedExchange8(char volatile *_Target, char _Value);
+static __inline__
+long __cdecl _InterlockedExchangeAdd(long volatile *_Addend, long _Value);
+long _InterlockedExchangeAdd_HLEAcquire(long volatile *, long);
+long _InterlockedExchangeAdd_HLERelease(long volatile *, long);
+static __inline__
+short _InterlockedExchangeAdd16(short volatile *_Addend, short _Value);
+__int64 _InterlockedExchangeAdd64_HLEAcquire(__int64 volatile *, __int64);
+__int64 _InterlockedExchangeAdd64_HLERelease(__int64 volatile *, __int64);
+static __inline__
+char _InterlockedExchangeAdd8(char volatile *_Addend, char _Value);
+static __inline__
+long __cdecl _InterlockedIncrement(long volatile *_Addend);
+static __inline__
+short _InterlockedIncrement16(short volatile *_Addend);
+static __inline__
+long _InterlockedOr(long volatile *_Value, long _Mask);
+static __inline__
+short _InterlockedOr16(short volatile *_Value, short _Mask);
+static __inline__
+char _InterlockedOr8(char volatile *_Value, char _Mask);
+static __inline__
+long _InterlockedXor(long volatile *_Value, long _Mask);
+static __inline__
+short _InterlockedXor16(short volatile *_Value, short _Mask);
+static __inline__
+char _InterlockedXor8(char volatile *_Value, char _Mask);
+void __cdecl _invpcid(unsigned int, void *);
+static __inline__
+unsigned long __cdecl _lrotl(unsigned long, int);
+static __inline__
+unsigned long __cdecl _lrotr(unsigned long, int);
+static __inline__
+void _ReadBarrier(void);
+static __inline__
+void _ReadWriteBarrier(void);
+static __inline__
+void *_ReturnAddress(void);
+unsigned int _rorx_u32(unsigned int, const unsigned int);
+static __inline__
+unsigned int __cdecl _rotl(unsigned int _Value, int _Shift);
+static __inline__
+unsigned short _rotl16(unsigned short _Value, unsigned char _Shift);
+static __inline__
+unsigned __int64 __cdecl _rotl64(unsigned __int64 _Value, int _Shift);
+static __inline__
+unsigned char _rotl8(unsigned char _Value, unsigned char _Shift);
+static __inline__
+unsigned int __cdecl _rotr(unsigned int _Value, int _Shift);
+static __inline__
+unsigned short _rotr16(unsigned short _Value, unsigned char _Shift);
+static __inline__
+unsigned __int64 __cdecl _rotr64(unsigned __int64 _Value, int _Shift);
+static __inline__
+unsigned char _rotr8(unsigned char _Value, unsigned char _Shift);
+int _sarx_i32(int, unsigned int);
+#if __STDC_HOSTED__
+int __cdecl _setjmp(jmp_buf);
+#endif
+unsigned int _shlx_u32(unsigned int, unsigned int);
+unsigned int _shrx_u32(unsigned int, unsigned int);
+void _Store_HLERelease(long volatile *, long);
+void _Store64_HLERelease(__int64 volatile *, __int64);
+void _StorePointer_HLERelease(void *volatile *, void *);
+static __inline__
+void _WriteBarrier(void);
+unsigned __int32 _xbegin(void);
+void _xend(void);
+#define _XCR_XFEATURE_ENABLED_MASK 0
+static __inline__
+unsigned __int64 __cdecl _xgetbv(unsigned int);
+void __cdecl _xsetbv(unsigned int, unsigned __int64);
+
+/* These additional intrinsics are turned on in x64/amd64/x86_64 mode. */
+#ifdef __x86_64__
+void __addgsbyte(unsigned long, unsigned char);
+void __addgsdword(unsigned long, unsigned long);
+void __addgsqword(unsigned long, unsigned __int64);
+void __addgsword(unsigned long, unsigned short);
+static __inline__
+void __faststorefence(void);
+void __incgsbyte(unsigned long);
+void __incgsdword(unsigned long);
+void __incgsqword(unsigned long);
+void __incgsword(unsigned long);
+unsigned char __lwpins64(unsigned __int64, unsigned int, unsigned int);
+void __lwpval64(unsigned __int64, unsigned int, unsigned int);
+unsigned __int64 __lzcnt64(unsigned __int64);
+static __inline__
+void __movsq(unsigned long long *, unsigned long long const *, size_t);
+__int64 __mulh(__int64, __int64);
+static __inline__
+unsigned __int64 __popcnt64(unsigned __int64);
+static __inline__
+unsigned char __readgsbyte(unsigned long);
+static __inline__
+unsigned long __readgsdword(unsigned long);
+static __inline__
+unsigned __int64 __readgsqword(unsigned long);
+unsigned short __readgsword(unsigned long);
+unsigned __int64 __shiftleft128(unsigned __int64 _LowPart,
+ unsigned __int64 _HighPart,
+ unsigned char _Shift);
+unsigned __int64 __shiftright128(unsigned __int64 _LowPart,
+ unsigned __int64 _HighPart,
+ unsigned char _Shift);
+static __inline__
+void __stosq(unsigned __int64 *, unsigned __int64, size_t);
+unsigned char __vmx_on(unsigned __int64 *);
+unsigned char __vmx_vmclear(unsigned __int64 *);
+unsigned char __vmx_vmlaunch(void);
+unsigned char __vmx_vmptrld(unsigned __int64 *);
+unsigned char __vmx_vmread(size_t, size_t *);
+unsigned char __vmx_vmresume(void);
+unsigned char __vmx_vmwrite(size_t, size_t);
+void __writegsbyte(unsigned long, unsigned char);
+void __writegsdword(unsigned long, unsigned long);
+void __writegsqword(unsigned long, unsigned __int64);
+void __writegsword(unsigned long, unsigned short);
+static __inline__
+unsigned char _BitScanForward64(unsigned long *_Index, unsigned __int64 _Mask);
+static __inline__
+unsigned char _BitScanReverse64(unsigned long *_Index, unsigned __int64 _Mask);
+static __inline__
+unsigned char _bittest64(__int64 const *, __int64);
+static __inline__
+unsigned char _bittestandcomplement64(__int64 *, __int64);
+static __inline__
+unsigned char _bittestandreset64(__int64 *, __int64);
+static __inline__
+unsigned char _bittestandset64(__int64 *, __int64);
+unsigned __int64 __cdecl _byteswap_uint64(unsigned __int64);
+long _InterlockedAnd_np(long volatile *_Value, long _Mask);
+short _InterlockedAnd16_np(short volatile *_Value, short _Mask);
+__int64 _InterlockedAnd64_np(__int64 volatile *_Value, __int64 _Mask);
+char _InterlockedAnd8_np(char volatile *_Value, char _Mask);
+unsigned char _interlockedbittestandreset64(__int64 volatile *, __int64);
+static __inline__
+unsigned char _interlockedbittestandset64(__int64 volatile *, __int64);
+long _InterlockedCompareExchange_np(long volatile *_Destination, long _Exchange,
+ long _Comparand);
+unsigned char _InterlockedCompareExchange128(__int64 volatile *_Destination,
+ __int64 _ExchangeHigh,
+ __int64 _ExchangeLow,
+                                             __int64 *_ComparandResult);
+unsigned char _InterlockedCompareExchange128_np(__int64 volatile *_Destination,
+ __int64 _ExchangeHigh,
+ __int64 _ExchangeLow,
+ __int64 *_ComparandResult);
+short _InterlockedCompareExchange16_np(short volatile *_Destination,
+ short _Exchange, short _Comparand);
+__int64 _InterlockedCompareExchange64_HLEAcquire(__int64 volatile *, __int64,
+ __int64);
+__int64 _InterlockedCompareExchange64_HLERelease(__int64 volatile *, __int64,
+ __int64);
+__int64 _InterlockedCompareExchange64_np(__int64 volatile *_Destination,
+ __int64 _Exchange, __int64 _Comparand);
+void *_InterlockedCompareExchangePointer(void *volatile *_Destination,
+ void *_Exchange, void *_Comparand);
+void *_InterlockedCompareExchangePointer_np(void *volatile *_Destination,
+ void *_Exchange, void *_Comparand);
+static __inline__
+__int64 _InterlockedDecrement64(__int64 volatile *_Addend);
+static __inline__
+__int64 _InterlockedExchange64(__int64 volatile *_Target, __int64 _Value);
+static __inline__
+__int64 _InterlockedExchangeAdd64(__int64 volatile *_Addend, __int64 _Value);
+void *_InterlockedExchangePointer(void *volatile *_Target, void *_Value);
+static __inline__
+__int64 _InterlockedIncrement64(__int64 volatile *_Addend);
+long _InterlockedOr_np(long volatile *_Value, long _Mask);
+short _InterlockedOr16_np(short volatile *_Value, short _Mask);
+static __inline__
+__int64 _InterlockedOr64(__int64 volatile *_Value, __int64 _Mask);
+__int64 _InterlockedOr64_np(__int64 volatile *_Value, __int64 _Mask);
+char _InterlockedOr8_np(char volatile *_Value, char _Mask);
+long _InterlockedXor_np(long volatile *_Value, long _Mask);
+short _InterlockedXor16_np(short volatile *_Value, short _Mask);
+static __inline__
+__int64 _InterlockedXor64(__int64 volatile *_Value, __int64 _Mask);
+__int64 _InterlockedXor64_np(__int64 volatile *_Value, __int64 _Mask);
+char _InterlockedXor8_np(char volatile *_Value, char _Mask);
+static __inline__
+__int64 _mul128(__int64 _Multiplier, __int64 _Multiplicand,
+ __int64 *_HighProduct);
+unsigned __int64 _rorx_u64(unsigned __int64, const unsigned int);
+__int64 _sarx_i64(__int64, unsigned int);
+#if __STDC_HOSTED__
+int __cdecl _setjmpex(jmp_buf);
+#endif
+unsigned __int64 _shlx_u64(unsigned __int64, unsigned int);
+unsigned __int64 _shrx_u64(unsigned __int64, unsigned int);
+/*
+ * Multiply two 64-bit integers, producing a 128-bit result.
+ * The low half is returned directly and the high half is in an out parameter.
+ */
+static __inline__ unsigned __int64 __DEFAULT_FN_ATTRS
+_umul128(unsigned __int64 _Multiplier, unsigned __int64 _Multiplicand,
+ unsigned __int64 *_HighProduct) {
+ unsigned __int128 _FullProduct =
+ (unsigned __int128)_Multiplier * (unsigned __int128)_Multiplicand;
+ *_HighProduct = _FullProduct >> 64;
+ return _FullProduct;
+}
+static __inline__ unsigned __int64 __DEFAULT_FN_ATTRS
+__umulh(unsigned __int64 _Multiplier, unsigned __int64 _Multiplicand) {
+ unsigned __int128 _FullProduct =
+ (unsigned __int128)_Multiplier * (unsigned __int128)_Multiplicand;
+ return _FullProduct >> 64;
+}
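+/* Worked example (illustrative): multiplying 2^32 by 2^32 yields 2^64, so
+ * _umul128(1ULL << 32, 1ULL << 32, &__hi) returns 0 and stores 1 in __hi,
+ * and __umulh(1ULL << 32, 1ULL << 32) returns 1. */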
+
+#endif /* __x86_64__ */
+
+/*----------------------------------------------------------------------------*\
+|* Multiplication
+\*----------------------------------------------------------------------------*/
+static __inline__ __int64 __DEFAULT_FN_ATTRS
+__emul(int __in1, int __in2) {
+ return (__int64)__in1 * (__int64)__in2;
+}
+static __inline__ unsigned __int64 __DEFAULT_FN_ATTRS
+__emulu(unsigned int __in1, unsigned int __in2) {
+ return (unsigned __int64)__in1 * (unsigned __int64)__in2;
+}
+/*----------------------------------------------------------------------------*\
+|* Bit Twiddling
+\*----------------------------------------------------------------------------*/
+static __inline__ unsigned char __DEFAULT_FN_ATTRS
+_rotl8(unsigned char _Value, unsigned char _Shift) {
+ _Shift &= 0x7;
+ return _Shift ? (_Value << _Shift) | (_Value >> (8 - _Shift)) : _Value;
+}
+static __inline__ unsigned char __DEFAULT_FN_ATTRS
+_rotr8(unsigned char _Value, unsigned char _Shift) {
+ _Shift &= 0x7;
+ return _Shift ? (_Value >> _Shift) | (_Value << (8 - _Shift)) : _Value;
+}
+static __inline__ unsigned short __DEFAULT_FN_ATTRS
+_rotl16(unsigned short _Value, unsigned char _Shift) {
+ _Shift &= 0xf;
+ return _Shift ? (_Value << _Shift) | (_Value >> (16 - _Shift)) : _Value;
+}
+static __inline__ unsigned short __DEFAULT_FN_ATTRS
+_rotr16(unsigned short _Value, unsigned char _Shift) {
+ _Shift &= 0xf;
+ return _Shift ? (_Value >> _Shift) | (_Value << (16 - _Shift)) : _Value;
+}
+static __inline__ unsigned int __DEFAULT_FN_ATTRS
+_rotl(unsigned int _Value, int _Shift) {
+ _Shift &= 0x1f;
+ return _Shift ? (_Value << _Shift) | (_Value >> (32 - _Shift)) : _Value;
+}
+static __inline__ unsigned int __DEFAULT_FN_ATTRS
+_rotr(unsigned int _Value, int _Shift) {
+ _Shift &= 0x1f;
+ return _Shift ? (_Value >> _Shift) | (_Value << (32 - _Shift)) : _Value;
+}
+static __inline__ unsigned long __DEFAULT_FN_ATTRS
+_lrotl(unsigned long _Value, int _Shift) {
+ _Shift &= 0x1f;
+ return _Shift ? (_Value << _Shift) | (_Value >> (32 - _Shift)) : _Value;
+}
+static __inline__ unsigned long __DEFAULT_FN_ATTRS
+_lrotr(unsigned long _Value, int _Shift) {
+ _Shift &= 0x1f;
+ return _Shift ? (_Value >> _Shift) | (_Value << (32 - _Shift)) : _Value;
+}
+static __inline__ unsigned __int64 __DEFAULT_FN_ATTRS
+_rotl64(unsigned __int64 _Value, int _Shift) {
+ _Shift &= 0x3f;
+ return _Shift ? (_Value << _Shift) | (_Value >> (64 - _Shift)) : _Value;
+}
+static __inline__ unsigned __int64 __DEFAULT_FN_ATTRS
+_rotr64(unsigned __int64 _Value, int _Shift) {
+ _Shift &= 0x3f;
+ return _Shift ? (_Value >> _Shift) | (_Value << (64 - _Shift)) : _Value;
+}
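+/* Worked example (illustrative): _rotl8(0x81, 1) == 0x03 and
+ * _rotr8(0x81, 1) == 0xC0. Masking the shift first means a count of 8
+ * becomes 0 and returns the value unchanged, which also avoids the
+ * undefined behavior of shifting an 8-bit value by 8. */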
+/*----------------------------------------------------------------------------*\
+|* Bit Counting and Testing
+\*----------------------------------------------------------------------------*/
+static __inline__ unsigned char __DEFAULT_FN_ATTRS
+_BitScanForward(unsigned long *_Index, unsigned long _Mask) {
+ if (!_Mask)
+ return 0;
+ *_Index = __builtin_ctzl(_Mask);
+ return 1;
+}
+static __inline__ unsigned char __DEFAULT_FN_ATTRS
+_BitScanReverse(unsigned long *_Index, unsigned long _Mask) {
+ if (!_Mask)
+ return 0;
+ *_Index = 31 - __builtin_clzl(_Mask);
+ return 1;
+}
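+/* Worked example (illustrative): for _Mask == 0x50 (binary 1010000),
+ * _BitScanForward stores 4 and _BitScanReverse stores 6, and both return 1;
+ * for _Mask == 0 both return 0 and leave *_Index unmodified. */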
+static __inline__ unsigned short __DEFAULT_FN_ATTRS
+__popcnt16(unsigned short _Value) {
+ return __builtin_popcount((int)_Value);
+}
+static __inline__ unsigned int __DEFAULT_FN_ATTRS
+__popcnt(unsigned int _Value) {
+ return __builtin_popcount(_Value);
+}
+static __inline__ unsigned char __DEFAULT_FN_ATTRS
+_bittest(long const *_BitBase, long _BitPos) {
+ return (*_BitBase >> _BitPos) & 1;
+}
+static __inline__ unsigned char __DEFAULT_FN_ATTRS
+_bittestandcomplement(long *_BitBase, long _BitPos) {
+ unsigned char _Res = (*_BitBase >> _BitPos) & 1;
+ *_BitBase = *_BitBase ^ (1 << _BitPos);
+ return _Res;
+}
+static __inline__ unsigned char __DEFAULT_FN_ATTRS
+_bittestandreset(long *_BitBase, long _BitPos) {
+ unsigned char _Res = (*_BitBase >> _BitPos) & 1;
+ *_BitBase = *_BitBase & ~(1 << _BitPos);
+ return _Res;
+}
+static __inline__ unsigned char __DEFAULT_FN_ATTRS
+_bittestandset(long *_BitBase, long _BitPos) {
+ unsigned char _Res = (*_BitBase >> _BitPos) & 1;
+ *_BitBase = *_BitBase | (1 << _BitPos);
+ return _Res;
+}
+static __inline__ unsigned char __DEFAULT_FN_ATTRS
+_interlockedbittestandset(long volatile *_BitBase, long _BitPos) {
+ long _PrevVal = __atomic_fetch_or(_BitBase, 1l << _BitPos, __ATOMIC_SEQ_CST);
+ return (_PrevVal >> _BitPos) & 1;
+}
+#ifdef __x86_64__
+static __inline__ unsigned char __DEFAULT_FN_ATTRS
+_BitScanForward64(unsigned long *_Index, unsigned __int64 _Mask) {
+ if (!_Mask)
+ return 0;
+ *_Index = __builtin_ctzll(_Mask);
+ return 1;
+}
+static __inline__ unsigned char __DEFAULT_FN_ATTRS
+_BitScanReverse64(unsigned long *_Index, unsigned __int64 _Mask) {
+ if (!_Mask)
+ return 0;
+ *_Index = 63 - __builtin_clzll(_Mask);
+ return 1;
+}
+static __inline__ unsigned __int64 __DEFAULT_FN_ATTRS
+__popcnt64(unsigned __int64 _Value) {
+ return __builtin_popcountll(_Value);
+}
+static __inline__ unsigned char __DEFAULT_FN_ATTRS
+_bittest64(__int64 const *_BitBase, __int64 _BitPos) {
+ return (*_BitBase >> _BitPos) & 1;
+}
+static __inline__ unsigned char __DEFAULT_FN_ATTRS
+_bittestandcomplement64(__int64 *_BitBase, __int64 _BitPos) {
+ unsigned char _Res = (*_BitBase >> _BitPos) & 1;
+ *_BitBase = *_BitBase ^ (1ll << _BitPos);
+ return _Res;
+}
+static __inline__ unsigned char __DEFAULT_FN_ATTRS
+_bittestandreset64(__int64 *_BitBase, __int64 _BitPos) {
+ unsigned char _Res = (*_BitBase >> _BitPos) & 1;
+ *_BitBase = *_BitBase & ~(1ll << _BitPos);
+ return _Res;
+}
+static __inline__ unsigned char __DEFAULT_FN_ATTRS
+_bittestandset64(__int64 *_BitBase, __int64 _BitPos) {
+ unsigned char _Res = (*_BitBase >> _BitPos) & 1;
+ *_BitBase = *_BitBase | (1ll << _BitPos);
+ return _Res;
+}
+static __inline__ unsigned char __DEFAULT_FN_ATTRS
+_interlockedbittestandset64(__int64 volatile *_BitBase, __int64 _BitPos) {
+ long long _PrevVal =
+ __atomic_fetch_or(_BitBase, 1ll << _BitPos, __ATOMIC_SEQ_CST);
+ return (_PrevVal >> _BitPos) & 1;
+}
+#endif
+/*----------------------------------------------------------------------------*\
+|* Interlocked Exchange Add
+\*----------------------------------------------------------------------------*/
+static __inline__ char __DEFAULT_FN_ATTRS
+_InterlockedExchangeAdd8(char volatile *_Addend, char _Value) {
+ return __atomic_fetch_add(_Addend, _Value, __ATOMIC_SEQ_CST);
+}
+static __inline__ short __DEFAULT_FN_ATTRS
+_InterlockedExchangeAdd16(short volatile *_Addend, short _Value) {
+ return __atomic_fetch_add(_Addend, _Value, __ATOMIC_SEQ_CST);
+}
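+/* Worked example (illustrative): these are fetch-then-add operations, so if
+ * *_Addend is 5, _InterlockedExchangeAdd16(_Addend, 3) stores 8 and returns
+ * the prior value 5. */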
+#ifdef __x86_64__
+static __inline__ __int64 __DEFAULT_FN_ATTRS
+_InterlockedExchangeAdd64(__int64 volatile *_Addend, __int64 _Value) {
+ return __atomic_fetch_add(_Addend, _Value, __ATOMIC_SEQ_CST);
+}
+#endif
+/*----------------------------------------------------------------------------*\
+|* Interlocked Exchange Sub
+\*----------------------------------------------------------------------------*/
+static __inline__ char __DEFAULT_FN_ATTRS
+_InterlockedExchangeSub8(char volatile *_Subend, char _Value) {
+ return __atomic_fetch_sub(_Subend, _Value, __ATOMIC_SEQ_CST);
+}
+static __inline__ short __DEFAULT_FN_ATTRS
+_InterlockedExchangeSub16(short volatile *_Subend, short _Value) {
+ return __atomic_fetch_sub(_Subend, _Value, __ATOMIC_SEQ_CST);
+}
+static __inline__ long __DEFAULT_FN_ATTRS
+_InterlockedExchangeSub(long volatile *_Subend, long _Value) {
+ return __atomic_fetch_sub(_Subend, _Value, __ATOMIC_SEQ_CST);
+}
+#ifdef __x86_64__
+static __inline__ __int64 __DEFAULT_FN_ATTRS
+_InterlockedExchangeSub64(__int64 volatile *_Subend, __int64 _Value) {
+ return __atomic_fetch_sub(_Subend, _Value, __ATOMIC_SEQ_CST);
+}
+#endif
+/*----------------------------------------------------------------------------*\
+|* Interlocked Increment
+\*----------------------------------------------------------------------------*/
+static __inline__ short __DEFAULT_FN_ATTRS
+_InterlockedIncrement16(short volatile *_Value) {
+ return __atomic_add_fetch(_Value, 1, __ATOMIC_SEQ_CST);
+}
+#ifdef __x86_64__
+static __inline__ __int64 __DEFAULT_FN_ATTRS
+_InterlockedIncrement64(__int64 volatile *_Value) {
+ return __atomic_add_fetch(_Value, 1, __ATOMIC_SEQ_CST);
+}
+#endif
+/*----------------------------------------------------------------------------*\
+|* Interlocked Decrement
+\*----------------------------------------------------------------------------*/
+static __inline__ short __DEFAULT_FN_ATTRS
+_InterlockedDecrement16(short volatile *_Value) {
+ return __atomic_sub_fetch(_Value, 1, __ATOMIC_SEQ_CST);
+}
+#ifdef __x86_64__
+static __inline__ __int64 __DEFAULT_FN_ATTRS
+_InterlockedDecrement64(__int64 volatile *_Value) {
+ return __atomic_sub_fetch(_Value, 1, __ATOMIC_SEQ_CST);
+}
+#endif
+/*----------------------------------------------------------------------------*\
+|* Interlocked And
+\*----------------------------------------------------------------------------*/
+static __inline__ char __DEFAULT_FN_ATTRS
+_InterlockedAnd8(char volatile *_Value, char _Mask) {
+ return __atomic_fetch_and(_Value, _Mask, __ATOMIC_SEQ_CST);
+}
+static __inline__ short __DEFAULT_FN_ATTRS
+_InterlockedAnd16(short volatile *_Value, short _Mask) {
+ return __atomic_fetch_and(_Value, _Mask, __ATOMIC_SEQ_CST);
+}
+static __inline__ long __DEFAULT_FN_ATTRS
+_InterlockedAnd(long volatile *_Value, long _Mask) {
+ return __atomic_fetch_and(_Value, _Mask, __ATOMIC_SEQ_CST);
+}
+#ifdef __x86_64__
+static __inline__ __int64 __DEFAULT_FN_ATTRS
+_InterlockedAnd64(__int64 volatile *_Value, __int64 _Mask) {
+ return __atomic_fetch_and(_Value, _Mask, __ATOMIC_SEQ_CST);
+}
+#endif
+/*----------------------------------------------------------------------------*\
+|* Interlocked Or
+\*----------------------------------------------------------------------------*/
+static __inline__ char __DEFAULT_FN_ATTRS
+_InterlockedOr8(char volatile *_Value, char _Mask) {
+ return __atomic_fetch_or(_Value, _Mask, __ATOMIC_SEQ_CST);
+}
+static __inline__ short __DEFAULT_FN_ATTRS
+_InterlockedOr16(short volatile *_Value, short _Mask) {
+ return __atomic_fetch_or(_Value, _Mask, __ATOMIC_SEQ_CST);
+}
+static __inline__ long __DEFAULT_FN_ATTRS
+_InterlockedOr(long volatile *_Value, long _Mask) {
+ return __atomic_fetch_or(_Value, _Mask, __ATOMIC_SEQ_CST);
+}
+#ifdef __x86_64__
+static __inline__ __int64 __DEFAULT_FN_ATTRS
+_InterlockedOr64(__int64 volatile *_Value, __int64 _Mask) {
+ return __atomic_fetch_or(_Value, _Mask, __ATOMIC_SEQ_CST);
+}
+#endif
+/*----------------------------------------------------------------------------*\
+|* Interlocked Xor
+\*----------------------------------------------------------------------------*/
+static __inline__ char __DEFAULT_FN_ATTRS
+_InterlockedXor8(char volatile *_Value, char _Mask) {
+ return __atomic_fetch_xor(_Value, _Mask, __ATOMIC_SEQ_CST);
+}
+static __inline__ short __DEFAULT_FN_ATTRS
+_InterlockedXor16(short volatile *_Value, short _Mask) {
+ return __atomic_fetch_xor(_Value, _Mask, __ATOMIC_SEQ_CST);
+}
+static __inline__ long __DEFAULT_FN_ATTRS
+_InterlockedXor(long volatile *_Value, long _Mask) {
+ return __atomic_fetch_xor(_Value, _Mask, __ATOMIC_SEQ_CST);
+}
+#ifdef __x86_64__
+static __inline__ __int64 __DEFAULT_FN_ATTRS
+_InterlockedXor64(__int64 volatile *_Value, __int64 _Mask) {
+ return __atomic_fetch_xor(_Value, _Mask, __ATOMIC_SEQ_CST);
+}
+#endif
+/*----------------------------------------------------------------------------*\
+|* Interlocked Exchange
+\*----------------------------------------------------------------------------*/
+static __inline__ char __DEFAULT_FN_ATTRS
+_InterlockedExchange8(char volatile *_Target, char _Value) {
+ __atomic_exchange(_Target, &_Value, &_Value, __ATOMIC_SEQ_CST);
+ return _Value;
+}
+static __inline__ short __DEFAULT_FN_ATTRS
+_InterlockedExchange16(short volatile *_Target, short _Value) {
+ __atomic_exchange(_Target, &_Value, &_Value, __ATOMIC_SEQ_CST);
+ return _Value;
+}
+#ifdef __x86_64__
+static __inline__ __int64 __DEFAULT_FN_ATTRS
+_InterlockedExchange64(__int64 volatile *_Target, __int64 _Value) {
+ __atomic_exchange(_Target, &_Value, &_Value, __ATOMIC_SEQ_CST);
+ return _Value;
+}
+#endif
+/*----------------------------------------------------------------------------*\
+|* Interlocked Compare Exchange
+\*----------------------------------------------------------------------------*/
+static __inline__ char __DEFAULT_FN_ATTRS
+_InterlockedCompareExchange8(char volatile *_Destination,
+ char _Exchange, char _Comparand) {
+ __atomic_compare_exchange(_Destination, &_Comparand, &_Exchange, 0,
+ __ATOMIC_SEQ_CST, __ATOMIC_SEQ_CST);
+ return _Comparand;
+}
+static __inline__ short __DEFAULT_FN_ATTRS
+_InterlockedCompareExchange16(short volatile *_Destination,
+ short _Exchange, short _Comparand) {
+ __atomic_compare_exchange(_Destination, &_Comparand, &_Exchange, 0,
+ __ATOMIC_SEQ_CST, __ATOMIC_SEQ_CST);
+ return _Comparand;
+}
+static __inline__ __int64 __DEFAULT_FN_ATTRS
+_InterlockedCompareExchange64(__int64 volatile *_Destination,
+ __int64 _Exchange, __int64 _Comparand) {
+ __atomic_compare_exchange(_Destination, &_Comparand, &_Exchange, 0,
+ __ATOMIC_SEQ_CST, __ATOMIC_SEQ_CST);
+ return _Comparand;
+}
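+/* Usage sketch (illustrative; __atomic_add64_sketch is a hypothetical name):
+ * a lock-free add built on the compare-exchange above. The intrinsic returns
+ * the value it observed at *_Destination, and the swap happened iff that
+ * equals the comparand that was passed in.
+ *
+ *   static void __atomic_add64_sketch(__int64 volatile *__p, __int64 __d) {
+ *     __int64 __old = *__p;
+ *     while (_InterlockedCompareExchange64(__p, __old + __d, __old) != __old)
+ *       __old = *__p;
+ *   }
+ */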
+/*----------------------------------------------------------------------------*\
+|* Barriers
+\*----------------------------------------------------------------------------*/
+static __inline__ void __DEFAULT_FN_ATTRS
+__attribute__((__deprecated__("use other intrinsics or C++11 atomics instead")))
+_ReadWriteBarrier(void) {
+ __atomic_signal_fence(__ATOMIC_SEQ_CST);
+}
+static __inline__ void __DEFAULT_FN_ATTRS
+__attribute__((__deprecated__("use other intrinsics or C++11 atomics instead")))
+_ReadBarrier(void) {
+ __atomic_signal_fence(__ATOMIC_SEQ_CST);
+}
+static __inline__ void __DEFAULT_FN_ATTRS
+__attribute__((__deprecated__("use other intrinsics or C++11 atomics instead")))
+_WriteBarrier(void) {
+ __atomic_signal_fence(__ATOMIC_SEQ_CST);
+}
+#ifdef __x86_64__
+static __inline__ void __DEFAULT_FN_ATTRS
+__faststorefence(void) {
+ __atomic_thread_fence(__ATOMIC_SEQ_CST);
+}
+#endif
+/*----------------------------------------------------------------------------*\
+|* readfs, readgs
+|* (Pointers in address space #256 and #257 are relative to the GS and FS
+|* segment registers, respectively.)
+\*----------------------------------------------------------------------------*/
+#define __ptr_to_addr_space(__addr_space_nbr, __type, __offset) \
+ ((volatile __type __attribute__((__address_space__(__addr_space_nbr)))*) \
+ (__offset))
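+/* Illustrative expansion: __readgsdword(0x30) below dereferences
+ * (volatile unsigned long __attribute__((__address_space__(256)))*)0x30,
+ * which clang lowers to a gs-relative load (on Win64, gs:[0x30] holds the
+ * NT_TIB self pointer). */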
+
+#ifdef __i386__
+static __inline__ unsigned char __DEFAULT_FN_ATTRS
+__readfsbyte(unsigned long __offset) {
+ return *__ptr_to_addr_space(257, unsigned char, __offset);
+}
+static __inline__ unsigned short __DEFAULT_FN_ATTRS
+__readfsword(unsigned long __offset) {
+ return *__ptr_to_addr_space(257, unsigned short, __offset);
+}
+static __inline__ unsigned __int64 __DEFAULT_FN_ATTRS
+__readfsqword(unsigned long __offset) {
+ return *__ptr_to_addr_space(257, unsigned __int64, __offset);
+}
+#endif
+#ifdef __x86_64__
+static __inline__ unsigned char __DEFAULT_FN_ATTRS
+__readgsbyte(unsigned long __offset) {
+ return *__ptr_to_addr_space(256, unsigned char, __offset);
+}
+static __inline__ unsigned short __DEFAULT_FN_ATTRS
+__readgsword(unsigned long __offset) {
+ return *__ptr_to_addr_space(256, unsigned short, __offset);
+}
+static __inline__ unsigned long __DEFAULT_FN_ATTRS
+__readgsdword(unsigned long __offset) {
+ return *__ptr_to_addr_space(256, unsigned long, __offset);
+}
+static __inline__ unsigned __int64 __DEFAULT_FN_ATTRS
+__readgsqword(unsigned long __offset) {
+ return *__ptr_to_addr_space(256, unsigned __int64, __offset);
+}
+#endif
+#undef __ptr_to_addr_space
+/*----------------------------------------------------------------------------*\
+|* movs, stos
+\*----------------------------------------------------------------------------*/
+#if defined(__i386__) || defined(__x86_64__)
+static __inline__ void __DEFAULT_FN_ATTRS
+__movsb(unsigned char *__dst, unsigned char const *__src, size_t __n) {
+ __asm__("rep movsb" : : "D"(__dst), "S"(__src), "c"(__n)
+ : "%edi", "%esi", "%ecx");
+}
+static __inline__ void __DEFAULT_FN_ATTRS
+__movsd(unsigned long *__dst, unsigned long const *__src, size_t __n) {
+ __asm__("rep movsl" : : "D"(__dst), "S"(__src), "c"(__n)
+ : "%edi", "%esi", "%ecx");
+}
+static __inline__ void __DEFAULT_FN_ATTRS
+__movsw(unsigned short *__dst, unsigned short const *__src, size_t __n) {
+ __asm__("rep movsw" : : "D"(__dst), "S"(__src), "c"(__n)
+ : "%edi", "%esi", "%ecx");
+}
+static __inline__ void __DEFAULT_FN_ATTRS
+__stosb(unsigned char *__dst, unsigned char __x, size_t __n) {
+ __asm__("rep stosb" : : "D"(__dst), "a"(__x), "c"(__n)
+ : "%edi", "%ecx");
+}
+static __inline__ void __DEFAULT_FN_ATTRS
+__stosd(unsigned long *__dst, unsigned long __x, size_t __n) {
+ __asm__("rep stosl" : : "D"(__dst), "a"(__x), "c"(__n)
+ : "%edi", "%ecx");
+}
+static __inline__ void __DEFAULT_FN_ATTRS
+__stosw(unsigned short *__dst, unsigned short __x, size_t __n) {
+ __asm__("rep stosw" : : "D"(__dst), "a"(__x), "c"(__n)
+ : "%edi", "%ecx");
+}
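+/* Usage sketch (illustrative): __stosb is the rep-stosb analogue of memset,
+ * so __stosb(__buf, 0, sizeof(__buf)) zeroes __buf one byte at a time;
+ * __stosw and __stosd do the same with 2- and 4-byte stores. */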
+#endif
+#ifdef __x86_64__
+static __inline__ void __DEFAULT_FN_ATTRS
+__movsq(unsigned long long *__dst, unsigned long long const *__src, size_t __n) {
+ __asm__("rep movsq" : : "D"(__dst), "S"(__src), "c"(__n)
+ : "%edi", "%esi", "%ecx");
+}
+static __inline__ void __DEFAULT_FN_ATTRS
+__stosq(unsigned __int64 *__dst, unsigned __int64 __x, size_t __n) {
+ __asm__("rep stosq" : : "D"(__dst), "a"(__x), "c"(__n)
+ : "%edi", "%ecx");
+}
+#endif
+
+/*----------------------------------------------------------------------------*\
+|* Misc
+\*----------------------------------------------------------------------------*/
+static __inline__ void * __DEFAULT_FN_ATTRS
+_AddressOfReturnAddress(void) {
+ return (void*)((char*)__builtin_frame_address(0) + sizeof(void*));
+}
+static __inline__ void * __DEFAULT_FN_ATTRS
+_ReturnAddress(void) {
+ return __builtin_return_address(0);
+}
+#if defined(__i386__) || defined(__x86_64__)
+static __inline__ void __DEFAULT_FN_ATTRS
+__cpuid(int __info[4], int __level) {
+ __asm__ ("cpuid" : "=a"(__info[0]), "=b" (__info[1]), "=c"(__info[2]), "=d"(__info[3])
+ : "a"(__level));
+}
+static __inline__ void __DEFAULT_FN_ATTRS
+__cpuidex(int __info[4], int __level, int __ecx) {
+ __asm__ ("cpuid" : "=a"(__info[0]), "=b" (__info[1]), "=c"(__info[2]), "=d"(__info[3])
+ : "a"(__level), "c"(__ecx));
+}
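+/* Usage sketch (illustrative): leaf 0 reports the maximum supported leaf in
+ * __info[0] and the 12-byte vendor string split across EBX, EDX, ECX, i.e.
+ * __info[1], __info[3], __info[2].
+ *
+ *   int __info[4];
+ *   __cpuid(__info, 0);  // e.g. "Genu" "ineI" "ntel" on Intel parts
+ */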
+static __inline__ unsigned __int64 __cdecl __DEFAULT_FN_ATTRS
+_xgetbv(unsigned int __xcr_no) {
+ unsigned int __eax, __edx;
+ __asm__ ("xgetbv" : "=a" (__eax), "=d" (__edx) : "c" (__xcr_no));
+ return ((unsigned __int64)__edx << 32) | __eax;
+}
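+/* Usage sketch (illustrative): checking that the OS saves SSE and AVX state
+ * (XCR0 bits 1 and 2) before taking an AVX code path.
+ *
+ *   unsigned __int64 __xcr0 = _xgetbv(_XCR_XFEATURE_ENABLED_MASK);
+ *   int __avx_usable = (__xcr0 & 0x6) == 0x6;
+ */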
+static __inline__ void __DEFAULT_FN_ATTRS
+__halt(void) {
+ __asm__ volatile ("hlt");
+}
+#endif
+
+/*----------------------------------------------------------------------------*\
+|* Privileged intrinsics
+\*----------------------------------------------------------------------------*/
+#if defined(__i386__) || defined(__x86_64__)
+static __inline__ unsigned __int64 __DEFAULT_FN_ATTRS
+__readmsr(unsigned long __register) {
+ // Loads the contents of a 64-bit model specific register (MSR) specified in
+ // the ECX register into registers EDX:EAX. The EDX register is loaded with
+ // the high-order 32 bits of the MSR and the EAX register is loaded with the
+ // low-order 32 bits. If less than 64 bits are implemented in the MSR being
+ // read, the values returned to EDX:EAX in unimplemented bit locations are
+ // undefined.
+ unsigned long __edx;
+ unsigned long __eax;
+ __asm__ ("rdmsr" : "=d"(__edx), "=a"(__eax) : "c"(__register));
+ return (((unsigned __int64)__edx) << 32) | (unsigned __int64)__eax;
+}
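+/* Usage sketch (illustrative): RDMSR faults outside ring 0, so this is only
+ * meaningful in kernel-mode code, e.g. reading IA32_APIC_BASE:
+ *
+ *   unsigned __int64 __apic_base = __readmsr(0x1B);
+ */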
+
+static __inline__ unsigned long __DEFAULT_FN_ATTRS
+__readcr3(void) {
+ unsigned long __cr3_val;
+ __asm__ __volatile__ ("mov %%cr3, %0" : "=q"(__cr3_val) : : "memory");
+ return __cr3_val;
+}
+
+static __inline__ void __DEFAULT_FN_ATTRS
+__writecr3(unsigned int __cr3_val) {
+ __asm__ ("mov %0, %%cr3" : : "q"(__cr3_val) : "memory");
+}
+#endif
+
+#ifdef __cplusplus
+}
+#endif
+
+#undef __DEFAULT_FN_ATTRS
+
+#endif /* __INTRIN_H */
+#endif /* _MSC_VER */
diff --git a/current/clang-include/inttypes.h b/current/clang-include/inttypes.h
new file mode 100644
index 0000000..1d8eaba
--- /dev/null
+++ b/current/clang-include/inttypes.h
@@ -0,0 +1,106 @@
+/*===---- inttypes.h - Standard header for integer printf macros ----------===*\
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+ * THE SOFTWARE.
+ *
+\*===----------------------------------------------------------------------===*/
+
+#ifndef __CLANG_INTTYPES_H
+#define __CLANG_INTTYPES_H
+
+#if defined(_MSC_VER) && _MSC_VER < 1800
+#error MSVC does not have inttypes.h prior to Visual Studio 2013
+#endif
+
+#include_next <inttypes.h>
+
+#if defined(_MSC_VER) && _MSC_VER < 1900
+/* MSVC headers define int32_t as int, but PRIx32 as "lx" instead of "x".
+ * This triggers format warnings, so fix it up here. */
+#undef PRId32
+#undef PRIdLEAST32
+#undef PRIdFAST32
+#undef PRIi32
+#undef PRIiLEAST32
+#undef PRIiFAST32
+#undef PRIo32
+#undef PRIoLEAST32
+#undef PRIoFAST32
+#undef PRIu32
+#undef PRIuLEAST32
+#undef PRIuFAST32
+#undef PRIx32
+#undef PRIxLEAST32
+#undef PRIxFAST32
+#undef PRIX32
+#undef PRIXLEAST32
+#undef PRIXFAST32
+
+#undef SCNd32
+#undef SCNdLEAST32
+#undef SCNdFAST32
+#undef SCNi32
+#undef SCNiLEAST32
+#undef SCNiFAST32
+#undef SCNo32
+#undef SCNoLEAST32
+#undef SCNoFAST32
+#undef SCNu32
+#undef SCNuLEAST32
+#undef SCNuFAST32
+#undef SCNx32
+#undef SCNxLEAST32
+#undef SCNxFAST32
+
+#define PRId32 "d"
+#define PRIdLEAST32 "d"
+#define PRIdFAST32 "d"
+#define PRIi32 "i"
+#define PRIiLEAST32 "i"
+#define PRIiFAST32 "i"
+#define PRIo32 "o"
+#define PRIoLEAST32 "o"
+#define PRIoFAST32 "o"
+#define PRIu32 "u"
+#define PRIuLEAST32 "u"
+#define PRIuFAST32 "u"
+#define PRIx32 "x"
+#define PRIxLEAST32 "x"
+#define PRIxFAST32 "x"
+#define PRIX32 "X"
+#define PRIXLEAST32 "X"
+#define PRIXFAST32 "X"
+
+#define SCNd32 "d"
+#define SCNdLEAST32 "d"
+#define SCNdFAST32 "d"
+#define SCNi32 "i"
+#define SCNiLEAST32 "i"
+#define SCNiFAST32 "i"
+#define SCNo32 "o"
+#define SCNoLEAST32 "o"
+#define SCNoFAST32 "o"
+#define SCNu32 "u"
+#define SCNuLEAST32 "u"
+#define SCNuFAST32 "u"
+#define SCNx32 "x"
+#define SCNxLEAST32 "x"
+#define SCNxFAST32 "x"
+#endif
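+
+/* Illustrative example (assumes <stdio.h>): with the 32-bit PRI macros
+ * redefined above, the following prints "beef" with no -Wformat warning
+ * when clang targets the MSVC headers.
+ *
+ *   int32_t __v = 48879;
+ *   printf("%" PRIx32 "\n", __v);
+ */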
+
+#endif /* __CLANG_INTTYPES_H */
diff --git a/current/clang-include/iso646.h b/current/clang-include/iso646.h
new file mode 100644
index 0000000..dca13c5
--- /dev/null
+++ b/current/clang-include/iso646.h
@@ -0,0 +1,43 @@
+/*===---- iso646.h - Standard header for alternate spellings of operators---===
+ *
+ * Copyright (c) 2008 Eli Friedman
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+ * THE SOFTWARE.
+ *
+ *===-----------------------------------------------------------------------===
+ */
+
+#ifndef __ISO646_H
+#define __ISO646_H
+
+#ifndef __cplusplus
+#define and &&
+#define and_eq &=
+#define bitand &
+#define bitor |
+#define compl ~
+#define not !
+#define not_eq !=
+#define or ||
+#define or_eq |=
+#define xor ^
+#define xor_eq ^=
+#endif
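+
+/* Illustrative example: with these macros, the following two conditions are
+ * token-for-token identical in C (C++ treats the spellings as keywords, so
+ * the macros are only defined for C above).
+ *
+ *   if (__ready and not __busy) { }
+ *   if (__ready && !__busy)     { }
+ */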
+
+#endif /* __ISO646_H */
diff --git a/current/clang-include/limits.h b/current/clang-include/limits.h
new file mode 100644
index 0000000..f04187c
--- /dev/null
+++ b/current/clang-include/limits.h
@@ -0,0 +1,118 @@
+/*===---- limits.h - Standard header for integer sizes --------------------===*\
+ *
+ * Copyright (c) 2009 Chris Lattner
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+ * THE SOFTWARE.
+ *
+\*===----------------------------------------------------------------------===*/
+
+#ifndef __CLANG_LIMITS_H
+#define __CLANG_LIMITS_H
+
+/* The system's limits.h may, in turn, try to #include_next GCC's limits.h.
+ Avert this #include_next madness. */
+#if defined __GNUC__ && !defined _GCC_LIMITS_H_
+#define _GCC_LIMITS_H_
+#endif
+
+/* System headers include a number of constants from POSIX in <limits.h>.
+ Include it if we're hosted. */
+#if __STDC_HOSTED__ && __has_include_next(<limits.h>)
+#include_next <limits.h>
+#endif
+
+/* Many system headers try to "help us out" by defining these. No really, we
+ know how big each datatype is. */
+#undef SCHAR_MIN
+#undef SCHAR_MAX
+#undef UCHAR_MAX
+#undef SHRT_MIN
+#undef SHRT_MAX
+#undef USHRT_MAX
+#undef INT_MIN
+#undef INT_MAX
+#undef UINT_MAX
+#undef LONG_MIN
+#undef LONG_MAX
+#undef ULONG_MAX
+
+#undef CHAR_BIT
+#undef CHAR_MIN
+#undef CHAR_MAX
+
+/* C90/99 5.2.4.2.1 */
+#define SCHAR_MAX __SCHAR_MAX__
+#define SHRT_MAX __SHRT_MAX__
+#define INT_MAX __INT_MAX__
+#define LONG_MAX __LONG_MAX__
+
+#define SCHAR_MIN (-__SCHAR_MAX__-1)
+#define SHRT_MIN (-__SHRT_MAX__ -1)
+#define INT_MIN (-__INT_MAX__ -1)
+#define LONG_MIN (-__LONG_MAX__ -1L)
+
+#define UCHAR_MAX (__SCHAR_MAX__*2 +1)
+#define USHRT_MAX (__SHRT_MAX__ *2 +1)
+#define UINT_MAX (__INT_MAX__ *2U +1U)
+#define ULONG_MAX (__LONG_MAX__ *2UL+1UL)
+
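+/* Worked example (illustrative): on a target where __SCHAR_MAX__ is 127,
+ * UCHAR_MAX expands to 127*2 + 1 == 255; each unsigned maximum is derived
+ * as 2*MAX + 1 so only the signed maxima need to be compiler builtins. */
+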
+#ifndef MB_LEN_MAX
+#define MB_LEN_MAX 1
+#endif
+
+#define CHAR_BIT __CHAR_BIT__
+
+#ifdef __CHAR_UNSIGNED__ /* -funsigned-char */
+#define CHAR_MIN 0
+#define CHAR_MAX UCHAR_MAX
+#else
+#define CHAR_MIN SCHAR_MIN
+#define CHAR_MAX __SCHAR_MAX__
+#endif
+
+/* C99 5.2.4.2.1: Added long long.
+ C++11 18.3.3.2: same contents as the Standard C Library header <limits.h>.
+ */
+#if __STDC_VERSION__ >= 199901L || __cplusplus >= 201103L
+
+#undef LLONG_MIN
+#undef LLONG_MAX
+#undef ULLONG_MAX
+
+#define LLONG_MAX __LONG_LONG_MAX__
+#define LLONG_MIN (-__LONG_LONG_MAX__-1LL)
+#define ULLONG_MAX (__LONG_LONG_MAX__*2ULL+1ULL)
+#endif
+
+/* LONG_LONG_MIN/LONG_LONG_MAX/ULONG_LONG_MAX are a GNU extension. It's too bad
+ that we don't have something like #pragma poison that could be used to
+ deprecate a macro - the code should just use LLONG_MAX and friends.
+ */
+#if defined(__GNU_LIBRARY__) ? defined(__USE_GNU) : !defined(__STRICT_ANSI__)
+
+#undef LONG_LONG_MIN
+#undef LONG_LONG_MAX
+#undef ULONG_LONG_MAX
+
+#define LONG_LONG_MAX __LONG_LONG_MAX__
+#define LONG_LONG_MIN (-__LONG_LONG_MAX__-1LL)
+#define ULONG_LONG_MAX (__LONG_LONG_MAX__*2ULL+1ULL)
+#endif
+
+#endif /* __CLANG_LIMITS_H */
diff --git a/current/clang-include/lzcntintrin.h b/current/clang-include/lzcntintrin.h
new file mode 100644
index 0000000..4c00e42
--- /dev/null
+++ b/current/clang-include/lzcntintrin.h
@@ -0,0 +1,68 @@
+/*===---- lzcntintrin.h - LZCNT intrinsics ---------------------------------===
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+ * THE SOFTWARE.
+ *
+ *===-----------------------------------------------------------------------===
+ */
+
+#if !defined __X86INTRIN_H && !defined __IMMINTRIN_H
+#error "Never use <lzcntintrin.h> directly; include <x86intrin.h> instead."
+#endif
+
+#ifndef __LZCNTINTRIN_H
+#define __LZCNTINTRIN_H
+
+/* Define the default attributes for the functions in this file. */
+#define __DEFAULT_FN_ATTRS __attribute__((__always_inline__, __nodebug__, __target__("lzcnt")))
+
+static __inline__ unsigned short __DEFAULT_FN_ATTRS
+__lzcnt16(unsigned short __X)
+{
+ return __X ? __builtin_clzs(__X) : 16;
+}
+
+static __inline__ unsigned int __DEFAULT_FN_ATTRS
+__lzcnt32(unsigned int __X)
+{
+ return __X ? __builtin_clz(__X) : 32;
+}
+
+static __inline__ unsigned int __DEFAULT_FN_ATTRS
+_lzcnt_u32(unsigned int __X)
+{
+ return __X ? __builtin_clz(__X) : 32;
+}
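+
+/* Worked example (illustrative): _lzcnt_u32(0x00010000) returns 15 (the
+ * zero bits above bit 16), and _lzcnt_u32(0) returns the operand width, 32,
+ * matching the LZCNT instruction rather than plain BSR. */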
+
+#ifdef __x86_64__
+static __inline__ unsigned long long __DEFAULT_FN_ATTRS
+__lzcnt64(unsigned long long __X)
+{
+ return __X ? __builtin_clzll(__X) : 64;
+}
+
+static __inline__ unsigned long long __DEFAULT_FN_ATTRS
+_lzcnt_u64(unsigned long long __X)
+{
+ return __X ? __builtin_clzll(__X) : 64;
+}
+#endif
+
+#undef __DEFAULT_FN_ATTRS
+
+#endif /* __LZCNTINTRIN_H */
diff --git a/current/clang-include/mm3dnow.h b/current/clang-include/mm3dnow.h
new file mode 100644
index 0000000..294866c
--- /dev/null
+++ b/current/clang-include/mm3dnow.h
@@ -0,0 +1,171 @@
+/*===---- mm3dnow.h - 3DNow! intrinsics ------------------------------------===
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+ * THE SOFTWARE.
+ *
+ *===-----------------------------------------------------------------------===
+ */
+
+#ifndef _MM3DNOW_H_INCLUDED
+#define _MM3DNOW_H_INCLUDED
+
+#include <mmintrin.h>
+#include <prfchwintrin.h>
+
+typedef float __v2sf __attribute__((__vector_size__(8)));
+
+/* Define the default attributes for the functions in this file. */
+#define __DEFAULT_FN_ATTRS __attribute__((__always_inline__, __nodebug__, __target__("3dnow")))
+
+static __inline__ void __DEFAULT_FN_ATTRS
+_m_femms(void) {
+ __builtin_ia32_femms();
+}
+
+static __inline__ __m64 __DEFAULT_FN_ATTRS
+_m_pavgusb(__m64 __m1, __m64 __m2) {
+ return (__m64)__builtin_ia32_pavgusb((__v8qi)__m1, (__v8qi)__m2);
+}
+
+static __inline__ __m64 __DEFAULT_FN_ATTRS
+_m_pf2id(__m64 __m) {
+ return (__m64)__builtin_ia32_pf2id((__v2sf)__m);
+}
+
+static __inline__ __m64 __DEFAULT_FN_ATTRS
+_m_pfacc(__m64 __m1, __m64 __m2) {
+ return (__m64)__builtin_ia32_pfacc((__v2sf)__m1, (__v2sf)__m2);
+}
+
+static __inline__ __m64 __DEFAULT_FN_ATTRS
+_m_pfadd(__m64 __m1, __m64 __m2) {
+ return (__m64)__builtin_ia32_pfadd((__v2sf)__m1, (__v2sf)__m2);
+}
+
+static __inline__ __m64 __DEFAULT_FN_ATTRS
+_m_pfcmpeq(__m64 __m1, __m64 __m2) {
+ return (__m64)__builtin_ia32_pfcmpeq((__v2sf)__m1, (__v2sf)__m2);
+}
+
+static __inline__ __m64 __DEFAULT_FN_ATTRS
+_m_pfcmpge(__m64 __m1, __m64 __m2) {
+ return (__m64)__builtin_ia32_pfcmpge((__v2sf)__m1, (__v2sf)__m2);
+}
+
+static __inline__ __m64 __DEFAULT_FN_ATTRS
+_m_pfcmpgt(__m64 __m1, __m64 __m2) {
+ return (__m64)__builtin_ia32_pfcmpgt((__v2sf)__m1, (__v2sf)__m2);
+}
+
+static __inline__ __m64 __DEFAULT_FN_ATTRS
+_m_pfmax(__m64 __m1, __m64 __m2) {
+ return (__m64)__builtin_ia32_pfmax((__v2sf)__m1, (__v2sf)__m2);
+}
+
+static __inline__ __m64 __DEFAULT_FN_ATTRS
+_m_pfmin(__m64 __m1, __m64 __m2) {
+ return (__m64)__builtin_ia32_pfmin((__v2sf)__m1, (__v2sf)__m2);
+}
+
+static __inline__ __m64 __DEFAULT_FN_ATTRS
+_m_pfmul(__m64 __m1, __m64 __m2) {
+ return (__m64)__builtin_ia32_pfmul((__v2sf)__m1, (__v2sf)__m2);
+}
+
+static __inline__ __m64 __DEFAULT_FN_ATTRS
+_m_pfrcp(__m64 __m) {
+ return (__m64)__builtin_ia32_pfrcp((__v2sf)__m);
+}
+
+static __inline__ __m64 __DEFAULT_FN_ATTRS
+_m_pfrcpit1(__m64 __m1, __m64 __m2) {
+ return (__m64)__builtin_ia32_pfrcpit1((__v2sf)__m1, (__v2sf)__m2);
+}
+
+static __inline__ __m64 __DEFAULT_FN_ATTRS
+_m_pfrcpit2(__m64 __m1, __m64 __m2) {
+ return (__m64)__builtin_ia32_pfrcpit2((__v2sf)__m1, (__v2sf)__m2);
+}
+
+static __inline__ __m64 __DEFAULT_FN_ATTRS
+_m_pfrsqrt(__m64 __m) {
+ return (__m64)__builtin_ia32_pfrsqrt((__v2sf)__m);
+}
+
+static __inline__ __m64 __DEFAULT_FN_ATTRS
+_m_pfrsqrtit1(__m64 __m1, __m64 __m2) {
+ return (__m64)__builtin_ia32_pfrsqit1((__v2sf)__m1, (__v2sf)__m2);
+}
+
+static __inline__ __m64 __DEFAULT_FN_ATTRS
+_m_pfsub(__m64 __m1, __m64 __m2) {
+ return (__m64)__builtin_ia32_pfsub((__v2sf)__m1, (__v2sf)__m2);
+}
+
+static __inline__ __m64 __DEFAULT_FN_ATTRS
+_m_pfsubr(__m64 __m1, __m64 __m2) {
+ return (__m64)__builtin_ia32_pfsubr((__v2sf)__m1, (__v2sf)__m2);
+}
+
+static __inline__ __m64 __DEFAULT_FN_ATTRS
+_m_pi2fd(__m64 __m) {
+ return (__m64)__builtin_ia32_pi2fd((__v2si)__m);
+}
+
+static __inline__ __m64 __DEFAULT_FN_ATTRS
+_m_pmulhrw(__m64 __m1, __m64 __m2) {
+ return (__m64)__builtin_ia32_pmulhrw((__v4hi)__m1, (__v4hi)__m2);
+}
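+
+/* Illustrative usage sketch (editorial addition): 3DNow! treats an __m64 as
+ * two packed floats. One way to move float pairs in and out, assuming a
+ * target compiled with 3DNow! enabled (-m3dnow), is through a union:
+ *
+ *   union __f2 { __m64 v; float f[2]; };
+ *   union __f2 __x = { .f = { 1.0f, 2.0f } };
+ *   union __f2 __y = { .f = { 3.0f, 4.0f } };
+ *   __x.v = _m_pfadd(__x.v, __y.v);  // { 4.0f, 6.0f }
+ *   _m_femms();                      // clear the MMX state before x87 code
+ */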
+
+/* Handle the 3dnowa instructions here. */
+#undef __DEFAULT_FN_ATTRS
+#define __DEFAULT_FN_ATTRS __attribute__((__always_inline__, __nodebug__, __target__("3dnowa")))
+
+static __inline__ __m64 __DEFAULT_FN_ATTRS
+_m_pf2iw(__m64 __m) {
+ return (__m64)__builtin_ia32_pf2iw((__v2sf)__m);
+}
+
+static __inline__ __m64 __DEFAULT_FN_ATTRS
+_m_pfnacc(__m64 __m1, __m64 __m2) {
+ return (__m64)__builtin_ia32_pfnacc((__v2sf)__m1, (__v2sf)__m2);
+}
+
+static __inline__ __m64 __DEFAULT_FN_ATTRS
+_m_pfpnacc(__m64 __m1, __m64 __m2) {
+ return (__m64)__builtin_ia32_pfpnacc((__v2sf)__m1, (__v2sf)__m2);
+}
+
+static __inline__ __m64 __DEFAULT_FN_ATTRS
+_m_pi2fw(__m64 __m) {
+ return (__m64)__builtin_ia32_pi2fw((__v2si)__m);
+}
+
+static __inline__ __m64 __DEFAULT_FN_ATTRS
+_m_pswapdsf(__m64 __m) {
+ return (__m64)__builtin_ia32_pswapdsf((__v2sf)__m);
+}
+
+static __inline__ __m64 __DEFAULT_FN_ATTRS
+_m_pswapdsi(__m64 __m) {
+ return (__m64)__builtin_ia32_pswapdsi((__v2si)__m);
+}
+
+#undef __DEFAULT_FN_ATTRS
+
+#endif
diff --git a/current/clang-include/mm_malloc.h b/current/clang-include/mm_malloc.h
new file mode 100644
index 0000000..305afd3
--- /dev/null
+++ b/current/clang-include/mm_malloc.h
@@ -0,0 +1,75 @@
+/*===---- mm_malloc.h - Allocating and Freeing Aligned Memory Blocks -------===
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+ * THE SOFTWARE.
+ *
+ *===-----------------------------------------------------------------------===
+ */
+
+#ifndef __MM_MALLOC_H
+#define __MM_MALLOC_H
+
+#include <stdlib.h>
+
+#ifdef _WIN32
+#include <malloc.h>
+#else
+#ifndef __cplusplus
+extern int posix_memalign(void **__memptr, size_t __alignment, size_t __size);
+#else
+// Some systems (e.g. those with GNU libc) declare posix_memalign with an
+// exception specifier. Via an "egregious workaround" in
+// Sema::CheckEquivalentExceptionSpec, Clang accepts the following as a valid
+// redeclaration of glibc's declaration.
+extern "C" int posix_memalign(void **__memptr, size_t __alignment, size_t __size);
+#endif
+#endif
+
+#if !(defined(_WIN32) && defined(_mm_malloc))
+static __inline__ void *__attribute__((__always_inline__, __nodebug__,
+ __malloc__))
+_mm_malloc(size_t __size, size_t __align)
+{
+ if (__align == 1) {
+ return malloc(__size);
+ }
+
+ if (!(__align & (__align - 1)) && __align < sizeof(void *))
+ __align = sizeof(void *);
+
+ void *__mallocedMemory;
+#if defined(__MINGW32__)
+ __mallocedMemory = __mingw_aligned_malloc(__size, __align);
+#elif defined(_WIN32)
+ __mallocedMemory = _aligned_malloc(__size, __align);
+#else
+ if (posix_memalign(&__mallocedMemory, __align, __size))
+ return 0;
+#endif
+
+ return __mallocedMemory;
+}
+
+static __inline__ void __attribute__((__always_inline__, __nodebug__))
+_mm_free(void *__p)
+{
+#if defined(__MINGW32__)
+ __mingw_aligned_free(__p);
+#elif defined(_WIN32)
+ _aligned_free(__p);
+#else
+ free(__p);
+#endif
+}
+#endif
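+
+/* Illustrative usage sketch (editorial addition): a 64-byte-aligned buffer.
+ * Memory from _mm_malloc should be released with _mm_free so the matching
+ * platform allocator is used on each branch above.
+ *
+ *   float *__buf = (float *)_mm_malloc(1024 * sizeof(float), 64);
+ *   if (__buf) {
+ *     // ... the address of __buf is a multiple of 64 ...
+ *     _mm_free(__buf);
+ *   }
+ */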
+
+#endif /* __MM_MALLOC_H */
diff --git a/current/clang-include/mmintrin.h b/current/clang-include/mmintrin.h
new file mode 100644
index 0000000..cefd605
--- /dev/null
+++ b/current/clang-include/mmintrin.h
@@ -0,0 +1,1545 @@
+/*===---- mmintrin.h - MMX intrinsics --------------------------------------===
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+ * THE SOFTWARE.
+ *
+ *===-----------------------------------------------------------------------===
+ */
+
+#ifndef __MMINTRIN_H
+#define __MMINTRIN_H
+
+typedef long long __m64 __attribute__((__vector_size__(8)));
+
+typedef long long __v1di __attribute__((__vector_size__(8)));
+typedef int __v2si __attribute__((__vector_size__(8)));
+typedef short __v4hi __attribute__((__vector_size__(8)));
+typedef char __v8qi __attribute__((__vector_size__(8)));
+
+/* Define the default attributes for the functions in this file. */
+#define __DEFAULT_FN_ATTRS __attribute__((__always_inline__, __nodebug__, __target__("mmx")))
+
+/// \brief Clears the MMX state by setting the state of the x87 stack registers
+/// to empty.
+///
+/// \headerfile <x86intrin.h>
+///
+/// This intrinsic corresponds to the \c EMMS instruction.
+///
+static __inline__ void __DEFAULT_FN_ATTRS
+_mm_empty(void)
+{
+ __builtin_ia32_emms();
+}
+
+/// \brief Constructs a 64-bit integer vector, setting the lower 32 bits to the
+/// value of the 32-bit integer parameter and setting the upper 32 bits to 0.
+///
+/// \headerfile <x86intrin.h>
+///
+/// This intrinsic corresponds to the \c VMOVD / MOVD instruction.
+///
+/// \param __i
+/// A 32-bit integer value.
+/// \returns A 64-bit integer vector. The lower 32 bits contain the value of the
+/// parameter. The upper 32 bits are set to 0.
+static __inline__ __m64 __DEFAULT_FN_ATTRS
+_mm_cvtsi32_si64(int __i)
+{
+ return (__m64)__builtin_ia32_vec_init_v2si(__i, 0);
+}
+
+/// \brief Returns the lower 32 bits of a 64-bit integer vector as a 32-bit
+/// signed integer.
+///
+/// \headerfile <x86intrin.h>
+///
+/// This intrinsic corresponds to the \c VMOVD / MOVD instruction.
+///
+/// \param __m
+/// A 64-bit integer vector.
+/// \returns A 32-bit signed integer value containing the lower 32 bits of the
+/// parameter.
+static __inline__ int __DEFAULT_FN_ATTRS
+_mm_cvtsi64_si32(__m64 __m)
+{
+ return __builtin_ia32_vec_ext_v2si((__v2si)__m, 0);
+}
+
+/// \brief Casts a 64-bit signed integer value into a 64-bit integer vector.
+///
+/// \headerfile <x86intrin.h>
+///
+/// This intrinsic corresponds to the \c VMOVQ / MOVD instruction.
+///
+/// \param __i
+/// A 64-bit signed integer.
+/// \returns A 64-bit integer vector containing the same bitwise pattern as the
+/// parameter.
+static __inline__ __m64 __DEFAULT_FN_ATTRS
+_mm_cvtsi64_m64(long long __i)
+{
+ return (__m64)__i;
+}
+
+/// \brief Casts a 64-bit integer vector into a 64-bit signed integer value.
+///
+/// \headerfile <x86intrin.h>
+///
+/// This intrinsic corresponds to the \c VMOVQ / MOVD instruction.
+///
+/// \param __m
+/// A 64-bit integer vector.
+/// \returns A 64-bit signed integer containing the same bitwise pattern as the
+/// parameter.
+static __inline__ long long __DEFAULT_FN_ATTRS
+_mm_cvtm64_si64(__m64 __m)
+{
+ return (long long)__m;
+}
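+
+/* Illustrative usage sketch (editorial addition): the _mm_cvt* functions
+ * above move scalars in and out of __m64 without changing bit patterns.
+ *
+ *   __m64 __v = _mm_cvtsi32_si64(42);   // lower 32 bits = 42, upper = 0
+ *   int   __i = _mm_cvtsi64_si32(__v);  // 42
+ *   long long __q =
+ *       _mm_cvtm64_si64(_mm_cvtsi64_m64(0x0123456789ABCDEFLL));  // unchanged
+ */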
+
+/// \brief Converts 16-bit signed integers from both 64-bit integer vector
+/// parameters of [4 x i16] into 8-bit signed integer values, and constructs
+/// a 64-bit integer vector of [8 x i8] as the result. Positive values
+/// greater than 0x7F are saturated to 0x7F. Negative values less than 0x80
+/// are saturated to 0x80.
+///
+/// \headerfile <x86intrin.h>
+///
+/// This intrinsic corresponds to the \c PACKSSWB instruction.
+///
+/// \param __m1
+/// A 64-bit integer vector of [4 x i16]. Each 16-bit element is treated as a
+/// 16-bit signed integer and is converted to an 8-bit signed integer with
+/// saturation. Positive values greater than 0x7F are saturated to 0x7F.
+/// Negative values less than 0x80 are saturated to 0x80. The converted
+/// [4 x i8] values are written to the lower 32 bits of the result.
+/// \param __m2
+/// A 64-bit integer vector of [4 x i16]. Each 16-bit element is treated as a
+/// 16-bit signed integer and is converted to an 8-bit signed integer with
+/// saturation. Positive values greater than 0x7F are saturated to 0x7F.
+/// Negative values less than 0x80 are saturated to 0x80. The converted
+/// [4 x i8] values are written to the upper 32 bits of the result.
+/// \returns A 64-bit integer vector of [8 x i8] containing the converted
+/// values.
+static __inline__ __m64 __DEFAULT_FN_ATTRS
+_mm_packs_pi16(__m64 __m1, __m64 __m2)
+{
+ return (__m64)__builtin_ia32_packsswb((__v4hi)__m1, (__v4hi)__m2);
+}
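+
+/* Illustrative usage sketch (editorial addition): signed saturating pack.
+ * 1000 exceeds the i8 range and saturates to 127; -1000 saturates to -128.
+ * (_mm_setr_pi16 lists elements low to high; it is defined later in this
+ * header.)
+ *
+ *   __m64 __a = _mm_setr_pi16(-5, 5, -1000, 1000);
+ *   __m64 __r = _mm_packs_pi16(__a, __a);
+ *   // bytes of __r, low to high: -5, 5, -128, 127, -5, 5, -128, 127
+ */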
+
+/// \brief Converts 32-bit signed integers from both 64-bit integer vector
+/// parameters of [2 x i32] into 16-bit signed integer values, and constructs
+/// a 64-bit integer vector of [4 x i16] as the result. Positive values
+/// greater than 0x7FFF are saturated to 0x7FFF. Negative values less than
+/// 0x8000 are saturated to 0x8000.
+///
+/// \headerfile <x86intrin.h>
+///
+/// This intrinsic corresponds to the \c PACKSSDW instruction.
+///
+/// \param __m1
+/// A 64-bit integer vector of [2 x i32]. Each 32-bit element is treated as a
+/// 32-bit signed integer and is converted to a 16-bit signed integer with
+/// saturation. Positive values greater than 0x7FFF are saturated to 0x7FFF.
+/// Negative values less than 0x8000 are saturated to 0x8000. The converted
+/// [2 x i16] values are written to the lower 32 bits of the result.
+/// \param __m2
+/// A 64-bit integer vector of [2 x i32]. Each 32-bit element is treated as a
+/// 32-bit signed integer and is converted to a 16-bit signed integer with
+/// saturation. Positive values greater than 0x7FFF are saturated to 0x7FFF.
+/// Negative values less than 0x8000 are saturated to 0x8000. The converted
+/// [2 x i16] values are written to the upper 32 bits of the result.
+/// \returns A 64-bit integer vector of [4 x i16] containing the converted
+/// values.
+static __inline__ __m64 __DEFAULT_FN_ATTRS
+_mm_packs_pi32(__m64 __m1, __m64 __m2)
+{
+ return (__m64)__builtin_ia32_packssdw((__v2si)__m1, (__v2si)__m2);
+}
+
+/// \brief Converts 16-bit signed integers from both 64-bit integer vector
+/// parameters of [4 x i16] into 8-bit unsigned integer values, and
+/// constructs a 64-bit integer vector of [8 x i8] as the result. Values
+/// greater than 0xFF are saturated to 0xFF. Values less than 0 are saturated
+/// to 0.
+///
+/// \headerfile <x86intrin.h>
+///
+/// This intrinsic corresponds to the \c PACKUSWB instruction.
+///
+/// \param __m1
+/// A 64-bit integer vector of [4 x i16]. Each 16-bit element is treated as a
+/// 16-bit signed integer and is converted to an 8-bit unsigned integer with
+/// saturation. Values greater than 0xFF are saturated to 0xFF. Values less
+/// than 0 are saturated to 0. The converted [4 x i8] values are written to
+/// the lower 32 bits of the result.
+/// \param __m2
+/// A 64-bit integer vector of [4 x i16]. Each 16-bit element is treated as a
+/// 16-bit signed integer and is converted to an 8-bit unsigned integer with
+/// saturation. Values greater than 0xFF are saturated to 0xFF. Values less
+/// than 0 are saturated to 0. The converted [4 x i8] values are written to
+/// the upper 32 bits of the result.
+/// \returns A 64-bit integer vector of [8 x i8] containing the converted
+/// values.
+static __inline__ __m64 __DEFAULT_FN_ATTRS
+_mm_packs_pu16(__m64 __m1, __m64 __m2)
+{
+ return (__m64)__builtin_ia32_packuswb((__v4hi)__m1, (__v4hi)__m2);
+}
+
+/// \brief Unpacks the upper 32 bits from two 64-bit integer vectors of [8 x i8]
+/// and interleaves them into a 64-bit integer vector of [8 x i8].
+///
+/// \headerfile <x86intrin.h>
+///
+/// This intrinsic corresponds to the \c PUNPCKHBW instruction.
+///
+/// \param __m1
+/// A 64-bit integer vector of [8 x i8].
+/// Bits [39:32] are written to bits [7:0] of the result.
+/// Bits [47:40] are written to bits [23:16] of the result.
+/// Bits [55:48] are written to bits [39:32] of the result.
+/// Bits [63:56] are written to bits [55:48] of the result.
+/// \param __m2
+/// A 64-bit integer vector of [8 x i8].
+/// Bits [39:32] are written to bits [15:8] of the result.
+/// Bits [47:40] are written to bits [31:24] of the result.
+/// Bits [55:48] are written to bits [47:40] of the result.
+/// Bits [63:56] are written to bits [63:56] of the result.
+/// \returns A 64-bit integer vector of [8 x i8] containing the interleaved
+/// values.
+static __inline__ __m64 __DEFAULT_FN_ATTRS
+_mm_unpackhi_pi8(__m64 __m1, __m64 __m2)
+{
+ return (__m64)__builtin_ia32_punpckhbw((__v8qi)__m1, (__v8qi)__m2);
+}
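+
+/* Illustrative usage sketch (editorial addition): interleaving the upper
+ * halves of two byte vectors.
+ *
+ *   __m64 __a = _mm_setr_pi8(0, 1, 2, 3, 4, 5, 6, 7);  // low to high
+ *   __m64 __b = _mm_setr_pi8(10, 11, 12, 13, 14, 15, 16, 17);
+ *   __m64 __r = _mm_unpackhi_pi8(__a, __b);
+ *   // bytes of __r, low to high: 4, 14, 5, 15, 6, 16, 7, 17
+ */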
+
+/// \brief Unpacks the upper 32 bits from two 64-bit integer vectors of
+/// [4 x i16] and interleaves them into a 64-bit integer vector of [4 x i16].
+///
+/// \headerfile <x86intrin.h>
+///
+/// This intrinsic corresponds to the \c PUNPCKHWD instruction.
+///
+/// \param __m1
+/// A 64-bit integer vector of [4 x i16].
+/// Bits [47:32] are written to bits [15:0] of the result.
+/// Bits [63:48] are written to bits [47:32] of the result.
+/// \param __m2
+/// A 64-bit integer vector of [4 x i16].
+/// Bits [47:32] are written to bits [31:16] of the result.
+/// Bits [63:48] are written to bits [63:48] of the result.
+/// \returns A 64-bit integer vector of [4 x i16] containing the interleaved
+/// values.
+static __inline__ __m64 __DEFAULT_FN_ATTRS
+_mm_unpackhi_pi16(__m64 __m1, __m64 __m2)
+{
+ return (__m64)__builtin_ia32_punpckhwd((__v4hi)__m1, (__v4hi)__m2);
+}
+
+/// \brief Unpacks the upper 32 bits from two 64-bit integer vectors of
+/// [2 x i32] and interleaves them into a 64-bit integer vector of [2 x i32].
+///
+/// \headerfile <x86intrin.h>
+///
+/// This intrinsic corresponds to the \c PUNPCKHDQ instruction.
+///
+/// \param __m1
+/// A 64-bit integer vector of [2 x i32]. The upper 32 bits are written to
+/// the lower 32 bits of the result.
+/// \param __m2
+/// A 64-bit integer vector of [2 x i32]. The upper 32 bits are written to
+/// the upper 32 bits of the result.
+/// \returns A 64-bit integer vector of [2 x i32] containing the interleaved
+/// values.
+static __inline__ __m64 __DEFAULT_FN_ATTRS
+_mm_unpackhi_pi32(__m64 __m1, __m64 __m2)
+{
+ return (__m64)__builtin_ia32_punpckhdq((__v2si)__m1, (__v2si)__m2);
+}
+
+/// \brief Unpacks the lower 32 bits from two 64-bit integer vectors of [8 x i8]
+/// and interleaves them into a 64-bit integer vector of [8 x i8].
+///
+/// \headerfile <x86intrin.h>
+///
+/// This intrinsic corresponds to the \c PUNPCKLBW instruction.
+///
+/// \param __m1
+/// A 64-bit integer vector of [8 x i8].
+/// Bits [7:0] are written to bits [7:0] of the result.
+/// Bits [15:8] are written to bits [23:16] of the result.
+/// Bits [23:16] are written to bits [39:32] of the result.
+/// Bits [31:24] are written to bits [55:48] of the result.
+/// \param __m2
+/// A 64-bit integer vector of [8 x i8].
+/// Bits [7:0] are written to bits [15:8] of the result.
+/// Bits [15:8] are written to bits [31:24] of the result.
+/// Bits [23:16] are written to bits [47:40] of the result.
+/// Bits [31:24] are written to bits [63:56] of the result.
+/// \returns A 64-bit integer vector of [8 x i8] containing the interleaved
+/// values.
+static __inline__ __m64 __DEFAULT_FN_ATTRS
+_mm_unpacklo_pi8(__m64 __m1, __m64 __m2)
+{
+ return (__m64)__builtin_ia32_punpcklbw((__v8qi)__m1, (__v8qi)__m2);
+}
+
+/// \brief Unpacks the lower 32 bits from two 64-bit integer vectors of
+/// [4 x i16] and interleaves them into a 64-bit integer vector of [4 x i16].
+///
+/// \headerfile <x86intrin.h>
+///
+/// This intrinsic corresponds to the \c PUNPCKLWD instruction.
+///
+/// \param __m1
+/// A 64-bit integer vector of [4 x i16].
+/// Bits [15:0] are written to bits [15:0] of the result.
+/// Bits [31:16] are written to bits [47:32] of the result.
+/// \param __m2
+/// A 64-bit integer vector of [4 x i16].
+/// Bits [15:0] are written to bits [31:16] of the result.
+/// Bits [31:16] are written to bits [63:48] of the result.
+/// \returns A 64-bit integer vector of [4 x i16] containing the interleaved
+/// values.
+static __inline__ __m64 __DEFAULT_FN_ATTRS
+_mm_unpacklo_pi16(__m64 __m1, __m64 __m2)
+{
+ return (__m64)__builtin_ia32_punpcklwd((__v4hi)__m1, (__v4hi)__m2);
+}
+
+/// \brief Unpacks the lower 32 bits from two 64-bit integer vectors of
+/// [2 x i32] and interleaves them into a 64-bit integer vector of [2 x i32].
+///
+/// \headerfile <x86intrin.h>
+///
+/// This intrinsic corresponds to the \c PUNPCKLDQ instruction.
+///
+/// \param __m1
+/// A 64-bit integer vector of [2 x i32]. The lower 32 bits are written to
+/// the lower 32 bits of the result.
+/// \param __m2
+/// A 64-bit integer vector of [2 x i32]. The lower 32 bits are written to
+/// the upper 32 bits of the result.
+/// \returns A 64-bit integer vector of [2 x i32] containing the interleaved
+/// values.
+static __inline__ __m64 __DEFAULT_FN_ATTRS
+_mm_unpacklo_pi32(__m64 __m1, __m64 __m2)
+{
+ return (__m64)__builtin_ia32_punpckldq((__v2si)__m1, (__v2si)__m2);
+}
+
+/// \brief Adds each 8-bit integer element of the first 64-bit integer vector
+/// of [8 x i8] to the corresponding 8-bit integer element of the second
+/// 64-bit integer vector of [8 x i8]. The lower 8 bits of the results are
+/// packed into a 64-bit integer vector of [8 x i8].
+///
+/// \headerfile <x86intrin.h>
+///
+/// This intrinsic corresponds to the \c PADDB instruction.
+///
+/// \param __m1
+/// A 64-bit integer vector of [8 x i8].
+/// \param __m2
+/// A 64-bit integer vector of [8 x i8].
+/// \returns A 64-bit integer vector of [8 x i8] containing the sums of both
+/// parameters.
+static __inline__ __m64 __DEFAULT_FN_ATTRS
+_mm_add_pi8(__m64 __m1, __m64 __m2)
+{
+ return (__m64)__builtin_ia32_paddb((__v8qi)__m1, (__v8qi)__m2);
+}
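+
+/* Illustrative usage sketch (editorial addition): this addition wraps.
+ * 127 + 1 overflows i8 and wraps to -128; contrast _mm_adds_pi8 below,
+ * which saturates.
+ *
+ *   __m64 __r = _mm_add_pi8(_mm_set1_pi8(127), _mm_set1_pi8(1));
+ *   // every byte of __r is -128
+ */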
+
+/// \brief Adds each 16-bit integer element of the first 64-bit integer vector
+/// of [4 x i16] to the corresponding 16-bit integer element of the second
+/// 64-bit integer vector of [4 x i16]. The lower 16 bits of the results are
+/// packed into a 64-bit integer vector of [4 x i16].
+///
+/// \headerfile <x86intrin.h>
+///
+/// This intrinsic corresponds to the \c PADDW instruction.
+///
+/// \param __m1
+/// A 64-bit integer vector of [4 x i16].
+/// \param __m2
+/// A 64-bit integer vector of [4 x i16].
+/// \returns A 64-bit integer vector of [4 x i16] containing the sums of both
+/// parameters.
+static __inline__ __m64 __DEFAULT_FN_ATTRS
+_mm_add_pi16(__m64 __m1, __m64 __m2)
+{
+ return (__m64)__builtin_ia32_paddw((__v4hi)__m1, (__v4hi)__m2);
+}
+
+/// \brief Adds each 32-bit integer element of the first 64-bit integer vector
+/// of [2 x i32] to the corresponding 32-bit integer element of the second
+/// 64-bit integer vector of [2 x i32]. The lower 32 bits of the results are
+/// packed into a 64-bit integer vector of [2 x i32].
+///
+/// \headerfile <x86intrin.h>
+///
+/// This intrinsic corresponds to the \c PADDD instruction.
+///
+/// \param __m1
+/// A 64-bit integer vector of [2 x i32].
+/// \param __m2
+/// A 64-bit integer vector of [2 x i32].
+/// \returns A 64-bit integer vector of [2 x i32] containing the sums of both
+/// parameters.
+static __inline__ __m64 __DEFAULT_FN_ATTRS
+_mm_add_pi32(__m64 __m1, __m64 __m2)
+{
+ return (__m64)__builtin_ia32_paddd((__v2si)__m1, (__v2si)__m2);
+}
+
+/// \brief Adds each 8-bit signed integer element of the first 64-bit integer
+/// vector of [8 x i8] to the corresponding 8-bit signed integer element of
+/// the second 64-bit integer vector of [8 x i8]. Positive sums greater than
+/// 0x7F are saturated to 0x7F. Negative sums less than 0x80 are saturated to
+/// 0x80. The results are packed into a 64-bit integer vector of [8 x i8].
+///
+/// \headerfile <x86intrin.h>
+///
+/// This intrinsic corresponds to the \c PADDSB instruction.
+///
+/// \param __m1
+/// A 64-bit integer vector of [8 x i8].
+/// \param __m2
+/// A 64-bit integer vector of [8 x i8].
+/// \returns A 64-bit integer vector of [8 x i8] containing the saturated sums
+/// of both parameters.
+static __inline__ __m64 __DEFAULT_FN_ATTRS
+_mm_adds_pi8(__m64 __m1, __m64 __m2)
+{
+ return (__m64)__builtin_ia32_paddsb((__v8qi)__m1, (__v8qi)__m2);
+}
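+
+/* Illustrative usage sketch (editorial addition): the saturating
+ * counterpart of the wrapping example above.
+ *
+ *   __m64 __r = _mm_adds_pi8(_mm_set1_pi8(127), _mm_set1_pi8(1));
+ *   // every byte of __r stays at 127
+ */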
+
+/// \brief Adds each 16-bit signed integer element of the first 64-bit integer
+/// vector of [4 x i16] to the corresponding 16-bit signed integer element of
+/// the second 64-bit integer vector of [4 x i16]. Positive sums greater than
+/// 0x7FFF are saturated to 0x7FFF. Negative sums less than 0x8000 are
+/// saturated to 0x8000. The results are packed into a 64-bit integer vector
+/// of [4 x i16].
+///
+/// \headerfile <x86intrin.h>
+///
+/// This intrinsic corresponds to the \c PADDSW instruction.
+///
+/// \param __m1
+/// A 64-bit integer vector of [4 x i16].
+/// \param __m2
+/// A 64-bit integer vector of [4 x i16].
+/// \returns A 64-bit integer vector of [4 x i16] containing the saturated sums
+/// of both parameters.
+static __inline__ __m64 __DEFAULT_FN_ATTRS
+_mm_adds_pi16(__m64 __m1, __m64 __m2)
+{
+ return (__m64)__builtin_ia32_paddsw((__v4hi)__m1, (__v4hi)__m2);
+}
+
+/// \brief Adds each 8-bit unsigned integer element of the first 64-bit integer
+/// vector of [8 x i8] to the corresponding 8-bit unsigned integer element of
+/// the second 64-bit integer vector of [8 x i8]. Sums greater than 0xFF are
+/// saturated to 0xFF. The results are packed into a 64-bit integer vector of
+/// [8 x i8].
+///
+/// \headerfile <x86intrin.h>
+///
+/// This intrinsic corresponds to the \c PADDUSB instruction.
+///
+/// \param __m1
+/// A 64-bit integer vector of [8 x i8].
+/// \param __m2
+/// A 64-bit integer vector of [8 x i8].
+/// \returns A 64-bit integer vector of [8 x i8] containing the saturated
+/// unsigned sums of both parameters.
+static __inline__ __m64 __DEFAULT_FN_ATTRS
+_mm_adds_pu8(__m64 __m1, __m64 __m2)
+{
+ return (__m64)__builtin_ia32_paddusb((__v8qi)__m1, (__v8qi)__m2);
+}
+
+/// \brief Adds each 16-bit unsigned integer element of the first 64-bit integer
+/// vector of [4 x i16] to the corresponding 16-bit unsigned integer element
+/// of the second 64-bit integer vector of [4 x i16]. Sums greater than
+/// 0xFFFF are saturated to 0xFFFF. The results are packed into a 64-bit
+/// integer vector of [4 x i16].
+///
+/// \headerfile <x86intrin.h>
+///
+/// This intrinsic corresponds to the \c PADDUSW instruction.
+///
+/// \param __m1
+/// A 64-bit integer vector of [4 x i16].
+/// \param __m2
+/// A 64-bit integer vector of [4 x i16].
+/// \returns A 64-bit integer vector of [4 x i16] containing the saturated
+/// unsigned sums of both parameters.
+static __inline__ __m64 __DEFAULT_FN_ATTRS
+_mm_adds_pu16(__m64 __m1, __m64 __m2)
+{
+ return (__m64)__builtin_ia32_paddusw((__v4hi)__m1, (__v4hi)__m2);
+}
+
+/// \brief Subtracts each 8-bit integer element of the second 64-bit integer
+/// vector of [8 x i8] from the corresponding 8-bit integer element of the
+/// first 64-bit integer vector of [8 x i8]. The lower 8 bits of the results
+/// are packed into a 64-bit integer vector of [8 x i8].
+///
+/// \headerfile <x86intrin.h>
+///
+/// This intrinsic corresponds to the \c PSUBB instruction.
+///
+/// \param __m1
+/// A 64-bit integer vector of [8 x i8] containing the minuends.
+/// \param __m2
+/// A 64-bit integer vector of [8 x i8] containing the subtrahends.
+/// \returns A 64-bit integer vector of [8 x i8] containing the differences of
+/// both parameters.
+static __inline__ __m64 __DEFAULT_FN_ATTRS
+_mm_sub_pi8(__m64 __m1, __m64 __m2)
+{
+ return (__m64)__builtin_ia32_psubb((__v8qi)__m1, (__v8qi)__m2);
+}
+
+/// \brief Subtracts each 16-bit integer element of the second 64-bit integer
+/// vector of [4 x i16] from the corresponding 16-bit integer element of the
+/// first 64-bit integer vector of [4 x i16]. The lower 16 bits of the
+/// results are packed into a 64-bit integer vector of [4 x i16].
+///
+/// \headerfile <x86intrin.h>
+///
+/// This intrinsic corresponds to the \c PSUBW instruction.
+///
+/// \param __m1
+/// A 64-bit integer vector of [4 x i16] containing the minuends.
+/// \param __m2
+/// A 64-bit integer vector of [4 x i16] containing the subtrahends.
+/// \returns A 64-bit integer vector of [4 x i16] containing the differences of
+/// both parameters.
+static __inline__ __m64 __DEFAULT_FN_ATTRS
+_mm_sub_pi16(__m64 __m1, __m64 __m2)
+{
+ return (__m64)__builtin_ia32_psubw((__v4hi)__m1, (__v4hi)__m2);
+}
+
+/// \brief Subtracts each 32-bit integer element of the second 64-bit integer
+/// vector of [2 x i32] from the corresponding 32-bit integer element of the
+/// first 64-bit integer vector of [2 x i32]. The lower 32 bits of the
+/// results are packed into a 64-bit integer vector of [2 x i32].
+///
+/// \headerfile <x86intrin.h>
+///
+/// This intrinsic corresponds to the \c PSUBD instruction.
+///
+/// \param __m1
+/// A 64-bit integer vector of [2 x i32] containing the minuends.
+/// \param __m2
+/// A 64-bit integer vector of [2 x i32] containing the subtrahends.
+/// \returns A 64-bit integer vector of [2 x i32] containing the differences of
+/// both parameters.
+static __inline__ __m64 __DEFAULT_FN_ATTRS
+_mm_sub_pi32(__m64 __m1, __m64 __m2)
+{
+ return (__m64)__builtin_ia32_psubd((__v2si)__m1, (__v2si)__m2);
+}
+
+/// \brief Subtracts each 8-bit signed integer element of the second 64-bit
+/// integer vector of [8 x i8] from the corresponding 8-bit signed integer
+/// element of the first 64-bit integer vector of [8 x i8]. Positive results
+/// greater than 0x7F are saturated to 0x7F. Negative results less than 0x80
+/// are saturated to 0x80. The results are packed into a 64-bit integer
+/// vector of [8 x i8].
+///
+/// \headerfile <x86intrin.h>
+///
+/// This intrinsic corresponds to the \c PSUBSB instruction.
+///
+/// \param __m1
+/// A 64-bit integer vector of [8 x i8] containing the minuends.
+/// \param __m2
+/// A 64-bit integer vector of [8 x i8] containing the subtrahends.
+/// \returns A 64-bit integer vector of [8 x i8] containing the saturated
+/// differences of both parameters.
+static __inline__ __m64 __DEFAULT_FN_ATTRS
+_mm_subs_pi8(__m64 __m1, __m64 __m2)
+{
+ return (__m64)__builtin_ia32_psubsb((__v8qi)__m1, (__v8qi)__m2);
+}
+
+/// \brief Subtracts each 16-bit signed integer element of the second 64-bit
+/// integer vector of [4 x i16] from the corresponding 16-bit signed integer
+/// element of the first 64-bit integer vector of [4 x i16]. Positive results
+/// greater than 0x7FFF are saturated to 0x7FFF. Negative results less than
+/// 0x8000 are saturated to 0x8000. The results are packed into a 64-bit
+/// integer vector of [4 x i16].
+///
+/// \headerfile <x86intrin.h>
+///
+/// This intrinsic corresponds to the \c PSUBSW instruction.
+///
+/// \param __m1
+/// A 64-bit integer vector of [4 x i16] containing the minuends.
+/// \param __m2
+/// A 64-bit integer vector of [4 x i16] containing the subtrahends.
+/// \returns A 64-bit integer vector of [4 x i16] containing the saturated
+/// differences of both parameters.
+static __inline__ __m64 __DEFAULT_FN_ATTRS
+_mm_subs_pi16(__m64 __m1, __m64 __m2)
+{
+ return (__m64)__builtin_ia32_psubsw((__v4hi)__m1, (__v4hi)__m2);
+}
+
+/// \brief Subtracts each 8-bit unsigned integer element of the second 64-bit
+/// integer vector of [8 x i8] from the corresponding 8-bit unsigned integer
+/// element of the first 64-bit integer vector of [8 x i8]. If an element of
+/// the first vector is less than the corresponding element of the second
+/// vector, the result is saturated to 0. The results are packed into a
+/// 64-bit integer vector of [8 x i8].
+///
+/// \headerfile <x86intrin.h>
+///
+/// This intrinsic corresponds to the \c PSUBUSB instruction.
+///
+/// \param __m1
+/// A 64-bit integer vector of [8 x i8] containing the minuends.
+/// \param __m2
+/// A 64-bit integer vector of [8 x i8] containing the subtrahends.
+/// \returns A 64-bit integer vector of [8 x i8] containing the saturated
+/// differences of both parameters.
+static __inline__ __m64 __DEFAULT_FN_ATTRS
+_mm_subs_pu8(__m64 __m1, __m64 __m2)
+{
+ return (__m64)__builtin_ia32_psubusb((__v8qi)__m1, (__v8qi)__m2);
+}
+
+/// \brief Subtracts each 16-bit unsigned integer element of the second 64-bit
+/// integer vector of [4 x i16] from the corresponding 16-bit unsigned
+/// integer element of the first 64-bit integer vector of [4 x i16]. If an
+/// element of the first vector is less than the corresponding element of the
+/// second vector, the result is saturated to 0. The results are packed into
+/// a 64-bit integer vector of [4 x i16].
+///
+/// \headerfile <x86intrin.h>
+///
+/// This intrinsic corresponds to the \c PSUBUSW instruction.
+///
+/// \param __m1
+/// A 64-bit integer vector of [4 x i16] containing the minuends.
+/// \param __m2
+/// A 64-bit integer vector of [4 x i16] containing the subtrahends.
+/// \returns A 64-bit integer vector of [4 x i16] containing the saturated
+/// differences of both parameters.
+static __inline__ __m64 __DEFAULT_FN_ATTRS
+_mm_subs_pu16(__m64 __m1, __m64 __m2)
+{
+ return (__m64)__builtin_ia32_psubusw((__v4hi)__m1, (__v4hi)__m2);
+}
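+
+/* Illustrative usage sketch (editorial addition): unsigned saturating
+ * subtraction clamps at zero, giving a "difference or zero" primitive.
+ *
+ *   __m64 __a = _mm_setr_pi16(40, 30, 20, 10);
+ *   __m64 __r = _mm_subs_pu16(__a, _mm_set1_pi16(15));
+ *   // elements of __r, low to high: 25, 15, 5, 0
+ */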
+
+/// \brief Multiplies each 16-bit signed integer element of the first 64-bit
+/// integer vector of [4 x i16] by the corresponding 16-bit signed integer
+/// element of the second 64-bit integer vector of [4 x i16], producing four
+/// 32-bit products. Adds adjacent pairs of products to produce two 32-bit
+/// sums.
+/// The lower 32 bits of these two sums are packed into a 64-bit integer
+/// vector of [2 x i32]. For example, bits [15:0] of both parameters are
+/// multiplied, bits [31:16] of both parameters are multiplied, and the sum
+/// of both results is written to bits [31:0] of the result.
+///
+/// \headerfile <x86intrin.h>
+///
+/// This intrinsic corresponds to the \c PMADDWD instruction.
+///
+/// \param __m1
+/// A 64-bit integer vector of [4 x i16].
+/// \param __m2
+/// A 64-bit integer vector of [4 x i16].
+/// \returns A 64-bit integer vector of [2 x i32] containing the sums of
+/// products of both parameters.
+static __inline__ __m64 __DEFAULT_FN_ATTRS
+_mm_madd_pi16(__m64 __m1, __m64 __m2)
+{
+ return (__m64)__builtin_ia32_pmaddwd((__v4hi)__m1, (__v4hi)__m2);
+}
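+
+/* Illustrative usage sketch (editorial addition): pmaddwd is the classic
+ * dot-product building block. With the elements below (listed low to high),
+ * the two i32 results are 1*5 + 2*6 = 17 and 3*7 + 4*8 = 53.
+ *
+ *   __m64 __r = _mm_madd_pi16(_mm_setr_pi16(1, 2, 3, 4),
+ *                             _mm_setr_pi16(5, 6, 7, 8));
+ */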
+
+/// \brief Multiplies each 16-bit signed integer element of the first 64-bit
+/// integer vector of [4 x i16] by the corresponding 16-bit signed integer
+/// element of the second 64-bit integer vector of [4 x i16]. Packs the upper
+/// 16 bits of the 32-bit products into a 64-bit integer vector of [4 x i16].
+///
+/// \headerfile <x86intrin.h>
+///
+/// This intrinsic corresponds to the \c PMULHW instruction.
+///
+/// \param __m1
+/// A 64-bit integer vector of [4 x i16].
+/// \param __m2
+/// A 64-bit integer vector of [4 x i16].
+/// \returns A 64-bit integer vector of [4 x i16] containing the upper 16 bits
+/// of the products of both parameters.
+static __inline__ __m64 __DEFAULT_FN_ATTRS
+_mm_mulhi_pi16(__m64 __m1, __m64 __m2)
+{
+ return (__m64)__builtin_ia32_pmulhw((__v4hi)__m1, (__v4hi)__m2);
+}
+
+/// \brief Multiplies each 16-bit signed integer element of the first 64-bit
+/// integer vector of [4 x i16] by the corresponding 16-bit signed integer
+/// element of the second 64-bit integer vector of [4 x i16]. Packs the lower
+/// 16 bits of the 32-bit products into a 64-bit integer vector of [4 x i16].
+///
+/// \headerfile <x86intrin.h>
+///
+/// This intrinsic corresponds to the \c PMULLW instruction.
+///
+/// \param __m1
+/// A 64-bit integer vector of [4 x i16].
+/// \param __m2
+/// A 64-bit integer vector of [4 x i16].
+/// \returns A 64-bit integer vector of [4 x i16] containing the lower 16 bits
+/// of the products of both parameters.
+static __inline__ __m64 __DEFAULT_FN_ATTRS
+_mm_mullo_pi16(__m64 __m1, __m64 __m2)
+{
+ return (__m64)__builtin_ia32_pmullw((__v4hi)__m1, (__v4hi)__m2);
+}
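+
+/* Illustrative usage sketch (editorial addition): the full 16x16 -> 32-bit
+ * product can be reassembled from the mulhi/mullo pair. 300 * 300 = 90000 =
+ * 0x00015F90, so every lane of __lo is 0x5F90 and every lane of __hi is
+ * 0x0001.
+ *
+ *   __m64 __lo = _mm_mullo_pi16(_mm_set1_pi16(300), _mm_set1_pi16(300));
+ *   __m64 __hi = _mm_mulhi_pi16(_mm_set1_pi16(300), _mm_set1_pi16(300));
+ */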
+
+/// \brief Left-shifts each 16-bit signed integer element of the first
+/// parameter, which is a 64-bit integer vector of [4 x i16], by the number
+/// of bits specified by the second parameter, which is a 64-bit integer. The
+/// lower 16 bits of the results are packed into a 64-bit integer vector of
+/// [4 x i16].
+///
+/// \headerfile <x86intrin.h>
+///
+/// This intrinsic corresponds to the \c PSLLW instruction.
+///
+/// \param __m
+/// A 64-bit integer vector of [4 x i16].
+/// \param __count
+/// A 64-bit integer vector interpreted as a single 64-bit integer.
+/// \returns A 64-bit integer vector of [4 x i16] containing the left-shifted
+/// values. If __count is greater than or equal to 16, the result is set to
+/// all 0.
+static __inline__ __m64 __DEFAULT_FN_ATTRS
+_mm_sll_pi16(__m64 __m, __m64 __count)
+{
+ return (__m64)__builtin_ia32_psllw((__v4hi)__m, __count);
+}
+
+/// \brief Left-shifts each 16-bit signed integer element of a 64-bit integer
+/// vector of [4 x i16] by the number of bits specified by a 32-bit integer.
+/// The lower 16 bits of the results are packed into a 64-bit integer vector
+/// of [4 x i16].
+///
+/// \headerfile <x86intrin.h>
+///
+/// This intrinsic corresponds to the \c PSLLW instruction.
+///
+/// \param __m
+/// A 64-bit integer vector of [4 x i16].
+/// \param __count
+/// A 32-bit integer value.
+/// \returns A 64-bit integer vector of [4 x i16] containing the left-shifted
+/// values. If __count is greater than or equal to 16, the result is set to
+/// all 0.
+static __inline__ __m64 __DEFAULT_FN_ATTRS
+_mm_slli_pi16(__m64 __m, int __count)
+{
+ return (__m64)__builtin_ia32_psllwi((__v4hi)__m, __count);
+}
+
+/// \brief Left-shifts each 32-bit signed integer element of the first
+/// parameter, which is a 64-bit integer vector of [2 x i32], by the number
+/// of bits specified by the second parameter, which is a 64-bit integer. The
+/// lower 32 bits of the results are packed into a 64-bit integer vector of
+/// [2 x i32].
+///
+/// \headerfile <x86intrin.h>
+///
+/// This intrinsic corresponds to the \c PSLLD instruction.
+///
+/// \param __m
+/// A 64-bit integer vector of [2 x i32].
+/// \param __count
+/// A 64-bit integer vector interpreted as a single 64-bit integer.
+/// \returns A 64-bit integer vector of [2 x i32] containing the left-shifted
+/// values. If __count is greater than or equal to 32, the result is set to
+/// all 0.
+static __inline__ __m64 __DEFAULT_FN_ATTRS
+_mm_sll_pi32(__m64 __m, __m64 __count)
+{
+ return (__m64)__builtin_ia32_pslld((__v2si)__m, __count);
+}
+
+/// \brief Left-shifts each 32-bit signed integer element of a 64-bit integer
+/// vector of [2 x i32] by the number of bits specified by a 32-bit integer.
+/// The lower 32 bits of the results are packed into a 64-bit integer vector
+/// of [2 x i32].
+///
+/// \headerfile <x86intrin.h>
+///
+/// This intrinsic corresponds to the \c PSLLD instruction.
+///
+/// \param __m
+/// A 64-bit integer vector of [2 x i32].
+/// \param __count
+/// A 32-bit integer value.
+/// \returns A 64-bit integer vector of [2 x i32] containing the left-shifted
+/// values. If __count is greater than or equal to 32, the result is set to
+/// all 0.
+static __inline__ __m64 __DEFAULT_FN_ATTRS
+_mm_slli_pi32(__m64 __m, int __count)
+{
+ return (__m64)__builtin_ia32_pslldi((__v2si)__m, __count);
+}
+
+/// \brief Left-shifts the first 64-bit integer parameter by the number of bits
+/// specified by the second 64-bit integer parameter. The lower 64 bits of
+/// the result are returned.
+///
+/// \headerfile <x86intrin.h>
+///
+/// This intrinsic corresponds to the \c PSLLQ instruction.
+///
+/// \param __m
+/// A 64-bit integer vector interpreted as a single 64-bit integer.
+/// \param __count
+/// A 64-bit integer vector interpreted as a single 64-bit integer.
+/// \returns A 64-bit integer vector containing the left-shifted value. If
+/// __count is greater than or equal to 64, the result is set to 0.
+static __inline__ __m64 __DEFAULT_FN_ATTRS
+_mm_sll_si64(__m64 __m, __m64 __count)
+{
+ return (__m64)__builtin_ia32_psllq((__v1di)__m, __count);
+}
+
+/// \brief Left-shifts the first parameter, which is a 64-bit integer, by the
+/// number of bits specified by the second parameter, which is a 32-bit
+/// integer. The lower 64 bits of the result are returned.
+///
+/// \headerfile <x86intrin.h>
+///
+/// This intrinsic corresponds to the \c PSLLQ instruction.
+///
+/// \param __m
+/// A 64-bit integer vector interpreted as a single 64-bit integer.
+/// \param __count
+/// A 32-bit integer value.
+/// \returns A 64-bit integer vector containing the left-shifted value. If
+/// __count is greater than or equal to 64, the result is set to 0.
+static __inline__ __m64 __DEFAULT_FN_ATTRS
+_mm_slli_si64(__m64 __m, int __count)
+{
+ return (__m64)__builtin_ia32_psllqi((__v1di)__m, __count);
+}
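+
+/* Illustrative usage sketch (editorial addition): the _mm_slli_* forms take
+ * the count as an int, while the _mm_sll_* forms read it from the low
+ * 64 bits of an __m64; both produce the same result here.
+ *
+ *   __m64 __v  = _mm_set1_pi16(3);
+ *   __m64 __r1 = _mm_slli_pi16(__v, 2);                   // every lane = 12
+ *   __m64 __r2 = _mm_sll_pi16(__v, _mm_cvtsi32_si64(2));  // same result
+ */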
+
+/// \brief Right-shifts each 16-bit integer element of the first parameter,
+/// which is a 64-bit integer vector of [4 x i16], by the number of bits
+/// specified by the second parameter, which is a 64-bit integer. High-order
+/// bits are filled with the sign bit of the initial value of each 16-bit
+/// element. The 16-bit results are packed into a 64-bit integer vector of
+/// [4 x i16].
+///
+/// \headerfile <x86intrin.h>
+///
+/// This intrinsic corresponds to the \c PSRAW instruction.
+///
+/// \param __m
+/// A 64-bit integer vector of [4 x i16].
+/// \param __count
+/// A 64-bit integer vector interpreted as a single 64-bit integer.
+/// \returns A 64-bit integer vector of [4 x i16] containing the right-shifted
+/// values.
+static __inline__ __m64 __DEFAULT_FN_ATTRS
+_mm_sra_pi16(__m64 __m, __m64 __count)
+{
+ return (__m64)__builtin_ia32_psraw((__v4hi)__m, __count);
+}
+
+/// \brief Right-shifts each 16-bit integer element of a 64-bit integer vector
+/// of [4 x i16] by the number of bits specified by a 32-bit integer.
+/// High-order bits are filled with the sign bit of the initial value of each
+/// 16-bit element. The 16-bit results are packed into a 64-bit integer
+/// vector of [4 x i16].
+///
+/// \headerfile <x86intrin.h>
+///
+/// This intrinsic corresponds to the \c PSRAW instruction.
+///
+/// \param __m
+/// A 64-bit integer vector of [4 x i16].
+/// \param __count
+/// A 32-bit integer value.
+/// \returns A 64-bit integer vector of [4 x i16] containing the right-shifted
+/// values.
+static __inline__ __m64 __DEFAULT_FN_ATTRS
+_mm_srai_pi16(__m64 __m, int __count)
+{
+ return (__m64)__builtin_ia32_psrawi((__v4hi)__m, __count);
+}
+
+/// \brief Right-shifts each 32-bit integer element of the first parameter,
+/// which is a 64-bit integer vector of [2 x i32], by the number of bits
+/// specified by the second parameter, which is a 64-bit integer. High-order
+/// bits are filled with the sign bit of the initial value of each 32-bit
+/// element. The 32-bit results are packed into a 64-bit integer vector of
+/// [2 x i32].
+///
+/// \headerfile <x86intrin.h>
+///
+/// This intrinsic corresponds to the \c PSRAD instruction.
+///
+/// \param __m
+/// A 64-bit integer vector of [2 x i32].
+/// \param __count
+/// A 64-bit integer vector interpreted as a single 64-bit integer.
+/// \returns A 64-bit integer vector of [2 x i32] containing the right-shifted
+/// values.
+static __inline__ __m64 __DEFAULT_FN_ATTRS
+_mm_sra_pi32(__m64 __m, __m64 __count)
+{
+ return (__m64)__builtin_ia32_psrad((__v2si)__m, __count);
+}
+
+/// \brief Right-shifts each 32-bit integer element of a 64-bit integer vector
+/// of [2 x i32] by the number of bits specified by a 32-bit integer.
+/// High-order bits are filled with the sign bit of the initial value of each
+/// 32-bit element. The 32-bit results are packed into a 64-bit integer
+/// vector of [2 x i32].
+///
+/// \headerfile <x86intrin.h>
+///
+/// This intrinsic corresponds to the \c PSRAD instruction.
+///
+/// \param __m
+/// A 64-bit integer vector of [2 x i32].
+/// \param __count
+/// A 32-bit integer value.
+/// \returns A 64-bit integer vector of [2 x i32] containing the right-shifted
+/// values.
+static __inline__ __m64 __DEFAULT_FN_ATTRS
+_mm_srai_pi32(__m64 __m, int __count)
+{
+ return (__m64)__builtin_ia32_psradi((__v2si)__m, __count);
+}
+
+/// \brief Right-shifts each 16-bit integer element of the first parameter,
+/// which is a 64-bit integer vector of [4 x i16], by the number of bits
+/// specified by the second parameter, which is a 64-bit integer. High-order
+/// bits are cleared. The 16-bit results are packed into a 64-bit integer
+/// vector of [4 x i16].
+///
+/// \headerfile <x86intrin.h>
+///
+/// This intrinsic corresponds to the \c PSRLW instruction.
+///
+/// \param __m
+/// A 64-bit integer vector of [4 x i16].
+/// \param __count
+/// A 64-bit integer vector interpreted as a single 64-bit integer.
+/// \returns A 64-bit integer vector of [4 x i16] containing the right-shifted
+/// values.
+static __inline__ __m64 __DEFAULT_FN_ATTRS
+_mm_srl_pi16(__m64 __m, __m64 __count)
+{
+ return (__m64)__builtin_ia32_psrlw((__v4hi)__m, __count);
+}
+
+/// \brief Right-shifts each 16-bit integer element of a 64-bit integer vector
+/// of [4 x i16] by the number of bits specified by a 32-bit integer.
+/// High-order bits are cleared. The 16-bit results are packed into a 64-bit
+/// integer vector of [4 x i16].
+///
+/// \headerfile <x86intrin.h>
+///
+/// This intrinsic corresponds to the \c PSRLW instruction.
+///
+/// \param __m
+/// A 64-bit integer vector of [4 x i16].
+/// \param __count
+/// A 32-bit integer value.
+/// \returns A 64-bit integer vector of [4 x i16] containing the right-shifted
+/// values.
+static __inline__ __m64 __DEFAULT_FN_ATTRS
+_mm_srli_pi16(__m64 __m, int __count)
+{
+ return (__m64)__builtin_ia32_psrlwi((__v4hi)__m, __count);
+}
+
+/// \brief Right-shifts each 32-bit integer element of the first parameter,
+/// which is a 64-bit integer vector of [2 x i32], by the number of bits
+/// specified by the second parameter, which is a 64-bit integer. High-order
+/// bits are cleared. The 32-bit results are packed into a 64-bit integer
+/// vector of [2 x i32].
+///
+/// \headerfile <x86intrin.h>
+///
+/// This intrinsic corresponds to the \c PSRLD instruction.
+///
+/// \param __m
+/// A 64-bit integer vector of [2 x i32].
+/// \param __count
+/// A 64-bit integer vector interpreted as a single 64-bit integer.
+/// \returns A 64-bit integer vector of [2 x i32] containing the right-shifted
+/// values.
+static __inline__ __m64 __DEFAULT_FN_ATTRS
+_mm_srl_pi32(__m64 __m, __m64 __count)
+{
+ return (__m64)__builtin_ia32_psrld((__v2si)__m, __count);
+}
+
+/// \brief Right-shifts each 32-bit integer element of a 64-bit integer vector
+/// of [2 x i32] by the number of bits specified by a 32-bit integer.
+/// High-order bits are cleared. The 32-bit results are packed into a 64-bit
+/// integer vector of [2 x i32].
+///
+/// \headerfile <x86intrin.h>
+///
+/// This intrinsic corresponds to the \c PSRLD instruction.
+///
+/// \param __m
+/// A 64-bit integer vector of [2 x i32].
+/// \param __count
+/// A 32-bit integer value.
+/// \returns A 64-bit integer vector of [2 x i32] containing the right-shifted
+/// values.
+static __inline__ __m64 __DEFAULT_FN_ATTRS
+_mm_srli_pi32(__m64 __m, int __count)
+{
+ return (__m64)__builtin_ia32_psrldi((__v2si)__m, __count);
+}
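+
+/* Illustrative usage sketch (editorial addition): arithmetic right shifts
+ * replicate the sign bit; logical right shifts fill with zeros.
+ *
+ *   __m64 __v = _mm_set1_pi32(-8);      // lanes: 0xFFFFFFF8
+ *   __m64 __a = _mm_srai_pi32(__v, 1);  // lanes: -4 (0xFFFFFFFC)
+ *   __m64 __l = _mm_srli_pi32(__v, 1);  // lanes: 0x7FFFFFFC
+ */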
+
+/// \brief Right-shifts the first 64-bit integer parameter by the number of bits
+/// specified by the second 64-bit integer parameter. High-order bits are
+/// cleared.
+///
+/// \headerfile <x86intrin.h>
+///
+/// This intrinsic corresponds to the \c PSRLQ instruction.
+///
+/// \param __m
+/// A 64-bit integer vector interpreted as a single 64-bit integer.
+/// \param __count
+/// A 64-bit integer vector interpreted as a single 64-bit integer.
+/// \returns A 64-bit integer vector containing the right-shifted value.
+static __inline__ __m64 __DEFAULT_FN_ATTRS
+_mm_srl_si64(__m64 __m, __m64 __count)
+{
+ return (__m64)__builtin_ia32_psrlq((__v1di)__m, __count);
+}
+
+/// \brief Right-shifts the first parameter, which is a 64-bit integer, by the
+/// number of bits specified by the second parameter, which is a 32-bit
+/// integer. High-order bits are cleared.
+///
+/// \headerfile <x86intrin.h>
+///
+/// This intrinsic corresponds to the \c PSRLQ instruction.
+///
+/// \param __m
+/// A 64-bit integer vector interpreted as a single 64-bit integer.
+/// \param __count
+/// A 32-bit integer value.
+/// \returns A 64-bit integer vector containing the right-shifted value.
+static __inline__ __m64 __DEFAULT_FN_ATTRS
+_mm_srli_si64(__m64 __m, int __count)
+{
+ return (__m64)__builtin_ia32_psrlqi((__v1di)__m, __count);
+}
+
+/// \brief Performs a bitwise AND of two 64-bit integer vectors.
+///
+/// \headerfile <x86intrin.h>
+///
+/// This intrinsic corresponds to the \c PAND instruction.
+///
+/// \param __m1
+/// A 64-bit integer vector.
+/// \param __m2
+/// A 64-bit integer vector.
+/// \returns A 64-bit integer vector containing the bitwise AND of both
+/// parameters.
+static __inline__ __m64 __DEFAULT_FN_ATTRS
+_mm_and_si64(__m64 __m1, __m64 __m2)
+{
+ return __builtin_ia32_pand((__v1di)__m1, (__v1di)__m2);
+}
+
+/// \brief Performs a bitwise NOT of the first 64-bit integer vector, and then
+/// performs a bitwise AND of the intermediate result and the second 64-bit
+/// integer vector.
+///
+/// \headerfile <x86intrin.h>
+///
+/// This intrinsic corresponds to the \c PANDN instruction.
+///
+/// \param __m1
+/// A 64-bit integer vector. The one's complement of this parameter is used
+/// in the bitwise AND.
+/// \param __m2
+/// A 64-bit integer vector.
+/// \returns A 64-bit integer vector containing the bitwise AND of the second
+/// parameter and the one's complement of the first parameter.
+static __inline__ __m64 __DEFAULT_FN_ATTRS
+_mm_andnot_si64(__m64 __m1, __m64 __m2)
+{
+ return __builtin_ia32_pandn((__v1di)__m1, (__v1di)__m2);
+}
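+
+/* Illustrative usage sketch (editorial addition): note the operand order --
+ * the FIRST argument is complemented, which makes andnot the usual
+ * mask-select primitive. Given __m64 values __a and __b of [4 x i16]:
+ *
+ *   __m64 __mask = _mm_cmpgt_pi16(__a, __b);  // defined below
+ *   __m64 __max  = _mm_or_si64(_mm_and_si64(__mask, __a),
+ *                              _mm_andnot_si64(__mask, __b));
+ *   // per-lane signed max of __a and __b
+ */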
+
+/// \brief Performs a bitwise OR of two 64-bit integer vectors.
+///
+/// \headerfile <x86intrin.h>
+///
+/// This intrinsic corresponds to the \c POR instruction.
+///
+/// \param __m1
+/// A 64-bit integer vector.
+/// \param __m2
+/// A 64-bit integer vector.
+/// \returns A 64-bit integer vector containing the bitwise OR of both
+/// parameters.
+static __inline__ __m64 __DEFAULT_FN_ATTRS
+_mm_or_si64(__m64 __m1, __m64 __m2)
+{
+ return __builtin_ia32_por((__v1di)__m1, (__v1di)__m2);
+}
+
+/// \brief Performs a bitwise exclusive OR of two 64-bit integer vectors.
+///
+/// \headerfile <x86intrin.h>
+///
+/// This intrinsic corresponds to the \c PXOR instruction.
+///
+/// \param __m1
+/// A 64-bit integer vector.
+/// \param __m2
+/// A 64-bit integer vector.
+/// \returns A 64-bit integer vector containing the bitwise exclusive OR of both
+/// parameters.
+static __inline__ __m64 __DEFAULT_FN_ATTRS
+_mm_xor_si64(__m64 __m1, __m64 __m2)
+{
+ return __builtin_ia32_pxor((__v1di)__m1, (__v1di)__m2);
+}
+
+/// \brief Compares the 8-bit integer elements of two 64-bit integer vectors of
+/// [8 x i8] to determine if the element of the first vector is equal to the
+/// corresponding element of the second vector. The comparison yields 0 for
+/// false, 0xFF for true.
+///
+/// \headerfile <x86intrin.h>
+///
+/// This intrinsic corresponds to the \c PCMPEQB instruction.
+///
+/// \param __m1
+/// A 64-bit integer vector of [8 x i8].
+/// \param __m2
+/// A 64-bit integer vector of [8 x i8].
+/// \returns A 64-bit integer vector of [8 x i8] containing the comparison
+/// results.
+static __inline__ __m64 __DEFAULT_FN_ATTRS
+_mm_cmpeq_pi8(__m64 __m1, __m64 __m2)
+{
+ return (__m64)__builtin_ia32_pcmpeqb((__v8qi)__m1, (__v8qi)__m2);
+}
+
+/// \brief Compares the 16-bit integer elements of two 64-bit integer vectors of
+/// [4 x i16] to determine if the element of the first vector is equal to the
+/// corresponding element of the second vector. The comparison yields 0 for
+/// false, 0xFFFF for true.
+///
+/// \headerfile <x86intrin.h>
+///
+/// This intrinsic corresponds to the \c PCMPEQW instruction.
+///
+/// \param __m1
+/// A 64-bit integer vector of [4 x i16].
+/// \param __m2
+/// A 64-bit integer vector of [4 x i16].
+/// \returns A 64-bit integer vector of [4 x i16] containing the comparison
+/// results.
+static __inline__ __m64 __DEFAULT_FN_ATTRS
+_mm_cmpeq_pi16(__m64 __m1, __m64 __m2)
+{
+ return (__m64)__builtin_ia32_pcmpeqw((__v4hi)__m1, (__v4hi)__m2);
+}
+
+/// \brief Compares the 32-bit integer elements of two 64-bit integer vectors of
+/// [2 x i32] to determine if the element of the first vector is equal to the
+/// corresponding element of the second vector. The comparison yields 0 for
+/// false, 0xFFFFFFFF for true.
+///
+/// \headerfile <x86intrin.h>
+///
+/// This intrinsic corresponds to the \c PCMPEQD instruction.
+///
+/// \param __m1
+/// A 64-bit integer vector of [2 x i32].
+/// \param __m2
+/// A 64-bit integer vector of [2 x i32].
+/// \returns A 64-bit integer vector of [2 x i32] containing the comparison
+/// results.
+static __inline__ __m64 __DEFAULT_FN_ATTRS
+_mm_cmpeq_pi32(__m64 __m1, __m64 __m2)
+{
+ return (__m64)__builtin_ia32_pcmpeqd((__v2si)__m1, (__v2si)__m2);
+}
+
+/// \brief Compares the 8-bit integer elements of two 64-bit integer vectors of
+/// [8 x i8] to determine if the element of the first vector is greater than
+/// the corresponding element of the second vector. The comparison yields 0
+/// for false, 0xFF for true.
+///
+/// \headerfile <x86intrin.h>
+///
+/// This intrinsic corresponds to the \c PCMPGTB instruction.
+///
+/// \param __m1
+/// A 64-bit integer vector of [8 x i8].
+/// \param __m2
+/// A 64-bit integer vector of [8 x i8].
+/// \returns A 64-bit integer vector of [8 x i8] containing the comparison
+/// results.
+static __inline__ __m64 __DEFAULT_FN_ATTRS
+_mm_cmpgt_pi8(__m64 __m1, __m64 __m2)
+{
+ return (__m64)__builtin_ia32_pcmpgtb((__v8qi)__m1, (__v8qi)__m2);
+}
+
+/// \brief Compares the 16-bit integer elements of two 64-bit integer vectors of
+/// [4 x i16] to determine if the element of the first vector is greater than
+/// the corresponding element of the second vector. The comparison yields 0
+/// for false, 0xFFFF for true.
+///
+/// \headerfile <x86intrin.h>
+///
+/// This intrinsic corresponds to the \c PCMPGTW instruction.
+///
+/// \param __m1
+/// A 64-bit integer vector of [4 x i16].
+/// \param __m2
+/// A 64-bit integer vector of [4 x i16].
+/// \returns A 64-bit integer vector of [4 x i16] containing the comparison
+/// results.
+static __inline__ __m64 __DEFAULT_FN_ATTRS
+_mm_cmpgt_pi16(__m64 __m1, __m64 __m2)
+{
+ return (__m64)__builtin_ia32_pcmpgtw((__v4hi)__m1, (__v4hi)__m2);
+}
+
+/// \brief Compares the 32-bit integer elements of two 64-bit integer vectors of
+/// [2 x i32] to determine if the element of the first vector is greater than
+/// the corresponding element of the second vector. The comparison yields 0
+/// for false, 0xFFFFFFFF for true.
+///
+/// \headerfile <x86intrin.h>
+///
+/// This intrinsic corresponds to the \c PCMPGTD instruction.
+///
+/// \param __m1
+/// A 64-bit integer vector of [2 x i32].
+/// \param __m2
+/// A 64-bit integer vector of [2 x i32].
+/// \returns A 64-bit integer vector of [2 x i32] containing the comparison
+/// results.
+static __inline__ __m64 __DEFAULT_FN_ATTRS
+_mm_cmpgt_pi32(__m64 __m1, __m64 __m2)
+{
+ return (__m64)__builtin_ia32_pcmpgtd((__v2si)__m1, (__v2si)__m2);
+}
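+
+/* Illustrative usage sketch (editorial addition): comparisons produce
+ * all-ones or all-zero lane masks rather than booleans.
+ *
+ *   __m64 __m = _mm_cmpgt_pi32(_mm_set_pi32(5, -1), _mm_set_pi32(3, 0));
+ *   // low lane:  0x00000000 (-1 > 0 is false)
+ *   // high lane: 0xFFFFFFFF ( 5 > 3 is true)
+ */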
+
+/// \brief Constructs a 64-bit integer vector initialized to zero.
+///
+/// \headerfile <x86intrin.h>
+///
+/// This intrinsic corresponds to the \c VXORPS / XORPS instruction.
+///
+/// \returns An initialized 64-bit integer vector with all elements set to zero.
+static __inline__ __m64 __DEFAULT_FN_ATTRS
+_mm_setzero_si64(void)
+{
+ return (__m64){ 0LL };
+}
+
+/// \brief Constructs a 64-bit integer vector initialized with the specified
+/// 32-bit integer values.
+///
+/// \headerfile <x86intrin.h>
+///
+/// This intrinsic is a utility function and does not correspond to a specific
+/// instruction.
+///
+/// \param __i1
+/// A 32-bit integer value used to initialize the upper 32 bits of the
+/// result.
+/// \param __i0
+/// A 32-bit integer value used to initialize the lower 32 bits of the
+/// result.
+/// \returns An initialized 64-bit integer vector.
+static __inline__ __m64 __DEFAULT_FN_ATTRS
+_mm_set_pi32(int __i1, int __i0)
+{
+ return (__m64)__builtin_ia32_vec_init_v2si(__i0, __i1);
+}
+
+/// \brief Constructs a 64-bit integer vector initialized with the specified
+/// 16-bit integer values.
+///
+/// \headerfile <x86intrin.h>
+///
+/// This intrinsic is a utility function and does not correspond to a specific
+/// instruction.
+///
+/// \param __s3
+/// A 16-bit integer value used to initialize bits [63:48] of the result.
+/// \param __s2
+/// A 16-bit integer value used to initialize bits [47:32] of the result.
+/// \param __s1
+/// A 16-bit integer value used to initialize bits [31:16] of the result.
+/// \param __s0
+/// A 16-bit integer value used to initialize bits [15:0] of the result.
+/// \returns An initialized 64-bit integer vector.
+static __inline__ __m64 __DEFAULT_FN_ATTRS
+_mm_set_pi16(short __s3, short __s2, short __s1, short __s0)
+{
+ return (__m64)__builtin_ia32_vec_init_v4hi(__s0, __s1, __s2, __s3);
+}
+
+/// \brief Constructs a 64-bit integer vector initialized with the specified
+/// 8-bit integer values.
+///
+/// \headerfile <x86intrin.h>
+///
+/// This intrinsic is a utility function and does not correspond to a specific
+/// instruction.
+///
+/// \param __b7
+/// An 8-bit integer value used to initialize bits [63:56] of the result.
+/// \param __b6
+/// An 8-bit integer value used to initialize bits [55:48] of the result.
+/// \param __b5
+/// An 8-bit integer value used to initialize bits [47:40] of the result.
+/// \param __b4
+/// An 8-bit integer value used to initialize bits [39:32] of the result.
+/// \param __b3
+/// An 8-bit integer value used to initialize bits [31:24] of the result.
+/// \param __b2
+/// An 8-bit integer value used to initialize bits [23:16] of the result.
+/// \param __b1
+/// An 8-bit integer value used to initialize bits [15:8] of the result.
+/// \param __b0
+/// An 8-bit integer value used to initialize bits [7:0] of the result.
+/// \returns An initialized 64-bit integer vector.
+static __inline__ __m64 __DEFAULT_FN_ATTRS
+_mm_set_pi8(char __b7, char __b6, char __b5, char __b4, char __b3, char __b2,
+ char __b1, char __b0)
+{
+ return (__m64)__builtin_ia32_vec_init_v8qi(__b0, __b1, __b2, __b3,
+ __b4, __b5, __b6, __b7);
+}
+
+/// \brief Constructs a 64-bit integer vector of [2 x i32], with each of the
+/// 32-bit integer vector elements set to the specified 32-bit integer
+/// value.
+///
+/// \headerfile <x86intrin.h>
+///
+/// This intrinsic corresponds to the \c VPSHUFD / PSHUFD instruction.
+///
+/// \param __i
+/// A 32-bit integer value used to initialize each vector element of the
+/// result.
+/// \returns An initialized 64-bit integer vector of [2 x i32].
+static __inline__ __m64 __DEFAULT_FN_ATTRS
+_mm_set1_pi32(int __i)
+{
+ return _mm_set_pi32(__i, __i);
+}
+
+/// \brief Constructs a 64-bit integer vector of [4 x i16], with each of the
+/// 16-bit integer vector elements set to the specified 16-bit integer
+/// value.
+///
+/// \headerfile <x86intrin.h>
+///
+/// This intrinsic corresponds to the \c VPSHUFLW / PSHUFLW instruction.
+///
+/// \param __w
+/// A 16-bit integer value used to initialize each vector element of the
+/// result.
+/// \returns An initialized 64-bit integer vector of [4 x i16].
+static __inline__ __m64 __DEFAULT_FN_ATTRS
+_mm_set1_pi16(short __w)
+{
+ return _mm_set_pi16(__w, __w, __w, __w);
+}
+
+/// \brief Constructs a 64-bit integer vector of [8 x i8], with each of the
+/// 8-bit integer vector elements set to the specified 8-bit integer value.
+///
+/// \headerfile <x86intrin.h>
+///
+/// This intrinsic corresponds to the \c VPUNPCKLBW + VPSHUFLW / \c PUNPCKLBW +
+/// PSHUFLW instructions.
+///
+/// \param __b
+/// An 8-bit integer value used to initialize each vector element of the
+/// result.
+/// \returns An initialized 64-bit integer vector of [8 x i8].
+static __inline__ __m64 __DEFAULT_FN_ATTRS
+_mm_set1_pi8(char __b)
+{
+ return _mm_set_pi8(__b, __b, __b, __b, __b, __b, __b, __b);
+}
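+
+/* Sketch (added for illustration): _mm_set1_pi8(0x7F) broadcasts the byte,
+ * yielding 0x7F7F7F7F7F7F7F7F; _mm_set1_pi16 and _mm_set1_pi32 broadcast
+ * their argument analogously. */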
+
+/// \brief Constructs a 64-bit integer vector, initialized in reverse order with
+/// the specified 32-bit integer values.
+///
+/// \headerfile <x86intrin.h>
+///
+/// This intrinsic is a utility function and does not correspond to a specific
+/// instruction.
+///
+/// \param __i0
+/// A 32-bit integer value used to initialize the lower 32 bits of the
+/// result.
+/// \param __i1
+/// A 32-bit integer value used to initialize the upper 32 bits of the
+/// result.
+/// \returns An initialized 64-bit integer vector.
+static __inline__ __m64 __DEFAULT_FN_ATTRS
+_mm_setr_pi32(int __i0, int __i1)
+{
+ return _mm_set_pi32(__i1, __i0);
+}
+
+/// \brief Constructs a 64-bit integer vector, initialized in reverse order with
+/// the specified 16-bit integer values.
+///
+/// \headerfile <x86intrin.h>
+///
+/// This intrinsic is a utility function and does not correspond to a specific
+/// instruction.
+///
+/// \param __w0
+/// A 16-bit integer value used to initialize bits [15:0] of the result.
+/// \param __w1
+/// A 16-bit integer value used to initialize bits [31:16] of the result.
+/// \param __w2
+/// A 16-bit integer value used to initialize bits [47:32] of the result.
+/// \param __w3
+/// A 16-bit integer value used to initialize bits [63:48] of the result.
+/// \returns An initialized 64-bit integer vector.
+static __inline__ __m64 __DEFAULT_FN_ATTRS
+_mm_setr_pi16(short __w0, short __w1, short __w2, short __w3)
+{
+ return _mm_set_pi16(__w3, __w2, __w1, __w0);
+}
+
+/// \brief Constructs a 64-bit integer vector, initialized in reverse order with
+/// the specified 8-bit integer values.
+///
+/// \headerfile <x86intrin.h>
+///
+/// This intrinsic is a utility function and does not correspond to a specific
+/// instruction.
+///
+/// \param __b0
+/// An 8-bit integer value used to initialize bits [7:0] of the result.
+/// \param __b1
+/// An 8-bit integer value used to initialize bits [15:8] of the result.
+/// \param __b2
+/// An 8-bit integer value used to initialize bits [23:16] of the result.
+/// \param __b3
+/// An 8-bit integer value used to initialize bits [31:24] of the result.
+/// \param __b4
+/// An 8-bit integer value used to initialize bits [39:32] of the result.
+/// \param __b5
+/// An 8-bit integer value used to initialize bits [47:40] of the result.
+/// \param __b6
+/// An 8-bit integer value used to initialize bits [55:48] of the result.
+/// \param __b7
+/// An 8-bit integer value used to initialize bits [63:56] of the result.
+/// \returns An initialized 64-bit integer vector.
+static __inline__ __m64 __DEFAULT_FN_ATTRS
+_mm_setr_pi8(char __b0, char __b1, char __b2, char __b3, char __b4, char __b5,
+ char __b6, char __b7)
+{
+ return _mm_set_pi8(__b7, __b6, __b5, __b4, __b3, __b2, __b1, __b0);
+}
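+
+/* Sketch (added for illustration): the _mm_setr_* forms take their arguments
+ * low-to-high, so _mm_setr_pi32(a, b) == _mm_set_pi32(b, a), and likewise
+ * for the 16-bit and 8-bit variants. */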
+
+#undef __DEFAULT_FN_ATTRS
+
+/* Aliases for compatibility. */
+#define _m_empty _mm_empty
+#define _m_from_int _mm_cvtsi32_si64
+#define _m_from_int64 _mm_cvtsi64_m64
+#define _m_to_int _mm_cvtsi64_si32
+#define _m_to_int64 _mm_cvtm64_si64
+#define _m_packsswb _mm_packs_pi16
+#define _m_packssdw _mm_packs_pi32
+#define _m_packuswb _mm_packs_pu16
+#define _m_punpckhbw _mm_unpackhi_pi8
+#define _m_punpckhwd _mm_unpackhi_pi16
+#define _m_punpckhdq _mm_unpackhi_pi32
+#define _m_punpcklbw _mm_unpacklo_pi8
+#define _m_punpcklwd _mm_unpacklo_pi16
+#define _m_punpckldq _mm_unpacklo_pi32
+#define _m_paddb _mm_add_pi8
+#define _m_paddw _mm_add_pi16
+#define _m_paddd _mm_add_pi32
+#define _m_paddsb _mm_adds_pi8
+#define _m_paddsw _mm_adds_pi16
+#define _m_paddusb _mm_adds_pu8
+#define _m_paddusw _mm_adds_pu16
+#define _m_psubb _mm_sub_pi8
+#define _m_psubw _mm_sub_pi16
+#define _m_psubd _mm_sub_pi32
+#define _m_psubsb _mm_subs_pi8
+#define _m_psubsw _mm_subs_pi16
+#define _m_psubusb _mm_subs_pu8
+#define _m_psubusw _mm_subs_pu16
+#define _m_pmaddwd _mm_madd_pi16
+#define _m_pmulhw _mm_mulhi_pi16
+#define _m_pmullw _mm_mullo_pi16
+#define _m_psllw _mm_sll_pi16
+#define _m_psllwi _mm_slli_pi16
+#define _m_pslld _mm_sll_pi32
+#define _m_pslldi _mm_slli_pi32
+#define _m_psllq _mm_sll_si64
+#define _m_psllqi _mm_slli_si64
+#define _m_psraw _mm_sra_pi16
+#define _m_psrawi _mm_srai_pi16
+#define _m_psrad _mm_sra_pi32
+#define _m_psradi _mm_srai_pi32
+#define _m_psrlw _mm_srl_pi16
+#define _m_psrlwi _mm_srli_pi16
+#define _m_psrld _mm_srl_pi32
+#define _m_psrldi _mm_srli_pi32
+#define _m_psrlq _mm_srl_si64
+#define _m_psrlqi _mm_srli_si64
+#define _m_pand _mm_and_si64
+#define _m_pandn _mm_andnot_si64
+#define _m_por _mm_or_si64
+#define _m_pxor _mm_xor_si64
+#define _m_pcmpeqb _mm_cmpeq_pi8
+#define _m_pcmpeqw _mm_cmpeq_pi16
+#define _m_pcmpeqd _mm_cmpeq_pi32
+#define _m_pcmpgtb _mm_cmpgt_pi8
+#define _m_pcmpgtw _mm_cmpgt_pi16
+#define _m_pcmpgtd _mm_cmpgt_pi32
+
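+/* Sketch (added for illustration): these aliases let pre-SSE MMX code
+   compile unchanged, e.g. _m_paddb(a, b) expands to _mm_add_pi8(a, b). */
+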
+#endif /* __MMINTRIN_H */
+
diff --git a/current/clang-include/module.modulemap b/current/clang-include/module.modulemap
new file mode 100644
index 0000000..3e40d2c
--- /dev/null
+++ b/current/clang-include/module.modulemap
@@ -0,0 +1,164 @@
+/*===---- module.modulemap - intrinsics module map -------------------------===
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+ * THE SOFTWARE.
+ *
+ *===-----------------------------------------------------------------------===
+ */
+
+module _Builtin_intrinsics [system] [extern_c] {
+ explicit module altivec {
+ requires altivec
+ header "altivec.h"
+ }
+
+ explicit module arm {
+ requires arm
+
+ explicit module acle {
+ header "arm_acle.h"
+ export *
+ }
+
+ explicit module neon {
+ requires neon
+ header "arm_neon.h"
+ export *
+ }
+ }
+
+ explicit module intel {
+ requires x86
+ export *
+
+ header "immintrin.h"
+ textual header "f16cintrin.h"
+ textual header "avxintrin.h"
+ textual header "avx2intrin.h"
+ textual header "avx512fintrin.h"
+ textual header "avx512erintrin.h"
+ textual header "fmaintrin.h"
+
+ header "x86intrin.h"
+ textual header "bmiintrin.h"
+ textual header "bmi2intrin.h"
+ textual header "lzcntintrin.h"
+ textual header "xopintrin.h"
+ textual header "fma4intrin.h"
+ textual header "mwaitxintrin.h"
+
+ explicit module mm_malloc {
+ header "mm_malloc.h"
+ export * // note: for <stdlib.h> dependency
+ }
+
+ explicit module cpuid {
+ header "cpuid.h"
+ }
+
+ explicit module mmx {
+ header "mmintrin.h"
+ }
+
+ explicit module sse {
+ export mm_malloc
+ export mmx
+ export sse2 // note: for hackish <emmintrin.h> dependency
+ header "xmmintrin.h"
+ }
+
+ explicit module sse2 {
+ export sse
+ header "emmintrin.h"
+ }
+
+ explicit module sse3 {
+ export sse2
+ header "pmmintrin.h"
+ }
+
+ explicit module ssse3 {
+ export sse3
+ header "tmmintrin.h"
+ }
+
+ explicit module sse4_1 {
+ export ssse3
+ header "smmintrin.h"
+ }
+
+ explicit module sse4_2 {
+ export sse4_1
+ header "nmmintrin.h"
+ }
+
+ explicit module sse4a {
+ export sse3
+ header "ammintrin.h"
+ }
+
+ explicit module popcnt {
+ header "popcntintrin.h"
+ }
+
+ explicit module mm3dnow {
+ header "mm3dnow.h"
+ }
+
+ explicit module aes_pclmul {
+ header "wmmintrin.h"
+ export aes
+ export pclmul
+ }
+
+ explicit module aes {
+ header "__wmmintrin_aes.h"
+ }
+
+ explicit module pclmul {
+ header "__wmmintrin_pclmul.h"
+ }
+ }
+
+ explicit module systemz {
+ requires systemz
+ export *
+
+ header "s390intrin.h"
+
+ explicit module htm {
+ requires htm
+ header "htmintrin.h"
+ header "htmxlintrin.h"
+ }
+
+ explicit module zvector {
+ requires zvector, vx
+ header "vecintrin.h"
+ }
+ }
+}
+
+module _Builtin_stddef_max_align_t [system] [extern_c] {
+ header "__stddef_max_align_t.h"
+}
+
+module opencl_c {
+ requires opencl
+ header "opencl-c.h"
+}
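+
+/* Illustrative note (added; not part of the upstream module map): when a
+ * translation unit is compiled with -fmodules, an ordinary
+ * `#include <immintrin.h>` on an x86 target is remapped to an import of the
+ * _Builtin_intrinsics.intel submodule declared above; the `textual header`
+ * entries are preprocessed into that module rather than built as separate
+ * submodules. */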
diff --git a/current/clang-include/mwaitxintrin.h b/current/clang-include/mwaitxintrin.h
new file mode 100644
index 0000000..635f2ac
--- /dev/null
+++ b/current/clang-include/mwaitxintrin.h
@@ -0,0 +1,47 @@
+/*===---- mwaitxintrin.h - MONITORX/MWAITX intrinsics ----------------------===
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+ * THE SOFTWARE.
+ *
+ *===-----------------------------------------------------------------------===
+ */
+
+#ifndef __X86INTRIN_H
+#error "Never use <mwaitxintrin.h> directly; include <x86intrin.h> instead."
+#endif
+
+#ifndef _MWAITXINTRIN_H
+#define _MWAITXINTRIN_H
+
+/* Define the default attributes for the functions in this file. */
+#define __DEFAULT_FN_ATTRS __attribute__((__always_inline__, __nodebug__, __target__("mwaitx")))
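+
+/* Descriptive note (added; assumes standard AMD MONITORX semantics):
+   _mm_monitorx arms the address-range monitor on the cache line containing
+   __p; __extensions and __hints are forwarded to the MONITORX instruction. */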
+static __inline__ void __DEFAULT_FN_ATTRS
+_mm_monitorx(void const * __p, unsigned __extensions, unsigned __hints)
+{
+ __builtin_ia32_monitorx((void *)__p, __extensions, __hints);
+}
+
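+/* Descriptive note (added; assumes standard AMD MWAITX semantics):
+   _mm_mwaitx enters an implementation-dependent wait state until a store to
+   the monitored range wakes the core, or, when enabled through __extensions,
+   until the __clock timer expires; __hints selects the wait state. */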
+static __inline__ void __DEFAULT_FN_ATTRS
+_mm_mwaitx(unsigned __extensions, unsigned __hints, unsigned __clock)
+{
+ __builtin_ia32_mwaitx(__extensions, __hints, __clock);
+}
+
+#undef __DEFAULT_FN_ATTRS
+
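+/* Usage sketch (illustration only, assuming a target built with the mwaitx
+   feature enabled):
+
+     volatile int flag = 0;
+     _mm_monitorx((const void *)&flag, 0, 0); // arm the monitor on &flag
+     if (!flag)
+       _mm_mwaitx(0, 0, 0);                   // wait until &flag is stored to
+ */
+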
+#endif /* _MWAITXINTRIN_H */
diff --git a/current/clang-include/nmmintrin.h b/current/clang-include/nmmintrin.h
new file mode 100644
index 0000000..57fec15
--- /dev/null
+++ b/current/clang-include/nmmintrin.h
@@ -0,0 +1,30 @@
+/*===---- nmmintrin.h - SSE4 intrinsics ------------------------------------===
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+ * THE SOFTWARE.
+ *
+ *===-----------------------------------------------------------------------===
+ */
+
+#ifndef _NMMINTRIN_H
+#define _NMMINTRIN_H
+
+/* To match the expectations of gcc, the SSE4.2 definitions are placed in
+   smmintrin.h, so we simply include that header here. */
+#include <smmintrin.h>
+#endif /* _NMMINTRIN_H */
diff --git a/current/clang-include/opencl-c.h b/current/clang-include/opencl-c.h
new file mode 100644
index 0000000..8029274
--- /dev/null
+++ b/current/clang-include/opencl-c.h
@@ -0,0 +1,16962 @@
+//===--- opencl-c.h - OpenCL C language builtin function header -----------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef _OPENCL_H_
+#define _OPENCL_H_
+
+#if __OPENCL_C_VERSION__ >= CL_VERSION_2_0
+#ifndef cl_khr_depth_images
+#define cl_khr_depth_images
+#endif //cl_khr_depth_images
+#endif //__OPENCL_C_VERSION__ >= CL_VERSION_2_0
+
+#define __ovld __attribute__((overloadable))
+
+// Optimizations
+#define __purefn __attribute__((pure))
+#define __cnfn __attribute__((const))
+
+// built-in scalar data types:
+
+/**
+ * An unsigned 8-bit integer.
+ */
+typedef unsigned char uchar;
+
+/**
+ * An unsigned 16-bit integer.
+ */
+typedef unsigned short ushort;
+
+/**
+ * An unsigned 32-bit integer.
+ */
+typedef unsigned int uint;
+
+/**
+ * An unsigned 64-bit integer.
+ */
+typedef unsigned long ulong;
+
+/**
+ * The unsigned integer type of the result of the sizeof operator. This is
+ * a 32-bit unsigned integer if CL_DEVICE_ADDRESS_BITS (defined in table
+ * 4.3) is 32 bits, and a 64-bit unsigned integer if
+ * CL_DEVICE_ADDRESS_BITS is 64 bits.
+ */
+typedef __SIZE_TYPE__ size_t;
+
+/**
+ * A signed integer type that is the result of subtracting two pointers.
+ * This is a 32-bit signed integer if CL_DEVICE_ADDRESS_BITS (defined in
+ * table 4.3) is 32 bits, and a 64-bit signed integer if
+ * CL_DEVICE_ADDRESS_BITS is 64 bits.
+ */
+typedef __PTRDIFF_TYPE__ ptrdiff_t;
+
+/**
+ * A signed integer type with the property that any valid pointer to
+ * void can be converted to this type, then converted back to pointer
+ * to void, and the result will compare equal to the original pointer.
+ */
+typedef __INTPTR_TYPE__ intptr_t;
+
+/**
+ * An unsigned integer type with the property that any valid pointer to
+ * void can be converted to this type, then converted back to pointer
+ * to void, and the result will compare equal to the original pointer.
+ */
+typedef __UINTPTR_TYPE__ uintptr_t;
+
+// built-in vector data types:
+typedef char char2 __attribute__((ext_vector_type(2)));
+typedef char char3 __attribute__((ext_vector_type(3)));
+typedef char char4 __attribute__((ext_vector_type(4)));
+typedef char char8 __attribute__((ext_vector_type(8)));
+typedef char char16 __attribute__((ext_vector_type(16)));
+typedef uchar uchar2 __attribute__((ext_vector_type(2)));
+typedef uchar uchar3 __attribute__((ext_vector_type(3)));
+typedef uchar uchar4 __attribute__((ext_vector_type(4)));
+typedef uchar uchar8 __attribute__((ext_vector_type(8)));
+typedef uchar uchar16 __attribute__((ext_vector_type(16)));
+typedef short short2 __attribute__((ext_vector_type(2)));
+typedef short short3 __attribute__((ext_vector_type(3)));
+typedef short short4 __attribute__((ext_vector_type(4)));
+typedef short short8 __attribute__((ext_vector_type(8)));
+typedef short short16 __attribute__((ext_vector_type(16)));
+typedef ushort ushort2 __attribute__((ext_vector_type(2)));
+typedef ushort ushort3 __attribute__((ext_vector_type(3)));
+typedef ushort ushort4 __attribute__((ext_vector_type(4)));
+typedef ushort ushort8 __attribute__((ext_vector_type(8)));
+typedef ushort ushort16 __attribute__((ext_vector_type(16)));
+typedef int int2 __attribute__((ext_vector_type(2)));
+typedef int int3 __attribute__((ext_vector_type(3)));
+typedef int int4 __attribute__((ext_vector_type(4)));
+typedef int int8 __attribute__((ext_vector_type(8)));
+typedef int int16 __attribute__((ext_vector_type(16)));
+typedef uint uint2 __attribute__((ext_vector_type(2)));
+typedef uint uint3 __attribute__((ext_vector_type(3)));
+typedef uint uint4 __attribute__((ext_vector_type(4)));
+typedef uint uint8 __attribute__((ext_vector_type(8)));
+typedef uint uint16 __attribute__((ext_vector_type(16)));
+typedef long long2 __attribute__((ext_vector_type(2)));
+typedef long long3 __attribute__((ext_vector_type(3)));
+typedef long long4 __attribute__((ext_vector_type(4)));
+typedef long long8 __attribute__((ext_vector_type(8)));
+typedef long long16 __attribute__((ext_vector_type(16)));
+typedef ulong ulong2 __attribute__((ext_vector_type(2)));
+typedef ulong ulong3 __attribute__((ext_vector_type(3)));
+typedef ulong ulong4 __attribute__((ext_vector_type(4)));
+typedef ulong ulong8 __attribute__((ext_vector_type(8)));
+typedef ulong ulong16 __attribute__((ext_vector_type(16)));
+typedef float float2 __attribute__((ext_vector_type(2)));
+typedef float float3 __attribute__((ext_vector_type(3)));
+typedef float float4 __attribute__((ext_vector_type(4)));
+typedef float float8 __attribute__((ext_vector_type(8)));
+typedef float float16 __attribute__((ext_vector_type(16)));
+#ifdef cl_khr_fp16
+#pragma OPENCL EXTENSION cl_khr_fp16 : enable
+typedef half half2 __attribute__((ext_vector_type(2)));
+typedef half half3 __attribute__((ext_vector_type(3)));
+typedef half half4 __attribute__((ext_vector_type(4)));
+typedef half half8 __attribute__((ext_vector_type(8)));
+typedef half half16 __attribute__((ext_vector_type(16)));
+#endif
+#ifdef cl_khr_fp64
+#if __OPENCL_C_VERSION__ < CL_VERSION_1_2
+#pragma OPENCL EXTENSION cl_khr_fp64 : enable
+#endif
+typedef double double2 __attribute__((ext_vector_type(2)));
+typedef double double3 __attribute__((ext_vector_type(3)));
+typedef double double4 __attribute__((ext_vector_type(4)));
+typedef double double8 __attribute__((ext_vector_type(8)));
+typedef double double16 __attribute__((ext_vector_type(16)));
+#endif
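+
+// Usage sketch (added for illustration): the vector types support literal
+// construction and component swizzles, e.g.
+//   float4 v = (float4)(1.0f, 2.0f, 3.0f, 4.0f);
+//   float2 xy = v.xy;  // selects components 0 and 1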
+
+#if __OPENCL_C_VERSION__ >= CL_VERSION_2_0
+#define NULL ((void*)0)
+#endif
+
+/**
+ * Value of maximum non-infinite single-precision floating-point
+ * number.
+ */
+#define MAXFLOAT 0x1.fffffep127f
+
+/**
+ * A positive float constant expression. HUGE_VALF evaluates
+ * to +infinity. Used as an error value returned by the built-in
+ * math functions.
+ */
+#define HUGE_VALF (__builtin_huge_valf())
+
+/**
+ * A positive double constant expression. HUGE_VAL evaluates
+ * to +infinity. Used as an error value returned by the built-in
+ * math functions.
+ */
+#define HUGE_VAL (__builtin_huge_val())
+
+/**
+ * A constant expression of type float representing positive or
+ * unsigned infinity.
+ */
+#define INFINITY (__builtin_inff())
+
+/**
+ * A constant expression of type float representing a quiet NaN.
+ */
+#define NAN as_float(INT_MAX)
+
+#define FP_ILOGB0 INT_MIN
+#define FP_ILOGBNAN INT_MAX
+
+#define FLT_DIG 6
+#define FLT_MANT_DIG 24
+#define FLT_MAX_10_EXP +38
+#define FLT_MAX_EXP +128
+#define FLT_MIN_10_EXP -37
+#define FLT_MIN_EXP -125
+#define FLT_RADIX 2
+#define FLT_MAX 0x1.fffffep127f
+#define FLT_MIN 0x1.0p-126f
+#define FLT_EPSILON 0x1.0p-23f
+
+#define M_E_F 2.71828182845904523536028747135266250f
+#define M_LOG2E_F 1.44269504088896340735992468100189214f
+#define M_LOG10E_F 0.434294481903251827651128918916605082f
+#define M_LN2_F 0.693147180559945309417232121458176568f
+#define M_LN10_F 2.30258509299404568401799145468436421f
+#define M_PI_F 3.14159265358979323846264338327950288f
+#define M_PI_2_F 1.57079632679489661923132169163975144f
+#define M_PI_4_F 0.785398163397448309615660845819875721f
+#define M_1_PI_F 0.318309886183790671537767526745028724f
+#define M_2_PI_F 0.636619772367581343075535053490057448f
+#define M_2_SQRTPI_F 1.12837916709551257389615890312154517f
+#define M_SQRT2_F 1.41421356237309504880168872420969808f
+#define M_SQRT1_2_F 0.707106781186547524400844362104849039f
+
+#define DBL_DIG 15
+#define DBL_MANT_DIG 53
+#define DBL_MAX_10_EXP +308
+#define DBL_MAX_EXP +1024
+#define DBL_MIN_10_EXP -307
+#define DBL_MIN_EXP -1021
+#define DBL_RADIX 2
+#define DBL_MAX 0x1.fffffffffffffp1023
+#define DBL_MIN 0x1.0p-1022
+#define DBL_EPSILON 0x1.0p-52
+
+#define M_E 0x1.5bf0a8b145769p+1
+#define M_LOG2E 0x1.71547652b82fep+0
+#define M_LOG10E 0x1.bcb7b1526e50ep-2
+#define M_LN2 0x1.62e42fefa39efp-1
+#define M_LN10 0x1.26bb1bbb55516p+1
+#define M_PI 0x1.921fb54442d18p+1
+#define M_PI_2 0x1.921fb54442d18p+0
+#define M_PI_4 0x1.921fb54442d18p-1
+#define M_1_PI 0x1.45f306dc9c883p-2
+#define M_2_PI 0x1.45f306dc9c883p-1
+#define M_2_SQRTPI 0x1.20dd750429b6dp+0
+#define M_SQRT2 0x1.6a09e667f3bcdp+0
+#define M_SQRT1_2 0x1.6a09e667f3bcdp-1
+
+#ifdef cl_khr_fp16
+
+#define HALF_DIG 3
+#define HALF_MANT_DIG 11
+#define HALF_MAX_10_EXP +4
+#define HALF_MAX_EXP +16
+#define HALF_MIN_10_EXP -4
+#define HALF_MIN_EXP -13
+#define HALF_RADIX 2
+#define HALF_MAX ((0x1.ffcp15h))
+#define HALF_MIN ((0x1.0p-14h))
+#define HALF_EPSILON ((0x1.0p-10h))
+
+#define M_E_H 2.71828182845904523536028747135266250h
+#define M_LOG2E_H 1.44269504088896340735992468100189214h
+#define M_LOG10E_H 0.434294481903251827651128918916605082h
+#define M_LN2_H 0.693147180559945309417232121458176568h
+#define M_LN10_H 2.30258509299404568401799145468436421h
+#define M_PI_H 3.14159265358979323846264338327950288h
+#define M_PI_2_H 1.57079632679489661923132169163975144h
+#define M_PI_4_H 0.785398163397448309615660845819875721h
+#define M_1_PI_H 0.318309886183790671537767526745028724h
+#define M_2_PI_H 0.636619772367581343075535053490057448h
+#define M_2_SQRTPI_H 1.12837916709551257389615890312154517h
+#define M_SQRT2_H 1.41421356237309504880168872420969808h
+#define M_SQRT1_2_H 0.707106781186547524400844362104849039h
+
+#endif //cl_khr_fp16
+
+#define CHAR_BIT 8
+#define SCHAR_MAX 127
+#define SCHAR_MIN (-128)
+#define UCHAR_MAX 255
+#define CHAR_MAX SCHAR_MAX
+#define CHAR_MIN SCHAR_MIN
+#define USHRT_MAX 65535
+#define SHRT_MAX 32767
+#define SHRT_MIN (-32768)
+#define UINT_MAX 0xffffffff
+#define INT_MAX 2147483647
+#define INT_MIN (-2147483647-1)
+#define ULONG_MAX 0xffffffffffffffffUL
+#define LONG_MAX 0x7fffffffffffffffL
+#define LONG_MIN (-0x7fffffffffffffffL-1)
+
+// OpenCL v1.1/1.2/2.0 s6.2.3 - Explicit conversions
+
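+// Naming sketch (added for orientation, following OpenCL s6.2.3): each
+// conversion is spelled convert_<destType>[_sat][_<rounding>], where _sat
+// clamps out-of-range values and the rounding suffix is _rte (to nearest
+// even), _rtz (toward zero), _rtp (toward +infinity) or _rtn (toward
+// -infinity). For example, convert_char_sat_rte(float x) rounds x to
+// nearest-even and clamps the result to [SCHAR_MIN, SCHAR_MAX].
+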
+char __ovld __cnfn convert_char_rte(char);
+char __ovld __cnfn convert_char_sat_rte(char);
+char __ovld __cnfn convert_char_rtz(char);
+char __ovld __cnfn convert_char_sat_rtz(char);
+char __ovld __cnfn convert_char_rtp(char);
+char __ovld __cnfn convert_char_sat_rtp(char);
+char __ovld __cnfn convert_char_rtn(char);
+char __ovld __cnfn convert_char_sat_rtn(char);
+char __ovld __cnfn convert_char(char);
+char __ovld __cnfn convert_char_sat(char);
+char __ovld __cnfn convert_char_rte(uchar);
+char __ovld __cnfn convert_char_sat_rte(uchar);
+char __ovld __cnfn convert_char_rtz(uchar);
+char __ovld __cnfn convert_char_sat_rtz(uchar);
+char __ovld __cnfn convert_char_rtp(uchar);
+char __ovld __cnfn convert_char_sat_rtp(uchar);
+char __ovld __cnfn convert_char_rtn(uchar);
+char __ovld __cnfn convert_char_sat_rtn(uchar);
+char __ovld __cnfn convert_char(uchar);
+char __ovld __cnfn convert_char_sat(uchar);
+char __ovld __cnfn convert_char_rte(short);
+char __ovld __cnfn convert_char_sat_rte(short);
+char __ovld __cnfn convert_char_rtz(short);
+char __ovld __cnfn convert_char_sat_rtz(short);
+char __ovld __cnfn convert_char_rtp(short);
+char __ovld __cnfn convert_char_sat_rtp(short);
+char __ovld __cnfn convert_char_rtn(short);
+char __ovld __cnfn convert_char_sat_rtn(short);
+char __ovld __cnfn convert_char(short);
+char __ovld __cnfn convert_char_sat(short);
+char __ovld __cnfn convert_char_rte(ushort);
+char __ovld __cnfn convert_char_sat_rte(ushort);
+char __ovld __cnfn convert_char_rtz(ushort);
+char __ovld __cnfn convert_char_sat_rtz(ushort);
+char __ovld __cnfn convert_char_rtp(ushort);
+char __ovld __cnfn convert_char_sat_rtp(ushort);
+char __ovld __cnfn convert_char_rtn(ushort);
+char __ovld __cnfn convert_char_sat_rtn(ushort);
+char __ovld __cnfn convert_char(ushort);
+char __ovld __cnfn convert_char_sat(ushort);
+char __ovld __cnfn convert_char_rte(int);
+char __ovld __cnfn convert_char_sat_rte(int);
+char __ovld __cnfn convert_char_rtz(int);
+char __ovld __cnfn convert_char_sat_rtz(int);
+char __ovld __cnfn convert_char_rtp(int);
+char __ovld __cnfn convert_char_sat_rtp(int);
+char __ovld __cnfn convert_char_rtn(int);
+char __ovld __cnfn convert_char_sat_rtn(int);
+char __ovld __cnfn convert_char(int);
+char __ovld __cnfn convert_char_sat(int);
+char __ovld __cnfn convert_char_rte(uint);
+char __ovld __cnfn convert_char_sat_rte(uint);
+char __ovld __cnfn convert_char_rtz(uint);
+char __ovld __cnfn convert_char_sat_rtz(uint);
+char __ovld __cnfn convert_char_rtp(uint);
+char __ovld __cnfn convert_char_sat_rtp(uint);
+char __ovld __cnfn convert_char_rtn(uint);
+char __ovld __cnfn convert_char_sat_rtn(uint);
+char __ovld __cnfn convert_char(uint);
+char __ovld __cnfn convert_char_sat(uint);
+char __ovld __cnfn convert_char_rte(long);
+char __ovld __cnfn convert_char_sat_rte(long);
+char __ovld __cnfn convert_char_rtz(long);
+char __ovld __cnfn convert_char_sat_rtz(long);
+char __ovld __cnfn convert_char_rtp(long);
+char __ovld __cnfn convert_char_sat_rtp(long);
+char __ovld __cnfn convert_char_rtn(long);
+char __ovld __cnfn convert_char_sat_rtn(long);
+char __ovld __cnfn convert_char(long);
+char __ovld __cnfn convert_char_sat(long);
+char __ovld __cnfn convert_char_rte(ulong);
+char __ovld __cnfn convert_char_sat_rte(ulong);
+char __ovld __cnfn convert_char_rtz(ulong);
+char __ovld __cnfn convert_char_sat_rtz(ulong);
+char __ovld __cnfn convert_char_rtp(ulong);
+char __ovld __cnfn convert_char_sat_rtp(ulong);
+char __ovld __cnfn convert_char_rtn(ulong);
+char __ovld __cnfn convert_char_sat_rtn(ulong);
+char __ovld __cnfn convert_char(ulong);
+char __ovld __cnfn convert_char_sat(ulong);
+char __ovld __cnfn convert_char_rte(float);
+char __ovld __cnfn convert_char_sat_rte(float);
+char __ovld __cnfn convert_char_rtz(float);
+char __ovld __cnfn convert_char_sat_rtz(float);
+char __ovld __cnfn convert_char_rtp(float);
+char __ovld __cnfn convert_char_sat_rtp(float);
+char __ovld __cnfn convert_char_rtn(float);
+char __ovld __cnfn convert_char_sat_rtn(float);
+char __ovld __cnfn convert_char(float);
+char __ovld __cnfn convert_char_sat(float);
+uchar __ovld __cnfn convert_uchar_rte(char);
+uchar __ovld __cnfn convert_uchar_sat_rte(char);
+uchar __ovld __cnfn convert_uchar_rtz(char);
+uchar __ovld __cnfn convert_uchar_sat_rtz(char);
+uchar __ovld __cnfn convert_uchar_rtp(char);
+uchar __ovld __cnfn convert_uchar_sat_rtp(char);
+uchar __ovld __cnfn convert_uchar_rtn(char);
+uchar __ovld __cnfn convert_uchar_sat_rtn(char);
+uchar __ovld __cnfn convert_uchar(char);
+uchar __ovld __cnfn convert_uchar_sat(char);
+uchar __ovld __cnfn convert_uchar_rte(uchar);
+uchar __ovld __cnfn convert_uchar_sat_rte(uchar);
+uchar __ovld __cnfn convert_uchar_rtz(uchar);
+uchar __ovld __cnfn convert_uchar_sat_rtz(uchar);
+uchar __ovld __cnfn convert_uchar_rtp(uchar);
+uchar __ovld __cnfn convert_uchar_sat_rtp(uchar);
+uchar __ovld __cnfn convert_uchar_rtn(uchar);
+uchar __ovld __cnfn convert_uchar_sat_rtn(uchar);
+uchar __ovld __cnfn convert_uchar(uchar);
+uchar __ovld __cnfn convert_uchar_sat(uchar);
+uchar __ovld __cnfn convert_uchar_rte(short);
+uchar __ovld __cnfn convert_uchar_sat_rte(short);
+uchar __ovld __cnfn convert_uchar_rtz(short);
+uchar __ovld __cnfn convert_uchar_sat_rtz(short);
+uchar __ovld __cnfn convert_uchar_rtp(short);
+uchar __ovld __cnfn convert_uchar_sat_rtp(short);
+uchar __ovld __cnfn convert_uchar_rtn(short);
+uchar __ovld __cnfn convert_uchar_sat_rtn(short);
+uchar __ovld __cnfn convert_uchar(short);
+uchar __ovld __cnfn convert_uchar_sat(short);
+uchar __ovld __cnfn convert_uchar_rte(ushort);
+uchar __ovld __cnfn convert_uchar_sat_rte(ushort);
+uchar __ovld __cnfn convert_uchar_rtz(ushort);
+uchar __ovld __cnfn convert_uchar_sat_rtz(ushort);
+uchar __ovld __cnfn convert_uchar_rtp(ushort);
+uchar __ovld __cnfn convert_uchar_sat_rtp(ushort);
+uchar __ovld __cnfn convert_uchar_rtn(ushort);
+uchar __ovld __cnfn convert_uchar_sat_rtn(ushort);
+uchar __ovld __cnfn convert_uchar(ushort);
+uchar __ovld __cnfn convert_uchar_sat(ushort);
+uchar __ovld __cnfn convert_uchar_rte(int);
+uchar __ovld __cnfn convert_uchar_sat_rte(int);
+uchar __ovld __cnfn convert_uchar_rtz(int);
+uchar __ovld __cnfn convert_uchar_sat_rtz(int);
+uchar __ovld __cnfn convert_uchar_rtp(int);
+uchar __ovld __cnfn convert_uchar_sat_rtp(int);
+uchar __ovld __cnfn convert_uchar_rtn(int);
+uchar __ovld __cnfn convert_uchar_sat_rtn(int);
+uchar __ovld __cnfn convert_uchar(int);
+uchar __ovld __cnfn convert_uchar_sat(int);
+uchar __ovld __cnfn convert_uchar_rte(uint);
+uchar __ovld __cnfn convert_uchar_sat_rte(uint);
+uchar __ovld __cnfn convert_uchar_rtz(uint);
+uchar __ovld __cnfn convert_uchar_sat_rtz(uint);
+uchar __ovld __cnfn convert_uchar_rtp(uint);
+uchar __ovld __cnfn convert_uchar_sat_rtp(uint);
+uchar __ovld __cnfn convert_uchar_rtn(uint);
+uchar __ovld __cnfn convert_uchar_sat_rtn(uint);
+uchar __ovld __cnfn convert_uchar(uint);
+uchar __ovld __cnfn convert_uchar_sat(uint);
+uchar __ovld __cnfn convert_uchar_rte(long);
+uchar __ovld __cnfn convert_uchar_sat_rte(long);
+uchar __ovld __cnfn convert_uchar_rtz(long);
+uchar __ovld __cnfn convert_uchar_sat_rtz(long);
+uchar __ovld __cnfn convert_uchar_rtp(long);
+uchar __ovld __cnfn convert_uchar_sat_rtp(long);
+uchar __ovld __cnfn convert_uchar_rtn(long);
+uchar __ovld __cnfn convert_uchar_sat_rtn(long);
+uchar __ovld __cnfn convert_uchar(long);
+uchar __ovld __cnfn convert_uchar_sat(long);
+uchar __ovld __cnfn convert_uchar_rte(ulong);
+uchar __ovld __cnfn convert_uchar_sat_rte(ulong);
+uchar __ovld __cnfn convert_uchar_rtz(ulong);
+uchar __ovld __cnfn convert_uchar_sat_rtz(ulong);
+uchar __ovld __cnfn convert_uchar_rtp(ulong);
+uchar __ovld __cnfn convert_uchar_sat_rtp(ulong);
+uchar __ovld __cnfn convert_uchar_rtn(ulong);
+uchar __ovld __cnfn convert_uchar_sat_rtn(ulong);
+uchar __ovld __cnfn convert_uchar(ulong);
+uchar __ovld __cnfn convert_uchar_sat(ulong);
+uchar __ovld __cnfn convert_uchar_rte(float);
+uchar __ovld __cnfn convert_uchar_sat_rte(float);
+uchar __ovld __cnfn convert_uchar_rtz(float);
+uchar __ovld __cnfn convert_uchar_sat_rtz(float);
+uchar __ovld __cnfn convert_uchar_rtp(float);
+uchar __ovld __cnfn convert_uchar_sat_rtp(float);
+uchar __ovld __cnfn convert_uchar_rtn(float);
+uchar __ovld __cnfn convert_uchar_sat_rtn(float);
+uchar __ovld __cnfn convert_uchar(float);
+uchar __ovld __cnfn convert_uchar_sat(float);
+
+short __ovld __cnfn convert_short_rte(char);
+short __ovld __cnfn convert_short_sat_rte(char);
+short __ovld __cnfn convert_short_rtz(char);
+short __ovld __cnfn convert_short_sat_rtz(char);
+short __ovld __cnfn convert_short_rtp(char);
+short __ovld __cnfn convert_short_sat_rtp(char);
+short __ovld __cnfn convert_short_rtn(char);
+short __ovld __cnfn convert_short_sat_rtn(char);
+short __ovld __cnfn convert_short(char);
+short __ovld __cnfn convert_short_sat(char);
+short __ovld __cnfn convert_short_rte(uchar);
+short __ovld __cnfn convert_short_sat_rte(uchar);
+short __ovld __cnfn convert_short_rtz(uchar);
+short __ovld __cnfn convert_short_sat_rtz(uchar);
+short __ovld __cnfn convert_short_rtp(uchar);
+short __ovld __cnfn convert_short_sat_rtp(uchar);
+short __ovld __cnfn convert_short_rtn(uchar);
+short __ovld __cnfn convert_short_sat_rtn(uchar);
+short __ovld __cnfn convert_short(uchar);
+short __ovld __cnfn convert_short_sat(uchar);
+short __ovld __cnfn convert_short_rte(short);
+short __ovld __cnfn convert_short_sat_rte(short);
+short __ovld __cnfn convert_short_rtz(short);
+short __ovld __cnfn convert_short_sat_rtz(short);
+short __ovld __cnfn convert_short_rtp(short);
+short __ovld __cnfn convert_short_sat_rtp(short);
+short __ovld __cnfn convert_short_rtn(short);
+short __ovld __cnfn convert_short_sat_rtn(short);
+short __ovld __cnfn convert_short(short);
+short __ovld __cnfn convert_short_sat(short);
+short __ovld __cnfn convert_short_rte(ushort);
+short __ovld __cnfn convert_short_sat_rte(ushort);
+short __ovld __cnfn convert_short_rtz(ushort);
+short __ovld __cnfn convert_short_sat_rtz(ushort);
+short __ovld __cnfn convert_short_rtp(ushort);
+short __ovld __cnfn convert_short_sat_rtp(ushort);
+short __ovld __cnfn convert_short_rtn(ushort);
+short __ovld __cnfn convert_short_sat_rtn(ushort);
+short __ovld __cnfn convert_short(ushort);
+short __ovld __cnfn convert_short_sat(ushort);
+short __ovld __cnfn convert_short_rte(int);
+short __ovld __cnfn convert_short_sat_rte(int);
+short __ovld __cnfn convert_short_rtz(int);
+short __ovld __cnfn convert_short_sat_rtz(int);
+short __ovld __cnfn convert_short_rtp(int);
+short __ovld __cnfn convert_short_sat_rtp(int);
+short __ovld __cnfn convert_short_rtn(int);
+short __ovld __cnfn convert_short_sat_rtn(int);
+short __ovld __cnfn convert_short(int);
+short __ovld __cnfn convert_short_sat(int);
+short __ovld __cnfn convert_short_rte(uint);
+short __ovld __cnfn convert_short_sat_rte(uint);
+short __ovld __cnfn convert_short_rtz(uint);
+short __ovld __cnfn convert_short_sat_rtz(uint);
+short __ovld __cnfn convert_short_rtp(uint);
+short __ovld __cnfn convert_short_sat_rtp(uint);
+short __ovld __cnfn convert_short_rtn(uint);
+short __ovld __cnfn convert_short_sat_rtn(uint);
+short __ovld __cnfn convert_short(uint);
+short __ovld __cnfn convert_short_sat(uint);
+short __ovld __cnfn convert_short_rte(long);
+short __ovld __cnfn convert_short_sat_rte(long);
+short __ovld __cnfn convert_short_rtz(long);
+short __ovld __cnfn convert_short_sat_rtz(long);
+short __ovld __cnfn convert_short_rtp(long);
+short __ovld __cnfn convert_short_sat_rtp(long);
+short __ovld __cnfn convert_short_rtn(long);
+short __ovld __cnfn convert_short_sat_rtn(long);
+short __ovld __cnfn convert_short(long);
+short __ovld __cnfn convert_short_sat(long);
+short __ovld __cnfn convert_short_rte(ulong);
+short __ovld __cnfn convert_short_sat_rte(ulong);
+short __ovld __cnfn convert_short_rtz(ulong);
+short __ovld __cnfn convert_short_sat_rtz(ulong);
+short __ovld __cnfn convert_short_rtp(ulong);
+short __ovld __cnfn convert_short_sat_rtp(ulong);
+short __ovld __cnfn convert_short_rtn(ulong);
+short __ovld __cnfn convert_short_sat_rtn(ulong);
+short __ovld __cnfn convert_short(ulong);
+short __ovld __cnfn convert_short_sat(ulong);
+short __ovld __cnfn convert_short_rte(float);
+short __ovld __cnfn convert_short_sat_rte(float);
+short __ovld __cnfn convert_short_rtz(float);
+short __ovld __cnfn convert_short_sat_rtz(float);
+short __ovld __cnfn convert_short_rtp(float);
+short __ovld __cnfn convert_short_sat_rtp(float);
+short __ovld __cnfn convert_short_rtn(float);
+short __ovld __cnfn convert_short_sat_rtn(float);
+short __ovld __cnfn convert_short(float);
+short __ovld __cnfn convert_short_sat(float);
+ushort __ovld __cnfn convert_ushort_rte(char);
+ushort __ovld __cnfn convert_ushort_sat_rte(char);
+ushort __ovld __cnfn convert_ushort_rtz(char);
+ushort __ovld __cnfn convert_ushort_sat_rtz(char);
+ushort __ovld __cnfn convert_ushort_rtp(char);
+ushort __ovld __cnfn convert_ushort_sat_rtp(char);
+ushort __ovld __cnfn convert_ushort_rtn(char);
+ushort __ovld __cnfn convert_ushort_sat_rtn(char);
+ushort __ovld __cnfn convert_ushort(char);
+ushort __ovld __cnfn convert_ushort_sat(char);
+ushort __ovld __cnfn convert_ushort_rte(uchar);
+ushort __ovld __cnfn convert_ushort_sat_rte(uchar);
+ushort __ovld __cnfn convert_ushort_rtz(uchar);
+ushort __ovld __cnfn convert_ushort_sat_rtz(uchar);
+ushort __ovld __cnfn convert_ushort_rtp(uchar);
+ushort __ovld __cnfn convert_ushort_sat_rtp(uchar);
+ushort __ovld __cnfn convert_ushort_rtn(uchar);
+ushort __ovld __cnfn convert_ushort_sat_rtn(uchar);
+ushort __ovld __cnfn convert_ushort(uchar);
+ushort __ovld __cnfn convert_ushort_sat(uchar);
+ushort __ovld __cnfn convert_ushort_rte(short);
+ushort __ovld __cnfn convert_ushort_sat_rte(short);
+ushort __ovld __cnfn convert_ushort_rtz(short);
+ushort __ovld __cnfn convert_ushort_sat_rtz(short);
+ushort __ovld __cnfn convert_ushort_rtp(short);
+ushort __ovld __cnfn convert_ushort_sat_rtp(short);
+ushort __ovld __cnfn convert_ushort_rtn(short);
+ushort __ovld __cnfn convert_ushort_sat_rtn(short);
+ushort __ovld __cnfn convert_ushort(short);
+ushort __ovld __cnfn convert_ushort_sat(short);
+ushort __ovld __cnfn convert_ushort_rte(ushort);
+ushort __ovld __cnfn convert_ushort_sat_rte(ushort);
+ushort __ovld __cnfn convert_ushort_rtz(ushort);
+ushort __ovld __cnfn convert_ushort_sat_rtz(ushort);
+ushort __ovld __cnfn convert_ushort_rtp(ushort);
+ushort __ovld __cnfn convert_ushort_sat_rtp(ushort);
+ushort __ovld __cnfn convert_ushort_rtn(ushort);
+ushort __ovld __cnfn convert_ushort_sat_rtn(ushort);
+ushort __ovld __cnfn convert_ushort(ushort);
+ushort __ovld __cnfn convert_ushort_sat(ushort);
+ushort __ovld __cnfn convert_ushort_rte(int);
+ushort __ovld __cnfn convert_ushort_sat_rte(int);
+ushort __ovld __cnfn convert_ushort_rtz(int);
+ushort __ovld __cnfn convert_ushort_sat_rtz(int);
+ushort __ovld __cnfn convert_ushort_rtp(int);
+ushort __ovld __cnfn convert_ushort_sat_rtp(int);
+ushort __ovld __cnfn convert_ushort_rtn(int);
+ushort __ovld __cnfn convert_ushort_sat_rtn(int);
+ushort __ovld __cnfn convert_ushort(int);
+ushort __ovld __cnfn convert_ushort_sat(int);
+ushort __ovld __cnfn convert_ushort_rte(uint);
+ushort __ovld __cnfn convert_ushort_sat_rte(uint);
+ushort __ovld __cnfn convert_ushort_rtz(uint);
+ushort __ovld __cnfn convert_ushort_sat_rtz(uint);
+ushort __ovld __cnfn convert_ushort_rtp(uint);
+ushort __ovld __cnfn convert_ushort_sat_rtp(uint);
+ushort __ovld __cnfn convert_ushort_rtn(uint);
+ushort __ovld __cnfn convert_ushort_sat_rtn(uint);
+ushort __ovld __cnfn convert_ushort(uint);
+ushort __ovld __cnfn convert_ushort_sat(uint);
+ushort __ovld __cnfn convert_ushort_rte(long);
+ushort __ovld __cnfn convert_ushort_sat_rte(long);
+ushort __ovld __cnfn convert_ushort_rtz(long);
+ushort __ovld __cnfn convert_ushort_sat_rtz(long);
+ushort __ovld __cnfn convert_ushort_rtp(long);
+ushort __ovld __cnfn convert_ushort_sat_rtp(long);
+ushort __ovld __cnfn convert_ushort_rtn(long);
+ushort __ovld __cnfn convert_ushort_sat_rtn(long);
+ushort __ovld __cnfn convert_ushort(long);
+ushort __ovld __cnfn convert_ushort_sat(long);
+ushort __ovld __cnfn convert_ushort_rte(ulong);
+ushort __ovld __cnfn convert_ushort_sat_rte(ulong);
+ushort __ovld __cnfn convert_ushort_rtz(ulong);
+ushort __ovld __cnfn convert_ushort_sat_rtz(ulong);
+ushort __ovld __cnfn convert_ushort_rtp(ulong);
+ushort __ovld __cnfn convert_ushort_sat_rtp(ulong);
+ushort __ovld __cnfn convert_ushort_rtn(ulong);
+ushort __ovld __cnfn convert_ushort_sat_rtn(ulong);
+ushort __ovld __cnfn convert_ushort(ulong);
+ushort __ovld __cnfn convert_ushort_sat(ulong);
+ushort __ovld __cnfn convert_ushort_rte(float);
+ushort __ovld __cnfn convert_ushort_sat_rte(float);
+ushort __ovld __cnfn convert_ushort_rtz(float);
+ushort __ovld __cnfn convert_ushort_sat_rtz(float);
+ushort __ovld __cnfn convert_ushort_rtp(float);
+ushort __ovld __cnfn convert_ushort_sat_rtp(float);
+ushort __ovld __cnfn convert_ushort_rtn(float);
+ushort __ovld __cnfn convert_ushort_sat_rtn(float);
+ushort __ovld __cnfn convert_ushort(float);
+ushort __ovld __cnfn convert_ushort_sat(float);
+int __ovld __cnfn convert_int_rte(char);
+int __ovld __cnfn convert_int_sat_rte(char);
+int __ovld __cnfn convert_int_rtz(char);
+int __ovld __cnfn convert_int_sat_rtz(char);
+int __ovld __cnfn convert_int_rtp(char);
+int __ovld __cnfn convert_int_sat_rtp(char);
+int __ovld __cnfn convert_int_rtn(char);
+int __ovld __cnfn convert_int_sat_rtn(char);
+int __ovld __cnfn convert_int(char);
+int __ovld __cnfn convert_int_sat(char);
+int __ovld __cnfn convert_int_rte(uchar);
+int __ovld __cnfn convert_int_sat_rte(uchar);
+int __ovld __cnfn convert_int_rtz(uchar);
+int __ovld __cnfn convert_int_sat_rtz(uchar);
+int __ovld __cnfn convert_int_rtp(uchar);
+int __ovld __cnfn convert_int_sat_rtp(uchar);
+int __ovld __cnfn convert_int_rtn(uchar);
+int __ovld __cnfn convert_int_sat_rtn(uchar);
+int __ovld __cnfn convert_int(uchar);
+int __ovld __cnfn convert_int_sat(uchar);
+int __ovld __cnfn convert_int_rte(short);
+int __ovld __cnfn convert_int_sat_rte(short);
+int __ovld __cnfn convert_int_rtz(short);
+int __ovld __cnfn convert_int_sat_rtz(short);
+int __ovld __cnfn convert_int_rtp(short);
+int __ovld __cnfn convert_int_sat_rtp(short);
+int __ovld __cnfn convert_int_rtn(short);
+int __ovld __cnfn convert_int_sat_rtn(short);
+int __ovld __cnfn convert_int(short);
+int __ovld __cnfn convert_int_sat(short);
+int __ovld __cnfn convert_int_rte(ushort);
+int __ovld __cnfn convert_int_sat_rte(ushort);
+int __ovld __cnfn convert_int_rtz(ushort);
+int __ovld __cnfn convert_int_sat_rtz(ushort);
+int __ovld __cnfn convert_int_rtp(ushort);
+int __ovld __cnfn convert_int_sat_rtp(ushort);
+int __ovld __cnfn convert_int_rtn(ushort);
+int __ovld __cnfn convert_int_sat_rtn(ushort);
+int __ovld __cnfn convert_int(ushort);
+int __ovld __cnfn convert_int_sat(ushort);
+int __ovld __cnfn convert_int_rte(int);
+int __ovld __cnfn convert_int_sat_rte(int);
+int __ovld __cnfn convert_int_rtz(int);
+int __ovld __cnfn convert_int_sat_rtz(int);
+int __ovld __cnfn convert_int_rtp(int);
+int __ovld __cnfn convert_int_sat_rtp(int);
+int __ovld __cnfn convert_int_rtn(int);
+int __ovld __cnfn convert_int_sat_rtn(int);
+int __ovld __cnfn convert_int(int);
+int __ovld __cnfn convert_int_sat(int);
+int __ovld __cnfn convert_int_rte(uint);
+int __ovld __cnfn convert_int_sat_rte(uint);
+int __ovld __cnfn convert_int_rtz(uint);
+int __ovld __cnfn convert_int_sat_rtz(uint);
+int __ovld __cnfn convert_int_rtp(uint);
+int __ovld __cnfn convert_int_sat_rtp(uint);
+int __ovld __cnfn convert_int_rtn(uint);
+int __ovld __cnfn convert_int_sat_rtn(uint);
+int __ovld __cnfn convert_int(uint);
+int __ovld __cnfn convert_int_sat(uint);
+int __ovld __cnfn convert_int_rte(long);
+int __ovld __cnfn convert_int_sat_rte(long);
+int __ovld __cnfn convert_int_rtz(long);
+int __ovld __cnfn convert_int_sat_rtz(long);
+int __ovld __cnfn convert_int_rtp(long);
+int __ovld __cnfn convert_int_sat_rtp(long);
+int __ovld __cnfn convert_int_rtn(long);
+int __ovld __cnfn convert_int_sat_rtn(long);
+int __ovld __cnfn convert_int(long);
+int __ovld __cnfn convert_int_sat(long);
+int __ovld __cnfn convert_int_rte(ulong);
+int __ovld __cnfn convert_int_sat_rte(ulong);
+int __ovld __cnfn convert_int_rtz(ulong);
+int __ovld __cnfn convert_int_sat_rtz(ulong);
+int __ovld __cnfn convert_int_rtp(ulong);
+int __ovld __cnfn convert_int_sat_rtp(ulong);
+int __ovld __cnfn convert_int_rtn(ulong);
+int __ovld __cnfn convert_int_sat_rtn(ulong);
+int __ovld __cnfn convert_int(ulong);
+int __ovld __cnfn convert_int_sat(ulong);
+int __ovld __cnfn convert_int_rte(float);
+int __ovld __cnfn convert_int_sat_rte(float);
+int __ovld __cnfn convert_int_rtz(float);
+int __ovld __cnfn convert_int_sat_rtz(float);
+int __ovld __cnfn convert_int_rtp(float);
+int __ovld __cnfn convert_int_sat_rtp(float);
+int __ovld __cnfn convert_int_rtn(float);
+int __ovld __cnfn convert_int_sat_rtn(float);
+int __ovld __cnfn convert_int(float);
+int __ovld __cnfn convert_int_sat(float);
+uint __ovld __cnfn convert_uint_rte(char);
+uint __ovld __cnfn convert_uint_sat_rte(char);
+uint __ovld __cnfn convert_uint_rtz(char);
+uint __ovld __cnfn convert_uint_sat_rtz(char);
+uint __ovld __cnfn convert_uint_rtp(char);
+uint __ovld __cnfn convert_uint_sat_rtp(char);
+uint __ovld __cnfn convert_uint_rtn(char);
+uint __ovld __cnfn convert_uint_sat_rtn(char);
+uint __ovld __cnfn convert_uint(char);
+uint __ovld __cnfn convert_uint_sat(char);
+uint __ovld __cnfn convert_uint_rte(uchar);
+uint __ovld __cnfn convert_uint_sat_rte(uchar);
+uint __ovld __cnfn convert_uint_rtz(uchar);
+uint __ovld __cnfn convert_uint_sat_rtz(uchar);
+uint __ovld __cnfn convert_uint_rtp(uchar);
+uint __ovld __cnfn convert_uint_sat_rtp(uchar);
+uint __ovld __cnfn convert_uint_rtn(uchar);
+uint __ovld __cnfn convert_uint_sat_rtn(uchar);
+uint __ovld __cnfn convert_uint(uchar);
+uint __ovld __cnfn convert_uint_sat(uchar);
+uint __ovld __cnfn convert_uint_rte(short);
+uint __ovld __cnfn convert_uint_sat_rte(short);
+uint __ovld __cnfn convert_uint_rtz(short);
+uint __ovld __cnfn convert_uint_sat_rtz(short);
+uint __ovld __cnfn convert_uint_rtp(short);
+uint __ovld __cnfn convert_uint_sat_rtp(short);
+uint __ovld __cnfn convert_uint_rtn(short);
+uint __ovld __cnfn convert_uint_sat_rtn(short);
+uint __ovld __cnfn convert_uint(short);
+uint __ovld __cnfn convert_uint_sat(short);
+uint __ovld __cnfn convert_uint_rte(ushort);
+uint __ovld __cnfn convert_uint_sat_rte(ushort);
+uint __ovld __cnfn convert_uint_rtz(ushort);
+uint __ovld __cnfn convert_uint_sat_rtz(ushort);
+uint __ovld __cnfn convert_uint_rtp(ushort);
+uint __ovld __cnfn convert_uint_sat_rtp(ushort);
+uint __ovld __cnfn convert_uint_rtn(ushort);
+uint __ovld __cnfn convert_uint_sat_rtn(ushort);
+uint __ovld __cnfn convert_uint(ushort);
+uint __ovld __cnfn convert_uint_sat(ushort);
+uint __ovld __cnfn convert_uint_rte(int);
+uint __ovld __cnfn convert_uint_sat_rte(int);
+uint __ovld __cnfn convert_uint_rtz(int);
+uint __ovld __cnfn convert_uint_sat_rtz(int);
+uint __ovld __cnfn convert_uint_rtp(int);
+uint __ovld __cnfn convert_uint_sat_rtp(int);
+uint __ovld __cnfn convert_uint_rtn(int);
+uint __ovld __cnfn convert_uint_sat_rtn(int);
+uint __ovld __cnfn convert_uint(int);
+uint __ovld __cnfn convert_uint_sat(int);
+uint __ovld __cnfn convert_uint_rte(uint);
+uint __ovld __cnfn convert_uint_sat_rte(uint);
+uint __ovld __cnfn convert_uint_rtz(uint);
+uint __ovld __cnfn convert_uint_sat_rtz(uint);
+uint __ovld __cnfn convert_uint_rtp(uint);
+uint __ovld __cnfn convert_uint_sat_rtp(uint);
+uint __ovld __cnfn convert_uint_rtn(uint);
+uint __ovld __cnfn convert_uint_sat_rtn(uint);
+uint __ovld __cnfn convert_uint(uint);
+uint __ovld __cnfn convert_uint_sat(uint);
+uint __ovld __cnfn convert_uint_rte(long);
+uint __ovld __cnfn convert_uint_sat_rte(long);
+uint __ovld __cnfn convert_uint_rtz(long);
+uint __ovld __cnfn convert_uint_sat_rtz(long);
+uint __ovld __cnfn convert_uint_rtp(long);
+uint __ovld __cnfn convert_uint_sat_rtp(long);
+uint __ovld __cnfn convert_uint_rtn(long);
+uint __ovld __cnfn convert_uint_sat_rtn(long);
+uint __ovld __cnfn convert_uint(long);
+uint __ovld __cnfn convert_uint_sat(long);
+uint __ovld __cnfn convert_uint_rte(ulong);
+uint __ovld __cnfn convert_uint_sat_rte(ulong);
+uint __ovld __cnfn convert_uint_rtz(ulong);
+uint __ovld __cnfn convert_uint_sat_rtz(ulong);
+uint __ovld __cnfn convert_uint_rtp(ulong);
+uint __ovld __cnfn convert_uint_sat_rtp(ulong);
+uint __ovld __cnfn convert_uint_rtn(ulong);
+uint __ovld __cnfn convert_uint_sat_rtn(ulong);
+uint __ovld __cnfn convert_uint(ulong);
+uint __ovld __cnfn convert_uint_sat(ulong);
+uint __ovld __cnfn convert_uint_rte(float);
+uint __ovld __cnfn convert_uint_sat_rte(float);
+uint __ovld __cnfn convert_uint_rtz(float);
+uint __ovld __cnfn convert_uint_sat_rtz(float);
+uint __ovld __cnfn convert_uint_rtp(float);
+uint __ovld __cnfn convert_uint_sat_rtp(float);
+uint __ovld __cnfn convert_uint_rtn(float);
+uint __ovld __cnfn convert_uint_sat_rtn(float);
+uint __ovld __cnfn convert_uint(float);
+uint __ovld __cnfn convert_uint_sat(float);
+long __ovld __cnfn convert_long_rte(char);
+long __ovld __cnfn convert_long_sat_rte(char);
+long __ovld __cnfn convert_long_rtz(char);
+long __ovld __cnfn convert_long_sat_rtz(char);
+long __ovld __cnfn convert_long_rtp(char);
+long __ovld __cnfn convert_long_sat_rtp(char);
+long __ovld __cnfn convert_long_rtn(char);
+long __ovld __cnfn convert_long_sat_rtn(char);
+long __ovld __cnfn convert_long(char);
+long __ovld __cnfn convert_long_sat(char);
+long __ovld __cnfn convert_long_rte(uchar);
+long __ovld __cnfn convert_long_sat_rte(uchar);
+long __ovld __cnfn convert_long_rtz(uchar);
+long __ovld __cnfn convert_long_sat_rtz(uchar);
+long __ovld __cnfn convert_long_rtp(uchar);
+long __ovld __cnfn convert_long_sat_rtp(uchar);
+long __ovld __cnfn convert_long_rtn(uchar);
+long __ovld __cnfn convert_long_sat_rtn(uchar);
+long __ovld __cnfn convert_long(uchar);
+long __ovld __cnfn convert_long_sat(uchar);
+long __ovld __cnfn convert_long_rte(short);
+long __ovld __cnfn convert_long_sat_rte(short);
+long __ovld __cnfn convert_long_rtz(short);
+long __ovld __cnfn convert_long_sat_rtz(short);
+long __ovld __cnfn convert_long_rtp(short);
+long __ovld __cnfn convert_long_sat_rtp(short);
+long __ovld __cnfn convert_long_rtn(short);
+long __ovld __cnfn convert_long_sat_rtn(short);
+long __ovld __cnfn convert_long(short);
+long __ovld __cnfn convert_long_sat(short);
+long __ovld __cnfn convert_long_rte(ushort);
+long __ovld __cnfn convert_long_sat_rte(ushort);
+long __ovld __cnfn convert_long_rtz(ushort);
+long __ovld __cnfn convert_long_sat_rtz(ushort);
+long __ovld __cnfn convert_long_rtp(ushort);
+long __ovld __cnfn convert_long_sat_rtp(ushort);
+long __ovld __cnfn convert_long_rtn(ushort);
+long __ovld __cnfn convert_long_sat_rtn(ushort);
+long __ovld __cnfn convert_long(ushort);
+long __ovld __cnfn convert_long_sat(ushort);
+long __ovld __cnfn convert_long_rte(int);
+long __ovld __cnfn convert_long_sat_rte(int);
+long __ovld __cnfn convert_long_rtz(int);
+long __ovld __cnfn convert_long_sat_rtz(int);
+long __ovld __cnfn convert_long_rtp(int);
+long __ovld __cnfn convert_long_sat_rtp(int);
+long __ovld __cnfn convert_long_rtn(int);
+long __ovld __cnfn convert_long_sat_rtn(int);
+long __ovld __cnfn convert_long(int);
+long __ovld __cnfn convert_long_sat(int);
+long __ovld __cnfn convert_long_rte(uint);
+long __ovld __cnfn convert_long_sat_rte(uint);
+long __ovld __cnfn convert_long_rtz(uint);
+long __ovld __cnfn convert_long_sat_rtz(uint);
+long __ovld __cnfn convert_long_rtp(uint);
+long __ovld __cnfn convert_long_sat_rtp(uint);
+long __ovld __cnfn convert_long_rtn(uint);
+long __ovld __cnfn convert_long_sat_rtn(uint);
+long __ovld __cnfn convert_long(uint);
+long __ovld __cnfn convert_long_sat(uint);
+long __ovld __cnfn convert_long_rte(long);
+long __ovld __cnfn convert_long_sat_rte(long);
+long __ovld __cnfn convert_long_rtz(long);
+long __ovld __cnfn convert_long_sat_rtz(long);
+long __ovld __cnfn convert_long_rtp(long);
+long __ovld __cnfn convert_long_sat_rtp(long);
+long __ovld __cnfn convert_long_rtn(long);
+long __ovld __cnfn convert_long_sat_rtn(long);
+long __ovld __cnfn convert_long(long);
+long __ovld __cnfn convert_long_sat(long);
+long __ovld __cnfn convert_long_rte(ulong);
+long __ovld __cnfn convert_long_sat_rte(ulong);
+long __ovld __cnfn convert_long_rtz(ulong);
+long __ovld __cnfn convert_long_sat_rtz(ulong);
+long __ovld __cnfn convert_long_rtp(ulong);
+long __ovld __cnfn convert_long_sat_rtp(ulong);
+long __ovld __cnfn convert_long_rtn(ulong);
+long __ovld __cnfn convert_long_sat_rtn(ulong);
+long __ovld __cnfn convert_long(ulong);
+long __ovld __cnfn convert_long_sat(ulong);
+long __ovld __cnfn convert_long_rte(float);
+long __ovld __cnfn convert_long_sat_rte(float);
+long __ovld __cnfn convert_long_rtz(float);
+long __ovld __cnfn convert_long_sat_rtz(float);
+long __ovld __cnfn convert_long_rtp(float);
+long __ovld __cnfn convert_long_sat_rtp(float);
+long __ovld __cnfn convert_long_rtn(float);
+long __ovld __cnfn convert_long_sat_rtn(float);
+long __ovld __cnfn convert_long(float);
+long __ovld __cnfn convert_long_sat(float);
+ulong __ovld __cnfn convert_ulong_rte(char);
+ulong __ovld __cnfn convert_ulong_sat_rte(char);
+ulong __ovld __cnfn convert_ulong_rtz(char);
+ulong __ovld __cnfn convert_ulong_sat_rtz(char);
+ulong __ovld __cnfn convert_ulong_rtp(char);
+ulong __ovld __cnfn convert_ulong_sat_rtp(char);
+ulong __ovld __cnfn convert_ulong_rtn(char);
+ulong __ovld __cnfn convert_ulong_sat_rtn(char);
+ulong __ovld __cnfn convert_ulong(char);
+ulong __ovld __cnfn convert_ulong_sat(char);
+ulong __ovld __cnfn convert_ulong_rte(uchar);
+ulong __ovld __cnfn convert_ulong_sat_rte(uchar);
+ulong __ovld __cnfn convert_ulong_rtz(uchar);
+ulong __ovld __cnfn convert_ulong_sat_rtz(uchar);
+ulong __ovld __cnfn convert_ulong_rtp(uchar);
+ulong __ovld __cnfn convert_ulong_sat_rtp(uchar);
+ulong __ovld __cnfn convert_ulong_rtn(uchar);
+ulong __ovld __cnfn convert_ulong_sat_rtn(uchar);
+ulong __ovld __cnfn convert_ulong(uchar);
+ulong __ovld __cnfn convert_ulong_sat(uchar);
+ulong __ovld __cnfn convert_ulong_rte(short);
+ulong __ovld __cnfn convert_ulong_sat_rte(short);
+ulong __ovld __cnfn convert_ulong_rtz(short);
+ulong __ovld __cnfn convert_ulong_sat_rtz(short);
+ulong __ovld __cnfn convert_ulong_rtp(short);
+ulong __ovld __cnfn convert_ulong_sat_rtp(short);
+ulong __ovld __cnfn convert_ulong_rtn(short);
+ulong __ovld __cnfn convert_ulong_sat_rtn(short);
+ulong __ovld __cnfn convert_ulong(short);
+ulong __ovld __cnfn convert_ulong_sat(short);
+ulong __ovld __cnfn convert_ulong_rte(ushort);
+ulong __ovld __cnfn convert_ulong_sat_rte(ushort);
+ulong __ovld __cnfn convert_ulong_rtz(ushort);
+ulong __ovld __cnfn convert_ulong_sat_rtz(ushort);
+ulong __ovld __cnfn convert_ulong_rtp(ushort);
+ulong __ovld __cnfn convert_ulong_sat_rtp(ushort);
+ulong __ovld __cnfn convert_ulong_rtn(ushort);
+ulong __ovld __cnfn convert_ulong_sat_rtn(ushort);
+ulong __ovld __cnfn convert_ulong(ushort);
+ulong __ovld __cnfn convert_ulong_sat(ushort);
+ulong __ovld __cnfn convert_ulong_rte(int);
+ulong __ovld __cnfn convert_ulong_sat_rte(int);
+ulong __ovld __cnfn convert_ulong_rtz(int);
+ulong __ovld __cnfn convert_ulong_sat_rtz(int);
+ulong __ovld __cnfn convert_ulong_rtp(int);
+ulong __ovld __cnfn convert_ulong_sat_rtp(int);
+ulong __ovld __cnfn convert_ulong_rtn(int);
+ulong __ovld __cnfn convert_ulong_sat_rtn(int);
+ulong __ovld __cnfn convert_ulong(int);
+ulong __ovld __cnfn convert_ulong_sat(int);
+ulong __ovld __cnfn convert_ulong_rte(uint);
+ulong __ovld __cnfn convert_ulong_sat_rte(uint);
+ulong __ovld __cnfn convert_ulong_rtz(uint);
+ulong __ovld __cnfn convert_ulong_sat_rtz(uint);
+ulong __ovld __cnfn convert_ulong_rtp(uint);
+ulong __ovld __cnfn convert_ulong_sat_rtp(uint);
+ulong __ovld __cnfn convert_ulong_rtn(uint);
+ulong __ovld __cnfn convert_ulong_sat_rtn(uint);
+ulong __ovld __cnfn convert_ulong(uint);
+ulong __ovld __cnfn convert_ulong_sat(uint);
+ulong __ovld __cnfn convert_ulong_rte(long);
+ulong __ovld __cnfn convert_ulong_sat_rte(long);
+ulong __ovld __cnfn convert_ulong_rtz(long);
+ulong __ovld __cnfn convert_ulong_sat_rtz(long);
+ulong __ovld __cnfn convert_ulong_rtp(long);
+ulong __ovld __cnfn convert_ulong_sat_rtp(long);
+ulong __ovld __cnfn convert_ulong_rtn(long);
+ulong __ovld __cnfn convert_ulong_sat_rtn(long);
+ulong __ovld __cnfn convert_ulong(long);
+ulong __ovld __cnfn convert_ulong_sat(long);
+ulong __ovld __cnfn convert_ulong_rte(ulong);
+ulong __ovld __cnfn convert_ulong_sat_rte(ulong);
+ulong __ovld __cnfn convert_ulong_rtz(ulong);
+ulong __ovld __cnfn convert_ulong_sat_rtz(ulong);
+ulong __ovld __cnfn convert_ulong_rtp(ulong);
+ulong __ovld __cnfn convert_ulong_sat_rtp(ulong);
+ulong __ovld __cnfn convert_ulong_rtn(ulong);
+ulong __ovld __cnfn convert_ulong_sat_rtn(ulong);
+ulong __ovld __cnfn convert_ulong(ulong);
+ulong __ovld __cnfn convert_ulong_sat(ulong);
+ulong __ovld __cnfn convert_ulong_rte(float);
+ulong __ovld __cnfn convert_ulong_sat_rte(float);
+ulong __ovld __cnfn convert_ulong_rtz(float);
+ulong __ovld __cnfn convert_ulong_sat_rtz(float);
+ulong __ovld __cnfn convert_ulong_rtp(float);
+ulong __ovld __cnfn convert_ulong_sat_rtp(float);
+ulong __ovld __cnfn convert_ulong_rtn(float);
+ulong __ovld __cnfn convert_ulong_sat_rtn(float);
+ulong __ovld __cnfn convert_ulong(float);
+ulong __ovld __cnfn convert_ulong_sat(float);
+float __ovld __cnfn convert_float_rte(char);
+float __ovld __cnfn convert_float_rtz(char);
+float __ovld __cnfn convert_float_rtp(char);
+float __ovld __cnfn convert_float_rtn(char);
+float __ovld __cnfn convert_float(char);
+float __ovld __cnfn convert_float_rte(uchar);
+float __ovld __cnfn convert_float_rtz(uchar);
+float __ovld __cnfn convert_float_rtp(uchar);
+float __ovld __cnfn convert_float_rtn(uchar);
+float __ovld __cnfn convert_float(uchar);
+float __ovld __cnfn convert_float_rte(short);
+float __ovld __cnfn convert_float_rtz(short);
+float __ovld __cnfn convert_float_rtp(short);
+float __ovld __cnfn convert_float_rtn(short);
+float __ovld __cnfn convert_float(short);
+float __ovld __cnfn convert_float_rte(ushort);
+float __ovld __cnfn convert_float_rtz(ushort);
+float __ovld __cnfn convert_float_rtp(ushort);
+float __ovld __cnfn convert_float_rtn(ushort);
+float __ovld __cnfn convert_float(ushort);
+float __ovld __cnfn convert_float_rte(int);
+float __ovld __cnfn convert_float_rtz(int);
+float __ovld __cnfn convert_float_rtp(int);
+float __ovld __cnfn convert_float_rtn(int);
+float __ovld __cnfn convert_float(int);
+float __ovld __cnfn convert_float_rte(uint);
+float __ovld __cnfn convert_float_rtz(uint);
+float __ovld __cnfn convert_float_rtp(uint);
+float __ovld __cnfn convert_float_rtn(uint);
+float __ovld __cnfn convert_float(uint);
+float __ovld __cnfn convert_float_rte(long);
+float __ovld __cnfn convert_float_rtz(long);
+float __ovld __cnfn convert_float_rtp(long);
+float __ovld __cnfn convert_float_rtn(long);
+float __ovld __cnfn convert_float(long);
+float __ovld __cnfn convert_float_rte(ulong);
+float __ovld __cnfn convert_float_rtz(ulong);
+float __ovld __cnfn convert_float_rtp(ulong);
+float __ovld __cnfn convert_float_rtn(ulong);
+float __ovld __cnfn convert_float(ulong);
+float __ovld __cnfn convert_float_rte(float);
+float __ovld __cnfn convert_float_rtz(float);
+float __ovld __cnfn convert_float_rtp(float);
+float __ovld __cnfn convert_float_rtn(float);
+float __ovld __cnfn convert_float(float);
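+// [Editorial note; illustrative only, not part of the upstream header.] The
+// declarations above cover the scalar convert_<dst>[_sat][_rte|_rtz|_rtp|_rtn](<src>)
+// built-ins; the blocks that follow repeat the same pattern for the vector widths.
+// _sat clamps out-of-range results to the destination's range; _rte/_rtz/_rtp/_rtn
+// select round-to-nearest-even, toward zero, toward +inf, and toward -inf (per the
+// OpenCL C spec). A minimal usage sketch, with hypothetical values:
+//   float f = 300.7f;
+//   long  l = convert_long_rtz(f);          // truncates toward zero -> 300
+//   ulong u = convert_ulong_sat_rte(-1.0f); // negative input saturates to 0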
+char2 __ovld __cnfn convert_char2_rte(char2);
+char2 __ovld __cnfn convert_char2_sat_rte(char2);
+char2 __ovld __cnfn convert_char2_rtz(char2);
+char2 __ovld __cnfn convert_char2_sat_rtz(char2);
+char2 __ovld __cnfn convert_char2_rtp(char2);
+char2 __ovld __cnfn convert_char2_sat_rtp(char2);
+char2 __ovld __cnfn convert_char2_rtn(char2);
+char2 __ovld __cnfn convert_char2_sat_rtn(char2);
+char2 __ovld __cnfn convert_char2(char2);
+char2 __ovld __cnfn convert_char2_sat(char2);
+char2 __ovld __cnfn convert_char2_rte(uchar2);
+char2 __ovld __cnfn convert_char2_sat_rte(uchar2);
+char2 __ovld __cnfn convert_char2_rtz(uchar2);
+char2 __ovld __cnfn convert_char2_sat_rtz(uchar2);
+char2 __ovld __cnfn convert_char2_rtp(uchar2);
+char2 __ovld __cnfn convert_char2_sat_rtp(uchar2);
+char2 __ovld __cnfn convert_char2_rtn(uchar2);
+char2 __ovld __cnfn convert_char2_sat_rtn(uchar2);
+char2 __ovld __cnfn convert_char2(uchar2);
+char2 __ovld __cnfn convert_char2_sat(uchar2);
+char2 __ovld __cnfn convert_char2_rte(short2);
+char2 __ovld __cnfn convert_char2_sat_rte(short2);
+char2 __ovld __cnfn convert_char2_rtz(short2);
+char2 __ovld __cnfn convert_char2_sat_rtz(short2);
+char2 __ovld __cnfn convert_char2_rtp(short2);
+char2 __ovld __cnfn convert_char2_sat_rtp(short2);
+char2 __ovld __cnfn convert_char2_rtn(short2);
+char2 __ovld __cnfn convert_char2_sat_rtn(short2);
+char2 __ovld __cnfn convert_char2(short2);
+char2 __ovld __cnfn convert_char2_sat(short2);
+char2 __ovld __cnfn convert_char2_rte(ushort2);
+char2 __ovld __cnfn convert_char2_sat_rte(ushort2);
+char2 __ovld __cnfn convert_char2_rtz(ushort2);
+char2 __ovld __cnfn convert_char2_sat_rtz(ushort2);
+char2 __ovld __cnfn convert_char2_rtp(ushort2);
+char2 __ovld __cnfn convert_char2_sat_rtp(ushort2);
+char2 __ovld __cnfn convert_char2_rtn(ushort2);
+char2 __ovld __cnfn convert_char2_sat_rtn(ushort2);
+char2 __ovld __cnfn convert_char2(ushort2);
+char2 __ovld __cnfn convert_char2_sat(ushort2);
+char2 __ovld __cnfn convert_char2_rte(int2);
+char2 __ovld __cnfn convert_char2_sat_rte(int2);
+char2 __ovld __cnfn convert_char2_rtz(int2);
+char2 __ovld __cnfn convert_char2_sat_rtz(int2);
+char2 __ovld __cnfn convert_char2_rtp(int2);
+char2 __ovld __cnfn convert_char2_sat_rtp(int2);
+char2 __ovld __cnfn convert_char2_rtn(int2);
+char2 __ovld __cnfn convert_char2_sat_rtn(int2);
+char2 __ovld __cnfn convert_char2(int2);
+char2 __ovld __cnfn convert_char2_sat(int2);
+char2 __ovld __cnfn convert_char2_rte(uint2);
+char2 __ovld __cnfn convert_char2_sat_rte(uint2);
+char2 __ovld __cnfn convert_char2_rtz(uint2);
+char2 __ovld __cnfn convert_char2_sat_rtz(uint2);
+char2 __ovld __cnfn convert_char2_rtp(uint2);
+char2 __ovld __cnfn convert_char2_sat_rtp(uint2);
+char2 __ovld __cnfn convert_char2_rtn(uint2);
+char2 __ovld __cnfn convert_char2_sat_rtn(uint2);
+char2 __ovld __cnfn convert_char2(uint2);
+char2 __ovld __cnfn convert_char2_sat(uint2);
+char2 __ovld __cnfn convert_char2_rte(long2);
+char2 __ovld __cnfn convert_char2_sat_rte(long2);
+char2 __ovld __cnfn convert_char2_rtz(long2);
+char2 __ovld __cnfn convert_char2_sat_rtz(long2);
+char2 __ovld __cnfn convert_char2_rtp(long2);
+char2 __ovld __cnfn convert_char2_sat_rtp(long2);
+char2 __ovld __cnfn convert_char2_rtn(long2);
+char2 __ovld __cnfn convert_char2_sat_rtn(long2);
+char2 __ovld __cnfn convert_char2(long2);
+char2 __ovld __cnfn convert_char2_sat(long2);
+char2 __ovld __cnfn convert_char2_rte(ulong2);
+char2 __ovld __cnfn convert_char2_sat_rte(ulong2);
+char2 __ovld __cnfn convert_char2_rtz(ulong2);
+char2 __ovld __cnfn convert_char2_sat_rtz(ulong2);
+char2 __ovld __cnfn convert_char2_rtp(ulong2);
+char2 __ovld __cnfn convert_char2_sat_rtp(ulong2);
+char2 __ovld __cnfn convert_char2_rtn(ulong2);
+char2 __ovld __cnfn convert_char2_sat_rtn(ulong2);
+char2 __ovld __cnfn convert_char2(ulong2);
+char2 __ovld __cnfn convert_char2_sat(ulong2);
+char2 __ovld __cnfn convert_char2_rte(float2);
+char2 __ovld __cnfn convert_char2_sat_rte(float2);
+char2 __ovld __cnfn convert_char2_rtz(float2);
+char2 __ovld __cnfn convert_char2_sat_rtz(float2);
+char2 __ovld __cnfn convert_char2_rtp(float2);
+char2 __ovld __cnfn convert_char2_sat_rtp(float2);
+char2 __ovld __cnfn convert_char2_rtn(float2);
+char2 __ovld __cnfn convert_char2_sat_rtn(float2);
+char2 __ovld __cnfn convert_char2(float2);
+char2 __ovld __cnfn convert_char2_sat(float2);
+uchar2 __ovld __cnfn convert_uchar2_rte(char2);
+uchar2 __ovld __cnfn convert_uchar2_sat_rte(char2);
+uchar2 __ovld __cnfn convert_uchar2_rtz(char2);
+uchar2 __ovld __cnfn convert_uchar2_sat_rtz(char2);
+uchar2 __ovld __cnfn convert_uchar2_rtp(char2);
+uchar2 __ovld __cnfn convert_uchar2_sat_rtp(char2);
+uchar2 __ovld __cnfn convert_uchar2_rtn(char2);
+uchar2 __ovld __cnfn convert_uchar2_sat_rtn(char2);
+uchar2 __ovld __cnfn convert_uchar2(char2);
+uchar2 __ovld __cnfn convert_uchar2_sat(char2);
+uchar2 __ovld __cnfn convert_uchar2_rte(uchar2);
+uchar2 __ovld __cnfn convert_uchar2_sat_rte(uchar2);
+uchar2 __ovld __cnfn convert_uchar2_rtz(uchar2);
+uchar2 __ovld __cnfn convert_uchar2_sat_rtz(uchar2);
+uchar2 __ovld __cnfn convert_uchar2_rtp(uchar2);
+uchar2 __ovld __cnfn convert_uchar2_sat_rtp(uchar2);
+uchar2 __ovld __cnfn convert_uchar2_rtn(uchar2);
+uchar2 __ovld __cnfn convert_uchar2_sat_rtn(uchar2);
+uchar2 __ovld __cnfn convert_uchar2(uchar2);
+uchar2 __ovld __cnfn convert_uchar2_sat(uchar2);
+uchar2 __ovld __cnfn convert_uchar2_rte(short2);
+uchar2 __ovld __cnfn convert_uchar2_sat_rte(short2);
+uchar2 __ovld __cnfn convert_uchar2_rtz(short2);
+uchar2 __ovld __cnfn convert_uchar2_sat_rtz(short2);
+uchar2 __ovld __cnfn convert_uchar2_rtp(short2);
+uchar2 __ovld __cnfn convert_uchar2_sat_rtp(short2);
+uchar2 __ovld __cnfn convert_uchar2_rtn(short2);
+uchar2 __ovld __cnfn convert_uchar2_sat_rtn(short2);
+uchar2 __ovld __cnfn convert_uchar2(short2);
+uchar2 __ovld __cnfn convert_uchar2_sat(short2);
+uchar2 __ovld __cnfn convert_uchar2_rte(ushort2);
+uchar2 __ovld __cnfn convert_uchar2_sat_rte(ushort2);
+uchar2 __ovld __cnfn convert_uchar2_rtz(ushort2);
+uchar2 __ovld __cnfn convert_uchar2_sat_rtz(ushort2);
+uchar2 __ovld __cnfn convert_uchar2_rtp(ushort2);
+uchar2 __ovld __cnfn convert_uchar2_sat_rtp(ushort2);
+uchar2 __ovld __cnfn convert_uchar2_rtn(ushort2);
+uchar2 __ovld __cnfn convert_uchar2_sat_rtn(ushort2);
+uchar2 __ovld __cnfn convert_uchar2(ushort2);
+uchar2 __ovld __cnfn convert_uchar2_sat(ushort2);
+uchar2 __ovld __cnfn convert_uchar2_rte(int2);
+uchar2 __ovld __cnfn convert_uchar2_sat_rte(int2);
+uchar2 __ovld __cnfn convert_uchar2_rtz(int2);
+uchar2 __ovld __cnfn convert_uchar2_sat_rtz(int2);
+uchar2 __ovld __cnfn convert_uchar2_rtp(int2);
+uchar2 __ovld __cnfn convert_uchar2_sat_rtp(int2);
+uchar2 __ovld __cnfn convert_uchar2_rtn(int2);
+uchar2 __ovld __cnfn convert_uchar2_sat_rtn(int2);
+uchar2 __ovld __cnfn convert_uchar2(int2);
+uchar2 __ovld __cnfn convert_uchar2_sat(int2);
+uchar2 __ovld __cnfn convert_uchar2_rte(uint2);
+uchar2 __ovld __cnfn convert_uchar2_sat_rte(uint2);
+uchar2 __ovld __cnfn convert_uchar2_rtz(uint2);
+uchar2 __ovld __cnfn convert_uchar2_sat_rtz(uint2);
+uchar2 __ovld __cnfn convert_uchar2_rtp(uint2);
+uchar2 __ovld __cnfn convert_uchar2_sat_rtp(uint2);
+uchar2 __ovld __cnfn convert_uchar2_rtn(uint2);
+uchar2 __ovld __cnfn convert_uchar2_sat_rtn(uint2);
+uchar2 __ovld __cnfn convert_uchar2(uint2);
+uchar2 __ovld __cnfn convert_uchar2_sat(uint2);
+uchar2 __ovld __cnfn convert_uchar2_rte(long2);
+uchar2 __ovld __cnfn convert_uchar2_sat_rte(long2);
+uchar2 __ovld __cnfn convert_uchar2_rtz(long2);
+uchar2 __ovld __cnfn convert_uchar2_sat_rtz(long2);
+uchar2 __ovld __cnfn convert_uchar2_rtp(long2);
+uchar2 __ovld __cnfn convert_uchar2_sat_rtp(long2);
+uchar2 __ovld __cnfn convert_uchar2_rtn(long2);
+uchar2 __ovld __cnfn convert_uchar2_sat_rtn(long2);
+uchar2 __ovld __cnfn convert_uchar2(long2);
+uchar2 __ovld __cnfn convert_uchar2_sat(long2);
+uchar2 __ovld __cnfn convert_uchar2_rte(ulong2);
+uchar2 __ovld __cnfn convert_uchar2_sat_rte(ulong2);
+uchar2 __ovld __cnfn convert_uchar2_rtz(ulong2);
+uchar2 __ovld __cnfn convert_uchar2_sat_rtz(ulong2);
+uchar2 __ovld __cnfn convert_uchar2_rtp(ulong2);
+uchar2 __ovld __cnfn convert_uchar2_sat_rtp(ulong2);
+uchar2 __ovld __cnfn convert_uchar2_rtn(ulong2);
+uchar2 __ovld __cnfn convert_uchar2_sat_rtn(ulong2);
+uchar2 __ovld __cnfn convert_uchar2(ulong2);
+uchar2 __ovld __cnfn convert_uchar2_sat(ulong2);
+uchar2 __ovld __cnfn convert_uchar2_rte(float2);
+uchar2 __ovld __cnfn convert_uchar2_sat_rte(float2);
+uchar2 __ovld __cnfn convert_uchar2_rtz(float2);
+uchar2 __ovld __cnfn convert_uchar2_sat_rtz(float2);
+uchar2 __ovld __cnfn convert_uchar2_rtp(float2);
+uchar2 __ovld __cnfn convert_uchar2_sat_rtp(float2);
+uchar2 __ovld __cnfn convert_uchar2_rtn(float2);
+uchar2 __ovld __cnfn convert_uchar2_sat_rtn(float2);
+uchar2 __ovld __cnfn convert_uchar2(float2);
+uchar2 __ovld __cnfn convert_uchar2_sat(float2);
+short2 __ovld __cnfn convert_short2_rte(char2);
+short2 __ovld __cnfn convert_short2_sat_rte(char2);
+short2 __ovld __cnfn convert_short2_rtz(char2);
+short2 __ovld __cnfn convert_short2_sat_rtz(char2);
+short2 __ovld __cnfn convert_short2_rtp(char2);
+short2 __ovld __cnfn convert_short2_sat_rtp(char2);
+short2 __ovld __cnfn convert_short2_rtn(char2);
+short2 __ovld __cnfn convert_short2_sat_rtn(char2);
+short2 __ovld __cnfn convert_short2(char2);
+short2 __ovld __cnfn convert_short2_sat(char2);
+short2 __ovld __cnfn convert_short2_rte(uchar2);
+short2 __ovld __cnfn convert_short2_sat_rte(uchar2);
+short2 __ovld __cnfn convert_short2_rtz(uchar2);
+short2 __ovld __cnfn convert_short2_sat_rtz(uchar2);
+short2 __ovld __cnfn convert_short2_rtp(uchar2);
+short2 __ovld __cnfn convert_short2_sat_rtp(uchar2);
+short2 __ovld __cnfn convert_short2_rtn(uchar2);
+short2 __ovld __cnfn convert_short2_sat_rtn(uchar2);
+short2 __ovld __cnfn convert_short2(uchar2);
+short2 __ovld __cnfn convert_short2_sat(uchar2);
+short2 __ovld __cnfn convert_short2_rte(short2);
+short2 __ovld __cnfn convert_short2_sat_rte(short2);
+short2 __ovld __cnfn convert_short2_rtz(short2);
+short2 __ovld __cnfn convert_short2_sat_rtz(short2);
+short2 __ovld __cnfn convert_short2_rtp(short2);
+short2 __ovld __cnfn convert_short2_sat_rtp(short2);
+short2 __ovld __cnfn convert_short2_rtn(short2);
+short2 __ovld __cnfn convert_short2_sat_rtn(short2);
+short2 __ovld __cnfn convert_short2(short2);
+short2 __ovld __cnfn convert_short2_sat(short2);
+short2 __ovld __cnfn convert_short2_rte(ushort2);
+short2 __ovld __cnfn convert_short2_sat_rte(ushort2);
+short2 __ovld __cnfn convert_short2_rtz(ushort2);
+short2 __ovld __cnfn convert_short2_sat_rtz(ushort2);
+short2 __ovld __cnfn convert_short2_rtp(ushort2);
+short2 __ovld __cnfn convert_short2_sat_rtp(ushort2);
+short2 __ovld __cnfn convert_short2_rtn(ushort2);
+short2 __ovld __cnfn convert_short2_sat_rtn(ushort2);
+short2 __ovld __cnfn convert_short2(ushort2);
+short2 __ovld __cnfn convert_short2_sat(ushort2);
+short2 __ovld __cnfn convert_short2_rte(int2);
+short2 __ovld __cnfn convert_short2_sat_rte(int2);
+short2 __ovld __cnfn convert_short2_rtz(int2);
+short2 __ovld __cnfn convert_short2_sat_rtz(int2);
+short2 __ovld __cnfn convert_short2_rtp(int2);
+short2 __ovld __cnfn convert_short2_sat_rtp(int2);
+short2 __ovld __cnfn convert_short2_rtn(int2);
+short2 __ovld __cnfn convert_short2_sat_rtn(int2);
+short2 __ovld __cnfn convert_short2(int2);
+short2 __ovld __cnfn convert_short2_sat(int2);
+short2 __ovld __cnfn convert_short2_rte(uint2);
+short2 __ovld __cnfn convert_short2_sat_rte(uint2);
+short2 __ovld __cnfn convert_short2_rtz(uint2);
+short2 __ovld __cnfn convert_short2_sat_rtz(uint2);
+short2 __ovld __cnfn convert_short2_rtp(uint2);
+short2 __ovld __cnfn convert_short2_sat_rtp(uint2);
+short2 __ovld __cnfn convert_short2_rtn(uint2);
+short2 __ovld __cnfn convert_short2_sat_rtn(uint2);
+short2 __ovld __cnfn convert_short2(uint2);
+short2 __ovld __cnfn convert_short2_sat(uint2);
+short2 __ovld __cnfn convert_short2_rte(long2);
+short2 __ovld __cnfn convert_short2_sat_rte(long2);
+short2 __ovld __cnfn convert_short2_rtz(long2);
+short2 __ovld __cnfn convert_short2_sat_rtz(long2);
+short2 __ovld __cnfn convert_short2_rtp(long2);
+short2 __ovld __cnfn convert_short2_sat_rtp(long2);
+short2 __ovld __cnfn convert_short2_rtn(long2);
+short2 __ovld __cnfn convert_short2_sat_rtn(long2);
+short2 __ovld __cnfn convert_short2(long2);
+short2 __ovld __cnfn convert_short2_sat(long2);
+short2 __ovld __cnfn convert_short2_rte(ulong2);
+short2 __ovld __cnfn convert_short2_sat_rte(ulong2);
+short2 __ovld __cnfn convert_short2_rtz(ulong2);
+short2 __ovld __cnfn convert_short2_sat_rtz(ulong2);
+short2 __ovld __cnfn convert_short2_rtp(ulong2);
+short2 __ovld __cnfn convert_short2_sat_rtp(ulong2);
+short2 __ovld __cnfn convert_short2_rtn(ulong2);
+short2 __ovld __cnfn convert_short2_sat_rtn(ulong2);
+short2 __ovld __cnfn convert_short2(ulong2);
+short2 __ovld __cnfn convert_short2_sat(ulong2);
+short2 __ovld __cnfn convert_short2_rte(float2);
+short2 __ovld __cnfn convert_short2_sat_rte(float2);
+short2 __ovld __cnfn convert_short2_rtz(float2);
+short2 __ovld __cnfn convert_short2_sat_rtz(float2);
+short2 __ovld __cnfn convert_short2_rtp(float2);
+short2 __ovld __cnfn convert_short2_sat_rtp(float2);
+short2 __ovld __cnfn convert_short2_rtn(float2);
+short2 __ovld __cnfn convert_short2_sat_rtn(float2);
+short2 __ovld __cnfn convert_short2(float2);
+short2 __ovld __cnfn convert_short2_sat(float2);
+ushort2 __ovld __cnfn convert_ushort2_rte(char2);
+ushort2 __ovld __cnfn convert_ushort2_sat_rte(char2);
+ushort2 __ovld __cnfn convert_ushort2_rtz(char2);
+ushort2 __ovld __cnfn convert_ushort2_sat_rtz(char2);
+ushort2 __ovld __cnfn convert_ushort2_rtp(char2);
+ushort2 __ovld __cnfn convert_ushort2_sat_rtp(char2);
+ushort2 __ovld __cnfn convert_ushort2_rtn(char2);
+ushort2 __ovld __cnfn convert_ushort2_sat_rtn(char2);
+ushort2 __ovld __cnfn convert_ushort2(char2);
+ushort2 __ovld __cnfn convert_ushort2_sat(char2);
+ushort2 __ovld __cnfn convert_ushort2_rte(uchar2);
+ushort2 __ovld __cnfn convert_ushort2_sat_rte(uchar2);
+ushort2 __ovld __cnfn convert_ushort2_rtz(uchar2);
+ushort2 __ovld __cnfn convert_ushort2_sat_rtz(uchar2);
+ushort2 __ovld __cnfn convert_ushort2_rtp(uchar2);
+ushort2 __ovld __cnfn convert_ushort2_sat_rtp(uchar2);
+ushort2 __ovld __cnfn convert_ushort2_rtn(uchar2);
+ushort2 __ovld __cnfn convert_ushort2_sat_rtn(uchar2);
+ushort2 __ovld __cnfn convert_ushort2(uchar2);
+ushort2 __ovld __cnfn convert_ushort2_sat(uchar2);
+ushort2 __ovld __cnfn convert_ushort2_rte(short2);
+ushort2 __ovld __cnfn convert_ushort2_sat_rte(short2);
+ushort2 __ovld __cnfn convert_ushort2_rtz(short2);
+ushort2 __ovld __cnfn convert_ushort2_sat_rtz(short2);
+ushort2 __ovld __cnfn convert_ushort2_rtp(short2);
+ushort2 __ovld __cnfn convert_ushort2_sat_rtp(short2);
+ushort2 __ovld __cnfn convert_ushort2_rtn(short2);
+ushort2 __ovld __cnfn convert_ushort2_sat_rtn(short2);
+ushort2 __ovld __cnfn convert_ushort2(short2);
+ushort2 __ovld __cnfn convert_ushort2_sat(short2);
+ushort2 __ovld __cnfn convert_ushort2_rte(ushort2);
+ushort2 __ovld __cnfn convert_ushort2_sat_rte(ushort2);
+ushort2 __ovld __cnfn convert_ushort2_rtz(ushort2);
+ushort2 __ovld __cnfn convert_ushort2_sat_rtz(ushort2);
+ushort2 __ovld __cnfn convert_ushort2_rtp(ushort2);
+ushort2 __ovld __cnfn convert_ushort2_sat_rtp(ushort2);
+ushort2 __ovld __cnfn convert_ushort2_rtn(ushort2);
+ushort2 __ovld __cnfn convert_ushort2_sat_rtn(ushort2);
+ushort2 __ovld __cnfn convert_ushort2(ushort2);
+ushort2 __ovld __cnfn convert_ushort2_sat(ushort2);
+ushort2 __ovld __cnfn convert_ushort2_rte(int2);
+ushort2 __ovld __cnfn convert_ushort2_sat_rte(int2);
+ushort2 __ovld __cnfn convert_ushort2_rtz(int2);
+ushort2 __ovld __cnfn convert_ushort2_sat_rtz(int2);
+ushort2 __ovld __cnfn convert_ushort2_rtp(int2);
+ushort2 __ovld __cnfn convert_ushort2_sat_rtp(int2);
+ushort2 __ovld __cnfn convert_ushort2_rtn(int2);
+ushort2 __ovld __cnfn convert_ushort2_sat_rtn(int2);
+ushort2 __ovld __cnfn convert_ushort2(int2);
+ushort2 __ovld __cnfn convert_ushort2_sat(int2);
+ushort2 __ovld __cnfn convert_ushort2_rte(uint2);
+ushort2 __ovld __cnfn convert_ushort2_sat_rte(uint2);
+ushort2 __ovld __cnfn convert_ushort2_rtz(uint2);
+ushort2 __ovld __cnfn convert_ushort2_sat_rtz(uint2);
+ushort2 __ovld __cnfn convert_ushort2_rtp(uint2);
+ushort2 __ovld __cnfn convert_ushort2_sat_rtp(uint2);
+ushort2 __ovld __cnfn convert_ushort2_rtn(uint2);
+ushort2 __ovld __cnfn convert_ushort2_sat_rtn(uint2);
+ushort2 __ovld __cnfn convert_ushort2(uint2);
+ushort2 __ovld __cnfn convert_ushort2_sat(uint2);
+ushort2 __ovld __cnfn convert_ushort2_rte(long2);
+ushort2 __ovld __cnfn convert_ushort2_sat_rte(long2);
+ushort2 __ovld __cnfn convert_ushort2_rtz(long2);
+ushort2 __ovld __cnfn convert_ushort2_sat_rtz(long2);
+ushort2 __ovld __cnfn convert_ushort2_rtp(long2);
+ushort2 __ovld __cnfn convert_ushort2_sat_rtp(long2);
+ushort2 __ovld __cnfn convert_ushort2_rtn(long2);
+ushort2 __ovld __cnfn convert_ushort2_sat_rtn(long2);
+ushort2 __ovld __cnfn convert_ushort2(long2);
+ushort2 __ovld __cnfn convert_ushort2_sat(long2);
+ushort2 __ovld __cnfn convert_ushort2_rte(ulong2);
+ushort2 __ovld __cnfn convert_ushort2_sat_rte(ulong2);
+ushort2 __ovld __cnfn convert_ushort2_rtz(ulong2);
+ushort2 __ovld __cnfn convert_ushort2_sat_rtz(ulong2);
+ushort2 __ovld __cnfn convert_ushort2_rtp(ulong2);
+ushort2 __ovld __cnfn convert_ushort2_sat_rtp(ulong2);
+ushort2 __ovld __cnfn convert_ushort2_rtn(ulong2);
+ushort2 __ovld __cnfn convert_ushort2_sat_rtn(ulong2);
+ushort2 __ovld __cnfn convert_ushort2(ulong2);
+ushort2 __ovld __cnfn convert_ushort2_sat(ulong2);
+ushort2 __ovld __cnfn convert_ushort2_rte(float2);
+ushort2 __ovld __cnfn convert_ushort2_sat_rte(float2);
+ushort2 __ovld __cnfn convert_ushort2_rtz(float2);
+ushort2 __ovld __cnfn convert_ushort2_sat_rtz(float2);
+ushort2 __ovld __cnfn convert_ushort2_rtp(float2);
+ushort2 __ovld __cnfn convert_ushort2_sat_rtp(float2);
+ushort2 __ovld __cnfn convert_ushort2_rtn(float2);
+ushort2 __ovld __cnfn convert_ushort2_sat_rtn(float2);
+ushort2 __ovld __cnfn convert_ushort2(float2);
+ushort2 __ovld __cnfn convert_ushort2_sat(float2);
+int2 __ovld __cnfn convert_int2_rte(char2);
+int2 __ovld __cnfn convert_int2_sat_rte(char2);
+int2 __ovld __cnfn convert_int2_rtz(char2);
+int2 __ovld __cnfn convert_int2_sat_rtz(char2);
+int2 __ovld __cnfn convert_int2_rtp(char2);
+int2 __ovld __cnfn convert_int2_sat_rtp(char2);
+int2 __ovld __cnfn convert_int2_rtn(char2);
+int2 __ovld __cnfn convert_int2_sat_rtn(char2);
+int2 __ovld __cnfn convert_int2(char2);
+int2 __ovld __cnfn convert_int2_sat(char2);
+int2 __ovld __cnfn convert_int2_rte(uchar2);
+int2 __ovld __cnfn convert_int2_sat_rte(uchar2);
+int2 __ovld __cnfn convert_int2_rtz(uchar2);
+int2 __ovld __cnfn convert_int2_sat_rtz(uchar2);
+int2 __ovld __cnfn convert_int2_rtp(uchar2);
+int2 __ovld __cnfn convert_int2_sat_rtp(uchar2);
+int2 __ovld __cnfn convert_int2_rtn(uchar2);
+int2 __ovld __cnfn convert_int2_sat_rtn(uchar2);
+int2 __ovld __cnfn convert_int2(uchar2);
+int2 __ovld __cnfn convert_int2_sat(uchar2);
+int2 __ovld __cnfn convert_int2_rte(short2);
+int2 __ovld __cnfn convert_int2_sat_rte(short2);
+int2 __ovld __cnfn convert_int2_rtz(short2);
+int2 __ovld __cnfn convert_int2_sat_rtz(short2);
+int2 __ovld __cnfn convert_int2_rtp(short2);
+int2 __ovld __cnfn convert_int2_sat_rtp(short2);
+int2 __ovld __cnfn convert_int2_rtn(short2);
+int2 __ovld __cnfn convert_int2_sat_rtn(short2);
+int2 __ovld __cnfn convert_int2(short2);
+int2 __ovld __cnfn convert_int2_sat(short2);
+int2 __ovld __cnfn convert_int2_rte(ushort2);
+int2 __ovld __cnfn convert_int2_sat_rte(ushort2);
+int2 __ovld __cnfn convert_int2_rtz(ushort2);
+int2 __ovld __cnfn convert_int2_sat_rtz(ushort2);
+int2 __ovld __cnfn convert_int2_rtp(ushort2);
+int2 __ovld __cnfn convert_int2_sat_rtp(ushort2);
+int2 __ovld __cnfn convert_int2_rtn(ushort2);
+int2 __ovld __cnfn convert_int2_sat_rtn(ushort2);
+int2 __ovld __cnfn convert_int2(ushort2);
+int2 __ovld __cnfn convert_int2_sat(ushort2);
+int2 __ovld __cnfn convert_int2_rte(int2);
+int2 __ovld __cnfn convert_int2_sat_rte(int2);
+int2 __ovld __cnfn convert_int2_rtz(int2);
+int2 __ovld __cnfn convert_int2_sat_rtz(int2);
+int2 __ovld __cnfn convert_int2_rtp(int2);
+int2 __ovld __cnfn convert_int2_sat_rtp(int2);
+int2 __ovld __cnfn convert_int2_rtn(int2);
+int2 __ovld __cnfn convert_int2_sat_rtn(int2);
+int2 __ovld __cnfn convert_int2(int2);
+int2 __ovld __cnfn convert_int2_sat(int2);
+int2 __ovld __cnfn convert_int2_rte(uint2);
+int2 __ovld __cnfn convert_int2_sat_rte(uint2);
+int2 __ovld __cnfn convert_int2_rtz(uint2);
+int2 __ovld __cnfn convert_int2_sat_rtz(uint2);
+int2 __ovld __cnfn convert_int2_rtp(uint2);
+int2 __ovld __cnfn convert_int2_sat_rtp(uint2);
+int2 __ovld __cnfn convert_int2_rtn(uint2);
+int2 __ovld __cnfn convert_int2_sat_rtn(uint2);
+int2 __ovld __cnfn convert_int2(uint2);
+int2 __ovld __cnfn convert_int2_sat(uint2);
+int2 __ovld __cnfn convert_int2_rte(long2);
+int2 __ovld __cnfn convert_int2_sat_rte(long2);
+int2 __ovld __cnfn convert_int2_rtz(long2);
+int2 __ovld __cnfn convert_int2_sat_rtz(long2);
+int2 __ovld __cnfn convert_int2_rtp(long2);
+int2 __ovld __cnfn convert_int2_sat_rtp(long2);
+int2 __ovld __cnfn convert_int2_rtn(long2);
+int2 __ovld __cnfn convert_int2_sat_rtn(long2);
+int2 __ovld __cnfn convert_int2(long2);
+int2 __ovld __cnfn convert_int2_sat(long2);
+int2 __ovld __cnfn convert_int2_rte(ulong2);
+int2 __ovld __cnfn convert_int2_sat_rte(ulong2);
+int2 __ovld __cnfn convert_int2_rtz(ulong2);
+int2 __ovld __cnfn convert_int2_sat_rtz(ulong2);
+int2 __ovld __cnfn convert_int2_rtp(ulong2);
+int2 __ovld __cnfn convert_int2_sat_rtp(ulong2);
+int2 __ovld __cnfn convert_int2_rtn(ulong2);
+int2 __ovld __cnfn convert_int2_sat_rtn(ulong2);
+int2 __ovld __cnfn convert_int2(ulong2);
+int2 __ovld __cnfn convert_int2_sat(ulong2);
+int2 __ovld __cnfn convert_int2_rte(float2);
+int2 __ovld __cnfn convert_int2_sat_rte(float2);
+int2 __ovld __cnfn convert_int2_rtz(float2);
+int2 __ovld __cnfn convert_int2_sat_rtz(float2);
+int2 __ovld __cnfn convert_int2_rtp(float2);
+int2 __ovld __cnfn convert_int2_sat_rtp(float2);
+int2 __ovld __cnfn convert_int2_rtn(float2);
+int2 __ovld __cnfn convert_int2_sat_rtn(float2);
+int2 __ovld __cnfn convert_int2(float2);
+int2 __ovld __cnfn convert_int2_sat(float2);
+uint2 __ovld __cnfn convert_uint2_rte(char2);
+uint2 __ovld __cnfn convert_uint2_sat_rte(char2);
+uint2 __ovld __cnfn convert_uint2_rtz(char2);
+uint2 __ovld __cnfn convert_uint2_sat_rtz(char2);
+uint2 __ovld __cnfn convert_uint2_rtp(char2);
+uint2 __ovld __cnfn convert_uint2_sat_rtp(char2);
+uint2 __ovld __cnfn convert_uint2_rtn(char2);
+uint2 __ovld __cnfn convert_uint2_sat_rtn(char2);
+uint2 __ovld __cnfn convert_uint2(char2);
+uint2 __ovld __cnfn convert_uint2_sat(char2);
+uint2 __ovld __cnfn convert_uint2_rte(uchar2);
+uint2 __ovld __cnfn convert_uint2_sat_rte(uchar2);
+uint2 __ovld __cnfn convert_uint2_rtz(uchar2);
+uint2 __ovld __cnfn convert_uint2_sat_rtz(uchar2);
+uint2 __ovld __cnfn convert_uint2_rtp(uchar2);
+uint2 __ovld __cnfn convert_uint2_sat_rtp(uchar2);
+uint2 __ovld __cnfn convert_uint2_rtn(uchar2);
+uint2 __ovld __cnfn convert_uint2_sat_rtn(uchar2);
+uint2 __ovld __cnfn convert_uint2(uchar2);
+uint2 __ovld __cnfn convert_uint2_sat(uchar2);
+uint2 __ovld __cnfn convert_uint2_rte(short2);
+uint2 __ovld __cnfn convert_uint2_sat_rte(short2);
+uint2 __ovld __cnfn convert_uint2_rtz(short2);
+uint2 __ovld __cnfn convert_uint2_sat_rtz(short2);
+uint2 __ovld __cnfn convert_uint2_rtp(short2);
+uint2 __ovld __cnfn convert_uint2_sat_rtp(short2);
+uint2 __ovld __cnfn convert_uint2_rtn(short2);
+uint2 __ovld __cnfn convert_uint2_sat_rtn(short2);
+uint2 __ovld __cnfn convert_uint2(short2);
+uint2 __ovld __cnfn convert_uint2_sat(short2);
+uint2 __ovld __cnfn convert_uint2_rte(ushort2);
+uint2 __ovld __cnfn convert_uint2_sat_rte(ushort2);
+uint2 __ovld __cnfn convert_uint2_rtz(ushort2);
+uint2 __ovld __cnfn convert_uint2_sat_rtz(ushort2);
+uint2 __ovld __cnfn convert_uint2_rtp(ushort2);
+uint2 __ovld __cnfn convert_uint2_sat_rtp(ushort2);
+uint2 __ovld __cnfn convert_uint2_rtn(ushort2);
+uint2 __ovld __cnfn convert_uint2_sat_rtn(ushort2);
+uint2 __ovld __cnfn convert_uint2(ushort2);
+uint2 __ovld __cnfn convert_uint2_sat(ushort2);
+uint2 __ovld __cnfn convert_uint2_rte(int2);
+uint2 __ovld __cnfn convert_uint2_sat_rte(int2);
+uint2 __ovld __cnfn convert_uint2_rtz(int2);
+uint2 __ovld __cnfn convert_uint2_sat_rtz(int2);
+uint2 __ovld __cnfn convert_uint2_rtp(int2);
+uint2 __ovld __cnfn convert_uint2_sat_rtp(int2);
+uint2 __ovld __cnfn convert_uint2_rtn(int2);
+uint2 __ovld __cnfn convert_uint2_sat_rtn(int2);
+uint2 __ovld __cnfn convert_uint2(int2);
+uint2 __ovld __cnfn convert_uint2_sat(int2);
+uint2 __ovld __cnfn convert_uint2_rte(uint2);
+uint2 __ovld __cnfn convert_uint2_sat_rte(uint2);
+uint2 __ovld __cnfn convert_uint2_rtz(uint2);
+uint2 __ovld __cnfn convert_uint2_sat_rtz(uint2);
+uint2 __ovld __cnfn convert_uint2_rtp(uint2);
+uint2 __ovld __cnfn convert_uint2_sat_rtp(uint2);
+uint2 __ovld __cnfn convert_uint2_rtn(uint2);
+uint2 __ovld __cnfn convert_uint2_sat_rtn(uint2);
+uint2 __ovld __cnfn convert_uint2(uint2);
+uint2 __ovld __cnfn convert_uint2_sat(uint2);
+uint2 __ovld __cnfn convert_uint2_rte(long2);
+uint2 __ovld __cnfn convert_uint2_sat_rte(long2);
+uint2 __ovld __cnfn convert_uint2_rtz(long2);
+uint2 __ovld __cnfn convert_uint2_sat_rtz(long2);
+uint2 __ovld __cnfn convert_uint2_rtp(long2);
+uint2 __ovld __cnfn convert_uint2_sat_rtp(long2);
+uint2 __ovld __cnfn convert_uint2_rtn(long2);
+uint2 __ovld __cnfn convert_uint2_sat_rtn(long2);
+uint2 __ovld __cnfn convert_uint2(long2);
+uint2 __ovld __cnfn convert_uint2_sat(long2);
+uint2 __ovld __cnfn convert_uint2_rte(ulong2);
+uint2 __ovld __cnfn convert_uint2_sat_rte(ulong2);
+uint2 __ovld __cnfn convert_uint2_rtz(ulong2);
+uint2 __ovld __cnfn convert_uint2_sat_rtz(ulong2);
+uint2 __ovld __cnfn convert_uint2_rtp(ulong2);
+uint2 __ovld __cnfn convert_uint2_sat_rtp(ulong2);
+uint2 __ovld __cnfn convert_uint2_rtn(ulong2);
+uint2 __ovld __cnfn convert_uint2_sat_rtn(ulong2);
+uint2 __ovld __cnfn convert_uint2(ulong2);
+uint2 __ovld __cnfn convert_uint2_sat(ulong2);
+uint2 __ovld __cnfn convert_uint2_rte(float2);
+uint2 __ovld __cnfn convert_uint2_sat_rte(float2);
+uint2 __ovld __cnfn convert_uint2_rtz(float2);
+uint2 __ovld __cnfn convert_uint2_sat_rtz(float2);
+uint2 __ovld __cnfn convert_uint2_rtp(float2);
+uint2 __ovld __cnfn convert_uint2_sat_rtp(float2);
+uint2 __ovld __cnfn convert_uint2_rtn(float2);
+uint2 __ovld __cnfn convert_uint2_sat_rtn(float2);
+uint2 __ovld __cnfn convert_uint2(float2);
+uint2 __ovld __cnfn convert_uint2_sat(float2);
+long2 __ovld __cnfn convert_long2_rte(char2);
+long2 __ovld __cnfn convert_long2_sat_rte(char2);
+long2 __ovld __cnfn convert_long2_rtz(char2);
+long2 __ovld __cnfn convert_long2_sat_rtz(char2);
+long2 __ovld __cnfn convert_long2_rtp(char2);
+long2 __ovld __cnfn convert_long2_sat_rtp(char2);
+long2 __ovld __cnfn convert_long2_rtn(char2);
+long2 __ovld __cnfn convert_long2_sat_rtn(char2);
+long2 __ovld __cnfn convert_long2(char2);
+long2 __ovld __cnfn convert_long2_sat(char2);
+long2 __ovld __cnfn convert_long2_rte(uchar2);
+long2 __ovld __cnfn convert_long2_sat_rte(uchar2);
+long2 __ovld __cnfn convert_long2_rtz(uchar2);
+long2 __ovld __cnfn convert_long2_sat_rtz(uchar2);
+long2 __ovld __cnfn convert_long2_rtp(uchar2);
+long2 __ovld __cnfn convert_long2_sat_rtp(uchar2);
+long2 __ovld __cnfn convert_long2_rtn(uchar2);
+long2 __ovld __cnfn convert_long2_sat_rtn(uchar2);
+long2 __ovld __cnfn convert_long2(uchar2);
+long2 __ovld __cnfn convert_long2_sat(uchar2);
+long2 __ovld __cnfn convert_long2_rte(short2);
+long2 __ovld __cnfn convert_long2_sat_rte(short2);
+long2 __ovld __cnfn convert_long2_rtz(short2);
+long2 __ovld __cnfn convert_long2_sat_rtz(short2);
+long2 __ovld __cnfn convert_long2_rtp(short2);
+long2 __ovld __cnfn convert_long2_sat_rtp(short2);
+long2 __ovld __cnfn convert_long2_rtn(short2);
+long2 __ovld __cnfn convert_long2_sat_rtn(short2);
+long2 __ovld __cnfn convert_long2(short2);
+long2 __ovld __cnfn convert_long2_sat(short2);
+long2 __ovld __cnfn convert_long2_rte(ushort2);
+long2 __ovld __cnfn convert_long2_sat_rte(ushort2);
+long2 __ovld __cnfn convert_long2_rtz(ushort2);
+long2 __ovld __cnfn convert_long2_sat_rtz(ushort2);
+long2 __ovld __cnfn convert_long2_rtp(ushort2);
+long2 __ovld __cnfn convert_long2_sat_rtp(ushort2);
+long2 __ovld __cnfn convert_long2_rtn(ushort2);
+long2 __ovld __cnfn convert_long2_sat_rtn(ushort2);
+long2 __ovld __cnfn convert_long2(ushort2);
+long2 __ovld __cnfn convert_long2_sat(ushort2);
+long2 __ovld __cnfn convert_long2_rte(int2);
+long2 __ovld __cnfn convert_long2_sat_rte(int2);
+long2 __ovld __cnfn convert_long2_rtz(int2);
+long2 __ovld __cnfn convert_long2_sat_rtz(int2);
+long2 __ovld __cnfn convert_long2_rtp(int2);
+long2 __ovld __cnfn convert_long2_sat_rtp(int2);
+long2 __ovld __cnfn convert_long2_rtn(int2);
+long2 __ovld __cnfn convert_long2_sat_rtn(int2);
+long2 __ovld __cnfn convert_long2(int2);
+long2 __ovld __cnfn convert_long2_sat(int2);
+long2 __ovld __cnfn convert_long2_rte(uint2);
+long2 __ovld __cnfn convert_long2_sat_rte(uint2);
+long2 __ovld __cnfn convert_long2_rtz(uint2);
+long2 __ovld __cnfn convert_long2_sat_rtz(uint2);
+long2 __ovld __cnfn convert_long2_rtp(uint2);
+long2 __ovld __cnfn convert_long2_sat_rtp(uint2);
+long2 __ovld __cnfn convert_long2_rtn(uint2);
+long2 __ovld __cnfn convert_long2_sat_rtn(uint2);
+long2 __ovld __cnfn convert_long2(uint2);
+long2 __ovld __cnfn convert_long2_sat(uint2);
+long2 __ovld __cnfn convert_long2_rte(long2);
+long2 __ovld __cnfn convert_long2_sat_rte(long2);
+long2 __ovld __cnfn convert_long2_rtz(long2);
+long2 __ovld __cnfn convert_long2_sat_rtz(long2);
+long2 __ovld __cnfn convert_long2_rtp(long2);
+long2 __ovld __cnfn convert_long2_sat_rtp(long2);
+long2 __ovld __cnfn convert_long2_rtn(long2);
+long2 __ovld __cnfn convert_long2_sat_rtn(long2);
+long2 __ovld __cnfn convert_long2(long2);
+long2 __ovld __cnfn convert_long2_sat(long2);
+long2 __ovld __cnfn convert_long2_rte(ulong2);
+long2 __ovld __cnfn convert_long2_sat_rte(ulong2);
+long2 __ovld __cnfn convert_long2_rtz(ulong2);
+long2 __ovld __cnfn convert_long2_sat_rtz(ulong2);
+long2 __ovld __cnfn convert_long2_rtp(ulong2);
+long2 __ovld __cnfn convert_long2_sat_rtp(ulong2);
+long2 __ovld __cnfn convert_long2_rtn(ulong2);
+long2 __ovld __cnfn convert_long2_sat_rtn(ulong2);
+long2 __ovld __cnfn convert_long2(ulong2);
+long2 __ovld __cnfn convert_long2_sat(ulong2);
+long2 __ovld __cnfn convert_long2_rte(float2);
+long2 __ovld __cnfn convert_long2_sat_rte(float2);
+long2 __ovld __cnfn convert_long2_rtz(float2);
+long2 __ovld __cnfn convert_long2_sat_rtz(float2);
+long2 __ovld __cnfn convert_long2_rtp(float2);
+long2 __ovld __cnfn convert_long2_sat_rtp(float2);
+long2 __ovld __cnfn convert_long2_rtn(float2);
+long2 __ovld __cnfn convert_long2_sat_rtn(float2);
+long2 __ovld __cnfn convert_long2(float2);
+long2 __ovld __cnfn convert_long2_sat(float2);
+ulong2 __ovld __cnfn convert_ulong2_rte(char2);
+ulong2 __ovld __cnfn convert_ulong2_sat_rte(char2);
+ulong2 __ovld __cnfn convert_ulong2_rtz(char2);
+ulong2 __ovld __cnfn convert_ulong2_sat_rtz(char2);
+ulong2 __ovld __cnfn convert_ulong2_rtp(char2);
+ulong2 __ovld __cnfn convert_ulong2_sat_rtp(char2);
+ulong2 __ovld __cnfn convert_ulong2_rtn(char2);
+ulong2 __ovld __cnfn convert_ulong2_sat_rtn(char2);
+ulong2 __ovld __cnfn convert_ulong2(char2);
+ulong2 __ovld __cnfn convert_ulong2_sat(char2);
+ulong2 __ovld __cnfn convert_ulong2_rte(uchar2);
+ulong2 __ovld __cnfn convert_ulong2_sat_rte(uchar2);
+ulong2 __ovld __cnfn convert_ulong2_rtz(uchar2);
+ulong2 __ovld __cnfn convert_ulong2_sat_rtz(uchar2);
+ulong2 __ovld __cnfn convert_ulong2_rtp(uchar2);
+ulong2 __ovld __cnfn convert_ulong2_sat_rtp(uchar2);
+ulong2 __ovld __cnfn convert_ulong2_rtn(uchar2);
+ulong2 __ovld __cnfn convert_ulong2_sat_rtn(uchar2);
+ulong2 __ovld __cnfn convert_ulong2(uchar2);
+ulong2 __ovld __cnfn convert_ulong2_sat(uchar2);
+ulong2 __ovld __cnfn convert_ulong2_rte(short2);
+ulong2 __ovld __cnfn convert_ulong2_sat_rte(short2);
+ulong2 __ovld __cnfn convert_ulong2_rtz(short2);
+ulong2 __ovld __cnfn convert_ulong2_sat_rtz(short2);
+ulong2 __ovld __cnfn convert_ulong2_rtp(short2);
+ulong2 __ovld __cnfn convert_ulong2_sat_rtp(short2);
+ulong2 __ovld __cnfn convert_ulong2_rtn(short2);
+ulong2 __ovld __cnfn convert_ulong2_sat_rtn(short2);
+ulong2 __ovld __cnfn convert_ulong2(short2);
+ulong2 __ovld __cnfn convert_ulong2_sat(short2);
+ulong2 __ovld __cnfn convert_ulong2_rte(ushort2);
+ulong2 __ovld __cnfn convert_ulong2_sat_rte(ushort2);
+ulong2 __ovld __cnfn convert_ulong2_rtz(ushort2);
+ulong2 __ovld __cnfn convert_ulong2_sat_rtz(ushort2);
+ulong2 __ovld __cnfn convert_ulong2_rtp(ushort2);
+ulong2 __ovld __cnfn convert_ulong2_sat_rtp(ushort2);
+ulong2 __ovld __cnfn convert_ulong2_rtn(ushort2);
+ulong2 __ovld __cnfn convert_ulong2_sat_rtn(ushort2);
+ulong2 __ovld __cnfn convert_ulong2(ushort2);
+ulong2 __ovld __cnfn convert_ulong2_sat(ushort2);
+ulong2 __ovld __cnfn convert_ulong2_rte(int2);
+ulong2 __ovld __cnfn convert_ulong2_sat_rte(int2);
+ulong2 __ovld __cnfn convert_ulong2_rtz(int2);
+ulong2 __ovld __cnfn convert_ulong2_sat_rtz(int2);
+ulong2 __ovld __cnfn convert_ulong2_rtp(int2);
+ulong2 __ovld __cnfn convert_ulong2_sat_rtp(int2);
+ulong2 __ovld __cnfn convert_ulong2_rtn(int2);
+ulong2 __ovld __cnfn convert_ulong2_sat_rtn(int2);
+ulong2 __ovld __cnfn convert_ulong2(int2);
+ulong2 __ovld __cnfn convert_ulong2_sat(int2);
+ulong2 __ovld __cnfn convert_ulong2_rte(uint2);
+ulong2 __ovld __cnfn convert_ulong2_sat_rte(uint2);
+ulong2 __ovld __cnfn convert_ulong2_rtz(uint2);
+ulong2 __ovld __cnfn convert_ulong2_sat_rtz(uint2);
+ulong2 __ovld __cnfn convert_ulong2_rtp(uint2);
+ulong2 __ovld __cnfn convert_ulong2_sat_rtp(uint2);
+ulong2 __ovld __cnfn convert_ulong2_rtn(uint2);
+ulong2 __ovld __cnfn convert_ulong2_sat_rtn(uint2);
+ulong2 __ovld __cnfn convert_ulong2(uint2);
+ulong2 __ovld __cnfn convert_ulong2_sat(uint2);
+ulong2 __ovld __cnfn convert_ulong2_rte(long2);
+ulong2 __ovld __cnfn convert_ulong2_sat_rte(long2);
+ulong2 __ovld __cnfn convert_ulong2_rtz(long2);
+ulong2 __ovld __cnfn convert_ulong2_sat_rtz(long2);
+ulong2 __ovld __cnfn convert_ulong2_rtp(long2);
+ulong2 __ovld __cnfn convert_ulong2_sat_rtp(long2);
+ulong2 __ovld __cnfn convert_ulong2_rtn(long2);
+ulong2 __ovld __cnfn convert_ulong2_sat_rtn(long2);
+ulong2 __ovld __cnfn convert_ulong2(long2);
+ulong2 __ovld __cnfn convert_ulong2_sat(long2);
+ulong2 __ovld __cnfn convert_ulong2_rte(ulong2);
+ulong2 __ovld __cnfn convert_ulong2_sat_rte(ulong2);
+ulong2 __ovld __cnfn convert_ulong2_rtz(ulong2);
+ulong2 __ovld __cnfn convert_ulong2_sat_rtz(ulong2);
+ulong2 __ovld __cnfn convert_ulong2_rtp(ulong2);
+ulong2 __ovld __cnfn convert_ulong2_sat_rtp(ulong2);
+ulong2 __ovld __cnfn convert_ulong2_rtn(ulong2);
+ulong2 __ovld __cnfn convert_ulong2_sat_rtn(ulong2);
+ulong2 __ovld __cnfn convert_ulong2(ulong2);
+ulong2 __ovld __cnfn convert_ulong2_sat(ulong2);
+ulong2 __ovld __cnfn convert_ulong2_rte(float2);
+ulong2 __ovld __cnfn convert_ulong2_sat_rte(float2);
+ulong2 __ovld __cnfn convert_ulong2_rtz(float2);
+ulong2 __ovld __cnfn convert_ulong2_sat_rtz(float2);
+ulong2 __ovld __cnfn convert_ulong2_rtp(float2);
+ulong2 __ovld __cnfn convert_ulong2_sat_rtp(float2);
+ulong2 __ovld __cnfn convert_ulong2_rtn(float2);
+ulong2 __ovld __cnfn convert_ulong2_sat_rtn(float2);
+ulong2 __ovld __cnfn convert_ulong2(float2);
+ulong2 __ovld __cnfn convert_ulong2_sat(float2);
+float2 __ovld __cnfn convert_float2_rte(char2);
+float2 __ovld __cnfn convert_float2_rtz(char2);
+float2 __ovld __cnfn convert_float2_rtp(char2);
+float2 __ovld __cnfn convert_float2_rtn(char2);
+float2 __ovld __cnfn convert_float2(char2);
+float2 __ovld __cnfn convert_float2_rte(uchar2);
+float2 __ovld __cnfn convert_float2_rtz(uchar2);
+float2 __ovld __cnfn convert_float2_rtp(uchar2);
+float2 __ovld __cnfn convert_float2_rtn(uchar2);
+float2 __ovld __cnfn convert_float2(uchar2);
+float2 __ovld __cnfn convert_float2_rte(short2);
+float2 __ovld __cnfn convert_float2_rtz(short2);
+float2 __ovld __cnfn convert_float2_rtp(short2);
+float2 __ovld __cnfn convert_float2_rtn(short2);
+float2 __ovld __cnfn convert_float2(short2);
+float2 __ovld __cnfn convert_float2_rte(ushort2);
+float2 __ovld __cnfn convert_float2_rtz(ushort2);
+float2 __ovld __cnfn convert_float2_rtp(ushort2);
+float2 __ovld __cnfn convert_float2_rtn(ushort2);
+float2 __ovld __cnfn convert_float2(ushort2);
+float2 __ovld __cnfn convert_float2_rte(int2);
+float2 __ovld __cnfn convert_float2_rtz(int2);
+float2 __ovld __cnfn convert_float2_rtp(int2);
+float2 __ovld __cnfn convert_float2_rtn(int2);
+float2 __ovld __cnfn convert_float2(int2);
+float2 __ovld __cnfn convert_float2_rte(uint2);
+float2 __ovld __cnfn convert_float2_rtz(uint2);
+float2 __ovld __cnfn convert_float2_rtp(uint2);
+float2 __ovld __cnfn convert_float2_rtn(uint2);
+float2 __ovld __cnfn convert_float2(uint2);
+float2 __ovld __cnfn convert_float2_rte(long2);
+float2 __ovld __cnfn convert_float2_rtz(long2);
+float2 __ovld __cnfn convert_float2_rtp(long2);
+float2 __ovld __cnfn convert_float2_rtn(long2);
+float2 __ovld __cnfn convert_float2(long2);
+float2 __ovld __cnfn convert_float2_rte(ulong2);
+float2 __ovld __cnfn convert_float2_rtz(ulong2);
+float2 __ovld __cnfn convert_float2_rtp(ulong2);
+float2 __ovld __cnfn convert_float2_rtn(ulong2);
+float2 __ovld __cnfn convert_float2(ulong2);
+float2 __ovld __cnfn convert_float2_rte(float2);
+float2 __ovld __cnfn convert_float2_rtz(float2);
+float2 __ovld __cnfn convert_float2_rtp(float2);
+float2 __ovld __cnfn convert_float2_rtn(float2);
+float2 __ovld __cnfn convert_float2(float2);
+char3 __ovld __cnfn convert_char3_rte(char3);
+char3 __ovld __cnfn convert_char3_sat_rte(char3);
+char3 __ovld __cnfn convert_char3_rtz(char3);
+char3 __ovld __cnfn convert_char3_sat_rtz(char3);
+char3 __ovld __cnfn convert_char3_rtp(char3);
+char3 __ovld __cnfn convert_char3_sat_rtp(char3);
+char3 __ovld __cnfn convert_char3_rtn(char3);
+char3 __ovld __cnfn convert_char3_sat_rtn(char3);
+char3 __ovld __cnfn convert_char3(char3);
+char3 __ovld __cnfn convert_char3_sat(char3);
+char3 __ovld __cnfn convert_char3_rte(uchar3);
+char3 __ovld __cnfn convert_char3_sat_rte(uchar3);
+char3 __ovld __cnfn convert_char3_rtz(uchar3);
+char3 __ovld __cnfn convert_char3_sat_rtz(uchar3);
+char3 __ovld __cnfn convert_char3_rtp(uchar3);
+char3 __ovld __cnfn convert_char3_sat_rtp(uchar3);
+char3 __ovld __cnfn convert_char3_rtn(uchar3);
+char3 __ovld __cnfn convert_char3_sat_rtn(uchar3);
+char3 __ovld __cnfn convert_char3(uchar3);
+char3 __ovld __cnfn convert_char3_sat(uchar3);
+char3 __ovld __cnfn convert_char3_rte(short3);
+char3 __ovld __cnfn convert_char3_sat_rte(short3);
+char3 __ovld __cnfn convert_char3_rtz(short3);
+char3 __ovld __cnfn convert_char3_sat_rtz(short3);
+char3 __ovld __cnfn convert_char3_rtp(short3);
+char3 __ovld __cnfn convert_char3_sat_rtp(short3);
+char3 __ovld __cnfn convert_char3_rtn(short3);
+char3 __ovld __cnfn convert_char3_sat_rtn(short3);
+char3 __ovld __cnfn convert_char3(short3);
+char3 __ovld __cnfn convert_char3_sat(short3);
+char3 __ovld __cnfn convert_char3_rte(ushort3);
+char3 __ovld __cnfn convert_char3_sat_rte(ushort3);
+char3 __ovld __cnfn convert_char3_rtz(ushort3);
+char3 __ovld __cnfn convert_char3_sat_rtz(ushort3);
+char3 __ovld __cnfn convert_char3_rtp(ushort3);
+char3 __ovld __cnfn convert_char3_sat_rtp(ushort3);
+char3 __ovld __cnfn convert_char3_rtn(ushort3);
+char3 __ovld __cnfn convert_char3_sat_rtn(ushort3);
+char3 __ovld __cnfn convert_char3(ushort3);
+char3 __ovld __cnfn convert_char3_sat(ushort3);
+char3 __ovld __cnfn convert_char3_rte(int3);
+char3 __ovld __cnfn convert_char3_sat_rte(int3);
+char3 __ovld __cnfn convert_char3_rtz(int3);
+char3 __ovld __cnfn convert_char3_sat_rtz(int3);
+char3 __ovld __cnfn convert_char3_rtp(int3);
+char3 __ovld __cnfn convert_char3_sat_rtp(int3);
+char3 __ovld __cnfn convert_char3_rtn(int3);
+char3 __ovld __cnfn convert_char3_sat_rtn(int3);
+char3 __ovld __cnfn convert_char3(int3);
+char3 __ovld __cnfn convert_char3_sat(int3);
+char3 __ovld __cnfn convert_char3_rte(uint3);
+char3 __ovld __cnfn convert_char3_sat_rte(uint3);
+char3 __ovld __cnfn convert_char3_rtz(uint3);
+char3 __ovld __cnfn convert_char3_sat_rtz(uint3);
+char3 __ovld __cnfn convert_char3_rtp(uint3);
+char3 __ovld __cnfn convert_char3_sat_rtp(uint3);
+char3 __ovld __cnfn convert_char3_rtn(uint3);
+char3 __ovld __cnfn convert_char3_sat_rtn(uint3);
+char3 __ovld __cnfn convert_char3(uint3);
+char3 __ovld __cnfn convert_char3_sat(uint3);
+char3 __ovld __cnfn convert_char3_rte(long3);
+char3 __ovld __cnfn convert_char3_sat_rte(long3);
+char3 __ovld __cnfn convert_char3_rtz(long3);
+char3 __ovld __cnfn convert_char3_sat_rtz(long3);
+char3 __ovld __cnfn convert_char3_rtp(long3);
+char3 __ovld __cnfn convert_char3_sat_rtp(long3);
+char3 __ovld __cnfn convert_char3_rtn(long3);
+char3 __ovld __cnfn convert_char3_sat_rtn(long3);
+char3 __ovld __cnfn convert_char3(long3);
+char3 __ovld __cnfn convert_char3_sat(long3);
+char3 __ovld __cnfn convert_char3_rte(ulong3);
+char3 __ovld __cnfn convert_char3_sat_rte(ulong3);
+char3 __ovld __cnfn convert_char3_rtz(ulong3);
+char3 __ovld __cnfn convert_char3_sat_rtz(ulong3);
+char3 __ovld __cnfn convert_char3_rtp(ulong3);
+char3 __ovld __cnfn convert_char3_sat_rtp(ulong3);
+char3 __ovld __cnfn convert_char3_rtn(ulong3);
+char3 __ovld __cnfn convert_char3_sat_rtn(ulong3);
+char3 __ovld __cnfn convert_char3(ulong3);
+char3 __ovld __cnfn convert_char3_sat(ulong3);
+char3 __ovld __cnfn convert_char3_rte(float3);
+char3 __ovld __cnfn convert_char3_sat_rte(float3);
+char3 __ovld __cnfn convert_char3_rtz(float3);
+char3 __ovld __cnfn convert_char3_sat_rtz(float3);
+char3 __ovld __cnfn convert_char3_rtp(float3);
+char3 __ovld __cnfn convert_char3_sat_rtp(float3);
+char3 __ovld __cnfn convert_char3_rtn(float3);
+char3 __ovld __cnfn convert_char3_sat_rtn(float3);
+char3 __ovld __cnfn convert_char3(float3);
+char3 __ovld __cnfn convert_char3_sat(float3);
+uchar3 __ovld __cnfn convert_uchar3_rte(char3);
+uchar3 __ovld __cnfn convert_uchar3_sat_rte(char3);
+uchar3 __ovld __cnfn convert_uchar3_rtz(char3);
+uchar3 __ovld __cnfn convert_uchar3_sat_rtz(char3);
+uchar3 __ovld __cnfn convert_uchar3_rtp(char3);
+uchar3 __ovld __cnfn convert_uchar3_sat_rtp(char3);
+uchar3 __ovld __cnfn convert_uchar3_rtn(char3);
+uchar3 __ovld __cnfn convert_uchar3_sat_rtn(char3);
+uchar3 __ovld __cnfn convert_uchar3(char3);
+uchar3 __ovld __cnfn convert_uchar3_sat(char3);
+uchar3 __ovld __cnfn convert_uchar3_rte(uchar3);
+uchar3 __ovld __cnfn convert_uchar3_sat_rte(uchar3);
+uchar3 __ovld __cnfn convert_uchar3_rtz(uchar3);
+uchar3 __ovld __cnfn convert_uchar3_sat_rtz(uchar3);
+uchar3 __ovld __cnfn convert_uchar3_rtp(uchar3);
+uchar3 __ovld __cnfn convert_uchar3_sat_rtp(uchar3);
+uchar3 __ovld __cnfn convert_uchar3_rtn(uchar3);
+uchar3 __ovld __cnfn convert_uchar3_sat_rtn(uchar3);
+uchar3 __ovld __cnfn convert_uchar3(uchar3);
+uchar3 __ovld __cnfn convert_uchar3_sat(uchar3);
+uchar3 __ovld __cnfn convert_uchar3_rte(short3);
+uchar3 __ovld __cnfn convert_uchar3_sat_rte(short3);
+uchar3 __ovld __cnfn convert_uchar3_rtz(short3);
+uchar3 __ovld __cnfn convert_uchar3_sat_rtz(short3);
+uchar3 __ovld __cnfn convert_uchar3_rtp(short3);
+uchar3 __ovld __cnfn convert_uchar3_sat_rtp(short3);
+uchar3 __ovld __cnfn convert_uchar3_rtn(short3);
+uchar3 __ovld __cnfn convert_uchar3_sat_rtn(short3);
+uchar3 __ovld __cnfn convert_uchar3(short3);
+uchar3 __ovld __cnfn convert_uchar3_sat(short3);
+uchar3 __ovld __cnfn convert_uchar3_rte(ushort3);
+uchar3 __ovld __cnfn convert_uchar3_sat_rte(ushort3);
+uchar3 __ovld __cnfn convert_uchar3_rtz(ushort3);
+uchar3 __ovld __cnfn convert_uchar3_sat_rtz(ushort3);
+uchar3 __ovld __cnfn convert_uchar3_rtp(ushort3);
+uchar3 __ovld __cnfn convert_uchar3_sat_rtp(ushort3);
+uchar3 __ovld __cnfn convert_uchar3_rtn(ushort3);
+uchar3 __ovld __cnfn convert_uchar3_sat_rtn(ushort3);
+uchar3 __ovld __cnfn convert_uchar3(ushort3);
+uchar3 __ovld __cnfn convert_uchar3_sat(ushort3);
+uchar3 __ovld __cnfn convert_uchar3_rte(int3);
+uchar3 __ovld __cnfn convert_uchar3_sat_rte(int3);
+uchar3 __ovld __cnfn convert_uchar3_rtz(int3);
+uchar3 __ovld __cnfn convert_uchar3_sat_rtz(int3);
+uchar3 __ovld __cnfn convert_uchar3_rtp(int3);
+uchar3 __ovld __cnfn convert_uchar3_sat_rtp(int3);
+uchar3 __ovld __cnfn convert_uchar3_rtn(int3);
+uchar3 __ovld __cnfn convert_uchar3_sat_rtn(int3);
+uchar3 __ovld __cnfn convert_uchar3(int3);
+uchar3 __ovld __cnfn convert_uchar3_sat(int3);
+uchar3 __ovld __cnfn convert_uchar3_rte(uint3);
+uchar3 __ovld __cnfn convert_uchar3_sat_rte(uint3);
+uchar3 __ovld __cnfn convert_uchar3_rtz(uint3);
+uchar3 __ovld __cnfn convert_uchar3_sat_rtz(uint3);
+uchar3 __ovld __cnfn convert_uchar3_rtp(uint3);
+uchar3 __ovld __cnfn convert_uchar3_sat_rtp(uint3);
+uchar3 __ovld __cnfn convert_uchar3_rtn(uint3);
+uchar3 __ovld __cnfn convert_uchar3_sat_rtn(uint3);
+uchar3 __ovld __cnfn convert_uchar3(uint3);
+uchar3 __ovld __cnfn convert_uchar3_sat(uint3);
+uchar3 __ovld __cnfn convert_uchar3_rte(long3);
+uchar3 __ovld __cnfn convert_uchar3_sat_rte(long3);
+uchar3 __ovld __cnfn convert_uchar3_rtz(long3);
+uchar3 __ovld __cnfn convert_uchar3_sat_rtz(long3);
+uchar3 __ovld __cnfn convert_uchar3_rtp(long3);
+uchar3 __ovld __cnfn convert_uchar3_sat_rtp(long3);
+uchar3 __ovld __cnfn convert_uchar3_rtn(long3);
+uchar3 __ovld __cnfn convert_uchar3_sat_rtn(long3);
+uchar3 __ovld __cnfn convert_uchar3(long3);
+uchar3 __ovld __cnfn convert_uchar3_sat(long3);
+uchar3 __ovld __cnfn convert_uchar3_rte(ulong3);
+uchar3 __ovld __cnfn convert_uchar3_sat_rte(ulong3);
+uchar3 __ovld __cnfn convert_uchar3_rtz(ulong3);
+uchar3 __ovld __cnfn convert_uchar3_sat_rtz(ulong3);
+uchar3 __ovld __cnfn convert_uchar3_rtp(ulong3);
+uchar3 __ovld __cnfn convert_uchar3_sat_rtp(ulong3);
+uchar3 __ovld __cnfn convert_uchar3_rtn(ulong3);
+uchar3 __ovld __cnfn convert_uchar3_sat_rtn(ulong3);
+uchar3 __ovld __cnfn convert_uchar3(ulong3);
+uchar3 __ovld __cnfn convert_uchar3_sat(ulong3);
+uchar3 __ovld __cnfn convert_uchar3_rte(float3);
+uchar3 __ovld __cnfn convert_uchar3_sat_rte(float3);
+uchar3 __ovld __cnfn convert_uchar3_rtz(float3);
+uchar3 __ovld __cnfn convert_uchar3_sat_rtz(float3);
+uchar3 __ovld __cnfn convert_uchar3_rtp(float3);
+uchar3 __ovld __cnfn convert_uchar3_sat_rtp(float3);
+uchar3 __ovld __cnfn convert_uchar3_rtn(float3);
+uchar3 __ovld __cnfn convert_uchar3_sat_rtn(float3);
+uchar3 __ovld __cnfn convert_uchar3(float3);
+uchar3 __ovld __cnfn convert_uchar3_sat(float3);
+short3 __ovld __cnfn convert_short3_rte(char3);
+short3 __ovld __cnfn convert_short3_sat_rte(char3);
+short3 __ovld __cnfn convert_short3_rtz(char3);
+short3 __ovld __cnfn convert_short3_sat_rtz(char3);
+short3 __ovld __cnfn convert_short3_rtp(char3);
+short3 __ovld __cnfn convert_short3_sat_rtp(char3);
+short3 __ovld __cnfn convert_short3_rtn(char3);
+short3 __ovld __cnfn convert_short3_sat_rtn(char3);
+short3 __ovld __cnfn convert_short3(char3);
+short3 __ovld __cnfn convert_short3_sat(char3);
+short3 __ovld __cnfn convert_short3_rte(uchar3);
+short3 __ovld __cnfn convert_short3_sat_rte(uchar3);
+short3 __ovld __cnfn convert_short3_rtz(uchar3);
+short3 __ovld __cnfn convert_short3_sat_rtz(uchar3);
+short3 __ovld __cnfn convert_short3_rtp(uchar3);
+short3 __ovld __cnfn convert_short3_sat_rtp(uchar3);
+short3 __ovld __cnfn convert_short3_rtn(uchar3);
+short3 __ovld __cnfn convert_short3_sat_rtn(uchar3);
+short3 __ovld __cnfn convert_short3(uchar3);
+short3 __ovld __cnfn convert_short3_sat(uchar3);
+short3 __ovld __cnfn convert_short3_rte(short3);
+short3 __ovld __cnfn convert_short3_sat_rte(short3);
+short3 __ovld __cnfn convert_short3_rtz(short3);
+short3 __ovld __cnfn convert_short3_sat_rtz(short3);
+short3 __ovld __cnfn convert_short3_rtp(short3);
+short3 __ovld __cnfn convert_short3_sat_rtp(short3);
+short3 __ovld __cnfn convert_short3_rtn(short3);
+short3 __ovld __cnfn convert_short3_sat_rtn(short3);
+short3 __ovld __cnfn convert_short3(short3);
+short3 __ovld __cnfn convert_short3_sat(short3);
+short3 __ovld __cnfn convert_short3_rte(ushort3);
+short3 __ovld __cnfn convert_short3_sat_rte(ushort3);
+short3 __ovld __cnfn convert_short3_rtz(ushort3);
+short3 __ovld __cnfn convert_short3_sat_rtz(ushort3);
+short3 __ovld __cnfn convert_short3_rtp(ushort3);
+short3 __ovld __cnfn convert_short3_sat_rtp(ushort3);
+short3 __ovld __cnfn convert_short3_rtn(ushort3);
+short3 __ovld __cnfn convert_short3_sat_rtn(ushort3);
+short3 __ovld __cnfn convert_short3(ushort3);
+short3 __ovld __cnfn convert_short3_sat(ushort3);
+short3 __ovld __cnfn convert_short3_rte(int3);
+short3 __ovld __cnfn convert_short3_sat_rte(int3);
+short3 __ovld __cnfn convert_short3_rtz(int3);
+short3 __ovld __cnfn convert_short3_sat_rtz(int3);
+short3 __ovld __cnfn convert_short3_rtp(int3);
+short3 __ovld __cnfn convert_short3_sat_rtp(int3);
+short3 __ovld __cnfn convert_short3_rtn(int3);
+short3 __ovld __cnfn convert_short3_sat_rtn(int3);
+short3 __ovld __cnfn convert_short3(int3);
+short3 __ovld __cnfn convert_short3_sat(int3);
+short3 __ovld __cnfn convert_short3_rte(uint3);
+short3 __ovld __cnfn convert_short3_sat_rte(uint3);
+short3 __ovld __cnfn convert_short3_rtz(uint3);
+short3 __ovld __cnfn convert_short3_sat_rtz(uint3);
+short3 __ovld __cnfn convert_short3_rtp(uint3);
+short3 __ovld __cnfn convert_short3_sat_rtp(uint3);
+short3 __ovld __cnfn convert_short3_rtn(uint3);
+short3 __ovld __cnfn convert_short3_sat_rtn(uint3);
+short3 __ovld __cnfn convert_short3(uint3);
+short3 __ovld __cnfn convert_short3_sat(uint3);
+short3 __ovld __cnfn convert_short3_rte(long3);
+short3 __ovld __cnfn convert_short3_sat_rte(long3);
+short3 __ovld __cnfn convert_short3_rtz(long3);
+short3 __ovld __cnfn convert_short3_sat_rtz(long3);
+short3 __ovld __cnfn convert_short3_rtp(long3);
+short3 __ovld __cnfn convert_short3_sat_rtp(long3);
+short3 __ovld __cnfn convert_short3_rtn(long3);
+short3 __ovld __cnfn convert_short3_sat_rtn(long3);
+short3 __ovld __cnfn convert_short3(long3);
+short3 __ovld __cnfn convert_short3_sat(long3);
+short3 __ovld __cnfn convert_short3_rte(ulong3);
+short3 __ovld __cnfn convert_short3_sat_rte(ulong3);
+short3 __ovld __cnfn convert_short3_rtz(ulong3);
+short3 __ovld __cnfn convert_short3_sat_rtz(ulong3);
+short3 __ovld __cnfn convert_short3_rtp(ulong3);
+short3 __ovld __cnfn convert_short3_sat_rtp(ulong3);
+short3 __ovld __cnfn convert_short3_rtn(ulong3);
+short3 __ovld __cnfn convert_short3_sat_rtn(ulong3);
+short3 __ovld __cnfn convert_short3(ulong3);
+short3 __ovld __cnfn convert_short3_sat(ulong3);
+short3 __ovld __cnfn convert_short3_rte(float3);
+short3 __ovld __cnfn convert_short3_sat_rte(float3);
+short3 __ovld __cnfn convert_short3_rtz(float3);
+short3 __ovld __cnfn convert_short3_sat_rtz(float3);
+short3 __ovld __cnfn convert_short3_rtp(float3);
+short3 __ovld __cnfn convert_short3_sat_rtp(float3);
+short3 __ovld __cnfn convert_short3_rtn(float3);
+short3 __ovld __cnfn convert_short3_sat_rtn(float3);
+short3 __ovld __cnfn convert_short3(float3);
+short3 __ovld __cnfn convert_short3_sat(float3);
+ushort3 __ovld __cnfn convert_ushort3_rte(char3);
+ushort3 __ovld __cnfn convert_ushort3_sat_rte(char3);
+ushort3 __ovld __cnfn convert_ushort3_rtz(char3);
+ushort3 __ovld __cnfn convert_ushort3_sat_rtz(char3);
+ushort3 __ovld __cnfn convert_ushort3_rtp(char3);
+ushort3 __ovld __cnfn convert_ushort3_sat_rtp(char3);
+ushort3 __ovld __cnfn convert_ushort3_rtn(char3);
+ushort3 __ovld __cnfn convert_ushort3_sat_rtn(char3);
+ushort3 __ovld __cnfn convert_ushort3(char3);
+ushort3 __ovld __cnfn convert_ushort3_sat(char3);
+ushort3 __ovld __cnfn convert_ushort3_rte(uchar3);
+ushort3 __ovld __cnfn convert_ushort3_sat_rte(uchar3);
+ushort3 __ovld __cnfn convert_ushort3_rtz(uchar3);
+ushort3 __ovld __cnfn convert_ushort3_sat_rtz(uchar3);
+ushort3 __ovld __cnfn convert_ushort3_rtp(uchar3);
+ushort3 __ovld __cnfn convert_ushort3_sat_rtp(uchar3);
+ushort3 __ovld __cnfn convert_ushort3_rtn(uchar3);
+ushort3 __ovld __cnfn convert_ushort3_sat_rtn(uchar3);
+ushort3 __ovld __cnfn convert_ushort3(uchar3);
+ushort3 __ovld __cnfn convert_ushort3_sat(uchar3);
+ushort3 __ovld __cnfn convert_ushort3_rte(short3);
+ushort3 __ovld __cnfn convert_ushort3_sat_rte(short3);
+ushort3 __ovld __cnfn convert_ushort3_rtz(short3);
+ushort3 __ovld __cnfn convert_ushort3_sat_rtz(short3);
+ushort3 __ovld __cnfn convert_ushort3_rtp(short3);
+ushort3 __ovld __cnfn convert_ushort3_sat_rtp(short3);
+ushort3 __ovld __cnfn convert_ushort3_rtn(short3);
+ushort3 __ovld __cnfn convert_ushort3_sat_rtn(short3);
+ushort3 __ovld __cnfn convert_ushort3(short3);
+ushort3 __ovld __cnfn convert_ushort3_sat(short3);
+ushort3 __ovld __cnfn convert_ushort3_rte(ushort3);
+ushort3 __ovld __cnfn convert_ushort3_sat_rte(ushort3);
+ushort3 __ovld __cnfn convert_ushort3_rtz(ushort3);
+ushort3 __ovld __cnfn convert_ushort3_sat_rtz(ushort3);
+ushort3 __ovld __cnfn convert_ushort3_rtp(ushort3);
+ushort3 __ovld __cnfn convert_ushort3_sat_rtp(ushort3);
+ushort3 __ovld __cnfn convert_ushort3_rtn(ushort3);
+ushort3 __ovld __cnfn convert_ushort3_sat_rtn(ushort3);
+ushort3 __ovld __cnfn convert_ushort3(ushort3);
+ushort3 __ovld __cnfn convert_ushort3_sat(ushort3);
+ushort3 __ovld __cnfn convert_ushort3_rte(int3);
+ushort3 __ovld __cnfn convert_ushort3_sat_rte(int3);
+ushort3 __ovld __cnfn convert_ushort3_rtz(int3);
+ushort3 __ovld __cnfn convert_ushort3_sat_rtz(int3);
+ushort3 __ovld __cnfn convert_ushort3_rtp(int3);
+ushort3 __ovld __cnfn convert_ushort3_sat_rtp(int3);
+ushort3 __ovld __cnfn convert_ushort3_rtn(int3);
+ushort3 __ovld __cnfn convert_ushort3_sat_rtn(int3);
+ushort3 __ovld __cnfn convert_ushort3(int3);
+ushort3 __ovld __cnfn convert_ushort3_sat(int3);
+ushort3 __ovld __cnfn convert_ushort3_rte(uint3);
+ushort3 __ovld __cnfn convert_ushort3_sat_rte(uint3);
+ushort3 __ovld __cnfn convert_ushort3_rtz(uint3);
+ushort3 __ovld __cnfn convert_ushort3_sat_rtz(uint3);
+ushort3 __ovld __cnfn convert_ushort3_rtp(uint3);
+ushort3 __ovld __cnfn convert_ushort3_sat_rtp(uint3);
+ushort3 __ovld __cnfn convert_ushort3_rtn(uint3);
+ushort3 __ovld __cnfn convert_ushort3_sat_rtn(uint3);
+ushort3 __ovld __cnfn convert_ushort3(uint3);
+ushort3 __ovld __cnfn convert_ushort3_sat(uint3);
+ushort3 __ovld __cnfn convert_ushort3_rte(long3);
+ushort3 __ovld __cnfn convert_ushort3_sat_rte(long3);
+ushort3 __ovld __cnfn convert_ushort3_rtz(long3);
+ushort3 __ovld __cnfn convert_ushort3_sat_rtz(long3);
+ushort3 __ovld __cnfn convert_ushort3_rtp(long3);
+ushort3 __ovld __cnfn convert_ushort3_sat_rtp(long3);
+ushort3 __ovld __cnfn convert_ushort3_rtn(long3);
+ushort3 __ovld __cnfn convert_ushort3_sat_rtn(long3);
+ushort3 __ovld __cnfn convert_ushort3(long3);
+ushort3 __ovld __cnfn convert_ushort3_sat(long3);
+ushort3 __ovld __cnfn convert_ushort3_rte(ulong3);
+ushort3 __ovld __cnfn convert_ushort3_sat_rte(ulong3);
+ushort3 __ovld __cnfn convert_ushort3_rtz(ulong3);
+ushort3 __ovld __cnfn convert_ushort3_sat_rtz(ulong3);
+ushort3 __ovld __cnfn convert_ushort3_rtp(ulong3);
+ushort3 __ovld __cnfn convert_ushort3_sat_rtp(ulong3);
+ushort3 __ovld __cnfn convert_ushort3_rtn(ulong3);
+ushort3 __ovld __cnfn convert_ushort3_sat_rtn(ulong3);
+ushort3 __ovld __cnfn convert_ushort3(ulong3);
+ushort3 __ovld __cnfn convert_ushort3_sat(ulong3);
+ushort3 __ovld __cnfn convert_ushort3_rte(float3);
+ushort3 __ovld __cnfn convert_ushort3_sat_rte(float3);
+ushort3 __ovld __cnfn convert_ushort3_rtz(float3);
+ushort3 __ovld __cnfn convert_ushort3_sat_rtz(float3);
+ushort3 __ovld __cnfn convert_ushort3_rtp(float3);
+ushort3 __ovld __cnfn convert_ushort3_sat_rtp(float3);
+ushort3 __ovld __cnfn convert_ushort3_rtn(float3);
+ushort3 __ovld __cnfn convert_ushort3_sat_rtn(float3);
+ushort3 __ovld __cnfn convert_ushort3(float3);
+ushort3 __ovld __cnfn convert_ushort3_sat(float3);
+int3 __ovld __cnfn convert_int3_rte(char3);
+int3 __ovld __cnfn convert_int3_sat_rte(char3);
+int3 __ovld __cnfn convert_int3_rtz(char3);
+int3 __ovld __cnfn convert_int3_sat_rtz(char3);
+int3 __ovld __cnfn convert_int3_rtp(char3);
+int3 __ovld __cnfn convert_int3_sat_rtp(char3);
+int3 __ovld __cnfn convert_int3_rtn(char3);
+int3 __ovld __cnfn convert_int3_sat_rtn(char3);
+int3 __ovld __cnfn convert_int3(char3);
+int3 __ovld __cnfn convert_int3_sat(char3);
+int3 __ovld __cnfn convert_int3_rte(uchar3);
+int3 __ovld __cnfn convert_int3_sat_rte(uchar3);
+int3 __ovld __cnfn convert_int3_rtz(uchar3);
+int3 __ovld __cnfn convert_int3_sat_rtz(uchar3);
+int3 __ovld __cnfn convert_int3_rtp(uchar3);
+int3 __ovld __cnfn convert_int3_sat_rtp(uchar3);
+int3 __ovld __cnfn convert_int3_rtn(uchar3);
+int3 __ovld __cnfn convert_int3_sat_rtn(uchar3);
+int3 __ovld __cnfn convert_int3(uchar3);
+int3 __ovld __cnfn convert_int3_sat(uchar3);
+int3 __ovld __cnfn convert_int3_rte(short3);
+int3 __ovld __cnfn convert_int3_sat_rte(short3);
+int3 __ovld __cnfn convert_int3_rtz(short3);
+int3 __ovld __cnfn convert_int3_sat_rtz(short3);
+int3 __ovld __cnfn convert_int3_rtp(short3);
+int3 __ovld __cnfn convert_int3_sat_rtp(short3);
+int3 __ovld __cnfn convert_int3_rtn(short3);
+int3 __ovld __cnfn convert_int3_sat_rtn(short3);
+int3 __ovld __cnfn convert_int3(short3);
+int3 __ovld __cnfn convert_int3_sat(short3);
+int3 __ovld __cnfn convert_int3_rte(ushort3);
+int3 __ovld __cnfn convert_int3_sat_rte(ushort3);
+int3 __ovld __cnfn convert_int3_rtz(ushort3);
+int3 __ovld __cnfn convert_int3_sat_rtz(ushort3);
+int3 __ovld __cnfn convert_int3_rtp(ushort3);
+int3 __ovld __cnfn convert_int3_sat_rtp(ushort3);
+int3 __ovld __cnfn convert_int3_rtn(ushort3);
+int3 __ovld __cnfn convert_int3_sat_rtn(ushort3);
+int3 __ovld __cnfn convert_int3(ushort3);
+int3 __ovld __cnfn convert_int3_sat(ushort3);
+int3 __ovld __cnfn convert_int3_rte(int3);
+int3 __ovld __cnfn convert_int3_sat_rte(int3);
+int3 __ovld __cnfn convert_int3_rtz(int3);
+int3 __ovld __cnfn convert_int3_sat_rtz(int3);
+int3 __ovld __cnfn convert_int3_rtp(int3);
+int3 __ovld __cnfn convert_int3_sat_rtp(int3);
+int3 __ovld __cnfn convert_int3_rtn(int3);
+int3 __ovld __cnfn convert_int3_sat_rtn(int3);
+int3 __ovld __cnfn convert_int3(int3);
+int3 __ovld __cnfn convert_int3_sat(int3);
+int3 __ovld __cnfn convert_int3_rte(uint3);
+int3 __ovld __cnfn convert_int3_sat_rte(uint3);
+int3 __ovld __cnfn convert_int3_rtz(uint3);
+int3 __ovld __cnfn convert_int3_sat_rtz(uint3);
+int3 __ovld __cnfn convert_int3_rtp(uint3);
+int3 __ovld __cnfn convert_int3_sat_rtp(uint3);
+int3 __ovld __cnfn convert_int3_rtn(uint3);
+int3 __ovld __cnfn convert_int3_sat_rtn(uint3);
+int3 __ovld __cnfn convert_int3(uint3);
+int3 __ovld __cnfn convert_int3_sat(uint3);
+int3 __ovld __cnfn convert_int3_rte(long3);
+int3 __ovld __cnfn convert_int3_sat_rte(long3);
+int3 __ovld __cnfn convert_int3_rtz(long3);
+int3 __ovld __cnfn convert_int3_sat_rtz(long3);
+int3 __ovld __cnfn convert_int3_rtp(long3);
+int3 __ovld __cnfn convert_int3_sat_rtp(long3);
+int3 __ovld __cnfn convert_int3_rtn(long3);
+int3 __ovld __cnfn convert_int3_sat_rtn(long3);
+int3 __ovld __cnfn convert_int3(long3);
+int3 __ovld __cnfn convert_int3_sat(long3);
+int3 __ovld __cnfn convert_int3_rte(ulong3);
+int3 __ovld __cnfn convert_int3_sat_rte(ulong3);
+int3 __ovld __cnfn convert_int3_rtz(ulong3);
+int3 __ovld __cnfn convert_int3_sat_rtz(ulong3);
+int3 __ovld __cnfn convert_int3_rtp(ulong3);
+int3 __ovld __cnfn convert_int3_sat_rtp(ulong3);
+int3 __ovld __cnfn convert_int3_rtn(ulong3);
+int3 __ovld __cnfn convert_int3_sat_rtn(ulong3);
+int3 __ovld __cnfn convert_int3(ulong3);
+int3 __ovld __cnfn convert_int3_sat(ulong3);
+int3 __ovld __cnfn convert_int3_rte(float3);
+int3 __ovld __cnfn convert_int3_sat_rte(float3);
+int3 __ovld __cnfn convert_int3_rtz(float3);
+int3 __ovld __cnfn convert_int3_sat_rtz(float3);
+int3 __ovld __cnfn convert_int3_rtp(float3);
+int3 __ovld __cnfn convert_int3_sat_rtp(float3);
+int3 __ovld __cnfn convert_int3_rtn(float3);
+int3 __ovld __cnfn convert_int3_sat_rtn(float3);
+int3 __ovld __cnfn convert_int3(float3);
+int3 __ovld __cnfn convert_int3_sat(float3);
+uint3 __ovld __cnfn convert_uint3_rte(char3);
+uint3 __ovld __cnfn convert_uint3_sat_rte(char3);
+uint3 __ovld __cnfn convert_uint3_rtz(char3);
+uint3 __ovld __cnfn convert_uint3_sat_rtz(char3);
+uint3 __ovld __cnfn convert_uint3_rtp(char3);
+uint3 __ovld __cnfn convert_uint3_sat_rtp(char3);
+uint3 __ovld __cnfn convert_uint3_rtn(char3);
+uint3 __ovld __cnfn convert_uint3_sat_rtn(char3);
+uint3 __ovld __cnfn convert_uint3(char3);
+uint3 __ovld __cnfn convert_uint3_sat(char3);
+uint3 __ovld __cnfn convert_uint3_rte(uchar3);
+uint3 __ovld __cnfn convert_uint3_sat_rte(uchar3);
+uint3 __ovld __cnfn convert_uint3_rtz(uchar3);
+uint3 __ovld __cnfn convert_uint3_sat_rtz(uchar3);
+uint3 __ovld __cnfn convert_uint3_rtp(uchar3);
+uint3 __ovld __cnfn convert_uint3_sat_rtp(uchar3);
+uint3 __ovld __cnfn convert_uint3_rtn(uchar3);
+uint3 __ovld __cnfn convert_uint3_sat_rtn(uchar3);
+uint3 __ovld __cnfn convert_uint3(uchar3);
+uint3 __ovld __cnfn convert_uint3_sat(uchar3);
+uint3 __ovld __cnfn convert_uint3_rte(short3);
+uint3 __ovld __cnfn convert_uint3_sat_rte(short3);
+uint3 __ovld __cnfn convert_uint3_rtz(short3);
+uint3 __ovld __cnfn convert_uint3_sat_rtz(short3);
+uint3 __ovld __cnfn convert_uint3_rtp(short3);
+uint3 __ovld __cnfn convert_uint3_sat_rtp(short3);
+uint3 __ovld __cnfn convert_uint3_rtn(short3);
+uint3 __ovld __cnfn convert_uint3_sat_rtn(short3);
+uint3 __ovld __cnfn convert_uint3(short3);
+uint3 __ovld __cnfn convert_uint3_sat(short3);
+uint3 __ovld __cnfn convert_uint3_rte(ushort3);
+uint3 __ovld __cnfn convert_uint3_sat_rte(ushort3);
+uint3 __ovld __cnfn convert_uint3_rtz(ushort3);
+uint3 __ovld __cnfn convert_uint3_sat_rtz(ushort3);
+uint3 __ovld __cnfn convert_uint3_rtp(ushort3);
+uint3 __ovld __cnfn convert_uint3_sat_rtp(ushort3);
+uint3 __ovld __cnfn convert_uint3_rtn(ushort3);
+uint3 __ovld __cnfn convert_uint3_sat_rtn(ushort3);
+uint3 __ovld __cnfn convert_uint3(ushort3);
+uint3 __ovld __cnfn convert_uint3_sat(ushort3);
+uint3 __ovld __cnfn convert_uint3_rte(int3);
+uint3 __ovld __cnfn convert_uint3_sat_rte(int3);
+uint3 __ovld __cnfn convert_uint3_rtz(int3);
+uint3 __ovld __cnfn convert_uint3_sat_rtz(int3);
+uint3 __ovld __cnfn convert_uint3_rtp(int3);
+uint3 __ovld __cnfn convert_uint3_sat_rtp(int3);
+uint3 __ovld __cnfn convert_uint3_rtn(int3);
+uint3 __ovld __cnfn convert_uint3_sat_rtn(int3);
+uint3 __ovld __cnfn convert_uint3(int3);
+uint3 __ovld __cnfn convert_uint3_sat(int3);
+uint3 __ovld __cnfn convert_uint3_rte(uint3);
+uint3 __ovld __cnfn convert_uint3_sat_rte(uint3);
+uint3 __ovld __cnfn convert_uint3_rtz(uint3);
+uint3 __ovld __cnfn convert_uint3_sat_rtz(uint3);
+uint3 __ovld __cnfn convert_uint3_rtp(uint3);
+uint3 __ovld __cnfn convert_uint3_sat_rtp(uint3);
+uint3 __ovld __cnfn convert_uint3_rtn(uint3);
+uint3 __ovld __cnfn convert_uint3_sat_rtn(uint3);
+uint3 __ovld __cnfn convert_uint3(uint3);
+uint3 __ovld __cnfn convert_uint3_sat(uint3);
+uint3 __ovld __cnfn convert_uint3_rte(long3);
+uint3 __ovld __cnfn convert_uint3_sat_rte(long3);
+uint3 __ovld __cnfn convert_uint3_rtz(long3);
+uint3 __ovld __cnfn convert_uint3_sat_rtz(long3);
+uint3 __ovld __cnfn convert_uint3_rtp(long3);
+uint3 __ovld __cnfn convert_uint3_sat_rtp(long3);
+uint3 __ovld __cnfn convert_uint3_rtn(long3);
+uint3 __ovld __cnfn convert_uint3_sat_rtn(long3);
+uint3 __ovld __cnfn convert_uint3(long3);
+uint3 __ovld __cnfn convert_uint3_sat(long3);
+uint3 __ovld __cnfn convert_uint3_rte(ulong3);
+uint3 __ovld __cnfn convert_uint3_sat_rte(ulong3);
+uint3 __ovld __cnfn convert_uint3_rtz(ulong3);
+uint3 __ovld __cnfn convert_uint3_sat_rtz(ulong3);
+uint3 __ovld __cnfn convert_uint3_rtp(ulong3);
+uint3 __ovld __cnfn convert_uint3_sat_rtp(ulong3);
+uint3 __ovld __cnfn convert_uint3_rtn(ulong3);
+uint3 __ovld __cnfn convert_uint3_sat_rtn(ulong3);
+uint3 __ovld __cnfn convert_uint3(ulong3);
+uint3 __ovld __cnfn convert_uint3_sat(ulong3);
+uint3 __ovld __cnfn convert_uint3_rte(float3);
+uint3 __ovld __cnfn convert_uint3_sat_rte(float3);
+uint3 __ovld __cnfn convert_uint3_rtz(float3);
+uint3 __ovld __cnfn convert_uint3_sat_rtz(float3);
+uint3 __ovld __cnfn convert_uint3_rtp(float3);
+uint3 __ovld __cnfn convert_uint3_sat_rtp(float3);
+uint3 __ovld __cnfn convert_uint3_rtn(float3);
+uint3 __ovld __cnfn convert_uint3_sat_rtn(float3);
+uint3 __ovld __cnfn convert_uint3(float3);
+uint3 __ovld __cnfn convert_uint3_sat(float3);
+long3 __ovld __cnfn convert_long3_rte(char3);
+long3 __ovld __cnfn convert_long3_sat_rte(char3);
+long3 __ovld __cnfn convert_long3_rtz(char3);
+long3 __ovld __cnfn convert_long3_sat_rtz(char3);
+long3 __ovld __cnfn convert_long3_rtp(char3);
+long3 __ovld __cnfn convert_long3_sat_rtp(char3);
+long3 __ovld __cnfn convert_long3_rtn(char3);
+long3 __ovld __cnfn convert_long3_sat_rtn(char3);
+long3 __ovld __cnfn convert_long3(char3);
+long3 __ovld __cnfn convert_long3_sat(char3);
+long3 __ovld __cnfn convert_long3_rte(uchar3);
+long3 __ovld __cnfn convert_long3_sat_rte(uchar3);
+long3 __ovld __cnfn convert_long3_rtz(uchar3);
+long3 __ovld __cnfn convert_long3_sat_rtz(uchar3);
+long3 __ovld __cnfn convert_long3_rtp(uchar3);
+long3 __ovld __cnfn convert_long3_sat_rtp(uchar3);
+long3 __ovld __cnfn convert_long3_rtn(uchar3);
+long3 __ovld __cnfn convert_long3_sat_rtn(uchar3);
+long3 __ovld __cnfn convert_long3(uchar3);
+long3 __ovld __cnfn convert_long3_sat(uchar3);
+long3 __ovld __cnfn convert_long3_rte(short3);
+long3 __ovld __cnfn convert_long3_sat_rte(short3);
+long3 __ovld __cnfn convert_long3_rtz(short3);
+long3 __ovld __cnfn convert_long3_sat_rtz(short3);
+long3 __ovld __cnfn convert_long3_rtp(short3);
+long3 __ovld __cnfn convert_long3_sat_rtp(short3);
+long3 __ovld __cnfn convert_long3_rtn(short3);
+long3 __ovld __cnfn convert_long3_sat_rtn(short3);
+long3 __ovld __cnfn convert_long3(short3);
+long3 __ovld __cnfn convert_long3_sat(short3);
+long3 __ovld __cnfn convert_long3_rte(ushort3);
+long3 __ovld __cnfn convert_long3_sat_rte(ushort3);
+long3 __ovld __cnfn convert_long3_rtz(ushort3);
+long3 __ovld __cnfn convert_long3_sat_rtz(ushort3);
+long3 __ovld __cnfn convert_long3_rtp(ushort3);
+long3 __ovld __cnfn convert_long3_sat_rtp(ushort3);
+long3 __ovld __cnfn convert_long3_rtn(ushort3);
+long3 __ovld __cnfn convert_long3_sat_rtn(ushort3);
+long3 __ovld __cnfn convert_long3(ushort3);
+long3 __ovld __cnfn convert_long3_sat(ushort3);
+long3 __ovld __cnfn convert_long3_rte(int3);
+long3 __ovld __cnfn convert_long3_sat_rte(int3);
+long3 __ovld __cnfn convert_long3_rtz(int3);
+long3 __ovld __cnfn convert_long3_sat_rtz(int3);
+long3 __ovld __cnfn convert_long3_rtp(int3);
+long3 __ovld __cnfn convert_long3_sat_rtp(int3);
+long3 __ovld __cnfn convert_long3_rtn(int3);
+long3 __ovld __cnfn convert_long3_sat_rtn(int3);
+long3 __ovld __cnfn convert_long3(int3);
+long3 __ovld __cnfn convert_long3_sat(int3);
+long3 __ovld __cnfn convert_long3_rte(uint3);
+long3 __ovld __cnfn convert_long3_sat_rte(uint3);
+long3 __ovld __cnfn convert_long3_rtz(uint3);
+long3 __ovld __cnfn convert_long3_sat_rtz(uint3);
+long3 __ovld __cnfn convert_long3_rtp(uint3);
+long3 __ovld __cnfn convert_long3_sat_rtp(uint3);
+long3 __ovld __cnfn convert_long3_rtn(uint3);
+long3 __ovld __cnfn convert_long3_sat_rtn(uint3);
+long3 __ovld __cnfn convert_long3(uint3);
+long3 __ovld __cnfn convert_long3_sat(uint3);
+long3 __ovld __cnfn convert_long3_rte(long3);
+long3 __ovld __cnfn convert_long3_sat_rte(long3);
+long3 __ovld __cnfn convert_long3_rtz(long3);
+long3 __ovld __cnfn convert_long3_sat_rtz(long3);
+long3 __ovld __cnfn convert_long3_rtp(long3);
+long3 __ovld __cnfn convert_long3_sat_rtp(long3);
+long3 __ovld __cnfn convert_long3_rtn(long3);
+long3 __ovld __cnfn convert_long3_sat_rtn(long3);
+long3 __ovld __cnfn convert_long3(long3);
+long3 __ovld __cnfn convert_long3_sat(long3);
+long3 __ovld __cnfn convert_long3_rte(ulong3);
+long3 __ovld __cnfn convert_long3_sat_rte(ulong3);
+long3 __ovld __cnfn convert_long3_rtz(ulong3);
+long3 __ovld __cnfn convert_long3_sat_rtz(ulong3);
+long3 __ovld __cnfn convert_long3_rtp(ulong3);
+long3 __ovld __cnfn convert_long3_sat_rtp(ulong3);
+long3 __ovld __cnfn convert_long3_rtn(ulong3);
+long3 __ovld __cnfn convert_long3_sat_rtn(ulong3);
+long3 __ovld __cnfn convert_long3(ulong3);
+long3 __ovld __cnfn convert_long3_sat(ulong3);
+long3 __ovld __cnfn convert_long3_rte(float3);
+long3 __ovld __cnfn convert_long3_sat_rte(float3);
+long3 __ovld __cnfn convert_long3_rtz(float3);
+long3 __ovld __cnfn convert_long3_sat_rtz(float3);
+long3 __ovld __cnfn convert_long3_rtp(float3);
+long3 __ovld __cnfn convert_long3_sat_rtp(float3);
+long3 __ovld __cnfn convert_long3_rtn(float3);
+long3 __ovld __cnfn convert_long3_sat_rtn(float3);
+long3 __ovld __cnfn convert_long3(float3);
+long3 __ovld __cnfn convert_long3_sat(float3);
+ulong3 __ovld __cnfn convert_ulong3_rte(char3);
+ulong3 __ovld __cnfn convert_ulong3_sat_rte(char3);
+ulong3 __ovld __cnfn convert_ulong3_rtz(char3);
+ulong3 __ovld __cnfn convert_ulong3_sat_rtz(char3);
+ulong3 __ovld __cnfn convert_ulong3_rtp(char3);
+ulong3 __ovld __cnfn convert_ulong3_sat_rtp(char3);
+ulong3 __ovld __cnfn convert_ulong3_rtn(char3);
+ulong3 __ovld __cnfn convert_ulong3_sat_rtn(char3);
+ulong3 __ovld __cnfn convert_ulong3(char3);
+ulong3 __ovld __cnfn convert_ulong3_sat(char3);
+ulong3 __ovld __cnfn convert_ulong3_rte(uchar3);
+ulong3 __ovld __cnfn convert_ulong3_sat_rte(uchar3);
+ulong3 __ovld __cnfn convert_ulong3_rtz(uchar3);
+ulong3 __ovld __cnfn convert_ulong3_sat_rtz(uchar3);
+ulong3 __ovld __cnfn convert_ulong3_rtp(uchar3);
+ulong3 __ovld __cnfn convert_ulong3_sat_rtp(uchar3);
+ulong3 __ovld __cnfn convert_ulong3_rtn(uchar3);
+ulong3 __ovld __cnfn convert_ulong3_sat_rtn(uchar3);
+ulong3 __ovld __cnfn convert_ulong3(uchar3);
+ulong3 __ovld __cnfn convert_ulong3_sat(uchar3);
+ulong3 __ovld __cnfn convert_ulong3_rte(short3);
+ulong3 __ovld __cnfn convert_ulong3_sat_rte(short3);
+ulong3 __ovld __cnfn convert_ulong3_rtz(short3);
+ulong3 __ovld __cnfn convert_ulong3_sat_rtz(short3);
+ulong3 __ovld __cnfn convert_ulong3_rtp(short3);
+ulong3 __ovld __cnfn convert_ulong3_sat_rtp(short3);
+ulong3 __ovld __cnfn convert_ulong3_rtn(short3);
+ulong3 __ovld __cnfn convert_ulong3_sat_rtn(short3);
+ulong3 __ovld __cnfn convert_ulong3(short3);
+ulong3 __ovld __cnfn convert_ulong3_sat(short3);
+ulong3 __ovld __cnfn convert_ulong3_rte(ushort3);
+ulong3 __ovld __cnfn convert_ulong3_sat_rte(ushort3);
+ulong3 __ovld __cnfn convert_ulong3_rtz(ushort3);
+ulong3 __ovld __cnfn convert_ulong3_sat_rtz(ushort3);
+ulong3 __ovld __cnfn convert_ulong3_rtp(ushort3);
+ulong3 __ovld __cnfn convert_ulong3_sat_rtp(ushort3);
+ulong3 __ovld __cnfn convert_ulong3_rtn(ushort3);
+ulong3 __ovld __cnfn convert_ulong3_sat_rtn(ushort3);
+ulong3 __ovld __cnfn convert_ulong3(ushort3);
+ulong3 __ovld __cnfn convert_ulong3_sat(ushort3);
+ulong3 __ovld __cnfn convert_ulong3_rte(int3);
+ulong3 __ovld __cnfn convert_ulong3_sat_rte(int3);
+ulong3 __ovld __cnfn convert_ulong3_rtz(int3);
+ulong3 __ovld __cnfn convert_ulong3_sat_rtz(int3);
+ulong3 __ovld __cnfn convert_ulong3_rtp(int3);
+ulong3 __ovld __cnfn convert_ulong3_sat_rtp(int3);
+ulong3 __ovld __cnfn convert_ulong3_rtn(int3);
+ulong3 __ovld __cnfn convert_ulong3_sat_rtn(int3);
+ulong3 __ovld __cnfn convert_ulong3(int3);
+ulong3 __ovld __cnfn convert_ulong3_sat(int3);
+ulong3 __ovld __cnfn convert_ulong3_rte(uint3);
+ulong3 __ovld __cnfn convert_ulong3_sat_rte(uint3);
+ulong3 __ovld __cnfn convert_ulong3_rtz(uint3);
+ulong3 __ovld __cnfn convert_ulong3_sat_rtz(uint3);
+ulong3 __ovld __cnfn convert_ulong3_rtp(uint3);
+ulong3 __ovld __cnfn convert_ulong3_sat_rtp(uint3);
+ulong3 __ovld __cnfn convert_ulong3_rtn(uint3);
+ulong3 __ovld __cnfn convert_ulong3_sat_rtn(uint3);
+ulong3 __ovld __cnfn convert_ulong3(uint3);
+ulong3 __ovld __cnfn convert_ulong3_sat(uint3);
+ulong3 __ovld __cnfn convert_ulong3_rte(long3);
+ulong3 __ovld __cnfn convert_ulong3_sat_rte(long3);
+ulong3 __ovld __cnfn convert_ulong3_rtz(long3);
+ulong3 __ovld __cnfn convert_ulong3_sat_rtz(long3);
+ulong3 __ovld __cnfn convert_ulong3_rtp(long3);
+ulong3 __ovld __cnfn convert_ulong3_sat_rtp(long3);
+ulong3 __ovld __cnfn convert_ulong3_rtn(long3);
+ulong3 __ovld __cnfn convert_ulong3_sat_rtn(long3);
+ulong3 __ovld __cnfn convert_ulong3(long3);
+ulong3 __ovld __cnfn convert_ulong3_sat(long3);
+ulong3 __ovld __cnfn convert_ulong3_rte(ulong3);
+ulong3 __ovld __cnfn convert_ulong3_sat_rte(ulong3);
+ulong3 __ovld __cnfn convert_ulong3_rtz(ulong3);
+ulong3 __ovld __cnfn convert_ulong3_sat_rtz(ulong3);
+ulong3 __ovld __cnfn convert_ulong3_rtp(ulong3);
+ulong3 __ovld __cnfn convert_ulong3_sat_rtp(ulong3);
+ulong3 __ovld __cnfn convert_ulong3_rtn(ulong3);
+ulong3 __ovld __cnfn convert_ulong3_sat_rtn(ulong3);
+ulong3 __ovld __cnfn convert_ulong3(ulong3);
+ulong3 __ovld __cnfn convert_ulong3_sat(ulong3);
+ulong3 __ovld __cnfn convert_ulong3_rte(float3);
+ulong3 __ovld __cnfn convert_ulong3_sat_rte(float3);
+ulong3 __ovld __cnfn convert_ulong3_rtz(float3);
+ulong3 __ovld __cnfn convert_ulong3_sat_rtz(float3);
+ulong3 __ovld __cnfn convert_ulong3_rtp(float3);
+ulong3 __ovld __cnfn convert_ulong3_sat_rtp(float3);
+ulong3 __ovld __cnfn convert_ulong3_rtn(float3);
+ulong3 __ovld __cnfn convert_ulong3_sat_rtn(float3);
+ulong3 __ovld __cnfn convert_ulong3(float3);
+ulong3 __ovld __cnfn convert_ulong3_sat(float3);
+float3 __ovld __cnfn convert_float3_rte(char3);
+float3 __ovld __cnfn convert_float3_rtz(char3);
+float3 __ovld __cnfn convert_float3_rtp(char3);
+float3 __ovld __cnfn convert_float3_rtn(char3);
+float3 __ovld __cnfn convert_float3(char3);
+float3 __ovld __cnfn convert_float3_rte(uchar3);
+float3 __ovld __cnfn convert_float3_rtz(uchar3);
+float3 __ovld __cnfn convert_float3_rtp(uchar3);
+float3 __ovld __cnfn convert_float3_rtn(uchar3);
+float3 __ovld __cnfn convert_float3(uchar3);
+float3 __ovld __cnfn convert_float3_rte(short3);
+float3 __ovld __cnfn convert_float3_rtz(short3);
+float3 __ovld __cnfn convert_float3_rtp(short3);
+float3 __ovld __cnfn convert_float3_rtn(short3);
+float3 __ovld __cnfn convert_float3(short3);
+float3 __ovld __cnfn convert_float3_rte(ushort3);
+float3 __ovld __cnfn convert_float3_rtz(ushort3);
+float3 __ovld __cnfn convert_float3_rtp(ushort3);
+float3 __ovld __cnfn convert_float3_rtn(ushort3);
+float3 __ovld __cnfn convert_float3(ushort3);
+float3 __ovld __cnfn convert_float3_rte(int3);
+float3 __ovld __cnfn convert_float3_rtz(int3);
+float3 __ovld __cnfn convert_float3_rtp(int3);
+float3 __ovld __cnfn convert_float3_rtn(int3);
+float3 __ovld __cnfn convert_float3(int3);
+float3 __ovld __cnfn convert_float3_rte(uint3);
+float3 __ovld __cnfn convert_float3_rtz(uint3);
+float3 __ovld __cnfn convert_float3_rtp(uint3);
+float3 __ovld __cnfn convert_float3_rtn(uint3);
+float3 __ovld __cnfn convert_float3(uint3);
+float3 __ovld __cnfn convert_float3_rte(long3);
+float3 __ovld __cnfn convert_float3_rtz(long3);
+float3 __ovld __cnfn convert_float3_rtp(long3);
+float3 __ovld __cnfn convert_float3_rtn(long3);
+float3 __ovld __cnfn convert_float3(long3);
+float3 __ovld __cnfn convert_float3_rte(ulong3);
+float3 __ovld __cnfn convert_float3_rtz(ulong3);
+float3 __ovld __cnfn convert_float3_rtp(ulong3);
+float3 __ovld __cnfn convert_float3_rtn(ulong3);
+float3 __ovld __cnfn convert_float3(ulong3);
+float3 __ovld __cnfn convert_float3_rte(float3);
+float3 __ovld __cnfn convert_float3_rtz(float3);
+float3 __ovld __cnfn convert_float3_rtp(float3);
+float3 __ovld __cnfn convert_float3_rtn(float3);
+float3 __ovld __cnfn convert_float3(float3);
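+/*
+ * Illustrative example (an editorial sketch, assuming standard OpenCL C
+ * conversion semantics): converting (float3)(2.5f) to int3 yields 2 with
+ * _rte (ties round to even), 2 with _rtz, 3 with _rtp, and 2 with _rtn;
+ * 1.5f yields 2, 1, 2, and 1 respectively. Saturation clamps rather than
+ * wraps, e.g. convert_uchar3_sat((float3)(300.0f)) produces (uchar3)(255).
+ * The 4-element vector conversions that follow behave componentwise in the
+ * same way.
+ */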
+char4 __ovld __cnfn convert_char4_rte(char4);
+char4 __ovld __cnfn convert_char4_sat_rte(char4);
+char4 __ovld __cnfn convert_char4_rtz(char4);
+char4 __ovld __cnfn convert_char4_sat_rtz(char4);
+char4 __ovld __cnfn convert_char4_rtp(char4);
+char4 __ovld __cnfn convert_char4_sat_rtp(char4);
+char4 __ovld __cnfn convert_char4_rtn(char4);
+char4 __ovld __cnfn convert_char4_sat_rtn(char4);
+char4 __ovld __cnfn convert_char4(char4);
+char4 __ovld __cnfn convert_char4_sat(char4);
+char4 __ovld __cnfn convert_char4_rte(uchar4);
+char4 __ovld __cnfn convert_char4_sat_rte(uchar4);
+char4 __ovld __cnfn convert_char4_rtz(uchar4);
+char4 __ovld __cnfn convert_char4_sat_rtz(uchar4);
+char4 __ovld __cnfn convert_char4_rtp(uchar4);
+char4 __ovld __cnfn convert_char4_sat_rtp(uchar4);
+char4 __ovld __cnfn convert_char4_rtn(uchar4);
+char4 __ovld __cnfn convert_char4_sat_rtn(uchar4);
+char4 __ovld __cnfn convert_char4(uchar4);
+char4 __ovld __cnfn convert_char4_sat(uchar4);
+char4 __ovld __cnfn convert_char4_rte(short4);
+char4 __ovld __cnfn convert_char4_sat_rte(short4);
+char4 __ovld __cnfn convert_char4_rtz(short4);
+char4 __ovld __cnfn convert_char4_sat_rtz(short4);
+char4 __ovld __cnfn convert_char4_rtp(short4);
+char4 __ovld __cnfn convert_char4_sat_rtp(short4);
+char4 __ovld __cnfn convert_char4_rtn(short4);
+char4 __ovld __cnfn convert_char4_sat_rtn(short4);
+char4 __ovld __cnfn convert_char4(short4);
+char4 __ovld __cnfn convert_char4_sat(short4);
+char4 __ovld __cnfn convert_char4_rte(ushort4);
+char4 __ovld __cnfn convert_char4_sat_rte(ushort4);
+char4 __ovld __cnfn convert_char4_rtz(ushort4);
+char4 __ovld __cnfn convert_char4_sat_rtz(ushort4);
+char4 __ovld __cnfn convert_char4_rtp(ushort4);
+char4 __ovld __cnfn convert_char4_sat_rtp(ushort4);
+char4 __ovld __cnfn convert_char4_rtn(ushort4);
+char4 __ovld __cnfn convert_char4_sat_rtn(ushort4);
+char4 __ovld __cnfn convert_char4(ushort4);
+char4 __ovld __cnfn convert_char4_sat(ushort4);
+char4 __ovld __cnfn convert_char4_rte(int4);
+char4 __ovld __cnfn convert_char4_sat_rte(int4);
+char4 __ovld __cnfn convert_char4_rtz(int4);
+char4 __ovld __cnfn convert_char4_sat_rtz(int4);
+char4 __ovld __cnfn convert_char4_rtp(int4);
+char4 __ovld __cnfn convert_char4_sat_rtp(int4);
+char4 __ovld __cnfn convert_char4_rtn(int4);
+char4 __ovld __cnfn convert_char4_sat_rtn(int4);
+char4 __ovld __cnfn convert_char4(int4);
+char4 __ovld __cnfn convert_char4_sat(int4);
+char4 __ovld __cnfn convert_char4_rte(uint4);
+char4 __ovld __cnfn convert_char4_sat_rte(uint4);
+char4 __ovld __cnfn convert_char4_rtz(uint4);
+char4 __ovld __cnfn convert_char4_sat_rtz(uint4);
+char4 __ovld __cnfn convert_char4_rtp(uint4);
+char4 __ovld __cnfn convert_char4_sat_rtp(uint4);
+char4 __ovld __cnfn convert_char4_rtn(uint4);
+char4 __ovld __cnfn convert_char4_sat_rtn(uint4);
+char4 __ovld __cnfn convert_char4(uint4);
+char4 __ovld __cnfn convert_char4_sat(uint4);
+char4 __ovld __cnfn convert_char4_rte(long4);
+char4 __ovld __cnfn convert_char4_sat_rte(long4);
+char4 __ovld __cnfn convert_char4_rtz(long4);
+char4 __ovld __cnfn convert_char4_sat_rtz(long4);
+char4 __ovld __cnfn convert_char4_rtp(long4);
+char4 __ovld __cnfn convert_char4_sat_rtp(long4);
+char4 __ovld __cnfn convert_char4_rtn(long4);
+char4 __ovld __cnfn convert_char4_sat_rtn(long4);
+char4 __ovld __cnfn convert_char4(long4);
+char4 __ovld __cnfn convert_char4_sat(long4);
+char4 __ovld __cnfn convert_char4_rte(ulong4);
+char4 __ovld __cnfn convert_char4_sat_rte(ulong4);
+char4 __ovld __cnfn convert_char4_rtz(ulong4);
+char4 __ovld __cnfn convert_char4_sat_rtz(ulong4);
+char4 __ovld __cnfn convert_char4_rtp(ulong4);
+char4 __ovld __cnfn convert_char4_sat_rtp(ulong4);
+char4 __ovld __cnfn convert_char4_rtn(ulong4);
+char4 __ovld __cnfn convert_char4_sat_rtn(ulong4);
+char4 __ovld __cnfn convert_char4(ulong4);
+char4 __ovld __cnfn convert_char4_sat(ulong4);
+char4 __ovld __cnfn convert_char4_rte(float4);
+char4 __ovld __cnfn convert_char4_sat_rte(float4);
+char4 __ovld __cnfn convert_char4_rtz(float4);
+char4 __ovld __cnfn convert_char4_sat_rtz(float4);
+char4 __ovld __cnfn convert_char4_rtp(float4);
+char4 __ovld __cnfn convert_char4_sat_rtp(float4);
+char4 __ovld __cnfn convert_char4_rtn(float4);
+char4 __ovld __cnfn convert_char4_sat_rtn(float4);
+char4 __ovld __cnfn convert_char4(float4);
+char4 __ovld __cnfn convert_char4_sat(float4);
+uchar4 __ovld __cnfn convert_uchar4_rte(char4);
+uchar4 __ovld __cnfn convert_uchar4_sat_rte(char4);
+uchar4 __ovld __cnfn convert_uchar4_rtz(char4);
+uchar4 __ovld __cnfn convert_uchar4_sat_rtz(char4);
+uchar4 __ovld __cnfn convert_uchar4_rtp(char4);
+uchar4 __ovld __cnfn convert_uchar4_sat_rtp(char4);
+uchar4 __ovld __cnfn convert_uchar4_rtn(char4);
+uchar4 __ovld __cnfn convert_uchar4_sat_rtn(char4);
+uchar4 __ovld __cnfn convert_uchar4(char4);
+uchar4 __ovld __cnfn convert_uchar4_sat(char4);
+uchar4 __ovld __cnfn convert_uchar4_rte(uchar4);
+uchar4 __ovld __cnfn convert_uchar4_sat_rte(uchar4);
+uchar4 __ovld __cnfn convert_uchar4_rtz(uchar4);
+uchar4 __ovld __cnfn convert_uchar4_sat_rtz(uchar4);
+uchar4 __ovld __cnfn convert_uchar4_rtp(uchar4);
+uchar4 __ovld __cnfn convert_uchar4_sat_rtp(uchar4);
+uchar4 __ovld __cnfn convert_uchar4_rtn(uchar4);
+uchar4 __ovld __cnfn convert_uchar4_sat_rtn(uchar4);
+uchar4 __ovld __cnfn convert_uchar4(uchar4);
+uchar4 __ovld __cnfn convert_uchar4_sat(uchar4);
+uchar4 __ovld __cnfn convert_uchar4_rte(short4);
+uchar4 __ovld __cnfn convert_uchar4_sat_rte(short4);
+uchar4 __ovld __cnfn convert_uchar4_rtz(short4);
+uchar4 __ovld __cnfn convert_uchar4_sat_rtz(short4);
+uchar4 __ovld __cnfn convert_uchar4_rtp(short4);
+uchar4 __ovld __cnfn convert_uchar4_sat_rtp(short4);
+uchar4 __ovld __cnfn convert_uchar4_rtn(short4);
+uchar4 __ovld __cnfn convert_uchar4_sat_rtn(short4);
+uchar4 __ovld __cnfn convert_uchar4(short4);
+uchar4 __ovld __cnfn convert_uchar4_sat(short4);
+uchar4 __ovld __cnfn convert_uchar4_rte(ushort4);
+uchar4 __ovld __cnfn convert_uchar4_sat_rte(ushort4);
+uchar4 __ovld __cnfn convert_uchar4_rtz(ushort4);
+uchar4 __ovld __cnfn convert_uchar4_sat_rtz(ushort4);
+uchar4 __ovld __cnfn convert_uchar4_rtp(ushort4);
+uchar4 __ovld __cnfn convert_uchar4_sat_rtp(ushort4);
+uchar4 __ovld __cnfn convert_uchar4_rtn(ushort4);
+uchar4 __ovld __cnfn convert_uchar4_sat_rtn(ushort4);
+uchar4 __ovld __cnfn convert_uchar4(ushort4);
+uchar4 __ovld __cnfn convert_uchar4_sat(ushort4);
+uchar4 __ovld __cnfn convert_uchar4_rte(int4);
+uchar4 __ovld __cnfn convert_uchar4_sat_rte(int4);
+uchar4 __ovld __cnfn convert_uchar4_rtz(int4);
+uchar4 __ovld __cnfn convert_uchar4_sat_rtz(int4);
+uchar4 __ovld __cnfn convert_uchar4_rtp(int4);
+uchar4 __ovld __cnfn convert_uchar4_sat_rtp(int4);
+uchar4 __ovld __cnfn convert_uchar4_rtn(int4);
+uchar4 __ovld __cnfn convert_uchar4_sat_rtn(int4);
+uchar4 __ovld __cnfn convert_uchar4(int4);
+uchar4 __ovld __cnfn convert_uchar4_sat(int4);
+uchar4 __ovld __cnfn convert_uchar4_rte(uint4);
+uchar4 __ovld __cnfn convert_uchar4_sat_rte(uint4);
+uchar4 __ovld __cnfn convert_uchar4_rtz(uint4);
+uchar4 __ovld __cnfn convert_uchar4_sat_rtz(uint4);
+uchar4 __ovld __cnfn convert_uchar4_rtp(uint4);
+uchar4 __ovld __cnfn convert_uchar4_sat_rtp(uint4);
+uchar4 __ovld __cnfn convert_uchar4_rtn(uint4);
+uchar4 __ovld __cnfn convert_uchar4_sat_rtn(uint4);
+uchar4 __ovld __cnfn convert_uchar4(uint4);
+uchar4 __ovld __cnfn convert_uchar4_sat(uint4);
+uchar4 __ovld __cnfn convert_uchar4_rte(long4);
+uchar4 __ovld __cnfn convert_uchar4_sat_rte(long4);
+uchar4 __ovld __cnfn convert_uchar4_rtz(long4);
+uchar4 __ovld __cnfn convert_uchar4_sat_rtz(long4);
+uchar4 __ovld __cnfn convert_uchar4_rtp(long4);
+uchar4 __ovld __cnfn convert_uchar4_sat_rtp(long4);
+uchar4 __ovld __cnfn convert_uchar4_rtn(long4);
+uchar4 __ovld __cnfn convert_uchar4_sat_rtn(long4);
+uchar4 __ovld __cnfn convert_uchar4(long4);
+uchar4 __ovld __cnfn convert_uchar4_sat(long4);
+uchar4 __ovld __cnfn convert_uchar4_rte(ulong4);
+uchar4 __ovld __cnfn convert_uchar4_sat_rte(ulong4);
+uchar4 __ovld __cnfn convert_uchar4_rtz(ulong4);
+uchar4 __ovld __cnfn convert_uchar4_sat_rtz(ulong4);
+uchar4 __ovld __cnfn convert_uchar4_rtp(ulong4);
+uchar4 __ovld __cnfn convert_uchar4_sat_rtp(ulong4);
+uchar4 __ovld __cnfn convert_uchar4_rtn(ulong4);
+uchar4 __ovld __cnfn convert_uchar4_sat_rtn(ulong4);
+uchar4 __ovld __cnfn convert_uchar4(ulong4);
+uchar4 __ovld __cnfn convert_uchar4_sat(ulong4);
+uchar4 __ovld __cnfn convert_uchar4_rte(float4);
+uchar4 __ovld __cnfn convert_uchar4_sat_rte(float4);
+uchar4 __ovld __cnfn convert_uchar4_rtz(float4);
+uchar4 __ovld __cnfn convert_uchar4_sat_rtz(float4);
+uchar4 __ovld __cnfn convert_uchar4_rtp(float4);
+uchar4 __ovld __cnfn convert_uchar4_sat_rtp(float4);
+uchar4 __ovld __cnfn convert_uchar4_rtn(float4);
+uchar4 __ovld __cnfn convert_uchar4_sat_rtn(float4);
+uchar4 __ovld __cnfn convert_uchar4(float4);
+uchar4 __ovld __cnfn convert_uchar4_sat(float4);
+short4 __ovld __cnfn convert_short4_rte(char4);
+short4 __ovld __cnfn convert_short4_sat_rte(char4);
+short4 __ovld __cnfn convert_short4_rtz(char4);
+short4 __ovld __cnfn convert_short4_sat_rtz(char4);
+short4 __ovld __cnfn convert_short4_rtp(char4);
+short4 __ovld __cnfn convert_short4_sat_rtp(char4);
+short4 __ovld __cnfn convert_short4_rtn(char4);
+short4 __ovld __cnfn convert_short4_sat_rtn(char4);
+short4 __ovld __cnfn convert_short4(char4);
+short4 __ovld __cnfn convert_short4_sat(char4);
+short4 __ovld __cnfn convert_short4_rte(uchar4);
+short4 __ovld __cnfn convert_short4_sat_rte(uchar4);
+short4 __ovld __cnfn convert_short4_rtz(uchar4);
+short4 __ovld __cnfn convert_short4_sat_rtz(uchar4);
+short4 __ovld __cnfn convert_short4_rtp(uchar4);
+short4 __ovld __cnfn convert_short4_sat_rtp(uchar4);
+short4 __ovld __cnfn convert_short4_rtn(uchar4);
+short4 __ovld __cnfn convert_short4_sat_rtn(uchar4);
+short4 __ovld __cnfn convert_short4(uchar4);
+short4 __ovld __cnfn convert_short4_sat(uchar4);
+short4 __ovld __cnfn convert_short4_rte(short4);
+short4 __ovld __cnfn convert_short4_sat_rte(short4);
+short4 __ovld __cnfn convert_short4_rtz(short4);
+short4 __ovld __cnfn convert_short4_sat_rtz(short4);
+short4 __ovld __cnfn convert_short4_rtp(short4);
+short4 __ovld __cnfn convert_short4_sat_rtp(short4);
+short4 __ovld __cnfn convert_short4_rtn(short4);
+short4 __ovld __cnfn convert_short4_sat_rtn(short4);
+short4 __ovld __cnfn convert_short4(short4);
+short4 __ovld __cnfn convert_short4_sat(short4);
+short4 __ovld __cnfn convert_short4_rte(ushort4);
+short4 __ovld __cnfn convert_short4_sat_rte(ushort4);
+short4 __ovld __cnfn convert_short4_rtz(ushort4);
+short4 __ovld __cnfn convert_short4_sat_rtz(ushort4);
+short4 __ovld __cnfn convert_short4_rtp(ushort4);
+short4 __ovld __cnfn convert_short4_sat_rtp(ushort4);
+short4 __ovld __cnfn convert_short4_rtn(ushort4);
+short4 __ovld __cnfn convert_short4_sat_rtn(ushort4);
+short4 __ovld __cnfn convert_short4(ushort4);
+short4 __ovld __cnfn convert_short4_sat(ushort4);
+short4 __ovld __cnfn convert_short4_rte(int4);
+short4 __ovld __cnfn convert_short4_sat_rte(int4);
+short4 __ovld __cnfn convert_short4_rtz(int4);
+short4 __ovld __cnfn convert_short4_sat_rtz(int4);
+short4 __ovld __cnfn convert_short4_rtp(int4);
+short4 __ovld __cnfn convert_short4_sat_rtp(int4);
+short4 __ovld __cnfn convert_short4_rtn(int4);
+short4 __ovld __cnfn convert_short4_sat_rtn(int4);
+short4 __ovld __cnfn convert_short4(int4);
+short4 __ovld __cnfn convert_short4_sat(int4);
+short4 __ovld __cnfn convert_short4_rte(uint4);
+short4 __ovld __cnfn convert_short4_sat_rte(uint4);
+short4 __ovld __cnfn convert_short4_rtz(uint4);
+short4 __ovld __cnfn convert_short4_sat_rtz(uint4);
+short4 __ovld __cnfn convert_short4_rtp(uint4);
+short4 __ovld __cnfn convert_short4_sat_rtp(uint4);
+short4 __ovld __cnfn convert_short4_rtn(uint4);
+short4 __ovld __cnfn convert_short4_sat_rtn(uint4);
+short4 __ovld __cnfn convert_short4(uint4);
+short4 __ovld __cnfn convert_short4_sat(uint4);
+short4 __ovld __cnfn convert_short4_rte(long4);
+short4 __ovld __cnfn convert_short4_sat_rte(long4);
+short4 __ovld __cnfn convert_short4_rtz(long4);
+short4 __ovld __cnfn convert_short4_sat_rtz(long4);
+short4 __ovld __cnfn convert_short4_rtp(long4);
+short4 __ovld __cnfn convert_short4_sat_rtp(long4);
+short4 __ovld __cnfn convert_short4_rtn(long4);
+short4 __ovld __cnfn convert_short4_sat_rtn(long4);
+short4 __ovld __cnfn convert_short4(long4);
+short4 __ovld __cnfn convert_short4_sat(long4);
+short4 __ovld __cnfn convert_short4_rte(ulong4);
+short4 __ovld __cnfn convert_short4_sat_rte(ulong4);
+short4 __ovld __cnfn convert_short4_rtz(ulong4);
+short4 __ovld __cnfn convert_short4_sat_rtz(ulong4);
+short4 __ovld __cnfn convert_short4_rtp(ulong4);
+short4 __ovld __cnfn convert_short4_sat_rtp(ulong4);
+short4 __ovld __cnfn convert_short4_rtn(ulong4);
+short4 __ovld __cnfn convert_short4_sat_rtn(ulong4);
+short4 __ovld __cnfn convert_short4(ulong4);
+short4 __ovld __cnfn convert_short4_sat(ulong4);
+short4 __ovld __cnfn convert_short4_rte(float4);
+short4 __ovld __cnfn convert_short4_sat_rte(float4);
+short4 __ovld __cnfn convert_short4_rtz(float4);
+short4 __ovld __cnfn convert_short4_sat_rtz(float4);
+short4 __ovld __cnfn convert_short4_rtp(float4);
+short4 __ovld __cnfn convert_short4_sat_rtp(float4);
+short4 __ovld __cnfn convert_short4_rtn(float4);
+short4 __ovld __cnfn convert_short4_sat_rtn(float4);
+short4 __ovld __cnfn convert_short4(float4);
+short4 __ovld __cnfn convert_short4_sat(float4);
+ushort4 __ovld __cnfn convert_ushort4_rte(char4);
+ushort4 __ovld __cnfn convert_ushort4_sat_rte(char4);
+ushort4 __ovld __cnfn convert_ushort4_rtz(char4);
+ushort4 __ovld __cnfn convert_ushort4_sat_rtz(char4);
+ushort4 __ovld __cnfn convert_ushort4_rtp(char4);
+ushort4 __ovld __cnfn convert_ushort4_sat_rtp(char4);
+ushort4 __ovld __cnfn convert_ushort4_rtn(char4);
+ushort4 __ovld __cnfn convert_ushort4_sat_rtn(char4);
+ushort4 __ovld __cnfn convert_ushort4(char4);
+ushort4 __ovld __cnfn convert_ushort4_sat(char4);
+ushort4 __ovld __cnfn convert_ushort4_rte(uchar4);
+ushort4 __ovld __cnfn convert_ushort4_sat_rte(uchar4);
+ushort4 __ovld __cnfn convert_ushort4_rtz(uchar4);
+ushort4 __ovld __cnfn convert_ushort4_sat_rtz(uchar4);
+ushort4 __ovld __cnfn convert_ushort4_rtp(uchar4);
+ushort4 __ovld __cnfn convert_ushort4_sat_rtp(uchar4);
+ushort4 __ovld __cnfn convert_ushort4_rtn(uchar4);
+ushort4 __ovld __cnfn convert_ushort4_sat_rtn(uchar4);
+ushort4 __ovld __cnfn convert_ushort4(uchar4);
+ushort4 __ovld __cnfn convert_ushort4_sat(uchar4);
+ushort4 __ovld __cnfn convert_ushort4_rte(short4);
+ushort4 __ovld __cnfn convert_ushort4_sat_rte(short4);
+ushort4 __ovld __cnfn convert_ushort4_rtz(short4);
+ushort4 __ovld __cnfn convert_ushort4_sat_rtz(short4);
+ushort4 __ovld __cnfn convert_ushort4_rtp(short4);
+ushort4 __ovld __cnfn convert_ushort4_sat_rtp(short4);
+ushort4 __ovld __cnfn convert_ushort4_rtn(short4);
+ushort4 __ovld __cnfn convert_ushort4_sat_rtn(short4);
+ushort4 __ovld __cnfn convert_ushort4(short4);
+ushort4 __ovld __cnfn convert_ushort4_sat(short4);
+ushort4 __ovld __cnfn convert_ushort4_rte(ushort4);
+ushort4 __ovld __cnfn convert_ushort4_sat_rte(ushort4);
+ushort4 __ovld __cnfn convert_ushort4_rtz(ushort4);
+ushort4 __ovld __cnfn convert_ushort4_sat_rtz(ushort4);
+ushort4 __ovld __cnfn convert_ushort4_rtp(ushort4);
+ushort4 __ovld __cnfn convert_ushort4_sat_rtp(ushort4);
+ushort4 __ovld __cnfn convert_ushort4_rtn(ushort4);
+ushort4 __ovld __cnfn convert_ushort4_sat_rtn(ushort4);
+ushort4 __ovld __cnfn convert_ushort4(ushort4);
+ushort4 __ovld __cnfn convert_ushort4_sat(ushort4);
+ushort4 __ovld __cnfn convert_ushort4_rte(int4);
+ushort4 __ovld __cnfn convert_ushort4_sat_rte(int4);
+ushort4 __ovld __cnfn convert_ushort4_rtz(int4);
+ushort4 __ovld __cnfn convert_ushort4_sat_rtz(int4);
+ushort4 __ovld __cnfn convert_ushort4_rtp(int4);
+ushort4 __ovld __cnfn convert_ushort4_sat_rtp(int4);
+ushort4 __ovld __cnfn convert_ushort4_rtn(int4);
+ushort4 __ovld __cnfn convert_ushort4_sat_rtn(int4);
+ushort4 __ovld __cnfn convert_ushort4(int4);
+ushort4 __ovld __cnfn convert_ushort4_sat(int4);
+ushort4 __ovld __cnfn convert_ushort4_rte(uint4);
+ushort4 __ovld __cnfn convert_ushort4_sat_rte(uint4);
+ushort4 __ovld __cnfn convert_ushort4_rtz(uint4);
+ushort4 __ovld __cnfn convert_ushort4_sat_rtz(uint4);
+ushort4 __ovld __cnfn convert_ushort4_rtp(uint4);
+ushort4 __ovld __cnfn convert_ushort4_sat_rtp(uint4);
+ushort4 __ovld __cnfn convert_ushort4_rtn(uint4);
+ushort4 __ovld __cnfn convert_ushort4_sat_rtn(uint4);
+ushort4 __ovld __cnfn convert_ushort4(uint4);
+ushort4 __ovld __cnfn convert_ushort4_sat(uint4);
+ushort4 __ovld __cnfn convert_ushort4_rte(long4);
+ushort4 __ovld __cnfn convert_ushort4_sat_rte(long4);
+ushort4 __ovld __cnfn convert_ushort4_rtz(long4);
+ushort4 __ovld __cnfn convert_ushort4_sat_rtz(long4);
+ushort4 __ovld __cnfn convert_ushort4_rtp(long4);
+ushort4 __ovld __cnfn convert_ushort4_sat_rtp(long4);
+ushort4 __ovld __cnfn convert_ushort4_rtn(long4);
+ushort4 __ovld __cnfn convert_ushort4_sat_rtn(long4);
+ushort4 __ovld __cnfn convert_ushort4(long4);
+ushort4 __ovld __cnfn convert_ushort4_sat(long4);
+ushort4 __ovld __cnfn convert_ushort4_rte(ulong4);
+ushort4 __ovld __cnfn convert_ushort4_sat_rte(ulong4);
+ushort4 __ovld __cnfn convert_ushort4_rtz(ulong4);
+ushort4 __ovld __cnfn convert_ushort4_sat_rtz(ulong4);
+ushort4 __ovld __cnfn convert_ushort4_rtp(ulong4);
+ushort4 __ovld __cnfn convert_ushort4_sat_rtp(ulong4);
+ushort4 __ovld __cnfn convert_ushort4_rtn(ulong4);
+ushort4 __ovld __cnfn convert_ushort4_sat_rtn(ulong4);
+ushort4 __ovld __cnfn convert_ushort4(ulong4);
+ushort4 __ovld __cnfn convert_ushort4_sat(ulong4);
+ushort4 __ovld __cnfn convert_ushort4_rte(float4);
+ushort4 __ovld __cnfn convert_ushort4_sat_rte(float4);
+ushort4 __ovld __cnfn convert_ushort4_rtz(float4);
+ushort4 __ovld __cnfn convert_ushort4_sat_rtz(float4);
+ushort4 __ovld __cnfn convert_ushort4_rtp(float4);
+ushort4 __ovld __cnfn convert_ushort4_sat_rtp(float4);
+ushort4 __ovld __cnfn convert_ushort4_rtn(float4);
+ushort4 __ovld __cnfn convert_ushort4_sat_rtn(float4);
+ushort4 __ovld __cnfn convert_ushort4(float4);
+ushort4 __ovld __cnfn convert_ushort4_sat(float4);
+int4 __ovld __cnfn convert_int4_rte(char4);
+int4 __ovld __cnfn convert_int4_sat_rte(char4);
+int4 __ovld __cnfn convert_int4_rtz(char4);
+int4 __ovld __cnfn convert_int4_sat_rtz(char4);
+int4 __ovld __cnfn convert_int4_rtp(char4);
+int4 __ovld __cnfn convert_int4_sat_rtp(char4);
+int4 __ovld __cnfn convert_int4_rtn(char4);
+int4 __ovld __cnfn convert_int4_sat_rtn(char4);
+int4 __ovld __cnfn convert_int4(char4);
+int4 __ovld __cnfn convert_int4_sat(char4);
+int4 __ovld __cnfn convert_int4_rte(uchar4);
+int4 __ovld __cnfn convert_int4_sat_rte(uchar4);
+int4 __ovld __cnfn convert_int4_rtz(uchar4);
+int4 __ovld __cnfn convert_int4_sat_rtz(uchar4);
+int4 __ovld __cnfn convert_int4_rtp(uchar4);
+int4 __ovld __cnfn convert_int4_sat_rtp(uchar4);
+int4 __ovld __cnfn convert_int4_rtn(uchar4);
+int4 __ovld __cnfn convert_int4_sat_rtn(uchar4);
+int4 __ovld __cnfn convert_int4(uchar4);
+int4 __ovld __cnfn convert_int4_sat(uchar4);
+int4 __ovld __cnfn convert_int4_rte(short4);
+int4 __ovld __cnfn convert_int4_sat_rte(short4);
+int4 __ovld __cnfn convert_int4_rtz(short4);
+int4 __ovld __cnfn convert_int4_sat_rtz(short4);
+int4 __ovld __cnfn convert_int4_rtp(short4);
+int4 __ovld __cnfn convert_int4_sat_rtp(short4);
+int4 __ovld __cnfn convert_int4_rtn(short4);
+int4 __ovld __cnfn convert_int4_sat_rtn(short4);
+int4 __ovld __cnfn convert_int4(short4);
+int4 __ovld __cnfn convert_int4_sat(short4);
+int4 __ovld __cnfn convert_int4_rte(ushort4);
+int4 __ovld __cnfn convert_int4_sat_rte(ushort4);
+int4 __ovld __cnfn convert_int4_rtz(ushort4);
+int4 __ovld __cnfn convert_int4_sat_rtz(ushort4);
+int4 __ovld __cnfn convert_int4_rtp(ushort4);
+int4 __ovld __cnfn convert_int4_sat_rtp(ushort4);
+int4 __ovld __cnfn convert_int4_rtn(ushort4);
+int4 __ovld __cnfn convert_int4_sat_rtn(ushort4);
+int4 __ovld __cnfn convert_int4(ushort4);
+int4 __ovld __cnfn convert_int4_sat(ushort4);
+int4 __ovld __cnfn convert_int4_rte(int4);
+int4 __ovld __cnfn convert_int4_sat_rte(int4);
+int4 __ovld __cnfn convert_int4_rtz(int4);
+int4 __ovld __cnfn convert_int4_sat_rtz(int4);
+int4 __ovld __cnfn convert_int4_rtp(int4);
+int4 __ovld __cnfn convert_int4_sat_rtp(int4);
+int4 __ovld __cnfn convert_int4_rtn(int4);
+int4 __ovld __cnfn convert_int4_sat_rtn(int4);
+int4 __ovld __cnfn convert_int4(int4);
+int4 __ovld __cnfn convert_int4_sat(int4);
+int4 __ovld __cnfn convert_int4_rte(uint4);
+int4 __ovld __cnfn convert_int4_sat_rte(uint4);
+int4 __ovld __cnfn convert_int4_rtz(uint4);
+int4 __ovld __cnfn convert_int4_sat_rtz(uint4);
+int4 __ovld __cnfn convert_int4_rtp(uint4);
+int4 __ovld __cnfn convert_int4_sat_rtp(uint4);
+int4 __ovld __cnfn convert_int4_rtn(uint4);
+int4 __ovld __cnfn convert_int4_sat_rtn(uint4);
+int4 __ovld __cnfn convert_int4(uint4);
+int4 __ovld __cnfn convert_int4_sat(uint4);
+int4 __ovld __cnfn convert_int4_rte(long4);
+int4 __ovld __cnfn convert_int4_sat_rte(long4);
+int4 __ovld __cnfn convert_int4_rtz(long4);
+int4 __ovld __cnfn convert_int4_sat_rtz(long4);
+int4 __ovld __cnfn convert_int4_rtp(long4);
+int4 __ovld __cnfn convert_int4_sat_rtp(long4);
+int4 __ovld __cnfn convert_int4_rtn(long4);
+int4 __ovld __cnfn convert_int4_sat_rtn(long4);
+int4 __ovld __cnfn convert_int4(long4);
+int4 __ovld __cnfn convert_int4_sat(long4);
+int4 __ovld __cnfn convert_int4_rte(ulong4);
+int4 __ovld __cnfn convert_int4_sat_rte(ulong4);
+int4 __ovld __cnfn convert_int4_rtz(ulong4);
+int4 __ovld __cnfn convert_int4_sat_rtz(ulong4);
+int4 __ovld __cnfn convert_int4_rtp(ulong4);
+int4 __ovld __cnfn convert_int4_sat_rtp(ulong4);
+int4 __ovld __cnfn convert_int4_rtn(ulong4);
+int4 __ovld __cnfn convert_int4_sat_rtn(ulong4);
+int4 __ovld __cnfn convert_int4(ulong4);
+int4 __ovld __cnfn convert_int4_sat(ulong4);
+int4 __ovld __cnfn convert_int4_rte(float4);
+int4 __ovld __cnfn convert_int4_sat_rte(float4);
+int4 __ovld __cnfn convert_int4_rtz(float4);
+int4 __ovld __cnfn convert_int4_sat_rtz(float4);
+int4 __ovld __cnfn convert_int4_rtp(float4);
+int4 __ovld __cnfn convert_int4_sat_rtp(float4);
+int4 __ovld __cnfn convert_int4_rtn(float4);
+int4 __ovld __cnfn convert_int4_sat_rtn(float4);
+int4 __ovld __cnfn convert_int4(float4);
+int4 __ovld __cnfn convert_int4_sat(float4);
+uint4 __ovld __cnfn convert_uint4_rte(char4);
+uint4 __ovld __cnfn convert_uint4_sat_rte(char4);
+uint4 __ovld __cnfn convert_uint4_rtz(char4);
+uint4 __ovld __cnfn convert_uint4_sat_rtz(char4);
+uint4 __ovld __cnfn convert_uint4_rtp(char4);
+uint4 __ovld __cnfn convert_uint4_sat_rtp(char4);
+uint4 __ovld __cnfn convert_uint4_rtn(char4);
+uint4 __ovld __cnfn convert_uint4_sat_rtn(char4);
+uint4 __ovld __cnfn convert_uint4(char4);
+uint4 __ovld __cnfn convert_uint4_sat(char4);
+uint4 __ovld __cnfn convert_uint4_rte(uchar4);
+uint4 __ovld __cnfn convert_uint4_sat_rte(uchar4);
+uint4 __ovld __cnfn convert_uint4_rtz(uchar4);
+uint4 __ovld __cnfn convert_uint4_sat_rtz(uchar4);
+uint4 __ovld __cnfn convert_uint4_rtp(uchar4);
+uint4 __ovld __cnfn convert_uint4_sat_rtp(uchar4);
+uint4 __ovld __cnfn convert_uint4_rtn(uchar4);
+uint4 __ovld __cnfn convert_uint4_sat_rtn(uchar4);
+uint4 __ovld __cnfn convert_uint4(uchar4);
+uint4 __ovld __cnfn convert_uint4_sat(uchar4);
+uint4 __ovld __cnfn convert_uint4_rte(short4);
+uint4 __ovld __cnfn convert_uint4_sat_rte(short4);
+uint4 __ovld __cnfn convert_uint4_rtz(short4);
+uint4 __ovld __cnfn convert_uint4_sat_rtz(short4);
+uint4 __ovld __cnfn convert_uint4_rtp(short4);
+uint4 __ovld __cnfn convert_uint4_sat_rtp(short4);
+uint4 __ovld __cnfn convert_uint4_rtn(short4);
+uint4 __ovld __cnfn convert_uint4_sat_rtn(short4);
+uint4 __ovld __cnfn convert_uint4(short4);
+uint4 __ovld __cnfn convert_uint4_sat(short4);
+uint4 __ovld __cnfn convert_uint4_rte(ushort4);
+uint4 __ovld __cnfn convert_uint4_sat_rte(ushort4);
+uint4 __ovld __cnfn convert_uint4_rtz(ushort4);
+uint4 __ovld __cnfn convert_uint4_sat_rtz(ushort4);
+uint4 __ovld __cnfn convert_uint4_rtp(ushort4);
+uint4 __ovld __cnfn convert_uint4_sat_rtp(ushort4);
+uint4 __ovld __cnfn convert_uint4_rtn(ushort4);
+uint4 __ovld __cnfn convert_uint4_sat_rtn(ushort4);
+uint4 __ovld __cnfn convert_uint4(ushort4);
+uint4 __ovld __cnfn convert_uint4_sat(ushort4);
+uint4 __ovld __cnfn convert_uint4_rte(int4);
+uint4 __ovld __cnfn convert_uint4_sat_rte(int4);
+uint4 __ovld __cnfn convert_uint4_rtz(int4);
+uint4 __ovld __cnfn convert_uint4_sat_rtz(int4);
+uint4 __ovld __cnfn convert_uint4_rtp(int4);
+uint4 __ovld __cnfn convert_uint4_sat_rtp(int4);
+uint4 __ovld __cnfn convert_uint4_rtn(int4);
+uint4 __ovld __cnfn convert_uint4_sat_rtn(int4);
+uint4 __ovld __cnfn convert_uint4(int4);
+uint4 __ovld __cnfn convert_uint4_sat(int4);
+uint4 __ovld __cnfn convert_uint4_rte(uint4);
+uint4 __ovld __cnfn convert_uint4_sat_rte(uint4);
+uint4 __ovld __cnfn convert_uint4_rtz(uint4);
+uint4 __ovld __cnfn convert_uint4_sat_rtz(uint4);
+uint4 __ovld __cnfn convert_uint4_rtp(uint4);
+uint4 __ovld __cnfn convert_uint4_sat_rtp(uint4);
+uint4 __ovld __cnfn convert_uint4_rtn(uint4);
+uint4 __ovld __cnfn convert_uint4_sat_rtn(uint4);
+uint4 __ovld __cnfn convert_uint4(uint4);
+uint4 __ovld __cnfn convert_uint4_sat(uint4);
+uint4 __ovld __cnfn convert_uint4_rte(long4);
+uint4 __ovld __cnfn convert_uint4_sat_rte(long4);
+uint4 __ovld __cnfn convert_uint4_rtz(long4);
+uint4 __ovld __cnfn convert_uint4_sat_rtz(long4);
+uint4 __ovld __cnfn convert_uint4_rtp(long4);
+uint4 __ovld __cnfn convert_uint4_sat_rtp(long4);
+uint4 __ovld __cnfn convert_uint4_rtn(long4);
+uint4 __ovld __cnfn convert_uint4_sat_rtn(long4);
+uint4 __ovld __cnfn convert_uint4(long4);
+uint4 __ovld __cnfn convert_uint4_sat(long4);
+uint4 __ovld __cnfn convert_uint4_rte(ulong4);
+uint4 __ovld __cnfn convert_uint4_sat_rte(ulong4);
+uint4 __ovld __cnfn convert_uint4_rtz(ulong4);
+uint4 __ovld __cnfn convert_uint4_sat_rtz(ulong4);
+uint4 __ovld __cnfn convert_uint4_rtp(ulong4);
+uint4 __ovld __cnfn convert_uint4_sat_rtp(ulong4);
+uint4 __ovld __cnfn convert_uint4_rtn(ulong4);
+uint4 __ovld __cnfn convert_uint4_sat_rtn(ulong4);
+uint4 __ovld __cnfn convert_uint4(ulong4);
+uint4 __ovld __cnfn convert_uint4_sat(ulong4);
+uint4 __ovld __cnfn convert_uint4_rte(float4);
+uint4 __ovld __cnfn convert_uint4_sat_rte(float4);
+uint4 __ovld __cnfn convert_uint4_rtz(float4);
+uint4 __ovld __cnfn convert_uint4_sat_rtz(float4);
+uint4 __ovld __cnfn convert_uint4_rtp(float4);
+uint4 __ovld __cnfn convert_uint4_sat_rtp(float4);
+uint4 __ovld __cnfn convert_uint4_rtn(float4);
+uint4 __ovld __cnfn convert_uint4_sat_rtn(float4);
+uint4 __ovld __cnfn convert_uint4(float4);
+uint4 __ovld __cnfn convert_uint4_sat(float4);
+long4 __ovld __cnfn convert_long4_rte(char4);
+long4 __ovld __cnfn convert_long4_sat_rte(char4);
+long4 __ovld __cnfn convert_long4_rtz(char4);
+long4 __ovld __cnfn convert_long4_sat_rtz(char4);
+long4 __ovld __cnfn convert_long4_rtp(char4);
+long4 __ovld __cnfn convert_long4_sat_rtp(char4);
+long4 __ovld __cnfn convert_long4_rtn(char4);
+long4 __ovld __cnfn convert_long4_sat_rtn(char4);
+long4 __ovld __cnfn convert_long4(char4);
+long4 __ovld __cnfn convert_long4_sat(char4);
+long4 __ovld __cnfn convert_long4_rte(uchar4);
+long4 __ovld __cnfn convert_long4_sat_rte(uchar4);
+long4 __ovld __cnfn convert_long4_rtz(uchar4);
+long4 __ovld __cnfn convert_long4_sat_rtz(uchar4);
+long4 __ovld __cnfn convert_long4_rtp(uchar4);
+long4 __ovld __cnfn convert_long4_sat_rtp(uchar4);
+long4 __ovld __cnfn convert_long4_rtn(uchar4);
+long4 __ovld __cnfn convert_long4_sat_rtn(uchar4);
+long4 __ovld __cnfn convert_long4(uchar4);
+long4 __ovld __cnfn convert_long4_sat(uchar4);
+long4 __ovld __cnfn convert_long4_rte(short4);
+long4 __ovld __cnfn convert_long4_sat_rte(short4);
+long4 __ovld __cnfn convert_long4_rtz(short4);
+long4 __ovld __cnfn convert_long4_sat_rtz(short4);
+long4 __ovld __cnfn convert_long4_rtp(short4);
+long4 __ovld __cnfn convert_long4_sat_rtp(short4);
+long4 __ovld __cnfn convert_long4_rtn(short4);
+long4 __ovld __cnfn convert_long4_sat_rtn(short4);
+long4 __ovld __cnfn convert_long4(short4);
+long4 __ovld __cnfn convert_long4_sat(short4);
+long4 __ovld __cnfn convert_long4_rte(ushort4);
+long4 __ovld __cnfn convert_long4_sat_rte(ushort4);
+long4 __ovld __cnfn convert_long4_rtz(ushort4);
+long4 __ovld __cnfn convert_long4_sat_rtz(ushort4);
+long4 __ovld __cnfn convert_long4_rtp(ushort4);
+long4 __ovld __cnfn convert_long4_sat_rtp(ushort4);
+long4 __ovld __cnfn convert_long4_rtn(ushort4);
+long4 __ovld __cnfn convert_long4_sat_rtn(ushort4);
+long4 __ovld __cnfn convert_long4(ushort4);
+long4 __ovld __cnfn convert_long4_sat(ushort4);
+long4 __ovld __cnfn convert_long4_rte(int4);
+long4 __ovld __cnfn convert_long4_sat_rte(int4);
+long4 __ovld __cnfn convert_long4_rtz(int4);
+long4 __ovld __cnfn convert_long4_sat_rtz(int4);
+long4 __ovld __cnfn convert_long4_rtp(int4);
+long4 __ovld __cnfn convert_long4_sat_rtp(int4);
+long4 __ovld __cnfn convert_long4_rtn(int4);
+long4 __ovld __cnfn convert_long4_sat_rtn(int4);
+long4 __ovld __cnfn convert_long4(int4);
+long4 __ovld __cnfn convert_long4_sat(int4);
+long4 __ovld __cnfn convert_long4_rte(uint4);
+long4 __ovld __cnfn convert_long4_sat_rte(uint4);
+long4 __ovld __cnfn convert_long4_rtz(uint4);
+long4 __ovld __cnfn convert_long4_sat_rtz(uint4);
+long4 __ovld __cnfn convert_long4_rtp(uint4);
+long4 __ovld __cnfn convert_long4_sat_rtp(uint4);
+long4 __ovld __cnfn convert_long4_rtn(uint4);
+long4 __ovld __cnfn convert_long4_sat_rtn(uint4);
+long4 __ovld __cnfn convert_long4(uint4);
+long4 __ovld __cnfn convert_long4_sat(uint4);
+long4 __ovld __cnfn convert_long4_rte(long4);
+long4 __ovld __cnfn convert_long4_sat_rte(long4);
+long4 __ovld __cnfn convert_long4_rtz(long4);
+long4 __ovld __cnfn convert_long4_sat_rtz(long4);
+long4 __ovld __cnfn convert_long4_rtp(long4);
+long4 __ovld __cnfn convert_long4_sat_rtp(long4);
+long4 __ovld __cnfn convert_long4_rtn(long4);
+long4 __ovld __cnfn convert_long4_sat_rtn(long4);
+long4 __ovld __cnfn convert_long4(long4);
+long4 __ovld __cnfn convert_long4_sat(long4);
+long4 __ovld __cnfn convert_long4_rte(ulong4);
+long4 __ovld __cnfn convert_long4_sat_rte(ulong4);
+long4 __ovld __cnfn convert_long4_rtz(ulong4);
+long4 __ovld __cnfn convert_long4_sat_rtz(ulong4);
+long4 __ovld __cnfn convert_long4_rtp(ulong4);
+long4 __ovld __cnfn convert_long4_sat_rtp(ulong4);
+long4 __ovld __cnfn convert_long4_rtn(ulong4);
+long4 __ovld __cnfn convert_long4_sat_rtn(ulong4);
+long4 __ovld __cnfn convert_long4(ulong4);
+long4 __ovld __cnfn convert_long4_sat(ulong4);
+long4 __ovld __cnfn convert_long4_rte(float4);
+long4 __ovld __cnfn convert_long4_sat_rte(float4);
+long4 __ovld __cnfn convert_long4_rtz(float4);
+long4 __ovld __cnfn convert_long4_sat_rtz(float4);
+long4 __ovld __cnfn convert_long4_rtp(float4);
+long4 __ovld __cnfn convert_long4_sat_rtp(float4);
+long4 __ovld __cnfn convert_long4_rtn(float4);
+long4 __ovld __cnfn convert_long4_sat_rtn(float4);
+long4 __ovld __cnfn convert_long4(float4);
+long4 __ovld __cnfn convert_long4_sat(float4);
+ulong4 __ovld __cnfn convert_ulong4_rte(char4);
+ulong4 __ovld __cnfn convert_ulong4_sat_rte(char4);
+ulong4 __ovld __cnfn convert_ulong4_rtz(char4);
+ulong4 __ovld __cnfn convert_ulong4_sat_rtz(char4);
+ulong4 __ovld __cnfn convert_ulong4_rtp(char4);
+ulong4 __ovld __cnfn convert_ulong4_sat_rtp(char4);
+ulong4 __ovld __cnfn convert_ulong4_rtn(char4);
+ulong4 __ovld __cnfn convert_ulong4_sat_rtn(char4);
+ulong4 __ovld __cnfn convert_ulong4(char4);
+ulong4 __ovld __cnfn convert_ulong4_sat(char4);
+ulong4 __ovld __cnfn convert_ulong4_rte(uchar4);
+ulong4 __ovld __cnfn convert_ulong4_sat_rte(uchar4);
+ulong4 __ovld __cnfn convert_ulong4_rtz(uchar4);
+ulong4 __ovld __cnfn convert_ulong4_sat_rtz(uchar4);
+ulong4 __ovld __cnfn convert_ulong4_rtp(uchar4);
+ulong4 __ovld __cnfn convert_ulong4_sat_rtp(uchar4);
+ulong4 __ovld __cnfn convert_ulong4_rtn(uchar4);
+ulong4 __ovld __cnfn convert_ulong4_sat_rtn(uchar4);
+ulong4 __ovld __cnfn convert_ulong4(uchar4);
+ulong4 __ovld __cnfn convert_ulong4_sat(uchar4);
+ulong4 __ovld __cnfn convert_ulong4_rte(short4);
+ulong4 __ovld __cnfn convert_ulong4_sat_rte(short4);
+ulong4 __ovld __cnfn convert_ulong4_rtz(short4);
+ulong4 __ovld __cnfn convert_ulong4_sat_rtz(short4);
+ulong4 __ovld __cnfn convert_ulong4_rtp(short4);
+ulong4 __ovld __cnfn convert_ulong4_sat_rtp(short4);
+ulong4 __ovld __cnfn convert_ulong4_rtn(short4);
+ulong4 __ovld __cnfn convert_ulong4_sat_rtn(short4);
+ulong4 __ovld __cnfn convert_ulong4(short4);
+ulong4 __ovld __cnfn convert_ulong4_sat(short4);
+ulong4 __ovld __cnfn convert_ulong4_rte(ushort4);
+ulong4 __ovld __cnfn convert_ulong4_sat_rte(ushort4);
+ulong4 __ovld __cnfn convert_ulong4_rtz(ushort4);
+ulong4 __ovld __cnfn convert_ulong4_sat_rtz(ushort4);
+ulong4 __ovld __cnfn convert_ulong4_rtp(ushort4);
+ulong4 __ovld __cnfn convert_ulong4_sat_rtp(ushort4);
+ulong4 __ovld __cnfn convert_ulong4_rtn(ushort4);
+ulong4 __ovld __cnfn convert_ulong4_sat_rtn(ushort4);
+ulong4 __ovld __cnfn convert_ulong4(ushort4);
+ulong4 __ovld __cnfn convert_ulong4_sat(ushort4);
+ulong4 __ovld __cnfn convert_ulong4_rte(int4);
+ulong4 __ovld __cnfn convert_ulong4_sat_rte(int4);
+ulong4 __ovld __cnfn convert_ulong4_rtz(int4);
+ulong4 __ovld __cnfn convert_ulong4_sat_rtz(int4);
+ulong4 __ovld __cnfn convert_ulong4_rtp(int4);
+ulong4 __ovld __cnfn convert_ulong4_sat_rtp(int4);
+ulong4 __ovld __cnfn convert_ulong4_rtn(int4);
+ulong4 __ovld __cnfn convert_ulong4_sat_rtn(int4);
+ulong4 __ovld __cnfn convert_ulong4(int4);
+ulong4 __ovld __cnfn convert_ulong4_sat(int4);
+ulong4 __ovld __cnfn convert_ulong4_rte(uint4);
+ulong4 __ovld __cnfn convert_ulong4_sat_rte(uint4);
+ulong4 __ovld __cnfn convert_ulong4_rtz(uint4);
+ulong4 __ovld __cnfn convert_ulong4_sat_rtz(uint4);
+ulong4 __ovld __cnfn convert_ulong4_rtp(uint4);
+ulong4 __ovld __cnfn convert_ulong4_sat_rtp(uint4);
+ulong4 __ovld __cnfn convert_ulong4_rtn(uint4);
+ulong4 __ovld __cnfn convert_ulong4_sat_rtn(uint4);
+ulong4 __ovld __cnfn convert_ulong4(uint4);
+ulong4 __ovld __cnfn convert_ulong4_sat(uint4);
+ulong4 __ovld __cnfn convert_ulong4_rte(long4);
+ulong4 __ovld __cnfn convert_ulong4_sat_rte(long4);
+ulong4 __ovld __cnfn convert_ulong4_rtz(long4);
+ulong4 __ovld __cnfn convert_ulong4_sat_rtz(long4);
+ulong4 __ovld __cnfn convert_ulong4_rtp(long4);
+ulong4 __ovld __cnfn convert_ulong4_sat_rtp(long4);
+ulong4 __ovld __cnfn convert_ulong4_rtn(long4);
+ulong4 __ovld __cnfn convert_ulong4_sat_rtn(long4);
+ulong4 __ovld __cnfn convert_ulong4(long4);
+ulong4 __ovld __cnfn convert_ulong4_sat(long4);
+ulong4 __ovld __cnfn convert_ulong4_rte(ulong4);
+ulong4 __ovld __cnfn convert_ulong4_sat_rte(ulong4);
+ulong4 __ovld __cnfn convert_ulong4_rtz(ulong4);
+ulong4 __ovld __cnfn convert_ulong4_sat_rtz(ulong4);
+ulong4 __ovld __cnfn convert_ulong4_rtp(ulong4);
+ulong4 __ovld __cnfn convert_ulong4_sat_rtp(ulong4);
+ulong4 __ovld __cnfn convert_ulong4_rtn(ulong4);
+ulong4 __ovld __cnfn convert_ulong4_sat_rtn(ulong4);
+ulong4 __ovld __cnfn convert_ulong4(ulong4);
+ulong4 __ovld __cnfn convert_ulong4_sat(ulong4);
+ulong4 __ovld __cnfn convert_ulong4_rte(float4);
+ulong4 __ovld __cnfn convert_ulong4_sat_rte(float4);
+ulong4 __ovld __cnfn convert_ulong4_rtz(float4);
+ulong4 __ovld __cnfn convert_ulong4_sat_rtz(float4);
+ulong4 __ovld __cnfn convert_ulong4_rtp(float4);
+ulong4 __ovld __cnfn convert_ulong4_sat_rtp(float4);
+ulong4 __ovld __cnfn convert_ulong4_rtn(float4);
+ulong4 __ovld __cnfn convert_ulong4_sat_rtn(float4);
+ulong4 __ovld __cnfn convert_ulong4(float4);
+ulong4 __ovld __cnfn convert_ulong4_sat(float4);
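The _sat variants above clamp each out-of-range lane to the destination type's representable range. A minimal sketch of the behavior, assuming a hypothetical kernel and buffer names (only the convert_ulong4_sat builtin comes from this header):

__kernel void saturate_demo(__global const float4 *src,
                            __global ulong4 *dst) {
    size_t i = get_global_id(0);
    // Hypothetical example: _sat clamps per lane, so negative inputs
    // become 0 and values above ULONG_MAX become ULONG_MAX; without
    // _sat, an out-of-range float-to-integer conversion is not
    // well-defined.
    dst[i] = convert_ulong4_sat(src[i]);
}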
+float4 __ovld __cnfn convert_float4_rte(char4);
+float4 __ovld __cnfn convert_float4_rtz(char4);
+float4 __ovld __cnfn convert_float4_rtp(char4);
+float4 __ovld __cnfn convert_float4_rtn(char4);
+float4 __ovld __cnfn convert_float4(char4);
+float4 __ovld __cnfn convert_float4_rte(uchar4);
+float4 __ovld __cnfn convert_float4_rtz(uchar4);
+float4 __ovld __cnfn convert_float4_rtp(uchar4);
+float4 __ovld __cnfn convert_float4_rtn(uchar4);
+float4 __ovld __cnfn convert_float4(uchar4);
+float4 __ovld __cnfn convert_float4_rte(short4);
+float4 __ovld __cnfn convert_float4_rtz(short4);
+float4 __ovld __cnfn convert_float4_rtp(short4);
+float4 __ovld __cnfn convert_float4_rtn(short4);
+float4 __ovld __cnfn convert_float4(short4);
+float4 __ovld __cnfn convert_float4_rte(ushort4);
+float4 __ovld __cnfn convert_float4_rtz(ushort4);
+float4 __ovld __cnfn convert_float4_rtp(ushort4);
+float4 __ovld __cnfn convert_float4_rtn(ushort4);
+float4 __ovld __cnfn convert_float4(ushort4);
+float4 __ovld __cnfn convert_float4_rte(int4);
+float4 __ovld __cnfn convert_float4_rtz(int4);
+float4 __ovld __cnfn convert_float4_rtp(int4);
+float4 __ovld __cnfn convert_float4_rtn(int4);
+float4 __ovld __cnfn convert_float4(int4);
+float4 __ovld __cnfn convert_float4_rte(uint4);
+float4 __ovld __cnfn convert_float4_rtz(uint4);
+float4 __ovld __cnfn convert_float4_rtp(uint4);
+float4 __ovld __cnfn convert_float4_rtn(uint4);
+float4 __ovld __cnfn convert_float4(uint4);
+float4 __ovld __cnfn convert_float4_rte(long4);
+float4 __ovld __cnfn convert_float4_rtz(long4);
+float4 __ovld __cnfn convert_float4_rtp(long4);
+float4 __ovld __cnfn convert_float4_rtn(long4);
+float4 __ovld __cnfn convert_float4(long4);
+float4 __ovld __cnfn convert_float4_rte(ulong4);
+float4 __ovld __cnfn convert_float4_rtz(ulong4);
+float4 __ovld __cnfn convert_float4_rtp(ulong4);
+float4 __ovld __cnfn convert_float4_rtn(ulong4);
+float4 __ovld __cnfn convert_float4(ulong4);
+float4 __ovld __cnfn convert_float4_rte(float4);
+float4 __ovld __cnfn convert_float4_rtz(float4);
+float4 __ovld __cnfn convert_float4_rtp(float4);
+float4 __ovld __cnfn convert_float4_rtn(float4);
+float4 __ovld __cnfn convert_float4(float4);
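Taken together, these declarations follow the pattern convert_<destTypeN>[_sat][_<rounding>](srcTypeN): an element-wise conversion between vectors of the same width. Note that saturation is only offered for integer destinations, which is why the float4 set above has no _sat forms. A typical use is packing normalized floats into bytes; this is a hedged sketch with hypothetical kernel and buffer names:

__kernel void pack_to_bytes(__global const float4 *src,
                            __global uchar4 *dst) {
    size_t i = get_global_id(0);
    // Hypothetical example: scale to [0, 255], then convert each lane
    // with saturation and round-to-nearest-even.
    float4 v = clamp(src[i], 0.0f, 1.0f) * 255.0f;
    dst[i] = convert_uchar4_sat_rte(v);
}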
+char8 __ovld __cnfn convert_char8_rte(char8);
+char8 __ovld __cnfn convert_char8_sat_rte(char8);
+char8 __ovld __cnfn convert_char8_rtz(char8);
+char8 __ovld __cnfn convert_char8_sat_rtz(char8);
+char8 __ovld __cnfn convert_char8_rtp(char8);
+char8 __ovld __cnfn convert_char8_sat_rtp(char8);
+char8 __ovld __cnfn convert_char8_rtn(char8);
+char8 __ovld __cnfn convert_char8_sat_rtn(char8);
+char8 __ovld __cnfn convert_char8(char8);
+char8 __ovld __cnfn convert_char8_sat(char8);
+char8 __ovld __cnfn convert_char8_rte(uchar8);
+char8 __ovld __cnfn convert_char8_sat_rte(uchar8);
+char8 __ovld __cnfn convert_char8_rtz(uchar8);
+char8 __ovld __cnfn convert_char8_sat_rtz(uchar8);
+char8 __ovld __cnfn convert_char8_rtp(uchar8);
+char8 __ovld __cnfn convert_char8_sat_rtp(uchar8);
+char8 __ovld __cnfn convert_char8_rtn(uchar8);
+char8 __ovld __cnfn convert_char8_sat_rtn(uchar8);
+char8 __ovld __cnfn convert_char8(uchar8);
+char8 __ovld __cnfn convert_char8_sat(uchar8);
+char8 __ovld __cnfn convert_char8_rte(short8);
+char8 __ovld __cnfn convert_char8_sat_rte(short8);
+char8 __ovld __cnfn convert_char8_rtz(short8);
+char8 __ovld __cnfn convert_char8_sat_rtz(short8);
+char8 __ovld __cnfn convert_char8_rtp(short8);
+char8 __ovld __cnfn convert_char8_sat_rtp(short8);
+char8 __ovld __cnfn convert_char8_rtn(short8);
+char8 __ovld __cnfn convert_char8_sat_rtn(short8);
+char8 __ovld __cnfn convert_char8(short8);
+char8 __ovld __cnfn convert_char8_sat(short8);
+char8 __ovld __cnfn convert_char8_rte(ushort8);
+char8 __ovld __cnfn convert_char8_sat_rte(ushort8);
+char8 __ovld __cnfn convert_char8_rtz(ushort8);
+char8 __ovld __cnfn convert_char8_sat_rtz(ushort8);
+char8 __ovld __cnfn convert_char8_rtp(ushort8);
+char8 __ovld __cnfn convert_char8_sat_rtp(ushort8);
+char8 __ovld __cnfn convert_char8_rtn(ushort8);
+char8 __ovld __cnfn convert_char8_sat_rtn(ushort8);
+char8 __ovld __cnfn convert_char8(ushort8);
+char8 __ovld __cnfn convert_char8_sat(ushort8);
+char8 __ovld __cnfn convert_char8_rte(int8);
+char8 __ovld __cnfn convert_char8_sat_rte(int8);
+char8 __ovld __cnfn convert_char8_rtz(int8);
+char8 __ovld __cnfn convert_char8_sat_rtz(int8);
+char8 __ovld __cnfn convert_char8_rtp(int8);
+char8 __ovld __cnfn convert_char8_sat_rtp(int8);
+char8 __ovld __cnfn convert_char8_rtn(int8);
+char8 __ovld __cnfn convert_char8_sat_rtn(int8);
+char8 __ovld __cnfn convert_char8(int8);
+char8 __ovld __cnfn convert_char8_sat(int8);
+char8 __ovld __cnfn convert_char8_rte(uint8);
+char8 __ovld __cnfn convert_char8_sat_rte(uint8);
+char8 __ovld __cnfn convert_char8_rtz(uint8);
+char8 __ovld __cnfn convert_char8_sat_rtz(uint8);
+char8 __ovld __cnfn convert_char8_rtp(uint8);
+char8 __ovld __cnfn convert_char8_sat_rtp(uint8);
+char8 __ovld __cnfn convert_char8_rtn(uint8);
+char8 __ovld __cnfn convert_char8_sat_rtn(uint8);
+char8 __ovld __cnfn convert_char8(uint8);
+char8 __ovld __cnfn convert_char8_sat(uint8);
+char8 __ovld __cnfn convert_char8_rte(long8);
+char8 __ovld __cnfn convert_char8_sat_rte(long8);
+char8 __ovld __cnfn convert_char8_rtz(long8);
+char8 __ovld __cnfn convert_char8_sat_rtz(long8);
+char8 __ovld __cnfn convert_char8_rtp(long8);
+char8 __ovld __cnfn convert_char8_sat_rtp(long8);
+char8 __ovld __cnfn convert_char8_rtn(long8);
+char8 __ovld __cnfn convert_char8_sat_rtn(long8);
+char8 __ovld __cnfn convert_char8(long8);
+char8 __ovld __cnfn convert_char8_sat(long8);
+char8 __ovld __cnfn convert_char8_rte(ulong8);
+char8 __ovld __cnfn convert_char8_sat_rte(ulong8);
+char8 __ovld __cnfn convert_char8_rtz(ulong8);
+char8 __ovld __cnfn convert_char8_sat_rtz(ulong8);
+char8 __ovld __cnfn convert_char8_rtp(ulong8);
+char8 __ovld __cnfn convert_char8_sat_rtp(ulong8);
+char8 __ovld __cnfn convert_char8_rtn(ulong8);
+char8 __ovld __cnfn convert_char8_sat_rtn(ulong8);
+char8 __ovld __cnfn convert_char8(ulong8);
+char8 __ovld __cnfn convert_char8_sat(ulong8);
+char8 __ovld __cnfn convert_char8_rte(float8);
+char8 __ovld __cnfn convert_char8_sat_rte(float8);
+char8 __ovld __cnfn convert_char8_rtz(float8);
+char8 __ovld __cnfn convert_char8_sat_rtz(float8);
+char8 __ovld __cnfn convert_char8_rtp(float8);
+char8 __ovld __cnfn convert_char8_sat_rtp(float8);
+char8 __ovld __cnfn convert_char8_rtn(float8);
+char8 __ovld __cnfn convert_char8_sat_rtn(float8);
+char8 __ovld __cnfn convert_char8(float8);
+char8 __ovld __cnfn convert_char8_sat(float8);
+uchar8 __ovld __cnfn convert_uchar8_rte(char8);
+uchar8 __ovld __cnfn convert_uchar8_sat_rte(char8);
+uchar8 __ovld __cnfn convert_uchar8_rtz(char8);
+uchar8 __ovld __cnfn convert_uchar8_sat_rtz(char8);
+uchar8 __ovld __cnfn convert_uchar8_rtp(char8);
+uchar8 __ovld __cnfn convert_uchar8_sat_rtp(char8);
+uchar8 __ovld __cnfn convert_uchar8_rtn(char8);
+uchar8 __ovld __cnfn convert_uchar8_sat_rtn(char8);
+uchar8 __ovld __cnfn convert_uchar8(char8);
+uchar8 __ovld __cnfn convert_uchar8_sat(char8);
+uchar8 __ovld __cnfn convert_uchar8_rte(uchar8);
+uchar8 __ovld __cnfn convert_uchar8_sat_rte(uchar8);
+uchar8 __ovld __cnfn convert_uchar8_rtz(uchar8);
+uchar8 __ovld __cnfn convert_uchar8_sat_rtz(uchar8);
+uchar8 __ovld __cnfn convert_uchar8_rtp(uchar8);
+uchar8 __ovld __cnfn convert_uchar8_sat_rtp(uchar8);
+uchar8 __ovld __cnfn convert_uchar8_rtn(uchar8);
+uchar8 __ovld __cnfn convert_uchar8_sat_rtn(uchar8);
+uchar8 __ovld __cnfn convert_uchar8(uchar8);
+uchar8 __ovld __cnfn convert_uchar8_sat(uchar8);
+uchar8 __ovld __cnfn convert_uchar8_rte(short8);
+uchar8 __ovld __cnfn convert_uchar8_sat_rte(short8);
+uchar8 __ovld __cnfn convert_uchar8_rtz(short8);
+uchar8 __ovld __cnfn convert_uchar8_sat_rtz(short8);
+uchar8 __ovld __cnfn convert_uchar8_rtp(short8);
+uchar8 __ovld __cnfn convert_uchar8_sat_rtp(short8);
+uchar8 __ovld __cnfn convert_uchar8_rtn(short8);
+uchar8 __ovld __cnfn convert_uchar8_sat_rtn(short8);
+uchar8 __ovld __cnfn convert_uchar8(short8);
+uchar8 __ovld __cnfn convert_uchar8_sat(short8);
+uchar8 __ovld __cnfn convert_uchar8_rte(ushort8);
+uchar8 __ovld __cnfn convert_uchar8_sat_rte(ushort8);
+uchar8 __ovld __cnfn convert_uchar8_rtz(ushort8);
+uchar8 __ovld __cnfn convert_uchar8_sat_rtz(ushort8);
+uchar8 __ovld __cnfn convert_uchar8_rtp(ushort8);
+uchar8 __ovld __cnfn convert_uchar8_sat_rtp(ushort8);
+uchar8 __ovld __cnfn convert_uchar8_rtn(ushort8);
+uchar8 __ovld __cnfn convert_uchar8_sat_rtn(ushort8);
+uchar8 __ovld __cnfn convert_uchar8(ushort8);
+uchar8 __ovld __cnfn convert_uchar8_sat(ushort8);
+uchar8 __ovld __cnfn convert_uchar8_rte(int8);
+uchar8 __ovld __cnfn convert_uchar8_sat_rte(int8);
+uchar8 __ovld __cnfn convert_uchar8_rtz(int8);
+uchar8 __ovld __cnfn convert_uchar8_sat_rtz(int8);
+uchar8 __ovld __cnfn convert_uchar8_rtp(int8);
+uchar8 __ovld __cnfn convert_uchar8_sat_rtp(int8);
+uchar8 __ovld __cnfn convert_uchar8_rtn(int8);
+uchar8 __ovld __cnfn convert_uchar8_sat_rtn(int8);
+uchar8 __ovld __cnfn convert_uchar8(int8);
+uchar8 __ovld __cnfn convert_uchar8_sat(int8);
+uchar8 __ovld __cnfn convert_uchar8_rte(uint8);
+uchar8 __ovld __cnfn convert_uchar8_sat_rte(uint8);
+uchar8 __ovld __cnfn convert_uchar8_rtz(uint8);
+uchar8 __ovld __cnfn convert_uchar8_sat_rtz(uint8);
+uchar8 __ovld __cnfn convert_uchar8_rtp(uint8);
+uchar8 __ovld __cnfn convert_uchar8_sat_rtp(uint8);
+uchar8 __ovld __cnfn convert_uchar8_rtn(uint8);
+uchar8 __ovld __cnfn convert_uchar8_sat_rtn(uint8);
+uchar8 __ovld __cnfn convert_uchar8(uint8);
+uchar8 __ovld __cnfn convert_uchar8_sat(uint8);
+uchar8 __ovld __cnfn convert_uchar8_rte(long8);
+uchar8 __ovld __cnfn convert_uchar8_sat_rte(long8);
+uchar8 __ovld __cnfn convert_uchar8_rtz(long8);
+uchar8 __ovld __cnfn convert_uchar8_sat_rtz(long8);
+uchar8 __ovld __cnfn convert_uchar8_rtp(long8);
+uchar8 __ovld __cnfn convert_uchar8_sat_rtp(long8);
+uchar8 __ovld __cnfn convert_uchar8_rtn(long8);
+uchar8 __ovld __cnfn convert_uchar8_sat_rtn(long8);
+uchar8 __ovld __cnfn convert_uchar8(long8);
+uchar8 __ovld __cnfn convert_uchar8_sat(long8);
+uchar8 __ovld __cnfn convert_uchar8_rte(ulong8);
+uchar8 __ovld __cnfn convert_uchar8_sat_rte(ulong8);
+uchar8 __ovld __cnfn convert_uchar8_rtz(ulong8);
+uchar8 __ovld __cnfn convert_uchar8_sat_rtz(ulong8);
+uchar8 __ovld __cnfn convert_uchar8_rtp(ulong8);
+uchar8 __ovld __cnfn convert_uchar8_sat_rtp(ulong8);
+uchar8 __ovld __cnfn convert_uchar8_rtn(ulong8);
+uchar8 __ovld __cnfn convert_uchar8_sat_rtn(ulong8);
+uchar8 __ovld __cnfn convert_uchar8(ulong8);
+uchar8 __ovld __cnfn convert_uchar8_sat(ulong8);
+uchar8 __ovld __cnfn convert_uchar8_rte(float8);
+uchar8 __ovld __cnfn convert_uchar8_sat_rte(float8);
+uchar8 __ovld __cnfn convert_uchar8_rtz(float8);
+uchar8 __ovld __cnfn convert_uchar8_sat_rtz(float8);
+uchar8 __ovld __cnfn convert_uchar8_rtp(float8);
+uchar8 __ovld __cnfn convert_uchar8_sat_rtp(float8);
+uchar8 __ovld __cnfn convert_uchar8_rtn(float8);
+uchar8 __ovld __cnfn convert_uchar8_sat_rtn(float8);
+uchar8 __ovld __cnfn convert_uchar8(float8);
+uchar8 __ovld __cnfn convert_uchar8_sat(float8);
+short8 __ovld __cnfn convert_short8_rte(char8);
+short8 __ovld __cnfn convert_short8_sat_rte(char8);
+short8 __ovld __cnfn convert_short8_rtz(char8);
+short8 __ovld __cnfn convert_short8_sat_rtz(char8);
+short8 __ovld __cnfn convert_short8_rtp(char8);
+short8 __ovld __cnfn convert_short8_sat_rtp(char8);
+short8 __ovld __cnfn convert_short8_rtn(char8);
+short8 __ovld __cnfn convert_short8_sat_rtn(char8);
+short8 __ovld __cnfn convert_short8(char8);
+short8 __ovld __cnfn convert_short8_sat(char8);
+short8 __ovld __cnfn convert_short8_rte(uchar8);
+short8 __ovld __cnfn convert_short8_sat_rte(uchar8);
+short8 __ovld __cnfn convert_short8_rtz(uchar8);
+short8 __ovld __cnfn convert_short8_sat_rtz(uchar8);
+short8 __ovld __cnfn convert_short8_rtp(uchar8);
+short8 __ovld __cnfn convert_short8_sat_rtp(uchar8);
+short8 __ovld __cnfn convert_short8_rtn(uchar8);
+short8 __ovld __cnfn convert_short8_sat_rtn(uchar8);
+short8 __ovld __cnfn convert_short8(uchar8);
+short8 __ovld __cnfn convert_short8_sat(uchar8);
+short8 __ovld __cnfn convert_short8_rte(short8);
+short8 __ovld __cnfn convert_short8_sat_rte(short8);
+short8 __ovld __cnfn convert_short8_rtz(short8);
+short8 __ovld __cnfn convert_short8_sat_rtz(short8);
+short8 __ovld __cnfn convert_short8_rtp(short8);
+short8 __ovld __cnfn convert_short8_sat_rtp(short8);
+short8 __ovld __cnfn convert_short8_rtn(short8);
+short8 __ovld __cnfn convert_short8_sat_rtn(short8);
+short8 __ovld __cnfn convert_short8(short8);
+short8 __ovld __cnfn convert_short8_sat(short8);
+short8 __ovld __cnfn convert_short8_rte(ushort8);
+short8 __ovld __cnfn convert_short8_sat_rte(ushort8);
+short8 __ovld __cnfn convert_short8_rtz(ushort8);
+short8 __ovld __cnfn convert_short8_sat_rtz(ushort8);
+short8 __ovld __cnfn convert_short8_rtp(ushort8);
+short8 __ovld __cnfn convert_short8_sat_rtp(ushort8);
+short8 __ovld __cnfn convert_short8_rtn(ushort8);
+short8 __ovld __cnfn convert_short8_sat_rtn(ushort8);
+short8 __ovld __cnfn convert_short8(ushort8);
+short8 __ovld __cnfn convert_short8_sat(ushort8);
+short8 __ovld __cnfn convert_short8_rte(int8);
+short8 __ovld __cnfn convert_short8_sat_rte(int8);
+short8 __ovld __cnfn convert_short8_rtz(int8);
+short8 __ovld __cnfn convert_short8_sat_rtz(int8);
+short8 __ovld __cnfn convert_short8_rtp(int8);
+short8 __ovld __cnfn convert_short8_sat_rtp(int8);
+short8 __ovld __cnfn convert_short8_rtn(int8);
+short8 __ovld __cnfn convert_short8_sat_rtn(int8);
+short8 __ovld __cnfn convert_short8(int8);
+short8 __ovld __cnfn convert_short8_sat(int8);
+short8 __ovld __cnfn convert_short8_rte(uint8);
+short8 __ovld __cnfn convert_short8_sat_rte(uint8);
+short8 __ovld __cnfn convert_short8_rtz(uint8);
+short8 __ovld __cnfn convert_short8_sat_rtz(uint8);
+short8 __ovld __cnfn convert_short8_rtp(uint8);
+short8 __ovld __cnfn convert_short8_sat_rtp(uint8);
+short8 __ovld __cnfn convert_short8_rtn(uint8);
+short8 __ovld __cnfn convert_short8_sat_rtn(uint8);
+short8 __ovld __cnfn convert_short8(uint8);
+short8 __ovld __cnfn convert_short8_sat(uint8);
+short8 __ovld __cnfn convert_short8_rte(long8);
+short8 __ovld __cnfn convert_short8_sat_rte(long8);
+short8 __ovld __cnfn convert_short8_rtz(long8);
+short8 __ovld __cnfn convert_short8_sat_rtz(long8);
+short8 __ovld __cnfn convert_short8_rtp(long8);
+short8 __ovld __cnfn convert_short8_sat_rtp(long8);
+short8 __ovld __cnfn convert_short8_rtn(long8);
+short8 __ovld __cnfn convert_short8_sat_rtn(long8);
+short8 __ovld __cnfn convert_short8(long8);
+short8 __ovld __cnfn convert_short8_sat(long8);
+short8 __ovld __cnfn convert_short8_rte(ulong8);
+short8 __ovld __cnfn convert_short8_sat_rte(ulong8);
+short8 __ovld __cnfn convert_short8_rtz(ulong8);
+short8 __ovld __cnfn convert_short8_sat_rtz(ulong8);
+short8 __ovld __cnfn convert_short8_rtp(ulong8);
+short8 __ovld __cnfn convert_short8_sat_rtp(ulong8);
+short8 __ovld __cnfn convert_short8_rtn(ulong8);
+short8 __ovld __cnfn convert_short8_sat_rtn(ulong8);
+short8 __ovld __cnfn convert_short8(ulong8);
+short8 __ovld __cnfn convert_short8_sat(ulong8);
+short8 __ovld __cnfn convert_short8_rte(float8);
+short8 __ovld __cnfn convert_short8_sat_rte(float8);
+short8 __ovld __cnfn convert_short8_rtz(float8);
+short8 __ovld __cnfn convert_short8_sat_rtz(float8);
+short8 __ovld __cnfn convert_short8_rtp(float8);
+short8 __ovld __cnfn convert_short8_sat_rtp(float8);
+short8 __ovld __cnfn convert_short8_rtn(float8);
+short8 __ovld __cnfn convert_short8_sat_rtn(float8);
+short8 __ovld __cnfn convert_short8(float8);
+short8 __ovld __cnfn convert_short8_sat(float8);
+ushort8 __ovld __cnfn convert_ushort8_rte(char8);
+ushort8 __ovld __cnfn convert_ushort8_sat_rte(char8);
+ushort8 __ovld __cnfn convert_ushort8_rtz(char8);
+ushort8 __ovld __cnfn convert_ushort8_sat_rtz(char8);
+ushort8 __ovld __cnfn convert_ushort8_rtp(char8);
+ushort8 __ovld __cnfn convert_ushort8_sat_rtp(char8);
+ushort8 __ovld __cnfn convert_ushort8_rtn(char8);
+ushort8 __ovld __cnfn convert_ushort8_sat_rtn(char8);
+ushort8 __ovld __cnfn convert_ushort8(char8);
+ushort8 __ovld __cnfn convert_ushort8_sat(char8);
+ushort8 __ovld __cnfn convert_ushort8_rte(uchar8);
+ushort8 __ovld __cnfn convert_ushort8_sat_rte(uchar8);
+ushort8 __ovld __cnfn convert_ushort8_rtz(uchar8);
+ushort8 __ovld __cnfn convert_ushort8_sat_rtz(uchar8);
+ushort8 __ovld __cnfn convert_ushort8_rtp(uchar8);
+ushort8 __ovld __cnfn convert_ushort8_sat_rtp(uchar8);
+ushort8 __ovld __cnfn convert_ushort8_rtn(uchar8);
+ushort8 __ovld __cnfn convert_ushort8_sat_rtn(uchar8);
+ushort8 __ovld __cnfn convert_ushort8(uchar8);
+ushort8 __ovld __cnfn convert_ushort8_sat(uchar8);
+ushort8 __ovld __cnfn convert_ushort8_rte(short8);
+ushort8 __ovld __cnfn convert_ushort8_sat_rte(short8);
+ushort8 __ovld __cnfn convert_ushort8_rtz(short8);
+ushort8 __ovld __cnfn convert_ushort8_sat_rtz(short8);
+ushort8 __ovld __cnfn convert_ushort8_rtp(short8);
+ushort8 __ovld __cnfn convert_ushort8_sat_rtp(short8);
+ushort8 __ovld __cnfn convert_ushort8_rtn(short8);
+ushort8 __ovld __cnfn convert_ushort8_sat_rtn(short8);
+ushort8 __ovld __cnfn convert_ushort8(short8);
+ushort8 __ovld __cnfn convert_ushort8_sat(short8);
+ushort8 __ovld __cnfn convert_ushort8_rte(ushort8);
+ushort8 __ovld __cnfn convert_ushort8_sat_rte(ushort8);
+ushort8 __ovld __cnfn convert_ushort8_rtz(ushort8);
+ushort8 __ovld __cnfn convert_ushort8_sat_rtz(ushort8);
+ushort8 __ovld __cnfn convert_ushort8_rtp(ushort8);
+ushort8 __ovld __cnfn convert_ushort8_sat_rtp(ushort8);
+ushort8 __ovld __cnfn convert_ushort8_rtn(ushort8);
+ushort8 __ovld __cnfn convert_ushort8_sat_rtn(ushort8);
+ushort8 __ovld __cnfn convert_ushort8(ushort8);
+ushort8 __ovld __cnfn convert_ushort8_sat(ushort8);
+ushort8 __ovld __cnfn convert_ushort8_rte(int8);
+ushort8 __ovld __cnfn convert_ushort8_sat_rte(int8);
+ushort8 __ovld __cnfn convert_ushort8_rtz(int8);
+ushort8 __ovld __cnfn convert_ushort8_sat_rtz(int8);
+ushort8 __ovld __cnfn convert_ushort8_rtp(int8);
+ushort8 __ovld __cnfn convert_ushort8_sat_rtp(int8);
+ushort8 __ovld __cnfn convert_ushort8_rtn(int8);
+ushort8 __ovld __cnfn convert_ushort8_sat_rtn(int8);
+ushort8 __ovld __cnfn convert_ushort8(int8);
+ushort8 __ovld __cnfn convert_ushort8_sat(int8);
+ushort8 __ovld __cnfn convert_ushort8_rte(uint8);
+ushort8 __ovld __cnfn convert_ushort8_sat_rte(uint8);
+ushort8 __ovld __cnfn convert_ushort8_rtz(uint8);
+ushort8 __ovld __cnfn convert_ushort8_sat_rtz(uint8);
+ushort8 __ovld __cnfn convert_ushort8_rtp(uint8);
+ushort8 __ovld __cnfn convert_ushort8_sat_rtp(uint8);
+ushort8 __ovld __cnfn convert_ushort8_rtn(uint8);
+ushort8 __ovld __cnfn convert_ushort8_sat_rtn(uint8);
+ushort8 __ovld __cnfn convert_ushort8(uint8);
+ushort8 __ovld __cnfn convert_ushort8_sat(uint8);
+ushort8 __ovld __cnfn convert_ushort8_rte(long8);
+ushort8 __ovld __cnfn convert_ushort8_sat_rte(long8);
+ushort8 __ovld __cnfn convert_ushort8_rtz(long8);
+ushort8 __ovld __cnfn convert_ushort8_sat_rtz(long8);
+ushort8 __ovld __cnfn convert_ushort8_rtp(long8);
+ushort8 __ovld __cnfn convert_ushort8_sat_rtp(long8);
+ushort8 __ovld __cnfn convert_ushort8_rtn(long8);
+ushort8 __ovld __cnfn convert_ushort8_sat_rtn(long8);
+ushort8 __ovld __cnfn convert_ushort8(long8);
+ushort8 __ovld __cnfn convert_ushort8_sat(long8);
+ushort8 __ovld __cnfn convert_ushort8_rte(ulong8);
+ushort8 __ovld __cnfn convert_ushort8_sat_rte(ulong8);
+ushort8 __ovld __cnfn convert_ushort8_rtz(ulong8);
+ushort8 __ovld __cnfn convert_ushort8_sat_rtz(ulong8);
+ushort8 __ovld __cnfn convert_ushort8_rtp(ulong8);
+ushort8 __ovld __cnfn convert_ushort8_sat_rtp(ulong8);
+ushort8 __ovld __cnfn convert_ushort8_rtn(ulong8);
+ushort8 __ovld __cnfn convert_ushort8_sat_rtn(ulong8);
+ushort8 __ovld __cnfn convert_ushort8(ulong8);
+ushort8 __ovld __cnfn convert_ushort8_sat(ulong8);
+ushort8 __ovld __cnfn convert_ushort8_rte(float8);
+ushort8 __ovld __cnfn convert_ushort8_sat_rte(float8);
+ushort8 __ovld __cnfn convert_ushort8_rtz(float8);
+ushort8 __ovld __cnfn convert_ushort8_sat_rtz(float8);
+ushort8 __ovld __cnfn convert_ushort8_rtp(float8);
+ushort8 __ovld __cnfn convert_ushort8_sat_rtp(float8);
+ushort8 __ovld __cnfn convert_ushort8_rtn(float8);
+ushort8 __ovld __cnfn convert_ushort8_sat_rtn(float8);
+ushort8 __ovld __cnfn convert_ushort8(float8);
+ushort8 __ovld __cnfn convert_ushort8_sat(float8);
+int8 __ovld __cnfn convert_int8_rte(char8);
+int8 __ovld __cnfn convert_int8_sat_rte(char8);
+int8 __ovld __cnfn convert_int8_rtz(char8);
+int8 __ovld __cnfn convert_int8_sat_rtz(char8);
+int8 __ovld __cnfn convert_int8_rtp(char8);
+int8 __ovld __cnfn convert_int8_sat_rtp(char8);
+int8 __ovld __cnfn convert_int8_rtn(char8);
+int8 __ovld __cnfn convert_int8_sat_rtn(char8);
+int8 __ovld __cnfn convert_int8(char8);
+int8 __ovld __cnfn convert_int8_sat(char8);
+int8 __ovld __cnfn convert_int8_rte(uchar8);
+int8 __ovld __cnfn convert_int8_sat_rte(uchar8);
+int8 __ovld __cnfn convert_int8_rtz(uchar8);
+int8 __ovld __cnfn convert_int8_sat_rtz(uchar8);
+int8 __ovld __cnfn convert_int8_rtp(uchar8);
+int8 __ovld __cnfn convert_int8_sat_rtp(uchar8);
+int8 __ovld __cnfn convert_int8_rtn(uchar8);
+int8 __ovld __cnfn convert_int8_sat_rtn(uchar8);
+int8 __ovld __cnfn convert_int8(uchar8);
+int8 __ovld __cnfn convert_int8_sat(uchar8);
+int8 __ovld __cnfn convert_int8_rte(short8);
+int8 __ovld __cnfn convert_int8_sat_rte(short8);
+int8 __ovld __cnfn convert_int8_rtz(short8);
+int8 __ovld __cnfn convert_int8_sat_rtz(short8);
+int8 __ovld __cnfn convert_int8_rtp(short8);
+int8 __ovld __cnfn convert_int8_sat_rtp(short8);
+int8 __ovld __cnfn convert_int8_rtn(short8);
+int8 __ovld __cnfn convert_int8_sat_rtn(short8);
+int8 __ovld __cnfn convert_int8(short8);
+int8 __ovld __cnfn convert_int8_sat(short8);
+int8 __ovld __cnfn convert_int8_rte(ushort8);
+int8 __ovld __cnfn convert_int8_sat_rte(ushort8);
+int8 __ovld __cnfn convert_int8_rtz(ushort8);
+int8 __ovld __cnfn convert_int8_sat_rtz(ushort8);
+int8 __ovld __cnfn convert_int8_rtp(ushort8);
+int8 __ovld __cnfn convert_int8_sat_rtp(ushort8);
+int8 __ovld __cnfn convert_int8_rtn(ushort8);
+int8 __ovld __cnfn convert_int8_sat_rtn(ushort8);
+int8 __ovld __cnfn convert_int8(ushort8);
+int8 __ovld __cnfn convert_int8_sat(ushort8);
+int8 __ovld __cnfn convert_int8_rte(int8);
+int8 __ovld __cnfn convert_int8_sat_rte(int8);
+int8 __ovld __cnfn convert_int8_rtz(int8);
+int8 __ovld __cnfn convert_int8_sat_rtz(int8);
+int8 __ovld __cnfn convert_int8_rtp(int8);
+int8 __ovld __cnfn convert_int8_sat_rtp(int8);
+int8 __ovld __cnfn convert_int8_rtn(int8);
+int8 __ovld __cnfn convert_int8_sat_rtn(int8);
+int8 __ovld __cnfn convert_int8(int8);
+int8 __ovld __cnfn convert_int8_sat(int8);
+int8 __ovld __cnfn convert_int8_rte(uint8);
+int8 __ovld __cnfn convert_int8_sat_rte(uint8);
+int8 __ovld __cnfn convert_int8_rtz(uint8);
+int8 __ovld __cnfn convert_int8_sat_rtz(uint8);
+int8 __ovld __cnfn convert_int8_rtp(uint8);
+int8 __ovld __cnfn convert_int8_sat_rtp(uint8);
+int8 __ovld __cnfn convert_int8_rtn(uint8);
+int8 __ovld __cnfn convert_int8_sat_rtn(uint8);
+int8 __ovld __cnfn convert_int8(uint8);
+int8 __ovld __cnfn convert_int8_sat(uint8);
+int8 __ovld __cnfn convert_int8_rte(long8);
+int8 __ovld __cnfn convert_int8_sat_rte(long8);
+int8 __ovld __cnfn convert_int8_rtz(long8);
+int8 __ovld __cnfn convert_int8_sat_rtz(long8);
+int8 __ovld __cnfn convert_int8_rtp(long8);
+int8 __ovld __cnfn convert_int8_sat_rtp(long8);
+int8 __ovld __cnfn convert_int8_rtn(long8);
+int8 __ovld __cnfn convert_int8_sat_rtn(long8);
+int8 __ovld __cnfn convert_int8(long8);
+int8 __ovld __cnfn convert_int8_sat(long8);
+int8 __ovld __cnfn convert_int8_rte(ulong8);
+int8 __ovld __cnfn convert_int8_sat_rte(ulong8);
+int8 __ovld __cnfn convert_int8_rtz(ulong8);
+int8 __ovld __cnfn convert_int8_sat_rtz(ulong8);
+int8 __ovld __cnfn convert_int8_rtp(ulong8);
+int8 __ovld __cnfn convert_int8_sat_rtp(ulong8);
+int8 __ovld __cnfn convert_int8_rtn(ulong8);
+int8 __ovld __cnfn convert_int8_sat_rtn(ulong8);
+int8 __ovld __cnfn convert_int8(ulong8);
+int8 __ovld __cnfn convert_int8_sat(ulong8);
+int8 __ovld __cnfn convert_int8_rte(float8);
+int8 __ovld __cnfn convert_int8_sat_rte(float8);
+int8 __ovld __cnfn convert_int8_rtz(float8);
+int8 __ovld __cnfn convert_int8_sat_rtz(float8);
+int8 __ovld __cnfn convert_int8_rtp(float8);
+int8 __ovld __cnfn convert_int8_sat_rtp(float8);
+int8 __ovld __cnfn convert_int8_rtn(float8);
+int8 __ovld __cnfn convert_int8_sat_rtn(float8);
+int8 __ovld __cnfn convert_int8(float8);
+int8 __ovld __cnfn convert_int8_sat(float8);
+uint8 __ovld __cnfn convert_uint8_rte(char8);
+uint8 __ovld __cnfn convert_uint8_sat_rte(char8);
+uint8 __ovld __cnfn convert_uint8_rtz(char8);
+uint8 __ovld __cnfn convert_uint8_sat_rtz(char8);
+uint8 __ovld __cnfn convert_uint8_rtp(char8);
+uint8 __ovld __cnfn convert_uint8_sat_rtp(char8);
+uint8 __ovld __cnfn convert_uint8_rtn(char8);
+uint8 __ovld __cnfn convert_uint8_sat_rtn(char8);
+uint8 __ovld __cnfn convert_uint8(char8);
+uint8 __ovld __cnfn convert_uint8_sat(char8);
+uint8 __ovld __cnfn convert_uint8_rte(uchar8);
+uint8 __ovld __cnfn convert_uint8_sat_rte(uchar8);
+uint8 __ovld __cnfn convert_uint8_rtz(uchar8);
+uint8 __ovld __cnfn convert_uint8_sat_rtz(uchar8);
+uint8 __ovld __cnfn convert_uint8_rtp(uchar8);
+uint8 __ovld __cnfn convert_uint8_sat_rtp(uchar8);
+uint8 __ovld __cnfn convert_uint8_rtn(uchar8);
+uint8 __ovld __cnfn convert_uint8_sat_rtn(uchar8);
+uint8 __ovld __cnfn convert_uint8(uchar8);
+uint8 __ovld __cnfn convert_uint8_sat(uchar8);
+uint8 __ovld __cnfn convert_uint8_rte(short8);
+uint8 __ovld __cnfn convert_uint8_sat_rte(short8);
+uint8 __ovld __cnfn convert_uint8_rtz(short8);
+uint8 __ovld __cnfn convert_uint8_sat_rtz(short8);
+uint8 __ovld __cnfn convert_uint8_rtp(short8);
+uint8 __ovld __cnfn convert_uint8_sat_rtp(short8);
+uint8 __ovld __cnfn convert_uint8_rtn(short8);
+uint8 __ovld __cnfn convert_uint8_sat_rtn(short8);
+uint8 __ovld __cnfn convert_uint8(short8);
+uint8 __ovld __cnfn convert_uint8_sat(short8);
+uint8 __ovld __cnfn convert_uint8_rte(ushort8);
+uint8 __ovld __cnfn convert_uint8_sat_rte(ushort8);
+uint8 __ovld __cnfn convert_uint8_rtz(ushort8);
+uint8 __ovld __cnfn convert_uint8_sat_rtz(ushort8);
+uint8 __ovld __cnfn convert_uint8_rtp(ushort8);
+uint8 __ovld __cnfn convert_uint8_sat_rtp(ushort8);
+uint8 __ovld __cnfn convert_uint8_rtn(ushort8);
+uint8 __ovld __cnfn convert_uint8_sat_rtn(ushort8);
+uint8 __ovld __cnfn convert_uint8(ushort8);
+uint8 __ovld __cnfn convert_uint8_sat(ushort8);
+uint8 __ovld __cnfn convert_uint8_rte(int8);
+uint8 __ovld __cnfn convert_uint8_sat_rte(int8);
+uint8 __ovld __cnfn convert_uint8_rtz(int8);
+uint8 __ovld __cnfn convert_uint8_sat_rtz(int8);
+uint8 __ovld __cnfn convert_uint8_rtp(int8);
+uint8 __ovld __cnfn convert_uint8_sat_rtp(int8);
+uint8 __ovld __cnfn convert_uint8_rtn(int8);
+uint8 __ovld __cnfn convert_uint8_sat_rtn(int8);
+uint8 __ovld __cnfn convert_uint8(int8);
+uint8 __ovld __cnfn convert_uint8_sat(int8);
+uint8 __ovld __cnfn convert_uint8_rte(uint8);
+uint8 __ovld __cnfn convert_uint8_sat_rte(uint8);
+uint8 __ovld __cnfn convert_uint8_rtz(uint8);
+uint8 __ovld __cnfn convert_uint8_sat_rtz(uint8);
+uint8 __ovld __cnfn convert_uint8_rtp(uint8);
+uint8 __ovld __cnfn convert_uint8_sat_rtp(uint8);
+uint8 __ovld __cnfn convert_uint8_rtn(uint8);
+uint8 __ovld __cnfn convert_uint8_sat_rtn(uint8);
+uint8 __ovld __cnfn convert_uint8(uint8);
+uint8 __ovld __cnfn convert_uint8_sat(uint8);
+uint8 __ovld __cnfn convert_uint8_rte(long8);
+uint8 __ovld __cnfn convert_uint8_sat_rte(long8);
+uint8 __ovld __cnfn convert_uint8_rtz(long8);
+uint8 __ovld __cnfn convert_uint8_sat_rtz(long8);
+uint8 __ovld __cnfn convert_uint8_rtp(long8);
+uint8 __ovld __cnfn convert_uint8_sat_rtp(long8);
+uint8 __ovld __cnfn convert_uint8_rtn(long8);
+uint8 __ovld __cnfn convert_uint8_sat_rtn(long8);
+uint8 __ovld __cnfn convert_uint8(long8);
+uint8 __ovld __cnfn convert_uint8_sat(long8);
+uint8 __ovld __cnfn convert_uint8_rte(ulong8);
+uint8 __ovld __cnfn convert_uint8_sat_rte(ulong8);
+uint8 __ovld __cnfn convert_uint8_rtz(ulong8);
+uint8 __ovld __cnfn convert_uint8_sat_rtz(ulong8);
+uint8 __ovld __cnfn convert_uint8_rtp(ulong8);
+uint8 __ovld __cnfn convert_uint8_sat_rtp(ulong8);
+uint8 __ovld __cnfn convert_uint8_rtn(ulong8);
+uint8 __ovld __cnfn convert_uint8_sat_rtn(ulong8);
+uint8 __ovld __cnfn convert_uint8(ulong8);
+uint8 __ovld __cnfn convert_uint8_sat(ulong8);
+uint8 __ovld __cnfn convert_uint8_rte(float8);
+uint8 __ovld __cnfn convert_uint8_sat_rte(float8);
+uint8 __ovld __cnfn convert_uint8_rtz(float8);
+uint8 __ovld __cnfn convert_uint8_sat_rtz(float8);
+uint8 __ovld __cnfn convert_uint8_rtp(float8);
+uint8 __ovld __cnfn convert_uint8_sat_rtp(float8);
+uint8 __ovld __cnfn convert_uint8_rtn(float8);
+uint8 __ovld __cnfn convert_uint8_sat_rtn(float8);
+uint8 __ovld __cnfn convert_uint8(float8);
+uint8 __ovld __cnfn convert_uint8_sat(float8);
+long8 __ovld __cnfn convert_long8_rte(char8);
+long8 __ovld __cnfn convert_long8_sat_rte(char8);
+long8 __ovld __cnfn convert_long8_rtz(char8);
+long8 __ovld __cnfn convert_long8_sat_rtz(char8);
+long8 __ovld __cnfn convert_long8_rtp(char8);
+long8 __ovld __cnfn convert_long8_sat_rtp(char8);
+long8 __ovld __cnfn convert_long8_rtn(char8);
+long8 __ovld __cnfn convert_long8_sat_rtn(char8);
+long8 __ovld __cnfn convert_long8(char8);
+long8 __ovld __cnfn convert_long8_sat(char8);
+long8 __ovld __cnfn convert_long8_rte(uchar8);
+long8 __ovld __cnfn convert_long8_sat_rte(uchar8);
+long8 __ovld __cnfn convert_long8_rtz(uchar8);
+long8 __ovld __cnfn convert_long8_sat_rtz(uchar8);
+long8 __ovld __cnfn convert_long8_rtp(uchar8);
+long8 __ovld __cnfn convert_long8_sat_rtp(uchar8);
+long8 __ovld __cnfn convert_long8_rtn(uchar8);
+long8 __ovld __cnfn convert_long8_sat_rtn(uchar8);
+long8 __ovld __cnfn convert_long8(uchar8);
+long8 __ovld __cnfn convert_long8_sat(uchar8);
+long8 __ovld __cnfn convert_long8_rte(short8);
+long8 __ovld __cnfn convert_long8_sat_rte(short8);
+long8 __ovld __cnfn convert_long8_rtz(short8);
+long8 __ovld __cnfn convert_long8_sat_rtz(short8);
+long8 __ovld __cnfn convert_long8_rtp(short8);
+long8 __ovld __cnfn convert_long8_sat_rtp(short8);
+long8 __ovld __cnfn convert_long8_rtn(short8);
+long8 __ovld __cnfn convert_long8_sat_rtn(short8);
+long8 __ovld __cnfn convert_long8(short8);
+long8 __ovld __cnfn convert_long8_sat(short8);
+long8 __ovld __cnfn convert_long8_rte(ushort8);
+long8 __ovld __cnfn convert_long8_sat_rte(ushort8);
+long8 __ovld __cnfn convert_long8_rtz(ushort8);
+long8 __ovld __cnfn convert_long8_sat_rtz(ushort8);
+long8 __ovld __cnfn convert_long8_rtp(ushort8);
+long8 __ovld __cnfn convert_long8_sat_rtp(ushort8);
+long8 __ovld __cnfn convert_long8_rtn(ushort8);
+long8 __ovld __cnfn convert_long8_sat_rtn(ushort8);
+long8 __ovld __cnfn convert_long8(ushort8);
+long8 __ovld __cnfn convert_long8_sat(ushort8);
+long8 __ovld __cnfn convert_long8_rte(int8);
+long8 __ovld __cnfn convert_long8_sat_rte(int8);
+long8 __ovld __cnfn convert_long8_rtz(int8);
+long8 __ovld __cnfn convert_long8_sat_rtz(int8);
+long8 __ovld __cnfn convert_long8_rtp(int8);
+long8 __ovld __cnfn convert_long8_sat_rtp(int8);
+long8 __ovld __cnfn convert_long8_rtn(int8);
+long8 __ovld __cnfn convert_long8_sat_rtn(int8);
+long8 __ovld __cnfn convert_long8(int8);
+long8 __ovld __cnfn convert_long8_sat(int8);
+long8 __ovld __cnfn convert_long8_rte(uint8);
+long8 __ovld __cnfn convert_long8_sat_rte(uint8);
+long8 __ovld __cnfn convert_long8_rtz(uint8);
+long8 __ovld __cnfn convert_long8_sat_rtz(uint8);
+long8 __ovld __cnfn convert_long8_rtp(uint8);
+long8 __ovld __cnfn convert_long8_sat_rtp(uint8);
+long8 __ovld __cnfn convert_long8_rtn(uint8);
+long8 __ovld __cnfn convert_long8_sat_rtn(uint8);
+long8 __ovld __cnfn convert_long8(uint8);
+long8 __ovld __cnfn convert_long8_sat(uint8);
+long8 __ovld __cnfn convert_long8_rte(long8);
+long8 __ovld __cnfn convert_long8_sat_rte(long8);
+long8 __ovld __cnfn convert_long8_rtz(long8);
+long8 __ovld __cnfn convert_long8_sat_rtz(long8);
+long8 __ovld __cnfn convert_long8_rtp(long8);
+long8 __ovld __cnfn convert_long8_sat_rtp(long8);
+long8 __ovld __cnfn convert_long8_rtn(long8);
+long8 __ovld __cnfn convert_long8_sat_rtn(long8);
+long8 __ovld __cnfn convert_long8(long8);
+long8 __ovld __cnfn convert_long8_sat(long8);
+long8 __ovld __cnfn convert_long8_rte(ulong8);
+long8 __ovld __cnfn convert_long8_sat_rte(ulong8);
+long8 __ovld __cnfn convert_long8_rtz(ulong8);
+long8 __ovld __cnfn convert_long8_sat_rtz(ulong8);
+long8 __ovld __cnfn convert_long8_rtp(ulong8);
+long8 __ovld __cnfn convert_long8_sat_rtp(ulong8);
+long8 __ovld __cnfn convert_long8_rtn(ulong8);
+long8 __ovld __cnfn convert_long8_sat_rtn(ulong8);
+long8 __ovld __cnfn convert_long8(ulong8);
+long8 __ovld __cnfn convert_long8_sat(ulong8);
+long8 __ovld __cnfn convert_long8_rte(float8);
+long8 __ovld __cnfn convert_long8_sat_rte(float8);
+long8 __ovld __cnfn convert_long8_rtz(float8);
+long8 __ovld __cnfn convert_long8_sat_rtz(float8);
+long8 __ovld __cnfn convert_long8_rtp(float8);
+long8 __ovld __cnfn convert_long8_sat_rtp(float8);
+long8 __ovld __cnfn convert_long8_rtn(float8);
+long8 __ovld __cnfn convert_long8_sat_rtn(float8);
+long8 __ovld __cnfn convert_long8(float8);
+long8 __ovld __cnfn convert_long8_sat(float8);
+ulong8 __ovld __cnfn convert_ulong8_rte(char8);
+ulong8 __ovld __cnfn convert_ulong8_sat_rte(char8);
+ulong8 __ovld __cnfn convert_ulong8_rtz(char8);
+ulong8 __ovld __cnfn convert_ulong8_sat_rtz(char8);
+ulong8 __ovld __cnfn convert_ulong8_rtp(char8);
+ulong8 __ovld __cnfn convert_ulong8_sat_rtp(char8);
+ulong8 __ovld __cnfn convert_ulong8_rtn(char8);
+ulong8 __ovld __cnfn convert_ulong8_sat_rtn(char8);
+ulong8 __ovld __cnfn convert_ulong8(char8);
+ulong8 __ovld __cnfn convert_ulong8_sat(char8);
+ulong8 __ovld __cnfn convert_ulong8_rte(uchar8);
+ulong8 __ovld __cnfn convert_ulong8_sat_rte(uchar8);
+ulong8 __ovld __cnfn convert_ulong8_rtz(uchar8);
+ulong8 __ovld __cnfn convert_ulong8_sat_rtz(uchar8);
+ulong8 __ovld __cnfn convert_ulong8_rtp(uchar8);
+ulong8 __ovld __cnfn convert_ulong8_sat_rtp(uchar8);
+ulong8 __ovld __cnfn convert_ulong8_rtn(uchar8);
+ulong8 __ovld __cnfn convert_ulong8_sat_rtn(uchar8);
+ulong8 __ovld __cnfn convert_ulong8(uchar8);
+ulong8 __ovld __cnfn convert_ulong8_sat(uchar8);
+ulong8 __ovld __cnfn convert_ulong8_rte(short8);
+ulong8 __ovld __cnfn convert_ulong8_sat_rte(short8);
+ulong8 __ovld __cnfn convert_ulong8_rtz(short8);
+ulong8 __ovld __cnfn convert_ulong8_sat_rtz(short8);
+ulong8 __ovld __cnfn convert_ulong8_rtp(short8);
+ulong8 __ovld __cnfn convert_ulong8_sat_rtp(short8);
+ulong8 __ovld __cnfn convert_ulong8_rtn(short8);
+ulong8 __ovld __cnfn convert_ulong8_sat_rtn(short8);
+ulong8 __ovld __cnfn convert_ulong8(short8);
+ulong8 __ovld __cnfn convert_ulong8_sat(short8);
+ulong8 __ovld __cnfn convert_ulong8_rte(ushort8);
+ulong8 __ovld __cnfn convert_ulong8_sat_rte(ushort8);
+ulong8 __ovld __cnfn convert_ulong8_rtz(ushort8);
+ulong8 __ovld __cnfn convert_ulong8_sat_rtz(ushort8);
+ulong8 __ovld __cnfn convert_ulong8_rtp(ushort8);
+ulong8 __ovld __cnfn convert_ulong8_sat_rtp(ushort8);
+ulong8 __ovld __cnfn convert_ulong8_rtn(ushort8);
+ulong8 __ovld __cnfn convert_ulong8_sat_rtn(ushort8);
+ulong8 __ovld __cnfn convert_ulong8(ushort8);
+ulong8 __ovld __cnfn convert_ulong8_sat(ushort8);
+ulong8 __ovld __cnfn convert_ulong8_rte(int8);
+ulong8 __ovld __cnfn convert_ulong8_sat_rte(int8);
+ulong8 __ovld __cnfn convert_ulong8_rtz(int8);
+ulong8 __ovld __cnfn convert_ulong8_sat_rtz(int8);
+ulong8 __ovld __cnfn convert_ulong8_rtp(int8);
+ulong8 __ovld __cnfn convert_ulong8_sat_rtp(int8);
+ulong8 __ovld __cnfn convert_ulong8_rtn(int8);
+ulong8 __ovld __cnfn convert_ulong8_sat_rtn(int8);
+ulong8 __ovld __cnfn convert_ulong8(int8);
+ulong8 __ovld __cnfn convert_ulong8_sat(int8);
+ulong8 __ovld __cnfn convert_ulong8_rte(uint8);
+ulong8 __ovld __cnfn convert_ulong8_sat_rte(uint8);
+ulong8 __ovld __cnfn convert_ulong8_rtz(uint8);
+ulong8 __ovld __cnfn convert_ulong8_sat_rtz(uint8);
+ulong8 __ovld __cnfn convert_ulong8_rtp(uint8);
+ulong8 __ovld __cnfn convert_ulong8_sat_rtp(uint8);
+ulong8 __ovld __cnfn convert_ulong8_rtn(uint8);
+ulong8 __ovld __cnfn convert_ulong8_sat_rtn(uint8);
+ulong8 __ovld __cnfn convert_ulong8(uint8);
+ulong8 __ovld __cnfn convert_ulong8_sat(uint8);
+ulong8 __ovld __cnfn convert_ulong8_rte(long8);
+ulong8 __ovld __cnfn convert_ulong8_sat_rte(long8);
+ulong8 __ovld __cnfn convert_ulong8_rtz(long8);
+ulong8 __ovld __cnfn convert_ulong8_sat_rtz(long8);
+ulong8 __ovld __cnfn convert_ulong8_rtp(long8);
+ulong8 __ovld __cnfn convert_ulong8_sat_rtp(long8);
+ulong8 __ovld __cnfn convert_ulong8_rtn(long8);
+ulong8 __ovld __cnfn convert_ulong8_sat_rtn(long8);
+ulong8 __ovld __cnfn convert_ulong8(long8);
+ulong8 __ovld __cnfn convert_ulong8_sat(long8);
+ulong8 __ovld __cnfn convert_ulong8_rte(ulong8);
+ulong8 __ovld __cnfn convert_ulong8_sat_rte(ulong8);
+ulong8 __ovld __cnfn convert_ulong8_rtz(ulong8);
+ulong8 __ovld __cnfn convert_ulong8_sat_rtz(ulong8);
+ulong8 __ovld __cnfn convert_ulong8_rtp(ulong8);
+ulong8 __ovld __cnfn convert_ulong8_sat_rtp(ulong8);
+ulong8 __ovld __cnfn convert_ulong8_rtn(ulong8);
+ulong8 __ovld __cnfn convert_ulong8_sat_rtn(ulong8);
+ulong8 __ovld __cnfn convert_ulong8(ulong8);
+ulong8 __ovld __cnfn convert_ulong8_sat(ulong8);
+ulong8 __ovld __cnfn convert_ulong8_rte(float8);
+ulong8 __ovld __cnfn convert_ulong8_sat_rte(float8);
+ulong8 __ovld __cnfn convert_ulong8_rtz(float8);
+ulong8 __ovld __cnfn convert_ulong8_sat_rtz(float8);
+ulong8 __ovld __cnfn convert_ulong8_rtp(float8);
+ulong8 __ovld __cnfn convert_ulong8_sat_rtp(float8);
+ulong8 __ovld __cnfn convert_ulong8_rtn(float8);
+ulong8 __ovld __cnfn convert_ulong8_sat_rtn(float8);
+ulong8 __ovld __cnfn convert_ulong8(float8);
+ulong8 __ovld __cnfn convert_ulong8_sat(float8);
+float8 __ovld __cnfn convert_float8_rte(char8);
+float8 __ovld __cnfn convert_float8_rtz(char8);
+float8 __ovld __cnfn convert_float8_rtp(char8);
+float8 __ovld __cnfn convert_float8_rtn(char8);
+float8 __ovld __cnfn convert_float8(char8);
+float8 __ovld __cnfn convert_float8_rte(uchar8);
+float8 __ovld __cnfn convert_float8_rtz(uchar8);
+float8 __ovld __cnfn convert_float8_rtp(uchar8);
+float8 __ovld __cnfn convert_float8_rtn(uchar8);
+float8 __ovld __cnfn convert_float8(uchar8);
+float8 __ovld __cnfn convert_float8_rte(short8);
+float8 __ovld __cnfn convert_float8_rtz(short8);
+float8 __ovld __cnfn convert_float8_rtp(short8);
+float8 __ovld __cnfn convert_float8_rtn(short8);
+float8 __ovld __cnfn convert_float8(short8);
+float8 __ovld __cnfn convert_float8_rte(ushort8);
+float8 __ovld __cnfn convert_float8_rtz(ushort8);
+float8 __ovld __cnfn convert_float8_rtp(ushort8);
+float8 __ovld __cnfn convert_float8_rtn(ushort8);
+float8 __ovld __cnfn convert_float8(ushort8);
+float8 __ovld __cnfn convert_float8_rte(int8);
+float8 __ovld __cnfn convert_float8_rtz(int8);
+float8 __ovld __cnfn convert_float8_rtp(int8);
+float8 __ovld __cnfn convert_float8_rtn(int8);
+float8 __ovld __cnfn convert_float8(int8);
+float8 __ovld __cnfn convert_float8_rte(uint8);
+float8 __ovld __cnfn convert_float8_rtz(uint8);
+float8 __ovld __cnfn convert_float8_rtp(uint8);
+float8 __ovld __cnfn convert_float8_rtn(uint8);
+float8 __ovld __cnfn convert_float8(uint8);
+float8 __ovld __cnfn convert_float8_rte(long8);
+float8 __ovld __cnfn convert_float8_rtz(long8);
+float8 __ovld __cnfn convert_float8_rtp(long8);
+float8 __ovld __cnfn convert_float8_rtn(long8);
+float8 __ovld __cnfn convert_float8(long8);
+float8 __ovld __cnfn convert_float8_rte(ulong8);
+float8 __ovld __cnfn convert_float8_rtz(ulong8);
+float8 __ovld __cnfn convert_float8_rtp(ulong8);
+float8 __ovld __cnfn convert_float8_rtn(ulong8);
+float8 __ovld __cnfn convert_float8(ulong8);
+float8 __ovld __cnfn convert_float8_rte(float8);
+float8 __ovld __cnfn convert_float8_rtz(float8);
+float8 __ovld __cnfn convert_float8_rtp(float8);
+float8 __ovld __cnfn convert_float8_rtn(float8);
+float8 __ovld __cnfn convert_float8(float8);
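The rounding suffixes select how each lane is rounded: _rte to nearest even, _rtz toward zero, _rtp toward positive infinity, _rtn toward negative infinity. With no suffix, conversions to integer types default to _rtz and conversions to floating-point types default to _rte. A short sketch contrasting two modes (the kernel and buffer names are illustrative only):

__kernel void rounding_demo(__global const float8 *src,
                            __global int8 *floor_out,
                            __global int8 *trunc_out) {
    size_t i = get_global_id(0);
    float8 v = src[i];
    floor_out[i] = convert_int8_sat_rtn(v); // round toward -inf, like floor()
    trunc_out[i] = convert_int8_sat_rtz(v); // round toward zero, like a C cast
}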
+char16 __ovld __cnfn convert_char16_rte(char16);
+char16 __ovld __cnfn convert_char16_sat_rte(char16);
+char16 __ovld __cnfn convert_char16_rtz(char16);
+char16 __ovld __cnfn convert_char16_sat_rtz(char16);
+char16 __ovld __cnfn convert_char16_rtp(char16);
+char16 __ovld __cnfn convert_char16_sat_rtp(char16);
+char16 __ovld __cnfn convert_char16_rtn(char16);
+char16 __ovld __cnfn convert_char16_sat_rtn(char16);
+char16 __ovld __cnfn convert_char16(char16);
+char16 __ovld __cnfn convert_char16_sat(char16);
+char16 __ovld __cnfn convert_char16_rte(uchar16);
+char16 __ovld __cnfn convert_char16_sat_rte(uchar16);
+char16 __ovld __cnfn convert_char16_rtz(uchar16);
+char16 __ovld __cnfn convert_char16_sat_rtz(uchar16);
+char16 __ovld __cnfn convert_char16_rtp(uchar16);
+char16 __ovld __cnfn convert_char16_sat_rtp(uchar16);
+char16 __ovld __cnfn convert_char16_rtn(uchar16);
+char16 __ovld __cnfn convert_char16_sat_rtn(uchar16);
+char16 __ovld __cnfn convert_char16(uchar16);
+char16 __ovld __cnfn convert_char16_sat(uchar16);
+char16 __ovld __cnfn convert_char16_rte(short16);
+char16 __ovld __cnfn convert_char16_sat_rte(short16);
+char16 __ovld __cnfn convert_char16_rtz(short16);
+char16 __ovld __cnfn convert_char16_sat_rtz(short16);
+char16 __ovld __cnfn convert_char16_rtp(short16);
+char16 __ovld __cnfn convert_char16_sat_rtp(short16);
+char16 __ovld __cnfn convert_char16_rtn(short16);
+char16 __ovld __cnfn convert_char16_sat_rtn(short16);
+char16 __ovld __cnfn convert_char16(short16);
+char16 __ovld __cnfn convert_char16_sat(short16);
+char16 __ovld __cnfn convert_char16_rte(ushort16);
+char16 __ovld __cnfn convert_char16_sat_rte(ushort16);
+char16 __ovld __cnfn convert_char16_rtz(ushort16);
+char16 __ovld __cnfn convert_char16_sat_rtz(ushort16);
+char16 __ovld __cnfn convert_char16_rtp(ushort16);
+char16 __ovld __cnfn convert_char16_sat_rtp(ushort16);
+char16 __ovld __cnfn convert_char16_rtn(ushort16);
+char16 __ovld __cnfn convert_char16_sat_rtn(ushort16);
+char16 __ovld __cnfn convert_char16(ushort16);
+char16 __ovld __cnfn convert_char16_sat(ushort16);
+char16 __ovld __cnfn convert_char16_rte(int16);
+char16 __ovld __cnfn convert_char16_sat_rte(int16);
+char16 __ovld __cnfn convert_char16_rtz(int16);
+char16 __ovld __cnfn convert_char16_sat_rtz(int16);
+char16 __ovld __cnfn convert_char16_rtp(int16);
+char16 __ovld __cnfn convert_char16_sat_rtp(int16);
+char16 __ovld __cnfn convert_char16_rtn(int16);
+char16 __ovld __cnfn convert_char16_sat_rtn(int16);
+char16 __ovld __cnfn convert_char16(int16);
+char16 __ovld __cnfn convert_char16_sat(int16);
+char16 __ovld __cnfn convert_char16_rte(uint16);
+char16 __ovld __cnfn convert_char16_sat_rte(uint16);
+char16 __ovld __cnfn convert_char16_rtz(uint16);
+char16 __ovld __cnfn convert_char16_sat_rtz(uint16);
+char16 __ovld __cnfn convert_char16_rtp(uint16);
+char16 __ovld __cnfn convert_char16_sat_rtp(uint16);
+char16 __ovld __cnfn convert_char16_rtn(uint16);
+char16 __ovld __cnfn convert_char16_sat_rtn(uint16);
+char16 __ovld __cnfn convert_char16(uint16);
+char16 __ovld __cnfn convert_char16_sat(uint16);
+char16 __ovld __cnfn convert_char16_rte(long16);
+char16 __ovld __cnfn convert_char16_sat_rte(long16);
+char16 __ovld __cnfn convert_char16_rtz(long16);
+char16 __ovld __cnfn convert_char16_sat_rtz(long16);
+char16 __ovld __cnfn convert_char16_rtp(long16);
+char16 __ovld __cnfn convert_char16_sat_rtp(long16);
+char16 __ovld __cnfn convert_char16_rtn(long16);
+char16 __ovld __cnfn convert_char16_sat_rtn(long16);
+char16 __ovld __cnfn convert_char16(long16);
+char16 __ovld __cnfn convert_char16_sat(long16);
+char16 __ovld __cnfn convert_char16_rte(ulong16);
+char16 __ovld __cnfn convert_char16_sat_rte(ulong16);
+char16 __ovld __cnfn convert_char16_rtz(ulong16);
+char16 __ovld __cnfn convert_char16_sat_rtz(ulong16);
+char16 __ovld __cnfn convert_char16_rtp(ulong16);
+char16 __ovld __cnfn convert_char16_sat_rtp(ulong16);
+char16 __ovld __cnfn convert_char16_rtn(ulong16);
+char16 __ovld __cnfn convert_char16_sat_rtn(ulong16);
+char16 __ovld __cnfn convert_char16(ulong16);
+char16 __ovld __cnfn convert_char16_sat(ulong16);
+char16 __ovld __cnfn convert_char16_rte(float16);
+char16 __ovld __cnfn convert_char16_sat_rte(float16);
+char16 __ovld __cnfn convert_char16_rtz(float16);
+char16 __ovld __cnfn convert_char16_sat_rtz(float16);
+char16 __ovld __cnfn convert_char16_rtp(float16);
+char16 __ovld __cnfn convert_char16_sat_rtp(float16);
+char16 __ovld __cnfn convert_char16_rtn(float16);
+char16 __ovld __cnfn convert_char16_sat_rtn(float16);
+char16 __ovld __cnfn convert_char16(float16);
+char16 __ovld __cnfn convert_char16_sat(float16);
+uchar16 __ovld __cnfn convert_uchar16_rte(char16);
+uchar16 __ovld __cnfn convert_uchar16_sat_rte(char16);
+uchar16 __ovld __cnfn convert_uchar16_rtz(char16);
+uchar16 __ovld __cnfn convert_uchar16_sat_rtz(char16);
+uchar16 __ovld __cnfn convert_uchar16_rtp(char16);
+uchar16 __ovld __cnfn convert_uchar16_sat_rtp(char16);
+uchar16 __ovld __cnfn convert_uchar16_rtn(char16);
+uchar16 __ovld __cnfn convert_uchar16_sat_rtn(char16);
+uchar16 __ovld __cnfn convert_uchar16(char16);
+uchar16 __ovld __cnfn convert_uchar16_sat(char16);
+uchar16 __ovld __cnfn convert_uchar16_rte(uchar16);
+uchar16 __ovld __cnfn convert_uchar16_sat_rte(uchar16);
+uchar16 __ovld __cnfn convert_uchar16_rtz(uchar16);
+uchar16 __ovld __cnfn convert_uchar16_sat_rtz(uchar16);
+uchar16 __ovld __cnfn convert_uchar16_rtp(uchar16);
+uchar16 __ovld __cnfn convert_uchar16_sat_rtp(uchar16);
+uchar16 __ovld __cnfn convert_uchar16_rtn(uchar16);
+uchar16 __ovld __cnfn convert_uchar16_sat_rtn(uchar16);
+uchar16 __ovld __cnfn convert_uchar16(uchar16);
+uchar16 __ovld __cnfn convert_uchar16_sat(uchar16);
+uchar16 __ovld __cnfn convert_uchar16_rte(short16);
+uchar16 __ovld __cnfn convert_uchar16_sat_rte(short16);
+uchar16 __ovld __cnfn convert_uchar16_rtz(short16);
+uchar16 __ovld __cnfn convert_uchar16_sat_rtz(short16);
+uchar16 __ovld __cnfn convert_uchar16_rtp(short16);
+uchar16 __ovld __cnfn convert_uchar16_sat_rtp(short16);
+uchar16 __ovld __cnfn convert_uchar16_rtn(short16);
+uchar16 __ovld __cnfn convert_uchar16_sat_rtn(short16);
+uchar16 __ovld __cnfn convert_uchar16(short16);
+uchar16 __ovld __cnfn convert_uchar16_sat(short16);
+uchar16 __ovld __cnfn convert_uchar16_rte(ushort16);
+uchar16 __ovld __cnfn convert_uchar16_sat_rte(ushort16);
+uchar16 __ovld __cnfn convert_uchar16_rtz(ushort16);
+uchar16 __ovld __cnfn convert_uchar16_sat_rtz(ushort16);
+uchar16 __ovld __cnfn convert_uchar16_rtp(ushort16);
+uchar16 __ovld __cnfn convert_uchar16_sat_rtp(ushort16);
+uchar16 __ovld __cnfn convert_uchar16_rtn(ushort16);
+uchar16 __ovld __cnfn convert_uchar16_sat_rtn(ushort16);
+uchar16 __ovld __cnfn convert_uchar16(ushort16);
+uchar16 __ovld __cnfn convert_uchar16_sat(ushort16);
+uchar16 __ovld __cnfn convert_uchar16_rte(int16);
+uchar16 __ovld __cnfn convert_uchar16_sat_rte(int16);
+uchar16 __ovld __cnfn convert_uchar16_rtz(int16);
+uchar16 __ovld __cnfn convert_uchar16_sat_rtz(int16);
+uchar16 __ovld __cnfn convert_uchar16_rtp(int16);
+uchar16 __ovld __cnfn convert_uchar16_sat_rtp(int16);
+uchar16 __ovld __cnfn convert_uchar16_rtn(int16);
+uchar16 __ovld __cnfn convert_uchar16_sat_rtn(int16);
+uchar16 __ovld __cnfn convert_uchar16(int16);
+uchar16 __ovld __cnfn convert_uchar16_sat(int16);
+uchar16 __ovld __cnfn convert_uchar16_rte(uint16);
+uchar16 __ovld __cnfn convert_uchar16_sat_rte(uint16);
+uchar16 __ovld __cnfn convert_uchar16_rtz(uint16);
+uchar16 __ovld __cnfn convert_uchar16_sat_rtz(uint16);
+uchar16 __ovld __cnfn convert_uchar16_rtp(uint16);
+uchar16 __ovld __cnfn convert_uchar16_sat_rtp(uint16);
+uchar16 __ovld __cnfn convert_uchar16_rtn(uint16);
+uchar16 __ovld __cnfn convert_uchar16_sat_rtn(uint16);
+uchar16 __ovld __cnfn convert_uchar16(uint16);
+uchar16 __ovld __cnfn convert_uchar16_sat(uint16);
+uchar16 __ovld __cnfn convert_uchar16_rte(long16);
+uchar16 __ovld __cnfn convert_uchar16_sat_rte(long16);
+uchar16 __ovld __cnfn convert_uchar16_rtz(long16);
+uchar16 __ovld __cnfn convert_uchar16_sat_rtz(long16);
+uchar16 __ovld __cnfn convert_uchar16_rtp(long16);
+uchar16 __ovld __cnfn convert_uchar16_sat_rtp(long16);
+uchar16 __ovld __cnfn convert_uchar16_rtn(long16);
+uchar16 __ovld __cnfn convert_uchar16_sat_rtn(long16);
+uchar16 __ovld __cnfn convert_uchar16(long16);
+uchar16 __ovld __cnfn convert_uchar16_sat(long16);
+uchar16 __ovld __cnfn convert_uchar16_rte(ulong16);
+uchar16 __ovld __cnfn convert_uchar16_sat_rte(ulong16);
+uchar16 __ovld __cnfn convert_uchar16_rtz(ulong16);
+uchar16 __ovld __cnfn convert_uchar16_sat_rtz(ulong16);
+uchar16 __ovld __cnfn convert_uchar16_rtp(ulong16);
+uchar16 __ovld __cnfn convert_uchar16_sat_rtp(ulong16);
+uchar16 __ovld __cnfn convert_uchar16_rtn(ulong16);
+uchar16 __ovld __cnfn convert_uchar16_sat_rtn(ulong16);
+uchar16 __ovld __cnfn convert_uchar16(ulong16);
+uchar16 __ovld __cnfn convert_uchar16_sat(ulong16);
+uchar16 __ovld __cnfn convert_uchar16_rte(float16);
+uchar16 __ovld __cnfn convert_uchar16_sat_rte(float16);
+uchar16 __ovld __cnfn convert_uchar16_rtz(float16);
+uchar16 __ovld __cnfn convert_uchar16_sat_rtz(float16);
+uchar16 __ovld __cnfn convert_uchar16_rtp(float16);
+uchar16 __ovld __cnfn convert_uchar16_sat_rtp(float16);
+uchar16 __ovld __cnfn convert_uchar16_rtn(float16);
+uchar16 __ovld __cnfn convert_uchar16_sat_rtn(float16);
+uchar16 __ovld __cnfn convert_uchar16(float16);
+uchar16 __ovld __cnfn convert_uchar16_sat(float16);
+short16 __ovld __cnfn convert_short16_rte(char16);
+short16 __ovld __cnfn convert_short16_sat_rte(char16);
+short16 __ovld __cnfn convert_short16_rtz(char16);
+short16 __ovld __cnfn convert_short16_sat_rtz(char16);
+short16 __ovld __cnfn convert_short16_rtp(char16);
+short16 __ovld __cnfn convert_short16_sat_rtp(char16);
+short16 __ovld __cnfn convert_short16_rtn(char16);
+short16 __ovld __cnfn convert_short16_sat_rtn(char16);
+short16 __ovld __cnfn convert_short16(char16);
+short16 __ovld __cnfn convert_short16_sat(char16);
+short16 __ovld __cnfn convert_short16_rte(uchar16);
+short16 __ovld __cnfn convert_short16_sat_rte(uchar16);
+short16 __ovld __cnfn convert_short16_rtz(uchar16);
+short16 __ovld __cnfn convert_short16_sat_rtz(uchar16);
+short16 __ovld __cnfn convert_short16_rtp(uchar16);
+short16 __ovld __cnfn convert_short16_sat_rtp(uchar16);
+short16 __ovld __cnfn convert_short16_rtn(uchar16);
+short16 __ovld __cnfn convert_short16_sat_rtn(uchar16);
+short16 __ovld __cnfn convert_short16(uchar16);
+short16 __ovld __cnfn convert_short16_sat(uchar16);
+short16 __ovld __cnfn convert_short16_rte(short16);
+short16 __ovld __cnfn convert_short16_sat_rte(short16);
+short16 __ovld __cnfn convert_short16_rtz(short16);
+short16 __ovld __cnfn convert_short16_sat_rtz(short16);
+short16 __ovld __cnfn convert_short16_rtp(short16);
+short16 __ovld __cnfn convert_short16_sat_rtp(short16);
+short16 __ovld __cnfn convert_short16_rtn(short16);
+short16 __ovld __cnfn convert_short16_sat_rtn(short16);
+short16 __ovld __cnfn convert_short16(short16);
+short16 __ovld __cnfn convert_short16_sat(short16);
+short16 __ovld __cnfn convert_short16_rte(ushort16);
+short16 __ovld __cnfn convert_short16_sat_rte(ushort16);
+short16 __ovld __cnfn convert_short16_rtz(ushort16);
+short16 __ovld __cnfn convert_short16_sat_rtz(ushort16);
+short16 __ovld __cnfn convert_short16_rtp(ushort16);
+short16 __ovld __cnfn convert_short16_sat_rtp(ushort16);
+short16 __ovld __cnfn convert_short16_rtn(ushort16);
+short16 __ovld __cnfn convert_short16_sat_rtn(ushort16);
+short16 __ovld __cnfn convert_short16(ushort16);
+short16 __ovld __cnfn convert_short16_sat(ushort16);
+short16 __ovld __cnfn convert_short16_rte(int16);
+short16 __ovld __cnfn convert_short16_sat_rte(int16);
+short16 __ovld __cnfn convert_short16_rtz(int16);
+short16 __ovld __cnfn convert_short16_sat_rtz(int16);
+short16 __ovld __cnfn convert_short16_rtp(int16);
+short16 __ovld __cnfn convert_short16_sat_rtp(int16);
+short16 __ovld __cnfn convert_short16_rtn(int16);
+short16 __ovld __cnfn convert_short16_sat_rtn(int16);
+short16 __ovld __cnfn convert_short16(int16);
+short16 __ovld __cnfn convert_short16_sat(int16);
+short16 __ovld __cnfn convert_short16_rte(uint16);
+short16 __ovld __cnfn convert_short16_sat_rte(uint16);
+short16 __ovld __cnfn convert_short16_rtz(uint16);
+short16 __ovld __cnfn convert_short16_sat_rtz(uint16);
+short16 __ovld __cnfn convert_short16_rtp(uint16);
+short16 __ovld __cnfn convert_short16_sat_rtp(uint16);
+short16 __ovld __cnfn convert_short16_rtn(uint16);
+short16 __ovld __cnfn convert_short16_sat_rtn(uint16);
+short16 __ovld __cnfn convert_short16(uint16);
+short16 __ovld __cnfn convert_short16_sat(uint16);
+short16 __ovld __cnfn convert_short16_rte(long16);
+short16 __ovld __cnfn convert_short16_sat_rte(long16);
+short16 __ovld __cnfn convert_short16_rtz(long16);
+short16 __ovld __cnfn convert_short16_sat_rtz(long16);
+short16 __ovld __cnfn convert_short16_rtp(long16);
+short16 __ovld __cnfn convert_short16_sat_rtp(long16);
+short16 __ovld __cnfn convert_short16_rtn(long16);
+short16 __ovld __cnfn convert_short16_sat_rtn(long16);
+short16 __ovld __cnfn convert_short16(long16);
+short16 __ovld __cnfn convert_short16_sat(long16);
+short16 __ovld __cnfn convert_short16_rte(ulong16);
+short16 __ovld __cnfn convert_short16_sat_rte(ulong16);
+short16 __ovld __cnfn convert_short16_rtz(ulong16);
+short16 __ovld __cnfn convert_short16_sat_rtz(ulong16);
+short16 __ovld __cnfn convert_short16_rtp(ulong16);
+short16 __ovld __cnfn convert_short16_sat_rtp(ulong16);
+short16 __ovld __cnfn convert_short16_rtn(ulong16);
+short16 __ovld __cnfn convert_short16_sat_rtn(ulong16);
+short16 __ovld __cnfn convert_short16(ulong16);
+short16 __ovld __cnfn convert_short16_sat(ulong16);
+short16 __ovld __cnfn convert_short16_rte(float16);
+short16 __ovld __cnfn convert_short16_sat_rte(float16);
+short16 __ovld __cnfn convert_short16_rtz(float16);
+short16 __ovld __cnfn convert_short16_sat_rtz(float16);
+short16 __ovld __cnfn convert_short16_rtp(float16);
+short16 __ovld __cnfn convert_short16_sat_rtp(float16);
+short16 __ovld __cnfn convert_short16_rtn(float16);
+short16 __ovld __cnfn convert_short16_sat_rtn(float16);
+short16 __ovld __cnfn convert_short16(float16);
+short16 __ovld __cnfn convert_short16_sat(float16);
+ushort16 __ovld __cnfn convert_ushort16_rte(char16);
+ushort16 __ovld __cnfn convert_ushort16_sat_rte(char16);
+ushort16 __ovld __cnfn convert_ushort16_rtz(char16);
+ushort16 __ovld __cnfn convert_ushort16_sat_rtz(char16);
+ushort16 __ovld __cnfn convert_ushort16_rtp(char16);
+ushort16 __ovld __cnfn convert_ushort16_sat_rtp(char16);
+ushort16 __ovld __cnfn convert_ushort16_rtn(char16);
+ushort16 __ovld __cnfn convert_ushort16_sat_rtn(char16);
+ushort16 __ovld __cnfn convert_ushort16(char16);
+ushort16 __ovld __cnfn convert_ushort16_sat(char16);
+ushort16 __ovld __cnfn convert_ushort16_rte(uchar16);
+ushort16 __ovld __cnfn convert_ushort16_sat_rte(uchar16);
+ushort16 __ovld __cnfn convert_ushort16_rtz(uchar16);
+ushort16 __ovld __cnfn convert_ushort16_sat_rtz(uchar16);
+ushort16 __ovld __cnfn convert_ushort16_rtp(uchar16);
+ushort16 __ovld __cnfn convert_ushort16_sat_rtp(uchar16);
+ushort16 __ovld __cnfn convert_ushort16_rtn(uchar16);
+ushort16 __ovld __cnfn convert_ushort16_sat_rtn(uchar16);
+ushort16 __ovld __cnfn convert_ushort16(uchar16);
+ushort16 __ovld __cnfn convert_ushort16_sat(uchar16);
+ushort16 __ovld __cnfn convert_ushort16_rte(short16);
+ushort16 __ovld __cnfn convert_ushort16_sat_rte(short16);
+ushort16 __ovld __cnfn convert_ushort16_rtz(short16);
+ushort16 __ovld __cnfn convert_ushort16_sat_rtz(short16);
+ushort16 __ovld __cnfn convert_ushort16_rtp(short16);
+ushort16 __ovld __cnfn convert_ushort16_sat_rtp(short16);
+ushort16 __ovld __cnfn convert_ushort16_rtn(short16);
+ushort16 __ovld __cnfn convert_ushort16_sat_rtn(short16);
+ushort16 __ovld __cnfn convert_ushort16(short16);
+ushort16 __ovld __cnfn convert_ushort16_sat(short16);
+ushort16 __ovld __cnfn convert_ushort16_rte(ushort16);
+ushort16 __ovld __cnfn convert_ushort16_sat_rte(ushort16);
+ushort16 __ovld __cnfn convert_ushort16_rtz(ushort16);
+ushort16 __ovld __cnfn convert_ushort16_sat_rtz(ushort16);
+ushort16 __ovld __cnfn convert_ushort16_rtp(ushort16);
+ushort16 __ovld __cnfn convert_ushort16_sat_rtp(ushort16);
+ushort16 __ovld __cnfn convert_ushort16_rtn(ushort16);
+ushort16 __ovld __cnfn convert_ushort16_sat_rtn(ushort16);
+ushort16 __ovld __cnfn convert_ushort16(ushort16);
+ushort16 __ovld __cnfn convert_ushort16_sat(ushort16);
+ushort16 __ovld __cnfn convert_ushort16_rte(int16);
+ushort16 __ovld __cnfn convert_ushort16_sat_rte(int16);
+ushort16 __ovld __cnfn convert_ushort16_rtz(int16);
+ushort16 __ovld __cnfn convert_ushort16_sat_rtz(int16);
+ushort16 __ovld __cnfn convert_ushort16_rtp(int16);
+ushort16 __ovld __cnfn convert_ushort16_sat_rtp(int16);
+ushort16 __ovld __cnfn convert_ushort16_rtn(int16);
+ushort16 __ovld __cnfn convert_ushort16_sat_rtn(int16);
+ushort16 __ovld __cnfn convert_ushort16(int16);
+ushort16 __ovld __cnfn convert_ushort16_sat(int16);
+ushort16 __ovld __cnfn convert_ushort16_rte(uint16);
+ushort16 __ovld __cnfn convert_ushort16_sat_rte(uint16);
+ushort16 __ovld __cnfn convert_ushort16_rtz(uint16);
+ushort16 __ovld __cnfn convert_ushort16_sat_rtz(uint16);
+ushort16 __ovld __cnfn convert_ushort16_rtp(uint16);
+ushort16 __ovld __cnfn convert_ushort16_sat_rtp(uint16);
+ushort16 __ovld __cnfn convert_ushort16_rtn(uint16);
+ushort16 __ovld __cnfn convert_ushort16_sat_rtn(uint16);
+ushort16 __ovld __cnfn convert_ushort16(uint16);
+ushort16 __ovld __cnfn convert_ushort16_sat(uint16);
+ushort16 __ovld __cnfn convert_ushort16_rte(long16);
+ushort16 __ovld __cnfn convert_ushort16_sat_rte(long16);
+ushort16 __ovld __cnfn convert_ushort16_rtz(long16);
+ushort16 __ovld __cnfn convert_ushort16_sat_rtz(long16);
+ushort16 __ovld __cnfn convert_ushort16_rtp(long16);
+ushort16 __ovld __cnfn convert_ushort16_sat_rtp(long16);
+ushort16 __ovld __cnfn convert_ushort16_rtn(long16);
+ushort16 __ovld __cnfn convert_ushort16_sat_rtn(long16);
+ushort16 __ovld __cnfn convert_ushort16(long16);
+ushort16 __ovld __cnfn convert_ushort16_sat(long16);
+ushort16 __ovld __cnfn convert_ushort16_rte(ulong16);
+ushort16 __ovld __cnfn convert_ushort16_sat_rte(ulong16);
+ushort16 __ovld __cnfn convert_ushort16_rtz(ulong16);
+ushort16 __ovld __cnfn convert_ushort16_sat_rtz(ulong16);
+ushort16 __ovld __cnfn convert_ushort16_rtp(ulong16);
+ushort16 __ovld __cnfn convert_ushort16_sat_rtp(ulong16);
+ushort16 __ovld __cnfn convert_ushort16_rtn(ulong16);
+ushort16 __ovld __cnfn convert_ushort16_sat_rtn(ulong16);
+ushort16 __ovld __cnfn convert_ushort16(ulong16);
+ushort16 __ovld __cnfn convert_ushort16_sat(ulong16);
+ushort16 __ovld __cnfn convert_ushort16_rte(float16);
+ushort16 __ovld __cnfn convert_ushort16_sat_rte(float16);
+ushort16 __ovld __cnfn convert_ushort16_rtz(float16);
+ushort16 __ovld __cnfn convert_ushort16_sat_rtz(float16);
+ushort16 __ovld __cnfn convert_ushort16_rtp(float16);
+ushort16 __ovld __cnfn convert_ushort16_sat_rtp(float16);
+ushort16 __ovld __cnfn convert_ushort16_rtn(float16);
+ushort16 __ovld __cnfn convert_ushort16_sat_rtn(float16);
+ushort16 __ovld __cnfn convert_ushort16(float16);
+ushort16 __ovld __cnfn convert_ushort16_sat(float16);
+int16 __ovld __cnfn convert_int16_rte(char16);
+int16 __ovld __cnfn convert_int16_sat_rte(char16);
+int16 __ovld __cnfn convert_int16_rtz(char16);
+int16 __ovld __cnfn convert_int16_sat_rtz(char16);
+int16 __ovld __cnfn convert_int16_rtp(char16);
+int16 __ovld __cnfn convert_int16_sat_rtp(char16);
+int16 __ovld __cnfn convert_int16_rtn(char16);
+int16 __ovld __cnfn convert_int16_sat_rtn(char16);
+int16 __ovld __cnfn convert_int16(char16);
+int16 __ovld __cnfn convert_int16_sat(char16);
+int16 __ovld __cnfn convert_int16_rte(uchar16);
+int16 __ovld __cnfn convert_int16_sat_rte(uchar16);
+int16 __ovld __cnfn convert_int16_rtz(uchar16);
+int16 __ovld __cnfn convert_int16_sat_rtz(uchar16);
+int16 __ovld __cnfn convert_int16_rtp(uchar16);
+int16 __ovld __cnfn convert_int16_sat_rtp(uchar16);
+int16 __ovld __cnfn convert_int16_rtn(uchar16);
+int16 __ovld __cnfn convert_int16_sat_rtn(uchar16);
+int16 __ovld __cnfn convert_int16(uchar16);
+int16 __ovld __cnfn convert_int16_sat(uchar16);
+int16 __ovld __cnfn convert_int16_rte(short16);
+int16 __ovld __cnfn convert_int16_sat_rte(short16);
+int16 __ovld __cnfn convert_int16_rtz(short16);
+int16 __ovld __cnfn convert_int16_sat_rtz(short16);
+int16 __ovld __cnfn convert_int16_rtp(short16);
+int16 __ovld __cnfn convert_int16_sat_rtp(short16);
+int16 __ovld __cnfn convert_int16_rtn(short16);
+int16 __ovld __cnfn convert_int16_sat_rtn(short16);
+int16 __ovld __cnfn convert_int16(short16);
+int16 __ovld __cnfn convert_int16_sat(short16);
+int16 __ovld __cnfn convert_int16_rte(ushort16);
+int16 __ovld __cnfn convert_int16_sat_rte(ushort16);
+int16 __ovld __cnfn convert_int16_rtz(ushort16);
+int16 __ovld __cnfn convert_int16_sat_rtz(ushort16);
+int16 __ovld __cnfn convert_int16_rtp(ushort16);
+int16 __ovld __cnfn convert_int16_sat_rtp(ushort16);
+int16 __ovld __cnfn convert_int16_rtn(ushort16);
+int16 __ovld __cnfn convert_int16_sat_rtn(ushort16);
+int16 __ovld __cnfn convert_int16(ushort16);
+int16 __ovld __cnfn convert_int16_sat(ushort16);
+int16 __ovld __cnfn convert_int16_rte(int16);
+int16 __ovld __cnfn convert_int16_sat_rte(int16);
+int16 __ovld __cnfn convert_int16_rtz(int16);
+int16 __ovld __cnfn convert_int16_sat_rtz(int16);
+int16 __ovld __cnfn convert_int16_rtp(int16);
+int16 __ovld __cnfn convert_int16_sat_rtp(int16);
+int16 __ovld __cnfn convert_int16_rtn(int16);
+int16 __ovld __cnfn convert_int16_sat_rtn(int16);
+int16 __ovld __cnfn convert_int16(int16);
+int16 __ovld __cnfn convert_int16_sat(int16);
+int16 __ovld __cnfn convert_int16_rte(uint16);
+int16 __ovld __cnfn convert_int16_sat_rte(uint16);
+int16 __ovld __cnfn convert_int16_rtz(uint16);
+int16 __ovld __cnfn convert_int16_sat_rtz(uint16);
+int16 __ovld __cnfn convert_int16_rtp(uint16);
+int16 __ovld __cnfn convert_int16_sat_rtp(uint16);
+int16 __ovld __cnfn convert_int16_rtn(uint16);
+int16 __ovld __cnfn convert_int16_sat_rtn(uint16);
+int16 __ovld __cnfn convert_int16(uint16);
+int16 __ovld __cnfn convert_int16_sat(uint16);
+int16 __ovld __cnfn convert_int16_rte(long16);
+int16 __ovld __cnfn convert_int16_sat_rte(long16);
+int16 __ovld __cnfn convert_int16_rtz(long16);
+int16 __ovld __cnfn convert_int16_sat_rtz(long16);
+int16 __ovld __cnfn convert_int16_rtp(long16);
+int16 __ovld __cnfn convert_int16_sat_rtp(long16);
+int16 __ovld __cnfn convert_int16_rtn(long16);
+int16 __ovld __cnfn convert_int16_sat_rtn(long16);
+int16 __ovld __cnfn convert_int16(long16);
+int16 __ovld __cnfn convert_int16_sat(long16);
+int16 __ovld __cnfn convert_int16_rte(ulong16);
+int16 __ovld __cnfn convert_int16_sat_rte(ulong16);
+int16 __ovld __cnfn convert_int16_rtz(ulong16);
+int16 __ovld __cnfn convert_int16_sat_rtz(ulong16);
+int16 __ovld __cnfn convert_int16_rtp(ulong16);
+int16 __ovld __cnfn convert_int16_sat_rtp(ulong16);
+int16 __ovld __cnfn convert_int16_rtn(ulong16);
+int16 __ovld __cnfn convert_int16_sat_rtn(ulong16);
+int16 __ovld __cnfn convert_int16(ulong16);
+int16 __ovld __cnfn convert_int16_sat(ulong16);
+int16 __ovld __cnfn convert_int16_rte(float16);
+int16 __ovld __cnfn convert_int16_sat_rte(float16);
+int16 __ovld __cnfn convert_int16_rtz(float16);
+int16 __ovld __cnfn convert_int16_sat_rtz(float16);
+int16 __ovld __cnfn convert_int16_rtp(float16);
+int16 __ovld __cnfn convert_int16_sat_rtp(float16);
+int16 __ovld __cnfn convert_int16_rtn(float16);
+int16 __ovld __cnfn convert_int16_sat_rtn(float16);
+int16 __ovld __cnfn convert_int16(float16);
+int16 __ovld __cnfn convert_int16_sat(float16);
+uint16 __ovld __cnfn convert_uint16_rte(char16);
+uint16 __ovld __cnfn convert_uint16_sat_rte(char16);
+uint16 __ovld __cnfn convert_uint16_rtz(char16);
+uint16 __ovld __cnfn convert_uint16_sat_rtz(char16);
+uint16 __ovld __cnfn convert_uint16_rtp(char16);
+uint16 __ovld __cnfn convert_uint16_sat_rtp(char16);
+uint16 __ovld __cnfn convert_uint16_rtn(char16);
+uint16 __ovld __cnfn convert_uint16_sat_rtn(char16);
+uint16 __ovld __cnfn convert_uint16(char16);
+uint16 __ovld __cnfn convert_uint16_sat(char16);
+uint16 __ovld __cnfn convert_uint16_rte(uchar16);
+uint16 __ovld __cnfn convert_uint16_sat_rte(uchar16);
+uint16 __ovld __cnfn convert_uint16_rtz(uchar16);
+uint16 __ovld __cnfn convert_uint16_sat_rtz(uchar16);
+uint16 __ovld __cnfn convert_uint16_rtp(uchar16);
+uint16 __ovld __cnfn convert_uint16_sat_rtp(uchar16);
+uint16 __ovld __cnfn convert_uint16_rtn(uchar16);
+uint16 __ovld __cnfn convert_uint16_sat_rtn(uchar16);
+uint16 __ovld __cnfn convert_uint16(uchar16);
+uint16 __ovld __cnfn convert_uint16_sat(uchar16);
+uint16 __ovld __cnfn convert_uint16_rte(short16);
+uint16 __ovld __cnfn convert_uint16_sat_rte(short16);
+uint16 __ovld __cnfn convert_uint16_rtz(short16);
+uint16 __ovld __cnfn convert_uint16_sat_rtz(short16);
+uint16 __ovld __cnfn convert_uint16_rtp(short16);
+uint16 __ovld __cnfn convert_uint16_sat_rtp(short16);
+uint16 __ovld __cnfn convert_uint16_rtn(short16);
+uint16 __ovld __cnfn convert_uint16_sat_rtn(short16);
+uint16 __ovld __cnfn convert_uint16(short16);
+uint16 __ovld __cnfn convert_uint16_sat(short16);
+uint16 __ovld __cnfn convert_uint16_rte(ushort16);
+uint16 __ovld __cnfn convert_uint16_sat_rte(ushort16);
+uint16 __ovld __cnfn convert_uint16_rtz(ushort16);
+uint16 __ovld __cnfn convert_uint16_sat_rtz(ushort16);
+uint16 __ovld __cnfn convert_uint16_rtp(ushort16);
+uint16 __ovld __cnfn convert_uint16_sat_rtp(ushort16);
+uint16 __ovld __cnfn convert_uint16_rtn(ushort16);
+uint16 __ovld __cnfn convert_uint16_sat_rtn(ushort16);
+uint16 __ovld __cnfn convert_uint16(ushort16);
+uint16 __ovld __cnfn convert_uint16_sat(ushort16);
+uint16 __ovld __cnfn convert_uint16_rte(int16);
+uint16 __ovld __cnfn convert_uint16_sat_rte(int16);
+uint16 __ovld __cnfn convert_uint16_rtz(int16);
+uint16 __ovld __cnfn convert_uint16_sat_rtz(int16);
+uint16 __ovld __cnfn convert_uint16_rtp(int16);
+uint16 __ovld __cnfn convert_uint16_sat_rtp(int16);
+uint16 __ovld __cnfn convert_uint16_rtn(int16);
+uint16 __ovld __cnfn convert_uint16_sat_rtn(int16);
+uint16 __ovld __cnfn convert_uint16(int16);
+uint16 __ovld __cnfn convert_uint16_sat(int16);
+uint16 __ovld __cnfn convert_uint16_rte(uint16);
+uint16 __ovld __cnfn convert_uint16_sat_rte(uint16);
+uint16 __ovld __cnfn convert_uint16_rtz(uint16);
+uint16 __ovld __cnfn convert_uint16_sat_rtz(uint16);
+uint16 __ovld __cnfn convert_uint16_rtp(uint16);
+uint16 __ovld __cnfn convert_uint16_sat_rtp(uint16);
+uint16 __ovld __cnfn convert_uint16_rtn(uint16);
+uint16 __ovld __cnfn convert_uint16_sat_rtn(uint16);
+uint16 __ovld __cnfn convert_uint16(uint16);
+uint16 __ovld __cnfn convert_uint16_sat(uint16);
+uint16 __ovld __cnfn convert_uint16_rte(long16);
+uint16 __ovld __cnfn convert_uint16_sat_rte(long16);
+uint16 __ovld __cnfn convert_uint16_rtz(long16);
+uint16 __ovld __cnfn convert_uint16_sat_rtz(long16);
+uint16 __ovld __cnfn convert_uint16_rtp(long16);
+uint16 __ovld __cnfn convert_uint16_sat_rtp(long16);
+uint16 __ovld __cnfn convert_uint16_rtn(long16);
+uint16 __ovld __cnfn convert_uint16_sat_rtn(long16);
+uint16 __ovld __cnfn convert_uint16(long16);
+uint16 __ovld __cnfn convert_uint16_sat(long16);
+uint16 __ovld __cnfn convert_uint16_rte(ulong16);
+uint16 __ovld __cnfn convert_uint16_sat_rte(ulong16);
+uint16 __ovld __cnfn convert_uint16_rtz(ulong16);
+uint16 __ovld __cnfn convert_uint16_sat_rtz(ulong16);
+uint16 __ovld __cnfn convert_uint16_rtp(ulong16);
+uint16 __ovld __cnfn convert_uint16_sat_rtp(ulong16);
+uint16 __ovld __cnfn convert_uint16_rtn(ulong16);
+uint16 __ovld __cnfn convert_uint16_sat_rtn(ulong16);
+uint16 __ovld __cnfn convert_uint16(ulong16);
+uint16 __ovld __cnfn convert_uint16_sat(ulong16);
+uint16 __ovld __cnfn convert_uint16_rte(float16);
+uint16 __ovld __cnfn convert_uint16_sat_rte(float16);
+uint16 __ovld __cnfn convert_uint16_rtz(float16);
+uint16 __ovld __cnfn convert_uint16_sat_rtz(float16);
+uint16 __ovld __cnfn convert_uint16_rtp(float16);
+uint16 __ovld __cnfn convert_uint16_sat_rtp(float16);
+uint16 __ovld __cnfn convert_uint16_rtn(float16);
+uint16 __ovld __cnfn convert_uint16_sat_rtn(float16);
+uint16 __ovld __cnfn convert_uint16(float16);
+uint16 __ovld __cnfn convert_uint16_sat(float16);
+long16 __ovld __cnfn convert_long16_rte(char16);
+long16 __ovld __cnfn convert_long16_sat_rte(char16);
+long16 __ovld __cnfn convert_long16_rtz(char16);
+long16 __ovld __cnfn convert_long16_sat_rtz(char16);
+long16 __ovld __cnfn convert_long16_rtp(char16);
+long16 __ovld __cnfn convert_long16_sat_rtp(char16);
+long16 __ovld __cnfn convert_long16_rtn(char16);
+long16 __ovld __cnfn convert_long16_sat_rtn(char16);
+long16 __ovld __cnfn convert_long16(char16);
+long16 __ovld __cnfn convert_long16_sat(char16);
+long16 __ovld __cnfn convert_long16_rte(uchar16);
+long16 __ovld __cnfn convert_long16_sat_rte(uchar16);
+long16 __ovld __cnfn convert_long16_rtz(uchar16);
+long16 __ovld __cnfn convert_long16_sat_rtz(uchar16);
+long16 __ovld __cnfn convert_long16_rtp(uchar16);
+long16 __ovld __cnfn convert_long16_sat_rtp(uchar16);
+long16 __ovld __cnfn convert_long16_rtn(uchar16);
+long16 __ovld __cnfn convert_long16_sat_rtn(uchar16);
+long16 __ovld __cnfn convert_long16(uchar16);
+long16 __ovld __cnfn convert_long16_sat(uchar16);
+long16 __ovld __cnfn convert_long16_rte(short16);
+long16 __ovld __cnfn convert_long16_sat_rte(short16);
+long16 __ovld __cnfn convert_long16_rtz(short16);
+long16 __ovld __cnfn convert_long16_sat_rtz(short16);
+long16 __ovld __cnfn convert_long16_rtp(short16);
+long16 __ovld __cnfn convert_long16_sat_rtp(short16);
+long16 __ovld __cnfn convert_long16_rtn(short16);
+long16 __ovld __cnfn convert_long16_sat_rtn(short16);
+long16 __ovld __cnfn convert_long16(short16);
+long16 __ovld __cnfn convert_long16_sat(short16);
+long16 __ovld __cnfn convert_long16_rte(ushort16);
+long16 __ovld __cnfn convert_long16_sat_rte(ushort16);
+long16 __ovld __cnfn convert_long16_rtz(ushort16);
+long16 __ovld __cnfn convert_long16_sat_rtz(ushort16);
+long16 __ovld __cnfn convert_long16_rtp(ushort16);
+long16 __ovld __cnfn convert_long16_sat_rtp(ushort16);
+long16 __ovld __cnfn convert_long16_rtn(ushort16);
+long16 __ovld __cnfn convert_long16_sat_rtn(ushort16);
+long16 __ovld __cnfn convert_long16(ushort16);
+long16 __ovld __cnfn convert_long16_sat(ushort16);
+long16 __ovld __cnfn convert_long16_rte(int16);
+long16 __ovld __cnfn convert_long16_sat_rte(int16);
+long16 __ovld __cnfn convert_long16_rtz(int16);
+long16 __ovld __cnfn convert_long16_sat_rtz(int16);
+long16 __ovld __cnfn convert_long16_rtp(int16);
+long16 __ovld __cnfn convert_long16_sat_rtp(int16);
+long16 __ovld __cnfn convert_long16_rtn(int16);
+long16 __ovld __cnfn convert_long16_sat_rtn(int16);
+long16 __ovld __cnfn convert_long16(int16);
+long16 __ovld __cnfn convert_long16_sat(int16);
+long16 __ovld __cnfn convert_long16_rte(uint16);
+long16 __ovld __cnfn convert_long16_sat_rte(uint16);
+long16 __ovld __cnfn convert_long16_rtz(uint16);
+long16 __ovld __cnfn convert_long16_sat_rtz(uint16);
+long16 __ovld __cnfn convert_long16_rtp(uint16);
+long16 __ovld __cnfn convert_long16_sat_rtp(uint16);
+long16 __ovld __cnfn convert_long16_rtn(uint16);
+long16 __ovld __cnfn convert_long16_sat_rtn(uint16);
+long16 __ovld __cnfn convert_long16(uint16);
+long16 __ovld __cnfn convert_long16_sat(uint16);
+long16 __ovld __cnfn convert_long16_rte(long16);
+long16 __ovld __cnfn convert_long16_sat_rte(long16);
+long16 __ovld __cnfn convert_long16_rtz(long16);
+long16 __ovld __cnfn convert_long16_sat_rtz(long16);
+long16 __ovld __cnfn convert_long16_rtp(long16);
+long16 __ovld __cnfn convert_long16_sat_rtp(long16);
+long16 __ovld __cnfn convert_long16_rtn(long16);
+long16 __ovld __cnfn convert_long16_sat_rtn(long16);
+long16 __ovld __cnfn convert_long16(long16);
+long16 __ovld __cnfn convert_long16_sat(long16);
+long16 __ovld __cnfn convert_long16_rte(ulong16);
+long16 __ovld __cnfn convert_long16_sat_rte(ulong16);
+long16 __ovld __cnfn convert_long16_rtz(ulong16);
+long16 __ovld __cnfn convert_long16_sat_rtz(ulong16);
+long16 __ovld __cnfn convert_long16_rtp(ulong16);
+long16 __ovld __cnfn convert_long16_sat_rtp(ulong16);
+long16 __ovld __cnfn convert_long16_rtn(ulong16);
+long16 __ovld __cnfn convert_long16_sat_rtn(ulong16);
+long16 __ovld __cnfn convert_long16(ulong16);
+long16 __ovld __cnfn convert_long16_sat(ulong16);
+long16 __ovld __cnfn convert_long16_rte(float16);
+long16 __ovld __cnfn convert_long16_sat_rte(float16);
+long16 __ovld __cnfn convert_long16_rtz(float16);
+long16 __ovld __cnfn convert_long16_sat_rtz(float16);
+long16 __ovld __cnfn convert_long16_rtp(float16);
+long16 __ovld __cnfn convert_long16_sat_rtp(float16);
+long16 __ovld __cnfn convert_long16_rtn(float16);
+long16 __ovld __cnfn convert_long16_sat_rtn(float16);
+long16 __ovld __cnfn convert_long16(float16);
+long16 __ovld __cnfn convert_long16_sat(float16);
+ulong16 __ovld __cnfn convert_ulong16_rte(char16);
+ulong16 __ovld __cnfn convert_ulong16_sat_rte(char16);
+ulong16 __ovld __cnfn convert_ulong16_rtz(char16);
+ulong16 __ovld __cnfn convert_ulong16_sat_rtz(char16);
+ulong16 __ovld __cnfn convert_ulong16_rtp(char16);
+ulong16 __ovld __cnfn convert_ulong16_sat_rtp(char16);
+ulong16 __ovld __cnfn convert_ulong16_rtn(char16);
+ulong16 __ovld __cnfn convert_ulong16_sat_rtn(char16);
+ulong16 __ovld __cnfn convert_ulong16(char16);
+ulong16 __ovld __cnfn convert_ulong16_sat(char16);
+ulong16 __ovld __cnfn convert_ulong16_rte(uchar16);
+ulong16 __ovld __cnfn convert_ulong16_sat_rte(uchar16);
+ulong16 __ovld __cnfn convert_ulong16_rtz(uchar16);
+ulong16 __ovld __cnfn convert_ulong16_sat_rtz(uchar16);
+ulong16 __ovld __cnfn convert_ulong16_rtp(uchar16);
+ulong16 __ovld __cnfn convert_ulong16_sat_rtp(uchar16);
+ulong16 __ovld __cnfn convert_ulong16_rtn(uchar16);
+ulong16 __ovld __cnfn convert_ulong16_sat_rtn(uchar16);
+ulong16 __ovld __cnfn convert_ulong16(uchar16);
+ulong16 __ovld __cnfn convert_ulong16_sat(uchar16);
+ulong16 __ovld __cnfn convert_ulong16_rte(short16);
+ulong16 __ovld __cnfn convert_ulong16_sat_rte(short16);
+ulong16 __ovld __cnfn convert_ulong16_rtz(short16);
+ulong16 __ovld __cnfn convert_ulong16_sat_rtz(short16);
+ulong16 __ovld __cnfn convert_ulong16_rtp(short16);
+ulong16 __ovld __cnfn convert_ulong16_sat_rtp(short16);
+ulong16 __ovld __cnfn convert_ulong16_rtn(short16);
+ulong16 __ovld __cnfn convert_ulong16_sat_rtn(short16);
+ulong16 __ovld __cnfn convert_ulong16(short16);
+ulong16 __ovld __cnfn convert_ulong16_sat(short16);
+ulong16 __ovld __cnfn convert_ulong16_rte(ushort16);
+ulong16 __ovld __cnfn convert_ulong16_sat_rte(ushort16);
+ulong16 __ovld __cnfn convert_ulong16_rtz(ushort16);
+ulong16 __ovld __cnfn convert_ulong16_sat_rtz(ushort16);
+ulong16 __ovld __cnfn convert_ulong16_rtp(ushort16);
+ulong16 __ovld __cnfn convert_ulong16_sat_rtp(ushort16);
+ulong16 __ovld __cnfn convert_ulong16_rtn(ushort16);
+ulong16 __ovld __cnfn convert_ulong16_sat_rtn(ushort16);
+ulong16 __ovld __cnfn convert_ulong16(ushort16);
+ulong16 __ovld __cnfn convert_ulong16_sat(ushort16);
+ulong16 __ovld __cnfn convert_ulong16_rte(int16);
+ulong16 __ovld __cnfn convert_ulong16_sat_rte(int16);
+ulong16 __ovld __cnfn convert_ulong16_rtz(int16);
+ulong16 __ovld __cnfn convert_ulong16_sat_rtz(int16);
+ulong16 __ovld __cnfn convert_ulong16_rtp(int16);
+ulong16 __ovld __cnfn convert_ulong16_sat_rtp(int16);
+ulong16 __ovld __cnfn convert_ulong16_rtn(int16);
+ulong16 __ovld __cnfn convert_ulong16_sat_rtn(int16);
+ulong16 __ovld __cnfn convert_ulong16(int16);
+ulong16 __ovld __cnfn convert_ulong16_sat(int16);
+ulong16 __ovld __cnfn convert_ulong16_rte(uint16);
+ulong16 __ovld __cnfn convert_ulong16_sat_rte(uint16);
+ulong16 __ovld __cnfn convert_ulong16_rtz(uint16);
+ulong16 __ovld __cnfn convert_ulong16_sat_rtz(uint16);
+ulong16 __ovld __cnfn convert_ulong16_rtp(uint16);
+ulong16 __ovld __cnfn convert_ulong16_sat_rtp(uint16);
+ulong16 __ovld __cnfn convert_ulong16_rtn(uint16);
+ulong16 __ovld __cnfn convert_ulong16_sat_rtn(uint16);
+ulong16 __ovld __cnfn convert_ulong16(uint16);
+ulong16 __ovld __cnfn convert_ulong16_sat(uint16);
+ulong16 __ovld __cnfn convert_ulong16_rte(long16);
+ulong16 __ovld __cnfn convert_ulong16_sat_rte(long16);
+ulong16 __ovld __cnfn convert_ulong16_rtz(long16);
+ulong16 __ovld __cnfn convert_ulong16_sat_rtz(long16);
+ulong16 __ovld __cnfn convert_ulong16_rtp(long16);
+ulong16 __ovld __cnfn convert_ulong16_sat_rtp(long16);
+ulong16 __ovld __cnfn convert_ulong16_rtn(long16);
+ulong16 __ovld __cnfn convert_ulong16_sat_rtn(long16);
+ulong16 __ovld __cnfn convert_ulong16(long16);
+ulong16 __ovld __cnfn convert_ulong16_sat(long16);
+ulong16 __ovld __cnfn convert_ulong16_rte(ulong16);
+ulong16 __ovld __cnfn convert_ulong16_sat_rte(ulong16);
+ulong16 __ovld __cnfn convert_ulong16_rtz(ulong16);
+ulong16 __ovld __cnfn convert_ulong16_sat_rtz(ulong16);
+ulong16 __ovld __cnfn convert_ulong16_rtp(ulong16);
+ulong16 __ovld __cnfn convert_ulong16_sat_rtp(ulong16);
+ulong16 __ovld __cnfn convert_ulong16_rtn(ulong16);
+ulong16 __ovld __cnfn convert_ulong16_sat_rtn(ulong16);
+ulong16 __ovld __cnfn convert_ulong16(ulong16);
+ulong16 __ovld __cnfn convert_ulong16_sat(ulong16);
+ulong16 __ovld __cnfn convert_ulong16_rte(float16);
+ulong16 __ovld __cnfn convert_ulong16_sat_rte(float16);
+ulong16 __ovld __cnfn convert_ulong16_rtz(float16);
+ulong16 __ovld __cnfn convert_ulong16_sat_rtz(float16);
+ulong16 __ovld __cnfn convert_ulong16_rtp(float16);
+ulong16 __ovld __cnfn convert_ulong16_sat_rtp(float16);
+ulong16 __ovld __cnfn convert_ulong16_rtn(float16);
+ulong16 __ovld __cnfn convert_ulong16_sat_rtn(float16);
+ulong16 __ovld __cnfn convert_ulong16(float16);
+ulong16 __ovld __cnfn convert_ulong16_sat(float16);
+float16 __ovld __cnfn convert_float16_rte(char16);
+float16 __ovld __cnfn convert_float16_rtz(char16);
+float16 __ovld __cnfn convert_float16_rtp(char16);
+float16 __ovld __cnfn convert_float16_rtn(char16);
+float16 __ovld __cnfn convert_float16(char16);
+float16 __ovld __cnfn convert_float16_rte(uchar16);
+float16 __ovld __cnfn convert_float16_rtz(uchar16);
+float16 __ovld __cnfn convert_float16_rtp(uchar16);
+float16 __ovld __cnfn convert_float16_rtn(uchar16);
+float16 __ovld __cnfn convert_float16(uchar16);
+float16 __ovld __cnfn convert_float16_rte(short16);
+float16 __ovld __cnfn convert_float16_rtz(short16);
+float16 __ovld __cnfn convert_float16_rtp(short16);
+float16 __ovld __cnfn convert_float16_rtn(short16);
+float16 __ovld __cnfn convert_float16(short16);
+float16 __ovld __cnfn convert_float16_rte(ushort16);
+float16 __ovld __cnfn convert_float16_rtz(ushort16);
+float16 __ovld __cnfn convert_float16_rtp(ushort16);
+float16 __ovld __cnfn convert_float16_rtn(ushort16);
+float16 __ovld __cnfn convert_float16(ushort16);
+float16 __ovld __cnfn convert_float16_rte(int16);
+float16 __ovld __cnfn convert_float16_rtz(int16);
+float16 __ovld __cnfn convert_float16_rtp(int16);
+float16 __ovld __cnfn convert_float16_rtn(int16);
+float16 __ovld __cnfn convert_float16(int16);
+float16 __ovld __cnfn convert_float16_rte(uint16);
+float16 __ovld __cnfn convert_float16_rtz(uint16);
+float16 __ovld __cnfn convert_float16_rtp(uint16);
+float16 __ovld __cnfn convert_float16_rtn(uint16);
+float16 __ovld __cnfn convert_float16(uint16);
+float16 __ovld __cnfn convert_float16_rte(long16);
+float16 __ovld __cnfn convert_float16_rtz(long16);
+float16 __ovld __cnfn convert_float16_rtp(long16);
+float16 __ovld __cnfn convert_float16_rtn(long16);
+float16 __ovld __cnfn convert_float16(long16);
+float16 __ovld __cnfn convert_float16_rte(ulong16);
+float16 __ovld __cnfn convert_float16_rtz(ulong16);
+float16 __ovld __cnfn convert_float16_rtp(ulong16);
+float16 __ovld __cnfn convert_float16_rtn(ulong16);
+float16 __ovld __cnfn convert_float16(ulong16);
+float16 __ovld __cnfn convert_float16_rte(float16);
+float16 __ovld __cnfn convert_float16_rtz(float16);
+float16 __ovld __cnfn convert_float16_rtp(float16);
+float16 __ovld __cnfn convert_float16_rtn(float16);
+float16 __ovld __cnfn convert_float16(float16);
+
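+// Rounding-mode suffixes used throughout this section: _rte rounds to
+// nearest even, _rtz toward zero, _rtp toward positive infinity, and _rtn
+// toward negative infinity. Overloads without a suffix use the default
+// mode (_rtz for integer destinations, _rte for floating-point ones), and
+// _sat clamps out-of-range results to the destination type's range.
+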
+// Conversions with a double data type parameter or return value.
+
+#ifdef cl_khr_fp64
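+// Illustrative sketch (not part of the original header): each overload
+// below converts component-wise. For example, a saturating
+// round-toward-negative-infinity conversion from double4 to int4:
+//   double4 d = (double4)(2.9, -2.9, 3.0e10, -3.0e10);
+//   int4    i = convert_int4_sat_rtn(d);  // (2, -3, INT_MAX, INT_MIN)
+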
+char __ovld __cnfn convert_char(double);
+char __ovld __cnfn convert_char_rte(double);
+char __ovld __cnfn convert_char_rtn(double);
+char __ovld __cnfn convert_char_rtp(double);
+char __ovld __cnfn convert_char_rtz(double);
+char __ovld __cnfn convert_char_sat(double);
+char __ovld __cnfn convert_char_sat_rte(double);
+char __ovld __cnfn convert_char_sat_rtn(double);
+char __ovld __cnfn convert_char_sat_rtp(double);
+char __ovld __cnfn convert_char_sat_rtz(double);
+char2 __ovld __cnfn convert_char2(double2);
+char2 __ovld __cnfn convert_char2_rte(double2);
+char2 __ovld __cnfn convert_char2_rtn(double2);
+char2 __ovld __cnfn convert_char2_rtp(double2);
+char2 __ovld __cnfn convert_char2_rtz(double2);
+char2 __ovld __cnfn convert_char2_sat(double2);
+char2 __ovld __cnfn convert_char2_sat_rte(double2);
+char2 __ovld __cnfn convert_char2_sat_rtn(double2);
+char2 __ovld __cnfn convert_char2_sat_rtp(double2);
+char2 __ovld __cnfn convert_char2_sat_rtz(double2);
+char3 __ovld __cnfn convert_char3(double3);
+char3 __ovld __cnfn convert_char3_rte(double3);
+char3 __ovld __cnfn convert_char3_rtn(double3);
+char3 __ovld __cnfn convert_char3_rtp(double3);
+char3 __ovld __cnfn convert_char3_rtz(double3);
+char3 __ovld __cnfn convert_char3_sat(double3);
+char3 __ovld __cnfn convert_char3_sat_rte(double3);
+char3 __ovld __cnfn convert_char3_sat_rtn(double3);
+char3 __ovld __cnfn convert_char3_sat_rtp(double3);
+char3 __ovld __cnfn convert_char3_sat_rtz(double3);
+char4 __ovld __cnfn convert_char4(double4);
+char4 __ovld __cnfn convert_char4_rte(double4);
+char4 __ovld __cnfn convert_char4_rtn(double4);
+char4 __ovld __cnfn convert_char4_rtp(double4);
+char4 __ovld __cnfn convert_char4_rtz(double4);
+char4 __ovld __cnfn convert_char4_sat(double4);
+char4 __ovld __cnfn convert_char4_sat_rte(double4);
+char4 __ovld __cnfn convert_char4_sat_rtn(double4);
+char4 __ovld __cnfn convert_char4_sat_rtp(double4);
+char4 __ovld __cnfn convert_char4_sat_rtz(double4);
+char8 __ovld __cnfn convert_char8(double8);
+char8 __ovld __cnfn convert_char8_rte(double8);
+char8 __ovld __cnfn convert_char8_rtn(double8);
+char8 __ovld __cnfn convert_char8_rtp(double8);
+char8 __ovld __cnfn convert_char8_rtz(double8);
+char8 __ovld __cnfn convert_char8_sat(double8);
+char8 __ovld __cnfn convert_char8_sat_rte(double8);
+char8 __ovld __cnfn convert_char8_sat_rtn(double8);
+char8 __ovld __cnfn convert_char8_sat_rtp(double8);
+char8 __ovld __cnfn convert_char8_sat_rtz(double8);
+char16 __ovld __cnfn convert_char16(double16);
+char16 __ovld __cnfn convert_char16_rte(double16);
+char16 __ovld __cnfn convert_char16_rtn(double16);
+char16 __ovld __cnfn convert_char16_rtp(double16);
+char16 __ovld __cnfn convert_char16_rtz(double16);
+char16 __ovld __cnfn convert_char16_sat(double16);
+char16 __ovld __cnfn convert_char16_sat_rte(double16);
+char16 __ovld __cnfn convert_char16_sat_rtn(double16);
+char16 __ovld __cnfn convert_char16_sat_rtp(double16);
+char16 __ovld __cnfn convert_char16_sat_rtz(double16);
+
+uchar __ovld __cnfn convert_uchar(double);
+uchar __ovld __cnfn convert_uchar_rte(double);
+uchar __ovld __cnfn convert_uchar_rtn(double);
+uchar __ovld __cnfn convert_uchar_rtp(double);
+uchar __ovld __cnfn convert_uchar_rtz(double);
+uchar __ovld __cnfn convert_uchar_sat(double);
+uchar __ovld __cnfn convert_uchar_sat_rte(double);
+uchar __ovld __cnfn convert_uchar_sat_rtn(double);
+uchar __ovld __cnfn convert_uchar_sat_rtp(double);
+uchar __ovld __cnfn convert_uchar_sat_rtz(double);
+uchar2 __ovld __cnfn convert_uchar2(double2);
+uchar2 __ovld __cnfn convert_uchar2_rte(double2);
+uchar2 __ovld __cnfn convert_uchar2_rtn(double2);
+uchar2 __ovld __cnfn convert_uchar2_rtp(double2);
+uchar2 __ovld __cnfn convert_uchar2_rtz(double2);
+uchar2 __ovld __cnfn convert_uchar2_sat(double2);
+uchar2 __ovld __cnfn convert_uchar2_sat_rte(double2);
+uchar2 __ovld __cnfn convert_uchar2_sat_rtn(double2);
+uchar2 __ovld __cnfn convert_uchar2_sat_rtp(double2);
+uchar2 __ovld __cnfn convert_uchar2_sat_rtz(double2);
+uchar3 __ovld __cnfn convert_uchar3(double3);
+uchar3 __ovld __cnfn convert_uchar3_rte(double3);
+uchar3 __ovld __cnfn convert_uchar3_rtn(double3);
+uchar3 __ovld __cnfn convert_uchar3_rtp(double3);
+uchar3 __ovld __cnfn convert_uchar3_rtz(double3);
+uchar3 __ovld __cnfn convert_uchar3_sat(double3);
+uchar3 __ovld __cnfn convert_uchar3_sat_rte(double3);
+uchar3 __ovld __cnfn convert_uchar3_sat_rtn(double3);
+uchar3 __ovld __cnfn convert_uchar3_sat_rtp(double3);
+uchar3 __ovld __cnfn convert_uchar3_sat_rtz(double3);
+uchar4 __ovld __cnfn convert_uchar4(double4);
+uchar4 __ovld __cnfn convert_uchar4_rte(double4);
+uchar4 __ovld __cnfn convert_uchar4_rtn(double4);
+uchar4 __ovld __cnfn convert_uchar4_rtp(double4);
+uchar4 __ovld __cnfn convert_uchar4_rtz(double4);
+uchar4 __ovld __cnfn convert_uchar4_sat(double4);
+uchar4 __ovld __cnfn convert_uchar4_sat_rte(double4);
+uchar4 __ovld __cnfn convert_uchar4_sat_rtn(double4);
+uchar4 __ovld __cnfn convert_uchar4_sat_rtp(double4);
+uchar4 __ovld __cnfn convert_uchar4_sat_rtz(double4);
+uchar8 __ovld __cnfn convert_uchar8(double8);
+uchar8 __ovld __cnfn convert_uchar8_rte(double8);
+uchar8 __ovld __cnfn convert_uchar8_rtn(double8);
+uchar8 __ovld __cnfn convert_uchar8_rtp(double8);
+uchar8 __ovld __cnfn convert_uchar8_rtz(double8);
+uchar8 __ovld __cnfn convert_uchar8_sat(double8);
+uchar8 __ovld __cnfn convert_uchar8_sat_rte(double8);
+uchar8 __ovld __cnfn convert_uchar8_sat_rtn(double8);
+uchar8 __ovld __cnfn convert_uchar8_sat_rtp(double8);
+uchar8 __ovld __cnfn convert_uchar8_sat_rtz(double8);
+uchar16 __ovld __cnfn convert_uchar16(double16);
+uchar16 __ovld __cnfn convert_uchar16_rte(double16);
+uchar16 __ovld __cnfn convert_uchar16_rtn(double16);
+uchar16 __ovld __cnfn convert_uchar16_rtp(double16);
+uchar16 __ovld __cnfn convert_uchar16_rtz(double16);
+uchar16 __ovld __cnfn convert_uchar16_sat(double16);
+uchar16 __ovld __cnfn convert_uchar16_sat_rte(double16);
+uchar16 __ovld __cnfn convert_uchar16_sat_rtn(double16);
+uchar16 __ovld __cnfn convert_uchar16_sat_rtp(double16);
+uchar16 __ovld __cnfn convert_uchar16_sat_rtz(double16);
+
+short __ovld __cnfn convert_short(double);
+short __ovld __cnfn convert_short_rte(double);
+short __ovld __cnfn convert_short_rtn(double);
+short __ovld __cnfn convert_short_rtp(double);
+short __ovld __cnfn convert_short_rtz(double);
+short __ovld __cnfn convert_short_sat(double);
+short __ovld __cnfn convert_short_sat_rte(double);
+short __ovld __cnfn convert_short_sat_rtn(double);
+short __ovld __cnfn convert_short_sat_rtp(double);
+short __ovld __cnfn convert_short_sat_rtz(double);
+short2 __ovld __cnfn convert_short2(double2);
+short2 __ovld __cnfn convert_short2_rte(double2);
+short2 __ovld __cnfn convert_short2_rtn(double2);
+short2 __ovld __cnfn convert_short2_rtp(double2);
+short2 __ovld __cnfn convert_short2_rtz(double2);
+short2 __ovld __cnfn convert_short2_sat(double2);
+short2 __ovld __cnfn convert_short2_sat_rte(double2);
+short2 __ovld __cnfn convert_short2_sat_rtn(double2);
+short2 __ovld __cnfn convert_short2_sat_rtp(double2);
+short2 __ovld __cnfn convert_short2_sat_rtz(double2);
+short3 __ovld __cnfn convert_short3(double3);
+short3 __ovld __cnfn convert_short3_rte(double3);
+short3 __ovld __cnfn convert_short3_rtn(double3);
+short3 __ovld __cnfn convert_short3_rtp(double3);
+short3 __ovld __cnfn convert_short3_rtz(double3);
+short3 __ovld __cnfn convert_short3_sat(double3);
+short3 __ovld __cnfn convert_short3_sat_rte(double3);
+short3 __ovld __cnfn convert_short3_sat_rtn(double3);
+short3 __ovld __cnfn convert_short3_sat_rtp(double3);
+short3 __ovld __cnfn convert_short3_sat_rtz(double3);
+short4 __ovld __cnfn convert_short4(double4);
+short4 __ovld __cnfn convert_short4_rte(double4);
+short4 __ovld __cnfn convert_short4_rtn(double4);
+short4 __ovld __cnfn convert_short4_rtp(double4);
+short4 __ovld __cnfn convert_short4_rtz(double4);
+short4 __ovld __cnfn convert_short4_sat(double4);
+short4 __ovld __cnfn convert_short4_sat_rte(double4);
+short4 __ovld __cnfn convert_short4_sat_rtn(double4);
+short4 __ovld __cnfn convert_short4_sat_rtp(double4);
+short4 __ovld __cnfn convert_short4_sat_rtz(double4);
+short8 __ovld __cnfn convert_short8(double8);
+short8 __ovld __cnfn convert_short8_rte(double8);
+short8 __ovld __cnfn convert_short8_rtn(double8);
+short8 __ovld __cnfn convert_short8_rtp(double8);
+short8 __ovld __cnfn convert_short8_rtz(double8);
+short8 __ovld __cnfn convert_short8_sat(double8);
+short8 __ovld __cnfn convert_short8_sat_rte(double8);
+short8 __ovld __cnfn convert_short8_sat_rtn(double8);
+short8 __ovld __cnfn convert_short8_sat_rtp(double8);
+short8 __ovld __cnfn convert_short8_sat_rtz(double8);
+short16 __ovld __cnfn convert_short16(double16);
+short16 __ovld __cnfn convert_short16_rte(double16);
+short16 __ovld __cnfn convert_short16_rtn(double16);
+short16 __ovld __cnfn convert_short16_rtp(double16);
+short16 __ovld __cnfn convert_short16_rtz(double16);
+short16 __ovld __cnfn convert_short16_sat(double16);
+short16 __ovld __cnfn convert_short16_sat_rte(double16);
+short16 __ovld __cnfn convert_short16_sat_rtn(double16);
+short16 __ovld __cnfn convert_short16_sat_rtp(double16);
+short16 __ovld __cnfn convert_short16_sat_rtz(double16);
+
+ushort __ovld __cnfn convert_ushort(double);
+ushort __ovld __cnfn convert_ushort_rte(double);
+ushort __ovld __cnfn convert_ushort_rtn(double);
+ushort __ovld __cnfn convert_ushort_rtp(double);
+ushort __ovld __cnfn convert_ushort_rtz(double);
+ushort __ovld __cnfn convert_ushort_sat(double);
+ushort __ovld __cnfn convert_ushort_sat_rte(double);
+ushort __ovld __cnfn convert_ushort_sat_rtn(double);
+ushort __ovld __cnfn convert_ushort_sat_rtp(double);
+ushort __ovld __cnfn convert_ushort_sat_rtz(double);
+ushort2 __ovld __cnfn convert_ushort2(double2);
+ushort2 __ovld __cnfn convert_ushort2_rte(double2);
+ushort2 __ovld __cnfn convert_ushort2_rtn(double2);
+ushort2 __ovld __cnfn convert_ushort2_rtp(double2);
+ushort2 __ovld __cnfn convert_ushort2_rtz(double2);
+ushort2 __ovld __cnfn convert_ushort2_sat(double2);
+ushort2 __ovld __cnfn convert_ushort2_sat_rte(double2);
+ushort2 __ovld __cnfn convert_ushort2_sat_rtn(double2);
+ushort2 __ovld __cnfn convert_ushort2_sat_rtp(double2);
+ushort2 __ovld __cnfn convert_ushort2_sat_rtz(double2);
+ushort3 __ovld __cnfn convert_ushort3(double3);
+ushort3 __ovld __cnfn convert_ushort3_rte(double3);
+ushort3 __ovld __cnfn convert_ushort3_rtn(double3);
+ushort3 __ovld __cnfn convert_ushort3_rtp(double3);
+ushort3 __ovld __cnfn convert_ushort3_rtz(double3);
+ushort3 __ovld __cnfn convert_ushort3_sat(double3);
+ushort3 __ovld __cnfn convert_ushort3_sat_rte(double3);
+ushort3 __ovld __cnfn convert_ushort3_sat_rtn(double3);
+ushort3 __ovld __cnfn convert_ushort3_sat_rtp(double3);
+ushort3 __ovld __cnfn convert_ushort3_sat_rtz(double3);
+ushort4 __ovld __cnfn convert_ushort4(double4);
+ushort4 __ovld __cnfn convert_ushort4_rte(double4);
+ushort4 __ovld __cnfn convert_ushort4_rtn(double4);
+ushort4 __ovld __cnfn convert_ushort4_rtp(double4);
+ushort4 __ovld __cnfn convert_ushort4_rtz(double4);
+ushort4 __ovld __cnfn convert_ushort4_sat(double4);
+ushort4 __ovld __cnfn convert_ushort4_sat_rte(double4);
+ushort4 __ovld __cnfn convert_ushort4_sat_rtn(double4);
+ushort4 __ovld __cnfn convert_ushort4_sat_rtp(double4);
+ushort4 __ovld __cnfn convert_ushort4_sat_rtz(double4);
+ushort8 __ovld __cnfn convert_ushort8(double8);
+ushort8 __ovld __cnfn convert_ushort8_rte(double8);
+ushort8 __ovld __cnfn convert_ushort8_rtn(double8);
+ushort8 __ovld __cnfn convert_ushort8_rtp(double8);
+ushort8 __ovld __cnfn convert_ushort8_rtz(double8);
+ushort8 __ovld __cnfn convert_ushort8_sat(double8);
+ushort8 __ovld __cnfn convert_ushort8_sat_rte(double8);
+ushort8 __ovld __cnfn convert_ushort8_sat_rtn(double8);
+ushort8 __ovld __cnfn convert_ushort8_sat_rtp(double8);
+ushort8 __ovld __cnfn convert_ushort8_sat_rtz(double8);
+ushort16 __ovld __cnfn convert_ushort16(double16);
+ushort16 __ovld __cnfn convert_ushort16_rte(double16);
+ushort16 __ovld __cnfn convert_ushort16_rtn(double16);
+ushort16 __ovld __cnfn convert_ushort16_rtp(double16);
+ushort16 __ovld __cnfn convert_ushort16_rtz(double16);
+ushort16 __ovld __cnfn convert_ushort16_sat(double16);
+ushort16 __ovld __cnfn convert_ushort16_sat_rte(double16);
+ushort16 __ovld __cnfn convert_ushort16_sat_rtn(double16);
+ushort16 __ovld __cnfn convert_ushort16_sat_rtp(double16);
+ushort16 __ovld __cnfn convert_ushort16_sat_rtz(double16);
+
+int __ovld __cnfn convert_int(double);
+int __ovld __cnfn convert_int_rte(double);
+int __ovld __cnfn convert_int_rtn(double);
+int __ovld __cnfn convert_int_rtp(double);
+int __ovld __cnfn convert_int_rtz(double);
+int __ovld __cnfn convert_int_sat(double);
+int __ovld __cnfn convert_int_sat_rte(double);
+int __ovld __cnfn convert_int_sat_rtn(double);
+int __ovld __cnfn convert_int_sat_rtp(double);
+int __ovld __cnfn convert_int_sat_rtz(double);
+int2 __ovld __cnfn convert_int2(double2);
+int2 __ovld __cnfn convert_int2_rte(double2);
+int2 __ovld __cnfn convert_int2_rtn(double2);
+int2 __ovld __cnfn convert_int2_rtp(double2);
+int2 __ovld __cnfn convert_int2_rtz(double2);
+int2 __ovld __cnfn convert_int2_sat(double2);
+int2 __ovld __cnfn convert_int2_sat_rte(double2);
+int2 __ovld __cnfn convert_int2_sat_rtn(double2);
+int2 __ovld __cnfn convert_int2_sat_rtp(double2);
+int2 __ovld __cnfn convert_int2_sat_rtz(double2);
+int3 __ovld __cnfn convert_int3(double3);
+int3 __ovld __cnfn convert_int3_rte(double3);
+int3 __ovld __cnfn convert_int3_rtn(double3);
+int3 __ovld __cnfn convert_int3_rtp(double3);
+int3 __ovld __cnfn convert_int3_rtz(double3);
+int3 __ovld __cnfn convert_int3_sat(double3);
+int3 __ovld __cnfn convert_int3_sat_rte(double3);
+int3 __ovld __cnfn convert_int3_sat_rtn(double3);
+int3 __ovld __cnfn convert_int3_sat_rtp(double3);
+int3 __ovld __cnfn convert_int3_sat_rtz(double3);
+int4 __ovld __cnfn convert_int4(double4);
+int4 __ovld __cnfn convert_int4_rte(double4);
+int4 __ovld __cnfn convert_int4_rtn(double4);
+int4 __ovld __cnfn convert_int4_rtp(double4);
+int4 __ovld __cnfn convert_int4_rtz(double4);
+int4 __ovld __cnfn convert_int4_sat(double4);
+int4 __ovld __cnfn convert_int4_sat_rte(double4);
+int4 __ovld __cnfn convert_int4_sat_rtn(double4);
+int4 __ovld __cnfn convert_int4_sat_rtp(double4);
+int4 __ovld __cnfn convert_int4_sat_rtz(double4);
+int8 __ovld __cnfn convert_int8(double8);
+int8 __ovld __cnfn convert_int8_rte(double8);
+int8 __ovld __cnfn convert_int8_rtn(double8);
+int8 __ovld __cnfn convert_int8_rtp(double8);
+int8 __ovld __cnfn convert_int8_rtz(double8);
+int8 __ovld __cnfn convert_int8_sat(double8);
+int8 __ovld __cnfn convert_int8_sat_rte(double8);
+int8 __ovld __cnfn convert_int8_sat_rtn(double8);
+int8 __ovld __cnfn convert_int8_sat_rtp(double8);
+int8 __ovld __cnfn convert_int8_sat_rtz(double8);
+int16 __ovld __cnfn convert_int16(double16);
+int16 __ovld __cnfn convert_int16_rte(double16);
+int16 __ovld __cnfn convert_int16_rtn(double16);
+int16 __ovld __cnfn convert_int16_rtp(double16);
+int16 __ovld __cnfn convert_int16_rtz(double16);
+int16 __ovld __cnfn convert_int16_sat(double16);
+int16 __ovld __cnfn convert_int16_sat_rte(double16);
+int16 __ovld __cnfn convert_int16_sat_rtn(double16);
+int16 __ovld __cnfn convert_int16_sat_rtp(double16);
+int16 __ovld __cnfn convert_int16_sat_rtz(double16);
+
+uint __ovld __cnfn convert_uint(double);
+uint __ovld __cnfn convert_uint_rte(double);
+uint __ovld __cnfn convert_uint_rtn(double);
+uint __ovld __cnfn convert_uint_rtp(double);
+uint __ovld __cnfn convert_uint_rtz(double);
+uint __ovld __cnfn convert_uint_sat(double);
+uint __ovld __cnfn convert_uint_sat_rte(double);
+uint __ovld __cnfn convert_uint_sat_rtn(double);
+uint __ovld __cnfn convert_uint_sat_rtp(double);
+uint __ovld __cnfn convert_uint_sat_rtz(double);
+uint2 __ovld __cnfn convert_uint2(double2);
+uint2 __ovld __cnfn convert_uint2_rte(double2);
+uint2 __ovld __cnfn convert_uint2_rtn(double2);
+uint2 __ovld __cnfn convert_uint2_rtp(double2);
+uint2 __ovld __cnfn convert_uint2_rtz(double2);
+uint2 __ovld __cnfn convert_uint2_sat(double2);
+uint2 __ovld __cnfn convert_uint2_sat_rte(double2);
+uint2 __ovld __cnfn convert_uint2_sat_rtn(double2);
+uint2 __ovld __cnfn convert_uint2_sat_rtp(double2);
+uint2 __ovld __cnfn convert_uint2_sat_rtz(double2);
+uint3 __ovld __cnfn convert_uint3(double3);
+uint3 __ovld __cnfn convert_uint3_rte(double3);
+uint3 __ovld __cnfn convert_uint3_rtn(double3);
+uint3 __ovld __cnfn convert_uint3_rtp(double3);
+uint3 __ovld __cnfn convert_uint3_rtz(double3);
+uint3 __ovld __cnfn convert_uint3_sat(double3);
+uint3 __ovld __cnfn convert_uint3_sat_rte(double3);
+uint3 __ovld __cnfn convert_uint3_sat_rtn(double3);
+uint3 __ovld __cnfn convert_uint3_sat_rtp(double3);
+uint3 __ovld __cnfn convert_uint3_sat_rtz(double3);
+uint4 __ovld __cnfn convert_uint4(double4);
+uint4 __ovld __cnfn convert_uint4_rte(double4);
+uint4 __ovld __cnfn convert_uint4_rtn(double4);
+uint4 __ovld __cnfn convert_uint4_rtp(double4);
+uint4 __ovld __cnfn convert_uint4_rtz(double4);
+uint4 __ovld __cnfn convert_uint4_sat(double4);
+uint4 __ovld __cnfn convert_uint4_sat_rte(double4);
+uint4 __ovld __cnfn convert_uint4_sat_rtn(double4);
+uint4 __ovld __cnfn convert_uint4_sat_rtp(double4);
+uint4 __ovld __cnfn convert_uint4_sat_rtz(double4);
+uint8 __ovld __cnfn convert_uint8(double8);
+uint8 __ovld __cnfn convert_uint8_rte(double8);
+uint8 __ovld __cnfn convert_uint8_rtn(double8);
+uint8 __ovld __cnfn convert_uint8_rtp(double8);
+uint8 __ovld __cnfn convert_uint8_rtz(double8);
+uint8 __ovld __cnfn convert_uint8_sat(double8);
+uint8 __ovld __cnfn convert_uint8_sat_rte(double8);
+uint8 __ovld __cnfn convert_uint8_sat_rtn(double8);
+uint8 __ovld __cnfn convert_uint8_sat_rtp(double8);
+uint8 __ovld __cnfn convert_uint8_sat_rtz(double8);
+uint16 __ovld __cnfn convert_uint16(double16);
+uint16 __ovld __cnfn convert_uint16_rte(double16);
+uint16 __ovld __cnfn convert_uint16_rtn(double16);
+uint16 __ovld __cnfn convert_uint16_rtp(double16);
+uint16 __ovld __cnfn convert_uint16_rtz(double16);
+uint16 __ovld __cnfn convert_uint16_sat(double16);
+uint16 __ovld __cnfn convert_uint16_sat_rte(double16);
+uint16 __ovld __cnfn convert_uint16_sat_rtn(double16);
+uint16 __ovld __cnfn convert_uint16_sat_rtp(double16);
+uint16 __ovld __cnfn convert_uint16_sat_rtz(double16);
+
+long __ovld __cnfn convert_long(double);
+long __ovld __cnfn convert_long_rte(double);
+long __ovld __cnfn convert_long_rtn(double);
+long __ovld __cnfn convert_long_rtp(double);
+long __ovld __cnfn convert_long_rtz(double);
+long __ovld __cnfn convert_long_sat(double);
+long __ovld __cnfn convert_long_sat_rte(double);
+long __ovld __cnfn convert_long_sat_rtn(double);
+long __ovld __cnfn convert_long_sat_rtp(double);
+long __ovld __cnfn convert_long_sat_rtz(double);
+long2 __ovld __cnfn convert_long2(double2);
+long2 __ovld __cnfn convert_long2_rte(double2);
+long2 __ovld __cnfn convert_long2_rtn(double2);
+long2 __ovld __cnfn convert_long2_rtp(double2);
+long2 __ovld __cnfn convert_long2_rtz(double2);
+long2 __ovld __cnfn convert_long2_sat(double2);
+long2 __ovld __cnfn convert_long2_sat_rte(double2);
+long2 __ovld __cnfn convert_long2_sat_rtn(double2);
+long2 __ovld __cnfn convert_long2_sat_rtp(double2);
+long2 __ovld __cnfn convert_long2_sat_rtz(double2);
+long3 __ovld __cnfn convert_long3(double3);
+long3 __ovld __cnfn convert_long3_rte(double3);
+long3 __ovld __cnfn convert_long3_rtn(double3);
+long3 __ovld __cnfn convert_long3_rtp(double3);
+long3 __ovld __cnfn convert_long3_rtz(double3);
+long3 __ovld __cnfn convert_long3_sat(double3);
+long3 __ovld __cnfn convert_long3_sat_rte(double3);
+long3 __ovld __cnfn convert_long3_sat_rtn(double3);
+long3 __ovld __cnfn convert_long3_sat_rtp(double3);
+long3 __ovld __cnfn convert_long3_sat_rtz(double3);
+long4 __ovld __cnfn convert_long4(double4);
+long4 __ovld __cnfn convert_long4_rte(double4);
+long4 __ovld __cnfn convert_long4_rtn(double4);
+long4 __ovld __cnfn convert_long4_rtp(double4);
+long4 __ovld __cnfn convert_long4_rtz(double4);
+long4 __ovld __cnfn convert_long4_sat(double4);
+long4 __ovld __cnfn convert_long4_sat_rte(double4);
+long4 __ovld __cnfn convert_long4_sat_rtn(double4);
+long4 __ovld __cnfn convert_long4_sat_rtp(double4);
+long4 __ovld __cnfn convert_long4_sat_rtz(double4);
+long8 __ovld __cnfn convert_long8(double8);
+long8 __ovld __cnfn convert_long8_rte(double8);
+long8 __ovld __cnfn convert_long8_rtn(double8);
+long8 __ovld __cnfn convert_long8_rtp(double8);
+long8 __ovld __cnfn convert_long8_rtz(double8);
+long8 __ovld __cnfn convert_long8_sat(double8);
+long8 __ovld __cnfn convert_long8_sat_rte(double8);
+long8 __ovld __cnfn convert_long8_sat_rtn(double8);
+long8 __ovld __cnfn convert_long8_sat_rtp(double8);
+long8 __ovld __cnfn convert_long8_sat_rtz(double8);
+long16 __ovld __cnfn convert_long16(double16);
+long16 __ovld __cnfn convert_long16_rte(double16);
+long16 __ovld __cnfn convert_long16_rtn(double16);
+long16 __ovld __cnfn convert_long16_rtp(double16);
+long16 __ovld __cnfn convert_long16_rtz(double16);
+long16 __ovld __cnfn convert_long16_sat(double16);
+long16 __ovld __cnfn convert_long16_sat_rte(double16);
+long16 __ovld __cnfn convert_long16_sat_rtn(double16);
+long16 __ovld __cnfn convert_long16_sat_rtp(double16);
+long16 __ovld __cnfn convert_long16_sat_rtz(double16);
+
+ulong __ovld __cnfn convert_ulong(double);
+ulong __ovld __cnfn convert_ulong_rte(double);
+ulong __ovld __cnfn convert_ulong_rtn(double);
+ulong __ovld __cnfn convert_ulong_rtp(double);
+ulong __ovld __cnfn convert_ulong_rtz(double);
+ulong __ovld __cnfn convert_ulong_sat(double);
+ulong __ovld __cnfn convert_ulong_sat_rte(double);
+ulong __ovld __cnfn convert_ulong_sat_rtn(double);
+ulong __ovld __cnfn convert_ulong_sat_rtp(double);
+ulong __ovld __cnfn convert_ulong_sat_rtz(double);
+ulong2 __ovld __cnfn convert_ulong2(double2);
+ulong2 __ovld __cnfn convert_ulong2_rte(double2);
+ulong2 __ovld __cnfn convert_ulong2_rtn(double2);
+ulong2 __ovld __cnfn convert_ulong2_rtp(double2);
+ulong2 __ovld __cnfn convert_ulong2_rtz(double2);
+ulong2 __ovld __cnfn convert_ulong2_sat(double2);
+ulong2 __ovld __cnfn convert_ulong2_sat_rte(double2);
+ulong2 __ovld __cnfn convert_ulong2_sat_rtn(double2);
+ulong2 __ovld __cnfn convert_ulong2_sat_rtp(double2);
+ulong2 __ovld __cnfn convert_ulong2_sat_rtz(double2);
+ulong3 __ovld __cnfn convert_ulong3(double3);
+ulong3 __ovld __cnfn convert_ulong3_rte(double3);
+ulong3 __ovld __cnfn convert_ulong3_rtn(double3);
+ulong3 __ovld __cnfn convert_ulong3_rtp(double3);
+ulong3 __ovld __cnfn convert_ulong3_rtz(double3);
+ulong3 __ovld __cnfn convert_ulong3_sat(double3);
+ulong3 __ovld __cnfn convert_ulong3_sat_rte(double3);
+ulong3 __ovld __cnfn convert_ulong3_sat_rtn(double3);
+ulong3 __ovld __cnfn convert_ulong3_sat_rtp(double3);
+ulong3 __ovld __cnfn convert_ulong3_sat_rtz(double3);
+ulong4 __ovld __cnfn convert_ulong4(double4);
+ulong4 __ovld __cnfn convert_ulong4_rte(double4);
+ulong4 __ovld __cnfn convert_ulong4_rtn(double4);
+ulong4 __ovld __cnfn convert_ulong4_rtp(double4);
+ulong4 __ovld __cnfn convert_ulong4_rtz(double4);
+ulong4 __ovld __cnfn convert_ulong4_sat(double4);
+ulong4 __ovld __cnfn convert_ulong4_sat_rte(double4);
+ulong4 __ovld __cnfn convert_ulong4_sat_rtn(double4);
+ulong4 __ovld __cnfn convert_ulong4_sat_rtp(double4);
+ulong4 __ovld __cnfn convert_ulong4_sat_rtz(double4);
+ulong8 __ovld __cnfn convert_ulong8(double8);
+ulong8 __ovld __cnfn convert_ulong8_rte(double8);
+ulong8 __ovld __cnfn convert_ulong8_rtn(double8);
+ulong8 __ovld __cnfn convert_ulong8_rtp(double8);
+ulong8 __ovld __cnfn convert_ulong8_rtz(double8);
+ulong8 __ovld __cnfn convert_ulong8_sat(double8);
+ulong8 __ovld __cnfn convert_ulong8_sat_rte(double8);
+ulong8 __ovld __cnfn convert_ulong8_sat_rtn(double8);
+ulong8 __ovld __cnfn convert_ulong8_sat_rtp(double8);
+ulong8 __ovld __cnfn convert_ulong8_sat_rtz(double8);
+ulong16 __ovld __cnfn convert_ulong16(double16);
+ulong16 __ovld __cnfn convert_ulong16_rte(double16);
+ulong16 __ovld __cnfn convert_ulong16_rtn(double16);
+ulong16 __ovld __cnfn convert_ulong16_rtp(double16);
+ulong16 __ovld __cnfn convert_ulong16_rtz(double16);
+ulong16 __ovld __cnfn convert_ulong16_sat(double16);
+ulong16 __ovld __cnfn convert_ulong16_sat_rte(double16);
+ulong16 __ovld __cnfn convert_ulong16_sat_rtn(double16);
+ulong16 __ovld __cnfn convert_ulong16_sat_rtp(double16);
+ulong16 __ovld __cnfn convert_ulong16_sat_rtz(double16);
+
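+// Conversions to float and double carry no _sat variants, because the
+// _sat modifier is defined only for conversions to integer destination
+// types.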
+float __ovld __cnfn convert_float(double);
+float __ovld __cnfn convert_float_rte(double);
+float __ovld __cnfn convert_float_rtn(double);
+float __ovld __cnfn convert_float_rtp(double);
+float __ovld __cnfn convert_float_rtz(double);
+float2 __ovld __cnfn convert_float2(double2);
+float2 __ovld __cnfn convert_float2_rte(double2);
+float2 __ovld __cnfn convert_float2_rtn(double2);
+float2 __ovld __cnfn convert_float2_rtp(double2);
+float2 __ovld __cnfn convert_float2_rtz(double2);
+float3 __ovld __cnfn convert_float3(double3);
+float3 __ovld __cnfn convert_float3_rte(double3);
+float3 __ovld __cnfn convert_float3_rtn(double3);
+float3 __ovld __cnfn convert_float3_rtp(double3);
+float3 __ovld __cnfn convert_float3_rtz(double3);
+float4 __ovld __cnfn convert_float4(double4);
+float4 __ovld __cnfn convert_float4_rte(double4);
+float4 __ovld __cnfn convert_float4_rtn(double4);
+float4 __ovld __cnfn convert_float4_rtp(double4);
+float4 __ovld __cnfn convert_float4_rtz(double4);
+float8 __ovld __cnfn convert_float8(double8);
+float8 __ovld __cnfn convert_float8_rte(double8);
+float8 __ovld __cnfn convert_float8_rtn(double8);
+float8 __ovld __cnfn convert_float8_rtp(double8);
+float8 __ovld __cnfn convert_float8_rtz(double8);
+float16 __ovld __cnfn convert_float16(double16);
+float16 __ovld __cnfn convert_float16_rte(double16);
+float16 __ovld __cnfn convert_float16_rtn(double16);
+float16 __ovld __cnfn convert_float16_rtp(double16);
+float16 __ovld __cnfn convert_float16_rtz(double16);
+
+double __ovld __cnfn convert_double(char);
+double __ovld __cnfn convert_double(double);
+double __ovld __cnfn convert_double(float);
+double __ovld __cnfn convert_double(int);
+double __ovld __cnfn convert_double(long);
+double __ovld __cnfn convert_double(short);
+double __ovld __cnfn convert_double(uchar);
+double __ovld __cnfn convert_double(uint);
+double __ovld __cnfn convert_double(ulong);
+double __ovld __cnfn convert_double(ushort);
+double __ovld __cnfn convert_double_rte(char);
+double __ovld __cnfn convert_double_rte(double);
+double __ovld __cnfn convert_double_rte(float);
+double __ovld __cnfn convert_double_rte(int);
+double __ovld __cnfn convert_double_rte(long);
+double __ovld __cnfn convert_double_rte(short);
+double __ovld __cnfn convert_double_rte(uchar);
+double __ovld __cnfn convert_double_rte(uint);
+double __ovld __cnfn convert_double_rte(ulong);
+double __ovld __cnfn convert_double_rte(ushort);
+double __ovld __cnfn convert_double_rtn(char);
+double __ovld __cnfn convert_double_rtn(double);
+double __ovld __cnfn convert_double_rtn(float);
+double __ovld __cnfn convert_double_rtn(int);
+double __ovld __cnfn convert_double_rtn(long);
+double __ovld __cnfn convert_double_rtn(short);
+double __ovld __cnfn convert_double_rtn(uchar);
+double __ovld __cnfn convert_double_rtn(uint);
+double __ovld __cnfn convert_double_rtn(ulong);
+double __ovld __cnfn convert_double_rtn(ushort);
+double __ovld __cnfn convert_double_rtp(char);
+double __ovld __cnfn convert_double_rtp(double);
+double __ovld __cnfn convert_double_rtp(float);
+double __ovld __cnfn convert_double_rtp(int);
+double __ovld __cnfn convert_double_rtp(long);
+double __ovld __cnfn convert_double_rtp(short);
+double __ovld __cnfn convert_double_rtp(uchar);
+double __ovld __cnfn convert_double_rtp(uint);
+double __ovld __cnfn convert_double_rtp(ulong);
+double __ovld __cnfn convert_double_rtp(ushort);
+double __ovld __cnfn convert_double_rtz(char);
+double __ovld __cnfn convert_double_rtz(double);
+double __ovld __cnfn convert_double_rtz(float);
+double __ovld __cnfn convert_double_rtz(int);
+double __ovld __cnfn convert_double_rtz(long);
+double __ovld __cnfn convert_double_rtz(short);
+double __ovld __cnfn convert_double_rtz(uchar);
+double __ovld __cnfn convert_double_rtz(uint);
+double __ovld __cnfn convert_double_rtz(ulong);
+double __ovld __cnfn convert_double_rtz(ushort);
+double2 __ovld __cnfn convert_double2(char2);
+double2 __ovld __cnfn convert_double2(double2);
+double2 __ovld __cnfn convert_double2(float2);
+double2 __ovld __cnfn convert_double2(int2);
+double2 __ovld __cnfn convert_double2(long2);
+double2 __ovld __cnfn convert_double2(short2);
+double2 __ovld __cnfn convert_double2(uchar2);
+double2 __ovld __cnfn convert_double2(uint2);
+double2 __ovld __cnfn convert_double2(ulong2);
+double2 __ovld __cnfn convert_double2(ushort2);
+double2 __ovld __cnfn convert_double2_rte(char2);
+double2 __ovld __cnfn convert_double2_rte(double2);
+double2 __ovld __cnfn convert_double2_rte(float2);
+double2 __ovld __cnfn convert_double2_rte(int2);
+double2 __ovld __cnfn convert_double2_rte(long2);
+double2 __ovld __cnfn convert_double2_rte(short2);
+double2 __ovld __cnfn convert_double2_rte(uchar2);
+double2 __ovld __cnfn convert_double2_rte(uint2);
+double2 __ovld __cnfn convert_double2_rte(ulong2);
+double2 __ovld __cnfn convert_double2_rte(ushort2);
+double2 __ovld __cnfn convert_double2_rtn(char2);
+double2 __ovld __cnfn convert_double2_rtn(double2);
+double2 __ovld __cnfn convert_double2_rtn(float2);
+double2 __ovld __cnfn convert_double2_rtn(int2);
+double2 __ovld __cnfn convert_double2_rtn(long2);
+double2 __ovld __cnfn convert_double2_rtn(short2);
+double2 __ovld __cnfn convert_double2_rtn(uchar2);
+double2 __ovld __cnfn convert_double2_rtn(uint2);
+double2 __ovld __cnfn convert_double2_rtn(ulong2);
+double2 __ovld __cnfn convert_double2_rtn(ushort2);
+double2 __ovld __cnfn convert_double2_rtp(char2);
+double2 __ovld __cnfn convert_double2_rtp(double2);
+double2 __ovld __cnfn convert_double2_rtp(float2);
+double2 __ovld __cnfn convert_double2_rtp(int2);
+double2 __ovld __cnfn convert_double2_rtp(long2);
+double2 __ovld __cnfn convert_double2_rtp(short2);
+double2 __ovld __cnfn convert_double2_rtp(uchar2);
+double2 __ovld __cnfn convert_double2_rtp(uint2);
+double2 __ovld __cnfn convert_double2_rtp(ulong2);
+double2 __ovld __cnfn convert_double2_rtp(ushort2);
+double2 __ovld __cnfn convert_double2_rtz(char2);
+double2 __ovld __cnfn convert_double2_rtz(double2);
+double2 __ovld __cnfn convert_double2_rtz(float2);
+double2 __ovld __cnfn convert_double2_rtz(int2);
+double2 __ovld __cnfn convert_double2_rtz(long2);
+double2 __ovld __cnfn convert_double2_rtz(short2);
+double2 __ovld __cnfn convert_double2_rtz(uchar2);
+double2 __ovld __cnfn convert_double2_rtz(uint2);
+double2 __ovld __cnfn convert_double2_rtz(ulong2);
+double2 __ovld __cnfn convert_double2_rtz(ushort2);
+double3 __ovld __cnfn convert_double3(char3);
+double3 __ovld __cnfn convert_double3(double3);
+double3 __ovld __cnfn convert_double3(float3);
+double3 __ovld __cnfn convert_double3(int3);
+double3 __ovld __cnfn convert_double3(long3);
+double3 __ovld __cnfn convert_double3(short3);
+double3 __ovld __cnfn convert_double3(uchar3);
+double3 __ovld __cnfn convert_double3(uint3);
+double3 __ovld __cnfn convert_double3(ulong3);
+double3 __ovld __cnfn convert_double3(ushort3);
+double3 __ovld __cnfn convert_double3_rte(char3);
+double3 __ovld __cnfn convert_double3_rte(double3);
+double3 __ovld __cnfn convert_double3_rte(float3);
+double3 __ovld __cnfn convert_double3_rte(int3);
+double3 __ovld __cnfn convert_double3_rte(long3);
+double3 __ovld __cnfn convert_double3_rte(short3);
+double3 __ovld __cnfn convert_double3_rte(uchar3);
+double3 __ovld __cnfn convert_double3_rte(uint3);
+double3 __ovld __cnfn convert_double3_rte(ulong3);
+double3 __ovld __cnfn convert_double3_rte(ushort3);
+double3 __ovld __cnfn convert_double3_rtn(char3);
+double3 __ovld __cnfn convert_double3_rtn(double3);
+double3 __ovld __cnfn convert_double3_rtn(float3);
+double3 __ovld __cnfn convert_double3_rtn(int3);
+double3 __ovld __cnfn convert_double3_rtn(long3);
+double3 __ovld __cnfn convert_double3_rtn(short3);
+double3 __ovld __cnfn convert_double3_rtn(uchar3);
+double3 __ovld __cnfn convert_double3_rtn(uint3);
+double3 __ovld __cnfn convert_double3_rtn(ulong3);
+double3 __ovld __cnfn convert_double3_rtn(ushort3);
+double3 __ovld __cnfn convert_double3_rtp(char3);
+double3 __ovld __cnfn convert_double3_rtp(double3);
+double3 __ovld __cnfn convert_double3_rtp(float3);
+double3 __ovld __cnfn convert_double3_rtp(int3);
+double3 __ovld __cnfn convert_double3_rtp(long3);
+double3 __ovld __cnfn convert_double3_rtp(short3);
+double3 __ovld __cnfn convert_double3_rtp(uchar3);
+double3 __ovld __cnfn convert_double3_rtp(uint3);
+double3 __ovld __cnfn convert_double3_rtp(ulong3);
+double3 __ovld __cnfn convert_double3_rtp(ushort3);
+double3 __ovld __cnfn convert_double3_rtz(char3);
+double3 __ovld __cnfn convert_double3_rtz(double3);
+double3 __ovld __cnfn convert_double3_rtz(float3);
+double3 __ovld __cnfn convert_double3_rtz(int3);
+double3 __ovld __cnfn convert_double3_rtz(long3);
+double3 __ovld __cnfn convert_double3_rtz(short3);
+double3 __ovld __cnfn convert_double3_rtz(uchar3);
+double3 __ovld __cnfn convert_double3_rtz(uint3);
+double3 __ovld __cnfn convert_double3_rtz(ulong3);
+double3 __ovld __cnfn convert_double3_rtz(ushort3);
+double4 __ovld __cnfn convert_double4(char4);
+double4 __ovld __cnfn convert_double4(double4);
+double4 __ovld __cnfn convert_double4(float4);
+double4 __ovld __cnfn convert_double4(int4);
+double4 __ovld __cnfn convert_double4(long4);
+double4 __ovld __cnfn convert_double4(short4);
+double4 __ovld __cnfn convert_double4(uchar4);
+double4 __ovld __cnfn convert_double4(uint4);
+double4 __ovld __cnfn convert_double4(ulong4);
+double4 __ovld __cnfn convert_double4(ushort4);
+double4 __ovld __cnfn convert_double4_rte(char4);
+double4 __ovld __cnfn convert_double4_rte(double4);
+double4 __ovld __cnfn convert_double4_rte(float4);
+double4 __ovld __cnfn convert_double4_rte(int4);
+double4 __ovld __cnfn convert_double4_rte(long4);
+double4 __ovld __cnfn convert_double4_rte(short4);
+double4 __ovld __cnfn convert_double4_rte(uchar4);
+double4 __ovld __cnfn convert_double4_rte(uint4);
+double4 __ovld __cnfn convert_double4_rte(ulong4);
+double4 __ovld __cnfn convert_double4_rte(ushort4);
+double4 __ovld __cnfn convert_double4_rtn(char4);
+double4 __ovld __cnfn convert_double4_rtn(double4);
+double4 __ovld __cnfn convert_double4_rtn(float4);
+double4 __ovld __cnfn convert_double4_rtn(int4);
+double4 __ovld __cnfn convert_double4_rtn(long4);
+double4 __ovld __cnfn convert_double4_rtn(short4);
+double4 __ovld __cnfn convert_double4_rtn(uchar4);
+double4 __ovld __cnfn convert_double4_rtn(uint4);
+double4 __ovld __cnfn convert_double4_rtn(ulong4);
+double4 __ovld __cnfn convert_double4_rtn(ushort4);
+double4 __ovld __cnfn convert_double4_rtp(char4);
+double4 __ovld __cnfn convert_double4_rtp(double4);
+double4 __ovld __cnfn convert_double4_rtp(float4);
+double4 __ovld __cnfn convert_double4_rtp(int4);
+double4 __ovld __cnfn convert_double4_rtp(long4);
+double4 __ovld __cnfn convert_double4_rtp(short4);
+double4 __ovld __cnfn convert_double4_rtp(uchar4);
+double4 __ovld __cnfn convert_double4_rtp(uint4);
+double4 __ovld __cnfn convert_double4_rtp(ulong4);
+double4 __ovld __cnfn convert_double4_rtp(ushort4);
+double4 __ovld __cnfn convert_double4_rtz(char4);
+double4 __ovld __cnfn convert_double4_rtz(double4);
+double4 __ovld __cnfn convert_double4_rtz(float4);
+double4 __ovld __cnfn convert_double4_rtz(int4);
+double4 __ovld __cnfn convert_double4_rtz(long4);
+double4 __ovld __cnfn convert_double4_rtz(short4);
+double4 __ovld __cnfn convert_double4_rtz(uchar4);
+double4 __ovld __cnfn convert_double4_rtz(uint4);
+double4 __ovld __cnfn convert_double4_rtz(ulong4);
+double4 __ovld __cnfn convert_double4_rtz(ushort4);
+double8 __ovld __cnfn convert_double8(char8);
+double8 __ovld __cnfn convert_double8(double8);
+double8 __ovld __cnfn convert_double8(float8);
+double8 __ovld __cnfn convert_double8(int8);
+double8 __ovld __cnfn convert_double8(long8);
+double8 __ovld __cnfn convert_double8(short8);
+double8 __ovld __cnfn convert_double8(uchar8);
+double8 __ovld __cnfn convert_double8(uint8);
+double8 __ovld __cnfn convert_double8(ulong8);
+double8 __ovld __cnfn convert_double8(ushort8);
+double8 __ovld __cnfn convert_double8_rte(char8);
+double8 __ovld __cnfn convert_double8_rte(double8);
+double8 __ovld __cnfn convert_double8_rte(float8);
+double8 __ovld __cnfn convert_double8_rte(int8);
+double8 __ovld __cnfn convert_double8_rte(long8);
+double8 __ovld __cnfn convert_double8_rte(short8);
+double8 __ovld __cnfn convert_double8_rte(uchar8);
+double8 __ovld __cnfn convert_double8_rte(uint8);
+double8 __ovld __cnfn convert_double8_rte(ulong8);
+double8 __ovld __cnfn convert_double8_rte(ushort8);
+double8 __ovld __cnfn convert_double8_rtn(char8);
+double8 __ovld __cnfn convert_double8_rtn(double8);
+double8 __ovld __cnfn convert_double8_rtn(float8);
+double8 __ovld __cnfn convert_double8_rtn(int8);
+double8 __ovld __cnfn convert_double8_rtn(long8);
+double8 __ovld __cnfn convert_double8_rtn(short8);
+double8 __ovld __cnfn convert_double8_rtn(uchar8);
+double8 __ovld __cnfn convert_double8_rtn(uint8);
+double8 __ovld __cnfn convert_double8_rtn(ulong8);
+double8 __ovld __cnfn convert_double8_rtn(ushort8);
+double8 __ovld __cnfn convert_double8_rtp(char8);
+double8 __ovld __cnfn convert_double8_rtp(double8);
+double8 __ovld __cnfn convert_double8_rtp(float8);
+double8 __ovld __cnfn convert_double8_rtp(int8);
+double8 __ovld __cnfn convert_double8_rtp(long8);
+double8 __ovld __cnfn convert_double8_rtp(short8);
+double8 __ovld __cnfn convert_double8_rtp(uchar8);
+double8 __ovld __cnfn convert_double8_rtp(uint8);
+double8 __ovld __cnfn convert_double8_rtp(ulong8);
+double8 __ovld __cnfn convert_double8_rtp(ushort8);
+double8 __ovld __cnfn convert_double8_rtz(char8);
+double8 __ovld __cnfn convert_double8_rtz(double8);
+double8 __ovld __cnfn convert_double8_rtz(float8);
+double8 __ovld __cnfn convert_double8_rtz(int8);
+double8 __ovld __cnfn convert_double8_rtz(long8);
+double8 __ovld __cnfn convert_double8_rtz(short8);
+double8 __ovld __cnfn convert_double8_rtz(uchar8);
+double8 __ovld __cnfn convert_double8_rtz(uint8);
+double8 __ovld __cnfn convert_double8_rtz(ulong8);
+double8 __ovld __cnfn convert_double8_rtz(ushort8);
+double16 __ovld __cnfn convert_double16(char16);
+double16 __ovld __cnfn convert_double16(double16);
+double16 __ovld __cnfn convert_double16(float16);
+double16 __ovld __cnfn convert_double16(int16);
+double16 __ovld __cnfn convert_double16(long16);
+double16 __ovld __cnfn convert_double16(short16);
+double16 __ovld __cnfn convert_double16(uchar16);
+double16 __ovld __cnfn convert_double16(uint16);
+double16 __ovld __cnfn convert_double16(ulong16);
+double16 __ovld __cnfn convert_double16(ushort16);
+double16 __ovld __cnfn convert_double16_rte(char16);
+double16 __ovld __cnfn convert_double16_rte(double16);
+double16 __ovld __cnfn convert_double16_rte(float16);
+double16 __ovld __cnfn convert_double16_rte(int16);
+double16 __ovld __cnfn convert_double16_rte(long16);
+double16 __ovld __cnfn convert_double16_rte(short16);
+double16 __ovld __cnfn convert_double16_rte(uchar16);
+double16 __ovld __cnfn convert_double16_rte(uint16);
+double16 __ovld __cnfn convert_double16_rte(ulong16);
+double16 __ovld __cnfn convert_double16_rte(ushort16);
+double16 __ovld __cnfn convert_double16_rtn(char16);
+double16 __ovld __cnfn convert_double16_rtn(double16);
+double16 __ovld __cnfn convert_double16_rtn(float16);
+double16 __ovld __cnfn convert_double16_rtn(int16);
+double16 __ovld __cnfn convert_double16_rtn(long16);
+double16 __ovld __cnfn convert_double16_rtn(short16);
+double16 __ovld __cnfn convert_double16_rtn(uchar16);
+double16 __ovld __cnfn convert_double16_rtn(uint16);
+double16 __ovld __cnfn convert_double16_rtn(ulong16);
+double16 __ovld __cnfn convert_double16_rtn(ushort16);
+double16 __ovld __cnfn convert_double16_rtp(char16);
+double16 __ovld __cnfn convert_double16_rtp(double16);
+double16 __ovld __cnfn convert_double16_rtp(float16);
+double16 __ovld __cnfn convert_double16_rtp(int16);
+double16 __ovld __cnfn convert_double16_rtp(long16);
+double16 __ovld __cnfn convert_double16_rtp(short16);
+double16 __ovld __cnfn convert_double16_rtp(uchar16);
+double16 __ovld __cnfn convert_double16_rtp(uint16);
+double16 __ovld __cnfn convert_double16_rtp(ulong16);
+double16 __ovld __cnfn convert_double16_rtp(ushort16);
+double16 __ovld __cnfn convert_double16_rtz(char16);
+double16 __ovld __cnfn convert_double16_rtz(double16);
+double16 __ovld __cnfn convert_double16_rtz(float16);
+double16 __ovld __cnfn convert_double16_rtz(int16);
+double16 __ovld __cnfn convert_double16_rtz(long16);
+double16 __ovld __cnfn convert_double16_rtz(short16);
+double16 __ovld __cnfn convert_double16_rtz(uchar16);
+double16 __ovld __cnfn convert_double16_rtz(uint16);
+double16 __ovld __cnfn convert_double16_rtz(ulong16);
+double16 __ovld __cnfn convert_double16_rtz(ushort16);
+#endif //cl_khr_fp64
+
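+// A minimal usage sketch for the convert_ overloads above; the kernel and
+// variable names are illustrative, not part of this header. The suffix picks
+// the rounding mode: _rte rounds to nearest even, _rtp toward +infinity,
+// _rtn toward -infinity, _rtz toward zero.
+//
+//   #pragma OPENCL EXTENSION cl_khr_fp64 : enable
+//   __kernel void widen(__global const long4 *in, __global double4 *out) {
+//       size_t i = get_global_id(0);
+//       // long -> double can lose low bits (double has a 53-bit significand),
+//       // so the explicit rounding variants can disagree on large magnitudes:
+//       out[i] = convert_double4_rtz(in[i]);
+//   }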
+#ifdef cl_khr_fp16
+// Convert half types to non-double types.
+uchar __ovld __cnfn convert_uchar(half);
+uchar __ovld __cnfn convert_uchar_rte(half);
+uchar __ovld __cnfn convert_uchar_rtp(half);
+uchar __ovld __cnfn convert_uchar_rtn(half);
+uchar __ovld __cnfn convert_uchar_rtz(half);
+uchar __ovld __cnfn convert_uchar_sat(half);
+uchar __ovld __cnfn convert_uchar_sat_rte(half);
+uchar __ovld __cnfn convert_uchar_sat_rtp(half);
+uchar __ovld __cnfn convert_uchar_sat_rtn(half);
+uchar __ovld __cnfn convert_uchar_sat_rtz(half);
+uchar2 __ovld __cnfn convert_uchar2(half2);
+uchar2 __ovld __cnfn convert_uchar2_rte(half2);
+uchar2 __ovld __cnfn convert_uchar2_rtp(half2);
+uchar2 __ovld __cnfn convert_uchar2_rtn(half2);
+uchar2 __ovld __cnfn convert_uchar2_rtz(half2);
+uchar2 __ovld __cnfn convert_uchar2_sat(half2);
+uchar2 __ovld __cnfn convert_uchar2_sat_rte(half2);
+uchar2 __ovld __cnfn convert_uchar2_sat_rtp(half2);
+uchar2 __ovld __cnfn convert_uchar2_sat_rtn(half2);
+uchar2 __ovld __cnfn convert_uchar2_sat_rtz(half2);
+uchar3 __ovld __cnfn convert_uchar3(half3);
+uchar3 __ovld __cnfn convert_uchar3_rte(half3);
+uchar3 __ovld __cnfn convert_uchar3_rtp(half3);
+uchar3 __ovld __cnfn convert_uchar3_rtn(half3);
+uchar3 __ovld __cnfn convert_uchar3_rtz(half3);
+uchar3 __ovld __cnfn convert_uchar3_sat(half3);
+uchar3 __ovld __cnfn convert_uchar3_sat_rte(half3);
+uchar3 __ovld __cnfn convert_uchar3_sat_rtp(half3);
+uchar3 __ovld __cnfn convert_uchar3_sat_rtn(half3);
+uchar3 __ovld __cnfn convert_uchar3_sat_rtz(half3);
+uchar4 __ovld __cnfn convert_uchar4(half4);
+uchar4 __ovld __cnfn convert_uchar4_rte(half4);
+uchar4 __ovld __cnfn convert_uchar4_rtp(half4);
+uchar4 __ovld __cnfn convert_uchar4_rtn(half4);
+uchar4 __ovld __cnfn convert_uchar4_rtz(half4);
+uchar4 __ovld __cnfn convert_uchar4_sat(half4);
+uchar4 __ovld __cnfn convert_uchar4_sat_rte(half4);
+uchar4 __ovld __cnfn convert_uchar4_sat_rtp(half4);
+uchar4 __ovld __cnfn convert_uchar4_sat_rtn(half4);
+uchar4 __ovld __cnfn convert_uchar4_sat_rtz(half4);
+uchar8 __ovld __cnfn convert_uchar8(half8);
+uchar8 __ovld __cnfn convert_uchar8_rte(half8);
+uchar8 __ovld __cnfn convert_uchar8_rtp(half8);
+uchar8 __ovld __cnfn convert_uchar8_rtn(half8);
+uchar8 __ovld __cnfn convert_uchar8_rtz(half8);
+uchar8 __ovld __cnfn convert_uchar8_sat(half8);
+uchar8 __ovld __cnfn convert_uchar8_sat_rte(half8);
+uchar8 __ovld __cnfn convert_uchar8_sat_rtp(half8);
+uchar8 __ovld __cnfn convert_uchar8_sat_rtn(half8);
+uchar8 __ovld __cnfn convert_uchar8_sat_rtz(half8);
+uchar16 __ovld __cnfn convert_uchar16(half16);
+uchar16 __ovld __cnfn convert_uchar16_rte(half16);
+uchar16 __ovld __cnfn convert_uchar16_rtp(half16);
+uchar16 __ovld __cnfn convert_uchar16_rtn(half16);
+uchar16 __ovld __cnfn convert_uchar16_rtz(half16);
+uchar16 __ovld __cnfn convert_uchar16_sat(half16);
+uchar16 __ovld __cnfn convert_uchar16_sat_rte(half16);
+uchar16 __ovld __cnfn convert_uchar16_sat_rtp(half16);
+uchar16 __ovld __cnfn convert_uchar16_sat_rtn(half16);
+uchar16 __ovld __cnfn convert_uchar16_sat_rtz(half16);
+ushort __ovld __cnfn convert_ushort(half);
+ushort __ovld __cnfn convert_ushort_rte(half);
+ushort __ovld __cnfn convert_ushort_rtp(half);
+ushort __ovld __cnfn convert_ushort_rtn(half);
+ushort __ovld __cnfn convert_ushort_rtz(half);
+ushort __ovld __cnfn convert_ushort_sat(half);
+ushort __ovld __cnfn convert_ushort_sat_rte(half);
+ushort __ovld __cnfn convert_ushort_sat_rtp(half);
+ushort __ovld __cnfn convert_ushort_sat_rtn(half);
+ushort __ovld __cnfn convert_ushort_sat_rtz(half);
+ushort2 __ovld __cnfn convert_ushort2(half2);
+ushort2 __ovld __cnfn convert_ushort2_rte(half2);
+ushort2 __ovld __cnfn convert_ushort2_rtp(half2);
+ushort2 __ovld __cnfn convert_ushort2_rtn(half2);
+ushort2 __ovld __cnfn convert_ushort2_rtz(half2);
+ushort2 __ovld __cnfn convert_ushort2_sat(half2);
+ushort2 __ovld __cnfn convert_ushort2_sat_rte(half2);
+ushort2 __ovld __cnfn convert_ushort2_sat_rtp(half2);
+ushort2 __ovld __cnfn convert_ushort2_sat_rtn(half2);
+ushort2 __ovld __cnfn convert_ushort2_sat_rtz(half2);
+ushort3 __ovld __cnfn convert_ushort3(half3);
+ushort3 __ovld __cnfn convert_ushort3_rte(half3);
+ushort3 __ovld __cnfn convert_ushort3_rtp(half3);
+ushort3 __ovld __cnfn convert_ushort3_rtn(half3);
+ushort3 __ovld __cnfn convert_ushort3_rtz(half3);
+ushort3 __ovld __cnfn convert_ushort3_sat(half3);
+ushort3 __ovld __cnfn convert_ushort3_sat_rte(half3);
+ushort3 __ovld __cnfn convert_ushort3_sat_rtp(half3);
+ushort3 __ovld __cnfn convert_ushort3_sat_rtn(half3);
+ushort3 __ovld __cnfn convert_ushort3_sat_rtz(half3);
+ushort4 __ovld __cnfn convert_ushort4(half4);
+ushort4 __ovld __cnfn convert_ushort4_rte(half4);
+ushort4 __ovld __cnfn convert_ushort4_rtp(half4);
+ushort4 __ovld __cnfn convert_ushort4_rtn(half4);
+ushort4 __ovld __cnfn convert_ushort4_rtz(half4);
+ushort4 __ovld __cnfn convert_ushort4_sat(half4);
+ushort4 __ovld __cnfn convert_ushort4_sat_rte(half4);
+ushort4 __ovld __cnfn convert_ushort4_sat_rtp(half4);
+ushort4 __ovld __cnfn convert_ushort4_sat_rtn(half4);
+ushort4 __ovld __cnfn convert_ushort4_sat_rtz(half4);
+ushort8 __ovld __cnfn convert_ushort8(half8);
+ushort8 __ovld __cnfn convert_ushort8_rte(half8);
+ushort8 __ovld __cnfn convert_ushort8_rtp(half8);
+ushort8 __ovld __cnfn convert_ushort8_rtn(half8);
+ushort8 __ovld __cnfn convert_ushort8_rtz(half8);
+ushort8 __ovld __cnfn convert_ushort8_sat(half8);
+ushort8 __ovld __cnfn convert_ushort8_sat_rte(half8);
+ushort8 __ovld __cnfn convert_ushort8_sat_rtp(half8);
+ushort8 __ovld __cnfn convert_ushort8_sat_rtn(half8);
+ushort8 __ovld __cnfn convert_ushort8_sat_rtz(half8);
+ushort16 __ovld __cnfn convert_ushort16(half16);
+ushort16 __ovld __cnfn convert_ushort16_rte(half16);
+ushort16 __ovld __cnfn convert_ushort16_rtp(half16);
+ushort16 __ovld __cnfn convert_ushort16_rtn(half16);
+ushort16 __ovld __cnfn convert_ushort16_rtz(half16);
+ushort16 __ovld __cnfn convert_ushort16_sat(half16);
+ushort16 __ovld __cnfn convert_ushort16_sat_rte(half16);
+ushort16 __ovld __cnfn convert_ushort16_sat_rtp(half16);
+ushort16 __ovld __cnfn convert_ushort16_sat_rtn(half16);
+ushort16 __ovld __cnfn convert_ushort16_sat_rtz(half16);
+uint __ovld __cnfn convert_uint(half);
+uint __ovld __cnfn convert_uint_rte(half);
+uint __ovld __cnfn convert_uint_rtp(half);
+uint __ovld __cnfn convert_uint_rtn(half);
+uint __ovld __cnfn convert_uint_rtz(half);
+uint __ovld __cnfn convert_uint_sat(half);
+uint __ovld __cnfn convert_uint_sat_rte(half);
+uint __ovld __cnfn convert_uint_sat_rtp(half);
+uint __ovld __cnfn convert_uint_sat_rtn(half);
+uint __ovld __cnfn convert_uint_sat_rtz(half);
+uint2 __ovld __cnfn convert_uint2(half2);
+uint2 __ovld __cnfn convert_uint2_rte(half2);
+uint2 __ovld __cnfn convert_uint2_rtp(half2);
+uint2 __ovld __cnfn convert_uint2_rtn(half2);
+uint2 __ovld __cnfn convert_uint2_rtz(half2);
+uint2 __ovld __cnfn convert_uint2_sat(half2);
+uint2 __ovld __cnfn convert_uint2_sat_rte(half2);
+uint2 __ovld __cnfn convert_uint2_sat_rtp(half2);
+uint2 __ovld __cnfn convert_uint2_sat_rtn(half2);
+uint2 __ovld __cnfn convert_uint2_sat_rtz(half2);
+uint3 __ovld __cnfn convert_uint3(half3);
+uint3 __ovld __cnfn convert_uint3_rte(half3);
+uint3 __ovld __cnfn convert_uint3_rtp(half3);
+uint3 __ovld __cnfn convert_uint3_rtn(half3);
+uint3 __ovld __cnfn convert_uint3_rtz(half3);
+uint3 __ovld __cnfn convert_uint3_sat(half3);
+uint3 __ovld __cnfn convert_uint3_sat_rte(half3);
+uint3 __ovld __cnfn convert_uint3_sat_rtp(half3);
+uint3 __ovld __cnfn convert_uint3_sat_rtn(half3);
+uint3 __ovld __cnfn convert_uint3_sat_rtz(half3);
+uint4 __ovld __cnfn convert_uint4(half4);
+uint4 __ovld __cnfn convert_uint4_rte(half4);
+uint4 __ovld __cnfn convert_uint4_rtp(half4);
+uint4 __ovld __cnfn convert_uint4_rtn(half4);
+uint4 __ovld __cnfn convert_uint4_rtz(half4);
+uint4 __ovld __cnfn convert_uint4_sat(half4);
+uint4 __ovld __cnfn convert_uint4_sat_rte(half4);
+uint4 __ovld __cnfn convert_uint4_sat_rtp(half4);
+uint4 __ovld __cnfn convert_uint4_sat_rtn(half4);
+uint4 __ovld __cnfn convert_uint4_sat_rtz(half4);
+uint8 __ovld __cnfn convert_uint8(half8);
+uint8 __ovld __cnfn convert_uint8_rte(half8);
+uint8 __ovld __cnfn convert_uint8_rtp(half8);
+uint8 __ovld __cnfn convert_uint8_rtn(half8);
+uint8 __ovld __cnfn convert_uint8_rtz(half8);
+uint8 __ovld __cnfn convert_uint8_sat(half8);
+uint8 __ovld __cnfn convert_uint8_sat_rte(half8);
+uint8 __ovld __cnfn convert_uint8_sat_rtp(half8);
+uint8 __ovld __cnfn convert_uint8_sat_rtn(half8);
+uint8 __ovld __cnfn convert_uint8_sat_rtz(half8);
+uint16 __ovld __cnfn convert_uint16(half16);
+uint16 __ovld __cnfn convert_uint16_rte(half16);
+uint16 __ovld __cnfn convert_uint16_rtp(half16);
+uint16 __ovld __cnfn convert_uint16_rtn(half16);
+uint16 __ovld __cnfn convert_uint16_rtz(half16);
+uint16 __ovld __cnfn convert_uint16_sat(half16);
+uint16 __ovld __cnfn convert_uint16_sat_rte(half16);
+uint16 __ovld __cnfn convert_uint16_sat_rtp(half16);
+uint16 __ovld __cnfn convert_uint16_sat_rtn(half16);
+uint16 __ovld __cnfn convert_uint16_sat_rtz(half16);
+ulong __ovld __cnfn convert_ulong(half);
+ulong __ovld __cnfn convert_ulong_rte(half);
+ulong __ovld __cnfn convert_ulong_rtp(half);
+ulong __ovld __cnfn convert_ulong_rtn(half);
+ulong __ovld __cnfn convert_ulong_rtz(half);
+ulong __ovld __cnfn convert_ulong_sat(half);
+ulong __ovld __cnfn convert_ulong_sat_rte(half);
+ulong __ovld __cnfn convert_ulong_sat_rtp(half);
+ulong __ovld __cnfn convert_ulong_sat_rtn(half);
+ulong __ovld __cnfn convert_ulong_sat_rtz(half);
+ulong2 __ovld __cnfn convert_ulong2(half2);
+ulong2 __ovld __cnfn convert_ulong2_rte(half2);
+ulong2 __ovld __cnfn convert_ulong2_rtp(half2);
+ulong2 __ovld __cnfn convert_ulong2_rtn(half2);
+ulong2 __ovld __cnfn convert_ulong2_rtz(half2);
+ulong2 __ovld __cnfn convert_ulong2_sat(half2);
+ulong2 __ovld __cnfn convert_ulong2_sat_rte(half2);
+ulong2 __ovld __cnfn convert_ulong2_sat_rtp(half2);
+ulong2 __ovld __cnfn convert_ulong2_sat_rtn(half2);
+ulong2 __ovld __cnfn convert_ulong2_sat_rtz(half2);
+ulong3 __ovld __cnfn convert_ulong3(half3);
+ulong3 __ovld __cnfn convert_ulong3_rte(half3);
+ulong3 __ovld __cnfn convert_ulong3_rtp(half3);
+ulong3 __ovld __cnfn convert_ulong3_rtn(half3);
+ulong3 __ovld __cnfn convert_ulong3_rtz(half3);
+ulong3 __ovld __cnfn convert_ulong3_sat(half3);
+ulong3 __ovld __cnfn convert_ulong3_sat_rte(half3);
+ulong3 __ovld __cnfn convert_ulong3_sat_rtp(half3);
+ulong3 __ovld __cnfn convert_ulong3_sat_rtn(half3);
+ulong3 __ovld __cnfn convert_ulong3_sat_rtz(half3);
+ulong4 __ovld __cnfn convert_ulong4(half4);
+ulong4 __ovld __cnfn convert_ulong4_rte(half4);
+ulong4 __ovld __cnfn convert_ulong4_rtp(half4);
+ulong4 __ovld __cnfn convert_ulong4_rtn(half4);
+ulong4 __ovld __cnfn convert_ulong4_rtz(half4);
+ulong4 __ovld __cnfn convert_ulong4_sat(half4);
+ulong4 __ovld __cnfn convert_ulong4_sat_rte(half4);
+ulong4 __ovld __cnfn convert_ulong4_sat_rtp(half4);
+ulong4 __ovld __cnfn convert_ulong4_sat_rtn(half4);
+ulong4 __ovld __cnfn convert_ulong4_sat_rtz(half4);
+ulong8 __ovld __cnfn convert_ulong8(half8);
+ulong8 __ovld __cnfn convert_ulong8_rte(half8);
+ulong8 __ovld __cnfn convert_ulong8_rtp(half8);
+ulong8 __ovld __cnfn convert_ulong8_rtn(half8);
+ulong8 __ovld __cnfn convert_ulong8_rtz(half8);
+ulong8 __ovld __cnfn convert_ulong8_sat(half8);
+ulong8 __ovld __cnfn convert_ulong8_sat_rte(half8);
+ulong8 __ovld __cnfn convert_ulong8_sat_rtp(half8);
+ulong8 __ovld __cnfn convert_ulong8_sat_rtn(half8);
+ulong8 __ovld __cnfn convert_ulong8_sat_rtz(half8);
+ulong16 __ovld __cnfn convert_ulong16(half16);
+ulong16 __ovld __cnfn convert_ulong16_rte(half16);
+ulong16 __ovld __cnfn convert_ulong16_rtp(half16);
+ulong16 __ovld __cnfn convert_ulong16_rtn(half16);
+ulong16 __ovld __cnfn convert_ulong16_rtz(half16);
+ulong16 __ovld __cnfn convert_ulong16_sat(half16);
+ulong16 __ovld __cnfn convert_ulong16_sat_rte(half16);
+ulong16 __ovld __cnfn convert_ulong16_sat_rtp(half16);
+ulong16 __ovld __cnfn convert_ulong16_sat_rtn(half16);
+ulong16 __ovld __cnfn convert_ulong16_sat_rtz(half16);
+char __ovld __cnfn convert_char(half);
+char __ovld __cnfn convert_char_rte(half);
+char __ovld __cnfn convert_char_rtp(half);
+char __ovld __cnfn convert_char_rtn(half);
+char __ovld __cnfn convert_char_rtz(half);
+char __ovld __cnfn convert_char_sat(half);
+char __ovld __cnfn convert_char_sat_rte(half);
+char __ovld __cnfn convert_char_sat_rtp(half);
+char __ovld __cnfn convert_char_sat_rtn(half);
+char __ovld __cnfn convert_char_sat_rtz(half);
+char2 __ovld __cnfn convert_char2(half2);
+char2 __ovld __cnfn convert_char2_rte(half2);
+char2 __ovld __cnfn convert_char2_rtp(half2);
+char2 __ovld __cnfn convert_char2_rtn(half2);
+char2 __ovld __cnfn convert_char2_rtz(half2);
+char2 __ovld __cnfn convert_char2_sat(half2);
+char2 __ovld __cnfn convert_char2_sat_rte(half2);
+char2 __ovld __cnfn convert_char2_sat_rtp(half2);
+char2 __ovld __cnfn convert_char2_sat_rtn(half2);
+char2 __ovld __cnfn convert_char2_sat_rtz(half2);
+char3 __ovld __cnfn convert_char3(half3);
+char3 __ovld __cnfn convert_char3_rte(half3);
+char3 __ovld __cnfn convert_char3_rtp(half3);
+char3 __ovld __cnfn convert_char3_rtn(half3);
+char3 __ovld __cnfn convert_char3_rtz(half3);
+char3 __ovld __cnfn convert_char3_sat(half3);
+char3 __ovld __cnfn convert_char3_sat_rte(half3);
+char3 __ovld __cnfn convert_char3_sat_rtp(half3);
+char3 __ovld __cnfn convert_char3_sat_rtn(half3);
+char3 __ovld __cnfn convert_char3_sat_rtz(half3);
+char4 __ovld __cnfn convert_char4(half4);
+char4 __ovld __cnfn convert_char4_rte(half4);
+char4 __ovld __cnfn convert_char4_rtp(half4);
+char4 __ovld __cnfn convert_char4_rtn(half4);
+char4 __ovld __cnfn convert_char4_rtz(half4);
+char4 __ovld __cnfn convert_char4_sat(half4);
+char4 __ovld __cnfn convert_char4_sat_rte(half4);
+char4 __ovld __cnfn convert_char4_sat_rtp(half4);
+char4 __ovld __cnfn convert_char4_sat_rtn(half4);
+char4 __ovld __cnfn convert_char4_sat_rtz(half4);
+char8 __ovld __cnfn convert_char8(half8);
+char8 __ovld __cnfn convert_char8_rte(half8);
+char8 __ovld __cnfn convert_char8_rtp(half8);
+char8 __ovld __cnfn convert_char8_rtn(half8);
+char8 __ovld __cnfn convert_char8_rtz(half8);
+char8 __ovld __cnfn convert_char8_sat(half8);
+char8 __ovld __cnfn convert_char8_sat_rte(half8);
+char8 __ovld __cnfn convert_char8_sat_rtp(half8);
+char8 __ovld __cnfn convert_char8_sat_rtn(half8);
+char8 __ovld __cnfn convert_char8_sat_rtz(half8);
+char16 __ovld __cnfn convert_char16(half16);
+char16 __ovld __cnfn convert_char16_rte(half16);
+char16 __ovld __cnfn convert_char16_rtp(half16);
+char16 __ovld __cnfn convert_char16_rtn(half16);
+char16 __ovld __cnfn convert_char16_rtz(half16);
+char16 __ovld __cnfn convert_char16_sat(half16);
+char16 __ovld __cnfn convert_char16_sat_rte(half16);
+char16 __ovld __cnfn convert_char16_sat_rtp(half16);
+char16 __ovld __cnfn convert_char16_sat_rtn(half16);
+char16 __ovld __cnfn convert_char16_sat_rtz(half16);
+short __ovld __cnfn convert_short(half);
+short __ovld __cnfn convert_short_rte(half);
+short __ovld __cnfn convert_short_rtp(half);
+short __ovld __cnfn convert_short_rtn(half);
+short __ovld __cnfn convert_short_rtz(half);
+short __ovld __cnfn convert_short_sat(half);
+short __ovld __cnfn convert_short_sat_rte(half);
+short __ovld __cnfn convert_short_sat_rtp(half);
+short __ovld __cnfn convert_short_sat_rtn(half);
+short __ovld __cnfn convert_short_sat_rtz(half);
+short2 __ovld __cnfn convert_short2(half2);
+short2 __ovld __cnfn convert_short2_rte(half2);
+short2 __ovld __cnfn convert_short2_rtp(half2);
+short2 __ovld __cnfn convert_short2_rtn(half2);
+short2 __ovld __cnfn convert_short2_rtz(half2);
+short2 __ovld __cnfn convert_short2_sat(half2);
+short2 __ovld __cnfn convert_short2_sat_rte(half2);
+short2 __ovld __cnfn convert_short2_sat_rtp(half2);
+short2 __ovld __cnfn convert_short2_sat_rtn(half2);
+short2 __ovld __cnfn convert_short2_sat_rtz(half2);
+short3 __ovld __cnfn convert_short3(half3);
+short3 __ovld __cnfn convert_short3_rte(half3);
+short3 __ovld __cnfn convert_short3_rtp(half3);
+short3 __ovld __cnfn convert_short3_rtn(half3);
+short3 __ovld __cnfn convert_short3_rtz(half3);
+short3 __ovld __cnfn convert_short3_sat(half3);
+short3 __ovld __cnfn convert_short3_sat_rte(half3);
+short3 __ovld __cnfn convert_short3_sat_rtp(half3);
+short3 __ovld __cnfn convert_short3_sat_rtn(half3);
+short3 __ovld __cnfn convert_short3_sat_rtz(half3);
+short4 __ovld __cnfn convert_short4(half4);
+short4 __ovld __cnfn convert_short4_rte(half4);
+short4 __ovld __cnfn convert_short4_rtp(half4);
+short4 __ovld __cnfn convert_short4_rtn(half4);
+short4 __ovld __cnfn convert_short4_rtz(half4);
+short4 __ovld __cnfn convert_short4_sat(half4);
+short4 __ovld __cnfn convert_short4_sat_rte(half4);
+short4 __ovld __cnfn convert_short4_sat_rtp(half4);
+short4 __ovld __cnfn convert_short4_sat_rtn(half4);
+short4 __ovld __cnfn convert_short4_sat_rtz(half4);
+short8 __ovld __cnfn convert_short8(half8);
+short8 __ovld __cnfn convert_short8_rte(half8);
+short8 __ovld __cnfn convert_short8_rtp(half8);
+short8 __ovld __cnfn convert_short8_rtn(half8);
+short8 __ovld __cnfn convert_short8_rtz(half8);
+short8 __ovld __cnfn convert_short8_sat(half8);
+short8 __ovld __cnfn convert_short8_sat_rte(half8);
+short8 __ovld __cnfn convert_short8_sat_rtp(half8);
+short8 __ovld __cnfn convert_short8_sat_rtn(half8);
+short8 __ovld __cnfn convert_short8_sat_rtz(half8);
+short16 __ovld __cnfn convert_short16(half16);
+short16 __ovld __cnfn convert_short16_rte(half16);
+short16 __ovld __cnfn convert_short16_rtp(half16);
+short16 __ovld __cnfn convert_short16_rtn(half16);
+short16 __ovld __cnfn convert_short16_rtz(half16);
+short16 __ovld __cnfn convert_short16_sat(half16);
+short16 __ovld __cnfn convert_short16_sat_rte(half16);
+short16 __ovld __cnfn convert_short16_sat_rtp(half16);
+short16 __ovld __cnfn convert_short16_sat_rtn(half16);
+short16 __ovld __cnfn convert_short16_sat_rtz(half16);
+int __ovld __cnfn convert_int(half);
+int __ovld __cnfn convert_int_rte(half);
+int __ovld __cnfn convert_int_rtp(half);
+int __ovld __cnfn convert_int_rtn(half);
+int __ovld __cnfn convert_int_rtz(half);
+int __ovld __cnfn convert_int_sat(half);
+int __ovld __cnfn convert_int_sat_rte(half);
+int __ovld __cnfn convert_int_sat_rtp(half);
+int __ovld __cnfn convert_int_sat_rtn(half);
+int __ovld __cnfn convert_int_sat_rtz(half);
+int2 __ovld __cnfn convert_int2(half2);
+int2 __ovld __cnfn convert_int2_rte(half2);
+int2 __ovld __cnfn convert_int2_rtp(half2);
+int2 __ovld __cnfn convert_int2_rtn(half2);
+int2 __ovld __cnfn convert_int2_rtz(half2);
+int2 __ovld __cnfn convert_int2_sat(half2);
+int2 __ovld __cnfn convert_int2_sat_rte(half2);
+int2 __ovld __cnfn convert_int2_sat_rtp(half2);
+int2 __ovld __cnfn convert_int2_sat_rtn(half2);
+int2 __ovld __cnfn convert_int2_sat_rtz(half2);
+int3 __ovld __cnfn convert_int3(half3);
+int3 __ovld __cnfn convert_int3_rte(half3);
+int3 __ovld __cnfn convert_int3_rtp(half3);
+int3 __ovld __cnfn convert_int3_rtn(half3);
+int3 __ovld __cnfn convert_int3_rtz(half3);
+int3 __ovld __cnfn convert_int3_sat(half3);
+int3 __ovld __cnfn convert_int3_sat_rte(half3);
+int3 __ovld __cnfn convert_int3_sat_rtp(half3);
+int3 __ovld __cnfn convert_int3_sat_rtn(half3);
+int3 __ovld __cnfn convert_int3_sat_rtz(half3);
+int4 __ovld __cnfn convert_int4(half4);
+int4 __ovld __cnfn convert_int4_rte(half4);
+int4 __ovld __cnfn convert_int4_rtp(half4);
+int4 __ovld __cnfn convert_int4_rtn(half4);
+int4 __ovld __cnfn convert_int4_rtz(half4);
+int4 __ovld __cnfn convert_int4_sat(half4);
+int4 __ovld __cnfn convert_int4_sat_rte(half4);
+int4 __ovld __cnfn convert_int4_sat_rtp(half4);
+int4 __ovld __cnfn convert_int4_sat_rtn(half4);
+int4 __ovld __cnfn convert_int4_sat_rtz(half4);
+int8 __ovld __cnfn convert_int8(half8);
+int8 __ovld __cnfn convert_int8_rte(half8);
+int8 __ovld __cnfn convert_int8_rtp(half8);
+int8 __ovld __cnfn convert_int8_rtn(half8);
+int8 __ovld __cnfn convert_int8_rtz(half8);
+int8 __ovld __cnfn convert_int8_sat(half8);
+int8 __ovld __cnfn convert_int8_sat_rte(half8);
+int8 __ovld __cnfn convert_int8_sat_rtp(half8);
+int8 __ovld __cnfn convert_int8_sat_rtn(half8);
+int8 __ovld __cnfn convert_int8_sat_rtz(half8);
+int16 __ovld __cnfn convert_int16(half16);
+int16 __ovld __cnfn convert_int16_rte(half16);
+int16 __ovld __cnfn convert_int16_rtp(half16);
+int16 __ovld __cnfn convert_int16_rtn(half16);
+int16 __ovld __cnfn convert_int16_rtz(half16);
+int16 __ovld __cnfn convert_int16_sat(half16);
+int16 __ovld __cnfn convert_int16_sat_rte(half16);
+int16 __ovld __cnfn convert_int16_sat_rtp(half16);
+int16 __ovld __cnfn convert_int16_sat_rtn(half16);
+int16 __ovld __cnfn convert_int16_sat_rtz(half16);
+long __ovld __cnfn convert_long(half);
+long __ovld __cnfn convert_long_rte(half);
+long __ovld __cnfn convert_long_rtp(half);
+long __ovld __cnfn convert_long_rtn(half);
+long __ovld __cnfn convert_long_rtz(half);
+long __ovld __cnfn convert_long_sat(half);
+long __ovld __cnfn convert_long_sat_rte(half);
+long __ovld __cnfn convert_long_sat_rtp(half);
+long __ovld __cnfn convert_long_sat_rtn(half);
+long __ovld __cnfn convert_long_sat_rtz(half);
+long2 __ovld __cnfn convert_long2(half2);
+long2 __ovld __cnfn convert_long2_rte(half2);
+long2 __ovld __cnfn convert_long2_rtp(half2);
+long2 __ovld __cnfn convert_long2_rtn(half2);
+long2 __ovld __cnfn convert_long2_rtz(half2);
+long2 __ovld __cnfn convert_long2_sat(half2);
+long2 __ovld __cnfn convert_long2_sat_rte(half2);
+long2 __ovld __cnfn convert_long2_sat_rtp(half2);
+long2 __ovld __cnfn convert_long2_sat_rtn(half2);
+long2 __ovld __cnfn convert_long2_sat_rtz(half2);
+long3 __ovld __cnfn convert_long3(half3);
+long3 __ovld __cnfn convert_long3_rte(half3);
+long3 __ovld __cnfn convert_long3_rtp(half3);
+long3 __ovld __cnfn convert_long3_rtn(half3);
+long3 __ovld __cnfn convert_long3_rtz(half3);
+long3 __ovld __cnfn convert_long3_sat(half3);
+long3 __ovld __cnfn convert_long3_sat_rte(half3);
+long3 __ovld __cnfn convert_long3_sat_rtp(half3);
+long3 __ovld __cnfn convert_long3_sat_rtn(half3);
+long3 __ovld __cnfn convert_long3_sat_rtz(half3);
+long4 __ovld __cnfn convert_long4(half4);
+long4 __ovld __cnfn convert_long4_rte(half4);
+long4 __ovld __cnfn convert_long4_rtp(half4);
+long4 __ovld __cnfn convert_long4_rtn(half4);
+long4 __ovld __cnfn convert_long4_rtz(half4);
+long4 __ovld __cnfn convert_long4_sat(half4);
+long4 __ovld __cnfn convert_long4_sat_rte(half4);
+long4 __ovld __cnfn convert_long4_sat_rtp(half4);
+long4 __ovld __cnfn convert_long4_sat_rtn(half4);
+long4 __ovld __cnfn convert_long4_sat_rtz(half4);
+long8 __ovld __cnfn convert_long8(half8);
+long8 __ovld __cnfn convert_long8_rte(half8);
+long8 __ovld __cnfn convert_long8_rtp(half8);
+long8 __ovld __cnfn convert_long8_rtn(half8);
+long8 __ovld __cnfn convert_long8_rtz(half8);
+long8 __ovld __cnfn convert_long8_sat(half8);
+long8 __ovld __cnfn convert_long8_sat_rte(half8);
+long8 __ovld __cnfn convert_long8_sat_rtp(half8);
+long8 __ovld __cnfn convert_long8_sat_rtn(half8);
+long8 __ovld __cnfn convert_long8_sat_rtz(half8);
+long16 __ovld __cnfn convert_long16(half16);
+long16 __ovld __cnfn convert_long16_rte(half16);
+long16 __ovld __cnfn convert_long16_rtp(half16);
+long16 __ovld __cnfn convert_long16_rtn(half16);
+long16 __ovld __cnfn convert_long16_rtz(half16);
+long16 __ovld __cnfn convert_long16_sat(half16);
+long16 __ovld __cnfn convert_long16_sat_rte(half16);
+long16 __ovld __cnfn convert_long16_sat_rtp(half16);
+long16 __ovld __cnfn convert_long16_sat_rtn(half16);
+long16 __ovld __cnfn convert_long16_sat_rtz(half16);
+float __ovld __cnfn convert_float(half);
+float __ovld __cnfn convert_float_rte(half);
+float __ovld __cnfn convert_float_rtp(half);
+float __ovld __cnfn convert_float_rtn(half);
+float __ovld __cnfn convert_float_rtz(half);
+float2 __ovld __cnfn convert_float2(half2);
+float2 __ovld __cnfn convert_float2_rte(half2);
+float2 __ovld __cnfn convert_float2_rtp(half2);
+float2 __ovld __cnfn convert_float2_rtn(half2);
+float2 __ovld __cnfn convert_float2_rtz(half2);
+float3 __ovld __cnfn convert_float3(half3);
+float3 __ovld __cnfn convert_float3_rte(half3);
+float3 __ovld __cnfn convert_float3_rtp(half3);
+float3 __ovld __cnfn convert_float3_rtn(half3);
+float3 __ovld __cnfn convert_float3_rtz(half3);
+float4 __ovld __cnfn convert_float4(half4);
+float4 __ovld __cnfn convert_float4_rte(half4);
+float4 __ovld __cnfn convert_float4_rtp(half4);
+float4 __ovld __cnfn convert_float4_rtn(half4);
+float4 __ovld __cnfn convert_float4_rtz(half4);
+float8 __ovld __cnfn convert_float8(half8);
+float8 __ovld __cnfn convert_float8_rte(half8);
+float8 __ovld __cnfn convert_float8_rtp(half8);
+float8 __ovld __cnfn convert_float8_rtn(half8);
+float8 __ovld __cnfn convert_float8_rtz(half8);
+float16 __ovld __cnfn convert_float16(half16);
+float16 __ovld __cnfn convert_float16_rte(half16);
+float16 __ovld __cnfn convert_float16_rtp(half16);
+float16 __ovld __cnfn convert_float16_rtn(half16);
+float16 __ovld __cnfn convert_float16_rtz(half16);
+
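+// A short illustrative sketch (kernel name is ours) of the saturating
+// half -> integer conversions above: _sat clamps out-of-range inputs to the
+// destination type's limits instead of leaving the result undefined.
+//
+//   #pragma OPENCL EXTENSION cl_khr_fp16 : enable
+//   __kernel void quantize(__global const half4 *in, __global uchar4 *out) {
+//       // e.g. (half4)(-1.0h, 1.5h, 300.0h, 96.0h) -> (uchar4)(0, 2, 255, 96)
+//       out[get_global_id(0)] = convert_uchar4_sat_rte(in[get_global_id(0)]);
+//   }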
+// Convert non-double types to half types.
+half __ovld __cnfn convert_half(uchar);
+half __ovld __cnfn convert_half(ushort);
+half __ovld __cnfn convert_half(uint);
+half __ovld __cnfn convert_half(ulong);
+half __ovld __cnfn convert_half(char);
+half __ovld __cnfn convert_half(short);
+half __ovld __cnfn convert_half(int);
+half __ovld __cnfn convert_half(long);
+half __ovld __cnfn convert_half(float);
+half __ovld __cnfn convert_half(half);
+half __ovld __cnfn convert_half_rte(uchar);
+half __ovld __cnfn convert_half_rte(ushort);
+half __ovld __cnfn convert_half_rte(uint);
+half __ovld __cnfn convert_half_rte(ulong);
+half __ovld __cnfn convert_half_rte(char);
+half __ovld __cnfn convert_half_rte(short);
+half __ovld __cnfn convert_half_rte(int);
+half __ovld __cnfn convert_half_rte(long);
+half __ovld __cnfn convert_half_rte(float);
+half __ovld __cnfn convert_half_rte(half);
+half __ovld __cnfn convert_half_rtp(uchar);
+half __ovld __cnfn convert_half_rtp(ushort);
+half __ovld __cnfn convert_half_rtp(uint);
+half __ovld __cnfn convert_half_rtp(ulong);
+half __ovld __cnfn convert_half_rtp(char);
+half __ovld __cnfn convert_half_rtp(short);
+half __ovld __cnfn convert_half_rtp(int);
+half __ovld __cnfn convert_half_rtp(long);
+half __ovld __cnfn convert_half_rtp(float);
+half __ovld __cnfn convert_half_rtp(half);
+half __ovld __cnfn convert_half_rtn(uchar);
+half __ovld __cnfn convert_half_rtn(ushort);
+half __ovld __cnfn convert_half_rtn(uint);
+half __ovld __cnfn convert_half_rtn(ulong);
+half __ovld __cnfn convert_half_rtn(char);
+half __ovld __cnfn convert_half_rtn(short);
+half __ovld __cnfn convert_half_rtn(int);
+half __ovld __cnfn convert_half_rtn(long);
+half __ovld __cnfn convert_half_rtn(float);
+half __ovld __cnfn convert_half_rtn(half);
+half __ovld __cnfn convert_half_rtz(uchar);
+half __ovld __cnfn convert_half_rtz(ushort);
+half __ovld __cnfn convert_half_rtz(uint);
+half __ovld __cnfn convert_half_rtz(ulong);
+half __ovld __cnfn convert_half_rtz(char);
+half __ovld __cnfn convert_half_rtz(short);
+half __ovld __cnfn convert_half_rtz(int);
+half __ovld __cnfn convert_half_rtz(long);
+half __ovld __cnfn convert_half_rtz(float);
+half __ovld __cnfn convert_half_rtz(half);
+half2 __ovld __cnfn convert_half2(char2);
+half2 __ovld __cnfn convert_half2(uchar2);
+half2 __ovld __cnfn convert_half2(short2);
+half2 __ovld __cnfn convert_half2(ushort2);
+half2 __ovld __cnfn convert_half2(int2);
+half2 __ovld __cnfn convert_half2(uint2);
+half2 __ovld __cnfn convert_half2(long2);
+half2 __ovld __cnfn convert_half2(ulong2);
+half2 __ovld __cnfn convert_half2(float2);
+half2 __ovld __cnfn convert_half2(half2);
+half2 __ovld __cnfn convert_half2_rte(char2);
+half2 __ovld __cnfn convert_half2_rte(uchar2);
+half2 __ovld __cnfn convert_half2_rte(short2);
+half2 __ovld __cnfn convert_half2_rte(ushort2);
+half2 __ovld __cnfn convert_half2_rte(int2);
+half2 __ovld __cnfn convert_half2_rte(uint2);
+half2 __ovld __cnfn convert_half2_rte(long2);
+half2 __ovld __cnfn convert_half2_rte(ulong2);
+half2 __ovld __cnfn convert_half2_rte(float2);
+half2 __ovld __cnfn convert_half2_rte(half2);
+half2 __ovld __cnfn convert_half2_rtp(char2);
+half2 __ovld __cnfn convert_half2_rtp(uchar2);
+half2 __ovld __cnfn convert_half2_rtp(short2);
+half2 __ovld __cnfn convert_half2_rtp(ushort2);
+half2 __ovld __cnfn convert_half2_rtp(int2);
+half2 __ovld __cnfn convert_half2_rtp(uint2);
+half2 __ovld __cnfn convert_half2_rtp(long2);
+half2 __ovld __cnfn convert_half2_rtp(ulong2);
+half2 __ovld __cnfn convert_half2_rtp(float2);
+half2 __ovld __cnfn convert_half2_rtp(half2);
+half2 __ovld __cnfn convert_half2_rtn(char2);
+half2 __ovld __cnfn convert_half2_rtn(uchar2);
+half2 __ovld __cnfn convert_half2_rtn(short2);
+half2 __ovld __cnfn convert_half2_rtn(ushort2);
+half2 __ovld __cnfn convert_half2_rtn(int2);
+half2 __ovld __cnfn convert_half2_rtn(uint2);
+half2 __ovld __cnfn convert_half2_rtn(long2);
+half2 __ovld __cnfn convert_half2_rtn(ulong2);
+half2 __ovld __cnfn convert_half2_rtn(float2);
+half2 __ovld __cnfn convert_half2_rtn(half2);
+half2 __ovld __cnfn convert_half2_rtz(char2);
+half2 __ovld __cnfn convert_half2_rtz(uchar2);
+half2 __ovld __cnfn convert_half2_rtz(short2);
+half2 __ovld __cnfn convert_half2_rtz(ushort2);
+half2 __ovld __cnfn convert_half2_rtz(int2);
+half2 __ovld __cnfn convert_half2_rtz(uint2);
+half2 __ovld __cnfn convert_half2_rtz(long2);
+half2 __ovld __cnfn convert_half2_rtz(ulong2);
+half2 __ovld __cnfn convert_half2_rtz(float2);
+half2 __ovld __cnfn convert_half2_rtz(half2);
+half3 __ovld __cnfn convert_half3(char3);
+half3 __ovld __cnfn convert_half3(uchar3);
+half3 __ovld __cnfn convert_half3(short3);
+half3 __ovld __cnfn convert_half3(ushort3);
+half3 __ovld __cnfn convert_half3(int3);
+half3 __ovld __cnfn convert_half3(uint3);
+half3 __ovld __cnfn convert_half3(long3);
+half3 __ovld __cnfn convert_half3(ulong3);
+half3 __ovld __cnfn convert_half3(float3);
+half3 __ovld __cnfn convert_half3(half3);
+half3 __ovld __cnfn convert_half3_rte(char3);
+half3 __ovld __cnfn convert_half3_rte(uchar3);
+half3 __ovld __cnfn convert_half3_rte(short3);
+half3 __ovld __cnfn convert_half3_rte(ushort3);
+half3 __ovld __cnfn convert_half3_rte(int3);
+half3 __ovld __cnfn convert_half3_rte(uint3);
+half3 __ovld __cnfn convert_half3_rte(long3);
+half3 __ovld __cnfn convert_half3_rte(ulong3);
+half3 __ovld __cnfn convert_half3_rte(float3);
+half3 __ovld __cnfn convert_half3_rte(half3);
+half3 __ovld __cnfn convert_half3_rtp(char3);
+half3 __ovld __cnfn convert_half3_rtp(uchar3);
+half3 __ovld __cnfn convert_half3_rtp(short3);
+half3 __ovld __cnfn convert_half3_rtp(ushort3);
+half3 __ovld __cnfn convert_half3_rtp(int3);
+half3 __ovld __cnfn convert_half3_rtp(uint3);
+half3 __ovld __cnfn convert_half3_rtp(long3);
+half3 __ovld __cnfn convert_half3_rtp(ulong3);
+half3 __ovld __cnfn convert_half3_rtp(float3);
+half3 __ovld __cnfn convert_half3_rtp(half3);
+half3 __ovld __cnfn convert_half3_rtn(char3);
+half3 __ovld __cnfn convert_half3_rtn(uchar3);
+half3 __ovld __cnfn convert_half3_rtn(short3);
+half3 __ovld __cnfn convert_half3_rtn(ushort3);
+half3 __ovld __cnfn convert_half3_rtn(int3);
+half3 __ovld __cnfn convert_half3_rtn(uint3);
+half3 __ovld __cnfn convert_half3_rtn(long3);
+half3 __ovld __cnfn convert_half3_rtn(ulong3);
+half3 __ovld __cnfn convert_half3_rtn(float3);
+half3 __ovld __cnfn convert_half3_rtn(half3);
+half3 __ovld __cnfn convert_half3_rtz(char3);
+half3 __ovld __cnfn convert_half3_rtz(uchar3);
+half3 __ovld __cnfn convert_half3_rtz(short3);
+half3 __ovld __cnfn convert_half3_rtz(ushort3);
+half3 __ovld __cnfn convert_half3_rtz(int3);
+half3 __ovld __cnfn convert_half3_rtz(uint3);
+half3 __ovld __cnfn convert_half3_rtz(long3);
+half3 __ovld __cnfn convert_half3_rtz(ulong3);
+half3 __ovld __cnfn convert_half3_rtz(float3);
+half3 __ovld __cnfn convert_half3_rtz(half3);
+half4 __ovld __cnfn convert_half4(char4);
+half4 __ovld __cnfn convert_half4(uchar4);
+half4 __ovld __cnfn convert_half4(short4);
+half4 __ovld __cnfn convert_half4(ushort4);
+half4 __ovld __cnfn convert_half4(int4);
+half4 __ovld __cnfn convert_half4(uint4);
+half4 __ovld __cnfn convert_half4(long4);
+half4 __ovld __cnfn convert_half4(ulong4);
+half4 __ovld __cnfn convert_half4(float4);
+half4 __ovld __cnfn convert_half4(half4);
+half4 __ovld __cnfn convert_half4_rte(char4);
+half4 __ovld __cnfn convert_half4_rte(uchar4);
+half4 __ovld __cnfn convert_half4_rte(short4);
+half4 __ovld __cnfn convert_half4_rte(ushort4);
+half4 __ovld __cnfn convert_half4_rte(int4);
+half4 __ovld __cnfn convert_half4_rte(uint4);
+half4 __ovld __cnfn convert_half4_rte(long4);
+half4 __ovld __cnfn convert_half4_rte(ulong4);
+half4 __ovld __cnfn convert_half4_rte(float4);
+half4 __ovld __cnfn convert_half4_rte(half4);
+half4 __ovld __cnfn convert_half4_rtp(char4);
+half4 __ovld __cnfn convert_half4_rtp(uchar4);
+half4 __ovld __cnfn convert_half4_rtp(short4);
+half4 __ovld __cnfn convert_half4_rtp(ushort4);
+half4 __ovld __cnfn convert_half4_rtp(int4);
+half4 __ovld __cnfn convert_half4_rtp(uint4);
+half4 __ovld __cnfn convert_half4_rtp(long4);
+half4 __ovld __cnfn convert_half4_rtp(ulong4);
+half4 __ovld __cnfn convert_half4_rtp(float4);
+half4 __ovld __cnfn convert_half4_rtp(half4);
+half4 __ovld __cnfn convert_half4_rtn(char4);
+half4 __ovld __cnfn convert_half4_rtn(uchar4);
+half4 __ovld __cnfn convert_half4_rtn(short4);
+half4 __ovld __cnfn convert_half4_rtn(ushort4);
+half4 __ovld __cnfn convert_half4_rtn(int4);
+half4 __ovld __cnfn convert_half4_rtn(uint4);
+half4 __ovld __cnfn convert_half4_rtn(long4);
+half4 __ovld __cnfn convert_half4_rtn(ulong4);
+half4 __ovld __cnfn convert_half4_rtn(float4);
+half4 __ovld __cnfn convert_half4_rtn(half4);
+half4 __ovld __cnfn convert_half4_rtz(char4);
+half4 __ovld __cnfn convert_half4_rtz(uchar4);
+half4 __ovld __cnfn convert_half4_rtz(short4);
+half4 __ovld __cnfn convert_half4_rtz(ushort4);
+half4 __ovld __cnfn convert_half4_rtz(int4);
+half4 __ovld __cnfn convert_half4_rtz(uint4);
+half4 __ovld __cnfn convert_half4_rtz(long4);
+half4 __ovld __cnfn convert_half4_rtz(ulong4);
+half4 __ovld __cnfn convert_half4_rtz(float4);
+half4 __ovld __cnfn convert_half4_rtz(half4);
+half8 __ovld __cnfn convert_half8(char8);
+half8 __ovld __cnfn convert_half8(uchar8);
+half8 __ovld __cnfn convert_half8(short8);
+half8 __ovld __cnfn convert_half8(ushort8);
+half8 __ovld __cnfn convert_half8(int8);
+half8 __ovld __cnfn convert_half8(uint8);
+half8 __ovld __cnfn convert_half8(long8);
+half8 __ovld __cnfn convert_half8(ulong8);
+half8 __ovld __cnfn convert_half8(float8);
+half8 __ovld __cnfn convert_half8(half8);
+half8 __ovld __cnfn convert_half8_rte(char8);
+half8 __ovld __cnfn convert_half8_rte(uchar8);
+half8 __ovld __cnfn convert_half8_rte(short8);
+half8 __ovld __cnfn convert_half8_rte(ushort8);
+half8 __ovld __cnfn convert_half8_rte(int8);
+half8 __ovld __cnfn convert_half8_rte(uint8);
+half8 __ovld __cnfn convert_half8_rte(long8);
+half8 __ovld __cnfn convert_half8_rte(ulong8);
+half8 __ovld __cnfn convert_half8_rte(float8);
+half8 __ovld __cnfn convert_half8_rte(half8);
+half8 __ovld __cnfn convert_half8_rtp(char8);
+half8 __ovld __cnfn convert_half8_rtp(uchar8);
+half8 __ovld __cnfn convert_half8_rtp(short8);
+half8 __ovld __cnfn convert_half8_rtp(ushort8);
+half8 __ovld __cnfn convert_half8_rtp(int8);
+half8 __ovld __cnfn convert_half8_rtp(uint8);
+half8 __ovld __cnfn convert_half8_rtp(long8);
+half8 __ovld __cnfn convert_half8_rtp(ulong8);
+half8 __ovld __cnfn convert_half8_rtp(float8);
+half8 __ovld __cnfn convert_half8_rtp(half8);
+half8 __ovld __cnfn convert_half8_rtn(char8);
+half8 __ovld __cnfn convert_half8_rtn(uchar8);
+half8 __ovld __cnfn convert_half8_rtn(short8);
+half8 __ovld __cnfn convert_half8_rtn(ushort8);
+half8 __ovld __cnfn convert_half8_rtn(int8);
+half8 __ovld __cnfn convert_half8_rtn(uint8);
+half8 __ovld __cnfn convert_half8_rtn(long8);
+half8 __ovld __cnfn convert_half8_rtn(ulong8);
+half8 __ovld __cnfn convert_half8_rtn(float8);
+half8 __ovld __cnfn convert_half8_rtn(half8);
+half8 __ovld __cnfn convert_half8_rtz(char8);
+half8 __ovld __cnfn convert_half8_rtz(uchar8);
+half8 __ovld __cnfn convert_half8_rtz(short8);
+half8 __ovld __cnfn convert_half8_rtz(ushort8);
+half8 __ovld __cnfn convert_half8_rtz(int8);
+half8 __ovld __cnfn convert_half8_rtz(uint8);
+half8 __ovld __cnfn convert_half8_rtz(long8);
+half8 __ovld __cnfn convert_half8_rtz(ulong8);
+half8 __ovld __cnfn convert_half8_rtz(float8);
+half8 __ovld __cnfn convert_half8_rtz(half8);
+half16 __ovld __cnfn convert_half16(char16);
+half16 __ovld __cnfn convert_half16(uchar16);
+half16 __ovld __cnfn convert_half16(short16);
+half16 __ovld __cnfn convert_half16(ushort16);
+half16 __ovld __cnfn convert_half16(int16);
+half16 __ovld __cnfn convert_half16(uint16);
+half16 __ovld __cnfn convert_half16(long16);
+half16 __ovld __cnfn convert_half16(ulong16);
+half16 __ovld __cnfn convert_half16(float16);
+half16 __ovld __cnfn convert_half16(half16);
+half16 __ovld __cnfn convert_half16_rte(char16);
+half16 __ovld __cnfn convert_half16_rte(uchar16);
+half16 __ovld __cnfn convert_half16_rte(short16);
+half16 __ovld __cnfn convert_half16_rte(ushort16);
+half16 __ovld __cnfn convert_half16_rte(int16);
+half16 __ovld __cnfn convert_half16_rte(uint16);
+half16 __ovld __cnfn convert_half16_rte(long16);
+half16 __ovld __cnfn convert_half16_rte(ulong16);
+half16 __ovld __cnfn convert_half16_rte(float16);
+half16 __ovld __cnfn convert_half16_rte(half16);
+half16 __ovld __cnfn convert_half16_rtp(char16);
+half16 __ovld __cnfn convert_half16_rtp(uchar16);
+half16 __ovld __cnfn convert_half16_rtp(short16);
+half16 __ovld __cnfn convert_half16_rtp(ushort16);
+half16 __ovld __cnfn convert_half16_rtp(int16);
+half16 __ovld __cnfn convert_half16_rtp(uint16);
+half16 __ovld __cnfn convert_half16_rtp(long16);
+half16 __ovld __cnfn convert_half16_rtp(ulong16);
+half16 __ovld __cnfn convert_half16_rtp(float16);
+half16 __ovld __cnfn convert_half16_rtp(half16);
+half16 __ovld __cnfn convert_half16_rtn(char16);
+half16 __ovld __cnfn convert_half16_rtn(uchar16);
+half16 __ovld __cnfn convert_half16_rtn(short16);
+half16 __ovld __cnfn convert_half16_rtn(ushort16);
+half16 __ovld __cnfn convert_half16_rtn(int16);
+half16 __ovld __cnfn convert_half16_rtn(uint16);
+half16 __ovld __cnfn convert_half16_rtn(long16);
+half16 __ovld __cnfn convert_half16_rtn(ulong16);
+half16 __ovld __cnfn convert_half16_rtn(float16);
+half16 __ovld __cnfn convert_half16_rtn(half16);
+half16 __ovld __cnfn convert_half16_rtz(char16);
+half16 __ovld __cnfn convert_half16_rtz(uchar16);
+half16 __ovld __cnfn convert_half16_rtz(short16);
+half16 __ovld __cnfn convert_half16_rtz(ushort16);
+half16 __ovld __cnfn convert_half16_rtz(int16);
+half16 __ovld __cnfn convert_half16_rtz(uint16);
+half16 __ovld __cnfn convert_half16_rtz(long16);
+half16 __ovld __cnfn convert_half16_rtz(ulong16);
+half16 __ovld __cnfn convert_half16_rtz(float16);
+half16 __ovld __cnfn convert_half16_rtz(half16);
+
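+// Illustrative sketch of the to-half conversions above (names are ours).
+// Narrowing float -> half drops significand bits, so the explicit rounding
+// variants can differ; without a suffix, conversions to floating-point types
+// default to round-to-nearest-even.
+//
+//   #pragma OPENCL EXTENSION cl_khr_fp16 : enable
+//   __kernel void narrow(__global const float2 *in,
+//                        __global half2 *out, __global half2 *out_rtz) {
+//       size_t i = get_global_id(0);
+//       out[i]     = convert_half2(in[i]);      // same as convert_half2_rte
+//       out_rtz[i] = convert_half2_rtz(in[i]);  // truncate toward zero
+//   }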
+// Convert half types to double types.
+#ifdef cl_khr_fp64
+double __ovld __cnfn convert_double(half);
+double __ovld __cnfn convert_double_rte(half);
+double __ovld __cnfn convert_double_rtp(half);
+double __ovld __cnfn convert_double_rtn(half);
+double __ovld __cnfn convert_double_rtz(half);
+double2 __ovld __cnfn convert_double2(half2);
+double2 __ovld __cnfn convert_double2_rte(half2);
+double2 __ovld __cnfn convert_double2_rtp(half2);
+double2 __ovld __cnfn convert_double2_rtn(half2);
+double2 __ovld __cnfn convert_double2_rtz(half2);
+double3 __ovld __cnfn convert_double3(half3);
+double3 __ovld __cnfn convert_double3_rte(half3);
+double3 __ovld __cnfn convert_double3_rtp(half3);
+double3 __ovld __cnfn convert_double3_rtn(half3);
+double3 __ovld __cnfn convert_double3_rtz(half3);
+double4 __ovld __cnfn convert_double4(half4);
+double4 __ovld __cnfn convert_double4_rte(half4);
+double4 __ovld __cnfn convert_double4_rtp(half4);
+double4 __ovld __cnfn convert_double4_rtn(half4);
+double4 __ovld __cnfn convert_double4_rtz(half4);
+double8 __ovld __cnfn convert_double8(half8);
+double8 __ovld __cnfn convert_double8_rte(half8);
+double8 __ovld __cnfn convert_double8_rtp(half8);
+double8 __ovld __cnfn convert_double8_rtn(half8);
+double8 __ovld __cnfn convert_double8_rtz(half8);
+double16 __ovld __cnfn convert_double16(half16);
+double16 __ovld __cnfn convert_double16_rte(half16);
+double16 __ovld __cnfn convert_double16_rtp(half16);
+double16 __ovld __cnfn convert_double16_rtn(half16);
+double16 __ovld __cnfn convert_double16_rtz(half16);
+
+// Convert double types to half types.
+half __ovld __cnfn convert_half(double);
+half __ovld __cnfn convert_half_rte(double);
+half __ovld __cnfn convert_half_rtp(double);
+half __ovld __cnfn convert_half_rtn(double);
+half __ovld __cnfn convert_half_rtz(double);
+half2 __ovld __cnfn convert_half2(double2);
+half2 __ovld __cnfn convert_half2_rte(double2);
+half2 __ovld __cnfn convert_half2_rtp(double2);
+half2 __ovld __cnfn convert_half2_rtn(double2);
+half2 __ovld __cnfn convert_half2_rtz(double2);
+half3 __ovld __cnfn convert_half3(double3);
+half3 __ovld __cnfn convert_half3_rte(double3);
+half3 __ovld __cnfn convert_half3_rtp(double3);
+half3 __ovld __cnfn convert_half3_rtn(double3);
+half3 __ovld __cnfn convert_half3_rtz(double3);
+half4 __ovld __cnfn convert_half4(double4);
+half4 __ovld __cnfn convert_half4_rte(double4);
+half4 __ovld __cnfn convert_half4_rtp(double4);
+half4 __ovld __cnfn convert_half4_rtn(double4);
+half4 __ovld __cnfn convert_half4_rtz(double4);
+half8 __ovld __cnfn convert_half8(double8);
+half8 __ovld __cnfn convert_half8_rte(double8);
+half8 __ovld __cnfn convert_half8_rtp(double8);
+half8 __ovld __cnfn convert_half8_rtn(double8);
+half8 __ovld __cnfn convert_half8_rtz(double8);
+half16 __ovld __cnfn convert_half16(double16);
+half16 __ovld __cnfn convert_half16_rte(double16);
+half16 __ovld __cnfn convert_half16_rtp(double16);
+half16 __ovld __cnfn convert_half16_rtn(double16);
+half16 __ovld __cnfn convert_half16_rtz(double16);
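+// Worth noting (an observation, not text from the generated header): every
+// half value is exactly representable as a double, so the rounding suffix on
+// the half -> double conversions above never changes the result. In the other
+// direction, doubles beyond the half range overflow, e.g.
+// convert_half_rte(1.0e5) yields INFINITY while convert_half_rtz(1.0e5)
+// yields HALF_MAX (65504.0h).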
+#endif //cl_khr_fp64
+
+#endif // cl_khr_fp16
+
+/**
+ * OpenCL v1.1/1.2/2.0 s6.2.4.2 - as_type operators
+ * Reinterprets a data type as another data type of the same size
+ */
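+// A brief illustrative example (the wrapper function is ours): unlike the
+// convert_ family, as_ copies the bit pattern unchanged, so it exposes the
+// IEEE-754 encoding of a float without any numeric conversion.
+//
+//   int float_bits(float f) {
+//       // as_int(1.0f) == 0x3f800000
+//       return as_int(f);
+//   }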
+char __ovld __cnfn as_char(char);
+char __ovld __cnfn as_char(uchar);
+
+char2 __ovld __cnfn as_char2(char2);
+char2 __ovld __cnfn as_char2(uchar2);
+char2 __ovld __cnfn as_char2(short);
+char2 __ovld __cnfn as_char2(ushort);
+
+char3 __ovld __cnfn as_char3(char3);
+char3 __ovld __cnfn as_char3(char4);
+char3 __ovld __cnfn as_char3(uchar3);
+char3 __ovld __cnfn as_char3(uchar4);
+char3 __ovld __cnfn as_char3(short2);
+char3 __ovld __cnfn as_char3(ushort2);
+char3 __ovld __cnfn as_char3(int);
+char3 __ovld __cnfn as_char3(uint);
+char3 __ovld __cnfn as_char3(float);
+
+char4 __ovld __cnfn as_char4(char3);
+char4 __ovld __cnfn as_char4(char4);
+char4 __ovld __cnfn as_char4(uchar3);
+char4 __ovld __cnfn as_char4(uchar4);
+char4 __ovld __cnfn as_char4(short2);
+char4 __ovld __cnfn as_char4(ushort2);
+char4 __ovld __cnfn as_char4(int);
+char4 __ovld __cnfn as_char4(uint);
+char4 __ovld __cnfn as_char4(float);
+
+char8 __ovld __cnfn as_char8(char8);
+char8 __ovld __cnfn as_char8(uchar8);
+char8 __ovld __cnfn as_char8(short3);
+char8 __ovld __cnfn as_char8(short4);
+char8 __ovld __cnfn as_char8(ushort3);
+char8 __ovld __cnfn as_char8(ushort4);
+char8 __ovld __cnfn as_char8(int2);
+char8 __ovld __cnfn as_char8(uint2);
+char8 __ovld __cnfn as_char8(long);
+char8 __ovld __cnfn as_char8(ulong);
+char8 __ovld __cnfn as_char8(float2);
+
+char16 __ovld __cnfn as_char16(char16);
+char16 __ovld __cnfn as_char16(uchar16);
+char16 __ovld __cnfn as_char16(short8);
+char16 __ovld __cnfn as_char16(ushort8);
+char16 __ovld __cnfn as_char16(int3);
+char16 __ovld __cnfn as_char16(int4);
+char16 __ovld __cnfn as_char16(uint3);
+char16 __ovld __cnfn as_char16(uint4);
+char16 __ovld __cnfn as_char16(long2);
+char16 __ovld __cnfn as_char16(ulong2);
+char16 __ovld __cnfn as_char16(float3);
+char16 __ovld __cnfn as_char16(float4);
+
+uchar __ovld __cnfn as_uchar(char);
+uchar __ovld __cnfn as_uchar(uchar);
+
+uchar2 __ovld __cnfn as_uchar2(char2);
+uchar2 __ovld __cnfn as_uchar2(uchar2);
+uchar2 __ovld __cnfn as_uchar2(short);
+uchar2 __ovld __cnfn as_uchar2(ushort);
+
+uchar3 __ovld __cnfn as_uchar3(char3);
+uchar3 __ovld __cnfn as_uchar3(char4);
+uchar3 __ovld __cnfn as_uchar3(uchar3);
+uchar3 __ovld __cnfn as_uchar3(uchar4);
+uchar3 __ovld __cnfn as_uchar3(short2);
+uchar3 __ovld __cnfn as_uchar3(ushort2);
+uchar3 __ovld __cnfn as_uchar3(int);
+uchar3 __ovld __cnfn as_uchar3(uint);
+uchar3 __ovld __cnfn as_uchar3(float);
+
+uchar4 __ovld __cnfn as_uchar4(char3);
+uchar4 __ovld __cnfn as_uchar4(char4);
+uchar4 __ovld __cnfn as_uchar4(uchar3);
+uchar4 __ovld __cnfn as_uchar4(uchar4);
+uchar4 __ovld __cnfn as_uchar4(short2);
+uchar4 __ovld __cnfn as_uchar4(ushort2);
+uchar4 __ovld __cnfn as_uchar4(int);
+uchar4 __ovld __cnfn as_uchar4(uint);
+uchar4 __ovld __cnfn as_uchar4(float);
+
+uchar8 __ovld __cnfn as_uchar8(char8);
+uchar8 __ovld __cnfn as_uchar8(uchar8);
+uchar8 __ovld __cnfn as_uchar8(short3);
+uchar8 __ovld __cnfn as_uchar8(short4);
+uchar8 __ovld __cnfn as_uchar8(ushort3);
+uchar8 __ovld __cnfn as_uchar8(ushort4);
+uchar8 __ovld __cnfn as_uchar8(int2);
+uchar8 __ovld __cnfn as_uchar8(uint2);
+uchar8 __ovld __cnfn as_uchar8(long);
+uchar8 __ovld __cnfn as_uchar8(ulong);
+uchar8 __ovld __cnfn as_uchar8(float2);
+
+uchar16 __ovld __cnfn as_uchar16(char16);
+uchar16 __ovld __cnfn as_uchar16(uchar16);
+uchar16 __ovld __cnfn as_uchar16(short8);
+uchar16 __ovld __cnfn as_uchar16(ushort8);
+uchar16 __ovld __cnfn as_uchar16(int3);
+uchar16 __ovld __cnfn as_uchar16(int4);
+uchar16 __ovld __cnfn as_uchar16(uint3);
+uchar16 __ovld __cnfn as_uchar16(uint4);
+uchar16 __ovld __cnfn as_uchar16(long2);
+uchar16 __ovld __cnfn as_uchar16(ulong2);
+uchar16 __ovld __cnfn as_uchar16(float3);
+uchar16 __ovld __cnfn as_uchar16(float4);
+
+short __ovld __cnfn as_short(char2);
+short __ovld __cnfn as_short(uchar2);
+short __ovld __cnfn as_short(short);
+short __ovld __cnfn as_short(ushort);
+
+short2 __ovld __cnfn as_short2(char3);
+short2 __ovld __cnfn as_short2(char4);
+short2 __ovld __cnfn as_short2(uchar3);
+short2 __ovld __cnfn as_short2(uchar4);
+short2 __ovld __cnfn as_short2(short2);
+short2 __ovld __cnfn as_short2(ushort2);
+short2 __ovld __cnfn as_short2(int);
+short2 __ovld __cnfn as_short2(uint);
+short2 __ovld __cnfn as_short2(float);
+
+short3 __ovld __cnfn as_short3(char8);
+short3 __ovld __cnfn as_short3(uchar8);
+short3 __ovld __cnfn as_short3(short3);
+short3 __ovld __cnfn as_short3(short4);
+short3 __ovld __cnfn as_short3(ushort3);
+short3 __ovld __cnfn as_short3(ushort4);
+short3 __ovld __cnfn as_short3(int2);
+short3 __ovld __cnfn as_short3(uint2);
+short3 __ovld __cnfn as_short3(long);
+short3 __ovld __cnfn as_short3(ulong);
+short3 __ovld __cnfn as_short3(float2);
+
+short4 __ovld __cnfn as_short4(char8);
+short4 __ovld __cnfn as_short4(uchar8);
+short4 __ovld __cnfn as_short4(short3);
+short4 __ovld __cnfn as_short4(short4);
+short4 __ovld __cnfn as_short4(ushort3);
+short4 __ovld __cnfn as_short4(ushort4);
+short4 __ovld __cnfn as_short4(int2);
+short4 __ovld __cnfn as_short4(uint2);
+short4 __ovld __cnfn as_short4(long);
+short4 __ovld __cnfn as_short4(ulong);
+short4 __ovld __cnfn as_short4(float2);
+
+short8 __ovld __cnfn as_short8(char16);
+short8 __ovld __cnfn as_short8(uchar16);
+short8 __ovld __cnfn as_short8(short8);
+short8 __ovld __cnfn as_short8(ushort8);
+short8 __ovld __cnfn as_short8(int3);
+short8 __ovld __cnfn as_short8(int4);
+short8 __ovld __cnfn as_short8(uint3);
+short8 __ovld __cnfn as_short8(uint4);
+short8 __ovld __cnfn as_short8(long2);
+short8 __ovld __cnfn as_short8(ulong2);
+short8 __ovld __cnfn as_short8(float3);
+short8 __ovld __cnfn as_short8(float4);
+
+short16 __ovld __cnfn as_short16(short16);
+short16 __ovld __cnfn as_short16(ushort16);
+short16 __ovld __cnfn as_short16(int8);
+short16 __ovld __cnfn as_short16(uint8);
+short16 __ovld __cnfn as_short16(long3);
+short16 __ovld __cnfn as_short16(long4);
+short16 __ovld __cnfn as_short16(ulong3);
+short16 __ovld __cnfn as_short16(ulong4);
+short16 __ovld __cnfn as_short16(float8);
+
+ushort __ovld __cnfn as_ushort(char2);
+ushort __ovld __cnfn as_ushort(uchar2);
+ushort __ovld __cnfn as_ushort(short);
+ushort __ovld __cnfn as_ushort(ushort);
+
+ushort2 __ovld __cnfn as_ushort2(char3);
+ushort2 __ovld __cnfn as_ushort2(char4);
+ushort2 __ovld __cnfn as_ushort2(uchar3);
+ushort2 __ovld __cnfn as_ushort2(uchar4);
+ushort2 __ovld __cnfn as_ushort2(short2);
+ushort2 __ovld __cnfn as_ushort2(ushort2);
+ushort2 __ovld __cnfn as_ushort2(int);
+ushort2 __ovld __cnfn as_ushort2(uint);
+ushort2 __ovld __cnfn as_ushort2(float);
+
+ushort3 __ovld __cnfn as_ushort3(char8);
+ushort3 __ovld __cnfn as_ushort3(uchar8);
+ushort3 __ovld __cnfn as_ushort3(short3);
+ushort3 __ovld __cnfn as_ushort3(short4);
+ushort3 __ovld __cnfn as_ushort3(ushort3);
+ushort3 __ovld __cnfn as_ushort3(ushort4);
+ushort3 __ovld __cnfn as_ushort3(int2);
+ushort3 __ovld __cnfn as_ushort3(uint2);
+ushort3 __ovld __cnfn as_ushort3(long);
+ushort3 __ovld __cnfn as_ushort3(ulong);
+ushort3 __ovld __cnfn as_ushort3(float2);
+
+ushort4 __ovld __cnfn as_ushort4(char8);
+ushort4 __ovld __cnfn as_ushort4(uchar8);
+ushort4 __ovld __cnfn as_ushort4(short3);
+ushort4 __ovld __cnfn as_ushort4(short4);
+ushort4 __ovld __cnfn as_ushort4(ushort3);
+ushort4 __ovld __cnfn as_ushort4(ushort4);
+ushort4 __ovld __cnfn as_ushort4(int2);
+ushort4 __ovld __cnfn as_ushort4(uint2);
+ushort4 __ovld __cnfn as_ushort4(long);
+ushort4 __ovld __cnfn as_ushort4(ulong);
+ushort4 __ovld __cnfn as_ushort4(float2);
+
+ushort8 __ovld __cnfn as_ushort8(char16);
+ushort8 __ovld __cnfn as_ushort8(uchar16);
+ushort8 __ovld __cnfn as_ushort8(short8);
+ushort8 __ovld __cnfn as_ushort8(ushort8);
+ushort8 __ovld __cnfn as_ushort8(int3);
+ushort8 __ovld __cnfn as_ushort8(int4);
+ushort8 __ovld __cnfn as_ushort8(uint3);
+ushort8 __ovld __cnfn as_ushort8(uint4);
+ushort8 __ovld __cnfn as_ushort8(long2);
+ushort8 __ovld __cnfn as_ushort8(ulong2);
+ushort8 __ovld __cnfn as_ushort8(float3);
+ushort8 __ovld __cnfn as_ushort8(float4);
+
+ushort16 __ovld __cnfn as_ushort16(short16);
+ushort16 __ovld __cnfn as_ushort16(ushort16);
+ushort16 __ovld __cnfn as_ushort16(int8);
+ushort16 __ovld __cnfn as_ushort16(uint8);
+ushort16 __ovld __cnfn as_ushort16(long3);
+ushort16 __ovld __cnfn as_ushort16(long4);
+ushort16 __ovld __cnfn as_ushort16(ulong3);
+ushort16 __ovld __cnfn as_ushort16(ulong4);
+ushort16 __ovld __cnfn as_ushort16(float8);
+
+int __ovld __cnfn as_int(char3);
+int __ovld __cnfn as_int(char4);
+int __ovld __cnfn as_int(uchar3);
+int __ovld __cnfn as_int(uchar4);
+int __ovld __cnfn as_int(short2);
+int __ovld __cnfn as_int(ushort2);
+int __ovld __cnfn as_int(int);
+int __ovld __cnfn as_int(uint);
+int __ovld __cnfn as_int(float);
+
+int2 __ovld __cnfn as_int2(char8);
+int2 __ovld __cnfn as_int2(uchar8);
+int2 __ovld __cnfn as_int2(short3);
+int2 __ovld __cnfn as_int2(short4);
+int2 __ovld __cnfn as_int2(ushort3);
+int2 __ovld __cnfn as_int2(ushort4);
+int2 __ovld __cnfn as_int2(int2);
+int2 __ovld __cnfn as_int2(uint2);
+int2 __ovld __cnfn as_int2(long);
+int2 __ovld __cnfn as_int2(ulong);
+int2 __ovld __cnfn as_int2(float2);
+
+int3 __ovld __cnfn as_int3(char16);
+int3 __ovld __cnfn as_int3(uchar16);
+int3 __ovld __cnfn as_int3(short8);
+int3 __ovld __cnfn as_int3(ushort8);
+int3 __ovld __cnfn as_int3(int3);
+int3 __ovld __cnfn as_int3(int4);
+int3 __ovld __cnfn as_int3(uint3);
+int3 __ovld __cnfn as_int3(uint4);
+int3 __ovld __cnfn as_int3(long2);
+int3 __ovld __cnfn as_int3(ulong2);
+int3 __ovld __cnfn as_int3(float3);
+int3 __ovld __cnfn as_int3(float4);
+
+int4 __ovld __cnfn as_int4(char16);
+int4 __ovld __cnfn as_int4(uchar16);
+int4 __ovld __cnfn as_int4(short8);
+int4 __ovld __cnfn as_int4(ushort8);
+int4 __ovld __cnfn as_int4(int3);
+int4 __ovld __cnfn as_int4(int4);
+int4 __ovld __cnfn as_int4(uint3);
+int4 __ovld __cnfn as_int4(uint4);
+int4 __ovld __cnfn as_int4(long2);
+int4 __ovld __cnfn as_int4(ulong2);
+int4 __ovld __cnfn as_int4(float3);
+int4 __ovld __cnfn as_int4(float4);
+
+int8 __ovld __cnfn as_int8(short16);
+int8 __ovld __cnfn as_int8(ushort16);
+int8 __ovld __cnfn as_int8(int8);
+int8 __ovld __cnfn as_int8(uint8);
+int8 __ovld __cnfn as_int8(long3);
+int8 __ovld __cnfn as_int8(long4);
+int8 __ovld __cnfn as_int8(ulong3);
+int8 __ovld __cnfn as_int8(ulong4);
+int8 __ovld __cnfn as_int8(float8);
+
+int16 __ovld __cnfn as_int16(int16);
+int16 __ovld __cnfn as_int16(uint16);
+int16 __ovld __cnfn as_int16(long8);
+int16 __ovld __cnfn as_int16(ulong8);
+int16 __ovld __cnfn as_int16(float16);
+
+uint __ovld __cnfn as_uint(char3);
+uint __ovld __cnfn as_uint(char4);
+uint __ovld __cnfn as_uint(uchar3);
+uint __ovld __cnfn as_uint(uchar4);
+uint __ovld __cnfn as_uint(short2);
+uint __ovld __cnfn as_uint(ushort2);
+uint __ovld __cnfn as_uint(int);
+uint __ovld __cnfn as_uint(uint);
+uint __ovld __cnfn as_uint(float);
+
+uint2 __ovld __cnfn as_uint2(char8);
+uint2 __ovld __cnfn as_uint2(uchar8);
+uint2 __ovld __cnfn as_uint2(short3);
+uint2 __ovld __cnfn as_uint2(short4);
+uint2 __ovld __cnfn as_uint2(ushort3);
+uint2 __ovld __cnfn as_uint2(ushort4);
+uint2 __ovld __cnfn as_uint2(int2);
+uint2 __ovld __cnfn as_uint2(uint2);
+uint2 __ovld __cnfn as_uint2(long);
+uint2 __ovld __cnfn as_uint2(ulong);
+uint2 __ovld __cnfn as_uint2(float2);
+
+uint3 __ovld __cnfn as_uint3(char16);
+uint3 __ovld __cnfn as_uint3(uchar16);
+uint3 __ovld __cnfn as_uint3(short8);
+uint3 __ovld __cnfn as_uint3(ushort8);
+uint3 __ovld __cnfn as_uint3(int3);
+uint3 __ovld __cnfn as_uint3(int4);
+uint3 __ovld __cnfn as_uint3(uint3);
+uint3 __ovld __cnfn as_uint3(uint4);
+uint3 __ovld __cnfn as_uint3(long2);
+uint3 __ovld __cnfn as_uint3(ulong2);
+uint3 __ovld __cnfn as_uint3(float3);
+uint3 __ovld __cnfn as_uint3(float4);
+
+uint4 __ovld __cnfn as_uint4(char16);
+uint4 __ovld __cnfn as_uint4(uchar16);
+uint4 __ovld __cnfn as_uint4(short8);
+uint4 __ovld __cnfn as_uint4(ushort8);
+uint4 __ovld __cnfn as_uint4(int3);
+uint4 __ovld __cnfn as_uint4(int4);
+uint4 __ovld __cnfn as_uint4(uint3);
+uint4 __ovld __cnfn as_uint4(uint4);
+uint4 __ovld __cnfn as_uint4(long2);
+uint4 __ovld __cnfn as_uint4(ulong2);
+uint4 __ovld __cnfn as_uint4(float3);
+uint4 __ovld __cnfn as_uint4(float4);
+
+uint8 __ovld __cnfn as_uint8(short16);
+uint8 __ovld __cnfn as_uint8(ushort16);
+uint8 __ovld __cnfn as_uint8(int8);
+uint8 __ovld __cnfn as_uint8(uint8);
+uint8 __ovld __cnfn as_uint8(long3);
+uint8 __ovld __cnfn as_uint8(long4);
+uint8 __ovld __cnfn as_uint8(ulong3);
+uint8 __ovld __cnfn as_uint8(ulong4);
+uint8 __ovld __cnfn as_uint8(float8);
+
+uint16 __ovld __cnfn as_uint16(int16);
+uint16 __ovld __cnfn as_uint16(uint16);
+uint16 __ovld __cnfn as_uint16(long8);
+uint16 __ovld __cnfn as_uint16(ulong8);
+uint16 __ovld __cnfn as_uint16(float16);
+
+long __ovld __cnfn as_long(char8);
+long __ovld __cnfn as_long(uchar8);
+long __ovld __cnfn as_long(short3);
+long __ovld __cnfn as_long(short4);
+long __ovld __cnfn as_long(ushort3);
+long __ovld __cnfn as_long(ushort4);
+long __ovld __cnfn as_long(int2);
+long __ovld __cnfn as_long(uint2);
+long __ovld __cnfn as_long(long);
+long __ovld __cnfn as_long(ulong);
+long __ovld __cnfn as_long(float2);
+
+long2 __ovld __cnfn as_long2(char16);
+long2 __ovld __cnfn as_long2(uchar16);
+long2 __ovld __cnfn as_long2(short8);
+long2 __ovld __cnfn as_long2(ushort8);
+long2 __ovld __cnfn as_long2(int3);
+long2 __ovld __cnfn as_long2(int4);
+long2 __ovld __cnfn as_long2(uint3);
+long2 __ovld __cnfn as_long2(uint4);
+long2 __ovld __cnfn as_long2(long2);
+long2 __ovld __cnfn as_long2(ulong2);
+long2 __ovld __cnfn as_long2(float3);
+long2 __ovld __cnfn as_long2(float4);
+
+long3 __ovld __cnfn as_long3(short16);
+long3 __ovld __cnfn as_long3(ushort16);
+long3 __ovld __cnfn as_long3(int8);
+long3 __ovld __cnfn as_long3(uint8);
+long3 __ovld __cnfn as_long3(long3);
+long3 __ovld __cnfn as_long3(long4);
+long3 __ovld __cnfn as_long3(ulong3);
+long3 __ovld __cnfn as_long3(ulong4);
+long3 __ovld __cnfn as_long3(float8);
+
+long4 __ovld __cnfn as_long4(short16);
+long4 __ovld __cnfn as_long4(ushort16);
+long4 __ovld __cnfn as_long4(int8);
+long4 __ovld __cnfn as_long4(uint8);
+long4 __ovld __cnfn as_long4(long3);
+long4 __ovld __cnfn as_long4(long4);
+long4 __ovld __cnfn as_long4(ulong3);
+long4 __ovld __cnfn as_long4(ulong4);
+long4 __ovld __cnfn as_long4(float8);
+
+long8 __ovld __cnfn as_long8(int16);
+long8 __ovld __cnfn as_long8(uint16);
+long8 __ovld __cnfn as_long8(long8);
+long8 __ovld __cnfn as_long8(ulong8);
+long8 __ovld __cnfn as_long8(float16);
+
+long16 __ovld __cnfn as_long16(long16);
+long16 __ovld __cnfn as_long16(ulong16);
+
+ulong __ovld __cnfn as_ulong(char8);
+ulong __ovld __cnfn as_ulong(uchar8);
+ulong __ovld __cnfn as_ulong(short3);
+ulong __ovld __cnfn as_ulong(short4);
+ulong __ovld __cnfn as_ulong(ushort3);
+ulong __ovld __cnfn as_ulong(ushort4);
+ulong __ovld __cnfn as_ulong(int2);
+ulong __ovld __cnfn as_ulong(uint2);
+ulong __ovld __cnfn as_ulong(long);
+ulong __ovld __cnfn as_ulong(ulong);
+ulong __ovld __cnfn as_ulong(float2);
+
+ulong2 __ovld __cnfn as_ulong2(char16);
+ulong2 __ovld __cnfn as_ulong2(uchar16);
+ulong2 __ovld __cnfn as_ulong2(short8);
+ulong2 __ovld __cnfn as_ulong2(ushort8);
+ulong2 __ovld __cnfn as_ulong2(int3);
+ulong2 __ovld __cnfn as_ulong2(int4);
+ulong2 __ovld __cnfn as_ulong2(uint3);
+ulong2 __ovld __cnfn as_ulong2(uint4);
+ulong2 __ovld __cnfn as_ulong2(long2);
+ulong2 __ovld __cnfn as_ulong2(ulong2);
+ulong2 __ovld __cnfn as_ulong2(float3);
+ulong2 __ovld __cnfn as_ulong2(float4);
+
+ulong3 __ovld __cnfn as_ulong3(short16);
+ulong3 __ovld __cnfn as_ulong3(ushort16);
+ulong3 __ovld __cnfn as_ulong3(int8);
+ulong3 __ovld __cnfn as_ulong3(uint8);
+ulong3 __ovld __cnfn as_ulong3(long3);
+ulong3 __ovld __cnfn as_ulong3(long4);
+ulong3 __ovld __cnfn as_ulong3(ulong3);
+ulong3 __ovld __cnfn as_ulong3(ulong4);
+ulong3 __ovld __cnfn as_ulong3(float8);
+
+ulong4 __ovld __cnfn as_ulong4(short16);
+ulong4 __ovld __cnfn as_ulong4(ushort16);
+ulong4 __ovld __cnfn as_ulong4(int8);
+ulong4 __ovld __cnfn as_ulong4(uint8);
+ulong4 __ovld __cnfn as_ulong4(long3);
+ulong4 __ovld __cnfn as_ulong4(long4);
+ulong4 __ovld __cnfn as_ulong4(ulong3);
+ulong4 __ovld __cnfn as_ulong4(ulong4);
+ulong4 __ovld __cnfn as_ulong4(float8);
+
+ulong8 __ovld __cnfn as_ulong8(int16);
+ulong8 __ovld __cnfn as_ulong8(uint16);
+ulong8 __ovld __cnfn as_ulong8(long8);
+ulong8 __ovld __cnfn as_ulong8(ulong8);
+ulong8 __ovld __cnfn as_ulong8(float16);
+
+ulong16 __ovld __cnfn as_ulong16(long16);
+ulong16 __ovld __cnfn as_ulong16(ulong16);
+
+float __ovld __cnfn as_float(char3);
+float __ovld __cnfn as_float(char4);
+float __ovld __cnfn as_float(uchar3);
+float __ovld __cnfn as_float(uchar4);
+float __ovld __cnfn as_float(short2);
+float __ovld __cnfn as_float(ushort2);
+float __ovld __cnfn as_float(int);
+float __ovld __cnfn as_float(uint);
+float __ovld __cnfn as_float(float);
+
+float2 __ovld __cnfn as_float2(char8);
+float2 __ovld __cnfn as_float2(uchar8);
+float2 __ovld __cnfn as_float2(short3);
+float2 __ovld __cnfn as_float2(short4);
+float2 __ovld __cnfn as_float2(ushort3);
+float2 __ovld __cnfn as_float2(ushort4);
+float2 __ovld __cnfn as_float2(int2);
+float2 __ovld __cnfn as_float2(uint2);
+float2 __ovld __cnfn as_float2(long);
+float2 __ovld __cnfn as_float2(ulong);
+float2 __ovld __cnfn as_float2(float2);
+
+float3 __ovld __cnfn as_float3(char16);
+float3 __ovld __cnfn as_float3(uchar16);
+float3 __ovld __cnfn as_float3(short8);
+float3 __ovld __cnfn as_float3(ushort8);
+float3 __ovld __cnfn as_float3(int3);
+float3 __ovld __cnfn as_float3(int4);
+float3 __ovld __cnfn as_float3(uint3);
+float3 __ovld __cnfn as_float3(uint4);
+float3 __ovld __cnfn as_float3(long2);
+float3 __ovld __cnfn as_float3(ulong2);
+float3 __ovld __cnfn as_float3(float3);
+float3 __ovld __cnfn as_float3(float4);
+
+float4 __ovld __cnfn as_float4(char16);
+float4 __ovld __cnfn as_float4(uchar16);
+float4 __ovld __cnfn as_float4(short8);
+float4 __ovld __cnfn as_float4(ushort8);
+float4 __ovld __cnfn as_float4(int3);
+float4 __ovld __cnfn as_float4(int4);
+float4 __ovld __cnfn as_float4(uint3);
+float4 __ovld __cnfn as_float4(uint4);
+float4 __ovld __cnfn as_float4(long2);
+float4 __ovld __cnfn as_float4(ulong2);
+float4 __ovld __cnfn as_float4(float3);
+float4 __ovld __cnfn as_float4(float4);
+
+float8 __ovld __cnfn as_float8(short16);
+float8 __ovld __cnfn as_float8(ushort16);
+float8 __ovld __cnfn as_float8(int8);
+float8 __ovld __cnfn as_float8(uint8);
+float8 __ovld __cnfn as_float8(long3);
+float8 __ovld __cnfn as_float8(long4);
+float8 __ovld __cnfn as_float8(ulong3);
+float8 __ovld __cnfn as_float8(ulong4);
+float8 __ovld __cnfn as_float8(float8);
+
+float16 __ovld __cnfn as_float16(int16);
+float16 __ovld __cnfn as_float16(uint16);
+float16 __ovld __cnfn as_float16(long8);
+float16 __ovld __cnfn as_float16(ulong8);
+float16 __ovld __cnfn as_float16(float16);
+
+#ifdef cl_khr_fp64
+char8 __ovld __cnfn as_char8(double);
+char16 __ovld __cnfn as_char16(double2);
+uchar8 __ovld __cnfn as_uchar8(double);
+uchar16 __ovld __cnfn as_uchar16(double2);
+short3 __ovld __cnfn as_short3(double);
+short4 __ovld __cnfn as_short4(double);
+short8 __ovld __cnfn as_short8(double2);
+short16 __ovld __cnfn as_short16(double3);
+short16 __ovld __cnfn as_short16(double4);
+ushort3 __ovld __cnfn as_ushort3(double);
+ushort4 __ovld __cnfn as_ushort4(double);
+ushort8 __ovld __cnfn as_ushort8(double2);
+ushort16 __ovld __cnfn as_ushort16(double3);
+ushort16 __ovld __cnfn as_ushort16(double4);
+int2 __ovld __cnfn as_int2(double);
+int3 __ovld __cnfn as_int3(double2);
+int4 __ovld __cnfn as_int4(double2);
+int8 __ovld __cnfn as_int8(double3);
+int8 __ovld __cnfn as_int8(double4);
+int16 __ovld __cnfn as_int16(double8);
+uint2 __ovld __cnfn as_uint2(double);
+uint3 __ovld __cnfn as_uint3(double2);
+uint4 __ovld __cnfn as_uint4(double2);
+uint8 __ovld __cnfn as_uint8(double3);
+uint8 __ovld __cnfn as_uint8(double4);
+uint16 __ovld __cnfn as_uint16(double8);
+long __ovld __cnfn as_long(double);
+long2 __ovld __cnfn as_long2(double2);
+long3 __ovld __cnfn as_long3(double3);
+long3 __ovld __cnfn as_long3(double4);
+long4 __ovld __cnfn as_long4(double3);
+long4 __ovld __cnfn as_long4(double4);
+long8 __ovld __cnfn as_long8(double8);
+long16 __ovld __cnfn as_long16(double16);
+ulong __ovld __cnfn as_ulong(double);
+ulong2 __ovld __cnfn as_ulong2(double2);
+ulong3 __ovld __cnfn as_ulong3(double3);
+ulong3 __ovld __cnfn as_ulong3(double4);
+ulong4 __ovld __cnfn as_ulong4(double3);
+ulong4 __ovld __cnfn as_ulong4(double4);
+ulong8 __ovld __cnfn as_ulong8(double8);
+ulong16 __ovld __cnfn as_ulong16(double16);
+float2 __ovld __cnfn as_float2(double);
+float3 __ovld __cnfn as_float3(double2);
+float4 __ovld __cnfn as_float4(double2);
+float8 __ovld __cnfn as_float8(double3);
+float8 __ovld __cnfn as_float8(double4);
+float16 __ovld __cnfn as_float16(double8);
+double __ovld __cnfn as_double(char8);
+double __ovld __cnfn as_double(uchar8);
+double __ovld __cnfn as_double(short3);
+double __ovld __cnfn as_double(short4);
+double __ovld __cnfn as_double(ushort3);
+double __ovld __cnfn as_double(ushort4);
+double __ovld __cnfn as_double(int2);
+double __ovld __cnfn as_double(uint2);
+double __ovld __cnfn as_double(long);
+double __ovld __cnfn as_double(ulong);
+double __ovld __cnfn as_double(float2);
+double __ovld __cnfn as_double(double);
+double2 __ovld __cnfn as_double2(char16);
+double2 __ovld __cnfn as_double2(uchar16);
+double2 __ovld __cnfn as_double2(short8);
+double2 __ovld __cnfn as_double2(ushort8);
+double2 __ovld __cnfn as_double2(int3);
+double2 __ovld __cnfn as_double2(int4);
+double2 __ovld __cnfn as_double2(uint3);
+double2 __ovld __cnfn as_double2(uint4);
+double2 __ovld __cnfn as_double2(long2);
+double2 __ovld __cnfn as_double2(ulong2);
+double2 __ovld __cnfn as_double2(float3);
+double2 __ovld __cnfn as_double2(float4);
+double2 __ovld __cnfn as_double2(double2);
+double3 __ovld __cnfn as_double3(short16);
+double3 __ovld __cnfn as_double3(ushort16);
+double3 __ovld __cnfn as_double3(int8);
+double3 __ovld __cnfn as_double3(uint8);
+double3 __ovld __cnfn as_double3(long3);
+double3 __ovld __cnfn as_double3(long4);
+double3 __ovld __cnfn as_double3(ulong3);
+double3 __ovld __cnfn as_double3(ulong4);
+double3 __ovld __cnfn as_double3(float8);
+double3 __ovld __cnfn as_double3(double3);
+double3 __ovld __cnfn as_double3(double4);
+double4 __ovld __cnfn as_double4(short16);
+double4 __ovld __cnfn as_double4(ushort16);
+double4 __ovld __cnfn as_double4(int8);
+double4 __ovld __cnfn as_double4(uint8);
+double4 __ovld __cnfn as_double4(long3);
+double4 __ovld __cnfn as_double4(long4);
+double4 __ovld __cnfn as_double4(ulong3);
+double4 __ovld __cnfn as_double4(ulong4);
+double4 __ovld __cnfn as_double4(float8);
+double4 __ovld __cnfn as_double4(double3);
+double4 __ovld __cnfn as_double4(double4);
+double8 __ovld __cnfn as_double8(int16);
+double8 __ovld __cnfn as_double8(uint16);
+double8 __ovld __cnfn as_double8(long8);
+double8 __ovld __cnfn as_double8(ulong8);
+double8 __ovld __cnfn as_double8(float16);
+double8 __ovld __cnfn as_double8(double8);
+double16 __ovld __cnfn as_double16(long16);
+double16 __ovld __cnfn as_double16(ulong16);
+double16 __ovld __cnfn as_double16(double16);
+#endif //cl_khr_fp64
+
+#ifdef cl_khr_fp16
+char2 __ovld __cnfn as_char2(half);
+char3 __ovld __cnfn as_char3(half2);
+char4 __ovld __cnfn as_char4(half2);
+char8 __ovld __cnfn as_char8(half3);
+char8 __ovld __cnfn as_char8(half4);
+char16 __ovld __cnfn as_char16(half8);
+uchar2 __ovld __cnfn as_uchar2(half);
+uchar3 __ovld __cnfn as_uchar3(half2);
+uchar4 __ovld __cnfn as_uchar4(half2);
+uchar8 __ovld __cnfn as_uchar8(half3);
+uchar8 __ovld __cnfn as_uchar8(half4);
+uchar16 __ovld __cnfn as_uchar16(half8);
+short __ovld __cnfn as_short(half);
+short2 __ovld __cnfn as_short2(half2);
+short3 __ovld __cnfn as_short3(half3);
+short3 __ovld __cnfn as_short3(half4);
+short4 __ovld __cnfn as_short4(half3);
+short4 __ovld __cnfn as_short4(half4);
+short8 __ovld __cnfn as_short8(half8);
+short16 __ovld __cnfn as_short16(half16);
+ushort __ovld __cnfn as_ushort(half);
+ushort2 __ovld __cnfn as_ushort2(half2);
+ushort3 __ovld __cnfn as_ushort3(half3);
+ushort3 __ovld __cnfn as_ushort3(half4);
+ushort4 __ovld __cnfn as_ushort4(half3);
+ushort4 __ovld __cnfn as_ushort4(half4);
+ushort8 __ovld __cnfn as_ushort8(half8);
+ushort16 __ovld __cnfn as_ushort16(half16);
+int __ovld __cnfn as_int(half2);
+int2 __ovld __cnfn as_int2(half3);
+int2 __ovld __cnfn as_int2(half4);
+int3 __ovld __cnfn as_int3(half8);
+int4 __ovld __cnfn as_int4(half8);
+int8 __ovld __cnfn as_int8(half16);
+uint __ovld __cnfn as_uint(half2);
+uint2 __ovld __cnfn as_uint2(half3);
+uint2 __ovld __cnfn as_uint2(half4);
+uint3 __ovld __cnfn as_uint3(half8);
+uint4 __ovld __cnfn as_uint4(half8);
+uint8 __ovld __cnfn as_uint8(half16);
+long __ovld __cnfn as_long(half3);
+long __ovld __cnfn as_long(half4);
+long2 __ovld __cnfn as_long2(half8);
+long3 __ovld __cnfn as_long3(half16);
+long4 __ovld __cnfn as_long4(half16);
+ulong __ovld __cnfn as_ulong(half3);
+ulong __ovld __cnfn as_ulong(half4);
+ulong2 __ovld __cnfn as_ulong2(half8);
+ulong3 __ovld __cnfn as_ulong3(half16);
+ulong4 __ovld __cnfn as_ulong4(half16);
+half __ovld __cnfn as_half(char2);
+half __ovld __cnfn as_half(uchar2);
+half __ovld __cnfn as_half(short);
+half __ovld __cnfn as_half(ushort);
+half __ovld __cnfn as_half(half);
+half2 __ovld __cnfn as_half2(char3);
+half2 __ovld __cnfn as_half2(char4);
+half2 __ovld __cnfn as_half2(uchar3);
+half2 __ovld __cnfn as_half2(uchar4);
+half2 __ovld __cnfn as_half2(short2);
+half2 __ovld __cnfn as_half2(ushort2);
+half2 __ovld __cnfn as_half2(int);
+half2 __ovld __cnfn as_half2(uint);
+half2 __ovld __cnfn as_half2(half2);
+half2 __ovld __cnfn as_half2(float);
+half3 __ovld __cnfn as_half3(char8);
+half3 __ovld __cnfn as_half3(uchar8);
+half3 __ovld __cnfn as_half3(short3);
+half3 __ovld __cnfn as_half3(short4);
+half3 __ovld __cnfn as_half3(ushort3);
+half3 __ovld __cnfn as_half3(ushort4);
+half3 __ovld __cnfn as_half3(int2);
+half3 __ovld __cnfn as_half3(uint2);
+half3 __ovld __cnfn as_half3(long);
+half3 __ovld __cnfn as_half3(ulong);
+half3 __ovld __cnfn as_half3(half3);
+half3 __ovld __cnfn as_half3(half4);
+half3 __ovld __cnfn as_half3(float2);
+half4 __ovld __cnfn as_half4(char8);
+half4 __ovld __cnfn as_half4(uchar8);
+half4 __ovld __cnfn as_half4(short3);
+half4 __ovld __cnfn as_half4(short4);
+half4 __ovld __cnfn as_half4(ushort3);
+half4 __ovld __cnfn as_half4(ushort4);
+half4 __ovld __cnfn as_half4(int2);
+half4 __ovld __cnfn as_half4(uint2);
+half4 __ovld __cnfn as_half4(long);
+half4 __ovld __cnfn as_half4(ulong);
+half4 __ovld __cnfn as_half4(half3);
+half4 __ovld __cnfn as_half4(half4);
+half4 __ovld __cnfn as_half4(float2);
+half8 __ovld __cnfn as_half8(char16);
+half8 __ovld __cnfn as_half8(uchar16);
+half8 __ovld __cnfn as_half8(short8);
+half8 __ovld __cnfn as_half8(ushort8);
+half8 __ovld __cnfn as_half8(int3);
+half8 __ovld __cnfn as_half8(int4);
+half8 __ovld __cnfn as_half8(uint3);
+half8 __ovld __cnfn as_half8(uint4);
+half8 __ovld __cnfn as_half8(long2);
+half8 __ovld __cnfn as_half8(ulong2);
+half8 __ovld __cnfn as_half8(half8);
+half8 __ovld __cnfn as_half8(float3);
+half8 __ovld __cnfn as_half8(float4);
+half16 __ovld __cnfn as_half16(short16);
+half16 __ovld __cnfn as_half16(ushort16);
+half16 __ovld __cnfn as_half16(int8);
+half16 __ovld __cnfn as_half16(uint8);
+half16 __ovld __cnfn as_half16(long3);
+half16 __ovld __cnfn as_half16(long4);
+half16 __ovld __cnfn as_half16(ulong3);
+half16 __ovld __cnfn as_half16(ulong4);
+half16 __ovld __cnfn as_half16(half16);
+half16 __ovld __cnfn as_half16(float8);
+float __ovld __cnfn as_float(half2);
+float2 __ovld __cnfn as_float2(half3);
+float2 __ovld __cnfn as_float2(half4);
+float3 __ovld __cnfn as_float3(half8);
+float4 __ovld __cnfn as_float4(half8);
+float8 __ovld __cnfn as_float8(half16);
+
+#ifdef cl_khr_fp64
+half3 __ovld __cnfn as_half3(double);
+half4 __ovld __cnfn as_half4(double);
+half8 __ovld __cnfn as_half8(double2);
+half16 __ovld __cnfn as_half16(double3);
+half16 __ovld __cnfn as_half16(double4);
+double __ovld __cnfn as_double(half3);
+double __ovld __cnfn as_double(half4);
+double2 __ovld __cnfn as_double2(half8);
+double3 __ovld __cnfn as_double3(half16);
+double4 __ovld __cnfn as_double4(half16);
+#endif //cl_khr_fp64
+#endif //cl_khr_fp16
+
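+// A minimal usage sketch (not part of the header; variable names are
+// illustrative): the as_typen() overloads above reinterpret the bits of a
+// value as another type of the same size, with no numeric conversion.
+//
+//   float4 v    = (float4)(-1.0f, 2.0f, -3.0f, 4.0f);
+//   uint   bits = as_uint(1.0f);                       // 0x3F800000, the
+//                                                      // IEEE 754 bits of 1.0f
+//   float4 absv = as_float4(as_int4(v) & 0x7FFFFFFF);  // clear the sign bits
+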
+// OpenCL v1.1 s6.9, v1.2/2.0 s6.10 - Function qualifiers
+
+#define __kernel_exec(X, typen) __kernel \
+ __attribute__((work_group_size_hint(X, 1, 1))) \
+ __attribute__((vec_type_hint(typen)))
+
+#define kernel_exec(X, typen) __kernel \
+ __attribute__((work_group_size_hint(X, 1, 1))) \
+ __attribute__((vec_type_hint(typen)))
+
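+// A hedged example of the qualifier macros above (kernel body illustrative):
+// __kernel_exec(X, typen) expands to __kernel plus a work-group size hint of
+// (X, 1, 1) and a vector type hint for the compiler.
+//
+//   __kernel_exec(64, float4) void scale(__global float4 *v, float s) {
+//       v[get_global_id(0)] *= s;    // hinted: 64 work-items, float4 math
+//   }
+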
+// OpenCL v1.1 s6.11.1, v1.2 s6.12.1, v2.0 s6.13.1 - Work-item Functions
+
+/**
+ * Returns the number of dimensions in use. This is the
+ * value given to the work_dim argument specified in
+ * clEnqueueNDRangeKernel.
+ * For clEnqueueTask, this returns 1.
+ */
+uint __ovld __cnfn get_work_dim(void);
+
+/**
+ * Returns the number of global work-items specified for
+ * dimension identified by dimindx. This value is given by
+ * the global_work_size argument to
+ * clEnqueueNDRangeKernel. Valid values of dimindx
+ * are 0 to get_work_dim() - 1. For other values of
+ * dimindx, get_global_size() returns 1.
+ * For clEnqueueTask, this always returns 1.
+ */
+size_t __ovld __cnfn get_global_size(uint dimindx);
+
+/**
+ * Returns the unique global work-item ID value for
+ * dimension identified by dimindx. The global work-item
+ * ID specifies the work-item ID based on the number of
+ * global work-items specified to execute the kernel. Valid
+ * values of dimindx are 0 to get_work_dim() - 1. For
+ * other values of dimindx, get_global_id() returns 0.
+ * For clEnqueueTask, this returns 0.
+ */
+size_t __ovld __cnfn get_global_id(uint dimindx);
+
+/**
+ * Returns the number of local work-items specified in
+ * dimension identified by dimindx. This value is given by
+ * the local_work_size argument to
+ * clEnqueueNDRangeKernel if local_work_size is not
+ * NULL; otherwise the OpenCL implementation chooses
+ * an appropriate local_work_size value which is returned
+ * by this function. Valid values of dimindx are 0 to
+ * get_work_dim() - 1. For other values of dimindx,
+ * get_local_size() returns 1.
+ * For clEnqueueTask, this always returns 1.
+ */
+size_t __ovld __cnfn get_local_size(uint dimindx);
+
+/**
+ * Returns the unique local work-item ID, i.e. the ID of a
+ * work-item within a specific work-group, for the dimension
+ * identified by dimindx. Valid values of dimindx are 0 to
+ * get_work_dim() - 1. For other values of dimindx,
+ * get_local_id() returns 0.
+ * For clEnqueueTask, this returns 0.
+ */
+size_t __ovld __cnfn get_local_id(uint dimindx);
+
+/**
+ * Returns the number of work-groups that will execute a
+ * kernel for dimension identified by dimindx.
+ * Valid values of dimindx are 0 to get_work_dim() - 1.
+ * For other values of dimindx, get_num_groups () returns
+ * 1.
+ * For clEnqueueTask, this always returns 1.
+ */
+size_t __ovld __cnfn get_num_groups(uint dimindx);
+
+/**
+ * get_group_id returns the work-group ID, which is a
+ * number from 0 to get_num_groups(dimindx) - 1.
+ * Valid values of dimindx are 0 to get_work_dim() - 1.
+ * For other values, get_group_id() returns 0.
+ * For clEnqueueTask, this returns 0.
+ */
+size_t __ovld __cnfn get_group_id(uint dimindx);
+
+/**
+ * get_global_offset returns the offset values specified in
+ * the global_work_offset argument to
+ * clEnqueueNDRangeKernel.
+ * Valid values of dimindx are 0 to get_work_dim() - 1.
+ * For other values, get_global_offset() returns 0.
+ * For clEnqueueTask, this returns 0.
+ */
+size_t __ovld __cnfn get_global_offset(uint dimindx);
+
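+// A hedged sketch tying the work-item queries above together (kernel and
+// buffer names hypothetical): a 2-D NDRange addressing a row-major buffer.
+//
+//   __kernel void copy2d(__global const float *src, __global float *dst) {
+//       size_t x = get_global_id(0);     // column index in the NDRange
+//       size_t y = get_global_id(1);     // row index in the NDRange
+//       size_t w = get_global_size(0);   // row pitch in work-items
+//       dst[y * w + x] = src[y * w + x]; // flat row-major index
+//   }
+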
+#if __OPENCL_C_VERSION__ >= CL_VERSION_2_0
+size_t __ovld get_enqueued_local_size(uint dimindx);
+size_t __ovld get_global_linear_id(void);
+size_t __ovld get_local_linear_id(void);
+#endif //__OPENCL_C_VERSION__ >= CL_VERSION_2_0
+
+// OpenCL v1.1 s6.11.2, v1.2 s6.12.2, v2.0 s6.13.2 - Math functions
+
+/**
+ * Arc cosine function.
+ */
+float __ovld __cnfn acos(float);
+float2 __ovld __cnfn acos(float2);
+float3 __ovld __cnfn acos(float3);
+float4 __ovld __cnfn acos(float4);
+float8 __ovld __cnfn acos(float8);
+float16 __ovld __cnfn acos(float16);
+#ifdef cl_khr_fp64
+double __ovld __cnfn acos(double);
+double2 __ovld __cnfn acos(double2);
+double3 __ovld __cnfn acos(double3);
+double4 __ovld __cnfn acos(double4);
+double8 __ovld __cnfn acos(double8);
+double16 __ovld __cnfn acos(double16);
+#endif //cl_khr_fp64
+#ifdef cl_khr_fp16
+half __ovld __cnfn acos(half);
+half2 __ovld __cnfn acos(half2);
+half3 __ovld __cnfn acos(half3);
+half4 __ovld __cnfn acos(half4);
+half8 __ovld __cnfn acos(half8);
+half16 __ovld __cnfn acos(half16);
+#endif //cl_khr_fp16
+
+/**
+ * Inverse hyperbolic cosine.
+ */
+float __ovld __cnfn acosh(float);
+float2 __ovld __cnfn acosh(float2);
+float3 __ovld __cnfn acosh(float3);
+float4 __ovld __cnfn acosh(float4);
+float8 __ovld __cnfn acosh(float8);
+float16 __ovld __cnfn acosh(float16);
+#ifdef cl_khr_fp64
+double __ovld __cnfn acosh(double);
+double2 __ovld __cnfn acosh(double2);
+double3 __ovld __cnfn acosh(double3);
+double4 __ovld __cnfn acosh(double4);
+double8 __ovld __cnfn acosh(double8);
+double16 __ovld __cnfn acosh(double16);
+#endif //cl_khr_fp64
+#ifdef cl_khr_fp16
+half __ovld __cnfn acosh(half);
+half2 __ovld __cnfn acosh(half2);
+half3 __ovld __cnfn acosh(half3);
+half4 __ovld __cnfn acosh(half4);
+half8 __ovld __cnfn acosh(half8);
+half16 __ovld __cnfn acosh(half16);
+#endif //cl_khr_fp16
+
+/**
+ * Compute acos (x) / PI.
+ */
+float __ovld __cnfn acospi(float x);
+float2 __ovld __cnfn acospi(float2 x);
+float3 __ovld __cnfn acospi(float3 x);
+float4 __ovld __cnfn acospi(float4 x);
+float8 __ovld __cnfn acospi(float8 x);
+float16 __ovld __cnfn acospi(float16 x);
+#ifdef cl_khr_fp64
+double __ovld __cnfn acospi(double x);
+double2 __ovld __cnfn acospi(double2 x);
+double3 __ovld __cnfn acospi(double3 x);
+double4 __ovld __cnfn acospi(double4 x);
+double8 __ovld __cnfn acospi(double8 x);
+double16 __ovld __cnfn acospi(double16 x);
+#endif //cl_khr_fp64
+#ifdef cl_khr_fp16
+half __ovld __cnfn acospi(half x);
+half2 __ovld __cnfn acospi(half2 x);
+half3 __ovld __cnfn acospi(half3 x);
+half4 __ovld __cnfn acospi(half4 x);
+half8 __ovld __cnfn acospi(half8 x);
+half16 __ovld __cnfn acospi(half16 x);
+#endif //cl_khr_fp16
+
+/**
+ * Arc sine function.
+ */
+float __ovld __cnfn asin(float);
+float2 __ovld __cnfn asin(float2);
+float3 __ovld __cnfn asin(float3);
+float4 __ovld __cnfn asin(float4);
+float8 __ovld __cnfn asin(float8);
+float16 __ovld __cnfn asin(float16);
+#ifdef cl_khr_fp64
+double __ovld __cnfn asin(double);
+double2 __ovld __cnfn asin(double2);
+double3 __ovld __cnfn asin(double3);
+double4 __ovld __cnfn asin(double4);
+double8 __ovld __cnfn asin(double8);
+double16 __ovld __cnfn asin(double16);
+#endif //cl_khr_fp64
+#ifdef cl_khr_fp16
+half __ovld __cnfn asin(half);
+half2 __ovld __cnfn asin(half2);
+half3 __ovld __cnfn asin(half3);
+half4 __ovld __cnfn asin(half4);
+half8 __ovld __cnfn asin(half8);
+half16 __ovld __cnfn asin(half16);
+#endif //cl_khr_fp16
+
+/**
+ * Inverse hyperbolic sine.
+ */
+float __ovld __cnfn asinh(float);
+float2 __ovld __cnfn asinh(float2);
+float3 __ovld __cnfn asinh(float3);
+float4 __ovld __cnfn asinh(float4);
+float8 __ovld __cnfn asinh(float8);
+float16 __ovld __cnfn asinh(float16);
+#ifdef cl_khr_fp64
+double __ovld __cnfn asinh(double);
+double2 __ovld __cnfn asinh(double2);
+double3 __ovld __cnfn asinh(double3);
+double4 __ovld __cnfn asinh(double4);
+double8 __ovld __cnfn asinh(double8);
+double16 __ovld __cnfn asinh(double16);
+#endif //cl_khr_fp64
+#ifdef cl_khr_fp16
+half __ovld __cnfn asinh(half);
+half2 __ovld __cnfn asinh(half2);
+half3 __ovld __cnfn asinh(half3);
+half4 __ovld __cnfn asinh(half4);
+half8 __ovld __cnfn asinh(half8);
+half16 __ovld __cnfn asinh(half16);
+#endif //cl_khr_fp16
+
+/**
+ * Compute asin (x) / PI.
+ */
+float __ovld __cnfn asinpi(float x);
+float2 __ovld __cnfn asinpi(float2 x);
+float3 __ovld __cnfn asinpi(float3 x);
+float4 __ovld __cnfn asinpi(float4 x);
+float8 __ovld __cnfn asinpi(float8 x);
+float16 __ovld __cnfn asinpi(float16 x);
+#ifdef cl_khr_fp64
+double __ovld __cnfn asinpi(double x);
+double2 __ovld __cnfn asinpi(double2 x);
+double3 __ovld __cnfn asinpi(double3 x);
+double4 __ovld __cnfn asinpi(double4 x);
+double8 __ovld __cnfn asinpi(double8 x);
+double16 __ovld __cnfn asinpi(double16 x);
+#endif //cl_khr_fp64
+#ifdef cl_khr_fp16
+half __ovld __cnfn asinpi(half x);
+half2 __ovld __cnfn asinpi(half2 x);
+half3 __ovld __cnfn asinpi(half3 x);
+half4 __ovld __cnfn asinpi(half4 x);
+half8 __ovld __cnfn asinpi(half8 x);
+half16 __ovld __cnfn asinpi(half16 x);
+#endif //cl_khr_fp16
+
+/**
+ * Arc tangent function.
+ */
+float __ovld __cnfn atan(float y_over_x);
+float2 __ovld __cnfn atan(float2 y_over_x);
+float3 __ovld __cnfn atan(float3 y_over_x);
+float4 __ovld __cnfn atan(float4 y_over_x);
+float8 __ovld __cnfn atan(float8 y_over_x);
+float16 __ovld __cnfn atan(float16 y_over_x);
+#ifdef cl_khr_fp64
+double __ovld __cnfn atan(double y_over_x);
+double2 __ovld __cnfn atan(double2 y_over_x);
+double3 __ovld __cnfn atan(double3 y_over_x);
+double4 __ovld __cnfn atan(double4 y_over_x);
+double8 __ovld __cnfn atan(double8 y_over_x);
+double16 __ovld __cnfn atan(double16 y_over_x);
+#endif //cl_khr_fp64
+#ifdef cl_khr_fp16
+half __ovld __cnfn atan(half y_over_x);
+half2 __ovld __cnfn atan(half2 y_over_x);
+half3 __ovld __cnfn atan(half3 y_over_x);
+half4 __ovld __cnfn atan(half4 y_over_x);
+half8 __ovld __cnfn atan(half8 y_over_x);
+half16 __ovld __cnfn atan(half16 y_over_x);
+#endif //cl_khr_fp16
+
+/**
+ * Arc tangent of y / x.
+ */
+float __ovld __cnfn atan2(float y, float x);
+float2 __ovld __cnfn atan2(float2 y, float2 x);
+float3 __ovld __cnfn atan2(float3 y, float3 x);
+float4 __ovld __cnfn atan2(float4 y, float4 x);
+float8 __ovld __cnfn atan2(float8 y, float8 x);
+float16 __ovld __cnfn atan2(float16 y, float16 x);
+#ifdef cl_khr_fp64
+double __ovld __cnfn atan2(double y, double x);
+double2 __ovld __cnfn atan2(double2 y, double2 x);
+double3 __ovld __cnfn atan2(double3 y, double3 x);
+double4 __ovld __cnfn atan2(double4 y, double4 x);
+double8 __ovld __cnfn atan2(double8 y, double8 x);
+double16 __ovld __cnfn atan2(double16 y, double16 x);
+#endif //cl_khr_fp64
+#ifdef cl_khr_fp16
+half __ovld __cnfn atan2(half y, half x);
+half2 __ovld __cnfn atan2(half2 y, half2 x);
+half3 __ovld __cnfn atan2(half3 y, half3 x);
+half4 __ovld __cnfn atan2(half4 y, half4 x);
+half8 __ovld __cnfn atan2(half8 y, half8 x);
+half16 __ovld __cnfn atan2(half16 y, half16 x);
+#endif //cl_khr_fp16
+
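+// Illustrative note (not from the header): unlike atan(y / x), atan2 uses the
+// signs of both arguments to select the quadrant, e.g. atan2(1.0f, -1.0f) is
+// 3*PI/4 whereas atan(1.0f / -1.0f) is -PI/4.
+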
+/**
+ * Inverse hyperbolic tangent.
+ */
+float __ovld __cnfn atanh(float);
+float2 __ovld __cnfn atanh(float2);
+float3 __ovld __cnfn atanh(float3);
+float4 __ovld __cnfn atanh(float4);
+float8 __ovld __cnfn atanh(float8);
+float16 __ovld __cnfn atanh(float16);
+#ifdef cl_khr_fp64
+double __ovld __cnfn atanh(double);
+double2 __ovld __cnfn atanh(double2);
+double3 __ovld __cnfn atanh(double3);
+double4 __ovld __cnfn atanh(double4);
+double8 __ovld __cnfn atanh(double8);
+double16 __ovld __cnfn atanh(double16);
+#endif //cl_khr_fp64
+#ifdef cl_khr_fp16
+half __ovld __cnfn atanh(half);
+half2 __ovld __cnfn atanh(half2);
+half3 __ovld __cnfn atanh(half3);
+half4 __ovld __cnfn atanh(half4);
+half8 __ovld __cnfn atanh(half8);
+half16 __ovld __cnfn atanh(half16);
+#endif //cl_khr_fp16
+
+/**
+ * Compute atan (x) / PI.
+ */
+float __ovld __cnfn atanpi(float x);
+float2 __ovld __cnfn atanpi(float2 x);
+float3 __ovld __cnfn atanpi(float3 x);
+float4 __ovld __cnfn atanpi(float4 x);
+float8 __ovld __cnfn atanpi(float8 x);
+float16 __ovld __cnfn atanpi(float16 x);
+#ifdef cl_khr_fp64
+double __ovld __cnfn atanpi(double x);
+double2 __ovld __cnfn atanpi(double2 x);
+double3 __ovld __cnfn atanpi(double3 x);
+double4 __ovld __cnfn atanpi(double4 x);
+double8 __ovld __cnfn atanpi(double8 x);
+double16 __ovld __cnfn atanpi(double16 x);
+#endif //cl_khr_fp64
+#ifdef cl_khr_fp16
+half __ovld __cnfn atanpi(half x);
+half2 __ovld __cnfn atanpi(half2 x);
+half3 __ovld __cnfn atanpi(half3 x);
+half4 __ovld __cnfn atanpi(half4 x);
+half8 __ovld __cnfn atanpi(half8 x);
+half16 __ovld __cnfn atanpi(half16 x);
+#endif //cl_khr_fp16
+
+/**
+ * Compute atan2 (y, x) / PI.
+ */
+float __ovld __cnfn atan2pi(float y, float x);
+float2 __ovld __cnfn atan2pi(float2 y, float2 x);
+float3 __ovld __cnfn atan2pi(float3 y, float3 x);
+float4 __ovld __cnfn atan2pi(float4 y, float4 x);
+float8 __ovld __cnfn atan2pi(float8 y, float8 x);
+float16 __ovld __cnfn atan2pi(float16 y, float16 x);
+#ifdef cl_khr_fp64
+double __ovld __cnfn atan2pi(double y, double x);
+double2 __ovld __cnfn atan2pi(double2 y, double2 x);
+double3 __ovld __cnfn atan2pi(double3 y, double3 x);
+double4 __ovld __cnfn atan2pi(double4 y, double4 x);
+double8 __ovld __cnfn atan2pi(double8 y, double8 x);
+double16 __ovld __cnfn atan2pi(double16 y, double16 x);
+#endif //cl_khr_fp64
+#ifdef cl_khr_fp16
+half __ovld __cnfn atan2pi(half y, half x);
+half2 __ovld __cnfn atan2pi(half2 y, half2 x);
+half3 __ovld __cnfn atan2pi(half3 y, half3 x);
+half4 __ovld __cnfn atan2pi(half4 y, half4 x);
+half8 __ovld __cnfn atan2pi(half8 y, half8 x);
+half16 __ovld __cnfn atan2pi(half16 y, half16 x);
+#endif //cl_khr_fp16
+
+/**
+ * Compute cube-root.
+ */
+float __ovld __cnfn cbrt(float);
+float2 __ovld __cnfn cbrt(float2);
+float3 __ovld __cnfn cbrt(float3);
+float4 __ovld __cnfn cbrt(float4);
+float8 __ovld __cnfn cbrt(float8);
+float16 __ovld __cnfn cbrt(float16);
+#ifdef cl_khr_fp64
+double __ovld __cnfn cbrt(double);
+double2 __ovld __cnfn cbrt(double2);
+double3 __ovld __cnfn cbrt(double3);
+double4 __ovld __cnfn cbrt(double4);
+double8 __ovld __cnfn cbrt(double8);
+double16 __ovld __cnfn cbrt(double16);
+#endif //cl_khr_fp64
+#ifdef cl_khr_fp16
+half __ovld __cnfn cbrt(half);
+half2 __ovld __cnfn cbrt(half2);
+half3 __ovld __cnfn cbrt(half3);
+half4 __ovld __cnfn cbrt(half4);
+half8 __ovld __cnfn cbrt(half8);
+half16 __ovld __cnfn cbrt(half16);
+#endif //cl_khr_fp16
+
+/**
+ * Round to integral value using the round to positive
+ * infinity rounding mode.
+ */
+float __ovld __cnfn ceil(float);
+float2 __ovld __cnfn ceil(float2);
+float3 __ovld __cnfn ceil(float3);
+float4 __ovld __cnfn ceil(float4);
+float8 __ovld __cnfn ceil(float8);
+float16 __ovld __cnfn ceil(float16);
+#ifdef cl_khr_fp64
+double __ovld __cnfn ceil(double);
+double2 __ovld __cnfn ceil(double2);
+double3 __ovld __cnfn ceil(double3);
+double4 __ovld __cnfn ceil(double4);
+double8 __ovld __cnfn ceil(double8);
+double16 __ovld __cnfn ceil(double16);
+#endif //cl_khr_fp64
+#ifdef cl_khr_fp16
+half __ovld __cnfn ceil(half);
+half2 __ovld __cnfn ceil(half2);
+half3 __ovld __cnfn ceil(half3);
+half4 __ovld __cnfn ceil(half4);
+half8 __ovld __cnfn ceil(half8);
+half16 __ovld __cnfn ceil(half16);
+#endif //cl_khr_fp16
+
+/**
+ * Returns x with its sign changed to match the sign of y.
+ */
+float __ovld __cnfn copysign(float x, float y);
+float2 __ovld __cnfn copysign(float2 x, float2 y);
+float3 __ovld __cnfn copysign(float3 x, float3 y);
+float4 __ovld __cnfn copysign(float4 x, float4 y);
+float8 __ovld __cnfn copysign(float8 x, float8 y);
+float16 __ovld __cnfn copysign(float16 x, float16 y);
+#ifdef cl_khr_fp64
+double __ovld __cnfn copysign(double x, double y);
+double2 __ovld __cnfn copysign(double2 x, double2 y);
+double3 __ovld __cnfn copysign(double3 x, double3 y);
+double4 __ovld __cnfn copysign(double4 x, double4 y);
+double8 __ovld __cnfn copysign(double8 x, double8 y);
+double16 __ovld __cnfn copysign(double16 x, double16 y);
+#endif //cl_khr_fp64
+#ifdef cl_khr_fp16
+half __ovld __cnfn copysign(half x, half y);
+half2 __ovld __cnfn copysign(half2 x, half2 y);
+half3 __ovld __cnfn copysign(half3 x, half3 y);
+half4 __ovld __cnfn copysign(half4 x, half4 y);
+half8 __ovld __cnfn copysign(half8 x, half8 y);
+half16 __ovld __cnfn copysign(half16 x, half16 y);
+#endif //cl_khr_fp16
+
+/**
+ * Compute cosine.
+ */
+float __ovld __cnfn cos(float);
+float2 __ovld __cnfn cos(float2);
+float3 __ovld __cnfn cos(float3);
+float4 __ovld __cnfn cos(float4);
+float8 __ovld __cnfn cos(float8);
+float16 __ovld __cnfn cos(float16);
+#ifdef cl_khr_fp64
+double __ovld __cnfn cos(double);
+double2 __ovld __cnfn cos(double2);
+double3 __ovld __cnfn cos(double3);
+double4 __ovld __cnfn cos(double4);
+double8 __ovld __cnfn cos(double8);
+double16 __ovld __cnfn cos(double16);
+#endif //cl_khr_fp64
+#ifdef cl_khr_fp16
+half __ovld __cnfn cos(half);
+half2 __ovld __cnfn cos(half2);
+half3 __ovld __cnfn cos(half3);
+half4 __ovld __cnfn cos(half4);
+half8 __ovld __cnfn cos(half8);
+half16 __ovld __cnfn cos(half16);
+#endif //cl_khr_fp16
+
+/**
+ * Compute hyperbolic cosine.
+ */
+float __ovld __cnfn cosh(float);
+float2 __ovld __cnfn cosh(float2);
+float3 __ovld __cnfn cosh(float3);
+float4 __ovld __cnfn cosh(float4);
+float8 __ovld __cnfn cosh(float8);
+float16 __ovld __cnfn cosh(float16);
+#ifdef cl_khr_fp64
+double __ovld __cnfn cosh(double);
+double2 __ovld __cnfn cosh(double2);
+double3 __ovld __cnfn cosh(double3);
+double4 __ovld __cnfn cosh(double4);
+double8 __ovld __cnfn cosh(double8);
+double16 __ovld __cnfn cosh(double16);
+#endif //cl_khr_fp64
+#ifdef cl_khr_fp16
+half __ovld __cnfn cosh(half);
+half2 __ovld __cnfn cosh(half2);
+half3 __ovld __cnfn cosh(half3);
+half4 __ovld __cnfn cosh(half4);
+half8 __ovld __cnfn cosh(half8);
+half16 __ovld __cnfn cosh(half16);
+#endif //cl_khr_fp16
+
+/**
+ * Compute cos (PI * x).
+ */
+float __ovld __cnfn cospi(float x);
+float2 __ovld __cnfn cospi(float2 x);
+float3 __ovld __cnfn cospi(float3 x);
+float4 __ovld __cnfn cospi(float4 x);
+float8 __ovld __cnfn cospi(float8 x);
+float16 __ovld __cnfn cospi(float16 x);
+#ifdef cl_khr_fp64
+double __ovld __cnfn cospi(double x);
+double2 __ovld __cnfn cospi(double2 x);
+double3 __ovld __cnfn cospi(double3 x);
+double4 __ovld __cnfn cospi(double4 x);
+double8 __ovld __cnfn cospi(double8 x);
+double16 __ovld __cnfn cospi(double16 x);
+#endif //cl_khr_fp64
+#ifdef cl_khr_fp16
+half __ovld __cnfn cospi(half x);
+half2 __ovld __cnfn cospi(half2 x);
+half3 __ovld __cnfn cospi(half3 x);
+half4 __ovld __cnfn cospi(half4 x);
+half8 __ovld __cnfn cospi(half8 x);
+half16 __ovld __cnfn cospi(half16 x);
+#endif //cl_khr_fp16
+
+/**
+ * Complementary error function.
+ */
+float __ovld __cnfn erfc(float);
+float2 __ovld __cnfn erfc(float2);
+float3 __ovld __cnfn erfc(float3);
+float4 __ovld __cnfn erfc(float4);
+float8 __ovld __cnfn erfc(float8);
+float16 __ovld __cnfn erfc(float16);
+#ifdef cl_khr_fp64
+double __ovld __cnfn erfc(double);
+double2 __ovld __cnfn erfc(double2);
+double3 __ovld __cnfn erfc(double3);
+double4 __ovld __cnfn erfc(double4);
+double8 __ovld __cnfn erfc(double8);
+double16 __ovld __cnfn erfc(double16);
+#endif //cl_khr_fp64
+#ifdef cl_khr_fp16
+half __ovld __cnfn erfc(half);
+half2 __ovld __cnfn erfc(half2);
+half3 __ovld __cnfn erfc(half3);
+half4 __ovld __cnfn erfc(half4);
+half8 __ovld __cnfn erfc(half8);
+half16 __ovld __cnfn erfc(half16);
+#endif //cl_khr_fp16
+
+/**
+ * Error function encountered in integrating the
+ * normal distribution.
+ */
+float __ovld __cnfn erf(float);
+float2 __ovld __cnfn erf(float2);
+float3 __ovld __cnfn erf(float3);
+float4 __ovld __cnfn erf(float4);
+float8 __ovld __cnfn erf(float8);
+float16 __ovld __cnfn erf(float16);
+#ifdef cl_khr_fp64
+double __ovld __cnfn erf(double);
+double2 __ovld __cnfn erf(double2);
+double3 __ovld __cnfn erf(double3);
+double4 __ovld __cnfn erf(double4);
+double8 __ovld __cnfn erf(double8);
+double16 __ovld __cnfn erf(double16);
+#endif //cl_khr_fp64
+#ifdef cl_khr_fp16
+half __ovld __cnfn erf(half);
+half2 __ovld __cnfn erf(half2);
+half3 __ovld __cnfn erf(half3);
+half4 __ovld __cnfn erf(half4);
+half8 __ovld __cnfn erf(half8);
+half16 __ovld __cnfn erf(half16);
+#endif //cl_khr_fp16
+
+/**
+ * Compute the base e exponential function of x.
+ */
+float __ovld __cnfn exp(float x);
+float2 __ovld __cnfn exp(float2 x);
+float3 __ovld __cnfn exp(float3 x);
+float4 __ovld __cnfn exp(float4 x);
+float8 __ovld __cnfn exp(float8 x);
+float16 __ovld __cnfn exp(float16 x);
+#ifdef cl_khr_fp64
+double __ovld __cnfn exp(double x);
+double2 __ovld __cnfn exp(double2 x);
+double3 __ovld __cnfn exp(double3 x);
+double4 __ovld __cnfn exp(double4 x);
+double8 __ovld __cnfn exp(double8 x);
+double16 __ovld __cnfn exp(double16 x);
+#endif //cl_khr_fp64
+#ifdef cl_khr_fp16
+half __ovld __cnfn exp(half x);
+half2 __ovld __cnfn exp(half2 x);
+half3 __ovld __cnfn exp(half3 x);
+half4 __ovld __cnfn exp(half4 x);
+half8 __ovld __cnfn exp(half8 x);
+half16 __ovld __cnfn exp(half16 x);
+#endif //cl_khr_fp16
+
+/**
+ * Exponential base 2 function.
+ */
+float __ovld __cnfn exp2(float);
+float2 __ovld __cnfn exp2(float2);
+float3 __ovld __cnfn exp2(float3);
+float4 __ovld __cnfn exp2(float4);
+float8 __ovld __cnfn exp2(float8);
+float16 __ovld __cnfn exp2(float16);
+#ifdef cl_khr_fp64
+double __ovld __cnfn exp2(double);
+double2 __ovld __cnfn exp2(double2);
+double3 __ovld __cnfn exp2(double3);
+double4 __ovld __cnfn exp2(double4);
+double8 __ovld __cnfn exp2(double8);
+double16 __ovld __cnfn exp2(double16);
+#endif //cl_khr_fp64
+#ifdef cl_khr_fp16
+half __ovld __cnfn exp2(half);
+half2 __ovld __cnfn exp2(half2);
+half3 __ovld __cnfn exp2(half3);
+half4 __ovld __cnfn exp2(half4);
+half8 __ovld __cnfn exp2(half8);
+half16 __ovld __cnfn exp2(half16);
+#endif //cl_khr_fp16
+
+/**
+ * Exponential base 10 function.
+ */
+float __ovld __cnfn exp10(float);
+float2 __ovld __cnfn exp10(float2);
+float3 __ovld __cnfn exp10(float3);
+float4 __ovld __cnfn exp10(float4);
+float8 __ovld __cnfn exp10(float8);
+float16 __ovld __cnfn exp10(float16);
+#ifdef cl_khr_fp64
+double __ovld __cnfn exp10(double);
+double2 __ovld __cnfn exp10(double2);
+double3 __ovld __cnfn exp10(double3);
+double4 __ovld __cnfn exp10(double4);
+double8 __ovld __cnfn exp10(double8);
+double16 __ovld __cnfn exp10(double16);
+#endif //cl_khr_fp64
+#ifdef cl_khr_fp16
+half __ovld __cnfn exp10(half);
+half2 __ovld __cnfn exp10(half2);
+half3 __ovld __cnfn exp10(half3);
+half4 __ovld __cnfn exp10(half4);
+half8 __ovld __cnfn exp10(half8);
+half16 __ovld __cnfn exp10(half16);
+#endif //cl_khr_fp16
+
+/**
+ * Compute e^x - 1.0.
+ */
+float __ovld __cnfn expm1(float x);
+float2 __ovld __cnfn expm1(float2 x);
+float3 __ovld __cnfn expm1(float3 x);
+float4 __ovld __cnfn expm1(float4 x);
+float8 __ovld __cnfn expm1(float8 x);
+float16 __ovld __cnfn expm1(float16 x);
+#ifdef cl_khr_fp64
+double __ovld __cnfn expm1(double x);
+double2 __ovld __cnfn expm1(double2 x);
+double3 __ovld __cnfn expm1(double3 x);
+double4 __ovld __cnfn expm1(double4 x);
+double8 __ovld __cnfn expm1(double8 x);
+double16 __ovld __cnfn expm1(double16 x);
+#endif //cl_khr_fp64
+#ifdef cl_khr_fp16
+half __ovld __cnfn expm1(half x);
+half2 __ovld __cnfn expm1(half2 x);
+half3 __ovld __cnfn expm1(half3 x);
+half4 __ovld __cnfn expm1(half4 x);
+half8 __ovld __cnfn expm1(half8 x);
+half16 __ovld __cnfn expm1(half16 x);
+#endif //cl_khr_fp16
+
+/**
+ * Compute absolute value of a floating-point number.
+ */
+float __ovld __cnfn fabs(float);
+float2 __ovld __cnfn fabs(float2);
+float3 __ovld __cnfn fabs(float3);
+float4 __ovld __cnfn fabs(float4);
+float8 __ovld __cnfn fabs(float8);
+float16 __ovld __cnfn fabs(float16);
+#ifdef cl_khr_fp64
+double __ovld __cnfn fabs(double);
+double2 __ovld __cnfn fabs(double2);
+double3 __ovld __cnfn fabs(double3);
+double4 __ovld __cnfn fabs(double4);
+double8 __ovld __cnfn fabs(double8);
+double16 __ovld __cnfn fabs(double16);
+#endif //cl_khr_fp64
+#ifdef cl_khr_fp16
+half __ovld __cnfn fabs(half);
+half2 __ovld __cnfn fabs(half2);
+half3 __ovld __cnfn fabs(half3);
+half4 __ovld __cnfn fabs(half4);
+half8 __ovld __cnfn fabs(half8);
+half16 __ovld __cnfn fabs(half16);
+#endif //cl_khr_fp16
+
+/**
+ * x - y if x > y, +0 if x is less than or equal to y.
+ */
+float __ovld __cnfn fdim(float x, float y);
+float2 __ovld __cnfn fdim(float2 x, float2 y);
+float3 __ovld __cnfn fdim(float3 x, float3 y);
+float4 __ovld __cnfn fdim(float4 x, float4 y);
+float8 __ovld __cnfn fdim(float8 x, float8 y);
+float16 __ovld __cnfn fdim(float16 x, float16 y);
+#ifdef cl_khr_fp64
+double __ovld __cnfn fdim(double x, double y);
+double2 __ovld __cnfn fdim(double2 x, double2 y);
+double3 __ovld __cnfn fdim(double3 x, double3 y);
+double4 __ovld __cnfn fdim(double4 x, double4 y);
+double8 __ovld __cnfn fdim(double8 x, double8 y);
+double16 __ovld __cnfn fdim(double16 x, double16 y);
+#endif //cl_khr_fp64
+#ifdef cl_khr_fp16
+half __ovld __cnfn fdim(half x, half y);
+half2 __ovld __cnfn fdim(half2 x, half2 y);
+half3 __ovld __cnfn fdim(half3 x, half3 y);
+half4 __ovld __cnfn fdim(half4 x, half4 y);
+half8 __ovld __cnfn fdim(half8 x, half8 y);
+half16 __ovld __cnfn fdim(half16 x, half16 y);
+#endif //cl_khr_fp16
+
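+// Worked example of fdim (values illustrative): fdim(5.0f, 3.0f) == 2.0f and
+// fdim(3.0f, 5.0f) == +0.0f, i.e. the difference clamped below at +0.
+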
+/**
+ * Round to integral value using the round to negative
+ * infinity rounding mode.
+ */
+float __ovld __cnfn floor(float);
+float2 __ovld __cnfn floor(float2);
+float3 __ovld __cnfn floor(float3);
+float4 __ovld __cnfn floor(float4);
+float8 __ovld __cnfn floor(float8);
+float16 __ovld __cnfn floor(float16);
+#ifdef cl_khr_fp64
+double __ovld __cnfn floor(double);
+double2 __ovld __cnfn floor(double2);
+double3 __ovld __cnfn floor(double3);
+double4 __ovld __cnfn floor(double4);
+double8 __ovld __cnfn floor(double8);
+double16 __ovld __cnfn floor(double16);
+#endif //cl_khr_fp64
+#ifdef cl_khr_fp16
+half __ovld __cnfn floor(half);
+half2 __ovld __cnfn floor(half2);
+half3 __ovld __cnfn floor(half3);
+half4 __ovld __cnfn floor(half4);
+half8 __ovld __cnfn floor(half8);
+half16 __ovld __cnfn floor(half16);
+#endif //cl_khr_fp16
+
+/**
+ * Returns the correctly rounded floating-point
+ * representation of the sum of c with the infinitely
+ * precise product of a and b. Rounding of
+ * intermediate products shall not occur. Edge case
+ * behavior is per the IEEE 754-2008 standard.
+ */
+float __ovld __cnfn fma(float a, float b, float c);
+float2 __ovld __cnfn fma(float2 a, float2 b, float2 c);
+float3 __ovld __cnfn fma(float3 a, float3 b, float3 c);
+float4 __ovld __cnfn fma(float4 a, float4 b, float4 c);
+float8 __ovld __cnfn fma(float8 a, float8 b, float8 c);
+float16 __ovld __cnfn fma(float16 a, float16 b, float16 c);
+#ifdef cl_khr_fp64
+double __ovld __cnfn fma(double a, double b, double c);
+double2 __ovld __cnfn fma(double2 a, double2 b, double2 c);
+double3 __ovld __cnfn fma(double3 a, double3 b, double3 c);
+double4 __ovld __cnfn fma(double4 a, double4 b, double4 c);
+double8 __ovld __cnfn fma(double8 a, double8 b, double8 c);
+double16 __ovld __cnfn fma(double16 a, double16 b, double16 c);
+#endif //cl_khr_fp64
+#ifdef cl_khr_fp16
+half __ovld __cnfn fma(half a, half b, half c);
+half2 __ovld __cnfn fma(half2 a, half2 b, half2 c);
+half3 __ovld __cnfn fma(half3 a, half3 b, half3 c);
+half4 __ovld __cnfn fma(half4 a, half4 b, half4 c);
+half8 __ovld __cnfn fma(half8 a, half8 b, half8 c);
+half16 __ovld __cnfn fma(half16 a, half16 b, half16 c);
+#endif //cl_khr_fp16
+
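+// A sketch of why fma matters (variable names illustrative): the product is
+// not rounded before the add, so only one rounding step occurs in total.
+//
+//   float naive = a * b + c;     // two roundings: after * and after +
+//   float fused = fma(a, b, c);  // one rounding of the exact a*b + c
+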
+/**
+ * Returns y if x < y, otherwise it returns x. If one
+ * argument is a NaN, fmax() returns the other
+ * argument. If both arguments are NaNs, fmax()
+ * returns a NaN.
+ */
+float __ovld __cnfn fmax(float x, float y);
+float2 __ovld __cnfn fmax(float2 x, float2 y);
+float3 __ovld __cnfn fmax(float3 x, float3 y);
+float4 __ovld __cnfn fmax(float4 x, float4 y);
+float8 __ovld __cnfn fmax(float8 x, float8 y);
+float16 __ovld __cnfn fmax(float16 x, float16 y);
+float2 __ovld __cnfn fmax(float2 x, float y);
+float3 __ovld __cnfn fmax(float3 x, float y);
+float4 __ovld __cnfn fmax(float4 x, float y);
+float8 __ovld __cnfn fmax(float8 x, float y);
+float16 __ovld __cnfn fmax(float16 x, float y);
+#ifdef cl_khr_fp64
+double __ovld __cnfn fmax(double x, double y);
+double2 __ovld __cnfn fmax(double2 x, double2 y);
+double3 __ovld __cnfn fmax(double3 x, double3 y);
+double4 __ovld __cnfn fmax(double4 x, double4 y);
+double8 __ovld __cnfn fmax(double8 x, double8 y);
+double16 __ovld __cnfn fmax(double16 x, double16 y);
+double2 __ovld __cnfn fmax(double2 x, double y);
+double3 __ovld __cnfn fmax(double3 x, double y);
+double4 __ovld __cnfn fmax(double4 x, double y);
+double8 __ovld __cnfn fmax(double8 x, double y);
+double16 __ovld __cnfn fmax(double16 x, double y);
+#endif //cl_khr_fp64
+#ifdef cl_khr_fp16
+half __ovld __cnfn fmax(half x, half y);
+half2 __ovld __cnfn fmax(half2 x, half2 y);
+half3 __ovld __cnfn fmax(half3 x, half3 y);
+half4 __ovld __cnfn fmax(half4 x, half4 y);
+half8 __ovld __cnfn fmax(half8 x, half8 y);
+half16 __ovld __cnfn fmax(half16 x, half16 y);
+half2 __ovld __cnfn fmax(half2 x, half y);
+half3 __ovld __cnfn fmax(half3 x, half y);
+half4 __ovld __cnfn fmax(half4 x, half y);
+half8 __ovld __cnfn fmax(half8 x, half y);
+half16 __ovld __cnfn fmax(half16 x, half y);
+#endif //cl_khr_fp16
+
+/**
+ * Returns y if y < x, otherwise it returns x. If one
+ * argument is a NaN, fmin() returns the other
+ * argument. If both arguments are NaNs, fmin()
+ * returns a NaN.
+ */
+float __ovld __cnfn fmin(float x, float y);
+float2 __ovld __cnfn fmin(float2 x, float2 y);
+float3 __ovld __cnfn fmin(float3 x, float3 y);
+float4 __ovld __cnfn fmin(float4 x, float4 y);
+float8 __ovld __cnfn fmin(float8 x, float8 y);
+float16 __ovld __cnfn fmin(float16 x, float16 y);
+float2 __ovld __cnfn fmin(float2 x, float y);
+float3 __ovld __cnfn fmin(float3 x, float y);
+float4 __ovld __cnfn fmin(float4 x, float y);
+float8 __ovld __cnfn fmin(float8 x, float y);
+float16 __ovld __cnfn fmin(float16 x, float y);
+#ifdef cl_khr_fp64
+double __ovld __cnfn fmin(double x, double y);
+double2 __ovld __cnfn fmin(double2 x, double2 y);
+double3 __ovld __cnfn fmin(double3 x, double3 y);
+double4 __ovld __cnfn fmin(double4 x, double4 y);
+double8 __ovld __cnfn fmin(double8 x, double8 y);
+double16 __ovld __cnfn fmin(double16 x, double16 y);
+double2 __ovld __cnfn fmin(double2 x, double y);
+double3 __ovld __cnfn fmin(double3 x, double y);
+double4 __ovld __cnfn fmin(double4 x, double y);
+double8 __ovld __cnfn fmin(double8 x, double y);
+double16 __ovld __cnfn fmin(double16 x, double y);
+#endif //cl_khr_fp64
+#ifdef cl_khr_fp16
+half __ovld __cnfn fmin(half x, half y);
+half2 __ovld __cnfn fmin(half2 x, half2 y);
+half3 __ovld __cnfn fmin(half3 x, half3 y);
+half4 __ovld __cnfn fmin(half4 x, half4 y);
+half8 __ovld __cnfn fmin(half8 x, half8 y);
+half16 __ovld __cnfn fmin(half16 x, half16 y);
+half2 __ovld __cnfn fmin(half2 x, half y);
+half3 __ovld __cnfn fmin(half3 x, half y);
+half4 __ovld __cnfn fmin(half4 x, half y);
+half8 __ovld __cnfn fmin(half8 x, half y);
+half16 __ovld __cnfn fmin(half16 x, half y);
+#endif //cl_khr_fp16
+
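+// NaN-handling sketch for fmin/fmax as documented above (values illustrative):
+//
+//   float q = 0.0f / 0.0f;   // NaN
+//   fmax(q, 2.0f);           // == 2.0f: the non-NaN argument is returned
+//   fmin(q, q);              // NaN: both arguments are NaN
+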
+/**
+ * Modulus. Returns x - y * trunc (x/y).
+ */
+float __ovld __cnfn fmod(float x, float y);
+float2 __ovld __cnfn fmod(float2 x, float2 y);
+float3 __ovld __cnfn fmod(float3 x, float3 y);
+float4 __ovld __cnfn fmod(float4 x, float4 y);
+float8 __ovld __cnfn fmod(float8 x, float8 y);
+float16 __ovld __cnfn fmod(float16 x, float16 y);
+#ifdef cl_khr_fp64
+double __ovld __cnfn fmod(double x, double y);
+double2 __ovld __cnfn fmod(double2 x, double2 y);
+double3 __ovld __cnfn fmod(double3 x, double3 y);
+double4 __ovld __cnfn fmod(double4 x, double4 y);
+double8 __ovld __cnfn fmod(double8 x, double8 y);
+double16 __ovld __cnfn fmod(double16 x, double16 y);
+#endif //cl_khr_fp64
+#ifdef cl_khr_fp16
+half __ovld __cnfn fmod(half x, half y);
+half2 __ovld __cnfn fmod(half2 x, half2 y);
+half3 __ovld __cnfn fmod(half3 x, half3 y);
+half4 __ovld __cnfn fmod(half4 x, half4 y);
+half8 __ovld __cnfn fmod(half8 x, half8 y);
+half16 __ovld __cnfn fmod(half16 x, half16 y);
+#endif //cl_khr_fp16
+
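+// Worked example of the fmod formula x - y * trunc(x/y) (values illustrative):
+// fmod(5.5f, 2.0f) == 5.5f - 2.0f * trunc(2.75f) == 5.5f - 4.0f == 1.5f,
+// and the result carries the sign of x.
+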
+/**
+ * Returns fmin(x - floor(x), 0x1.fffffep-1f).
+ * floor(x) is returned in iptr.
+ */
+#if __OPENCL_C_VERSION__ >= CL_VERSION_2_0
+float __ovld fract(float x, float *iptr);
+float2 __ovld fract(float2 x, float2 *iptr);
+float3 __ovld fract(float3 x, float3 *iptr);
+float4 __ovld fract(float4 x, float4 *iptr);
+float8 __ovld fract(float8 x, float8 *iptr);
+float16 __ovld fract(float16 x, float16 *iptr);
+#ifdef cl_khr_fp64
+double __ovld fract(double x, double *iptr);
+double2 __ovld fract(double2 x, double2 *iptr);
+double3 __ovld fract(double3 x, double3 *iptr);
+double4 __ovld fract(double4 x, double4 *iptr);
+double8 __ovld fract(double8 x, double8 *iptr);
+double16 __ovld fract(double16 x, double16 *iptr);
+#endif //cl_khr_fp64
+#ifdef cl_khr_fp16
+half __ovld fract(half x, half *iptr);
+half2 __ovld fract(half2 x, half2 *iptr);
+half3 __ovld fract(half3 x, half3 *iptr);
+half4 __ovld fract(half4 x, half4 *iptr);
+half8 __ovld fract(half8 x, half8 *iptr);
+half16 __ovld fract(half16 x, half16 *iptr);
+#endif //cl_khr_fp16
+#else
+float __ovld fract(float x, __global float *iptr);
+float2 __ovld fract(float2 x, __global float2 *iptr);
+float3 __ovld fract(float3 x, __global float3 *iptr);
+float4 __ovld fract(float4 x, __global float4 *iptr);
+float8 __ovld fract(float8 x, __global float8 *iptr);
+float16 __ovld fract(float16 x, __global float16 *iptr);
+float __ovld fract(float x, __local float *iptr);
+float2 __ovld fract(float2 x, __local float2 *iptr);
+float3 __ovld fract(float3 x, __local float3 *iptr);
+float4 __ovld fract(float4 x, __local float4 *iptr);
+float8 __ovld fract(float8 x, __local float8 *iptr);
+float16 __ovld fract(float16 x, __local float16 *iptr);
+float __ovld fract(float x, __private float *iptr);
+float2 __ovld fract(float2 x, __private float2 *iptr);
+float3 __ovld fract(float3 x, __private float3 *iptr);
+float4 __ovld fract(float4 x, __private float4 *iptr);
+float8 __ovld fract(float8 x, __private float8 *iptr);
+float16 __ovld fract(float16 x, __private float16 *iptr);
+#ifdef cl_khr_fp64
+double __ovld fract(double x, __global double *iptr);
+double2 __ovld fract(double2 x, __global double2 *iptr);
+double3 __ovld fract(double3 x, __global double3 *iptr);
+double4 __ovld fract(double4 x, __global double4 *iptr);
+double8 __ovld fract(double8 x, __global double8 *iptr);
+double16 __ovld fract(double16 x, __global double16 *iptr);
+double __ovld fract(double x, __local double *iptr);
+double2 __ovld fract(double2 x, __local double2 *iptr);
+double3 __ovld fract(double3 x, __local double3 *iptr);
+double4 __ovld fract(double4 x, __local double4 *iptr);
+double8 __ovld fract(double8 x, __local double8 *iptr);
+double16 __ovld fract(double16 x, __local double16 *iptr);
+double __ovld fract(double x, __private double *iptr);
+double2 __ovld fract(double2 x, __private double2 *iptr);
+double3 __ovld fract(double3 x, __private double3 *iptr);
+double4 __ovld fract(double4 x, __private double4 *iptr);
+double8 __ovld fract(double8 x, __private double8 *iptr);
+double16 __ovld fract(double16 x, __private double16 *iptr);
+#endif //cl_khr_fp64
+#ifdef cl_khr_fp16
+half __ovld fract(half x, __global half *iptr);
+half2 __ovld fract(half2 x, __global half2 *iptr);
+half3 __ovld fract(half3 x, __global half3 *iptr);
+half4 __ovld fract(half4 x, __global half4 *iptr);
+half8 __ovld fract(half8 x, __global half8 *iptr);
+half16 __ovld fract(half16 x, __global half16 *iptr);
+half __ovld fract(half x, __local half *iptr);
+half2 __ovld fract(half2 x, __local half2 *iptr);
+half3 __ovld fract(half3 x, __local half3 *iptr);
+half4 __ovld fract(half4 x, __local half4 *iptr);
+half8 __ovld fract(half8 x, __local half8 *iptr);
+half16 __ovld fract(half16 x, __local half16 *iptr);
+half __ovld fract(half x, __private half *iptr);
+half2 __ovld fract(half2 x, __private half2 *iptr);
+half3 __ovld fract(half3 x, __private half3 *iptr);
+half4 __ovld fract(half4 x, __private half4 *iptr);
+half8 __ovld fract(half8 x, __private half8 *iptr);
+half16 __ovld fract(half16 x, __private half16 *iptr);
+#endif //cl_khr_fp16
+#endif //__OPENCL_C_VERSION__ >= CL_VERSION_2_0
+
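+/*
+ * Illustrative sketch (not part of the header; kernel and buffer names
+ * are hypothetical): splitting elements into fractional and floor parts.
+ * The private temporary matches the __private overload before OpenCL 2.0
+ * and the generic-pointer overload in OpenCL 2.0.
+ *
+ *   __kernel void split(__global const float *in,
+ *                       __global float *frac_part,
+ *                       __global float *floor_part) {
+ *     size_t i = get_global_id(0);
+ *     float ip;
+ *     frac_part[i]  = fract(in[i], &ip);   // in [0, 1)
+ *     floor_part[i] = ip;                  // floor(in[i])
+ *   }
+ */
+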
+/**
+ * Extract mantissa and exponent from x. For each
+ * component the mantissa returned is a float with
+ * magnitude in the interval [1/2, 1) or 0. Each
+ * component of x equals mantissa returned * 2^exp.
+ */
+#if __OPENCL_C_VERSION__ >= CL_VERSION_2_0
+float __ovld frexp(float x, int *exp);
+float2 __ovld frexp(float2 x, int2 *exp);
+float3 __ovld frexp(float3 x, int3 *exp);
+float4 __ovld frexp(float4 x, int4 *exp);
+float8 __ovld frexp(float8 x, int8 *exp);
+float16 __ovld frexp(float16 x, int16 *exp);
+#ifdef cl_khr_fp64
+double __ovld frexp(double x, int *exp);
+double2 __ovld frexp(double2 x, int2 *exp);
+double3 __ovld frexp(double3 x, int3 *exp);
+double4 __ovld frexp(double4 x, int4 *exp);
+double8 __ovld frexp(double8 x, int8 *exp);
+double16 __ovld frexp(double16 x, int16 *exp);
+#endif //cl_khr_fp64
+#ifdef cl_khr_fp16
+half __ovld frexp(half x, int *exp);
+half2 __ovld frexp(half2 x, int2 *exp);
+half3 __ovld frexp(half3 x, int3 *exp);
+half4 __ovld frexp(half4 x, int4 *exp);
+half8 __ovld frexp(half8 x, int8 *exp);
+half16 __ovld frexp(half16 x, int16 *exp);
+#endif //cl_khr_fp16
+#else
+float __ovld frexp(float x, __global int *exp);
+float2 __ovld frexp(float2 x, __global int2 *exp);
+float3 __ovld frexp(float3 x, __global int3 *exp);
+float4 __ovld frexp(float4 x, __global int4 *exp);
+float8 __ovld frexp(float8 x, __global int8 *exp);
+float16 __ovld frexp(float16 x, __global int16 *exp);
+float __ovld frexp(float x, __local int *exp);
+float2 __ovld frexp(float2 x, __local int2 *exp);
+float3 __ovld frexp(float3 x, __local int3 *exp);
+float4 __ovld frexp(float4 x, __local int4 *exp);
+float8 __ovld frexp(float8 x, __local int8 *exp);
+float16 __ovld frexp(float16 x, __local int16 *exp);
+float __ovld frexp(float x, __private int *exp);
+float2 __ovld frexp(float2 x, __private int2 *exp);
+float3 __ovld frexp(float3 x, __private int3 *exp);
+float4 __ovld frexp(float4 x, __private int4 *exp);
+float8 __ovld frexp(float8 x, __private int8 *exp);
+float16 __ovld frexp(float16 x, __private int16 *exp);
+#ifdef cl_khr_fp64
+double __ovld frexp(double x, __global int *exp);
+double2 __ovld frexp(double2 x, __global int2 *exp);
+double3 __ovld frexp(double3 x, __global int3 *exp);
+double4 __ovld frexp(double4 x, __global int4 *exp);
+double8 __ovld frexp(double8 x, __global int8 *exp);
+double16 __ovld frexp(double16 x, __global int16 *exp);
+double __ovld frexp(double x, __local int *exp);
+double2 __ovld frexp(double2 x, __local int2 *exp);
+double3 __ovld frexp(double3 x, __local int3 *exp);
+double4 __ovld frexp(double4 x, __local int4 *exp);
+double8 __ovld frexp(double8 x, __local int8 *exp);
+double16 __ovld frexp(double16 x, __local int16 *exp);
+double __ovld frexp(double x, __private int *exp);
+double2 __ovld frexp(double2 x, __private int2 *exp);
+double3 __ovld frexp(double3 x, __private int3 *exp);
+double4 __ovld frexp(double4 x, __private int4 *exp);
+double8 __ovld frexp(double8 x, __private int8 *exp);
+double16 __ovld frexp(double16 x, __private int16 *exp);
+#endif //cl_khr_fp64
+#ifdef cl_khr_fp16
+half __ovld frexp(half x, __global int *exp);
+half2 __ovld frexp(half2 x, __global int2 *exp);
+half3 __ovld frexp(half3 x, __global int3 *exp);
+half4 __ovld frexp(half4 x, __global int4 *exp);
+half8 __ovld frexp(half8 x, __global int8 *exp);
+half16 __ovld frexp(half16 x, __global int16 *exp);
+half __ovld frexp(half x, __local int *exp);
+half2 __ovld frexp(half2 x, __local int2 *exp);
+half3 __ovld frexp(half3 x, __local int3 *exp);
+half4 __ovld frexp(half4 x, __local int4 *exp);
+half8 __ovld frexp(half8 x, __local int8 *exp);
+half16 __ovld frexp(half16 x, __local int16 *exp);
+half __ovld frexp(half x, __private int *exp);
+half2 __ovld frexp(half2 x, __private int2 *exp);
+half3 __ovld frexp(half3 x, __private int3 *exp);
+half4 __ovld frexp(half4 x, __private int4 *exp);
+half8 __ovld frexp(half8 x, __private int8 *exp);
+half16 __ovld frexp(half16 x, __private int16 *exp);
+#endif //cl_khr_fp16
+#endif //__OPENCL_C_VERSION__ >= CL_VERSION_2_0
+
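+/*
+ * Illustrative sketch (not part of the header):
+ *
+ *   int e;
+ *   float m = frexp(12.0f, &e);   // m == 0.75f, e == 4  (12 = 0.75 * 2^4)
+ */
+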
+/**
+ * Compute the value of the square root of x^2 + y^2
+ * without undue overflow or underflow.
+ */
+float __ovld __cnfn hypot(float x, float y);
+float2 __ovld __cnfn hypot(float2 x, float2 y);
+float3 __ovld __cnfn hypot(float3 x, float3 y);
+float4 __ovld __cnfn hypot(float4 x, float4 y);
+float8 __ovld __cnfn hypot(float8 x, float8 y);
+float16 __ovld __cnfn hypot(float16 x, float16 y);
+#ifdef cl_khr_fp64
+double __ovld __cnfn hypot(double x, double y);
+double2 __ovld __cnfn hypot(double2 x, double2 y);
+double3 __ovld __cnfn hypot(double3 x, double3 y);
+double4 __ovld __cnfn hypot(double4 x, double4 y);
+double8 __ovld __cnfn hypot(double8 x, double8 y);
+double16 __ovld __cnfn hypot(double16 x, double16 y);
+#endif //cl_khr_fp64
+#ifdef cl_khr_fp16
+half __ovld __cnfn hypot(half x, half y);
+half2 __ovld __cnfn hypot(half2 x, half2 y);
+half3 __ovld __cnfn hypot(half3 x, half3 y);
+half4 __ovld __cnfn hypot(half4 x, half4 y);
+half8 __ovld __cnfn hypot(half8 x, half8 y);
+half16 __ovld __cnfn hypot(half16 x, half16 y);
+#endif //cl_khr_fp16
+
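+/*
+ * Illustrative sketch (not part of the header): with x == 3.0e30f the
+ * intermediate x*x overflows float, so the naive sqrt(x*x + y*y) yields
+ * +INF, whereas
+ *
+ *   float h = hypot(3.0e30f, 4.0e30f);   // h == 5.0e30f, no overflow
+ */
+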
+/**
+ * Return the exponent of x as an integer value.
+ */
+int __ovld __cnfn ilogb(float x);
+int2 __ovld __cnfn ilogb(float2 x);
+int3 __ovld __cnfn ilogb(float3 x);
+int4 __ovld __cnfn ilogb(float4 x);
+int8 __ovld __cnfn ilogb(float8 x);
+int16 __ovld __cnfn ilogb(float16 x);
+#ifdef cl_khr_fp64
+int __ovld __cnfn ilogb(double x);
+int2 __ovld __cnfn ilogb(double2 x);
+int3 __ovld __cnfn ilogb(double3 x);
+int4 __ovld __cnfn ilogb(double4 x);
+int8 __ovld __cnfn ilogb(double8 x);
+int16 __ovld __cnfn ilogb(double16 x);
+#endif //cl_khr_fp64
+#ifdef cl_khr_fp16
+int __ovld __cnfn ilogb(half x);
+int2 __ovld __cnfn ilogb(half2 x);
+int3 __ovld __cnfn ilogb(half3 x);
+int4 __ovld __cnfn ilogb(half4 x);
+int8 __ovld __cnfn ilogb(half8 x);
+int16 __ovld __cnfn ilogb(half16 x);
+#endif //cl_khr_fp16
+
+/**
+ * Multiply x by 2 to the power n.
+ */
+float __ovld __cnfn ldexp(float x, int n);
+float2 __ovld __cnfn ldexp(float2 x, int2 n);
+float3 __ovld __cnfn ldexp(float3 x, int3 n);
+float4 __ovld __cnfn ldexp(float4 x, int4 n);
+float8 __ovld __cnfn ldexp(float8 x, int8 n);
+float16 __ovld __cnfn ldexp(float16 x, int16 n);
+float2 __ovld __cnfn ldexp(float2 x, int n);
+float3 __ovld __cnfn ldexp(float3 x, int n);
+float4 __ovld __cnfn ldexp(float4 x, int n);
+float8 __ovld __cnfn ldexp(float8 x, int n);
+float16 __ovld __cnfn ldexp(float16 x, int n);
+#ifdef cl_khr_fp64
+double __ovld __cnfn ldexp(double x, int n);
+double2 __ovld __cnfn ldexp(double2 x, int2 n);
+double3 __ovld __cnfn ldexp(double3 x, int3 n);
+double4 __ovld __cnfn ldexp(double4 x, int4 n);
+double8 __ovld __cnfn ldexp(double8 x, int8 n);
+double16 __ovld __cnfn ldexp(double16 x, int16 n);
+double2 __ovld __cnfn ldexp(double2 x, int n);
+double3 __ovld __cnfn ldexp(double3 x, int n);
+double4 __ovld __cnfn ldexp(double4 x, int n);
+double8 __ovld __cnfn ldexp(double8 x, int n);
+double16 __ovld __cnfn ldexp(double16 x, int n);
+#endif //cl_khr_fp64
+#ifdef cl_khr_fp16
+half __ovld __cnfn ldexp(half x, int n);
+half2 __ovld __cnfn ldexp(half2 x, int2 n);
+half3 __ovld __cnfn ldexp(half3 x, int3 n);
+half4 __ovld __cnfn ldexp(half4 x, int4 n);
+half8 __ovld __cnfn ldexp(half8 x, int8 n);
+half16 __ovld __cnfn ldexp(half16 x, int16 n);
+half2 __ovld __cnfn ldexp(half2 x, int n);
+half3 __ovld __cnfn ldexp(half3 x, int n);
+half4 __ovld __cnfn ldexp(half4 x, int n);
+half8 __ovld __cnfn ldexp(half8 x, int n);
+half16 __ovld __cnfn ldexp(half16 x, int n);
+#endif //cl_khr_fp16
+
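+/*
+ * Illustrative sketch (not part of the header): ldexp undoes frexp for
+ * finite x, e.g.
+ *
+ *   int e;
+ *   float m = frexp(x, &e);   // x == m * 2^e
+ *   float y = ldexp(m, e);    // y == x exactly
+ */
+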
+/**
+ * Log gamma function. Returns the natural
+ * logarithm of the absolute value of the gamma
+ * function. The sign of the gamma function is
+ * returned in the signp argument of lgamma_r.
+ */
+float __ovld __cnfn lgamma(float x);
+float2 __ovld __cnfn lgamma(float2 x);
+float3 __ovld __cnfn lgamma(float3 x);
+float4 __ovld __cnfn lgamma(float4 x);
+float8 __ovld __cnfn lgamma(float8 x);
+float16 __ovld __cnfn lgamma(float16 x);
+#ifdef cl_khr_fp64
+double __ovld __cnfn lgamma(double x);
+double2 __ovld __cnfn lgamma(double2 x);
+double3 __ovld __cnfn lgamma(double3 x);
+double4 __ovld __cnfn lgamma(double4 x);
+double8 __ovld __cnfn lgamma(double8 x);
+double16 __ovld __cnfn lgamma(double16 x);
+#endif //cl_khr_fp64
+#ifdef cl_khr_fp16
+half __ovld __cnfn lgamma(half x);
+half2 __ovld __cnfn lgamma(half2 x);
+half3 __ovld __cnfn lgamma(half3 x);
+half4 __ovld __cnfn lgamma(half4 x);
+half8 __ovld __cnfn lgamma(half8 x);
+half16 __ovld __cnfn lgamma(half16 x);
+#endif //cl_khr_fp16
+
+#if __OPENCL_C_VERSION__ >= CL_VERSION_2_0
+float __ovld lgamma_r(float x, int *signp);
+float2 __ovld lgamma_r(float2 x, int2 *signp);
+float3 __ovld lgamma_r(float3 x, int3 *signp);
+float4 __ovld lgamma_r(float4 x, int4 *signp);
+float8 __ovld lgamma_r(float8 x, int8 *signp);
+float16 __ovld lgamma_r(float16 x, int16 *signp);
+#ifdef cl_khr_fp64
+double __ovld lgamma_r(double x, int *signp);
+double2 __ovld lgamma_r(double2 x, int2 *signp);
+double3 __ovld lgamma_r(double3 x, int3 *signp);
+double4 __ovld lgamma_r(double4 x, int4 *signp);
+double8 __ovld lgamma_r(double8 x, int8 *signp);
+double16 __ovld lgamma_r(double16 x, int16 *signp);
+#endif //cl_khr_fp64
+#ifdef cl_khr_fp16
+half __ovld lgamma_r(half x, int *signp);
+half2 __ovld lgamma_r(half2 x, int2 *signp);
+half3 __ovld lgamma_r(half3 x, int3 *signp);
+half4 __ovld lgamma_r(half4 x, int4 *signp);
+half8 __ovld lgamma_r(half8 x, int8 *signp);
+half16 __ovld lgamma_r(half16 x, int16 *signp);
+#endif //cl_khr_fp16
+#else
+float __ovld lgamma_r(float x, __global int *signp);
+float2 __ovld lgamma_r(float2 x, __global int2 *signp);
+float3 __ovld lgamma_r(float3 x, __global int3 *signp);
+float4 __ovld lgamma_r(float4 x, __global int4 *signp);
+float8 __ovld lgamma_r(float8 x, __global int8 *signp);
+float16 __ovld lgamma_r(float16 x, __global int16 *signp);
+float __ovld lgamma_r(float x, __local int *signp);
+float2 __ovld lgamma_r(float2 x, __local int2 *signp);
+float3 __ovld lgamma_r(float3 x, __local int3 *signp);
+float4 __ovld lgamma_r(float4 x, __local int4 *signp);
+float8 __ovld lgamma_r(float8 x, __local int8 *signp);
+float16 __ovld lgamma_r(float16 x, __local int16 *signp);
+float __ovld lgamma_r(float x, __private int *signp);
+float2 __ovld lgamma_r(float2 x, __private int2 *signp);
+float3 __ovld lgamma_r(float3 x, __private int3 *signp);
+float4 __ovld lgamma_r(float4 x, __private int4 *signp);
+float8 __ovld lgamma_r(float8 x, __private int8 *signp);
+float16 __ovld lgamma_r(float16 x, __private int16 *signp);
+#ifdef cl_khr_fp64
+double __ovld lgamma_r(double x, __global int *signp);
+double2 __ovld lgamma_r(double2 x, __global int2 *signp);
+double3 __ovld lgamma_r(double3 x, __global int3 *signp);
+double4 __ovld lgamma_r(double4 x, __global int4 *signp);
+double8 __ovld lgamma_r(double8 x, __global int8 *signp);
+double16 __ovld lgamma_r(double16 x, __global int16 *signp);
+double __ovld lgamma_r(double x, __local int *signp);
+double2 __ovld lgamma_r(double2 x, __local int2 *signp);
+double3 __ovld lgamma_r(double3 x, __local int3 *signp);
+double4 __ovld lgamma_r(double4 x, __local int4 *signp);
+double8 __ovld lgamma_r(double8 x, __local int8 *signp);
+double16 __ovld lgamma_r(double16 x, __local int16 *signp);
+double __ovld lgamma_r(double x, __private int *signp);
+double2 __ovld lgamma_r(double2 x, __private int2 *signp);
+double3 __ovld lgamma_r(double3 x, __private int3 *signp);
+double4 __ovld lgamma_r(double4 x, __private int4 *signp);
+double8 __ovld lgamma_r(double8 x, __private int8 *signp);
+double16 __ovld lgamma_r(double16 x, __private int16 *signp);
+#endif //cl_khr_fp64
+#ifdef cl_khr_fp16
+half __ovld lgamma_r(half x, __global int *signp);
+half2 __ovld lgamma_r(half2 x, __global int2 *signp);
+half3 __ovld lgamma_r(half3 x, __global int3 *signp);
+half4 __ovld lgamma_r(half4 x, __global int4 *signp);
+half8 __ovld lgamma_r(half8 x, __global int8 *signp);
+half16 __ovld lgamma_r(half16 x, __global int16 *signp);
+half __ovld lgamma_r(half x, __local int *signp);
+half2 __ovld lgamma_r(half2 x, __local int2 *signp);
+half3 __ovld lgamma_r(half3 x, __local int3 *signp);
+half4 __ovld lgamma_r(half4 x, __local int4 *signp);
+half8 __ovld lgamma_r(half8 x, __local int8 *signp);
+half16 __ovld lgamma_r(half16 x, __local int16 *signp);
+half __ovld lgamma_r(half x, __private int *signp);
+half2 __ovld lgamma_r(half2 x, __private int2 *signp);
+half3 __ovld lgamma_r(half3 x, __private int3 *signp);
+half4 __ovld lgamma_r(half4 x, __private int4 *signp);
+half8 __ovld lgamma_r(half8 x, __private int8 *signp);
+half16 __ovld lgamma_r(half16 x, __private int16 *signp);
+#endif //cl_khr_fp16
+#endif //__OPENCL_C_VERSION__ >= CL_VERSION_2_0
+
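+/*
+ * Illustrative sketch (not part of the header): recovering gamma(x) from
+ * lgamma_r, e.g.
+ *
+ *   int s;
+ *   float lg = lgamma_r(x, &s);   // s holds the sign of gamma(x)
+ *   float g  = s * exp(lg);       // approximately tgamma(x)
+ */
+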
+/**
+ * Compute natural logarithm.
+ */
+float __ovld __cnfn log(float);
+float2 __ovld __cnfn log(float2);
+float3 __ovld __cnfn log(float3);
+float4 __ovld __cnfn log(float4);
+float8 __ovld __cnfn log(float8);
+float16 __ovld __cnfn log(float16);
+#ifdef cl_khr_fp64
+double __ovld __cnfn log(double);
+double2 __ovld __cnfn log(double2);
+double3 __ovld __cnfn log(double3);
+double4 __ovld __cnfn log(double4);
+double8 __ovld __cnfn log(double8);
+double16 __ovld __cnfn log(double16);
+#endif //cl_khr_fp64
+#ifdef cl_khr_fp16
+half __ovld __cnfn log(half);
+half2 __ovld __cnfn log(half2);
+half3 __ovld __cnfn log(half3);
+half4 __ovld __cnfn log(half4);
+half8 __ovld __cnfn log(half8);
+half16 __ovld __cnfn log(half16);
+#endif //cl_khr_fp16
+
+/**
+ * Compute a base 2 logarithm.
+ */
+float __ovld __cnfn log2(float);
+float2 __ovld __cnfn log2(float2);
+float3 __ovld __cnfn log2(float3);
+float4 __ovld __cnfn log2(float4);
+float8 __ovld __cnfn log2(float8);
+float16 __ovld __cnfn log2(float16);
+#ifdef cl_khr_fp64
+double __ovld __cnfn log2(double);
+double2 __ovld __cnfn log2(double2);
+double3 __ovld __cnfn log2(double3);
+double4 __ovld __cnfn log2(double4);
+double8 __ovld __cnfn log2(double8);
+double16 __ovld __cnfn log2(double16);
+#endif //cl_khr_fp64
+#ifdef cl_khr_fp16
+half __ovld __cnfn log2(half);
+half2 __ovld __cnfn log2(half2);
+half3 __ovld __cnfn log2(half3);
+half4 __ovld __cnfn log2(half4);
+half8 __ovld __cnfn log2(half8);
+half16 __ovld __cnfn log2(half16);
+#endif //cl_khr_fp16
+
+/**
+ * Compute a base 10 logarithm.
+ */
+float __ovld __cnfn log10(float);
+float2 __ovld __cnfn log10(float2);
+float3 __ovld __cnfn log10(float3);
+float4 __ovld __cnfn log10(float4);
+float8 __ovld __cnfn log10(float8);
+float16 __ovld __cnfn log10(float16);
+#ifdef cl_khr_fp64
+double __ovld __cnfn log10(double);
+double2 __ovld __cnfn log10(double2);
+double3 __ovld __cnfn log10(double3);
+double4 __ovld __cnfn log10(double4);
+double8 __ovld __cnfn log10(double8);
+double16 __ovld __cnfn log10(double16);
+#endif //cl_khr_fp64
+#ifdef cl_khr_fp16
+half __ovld __cnfn log10(half);
+half2 __ovld __cnfn log10(half2);
+half3 __ovld __cnfn log10(half3);
+half4 __ovld __cnfn log10(half4);
+half8 __ovld __cnfn log10(half8);
+half16 __ovld __cnfn log10(half16);
+#endif //cl_khr_fp16
+
+/**
+ * Compute a base e logarithm of (1.0 + x).
+ */
+float __ovld __cnfn log1p(float x);
+float2 __ovld __cnfn log1p(float2 x);
+float3 __ovld __cnfn log1p(float3 x);
+float4 __ovld __cnfn log1p(float4 x);
+float8 __ovld __cnfn log1p(float8 x);
+float16 __ovld __cnfn log1p(float16 x);
+#ifdef cl_khr_fp64
+double __ovld __cnfn log1p(double x);
+double2 __ovld __cnfn log1p(double2 x);
+double3 __ovld __cnfn log1p(double3 x);
+double4 __ovld __cnfn log1p(double4 x);
+double8 __ovld __cnfn log1p(double8 x);
+double16 __ovld __cnfn log1p(double16 x);
+#endif //cl_khr_fp64
+#ifdef cl_khr_fp16
+half __ovld __cnfn log1p(half x);
+half2 __ovld __cnfn log1p(half2 x);
+half3 __ovld __cnfn log1p(half3 x);
+half4 __ovld __cnfn log1p(half4 x);
+half8 __ovld __cnfn log1p(half8 x);
+half16 __ovld __cnfn log1p(half16 x);
+#endif //cl_khr_fp16
+
+/**
+ * Compute the exponent of x, which is the integral
+ * part of log_r |x|, where r is the radix of the
+ * floating-point format.
+ */
+float __ovld __cnfn logb(float x);
+float2 __ovld __cnfn logb(float2 x);
+float3 __ovld __cnfn logb(float3 x);
+float4 __ovld __cnfn logb(float4 x);
+float8 __ovld __cnfn logb(float8 x);
+float16 __ovld __cnfn logb(float16 x);
+#ifdef cl_khr_fp64
+double __ovld __cnfn logb(double x);
+double2 __ovld __cnfn logb(double2 x);
+double3 __ovld __cnfn logb(double3 x);
+double4 __ovld __cnfn logb(double4 x);
+double8 __ovld __cnfn logb(double8 x);
+double16 __ovld __cnfn logb(double16 x);
+#endif //cl_khr_fp64
+#ifdef cl_khr_fp16
+half __ovld __cnfn logb(half x);
+half2 __ovld __cnfn logb(half2 x);
+half3 __ovld __cnfn logb(half3 x);
+half4 __ovld __cnfn logb(half4 x);
+half8 __ovld __cnfn logb(half8 x);
+half16 __ovld __cnfn logb(half16 x);
+#endif //cl_khr_fp16
+
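+/*
+ * Illustrative sketch (not part of the header): for radix-2 formats, logb
+ * returns the same exponent as ilogb, but as a floating-point value, e.g.
+ *
+ *   float e = logb(12.0f);    // e == 3.0f  (12 = 1.5 * 2^3)
+ *   int   n = ilogb(12.0f);   // n == 3
+ */
+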
+/**
+ * mad approximates a * b + c. Whether or how the
+ * product of a * b is rounded and how supernormal or
+ * subnormal intermediate products are handled is not
+ * defined. mad is intended to be used where speed is
+ * preferred over accuracy.
+ */
+float __ovld __cnfn mad(float a, float b, float c);
+float2 __ovld __cnfn mad(float2 a, float2 b, float2 c);
+float3 __ovld __cnfn mad(float3 a, float3 b, float3 c);
+float4 __ovld __cnfn mad(float4 a, float4 b, float4 c);
+float8 __ovld __cnfn mad(float8 a, float8 b, float8 c);
+float16 __ovld __cnfn mad(float16 a, float16 b, float16 c);
+#ifdef cl_khr_fp64
+double __ovld __cnfn mad(double a, double b, double c);
+double2 __ovld __cnfn mad(double2 a, double2 b, double2 c);
+double3 __ovld __cnfn mad(double3 a, double3 b, double3 c);
+double4 __ovld __cnfn mad(double4 a, double4 b, double4 c);
+double8 __ovld __cnfn mad(double8 a, double8 b, double8 c);
+double16 __ovld __cnfn mad(double16 a, double16 b, double16 c);
+#endif //cl_khr_fp64
+#ifdef cl_khr_fp16
+half __ovld __cnfn mad(half a, half b, half c);
+half2 __ovld __cnfn mad(half2 a, half2 b, half2 c);
+half3 __ovld __cnfn mad(half3 a, half3 b, half3 c);
+half4 __ovld __cnfn mad(half4 a, half4 b, half4 c);
+half8 __ovld __cnfn mad(half8 a, half8 b, half8 c);
+half16 __ovld __cnfn mad(half16 a, half16 b, half16 c);
+#endif //cl_khr_fp16
+
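+/*
+ * Illustrative sketch (not part of the header): prefer the standard fma
+ * builtin when a correctly rounded a*b+c is required, e.g.
+ *
+ *   float fast    = mad(a, b, c);   // rounding of a*b is unspecified
+ *   float precise = fma(a, b, c);   // one correctly rounded result
+ */
+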
+/**
+ * Returns x if | x | > | y |, y if | y | > | x |, otherwise
+ * fmax(x, y).
+ */
+float __ovld __cnfn maxmag(float x, float y);
+float2 __ovld __cnfn maxmag(float2 x, float2 y);
+float3 __ovld __cnfn maxmag(float3 x, float3 y);
+float4 __ovld __cnfn maxmag(float4 x, float4 y);
+float8 __ovld __cnfn maxmag(float8 x, float8 y);
+float16 __ovld __cnfn maxmag(float16 x, float16 y);
+#ifdef cl_khr_fp64
+double __ovld __cnfn maxmag(double x, double y);
+double2 __ovld __cnfn maxmag(double2 x, double2 y);
+double3 __ovld __cnfn maxmag(double3 x, double3 y);
+double4 __ovld __cnfn maxmag(double4 x, double4 y);
+double8 __ovld __cnfn maxmag(double8 x, double8 y);
+double16 __ovld __cnfn maxmag(double16 x, double16 y);
+#endif //cl_khr_fp64
+#ifdef cl_khr_fp16
+half __ovld __cnfn maxmag(half x, half y);
+half2 __ovld __cnfn maxmag(half2 x, half2 y);
+half3 __ovld __cnfn maxmag(half3 x, half3 y);
+half4 __ovld __cnfn maxmag(half4 x, half4 y);
+half8 __ovld __cnfn maxmag(half8 x, half8 y);
+half16 __ovld __cnfn maxmag(half16 x, half16 y);
+#endif //cl_khr_fp16
+
+/**
+ * Returns x if | x | < | y |, y if | y | < | x |, otherwise
+ * fmin(x, y).
+ */
+float __ovld __cnfn minmag(float x, float y);
+float2 __ovld __cnfn minmag(float2 x, float2 y);
+float3 __ovld __cnfn minmag(float3 x, float3 y);
+float4 __ovld __cnfn minmag(float4 x, float4 y);
+float8 __ovld __cnfn minmag(float8 x, float8 y);
+float16 __ovld __cnfn minmag(float16 x, float16 y);
+#ifdef cl_khr_fp64
+double __ovld __cnfn minmag(double x, double y);
+double2 __ovld __cnfn minmag(double2 x, double2 y);
+double3 __ovld __cnfn minmag(double3 x, double3 y);
+double4 __ovld __cnfn minmag(double4 x, double4 y);
+double8 __ovld __cnfn minmag(double8 x, double8 y);
+double16 __ovld __cnfn minmag(double16 x, double16 y);
+#endif //cl_khr_fp64
+#ifdef cl_khr_fp16
+half __ovld __cnfn minmag(half x, half y);
+half2 __ovld __cnfn minmag(half2 x, half2 y);
+half3 __ovld __cnfn minmag(half3 x, half3 y);
+half4 __ovld __cnfn minmag(half4 x, half4 y);
+half8 __ovld __cnfn minmag(half8 x, half8 y);
+half16 __ovld __cnfn minmag(half16 x, half16 y);
+#endif //cl_khr_fp16
+
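+/*
+ * Illustrative sketch (not part of the header):
+ *
+ *   maxmag(-4.0f, 3.0f);   // -4.0f  (larger magnitude wins)
+ *   minmag(-4.0f, 3.0f);   //  3.0f  (smaller magnitude wins)
+ *   maxmag(-2.0f, 2.0f);   //  2.0f  (equal magnitudes -> fmax)
+ */
+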
+/**
+ * Decompose a floating-point number. The modf
+ * function breaks the argument x into integral and
+ * fractional parts, each of which has the same sign as
+ * the argument. It stores the integral part in the object
+ * pointed to by iptr.
+ */
+#if __OPENCL_C_VERSION__ >= CL_VERSION_2_0
+float __ovld modf(float x, float *iptr);
+float2 __ovld modf(float2 x, float2 *iptr);
+float3 __ovld modf(float3 x, float3 *iptr);
+float4 __ovld modf(float4 x, float4 *iptr);
+float8 __ovld modf(float8 x, float8 *iptr);
+float16 __ovld modf(float16 x, float16 *iptr);
+#ifdef cl_khr_fp64
+double __ovld modf(double x, double *iptr);
+double2 __ovld modf(double2 x, double2 *iptr);
+double3 __ovld modf(double3 x, double3 *iptr);
+double4 __ovld modf(double4 x, double4 *iptr);
+double8 __ovld modf(double8 x, double8 *iptr);
+double16 __ovld modf(double16 x, double16 *iptr);
+#endif //cl_khr_fp64
+#ifdef cl_khr_fp16
+half __ovld modf(half x, half *iptr);
+half2 __ovld modf(half2 x, half2 *iptr);
+half3 __ovld modf(half3 x, half3 *iptr);
+half4 __ovld modf(half4 x, half4 *iptr);
+half8 __ovld modf(half8 x, half8 *iptr);
+half16 __ovld modf(half16 x, half16 *iptr);
+#endif //cl_khr_fp16
+#else
+float __ovld modf(float x, __global float *iptr);
+float2 __ovld modf(float2 x, __global float2 *iptr);
+float3 __ovld modf(float3 x, __global float3 *iptr);
+float4 __ovld modf(float4 x, __global float4 *iptr);
+float8 __ovld modf(float8 x, __global float8 *iptr);
+float16 __ovld modf(float16 x, __global float16 *iptr);
+float __ovld modf(float x, __local float *iptr);
+float2 __ovld modf(float2 x, __local float2 *iptr);
+float3 __ovld modf(float3 x, __local float3 *iptr);
+float4 __ovld modf(float4 x, __local float4 *iptr);
+float8 __ovld modf(float8 x, __local float8 *iptr);
+float16 __ovld modf(float16 x, __local float16 *iptr);
+float __ovld modf(float x, __private float *iptr);
+float2 __ovld modf(float2 x, __private float2 *iptr);
+float3 __ovld modf(float3 x, __private float3 *iptr);
+float4 __ovld modf(float4 x, __private float4 *iptr);
+float8 __ovld modf(float8 x, __private float8 *iptr);
+float16 __ovld modf(float16 x, __private float16 *iptr);
+#ifdef cl_khr_fp64
+double __ovld modf(double x, __global double *iptr);
+double2 __ovld modf(double2 x, __global double2 *iptr);
+double3 __ovld modf(double3 x, __global double3 *iptr);
+double4 __ovld modf(double4 x, __global double4 *iptr);
+double8 __ovld modf(double8 x, __global double8 *iptr);
+double16 __ovld modf(double16 x, __global double16 *iptr);
+double __ovld modf(double x, __local double *iptr);
+double2 __ovld modf(double2 x, __local double2 *iptr);
+double3 __ovld modf(double3 x, __local double3 *iptr);
+double4 __ovld modf(double4 x, __local double4 *iptr);
+double8 __ovld modf(double8 x, __local double8 *iptr);
+double16 __ovld modf(double16 x, __local double16 *iptr);
+double __ovld modf(double x, __private double *iptr);
+double2 __ovld modf(double2 x, __private double2 *iptr);
+double3 __ovld modf(double3 x, __private double3 *iptr);
+double4 __ovld modf(double4 x, __private double4 *iptr);
+double8 __ovld modf(double8 x, __private double8 *iptr);
+double16 __ovld modf(double16 x, __private double16 *iptr);
+#endif //cl_khr_fp64
+#ifdef cl_khr_fp16
+half __ovld modf(half x, __global half *iptr);
+half2 __ovld modf(half2 x, __global half2 *iptr);
+half3 __ovld modf(half3 x, __global half3 *iptr);
+half4 __ovld modf(half4 x, __global half4 *iptr);
+half8 __ovld modf(half8 x, __global half8 *iptr);
+half16 __ovld modf(half16 x, __global half16 *iptr);
+half __ovld modf(half x, __local half *iptr);
+half2 __ovld modf(half2 x, __local half2 *iptr);
+half3 __ovld modf(half3 x, __local half3 *iptr);
+half4 __ovld modf(half4 x, __local half4 *iptr);
+half8 __ovld modf(half8 x, __local half8 *iptr);
+half16 __ovld modf(half16 x, __local half16 *iptr);
+half __ovld modf(half x, __private half *iptr);
+half2 __ovld modf(half2 x, __private half2 *iptr);
+half3 __ovld modf(half3 x, __private half3 *iptr);
+half4 __ovld modf(half4 x, __private half4 *iptr);
+half8 __ovld modf(half8 x, __private half8 *iptr);
+half16 __ovld modf(half16 x, __private half16 *iptr);
+#endif //cl_khr_fp16
+#endif //__OPENCL_C_VERSION__ >= CL_VERSION_2_0
+
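+/*
+ * Illustrative sketch (not part of the header): both parts carry the sign
+ * of the argument, e.g.
+ *
+ *   float ip;
+ *   float fp = modf(-3.25f, &ip);   // fp == -0.25f, ip == -3.0f
+ */
+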
+/**
+ * Returns a quiet NaN. The nancode may be placed
+ * in the significand of the resulting NaN.
+ */
+float __ovld __cnfn nan(uint nancode);
+float2 __ovld __cnfn nan(uint2 nancode);
+float3 __ovld __cnfn nan(uint3 nancode);
+float4 __ovld __cnfn nan(uint4 nancode);
+float8 __ovld __cnfn nan(uint8 nancode);
+float16 __ovld __cnfn nan(uint16 nancode);
+#ifdef cl_khr_fp64
+double __ovld __cnfn nan(ulong nancode);
+double2 __ovld __cnfn nan(ulong2 nancode);
+double3 __ovld __cnfn nan(ulong3 nancode);
+double4 __ovld __cnfn nan(ulong4 nancode);
+double8 __ovld __cnfn nan(ulong8 nancode);
+double16 __ovld __cnfn nan(ulong16 nancode);
+#endif //cl_khr_fp64
+#ifdef cl_khr_fp16
+half __ovld __cnfn nan(ushort nancode);
+half2 __ovld __cnfn nan(ushort2 nancode);
+half3 __ovld __cnfn nan(ushort3 nancode);
+half4 __ovld __cnfn nan(ushort4 nancode);
+half8 __ovld __cnfn nan(ushort8 nancode);
+half16 __ovld __cnfn nan(ushort16 nancode);
+#endif //cl_khr_fp16
+
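+/*
+ * Illustrative sketch (not part of the header): 0x7Au is an arbitrary
+ * example nancode; whether it survives in the NaN payload is
+ * implementation-defined.
+ *
+ *   float q = nan(0x7Au);   // quiet NaN
+ *   int bad = isnan(q);     // 1
+ */
+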
+/**
+ * Computes the next representable floating-point
+ * value (of the argument's type) following x in the
+ * direction of
+ * y. Thus, if y is less than x, nextafter() returns the
+ * largest representable floating-point number less
+ * than x.
+ */
+float __ovld __cnfn nextafter(float x, float y);
+float2 __ovld __cnfn nextafter(float2 x, float2 y);
+float3 __ovld __cnfn nextafter(float3 x, float3 y);
+float4 __ovld __cnfn nextafter(float4 x, float4 y);
+float8 __ovld __cnfn nextafter(float8 x, float8 y);
+float16 __ovld __cnfn nextafter(float16 x, float16 y);
+#ifdef cl_khr_fp64
+double __ovld __cnfn nextafter(double x, double y);
+double2 __ovld __cnfn nextafter(double2 x, double2 y);
+double3 __ovld __cnfn nextafter(double3 x, double3 y);
+double4 __ovld __cnfn nextafter(double4 x, double4 y);
+double8 __ovld __cnfn nextafter(double8 x, double8 y);
+double16 __ovld __cnfn nextafter(double16 x, double16 y);
+#endif //cl_khr_fp64
+#ifdef cl_khr_fp16
+half __ovld __cnfn nextafter(half x, half y);
+half2 __ovld __cnfn nextafter(half2 x, half2 y);
+half3 __ovld __cnfn nextafter(half3 x, half3 y);
+half4 __ovld __cnfn nextafter(half4 x, half4 y);
+half8 __ovld __cnfn nextafter(half8 x, half8 y);
+half16 __ovld __cnfn nextafter(half16 x, half16 y);
+#endif //cl_khr_fp16
+
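+/*
+ * Illustrative sketch (not part of the header): stepping by one ULP, e.g.
+ *
+ *   float up   = nextafter(1.0f,  2.0f);   // 0x1.000002p+0f (1 + 2^-23)
+ *   float down = nextafter(1.0f, -1.0f);   // 0x1.fffffep-1f, the largest
+ *                                          // float below 1.0f
+ */
+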
+/**
+ * Compute x to the power y.
+ */
+float __ovld __cnfn pow(float x, float y);
+float2 __ovld __cnfn pow(float2 x, float2 y);
+float3 __ovld __cnfn pow(float3 x, float3 y);
+float4 __ovld __cnfn pow(float4 x, float4 y);
+float8 __ovld __cnfn pow(float8 x, float8 y);
+float16 __ovld __cnfn pow(float16 x, float16 y);
+#ifdef cl_khr_fp64
+double __ovld __cnfn pow(double x, double y);
+double2 __ovld __cnfn pow(double2 x, double2 y);
+double3 __ovld __cnfn pow(double3 x, double3 y);
+double4 __ovld __cnfn pow(double4 x, double4 y);
+double8 __ovld __cnfn pow(double8 x, double8 y);
+double16 __ovld __cnfn pow(double16 x, double16 y);
+#endif //cl_khr_fp64
+#ifdef cl_khr_fp16
+half __ovld __cnfn pow(half x, half y);
+half2 __ovld __cnfn pow(half2 x, half2 y);
+half3 __ovld __cnfn pow(half3 x, half3 y);
+half4 __ovld __cnfn pow(half4 x, half4 y);
+half8 __ovld __cnfn pow(half8 x, half8 y);
+half16 __ovld __cnfn pow(half16 x, half16 y);
+#endif //cl_khr_fp16
+
+/**
+ * Compute x to the power y, where y is an integer.
+ */
+float __ovld __cnfn pown(float x, int y);
+float2 __ovld __cnfn pown(float2 x, int2 y);
+float3 __ovld __cnfn pown(float3 x, int3 y);
+float4 __ovld __cnfn pown(float4 x, int4 y);
+float8 __ovld __cnfn pown(float8 x, int8 y);
+float16 __ovld __cnfn pown(float16 x, int16 y);
+#ifdef cl_khr_fp64
+double __ovld __cnfn pown(double x, int y);
+double2 __ovld __cnfn pown(double2 x, int2 y);
+double3 __ovld __cnfn pown(double3 x, int3 y);
+double4 __ovld __cnfn pown(double4 x, int4 y);
+double8 __ovld __cnfn pown(double8 x, int8 y);
+double16 __ovld __cnfn pown(double16 x, int16 y);
+#endif //cl_khr_fp64
+#ifdef cl_khr_fp16
+half __ovld __cnfn pown(half x, int y);
+half2 __ovld __cnfn pown(half2 x, int2 y);
+half3 __ovld __cnfn pown(half3 x, int3 y);
+half4 __ovld __cnfn pown(half4 x, int4 y);
+half8 __ovld __cnfn pown(half8 x, int8 y);
+half16 __ovld __cnfn pown(half16 x, int16 y);
+#endif //cl_khr_fp16
+
+/**
+ * Compute x to the power y, where x is >= 0.
+ */
+float __ovld __cnfn powr(float x, float y);
+float2 __ovld __cnfn powr(float2 x, float2 y);
+float3 __ovld __cnfn powr(float3 x, float3 y);
+float4 __ovld __cnfn powr(float4 x, float4 y);
+float8 __ovld __cnfn powr(float8 x, float8 y);
+float16 __ovld __cnfn powr(float16 x, float16 y);
+#ifdef cl_khr_fp64
+double __ovld __cnfn powr(double x, double y);
+double2 __ovld __cnfn powr(double2 x, double2 y);
+double3 __ovld __cnfn powr(double3 x, double3 y);
+double4 __ovld __cnfn powr(double4 x, double4 y);
+double8 __ovld __cnfn powr(double8 x, double8 y);
+double16 __ovld __cnfn powr(double16 x, double16 y);
+#endif //cl_khr_fp64
+#ifdef cl_khr_fp16
+half __ovld __cnfn powr(half x, half y);
+half2 __ovld __cnfn powr(half2 x, half2 y);
+half3 __ovld __cnfn powr(half3 x, half3 y);
+half4 __ovld __cnfn powr(half4 x, half4 y);
+half8 __ovld __cnfn powr(half8 x, half8 y);
+half16 __ovld __cnfn powr(half16 x, half16 y);
+#endif //cl_khr_fp16
+
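+/*
+ * Illustrative sketch (not part of the header): the three power variants
+ * differ on negative bases, e.g.
+ *
+ *   pow (-8.0f, 3.0f);   // -512.0f  (y happens to be integral)
+ *   pown(-8.0f, 3);      // -512.0f  (y is an int by construction)
+ *   powr(-8.0f, 3.0f);   // NaN      (powr requires x >= 0)
+ */
+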
+/**
+ * Compute the value r such that r = x - n*y, where n
+ * is the integer nearest the exact value of x/y. If there
+ * are two integers closest to x/y, n shall be the even
+ * one. If r is zero, it is given the same sign as x.
+ */
+float __ovld __cnfn remainder(float x, float y);
+float2 __ovld __cnfn remainder(float2 x, float2 y);
+float3 __ovld __cnfn remainder(float3 x, float3 y);
+float4 __ovld __cnfn remainder(float4 x, float4 y);
+float8 __ovld __cnfn remainder(float8 x, float8 y);
+float16 __ovld __cnfn remainder(float16 x, float16 y);
+#ifdef cl_khr_fp64
+double __ovld __cnfn remainder(double x, double y);
+double2 __ovld __cnfn remainder(double2 x, double2 y);
+double3 __ovld __cnfn remainder(double3 x, double3 y);
+double4 __ovld __cnfn remainder(double4 x, double4 y);
+double8 __ovld __cnfn remainder(double8 x, double8 y);
+double16 __ovld __cnfn remainder(double16 x, double16 y);
+#endif //cl_khr_fp64
+#ifdef cl_khr_fp16
+half __ovld __cnfn remainder(half x, half y);
+half2 __ovld __cnfn remainder(half2 x, half2 y);
+half3 __ovld __cnfn remainder(half3 x, half3 y);
+half4 __ovld __cnfn remainder(half4 x, half4 y);
+half8 __ovld __cnfn remainder(half8 x, half8 y);
+half16 __ovld __cnfn remainder(half16 x, half16 y);
+#endif //cl_khr_fp16
+
+/**
+ * The remquo function computes the value r such
+ * that r = x - n*y, where n is the integer nearest the
+ * exact value of x/y. If there are two integers closest
+ * to x/y, n shall be the even one. If r is zero, it is
+ * given the same sign as x. This is the same value
+ * that is returned by the remainder function.
+ * remquo also calculates the lower seven bits of the
+ * integral quotient x/y, and gives that value the same
+ * sign as x/y. It stores this signed value in the object
+ * pointed to by quo.
+ */
+#if __OPENCL_C_VERSION__ >= CL_VERSION_2_0
+float __ovld remquo(float x, float y, int *quo);
+float2 __ovld remquo(float2 x, float2 y, int2 *quo);
+float3 __ovld remquo(float3 x, float3 y, int3 *quo);
+float4 __ovld remquo(float4 x, float4 y, int4 *quo);
+float8 __ovld remquo(float8 x, float8 y, int8 *quo);
+float16 __ovld remquo(float16 x, float16 y, int16 *quo);
+#ifdef cl_khr_fp64
+double __ovld remquo(double x, double y, int *quo);
+double2 __ovld remquo(double2 x, double2 y, int2 *quo);
+double3 __ovld remquo(double3 x, double3 y, int3 *quo);
+double4 __ovld remquo(double4 x, double4 y, int4 *quo);
+double8 __ovld remquo(double8 x, double8 y, int8 *quo);
+double16 __ovld remquo(double16 x, double16 y, int16 *quo);
+#endif //cl_khr_fp64
+#ifdef cl_khr_fp16
+half __ovld remquo(half x, half y, int *quo);
+half2 __ovld remquo(half2 x, half2 y, int2 *quo);
+half3 __ovld remquo(half3 x, half3 y, int3 *quo);
+half4 __ovld remquo(half4 x, half4 y, int4 *quo);
+half8 __ovld remquo(half8 x, half8 y, int8 *quo);
+half16 __ovld remquo(half16 x, half16 y, int16 *quo);
+#endif //cl_khr_fp16
+#else
+float __ovld remquo(float x, float y, __global int *quo);
+float2 __ovld remquo(float2 x, float2 y, __global int2 *quo);
+float3 __ovld remquo(float3 x, float3 y, __global int3 *quo);
+float4 __ovld remquo(float4 x, float4 y, __global int4 *quo);
+float8 __ovld remquo(float8 x, float8 y, __global int8 *quo);
+float16 __ovld remquo(float16 x, float16 y, __global int16 *quo);
+float __ovld remquo(float x, float y, __local int *quo);
+float2 __ovld remquo(float2 x, float2 y, __local int2 *quo);
+float3 __ovld remquo(float3 x, float3 y, __local int3 *quo);
+float4 __ovld remquo(float4 x, float4 y, __local int4 *quo);
+float8 __ovld remquo(float8 x, float8 y, __local int8 *quo);
+float16 __ovld remquo(float16 x, float16 y, __local int16 *quo);
+float __ovld remquo(float x, float y, __private int *quo);
+float2 __ovld remquo(float2 x, float2 y, __private int2 *quo);
+float3 __ovld remquo(float3 x, float3 y, __private int3 *quo);
+float4 __ovld remquo(float4 x, float4 y, __private int4 *quo);
+float8 __ovld remquo(float8 x, float8 y, __private int8 *quo);
+float16 __ovld remquo(float16 x, float16 y, __private int16 *quo);
+#ifdef cl_khr_fp64
+double __ovld remquo(double x, double y, __global int *quo);
+double2 __ovld remquo(double2 x, double2 y, __global int2 *quo);
+double3 __ovld remquo(double3 x, double3 y, __global int3 *quo);
+double4 __ovld remquo(double4 x, double4 y, __global int4 *quo);
+double8 __ovld remquo(double8 x, double8 y, __global int8 *quo);
+double16 __ovld remquo(double16 x, double16 y, __global int16 *quo);
+double __ovld remquo(double x, double y, __local int *quo);
+double2 __ovld remquo(double2 x, double2 y, __local int2 *quo);
+double3 __ovld remquo(double3 x, double3 y, __local int3 *quo);
+double4 __ovld remquo(double4 x, double4 y, __local int4 *quo);
+double8 __ovld remquo(double8 x, double8 y, __local int8 *quo);
+double16 __ovld remquo(double16 x, double16 y, __local int16 *quo);
+double __ovld remquo(double x, double y, __private int *quo);
+double2 __ovld remquo(double2 x, double2 y, __private int2 *quo);
+double3 __ovld remquo(double3 x, double3 y, __private int3 *quo);
+double4 __ovld remquo(double4 x, double4 y, __private int4 *quo);
+double8 __ovld remquo(double8 x, double8 y, __private int8 *quo);
+double16 __ovld remquo(double16 x, double16 y, __private int16 *quo);
+#endif //cl_khr_fp64
+#ifdef cl_khr_fp16
+half __ovld remquo(half x, half y, __global int *quo);
+half2 __ovld remquo(half2 x, half2 y, __global int2 *quo);
+half3 __ovld remquo(half3 x, half3 y, __global int3 *quo);
+half4 __ovld remquo(half4 x, half4 y, __global int4 *quo);
+half8 __ovld remquo(half8 x, half8 y, __global int8 *quo);
+half16 __ovld remquo(half16 x, half16 y, __global int16 *quo);
+half __ovld remquo(half x, half y, __local int *quo);
+half2 __ovld remquo(half2 x, half2 y, __local int2 *quo);
+half3 __ovld remquo(half3 x, half3 y, __local int3 *quo);
+half4 __ovld remquo(half4 x, half4 y, __local int4 *quo);
+half8 __ovld remquo(half8 x, half8 y, __local int8 *quo);
+half16 __ovld remquo(half16 x, half16 y, __local int16 *quo);
+half __ovld remquo(half x, half y, __private int *quo);
+half2 __ovld remquo(half2 x, half2 y, __private int2 *quo);
+half3 __ovld remquo(half3 x, half3 y, __private int3 *quo);
+half4 __ovld remquo(half4 x, half4 y, __private int4 *quo);
+half8 __ovld remquo(half8 x, half8 y, __private int8 *quo);
+half16 __ovld remquo(half16 x, half16 y, __private int16 *quo);
+#endif //cl_khr_fp16
+#endif //__OPENCL_C_VERSION__ >= CL_VERSION_2_0
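+
+/*
+ * Illustrative sketch (not part of the header): remainder/remquo round
+ * the quotient to nearest (ties to even), unlike fmod, e.g.
+ *
+ *   fmod     (5.5f, 2.0f);   //  1.5f  (n = trunc(2.75) = 2)
+ *   remainder(5.5f, 2.0f);   // -0.5f  (n = nearest(2.75) = 3)
+ *
+ *   int q;
+ *   float r = remquo(5.5f, 2.0f, &q);   // r == -0.5f; the low bits of q
+ *                                       // encode 3, with the sign of x/y
+ */
+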
+/**
+ * Round to integral value (using round to nearest
+ * even rounding mode) in floating-point format.
+ * Refer to section 7.1 for a description of rounding
+ * modes.
+ */
+float __ovld __cnfn rint(float);
+float2 __ovld __cnfn rint(float2);
+float3 __ovld __cnfn rint(float3);
+float4 __ovld __cnfn rint(float4);
+float8 __ovld __cnfn rint(float8);
+float16 __ovld __cnfn rint(float16);
+#ifdef cl_khr_fp64
+double __ovld __cnfn rint(double);
+double2 __ovld __cnfn rint(double2);
+double3 __ovld __cnfn rint(double3);
+double4 __ovld __cnfn rint(double4);
+double8 __ovld __cnfn rint(double8);
+double16 __ovld __cnfn rint(double16);
+#endif //cl_khr_fp64
+#ifdef cl_khr_fp16
+half __ovld __cnfn rint(half);
+half2 __ovld __cnfn rint(half2);
+half3 __ovld __cnfn rint(half3);
+half4 __ovld __cnfn rint(half4);
+half8 __ovld __cnfn rint(half8);
+half16 __ovld __cnfn rint(half16);
+#endif //cl_khr_fp16
+
+/**
+ * Compute x to the power 1/y.
+ */
+float __ovld __cnfn rootn(float x, int y);
+float2 __ovld __cnfn rootn(float2 x, int2 y);
+float3 __ovld __cnfn rootn(float3 x, int3 y);
+float4 __ovld __cnfn rootn(float4 x, int4 y);
+float8 __ovld __cnfn rootn(float8 x, int8 y);
+float16 __ovld __cnfn rootn(float16 x, int16 y);
+#ifdef cl_khr_fp64
+double __ovld __cnfn rootn(double x, int y);
+double2 __ovld __cnfn rootn(double2 x, int2 y);
+double3 __ovld __cnfn rootn(double3 x, int3 y);
+double4 __ovld __cnfn rootn(double4 x, int4 y);
+double8 __ovld __cnfn rootn(double8 x, int8 y);
+double16 __ovld __cnfn rootn(double16 x, int16 y);
+#endif //cl_khr_fp64
+#ifdef cl_khr_fp16
+half __ovld __cnfn rootn(half x, int y);
+half2 __ovld __cnfn rootn(half2 x, int2 y);
+half3 __ovld __cnfn rootn(half3 x, int3 y);
+half4 __ovld __cnfn rootn(half4 x, int4 y);
+half8 __ovld __cnfn rootn(half8 x, int8 y);
+half16 __ovld __cnfn rootn(half16 x, int16 y);
+#endif //cl_khr_fp16
+
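+/*
+ * Illustrative sketch (not part of the header):
+ *
+ *   rootn(27.0f, 3);    // 3.0f   (cube root)
+ *   rootn(16.0f, -2);   // 0.25f  (16^(-1/2))
+ */
+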
+/**
+ * Return the integral value nearest to x rounding
+ * halfway cases away from zero, regardless of the
+ * current rounding direction.
+ */
+float __ovld __cnfn round(float x);
+float2 __ovld __cnfn round(float2 x);
+float3 __ovld __cnfn round(float3 x);
+float4 __ovld __cnfn round(float4 x);
+float8 __ovld __cnfn round(float8 x);
+float16 __ovld __cnfn round(float16 x);
+#ifdef cl_khr_fp64
+double __ovld __cnfn round(double x);
+double2 __ovld __cnfn round(double2 x);
+double3 __ovld __cnfn round(double3 x);
+double4 __ovld __cnfn round(double4 x);
+double8 __ovld __cnfn round(double8 x);
+double16 __ovld __cnfn round(double16 x);
+#endif //cl_khr_fp64
+#ifdef cl_khr_fp16
+half __ovld __cnfn round(half x);
+half2 __ovld __cnfn round(half2 x);
+half3 __ovld __cnfn round(half3 x);
+half4 __ovld __cnfn round(half4 x);
+half8 __ovld __cnfn round(half8 x);
+half16 __ovld __cnfn round(half16 x);
+#endif //cl_khr_fp16
+
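+/*
+ * Illustrative sketch (not part of the header): rint and round differ
+ * only on halfway cases, e.g.
+ *
+ *   rint (2.5f);   // 2.0f  (ties to even)
+ *   round(2.5f);   // 3.0f  (ties away from zero)
+ *   rint (3.5f);   // 4.0f  (even neighbor is 4)
+ */
+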
+/**
+ * Compute inverse square root.
+ */
+float __ovld __cnfn rsqrt(float);
+float2 __ovld __cnfn rsqrt(float2);
+float3 __ovld __cnfn rsqrt(float3);
+float4 __ovld __cnfn rsqrt(float4);
+float8 __ovld __cnfn rsqrt(float8);
+float16 __ovld __cnfn rsqrt(float16);
+#ifdef cl_khr_fp64
+double __ovld __cnfn rsqrt(double);
+double2 __ovld __cnfn rsqrt(double2);
+double3 __ovld __cnfn rsqrt(double3);
+double4 __ovld __cnfn rsqrt(double4);
+double8 __ovld __cnfn rsqrt(double8);
+double16 __ovld __cnfn rsqrt(double16);
+#endif //cl_khr_fp64
+#ifdef cl_khr_fp16
+half __ovld __cnfn rsqrt(half);
+half2 __ovld __cnfn rsqrt(half2);
+half3 __ovld __cnfn rsqrt(half3);
+half4 __ovld __cnfn rsqrt(half4);
+half8 __ovld __cnfn rsqrt(half8);
+half16 __ovld __cnfn rsqrt(half16);
+#endif //cl_khr_fp16
+
+/**
+ * Compute sine.
+ */
+float __ovld __cnfn sin(float);
+float2 __ovld __cnfn sin(float2);
+float3 __ovld __cnfn sin(float3);
+float4 __ovld __cnfn sin(float4);
+float8 __ovld __cnfn sin(float8);
+float16 __ovld __cnfn sin(float16);
+#ifdef cl_khr_fp64
+double __ovld __cnfn sin(double);
+double2 __ovld __cnfn sin(double2);
+double3 __ovld __cnfn sin(double3);
+double4 __ovld __cnfn sin(double4);
+double8 __ovld __cnfn sin(double8);
+double16 __ovld __cnfn sin(double16);
+#endif //cl_khr_fp64
+#ifdef cl_khr_fp16
+half __ovld __cnfn sin(half);
+half2 __ovld __cnfn sin(half2);
+half3 __ovld __cnfn sin(half3);
+half4 __ovld __cnfn sin(half4);
+half8 __ovld __cnfn sin(half8);
+half16 __ovld __cnfn sin(half16);
+#endif //cl_khr_fp16
+
+/**
+ * Compute sine and cosine of x. The computed sine
+ * is the return value and computed cosine is returned
+ * in cosval.
+ */
+#if __OPENCL_C_VERSION__ >= CL_VERSION_2_0
+float __ovld sincos(float x, float *cosval);
+float2 __ovld sincos(float2 x, float2 *cosval);
+float3 __ovld sincos(float3 x, float3 *cosval);
+float4 __ovld sincos(float4 x, float4 *cosval);
+float8 __ovld sincos(float8 x, float8 *cosval);
+float16 __ovld sincos(float16 x, float16 *cosval);
+#ifdef cl_khr_fp64
+double __ovld sincos(double x, double *cosval);
+double2 __ovld sincos(double2 x, double2 *cosval);
+double3 __ovld sincos(double3 x, double3 *cosval);
+double4 __ovld sincos(double4 x, double4 *cosval);
+double8 __ovld sincos(double8 x, double8 *cosval);
+double16 __ovld sincos(double16 x, double16 *cosval);
+#endif //cl_khr_fp64
+#ifdef cl_khr_fp16
+half __ovld sincos(half x, half *cosval);
+half2 __ovld sincos(half2 x, half2 *cosval);
+half3 __ovld sincos(half3 x, half3 *cosval);
+half4 __ovld sincos(half4 x, half4 *cosval);
+half8 __ovld sincos(half8 x, half8 *cosval);
+half16 __ovld sincos(half16 x, half16 *cosval);
+#endif //cl_khr_fp16
+#else
+float __ovld sincos(float x, __global float *cosval);
+float2 __ovld sincos(float2 x, __global float2 *cosval);
+float3 __ovld sincos(float3 x, __global float3 *cosval);
+float4 __ovld sincos(float4 x, __global float4 *cosval);
+float8 __ovld sincos(float8 x, __global float8 *cosval);
+float16 __ovld sincos(float16 x, __global float16 *cosval);
+float __ovld sincos(float x, __local float *cosval);
+float2 __ovld sincos(float2 x, __local float2 *cosval);
+float3 __ovld sincos(float3 x, __local float3 *cosval);
+float4 __ovld sincos(float4 x, __local float4 *cosval);
+float8 __ovld sincos(float8 x, __local float8 *cosval);
+float16 __ovld sincos(float16 x, __local float16 *cosval);
+float __ovld sincos(float x, __private float *cosval);
+float2 __ovld sincos(float2 x, __private float2 *cosval);
+float3 __ovld sincos(float3 x, __private float3 *cosval);
+float4 __ovld sincos(float4 x, __private float4 *cosval);
+float8 __ovld sincos(float8 x, __private float8 *cosval);
+float16 __ovld sincos(float16 x, __private float16 *cosval);
+#ifdef cl_khr_fp64
+double __ovld sincos(double x, __global double *cosval);
+double2 __ovld sincos(double2 x, __global double2 *cosval);
+double3 __ovld sincos(double3 x, __global double3 *cosval);
+double4 __ovld sincos(double4 x, __global double4 *cosval);
+double8 __ovld sincos(double8 x, __global double8 *cosval);
+double16 __ovld sincos(double16 x, __global double16 *cosval);
+double __ovld sincos(double x, __local double *cosval);
+double2 __ovld sincos(double2 x, __local double2 *cosval);
+double3 __ovld sincos(double3 x, __local double3 *cosval);
+double4 __ovld sincos(double4 x, __local double4 *cosval);
+double8 __ovld sincos(double8 x, __local double8 *cosval);
+double16 __ovld sincos(double16 x, __local double16 *cosval);
+double __ovld sincos(double x, __private double *cosval);
+double2 __ovld sincos(double2 x, __private double2 *cosval);
+double3 __ovld sincos(double3 x, __private double3 *cosval);
+double4 __ovld sincos(double4 x, __private double4 *cosval);
+double8 __ovld sincos(double8 x, __private double8 *cosval);
+double16 __ovld sincos(double16 x, __private double16 *cosval);
+#endif //cl_khr_fp64
+#ifdef cl_khr_fp16
+half __ovld sincos(half x, __global half *cosval);
+half2 __ovld sincos(half2 x, __global half2 *cosval);
+half3 __ovld sincos(half3 x, __global half3 *cosval);
+half4 __ovld sincos(half4 x, __global half4 *cosval);
+half8 __ovld sincos(half8 x, __global half8 *cosval);
+half16 __ovld sincos(half16 x, __global half16 *cosval);
+half __ovld sincos(half x, __local half *cosval);
+half2 __ovld sincos(half2 x, __local half2 *cosval);
+half3 __ovld sincos(half3 x, __local half3 *cosval);
+half4 __ovld sincos(half4 x, __local half4 *cosval);
+half8 __ovld sincos(half8 x, __local half8 *cosval);
+half16 __ovld sincos(half16 x, __local half16 *cosval);
+half __ovld sincos(half x, __private half *cosval);
+half2 __ovld sincos(half2 x, __private half2 *cosval);
+half3 __ovld sincos(half3 x, __private half3 *cosval);
+half4 __ovld sincos(half4 x, __private half4 *cosval);
+half8 __ovld sincos(half8 x, __private half8 *cosval);
+half16 __ovld sincos(half16 x, __private half16 *cosval);
+#endif //cl_khr_fp16
+#endif //__OPENCL_C_VERSION__ >= CL_VERSION_2_0
+
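+/*
+ * Illustrative sketch (not part of the header; theta, x and y are assumed
+ * private floats): one sincos call drives a 2-D rotation.
+ *
+ *   float c;
+ *   float s  = sincos(theta, &c);
+ *   float xr = x * c - y * s;
+ *   float yr = x * s + y * c;
+ */
+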
+/**
+ * Compute hyperbolic sine.
+ */
+float __ovld __cnfn sinh(float);
+float2 __ovld __cnfn sinh(float2);
+float3 __ovld __cnfn sinh(float3);
+float4 __ovld __cnfn sinh(float4);
+float8 __ovld __cnfn sinh(float8);
+float16 __ovld __cnfn sinh(float16);
+#ifdef cl_khr_fp64
+double __ovld __cnfn sinh(double);
+double2 __ovld __cnfn sinh(double2);
+double3 __ovld __cnfn sinh(double3);
+double4 __ovld __cnfn sinh(double4);
+double8 __ovld __cnfn sinh(double8);
+double16 __ovld __cnfn sinh(double16);
+#endif //cl_khr_fp64
+#ifdef cl_khr_fp16
+half __ovld __cnfn sinh(half);
+half2 __ovld __cnfn sinh(half2);
+half3 __ovld __cnfn sinh(half3);
+half4 __ovld __cnfn sinh(half4);
+half8 __ovld __cnfn sinh(half8);
+half16 __ovld __cnfn sinh(half16);
+#endif //cl_khr_fp16
+
+/**
+ * Compute sin (PI * x).
+ */
+float __ovld __cnfn sinpi(float x);
+float2 __ovld __cnfn sinpi(float2 x);
+float3 __ovld __cnfn sinpi(float3 x);
+float4 __ovld __cnfn sinpi(float4 x);
+float8 __ovld __cnfn sinpi(float8 x);
+float16 __ovld __cnfn sinpi(float16 x);
+#ifdef cl_khr_fp64
+double __ovld __cnfn sinpi(double x);
+double2 __ovld __cnfn sinpi(double2 x);
+double3 __ovld __cnfn sinpi(double3 x);
+double4 __ovld __cnfn sinpi(double4 x);
+double8 __ovld __cnfn sinpi(double8 x);
+double16 __ovld __cnfn sinpi(double16 x);
+#endif //cl_khr_fp64
+#ifdef cl_khr_fp16
+half __ovld __cnfn sinpi(half x);
+half2 __ovld __cnfn sinpi(half2 x);
+half3 __ovld __cnfn sinpi(half3 x);
+half4 __ovld __cnfn sinpi(half4 x);
+half8 __ovld __cnfn sinpi(half8 x);
+half16 __ovld __cnfn sinpi(half16 x);
+#endif //cl_khr_fp16
+
+/**
+ * Compute square root.
+ */
+float __ovld __cnfn sqrt(float);
+float2 __ovld __cnfn sqrt(float2);
+float3 __ovld __cnfn sqrt(float3);
+float4 __ovld __cnfn sqrt(float4);
+float8 __ovld __cnfn sqrt(float8);
+float16 __ovld __cnfn sqrt(float16);
+#ifdef cl_khr_fp64
+double __ovld __cnfn sqrt(double);
+double2 __ovld __cnfn sqrt(double2);
+double3 __ovld __cnfn sqrt(double3);
+double4 __ovld __cnfn sqrt(double4);
+double8 __ovld __cnfn sqrt(double8);
+double16 __ovld __cnfn sqrt(double16);
+#endif //cl_khr_fp64
+#ifdef cl_khr_fp16
+half __ovld __cnfn sqrt(half);
+half2 __ovld __cnfn sqrt(half2);
+half3 __ovld __cnfn sqrt(half3);
+half4 __ovld __cnfn sqrt(half4);
+half8 __ovld __cnfn sqrt(half8);
+half16 __ovld __cnfn sqrt(half16);
+#endif //cl_khr_fp16
+
+/**
+ * Compute tangent.
+ */
+float __ovld __cnfn tan(float);
+float2 __ovld __cnfn tan(float2);
+float3 __ovld __cnfn tan(float3);
+float4 __ovld __cnfn tan(float4);
+float8 __ovld __cnfn tan(float8);
+float16 __ovld __cnfn tan(float16);
+#ifdef cl_khr_fp64
+double __ovld __cnfn tan(double);
+double2 __ovld __cnfn tan(double2);
+double3 __ovld __cnfn tan(double3);
+double4 __ovld __cnfn tan(double4);
+double8 __ovld __cnfn tan(double8);
+double16 __ovld __cnfn tan(double16);
+#endif //cl_khr_fp64
+#ifdef cl_khr_fp16
+half __ovld __cnfn tan(half);
+half2 __ovld __cnfn tan(half2);
+half3 __ovld __cnfn tan(half3);
+half4 __ovld __cnfn tan(half4);
+half8 __ovld __cnfn tan(half8);
+half16 __ovld __cnfn tan(half16);
+#endif //cl_khr_fp16
+
+/**
+ * Compute hyperbolic tangent.
+ */
+float __ovld __cnfn tanh(float);
+float2 __ovld __cnfn tanh(float2);
+float3 __ovld __cnfn tanh(float3);
+float4 __ovld __cnfn tanh(float4);
+float8 __ovld __cnfn tanh(float8);
+float16 __ovld __cnfn tanh(float16);
+#ifdef cl_khr_fp64
+double __ovld __cnfn tanh(double);
+double2 __ovld __cnfn tanh(double2);
+double3 __ovld __cnfn tanh(double3);
+double4 __ovld __cnfn tanh(double4);
+double8 __ovld __cnfn tanh(double8);
+double16 __ovld __cnfn tanh(double16);
+#endif //cl_khr_fp64
+#ifdef cl_khr_fp16
+half __ovld __cnfn tanh(half);
+half2 __ovld __cnfn tanh(half2);
+half3 __ovld __cnfn tanh(half3);
+half4 __ovld __cnfn tanh(half4);
+half8 __ovld __cnfn tanh(half8);
+half16 __ovld __cnfn tanh(half16);
+#endif //cl_khr_fp16
+
+/**
+ * Compute tan (PI * x).
+ */
+float __ovld __cnfn tanpi(float x);
+float2 __ovld __cnfn tanpi(float2 x);
+float3 __ovld __cnfn tanpi(float3 x);
+float4 __ovld __cnfn tanpi(float4 x);
+float8 __ovld __cnfn tanpi(float8 x);
+float16 __ovld __cnfn tanpi(float16 x);
+#ifdef cl_khr_fp64
+double __ovld __cnfn tanpi(double x);
+double2 __ovld __cnfn tanpi(double2 x);
+double3 __ovld __cnfn tanpi(double3 x);
+double4 __ovld __cnfn tanpi(double4 x);
+double8 __ovld __cnfn tanpi(double8 x);
+double16 __ovld __cnfn tanpi(double16 x);
+#endif //cl_khr_fp64
+#ifdef cl_khr_fp16
+half __ovld __cnfn tanpi(half x);
+half2 __ovld __cnfn tanpi(half2 x);
+half3 __ovld __cnfn tanpi(half3 x);
+half4 __ovld __cnfn tanpi(half4 x);
+half8 __ovld __cnfn tanpi(half8 x);
+half16 __ovld __cnfn tanpi(half16 x);
+#endif //cl_khr_fp16
+
+/**
+ * Compute the gamma function.
+ */
+float __ovld __cnfn tgamma(float);
+float2 __ovld __cnfn tgamma(float2);
+float3 __ovld __cnfn tgamma(float3);
+float4 __ovld __cnfn tgamma(float4);
+float8 __ovld __cnfn tgamma(float8);
+float16 __ovld __cnfn tgamma(float16);
+#ifdef cl_khr_fp64
+double __ovld __cnfn tgamma(double);
+double2 __ovld __cnfn tgamma(double2);
+double3 __ovld __cnfn tgamma(double3);
+double4 __ovld __cnfn tgamma(double4);
+double8 __ovld __cnfn tgamma(double8);
+double16 __ovld __cnfn tgamma(double16);
+#endif //cl_khr_fp64
+#ifdef cl_khr_fp16
+half __ovld __cnfn tgamma(half);
+half2 __ovld __cnfn tgamma(half2);
+half3 __ovld __cnfn tgamma(half3);
+half4 __ovld __cnfn tgamma(half4);
+half8 __ovld __cnfn tgamma(half8);
+half16 __ovld __cnfn tgamma(half16);
+#endif //cl_khr_fp16
+
+/**
+ * Round to integral value using the round to zero
+ * rounding mode.
+ */
+float __ovld __cnfn trunc(float);
+float2 __ovld __cnfn trunc(float2);
+float3 __ovld __cnfn trunc(float3);
+float4 __ovld __cnfn trunc(float4);
+float8 __ovld __cnfn trunc(float8);
+float16 __ovld __cnfn trunc(float16);
+#ifdef cl_khr_fp64
+double __ovld __cnfn trunc(double);
+double2 __ovld __cnfn trunc(double2);
+double3 __ovld __cnfn trunc(double3);
+double4 __ovld __cnfn trunc(double4);
+double8 __ovld __cnfn trunc(double8);
+double16 __ovld __cnfn trunc(double16);
+#endif //cl_khr_fp64
+#ifdef cl_khr_fp16
+half __ovld __cnfn trunc(half);
+half2 __ovld __cnfn trunc(half2);
+half3 __ovld __cnfn trunc(half3);
+half4 __ovld __cnfn trunc(half4);
+half8 __ovld __cnfn trunc(half8);
+half16 __ovld __cnfn trunc(half16);
+#endif //cl_khr_fp16
+
+/**
+ * Compute cosine. x must be in the range -2^16 ... +2^16.
+ */
+float __ovld __cnfn half_cos(float x);
+float2 __ovld __cnfn half_cos(float2 x);
+float3 __ovld __cnfn half_cos(float3 x);
+float4 __ovld __cnfn half_cos(float4 x);
+float8 __ovld __cnfn half_cos(float8 x);
+float16 __ovld __cnfn half_cos(float16 x);
+
+/**
+ * Compute x / y.
+ */
+float __ovld __cnfn half_divide(float x, float y);
+float2 __ovld __cnfn half_divide(float2 x, float2 y);
+float3 __ovld __cnfn half_divide(float3 x, float3 y);
+float4 __ovld __cnfn half_divide(float4 x, float4 y);
+float8 __ovld __cnfn half_divide(float8 x, float8 y);
+float16 __ovld __cnfn half_divide(float16 x, float16 y);
+
+/**
+ * Compute the base-e exponential of x.
+ */
+float __ovld __cnfn half_exp(float x);
+float2 __ovld __cnfn half_exp(float2 x);
+float3 __ovld __cnfn half_exp(float3 x);
+float4 __ovld __cnfn half_exp(float4 x);
+float8 __ovld __cnfn half_exp(float8 x);
+float16 __ovld __cnfn half_exp(float16 x);
+
+/**
+ * Compute the base-2 exponential of x.
+ */
+float __ovld __cnfn half_exp2(float x);
+float2 __ovld __cnfn half_exp2(float2 x);
+float3 __ovld __cnfn half_exp2(float3 x);
+float4 __ovld __cnfn half_exp2(float4 x);
+float8 __ovld __cnfn half_exp2(float8 x);
+float16 __ovld __cnfn half_exp2(float16 x);
+
+/**
+ * Compute the base-10 exponential of x.
+ */
+float __ovld __cnfn half_exp10(float x);
+float2 __ovld __cnfn half_exp10(float2 x);
+float3 __ovld __cnfn half_exp10(float3 x);
+float4 __ovld __cnfn half_exp10(float4 x);
+float8 __ovld __cnfn half_exp10(float8 x);
+float16 __ovld __cnfn half_exp10(float16 x);
+
+/**
+ * Compute natural logarithm.
+ */
+float __ovld __cnfn half_log(float x);
+float2 __ovld __cnfn half_log(float2 x);
+float3 __ovld __cnfn half_log(float3 x);
+float4 __ovld __cnfn half_log(float4 x);
+float8 __ovld __cnfn half_log(float8 x);
+float16 __ovld __cnfn half_log(float16 x);
+
+/**
+ * Compute a base 2 logarithm.
+ */
+float __ovld __cnfn half_log2(float x);
+float2 __ovld __cnfn half_log2(float2 x);
+float3 __ovld __cnfn half_log2(float3 x);
+float4 __ovld __cnfn half_log2(float4 x);
+float8 __ovld __cnfn half_log2(float8 x);
+float16 __ovld __cnfn half_log2(float16 x);
+
+/**
+ * Compute a base 10 logarithm.
+ */
+float __ovld __cnfn half_log10(float x);
+float2 __ovld __cnfn half_log10(float2 x);
+float3 __ovld __cnfn half_log10(float3 x);
+float4 __ovld __cnfn half_log10(float4 x);
+float8 __ovld __cnfn half_log10(float8 x);
+float16 __ovld __cnfn half_log10(float16 x);
+
+/**
+ * Compute x to the power y, where x is >= 0.
+ */
+float __ovld __cnfn half_powr(float x, float y);
+float2 __ovld __cnfn half_powr(float2 x, float2 y);
+float3 __ovld __cnfn half_powr(float3 x, float3 y);
+float4 __ovld __cnfn half_powr(float4 x, float4 y);
+float8 __ovld __cnfn half_powr(float8 x, float8 y);
+float16 __ovld __cnfn half_powr(float16 x, float16 y);
+
+/**
+ * Compute reciprocal.
+ */
+float __ovld __cnfn half_recip(float x);
+float2 __ovld __cnfn half_recip(float2 x);
+float3 __ovld __cnfn half_recip(float3 x);
+float4 __ovld __cnfn half_recip(float4 x);
+float8 __ovld __cnfn half_recip(float8 x);
+float16 __ovld __cnfn half_recip(float16 x);
+
+/**
+ * Compute inverse square root.
+ */
+float __ovld __cnfn half_rsqrt(float x);
+float2 __ovld __cnfn half_rsqrt(float2 x);
+float3 __ovld __cnfn half_rsqrt(float3 x);
+float4 __ovld __cnfn half_rsqrt(float4 x);
+float8 __ovld __cnfn half_rsqrt(float8 x);
+float16 __ovld __cnfn half_rsqrt(float16 x);
+
+/**
+ * Compute sine. x must be in the range -2^16 ... +2^16.
+ */
+float __ovld __cnfn half_sin(float x);
+float2 __ovld __cnfn half_sin(float2 x);
+float3 __ovld __cnfn half_sin(float3 x);
+float4 __ovld __cnfn half_sin(float4 x);
+float8 __ovld __cnfn half_sin(float8 x);
+float16 __ovld __cnfn half_sin(float16 x);
+
+/**
+ * Compute square root.
+ */
+float __ovld __cnfn half_sqrt(float x);
+float2 __ovld __cnfn half_sqrt(float2 x);
+float3 __ovld __cnfn half_sqrt(float3 x);
+float4 __ovld __cnfn half_sqrt(float4 x);
+float8 __ovld __cnfn half_sqrt(float8 x);
+float16 __ovld __cnfn half_sqrt(float16 x);
+
+/**
+ * Compute tangent. x must be in the range -2^16 ... +2^16.
+ */
+float __ovld __cnfn half_tan(float x);
+float2 __ovld __cnfn half_tan(float2 x);
+float3 __ovld __cnfn half_tan(float3 x);
+float4 __ovld __cnfn half_tan(float4 x);
+float8 __ovld __cnfn half_tan(float8 x);
+float16 __ovld __cnfn half_tan(float16 x);
+
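+/*
+ * Illustrative sketch, not part of the header: the half_ functions above
+ * take float arguments only and trade accuracy for speed. A hypothetical
+ * kernel where a few ulps of error in a falloff term are acceptable:
+ */
+__kernel void example_half_falloff(__global float *out, __global const float *d) {
+    size_t i = get_global_id(0);
+    /* half_exp and half_recip are the fast, lower-precision forms of
+       exp and 1.0f/x. */
+    out[i] = half_exp(-d[i]) * half_recip(1.0f + d[i]);
+}
+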
+/**
+ * Compute cosine over an implementation-defined range.
+ * The maximum error is implementation-defined.
+ */
+float __ovld __cnfn native_cos(float x);
+float2 __ovld __cnfn native_cos(float2 x);
+float3 __ovld __cnfn native_cos(float3 x);
+float4 __ovld __cnfn native_cos(float4 x);
+float8 __ovld __cnfn native_cos(float8 x);
+float16 __ovld __cnfn native_cos(float16 x);
+#ifdef cl_khr_fp64
+double __ovld __cnfn native_cos(double x);
+double2 __ovld __cnfn native_cos(double2 x);
+double3 __ovld __cnfn native_cos(double3 x);
+double4 __ovld __cnfn native_cos(double4 x);
+double8 __ovld __cnfn native_cos(double8 x);
+double16 __ovld __cnfn native_cos(double16 x);
+#endif //cl_khr_fp64
+
+/**
+ * Compute x / y over an implementation-defined range.
+ * The maximum error is implementation-defined.
+ */
+float __ovld __cnfn native_divide(float x, float y);
+float2 __ovld __cnfn native_divide(float2 x, float2 y);
+float3 __ovld __cnfn native_divide(float3 x, float3 y);
+float4 __ovld __cnfn native_divide(float4 x, float4 y);
+float8 __ovld __cnfn native_divide(float8 x, float8 y);
+float16 __ovld __cnfn native_divide(float16 x, float16 y);
+#ifdef cl_khr_fp64
+double __ovld __cnfn native_divide(double x, double y);
+double2 __ovld __cnfn native_divide(double2 x, double2 y);
+double3 __ovld __cnfn native_divide(double3 x, double3 y);
+double4 __ovld __cnfn native_divide(double4 x, double4 y);
+double8 __ovld __cnfn native_divide(double8 x, double8 y);
+double16 __ovld __cnfn native_divide(double16 x, double16 y);
+#endif //cl_khr_fp64
+
+/**
+ * Compute the base-e exponential of x over an
+ * implementation-defined range. The maximum error is
+ * implementation-defined.
+ */
+float __ovld __cnfn native_exp(float x);
+float2 __ovld __cnfn native_exp(float2 x);
+float3 __ovld __cnfn native_exp(float3 x);
+float4 __ovld __cnfn native_exp(float4 x);
+float8 __ovld __cnfn native_exp(float8 x);
+float16 __ovld __cnfn native_exp(float16 x);
+#ifdef cl_khr_fp64
+double __ovld __cnfn native_exp(double x);
+double2 __ovld __cnfn native_exp(double2 x);
+double3 __ovld __cnfn native_exp(double3 x);
+double4 __ovld __cnfn native_exp(double4 x);
+double8 __ovld __cnfn native_exp(double8 x);
+double16 __ovld __cnfn native_exp(double16 x);
+#endif //cl_khr_fp64
+
+/**
+ * Compute the base-2 exponential of x over an
+ * implementation-defined range. The maximum error is
+ * implementation-defined.
+ */
+float __ovld __cnfn native_exp2(float x);
+float2 __ovld __cnfn native_exp2(float2 x);
+float3 __ovld __cnfn native_exp2(float3 x);
+float4 __ovld __cnfn native_exp2(float4 x);
+float8 __ovld __cnfn native_exp2(float8 x);
+float16 __ovld __cnfn native_exp2(float16 x);
+#ifdef cl_khr_fp64
+double __ovld __cnfn native_exp2(double x);
+double2 __ovld __cnfn native_exp2(double2 x);
+double3 __ovld __cnfn native_exp2(double3 x);
+double4 __ovld __cnfn native_exp2(double4 x);
+double8 __ovld __cnfn native_exp2(double8 x);
+double16 __ovld __cnfn native_exp2(double16 x);
+#endif //cl_khr_fp64
+
+/**
+ * Compute the base-10 exponential of x over an
+ * implementation-defined range. The maximum error is
+ * implementation-defined.
+ */
+float __ovld __cnfn native_exp10(float x);
+float2 __ovld __cnfn native_exp10(float2 x);
+float3 __ovld __cnfn native_exp10(float3 x);
+float4 __ovld __cnfn native_exp10(float4 x);
+float8 __ovld __cnfn native_exp10(float8 x);
+float16 __ovld __cnfn native_exp10(float16 x);
+#ifdef cl_khr_fp64
+double __ovld __cnfn native_exp10(double x);
+double2 __ovld __cnfn native_exp10(double2 x);
+double3 __ovld __cnfn native_exp10(double3 x);
+double4 __ovld __cnfn native_exp10(double4 x);
+double8 __ovld __cnfn native_exp10(double8 x);
+double16 __ovld __cnfn native_exp10(double16 x);
+#endif //cl_khr_fp64
+
+/**
+ * Compute natural logarithm over an implementation-defined
+ * range. The maximum error is implementation-defined.
+ */
+float __ovld __cnfn native_log(float x);
+float2 __ovld __cnfn native_log(float2 x);
+float3 __ovld __cnfn native_log(float3 x);
+float4 __ovld __cnfn native_log(float4 x);
+float8 __ovld __cnfn native_log(float8 x);
+float16 __ovld __cnfn native_log(float16 x);
+#ifdef cl_khr_fp64
+double __ovld __cnfn native_log(double x);
+double2 __ovld __cnfn native_log(double2 x);
+double3 __ovld __cnfn native_log(double3 x);
+double4 __ovld __cnfn native_log(double4 x);
+double8 __ovld __cnfn native_log(double8 x);
+double16 __ovld __cnfn native_log(double16 x);
+#endif //cl_khr_fp64
+
+/**
+ * Compute a base 2 logarithm over an implementation-defined
+ * range. The maximum error is implementation-defined.
+ */
+float __ovld __cnfn native_log2(float x);
+float2 __ovld __cnfn native_log2(float2 x);
+float3 __ovld __cnfn native_log2(float3 x);
+float4 __ovld __cnfn native_log2(float4 x);
+float8 __ovld __cnfn native_log2(float8 x);
+float16 __ovld __cnfn native_log2(float16 x);
+#ifdef cl_khr_fp64
+double __ovld __cnfn native_log2(double x);
+double2 __ovld __cnfn native_log2(double2 x);
+double3 __ovld __cnfn native_log2(double3 x);
+double4 __ovld __cnfn native_log2(double4 x);
+double8 __ovld __cnfn native_log2(double8 x);
+double16 __ovld __cnfn native_log2(double16 x);
+#endif //cl_khr_fp64
+
+/**
+ * Compute a base 10 logarithm over an implementation-defined
+ * range. The maximum error is implementation-defined.
+ */
+float __ovld __cnfn native_log10(float x);
+float2 __ovld __cnfn native_log10(float2 x);
+float3 __ovld __cnfn native_log10(float3 x);
+float4 __ovld __cnfn native_log10(float4 x);
+float8 __ovld __cnfn native_log10(float8 x);
+float16 __ovld __cnfn native_log10(float16 x);
+#ifdef cl_khr_fp64
+double __ovld __cnfn native_log10(double x);
+double2 __ovld __cnfn native_log10(double2 x);
+double3 __ovld __cnfn native_log10(double3 x);
+double4 __ovld __cnfn native_log10(double4 x);
+double8 __ovld __cnfn native_log10(double8 x);
+double16 __ovld __cnfn native_log10(double16 x);
+#endif //cl_khr_fp64
+
+/**
+ * Compute x to the power y, where x is >= 0. The ranges
+ * of x and y are implementation-defined. The maximum error
+ * is implementation-defined.
+ */
+float __ovld __cnfn native_powr(float x, float y);
+float2 __ovld __cnfn native_powr(float2 x, float2 y);
+float3 __ovld __cnfn native_powr(float3 x, float3 y);
+float4 __ovld __cnfn native_powr(float4 x, float4 y);
+float8 __ovld __cnfn native_powr(float8 x, float8 y);
+float16 __ovld __cnfn native_powr(float16 x, float16 y);
+#ifdef cl_khr_fp64
+double __ovld __cnfn native_powr(double x, double y);
+double2 __ovld __cnfn native_powr(double2 x, double2 y);
+double3 __ovld __cnfn native_powr(double3 x, double3 y);
+double4 __ovld __cnfn native_powr(double4 x, double4 y);
+double8 __ovld __cnfn native_powr(double8 x, double8 y);
+double16 __ovld __cnfn native_powr(double16 x, double16 y);
+#endif //cl_khr_fp64
+
+/**
+ * Compute reciprocal over an implementation-defined
+ * range. The maximum error is implementation-defined.
+ */
+float __ovld __cnfn native_recip(float x);
+float2 __ovld __cnfn native_recip(float2 x);
+float3 __ovld __cnfn native_recip(float3 x);
+float4 __ovld __cnfn native_recip(float4 x);
+float8 __ovld __cnfn native_recip(float8 x);
+float16 __ovld __cnfn native_recip(float16 x);
+#ifdef cl_khr_fp64
+double __ovld __cnfn native_recip(double x);
+double2 __ovld __cnfn native_recip(double2 x);
+double3 __ovld __cnfn native_recip(double3 x);
+double4 __ovld __cnfn native_recip(double4 x);
+double8 __ovld __cnfn native_recip(double8 x);
+double16 __ovld __cnfn native_recip(double16 x);
+#endif //cl_khr_fp64
+
+/**
+ * Compute inverse square root over an implementation-defined
+ * range. The maximum error is implementation-defined.
+ */
+float __ovld __cnfn native_rsqrt(float x);
+float2 __ovld __cnfn native_rsqrt(float2 x);
+float3 __ovld __cnfn native_rsqrt(float3 x);
+float4 __ovld __cnfn native_rsqrt(float4 x);
+float8 __ovld __cnfn native_rsqrt(float8 x);
+float16 __ovld __cnfn native_rsqrt(float16 x);
+#ifdef cl_khr_fp64
+double __ovld __cnfn native_rsqrt(double x);
+double2 __ovld __cnfn native_rsqrt(double2 x);
+double3 __ovld __cnfn native_rsqrt(double3 x);
+double4 __ovld __cnfn native_rsqrt(double4 x);
+double8 __ovld __cnfn native_rsqrt(double8 x);
+double16 __ovld __cnfn native_rsqrt(double16 x);
+#endif //cl_khr_fp64
+
+/**
+ * Compute sine over an implementation-defined range.
+ * The maximum error is implementation-defined.
+ */
+float __ovld __cnfn native_sin(float x);
+float2 __ovld __cnfn native_sin(float2 x);
+float3 __ovld __cnfn native_sin(float3 x);
+float4 __ovld __cnfn native_sin(float4 x);
+float8 __ovld __cnfn native_sin(float8 x);
+float16 __ovld __cnfn native_sin(float16 x);
+#ifdef cl_khr_fp64
+double __ovld __cnfn native_sin(double x);
+double2 __ovld __cnfn native_sin(double2 x);
+double3 __ovld __cnfn native_sin(double3 x);
+double4 __ovld __cnfn native_sin(double4 x);
+double8 __ovld __cnfn native_sin(double8 x);
+double16 __ovld __cnfn native_sin(double16 x);
+#endif //cl_khr_fp64
+
+/**
+ * Compute square root over an implementation-defined
+ * range. The maximum error is implementation-defined.
+ */
+float __ovld __cnfn native_sqrt(float x);
+float2 __ovld __cnfn native_sqrt(float2 x);
+float3 __ovld __cnfn native_sqrt(float3 x);
+float4 __ovld __cnfn native_sqrt(float4 x);
+float8 __ovld __cnfn native_sqrt(float8 x);
+float16 __ovld __cnfn native_sqrt(float16 x);
+#ifdef cl_khr_fp64
+double __ovld __cnfn native_sqrt(double x);
+double2 __ovld __cnfn native_sqrt(double2 x);
+double3 __ovld __cnfn native_sqrt(double3 x);
+double4 __ovld __cnfn native_sqrt(double4 x);
+double8 __ovld __cnfn native_sqrt(double8 x);
+double16 __ovld __cnfn native_sqrt(double16 x);
+#endif //cl_khr_fp64
+
+/**
+ * Compute tangent over an implementation-defined range.
+ * The maximum error is implementation-defined.
+ */
+float __ovld __cnfn native_tan(float x);
+float2 __ovld __cnfn native_tan(float2 x);
+float3 __ovld __cnfn native_tan(float3 x);
+float4 __ovld __cnfn native_tan(float4 x);
+float8 __ovld __cnfn native_tan(float8 x);
+float16 __ovld __cnfn native_tan(float16 x);
+#ifdef cl_khr_fp64
+double __ovld __cnfn native_tan(double x);
+double2 __ovld __cnfn native_tan(double2 x);
+double3 __ovld __cnfn native_tan(double3 x);
+double4 __ovld __cnfn native_tan(double4 x);
+double8 __ovld __cnfn native_tan(double8 x);
+double16 __ovld __cnfn native_tan(double16 x);
+#endif //cl_khr_fp64
+
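+/*
+ * Illustrative sketch, not part of the header: the native_ functions map
+ * directly to whatever the device provides, so both range and error are
+ * implementation-defined. A hypothetical normalization kernel that accepts
+ * that trade-off:
+ */
+__kernel void example_native_normalize(__global float4 *v) {
+    size_t i = get_global_id(0);
+    /* native_divide may compile to a reciprocal-multiply; use plain '/'
+       when correctly rounded division is required. */
+    v[i] = native_divide(v[i], (float4)(255.0f));
+}
+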
+// OpenCL v1.1 s6.11.3, v1.2 s6.12.3, v2.0 s6.13.3 - Integer Functions
+
+/**
+ * Returns | x |.
+ */
+uchar __ovld __cnfn abs(char x);
+uchar __ovld __cnfn abs(uchar x);
+uchar2 __ovld __cnfn abs(char2 x);
+uchar2 __ovld __cnfn abs(uchar2 x);
+uchar3 __ovld __cnfn abs(char3 x);
+uchar3 __ovld __cnfn abs(uchar3 x);
+uchar4 __ovld __cnfn abs(char4 x);
+uchar4 __ovld __cnfn abs(uchar4 x);
+uchar8 __ovld __cnfn abs(char8 x);
+uchar8 __ovld __cnfn abs(uchar8 x);
+uchar16 __ovld __cnfn abs(char16 x);
+uchar16 __ovld __cnfn abs(uchar16 x);
+ushort __ovld __cnfn abs(short x);
+ushort __ovld __cnfn abs(ushort x);
+ushort2 __ovld __cnfn abs(short2 x);
+ushort2 __ovld __cnfn abs(ushort2 x);
+ushort3 __ovld __cnfn abs(short3 x);
+ushort3 __ovld __cnfn abs(ushort3 x);
+ushort4 __ovld __cnfn abs(short4 x);
+ushort4 __ovld __cnfn abs(ushort4 x);
+ushort8 __ovld __cnfn abs(short8 x);
+ushort8 __ovld __cnfn abs(ushort8 x);
+ushort16 __ovld __cnfn abs(short16 x);
+ushort16 __ovld __cnfn abs(ushort16 x);
+uint __ovld __cnfn abs(int x);
+uint __ovld __cnfn abs(uint x);
+uint2 __ovld __cnfn abs(int2 x);
+uint2 __ovld __cnfn abs(uint2 x);
+uint3 __ovld __cnfn abs(int3 x);
+uint3 __ovld __cnfn abs(uint3 x);
+uint4 __ovld __cnfn abs(int4 x);
+uint4 __ovld __cnfn abs(uint4 x);
+uint8 __ovld __cnfn abs(int8 x);
+uint8 __ovld __cnfn abs(uint8 x);
+uint16 __ovld __cnfn abs(int16 x);
+uint16 __ovld __cnfn abs(uint16 x);
+ulong __ovld __cnfn abs(long x);
+ulong __ovld __cnfn abs(ulong x);
+ulong2 __ovld __cnfn abs(long2 x);
+ulong2 __ovld __cnfn abs(ulong2 x);
+ulong3 __ovld __cnfn abs(long3 x);
+ulong3 __ovld __cnfn abs(ulong3 x);
+ulong4 __ovld __cnfn abs(long4 x);
+ulong4 __ovld __cnfn abs(ulong4 x);
+ulong8 __ovld __cnfn abs(long8 x);
+ulong8 __ovld __cnfn abs(ulong8 x);
+ulong16 __ovld __cnfn abs(long16 x);
+ulong16 __ovld __cnfn abs(ulong16 x);
+
+/**
+ * Returns | x - y | without modulo overflow.
+ */
+uchar __ovld __cnfn abs_diff(char x, char y);
+uchar __ovld __cnfn abs_diff(uchar x, uchar y);
+uchar2 __ovld __cnfn abs_diff(char2 x, char2 y);
+uchar2 __ovld __cnfn abs_diff(uchar2 x, uchar2 y);
+uchar3 __ovld __cnfn abs_diff(char3 x, char3 y);
+uchar3 __ovld __cnfn abs_diff(uchar3 x, uchar3 y);
+uchar4 __ovld __cnfn abs_diff(char4 x, char4 y);
+uchar4 __ovld __cnfn abs_diff(uchar4 x, uchar4 y);
+uchar8 __ovld __cnfn abs_diff(char8 x, char8 y);
+uchar8 __ovld __cnfn abs_diff(uchar8 x, uchar8 y);
+uchar16 __ovld __cnfn abs_diff(char16 x, char16 y);
+uchar16 __ovld __cnfn abs_diff(uchar16 x, uchar16 y);
+ushort __ovld __cnfn abs_diff(short x, short y);
+ushort __ovld __cnfn abs_diff(ushort x, ushort y);
+ushort2 __ovld __cnfn abs_diff(short2 x, short2 y);
+ushort2 __ovld __cnfn abs_diff(ushort2 x, ushort2 y);
+ushort3 __ovld __cnfn abs_diff(short3 x, short3 y);
+ushort3 __ovld __cnfn abs_diff(ushort3 x, ushort3 y);
+ushort4 __ovld __cnfn abs_diff(short4 x, short4 y);
+ushort4 __ovld __cnfn abs_diff(ushort4 x, ushort4 y);
+ushort8 __ovld __cnfn abs_diff(short8 x, short8 y);
+ushort8 __ovld __cnfn abs_diff(ushort8 x, ushort8 y);
+ushort16 __ovld __cnfn abs_diff(short16 x, short16 y);
+ushort16 __ovld __cnfn abs_diff(ushort16 x, ushort16 y);
+uint __ovld __cnfn abs_diff(int x, int y);
+uint __ovld __cnfn abs_diff(uint x, uint y);
+uint2 __ovld __cnfn abs_diff(int2 x, int2 y);
+uint2 __ovld __cnfn abs_diff(uint2 x, uint2 y);
+uint3 __ovld __cnfn abs_diff(int3 x, int3 y);
+uint3 __ovld __cnfn abs_diff(uint3 x, uint3 y);
+uint4 __ovld __cnfn abs_diff(int4 x, int4 y);
+uint4 __ovld __cnfn abs_diff(uint4 x, uint4 y);
+uint8 __ovld __cnfn abs_diff(int8 x, int8 y);
+uint8 __ovld __cnfn abs_diff(uint8 x, uint8 y);
+uint16 __ovld __cnfn abs_diff(int16 x, int16 y);
+uint16 __ovld __cnfn abs_diff(uint16 x, uint16 y);
+ulong __ovld __cnfn abs_diff(long x, long y);
+ulong __ovld __cnfn abs_diff(ulong x, ulong y);
+ulong2 __ovld __cnfn abs_diff(long2 x, long2 y);
+ulong2 __ovld __cnfn abs_diff(ulong2 x, ulong2 y);
+ulong3 __ovld __cnfn abs_diff(long3 x, long3 y);
+ulong3 __ovld __cnfn abs_diff(ulong3 x, ulong3 y);
+ulong4 __ovld __cnfn abs_diff(long4 x, long4 y);
+ulong4 __ovld __cnfn abs_diff(ulong4 x, ulong4 y);
+ulong8 __ovld __cnfn abs_diff(long8 x, long8 y);
+ulong8 __ovld __cnfn abs_diff(ulong8 x, ulong8 y);
+ulong16 __ovld __cnfn abs_diff(long16 x, long16 y);
+ulong16 __ovld __cnfn abs_diff(ulong16 x, ulong16 y);
+
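+/*
+ * Illustrative sketch, not part of the header: abs_diff avoids the
+ * wraparound of plain subtraction; for uchar, abs_diff(10, 200) is 190,
+ * whereas (uchar)(10 - 200) wraps to 66. A hypothetical
+ * sum-of-absolute-differences building block:
+ */
+__kernel void example_abs_diff(__global const uchar *a, __global const uchar *b,
+                               __global uchar *out) {
+    size_t i = get_global_id(0);
+    /* |a - b| computed without modulo overflow. */
+    out[i] = abs_diff(a[i], b[i]);
+}
+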
+/**
+ * Returns x + y and saturates the result.
+ */
+char __ovld __cnfn add_sat(char x, char y);
+uchar __ovld __cnfn add_sat(uchar x, uchar y);
+char2 __ovld __cnfn add_sat(char2 x, char2 y);
+uchar2 __ovld __cnfn add_sat(uchar2 x, uchar2 y);
+char3 __ovld __cnfn add_sat(char3 x, char3 y);
+uchar3 __ovld __cnfn add_sat(uchar3 x, uchar3 y);
+char4 __ovld __cnfn add_sat(char4 x, char4 y);
+uchar4 __ovld __cnfn add_sat(uchar4 x, uchar4 y);
+char8 __ovld __cnfn add_sat(char8 x, char8 y);
+uchar8 __ovld __cnfn add_sat(uchar8 x, uchar8 y);
+char16 __ovld __cnfn add_sat(char16 x, char16 y);
+uchar16 __ovld __cnfn add_sat(uchar16 x, uchar16 y);
+short __ovld __cnfn add_sat(short x, short y);
+ushort __ovld __cnfn add_sat(ushort x, ushort y);
+short2 __ovld __cnfn add_sat(short2 x, short2 y);
+ushort2 __ovld __cnfn add_sat(ushort2 x, ushort2 y);
+short3 __ovld __cnfn add_sat(short3 x, short3 y);
+ushort3 __ovld __cnfn add_sat(ushort3 x, ushort3 y);
+short4 __ovld __cnfn add_sat(short4 x, short4 y);
+ushort4 __ovld __cnfn add_sat(ushort4 x, ushort4 y);
+short8 __ovld __cnfn add_sat(short8 x, short8 y);
+ushort8 __ovld __cnfn add_sat(ushort8 x, ushort8 y);
+short16 __ovld __cnfn add_sat(short16 x, short16 y);
+ushort16 __ovld __cnfn add_sat(ushort16 x, ushort16 y);
+int __ovld __cnfn add_sat(int x, int y);
+uint __ovld __cnfn add_sat(uint x, uint y);
+int2 __ovld __cnfn add_sat(int2 x, int2 y);
+uint2 __ovld __cnfn add_sat(uint2 x, uint2 y);
+int3 __ovld __cnfn add_sat(int3 x, int3 y);
+uint3 __ovld __cnfn add_sat(uint3 x, uint3 y);
+int4 __ovld __cnfn add_sat(int4 x, int4 y);
+uint4 __ovld __cnfn add_sat(uint4 x, uint4 y);
+int8 __ovld __cnfn add_sat(int8 x, int8 y);
+uint8 __ovld __cnfn add_sat(uint8 x, uint8 y);
+int16 __ovld __cnfn add_sat(int16 x, int16 y);
+uint16 __ovld __cnfn add_sat(uint16 x, uint16 y);
+long __ovld __cnfn add_sat(long x, long y);
+ulong __ovld __cnfn add_sat(ulong x, ulong y);
+long2 __ovld __cnfn add_sat(long2 x, long2 y);
+ulong2 __ovld __cnfn add_sat(ulong2 x, ulong2 y);
+long3 __ovld __cnfn add_sat(long3 x, long3 y);
+ulong3 __ovld __cnfn add_sat(ulong3 x, ulong3 y);
+long4 __ovld __cnfn add_sat(long4 x, long4 y);
+ulong4 __ovld __cnfn add_sat(ulong4 x, ulong4 y);
+long8 __ovld __cnfn add_sat(long8 x, long8 y);
+ulong8 __ovld __cnfn add_sat(ulong8 x, ulong8 y);
+long16 __ovld __cnfn add_sat(long16 x, long16 y);
+ulong16 __ovld __cnfn add_sat(ulong16 x, ulong16 y);
+
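+/*
+ * Illustrative sketch, not part of the header: add_sat clamps at the
+ * type's limits instead of wrapping, which is usually what image code
+ * wants. A hypothetical brightness kernel:
+ */
+__kernel void example_add_sat(__global uchar4 *px, uchar gain) {
+    size_t i = get_global_id(0);
+    /* Each channel saturates at 255 rather than wrapping past it. */
+    px[i] = add_sat(px[i], (uchar4)(gain));
+}
+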
+/**
+ * Returns (x + y) >> 1. The intermediate sum does
+ * not modulo overflow.
+ */
+char __ovld __cnfn hadd(char x, char y);
+uchar __ovld __cnfn hadd(uchar x, uchar y);
+char2 __ovld __cnfn hadd(char2 x, char2 y);
+uchar2 __ovld __cnfn hadd(uchar2 x, uchar2 y);
+char3 __ovld __cnfn hadd(char3 x, char3 y);
+uchar3 __ovld __cnfn hadd(uchar3 x, uchar3 y);
+char4 __ovld __cnfn hadd(char4 x, char4 y);
+uchar4 __ovld __cnfn hadd(uchar4 x, uchar4 y);
+char8 __ovld __cnfn hadd(char8 x, char8 y);
+uchar8 __ovld __cnfn hadd(uchar8 x, uchar8 y);
+char16 __ovld __cnfn hadd(char16 x, char16 y);
+uchar16 __ovld __cnfn hadd(uchar16 x, uchar16 y);
+short __ovld __cnfn hadd(short x, short y);
+ushort __ovld __cnfn hadd(ushort x, ushort y);
+short2 __ovld __cnfn hadd(short2 x, short2 y);
+ushort2 __ovld __cnfn hadd(ushort2 x, ushort2 y);
+short3 __ovld __cnfn hadd(short3 x, short3 y);
+ushort3 __ovld __cnfn hadd(ushort3 x, ushort3 y);
+short4 __ovld __cnfn hadd(short4 x, short4 y);
+ushort4 __ovld __cnfn hadd(ushort4 x, ushort4 y);
+short8 __ovld __cnfn hadd(short8 x, short8 y);
+ushort8 __ovld __cnfn hadd(ushort8 x, ushort8 y);
+short16 __ovld __cnfn hadd(short16 x, short16 y);
+ushort16 __ovld __cnfn hadd(ushort16 x, ushort16 y);
+int __ovld __cnfn hadd(int x, int y);
+uint __ovld __cnfn hadd(uint x, uint y);
+int2 __ovld __cnfn hadd(int2 x, int2 y);
+uint2 __ovld __cnfn hadd(uint2 x, uint2 y);
+int3 __ovld __cnfn hadd(int3 x, int3 y);
+uint3 __ovld __cnfn hadd(uint3 x, uint3 y);
+int4 __ovld __cnfn hadd(int4 x, int4 y);
+uint4 __ovld __cnfn hadd(uint4 x, uint4 y);
+int8 __ovld __cnfn hadd(int8 x, int8 y);
+uint8 __ovld __cnfn hadd(uint8 x, uint8 y);
+int16 __ovld __cnfn hadd(int16 x, int16 y);
+uint16 __ovld __cnfn hadd(uint16 x, uint16 y);
+long __ovld __cnfn hadd(long x, long y);
+ulong __ovld __cnfn hadd(ulong x, ulong y);
+long2 __ovld __cnfn hadd(long2 x, long2 y);
+ulong2 __ovld __cnfn hadd(ulong2 x, ulong2 y);
+long3 __ovld __cnfn hadd(long3 x, long3 y);
+ulong3 __ovld __cnfn hadd(ulong3 x, ulong3 y);
+long4 __ovld __cnfn hadd(long4 x, long4 y);
+ulong4 __ovld __cnfn hadd(ulong4 x, ulong4 y);
+long8 __ovld __cnfn hadd(long8 x, long8 y);
+ulong8 __ovld __cnfn hadd(ulong8 x, ulong8 y);
+long16 __ovld __cnfn hadd(long16 x, long16 y);
+ulong16 __ovld __cnfn hadd(ulong16 x, ulong16 y);
+
+/**
+ * Returns (x + y + 1) >> 1. The intermediate sum
+ * does not modulo overflow.
+ */
+char __ovld __cnfn rhadd(char x, char y);
+uchar __ovld __cnfn rhadd(uchar x, uchar y);
+char2 __ovld __cnfn rhadd(char2 x, char2 y);
+uchar2 __ovld __cnfn rhadd(uchar2 x, uchar2 y);
+char3 __ovld __cnfn rhadd(char3 x, char3 y);
+uchar3 __ovld __cnfn rhadd(uchar3 x, uchar3 y);
+char4 __ovld __cnfn rhadd(char4 x, char4 y);
+uchar4 __ovld __cnfn rhadd(uchar4 x, uchar4 y);
+char8 __ovld __cnfn rhadd(char8 x, char8 y);
+uchar8 __ovld __cnfn rhadd(uchar8 x, uchar8 y);
+char16 __ovld __cnfn rhadd(char16 x, char16 y);
+uchar16 __ovld __cnfn rhadd(uchar16 x, uchar16 y);
+short __ovld __cnfn rhadd(short x, short y);
+ushort __ovld __cnfn rhadd(ushort x, ushort y);
+short2 __ovld __cnfn rhadd(short2 x, short2 y);
+ushort2 __ovld __cnfn rhadd(ushort2 x, ushort2 y);
+short3 __ovld __cnfn rhadd(short3 x, short3 y);
+ushort3 __ovld __cnfn rhadd(ushort3 x, ushort3 y);
+short4 __ovld __cnfn rhadd(short4 x, short4 y);
+ushort4 __ovld __cnfn rhadd(ushort4 x, ushort4 y);
+short8 __ovld __cnfn rhadd(short8 x, short8 y);
+ushort8 __ovld __cnfn rhadd(ushort8 x, ushort8 y);
+short16 __ovld __cnfn rhadd(short16 x, short16 y);
+ushort16 __ovld __cnfn rhadd(ushort16 x, ushort16 y);
+int __ovld __cnfn rhadd(int x, int y);
+uint __ovld __cnfn rhadd(uint x, uint y);
+int2 __ovld __cnfn rhadd(int2 x, int2 y);
+uint2 __ovld __cnfn rhadd(uint2 x, uint2 y);
+int3 __ovld __cnfn rhadd(int3 x, int3 y);
+uint3 __ovld __cnfn rhadd(uint3 x, uint3 y);
+int4 __ovld __cnfn rhadd(int4 x, int4 y);
+uint4 __ovld __cnfn rhadd(uint4 x, uint4 y);
+int8 __ovld __cnfn rhadd(int8 x, int8 y);
+uint8 __ovld __cnfn rhadd(uint8 x, uint8 y);
+int16 __ovld __cnfn rhadd(int16 x, int16 y);
+uint16 __ovld __cnfn rhadd(uint16 x, uint16 y);
+long __ovld __cnfn rhadd(long x, long y);
+ulong __ovld __cnfn rhadd(ulong x, ulong y);
+long2 __ovld __cnfn rhadd(long2 x, long2 y);
+ulong2 __ovld __cnfn rhadd(ulong2 x, ulong2 y);
+long3 __ovld __cnfn rhadd(long3 x, long3 y);
+ulong3 __ovld __cnfn rhadd(ulong3 x, ulong3 y);
+long4 __ovld __cnfn rhadd(long4 x, long4 y);
+ulong4 __ovld __cnfn rhadd(ulong4 x, ulong4 y);
+long8 __ovld __cnfn rhadd(long8 x, long8 y);
+ulong8 __ovld __cnfn rhadd(ulong8 x, ulong8 y);
+long16 __ovld __cnfn rhadd(long16 x, long16 y);
+ulong16 __ovld __cnfn rhadd(ulong16 x, ulong16 y);
+
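+/*
+ * Illustrative sketch, not part of the header: because the intermediate
+ * sum is computed in full width, hadd/rhadd average two values without
+ * the (x + y) step overflowing; rhadd rounds up, hadd rounds down. A
+ * hypothetical 50/50 blend:
+ */
+__kernel void example_rhadd_blend(__global const uchar *a, __global const uchar *b,
+                                  __global uchar *out) {
+    size_t i = get_global_id(0);
+    /* (a + b + 1) >> 1 with no 8-bit overflow on the sum. */
+    out[i] = rhadd(a[i], b[i]);
+}
+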
+/**
+ * Returns min(max(x, minval), maxval).
+ * Results are undefined if minval > maxval.
+ */
+char __ovld __cnfn clamp(char x, char minval, char maxval);
+uchar __ovld __cnfn clamp(uchar x, uchar minval, uchar maxval);
+char2 __ovld __cnfn clamp(char2 x, char2 minval, char2 maxval);
+uchar2 __ovld __cnfn clamp(uchar2 x, uchar2 minval, uchar2 maxval);
+char3 __ovld __cnfn clamp(char3 x, char3 minval, char3 maxval);
+uchar3 __ovld __cnfn clamp(uchar3 x, uchar3 minval, uchar3 maxval);
+char4 __ovld __cnfn clamp(char4 x, char4 minval, char4 maxval);
+uchar4 __ovld __cnfn clamp(uchar4 x, uchar4 minval, uchar4 maxval);
+char8 __ovld __cnfn clamp(char8 x, char8 minval, char8 maxval);
+uchar8 __ovld __cnfn clamp(uchar8 x, uchar8 minval, uchar8 maxval);
+char16 __ovld __cnfn clamp(char16 x, char16 minval, char16 maxval);
+uchar16 __ovld __cnfn clamp(uchar16 x, uchar16 minval, uchar16 maxval);
+short __ovld __cnfn clamp(short x, short minval, short maxval);
+ushort __ovld __cnfn clamp(ushort x, ushort minval, ushort maxval);
+short2 __ovld __cnfn clamp(short2 x, short2 minval, short2 maxval);
+ushort2 __ovld __cnfn clamp(ushort2 x, ushort2 minval, ushort2 maxval);
+short3 __ovld __cnfn clamp(short3 x, short3 minval, short3 maxval);
+ushort3 __ovld __cnfn clamp(ushort3 x, ushort3 minval, ushort3 maxval);
+short4 __ovld __cnfn clamp(short4 x, short4 minval, short4 maxval);
+ushort4 __ovld __cnfn clamp(ushort4 x, ushort4 minval, ushort4 maxval);
+short8 __ovld __cnfn clamp(short8 x, short8 minval, short8 maxval);
+ushort8 __ovld __cnfn clamp(ushort8 x, ushort8 minval, ushort8 maxval);
+short16 __ovld __cnfn clamp(short16 x, short16 minval, short16 maxval);
+ushort16 __ovld __cnfn clamp(ushort16 x, ushort16 minval, ushort16 maxval);
+int __ovld __cnfn clamp(int x, int minval, int maxval);
+uint __ovld __cnfn clamp(uint x, uint minval, uint maxval);
+int2 __ovld __cnfn clamp(int2 x, int2 minval, int2 maxval);
+uint2 __ovld __cnfn clamp(uint2 x, uint2 minval, uint2 maxval);
+int3 __ovld __cnfn clamp(int3 x, int3 minval, int3 maxval);
+uint3 __ovld __cnfn clamp(uint3 x, uint3 minval, uint3 maxval);
+int4 __ovld __cnfn clamp(int4 x, int4 minval, int4 maxval);
+uint4 __ovld __cnfn clamp(uint4 x, uint4 minval, uint4 maxval);
+int8 __ovld __cnfn clamp(int8 x, int8 minval, int8 maxval);
+uint8 __ovld __cnfn clamp(uint8 x, uint8 minval, uint8 maxval);
+int16 __ovld __cnfn clamp(int16 x, int16 minval, int16 maxval);
+uint16 __ovld __cnfn clamp(uint16 x, uint16 minval, uint16 maxval);
+long __ovld __cnfn clamp(long x, long minval, long maxval);
+ulong __ovld __cnfn clamp(ulong x, ulong minval, ulong maxval);
+long2 __ovld __cnfn clamp(long2 x, long2 minval, long2 maxval);
+ulong2 __ovld __cnfn clamp(ulong2 x, ulong2 minval, ulong2 maxval);
+long3 __ovld __cnfn clamp(long3 x, long3 minval, long3 maxval);
+ulong3 __ovld __cnfn clamp(ulong3 x, ulong3 minval, ulong3 maxval);
+long4 __ovld __cnfn clamp(long4 x, long4 minval, long4 maxval);
+ulong4 __ovld __cnfn clamp(ulong4 x, ulong4 minval, ulong4 maxval);
+long8 __ovld __cnfn clamp(long8 x, long8 minval, long8 maxval);
+ulong8 __ovld __cnfn clamp(ulong8 x, ulong8 minval, ulong8 maxval);
+long16 __ovld __cnfn clamp(long16 x, long16 minval, long16 maxval);
+ulong16 __ovld __cnfn clamp(ulong16 x, ulong16 minval, ulong16 maxval);
+char __ovld __cnfn clamp(char x, char minval, char maxval);
+uchar __ovld __cnfn clamp(uchar x, uchar minval, uchar maxval);
+char2 __ovld __cnfn clamp(char2 x, char minval, char maxval);
+uchar2 __ovld __cnfn clamp(uchar2 x, uchar minval, uchar maxval);
+char3 __ovld __cnfn clamp(char3 x, char minval, char maxval);
+uchar3 __ovld __cnfn clamp(uchar3 x, uchar minval, uchar maxval);
+char4 __ovld __cnfn clamp(char4 x, char minval, char maxval);
+uchar4 __ovld __cnfn clamp(uchar4 x, uchar minval, uchar maxval);
+char8 __ovld __cnfn clamp(char8 x, char minval, char maxval);
+uchar8 __ovld __cnfn clamp(uchar8 x, uchar minval, uchar maxval);
+char16 __ovld __cnfn clamp(char16 x, char minval, char maxval);
+uchar16 __ovld __cnfn clamp(uchar16 x, uchar minval, uchar maxval);
+short __ovld __cnfn clamp(short x, short minval, short maxval);
+ushort __ovld __cnfn clamp(ushort x, ushort minval, ushort maxval);
+short2 __ovld __cnfn clamp(short2 x, short minval, short maxval);
+ushort2 __ovld __cnfn clamp(ushort2 x, ushort minval, ushort maxval);
+short3 __ovld __cnfn clamp(short3 x, short minval, short maxval);
+ushort3 __ovld __cnfn clamp(ushort3 x, ushort minval, ushort maxval);
+short4 __ovld __cnfn clamp(short4 x, short minval, short maxval);
+ushort4 __ovld __cnfn clamp(ushort4 x, ushort minval, ushort maxval);
+short8 __ovld __cnfn clamp(short8 x, short minval, short maxval);
+ushort8 __ovld __cnfn clamp(ushort8 x, ushort minval, ushort maxval);
+short16 __ovld __cnfn clamp(short16 x, short minval, short maxval);
+ushort16 __ovld __cnfn clamp(ushort16 x, ushort minval, ushort maxval);
+int __ovld __cnfn clamp(int x, int minval, int maxval);
+uint __ovld __cnfn clamp(uint x, uint minval, uint maxval);
+int2 __ovld __cnfn clamp(int2 x, int minval, int maxval);
+uint2 __ovld __cnfn clamp(uint2 x, uint minval, uint maxval);
+int3 __ovld __cnfn clamp(int3 x, int minval, int maxval);
+uint3 __ovld __cnfn clamp(uint3 x, uint minval, uint maxval);
+int4 __ovld __cnfn clamp(int4 x, int minval, int maxval);
+uint4 __ovld __cnfn clamp(uint4 x, uint minval, uint maxval);
+int8 __ovld __cnfn clamp(int8 x, int minval, int maxval);
+uint8 __ovld __cnfn clamp(uint8 x, uint minval, uint maxval);
+int16 __ovld __cnfn clamp(int16 x, int minval, int maxval);
+uint16 __ovld __cnfn clamp(uint16 x, uint minval, uint maxval);
+long __ovld __cnfn clamp(long x, long minval, long maxval);
+ulong __ovld __cnfn clamp(ulong x, ulong minval, ulong maxval);
+long2 __ovld __cnfn clamp(long2 x, long minval, long maxval);
+ulong2 __ovld __cnfn clamp(ulong2 x, ulong minval, ulong maxval);
+long3 __ovld __cnfn clamp(long3 x, long minval, long maxval);
+ulong3 __ovld __cnfn clamp(ulong3 x, ulong minval, ulong maxval);
+long4 __ovld __cnfn clamp(long4 x, long minval, long maxval);
+ulong4 __ovld __cnfn clamp(ulong4 x, ulong minval, ulong maxval);
+long8 __ovld __cnfn clamp(long8 x, long minval, long maxval);
+ulong8 __ovld __cnfn clamp(ulong8 x, ulong minval, ulong maxval);
+long16 __ovld __cnfn clamp(long16 x, long minval, long maxval);
+ulong16 __ovld __cnfn clamp(ulong16 x, ulong minval, ulong maxval);
+
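+/*
+ * Illustrative sketch, not part of the header: the second group of clamp
+ * overloads above takes scalar bounds, so a vector can be clamped without
+ * building a vector literal per bound. Hypothetical usage:
+ */
+__kernel void example_clamp(__global int4 *v, int lo, int hi) {
+    size_t i = get_global_id(0);
+    /* Scalar minval/maxval apply to every component; results are
+       undefined if lo > hi. */
+    v[i] = clamp(v[i], lo, hi);
+}
+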
+/**
+ * Returns the number of leading 0-bits in x, starting
+ * at the most significant bit position.
+ */
+char __ovld __cnfn clz(char x);
+uchar __ovld __cnfn clz(uchar x);
+char2 __ovld __cnfn clz(char2 x);
+uchar2 __ovld __cnfn clz(uchar2 x);
+char3 __ovld __cnfn clz(char3 x);
+uchar3 __ovld __cnfn clz(uchar3 x);
+char4 __ovld __cnfn clz(char4 x);
+uchar4 __ovld __cnfn clz(uchar4 x);
+char8 __ovld __cnfn clz(char8 x);
+uchar8 __ovld __cnfn clz(uchar8 x);
+char16 __ovld __cnfn clz(char16 x);
+uchar16 __ovld __cnfn clz(uchar16 x);
+short __ovld __cnfn clz(short x);
+ushort __ovld __cnfn clz(ushort x);
+short2 __ovld __cnfn clz(short2 x);
+ushort2 __ovld __cnfn clz(ushort2 x);
+short3 __ovld __cnfn clz(short3 x);
+ushort3 __ovld __cnfn clz(ushort3 x);
+short4 __ovld __cnfn clz(short4 x);
+ushort4 __ovld __cnfn clz(ushort4 x);
+short8 __ovld __cnfn clz(short8 x);
+ushort8 __ovld __cnfn clz(ushort8 x);
+short16 __ovld __cnfn clz(short16 x);
+ushort16 __ovld __cnfn clz(ushort16 x);
+int __ovld __cnfn clz(int x);
+uint __ovld __cnfn clz(uint x);
+int2 __ovld __cnfn clz(int2 x);
+uint2 __ovld __cnfn clz(uint2 x);
+int3 __ovld __cnfn clz(int3 x);
+uint3 __ovld __cnfn clz(uint3 x);
+int4 __ovld __cnfn clz(int4 x);
+uint4 __ovld __cnfn clz(uint4 x);
+int8 __ovld __cnfn clz(int8 x);
+uint8 __ovld __cnfn clz(uint8 x);
+int16 __ovld __cnfn clz(int16 x);
+uint16 __ovld __cnfn clz(uint16 x);
+long __ovld __cnfn clz(long x);
+ulong __ovld __cnfn clz(ulong x);
+long2 __ovld __cnfn clz(long2 x);
+ulong2 __ovld __cnfn clz(ulong2 x);
+long3 __ovld __cnfn clz(long3 x);
+ulong3 __ovld __cnfn clz(ulong3 x);
+long4 __ovld __cnfn clz(long4 x);
+ulong4 __ovld __cnfn clz(ulong4 x);
+long8 __ovld __cnfn clz(long8 x);
+ulong8 __ovld __cnfn clz(ulong8 x);
+long16 __ovld __cnfn clz(long16 x);
+ulong16 __ovld __cnfn clz(ulong16 x);
+
+/**
+ * Returns the count of trailing 0-bits in x. If x is 0,
+ * returns the size in bits of the type of x or
+ * component type of x, if x is a vector.
+ */
+#if __OPENCL_C_VERSION__ >= CL_VERSION_2_0
+char __ovld ctz(char x);
+uchar __ovld ctz(uchar x);
+char2 __ovld ctz(char2 x);
+uchar2 __ovld ctz(uchar2 x);
+char3 __ovld ctz(char3 x);
+uchar3 __ovld ctz(uchar3 x);
+char4 __ovld ctz(char4 x);
+uchar4 __ovld ctz(uchar4 x);
+char8 __ovld ctz(char8 x);
+uchar8 __ovld ctz(uchar8 x);
+char16 __ovld ctz(char16 x);
+uchar16 __ovld ctz(uchar16 x);
+short __ovld ctz(short x);
+ushort __ovld ctz(ushort x);
+short2 __ovld ctz(short2 x);
+ushort2 __ovld ctz(ushort2 x);
+short3 __ovld ctz(short3 x);
+ushort3 __ovld ctz(ushort3 x);
+short4 __ovld ctz(short4 x);
+ushort4 __ovld ctz(ushort4 x);
+short8 __ovld ctz(short8 x);
+ushort8 __ovld ctz(ushort8 x);
+short16 __ovld ctz(short16 x);
+ushort16 __ovld ctz(ushort16 x);
+int __ovld ctz(int x);
+uint __ovld ctz(uint x);
+int2 __ovld ctz(int2 x);
+uint2 __ovld ctz(uint2 x);
+int3 __ovld ctz(int3 x);
+uint3 __ovld ctz(uint3 x);
+int4 __ovld ctz(int4 x);
+uint4 __ovld ctz(uint4 x);
+int8 __ovld ctz(int8 x);
+uint8 __ovld ctz(uint8 x);
+int16 __ovld ctz(int16 x);
+uint16 __ovld ctz(uint16 x);
+long __ovld ctz(long x);
+ulong __ovld ctz(ulong x);
+long2 __ovld ctz(long2 x);
+ulong2 __ovld ctz(ulong2 x);
+long3 __ovld ctz(long3 x);
+ulong3 __ovld ctz(ulong3 x);
+long4 __ovld ctz(long4 x);
+ulong4 __ovld ctz(ulong4 x);
+long8 __ovld ctz(long8 x);
+ulong8 __ovld ctz(ulong8 x);
+long16 __ovld ctz(long16 x);
+ulong16 __ovld ctz(ulong16 x);
+#endif //__OPENCL_C_VERSION__ >= CL_VERSION_2_0
+
+/**
+ * Returns mul_hi(a, b) + c.
+ */
+char __ovld __cnfn mad_hi(char a, char b, char c);
+uchar __ovld __cnfn mad_hi(uchar a, uchar b, uchar c);
+char2 __ovld __cnfn mad_hi(char2 a, char2 b, char2 c);
+uchar2 __ovld __cnfn mad_hi(uchar2 a, uchar2 b, uchar2 c);
+char3 __ovld __cnfn mad_hi(char3 a, char3 b, char3 c);
+uchar3 __ovld __cnfn mad_hi(uchar3 a, uchar3 b, uchar3 c);
+char4 __ovld __cnfn mad_hi(char4 a, char4 b, char4 c);
+uchar4 __ovld __cnfn mad_hi(uchar4 a, uchar4 b, uchar4 c);
+char8 __ovld __cnfn mad_hi(char8 a, char8 b, char8 c);
+uchar8 __ovld __cnfn mad_hi(uchar8 a, uchar8 b, uchar8 c);
+char16 __ovld __cnfn mad_hi(char16 a, char16 b, char16 c);
+uchar16 __ovld __cnfn mad_hi(uchar16 a, uchar16 b, uchar16 c);
+short __ovld __cnfn mad_hi(short a, short b, short c);
+ushort __ovld __cnfn mad_hi(ushort a, ushort b, ushort c);
+short2 __ovld __cnfn mad_hi(short2 a, short2 b, short2 c);
+ushort2 __ovld __cnfn mad_hi(ushort2 a, ushort2 b, ushort2 c);
+short3 __ovld __cnfn mad_hi(short3 a, short3 b, short3 c);
+ushort3 __ovld __cnfn mad_hi(ushort3 a, ushort3 b, ushort3 c);
+short4 __ovld __cnfn mad_hi(short4 a, short4 b, short4 c);
+ushort4 __ovld __cnfn mad_hi(ushort4 a, ushort4 b, ushort4 c);
+short8 __ovld __cnfn mad_hi(short8 a, short8 b, short8 c);
+ushort8 __ovld __cnfn mad_hi(ushort8 a, ushort8 b, ushort8 c);
+short16 __ovld __cnfn mad_hi(short16 a, short16 b, short16 c);
+ushort16 __ovld __cnfn mad_hi(ushort16 a, ushort16 b, ushort16 c);
+int __ovld __cnfn mad_hi(int a, int b, int c);
+uint __ovld __cnfn mad_hi(uint a, uint b, uint c);
+int2 __ovld __cnfn mad_hi(int2 a, int2 b, int2 c);
+uint2 __ovld __cnfn mad_hi(uint2 a, uint2 b, uint2 c);
+int3 __ovld __cnfn mad_hi(int3 a, int3 b, int3 c);
+uint3 __ovld __cnfn mad_hi(uint3 a, uint3 b, uint3 c);
+int4 __ovld __cnfn mad_hi(int4 a, int4 b, int4 c);
+uint4 __ovld __cnfn mad_hi(uint4 a, uint4 b, uint4 c);
+int8 __ovld __cnfn mad_hi(int8 a, int8 b, int8 c);
+uint8 __ovld __cnfn mad_hi(uint8 a, uint8 b, uint8 c);
+int16 __ovld __cnfn mad_hi(int16 a, int16 b, int16 c);
+uint16 __ovld __cnfn mad_hi(uint16 a, uint16 b, uint16 c);
+long __ovld __cnfn mad_hi(long a, long b, long c);
+ulong __ovld __cnfn mad_hi(ulong a, ulong b, ulong c);
+long2 __ovld __cnfn mad_hi(long2 a, long2 b, long2 c);
+ulong2 __ovld __cnfn mad_hi(ulong2 a, ulong2 b, ulong2 c);
+long3 __ovld __cnfn mad_hi(long3 a, long3 b, long3 c);
+ulong3 __ovld __cnfn mad_hi(ulong3 a, ulong3 b, ulong3 c);
+long4 __ovld __cnfn mad_hi(long4 a, long4 b, long4 c);
+ulong4 __ovld __cnfn mad_hi(ulong4 a, ulong4 b, ulong4 c);
+long8 __ovld __cnfn mad_hi(long8 a, long8 b, long8 c);
+ulong8 __ovld __cnfn mad_hi(ulong8 a, ulong8 b, ulong8 c);
+long16 __ovld __cnfn mad_hi(long16 a, long16 b, long16 c);
+ulong16 __ovld __cnfn mad_hi(ulong16 a, ulong16 b, ulong16 c);
+
+/**
+ * Returns a * b + c and saturates the result.
+ */
+char __ovld __cnfn mad_sat(char a, char b, char c);
+uchar __ovld __cnfn mad_sat(uchar a, uchar b, uchar c);
+char2 __ovld __cnfn mad_sat(char2 a, char2 b, char2 c);
+uchar2 __ovld __cnfn mad_sat(uchar2 a, uchar2 b, uchar2 c);
+char3 __ovld __cnfn mad_sat(char3 a, char3 b, char3 c);
+uchar3 __ovld __cnfn mad_sat(uchar3 a, uchar3 b, uchar3 c);
+char4 __ovld __cnfn mad_sat(char4 a, char4 b, char4 c);
+uchar4 __ovld __cnfn mad_sat(uchar4 a, uchar4 b, uchar4 c);
+char8 __ovld __cnfn mad_sat(char8 a, char8 b, char8 c);
+uchar8 __ovld __cnfn mad_sat(uchar8 a, uchar8 b, uchar8 c);
+char16 __ovld __cnfn mad_sat(char16 a, char16 b, char16 c);
+uchar16 __ovld __cnfn mad_sat(uchar16 a, uchar16 b, uchar16 c);
+short __ovld __cnfn mad_sat(short a, short b, short c);
+ushort __ovld __cnfn mad_sat(ushort a, ushort b, ushort c);
+short2 __ovld __cnfn mad_sat(short2 a, short2 b, short2 c);
+ushort2 __ovld __cnfn mad_sat(ushort2 a, ushort2 b, ushort2 c);
+short3 __ovld __cnfn mad_sat(short3 a, short3 b, short3 c);
+ushort3 __ovld __cnfn mad_sat(ushort3 a, ushort3 b, ushort3 c);
+short4 __ovld __cnfn mad_sat(short4 a, short4 b, short4 c);
+ushort4 __ovld __cnfn mad_sat(ushort4 a, ushort4 b, ushort4 c);
+short8 __ovld __cnfn mad_sat(short8 a, short8 b, short8 c);
+ushort8 __ovld __cnfn mad_sat(ushort8 a, ushort8 b, ushort8 c);
+short16 __ovld __cnfn mad_sat(short16 a, short16 b, short16 c);
+ushort16 __ovld __cnfn mad_sat(ushort16 a, ushort16 b, ushort16 c);
+int __ovld __cnfn mad_sat(int a, int b, int c);
+uint __ovld __cnfn mad_sat(uint a, uint b, uint c);
+int2 __ovld __cnfn mad_sat(int2 a, int2 b, int2 c);
+uint2 __ovld __cnfn mad_sat(uint2 a, uint2 b, uint2 c);
+int3 __ovld __cnfn mad_sat(int3 a, int3 b, int3 c);
+uint3 __ovld __cnfn mad_sat(uint3 a, uint3 b, uint3 c);
+int4 __ovld __cnfn mad_sat(int4 a, int4 b, int4 c);
+uint4 __ovld __cnfn mad_sat(uint4 a, uint4 b, uint4 c);
+int8 __ovld __cnfn mad_sat(int8 a, int8 b, int8 c);
+uint8 __ovld __cnfn mad_sat(uint8 a, uint8 b, uint8 c);
+int16 __ovld __cnfn mad_sat(int16 a, int16 b, int16 c);
+uint16 __ovld __cnfn mad_sat(uint16 a, uint16 b, uint16 c);
+long __ovld __cnfn mad_sat(long a, long b, long c);
+ulong __ovld __cnfn mad_sat(ulong a, ulong b, ulong c);
+long2 __ovld __cnfn mad_sat(long2 a, long2 b, long2 c);
+ulong2 __ovld __cnfn mad_sat(ulong2 a, ulong2 b, ulong2 c);
+long3 __ovld __cnfn mad_sat(long3 a, long3 b, long3 c);
+ulong3 __ovld __cnfn mad_sat(ulong3 a, ulong3 b, ulong3 c);
+long4 __ovld __cnfn mad_sat(long4 a, long4 b, long4 c);
+ulong4 __ovld __cnfn mad_sat(ulong4 a, ulong4 b, ulong4 c);
+long8 __ovld __cnfn mad_sat(long8 a, long8 b, long8 c);
+ulong8 __ovld __cnfn mad_sat(ulong8 a, ulong8 b, ulong8 c);
+long16 __ovld __cnfn mad_sat(long16 a, long16 b, long16 c);
+ulong16 __ovld __cnfn mad_sat(ulong16 a, ulong16 b, ulong16 c);
+
+/**
+ * Returns y if x < y, otherwise it returns x.
+ */
+char __ovld __cnfn max(char x, char y);
+uchar __ovld __cnfn max(uchar x, uchar y);
+char2 __ovld __cnfn max(char2 x, char2 y);
+uchar2 __ovld __cnfn max(uchar2 x, uchar2 y);
+char3 __ovld __cnfn max(char3 x, char3 y);
+uchar3 __ovld __cnfn max(uchar3 x, uchar3 y);
+char4 __ovld __cnfn max(char4 x, char4 y);
+uchar4 __ovld __cnfn max(uchar4 x, uchar4 y);
+char8 __ovld __cnfn max(char8 x, char8 y);
+uchar8 __ovld __cnfn max(uchar8 x, uchar8 y);
+char16 __ovld __cnfn max(char16 x, char16 y);
+uchar16 __ovld __cnfn max(uchar16 x, uchar16 y);
+short __ovld __cnfn max(short x, short y);
+ushort __ovld __cnfn max(ushort x, ushort y);
+short2 __ovld __cnfn max(short2 x, short2 y);
+ushort2 __ovld __cnfn max(ushort2 x, ushort2 y);
+short3 __ovld __cnfn max(short3 x, short3 y);
+ushort3 __ovld __cnfn max(ushort3 x, ushort3 y);
+short4 __ovld __cnfn max(short4 x, short4 y);
+ushort4 __ovld __cnfn max(ushort4 x, ushort4 y);
+short8 __ovld __cnfn max(short8 x, short8 y);
+ushort8 __ovld __cnfn max(ushort8 x, ushort8 y);
+short16 __ovld __cnfn max(short16 x, short16 y);
+ushort16 __ovld __cnfn max(ushort16 x, ushort16 y);
+int __ovld __cnfn max(int x, int y);
+uint __ovld __cnfn max(uint x, uint y);
+int2 __ovld __cnfn max(int2 x, int2 y);
+uint2 __ovld __cnfn max(uint2 x, uint2 y);
+int3 __ovld __cnfn max(int3 x, int3 y);
+uint3 __ovld __cnfn max(uint3 x, uint3 y);
+int4 __ovld __cnfn max(int4 x, int4 y);
+uint4 __ovld __cnfn max(uint4 x, uint4 y);
+int8 __ovld __cnfn max(int8 x, int8 y);
+uint8 __ovld __cnfn max(uint8 x, uint8 y);
+int16 __ovld __cnfn max(int16 x, int16 y);
+uint16 __ovld __cnfn max(uint16 x, uint16 y);
+long __ovld __cnfn max(long x, long y);
+ulong __ovld __cnfn max(ulong x, ulong y);
+long2 __ovld __cnfn max(long2 x, long2 y);
+ulong2 __ovld __cnfn max(ulong2 x, ulong2 y);
+long3 __ovld __cnfn max(long3 x, long3 y);
+ulong3 __ovld __cnfn max(ulong3 x, ulong3 y);
+long4 __ovld __cnfn max(long4 x, long4 y);
+ulong4 __ovld __cnfn max(ulong4 x, ulong4 y);
+long8 __ovld __cnfn max(long8 x, long8 y);
+ulong8 __ovld __cnfn max(ulong8 x, ulong8 y);
+long16 __ovld __cnfn max(long16 x, long16 y);
+ulong16 __ovld __cnfn max(ulong16 x, ulong16 y);
+char __ovld __cnfn max(char x, char y);
+uchar __ovld __cnfn max(uchar x, uchar y);
+char2 __ovld __cnfn max(char2 x, char y);
+uchar2 __ovld __cnfn max(uchar2 x, uchar y);
+char3 __ovld __cnfn max(char3 x, char y);
+uchar3 __ovld __cnfn max(uchar3 x, uchar y);
+char4 __ovld __cnfn max(char4 x, char y);
+uchar4 __ovld __cnfn max(uchar4 x, uchar y);
+char8 __ovld __cnfn max(char8 x, char y);
+uchar8 __ovld __cnfn max(uchar8 x, uchar y);
+char16 __ovld __cnfn max(char16 x, char y);
+uchar16 __ovld __cnfn max(uchar16 x, uchar y);
+short __ovld __cnfn max(short x, short y);
+ushort __ovld __cnfn max(ushort x, ushort y);
+short2 __ovld __cnfn max(short2 x, short y);
+ushort2 __ovld __cnfn max(ushort2 x, ushort y);
+short3 __ovld __cnfn max(short3 x, short y);
+ushort3 __ovld __cnfn max(ushort3 x, ushort y);
+short4 __ovld __cnfn max(short4 x, short y);
+ushort4 __ovld __cnfn max(ushort4 x, ushort y);
+short8 __ovld __cnfn max(short8 x, short y);
+ushort8 __ovld __cnfn max(ushort8 x, ushort y);
+short16 __ovld __cnfn max(short16 x, short y);
+ushort16 __ovld __cnfn max(ushort16 x, ushort y);
+int __ovld __cnfn max(int x, int y);
+uint __ovld __cnfn max(uint x, uint y);
+int2 __ovld __cnfn max(int2 x, int y);
+uint2 __ovld __cnfn max(uint2 x, uint y);
+int3 __ovld __cnfn max(int3 x, int y);
+uint3 __ovld __cnfn max(uint3 x, uint y);
+int4 __ovld __cnfn max(int4 x, int y);
+uint4 __ovld __cnfn max(uint4 x, uint y);
+int8 __ovld __cnfn max(int8 x, int y);
+uint8 __ovld __cnfn max(uint8 x, uint y);
+int16 __ovld __cnfn max(int16 x, int y);
+uint16 __ovld __cnfn max(uint16 x, uint y);
+long __ovld __cnfn max(long x, long y);
+ulong __ovld __cnfn max(ulong x, ulong y);
+long2 __ovld __cnfn max(long2 x, long y);
+ulong2 __ovld __cnfn max(ulong2 x, ulong y);
+long3 __ovld __cnfn max(long3 x, long y);
+ulong3 __ovld __cnfn max(ulong3 x, ulong y);
+long4 __ovld __cnfn max(long4 x, long y);
+ulong4 __ovld __cnfn max(ulong4 x, ulong y);
+long8 __ovld __cnfn max(long8 x, long y);
+ulong8 __ovld __cnfn max(ulong8 x, ulong y);
+long16 __ovld __cnfn max(long16 x, long y);
+ulong16 __ovld __cnfn max(ulong16 x, ulong y);
+
+/**
+ * Returns y if y < x, otherwise it returns x.
+ */
+char __ovld __cnfn min(char x, char y);
+uchar __ovld __cnfn min(uchar x, uchar y);
+char2 __ovld __cnfn min(char2 x, char2 y);
+uchar2 __ovld __cnfn min(uchar2 x, uchar2 y);
+char3 __ovld __cnfn min(char3 x, char3 y);
+uchar3 __ovld __cnfn min(uchar3 x, uchar3 y);
+char4 __ovld __cnfn min(char4 x, char4 y);
+uchar4 __ovld __cnfn min(uchar4 x, uchar4 y);
+char8 __ovld __cnfn min(char8 x, char8 y);
+uchar8 __ovld __cnfn min(uchar8 x, uchar8 y);
+char16 __ovld __cnfn min(char16 x, char16 y);
+uchar16 __ovld __cnfn min(uchar16 x, uchar16 y);
+short __ovld __cnfn min(short x, short y);
+ushort __ovld __cnfn min(ushort x, ushort y);
+short2 __ovld __cnfn min(short2 x, short2 y);
+ushort2 __ovld __cnfn min(ushort2 x, ushort2 y);
+short3 __ovld __cnfn min(short3 x, short3 y);
+ushort3 __ovld __cnfn min(ushort3 x, ushort3 y);
+short4 __ovld __cnfn min(short4 x, short4 y);
+ushort4 __ovld __cnfn min(ushort4 x, ushort4 y);
+short8 __ovld __cnfn min(short8 x, short8 y);
+ushort8 __ovld __cnfn min(ushort8 x, ushort8 y);
+short16 __ovld __cnfn min(short16 x, short16 y);
+ushort16 __ovld __cnfn min(ushort16 x, ushort16 y);
+int __ovld __cnfn min(int x, int y);
+uint __ovld __cnfn min(uint x, uint y);
+int2 __ovld __cnfn min(int2 x, int2 y);
+uint2 __ovld __cnfn min(uint2 x, uint2 y);
+int3 __ovld __cnfn min(int3 x, int3 y);
+uint3 __ovld __cnfn min(uint3 x, uint3 y);
+int4 __ovld __cnfn min(int4 x, int4 y);
+uint4 __ovld __cnfn min(uint4 x, uint4 y);
+int8 __ovld __cnfn min(int8 x, int8 y);
+uint8 __ovld __cnfn min(uint8 x, uint8 y);
+int16 __ovld __cnfn min(int16 x, int16 y);
+uint16 __ovld __cnfn min(uint16 x, uint16 y);
+long __ovld __cnfn min(long x, long y);
+ulong __ovld __cnfn min(ulong x, ulong y);
+long2 __ovld __cnfn min(long2 x, long2 y);
+ulong2 __ovld __cnfn min(ulong2 x, ulong2 y);
+long3 __ovld __cnfn min(long3 x, long3 y);
+ulong3 __ovld __cnfn min(ulong3 x, ulong3 y);
+long4 __ovld __cnfn min(long4 x, long4 y);
+ulong4 __ovld __cnfn min(ulong4 x, ulong4 y);
+long8 __ovld __cnfn min(long8 x, long8 y);
+ulong8 __ovld __cnfn min(ulong8 x, ulong8 y);
+long16 __ovld __cnfn min(long16 x, long16 y);
+ulong16 __ovld __cnfn min(ulong16 x, ulong16 y);
+char __ovld __cnfn min(char x, char y);
+uchar __ovld __cnfn min(uchar x, uchar y);
+char2 __ovld __cnfn min(char2 x, char y);
+uchar2 __ovld __cnfn min(uchar2 x, uchar y);
+char3 __ovld __cnfn min(char3 x, char y);
+uchar3 __ovld __cnfn min(uchar3 x, uchar y);
+char4 __ovld __cnfn min(char4 x, char y);
+uchar4 __ovld __cnfn min(uchar4 x, uchar y);
+char8 __ovld __cnfn min(char8 x, char y);
+uchar8 __ovld __cnfn min(uchar8 x, uchar y);
+char16 __ovld __cnfn min(char16 x, char y);
+uchar16 __ovld __cnfn min(uchar16 x, uchar y);
+short __ovld __cnfn min(short x, short y);
+ushort __ovld __cnfn min(ushort x, ushort y);
+short2 __ovld __cnfn min(short2 x, short y);
+ushort2 __ovld __cnfn min(ushort2 x, ushort y);
+short3 __ovld __cnfn min(short3 x, short y);
+ushort3 __ovld __cnfn min(ushort3 x, ushort y);
+short4 __ovld __cnfn min(short4 x, short y);
+ushort4 __ovld __cnfn min(ushort4 x, ushort y);
+short8 __ovld __cnfn min(short8 x, short y);
+ushort8 __ovld __cnfn min(ushort8 x, ushort y);
+short16 __ovld __cnfn min(short16 x, short y);
+ushort16 __ovld __cnfn min(ushort16 x, ushort y);
+int __ovld __cnfn min(int x, int y);
+uint __ovld __cnfn min(uint x, uint y);
+int2 __ovld __cnfn min(int2 x, int y);
+uint2 __ovld __cnfn min(uint2 x, uint y);
+int3 __ovld __cnfn min(int3 x, int y);
+uint3 __ovld __cnfn min(uint3 x, uint y);
+int4 __ovld __cnfn min(int4 x, int y);
+uint4 __ovld __cnfn min(uint4 x, uint y);
+int8 __ovld __cnfn min(int8 x, int y);
+uint8 __ovld __cnfn min(uint8 x, uint y);
+int16 __ovld __cnfn min(int16 x, int y);
+uint16 __ovld __cnfn min(uint16 x, uint y);
+long __ovld __cnfn min(long x, long y);
+ulong __ovld __cnfn min(ulong x, ulong y);
+long2 __ovld __cnfn min(long2 x, long y);
+ulong2 __ovld __cnfn min(ulong2 x, ulong y);
+long3 __ovld __cnfn min(long3 x, long y);
+ulong3 __ovld __cnfn min(ulong3 x, ulong y);
+long4 __ovld __cnfn min(long4 x, long y);
+ulong4 __ovld __cnfn min(ulong4 x, ulong y);
+long8 __ovld __cnfn min(long8 x, long y);
+ulong8 __ovld __cnfn min(ulong8 x, ulong y);
+long16 __ovld __cnfn min(long16 x, long y);
+ulong16 __ovld __cnfn min(ulong16 x, ulong y);
+
+/**
+ * Computes x * y and returns the high half of the
+ * product of x and y.
+ */
+char __ovld __cnfn mul_hi(char x, char y);
+uchar __ovld __cnfn mul_hi(uchar x, uchar y);
+char2 __ovld __cnfn mul_hi(char2 x, char2 y);
+uchar2 __ovld __cnfn mul_hi(uchar2 x, uchar2 y);
+char3 __ovld __cnfn mul_hi(char3 x, char3 y);
+uchar3 __ovld __cnfn mul_hi(uchar3 x, uchar3 y);
+char4 __ovld __cnfn mul_hi(char4 x, char4 y);
+uchar4 __ovld __cnfn mul_hi(uchar4 x, uchar4 y);
+char8 __ovld __cnfn mul_hi(char8 x, char8 y);
+uchar8 __ovld __cnfn mul_hi(uchar8 x, uchar8 y);
+char16 __ovld __cnfn mul_hi(char16 x, char16 y);
+uchar16 __ovld __cnfn mul_hi(uchar16 x, uchar16 y);
+short __ovld __cnfn mul_hi(short x, short y);
+ushort __ovld __cnfn mul_hi(ushort x, ushort y);
+short2 __ovld __cnfn mul_hi(short2 x, short2 y);
+ushort2 __ovld __cnfn mul_hi(ushort2 x, ushort2 y);
+short3 __ovld __cnfn mul_hi(short3 x, short3 y);
+ushort3 __ovld __cnfn mul_hi(ushort3 x, ushort3 y);
+short4 __ovld __cnfn mul_hi(short4 x, short4 y);
+ushort4 __ovld __cnfn mul_hi(ushort4 x, ushort4 y);
+short8 __ovld __cnfn mul_hi(short8 x, short8 y);
+ushort8 __ovld __cnfn mul_hi(ushort8 x, ushort8 y);
+short16 __ovld __cnfn mul_hi(short16 x, short16 y);
+ushort16 __ovld __cnfn mul_hi(ushort16 x, ushort16 y);
+int __ovld __cnfn mul_hi(int x, int y);
+uint __ovld __cnfn mul_hi(uint x, uint y);
+int2 __ovld __cnfn mul_hi(int2 x, int2 y);
+uint2 __ovld __cnfn mul_hi(uint2 x, uint2 y);
+int3 __ovld __cnfn mul_hi(int3 x, int3 y);
+uint3 __ovld __cnfn mul_hi(uint3 x, uint3 y);
+int4 __ovld __cnfn mul_hi(int4 x, int4 y);
+uint4 __ovld __cnfn mul_hi(uint4 x, uint4 y);
+int8 __ovld __cnfn mul_hi(int8 x, int8 y);
+uint8 __ovld __cnfn mul_hi(uint8 x, uint8 y);
+int16 __ovld __cnfn mul_hi(int16 x, int16 y);
+uint16 __ovld __cnfn mul_hi(uint16 x, uint16 y);
+long __ovld __cnfn mul_hi(long x, long y);
+ulong __ovld __cnfn mul_hi(ulong x, ulong y);
+long2 __ovld __cnfn mul_hi(long2 x, long2 y);
+ulong2 __ovld __cnfn mul_hi(ulong2 x, ulong2 y);
+long3 __ovld __cnfn mul_hi(long3 x, long3 y);
+ulong3 __ovld __cnfn mul_hi(ulong3 x, ulong3 y);
+long4 __ovld __cnfn mul_hi(long4 x, long4 y);
+ulong4 __ovld __cnfn mul_hi(ulong4 x, ulong4 y);
+long8 __ovld __cnfn mul_hi(long8 x, long8 y);
+ulong8 __ovld __cnfn mul_hi(ulong8 x, ulong8 y);
+long16 __ovld __cnfn mul_hi(long16 x, long16 y);
+ulong16 __ovld __cnfn mul_hi(ulong16 x, ulong16 y);
+
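+/*
+ * Illustrative sketch, not part of the header: mul_hi returns the upper
+ * half of the full-width product, so paired with an ordinary multiply it
+ * reconstructs the exact 64-bit product of two 32-bit values:
+ */
+__kernel void example_mul_hi(__global const uint *a, __global const uint *b,
+                             __global ulong *out) {
+    size_t i = get_global_id(0);
+    /* High 32 bits from mul_hi, low 32 bits from the wrapping '*'. */
+    out[i] = ((ulong)mul_hi(a[i], b[i]) << 32) | (a[i] * b[i]);
+}
+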
+/**
+ * For each element in v, the bits are shifted left by
+ * the number of bits given by the corresponding
+ * element in i (subject to usual shift modulo rules
+ * described in section 6.3). Bits shifted off the left
+ * side of the element are shifted back in from the
+ * right.
+ */
+char __ovld __cnfn rotate(char v, char i);
+uchar __ovld __cnfn rotate(uchar v, uchar i);
+char2 __ovld __cnfn rotate(char2 v, char2 i);
+uchar2 __ovld __cnfn rotate(uchar2 v, uchar2 i);
+char3 __ovld __cnfn rotate(char3 v, char3 i);
+uchar3 __ovld __cnfn rotate(uchar3 v, uchar3 i);
+char4 __ovld __cnfn rotate(char4 v, char4 i);
+uchar4 __ovld __cnfn rotate(uchar4 v, uchar4 i);
+char8 __ovld __cnfn rotate(char8 v, char8 i);
+uchar8 __ovld __cnfn rotate(uchar8 v, uchar8 i);
+char16 __ovld __cnfn rotate(char16 v, char16 i);
+uchar16 __ovld __cnfn rotate(uchar16 v, uchar16 i);
+short __ovld __cnfn rotate(short v, short i);
+ushort __ovld __cnfn rotate(ushort v, ushort i);
+short2 __ovld __cnfn rotate(short2 v, short2 i);
+ushort2 __ovld __cnfn rotate(ushort2 v, ushort2 i);
+short3 __ovld __cnfn rotate(short3 v, short3 i);
+ushort3 __ovld __cnfn rotate(ushort3 v, ushort3 i);
+short4 __ovld __cnfn rotate(short4 v, short4 i);
+ushort4 __ovld __cnfn rotate(ushort4 v, ushort4 i);
+short8 __ovld __cnfn rotate(short8 v, short8 i);
+ushort8 __ovld __cnfn rotate(ushort8 v, ushort8 i);
+short16 __ovld __cnfn rotate(short16 v, short16 i);
+ushort16 __ovld __cnfn rotate(ushort16 v, ushort16 i);
+int __ovld __cnfn rotate(int v, int i);
+uint __ovld __cnfn rotate(uint v, uint i);
+int2 __ovld __cnfn rotate(int2 v, int2 i);
+uint2 __ovld __cnfn rotate(uint2 v, uint2 i);
+int3 __ovld __cnfn rotate(int3 v, int3 i);
+uint3 __ovld __cnfn rotate(uint3 v, uint3 i);
+int4 __ovld __cnfn rotate(int4 v, int4 i);
+uint4 __ovld __cnfn rotate(uint4 v, uint4 i);
+int8 __ovld __cnfn rotate(int8 v, int8 i);
+uint8 __ovld __cnfn rotate(uint8 v, uint8 i);
+int16 __ovld __cnfn rotate(int16 v, int16 i);
+uint16 __ovld __cnfn rotate(uint16 v, uint16 i);
+long __ovld __cnfn rotate(long v, long i);
+ulong __ovld __cnfn rotate(ulong v, ulong i);
+long2 __ovld __cnfn rotate(long2 v, long2 i);
+ulong2 __ovld __cnfn rotate(ulong2 v, ulong2 i);
+long3 __ovld __cnfn rotate(long3 v, long3 i);
+ulong3 __ovld __cnfn rotate(ulong3 v, ulong3 i);
+long4 __ovld __cnfn rotate(long4 v, long4 i);
+ulong4 __ovld __cnfn rotate(ulong4 v, ulong4 i);
+long8 __ovld __cnfn rotate(long8 v, long8 i);
+ulong8 __ovld __cnfn rotate(ulong8 v, ulong8 i);
+long16 __ovld __cnfn rotate(long16 v, long16 i);
+ulong16 __ovld __cnfn rotate(ulong16 v, ulong16 i);
+
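+/*
+ * Illustrative sketch, not part of the header: rotate is a circular shift,
+ * so bits leaving on the left re-enter on the right, and shift counts are
+ * taken modulo the element width. Hypothetical left-rotate of a hash state:
+ */
+__kernel void example_rotate(__global uint *h, uint n) {
+    size_t i = get_global_id(0);
+    /* Equivalent to (h << n) | (h >> (32 - n)) for n in 1..31. */
+    h[i] = rotate(h[i], n);
+}
+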
+/**
+ * Returns x - y and saturates the result.
+ */
+char __ovld __cnfn sub_sat(char x, char y);
+uchar __ovld __cnfn sub_sat(uchar x, uchar y);
+char2 __ovld __cnfn sub_sat(char2 x, char2 y);
+uchar2 __ovld __cnfn sub_sat(uchar2 x, uchar2 y);
+char3 __ovld __cnfn sub_sat(char3 x, char3 y);
+uchar3 __ovld __cnfn sub_sat(uchar3 x, uchar3 y);
+char4 __ovld __cnfn sub_sat(char4 x, char4 y);
+uchar4 __ovld __cnfn sub_sat(uchar4 x, uchar4 y);
+char8 __ovld __cnfn sub_sat(char8 x, char8 y);
+uchar8 __ovld __cnfn sub_sat(uchar8 x, uchar8 y);
+char16 __ovld __cnfn sub_sat(char16 x, char16 y);
+uchar16 __ovld __cnfn sub_sat(uchar16 x, uchar16 y);
+short __ovld __cnfn sub_sat(short x, short y);
+ushort __ovld __cnfn sub_sat(ushort x, ushort y);
+short2 __ovld __cnfn sub_sat(short2 x, short2 y);
+ushort2 __ovld __cnfn sub_sat(ushort2 x, ushort2 y);
+short3 __ovld __cnfn sub_sat(short3 x, short3 y);
+ushort3 __ovld __cnfn sub_sat(ushort3 x, ushort3 y);
+short4 __ovld __cnfn sub_sat(short4 x, short4 y);
+ushort4 __ovld __cnfn sub_sat(ushort4 x, ushort4 y);
+short8 __ovld __cnfn sub_sat(short8 x, short8 y);
+ushort8 __ovld __cnfn sub_sat(ushort8 x, ushort8 y);
+short16 __ovld __cnfn sub_sat(short16 x, short16 y);
+ushort16 __ovld __cnfn sub_sat(ushort16 x, ushort16 y);
+int __ovld __cnfn sub_sat(int x, int y);
+uint __ovld __cnfn sub_sat(uint x, uint y);
+int2 __ovld __cnfn sub_sat(int2 x, int2 y);
+uint2 __ovld __cnfn sub_sat(uint2 x, uint2 y);
+int3 __ovld __cnfn sub_sat(int3 x, int3 y);
+uint3 __ovld __cnfn sub_sat(uint3 x, uint3 y);
+int4 __ovld __cnfn sub_sat(int4 x, int4 y);
+uint4 __ovld __cnfn sub_sat(uint4 x, uint4 y);
+int8 __ovld __cnfn sub_sat(int8 x, int8 y);
+uint8 __ovld __cnfn sub_sat(uint8 x, uint8 y);
+int16 __ovld __cnfn sub_sat(int16 x, int16 y);
+uint16 __ovld __cnfn sub_sat(uint16 x, uint16 y);
+long __ovld __cnfn sub_sat(long x, long y);
+ulong __ovld __cnfn sub_sat(ulong x, ulong y);
+long2 __ovld __cnfn sub_sat(long2 x, long2 y);
+ulong2 __ovld __cnfn sub_sat(ulong2 x, ulong2 y);
+long3 __ovld __cnfn sub_sat(long3 x, long3 y);
+ulong3 __ovld __cnfn sub_sat(ulong3 x, ulong3 y);
+long4 __ovld __cnfn sub_sat(long4 x, long4 y);
+ulong4 __ovld __cnfn sub_sat(ulong4 x, ulong4 y);
+long8 __ovld __cnfn sub_sat(long8 x, long8 y);
+ulong8 __ovld __cnfn sub_sat(ulong8 x, ulong8 y);
+long16 __ovld __cnfn sub_sat(long16 x, long16 y);
+ulong16 __ovld __cnfn sub_sat(ulong16 x, ulong16 y);
+
+/**
+ * result[i] = ((short)hi[i] << 8) | lo[i]
+ * result[i] = ((ushort)hi[i] << 8) | lo[i]
+ */
+short __ovld __cnfn upsample(char hi, uchar lo);
+ushort __ovld __cnfn upsample(uchar hi, uchar lo);
+short2 __ovld __cnfn upsample(char2 hi, uchar2 lo);
+short3 __ovld __cnfn upsample(char3 hi, uchar3 lo);
+short4 __ovld __cnfn upsample(char4 hi, uchar4 lo);
+short8 __ovld __cnfn upsample(char8 hi, uchar8 lo);
+short16 __ovld __cnfn upsample(char16 hi, uchar16 lo);
+ushort2 __ovld __cnfn upsample(uchar2 hi, uchar2 lo);
+ushort3 __ovld __cnfn upsample(uchar3 hi, uchar3 lo);
+ushort4 __ovld __cnfn upsample(uchar4 hi, uchar4 lo);
+ushort8 __ovld __cnfn upsample(uchar8 hi, uchar8 lo);
+ushort16 __ovld __cnfn upsample(uchar16 hi, uchar16 lo);
+
+/**
+ * result[i] = ((int)hi[i] << 16) | lo[i]
+ * result[i] = ((uint)hi[i] << 16) | lo[i]
+ */
+int __ovld __cnfn upsample(short hi, ushort lo);
+uint __ovld __cnfn upsample(ushort hi, ushort lo);
+int2 __ovld __cnfn upsample(short2 hi, ushort2 lo);
+int3 __ovld __cnfn upsample(short3 hi, ushort3 lo);
+int4 __ovld __cnfn upsample(short4 hi, ushort4 lo);
+int8 __ovld __cnfn upsample(short8 hi, ushort8 lo);
+int16 __ovld __cnfn upsample(short16 hi, ushort16 lo);
+uint2 __ovld __cnfn upsample(ushort2 hi, ushort2 lo);
+uint3 __ovld __cnfn upsample(ushort3 hi, ushort3 lo);
+uint4 __ovld __cnfn upsample(ushort4 hi, ushort4 lo);
+uint8 __ovld __cnfn upsample(ushort8 hi, ushort8 lo);
+uint16 __ovld __cnfn upsample(ushort16 hi, ushort16 lo);
+
+/**
+ * result[i] = ((long)hi[i] << 32) | lo[i]
+ * result[i] = ((ulong)hi[i] << 32) | lo[i]
+ */
+long __ovld __cnfn upsample(int hi, uint lo);
+ulong __ovld __cnfn upsample(uint hi, uint lo);
+long2 __ovld __cnfn upsample(int2 hi, uint2 lo);
+long3 __ovld __cnfn upsample(int3 hi, uint3 lo);
+long4 __ovld __cnfn upsample(int4 hi, uint4 lo);
+long8 __ovld __cnfn upsample(int8 hi, uint8 lo);
+long16 __ovld __cnfn upsample(int16 hi, uint16 lo);
+ulong2 __ovld __cnfn upsample(uint2 hi, uint2 lo);
+ulong3 __ovld __cnfn upsample(uint3 hi, uint3 lo);
+ulong4 __ovld __cnfn upsample(uint4 hi, uint4 lo);
+ulong8 __ovld __cnfn upsample(uint8 hi, uint8 lo);
+ulong16 __ovld __cnfn upsample(uint16 hi, uint16 lo);
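+
+/*
+ * Usage sketch (illustrative): upsample concatenates hi and lo into a
+ * value of twice the width, hi in the upper half, lo in the lower:
+ *
+ *   short s = upsample((char)0x12, (uchar)0x34);    // 0x1234
+ *   ulong u = upsample(0xDEADBEEFu, 0x01234567u);   // 0xDEADBEEF01234567
+ */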
+
+/**
+ * popcount(x): returns the number of non-zero bits in x.
+ */
+char __ovld __cnfn popcount(char x);
+uchar __ovld __cnfn popcount(uchar x);
+char2 __ovld __cnfn popcount(char2 x);
+uchar2 __ovld __cnfn popcount(uchar2 x);
+char3 __ovld __cnfn popcount(char3 x);
+uchar3 __ovld __cnfn popcount(uchar3 x);
+char4 __ovld __cnfn popcount(char4 x);
+uchar4 __ovld __cnfn popcount(uchar4 x);
+char8 __ovld __cnfn popcount(char8 x);
+uchar8 __ovld __cnfn popcount(uchar8 x);
+char16 __ovld __cnfn popcount(char16 x);
+uchar16 __ovld __cnfn popcount(uchar16 x);
+short __ovld __cnfn popcount(short x);
+ushort __ovld __cnfn popcount(ushort x);
+short2 __ovld __cnfn popcount(short2 x);
+ushort2 __ovld __cnfn popcount(ushort2 x);
+short3 __ovld __cnfn popcount(short3 x);
+ushort3 __ovld __cnfn popcount(ushort3 x);
+short4 __ovld __cnfn popcount(short4 x);
+ushort4 __ovld __cnfn popcount(ushort4 x);
+short8 __ovld __cnfn popcount(short8 x);
+ushort8 __ovld __cnfn popcount(ushort8 x);
+short16 __ovld __cnfn popcount(short16 x);
+ushort16 __ovld __cnfn popcount(ushort16 x);
+int __ovld __cnfn popcount(int x);
+uint __ovld __cnfn popcount(uint x);
+int2 __ovld __cnfn popcount(int2 x);
+uint2 __ovld __cnfn popcount(uint2 x);
+int3 __ovld __cnfn popcount(int3 x);
+uint3 __ovld __cnfn popcount(uint3 x);
+int4 __ovld __cnfn popcount(int4 x);
+uint4 __ovld __cnfn popcount(uint4 x);
+int8 __ovld __cnfn popcount(int8 x);
+uint8 __ovld __cnfn popcount(uint8 x);
+int16 __ovld __cnfn popcount(int16 x);
+uint16 __ovld __cnfn popcount(uint16 x);
+long __ovld __cnfn popcount(long x);
+ulong __ovld __cnfn popcount(ulong x);
+long2 __ovld __cnfn popcount(long2 x);
+ulong2 __ovld __cnfn popcount(ulong2 x);
+long3 __ovld __cnfn popcount(long3 x);
+ulong3 __ovld __cnfn popcount(ulong3 x);
+long4 __ovld __cnfn popcount(long4 x);
+ulong4 __ovld __cnfn popcount(ulong4 x);
+long8 __ovld __cnfn popcount(long8 x);
+ulong8 __ovld __cnfn popcount(ulong8 x);
+long16 __ovld __cnfn popcount(long16 x);
+ulong16 __ovld __cnfn popcount(ulong16 x);
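+
+/*
+ * Usage sketch (illustrative): popcount returns its result in the same
+ * type as the argument, per component for vectors:
+ *
+ *   uint n = popcount(0xF0F0F0F0u);            // 16
+ *   int4 v = popcount((int4)(0, 1, 3, 7));     // (0, 1, 2, 3)
+ */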
+
+/**
+ * Multiply two 24-bit integer values x and y and add
+ * the 32-bit integer result to the 32-bit integer z.
+ * Refer to definition of mul24 to see how the 24-bit
+ * integer multiplication is performed.
+ */
+int __ovld __cnfn mad24(int x, int y, int z);
+uint __ovld __cnfn mad24(uint x, uint y, uint z);
+int2 __ovld __cnfn mad24(int2 x, int2 y, int2 z);
+uint2 __ovld __cnfn mad24(uint2 x, uint2 y, uint2 z);
+int3 __ovld __cnfn mad24(int3 x, int3 y, int3 z);
+uint3 __ovld __cnfn mad24(uint3 x, uint3 y, uint3 z);
+int4 __ovld __cnfn mad24(int4 x, int4 y, int4 z);
+uint4 __ovld __cnfn mad24(uint4 x, uint4 y, uint4 z);
+int8 __ovld __cnfn mad24(int8 x, int8 y, int8 z);
+uint8 __ovld __cnfn mad24(uint8 x, uint8 y, uint8 z);
+int16 __ovld __cnfn mad24(int16 x, int16 y, int16 z);
+uint16 __ovld __cnfn mad24(uint16 x, uint16 y, uint16 z);
+
+/**
+ * Multiply two 24-bit integer values x and y. x and y
+ * are 32-bit integers but only the low 24-bits are used
+ * to perform the multiplication. mul24 should only
+ * be used when values in x and y are in the range
+ * [-2^23, 2^23-1] if x and y are signed integers, and in
+ * the range [0, 2^24-1] if x and y are unsigned integers.
+ * If x and y are not in this range, the multiplication
+ * result is implementation-defined.
+ */
+int __ovld __cnfn mul24(int x, int y);
+uint __ovld __cnfn mul24(uint x, uint y);
+int2 __ovld __cnfn mul24(int2 x, int2 y);
+uint2 __ovld __cnfn mul24(uint2 x, uint2 y);
+int3 __ovld __cnfn mul24(int3 x, int3 y);
+uint3 __ovld __cnfn mul24(uint3 x, uint3 y);
+int4 __ovld __cnfn mul24(int4 x, int4 y);
+uint4 __ovld __cnfn mul24(uint4 x, uint4 y);
+int8 __ovld __cnfn mul24(int8 x, int8 y);
+uint8 __ovld __cnfn mul24(uint8 x, uint8 y);
+int16 __ovld __cnfn mul24(int16 x, int16 y);
+uint16 __ovld __cnfn mul24(uint16 x, uint16 y);
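+
+/*
+ * Usage sketch (illustrative): with operands inside the 24-bit range,
+ * mul24/mad24 give the exact 32-bit result:
+ *
+ *   int x = 1 << 22, y = 6;          // both within [-2^23, 2^23-1]
+ *   int p = mul24(x, y);             // 25165824, exact product
+ *   int q = mad24(x, y, 1);          // 25165825
+ */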
+
+// OpenCL v1.1 s6.11.4, v1.2 s6.12.4, v2.0 s6.13.4 - Common Functions
+
+/**
+ * Returns fmin(fmax(x, minval), maxval).
+ * Results are undefined if minval > maxval.
+ */
+float __ovld __cnfn clamp(float x, float minval, float maxval);
+float2 __ovld __cnfn clamp(float2 x, float2 minval, float2 maxval);
+float3 __ovld __cnfn clamp(float3 x, float3 minval, float3 maxval);
+float4 __ovld __cnfn clamp(float4 x, float4 minval, float4 maxval);
+float8 __ovld __cnfn clamp(float8 x, float8 minval, float8 maxval);
+float16 __ovld __cnfn clamp(float16 x, float16 minval, float16 maxval);
+float2 __ovld __cnfn clamp(float2 x, float minval, float maxval);
+float3 __ovld __cnfn clamp(float3 x, float minval, float maxval);
+float4 __ovld __cnfn clamp(float4 x, float minval, float maxval);
+float8 __ovld __cnfn clamp(float8 x, float minval, float maxval);
+float16 __ovld __cnfn clamp(float16 x, float minval, float maxval);
+#ifdef cl_khr_fp64
+double __ovld __cnfn clamp(double x, double minval, double maxval);
+double2 __ovld __cnfn clamp(double2 x, double2 minval, double2 maxval);
+double3 __ovld __cnfn clamp(double3 x, double3 minval, double3 maxval);
+double4 __ovld __cnfn clamp(double4 x, double4 minval, double4 maxval);
+double8 __ovld __cnfn clamp(double8 x, double8 minval, double8 maxval);
+double16 __ovld __cnfn clamp(double16 x, double16 minval, double16 maxval);
+double2 __ovld __cnfn clamp(double2 x, double minval, double maxval);
+double3 __ovld __cnfn clamp(double3 x, double minval, double maxval);
+double4 __ovld __cnfn clamp(double4 x, double minval, double maxval);
+double8 __ovld __cnfn clamp(double8 x, double minval, double maxval);
+double16 __ovld __cnfn clamp(double16 x, double minval, double maxval);
+#endif //cl_khr_fp64
+#ifdef cl_khr_fp16
+half __ovld __cnfn clamp(half x, half minval, half maxval);
+half2 __ovld __cnfn clamp(half2 x, half2 minval, half2 maxval);
+half3 __ovld __cnfn clamp(half3 x, half3 minval, half3 maxval);
+half4 __ovld __cnfn clamp(half4 x, half4 minval, half4 maxval);
+half8 __ovld __cnfn clamp(half8 x, half8 minval, half8 maxval);
+half16 __ovld __cnfn clamp(half16 x, half16 minval, half16 maxval);
+half2 __ovld __cnfn clamp(half2 x, half minval, half maxval);
+half3 __ovld __cnfn clamp(half3 x, half minval, half maxval);
+half4 __ovld __cnfn clamp(half4 x, half minval, half maxval);
+half8 __ovld __cnfn clamp(half8 x, half minval, half maxval);
+half16 __ovld __cnfn clamp(half16 x, half minval, half maxval);
+#endif //cl_khr_fp16
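+
+/*
+ * Usage sketch (illustrative): the scalar minval/maxval forms apply the
+ * same bounds to every component:
+ *
+ *   float4 v = (float4)(-0.5f, 0.25f, 1.5f, 2.0f);
+ *   float4 c = clamp(v, 0.0f, 1.0f);   // (0.0f, 0.25f, 1.0f, 1.0f)
+ */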
+
+/**
+ * Converts radians to degrees, i.e. (180 / PI) *
+ * radians.
+ */
+float __ovld __cnfn degrees(float radians);
+float2 __ovld __cnfn degrees(float2 radians);
+float3 __ovld __cnfn degrees(float3 radians);
+float4 __ovld __cnfn degrees(float4 radians);
+float8 __ovld __cnfn degrees(float8 radians);
+float16 __ovld __cnfn degrees(float16 radians);
+#ifdef cl_khr_fp64
+double __ovld __cnfn degrees(double radians);
+double2 __ovld __cnfn degrees(double2 radians);
+double3 __ovld __cnfn degrees(double3 radians);
+double4 __ovld __cnfn degrees(double4 radians);
+double8 __ovld __cnfn degrees(double8 radians);
+double16 __ovld __cnfn degrees(double16 radians);
+#endif //cl_khr_fp64
+#ifdef cl_khr_fp16
+half __ovld __cnfn degrees(half radians);
+half2 __ovld __cnfn degrees(half2 radians);
+half3 __ovld __cnfn degrees(half3 radians);
+half4 __ovld __cnfn degrees(half4 radians);
+half8 __ovld __cnfn degrees(half8 radians);
+half16 __ovld __cnfn degrees(half16 radians);
+#endif //cl_khr_fp16
+
+/**
+ * Returns y if x < y, otherwise it returns x. If x and y
+ * are infinite or NaN, the return values are undefined.
+ */
+float __ovld __cnfn max(float x, float y);
+float2 __ovld __cnfn max(float2 x, float2 y);
+float3 __ovld __cnfn max(float3 x, float3 y);
+float4 __ovld __cnfn max(float4 x, float4 y);
+float8 __ovld __cnfn max(float8 x, float8 y);
+float16 __ovld __cnfn max(float16 x, float16 y);
+float2 __ovld __cnfn max(float2 x, float y);
+float3 __ovld __cnfn max(float3 x, float y);
+float4 __ovld __cnfn max(float4 x, float y);
+float8 __ovld __cnfn max(float8 x, float y);
+float16 __ovld __cnfn max(float16 x, float y);
+#ifdef cl_khr_fp64
+double __ovld __cnfn max(double x, double y);
+double2 __ovld __cnfn max(double2 x, double2 y);
+double3 __ovld __cnfn max(double3 x, double3 y);
+double4 __ovld __cnfn max(double4 x, double4 y);
+double8 __ovld __cnfn max(double8 x, double8 y);
+double16 __ovld __cnfn max(double16 x, double16 y);
+double2 __ovld __cnfn max(double2 x, double y);
+double3 __ovld __cnfn max(double3 x, double y);
+double4 __ovld __cnfn max(double4 x, double y);
+double8 __ovld __cnfn max(double8 x, double y);
+double16 __ovld __cnfn max(double16 x, double y);
+#endif //cl_khr_fp64
+#ifdef cl_khr_fp16
+half __ovld __cnfn max(half x, half y);
+half2 __ovld __cnfn max(half2 x, half2 y);
+half3 __ovld __cnfn max(half3 x, half3 y);
+half4 __ovld __cnfn max(half4 x, half4 y);
+half8 __ovld __cnfn max(half8 x, half8 y);
+half16 __ovld __cnfn max(half16 x, half16 y);
+half2 __ovld __cnfn max(half2 x, half y);
+half3 __ovld __cnfn max(half3 x, half y);
+half4 __ovld __cnfn max(half4 x, half y);
+half8 __ovld __cnfn max(half8 x, half y);
+half16 __ovld __cnfn max(half16 x, half y);
+#endif //cl_khr_fp16
+
+/**
+ * Returns y if y < x, otherwise it returns x. If x and y
+ * are infinite or NaN, the return values are undefined.
+ */
+float __ovld __cnfn min(float x, float y);
+float2 __ovld __cnfn min(float2 x, float2 y);
+float3 __ovld __cnfn min(float3 x, float3 y);
+float4 __ovld __cnfn min(float4 x, float4 y);
+float8 __ovld __cnfn min(float8 x, float8 y);
+float16 __ovld __cnfn min(float16 x, float16 y);
+float2 __ovld __cnfn min(float2 x, float y);
+float3 __ovld __cnfn min(float3 x, float y);
+float4 __ovld __cnfn min(float4 x, float y);
+float8 __ovld __cnfn min(float8 x, float y);
+float16 __ovld __cnfn min(float16 x, float y);
+#ifdef cl_khr_fp64
+double __ovld __cnfn min(double x, double y);
+double2 __ovld __cnfn min(double2 x, double2 y);
+double3 __ovld __cnfn min(double3 x, double3 y);
+double4 __ovld __cnfn min(double4 x, double4 y);
+double8 __ovld __cnfn min(double8 x, double8 y);
+double16 __ovld __cnfn min(double16 x, double16 y);
+double2 __ovld __cnfn min(double2 x, double y);
+double3 __ovld __cnfn min(double3 x, double y);
+double4 __ovld __cnfn min(double4 x, double y);
+double8 __ovld __cnfn min(double8 x, double y);
+double16 __ovld __cnfn min(double16 x, double y);
+#endif //cl_khr_fp64
+#ifdef cl_khr_fp16
+half __ovld __cnfn min(half x, half y);
+half2 __ovld __cnfn min(half2 x, half2 y);
+half3 __ovld __cnfn min(half3 x, half3 y);
+half4 __ovld __cnfn min(half4 x, half4 y);
+half8 __ovld __cnfn min(half8 x, half8 y);
+half16 __ovld __cnfn min(half16 x, half16 y);
+half2 __ovld __cnfn min(half2 x, half y);
+half3 __ovld __cnfn min(half3 x, half y);
+half4 __ovld __cnfn min(half4 x, half y);
+half8 __ovld __cnfn min(half8 x, half y);
+half16 __ovld __cnfn min(half16 x, half y);
+#endif //cl_khr_fp16
+
+/**
+ * Returns the linear blend of x & y implemented as:
+ * x + (y - x) * a
+ * a must be a value in the range 0.0 ... 1.0. If a is not
+ * in the range 0.0 ... 1.0, the return values are
+ * undefined.
+ */
+float __ovld __cnfn mix(float x, float y, float a);
+float2 __ovld __cnfn mix(float2 x, float2 y, float2 a);
+float3 __ovld __cnfn mix(float3 x, float3 y, float3 a);
+float4 __ovld __cnfn mix(float4 x, float4 y, float4 a);
+float8 __ovld __cnfn mix(float8 x, float8 y, float8 a);
+float16 __ovld __cnfn mix(float16 x, float16 y, float16 a);
+float2 __ovld __cnfn mix(float2 x, float2 y, float a);
+float3 __ovld __cnfn mix(float3 x, float3 y, float a);
+float4 __ovld __cnfn mix(float4 x, float4 y, float a);
+float8 __ovld __cnfn mix(float8 x, float8 y, float a);
+float16 __ovld __cnfn mix(float16 x, float16 y, float a);
+#ifdef cl_khr_fp64
+double __ovld __cnfn mix(double x, double y, double a);
+double2 __ovld __cnfn mix(double2 x, double2 y, double2 a);
+double3 __ovld __cnfn mix(double3 x, double3 y, double3 a);
+double4 __ovld __cnfn mix(double4 x, double4 y, double4 a);
+double8 __ovld __cnfn mix(double8 x, double8 y, double8 a);
+double16 __ovld __cnfn mix(double16 x, double16 y, double16 a);
+double2 __ovld __cnfn mix(double2 x, double2 y, double a);
+double3 __ovld __cnfn mix(double3 x, double3 y, double a);
+double4 __ovld __cnfn mix(double4 x, double4 y, double a);
+double8 __ovld __cnfn mix(double8 x, double8 y, double a);
+double16 __ovld __cnfn mix(double16 x, double16 y, double a);
+#endif //cl_khr_fp64
+#ifdef cl_khr_fp16
+half __ovld __cnfn mix(half x, half y, half a);
+half2 __ovld __cnfn mix(half2 x, half2 y, half2 a);
+half3 __ovld __cnfn mix(half3 x, half3 y, half3 a);
+half4 __ovld __cnfn mix(half4 x, half4 y, half4 a);
+half8 __ovld __cnfn mix(half8 x, half8 y, half8 a);
+half16 __ovld __cnfn mix(half16 x, half16 y, half16 a);
+half2 __ovld __cnfn mix(half2 x, half2 y, half a);
+half3 __ovld __cnfn mix(half3 x, half3 y, half a);
+half4 __ovld __cnfn mix(half4 x, half4 y, half a);
+half8 __ovld __cnfn mix(half8 x, half8 y, half a);
+half16 __ovld __cnfn mix(half16 x, half16 y, half a);
+#endif //cl_khr_fp16
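+
+/*
+ * Usage sketch (illustrative): with a scalar blend factor, every
+ * component is interpolated by the same amount:
+ *
+ *   float4 lo = (float4)(0.0f), hi = (float4)(10.0f);
+ *   float4 m  = mix(lo, hi, 0.25f);    // (2.5f, 2.5f, 2.5f, 2.5f)
+ */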
+
+/**
+ * Converts degrees to radians, i.e. (PI / 180) *
+ * degrees.
+ */
+float __ovld __cnfn radians(float degrees);
+float2 __ovld __cnfn radians(float2 degrees);
+float3 __ovld __cnfn radians(float3 degrees);
+float4 __ovld __cnfn radians(float4 degrees);
+float8 __ovld __cnfn radians(float8 degrees);
+float16 __ovld __cnfn radians(float16 degrees);
+#ifdef cl_khr_fp64
+double __ovld __cnfn radians(double degrees);
+double2 __ovld __cnfn radians(double2 degrees);
+double3 __ovld __cnfn radians(double3 degrees);
+double4 __ovld __cnfn radians(double4 degrees);
+double8 __ovld __cnfn radians(double8 degrees);
+double16 __ovld __cnfn radians(double16 degrees);
+#endif //cl_khr_fp64
+#ifdef cl_khr_fp16
+half __ovld __cnfn radians(half degrees);
+half2 __ovld __cnfn radians(half2 degrees);
+half3 __ovld __cnfn radians(half3 degrees);
+half4 __ovld __cnfn radians(half4 degrees);
+half8 __ovld __cnfn radians(half8 degrees);
+half16 __ovld __cnfn radians(half16 degrees);
+#endif //cl_khr_fp16
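+
+/*
+ * Usage sketch (illustrative; values rounded to float precision):
+ *
+ *   float r = radians(180.0f);         // ~3.14159274f (pi)
+ *   float d = degrees(r);              // ~180.0f
+ */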
+
+/**
+ * Returns 0.0 if x < edge, otherwise it returns 1.0.
+ */
+float __ovld __cnfn step(float edge, float x);
+float2 __ovld __cnfn step(float2 edge, float2 x);
+float3 __ovld __cnfn step(float3 edge, float3 x);
+float4 __ovld __cnfn step(float4 edge, float4 x);
+float8 __ovld __cnfn step(float8 edge, float8 x);
+float16 __ovld __cnfn step(float16 edge, float16 x);
+float2 __ovld __cnfn step(float edge, float2 x);
+float3 __ovld __cnfn step(float edge, float3 x);
+float4 __ovld __cnfn step(float edge, float4 x);
+float8 __ovld __cnfn step(float edge, float8 x);
+float16 __ovld __cnfn step(float edge, float16 x);
+#ifdef cl_khr_fp64
+double __ovld __cnfn step(double edge, double x);
+double2 __ovld __cnfn step(double2 edge, double2 x);
+double3 __ovld __cnfn step(double3 edge, double3 x);
+double4 __ovld __cnfn step(double4 edge, double4 x);
+double8 __ovld __cnfn step(double8 edge, double8 x);
+double16 __ovld __cnfn step(double16 edge, double16 x);
+double2 __ovld __cnfn step(double edge, double2 x);
+double3 __ovld __cnfn step(double edge, double3 x);
+double4 __ovld __cnfn step(double edge, double4 x);
+double8 __ovld __cnfn step(double edge, double8 x);
+double16 __ovld __cnfn step(double edge, double16 x);
+#endif //cl_khr_fp64
+#ifdef cl_khr_fp16
+half __ovld __cnfn step(half edge, half x);
+half2 __ovld __cnfn step(half2 edge, half2 x);
+half3 __ovld __cnfn step(half3 edge, half3 x);
+half4 __ovld __cnfn step(half4 edge, half4 x);
+half8 __ovld __cnfn step(half8 edge, half8 x);
+half16 __ovld __cnfn step(half16 edge, half16 x);
+half2 __ovld __cnfn step(half edge, half2 x);
+half3 __ovld __cnfn step(half edge, half3 x);
+half4 __ovld __cnfn step(half edge, half4 x);
+half8 __ovld __cnfn step(half edge, half8 x);
+half16 __ovld __cnfn step(half edge, half16 x);
+#endif //cl_khr_fp16
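+
+/*
+ * Usage sketch (illustrative): step with a scalar edge applied
+ * component-wise; note x == edge yields 1.0:
+ *
+ *   float4 x = (float4)(-1.0f, 0.25f, 0.5f, 2.0f);
+ *   float4 s = step(0.5f, x);          // (0.0f, 0.0f, 1.0f, 1.0f)
+ */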
+
+/**
+ * Returns 0.0 if x <= edge0 and 1.0 if x >= edge1 and
+ * performs smooth Hermite interpolation between 0
+ * and 1 when edge0 < x < edge1. This is useful in
+ * cases where you would want a threshold function
+ * with a smooth transition.
+ * This is equivalent to:
+ * gentype t;
+ * t = clamp ((x - edge0) / (edge1 - edge0), 0, 1);
+ * return t * t * (3 - 2 * t);
+ * Results are undefined if edge0 >= edge1 or if x,
+ * edge0 or edge1 is a NaN.
+ */
+float __ovld __cnfn smoothstep(float edge0, float edge1, float x);
+float2 __ovld __cnfn smoothstep(float2 edge0, float2 edge1, float2 x);
+float3 __ovld __cnfn smoothstep(float3 edge0, float3 edge1, float3 x);
+float4 __ovld __cnfn smoothstep(float4 edge0, float4 edge1, float4 x);
+float8 __ovld __cnfn smoothstep(float8 edge0, float8 edge1, float8 x);
+float16 __ovld __cnfn smoothstep(float16 edge0, float16 edge1, float16 x);
+float2 __ovld __cnfn smoothstep(float edge0, float edge1, float2 x);
+float3 __ovld __cnfn smoothstep(float edge0, float edge1, float3 x);
+float4 __ovld __cnfn smoothstep(float edge0, float edge1, float4 x);
+float8 __ovld __cnfn smoothstep(float edge0, float edge1, float8 x);
+float16 __ovld __cnfn smoothstep(float edge0, float edge1, float16 x);
+#ifdef cl_khr_fp64
+double __ovld __cnfn smoothstep(double edge0, double edge1, double x);
+double2 __ovld __cnfn smoothstep(double2 edge0, double2 edge1, double2 x);
+double3 __ovld __cnfn smoothstep(double3 edge0, double3 edge1, double3 x);
+double4 __ovld __cnfn smoothstep(double4 edge0, double4 edge1, double4 x);
+double8 __ovld __cnfn smoothstep(double8 edge0, double8 edge1, double8 x);
+double16 __ovld __cnfn smoothstep(double16 edge0, double16 edge1, double16 x);
+double2 __ovld __cnfn smoothstep(double edge0, double edge1, double2 x);
+double3 __ovld __cnfn smoothstep(double edge0, double edge1, double3 x);
+double4 __ovld __cnfn smoothstep(double edge0, double edge1, double4 x);
+double8 __ovld __cnfn smoothstep(double edge0, double edge1, double8 x);
+double16 __ovld __cnfn smoothstep(double edge0, double edge1, double16 x);
+#endif //cl_khr_fp64
+#ifdef cl_khr_fp16
+half __ovld __cnfn smoothstep(half edge0, half edge1, half x);
+half2 __ovld __cnfn smoothstep(half2 edge0, half2 edge1, half2 x);
+half3 __ovld __cnfn smoothstep(half3 edge0, half3 edge1, half3 x);
+half4 __ovld __cnfn smoothstep(half4 edge0, half4 edge1, half4 x);
+half8 __ovld __cnfn smoothstep(half8 edge0, half8 edge1, half8 x);
+half16 __ovld __cnfn smoothstep(half16 edge0, half16 edge1, half16 x);
+half2 __ovld __cnfn smoothstep(half edge0, half edge1, half2 x);
+half3 __ovld __cnfn smoothstep(half edge0, half edge1, half3 x);
+half4 __ovld __cnfn smoothstep(half edge0, half edge1, half4 x);
+half8 __ovld __cnfn smoothstep(half edge0, half edge1, half8 x);
+half16 __ovld __cnfn smoothstep(half edge0, half edge1, half16 x);
+#endif //cl_khr_fp16
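+
+/*
+ * Usage sketch (illustrative), following the Hermite formula above:
+ *
+ *   float a = smoothstep(0.0f, 1.0f, 0.25f);  // 0.15625f: t*t*(3-2*t), t=0.25
+ *   float b = smoothstep(2.0f, 4.0f, 5.0f);   // 1.0f: x >= edge1
+ */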
+
+/**
+ * Returns 1.0 if x > 0, -0.0 if x = -0.0, +0.0 if x =
+ * +0.0, or -1.0 if x < 0. Returns 0.0 if x is a NaN.
+ */
+float __ovld __cnfn sign(float x);
+float2 __ovld __cnfn sign(float2 x);
+float3 __ovld __cnfn sign(float3 x);
+float4 __ovld __cnfn sign(float4 x);
+float8 __ovld __cnfn sign(float8 x);
+float16 __ovld __cnfn sign(float16 x);
+#ifdef cl_khr_fp64
+double __ovld __cnfn sign(double x);
+double2 __ovld __cnfn sign(double2 x);
+double3 __ovld __cnfn sign(double3 x);
+double4 __ovld __cnfn sign(double4 x);
+double8 __ovld __cnfn sign(double8 x);
+double16 __ovld __cnfn sign(double16 x);
+#endif //cl_khr_fp64
+#ifdef cl_khr_fp16
+half __ovld __cnfn sign(half x);
+half2 __ovld __cnfn sign(half2 x);
+half3 __ovld __cnfn sign(half3 x);
+half4 __ovld __cnfn sign(half4 x);
+half8 __ovld __cnfn sign(half8 x);
+half16 __ovld __cnfn sign(half16 x);
+#endif //cl_khr_fp16
+
+// OpenCL v1.1 s6.11.5, v1.2 s6.12.5, v2.0 s6.13.5 - Geometric Functions
+
+/**
+ * Returns the cross product of p0.xyz and p1.xyz. The
+ * w component of the float4 result will be 0.0.
+ */
+float4 __ovld __cnfn cross(float4 p0, float4 p1);
+float3 __ovld __cnfn cross(float3 p0, float3 p1);
+#ifdef cl_khr_fp64
+double4 __ovld __cnfn cross(double4 p0, double4 p1);
+double3 __ovld __cnfn cross(double3 p0, double3 p1);
+#endif //cl_khr_fp64
+#ifdef cl_khr_fp16
+half4 __ovld __cnfn cross(half4 p0, half4 p1);
+half3 __ovld __cnfn cross(half3 p0, half3 p1);
+#endif //cl_khr_fp16
+
+/**
+ * Compute dot product.
+ */
+float __ovld __cnfn dot(float p0, float p1);
+float __ovld __cnfn dot(float2 p0, float2 p1);
+float __ovld __cnfn dot(float3 p0, float3 p1);
+float __ovld __cnfn dot(float4 p0, float4 p1);
+#ifdef cl_khr_fp64
+double __ovld __cnfn dot(double p0, double p1);
+double __ovld __cnfn dot(double2 p0, double2 p1);
+double __ovld __cnfn dot(double3 p0, double3 p1);
+double __ovld __cnfn dot(double4 p0, double4 p1);
+#endif //cl_khr_fp64
+#ifdef cl_khr_fp16
+half __ovld __cnfn dot(half p0, half p1);
+half __ovld __cnfn dot(half2 p0, half2 p1);
+half __ovld __cnfn dot(half3 p0, half3 p1);
+half __ovld __cnfn dot(half4 p0, half4 p1);
+#endif //cl_khr_fp16
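+
+/*
+ * Usage sketch (illustrative): the standard basis vectors demonstrate
+ * both functions:
+ *
+ *   float3 ex = (float3)(1.0f, 0.0f, 0.0f);
+ *   float3 ey = (float3)(0.0f, 1.0f, 0.0f);
+ *   float3 ez = cross(ex, ey);         // (0.0f, 0.0f, 1.0f)
+ *   float  d  = dot(ex, ey);           // 0.0f, orthogonal
+ */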
+
+/**
+ * Returns the distance between p0 and p1. This is
+ * calculated as length(p0 - p1).
+ */
+float __ovld __cnfn distance(float p0, float p1);
+float __ovld __cnfn distance(float2 p0, float2 p1);
+float __ovld __cnfn distance(float3 p0, float3 p1);
+float __ovld __cnfn distance(float4 p0, float4 p1);
+#ifdef cl_khr_fp64
+double __ovld __cnfn distance(double p0, double p1);
+double __ovld __cnfn distance(double2 p0, double2 p1);
+double __ovld __cnfn distance(double3 p0, double3 p1);
+double __ovld __cnfn distance(double4 p0, double4 p1);
+#endif //cl_khr_fp64
+#ifdef cl_khr_fp16
+half __ovld __cnfn distance(half p0, half p1);
+half __ovld __cnfn distance(half2 p0, half2 p1);
+half __ovld __cnfn distance(half3 p0, half3 p1);
+half __ovld __cnfn distance(half4 p0, half4 p1);
+#endif //cl_khr_fp16
+
+/**
+ * Returns the length of vector p, i.e.,
+ * sqrt(p.x^2 + p.y^2 + ...)
+ */
+float __ovld __cnfn length(float p);
+float __ovld __cnfn length(float2 p);
+float __ovld __cnfn length(float3 p);
+float __ovld __cnfn length(float4 p);
+#ifdef cl_khr_fp64
+double __ovld __cnfn length(double p);
+double __ovld __cnfn length(double2 p);
+double __ovld __cnfn length(double3 p);
+double __ovld __cnfn length(double4 p);
+#endif //cl_khr_fp64
+#ifdef cl_khr_fp16
+half __ovld __cnfn length(half p);
+half __ovld __cnfn length(half2 p);
+half __ovld __cnfn length(half3 p);
+half __ovld __cnfn length(half4 p);
+#endif //cl_khr_fp16
+
+/**
+ * Returns a vector in the same direction as p but with a
+ * length of 1.
+ */
+float __ovld __cnfn normalize(float p);
+float2 __ovld __cnfn normalize(float2 p);
+float3 __ovld __cnfn normalize(float3 p);
+float4 __ovld __cnfn normalize(float4 p);
+#ifdef cl_khr_fp64
+double __ovld __cnfn normalize(double p);
+double2 __ovld __cnfn normalize(double2 p);
+double3 __ovld __cnfn normalize(double3 p);
+double4 __ovld __cnfn normalize(double4 p);
+#endif //cl_khr_fp64
+#ifdef cl_khr_fp16
+half __ovld __cnfn normalize(half p);
+half2 __ovld __cnfn normalize(half2 p);
+half3 __ovld __cnfn normalize(half3 p);
+half4 __ovld __cnfn normalize(half4 p);
+#endif //cl_khr_fp16
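+
+/*
+ * Usage sketch (illustrative): a 3-4-5 triangle ties length, distance
+ * and normalize together:
+ *
+ *   float2 p = (float2)(3.0f, 4.0f);
+ *   float  l = length(p);                          // 5.0f
+ *   float  d = distance(p, (float2)(0.0f, 0.0f));  // 5.0f
+ *   float2 n = normalize(p);                       // (0.6f, 0.8f)
+ */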
+
+/**
+ * Returns fast_length(p0 - p1).
+ */
+float __ovld __cnfn fast_distance(float p0, float p1);
+float __ovld __cnfn fast_distance(float2 p0, float2 p1);
+float __ovld __cnfn fast_distance(float3 p0, float3 p1);
+float __ovld __cnfn fast_distance(float4 p0, float4 p1);
+#ifdef cl_khr_fp16
+half __ovld __cnfn fast_distance(half p0, half p1);
+half __ovld __cnfn fast_distance(half2 p0, half2 p1);
+half __ovld __cnfn fast_distance(half3 p0, half3 p1);
+half __ovld __cnfn fast_distance(half4 p0, half4 p1);
+#endif //cl_khr_fp16
+
+/**
+ * Returns the length of vector p computed as:
+ * half_sqrt(p.x^2 + p.y^2 + ...)
+ */
+float __ovld __cnfn fast_length(float p);
+float __ovld __cnfn fast_length(float2 p);
+float __ovld __cnfn fast_length(float3 p);
+float __ovld __cnfn fast_length(float4 p);
+#ifdef cl_khr_fp16
+half __ovld __cnfn fast_length(half p);
+half __ovld __cnfn fast_length(half2 p);
+half __ovld __cnfn fast_length(half3 p);
+half __ovld __cnfn fast_length(half4 p);
+#endif //cl_khr_fp16
+
+/**
+ * Returns a vector in the same direction as p but with a
+ * length of 1. fast_normalize is computed as:
+ * p * half_rsqrt (p.x^2 + p.y^2 + ... )
+ * The result shall be within 8192 ulps error from the
+ * infinitely precise result of
+ * if (all(p == 0.0f))
+ * result = p;
+ * else
+ * result = p / sqrt (p.x^2 + p.y^2 + ...);
+ * with the following exceptions:
+ * 1) If the sum of squares is greater than FLT_MAX
+ * then the floating-point values in the result vector
+ * are undefined.
+ * 2) If the sum of squares is less than FLT_MIN then
+ * the implementation may return p unchanged.
+ * 3) If the device is in "denorms are flushed to zero"
+ * mode, individual operand elements with magnitude
+ * less than sqrt(FLT_MIN) may be flushed to zero
+ * before proceeding with the calculation.
+ */
+float __ovld __cnfn fast_normalize(float p);
+float2 __ovld __cnfn fast_normalize(float2 p);
+float3 __ovld __cnfn fast_normalize(float3 p);
+float4 __ovld __cnfn fast_normalize(float4 p);
+#ifdef cl_khr_fp16
+half __ovld __cnfn fast_normalize(half p);
+half2 __ovld __cnfn fast_normalize(half2 p);
+half3 __ovld __cnfn fast_normalize(half3 p);
+half4 __ovld __cnfn fast_normalize(half4 p);
+#endif //cl_khr_fp16
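+
+/*
+ * Usage sketch (illustrative): fast_normalize trades accuracy for
+ * speed via half_rsqrt, so results are approximate:
+ *
+ *   float3 v = (float3)(0.0f, 3.0f, 4.0f);
+ *   float3 n = fast_normalize(v);   // ~(0.0f, 0.6f, 0.8f), within
+ *                                   // 8192 ulps of the exact result
+ */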
+
+// OpenCL v1.1 s6.11.6, v1.2 s6.12.6, v2.0 s6.13.6 - Relational Functions
+
+/**
+ * intn isequal (floatn x, floatn y)
+ * Returns the component-wise compare of x == y.
+ */
+int __ovld __cnfn isequal(float x, float y);
+int2 __ovld __cnfn isequal(float2 x, float2 y);
+int3 __ovld __cnfn isequal(float3 x, float3 y);
+int4 __ovld __cnfn isequal(float4 x, float4 y);
+int8 __ovld __cnfn isequal(float8 x, float8 y);
+int16 __ovld __cnfn isequal(float16 x, float16 y);
+#ifdef cl_khr_fp64
+int __ovld __cnfn isequal(double x, double y);
+long2 __ovld __cnfn isequal(double2 x, double2 y);
+long3 __ovld __cnfn isequal(double3 x, double3 y);
+long4 __ovld __cnfn isequal(double4 x, double4 y);
+long8 __ovld __cnfn isequal(double8 x, double8 y);
+long16 __ovld __cnfn isequal(double16 x, double16 y);
+#endif //cl_khr_fp64
+#ifdef cl_khr_fp16
+int __ovld __cnfn isequal(half x, half y);
+short2 __ovld __cnfn isequal(half2 x, half2 y);
+short3 __ovld __cnfn isequal(half3 x, half3 y);
+short4 __ovld __cnfn isequal(half4 x, half4 y);
+short8 __ovld __cnfn isequal(half8 x, half8 y);
+short16 __ovld __cnfn isequal(half16 x, half16 y);
+#endif //cl_khr_fp16
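+
+/*
+ * Usage sketch (illustrative): scalar relational forms return 0 or 1,
+ * while vector forms return 0 or -1 (all bits set) per lane:
+ *
+ *   int  s = isequal(1.0f, 1.0f);                          // 1
+ *   int4 v = isequal((float4)(1.0f),
+ *                    (float4)(0.0f, 1.0f, 2.0f, NAN));     // (0, -1, 0, 0)
+ */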
+
+/**
+ * Returns the component-wise compare of x != y.
+ */
+int __ovld __cnfn isnotequal(float x, float y);
+int2 __ovld __cnfn isnotequal(float2 x, float2 y);
+int3 __ovld __cnfn isnotequal(float3 x, float3 y);
+int4 __ovld __cnfn isnotequal(float4 x, float4 y);
+int8 __ovld __cnfn isnotequal(float8 x, float8 y);
+int16 __ovld __cnfn isnotequal(float16 x, float16 y);
+#ifdef cl_khr_fp64
+int __ovld __cnfn isnotequal(double x, double y);
+long2 __ovld __cnfn isnotequal(double2 x, double2 y);
+long3 __ovld __cnfn isnotequal(double3 x, double3 y);
+long4 __ovld __cnfn isnotequal(double4 x, double4 y);
+long8 __ovld __cnfn isnotequal(double8 x, double8 y);
+long16 __ovld __cnfn isnotequal(double16 x, double16 y);
+#endif //cl_khr_fp64
+#ifdef cl_khr_fp16
+int __ovld __cnfn isnotequal(half x, half y);
+short2 __ovld __cnfn isnotequal(half2 x, half2 y);
+short3 __ovld __cnfn isnotequal(half3 x, half3 y);
+short4 __ovld __cnfn isnotequal(half4 x, half4 y);
+short8 __ovld __cnfn isnotequal(half8 x, half8 y);
+short16 __ovld __cnfn isnotequal(half16 x, half16 y);
+#endif //cl_khr_fp16
+
+/**
+ * Returns the component-wise compare of x > y.
+ */
+int __ovld __cnfn isgreater(float x, float y);
+int2 __ovld __cnfn isgreater(float2 x, float2 y);
+int3 __ovld __cnfn isgreater(float3 x, float3 y);
+int4 __ovld __cnfn isgreater(float4 x, float4 y);
+int8 __ovld __cnfn isgreater(float8 x, float8 y);
+int16 __ovld __cnfn isgreater(float16 x, float16 y);
+#ifdef cl_khr_fp64
+int __ovld __cnfn isgreater(double x, double y);
+long2 __ovld __cnfn isgreater(double2 x, double2 y);
+long3 __ovld __cnfn isgreater(double3 x, double3 y);
+long4 __ovld __cnfn isgreater(double4 x, double4 y);
+long8 __ovld __cnfn isgreater(double8 x, double8 y);
+long16 __ovld __cnfn isgreater(double16 x, double16 y);
+#endif //cl_khr_fp64
+#ifdef cl_khr_fp16
+int __ovld __cnfn isgreater(half x, half y);
+short2 __ovld __cnfn isgreater(half2 x, half2 y);
+short3 __ovld __cnfn isgreater(half3 x, half3 y);
+short4 __ovld __cnfn isgreater(half4 x, half4 y);
+short8 __ovld __cnfn isgreater(half8 x, half8 y);
+short16 __ovld __cnfn isgreater(half16 x, half16 y);
+#endif //cl_khr_fp16
+
+/**
+ * Returns the component-wise compare of x >= y.
+ */
+int __ovld __cnfn isgreaterequal(float x, float y);
+int2 __ovld __cnfn isgreaterequal(float2 x, float2 y);
+int3 __ovld __cnfn isgreaterequal(float3 x, float3 y);
+int4 __ovld __cnfn isgreaterequal(float4 x, float4 y);
+int8 __ovld __cnfn isgreaterequal(float8 x, float8 y);
+int16 __ovld __cnfn isgreaterequal(float16 x, float16 y);
+#ifdef cl_khr_fp64
+int __ovld __cnfn isgreaterequal(double x, double y);
+long2 __ovld __cnfn isgreaterequal(double2 x, double2 y);
+long3 __ovld __cnfn isgreaterequal(double3 x, double3 y);
+long4 __ovld __cnfn isgreaterequal(double4 x, double4 y);
+long8 __ovld __cnfn isgreaterequal(double8 x, double8 y);
+long16 __ovld __cnfn isgreaterequal(double16 x, double16 y);
+#endif //cl_khr_fp64
+#ifdef cl_khr_fp16
+int __ovld __cnfn isgreaterequal(half x, half y);
+short2 __ovld __cnfn isgreaterequal(half2 x, half2 y);
+short3 __ovld __cnfn isgreaterequal(half3 x, half3 y);
+short4 __ovld __cnfn isgreaterequal(half4 x, half4 y);
+short8 __ovld __cnfn isgreaterequal(half8 x, half8 y);
+short16 __ovld __cnfn isgreaterequal(half16 x, half16 y);
+#endif //cl_khr_fp16
+
+/**
+ * Returns the component-wise compare of x < y.
+ */
+int __ovld __cnfn isless(float x, float y);
+int2 __ovld __cnfn isless(float2 x, float2 y);
+int3 __ovld __cnfn isless(float3 x, float3 y);
+int4 __ovld __cnfn isless(float4 x, float4 y);
+int8 __ovld __cnfn isless(float8 x, float8 y);
+int16 __ovld __cnfn isless(float16 x, float16 y);
+#ifdef cl_khr_fp64
+int __ovld __cnfn isless(double x, double y);
+long2 __ovld __cnfn isless(double2 x, double2 y);
+long3 __ovld __cnfn isless(double3 x, double3 y);
+long4 __ovld __cnfn isless(double4 x, double4 y);
+long8 __ovld __cnfn isless(double8 x, double8 y);
+long16 __ovld __cnfn isless(double16 x, double16 y);
+#endif //cl_khr_fp64
+#ifdef cl_khr_fp16
+int __ovld __cnfn isless(half x, half y);
+short2 __ovld __cnfn isless(half2 x, half2 y);
+short3 __ovld __cnfn isless(half3 x, half3 y);
+short4 __ovld __cnfn isless(half4 x, half4 y);
+short8 __ovld __cnfn isless(half8 x, half8 y);
+short16 __ovld __cnfn isless(half16 x, half16 y);
+#endif //cl_khr_fp16
+
+/**
+ * Returns the component-wise compare of x <= y.
+ */
+int __ovld __cnfn islessequal(float x, float y);
+int2 __ovld __cnfn islessequal(float2 x, float2 y);
+int3 __ovld __cnfn islessequal(float3 x, float3 y);
+int4 __ovld __cnfn islessequal(float4 x, float4 y);
+int8 __ovld __cnfn islessequal(float8 x, float8 y);
+int16 __ovld __cnfn islessequal(float16 x, float16 y);
+#ifdef cl_khr_fp64
+int __ovld __cnfn islessequal(double x, double y);
+long2 __ovld __cnfn islessequal(double2 x, double2 y);
+long3 __ovld __cnfn islessequal(double3 x, double3 y);
+long4 __ovld __cnfn islessequal(double4 x, double4 y);
+long8 __ovld __cnfn islessequal(double8 x, double8 y);
+long16 __ovld __cnfn islessequal(double16 x, double16 y);
+#endif //cl_khr_fp64
+#ifdef cl_khr_fp16
+int __ovld __cnfn islessequal(half x, half y);
+short2 __ovld __cnfn islessequal(half2 x, half2 y);
+short3 __ovld __cnfn islessequal(half3 x, half3 y);
+short4 __ovld __cnfn islessequal(half4 x, half4 y);
+short8 __ovld __cnfn islessequal(half8 x, half8 y);
+short16 __ovld __cnfn islessequal(half16 x, half16 y);
+#endif //cl_khr_fp16
+
+/**
+ * Returns the component-wise compare of
+ * (x < y) || (x > y).
+ */
+int __ovld __cnfn islessgreater(float x, float y);
+int2 __ovld __cnfn islessgreater(float2 x, float2 y);
+int3 __ovld __cnfn islessgreater(float3 x, float3 y);
+int4 __ovld __cnfn islessgreater(float4 x, float4 y);
+int8 __ovld __cnfn islessgreater(float8 x, float8 y);
+int16 __ovld __cnfn islessgreater(float16 x, float16 y);
+#ifdef cl_khr_fp64
+int __ovld __cnfn islessgreater(double x, double y);
+long2 __ovld __cnfn islessgreater(double2 x, double2 y);
+long3 __ovld __cnfn islessgreater(double3 x, double3 y);
+long4 __ovld __cnfn islessgreater(double4 x, double4 y);
+long8 __ovld __cnfn islessgreater(double8 x, double8 y);
+long16 __ovld __cnfn islessgreater(double16 x, double16 y);
+#endif //cl_khr_fp64
+#ifdef cl_khr_fp16
+int __ovld __cnfn islessgreater(half x, half y);
+short2 __ovld __cnfn islessgreater(half2 x, half2 y);
+short3 __ovld __cnfn islessgreater(half3 x, half3 y);
+short4 __ovld __cnfn islessgreater(half4 x, half4 y);
+short8 __ovld __cnfn islessgreater(half8 x, half8 y);
+short16 __ovld __cnfn islessgreater(half16 x, half16 y);
+#endif //cl_khr_fp16
+
+/**
+ * Test for finite value.
+ */
+int __ovld __cnfn isfinite(float);
+int2 __ovld __cnfn isfinite(float2);
+int3 __ovld __cnfn isfinite(float3);
+int4 __ovld __cnfn isfinite(float4);
+int8 __ovld __cnfn isfinite(float8);
+int16 __ovld __cnfn isfinite(float16);
+#ifdef cl_khr_fp64
+int __ovld __cnfn isfinite(double);
+long2 __ovld __cnfn isfinite(double2);
+long3 __ovld __cnfn isfinite(double3);
+long4 __ovld __cnfn isfinite(double4);
+long8 __ovld __cnfn isfinite(double8);
+long16 __ovld __cnfn isfinite(double16);
+#endif //cl_khr_fp64
+#ifdef cl_khr_fp16
+int __ovld __cnfn isfinite(half);
+short2 __ovld __cnfn isfinite(half2);
+short3 __ovld __cnfn isfinite(half3);
+short4 __ovld __cnfn isfinite(half4);
+short8 __ovld __cnfn isfinite(half8);
+short16 __ovld __cnfn isfinite(half16);
+#endif //cl_khr_fp16
+
+/**
+ * Test for infinity value (positive or negative).
+ */
+int __ovld __cnfn isinf(float);
+int2 __ovld __cnfn isinf(float2);
+int3 __ovld __cnfn isinf(float3);
+int4 __ovld __cnfn isinf(float4);
+int8 __ovld __cnfn isinf(float8);
+int16 __ovld __cnfn isinf(float16);
+#ifdef cl_khr_fp64
+int __ovld __cnfn isinf(double);
+long2 __ovld __cnfn isinf(double2);
+long3 __ovld __cnfn isinf(double3);
+long4 __ovld __cnfn isinf(double4);
+long8 __ovld __cnfn isinf(double8);
+long16 __ovld __cnfn isinf(double16);
+#endif //cl_khr_fp64
+#ifdef cl_khr_fp16
+int __ovld __cnfn isinf(half);
+short2 __ovld __cnfn isinf(half2);
+short3 __ovld __cnfn isinf(half3);
+short4 __ovld __cnfn isinf(half4);
+short8 __ovld __cnfn isinf(half8);
+short16 __ovld __cnfn isinf(half16);
+#endif //cl_khr_fp16
+
+/**
+ * Test for a NaN.
+ */
+int __ovld __cnfn isnan(float);
+int2 __ovld __cnfn isnan(float2);
+int3 __ovld __cnfn isnan(float3);
+int4 __ovld __cnfn isnan(float4);
+int8 __ovld __cnfn isnan(float8);
+int16 __ovld __cnfn isnan(float16);
+#ifdef cl_khr_fp64
+int __ovld __cnfn isnan(double);
+long2 __ovld __cnfn isnan(double2);
+long3 __ovld __cnfn isnan(double3);
+long4 __ovld __cnfn isnan(double4);
+long8 __ovld __cnfn isnan(double8);
+long16 __ovld __cnfn isnan(double16);
+#endif //cl_khr_fp64
+#ifdef cl_khr_fp16
+int __ovld __cnfn isnan(half);
+short2 __ovld __cnfn isnan(half2);
+short3 __ovld __cnfn isnan(half3);
+short4 __ovld __cnfn isnan(half4);
+short8 __ovld __cnfn isnan(half8);
+short16 __ovld __cnfn isnan(half16);
+#endif //cl_khr_fp16
+
+/**
+ * Test for a normal value.
+ */
+int __ovld __cnfn isnormal(float);
+int2 __ovld __cnfn isnormal(float2);
+int3 __ovld __cnfn isnormal(float3);
+int4 __ovld __cnfn isnormal(float4);
+int8 __ovld __cnfn isnormal(float8);
+int16 __ovld __cnfn isnormal(float16);
+#ifdef cl_khr_fp64
+int __ovld __cnfn isnormal(double);
+long2 __ovld __cnfn isnormal(double2);
+long3 __ovld __cnfn isnormal(double3);
+long4 __ovld __cnfn isnormal(double4);
+long8 __ovld __cnfn isnormal(double8);
+long16 __ovld __cnfn isnormal(double16);
+#endif //cl_khr_fp64
+#ifdef cl_khr_fp16
+int __ovld __cnfn isnormal(half);
+short2 __ovld __cnfn isnormal(half2);
+short3 __ovld __cnfn isnormal(half3);
+short4 __ovld __cnfn isnormal(half4);
+short8 __ovld __cnfn isnormal(half8);
+short16 __ovld __cnfn isnormal(half16);
+#endif //cl_khr_fp16
+
+/**
+ * Test if arguments are ordered. isordered() takes
+ * arguments x and y, and returns the result
+ * isequal(x, x) && isequal(y, y).
+ */
+int __ovld __cnfn isordered(float x, float y);
+int2 __ovld __cnfn isordered(float2 x, float2 y);
+int3 __ovld __cnfn isordered(float3 x, float3 y);
+int4 __ovld __cnfn isordered(float4 x, float4 y);
+int8 __ovld __cnfn isordered(float8 x, float8 y);
+int16 __ovld __cnfn isordered(float16 x, float16 y);
+#ifdef cl_khr_fp64
+int __ovld __cnfn isordered(double x, double y);
+long2 __ovld __cnfn isordered(double2 x, double2 y);
+long3 __ovld __cnfn isordered(double3 x, double3 y);
+long4 __ovld __cnfn isordered(double4 x, double4 y);
+long8 __ovld __cnfn isordered(double8 x, double8 y);
+long16 __ovld __cnfn isordered(double16 x, double16 y);
+#endif //cl_khr_fp64
+#ifdef cl_khr_fp16
+int __ovld __cnfn isordered(half x, half y);
+short2 __ovld __cnfn isordered(half2 x, half2 y);
+short3 __ovld __cnfn isordered(half3 x, half3 y);
+short4 __ovld __cnfn isordered(half4 x, half4 y);
+short8 __ovld __cnfn isordered(half8 x, half8 y);
+short16 __ovld __cnfn isordered(half16 x, half16 y);
+#endif //cl_khr_fp16
+
+/**
+ * Test if arguments are unordered. isunordered()
+ * takes arguments x and y, returning non-zero if x or y
+ * is NaN, and zero otherwise.
+ */
+int __ovld __cnfn isunordered(float x, float y);
+int2 __ovld __cnfn isunordered(float2 x, float2 y);
+int3 __ovld __cnfn isunordered(float3 x, float3 y);
+int4 __ovld __cnfn isunordered(float4 x, float4 y);
+int8 __ovld __cnfn isunordered(float8 x, float8 y);
+int16 __ovld __cnfn isunordered(float16 x, float16 y);
+#ifdef cl_khr_fp64
+int __ovld __cnfn isunordered(double x, double y);
+long2 __ovld __cnfn isunordered(double2 x, double2 y);
+long3 __ovld __cnfn isunordered(double3 x, double3 y);
+long4 __ovld __cnfn isunordered(double4 x, double4 y);
+long8 __ovld __cnfn isunordered(double8 x, double8 y);
+long16 __ovld __cnfn isunordered(double16 x, double16 y);
+#endif //cl_khr_fp64
+#ifdef cl_khr_fp16
+int __ovld __cnfn isunordered(half x, half y);
+short2 __ovld __cnfn isunordered(half2 x, half2 y);
+short3 __ovld __cnfn isunordered(half3 x, half3 y);
+short4 __ovld __cnfn isunordered(half4 x, half4 y);
+short8 __ovld __cnfn isunordered(half8 x, half8 y);
+short16 __ovld __cnfn isunordered(half16 x, half16 y);
+#endif //cl_khr_fp16
+
+/**
+ * Test for sign bit. The scalar version of the function
+ * returns 1 if the sign bit in the float is set, else 0.
+ * The vector version of the function returns the
+ * following for each component in floatn: -1 if the
+ * sign bit in the float is set, else 0.
+ */
+int __ovld __cnfn signbit(float);
+int2 __ovld __cnfn signbit(float2);
+int3 __ovld __cnfn signbit(float3);
+int4 __ovld __cnfn signbit(float4);
+int8 __ovld __cnfn signbit(float8);
+int16 __ovld __cnfn signbit(float16);
+#ifdef cl_khr_fp64
+int __ovld __cnfn signbit(double);
+long2 __ovld __cnfn signbit(double2);
+long3 __ovld __cnfn signbit(double3);
+long4 __ovld __cnfn signbit(double4);
+long8 __ovld __cnfn signbit(double8);
+long16 __ovld __cnfn signbit(double16);
+#endif //cl_khr_fp64
+#ifdef cl_khr_fp16
+int __ovld __cnfn signbit(half);
+short2 __ovld __cnfn signbit(half2);
+short3 __ovld __cnfn signbit(half3);
+short4 __ovld __cnfn signbit(half4);
+short8 __ovld __cnfn signbit(half8);
+short16 __ovld __cnfn signbit(half16);
+#endif //cl_khr_fp16
+
+/**
+ * Returns 1 if the most significant bit in any component
+ * of x is set; otherwise returns 0.
+ */
+int __ovld __cnfn any(char x);
+int __ovld __cnfn any(char2 x);
+int __ovld __cnfn any(char3 x);
+int __ovld __cnfn any(char4 x);
+int __ovld __cnfn any(char8 x);
+int __ovld __cnfn any(char16 x);
+int __ovld __cnfn any(short x);
+int __ovld __cnfn any(short2 x);
+int __ovld __cnfn any(short3 x);
+int __ovld __cnfn any(short4 x);
+int __ovld __cnfn any(short8 x);
+int __ovld __cnfn any(short16 x);
+int __ovld __cnfn any(int x);
+int __ovld __cnfn any(int2 x);
+int __ovld __cnfn any(int3 x);
+int __ovld __cnfn any(int4 x);
+int __ovld __cnfn any(int8 x);
+int __ovld __cnfn any(int16 x);
+int __ovld __cnfn any(long x);
+int __ovld __cnfn any(long2 x);
+int __ovld __cnfn any(long3 x);
+int __ovld __cnfn any(long4 x);
+int __ovld __cnfn any(long8 x);
+int __ovld __cnfn any(long16 x);
+
+/**
+ * Returns 1 if the most significant bit in all components
+ * of x is set; otherwise returns 0.
+ */
+int __ovld __cnfn all(char x);
+int __ovld __cnfn all(char2 x);
+int __ovld __cnfn all(char3 x);
+int __ovld __cnfn all(char4 x);
+int __ovld __cnfn all(char8 x);
+int __ovld __cnfn all(char16 x);
+int __ovld __cnfn all(short x);
+int __ovld __cnfn all(short2 x);
+int __ovld __cnfn all(short3 x);
+int __ovld __cnfn all(short4 x);
+int __ovld __cnfn all(short8 x);
+int __ovld __cnfn all(short16 x);
+int __ovld __cnfn all(int x);
+int __ovld __cnfn all(int2 x);
+int __ovld __cnfn all(int3 x);
+int __ovld __cnfn all(int4 x);
+int __ovld __cnfn all(int8 x);
+int __ovld __cnfn all(int16 x);
+int __ovld __cnfn all(long x);
+int __ovld __cnfn all(long2 x);
+int __ovld __cnfn all(long3 x);
+int __ovld __cnfn all(long4 x);
+int __ovld __cnfn all(long8 x);
+int __ovld __cnfn all(long16 x);
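+
+/*
+ * Usage sketch (illustrative): any/all test the sign (most significant)
+ * bit, so relational results (-1 per true lane) feed them directly:
+ *
+ *   int4 m  = (int4)(-1, 0, 0, 0);     // only lane 0 has its MSB set
+ *   int  a1 = any(m);                  // 1
+ *   int  a2 = all(m);                  // 0
+ */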
+
+/**
+ * Each bit of the result is the corresponding bit of a if
+ * the corresponding bit of c is 0. Otherwise it is the
+ * corresponding bit of b.
+ */
+char __ovld __cnfn bitselect(char a, char b, char c);
+uchar __ovld __cnfn bitselect(uchar a, uchar b, uchar c);
+char2 __ovld __cnfn bitselect(char2 a, char2 b, char2 c);
+uchar2 __ovld __cnfn bitselect(uchar2 a, uchar2 b, uchar2 c);
+char3 __ovld __cnfn bitselect(char3 a, char3 b, char3 c);
+uchar3 __ovld __cnfn bitselect(uchar3 a, uchar3 b, uchar3 c);
+char4 __ovld __cnfn bitselect(char4 a, char4 b, char4 c);
+uchar4 __ovld __cnfn bitselect(uchar4 a, uchar4 b, uchar4 c);
+char8 __ovld __cnfn bitselect(char8 a, char8 b, char8 c);
+uchar8 __ovld __cnfn bitselect(uchar8 a, uchar8 b, uchar8 c);
+char16 __ovld __cnfn bitselect(char16 a, char16 b, char16 c);
+uchar16 __ovld __cnfn bitselect(uchar16 a, uchar16 b, uchar16 c);
+short __ovld __cnfn bitselect(short a, short b, short c);
+ushort __ovld __cnfn bitselect(ushort a, ushort b, ushort c);
+short2 __ovld __cnfn bitselect(short2 a, short2 b, short2 c);
+ushort2 __ovld __cnfn bitselect(ushort2 a, ushort2 b, ushort2 c);
+short3 __ovld __cnfn bitselect(short3 a, short3 b, short3 c);
+ushort3 __ovld __cnfn bitselect(ushort3 a, ushort3 b, ushort3 c);
+short4 __ovld __cnfn bitselect(short4 a, short4 b, short4 c);
+ushort4 __ovld __cnfn bitselect(ushort4 a, ushort4 b, ushort4 c);
+short8 __ovld __cnfn bitselect(short8 a, short8 b, short8 c);
+ushort8 __ovld __cnfn bitselect(ushort8 a, ushort8 b, ushort8 c);
+short16 __ovld __cnfn bitselect(short16 a, short16 b, short16 c);
+ushort16 __ovld __cnfn bitselect(ushort16 a, ushort16 b, ushort16 c);
+int __ovld __cnfn bitselect(int a, int b, int c);
+uint __ovld __cnfn bitselect(uint a, uint b, uint c);
+int2 __ovld __cnfn bitselect(int2 a, int2 b, int2 c);
+uint2 __ovld __cnfn bitselect(uint2 a, uint2 b, uint2 c);
+int3 __ovld __cnfn bitselect(int3 a, int3 b, int3 c);
+uint3 __ovld __cnfn bitselect(uint3 a, uint3 b, uint3 c);
+int4 __ovld __cnfn bitselect(int4 a, int4 b, int4 c);
+uint4 __ovld __cnfn bitselect(uint4 a, uint4 b, uint4 c);
+int8 __ovld __cnfn bitselect(int8 a, int8 b, int8 c);
+uint8 __ovld __cnfn bitselect(uint8 a, uint8 b, uint8 c);
+int16 __ovld __cnfn bitselect(int16 a, int16 b, int16 c);
+uint16 __ovld __cnfn bitselect(uint16 a, uint16 b, uint16 c);
+long __ovld __cnfn bitselect(long a, long b, long c);
+ulong __ovld __cnfn bitselect(ulong a, ulong b, ulong c);
+long2 __ovld __cnfn bitselect(long2 a, long2 b, long2 c);
+ulong2 __ovld __cnfn bitselect(ulong2 a, ulong2 b, ulong2 c);
+long3 __ovld __cnfn bitselect(long3 a, long3 b, long3 c);
+ulong3 __ovld __cnfn bitselect(ulong3 a, ulong3 b, ulong3 c);
+long4 __ovld __cnfn bitselect(long4 a, long4 b, long4 c);
+ulong4 __ovld __cnfn bitselect(ulong4 a, ulong4 b, ulong4 c);
+long8 __ovld __cnfn bitselect(long8 a, long8 b, long8 c);
+ulong8 __ovld __cnfn bitselect(ulong8 a, ulong8 b, ulong8 c);
+long16 __ovld __cnfn bitselect(long16 a, long16 b, long16 c);
+ulong16 __ovld __cnfn bitselect(ulong16 a, ulong16 b, ulong16 c);
+float __ovld __cnfn bitselect(float a, float b, float c);
+float2 __ovld __cnfn bitselect(float2 a, float2 b, float2 c);
+float3 __ovld __cnfn bitselect(float3 a, float3 b, float3 c);
+float4 __ovld __cnfn bitselect(float4 a, float4 b, float4 c);
+float8 __ovld __cnfn bitselect(float8 a, float8 b, float8 c);
+float16 __ovld __cnfn bitselect(float16 a, float16 b, float16 c);
+#ifdef cl_khr_fp64
+double __ovld __cnfn bitselect(double a, double b, double c);
+double2 __ovld __cnfn bitselect(double2 a, double2 b, double2 c);
+double3 __ovld __cnfn bitselect(double3 a, double3 b, double3 c);
+double4 __ovld __cnfn bitselect(double4 a, double4 b, double4 c);
+double8 __ovld __cnfn bitselect(double8 a, double8 b, double8 c);
+double16 __ovld __cnfn bitselect(double16 a, double16 b, double16 c);
+#endif //cl_khr_fp64
+#ifdef cl_khr_fp16
+half __ovld __cnfn bitselect(half a, half b, half c);
+half2 __ovld __cnfn bitselect(half2 a, half2 b, half2 c);
+half3 __ovld __cnfn bitselect(half3 a, half3 b, half3 c);
+half4 __ovld __cnfn bitselect(half4 a, half4 b, half4 c);
+half8 __ovld __cnfn bitselect(half8 a, half8 b, half8 c);
+half16 __ovld __cnfn bitselect(half16 a, half16 b, half16 c);
+#endif //cl_khr_fp16
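+
+/*
+ * Usage sketch (illustrative): c acts as a per-bit mask choosing
+ * between a (mask bit 0) and b (mask bit 1):
+ *
+ *   uint a = 0xAAAAAAAAu, b = 0x55555555u;
+ *   uint r = bitselect(a, b, 0x0000FFFFu);  // 0xAAAA5555u: low 16 bits from b
+ */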
+
+/**
+ * For each component of a vector type,
+ * result[i] = (MSB of c[i] is set) ? b[i] : a[i].
+ * For a scalar type, result = c ? b : a.
+ */
+char __ovld __cnfn select(char a, char b, char c);
+uchar __ovld __cnfn select(uchar a, uchar b, char c);
+char2 __ovld __cnfn select(char2 a, char2 b, char2 c);
+uchar2 __ovld __cnfn select(uchar2 a, uchar2 b, char2 c);
+char3 __ovld __cnfn select(char3 a, char3 b, char3 c);
+uchar3 __ovld __cnfn select(uchar3 a, uchar3 b, char3 c);
+char4 __ovld __cnfn select(char4 a, char4 b, char4 c);
+uchar4 __ovld __cnfn select(uchar4 a, uchar4 b, char4 c);
+char8 __ovld __cnfn select(char8 a, char8 b, char8 c);
+uchar8 __ovld __cnfn select(uchar8 a, uchar8 b, char8 c);
+char16 __ovld __cnfn select(char16 a, char16 b, char16 c);
+uchar16 __ovld __cnfn select(uchar16 a, uchar16 b, char16 c);
+short __ovld __cnfn select(short a, short b, char c);
+ushort __ovld __cnfn select(ushort a, ushort b, char c);
+short2 __ovld __cnfn select(short2 a, short2 b, char2 c);
+ushort2 __ovld __cnfn select(ushort2 a, ushort2 b, char2 c);
+short3 __ovld __cnfn select(short3 a, short3 b, char3 c);
+ushort3 __ovld __cnfn select(ushort3 a, ushort3 b, char3 c);
+short4 __ovld __cnfn select(short4 a, short4 b, char4 c);
+ushort4 __ovld __cnfn select(ushort4 a, ushort4 b, char4 c);
+short8 __ovld __cnfn select(short8 a, short8 b, char8 c);
+ushort8 __ovld __cnfn select(ushort8 a, ushort8 b, char8 c);
+short16 __ovld __cnfn select(short16 a, short16 b, char16 c);
+ushort16 __ovld __cnfn select(ushort16 a, ushort16 b, char16 c);
+int __ovld __cnfn select(int a, int b, char c);
+uint __ovld __cnfn select(uint a, uint b, char c);
+int2 __ovld __cnfn select(int2 a, int2 b, char2 c);
+uint2 __ovld __cnfn select(uint2 a, uint2 b, char2 c);
+int3 __ovld __cnfn select(int3 a, int3 b, char3 c);
+uint3 __ovld __cnfn select(uint3 a, uint3 b, char3 c);
+int4 __ovld __cnfn select(int4 a, int4 b, char4 c);
+uint4 __ovld __cnfn select(uint4 a, uint4 b, char4 c);
+int8 __ovld __cnfn select(int8 a, int8 b, char8 c);
+uint8 __ovld __cnfn select(uint8 a, uint8 b, char8 c);
+int16 __ovld __cnfn select(int16 a, int16 b, char16 c);
+uint16 __ovld __cnfn select(uint16 a, uint16 b, char16 c);
+long __ovld __cnfn select(long a, long b, char c);
+ulong __ovld __cnfn select(ulong a, ulong b, char c);
+long2 __ovld __cnfn select(long2 a, long2 b, char2 c);
+ulong2 __ovld __cnfn select(ulong2 a, ulong2 b, char2 c);
+long3 __ovld __cnfn select(long3 a, long3 b, char3 c);
+ulong3 __ovld __cnfn select(ulong3 a, ulong3 b, char3 c);
+long4 __ovld __cnfn select(long4 a, long4 b, char4 c);
+ulong4 __ovld __cnfn select(ulong4 a, ulong4 b, char4 c);
+long8 __ovld __cnfn select(long8 a, long8 b, char8 c);
+ulong8 __ovld __cnfn select(ulong8 a, ulong8 b, char8 c);
+long16 __ovld __cnfn select(long16 a, long16 b, char16 c);
+ulong16 __ovld __cnfn select(ulong16 a, ulong16 b, char16 c);
+float __ovld __cnfn select(float a, float b, char c);
+float2 __ovld __cnfn select(float2 a, float2 b, char2 c);
+float3 __ovld __cnfn select(float3 a, float3 b, char3 c);
+float4 __ovld __cnfn select(float4 a, float4 b, char4 c);
+float8 __ovld __cnfn select(float8 a, float8 b, char8 c);
+float16 __ovld __cnfn select(float16 a, float16 b, char16 c);
+char __ovld __cnfn select(char a, char b, short c);
+uchar __ovld __cnfn select(uchar a, uchar b, short c);
+char2 __ovld __cnfn select(char2 a, char2 b, short2 c);
+uchar2 __ovld __cnfn select(uchar2 a, uchar2 b, short2 c);
+char3 __ovld __cnfn select(char3 a, char3 b, short3 c);
+uchar3 __ovld __cnfn select(uchar3 a, uchar3 b, short3 c);
+char4 __ovld __cnfn select(char4 a, char4 b, short4 c);
+uchar4 __ovld __cnfn select(uchar4 a, uchar4 b, short4 c);
+char8 __ovld __cnfn select(char8 a, char8 b, short8 c);
+uchar8 __ovld __cnfn select(uchar8 a, uchar8 b, short8 c);
+char16 __ovld __cnfn select(char16 a, char16 b, short16 c);
+uchar16 __ovld __cnfn select(uchar16 a, uchar16 b, short16 c);
+short __ovld __cnfn select(short a, short b, short c);
+ushort __ovld __cnfn select(ushort a, ushort b, short c);
+short2 __ovld __cnfn select(short2 a, short2 b, short2 c);
+ushort2 __ovld __cnfn select(ushort2 a, ushort2 b, short2 c);
+short3 __ovld __cnfn select(short3 a, short3 b, short3 c);
+ushort3 __ovld __cnfn select(ushort3 a, ushort3 b, short3 c);
+short4 __ovld __cnfn select(short4 a, short4 b, short4 c);
+ushort4 __ovld __cnfn select(ushort4 a, ushort4 b, short4 c);
+short8 __ovld __cnfn select(short8 a, short8 b, short8 c);
+ushort8 __ovld __cnfn select(ushort8 a, ushort8 b, short8 c);
+short16 __ovld __cnfn select(short16 a, short16 b, short16 c);
+ushort16 __ovld __cnfn select(ushort16 a, ushort16 b, short16 c);
+int __ovld __cnfn select(int a, int b, short c);
+uint __ovld __cnfn select(uint a, uint b, short c);
+int2 __ovld __cnfn select(int2 a, int2 b, short2 c);
+uint2 __ovld __cnfn select(uint2 a, uint2 b, short2 c);
+int3 __ovld __cnfn select(int3 a, int3 b, short3 c);
+uint3 __ovld __cnfn select(uint3 a, uint3 b, short3 c);
+int4 __ovld __cnfn select(int4 a, int4 b, short4 c);
+uint4 __ovld __cnfn select(uint4 a, uint4 b, short4 c);
+int8 __ovld __cnfn select(int8 a, int8 b, short8 c);
+uint8 __ovld __cnfn select(uint8 a, uint8 b, short8 c);
+int16 __ovld __cnfn select(int16 a, int16 b, short16 c);
+uint16 __ovld __cnfn select(uint16 a, uint16 b, short16 c);
+long __ovld __cnfn select(long a, long b, short c);
+ulong __ovld __cnfn select(ulong a, ulong b, short c);
+long2 __ovld __cnfn select(long2 a, long2 b, short2 c);
+ulong2 __ovld __cnfn select(ulong2 a, ulong2 b, short2 c);
+long3 __ovld __cnfn select(long3 a, long3 b, short3 c);
+ulong3 __ovld __cnfn select(ulong3 a, ulong3 b, short3 c);
+long4 __ovld __cnfn select(long4 a, long4 b, short4 c);
+ulong4 __ovld __cnfn select(ulong4 a, ulong4 b, short4 c);
+long8 __ovld __cnfn select(long8 a, long8 b, short8 c);
+ulong8 __ovld __cnfn select(ulong8 a, ulong8 b, short8 c);
+long16 __ovld __cnfn select(long16 a, long16 b, short16 c);
+ulong16 __ovld __cnfn select(ulong16 a, ulong16 b, short16 c);
+float __ovld __cnfn select(float a, float b, short c);
+float2 __ovld __cnfn select(float2 a, float2 b, short2 c);
+float3 __ovld __cnfn select(float3 a, float3 b, short3 c);
+float4 __ovld __cnfn select(float4 a, float4 b, short4 c);
+float8 __ovld __cnfn select(float8 a, float8 b, short8 c);
+float16 __ovld __cnfn select(float16 a, float16 b, short16 c);
+char __ovld __cnfn select(char a, char b, int c);
+uchar __ovld __cnfn select(uchar a, uchar b, int c);
+char2 __ovld __cnfn select(char2 a, char2 b, int2 c);
+uchar2 __ovld __cnfn select(uchar2 a, uchar2 b, int2 c);
+char3 __ovld __cnfn select(char3 a, char3 b, int3 c);
+uchar3 __ovld __cnfn select(uchar3 a, uchar3 b, int3 c);
+char4 __ovld __cnfn select(char4 a, char4 b, int4 c);
+uchar4 __ovld __cnfn select(uchar4 a, uchar4 b, int4 c);
+char8 __ovld __cnfn select(char8 a, char8 b, int8 c);
+uchar8 __ovld __cnfn select(uchar8 a, uchar8 b, int8 c);
+char16 __ovld __cnfn select(char16 a, char16 b, int16 c);
+uchar16 __ovld __cnfn select(uchar16 a, uchar16 b, int16 c);
+short __ovld __cnfn select(short a, short b, int c);
+ushort __ovld __cnfn select(ushort a, ushort b, int c);
+short2 __ovld __cnfn select(short2 a, short2 b, int2 c);
+ushort2 __ovld __cnfn select(ushort2 a, ushort2 b, int2 c);
+short3 __ovld __cnfn select(short3 a, short3 b, int3 c);
+ushort3 __ovld __cnfn select(ushort3 a, ushort3 b, int3 c);
+short4 __ovld __cnfn select(short4 a, short4 b, int4 c);
+ushort4 __ovld __cnfn select(ushort4 a, ushort4 b, int4 c);
+short8 __ovld __cnfn select(short8 a, short8 b, int8 c);
+ushort8 __ovld __cnfn select(ushort8 a, ushort8 b, int8 c);
+short16 __ovld __cnfn select(short16 a, short16 b, int16 c);
+ushort16 __ovld __cnfn select(ushort16 a, ushort16 b, int16 c);
+int __ovld __cnfn select(int a, int b, int c);
+uint __ovld __cnfn select(uint a, uint b, int c);
+int2 __ovld __cnfn select(int2 a, int2 b, int2 c);
+uint2 __ovld __cnfn select(uint2 a, uint2 b, int2 c);
+int3 __ovld __cnfn select(int3 a, int3 b, int3 c);
+uint3 __ovld __cnfn select(uint3 a, uint3 b, int3 c);
+int4 __ovld __cnfn select(int4 a, int4 b, int4 c);
+uint4 __ovld __cnfn select(uint4 a, uint4 b, int4 c);
+int8 __ovld __cnfn select(int8 a, int8 b, int8 c);
+uint8 __ovld __cnfn select(uint8 a, uint8 b, int8 c);
+int16 __ovld __cnfn select(int16 a, int16 b, int16 c);
+uint16 __ovld __cnfn select(uint16 a, uint16 b, int16 c);
+long __ovld __cnfn select(long a, long b, int c);
+ulong __ovld __cnfn select(ulong a, ulong b, int c);
+long2 __ovld __cnfn select(long2 a, long2 b, int2 c);
+ulong2 __ovld __cnfn select(ulong2 a, ulong2 b, int2 c);
+long3 __ovld __cnfn select(long3 a, long3 b, int3 c);
+ulong3 __ovld __cnfn select(ulong3 a, ulong3 b, int3 c);
+long4 __ovld __cnfn select(long4 a, long4 b, int4 c);
+ulong4 __ovld __cnfn select(ulong4 a, ulong4 b, int4 c);
+long8 __ovld __cnfn select(long8 a, long8 b, int8 c);
+ulong8 __ovld __cnfn select(ulong8 a, ulong8 b, int8 c);
+long16 __ovld __cnfn select(long16 a, long16 b, int16 c);
+ulong16 __ovld __cnfn select(ulong16 a, ulong16 b, int16 c);
+float __ovld __cnfn select(float a, float b, int c);
+float2 __ovld __cnfn select(float2 a, float2 b, int2 c);
+float3 __ovld __cnfn select(float3 a, float3 b, int3 c);
+float4 __ovld __cnfn select(float4 a, float4 b, int4 c);
+float8 __ovld __cnfn select(float8 a, float8 b, int8 c);
+float16 __ovld __cnfn select(float16 a, float16 b, int16 c);
+char __ovld __cnfn select(char a, char b, long c);
+uchar __ovld __cnfn select(uchar a, uchar b, long c);
+char2 __ovld __cnfn select(char2 a, char2 b, long2 c);
+uchar2 __ovld __cnfn select(uchar2 a, uchar2 b, long2 c);
+char3 __ovld __cnfn select(char3 a, char3 b, long3 c);
+uchar3 __ovld __cnfn select(uchar3 a, uchar3 b, long3 c);
+char4 __ovld __cnfn select(char4 a, char4 b, long4 c);
+uchar4 __ovld __cnfn select(uchar4 a, uchar4 b, long4 c);
+char8 __ovld __cnfn select(char8 a, char8 b, long8 c);
+uchar8 __ovld __cnfn select(uchar8 a, uchar8 b, long8 c);
+char16 __ovld __cnfn select(char16 a, char16 b, long16 c);
+uchar16 __ovld __cnfn select(uchar16 a, uchar16 b, long16 c);
+short __ovld __cnfn select(short a, short b, long c);
+ushort __ovld __cnfn select(ushort a, ushort b, long c);
+short2 __ovld __cnfn select(short2 a, short2 b, long2 c);
+ushort2 __ovld __cnfn select(ushort2 a, ushort2 b, long2 c);
+short3 __ovld __cnfn select(short3 a, short3 b, long3 c);
+ushort3 __ovld __cnfn select(ushort3 a, ushort3 b, long3 c);
+short4 __ovld __cnfn select(short4 a, short4 b, long4 c);
+ushort4 __ovld __cnfn select(ushort4 a, ushort4 b, long4 c);
+short8 __ovld __cnfn select(short8 a, short8 b, long8 c);
+ushort8 __ovld __cnfn select(ushort8 a, ushort8 b, long8 c);
+short16 __ovld __cnfn select(short16 a, short16 b, long16 c);
+ushort16 __ovld __cnfn select(ushort16 a, ushort16 b, long16 c);
+int __ovld __cnfn select(int a, int b, long c);
+uint __ovld __cnfn select(uint a, uint b, long c);
+int2 __ovld __cnfn select(int2 a, int2 b, long2 c);
+uint2 __ovld __cnfn select(uint2 a, uint2 b, long2 c);
+int3 __ovld __cnfn select(int3 a, int3 b, long3 c);
+uint3 __ovld __cnfn select(uint3 a, uint3 b, long3 c);
+int4 __ovld __cnfn select(int4 a, int4 b, long4 c);
+uint4 __ovld __cnfn select(uint4 a, uint4 b, long4 c);
+int8 __ovld __cnfn select(int8 a, int8 b, long8 c);
+uint8 __ovld __cnfn select(uint8 a, uint8 b, long8 c);
+int16 __ovld __cnfn select(int16 a, int16 b, long16 c);
+uint16 __ovld __cnfn select(uint16 a, uint16 b, long16 c);
+long __ovld __cnfn select(long a, long b, long c);
+ulong __ovld __cnfn select(ulong a, ulong b, long c);
+long2 __ovld __cnfn select(long2 a, long2 b, long2 c);
+ulong2 __ovld __cnfn select(ulong2 a, ulong2 b, long2 c);
+long3 __ovld __cnfn select(long3 a, long3 b, long3 c);
+ulong3 __ovld __cnfn select(ulong3 a, ulong3 b, long3 c);
+long4 __ovld __cnfn select(long4 a, long4 b, long4 c);
+ulong4 __ovld __cnfn select(ulong4 a, ulong4 b, long4 c);
+long8 __ovld __cnfn select(long8 a, long8 b, long8 c);
+ulong8 __ovld __cnfn select(ulong8 a, ulong8 b, long8 c);
+long16 __ovld __cnfn select(long16 a, long16 b, long16 c);
+ulong16 __ovld __cnfn select(ulong16 a, ulong16 b, long16 c);
+float __ovld __cnfn select(float a, float b, long c);
+float2 __ovld __cnfn select(float2 a, float2 b, long2 c);
+float3 __ovld __cnfn select(float3 a, float3 b, long3 c);
+float4 __ovld __cnfn select(float4 a, float4 b, long4 c);
+float8 __ovld __cnfn select(float8 a, float8 b, long8 c);
+float16 __ovld __cnfn select(float16 a, float16 b, long16 c);
+char __ovld __cnfn select(char a, char b, uchar c);
+uchar __ovld __cnfn select(uchar a, uchar b, uchar c);
+char2 __ovld __cnfn select(char2 a, char2 b, uchar2 c);
+uchar2 __ovld __cnfn select(uchar2 a, uchar2 b, uchar2 c);
+char3 __ovld __cnfn select(char3 a, char3 b, uchar3 c);
+uchar3 __ovld __cnfn select(uchar3 a, uchar3 b, uchar3 c);
+char4 __ovld __cnfn select(char4 a, char4 b, uchar4 c);
+uchar4 __ovld __cnfn select(uchar4 a, uchar4 b, uchar4 c);
+char8 __ovld __cnfn select(char8 a, char8 b, uchar8 c);
+uchar8 __ovld __cnfn select(uchar8 a, uchar8 b, uchar8 c);
+char16 __ovld __cnfn select(char16 a, char16 b, uchar16 c);
+uchar16 __ovld __cnfn select(uchar16 a, uchar16 b, uchar16 c);
+short __ovld __cnfn select(short a, short b, uchar c);
+ushort __ovld __cnfn select(ushort a, ushort b, uchar c);
+short2 __ovld __cnfn select(short2 a, short2 b, uchar2 c);
+ushort2 __ovld __cnfn select(ushort2 a, ushort2 b, uchar2 c);
+short3 __ovld __cnfn select(short3 a, short3 b, uchar3 c);
+ushort3 __ovld __cnfn select(ushort3 a, ushort3 b, uchar3 c);
+short4 __ovld __cnfn select(short4 a, short4 b, uchar4 c);
+ushort4 __ovld __cnfn select(ushort4 a, ushort4 b, uchar4 c);
+short8 __ovld __cnfn select(short8 a, short8 b, uchar8 c);
+ushort8 __ovld __cnfn select(ushort8 a, ushort8 b, uchar8 c);
+short16 __ovld __cnfn select(short16 a, short16 b, uchar16 c);
+ushort16 __ovld __cnfn select(ushort16 a, ushort16 b, uchar16 c);
+int __ovld __cnfn select(int a, int b, uchar c);
+uint __ovld __cnfn select(uint a, uint b, uchar c);
+int2 __ovld __cnfn select(int2 a, int2 b, uchar2 c);
+uint2 __ovld __cnfn select(uint2 a, uint2 b, uchar2 c);
+int3 __ovld __cnfn select(int3 a, int3 b, uchar3 c);
+uint3 __ovld __cnfn select(uint3 a, uint3 b, uchar3 c);
+int4 __ovld __cnfn select(int4 a, int4 b, uchar4 c);
+uint4 __ovld __cnfn select(uint4 a, uint4 b, uchar4 c);
+int8 __ovld __cnfn select(int8 a, int8 b, uchar8 c);
+uint8 __ovld __cnfn select(uint8 a, uint8 b, uchar8 c);
+int16 __ovld __cnfn select(int16 a, int16 b, uchar16 c);
+uint16 __ovld __cnfn select(uint16 a, uint16 b, uchar16 c);
+long __ovld __cnfn select(long a, long b, uchar c);
+ulong __ovld __cnfn select(ulong a, ulong b, uchar c);
+long2 __ovld __cnfn select(long2 a, long2 b, uchar2 c);
+ulong2 __ovld __cnfn select(ulong2 a, ulong2 b, uchar2 c);
+long3 __ovld __cnfn select(long3 a, long3 b, uchar3 c);
+ulong3 __ovld __cnfn select(ulong3 a, ulong3 b, uchar3 c);
+long4 __ovld __cnfn select(long4 a, long4 b, uchar4 c);
+ulong4 __ovld __cnfn select(ulong4 a, ulong4 b, uchar4 c);
+long8 __ovld __cnfn select(long8 a, long8 b, uchar8 c);
+ulong8 __ovld __cnfn select(ulong8 a, ulong8 b, uchar8 c);
+long16 __ovld __cnfn select(long16 a, long16 b, uchar16 c);
+ulong16 __ovld __cnfn select(ulong16 a, ulong16 b, uchar16 c);
+float __ovld __cnfn select(float a, float b, uchar c);
+float2 __ovld __cnfn select(float2 a, float2 b, uchar2 c);
+float3 __ovld __cnfn select(float3 a, float3 b, uchar3 c);
+float4 __ovld __cnfn select(float4 a, float4 b, uchar4 c);
+float8 __ovld __cnfn select(float8 a, float8 b, uchar8 c);
+float16 __ovld __cnfn select(float16 a, float16 b, uchar16 c);
+char __ovld __cnfn select(char a, char b, ushort c);
+uchar __ovld __cnfn select(uchar a, uchar b, ushort c);
+char2 __ovld __cnfn select(char2 a, char2 b, ushort2 c);
+uchar2 __ovld __cnfn select(uchar2 a, uchar2 b, ushort2 c);
+char3 __ovld __cnfn select(char3 a, char3 b, ushort3 c);
+uchar3 __ovld __cnfn select(uchar3 a, uchar3 b, ushort3 c);
+char4 __ovld __cnfn select(char4 a, char4 b, ushort4 c);
+uchar4 __ovld __cnfn select(uchar4 a, uchar4 b, ushort4 c);
+char8 __ovld __cnfn select(char8 a, char8 b, ushort8 c);
+uchar8 __ovld __cnfn select(uchar8 a, uchar8 b, ushort8 c);
+char16 __ovld __cnfn select(char16 a, char16 b, ushort16 c);
+uchar16 __ovld __cnfn select(uchar16 a, uchar16 b, ushort16 c);
+short __ovld __cnfn select(short a, short b, ushort c);
+ushort __ovld __cnfn select(ushort a, ushort b, ushort c);
+short2 __ovld __cnfn select(short2 a, short2 b, ushort2 c);
+ushort2 __ovld __cnfn select(ushort2 a, ushort2 b, ushort2 c);
+short3 __ovld __cnfn select(short3 a, short3 b, ushort3 c);
+ushort3 __ovld __cnfn select(ushort3 a, ushort3 b, ushort3 c);
+short4 __ovld __cnfn select(short4 a, short4 b, ushort4 c);
+ushort4 __ovld __cnfn select(ushort4 a, ushort4 b, ushort4 c);
+short8 __ovld __cnfn select(short8 a, short8 b, ushort8 c);
+ushort8 __ovld __cnfn select(ushort8 a, ushort8 b, ushort8 c);
+short16 __ovld __cnfn select(short16 a, short16 b, ushort16 c);
+ushort16 __ovld __cnfn select(ushort16 a, ushort16 b, ushort16 c);
+int __ovld __cnfn select(int a, int b, ushort c);
+uint __ovld __cnfn select(uint a, uint b, ushort c);
+int2 __ovld __cnfn select(int2 a, int2 b, ushort2 c);
+uint2 __ovld __cnfn select(uint2 a, uint2 b, ushort2 c);
+int3 __ovld __cnfn select(int3 a, int3 b, ushort3 c);
+uint3 __ovld __cnfn select(uint3 a, uint3 b, ushort3 c);
+int4 __ovld __cnfn select(int4 a, int4 b, ushort4 c);
+uint4 __ovld __cnfn select(uint4 a, uint4 b, ushort4 c);
+int8 __ovld __cnfn select(int8 a, int8 b, ushort8 c);
+uint8 __ovld __cnfn select(uint8 a, uint8 b, ushort8 c);
+int16 __ovld __cnfn select(int16 a, int16 b, ushort16 c);
+uint16 __ovld __cnfn select(uint16 a, uint16 b, ushort16 c);
+long __ovld __cnfn select(long a, long b, ushort c);
+ulong __ovld __cnfn select(ulong a, ulong b, ushort c);
+long2 __ovld __cnfn select(long2 a, long2 b, ushort2 c);
+ulong2 __ovld __cnfn select(ulong2 a, ulong2 b, ushort2 c);
+long3 __ovld __cnfn select(long3 a, long3 b, ushort3 c);
+ulong3 __ovld __cnfn select(ulong3 a, ulong3 b, ushort3 c);
+long4 __ovld __cnfn select(long4 a, long4 b, ushort4 c);
+ulong4 __ovld __cnfn select(ulong4 a, ulong4 b, ushort4 c);
+long8 __ovld __cnfn select(long8 a, long8 b, ushort8 c);
+ulong8 __ovld __cnfn select(ulong8 a, ulong8 b, ushort8 c);
+long16 __ovld __cnfn select(long16 a, long16 b, ushort16 c);
+ulong16 __ovld __cnfn select(ulong16 a, ulong16 b, ushort16 c);
+float __ovld __cnfn select(float a, float b, ushort c);
+float2 __ovld __cnfn select(float2 a, float2 b, ushort2 c);
+float3 __ovld __cnfn select(float3 a, float3 b, ushort3 c);
+float4 __ovld __cnfn select(float4 a, float4 b, ushort4 c);
+float8 __ovld __cnfn select(float8 a, float8 b, ushort8 c);
+float16 __ovld __cnfn select(float16 a, float16 b, ushort16 c);
+char __ovld __cnfn select(char a, char b, uint c);
+uchar __ovld __cnfn select(uchar a, uchar b, uint c);
+char2 __ovld __cnfn select(char2 a, char2 b, uint2 c);
+uchar2 __ovld __cnfn select(uchar2 a, uchar2 b, uint2 c);
+char3 __ovld __cnfn select(char3 a, char3 b, uint3 c);
+uchar3 __ovld __cnfn select(uchar3 a, uchar3 b, uint3 c);
+char4 __ovld __cnfn select(char4 a, char4 b, uint4 c);
+uchar4 __ovld __cnfn select(uchar4 a, uchar4 b, uint4 c);
+char8 __ovld __cnfn select(char8 a, char8 b, uint8 c);
+uchar8 __ovld __cnfn select(uchar8 a, uchar8 b, uint8 c);
+char16 __ovld __cnfn select(char16 a, char16 b, uint16 c);
+uchar16 __ovld __cnfn select(uchar16 a, uchar16 b, uint16 c);
+short __ovld __cnfn select(short a, short b, uint c);
+ushort __ovld __cnfn select(ushort a, ushort b, uint c);
+short2 __ovld __cnfn select(short2 a, short2 b, uint2 c);
+ushort2 __ovld __cnfn select(ushort2 a, ushort2 b, uint2 c);
+short3 __ovld __cnfn select(short3 a, short3 b, uint3 c);
+ushort3 __ovld __cnfn select(ushort3 a, ushort3 b, uint3 c);
+short4 __ovld __cnfn select(short4 a, short4 b, uint4 c);
+ushort4 __ovld __cnfn select(ushort4 a, ushort4 b, uint4 c);
+short8 __ovld __cnfn select(short8 a, short8 b, uint8 c);
+ushort8 __ovld __cnfn select(ushort8 a, ushort8 b, uint8 c);
+short16 __ovld __cnfn select(short16 a, short16 b, uint16 c);
+ushort16 __ovld __cnfn select(ushort16 a, ushort16 b, uint16 c);
+int __ovld __cnfn select(int a, int b, uint c);
+uint __ovld __cnfn select(uint a, uint b, uint c);
+int2 __ovld __cnfn select(int2 a, int2 b, uint2 c);
+uint2 __ovld __cnfn select(uint2 a, uint2 b, uint2 c);
+int3 __ovld __cnfn select(int3 a, int3 b, uint3 c);
+uint3 __ovld __cnfn select(uint3 a, uint3 b, uint3 c);
+int4 __ovld __cnfn select(int4 a, int4 b, uint4 c);
+uint4 __ovld __cnfn select(uint4 a, uint4 b, uint4 c);
+int8 __ovld __cnfn select(int8 a, int8 b, uint8 c);
+uint8 __ovld __cnfn select(uint8 a, uint8 b, uint8 c);
+int16 __ovld __cnfn select(int16 a, int16 b, uint16 c);
+uint16 __ovld __cnfn select(uint16 a, uint16 b, uint16 c);
+long __ovld __cnfn select(long a, long b, uint c);
+ulong __ovld __cnfn select(ulong a, ulong b, uint c);
+long2 __ovld __cnfn select(long2 a, long2 b, uint2 c);
+ulong2 __ovld __cnfn select(ulong2 a, ulong2 b, uint2 c);
+long3 __ovld __cnfn select(long3 a, long3 b, uint3 c);
+ulong3 __ovld __cnfn select(ulong3 a, ulong3 b, uint3 c);
+long4 __ovld __cnfn select(long4 a, long4 b, uint4 c);
+ulong4 __ovld __cnfn select(ulong4 a, ulong4 b, uint4 c);
+long8 __ovld __cnfn select(long8 a, long8 b, uint8 c);
+ulong8 __ovld __cnfn select(ulong8 a, ulong8 b, uint8 c);
+long16 __ovld __cnfn select(long16 a, long16 b, uint16 c);
+ulong16 __ovld __cnfn select(ulong16 a, ulong16 b, uint16 c);
+float __ovld __cnfn select(float a, float b, uint c);
+float2 __ovld __cnfn select(float2 a, float2 b, uint2 c);
+float3 __ovld __cnfn select(float3 a, float3 b, uint3 c);
+float4 __ovld __cnfn select(float4 a, float4 b, uint4 c);
+float8 __ovld __cnfn select(float8 a, float8 b, uint8 c);
+float16 __ovld __cnfn select(float16 a, float16 b, uint16 c);
+char __ovld __cnfn select(char a, char b, ulong c);
+uchar __ovld __cnfn select(uchar a, uchar b, ulong c);
+char2 __ovld __cnfn select(char2 a, char2 b, ulong2 c);
+uchar2 __ovld __cnfn select(uchar2 a, uchar2 b, ulong2 c);
+char3 __ovld __cnfn select(char3 a, char3 b, ulong3 c);
+uchar3 __ovld __cnfn select(uchar3 a, uchar3 b, ulong3 c);
+char4 __ovld __cnfn select(char4 a, char4 b, ulong4 c);
+uchar4 __ovld __cnfn select(uchar4 a, uchar4 b, ulong4 c);
+char8 __ovld __cnfn select(char8 a, char8 b, ulong8 c);
+uchar8 __ovld __cnfn select(uchar8 a, uchar8 b, ulong8 c);
+char16 __ovld __cnfn select(char16 a, char16 b, ulong16 c);
+uchar16 __ovld __cnfn select(uchar16 a, uchar16 b, ulong16 c);
+short __ovld __cnfn select(short a, short b, ulong c);
+ushort __ovld __cnfn select(ushort a, ushort b, ulong c);
+short2 __ovld __cnfn select(short2 a, short2 b, ulong2 c);
+ushort2 __ovld __cnfn select(ushort2 a, ushort2 b, ulong2 c);
+short3 __ovld __cnfn select(short3 a, short3 b, ulong3 c);
+ushort3 __ovld __cnfn select(ushort3 a, ushort3 b, ulong3 c);
+short4 __ovld __cnfn select(short4 a, short4 b, ulong4 c);
+ushort4 __ovld __cnfn select(ushort4 a, ushort4 b, ulong4 c);
+short8 __ovld __cnfn select(short8 a, short8 b, ulong8 c);
+ushort8 __ovld __cnfn select(ushort8 a, ushort8 b, ulong8 c);
+short16 __ovld __cnfn select(short16 a, short16 b, ulong16 c);
+ushort16 __ovld __cnfn select(ushort16 a, ushort16 b, ulong16 c);
+int __ovld __cnfn select(int a, int b, ulong c);
+uint __ovld __cnfn select(uint a, uint b, ulong c);
+int2 __ovld __cnfn select(int2 a, int2 b, ulong2 c);
+uint2 __ovld __cnfn select(uint2 a, uint2 b, ulong2 c);
+int3 __ovld __cnfn select(int3 a, int3 b, ulong3 c);
+uint3 __ovld __cnfn select(uint3 a, uint3 b, ulong3 c);
+int4 __ovld __cnfn select(int4 a, int4 b, ulong4 c);
+uint4 __ovld __cnfn select(uint4 a, uint4 b, ulong4 c);
+int8 __ovld __cnfn select(int8 a, int8 b, ulong8 c);
+uint8 __ovld __cnfn select(uint8 a, uint8 b, ulong8 c);
+int16 __ovld __cnfn select(int16 a, int16 b, ulong16 c);
+uint16 __ovld __cnfn select(uint16 a, uint16 b, ulong16 c);
+long __ovld __cnfn select(long a, long b, ulong c);
+ulong __ovld __cnfn select(ulong a, ulong b, ulong c);
+long2 __ovld __cnfn select(long2 a, long2 b, ulong2 c);
+ulong2 __ovld __cnfn select(ulong2 a, ulong2 b, ulong2 c);
+long3 __ovld __cnfn select(long3 a, long3 b, ulong3 c);
+ulong3 __ovld __cnfn select(ulong3 a, ulong3 b, ulong3 c);
+long4 __ovld __cnfn select(long4 a, long4 b, ulong4 c);
+ulong4 __ovld __cnfn select(ulong4 a, ulong4 b, ulong4 c);
+long8 __ovld __cnfn select(long8 a, long8 b, ulong8 c);
+ulong8 __ovld __cnfn select(ulong8 a, ulong8 b, ulong8 c);
+long16 __ovld __cnfn select(long16 a, long16 b, ulong16 c);
+ulong16 __ovld __cnfn select(ulong16 a, ulong16 b, ulong16 c);
+float __ovld __cnfn select(float a, float b, ulong c);
+float2 __ovld __cnfn select(float2 a, float2 b, ulong2 c);
+float3 __ovld __cnfn select(float3 a, float3 b, ulong3 c);
+float4 __ovld __cnfn select(float4 a, float4 b, ulong4 c);
+float8 __ovld __cnfn select(float8 a, float8 b, ulong8 c);
+float16 __ovld __cnfn select(float16 a, float16 b, ulong16 c);
+#ifdef cl_khr_fp64
+double __ovld __cnfn select(double a, double b, long c);
+double2 __ovld __cnfn select(double2 a, double2 b, long2 c);
+double3 __ovld __cnfn select(double3 a, double3 b, long3 c);
+double4 __ovld __cnfn select(double4 a, double4 b, long4 c);
+double8 __ovld __cnfn select(double8 a, double8 b, long8 c);
+double16 __ovld __cnfn select(double16 a, double16 b, long16 c);
+double __ovld __cnfn select(double a, double b, ulong c);
+double2 __ovld __cnfn select(double2 a, double2 b, ulong2 c);
+double3 __ovld __cnfn select(double3 a, double3 b, ulong3 c);
+double4 __ovld __cnfn select(double4 a, double4 b, ulong4 c);
+double8 __ovld __cnfn select(double8 a, double8 b, ulong8 c);
+double16 __ovld __cnfn select(double16 a, double16 b, ulong16 c);
+#endif //cl_khr_fp64
+#ifdef cl_khr_fp16
+half __ovld __cnfn select(half a, half b, short c);
+half2 __ovld __cnfn select(half2 a, half2 b, short2 c);
+half3 __ovld __cnfn select(half3 a, half3 b, short3 c);
+half4 __ovld __cnfn select(half4 a, half4 b, short4 c);
+half8 __ovld __cnfn select(half8 a, half8 b, short8 c);
+half16 __ovld __cnfn select(half16 a, half16 b, short16 c);
+half __ovld __cnfn select(half a, half b, ushort c);
+half2 __ovld __cnfn select(half2 a, half2 b, ushort2 c);
+half3 __ovld __cnfn select(half3 a, half3 b, ushort3 c);
+half4 __ovld __cnfn select(half4 a, half4 b, ushort4 c);
+half8 __ovld __cnfn select(half8 a, half8 b, ushort8 c);
+half16 __ovld __cnfn select(half16 a, half16 b, ushort16 c);
+#endif //cl_khr_fp16
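+
+/*
+ * Illustrative sketch, not part of the original header: for vector
+ * arguments, select picks b[i] wherever the most significant bit of
+ * c[i] is set and a[i] otherwise, so the all-ones "true" result of a
+ * vector relational function can drive it directly. The variable x
+ * below is a hypothetical float4:
+ *
+ *   float4 lo = (float4)(0.0f), hi = (float4)(1.0f);
+ *   int4 mask = isgreater(x, (float4)(0.5f)); // -1 per true lane, 0 otherwise
+ *   float4 y = select(lo, hi, mask);          // hi where x > 0.5f, else lo
+ */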
+
+// OpenCL v1.1 s6.11.7, v1.2 s6.12.7, v2.0 s6.13.7 - Vector Data Load and Store Functions
+// OpenCL extensions v1.1 s9.6.6, v1.2 s9.5.6, v2.0 s9.4.6 - Vector Data Load and Store Functions for Half Type
+/**
+ * Use generic type gentype to indicate the built-in data types
+ * char, uchar, short, ushort, int, uint, long, ulong, float,
+ * double or half.
+ *
+ * vloadn returns sizeof (gentypen) bytes of data read from the address (p + (offset * n)).
+ *
+ * vstoren writes the sizeof (gentypen) bytes given by data to the address (p + (offset * n)).
+ *
+ * The address computed as (p + (offset * n)) must be
+ * 8-bit aligned if gentype is char, uchar;
+ * 16-bit aligned if gentype is short, ushort, half;
+ * 32-bit aligned if gentype is int, uint, float;
+ * 64-bit aligned if gentype is long, ulong, double.
+ */
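+
+/*
+ * Illustrative sketch, not part of the original header; the kernel below
+ * is hypothetical. For n = 3 the computed address is p + (offset * 3),
+ * so consecutive offsets walk densely packed 3-element records:
+ *
+ *   __kernel void scale3(__global const float *in, __global float *out) {
+ *     size_t i = get_global_id(0);
+ *     float3 v = vload3(i, in);     // reads in[3*i] .. in[3*i + 2]
+ *     vstore3(v * 2.0f, i, out);    // writes out[3*i] .. out[3*i + 2]
+ *   }
+ */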
+
+char2 __ovld vload2(size_t offset, const __constant char *p);
+uchar2 __ovld vload2(size_t offset, const __constant uchar *p);
+short2 __ovld vload2(size_t offset, const __constant short *p);
+ushort2 __ovld vload2(size_t offset, const __constant ushort *p);
+int2 __ovld vload2(size_t offset, const __constant int *p);
+uint2 __ovld vload2(size_t offset, const __constant uint *p);
+long2 __ovld vload2(size_t offset, const __constant long *p);
+ulong2 __ovld vload2(size_t offset, const __constant ulong *p);
+float2 __ovld vload2(size_t offset, const __constant float *p);
+char3 __ovld vload3(size_t offset, const __constant char *p);
+uchar3 __ovld vload3(size_t offset, const __constant uchar *p);
+short3 __ovld vload3(size_t offset, const __constant short *p);
+ushort3 __ovld vload3(size_t offset, const __constant ushort *p);
+int3 __ovld vload3(size_t offset, const __constant int *p);
+uint3 __ovld vload3(size_t offset, const __constant uint *p);
+long3 __ovld vload3(size_t offset, const __constant long *p);
+ulong3 __ovld vload3(size_t offset, const __constant ulong *p);
+float3 __ovld vload3(size_t offset, const __constant float *p);
+char4 __ovld vload4(size_t offset, const __constant char *p);
+uchar4 __ovld vload4(size_t offset, const __constant uchar *p);
+short4 __ovld vload4(size_t offset, const __constant short *p);
+ushort4 __ovld vload4(size_t offset, const __constant ushort *p);
+int4 __ovld vload4(size_t offset, const __constant int *p);
+uint4 __ovld vload4(size_t offset, const __constant uint *p);
+long4 __ovld vload4(size_t offset, const __constant long *p);
+ulong4 __ovld vload4(size_t offset, const __constant ulong *p);
+float4 __ovld vload4(size_t offset, const __constant float *p);
+char8 __ovld vload8(size_t offset, const __constant char *p);
+uchar8 __ovld vload8(size_t offset, const __constant uchar *p);
+short8 __ovld vload8(size_t offset, const __constant short *p);
+ushort8 __ovld vload8(size_t offset, const __constant ushort *p);
+int8 __ovld vload8(size_t offset, const __constant int *p);
+uint8 __ovld vload8(size_t offset, const __constant uint *p);
+long8 __ovld vload8(size_t offset, const __constant long *p);
+ulong8 __ovld vload8(size_t offset, const __constant ulong *p);
+float8 __ovld vload8(size_t offset, const __constant float *p);
+char16 __ovld vload16(size_t offset, const __constant char *p);
+uchar16 __ovld vload16(size_t offset, const __constant uchar *p);
+short16 __ovld vload16(size_t offset, const __constant short *p);
+ushort16 __ovld vload16(size_t offset, const __constant ushort *p);
+int16 __ovld vload16(size_t offset, const __constant int *p);
+uint16 __ovld vload16(size_t offset, const __constant uint *p);
+long16 __ovld vload16(size_t offset, const __constant long *p);
+ulong16 __ovld vload16(size_t offset, const __constant ulong *p);
+float16 __ovld vload16(size_t offset, const __constant float *p);
+#ifdef cl_khr_fp64
+double2 __ovld vload2(size_t offset, const __constant double *p);
+double3 __ovld vload3(size_t offset, const __constant double *p);
+double4 __ovld vload4(size_t offset, const __constant double *p);
+double8 __ovld vload8(size_t offset, const __constant double *p);
+double16 __ovld vload16(size_t offset, const __constant double *p);
+#endif //cl_khr_fp64
+
+#ifdef cl_khr_fp16
+half __ovld vload(size_t offset, const __constant half *p);
+half2 __ovld vload2(size_t offset, const __constant half *p);
+half3 __ovld vload3(size_t offset, const __constant half *p);
+half4 __ovld vload4(size_t offset, const __constant half *p);
+half8 __ovld vload8(size_t offset, const __constant half *p);
+half16 __ovld vload16(size_t offset, const __constant half *p);
+#endif //cl_khr_fp16
+
+#if __OPENCL_C_VERSION__ >= CL_VERSION_2_0
+char2 __ovld vload2(size_t offset, const char *p);
+uchar2 __ovld vload2(size_t offset, const uchar *p);
+short2 __ovld vload2(size_t offset, const short *p);
+ushort2 __ovld vload2(size_t offset, const ushort *p);
+int2 __ovld vload2(size_t offset, const int *p);
+uint2 __ovld vload2(size_t offset, const uint *p);
+long2 __ovld vload2(size_t offset, const long *p);
+ulong2 __ovld vload2(size_t offset, const ulong *p);
+float2 __ovld vload2(size_t offset, const float *p);
+char3 __ovld vload3(size_t offset, const char *p);
+uchar3 __ovld vload3(size_t offset, const uchar *p);
+short3 __ovld vload3(size_t offset, const short *p);
+ushort3 __ovld vload3(size_t offset, const ushort *p);
+int3 __ovld vload3(size_t offset, const int *p);
+uint3 __ovld vload3(size_t offset, const uint *p);
+long3 __ovld vload3(size_t offset, const long *p);
+ulong3 __ovld vload3(size_t offset, const ulong *p);
+float3 __ovld vload3(size_t offset, const float *p);
+char4 __ovld vload4(size_t offset, const char *p);
+uchar4 __ovld vload4(size_t offset, const uchar *p);
+short4 __ovld vload4(size_t offset, const short *p);
+ushort4 __ovld vload4(size_t offset, const ushort *p);
+int4 __ovld vload4(size_t offset, const int *p);
+uint4 __ovld vload4(size_t offset, const uint *p);
+long4 __ovld vload4(size_t offset, const long *p);
+ulong4 __ovld vload4(size_t offset, const ulong *p);
+float4 __ovld vload4(size_t offset, const float *p);
+char8 __ovld vload8(size_t offset, const char *p);
+uchar8 __ovld vload8(size_t offset, const uchar *p);
+short8 __ovld vload8(size_t offset, const short *p);
+ushort8 __ovld vload8(size_t offset, const ushort *p);
+int8 __ovld vload8(size_t offset, const int *p);
+uint8 __ovld vload8(size_t offset, const uint *p);
+long8 __ovld vload8(size_t offset, const long *p);
+ulong8 __ovld vload8(size_t offset, const ulong *p);
+float8 __ovld vload8(size_t offset, const float *p);
+char16 __ovld vload16(size_t offset, const char *p);
+uchar16 __ovld vload16(size_t offset, const uchar *p);
+short16 __ovld vload16(size_t offset, const short *p);
+ushort16 __ovld vload16(size_t offset, const ushort *p);
+int16 __ovld vload16(size_t offset, const int *p);
+uint16 __ovld vload16(size_t offset, const uint *p);
+long16 __ovld vload16(size_t offset, const long *p);
+ulong16 __ovld vload16(size_t offset, const ulong *p);
+float16 __ovld vload16(size_t offset, const float *p);
+
+#ifdef cl_khr_fp64
+double2 __ovld vload2(size_t offset, const double *p);
+double3 __ovld vload3(size_t offset, const double *p);
+double4 __ovld vload4(size_t offset, const double *p);
+double8 __ovld vload8(size_t offset, const double *p);
+double16 __ovld vload16(size_t offset, const double *p);
+#endif //cl_khr_fp64
+
+#ifdef cl_khr_fp16
+half __ovld vload(size_t offset, const half *p);
+half2 __ovld vload2(size_t offset, const half *p);
+half3 __ovld vload3(size_t offset, const half *p);
+half4 __ovld vload4(size_t offset, const half *p);
+half8 __ovld vload8(size_t offset, const half *p);
+half16 __ovld vload16(size_t offset, const half *p);
+#endif //cl_khr_fp16
+#else
+char2 __ovld vload2(size_t offset, const __global char *p);
+uchar2 __ovld vload2(size_t offset, const __global uchar *p);
+short2 __ovld vload2(size_t offset, const __global short *p);
+ushort2 __ovld vload2(size_t offset, const __global ushort *p);
+int2 __ovld vload2(size_t offset, const __global int *p);
+uint2 __ovld vload2(size_t offset, const __global uint *p);
+long2 __ovld vload2(size_t offset, const __global long *p);
+ulong2 __ovld vload2(size_t offset, const __global ulong *p);
+float2 __ovld vload2(size_t offset, const __global float *p);
+char3 __ovld vload3(size_t offset, const __global char *p);
+uchar3 __ovld vload3(size_t offset, const __global uchar *p);
+short3 __ovld vload3(size_t offset, const __global short *p);
+ushort3 __ovld vload3(size_t offset, const __global ushort *p);
+int3 __ovld vload3(size_t offset, const __global int *p);
+uint3 __ovld vload3(size_t offset, const __global uint *p);
+long3 __ovld vload3(size_t offset, const __global long *p);
+ulong3 __ovld vload3(size_t offset, const __global ulong *p);
+float3 __ovld vload3(size_t offset, const __global float *p);
+char4 __ovld vload4(size_t offset, const __global char *p);
+uchar4 __ovld vload4(size_t offset, const __global uchar *p);
+short4 __ovld vload4(size_t offset, const __global short *p);
+ushort4 __ovld vload4(size_t offset, const __global ushort *p);
+int4 __ovld vload4(size_t offset, const __global int *p);
+uint4 __ovld vload4(size_t offset, const __global uint *p);
+long4 __ovld vload4(size_t offset, const __global long *p);
+ulong4 __ovld vload4(size_t offset, const __global ulong *p);
+float4 __ovld vload4(size_t offset, const __global float *p);
+char8 __ovld vload8(size_t offset, const __global char *p);
+uchar8 __ovld vload8(size_t offset, const __global uchar *p);
+short8 __ovld vload8(size_t offset, const __global short *p);
+ushort8 __ovld vload8(size_t offset, const __global ushort *p);
+int8 __ovld vload8(size_t offset, const __global int *p);
+uint8 __ovld vload8(size_t offset, const __global uint *p);
+long8 __ovld vload8(size_t offset, const __global long *p);
+ulong8 __ovld vload8(size_t offset, const __global ulong *p);
+float8 __ovld vload8(size_t offset, const __global float *p);
+char16 __ovld vload16(size_t offset, const __global char *p);
+uchar16 __ovld vload16(size_t offset, const __global uchar *p);
+short16 __ovld vload16(size_t offset, const __global short *p);
+ushort16 __ovld vload16(size_t offset, const __global ushort *p);
+int16 __ovld vload16(size_t offset, const __global int *p);
+uint16 __ovld vload16(size_t offset, const __global uint *p);
+long16 __ovld vload16(size_t offset, const __global long *p);
+ulong16 __ovld vload16(size_t offset, const __global ulong *p);
+float16 __ovld vload16(size_t offset, const __global float *p);
+char2 __ovld vload2(size_t offset, const __local char *p);
+uchar2 __ovld vload2(size_t offset, const __local uchar *p);
+short2 __ovld vload2(size_t offset, const __local short *p);
+ushort2 __ovld vload2(size_t offset, const __local ushort *p);
+int2 __ovld vload2(size_t offset, const __local int *p);
+uint2 __ovld vload2(size_t offset, const __local uint *p);
+long2 __ovld vload2(size_t offset, const __local long *p);
+ulong2 __ovld vload2(size_t offset, const __local ulong *p);
+float2 __ovld vload2(size_t offset, const __local float *p);
+char3 __ovld vload3(size_t offset, const __local char *p);
+uchar3 __ovld vload3(size_t offset, const __local uchar *p);
+short3 __ovld vload3(size_t offset, const __local short *p);
+ushort3 __ovld vload3(size_t offset, const __local ushort *p);
+int3 __ovld vload3(size_t offset, const __local int *p);
+uint3 __ovld vload3(size_t offset, const __local uint *p);
+long3 __ovld vload3(size_t offset, const __local long *p);
+ulong3 __ovld vload3(size_t offset, const __local ulong *p);
+float3 __ovld vload3(size_t offset, const __local float *p);
+char4 __ovld vload4(size_t offset, const __local char *p);
+uchar4 __ovld vload4(size_t offset, const __local uchar *p);
+short4 __ovld vload4(size_t offset, const __local short *p);
+ushort4 __ovld vload4(size_t offset, const __local ushort *p);
+int4 __ovld vload4(size_t offset, const __local int *p);
+uint4 __ovld vload4(size_t offset, const __local uint *p);
+long4 __ovld vload4(size_t offset, const __local long *p);
+ulong4 __ovld vload4(size_t offset, const __local ulong *p);
+float4 __ovld vload4(size_t offset, const __local float *p);
+char8 __ovld vload8(size_t offset, const __local char *p);
+uchar8 __ovld vload8(size_t offset, const __local uchar *p);
+short8 __ovld vload8(size_t offset, const __local short *p);
+ushort8 __ovld vload8(size_t offset, const __local ushort *p);
+int8 __ovld vload8(size_t offset, const __local int *p);
+uint8 __ovld vload8(size_t offset, const __local uint *p);
+long8 __ovld vload8(size_t offset, const __local long *p);
+ulong8 __ovld vload8(size_t offset, const __local ulong *p);
+float8 __ovld vload8(size_t offset, const __local float *p);
+char16 __ovld vload16(size_t offset, const __local char *p);
+uchar16 __ovld vload16(size_t offset, const __local uchar *p);
+short16 __ovld vload16(size_t offset, const __local short *p);
+ushort16 __ovld vload16(size_t offset, const __local ushort *p);
+int16 __ovld vload16(size_t offset, const __local int *p);
+uint16 __ovld vload16(size_t offset, const __local uint *p);
+long16 __ovld vload16(size_t offset, const __local long *p);
+ulong16 __ovld vload16(size_t offset, const __local ulong *p);
+float16 __ovld vload16(size_t offset, const __local float *p);
+char2 __ovld vload2(size_t offset, const __private char *p);
+uchar2 __ovld vload2(size_t offset, const __private uchar *p);
+short2 __ovld vload2(size_t offset, const __private short *p);
+ushort2 __ovld vload2(size_t offset, const __private ushort *p);
+int2 __ovld vload2(size_t offset, const __private int *p);
+uint2 __ovld vload2(size_t offset, const __private uint *p);
+long2 __ovld vload2(size_t offset, const __private long *p);
+ulong2 __ovld vload2(size_t offset, const __private ulong *p);
+float2 __ovld vload2(size_t offset, const __private float *p);
+char3 __ovld vload3(size_t offset, const __private char *p);
+uchar3 __ovld vload3(size_t offset, const __private uchar *p);
+short3 __ovld vload3(size_t offset, const __private short *p);
+ushort3 __ovld vload3(size_t offset, const __private ushort *p);
+int3 __ovld vload3(size_t offset, const __private int *p);
+uint3 __ovld vload3(size_t offset, const __private uint *p);
+long3 __ovld vload3(size_t offset, const __private long *p);
+ulong3 __ovld vload3(size_t offset, const __private ulong *p);
+float3 __ovld vload3(size_t offset, const __private float *p);
+char4 __ovld vload4(size_t offset, const __private char *p);
+uchar4 __ovld vload4(size_t offset, const __private uchar *p);
+short4 __ovld vload4(size_t offset, const __private short *p);
+ushort4 __ovld vload4(size_t offset, const __private ushort *p);
+int4 __ovld vload4(size_t offset, const __private int *p);
+uint4 __ovld vload4(size_t offset, const __private uint *p);
+long4 __ovld vload4(size_t offset, const __private long *p);
+ulong4 __ovld vload4(size_t offset, const __private ulong *p);
+float4 __ovld vload4(size_t offset, const __private float *p);
+char8 __ovld vload8(size_t offset, const __private char *p);
+uchar8 __ovld vload8(size_t offset, const __private uchar *p);
+short8 __ovld vload8(size_t offset, const __private short *p);
+ushort8 __ovld vload8(size_t offset, const __private ushort *p);
+int8 __ovld vload8(size_t offset, const __private int *p);
+uint8 __ovld vload8(size_t offset, const __private uint *p);
+long8 __ovld vload8(size_t offset, const __private long *p);
+ulong8 __ovld vload8(size_t offset, const __private ulong *p);
+float8 __ovld vload8(size_t offset, const __private float *p);
+char16 __ovld vload16(size_t offset, const __private char *p);
+uchar16 __ovld vload16(size_t offset, const __private uchar *p);
+short16 __ovld vload16(size_t offset, const __private short *p);
+ushort16 __ovld vload16(size_t offset, const __private ushort *p);
+int16 __ovld vload16(size_t offset, const __private int *p);
+uint16 __ovld vload16(size_t offset, const __private uint *p);
+long16 __ovld vload16(size_t offset, const __private long *p);
+ulong16 __ovld vload16(size_t offset, const __private ulong *p);
+float16 __ovld vload16(size_t offset, const __private float *p);
+
+#ifdef cl_khr_fp64
+double2 __ovld vload2(size_t offset, const __global double *p);
+double3 __ovld vload3(size_t offset, const __global double *p);
+double4 __ovld vload4(size_t offset, const __global double *p);
+double8 __ovld vload8(size_t offset, const __global double *p);
+double16 __ovld vload16(size_t offset, const __global double *p);
+double2 __ovld vload2(size_t offset, const __local double *p);
+double3 __ovld vload3(size_t offset, const __local double *p);
+double4 __ovld vload4(size_t offset, const __local double *p);
+double8 __ovld vload8(size_t offset, const __local double *p);
+double16 __ovld vload16(size_t offset, const __local double *p);
+double2 __ovld vload2(size_t offset, const __private double *p);
+double3 __ovld vload3(size_t offset, const __private double *p);
+double4 __ovld vload4(size_t offset, const __private double *p);
+double8 __ovld vload8(size_t offset, const __private double *p);
+double16 __ovld vload16(size_t offset, const __private double *p);
+#endif //cl_khr_fp64
+
+#ifdef cl_khr_fp16
+half __ovld vload(size_t offset, const __global half *p);
+half2 __ovld vload2(size_t offset, const __global half *p);
+half3 __ovld vload3(size_t offset, const __global half *p);
+half4 __ovld vload4(size_t offset, const __global half *p);
+half8 __ovld vload8(size_t offset, const __global half *p);
+half16 __ovld vload16(size_t offset, const __global half *p);
+half __ovld vload(size_t offset, const __local half *p);
+half2 __ovld vload2(size_t offset, const __local half *p);
+half3 __ovld vload3(size_t offset, const __local half *p);
+half4 __ovld vload4(size_t offset, const __local half *p);
+half8 __ovld vload8(size_t offset, const __local half *p);
+half16 __ovld vload16(size_t offset, const __local half *p);
+half __ovld vload(size_t offset, const __private half *p);
+half2 __ovld vload2(size_t offset, const __private half *p);
+half3 __ovld vload3(size_t offset, const __private half *p);
+half4 __ovld vload4(size_t offset, const __private half *p);
+half8 __ovld vload8(size_t offset, const __private half *p);
+half16 __ovld vload16(size_t offset, const __private half *p);
+#endif //cl_khr_fp16
+#endif //__OPENCL_C_VERSION__ >= CL_VERSION_2_0
+
+#if __OPENCL_C_VERSION__ >= CL_VERSION_2_0
+void __ovld vstore2(char2 data, size_t offset, char *p);
+void __ovld vstore2(uchar2 data, size_t offset, uchar *p);
+void __ovld vstore2(short2 data, size_t offset, short *p);
+void __ovld vstore2(ushort2 data, size_t offset, ushort *p);
+void __ovld vstore2(int2 data, size_t offset, int *p);
+void __ovld vstore2(uint2 data, size_t offset, uint *p);
+void __ovld vstore2(long2 data, size_t offset, long *p);
+void __ovld vstore2(ulong2 data, size_t offset, ulong *p);
+void __ovld vstore2(float2 data, size_t offset, float *p);
+void __ovld vstore3(char3 data, size_t offset, char *p);
+void __ovld vstore3(uchar3 data, size_t offset, uchar *p);
+void __ovld vstore3(short3 data, size_t offset, short *p);
+void __ovld vstore3(ushort3 data, size_t offset, ushort *p);
+void __ovld vstore3(int3 data, size_t offset, int *p);
+void __ovld vstore3(uint3 data, size_t offset, uint *p);
+void __ovld vstore3(long3 data, size_t offset, long *p);
+void __ovld vstore3(ulong3 data, size_t offset, ulong *p);
+void __ovld vstore3(float3 data, size_t offset, float *p);
+void __ovld vstore4(char4 data, size_t offset, char *p);
+void __ovld vstore4(uchar4 data, size_t offset, uchar *p);
+void __ovld vstore4(short4 data, size_t offset, short *p);
+void __ovld vstore4(ushort4 data, size_t offset, ushort *p);
+void __ovld vstore4(int4 data, size_t offset, int *p);
+void __ovld vstore4(uint4 data, size_t offset, uint *p);
+void __ovld vstore4(long4 data, size_t offset, long *p);
+void __ovld vstore4(ulong4 data, size_t offset, ulong *p);
+void __ovld vstore4(float4 data, size_t offset, float *p);
+void __ovld vstore8(char8 data, size_t offset, char *p);
+void __ovld vstore8(uchar8 data, size_t offset, uchar *p);
+void __ovld vstore8(short8 data, size_t offset, short *p);
+void __ovld vstore8(ushort8 data, size_t offset, ushort *p);
+void __ovld vstore8(int8 data, size_t offset, int *p);
+void __ovld vstore8(uint8 data, size_t offset, uint *p);
+void __ovld vstore8(long8 data, size_t offset, long *p);
+void __ovld vstore8(ulong8 data, size_t offset, ulong *p);
+void __ovld vstore8(float8 data, size_t offset, float *p);
+void __ovld vstore16(char16 data, size_t offset, char *p);
+void __ovld vstore16(uchar16 data, size_t offset, uchar *p);
+void __ovld vstore16(short16 data, size_t offset, short *p);
+void __ovld vstore16(ushort16 data, size_t offset, ushort *p);
+void __ovld vstore16(int16 data, size_t offset, int *p);
+void __ovld vstore16(uint16 data, size_t offset, uint *p);
+void __ovld vstore16(long16 data, size_t offset, long *p);
+void __ovld vstore16(ulong16 data, size_t offset, ulong *p);
+void __ovld vstore16(float16 data, size_t offset, float *p);
+#ifdef cl_khr_fp64
+void __ovld vstore2(double2 data, size_t offset, double *p);
+void __ovld vstore3(double3 data, size_t offset, double *p);
+void __ovld vstore4(double4 data, size_t offset, double *p);
+void __ovld vstore8(double8 data, size_t offset, double *p);
+void __ovld vstore16(double16 data, size_t offset, double *p);
+#endif //cl_khr_fp64
+#ifdef cl_khr_fp16
+void __ovld vstore(half data, size_t offset, half *p);
+void __ovld vstore2(half2 data, size_t offset, half *p);
+void __ovld vstore3(half3 data, size_t offset, half *p);
+void __ovld vstore4(half4 data, size_t offset, half *p);
+void __ovld vstore8(half8 data, size_t offset, half *p);
+void __ovld vstore16(half16 data, size_t offset, half *p);
+#endif //cl_khr_fp16
+#else
+void __ovld vstore2(char2 data, size_t offset, __global char *p);
+void __ovld vstore2(uchar2 data, size_t offset, __global uchar *p);
+void __ovld vstore2(short2 data, size_t offset, __global short *p);
+void __ovld vstore2(ushort2 data, size_t offset, __global ushort *p);
+void __ovld vstore2(int2 data, size_t offset, __global int *p);
+void __ovld vstore2(uint2 data, size_t offset, __global uint *p);
+void __ovld vstore2(long2 data, size_t offset, __global long *p);
+void __ovld vstore2(ulong2 data, size_t offset, __global ulong *p);
+void __ovld vstore2(float2 data, size_t offset, __global float *p);
+void __ovld vstore3(char3 data, size_t offset, __global char *p);
+void __ovld vstore3(uchar3 data, size_t offset, __global uchar *p);
+void __ovld vstore3(short3 data, size_t offset, __global short *p);
+void __ovld vstore3(ushort3 data, size_t offset, __global ushort *p);
+void __ovld vstore3(int3 data, size_t offset, __global int *p);
+void __ovld vstore3(uint3 data, size_t offset, __global uint *p);
+void __ovld vstore3(long3 data, size_t offset, __global long *p);
+void __ovld vstore3(ulong3 data, size_t offset, __global ulong *p);
+void __ovld vstore3(float3 data, size_t offset, __global float *p);
+void __ovld vstore4(char4 data, size_t offset, __global char *p);
+void __ovld vstore4(uchar4 data, size_t offset, __global uchar *p);
+void __ovld vstore4(short4 data, size_t offset, __global short *p);
+void __ovld vstore4(ushort4 data, size_t offset, __global ushort *p);
+void __ovld vstore4(int4 data, size_t offset, __global int *p);
+void __ovld vstore4(uint4 data, size_t offset, __global uint *p);
+void __ovld vstore4(long4 data, size_t offset, __global long *p);
+void __ovld vstore4(ulong4 data, size_t offset, __global ulong *p);
+void __ovld vstore4(float4 data, size_t offset, __global float *p);
+void __ovld vstore8(char8 data, size_t offset, __global char *p);
+void __ovld vstore8(uchar8 data, size_t offset, __global uchar *p);
+void __ovld vstore8(short8 data, size_t offset, __global short *p);
+void __ovld vstore8(ushort8 data, size_t offset, __global ushort *p);
+void __ovld vstore8(int8 data, size_t offset, __global int *p);
+void __ovld vstore8(uint8 data, size_t offset, __global uint *p);
+void __ovld vstore8(long8 data, size_t offset, __global long *p);
+void __ovld vstore8(ulong8 data, size_t offset, __global ulong *p);
+void __ovld vstore8(float8 data, size_t offset, __global float *p);
+void __ovld vstore16(char16 data, size_t offset, __global char *p);
+void __ovld vstore16(uchar16 data, size_t offset, __global uchar *p);
+void __ovld vstore16(short16 data, size_t offset, __global short *p);
+void __ovld vstore16(ushort16 data, size_t offset, __global ushort *p);
+void __ovld vstore16(int16 data, size_t offset, __global int *p);
+void __ovld vstore16(uint16 data, size_t offset, __global uint *p);
+void __ovld vstore16(long16 data, size_t offset, __global long *p);
+void __ovld vstore16(ulong16 data, size_t offset, __global ulong *p);
+void __ovld vstore16(float16 data, size_t offset, __global float *p);
+void __ovld vstore2(char2 data, size_t offset, __local char *p);
+void __ovld vstore2(uchar2 data, size_t offset, __local uchar *p);
+void __ovld vstore2(short2 data, size_t offset, __local short *p);
+void __ovld vstore2(ushort2 data, size_t offset, __local ushort *p);
+void __ovld vstore2(int2 data, size_t offset, __local int *p);
+void __ovld vstore2(uint2 data, size_t offset, __local uint *p);
+void __ovld vstore2(long2 data, size_t offset, __local long *p);
+void __ovld vstore2(ulong2 data, size_t offset, __local ulong *p);
+void __ovld vstore2(float2 data, size_t offset, __local float *p);
+void __ovld vstore3(char3 data, size_t offset, __local char *p);
+void __ovld vstore3(uchar3 data, size_t offset, __local uchar *p);
+void __ovld vstore3(short3 data, size_t offset, __local short *p);
+void __ovld vstore3(ushort3 data, size_t offset, __local ushort *p);
+void __ovld vstore3(int3 data, size_t offset, __local int *p);
+void __ovld vstore3(uint3 data, size_t offset, __local uint *p);
+void __ovld vstore3(long3 data, size_t offset, __local long *p);
+void __ovld vstore3(ulong3 data, size_t offset, __local ulong *p);
+void __ovld vstore3(float3 data, size_t offset, __local float *p);
+void __ovld vstore4(char4 data, size_t offset, __local char *p);
+void __ovld vstore4(uchar4 data, size_t offset, __local uchar *p);
+void __ovld vstore4(short4 data, size_t offset, __local short *p);
+void __ovld vstore4(ushort4 data, size_t offset, __local ushort *p);
+void __ovld vstore4(int4 data, size_t offset, __local int *p);
+void __ovld vstore4(uint4 data, size_t offset, __local uint *p);
+void __ovld vstore4(long4 data, size_t offset, __local long *p);
+void __ovld vstore4(ulong4 data, size_t offset, __local ulong *p);
+void __ovld vstore4(float4 data, size_t offset, __local float *p);
+void __ovld vstore8(char8 data, size_t offset, __local char *p);
+void __ovld vstore8(uchar8 data, size_t offset, __local uchar *p);
+void __ovld vstore8(short8 data, size_t offset, __local short *p);
+void __ovld vstore8(ushort8 data, size_t offset, __local ushort *p);
+void __ovld vstore8(int8 data, size_t offset, __local int *p);
+void __ovld vstore8(uint8 data, size_t offset, __local uint *p);
+void __ovld vstore8(long8 data, size_t offset, __local long *p);
+void __ovld vstore8(ulong8 data, size_t offset, __local ulong *p);
+void __ovld vstore8(float8 data, size_t offset, __local float *p);
+void __ovld vstore16(char16 data, size_t offset, __local char *p);
+void __ovld vstore16(uchar16 data, size_t offset, __local uchar *p);
+void __ovld vstore16(short16 data, size_t offset, __local short *p);
+void __ovld vstore16(ushort16 data, size_t offset, __local ushort *p);
+void __ovld vstore16(int16 data, size_t offset, __local int *p);
+void __ovld vstore16(uint16 data, size_t offset, __local uint *p);
+void __ovld vstore16(long16 data, size_t offset, __local long *p);
+void __ovld vstore16(ulong16 data, size_t offset, __local ulong *p);
+void __ovld vstore16(float16 data, size_t offset, __local float *p);
+void __ovld vstore2(char2 data, size_t offset, __private char *p);
+void __ovld vstore2(uchar2 data, size_t offset, __private uchar *p);
+void __ovld vstore2(short2 data, size_t offset, __private short *p);
+void __ovld vstore2(ushort2 data, size_t offset, __private ushort *p);
+void __ovld vstore2(int2 data, size_t offset, __private int *p);
+void __ovld vstore2(uint2 data, size_t offset, __private uint *p);
+void __ovld vstore2(long2 data, size_t offset, __private long *p);
+void __ovld vstore2(ulong2 data, size_t offset, __private ulong *p);
+void __ovld vstore2(float2 data, size_t offset, __private float *p);
+void __ovld vstore3(char3 data, size_t offset, __private char *p);
+void __ovld vstore3(uchar3 data, size_t offset, __private uchar *p);
+void __ovld vstore3(short3 data, size_t offset, __private short *p);
+void __ovld vstore3(ushort3 data, size_t offset, __private ushort *p);
+void __ovld vstore3(int3 data, size_t offset, __private int *p);
+void __ovld vstore3(uint3 data, size_t offset, __private uint *p);
+void __ovld vstore3(long3 data, size_t offset, __private long *p);
+void __ovld vstore3(ulong3 data, size_t offset, __private ulong *p);
+void __ovld vstore3(float3 data, size_t offset, __private float *p);
+void __ovld vstore4(char4 data, size_t offset, __private char *p);
+void __ovld vstore4(uchar4 data, size_t offset, __private uchar *p);
+void __ovld vstore4(short4 data, size_t offset, __private short *p);
+void __ovld vstore4(ushort4 data, size_t offset, __private ushort *p);
+void __ovld vstore4(int4 data, size_t offset, __private int *p);
+void __ovld vstore4(uint4 data, size_t offset, __private uint *p);
+void __ovld vstore4(long4 data, size_t offset, __private long *p);
+void __ovld vstore4(ulong4 data, size_t offset, __private ulong *p);
+void __ovld vstore4(float4 data, size_t offset, __private float *p);
+void __ovld vstore8(char8 data, size_t offset, __private char *p);
+void __ovld vstore8(uchar8 data, size_t offset, __private uchar *p);
+void __ovld vstore8(short8 data, size_t offset, __private short *p);
+void __ovld vstore8(ushort8 data, size_t offset, __private ushort *p);
+void __ovld vstore8(int8 data, size_t offset, __private int *p);
+void __ovld vstore8(uint8 data, size_t offset, __private uint *p);
+void __ovld vstore8(long8 data, size_t offset, __private long *p);
+void __ovld vstore8(ulong8 data, size_t offset, __private ulong *p);
+void __ovld vstore8(float8 data, size_t offset, __private float *p);
+void __ovld vstore16(char16 data, size_t offset, __private char *p);
+void __ovld vstore16(uchar16 data, size_t offset, __private uchar *p);
+void __ovld vstore16(short16 data, size_t offset, __private short *p);
+void __ovld vstore16(ushort16 data, size_t offset, __private ushort *p);
+void __ovld vstore16(int16 data, size_t offset, __private int *p);
+void __ovld vstore16(uint16 data, size_t offset, __private uint *p);
+void __ovld vstore16(long16 data, size_t offset, __private long *p);
+void __ovld vstore16(ulong16 data, size_t offset, __private ulong *p);
+void __ovld vstore16(float16 data, size_t offset, __private float *p);
+#ifdef cl_khr_fp64
+void __ovld vstore2(double2 data, size_t offset, __global double *p);
+void __ovld vstore3(double3 data, size_t offset, __global double *p);
+void __ovld vstore4(double4 data, size_t offset, __global double *p);
+void __ovld vstore8(double8 data, size_t offset, __global double *p);
+void __ovld vstore16(double16 data, size_t offset, __global double *p);
+void __ovld vstore2(double2 data, size_t offset, __local double *p);
+void __ovld vstore3(double3 data, size_t offset, __local double *p);
+void __ovld vstore4(double4 data, size_t offset, __local double *p);
+void __ovld vstore8(double8 data, size_t offset, __local double *p);
+void __ovld vstore16(double16 data, size_t offset, __local double *p);
+void __ovld vstore2(double2 data, size_t offset, __private double *p);
+void __ovld vstore3(double3 data, size_t offset, __private double *p);
+void __ovld vstore4(double4 data, size_t offset, __private double *p);
+void __ovld vstore8(double8 data, size_t offset, __private double *p);
+void __ovld vstore16(double16 data, size_t offset, __private double *p);
+#endif //cl_khr_fp64
+#ifdef cl_khr_fp16
+void __ovld vstore(half data, size_t offset, __global half *p);
+void __ovld vstore2(half2 data, size_t offset, __global half *p);
+void __ovld vstore3(half3 data, size_t offset, __global half *p);
+void __ovld vstore4(half4 data, size_t offset, __global half *p);
+void __ovld vstore8(half8 data, size_t offset, __global half *p);
+void __ovld vstore16(half16 data, size_t offset, __global half *p);
+void __ovld vstore(half data, size_t offset, __local half *p);
+void __ovld vstore2(half2 data, size_t offset, __local half *p);
+void __ovld vstore3(half3 data, size_t offset, __local half *p);
+void __ovld vstore4(half4 data, size_t offset, __local half *p);
+void __ovld vstore8(half8 data, size_t offset, __local half *p);
+void __ovld vstore16(half16 data, size_t offset, __local half *p);
+void __ovld vstore(half data, size_t offset, __private half *p);
+void __ovld vstore2(half2 data, size_t offset, __private half *p);
+void __ovld vstore3(half3 data, size_t offset, __private half *p);
+void __ovld vstore4(half4 data, size_t offset, __private half *p);
+void __ovld vstore8(half8 data, size_t offset, __private half *p);
+void __ovld vstore16(half16 data, size_t offset, __private half *p);
+#endif //cl_khr_fp16
+#endif //__OPENCL_C_VERSION__ >= CL_VERSION_2_0
+
+/**
+ * Read sizeof (half) bytes of data from the address
+ * (p + offset). The data read is interpreted as a
+ * half value. The half value is converted to a
+ * float value and the float value is returned.
+ * The read address computed as (p + offset)
+ * must be 16-bit aligned.
+ */
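+
+/*
+ * Illustrative sketch, not part of the original header; hbuf and i are
+ * hypothetical. The storage is 16-bit, but the result is widened to
+ * float, so half buffers can be read without cl_khr_fp16 arithmetic:
+ *
+ *   __global const half *hbuf; size_t i;
+ *   float f = vload_half(i, hbuf);   // reads the 16-bit half at hbuf + i
+ */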
+float __ovld vload_half(size_t offset, const __constant half *p);
+#if __OPENCL_C_VERSION__ >= CL_VERSION_2_0
+float __ovld vload_half(size_t offset, const half *p);
+#else
+float __ovld vload_half(size_t offset, const __global half *p);
+float __ovld vload_half(size_t offset, const __local half *p);
+float __ovld vload_half(size_t offset, const __private half *p);
+#endif //__OPENCL_C_VERSION__ >= CL_VERSION_2_0
+
+/**
+ * Read sizeof (halfn) bytes of data from the address
+ * (p + (offset * n)). The data read is interpreted
+ * as a halfn value. The halfn value read is
+ * converted to a floatn value and the floatn
+ * value is returned. The read address computed
+ * as (p + (offset * n)) must be 16-bit aligned.
+ */
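+
+/*
+ * Illustrative sketch, not part of the original header; pixels and i are
+ * hypothetical. vload_half4 reads four packed 16-bit halves starting at
+ * (p + (i * 4)) and returns them widened to a float4:
+ *
+ *   float4 rgba = vload_half4(i, pixels);  // pixels: __global const half *
+ */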
+float2 __ovld vload_half2(size_t offset, const __constant half *p);
+float3 __ovld vload_half3(size_t offset, const __constant half *p);
+float4 __ovld vload_half4(size_t offset, const __constant half *p);
+float8 __ovld vload_half8(size_t offset, const __constant half *p);
+float16 __ovld vload_half16(size_t offset, const __constant half *p);
+#if __OPENCL_C_VERSION__ >= CL_VERSION_2_0
+float2 __ovld vload_half2(size_t offset, const half *p);
+float3 __ovld vload_half3(size_t offset, const half *p);
+float4 __ovld vload_half4(size_t offset, const half *p);
+float8 __ovld vload_half8(size_t offset, const half *p);
+float16 __ovld vload_half16(size_t offset, const half *p);
+#else
+float2 __ovld vload_half2(size_t offset, const __global half *p);
+float3 __ovld vload_half3(size_t offset, const __global half *p);
+float4 __ovld vload_half4(size_t offset, const __global half *p);
+float8 __ovld vload_half8(size_t offset, const __global half *p);
+float16 __ovld vload_half16(size_t offset, const __global half *p);
+float2 __ovld vload_half2(size_t offset, const __local half *p);
+float3 __ovld vload_half3(size_t offset, const __local half *p);
+float4 __ovld vload_half4(size_t offset, const __local half *p);
+float8 __ovld vload_half8(size_t offset, const __local half *p);
+float16 __ovld vload_half16(size_t offset, const __local half *p);
+float2 __ovld vload_half2(size_t offset, const __private half *p);
+float3 __ovld vload_half3(size_t offset, const __private half *p);
+float4 __ovld vload_half4(size_t offset, const __private half *p);
+float8 __ovld vload_half8(size_t offset, const __private half *p);
+float16 __ovld vload_half16(size_t offset, const __private half *p);
+#endif //__OPENCL_C_VERSION__ >= CL_VERSION_2_0
+
+/**
+ * The float value given by data is first
+ * converted to a half value using the appropriate
+ * rounding mode. The half value is then written
+ * to the address computed as (p + offset). The
+ * address computed as (p + offset) must be
+ * 16-bit aligned.
+ * vstore_half uses the current rounding mode.
+ * The default current rounding mode is round to
+ * nearest even.
+ */
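+
+/*
+ * Illustrative sketch, not part of the original header; x, i and hbuf
+ * are hypothetical. The _rte/_rtz/_rtp/_rtn suffixes pin the
+ * float-to-half rounding mode; the unsuffixed form uses the current
+ * mode, which defaults to round-to-nearest-even:
+ *
+ *   vstore_half(x, i, hbuf);       // current mode (default: rte)
+ *   vstore_half_rtz(x, i, hbuf);   // always truncate toward zero
+ */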
+#if __OPENCL_C_VERSION__ >= CL_VERSION_2_0
+void __ovld vstore_half(float data, size_t offset, half *p);
+void __ovld vstore_half_rte(float data, size_t offset, half *p);
+void __ovld vstore_half_rtz(float data, size_t offset, half *p);
+void __ovld vstore_half_rtp(float data, size_t offset, half *p);
+void __ovld vstore_half_rtn(float data, size_t offset, half *p);
+#ifdef cl_khr_fp64
+void __ovld vstore_half(double data, size_t offset, half *p);
+void __ovld vstore_half_rte(double data, size_t offset, half *p);
+void __ovld vstore_half_rtz(double data, size_t offset, half *p);
+void __ovld vstore_half_rtp(double data, size_t offset, half *p);
+void __ovld vstore_half_rtn(double data, size_t offset, half *p);
+#endif //cl_khr_fp64
+#else
+void __ovld vstore_half(float data, size_t offset, __global half *p);
+void __ovld vstore_half_rte(float data, size_t offset, __global half *p);
+void __ovld vstore_half_rtz(float data, size_t offset, __global half *p);
+void __ovld vstore_half_rtp(float data, size_t offset, __global half *p);
+void __ovld vstore_half_rtn(float data, size_t offset, __global half *p);
+void __ovld vstore_half(float data, size_t offset, __local half *p);
+void __ovld vstore_half_rte(float data, size_t offset, __local half *p);
+void __ovld vstore_half_rtz(float data, size_t offset, __local half *p);
+void __ovld vstore_half_rtp(float data, size_t offset, __local half *p);
+void __ovld vstore_half_rtn(float data, size_t offset, __local half *p);
+void __ovld vstore_half(float data, size_t offset, __private half *p);
+void __ovld vstore_half_rte(float data, size_t offset, __private half *p);
+void __ovld vstore_half_rtz(float data, size_t offset, __private half *p);
+void __ovld vstore_half_rtp(float data, size_t offset, __private half *p);
+void __ovld vstore_half_rtn(float data, size_t offset, __private half *p);
+#ifdef cl_khr_fp64
+void __ovld vstore_half(double data, size_t offset, __global half *p);
+void __ovld vstore_half_rte(double data, size_t offset, __global half *p);
+void __ovld vstore_half_rtz(double data, size_t offset, __global half *p);
+void __ovld vstore_half_rtp(double data, size_t offset, __global half *p);
+void __ovld vstore_half_rtn(double data, size_t offset, __global half *p);
+void __ovld vstore_half(double data, size_t offset, __local half *p);
+void __ovld vstore_half_rte(double data, size_t offset, __local half *p);
+void __ovld vstore_half_rtz(double data, size_t offset, __local half *p);
+void __ovld vstore_half_rtp(double data, size_t offset, __local half *p);
+void __ovld vstore_half_rtn(double data, size_t offset, __local half *p);
+void __ovld vstore_half(double data, size_t offset, __private half *p);
+void __ovld vstore_half_rte(double data, size_t offset, __private half *p);
+void __ovld vstore_half_rtz(double data, size_t offset, __private half *p);
+void __ovld vstore_half_rtp(double data, size_t offset, __private half *p);
+void __ovld vstore_half_rtn(double data, size_t offset, __private half *p);
+#endif //cl_khr_fp64
+#endif //__OPENCL_C_VERSION__ >= CL_VERSION_2_0
+
+/**
+ * The floatn value given by data is converted to
+ * a halfn value using the appropriate rounding
+ * mode. The halfn value is then written to the
+ * address computed as (p + (offset * n)). The
+ * address computed as (p + (offset * n)) must be
+ * 16-bit aligned.
+ * vstore_halfn uses the current rounding mode.
+ * The default current rounding mode is round to
+ * nearest even.
+ */
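+
+/*
+ * Illustrative sketch, not part of the original header; rgba, i and
+ * pixels are hypothetical. vstore_half4 narrows a float4 to four halves
+ * and writes them at (p + (i * 4)):
+ *
+ *   vstore_half4(rgba, i, pixels);      // current rounding mode
+ *   vstore_half4_rtp(rgba, i, pixels);  // round toward +infinity
+ */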
+#if __OPENCL_C_VERSION__ >= CL_VERSION_2_0
+void __ovld vstore_half2(float2 data, size_t offset, half *p);
+void __ovld vstore_half3(float3 data, size_t offset, half *p);
+void __ovld vstore_half4(float4 data, size_t offset, half *p);
+void __ovld vstore_half8(float8 data, size_t offset, half *p);
+void __ovld vstore_half16(float16 data, size_t offset, half *p);
+void __ovld vstore_half2_rte(float2 data, size_t offset, half *p);
+void __ovld vstore_half3_rte(float3 data, size_t offset, half *p);
+void __ovld vstore_half4_rte(float4 data, size_t offset, half *p);
+void __ovld vstore_half8_rte(float8 data, size_t offset, half *p);
+void __ovld vstore_half16_rte(float16 data, size_t offset, half *p);
+void __ovld vstore_half2_rtz(float2 data, size_t offset, half *p);
+void __ovld vstore_half3_rtz(float3 data, size_t offset, half *p);
+void __ovld vstore_half4_rtz(float4 data, size_t offset, half *p);
+void __ovld vstore_half8_rtz(float8 data, size_t offset, half *p);
+void __ovld vstore_half16_rtz(float16 data, size_t offset, half *p);
+void __ovld vstore_half2_rtp(float2 data, size_t offset, half *p);
+void __ovld vstore_half3_rtp(float3 data, size_t offset, half *p);
+void __ovld vstore_half4_rtp(float4 data, size_t offset, half *p);
+void __ovld vstore_half8_rtp(float8 data, size_t offset, half *p);
+void __ovld vstore_half16_rtp(float16 data, size_t offset, half *p);
+void __ovld vstore_half2_rtn(float2 data, size_t offset, half *p);
+void __ovld vstore_half3_rtn(float3 data, size_t offset, half *p);
+void __ovld vstore_half4_rtn(float4 data, size_t offset, half *p);
+void __ovld vstore_half8_rtn(float8 data, size_t offset, half *p);
+void __ovld vstore_half16_rtn(float16 data, size_t offset, half *p);
+#ifdef cl_khr_fp64
+void __ovld vstore_half2(double2 data, size_t offset, half *p);
+void __ovld vstore_half3(double3 data, size_t offset, half *p);
+void __ovld vstore_half4(double4 data, size_t offset, half *p);
+void __ovld vstore_half8(double8 data, size_t offset, half *p);
+void __ovld vstore_half16(double16 data, size_t offset, half *p);
+void __ovld vstore_half2_rte(double2 data, size_t offset, half *p);
+void __ovld vstore_half3_rte(double3 data, size_t offset, half *p);
+void __ovld vstore_half4_rte(double4 data, size_t offset, half *p);
+void __ovld vstore_half8_rte(double8 data, size_t offset, half *p);
+void __ovld vstore_half16_rte(double16 data, size_t offset, half *p);
+void __ovld vstore_half2_rtz(double2 data, size_t offset, half *p);
+void __ovld vstore_half3_rtz(double3 data, size_t offset, half *p);
+void __ovld vstore_half4_rtz(double4 data, size_t offset, half *p);
+void __ovld vstore_half8_rtz(double8 data, size_t offset, half *p);
+void __ovld vstore_half16_rtz(double16 data, size_t offset, half *p);
+void __ovld vstore_half2_rtp(double2 data, size_t offset, half *p);
+void __ovld vstore_half3_rtp(double3 data, size_t offset, half *p);
+void __ovld vstore_half4_rtp(double4 data, size_t offset, half *p);
+void __ovld vstore_half8_rtp(double8 data, size_t offset, half *p);
+void __ovld vstore_half16_rtp(double16 data, size_t offset, half *p);
+void __ovld vstore_half2_rtn(double2 data, size_t offset, half *p);
+void __ovld vstore_half3_rtn(double3 data, size_t offset, half *p);
+void __ovld vstore_half4_rtn(double4 data, size_t offset, half *p);
+void __ovld vstore_half8_rtn(double8 data, size_t offset, half *p);
+void __ovld vstore_half16_rtn(double16 data, size_t offset, half *p);
+#endif //cl_khr_fp64
+#else
+void __ovld vstore_half2(float2 data, size_t offset, __global half *p);
+void __ovld vstore_half3(float3 data, size_t offset, __global half *p);
+void __ovld vstore_half4(float4 data, size_t offset, __global half *p);
+void __ovld vstore_half8(float8 data, size_t offset, __global half *p);
+void __ovld vstore_half16(float16 data, size_t offset, __global half *p);
+void __ovld vstore_half2_rte(float2 data, size_t offset, __global half *p);
+void __ovld vstore_half3_rte(float3 data, size_t offset, __global half *p);
+void __ovld vstore_half4_rte(float4 data, size_t offset, __global half *p);
+void __ovld vstore_half8_rte(float8 data, size_t offset, __global half *p);
+void __ovld vstore_half16_rte(float16 data, size_t offset, __global half *p);
+void __ovld vstore_half2_rtz(float2 data, size_t offset, __global half *p);
+void __ovld vstore_half3_rtz(float3 data, size_t offset, __global half *p);
+void __ovld vstore_half4_rtz(float4 data, size_t offset, __global half *p);
+void __ovld vstore_half8_rtz(float8 data, size_t offset, __global half *p);
+void __ovld vstore_half16_rtz(float16 data, size_t offset, __global half *p);
+void __ovld vstore_half2_rtp(float2 data, size_t offset, __global half *p);
+void __ovld vstore_half3_rtp(float3 data, size_t offset, __global half *p);
+void __ovld vstore_half4_rtp(float4 data, size_t offset, __global half *p);
+void __ovld vstore_half8_rtp(float8 data, size_t offset, __global half *p);
+void __ovld vstore_half16_rtp(float16 data, size_t offset, __global half *p);
+void __ovld vstore_half2_rtn(float2 data, size_t offset, __global half *p);
+void __ovld vstore_half3_rtn(float3 data, size_t offset, __global half *p);
+void __ovld vstore_half4_rtn(float4 data, size_t offset, __global half *p);
+void __ovld vstore_half8_rtn(float8 data, size_t offset, __global half *p);
+void __ovld vstore_half16_rtn(float16 data, size_t offset, __global half *p);
+void __ovld vstore_half2(float2 data, size_t offset, __local half *p);
+void __ovld vstore_half3(float3 data, size_t offset, __local half *p);
+void __ovld vstore_half4(float4 data, size_t offset, __local half *p);
+void __ovld vstore_half8(float8 data, size_t offset, __local half *p);
+void __ovld vstore_half16(float16 data, size_t offset, __local half *p);
+void __ovld vstore_half2_rte(float2 data, size_t offset, __local half *p);
+void __ovld vstore_half3_rte(float3 data, size_t offset, __local half *p);
+void __ovld vstore_half4_rte(float4 data, size_t offset, __local half *p);
+void __ovld vstore_half8_rte(float8 data, size_t offset, __local half *p);
+void __ovld vstore_half16_rte(float16 data, size_t offset, __local half *p);
+void __ovld vstore_half2_rtz(float2 data, size_t offset, __local half *p);
+void __ovld vstore_half3_rtz(float3 data, size_t offset, __local half *p);
+void __ovld vstore_half4_rtz(float4 data, size_t offset, __local half *p);
+void __ovld vstore_half8_rtz(float8 data, size_t offset, __local half *p);
+void __ovld vstore_half16_rtz(float16 data, size_t offset, __local half *p);
+void __ovld vstore_half2_rtp(float2 data, size_t offset, __local half *p);
+void __ovld vstore_half3_rtp(float3 data, size_t offset, __local half *p);
+void __ovld vstore_half4_rtp(float4 data, size_t offset, __local half *p);
+void __ovld vstore_half8_rtp(float8 data, size_t offset, __local half *p);
+void __ovld vstore_half16_rtp(float16 data, size_t offset, __local half *p);
+void __ovld vstore_half2_rtn(float2 data, size_t offset, __local half *p);
+void __ovld vstore_half3_rtn(float3 data, size_t offset, __local half *p);
+void __ovld vstore_half4_rtn(float4 data, size_t offset, __local half *p);
+void __ovld vstore_half8_rtn(float8 data, size_t offset, __local half *p);
+void __ovld vstore_half16_rtn(float16 data, size_t offset, __local half *p);
+void __ovld vstore_half2(float2 data, size_t offset, __private half *p);
+void __ovld vstore_half3(float3 data, size_t offset, __private half *p);
+void __ovld vstore_half4(float4 data, size_t offset, __private half *p);
+void __ovld vstore_half8(float8 data, size_t offset, __private half *p);
+void __ovld vstore_half16(float16 data, size_t offset, __private half *p);
+void __ovld vstore_half2_rte(float2 data, size_t offset, __private half *p);
+void __ovld vstore_half3_rte(float3 data, size_t offset, __private half *p);
+void __ovld vstore_half4_rte(float4 data, size_t offset, __private half *p);
+void __ovld vstore_half8_rte(float8 data, size_t offset, __private half *p);
+void __ovld vstore_half16_rte(float16 data, size_t offset, __private half *p);
+void __ovld vstore_half2_rtz(float2 data, size_t offset, __private half *p);
+void __ovld vstore_half3_rtz(float3 data, size_t offset, __private half *p);
+void __ovld vstore_half4_rtz(float4 data, size_t offset, __private half *p);
+void __ovld vstore_half8_rtz(float8 data, size_t offset, __private half *p);
+void __ovld vstore_half16_rtz(float16 data, size_t offset, __private half *p);
+void __ovld vstore_half2_rtp(float2 data, size_t offset, __private half *p);
+void __ovld vstore_half3_rtp(float3 data, size_t offset, __private half *p);
+void __ovld vstore_half4_rtp(float4 data, size_t offset, __private half *p);
+void __ovld vstore_half8_rtp(float8 data, size_t offset, __private half *p);
+void __ovld vstore_half16_rtp(float16 data, size_t offset, __private half *p);
+void __ovld vstore_half2_rtn(float2 data, size_t offset, __private half *p);
+void __ovld vstore_half3_rtn(float3 data, size_t offset, __private half *p);
+void __ovld vstore_half4_rtn(float4 data, size_t offset, __private half *p);
+void __ovld vstore_half8_rtn(float8 data, size_t offset, __private half *p);
+void __ovld vstore_half16_rtn(float16 data, size_t offset, __private half *p);
+#ifdef cl_khr_fp64
+void __ovld vstore_half2(double2 data, size_t offset, __global half *p);
+void __ovld vstore_half3(double3 data, size_t offset, __global half *p);
+void __ovld vstore_half4(double4 data, size_t offset, __global half *p);
+void __ovld vstore_half8(double8 data, size_t offset, __global half *p);
+void __ovld vstore_half16(double16 data, size_t offset, __global half *p);
+void __ovld vstore_half2_rte(double2 data, size_t offset, __global half *p);
+void __ovld vstore_half3_rte(double3 data, size_t offset, __global half *p);
+void __ovld vstore_half4_rte(double4 data, size_t offset, __global half *p);
+void __ovld vstore_half8_rte(double8 data, size_t offset, __global half *p);
+void __ovld vstore_half16_rte(double16 data, size_t offset, __global half *p);
+void __ovld vstore_half2_rtz(double2 data, size_t offset, __global half *p);
+void __ovld vstore_half3_rtz(double3 data, size_t offset, __global half *p);
+void __ovld vstore_half4_rtz(double4 data, size_t offset, __global half *p);
+void __ovld vstore_half8_rtz(double8 data, size_t offset, __global half *p);
+void __ovld vstore_half16_rtz(double16 data, size_t offset, __global half *p);
+void __ovld vstore_half2_rtp(double2 data, size_t offset, __global half *p);
+void __ovld vstore_half3_rtp(double3 data, size_t offset, __global half *p);
+void __ovld vstore_half4_rtp(double4 data, size_t offset, __global half *p);
+void __ovld vstore_half8_rtp(double8 data, size_t offset, __global half *p);
+void __ovld vstore_half16_rtp(double16 data, size_t offset, __global half *p);
+void __ovld vstore_half2_rtn(double2 data, size_t offset, __global half *p);
+void __ovld vstore_half3_rtn(double3 data, size_t offset, __global half *p);
+void __ovld vstore_half4_rtn(double4 data, size_t offset, __global half *p);
+void __ovld vstore_half8_rtn(double8 data, size_t offset, __global half *p);
+void __ovld vstore_half16_rtn(double16 data, size_t offset, __global half *p);
+void __ovld vstore_half2(double2 data, size_t offset, __local half *p);
+void __ovld vstore_half3(double3 data, size_t offset, __local half *p);
+void __ovld vstore_half4(double4 data, size_t offset, __local half *p);
+void __ovld vstore_half8(double8 data, size_t offset, __local half *p);
+void __ovld vstore_half16(double16 data, size_t offset, __local half *p);
+void __ovld vstore_half2_rte(double2 data, size_t offset, __local half *p);
+void __ovld vstore_half3_rte(double3 data, size_t offset, __local half *p);
+void __ovld vstore_half4_rte(double4 data, size_t offset, __local half *p);
+void __ovld vstore_half8_rte(double8 data, size_t offset, __local half *p);
+void __ovld vstore_half16_rte(double16 data, size_t offset, __local half *p);
+void __ovld vstore_half2_rtz(double2 data, size_t offset, __local half *p);
+void __ovld vstore_half3_rtz(double3 data, size_t offset, __local half *p);
+void __ovld vstore_half4_rtz(double4 data, size_t offset, __local half *p);
+void __ovld vstore_half8_rtz(double8 data, size_t offset, __local half *p);
+void __ovld vstore_half16_rtz(double16 data, size_t offset, __local half *p);
+void __ovld vstore_half2_rtp(double2 data, size_t offset, __local half *p);
+void __ovld vstore_half3_rtp(double3 data, size_t offset, __local half *p);
+void __ovld vstore_half4_rtp(double4 data, size_t offset, __local half *p);
+void __ovld vstore_half8_rtp(double8 data, size_t offset, __local half *p);
+void __ovld vstore_half16_rtp(double16 data, size_t offset, __local half *p);
+void __ovld vstore_half2_rtn(double2 data, size_t offset, __local half *p);
+void __ovld vstore_half3_rtn(double3 data, size_t offset, __local half *p);
+void __ovld vstore_half4_rtn(double4 data, size_t offset, __local half *p);
+void __ovld vstore_half8_rtn(double8 data, size_t offset, __local half *p);
+void __ovld vstore_half16_rtn(double16 data, size_t offset, __local half *p);
+void __ovld vstore_half2(double2 data, size_t offset, __private half *p);
+void __ovld vstore_half3(double3 data, size_t offset, __private half *p);
+void __ovld vstore_half4(double4 data, size_t offset, __private half *p);
+void __ovld vstore_half8(double8 data, size_t offset, __private half *p);
+void __ovld vstore_half16(double16 data, size_t offset, __private half *p);
+void __ovld vstore_half2_rte(double2 data, size_t offset, __private half *p);
+void __ovld vstore_half3_rte(double3 data, size_t offset, __private half *p);
+void __ovld vstore_half4_rte(double4 data, size_t offset, __private half *p);
+void __ovld vstore_half8_rte(double8 data, size_t offset, __private half *p);
+void __ovld vstore_half16_rte(double16 data, size_t offset, __private half *p);
+void __ovld vstore_half2_rtz(double2 data, size_t offset, __private half *p);
+void __ovld vstore_half3_rtz(double3 data, size_t offset, __private half *p);
+void __ovld vstore_half4_rtz(double4 data, size_t offset, __private half *p);
+void __ovld vstore_half8_rtz(double8 data, size_t offset, __private half *p);
+void __ovld vstore_half16_rtz(double16 data, size_t offset, __private half *p);
+void __ovld vstore_half2_rtp(double2 data, size_t offset, __private half *p);
+void __ovld vstore_half3_rtp(double3 data, size_t offset, __private half *p);
+void __ovld vstore_half4_rtp(double4 data, size_t offset, __private half *p);
+void __ovld vstore_half8_rtp(double8 data, size_t offset, __private half *p);
+void __ovld vstore_half16_rtp(double16 data, size_t offset, __private half *p);
+void __ovld vstore_half2_rtn(double2 data, size_t offset, __private half *p);
+void __ovld vstore_half3_rtn(double3 data, size_t offset, __private half *p);
+void __ovld vstore_half4_rtn(double4 data, size_t offset, __private half *p);
+void __ovld vstore_half8_rtn(double8 data, size_t offset, __private half *p);
+void __ovld vstore_half16_rtn(double16 data, size_t offset, __private half *p);
+#endif //cl_khr_fp64
+#endif //__OPENCL_C_VERSION__ >= CL_VERSION_2_0
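+/*
+ * Example (editor's sketch, not part of the original header): packing a
+ * float4 into four consecutive halfs with round-to-nearest-even. The kernel
+ * and buffer names are assumptions for illustration.
+ *
+ *   __kernel void pack_half4(__global const float4 *in, __global half *out) {
+ *       size_t i = get_global_id(0);
+ *       // Writes 4 halfs at (out + i * 4); that address must be 16-bit aligned.
+ *       vstore_half4_rte(in[i], i, out);
+ *   }
+ */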
+
+/**
+ * For n = 1, 2, 4, 8 and 16, read sizeof (halfn)
+ * bytes of data from address (p + (offset * n)).
+ * The data read is interpreted as a halfn value.
+ * The halfn value read is converted to a floatn
+ * value and the floatn value is returned.
+ * The address computed as (p + (offset * n))
+ * must be aligned to sizeof (halfn) bytes.
+ * For n = 3, vloada_half3 reads a half3 from
+ * address (p + (offset * 4)) and returns a float3.
+ * The address computed as (p + (offset * 4))
+ * must be aligned to sizeof (half) * 4 bytes.
+ */
+float __ovld vloada_half(size_t offset, const __constant half *p);
+float2 __ovld vloada_half2(size_t offset, const __constant half *p);
+float3 __ovld vloada_half3(size_t offset, const __constant half *p);
+float4 __ovld vloada_half4(size_t offset, const __constant half *p);
+float8 __ovld vloada_half8(size_t offset, const __constant half *p);
+float16 __ovld vloada_half16(size_t offset, const __constant half *p);
+#if __OPENCL_C_VERSION__ >= CL_VERSION_2_0
+float __ovld vloada_half(size_t offset, const half *p);
+float2 __ovld vloada_half2(size_t offset, const half *p);
+float3 __ovld vloada_half3(size_t offset, const half *p);
+float4 __ovld vloada_half4(size_t offset, const half *p);
+float8 __ovld vloada_half8(size_t offset, const half *p);
+float16 __ovld vloada_half16(size_t offset, const half *p);
+#else
+float __ovld vloada_half(size_t offset, const __global half *p);
+float2 __ovld vloada_half2(size_t offset, const __global half *p);
+float3 __ovld vloada_half3(size_t offset, const __global half *p);
+float4 __ovld vloada_half4(size_t offset, const __global half *p);
+float8 __ovld vloada_half8(size_t offset, const __global half *p);
+float16 __ovld vloada_half16(size_t offset, const __global half *p);
+float __ovld vloada_half(size_t offset, const __local half *p);
+float2 __ovld vloada_half2(size_t offset, const __local half *p);
+float3 __ovld vloada_half3(size_t offset, const __local half *p);
+float4 __ovld vloada_half4(size_t offset, const __local half *p);
+float8 __ovld vloada_half8(size_t offset, const __local half *p);
+float16 __ovld vloada_half16(size_t offset, const __local half *p);
+float __ovld vloada_half(size_t offset, const __private half *p);
+float2 __ovld vloada_half2(size_t offset, const __private half *p);
+float3 __ovld vloada_half3(size_t offset, const __private half *p);
+float4 __ovld vloada_half4(size_t offset, const __private half *p);
+float8 __ovld vloada_half8(size_t offset, const __private half *p);
+float16 __ovld vloada_half16(size_t offset, const __private half *p);
+#endif //__OPENCL_C_VERSION__ >= CL_VERSION_2_0
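+/*
+ * Example (editor's sketch, not part of the original header): widening four
+ * packed halfs back to a float4 with the aligned load variant. Names are
+ * illustrative; in must be sizeof(half4) (8-byte) aligned at each slot.
+ *
+ *   __kernel void unpack_half4(__global const half *in, __global float4 *out) {
+ *       size_t i = get_global_id(0);
+ *       out[i] = vloada_half4(i, in);  // reads 8 bytes at (in + i * 4)
+ *   }
+ */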
+
+/**
+ * The floatn value given by data is converted to
+ * a halfn value using the appropriate rounding
+ * mode.
+ * For n = 1, 2, 4, 8 and 16, the halfn value is
+ * written to the address computed as
+ * (p + (offset * n)). The address computed as
+ * (p + (offset * n)) must be aligned to
+ * sizeof (halfn) bytes.
+ * For n = 3, the half3 value is written to the
+ * address computed as (p + (offset * 4)). The
+ * address computed as (p + (offset * 4)) must be
+ * aligned to sizeof (half) * 4 bytes.
+ * vstorea_halfn uses the current rounding
+ * mode. The default current rounding mode is
+ * round to nearest even.
+ */
+#if __OPENCL_C_VERSION__ >= CL_VERSION_2_0
+void __ovld vstorea_half(float data, size_t offset, half *p);
+void __ovld vstorea_half2(float2 data, size_t offset, half *p);
+void __ovld vstorea_half3(float3 data, size_t offset, half *p);
+void __ovld vstorea_half4(float4 data, size_t offset, half *p);
+void __ovld vstorea_half8(float8 data, size_t offset, half *p);
+void __ovld vstorea_half16(float16 data, size_t offset, half *p);
+
+void __ovld vstorea_half_rte(float data, size_t offset, half *p);
+void __ovld vstorea_half2_rte(float2 data, size_t offset, half *p);
+void __ovld vstorea_half3_rte(float3 data, size_t offset, half *p);
+void __ovld vstorea_half4_rte(float4 data, size_t offset, half *p);
+void __ovld vstorea_half8_rte(float8 data, size_t offset, half *p);
+void __ovld vstorea_half16_rte(float16 data, size_t offset, half *p);
+
+void __ovld vstorea_half_rtz(float data, size_t offset, half *p);
+void __ovld vstorea_half2_rtz(float2 data, size_t offset, half *p);
+void __ovld vstorea_half3_rtz(float3 data, size_t offset, half *p);
+void __ovld vstorea_half4_rtz(float4 data, size_t offset, half *p);
+void __ovld vstorea_half8_rtz(float8 data, size_t offset, half *p);
+void __ovld vstorea_half16_rtz(float16 data, size_t offset, half *p);
+
+void __ovld vstorea_half_rtp(float data, size_t offset, half *p);
+void __ovld vstorea_half2_rtp(float2 data, size_t offset, half *p);
+void __ovld vstorea_half3_rtp(float3 data, size_t offset, half *p);
+void __ovld vstorea_half4_rtp(float4 data, size_t offset, half *p);
+void __ovld vstorea_half8_rtp(float8 data, size_t offset, half *p);
+void __ovld vstorea_half16_rtp(float16 data, size_t offset, half *p);
+
+void __ovld vstorea_half_rtn(float data, size_t offset, half *p);
+void __ovld vstorea_half2_rtn(float2 data, size_t offset, half *p);
+void __ovld vstorea_half3_rtn(float3 data, size_t offset, half *p);
+void __ovld vstorea_half4_rtn(float4 data, size_t offset, half *p);
+void __ovld vstorea_half8_rtn(float8 data, size_t offset, half *p);
+void __ovld vstorea_half16_rtn(float16 data, size_t offset, half *p);
+
+#ifdef cl_khr_fp64
+void __ovld vstorea_half(double data, size_t offset, half *p);
+void __ovld vstorea_half2(double2 data, size_t offset, half *p);
+void __ovld vstorea_half3(double3 data, size_t offset, half *p);
+void __ovld vstorea_half4(double4 data, size_t offset, half *p);
+void __ovld vstorea_half8(double8 data, size_t offset, half *p);
+void __ovld vstorea_half16(double16 data, size_t offset, half *p);
+
+void __ovld vstorea_half_rte(double data, size_t offset, half *p);
+void __ovld vstorea_half2_rte(double2 data, size_t offset, half *p);
+void __ovld vstorea_half3_rte(double3 data, size_t offset, half *p);
+void __ovld vstorea_half4_rte(double4 data, size_t offset, half *p);
+void __ovld vstorea_half8_rte(double8 data, size_t offset, half *p);
+void __ovld vstorea_half16_rte(double16 data, size_t offset, half *p);
+
+void __ovld vstorea_half_rtz(double data, size_t offset, half *p);
+void __ovld vstorea_half2_rtz(double2 data, size_t offset, half *p);
+void __ovld vstorea_half3_rtz(double3 data, size_t offset, half *p);
+void __ovld vstorea_half4_rtz(double4 data, size_t offset, half *p);
+void __ovld vstorea_half8_rtz(double8 data, size_t offset, half *p);
+void __ovld vstorea_half16_rtz(double16 data, size_t offset, half *p);
+
+void __ovld vstorea_half_rtp(double data, size_t offset, half *p);
+void __ovld vstorea_half2_rtp(double2 data, size_t offset, half *p);
+void __ovld vstorea_half3_rtp(double3 data, size_t offset, half *p);
+void __ovld vstorea_half4_rtp(double4 data, size_t offset, half *p);
+void __ovld vstorea_half8_rtp(double8 data, size_t offset, half *p);
+void __ovld vstorea_half16_rtp(double16 data, size_t offset, half *p);
+
+void __ovld vstorea_half_rtn(double data, size_t offset, half *p);
+void __ovld vstorea_half2_rtn(double2 data, size_t offset, half *p);
+void __ovld vstorea_half3_rtn(double3 data, size_t offset, half *p);
+void __ovld vstorea_half4_rtn(double4 data, size_t offset, half *p);
+void __ovld vstorea_half8_rtn(double8 data, size_t offset, half *p);
+void __ovld vstorea_half16_rtn(double16 data, size_t offset, half *p);
+#endif //cl_khr_fp64
+
+#else
+void __ovld vstorea_half(float data, size_t offset, __global half *p);
+void __ovld vstorea_half2(float2 data, size_t offset, __global half *p);
+void __ovld vstorea_half3(float3 data, size_t offset, __global half *p);
+void __ovld vstorea_half4(float4 data, size_t offset, __global half *p);
+void __ovld vstorea_half8(float8 data, size_t offset, __global half *p);
+void __ovld vstorea_half16(float16 data, size_t offset, __global half *p);
+
+void __ovld vstorea_half_rte(float data, size_t offset, __global half *p);
+void __ovld vstorea_half2_rte(float2 data, size_t offset, __global half *p);
+void __ovld vstorea_half3_rte(float3 data, size_t offset, __global half *p);
+void __ovld vstorea_half4_rte(float4 data, size_t offset, __global half *p);
+void __ovld vstorea_half8_rte(float8 data, size_t offset, __global half *p);
+void __ovld vstorea_half16_rte(float16 data, size_t offset, __global half *p);
+
+void __ovld vstorea_half_rtz(float data, size_t offset, __global half *p);
+void __ovld vstorea_half2_rtz(float2 data, size_t offset, __global half *p);
+void __ovld vstorea_half3_rtz(float3 data, size_t offset, __global half *p);
+void __ovld vstorea_half4_rtz(float4 data, size_t offset, __global half *p);
+void __ovld vstorea_half8_rtz(float8 data, size_t offset, __global half *p);
+void __ovld vstorea_half16_rtz(float16 data, size_t offset, __global half *p);
+
+void __ovld vstorea_half_rtp(float data, size_t offset, __global half *p);
+void __ovld vstorea_half2_rtp(float2 data, size_t offset, __global half *p);
+void __ovld vstorea_half3_rtp(float3 data, size_t offset, __global half *p);
+void __ovld vstorea_half4_rtp(float4 data, size_t offset, __global half *p);
+void __ovld vstorea_half8_rtp(float8 data, size_t offset, __global half *p);
+void __ovld vstorea_half16_rtp(float16 data, size_t offset, __global half *p);
+
+void __ovld vstorea_half_rtn(float data, size_t offset, __global half *p);
+void __ovld vstorea_half2_rtn(float2 data, size_t offset, __global half *p);
+void __ovld vstorea_half3_rtn(float3 data, size_t offset, __global half *p);
+void __ovld vstorea_half4_rtn(float4 data, size_t offset, __global half *p);
+void __ovld vstorea_half8_rtn(float8 data, size_t offset, __global half *p);
+void __ovld vstorea_half16_rtn(float16 data, size_t offset, __global half *p);
+
+void __ovld vstorea_half(float data, size_t offset, __local half *p);
+void __ovld vstorea_half2(float2 data, size_t offset, __local half *p);
+void __ovld vstorea_half3(float3 data, size_t offset, __local half *p);
+void __ovld vstorea_half4(float4 data, size_t offset, __local half *p);
+void __ovld vstorea_half8(float8 data, size_t offset, __local half *p);
+void __ovld vstorea_half16(float16 data, size_t offset, __local half *p);
+
+void __ovld vstorea_half_rte(float data, size_t offset, __local half *p);
+void __ovld vstorea_half2_rte(float2 data, size_t offset, __local half *p);
+void __ovld vstorea_half3_rte(float3 data, size_t offset, __local half *p);
+void __ovld vstorea_half4_rte(float4 data, size_t offset, __local half *p);
+void __ovld vstorea_half8_rte(float8 data, size_t offset, __local half *p);
+void __ovld vstorea_half16_rte(float16 data, size_t offset, __local half *p);
+
+void __ovld vstorea_half_rtz(float data, size_t offset, __local half *p);
+void __ovld vstorea_half2_rtz(float2 data, size_t offset, __local half *p);
+void __ovld vstorea_half3_rtz(float3 data, size_t offset, __local half *p);
+void __ovld vstorea_half4_rtz(float4 data, size_t offset, __local half *p);
+void __ovld vstorea_half8_rtz(float8 data, size_t offset, __local half *p);
+void __ovld vstorea_half16_rtz(float16 data, size_t offset, __local half *p);
+
+void __ovld vstorea_half_rtp(float data, size_t offset, __local half *p);
+void __ovld vstorea_half2_rtp(float2 data, size_t offset, __local half *p);
+void __ovld vstorea_half3_rtp(float3 data, size_t offset, __local half *p);
+void __ovld vstorea_half4_rtp(float4 data, size_t offset, __local half *p);
+void __ovld vstorea_half8_rtp(float8 data, size_t offset, __local half *p);
+void __ovld vstorea_half16_rtp(float16 data, size_t offset, __local half *p);
+
+void __ovld vstorea_half_rtn(float data, size_t offset, __local half *p);
+void __ovld vstorea_half2_rtn(float2 data, size_t offset, __local half *p);
+void __ovld vstorea_half3_rtn(float3 data, size_t offset, __local half *p);
+void __ovld vstorea_half4_rtn(float4 data, size_t offset, __local half *p);
+void __ovld vstorea_half8_rtn(float8 data, size_t offset, __local half *p);
+void __ovld vstorea_half16_rtn(float16 data, size_t offset, __local half *p);
+
+void __ovld vstorea_half(float data, size_t offset, __private half *p);
+void __ovld vstorea_half2(float2 data, size_t offset, __private half *p);
+void __ovld vstorea_half3(float3 data, size_t offset, __private half *p);
+void __ovld vstorea_half4(float4 data, size_t offset, __private half *p);
+void __ovld vstorea_half8(float8 data, size_t offset, __private half *p);
+void __ovld vstorea_half16(float16 data, size_t offset, __private half *p);
+
+void __ovld vstorea_half_rte(float data, size_t offset, __private half *p);
+void __ovld vstorea_half2_rte(float2 data, size_t offset, __private half *p);
+void __ovld vstorea_half3_rte(float3 data, size_t offset, __private half *p);
+void __ovld vstorea_half4_rte(float4 data, size_t offset, __private half *p);
+void __ovld vstorea_half8_rte(float8 data, size_t offset, __private half *p);
+void __ovld vstorea_half16_rte(float16 data, size_t offset, __private half *p);
+
+void __ovld vstorea_half_rtz(float data, size_t offset, __private half *p);
+void __ovld vstorea_half2_rtz(float2 data, size_t offset, __private half *p);
+void __ovld vstorea_half3_rtz(float3 data, size_t offset, __private half *p);
+void __ovld vstorea_half4_rtz(float4 data, size_t offset, __private half *p);
+void __ovld vstorea_half8_rtz(float8 data, size_t offset, __private half *p);
+void __ovld vstorea_half16_rtz(float16 data, size_t offset, __private half *p);
+
+void __ovld vstorea_half_rtp(float data, size_t offset, __private half *p);
+void __ovld vstorea_half2_rtp(float2 data, size_t offset, __private half *p);
+void __ovld vstorea_half3_rtp(float3 data, size_t offset, __private half *p);
+void __ovld vstorea_half4_rtp(float4 data, size_t offset, __private half *p);
+void __ovld vstorea_half8_rtp(float8 data, size_t offset, __private half *p);
+void __ovld vstorea_half16_rtp(float16 data, size_t offset, __private half *p);
+
+void __ovld vstorea_half_rtn(float data, size_t offset, __private half *p);
+void __ovld vstorea_half2_rtn(float2 data, size_t offset, __private half *p);
+void __ovld vstorea_half3_rtn(float3 data, size_t offset, __private half *p);
+void __ovld vstorea_half4_rtn(float4 data, size_t offset, __private half *p);
+void __ovld vstorea_half8_rtn(float8 data, size_t offset, __private half *p);
+void __ovld vstorea_half16_rtn(float16 data, size_t offset, __private half *p);
+
+#ifdef cl_khr_fp64
+void __ovld vstorea_half(double data, size_t offset, __global half *p);
+void __ovld vstorea_half2(double2 data, size_t offset, __global half *p);
+void __ovld vstorea_half3(double3 data, size_t offset, __global half *p);
+void __ovld vstorea_half4(double4 data, size_t offset, __global half *p);
+void __ovld vstorea_half8(double8 data, size_t offset, __global half *p);
+void __ovld vstorea_half16(double16 data, size_t offset, __global half *p);
+
+void __ovld vstorea_half_rte(double data, size_t offset, __global half *p);
+void __ovld vstorea_half2_rte(double2 data, size_t offset, __global half *p);
+void __ovld vstorea_half3_rte(double3 data, size_t offset, __global half *p);
+void __ovld vstorea_half4_rte(double4 data, size_t offset, __global half *p);
+void __ovld vstorea_half8_rte(double8 data, size_t offset, __global half *p);
+void __ovld vstorea_half16_rte(double16 data, size_t offset, __global half *p);
+
+void __ovld vstorea_half_rtz(double data, size_t offset, __global half *p);
+void __ovld vstorea_half2_rtz(double2 data, size_t offset, __global half *p);
+void __ovld vstorea_half3_rtz(double3 data, size_t offset, __global half *p);
+void __ovld vstorea_half4_rtz(double4 data, size_t offset, __global half *p);
+void __ovld vstorea_half8_rtz(double8 data, size_t offset, __global half *p);
+void __ovld vstorea_half16_rtz(double16 data, size_t offset, __global half *p);
+
+void __ovld vstorea_half_rtp(double data, size_t offset, __global half *p);
+void __ovld vstorea_half2_rtp(double2 data, size_t offset, __global half *p);
+void __ovld vstorea_half3_rtp(double3 data, size_t offset, __global half *p);
+void __ovld vstorea_half4_rtp(double4 data, size_t offset, __global half *p);
+void __ovld vstorea_half8_rtp(double8 data, size_t offset, __global half *p);
+void __ovld vstorea_half16_rtp(double16 data, size_t offset, __global half *p);
+
+void __ovld vstorea_half_rtn(double data, size_t offset, __global half *p);
+void __ovld vstorea_half2_rtn(double2 data, size_t offset, __global half *p);
+void __ovld vstorea_half3_rtn(double3 data, size_t offset, __global half *p);
+void __ovld vstorea_half4_rtn(double4 data, size_t offset, __global half *p);
+void __ovld vstorea_half8_rtn(double8 data, size_t offset, __global half *p);
+void __ovld vstorea_half16_rtn(double16 data, size_t offset, __global half *p);
+
+void __ovld vstorea_half(double data, size_t offset, __local half *p);
+void __ovld vstorea_half2(double2 data, size_t offset, __local half *p);
+void __ovld vstorea_half3(double3 data, size_t offset, __local half *p);
+void __ovld vstorea_half4(double4 data, size_t offset, __local half *p);
+void __ovld vstorea_half8(double8 data, size_t offset, __local half *p);
+void __ovld vstorea_half16(double16 data, size_t offset, __local half *p);
+
+void __ovld vstorea_half_rte(double data, size_t offset, __local half *p);
+void __ovld vstorea_half2_rte(double2 data, size_t offset, __local half *p);
+void __ovld vstorea_half3_rte(double3 data, size_t offset, __local half *p);
+void __ovld vstorea_half4_rte(double4 data, size_t offset, __local half *p);
+void __ovld vstorea_half8_rte(double8 data, size_t offset, __local half *p);
+void __ovld vstorea_half16_rte(double16 data, size_t offset, __local half *p);
+
+void __ovld vstorea_half_rtz(double data, size_t offset, __local half *p);
+void __ovld vstorea_half2_rtz(double2 data, size_t offset, __local half *p);
+void __ovld vstorea_half3_rtz(double3 data, size_t offset, __local half *p);
+void __ovld vstorea_half4_rtz(double4 data, size_t offset, __local half *p);
+void __ovld vstorea_half8_rtz(double8 data, size_t offset, __local half *p);
+void __ovld vstorea_half16_rtz(double16 data, size_t offset, __local half *p);
+
+void __ovld vstorea_half_rtp(double data, size_t offset, __local half *p);
+void __ovld vstorea_half2_rtp(double2 data, size_t offset, __local half *p);
+void __ovld vstorea_half3_rtp(double3 data, size_t offset, __local half *p);
+void __ovld vstorea_half4_rtp(double4 data, size_t offset, __local half *p);
+void __ovld vstorea_half8_rtp(double8 data, size_t offset, __local half *p);
+void __ovld vstorea_half16_rtp(double16 data, size_t offset, __local half *p);
+
+void __ovld vstorea_half_rtn(double data, size_t offset, __local half *p);
+void __ovld vstorea_half2_rtn(double2 data, size_t offset, __local half *p);
+void __ovld vstorea_half3_rtn(double3 data, size_t offset, __local half *p);
+void __ovld vstorea_half4_rtn(double4 data, size_t offset, __local half *p);
+void __ovld vstorea_half8_rtn(double8 data, size_t offset, __local half *p);
+void __ovld vstorea_half16_rtn(double16 data, size_t offset, __local half *p);
+
+void __ovld vstorea_half(double data, size_t offset, __private half *p);
+void __ovld vstorea_half2(double2 data, size_t offset, __private half *p);
+void __ovld vstorea_half3(double3 data, size_t offset, __private half *p);
+void __ovld vstorea_half4(double4 data, size_t offset, __private half *p);
+void __ovld vstorea_half8(double8 data, size_t offset, __private half *p);
+void __ovld vstorea_half16(double16 data, size_t offset, __private half *p);
+
+void __ovld vstorea_half_rte(double data, size_t offset, __private half *p);
+void __ovld vstorea_half2_rte(double2 data, size_t offset, __private half *p);
+void __ovld vstorea_half3_rte(double3 data, size_t offset, __private half *p);
+void __ovld vstorea_half4_rte(double4 data, size_t offset, __private half *p);
+void __ovld vstorea_half8_rte(double8 data, size_t offset, __private half *p);
+void __ovld vstorea_half16_rte(double16 data, size_t offset, __private half *p);
+
+void __ovld vstorea_half_rtz(double data, size_t offset, __private half *p);
+void __ovld vstorea_half2_rtz(double2 data, size_t offset, __private half *p);
+void __ovld vstorea_half3_rtz(double3 data, size_t offset, __private half *p);
+void __ovld vstorea_half4_rtz(double4 data, size_t offset, __private half *p);
+void __ovld vstorea_half8_rtz(double8 data, size_t offset, __private half *p);
+void __ovld vstorea_half16_rtz(double16 data, size_t offset, __private half *p);
+
+void __ovld vstorea_half_rtp(double data, size_t offset, __private half *p);
+void __ovld vstorea_half2_rtp(double2 data, size_t offset, __private half *p);
+void __ovld vstorea_half3_rtp(double3 data, size_t offset, __private half *p);
+void __ovld vstorea_half4_rtp(double4 data, size_t offset, __private half *p);
+void __ovld vstorea_half8_rtp(double8 data, size_t offset, __private half *p);
+void __ovld vstorea_half16_rtp(double16 data, size_t offset, __private half *p);
+
+void __ovld vstorea_half_rtn(double data, size_t offset, __private half *p);
+void __ovld vstorea_half2_rtn(double2 data, size_t offset, __private half *p);
+void __ovld vstorea_half3_rtn(double3 data, size_t offset, __private half *p);
+void __ovld vstorea_half4_rtn(double4 data, size_t offset, __private half *p);
+void __ovld vstorea_half8_rtn(double8 data, size_t offset, __private half *p);
+void __ovld vstorea_half16_rtn(double16 data, size_t offset, __private half *p);
+#endif //cl_khr_fp64
+#endif //__OPENCL_C_VERSION__ >= CL_VERSION_2_0
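+/*
+ * Example (editor's sketch, not part of the original header): the aligned
+ * n = 3 variant pads each element out to a 4-half slot, unlike vstore_half3.
+ * Kernel and buffer names are assumptions for illustration.
+ *
+ *   __kernel void pack_half3(__global const float3 *in, __global half *out) {
+ *       size_t i = get_global_id(0);
+ *       // Writes 3 halfs at (out + i * 4); the fourth half of the slot is padding.
+ *       vstorea_half3(in[i], i, out);
+ *   }
+ */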
+
+// OpenCL v1.1 s6.11.8, v1.2 s6.12.8, v2.0 s6.13.8 - Synchronization Functions
+
+// Flag type and values for barrier, mem_fence, read_mem_fence, write_mem_fence
+typedef uint cl_mem_fence_flags;
+
+/**
+ * Queue a memory fence to ensure correct
+ * ordering of memory operations to local memory
+ */
+#define CLK_LOCAL_MEM_FENCE 0x01
+
+/**
+ * Queue a memory fence to ensure correct
+ * ordering of memory operations to global memory
+ */
+#define CLK_GLOBAL_MEM_FENCE 0x02
+
+#if __OPENCL_C_VERSION__ >= CL_VERSION_2_0
+/**
+ * Queue a memory fence to ensure correct ordering of memory
+ * operations between work-items of a work-group to
+ * image memory.
+ */
+#define CLK_IMAGE_MEM_FENCE 0x04
+#endif //__OPENCL_C_VERSION__ >= CL_VERSION_2_0
+
+/**
+ * All work-items in a work-group executing the kernel
+ * on a processor must execute this function before any
+ * are allowed to continue execution beyond the barrier.
+ * This function must be encountered by all work-items in
+ * a work-group executing the kernel.
+ * If the barrier is inside a conditional statement, then all
+ * work-items must enter the conditional if any work-item
+ * enters the conditional statement and executes the
+ * barrier.
+ * If the barrier is inside a loop, all work-items must execute
+ * the barrier for each iteration of the loop before any are
+ * allowed to continue execution beyond the barrier.
+ * The barrier function also queues a memory fence
+ * (reads and writes) to ensure correct ordering of
+ * memory operations to local or global memory.
+ * The flags argument specifies the memory address space
+ * and can be set to a combination of the following literal
+ * values.
+ * CLK_LOCAL_MEM_FENCE - The barrier function
+ * will either flush any variables stored in local memory
+ * or queue a memory fence to ensure correct ordering of
+ * memory operations to local memory.
+ * CLK_GLOBAL_MEM_FENCE - The barrier function
+ * will queue a memory fence to ensure correct ordering
+ * of memory operations to global memory. This can be
+ * useful when work-items, for example, write to buffer or
+ * image objects and then want to read the updated data.
+ */
+
+void __ovld barrier(cl_mem_fence_flags flags);
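+/*
+ * Example (editor's sketch, not part of the original header): a work-group
+ * tree reduction. The barrier is executed uniformly (only the addition is
+ * guarded), satisfying the conditional/loop rules above. Names are assumed.
+ *
+ *   __kernel void block_sum(__global const float *in, __global float *out,
+ *                           __local float *scratch) {
+ *       size_t lid = get_local_id(0);
+ *       scratch[lid] = in[get_global_id(0)];
+ *       barrier(CLK_LOCAL_MEM_FENCE);  // all scratch writes visible group-wide
+ *       for (size_t s = get_local_size(0) / 2; s > 0; s >>= 1) {
+ *           if (lid < s)
+ *               scratch[lid] += scratch[lid + s];
+ *           barrier(CLK_LOCAL_MEM_FENCE);  // reached by every work-item, every iteration
+ *       }
+ *       if (lid == 0)
+ *           out[get_group_id(0)] = scratch[0];
+ *   }
+ */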
+
+#if __OPENCL_C_VERSION__ >= CL_VERSION_2_0
+
+typedef enum memory_scope
+{
+ memory_scope_work_item,
+ memory_scope_work_group,
+ memory_scope_device,
+ memory_scope_all_svm_devices,
+ memory_scope_sub_group
+} memory_scope;
+
+void __ovld work_group_barrier(cl_mem_fence_flags flags, memory_scope scope);
+void __ovld work_group_barrier(cl_mem_fence_flags flags);
+#endif //__OPENCL_C_VERSION__ >= CL_VERSION_2_0
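+/*
+ * Example (editor's sketch, not part of the original header): the OpenCL 2.0
+ * form takes an explicit scope. This line, inside a kernel body, behaves like
+ * barrier(CLK_GLOBAL_MEM_FENCE) with ordering limited to the work-group:
+ *
+ *   work_group_barrier(CLK_GLOBAL_MEM_FENCE, memory_scope_work_group);
+ */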
+
+// OpenCL v1.1 s6.11.9, v1.2 s6.12.9 - Explicit Memory Fence Functions
+
+/**
+ * Orders loads and stores of a work-item
+ * executing a kernel. This means that loads
+ * and stores preceding the mem_fence will
+ * be committed to memory before any loads
+ * and stores following the mem_fence.
+ * The flags argument specifies the memory
+ * address space and can be set to a
+ * combination of the following literal
+ * values:
+ * CLK_LOCAL_MEM_FENCE
+ * CLK_GLOBAL_MEM_FENCE.
+ */
+void __ovld mem_fence(cl_mem_fence_flags flags);
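+/*
+ * Example (editor's sketch, not part of the original header): ordering a data
+ * store before a flag store within one work-item. A real producer/consumer
+ * exchange would also need atomics for cross-item visibility; names assumed.
+ *
+ *   __kernel void ordered_publish(__local int *dst, __local volatile int *flag) {
+ *       size_t lid = get_local_id(0);
+ *       dst[lid] = (int)lid * 2;
+ *       mem_fence(CLK_LOCAL_MEM_FENCE);  // data store commits before the flag store
+ *       flag[lid] = 1;
+ *   }
+ */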
+
+/**
+ * Read memory barrier that orders only
+ * loads.
+ * The flags argument specifies the memory
+ * address space and can be set to a
+ * combination of the following literal
+ * values:
+ * CLK_LOCAL_MEM_FENCE
+ * CLK_GLOBAL_MEM_FENCE.
+ */
+void __ovld read_mem_fence(cl_mem_fence_flags flags);
+
+/**
+ * Write memory barrier that orders only
+ * stores.
+ * The flags argument specifies the memory
+ * address space and can be set to a
+ * combination of the following literal
+ * values:
+ * CLK_LOCAL_MEM_FENCE
+ * CLK_GLOBAL_MEM_FENCE.
+ */
+void __ovld write_mem_fence(cl_mem_fence_flags flags);
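+/*
+ * Example (editor's sketch, not part of the original header): the one-sided
+ * fences order only loads (read_mem_fence) or only stores (write_mem_fence)
+ * of the executing work-item. Buffer names are assumptions for illustration.
+ *
+ *   int v = data[i];
+ *   read_mem_fence(CLK_GLOBAL_MEM_FENCE);   // earlier loads commit before later loads
+ *   int w = data[j];
+ *   log[i] = v + w;
+ *   write_mem_fence(CLK_GLOBAL_MEM_FENCE);  // the store above commits before later stores
+ *   done[i] = 1;
+ */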
+
+// OpenCL v2.0 s6.13.9 - Address Space Qualifier Functions
+
+#if __OPENCL_C_VERSION__ >= CL_VERSION_2_0
+cl_mem_fence_flags __ovld get_fence(const void *ptr);
+cl_mem_fence_flags __ovld get_fence(void *ptr);
+
+/**
+ * Builtin functions to_global, to_local, and to_private need to be declared as Clang builtin functions
+ * and checked in Sema since they should be declared as
+ * addr gentype* to_addr (gentype*);
+ * where gentype is builtin type or user defined type.
+ */
+
+#endif //__OPENCL_C_VERSION__ >= CL_VERSION_2_0
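+/*
+ * Example (editor's sketch, not part of the original header): with the
+ * OpenCL 2.0 generic address space, get_fence selects the flags matching
+ * wherever the pointer actually points, so one helper covers both spaces:
+ *
+ *   void publish(int *p, int v) {   // p is a generic-address-space pointer
+ *       *p = v;
+ *       mem_fence(get_fence(p));    // CLK_LOCAL_ or CLK_GLOBAL_MEM_FENCE as needed
+ *   }
+ */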
+
+// OpenCL v1.1 s6.11.10, v1.2 s6.12.10, v2.0 s6.13.10 - Async Copies from Global to Local Memory, Local to Global Memory, and Prefetch
+
+/**
+ * event_t async_work_group_copy (
+ * __global gentype *dst,
+ * const __local gentype *src,
+ * size_t num_elements,
+ * event_t event)
+ * Perform an async copy of num_elements
+ * gentype elements from src to dst. The async
+ * copy is performed by all work-items in a work-group
+ * and this built-in function must therefore
+ * be encountered by all work-items in a work-group
+ * executing the kernel with the same
+ * argument values; otherwise the results are
+ * undefined.
+ * Returns an event object that can be used by
+ * wait_group_events to wait for the async copy
+ * to finish. The event argument can also be used
+ * to associate the async_work_group_copy with
+ * a previous async copy allowing an event to be
+ * shared by multiple async copies; otherwise event
+ * should be zero.
+ * If the event argument is non-zero, the event object
+ * supplied in the event argument will be returned.
+ * This function does not perform any implicit
+ * synchronization of source data such as using a
+ * barrier before performing the copy.
+ */
+event_t __ovld async_work_group_copy(__local char *dst, const __global char *src, size_t num_elements, event_t event);
+event_t __ovld async_work_group_copy(__local uchar *dst, const __global uchar *src, size_t num_elements, event_t event);
+event_t __ovld async_work_group_copy(__local short *dst, const __global short *src, size_t num_elements, event_t event);
+event_t __ovld async_work_group_copy(__local ushort *dst, const __global ushort *src, size_t num_elements, event_t event);
+event_t __ovld async_work_group_copy(__local int *dst, const __global int *src, size_t num_elements, event_t event);
+event_t __ovld async_work_group_copy(__local uint *dst, const __global uint *src, size_t num_elements, event_t event);
+event_t __ovld async_work_group_copy(__local long *dst, const __global long *src, size_t num_elements, event_t event);
+event_t __ovld async_work_group_copy(__local ulong *dst, const __global ulong *src, size_t num_elements, event_t event);
+event_t __ovld async_work_group_copy(__local float *dst, const __global float *src, size_t num_elements, event_t event);
+event_t __ovld async_work_group_copy(__local char2 *dst, const __global char2 *src, size_t num_elements, event_t event);
+event_t __ovld async_work_group_copy(__local uchar2 *dst, const __global uchar2 *src, size_t num_elements, event_t event);
+event_t __ovld async_work_group_copy(__local short2 *dst, const __global short2 *src, size_t num_elements, event_t event);
+event_t __ovld async_work_group_copy(__local ushort2 *dst, const __global ushort2 *src, size_t num_elements, event_t event);
+event_t __ovld async_work_group_copy(__local int2 *dst, const __global int2 *src, size_t num_elements, event_t event);
+event_t __ovld async_work_group_copy(__local uint2 *dst, const __global uint2 *src, size_t num_elements, event_t event);
+event_t __ovld async_work_group_copy(__local long2 *dst, const __global long2 *src, size_t num_elements, event_t event);
+event_t __ovld async_work_group_copy(__local ulong2 *dst, const __global ulong2 *src, size_t num_elements, event_t event);
+event_t __ovld async_work_group_copy(__local float2 *dst, const __global float2 *src, size_t num_elements, event_t event);
+event_t __ovld async_work_group_copy(__local char3 *dst, const __global char3 *src, size_t num_elements, event_t event);
+event_t __ovld async_work_group_copy(__local uchar3 *dst, const __global uchar3 *src, size_t num_elements, event_t event);
+event_t __ovld async_work_group_copy(__local short3 *dst, const __global short3 *src, size_t num_elements, event_t event);
+event_t __ovld async_work_group_copy(__local ushort3 *dst, const __global ushort3 *src, size_t num_elements, event_t event);
+event_t __ovld async_work_group_copy(__local int3 *dst, const __global int3 *src, size_t num_elements, event_t event);
+event_t __ovld async_work_group_copy(__local uint3 *dst, const __global uint3 *src, size_t num_elements, event_t event);
+event_t __ovld async_work_group_copy(__local long3 *dst, const __global long3 *src, size_t num_elements, event_t event);
+event_t __ovld async_work_group_copy(__local ulong3 *dst, const __global ulong3 *src, size_t num_elements, event_t event);
+event_t __ovld async_work_group_copy(__local float3 *dst, const __global float3 *src, size_t num_elements, event_t event);
+event_t __ovld async_work_group_copy(__local char4 *dst, const __global char4 *src, size_t num_elements, event_t event);
+event_t __ovld async_work_group_copy(__local uchar4 *dst, const __global uchar4 *src, size_t num_elements, event_t event);
+event_t __ovld async_work_group_copy(__local short4 *dst, const __global short4 *src, size_t num_elements, event_t event);
+event_t __ovld async_work_group_copy(__local ushort4 *dst, const __global ushort4 *src, size_t num_elements, event_t event);
+event_t __ovld async_work_group_copy(__local int4 *dst, const __global int4 *src, size_t num_elements, event_t event);
+event_t __ovld async_work_group_copy(__local uint4 *dst, const __global uint4 *src, size_t num_elements, event_t event);
+event_t __ovld async_work_group_copy(__local long4 *dst, const __global long4 *src, size_t num_elements, event_t event);
+event_t __ovld async_work_group_copy(__local ulong4 *dst, const __global ulong4 *src, size_t num_elements, event_t event);
+event_t __ovld async_work_group_copy(__local float4 *dst, const __global float4 *src, size_t num_elements, event_t event);
+event_t __ovld async_work_group_copy(__local char8 *dst, const __global char8 *src, size_t num_elements, event_t event);
+event_t __ovld async_work_group_copy(__local uchar8 *dst, const __global uchar8 *src, size_t num_elements, event_t event);
+event_t __ovld async_work_group_copy(__local short8 *dst, const __global short8 *src, size_t num_elements, event_t event);
+event_t __ovld async_work_group_copy(__local ushort8 *dst, const __global ushort8 *src, size_t num_elements, event_t event);
+event_t __ovld async_work_group_copy(__local int8 *dst, const __global int8 *src, size_t num_elements, event_t event);
+event_t __ovld async_work_group_copy(__local uint8 *dst, const __global uint8 *src, size_t num_elements, event_t event);
+event_t __ovld async_work_group_copy(__local long8 *dst, const __global long8 *src, size_t num_elements, event_t event);
+event_t __ovld async_work_group_copy(__local ulong8 *dst, const __global ulong8 *src, size_t num_elements, event_t event);
+event_t __ovld async_work_group_copy(__local float8 *dst, const __global float8 *src, size_t num_elements, event_t event);
+event_t __ovld async_work_group_copy(__local char16 *dst, const __global char16 *src, size_t num_elements, event_t event);
+event_t __ovld async_work_group_copy(__local uchar16 *dst, const __global uchar16 *src, size_t num_elements, event_t event);
+event_t __ovld async_work_group_copy(__local short16 *dst, const __global short16 *src, size_t num_elements, event_t event);
+event_t __ovld async_work_group_copy(__local ushort16 *dst, const __global ushort16 *src, size_t num_elements, event_t event);
+event_t __ovld async_work_group_copy(__local int16 *dst, const __global int16 *src, size_t num_elements, event_t event);
+event_t __ovld async_work_group_copy(__local uint16 *dst, const __global uint16 *src, size_t num_elements, event_t event);
+event_t __ovld async_work_group_copy(__local long16 *dst, const __global long16 *src, size_t num_elements, event_t event);
+event_t __ovld async_work_group_copy(__local ulong16 *dst, const __global ulong16 *src, size_t num_elements, event_t event);
+event_t __ovld async_work_group_copy(__local float16 *dst, const __global float16 *src, size_t num_elements, event_t event);
+event_t __ovld async_work_group_copy(__global char *dst, const __local char *src, size_t num_elements, event_t event);
+event_t __ovld async_work_group_copy(__global uchar *dst, const __local uchar *src, size_t num_elements, event_t event);
+event_t __ovld async_work_group_copy(__global short *dst, const __local short *src, size_t num_elements, event_t event);
+event_t __ovld async_work_group_copy(__global ushort *dst, const __local ushort *src, size_t num_elements, event_t event);
+event_t __ovld async_work_group_copy(__global int *dst, const __local int *src, size_t num_elements, event_t event);
+event_t __ovld async_work_group_copy(__global uint *dst, const __local uint *src, size_t num_elements, event_t event);
+event_t __ovld async_work_group_copy(__global long *dst, const __local long *src, size_t num_elements, event_t event);
+event_t __ovld async_work_group_copy(__global ulong *dst, const __local ulong *src, size_t num_elements, event_t event);
+event_t __ovld async_work_group_copy(__global float *dst, const __local float *src, size_t num_elements, event_t event);
+event_t __ovld async_work_group_copy(__global char2 *dst, const __local char2 *src, size_t num_elements, event_t event);
+event_t __ovld async_work_group_copy(__global uchar2 *dst, const __local uchar2 *src, size_t num_elements, event_t event);
+event_t __ovld async_work_group_copy(__global short2 *dst, const __local short2 *src, size_t num_elements, event_t event);
+event_t __ovld async_work_group_copy(__global ushort2 *dst, const __local ushort2 *src, size_t num_elements, event_t event);
+event_t __ovld async_work_group_copy(__global int2 *dst, const __local int2 *src, size_t num_elements, event_t event);
+event_t __ovld async_work_group_copy(__global uint2 *dst, const __local uint2 *src, size_t num_elements, event_t event);
+event_t __ovld async_work_group_copy(__global long2 *dst, const __local long2 *src, size_t num_elements, event_t event);
+event_t __ovld async_work_group_copy(__global ulong2 *dst, const __local ulong2 *src, size_t num_elements, event_t event);
+event_t __ovld async_work_group_copy(__global float2 *dst, const __local float2 *src, size_t num_elements, event_t event);
+event_t __ovld async_work_group_copy(__global char3 *dst, const __local char3 *src, size_t num_elements, event_t event);
+event_t __ovld async_work_group_copy(__global uchar3 *dst, const __local uchar3 *src, size_t num_elements, event_t event);
+event_t __ovld async_work_group_copy(__global short3 *dst, const __local short3 *src, size_t num_elements, event_t event);
+event_t __ovld async_work_group_copy(__global ushort3 *dst, const __local ushort3 *src, size_t num_elements, event_t event);
+event_t __ovld async_work_group_copy(__global int3 *dst, const __local int3 *src, size_t num_elements, event_t event);
+event_t __ovld async_work_group_copy(__global uint3 *dst, const __local uint3 *src, size_t num_elements, event_t event);
+event_t __ovld async_work_group_copy(__global long3 *dst, const __local long3 *src, size_t num_elements, event_t event);
+event_t __ovld async_work_group_copy(__global ulong3 *dst, const __local ulong3 *src, size_t num_elements, event_t event);
+event_t __ovld async_work_group_copy(__global float3 *dst, const __local float3 *src, size_t num_elements, event_t event);
+event_t __ovld async_work_group_copy(__global char4 *dst, const __local char4 *src, size_t num_elements, event_t event);
+event_t __ovld async_work_group_copy(__global uchar4 *dst, const __local uchar4 *src, size_t num_elements, event_t event);
+event_t __ovld async_work_group_copy(__global short4 *dst, const __local short4 *src, size_t num_elements, event_t event);
+event_t __ovld async_work_group_copy(__global ushort4 *dst, const __local ushort4 *src, size_t num_elements, event_t event);
+event_t __ovld async_work_group_copy(__global int4 *dst, const __local int4 *src, size_t num_elements, event_t event);
+event_t __ovld async_work_group_copy(__global uint4 *dst, const __local uint4 *src, size_t num_elements, event_t event);
+event_t __ovld async_work_group_copy(__global long4 *dst, const __local long4 *src, size_t num_elements, event_t event);
+event_t __ovld async_work_group_copy(__global ulong4 *dst, const __local ulong4 *src, size_t num_elements, event_t event);
+event_t __ovld async_work_group_copy(__global float4 *dst, const __local float4 *src, size_t num_elements, event_t event);
+event_t __ovld async_work_group_copy(__global char8 *dst, const __local char8 *src, size_t num_elements, event_t event);
+event_t __ovld async_work_group_copy(__global uchar8 *dst, const __local uchar8 *src, size_t num_elements, event_t event);
+event_t __ovld async_work_group_copy(__global short8 *dst, const __local short8 *src, size_t num_elements, event_t event);
+event_t __ovld async_work_group_copy(__global ushort8 *dst, const __local ushort8 *src, size_t num_elements, event_t event);
+event_t __ovld async_work_group_copy(__global int8 *dst, const __local int8 *src, size_t num_elements, event_t event);
+event_t __ovld async_work_group_copy(__global uint8 *dst, const __local uint8 *src, size_t num_elements, event_t event);
+event_t __ovld async_work_group_copy(__global long8 *dst, const __local long8 *src, size_t num_elements, event_t event);
+event_t __ovld async_work_group_copy(__global ulong8 *dst, const __local ulong8 *src, size_t num_elements, event_t event);
+event_t __ovld async_work_group_copy(__global float8 *dst, const __local float8 *src, size_t num_elements, event_t event);
+event_t __ovld async_work_group_copy(__global char16 *dst, const __local char16 *src, size_t num_elements, event_t event);
+event_t __ovld async_work_group_copy(__global uchar16 *dst, const __local uchar16 *src, size_t num_elements, event_t event);
+event_t __ovld async_work_group_copy(__global short16 *dst, const __local short16 *src, size_t num_elements, event_t event);
+event_t __ovld async_work_group_copy(__global ushort16 *dst, const __local ushort16 *src, size_t num_elements, event_t event);
+event_t __ovld async_work_group_copy(__global int16 *dst, const __local int16 *src, size_t num_elements, event_t event);
+event_t __ovld async_work_group_copy(__global uint16 *dst, const __local uint16 *src, size_t num_elements, event_t event);
+event_t __ovld async_work_group_copy(__global long16 *dst, const __local long16 *src, size_t num_elements, event_t event);
+event_t __ovld async_work_group_copy(__global ulong16 *dst, const __local ulong16 *src, size_t num_elements, event_t event);
+event_t __ovld async_work_group_copy(__global float16 *dst, const __local float16 *src, size_t num_elements, event_t event);
+#ifdef cl_khr_fp64
+event_t __ovld async_work_group_copy(__local double *dst, const __global double *src, size_t num_elements, event_t event);
+event_t __ovld async_work_group_copy(__local double2 *dst, const __global double2 *src, size_t num_elements, event_t event);
+event_t __ovld async_work_group_copy(__local double3 *dst, const __global double3 *src, size_t num_elements, event_t event);
+event_t __ovld async_work_group_copy(__local double4 *dst, const __global double4 *src, size_t num_elements, event_t event);
+event_t __ovld async_work_group_copy(__local double8 *dst, const __global double8 *src, size_t num_elements, event_t event);
+event_t __ovld async_work_group_copy(__local double16 *dst, const __global double16 *src, size_t num_elements, event_t event);
+event_t __ovld async_work_group_copy(__global double *dst, const __local double *src, size_t num_elements, event_t event);
+event_t __ovld async_work_group_copy(__global double2 *dst, const __local double2 *src, size_t num_elements, event_t event);
+event_t __ovld async_work_group_copy(__global double3 *dst, const __local double3 *src, size_t num_elements, event_t event);
+event_t __ovld async_work_group_copy(__global double4 *dst, const __local double4 *src, size_t num_elements, event_t event);
+event_t __ovld async_work_group_copy(__global double8 *dst, const __local double8 *src, size_t num_elements, event_t event);
+event_t __ovld async_work_group_copy(__global double16 *dst, const __local double16 *src, size_t num_elements, event_t event);
+#endif //cl_khr_fp64
+#ifdef cl_khr_fp16
+event_t __ovld async_work_group_copy(__local half *dst, const __global half *src, size_t num_elements, event_t event);
+event_t __ovld async_work_group_copy(__local half2 *dst, const __global half2 *src, size_t num_elements, event_t event);
+event_t __ovld async_work_group_copy(__local half3 *dst, const __global half3 *src, size_t num_elements, event_t event);
+event_t __ovld async_work_group_copy(__local half4 *dst, const __global half4 *src, size_t num_elements, event_t event);
+event_t __ovld async_work_group_copy(__local half8 *dst, const __global half8 *src, size_t num_elements, event_t event);
+event_t __ovld async_work_group_copy(__local half16 *dst, const __global half16 *src, size_t num_elements, event_t event);
+event_t __ovld async_work_group_copy(__global half *dst, const __local half *src, size_t num_elements, event_t event);
+event_t __ovld async_work_group_copy(__global half2 *dst, const __local half2 *src, size_t num_elements, event_t event);
+event_t __ovld async_work_group_copy(__global half3 *dst, const __local half3 *src, size_t num_elements, event_t event);
+event_t __ovld async_work_group_copy(__global half4 *dst, const __local half4 *src, size_t num_elements, event_t event);
+event_t __ovld async_work_group_copy(__global half8 *dst, const __local half8 *src, size_t num_elements, event_t event);
+event_t __ovld async_work_group_copy(__global half16 *dst, const __local half16 *src, size_t num_elements, event_t event);
+#endif //cl_khr_fp16
+
+/**
+ * Perform an async gather or scatter of
+ * num_elements gentype elements between src
+ * and dst. For the global-to-local
+ * overloads, src_stride is the stride in
+ * elements between consecutive gentype
+ * elements read from src; for the
+ * local-to-global overloads, dst_stride is
+ * the stride in elements between consecutive
+ * gentype elements written to dst. The async
+ * copy is performed by all work-items in a
+ * work-group, so this built-in function must
+ * be encountered by all work-items in the
+ * work-group executing the kernel with the
+ * same argument values; otherwise the
+ * results are undefined.
+ * Returns an event object that can be used
+ * by wait_group_events to wait for the async
+ * copy to finish. The event argument can
+ * also be used to associate the
+ * async_work_group_strided_copy with a
+ * previous async copy, allowing an event to
+ * be shared by multiple async copies;
+ * otherwise event should be zero. If the
+ * event argument is non-zero, the event
+ * object supplied in it is returned.
+ * This function performs no implicit
+ * synchronization of source data, such as
+ * using a barrier before the copy.
+ */
+event_t __ovld async_work_group_strided_copy(__local char *dst, const __global char *src, size_t num_elements, size_t src_stride, event_t event);
+event_t __ovld async_work_group_strided_copy(__local uchar *dst, const __global uchar *src, size_t num_elements, size_t src_stride, event_t event);
+event_t __ovld async_work_group_strided_copy(__local short *dst, const __global short *src, size_t num_elements, size_t src_stride, event_t event);
+event_t __ovld async_work_group_strided_copy(__local ushort *dst, const __global ushort *src, size_t num_elements, size_t src_stride, event_t event);
+event_t __ovld async_work_group_strided_copy(__local int *dst, const __global int *src, size_t num_elements, size_t src_stride, event_t event);
+event_t __ovld async_work_group_strided_copy(__local uint *dst, const __global uint *src, size_t num_elements, size_t src_stride, event_t event);
+event_t __ovld async_work_group_strided_copy(__local long *dst, const __global long *src, size_t num_elements, size_t src_stride, event_t event);
+event_t __ovld async_work_group_strided_copy(__local ulong *dst, const __global ulong *src, size_t num_elements, size_t src_stride, event_t event);
+event_t __ovld async_work_group_strided_copy(__local float *dst, const __global float *src, size_t num_elements, size_t src_stride, event_t event);
+event_t __ovld async_work_group_strided_copy(__local char2 *dst, const __global char2 *src, size_t num_elements, size_t src_stride, event_t event);
+event_t __ovld async_work_group_strided_copy(__local uchar2 *dst, const __global uchar2 *src, size_t num_elements, size_t src_stride, event_t event);
+event_t __ovld async_work_group_strided_copy(__local short2 *dst, const __global short2 *src, size_t num_elements, size_t src_stride, event_t event);
+event_t __ovld async_work_group_strided_copy(__local ushort2 *dst, const __global ushort2 *src, size_t num_elements, size_t src_stride, event_t event);
+event_t __ovld async_work_group_strided_copy(__local int2 *dst, const __global int2 *src, size_t num_elements, size_t src_stride, event_t event);
+event_t __ovld async_work_group_strided_copy(__local uint2 *dst, const __global uint2 *src, size_t num_elements, size_t src_stride, event_t event);
+event_t __ovld async_work_group_strided_copy(__local long2 *dst, const __global long2 *src, size_t num_elements, size_t src_stride, event_t event);
+event_t __ovld async_work_group_strided_copy(__local ulong2 *dst, const __global ulong2 *src, size_t num_elements, size_t src_stride, event_t event);
+event_t __ovld async_work_group_strided_copy(__local float2 *dst, const __global float2 *src, size_t num_elements, size_t src_stride, event_t event);
+event_t __ovld async_work_group_strided_copy(__local char3 *dst, const __global char3 *src, size_t num_elements, size_t src_stride, event_t event);
+event_t __ovld async_work_group_strided_copy(__local uchar3 *dst, const __global uchar3 *src, size_t num_elements, size_t src_stride, event_t event);
+event_t __ovld async_work_group_strided_copy(__local short3 *dst, const __global short3 *src, size_t num_elements, size_t src_stride, event_t event);
+event_t __ovld async_work_group_strided_copy(__local ushort3 *dst, const __global ushort3 *src, size_t num_elements, size_t src_stride, event_t event);
+event_t __ovld async_work_group_strided_copy(__local int3 *dst, const __global int3 *src, size_t num_elements, size_t src_stride, event_t event);
+event_t __ovld async_work_group_strided_copy(__local uint3 *dst, const __global uint3 *src, size_t num_elements, size_t src_stride, event_t event);
+event_t __ovld async_work_group_strided_copy(__local long3 *dst, const __global long3 *src, size_t num_elements, size_t src_stride, event_t event);
+event_t __ovld async_work_group_strided_copy(__local ulong3 *dst, const __global ulong3 *src, size_t num_elements, size_t src_stride, event_t event);
+event_t __ovld async_work_group_strided_copy(__local float3 *dst, const __global float3 *src, size_t num_elements, size_t src_stride, event_t event);
+event_t __ovld async_work_group_strided_copy(__local char4 *dst, const __global char4 *src, size_t num_elements, size_t src_stride, event_t event);
+event_t __ovld async_work_group_strided_copy(__local uchar4 *dst, const __global uchar4 *src, size_t num_elements, size_t src_stride, event_t event);
+event_t __ovld async_work_group_strided_copy(__local short4 *dst, const __global short4 *src, size_t num_elements, size_t src_stride, event_t event);
+event_t __ovld async_work_group_strided_copy(__local ushort4 *dst, const __global ushort4 *src, size_t num_elements, size_t src_stride, event_t event);
+event_t __ovld async_work_group_strided_copy(__local int4 *dst, const __global int4 *src, size_t num_elements, size_t src_stride, event_t event);
+event_t __ovld async_work_group_strided_copy(__local uint4 *dst, const __global uint4 *src, size_t num_elements, size_t src_stride, event_t event);
+event_t __ovld async_work_group_strided_copy(__local long4 *dst, const __global long4 *src, size_t num_elements, size_t src_stride, event_t event);
+event_t __ovld async_work_group_strided_copy(__local ulong4 *dst, const __global ulong4 *src, size_t num_elements, size_t src_stride, event_t event);
+event_t __ovld async_work_group_strided_copy(__local float4 *dst, const __global float4 *src, size_t num_elements, size_t src_stride, event_t event);
+event_t __ovld async_work_group_strided_copy(__local char8 *dst, const __global char8 *src, size_t num_elements, size_t src_stride, event_t event);
+event_t __ovld async_work_group_strided_copy(__local uchar8 *dst, const __global uchar8 *src, size_t num_elements, size_t src_stride, event_t event);
+event_t __ovld async_work_group_strided_copy(__local short8 *dst, const __global short8 *src, size_t num_elements, size_t src_stride, event_t event);
+event_t __ovld async_work_group_strided_copy(__local ushort8 *dst, const __global ushort8 *src, size_t num_elements, size_t src_stride, event_t event);
+event_t __ovld async_work_group_strided_copy(__local int8 *dst, const __global int8 *src, size_t num_elements, size_t src_stride, event_t event);
+event_t __ovld async_work_group_strided_copy(__local uint8 *dst, const __global uint8 *src, size_t num_elements, size_t src_stride, event_t event);
+event_t __ovld async_work_group_strided_copy(__local long8 *dst, const __global long8 *src, size_t num_elements, size_t src_stride, event_t event);
+event_t __ovld async_work_group_strided_copy(__local ulong8 *dst, const __global ulong8 *src, size_t num_elements, size_t src_stride, event_t event);
+event_t __ovld async_work_group_strided_copy(__local float8 *dst, const __global float8 *src, size_t num_elements, size_t src_stride, event_t event);
+event_t __ovld async_work_group_strided_copy(__local char16 *dst, const __global char16 *src, size_t num_elements, size_t src_stride, event_t event);
+event_t __ovld async_work_group_strided_copy(__local uchar16 *dst, const __global uchar16 *src, size_t num_elements, size_t src_stride, event_t event);
+event_t __ovld async_work_group_strided_copy(__local short16 *dst, const __global short16 *src, size_t num_elements, size_t src_stride, event_t event);
+event_t __ovld async_work_group_strided_copy(__local ushort16 *dst, const __global ushort16 *src, size_t num_elements, size_t src_stride, event_t event);
+event_t __ovld async_work_group_strided_copy(__local int16 *dst, const __global int16 *src, size_t num_elements, size_t src_stride, event_t event);
+event_t __ovld async_work_group_strided_copy(__local uint16 *dst, const __global uint16 *src, size_t num_elements, size_t src_stride, event_t event);
+event_t __ovld async_work_group_strided_copy(__local long16 *dst, const __global long16 *src, size_t num_elements, size_t src_stride, event_t event);
+event_t __ovld async_work_group_strided_copy(__local ulong16 *dst, const __global ulong16 *src, size_t num_elements, size_t src_stride, event_t event);
+event_t __ovld async_work_group_strided_copy(__local float16 *dst, const __global float16 *src, size_t num_elements, size_t src_stride, event_t event);
+event_t __ovld async_work_group_strided_copy(__global char *dst, const __local char *src, size_t num_elements, size_t dst_stride, event_t event);
+event_t __ovld async_work_group_strided_copy(__global uchar *dst, const __local uchar *src, size_t num_elements, size_t dst_stride, event_t event);
+event_t __ovld async_work_group_strided_copy(__global short *dst, const __local short *src, size_t num_elements, size_t dst_stride, event_t event);
+event_t __ovld async_work_group_strided_copy(__global ushort *dst, const __local ushort *src, size_t num_elements, size_t dst_stride, event_t event);
+event_t __ovld async_work_group_strided_copy(__global int *dst, const __local int *src, size_t num_elements, size_t dst_stride, event_t event);
+event_t __ovld async_work_group_strided_copy(__global uint *dst, const __local uint *src, size_t num_elements, size_t dst_stride, event_t event);
+event_t __ovld async_work_group_strided_copy(__global long *dst, const __local long *src, size_t num_elements, size_t dst_stride, event_t event);
+event_t __ovld async_work_group_strided_copy(__global ulong *dst, const __local ulong *src, size_t num_elements, size_t dst_stride, event_t event);
+event_t __ovld async_work_group_strided_copy(__global float *dst, const __local float *src, size_t num_elements, size_t dst_stride, event_t event);
+event_t __ovld async_work_group_strided_copy(__global char2 *dst, const __local char2 *src, size_t num_elements, size_t dst_stride, event_t event);
+event_t __ovld async_work_group_strided_copy(__global uchar2 *dst, const __local uchar2 *src, size_t num_elements, size_t dst_stride, event_t event);
+event_t __ovld async_work_group_strided_copy(__global short2 *dst, const __local short2 *src, size_t num_elements, size_t dst_stride, event_t event);
+event_t __ovld async_work_group_strided_copy(__global ushort2 *dst, const __local ushort2 *src, size_t num_elements, size_t dst_stride, event_t event);
+event_t __ovld async_work_group_strided_copy(__global int2 *dst, const __local int2 *src, size_t num_elements, size_t dst_stride, event_t event);
+event_t __ovld async_work_group_strided_copy(__global uint2 *dst, const __local uint2 *src, size_t num_elements, size_t dst_stride, event_t event);
+event_t __ovld async_work_group_strided_copy(__global long2 *dst, const __local long2 *src, size_t num_elements, size_t dst_stride, event_t event);
+event_t __ovld async_work_group_strided_copy(__global ulong2 *dst, const __local ulong2 *src, size_t num_elements, size_t dst_stride, event_t event);
+event_t __ovld async_work_group_strided_copy(__global float2 *dst, const __local float2 *src, size_t num_elements, size_t dst_stride, event_t event);
+event_t __ovld async_work_group_strided_copy(__global char3 *dst, const __local char3 *src, size_t num_elements, size_t dst_stride, event_t event);
+event_t __ovld async_work_group_strided_copy(__global uchar3 *dst, const __local uchar3 *src, size_t num_elements, size_t dst_stride, event_t event);
+event_t __ovld async_work_group_strided_copy(__global short3 *dst, const __local short3 *src, size_t num_elements, size_t dst_stride, event_t event);
+event_t __ovld async_work_group_strided_copy(__global ushort3 *dst, const __local ushort3 *src, size_t num_elements, size_t dst_stride, event_t event);
+event_t __ovld async_work_group_strided_copy(__global int3 *dst, const __local int3 *src, size_t num_elements, size_t dst_stride, event_t event);
+event_t __ovld async_work_group_strided_copy(__global uint3 *dst, const __local uint3 *src, size_t num_elements, size_t dst_stride, event_t event);
+event_t __ovld async_work_group_strided_copy(__global long3 *dst, const __local long3 *src, size_t num_elements, size_t dst_stride, event_t event);
+event_t __ovld async_work_group_strided_copy(__global ulong3 *dst, const __local ulong3 *src, size_t num_elements, size_t dst_stride, event_t event);
+event_t __ovld async_work_group_strided_copy(__global float3 *dst, const __local float3 *src, size_t num_elements, size_t dst_stride, event_t event);
+event_t __ovld async_work_group_strided_copy(__global char4 *dst, const __local char4 *src, size_t num_elements, size_t dst_stride, event_t event);
+event_t __ovld async_work_group_strided_copy(__global uchar4 *dst, const __local uchar4 *src, size_t num_elements, size_t dst_stride, event_t event);
+event_t __ovld async_work_group_strided_copy(__global short4 *dst, const __local short4 *src, size_t num_elements, size_t dst_stride, event_t event);
+event_t __ovld async_work_group_strided_copy(__global ushort4 *dst, const __local ushort4 *src, size_t num_elements, size_t dst_stride, event_t event);
+event_t __ovld async_work_group_strided_copy(__global int4 *dst, const __local int4 *src, size_t num_elements, size_t dst_stride, event_t event);
+event_t __ovld async_work_group_strided_copy(__global uint4 *dst, const __local uint4 *src, size_t num_elements, size_t dst_stride, event_t event);
+event_t __ovld async_work_group_strided_copy(__global long4 *dst, const __local long4 *src, size_t num_elements, size_t dst_stride, event_t event);
+event_t __ovld async_work_group_strided_copy(__global ulong4 *dst, const __local ulong4 *src, size_t num_elements, size_t dst_stride, event_t event);
+event_t __ovld async_work_group_strided_copy(__global float4 *dst, const __local float4 *src, size_t num_elements, size_t dst_stride, event_t event);
+event_t __ovld async_work_group_strided_copy(__global char8 *dst, const __local char8 *src, size_t num_elements, size_t dst_stride, event_t event);
+event_t __ovld async_work_group_strided_copy(__global uchar8 *dst, const __local uchar8 *src, size_t num_elements, size_t dst_stride, event_t event);
+event_t __ovld async_work_group_strided_copy(__global short8 *dst, const __local short8 *src, size_t num_elements, size_t dst_stride, event_t event);
+event_t __ovld async_work_group_strided_copy(__global ushort8 *dst, const __local ushort8 *src, size_t num_elements, size_t dst_stride, event_t event);
+event_t __ovld async_work_group_strided_copy(__global int8 *dst, const __local int8 *src, size_t num_elements, size_t dst_stride, event_t event);
+event_t __ovld async_work_group_strided_copy(__global uint8 *dst, const __local uint8 *src, size_t num_elements, size_t dst_stride, event_t event);
+event_t __ovld async_work_group_strided_copy(__global long8 *dst, const __local long8 *src, size_t num_elements, size_t dst_stride, event_t event);
+event_t __ovld async_work_group_strided_copy(__global ulong8 *dst, const __local ulong8 *src, size_t num_elements, size_t dst_stride, event_t event);
+event_t __ovld async_work_group_strided_copy(__global float8 *dst, const __local float8 *src, size_t num_elements, size_t dst_stride, event_t event);
+event_t __ovld async_work_group_strided_copy(__global char16 *dst, const __local char16 *src, size_t num_elements, size_t dst_stride, event_t event);
+event_t __ovld async_work_group_strided_copy(__global uchar16 *dst, const __local uchar16 *src, size_t num_elements, size_t dst_stride, event_t event);
+event_t __ovld async_work_group_strided_copy(__global short16 *dst, const __local short16 *src, size_t num_elements, size_t dst_stride, event_t event);
+event_t __ovld async_work_group_strided_copy(__global ushort16 *dst, const __local ushort16 *src, size_t num_elements, size_t dst_stride, event_t event);
+event_t __ovld async_work_group_strided_copy(__global int16 *dst, const __local int16 *src, size_t num_elements, size_t dst_stride, event_t event);
+event_t __ovld async_work_group_strided_copy(__global uint16 *dst, const __local uint16 *src, size_t num_elements, size_t dst_stride, event_t event);
+event_t __ovld async_work_group_strided_copy(__global long16 *dst, const __local long16 *src, size_t num_elements, size_t dst_stride, event_t event);
+event_t __ovld async_work_group_strided_copy(__global ulong16 *dst, const __local ulong16 *src, size_t num_elements, size_t dst_stride, event_t event);
+event_t __ovld async_work_group_strided_copy(__global float16 *dst, const __local float16 *src, size_t num_elements, size_t dst_stride, event_t event);
+#ifdef cl_khr_fp64
+event_t __ovld async_work_group_strided_copy(__local double *dst, const __global double *src, size_t num_elements, size_t src_stride, event_t event);
+event_t __ovld async_work_group_strided_copy(__local double2 *dst, const __global double2 *src, size_t num_elements, size_t src_stride, event_t event);
+event_t __ovld async_work_group_strided_copy(__local double3 *dst, const __global double3 *src, size_t num_elements, size_t src_stride, event_t event);
+event_t __ovld async_work_group_strided_copy(__local double4 *dst, const __global double4 *src, size_t num_elements, size_t src_stride, event_t event);
+event_t __ovld async_work_group_strided_copy(__local double8 *dst, const __global double8 *src, size_t num_elements, size_t src_stride, event_t event);
+event_t __ovld async_work_group_strided_copy(__local double16 *dst, const __global double16 *src, size_t num_elements, size_t src_stride, event_t event);
+event_t __ovld async_work_group_strided_copy(__global double *dst, const __local double *src, size_t num_elements, size_t dst_stride, event_t event);
+event_t __ovld async_work_group_strided_copy(__global double2 *dst, const __local double2 *src, size_t num_elements, size_t dst_stride, event_t event);
+event_t __ovld async_work_group_strided_copy(__global double3 *dst, const __local double3 *src, size_t num_elements, size_t dst_stride, event_t event);
+event_t __ovld async_work_group_strided_copy(__global double4 *dst, const __local double4 *src, size_t num_elements, size_t dst_stride, event_t event);
+event_t __ovld async_work_group_strided_copy(__global double8 *dst, const __local double8 *src, size_t num_elements, size_t dst_stride, event_t event);
+event_t __ovld async_work_group_strided_copy(__global double16 *dst, const __local double16 *src, size_t num_elements, size_t dst_stride, event_t event);
+#endif //cl_khr_fp64
+#ifdef cl_khr_fp16
+event_t __ovld async_work_group_strided_copy(__local half *dst, const __global half *src, size_t num_elements, size_t src_stride, event_t event);
+event_t __ovld async_work_group_strided_copy(__local half2 *dst, const __global half2 *src, size_t num_elements, size_t src_stride, event_t event);
+event_t __ovld async_work_group_strided_copy(__local half3 *dst, const __global half3 *src, size_t num_elements, size_t src_stride, event_t event);
+event_t __ovld async_work_group_strided_copy(__local half4 *dst, const __global half4 *src, size_t num_elements, size_t src_stride, event_t event);
+event_t __ovld async_work_group_strided_copy(__local half8 *dst, const __global half8 *src, size_t num_elements, size_t src_stride, event_t event);
+event_t __ovld async_work_group_strided_copy(__local half16 *dst, const __global half16 *src, size_t num_elements, size_t src_stride, event_t event);
+event_t __ovld async_work_group_strided_copy(__global half *dst, const __local half *src, size_t num_elements, size_t dst_stride, event_t event);
+event_t __ovld async_work_group_strided_copy(__global half2 *dst, const __local half2 *src, size_t num_elements, size_t dst_stride, event_t event);
+event_t __ovld async_work_group_strided_copy(__global half3 *dst, const __local half3 *src, size_t num_elements, size_t dst_stride, event_t event);
+event_t __ovld async_work_group_strided_copy(__global half4 *dst, const __local half4 *src, size_t num_elements, size_t dst_stride, event_t event);
+event_t __ovld async_work_group_strided_copy(__global half8 *dst, const __local half8 *src, size_t num_elements, size_t dst_stride, event_t event);
+event_t __ovld async_work_group_strided_copy(__global half16 *dst, const __local half16 *src, size_t num_elements, size_t dst_stride, event_t event);
+#endif //cl_khr_fp16
+
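+// A minimal usage sketch (illustrative only; the kernel and the names 'src',
+// 'tile', 'n' and 'stride' are hypothetical, not part of this header):
+//
+//   __kernel void gather_rows(__global const float *src, __local float *tile,
+//                             int n, int stride) {
+//     // Gather n floats, reading every stride-th element of src.
+//     event_t e = async_work_group_strided_copy(tile, src, (size_t)n,
+//                                               (size_t)stride, 0);
+//     wait_group_events(1, &e);   // every work-item waits for the copy
+//     // ... operate on tile ...
+//   }
+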
+/**
+ * Wait for events that identify the
+ * async_work_group_copy operations to
+ * complete. The event objects specified in
+ * event_list will be released after the wait is
+ * performed.
+ * This function must be encountered by all work-items
+ * in a work-group executing the kernel with
+ * the same num_events and event objects specified
+ * in event_list; otherwise the results are undefined.
+ */
+void __ovld wait_group_events(int num_events, event_t *event_list);
+
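+// Hedged sketch of event sharing (the buffers 'dst_a', 'src_a', 'dst_b',
+// 'src_b' and the count 'n' are hypothetical): passing the event returned by
+// the first copy into the second lets a single wait cover both transfers.
+//
+//   event_t e = async_work_group_copy(dst_a, src_a, n, 0);
+//   e = async_work_group_copy(dst_b, src_b, n, e);
+//   wait_group_events(1, &e);
+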
+/**
+ * Prefetch num_elements * sizeof(gentype)
+ * bytes into the global cache. The prefetch
+ * instruction is applied to a work-item in a
+ * work-group and does not affect the
+ * functional behavior of the kernel.
+ */
+void __ovld prefetch(const __global char *p, size_t num_elements);
+void __ovld prefetch(const __global uchar *p, size_t num_elements);
+void __ovld prefetch(const __global short *p, size_t num_elements);
+void __ovld prefetch(const __global ushort *p, size_t num_elements);
+void __ovld prefetch(const __global int *p, size_t num_elements);
+void __ovld prefetch(const __global uint *p, size_t num_elements);
+void __ovld prefetch(const __global long *p, size_t num_elements);
+void __ovld prefetch(const __global ulong *p, size_t num_elements);
+void __ovld prefetch(const __global float *p, size_t num_elements);
+void __ovld prefetch(const __global char2 *p, size_t num_elements);
+void __ovld prefetch(const __global uchar2 *p, size_t num_elements);
+void __ovld prefetch(const __global short2 *p, size_t num_elements);
+void __ovld prefetch(const __global ushort2 *p, size_t num_elements);
+void __ovld prefetch(const __global int2 *p, size_t num_elements);
+void __ovld prefetch(const __global uint2 *p, size_t num_elements);
+void __ovld prefetch(const __global long2 *p, size_t num_elements);
+void __ovld prefetch(const __global ulong2 *p, size_t num_elements);
+void __ovld prefetch(const __global float2 *p, size_t num_elements);
+void __ovld prefetch(const __global char3 *p, size_t num_elements);
+void __ovld prefetch(const __global uchar3 *p, size_t num_elements);
+void __ovld prefetch(const __global short3 *p, size_t num_elements);
+void __ovld prefetch(const __global ushort3 *p, size_t num_elements);
+void __ovld prefetch(const __global int3 *p, size_t num_elements);
+void __ovld prefetch(const __global uint3 *p, size_t num_elements);
+void __ovld prefetch(const __global long3 *p, size_t num_elements);
+void __ovld prefetch(const __global ulong3 *p, size_t num_elements);
+void __ovld prefetch(const __global float3 *p, size_t num_elements);
+void __ovld prefetch(const __global char4 *p, size_t num_elements);
+void __ovld prefetch(const __global uchar4 *p, size_t num_elements);
+void __ovld prefetch(const __global short4 *p, size_t num_elements);
+void __ovld prefetch(const __global ushort4 *p, size_t num_elements);
+void __ovld prefetch(const __global int4 *p, size_t num_elements);
+void __ovld prefetch(const __global uint4 *p, size_t num_elements);
+void __ovld prefetch(const __global long4 *p, size_t num_elements);
+void __ovld prefetch(const __global ulong4 *p, size_t num_elements);
+void __ovld prefetch(const __global float4 *p, size_t num_elements);
+void __ovld prefetch(const __global char8 *p, size_t num_elements);
+void __ovld prefetch(const __global uchar8 *p, size_t num_elements);
+void __ovld prefetch(const __global short8 *p, size_t num_elements);
+void __ovld prefetch(const __global ushort8 *p, size_t num_elements);
+void __ovld prefetch(const __global int8 *p, size_t num_elements);
+void __ovld prefetch(const __global uint8 *p, size_t num_elements);
+void __ovld prefetch(const __global long8 *p, size_t num_elements);
+void __ovld prefetch(const __global ulong8 *p, size_t num_elements);
+void __ovld prefetch(const __global float8 *p, size_t num_elements);
+void __ovld prefetch(const __global char16 *p, size_t num_elements);
+void __ovld prefetch(const __global uchar16 *p, size_t num_elements);
+void __ovld prefetch(const __global short16 *p, size_t num_elements);
+void __ovld prefetch(const __global ushort16 *p, size_t num_elements);
+void __ovld prefetch(const __global int16 *p, size_t num_elements);
+void __ovld prefetch(const __global uint16 *p, size_t num_elements);
+void __ovld prefetch(const __global long16 *p, size_t num_elements);
+void __ovld prefetch(const __global ulong16 *p, size_t num_elements);
+void __ovld prefetch(const __global float16 *p, size_t num_elements);
+#ifdef cl_khr_fp64
+void __ovld prefetch(const __global double *p, size_t num_elements);
+void __ovld prefetch(const __global double2 *p, size_t num_elements);
+void __ovld prefetch(const __global double3 *p, size_t num_elements);
+void __ovld prefetch(const __global double4 *p, size_t num_elements);
+void __ovld prefetch(const __global double8 *p, size_t num_elements);
+void __ovld prefetch(const __global double16 *p, size_t num_elements);
+#endif //cl_khr_fp64
+#ifdef cl_khr_fp16
+void __ovld prefetch(const __global half *p, size_t num_elements);
+void __ovld prefetch(const __global half2 *p, size_t num_elements);
+void __ovld prefetch(const __global half3 *p, size_t num_elements);
+void __ovld prefetch(const __global half4 *p, size_t num_elements);
+void __ovld prefetch(const __global half8 *p, size_t num_elements);
+void __ovld prefetch(const __global half16 *p, size_t num_elements);
+#endif // cl_khr_fp16
+
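+// Illustrative sketch (hypothetical kernel; prefetch is only a performance
+// hint and never changes results): warm the cache for data a later iteration
+// will read.
+//
+//   __kernel void sum_blocks(__global const float4 *in, __global float *out) {
+//     size_t gid = get_global_id(0);
+//     prefetch(&in[gid + 64], 64);   // hint: this block will be read soon
+//     // ... consume in[gid] now, in[gid + 64] later ...
+//   }
+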
+// OpenCL v1.1 s6.11.1, v1.2 s6.12.11 - Atomic Functions
+
+#if defined(cl_khr_int64_base_atomics) && defined(cl_khr_int64_extended_atomics)
+#pragma OPENCL EXTENSION cl_khr_int64_base_atomics : enable
+#pragma OPENCL EXTENSION cl_khr_int64_extended_atomics : enable
+#endif
+/**
+ * Read the 32-bit value (referred to as old)
+ * stored at the location pointed to by p.
+ * Compute (old + val) and store the result
+ * at the location pointed to by p. The
+ * function returns old.
+ */
+int __ovld atomic_add(volatile __global int *p, int val);
+unsigned int __ovld atomic_add(volatile __global unsigned int *p, unsigned int val);
+int __ovld atomic_add(volatile __local int *p, int val);
+unsigned int __ovld atomic_add(volatile __local unsigned int *p, unsigned int val);
+
+#if defined(cl_khr_global_int32_base_atomics)
+int __ovld atom_add(volatile __global int *p, int val);
+unsigned int __ovld atom_add(volatile __global unsigned int *p, unsigned int val);
+#endif
+#if defined(cl_khr_local_int32_base_atomics)
+int __ovld atom_add(volatile __local int *p, int val);
+unsigned int __ovld atom_add(volatile __local unsigned int *p, unsigned int val);
+#endif
+
+#if defined(cl_khr_int64_base_atomics)
+long __ovld atom_add(volatile __global long *p, long val);
+unsigned long __ovld atom_add(volatile __global unsigned long *p, unsigned long val);
+long __ovld atom_add(volatile __local long *p, long val);
+unsigned long __ovld atom_add(volatile __local unsigned long *p, unsigned long val);
+#endif
+
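+// Hedged sketch (hypothetical 'counter' of type volatile __global int *):
+// because atomic_add returns the pre-addition value, it can hand out unique
+// offsets to concurrent work-items.
+//
+//   int my_offset = atomic_add(counter, 1);   // old value is unique per caller
+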
+/**
+ * Read the 32-bit value (referred to as old) stored at the location pointed
+ * to by p. Compute (old - val) and store the result at the location pointed
+ * to by p. The function returns old.
+ */
+int __ovld atomic_sub(volatile __global int *p, int val);
+unsigned int __ovld atomic_sub(volatile __global unsigned int *p, unsigned int val);
+int __ovld atomic_sub(volatile __local int *p, int val);
+unsigned int __ovld atomic_sub(volatile __local unsigned int *p, unsigned int val);
+
+#if defined(cl_khr_global_int32_base_atomics)
+int __ovld atom_sub(volatile __global int *p, int val);
+unsigned int __ovld atom_sub(volatile __global unsigned int *p, unsigned int val);
+#endif
+#if defined(cl_khr_local_int32_base_atomics)
+int __ovld atom_sub(volatile __local int *p, int val);
+unsigned int __ovld atom_sub(volatile __local unsigned int *p, unsigned int val);
+#endif
+
+#if defined(cl_khr_int64_base_atomics)
+long __ovld atom_sub(volatile __global long *p, long val);
+unsigned long __ovld atom_sub(volatile __global unsigned long *p, unsigned long val);
+long __ovld atom_sub(volatile __local long *p, long val);
+unsigned long __ovld atom_sub(volatile __local unsigned long *p, unsigned long val);
+#endif
+
+/**
+ * Swaps the old value stored at the location
+ * pointed to by p with the new value given
+ * by val. Returns the old value.
+ */
+int __ovld atomic_xchg(volatile __global int *p, int val);
+unsigned int __ovld atomic_xchg(volatile __global unsigned int *p, unsigned int val);
+int __ovld atomic_xchg(volatile __local int *p, int val);
+unsigned int __ovld atomic_xchg(volatile __local unsigned int *p, unsigned int val);
+float __ovld atomic_xchg(volatile __global float *p, float val);
+float __ovld atomic_xchg(volatile __local float *p, float val);
+
+#if defined(cl_khr_global_int32_base_atomics)
+int __ovld atom_xchg(volatile __global int *p, int val);
+unsigned int __ovld atom_xchg(volatile __global unsigned int *p, unsigned int val);
+#endif
+#if defined(cl_khr_local_int32_base_atomics)
+int __ovld atom_xchg(volatile __local int *p, int val);
+unsigned int __ovld atom_xchg(volatile __local unsigned int *p, unsigned int val);
+#endif
+
+#if defined(cl_khr_int64_base_atomics)
+long __ovld atom_xchg(volatile __global long *p, long val);
+long __ovld atom_xchg(volatile __local long *p, long val);
+unsigned long __ovld atom_xchg(volatile __global unsigned long *p, unsigned long val);
+unsigned long __ovld atom_xchg(volatile __local unsigned long *p, unsigned long val);
+#endif
+
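+// Hedged sketch (hypothetical 'lock' of type volatile __global int *):
+// atomic_xchg acts as a test-and-set. Spinning across a work-group can
+// deadlock on some devices, so this is illustrative only.
+//
+//   while (atomic_xchg(lock, 1) != 0) ;   // acquire: old value 0 means we won
+//   // ... critical section ...
+//   atomic_xchg(lock, 0);                 // release
+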
+/**
+ * Read the 32-bit value (referred to as old)
+ * stored at the location pointed to by p.
+ * Compute (old + 1) and store the result at
+ * the location pointed to by p. The function
+ * returns old.
+ */
+int __ovld atomic_inc(volatile __global int *p);
+unsigned int __ovld atomic_inc(volatile __global unsigned int *p);
+int __ovld atomic_inc(volatile __local int *p);
+unsigned int __ovld atomic_inc(volatile __local unsigned int *p);
+
+#if defined(cl_khr_global_int32_base_atomics)
+int __ovld atom_inc(volatile __global int *p);
+unsigned int __ovld atom_inc(volatile __global unsigned int *p);
+#endif
+#if defined(cl_khr_local_int32_base_atomics)
+int __ovld atom_inc(volatile __local int *p);
+unsigned int __ovld atom_inc(volatile __local unsigned int *p);
+#endif
+
+#if defined(cl_khr_int64_base_atomics)
+long __ovld atom_inc(volatile __global long *p);
+unsigned long __ovld atom_inc(volatile __global unsigned long *p);
+long __ovld atom_inc(volatile __local long *p);
+unsigned long __ovld atom_inc(volatile __local unsigned long *p);
+#endif
+
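+// Hedged sketch (hypothetical 'count' and 'out' buffers): atomic_inc is a
+// common way to compact results from many work-items into a dense array.
+//
+//   if (keep) {
+//     unsigned int slot = atomic_inc(count);   // old value = our unique slot
+//     out[slot] = value;
+//   }
+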
+/**
+ * Read the 32-bit value (referred to as old)
+ * stored at the location pointed to by p.
+ * Compute (old - 1) and store the result at
+ * the location pointed to by p. The function
+ * returns old.
+ */
+int __ovld atomic_dec(volatile __global int *p);
+unsigned int __ovld atomic_dec(volatile __global unsigned int *p);
+int __ovld atomic_dec(volatile __local int *p);
+unsigned int __ovld atomic_dec(volatile __local unsigned int *p);
+
+#if defined(cl_khr_global_int32_base_atomics)
+int __ovld atom_dec(volatile __global int *p);
+unsigned int __ovld atom_dec(volatile __global unsigned int *p);
+#endif
+#if defined(cl_khr_local_int32_base_atomics)
+int __ovld atom_dec(volatile __local int *p);
+unsigned int __ovld atom_dec(volatile __local unsigned int *p);
+#endif
+
+#if defined(cl_khr_int64_base_atomics)
+long __ovld atom_dec(volatile __global long *p);
+unsigned long __ovld atom_dec(volatile __global unsigned long *p);
+long __ovld atom_dec(volatile __local long *p);
+unsigned long __ovld atom_dec(volatile __local unsigned long *p);
+#endif
+
+/**
+ * Read the 32-bit value (referred to as old)
+ * stored at the location pointed to by p.
+ * Compute (old == cmp) ? val : old and store
+ * the result at the location pointed to by
+ * p. The function returns old.
+ */
+int __ovld atomic_cmpxchg(volatile __global int *p, int cmp, int val);
+unsigned int __ovld atomic_cmpxchg(volatile __global unsigned int *p, unsigned int cmp, unsigned int val);
+int __ovld atomic_cmpxchg(volatile __local int *p, int cmp, int val);
+unsigned int __ovld atomic_cmpxchg(volatile __local unsigned int *p, unsigned int cmp, unsigned int val);
+
+#if defined(cl_khr_global_int32_base_atomics)
+int __ovld atom_cmpxchg(volatile __global int *p, int cmp, int val);
+unsigned int __ovld atom_cmpxchg(volatile __global unsigned int *p, unsigned int cmp, unsigned int val);
+#endif
+#if defined(cl_khr_local_int32_base_atomics)
+int __ovld atom_cmpxchg(volatile __local int *p, int cmp, int val);
+unsigned int __ovld atom_cmpxchg(volatile __local unsigned int *p, unsigned int cmp, unsigned int val);
+#endif
+
+#if defined(cl_khr_int64_base_atomics)
+long __ovld atom_cmpxchg(volatile __global long *p, long cmp, long val);
+unsigned long __ovld atom_cmpxchg(volatile __global unsigned long *p, unsigned long cmp, unsigned long val);
+long __ovld atom_cmpxchg(volatile __local long *p, long cmp, long val);
+unsigned long __ovld atom_cmpxchg(volatile __local unsigned long *p, unsigned long cmp, unsigned long val);
+#endif
+
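+// Hedged sketch of the classic compare-and-swap retry loop (hypothetical
+// helper; OpenCL 1.x has no float atomic add, so the float is reinterpreted
+// as int bits with the as_int/as_float builtins):
+//
+//   void atomic_add_float(volatile __global int *p, float val) {
+//     int old, new_bits;
+//     do {
+//       old      = *p;
+//       new_bits = as_int(as_float(old) + val);
+//     } while (atomic_cmpxchg(p, old, new_bits) != old);
+//   }
+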
+/**
+ * Read the 32-bit value (referred to as old)
+ * stored at the location pointed to by p.
+ * Compute min(old, val) and store the
+ * minimum value at the location pointed to
+ * by p. The function returns old.
+ */
+int __ovld atomic_min(volatile __global int *p, int val);
+unsigned int __ovld atomic_min(volatile __global unsigned int *p, unsigned int val);
+int __ovld atomic_min(volatile __local int *p, int val);
+unsigned int __ovld atomic_min(volatile __local unsigned int *p, unsigned int val);
+
+#if defined(cl_khr_global_int32_extended_atomics)
+int __ovld atom_min(volatile __global int *p, int val);
+unsigned int __ovld atom_min(volatile __global unsigned int *p, unsigned int val);
+#endif
+#if defined(cl_khr_local_int32_extended_atomics)
+int __ovld atom_min(volatile __local int *p, int val);
+unsigned int __ovld atom_min(volatile __local unsigned int *p, unsigned int val);
+#endif
+
+#if defined(cl_khr_int64_extended_atomics)
+long __ovld atom_min(volatile __global long *p, long val);
+unsigned long __ovld atom_min(volatile __global unsigned long *p, unsigned long val);
+long __ovld atom_min(volatile __local long *p, long val);
+unsigned long __ovld atom_min(volatile __local unsigned long *p, unsigned long val);
+#endif
+
+/**
+ * Read the 32-bit value (referred to as old)
+ * stored at the location pointed to by p.
+ * Compute max(old, val) and store the
+ * maximum value at the location pointed to
+ * by p. The function returns old.
+ */
+int __ovld atomic_max(volatile __global int *p, int val);
+unsigned int __ovld atomic_max(volatile __global unsigned int *p, unsigned int val);
+int __ovld atomic_max(volatile __local int *p, int val);
+unsigned int __ovld atomic_max(volatile __local unsigned int *p, unsigned int val);
+
+#if defined(cl_khr_global_int32_extended_atomics)
+int __ovld atom_max(volatile __global int *p, int val);
+unsigned int __ovld atom_max(volatile __global unsigned int *p, unsigned int val);
+#endif
+#if defined(cl_khr_local_int32_extended_atomics)
+int __ovld atom_max(volatile __local int *p, int val);
+unsigned int __ovld atom_max(volatile __local unsigned int *p, unsigned int val);
+#endif
+
+#if defined(cl_khr_int64_extended_atomics)
+long __ovld atom_max(volatile __global long *p, long val);
+unsigned long __ovld atom_max(volatile __global unsigned long *p, unsigned long val);
+long __ovld atom_max(volatile __local long *p, long val);
+unsigned long __ovld atom_max(volatile __local unsigned long *p, unsigned long val);
+#endif
+
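+// Hedged sketch (hypothetical 'global_max' of type volatile __global int *):
+// each work-item folds its value into one running maximum; the returned old
+// value is often ignored.
+//
+//   atomic_max(global_max, my_value);
+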
+/**
+ * Read the 32-bit value (referred to as old)
+ * stored at the location pointed to by p.
+ * Compute (old & val) and store the result
+ * at the location pointed to by p. The
+ * function returns old.
+ */
+int __ovld atomic_and(volatile __global int *p, int val);
+unsigned int __ovld atomic_and(volatile __global unsigned int *p, unsigned int val);
+int __ovld atomic_and(volatile __local int *p, int val);
+unsigned int __ovld atomic_and(volatile __local unsigned int *p, unsigned int val);
+
+#if defined(cl_khr_global_int32_extended_atomics)
+int __ovld atom_and(volatile __global int *p, int val);
+unsigned int __ovld atom_and(volatile __global unsigned int *p, unsigned int val);
+#endif
+#if defined(cl_khr_local_int32_extended_atomics)
+int __ovld atom_and(volatile __local int *p, int val);
+unsigned int __ovld atom_and(volatile __local unsigned int *p, unsigned int val);
+#endif
+
+#if defined(cl_khr_int64_extended_atomics)
+long __ovld atom_and(volatile __global long *p, long val);
+unsigned long __ovld atom_and(volatile __global unsigned long *p, unsigned long val);
+long __ovld atom_and(volatile __local long *p, long val);
+unsigned long __ovld atom_and(volatile __local unsigned long *p, unsigned long val);
+#endif
+
+/**
+ * Read the 32-bit value (referred to as old)
+ * stored at the location pointed to by p.
+ * Compute (old | val) and store the result
+ * at the location pointed to by p. The
+ * function returns old.
+ */
+int __ovld atomic_or(volatile __global int *p, int val);
+unsigned int __ovld atomic_or(volatile __global unsigned int *p, unsigned int val);
+int __ovld atomic_or(volatile __local int *p, int val);
+unsigned int __ovld atomic_or(volatile __local unsigned int *p, unsigned int val);
+
+#if defined(cl_khr_global_int32_extended_atomics)
+int __ovld atom_or(volatile __global int *p, int val);
+unsigned int __ovld atom_or(volatile __global unsigned int *p, unsigned int val);
+#endif
+#if defined(cl_khr_local_int32_extended_atomics)
+int __ovld atom_or(volatile __local int *p, int val);
+unsigned int __ovld atom_or(volatile __local unsigned int *p, unsigned int val);
+#endif
+
+#if defined(cl_khr_int64_extended_atomics)
+long __ovld atom_or(volatile __global long *p, long val);
+unsigned long __ovld atom_or(volatile __global unsigned long *p, unsigned long val);
+long __ovld atom_or(volatile __local long *p, long val);
+unsigned long __ovld atom_or(volatile __local unsigned long *p, unsigned long val);
+#endif
+
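+// Hedged sketch (hypothetical 'flags' of type volatile __global unsigned int *):
+// atomic_or sets bits without losing concurrent updates, and atomic_and
+// clears them.
+//
+//   atomic_or(flags, 1u << bit);        // set a bit
+//   atomic_and(flags, ~(1u << bit));    // clear it again
+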
+/**
+ * Read the 32-bit value (referred to as old)
+ * stored at the location pointed to by p.
+ * Compute (old ^ val) and store the result
+ * at the location pointed to by p. The
+ * function returns old.
+ */
+int __ovld atomic_xor(volatile __global int *p, int val);
+unsigned int __ovld atomic_xor(volatile __global unsigned int *p, unsigned int val);
+int __ovld atomic_xor(volatile __local int *p, int val);
+unsigned int __ovld atomic_xor(volatile __local unsigned int *p, unsigned int val);
+
+#if defined(cl_khr_global_int32_extended_atomics)
+int __ovld atom_xor(volatile __global int *p, int val);
+unsigned int __ovld atom_xor(volatile __global unsigned int *p, unsigned int val);
+#endif
+#if defined(cl_khr_local_int32_extended_atomics)
+int __ovld atom_xor(volatile __local int *p, int val);
+unsigned int __ovld atom_xor(volatile __local unsigned int *p, unsigned int val);
+#endif
+
+#if defined(cl_khr_int64_base_atomics) && defined(cl_khr_int64_extended_atomics)
+#pragma OPENCL EXTENSION cl_khr_int64_base_atomics : disable
+#pragma OPENCL EXTENSION cl_khr_int64_extended_atomics : disable
+#endif
+
+// OpenCL v2.0 s6.13.11 - Atomics Functions
+
+#if __OPENCL_C_VERSION__ >= CL_VERSION_2_0
+#ifndef ATOMIC_VAR_INIT
+#define ATOMIC_VAR_INIT(x) (x)
+#endif //ATOMIC_VAR_INIT
+#define ATOMIC_FLAG_INIT 0
+
+// enum values aligned with what clang uses in EmitAtomicExpr()
+typedef enum memory_order
+{
+ memory_order_relaxed,
+ memory_order_acquire,
+ memory_order_release,
+ memory_order_acq_rel,
+ memory_order_seq_cst
+} memory_order;
+
+// 64-bit atomics (long, ulong, and double when cl_khr_fp64 is supported) require the cl_khr_int64_base_atomics and cl_khr_int64_extended_atomics extensions
+#if defined(cl_khr_int64_base_atomics) && defined(cl_khr_int64_extended_atomics)
+#pragma OPENCL EXTENSION cl_khr_int64_base_atomics : enable
+#pragma OPENCL EXTENSION cl_khr_int64_extended_atomics : enable
+#endif
+
+// atomic_init()
+void __ovld atomic_init(volatile atomic_int *object, int value);
+void __ovld atomic_init(volatile atomic_uint *object, uint value);
+void __ovld atomic_init(volatile atomic_float *object, float value);
+#if defined(cl_khr_int64_base_atomics) && defined(cl_khr_int64_extended_atomics)
+void __ovld atomic_init(volatile atomic_long *object, long value);
+void __ovld atomic_init(volatile atomic_ulong *object, ulong value);
+#ifdef cl_khr_fp64
+void __ovld atomic_init(volatile atomic_double *object, double value);
+#endif //cl_khr_fp64
+#endif
+
+// atomic_work_item_fence()
+void __ovld atomic_work_item_fence(cl_mem_fence_flags flags, memory_order order, memory_scope scope);
+
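+// Hedged sketch of a release/acquire-style handoff (hypothetical 'data'
+// buffer and 'flag' of type volatile __global atomic_int *): a release fence
+// after the data write orders it before the flag store.
+//
+//   data[i] = v;
+//   atomic_work_item_fence(CLK_GLOBAL_MEM_FENCE, memory_order_release,
+//                          memory_scope_device);
+//   atomic_store_explicit(flag, 1, memory_order_relaxed);
+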
+// atomic_fetch()
+
+int __ovld atomic_fetch_add(volatile atomic_int *object, int operand);
+int __ovld atomic_fetch_add_explicit(volatile atomic_int *object, int operand, memory_order order);
+int __ovld atomic_fetch_add_explicit(volatile atomic_int *object, int operand, memory_order order, memory_scope scope);
+uint __ovld atomic_fetch_add(volatile atomic_uint *object, uint operand);
+uint __ovld atomic_fetch_add_explicit(volatile atomic_uint *object, uint operand, memory_order order);
+uint __ovld atomic_fetch_add_explicit(volatile atomic_uint *object, uint operand, memory_order order, memory_scope scope);
+int __ovld atomic_fetch_sub(volatile atomic_int *object, int operand);
+int __ovld atomic_fetch_sub_explicit(volatile atomic_int *object, int operand, memory_order order);
+int __ovld atomic_fetch_sub_explicit(volatile atomic_int *object, int operand, memory_order order, memory_scope scope);
+uint __ovld atomic_fetch_sub(volatile atomic_uint *object, uint operand);
+uint __ovld atomic_fetch_sub_explicit(volatile atomic_uint *object, uint operand, memory_order order);
+uint __ovld atomic_fetch_sub_explicit(volatile atomic_uint *object, uint operand, memory_order order, memory_scope scope);
+int __ovld atomic_fetch_or(volatile atomic_int *object, int operand);
+int __ovld atomic_fetch_or_explicit(volatile atomic_int *object, int operand, memory_order order);
+int __ovld atomic_fetch_or_explicit(volatile atomic_int *object, int operand, memory_order order, memory_scope scope);
+uint __ovld atomic_fetch_or(volatile atomic_uint *object, uint operand);
+uint __ovld atomic_fetch_or_explicit(volatile atomic_uint *object, uint operand, memory_order order);
+uint __ovld atomic_fetch_or_explicit(volatile atomic_uint *object, uint operand, memory_order order, memory_scope scope);
+int __ovld atomic_fetch_xor(volatile atomic_int *object, int operand);
+int __ovld atomic_fetch_xor_explicit(volatile atomic_int *object, int operand, memory_order order);
+int __ovld atomic_fetch_xor_explicit(volatile atomic_int *object, int operand, memory_order order, memory_scope scope);
+uint __ovld atomic_fetch_xor(volatile atomic_uint *object, uint operand);
+uint __ovld atomic_fetch_xor_explicit(volatile atomic_uint *object, uint operand, memory_order order);
+uint __ovld atomic_fetch_xor_explicit(volatile atomic_uint *object, uint operand, memory_order order, memory_scope scope);
+int __ovld atomic_fetch_and(volatile atomic_int *object, int operand);
+int __ovld atomic_fetch_and_explicit(volatile atomic_int *object, int operand, memory_order order);
+int __ovld atomic_fetch_and_explicit(volatile atomic_int *object, int operand, memory_order order, memory_scope scope);
+uint __ovld atomic_fetch_and(volatile atomic_uint *object, uint operand);
+uint __ovld atomic_fetch_and_explicit(volatile atomic_uint *object, uint operand, memory_order order);
+uint __ovld atomic_fetch_and_explicit(volatile atomic_uint *object, uint operand, memory_order order, memory_scope scope);
+int __ovld atomic_fetch_min(volatile atomic_int *object, int operand);
+int __ovld atomic_fetch_min_explicit(volatile atomic_int *object, int operand, memory_order order);
+int __ovld atomic_fetch_min_explicit(volatile atomic_int *object, int operand, memory_order order, memory_scope scope);
+uint __ovld atomic_fetch_min(volatile atomic_uint *object, uint operand);
+uint __ovld atomic_fetch_min_explicit(volatile atomic_uint *object, uint operand, memory_order order);
+uint __ovld atomic_fetch_min_explicit(volatile atomic_uint *object, uint operand, memory_order order, memory_scope scope);
+uint __ovld atomic_fetch_min(volatile atomic_uint *object, int operand);
+uint __ovld atomic_fetch_min_explicit(volatile atomic_uint *object, int operand, memory_order order);
+uint __ovld atomic_fetch_min_explicit(volatile atomic_uint *object, int operand, memory_order order, memory_scope scope);
+int __ovld atomic_fetch_max(volatile atomic_int *object, int operand);
+int __ovld atomic_fetch_max_explicit(volatile atomic_int *object, int operand, memory_order order);
+int __ovld atomic_fetch_max_explicit(volatile atomic_int *object, int operand, memory_order order, memory_scope scope);
+uint __ovld atomic_fetch_max(volatile atomic_uint *object, uint operand);
+uint __ovld atomic_fetch_max_explicit(volatile atomic_uint *object, uint operand, memory_order order);
+uint __ovld atomic_fetch_max_explicit(volatile atomic_uint *object, uint operand, memory_order order, memory_scope scope);
+uint __ovld atomic_fetch_max(volatile atomic_uint *object, int operand);
+uint __ovld atomic_fetch_max_explicit(volatile atomic_uint *object, int operand, memory_order order);
+uint __ovld atomic_fetch_max_explicit(volatile atomic_uint *object, int operand, memory_order order, memory_scope scope);
+
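+// Hedged sketch (hypothetical 'c' of type volatile __global atomic_uint *):
+// when a counter needs atomicity but no ordering, the explicit form can
+// relax both the memory order and the scope.
+//
+//   uint old = atomic_fetch_add_explicit(c, 1u, memory_order_relaxed,
+//                                        memory_scope_work_group);
+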
+#if defined(cl_khr_int64_base_atomics) && defined(cl_khr_int64_extended_atomics)
+long __ovld atomic_fetch_add(volatile atomic_long *object, long operand);
+long __ovld atomic_fetch_add_explicit(volatile atomic_long *object, long operand, memory_order order);
+long __ovld atomic_fetch_add_explicit(volatile atomic_long *object, long operand, memory_order order, memory_scope scope);
+ulong __ovld atomic_fetch_add(volatile atomic_ulong *object, ulong operand);
+ulong __ovld atomic_fetch_add_explicit(volatile atomic_ulong *object, ulong operand, memory_order order);
+ulong __ovld atomic_fetch_add_explicit(volatile atomic_ulong *object, ulong operand, memory_order order, memory_scope scope);
+long __ovld atomic_fetch_sub(volatile atomic_long *object, long operand);
+long __ovld atomic_fetch_sub_explicit(volatile atomic_long *object, long operand, memory_order order);
+long __ovld atomic_fetch_sub_explicit(volatile atomic_long *object, long operand, memory_order order, memory_scope scope);
+ulong __ovld atomic_fetch_sub(volatile atomic_ulong *object, ulong operand);
+ulong __ovld atomic_fetch_sub_explicit(volatile atomic_ulong *object, ulong operand, memory_order order);
+ulong __ovld atomic_fetch_sub_explicit(volatile atomic_ulong *object, ulong operand, memory_order order, memory_scope scope);
+long __ovld atomic_fetch_or(volatile atomic_long *object, long operand);
+long __ovld atomic_fetch_or_explicit(volatile atomic_long *object, long operand, memory_order order);
+long __ovld atomic_fetch_or_explicit(volatile atomic_long *object, long operand, memory_order order, memory_scope scope);
+ulong __ovld atomic_fetch_or(volatile atomic_ulong *object, ulong operand);
+ulong __ovld atomic_fetch_or_explicit(volatile atomic_ulong *object, ulong operand, memory_order order);
+ulong __ovld atomic_fetch_or_explicit(volatile atomic_ulong *object, ulong operand, memory_order order, memory_scope scope);
+long __ovld atomic_fetch_xor(volatile atomic_long *object, long operand);
+long __ovld atomic_fetch_xor_explicit(volatile atomic_long *object, long operand, memory_order order);
+long __ovld atomic_fetch_xor_explicit(volatile atomic_long *object, long operand, memory_order order, memory_scope scope);
+ulong __ovld atomic_fetch_xor(volatile atomic_ulong *object, ulong operand);
+ulong __ovld atomic_fetch_xor_explicit(volatile atomic_ulong *object, ulong operand, memory_order order);
+ulong __ovld atomic_fetch_xor_explicit(volatile atomic_ulong *object, ulong operand, memory_order order, memory_scope scope);
+long __ovld atomic_fetch_and(volatile atomic_long *object, long operand);
+long __ovld atomic_fetch_and_explicit(volatile atomic_long *object, long operand, memory_order order);
+long __ovld atomic_fetch_and_explicit(volatile atomic_long *object, long operand, memory_order order, memory_scope scope);
+ulong __ovld atomic_fetch_and(volatile atomic_ulong *object, ulong operand);
+ulong __ovld atomic_fetch_and_explicit(volatile atomic_ulong *object, ulong operand, memory_order order);
+ulong __ovld atomic_fetch_and_explicit(volatile atomic_ulong *object, ulong operand, memory_order order, memory_scope scope);
+long __ovld atomic_fetch_min(volatile atomic_long *object, long operand);
+long __ovld atomic_fetch_min_explicit(volatile atomic_long *object, long operand, memory_order order);
+long __ovld atomic_fetch_min_explicit(volatile atomic_long *object, long operand, memory_order order, memory_scope scope);
+ulong __ovld atomic_fetch_min(volatile atomic_ulong *object, ulong operand);
+ulong __ovld atomic_fetch_min_explicit(volatile atomic_ulong *object, ulong operand, memory_order order);
+ulong __ovld atomic_fetch_min_explicit(volatile atomic_ulong *object, ulong operand, memory_order order, memory_scope scope);
+ulong __ovld atomic_fetch_min(volatile atomic_ulong *object, long operand);
+ulong __ovld atomic_fetch_min_explicit(volatile atomic_ulong *object, long operand, memory_order order);
+ulong __ovld atomic_fetch_min_explicit(volatile atomic_ulong *object, long operand, memory_order order, memory_scope scope);
+long __ovld atomic_fetch_max(volatile atomic_long *object, long operand);
+long __ovld atomic_fetch_max_explicit(volatile atomic_long *object, long operand, memory_order order);
+long __ovld atomic_fetch_max_explicit(volatile atomic_long *object, long operand, memory_order order, memory_scope scope);
+ulong __ovld atomic_fetch_max(volatile atomic_ulong *object, ulong operand);
+ulong __ovld atomic_fetch_max_explicit(volatile atomic_ulong *object, ulong operand, memory_order order);
+ulong __ovld atomic_fetch_max_explicit(volatile atomic_ulong *object, ulong operand, memory_order order, memory_scope scope);
+ulong __ovld atomic_fetch_max(volatile atomic_ulong *object, long operand);
+ulong __ovld atomic_fetch_max_explicit(volatile atomic_ulong *object, long operand, memory_order order);
+ulong __ovld atomic_fetch_max_explicit(volatile atomic_ulong *object, long operand, memory_order order, memory_scope scope);
+#endif //defined(cl_khr_int64_base_atomics) && defined(cl_khr_int64_extended_atomics)
+
+// OpenCL v2.0 s6.13.11.7.5:
+// add/sub: atomic type argument can be uintptr_t/intptr_t, value type argument can be ptrdiff_t.
+// or/xor/and/min/max: atomic type argument can be intptr_t/uintptr_t, value type argument can be intptr_t/uintptr_t.
+
+#if defined(cl_khr_int64_base_atomics) && defined(cl_khr_int64_extended_atomics)
+uintptr_t __ovld atomic_fetch_add(volatile atomic_uintptr_t *object, ptrdiff_t operand);
+uintptr_t __ovld atomic_fetch_add_explicit(volatile atomic_uintptr_t *object, ptrdiff_t operand, memory_order order);
+uintptr_t __ovld atomic_fetch_add_explicit(volatile atomic_uintptr_t *object, ptrdiff_t operand, memory_order order, memory_scope scope);
+uintptr_t __ovld atomic_fetch_sub(volatile atomic_uintptr_t *object, ptrdiff_t operand);
+uintptr_t __ovld atomic_fetch_sub_explicit(volatile atomic_uintptr_t *object, ptrdiff_t operand, memory_order order);
+uintptr_t __ovld atomic_fetch_sub_explicit(volatile atomic_uintptr_t *object, ptrdiff_t operand, memory_order order, memory_scope scope);
+
+uintptr_t __ovld atomic_fetch_or(volatile atomic_uintptr_t *object, intptr_t operand);
+uintptr_t __ovld atomic_fetch_or_explicit(volatile atomic_uintptr_t *object, intptr_t operand, memory_order order);
+uintptr_t __ovld atomic_fetch_or_explicit(volatile atomic_uintptr_t *object, intptr_t operand, memory_order order, memory_scope scope);
+uintptr_t __ovld atomic_fetch_xor(volatile atomic_uintptr_t *object, intptr_t operand);
+uintptr_t __ovld atomic_fetch_xor_explicit(volatile atomic_uintptr_t *object, intptr_t operand, memory_order order);
+uintptr_t __ovld atomic_fetch_xor_explicit(volatile atomic_uintptr_t *object, intptr_t operand, memory_order order, memory_scope scope);
+uintptr_t __ovld atomic_fetch_and(volatile atomic_uintptr_t *object, intptr_t operand);
+uintptr_t __ovld atomic_fetch_and_explicit(volatile atomic_uintptr_t *object, intptr_t operand, memory_order order);
+uintptr_t __ovld atomic_fetch_and_explicit(volatile atomic_uintptr_t *object, intptr_t operand, memory_order order, memory_scope scope);
+uintptr_t __ovld atomic_fetch_min(volatile atomic_uintptr_t *object, intptr_t operand);
+uintptr_t __ovld atomic_fetch_min_explicit(volatile atomic_uintptr_t *object, intptr_t operand, memory_order order);
+uintptr_t __ovld atomic_fetch_min_explicit(volatile atomic_uintptr_t *object, intptr_t operand, memory_order order, memory_scope scope);
+uintptr_t __ovld atomic_fetch_max(volatile atomic_uintptr_t *object, intptr_t operand);
+uintptr_t __ovld atomic_fetch_max_explicit(volatile atomic_uintptr_t *object, intptr_t operand, memory_order order);
+uintptr_t __ovld atomic_fetch_max_explicit(volatile atomic_uintptr_t *object, intptr_t operand, memory_order order, memory_scope scope);
+
+intptr_t __ovld atomic_fetch_or(volatile atomic_intptr_t *object, uintptr_t operand);
+intptr_t __ovld atomic_fetch_or_explicit(volatile atomic_intptr_t *object, uintptr_t operand, memory_order order);
+intptr_t __ovld atomic_fetch_or_explicit(volatile atomic_intptr_t *object, uintptr_t operand, memory_order order, memory_scope scope);
+intptr_t __ovld atomic_fetch_xor(volatile atomic_intptr_t *object, uintptr_t operand);
+intptr_t __ovld atomic_fetch_xor_explicit(volatile atomic_intptr_t *object, uintptr_t operand, memory_order order);
+intptr_t __ovld atomic_fetch_xor_explicit(volatile atomic_intptr_t *object, uintptr_t operand, memory_order order, memory_scope scope);
+intptr_t __ovld atomic_fetch_and(volatile atomic_intptr_t *object, uintptr_t operand);
+intptr_t __ovld atomic_fetch_and_explicit(volatile atomic_intptr_t *object, uintptr_t operand, memory_order order);
+intptr_t __ovld atomic_fetch_and_explicit(volatile atomic_intptr_t *object, uintptr_t operand, memory_order order, memory_scope scope);
+intptr_t __ovld atomic_fetch_min(volatile atomic_intptr_t *object, uintptr_t operand);
+intptr_t __ovld atomic_fetch_min_explicit(volatile atomic_intptr_t *object, uintptr_t operand, memory_order order);
+intptr_t __ovld atomic_fetch_min_explicit(volatile atomic_intptr_t *object, uintptr_t operand, memory_order order, memory_scope scope);
+intptr_t __ovld atomic_fetch_max(volatile atomic_intptr_t *object, uintptr_t operand);
+intptr_t __ovld atomic_fetch_max_explicit(volatile atomic_intptr_t *object, uintptr_t operand, memory_order order);
+intptr_t __ovld atomic_fetch_max_explicit(volatile atomic_intptr_t *object, uintptr_t operand, memory_order order, memory_scope scope);
+#endif
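+
+// Illustrative sketch (not part of the header): the pointer-sized overloads
+// above allow atomic arithmetic on addresses, e.g. bumping a shared cursor
+// (the 'cursor' and 'sz' names are hypothetical, and atomic_fetch_add on
+// atomic_uintptr_t is assumed to be declared earlier in this section):
+//   uintptr_t old = atomic_fetch_add(&cursor, (ptrdiff_t)sz);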
+
+// atomic_store()
+
+void __ovld atomic_store(volatile atomic_int *object, int desired);
+void __ovld atomic_store_explicit(volatile atomic_int *object, int desired, memory_order order);
+void __ovld atomic_store_explicit(volatile atomic_int *object, int desired, memory_order order, memory_scope scope);
+void __ovld atomic_store(volatile atomic_uint *object, uint desired);
+void __ovld atomic_store_explicit(volatile atomic_uint *object, uint desired, memory_order order);
+void __ovld atomic_store_explicit(volatile atomic_uint *object, uint desired, memory_order order, memory_scope scope);
+void __ovld atomic_store(volatile atomic_float *object, float desired);
+void __ovld atomic_store_explicit(volatile atomic_float *object, float desired, memory_order order);
+void __ovld atomic_store_explicit(volatile atomic_float *object, float desired, memory_order order, memory_scope scope);
+#if defined(cl_khr_int64_base_atomics) && defined(cl_khr_int64_extended_atomics)
+#ifdef cl_khr_fp64
+void __ovld atomic_store(volatile atomic_double *object, double desired);
+void __ovld atomic_store_explicit(volatile atomic_double *object, double desired, memory_order order);
+void __ovld atomic_store_explicit(volatile atomic_double *object, double desired, memory_order order, memory_scope scope);
+#endif //cl_khr_fp64
+void __ovld atomic_store(volatile atomic_long *object, long desired);
+void __ovld atomic_store_explicit(volatile atomic_long *object, long desired, memory_order order);
+void __ovld atomic_store_explicit(volatile atomic_long *object, long desired, memory_order order, memory_scope scope);
+void __ovld atomic_store(volatile atomic_ulong *object, ulong desired);
+void __ovld atomic_store_explicit(volatile atomic_ulong *object, ulong desired, memory_order order);
+void __ovld atomic_store_explicit(volatile atomic_ulong *object, ulong desired, memory_order order, memory_scope scope);
+#endif
+
+// atomic_load()
+
+int __ovld atomic_load(volatile atomic_int *object);
+int __ovld atomic_load_explicit(volatile atomic_int *object, memory_order order);
+int __ovld atomic_load_explicit(volatile atomic_int *object, memory_order order, memory_scope scope);
+uint __ovld atomic_load(volatile atomic_uint *object);
+uint __ovld atomic_load_explicit(volatile atomic_uint *object, memory_order order);
+uint __ovld atomic_load_explicit(volatile atomic_uint *object, memory_order order, memory_scope scope);
+float __ovld atomic_load(volatile atomic_float *object);
+float __ovld atomic_load_explicit(volatile atomic_float *object, memory_order order);
+float __ovld atomic_load_explicit(volatile atomic_float *object, memory_order order, memory_scope scope);
+#if defined(cl_khr_int64_base_atomics) && defined(cl_khr_int64_extended_atomics)
+#ifdef cl_khr_fp64
+double __ovld atomic_load(volatile atomic_double *object);
+double __ovld atomic_load_explicit(volatile atomic_double *object, memory_order order);
+double __ovld atomic_load_explicit(volatile atomic_double *object, memory_order order, memory_scope scope);
+#endif //cl_khr_fp64
+long __ovld atomic_load(volatile atomic_long *object);
+long __ovld atomic_load_explicit(volatile atomic_long *object, memory_order order);
+long __ovld atomic_load_explicit(volatile atomic_long *object, memory_order order, memory_scope scope);
+ulong __ovld atomic_load(volatile atomic_ulong *object);
+ulong __ovld atomic_load_explicit(volatile atomic_ulong *object, memory_order order);
+ulong __ovld atomic_load_explicit(volatile atomic_ulong *object, memory_order order, memory_scope scope);
+#endif
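+
+// Illustrative sketch: a release store paired with an acquire load to
+// publish a value between work-items ('flag' is hypothetical):
+//   atomic_store_explicit(&flag, 1, memory_order_release);
+//   ...
+//   if (atomic_load_explicit(&flag, memory_order_acquire)) { /* visible */ }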
+
+// atomic_exchange()
+
+int __ovld atomic_exchange(volatile atomic_int *object, int desired);
+int __ovld atomic_exchange_explicit(volatile atomic_int *object, int desired, memory_order order);
+int __ovld atomic_exchange_explicit(volatile atomic_int *object, int desired, memory_order order, memory_scope scope);
+uint __ovld atomic_exchange(volatile atomic_uint *object, uint desired);
+uint __ovld atomic_exchange_explicit(volatile atomic_uint *object, uint desired, memory_order order);
+uint __ovld atomic_exchange_explicit(volatile atomic_uint *object, uint desired, memory_order order, memory_scope scope);
+float __ovld atomic_exchange(volatile atomic_float *object, float desired);
+float __ovld atomic_exchange_explicit(volatile atomic_float *object, float desired, memory_order order);
+float __ovld atomic_exchange_explicit(volatile atomic_float *object, float desired, memory_order order, memory_scope scope);
+#if defined(cl_khr_int64_base_atomics) && defined(cl_khr_int64_extended_atomics)
+#ifdef cl_khr_fp64
+double __ovld atomic_exchange(volatile atomic_double *object, double desired);
+double __ovld atomic_exchange_explicit(volatile atomic_double *object, double desired, memory_order order);
+double __ovld atomic_exchange_explicit(volatile atomic_double *object, double desired, memory_order order, memory_scope scope);
+#endif //cl_khr_fp64
+long __ovld atomic_exchange(volatile atomic_long *object, long desired);
+long __ovld atomic_exchange_explicit(volatile atomic_long *object, long desired, memory_order order);
+long __ovld atomic_exchange_explicit(volatile atomic_long *object, long desired, memory_order order, memory_scope scope);
+ulong __ovld atomic_exchange(volatile atomic_ulong *object, ulong desired);
+ulong __ovld atomic_exchange_explicit(volatile atomic_ulong *object, ulong desired, memory_order order);
+ulong __ovld atomic_exchange_explicit(volatile atomic_ulong *object, ulong desired, memory_order order, memory_scope scope);
+#endif
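+
+// Illustrative sketch: atomic_exchange returns the value previously held,
+// e.g. claiming a slot ('slot' is hypothetical):
+//   int prev = atomic_exchange(&slot, 42);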
+
+// atomic_compare_exchange_strong() and atomic_compare_exchange_weak()
+
+bool __ovld atomic_compare_exchange_strong(volatile atomic_int *object, int *expected, int desired);
+bool __ovld atomic_compare_exchange_strong_explicit(volatile atomic_int *object, int *expected,
+ int desired, memory_order success, memory_order failure);
+bool __ovld atomic_compare_exchange_strong_explicit(volatile atomic_int *object, int *expected,
+ int desired, memory_order success, memory_order failure, memory_scope scope);
+bool __ovld atomic_compare_exchange_strong(volatile atomic_uint *object, uint *expected, uint desired);
+bool __ovld atomic_compare_exchange_strong_explicit(volatile atomic_uint *object, uint *expected,
+ uint desired, memory_order success, memory_order failure);
+bool __ovld atomic_compare_exchange_strong_explicit(volatile atomic_uint *object, uint *expected,
+ uint desired, memory_order success, memory_order failure, memory_scope scope);
+bool __ovld atomic_compare_exchange_weak(volatile atomic_int *object, int *expected, int desired);
+bool __ovld atomic_compare_exchange_weak_explicit(volatile atomic_int *object, int *expected,
+ int desired, memory_order success, memory_order failure);
+bool __ovld atomic_compare_exchange_weak_explicit(volatile atomic_int *object, int *expected,
+ int desired, memory_order success, memory_order failure, memory_scope scope);
+bool __ovld atomic_compare_exchange_weak(volatile atomic_uint *object, uint *expected, uint desired);
+bool __ovld atomic_compare_exchange_weak_explicit(volatile atomic_uint *object, uint *expected,
+ uint desired, memory_order success, memory_order failure);
+bool __ovld atomic_compare_exchange_weak_explicit(volatile atomic_uint *object, uint *expected,
+ uint desired, memory_order success, memory_order failure, memory_scope scope);
+bool __ovld atomic_compare_exchange_strong(volatile atomic_float *object, float *expected, float desired);
+bool __ovld atomic_compare_exchange_strong_explicit(volatile atomic_float *object, float *expected,
+ float desired, memory_order success, memory_order failure);
+bool __ovld atomic_compare_exchange_strong_explicit(volatile atomic_float *object, float *expected,
+ float desired, memory_order success, memory_order failure, memory_scope scope);
+bool __ovld atomic_compare_exchange_weak(volatile atomic_float *object, float *expected, float desired);
+bool __ovld atomic_compare_exchange_weak_explicit(volatile atomic_float *object, float *expected,
+ float desired, memory_order success, memory_order failure);
+bool __ovld atomic_compare_exchange_weak_explicit(volatile atomic_float *object, float *expected,
+ float desired, memory_order success, memory_order failure, memory_scope scope);
+#if defined(cl_khr_int64_base_atomics) && defined(cl_khr_int64_extended_atomics)
+#ifdef cl_khr_fp64
+bool __ovld atomic_compare_exchange_strong(volatile atomic_double *object, double *expected, double desired);
+bool __ovld atomic_compare_exchange_strong_explicit(volatile atomic_double *object, double *expected,
+ double desired, memory_order success, memory_order failure);
+bool __ovld atomic_compare_exchange_strong_explicit(volatile atomic_double *object, double *expected,
+ double desired, memory_order success, memory_order failure, memory_scope scope);
+bool __ovld atomic_compare_exchange_weak(volatile atomic_double *object, double *expected, double desired);
+bool __ovld atomic_compare_exchange_weak_explicit(volatile atomic_double *object, double *expected,
+ double desired, memory_order success, memory_order failure);
+bool __ovld atomic_compare_exchange_weak_explicit(volatile atomic_double *object, double *expected,
+ double desired, memory_order success, memory_order failure, memory_scope scope);
+#endif //cl_khr_fp64
+bool __ovld atomic_compare_exchange_strong(volatile atomic_long *object, long *expected, long desired);
+bool __ovld atomic_compare_exchange_strong_explicit(volatile atomic_long *object, long *expected,
+ long desired, memory_order success, memory_order failure);
+bool __ovld atomic_compare_exchange_strong_explicit(volatile atomic_long *object, long *expected,
+ long desired, memory_order success, memory_order failure, memory_scope scope);
+bool __ovld atomic_compare_exchange_weak(volatile atomic_long *object, long *expected, long desired);
+bool __ovld atomic_compare_exchange_weak_explicit(volatile atomic_long *object, long *expected,
+ long desired, memory_order success, memory_order failure);
+bool __ovld atomic_compare_exchange_weak_explicit(volatile atomic_long *object, long *expected,
+ long desired, memory_order success, memory_order failure, memory_scope scope);
+bool __ovld atomic_compare_exchange_strong(volatile atomic_ulong *object, ulong *expected, ulong desired);
+bool __ovld atomic_compare_exchange_strong_explicit(volatile atomic_ulong *object, ulong *expected,
+ ulong desired, memory_order success, memory_order failure);
+bool __ovld atomic_compare_exchange_strong_explicit(volatile atomic_ulong *object, ulong *expected,
+ ulong desired, memory_order success, memory_order failure, memory_scope scope);
+bool __ovld atomic_compare_exchange_weak(volatile atomic_ulong *object, ulong *expected, ulong desired);
+bool __ovld atomic_compare_exchange_weak_explicit(volatile atomic_ulong *object, ulong *expected,
+ ulong desired, memory_order success, memory_order failure);
+bool __ovld atomic_compare_exchange_weak_explicit(volatile atomic_ulong *object, ulong *expected,
+ ulong desired, memory_order success, memory_order failure, memory_scope scope);
+#endif
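+
+// Illustrative sketch: the canonical compare-exchange retry loop, here
+// computing an atomic maximum ('best' and 'candidate' are hypothetical):
+//   int expected = atomic_load(&best);
+//   while (candidate > expected &&
+//          !atomic_compare_exchange_weak(&best, &expected, candidate))
+//       ;  // on failure, 'expected' is reloaded with the current value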
+
+// atomic_flag_test_and_set() and atomic_flag_clear()
+
+bool __ovld atomic_flag_test_and_set(volatile atomic_flag *object);
+bool __ovld atomic_flag_test_and_set_explicit(volatile atomic_flag *object, memory_order order);
+bool __ovld atomic_flag_test_and_set_explicit(volatile atomic_flag *object, memory_order order, memory_scope scope);
+void __ovld atomic_flag_clear(volatile atomic_flag *object);
+void __ovld atomic_flag_clear_explicit(volatile atomic_flag *object, memory_order order);
+void __ovld atomic_flag_clear_explicit(volatile atomic_flag *object, memory_order order, memory_scope scope);
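+
+// Illustrative sketch: a simple spin lock built on atomic_flag
+// (the 'lock' variable is hypothetical):
+//   while (atomic_flag_test_and_set_explicit(&lock, memory_order_acquire))
+//       ;  // spin until the previous value was clear
+//   /* critical section */
+//   atomic_flag_clear_explicit(&lock, memory_order_release);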
+
+#endif //__OPENCL_C_VERSION__ >= CL_VERSION_2_0
+
+// OpenCL v1.1 s6.11.12, v1.2 s6.12.12, v2.0 s6.13.12 - Miscellaneous Vector Functions
+
+/**
+ * The shuffle and shuffle2 built-in functions construct
+ * a permutation of elements from one or two input
+ * vectors respectively that are of the same type,
+ * returning a vector with the same element type as the
+ * inputs and a length that is the same as the shuffle mask.
+ * The size of each element in the mask must match the
+ * size of each element in the result. For shuffle, only
+ * the ilogb(2*m-1) least significant bits of each mask
+ * element are considered, where m is the number of
+ * elements in the input vector. For shuffle2, only the
+ * ilogb(2*m-1)+1 least significant bits of each mask
+ * element are considered. Other bits in the mask shall
+ * be ignored.
+ * The elements of the input vectors are numbered from
+ * left to right across one or both of the vectors. For this
+ * purpose, the number of elements in a vector is given
+ * by vec_step(gentypem). The shuffle mask operand
+ * specifies, for each element of the result vector, which
+ * element of the one or two input vectors the result
+ * element gets.
+ * Examples:
+ *   uint4 mask = (uint4)(3, 2, 1, 0);
+ *   float4 a;
+ *   float4 r = shuffle(a, mask);
+ *   // r.s0123 = a.wzyx
+ *
+ *   uint8 mask = (uint8)(0, 1, 2, 3, 4, 5, 6, 7);
+ *   float4 a, b;
+ *   float8 r = shuffle2(a, b, mask);
+ *   // r.s0123 = a.xyzw
+ *   // r.s4567 = b.xyzw
+ *
+ *   uint4 mask;
+ *   float8 a;
+ *   float4 b;
+ *   b = shuffle(a, mask);   // valid
+ * Examples that are not valid:
+ *   uint8 mask;
+ *   short16 a;
+ *   short8 b;
+ *   b = shuffle(a, mask);   // not valid: the mask element size (4 bytes)
+ *                           // does not match the result element size (2 bytes)
+ */
+char2 __ovld __cnfn shuffle(char2 x, uchar2 mask);
+char2 __ovld __cnfn shuffle(char4 x, uchar2 mask);
+char2 __ovld __cnfn shuffle(char8 x, uchar2 mask);
+char2 __ovld __cnfn shuffle(char16 x, uchar2 mask);
+
+uchar2 __ovld __cnfn shuffle(uchar2 x, uchar2 mask);
+uchar2 __ovld __cnfn shuffle(uchar4 x, uchar2 mask);
+uchar2 __ovld __cnfn shuffle(uchar8 x, uchar2 mask);
+uchar2 __ovld __cnfn shuffle(uchar16 x, uchar2 mask);
+
+short2 __ovld __cnfn shuffle(short2 x, ushort2 mask);
+short2 __ovld __cnfn shuffle(short4 x, ushort2 mask);
+short2 __ovld __cnfn shuffle(short8 x, ushort2 mask);
+short2 __ovld __cnfn shuffle(short16 x, ushort2 mask);
+
+ushort2 __ovld __cnfn shuffle(ushort2 x, ushort2 mask);
+ushort2 __ovld __cnfn shuffle(ushort4 x, ushort2 mask);
+ushort2 __ovld __cnfn shuffle(ushort8 x, ushort2 mask);
+ushort2 __ovld __cnfn shuffle(ushort16 x, ushort2 mask);
+
+int2 __ovld __cnfn shuffle(int2 x, uint2 mask);
+int2 __ovld __cnfn shuffle(int4 x, uint2 mask);
+int2 __ovld __cnfn shuffle(int8 x, uint2 mask);
+int2 __ovld __cnfn shuffle(int16 x, uint2 mask);
+
+uint2 __ovld __cnfn shuffle(uint2 x, uint2 mask);
+uint2 __ovld __cnfn shuffle(uint4 x, uint2 mask);
+uint2 __ovld __cnfn shuffle(uint8 x, uint2 mask);
+uint2 __ovld __cnfn shuffle(uint16 x, uint2 mask);
+
+long2 __ovld __cnfn shuffle(long2 x, ulong2 mask);
+long2 __ovld __cnfn shuffle(long4 x, ulong2 mask);
+long2 __ovld __cnfn shuffle(long8 x, ulong2 mask);
+long2 __ovld __cnfn shuffle(long16 x, ulong2 mask);
+
+ulong2 __ovld __cnfn shuffle(ulong2 x, ulong2 mask);
+ulong2 __ovld __cnfn shuffle(ulong4 x, ulong2 mask);
+ulong2 __ovld __cnfn shuffle(ulong8 x, ulong2 mask);
+ulong2 __ovld __cnfn shuffle(ulong16 x, ulong2 mask);
+
+float2 __ovld __cnfn shuffle(float2 x, uint2 mask);
+float2 __ovld __cnfn shuffle(float4 x, uint2 mask);
+float2 __ovld __cnfn shuffle(float8 x, uint2 mask);
+float2 __ovld __cnfn shuffle(float16 x, uint2 mask);
+
+char4 __ovld __cnfn shuffle(char2 x, uchar4 mask);
+char4 __ovld __cnfn shuffle(char4 x, uchar4 mask);
+char4 __ovld __cnfn shuffle(char8 x, uchar4 mask);
+char4 __ovld __cnfn shuffle(char16 x, uchar4 mask);
+
+uchar4 __ovld __cnfn shuffle(uchar2 x, uchar4 mask);
+uchar4 __ovld __cnfn shuffle(uchar4 x, uchar4 mask);
+uchar4 __ovld __cnfn shuffle(uchar8 x, uchar4 mask);
+uchar4 __ovld __cnfn shuffle(uchar16 x, uchar4 mask);
+
+short4 __ovld __cnfn shuffle(short2 x, ushort4 mask);
+short4 __ovld __cnfn shuffle(short4 x, ushort4 mask);
+short4 __ovld __cnfn shuffle(short8 x, ushort4 mask);
+short4 __ovld __cnfn shuffle(short16 x, ushort4 mask);
+
+ushort4 __ovld __cnfn shuffle(ushort2 x, ushort4 mask);
+ushort4 __ovld __cnfn shuffle(ushort4 x, ushort4 mask);
+ushort4 __ovld __cnfn shuffle(ushort8 x, ushort4 mask);
+ushort4 __ovld __cnfn shuffle(ushort16 x, ushort4 mask);
+
+int4 __ovld __cnfn shuffle(int2 x, uint4 mask);
+int4 __ovld __cnfn shuffle(int4 x, uint4 mask);
+int4 __ovld __cnfn shuffle(int8 x, uint4 mask);
+int4 __ovld __cnfn shuffle(int16 x, uint4 mask);
+
+uint4 __ovld __cnfn shuffle(uint2 x, uint4 mask);
+uint4 __ovld __cnfn shuffle(uint4 x, uint4 mask);
+uint4 __ovld __cnfn shuffle(uint8 x, uint4 mask);
+uint4 __ovld __cnfn shuffle(uint16 x, uint4 mask);
+
+long4 __ovld __cnfn shuffle(long2 x, ulong4 mask);
+long4 __ovld __cnfn shuffle(long4 x, ulong4 mask);
+long4 __ovld __cnfn shuffle(long8 x, ulong4 mask);
+long4 __ovld __cnfn shuffle(long16 x, ulong4 mask);
+
+ulong4 __ovld __cnfn shuffle(ulong2 x, ulong4 mask);
+ulong4 __ovld __cnfn shuffle(ulong4 x, ulong4 mask);
+ulong4 __ovld __cnfn shuffle(ulong8 x, ulong4 mask);
+ulong4 __ovld __cnfn shuffle(ulong16 x, ulong4 mask);
+
+float4 __ovld __cnfn shuffle(float2 x, uint4 mask);
+float4 __ovld __cnfn shuffle(float4 x, uint4 mask);
+float4 __ovld __cnfn shuffle(float8 x, uint4 mask);
+float4 __ovld __cnfn shuffle(float16 x, uint4 mask);
+
+char8 __ovld __cnfn shuffle(char2 x, uchar8 mask);
+char8 __ovld __cnfn shuffle(char4 x, uchar8 mask);
+char8 __ovld __cnfn shuffle(char8 x, uchar8 mask);
+char8 __ovld __cnfn shuffle(char16 x, uchar8 mask);
+
+uchar8 __ovld __cnfn shuffle(uchar2 x, uchar8 mask);
+uchar8 __ovld __cnfn shuffle(uchar4 x, uchar8 mask);
+uchar8 __ovld __cnfn shuffle(uchar8 x, uchar8 mask);
+uchar8 __ovld __cnfn shuffle(uchar16 x, uchar8 mask);
+
+short8 __ovld __cnfn shuffle(short2 x, ushort8 mask);
+short8 __ovld __cnfn shuffle(short4 x, ushort8 mask);
+short8 __ovld __cnfn shuffle(short8 x, ushort8 mask);
+short8 __ovld __cnfn shuffle(short16 x, ushort8 mask);
+
+ushort8 __ovld __cnfn shuffle(ushort2 x, ushort8 mask);
+ushort8 __ovld __cnfn shuffle(ushort4 x, ushort8 mask);
+ushort8 __ovld __cnfn shuffle(ushort8 x, ushort8 mask);
+ushort8 __ovld __cnfn shuffle(ushort16 x, ushort8 mask);
+
+int8 __ovld __cnfn shuffle(int2 x, uint8 mask);
+int8 __ovld __cnfn shuffle(int4 x, uint8 mask);
+int8 __ovld __cnfn shuffle(int8 x, uint8 mask);
+int8 __ovld __cnfn shuffle(int16 x, uint8 mask);
+
+uint8 __ovld __cnfn shuffle(uint2 x, uint8 mask);
+uint8 __ovld __cnfn shuffle(uint4 x, uint8 mask);
+uint8 __ovld __cnfn shuffle(uint8 x, uint8 mask);
+uint8 __ovld __cnfn shuffle(uint16 x, uint8 mask);
+
+long8 __ovld __cnfn shuffle(long2 x, ulong8 mask);
+long8 __ovld __cnfn shuffle(long4 x, ulong8 mask);
+long8 __ovld __cnfn shuffle(long8 x, ulong8 mask);
+long8 __ovld __cnfn shuffle(long16 x, ulong8 mask);
+
+ulong8 __ovld __cnfn shuffle(ulong2 x, ulong8 mask);
+ulong8 __ovld __cnfn shuffle(ulong4 x, ulong8 mask);
+ulong8 __ovld __cnfn shuffle(ulong8 x, ulong8 mask);
+ulong8 __ovld __cnfn shuffle(ulong16 x, ulong8 mask);
+
+float8 __ovld __cnfn shuffle(float2 x, uint8 mask);
+float8 __ovld __cnfn shuffle(float4 x, uint8 mask);
+float8 __ovld __cnfn shuffle(float8 x, uint8 mask);
+float8 __ovld __cnfn shuffle(float16 x, uint8 mask);
+
+char16 __ovld __cnfn shuffle(char2 x, uchar16 mask);
+char16 __ovld __cnfn shuffle(char4 x, uchar16 mask);
+char16 __ovld __cnfn shuffle(char8 x, uchar16 mask);
+char16 __ovld __cnfn shuffle(char16 x, uchar16 mask);
+
+uchar16 __ovld __cnfn shuffle(uchar2 x, uchar16 mask);
+uchar16 __ovld __cnfn shuffle(uchar4 x, uchar16 mask);
+uchar16 __ovld __cnfn shuffle(uchar8 x, uchar16 mask);
+uchar16 __ovld __cnfn shuffle(uchar16 x, uchar16 mask);
+
+short16 __ovld __cnfn shuffle(short2 x, ushort16 mask);
+short16 __ovld __cnfn shuffle(short4 x, ushort16 mask);
+short16 __ovld __cnfn shuffle(short8 x, ushort16 mask);
+short16 __ovld __cnfn shuffle(short16 x, ushort16 mask);
+
+ushort16 __ovld __cnfn shuffle(ushort2 x, ushort16 mask);
+ushort16 __ovld __cnfn shuffle(ushort4 x, ushort16 mask);
+ushort16 __ovld __cnfn shuffle(ushort8 x, ushort16 mask);
+ushort16 __ovld __cnfn shuffle(ushort16 x, ushort16 mask);
+
+int16 __ovld __cnfn shuffle(int2 x, uint16 mask);
+int16 __ovld __cnfn shuffle(int4 x, uint16 mask);
+int16 __ovld __cnfn shuffle(int8 x, uint16 mask);
+int16 __ovld __cnfn shuffle(int16 x, uint16 mask);
+
+uint16 __ovld __cnfn shuffle(uint2 x, uint16 mask);
+uint16 __ovld __cnfn shuffle(uint4 x, uint16 mask);
+uint16 __ovld __cnfn shuffle(uint8 x, uint16 mask);
+uint16 __ovld __cnfn shuffle(uint16 x, uint16 mask);
+
+long16 __ovld __cnfn shuffle(long2 x, ulong16 mask);
+long16 __ovld __cnfn shuffle(long4 x, ulong16 mask);
+long16 __ovld __cnfn shuffle(long8 x, ulong16 mask);
+long16 __ovld __cnfn shuffle(long16 x, ulong16 mask);
+
+ulong16 __ovld __cnfn shuffle(ulong2 x, ulong16 mask);
+ulong16 __ovld __cnfn shuffle(ulong4 x, ulong16 mask);
+ulong16 __ovld __cnfn shuffle(ulong8 x, ulong16 mask);
+ulong16 __ovld __cnfn shuffle(ulong16 x, ulong16 mask);
+
+float16 __ovld __cnfn shuffle(float2 x, uint16 mask);
+float16 __ovld __cnfn shuffle(float4 x, uint16 mask);
+float16 __ovld __cnfn shuffle(float8 x, uint16 mask);
+float16 __ovld __cnfn shuffle(float16 x, uint16 mask);
+
+#ifdef cl_khr_fp64
+double2 __ovld __cnfn shuffle(double2 x, ulong2 mask);
+double2 __ovld __cnfn shuffle(double4 x, ulong2 mask);
+double2 __ovld __cnfn shuffle(double8 x, ulong2 mask);
+double2 __ovld __cnfn shuffle(double16 x, ulong2 mask);
+
+double4 __ovld __cnfn shuffle(double2 x, ulong4 mask);
+double4 __ovld __cnfn shuffle(double4 x, ulong4 mask);
+double4 __ovld __cnfn shuffle(double8 x, ulong4 mask);
+double4 __ovld __cnfn shuffle(double16 x, ulong4 mask);
+
+double8 __ovld __cnfn shuffle(double2 x, ulong8 mask);
+double8 __ovld __cnfn shuffle(double4 x, ulong8 mask);
+double8 __ovld __cnfn shuffle(double8 x, ulong8 mask);
+double8 __ovld __cnfn shuffle(double16 x, ulong8 mask);
+
+double16 __ovld __cnfn shuffle(double2 x, ulong16 mask);
+double16 __ovld __cnfn shuffle(double4 x, ulong16 mask);
+double16 __ovld __cnfn shuffle(double8 x, ulong16 mask);
+double16 __ovld __cnfn shuffle(double16 x, ulong16 mask);
+#endif //cl_khr_fp64
+
+#ifdef cl_khr_fp16
+half2 __ovld __cnfn shuffle(half2 x, ushort2 mask);
+half2 __ovld __cnfn shuffle(half4 x, ushort2 mask);
+half2 __ovld __cnfn shuffle(half8 x, ushort2 mask);
+half2 __ovld __cnfn shuffle(half16 x, ushort2 mask);
+
+half4 __ovld __cnfn shuffle(half2 x, ushort4 mask);
+half4 __ovld __cnfn shuffle(half4 x, ushort4 mask);
+half4 __ovld __cnfn shuffle(half8 x, ushort4 mask);
+half4 __ovld __cnfn shuffle(half16 x, ushort4 mask);
+
+half8 __ovld __cnfn shuffle(half2 x, ushort8 mask);
+half8 __ovld __cnfn shuffle(half4 x, ushort8 mask);
+half8 __ovld __cnfn shuffle(half8 x, ushort8 mask);
+half8 __ovld __cnfn shuffle(half16 x, ushort8 mask);
+
+half16 __ovld __cnfn shuffle(half2 x, ushort16 mask);
+half16 __ovld __cnfn shuffle(half4 x, ushort16 mask);
+half16 __ovld __cnfn shuffle(half8 x, ushort16 mask);
+half16 __ovld __cnfn shuffle(half16 x, ushort16 mask);
+#endif //cl_khr_fp16
+
+char2 __ovld __cnfn shuffle2(char2 x, char2 y, uchar2 mask);
+char2 __ovld __cnfn shuffle2(char4 x, char4 y, uchar2 mask);
+char2 __ovld __cnfn shuffle2(char8 x, char8 y, uchar2 mask);
+char2 __ovld __cnfn shuffle2(char16 x, char16 y, uchar2 mask);
+
+uchar2 __ovld __cnfn shuffle2(uchar2 x, uchar2 y, uchar2 mask);
+uchar2 __ovld __cnfn shuffle2(uchar4 x, uchar4 y, uchar2 mask);
+uchar2 __ovld __cnfn shuffle2(uchar8 x, uchar8 y, uchar2 mask);
+uchar2 __ovld __cnfn shuffle2(uchar16 x, uchar16 y, uchar2 mask);
+
+short2 __ovld __cnfn shuffle2(short2 x, short2 y, ushort2 mask);
+short2 __ovld __cnfn shuffle2(short4 x, short4 y, ushort2 mask);
+short2 __ovld __cnfn shuffle2(short8 x, short8 y, ushort2 mask);
+short2 __ovld __cnfn shuffle2(short16 x, short16 y, ushort2 mask);
+
+ushort2 __ovld __cnfn shuffle2(ushort2 x, ushort2 y, ushort2 mask);
+ushort2 __ovld __cnfn shuffle2(ushort4 x, ushort4 y, ushort2 mask);
+ushort2 __ovld __cnfn shuffle2(ushort8 x, ushort8 y, ushort2 mask);
+ushort2 __ovld __cnfn shuffle2(ushort16 x, ushort16 y, ushort2 mask);
+
+int2 __ovld __cnfn shuffle2(int2 x, int2 y, uint2 mask);
+int2 __ovld __cnfn shuffle2(int4 x, int4 y, uint2 mask);
+int2 __ovld __cnfn shuffle2(int8 x, int8 y, uint2 mask);
+int2 __ovld __cnfn shuffle2(int16 x, int16 y, uint2 mask);
+
+uint2 __ovld __cnfn shuffle2(uint2 x, uint2 y, uint2 mask);
+uint2 __ovld __cnfn shuffle2(uint4 x, uint4 y, uint2 mask);
+uint2 __ovld __cnfn shuffle2(uint8 x, uint8 y, uint2 mask);
+uint2 __ovld __cnfn shuffle2(uint16 x, uint16 y, uint2 mask);
+
+long2 __ovld __cnfn shuffle2(long2 x, long2 y, ulong2 mask);
+long2 __ovld __cnfn shuffle2(long4 x, long4 y, ulong2 mask);
+long2 __ovld __cnfn shuffle2(long8 x, long8 y, ulong2 mask);
+long2 __ovld __cnfn shuffle2(long16 x, long16 y, ulong2 mask);
+
+ulong2 __ovld __cnfn shuffle2(ulong2 x, ulong2 y, ulong2 mask);
+ulong2 __ovld __cnfn shuffle2(ulong4 x, ulong4 y, ulong2 mask);
+ulong2 __ovld __cnfn shuffle2(ulong8 x, ulong8 y, ulong2 mask);
+ulong2 __ovld __cnfn shuffle2(ulong16 x, ulong16 y, ulong2 mask);
+
+float2 __ovld __cnfn shuffle2(float2 x, float2 y, uint2 mask);
+float2 __ovld __cnfn shuffle2(float4 x, float4 y, uint2 mask);
+float2 __ovld __cnfn shuffle2(float8 x, float8 y, uint2 mask);
+float2 __ovld __cnfn shuffle2(float16 x, float16 y, uint2 mask);
+
+char4 __ovld __cnfn shuffle2(char2 x, char2 y, uchar4 mask);
+char4 __ovld __cnfn shuffle2(char4 x, char4 y, uchar4 mask);
+char4 __ovld __cnfn shuffle2(char8 x, char8 y, uchar4 mask);
+char4 __ovld __cnfn shuffle2(char16 x, char16 y, uchar4 mask);
+
+uchar4 __ovld __cnfn shuffle2(uchar2 x, uchar2 y, uchar4 mask);
+uchar4 __ovld __cnfn shuffle2(uchar4 x, uchar4 y, uchar4 mask);
+uchar4 __ovld __cnfn shuffle2(uchar8 x, uchar8 y, uchar4 mask);
+uchar4 __ovld __cnfn shuffle2(uchar16 x, uchar16 y, uchar4 mask);
+
+short4 __ovld __cnfn shuffle2(short2 x, short2 y, ushort4 mask);
+short4 __ovld __cnfn shuffle2(short4 x, short4 y, ushort4 mask);
+short4 __ovld __cnfn shuffle2(short8 x, short8 y, ushort4 mask);
+short4 __ovld __cnfn shuffle2(short16 x, short16 y, ushort4 mask);
+
+ushort4 __ovld __cnfn shuffle2(ushort2 x, ushort2 y, ushort4 mask);
+ushort4 __ovld __cnfn shuffle2(ushort4 x, ushort4 y, ushort4 mask);
+ushort4 __ovld __cnfn shuffle2(ushort8 x, ushort8 y, ushort4 mask);
+ushort4 __ovld __cnfn shuffle2(ushort16 x, ushort16 y, ushort4 mask);
+
+int4 __ovld __cnfn shuffle2(int2 x, int2 y, uint4 mask);
+int4 __ovld __cnfn shuffle2(int4 x, int4 y, uint4 mask);
+int4 __ovld __cnfn shuffle2(int8 x, int8 y, uint4 mask);
+int4 __ovld __cnfn shuffle2(int16 x, int16 y, uint4 mask);
+
+uint4 __ovld __cnfn shuffle2(uint2 x, uint2 y, uint4 mask);
+uint4 __ovld __cnfn shuffle2(uint4 x, uint4 y, uint4 mask);
+uint4 __ovld __cnfn shuffle2(uint8 x, uint8 y, uint4 mask);
+uint4 __ovld __cnfn shuffle2(uint16 x, uint16 y, uint4 mask);
+
+long4 __ovld __cnfn shuffle2(long2 x, long2 y, ulong4 mask);
+long4 __ovld __cnfn shuffle2(long4 x, long4 y, ulong4 mask);
+long4 __ovld __cnfn shuffle2(long8 x, long8 y, ulong4 mask);
+long4 __ovld __cnfn shuffle2(long16 x, long16 y, ulong4 mask);
+
+ulong4 __ovld __cnfn shuffle2(ulong2 x, ulong2 y, ulong4 mask);
+ulong4 __ovld __cnfn shuffle2(ulong4 x, ulong4 y, ulong4 mask);
+ulong4 __ovld __cnfn shuffle2(ulong8 x, ulong8 y, ulong4 mask);
+ulong4 __ovld __cnfn shuffle2(ulong16 x, ulong16 y, ulong4 mask);
+
+float4 __ovld __cnfn shuffle2(float2 x, float2 y, uint4 mask);
+float4 __ovld __cnfn shuffle2(float4 x, float4 y, uint4 mask);
+float4 __ovld __cnfn shuffle2(float8 x, float8 y, uint4 mask);
+float4 __ovld __cnfn shuffle2(float16 x, float16 y, uint4 mask);
+
+char8 __ovld __cnfn shuffle2(char2 x, char2 y, uchar8 mask);
+char8 __ovld __cnfn shuffle2(char4 x, char4 y, uchar8 mask);
+char8 __ovld __cnfn shuffle2(char8 x, char8 y, uchar8 mask);
+char8 __ovld __cnfn shuffle2(char16 x, char16 y, uchar8 mask);
+
+uchar8 __ovld __cnfn shuffle2(uchar2 x, uchar2 y, uchar8 mask);
+uchar8 __ovld __cnfn shuffle2(uchar4 x, uchar4 y, uchar8 mask);
+uchar8 __ovld __cnfn shuffle2(uchar8 x, uchar8 y, uchar8 mask);
+uchar8 __ovld __cnfn shuffle2(uchar16 x, uchar16 y, uchar8 mask);
+
+short8 __ovld __cnfn shuffle2(short2 x, short2 y, ushort8 mask);
+short8 __ovld __cnfn shuffle2(short4 x, short4 y, ushort8 mask);
+short8 __ovld __cnfn shuffle2(short8 x, short8 y, ushort8 mask);
+short8 __ovld __cnfn shuffle2(short16 x, short16 y, ushort8 mask);
+
+ushort8 __ovld __cnfn shuffle2(ushort2 x, ushort2 y, ushort8 mask);
+ushort8 __ovld __cnfn shuffle2(ushort4 x, ushort4 y, ushort8 mask);
+ushort8 __ovld __cnfn shuffle2(ushort8 x, ushort8 y, ushort8 mask);
+ushort8 __ovld __cnfn shuffle2(ushort16 x, ushort16 y, ushort8 mask);
+
+int8 __ovld __cnfn shuffle2(int2 x, int2 y, uint8 mask);
+int8 __ovld __cnfn shuffle2(int4 x, int4 y, uint8 mask);
+int8 __ovld __cnfn shuffle2(int8 x, int8 y, uint8 mask);
+int8 __ovld __cnfn shuffle2(int16 x, int16 y, uint8 mask);
+
+uint8 __ovld __cnfn shuffle2(uint2 x, uint2 y, uint8 mask);
+uint8 __ovld __cnfn shuffle2(uint4 x, uint4 y, uint8 mask);
+uint8 __ovld __cnfn shuffle2(uint8 x, uint8 y, uint8 mask);
+uint8 __ovld __cnfn shuffle2(uint16 x, uint16 y, uint8 mask);
+
+long8 __ovld __cnfn shuffle2(long2 x, long2 y, ulong8 mask);
+long8 __ovld __cnfn shuffle2(long4 x, long4 y, ulong8 mask);
+long8 __ovld __cnfn shuffle2(long8 x, long8 y, ulong8 mask);
+long8 __ovld __cnfn shuffle2(long16 x, long16 y, ulong8 mask);
+
+ulong8 __ovld __cnfn shuffle2(ulong2 x, ulong2 y, ulong8 mask);
+ulong8 __ovld __cnfn shuffle2(ulong4 x, ulong4 y, ulong8 mask);
+ulong8 __ovld __cnfn shuffle2(ulong8 x, ulong8 y, ulong8 mask);
+ulong8 __ovld __cnfn shuffle2(ulong16 x, ulong16 y, ulong8 mask);
+
+float8 __ovld __cnfn shuffle2(float2 x, float2 y, uint8 mask);
+float8 __ovld __cnfn shuffle2(float4 x, float4 y, uint8 mask);
+float8 __ovld __cnfn shuffle2(float8 x, float8 y, uint8 mask);
+float8 __ovld __cnfn shuffle2(float16 x, float16 y, uint8 mask);
+
+char16 __ovld __cnfn shuffle2(char2 x, char2 y, uchar16 mask);
+char16 __ovld __cnfn shuffle2(char4 x, char4 y, uchar16 mask);
+char16 __ovld __cnfn shuffle2(char8 x, char8 y, uchar16 mask);
+char16 __ovld __cnfn shuffle2(char16 x, char16 y, uchar16 mask);
+
+uchar16 __ovld __cnfn shuffle2(uchar2 x, uchar2 y, uchar16 mask);
+uchar16 __ovld __cnfn shuffle2(uchar4 x, uchar4 y, uchar16 mask);
+uchar16 __ovld __cnfn shuffle2(uchar8 x, uchar8 y, uchar16 mask);
+uchar16 __ovld __cnfn shuffle2(uchar16 x, uchar16 y, uchar16 mask);
+
+short16 __ovld __cnfn shuffle2(short2 x, short2 y, ushort16 mask);
+short16 __ovld __cnfn shuffle2(short4 x, short4 y, ushort16 mask);
+short16 __ovld __cnfn shuffle2(short8 x, short8 y, ushort16 mask);
+short16 __ovld __cnfn shuffle2(short16 x, short16 y, ushort16 mask);
+
+ushort16 __ovld __cnfn shuffle2(ushort2 x, ushort2 y, ushort16 mask);
+ushort16 __ovld __cnfn shuffle2(ushort4 x, ushort4 y, ushort16 mask);
+ushort16 __ovld __cnfn shuffle2(ushort8 x, ushort8 y, ushort16 mask);
+ushort16 __ovld __cnfn shuffle2(ushort16 x, ushort16 y, ushort16 mask);
+
+int16 __ovld __cnfn shuffle2(int2 x, int2 y, uint16 mask);
+int16 __ovld __cnfn shuffle2(int4 x, int4 y, uint16 mask);
+int16 __ovld __cnfn shuffle2(int8 x, int8 y, uint16 mask);
+int16 __ovld __cnfn shuffle2(int16 x, int16 y, uint16 mask);
+
+uint16 __ovld __cnfn shuffle2(uint2 x, uint2 y, uint16 mask);
+uint16 __ovld __cnfn shuffle2(uint4 x, uint4 y, uint16 mask);
+uint16 __ovld __cnfn shuffle2(uint8 x, uint8 y, uint16 mask);
+uint16 __ovld __cnfn shuffle2(uint16 x, uint16 y, uint16 mask);
+
+long16 __ovld __cnfn shuffle2(long2 x, long2 y, ulong16 mask);
+long16 __ovld __cnfn shuffle2(long4 x, long4 y, ulong16 mask);
+long16 __ovld __cnfn shuffle2(long8 x, long8 y, ulong16 mask);
+long16 __ovld __cnfn shuffle2(long16 x, long16 y, ulong16 mask);
+
+ulong16 __ovld __cnfn shuffle2(ulong2 x, ulong2 y, ulong16 mask);
+ulong16 __ovld __cnfn shuffle2(ulong4 x, ulong4 y, ulong16 mask);
+ulong16 __ovld __cnfn shuffle2(ulong8 x, ulong8 y, ulong16 mask);
+ulong16 __ovld __cnfn shuffle2(ulong16 x, ulong16 y, ulong16 mask);
+
+float16 __ovld __cnfn shuffle2(float2 x, float2 y, uint16 mask);
+float16 __ovld __cnfn shuffle2(float4 x, float4 y, uint16 mask);
+float16 __ovld __cnfn shuffle2(float8 x, float8 y, uint16 mask);
+float16 __ovld __cnfn shuffle2(float16 x, float16 y, uint16 mask);
+
+#ifdef cl_khr_fp64
+double2 __ovld __cnfn shuffle2(double2 x, double2 y, ulong2 mask);
+double2 __ovld __cnfn shuffle2(double4 x, double4 y, ulong2 mask);
+double2 __ovld __cnfn shuffle2(double8 x, double8 y, ulong2 mask);
+double2 __ovld __cnfn shuffle2(double16 x, double16 y, ulong2 mask);
+
+double4 __ovld __cnfn shuffle2(double2 x, double2 y, ulong4 mask);
+double4 __ovld __cnfn shuffle2(double4 x, double4 y, ulong4 mask);
+double4 __ovld __cnfn shuffle2(double8 x, double8 y, ulong4 mask);
+double4 __ovld __cnfn shuffle2(double16 x, double16 y, ulong4 mask);
+
+double8 __ovld __cnfn shuffle2(double2 x, double2 y, ulong8 mask);
+double8 __ovld __cnfn shuffle2(double4 x, double4 y, ulong8 mask);
+double8 __ovld __cnfn shuffle2(double8 x, double8 y, ulong8 mask);
+double8 __ovld __cnfn shuffle2(double16 x, double16 y, ulong8 mask);
+
+double16 __ovld __cnfn shuffle2(double2 x, double2 y, ulong16 mask);
+double16 __ovld __cnfn shuffle2(double4 x, double4 y, ulong16 mask);
+double16 __ovld __cnfn shuffle2(double8 x, double8 y, ulong16 mask);
+double16 __ovld __cnfn shuffle2(double16 x, double16 y, ulong16 mask);
+#endif //cl_khr_fp64
+
+#ifdef cl_khr_fp16
+half2 __ovld __cnfn shuffle2(half2 x, half2 y, ushort2 mask);
+half2 __ovld __cnfn shuffle2(half4 x, half4 y, ushort2 mask);
+half2 __ovld __cnfn shuffle2(half8 x, half8 y, ushort2 mask);
+half2 __ovld __cnfn shuffle2(half16 x, half16 y, ushort2 mask);
+
+half4 __ovld __cnfn shuffle2(half2 x, half2 y, ushort4 mask);
+half4 __ovld __cnfn shuffle2(half4 x, half4 y, ushort4 mask);
+half4 __ovld __cnfn shuffle2(half8 x, half8 y, ushort4 mask);
+half4 __ovld __cnfn shuffle2(half16 x, half16 y, ushort4 mask);
+
+half8 __ovld __cnfn shuffle2(half2 x, half2 y, ushort8 mask);
+half8 __ovld __cnfn shuffle2(half4 x, half4 y, ushort8 mask);
+half8 __ovld __cnfn shuffle2(half8 x, half8 y, ushort8 mask);
+half8 __ovld __cnfn shuffle2(half16 x, half16 y, ushort8 mask);
+
+half16 __ovld __cnfn shuffle2(half2 x, half2 y, ushort16 mask);
+half16 __ovld __cnfn shuffle2(half4 x, half4 y, ushort16 mask);
+half16 __ovld __cnfn shuffle2(half8 x, half8 y, ushort16 mask);
+half16 __ovld __cnfn shuffle2(half16 x, half16 y, ushort16 mask);
+#endif //cl_khr_fp16
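+
+// Illustrative sketch (hypothetical kernel): reversing the lanes of a float4
+// with shuffle, following the examples in the comment above.
+//
+// __kernel void reverse4(__global float4 *buf) {
+//     const uint4 rev = (uint4)(3, 2, 1, 0);
+//     size_t i = get_global_id(0);
+//     buf[i] = shuffle(buf[i], rev);
+// }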
+
+// OpenCL v1.2 s6.12.13, v2.0 s6.13.13 - printf
+
+int printf(__constant const char* st, ...);
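+
+// Illustrative sketch: the declaration above follows the C99 printf model
+// with the format string in the __constant address space, e.g.
+// ('x' is a hypothetical kernel variable):
+//   printf("gid=%d x=%f\n", (int)get_global_id(0), x);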
+
+// OpenCL v1.1 s6.11.3, v1.2 s6.12.14, v2.0 s6.13.14 - Image Read and Write Functions
+
+// These values need to match the runtime equivalent
+//
+// Addressing Mode.
+//
+#define CLK_ADDRESS_NONE 0
+#define CLK_ADDRESS_CLAMP_TO_EDGE 2
+#define CLK_ADDRESS_CLAMP 4
+#define CLK_ADDRESS_REPEAT 6
+#define CLK_ADDRESS_MIRRORED_REPEAT 8
+
+//
+// Coordinate Normalization
+//
+#define CLK_NORMALIZED_COORDS_FALSE 0
+#define CLK_NORMALIZED_COORDS_TRUE 1
+
+//
+// Filtering Mode.
+//
+#define CLK_FILTER_NEAREST 0x10
+#define CLK_FILTER_LINEAR 0x20
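+
+// Illustrative sketch: one flag from each of the three groups above is
+// combined with bitwise OR into a sampler constant, e.g.
+//   const sampler_t smp = CLK_NORMALIZED_COORDS_FALSE |
+//                         CLK_ADDRESS_CLAMP_TO_EDGE |
+//                         CLK_FILTER_NEAREST;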
+
+/**
+ * Use the coordinate (coord.xy) to do an element lookup in
+ * the 2D image object specified by image.
+ *
+ * Use the coordinate (coord.x, coord.y, coord.z) to do
+ * an element lookup in the 3D image object specified
+ * by image. coord.w is ignored.
+ *
+ * Use the coordinate (coord.z) to index into the
+ * 2D image array object specified by image_array
+ * and (coord.x, coord.y) to do an element lookup in
+ * the 2D image object specified by image.
+ *
+ * Use the coordinate (x) to do an element lookup in
+ * the 1D image object specified by image.
+ *
+ * Use the coordinate (coord.y) to index into the
+ * 1D image array object specified by image_array
+ * and (coord.x) to do an element lookup in
+ * the 1D image object specified by image.
+ *
+ * Use the coordinate (coord.xy) and sample to do an
+ * element lookup in the 2D multi-sample image specified
+ * by image.
+ *
+ * Use coord.xy and sample to do an element
+ * lookup in the 2D multi-sample image layer
+ * identified by index coord.z in the 2D multi-sample
+ * image array specified by image.
+ *
+ * For mipmap images, use the mip-level specified by
+ * the Level-of-Detail (lod) or use gradients for LOD
+ * computation.
+ *
+ * read_imagef returns floating-point values in the
+ * range [0.0 ... 1.0] for image objects created with
+ * image_channel_data_type set to one of the predefined
+ * packed formats, CL_UNORM_INT8, or
+ * CL_UNORM_INT16.
+ *
+ * read_imagef returns floating-point values in the
+ * range [-1.0 ... 1.0] for image objects created with
+ * image_channel_data_type set to CL_SNORM_INT8
+ * or CL_SNORM_INT16.
+ *
+ * read_imagef returns floating-point values for image
+ * objects created with image_channel_data_type set to
+ * CL_HALF_FLOAT or CL_FLOAT.
+ *
+ * read_imagei and read_imageui return
+ * unnormalized signed integer and unsigned integer
+ * values respectively. Each channel will be stored in a
+ * 32-bit integer.
+ *
+ * read_imagei can only be used with image objects
+ * created with image_channel_data_type set to one of
+ * the following values:
+ * CL_SIGNED_INT8,
+ * CL_SIGNED_INT16 and
+ * CL_SIGNED_INT32.
+ * If the image_channel_data_type is not one of the
+ * above values, the values returned by read_imagei
+ * are undefined.
+ *
+ * read_imageui can only be used with image objects
+ * created with image_channel_data_type set to one of
+ * the following values:
+ * CL_UNSIGNED_INT8,
+ * CL_UNSIGNED_INT16 and
+ * CL_UNSIGNED_INT32.
+ * If the image_channel_data_type is not one of the
+ * above values, the values returned by read_imageui
+ * are undefined.
+ *
+ * The read_image{i|ui} calls support a nearest filter
+ * only. The filter_mode specified in sampler
+ * must be set to CLK_FILTER_NEAREST; otherwise
+ * the values returned are undefined.
+ *
+ * The read_image{f|i|ui} calls that take
+ * integer coordinates must use a sampler with
+ * normalized coordinates set to
+ * CLK_NORMALIZED_COORDS_FALSE and
+ * addressing mode set to
+ * CLK_ADDRESS_CLAMP_TO_EDGE,
+ * CLK_ADDRESS_CLAMP or CLK_ADDRESS_NONE;
+ * otherwise the values returned are undefined.
+ *
+ * Values returned by read_imagef for image objects
+ * with image_channel_data_type values not specified
+ * in the description above are undefined.
+ */
+
+float4 __purefn __ovld read_imagef(read_only image2d_t image, sampler_t sampler, int2 coord);
+float4 __purefn __ovld read_imagef(read_only image2d_t image, sampler_t sampler, float2 coord);
+
+int4 __purefn __ovld read_imagei(read_only image2d_t image, sampler_t sampler, int2 coord);
+int4 __purefn __ovld read_imagei(read_only image2d_t image, sampler_t sampler, float2 coord);
+uint4 __purefn __ovld read_imageui(read_only image2d_t image, sampler_t sampler, int2 coord);
+uint4 __purefn __ovld read_imageui(read_only image2d_t image, sampler_t sampler, float2 coord);
+
+float4 __purefn __ovld read_imagef(read_only image3d_t image, sampler_t sampler, int4 coord);
+float4 __purefn __ovld read_imagef(read_only image3d_t image, sampler_t sampler, float4 coord);
+
+int4 __purefn __ovld read_imagei(read_only image3d_t image, sampler_t sampler, int4 coord);
+int4 __purefn __ovld read_imagei(read_only image3d_t image, sampler_t sampler, float4 coord);
+uint4 __purefn __ovld read_imageui(read_only image3d_t image, sampler_t sampler, int4 coord);
+uint4 __purefn __ovld read_imageui(read_only image3d_t image, sampler_t sampler, float4 coord);
+
+float4 __purefn __ovld read_imagef(read_only image2d_array_t image_array, sampler_t sampler, int4 coord);
+float4 __purefn __ovld read_imagef(read_only image2d_array_t image_array, sampler_t sampler, float4 coord);
+
+int4 __purefn __ovld read_imagei(read_only image2d_array_t image_array, sampler_t sampler, int4 coord);
+int4 __purefn __ovld read_imagei(read_only image2d_array_t image_array, sampler_t sampler, float4 coord);
+uint4 __purefn __ovld read_imageui(read_only image2d_array_t image_array, sampler_t sampler, int4 coord);
+uint4 __purefn __ovld read_imageui(read_only image2d_array_t image_array, sampler_t sampler, float4 coord);
+
+float4 __purefn __ovld read_imagef(read_only image1d_t image, sampler_t sampler, int coord);
+float4 __purefn __ovld read_imagef(read_only image1d_t image, sampler_t sampler, float coord);
+
+int4 __purefn __ovld read_imagei(read_only image1d_t image, sampler_t sampler, int coord);
+int4 __purefn __ovld read_imagei(read_only image1d_t image, sampler_t sampler, float coord);
+uint4 __purefn __ovld read_imageui(read_only image1d_t image, sampler_t sampler, int coord);
+uint4 __purefn __ovld read_imageui(read_only image1d_t image, sampler_t sampler, float coord);
+
+float4 __purefn __ovld read_imagef(read_only image1d_array_t image_array, sampler_t sampler, int2 coord);
+float4 __purefn __ovld read_imagef(read_only image1d_array_t image_array, sampler_t sampler, float2 coord);
+
+int4 __purefn __ovld read_imagei(read_only image1d_array_t image_array, sampler_t sampler, int2 coord);
+int4 __purefn __ovld read_imagei(read_only image1d_array_t image_array, sampler_t sampler, float2 coord);
+uint4 __purefn __ovld read_imageui(read_only image1d_array_t image_array, sampler_t sampler, int2 coord);
+uint4 __purefn __ovld read_imageui(read_only image1d_array_t image_array, sampler_t sampler, float2 coord);
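+
+// Illustrative sketch (hypothetical kernel): a 2D image copy using the
+// sampler rules described above; write_imagef is assumed to be declared
+// later in this header.
+//
+// __constant sampler_t smp = CLK_NORMALIZED_COORDS_FALSE |
+//                            CLK_ADDRESS_CLAMP_TO_EDGE | CLK_FILTER_NEAREST;
+// __kernel void copy2d(read_only image2d_t src, write_only image2d_t dst) {
+//     int2 p = (int2)(get_global_id(0), get_global_id(1));
+//     write_imagef(dst, p, read_imagef(src, smp, p));
+// }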
+
+#ifdef cl_khr_depth_images
+float __purefn __ovld read_imagef(read_only image2d_depth_t image, sampler_t sampler, float2 coord);
+float __purefn __ovld read_imagef(read_only image2d_depth_t image, sampler_t sampler, int2 coord);
+
+float __purefn __ovld read_imagef(read_only image2d_array_depth_t image, sampler_t sampler, float4 coord);
+float __purefn __ovld read_imagef(read_only image2d_array_depth_t image, sampler_t sampler, int4 coord);
+#endif //cl_khr_depth_images
+
+#if defined(cl_khr_gl_msaa_sharing)
+float4 __purefn __ovld read_imagef(read_only image2d_msaa_t image, int2 coord, int sample);
+int4 __purefn __ovld read_imagei(read_only image2d_msaa_t image, int2 coord, int sample);
+uint4 __purefn __ovld read_imageui(read_only image2d_msaa_t image, int2 coord, int sample);
+
+float __purefn __ovld read_imagef(read_only image2d_msaa_depth_t image, int2 coord, int sample);
+
+float4 __purefn __ovld read_imagef(read_only image2d_array_msaa_t image, int4 coord, int sample);
+int4 __purefn __ovld read_imagei(read_only image2d_array_msaa_t image, int4 coord, int sample);
+uint4 __purefn __ovld read_imageui(read_only image2d_array_msaa_t image, int4 coord, int sample);
+
+float __purefn __ovld read_imagef(read_only image2d_array_msaa_depth_t image, int4 coord, int sample);
+#endif //cl_khr_gl_msaa_sharing
+
+// OpenCL Extension v2.0 s9.18 - Mipmaps
+#ifdef cl_khr_mipmap_image
+
+float4 __purefn __ovld read_imagef(read_only image1d_t image, sampler_t sampler, float coord, float lod);
+int4 __purefn __ovld read_imagei(read_only image1d_t image, sampler_t sampler, float coord, float lod);
+uint4 __purefn __ovld read_imageui(read_only image1d_t image, sampler_t sampler, float coord, float lod);
+
+float4 __purefn __ovld read_imagef(read_only image1d_array_t image_array, sampler_t sampler, float2 coord, float lod);
+int4 __purefn __ovld read_imagei(read_only image1d_array_t image_array, sampler_t sampler, float2 coord, float lod);
+uint4 __purefn __ovld read_imageui(read_only image1d_array_t image_array, sampler_t sampler, float2 coord, float lod);
+
+float4 __purefn __ovld read_imagef(read_only image2d_t image, sampler_t sampler, float2 coord, float lod);
+int4 __purefn __ovld read_imagei(read_only image2d_t image, sampler_t sampler, float2 coord, float lod);
+uint4 __purefn __ovld read_imageui(read_only image2d_t image, sampler_t sampler, float2 coord, float lod);
+
+float __purefn __ovld read_imagef(read_only image2d_depth_t image, sampler_t sampler, float2 coord, float lod);
+
+float4 __purefn __ovld read_imagef(read_only image2d_array_t image_array, sampler_t sampler, float4 coord, float lod);
+int4 __purefn __ovld read_imagei(read_only image2d_array_t image_array, sampler_t sampler, float4 coord, float lod);
+uint4 __purefn __ovld read_imageui(read_only image2d_array_t image_array, sampler_t sampler, float4 coord, float lod);
+
+float __purefn __ovld read_imagef(read_only image2d_array_depth_t image, sampler_t sampler, float4 coord, float lod);
+
+float4 __purefn __ovld read_imagef(read_only image3d_t image, sampler_t sampler, float4 coord, float lod);
+int4 __purefn __ovld read_imagei(read_only image3d_t image, sampler_t sampler, float4 coord, float lod);
+uint4 __purefn __ovld read_imageui(read_only image3d_t image, sampler_t sampler, float4 coord, float lod);
+
+float4 __purefn __ovld read_imagef(read_only image1d_t image, sampler_t sampler, float coord, float gradientX, float gradientY);
+int4 __purefn __ovld read_imagei(read_only image1d_t image, sampler_t sampler, float coord, float gradientX, float gradientY);
+uint4 __purefn __ovld read_imageui(read_only image1d_t image, sampler_t sampler, float coord, float gradientX, float gradientY);
+
+float4 __purefn __ovld read_imagef(read_only image1d_array_t image_array, sampler_t sampler, float2 coord, float gradientX, float gradientY);
+int4 __purefn __ovld read_imagei(read_only image1d_array_t image_array, sampler_t sampler, float2 coord, float gradientX, float gradientY);
+uint4 __purefn __ovld read_imageui(read_only image1d_array_t image_array, sampler_t sampler, float2 coord, float gradientX, float gradientY);
+
+float4 __purefn __ovld read_imagef(read_only image2d_t image, sampler_t sampler, float2 coord, float2 gradientX, float2 gradientY);
+int4 __purefn __ovld read_imagei(read_only image2d_t image, sampler_t sampler, float2 coord, float2 gradientX, float2 gradientY);
+uint4 __purefn __ovld read_imageui(read_only image2d_t image, sampler_t sampler, float2 coord, float2 gradientX, float2 gradientY);
+
+float __purefn __ovld read_imagef(read_only image2d_depth_t image, sampler_t sampler, float2 coord, float2 gradientX, float2 gradientY);
+
+float4 __purefn __ovld read_imagef(read_only image2d_array_t image_array, sampler_t sampler, float4 coord, float2 gradientX, float2 gradientY);
+int4 __purefn __ovld read_imagei(read_only image2d_array_t image_array, sampler_t sampler, float4 coord, float2 gradientX, float2 gradientY);
+uint4 __purefn __ovld read_imageui(read_only image2d_array_t image_array, sampler_t sampler, float4 coord, float2 gradientX, float2 gradientY);
+
+float __purefn __ovld read_imagef(read_only image2d_array_depth_t image, sampler_t sampler, float4 coord, float2 gradientX, float2 gradientY);
+
+float4 __purefn __ovld read_imagef(read_only image3d_t image, sampler_t sampler, float4 coord, float4 gradientX, float4 gradientY);
+int4 __purefn __ovld read_imagei(read_only image3d_t image, sampler_t sampler, float4 coord, float4 gradientX, float4 gradientY);
+uint4 __purefn __ovld read_imageui(read_only image3d_t image, sampler_t sampler, float4 coord, float4 gradientX, float4 gradientY);
+
+#endif //cl_khr_mipmap_image
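+
+// Illustrative sketch: with cl_khr_mipmap_image, a kernel can sample an
+// explicit mip level or supply gradients ('img', 'smp', 'uv', 'dPdx' and
+// 'dPdy' are hypothetical):
+//   float4 texel = read_imagef(img, smp, uv, 2.0f);        // lod = 2
+//   float4 t2    = read_imagef(img, smp, uv, dPdx, dPdy);  // gradient LOD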
+
+/**
+ * Sampler-less Image Access
+ */
+
+float4 __purefn __ovld read_imagef(read_only image1d_t image, int coord);
+int4 __purefn __ovld read_imagei(read_only image1d_t image, int coord);
+uint4 __purefn __ovld read_imageui(read_only image1d_t image, int coord);
+
+float4 __purefn __ovld read_imagef(read_only image1d_buffer_t image, int coord);
+int4 __purefn __ovld read_imagei(read_only image1d_buffer_t image, int coord);
+uint4 __purefn __ovld read_imageui(read_only image1d_buffer_t image, int coord);
+
+float4 __purefn __ovld read_imagef(read_only image1d_array_t image, int2 coord);
+int4 __purefn __ovld read_imagei(read_only image1d_array_t image, int2 coord);
+uint4 __purefn __ovld read_imageui(read_only image1d_array_t image, int2 coord);
+
+float4 __purefn __ovld read_imagef(read_only image2d_t image, int2 coord);
+int4 __purefn __ovld read_imagei(read_only image2d_t image, int2 coord);
+uint4 __purefn __ovld read_imageui(read_only image2d_t image, int2 coord);
+
+float4 __purefn __ovld read_imagef(read_only image2d_array_t image, int4 coord);
+int4 __purefn __ovld read_imagei(read_only image2d_array_t image, int4 coord);
+uint4 __purefn __ovld read_imageui(read_only image2d_array_t image, int4 coord);
+
+#ifdef cl_khr_depth_images
+float __purefn __ovld read_imagef(read_only image2d_depth_t image, int2 coord);
+float __purefn __ovld read_imagef(read_only image2d_array_depth_t image, int4 coord);
+#endif //cl_khr_depth_images
+
+float4 __purefn __ovld read_imagef(read_only image3d_t image, int4 coord);
+int4 __purefn __ovld read_imagei(read_only image3d_t image, int4 coord);
+uint4 __purefn __ovld read_imageui(read_only image3d_t image, int4 coord);
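+
+// Illustrative sketch: sampler-less reads take integer coordinates directly
+// and behave as if the sampler were CLK_NORMALIZED_COORDS_FALSE |
+// CLK_ADDRESS_NONE | CLK_FILTER_NEAREST, e.g. ('src' is hypothetical):
+//   float4 texel = read_imagef(src, (int2)(x, y));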
+
+// Image read functions returning half4 type
+#ifdef cl_khr_fp16
+half4 __purefn __ovld read_imageh(read_only image1d_t image, sampler_t sampler, int coord);
+half4 __purefn __ovld read_imageh(read_only image1d_t image, sampler_t sampler, float coord);
+half4 __purefn __ovld read_imageh(read_only image1d_array_t image, sampler_t sampler, int2 coord);
+half4 __purefn __ovld read_imageh(read_only image1d_array_t image, sampler_t sampler, float2 coord);
+half4 __purefn __ovld read_imageh(read_only image2d_t image, sampler_t sampler, int2 coord);
+half4 __purefn __ovld read_imageh(read_only image2d_t image, sampler_t sampler, float2 coord);
+half4 __purefn __ovld read_imageh(read_only image3d_t image, sampler_t sampler, int4 coord);
+half4 __purefn __ovld read_imageh(read_only image3d_t image, sampler_t sampler, float4 coord);
+half4 __purefn __ovld read_imageh(read_only image2d_array_t image, sampler_t sampler, int4 coord);
+half4 __purefn __ovld read_imageh(read_only image2d_array_t image, sampler_t sampler, float4 coord);
+half4 __purefn __ovld read_imageh(read_only image1d_t image, int coord);
+half4 __purefn __ovld read_imageh(read_only image2d_t image, int2 coord);
+half4 __purefn __ovld read_imageh(read_only image3d_t image, int4 coord);
+half4 __purefn __ovld read_imageh(read_only image1d_array_t image, int2 coord);
+half4 __purefn __ovld read_imageh(read_only image2d_array_t image, int4 coord);
+half4 __purefn __ovld read_imageh(read_only image1d_buffer_t image, int coord);
+#endif //cl_khr_fp16
+
+// Image read functions for read_write images
+#if __OPENCL_C_VERSION__ >= CL_VERSION_2_0
+float4 __purefn __ovld read_imagef(read_write image1d_t image, int coord);
+int4 __purefn __ovld read_imagei(read_write image1d_t image, int coord);
+uint4 __purefn __ovld read_imageui(read_write image1d_t image, int coord);
+
+float4 __purefn __ovld read_imagef(read_write image1d_buffer_t image, int coord);
+int4 __purefn __ovld read_imagei(read_write image1d_buffer_t image, int coord);
+uint4 __purefn __ovld read_imageui(read_write image1d_buffer_t image, int coord);
+
+float4 __purefn __ovld read_imagef(read_write image1d_array_t image, int2 coord);
+int4 __purefn __ovld read_imagei(read_write image1d_array_t image, int2 coord);
+uint4 __purefn __ovld read_imageui(read_write image1d_array_t image, int2 coord);
+
+float4 __purefn __ovld read_imagef(read_write image2d_t image, int2 coord);
+int4 __purefn __ovld read_imagei(read_write image2d_t image, int2 coord);
+uint4 __purefn __ovld read_imageui(read_write image2d_t image, int2 coord);
+
+float4 __purefn __ovld read_imagef(read_write image2d_array_t image, int4 coord);
+int4 __purefn __ovld read_imagei(read_write image2d_array_t image, int4 coord);
+uint4 __purefn __ovld read_imageui(read_write image2d_array_t image, int4 coord);
+
+float4 __purefn __ovld read_imagef(read_write image3d_t image, int4 coord);
+int4 __purefn __ovld read_imagei(read_write image3d_t image, int4 coord);
+uint4 __purefn __ovld read_imageui(read_write image3d_t image, int4 coord);
+
+#ifdef cl_khr_depth_images
+float __purefn __ovld read_imagef(read_write image2d_depth_t image, int2 coord);
+float __purefn __ovld read_imagef(read_write image2d_array_depth_t image, int4 coord);
+#endif //cl_khr_depth_images
+
+#if defined(cl_khr_gl_msaa_sharing)
+float4 __purefn __ovld read_imagef(read_write image2d_msaa_t image, int2 coord, int sample);
+int4 __purefn __ovld read_imagei(read_write image2d_msaa_t image, int2 coord, int sample);
+uint4 __purefn __ovld read_imageui(read_write image2d_msaa_t image, int2 coord, int sample);
+
+float4 __purefn __ovld read_imagef(read_write image2d_array_msaa_t image, int4 coord, int sample);
+int4 __purefn __ovld read_imagei(read_write image2d_array_msaa_t image, int4 coord, int sample);
+uint4 __purefn __ovld read_imageui(read_write image2d_array_msaa_t image, int4 coord, int sample);
+
+float __purefn __ovld read_imagef(read_write image2d_msaa_depth_t image, int2 coord, int sample);
+float __purefn __ovld read_imagef(read_write image2d_array_msaa_depth_t image, int4 coord, int sample);
+#endif //cl_khr_gl_msaa_sharing
+
+#ifdef cl_khr_mipmap_image
+float4 __purefn __ovld read_imagef(read_write image1d_t image, sampler_t sampler, float coord, float lod);
+int4 __purefn __ovld read_imagei(read_write image1d_t image, sampler_t sampler, float coord, float lod);
+uint4 __purefn __ovld read_imageui(read_write image1d_t image, sampler_t sampler, float coord, float lod);
+
+float4 __purefn __ovld read_imagef(read_write image1d_array_t image_array, sampler_t sampler, float2 coord, float lod);
+int4 __purefn __ovld read_imagei(read_write image1d_array_t image_array, sampler_t sampler, float2 coord, float lod);
+uint4 __purefn __ovld read_imageui(read_write image1d_array_t image_array, sampler_t sampler, float2 coord, float lod);
+
+float4 __purefn __ovld read_imagef(read_write image2d_t image, sampler_t sampler, float2 coord, float lod);
+int4 __purefn __ovld read_imagei(read_write image2d_t image, sampler_t sampler, float2 coord, float lod);
+uint4 __purefn __ovld read_imageui(read_write image2d_t image, sampler_t sampler, float2 coord, float lod);
+
+float __purefn __ovld read_imagef(read_write image2d_depth_t image, sampler_t sampler, float2 coord, float lod);
+
+float4 __purefn __ovld read_imagef(read_write image2d_array_t image_array, sampler_t sampler, float4 coord, float lod);
+int4 __purefn __ovld read_imagei(read_write image2d_array_t image_array, sampler_t sampler, float4 coord, float lod);
+uint4 __purefn __ovld read_imageui(read_write image2d_array_t image_array, sampler_t sampler, float4 coord, float lod);
+
+float __purefn __ovld read_imagef(read_write image2d_array_depth_t image, sampler_t sampler, float4 coord, float lod);
+
+float4 __purefn __ovld read_imagef(read_write image3d_t image, sampler_t sampler, float4 coord, float lod);
+int4 __purefn __ovld read_imagei(read_write image3d_t image, sampler_t sampler, float4 coord, float lod);
+uint4 __purefn __ovld read_imageui(read_write image3d_t image, sampler_t sampler, float4 coord, float lod);
+
+float4 __purefn __ovld read_imagef(read_write image1d_t image, sampler_t sampler, float coord, float gradientX, float gradientY);
+int4 __purefn __ovld read_imagei(read_write image1d_t image, sampler_t sampler, float coord, float gradientX, float gradientY);
+uint4 __purefn __ovld read_imageui(read_write image1d_t image, sampler_t sampler, float coord, float gradientX, float gradientY);
+
+float4 __purefn __ovld read_imagef(read_write image1d_array_t image_array, sampler_t sampler, float2 coord, float gradientX, float gradientY);
+int4 __purefn __ovld read_imagei(read_write image1d_array_t image_array, sampler_t sampler, float2 coord, float gradientX, float gradientY);
+uint4 __purefn __ovld read_imageui(read_write image1d_array_t image_array, sampler_t sampler, float2 coord, float gradientX, float gradientY);
+
+float4 __purefn __ovld read_imagef(read_write image2d_t image, sampler_t sampler, float2 coord, float2 gradientX, float2 gradientY);
+int4 __purefn __ovld read_imagei(read_write image2d_t image, sampler_t sampler, float2 coord, float2 gradientX, float2 gradientY);
+uint4 __purefn __ovld read_imageui(read_write image2d_t image, sampler_t sampler, float2 coord, float2 gradientX, float2 gradientY);
+
+float __purefn __ovld read_imagef(read_write image2d_depth_t image, sampler_t sampler, float2 coord, float2 gradientX, float2 gradientY);
+
+float4 __purefn __ovld read_imagef(read_write image2d_array_t image_array, sampler_t sampler, float4 coord, float2 gradientX, float2 gradientY);
+int4 __purefn __ovld read_imagei(read_write image2d_array_t image_array, sampler_t sampler, float4 coord, float2 gradientX, float2 gradientY);
+uint4 __purefn __ovld read_imageui(read_write image2d_array_t image_array, sampler_t sampler, float4 coord, float2 gradientX, float2 gradientY);
+
+float __purefn __ovld read_imagef(read_write image2d_array_depth_t image, sampler_t sampler, float4 coord, float2 gradientX, float2 gradientY);
+
+float4 __purefn __ovld read_imagef(read_write image3d_t image, sampler_t sampler, float4 coord, float4 gradientX, float4 gradientY);
+int4 __purefn __ovld read_imagei(read_write image3d_t image, sampler_t sampler, float4 coord, float4 gradientX, float4 gradientY);
+uint4 __purefn __ovld read_imageui(read_write image3d_t image, sampler_t sampler, float4 coord, float4 gradientX, float4 gradientY);
+#endif //cl_khr_mipmap_image
+
+// Image read functions returning half4 type
+#ifdef cl_khr_fp16
+half4 __purefn __ovld read_imageh(read_write image1d_t image, int coord);
+half4 __purefn __ovld read_imageh(read_write image2d_t image, int2 coord);
+half4 __purefn __ovld read_imageh(read_write image3d_t image, int4 coord);
+half4 __purefn __ovld read_imageh(read_write image1d_array_t image, int2 coord);
+half4 __purefn __ovld read_imageh(read_write image2d_array_t image, int4 coord);
+half4 __purefn __ovld read_imageh(read_write image1d_buffer_t image, int coord);
+#endif //cl_khr_fp16
+#endif //__OPENCL_C_VERSION__ >= CL_VERSION_2_0
+
+/**
+ * Write color value to location specified by coordinate
+ * (coord.x, coord.y) in the 2D image object specified by image.
+ * (coord.x, coord.y) are considered to be unnormalized coordinates
+ * and must be in the range 0 ... image width - 1, and 0
+ * ... image height - 1.
+ *
+ * Write color value to location specified by coordinate
+ * (coord.x, coord.y) in the 2D image object specified by index
+ * (coord.z) of the 2D image array object image_array.
+ * (coord.x, coord.y) are considered to be unnormalized
+ * coordinates and must be in the range 0 ... image width - 1,
+ * and 0 ... image height - 1.
+ *
+ * Write color value to location specified by coordinate
+ * (coord) in the 1D image (buffer) object specified by image.
+ * coord is considered to be an unnormalized coordinate
+ * and must be in the range 0 ... image width - 1.
+ *
+ * Write color value to location specified by coordinate
+ * (coord.x) in the 1D image object specified by index
+ * (coord.y) of the 1D image array object image_array.
+ * coord.x is considered to be an unnormalized coordinate
+ * and must be in the range 0 ... image width - 1.
+ *
+ * Write color value to location specified by coordinate
+ * (coord.x, coord.y, coord.z) in the 3D image object specified by image.
+ * (coord.x, coord.y, coord.z) are considered to be unnormalized coordinates
+ * and must be in the range 0 ... image width - 1, 0 ... image
+ * height - 1, and 0 ... image depth - 1.
+ *
+ * For mipmap images, use the mip level specified by lod.
+ *
+ * Appropriate data format conversion to the specified
+ * image format is done before writing the color value.
+ *
+ * write_imagef can only be used with image objects
+ * created with image_channel_data_type set to one of
+ * the pre-defined packed formats or set to
+ * CL_SNORM_INT8, CL_UNORM_INT8,
+ * CL_SNORM_INT16, CL_UNORM_INT16,
+ * CL_HALF_FLOAT or CL_FLOAT. Appropriate data
+ * format conversion will be done to convert channel
+ * data from a floating-point value to the actual data format
+ * in which the channels are stored.
+ *
+ * write_imagei can only be used with image objects
+ * created with image_channel_data_type set to one of
+ * the following values:
+ * CL_SIGNED_INT8,
+ * CL_SIGNED_INT16 and
+ * CL_SIGNED_INT32.
+ *
+ * write_imageui can only be used with image objects
+ * created with image_channel_data_type set to one of
+ * the following values:
+ * CL_UNSIGNED_INT8,
+ * CL_UNSIGNED_INT16 and
+ * CL_UNSIGNED_INT32.
+ *
+ * The behavior of write_imagef, write_imagei and
+ * write_imageui for image objects created with
+ * image_channel_data_type values not specified in
+ * the description above or with (x, y) coordinate
+ * values that are not in the range (0 ... image width - 1,
+ * 0 ... image height - 1), respectively, is undefined.
+ */
+void __ovld write_imagef(write_only image2d_t image, int2 coord, float4 color);
+void __ovld write_imagei(write_only image2d_t image, int2 coord, int4 color);
+void __ovld write_imageui(write_only image2d_t image, int2 coord, uint4 color);
+
+void __ovld write_imagef(write_only image2d_array_t image_array, int4 coord, float4 color);
+void __ovld write_imagei(write_only image2d_array_t image_array, int4 coord, int4 color);
+void __ovld write_imageui(write_only image2d_array_t image_array, int4 coord, uint4 color);
+
+void __ovld write_imagef(write_only image1d_t image, int coord, float4 color);
+void __ovld write_imagei(write_only image1d_t image, int coord, int4 color);
+void __ovld write_imageui(write_only image1d_t image, int coord, uint4 color);
+
+void __ovld write_imagef(write_only image1d_buffer_t image, int coord, float4 color);
+void __ovld write_imagei(write_only image1d_buffer_t image, int coord, int4 color);
+void __ovld write_imageui(write_only image1d_buffer_t image, int coord, uint4 color);
+
+void __ovld write_imagef(write_only image1d_array_t image_array, int2 coord, float4 color);
+void __ovld write_imagei(write_only image1d_array_t image_array, int2 coord, int4 color);
+void __ovld write_imageui(write_only image1d_array_t image_array, int2 coord, uint4 color);
+
+void __ovld write_imagef(write_only image3d_t image, int4 coord, float4 color);
+void __ovld write_imagei(write_only image3d_t image, int4 coord, int4 color);
+void __ovld write_imageui(write_only image3d_t image, int4 coord, uint4 color);
+
+#ifdef cl_khr_depth_images
+void __ovld write_imagef(write_only image2d_depth_t image, int2 coord, float color);
+void __ovld write_imagef(write_only image2d_array_depth_t image, int4 coord, float color);
+#endif //cl_khr_depth_images
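+
+// Usage sketch (the kernel name and the assumption that dst was created with a
+// float-compatible channel data type such as CL_UNORM_INT8 are illustrative):
+//
+//   __kernel void darken(read_only image2d_t src, write_only image2d_t dst) {
+//     int2 coord = (int2)(get_global_id(0), get_global_id(1));
+//     float4 px = read_imagef(src, coord);   // samplerless integer-coordinate read
+//     write_imagef(dst, coord, 0.5f * px);   // converted to dst's stored format
+//   }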
+
+// OpenCL Extension v2.0 s9.18 - Mipmaps
+#ifdef cl_khr_mipmap_image
+void __ovld write_imagef(write_only image1d_t image, int coord, int lod, float4 color);
+void __ovld write_imagei(write_only image1d_t image, int coord, int lod, int4 color);
+void __ovld write_imageui(write_only image1d_t image, int coord, int lod, uint4 color);
+
+void __ovld write_imagef(write_only image1d_array_t image_array, int2 coord, int lod, float4 color);
+void __ovld write_imagei(write_only image1d_array_t image_array, int2 coord, int lod, int4 color);
+void __ovld write_imageui(write_only image1d_array_t image_array, int2 coord, int lod, uint4 color);
+
+void __ovld write_imagef(write_only image2d_t image, int2 coord, int lod, float4 color);
+void __ovld write_imagei(write_only image2d_t image, int2 coord, int lod, int4 color);
+void __ovld write_imageui(write_only image2d_t image, int2 coord, int lod, uint4 color);
+
+void __ovld write_imagef(write_only image2d_array_t image_array, int4 coord, int lod, float4 color);
+void __ovld write_imagei(write_only image2d_array_t image_array, int4 coord, int lod, int4 color);
+void __ovld write_imageui(write_only image2d_array_t image_array, int4 coord, int lod, uint4 color);
+
+void __ovld write_imagef(write_only image2d_depth_t image, int2 coord, int lod, float color);
+void __ovld write_imagef(write_only image2d_array_depth_t image, int4 coord, int lod, float color);
+
+void __ovld write_imagef(write_only image3d_t image, int4 coord, int lod, float4 color);
+void __ovld write_imagei(write_only image3d_t image, int4 coord, int lod, int4 color);
+void __ovld write_imageui(write_only image3d_t image, int4 coord, int lod, uint4 color);
+#endif //cl_khr_mipmap_image
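+
+// Usage sketch, assuming cl_khr_mipmap_image is supported (kernel name is
+// illustrative); coordinates are in the pixel units of the selected mip level:
+//
+//   __kernel void clear_level1(write_only image2d_t img) {
+//     int2 c = (int2)(get_global_id(0), get_global_id(1));
+//     write_imagef(img, c, 1, (float4)(0.0f));   // write to mip level 1
+//   }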
+
+// Image write functions for half4 type
+#ifdef cl_khr_fp16
+void __ovld write_imageh(write_only image1d_t image, int coord, half4 color);
+void __ovld write_imageh(write_only image2d_t image, int2 coord, half4 color);
+void __ovld write_imageh(write_only image3d_t image, int4 coord, half4 color);
+void __ovld write_imageh(write_only image1d_array_t image, int2 coord, half4 color);
+void __ovld write_imageh(write_only image2d_array_t image, int4 coord, half4 color);
+void __ovld write_imageh(write_only image1d_buffer_t image, int coord, half4 color);
+#endif //cl_khr_fp16
+
+// Image write functions for read_write images
+#if __OPENCL_C_VERSION__ >= CL_VERSION_2_0
+void __ovld write_imagef(read_write image2d_t image, int2 coord, float4 color);
+void __ovld write_imagei(read_write image2d_t image, int2 coord, int4 color);
+void __ovld write_imageui(read_write image2d_t image, int2 coord, uint4 color);
+
+void __ovld write_imagef(read_write image2d_array_t image_array, int4 coord, float4 color);
+void __ovld write_imagei(read_write image2d_array_t image_array, int4 coord, int4 color);
+void __ovld write_imageui(read_write image2d_array_t image_array, int4 coord, uint4 color);
+
+void __ovld write_imagef(read_write image1d_t image, int coord, float4 color);
+void __ovld write_imagei(read_write image1d_t image, int coord, int4 color);
+void __ovld write_imageui(read_write image1d_t image, int coord, uint4 color);
+
+void __ovld write_imagef(read_write image1d_buffer_t image, int coord, float4 color);
+void __ovld write_imagei(read_write image1d_buffer_t image, int coord, int4 color);
+void __ovld write_imageui(read_write image1d_buffer_t image, int coord, uint4 color);
+
+void __ovld write_imagef(read_write image1d_array_t image_array, int2 coord, float4 color);
+void __ovld write_imagei(read_write image1d_array_t image_array, int2 coord, int4 color);
+void __ovld write_imageui(read_write image1d_array_t image_array, int2 coord, uint4 color);
+
+void __ovld write_imagef(read_write image3d_t image, int4 coord, float4 color);
+void __ovld write_imagei(read_write image3d_t image, int4 coord, int4 color);
+void __ovld write_imageui(read_write image3d_t image, int4 coord, uint4 color);
+
+#ifdef cl_khr_depth_images
+void __ovld write_imagef(read_write image2d_depth_t image, int2 coord, float color);
+void __ovld write_imagef(read_write image2d_array_depth_t image, int4 coord, float color);
+#endif //cl_khr_depth_images
+
+#ifdef cl_khr_mipmap_image
+void __ovld write_imagef(read_write image1d_t image, int coord, int lod, float4 color);
+void __ovld write_imagei(read_write image1d_t image, int coord, int lod, int4 color);
+void __ovld write_imageui(read_write image1d_t image, int coord, int lod, uint4 color);
+
+void __ovld write_imagef(read_write image1d_array_t image_array, int2 coord, int lod, float4 color);
+void __ovld write_imagei(read_write image1d_array_t image_array, int2 coord, int lod, int4 color);
+void __ovld write_imageui(read_write image1d_array_t image_array, int2 coord, int lod, uint4 color);
+
+void __ovld write_imagef(read_write image2d_t image, int2 coord, int lod, float4 color);
+void __ovld write_imagei(read_write image2d_t image, int2 coord, int lod, int4 color);
+void __ovld write_imageui(read_write image2d_t image, int2 coord, int lod, uint4 color);
+
+void __ovld write_imagef(read_write image2d_array_t image_array, int4 coord, int lod, float4 color);
+void __ovld write_imagei(read_write image2d_array_t image_array, int4 coord, int lod, int4 color);
+void __ovld write_imageui(read_write image2d_array_t image_array, int4 coord, int lod, uint4 color);
+
+void __ovld write_imagef(read_write image2d_depth_t image, int2 coord, int lod, float color);
+void __ovld write_imagef(read_write image2d_array_depth_t image, int4 coord, int lod, float color);
+
+void __ovld write_imagef(read_write image3d_t image, int4 coord, int lod, float4 color);
+void __ovld write_imagei(read_write image3d_t image, int4 coord, int lod, int4 color);
+void __ovld write_imageui(read_write image3d_t image, int4 coord, int lod, uint4 color);
+#endif //cl_khr_mipmap_image
+
+// Image write functions for half4 type
+#ifdef cl_khr_fp16
+void __ovld write_imageh(read_write image1d_t image, int coord, half4 color);
+void __ovld write_imageh(read_write image2d_t image, int2 coord, half4 color);
+void __ovld write_imageh(read_write image3d_t image, int4 coord, half4 color);
+void __ovld write_imageh(read_write image1d_array_t image, int2 coord, half4 color);
+void __ovld write_imageh(read_write image2d_array_t image, int4 coord, half4 color);
+void __ovld write_imageh(read_write image1d_buffer_t image, int coord, half4 color);
+#endif //cl_khr_fp16
+#endif //__OPENCL_C_VERSION__ >= CL_VERSION_2_0
+
+// Note: In OpenCL v1.0/1.1/1.2, the image argument of the image query builtin functions has
+// no access qualifier and is taken to be read_only by default. Image query builtin
+// functions with a write_only image argument therefore also need to be declared.
+
+/**
+ * Return the image width in pixels.
+ */
+int __ovld __cnfn get_image_width(read_only image1d_t image);
+int __ovld __cnfn get_image_width(read_only image1d_buffer_t image);
+int __ovld __cnfn get_image_width(read_only image2d_t image);
+int __ovld __cnfn get_image_width(read_only image3d_t image);
+int __ovld __cnfn get_image_width(read_only image1d_array_t image);
+int __ovld __cnfn get_image_width(read_only image2d_array_t image);
+#ifdef cl_khr_depth_images
+int __ovld __cnfn get_image_width(read_only image2d_depth_t image);
+int __ovld __cnfn get_image_width(read_only image2d_array_depth_t image);
+#endif //cl_khr_depth_images
+#if defined(cl_khr_gl_msaa_sharing)
+int __ovld __cnfn get_image_width(read_only image2d_msaa_t image);
+int __ovld __cnfn get_image_width(read_only image2d_msaa_depth_t image);
+int __ovld __cnfn get_image_width(read_only image2d_array_msaa_t image);
+int __ovld __cnfn get_image_width(read_only image2d_array_msaa_depth_t image);
+#endif //cl_khr_gl_msaa_sharing
+
+int __ovld __cnfn get_image_width(write_only image1d_t image);
+int __ovld __cnfn get_image_width(write_only image1d_buffer_t image);
+int __ovld __cnfn get_image_width(write_only image2d_t image);
+int __ovld __cnfn get_image_width(write_only image3d_t image);
+int __ovld __cnfn get_image_width(write_only image1d_array_t image);
+int __ovld __cnfn get_image_width(write_only image2d_array_t image);
+#ifdef cl_khr_depth_images
+int __ovld __cnfn get_image_width(write_only image2d_depth_t image);
+int __ovld __cnfn get_image_width(write_only image2d_array_depth_t image);
+#endif //cl_khr_depth_images
+#if defined(cl_khr_gl_msaa_sharing)
+int __ovld __cnfn get_image_width(write_only image2d_msaa_t image);
+int __ovld __cnfn get_image_width(write_only image2d_msaa_depth_t image);
+int __ovld __cnfn get_image_width(write_only image2d_array_msaa_t image);
+int __ovld __cnfn get_image_width(write_only image2d_array_msaa_depth_t image);
+#endif //cl_khr_gl_msaa_sharing
+
+#if __OPENCL_C_VERSION__ >= CL_VERSION_2_0
+int __ovld __cnfn get_image_width(read_write image1d_t image);
+int __ovld __cnfn get_image_width(read_write image1d_buffer_t image);
+int __ovld __cnfn get_image_width(read_write image2d_t image);
+int __ovld __cnfn get_image_width(read_write image3d_t image);
+int __ovld __cnfn get_image_width(read_write image1d_array_t image);
+int __ovld __cnfn get_image_width(read_write image2d_array_t image);
+#ifdef cl_khr_depth_images
+int __ovld __cnfn get_image_width(read_write image2d_depth_t image);
+int __ovld __cnfn get_image_width(read_write image2d_array_depth_t image);
+#endif //cl_khr_depth_images
+#if defined(cl_khr_gl_msaa_sharing)
+int __ovld __cnfn get_image_width(read_write image2d_msaa_t image);
+int __ovld __cnfn get_image_width(read_write image2d_msaa_depth_t image);
+int __ovld __cnfn get_image_width(read_write image2d_array_msaa_t image);
+int __ovld __cnfn get_image_width(read_write image2d_array_msaa_depth_t image);
+#endif //cl_khr_gl_msaa_sharing
+#endif //__OPENCL_C_VERSION__ >= CL_VERSION_2_0
+
+/**
+ * Return the image height in pixels.
+ */
+int __ovld __cnfn get_image_height(read_only image2d_t image);
+int __ovld __cnfn get_image_height(read_only image3d_t image);
+int __ovld __cnfn get_image_height(read_only image2d_array_t image);
+#ifdef cl_khr_depth_images
+int __ovld __cnfn get_image_height(read_only image2d_depth_t image);
+int __ovld __cnfn get_image_height(read_only image2d_array_depth_t image);
+#endif //cl_khr_depth_images
+#if defined(cl_khr_gl_msaa_sharing)
+int __ovld __cnfn get_image_height(read_only image2d_msaa_t image);
+int __ovld __cnfn get_image_height(read_only image2d_msaa_depth_t image);
+int __ovld __cnfn get_image_height(read_only image2d_array_msaa_t image);
+int __ovld __cnfn get_image_height(read_only image2d_array_msaa_depth_t image);
+#endif //cl_khr_gl_msaa_sharing
+
+int __ovld __cnfn get_image_height(write_only image2d_t image);
+int __ovld __cnfn get_image_height(write_only image3d_t image);
+int __ovld __cnfn get_image_height(write_only image2d_array_t image);
+#ifdef cl_khr_depth_images
+int __ovld __cnfn get_image_height(write_only image2d_depth_t image);
+int __ovld __cnfn get_image_height(write_only image2d_array_depth_t image);
+#endif //cl_khr_depth_images
+#if defined(cl_khr_gl_msaa_sharing)
+int __ovld __cnfn get_image_height(write_only image2d_msaa_t image);
+int __ovld __cnfn get_image_height(write_only image2d_msaa_depth_t image);
+int __ovld __cnfn get_image_height(write_only image2d_array_msaa_t image);
+int __ovld __cnfn get_image_height(write_only image2d_array_msaa_depth_t image);
+#endif //cl_khr_gl_msaa_sharing
+
+#if __OPENCL_C_VERSION__ >= CL_VERSION_2_0
+int __ovld __cnfn get_image_height(read_write image2d_t image);
+int __ovld __cnfn get_image_height(read_write image3d_t image);
+int __ovld __cnfn get_image_height(read_write image2d_array_t image);
+#ifdef cl_khr_depth_images
+int __ovld __cnfn get_image_height(read_write image2d_depth_t image);
+int __ovld __cnfn get_image_height(read_write image2d_array_depth_t image);
+#endif //cl_khr_depth_images
+#if defined(cl_khr_gl_msaa_sharing)
+int __ovld __cnfn get_image_height(read_write image2d_msaa_t image);
+int __ovld __cnfn get_image_height(read_write image2d_msaa_depth_t image);
+int __ovld __cnfn get_image_height(read_write image2d_array_msaa_t image);
+int __ovld __cnfn get_image_height(read_write image2d_array_msaa_depth_t image);
+#endif //cl_khr_gl_msaa_sharing
+#endif //__OPENCL_C_VERSION__ >= CL_VERSION_2_0
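+
+// Usage sketch: deriving normalized texel-center coordinates from the queries
+// above (the kernel name and output layout are illustrative):
+//
+//   __kernel void centers(read_only image2d_t img, __global float2 *out) {
+//     int2 c = (int2)(get_global_id(0), get_global_id(1));
+//     float2 size = (float2)(get_image_width(img), get_image_height(img));
+//     out[c.y * get_image_width(img) + c.x] = (convert_float2(c) + 0.5f) / size;
+//   }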
+
+/**
+ * Return the image depth in pixels.
+ */
+int __ovld __cnfn get_image_depth(read_only image3d_t image);
+
+int __ovld __cnfn get_image_depth(write_only image3d_t image);
+
+#if __OPENCL_C_VERSION__ >= CL_VERSION_2_0
+int __ovld __cnfn get_image_depth(read_write image3d_t image);
+#endif //__OPENCL_C_VERSION__ >= CL_VERSION_2_0
+
+// OpenCL Extension v2.0 s9.18 - Mipmaps
+#ifdef cl_khr_mipmap_image
+/**
+ * Return the number of mip levels of the image.
+ */
+
+int __ovld get_image_num_mip_levels(read_only image1d_t image);
+int __ovld get_image_num_mip_levels(read_only image2d_t image);
+int __ovld get_image_num_mip_levels(read_only image3d_t image);
+
+int __ovld get_image_num_mip_levels(write_only image1d_t image);
+int __ovld get_image_num_mip_levels(write_only image2d_t image);
+int __ovld get_image_num_mip_levels(write_only image3d_t image);
+
+#if __OPENCL_C_VERSION__ >= CL_VERSION_2_0
+int __ovld get_image_num_mip_levels(read_write image1d_t image);
+int __ovld get_image_num_mip_levels(read_write image2d_t image);
+int __ovld get_image_num_mip_levels(read_write image3d_t image);
+#endif //__OPENCL_C_VERSION__ >= CL_VERSION_2_0
+
+int __ovld get_image_num_mip_levels(read_only image1d_array_t image);
+int __ovld get_image_num_mip_levels(read_only image2d_array_t image);
+int __ovld get_image_num_mip_levels(read_only image2d_array_depth_t image);
+int __ovld get_image_num_mip_levels(read_only image2d_depth_t image);
+
+int __ovld get_image_num_mip_levels(write_only image1d_array_t image);
+int __ovld get_image_num_mip_levels(write_only image2d_array_t image);
+int __ovld get_image_num_mip_levels(write_only image2d_array_depth_t image);
+int __ovld get_image_num_mip_levels(write_only image2d_depth_t image);
+
+#if __OPENCL_C_VERSION__ >= CL_VERSION_2_0
+int __ovld get_image_num_mip_levels(read_write image1d_array_t image);
+int __ovld get_image_num_mip_levels(read_write image2d_array_t image);
+int __ovld get_image_num_mip_levels(read_write image2d_array_depth_t image);
+int __ovld get_image_num_mip_levels(read_write image2d_depth_t image);
+#endif //__OPENCL_C_VERSION__ >= CL_VERSION_2_0
+
+#endif //cl_khr_mipmap_image
+
+/**
+ * Return the channel data type. Valid values are:
+ * CLK_SNORM_INT8
+ * CLK_SNORM_INT16
+ * CLK_UNORM_INT8
+ * CLK_UNORM_INT16
+ * CLK_UNORM_SHORT_565
+ * CLK_UNORM_SHORT_555
+ * CLK_UNORM_INT_101010
+ * CLK_SIGNED_INT8
+ * CLK_SIGNED_INT16
+ * CLK_SIGNED_INT32
+ * CLK_UNSIGNED_INT8
+ * CLK_UNSIGNED_INT16
+ * CLK_UNSIGNED_INT32
+ * CLK_HALF_FLOAT
+ * CLK_FLOAT
+ * CLK_UNORM_INT24
+ */
+
+//
+// Channel data type, numbering must be aligned with cl_channel_type in cl.h
+//
+#define CLK_SNORM_INT8 0x10D0
+#define CLK_SNORM_INT16 0x10D1
+#define CLK_UNORM_INT8 0x10D2
+#define CLK_UNORM_INT16 0x10D3
+#define CLK_UNORM_SHORT_565 0x10D4
+#define CLK_UNORM_SHORT_555 0x10D5
+#define CLK_UNORM_INT_101010 0x10D6
+#define CLK_SIGNED_INT8 0x10D7
+#define CLK_SIGNED_INT16 0x10D8
+#define CLK_SIGNED_INT32 0x10D9
+#define CLK_UNSIGNED_INT8 0x10DA
+#define CLK_UNSIGNED_INT16 0x10DB
+#define CLK_UNSIGNED_INT32 0x10DC
+#define CLK_HALF_FLOAT 0x10DD
+#define CLK_FLOAT 0x10DE
+#define CLK_UNORM_INT24 0x10DF
+
+int __ovld __cnfn get_image_channel_data_type(read_only image1d_t image);
+int __ovld __cnfn get_image_channel_data_type(read_only image1d_buffer_t image);
+int __ovld __cnfn get_image_channel_data_type(read_only image2d_t image);
+int __ovld __cnfn get_image_channel_data_type(read_only image3d_t image);
+int __ovld __cnfn get_image_channel_data_type(read_only image1d_array_t image);
+int __ovld __cnfn get_image_channel_data_type(read_only image2d_array_t image);
+#ifdef cl_khr_depth_images
+int __ovld __cnfn get_image_channel_data_type(read_only image2d_depth_t image);
+int __ovld __cnfn get_image_channel_data_type(read_only image2d_array_depth_t image);
+#endif //cl_khr_depth_images
+#if defined(cl_khr_gl_msaa_sharing)
+int __ovld __cnfn get_image_channel_data_type(read_only image2d_msaa_t image);
+int __ovld __cnfn get_image_channel_data_type(read_only image2d_msaa_depth_t image);
+int __ovld __cnfn get_image_channel_data_type(read_only image2d_array_msaa_t image);
+int __ovld __cnfn get_image_channel_data_type(read_only image2d_array_msaa_depth_t image);
+#endif //cl_khr_gl_msaa_sharing
+
+int __ovld __cnfn get_image_channel_data_type(write_only image1d_t image);
+int __ovld __cnfn get_image_channel_data_type(write_only image1d_buffer_t image);
+int __ovld __cnfn get_image_channel_data_type(write_only image2d_t image);
+int __ovld __cnfn get_image_channel_data_type(write_only image3d_t image);
+int __ovld __cnfn get_image_channel_data_type(write_only image1d_array_t image);
+int __ovld __cnfn get_image_channel_data_type(write_only image2d_array_t image);
+#ifdef cl_khr_depth_images
+int __ovld __cnfn get_image_channel_data_type(write_only image2d_depth_t image);
+int __ovld __cnfn get_image_channel_data_type(write_only image2d_array_depth_t image);
+#endif //cl_khr_depth_images
+#if defined(cl_khr_gl_msaa_sharing)
+int __ovld __cnfn get_image_channel_data_type(write_only image2d_msaa_t image);
+int __ovld __cnfn get_image_channel_data_type(write_only image2d_msaa_depth_t image);
+int __ovld __cnfn get_image_channel_data_type(write_only image2d_array_msaa_t image);
+int __ovld __cnfn get_image_channel_data_type(write_only image2d_array_msaa_depth_t image);
+#endif //cl_khr_gl_msaa_sharing
+
+#if __OPENCL_C_VERSION__ >= CL_VERSION_2_0
+int __ovld __cnfn get_image_channel_data_type(read_write image1d_t image);
+int __ovld __cnfn get_image_channel_data_type(read_write image1d_buffer_t image);
+int __ovld __cnfn get_image_channel_data_type(read_write image2d_t image);
+int __ovld __cnfn get_image_channel_data_type(read_write image3d_t image);
+int __ovld __cnfn get_image_channel_data_type(read_write image1d_array_t image);
+int __ovld __cnfn get_image_channel_data_type(read_write image2d_array_t image);
+#ifdef cl_khr_depth_images
+int __ovld __cnfn get_image_channel_data_type(read_write image2d_depth_t image);
+int __ovld __cnfn get_image_channel_data_type(read_write image2d_array_depth_t image);
+#endif //cl_khr_depth_images
+#if defined(cl_khr_gl_msaa_sharing)
+int __ovld __cnfn get_image_channel_data_type(read_write image2d_msaa_t image);
+int __ovld __cnfn get_image_channel_data_type(read_write image2d_msaa_depth_t image);
+int __ovld __cnfn get_image_channel_data_type(read_write image2d_array_msaa_t image);
+int __ovld __cnfn get_image_channel_data_type(read_write image2d_array_msaa_depth_t image);
+#endif //cl_khr_gl_msaa_sharing
+#endif //__OPENCL_C_VERSION__ >= CL_VERSION_2_0
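+
+// Usage sketch: a kernel can key its sampling path off the stored format, e.g.
+//
+//   if (get_image_channel_data_type(img) == CLK_FLOAT ||
+//       get_image_channel_data_type(img) == CLK_HALF_FLOAT) {
+//     /* read with read_imagef */
+//   }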
+
+/**
+ * Return the image channel order. Valid values are:
+ * CLK_A
+ * CLK_R
+ * CLK_Rx
+ * CLK_RG
+ * CLK_RGx
+ * CLK_RA
+ * CLK_RGB
+ * CLK_RGBx
+ * CLK_RGBA
+ * CLK_ARGB
+ * CLK_BGRA
+ * CLK_INTENSITY
+ * CLK_LUMINANCE
+ * CLK_DEPTH
+ * CLK_DEPTH_STENCIL
+ * and, with OpenCL 2.0, CLK_sRGB, CLK_sRGBx, CLK_sRGBA and CLK_sBGRA
+ */
+//
+// Channel order, numbering must be aligned with cl_channel_order in cl.h
+//
+#define CLK_R 0x10B0
+#define CLK_A 0x10B1
+#define CLK_RG 0x10B2
+#define CLK_RA 0x10B3
+#define CLK_RGB 0x10B4
+#define CLK_RGBA 0x10B5
+#define CLK_BGRA 0x10B6
+#define CLK_ARGB 0x10B7
+#define CLK_INTENSITY 0x10B8
+#define CLK_LUMINANCE 0x10B9
+#define CLK_Rx 0x10BA
+#define CLK_RGx 0x10BB
+#define CLK_RGBx 0x10BC
+#define CLK_DEPTH 0x10BD
+#define CLK_DEPTH_STENCIL 0x10BE
+#if __OPENCL_C_VERSION__ >= CL_VERSION_2_0
+#define CLK_sRGB 0x10BF
+#define CLK_sRGBx 0x10C0
+#define CLK_sRGBA 0x10C1
+#define CLK_sBGRA 0x10C2
+#endif //__OPENCL_C_VERSION__ >= CL_VERSION_2_0
+
+int __ovld __cnfn get_image_channel_order(read_only image1d_t image);
+int __ovld __cnfn get_image_channel_order(read_only image1d_buffer_t image);
+int __ovld __cnfn get_image_channel_order(read_only image2d_t image);
+int __ovld __cnfn get_image_channel_order(read_only image3d_t image);
+int __ovld __cnfn get_image_channel_order(read_only image1d_array_t image);
+int __ovld __cnfn get_image_channel_order(read_only image2d_array_t image);
+#ifdef cl_khr_depth_images
+int __ovld __cnfn get_image_channel_order(read_only image2d_depth_t image);
+int __ovld __cnfn get_image_channel_order(read_only image2d_array_depth_t image);
+#endif //cl_khr_depth_images
+#if defined(cl_khr_gl_msaa_sharing)
+int __ovld __cnfn get_image_channel_order(read_only image2d_msaa_t image);
+int __ovld __cnfn get_image_channel_order(read_only image2d_msaa_depth_t image);
+int __ovld __cnfn get_image_channel_order(read_only image2d_array_msaa_t image);
+int __ovld __cnfn get_image_channel_order(read_only image2d_array_msaa_depth_t image);
+#endif //cl_khr_gl_msaa_sharing
+
+int __ovld __cnfn get_image_channel_order(write_only image1d_t image);
+int __ovld __cnfn get_image_channel_order(write_only image1d_buffer_t image);
+int __ovld __cnfn get_image_channel_order(write_only image2d_t image);
+int __ovld __cnfn get_image_channel_order(write_only image3d_t image);
+int __ovld __cnfn get_image_channel_order(write_only image1d_array_t image);
+int __ovld __cnfn get_image_channel_order(write_only image2d_array_t image);
+#ifdef cl_khr_depth_images
+int __ovld __cnfn get_image_channel_order(write_only image2d_depth_t image);
+int __ovld __cnfn get_image_channel_order(write_only image2d_array_depth_t image);
+#endif //cl_khr_depth_images
+#if defined(cl_khr_gl_msaa_sharing)
+int __ovld __cnfn get_image_channel_order(write_only image2d_msaa_t image);
+int __ovld __cnfn get_image_channel_order(write_only image2d_msaa_depth_t image);
+int __ovld __cnfn get_image_channel_order(write_only image2d_array_msaa_t image);
+int __ovld __cnfn get_image_channel_order(write_only image2d_array_msaa_depth_t image);
+#endif //cl_khr_gl_msaa_sharing
+
+#if __OPENCL_C_VERSION__ >= CL_VERSION_2_0
+int __ovld __cnfn get_image_channel_order(read_write image1d_t image);
+int __ovld __cnfn get_image_channel_order(read_write image1d_buffer_t image);
+int __ovld __cnfn get_image_channel_order(read_write image2d_t image);
+int __ovld __cnfn get_image_channel_order(read_write image3d_t image);
+int __ovld __cnfn get_image_channel_order(read_write image1d_array_t image);
+int __ovld __cnfn get_image_channel_order(read_write image2d_array_t image);
+#ifdef cl_khr_depth_images
+int __ovld __cnfn get_image_channel_order(read_write image2d_depth_t image);
+int __ovld __cnfn get_image_channel_order(read_write image2d_array_depth_t image);
+#endif //cl_khr_depth_images
+#if defined(cl_khr_gl_msaa_sharing)
+int __ovld __cnfn get_image_channel_order(read_write image2d_msaa_t image);
+int __ovld __cnfn get_image_channel_order(read_write image2d_msaa_depth_t image);
+int __ovld __cnfn get_image_channel_order(read_write image2d_array_msaa_t image);
+int __ovld __cnfn get_image_channel_order(read_write image2d_array_msaa_depth_t image);
+#endif //cl_khr_gl_msaa_sharing
+#endif //__OPENCL_C_VERSION__ >= CL_VERSION_2_0
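+
+// Usage sketch: testing whether an image carries an alpha channel, e.g.
+//
+//   int order = get_image_channel_order(img);
+//   bool has_alpha = (order == CLK_RGBA) || (order == CLK_BGRA) ||
+//                    (order == CLK_ARGB) || (order == CLK_RA)   ||
+//                    (order == CLK_A);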
+
+/**
+ * Return the 2D image width and height as an int2
+ * type. The width is returned in the x component, and
+ * the height in the y component.
+ */
+int2 __ovld __cnfn get_image_dim(read_only image2d_t image);
+int2 __ovld __cnfn get_image_dim(read_only image2d_array_t image);
+#ifdef cl_khr_depth_images
+int2 __ovld __cnfn get_image_dim(read_only image2d_array_depth_t image);
+int2 __ovld __cnfn get_image_dim(read_only image2d_depth_t image);
+#endif //cl_khr_depth_images
+#if defined(cl_khr_gl_msaa_sharing)
+int2 __ovld __cnfn get_image_dim(read_only image2d_msaa_t image);
+int2 __ovld __cnfn get_image_dim(read_only image2d_msaa_depth_t image);
+int2 __ovld __cnfn get_image_dim(read_only image2d_array_msaa_t image);
+int2 __ovld __cnfn get_image_dim(read_only image2d_array_msaa_depth_t image);
+#endif //cl_khr_gl_msaa_sharing
+
+int2 __ovld __cnfn get_image_dim(write_only image2d_t image);
+int2 __ovld __cnfn get_image_dim(write_only image2d_array_t image);
+#ifdef cl_khr_depth_images
+int2 __ovld __cnfn get_image_dim(write_only image2d_array_depth_t image);
+int2 __ovld __cnfn get_image_dim(write_only image2d_depth_t image);
+#endif //cl_khr_depth_images
+#if defined(cl_khr_gl_msaa_sharing)
+int2 __ovld __cnfn get_image_dim(write_only image2d_msaa_t image);
+int2 __ovld __cnfn get_image_dim(write_only image2d_msaa_depth_t image);
+int2 __ovld __cnfn get_image_dim(write_only image2d_array_msaa_t image);
+int2 __ovld __cnfn get_image_dim(write_only image2d_array_msaa_depth_t image);
+#endif //cl_khr_gl_msaa_sharing
+
+#if __OPENCL_C_VERSION__ >= CL_VERSION_2_0
+int2 __ovld __cnfn get_image_dim(read_write image2d_t image);
+int2 __ovld __cnfn get_image_dim(read_write image2d_array_t image);
+#ifdef cl_khr_depth_images
+int2 __ovld __cnfn get_image_dim(read_write image2d_array_depth_t image);
+int2 __ovld __cnfn get_image_dim(read_write image2d_depth_t image);
+#endif //cl_khr_depth_images
+#if defined(cl_khr_gl_msaa_sharing)
+int2 __ovld __cnfn get_image_dim(read_write image2d_msaa_t image);
+int2 __ovld __cnfn get_image_dim(read_write image2d_msaa_depth_t image);
+int2 __ovld __cnfn get_image_dim(read_write image2d_array_msaa_t image);
+int2 __ovld __cnfn get_image_dim(read_write image2d_array_msaa_depth_t image);
+#endif //cl_khr_gl_msaa_sharing
+#endif //__OPENCL_C_VERSION__ >= CL_VERSION_2_0
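+
+// Usage sketch: guarding a ragged NDRange with the packed dimensions
+// (kernel name is illustrative):
+//
+//   __kernel void safe_fill(write_only image2d_t img, float4 color) {
+//     int2 c = (int2)(get_global_id(0), get_global_id(1));
+//     int2 dim = get_image_dim(img);       // (width, height)
+//     if (c.x < dim.x && c.y < dim.y)
+//       write_imagef(img, c, color);
+//   }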
+
+/**
+ * Return the 3D image width, height, and depth as an
+ * int4 type. The width is returned in the x
+ * component, height in the y component, depth in the z
+ * component and the w component is 0.
+ */
+int4 __ovld __cnfn get_image_dim(read_only image3d_t image);
+int4 __ovld __cnfn get_image_dim(write_only image3d_t image);
+#if __OPENCL_C_VERSION__ >= CL_VERSION_2_0
+int4 __ovld __cnfn get_image_dim(read_write image3d_t image);
+#endif //__OPENCL_C_VERSION__ >= CL_VERSION_2_0
+
+/**
+ * Return the image array size.
+ */
+
+size_t __ovld __cnfn get_image_array_size(read_only image1d_array_t image_array);
+size_t __ovld __cnfn get_image_array_size(read_only image2d_array_t image_array);
+#ifdef cl_khr_depth_images
+size_t __ovld __cnfn get_image_array_size(read_only image2d_array_depth_t image_array);
+#endif //cl_khr_depth_images
+#if defined(cl_khr_gl_msaa_sharing)
+size_t __ovld __cnfn get_image_array_size(read_only image2d_array_msaa_t image_array);
+size_t __ovld __cnfn get_image_array_size(read_only image2d_array_msaa_depth_t image_array);
+#endif //cl_khr_gl_msaa_sharing
+
+size_t __ovld __cnfn get_image_array_size(write_only image1d_array_t image_array);
+size_t __ovld __cnfn get_image_array_size(write_only image2d_array_t image_array);
+#ifdef cl_khr_depth_images
+size_t __ovld __cnfn get_image_array_size(write_only image2d_array_depth_t image_array);
+#endif //cl_khr_depth_images
+#if defined(cl_khr_gl_msaa_sharing)
+size_t __ovld __cnfn get_image_array_size(write_only image2d_array_msaa_t image_array);
+size_t __ovld __cnfn get_image_array_size(write_only image2d_array_msaa_depth_t image_array);
+#endif //cl_khr_gl_msaa_sharing
+
+#if __OPENCL_C_VERSION__ >= CL_VERSION_2_0
+size_t __ovld __cnfn get_image_array_size(read_write image1d_array_t image_array);
+size_t __ovld __cnfn get_image_array_size(read_write image2d_array_t image_array);
+#ifdef cl_khr_depth_images
+size_t __ovld __cnfn get_image_array_size(read_write image2d_array_depth_t image_array);
+#endif //cl_khr_depth_images
+#if defined(cl_khr_gl_msaa_sharing)
+size_t __ovld __cnfn get_image_array_size(read_write image2d_array_msaa_t image_array);
+size_t __ovld __cnfn get_image_array_size(read_write image2d_array_msaa_depth_t image_array);
+#endif //cl_khr_gl_msaa_sharing
+#endif //__OPENCL_C_VERSION__ >= CL_VERSION_2_0
+
+/**
+ * Return the number of samples associated with image.
+ */
+#if defined(cl_khr_gl_msaa_sharing)
+int __ovld get_image_num_samples(read_only image2d_msaa_t image);
+int __ovld get_image_num_samples(read_only image2d_msaa_depth_t image);
+int __ovld get_image_num_samples(read_only image2d_array_msaa_t image);
+int __ovld get_image_num_samples(read_only image2d_array_msaa_depth_t image);
+
+int __ovld get_image_num_samples(write_only image2d_msaa_t image);
+int __ovld get_image_num_samples(write_only image2d_msaa_depth_t image);
+int __ovld get_image_num_samples(write_only image2d_array_msaa_t image);
+int __ovld get_image_num_samples(write_only image2d_array_msaa_depth_t image);
+
+#if __OPENCL_C_VERSION__ >= CL_VERSION_2_0
+int __ovld get_image_num_samples(read_write image2d_msaa_t image);
+int __ovld get_image_num_samples(read_write image2d_msaa_depth_t image);
+int __ovld get_image_num_samples(read_write image2d_array_msaa_t image);
+int __ovld get_image_num_samples(read_write image2d_array_msaa_depth_t image);
+#endif //__OPENCL_C_VERSION__ >= CL_VERSION_2_0
+#endif //cl_khr_gl_msaa_sharing
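+
+// Usage sketch, assuming cl_khr_gl_msaa_sharing: averaging all samples of one
+// texel (kernel name is illustrative; the sampled read_imagef overloads take
+// an extra sample index):
+//
+//   __kernel void resolve(read_only image2d_msaa_t src, write_only image2d_t dst) {
+//     int2 c = (int2)(get_global_id(0), get_global_id(1));
+//     int n = get_image_num_samples(src);
+//     float4 acc = (float4)(0.0f);
+//     for (int s = 0; s < n; ++s)
+//       acc += read_imagef(src, c, s);
+//     write_imagef(dst, c, acc / (float)n);
+//   }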
+
+// OpenCL v2.0 s6.13.15 - Work-group Functions
+
+#if __OPENCL_C_VERSION__ >= CL_VERSION_2_0
+int __ovld work_group_all(int predicate);
+int __ovld work_group_any(int predicate);
+
+#ifdef cl_khr_fp16
+half __ovld work_group_broadcast(half a, size_t local_id);
+half __ovld work_group_broadcast(half a, size_t x, size_t y);
+half __ovld work_group_broadcast(half a, size_t x, size_t y, size_t z);
+#endif //cl_khr_fp16
+int __ovld work_group_broadcast(int a, size_t local_id);
+int __ovld work_group_broadcast(int a, size_t x, size_t y);
+int __ovld work_group_broadcast(int a, size_t x, size_t y, size_t z);
+uint __ovld work_group_broadcast(uint a, size_t local_id);
+uint __ovld work_group_broadcast(uint a, size_t x, size_t y);
+uint __ovld work_group_broadcast(uint a, size_t x, size_t y, size_t z);
+long __ovld work_group_broadcast(long a, size_t local_id);
+long __ovld work_group_broadcast(long a, size_t x, size_t y);
+long __ovld work_group_broadcast(long a, size_t x, size_t y, size_t z);
+ulong __ovld work_group_broadcast(ulong a, size_t local_id);
+ulong __ovld work_group_broadcast(ulong a, size_t x, size_t y);
+ulong __ovld work_group_broadcast(ulong a, size_t x, size_t y, size_t z);
+float __ovld work_group_broadcast(float a, size_t local_id);
+float __ovld work_group_broadcast(float a, size_t x, size_t y);
+float __ovld work_group_broadcast(float a, size_t x, size_t y, size_t z);
+#ifdef cl_khr_fp64
+double __ovld work_group_broadcast(double a, size_t local_id);
+double __ovld work_group_broadcast(double a, size_t x, size_t y);
+double __ovld work_group_broadcast(double a, size_t x, size_t y, size_t z);
+#endif //cl_khr_fp64
+
+#ifdef cl_khr_fp16
+half __ovld work_group_reduce_add(half x);
+half __ovld work_group_reduce_min(half x);
+half __ovld work_group_reduce_max(half x);
+half __ovld work_group_scan_exclusive_add(half x);
+half __ovld work_group_scan_exclusive_min(half x);
+half __ovld work_group_scan_exclusive_max(half x);
+half __ovld work_group_scan_inclusive_add(half x);
+half __ovld work_group_scan_inclusive_min(half x);
+half __ovld work_group_scan_inclusive_max(half x);
+#endif //cl_khr_fp16
+int __ovld work_group_reduce_add(int x);
+int __ovld work_group_reduce_min(int x);
+int __ovld work_group_reduce_max(int x);
+int __ovld work_group_scan_exclusive_add(int x);
+int __ovld work_group_scan_exclusive_min(int x);
+int __ovld work_group_scan_exclusive_max(int x);
+int __ovld work_group_scan_inclusive_add(int x);
+int __ovld work_group_scan_inclusive_min(int x);
+int __ovld work_group_scan_inclusive_max(int x);
+uint __ovld work_group_reduce_add(uint x);
+uint __ovld work_group_reduce_min(uint x);
+uint __ovld work_group_reduce_max(uint x);
+uint __ovld work_group_scan_exclusive_add(uint x);
+uint __ovld work_group_scan_exclusive_min(uint x);
+uint __ovld work_group_scan_exclusive_max(uint x);
+uint __ovld work_group_scan_inclusive_add(uint x);
+uint __ovld work_group_scan_inclusive_min(uint x);
+uint __ovld work_group_scan_inclusive_max(uint x);
+long __ovld work_group_reduce_add(long x);
+long __ovld work_group_reduce_min(long x);
+long __ovld work_group_reduce_max(long x);
+long __ovld work_group_scan_exclusive_add(long x);
+long __ovld work_group_scan_exclusive_min(long x);
+long __ovld work_group_scan_exclusive_max(long x);
+long __ovld work_group_scan_inclusive_add(long x);
+long __ovld work_group_scan_inclusive_min(long x);
+long __ovld work_group_scan_inclusive_max(long x);
+ulong __ovld work_group_reduce_add(ulong x);
+ulong __ovld work_group_reduce_min(ulong x);
+ulong __ovld work_group_reduce_max(ulong x);
+ulong __ovld work_group_scan_exclusive_add(ulong x);
+ulong __ovld work_group_scan_exclusive_min(ulong x);
+ulong __ovld work_group_scan_exclusive_max(ulong x);
+ulong __ovld work_group_scan_inclusive_add(ulong x);
+ulong __ovld work_group_scan_inclusive_min(ulong x);
+ulong __ovld work_group_scan_inclusive_max(ulong x);
+float __ovld work_group_reduce_add(float x);
+float __ovld work_group_reduce_min(float x);
+float __ovld work_group_reduce_max(float x);
+float __ovld work_group_scan_exclusive_add(float x);
+float __ovld work_group_scan_exclusive_min(float x);
+float __ovld work_group_scan_exclusive_max(float x);
+float __ovld work_group_scan_inclusive_add(float x);
+float __ovld work_group_scan_inclusive_min(float x);
+float __ovld work_group_scan_inclusive_max(float x);
+#ifdef cl_khr_fp64
+double __ovld work_group_reduce_add(double x);
+double __ovld work_group_reduce_min(double x);
+double __ovld work_group_reduce_max(double x);
+double __ovld work_group_scan_exclusive_add(double x);
+double __ovld work_group_scan_exclusive_min(double x);
+double __ovld work_group_scan_exclusive_max(double x);
+double __ovld work_group_scan_inclusive_add(double x);
+double __ovld work_group_scan_inclusive_min(double x);
+double __ovld work_group_scan_inclusive_max(double x);
+#endif //cl_khr_fp64
+
+#endif //__OPENCL_C_VERSION__ >= CL_VERSION_2_0
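+
+// Usage sketch, assuming an OpenCL 2.0 device (kernel name and buffer layout
+// are illustrative): every work-item receives the same reduction result, so
+// one work-item per group publishes it.
+//
+//   __kernel void block_sum(__global const float *in, __global float *out) {
+//     float total = work_group_reduce_add(in[get_global_id(0)]);
+//     if (get_local_id(0) == 0)
+//       out[get_group_id(0)] = total;
+//   }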
+
+// OpenCL v2.0 s6.13.16 - Pipe Functions
+#if __OPENCL_C_VERSION__ >= CL_VERSION_2_0
+#define PIPE_RESERVE_ID_VALID_BIT (1U << 30)
+#define CLK_NULL_RESERVE_ID (__builtin_astype(((void*)(__SIZE_MAX__)), reserve_id_t))
+bool __ovld is_valid_reserve_id(reserve_id_t reserve_id);
+#endif //__OPENCL_C_VERSION__ >= CL_VERSION_2_0
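+
+// Usage sketch: the reservation builtins (declared with the pipe type
+// elsewhere) pair with the check above; for a pipe p of int, with values an
+// illustrative local array:
+//
+//   reserve_id_t rid = reserve_write_pipe(p, 4);   // reserve 4 packets
+//   if (is_valid_reserve_id(rid)) {
+//     for (uint i = 0; i < 4; ++i)
+//       write_pipe(p, rid, i, &values[i]);
+//     commit_write_pipe(p, rid);
+//   }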
+
+
+// OpenCL v2.0 s6.13.17 - Enqueue Kernels
+#if __OPENCL_C_VERSION__ >= CL_VERSION_2_0
+
+#define CL_COMPLETE 0x0
+#define CL_RUNNING 0x1
+#define CL_SUBMITTED 0x2
+#define CL_QUEUED 0x3
+
+#define CLK_SUCCESS 0
+#define CLK_ENQUEUE_FAILURE -101
+#define CLK_INVALID_QUEUE -102
+#define CLK_INVALID_NDRANGE -160
+#define CLK_INVALID_EVENT_WAIT_LIST -57
+#define CLK_DEVICE_QUEUE_FULL -161
+#define CLK_INVALID_ARG_SIZE -51
+#define CLK_EVENT_ALLOCATION_FAILURE -100
+#define CLK_OUT_OF_RESOURCES -5
+
+#define CLK_NULL_QUEUE 0
+#define CLK_NULL_EVENT (__builtin_astype(((void*)(__SIZE_MAX__)), clk_event_t))
+
+// execution model related definitions
+#define CLK_ENQUEUE_FLAGS_NO_WAIT 0x0
+#define CLK_ENQUEUE_FLAGS_WAIT_KERNEL 0x1
+#define CLK_ENQUEUE_FLAGS_WAIT_WORK_GROUP 0x2
+
+typedef int kernel_enqueue_flags_t;
+typedef int clk_profiling_info;
+
+// Profiling info name (see capture_event_profiling_info)
+#define CLK_PROFILING_COMMAND_EXEC_TIME 0x1
+
+#define MAX_WORK_DIM 3
+
+// TODO: Remove the definition of ndrange_t in Clang as an opaque type and add back
+// the following ndrange_t definition.
+#if 0
+typedef struct {
+ unsigned int workDimension;
+ size_t globalWorkOffset[MAX_WORK_DIM];
+ size_t globalWorkSize[MAX_WORK_DIM];
+ size_t localWorkSize[MAX_WORK_DIM];
+} ndrange_t;
+#endif
+
+ndrange_t __ovld ndrange_1D(size_t);
+ndrange_t __ovld ndrange_1D(size_t, size_t);
+ndrange_t __ovld ndrange_1D(size_t, size_t, size_t);
+
+ndrange_t __ovld ndrange_2D(const size_t[2]);
+ndrange_t __ovld ndrange_2D(const size_t[2], const size_t[2]);
+ndrange_t __ovld ndrange_2D(const size_t[2], const size_t[2], const size_t[2]);
+
+ndrange_t __ovld ndrange_3D(const size_t[3]);
+ndrange_t __ovld ndrange_3D(const size_t[3], const size_t[3]);
+ndrange_t __ovld ndrange_3D(const size_t[3], const size_t[3], const size_t[3]);
+
+int __ovld enqueue_marker(queue_t, uint, const __private clk_event_t*, __private clk_event_t*);
+
+void __ovld retain_event(clk_event_t);
+
+void __ovld release_event(clk_event_t);
+
+clk_event_t __ovld create_user_event(void);
+
+void __ovld set_user_event_status(clk_event_t e, int state);
+
+bool __ovld is_valid_event(clk_event_t event);
+
+void __ovld capture_event_profiling_info(clk_event_t, clk_profiling_info, __global void* value);
+
+queue_t __ovld get_default_queue(void);
+#endif //__OPENCL_C_VERSION__ >= CL_VERSION_2_0
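+
+// Usage sketch, assuming an OpenCL 2.0 device; enqueue_kernel itself is
+// handled by the compiler rather than declared here, and child_work is
+// illustrative:
+//
+//   queue_t q = get_default_queue();
+//   ndrange_t nd = ndrange_1D(1024);
+//   int err = enqueue_kernel(q, CLK_ENQUEUE_FLAGS_NO_WAIT, nd,
+//                            ^{ child_work(); });
+//   if (err != CLK_SUCCESS) { /* handle CLK_DEVICE_QUEUE_FULL etc. */ }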
+
+// OpenCL Extension v2.0 s9.17 - Sub-groups
+
+#if defined(cl_intel_subgroups) || defined(cl_khr_subgroups)
+// Shared Sub Group Functions
+uint __ovld get_sub_group_size(void);
+uint __ovld get_max_sub_group_size(void);
+uint __ovld get_num_sub_groups(void);
+#if __OPENCL_C_VERSION__ >= CL_VERSION_2_0
+uint __ovld get_enqueued_num_sub_groups(void);
+#endif //__OPENCL_C_VERSION__ >= CL_VERSION_2_0
+uint __ovld get_sub_group_id(void);
+uint __ovld get_sub_group_local_id(void);
+
+void __ovld sub_group_barrier(cl_mem_fence_flags flags);
+#if __OPENCL_C_VERSION__ >= CL_VERSION_2_0
+void __ovld sub_group_barrier(cl_mem_fence_flags flags, memory_scope scope);
+#endif //__OPENCL_C_VERSION__ >= CL_VERSION_2_0
+
+int __ovld sub_group_all(int predicate);
+int __ovld sub_group_any(int predicate);
+
+int __ovld sub_group_broadcast(int x, uint sub_group_local_id);
+uint __ovld sub_group_broadcast(uint x, uint sub_group_local_id);
+long __ovld sub_group_broadcast(long x, uint sub_group_local_id);
+ulong __ovld sub_group_broadcast(ulong x, uint sub_group_local_id);
+float __ovld sub_group_broadcast(float x, uint sub_group_local_id);
+
+int __ovld sub_group_reduce_add(int x);
+uint __ovld sub_group_reduce_add(uint x);
+long __ovld sub_group_reduce_add(long x);
+ulong __ovld sub_group_reduce_add(ulong x);
+float __ovld sub_group_reduce_add(float x);
+int __ovld sub_group_reduce_min(int x);
+uint __ovld sub_group_reduce_min(uint x);
+long __ovld sub_group_reduce_min(long x);
+ulong __ovld sub_group_reduce_min(ulong x);
+float __ovld sub_group_reduce_min(float x);
+int __ovld sub_group_reduce_max(int x);
+uint __ovld sub_group_reduce_max(uint x);
+long __ovld sub_group_reduce_max(long x);
+ulong __ovld sub_group_reduce_max(ulong x);
+float __ovld sub_group_reduce_max(float x);
+
+int __ovld sub_group_scan_exclusive_add(int x);
+uint __ovld sub_group_scan_exclusive_add(uint x);
+long __ovld sub_group_scan_exclusive_add(long x);
+ulong __ovld sub_group_scan_exclusive_add(ulong x);
+float __ovld sub_group_scan_exclusive_add(float x);
+int __ovld sub_group_scan_exclusive_min(int x);
+uint __ovld sub_group_scan_exclusive_min(uint x);
+long __ovld sub_group_scan_exclusive_min(long x);
+ulong __ovld sub_group_scan_exclusive_min(ulong x);
+float __ovld sub_group_scan_exclusive_min(float x);
+int __ovld sub_group_scan_exclusive_max(int x);
+uint __ovld sub_group_scan_exclusive_max(uint x);
+long __ovld sub_group_scan_exclusive_max(long x);
+ulong __ovld sub_group_scan_exclusive_max(ulong x);
+float __ovld sub_group_scan_exclusive_max(float x);
+
+int __ovld sub_group_scan_inclusive_add(int x);
+uint __ovld sub_group_scan_inclusive_add(uint x);
+long __ovld sub_group_scan_inclusive_add(long x);
+ulong __ovld sub_group_scan_inclusive_add(ulong x);
+float __ovld sub_group_scan_inclusive_add(float x);
+int __ovld sub_group_scan_inclusive_min(int x);
+uint __ovld sub_group_scan_inclusive_min(uint x);
+long __ovld sub_group_scan_inclusive_min(long x);
+ulong __ovld sub_group_scan_inclusive_min(ulong x);
+float __ovld sub_group_scan_inclusive_min(float x);
+int __ovld sub_group_scan_inclusive_max(int x);
+uint __ovld sub_group_scan_inclusive_max(uint x);
+long __ovld sub_group_scan_inclusive_max(long x);
+ulong __ovld sub_group_scan_inclusive_max(ulong x);
+float __ovld sub_group_scan_inclusive_max(float x);
+
+#ifdef cl_khr_fp16
+half __ovld sub_group_broadcast(half x, uint sub_group_local_id);
+half __ovld sub_group_reduce_add(half x);
+half __ovld sub_group_reduce_min(half x);
+half __ovld sub_group_reduce_max(half x);
+half __ovld sub_group_scan_exclusive_add(half x);
+half __ovld sub_group_scan_exclusive_min(half x);
+half __ovld sub_group_scan_exclusive_max(half x);
+half __ovld sub_group_scan_inclusive_add(half x);
+half __ovld sub_group_scan_inclusive_min(half x);
+half __ovld sub_group_scan_inclusive_max(half x);
+#endif //cl_khr_fp16
+
+#ifdef cl_khr_fp64
+double __ovld sub_group_broadcast(double x, uint sub_group_local_id);
+double __ovld sub_group_reduce_add(double x);
+double __ovld sub_group_reduce_min(double x);
+double __ovld sub_group_reduce_max(double x);
+double __ovld sub_group_scan_exclusive_add(double x);
+double __ovld sub_group_scan_exclusive_min(double x);
+double __ovld sub_group_scan_exclusive_max(double x);
+double __ovld sub_group_scan_inclusive_add(double x);
+double __ovld sub_group_scan_inclusive_min(double x);
+double __ovld sub_group_scan_inclusive_max(double x);
+#endif //cl_khr_fp64
+
+#endif //cl_intel_subgroups || cl_khr_subgroups
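+
+// Usage sketch (a kernel fragment; x is the per-work-item value and the out
+// indexing, assuming a 1D NDRange, is illustrative): one partial sum per
+// sub-group, published by that sub-group's first work-item.
+//
+//   float partial = sub_group_reduce_add(x);
+//   if (get_sub_group_local_id() == 0)
+//     out[get_num_sub_groups() * get_group_id(0) + get_sub_group_id()] = partial;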
+
+// Disable any extensions we may have enabled previously.
+#pragma OPENCL EXTENSION all : disable
+
+#undef __cnfn
+#undef __ovld
+#endif //_OPENCL_H_
diff --git a/current/clang-include/pkuintrin.h b/current/clang-include/pkuintrin.h
new file mode 100644
index 0000000..9e54594
--- /dev/null
+++ b/current/clang-include/pkuintrin.h
@@ -0,0 +1,48 @@
+/*===------------- pkuintrin.h - PKU intrinsics ------------------===
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+ * THE SOFTWARE.
+ *
+ *===-----------------------------------------------------------------------===
+ */
+#ifndef __IMMINTRIN_H
+#error "Never use <pkuintrin.h> directly; include <immintrin.h> instead."
+#endif
+
+#ifndef __PKUINTRIN_H
+#define __PKUINTRIN_H
+
+/* Define the default attributes for the functions in this file. */
+#define __DEFAULT_FN_ATTRS __attribute__((__always_inline__, __nodebug__, __target__("pku")))
+
+static __inline__ unsigned int __DEFAULT_FN_ATTRS
+_rdpkru_u32(void)
+{
+ return __builtin_ia32_rdpkru();
+}
+
+static __inline__ void __DEFAULT_FN_ATTRS
+_wrpkru(unsigned int __val)
+{
+ __builtin_ia32_wrpkru(__val);
+}
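+
+/* Usage sketch: PKRU holds two bits per protection key -- access-disable at
+ * bit 2*k and write-disable at bit 2*k+1 (the key number below is
+ * illustrative):
+ *
+ *   unsigned int pkru = _rdpkru_u32();
+ *   _wrpkru(pkru | (1U << (2*1 + 1)));   // make memory tagged pkey 1 read-only
+ */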
+
+#undef __DEFAULT_FN_ATTRS
+
+#endif
diff --git a/current/clang-include/pmmintrin.h b/current/clang-include/pmmintrin.h
new file mode 100644
index 0000000..5b10580
--- /dev/null
+++ b/current/clang-include/pmmintrin.h
@@ -0,0 +1,311 @@
+/*===---- pmmintrin.h - SSE3 intrinsics ------------------------------------===
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+ * THE SOFTWARE.
+ *
+ *===-----------------------------------------------------------------------===
+ */
+
+#ifndef __PMMINTRIN_H
+#define __PMMINTRIN_H
+
+#include <emmintrin.h>
+
+/* Define the default attributes for the functions in this file. */
+#define __DEFAULT_FN_ATTRS \
+ __attribute__((__always_inline__, __nodebug__, __target__("sse3")))
+
+/// \brief Loads data from an unaligned memory location to elements in a 128-bit
+/// vector. If the address of the data is not 16-byte aligned, the
+/// instruction may read two adjacent aligned blocks of memory to retrieve
+/// the requested data.
+///
+/// \headerfile <x86intrin.h>
+///
+/// This intrinsic corresponds to the \c VLDDQU instruction.
+///
+/// \param __p
+/// A pointer to a 128-bit integer vector containing integer values.
+/// \returns A 128-bit vector containing the moved values.
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_lddqu_si128(__m128i const *__p)
+{
+ return (__m128i)__builtin_ia32_lddqu((char const *)__p);
+}
+
+/// \brief Adds the even-indexed values and subtracts the odd-indexed values of
+/// two 128-bit vectors of [4 x float].
+///
+/// \headerfile <x86intrin.h>
+///
+/// This intrinsic corresponds to the \c VADDSUBPS instruction.
+///
+/// \param __a
+/// A 128-bit vector of [4 x float] containing the left source operand.
+/// \param __b
+/// A 128-bit vector of [4 x float] containing the right source operand.
+/// \returns A 128-bit vector of [4 x float] containing the alternating sums and
+/// differences of both operands.
+static __inline__ __m128 __DEFAULT_FN_ATTRS
+_mm_addsub_ps(__m128 __a, __m128 __b)
+{
+ return __builtin_ia32_addsubps((__v4sf)__a, (__v4sf)__b);
+}
+
+/// \brief Horizontally adds the adjacent pairs of values contained in two
+/// 128-bit vectors of [4 x float].
+///
+/// \headerfile <x86intrin.h>
+///
+/// This intrinsic corresponds to the \c VHADDPS instruction.
+///
+/// \param __a
+/// A 128-bit vector of [4 x float] containing one of the source operands.
+/// The horizontal sums of the values are stored in the lower bits of the
+/// destination.
+/// \param __b
+/// A 128-bit vector of [4 x float] containing one of the source operands.
+/// The horizontal sums of the values are stored in the upper bits of the
+/// destination.
+/// \returns A 128-bit vector of [4 x float] containing the horizontal sums of
+/// both operands.
+static __inline__ __m128 __DEFAULT_FN_ATTRS
+_mm_hadd_ps(__m128 __a, __m128 __b)
+{
+ return __builtin_ia32_haddps((__v4sf)__a, (__v4sf)__b);
+}
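+
+/* Worked example (hypothetical values): the two sums from __a land in the
+ * low half of the result, the two sums from __b in the high half:
+ *
+ *   __m128 a = _mm_setr_ps(1.0f, 2.0f, 3.0f, 4.0f);
+ *   __m128 b = _mm_setr_ps(5.0f, 6.0f, 7.0f, 8.0f);
+ *   __m128 r = _mm_hadd_ps(a, b);
+ *   // r = { 1+2, 3+4, 5+6, 7+8 } = { 3, 7, 11, 15 }
+ */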
+
+/// \brief Horizontally subtracts the adjacent pairs of values contained in two
+/// 128-bit vectors of [4 x float].
+///
+/// \headerfile <x86intrin.h>
+///
+/// This intrinsic corresponds to the \c VHSUBPS instruction.
+///
+/// \param __a
+/// A 128-bit vector of [4 x float] containing one of the source operands.
+/// The horizontal differences between the values are stored in the lower
+/// bits of the destination.
+/// \param __b
+/// A 128-bit vector of [4 x float] containing one of the source operands.
+/// The horizontal differences between the values are stored in the upper
+/// bits of the destination.
+/// \returns A 128-bit vector of [4 x float] containing the horizontal
+/// differences of both operands.
+static __inline__ __m128 __DEFAULT_FN_ATTRS
+_mm_hsub_ps(__m128 __a, __m128 __b)
+{
+ return __builtin_ia32_hsubps((__v4sf)__a, (__v4sf)__b);
+}
+
+/// \brief Moves and duplicates high-order (odd-indexed) values from a 128-bit
+/// vector of [4 x float] to float values stored in a 128-bit vector of
+/// [4 x float].
+/// Bits [127:96] of the source are written to bits [127:96] and [95:64] of
+/// the destination.
+/// Bits [63:32] of the source are written to bits [63:32] and [31:0] of the
+/// destination.
+///
+/// \headerfile <x86intrin.h>
+///
+/// This intrinsic corresponds to the \c VMOVSHDUP instruction.
+///
+/// \param __a
+/// A 128-bit vector of [4 x float].
+/// \returns A 128-bit vector of [4 x float] containing the moved and duplicated
+/// values.
+static __inline__ __m128 __DEFAULT_FN_ATTRS
+_mm_movehdup_ps(__m128 __a)
+{
+ return __builtin_shufflevector((__v4sf)__a, (__v4sf)__a, 1, 1, 3, 3);
+}
+
+/// \brief Moves and duplicates low-order (even-indexed) values from a 128-bit
+/// vector of [4 x float] to float values stored in a 128-bit vector of
+/// [4 x float].
+/// Bits [95:64] of the source are written to bits [127:96] and [95:64] of
+/// the destination.
+/// Bits [31:0] of the source are written to bits [63:32] and [31:0] of the
+/// destination.
+///
+/// \headerfile <x86intrin.h>
+///
+/// This intrinsic corresponds to the \c VMOVSLDUP instruction.
+///
+/// \param __a
+/// A 128-bit vector of [4 x float].
+/// \returns A 128-bit vector of [4 x float] containing the moved and duplicated
+/// values.
+static __inline__ __m128 __DEFAULT_FN_ATTRS
+_mm_moveldup_ps(__m128 __a)
+{
+ return __builtin_shufflevector((__v4sf)__a, (__v4sf)__a, 0, 0, 2, 2);
+}
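+
+/* Duplication patterns of the two shuffles side by side (hypothetical
+ * values):
+ *
+ *   __m128 v = _mm_setr_ps(0.0f, 1.0f, 2.0f, 3.0f);
+ *   __m128 h = _mm_movehdup_ps(v);   // { 1, 1, 3, 3 }
+ *   __m128 l = _mm_moveldup_ps(v);   // { 0, 0, 2, 2 }
+ */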
+
+/// \brief Subtracts the even-indexed values and adds the odd-indexed values of
+/// two 128-bit vectors of [2 x double].
+///
+/// \headerfile <x86intrin.h>
+///
+/// This intrinsic corresponds to the \c VADDSUBPD instruction.
+///
+/// \param __a
+/// A 128-bit vector of [2 x double] containing the left source operand.
+/// \param __b
+/// A 128-bit vector of [2 x double] containing the right source operand.
+/// \returns A 128-bit vector of [2 x double] containing the alternating sums
+/// and differences of both operands.
+static __inline__ __m128d __DEFAULT_FN_ATTRS
+_mm_addsub_pd(__m128d __a, __m128d __b)
+{
+ return __builtin_ia32_addsubpd((__v2df)__a, (__v2df)__b);
+}
+
+/// \brief Horizontally adds the pairs of values contained in two 128-bit
+/// vectors of [2 x double].
+///
+/// \headerfile <x86intrin.h>
+///
+/// This intrinsic corresponds to the \c VHADDPD instruction.
+///
+/// \param __a
+/// A 128-bit vector of [2 x double] containing one of the source operands.
+/// The horizontal sum of the values is stored in the lower bits of the
+/// destination.
+/// \param __b
+/// A 128-bit vector of [2 x double] containing one of the source operands.
+/// The horizontal sum of the values is stored in the upper bits of the
+/// destination.
+/// \returns A 128-bit vector of [2 x double] containing the horizontal sums of
+/// both operands.
+static __inline__ __m128d __DEFAULT_FN_ATTRS
+_mm_hadd_pd(__m128d __a, __m128d __b)
+{
+ return __builtin_ia32_haddpd((__v2df)__a, (__v2df)__b);
+}
+
+/// \brief Horizontally subtracts the pairs of values contained in two 128-bit
+/// vectors of [2 x double].
+///
+/// \headerfile <x86intrin.h>
+///
+/// This intrinsic corresponds to the \c VHSUBPD instruction.
+///
+/// \param __a
+/// A 128-bit vector of [2 x double] containing one of the source operands.
+/// The horizontal difference of the values is stored in the lower bits of
+/// the destination.
+/// \param __b
+/// A 128-bit vector of [2 x double] containing one of the source operands.
+/// The horizontal difference of the values is stored in the upper bits of
+/// the destination.
+/// \returns A 128-bit vector of [2 x double] containing the horizontal
+/// differences of both operands.
+static __inline__ __m128d __DEFAULT_FN_ATTRS
+_mm_hsub_pd(__m128d __a, __m128d __b)
+{
+ return __builtin_ia32_hsubpd((__v2df)__a, (__v2df)__b);
+}
+
+/// \brief Moves and duplicates one double-precision value to double-precision
+/// values stored in a 128-bit vector of [2 x double].
+///
+/// \headerfile <x86intrin.h>
+///
+/// \code
+/// __m128d _mm_loaddup_pd(double const * dp);
+/// \endcode
+///
+/// This intrinsic corresponds to the \c VMOVDDUP instruction.
+///
+/// \param dp
+/// A pointer to a double-precision value to be moved and duplicated.
+/// \returns A 128-bit vector of [2 x double] containing the moved and
+/// duplicated values.
+#define _mm_loaddup_pd(dp) _mm_load1_pd(dp)
+
+/// \brief Moves and duplicates the double-precision value in the lower bits of
+/// a 128-bit vector of [2 x double] to double-precision values stored in a
+/// 128-bit vector of [2 x double].
+///
+/// \headerfile <x86intrin.h>
+///
+/// This intrinsic corresponds to the \c VMOVDDUP instruction.
+///
+/// \param __a
+/// A 128-bit vector of [2 x double]. Bits [63:0] are written to bits
+/// [127:64] and [63:0] of the destination.
+/// \returns A 128-bit vector of [2 x double] containing the moved and
+/// duplicated values.
+static __inline__ __m128d __DEFAULT_FN_ATTRS
+_mm_movedup_pd(__m128d __a)
+{
+ return __builtin_shufflevector((__v2df)__a, (__v2df)__a, 0, 0);
+}
+
+#define _MM_DENORMALS_ZERO_ON (0x0040)
+#define _MM_DENORMALS_ZERO_OFF (0x0000)
+
+#define _MM_DENORMALS_ZERO_MASK (0x0040)
+
+#define _MM_GET_DENORMALS_ZERO_MODE() (_mm_getcsr() & _MM_DENORMALS_ZERO_MASK)
+#define _MM_SET_DENORMALS_ZERO_MODE(x) (_mm_setcsr((_mm_getcsr() & ~_MM_DENORMALS_ZERO_MASK) | (x)))
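+
+/* A sketch of enabling denormals-are-zero around a hot loop and restoring
+ * the previous mode afterwards (process_block() is a hypothetical
+ * function):
+ *
+ *   unsigned int old = _MM_GET_DENORMALS_ZERO_MODE();
+ *   _MM_SET_DENORMALS_ZERO_MODE(_MM_DENORMALS_ZERO_ON);
+ *   process_block();
+ *   _MM_SET_DENORMALS_ZERO_MODE(old);
+ */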
+
+/// \brief Establishes a linear address memory range to be monitored and puts
+/// the processor in the monitor event pending state. Data stored in the
+/// monitored address range causes the processor to exit the pending state.
+///
+/// \headerfile <x86intrin.h>
+///
+/// This intrinsic corresponds to the \c MONITOR instruction.
+///
+/// \param __p
+/// The memory range to be monitored. The size of the range is determined by
+/// CPUID function 0000_0005h.
+/// \param __extensions
+/// Optional extensions for the monitoring state.
+/// \param __hints
+/// Optional hints for the monitoring state.
+static __inline__ void __DEFAULT_FN_ATTRS
+_mm_monitor(void const *__p, unsigned __extensions, unsigned __hints)
+{
+ __builtin_ia32_monitor((void *)__p, __extensions, __hints);
+}
+
+/// \brief Used with the MONITOR instruction to wait while the processor is in
+/// the monitor event pending state. Data stored in the monitored address
+/// range causes the processor to exit the pending state.
+///
+/// \headerfile <x86intrin.h>
+///
+/// This intrinsic corresponds to the \c MWAIT instruction.
+///
+/// \param __extensions
+/// Optional extensions for the monitoring state, which may vary by
+/// processor.
+/// \param __hints
+/// Optional hints for the monitoring state, which may vary by processor.
+static __inline__ void __DEFAULT_FN_ATTRS
+_mm_mwait(unsigned __extensions, unsigned __hints)
+{
+ __builtin_ia32_mwait(__extensions, __hints);
+}
+
+#undef __DEFAULT_FN_ATTRS
+
+#endif /* __PMMINTRIN_H */
diff --git a/current/clang-include/popcntintrin.h b/current/clang-include/popcntintrin.h
new file mode 100644
index 0000000..7e2f167
--- /dev/null
+++ b/current/clang-include/popcntintrin.h
@@ -0,0 +1,98 @@
+/*===---- popcntintrin.h - POPCNT intrinsics -------------------------------===
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+ * THE SOFTWARE.
+ *
+ *===-----------------------------------------------------------------------===
+ */
+
+#ifndef _POPCNTINTRIN_H
+#define _POPCNTINTRIN_H
+
+/* Define the default attributes for the functions in this file. */
+#define __DEFAULT_FN_ATTRS __attribute__((__always_inline__, __nodebug__, __target__("popcnt")))
+
+/// \brief Counts the number of bits in the source operand having a value of 1.
+///
+/// \headerfile <x86intrin.h>
+///
+/// This intrinsic corresponds to the \c POPCNT instruction.
+///
+/// \param __A
+/// An unsigned 32-bit integer operand.
+/// \returns A 32-bit integer containing the number of bits with value 1 in the
+/// source operand.
+static __inline__ int __DEFAULT_FN_ATTRS
+_mm_popcnt_u32(unsigned int __A)
+{
+ return __builtin_popcount(__A);
+}
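+
+/* Example: 0xF0F0F0F0 has four set bits in each of its four bytes, so
+ *
+ *   int n = _mm_popcnt_u32(0xF0F0F0F0u);   // n == 16
+ */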
+
+/// \brief Counts the number of bits in the source operand having a value of 1.
+///
+/// \headerfile <x86intrin.h>
+///
+/// This intrinsic corresponds to the \c POPCNT instruction.
+///
+/// \param __A
+/// A signed 32-bit integer operand.
+/// \returns A 32-bit integer containing the number of bits with value 1 in the
+/// source operand.
+static __inline__ int __DEFAULT_FN_ATTRS
+_popcnt32(int __A)
+{
+ return __builtin_popcount(__A);
+}
+
+#ifdef __x86_64__
+/// \brief Counts the number of bits in the source operand having a value of 1.
+///
+/// \headerfile <x86intrin.h>
+///
+/// This intrinsic corresponds to the \c POPCNT instruction.
+///
+/// \param __A
+/// An unsigned 64-bit integer operand.
+/// \returns A 64-bit integer containing the number of bits with value 1 in the
+/// source operand.
+static __inline__ long long __DEFAULT_FN_ATTRS
+_mm_popcnt_u64(unsigned long long __A)
+{
+ return __builtin_popcountll(__A);
+}
+
+/// \brief Counts the number of bits in the source operand having a value of 1.
+///
+/// \headerfile <x86intrin.h>
+///
+/// This intrinsic corresponds to the \c POPCNT instruction.
+///
+/// \param __A
+/// A signed 64-bit integer operand.
+/// \returns A 64-bit integer containing the number of bits with value 1 in the
+/// source operand.
+static __inline__ long long __DEFAULT_FN_ATTRS
+_popcnt64(long long __A)
+{
+ return __builtin_popcountll(__A);
+}
+#endif /* __x86_64__ */
+
+#undef __DEFAULT_FN_ATTRS
+
+#endif /* _POPCNTINTRIN_H */
diff --git a/current/clang-include/prfchwintrin.h b/current/clang-include/prfchwintrin.h
new file mode 100644
index 0000000..ba02857
--- /dev/null
+++ b/current/clang-include/prfchwintrin.h
@@ -0,0 +1,45 @@
+/*===---- prfchwintrin.h - PREFETCHW intrinsic -----------------------------===
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+ * THE SOFTWARE.
+ *
+ *===-----------------------------------------------------------------------===
+ */
+
+#if !defined(__X86INTRIN_H) && !defined(_MM3DNOW_H_INCLUDED)
+#error "Never use <prfchwintrin.h> directly; include <x86intrin.h> or <mm3dnow.h> instead."
+#endif
+
+#ifndef __PRFCHWINTRIN_H
+#define __PRFCHWINTRIN_H
+
+#if defined(__PRFCHW__) || defined(__3dNOW__)
+static __inline__ void __attribute__((__always_inline__, __nodebug__))
+_m_prefetch(void *__P)
+{
+ __builtin_prefetch (__P, 0, 3 /* _MM_HINT_T0 */);
+}
+
+static __inline__ void __attribute__((__always_inline__, __nodebug__))
+_m_prefetchw(void *__P)
+{
+ __builtin_prefetch (__P, 1, 3 /* _MM_HINT_T0 */);
+}
+#endif
+
+#endif /* __PRFCHWINTRIN_H */
diff --git a/current/clang-include/rdseedintrin.h b/current/clang-include/rdseedintrin.h
new file mode 100644
index 0000000..421f4ea
--- /dev/null
+++ b/current/clang-include/rdseedintrin.h
@@ -0,0 +1,56 @@
+/*===---- rdseedintrin.h - RDSEED intrinsics -------------------------------===
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+ * THE SOFTWARE.
+ *
+ *===-----------------------------------------------------------------------===
+ */
+
+#ifndef __X86INTRIN_H
+#error "Never use <rdseedintrin.h> directly; include <x86intrin.h> instead."
+#endif
+
+#ifndef __RDSEEDINTRIN_H
+#define __RDSEEDINTRIN_H
+
+/* Define the default attributes for the functions in this file. */
+#define __DEFAULT_FN_ATTRS __attribute__((__always_inline__, __nodebug__, __target__("rdseed")))
+
+static __inline__ int __DEFAULT_FN_ATTRS
+_rdseed16_step(unsigned short *__p)
+{
+ return __builtin_ia32_rdseed16_step(__p);
+}
+
+static __inline__ int __DEFAULT_FN_ATTRS
+_rdseed32_step(unsigned int *__p)
+{
+ return __builtin_ia32_rdseed32_step(__p);
+}
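+
+/* These *_step intrinsics return 1 on success and 0 when no seed material
+ * is available yet, so callers typically retry; a minimal sketch:
+ *
+ *   unsigned int seed;
+ *   while (!_rdseed32_step(&seed))
+ *     ;  // spin (or back off) until entropy is available
+ */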
+
+#ifdef __x86_64__
+static __inline__ int __DEFAULT_FN_ATTRS
+_rdseed64_step(unsigned long long *__p)
+{
+ return __builtin_ia32_rdseed64_step(__p);
+}
+#endif
+
+#undef __DEFAULT_FN_ATTRS
+
+#endif /* __RDSEEDINTRIN_H */
diff --git a/current/clang-include/rtmintrin.h b/current/clang-include/rtmintrin.h
new file mode 100644
index 0000000..e6a58d7
--- /dev/null
+++ b/current/clang-include/rtmintrin.h
@@ -0,0 +1,59 @@
+/*===---- rtmintrin.h - RTM intrinsics -------------------------------------===
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+ * THE SOFTWARE.
+ *
+ *===-----------------------------------------------------------------------===
+ */
+
+#ifndef __IMMINTRIN_H
+#error "Never use <rtmintrin.h> directly; include <immintrin.h> instead."
+#endif
+
+#ifndef __RTMINTRIN_H
+#define __RTMINTRIN_H
+
+#define _XBEGIN_STARTED (~0u)
+#define _XABORT_EXPLICIT (1 << 0)
+#define _XABORT_RETRY (1 << 1)
+#define _XABORT_CONFLICT (1 << 2)
+#define _XABORT_CAPACITY (1 << 3)
+#define _XABORT_DEBUG (1 << 4)
+#define _XABORT_NESTED (1 << 5)
+#define _XABORT_CODE(x) (((x) >> 24) & 0xFF)
+
+/* Define the default attributes for the functions in this file. */
+#define __DEFAULT_FN_ATTRS __attribute__((__always_inline__, __nodebug__, __target__("rtm")))
+
+static __inline__ unsigned int __DEFAULT_FN_ATTRS
+_xbegin(void)
+{
+ return __builtin_ia32_xbegin();
+}
+
+static __inline__ void __DEFAULT_FN_ATTRS
+_xend(void)
+{
+ __builtin_ia32_xend();
+}
+
+#define _xabort(imm) __builtin_ia32_xabort((imm))
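+
+/* A minimal transaction sketch with a fallback path (fallback_lock() is a
+ * hypothetical function):
+ *
+ *   unsigned int status = _xbegin();
+ *   if (status == _XBEGIN_STARTED) {
+ *     // transactional region; memory accesses here are speculative
+ *     _xend();
+ *   } else {
+ *     // aborted: status carries the _XABORT_* flags; take a non-RTM path
+ *     fallback_lock();
+ *   }
+ */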
+
+#undef __DEFAULT_FN_ATTRS
+
+#endif /* __RTMINTRIN_H */
diff --git a/current/clang-include/s390intrin.h b/current/clang-include/s390intrin.h
new file mode 100644
index 0000000..d51274c
--- /dev/null
+++ b/current/clang-include/s390intrin.h
@@ -0,0 +1,39 @@
+/*===---- s390intrin.h - SystemZ intrinsics --------------------------------===
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+ * THE SOFTWARE.
+ *
+ *===-----------------------------------------------------------------------===
+ */
+
+#ifndef __S390INTRIN_H
+#define __S390INTRIN_H
+
+#ifndef __s390__
+#error "<s390intrin.h> is for s390 only"
+#endif
+
+#ifdef __HTM__
+#include <htmintrin.h>
+#endif
+
+#ifdef __VEC__
+#include <vecintrin.h>
+#endif
+
+#endif /* __S390INTRIN_H*/
diff --git a/current/clang-include/shaintrin.h b/current/clang-include/shaintrin.h
new file mode 100644
index 0000000..9b5d218
--- /dev/null
+++ b/current/clang-include/shaintrin.h
@@ -0,0 +1,75 @@
+/*===---- shaintrin.h - SHA intrinsics -------------------------------------===
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+ * THE SOFTWARE.
+ *
+ *===-----------------------------------------------------------------------===
+ */
+
+#ifndef __IMMINTRIN_H
+#error "Never use <shaintrin.h> directly; include <immintrin.h> instead."
+#endif
+
+#ifndef __SHAINTRIN_H
+#define __SHAINTRIN_H
+
+/* Define the default attributes for the functions in this file. */
+#define __DEFAULT_FN_ATTRS __attribute__((__always_inline__, __nodebug__, __target__("sha")))
+
+#define _mm_sha1rnds4_epu32(V1, V2, M) __extension__ ({ \
+ __builtin_ia32_sha1rnds4((__v4si)(__m128i)(V1), (__v4si)(__m128i)(V2), (M)); })
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_sha1nexte_epu32(__m128i __X, __m128i __Y)
+{
+ return (__m128i)__builtin_ia32_sha1nexte((__v4si)__X, (__v4si)__Y);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_sha1msg1_epu32(__m128i __X, __m128i __Y)
+{
+ return (__m128i)__builtin_ia32_sha1msg1((__v4si)__X, (__v4si)__Y);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_sha1msg2_epu32(__m128i __X, __m128i __Y)
+{
+ return (__m128i)__builtin_ia32_sha1msg2((__v4si)__X, (__v4si)__Y);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_sha256rnds2_epu32(__m128i __X, __m128i __Y, __m128i __Z)
+{
+ return (__m128i)__builtin_ia32_sha256rnds2((__v4si)__X, (__v4si)__Y, (__v4si)__Z);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_sha256msg1_epu32(__m128i __X, __m128i __Y)
+{
+ return (__m128i)__builtin_ia32_sha256msg1((__v4si)__X, (__v4si)__Y);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_sha256msg2_epu32(__m128i __X, __m128i __Y)
+{
+ return (__m128i)__builtin_ia32_sha256msg2((__v4si)__X, (__v4si)__Y);
+}
+
+#undef __DEFAULT_FN_ATTRS
+
+#endif /* __SHAINTRIN_H */
diff --git a/current/clang-include/smmintrin.h b/current/clang-include/smmintrin.h
new file mode 100644
index 0000000..e48ab03
--- /dev/null
+++ b/current/clang-include/smmintrin.h
@@ -0,0 +1,507 @@
+/*===---- smmintrin.h - SSE4 intrinsics ------------------------------------===
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+ * THE SOFTWARE.
+ *
+ *===-----------------------------------------------------------------------===
+ */
+
+#ifndef _SMMINTRIN_H
+#define _SMMINTRIN_H
+
+#include <tmmintrin.h>
+
+/* Define the default attributes for the functions in this file. */
+#define __DEFAULT_FN_ATTRS __attribute__((__always_inline__, __nodebug__, __target__("sse4.1")))
+
+/* SSE4 Rounding macros. */
+#define _MM_FROUND_TO_NEAREST_INT 0x00
+#define _MM_FROUND_TO_NEG_INF 0x01
+#define _MM_FROUND_TO_POS_INF 0x02
+#define _MM_FROUND_TO_ZERO 0x03
+#define _MM_FROUND_CUR_DIRECTION 0x04
+
+#define _MM_FROUND_RAISE_EXC 0x00
+#define _MM_FROUND_NO_EXC 0x08
+
+#define _MM_FROUND_NINT (_MM_FROUND_RAISE_EXC | _MM_FROUND_TO_NEAREST_INT)
+#define _MM_FROUND_FLOOR (_MM_FROUND_RAISE_EXC | _MM_FROUND_TO_NEG_INF)
+#define _MM_FROUND_CEIL (_MM_FROUND_RAISE_EXC | _MM_FROUND_TO_POS_INF)
+#define _MM_FROUND_TRUNC (_MM_FROUND_RAISE_EXC | _MM_FROUND_TO_ZERO)
+#define _MM_FROUND_RINT (_MM_FROUND_RAISE_EXC | _MM_FROUND_CUR_DIRECTION)
+#define _MM_FROUND_NEARBYINT (_MM_FROUND_NO_EXC | _MM_FROUND_CUR_DIRECTION)
+
+#define _mm_ceil_ps(X) _mm_round_ps((X), _MM_FROUND_CEIL)
+#define _mm_ceil_pd(X) _mm_round_pd((X), _MM_FROUND_CEIL)
+#define _mm_ceil_ss(X, Y) _mm_round_ss((X), (Y), _MM_FROUND_CEIL)
+#define _mm_ceil_sd(X, Y) _mm_round_sd((X), (Y), _MM_FROUND_CEIL)
+
+#define _mm_floor_ps(X) _mm_round_ps((X), _MM_FROUND_FLOOR)
+#define _mm_floor_pd(X) _mm_round_pd((X), _MM_FROUND_FLOOR)
+#define _mm_floor_ss(X, Y) _mm_round_ss((X), (Y), _MM_FROUND_FLOOR)
+#define _mm_floor_sd(X, Y) _mm_round_sd((X), (Y), _MM_FROUND_FLOOR)
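+
+/* Example of the convenience macros above and the underlying rounding
+ * control (hypothetical values; _MM_FROUND_TO_NEAREST_INT rounds ties to
+ * even):
+ *
+ *   __m128 v = _mm_setr_ps(-1.5f, 2.5f, 0.1f, 9.9f);
+ *   __m128 f = _mm_floor_ps(v);                            // { -2, 2, 0, 9 }
+ *   __m128 n = _mm_round_ps(v, _MM_FROUND_TO_NEAREST_INT); // { -2, 2, 0, 10 }
+ */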
+
+#define _mm_round_ps(X, M) __extension__ ({ \
+ (__m128)__builtin_ia32_roundps((__v4sf)(__m128)(X), (M)); })
+
+#define _mm_round_ss(X, Y, M) __extension__ ({ \
+ (__m128)__builtin_ia32_roundss((__v4sf)(__m128)(X), \
+ (__v4sf)(__m128)(Y), (M)); })
+
+#define _mm_round_pd(X, M) __extension__ ({ \
+ (__m128d)__builtin_ia32_roundpd((__v2df)(__m128d)(X), (M)); })
+
+#define _mm_round_sd(X, Y, M) __extension__ ({ \
+ (__m128d)__builtin_ia32_roundsd((__v2df)(__m128d)(X), \
+ (__v2df)(__m128d)(Y), (M)); })
+
+/* SSE4 Packed Blending Intrinsics. */
+#define _mm_blend_pd(V1, V2, M) __extension__ ({ \
+ (__m128d)__builtin_shufflevector((__v2df)(__m128d)(V1), \
+ (__v2df)(__m128d)(V2), \
+ (((M) & 0x01) ? 2 : 0), \
+ (((M) & 0x02) ? 3 : 1)); })
+
+#define _mm_blend_ps(V1, V2, M) __extension__ ({ \
+ (__m128)__builtin_shufflevector((__v4sf)(__m128)(V1), (__v4sf)(__m128)(V2), \
+ (((M) & 0x01) ? 4 : 0), \
+ (((M) & 0x02) ? 5 : 1), \
+ (((M) & 0x04) ? 6 : 2), \
+ (((M) & 0x08) ? 7 : 3)); })
+
+static __inline__ __m128d __DEFAULT_FN_ATTRS
+_mm_blendv_pd (__m128d __V1, __m128d __V2, __m128d __M)
+{
+ return (__m128d) __builtin_ia32_blendvpd ((__v2df)__V1, (__v2df)__V2,
+ (__v2df)__M);
+}
+
+static __inline__ __m128 __DEFAULT_FN_ATTRS
+_mm_blendv_ps (__m128 __V1, __m128 __V2, __m128 __M)
+{
+ return (__m128) __builtin_ia32_blendvps ((__v4sf)__V1, (__v4sf)__V2,
+ (__v4sf)__M);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_blendv_epi8 (__m128i __V1, __m128i __V2, __m128i __M)
+{
+ return (__m128i) __builtin_ia32_pblendvb128 ((__v16qi)__V1, (__v16qi)__V2,
+ (__v16qi)__M);
+}
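+
+/* Example: for the immediate-mask blend forms, bit i of the mask selects
+ * element i from V2 (bit set) or V1 (bit clear); hypothetical values:
+ *
+ *   __m128 x = _mm_setr_ps(1.0f, 2.0f, 3.0f, 4.0f);
+ *   __m128 y = _mm_setr_ps(5.0f, 6.0f, 7.0f, 8.0f);
+ *   __m128 r = _mm_blend_ps(x, y, 0x5);   // bits 0 and 2 set
+ *   // r = { 5, 2, 7, 4 }
+ */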
+
+#define _mm_blend_epi16(V1, V2, M) __extension__ ({ \
+ (__m128i)__builtin_shufflevector((__v8hi)(__m128i)(V1), \
+ (__v8hi)(__m128i)(V2), \
+ (((M) & 0x01) ? 8 : 0), \
+ (((M) & 0x02) ? 9 : 1), \
+ (((M) & 0x04) ? 10 : 2), \
+ (((M) & 0x08) ? 11 : 3), \
+ (((M) & 0x10) ? 12 : 4), \
+ (((M) & 0x20) ? 13 : 5), \
+ (((M) & 0x40) ? 14 : 6), \
+ (((M) & 0x80) ? 15 : 7)); })
+
+/* SSE4 Dword Multiply Instructions. */
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_mullo_epi32 (__m128i __V1, __m128i __V2)
+{
+ return (__m128i) ((__v4su)__V1 * (__v4su)__V2);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_mul_epi32 (__m128i __V1, __m128i __V2)
+{
+ return (__m128i) __builtin_ia32_pmuldq128 ((__v4si)__V1, (__v4si)__V2);
+}
+
+/* SSE4 Floating Point Dot Product Instructions. */
+#define _mm_dp_ps(X, Y, M) __extension__ ({ \
+ (__m128) __builtin_ia32_dpps((__v4sf)(__m128)(X), \
+ (__v4sf)(__m128)(Y), (M)); })
+
+#define _mm_dp_pd(X, Y, M) __extension__ ({\
+ (__m128d) __builtin_ia32_dppd((__v2df)(__m128d)(X), \
+ (__v2df)(__m128d)(Y), (M)); })
+
+/* SSE4 Streaming Load Hint Instruction. */
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_stream_load_si128 (__m128i const *__V)
+{
+ return (__m128i) __builtin_ia32_movntdqa ((const __v2di *) __V);
+}
+
+/* SSE4 Packed Integer Min/Max Instructions. */
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_min_epi8 (__m128i __V1, __m128i __V2)
+{
+ return (__m128i) __builtin_ia32_pminsb128 ((__v16qi) __V1, (__v16qi) __V2);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_max_epi8 (__m128i __V1, __m128i __V2)
+{
+ return (__m128i) __builtin_ia32_pmaxsb128 ((__v16qi) __V1, (__v16qi) __V2);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_min_epu16 (__m128i __V1, __m128i __V2)
+{
+ return (__m128i) __builtin_ia32_pminuw128 ((__v8hi) __V1, (__v8hi) __V2);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_max_epu16 (__m128i __V1, __m128i __V2)
+{
+ return (__m128i) __builtin_ia32_pmaxuw128 ((__v8hi) __V1, (__v8hi) __V2);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_min_epi32 (__m128i __V1, __m128i __V2)
+{
+ return (__m128i) __builtin_ia32_pminsd128 ((__v4si) __V1, (__v4si) __V2);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_max_epi32 (__m128i __V1, __m128i __V2)
+{
+ return (__m128i) __builtin_ia32_pmaxsd128 ((__v4si) __V1, (__v4si) __V2);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_min_epu32 (__m128i __V1, __m128i __V2)
+{
+ return (__m128i) __builtin_ia32_pminud128((__v4si) __V1, (__v4si) __V2);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_max_epu32 (__m128i __V1, __m128i __V2)
+{
+ return (__m128i) __builtin_ia32_pmaxud128((__v4si) __V1, (__v4si) __V2);
+}
+
+/* SSE4 Insertion and Extraction from XMM Register Instructions. */
+#define _mm_insert_ps(X, Y, N) __builtin_ia32_insertps128((X), (Y), (N))
+#define _mm_extract_ps(X, N) (__extension__ \
+ ({ union { int __i; float __f; } __t; \
+ __v4sf __a = (__v4sf)(__m128)(X); \
+ __t.__f = __a[(N) & 3]; \
+ __t.__i;}))
+
+/* Miscellaneous insert and extract macros. */
+/* Extract a single-precision float from X at index N into D. */
+#define _MM_EXTRACT_FLOAT(D, X, N) (__extension__ ({ __v4sf __a = (__v4sf)(X); \
+ (D) = __a[N]; }))
+
+/* Or together 2 sets of indexes (X and Y) with the zeroing bits (Z) to create
+ an index suitable for _mm_insert_ps. */
+#define _MM_MK_INSERTPS_NDX(X, Y, Z) (((X) << 6) | ((Y) << 4) | (Z))
+
+/* Extract a float from X at index N into the first index of the return. */
+#define _MM_PICK_OUT_PS(X, N) _mm_insert_ps (_mm_setzero_ps(), (X), \
+ _MM_MK_INSERTPS_NDX((N), 0, 0x0e))
+
+/* Insert int into packed integer array at index. */
+#define _mm_insert_epi8(X, I, N) (__extension__ \
+ ({ __v16qi __a = (__v16qi)(__m128i)(X); \
+ __a[(N) & 15] = (I); \
+ (__m128i)__a;}))
+#define _mm_insert_epi32(X, I, N) (__extension__ \
+ ({ __v4si __a = (__v4si)(__m128i)(X); \
+ __a[(N) & 3] = (I); \
+ (__m128i)__a;}))
+#ifdef __x86_64__
+#define _mm_insert_epi64(X, I, N) (__extension__ \
+ ({ __v2di __a = (__v2di)(__m128i)(X); \
+ __a[(N) & 1] = (I); \
+ (__m128i)__a;}))
+#endif /* __x86_64__ */
+
+/* Extract int from packed integer array at index. The 8-bit variant is
+ * returned zero-extended, so it is never negative; the 32- and 64-bit
+ * variants return the element value unchanged.
+ */
+#define _mm_extract_epi8(X, N) (__extension__ \
+ ({ __v16qi __a = (__v16qi)(__m128i)(X); \
+ (int)(unsigned char) __a[(N) & 15];}))
+#define _mm_extract_epi32(X, N) (__extension__ \
+ ({ __v4si __a = (__v4si)(__m128i)(X); \
+ (int)__a[(N) & 3];}))
+#ifdef __x86_64__
+#define _mm_extract_epi64(X, N) (__extension__ \
+ ({ __v2di __a = (__v2di)(__m128i)(X); \
+ (long long)__a[(N) & 1];}))
+#endif /* __x86_64 */
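+
+/* Example pairing the insert and extract forms on dword lanes
+ * (hypothetical values):
+ *
+ *   __m128i v = _mm_set1_epi32(0);
+ *   v = _mm_insert_epi32(v, 42, 2);    // write 42 into lane 2
+ *   int e = _mm_extract_epi32(v, 2);   // e == 42
+ */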
+
+/* SSE4 128-bit Packed Integer Comparisons. */
+static __inline__ int __DEFAULT_FN_ATTRS
+_mm_testz_si128(__m128i __M, __m128i __V)
+{
+ return __builtin_ia32_ptestz128((__v2di)__M, (__v2di)__V);
+}
+
+static __inline__ int __DEFAULT_FN_ATTRS
+_mm_testc_si128(__m128i __M, __m128i __V)
+{
+ return __builtin_ia32_ptestc128((__v2di)__M, (__v2di)__V);
+}
+
+static __inline__ int __DEFAULT_FN_ATTRS
+_mm_testnzc_si128(__m128i __M, __m128i __V)
+{
+ return __builtin_ia32_ptestnzc128((__v2di)__M, (__v2di)__V);
+}
+
+#define _mm_test_all_ones(V) _mm_testc_si128((V), _mm_cmpeq_epi32((V), (V)))
+#define _mm_test_mix_ones_zeros(M, V) _mm_testnzc_si128((M), (V))
+#define _mm_test_all_zeros(M, V) _mm_testz_si128 ((M), (V))
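+
+/* Example: _mm_test_all_zeros() is a branch-friendly "(M & V) == 0" test
+ * (hypothetical values):
+ *
+ *   __m128i v    = _mm_setr_epi32(0, 0, 1, 0);
+ *   __m128i mask = _mm_setr_epi32(-1, -1, 0, 0);
+ *   int z = _mm_test_all_zeros(mask, v);   // z == 1: v is zero under mask
+ */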
+
+/* SSE4 64-bit Packed Integer Comparisons. */
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_cmpeq_epi64(__m128i __V1, __m128i __V2)
+{
+ return (__m128i)((__v2di)__V1 == (__v2di)__V2);
+}
+
+/* SSE4 Packed Integer Sign-Extension. */
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_cvtepi8_epi16(__m128i __V)
+{
+ /* This function always performs a signed extension, but __v16qi is a char
+ which may be signed or unsigned, so use __v16qs. */
+ return (__m128i)__builtin_convertvector(__builtin_shufflevector((__v16qs)__V, (__v16qs)__V, 0, 1, 2, 3, 4, 5, 6, 7), __v8hi);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_cvtepi8_epi32(__m128i __V)
+{
+ /* This function always performs a signed extension, but __v16qi is a char
+ which may be signed or unsigned, so use __v16qs. */
+ return (__m128i)__builtin_convertvector(__builtin_shufflevector((__v16qs)__V, (__v16qs)__V, 0, 1, 2, 3), __v4si);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_cvtepi8_epi64(__m128i __V)
+{
+ /* This function always performs a signed extension, but __v16qi is a char
+ which may be signed or unsigned, so use __v16qs. */
+ return (__m128i)__builtin_convertvector(__builtin_shufflevector((__v16qs)__V, (__v16qs)__V, 0, 1), __v2di);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_cvtepi16_epi32(__m128i __V)
+{
+ return (__m128i)__builtin_convertvector(__builtin_shufflevector((__v8hi)__V, (__v8hi)__V, 0, 1, 2, 3), __v4si);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_cvtepi16_epi64(__m128i __V)
+{
+ return (__m128i)__builtin_convertvector(__builtin_shufflevector((__v8hi)__V, (__v8hi)__V, 0, 1), __v2di);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_cvtepi32_epi64(__m128i __V)
+{
+ return (__m128i)__builtin_convertvector(__builtin_shufflevector((__v4si)__V, (__v4si)__V, 0, 1), __v2di);
+}
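+
+/* Example: widen the low four signed bytes to 32-bit lanes (hypothetical
+ * values; note the sign is preserved):
+ *
+ *   __m128i b = _mm_setr_epi8(-1, 2, -3, 4, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0);
+ *   __m128i d = _mm_cvtepi8_epi32(b);   // { -1, 2, -3, 4 }
+ */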
+
+/* SSE4 Packed Integer Zero-Extension. */
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_cvtepu8_epi16(__m128i __V)
+{
+ return (__m128i)__builtin_convertvector(__builtin_shufflevector((__v16qu)__V, (__v16qu)__V, 0, 1, 2, 3, 4, 5, 6, 7), __v8hi);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_cvtepu8_epi32(__m128i __V)
+{
+ return (__m128i)__builtin_convertvector(__builtin_shufflevector((__v16qu)__V, (__v16qu)__V, 0, 1, 2, 3), __v4si);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_cvtepu8_epi64(__m128i __V)
+{
+ return (__m128i)__builtin_convertvector(__builtin_shufflevector((__v16qu)__V, (__v16qu)__V, 0, 1), __v2di);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_cvtepu16_epi32(__m128i __V)
+{
+ return (__m128i)__builtin_convertvector(__builtin_shufflevector((__v8hu)__V, (__v8hu)__V, 0, 1, 2, 3), __v4si);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_cvtepu16_epi64(__m128i __V)
+{
+ return (__m128i)__builtin_convertvector(__builtin_shufflevector((__v8hu)__V, (__v8hu)__V, 0, 1), __v2di);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_cvtepu32_epi64(__m128i __V)
+{
+ return (__m128i)__builtin_convertvector(__builtin_shufflevector((__v4su)__V, (__v4su)__V, 0, 1), __v2di);
+}
+
+/* SSE4 Pack with Unsigned Saturation. */
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_packus_epi32(__m128i __V1, __m128i __V2)
+{
+ return (__m128i) __builtin_ia32_packusdw128((__v4si)__V1, (__v4si)__V2);
+}
+
+/* SSE4 Multiple Packed Sums of Absolute Difference. */
+#define _mm_mpsadbw_epu8(X, Y, M) __extension__ ({ \
+ (__m128i) __builtin_ia32_mpsadbw128((__v16qi)(__m128i)(X), \
+ (__v16qi)(__m128i)(Y), (M)); })
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_minpos_epu16(__m128i __V)
+{
+ return (__m128i) __builtin_ia32_phminposuw128((__v8hi)__V);
+}
+
+/* Handle the sse4.2 definitions here. */
+
+/* These definitions are normally in nmmintrin.h, but gcc puts them in here
+ so we'll do the same. */
+
+#undef __DEFAULT_FN_ATTRS
+#define __DEFAULT_FN_ATTRS __attribute__((__always_inline__, __nodebug__, __target__("sse4.2")))
+
+/* These specify the type of data that we're comparing. */
+#define _SIDD_UBYTE_OPS 0x00
+#define _SIDD_UWORD_OPS 0x01
+#define _SIDD_SBYTE_OPS 0x02
+#define _SIDD_SWORD_OPS 0x03
+
+/* These specify the type of comparison operation. */
+#define _SIDD_CMP_EQUAL_ANY 0x00
+#define _SIDD_CMP_RANGES 0x04
+#define _SIDD_CMP_EQUAL_EACH 0x08
+#define _SIDD_CMP_EQUAL_ORDERED 0x0c
+
+/* These macros specify the polarity of the operation. */
+#define _SIDD_POSITIVE_POLARITY 0x00
+#define _SIDD_NEGATIVE_POLARITY 0x10
+#define _SIDD_MASKED_POSITIVE_POLARITY 0x20
+#define _SIDD_MASKED_NEGATIVE_POLARITY 0x30
+
+/* These macros are used in _mm_cmpXstri() to specify the return. */
+#define _SIDD_LEAST_SIGNIFICANT 0x00
+#define _SIDD_MOST_SIGNIFICANT 0x40
+
+/* These macros are used in _mm_cmpXstri() to specify the return. */
+#define _SIDD_BIT_MASK 0x00
+#define _SIDD_UNIT_MASK 0x40
+
+/* SSE4.2 Packed Comparison Intrinsics. */
+#define _mm_cmpistrm(A, B, M) \
+ (__m128i)__builtin_ia32_pcmpistrm128((__v16qi)(__m128i)(A), \
+ (__v16qi)(__m128i)(B), (int)(M))
+#define _mm_cmpistri(A, B, M) \
+ (int)__builtin_ia32_pcmpistri128((__v16qi)(__m128i)(A), \
+ (__v16qi)(__m128i)(B), (int)(M))
+
+#define _mm_cmpestrm(A, LA, B, LB, M) \
+ (__m128i)__builtin_ia32_pcmpestrm128((__v16qi)(__m128i)(A), (int)(LA), \
+ (__v16qi)(__m128i)(B), (int)(LB), \
+ (int)(M))
+#define _mm_cmpestri(A, LA, B, LB, M) \
+ (int)__builtin_ia32_pcmpestri128((__v16qi)(__m128i)(A), (int)(LA), \
+ (__v16qi)(__m128i)(B), (int)(LB), \
+ (int)(M))
+
+/* SSE4.2 Packed Comparison Intrinsics and EFlag Reading. */
+#define _mm_cmpistra(A, B, M) \
+ (int)__builtin_ia32_pcmpistria128((__v16qi)(__m128i)(A), \
+ (__v16qi)(__m128i)(B), (int)(M))
+#define _mm_cmpistrc(A, B, M) \
+ (int)__builtin_ia32_pcmpistric128((__v16qi)(__m128i)(A), \
+ (__v16qi)(__m128i)(B), (int)(M))
+#define _mm_cmpistro(A, B, M) \
+ (int)__builtin_ia32_pcmpistrio128((__v16qi)(__m128i)(A), \
+ (__v16qi)(__m128i)(B), (int)(M))
+#define _mm_cmpistrs(A, B, M) \
+ (int)__builtin_ia32_pcmpistris128((__v16qi)(__m128i)(A), \
+ (__v16qi)(__m128i)(B), (int)(M))
+#define _mm_cmpistrz(A, B, M) \
+ (int)__builtin_ia32_pcmpistriz128((__v16qi)(__m128i)(A), \
+ (__v16qi)(__m128i)(B), (int)(M))
+
+#define _mm_cmpestra(A, LA, B, LB, M) \
+ (int)__builtin_ia32_pcmpestria128((__v16qi)(__m128i)(A), (int)(LA), \
+ (__v16qi)(__m128i)(B), (int)(LB), \
+ (int)(M))
+#define _mm_cmpestrc(A, LA, B, LB, M) \
+ (int)__builtin_ia32_pcmpestric128((__v16qi)(__m128i)(A), (int)(LA), \
+ (__v16qi)(__m128i)(B), (int)(LB), \
+ (int)(M))
+#define _mm_cmpestro(A, LA, B, LB, M) \
+ (int)__builtin_ia32_pcmpestrio128((__v16qi)(__m128i)(A), (int)(LA), \
+ (__v16qi)(__m128i)(B), (int)(LB), \
+ (int)(M))
+#define _mm_cmpestrs(A, LA, B, LB, M) \
+ (int)__builtin_ia32_pcmpestris128((__v16qi)(__m128i)(A), (int)(LA), \
+ (__v16qi)(__m128i)(B), (int)(LB), \
+ (int)(M))
+#define _mm_cmpestrz(A, LA, B, LB, M) \
+ (int)__builtin_ia32_pcmpestriz128((__v16qi)(__m128i)(A), (int)(LA), \
+ (__v16qi)(__m128i)(B), (int)(LB), \
+ (int)(M))
+
+/* SSE4.2 Compare Packed Data -- Greater Than. */
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_cmpgt_epi64(__m128i __V1, __m128i __V2)
+{
+ return (__m128i)((__v2di)__V1 > (__v2di)__V2);
+}
+
+/* SSE4.2 Accumulate CRC32. */
+static __inline__ unsigned int __DEFAULT_FN_ATTRS
+_mm_crc32_u8(unsigned int __C, unsigned char __D)
+{
+ return __builtin_ia32_crc32qi(__C, __D);
+}
+
+static __inline__ unsigned int __DEFAULT_FN_ATTRS
+_mm_crc32_u16(unsigned int __C, unsigned short __D)
+{
+ return __builtin_ia32_crc32hi(__C, __D);
+}
+
+static __inline__ unsigned int __DEFAULT_FN_ATTRS
+_mm_crc32_u32(unsigned int __C, unsigned int __D)
+{
+ return __builtin_ia32_crc32si(__C, __D);
+}
+
+#ifdef __x86_64__
+static __inline__ unsigned long long __DEFAULT_FN_ATTRS
+_mm_crc32_u64(unsigned long long __C, unsigned long long __D)
+{
+ return __builtin_ia32_crc32di(__C, __D);
+}
+#endif /* __x86_64__ */
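+
+/* A sketch of accumulating a CRC-32C checksum over a byte buffer (buf and
+ * len are hypothetical; initial value ~0 with a final inversion is one
+ * common convention):
+ *
+ *   unsigned int crc = 0xFFFFFFFFu;
+ *   for (size_t i = 0; i < len; ++i)
+ *     crc = _mm_crc32_u8(crc, buf[i]);
+ *   crc = ~crc;
+ */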
+
+#undef __DEFAULT_FN_ATTRS
+
+#ifdef __POPCNT__
+#include <popcntintrin.h>
+#endif
+
+#endif /* _SMMINTRIN_H */
diff --git a/current/clang-include/stdalign.h b/current/clang-include/stdalign.h
new file mode 100644
index 0000000..3738d12
--- /dev/null
+++ b/current/clang-include/stdalign.h
@@ -0,0 +1,35 @@
+/*===---- stdalign.h - Standard header for alignment ------------------------===
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+ * THE SOFTWARE.
+ *
+ *===-----------------------------------------------------------------------===
+ */
+
+#ifndef __STDALIGN_H
+#define __STDALIGN_H
+
+#ifndef __cplusplus
+#define alignas _Alignas
+#define alignof _Alignof
+#endif
+
+#define __alignas_is_defined 1
+#define __alignof_is_defined 1
+
+#endif /* __STDALIGN_H */
diff --git a/current/clang-include/stdarg.h b/current/clang-include/stdarg.h
new file mode 100644
index 0000000..a57e183
--- /dev/null
+++ b/current/clang-include/stdarg.h
@@ -0,0 +1,52 @@
+/*===---- stdarg.h - Variable argument handling ----------------------------===
+ *
+ * Copyright (c) 2008 Eli Friedman
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+ * THE SOFTWARE.
+ *
+ *===-----------------------------------------------------------------------===
+ */
+
+#ifndef __STDARG_H
+#define __STDARG_H
+
+#ifndef _VA_LIST
+typedef __builtin_va_list va_list;
+#define _VA_LIST
+#endif
+#define va_start(ap, param) __builtin_va_start(ap, param)
+#define va_end(ap) __builtin_va_end(ap)
+#define va_arg(ap, type) __builtin_va_arg(ap, type)
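+
+/* A minimal variadic function built from these macros (sum_ints() is a
+ * hypothetical example):
+ *
+ *   int sum_ints(int n, ...) {
+ *     va_list ap;
+ *     int s = 0;
+ *     va_start(ap, n);
+ *     while (n--)
+ *       s += va_arg(ap, int);
+ *     va_end(ap);
+ *     return s;
+ *   }
+ */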
+
+/* GCC always defines __va_copy, but only defines va_copy when compiling in
+ * C99 (or later) mode or when -ansi is not specified, since va_copy was not
+ * part of C90.
+ */
+#define __va_copy(d,s) __builtin_va_copy(d,s)
+
+#if __STDC_VERSION__ >= 199901L || __cplusplus >= 201103L || !defined(__STRICT_ANSI__)
+#define va_copy(dest, src) __builtin_va_copy(dest, src)
+#endif
+
+/* Hack required to make standard headers work, at least on Ubuntu */
+#ifndef __GNUC_VA_LIST
+#define __GNUC_VA_LIST 1
+#endif
+typedef __builtin_va_list __gnuc_va_list;
+
+#endif /* __STDARG_H */
diff --git a/current/clang-include/stdatomic.h b/current/clang-include/stdatomic.h
new file mode 100644
index 0000000..8d573b2
--- /dev/null
+++ b/current/clang-include/stdatomic.h
@@ -0,0 +1,546 @@
+/*-
+ * Copyright (c) 2011 Ed Schouten <ed@FreeBSD.org>
+ * David Chisnall <theraven@FreeBSD.org>
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * $FreeBSD$
+ */
+
+#ifndef _STDATOMIC_H_
+#define _STDATOMIC_H_
+
+#include <sys/cdefs.h>
+
+#if defined(__cplusplus) && __cplusplus >= 201103L && defined(_USING_LIBCXX)
+# ifdef __clang__
+# if __has_feature(cxx_atomic)
+# define _STDATOMIC_HAVE_ATOMIC
+# endif
+# else /* gcc */
+# define _STDATOMIC_HAVE_ATOMIC
+# endif
+#endif
+
+#ifdef _STDATOMIC_HAVE_ATOMIC
+
+/* We have a usable C++ <atomic>; use it instead. */
+
+#include <atomic>
+
+#undef _Atomic
+ /* Also defined by <atomic> for gcc. But not used in macros. */
+ /* Also a clang intrinsic. */
+ /* Should not be used by client code before this file is */
+ /* included. The definitions in <atomic> themselves see */
+ /* the old definition, as they should. */
+ /* Client code sees the following definition. */
+
+#define _Atomic(t) std::atomic<t>
+
+using std::atomic_is_lock_free;
+using std::atomic_init;
+using std::atomic_store;
+using std::atomic_store_explicit;
+using std::atomic_load;
+using std::atomic_load_explicit;
+using std::atomic_exchange;
+using std::atomic_exchange_explicit;
+using std::atomic_compare_exchange_strong;
+using std::atomic_compare_exchange_strong_explicit;
+using std::atomic_compare_exchange_weak;
+using std::atomic_compare_exchange_weak_explicit;
+using std::atomic_fetch_add;
+using std::atomic_fetch_add_explicit;
+using std::atomic_fetch_sub;
+using std::atomic_fetch_sub_explicit;
+using std::atomic_fetch_or;
+using std::atomic_fetch_or_explicit;
+using std::atomic_fetch_xor;
+using std::atomic_fetch_xor_explicit;
+using std::atomic_fetch_and;
+using std::atomic_fetch_and_explicit;
+using std::atomic_thread_fence;
+using std::atomic_signal_fence;
+
+using std::memory_order;
+using std::memory_order_relaxed;
+using std::memory_order_consume;
+using std::memory_order_acquire;
+using std::memory_order_release;
+using std::memory_order_acq_rel;
+using std::memory_order_seq_cst;
+
+using std::atomic_bool;
+using std::atomic_char;
+using std::atomic_schar;
+using std::atomic_uchar;
+using std::atomic_short;
+using std::atomic_ushort;
+using std::atomic_int;
+using std::atomic_uint;
+using std::atomic_long;
+using std::atomic_ulong;
+using std::atomic_llong;
+using std::atomic_ullong;
+using std::atomic_char16_t;
+using std::atomic_char32_t;
+using std::atomic_wchar_t;
+using std::atomic_int_least8_t;
+using std::atomic_uint_least8_t;
+using std::atomic_int_least16_t;
+using std::atomic_uint_least16_t;
+using std::atomic_int_least32_t;
+using std::atomic_uint_least32_t;
+using std::atomic_int_least64_t;
+using std::atomic_uint_least64_t;
+using std::atomic_int_fast8_t;
+using std::atomic_uint_fast8_t;
+using std::atomic_int_fast16_t;
+using std::atomic_uint_fast16_t;
+using std::atomic_int_fast32_t;
+using std::atomic_uint_fast32_t;
+using std::atomic_int_fast64_t;
+using std::atomic_uint_fast64_t;
+using std::atomic_intptr_t;
+using std::atomic_uintptr_t;
+using std::atomic_size_t;
+using std::atomic_ptrdiff_t;
+using std::atomic_intmax_t;
+using std::atomic_uintmax_t;
+
+#else /* <atomic> unavailable, possibly because this is C, not C++ */
+
+#include <sys/types.h>
+#include <stdbool.h>
+
+/*
+ * C: Do it ourselves.
+ * Note that the runtime representation defined here should be compatible
+ * with the C++ one, i.e. an _Atomic(T) needs to contain the same
+ * bits as a T.
+ */
+
+#include <stddef.h> /* For ptrdiff_t. */
+#include <stdint.h> /* TODO: don't drag in all the macros, just the types. */
+// Include uchar.h only when needed. Bionic's stdatomic.h is also used for the
+// host (via a copy in prebuilts/clang) and uchar.h is not available in the
+// glibc used for the host.
+#if __STDC_VERSION__ >= 201112L
+# include <uchar.h> /* For char16_t and char32_t. */
+#endif
+
+
+#ifdef __clang__
+# if __has_extension(c_atomic) || __has_extension(cxx_atomic)
+# define __CLANG_ATOMICS
+# else
+# error "stdatomic.h does not support your compiler"
+# endif
+# if __has_builtin(__sync_swap)
+# define __HAS_BUILTIN_SYNC_SWAP
+# endif
+#else
+# define __GNUC_ATOMICS
+#endif
+
+/*
+ * 7.17.1 Atomic lock-free macros.
+ */
+
+#ifdef __GCC_ATOMIC_BOOL_LOCK_FREE
+#define ATOMIC_BOOL_LOCK_FREE __GCC_ATOMIC_BOOL_LOCK_FREE
+#endif
+#ifdef __GCC_ATOMIC_CHAR_LOCK_FREE
+#define ATOMIC_CHAR_LOCK_FREE __GCC_ATOMIC_CHAR_LOCK_FREE
+#endif
+#ifdef __GCC_ATOMIC_CHAR16_T_LOCK_FREE
+#define ATOMIC_CHAR16_T_LOCK_FREE __GCC_ATOMIC_CHAR16_T_LOCK_FREE
+#endif
+#ifdef __GCC_ATOMIC_CHAR32_T_LOCK_FREE
+#define ATOMIC_CHAR32_T_LOCK_FREE __GCC_ATOMIC_CHAR32_T_LOCK_FREE
+#endif
+#ifdef __GCC_ATOMIC_WCHAR_T_LOCK_FREE
+#define ATOMIC_WCHAR_T_LOCK_FREE __GCC_ATOMIC_WCHAR_T_LOCK_FREE
+#endif
+#ifdef __GCC_ATOMIC_SHORT_LOCK_FREE
+#define ATOMIC_SHORT_LOCK_FREE __GCC_ATOMIC_SHORT_LOCK_FREE
+#endif
+#ifdef __GCC_ATOMIC_INT_LOCK_FREE
+#define ATOMIC_INT_LOCK_FREE __GCC_ATOMIC_INT_LOCK_FREE
+#endif
+#ifdef __GCC_ATOMIC_LONG_LOCK_FREE
+#define ATOMIC_LONG_LOCK_FREE __GCC_ATOMIC_LONG_LOCK_FREE
+#endif
+#ifdef __GCC_ATOMIC_LLONG_LOCK_FREE
+#define ATOMIC_LLONG_LOCK_FREE __GCC_ATOMIC_LLONG_LOCK_FREE
+#endif
+#ifdef __GCC_ATOMIC_POINTER_LOCK_FREE
+#define ATOMIC_POINTER_LOCK_FREE __GCC_ATOMIC_POINTER_LOCK_FREE
+#endif
+
+/*
+ * 7.17.2 Initialization.
+ */
+
+#if defined(__CLANG_ATOMICS)
+#define ATOMIC_VAR_INIT(value) (value)
+#define atomic_init(obj, value) __c11_atomic_init(obj, value)
+#else
+#define ATOMIC_VAR_INIT(value) { .__val = (value) }
+#define atomic_init(obj, value) ((void)((obj)->__val = (value)))
+#endif
+
+/*
+ * Clang and recent GCC both provide predefined macros for the memory
+ * orderings. If we are using a compiler that doesn't define them, use the
+ * clang values - these will be ignored in the fallback path.
+ */
+
+#ifndef __ATOMIC_RELAXED
+#define __ATOMIC_RELAXED 0
+#endif
+#ifndef __ATOMIC_CONSUME
+#define __ATOMIC_CONSUME 1
+#endif
+#ifndef __ATOMIC_ACQUIRE
+#define __ATOMIC_ACQUIRE 2
+#endif
+#ifndef __ATOMIC_RELEASE
+#define __ATOMIC_RELEASE 3
+#endif
+#ifndef __ATOMIC_ACQ_REL
+#define __ATOMIC_ACQ_REL 4
+#endif
+#ifndef __ATOMIC_SEQ_CST
+#define __ATOMIC_SEQ_CST 5
+#endif
+
+/*
+ * 7.17.3 Order and consistency.
+ *
+ * The memory_order_* constants that denote the barrier behaviour of the
+ * atomic operations.
+ * The enum values must be identical to those used by the
+ * C++ <atomic> header.
+ */
+
+typedef enum {
+ memory_order_relaxed = __ATOMIC_RELAXED,
+ memory_order_consume = __ATOMIC_CONSUME,
+ memory_order_acquire = __ATOMIC_ACQUIRE,
+ memory_order_release = __ATOMIC_RELEASE,
+ memory_order_acq_rel = __ATOMIC_ACQ_REL,
+ memory_order_seq_cst = __ATOMIC_SEQ_CST
+} memory_order;
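+
+/* A sketch of a release/acquire handoff using these orderings (flag, data
+ * and use() are hypothetical; the *_explicit operations are defined later
+ * in this header):
+ *
+ *   // writer thread:
+ *   data = 42;
+ *   atomic_store_explicit(&flag, 1, memory_order_release);
+ *
+ *   // reader thread:
+ *   if (atomic_load_explicit(&flag, memory_order_acquire))
+ *     use(data);   // guaranteed to observe data == 42
+ */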
+
+/*
+ * 7.17.4 Fences.
+ */
+
+static __inline void
+atomic_thread_fence(memory_order __order __attribute__((unused)))
+{
+
+#ifdef __CLANG_ATOMICS
+ __c11_atomic_thread_fence(__order);
+#elif defined(__GNUC_ATOMICS)
+ __atomic_thread_fence(__order);
+#else
+ __sync_synchronize();
+#endif
+}
+
+static __inline void
+atomic_signal_fence(memory_order __order __attribute__((unused)))
+{
+
+#ifdef __CLANG_ATOMICS
+ __c11_atomic_signal_fence(__order);
+#elif defined(__GNUC_ATOMICS)
+ __atomic_signal_fence(__order);
+#else
+ __asm volatile ("" ::: "memory");
+#endif
+}
+
+/*
+ * 7.17.5 Lock-free property.
+ */
+
+#if defined(_KERNEL)
+/* Atomics in kernelspace are always lock-free. */
+#define atomic_is_lock_free(obj) \
+ ((void)(obj), (_Bool)1)
+#elif defined(__CLANG_ATOMICS)
+#define atomic_is_lock_free(obj) \
+ __c11_atomic_is_lock_free(sizeof(*(obj)))
+#elif defined(__GNUC_ATOMICS)
+#define atomic_is_lock_free(obj) \
+ __atomic_is_lock_free(sizeof((obj)->__val), &(obj)->__val)
+#else
+#define atomic_is_lock_free(obj) \
+ ((void)(obj), sizeof((obj)->__val) <= sizeof(void *))
+#endif
+
+/*
+ * 7.17.6 Atomic integer types.
+ */
+
+#ifndef __CLANG_ATOMICS
+/*
+ * No native support for _Atomic(). Place object in structure to prevent
+ * most forms of direct non-atomic access.
+ */
+#define _Atomic(T) struct { T volatile __val; }
+#endif
+
+typedef _Atomic(bool) atomic_bool;
+typedef _Atomic(char) atomic_char;
+typedef _Atomic(signed char) atomic_schar;
+typedef _Atomic(unsigned char) atomic_uchar;
+typedef _Atomic(short) atomic_short;
+typedef _Atomic(unsigned short) atomic_ushort;
+typedef _Atomic(int) atomic_int;
+typedef _Atomic(unsigned int) atomic_uint;
+typedef _Atomic(long) atomic_long;
+typedef _Atomic(unsigned long) atomic_ulong;
+typedef _Atomic(long long) atomic_llong;
+typedef _Atomic(unsigned long long) atomic_ullong;
+#if __STDC_VERSION__ >= 201112L || __cplusplus >= 201103L
+ typedef _Atomic(char16_t) atomic_char16_t;
+ typedef _Atomic(char32_t) atomic_char32_t;
+#endif
+typedef _Atomic(wchar_t) atomic_wchar_t;
+typedef _Atomic(int_least8_t) atomic_int_least8_t;
+typedef _Atomic(uint_least8_t) atomic_uint_least8_t;
+typedef _Atomic(int_least16_t) atomic_int_least16_t;
+typedef _Atomic(uint_least16_t) atomic_uint_least16_t;
+typedef _Atomic(int_least32_t) atomic_int_least32_t;
+typedef _Atomic(uint_least32_t) atomic_uint_least32_t;
+typedef _Atomic(int_least64_t) atomic_int_least64_t;
+typedef _Atomic(uint_least64_t) atomic_uint_least64_t;
+typedef _Atomic(int_fast8_t) atomic_int_fast8_t;
+typedef _Atomic(uint_fast8_t) atomic_uint_fast8_t;
+typedef _Atomic(int_fast16_t) atomic_int_fast16_t;
+typedef _Atomic(uint_fast16_t) atomic_uint_fast16_t;
+typedef _Atomic(int_fast32_t) atomic_int_fast32_t;
+typedef _Atomic(uint_fast32_t) atomic_uint_fast32_t;
+typedef _Atomic(int_fast64_t) atomic_int_fast64_t;
+typedef _Atomic(uint_fast64_t) atomic_uint_fast64_t;
+typedef _Atomic(intptr_t) atomic_intptr_t;
+typedef _Atomic(uintptr_t) atomic_uintptr_t;
+typedef _Atomic(size_t) atomic_size_t;
+typedef _Atomic(ptrdiff_t) atomic_ptrdiff_t;
+typedef _Atomic(intmax_t) atomic_intmax_t;
+typedef _Atomic(uintmax_t) atomic_uintmax_t;
+
+/*
+ * 7.17.7 Operations on atomic types.
+ */
+
+/*
+ * Compiler-specific operations.
+ */
+
+#if defined(__CLANG_ATOMICS)
+#define atomic_compare_exchange_strong_explicit(object, expected, \
+ desired, success, failure) \
+ __c11_atomic_compare_exchange_strong(object, expected, desired, \
+ success, failure)
+#define atomic_compare_exchange_weak_explicit(object, expected, \
+ desired, success, failure) \
+ __c11_atomic_compare_exchange_weak(object, expected, desired, \
+ success, failure)
+#define atomic_exchange_explicit(object, desired, order) \
+ __c11_atomic_exchange(object, desired, order)
+#define atomic_fetch_add_explicit(object, operand, order) \
+ __c11_atomic_fetch_add(object, operand, order)
+#define atomic_fetch_and_explicit(object, operand, order) \
+ __c11_atomic_fetch_and(object, operand, order)
+#define atomic_fetch_or_explicit(object, operand, order) \
+ __c11_atomic_fetch_or(object, operand, order)
+#define atomic_fetch_sub_explicit(object, operand, order) \
+ __c11_atomic_fetch_sub(object, operand, order)
+#define atomic_fetch_xor_explicit(object, operand, order) \
+ __c11_atomic_fetch_xor(object, operand, order)
+#define atomic_load_explicit(object, order) \
+ __c11_atomic_load(object, order)
+#define atomic_store_explicit(object, desired, order) \
+ __c11_atomic_store(object, desired, order)
+#elif defined(__GNUC_ATOMICS)
+#define atomic_compare_exchange_strong_explicit(object, expected, \
+ desired, success, failure) \
+ __atomic_compare_exchange_n(&(object)->__val, expected, \
+ desired, 0, success, failure)
+#define atomic_compare_exchange_weak_explicit(object, expected, \
+ desired, success, failure) \
+ __atomic_compare_exchange_n(&(object)->__val, expected, \
+ desired, 1, success, failure)
+#define atomic_exchange_explicit(object, desired, order) \
+ __atomic_exchange_n(&(object)->__val, desired, order)
+#define atomic_fetch_add_explicit(object, operand, order) \
+ __atomic_fetch_add(&(object)->__val, operand, order)
+#define atomic_fetch_and_explicit(object, operand, order) \
+ __atomic_fetch_and(&(object)->__val, operand, order)
+#define atomic_fetch_or_explicit(object, operand, order) \
+ __atomic_fetch_or(&(object)->__val, operand, order)
+#define atomic_fetch_sub_explicit(object, operand, order) \
+ __atomic_fetch_sub(&(object)->__val, operand, order)
+#define atomic_fetch_xor_explicit(object, operand, order) \
+ __atomic_fetch_xor(&(object)->__val, operand, order)
+#define atomic_load_explicit(object, order) \
+ __atomic_load_n(&(object)->__val, order)
+#define atomic_store_explicit(object, desired, order) \
+ __atomic_store_n(&(object)->__val, desired, order)
+#else
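+/*
+ * Adding the operand to a zero of the value's own type converts it up front,
+ * so that for atomic pointers the addend is scaled by the size of the
+ * pointed-to type, as C11 fetch_add/fetch_sub semantics require.
+ */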
+#define __atomic_apply_stride(object, operand) \
+ (((__typeof__((object)->__val))0) + (operand))
+#define atomic_compare_exchange_strong_explicit(object, expected, \
+ desired, success, failure) __extension__ ({ \
+ __typeof__(expected) __ep = (expected); \
+ __typeof__(*__ep) __e = *__ep; \
+ (void)(success); (void)(failure); \
+ (bool)((*__ep = __sync_val_compare_and_swap(&(object)->__val, \
+ __e, desired)) == __e); \
+})
+#define atomic_compare_exchange_weak_explicit(object, expected, \
+ desired, success, failure) \
+ atomic_compare_exchange_strong_explicit(object, expected, \
+ desired, success, failure)
+#ifdef __HAS_BUILTIN_SYNC_SWAP
+/* Clang provides a full-barrier atomic exchange; use it if available. */
+#define atomic_exchange_explicit(object, desired, order) \
+ ((void)(order), __sync_swap(&(object)->__val, desired))
+#else
+/*
+ * __sync_lock_test_and_set() is only an acquire barrier in theory (although in
+ * practice it is usually a full barrier), so we need an explicit barrier before
+ * it.
+ */
+#define atomic_exchange_explicit(object, desired, order) \
+__extension__ ({ \
+ __typeof__(object) __o = (object); \
+ __typeof__(desired) __d = (desired); \
+ (void)(order); \
+ __sync_synchronize(); \
+ __sync_lock_test_and_set(&(__o)->__val, __d); \
+})
+#endif
+#define atomic_fetch_add_explicit(object, operand, order) \
+ ((void)(order), __sync_fetch_and_add(&(object)->__val, \
+ __atomic_apply_stride(object, operand)))
+#define atomic_fetch_and_explicit(object, operand, order) \
+ ((void)(order), __sync_fetch_and_and(&(object)->__val, operand))
+#define atomic_fetch_or_explicit(object, operand, order) \
+ ((void)(order), __sync_fetch_and_or(&(object)->__val, operand))
+#define atomic_fetch_sub_explicit(object, operand, order) \
+ ((void)(order), __sync_fetch_and_sub(&(object)->__val, \
+ __atomic_apply_stride(object, operand)))
+#define atomic_fetch_xor_explicit(object, operand, order) \
+ ((void)(order), __sync_fetch_and_xor(&(object)->__val, operand))
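+/* A load is emulated as a full-barrier add of zero, a store as a discarded
+ * exchange, since the __sync_*() builtins provide no plain load or store. */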
+#define atomic_load_explicit(object, order) \
+ ((void)(order), __sync_fetch_and_add(&(object)->__val, 0))
+#define atomic_store_explicit(object, desired, order) \
+ ((void)atomic_exchange_explicit(object, desired, order))
+#endif
+
+/*
+ * Convenience functions.
+ *
+ * Don't provide these in kernel space. In kernel space, we should be
+ * disciplined enough to always provide explicit barriers.
+ */
+
+#ifndef _KERNEL
+#define atomic_compare_exchange_strong(object, expected, desired) \
+ atomic_compare_exchange_strong_explicit(object, expected, \
+ desired, memory_order_seq_cst, memory_order_seq_cst)
+#define atomic_compare_exchange_weak(object, expected, desired) \
+ atomic_compare_exchange_weak_explicit(object, expected, \
+ desired, memory_order_seq_cst, memory_order_seq_cst)
+#define atomic_exchange(object, desired) \
+ atomic_exchange_explicit(object, desired, memory_order_seq_cst)
+#define atomic_fetch_add(object, operand) \
+ atomic_fetch_add_explicit(object, operand, memory_order_seq_cst)
+#define atomic_fetch_and(object, operand) \
+ atomic_fetch_and_explicit(object, operand, memory_order_seq_cst)
+#define atomic_fetch_or(object, operand) \
+ atomic_fetch_or_explicit(object, operand, memory_order_seq_cst)
+#define atomic_fetch_sub(object, operand) \
+ atomic_fetch_sub_explicit(object, operand, memory_order_seq_cst)
+#define atomic_fetch_xor(object, operand) \
+ atomic_fetch_xor_explicit(object, operand, memory_order_seq_cst)
+#define atomic_load(object) \
+ atomic_load_explicit(object, memory_order_seq_cst)
+#define atomic_store(object, desired) \
+ atomic_store_explicit(object, desired, memory_order_seq_cst)
+#endif /* !_KERNEL */
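+
+/*
+ * For example, with a hypothetical counter:
+ *
+ *	atomic_int hits;
+ *	atomic_store(&hits, 0);
+ *	atomic_fetch_add(&hits, 1);
+ *
+ * both calls expand to their _explicit counterparts with
+ * memory_order_seq_cst ordering.
+ */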
+
+/*
+ * 7.17.8 Atomic flag type and operations.
+ *
+ * XXX: Assume atomic_bool can be used as an atomic_flag. Is there some
+ * kind of compiler built-in type we could use?
+ */
+
+typedef struct {
+ atomic_bool __flag;
+} atomic_flag;
+
+#define ATOMIC_FLAG_INIT { ATOMIC_VAR_INIT(false) }
+
+static __inline bool
+atomic_flag_test_and_set_explicit(volatile atomic_flag *__object,
+ memory_order __order)
+{
+ return (atomic_exchange_explicit(&__object->__flag, 1, __order));
+}
+
+static __inline void
+atomic_flag_clear_explicit(volatile atomic_flag *__object, memory_order __order)
+{
+
+ atomic_store_explicit(&__object->__flag, 0, __order);
+}
+
+#ifndef _KERNEL
+static __inline bool
+atomic_flag_test_and_set(volatile atomic_flag *__object)
+{
+
+ return (atomic_flag_test_and_set_explicit(__object,
+ memory_order_seq_cst));
+}
+
+static __inline void
+atomic_flag_clear(volatile atomic_flag *__object)
+{
+
+ atomic_flag_clear_explicit(__object, memory_order_seq_cst);
+}
+#endif /* !_KERNEL */
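+
+/*
+ * Minimal sketch (illustrative) of atomic_flag as a userspace spinlock:
+ *
+ *	static atomic_flag lock = ATOMIC_FLAG_INIT;
+ *
+ *	while (atomic_flag_test_and_set(&lock))
+ *		;	(spin until the flag was previously clear)
+ *	... critical section ...
+ *	atomic_flag_clear(&lock);
+ */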
+
+#endif /* <atomic> unavailable */
+
+#endif /* !_STDATOMIC_H_ */
diff --git a/current/clang-include/stdbool.h b/current/clang-include/stdbool.h
new file mode 100644
index 0000000..0467893
--- /dev/null
+++ b/current/clang-include/stdbool.h
@@ -0,0 +1,44 @@
+/*===---- stdbool.h - Standard header for booleans -------------------------===
+ *
+ * Copyright (c) 2008 Eli Friedman
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+ * THE SOFTWARE.
+ *
+ *===-----------------------------------------------------------------------===
+ */
+
+#ifndef __STDBOOL_H
+#define __STDBOOL_H
+
+/* Don't define bool, true, and false in C++, except as a GNU extension. */
+#ifndef __cplusplus
+#define bool _Bool
+#define true 1
+#define false 0
+#elif defined(__GNUC__) && !defined(__STRICT_ANSI__)
+/* Define _Bool, bool, false, true as a GNU extension. */
+#define _Bool bool
+#define bool bool
+#define false false
+#define true true
+#endif
+
+#define __bool_true_false_are_defined 1
+
+#endif /* __STDBOOL_H */
diff --git a/current/clang-include/stddef.h b/current/clang-include/stddef.h
new file mode 100644
index 0000000..7354996
--- /dev/null
+++ b/current/clang-include/stddef.h
@@ -0,0 +1,137 @@
+/*===---- stddef.h - Basic type definitions --------------------------------===
+ *
+ * Copyright (c) 2008 Eli Friedman
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+ * THE SOFTWARE.
+ *
+ *===-----------------------------------------------------------------------===
+ */
+
+#if !defined(__STDDEF_H) || defined(__need_ptrdiff_t) || \
+ defined(__need_size_t) || defined(__need_wchar_t) || \
+ defined(__need_NULL) || defined(__need_wint_t)
+
+#if !defined(__need_ptrdiff_t) && !defined(__need_size_t) && \
+ !defined(__need_wchar_t) && !defined(__need_NULL) && \
+ !defined(__need_wint_t)
+/* Always define miscellaneous pieces when modules are available. */
+#if !__has_feature(modules)
+#define __STDDEF_H
+#endif
+#define __need_ptrdiff_t
+#define __need_size_t
+#define __need_wchar_t
+#define __need_NULL
+#define __need_STDDEF_H_misc
+/* __need_wint_t is intentionally not defined here. */
+#endif
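+
+/* Illustrative use of the __need_ protocol by a C library: defining, say,
+ * __need_size_t before including this header yields only the size_t
+ * definition, leaving NULL, offsetof and the other pieces untouched. */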
+
+#if defined(__need_ptrdiff_t)
+#if !defined(_PTRDIFF_T) || __has_feature(modules)
+/* Always define ptrdiff_t when modules are available. */
+#if !__has_feature(modules)
+#define _PTRDIFF_T
+#endif
+typedef __PTRDIFF_TYPE__ ptrdiff_t;
+#endif
+#undef __need_ptrdiff_t
+#endif /* defined(__need_ptrdiff_t) */
+
+#if defined(__need_size_t)
+#if !defined(_SIZE_T) || __has_feature(modules)
+/* Always define size_t when modules are available. */
+#if !__has_feature(modules)
+#define _SIZE_T
+#endif
+typedef __SIZE_TYPE__ size_t;
+#endif
+#undef __need_size_t
+#endif /* defined(__need_size_t) */
+
+#if defined(__need_STDDEF_H_misc)
+/* ISO9899:2011 7.20 (C11 Annex K): Define rsize_t if __STDC_WANT_LIB_EXT1__ is
+ * enabled. */
+#if (defined(__STDC_WANT_LIB_EXT1__) && __STDC_WANT_LIB_EXT1__ >= 1 && \
+ !defined(_RSIZE_T)) || __has_feature(modules)
+/* Always define rsize_t when modules are available. */
+#if !__has_feature(modules)
+#define _RSIZE_T
+#endif
+typedef __SIZE_TYPE__ rsize_t;
+#endif
+#endif /* defined(__need_STDDEF_H_misc) */
+
+#if defined(__need_wchar_t)
+#ifndef __cplusplus
+/* Always define wchar_t when modules are available. */
+#if !defined(_WCHAR_T) || __has_feature(modules)
+#if !__has_feature(modules)
+#define _WCHAR_T
+#if defined(_MSC_EXTENSIONS)
+#define _WCHAR_T_DEFINED
+#endif
+#endif
+typedef __WCHAR_TYPE__ wchar_t;
+#endif
+#endif
+#undef __need_wchar_t
+#endif /* defined(__need_wchar_t) */
+
+#if defined(__need_NULL)
+#undef NULL
+#ifdef __cplusplus
+# if !defined(__MINGW32__) && !defined(_MSC_VER)
+# define NULL __null
+# else
+# define NULL 0
+# endif
+#else
+# define NULL ((void*)0)
+#endif
+#ifdef __cplusplus
+#if defined(_MSC_EXTENSIONS) && defined(_NATIVE_NULLPTR_SUPPORTED)
+namespace std { typedef decltype(nullptr) nullptr_t; }
+using ::std::nullptr_t;
+#endif
+#endif
+#undef __need_NULL
+#endif /* defined(__need_NULL) */
+
+#if defined(__need_STDDEF_H_misc)
+#if __STDC_VERSION__ >= 201112L || __cplusplus >= 201103L
+#include "__stddef_max_align_t.h"
+#endif
+#define offsetof(t, d) __builtin_offsetof(t, d)
+#undef __need_STDDEF_H_misc
+#endif /* defined(__need_STDDEF_H_misc) */
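+
+/* e.g. given "struct s { char c; int i; };", offsetof(struct s, i) is a
+ * compile-time constant equal to the byte offset of the member i. */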
+
+/* Some C libraries expect to see a wint_t here. Others (notably MinGW) will
+ * use __WINT_TYPE__ directly; accommodate both by requiring __need_wint_t. */
+#if defined(__need_wint_t)
+/* Always define wint_t when modules are available. */
+#if !defined(_WINT_T) || __has_feature(modules)
+#if !__has_feature(modules)
+#define _WINT_T
+#endif
+typedef __WINT_TYPE__ wint_t;
+#endif
+#undef __need_wint_t
+#endif /* __need_wint_t */
+
+#endif
diff --git a/current/clang-include/stdint.h b/current/clang-include/stdint.h
new file mode 100644
index 0000000..3f2fcbc
--- /dev/null
+++ b/current/clang-include/stdint.h
@@ -0,0 +1,707 @@
+/*===---- stdint.h - Standard header for sized integer types --------------===*\
+ *
+ * Copyright (c) 2009 Chris Lattner
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+ * THE SOFTWARE.
+ *
+\*===----------------------------------------------------------------------===*/
+
+#ifndef __CLANG_STDINT_H
+#define __CLANG_STDINT_H
+
+/* If we're hosted, fall back to the system's stdint.h, which might have
+ * additional definitions.
+ */
+#if __STDC_HOSTED__ && __has_include_next(<stdint.h>)
+
+// C99 7.18.3 Limits of other integer types
+//
+// Footnote 219, 220: C++ implementations should define these macros only when
+// __STDC_LIMIT_MACROS is defined before <stdint.h> is included.
+//
+// Footnote 222: C++ implementations should define these macros only when
+// __STDC_CONSTANT_MACROS is defined before <stdint.h> is included.
+//
+// C++11 [cstdint.syn]p2:
+//
+// The macros defined by <cstdint> are provided unconditionally. In particular,
+// the symbols __STDC_LIMIT_MACROS and __STDC_CONSTANT_MACROS (mentioned in
+// footnotes 219, 220, and 222 in the C standard) play no role in C++.
+//
+// C11 removed the problematic footnotes.
+//
+// Work around this inconsistency by always defining those macros in C++ mode,
+// so that a C library implementation which follows the C99 standard can be
+// used in C++.
+# ifdef __cplusplus
+# if !defined(__STDC_LIMIT_MACROS)
+# define __STDC_LIMIT_MACROS
+# define __STDC_LIMIT_MACROS_DEFINED_BY_CLANG
+# endif
+# if !defined(__STDC_CONSTANT_MACROS)
+# define __STDC_CONSTANT_MACROS
+# define __STDC_CONSTANT_MACROS_DEFINED_BY_CLANG
+# endif
+# endif
+
+# include_next <stdint.h>
+
+# ifdef __STDC_LIMIT_MACROS_DEFINED_BY_CLANG
+# undef __STDC_LIMIT_MACROS
+# undef __STDC_LIMIT_MACROS_DEFINED_BY_CLANG
+# endif
+# ifdef __STDC_CONSTANT_MACROS_DEFINED_BY_CLANG
+# undef __STDC_CONSTANT_MACROS
+# undef __STDC_CONSTANT_MACROS_DEFINED_BY_CLANG
+# endif
+
+#else
+
+/* C99 7.18.1.1 Exact-width integer types.
+ * C99 7.18.1.2 Minimum-width integer types.
+ * C99 7.18.1.3 Fastest minimum-width integer types.
+ *
+ * The standard requires that exact-width types be defined for 8-, 16-, 32-,
+ * and 64-bit types if they are implemented. Other exact-width types are
+ * optional. This implementation defines an exact-width type for every
+ * integer width that is represented in the standard integer types.
+ *
+ * The standard also requires minimum-width types be defined for 8-, 16-, 32-,
+ * and 64-bit widths regardless of whether there are corresponding exact-width
+ * types.
+ *
+ * To accommodate targets that are missing types that are exactly 8, 16, 32, or
+ * 64 bits wide, this implementation takes an approach of cascading
+ * redefinitions, redefining __int_leastN_t to successively smaller exact-width
+ * types. It is therefore important that the types are defined in order of
+ * descending widths.
+ *
+ * We currently assume that the minimum-width types and the fastest
+ * minimum-width types are the same. This is allowed by the standard, but is
+ * suboptimal.
+ *
+ * In violation of the standard, some targets do not implement a type that is
+ * wide enough to represent all of the required widths (8-, 16-, 32-, 64-bit).
+ * To accommodate these targets, a required minimum-width type is only
+ * defined if there exists an exact-width type of equal or greater width.
+ */
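+
+/*
+ * For example, on a hypothetical target whose narrowest integer is 32 bits:
+ * the __INT64_TYPE__ block below first points __int_least8_t at int64_t,
+ * the __INT32_TYPE__ block then redefines it to int32_t, and with no
+ * __INT16_TYPE__ or __INT8_TYPE__ the cascade stops there, so int_least8_t
+ * ends up as int32_t.
+ */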
+
+#ifdef __INT64_TYPE__
+# ifndef __int8_t_defined /* glibc sys/types.h also defines int64_t */
+typedef __INT64_TYPE__ int64_t;
+# endif /* __int8_t_defined */
+typedef __UINT64_TYPE__ uint64_t;
+# define __int_least64_t int64_t
+# define __uint_least64_t uint64_t
+# define __int_least32_t int64_t
+# define __uint_least32_t uint64_t
+# define __int_least16_t int64_t
+# define __uint_least16_t uint64_t
+# define __int_least8_t int64_t
+# define __uint_least8_t uint64_t
+#endif /* __INT64_TYPE__ */
+
+#ifdef __int_least64_t
+typedef __int_least64_t int_least64_t;
+typedef __uint_least64_t uint_least64_t;
+typedef __int_least64_t int_fast64_t;
+typedef __uint_least64_t uint_fast64_t;
+#endif /* __int_least64_t */
+
+#ifdef __INT56_TYPE__
+typedef __INT56_TYPE__ int56_t;
+typedef __UINT56_TYPE__ uint56_t;
+typedef int56_t int_least56_t;
+typedef uint56_t uint_least56_t;
+typedef int56_t int_fast56_t;
+typedef uint56_t uint_fast56_t;
+# define __int_least32_t int56_t
+# define __uint_least32_t uint56_t
+# define __int_least16_t int56_t
+# define __uint_least16_t uint56_t
+# define __int_least8_t int56_t
+# define __uint_least8_t uint56_t
+#endif /* __INT56_TYPE__ */
+
+
+#ifdef __INT48_TYPE__
+typedef __INT48_TYPE__ int48_t;
+typedef __UINT48_TYPE__ uint48_t;
+typedef int48_t int_least48_t;
+typedef uint48_t uint_least48_t;
+typedef int48_t int_fast48_t;
+typedef uint48_t uint_fast48_t;
+# define __int_least32_t int48_t
+# define __uint_least32_t uint48_t
+# define __int_least16_t int48_t
+# define __uint_least16_t uint48_t
+# define __int_least8_t int48_t
+# define __uint_least8_t uint48_t
+#endif /* __INT48_TYPE__ */
+
+
+#ifdef __INT40_TYPE__
+typedef __INT40_TYPE__ int40_t;
+typedef __UINT40_TYPE__ uint40_t;
+typedef int40_t int_least40_t;
+typedef uint40_t uint_least40_t;
+typedef int40_t int_fast40_t;
+typedef uint40_t uint_fast40_t;
+# define __int_least32_t int40_t
+# define __uint_least32_t uint40_t
+# define __int_least16_t int40_t
+# define __uint_least16_t uint40_t
+# define __int_least8_t int40_t
+# define __uint_least8_t uint40_t
+#endif /* __INT40_TYPE__ */
+
+
+#ifdef __INT32_TYPE__
+
+# ifndef __int8_t_defined /* glibc sys/types.h also defines int32_t */
+typedef __INT32_TYPE__ int32_t;
+# endif /* __int8_t_defined */
+
+# ifndef __uint32_t_defined /* more glibc compatibility */
+# define __uint32_t_defined
+typedef __UINT32_TYPE__ uint32_t;
+# endif /* __uint32_t_defined */
+
+# define __int_least32_t int32_t
+# define __uint_least32_t uint32_t
+# define __int_least16_t int32_t
+# define __uint_least16_t uint32_t
+# define __int_least8_t int32_t
+# define __uint_least8_t uint32_t
+#endif /* __INT32_TYPE__ */
+
+#ifdef __int_least32_t
+typedef __int_least32_t int_least32_t;
+typedef __uint_least32_t uint_least32_t;
+typedef __int_least32_t int_fast32_t;
+typedef __uint_least32_t uint_fast32_t;
+#endif /* __int_least32_t */
+
+#ifdef __INT24_TYPE__
+typedef __INT24_TYPE__ int24_t;
+typedef __UINT24_TYPE__ uint24_t;
+typedef int24_t int_least24_t;
+typedef uint24_t uint_least24_t;
+typedef int24_t int_fast24_t;
+typedef uint24_t uint_fast24_t;
+# define __int_least16_t int24_t
+# define __uint_least16_t uint24_t
+# define __int_least8_t int24_t
+# define __uint_least8_t uint24_t
+#endif /* __INT24_TYPE__ */
+
+#ifdef __INT16_TYPE__
+#ifndef __int8_t_defined /* glibc sys/types.h also defines int16_t */
+typedef __INT16_TYPE__ int16_t;
+#endif /* __int8_t_defined */
+typedef __UINT16_TYPE__ uint16_t;
+# define __int_least16_t int16_t
+# define __uint_least16_t uint16_t
+# define __int_least8_t int16_t
+# define __uint_least8_t uint16_t
+#endif /* __INT16_TYPE__ */
+
+#ifdef __int_least16_t
+typedef __int_least16_t int_least16_t;
+typedef __uint_least16_t uint_least16_t;
+typedef __int_least16_t int_fast16_t;
+typedef __uint_least16_t uint_fast16_t;
+#endif /* __int_least16_t */
+
+
+#ifdef __INT8_TYPE__
+#ifndef __int8_t_defined /* glibc sys/types.h also defines int8_t */
+typedef __INT8_TYPE__ int8_t;
+#endif /* __int8_t_defined */
+typedef __UINT8_TYPE__ uint8_t;
+# define __int_least8_t int8_t
+# define __uint_least8_t uint8_t
+#endif /* __INT8_TYPE__ */
+
+#ifdef __int_least8_t
+typedef __int_least8_t int_least8_t;
+typedef __uint_least8_t uint_least8_t;
+typedef __int_least8_t int_fast8_t;
+typedef __uint_least8_t uint_fast8_t;
+#endif /* __int_least8_t */
+
+/* prevent glibc sys/types.h from defining conflicting types */
+#ifndef __int8_t_defined
+# define __int8_t_defined
+#endif /* __int8_t_defined */
+
+/* C99 7.18.1.4 Integer types capable of holding object pointers.
+ */
+#define __stdint_join3(a,b,c) a ## b ## c
+
+#define __intn_t(n) __stdint_join3( int, n, _t)
+#define __uintn_t(n) __stdint_join3(uint, n, _t)
+
+#ifndef _INTPTR_T
+#ifndef __intptr_t_defined
+typedef __intn_t(__INTPTR_WIDTH__) intptr_t;
+#define __intptr_t_defined
+#define _INTPTR_T
+#endif
+#endif
+
+#ifndef _UINTPTR_T
+typedef __uintn_t(__INTPTR_WIDTH__) uintptr_t;
+#define _UINTPTR_T
+#endif
+
+/* C99 7.18.1.5 Greatest-width integer types.
+ */
+typedef __INTMAX_TYPE__ intmax_t;
+typedef __UINTMAX_TYPE__ uintmax_t;
+
+/* C99 7.18.4 Macros for minimum-width integer constants.
+ *
+ * The standard requires that integer constant macros be defined for all the
+ * minimum-width types defined above. As 8-, 16-, 32-, and 64-bit minimum-width
+ * types are required, the corresponding integer constant macros are defined
+ * here. This implementation also defines minimum-width types for every other
+ * integer width that the target implements, so corresponding macros are
+ * defined below, too.
+ *
+ * These macros are defined using the same successive-shrinking approach as
+ * the type definitions above. It is likewise important that macros are defined
+ * in order of descending width.
+ *
+ * Note that C++ should not check __STDC_CONSTANT_MACROS here, contrary to the
+ * claims of the C standard (see C++ 18.3.1p2, [cstdint.syn]).
+ */
+
+#define __int_c_join(a, b) a ## b
+#define __int_c(v, suffix) __int_c_join(v, suffix)
+#define __uint_c(v, suffix) __int_c_join(v##U, suffix)
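+/* For instance, with __INT64_C_SUFFIX__ defined as LL, INT64_C(1) expands
+ * through __int_c(1, LL) to the token 1LL, and UINT64_C(1) to 1ULL. */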
+
+
+#ifdef __INT64_TYPE__
+# ifdef __INT64_C_SUFFIX__
+# define __int64_c_suffix __INT64_C_SUFFIX__
+# define __int32_c_suffix __INT64_C_SUFFIX__
+# define __int16_c_suffix __INT64_C_SUFFIX__
+# define __int8_c_suffix __INT64_C_SUFFIX__
+# else
+# undef __int64_c_suffix
+# undef __int32_c_suffix
+# undef __int16_c_suffix
+# undef __int8_c_suffix
+# endif /* __INT64_C_SUFFIX__ */
+#endif /* __INT64_TYPE__ */
+
+#ifdef __int_least64_t
+# ifdef __int64_c_suffix
+# define INT64_C(v) __int_c(v, __int64_c_suffix)
+# define UINT64_C(v) __uint_c(v, __int64_c_suffix)
+# else
+# define INT64_C(v) v
+# define UINT64_C(v) v ## U
+# endif /* __int64_c_suffix */
+#endif /* __int_least64_t */
+
+
+#ifdef __INT56_TYPE__
+# ifdef __INT56_C_SUFFIX__
+# define INT56_C(v) __int_c(v, __INT56_C_SUFFIX__)
+# define UINT56_C(v) __uint_c(v, __INT56_C_SUFFIX__)
+# define __int32_c_suffix __INT56_C_SUFFIX__
+# define __int16_c_suffix __INT56_C_SUFFIX__
+# define __int8_c_suffix __INT56_C_SUFFIX__
+# else
+# define INT56_C(v) v
+# define UINT56_C(v) v ## U
+# undef __int32_c_suffix
+# undef __int16_c_suffix
+# undef __int8_c_suffix
+# endif /* __INT56_C_SUFFIX__ */
+#endif /* __INT56_TYPE__ */
+
+
+#ifdef __INT48_TYPE__
+# ifdef __INT48_C_SUFFIX__
+# define INT48_C(v) __int_c(v, __INT48_C_SUFFIX__)
+# define UINT48_C(v) __uint_c(v, __INT48_C_SUFFIX__)
+# define __int32_c_suffix __INT48_C_SUFFIX__
+# define __int16_c_suffix __INT48_C_SUFFIX__
+# define __int8_c_suffix __INT48_C_SUFFIX__
+# else
+# define INT48_C(v) v
+# define UINT48_C(v) v ## U
+# undef __int32_c_suffix
+# undef __int16_c_suffix
+# undef __int8_c_suffix
+# endif /* __INT48_C_SUFFIX__ */
+#endif /* __INT48_TYPE__ */
+
+
+#ifdef __INT40_TYPE__
+# ifdef __INT40_C_SUFFIX__
+# define INT40_C(v) __int_c(v, __INT40_C_SUFFIX__)
+# define UINT40_C(v) __uint_c(v, __INT40_C_SUFFIX__)
+# define __int32_c_suffix __INT40_C_SUFFIX__
+# define __int16_c_suffix __INT40_C_SUFFIX__
+# define __int8_c_suffix __INT40_C_SUFFIX__
+# else
+# define INT40_C(v) v
+# define UINT40_C(v) v ## U
+# undef __int32_c_suffix
+# undef __int16_c_suffix
+# undef __int8_c_suffix
+# endif /* __INT40_C_SUFFIX__ */
+#endif /* __INT40_TYPE__ */
+
+
+#ifdef __INT32_TYPE__
+# ifdef __INT32_C_SUFFIX__
+# define __int32_c_suffix __INT32_C_SUFFIX__
+# define __int16_c_suffix __INT32_C_SUFFIX__
+# define __int8_c_suffix __INT32_C_SUFFIX__
+# else
+# undef __int32_c_suffix
+# undef __int16_c_suffix
+# undef __int8_c_suffix
+# endif /* __INT32_C_SUFFIX__ */
+#endif /* __INT32_TYPE__ */
+
+#ifdef __int_least32_t
+# ifdef __int32_c_suffix
+# define INT32_C(v) __int_c(v, __int32_c_suffix)
+# define UINT32_C(v) __uint_c(v, __int32_c_suffix)
+# else
+# define INT32_C(v) v
+# define UINT32_C(v) v ## U
+# endif /* __int32_c_suffix */
+#endif /* __int_least32_t */
+
+
+#ifdef __INT24_TYPE__
+# ifdef __INT24_C_SUFFIX__
+# define INT24_C(v) __int_c(v, __INT24_C_SUFFIX__)
+# define UINT24_C(v) __uint_c(v, __INT24_C_SUFFIX__)
+# define __int16_c_suffix __INT24_C_SUFFIX__
+# define __int8_c_suffix __INT24_C_SUFFIX__
+# else
+# define INT24_C(v) v
+# define UINT24_C(v) v ## U
+# undef __int16_c_suffix
+# undef __int8_c_suffix
+# endif /* __INT24_C_SUFFIX__ */
+#endif /* __INT24_TYPE__ */
+
+
+#ifdef __INT16_TYPE__
+# ifdef __INT16_C_SUFFIX__
+# define __int16_c_suffix __INT16_C_SUFFIX__
+# define __int8_c_suffix __INT16_C_SUFFIX__
+# else
+# undef __int16_c_suffix
+# undef __int8_c_suffix
+# endif /* __INT16_C_SUFFIX__ */
+#endif /* __INT16_TYPE__ */
+
+#ifdef __int_least16_t
+# ifdef __int16_c_suffix
+# define INT16_C(v) __int_c(v, __int16_c_suffix)
+# define UINT16_C(v) __uint_c(v, __int16_c_suffix)
+# else
+# define INT16_C(v) v
+# define UINT16_C(v) v ## U
+# endif /* __int16_c_suffix */
+#endif /* __int_least16_t */
+
+
+#ifdef __INT8_TYPE__
+# ifdef __INT8_C_SUFFIX__
+# define __int8_c_suffix __INT8_C_SUFFIX__
+# else
+# undef __int8_c_suffix
+# endif /* __INT8_C_SUFFIX__ */
+#endif /* __INT8_TYPE__ */
+
+#ifdef __int_least8_t
+# ifdef __int8_c_suffix
+# define INT8_C(v) __int_c(v, __int8_c_suffix)
+# define UINT8_C(v) __uint_c(v, __int8_c_suffix)
+# else
+# define INT8_C(v) v
+# define UINT8_C(v) v ## U
+# endif /* __int8_c_suffix */
+#endif /* __int_least8_t */
+
+
+/* C99 7.18.2.1 Limits of exact-width integer types.
+ * C99 7.18.2.2 Limits of minimum-width integer types.
+ * C99 7.18.2.3 Limits of fastest minimum-width integer types.
+ *
+ * The presence of limit macros is completely optional in C99. This
+ * implementation defines limits for all of the types (exact- and
+ * minimum-width) that it defines above, using the limits of the minimum-width
+ * type for any types that do not have exact-width representations.
+ *
+ * As in the type definitions, this section takes an approach of
+ * successive-shrinking to determine which limits to use for the standard (8,
+ * 16, 32, 64) bit widths when they don't have exact representations. It is
+ * therefore important that the definitions be kept in order of descending
+ * widths.
+ *
+ * Note that C++ should not check __STDC_LIMIT_MACROS here, contrary to the
+ * claims of the C standard (see C++ 18.3.1p2, [cstdint.syn]).
+ */
+
+#ifdef __INT64_TYPE__
+# define INT64_MAX INT64_C( 9223372036854775807)
+# define INT64_MIN (-INT64_C( 9223372036854775807)-1)
+# define UINT64_MAX UINT64_C(18446744073709551615)
+# define __INT_LEAST64_MIN INT64_MIN
+# define __INT_LEAST64_MAX INT64_MAX
+# define __UINT_LEAST64_MAX UINT64_MAX
+# define __INT_LEAST32_MIN INT64_MIN
+# define __INT_LEAST32_MAX INT64_MAX
+# define __UINT_LEAST32_MAX UINT64_MAX
+# define __INT_LEAST16_MIN INT64_MIN
+# define __INT_LEAST16_MAX INT64_MAX
+# define __UINT_LEAST16_MAX UINT64_MAX
+# define __INT_LEAST8_MIN INT64_MIN
+# define __INT_LEAST8_MAX INT64_MAX
+# define __UINT_LEAST8_MAX UINT64_MAX
+#endif /* __INT64_TYPE__ */
+
+#ifdef __INT_LEAST64_MIN
+# define INT_LEAST64_MIN __INT_LEAST64_MIN
+# define INT_LEAST64_MAX __INT_LEAST64_MAX
+# define UINT_LEAST64_MAX __UINT_LEAST64_MAX
+# define INT_FAST64_MIN __INT_LEAST64_MIN
+# define INT_FAST64_MAX __INT_LEAST64_MAX
+# define UINT_FAST64_MAX __UINT_LEAST64_MAX
+#endif /* __INT_LEAST64_MIN */
+
+
+#ifdef __INT56_TYPE__
+# define INT56_MAX INT56_C(36028797018963967)
+# define INT56_MIN (-INT56_C(36028797018963967)-1)
+# define UINT56_MAX UINT56_C(72057594037927935)
+# define INT_LEAST56_MIN INT56_MIN
+# define INT_LEAST56_MAX INT56_MAX
+# define UINT_LEAST56_MAX UINT56_MAX
+# define INT_FAST56_MIN INT56_MIN
+# define INT_FAST56_MAX INT56_MAX
+# define UINT_FAST56_MAX UINT56_MAX
+# define __INT_LEAST32_MIN INT56_MIN
+# define __INT_LEAST32_MAX INT56_MAX
+# define __UINT_LEAST32_MAX UINT56_MAX
+# define __INT_LEAST16_MIN INT56_MIN
+# define __INT_LEAST16_MAX INT56_MAX
+# define __UINT_LEAST16_MAX UINT56_MAX
+# define __INT_LEAST8_MIN INT56_MIN
+# define __INT_LEAST8_MAX INT56_MAX
+# define __UINT_LEAST8_MAX UINT56_MAX
+#endif /* __INT56_TYPE__ */
+
+
+#ifdef __INT48_TYPE__
+# define INT48_MAX INT48_C(140737488355327)
+# define INT48_MIN (-INT48_C(140737488355327)-1)
+# define UINT48_MAX UINT48_C(281474976710655)
+# define INT_LEAST48_MIN INT48_MIN
+# define INT_LEAST48_MAX INT48_MAX
+# define UINT_LEAST48_MAX UINT48_MAX
+# define INT_FAST48_MIN INT48_MIN
+# define INT_FAST48_MAX INT48_MAX
+# define UINT_FAST48_MAX UINT48_MAX
+# define __INT_LEAST32_MIN INT48_MIN
+# define __INT_LEAST32_MAX INT48_MAX
+# define __UINT_LEAST32_MAX UINT48_MAX
+# define __INT_LEAST16_MIN INT48_MIN
+# define __INT_LEAST16_MAX INT48_MAX
+# define __UINT_LEAST16_MAX UINT48_MAX
+# define __INT_LEAST8_MIN INT48_MIN
+# define __INT_LEAST8_MAX INT48_MAX
+# define __UINT_LEAST8_MAX UINT48_MAX
+#endif /* __INT48_TYPE__ */
+
+
+#ifdef __INT40_TYPE__
+# define INT40_MAX INT40_C(549755813887)
+# define INT40_MIN (-INT40_C(549755813887)-1)
+# define UINT40_MAX UINT40_C(1099511627775)
+# define INT_LEAST40_MIN INT40_MIN
+# define INT_LEAST40_MAX INT40_MAX
+# define UINT_LEAST40_MAX UINT40_MAX
+# define INT_FAST40_MIN INT40_MIN
+# define INT_FAST40_MAX INT40_MAX
+# define UINT_FAST40_MAX UINT40_MAX
+# define __INT_LEAST32_MIN INT40_MIN
+# define __INT_LEAST32_MAX INT40_MAX
+# define __UINT_LEAST32_MAX UINT40_MAX
+# define __INT_LEAST16_MIN INT40_MIN
+# define __INT_LEAST16_MAX INT40_MAX
+# define __UINT_LEAST16_MAX UINT40_MAX
+# define __INT_LEAST8_MIN INT40_MIN
+# define __INT_LEAST8_MAX INT40_MAX
+# define __UINT_LEAST8_MAX UINT40_MAX
+#endif /* __INT40_TYPE__ */
+
+
+#ifdef __INT32_TYPE__
+# define INT32_MAX INT32_C(2147483647)
+# define INT32_MIN (-INT32_C(2147483647)-1)
+# define UINT32_MAX UINT32_C(4294967295)
+# define __INT_LEAST32_MIN INT32_MIN
+# define __INT_LEAST32_MAX INT32_MAX
+# define __UINT_LEAST32_MAX UINT32_MAX
+# define __INT_LEAST16_MIN INT32_MIN
+# define __INT_LEAST16_MAX INT32_MAX
+# define __UINT_LEAST16_MAX UINT32_MAX
+# define __INT_LEAST8_MIN INT32_MIN
+# define __INT_LEAST8_MAX INT32_MAX
+# define __UINT_LEAST8_MAX UINT32_MAX
+#endif /* __INT32_TYPE__ */
+
+#ifdef __INT_LEAST32_MIN
+# define INT_LEAST32_MIN __INT_LEAST32_MIN
+# define INT_LEAST32_MAX __INT_LEAST32_MAX
+# define UINT_LEAST32_MAX __UINT_LEAST32_MAX
+# define INT_FAST32_MIN __INT_LEAST32_MIN
+# define INT_FAST32_MAX __INT_LEAST32_MAX
+# define UINT_FAST32_MAX __UINT_LEAST32_MAX
+#endif /* __INT_LEAST32_MIN */
+
+
+#ifdef __INT24_TYPE__
+# define INT24_MAX INT24_C(8388607)
+# define INT24_MIN (-INT24_C(8388607)-1)
+# define UINT24_MAX UINT24_C(16777215)
+# define INT_LEAST24_MIN INT24_MIN
+# define INT_LEAST24_MAX INT24_MAX
+# define UINT_LEAST24_MAX UINT24_MAX
+# define INT_FAST24_MIN INT24_MIN
+# define INT_FAST24_MAX INT24_MAX
+# define UINT_FAST24_MAX UINT24_MAX
+# define __INT_LEAST16_MIN INT24_MIN
+# define __INT_LEAST16_MAX INT24_MAX
+# define __UINT_LEAST16_MAX UINT24_MAX
+# define __INT_LEAST8_MIN INT24_MIN
+# define __INT_LEAST8_MAX INT24_MAX
+# define __UINT_LEAST8_MAX UINT24_MAX
+#endif /* __INT24_TYPE__ */
+
+
+#ifdef __INT16_TYPE__
+#define INT16_MAX INT16_C(32767)
+#define INT16_MIN (-INT16_C(32767)-1)
+#define UINT16_MAX UINT16_C(65535)
+# define __INT_LEAST16_MIN INT16_MIN
+# define __INT_LEAST16_MAX INT16_MAX
+# define __UINT_LEAST16_MAX UINT16_MAX
+# define __INT_LEAST8_MIN INT16_MIN
+# define __INT_LEAST8_MAX INT16_MAX
+# define __UINT_LEAST8_MAX UINT16_MAX
+#endif /* __INT16_TYPE__ */
+
+#ifdef __INT_LEAST16_MIN
+# define INT_LEAST16_MIN __INT_LEAST16_MIN
+# define INT_LEAST16_MAX __INT_LEAST16_MAX
+# define UINT_LEAST16_MAX __UINT_LEAST16_MAX
+# define INT_FAST16_MIN __INT_LEAST16_MIN
+# define INT_FAST16_MAX __INT_LEAST16_MAX
+# define UINT_FAST16_MAX __UINT_LEAST16_MAX
+#endif /* __INT_LEAST16_MIN */
+
+
+#ifdef __INT8_TYPE__
+# define INT8_MAX INT8_C(127)
+# define INT8_MIN (-INT8_C(127)-1)
+# define UINT8_MAX UINT8_C(255)
+# define __INT_LEAST8_MIN INT8_MIN
+# define __INT_LEAST8_MAX INT8_MAX
+# define __UINT_LEAST8_MAX UINT8_MAX
+#endif /* __INT8_TYPE__ */
+
+#ifdef __INT_LEAST8_MIN
+# define INT_LEAST8_MIN __INT_LEAST8_MIN
+# define INT_LEAST8_MAX __INT_LEAST8_MAX
+# define UINT_LEAST8_MAX __UINT_LEAST8_MAX
+# define INT_FAST8_MIN __INT_LEAST8_MIN
+# define INT_FAST8_MAX __INT_LEAST8_MAX
+# define UINT_FAST8_MAX __UINT_LEAST8_MAX
+#endif /* __INT_LEAST8_MIN */
+
+/* Some utility macros */
+#define __INTN_MIN(n) __stdint_join3( INT, n, _MIN)
+#define __INTN_MAX(n) __stdint_join3( INT, n, _MAX)
+#define __UINTN_MAX(n) __stdint_join3(UINT, n, _MAX)
+#define __INTN_C(n, v) __stdint_join3( INT, n, _C(v))
+#define __UINTN_C(n, v) __stdint_join3(UINT, n, _C(v))
+
+/* C99 7.18.2.4 Limits of integer types capable of holding object pointers. */
+/* C99 7.18.3 Limits of other integer types. */
+
+#define INTPTR_MIN __INTN_MIN(__INTPTR_WIDTH__)
+#define INTPTR_MAX __INTN_MAX(__INTPTR_WIDTH__)
+#define UINTPTR_MAX __UINTN_MAX(__INTPTR_WIDTH__)
+#define PTRDIFF_MIN __INTN_MIN(__PTRDIFF_WIDTH__)
+#define PTRDIFF_MAX __INTN_MAX(__PTRDIFF_WIDTH__)
+#define SIZE_MAX __UINTN_MAX(__SIZE_WIDTH__)
+
+/* ISO9899:2011 7.20 (C11 Annex K): Define RSIZE_MAX if __STDC_WANT_LIB_EXT1__
+ * is enabled. */
+#if defined(__STDC_WANT_LIB_EXT1__) && __STDC_WANT_LIB_EXT1__ >= 1
+#define RSIZE_MAX (SIZE_MAX >> 1)
+#endif
+
+/* C99 7.18.2.5 Limits of greatest-width integer types. */
+#define INTMAX_MIN __INTN_MIN(__INTMAX_WIDTH__)
+#define INTMAX_MAX __INTN_MAX(__INTMAX_WIDTH__)
+#define UINTMAX_MAX __UINTN_MAX(__INTMAX_WIDTH__)
+
+/* C99 7.18.3 Limits of other integer types. */
+#define SIG_ATOMIC_MIN __INTN_MIN(__SIG_ATOMIC_WIDTH__)
+#define SIG_ATOMIC_MAX __INTN_MAX(__SIG_ATOMIC_WIDTH__)
+#ifdef __WINT_UNSIGNED__
+# define WINT_MIN __UINTN_C(__WINT_WIDTH__, 0)
+# define WINT_MAX __UINTN_MAX(__WINT_WIDTH__)
+#else
+# define WINT_MIN __INTN_MIN(__WINT_WIDTH__)
+# define WINT_MAX __INTN_MAX(__WINT_WIDTH__)
+#endif
+
+#ifndef WCHAR_MAX
+# define WCHAR_MAX __WCHAR_MAX__
+#endif
+#ifndef WCHAR_MIN
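+/* A signed wchar_t has the signed maximum for its width; otherwise wchar_t
+ * is unsigned and its minimum is zero. */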
+# if __WCHAR_MAX__ == __INTN_MAX(__WCHAR_WIDTH__)
+# define WCHAR_MIN __INTN_MIN(__WCHAR_WIDTH__)
+# else
+# define WCHAR_MIN __UINTN_C(__WCHAR_WIDTH__, 0)
+# endif
+#endif
+
+/* 7.18.4.2 Macros for greatest-width integer constants. */
+#define INTMAX_C(v) __INTN_C(__INTMAX_WIDTH__, v)
+#define UINTMAX_C(v) __UINTN_C(__INTMAX_WIDTH__, v)
+
+#endif /* __STDC_HOSTED__ */
+#endif /* __CLANG_STDINT_H */
diff --git a/current/clang-include/stdnoreturn.h b/current/clang-include/stdnoreturn.h
new file mode 100644
index 0000000..a7a301d
--- /dev/null
+++ b/current/clang-include/stdnoreturn.h
@@ -0,0 +1,30 @@
+/*===---- stdnoreturn.h - Standard header for noreturn macro ---------------===
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+ * THE SOFTWARE.
+ *
+ *===-----------------------------------------------------------------------===
+ */
+
+#ifndef __STDNORETURN_H
+#define __STDNORETURN_H
+
+#define noreturn _Noreturn
+#define __noreturn_is_defined 1
+
+#endif /* __STDNORETURN_H */
diff --git a/current/clang-include/tbmintrin.h b/current/clang-include/tbmintrin.h
new file mode 100644
index 0000000..1d0d746
--- /dev/null
+++ b/current/clang-include/tbmintrin.h
@@ -0,0 +1,154 @@
+/*===---- tbmintrin.h - TBM intrinsics -------------------------------------===
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+ * THE SOFTWARE.
+ *
+ *===-----------------------------------------------------------------------===
+ */
+
+#ifndef __X86INTRIN_H
+#error "Never use <tbmintrin.h> directly; include <x86intrin.h> instead."
+#endif
+
+#ifndef __TBMINTRIN_H
+#define __TBMINTRIN_H
+
+/* Define the default attributes for the functions in this file. */
+#define __DEFAULT_FN_ATTRS __attribute__((__always_inline__, __nodebug__, __target__("tbm")))
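+
+/*
+ * Each intrinsic below is expressed through its defining bit identity:
+ * e.g. __blcfill_u32 clears the trailing one bits (a & (a + 1)),
+ * __blcic_u32 isolates the lowest clear bit (~a & (a + 1)), and
+ * __tzmsk_u32 builds a mask over the trailing zero bits (~a & (a - 1)).
+ */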
+
+#define __bextri_u32(a, b) \
+ ((unsigned int)__builtin_ia32_bextri_u32((unsigned int)(a), \
+ (unsigned int)(b)))
+
+static __inline__ unsigned int __DEFAULT_FN_ATTRS
+__blcfill_u32(unsigned int __a)
+{
+ return __a & (__a + 1);
+}
+
+static __inline__ unsigned int __DEFAULT_FN_ATTRS
+__blci_u32(unsigned int __a)
+{
+ return __a | ~(__a + 1);
+}
+
+static __inline__ unsigned int __DEFAULT_FN_ATTRS
+__blcic_u32(unsigned int __a)
+{
+ return ~__a & (__a + 1);
+}
+
+static __inline__ unsigned int __DEFAULT_FN_ATTRS
+__blcmsk_u32(unsigned int __a)
+{
+ return __a ^ (__a + 1);
+}
+
+static __inline__ unsigned int __DEFAULT_FN_ATTRS
+__blcs_u32(unsigned int __a)
+{
+ return __a | (__a + 1);
+}
+
+static __inline__ unsigned int __DEFAULT_FN_ATTRS
+__blsfill_u32(unsigned int __a)
+{
+ return __a | (__a - 1);
+}
+
+static __inline__ unsigned int __DEFAULT_FN_ATTRS
+__blsic_u32(unsigned int __a)
+{
+ return ~__a | (__a - 1);
+}
+
+static __inline__ unsigned int __DEFAULT_FN_ATTRS
+__t1mskc_u32(unsigned int __a)
+{
+ return ~__a | (__a + 1);
+}
+
+static __inline__ unsigned int __DEFAULT_FN_ATTRS
+__tzmsk_u32(unsigned int __a)
+{
+ return ~__a & (__a - 1);
+}
+
+#ifdef __x86_64__
+#define __bextri_u64(a, b) \
+ ((unsigned long long)__builtin_ia32_bextri_u64((unsigned long long)(a), \
+ (unsigned long long)(b)))
+
+static __inline__ unsigned long long __DEFAULT_FN_ATTRS
+__blcfill_u64(unsigned long long __a)
+{
+ return __a & (__a + 1);
+}
+
+static __inline__ unsigned long long __DEFAULT_FN_ATTRS
+__blci_u64(unsigned long long __a)
+{
+ return __a | ~(__a + 1);
+}
+
+static __inline__ unsigned long long __DEFAULT_FN_ATTRS
+__blcic_u64(unsigned long long __a)
+{
+ return ~__a & (__a + 1);
+}
+
+static __inline__ unsigned long long __DEFAULT_FN_ATTRS
+__blcmsk_u64(unsigned long long __a)
+{
+ return __a ^ (__a + 1);
+}
+
+static __inline__ unsigned long long __DEFAULT_FN_ATTRS
+__blcs_u64(unsigned long long __a)
+{
+ return __a | (__a + 1);
+}
+
+static __inline__ unsigned long long __DEFAULT_FN_ATTRS
+__blsfill_u64(unsigned long long __a)
+{
+ return __a | (__a - 1);
+}
+
+static __inline__ unsigned long long __DEFAULT_FN_ATTRS
+__blsic_u64(unsigned long long __a)
+{
+ return ~__a | (__a - 1);
+}
+
+static __inline__ unsigned long long __DEFAULT_FN_ATTRS
+__t1mskc_u64(unsigned long long __a)
+{
+ return ~__a | (__a + 1);
+}
+
+static __inline__ unsigned long long __DEFAULT_FN_ATTRS
+__tzmsk_u64(unsigned long long __a)
+{
+ return ~__a & (__a - 1);
+}
+#endif
+
+#undef __DEFAULT_FN_ATTRS
+
+#endif /* __TBMINTRIN_H */
diff --git a/current/clang-include/tgmath.h b/current/clang-include/tgmath.h
new file mode 100644
index 0000000..318e118
--- /dev/null
+++ b/current/clang-include/tgmath.h
@@ -0,0 +1,1374 @@
+/*===---- tgmath.h - Standard header for type generic math ----------------===*\
+ *
+ * Copyright (c) 2009 Howard Hinnant
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+ * THE SOFTWARE.
+ *
+\*===----------------------------------------------------------------------===*/
+
+#ifndef __TGMATH_H
+#define __TGMATH_H
+
+/* C99 7.22 Type-generic math <tgmath.h>. */
+#include <math.h>
+
+/* C++ handles type genericity with overloading in math.h. */
+#ifndef __cplusplus
+#include <complex.h>
+
+#define _TG_ATTRSp __attribute__((__overloadable__))
+#define _TG_ATTRS __attribute__((__overloadable__, __always_inline__))
+
+// promotion
+
+typedef void _Argument_type_is_not_arithmetic;
+static _Argument_type_is_not_arithmetic __tg_promote(...)
+ __attribute__((__unavailable__,__overloadable__));
+static double _TG_ATTRSp __tg_promote(int);
+static double _TG_ATTRSp __tg_promote(unsigned int);
+static double _TG_ATTRSp __tg_promote(long);
+static double _TG_ATTRSp __tg_promote(unsigned long);
+static double _TG_ATTRSp __tg_promote(long long);
+static double _TG_ATTRSp __tg_promote(unsigned long long);
+static float _TG_ATTRSp __tg_promote(float);
+static double _TG_ATTRSp __tg_promote(double);
+static long double _TG_ATTRSp __tg_promote(long double);
+static float _Complex _TG_ATTRSp __tg_promote(float _Complex);
+static double _Complex _TG_ATTRSp __tg_promote(double _Complex);
+static long double _Complex _TG_ATTRSp __tg_promote(long double _Complex);
+
+#define __tg_promote1(__x) (__typeof__(__tg_promote(__x)))
+#define __tg_promote2(__x, __y) (__typeof__(__tg_promote(__x) + \
+ __tg_promote(__y)))
+#define __tg_promote3(__x, __y, __z) (__typeof__(__tg_promote(__x) + \
+ __tg_promote(__y) + \
+ __tg_promote(__z)))
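+
+// For example, pow(2, 0.5f) promotes via __tg_promote2(int, float): the sum
+// double + float has type double, so both arguments are cast to double and
+// the call resolves to the double overload of __tg_pow, i.e. plain pow().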
+
+// acos
+
+static float
+ _TG_ATTRS
+ __tg_acos(float __x) {return acosf(__x);}
+
+static double
+ _TG_ATTRS
+ __tg_acos(double __x) {return acos(__x);}
+
+static long double
+ _TG_ATTRS
+ __tg_acos(long double __x) {return acosl(__x);}
+
+static float _Complex
+ _TG_ATTRS
+ __tg_acos(float _Complex __x) {return cacosf(__x);}
+
+static double _Complex
+ _TG_ATTRS
+ __tg_acos(double _Complex __x) {return cacos(__x);}
+
+static long double _Complex
+ _TG_ATTRS
+ __tg_acos(long double _Complex __x) {return cacosl(__x);}
+
+#undef acos
+#define acos(__x) __tg_acos(__tg_promote1((__x))(__x))
+
+// asin
+
+static float
+ _TG_ATTRS
+ __tg_asin(float __x) {return asinf(__x);}
+
+static double
+ _TG_ATTRS
+ __tg_asin(double __x) {return asin(__x);}
+
+static long double
+ _TG_ATTRS
+ __tg_asin(long double __x) {return asinl(__x);}
+
+static float _Complex
+ _TG_ATTRS
+ __tg_asin(float _Complex __x) {return casinf(__x);}
+
+static double _Complex
+ _TG_ATTRS
+ __tg_asin(double _Complex __x) {return casin(__x);}
+
+static long double _Complex
+ _TG_ATTRS
+ __tg_asin(long double _Complex __x) {return casinl(__x);}
+
+#undef asin
+#define asin(__x) __tg_asin(__tg_promote1((__x))(__x))
+
+// atan
+
+static float
+ _TG_ATTRS
+ __tg_atan(float __x) {return atanf(__x);}
+
+static double
+ _TG_ATTRS
+ __tg_atan(double __x) {return atan(__x);}
+
+static long double
+ _TG_ATTRS
+ __tg_atan(long double __x) {return atanl(__x);}
+
+static float _Complex
+ _TG_ATTRS
+ __tg_atan(float _Complex __x) {return catanf(__x);}
+
+static double _Complex
+ _TG_ATTRS
+ __tg_atan(double _Complex __x) {return catan(__x);}
+
+static long double _Complex
+ _TG_ATTRS
+ __tg_atan(long double _Complex __x) {return catanl(__x);}
+
+#undef atan
+#define atan(__x) __tg_atan(__tg_promote1((__x))(__x))
+
+// acosh
+
+static float
+ _TG_ATTRS
+ __tg_acosh(float __x) {return acoshf(__x);}
+
+static double
+ _TG_ATTRS
+ __tg_acosh(double __x) {return acosh(__x);}
+
+static long double
+ _TG_ATTRS
+ __tg_acosh(long double __x) {return acoshl(__x);}
+
+static float _Complex
+ _TG_ATTRS
+ __tg_acosh(float _Complex __x) {return cacoshf(__x);}
+
+static double _Complex
+ _TG_ATTRS
+ __tg_acosh(double _Complex __x) {return cacosh(__x);}
+
+static long double _Complex
+ _TG_ATTRS
+ __tg_acosh(long double _Complex __x) {return cacoshl(__x);}
+
+#undef acosh
+#define acosh(__x) __tg_acosh(__tg_promote1((__x))(__x))
+
+// asinh
+
+static float
+ _TG_ATTRS
+ __tg_asinh(float __x) {return asinhf(__x);}
+
+static double
+ _TG_ATTRS
+ __tg_asinh(double __x) {return asinh(__x);}
+
+static long double
+ _TG_ATTRS
+ __tg_asinh(long double __x) {return asinhl(__x);}
+
+static float _Complex
+ _TG_ATTRS
+ __tg_asinh(float _Complex __x) {return casinhf(__x);}
+
+static double _Complex
+ _TG_ATTRS
+ __tg_asinh(double _Complex __x) {return casinh(__x);}
+
+static long double _Complex
+ _TG_ATTRS
+ __tg_asinh(long double _Complex __x) {return casinhl(__x);}
+
+#undef asinh
+#define asinh(__x) __tg_asinh(__tg_promote1((__x))(__x))
+
+// atanh
+
+static float
+ _TG_ATTRS
+ __tg_atanh(float __x) {return atanhf(__x);}
+
+static double
+ _TG_ATTRS
+ __tg_atanh(double __x) {return atanh(__x);}
+
+static long double
+ _TG_ATTRS
+ __tg_atanh(long double __x) {return atanhl(__x);}
+
+static float _Complex
+ _TG_ATTRS
+ __tg_atanh(float _Complex __x) {return catanhf(__x);}
+
+static double _Complex
+ _TG_ATTRS
+ __tg_atanh(double _Complex __x) {return catanh(__x);}
+
+static long double _Complex
+ _TG_ATTRS
+ __tg_atanh(long double _Complex __x) {return catanhl(__x);}
+
+#undef atanh
+#define atanh(__x) __tg_atanh(__tg_promote1((__x))(__x))
+
+// cos
+
+static float
+ _TG_ATTRS
+ __tg_cos(float __x) {return cosf(__x);}
+
+static double
+ _TG_ATTRS
+ __tg_cos(double __x) {return cos(__x);}
+
+static long double
+ _TG_ATTRS
+ __tg_cos(long double __x) {return cosl(__x);}
+
+static float _Complex
+ _TG_ATTRS
+ __tg_cos(float _Complex __x) {return ccosf(__x);}
+
+static double _Complex
+ _TG_ATTRS
+ __tg_cos(double _Complex __x) {return ccos(__x);}
+
+static long double _Complex
+ _TG_ATTRS
+ __tg_cos(long double _Complex __x) {return ccosl(__x);}
+
+#undef cos
+#define cos(__x) __tg_cos(__tg_promote1((__x))(__x))
+
+// sin
+
+static float
+ _TG_ATTRS
+ __tg_sin(float __x) {return sinf(__x);}
+
+static double
+ _TG_ATTRS
+ __tg_sin(double __x) {return sin(__x);}
+
+static long double
+ _TG_ATTRS
+ __tg_sin(long double __x) {return sinl(__x);}
+
+static float _Complex
+ _TG_ATTRS
+ __tg_sin(float _Complex __x) {return csinf(__x);}
+
+static double _Complex
+ _TG_ATTRS
+ __tg_sin(double _Complex __x) {return csin(__x);}
+
+static long double _Complex
+ _TG_ATTRS
+ __tg_sin(long double _Complex __x) {return csinl(__x);}
+
+#undef sin
+#define sin(__x) __tg_sin(__tg_promote1((__x))(__x))
+
+// tan
+
+static float
+ _TG_ATTRS
+ __tg_tan(float __x) {return tanf(__x);}
+
+static double
+ _TG_ATTRS
+ __tg_tan(double __x) {return tan(__x);}
+
+static long double
+ _TG_ATTRS
+ __tg_tan(long double __x) {return tanl(__x);}
+
+static float _Complex
+ _TG_ATTRS
+ __tg_tan(float _Complex __x) {return ctanf(__x);}
+
+static double _Complex
+ _TG_ATTRS
+ __tg_tan(double _Complex __x) {return ctan(__x);}
+
+static long double _Complex
+ _TG_ATTRS
+ __tg_tan(long double _Complex __x) {return ctanl(__x);}
+
+#undef tan
+#define tan(__x) __tg_tan(__tg_promote1((__x))(__x))
+
+// cosh
+
+static float
+ _TG_ATTRS
+ __tg_cosh(float __x) {return coshf(__x);}
+
+static double
+ _TG_ATTRS
+ __tg_cosh(double __x) {return cosh(__x);}
+
+static long double
+ _TG_ATTRS
+ __tg_cosh(long double __x) {return coshl(__x);}
+
+static float _Complex
+ _TG_ATTRS
+ __tg_cosh(float _Complex __x) {return ccoshf(__x);}
+
+static double _Complex
+ _TG_ATTRS
+ __tg_cosh(double _Complex __x) {return ccosh(__x);}
+
+static long double _Complex
+ _TG_ATTRS
+ __tg_cosh(long double _Complex __x) {return ccoshl(__x);}
+
+#undef cosh
+#define cosh(__x) __tg_cosh(__tg_promote1((__x))(__x))
+
+// sinh
+
+static float
+ _TG_ATTRS
+ __tg_sinh(float __x) {return sinhf(__x);}
+
+static double
+ _TG_ATTRS
+ __tg_sinh(double __x) {return sinh(__x);}
+
+static long double
+ _TG_ATTRS
+ __tg_sinh(long double __x) {return sinhl(__x);}
+
+static float _Complex
+ _TG_ATTRS
+ __tg_sinh(float _Complex __x) {return csinhf(__x);}
+
+static double _Complex
+ _TG_ATTRS
+ __tg_sinh(double _Complex __x) {return csinh(__x);}
+
+static long double _Complex
+ _TG_ATTRS
+ __tg_sinh(long double _Complex __x) {return csinhl(__x);}
+
+#undef sinh
+#define sinh(__x) __tg_sinh(__tg_promote1((__x))(__x))
+
+// tanh
+
+static float
+ _TG_ATTRS
+ __tg_tanh(float __x) {return tanhf(__x);}
+
+static double
+ _TG_ATTRS
+ __tg_tanh(double __x) {return tanh(__x);}
+
+static long double
+ _TG_ATTRS
+ __tg_tanh(long double __x) {return tanhl(__x);}
+
+static float _Complex
+ _TG_ATTRS
+ __tg_tanh(float _Complex __x) {return ctanhf(__x);}
+
+static double _Complex
+ _TG_ATTRS
+ __tg_tanh(double _Complex __x) {return ctanh(__x);}
+
+static long double _Complex
+ _TG_ATTRS
+ __tg_tanh(long double _Complex __x) {return ctanhl(__x);}
+
+#undef tanh
+#define tanh(__x) __tg_tanh(__tg_promote1((__x))(__x))
+
+// exp
+
+static float
+ _TG_ATTRS
+ __tg_exp(float __x) {return expf(__x);}
+
+static double
+ _TG_ATTRS
+ __tg_exp(double __x) {return exp(__x);}
+
+static long double
+ _TG_ATTRS
+ __tg_exp(long double __x) {return expl(__x);}
+
+static float _Complex
+ _TG_ATTRS
+ __tg_exp(float _Complex __x) {return cexpf(__x);}
+
+static double _Complex
+ _TG_ATTRS
+ __tg_exp(double _Complex __x) {return cexp(__x);}
+
+static long double _Complex
+ _TG_ATTRS
+ __tg_exp(long double _Complex __x) {return cexpl(__x);}
+
+#undef exp
+#define exp(__x) __tg_exp(__tg_promote1((__x))(__x))
+
+// log
+
+static float
+ _TG_ATTRS
+ __tg_log(float __x) {return logf(__x);}
+
+static double
+ _TG_ATTRS
+ __tg_log(double __x) {return log(__x);}
+
+static long double
+ _TG_ATTRS
+ __tg_log(long double __x) {return logl(__x);}
+
+static float _Complex
+ _TG_ATTRS
+ __tg_log(float _Complex __x) {return clogf(__x);}
+
+static double _Complex
+ _TG_ATTRS
+ __tg_log(double _Complex __x) {return clog(__x);}
+
+static long double _Complex
+ _TG_ATTRS
+ __tg_log(long double _Complex __x) {return clogl(__x);}
+
+#undef log
+#define log(__x) __tg_log(__tg_promote1((__x))(__x))
+
+// pow
+
+static float
+ _TG_ATTRS
+ __tg_pow(float __x, float __y) {return powf(__x, __y);}
+
+static double
+ _TG_ATTRS
+ __tg_pow(double __x, double __y) {return pow(__x, __y);}
+
+static long double
+ _TG_ATTRS
+ __tg_pow(long double __x, long double __y) {return powl(__x, __y);}
+
+static float _Complex
+ _TG_ATTRS
+ __tg_pow(float _Complex __x, float _Complex __y) {return cpowf(__x, __y);}
+
+static double _Complex
+ _TG_ATTRS
+ __tg_pow(double _Complex __x, double _Complex __y) {return cpow(__x, __y);}
+
+static long double _Complex
+ _TG_ATTRS
+ __tg_pow(long double _Complex __x, long double _Complex __y)
+ {return cpowl(__x, __y);}
+
+#undef pow
+#define pow(__x, __y) __tg_pow(__tg_promote2((__x), (__y))(__x), \
+ __tg_promote2((__x), (__y))(__y))
+
+// sqrt
+
+static float
+ _TG_ATTRS
+ __tg_sqrt(float __x) {return sqrtf(__x);}
+
+static double
+ _TG_ATTRS
+ __tg_sqrt(double __x) {return sqrt(__x);}
+
+static long double
+ _TG_ATTRS
+ __tg_sqrt(long double __x) {return sqrtl(__x);}
+
+static float _Complex
+ _TG_ATTRS
+ __tg_sqrt(float _Complex __x) {return csqrtf(__x);}
+
+static double _Complex
+ _TG_ATTRS
+ __tg_sqrt(double _Complex __x) {return csqrt(__x);}
+
+static long double _Complex
+ _TG_ATTRS
+ __tg_sqrt(long double _Complex __x) {return csqrtl(__x);}
+
+#undef sqrt
+#define sqrt(__x) __tg_sqrt(__tg_promote1((__x))(__x))
+
+// fabs
+
+static float
+ _TG_ATTRS
+ __tg_fabs(float __x) {return fabsf(__x);}
+
+static double
+ _TG_ATTRS
+ __tg_fabs(double __x) {return fabs(__x);}
+
+static long double
+ _TG_ATTRS
+ __tg_fabs(long double __x) {return fabsl(__x);}
+
+static float
+ _TG_ATTRS
+ __tg_fabs(float _Complex __x) {return cabsf(__x);}
+
+static double
+ _TG_ATTRS
+ __tg_fabs(double _Complex __x) {return cabs(__x);}
+
+static long double
+ _TG_ATTRS
+ __tg_fabs(long double _Complex __x) {return cabsl(__x);}
+
+#undef fabs
+#define fabs(__x) __tg_fabs(__tg_promote1((__x))(__x))
+
+// atan2
+
+static float
+ _TG_ATTRS
+ __tg_atan2(float __x, float __y) {return atan2f(__x, __y);}
+
+static double
+ _TG_ATTRS
+ __tg_atan2(double __x, double __y) {return atan2(__x, __y);}
+
+static long double
+ _TG_ATTRS
+ __tg_atan2(long double __x, long double __y) {return atan2l(__x, __y);}
+
+#undef atan2
+#define atan2(__x, __y) __tg_atan2(__tg_promote2((__x), (__y))(__x), \
+ __tg_promote2((__x), (__y))(__y))
+
+// cbrt
+
+static float
+ _TG_ATTRS
+ __tg_cbrt(float __x) {return cbrtf(__x);}
+
+static double
+ _TG_ATTRS
+ __tg_cbrt(double __x) {return cbrt(__x);}
+
+static long double
+ _TG_ATTRS
+ __tg_cbrt(long double __x) {return cbrtl(__x);}
+
+#undef cbrt
+#define cbrt(__x) __tg_cbrt(__tg_promote1((__x))(__x))
+
+// ceil
+
+static float
+ _TG_ATTRS
+ __tg_ceil(float __x) {return ceilf(__x);}
+
+static double
+ _TG_ATTRS
+ __tg_ceil(double __x) {return ceil(__x);}
+
+static long double
+ _TG_ATTRS
+ __tg_ceil(long double __x) {return ceill(__x);}
+
+#undef ceil
+#define ceil(__x) __tg_ceil(__tg_promote1((__x))(__x))
+
+// copysign
+
+static float
+ _TG_ATTRS
+ __tg_copysign(float __x, float __y) {return copysignf(__x, __y);}
+
+static double
+ _TG_ATTRS
+ __tg_copysign(double __x, double __y) {return copysign(__x, __y);}
+
+static long double
+ _TG_ATTRS
+ __tg_copysign(long double __x, long double __y) {return copysignl(__x, __y);}
+
+#undef copysign
+#define copysign(__x, __y) __tg_copysign(__tg_promote2((__x), (__y))(__x), \
+ __tg_promote2((__x), (__y))(__y))
+
+// erf
+
+static float
+ _TG_ATTRS
+ __tg_erf(float __x) {return erff(__x);}
+
+static double
+ _TG_ATTRS
+ __tg_erf(double __x) {return erf(__x);}
+
+static long double
+ _TG_ATTRS
+ __tg_erf(long double __x) {return erfl(__x);}
+
+#undef erf
+#define erf(__x) __tg_erf(__tg_promote1((__x))(__x))
+
+// erfc
+
+static float
+ _TG_ATTRS
+ __tg_erfc(float __x) {return erfcf(__x);}
+
+static double
+ _TG_ATTRS
+ __tg_erfc(double __x) {return erfc(__x);}
+
+static long double
+ _TG_ATTRS
+ __tg_erfc(long double __x) {return erfcl(__x);}
+
+#undef erfc
+#define erfc(__x) __tg_erfc(__tg_promote1((__x))(__x))
+
+// exp2
+
+static float
+ _TG_ATTRS
+ __tg_exp2(float __x) {return exp2f(__x);}
+
+static double
+ _TG_ATTRS
+ __tg_exp2(double __x) {return exp2(__x);}
+
+static long double
+ _TG_ATTRS
+ __tg_exp2(long double __x) {return exp2l(__x);}
+
+#undef exp2
+#define exp2(__x) __tg_exp2(__tg_promote1((__x))(__x))
+
+// expm1
+
+static float
+ _TG_ATTRS
+ __tg_expm1(float __x) {return expm1f(__x);}
+
+static double
+ _TG_ATTRS
+ __tg_expm1(double __x) {return expm1(__x);}
+
+static long double
+ _TG_ATTRS
+ __tg_expm1(long double __x) {return expm1l(__x);}
+
+#undef expm1
+#define expm1(__x) __tg_expm1(__tg_promote1((__x))(__x))
+
+// fdim
+
+static float
+ _TG_ATTRS
+ __tg_fdim(float __x, float __y) {return fdimf(__x, __y);}
+
+static double
+ _TG_ATTRS
+ __tg_fdim(double __x, double __y) {return fdim(__x, __y);}
+
+static long double
+ _TG_ATTRS
+ __tg_fdim(long double __x, long double __y) {return fdiml(__x, __y);}
+
+#undef fdim
+#define fdim(__x, __y) __tg_fdim(__tg_promote2((__x), (__y))(__x), \
+ __tg_promote2((__x), (__y))(__y))
+
+// floor
+
+static float
+ _TG_ATTRS
+ __tg_floor(float __x) {return floorf(__x);}
+
+static double
+ _TG_ATTRS
+ __tg_floor(double __x) {return floor(__x);}
+
+static long double
+ _TG_ATTRS
+ __tg_floor(long double __x) {return floorl(__x);}
+
+#undef floor
+#define floor(__x) __tg_floor(__tg_promote1((__x))(__x))
+
+// fma
+
+static float
+ _TG_ATTRS
+ __tg_fma(float __x, float __y, float __z)
+ {return fmaf(__x, __y, __z);}
+
+static double
+ _TG_ATTRS
+ __tg_fma(double __x, double __y, double __z)
+ {return fma(__x, __y, __z);}
+
+static long double
+ _TG_ATTRS
+ __tg_fma(long double __x, long double __y, long double __z)
+ {return fmal(__x, __y, __z);}
+
+#undef fma
+#define fma(__x, __y, __z) \
+ __tg_fma(__tg_promote3((__x), (__y), (__z))(__x), \
+ __tg_promote3((__x), (__y), (__z))(__y), \
+ __tg_promote3((__x), (__y), (__z))(__z))
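+
+/* Editor's illustrative note (not upstream): the three-argument promotion
+ * works the same way, e.g. fma(1.0f, 2.0, 3.0f) promotes every operand to
+ * double and resolves to fma(double, double, double).
+ */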
+
+// fmax
+
+static float
+ _TG_ATTRS
+ __tg_fmax(float __x, float __y) {return fmaxf(__x, __y);}
+
+static double
+ _TG_ATTRS
+ __tg_fmax(double __x, double __y) {return fmax(__x, __y);}
+
+static long double
+ _TG_ATTRS
+ __tg_fmax(long double __x, long double __y) {return fmaxl(__x, __y);}
+
+#undef fmax
+#define fmax(__x, __y) __tg_fmax(__tg_promote2((__x), (__y))(__x), \
+ __tg_promote2((__x), (__y))(__y))
+
+// fmin
+
+static float
+ _TG_ATTRS
+ __tg_fmin(float __x, float __y) {return fminf(__x, __y);}
+
+static double
+ _TG_ATTRS
+ __tg_fmin(double __x, double __y) {return fmin(__x, __y);}
+
+static long double
+ _TG_ATTRS
+ __tg_fmin(long double __x, long double __y) {return fminl(__x, __y);}
+
+#undef fmin
+#define fmin(__x, __y) __tg_fmin(__tg_promote2((__x), (__y))(__x), \
+ __tg_promote2((__x), (__y))(__y))
+
+// fmod
+
+static float
+ _TG_ATTRS
+ __tg_fmod(float __x, float __y) {return fmodf(__x, __y);}
+
+static double
+ _TG_ATTRS
+ __tg_fmod(double __x, double __y) {return fmod(__x, __y);}
+
+static long double
+ _TG_ATTRS
+ __tg_fmod(long double __x, long double __y) {return fmodl(__x, __y);}
+
+#undef fmod
+#define fmod(__x, __y) __tg_fmod(__tg_promote2((__x), (__y))(__x), \
+ __tg_promote2((__x), (__y))(__y))
+
+// frexp
+
+static float
+ _TG_ATTRS
+ __tg_frexp(float __x, int* __y) {return frexpf(__x, __y);}
+
+static double
+ _TG_ATTRS
+ __tg_frexp(double __x, int* __y) {return frexp(__x, __y);}
+
+static long double
+ _TG_ATTRS
+ __tg_frexp(long double __x, int* __y) {return frexpl(__x, __y);}
+
+#undef frexp
+#define frexp(__x, __y) __tg_frexp(__tg_promote1((__x))(__x), __y)
+
+// hypot
+
+static float
+ _TG_ATTRS
+ __tg_hypot(float __x, float __y) {return hypotf(__x, __y);}
+
+static double
+ _TG_ATTRS
+ __tg_hypot(double __x, double __y) {return hypot(__x, __y);}
+
+static long double
+ _TG_ATTRS
+ __tg_hypot(long double __x, long double __y) {return hypotl(__x, __y);}
+
+#undef hypot
+#define hypot(__x, __y) __tg_hypot(__tg_promote2((__x), (__y))(__x), \
+ __tg_promote2((__x), (__y))(__y))
+
+// ilogb
+
+static int
+ _TG_ATTRS
+ __tg_ilogb(float __x) {return ilogbf(__x);}
+
+static int
+ _TG_ATTRS
+ __tg_ilogb(double __x) {return ilogb(__x);}
+
+static int
+ _TG_ATTRS
+ __tg_ilogb(long double __x) {return ilogbl(__x);}
+
+#undef ilogb
+#define ilogb(__x) __tg_ilogb(__tg_promote1((__x))(__x))
+
+// ldexp
+
+static float
+ _TG_ATTRS
+ __tg_ldexp(float __x, int __y) {return ldexpf(__x, __y);}
+
+static double
+ _TG_ATTRS
+ __tg_ldexp(double __x, int __y) {return ldexp(__x, __y);}
+
+static long double
+ _TG_ATTRS
+ __tg_ldexp(long double __x, int __y) {return ldexpl(__x, __y);}
+
+#undef ldexp
+#define ldexp(__x, __y) __tg_ldexp(__tg_promote1((__x))(__x), __y)
+
+// lgamma
+
+static float
+ _TG_ATTRS
+ __tg_lgamma(float __x) {return lgammaf(__x);}
+
+static double
+ _TG_ATTRS
+ __tg_lgamma(double __x) {return lgamma(__x);}
+
+static long double
+ _TG_ATTRS
+ __tg_lgamma(long double __x) {return lgammal(__x);}
+
+#undef lgamma
+#define lgamma(__x) __tg_lgamma(__tg_promote1((__x))(__x))
+
+// llrint
+
+static long long
+ _TG_ATTRS
+ __tg_llrint(float __x) {return llrintf(__x);}
+
+static long long
+ _TG_ATTRS
+ __tg_llrint(double __x) {return llrint(__x);}
+
+static long long
+ _TG_ATTRS
+ __tg_llrint(long double __x) {return llrintl(__x);}
+
+#undef llrint
+#define llrint(__x) __tg_llrint(__tg_promote1((__x))(__x))
+
+// llround
+
+static long long
+ _TG_ATTRS
+ __tg_llround(float __x) {return llroundf(__x);}
+
+static long long
+ _TG_ATTRS
+ __tg_llround(double __x) {return llround(__x);}
+
+static long long
+ _TG_ATTRS
+ __tg_llround(long double __x) {return llroundl(__x);}
+
+#undef llround
+#define llround(__x) __tg_llround(__tg_promote1((__x))(__x))
+
+// log10
+
+static float
+ _TG_ATTRS
+ __tg_log10(float __x) {return log10f(__x);}
+
+static double
+ _TG_ATTRS
+ __tg_log10(double __x) {return log10(__x);}
+
+static long double
+ _TG_ATTRS
+ __tg_log10(long double __x) {return log10l(__x);}
+
+#undef log10
+#define log10(__x) __tg_log10(__tg_promote1((__x))(__x))
+
+// log1p
+
+static float
+ _TG_ATTRS
+ __tg_log1p(float __x) {return log1pf(__x);}
+
+static double
+ _TG_ATTRS
+ __tg_log1p(double __x) {return log1p(__x);}
+
+static long double
+ _TG_ATTRS
+ __tg_log1p(long double __x) {return log1pl(__x);}
+
+#undef log1p
+#define log1p(__x) __tg_log1p(__tg_promote1((__x))(__x))
+
+// log2
+
+static float
+ _TG_ATTRS
+ __tg_log2(float __x) {return log2f(__x);}
+
+static double
+ _TG_ATTRS
+ __tg_log2(double __x) {return log2(__x);}
+
+static long double
+ _TG_ATTRS
+ __tg_log2(long double __x) {return log2l(__x);}
+
+#undef log2
+#define log2(__x) __tg_log2(__tg_promote1((__x))(__x))
+
+// logb
+
+static float
+ _TG_ATTRS
+ __tg_logb(float __x) {return logbf(__x);}
+
+static double
+ _TG_ATTRS
+ __tg_logb(double __x) {return logb(__x);}
+
+static long double
+ _TG_ATTRS
+ __tg_logb(long double __x) {return logbl(__x);}
+
+#undef logb
+#define logb(__x) __tg_logb(__tg_promote1((__x))(__x))
+
+// lrint
+
+static long
+ _TG_ATTRS
+ __tg_lrint(float __x) {return lrintf(__x);}
+
+static long
+ _TG_ATTRS
+ __tg_lrint(double __x) {return lrint(__x);}
+
+static long
+ _TG_ATTRS
+ __tg_lrint(long double __x) {return lrintl(__x);}
+
+#undef lrint
+#define lrint(__x) __tg_lrint(__tg_promote1((__x))(__x))
+
+// lround
+
+static long
+ _TG_ATTRS
+ __tg_lround(float __x) {return lroundf(__x);}
+
+static long
+ _TG_ATTRS
+ __tg_lround(double __x) {return lround(__x);}
+
+static long
+ _TG_ATTRS
+ __tg_lround(long double __x) {return lroundl(__x);}
+
+#undef lround
+#define lround(__x) __tg_lround(__tg_promote1((__x))(__x))
+
+// nearbyint
+
+static float
+ _TG_ATTRS
+ __tg_nearbyint(float __x) {return nearbyintf(__x);}
+
+static double
+ _TG_ATTRS
+ __tg_nearbyint(double __x) {return nearbyint(__x);}
+
+static long double
+ _TG_ATTRS
+ __tg_nearbyint(long double __x) {return nearbyintl(__x);}
+
+#undef nearbyint
+#define nearbyint(__x) __tg_nearbyint(__tg_promote1((__x))(__x))
+
+// nextafter
+
+static float
+ _TG_ATTRS
+ __tg_nextafter(float __x, float __y) {return nextafterf(__x, __y);}
+
+static double
+ _TG_ATTRS
+ __tg_nextafter(double __x, double __y) {return nextafter(__x, __y);}
+
+static long double
+ _TG_ATTRS
+ __tg_nextafter(long double __x, long double __y) {return nextafterl(__x, __y);}
+
+#undef nextafter
+#define nextafter(__x, __y) __tg_nextafter(__tg_promote2((__x), (__y))(__x), \
+ __tg_promote2((__x), (__y))(__y))
+
+// nexttoward
+
+static float
+ _TG_ATTRS
+ __tg_nexttoward(float __x, long double __y) {return nexttowardf(__x, __y);}
+
+static double
+ _TG_ATTRS
+ __tg_nexttoward(double __x, long double __y) {return nexttoward(__x, __y);}
+
+static long double
+ _TG_ATTRS
+ __tg_nexttoward(long double __x, long double __y) {return nexttowardl(__x, __y);}
+
+#undef nexttoward
+#define nexttoward(__x, __y) __tg_nexttoward(__tg_promote1((__x))(__x), (__y))
+
+// remainder
+
+static float
+ _TG_ATTRS
+ __tg_remainder(float __x, float __y) {return remainderf(__x, __y);}
+
+static double
+ _TG_ATTRS
+ __tg_remainder(double __x, double __y) {return remainder(__x, __y);}
+
+static long double
+ _TG_ATTRS
+ __tg_remainder(long double __x, long double __y) {return remainderl(__x, __y);}
+
+#undef remainder
+#define remainder(__x, __y) __tg_remainder(__tg_promote2((__x), (__y))(__x), \
+ __tg_promote2((__x), (__y))(__y))
+
+// remquo
+
+static float
+ _TG_ATTRS
+ __tg_remquo(float __x, float __y, int* __z)
+ {return remquof(__x, __y, __z);}
+
+static double
+ _TG_ATTRS
+ __tg_remquo(double __x, double __y, int* __z)
+ {return remquo(__x, __y, __z);}
+
+static long double
+ _TG_ATTRS
+ __tg_remquo(long double __x, long double __y, int* __z)
+ {return remquol(__x, __y, __z);}
+
+#undef remquo
+#define remquo(__x, __y, __z) \
+ __tg_remquo(__tg_promote2((__x), (__y))(__x), \
+ __tg_promote2((__x), (__y))(__y), \
+ (__z))
+
+// rint
+
+static float
+ _TG_ATTRS
+ __tg_rint(float __x) {return rintf(__x);}
+
+static double
+ _TG_ATTRS
+ __tg_rint(double __x) {return rint(__x);}
+
+static long double
+ _TG_ATTRS
+ __tg_rint(long double __x) {return rintl(__x);}
+
+#undef rint
+#define rint(__x) __tg_rint(__tg_promote1((__x))(__x))
+
+// round
+
+static float
+ _TG_ATTRS
+ __tg_round(float __x) {return roundf(__x);}
+
+static double
+ _TG_ATTRS
+ __tg_round(double __x) {return round(__x);}
+
+static long double
+ _TG_ATTRS
+ __tg_round(long double __x) {return roundl(__x);}
+
+#undef round
+#define round(__x) __tg_round(__tg_promote1((__x))(__x))
+
+// scalbn
+
+static float
+ _TG_ATTRS
+ __tg_scalbn(float __x, int __y) {return scalbnf(__x, __y);}
+
+static double
+ _TG_ATTRS
+ __tg_scalbn(double __x, int __y) {return scalbn(__x, __y);}
+
+static long double
+ _TG_ATTRS
+ __tg_scalbn(long double __x, int __y) {return scalbnl(__x, __y);}
+
+#undef scalbn
+#define scalbn(__x, __y) __tg_scalbn(__tg_promote1((__x))(__x), __y)
+
+// scalbln
+
+static float
+ _TG_ATTRS
+ __tg_scalbln(float __x, long __y) {return scalblnf(__x, __y);}
+
+static double
+ _TG_ATTRS
+ __tg_scalbln(double __x, long __y) {return scalbln(__x, __y);}
+
+static long double
+ _TG_ATTRS
+ __tg_scalbln(long double __x, long __y) {return scalblnl(__x, __y);}
+
+#undef scalbln
+#define scalbln(__x, __y) __tg_scalbln(__tg_promote1((__x))(__x), __y)
+
+// tgamma
+
+static float
+ _TG_ATTRS
+ __tg_tgamma(float __x) {return tgammaf(__x);}
+
+static double
+ _TG_ATTRS
+ __tg_tgamma(double __x) {return tgamma(__x);}
+
+static long double
+ _TG_ATTRS
+ __tg_tgamma(long double __x) {return tgammal(__x);}
+
+#undef tgamma
+#define tgamma(__x) __tg_tgamma(__tg_promote1((__x))(__x))
+
+// trunc
+
+static float
+ _TG_ATTRS
+ __tg_trunc(float __x) {return truncf(__x);}
+
+static double
+ _TG_ATTRS
+ __tg_trunc(double __x) {return trunc(__x);}
+
+static long double
+ _TG_ATTRS
+ __tg_trunc(long double __x) {return truncl(__x);}
+
+#undef trunc
+#define trunc(__x) __tg_trunc(__tg_promote1((__x))(__x))
+
+// carg
+
+static float
+ _TG_ATTRS
+ __tg_carg(float __x) {return atan2f(0.F, __x);}
+
+static double
+ _TG_ATTRS
+ __tg_carg(double __x) {return atan2(0., __x);}
+
+static long double
+ _TG_ATTRS
+ __tg_carg(long double __x) {return atan2l(0.L, __x);}
+
+static float
+ _TG_ATTRS
+ __tg_carg(float _Complex __x) {return cargf(__x);}
+
+static double
+ _TG_ATTRS
+ __tg_carg(double _Complex __x) {return carg(__x);}
+
+static long double
+ _TG_ATTRS
+ __tg_carg(long double _Complex __x) {return cargl(__x);}
+
+#undef carg
+#define carg(__x) __tg_carg(__tg_promote1((__x))(__x))
+
+// cimag
+
+static float
+ _TG_ATTRS
+ __tg_cimag(float __x) {return 0;}
+
+static double
+ _TG_ATTRS
+ __tg_cimag(double __x) {return 0;}
+
+static long double
+ _TG_ATTRS
+ __tg_cimag(long double __x) {return 0;}
+
+static float
+ _TG_ATTRS
+ __tg_cimag(float _Complex __x) {return cimagf(__x);}
+
+static double
+ _TG_ATTRS
+ __tg_cimag(double _Complex __x) {return cimag(__x);}
+
+static long double
+ _TG_ATTRS
+ __tg_cimag(long double _Complex __x) {return cimagl(__x);}
+
+#undef cimag
+#define cimag(__x) __tg_cimag(__tg_promote1((__x))(__x))
+
+// conj
+
+static float _Complex
+ _TG_ATTRS
+ __tg_conj(float __x) {return __x;}
+
+static double _Complex
+ _TG_ATTRS
+ __tg_conj(double __x) {return __x;}
+
+static long double _Complex
+ _TG_ATTRS
+ __tg_conj(long double __x) {return __x;}
+
+static float _Complex
+ _TG_ATTRS
+ __tg_conj(float _Complex __x) {return conjf(__x);}
+
+static double _Complex
+ _TG_ATTRS
+ __tg_conj(double _Complex __x) {return conj(__x);}
+
+static long double _Complex
+ _TG_ATTRS
+ __tg_conj(long double _Complex __x) {return conjl(__x);}
+
+#undef conj
+#define conj(__x) __tg_conj(__tg_promote1((__x))(__x))
+
+// cproj
+
+static float _Complex
+ _TG_ATTRS
+ __tg_cproj(float __x) {return cprojf(__x);}
+
+static double _Complex
+ _TG_ATTRS
+ __tg_cproj(double __x) {return cproj(__x);}
+
+static long double _Complex
+ _TG_ATTRS
+ __tg_cproj(long double __x) {return cprojl(__x);}
+
+static float _Complex
+ _TG_ATTRS
+ __tg_cproj(float _Complex __x) {return cprojf(__x);}
+
+static double _Complex
+ _TG_ATTRS
+ __tg_cproj(double _Complex __x) {return cproj(__x);}
+
+static long double _Complex
+ _TG_ATTRS
+ __tg_cproj(long double _Complex __x) {return cprojl(__x);}
+
+#undef cproj
+#define cproj(__x) __tg_cproj(__tg_promote1((__x))(__x))
+
+// creal
+
+static float
+ _TG_ATTRS
+ __tg_creal(float __x) {return __x;}
+
+static double
+ _TG_ATTRS
+ __tg_creal(double __x) {return __x;}
+
+static long double
+ _TG_ATTRS
+ __tg_creal(long double __x) {return __x;}
+
+static float
+ _TG_ATTRS
+ __tg_creal(float _Complex __x) {return crealf(__x);}
+
+static double
+ _TG_ATTRS
+ __tg_creal(double _Complex __x) {return creal(__x);}
+
+static long double
+ _TG_ATTRS
+ __tg_creal(long double _Complex __x) {return creall(__x);}
+
+#undef creal
+#define creal(__x) __tg_creal(__tg_promote1((__x))(__x))
+
+#undef _TG_ATTRSp
+#undef _TG_ATTRS
+
+#endif /* __cplusplus */
+#endif /* __TGMATH_H */
diff --git a/current/clang-include/tmmintrin.h b/current/clang-include/tmmintrin.h
new file mode 100644
index 0000000..a72796b
--- /dev/null
+++ b/current/clang-include/tmmintrin.h
@@ -0,0 +1,773 @@
+/*===---- tmmintrin.h - SSSE3 intrinsics -----------------------------------===
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+ * THE SOFTWARE.
+ *
+ *===-----------------------------------------------------------------------===
+ */
+
+#ifndef __TMMINTRIN_H
+#define __TMMINTRIN_H
+
+#include <pmmintrin.h>
+
+/* Define the default attributes for the functions in this file. */
+#define __DEFAULT_FN_ATTRS __attribute__((__always_inline__, __nodebug__, __target__("ssse3")))
+
+/// \brief Computes the absolute value of each of the packed 8-bit signed
+/// integers in the source operand and stores the 8-bit unsigned integer
+/// results in the destination.
+///
+/// \headerfile <x86intrin.h>
+///
+/// This intrinsic corresponds to the \c PABSB instruction.
+///
+/// \param __a
+/// A 64-bit vector of [8 x i8].
+/// \returns A 64-bit integer vector containing the absolute values of the
+/// elements in the operand.
+static __inline__ __m64 __DEFAULT_FN_ATTRS
+_mm_abs_pi8(__m64 __a)
+{
+ return (__m64)__builtin_ia32_pabsb((__v8qi)__a);
+}
+
+/// \brief Computes the absolute value of each of the packed 8-bit signed
+/// integers in the source operand and stores the 8-bit unsigned integer
+/// results in the destination.
+///
+/// \headerfile <x86intrin.h>
+///
+/// This intrinsic corresponds to the \c VPABSB instruction.
+///
+/// \param __a
+/// A 128-bit vector of [16 x i8].
+/// \returns A 128-bit integer vector containing the absolute values of the
+/// elements in the operand.
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_abs_epi8(__m128i __a)
+{
+ return (__m128i)__builtin_ia32_pabsb128((__v16qi)__a);
+}
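+
+/* Editor's illustrative sketch (not part of the upstream header), assuming
+ * an SSSE3-capable target:
+ *
+ *   __m128i v = _mm_set1_epi8(-5);
+ *   __m128i r = _mm_abs_epi8(v);   // every byte of r is 5
+ *
+ * Note the usual two's-complement caveat: an input byte of -128 stays -128.
+ */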
+
+/// \brief Computes the absolute value of each of the packed 16-bit signed
+/// integers in the source operand and stores the 16-bit unsigned integer
+/// results in the destination.
+///
+/// \headerfile <x86intrin.h>
+///
+/// This intrinsic corresponds to the \c PABSW instruction.
+///
+/// \param __a
+/// A 64-bit vector of [4 x i16].
+/// \returns A 64-bit integer vector containing the absolute values of the
+/// elements in the operand.
+static __inline__ __m64 __DEFAULT_FN_ATTRS
+_mm_abs_pi16(__m64 __a)
+{
+ return (__m64)__builtin_ia32_pabsw((__v4hi)__a);
+}
+
+/// \brief Computes the absolute value of each of the packed 16-bit signed
+/// integers in the source operand and stores the 16-bit unsigned integer
+/// results in the destination.
+///
+/// \headerfile <x86intrin.h>
+///
+/// This intrinsic corresponds to the \c VPABSW instruction.
+///
+/// \param __a
+/// A 128-bit vector of [8 x i16].
+/// \returns A 128-bit integer vector containing the absolute values of the
+/// elements in the operand.
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_abs_epi16(__m128i __a)
+{
+ return (__m128i)__builtin_ia32_pabsw128((__v8hi)__a);
+}
+
+/// \brief Computes the absolute value of each of the packed 32-bit signed
+/// integers in the source operand and stores the 32-bit unsigned integer
+/// results in the destination.
+///
+/// \headerfile <x86intrin.h>
+///
+/// This intrinsic corresponds to the \c PABSD instruction.
+///
+/// \param __a
+/// A 64-bit vector of [2 x i32].
+/// \returns A 64-bit integer vector containing the absolute values of the
+/// elements in the operand.
+static __inline__ __m64 __DEFAULT_FN_ATTRS
+_mm_abs_pi32(__m64 __a)
+{
+ return (__m64)__builtin_ia32_pabsd((__v2si)__a);
+}
+
+/// \brief Computes the absolute value of each of the packed 32-bit signed
+/// integers in the source operand and stores the 32-bit unsigned integer
+/// results in the destination.
+///
+/// \headerfile <x86intrin.h>
+///
+/// This intrinsic corresponds to the \c VPABSD instruction.
+///
+/// \param __a
+/// A 128-bit vector of [4 x i32].
+/// \returns A 128-bit integer vector containing the absolute values of the
+/// elements in the operand.
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_abs_epi32(__m128i __a)
+{
+ return (__m128i)__builtin_ia32_pabsd128((__v4si)__a);
+}
+
+/// \brief Concatenates the two 128-bit integer vector operands, and
+/// right-shifts the result by the number of bytes specified in the immediate
+/// operand.
+///
+/// \headerfile <x86intrin.h>
+///
+/// \code
+/// __m128i _mm_alignr_epi8(__m128i a, __m128i b, const int n);
+/// \endcode
+///
+/// This intrinsic corresponds to the \c PALIGNR instruction.
+///
+/// \param a
+/// A 128-bit vector of [16 x i8] containing one of the source operands.
+/// \param b
+/// A 128-bit vector of [16 x i8] containing one of the source operands.
+/// \param n
+/// An immediate operand specifying how many bytes to right-shift the result.
+/// \returns A 128-bit integer vector containing the concatenated right-shifted
+/// value.
+#define _mm_alignr_epi8(a, b, n) __extension__ ({ \
+ (__m128i)__builtin_ia32_palignr128((__v16qi)(__m128i)(a), \
+ (__v16qi)(__m128i)(b), (n)); })
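+
+/* Editor's illustrative note (not upstream): the concatenation places a in
+ * the upper half and b in the lower half, so with n = 4 the result holds
+ * bytes 4..15 of b followed by bytes 0..3 of a:
+ *
+ *   __m128i r = _mm_alignr_epi8(a, b, 4);
+ */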
+
+/// \brief Concatenates the two 64-bit integer vector operands, and right-shifts
+/// the result by the number of bytes specified in the immediate operand.
+///
+/// \headerfile <x86intrin.h>
+///
+/// \code
+/// __m64 _mm_alignr_pi8(__m64 a, __m64 b, const int n);
+/// \endcode
+///
+/// This intrinsic corresponds to the \c PALIGNR instruction.
+///
+/// \param a
+/// A 64-bit vector of [8 x i8] containing one of the source operands.
+/// \param b
+/// A 64-bit vector of [8 x i8] containing one of the source operands.
+/// \param n
+/// An immediate operand specifying how many bytes to right-shift the result.
+/// \returns A 64-bit integer vector containing the concatenated right-shifted
+/// value.
+#define _mm_alignr_pi8(a, b, n) __extension__ ({ \
+ (__m64)__builtin_ia32_palignr((__v8qi)(__m64)(a), (__v8qi)(__m64)(b), (n)); })
+
+/// \brief Horizontally adds the adjacent pairs of values contained in 2 packed
+/// 128-bit vectors of [8 x i16].
+///
+/// \headerfile <x86intrin.h>
+///
+/// This intrinsic corresponds to the \c VPHADDW instruction.
+///
+/// \param __a
+/// A 128-bit vector of [8 x i16] containing one of the source operands. The
+/// horizontal sums of the values are stored in the lower bits of the
+/// destination.
+/// \param __b
+/// A 128-bit vector of [8 x i16] containing one of the source operands. The
+/// horizontal sums of the values are stored in the upper bits of the
+/// destination.
+/// \returns A 128-bit vector of [8 x i16] containing the horizontal sums of
+/// both operands.
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_hadd_epi16(__m128i __a, __m128i __b)
+{
+ return (__m128i)__builtin_ia32_phaddw128((__v8hi)__a, (__v8hi)__b);
+}
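+
+/* Editor's illustrative note (not upstream): for __a = {a0..a7} and
+ * __b = {b0..b7} the result is
+ * {a0+a1, a2+a3, a4+a5, a6+a7, b0+b1, b2+b3, b4+b5, b6+b7},
+ * with sums wrapping on overflow.
+ */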
+
+/// \brief Horizontally adds the adjacent pairs of values contained in 2 packed
+/// 128-bit vectors of [4 x i32].
+///
+/// \headerfile <x86intrin.h>
+///
+/// This intrinsic corresponds to the \c VPHADDD instruction.
+///
+/// \param __a
+/// A 128-bit vector of [4 x i32] containing one of the source operands. The
+/// horizontal sums of the values are stored in the lower bits of the
+/// destination.
+/// \param __b
+/// A 128-bit vector of [4 x i32] containing one of the source operands. The
+/// horizontal sums of the values are stored in the upper bits of the
+/// destination.
+/// \returns A 128-bit vector of [4 x i32] containing the horizontal sums of
+/// both operands.
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_hadd_epi32(__m128i __a, __m128i __b)
+{
+ return (__m128i)__builtin_ia32_phaddd128((__v4si)__a, (__v4si)__b);
+}
+
+/// \brief Horizontally adds the adjacent pairs of values contained in 2 packed
+/// 64-bit vectors of [4 x i16].
+///
+/// \headerfile <x86intrin.h>
+///
+/// This intrinsic corresponds to the \c PHADDW instruction.
+///
+/// \param __a
+/// A 64-bit vector of [4 x i16] containing one of the source operands. The
+/// horizontal sums of the values are stored in the lower bits of the
+/// destination.
+/// \param __b
+/// A 64-bit vector of [4 x i16] containing one of the source operands. The
+/// horizontal sums of the values are stored in the upper bits of the
+/// destination.
+/// \returns A 64-bit vector of [4 x i16] containing the horizontal sums of both
+/// operands.
+static __inline__ __m64 __DEFAULT_FN_ATTRS
+_mm_hadd_pi16(__m64 __a, __m64 __b)
+{
+ return (__m64)__builtin_ia32_phaddw((__v4hi)__a, (__v4hi)__b);
+}
+
+/// \brief Horizontally adds the adjacent pairs of values contained in 2 packed
+/// 64-bit vectors of [2 x i32].
+///
+/// \headerfile <x86intrin.h>
+///
+/// This intrinsic corresponds to the \c PHADDD instruction.
+///
+/// \param __a
+/// A 64-bit vector of [2 x i32] containing one of the source operands. The
+/// horizontal sums of the values are stored in the lower bits of the
+/// destination.
+/// \param __b
+/// A 64-bit vector of [2 x i32] containing one of the source operands. The
+/// horizontal sums of the values are stored in the upper bits of the
+/// destination.
+/// \returns A 64-bit vector of [2 x i32] containing the horizontal sums of both
+/// operands.
+static __inline__ __m64 __DEFAULT_FN_ATTRS
+_mm_hadd_pi32(__m64 __a, __m64 __b)
+{
+ return (__m64)__builtin_ia32_phaddd((__v2si)__a, (__v2si)__b);
+}
+
+/// \brief Horizontally adds the adjacent pairs of values contained in 2 packed
+/// 128-bit vectors of [8 x i16]. Positive sums greater than 7FFFh are
+/// saturated to 7FFFh. Negative sums less than 8000h are saturated to 8000h.
+///
+/// \headerfile <x86intrin.h>
+///
+/// This intrinsic corresponds to the \c VPHADDSW instruction.
+///
+/// \param __a
+/// A 128-bit vector of [8 x i16] containing one of the source operands. The
+/// horizontal sums of the values are stored in the lower bits of the
+/// destination.
+/// \param __b
+/// A 128-bit vector of [8 x i16] containing one of the source operands. The
+/// horizontal sums of the values are stored in the upper bits of the
+/// destination.
+/// \returns A 128-bit vector of [8 x i16] containing the horizontal saturated
+/// sums of both operands.
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_hadds_epi16(__m128i __a, __m128i __b)
+{
+ return (__m128i)__builtin_ia32_phaddsw128((__v8hi)__a, (__v8hi)__b);
+}
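+
+/* Editor's illustrative note (not upstream): an adjacent pair such as
+ * 0x7FFF + 0x0001 saturates to 0x7FFF here, where _mm_hadd_epi16 would wrap
+ * to 0x8000.
+ */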
+
+/// \brief Horizontally adds the adjacent pairs of values contained in 2 packed
+/// 64-bit vectors of [4 x i16]. Positive sums greater than 7FFFh are
+/// saturated to 7FFFh. Negative sums less than 8000h are saturated to 8000h.
+///
+/// \headerfile <x86intrin.h>
+///
+/// This intrinsic corresponds to the \c PHADDSW instruction.
+///
+/// \param __a
+/// A 64-bit vector of [4 x i16] containing one of the source operands. The
+/// horizontal sums of the values are stored in the lower bits of the
+/// destination.
+/// \param __b
+/// A 64-bit vector of [4 x i16] containing one of the source operands. The
+/// horizontal sums of the values are stored in the upper bits of the
+/// destination.
+/// \returns A 64-bit vector of [4 x i16] containing the horizontal saturated
+/// sums of both operands.
+static __inline__ __m64 __DEFAULT_FN_ATTRS
+_mm_hadds_pi16(__m64 __a, __m64 __b)
+{
+ return (__m64)__builtin_ia32_phaddsw((__v4hi)__a, (__v4hi)__b);
+}
+
+/// \brief Horizontally subtracts the adjacent pairs of values contained in 2
+/// packed 128-bit vectors of [8 x i16].
+///
+/// \headerfile <x86intrin.h>
+///
+/// This intrinsic corresponds to the \c VPHSUBW instruction.
+///
+/// \param __a
+/// A 128-bit vector of [8 x i16] containing one of the source operands. The
+/// horizontal differences between the values are stored in the lower bits of
+/// the destination.
+/// \param __b
+/// A 128-bit vector of [8 x i16] containing one of the source operands. The
+/// horizontal differences between the values are stored in the upper bits of
+/// the destination.
+/// \returns A 128-bit vector of [8 x i16] containing the horizontal differences
+/// of both operands.
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_hsub_epi16(__m128i __a, __m128i __b)
+{
+ return (__m128i)__builtin_ia32_phsubw128((__v8hi)__a, (__v8hi)__b);
+}
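+
+/* Editor's illustrative note (not upstream): each output lane is the first
+ * element of a pair minus the second, i.e.
+ * {a0-a1, a2-a3, a4-a5, a6-a7, b0-b1, b2-b3, b4-b5, b6-b7}.
+ */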
+
+/// \brief Horizontally subtracts the adjacent pairs of values contained in 2
+/// packed 128-bit vectors of [4 x i32].
+///
+/// \headerfile <x86intrin.h>
+///
+/// This intrinsic corresponds to the \c VPHSUBD instruction.
+///
+/// \param __a
+/// A 128-bit vector of [4 x i32] containing one of the source operands. The
+/// horizontal differences between the values are stored in the lower bits of
+/// the destination.
+/// \param __b
+/// A 128-bit vector of [4 x i32] containing one of the source operands. The
+/// horizontal differences between the values are stored in the upper bits of
+/// the destination.
+/// \returns A 128-bit vector of [4 x i32] containing the horizontal differences
+/// of both operands.
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_hsub_epi32(__m128i __a, __m128i __b)
+{
+ return (__m128i)__builtin_ia32_phsubd128((__v4si)__a, (__v4si)__b);
+}
+
+/// \brief Horizontally subtracts the adjacent pairs of values contained in 2
+/// packed 64-bit vectors of [4 x i16].
+///
+/// \headerfile <x86intrin.h>
+///
+/// This intrinsic corresponds to the \c PHSUBW instruction.
+///
+/// \param __a
+/// A 64-bit vector of [4 x i16] containing one of the source operands. The
+/// horizontal differences between the values are stored in the lower bits of
+/// the destination.
+/// \param __b
+/// A 64-bit vector of [4 x i16] containing one of the source operands. The
+/// horizontal differences between the values are stored in the upper bits of
+/// the destination.
+/// \returns A 64-bit vector of [4 x i16] containing the horizontal differences
+/// of both operands.
+static __inline__ __m64 __DEFAULT_FN_ATTRS
+_mm_hsub_pi16(__m64 __a, __m64 __b)
+{
+ return (__m64)__builtin_ia32_phsubw((__v4hi)__a, (__v4hi)__b);
+}
+
+/// \brief Horizontally subtracts the adjacent pairs of values contained in 2
+/// packed 64-bit vectors of [2 x i32].
+///
+/// \headerfile <x86intrin.h>
+///
+/// This intrinsic corresponds to the \c PHSUBD instruction.
+///
+/// \param __a
+/// A 64-bit vector of [2 x i32] containing one of the source operands. The
+/// horizontal differences between the values are stored in the lower bits of
+/// the destination.
+/// \param __b
+/// A 64-bit vector of [2 x i32] containing one of the source operands. The
+/// horizontal differences between the values are stored in the upper bits of
+/// the destination.
+/// \returns A 64-bit vector of [2 x i32] containing the horizontal differences
+/// of both operands.
+static __inline__ __m64 __DEFAULT_FN_ATTRS
+_mm_hsub_pi32(__m64 __a, __m64 __b)
+{
+ return (__m64)__builtin_ia32_phsubd((__v2si)__a, (__v2si)__b);
+}
+
+/// \brief Horizontally subtracts the adjacent pairs of values contained in 2
+/// packed 128-bit vectors of [8 x i16]. Positive differences greater than
+/// 7FFFh are saturated to 7FFFh. Negative differences less than 8000h are
+/// saturated to 8000h.
+///
+/// \headerfile <x86intrin.h>
+///
+/// This intrinsic corresponds to the \c VPHSUBSW instruction.
+///
+/// \param __a
+/// A 128-bit vector of [8 x i16] containing one of the source operands. The
+/// horizontal differences between the values are stored in the lower bits of
+/// the destination.
+/// \param __b
+/// A 128-bit vector of [8 x i16] containing one of the source operands. The
+/// horizontal differences between the values are stored in the upper bits of
+/// the destination.
+/// \returns A 128-bit vector of [8 x i16] containing the horizontal saturated
+/// differences of both operands.
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_hsubs_epi16(__m128i __a, __m128i __b)
+{
+ return (__m128i)__builtin_ia32_phsubsw128((__v8hi)__a, (__v8hi)__b);
+}
+
+/// \brief Horizontally subtracts the adjacent pairs of values contained in 2
+/// packed 64-bit vectors of [4 x i16]. Positive differences greater than
+/// 7FFFh are saturated to 7FFFh. Negative differences less than 8000h are
+/// saturated to 8000h.
+///
+/// \headerfile <x86intrin.h>
+///
+/// This intrinsic corresponds to the \c PHSUBSW instruction.
+///
+/// \param __a
+/// A 64-bit vector of [4 x i16] containing one of the source operands. The
+/// horizontal differences between the values are stored in the lower bits of
+/// the destination.
+/// \param __b
+/// A 64-bit vector of [4 x i16] containing one of the source operands. The
+/// horizontal differences between the values are stored in the upper bits of
+/// the destination.
+/// \returns A 64-bit vector of [4 x i16] containing the horizontal saturated
+/// differences of both operands.
+static __inline__ __m64 __DEFAULT_FN_ATTRS
+_mm_hsubs_pi16(__m64 __a, __m64 __b)
+{
+ return (__m64)__builtin_ia32_phsubsw((__v4hi)__a, (__v4hi)__b);
+}
+
+/// \brief Multiplies corresponding pairs of packed 8-bit unsigned integer
+/// values contained in the first source operand and packed 8-bit signed
+/// integer values contained in the second source operand, adds pairs of
+/// contiguous products with signed saturation, and writes the 16-bit sums to
+/// the corresponding bits in the destination. For example, bits [7:0] of
+/// both operands are multiplied, bits [15:8] of both operands are
+/// multiplied, and the sum of both results is written to bits [15:0] of the
+/// destination.
+///
+/// \headerfile <x86intrin.h>
+///
+/// This intrinsic corresponds to the \c VPMADDUBSW instruction.
+///
+/// \param __a
+/// A 128-bit integer vector containing the first source operand.
+/// \param __b
+/// A 128-bit integer vector containing the second source operand.
+/// \returns A 128-bit integer vector containing the sums of products of both
+/// operands:
+/// R0 := (__a0 * __b0) + (__a1 * __b1)
+/// R1 := (__a2 * __b2) + (__a3 * __b3)
+/// R2 := (__a4 * __b4) + (__a5 * __b5)
+/// R3 := (__a6 * __b6) + (__a7 * __b7)
+/// R4 := (__a8 * __b8) + (__a9 * __b9)
+/// R5 := (__a10 * __b10) + (__a11 * __b11)
+/// R6 := (__a12 * __b12) + (__a13 * __b13)
+/// R7 := (__a14 * __b14) + (__a15 * __b15)
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_maddubs_epi16(__m128i __a, __m128i __b)
+{
+ return (__m128i)__builtin_ia32_pmaddubsw128((__v16qi)__a, (__v16qi)__b);
+}
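+
+/* Editor's illustrative note (not upstream): __a is treated as unsigned and
+ * __b as signed, e.g. with __a0 = 255, __b0 = -1, __a1 = 0, __b1 = 0:
+ * R0 = 255*(-1) + 0*0 = -255, which fits in 16 bits without saturating.
+ */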
+
+/// \brief Multiplies corresponding pairs of packed 8-bit unsigned integer
+/// values contained in the first source operand and packed 8-bit signed
+/// integer values contained in the second source operand, adds pairs of
+/// contiguous products with signed saturation, and writes the 16-bit sums to
+/// the corresponding bits in the destination. For example, bits [7:0] of
+/// both operands are multiplied, bits [15:8] of both operands are
+/// multiplied, and the sum of both results is written to bits [15:0] of the
+/// destination.
+///
+/// \headerfile <x86intrin.h>
+///
+/// This intrinsic corresponds to the \c PMADDUBSW instruction.
+///
+/// \param __a
+/// A 64-bit integer vector containing the first source operand.
+/// \param __b
+/// A 64-bit integer vector containing the second source operand.
+/// \returns A 64-bit integer vector containing the sums of products of both
+/// operands:
+/// R0 := (__a0 * __b0) + (__a1 * __b1)
+/// R1 := (__a2 * __b2) + (__a3 * __b3)
+/// R2 := (__a4 * __b4) + (__a5 * __b5)
+/// R3 := (__a6 * __b6) + (__a7 * __b7)
+static __inline__ __m64 __DEFAULT_FN_ATTRS
+_mm_maddubs_pi16(__m64 __a, __m64 __b)
+{
+ return (__m64)__builtin_ia32_pmaddubsw((__v8qi)__a, (__v8qi)__b);
+}
+
+/// \brief Multiplies packed 16-bit signed integer values, truncates the 32-bit
+/// products to the 18 most significant bits by right-shifting, rounds the
+/// truncated value by adding 1, and writes bits [16:1] to the destination.
+///
+/// \headerfile <x86intrin.h>
+///
+/// This intrinsic corresponds to the \c VPMULHRSW instruction.
+///
+/// \param __a
+/// A 128-bit vector of [8 x i16] containing one of the source operands.
+/// \param __b
+/// A 128-bit vector of [8 x i16] containing one of the source operands.
+/// \returns A 128-bit vector of [8 x i16] containing the rounded and scaled
+/// products of both operands.
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_mulhrs_epi16(__m128i __a, __m128i __b)
+{
+ return (__m128i)__builtin_ia32_pmulhrsw128((__v8hi)__a, (__v8hi)__b);
+}
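+
+/* Editor's illustrative note (not upstream): each lane effectively computes
+ * ((a*b >> 14) + 1) >> 1, the rounded Q15 fixed-point product; e.g.
+ * 0x4000 * 0x4000 (0.5 * 0.5 in Q15) yields 0x2000 (0.25).
+ */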
+
+/// \brief Multiplies packed 16-bit signed integer values, truncates the 32-bit
+/// products to the 18 most significant bits by right-shifting, rounds the
+/// truncated value by adding 1, and writes bits [16:1] to the destination.
+///
+/// \headerfile <x86intrin.h>
+///
+/// This intrinsic corresponds to the \c PMULHRSW instruction.
+///
+/// \param __a
+/// A 64-bit vector of [4 x i16] containing one of the source operands.
+/// \param __b
+/// A 64-bit vector of [4 x i16] containing one of the source operands.
+/// \returns A 64-bit vector of [4 x i16] containing the rounded and scaled
+/// products of both operands.
+static __inline__ __m64 __DEFAULT_FN_ATTRS
+_mm_mulhrs_pi16(__m64 __a, __m64 __b)
+{
+ return (__m64)__builtin_ia32_pmulhrsw((__v4hi)__a, (__v4hi)__b);
+}
+
+/// \brief Copies the 8-bit integers from a 128-bit integer vector to the
+/// destination or clears 8-bit values in the destination, as specified by
+/// the second source operand.
+///
+/// \headerfile <x86intrin.h>
+///
+/// This intrinsic corresponds to the \c VPSHUFB instruction.
+///
+/// \param __a
+/// A 128-bit integer vector containing the values to be copied.
+/// \param __b
+/// A 128-bit integer vector containing control bytes corresponding to
+/// positions in the destination:
+/// Bit 7:
+/// 1: Clear the corresponding byte in the destination.
+/// 0: Copy the selected source byte to the corresponding byte in the
+/// destination.
+/// Bits [6:4] Reserved.
+/// Bits [3:0] select the source byte to be copied.
+/// \returns A 128-bit integer vector containing the copied or cleared values.
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_shuffle_epi8(__m128i __a, __m128i __b)
+{
+ return (__m128i)__builtin_ia32_pshufb128((__v16qi)__a, (__v16qi)__b);
+}
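+
+/* Editor's illustrative sketch (not part of the upstream header): reversing
+ * the byte order of a vector v, assuming an SSSE3-capable target:
+ *
+ *   __m128i rev = _mm_setr_epi8(15, 14, 13, 12, 11, 10, 9, 8,
+ *                               7, 6, 5, 4, 3, 2, 1, 0);
+ *   __m128i r   = _mm_shuffle_epi8(v, rev);
+ */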
+
+/// \brief Copies the 8-bit integers from a 64-bit integer vector to the
+/// destination or clears 8-bit values in the destination, as specified by
+/// the second source operand.
+///
+/// \headerfile <x86intrin.h>
+///
+/// This intrinsic corresponds to the \c PSHUFB instruction.
+///
+/// \param __a
+/// A 64-bit integer vector containing the values to be copied.
+/// \param __b
+/// A 64-bit integer vector containing control bytes corresponding to
+/// positions in the destination:
+/// Bit 7:
+/// 1: Clear the corresponding byte in the destination.
+/// 0: Copy the selected source byte to the corresponding byte in the
+/// destination.
+/// Bits [6:3] Reserved.
+/// Bits [2:0] select the source byte to be copied.
+/// \returns A 64-bit integer vector containing the copied or cleared values.
+static __inline__ __m64 __DEFAULT_FN_ATTRS
+_mm_shuffle_pi8(__m64 __a, __m64 __b)
+{
+ return (__m64)__builtin_ia32_pshufb((__v8qi)__a, (__v8qi)__b);
+}
+
+/// \brief For each 8-bit integer in the first source operand, perform one of
+/// the following actions as specified by the second source operand: If the
+/// byte in the second source is negative, calculate the two's complement of
+/// the corresponding byte in the first source, and write that value to the
+/// destination. If the byte in the second source is positive, copy the
+/// corresponding byte from the first source to the destination. If the byte
+/// in the second source is zero, clear the corresponding byte in the
+/// destination.
+///
+/// \headerfile <x86intrin.h>
+///
+/// This intrinsic corresponds to the \c VPSIGNB instruction.
+///
+/// \param __a
+/// A 128-bit integer vector containing the values to be copied.
+/// \param __b
+/// A 128-bit integer vector containing control bytes corresponding to
+/// positions in the destination.
+/// \returns A 128-bit integer vector containing the resultant values.
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_sign_epi8(__m128i __a, __m128i __b)
+{
+ return (__m128i)__builtin_ia32_psignb128((__v16qi)__a, (__v16qi)__b);
+}
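+
+/* Editor's illustrative note (not upstream): per byte, __b < 0 negates,
+ * __b == 0 zeroes, and __b > 0 copies; _mm_sign_epi8(a, a) therefore yields
+ * |a| in every byte except that -128 stays -128 (two's-complement overflow).
+ */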
+
+/// \brief For each 16-bit integer in the first source operand, perform one of
+/// the following actions as specified by the second source operand: If the
+/// word in the second source is negative, calculate the two's complement of
+/// the corresponding word in the first source, and write that value to the
+/// destination. If the word in the second source is positive, copy the
+/// corresponding word from the first source to the destination. If the word
+/// in the second source is zero, clear the corresponding word in the
+/// destination.
+///
+/// \headerfile <x86intrin.h>
+///
+/// This intrinsic corresponds to the \c VPSIGNW instruction.
+///
+/// \param __a
+/// A 128-bit integer vector containing the values to be copied.
+/// \param __b
+/// A 128-bit integer vector containing control words corresponding to
+/// positions in the destination.
+/// \returns A 128-bit integer vector containing the resultant values.
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_sign_epi16(__m128i __a, __m128i __b)
+{
+ return (__m128i)__builtin_ia32_psignw128((__v8hi)__a, (__v8hi)__b);
+}
+
+/// \brief For each 32-bit integer in the first source operand, perform one of
+/// the following actions as specified by the second source operand: If the
+/// doubleword in the second source is negative, calculate the two's
+/// complement of the corresponding doubleword in the first source, and
+/// write that value to the destination. If the doubleword in the second
+/// source is positive, copy the corresponding doubleword from the first
+/// source to the destination. If the doubleword in the second source is
+/// zero, clear the corresponding doubleword in the destination.
+///
+/// \headerfile <x86intrin.h>
+///
+/// This intrinsic corresponds to the \c VPSIGND instruction.
+///
+/// \param __a
+/// A 128-bit integer vector containing the values to be copied.
+/// \param __b
+/// A 128-bit integer vector containing control doublewords corresponding to
+/// positions in the destination.
+/// \returns A 128-bit integer vector containing the resultant values.
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_sign_epi32(__m128i __a, __m128i __b)
+{
+ return (__m128i)__builtin_ia32_psignd128((__v4si)__a, (__v4si)__b);
+}
+
+/// \brief For each 8-bit integer in the first source operand, perform one of
+/// the following actions as specified by the second source operand: If the
+/// byte in the second source is negative, calculate the two's complement of
+/// the corresponding byte in the first source, and write that value to the
+/// destination. If the byte in the second source is positive, copy the
+/// corresponding byte from the first source to the destination. If the byte
+/// in the second source is zero, clear the corresponding byte in the
+/// destination.
+///
+/// \headerfile <x86intrin.h>
+///
+/// This intrinsic corresponds to the \c PSIGNB instruction.
+///
+/// \param __a
+/// A 64-bit integer vector containing the values to be copied.
+/// \param __b
+/// A 64-bit integer vector containing control bytes corresponding to
+/// positions in the destination.
+/// \returns A 64-bit integer vector containing the resultant values.
+static __inline__ __m64 __DEFAULT_FN_ATTRS
+_mm_sign_pi8(__m64 __a, __m64 __b)
+{
+ return (__m64)__builtin_ia32_psignb((__v8qi)__a, (__v8qi)__b);
+}
+
+/// \brief For each 16-bit integer in the first source operand, perform one of
+/// the following actions as specified by the second source operand: If the
+/// word in the second source is negative, calculate the two's complement of
+/// the corresponding word in the first source, and write that value to the
+/// destination. If the word in the second source is positive, copy the
+/// corresponding word from the first source to the destination. If the word
+/// in the second source is zero, clear the corresponding word in the
+/// destination.
+///
+/// \headerfile <x86intrin.h>
+///
+/// This intrinsic corresponds to the \c PSIGNW instruction.
+///
+/// \param __a
+/// A 64-bit integer vector containing the values to be copied.
+/// \param __b
+/// A 64-bit integer vector containing control words corresponding to
+/// positions in the destination.
+/// \returns A 64-bit integer vector containing the resultant values.
+static __inline__ __m64 __DEFAULT_FN_ATTRS
+_mm_sign_pi16(__m64 __a, __m64 __b)
+{
+ return (__m64)__builtin_ia32_psignw((__v4hi)__a, (__v4hi)__b);
+}
+
+/// \brief For each 32-bit integer in the first source operand, perform one of
+/// the following actions as specified by the second source operand: If the
+/// doubleword in the second source is negative, calculate the two's
+/// complement of the corresponding doubleword in the first source, and
+/// write that value to the destination. If the doubleword in the second
+/// source is positive, copy the corresponding doubleword from the first
+/// source to the destination. If the doubleword in the second source is
+/// zero, clear the corresponding doubleword in the destination.
+///
+/// \headerfile <x86intrin.h>
+///
+/// This intrinsic corresponds to the \c PSIGND instruction.
+///
+/// \param __a
+/// A 64-bit integer vector containing the values to be copied.
+/// \param __b
+/// A 64-bit integer vector containing two control doublewords corresponding
+/// to positions in the destination.
+/// \returns A 64-bit integer vector containing the resultant values.
+static __inline__ __m64 __DEFAULT_FN_ATTRS
+_mm_sign_pi32(__m64 __a, __m64 __b)
+{
+ return (__m64)__builtin_ia32_psignd((__v2si)__a, (__v2si)__b);
+}
+
+#undef __DEFAULT_FN_ATTRS
+
+#endif /* __TMMINTRIN_H */
diff --git a/current/clang-include/unwind.h b/current/clang-include/unwind.h
new file mode 100644
index 0000000..4f74a34
--- /dev/null
+++ b/current/clang-include/unwind.h
@@ -0,0 +1,299 @@
+/*===---- unwind.h - Stack unwinding ----------------------------------------===
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+ * THE SOFTWARE.
+ *
+ *===-----------------------------------------------------------------------===
+ */
+
+/* See "Data Definitions for libgcc_s" in the Linux Standard Base.*/
+
+#ifndef __CLANG_UNWIND_H
+#define __CLANG_UNWIND_H
+
+#if defined(__APPLE__) && __has_include_next(<unwind.h>)
+/* Darwin (from 11.x on) provides an unwind.h. If that's available,
+ * use it. libunwind wraps some of its definitions in #ifdef _GNU_SOURCE,
+ * so define that around the include. */
+# ifndef _GNU_SOURCE
+# define _SHOULD_UNDEFINE_GNU_SOURCE
+# define _GNU_SOURCE
+# endif
+// libunwind's unwind.h reflects the current visibility. However, Mozilla
+// builds with -fvisibility=hidden and relies on gcc's unwind.h to reset the
+// visibility to default and export its contents. gcc also allows users to
+// override its override by #defining HIDE_EXPORTS (but note, this only obeys
+// the user's -fvisibility setting; it doesn't hide any exports on its own). We
+// imitate gcc's header here:
+# ifdef HIDE_EXPORTS
+# include_next <unwind.h>
+# else
+# pragma GCC visibility push(default)
+# include_next <unwind.h>
+# pragma GCC visibility pop
+# endif
+# ifdef _SHOULD_UNDEFINE_GNU_SOURCE
+# undef _GNU_SOURCE
+# undef _SHOULD_UNDEFINE_GNU_SOURCE
+# endif
+#else
+
+#include <stdint.h>
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+/* It is a bit strange for a header to play with the visibility of the
+ symbols it declares, but this matches gcc's behavior and some programs
+ depend on it. */
+#ifndef HIDE_EXPORTS
+#pragma GCC visibility push(default)
+#endif
+
+typedef uintptr_t _Unwind_Word;
+typedef intptr_t _Unwind_Sword;
+typedef uintptr_t _Unwind_Ptr;
+typedef uintptr_t _Unwind_Internal_Ptr;
+typedef uint64_t _Unwind_Exception_Class;
+
+typedef intptr_t _sleb128_t;
+typedef uintptr_t _uleb128_t;
+
+struct _Unwind_Context;
+struct _Unwind_Exception;
+typedef enum {
+ _URC_NO_REASON = 0,
+#if defined(__arm__) && !defined(__USING_SJLJ_EXCEPTIONS__) && \
+ !defined(__ARM_DWARF_EH__)
+ _URC_OK = 0, /* used by ARM EHABI */
+#endif
+ _URC_FOREIGN_EXCEPTION_CAUGHT = 1,
+
+ _URC_FATAL_PHASE2_ERROR = 2,
+ _URC_FATAL_PHASE1_ERROR = 3,
+ _URC_NORMAL_STOP = 4,
+
+ _URC_END_OF_STACK = 5,
+ _URC_HANDLER_FOUND = 6,
+ _URC_INSTALL_CONTEXT = 7,
+ _URC_CONTINUE_UNWIND = 8,
+#if defined(__arm__) && !defined(__USING_SJLJ_EXCEPTIONS__) && \
+ !defined(__ARM_DWARF_EH__)
+ _URC_FAILURE = 9 /* used by ARM EHABI */
+#endif
+} _Unwind_Reason_Code;
+
+typedef enum {
+ _UA_SEARCH_PHASE = 1,
+ _UA_CLEANUP_PHASE = 2,
+
+ _UA_HANDLER_FRAME = 4,
+ _UA_FORCE_UNWIND = 8,
+ _UA_END_OF_STACK = 16 /* gcc extension to C++ ABI */
+} _Unwind_Action;
+
+typedef void (*_Unwind_Exception_Cleanup_Fn)(_Unwind_Reason_Code,
+ struct _Unwind_Exception *);
+
+struct _Unwind_Exception {
+ _Unwind_Exception_Class exception_class;
+ _Unwind_Exception_Cleanup_Fn exception_cleanup;
+ _Unwind_Word private_1;
+ _Unwind_Word private_2;
+ /* The Itanium ABI requires that _Unwind_Exception objects are "double-word
+ * aligned". GCC has interpreted this to mean "use the maximum useful
+ * alignment for the target"; so do we. */
+} __attribute__((__aligned__));
+
+typedef _Unwind_Reason_Code (*_Unwind_Stop_Fn)(int, _Unwind_Action,
+ _Unwind_Exception_Class,
+ struct _Unwind_Exception *,
+ struct _Unwind_Context *,
+ void *);
+
+typedef _Unwind_Reason_Code (*_Unwind_Personality_Fn)(
+ int, _Unwind_Action, _Unwind_Exception_Class, struct _Unwind_Exception *,
+ struct _Unwind_Context *);
+typedef _Unwind_Personality_Fn __personality_routine;
+
+typedef _Unwind_Reason_Code (*_Unwind_Trace_Fn)(struct _Unwind_Context *,
+ void *);
+
+#if defined(__arm__) && !defined(__APPLE__)
+
+typedef enum {
+ _UVRSC_CORE = 0, /* integer register */
+ _UVRSC_VFP = 1, /* vfp */
+ _UVRSC_WMMXD = 3, /* Intel WMMX data register */
+ _UVRSC_WMMXC = 4 /* Intel WMMX control register */
+} _Unwind_VRS_RegClass;
+
+typedef enum {
+ _UVRSD_UINT32 = 0,
+ _UVRSD_VFPX = 1,
+ _UVRSD_UINT64 = 3,
+ _UVRSD_FLOAT = 4,
+ _UVRSD_DOUBLE = 5
+} _Unwind_VRS_DataRepresentation;
+
+typedef enum {
+ _UVRSR_OK = 0,
+ _UVRSR_NOT_IMPLEMENTED = 1,
+ _UVRSR_FAILED = 2
+} _Unwind_VRS_Result;
+
+#if !defined(__USING_SJLJ_EXCEPTIONS__) && !defined(__ARM_DWARF_EH__)
+typedef uint32_t _Unwind_State;
+#define _US_VIRTUAL_UNWIND_FRAME ((_Unwind_State)0)
+#define _US_UNWIND_FRAME_STARTING ((_Unwind_State)1)
+#define _US_UNWIND_FRAME_RESUME ((_Unwind_State)2)
+#define _US_ACTION_MASK ((_Unwind_State)3)
+#define _US_FORCE_UNWIND ((_Unwind_State)8)
+#endif
+
+_Unwind_VRS_Result _Unwind_VRS_Get(struct _Unwind_Context *__context,
+ _Unwind_VRS_RegClass __regclass,
+ uint32_t __regno,
+ _Unwind_VRS_DataRepresentation __representation,
+ void *__valuep);
+
+_Unwind_VRS_Result _Unwind_VRS_Set(struct _Unwind_Context *__context,
+ _Unwind_VRS_RegClass __regclass,
+ uint32_t __regno,
+ _Unwind_VRS_DataRepresentation __representation,
+ void *__valuep);
+
+static __inline__
+_Unwind_Word _Unwind_GetGR(struct _Unwind_Context *__context, int __index) {
+ _Unwind_Word __value;
+ _Unwind_VRS_Get(__context, _UVRSC_CORE, __index, _UVRSD_UINT32, &__value);
+ return __value;
+}
+
+static __inline__
+void _Unwind_SetGR(struct _Unwind_Context *__context, int __index,
+ _Unwind_Word __value) {
+ _Unwind_VRS_Set(__context, _UVRSC_CORE, __index, _UVRSD_UINT32, &__value);
+}
+
+static __inline__
+_Unwind_Word _Unwind_GetIP(struct _Unwind_Context *__context) {
+ _Unwind_Word __ip = _Unwind_GetGR(__context, 15);
+ return __ip & ~(_Unwind_Word)(0x1); /* Remove thumb mode bit. */
+}
+
+static __inline__
+void _Unwind_SetIP(struct _Unwind_Context *__context, _Unwind_Word __value) {
+ _Unwind_Word __thumb_mode_bit = _Unwind_GetGR(__context, 15) & 0x1;
+ _Unwind_SetGR(__context, 15, __value | __thumb_mode_bit);
+}
+#else
+_Unwind_Word _Unwind_GetGR(struct _Unwind_Context *, int);
+void _Unwind_SetGR(struct _Unwind_Context *, int, _Unwind_Word);
+
+_Unwind_Word _Unwind_GetIP(struct _Unwind_Context *);
+void _Unwind_SetIP(struct _Unwind_Context *, _Unwind_Word);
+#endif
+
+
+_Unwind_Word _Unwind_GetIPInfo(struct _Unwind_Context *, int *);
+
+_Unwind_Word _Unwind_GetCFA(struct _Unwind_Context *);
+
+_Unwind_Word _Unwind_GetBSP(struct _Unwind_Context *);
+
+void *_Unwind_GetLanguageSpecificData(struct _Unwind_Context *);
+
+_Unwind_Ptr _Unwind_GetRegionStart(struct _Unwind_Context *);
+
+/* DWARF EH functions; currently not available on Darwin/ARM */
+#if !defined(__APPLE__) || !defined(__arm__)
+
+_Unwind_Reason_Code _Unwind_RaiseException(struct _Unwind_Exception *);
+_Unwind_Reason_Code _Unwind_ForcedUnwind(struct _Unwind_Exception *,
+ _Unwind_Stop_Fn, void *);
+void _Unwind_DeleteException(struct _Unwind_Exception *);
+void _Unwind_Resume(struct _Unwind_Exception *);
+_Unwind_Reason_Code _Unwind_Resume_or_Rethrow(struct _Unwind_Exception *);
+
+#endif
+
+_Unwind_Reason_Code _Unwind_Backtrace(_Unwind_Trace_Fn, void *);
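+
+/* Editor's illustrative sketch (not part of the upstream header), assuming a
+ * hosted environment with <stdio.h>: printing a crude backtrace.
+ *
+ *   static _Unwind_Reason_Code trace(struct _Unwind_Context *ctx, void *arg) {
+ *     printf("ip: %p\n", (void *)_Unwind_GetIP(ctx));
+ *     return _URC_NO_REASON;
+ *   }
+ *   ...
+ *   _Unwind_Backtrace(trace, 0);
+ */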
+
+/* setjmp(3)/longjmp(3) stuff */
+typedef struct SjLj_Function_Context *_Unwind_FunctionContext_t;
+
+void _Unwind_SjLj_Register(_Unwind_FunctionContext_t);
+void _Unwind_SjLj_Unregister(_Unwind_FunctionContext_t);
+_Unwind_Reason_Code _Unwind_SjLj_RaiseException(struct _Unwind_Exception *);
+_Unwind_Reason_Code _Unwind_SjLj_ForcedUnwind(struct _Unwind_Exception *,
+ _Unwind_Stop_Fn, void *);
+void _Unwind_SjLj_Resume(struct _Unwind_Exception *);
+_Unwind_Reason_Code _Unwind_SjLj_Resume_or_Rethrow(struct _Unwind_Exception *);
+
+void *_Unwind_FindEnclosingFunction(void *);
+
+#ifdef __APPLE__
+
+_Unwind_Ptr _Unwind_GetDataRelBase(struct _Unwind_Context *)
+ __attribute__((__unavailable__));
+_Unwind_Ptr _Unwind_GetTextRelBase(struct _Unwind_Context *)
+ __attribute__((__unavailable__));
+
+/* Darwin-specific functions */
+void __register_frame(const void *);
+void __deregister_frame(const void *);
+
+struct dwarf_eh_bases {
+ uintptr_t tbase;
+ uintptr_t dbase;
+ uintptr_t func;
+};
+void *_Unwind_Find_FDE(const void *, struct dwarf_eh_bases *);
+
+void __register_frame_info_bases(const void *, void *, void *, void *)
+ __attribute__((__unavailable__));
+void __register_frame_info(const void *, void *) __attribute__((__unavailable__));
+void __register_frame_info_table_bases(const void *, void *, void *, void *)
+ __attribute__((__unavailable__));
+void __register_frame_info_table(const void *, void *)
+ __attribute__((__unavailable__));
+void __register_frame_table(const void *) __attribute__((__unavailable__));
+void __deregister_frame_info(const void *) __attribute__((__unavailable__));
+void __deregister_frame_info_bases(const void *)__attribute__((__unavailable__));
+
+#else
+
+_Unwind_Ptr _Unwind_GetDataRelBase(struct _Unwind_Context *);
+_Unwind_Ptr _Unwind_GetTextRelBase(struct _Unwind_Context *);
+
+#endif /* __APPLE__ */
+
+
+#ifndef HIDE_EXPORTS
+#pragma GCC visibility pop
+#endif
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif
+
+#endif /* __CLANG_UNWIND_H */
diff --git a/current/clang-include/vadefs.h b/current/clang-include/vadefs.h
new file mode 100644
index 0000000..7fe9a74
--- /dev/null
+++ b/current/clang-include/vadefs.h
@@ -0,0 +1,65 @@
+/* ===-------- vadefs.h ---------------------------------------------------===
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+ * THE SOFTWARE.
+ *
+ *===-----------------------------------------------------------------------===
+ */
+
+/* Only include this if we are aiming for MSVC compatibility. */
+#ifndef _MSC_VER
+#include_next <vadefs.h>
+#else
+
+#ifndef __clang_vadefs_h
+#define __clang_vadefs_h
+
+#include_next <vadefs.h>
+
+/* Override macros from vadefs.h with definitions that work with Clang. */
+#ifdef _crt_va_start
+#undef _crt_va_start
+#define _crt_va_start(ap, param) __builtin_va_start(ap, param)
+#endif
+#ifdef _crt_va_end
+#undef _crt_va_end
+#define _crt_va_end(ap) __builtin_va_end(ap)
+#endif
+#ifdef _crt_va_arg
+#undef _crt_va_arg
+#define _crt_va_arg(ap, type) __builtin_va_arg(ap, type)
+#endif
+
+/* VS 2015 switched to double underscore names, which is an improvement, but now
+ * we have to intercept those names too.
+ */
+#ifdef __crt_va_start
+#undef __crt_va_start
+#define __crt_va_start(ap, param) __builtin_va_start(ap, param)
+#endif
+#ifdef __crt_va_end
+#undef __crt_va_end
+#define __crt_va_end(ap) __builtin_va_end(ap)
+#endif
+#ifdef __crt_va_arg
+#undef __crt_va_arg
+#define __crt_va_arg(ap, type) __builtin_va_arg(ap, type)
+#endif
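+
+/* Editor's note: a minimal usage sketch, not part of the header. With the
+ * overrides above in place, CRT-style variadic code expands directly to the
+ * Clang builtins:
+ *
+ *   int __sum(int __count, ...) {
+ *     va_list __ap;
+ *     _crt_va_start(__ap, __count);          // __builtin_va_start
+ *     int __total = 0;
+ *     for (int __i = 0; __i < __count; ++__i)
+ *       __total += _crt_va_arg(__ap, int);   // __builtin_va_arg
+ *     _crt_va_end(__ap);                     // __builtin_va_end
+ *     return __total;
+ *   }
+ */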
+
+#endif /* __clang_vadefs_h */
+#endif /* _MSC_VER */
diff --git a/current/clang-include/varargs.h b/current/clang-include/varargs.h
new file mode 100644
index 0000000..b5477d0
--- /dev/null
+++ b/current/clang-include/varargs.h
@@ -0,0 +1,26 @@
+/*===---- varargs.h - Variable argument handling --------------------------===
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+ * THE SOFTWARE.
+ *
+ *===-----------------------------------------------------------------------===
+ */
+#ifndef __VARARGS_H
+#define __VARARGS_H
+ #error "Please use <stdarg.h> instead of <varargs.h>"
+#endif
diff --git a/current/clang-include/vecintrin.h b/current/clang-include/vecintrin.h
new file mode 100644
index 0000000..ca7acb4
--- /dev/null
+++ b/current/clang-include/vecintrin.h
@@ -0,0 +1,8946 @@
+/*===---- vecintrin.h - Vector intrinsics ----------------------------------===
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+ * THE SOFTWARE.
+ *
+ *===-----------------------------------------------------------------------===
+ */
+
+#if defined(__s390x__) && defined(__VEC__)
+
+#define __ATTRS_ai __attribute__((__always_inline__))
+#define __ATTRS_o __attribute__((__overloadable__))
+#define __ATTRS_o_ai __attribute__((__overloadable__, __always_inline__))
+
+#define __constant(PARM) \
+ __attribute__((__enable_if__ ((PARM) == (PARM), \
+ "argument must be a constant integer")))
+#define __constant_range(PARM, LOW, HIGH) \
+ __attribute__((__enable_if__ ((PARM) >= (LOW) && (PARM) <= (HIGH), \
+ "argument must be a constant integer from " #LOW " to " #HIGH)))
+#define __constant_pow2_range(PARM, LOW, HIGH) \
+ __attribute__((__enable_if__ ((PARM) >= (LOW) && (PARM) <= (HIGH) && \
+ ((PARM) & ((PARM) - 1)) == 0, \
+ "argument must be a constant power of 2 from " #LOW " to " #HIGH)))
+
+/*-- __lcbb -----------------------------------------------------------------*/
+
+extern __ATTRS_o unsigned int
+__lcbb(const void *__ptr, unsigned short __len)
+ __constant_pow2_range(__len, 64, 4096);
+
+#define __lcbb(X, Y) ((__typeof__((__lcbb)((X), (Y)))) \
+ __builtin_s390_lcbb((X), __builtin_constant_p((Y))? \
+ ((Y) == 64 ? 0 : \
+ (Y) == 128 ? 1 : \
+ (Y) == 256 ? 2 : \
+ (Y) == 512 ? 3 : \
+ (Y) == 1024 ? 4 : \
+ (Y) == 2048 ? 5 : \
+ (Y) == 4096 ? 6 : 0) : 0))
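+
+/* Editor's note (illustration, not part of the header): the macro encodes the
+ * power-of-two boundary as the small immediate expected by the LCBB
+ * instruction (64 -> 0, 128 -> 1, ..., 4096 -> 6). For example:
+ *
+ *   unsigned int __n = __lcbb(__ptr, 4096);
+ *
+ * yields the number of bytes that can be loaded from __ptr without crossing
+ * the next 4096-byte boundary, capped at 16.
+ */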
+
+/*-- vec_extract ------------------------------------------------------------*/
+
+static inline __ATTRS_o_ai signed char
+vec_extract(vector signed char __vec, int __index) {
+ return __vec[__index & 15];
+}
+
+static inline __ATTRS_o_ai unsigned char
+vec_extract(vector bool char __vec, int __index) {
+ return __vec[__index & 15];
+}
+
+static inline __ATTRS_o_ai unsigned char
+vec_extract(vector unsigned char __vec, int __index) {
+ return __vec[__index & 15];
+}
+
+static inline __ATTRS_o_ai signed short
+vec_extract(vector signed short __vec, int __index) {
+ return __vec[__index & 7];
+}
+
+static inline __ATTRS_o_ai unsigned short
+vec_extract(vector bool short __vec, int __index) {
+ return __vec[__index & 7];
+}
+
+static inline __ATTRS_o_ai unsigned short
+vec_extract(vector unsigned short __vec, int __index) {
+ return __vec[__index & 7];
+}
+
+static inline __ATTRS_o_ai signed int
+vec_extract(vector signed int __vec, int __index) {
+ return __vec[__index & 3];
+}
+
+static inline __ATTRS_o_ai unsigned int
+vec_extract(vector bool int __vec, int __index) {
+ return __vec[__index & 3];
+}
+
+static inline __ATTRS_o_ai unsigned int
+vec_extract(vector unsigned int __vec, int __index) {
+ return __vec[__index & 3];
+}
+
+static inline __ATTRS_o_ai signed long long
+vec_extract(vector signed long long __vec, int __index) {
+ return __vec[__index & 1];
+}
+
+static inline __ATTRS_o_ai unsigned long long
+vec_extract(vector bool long long __vec, int __index) {
+ return __vec[__index & 1];
+}
+
+static inline __ATTRS_o_ai unsigned long long
+vec_extract(vector unsigned long long __vec, int __index) {
+ return __vec[__index & 1];
+}
+
+static inline __ATTRS_o_ai double
+vec_extract(vector double __vec, int __index) {
+ return __vec[__index & 1];
+}
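+
+/* Editor's note (illustration, not part of the header): __index is taken
+ * modulo the element count, so out-of-range indices wrap around:
+ *
+ *   vector signed int __v = { 10, 20, 30, 40 };
+ *   signed int __x = vec_extract(__v, 5);   // 5 & 3 == 1, so __x == 20
+ */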
+
+/*-- vec_insert -------------------------------------------------------------*/
+
+static inline __ATTRS_o_ai vector signed char
+vec_insert(signed char __scalar, vector signed char __vec, int __index) {
+ __vec[__index & 15] = __scalar;
+ return __vec;
+}
+
+static inline __ATTRS_o_ai vector unsigned char
+vec_insert(unsigned char __scalar, vector bool char __vec, int __index) {
+ vector unsigned char __newvec = (vector unsigned char)__vec;
+ __newvec[__index & 15] = (unsigned char)__scalar;
+ return __newvec;
+}
+
+static inline __ATTRS_o_ai vector unsigned char
+vec_insert(unsigned char __scalar, vector unsigned char __vec, int __index) {
+ __vec[__index & 15] = __scalar;
+ return __vec;
+}
+
+static inline __ATTRS_o_ai vector signed short
+vec_insert(signed short __scalar, vector signed short __vec, int __index) {
+ __vec[__index & 7] = __scalar;
+ return __vec;
+}
+
+static inline __ATTRS_o_ai vector unsigned short
+vec_insert(unsigned short __scalar, vector bool short __vec, int __index) {
+ vector unsigned short __newvec = (vector unsigned short)__vec;
+ __newvec[__index & 7] = (unsigned short)__scalar;
+ return __newvec;
+}
+
+static inline __ATTRS_o_ai vector unsigned short
+vec_insert(unsigned short __scalar, vector unsigned short __vec, int __index) {
+ __vec[__index & 7] = __scalar;
+ return __vec;
+}
+
+static inline __ATTRS_o_ai vector signed int
+vec_insert(signed int __scalar, vector signed int __vec, int __index) {
+ __vec[__index & 3] = __scalar;
+ return __vec;
+}
+
+static inline __ATTRS_o_ai vector unsigned int
+vec_insert(unsigned int __scalar, vector bool int __vec, int __index) {
+ vector unsigned int __newvec = (vector unsigned int)__vec;
+ __newvec[__index & 3] = __scalar;
+ return __newvec;
+}
+
+static inline __ATTRS_o_ai vector unsigned int
+vec_insert(unsigned int __scalar, vector unsigned int __vec, int __index) {
+ __vec[__index & 3] = __scalar;
+ return __vec;
+}
+
+static inline __ATTRS_o_ai vector signed long long
+vec_insert(signed long long __scalar, vector signed long long __vec,
+ int __index) {
+ __vec[__index & 1] = __scalar;
+ return __vec;
+}
+
+static inline __ATTRS_o_ai vector unsigned long long
+vec_insert(unsigned long long __scalar, vector bool long long __vec,
+ int __index) {
+ vector unsigned long long __newvec = (vector unsigned long long)__vec;
+ __newvec[__index & 1] = __scalar;
+ return __newvec;
+}
+
+static inline __ATTRS_o_ai vector unsigned long long
+vec_insert(unsigned long long __scalar, vector unsigned long long __vec,
+ int __index) {
+ __vec[__index & 1] = __scalar;
+ return __vec;
+}
+
+static inline __ATTRS_o_ai vector double
+vec_insert(double __scalar, vector double __vec, int __index) {
+ __vec[__index & 1] = __scalar;
+ return __vec;
+}
+
+/*-- vec_promote ------------------------------------------------------------*/
+
+static inline __ATTRS_o_ai vector signed char
+vec_promote(signed char __scalar, int __index) {
+ const vector signed char __zero = (vector signed char)0;
+ vector signed char __vec = __builtin_shufflevector(__zero, __zero,
+ -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1);
+ __vec[__index & 15] = __scalar;
+ return __vec;
+}
+
+static inline __ATTRS_o_ai vector unsigned char
+vec_promote(unsigned char __scalar, int __index) {
+ const vector unsigned char __zero = (vector unsigned char)0;
+ vector unsigned char __vec = __builtin_shufflevector(__zero, __zero,
+ -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1);
+ __vec[__index & 15] = __scalar;
+ return __vec;
+}
+
+static inline __ATTRS_o_ai vector signed short
+vec_promote(signed short __scalar, int __index) {
+ const vector signed short __zero = (vector signed short)0;
+ vector signed short __vec = __builtin_shufflevector(__zero, __zero,
+ -1, -1, -1, -1, -1, -1, -1, -1);
+ __vec[__index & 7] = __scalar;
+ return __vec;
+}
+
+static inline __ATTRS_o_ai vector unsigned short
+vec_promote(unsigned short __scalar, int __index) {
+ const vector unsigned short __zero = (vector unsigned short)0;
+ vector unsigned short __vec = __builtin_shufflevector(__zero, __zero,
+ -1, -1, -1, -1, -1, -1, -1, -1);
+ __vec[__index & 7] = __scalar;
+ return __vec;
+}
+
+static inline __ATTRS_o_ai vector signed int
+vec_promote(signed int __scalar, int __index) {
+ const vector signed int __zero = (vector signed int)0;
+ vector signed int __vec = __builtin_shufflevector(__zero, __zero,
+ -1, -1, -1, -1);
+ __vec[__index & 3] = __scalar;
+ return __vec;
+}
+
+static inline __ATTRS_o_ai vector unsigned int
+vec_promote(unsigned int __scalar, int __index) {
+ const vector unsigned int __zero = (vector unsigned int)0;
+ vector unsigned int __vec = __builtin_shufflevector(__zero, __zero,
+ -1, -1, -1, -1);
+ __vec[__index & 3] = __scalar;
+ return __vec;
+}
+
+static inline __ATTRS_o_ai vector signed long long
+vec_promote(signed long long __scalar, int __index) {
+ const vector signed long long __zero = (vector signed long long)0;
+ vector signed long long __vec = __builtin_shufflevector(__zero, __zero,
+ -1, -1);
+ __vec[__index & 1] = __scalar;
+ return __vec;
+}
+
+static inline __ATTRS_o_ai vector unsigned long long
+vec_promote(unsigned long long __scalar, int __index) {
+ const vector unsigned long long __zero = (vector unsigned long long)0;
+ vector unsigned long long __vec = __builtin_shufflevector(__zero, __zero,
+ -1, -1);
+ __vec[__index & 1] = __scalar;
+ return __vec;
+}
+
+static inline __ATTRS_o_ai vector double
+vec_promote(double __scalar, int __index) {
+ const vector double __zero = (vector double)0;
+ vector double __vec = __builtin_shufflevector(__zero, __zero, -1, -1);
+ __vec[__index & 1] = __scalar;
+ return __vec;
+}
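+
+/* Editor's note (illustration, not part of the header): vec_promote places
+ * the scalar at the (wrapped) index and leaves every other element
+ * undefined -- the -1 shuffle indices above mark "don't care" lanes:
+ *
+ *   vector double __v = vec_promote(1.5, 0);  // __v[0] == 1.5, __v[1] undefined
+ */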
+
+/*-- vec_insert_and_zero ----------------------------------------------------*/
+
+static inline __ATTRS_o_ai vector signed char
+vec_insert_and_zero(const signed char *__ptr) {
+ vector signed char __vec = (vector signed char)0;
+ __vec[7] = *__ptr;
+ return __vec;
+}
+
+static inline __ATTRS_o_ai vector unsigned char
+vec_insert_and_zero(const unsigned char *__ptr) {
+ vector unsigned char __vec = (vector unsigned char)0;
+ __vec[7] = *__ptr;
+ return __vec;
+}
+
+static inline __ATTRS_o_ai vector signed short
+vec_insert_and_zero(const signed short *__ptr) {
+ vector signed short __vec = (vector signed short)0;
+ __vec[3] = *__ptr;
+ return __vec;
+}
+
+static inline __ATTRS_o_ai vector unsigned short
+vec_insert_and_zero(const unsigned short *__ptr) {
+ vector unsigned short __vec = (vector unsigned short)0;
+ __vec[3] = *__ptr;
+ return __vec;
+}
+
+static inline __ATTRS_o_ai vector signed int
+vec_insert_and_zero(const signed int *__ptr) {
+ vector signed int __vec = (vector signed int)0;
+ __vec[1] = *__ptr;
+ return __vec;
+}
+
+static inline __ATTRS_o_ai vector unsigned int
+vec_insert_and_zero(const unsigned int *__ptr) {
+ vector unsigned int __vec = (vector unsigned int)0;
+ __vec[1] = *__ptr;
+ return __vec;
+}
+
+static inline __ATTRS_o_ai vector signed long long
+vec_insert_and_zero(const signed long long *__ptr) {
+ vector signed long long __vec = (vector signed long long)0;
+ __vec[0] = *__ptr;
+ return __vec;
+}
+
+static inline __ATTRS_o_ai vector unsigned long long
+vec_insert_and_zero(const unsigned long long *__ptr) {
+ vector unsigned long long __vec = (vector unsigned long long)0;
+ __vec[0] = *__ptr;
+ return __vec;
+}
+
+static inline __ATTRS_o_ai vector double
+vec_insert_and_zero(const double *__ptr) {
+ vector double __vec = (vector double)0;
+ __vec[0] = *__ptr;
+ return __vec;
+}
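+
+/* Editor's note (illustration, not part of the header): the loaded scalar
+ * lands in the element occupying the rightmost bytes of the left doubleword
+ * (index 7 for char, 3 for short, 1 for int, 0 for long long and double, as
+ * the bodies above show); all other elements are zeroed.
+ */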
+
+/*-- vec_perm ---------------------------------------------------------------*/
+
+static inline __ATTRS_o_ai vector signed char
+vec_perm(vector signed char __a, vector signed char __b,
+ vector unsigned char __c) {
+ return (vector signed char)__builtin_s390_vperm(
+ (vector unsigned char)__a, (vector unsigned char)__b, __c);
+}
+
+static inline __ATTRS_o_ai vector unsigned char
+vec_perm(vector unsigned char __a, vector unsigned char __b,
+ vector unsigned char __c) {
+ return (vector unsigned char)__builtin_s390_vperm(
+ (vector unsigned char)__a, (vector unsigned char)__b, __c);
+}
+
+static inline __ATTRS_o_ai vector bool char
+vec_perm(vector bool char __a, vector bool char __b,
+ vector unsigned char __c) {
+ return (vector bool char)__builtin_s390_vperm(
+ (vector unsigned char)__a, (vector unsigned char)__b, __c);
+}
+
+static inline __ATTRS_o_ai vector signed short
+vec_perm(vector signed short __a, vector signed short __b,
+ vector unsigned char __c) {
+ return (vector signed short)__builtin_s390_vperm(
+ (vector unsigned char)__a, (vector unsigned char)__b, __c);
+}
+
+static inline __ATTRS_o_ai vector unsigned short
+vec_perm(vector unsigned short __a, vector unsigned short __b,
+ vector unsigned char __c) {
+ return (vector unsigned short)__builtin_s390_vperm(
+ (vector unsigned char)__a, (vector unsigned char)__b, __c);
+}
+
+static inline __ATTRS_o_ai vector bool short
+vec_perm(vector bool short __a, vector bool short __b,
+ vector unsigned char __c) {
+ return (vector bool short)__builtin_s390_vperm(
+ (vector unsigned char)__a, (vector unsigned char)__b, __c);
+}
+
+static inline __ATTRS_o_ai vector signed int
+vec_perm(vector signed int __a, vector signed int __b,
+ vector unsigned char __c) {
+ return (vector signed int)__builtin_s390_vperm(
+ (vector unsigned char)__a, (vector unsigned char)__b, __c);
+}
+
+static inline __ATTRS_o_ai vector unsigned int
+vec_perm(vector unsigned int __a, vector unsigned int __b,
+ vector unsigned char __c) {
+ return (vector unsigned int)__builtin_s390_vperm(
+ (vector unsigned char)__a, (vector unsigned char)__b, __c);
+}
+
+static inline __ATTRS_o_ai vector bool int
+vec_perm(vector bool int __a, vector bool int __b,
+ vector unsigned char __c) {
+ return (vector bool int)__builtin_s390_vperm(
+ (vector unsigned char)__a, (vector unsigned char)__b, __c);
+}
+
+static inline __ATTRS_o_ai vector signed long long
+vec_perm(vector signed long long __a, vector signed long long __b,
+ vector unsigned char __c) {
+ return (vector signed long long)__builtin_s390_vperm(
+ (vector unsigned char)__a, (vector unsigned char)__b, __c);
+}
+
+static inline __ATTRS_o_ai vector unsigned long long
+vec_perm(vector unsigned long long __a, vector unsigned long long __b,
+ vector unsigned char __c) {
+ return (vector unsigned long long)__builtin_s390_vperm(
+ (vector unsigned char)__a, (vector unsigned char)__b, __c);
+}
+
+static inline __ATTRS_o_ai vector bool long long
+vec_perm(vector bool long long __a, vector bool long long __b,
+ vector unsigned char __c) {
+ return (vector bool long long)__builtin_s390_vperm(
+ (vector unsigned char)__a, (vector unsigned char)__b, __c);
+}
+
+static inline __ATTRS_o_ai vector double
+vec_perm(vector double __a, vector double __b,
+ vector unsigned char __c) {
+ return (vector double)__builtin_s390_vperm(
+ (vector unsigned char)__a, (vector unsigned char)__b, __c);
+}
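+
+/* Editor's note: a minimal usage sketch, not part of the header. Each byte of
+ * the control vector __c selects one of the 32 source bytes (0-15 from __a,
+ * 16-31 from __b); for example, reversing the bytes of a single vector:
+ *
+ *   const vector unsigned char __rev =
+ *     { 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0 };
+ *   vector unsigned char __r = vec_perm(__a, __a, __rev);
+ */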
+
+/*-- vec_permi --------------------------------------------------------------*/
+
+extern __ATTRS_o vector signed long long
+vec_permi(vector signed long long __a, vector signed long long __b, int __c)
+ __constant_range(__c, 0, 3);
+
+extern __ATTRS_o vector unsigned long long
+vec_permi(vector unsigned long long __a, vector unsigned long long __b, int __c)
+ __constant_range(__c, 0, 3);
+
+extern __ATTRS_o vector bool long long
+vec_permi(vector bool long long __a, vector bool long long __b, int __c)
+ __constant_range(__c, 0, 3);
+
+extern __ATTRS_o vector double
+vec_permi(vector double __a, vector double __b, int __c)
+ __constant_range(__c, 0, 3);
+
+#define vec_permi(X, Y, Z) ((__typeof__((vec_permi)((X), (Y), (Z)))) \
+ __builtin_s390_vpdi((vector unsigned long long)(X), \
+ (vector unsigned long long)(Y), \
+ (((Z) & 2) << 1) | ((Z) & 1)))
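+
+/* Editor's note (illustration, not part of the header): bit 1 of __c selects
+ * the doubleword taken from __a and bit 0 the doubleword taken from __b, so
+ * vec_permi(__a, __b, 0) yields (__a[0], __b[0]) and vec_permi(__a, __b, 3)
+ * yields (__a[1], __b[1]). The macro spreads those two bits into the VPDI
+ * immediate via (((Z) & 2) << 1) | ((Z) & 1).
+ */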
+
+/*-- vec_sel ----------------------------------------------------------------*/
+
+static inline __ATTRS_o_ai vector signed char
+vec_sel(vector signed char __a, vector signed char __b,
+ vector unsigned char __c) {
+ return ((vector signed char)__c & __b) | (~(vector signed char)__c & __a);
+}
+
+static inline __ATTRS_o_ai vector signed char
+vec_sel(vector signed char __a, vector signed char __b, vector bool char __c) {
+ return ((vector signed char)__c & __b) | (~(vector signed char)__c & __a);
+}
+
+static inline __ATTRS_o_ai vector bool char
+vec_sel(vector bool char __a, vector bool char __b, vector unsigned char __c) {
+ return ((vector bool char)__c & __b) | (~(vector bool char)__c & __a);
+}
+
+static inline __ATTRS_o_ai vector bool char
+vec_sel(vector bool char __a, vector bool char __b, vector bool char __c) {
+ return (__c & __b) | (~__c & __a);
+}
+
+static inline __ATTRS_o_ai vector unsigned char
+vec_sel(vector unsigned char __a, vector unsigned char __b,
+ vector unsigned char __c) {
+ return (__c & __b) | (~__c & __a);
+}
+
+static inline __ATTRS_o_ai vector unsigned char
+vec_sel(vector unsigned char __a, vector unsigned char __b,
+ vector bool char __c) {
+ return ((vector unsigned char)__c & __b) | (~(vector unsigned char)__c & __a);
+}
+
+static inline __ATTRS_o_ai vector signed short
+vec_sel(vector signed short __a, vector signed short __b,
+ vector unsigned short __c) {
+ return ((vector signed short)__c & __b) | (~(vector signed short)__c & __a);
+}
+
+static inline __ATTRS_o_ai vector signed short
+vec_sel(vector signed short __a, vector signed short __b,
+ vector bool short __c) {
+ return ((vector signed short)__c & __b) | (~(vector signed short)__c & __a);
+}
+
+static inline __ATTRS_o_ai vector bool short
+vec_sel(vector bool short __a, vector bool short __b,
+ vector unsigned short __c) {
+ return ((vector bool short)__c & __b) | (~(vector bool short)__c & __a);
+}
+
+static inline __ATTRS_o_ai vector bool short
+vec_sel(vector bool short __a, vector bool short __b, vector bool short __c) {
+ return (__c & __b) | (~__c & __a);
+}
+
+static inline __ATTRS_o_ai vector unsigned short
+vec_sel(vector unsigned short __a, vector unsigned short __b,
+ vector unsigned short __c) {
+ return (__c & __b) | (~__c & __a);
+}
+
+static inline __ATTRS_o_ai vector unsigned short
+vec_sel(vector unsigned short __a, vector unsigned short __b,
+ vector bool short __c) {
+ return (((vector unsigned short)__c & __b) |
+ (~(vector unsigned short)__c & __a));
+}
+
+static inline __ATTRS_o_ai vector signed int
+vec_sel(vector signed int __a, vector signed int __b,
+ vector unsigned int __c) {
+ return ((vector signed int)__c & __b) | (~(vector signed int)__c & __a);
+}
+
+static inline __ATTRS_o_ai vector signed int
+vec_sel(vector signed int __a, vector signed int __b, vector bool int __c) {
+ return ((vector signed int)__c & __b) | (~(vector signed int)__c & __a);
+}
+
+static inline __ATTRS_o_ai vector bool int
+vec_sel(vector bool int __a, vector bool int __b, vector unsigned int __c) {
+ return ((vector bool int)__c & __b) | (~(vector bool int)__c & __a);
+}
+
+static inline __ATTRS_o_ai vector bool int
+vec_sel(vector bool int __a, vector bool int __b, vector bool int __c) {
+ return (__c & __b) | (~__c & __a);
+}
+
+static inline __ATTRS_o_ai vector unsigned int
+vec_sel(vector unsigned int __a, vector unsigned int __b,
+ vector unsigned int __c) {
+ return (__c & __b) | (~__c & __a);
+}
+
+static inline __ATTRS_o_ai vector unsigned int
+vec_sel(vector unsigned int __a, vector unsigned int __b, vector bool int __c) {
+ return ((vector unsigned int)__c & __b) | (~(vector unsigned int)__c & __a);
+}
+
+static inline __ATTRS_o_ai vector signed long long
+vec_sel(vector signed long long __a, vector signed long long __b,
+ vector unsigned long long __c) {
+ return (((vector signed long long)__c & __b) |
+ (~(vector signed long long)__c & __a));
+}
+
+static inline __ATTRS_o_ai vector signed long long
+vec_sel(vector signed long long __a, vector signed long long __b,
+ vector bool long long __c) {
+ return (((vector signed long long)__c & __b) |
+ (~(vector signed long long)__c & __a));
+}
+
+static inline __ATTRS_o_ai vector bool long long
+vec_sel(vector bool long long __a, vector bool long long __b,
+ vector unsigned long long __c) {
+ return (((vector bool long long)__c & __b) |
+ (~(vector bool long long)__c & __a));
+}
+
+static inline __ATTRS_o_ai vector bool long long
+vec_sel(vector bool long long __a, vector bool long long __b,
+ vector bool long long __c) {
+ return (__c & __b) | (~__c & __a);
+}
+
+static inline __ATTRS_o_ai vector unsigned long long
+vec_sel(vector unsigned long long __a, vector unsigned long long __b,
+ vector unsigned long long __c) {
+ return (__c & __b) | (~__c & __a);
+}
+
+static inline __ATTRS_o_ai vector unsigned long long
+vec_sel(vector unsigned long long __a, vector unsigned long long __b,
+ vector bool long long __c) {
+ return (((vector unsigned long long)__c & __b) |
+ (~(vector unsigned long long)__c & __a));
+}
+
+static inline __ATTRS_o_ai vector double
+vec_sel(vector double __a, vector double __b, vector unsigned long long __c) {
+ return (vector double)((__c & (vector unsigned long long)__b) |
+ (~__c & (vector unsigned long long)__a));
+}
+
+static inline __ATTRS_o_ai vector double
+vec_sel(vector double __a, vector double __b, vector bool long long __c) {
+ vector unsigned long long __ac = (vector unsigned long long)__a;
+ vector unsigned long long __bc = (vector unsigned long long)__b;
+ vector unsigned long long __cc = (vector unsigned long long)__c;
+ return (vector double)((__cc & __bc) | (~__cc & __ac));
+}
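+
+/* Editor's note (illustration, not part of the header): vec_sel is a bitwise
+ * select -- result bits where __c is 1 come from __b, the rest from __a:
+ *
+ *   vector unsigned int __a = { 0, 0, 0, 0 };
+ *   vector unsigned int __b = { ~0U, ~0U, ~0U, ~0U };
+ *   vector unsigned int __m = { 0xff00ff00, 0, 0, 0 };
+ *   vector unsigned int __r = vec_sel(__a, __b, __m); // __r[0] == 0xff00ff00
+ */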
+
+/*-- vec_gather_element -----------------------------------------------------*/
+
+static inline __ATTRS_o_ai vector signed int
+vec_gather_element(vector signed int __vec, vector unsigned int __offset,
+ const signed int *__ptr, int __index)
+ __constant_range(__index, 0, 3) {
+ __vec[__index] = *(const signed int *)(
+ (__INTPTR_TYPE__)__ptr + (__INTPTR_TYPE__)__offset[__index]);
+ return __vec;
+}
+
+static inline __ATTRS_o_ai vector bool int
+vec_gather_element(vector bool int __vec, vector unsigned int __offset,
+ const unsigned int *__ptr, int __index)
+ __constant_range(__index, 0, 3) {
+ __vec[__index] = *(const unsigned int *)(
+ (__INTPTR_TYPE__)__ptr + (__INTPTR_TYPE__)__offset[__index]);
+ return __vec;
+}
+
+static inline __ATTRS_o_ai vector unsigned int
+vec_gather_element(vector unsigned int __vec, vector unsigned int __offset,
+ const unsigned int *__ptr, int __index)
+ __constant_range(__index, 0, 3) {
+ __vec[__index] = *(const unsigned int *)(
+ (__INTPTR_TYPE__)__ptr + (__INTPTR_TYPE__)__offset[__index]);
+ return __vec;
+}
+
+static inline __ATTRS_o_ai vector signed long long
+vec_gather_element(vector signed long long __vec,
+ vector unsigned long long __offset,
+ const signed long long *__ptr, int __index)
+ __constant_range(__index, 0, 1) {
+ __vec[__index] = *(const signed long long *)(
+ (__INTPTR_TYPE__)__ptr + (__INTPTR_TYPE__)__offset[__index]);
+ return __vec;
+}
+
+static inline __ATTRS_o_ai vector bool long long
+vec_gather_element(vector bool long long __vec,
+ vector unsigned long long __offset,
+ const unsigned long long *__ptr, int __index)
+ __constant_range(__index, 0, 1) {
+ __vec[__index] = *(const unsigned long long *)(
+ (__INTPTR_TYPE__)__ptr + (__INTPTR_TYPE__)__offset[__index]);
+ return __vec;
+}
+
+static inline __ATTRS_o_ai vector unsigned long long
+vec_gather_element(vector unsigned long long __vec,
+ vector unsigned long long __offset,
+ const unsigned long long *__ptr, int __index)
+ __constant_range(__index, 0, 1) {
+ __vec[__index] = *(const unsigned long long *)(
+ (__INTPTR_TYPE__)__ptr + (__INTPTR_TYPE__)__offset[__index]);
+ return __vec;
+}
+
+static inline __ATTRS_o_ai vector double
+vec_gather_element(vector double __vec, vector unsigned long long __offset,
+ const double *__ptr, int __index)
+ __constant_range(__index, 0, 1) {
+ __vec[__index] = *(const double *)(
+ (__INTPTR_TYPE__)__ptr + (__INTPTR_TYPE__)__offset[__index]);
+ return __vec;
+}
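+
+/* Editor's note (illustration, not part of the header): each __offset element
+ * is a byte offset from __ptr, not an element index -- note the integer
+ * pointer arithmetic above. The same convention applies to
+ * vec_scatter_element below.
+ */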
+
+/*-- vec_scatter_element ----------------------------------------------------*/
+
+static inline __ATTRS_o_ai void
+vec_scatter_element(vector signed int __vec, vector unsigned int __offset,
+ signed int *__ptr, int __index)
+ __constant_range(__index, 0, 3) {
+ *(signed int *)((__INTPTR_TYPE__)__ptr + __offset[__index]) =
+ __vec[__index];
+}
+
+static inline __ATTRS_o_ai void
+vec_scatter_element(vector bool int __vec, vector unsigned int __offset,
+ unsigned int *__ptr, int __index)
+ __constant_range(__index, 0, 3) {
+ *(unsigned int *)((__INTPTR_TYPE__)__ptr + __offset[__index]) =
+ __vec[__index];
+}
+
+static inline __ATTRS_o_ai void
+vec_scatter_element(vector unsigned int __vec, vector unsigned int __offset,
+ unsigned int *__ptr, int __index)
+ __constant_range(__index, 0, 3) {
+ *(unsigned int *)((__INTPTR_TYPE__)__ptr + __offset[__index]) =
+ __vec[__index];
+}
+
+static inline __ATTRS_o_ai void
+vec_scatter_element(vector signed long long __vec,
+ vector unsigned long long __offset,
+ signed long long *__ptr, int __index)
+ __constant_range(__index, 0, 1) {
+ *(signed long long *)((__INTPTR_TYPE__)__ptr + __offset[__index]) =
+ __vec[__index];
+}
+
+static inline __ATTRS_o_ai void
+vec_scatter_element(vector bool long long __vec,
+ vector unsigned long long __offset,
+ unsigned long long *__ptr, int __index)
+ __constant_range(__index, 0, 1) {
+ *(unsigned long long *)((__INTPTR_TYPE__)__ptr + __offset[__index]) =
+ __vec[__index];
+}
+
+static inline __ATTRS_o_ai void
+vec_scatter_element(vector unsigned long long __vec,
+ vector unsigned long long __offset,
+ unsigned long long *__ptr, int __index)
+ __constant_range(__index, 0, 1) {
+ *(unsigned long long *)((__INTPTR_TYPE__)__ptr + __offset[__index]) =
+ __vec[__index];
+}
+
+static inline __ATTRS_o_ai void
+vec_scatter_element(vector double __vec, vector unsigned long long __offset,
+ double *__ptr, int __index)
+ __constant_range(__index, 0, 1) {
+ *(double *)((__INTPTR_TYPE__)__ptr + __offset[__index]) =
+ __vec[__index];
+}
+
+/*-- vec_xld2 ---------------------------------------------------------------*/
+
+static inline __ATTRS_o_ai vector signed char
+vec_xld2(long __offset, const signed char *__ptr) {
+ return *(const vector signed char *)((__INTPTR_TYPE__)__ptr + __offset);
+}
+
+static inline __ATTRS_o_ai vector unsigned char
+vec_xld2(long __offset, const unsigned char *__ptr) {
+ return *(const vector unsigned char *)((__INTPTR_TYPE__)__ptr + __offset);
+}
+
+static inline __ATTRS_o_ai vector signed short
+vec_xld2(long __offset, const signed short *__ptr) {
+ return *(const vector signed short *)((__INTPTR_TYPE__)__ptr + __offset);
+}
+
+static inline __ATTRS_o_ai vector unsigned short
+vec_xld2(long __offset, const unsigned short *__ptr) {
+ return *(const vector unsigned short *)((__INTPTR_TYPE__)__ptr + __offset);
+}
+
+static inline __ATTRS_o_ai vector signed int
+vec_xld2(long __offset, const signed int *__ptr) {
+ return *(const vector signed int *)((__INTPTR_TYPE__)__ptr + __offset);
+}
+
+static inline __ATTRS_o_ai vector unsigned int
+vec_xld2(long __offset, const unsigned int *__ptr) {
+ return *(const vector unsigned int *)((__INTPTR_TYPE__)__ptr + __offset);
+}
+
+static inline __ATTRS_o_ai vector signed long long
+vec_xld2(long __offset, const signed long long *__ptr) {
+ return *(const vector signed long long *)((__INTPTR_TYPE__)__ptr + __offset);
+}
+
+static inline __ATTRS_o_ai vector unsigned long long
+vec_xld2(long __offset, const unsigned long long *__ptr) {
+ return *(const vector unsigned long long *)((__INTPTR_TYPE__)__ptr + __offset);
+}
+
+static inline __ATTRS_o_ai vector double
+vec_xld2(long __offset, const double *__ptr) {
+ return *(const vector double *)((__INTPTR_TYPE__)__ptr + __offset);
+}
+
+/*-- vec_xlw4 ---------------------------------------------------------------*/
+
+static inline __ATTRS_o_ai vector signed char
+vec_xlw4(long __offset, const signed char *__ptr) {
+ return *(const vector signed char *)((__INTPTR_TYPE__)__ptr + __offset);
+}
+
+static inline __ATTRS_o_ai vector unsigned char
+vec_xlw4(long __offset, const unsigned char *__ptr) {
+ return *(const vector unsigned char *)((__INTPTR_TYPE__)__ptr + __offset);
+}
+
+static inline __ATTRS_o_ai vector signed short
+vec_xlw4(long __offset, const signed short *__ptr) {
+ return *(const vector signed short *)((__INTPTR_TYPE__)__ptr + __offset);
+}
+
+static inline __ATTRS_o_ai vector unsigned short
+vec_xlw4(long __offset, const unsigned short *__ptr) {
+ return *(const vector unsigned short *)((__INTPTR_TYPE__)__ptr + __offset);
+}
+
+static inline __ATTRS_o_ai vector signed int
+vec_xlw4(long __offset, const signed int *__ptr) {
+ return *(const vector signed int *)((__INTPTR_TYPE__)__ptr + __offset);
+}
+
+static inline __ATTRS_o_ai vector unsigned int
+vec_xlw4(long __offset, const unsigned int *__ptr) {
+ return *(const vector unsigned int *)((__INTPTR_TYPE__)__ptr + __offset);
+}
+
+/*-- vec_xstd2 --------------------------------------------------------------*/
+
+static inline __ATTRS_o_ai void
+vec_xstd2(vector signed char __vec, long __offset, signed char *__ptr) {
+ *(vector signed char *)((__INTPTR_TYPE__)__ptr + __offset) = __vec;
+}
+
+static inline __ATTRS_o_ai void
+vec_xstd2(vector unsigned char __vec, long __offset, unsigned char *__ptr) {
+ *(vector unsigned char *)((__INTPTR_TYPE__)__ptr + __offset) = __vec;
+}
+
+static inline __ATTRS_o_ai void
+vec_xstd2(vector signed short __vec, long __offset, signed short *__ptr) {
+ *(vector signed short *)((__INTPTR_TYPE__)__ptr + __offset) = __vec;
+}
+
+static inline __ATTRS_o_ai void
+vec_xstd2(vector unsigned short __vec, long __offset, unsigned short *__ptr) {
+ *(vector unsigned short *)((__INTPTR_TYPE__)__ptr + __offset) = __vec;
+}
+
+static inline __ATTRS_o_ai void
+vec_xstd2(vector signed int __vec, long __offset, signed int *__ptr) {
+ *(vector signed int *)((__INTPTR_TYPE__)__ptr + __offset) = __vec;
+}
+
+static inline __ATTRS_o_ai void
+vec_xstd2(vector unsigned int __vec, long __offset, unsigned int *__ptr) {
+ *(vector unsigned int *)((__INTPTR_TYPE__)__ptr + __offset) = __vec;
+}
+
+static inline __ATTRS_o_ai void
+vec_xstd2(vector signed long long __vec, long __offset,
+ signed long long *__ptr) {
+ *(vector signed long long *)((__INTPTR_TYPE__)__ptr + __offset) = __vec;
+}
+
+static inline __ATTRS_o_ai void
+vec_xstd2(vector unsigned long long __vec, long __offset,
+ unsigned long long *__ptr) {
+ *(vector unsigned long long *)((__INTPTR_TYPE__)__ptr + __offset) =
+ __vec;
+}
+
+static inline __ATTRS_o_ai void
+vec_xstd2(vector double __vec, long __offset, double *__ptr) {
+ *(vector double *)((__INTPTR_TYPE__)__ptr + __offset) = __vec;
+}
+
+/*-- vec_xstw4 --------------------------------------------------------------*/
+
+static inline __ATTRS_o_ai void
+vec_xstw4(vector signed char __vec, long __offset, signed char *__ptr) {
+ *(vector signed char *)((__INTPTR_TYPE__)__ptr + __offset) = __vec;
+}
+
+static inline __ATTRS_o_ai void
+vec_xstw4(vector unsigned char __vec, long __offset, unsigned char *__ptr) {
+ *(vector unsigned char *)((__INTPTR_TYPE__)__ptr + __offset) = __vec;
+}
+
+static inline __ATTRS_o_ai void
+vec_xstw4(vector signed short __vec, long __offset, signed short *__ptr) {
+ *(vector signed short *)((__INTPTR_TYPE__)__ptr + __offset) = __vec;
+}
+
+static inline __ATTRS_o_ai void
+vec_xstw4(vector unsigned short __vec, long __offset, unsigned short *__ptr) {
+ *(vector unsigned short *)((__INTPTR_TYPE__)__ptr + __offset) = __vec;
+}
+
+static inline __ATTRS_o_ai void
+vec_xstw4(vector signed int __vec, long __offset, signed int *__ptr) {
+ *(vector signed int *)((__INTPTR_TYPE__)__ptr + __offset) = __vec;
+}
+
+static inline __ATTRS_o_ai void
+vec_xstw4(vector unsigned int __vec, long __offset, unsigned int *__ptr) {
+ *(vector unsigned int *)((__INTPTR_TYPE__)__ptr + __offset) = __vec;
+}
+
+/*-- vec_load_bndry ---------------------------------------------------------*/
+
+extern __ATTRS_o vector signed char
+vec_load_bndry(const signed char *__ptr, unsigned short __len)
+ __constant_pow2_range(__len, 64, 4096);
+
+extern __ATTRS_o vector unsigned char
+vec_load_bndry(const unsigned char *__ptr, unsigned short __len)
+ __constant_pow2_range(__len, 64, 4096);
+
+extern __ATTRS_o vector signed short
+vec_load_bndry(const signed short *__ptr, unsigned short __len)
+ __constant_pow2_range(__len, 64, 4096);
+
+extern __ATTRS_o vector unsigned short
+vec_load_bndry(const unsigned short *__ptr, unsigned short __len)
+ __constant_pow2_range(__len, 64, 4096);
+
+extern __ATTRS_o vector signed int
+vec_load_bndry(const signed int *__ptr, unsigned short __len)
+ __constant_pow2_range(__len, 64, 4096);
+
+extern __ATTRS_o vector unsigned int
+vec_load_bndry(const unsigned int *__ptr, unsigned short __len)
+ __constant_pow2_range(__len, 64, 4096);
+
+extern __ATTRS_o vector signed long long
+vec_load_bndry(const signed long long *__ptr, unsigned short __len)
+ __constant_pow2_range(__len, 64, 4096);
+
+extern __ATTRS_o vector unsigned long long
+vec_load_bndry(const unsigned long long *__ptr, unsigned short __len)
+ __constant_pow2_range(__len, 64, 4096);
+
+extern __ATTRS_o vector double
+vec_load_bndry(const double *__ptr, unsigned short __len)
+ __constant_pow2_range(__len, 64, 4096);
+
+#define vec_load_bndry(X, Y) ((__typeof__((vec_load_bndry)((X), (Y)))) \
+ __builtin_s390_vlbb((X), ((Y) == 64 ? 0 : \
+ (Y) == 128 ? 1 : \
+ (Y) == 256 ? 2 : \
+ (Y) == 512 ? 3 : \
+ (Y) == 1024 ? 4 : \
+ (Y) == 2048 ? 5 : \
+ (Y) == 4096 ? 6 : -1)))
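+
+/* Editor's note (illustration, not part of the header): loads at most 16
+ * bytes starting at __ptr but never reads past the next __len-byte boundary;
+ * elements beyond the boundary are unspecified. A non-power-of-two __len maps
+ * to the invalid immediate -1 above and is rejected at compile time.
+ */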
+
+/*-- vec_load_len -----------------------------------------------------------*/
+
+static inline __ATTRS_o_ai vector signed char
+vec_load_len(const signed char *__ptr, unsigned int __len) {
+ return (vector signed char)__builtin_s390_vll(__len, __ptr);
+}
+
+static inline __ATTRS_o_ai vector unsigned char
+vec_load_len(const unsigned char *__ptr, unsigned int __len) {
+ return (vector unsigned char)__builtin_s390_vll(__len, __ptr);
+}
+
+static inline __ATTRS_o_ai vector signed short
+vec_load_len(const signed short *__ptr, unsigned int __len) {
+ return (vector signed short)__builtin_s390_vll(__len, __ptr);
+}
+
+static inline __ATTRS_o_ai vector unsigned short
+vec_load_len(const unsigned short *__ptr, unsigned int __len) {
+ return (vector unsigned short)__builtin_s390_vll(__len, __ptr);
+}
+
+static inline __ATTRS_o_ai vector signed int
+vec_load_len(const signed int *__ptr, unsigned int __len) {
+ return (vector signed int)__builtin_s390_vll(__len, __ptr);
+}
+
+static inline __ATTRS_o_ai vector unsigned int
+vec_load_len(const unsigned int *__ptr, unsigned int __len) {
+ return (vector unsigned int)__builtin_s390_vll(__len, __ptr);
+}
+
+static inline __ATTRS_o_ai vector signed long long
+vec_load_len(const signed long long *__ptr, unsigned int __len) {
+ return (vector signed long long)__builtin_s390_vll(__len, __ptr);
+}
+
+static inline __ATTRS_o_ai vector unsigned long long
+vec_load_len(const unsigned long long *__ptr, unsigned int __len) {
+ return (vector unsigned long long)__builtin_s390_vll(__len, __ptr);
+}
+
+static inline __ATTRS_o_ai vector double
+vec_load_len(const double *__ptr, unsigned int __len) {
+ return (vector double)__builtin_s390_vll(__len, __ptr);
+}
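+
+/* Editor's note (illustration, not part of the header): __len is the 0-based
+ * index of the last byte to load, so __len + 1 bytes are read (capped at 16)
+ * and the remaining bytes of the vector are zero-filled; vec_store_len below
+ * writes bytes 0 through __len the same way:
+ *
+ *   char __buf[4] = { 1, 2, 3, 4 };
+ *   vector signed char __v = vec_load_len((const signed char *)__buf, 3);
+ *   // __v = { 1, 2, 3, 4, 0, 0, ... }
+ */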
+
+/*-- vec_store_len ----------------------------------------------------------*/
+
+static inline __ATTRS_o_ai void
+vec_store_len(vector signed char __vec, signed char *__ptr,
+ unsigned int __len) {
+ __builtin_s390_vstl((vector signed char)__vec, __len, __ptr);
+}
+
+static inline __ATTRS_o_ai void
+vec_store_len(vector unsigned char __vec, unsigned char *__ptr,
+ unsigned int __len) {
+ __builtin_s390_vstl((vector signed char)__vec, __len, __ptr);
+}
+
+static inline __ATTRS_o_ai void
+vec_store_len(vector signed short __vec, signed short *__ptr,
+ unsigned int __len) {
+ __builtin_s390_vstl((vector signed char)__vec, __len, __ptr);
+}
+
+static inline __ATTRS_o_ai void
+vec_store_len(vector unsigned short __vec, unsigned short *__ptr,
+ unsigned int __len) {
+ __builtin_s390_vstl((vector signed char)__vec, __len, __ptr);
+}
+
+static inline __ATTRS_o_ai void
+vec_store_len(vector signed int __vec, signed int *__ptr,
+ unsigned int __len) {
+ __builtin_s390_vstl((vector signed char)__vec, __len, __ptr);
+}
+
+static inline __ATTRS_o_ai void
+vec_store_len(vector unsigned int __vec, unsigned int *__ptr,
+ unsigned int __len) {
+ __builtin_s390_vstl((vector signed char)__vec, __len, __ptr);
+}
+
+static inline __ATTRS_o_ai void
+vec_store_len(vector signed long long __vec, signed long long *__ptr,
+ unsigned int __len) {
+ __builtin_s390_vstl((vector signed char)__vec, __len, __ptr);
+}
+
+static inline __ATTRS_o_ai void
+vec_store_len(vector unsigned long long __vec, unsigned long long *__ptr,
+ unsigned int __len) {
+ __builtin_s390_vstl((vector signed char)__vec, __len, __ptr);
+}
+
+static inline __ATTRS_o_ai void
+vec_store_len(vector double __vec, double *__ptr,
+ unsigned int __len) {
+ __builtin_s390_vstl((vector signed char)__vec, __len, __ptr);
+}
+
+/*-- vec_load_pair ----------------------------------------------------------*/
+
+static inline __ATTRS_o_ai vector signed long long
+vec_load_pair(signed long long __a, signed long long __b) {
+ return (vector signed long long)(__a, __b);
+}
+
+static inline __ATTRS_o_ai vector unsigned long long
+vec_load_pair(unsigned long long __a, unsigned long long __b) {
+ return (vector unsigned long long)(__a, __b);
+}
+
+/*-- vec_genmask ------------------------------------------------------------*/
+
+static inline __ATTRS_o_ai vector unsigned char
+vec_genmask(unsigned short __mask)
+ __constant(__mask) {
+ return (vector unsigned char)(
+ __mask & 0x8000 ? 0xff : 0,
+ __mask & 0x4000 ? 0xff : 0,
+ __mask & 0x2000 ? 0xff : 0,
+ __mask & 0x1000 ? 0xff : 0,
+ __mask & 0x0800 ? 0xff : 0,
+ __mask & 0x0400 ? 0xff : 0,
+ __mask & 0x0200 ? 0xff : 0,
+ __mask & 0x0100 ? 0xff : 0,
+ __mask & 0x0080 ? 0xff : 0,
+ __mask & 0x0040 ? 0xff : 0,
+ __mask & 0x0020 ? 0xff : 0,
+ __mask & 0x0010 ? 0xff : 0,
+ __mask & 0x0008 ? 0xff : 0,
+ __mask & 0x0004 ? 0xff : 0,
+ __mask & 0x0002 ? 0xff : 0,
+ __mask & 0x0001 ? 0xff : 0);
+}
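+
+/* Editor's note (illustration, not part of the header): each of the 16 mask
+ * bits, most significant first, expands to one 0xff or 0x00 byte:
+ *
+ *   vector unsigned char __m = vec_genmask(0x8001);
+ *   // __m = { 0xff, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0xff }
+ */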
+
+/*-- vec_genmasks_* ---------------------------------------------------------*/
+
+static inline __ATTRS_o_ai vector unsigned char
+vec_genmasks_8(unsigned char __first, unsigned char __last)
+ __constant(__first) __constant(__last) {
+ unsigned char __bit1 = __first & 7;
+ unsigned char __bit2 = __last & 7;
+ unsigned char __mask1 = (unsigned char)(1U << (7 - __bit1) << 1) - 1;
+ unsigned char __mask2 = (unsigned char)(1U << (7 - __bit2)) - 1;
+ unsigned char __value = (__bit1 <= __bit2 ?
+ __mask1 & ~__mask2 :
+ __mask1 | ~__mask2);
+ return (vector unsigned char)__value;
+}
+
+static inline __ATTRS_o_ai vector unsigned short
+vec_genmasks_16(unsigned char __first, unsigned char __last)
+ __constant(__first) __constant(__last) {
+ unsigned char __bit1 = __first & 15;
+ unsigned char __bit2 = __last & 15;
+ unsigned short __mask1 = (unsigned short)(1U << (15 - __bit1) << 1) - 1;
+ unsigned short __mask2 = (unsigned short)(1U << (15 - __bit2)) - 1;
+ unsigned short __value = (__bit1 <= __bit2 ?
+ __mask1 & ~__mask2 :
+ __mask1 | ~__mask2);
+ return (vector unsigned short)__value;
+}
+
+static inline __ATTRS_o_ai vector unsigned int
+vec_genmasks_32(unsigned char __first, unsigned char __last)
+ __constant(__first) __constant(__last) {
+ unsigned char __bit1 = __first & 31;
+ unsigned char __bit2 = __last & 31;
+ unsigned int __mask1 = (1U << (31 - __bit1) << 1) - 1;
+ unsigned int __mask2 = (1U << (31 - __bit2)) - 1;
+ unsigned int __value = (__bit1 <= __bit2 ?
+ __mask1 & ~__mask2 :
+ __mask1 | ~__mask2);
+ return (vector unsigned int)__value;
+}
+
+static inline __ATTRS_o_ai vector unsigned long long
+vec_genmasks_64(unsigned char __first, unsigned char __last)
+ __constant(__first) __constant(__last) {
+ unsigned char __bit1 = __first & 63;
+ unsigned char __bit2 = __last & 63;
+ unsigned long long __mask1 = (1ULL << (63 - __bit1) << 1) - 1;
+ unsigned long long __mask2 = (1ULL << (63 - __bit2)) - 1;
+ unsigned long long __value = (__bit1 <= __bit2 ?
+ __mask1 & ~__mask2 :
+ __mask1 | ~__mask2);
+ return (vector unsigned long long)__value;
+}
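+
+/* Editor's note (illustration, not part of the header): bits are numbered
+ * from the most significant (bit 0) downward, and the mask covers bits
+ * __first through __last inclusive; if __first > __last the selection wraps
+ * around both ends:
+ *
+ *   vector unsigned int __m = vec_genmasks_32(8, 15);
+ *   // every element == 0x00ff0000
+ */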
+
+/*-- vec_splat --------------------------------------------------------------*/
+
+static inline __ATTRS_o_ai vector signed char
+vec_splat(vector signed char __vec, int __index)
+ __constant_range(__index, 0, 15) {
+ return (vector signed char)__vec[__index];
+}
+
+static inline __ATTRS_o_ai vector bool char
+vec_splat(vector bool char __vec, int __index)
+ __constant_range(__index, 0, 15) {
+ return (vector bool char)(vector unsigned char)__vec[__index];
+}
+
+static inline __ATTRS_o_ai vector unsigned char
+vec_splat(vector unsigned char __vec, int __index)
+ __constant_range(__index, 0, 15) {
+ return (vector unsigned char)__vec[__index];
+}
+
+static inline __ATTRS_o_ai vector signed short
+vec_splat(vector signed short __vec, int __index)
+ __constant_range(__index, 0, 7) {
+ return (vector signed short)__vec[__index];
+}
+
+static inline __ATTRS_o_ai vector bool short
+vec_splat(vector bool short __vec, int __index)
+ __constant_range(__index, 0, 7) {
+ return (vector bool short)(vector unsigned short)__vec[__index];
+}
+
+static inline __ATTRS_o_ai vector unsigned short
+vec_splat(vector unsigned short __vec, int __index)
+ __constant_range(__index, 0, 7) {
+ return (vector unsigned short)__vec[__index];
+}
+
+static inline __ATTRS_o_ai vector signed int
+vec_splat(vector signed int __vec, int __index)
+ __constant_range(__index, 0, 3) {
+ return (vector signed int)__vec[__index];
+}
+
+static inline __ATTRS_o_ai vector bool int
+vec_splat(vector bool int __vec, int __index)
+ __constant_range(__index, 0, 3) {
+ return (vector bool int)(vector unsigned int)__vec[__index];
+}
+
+static inline __ATTRS_o_ai vector unsigned int
+vec_splat(vector unsigned int __vec, int __index)
+ __constant_range(__index, 0, 3) {
+ return (vector unsigned int)__vec[__index];
+}
+
+static inline __ATTRS_o_ai vector signed long long
+vec_splat(vector signed long long __vec, int __index)
+ __constant_range(__index, 0, 1) {
+ return (vector signed long long)__vec[__index];
+}
+
+static inline __ATTRS_o_ai vector bool long long
+vec_splat(vector bool long long __vec, int __index)
+ __constant_range(__index, 0, 1) {
+ return (vector bool long long)(vector unsigned long long)__vec[__index];
+}
+
+static inline __ATTRS_o_ai vector unsigned long long
+vec_splat(vector unsigned long long __vec, int __index)
+ __constant_range(__index, 0, 1) {
+ return (vector unsigned long long)__vec[__index];
+}
+
+static inline __ATTRS_o_ai vector double
+vec_splat(vector double __vec, int __index)
+ __constant_range(__index, 0, 1) {
+ return (vector double)__vec[__index];
+}
+
+/*-- vec_splat_s* -----------------------------------------------------------*/
+
+static inline __ATTRS_ai vector signed char
+vec_splat_s8(signed char __scalar)
+ __constant(__scalar) {
+ return (vector signed char)__scalar;
+}
+
+static inline __ATTRS_ai vector signed short
+vec_splat_s16(signed short __scalar)
+ __constant(__scalar) {
+ return (vector signed short)__scalar;
+}
+
+static inline __ATTRS_ai vector signed int
+vec_splat_s32(signed short __scalar)
+ __constant(__scalar) {
+ return (vector signed int)(signed int)__scalar;
+}
+
+static inline __ATTRS_ai vector signed long long
+vec_splat_s64(signed short __scalar)
+ __constant(__scalar) {
+ return (vector signed long long)(signed long)__scalar;
+}
+
+/*-- vec_splat_u* -----------------------------------------------------------*/
+
+static inline __ATTRS_ai vector unsigned char
+vec_splat_u8(unsigned char __scalar)
+ __constant(__scalar) {
+ return (vector unsigned char)__scalar;
+}
+
+static inline __ATTRS_ai vector unsigned short
+vec_splat_u16(unsigned short __scalar)
+ __constant(__scalar) {
+ return (vector unsigned short)__scalar;
+}
+
+static inline __ATTRS_ai vector unsigned int
+vec_splat_u32(signed short __scalar)
+ __constant(__scalar) {
+ return (vector unsigned int)(signed int)__scalar;
+}
+
+static inline __ATTRS_ai vector unsigned long long
+vec_splat_u64(signed short __scalar)
+ __constant(__scalar) {
+ return (vector unsigned long long)(signed long long)__scalar;
+}
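+
+/* Editor's note (illustration, not part of the header): the 16-bit parameter
+ * type of the 32- and 64-bit splat-immediate variants above mirrors the
+ * signed 16-bit immediate of the underlying replicate-immediate instruction;
+ * the value is sign-extended to the element width, e.g.
+ * vec_splat_s32(-1) == (vector signed int)-1.
+ */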
+
+/*-- vec_splats -------------------------------------------------------------*/
+
+static inline __ATTRS_o_ai vector signed char
+vec_splats(signed char __scalar) {
+ return (vector signed char)__scalar;
+}
+
+static inline __ATTRS_o_ai vector unsigned char
+vec_splats(unsigned char __scalar) {
+ return (vector unsigned char)__scalar;
+}
+
+static inline __ATTRS_o_ai vector signed short
+vec_splats(signed short __scalar) {
+ return (vector signed short)__scalar;
+}
+
+static inline __ATTRS_o_ai vector unsigned short
+vec_splats(unsigned short __scalar) {
+ return (vector unsigned short)__scalar;
+}
+
+static inline __ATTRS_o_ai vector signed int
+vec_splats(signed int __scalar) {
+ return (vector signed int)__scalar;
+}
+
+static inline __ATTRS_o_ai vector unsigned int
+vec_splats(unsigned int __scalar) {
+ return (vector unsigned int)__scalar;
+}
+
+static inline __ATTRS_o_ai vector signed long long
+vec_splats(signed long long __scalar) {
+ return (vector signed long long)__scalar;
+}
+
+static inline __ATTRS_o_ai vector unsigned long long
+vec_splats(unsigned long long __scalar) {
+ return (vector unsigned long long)__scalar;
+}
+
+static inline __ATTRS_o_ai vector double
+vec_splats(double __scalar) {
+ return (vector double)__scalar;
+}
+
+/*-- vec_extend_s64 ---------------------------------------------------------*/
+
+static inline __ATTRS_o_ai vector signed long long
+vec_extend_s64(vector signed char __a) {
+ return (vector signed long long)(__a[7], __a[15]);
+}
+
+static inline __ATTRS_o_ai vector signed long long
+vec_extend_s64(vector signed short __a) {
+ return (vector signed long long)(__a[3], __a[7]);
+}
+
+static inline __ATTRS_o_ai vector signed long long
+vec_extend_s64(vector signed int __a) {
+ return (vector signed long long)(__a[1], __a[3]);
+}
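+
+/* Editor's note (illustration, not part of the header): sign-extends the
+ * element occupying the rightmost bytes of each doubleword (elements 7 and 15
+ * for char, 3 and 7 for short, 1 and 3 for int) into the two long long lanes.
+ */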
+
+/*-- vec_mergeh -------------------------------------------------------------*/
+
+static inline __ATTRS_o_ai vector signed char
+vec_mergeh(vector signed char __a, vector signed char __b) {
+ return (vector signed char)(
+ __a[0], __b[0], __a[1], __b[1], __a[2], __b[2], __a[3], __b[3],
+ __a[4], __b[4], __a[5], __b[5], __a[6], __b[6], __a[7], __b[7]);
+}
+
+static inline __ATTRS_o_ai vector bool char
+vec_mergeh(vector bool char __a, vector bool char __b) {
+ return (vector bool char)(
+ __a[0], __b[0], __a[1], __b[1], __a[2], __b[2], __a[3], __b[3],
+ __a[4], __b[4], __a[5], __b[5], __a[6], __b[6], __a[7], __b[7]);
+}
+
+static inline __ATTRS_o_ai vector unsigned char
+vec_mergeh(vector unsigned char __a, vector unsigned char __b) {
+ return (vector unsigned char)(
+ __a[0], __b[0], __a[1], __b[1], __a[2], __b[2], __a[3], __b[3],
+ __a[4], __b[4], __a[5], __b[5], __a[6], __b[6], __a[7], __b[7]);
+}
+
+static inline __ATTRS_o_ai vector signed short
+vec_mergeh(vector signed short __a, vector signed short __b) {
+ return (vector signed short)(
+ __a[0], __b[0], __a[1], __b[1], __a[2], __b[2], __a[3], __b[3]);
+}
+
+static inline __ATTRS_o_ai vector bool short
+vec_mergeh(vector bool short __a, vector bool short __b) {
+ return (vector bool short)(
+ __a[0], __b[0], __a[1], __b[1], __a[2], __b[2], __a[3], __b[3]);
+}
+
+static inline __ATTRS_o_ai vector unsigned short
+vec_mergeh(vector unsigned short __a, vector unsigned short __b) {
+ return (vector unsigned short)(
+ __a[0], __b[0], __a[1], __b[1], __a[2], __b[2], __a[3], __b[3]);
+}
+
+static inline __ATTRS_o_ai vector signed int
+vec_mergeh(vector signed int __a, vector signed int __b) {
+ return (vector signed int)(__a[0], __b[0], __a[1], __b[1]);
+}
+
+static inline __ATTRS_o_ai vector bool int
+vec_mergeh(vector bool int __a, vector bool int __b) {
+ return (vector bool int)(__a[0], __b[0], __a[1], __b[1]);
+}
+
+static inline __ATTRS_o_ai vector unsigned int
+vec_mergeh(vector unsigned int __a, vector unsigned int __b) {
+ return (vector unsigned int)(__a[0], __b[0], __a[1], __b[1]);
+}
+
+static inline __ATTRS_o_ai vector signed long long
+vec_mergeh(vector signed long long __a, vector signed long long __b) {
+ return (vector signed long long)(__a[0], __b[0]);
+}
+
+static inline __ATTRS_o_ai vector bool long long
+vec_mergeh(vector bool long long __a, vector bool long long __b) {
+ return (vector bool long long)(__a[0], __b[0]);
+}
+
+static inline __ATTRS_o_ai vector unsigned long long
+vec_mergeh(vector unsigned long long __a, vector unsigned long long __b) {
+ return (vector unsigned long long)(__a[0], __b[0]);
+}
+
+static inline __ATTRS_o_ai vector double
+vec_mergeh(vector double __a, vector double __b) {
+ return (vector double)(__a[0], __b[0]);
+}
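+
+/* Editor's note (illustration, not part of the header): vec_mergeh
+ * interleaves the high (leftmost) halves of __a and __b; vec_mergel below
+ * does the same for the low halves:
+ *
+ *   vector signed int __a = { 1, 2, 3, 4 }, __b = { 5, 6, 7, 8 };
+ *   vector signed int __h = vec_mergeh(__a, __b);  // { 1, 5, 2, 6 }
+ */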
+
+/*-- vec_mergel -------------------------------------------------------------*/
+
+static inline __ATTRS_o_ai vector signed char
+vec_mergel(vector signed char __a, vector signed char __b) {
+ return (vector signed char)(
+ __a[8], __b[8], __a[9], __b[9], __a[10], __b[10], __a[11], __b[11],
+ __a[12], __b[12], __a[13], __b[13], __a[14], __b[14], __a[15], __b[15]);
+}
+
+static inline __ATTRS_o_ai vector bool char
+vec_mergel(vector bool char __a, vector bool char __b) {
+ return (vector bool char)(
+ __a[8], __b[8], __a[9], __b[9], __a[10], __b[10], __a[11], __b[11],
+ __a[12], __b[12], __a[13], __b[13], __a[14], __b[14], __a[15], __b[15]);
+}
+
+static inline __ATTRS_o_ai vector unsigned char
+vec_mergel(vector unsigned char __a, vector unsigned char __b) {
+ return (vector unsigned char)(
+ __a[8], __b[8], __a[9], __b[9], __a[10], __b[10], __a[11], __b[11],
+ __a[12], __b[12], __a[13], __b[13], __a[14], __b[14], __a[15], __b[15]);
+}
+
+static inline __ATTRS_o_ai vector signed short
+vec_mergel(vector signed short __a, vector signed short __b) {
+ return (vector signed short)(
+ __a[4], __b[4], __a[5], __b[5], __a[6], __b[6], __a[7], __b[7]);
+}
+
+static inline __ATTRS_o_ai vector bool short
+vec_mergel(vector bool short __a, vector bool short __b) {
+ return (vector bool short)(
+ __a[4], __b[4], __a[5], __b[5], __a[6], __b[6], __a[7], __b[7]);
+}
+
+static inline __ATTRS_o_ai vector unsigned short
+vec_mergel(vector unsigned short __a, vector unsigned short __b) {
+ return (vector unsigned short)(
+ __a[4], __b[4], __a[5], __b[5], __a[6], __b[6], __a[7], __b[7]);
+}
+
+static inline __ATTRS_o_ai vector signed int
+vec_mergel(vector signed int __a, vector signed int __b) {
+ return (vector signed int)(__a[2], __b[2], __a[3], __b[3]);
+}
+
+static inline __ATTRS_o_ai vector bool int
+vec_mergel(vector bool int __a, vector bool int __b) {
+ return (vector bool int)(__a[2], __b[2], __a[3], __b[3]);
+}
+
+static inline __ATTRS_o_ai vector unsigned int
+vec_mergel(vector unsigned int __a, vector unsigned int __b) {
+ return (vector unsigned int)(__a[2], __b[2], __a[3], __b[3]);
+}
+
+static inline __ATTRS_o_ai vector signed long long
+vec_mergel(vector signed long long __a, vector signed long long __b) {
+ return (vector signed long long)(__a[1], __b[1]);
+}
+
+static inline __ATTRS_o_ai vector bool long long
+vec_mergel(vector bool long long __a, vector bool long long __b) {
+ return (vector bool long long)(__a[1], __b[1]);
+}
+
+static inline __ATTRS_o_ai vector unsigned long long
+vec_mergel(vector unsigned long long __a, vector unsigned long long __b) {
+ return (vector unsigned long long)(__a[1], __b[1]);
+}
+
+static inline __ATTRS_o_ai vector double
+vec_mergel(vector double __a, vector double __b) {
+ return (vector double)(__a[1], __b[1]);
+}
+
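+/* A minimal usage sketch of the merge operations (illustrative only;
+   assumes a z13 target compiled with -mzvector):
+
+     vector signed int __x = (vector signed int)(1, 2, 3, 4);
+     vector signed int __y = (vector signed int)(5, 6, 7, 8);
+     vector signed int __hi = vec_mergeh(__x, __y);  // (1, 5, 2, 6)
+     vector signed int __lo = vec_mergel(__x, __y);  // (3, 7, 4, 8)
+
+   vec_mergeh interleaves the first halves of the two operands and
+   vec_mergel the second halves, as the element patterns above show. */
+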
+/*-- vec_pack ---------------------------------------------------------------*/
+
+static inline __ATTRS_o_ai vector signed char
+vec_pack(vector signed short __a, vector signed short __b) {
+ vector signed char __ac = (vector signed char)__a;
+ vector signed char __bc = (vector signed char)__b;
+ return (vector signed char)(
+ __ac[1], __ac[3], __ac[5], __ac[7], __ac[9], __ac[11], __ac[13], __ac[15],
+ __bc[1], __bc[3], __bc[5], __bc[7], __bc[9], __bc[11], __bc[13], __bc[15]);
+}
+
+static inline __ATTRS_o_ai vector bool char
+vec_pack(vector bool short __a, vector bool short __b) {
+ vector bool char __ac = (vector bool char)__a;
+ vector bool char __bc = (vector bool char)__b;
+ return (vector bool char)(
+ __ac[1], __ac[3], __ac[5], __ac[7], __ac[9], __ac[11], __ac[13], __ac[15],
+ __bc[1], __bc[3], __bc[5], __bc[7], __bc[9], __bc[11], __bc[13], __bc[15]);
+}
+
+static inline __ATTRS_o_ai vector unsigned char
+vec_pack(vector unsigned short __a, vector unsigned short __b) {
+ vector unsigned char __ac = (vector unsigned char)__a;
+ vector unsigned char __bc = (vector unsigned char)__b;
+ return (vector unsigned char)(
+ __ac[1], __ac[3], __ac[5], __ac[7], __ac[9], __ac[11], __ac[13], __ac[15],
+ __bc[1], __bc[3], __bc[5], __bc[7], __bc[9], __bc[11], __bc[13], __bc[15]);
+}
+
+static inline __ATTRS_o_ai vector signed short
+vec_pack(vector signed int __a, vector signed int __b) {
+ vector signed short __ac = (vector signed short)__a;
+ vector signed short __bc = (vector signed short)__b;
+ return (vector signed short)(
+ __ac[1], __ac[3], __ac[5], __ac[7],
+ __bc[1], __bc[3], __bc[5], __bc[7]);
+}
+
+static inline __ATTRS_o_ai vector bool short
+vec_pack(vector bool int __a, vector bool int __b) {
+ vector bool short __ac = (vector bool short)__a;
+ vector bool short __bc = (vector bool short)__b;
+ return (vector bool short)(
+ __ac[1], __ac[3], __ac[5], __ac[7],
+ __bc[1], __bc[3], __bc[5], __bc[7]);
+}
+
+static inline __ATTRS_o_ai vector unsigned short
+vec_pack(vector unsigned int __a, vector unsigned int __b) {
+ vector unsigned short __ac = (vector unsigned short)__a;
+ vector unsigned short __bc = (vector unsigned short)__b;
+ return (vector unsigned short)(
+ __ac[1], __ac[3], __ac[5], __ac[7],
+ __bc[1], __bc[3], __bc[5], __bc[7]);
+}
+
+static inline __ATTRS_o_ai vector signed int
+vec_pack(vector signed long long __a, vector signed long long __b) {
+ vector signed int __ac = (vector signed int)__a;
+ vector signed int __bc = (vector signed int)__b;
+ return (vector signed int)(__ac[1], __ac[3], __bc[1], __bc[3]);
+}
+
+static inline __ATTRS_o_ai vector bool int
+vec_pack(vector bool long long __a, vector bool long long __b) {
+ vector bool int __ac = (vector bool int)__a;
+ vector bool int __bc = (vector bool int)__b;
+ return (vector bool int)(__ac[1], __ac[3], __bc[1], __bc[3]);
+}
+
+static inline __ATTRS_o_ai vector unsigned int
+vec_pack(vector unsigned long long __a, vector unsigned long long __b) {
+ vector unsigned int __ac = (vector unsigned int)__a;
+ vector unsigned int __bc = (vector unsigned int)__b;
+ return (vector unsigned int)(__ac[1], __ac[3], __bc[1], __bc[3]);
+}
+
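+/* vec_pack keeps only the low-order half of each wider element (the
+   odd-numbered narrow elements on this big-endian target) and
+   concatenates the halves taken from __a and __b; values that do not
+   fit are truncated modulo the narrow type, with no saturation. A
+   minimal sketch (illustrative only):
+
+     vector signed int __v = (vector signed int)(0x10002, 3, 4, 5);
+     // vec_pack(__v, __v)[0] == 2: the high halfword 0x0001 is dropped.
+*/
+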
+/*-- vec_packs --------------------------------------------------------------*/
+
+static inline __ATTRS_o_ai vector signed char
+vec_packs(vector signed short __a, vector signed short __b) {
+ return __builtin_s390_vpksh(__a, __b);
+}
+
+static inline __ATTRS_o_ai vector unsigned char
+vec_packs(vector unsigned short __a, vector unsigned short __b) {
+ return __builtin_s390_vpklsh(__a, __b);
+}
+
+static inline __ATTRS_o_ai vector signed short
+vec_packs(vector signed int __a, vector signed int __b) {
+ return __builtin_s390_vpksf(__a, __b);
+}
+
+static inline __ATTRS_o_ai vector unsigned short
+vec_packs(vector unsigned int __a, vector unsigned int __b) {
+ return __builtin_s390_vpklsf(__a, __b);
+}
+
+static inline __ATTRS_o_ai vector signed int
+vec_packs(vector signed long long __a, vector signed long long __b) {
+ return __builtin_s390_vpksg(__a, __b);
+}
+
+static inline __ATTRS_o_ai vector unsigned int
+vec_packs(vector unsigned long long __a, vector unsigned long long __b) {
+ return __builtin_s390_vpklsg(__a, __b);
+}
+
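+/* Unlike vec_pack, vec_packs saturates: each element is clamped to the
+   range of the narrow result type instead of being truncated. A
+   minimal sketch (illustrative only):
+
+     vector signed int __big = (vector signed int)(100000, -100000, 0, 1);
+     // vec_packs(__big, __big) starts with (32767, -32768, 0, 1, ...):
+     // 100000 clamps to SHRT_MAX and -100000 to SHRT_MIN.
+*/
+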
+/*-- vec_packs_cc -----------------------------------------------------------*/
+
+static inline __ATTRS_o_ai vector signed char
+vec_packs_cc(vector signed short __a, vector signed short __b, int *__cc) {
+ return __builtin_s390_vpkshs(__a, __b, __cc);
+}
+
+static inline __ATTRS_o_ai vector unsigned char
+vec_packs_cc(vector unsigned short __a, vector unsigned short __b, int *__cc) {
+ return __builtin_s390_vpklshs(__a, __b, __cc);
+}
+
+static inline __ATTRS_o_ai vector signed short
+vec_packs_cc(vector signed int __a, vector signed int __b, int *__cc) {
+ return __builtin_s390_vpksfs(__a, __b, __cc);
+}
+
+static inline __ATTRS_o_ai vector unsigned short
+vec_packs_cc(vector unsigned int __a, vector unsigned int __b, int *__cc) {
+ return __builtin_s390_vpklsfs(__a, __b, __cc);
+}
+
+static inline __ATTRS_o_ai vector signed int
+vec_packs_cc(vector signed long long __a, vector signed long long __b,
+ int *__cc) {
+ return __builtin_s390_vpksgs(__a, __b, __cc);
+}
+
+static inline __ATTRS_o_ai vector unsigned int
+vec_packs_cc(vector unsigned long long __a, vector unsigned long long __b,
+ int *__cc) {
+ return __builtin_s390_vpklsgs(__a, __b, __cc);
+}
+
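+/* The _cc variants behave like vec_packs but additionally store the
+   instruction's condition code through *__cc, so a caller can detect
+   whether saturation occurred. A minimal sketch (illustrative only;
+   __h1 and __h2 stand for arbitrary vector signed short values):
+
+     int __cc;
+     vector signed char __r = vec_packs_cc(__h1, __h2, &__cc);
+     // __cc == 0 is the no-saturation case.
+*/
+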
+/*-- vec_packsu -------------------------------------------------------------*/
+
+static inline __ATTRS_o_ai vector unsigned char
+vec_packsu(vector signed short __a, vector signed short __b) {
+ const vector signed short __zero = (vector signed short)0;
+ return __builtin_s390_vpklsh(
+ (vector unsigned short)(__a >= __zero) & (vector unsigned short)__a,
+ (vector unsigned short)(__b >= __zero) & (vector unsigned short)__b);
+}
+
+static inline __ATTRS_o_ai vector unsigned char
+vec_packsu(vector unsigned short __a, vector unsigned short __b) {
+ return __builtin_s390_vpklsh(__a, __b);
+}
+
+static inline __ATTRS_o_ai vector unsigned short
+vec_packsu(vector signed int __a, vector signed int __b) {
+ const vector signed int __zero = (vector signed int)0;
+ return __builtin_s390_vpklsf(
+ (vector unsigned int)(__a >= __zero) & (vector unsigned int)__a,
+ (vector unsigned int)(__b >= __zero) & (vector unsigned int)__b);
+}
+
+static inline __ATTRS_o_ai vector unsigned short
+vec_packsu(vector unsigned int __a, vector unsigned int __b) {
+ return __builtin_s390_vpklsf(__a, __b);
+}
+
+static inline __ATTRS_o_ai vector unsigned int
+vec_packsu(vector signed long long __a, vector signed long long __b) {
+ const vector signed long long __zero = (vector signed long long)0;
+ return __builtin_s390_vpklsg(
+ (vector unsigned long long)(__a >= __zero) &
+ (vector unsigned long long)__a,
+ (vector unsigned long long)(__b >= __zero) &
+ (vector unsigned long long)__b);
+}
+
+static inline __ATTRS_o_ai vector unsigned int
+vec_packsu(vector unsigned long long __a, vector unsigned long long __b) {
+ return __builtin_s390_vpklsg(__a, __b);
+}
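+/* For signed inputs, vec_packsu first clears the negative lanes: the
+   comparison (__a >= __zero) produces an all-ones mask in nonnegative
+   lanes and all-zeros elsewhere, so the bitwise AND maps negative
+   elements to 0 before the unsigned saturating pack. The net effect is
+   the usual signed-to-unsigned saturation: negative values become 0,
+   and values above the unsigned maximum clamp to that maximum. */
+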
+
+/*-- vec_packsu_cc ----------------------------------------------------------*/
+
+static inline __ATTRS_o_ai vector unsigned char
+vec_packsu_cc(vector unsigned short __a, vector unsigned short __b, int *__cc) {
+ return __builtin_s390_vpklshs(__a, __b, __cc);
+}
+
+static inline __ATTRS_o_ai vector unsigned short
+vec_packsu_cc(vector unsigned int __a, vector unsigned int __b, int *__cc) {
+ return __builtin_s390_vpklsfs(__a, __b, __cc);
+}
+
+static inline __ATTRS_o_ai vector unsigned int
+vec_packsu_cc(vector unsigned long long __a, vector unsigned long long __b,
+ int *__cc) {
+ return __builtin_s390_vpklsgs(__a, __b, __cc);
+}
+
+/*-- vec_unpackh ------------------------------------------------------------*/
+
+static inline __ATTRS_o_ai vector signed short
+vec_unpackh(vector signed char __a) {
+ return __builtin_s390_vuphb(__a);
+}
+
+static inline __ATTRS_o_ai vector bool short
+vec_unpackh(vector bool char __a) {
+ return (vector bool short)__builtin_s390_vuphb((vector signed char)__a);
+}
+
+static inline __ATTRS_o_ai vector unsigned short
+vec_unpackh(vector unsigned char __a) {
+ return __builtin_s390_vuplhb(__a);
+}
+
+static inline __ATTRS_o_ai vector signed int
+vec_unpackh(vector signed short __a) {
+ return __builtin_s390_vuphh(__a);
+}
+
+static inline __ATTRS_o_ai vector bool int
+vec_unpackh(vector bool short __a) {
+ return (vector bool int)__builtin_s390_vuphh((vector signed short)__a);
+}
+
+static inline __ATTRS_o_ai vector unsigned int
+vec_unpackh(vector unsigned short __a) {
+ return __builtin_s390_vuplhh(__a);
+}
+
+static inline __ATTRS_o_ai vector signed long long
+vec_unpackh(vector signed int __a) {
+ return __builtin_s390_vuphf(__a);
+}
+
+static inline __ATTRS_o_ai vector bool long long
+vec_unpackh(vector bool int __a) {
+ return (vector bool long long)__builtin_s390_vuphf((vector signed int)__a);
+}
+
+static inline __ATTRS_o_ai vector unsigned long long
+vec_unpackh(vector unsigned int __a) {
+ return __builtin_s390_vuplhf(__a);
+}
+
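+/* vec_unpackh widens each element from the first (leftmost) half of
+   the vector to the next larger element type: the signed overloads
+   sign-extend (vuph*), the unsigned overloads zero-extend (the
+   "logical" vuplh* family), and the bool overloads route through the
+   signed builtin and cast back, which preserves the all-ones or
+   all-zeros lane pattern. */
+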
+/*-- vec_unpackl ------------------------------------------------------------*/
+
+static inline __ATTRS_o_ai vector signed short
+vec_unpackl(vector signed char __a) {
+ return __builtin_s390_vuplb(__a);
+}
+
+static inline __ATTRS_o_ai vector bool short
+vec_unpackl(vector bool char __a) {
+ return (vector bool short)__builtin_s390_vuplb((vector signed char)__a);
+}
+
+static inline __ATTRS_o_ai vector unsigned short
+vec_unpackl(vector unsigned char __a) {
+ return __builtin_s390_vupllb(__a);
+}
+
+static inline __ATTRS_o_ai vector signed int
+vec_unpackl(vector signed short __a) {
+ return __builtin_s390_vuplhw(__a);
+}
+
+static inline __ATTRS_o_ai vector bool int
+vec_unpackl(vector bool short __a) {
+ return (vector bool int)__builtin_s390_vuplhw((vector signed short)__a);
+}
+
+static inline __ATTRS_o_ai vector unsigned int
+vec_unpackl(vector unsigned short __a) {
+ return __builtin_s390_vupllh(__a);
+}
+
+static inline __ATTRS_o_ai vector signed long long
+vec_unpackl(vector signed int __a) {
+ return __builtin_s390_vuplf(__a);
+}
+
+static inline __ATTRS_o_ai vector bool long long
+vec_unpackl(vector bool int __a) {
+ return (vector bool long long)__builtin_s390_vuplf((vector signed int)__a);
+}
+
+static inline __ATTRS_o_ai vector unsigned long long
+vec_unpackl(vector unsigned int __a) {
+ return __builtin_s390_vupllf(__a);
+}
+
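+/* vec_unpackl mirrors vec_unpackh for the second (rightmost) half of
+   the vector, again sign-extending signed and zero-extending unsigned
+   element types. The signed short overload calls
+   __builtin_s390_vuplhw; despite the "lh" in its name this is the
+   low-half signed unpack, presumably spelled this way to avoid a clash
+   with the vuplh* logical-high builtins. */
+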
+/*-- vec_cmpeq --------------------------------------------------------------*/
+
+static inline __ATTRS_o_ai vector bool char
+vec_cmpeq(vector bool char __a, vector bool char __b) {
+ return (vector bool char)(__a == __b);
+}
+
+static inline __ATTRS_o_ai vector bool char
+vec_cmpeq(vector signed char __a, vector signed char __b) {
+ return (vector bool char)(__a == __b);
+}
+
+static inline __ATTRS_o_ai vector bool char
+vec_cmpeq(vector unsigned char __a, vector unsigned char __b) {
+ return (vector bool char)(__a == __b);
+}
+
+static inline __ATTRS_o_ai vector bool short
+vec_cmpeq(vector bool short __a, vector bool short __b) {
+ return (vector bool short)(__a == __b);
+}
+
+static inline __ATTRS_o_ai vector bool short
+vec_cmpeq(vector signed short __a, vector signed short __b) {
+ return (vector bool short)(__a == __b);
+}
+
+static inline __ATTRS_o_ai vector bool short
+vec_cmpeq(vector unsigned short __a, vector unsigned short __b) {
+ return (vector bool short)(__a == __b);
+}
+
+static inline __ATTRS_o_ai vector bool int
+vec_cmpeq(vector bool int __a, vector bool int __b) {
+ return (vector bool int)(__a == __b);
+}
+
+static inline __ATTRS_o_ai vector bool int
+vec_cmpeq(vector signed int __a, vector signed int __b) {
+ return (vector bool int)(__a == __b);
+}
+
+static inline __ATTRS_o_ai vector bool int
+vec_cmpeq(vector unsigned int __a, vector unsigned int __b) {
+ return (vector bool int)(__a == __b);
+}
+
+static inline __ATTRS_o_ai vector bool long long
+vec_cmpeq(vector bool long long __a, vector bool long long __b) {
+ return (vector bool long long)(__a == __b);
+}
+
+static inline __ATTRS_o_ai vector bool long long
+vec_cmpeq(vector signed long long __a, vector signed long long __b) {
+ return (vector bool long long)(__a == __b);
+}
+
+static inline __ATTRS_o_ai vector bool long long
+vec_cmpeq(vector unsigned long long __a, vector unsigned long long __b) {
+ return (vector bool long long)(__a == __b);
+}
+
+static inline __ATTRS_o_ai vector bool long long
+vec_cmpeq(vector double __a, vector double __b) {
+ return (vector bool long long)(__a == __b);
+}
+
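+/* The vec_cmp* functions return per-element masks rather than a single
+   truth value: each lane of the result is all ones where the relation
+   holds and all zeros where it does not, which is what the bool vector
+   types model. A minimal sketch (illustrative only; it assumes the
+   vec_sel defined earlier in this header, which takes replacement bits
+   from its second operand where the mask is set):
+
+     vector signed int __x = (vector signed int)(1, 5, 3, 7);
+     vector signed int __y = (vector signed int)(2, 2, 4, 4);
+     vector bool int __m = vec_cmpgt(__x, __y);        // (0, ~0, 0, ~0)
+     vector signed int __max = vec_sel(__y, __x, __m); // (2, 5, 4, 7)
+
+   The sections below (vec_cmpge through vec_cmplt) follow the same
+   pattern for the other ordering relations. */
+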
+/*-- vec_cmpge --------------------------------------------------------------*/
+
+static inline __ATTRS_o_ai vector bool char
+vec_cmpge(vector signed char __a, vector signed char __b) {
+ return (vector bool char)(__a >= __b);
+}
+
+static inline __ATTRS_o_ai vector bool char
+vec_cmpge(vector unsigned char __a, vector unsigned char __b) {
+ return (vector bool char)(__a >= __b);
+}
+
+static inline __ATTRS_o_ai vector bool short
+vec_cmpge(vector signed short __a, vector signed short __b) {
+ return (vector bool short)(__a >= __b);
+}
+
+static inline __ATTRS_o_ai vector bool short
+vec_cmpge(vector unsigned short __a, vector unsigned short __b) {
+ return (vector bool short)(__a >= __b);
+}
+
+static inline __ATTRS_o_ai vector bool int
+vec_cmpge(vector signed int __a, vector signed int __b) {
+ return (vector bool int)(__a >= __b);
+}
+
+static inline __ATTRS_o_ai vector bool int
+vec_cmpge(vector unsigned int __a, vector unsigned int __b) {
+ return (vector bool int)(__a >= __b);
+}
+
+static inline __ATTRS_o_ai vector bool long long
+vec_cmpge(vector signed long long __a, vector signed long long __b) {
+ return (vector bool long long)(__a >= __b);
+}
+
+static inline __ATTRS_o_ai vector bool long long
+vec_cmpge(vector unsigned long long __a, vector unsigned long long __b) {
+ return (vector bool long long)(__a >= __b);
+}
+
+static inline __ATTRS_o_ai vector bool long long
+vec_cmpge(vector double __a, vector double __b) {
+ return (vector bool long long)(__a >= __b);
+}
+
+/*-- vec_cmpgt --------------------------------------------------------------*/
+
+static inline __ATTRS_o_ai vector bool char
+vec_cmpgt(vector signed char __a, vector signed char __b) {
+ return (vector bool char)(__a > __b);
+}
+
+static inline __ATTRS_o_ai vector bool char
+vec_cmpgt(vector unsigned char __a, vector unsigned char __b) {
+ return (vector bool char)(__a > __b);
+}
+
+static inline __ATTRS_o_ai vector bool short
+vec_cmpgt(vector signed short __a, vector signed short __b) {
+ return (vector bool short)(__a > __b);
+}
+
+static inline __ATTRS_o_ai vector bool short
+vec_cmpgt(vector unsigned short __a, vector unsigned short __b) {
+ return (vector bool short)(__a > __b);
+}
+
+static inline __ATTRS_o_ai vector bool int
+vec_cmpgt(vector signed int __a, vector signed int __b) {
+ return (vector bool int)(__a > __b);
+}
+
+static inline __ATTRS_o_ai vector bool int
+vec_cmpgt(vector unsigned int __a, vector unsigned int __b) {
+ return (vector bool int)(__a > __b);
+}
+
+static inline __ATTRS_o_ai vector bool long long
+vec_cmpgt(vector signed long long __a, vector signed long long __b) {
+ return (vector bool long long)(__a > __b);
+}
+
+static inline __ATTRS_o_ai vector bool long long
+vec_cmpgt(vector unsigned long long __a, vector unsigned long long __b) {
+ return (vector bool long long)(__a > __b);
+}
+
+static inline __ATTRS_o_ai vector bool long long
+vec_cmpgt(vector double __a, vector double __b) {
+ return (vector bool long long)(__a > __b);
+}
+
+/*-- vec_cmple --------------------------------------------------------------*/
+
+static inline __ATTRS_o_ai vector bool char
+vec_cmple(vector signed char __a, vector signed char __b) {
+ return (vector bool char)(__a <= __b);
+}
+
+static inline __ATTRS_o_ai vector bool char
+vec_cmple(vector unsigned char __a, vector unsigned char __b) {
+ return (vector bool char)(__a <= __b);
+}
+
+static inline __ATTRS_o_ai vector bool short
+vec_cmple(vector signed short __a, vector signed short __b) {
+ return (vector bool short)(__a <= __b);
+}
+
+static inline __ATTRS_o_ai vector bool short
+vec_cmple(vector unsigned short __a, vector unsigned short __b) {
+ return (vector bool short)(__a <= __b);
+}
+
+static inline __ATTRS_o_ai vector bool int
+vec_cmple(vector signed int __a, vector signed int __b) {
+ return (vector bool int)(__a <= __b);
+}
+
+static inline __ATTRS_o_ai vector bool int
+vec_cmple(vector unsigned int __a, vector unsigned int __b) {
+ return (vector bool int)(__a <= __b);
+}
+
+static inline __ATTRS_o_ai vector bool long long
+vec_cmple(vector signed long long __a, vector signed long long __b) {
+ return (vector bool long long)(__a <= __b);
+}
+
+static inline __ATTRS_o_ai vector bool long long
+vec_cmple(vector unsigned long long __a, vector unsigned long long __b) {
+ return (vector bool long long)(__a <= __b);
+}
+
+static inline __ATTRS_o_ai vector bool long long
+vec_cmple(vector double __a, vector double __b) {
+ return (vector bool long long)(__a <= __b);
+}
+
+/*-- vec_cmplt --------------------------------------------------------------*/
+
+static inline __ATTRS_o_ai vector bool char
+vec_cmplt(vector signed char __a, vector signed char __b) {
+ return (vector bool char)(__a < __b);
+}
+
+static inline __ATTRS_o_ai vector bool char
+vec_cmplt(vector unsigned char __a, vector unsigned char __b) {
+ return (vector bool char)(__a < __b);
+}
+
+static inline __ATTRS_o_ai vector bool short
+vec_cmplt(vector signed short __a, vector signed short __b) {
+ return (vector bool short)(__a < __b);
+}
+
+static inline __ATTRS_o_ai vector bool short
+vec_cmplt(vector unsigned short __a, vector unsigned short __b) {
+ return (vector bool short)(__a < __b);
+}
+
+static inline __ATTRS_o_ai vector bool int
+vec_cmplt(vector signed int __a, vector signed int __b) {
+ return (vector bool int)(__a < __b);
+}
+
+static inline __ATTRS_o_ai vector bool int
+vec_cmplt(vector unsigned int __a, vector unsigned int __b) {
+ return (vector bool int)(__a < __b);
+}
+
+static inline __ATTRS_o_ai vector bool long long
+vec_cmplt(vector signed long long __a, vector signed long long __b) {
+ return (vector bool long long)(__a < __b);
+}
+
+static inline __ATTRS_o_ai vector bool long long
+vec_cmplt(vector unsigned long long __a, vector unsigned long long __b) {
+ return (vector bool long long)(__a < __b);
+}
+
+static inline __ATTRS_o_ai vector bool long long
+vec_cmplt(vector double __a, vector double __b) {
+ return (vector bool long long)(__a < __b);
+}
+
+/*-- vec_all_eq -------------------------------------------------------------*/
+
+static inline __ATTRS_o_ai int
+vec_all_eq(vector signed char __a, vector signed char __b) {
+ int __cc;
+ __builtin_s390_vceqbs(__a, __b, &__cc);
+ return __cc == 0;
+}
+
+static inline __ATTRS_o_ai int
+vec_all_eq(vector signed char __a, vector bool char __b) {
+ int __cc;
+ __builtin_s390_vceqbs(__a, (vector signed char)__b, &__cc);
+ return __cc == 0;
+}
+
+static inline __ATTRS_o_ai int
+vec_all_eq(vector bool char __a, vector signed char __b) {
+ int __cc;
+ __builtin_s390_vceqbs((vector signed char)__a, __b, &__cc);
+ return __cc == 0;
+}
+
+static inline __ATTRS_o_ai int
+vec_all_eq(vector unsigned char __a, vector unsigned char __b) {
+ int __cc;
+ __builtin_s390_vceqbs((vector signed char)__a,
+ (vector signed char)__b, &__cc);
+ return __cc == 0;
+}
+
+static inline __ATTRS_o_ai int
+vec_all_eq(vector unsigned char __a, vector bool char __b) {
+ int __cc;
+ __builtin_s390_vceqbs((vector signed char)__a,
+ (vector signed char)__b, &__cc);
+ return __cc == 0;
+}
+
+static inline __ATTRS_o_ai int
+vec_all_eq(vector bool char __a, vector unsigned char __b) {
+ int __cc;
+ __builtin_s390_vceqbs((vector signed char)__a,
+ (vector signed char)__b, &__cc);
+ return __cc == 0;
+}
+
+static inline __ATTRS_o_ai int
+vec_all_eq(vector bool char __a, vector bool char __b) {
+ int __cc;
+ __builtin_s390_vceqbs((vector signed char)__a,
+ (vector signed char)__b, &__cc);
+ return __cc == 0;
+}
+
+static inline __ATTRS_o_ai int
+vec_all_eq(vector signed short __a, vector signed short __b) {
+ int __cc;
+ __builtin_s390_vceqhs(__a, __b, &__cc);
+ return __cc == 0;
+}
+
+static inline __ATTRS_o_ai int
+vec_all_eq(vector signed short __a, vector bool short __b) {
+ int __cc;
+ __builtin_s390_vceqhs(__a, (vector signed short)__b, &__cc);
+ return __cc == 0;
+}
+
+static inline __ATTRS_o_ai int
+vec_all_eq(vector bool short __a, vector signed short __b) {
+ int __cc;
+ __builtin_s390_vceqhs((vector signed short)__a, __b, &__cc);
+ return __cc == 0;
+}
+
+static inline __ATTRS_o_ai int
+vec_all_eq(vector unsigned short __a, vector unsigned short __b) {
+ int __cc;
+ __builtin_s390_vceqhs((vector signed short)__a,
+ (vector signed short)__b, &__cc);
+ return __cc == 0;
+}
+
+static inline __ATTRS_o_ai int
+vec_all_eq(vector unsigned short __a, vector bool short __b) {
+ int __cc;
+ __builtin_s390_vceqhs((vector signed short)__a,
+ (vector signed short)__b, &__cc);
+ return __cc == 0;
+}
+
+static inline __ATTRS_o_ai int
+vec_all_eq(vector bool short __a, vector unsigned short __b) {
+ int __cc;
+ __builtin_s390_vceqhs((vector signed short)__a,
+ (vector signed short)__b, &__cc);
+ return __cc == 0;
+}
+
+static inline __ATTRS_o_ai int
+vec_all_eq(vector bool short __a, vector bool short __b) {
+ int __cc;
+ __builtin_s390_vceqhs((vector signed short)__a,
+ (vector signed short)__b, &__cc);
+ return __cc == 0;
+}
+
+static inline __ATTRS_o_ai int
+vec_all_eq(vector signed int __a, vector signed int __b) {
+ int __cc;
+ __builtin_s390_vceqfs(__a, __b, &__cc);
+ return __cc == 0;
+}
+
+static inline __ATTRS_o_ai int
+vec_all_eq(vector signed int __a, vector bool int __b) {
+ int __cc;
+ __builtin_s390_vceqfs(__a, (vector signed int)__b, &__cc);
+ return __cc == 0;
+}
+
+static inline __ATTRS_o_ai int
+vec_all_eq(vector bool int __a, vector signed int __b) {
+ int __cc;
+ __builtin_s390_vceqfs((vector signed int)__a, __b, &__cc);
+ return __cc == 0;
+}
+
+static inline __ATTRS_o_ai int
+vec_all_eq(vector unsigned int __a, vector unsigned int __b) {
+ int __cc;
+ __builtin_s390_vceqfs((vector signed int)__a,
+ (vector signed int)__b, &__cc);
+ return __cc == 0;
+}
+
+static inline __ATTRS_o_ai int
+vec_all_eq(vector unsigned int __a, vector bool int __b) {
+ int __cc;
+ __builtin_s390_vceqfs((vector signed int)__a,
+ (vector signed int)__b, &__cc);
+ return __cc == 0;
+}
+
+static inline __ATTRS_o_ai int
+vec_all_eq(vector bool int __a, vector unsigned int __b) {
+ int __cc;
+ __builtin_s390_vceqfs((vector signed int)__a,
+ (vector signed int)__b, &__cc);
+ return __cc == 0;
+}
+
+static inline __ATTRS_o_ai int
+vec_all_eq(vector bool int __a, vector bool int __b) {
+ int __cc;
+ __builtin_s390_vceqfs((vector signed int)__a,
+ (vector signed int)__b, &__cc);
+ return __cc == 0;
+}
+
+static inline __ATTRS_o_ai int
+vec_all_eq(vector signed long long __a, vector signed long long __b) {
+ int __cc;
+ __builtin_s390_vceqgs(__a, __b, &__cc);
+ return __cc == 0;
+}
+
+static inline __ATTRS_o_ai int
+vec_all_eq(vector signed long long __a, vector bool long long __b) {
+ int __cc;
+ __builtin_s390_vceqgs(__a, (vector signed long long)__b, &__cc);
+ return __cc == 0;
+}
+
+static inline __ATTRS_o_ai int
+vec_all_eq(vector bool long long __a, vector signed long long __b) {
+ int __cc;
+ __builtin_s390_vceqgs((vector signed long long)__a, __b, &__cc);
+ return __cc == 0;
+}
+
+static inline __ATTRS_o_ai int
+vec_all_eq(vector unsigned long long __a, vector unsigned long long __b) {
+ int __cc;
+ __builtin_s390_vceqgs((vector signed long long)__a,
+ (vector signed long long)__b, &__cc);
+ return __cc == 0;
+}
+
+static inline __ATTRS_o_ai int
+vec_all_eq(vector unsigned long long __a, vector bool long long __b) {
+ int __cc;
+ __builtin_s390_vceqgs((vector signed long long)__a,
+ (vector signed long long)__b, &__cc);
+ return __cc == 0;
+}
+
+static inline __ATTRS_o_ai int
+vec_all_eq(vector bool long long __a, vector unsigned long long __b) {
+ int __cc;
+ __builtin_s390_vceqgs((vector signed long long)__a,
+ (vector signed long long)__b, &__cc);
+ return __cc == 0;
+}
+
+static inline __ATTRS_o_ai int
+vec_all_eq(vector bool long long __a, vector bool long long __b) {
+ int __cc;
+ __builtin_s390_vceqgs((vector signed long long)__a,
+ (vector signed long long)__b, &__cc);
+ return __cc == 0;
+}
+
+static inline __ATTRS_o_ai int
+vec_all_eq(vector double __a, vector double __b) {
+ int __cc;
+ __builtin_s390_vfcedbs(__a, __b, &__cc);
+ return __cc == 0;
+}
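+/* The predicate functions in this and the following sections reuse the
+   condition-code-setting compare builtins. As the code shows, __cc == 0
+   means the tested relation held in every lane, __cc == 3 means it held
+   in no lane, and __cc == 1 covers the mixed case: vec_all_eq therefore
+   tests __cc == 0, vec_all_ne tests __cc == 3, and the vec_any_* forms
+   further below test __cc <= 1 (at least one matching lane). */
+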
+
+/*-- vec_all_ne -------------------------------------------------------------*/
+
+static inline __ATTRS_o_ai int
+vec_all_ne(vector signed char __a, vector signed char __b) {
+ int __cc;
+ __builtin_s390_vceqbs(__a, __b, &__cc);
+ return __cc == 3;
+}
+
+static inline __ATTRS_o_ai int
+vec_all_ne(vector signed char __a, vector bool char __b) {
+ int __cc;
+ __builtin_s390_vceqbs(__a, (vector signed char)__b, &__cc);
+ return __cc == 3;
+}
+
+static inline __ATTRS_o_ai int
+vec_all_ne(vector bool char __a, vector signed char __b) {
+ int __cc;
+ __builtin_s390_vceqbs((vector signed char)__a, __b, &__cc);
+ return __cc == 3;
+}
+
+static inline __ATTRS_o_ai int
+vec_all_ne(vector unsigned char __a, vector unsigned char __b) {
+ int __cc;
+ __builtin_s390_vceqbs((vector signed char)__a,
+ (vector signed char)__b, &__cc);
+ return __cc == 3;
+}
+
+static inline __ATTRS_o_ai int
+vec_all_ne(vector unsigned char __a, vector bool char __b) {
+ int __cc;
+ __builtin_s390_vceqbs((vector signed char)__a,
+ (vector signed char)__b, &__cc);
+ return __cc == 3;
+}
+
+static inline __ATTRS_o_ai int
+vec_all_ne(vector bool char __a, vector unsigned char __b) {
+ int __cc;
+ __builtin_s390_vceqbs((vector signed char)__a,
+ (vector signed char)__b, &__cc);
+ return __cc == 3;
+}
+
+static inline __ATTRS_o_ai int
+vec_all_ne(vector bool char __a, vector bool char __b) {
+ int __cc;
+ __builtin_s390_vceqbs((vector signed char)__a,
+ (vector signed char)__b, &__cc);
+ return __cc == 3;
+}
+
+static inline __ATTRS_o_ai int
+vec_all_ne(vector signed short __a, vector signed short __b) {
+ int __cc;
+ __builtin_s390_vceqhs(__a, __b, &__cc);
+ return __cc == 3;
+}
+
+static inline __ATTRS_o_ai int
+vec_all_ne(vector signed short __a, vector bool short __b) {
+ int __cc;
+ __builtin_s390_vceqhs(__a, (vector signed short)__b, &__cc);
+ return __cc == 3;
+}
+
+static inline __ATTRS_o_ai int
+vec_all_ne(vector bool short __a, vector signed short __b) {
+ int __cc;
+ __builtin_s390_vceqhs((vector signed short)__a, __b, &__cc);
+ return __cc == 3;
+}
+
+static inline __ATTRS_o_ai int
+vec_all_ne(vector unsigned short __a, vector unsigned short __b) {
+ int __cc;
+ __builtin_s390_vceqhs((vector signed short)__a,
+ (vector signed short)__b, &__cc);
+ return __cc == 3;
+}
+
+static inline __ATTRS_o_ai int
+vec_all_ne(vector unsigned short __a, vector bool short __b) {
+ int __cc;
+ __builtin_s390_vceqhs((vector signed short)__a,
+ (vector signed short)__b, &__cc);
+ return __cc == 3;
+}
+
+static inline __ATTRS_o_ai int
+vec_all_ne(vector bool short __a, vector unsigned short __b) {
+ int __cc;
+ __builtin_s390_vceqhs((vector signed short)__a,
+ (vector signed short)__b, &__cc);
+ return __cc == 3;
+}
+
+static inline __ATTRS_o_ai int
+vec_all_ne(vector bool short __a, vector bool short __b) {
+ int __cc;
+ __builtin_s390_vceqhs((vector signed short)__a,
+ (vector signed short)__b, &__cc);
+ return __cc == 3;
+}
+
+static inline __ATTRS_o_ai int
+vec_all_ne(vector signed int __a, vector signed int __b) {
+ int __cc;
+ __builtin_s390_vceqfs(__a, __b, &__cc);
+ return __cc == 3;
+}
+
+static inline __ATTRS_o_ai int
+vec_all_ne(vector signed int __a, vector bool int __b) {
+ int __cc;
+ __builtin_s390_vceqfs(__a, (vector signed int)__b, &__cc);
+ return __cc == 3;
+}
+
+static inline __ATTRS_o_ai int
+vec_all_ne(vector bool int __a, vector signed int __b) {
+ int __cc;
+ __builtin_s390_vceqfs((vector signed int)__a, __b, &__cc);
+ return __cc == 3;
+}
+
+static inline __ATTRS_o_ai int
+vec_all_ne(vector unsigned int __a, vector unsigned int __b) {
+ int __cc;
+ __builtin_s390_vceqfs((vector signed int)__a,
+ (vector signed int)__b, &__cc);
+ return __cc == 3;
+}
+
+static inline __ATTRS_o_ai int
+vec_all_ne(vector unsigned int __a, vector bool int __b) {
+ int __cc;
+ __builtin_s390_vceqfs((vector signed int)__a,
+ (vector signed int)__b, &__cc);
+ return __cc == 3;
+}
+
+static inline __ATTRS_o_ai int
+vec_all_ne(vector bool int __a, vector unsigned int __b) {
+ int __cc;
+ __builtin_s390_vceqfs((vector signed int)__a,
+ (vector signed int)__b, &__cc);
+ return __cc == 3;
+}
+
+static inline __ATTRS_o_ai int
+vec_all_ne(vector bool int __a, vector bool int __b) {
+ int __cc;
+ __builtin_s390_vceqfs((vector signed int)__a,
+ (vector signed int)__b, &__cc);
+ return __cc == 3;
+}
+
+static inline __ATTRS_o_ai int
+vec_all_ne(vector signed long long __a, vector signed long long __b) {
+ int __cc;
+ __builtin_s390_vceqgs(__a, __b, &__cc);
+ return __cc == 3;
+}
+
+static inline __ATTRS_o_ai int
+vec_all_ne(vector signed long long __a, vector bool long long __b) {
+ int __cc;
+ __builtin_s390_vceqgs(__a, (vector signed long long)__b, &__cc);
+ return __cc == 3;
+}
+
+static inline __ATTRS_o_ai int
+vec_all_ne(vector bool long long __a, vector signed long long __b) {
+ int __cc;
+ __builtin_s390_vceqgs((vector signed long long)__a, __b, &__cc);
+ return __cc == 3;
+}
+
+static inline __ATTRS_o_ai int
+vec_all_ne(vector unsigned long long __a, vector unsigned long long __b) {
+ int __cc;
+ __builtin_s390_vceqgs((vector signed long long)__a,
+ (vector signed long long)__b, &__cc);
+ return __cc == 3;
+}
+
+static inline __ATTRS_o_ai int
+vec_all_ne(vector unsigned long long __a, vector bool long long __b) {
+ int __cc;
+ __builtin_s390_vceqgs((vector signed long long)__a,
+ (vector signed long long)__b, &__cc);
+ return __cc == 3;
+}
+
+static inline __ATTRS_o_ai int
+vec_all_ne(vector bool long long __a, vector unsigned long long __b) {
+ int __cc;
+ __builtin_s390_vceqgs((vector signed long long)__a,
+ (vector signed long long)__b, &__cc);
+ return __cc == 3;
+}
+
+static inline __ATTRS_o_ai int
+vec_all_ne(vector bool long long __a, vector bool long long __b) {
+ int __cc;
+ __builtin_s390_vceqgs((vector signed long long)__a,
+ (vector signed long long)__b, &__cc);
+ return __cc == 3;
+}
+
+static inline __ATTRS_o_ai int
+vec_all_ne(vector double __a, vector double __b) {
+ int __cc;
+ __builtin_s390_vfcedbs(__a, __b, &__cc);
+ return __cc == 3;
+}
+
+/*-- vec_all_ge -------------------------------------------------------------*/
+
+static inline __ATTRS_o_ai int
+vec_all_ge(vector signed char __a, vector signed char __b) {
+ int __cc;
+ __builtin_s390_vchbs(__b, __a, &__cc);
+ return __cc == 3;
+}
+
+static inline __ATTRS_o_ai int
+vec_all_ge(vector signed char __a, vector bool char __b) {
+ int __cc;
+ __builtin_s390_vchbs((vector signed char)__b, __a, &__cc);
+ return __cc == 3;
+}
+
+static inline __ATTRS_o_ai int
+vec_all_ge(vector bool char __a, vector signed char __b) {
+ int __cc;
+ __builtin_s390_vchbs(__b, (vector signed char)__a, &__cc);
+ return __cc == 3;
+}
+
+static inline __ATTRS_o_ai int
+vec_all_ge(vector unsigned char __a, vector unsigned char __b) {
+ int __cc;
+ __builtin_s390_vchlbs(__b, __a, &__cc);
+ return __cc == 3;
+}
+
+static inline __ATTRS_o_ai int
+vec_all_ge(vector unsigned char __a, vector bool char __b) {
+ int __cc;
+ __builtin_s390_vchlbs((vector unsigned char)__b, __a, &__cc);
+ return __cc == 3;
+}
+
+static inline __ATTRS_o_ai int
+vec_all_ge(vector bool char __a, vector unsigned char __b) {
+ int __cc;
+ __builtin_s390_vchlbs(__b, (vector unsigned char)__a, &__cc);
+ return __cc == 3;
+}
+
+static inline __ATTRS_o_ai int
+vec_all_ge(vector bool char __a, vector bool char __b) {
+ int __cc;
+ __builtin_s390_vchlbs((vector unsigned char)__b,
+ (vector unsigned char)__a, &__cc);
+ return __cc == 3;
+}
+
+static inline __ATTRS_o_ai int
+vec_all_ge(vector signed short __a, vector signed short __b) {
+ int __cc;
+ __builtin_s390_vchhs(__b, __a, &__cc);
+ return __cc == 3;
+}
+
+static inline __ATTRS_o_ai int
+vec_all_ge(vector signed short __a, vector bool short __b) {
+ int __cc;
+ __builtin_s390_vchhs((vector signed short)__b, __a, &__cc);
+ return __cc == 3;
+}
+
+static inline __ATTRS_o_ai int
+vec_all_ge(vector bool short __a, vector signed short __b) {
+ int __cc;
+ __builtin_s390_vchhs(__b, (vector signed short)__a, &__cc);
+ return __cc == 3;
+}
+
+static inline __ATTRS_o_ai int
+vec_all_ge(vector unsigned short __a, vector unsigned short __b) {
+ int __cc;
+ __builtin_s390_vchlhs(__b, __a, &__cc);
+ return __cc == 3;
+}
+
+static inline __ATTRS_o_ai int
+vec_all_ge(vector unsigned short __a, vector bool short __b) {
+ int __cc;
+ __builtin_s390_vchlhs((vector unsigned short)__b, __a, &__cc);
+ return __cc == 3;
+}
+
+static inline __ATTRS_o_ai int
+vec_all_ge(vector bool short __a, vector unsigned short __b) {
+ int __cc;
+ __builtin_s390_vchlhs(__b, (vector unsigned short)__a, &__cc);
+ return __cc == 3;
+}
+
+static inline __ATTRS_o_ai int
+vec_all_ge(vector bool short __a, vector bool short __b) {
+ int __cc;
+ __builtin_s390_vchlhs((vector unsigned short)__b,
+ (vector unsigned short)__a, &__cc);
+ return __cc == 3;
+}
+
+static inline __ATTRS_o_ai int
+vec_all_ge(vector signed int __a, vector signed int __b) {
+ int __cc;
+ __builtin_s390_vchfs(__b, __a, &__cc);
+ return __cc == 3;
+}
+
+static inline __ATTRS_o_ai int
+vec_all_ge(vector signed int __a, vector bool int __b) {
+ int __cc;
+ __builtin_s390_vchfs((vector signed int)__b, __a, &__cc);
+ return __cc == 3;
+}
+
+static inline __ATTRS_o_ai int
+vec_all_ge(vector bool int __a, vector signed int __b) {
+ int __cc;
+ __builtin_s390_vchfs(__b, (vector signed int)__a, &__cc);
+ return __cc == 3;
+}
+
+static inline __ATTRS_o_ai int
+vec_all_ge(vector unsigned int __a, vector unsigned int __b) {
+ int __cc;
+ __builtin_s390_vchlfs(__b, __a, &__cc);
+ return __cc == 3;
+}
+
+static inline __ATTRS_o_ai int
+vec_all_ge(vector unsigned int __a, vector bool int __b) {
+ int __cc;
+ __builtin_s390_vchlfs((vector unsigned int)__b, __a, &__cc);
+ return __cc == 3;
+}
+
+static inline __ATTRS_o_ai int
+vec_all_ge(vector bool int __a, vector unsigned int __b) {
+ int __cc;
+ __builtin_s390_vchlfs(__b, (vector unsigned int)__a, &__cc);
+ return __cc == 3;
+}
+
+static inline __ATTRS_o_ai int
+vec_all_ge(vector bool int __a, vector bool int __b) {
+ int __cc;
+ __builtin_s390_vchlfs((vector unsigned int)__b,
+ (vector unsigned int)__a, &__cc);
+ return __cc == 3;
+}
+
+static inline __ATTRS_o_ai int
+vec_all_ge(vector signed long long __a, vector signed long long __b) {
+ int __cc;
+ __builtin_s390_vchgs(__b, __a, &__cc);
+ return __cc == 3;
+}
+
+static inline __ATTRS_o_ai int
+vec_all_ge(vector signed long long __a, vector bool long long __b) {
+ int __cc;
+ __builtin_s390_vchgs((vector signed long long)__b, __a, &__cc);
+ return __cc == 3;
+}
+
+static inline __ATTRS_o_ai int
+vec_all_ge(vector bool long long __a, vector signed long long __b) {
+ int __cc;
+ __builtin_s390_vchgs(__b, (vector signed long long)__a, &__cc);
+ return __cc == 3;
+}
+
+static inline __ATTRS_o_ai int
+vec_all_ge(vector unsigned long long __a, vector unsigned long long __b) {
+ int __cc;
+ __builtin_s390_vchlgs(__b, __a, &__cc);
+ return __cc == 3;
+}
+
+static inline __ATTRS_o_ai int
+vec_all_ge(vector unsigned long long __a, vector bool long long __b) {
+ int __cc;
+ __builtin_s390_vchlgs((vector unsigned long long)__b, __a, &__cc);
+ return __cc == 3;
+}
+
+static inline __ATTRS_o_ai int
+vec_all_ge(vector bool long long __a, vector unsigned long long __b) {
+ int __cc;
+ __builtin_s390_vchlgs(__b, (vector unsigned long long)__a, &__cc);
+ return __cc == 3;
+}
+
+static inline __ATTRS_o_ai int
+vec_all_ge(vector bool long long __a, vector bool long long __b) {
+ int __cc;
+ __builtin_s390_vchlgs((vector unsigned long long)__b,
+ (vector unsigned long long)__a, &__cc);
+ return __cc == 3;
+}
+
+static inline __ATTRS_o_ai int
+vec_all_ge(vector double __a, vector double __b) {
+ int __cc;
+ __builtin_s390_vfchedbs(__a, __b, &__cc);
+ return __cc == 0;
+}
+
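+/* There is no dedicated greater-or-equal compare builtin for the
+   integer element types, so vec_all_ge is expressed through the
+   greater-than builtins with the operands swapped: "__a >= __b in
+   every lane" is tested as "__b > __a in no lane", i.e. __cc == 3.
+   The vec_all_le and vec_all_lt families below use the same trick,
+   while the double overload can use __builtin_s390_vfchedbs (compare
+   high or equal) directly and test __cc == 0. */
+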
+/*-- vec_all_gt -------------------------------------------------------------*/
+
+static inline __ATTRS_o_ai int
+vec_all_gt(vector signed char __a, vector signed char __b) {
+ int __cc;
+ __builtin_s390_vchbs(__a, __b, &__cc);
+ return __cc == 0;
+}
+
+static inline __ATTRS_o_ai int
+vec_all_gt(vector signed char __a, vector bool char __b) {
+ int __cc;
+ __builtin_s390_vchbs(__a, (vector signed char)__b, &__cc);
+ return __cc == 0;
+}
+
+static inline __ATTRS_o_ai int
+vec_all_gt(vector bool char __a, vector signed char __b) {
+ int __cc;
+ __builtin_s390_vchbs((vector signed char)__a, __b, &__cc);
+ return __cc == 0;
+}
+
+static inline __ATTRS_o_ai int
+vec_all_gt(vector unsigned char __a, vector unsigned char __b) {
+ int __cc;
+ __builtin_s390_vchlbs(__a, __b, &__cc);
+ return __cc == 0;
+}
+
+static inline __ATTRS_o_ai int
+vec_all_gt(vector unsigned char __a, vector bool char __b) {
+ int __cc;
+ __builtin_s390_vchlbs(__a, (vector unsigned char)__b, &__cc);
+ return __cc == 0;
+}
+
+static inline __ATTRS_o_ai int
+vec_all_gt(vector bool char __a, vector unsigned char __b) {
+ int __cc;
+ __builtin_s390_vchlbs((vector unsigned char)__a, __b, &__cc);
+ return __cc == 0;
+}
+
+static inline __ATTRS_o_ai int
+vec_all_gt(vector bool char __a, vector bool char __b) {
+ int __cc;
+ __builtin_s390_vchlbs((vector unsigned char)__a,
+ (vector unsigned char)__b, &__cc);
+ return __cc == 0;
+}
+
+static inline __ATTRS_o_ai int
+vec_all_gt(vector signed short __a, vector signed short __b) {
+ int __cc;
+ __builtin_s390_vchhs(__a, __b, &__cc);
+ return __cc == 0;
+}
+
+static inline __ATTRS_o_ai int
+vec_all_gt(vector signed short __a, vector bool short __b) {
+ int __cc;
+ __builtin_s390_vchhs(__a, (vector signed short)__b, &__cc);
+ return __cc == 0;
+}
+
+static inline __ATTRS_o_ai int
+vec_all_gt(vector bool short __a, vector signed short __b) {
+ int __cc;
+ __builtin_s390_vchhs((vector signed short)__a, __b, &__cc);
+ return __cc == 0;
+}
+
+static inline __ATTRS_o_ai int
+vec_all_gt(vector unsigned short __a, vector unsigned short __b) {
+ int __cc;
+ __builtin_s390_vchlhs(__a, __b, &__cc);
+ return __cc == 0;
+}
+
+static inline __ATTRS_o_ai int
+vec_all_gt(vector unsigned short __a, vector bool short __b) {
+ int __cc;
+ __builtin_s390_vchlhs(__a, (vector unsigned short)__b, &__cc);
+ return __cc == 0;
+}
+
+static inline __ATTRS_o_ai int
+vec_all_gt(vector bool short __a, vector unsigned short __b) {
+ int __cc;
+ __builtin_s390_vchlhs((vector unsigned short)__a, __b, &__cc);
+ return __cc == 0;
+}
+
+static inline __ATTRS_o_ai int
+vec_all_gt(vector bool short __a, vector bool short __b) {
+ int __cc;
+ __builtin_s390_vchlhs((vector unsigned short)__a,
+ (vector unsigned short)__b, &__cc);
+ return __cc == 0;
+}
+
+static inline __ATTRS_o_ai int
+vec_all_gt(vector signed int __a, vector signed int __b) {
+ int __cc;
+ __builtin_s390_vchfs(__a, __b, &__cc);
+ return __cc == 0;
+}
+
+static inline __ATTRS_o_ai int
+vec_all_gt(vector signed int __a, vector bool int __b) {
+ int __cc;
+ __builtin_s390_vchfs(__a, (vector signed int)__b, &__cc);
+ return __cc == 0;
+}
+
+static inline __ATTRS_o_ai int
+vec_all_gt(vector bool int __a, vector signed int __b) {
+ int __cc;
+ __builtin_s390_vchfs((vector signed int)__a, __b, &__cc);
+ return __cc == 0;
+}
+
+static inline __ATTRS_o_ai int
+vec_all_gt(vector unsigned int __a, vector unsigned int __b) {
+ int __cc;
+ __builtin_s390_vchlfs(__a, __b, &__cc);
+ return __cc == 0;
+}
+
+static inline __ATTRS_o_ai int
+vec_all_gt(vector unsigned int __a, vector bool int __b) {
+ int __cc;
+ __builtin_s390_vchlfs(__a, (vector unsigned int)__b, &__cc);
+ return __cc == 0;
+}
+
+static inline __ATTRS_o_ai int
+vec_all_gt(vector bool int __a, vector unsigned int __b) {
+ int __cc;
+ __builtin_s390_vchlfs((vector unsigned int)__a, __b, &__cc);
+ return __cc == 0;
+}
+
+static inline __ATTRS_o_ai int
+vec_all_gt(vector bool int __a, vector bool int __b) {
+ int __cc;
+ __builtin_s390_vchlfs((vector unsigned int)__a,
+ (vector unsigned int)__b, &__cc);
+ return __cc == 0;
+}
+
+static inline __ATTRS_o_ai int
+vec_all_gt(vector signed long long __a, vector signed long long __b) {
+ int __cc;
+ __builtin_s390_vchgs(__a, __b, &__cc);
+ return __cc == 0;
+}
+
+static inline __ATTRS_o_ai int
+vec_all_gt(vector signed long long __a, vector bool long long __b) {
+ int __cc;
+ __builtin_s390_vchgs(__a, (vector signed long long)__b, &__cc);
+ return __cc == 0;
+}
+
+static inline __ATTRS_o_ai int
+vec_all_gt(vector bool long long __a, vector signed long long __b) {
+ int __cc;
+ __builtin_s390_vchgs((vector signed long long)__a, __b, &__cc);
+ return __cc == 0;
+}
+
+static inline __ATTRS_o_ai int
+vec_all_gt(vector unsigned long long __a, vector unsigned long long __b) {
+ int __cc;
+ __builtin_s390_vchlgs(__a, __b, &__cc);
+ return __cc == 0;
+}
+
+static inline __ATTRS_o_ai int
+vec_all_gt(vector unsigned long long __a, vector bool long long __b) {
+ int __cc;
+ __builtin_s390_vchlgs(__a, (vector unsigned long long)__b, &__cc);
+ return __cc == 0;
+}
+
+static inline __ATTRS_o_ai int
+vec_all_gt(vector bool long long __a, vector unsigned long long __b) {
+ int __cc;
+ __builtin_s390_vchlgs((vector unsigned long long)__a, __b, &__cc);
+ return __cc == 0;
+}
+
+static inline __ATTRS_o_ai int
+vec_all_gt(vector bool long long __a, vector bool long long __b) {
+ int __cc;
+ __builtin_s390_vchlgs((vector unsigned long long)__a,
+ (vector unsigned long long)__b, &__cc);
+ return __cc == 0;
+}
+
+static inline __ATTRS_o_ai int
+vec_all_gt(vector double __a, vector double __b) {
+ int __cc;
+ __builtin_s390_vfchdbs(__a, __b, &__cc);
+ return __cc == 0;
+}
+
+/*-- vec_all_le -------------------------------------------------------------*/
+
+static inline __ATTRS_o_ai int
+vec_all_le(vector signed char __a, vector signed char __b) {
+ int __cc;
+ __builtin_s390_vchbs(__a, __b, &__cc);
+ return __cc == 3;
+}
+
+static inline __ATTRS_o_ai int
+vec_all_le(vector signed char __a, vector bool char __b) {
+ int __cc;
+ __builtin_s390_vchbs(__a, (vector signed char)__b, &__cc);
+ return __cc == 3;
+}
+
+static inline __ATTRS_o_ai int
+vec_all_le(vector bool char __a, vector signed char __b) {
+ int __cc;
+ __builtin_s390_vchbs((vector signed char)__a, __b, &__cc);
+ return __cc == 3;
+}
+
+static inline __ATTRS_o_ai int
+vec_all_le(vector unsigned char __a, vector unsigned char __b) {
+ int __cc;
+ __builtin_s390_vchlbs(__a, __b, &__cc);
+ return __cc == 3;
+}
+
+static inline __ATTRS_o_ai int
+vec_all_le(vector unsigned char __a, vector bool char __b) {
+ int __cc;
+ __builtin_s390_vchlbs(__a, (vector unsigned char)__b, &__cc);
+ return __cc == 3;
+}
+
+static inline __ATTRS_o_ai int
+vec_all_le(vector bool char __a, vector unsigned char __b) {
+ int __cc;
+ __builtin_s390_vchlbs((vector unsigned char)__a, __b, &__cc);
+ return __cc == 3;
+}
+
+static inline __ATTRS_o_ai int
+vec_all_le(vector bool char __a, vector bool char __b) {
+ int __cc;
+ __builtin_s390_vchlbs((vector unsigned char)__a,
+ (vector unsigned char)__b, &__cc);
+ return __cc == 3;
+}
+
+static inline __ATTRS_o_ai int
+vec_all_le(vector signed short __a, vector signed short __b) {
+ int __cc;
+ __builtin_s390_vchhs(__a, __b, &__cc);
+ return __cc == 3;
+}
+
+static inline __ATTRS_o_ai int
+vec_all_le(vector signed short __a, vector bool short __b) {
+ int __cc;
+ __builtin_s390_vchhs(__a, (vector signed short)__b, &__cc);
+ return __cc == 3;
+}
+
+static inline __ATTRS_o_ai int
+vec_all_le(vector bool short __a, vector signed short __b) {
+ int __cc;
+ __builtin_s390_vchhs((vector signed short)__a, __b, &__cc);
+ return __cc == 3;
+}
+
+static inline __ATTRS_o_ai int
+vec_all_le(vector unsigned short __a, vector unsigned short __b) {
+ int __cc;
+ __builtin_s390_vchlhs(__a, __b, &__cc);
+ return __cc == 3;
+}
+
+static inline __ATTRS_o_ai int
+vec_all_le(vector unsigned short __a, vector bool short __b) {
+ int __cc;
+ __builtin_s390_vchlhs(__a, (vector unsigned short)__b, &__cc);
+ return __cc == 3;
+}
+
+static inline __ATTRS_o_ai int
+vec_all_le(vector bool short __a, vector unsigned short __b) {
+ int __cc;
+ __builtin_s390_vchlhs((vector unsigned short)__a, __b, &__cc);
+ return __cc == 3;
+}
+
+static inline __ATTRS_o_ai int
+vec_all_le(vector bool short __a, vector bool short __b) {
+ int __cc;
+ __builtin_s390_vchlhs((vector unsigned short)__a,
+ (vector unsigned short)__b, &__cc);
+ return __cc == 3;
+}
+
+static inline __ATTRS_o_ai int
+vec_all_le(vector signed int __a, vector signed int __b) {
+ int __cc;
+ __builtin_s390_vchfs(__a, __b, &__cc);
+ return __cc == 3;
+}
+
+static inline __ATTRS_o_ai int
+vec_all_le(vector signed int __a, vector bool int __b) {
+ int __cc;
+ __builtin_s390_vchfs(__a, (vector signed int)__b, &__cc);
+ return __cc == 3;
+}
+
+static inline __ATTRS_o_ai int
+vec_all_le(vector bool int __a, vector signed int __b) {
+ int __cc;
+ __builtin_s390_vchfs((vector signed int)__a, __b, &__cc);
+ return __cc == 3;
+}
+
+static inline __ATTRS_o_ai int
+vec_all_le(vector unsigned int __a, vector unsigned int __b) {
+ int __cc;
+ __builtin_s390_vchlfs(__a, __b, &__cc);
+ return __cc == 3;
+}
+
+static inline __ATTRS_o_ai int
+vec_all_le(vector unsigned int __a, vector bool int __b) {
+ int __cc;
+ __builtin_s390_vchlfs(__a, (vector unsigned int)__b, &__cc);
+ return __cc == 3;
+}
+
+static inline __ATTRS_o_ai int
+vec_all_le(vector bool int __a, vector unsigned int __b) {
+ int __cc;
+ __builtin_s390_vchlfs((vector unsigned int)__a, __b, &__cc);
+ return __cc == 3;
+}
+
+static inline __ATTRS_o_ai int
+vec_all_le(vector bool int __a, vector bool int __b) {
+ int __cc;
+ __builtin_s390_vchlfs((vector unsigned int)__a,
+ (vector unsigned int)__b, &__cc);
+ return __cc == 3;
+}
+
+static inline __ATTRS_o_ai int
+vec_all_le(vector signed long long __a, vector signed long long __b) {
+ int __cc;
+ __builtin_s390_vchgs(__a, __b, &__cc);
+ return __cc == 3;
+}
+
+static inline __ATTRS_o_ai int
+vec_all_le(vector signed long long __a, vector bool long long __b) {
+ int __cc;
+ __builtin_s390_vchgs(__a, (vector signed long long)__b, &__cc);
+ return __cc == 3;
+}
+
+static inline __ATTRS_o_ai int
+vec_all_le(vector bool long long __a, vector signed long long __b) {
+ int __cc;
+ __builtin_s390_vchgs((vector signed long long)__a, __b, &__cc);
+ return __cc == 3;
+}
+
+static inline __ATTRS_o_ai int
+vec_all_le(vector unsigned long long __a, vector unsigned long long __b) {
+ int __cc;
+ __builtin_s390_vchlgs(__a, __b, &__cc);
+ return __cc == 3;
+}
+
+static inline __ATTRS_o_ai int
+vec_all_le(vector unsigned long long __a, vector bool long long __b) {
+ int __cc;
+ __builtin_s390_vchlgs(__a, (vector unsigned long long)__b, &__cc);
+ return __cc == 3;
+}
+
+static inline __ATTRS_o_ai int
+vec_all_le(vector bool long long __a, vector unsigned long long __b) {
+ int __cc;
+ __builtin_s390_vchlgs((vector unsigned long long)__a, __b, &__cc);
+ return __cc == 3;
+}
+
+static inline __ATTRS_o_ai int
+vec_all_le(vector bool long long __a, vector bool long long __b) {
+ int __cc;
+ __builtin_s390_vchlgs((vector unsigned long long)__a,
+ (vector unsigned long long)__b, &__cc);
+ return __cc == 3;
+}
+
+static inline __ATTRS_o_ai int
+vec_all_le(vector double __a, vector double __b) {
+ int __cc;
+ __builtin_s390_vfchedbs(__b, __a, &__cc);
+ return __cc == 0;
+}
+
+/*-- vec_all_lt -------------------------------------------------------------*/
+
+static inline __ATTRS_o_ai int
+vec_all_lt(vector signed char __a, vector signed char __b) {
+ int __cc;
+ __builtin_s390_vchbs(__b, __a, &__cc);
+ return __cc == 0;
+}
+
+static inline __ATTRS_o_ai int
+vec_all_lt(vector signed char __a, vector bool char __b) {
+ int __cc;
+ __builtin_s390_vchbs((vector signed char)__b, __a, &__cc);
+ return __cc == 0;
+}
+
+static inline __ATTRS_o_ai int
+vec_all_lt(vector bool char __a, vector signed char __b) {
+ int __cc;
+ __builtin_s390_vchbs(__b, (vector signed char)__a, &__cc);
+ return __cc == 0;
+}
+
+static inline __ATTRS_o_ai int
+vec_all_lt(vector unsigned char __a, vector unsigned char __b) {
+ int __cc;
+ __builtin_s390_vchlbs(__b, __a, &__cc);
+ return __cc == 0;
+}
+
+static inline __ATTRS_o_ai int
+vec_all_lt(vector unsigned char __a, vector bool char __b) {
+ int __cc;
+ __builtin_s390_vchlbs((vector unsigned char)__b, __a, &__cc);
+ return __cc == 0;
+}
+
+static inline __ATTRS_o_ai int
+vec_all_lt(vector bool char __a, vector unsigned char __b) {
+ int __cc;
+ __builtin_s390_vchlbs(__b, (vector unsigned char)__a, &__cc);
+ return __cc == 0;
+}
+
+static inline __ATTRS_o_ai int
+vec_all_lt(vector bool char __a, vector bool char __b) {
+ int __cc;
+ __builtin_s390_vchlbs((vector unsigned char)__b,
+ (vector unsigned char)__a, &__cc);
+ return __cc == 0;
+}
+
+static inline __ATTRS_o_ai int
+vec_all_lt(vector signed short __a, vector signed short __b) {
+ int __cc;
+ __builtin_s390_vchhs(__b, __a, &__cc);
+ return __cc == 0;
+}
+
+static inline __ATTRS_o_ai int
+vec_all_lt(vector signed short __a, vector bool short __b) {
+ int __cc;
+ __builtin_s390_vchhs((vector signed short)__b, __a, &__cc);
+ return __cc == 0;
+}
+
+static inline __ATTRS_o_ai int
+vec_all_lt(vector bool short __a, vector signed short __b) {
+ int __cc;
+ __builtin_s390_vchhs(__b, (vector signed short)__a, &__cc);
+ return __cc == 0;
+}
+
+static inline __ATTRS_o_ai int
+vec_all_lt(vector unsigned short __a, vector unsigned short __b) {
+ int __cc;
+ __builtin_s390_vchlhs(__b, __a, &__cc);
+ return __cc == 0;
+}
+
+static inline __ATTRS_o_ai int
+vec_all_lt(vector unsigned short __a, vector bool short __b) {
+ int __cc;
+ __builtin_s390_vchlhs((vector unsigned short)__b, __a, &__cc);
+ return __cc == 0;
+}
+
+static inline __ATTRS_o_ai int
+vec_all_lt(vector bool short __a, vector unsigned short __b) {
+ int __cc;
+ __builtin_s390_vchlhs(__b, (vector unsigned short)__a, &__cc);
+ return __cc == 0;
+}
+
+static inline __ATTRS_o_ai int
+vec_all_lt(vector bool short __a, vector bool short __b) {
+ int __cc;
+ __builtin_s390_vchlhs((vector unsigned short)__b,
+ (vector unsigned short)__a, &__cc);
+ return __cc == 0;
+}
+
+static inline __ATTRS_o_ai int
+vec_all_lt(vector signed int __a, vector signed int __b) {
+ int __cc;
+ __builtin_s390_vchfs(__b, __a, &__cc);
+ return __cc == 0;
+}
+
+static inline __ATTRS_o_ai int
+vec_all_lt(vector signed int __a, vector bool int __b) {
+ int __cc;
+ __builtin_s390_vchfs((vector signed int)__b, __a, &__cc);
+ return __cc == 0;
+}
+
+static inline __ATTRS_o_ai int
+vec_all_lt(vector bool int __a, vector signed int __b) {
+ int __cc;
+ __builtin_s390_vchfs(__b, (vector signed int)__a, &__cc);
+ return __cc == 0;
+}
+
+static inline __ATTRS_o_ai int
+vec_all_lt(vector unsigned int __a, vector unsigned int __b) {
+ int __cc;
+ __builtin_s390_vchlfs(__b, __a, &__cc);
+ return __cc == 0;
+}
+
+static inline __ATTRS_o_ai int
+vec_all_lt(vector unsigned int __a, vector bool int __b) {
+ int __cc;
+ __builtin_s390_vchlfs((vector unsigned int)__b, __a, &__cc);
+ return __cc == 0;
+}
+
+static inline __ATTRS_o_ai int
+vec_all_lt(vector bool int __a, vector unsigned int __b) {
+ int __cc;
+ __builtin_s390_vchlfs(__b, (vector unsigned int)__a, &__cc);
+ return __cc == 0;
+}
+
+static inline __ATTRS_o_ai int
+vec_all_lt(vector bool int __a, vector bool int __b) {
+ int __cc;
+ __builtin_s390_vchlfs((vector unsigned int)__b,
+ (vector unsigned int)__a, &__cc);
+ return __cc == 0;
+}
+
+static inline __ATTRS_o_ai int
+vec_all_lt(vector signed long long __a, vector signed long long __b) {
+ int __cc;
+ __builtin_s390_vchgs(__b, __a, &__cc);
+ return __cc == 0;
+}
+
+static inline __ATTRS_o_ai int
+vec_all_lt(vector signed long long __a, vector bool long long __b) {
+ int __cc;
+ __builtin_s390_vchgs((vector signed long long)__b, __a, &__cc);
+ return __cc == 0;
+}
+
+static inline __ATTRS_o_ai int
+vec_all_lt(vector bool long long __a, vector signed long long __b) {
+ int __cc;
+ __builtin_s390_vchgs(__b, (vector signed long long)__a, &__cc);
+ return __cc == 0;
+}
+
+static inline __ATTRS_o_ai int
+vec_all_lt(vector unsigned long long __a, vector unsigned long long __b) {
+ int __cc;
+ __builtin_s390_vchlgs(__b, __a, &__cc);
+ return __cc == 0;
+}
+
+static inline __ATTRS_o_ai int
+vec_all_lt(vector unsigned long long __a, vector bool long long __b) {
+ int __cc;
+ __builtin_s390_vchlgs((vector unsigned long long)__b, __a, &__cc);
+ return __cc == 0;
+}
+
+static inline __ATTRS_o_ai int
+vec_all_lt(vector bool long long __a, vector unsigned long long __b) {
+ int __cc;
+ __builtin_s390_vchlgs(__b, (vector unsigned long long)__a, &__cc);
+ return __cc == 0;
+}
+
+static inline __ATTRS_o_ai int
+vec_all_lt(vector bool long long __a, vector bool long long __b) {
+ int __cc;
+ __builtin_s390_vchlgs((vector unsigned long long)__b,
+ (vector unsigned long long)__a, &__cc);
+ return __cc == 0;
+}
+
+static inline __ATTRS_o_ai int
+vec_all_lt(vector double __a, vector double __b) {
+ int __cc;
+ __builtin_s390_vfchdbs(__b, __a, &__cc);
+ return __cc == 0;
+}
+
+/*-- vec_all_nge ------------------------------------------------------------*/
+
+static inline __ATTRS_ai int
+vec_all_nge(vector double __a, vector double __b) {
+ int __cc;
+ __builtin_s390_vfchedbs(__a, __b, &__cc);
+ return __cc == 3;
+}
+
+/*-- vec_all_ngt ------------------------------------------------------------*/
+
+static inline __ATTRS_ai int
+vec_all_ngt(vector double __a, vector double __b) {
+ int __cc;
+ __builtin_s390_vfchdbs(__a, __b, &__cc);
+ return __cc == 3;
+}
+
+/*-- vec_all_nle ------------------------------------------------------------*/
+
+static inline __ATTRS_ai int
+vec_all_nle(vector double __a, vector double __b) {
+ int __cc;
+ __builtin_s390_vfchedbs(__b, __a, &__cc);
+ return __cc == 3;
+}
+
+/*-- vec_all_nlt ------------------------------------------------------------*/
+
+static inline __ATTRS_ai int
+vec_all_nlt(vector double __a, vector double __b) {
+ int __cc;
+ __builtin_s390_vfchdbs(__b, __a, &__cc);
+ return __cc == 3;
+}
+
+/*-- vec_all_nan ------------------------------------------------------------*/
+
+static inline __ATTRS_ai int
+vec_all_nan(vector double __a) {
+ int __cc;
+ __builtin_s390_vftcidb(__a, 15, &__cc);
+ return __cc == 0;
+}
+
+/*-- vec_all_numeric --------------------------------------------------------*/
+
+static inline __ATTRS_ai int
+vec_all_numeric(vector double __a) {
+ int __cc;
+ __builtin_s390_vftcidb(__a, 15, &__cc);
+ return __cc == 3;
+}
+
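+/* vec_all_nan and vec_all_numeric both run the test-data-class builtin
+   __builtin_s390_vftcidb with mask 15, which selects the NaN classes:
+   __cc == 0 means every lane matched (all NaN) and __cc == 3 means no
+   lane matched (all numeric). A minimal sketch (illustrative only;
+   __v stands for an arbitrary vector double value and vec_sqrt is
+   defined elsewhere in this header):
+
+     if (vec_all_numeric(__v))
+       __r = vec_sqrt(__v);   // no NaN lane can reach the sqrt
+*/
+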
+/*-- vec_any_eq -------------------------------------------------------------*/
+
+static inline __ATTRS_o_ai int
+vec_any_eq(vector signed char __a, vector signed char __b) {
+ int __cc;
+ __builtin_s390_vceqbs(__a, __b, &__cc);
+ return __cc <= 1;
+}
+
+static inline __ATTRS_o_ai int
+vec_any_eq(vector signed char __a, vector bool char __b) {
+ int __cc;
+ __builtin_s390_vceqbs(__a, (vector signed char)__b, &__cc);
+ return __cc <= 1;
+}
+
+static inline __ATTRS_o_ai int
+vec_any_eq(vector bool char __a, vector signed char __b) {
+ int __cc;
+ __builtin_s390_vceqbs((vector signed char)__a, __b, &__cc);
+ return __cc <= 1;
+}
+
+static inline __ATTRS_o_ai int
+vec_any_eq(vector unsigned char __a, vector unsigned char __b) {
+ int __cc;
+ __builtin_s390_vceqbs((vector signed char)__a,
+ (vector signed char)__b, &__cc);
+ return __cc <= 1;
+}
+
+static inline __ATTRS_o_ai int
+vec_any_eq(vector unsigned char __a, vector bool char __b) {
+ int __cc;
+ __builtin_s390_vceqbs((vector signed char)__a,
+ (vector signed char)__b, &__cc);
+ return __cc <= 1;
+}
+
+static inline __ATTRS_o_ai int
+vec_any_eq(vector bool char __a, vector unsigned char __b) {
+ int __cc;
+ __builtin_s390_vceqbs((vector signed char)__a,
+ (vector signed char)__b, &__cc);
+ return __cc <= 1;
+}
+
+static inline __ATTRS_o_ai int
+vec_any_eq(vector bool char __a, vector bool char __b) {
+ int __cc;
+ __builtin_s390_vceqbs((vector signed char)__a,
+ (vector signed char)__b, &__cc);
+ return __cc <= 1;
+}
+
+static inline __ATTRS_o_ai int
+vec_any_eq(vector signed short __a, vector signed short __b) {
+ int __cc;
+ __builtin_s390_vceqhs(__a, __b, &__cc);
+ return __cc <= 1;
+}
+
+static inline __ATTRS_o_ai int
+vec_any_eq(vector signed short __a, vector bool short __b) {
+ int __cc;
+ __builtin_s390_vceqhs(__a, (vector signed short)__b, &__cc);
+ return __cc <= 1;
+}
+
+static inline __ATTRS_o_ai int
+vec_any_eq(vector bool short __a, vector signed short __b) {
+ int __cc;
+ __builtin_s390_vceqhs((vector signed short)__a, __b, &__cc);
+ return __cc <= 1;
+}
+
+static inline __ATTRS_o_ai int
+vec_any_eq(vector unsigned short __a, vector unsigned short __b) {
+ int __cc;
+ __builtin_s390_vceqhs((vector signed short)__a,
+ (vector signed short)__b, &__cc);
+ return __cc <= 1;
+}
+
+static inline __ATTRS_o_ai int
+vec_any_eq(vector unsigned short __a, vector bool short __b) {
+ int __cc;
+ __builtin_s390_vceqhs((vector signed short)__a,
+ (vector signed short)__b, &__cc);
+ return __cc <= 1;
+}
+
+static inline __ATTRS_o_ai int
+vec_any_eq(vector bool short __a, vector unsigned short __b) {
+ int __cc;
+ __builtin_s390_vceqhs((vector signed short)__a,
+ (vector signed short)__b, &__cc);
+ return __cc <= 1;
+}
+
+static inline __ATTRS_o_ai int
+vec_any_eq(vector bool short __a, vector bool short __b) {
+ int __cc;
+ __builtin_s390_vceqhs((vector signed short)__a,
+ (vector signed short)__b, &__cc);
+ return __cc <= 1;
+}
+
+static inline __ATTRS_o_ai int
+vec_any_eq(vector signed int __a, vector signed int __b) {
+ int __cc;
+ __builtin_s390_vceqfs(__a, __b, &__cc);
+ return __cc <= 1;
+}
+
+static inline __ATTRS_o_ai int
+vec_any_eq(vector signed int __a, vector bool int __b) {
+ int __cc;
+ __builtin_s390_vceqfs(__a, (vector signed int)__b, &__cc);
+ return __cc <= 1;
+}
+
+static inline __ATTRS_o_ai int
+vec_any_eq(vector bool int __a, vector signed int __b) {
+ int __cc;
+ __builtin_s390_vceqfs((vector signed int)__a, __b, &__cc);
+ return __cc <= 1;
+}
+
+static inline __ATTRS_o_ai int
+vec_any_eq(vector unsigned int __a, vector unsigned int __b) {
+ int __cc;
+ __builtin_s390_vceqfs((vector signed int)__a,
+ (vector signed int)__b, &__cc);
+ return __cc <= 1;
+}
+
+static inline __ATTRS_o_ai int
+vec_any_eq(vector unsigned int __a, vector bool int __b) {
+ int __cc;
+ __builtin_s390_vceqfs((vector signed int)__a,
+ (vector signed int)__b, &__cc);
+ return __cc <= 1;
+}
+
+static inline __ATTRS_o_ai int
+vec_any_eq(vector bool int __a, vector unsigned int __b) {
+ int __cc;
+ __builtin_s390_vceqfs((vector signed int)__a,
+ (vector signed int)__b, &__cc);
+ return __cc <= 1;
+}
+
+static inline __ATTRS_o_ai int
+vec_any_eq(vector bool int __a, vector bool int __b) {
+ int __cc;
+ __builtin_s390_vceqfs((vector signed int)__a,
+ (vector signed int)__b, &__cc);
+ return __cc <= 1;
+}
+
+static inline __ATTRS_o_ai int
+vec_any_eq(vector signed long long __a, vector signed long long __b) {
+ int __cc;
+ __builtin_s390_vceqgs(__a, __b, &__cc);
+ return __cc <= 1;
+}
+
+static inline __ATTRS_o_ai int
+vec_any_eq(vector signed long long __a, vector bool long long __b) {
+ int __cc;
+ __builtin_s390_vceqgs(__a, (vector signed long long)__b, &__cc);
+ return __cc <= 1;
+}
+
+static inline __ATTRS_o_ai int
+vec_any_eq(vector bool long long __a, vector signed long long __b) {
+ int __cc;
+ __builtin_s390_vceqgs((vector signed long long)__a, __b, &__cc);
+ return __cc <= 1;
+}
+
+static inline __ATTRS_o_ai int
+vec_any_eq(vector unsigned long long __a, vector unsigned long long __b) {
+ int __cc;
+ __builtin_s390_vceqgs((vector signed long long)__a,
+ (vector signed long long)__b, &__cc);
+ return __cc <= 1;
+}
+
+static inline __ATTRS_o_ai int
+vec_any_eq(vector unsigned long long __a, vector bool long long __b) {
+ int __cc;
+ __builtin_s390_vceqgs((vector signed long long)__a,
+ (vector signed long long)__b, &__cc);
+ return __cc <= 1;
+}
+
+static inline __ATTRS_o_ai int
+vec_any_eq(vector bool long long __a, vector unsigned long long __b) {
+ int __cc;
+ __builtin_s390_vceqgs((vector signed long long)__a,
+ (vector signed long long)__b, &__cc);
+ return __cc <= 1;
+}
+
+static inline __ATTRS_o_ai int
+vec_any_eq(vector bool long long __a, vector bool long long __b) {
+ int __cc;
+ __builtin_s390_vceqgs((vector signed long long)__a,
+ (vector signed long long)__b, &__cc);
+ return __cc <= 1;
+}
+
+static inline __ATTRS_o_ai int
+vec_any_eq(vector double __a, vector double __b) {
+ int __cc;
+ __builtin_s390_vfcedbs(__a, __b, &__cc);
+ return __cc <= 1;
+}
+
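+/* Illustrative sketch (not part of the zvector API): the any- forms
+ * accept __cc values 0 and 1, i.e. at least one lane compared equal. */
+static inline __ATTRS_ai int
+__example_any_eq(void) {
+ vector signed int __x = { 1, 2, 3, 4 };
+ vector signed int __y = { 9, 2, 9, 9 };
+ return vec_any_eq(__x, __y); /* 1: lane 1 matches */
+}
+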
+/*-- vec_any_ne -------------------------------------------------------------*/
+
+static inline __ATTRS_o_ai int
+vec_any_ne(vector signed char __a, vector signed char __b) {
+ int __cc;
+ __builtin_s390_vceqbs(__a, __b, &__cc);
+ return __cc != 0;
+}
+
+static inline __ATTRS_o_ai int
+vec_any_ne(vector signed char __a, vector bool char __b) {
+ int __cc;
+ __builtin_s390_vceqbs(__a, (vector signed char)__b, &__cc);
+ return __cc != 0;
+}
+
+static inline __ATTRS_o_ai int
+vec_any_ne(vector bool char __a, vector signed char __b) {
+ int __cc;
+ __builtin_s390_vceqbs((vector signed char)__a, __b, &__cc);
+ return __cc != 0;
+}
+
+static inline __ATTRS_o_ai int
+vec_any_ne(vector unsigned char __a, vector unsigned char __b) {
+ int __cc;
+ __builtin_s390_vceqbs((vector signed char)__a,
+ (vector signed char)__b, &__cc);
+ return __cc != 0;
+}
+
+static inline __ATTRS_o_ai int
+vec_any_ne(vector unsigned char __a, vector bool char __b) {
+ int __cc;
+ __builtin_s390_vceqbs((vector signed char)__a,
+ (vector signed char)__b, &__cc);
+ return __cc != 0;
+}
+
+static inline __ATTRS_o_ai int
+vec_any_ne(vector bool char __a, vector unsigned char __b) {
+ int __cc;
+ __builtin_s390_vceqbs((vector signed char)__a,
+ (vector signed char)__b, &__cc);
+ return __cc != 0;
+}
+
+static inline __ATTRS_o_ai int
+vec_any_ne(vector bool char __a, vector bool char __b) {
+ int __cc;
+ __builtin_s390_vceqbs((vector signed char)__a,
+ (vector signed char)__b, &__cc);
+ return __cc != 0;
+}
+
+static inline __ATTRS_o_ai int
+vec_any_ne(vector signed short __a, vector signed short __b) {
+ int __cc;
+ __builtin_s390_vceqhs(__a, __b, &__cc);
+ return __cc != 0;
+}
+
+static inline __ATTRS_o_ai int
+vec_any_ne(vector signed short __a, vector bool short __b) {
+ int __cc;
+ __builtin_s390_vceqhs(__a, (vector signed short)__b, &__cc);
+ return __cc != 0;
+}
+
+static inline __ATTRS_o_ai int
+vec_any_ne(vector bool short __a, vector signed short __b) {
+ int __cc;
+ __builtin_s390_vceqhs((vector signed short)__a, __b, &__cc);
+ return __cc != 0;
+}
+
+static inline __ATTRS_o_ai int
+vec_any_ne(vector unsigned short __a, vector unsigned short __b) {
+ int __cc;
+ __builtin_s390_vceqhs((vector signed short)__a,
+ (vector signed short)__b, &__cc);
+ return __cc != 0;
+}
+
+static inline __ATTRS_o_ai int
+vec_any_ne(vector unsigned short __a, vector bool short __b) {
+ int __cc;
+ __builtin_s390_vceqhs((vector signed short)__a,
+ (vector signed short)__b, &__cc);
+ return __cc != 0;
+}
+
+static inline __ATTRS_o_ai int
+vec_any_ne(vector bool short __a, vector unsigned short __b) {
+ int __cc;
+ __builtin_s390_vceqhs((vector signed short)__a,
+ (vector signed short)__b, &__cc);
+ return __cc != 0;
+}
+
+static inline __ATTRS_o_ai int
+vec_any_ne(vector bool short __a, vector bool short __b) {
+ int __cc;
+ __builtin_s390_vceqhs((vector signed short)__a,
+ (vector signed short)__b, &__cc);
+ return __cc != 0;
+}
+
+static inline __ATTRS_o_ai int
+vec_any_ne(vector signed int __a, vector signed int __b) {
+ int __cc;
+ __builtin_s390_vceqfs(__a, __b, &__cc);
+ return __cc != 0;
+}
+
+static inline __ATTRS_o_ai int
+vec_any_ne(vector signed int __a, vector bool int __b) {
+ int __cc;
+ __builtin_s390_vceqfs(__a, (vector signed int)__b, &__cc);
+ return __cc != 0;
+}
+
+static inline __ATTRS_o_ai int
+vec_any_ne(vector bool int __a, vector signed int __b) {
+ int __cc;
+ __builtin_s390_vceqfs((vector signed int)__a, __b, &__cc);
+ return __cc != 0;
+}
+
+static inline __ATTRS_o_ai int
+vec_any_ne(vector unsigned int __a, vector unsigned int __b) {
+ int __cc;
+ __builtin_s390_vceqfs((vector signed int)__a,
+ (vector signed int)__b, &__cc);
+ return __cc != 0;
+}
+
+static inline __ATTRS_o_ai int
+vec_any_ne(vector unsigned int __a, vector bool int __b) {
+ int __cc;
+ __builtin_s390_vceqfs((vector signed int)__a,
+ (vector signed int)__b, &__cc);
+ return __cc != 0;
+}
+
+static inline __ATTRS_o_ai int
+vec_any_ne(vector bool int __a, vector unsigned int __b) {
+ int __cc;
+ __builtin_s390_vceqfs((vector signed int)__a,
+ (vector signed int)__b, &__cc);
+ return __cc != 0;
+}
+
+static inline __ATTRS_o_ai int
+vec_any_ne(vector bool int __a, vector bool int __b) {
+ int __cc;
+ __builtin_s390_vceqfs((vector signed int)__a,
+ (vector signed int)__b, &__cc);
+ return __cc != 0;
+}
+
+static inline __ATTRS_o_ai int
+vec_any_ne(vector signed long long __a, vector signed long long __b) {
+ int __cc;
+ __builtin_s390_vceqgs(__a, __b, &__cc);
+ return __cc != 0;
+}
+
+static inline __ATTRS_o_ai int
+vec_any_ne(vector signed long long __a, vector bool long long __b) {
+ int __cc;
+ __builtin_s390_vceqgs(__a, (vector signed long long)__b, &__cc);
+ return __cc != 0;
+}
+
+static inline __ATTRS_o_ai int
+vec_any_ne(vector bool long long __a, vector signed long long __b) {
+ int __cc;
+ __builtin_s390_vceqgs((vector signed long long)__a, __b, &__cc);
+ return __cc != 0;
+}
+
+static inline __ATTRS_o_ai int
+vec_any_ne(vector unsigned long long __a, vector unsigned long long __b) {
+ int __cc;
+ __builtin_s390_vceqgs((vector signed long long)__a,
+ (vector signed long long)__b, &__cc);
+ return __cc != 0;
+}
+
+static inline __ATTRS_o_ai int
+vec_any_ne(vector unsigned long long __a, vector bool long long __b) {
+ int __cc;
+ __builtin_s390_vceqgs((vector signed long long)__a,
+ (vector signed long long)__b, &__cc);
+ return __cc != 0;
+}
+
+static inline __ATTRS_o_ai int
+vec_any_ne(vector bool long long __a, vector unsigned long long __b) {
+ int __cc;
+ __builtin_s390_vceqgs((vector signed long long)__a,
+ (vector signed long long)__b, &__cc);
+ return __cc != 0;
+}
+
+static inline __ATTRS_o_ai int
+vec_any_ne(vector bool long long __a, vector bool long long __b) {
+ int __cc;
+ __builtin_s390_vceqgs((vector signed long long)__a,
+ (vector signed long long)__b, &__cc);
+ return __cc != 0;
+}
+
+static inline __ATTRS_o_ai int
+vec_any_ne(vector double __a, vector double __b) {
+ int __cc;
+ __builtin_s390_vfcedbs(__a, __b, &__cc);
+ return __cc != 0;
+}
+
+/*-- vec_any_ge -------------------------------------------------------------*/
+
+static inline __ATTRS_o_ai int
+vec_any_ge(vector signed char __a, vector signed char __b) {
+ int __cc;
+ __builtin_s390_vchbs(__b, __a, &__cc);
+ return __cc != 0;
+}
+
+static inline __ATTRS_o_ai int
+vec_any_ge(vector signed char __a, vector bool char __b) {
+ int __cc;
+ __builtin_s390_vchbs((vector signed char)__b, __a, &__cc);
+ return __cc != 0;
+}
+
+static inline __ATTRS_o_ai int
+vec_any_ge(vector bool char __a, vector signed char __b) {
+ int __cc;
+ __builtin_s390_vchbs(__b, (vector signed char)__a, &__cc);
+ return __cc != 0;
+}
+
+static inline __ATTRS_o_ai int
+vec_any_ge(vector unsigned char __a, vector unsigned char __b) {
+ int __cc;
+ __builtin_s390_vchlbs(__b, __a, &__cc);
+ return __cc != 0;
+}
+
+static inline __ATTRS_o_ai int
+vec_any_ge(vector unsigned char __a, vector bool char __b) {
+ int __cc;
+ __builtin_s390_vchlbs((vector unsigned char)__b, __a, &__cc);
+ return __cc != 0;
+}
+
+static inline __ATTRS_o_ai int
+vec_any_ge(vector bool char __a, vector unsigned char __b) {
+ int __cc;
+ __builtin_s390_vchlbs(__b, (vector unsigned char)__a, &__cc);
+ return __cc != 0;
+}
+
+static inline __ATTRS_o_ai int
+vec_any_ge(vector bool char __a, vector bool char __b) {
+ int __cc;
+ __builtin_s390_vchlbs((vector unsigned char)__b,
+ (vector unsigned char)__a, &__cc);
+ return __cc != 0;
+}
+
+static inline __ATTRS_o_ai int
+vec_any_ge(vector signed short __a, vector signed short __b) {
+ int __cc;
+ __builtin_s390_vchhs(__b, __a, &__cc);
+ return __cc != 0;
+}
+
+static inline __ATTRS_o_ai int
+vec_any_ge(vector signed short __a, vector bool short __b) {
+ int __cc;
+ __builtin_s390_vchhs((vector signed short)__b, __a, &__cc);
+ return __cc != 0;
+}
+
+static inline __ATTRS_o_ai int
+vec_any_ge(vector bool short __a, vector signed short __b) {
+ int __cc;
+ __builtin_s390_vchhs(__b, (vector signed short)__a, &__cc);
+ return __cc != 0;
+}
+
+static inline __ATTRS_o_ai int
+vec_any_ge(vector unsigned short __a, vector unsigned short __b) {
+ int __cc;
+ __builtin_s390_vchlhs(__b, __a, &__cc);
+ return __cc != 0;
+}
+
+static inline __ATTRS_o_ai int
+vec_any_ge(vector unsigned short __a, vector bool short __b) {
+ int __cc;
+ __builtin_s390_vchlhs((vector unsigned short)__b, __a, &__cc);
+ return __cc != 0;
+}
+
+static inline __ATTRS_o_ai int
+vec_any_ge(vector bool short __a, vector unsigned short __b) {
+ int __cc;
+ __builtin_s390_vchlhs(__b, (vector unsigned short)__a, &__cc);
+ return __cc != 0;
+}
+
+static inline __ATTRS_o_ai int
+vec_any_ge(vector bool short __a, vector bool short __b) {
+ int __cc;
+ __builtin_s390_vchlhs((vector unsigned short)__b,
+ (vector unsigned short)__a, &__cc);
+ return __cc != 0;
+}
+
+static inline __ATTRS_o_ai int
+vec_any_ge(vector signed int __a, vector signed int __b) {
+ int __cc;
+ __builtin_s390_vchfs(__b, __a, &__cc);
+ return __cc != 0;
+}
+
+static inline __ATTRS_o_ai int
+vec_any_ge(vector signed int __a, vector bool int __b) {
+ int __cc;
+ __builtin_s390_vchfs((vector signed int)__b, __a, &__cc);
+ return __cc != 0;
+}
+
+static inline __ATTRS_o_ai int
+vec_any_ge(vector bool int __a, vector signed int __b) {
+ int __cc;
+ __builtin_s390_vchfs(__b, (vector signed int)__a, &__cc);
+ return __cc != 0;
+}
+
+static inline __ATTRS_o_ai int
+vec_any_ge(vector unsigned int __a, vector unsigned int __b) {
+ int __cc;
+ __builtin_s390_vchlfs(__b, __a, &__cc);
+ return __cc != 0;
+}
+
+static inline __ATTRS_o_ai int
+vec_any_ge(vector unsigned int __a, vector bool int __b) {
+ int __cc;
+ __builtin_s390_vchlfs((vector unsigned int)__b, __a, &__cc);
+ return __cc != 0;
+}
+
+static inline __ATTRS_o_ai int
+vec_any_ge(vector bool int __a, vector unsigned int __b) {
+ int __cc;
+ __builtin_s390_vchlfs(__b, (vector unsigned int)__a, &__cc);
+ return __cc != 0;
+}
+
+static inline __ATTRS_o_ai int
+vec_any_ge(vector bool int __a, vector bool int __b) {
+ int __cc;
+ __builtin_s390_vchlfs((vector unsigned int)__b,
+ (vector unsigned int)__a, &__cc);
+ return __cc != 0;
+}
+
+static inline __ATTRS_o_ai int
+vec_any_ge(vector signed long long __a, vector signed long long __b) {
+ int __cc;
+ __builtin_s390_vchgs(__b, __a, &__cc);
+ return __cc != 0;
+}
+
+static inline __ATTRS_o_ai int
+vec_any_ge(vector signed long long __a, vector bool long long __b) {
+ int __cc;
+ __builtin_s390_vchgs((vector signed long long)__b, __a, &__cc);
+ return __cc != 0;
+}
+
+static inline __ATTRS_o_ai int
+vec_any_ge(vector bool long long __a, vector signed long long __b) {
+ int __cc;
+ __builtin_s390_vchgs(__b, (vector signed long long)__a, &__cc);
+ return __cc != 0;
+}
+
+static inline __ATTRS_o_ai int
+vec_any_ge(vector unsigned long long __a, vector unsigned long long __b) {
+ int __cc;
+ __builtin_s390_vchlgs(__b, __a, &__cc);
+ return __cc != 0;
+}
+
+static inline __ATTRS_o_ai int
+vec_any_ge(vector unsigned long long __a, vector bool long long __b) {
+ int __cc;
+ __builtin_s390_vchlgs((vector unsigned long long)__b, __a, &__cc);
+ return __cc != 0;
+}
+
+static inline __ATTRS_o_ai int
+vec_any_ge(vector bool long long __a, vector unsigned long long __b) {
+ int __cc;
+ __builtin_s390_vchlgs(__b, (vector unsigned long long)__a, &__cc);
+ return __cc != 0;
+}
+
+static inline __ATTRS_o_ai int
+vec_any_ge(vector bool long long __a, vector bool long long __b) {
+ int __cc;
+ __builtin_s390_vchlgs((vector unsigned long long)__b,
+ (vector unsigned long long)__a, &__cc);
+ return __cc != 0;
+}
+
+static inline __ATTRS_o_ai int
+vec_any_ge(vector double __a, vector double __b) {
+ int __cc;
+ __builtin_s390_vfchedbs(__a, __b, &__cc);
+ return __cc <= 1;
+}
+
+/*-- vec_any_gt -------------------------------------------------------------*/
+
+static inline __ATTRS_o_ai int
+vec_any_gt(vector signed char __a, vector signed char __b) {
+ int __cc;
+ __builtin_s390_vchbs(__a, __b, &__cc);
+ return __cc <= 1;
+}
+
+static inline __ATTRS_o_ai int
+vec_any_gt(vector signed char __a, vector bool char __b) {
+ int __cc;
+ __builtin_s390_vchbs(__a, (vector signed char)__b, &__cc);
+ return __cc <= 1;
+}
+
+static inline __ATTRS_o_ai int
+vec_any_gt(vector bool char __a, vector signed char __b) {
+ int __cc;
+ __builtin_s390_vchbs((vector signed char)__a, __b, &__cc);
+ return __cc <= 1;
+}
+
+static inline __ATTRS_o_ai int
+vec_any_gt(vector unsigned char __a, vector unsigned char __b) {
+ int __cc;
+ __builtin_s390_vchlbs(__a, __b, &__cc);
+ return __cc <= 1;
+}
+
+static inline __ATTRS_o_ai int
+vec_any_gt(vector unsigned char __a, vector bool char __b) {
+ int __cc;
+ __builtin_s390_vchlbs(__a, (vector unsigned char)__b, &__cc);
+ return __cc <= 1;
+}
+
+static inline __ATTRS_o_ai int
+vec_any_gt(vector bool char __a, vector unsigned char __b) {
+ int __cc;
+ __builtin_s390_vchlbs((vector unsigned char)__a, __b, &__cc);
+ return __cc <= 1;
+}
+
+static inline __ATTRS_o_ai int
+vec_any_gt(vector bool char __a, vector bool char __b) {
+ int __cc;
+ __builtin_s390_vchlbs((vector unsigned char)__a,
+ (vector unsigned char)__b, &__cc);
+ return __cc <= 1;
+}
+
+static inline __ATTRS_o_ai int
+vec_any_gt(vector signed short __a, vector signed short __b) {
+ int __cc;
+ __builtin_s390_vchhs(__a, __b, &__cc);
+ return __cc <= 1;
+}
+
+static inline __ATTRS_o_ai int
+vec_any_gt(vector signed short __a, vector bool short __b) {
+ int __cc;
+ __builtin_s390_vchhs(__a, (vector signed short)__b, &__cc);
+ return __cc <= 1;
+}
+
+static inline __ATTRS_o_ai int
+vec_any_gt(vector bool short __a, vector signed short __b) {
+ int __cc;
+ __builtin_s390_vchhs((vector signed short)__a, __b, &__cc);
+ return __cc <= 1;
+}
+
+static inline __ATTRS_o_ai int
+vec_any_gt(vector unsigned short __a, vector unsigned short __b) {
+ int __cc;
+ __builtin_s390_vchlhs(__a, __b, &__cc);
+ return __cc <= 1;
+}
+
+static inline __ATTRS_o_ai int
+vec_any_gt(vector unsigned short __a, vector bool short __b) {
+ int __cc;
+ __builtin_s390_vchlhs(__a, (vector unsigned short)__b, &__cc);
+ return __cc <= 1;
+}
+
+static inline __ATTRS_o_ai int
+vec_any_gt(vector bool short __a, vector unsigned short __b) {
+ int __cc;
+ __builtin_s390_vchlhs((vector unsigned short)__a, __b, &__cc);
+ return __cc <= 1;
+}
+
+static inline __ATTRS_o_ai int
+vec_any_gt(vector bool short __a, vector bool short __b) {
+ int __cc;
+ __builtin_s390_vchlhs((vector unsigned short)__a,
+ (vector unsigned short)__b, &__cc);
+ return __cc <= 1;
+}
+
+static inline __ATTRS_o_ai int
+vec_any_gt(vector signed int __a, vector signed int __b) {
+ int __cc;
+ __builtin_s390_vchfs(__a, __b, &__cc);
+ return __cc <= 1;
+}
+
+static inline __ATTRS_o_ai int
+vec_any_gt(vector signed int __a, vector bool int __b) {
+ int __cc;
+ __builtin_s390_vchfs(__a, (vector signed int)__b, &__cc);
+ return __cc <= 1;
+}
+
+static inline __ATTRS_o_ai int
+vec_any_gt(vector bool int __a, vector signed int __b) {
+ int __cc;
+ __builtin_s390_vchfs((vector signed int)__a, __b, &__cc);
+ return __cc <= 1;
+}
+
+static inline __ATTRS_o_ai int
+vec_any_gt(vector unsigned int __a, vector unsigned int __b) {
+ int __cc;
+ __builtin_s390_vchlfs(__a, __b, &__cc);
+ return __cc <= 1;
+}
+
+static inline __ATTRS_o_ai int
+vec_any_gt(vector unsigned int __a, vector bool int __b) {
+ int __cc;
+ __builtin_s390_vchlfs(__a, (vector unsigned int)__b, &__cc);
+ return __cc <= 1;
+}
+
+static inline __ATTRS_o_ai int
+vec_any_gt(vector bool int __a, vector unsigned int __b) {
+ int __cc;
+ __builtin_s390_vchlfs((vector unsigned int)__a, __b, &__cc);
+ return __cc <= 1;
+}
+
+static inline __ATTRS_o_ai int
+vec_any_gt(vector bool int __a, vector bool int __b) {
+ int __cc;
+ __builtin_s390_vchlfs((vector unsigned int)__a,
+ (vector unsigned int)__b, &__cc);
+ return __cc <= 1;
+}
+
+static inline __ATTRS_o_ai int
+vec_any_gt(vector signed long long __a, vector signed long long __b) {
+ int __cc;
+ __builtin_s390_vchgs(__a, __b, &__cc);
+ return __cc <= 1;
+}
+
+static inline __ATTRS_o_ai int
+vec_any_gt(vector signed long long __a, vector bool long long __b) {
+ int __cc;
+ __builtin_s390_vchgs(__a, (vector signed long long)__b, &__cc);
+ return __cc <= 1;
+}
+
+static inline __ATTRS_o_ai int
+vec_any_gt(vector bool long long __a, vector signed long long __b) {
+ int __cc;
+ __builtin_s390_vchgs((vector signed long long)__a, __b, &__cc);
+ return __cc <= 1;
+}
+
+static inline __ATTRS_o_ai int
+vec_any_gt(vector unsigned long long __a, vector unsigned long long __b) {
+ int __cc;
+ __builtin_s390_vchlgs(__a, __b, &__cc);
+ return __cc <= 1;
+}
+
+static inline __ATTRS_o_ai int
+vec_any_gt(vector unsigned long long __a, vector bool long long __b) {
+ int __cc;
+ __builtin_s390_vchlgs(__a, (vector unsigned long long)__b, &__cc);
+ return __cc <= 1;
+}
+
+static inline __ATTRS_o_ai int
+vec_any_gt(vector bool long long __a, vector unsigned long long __b) {
+ int __cc;
+ __builtin_s390_vchlgs((vector unsigned long long)__a, __b, &__cc);
+ return __cc <= 1;
+}
+
+static inline __ATTRS_o_ai int
+vec_any_gt(vector bool long long __a, vector bool long long __b) {
+ int __cc;
+ __builtin_s390_vchlgs((vector unsigned long long)__a,
+ (vector unsigned long long)__b, &__cc);
+ return __cc <= 1;
+}
+
+static inline __ATTRS_o_ai int
+vec_any_gt(vector double __a, vector double __b) {
+ int __cc;
+ __builtin_s390_vfchdbs(__a, __b, &__cc);
+ return __cc <= 1;
+}
+
+/*-- vec_any_le -------------------------------------------------------------*/
+
+static inline __ATTRS_o_ai int
+vec_any_le(vector signed char __a, vector signed char __b) {
+ int __cc;
+ __builtin_s390_vchbs(__a, __b, &__cc);
+ return __cc != 0;
+}
+
+static inline __ATTRS_o_ai int
+vec_any_le(vector signed char __a, vector bool char __b) {
+ int __cc;
+ __builtin_s390_vchbs(__a, (vector signed char)__b, &__cc);
+ return __cc != 0;
+}
+
+static inline __ATTRS_o_ai int
+vec_any_le(vector bool char __a, vector signed char __b) {
+ int __cc;
+ __builtin_s390_vchbs((vector signed char)__a, __b, &__cc);
+ return __cc != 0;
+}
+
+static inline __ATTRS_o_ai int
+vec_any_le(vector unsigned char __a, vector unsigned char __b) {
+ int __cc;
+ __builtin_s390_vchlbs(__a, __b, &__cc);
+ return __cc != 0;
+}
+
+static inline __ATTRS_o_ai int
+vec_any_le(vector unsigned char __a, vector bool char __b) {
+ int __cc;
+ __builtin_s390_vchlbs(__a, (vector unsigned char)__b, &__cc);
+ return __cc != 0;
+}
+
+static inline __ATTRS_o_ai int
+vec_any_le(vector bool char __a, vector unsigned char __b) {
+ int __cc;
+ __builtin_s390_vchlbs((vector unsigned char)__a, __b, &__cc);
+ return __cc != 0;
+}
+
+static inline __ATTRS_o_ai int
+vec_any_le(vector bool char __a, vector bool char __b) {
+ int __cc;
+ __builtin_s390_vchlbs((vector unsigned char)__a,
+ (vector unsigned char)__b, &__cc);
+ return __cc != 0;
+}
+
+static inline __ATTRS_o_ai int
+vec_any_le(vector signed short __a, vector signed short __b) {
+ int __cc;
+ __builtin_s390_vchhs(__a, __b, &__cc);
+ return __cc != 0;
+}
+
+static inline __ATTRS_o_ai int
+vec_any_le(vector signed short __a, vector bool short __b) {
+ int __cc;
+ __builtin_s390_vchhs(__a, (vector signed short)__b, &__cc);
+ return __cc != 0;
+}
+
+static inline __ATTRS_o_ai int
+vec_any_le(vector bool short __a, vector signed short __b) {
+ int __cc;
+ __builtin_s390_vchhs((vector signed short)__a, __b, &__cc);
+ return __cc != 0;
+}
+
+static inline __ATTRS_o_ai int
+vec_any_le(vector unsigned short __a, vector unsigned short __b) {
+ int __cc;
+ __builtin_s390_vchlhs(__a, __b, &__cc);
+ return __cc != 0;
+}
+
+static inline __ATTRS_o_ai int
+vec_any_le(vector unsigned short __a, vector bool short __b) {
+ int __cc;
+ __builtin_s390_vchlhs(__a, (vector unsigned short)__b, &__cc);
+ return __cc != 0;
+}
+
+static inline __ATTRS_o_ai int
+vec_any_le(vector bool short __a, vector unsigned short __b) {
+ int __cc;
+ __builtin_s390_vchlhs((vector unsigned short)__a, __b, &__cc);
+ return __cc != 0;
+}
+
+static inline __ATTRS_o_ai int
+vec_any_le(vector bool short __a, vector bool short __b) {
+ int __cc;
+ __builtin_s390_vchlhs((vector unsigned short)__a,
+ (vector unsigned short)__b, &__cc);
+ return __cc != 0;
+}
+
+static inline __ATTRS_o_ai int
+vec_any_le(vector signed int __a, vector signed int __b) {
+ int __cc;
+ __builtin_s390_vchfs(__a, __b, &__cc);
+ return __cc != 0;
+}
+
+static inline __ATTRS_o_ai int
+vec_any_le(vector signed int __a, vector bool int __b) {
+ int __cc;
+ __builtin_s390_vchfs(__a, (vector signed int)__b, &__cc);
+ return __cc != 0;
+}
+
+static inline __ATTRS_o_ai int
+vec_any_le(vector bool int __a, vector signed int __b) {
+ int __cc;
+ __builtin_s390_vchfs((vector signed int)__a, __b, &__cc);
+ return __cc != 0;
+}
+
+static inline __ATTRS_o_ai int
+vec_any_le(vector unsigned int __a, vector unsigned int __b) {
+ int __cc;
+ __builtin_s390_vchlfs(__a, __b, &__cc);
+ return __cc != 0;
+}
+
+static inline __ATTRS_o_ai int
+vec_any_le(vector unsigned int __a, vector bool int __b) {
+ int __cc;
+ __builtin_s390_vchlfs(__a, (vector unsigned int)__b, &__cc);
+ return __cc != 0;
+}
+
+static inline __ATTRS_o_ai int
+vec_any_le(vector bool int __a, vector unsigned int __b) {
+ int __cc;
+ __builtin_s390_vchlfs((vector unsigned int)__a, __b, &__cc);
+ return __cc != 0;
+}
+
+static inline __ATTRS_o_ai int
+vec_any_le(vector bool int __a, vector bool int __b) {
+ int __cc;
+ __builtin_s390_vchlfs((vector unsigned int)__a,
+ (vector unsigned int)__b, &__cc);
+ return __cc != 0;
+}
+
+static inline __ATTRS_o_ai int
+vec_any_le(vector signed long long __a, vector signed long long __b) {
+ int __cc;
+ __builtin_s390_vchgs(__a, __b, &__cc);
+ return __cc != 0;
+}
+
+static inline __ATTRS_o_ai int
+vec_any_le(vector signed long long __a, vector bool long long __b) {
+ int __cc;
+ __builtin_s390_vchgs(__a, (vector signed long long)__b, &__cc);
+ return __cc != 0;
+}
+
+static inline __ATTRS_o_ai int
+vec_any_le(vector bool long long __a, vector signed long long __b) {
+ int __cc;
+ __builtin_s390_vchgs((vector signed long long)__a, __b, &__cc);
+ return __cc != 0;
+}
+
+static inline __ATTRS_o_ai int
+vec_any_le(vector unsigned long long __a, vector unsigned long long __b) {
+ int __cc;
+ __builtin_s390_vchlgs(__a, __b, &__cc);
+ return __cc != 0;
+}
+
+static inline __ATTRS_o_ai int
+vec_any_le(vector unsigned long long __a, vector bool long long __b) {
+ int __cc;
+ __builtin_s390_vchlgs(__a, (vector unsigned long long)__b, &__cc);
+ return __cc != 0;
+}
+
+static inline __ATTRS_o_ai int
+vec_any_le(vector bool long long __a, vector unsigned long long __b) {
+ int __cc;
+ __builtin_s390_vchlgs((vector unsigned long long)__a, __b, &__cc);
+ return __cc != 0;
+}
+
+static inline __ATTRS_o_ai int
+vec_any_le(vector bool long long __a, vector bool long long __b) {
+ int __cc;
+ __builtin_s390_vchlgs((vector unsigned long long)__a,
+ (vector unsigned long long)__b, &__cc);
+ return __cc != 0;
+}
+
+static inline __ATTRS_o_ai int
+vec_any_le(vector double __a, vector double __b) {
+ int __cc;
+ __builtin_s390_vfchedbs(__b, __a, &__cc);
+ return __cc <= 1;
+}
+
+/*-- vec_any_lt -------------------------------------------------------------*/
+
+static inline __ATTRS_o_ai int
+vec_any_lt(vector signed char __a, vector signed char __b) {
+ int __cc;
+ __builtin_s390_vchbs(__b, __a, &__cc);
+ return __cc <= 1;
+}
+
+static inline __ATTRS_o_ai int
+vec_any_lt(vector signed char __a, vector bool char __b) {
+ int __cc;
+ __builtin_s390_vchbs((vector signed char)__b, __a, &__cc);
+ return __cc <= 1;
+}
+
+static inline __ATTRS_o_ai int
+vec_any_lt(vector bool char __a, vector signed char __b) {
+ int __cc;
+ __builtin_s390_vchbs(__b, (vector signed char)__a, &__cc);
+ return __cc <= 1;
+}
+
+static inline __ATTRS_o_ai int
+vec_any_lt(vector unsigned char __a, vector unsigned char __b) {
+ int __cc;
+ __builtin_s390_vchlbs(__b, __a, &__cc);
+ return __cc <= 1;
+}
+
+static inline __ATTRS_o_ai int
+vec_any_lt(vector unsigned char __a, vector bool char __b) {
+ int __cc;
+ __builtin_s390_vchlbs((vector unsigned char)__b, __a, &__cc);
+ return __cc <= 1;
+}
+
+static inline __ATTRS_o_ai int
+vec_any_lt(vector bool char __a, vector unsigned char __b) {
+ int __cc;
+ __builtin_s390_vchlbs(__b, (vector unsigned char)__a, &__cc);
+ return __cc <= 1;
+}
+
+static inline __ATTRS_o_ai int
+vec_any_lt(vector bool char __a, vector bool char __b) {
+ int __cc;
+ __builtin_s390_vchlbs((vector unsigned char)__b,
+ (vector unsigned char)__a, &__cc);
+ return __cc <= 1;
+}
+
+static inline __ATTRS_o_ai int
+vec_any_lt(vector signed short __a, vector signed short __b) {
+ int __cc;
+ __builtin_s390_vchhs(__b, __a, &__cc);
+ return __cc <= 1;
+}
+
+static inline __ATTRS_o_ai int
+vec_any_lt(vector signed short __a, vector bool short __b) {
+ int __cc;
+ __builtin_s390_vchhs((vector signed short)__b, __a, &__cc);
+ return __cc <= 1;
+}
+
+static inline __ATTRS_o_ai int
+vec_any_lt(vector bool short __a, vector signed short __b) {
+ int __cc;
+ __builtin_s390_vchhs(__b, (vector signed short)__a, &__cc);
+ return __cc <= 1;
+}
+
+static inline __ATTRS_o_ai int
+vec_any_lt(vector unsigned short __a, vector unsigned short __b) {
+ int __cc;
+ __builtin_s390_vchlhs(__b, __a, &__cc);
+ return __cc <= 1;
+}
+
+static inline __ATTRS_o_ai int
+vec_any_lt(vector unsigned short __a, vector bool short __b) {
+ int __cc;
+ __builtin_s390_vchlhs((vector unsigned short)__b, __a, &__cc);
+ return __cc <= 1;
+}
+
+static inline __ATTRS_o_ai int
+vec_any_lt(vector bool short __a, vector unsigned short __b) {
+ int __cc;
+ __builtin_s390_vchlhs(__b, (vector unsigned short)__a, &__cc);
+ return __cc <= 1;
+}
+
+static inline __ATTRS_o_ai int
+vec_any_lt(vector bool short __a, vector bool short __b) {
+ int __cc;
+ __builtin_s390_vchlhs((vector unsigned short)__b,
+ (vector unsigned short)__a, &__cc);
+ return __cc <= 1;
+}
+
+static inline __ATTRS_o_ai int
+vec_any_lt(vector signed int __a, vector signed int __b) {
+ int __cc;
+ __builtin_s390_vchfs(__b, __a, &__cc);
+ return __cc <= 1;
+}
+
+static inline __ATTRS_o_ai int
+vec_any_lt(vector signed int __a, vector bool int __b) {
+ int __cc;
+ __builtin_s390_vchfs((vector signed int)__b, __a, &__cc);
+ return __cc <= 1;
+}
+
+static inline __ATTRS_o_ai int
+vec_any_lt(vector bool int __a, vector signed int __b) {
+ int __cc;
+ __builtin_s390_vchfs(__b, (vector signed int)__a, &__cc);
+ return __cc <= 1;
+}
+
+static inline __ATTRS_o_ai int
+vec_any_lt(vector unsigned int __a, vector unsigned int __b) {
+ int __cc;
+ __builtin_s390_vchlfs(__b, __a, &__cc);
+ return __cc <= 1;
+}
+
+static inline __ATTRS_o_ai int
+vec_any_lt(vector unsigned int __a, vector bool int __b) {
+ int __cc;
+ __builtin_s390_vchlfs((vector unsigned int)__b, __a, &__cc);
+ return __cc <= 1;
+}
+
+static inline __ATTRS_o_ai int
+vec_any_lt(vector bool int __a, vector unsigned int __b) {
+ int __cc;
+ __builtin_s390_vchlfs(__b, (vector unsigned int)__a, &__cc);
+ return __cc <= 1;
+}
+
+static inline __ATTRS_o_ai int
+vec_any_lt(vector bool int __a, vector bool int __b) {
+ int __cc;
+ __builtin_s390_vchlfs((vector unsigned int)__b,
+ (vector unsigned int)__a, &__cc);
+ return __cc <= 1;
+}
+
+static inline __ATTRS_o_ai int
+vec_any_lt(vector signed long long __a, vector signed long long __b) {
+ int __cc;
+ __builtin_s390_vchgs(__b, __a, &__cc);
+ return __cc <= 1;
+}
+
+static inline __ATTRS_o_ai int
+vec_any_lt(vector signed long long __a, vector bool long long __b) {
+ int __cc;
+ __builtin_s390_vchgs((vector signed long long)__b, __a, &__cc);
+ return __cc <= 1;
+}
+
+static inline __ATTRS_o_ai int
+vec_any_lt(vector bool long long __a, vector signed long long __b) {
+ int __cc;
+ __builtin_s390_vchgs(__b, (vector signed long long)__a, &__cc);
+ return __cc <= 1;
+}
+
+static inline __ATTRS_o_ai int
+vec_any_lt(vector unsigned long long __a, vector unsigned long long __b) {
+ int __cc;
+ __builtin_s390_vchlgs(__b, __a, &__cc);
+ return __cc <= 1;
+}
+
+static inline __ATTRS_o_ai int
+vec_any_lt(vector unsigned long long __a, vector bool long long __b) {
+ int __cc;
+ __builtin_s390_vchlgs((vector unsigned long long)__b, __a, &__cc);
+ return __cc <= 1;
+}
+
+static inline __ATTRS_o_ai int
+vec_any_lt(vector bool long long __a, vector unsigned long long __b) {
+ int __cc;
+ __builtin_s390_vchlgs(__b, (vector unsigned long long)__a, &__cc);
+ return __cc <= 1;
+}
+
+static inline __ATTRS_o_ai int
+vec_any_lt(vector bool long long __a, vector bool long long __b) {
+ int __cc;
+ __builtin_s390_vchlgs((vector unsigned long long)__b,
+ (vector unsigned long long)__a, &__cc);
+ return __cc <= 1;
+}
+
+static inline __ATTRS_o_ai int
+vec_any_lt(vector double __a, vector double __b) {
+ int __cc;
+ __builtin_s390_vfchdbs(__b, __a, &__cc);
+ return __cc <= 1;
+}
+
+/*-- vec_any_nge ------------------------------------------------------------*/
+
+static inline __ATTRS_ai int
+vec_any_nge(vector double __a, vector double __b) {
+ int __cc;
+ __builtin_s390_vfchedbs(__a, __b, &__cc);
+ return __cc != 0;
+}
+
+/*-- vec_any_ngt ------------------------------------------------------------*/
+
+static inline __ATTRS_ai int
+vec_any_ngt(vector double __a, vector double __b) {
+ int __cc;
+ __builtin_s390_vfchdbs(__a, __b, &__cc);
+ return __cc != 0;
+}
+
+/*-- vec_any_nle ------------------------------------------------------------*/
+
+static inline __ATTRS_ai int
+vec_any_nle(vector double __a, vector double __b) {
+ int __cc;
+ __builtin_s390_vfchedbs(__b, __a, &__cc);
+ return __cc != 0;
+}
+
+/*-- vec_any_nlt ------------------------------------------------------------*/
+
+static inline __ATTRS_ai int
+vec_any_nlt(vector double __a, vector double __b) {
+ int __cc;
+ __builtin_s390_vfchdbs(__b, __a, &__cc);
+ return __cc != 0;
+}
+
+/*-- vec_any_nan ------------------------------------------------------------*/
+
+static inline __ATTRS_ai int
+vec_any_nan(vector double __a) {
+ int __cc;
+ __builtin_s390_vftcidb(__a, 15, &__cc);
+ return __cc != 3;
+}
+
+/*-- vec_any_numeric --------------------------------------------------------*/
+
+static inline __ATTRS_ai int
+vec_any_numeric(vector double __a) {
+ int __cc;
+ __builtin_s390_vftcidb(__a, 15, &__cc);
+ return __cc != 0;
+}
+
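+/* Illustrative sketch (not part of the zvector API): for a vector that
+ * mixes NaN and numeric lanes, vec_any_nan and vec_any_numeric are both
+ * true at the same time. */
+static inline __ATTRS_ai int
+__example_any_nan_mixed(void) {
+ vector double __x = { 0.5, __builtin_nan("") };
+ return vec_any_nan(__x) && vec_any_numeric(__x); /* 1 */
+}
+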
+/*-- vec_andc ---------------------------------------------------------------*/
+
+static inline __ATTRS_o_ai vector bool char
+vec_andc(vector bool char __a, vector bool char __b) {
+ return __a & ~__b;
+}
+
+static inline __ATTRS_o_ai vector signed char
+vec_andc(vector signed char __a, vector signed char __b) {
+ return __a & ~__b;
+}
+
+static inline __ATTRS_o_ai vector signed char
+vec_andc(vector bool char __a, vector signed char __b) {
+ return __a & ~__b;
+}
+
+static inline __ATTRS_o_ai vector signed char
+vec_andc(vector signed char __a, vector bool char __b) {
+ return __a & ~__b;
+}
+
+static inline __ATTRS_o_ai vector unsigned char
+vec_andc(vector unsigned char __a, vector unsigned char __b) {
+ return __a & ~__b;
+}
+
+static inline __ATTRS_o_ai vector unsigned char
+vec_andc(vector bool char __a, vector unsigned char __b) {
+ return __a & ~__b;
+}
+
+static inline __ATTRS_o_ai vector unsigned char
+vec_andc(vector unsigned char __a, vector bool char __b) {
+ return __a & ~__b;
+}
+
+static inline __ATTRS_o_ai vector bool short
+vec_andc(vector bool short __a, vector bool short __b) {
+ return __a & ~__b;
+}
+
+static inline __ATTRS_o_ai vector signed short
+vec_andc(vector signed short __a, vector signed short __b) {
+ return __a & ~__b;
+}
+
+static inline __ATTRS_o_ai vector signed short
+vec_andc(vector bool short __a, vector signed short __b) {
+ return __a & ~__b;
+}
+
+static inline __ATTRS_o_ai vector signed short
+vec_andc(vector signed short __a, vector bool short __b) {
+ return __a & ~__b;
+}
+
+static inline __ATTRS_o_ai vector unsigned short
+vec_andc(vector unsigned short __a, vector unsigned short __b) {
+ return __a & ~__b;
+}
+
+static inline __ATTRS_o_ai vector unsigned short
+vec_andc(vector bool short __a, vector unsigned short __b) {
+ return __a & ~__b;
+}
+
+static inline __ATTRS_o_ai vector unsigned short
+vec_andc(vector unsigned short __a, vector bool short __b) {
+ return __a & ~__b;
+}
+
+static inline __ATTRS_o_ai vector bool int
+vec_andc(vector bool int __a, vector bool int __b) {
+ return __a & ~__b;
+}
+
+static inline __ATTRS_o_ai vector signed int
+vec_andc(vector signed int __a, vector signed int __b) {
+ return __a & ~__b;
+}
+
+static inline __ATTRS_o_ai vector signed int
+vec_andc(vector bool int __a, vector signed int __b) {
+ return __a & ~__b;
+}
+
+static inline __ATTRS_o_ai vector signed int
+vec_andc(vector signed int __a, vector bool int __b) {
+ return __a & ~__b;
+}
+
+static inline __ATTRS_o_ai vector unsigned int
+vec_andc(vector unsigned int __a, vector unsigned int __b) {
+ return __a & ~__b;
+}
+
+static inline __ATTRS_o_ai vector unsigned int
+vec_andc(vector bool int __a, vector unsigned int __b) {
+ return __a & ~__b;
+}
+
+static inline __ATTRS_o_ai vector unsigned int
+vec_andc(vector unsigned int __a, vector bool int __b) {
+ return __a & ~__b;
+}
+
+static inline __ATTRS_o_ai vector bool long long
+vec_andc(vector bool long long __a, vector bool long long __b) {
+ return __a & ~__b;
+}
+
+static inline __ATTRS_o_ai vector signed long long
+vec_andc(vector signed long long __a, vector signed long long __b) {
+ return __a & ~__b;
+}
+
+static inline __ATTRS_o_ai vector signed long long
+vec_andc(vector bool long long __a, vector signed long long __b) {
+ return __a & ~__b;
+}
+
+static inline __ATTRS_o_ai vector signed long long
+vec_andc(vector signed long long __a, vector bool long long __b) {
+ return __a & ~__b;
+}
+
+static inline __ATTRS_o_ai vector unsigned long long
+vec_andc(vector unsigned long long __a, vector unsigned long long __b) {
+ return __a & ~__b;
+}
+
+static inline __ATTRS_o_ai vector unsigned long long
+vec_andc(vector bool long long __a, vector unsigned long long __b) {
+ return __a & ~__b;
+}
+
+static inline __ATTRS_o_ai vector unsigned long long
+vec_andc(vector unsigned long long __a, vector bool long long __b) {
+ return __a & ~__b;
+}
+
+static inline __ATTRS_o_ai vector double
+vec_andc(vector double __a, vector double __b) {
+ return (vector double)((vector unsigned long long)__a &
+ ~(vector unsigned long long)__b);
+}
+
+static inline __ATTRS_o_ai vector double
+vec_andc(vector bool long long __a, vector double __b) {
+ return (vector double)((vector unsigned long long)__a &
+ ~(vector unsigned long long)__b);
+}
+
+static inline __ATTRS_o_ai vector double
+vec_andc(vector double __a, vector bool long long __b) {
+ return (vector double)((vector unsigned long long)__a &
+ ~(vector unsigned long long)__b);
+}
+
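+/* Illustrative sketch (not part of the zvector API): vec_andc computes
+ * __a & ~__b, clearing in __a every bit that is set in __b. */
+static inline __ATTRS_ai vector unsigned int
+__example_andc(void) {
+ vector unsigned int __a = { 0xFFu, 0xFFu, 0xFFu, 0xFFu };
+ vector unsigned int __b = { 0x0Fu, 0x0Fu, 0x0Fu, 0x0Fu };
+ return vec_andc(__a, __b); /* every lane: 0xF0 */
+}
+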
+/*-- vec_nor ----------------------------------------------------------------*/
+
+static inline __ATTRS_o_ai vector bool char
+vec_nor(vector bool char __a, vector bool char __b) {
+ return ~(__a | __b);
+}
+
+static inline __ATTRS_o_ai vector signed char
+vec_nor(vector signed char __a, vector signed char __b) {
+ return ~(__a | __b);
+}
+
+static inline __ATTRS_o_ai vector signed char
+vec_nor(vector bool char __a, vector signed char __b) {
+ return ~(__a | __b);
+}
+
+static inline __ATTRS_o_ai vector signed char
+vec_nor(vector signed char __a, vector bool char __b) {
+ return ~(__a | __b);
+}
+
+static inline __ATTRS_o_ai vector unsigned char
+vec_nor(vector unsigned char __a, vector unsigned char __b) {
+ return ~(__a | __b);
+}
+
+static inline __ATTRS_o_ai vector unsigned char
+vec_nor(vector bool char __a, vector unsigned char __b) {
+ return ~(__a | __b);
+}
+
+static inline __ATTRS_o_ai vector unsigned char
+vec_nor(vector unsigned char __a, vector bool char __b) {
+ return ~(__a | __b);
+}
+
+static inline __ATTRS_o_ai vector bool short
+vec_nor(vector bool short __a, vector bool short __b) {
+ return ~(__a | __b);
+}
+
+static inline __ATTRS_o_ai vector signed short
+vec_nor(vector signed short __a, vector signed short __b) {
+ return ~(__a | __b);
+}
+
+static inline __ATTRS_o_ai vector signed short
+vec_nor(vector bool short __a, vector signed short __b) {
+ return ~(__a | __b);
+}
+
+static inline __ATTRS_o_ai vector signed short
+vec_nor(vector signed short __a, vector bool short __b) {
+ return ~(__a | __b);
+}
+
+static inline __ATTRS_o_ai vector unsigned short
+vec_nor(vector unsigned short __a, vector unsigned short __b) {
+ return ~(__a | __b);
+}
+
+static inline __ATTRS_o_ai vector unsigned short
+vec_nor(vector bool short __a, vector unsigned short __b) {
+ return ~(__a | __b);
+}
+
+static inline __ATTRS_o_ai vector unsigned short
+vec_nor(vector unsigned short __a, vector bool short __b) {
+ return ~(__a | __b);
+}
+
+static inline __ATTRS_o_ai vector bool int
+vec_nor(vector bool int __a, vector bool int __b) {
+ return ~(__a | __b);
+}
+
+static inline __ATTRS_o_ai vector signed int
+vec_nor(vector signed int __a, vector signed int __b) {
+ return ~(__a | __b);
+}
+
+static inline __ATTRS_o_ai vector signed int
+vec_nor(vector bool int __a, vector signed int __b) {
+ return ~(__a | __b);
+}
+
+static inline __ATTRS_o_ai vector signed int
+vec_nor(vector signed int __a, vector bool int __b) {
+ return ~(__a | __b);
+}
+
+static inline __ATTRS_o_ai vector unsigned int
+vec_nor(vector unsigned int __a, vector unsigned int __b) {
+ return ~(__a | __b);
+}
+
+static inline __ATTRS_o_ai vector unsigned int
+vec_nor(vector bool int __a, vector unsigned int __b) {
+ return ~(__a | __b);
+}
+
+static inline __ATTRS_o_ai vector unsigned int
+vec_nor(vector unsigned int __a, vector bool int __b) {
+ return ~(__a | __b);
+}
+
+static inline __ATTRS_o_ai vector bool long long
+vec_nor(vector bool long long __a, vector bool long long __b) {
+ return ~(__a | __b);
+}
+
+static inline __ATTRS_o_ai vector signed long long
+vec_nor(vector signed long long __a, vector signed long long __b) {
+ return ~(__a | __b);
+}
+
+static inline __ATTRS_o_ai vector signed long long
+vec_nor(vector bool long long __a, vector signed long long __b) {
+ return ~(__a | __b);
+}
+
+static inline __ATTRS_o_ai vector signed long long
+vec_nor(vector signed long long __a, vector bool long long __b) {
+ return ~(__a | __b);
+}
+
+static inline __ATTRS_o_ai vector unsigned long long
+vec_nor(vector unsigned long long __a, vector unsigned long long __b) {
+ return ~(__a | __b);
+}
+
+static inline __ATTRS_o_ai vector unsigned long long
+vec_nor(vector bool long long __a, vector unsigned long long __b) {
+ return ~(__a | __b);
+}
+
+static inline __ATTRS_o_ai vector unsigned long long
+vec_nor(vector unsigned long long __a, vector bool long long __b) {
+ return ~(__a | __b);
+}
+
+static inline __ATTRS_o_ai vector double
+vec_nor(vector double __a, vector double __b) {
+ return (vector double)~((vector unsigned long long)__a |
+ (vector unsigned long long)__b);
+}
+
+static inline __ATTRS_o_ai vector double
+vec_nor(vector bool long long __a, vector double __b) {
+ return (vector double)~((vector unsigned long long)__a |
+ (vector unsigned long long)__b);
+}
+
+static inline __ATTRS_o_ai vector double
+vec_nor(vector double __a, vector bool long long __b) {
+ return (vector double)~((vector unsigned long long)__a |
+ (vector unsigned long long)__b);
+}
+
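+/* Illustrative sketch (not part of the zvector API): vec_nor computes
+ * ~(__a | __b); applied to one operand twice it doubles as bitwise NOT. */
+static inline __ATTRS_ai vector unsigned int
+__example_not_via_nor(vector unsigned int __a) {
+ return vec_nor(__a, __a); /* ~__a */
+}
+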
+/*-- vec_cntlz --------------------------------------------------------------*/
+
+static inline __ATTRS_o_ai vector unsigned char
+vec_cntlz(vector signed char __a) {
+ return __builtin_s390_vclzb((vector unsigned char)__a);
+}
+
+static inline __ATTRS_o_ai vector unsigned char
+vec_cntlz(vector unsigned char __a) {
+ return __builtin_s390_vclzb(__a);
+}
+
+static inline __ATTRS_o_ai vector unsigned short
+vec_cntlz(vector signed short __a) {
+ return __builtin_s390_vclzh((vector unsigned short)__a);
+}
+
+static inline __ATTRS_o_ai vector unsigned short
+vec_cntlz(vector unsigned short __a) {
+ return __builtin_s390_vclzh(__a);
+}
+
+static inline __ATTRS_o_ai vector unsigned int
+vec_cntlz(vector signed int __a) {
+ return __builtin_s390_vclzf((vector unsigned int)__a);
+}
+
+static inline __ATTRS_o_ai vector unsigned int
+vec_cntlz(vector unsigned int __a) {
+ return __builtin_s390_vclzf(__a);
+}
+
+static inline __ATTRS_o_ai vector unsigned long long
+vec_cntlz(vector signed long long __a) {
+ return __builtin_s390_vclzg((vector unsigned long long)__a);
+}
+
+static inline __ATTRS_o_ai vector unsigned long long
+vec_cntlz(vector unsigned long long __a) {
+ return __builtin_s390_vclzg(__a);
+}
+
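+/* Illustrative sketch (not part of the zvector API): the result always
+ * uses the unsigned element type of the same width, and an all-zero lane
+ * counts as the full element width. */
+static inline __ATTRS_ai vector unsigned int
+__example_cntlz(void) {
+ vector signed int __x = { 1, 0x7FFFFFFF, 0, -1 };
+ return vec_cntlz(__x); /* { 31, 1, 32, 0 } */
+}
+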
+/*-- vec_cnttz --------------------------------------------------------------*/
+
+static inline __ATTRS_o_ai vector unsigned char
+vec_cnttz(vector signed char __a) {
+ return __builtin_s390_vctzb((vector unsigned char)__a);
+}
+
+static inline __ATTRS_o_ai vector unsigned char
+vec_cnttz(vector unsigned char __a) {
+ return __builtin_s390_vctzb(__a);
+}
+
+static inline __ATTRS_o_ai vector unsigned short
+vec_cnttz(vector signed short __a) {
+ return __builtin_s390_vctzh((vector unsigned short)__a);
+}
+
+static inline __ATTRS_o_ai vector unsigned short
+vec_cnttz(vector unsigned short __a) {
+ return __builtin_s390_vctzh(__a);
+}
+
+static inline __ATTRS_o_ai vector unsigned int
+vec_cnttz(vector signed int __a) {
+ return __builtin_s390_vctzf((vector unsigned int)__a);
+}
+
+static inline __ATTRS_o_ai vector unsigned int
+vec_cnttz(vector unsigned int __a) {
+ return __builtin_s390_vctzf(__a);
+}
+
+static inline __ATTRS_o_ai vector unsigned long long
+vec_cnttz(vector signed long long __a) {
+ return __builtin_s390_vctzg((vector unsigned long long)__a);
+}
+
+static inline __ATTRS_o_ai vector unsigned long long
+vec_cnttz(vector unsigned long long __a) {
+ return __builtin_s390_vctzg(__a);
+}
+
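+/* Illustrative sketch (not part of the zvector API): trailing-zero counts
+ * per lane, again with zero lanes counting the full element width. */
+static inline __ATTRS_ai vector unsigned int
+__example_cnttz(void) {
+ vector signed int __x = { 8, 1, 0, -2 };
+ return vec_cnttz(__x); /* { 3, 0, 32, 1 } */
+}
+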
+/*-- vec_popcnt -------------------------------------------------------------*/
+
+static inline __ATTRS_o_ai vector unsigned char
+vec_popcnt(vector signed char __a) {
+ return __builtin_s390_vpopctb((vector unsigned char)__a);
+}
+
+static inline __ATTRS_o_ai vector unsigned char
+vec_popcnt(vector unsigned char __a) {
+ return __builtin_s390_vpopctb(__a);
+}
+
+static inline __ATTRS_o_ai vector unsigned short
+vec_popcnt(vector signed short __a) {
+ return __builtin_s390_vpopcth((vector unsigned short)__a);
+}
+
+static inline __ATTRS_o_ai vector unsigned short
+vec_popcnt(vector unsigned short __a) {
+ return __builtin_s390_vpopcth(__a);
+}
+
+static inline __ATTRS_o_ai vector unsigned int
+vec_popcnt(vector signed int __a) {
+ return __builtin_s390_vpopctf((vector unsigned int)__a);
+}
+
+static inline __ATTRS_o_ai vector unsigned int
+vec_popcnt(vector unsigned int __a) {
+ return __builtin_s390_vpopctf(__a);
+}
+
+static inline __ATTRS_o_ai vector unsigned long long
+vec_popcnt(vector signed long long __a) {
+ return __builtin_s390_vpopctg((vector unsigned long long)__a);
+}
+
+static inline __ATTRS_o_ai vector unsigned long long
+vec_popcnt(vector unsigned long long __a) {
+ return __builtin_s390_vpopctg(__a);
+}
+
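+/* Illustrative sketch (not part of the zvector API): per-lane population
+ * count; signed inputs are recast above, so the sign bit counts like any
+ * other set bit. */
+static inline __ATTRS_ai vector unsigned int
+__example_popcnt(void) {
+ vector unsigned int __x = { 0u, 1u, 0xFFu, 0xFFFFFFFFu };
+ return vec_popcnt(__x); /* { 0, 1, 8, 32 } */
+}
+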
+/*-- vec_rl -----------------------------------------------------------------*/
+
+static inline __ATTRS_o_ai vector signed char
+vec_rl(vector signed char __a, vector unsigned char __b) {
+ return (vector signed char)__builtin_s390_verllvb(
+ (vector unsigned char)__a, __b);
+}
+
+static inline __ATTRS_o_ai vector unsigned char
+vec_rl(vector unsigned char __a, vector unsigned char __b) {
+ return __builtin_s390_verllvb(__a, __b);
+}
+
+static inline __ATTRS_o_ai vector signed short
+vec_rl(vector signed short __a, vector unsigned short __b) {
+ return (vector signed short)__builtin_s390_verllvh(
+ (vector unsigned short)__a, __b);
+}
+
+static inline __ATTRS_o_ai vector unsigned short
+vec_rl(vector unsigned short __a, vector unsigned short __b) {
+ return __builtin_s390_verllvh(__a, __b);
+}
+
+static inline __ATTRS_o_ai vector signed int
+vec_rl(vector signed int __a, vector unsigned int __b) {
+ return (vector signed int)__builtin_s390_verllvf(
+ (vector unsigned int)__a, __b);
+}
+
+static inline __ATTRS_o_ai vector unsigned int
+vec_rl(vector unsigned int __a, vector unsigned int __b) {
+ return __builtin_s390_verllvf(__a, __b);
+}
+
+static inline __ATTRS_o_ai vector signed long long
+vec_rl(vector signed long long __a, vector unsigned long long __b) {
+ return (vector signed long long)__builtin_s390_verllvg(
+ (vector unsigned long long)__a, __b);
+}
+
+static inline __ATTRS_o_ai vector unsigned long long
+vec_rl(vector unsigned long long __a, vector unsigned long long __b) {
+ return __builtin_s390_verllvg(__a, __b);
+}
+
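+/* Illustrative sketch (not part of the zvector API): each lane of __a is
+ * rotated left by the count in the matching lane of __b; counts are taken
+ * modulo the element width per the underlying VERLLV definition. */
+static inline __ATTRS_ai vector unsigned int
+__example_rl(void) {
+ vector unsigned int __a = { 0x80000001u, 1u, 1u, 1u };
+ vector unsigned int __b = { 1u, 4u, 32u, 33u };
+ return vec_rl(__a, __b); /* { 3, 16, 1, 2 } */
+}
+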
+/*-- vec_rli ----------------------------------------------------------------*/
+
+static inline __ATTRS_o_ai vector signed char
+vec_rli(vector signed char __a, unsigned long __b) {
+ return (vector signed char)__builtin_s390_verllb(
+ (vector unsigned char)__a, (int)__b);
+}
+
+static inline __ATTRS_o_ai vector unsigned char
+vec_rli(vector unsigned char __a, unsigned long __b) {
+ return __builtin_s390_verllb(__a, (int)__b);
+}
+
+static inline __ATTRS_o_ai vector signed short
+vec_rli(vector signed short __a, unsigned long __b) {
+ return (vector signed short)__builtin_s390_verllh(
+ (vector unsigned short)__a, (int)__b);
+}
+
+static inline __ATTRS_o_ai vector unsigned short
+vec_rli(vector unsigned short __a, unsigned long __b) {
+ return __builtin_s390_verllh(__a, (int)__b);
+}
+
+static inline __ATTRS_o_ai vector signed int
+vec_rli(vector signed int __a, unsigned long __b) {
+ return (vector signed int)__builtin_s390_verllf(
+ (vector unsigned int)__a, (int)__b);
+}
+
+static inline __ATTRS_o_ai vector unsigned int
+vec_rli(vector unsigned int __a, unsigned long __b) {
+ return __builtin_s390_verllf(__a, (int)__b);
+}
+
+static inline __ATTRS_o_ai vector signed long long
+vec_rli(vector signed long long __a, unsigned long __b) {
+ return (vector signed long long)__builtin_s390_verllg(
+ (vector unsigned long long)__a, (int)__b);
+}
+
+static inline __ATTRS_o_ai vector unsigned long long
+vec_rli(vector unsigned long long __a, unsigned long __b) {
+ return __builtin_s390_verllg(__a, (int)__b);
+}
+
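+/* Illustrative sketch (not part of the zvector API): vec_rli rotates
+ * every lane by the same scalar count. */
+static inline __ATTRS_ai vector unsigned short
+__example_rli(void) {
+ vector unsigned short __a = { 0x8001, 1, 2, 3, 4, 5, 6, 7 };
+ return vec_rli(__a, 1); /* { 0x0003, 2, 4, 6, 8, 10, 12, 14 } */
+}
+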
+/*-- vec_rl_mask ------------------------------------------------------------*/
+
+extern __ATTRS_o vector signed char
+vec_rl_mask(vector signed char __a, vector unsigned char __b,
+ unsigned char __c) __constant(__c);
+
+extern __ATTRS_o vector unsigned char
+vec_rl_mask(vector unsigned char __a, vector unsigned char __b,
+ unsigned char __c) __constant(__c);
+
+extern __ATTRS_o vector signed short
+vec_rl_mask(vector signed short __a, vector unsigned short __b,
+ unsigned char __c) __constant(__c);
+
+extern __ATTRS_o vector unsigned short
+vec_rl_mask(vector unsigned short __a, vector unsigned short __b,
+ unsigned char __c) __constant(__c);
+
+extern __ATTRS_o vector signed int
+vec_rl_mask(vector signed int __a, vector unsigned int __b,
+ unsigned char __c) __constant(__c);
+
+extern __ATTRS_o vector unsigned int
+vec_rl_mask(vector unsigned int __a, vector unsigned int __b,
+ unsigned char __c) __constant(__c);
+
+extern __ATTRS_o vector signed long long
+vec_rl_mask(vector signed long long __a, vector unsigned long long __b,
+ unsigned char __c) __constant(__c);
+
+extern __ATTRS_o vector unsigned long long
+vec_rl_mask(vector unsigned long long __a, vector unsigned long long __b,
+ unsigned char __c) __constant(__c);
+
+#define vec_rl_mask(X, Y, Z) ((__typeof__((vec_rl_mask)((X), (Y), (Z)))) \
+ __extension__ ({ \
+ vector unsigned char __res; \
+ vector unsigned char __x = (vector unsigned char)(X); \
+ vector unsigned char __y = (vector unsigned char)(Y); \
+ switch (sizeof ((X)[0])) { \
+ case 1: __res = (vector unsigned char) __builtin_s390_verimb( \
+ (vector unsigned char)__x, (vector unsigned char)__x, \
+ (vector unsigned char)__y, (Z)); break; \
+ case 2: __res = (vector unsigned char) __builtin_s390_verimh( \
+ (vector unsigned short)__x, (vector unsigned short)__x, \
+ (vector unsigned short)__y, (Z)); break; \
+ case 4: __res = (vector unsigned char) __builtin_s390_verimf( \
+ (vector unsigned int)__x, (vector unsigned int)__x, \
+ (vector unsigned int)__y, (Z)); break; \
+ default: __res = (vector unsigned char) __builtin_s390_verimg( \
+ (vector unsigned long long)__x, (vector unsigned long long)__x, \
+ (vector unsigned long long)__y, (Z)); break; \
+ } __res; }))
+
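+/* Note (illustrative): the extern declarations above exist only so that
+ * overload resolution can pick the macro's result type via __typeof__;
+ * the statement expression then dispatches on the element size to the
+ * matching rotate-and-insert-under-mask builtin.  For example, a call
+ * such as
+ *
+ *   vector unsigned int __r = vec_rl_mask(__a, __b, 0x0F);
+ *
+ * with vector unsigned int operands takes the sizeof == 4 case and
+ * expands to __builtin_s390_verimf; the third argument must be a
+ * compile-time constant, as the __constant annotation on the
+ * declarations enforces. */
+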
+/*-- vec_sll ----------------------------------------------------------------*/
+
+static inline __ATTRS_o_ai vector signed char
+vec_sll(vector signed char __a, vector unsigned char __b) {
+ return (vector signed char)__builtin_s390_vsl(
+ (vector unsigned char)__a, __b);
+}
+
+static inline __ATTRS_o_ai vector signed char
+vec_sll(vector signed char __a, vector unsigned short __b) {
+ return (vector signed char)__builtin_s390_vsl(
+ (vector unsigned char)__a, (vector unsigned char)__b);
+}
+
+static inline __ATTRS_o_ai vector signed char
+vec_sll(vector signed char __a, vector unsigned int __b) {
+ return (vector signed char)__builtin_s390_vsl(
+ (vector unsigned char)__a, (vector unsigned char)__b);
+}
+
+static inline __ATTRS_o_ai vector bool char
+vec_sll(vector bool char __a, vector unsigned char __b) {
+ return (vector bool char)__builtin_s390_vsl(
+ (vector unsigned char)__a, __b);
+}
+
+static inline __ATTRS_o_ai vector bool char
+vec_sll(vector bool char __a, vector unsigned short __b) {
+ return (vector bool char)__builtin_s390_vsl(
+ (vector unsigned char)__a, (vector unsigned char)__b);
+}
+
+static inline __ATTRS_o_ai vector bool char
+vec_sll(vector bool char __a, vector unsigned int __b) {
+ return (vector bool char)__builtin_s390_vsl(
+ (vector unsigned char)__a, (vector unsigned char)__b);
+}
+
+static inline __ATTRS_o_ai vector unsigned char
+vec_sll(vector unsigned char __a, vector unsigned char __b) {
+ return __builtin_s390_vsl(__a, __b);
+}
+
+static inline __ATTRS_o_ai vector unsigned char
+vec_sll(vector unsigned char __a, vector unsigned short __b) {
+ return __builtin_s390_vsl(__a, (vector unsigned char)__b);
+}
+
+static inline __ATTRS_o_ai vector unsigned char
+vec_sll(vector unsigned char __a, vector unsigned int __b) {
+ return __builtin_s390_vsl(__a, (vector unsigned char)__b);
+}
+
+static inline __ATTRS_o_ai vector signed short
+vec_sll(vector signed short __a, vector unsigned char __b) {
+ return (vector signed short)__builtin_s390_vsl(
+ (vector unsigned char)__a, __b);
+}
+
+static inline __ATTRS_o_ai vector signed short
+vec_sll(vector signed short __a, vector unsigned short __b) {
+ return (vector signed short)__builtin_s390_vsl(
+ (vector unsigned char)__a, (vector unsigned char)__b);
+}
+
+static inline __ATTRS_o_ai vector signed short
+vec_sll(vector signed short __a, vector unsigned int __b) {
+ return (vector signed short)__builtin_s390_vsl(
+ (vector unsigned char)__a, (vector unsigned char)__b);
+}
+
+static inline __ATTRS_o_ai vector bool short
+vec_sll(vector bool short __a, vector unsigned char __b) {
+ return (vector bool short)__builtin_s390_vsl(
+ (vector unsigned char)__a, __b);
+}
+
+static inline __ATTRS_o_ai vector bool short
+vec_sll(vector bool short __a, vector unsigned short __b) {
+ return (vector bool short)__builtin_s390_vsl(
+ (vector unsigned char)__a, (vector unsigned char)__b);
+}
+
+static inline __ATTRS_o_ai vector bool short
+vec_sll(vector bool short __a, vector unsigned int __b) {
+ return (vector bool short)__builtin_s390_vsl(
+ (vector unsigned char)__a, (vector unsigned char)__b);
+}
+
+static inline __ATTRS_o_ai vector unsigned short
+vec_sll(vector unsigned short __a, vector unsigned char __b) {
+ return (vector unsigned short)__builtin_s390_vsl(
+ (vector unsigned char)__a, __b);
+}
+
+static inline __ATTRS_o_ai vector unsigned short
+vec_sll(vector unsigned short __a, vector unsigned short __b) {
+ return (vector unsigned short)__builtin_s390_vsl(
+ (vector unsigned char)__a, (vector unsigned char)__b);
+}
+
+static inline __ATTRS_o_ai vector unsigned short
+vec_sll(vector unsigned short __a, vector unsigned int __b) {
+ return (vector unsigned short)__builtin_s390_vsl(
+ (vector unsigned char)__a, (vector unsigned char)__b);
+}
+
+static inline __ATTRS_o_ai vector signed int
+vec_sll(vector signed int __a, vector unsigned char __b) {
+ return (vector signed int)__builtin_s390_vsl(
+ (vector unsigned char)__a, __b);
+}
+
+static inline __ATTRS_o_ai vector signed int
+vec_sll(vector signed int __a, vector unsigned short __b) {
+ return (vector signed int)__builtin_s390_vsl(
+ (vector unsigned char)__a, (vector unsigned char)__b);
+}
+
+static inline __ATTRS_o_ai vector signed int
+vec_sll(vector signed int __a, vector unsigned int __b) {
+ return (vector signed int)__builtin_s390_vsl(
+ (vector unsigned char)__a, (vector unsigned char)__b);
+}
+
+static inline __ATTRS_o_ai vector bool int
+vec_sll(vector bool int __a, vector unsigned char __b) {
+ return (vector bool int)__builtin_s390_vsl(
+ (vector unsigned char)__a, __b);
+}
+
+static inline __ATTRS_o_ai vector bool int
+vec_sll(vector bool int __a, vector unsigned short __b) {
+ return (vector bool int)__builtin_s390_vsl(
+ (vector unsigned char)__a, (vector unsigned char)__b);
+}
+
+static inline __ATTRS_o_ai vector bool int
+vec_sll(vector bool int __a, vector unsigned int __b) {
+ return (vector bool int)__builtin_s390_vsl(
+ (vector unsigned char)__a, (vector unsigned char)__b);
+}
+
+static inline __ATTRS_o_ai vector unsigned int
+vec_sll(vector unsigned int __a, vector unsigned char __b) {
+ return (vector unsigned int)__builtin_s390_vsl(
+ (vector unsigned char)__a, __b);
+}
+
+static inline __ATTRS_o_ai vector unsigned int
+vec_sll(vector unsigned int __a, vector unsigned short __b) {
+ return (vector unsigned int)__builtin_s390_vsl(
+ (vector unsigned char)__a, (vector unsigned char)__b);
+}
+
+static inline __ATTRS_o_ai vector unsigned int
+vec_sll(vector unsigned int __a, vector unsigned int __b) {
+ return (vector unsigned int)__builtin_s390_vsl(
+ (vector unsigned char)__a, (vector unsigned char)__b);
+}
+
+static inline __ATTRS_o_ai vector signed long long
+vec_sll(vector signed long long __a, vector unsigned char __b) {
+ return (vector signed long long)__builtin_s390_vsl(
+ (vector unsigned char)__a, __b);
+}
+
+static inline __ATTRS_o_ai vector signed long long
+vec_sll(vector signed long long __a, vector unsigned short __b) {
+ return (vector signed long long)__builtin_s390_vsl(
+ (vector unsigned char)__a, (vector unsigned char)__b);
+}
+
+static inline __ATTRS_o_ai vector signed long long
+vec_sll(vector signed long long __a, vector unsigned int __b) {
+ return (vector signed long long)__builtin_s390_vsl(
+ (vector unsigned char)__a, (vector unsigned char)__b);
+}
+
+static inline __ATTRS_o_ai vector bool long long
+vec_sll(vector bool long long __a, vector unsigned char __b) {
+ return (vector bool long long)__builtin_s390_vsl(
+ (vector unsigned char)__a, __b);
+}
+
+static inline __ATTRS_o_ai vector bool long long
+vec_sll(vector bool long long __a, vector unsigned short __b) {
+ return (vector bool long long)__builtin_s390_vsl(
+ (vector unsigned char)__a, (vector unsigned char)__b);
+}
+
+static inline __ATTRS_o_ai vector bool long long
+vec_sll(vector bool long long __a, vector unsigned int __b) {
+ return (vector bool long long)__builtin_s390_vsl(
+ (vector unsigned char)__a, (vector unsigned char)__b);
+}
+
+static inline __ATTRS_o_ai vector unsigned long long
+vec_sll(vector unsigned long long __a, vector unsigned char __b) {
+ return (vector unsigned long long)__builtin_s390_vsl(
+ (vector unsigned char)__a, __b);
+}
+
+static inline __ATTRS_o_ai vector unsigned long long
+vec_sll(vector unsigned long long __a, vector unsigned short __b) {
+ return (vector unsigned long long)__builtin_s390_vsl(
+ (vector unsigned char)__a, (vector unsigned char)__b);
+}
+
+static inline __ATTRS_o_ai vector unsigned long long
+vec_sll(vector unsigned long long __a, vector unsigned int __b) {
+ return (vector unsigned long long)__builtin_s390_vsl(
+ (vector unsigned char)__a, (vector unsigned char)__b);
+}
+
+/*-- vec_slb ----------------------------------------------------------------*/
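+/* Shift the entire 128-bit vector left by a byte count taken from __b
+   (VSLB). */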
+
+static inline __ATTRS_o_ai vector signed char
+vec_slb(vector signed char __a, vector signed char __b) {
+ return (vector signed char)__builtin_s390_vslb(
+ (vector unsigned char)__a, (vector unsigned char)__b);
+}
+
+static inline __ATTRS_o_ai vector signed char
+vec_slb(vector signed char __a, vector unsigned char __b) {
+ return (vector signed char)__builtin_s390_vslb(
+ (vector unsigned char)__a, __b);
+}
+
+static inline __ATTRS_o_ai vector unsigned char
+vec_slb(vector unsigned char __a, vector signed char __b) {
+ return __builtin_s390_vslb(__a, (vector unsigned char)__b);
+}
+
+static inline __ATTRS_o_ai vector unsigned char
+vec_slb(vector unsigned char __a, vector unsigned char __b) {
+ return __builtin_s390_vslb(__a, __b);
+}
+
+static inline __ATTRS_o_ai vector signed short
+vec_slb(vector signed short __a, vector signed short __b) {
+ return (vector signed short)__builtin_s390_vslb(
+ (vector unsigned char)__a, (vector unsigned char)__b);
+}
+
+static inline __ATTRS_o_ai vector signed short
+vec_slb(vector signed short __a, vector unsigned short __b) {
+ return (vector signed short)__builtin_s390_vslb(
+ (vector unsigned char)__a, (vector unsigned char)__b);
+}
+
+static inline __ATTRS_o_ai vector unsigned short
+vec_slb(vector unsigned short __a, vector signed short __b) {
+ return (vector unsigned short)__builtin_s390_vslb(
+ (vector unsigned char)__a, (vector unsigned char)__b);
+}
+
+static inline __ATTRS_o_ai vector unsigned short
+vec_slb(vector unsigned short __a, vector unsigned short __b) {
+ return (vector unsigned short)__builtin_s390_vslb(
+ (vector unsigned char)__a, (vector unsigned char)__b);
+}
+
+static inline __ATTRS_o_ai vector signed int
+vec_slb(vector signed int __a, vector signed int __b) {
+ return (vector signed int)__builtin_s390_vslb(
+ (vector unsigned char)__a, (vector unsigned char)__b);
+}
+
+static inline __ATTRS_o_ai vector signed int
+vec_slb(vector signed int __a, vector unsigned int __b) {
+ return (vector signed int)__builtin_s390_vslb(
+ (vector unsigned char)__a, (vector unsigned char)__b);
+}
+
+static inline __ATTRS_o_ai vector unsigned int
+vec_slb(vector unsigned int __a, vector signed int __b) {
+ return (vector unsigned int)__builtin_s390_vslb(
+ (vector unsigned char)__a, (vector unsigned char)__b);
+}
+
+static inline __ATTRS_o_ai vector unsigned int
+vec_slb(vector unsigned int __a, vector unsigned int __b) {
+ return (vector unsigned int)__builtin_s390_vslb(
+ (vector unsigned char)__a, (vector unsigned char)__b);
+}
+
+static inline __ATTRS_o_ai vector signed long long
+vec_slb(vector signed long long __a, vector signed long long __b) {
+ return (vector signed long long)__builtin_s390_vslb(
+ (vector unsigned char)__a, (vector unsigned char)__b);
+}
+
+static inline __ATTRS_o_ai vector signed long long
+vec_slb(vector signed long long __a, vector unsigned long long __b) {
+ return (vector signed long long)__builtin_s390_vslb(
+ (vector unsigned char)__a, (vector unsigned char)__b);
+}
+
+static inline __ATTRS_o_ai vector unsigned long long
+vec_slb(vector unsigned long long __a, vector signed long long __b) {
+ return (vector unsigned long long)__builtin_s390_vslb(
+ (vector unsigned char)__a, (vector unsigned char)__b);
+}
+
+static inline __ATTRS_o_ai vector unsigned long long
+vec_slb(vector unsigned long long __a, vector unsigned long long __b) {
+ return (vector unsigned long long)__builtin_s390_vslb(
+ (vector unsigned char)__a, (vector unsigned char)__b);
+}
+
+static inline __ATTRS_o_ai vector double
+vec_slb(vector double __a, vector signed long long __b) {
+ return (vector double)__builtin_s390_vslb(
+ (vector unsigned char)__a, (vector unsigned char)__b);
+}
+
+static inline __ATTRS_o_ai vector double
+vec_slb(vector double __a, vector unsigned long long __b) {
+ return (vector double)__builtin_s390_vslb(
+ (vector unsigned char)__a, (vector unsigned char)__b);
+}
+
+/*-- vec_sld ----------------------------------------------------------------*/
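+/* Byte-wise double shift: conceptually concatenates __a and __b and returns
+   the 16 bytes starting at byte offset __c, which must be a constant in the
+   range 0..15.  The extern declarations below exist only so __typeof__ can
+   recover the per-type result; every call expands to the VSLDB builtin via
+   the macro that follows them. */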
+
+extern __ATTRS_o vector signed char
+vec_sld(vector signed char __a, vector signed char __b, int __c)
+ __constant_range(__c, 0, 15);
+
+extern __ATTRS_o vector unsigned char
+vec_sld(vector unsigned char __a, vector unsigned char __b, int __c)
+ __constant_range(__c, 0, 15);
+
+extern __ATTRS_o vector signed short
+vec_sld(vector signed short __a, vector signed short __b, int __c)
+ __constant_range(__c, 0, 15);
+
+extern __ATTRS_o vector unsigned short
+vec_sld(vector unsigned short __a, vector unsigned short __b, int __c)
+ __constant_range(__c, 0, 15);
+
+extern __ATTRS_o vector signed int
+vec_sld(vector signed int __a, vector signed int __b, int __c)
+ __constant_range(__c, 0, 15);
+
+extern __ATTRS_o vector unsigned int
+vec_sld(vector unsigned int __a, vector unsigned int __b, int __c)
+ __constant_range(__c, 0, 15);
+
+extern __ATTRS_o vector signed long long
+vec_sld(vector signed long long __a, vector signed long long __b, int __c)
+ __constant_range(__c, 0, 15);
+
+extern __ATTRS_o vector unsigned long long
+vec_sld(vector unsigned long long __a, vector unsigned long long __b, int __c)
+ __constant_range(__c, 0, 15);
+
+extern __ATTRS_o vector double
+vec_sld(vector double __a, vector double __b, int __c)
+ __constant_range(__c, 0, 15);
+
+#define vec_sld(X, Y, Z) ((__typeof__((vec_sld)((X), (Y), (Z)))) \
+ __builtin_s390_vsldb((vector unsigned char)(X), \
+ (vector unsigned char)(Y), (Z)))
+
+/*-- vec_sldw ---------------------------------------------------------------*/
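+/* Like vec_sld, but the constant offset __c (0..3) counts 4-byte words;
+   the macro simply scales it by 4 before invoking VSLDB. */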
+
+extern __ATTRS_o vector signed char
+vec_sldw(vector signed char __a, vector signed char __b, int __c)
+ __constant_range(__c, 0, 3);
+
+extern __ATTRS_o vector unsigned char
+vec_sldw(vector unsigned char __a, vector unsigned char __b, int __c)
+ __constant_range(__c, 0, 3);
+
+extern __ATTRS_o vector signed short
+vec_sldw(vector signed short __a, vector signed short __b, int __c)
+ __constant_range(__c, 0, 3);
+
+extern __ATTRS_o vector unsigned short
+vec_sldw(vector unsigned short __a, vector unsigned short __b, int __c)
+ __constant_range(__c, 0, 3);
+
+extern __ATTRS_o vector signed int
+vec_sldw(vector signed int __a, vector signed int __b, int __c)
+ __constant_range(__c, 0, 3);
+
+extern __ATTRS_o vector unsigned int
+vec_sldw(vector unsigned int __a, vector unsigned int __b, int __c)
+ __constant_range(__c, 0, 3);
+
+extern __ATTRS_o vector signed long long
+vec_sldw(vector signed long long __a, vector signed long long __b, int __c)
+ __constant_range(__c, 0, 3);
+
+extern __ATTRS_o vector unsigned long long
+vec_sldw(vector unsigned long long __a, vector unsigned long long __b, int __c)
+ __constant_range(__c, 0, 3);
+
+extern __ATTRS_o vector double
+vec_sldw(vector double __a, vector double __b, int __c)
+ __constant_range(__c, 0, 3);
+
+#define vec_sldw(X, Y, Z) ((__typeof__((vec_sldw)((X), (Y), (Z)))) \
+ __builtin_s390_vsldb((vector unsigned char)(X), \
+ (vector unsigned char)(Y), (Z) * 4))
+
+/*-- vec_sral ---------------------------------------------------------------*/
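+/* Arithmetic shift right of the entire 128-bit vector by a bit count taken
+   from __b (VSRA); copies of the sign bit shift in from the left. */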
+
+static inline __ATTRS_o_ai vector signed char
+vec_sral(vector signed char __a, vector unsigned char __b) {
+ return (vector signed char)__builtin_s390_vsra(
+ (vector unsigned char)__a, __b);
+}
+
+static inline __ATTRS_o_ai vector signed char
+vec_sral(vector signed char __a, vector unsigned short __b) {
+ return (vector signed char)__builtin_s390_vsra(
+ (vector unsigned char)__a, (vector unsigned char)__b);
+}
+
+static inline __ATTRS_o_ai vector signed char
+vec_sral(vector signed char __a, vector unsigned int __b) {
+ return (vector signed char)__builtin_s390_vsra(
+ (vector unsigned char)__a, (vector unsigned char)__b);
+}
+
+static inline __ATTRS_o_ai vector bool char
+vec_sral(vector bool char __a, vector unsigned char __b) {
+ return (vector bool char)__builtin_s390_vsra(
+ (vector unsigned char)__a, __b);
+}
+
+static inline __ATTRS_o_ai vector bool char
+vec_sral(vector bool char __a, vector unsigned short __b) {
+ return (vector bool char)__builtin_s390_vsra(
+ (vector unsigned char)__a, (vector unsigned char)__b);
+}
+
+static inline __ATTRS_o_ai vector bool char
+vec_sral(vector bool char __a, vector unsigned int __b) {
+ return (vector bool char)__builtin_s390_vsra(
+ (vector unsigned char)__a, (vector unsigned char)__b);
+}
+
+static inline __ATTRS_o_ai vector unsigned char
+vec_sral(vector unsigned char __a, vector unsigned char __b) {
+ return __builtin_s390_vsra(__a, __b);
+}
+
+static inline __ATTRS_o_ai vector unsigned char
+vec_sral(vector unsigned char __a, vector unsigned short __b) {
+ return __builtin_s390_vsra(__a, (vector unsigned char)__b);
+}
+
+static inline __ATTRS_o_ai vector unsigned char
+vec_sral(vector unsigned char __a, vector unsigned int __b) {
+ return __builtin_s390_vsra(__a, (vector unsigned char)__b);
+}
+
+static inline __ATTRS_o_ai vector signed short
+vec_sral(vector signed short __a, vector unsigned char __b) {
+ return (vector signed short)__builtin_s390_vsra(
+ (vector unsigned char)__a, __b);
+}
+
+static inline __ATTRS_o_ai vector signed short
+vec_sral(vector signed short __a, vector unsigned short __b) {
+ return (vector signed short)__builtin_s390_vsra(
+ (vector unsigned char)__a, (vector unsigned char)__b);
+}
+
+static inline __ATTRS_o_ai vector signed short
+vec_sral(vector signed short __a, vector unsigned int __b) {
+ return (vector signed short)__builtin_s390_vsra(
+ (vector unsigned char)__a, (vector unsigned char)__b);
+}
+
+static inline __ATTRS_o_ai vector bool short
+vec_sral(vector bool short __a, vector unsigned char __b) {
+ return (vector bool short)__builtin_s390_vsra(
+ (vector unsigned char)__a, __b);
+}
+
+static inline __ATTRS_o_ai vector bool short
+vec_sral(vector bool short __a, vector unsigned short __b) {
+ return (vector bool short)__builtin_s390_vsra(
+ (vector unsigned char)__a, (vector unsigned char)__b);
+}
+
+static inline __ATTRS_o_ai vector bool short
+vec_sral(vector bool short __a, vector unsigned int __b) {
+ return (vector bool short)__builtin_s390_vsra(
+ (vector unsigned char)__a, (vector unsigned char)__b);
+}
+
+static inline __ATTRS_o_ai vector unsigned short
+vec_sral(vector unsigned short __a, vector unsigned char __b) {
+ return (vector unsigned short)__builtin_s390_vsra(
+ (vector unsigned char)__a, __b);
+}
+
+static inline __ATTRS_o_ai vector unsigned short
+vec_sral(vector unsigned short __a, vector unsigned short __b) {
+ return (vector unsigned short)__builtin_s390_vsra(
+ (vector unsigned char)__a, (vector unsigned char)__b);
+}
+
+static inline __ATTRS_o_ai vector unsigned short
+vec_sral(vector unsigned short __a, vector unsigned int __b) {
+ return (vector unsigned short)__builtin_s390_vsra(
+ (vector unsigned char)__a, (vector unsigned char)__b);
+}
+
+static inline __ATTRS_o_ai vector signed int
+vec_sral(vector signed int __a, vector unsigned char __b) {
+ return (vector signed int)__builtin_s390_vsra(
+ (vector unsigned char)__a, __b);
+}
+
+static inline __ATTRS_o_ai vector signed int
+vec_sral(vector signed int __a, vector unsigned short __b) {
+ return (vector signed int)__builtin_s390_vsra(
+ (vector unsigned char)__a, (vector unsigned char)__b);
+}
+
+static inline __ATTRS_o_ai vector signed int
+vec_sral(vector signed int __a, vector unsigned int __b) {
+ return (vector signed int)__builtin_s390_vsra(
+ (vector unsigned char)__a, (vector unsigned char)__b);
+}
+
+static inline __ATTRS_o_ai vector bool int
+vec_sral(vector bool int __a, vector unsigned char __b) {
+ return (vector bool int)__builtin_s390_vsra(
+ (vector unsigned char)__a, __b);
+}
+
+static inline __ATTRS_o_ai vector bool int
+vec_sral(vector bool int __a, vector unsigned short __b) {
+ return (vector bool int)__builtin_s390_vsra(
+ (vector unsigned char)__a, (vector unsigned char)__b);
+}
+
+static inline __ATTRS_o_ai vector bool int
+vec_sral(vector bool int __a, vector unsigned int __b) {
+ return (vector bool int)__builtin_s390_vsra(
+ (vector unsigned char)__a, (vector unsigned char)__b);
+}
+
+static inline __ATTRS_o_ai vector unsigned int
+vec_sral(vector unsigned int __a, vector unsigned char __b) {
+ return (vector unsigned int)__builtin_s390_vsra(
+ (vector unsigned char)__a, __b);
+}
+
+static inline __ATTRS_o_ai vector unsigned int
+vec_sral(vector unsigned int __a, vector unsigned short __b) {
+ return (vector unsigned int)__builtin_s390_vsra(
+ (vector unsigned char)__a, (vector unsigned char)__b);
+}
+
+static inline __ATTRS_o_ai vector unsigned int
+vec_sral(vector unsigned int __a, vector unsigned int __b) {
+ return (vector unsigned int)__builtin_s390_vsra(
+ (vector unsigned char)__a, (vector unsigned char)__b);
+}
+
+static inline __ATTRS_o_ai vector signed long long
+vec_sral(vector signed long long __a, vector unsigned char __b) {
+ return (vector signed long long)__builtin_s390_vsra(
+ (vector unsigned char)__a, __b);
+}
+
+static inline __ATTRS_o_ai vector signed long long
+vec_sral(vector signed long long __a, vector unsigned short __b) {
+ return (vector signed long long)__builtin_s390_vsra(
+ (vector unsigned char)__a, (vector unsigned char)__b);
+}
+
+static inline __ATTRS_o_ai vector signed long long
+vec_sral(vector signed long long __a, vector unsigned int __b) {
+ return (vector signed long long)__builtin_s390_vsra(
+ (vector unsigned char)__a, (vector unsigned char)__b);
+}
+
+static inline __ATTRS_o_ai vector bool long long
+vec_sral(vector bool long long __a, vector unsigned char __b) {
+ return (vector bool long long)__builtin_s390_vsra(
+ (vector unsigned char)__a, __b);
+}
+
+static inline __ATTRS_o_ai vector bool long long
+vec_sral(vector bool long long __a, vector unsigned short __b) {
+ return (vector bool long long)__builtin_s390_vsra(
+ (vector unsigned char)__a, (vector unsigned char)__b);
+}
+
+static inline __ATTRS_o_ai vector bool long long
+vec_sral(vector bool long long __a, vector unsigned int __b) {
+ return (vector bool long long)__builtin_s390_vsra(
+ (vector unsigned char)__a, (vector unsigned char)__b);
+}
+
+static inline __ATTRS_o_ai vector unsigned long long
+vec_sral(vector unsigned long long __a, vector unsigned char __b) {
+ return (vector unsigned long long)__builtin_s390_vsra(
+ (vector unsigned char)__a, __b);
+}
+
+static inline __ATTRS_o_ai vector unsigned long long
+vec_sral(vector unsigned long long __a, vector unsigned short __b) {
+ return (vector unsigned long long)__builtin_s390_vsra(
+ (vector unsigned char)__a, (vector unsigned char)__b);
+}
+
+static inline __ATTRS_o_ai vector unsigned long long
+vec_sral(vector unsigned long long __a, vector unsigned int __b) {
+ return (vector unsigned long long)__builtin_s390_vsra(
+ (vector unsigned char)__a, (vector unsigned char)__b);
+}
+
+/*-- vec_srab ---------------------------------------------------------------*/
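+/* Arithmetic shift right by a byte count taken from __b (VSRAB). */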
+
+static inline __ATTRS_o_ai vector signed char
+vec_srab(vector signed char __a, vector signed char __b) {
+ return (vector signed char)__builtin_s390_vsrab(
+ (vector unsigned char)__a, (vector unsigned char)__b);
+}
+
+static inline __ATTRS_o_ai vector signed char
+vec_srab(vector signed char __a, vector unsigned char __b) {
+ return (vector signed char)__builtin_s390_vsrab(
+ (vector unsigned char)__a, __b);
+}
+
+static inline __ATTRS_o_ai vector unsigned char
+vec_srab(vector unsigned char __a, vector signed char __b) {
+ return __builtin_s390_vsrab(__a, (vector unsigned char)__b);
+}
+
+static inline __ATTRS_o_ai vector unsigned char
+vec_srab(vector unsigned char __a, vector unsigned char __b) {
+ return __builtin_s390_vsrab(__a, __b);
+}
+
+static inline __ATTRS_o_ai vector signed short
+vec_srab(vector signed short __a, vector signed short __b) {
+ return (vector signed short)__builtin_s390_vsrab(
+ (vector unsigned char)__a, (vector unsigned char)__b);
+}
+
+static inline __ATTRS_o_ai vector signed short
+vec_srab(vector signed short __a, vector unsigned short __b) {
+ return (vector signed short)__builtin_s390_vsrab(
+ (vector unsigned char)__a, (vector unsigned char)__b);
+}
+
+static inline __ATTRS_o_ai vector unsigned short
+vec_srab(vector unsigned short __a, vector signed short __b) {
+ return (vector unsigned short)__builtin_s390_vsrab(
+ (vector unsigned char)__a, (vector unsigned char)__b);
+}
+
+static inline __ATTRS_o_ai vector unsigned short
+vec_srab(vector unsigned short __a, vector unsigned short __b) {
+ return (vector unsigned short)__builtin_s390_vsrab(
+ (vector unsigned char)__a, (vector unsigned char)__b);
+}
+
+static inline __ATTRS_o_ai vector signed int
+vec_srab(vector signed int __a, vector signed int __b) {
+ return (vector signed int)__builtin_s390_vsrab(
+ (vector unsigned char)__a, (vector unsigned char)__b);
+}
+
+static inline __ATTRS_o_ai vector signed int
+vec_srab(vector signed int __a, vector unsigned int __b) {
+ return (vector signed int)__builtin_s390_vsrab(
+ (vector unsigned char)__a, (vector unsigned char)__b);
+}
+
+static inline __ATTRS_o_ai vector unsigned int
+vec_srab(vector unsigned int __a, vector signed int __b) {
+ return (vector unsigned int)__builtin_s390_vsrab(
+ (vector unsigned char)__a, (vector unsigned char)__b);
+}
+
+static inline __ATTRS_o_ai vector unsigned int
+vec_srab(vector unsigned int __a, vector unsigned int __b) {
+ return (vector unsigned int)__builtin_s390_vsrab(
+ (vector unsigned char)__a, (vector unsigned char)__b);
+}
+
+static inline __ATTRS_o_ai vector signed long long
+vec_srab(vector signed long long __a, vector signed long long __b) {
+ return (vector signed long long)__builtin_s390_vsrab(
+ (vector unsigned char)__a, (vector unsigned char)__b);
+}
+
+static inline __ATTRS_o_ai vector signed long long
+vec_srab(vector signed long long __a, vector unsigned long long __b) {
+ return (vector signed long long)__builtin_s390_vsrab(
+ (vector unsigned char)__a, (vector unsigned char)__b);
+}
+
+static inline __ATTRS_o_ai vector unsigned long long
+vec_srab(vector unsigned long long __a, vector signed long long __b) {
+ return (vector unsigned long long)__builtin_s390_vsrab(
+ (vector unsigned char)__a, (vector unsigned char)__b);
+}
+
+static inline __ATTRS_o_ai vector unsigned long long
+vec_srab(vector unsigned long long __a, vector unsigned long long __b) {
+ return (vector unsigned long long)__builtin_s390_vsrab(
+ (vector unsigned char)__a, (vector unsigned char)__b);
+}
+
+static inline __ATTRS_o_ai vector double
+vec_srab(vector double __a, vector signed long long __b) {
+ return (vector double)__builtin_s390_vsrab(
+ (vector unsigned char)__a, (vector unsigned char)__b);
+}
+
+static inline __ATTRS_o_ai vector double
+vec_srab(vector double __a, vector unsigned long long __b) {
+ return (vector double)__builtin_s390_vsrab(
+ (vector unsigned char)__a, (vector unsigned char)__b);
+}
+
+/*-- vec_srl ----------------------------------------------------------------*/
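+/* Logical shift right of the entire 128-bit vector by a bit count taken
+   from __b (VSRL); zero bits shift in from the left. */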
+
+static inline __ATTRS_o_ai vector signed char
+vec_srl(vector signed char __a, vector unsigned char __b) {
+ return (vector signed char)__builtin_s390_vsrl(
+ (vector unsigned char)__a, __b);
+}
+
+static inline __ATTRS_o_ai vector signed char
+vec_srl(vector signed char __a, vector unsigned short __b) {
+ return (vector signed char)__builtin_s390_vsrl(
+ (vector unsigned char)__a, (vector unsigned char)__b);
+}
+
+static inline __ATTRS_o_ai vector signed char
+vec_srl(vector signed char __a, vector unsigned int __b) {
+ return (vector signed char)__builtin_s390_vsrl(
+ (vector unsigned char)__a, (vector unsigned char)__b);
+}
+
+static inline __ATTRS_o_ai vector bool char
+vec_srl(vector bool char __a, vector unsigned char __b) {
+ return (vector bool char)__builtin_s390_vsrl(
+ (vector unsigned char)__a, __b);
+}
+
+static inline __ATTRS_o_ai vector bool char
+vec_srl(vector bool char __a, vector unsigned short __b) {
+ return (vector bool char)__builtin_s390_vsrl(
+ (vector unsigned char)__a, (vector unsigned char)__b);
+}
+
+static inline __ATTRS_o_ai vector bool char
+vec_srl(vector bool char __a, vector unsigned int __b) {
+ return (vector bool char)__builtin_s390_vsrl(
+ (vector unsigned char)__a, (vector unsigned char)__b);
+}
+
+static inline __ATTRS_o_ai vector unsigned char
+vec_srl(vector unsigned char __a, vector unsigned char __b) {
+ return __builtin_s390_vsrl(__a, __b);
+}
+
+static inline __ATTRS_o_ai vector unsigned char
+vec_srl(vector unsigned char __a, vector unsigned short __b) {
+ return __builtin_s390_vsrl(__a, (vector unsigned char)__b);
+}
+
+static inline __ATTRS_o_ai vector unsigned char
+vec_srl(vector unsigned char __a, vector unsigned int __b) {
+ return __builtin_s390_vsrl(__a, (vector unsigned char)__b);
+}
+
+static inline __ATTRS_o_ai vector signed short
+vec_srl(vector signed short __a, vector unsigned char __b) {
+ return (vector signed short)__builtin_s390_vsrl(
+ (vector unsigned char)__a, __b);
+}
+
+static inline __ATTRS_o_ai vector signed short
+vec_srl(vector signed short __a, vector unsigned short __b) {
+ return (vector signed short)__builtin_s390_vsrl(
+ (vector unsigned char)__a, (vector unsigned char)__b);
+}
+
+static inline __ATTRS_o_ai vector signed short
+vec_srl(vector signed short __a, vector unsigned int __b) {
+ return (vector signed short)__builtin_s390_vsrl(
+ (vector unsigned char)__a, (vector unsigned char)__b);
+}
+
+static inline __ATTRS_o_ai vector bool short
+vec_srl(vector bool short __a, vector unsigned char __b) {
+ return (vector bool short)__builtin_s390_vsrl(
+ (vector unsigned char)__a, __b);
+}
+
+static inline __ATTRS_o_ai vector bool short
+vec_srl(vector bool short __a, vector unsigned short __b) {
+ return (vector bool short)__builtin_s390_vsrl(
+ (vector unsigned char)__a, (vector unsigned char)__b);
+}
+
+static inline __ATTRS_o_ai vector bool short
+vec_srl(vector bool short __a, vector unsigned int __b) {
+ return (vector bool short)__builtin_s390_vsrl(
+ (vector unsigned char)__a, (vector unsigned char)__b);
+}
+
+static inline __ATTRS_o_ai vector unsigned short
+vec_srl(vector unsigned short __a, vector unsigned char __b) {
+ return (vector unsigned short)__builtin_s390_vsrl(
+ (vector unsigned char)__a, __b);
+}
+
+static inline __ATTRS_o_ai vector unsigned short
+vec_srl(vector unsigned short __a, vector unsigned short __b) {
+ return (vector unsigned short)__builtin_s390_vsrl(
+ (vector unsigned char)__a, (vector unsigned char)__b);
+}
+
+static inline __ATTRS_o_ai vector unsigned short
+vec_srl(vector unsigned short __a, vector unsigned int __b) {
+ return (vector unsigned short)__builtin_s390_vsrl(
+ (vector unsigned char)__a, (vector unsigned char)__b);
+}
+
+static inline __ATTRS_o_ai vector signed int
+vec_srl(vector signed int __a, vector unsigned char __b) {
+ return (vector signed int)__builtin_s390_vsrl(
+ (vector unsigned char)__a, __b);
+}
+
+static inline __ATTRS_o_ai vector signed int
+vec_srl(vector signed int __a, vector unsigned short __b) {
+ return (vector signed int)__builtin_s390_vsrl(
+ (vector unsigned char)__a, (vector unsigned char)__b);
+}
+
+static inline __ATTRS_o_ai vector signed int
+vec_srl(vector signed int __a, vector unsigned int __b) {
+ return (vector signed int)__builtin_s390_vsrl(
+ (vector unsigned char)__a, (vector unsigned char)__b);
+}
+
+static inline __ATTRS_o_ai vector bool int
+vec_srl(vector bool int __a, vector unsigned char __b) {
+ return (vector bool int)__builtin_s390_vsrl(
+ (vector unsigned char)__a, __b);
+}
+
+static inline __ATTRS_o_ai vector bool int
+vec_srl(vector bool int __a, vector unsigned short __b) {
+ return (vector bool int)__builtin_s390_vsrl(
+ (vector unsigned char)__a, (vector unsigned char)__b);
+}
+
+static inline __ATTRS_o_ai vector bool int
+vec_srl(vector bool int __a, vector unsigned int __b) {
+ return (vector bool int)__builtin_s390_vsrl(
+ (vector unsigned char)__a, (vector unsigned char)__b);
+}
+
+static inline __ATTRS_o_ai vector unsigned int
+vec_srl(vector unsigned int __a, vector unsigned char __b) {
+ return (vector unsigned int)__builtin_s390_vsrl(
+ (vector unsigned char)__a, __b);
+}
+
+static inline __ATTRS_o_ai vector unsigned int
+vec_srl(vector unsigned int __a, vector unsigned short __b) {
+ return (vector unsigned int)__builtin_s390_vsrl(
+ (vector unsigned char)__a, (vector unsigned char)__b);
+}
+
+static inline __ATTRS_o_ai vector unsigned int
+vec_srl(vector unsigned int __a, vector unsigned int __b) {
+ return (vector unsigned int)__builtin_s390_vsrl(
+ (vector unsigned char)__a, (vector unsigned char)__b);
+}
+
+static inline __ATTRS_o_ai vector signed long long
+vec_srl(vector signed long long __a, vector unsigned char __b) {
+ return (vector signed long long)__builtin_s390_vsrl(
+ (vector unsigned char)__a, __b);
+}
+
+static inline __ATTRS_o_ai vector signed long long
+vec_srl(vector signed long long __a, vector unsigned short __b) {
+ return (vector signed long long)__builtin_s390_vsrl(
+ (vector unsigned char)__a, (vector unsigned char)__b);
+}
+
+static inline __ATTRS_o_ai vector signed long long
+vec_srl(vector signed long long __a, vector unsigned int __b) {
+ return (vector signed long long)__builtin_s390_vsrl(
+ (vector unsigned char)__a, (vector unsigned char)__b);
+}
+
+static inline __ATTRS_o_ai vector bool long long
+vec_srl(vector bool long long __a, vector unsigned char __b) {
+ return (vector bool long long)__builtin_s390_vsrl(
+ (vector unsigned char)__a, __b);
+}
+
+static inline __ATTRS_o_ai vector bool long long
+vec_srl(vector bool long long __a, vector unsigned short __b) {
+ return (vector bool long long)__builtin_s390_vsrl(
+ (vector unsigned char)__a, (vector unsigned char)__b);
+}
+
+static inline __ATTRS_o_ai vector bool long long
+vec_srl(vector bool long long __a, vector unsigned int __b) {
+ return (vector bool long long)__builtin_s390_vsrl(
+ (vector unsigned char)__a, (vector unsigned char)__b);
+}
+
+static inline __ATTRS_o_ai vector unsigned long long
+vec_srl(vector unsigned long long __a, vector unsigned char __b) {
+ return (vector unsigned long long)__builtin_s390_vsrl(
+ (vector unsigned char)__a, __b);
+}
+
+static inline __ATTRS_o_ai vector unsigned long long
+vec_srl(vector unsigned long long __a, vector unsigned short __b) {
+ return (vector unsigned long long)__builtin_s390_vsrl(
+ (vector unsigned char)__a, (vector unsigned char)__b);
+}
+
+static inline __ATTRS_o_ai vector unsigned long long
+vec_srl(vector unsigned long long __a, vector unsigned int __b) {
+ return (vector unsigned long long)__builtin_s390_vsrl(
+ (vector unsigned char)__a, (vector unsigned char)__b);
+}
+
+/*-- vec_srb ----------------------------------------------------------------*/
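+/* Logical shift right by a byte count taken from __b (VSRLB). */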
+
+static inline __ATTRS_o_ai vector signed char
+vec_srb(vector signed char __a, vector signed char __b) {
+ return (vector signed char)__builtin_s390_vsrlb(
+ (vector unsigned char)__a, (vector unsigned char)__b);
+}
+
+static inline __ATTRS_o_ai vector signed char
+vec_srb(vector signed char __a, vector unsigned char __b) {
+ return (vector signed char)__builtin_s390_vsrlb(
+ (vector unsigned char)__a, __b);
+}
+
+static inline __ATTRS_o_ai vector unsigned char
+vec_srb(vector unsigned char __a, vector signed char __b) {
+ return __builtin_s390_vsrlb(__a, (vector unsigned char)__b);
+}
+
+static inline __ATTRS_o_ai vector unsigned char
+vec_srb(vector unsigned char __a, vector unsigned char __b) {
+ return __builtin_s390_vsrlb(__a, __b);
+}
+
+static inline __ATTRS_o_ai vector signed short
+vec_srb(vector signed short __a, vector signed short __b) {
+ return (vector signed short)__builtin_s390_vsrlb(
+ (vector unsigned char)__a, (vector unsigned char)__b);
+}
+
+static inline __ATTRS_o_ai vector signed short
+vec_srb(vector signed short __a, vector unsigned short __b) {
+ return (vector signed short)__builtin_s390_vsrlb(
+ (vector unsigned char)__a, (vector unsigned char)__b);
+}
+
+static inline __ATTRS_o_ai vector unsigned short
+vec_srb(vector unsigned short __a, vector signed short __b) {
+ return (vector unsigned short)__builtin_s390_vsrlb(
+ (vector unsigned char)__a, (vector unsigned char)__b);
+}
+
+static inline __ATTRS_o_ai vector unsigned short
+vec_srb(vector unsigned short __a, vector unsigned short __b) {
+ return (vector unsigned short)__builtin_s390_vsrlb(
+ (vector unsigned char)__a, (vector unsigned char)__b);
+}
+
+static inline __ATTRS_o_ai vector signed int
+vec_srb(vector signed int __a, vector signed int __b) {
+ return (vector signed int)__builtin_s390_vsrlb(
+ (vector unsigned char)__a, (vector unsigned char)__b);
+}
+
+static inline __ATTRS_o_ai vector signed int
+vec_srb(vector signed int __a, vector unsigned int __b) {
+ return (vector signed int)__builtin_s390_vsrlb(
+ (vector unsigned char)__a, (vector unsigned char)__b);
+}
+
+static inline __ATTRS_o_ai vector unsigned int
+vec_srb(vector unsigned int __a, vector signed int __b) {
+ return (vector unsigned int)__builtin_s390_vsrlb(
+ (vector unsigned char)__a, (vector unsigned char)__b);
+}
+
+static inline __ATTRS_o_ai vector unsigned int
+vec_srb(vector unsigned int __a, vector unsigned int __b) {
+ return (vector unsigned int)__builtin_s390_vsrlb(
+ (vector unsigned char)__a, (vector unsigned char)__b);
+}
+
+static inline __ATTRS_o_ai vector signed long long
+vec_srb(vector signed long long __a, vector signed long long __b) {
+ return (vector signed long long)__builtin_s390_vsrlb(
+ (vector unsigned char)__a, (vector unsigned char)__b);
+}
+
+static inline __ATTRS_o_ai vector signed long long
+vec_srb(vector signed long long __a, vector unsigned long long __b) {
+ return (vector signed long long)__builtin_s390_vsrlb(
+ (vector unsigned char)__a, (vector unsigned char)__b);
+}
+
+static inline __ATTRS_o_ai vector unsigned long long
+vec_srb(vector unsigned long long __a, vector signed long long __b) {
+ return (vector unsigned long long)__builtin_s390_vsrlb(
+ (vector unsigned char)__a, (vector unsigned char)__b);
+}
+
+static inline __ATTRS_o_ai vector unsigned long long
+vec_srb(vector unsigned long long __a, vector unsigned long long __b) {
+ return (vector unsigned long long)__builtin_s390_vsrlb(
+ (vector unsigned char)__a, (vector unsigned char)__b);
+}
+
+static inline __ATTRS_o_ai vector double
+vec_srb(vector double __a, vector signed long long __b) {
+ return (vector double)__builtin_s390_vsrlb(
+ (vector unsigned char)__a, (vector unsigned char)__b);
+}
+
+static inline __ATTRS_o_ai vector double
+vec_srb(vector double __a, vector unsigned long long __b) {
+ return (vector double)__builtin_s390_vsrlb(
+ (vector unsigned char)__a, (vector unsigned char)__b);
+}
+
+/*-- vec_abs ----------------------------------------------------------------*/
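+/* Element-wise absolute value.  The integer variants select between __a and
+   -__a based on a compare-less-than-zero; the double variant maps directly
+   to VFLPDB (load positive). */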
+
+static inline __ATTRS_o_ai vector signed char
+vec_abs(vector signed char __a) {
+ return vec_sel(__a, -__a, vec_cmplt(__a, (vector signed char)0));
+}
+
+static inline __ATTRS_o_ai vector signed short
+vec_abs(vector signed short __a) {
+ return vec_sel(__a, -__a, vec_cmplt(__a, (vector signed short)0));
+}
+
+static inline __ATTRS_o_ai vector signed int
+vec_abs(vector signed int __a) {
+ return vec_sel(__a, -__a, vec_cmplt(__a, (vector signed int)0));
+}
+
+static inline __ATTRS_o_ai vector signed long long
+vec_abs(vector signed long long __a) {
+ return vec_sel(__a, -__a, vec_cmplt(__a, (vector signed long long)0));
+}
+
+static inline __ATTRS_o_ai vector double
+vec_abs(vector double __a) {
+ return __builtin_s390_vflpdb(__a);
+}
+
+/*-- vec_nabs ---------------------------------------------------------------*/
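+/* Element-wise negative absolute value for doubles (VFLNDB, load
+   negative). */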
+
+static inline __ATTRS_ai vector double
+vec_nabs(vector double __a) {
+ return __builtin_s390_vflndb(__a);
+}
+
+/*-- vec_max ----------------------------------------------------------------*/
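+/* Element-wise maximum via compare-greater-than and select; bool operands
+   are first cast to the matching signed/unsigned element type. */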
+
+static inline __ATTRS_o_ai vector signed char
+vec_max(vector signed char __a, vector signed char __b) {
+ return vec_sel(__b, __a, vec_cmpgt(__a, __b));
+}
+
+static inline __ATTRS_o_ai vector signed char
+vec_max(vector signed char __a, vector bool char __b) {
+ vector signed char __bc = (vector signed char)__b;
+ return vec_sel(__bc, __a, vec_cmpgt(__a, __bc));
+}
+
+static inline __ATTRS_o_ai vector signed char
+vec_max(vector bool char __a, vector signed char __b) {
+ vector signed char __ac = (vector signed char)__a;
+ return vec_sel(__b, __ac, vec_cmpgt(__ac, __b));
+}
+
+static inline __ATTRS_o_ai vector unsigned char
+vec_max(vector unsigned char __a, vector unsigned char __b) {
+ return vec_sel(__b, __a, vec_cmpgt(__a, __b));
+}
+
+static inline __ATTRS_o_ai vector unsigned char
+vec_max(vector unsigned char __a, vector bool char __b) {
+ vector unsigned char __bc = (vector unsigned char)__b;
+ return vec_sel(__bc, __a, vec_cmpgt(__a, __bc));
+}
+
+static inline __ATTRS_o_ai vector unsigned char
+vec_max(vector bool char __a, vector unsigned char __b) {
+ vector unsigned char __ac = (vector unsigned char)__a;
+ return vec_sel(__b, __ac, vec_cmpgt(__ac, __b));
+}
+
+static inline __ATTRS_o_ai vector signed short
+vec_max(vector signed short __a, vector signed short __b) {
+ return vec_sel(__b, __a, vec_cmpgt(__a, __b));
+}
+
+static inline __ATTRS_o_ai vector signed short
+vec_max(vector signed short __a, vector bool short __b) {
+ vector signed short __bc = (vector signed short)__b;
+ return vec_sel(__bc, __a, vec_cmpgt(__a, __bc));
+}
+
+static inline __ATTRS_o_ai vector signed short
+vec_max(vector bool short __a, vector signed short __b) {
+ vector signed short __ac = (vector signed short)__a;
+ return vec_sel(__b, __ac, vec_cmpgt(__ac, __b));
+}
+
+static inline __ATTRS_o_ai vector unsigned short
+vec_max(vector unsigned short __a, vector unsigned short __b) {
+ return vec_sel(__b, __a, vec_cmpgt(__a, __b));
+}
+
+static inline __ATTRS_o_ai vector unsigned short
+vec_max(vector unsigned short __a, vector bool short __b) {
+ vector unsigned short __bc = (vector unsigned short)__b;
+ return vec_sel(__bc, __a, vec_cmpgt(__a, __bc));
+}
+
+static inline __ATTRS_o_ai vector unsigned short
+vec_max(vector bool short __a, vector unsigned short __b) {
+ vector unsigned short __ac = (vector unsigned short)__a;
+ return vec_sel(__b, __ac, vec_cmpgt(__ac, __b));
+}
+
+static inline __ATTRS_o_ai vector signed int
+vec_max(vector signed int __a, vector signed int __b) {
+ return vec_sel(__b, __a, vec_cmpgt(__a, __b));
+}
+
+static inline __ATTRS_o_ai vector signed int
+vec_max(vector signed int __a, vector bool int __b) {
+ vector signed int __bc = (vector signed int)__b;
+ return vec_sel(__bc, __a, vec_cmpgt(__a, __bc));
+}
+
+static inline __ATTRS_o_ai vector signed int
+vec_max(vector bool int __a, vector signed int __b) {
+ vector signed int __ac = (vector signed int)__a;
+ return vec_sel(__b, __ac, vec_cmpgt(__ac, __b));
+}
+
+static inline __ATTRS_o_ai vector unsigned int
+vec_max(vector unsigned int __a, vector unsigned int __b) {
+ return vec_sel(__b, __a, vec_cmpgt(__a, __b));
+}
+
+static inline __ATTRS_o_ai vector unsigned int
+vec_max(vector unsigned int __a, vector bool int __b) {
+ vector unsigned int __bc = (vector unsigned int)__b;
+ return vec_sel(__bc, __a, vec_cmpgt(__a, __bc));
+}
+
+static inline __ATTRS_o_ai vector unsigned int
+vec_max(vector bool int __a, vector unsigned int __b) {
+ vector unsigned int __ac = (vector unsigned int)__a;
+ return vec_sel(__b, __ac, vec_cmpgt(__ac, __b));
+}
+
+static inline __ATTRS_o_ai vector signed long long
+vec_max(vector signed long long __a, vector signed long long __b) {
+ return vec_sel(__b, __a, vec_cmpgt(__a, __b));
+}
+
+static inline __ATTRS_o_ai vector signed long long
+vec_max(vector signed long long __a, vector bool long long __b) {
+ vector signed long long __bc = (vector signed long long)__b;
+ return vec_sel(__bc, __a, vec_cmpgt(__a, __bc));
+}
+
+static inline __ATTRS_o_ai vector signed long long
+vec_max(vector bool long long __a, vector signed long long __b) {
+ vector signed long long __ac = (vector signed long long)__a;
+ return vec_sel(__b, __ac, vec_cmpgt(__ac, __b));
+}
+
+static inline __ATTRS_o_ai vector unsigned long long
+vec_max(vector unsigned long long __a, vector unsigned long long __b) {
+ return vec_sel(__b, __a, vec_cmpgt(__a, __b));
+}
+
+static inline __ATTRS_o_ai vector unsigned long long
+vec_max(vector unsigned long long __a, vector bool long long __b) {
+ vector unsigned long long __bc = (vector unsigned long long)__b;
+ return vec_sel(__bc, __a, vec_cmpgt(__a, __bc));
+}
+
+static inline __ATTRS_o_ai vector unsigned long long
+vec_max(vector bool long long __a, vector unsigned long long __b) {
+ vector unsigned long long __ac = (vector unsigned long long)__a;
+ return vec_sel(__b, __ac, vec_cmpgt(__ac, __b));
+}
+
+static inline __ATTRS_o_ai vector double
+vec_max(vector double __a, vector double __b) {
+ return vec_sel(__b, __a, vec_cmpgt(__a, __b));
+}
+
+/*-- vec_min ----------------------------------------------------------------*/
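+/* Element-wise minimum, mirroring vec_max with the select operands
+   swapped. */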
+
+static inline __ATTRS_o_ai vector signed char
+vec_min(vector signed char __a, vector signed char __b) {
+ return vec_sel(__a, __b, vec_cmpgt(__a, __b));
+}
+
+static inline __ATTRS_o_ai vector signed char
+vec_min(vector signed char __a, vector bool char __b) {
+ vector signed char __bc = (vector signed char)__b;
+ return vec_sel(__a, __bc, vec_cmpgt(__a, __bc));
+}
+
+static inline __ATTRS_o_ai vector signed char
+vec_min(vector bool char __a, vector signed char __b) {
+ vector signed char __ac = (vector signed char)__a;
+ return vec_sel(__ac, __b, vec_cmpgt(__ac, __b));
+}
+
+static inline __ATTRS_o_ai vector unsigned char
+vec_min(vector unsigned char __a, vector unsigned char __b) {
+ return vec_sel(__a, __b, vec_cmpgt(__a, __b));
+}
+
+static inline __ATTRS_o_ai vector unsigned char
+vec_min(vector unsigned char __a, vector bool char __b) {
+ vector unsigned char __bc = (vector unsigned char)__b;
+ return vec_sel(__a, __bc, vec_cmpgt(__a, __bc));
+}
+
+static inline __ATTRS_o_ai vector unsigned char
+vec_min(vector bool char __a, vector unsigned char __b) {
+ vector unsigned char __ac = (vector unsigned char)__a;
+ return vec_sel(__ac, __b, vec_cmpgt(__ac, __b));
+}
+
+static inline __ATTRS_o_ai vector signed short
+vec_min(vector signed short __a, vector signed short __b) {
+ return vec_sel(__a, __b, vec_cmpgt(__a, __b));
+}
+
+static inline __ATTRS_o_ai vector signed short
+vec_min(vector signed short __a, vector bool short __b) {
+ vector signed short __bc = (vector signed short)__b;
+ return vec_sel(__a, __bc, vec_cmpgt(__a, __bc));
+}
+
+static inline __ATTRS_o_ai vector signed short
+vec_min(vector bool short __a, vector signed short __b) {
+ vector signed short __ac = (vector signed short)__a;
+ return vec_sel(__ac, __b, vec_cmpgt(__ac, __b));
+}
+
+static inline __ATTRS_o_ai vector unsigned short
+vec_min(vector unsigned short __a, vector unsigned short __b) {
+ return vec_sel(__a, __b, vec_cmpgt(__a, __b));
+}
+
+static inline __ATTRS_o_ai vector unsigned short
+vec_min(vector unsigned short __a, vector bool short __b) {
+ vector unsigned short __bc = (vector unsigned short)__b;
+ return vec_sel(__a, __bc, vec_cmpgt(__a, __bc));
+}
+
+static inline __ATTRS_o_ai vector unsigned short
+vec_min(vector bool short __a, vector unsigned short __b) {
+ vector unsigned short __ac = (vector unsigned short)__a;
+ return vec_sel(__ac, __b, vec_cmpgt(__ac, __b));
+}
+
+static inline __ATTRS_o_ai vector signed int
+vec_min(vector signed int __a, vector signed int __b) {
+ return vec_sel(__a, __b, vec_cmpgt(__a, __b));
+}
+
+static inline __ATTRS_o_ai vector signed int
+vec_min(vector signed int __a, vector bool int __b) {
+ vector signed int __bc = (vector signed int)__b;
+ return vec_sel(__a, __bc, vec_cmpgt(__a, __bc));
+}
+
+static inline __ATTRS_o_ai vector signed int
+vec_min(vector bool int __a, vector signed int __b) {
+ vector signed int __ac = (vector signed int)__a;
+ return vec_sel(__ac, __b, vec_cmpgt(__ac, __b));
+}
+
+static inline __ATTRS_o_ai vector unsigned int
+vec_min(vector unsigned int __a, vector unsigned int __b) {
+ return vec_sel(__a, __b, vec_cmpgt(__a, __b));
+}
+
+static inline __ATTRS_o_ai vector unsigned int
+vec_min(vector unsigned int __a, vector bool int __b) {
+ vector unsigned int __bc = (vector unsigned int)__b;
+ return vec_sel(__a, __bc, vec_cmpgt(__a, __bc));
+}
+
+static inline __ATTRS_o_ai vector unsigned int
+vec_min(vector bool int __a, vector unsigned int __b) {
+ vector unsigned int __ac = (vector unsigned int)__a;
+ return vec_sel(__ac, __b, vec_cmpgt(__ac, __b));
+}
+
+static inline __ATTRS_o_ai vector signed long long
+vec_min(vector signed long long __a, vector signed long long __b) {
+ return vec_sel(__a, __b, vec_cmpgt(__a, __b));
+}
+
+static inline __ATTRS_o_ai vector signed long long
+vec_min(vector signed long long __a, vector bool long long __b) {
+ vector signed long long __bc = (vector signed long long)__b;
+ return vec_sel(__a, __bc, vec_cmpgt(__a, __bc));
+}
+
+static inline __ATTRS_o_ai vector signed long long
+vec_min(vector bool long long __a, vector signed long long __b) {
+ vector signed long long __ac = (vector signed long long)__a;
+ return vec_sel(__ac, __b, vec_cmpgt(__ac, __b));
+}
+
+static inline __ATTRS_o_ai vector unsigned long long
+vec_min(vector unsigned long long __a, vector unsigned long long __b) {
+ return vec_sel(__a, __b, vec_cmpgt(__a, __b));
+}
+
+static inline __ATTRS_o_ai vector unsigned long long
+vec_min(vector unsigned long long __a, vector bool long long __b) {
+ vector unsigned long long __bc = (vector unsigned long long)__b;
+ return vec_sel(__a, __bc, vec_cmpgt(__a, __bc));
+}
+
+static inline __ATTRS_o_ai vector unsigned long long
+vec_min(vector bool long long __a, vector unsigned long long __b) {
+ vector unsigned long long __ac = (vector unsigned long long)__a;
+ return vec_sel(__ac, __b, vec_cmpgt(__ac, __b));
+}
+
+static inline __ATTRS_o_ai vector double
+vec_min(vector double __a, vector double __b) {
+ return vec_sel(__a, __b, vec_cmpgt(__a, __b));
+}
+
+/*-- vec_add_u128 -----------------------------------------------------------*/
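+/* Add the two operands as single unsigned 128-bit integers (VAQ). */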
+
+static inline __ATTRS_ai vector unsigned char
+vec_add_u128(vector unsigned char __a, vector unsigned char __b) {
+ return __builtin_s390_vaq(__a, __b);
+}
+
+/*-- vec_addc ---------------------------------------------------------------*/
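+/* Element-wise carry-out of unsigned addition: each result element is 1 if
+   the corresponding __a + __b overflows, 0 otherwise (VACC). */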
+
+static inline __ATTRS_o_ai vector unsigned char
+vec_addc(vector unsigned char __a, vector unsigned char __b) {
+ return __builtin_s390_vaccb(__a, __b);
+}
+
+static inline __ATTRS_o_ai vector unsigned short
+vec_addc(vector unsigned short __a, vector unsigned short __b) {
+ return __builtin_s390_vacch(__a, __b);
+}
+
+static inline __ATTRS_o_ai vector unsigned int
+vec_addc(vector unsigned int __a, vector unsigned int __b) {
+ return __builtin_s390_vaccf(__a, __b);
+}
+
+static inline __ATTRS_o_ai vector unsigned long long
+vec_addc(vector unsigned long long __a, vector unsigned long long __b) {
+ return __builtin_s390_vaccg(__a, __b);
+}
+
+/*-- vec_addc_u128 ----------------------------------------------------------*/
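+/* Carry-out of the unsigned 128-bit addition __a + __b (VACCQ). */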
+
+static inline __ATTRS_ai vector unsigned char
+vec_addc_u128(vector unsigned char __a, vector unsigned char __b) {
+ return __builtin_s390_vaccq(__a, __b);
+}
+
+/*-- vec_adde_u128 ----------------------------------------------------------*/
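+/* Unsigned 128-bit addition with carry-in taken from __c (VACQ). */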
+
+static inline __ATTRS_ai vector unsigned char
+vec_adde_u128(vector unsigned char __a, vector unsigned char __b,
+ vector unsigned char __c) {
+ return __builtin_s390_vacq(__a, __b, __c);
+}
+
+/*-- vec_addec_u128 ---------------------------------------------------------*/
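+/* Carry-out of the 128-bit add-with-carry __a + __b + carry(__c)
+   (VACCCQ). */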
+
+static inline __ATTRS_ai vector unsigned char
+vec_addec_u128(vector unsigned char __a, vector unsigned char __b,
+ vector unsigned char __c) {
+ return __builtin_s390_vacccq(__a, __b, __c);
+}
+
+/*-- vec_avg ----------------------------------------------------------------*/
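+/* Element-wise rounded average (__a + __b + 1) / 2, computed in a wider
+   intermediate so the sum cannot overflow (VAVG/VAVGL). */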
+
+static inline __ATTRS_o_ai vector signed char
+vec_avg(vector signed char __a, vector signed char __b) {
+ return __builtin_s390_vavgb(__a, __b);
+}
+
+static inline __ATTRS_o_ai vector signed short
+vec_avg(vector signed short __a, vector signed short __b) {
+ return __builtin_s390_vavgh(__a, __b);
+}
+
+static inline __ATTRS_o_ai vector signed int
+vec_avg(vector signed int __a, vector signed int __b) {
+ return __builtin_s390_vavgf(__a, __b);
+}
+
+static inline __ATTRS_o_ai vector signed long long
+vec_avg(vector signed long long __a, vector signed long long __b) {
+ return __builtin_s390_vavgg(__a, __b);
+}
+
+static inline __ATTRS_o_ai vector unsigned char
+vec_avg(vector unsigned char __a, vector unsigned char __b) {
+ return __builtin_s390_vavglb(__a, __b);
+}
+
+static inline __ATTRS_o_ai vector unsigned short
+vec_avg(vector unsigned short __a, vector unsigned short __b) {
+ return __builtin_s390_vavglh(__a, __b);
+}
+
+static inline __ATTRS_o_ai vector unsigned int
+vec_avg(vector unsigned int __a, vector unsigned int __b) {
+ return __builtin_s390_vavglf(__a, __b);
+}
+
+static inline __ATTRS_o_ai vector unsigned long long
+vec_avg(vector unsigned long long __a, vector unsigned long long __b) {
+ return __builtin_s390_vavglg(__a, __b);
+}
+
+/*-- vec_checksum -----------------------------------------------------------*/
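+/* 32-bit end-around-carry checksum over the word elements of __a, seeded
+   by __b (VCKSM). */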
+
+static inline __ATTRS_ai vector unsigned int
+vec_checksum(vector unsigned int __a, vector unsigned int __b) {
+ return __builtin_s390_vcksm(__a, __b);
+}
+
+/*-- vec_gfmsum -------------------------------------------------------------*/
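+/* Galois-field (carry-less) multiplication of corresponding elements; each
+   pair of adjacent double-width products is XOR-summed (VGFM). */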
+
+static inline __ATTRS_o_ai vector unsigned short
+vec_gfmsum(vector unsigned char __a, vector unsigned char __b) {
+ return __builtin_s390_vgfmb(__a, __b);
+}
+
+static inline __ATTRS_o_ai vector unsigned int
+vec_gfmsum(vector unsigned short __a, vector unsigned short __b) {
+ return __builtin_s390_vgfmh(__a, __b);
+}
+
+static inline __ATTRS_o_ai vector unsigned long long
+vec_gfmsum(vector unsigned int __a, vector unsigned int __b) {
+ return __builtin_s390_vgfmf(__a, __b);
+}
+
+/*-- vec_gfmsum_128 ---------------------------------------------------------*/
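+/* Doubleword variant producing a single 128-bit Galois-field sum
+   (VGFMG). */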
+
+static inline __ATTRS_o_ai vector unsigned char
+vec_gfmsum_128(vector unsigned long long __a, vector unsigned long long __b) {
+ return __builtin_s390_vgfmg(__a, __b);
+}
+
+/*-- vec_gfmsum_accum -------------------------------------------------------*/
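+/* As vec_gfmsum, with the accumulator __c XORed into the result (VGFMA). */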
+
+static inline __ATTRS_o_ai vector unsigned short
+vec_gfmsum_accum(vector unsigned char __a, vector unsigned char __b,
+ vector unsigned short __c) {
+ return __builtin_s390_vgfmab(__a, __b, __c);
+}
+
+static inline __ATTRS_o_ai vector unsigned int
+vec_gfmsum_accum(vector unsigned short __a, vector unsigned short __b,
+ vector unsigned int __c) {
+ return __builtin_s390_vgfmah(__a, __b, __c);
+}
+
+static inline __ATTRS_o_ai vector unsigned long long
+vec_gfmsum_accum(vector unsigned int __a, vector unsigned int __b,
+ vector unsigned long long __c) {
+ return __builtin_s390_vgfmaf(__a, __b, __c);
+}
+
+/*-- vec_gfmsum_accum_128 ---------------------------------------------------*/
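+/* 128-bit Galois-field multiply-sum with accumulator __c (VGFMAG). */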
+
+static inline __ATTRS_o_ai vector unsigned char
+vec_gfmsum_accum_128(vector unsigned long long __a,
+ vector unsigned long long __b,
+ vector unsigned char __c) {
+ return __builtin_s390_vgfmag(__a, __b, __c);
+}
+
+/*-- vec_mladd --------------------------------------------------------------*/
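+/* Element-wise multiply-and-add keeping the low half of the product:
+   __a * __b + __c, evaluated in the element type. */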
+
+static inline __ATTRS_o_ai vector signed char
+vec_mladd(vector signed char __a, vector signed char __b,
+ vector signed char __c) {
+ return __a * __b + __c;
+}
+
+static inline __ATTRS_o_ai vector signed char
+vec_mladd(vector unsigned char __a, vector signed char __b,
+ vector signed char __c) {
+ return (vector signed char)__a * __b + __c;
+}
+
+static inline __ATTRS_o_ai vector signed char
+vec_mladd(vector signed char __a, vector unsigned char __b,
+ vector unsigned char __c) {
+ return __a * (vector signed char)__b + (vector signed char)__c;
+}
+
+static inline __ATTRS_o_ai vector unsigned char
+vec_mladd(vector unsigned char __a, vector unsigned char __b,
+ vector unsigned char __c) {
+ return __a * __b + __c;
+}
+
+static inline __ATTRS_o_ai vector signed short
+vec_mladd(vector signed short __a, vector signed short __b,
+ vector signed short __c) {
+ return __a * __b + __c;
+}
+
+static inline __ATTRS_o_ai vector signed short
+vec_mladd(vector unsigned short __a, vector signed short __b,
+ vector signed short __c) {
+ return (vector signed short)__a * __b + __c;
+}
+
+static inline __ATTRS_o_ai vector signed short
+vec_mladd(vector signed short __a, vector unsigned short __b,
+ vector unsigned short __c) {
+ return __a * (vector signed short)__b + (vector signed short)__c;
+}
+
+static inline __ATTRS_o_ai vector unsigned short
+vec_mladd(vector unsigned short __a, vector unsigned short __b,
+ vector unsigned short __c) {
+ return __a * __b + __c;
+}
+
+static inline __ATTRS_o_ai vector signed int
+vec_mladd(vector signed int __a, vector signed int __b,
+ vector signed int __c) {
+ return __a * __b + __c;
+}
+
+static inline __ATTRS_o_ai vector signed int
+vec_mladd(vector unsigned int __a, vector signed int __b,
+ vector signed int __c) {
+ return (vector signed int)__a * __b + __c;
+}
+
+static inline __ATTRS_o_ai vector signed int
+vec_mladd(vector signed int __a, vector unsigned int __b,
+ vector unsigned int __c) {
+ return __a * (vector signed int)__b + (vector signed int)__c;
+}
+
+static inline __ATTRS_o_ai vector unsigned int
+vec_mladd(vector unsigned int __a, vector unsigned int __b,
+ vector unsigned int __c) {
+ return __a * __b + __c;
+}
+
+/*-- vec_mhadd --------------------------------------------------------------*/
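+/* Element-wise multiply-and-add returning the high half of the double-width
+   result of __a * __b + __c (VMAH/VMALH). */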
+
+static inline __ATTRS_o_ai vector signed char
+vec_mhadd(vector signed char __a, vector signed char __b,
+ vector signed char __c) {
+ return __builtin_s390_vmahb(__a, __b, __c);
+}
+
+static inline __ATTRS_o_ai vector unsigned char
+vec_mhadd(vector unsigned char __a, vector unsigned char __b,
+ vector unsigned char __c) {
+ return __builtin_s390_vmalhb(__a, __b, __c);
+}
+
+static inline __ATTRS_o_ai vector signed short
+vec_mhadd(vector signed short __a, vector signed short __b,
+ vector signed short __c) {
+ return __builtin_s390_vmahh(__a, __b, __c);
+}
+
+static inline __ATTRS_o_ai vector unsigned short
+vec_mhadd(vector unsigned short __a, vector unsigned short __b,
+ vector unsigned short __c) {
+ return __builtin_s390_vmalhh(__a, __b, __c);
+}
+
+static inline __ATTRS_o_ai vector signed int
+vec_mhadd(vector signed int __a, vector signed int __b,
+ vector signed int __c) {
+ return __builtin_s390_vmahf(__a, __b, __c);
+}
+
+static inline __ATTRS_o_ai vector unsigned int
+vec_mhadd(vector unsigned int __a, vector unsigned int __b,
+ vector unsigned int __c) {
+ return __builtin_s390_vmalhf(__a, __b, __c);
+}
+
+/*-- vec_meadd --------------------------------------------------------------*/
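+/* Multiply the even-indexed elements of __a and __b to double width and add
+   the corresponding elements of __c (VMAE/VMALE). */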
+
+static inline __ATTRS_o_ai vector signed short
+vec_meadd(vector signed char __a, vector signed char __b,
+ vector signed short __c) {
+ return __builtin_s390_vmaeb(__a, __b, __c);
+}
+
+static inline __ATTRS_o_ai vector unsigned short
+vec_meadd(vector unsigned char __a, vector unsigned char __b,
+ vector unsigned short __c) {
+ return __builtin_s390_vmaleb(__a, __b, __c);
+}
+
+static inline __ATTRS_o_ai vector signed int
+vec_meadd(vector signed short __a, vector signed short __b,
+ vector signed int __c) {
+ return __builtin_s390_vmaeh(__a, __b, __c);
+}
+
+static inline __ATTRS_o_ai vector unsigned int
+vec_meadd(vector unsigned short __a, vector unsigned short __b,
+ vector unsigned int __c) {
+ return __builtin_s390_vmaleh(__a, __b, __c);
+}
+
+static inline __ATTRS_o_ai vector signed long long
+vec_meadd(vector signed int __a, vector signed int __b,
+ vector signed long long __c) {
+ return __builtin_s390_vmaef(__a, __b, __c);
+}
+
+static inline __ATTRS_o_ai vector unsigned long long
+vec_meadd(vector unsigned int __a, vector unsigned int __b,
+ vector unsigned long long __c) {
+ return __builtin_s390_vmalef(__a, __b, __c);
+}
+
+/*-- vec_moadd --------------------------------------------------------------*/
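+/* As vec_meadd, but for the odd-indexed elements (VMAO/VMALO). */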
+
+static inline __ATTRS_o_ai vector signed short
+vec_moadd(vector signed char __a, vector signed char __b,
+ vector signed short __c) {
+ return __builtin_s390_vmaob(__a, __b, __c);
+}
+
+static inline __ATTRS_o_ai vector unsigned short
+vec_moadd(vector unsigned char __a, vector unsigned char __b,
+ vector unsigned short __c) {
+ return __builtin_s390_vmalob(__a, __b, __c);
+}
+
+static inline __ATTRS_o_ai vector signed int
+vec_moadd(vector signed short __a, vector signed short __b,
+ vector signed int __c) {
+ return __builtin_s390_vmaoh(__a, __b, __c);
+}
+
+static inline __ATTRS_o_ai vector unsigned int
+vec_moadd(vector unsigned short __a, vector unsigned short __b,
+ vector unsigned int __c) {
+ return __builtin_s390_vmaloh(__a, __b, __c);
+}
+
+static inline __ATTRS_o_ai vector signed long long
+vec_moadd(vector signed int __a, vector signed int __b,
+ vector signed long long __c) {
+ return __builtin_s390_vmaof(__a, __b, __c);
+}
+
+static inline __ATTRS_o_ai vector unsigned long long
+vec_moadd(vector unsigned int __a, vector unsigned int __b,
+ vector unsigned long long __c) {
+ return __builtin_s390_vmalof(__a, __b, __c);
+}
+
+/*-- vec_mulh ---------------------------------------------------------------*/
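+/* Element-wise high half of the double-width product __a * __b
+   (VMH/VMLH). */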
+
+static inline __ATTRS_o_ai vector signed char
+vec_mulh(vector signed char __a, vector signed char __b) {
+ return __builtin_s390_vmhb(__a, __b);
+}
+
+static inline __ATTRS_o_ai vector unsigned char
+vec_mulh(vector unsigned char __a, vector unsigned char __b) {
+ return __builtin_s390_vmlhb(__a, __b);
+}
+
+static inline __ATTRS_o_ai vector signed short
+vec_mulh(vector signed short __a, vector signed short __b) {
+ return __builtin_s390_vmhh(__a, __b);
+}
+
+static inline __ATTRS_o_ai vector unsigned short
+vec_mulh(vector unsigned short __a, vector unsigned short __b) {
+ return __builtin_s390_vmlhh(__a, __b);
+}
+
+static inline __ATTRS_o_ai vector signed int
+vec_mulh(vector signed int __a, vector signed int __b) {
+ return __builtin_s390_vmhf(__a, __b);
+}
+
+static inline __ATTRS_o_ai vector unsigned int
+vec_mulh(vector unsigned int __a, vector unsigned int __b) {
+ return __builtin_s390_vmlhf(__a, __b);
+}
+
+/*-- vec_mule ---------------------------------------------------------------*/
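+// vec_mule multiplies the even-indexed elements of __a and __b and
+// returns the full double-width products.  For example (an illustrative
+// assumption, not from the original header), with
+//   vector signed short a = {1, 2, 3, 4, 5, 6, 7, 8};
+//   vector signed short b = {10, 10, 10, 10, 10, 10, 10, 10};
+// vec_mule(a, b) is the vector signed int {10, 30, 50, 70}.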
+
+static inline __ATTRS_o_ai vector signed short
+vec_mule(vector signed char __a, vector signed char __b) {
+ return __builtin_s390_vmeb(__a, __b);
+}
+
+static inline __ATTRS_o_ai vector unsigned short
+vec_mule(vector unsigned char __a, vector unsigned char __b) {
+ return __builtin_s390_vmleb(__a, __b);
+}
+
+static inline __ATTRS_o_ai vector signed int
+vec_mule(vector signed short __a, vector signed short __b) {
+ return __builtin_s390_vmeh(__a, __b);
+}
+
+static inline __ATTRS_o_ai vector unsigned int
+vec_mule(vector unsigned short __a, vector unsigned short __b) {
+ return __builtin_s390_vmleh(__a, __b);
+}
+
+static inline __ATTRS_o_ai vector signed long long
+vec_mule(vector signed int __a, vector signed int __b) {
+ return __builtin_s390_vmef(__a, __b);
+}
+
+static inline __ATTRS_o_ai vector unsigned long long
+vec_mule(vector unsigned int __a, vector unsigned int __b) {
+ return __builtin_s390_vmlef(__a, __b);
+}
+
+/*-- vec_mulo ---------------------------------------------------------------*/
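+// vec_mulo is the odd-indexed counterpart of vec_mule; on the inputs
+// shown above it would yield {20, 40, 60, 80}.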
+
+static inline __ATTRS_o_ai vector signed short
+vec_mulo(vector signed char __a, vector signed char __b) {
+ return __builtin_s390_vmob(__a, __b);
+}
+
+static inline __ATTRS_o_ai vector unsigned short
+vec_mulo(vector unsigned char __a, vector unsigned char __b) {
+ return __builtin_s390_vmlob(__a, __b);
+}
+
+static inline __ATTRS_o_ai vector signed int
+vec_mulo(vector signed short __a, vector signed short __b) {
+ return __builtin_s390_vmoh(__a, __b);
+}
+
+static inline __ATTRS_o_ai vector unsigned int
+vec_mulo(vector unsigned short __a, vector unsigned short __b) {
+ return __builtin_s390_vmloh(__a, __b);
+}
+
+static inline __ATTRS_o_ai vector signed long long
+vec_mulo(vector signed int __a, vector signed int __b) {
+ return __builtin_s390_vmof(__a, __b);
+}
+
+static inline __ATTRS_o_ai vector unsigned long long
+vec_mulo(vector unsigned int __a, vector unsigned int __b) {
+ return __builtin_s390_vmlof(__a, __b);
+}
+
+/*-- vec_sub_u128 -----------------------------------------------------------*/
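+// vec_sub_u128 treats both 16-byte vectors as unsigned 128-bit integers
+// and subtracts them modulo 2^128 (the VSQ instruction).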
+
+static inline __ATTRS_ai vector unsigned char
+vec_sub_u128(vector unsigned char __a, vector unsigned char __b) {
+ return __builtin_s390_vsq(__a, __b);
+}
+
+/*-- vec_subc ---------------------------------------------------------------*/
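+// vec_subc computes the borrow indication of the element-wise subtraction
+// __a - __b: per the VSCBI instructions, each result element is 1 when no
+// borrow occurs (that is, __a[i] >= __b[i]) and 0 otherwise.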
+
+static inline __ATTRS_o_ai vector unsigned char
+vec_subc(vector unsigned char __a, vector unsigned char __b) {
+ return __builtin_s390_vscbib(__a, __b);
+}
+
+static inline __ATTRS_o_ai vector unsigned short
+vec_subc(vector unsigned short __a, vector unsigned short __b) {
+ return __builtin_s390_vscbih(__a, __b);
+}
+
+static inline __ATTRS_o_ai vector unsigned int
+vec_subc(vector unsigned int __a, vector unsigned int __b) {
+ return __builtin_s390_vscbif(__a, __b);
+}
+
+static inline __ATTRS_o_ai vector unsigned long long
+vec_subc(vector unsigned long long __a, vector unsigned long long __b) {
+ return __builtin_s390_vscbig(__a, __b);
+}
+
+/*-- vec_subc_u128 ----------------------------------------------------------*/
+
+static inline __ATTRS_ai vector unsigned char
+vec_subc_u128(vector unsigned char __a, vector unsigned char __b) {
+ return __builtin_s390_vscbiq(__a, __b);
+}
+
+/*-- vec_sube_u128 ----------------------------------------------------------*/
+
+static inline __ATTRS_ai vector unsigned char
+vec_sube_u128(vector unsigned char __a, vector unsigned char __b,
+ vector unsigned char __c) {
+ return __builtin_s390_vsbiq(__a, __b, __c);
+}
+
+/*-- vec_subec_u128 ---------------------------------------------------------*/
+
+static inline __ATTRS_ai vector unsigned char
+vec_subec_u128(vector unsigned char __a, vector unsigned char __b,
+ vector unsigned char __c) {
+ return __builtin_s390_vsbcbiq(__a, __b, __c);
+}
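+
+// An illustrative sketch (an assumption for documentation, not part of
+// the original header): the *_u128 subtract primitives chain into a
+// 256-bit subtraction, with each half kept in a vector unsigned char:
+//   vector unsigned char borrow = vec_subc_u128(a_lo, b_lo);
+//   vector unsigned char r_lo = vec_sub_u128(a_lo, b_lo);
+//   vector unsigned char r_hi = vec_sube_u128(a_hi, b_hi, borrow);
+// vec_sube_u128 subtracts with the incoming borrow, and vec_subec_u128
+// would produce the borrow-out if further 128-bit limbs followed.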
+
+/*-- vec_sum2 ---------------------------------------------------------------*/
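+// The vec_sum* family sums groups of elements of __a and adds the last
+// element of __b in each group (per the VSUM/VSUMG/VSUMQ instructions):
+// vec_sum2 sums within each 64-bit half, vec_sum_u128 across the whole
+// vector into a 128-bit result, and vec_sum4 within each 32-bit word.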
+
+static inline __ATTRS_o_ai vector unsigned long long
+vec_sum2(vector unsigned short __a, vector unsigned short __b) {
+ return __builtin_s390_vsumgh(__a, __b);
+}
+
+static inline __ATTRS_o_ai vector unsigned long long
+vec_sum2(vector unsigned int __a, vector unsigned int __b) {
+ return __builtin_s390_vsumgf(__a, __b);
+}
+
+/*-- vec_sum_u128 -----------------------------------------------------------*/
+
+static inline __ATTRS_o_ai vector unsigned char
+vec_sum_u128(vector unsigned int __a, vector unsigned int __b) {
+ return __builtin_s390_vsumqf(__a, __b);
+}
+
+static inline __ATTRS_o_ai vector unsigned char
+vec_sum_u128(vector unsigned long long __a, vector unsigned long long __b) {
+ return __builtin_s390_vsumqg(__a, __b);
+}
+
+/*-- vec_sum4 ---------------------------------------------------------------*/
+
+static inline __ATTRS_o_ai vector unsigned int
+vec_sum4(vector unsigned char __a, vector unsigned char __b) {
+ return __builtin_s390_vsumb(__a, __b);
+}
+
+static inline __ATTRS_o_ai vector unsigned int
+vec_sum4(vector unsigned short __a, vector unsigned short __b) {
+ return __builtin_s390_vsumh(__a, __b);
+}
+
+/*-- vec_test_mask ----------------------------------------------------------*/
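+// vec_test_mask tests the bits of __a selected by the mask __b and
+// returns the condition code of VECTOR TEST UNDER MASK: 0 when all
+// selected bits are zero, 1 when they are mixed, 3 when all are one.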
+
+static inline __ATTRS_o_ai int
+vec_test_mask(vector signed char __a, vector unsigned char __b) {
+ return __builtin_s390_vtm((vector unsigned char)__a,
+ (vector unsigned char)__b);
+}
+
+static inline __ATTRS_o_ai int
+vec_test_mask(vector unsigned char __a, vector unsigned char __b) {
+ return __builtin_s390_vtm(__a, __b);
+}
+
+static inline __ATTRS_o_ai int
+vec_test_mask(vector signed short __a, vector unsigned short __b) {
+ return __builtin_s390_vtm((vector unsigned char)__a,
+ (vector unsigned char)__b);
+}
+
+static inline __ATTRS_o_ai int
+vec_test_mask(vector unsigned short __a, vector unsigned short __b) {
+ return __builtin_s390_vtm((vector unsigned char)__a,
+ (vector unsigned char)__b);
+}
+
+static inline __ATTRS_o_ai int
+vec_test_mask(vector signed int __a, vector unsigned int __b) {
+ return __builtin_s390_vtm((vector unsigned char)__a,
+ (vector unsigned char)__b);
+}
+
+static inline __ATTRS_o_ai int
+vec_test_mask(vector unsigned int __a, vector unsigned int __b) {
+ return __builtin_s390_vtm((vector unsigned char)__a,
+ (vector unsigned char)__b);
+}
+
+static inline __ATTRS_o_ai int
+vec_test_mask(vector signed long long __a, vector unsigned long long __b) {
+ return __builtin_s390_vtm((vector unsigned char)__a,
+ (vector unsigned char)__b);
+}
+
+static inline __ATTRS_o_ai int
+vec_test_mask(vector unsigned long long __a, vector unsigned long long __b) {
+ return __builtin_s390_vtm((vector unsigned char)__a,
+ (vector unsigned char)__b);
+}
+
+static inline __ATTRS_o_ai int
+vec_test_mask(vector double __a, vector unsigned long long __b) {
+ return __builtin_s390_vtm((vector unsigned char)__a,
+ (vector unsigned char)__b);
+}
+
+/*-- vec_madd ---------------------------------------------------------------*/
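+// vec_madd is a fused multiply-add: __a * __b + __c with a single
+// rounding (VFMADB); vec_msub below computes __a * __b - __c likewise.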
+
+static inline __ATTRS_ai vector double
+vec_madd(vector double __a, vector double __b, vector double __c) {
+ return __builtin_s390_vfmadb(__a, __b, __c);
+}
+
+/*-- vec_msub ---------------------------------------------------------------*/
+
+static inline __ATTRS_ai vector double
+vec_msub(vector double __a, vector double __b, vector double __c) {
+ return __builtin_s390_vfmsdb(__a, __b, __c);
+}
+
+/*-- vec_sqrt ---------------------------------------------------------------*/
+
+static inline __ATTRS_ai vector double
+vec_sqrt(vector double __a) {
+ return __builtin_s390_vfsqdb(__a);
+}
+
+/*-- vec_ld2f ---------------------------------------------------------------*/
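+// vec_ld2f loads two adjacent floats from __ptr and widens them to a
+// vector of two doubles; vec_st2f below is the inverse, narrowing and
+// storing.  Note that __ptr is dereferenced as an 8-byte vector type.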
+
+static inline __ATTRS_ai vector double
+vec_ld2f(const float *__ptr) {
+ typedef float __v2f32 __attribute__((__vector_size__(8)));
+ return __builtin_convertvector(*(const __v2f32 *)__ptr, vector double);
+}
+
+/*-- vec_st2f ---------------------------------------------------------------*/
+
+static inline __ATTRS_ai void
+vec_st2f(vector double __a, float *__ptr) {
+ typedef float __v2f32 __attribute__((__vector_size__(8)));
+ *(__v2f32 *)__ptr = __builtin_convertvector(__a, __v2f32);
+}
+
+/*-- vec_ctd ----------------------------------------------------------------*/
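+// vec_ctd converts a fixed-point value with __b fraction bits to double,
+// i.e. it scales the converted integer by 2^-__b.  The expression
+// ((0x3ffULL - __b) << 52) is the IEEE-754 bit pattern of 2.0^-__b
+// (biased exponent 0x3ff - __b, zero fraction), so the multiply performs
+// the scaling.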
+
+static inline __ATTRS_o_ai vector double
+vec_ctd(vector signed long long __a, int __b)
+ __constant_range(__b, 0, 31) {
+ vector double __conv = __builtin_convertvector(__a, vector double);
+ __conv *= (vector double)(vector unsigned long long)((0x3ffULL - __b) << 52);
+ return __conv;
+}
+
+static inline __ATTRS_o_ai vector double
+vec_ctd(vector unsigned long long __a, int __b)
+ __constant_range(__b, 0, 31) {
+ vector double __conv = __builtin_convertvector(__a, vector double);
+ __conv *= (vector double)(vector unsigned long long)((0x3ffULL - __b) << 52);
+ return __conv;
+}
+
+/*-- vec_ctsl ---------------------------------------------------------------*/
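+// vec_ctsl is the inverse conversion: it multiplies by 2.0^__b (bit
+// pattern (0x3ffULL + __b) << 52) and truncates to a signed 64-bit
+// fixed-point value with __b fraction bits; vec_ctul below is the
+// unsigned variant.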
+
+static inline __ATTRS_o_ai vector signed long long
+vec_ctsl(vector double __a, int __b)
+ __constant_range(__b, 0, 31) {
+ __a *= (vector double)(vector unsigned long long)((0x3ffULL + __b) << 52);
+ return __builtin_convertvector(__a, vector signed long long);
+}
+
+/*-- vec_ctul ---------------------------------------------------------------*/
+
+static inline __ATTRS_o_ai vector unsigned long long
+vec_ctul(vector double __a, int __b)
+ __constant_range(__b, 0, 31) {
+ __a *= (vector double)(vector unsigned long long)((0x3ffULL + __b) << 52);
+ return __builtin_convertvector(__a, vector unsigned long long);
+}
+
+/*-- vec_roundp -------------------------------------------------------------*/
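+// The rounding family below wraps VECTOR LOAD FP INTEGER (VFIDB).  The
+// first immediate (4) sets the inexact-suppression (XxC) bit; the second
+// selects the rounding method: 6 = toward +infinity, 7 = toward
+// -infinity, 5 = toward zero, 4 = to nearest with ties to even, and
+// 0 = according to the current FPC rounding mode.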
+
+static inline __ATTRS_ai vector double
+vec_roundp(vector double __a) {
+ return __builtin_s390_vfidb(__a, 4, 6);
+}
+
+/*-- vec_ceil ---------------------------------------------------------------*/
+
+static inline __ATTRS_ai vector double
+vec_ceil(vector double __a) {
+ // On this platform, vec_ceil never triggers the IEEE-inexact exception.
+ return __builtin_s390_vfidb(__a, 4, 6);
+}
+
+/*-- vec_roundm -------------------------------------------------------------*/
+
+static inline __ATTRS_ai vector double
+vec_roundm(vector double __a) {
+ return __builtin_s390_vfidb(__a, 4, 7);
+}
+
+/*-- vec_floor --------------------------------------------------------------*/
+
+static inline __ATTRS_ai vector double
+vec_floor(vector double __a) {
+ // On this platform, vec_floor never triggers the IEEE-inexact exception.
+ return __builtin_s390_vfidb(__a, 4, 7);
+}
+
+/*-- vec_roundz -------------------------------------------------------------*/
+
+static inline __ATTRS_ai vector double
+vec_roundz(vector double __a) {
+ return __builtin_s390_vfidb(__a, 4, 5);
+}
+
+/*-- vec_trunc --------------------------------------------------------------*/
+
+static inline __ATTRS_ai vector double
+vec_trunc(vector double __a) {
+ // On this platform, vec_trunc never triggers the IEEE-inexact exception.
+ return __builtin_s390_vfidb(__a, 4, 5);
+}
+
+/*-- vec_roundc -------------------------------------------------------------*/
+
+static inline __ATTRS_ai vector double
+vec_roundc(vector double __a) {
+ return __builtin_s390_vfidb(__a, 4, 0);
+}
+
+/*-- vec_round --------------------------------------------------------------*/
+
+static inline __ATTRS_ai vector double
+vec_round(vector double __a) {
+ return __builtin_s390_vfidb(__a, 4, 4);
+}
+
+/*-- vec_fp_test_data_class -------------------------------------------------*/
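+// vec_fp_test_data_class is a macro rather than a function so that (Y),
+// a bitmask selecting IEEE data classes, stays an integer constant for
+// the VFTCI immediate field.  The result is all ones in each element
+// whose value belongs to a selected class, and (Z) receives the
+// resulting condition code through an int *.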
+
+#define vec_fp_test_data_class(X, Y, Z) \
+ ((vector bool long long)__builtin_s390_vftcidb((X), (Y), (Z)))
+
+/*-- vec_cp_until_zero ------------------------------------------------------*/
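+// vec_cp_until_zero ("isolate string", VISTR) copies __a but forces all
+// elements after the first zero element to zero; if no element is zero,
+// __a is returned unchanged.  For example (an illustrative assumption),
+// {'a','b','c',0,'x','y',...} becomes {'a','b','c',0,0,0,...}.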
+
+static inline __ATTRS_o_ai vector signed char
+vec_cp_until_zero(vector signed char __a) {
+ return (vector signed char)__builtin_s390_vistrb((vector unsigned char)__a);
+}
+
+static inline __ATTRS_o_ai vector bool char
+vec_cp_until_zero(vector bool char __a) {
+ return (vector bool char)__builtin_s390_vistrb((vector unsigned char)__a);
+}
+
+static inline __ATTRS_o_ai vector unsigned char
+vec_cp_until_zero(vector unsigned char __a) {
+ return __builtin_s390_vistrb(__a);
+}
+
+static inline __ATTRS_o_ai vector signed short
+vec_cp_until_zero(vector signed short __a) {
+ return (vector signed short)__builtin_s390_vistrh((vector unsigned short)__a);
+}
+
+static inline __ATTRS_o_ai vector bool short
+vec_cp_until_zero(vector bool short __a) {
+ return (vector bool short)__builtin_s390_vistrh((vector unsigned short)__a);
+}
+
+static inline __ATTRS_o_ai vector unsigned short
+vec_cp_until_zero(vector unsigned short __a) {
+ return __builtin_s390_vistrh(__a);
+}
+
+static inline __ATTRS_o_ai vector signed int
+vec_cp_until_zero(vector signed int __a) {
+ return (vector signed int)__builtin_s390_vistrf((vector unsigned int)__a);
+}
+
+static inline __ATTRS_o_ai vector bool int
+vec_cp_until_zero(vector bool int __a) {
+ return (vector bool int)__builtin_s390_vistrf((vector unsigned int)__a);
+}
+
+static inline __ATTRS_o_ai vector unsigned int
+vec_cp_until_zero(vector unsigned int __a) {
+ return __builtin_s390_vistrf(__a);
+}
+
+/*-- vec_cp_until_zero_cc ---------------------------------------------------*/
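+// The _cc variant also stores the condition code through __cc; per the
+// VISTR definition, 0 indicates a zero element was found and 3 that the
+// vector contains none.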
+
+static inline __ATTRS_o_ai vector signed char
+vec_cp_until_zero_cc(vector signed char __a, int *__cc) {
+ return (vector signed char)
+ __builtin_s390_vistrbs((vector unsigned char)__a, __cc);
+}
+
+static inline __ATTRS_o_ai vector bool char
+vec_cp_until_zero_cc(vector bool char __a, int *__cc) {
+ return (vector bool char)
+ __builtin_s390_vistrbs((vector unsigned char)__a, __cc);
+}
+
+static inline __ATTRS_o_ai vector unsigned char
+vec_cp_until_zero_cc(vector unsigned char __a, int *__cc) {
+ return __builtin_s390_vistrbs(__a, __cc);
+}
+
+static inline __ATTRS_o_ai vector signed short
+vec_cp_until_zero_cc(vector signed short __a, int *__cc) {
+ return (vector signed short)
+ __builtin_s390_vistrhs((vector unsigned short)__a, __cc);
+}
+
+static inline __ATTRS_o_ai vector bool short
+vec_cp_until_zero_cc(vector bool short __a, int *__cc) {
+ return (vector bool short)
+ __builtin_s390_vistrhs((vector unsigned short)__a, __cc);
+}
+
+static inline __ATTRS_o_ai vector unsigned short
+vec_cp_until_zero_cc(vector unsigned short __a, int *__cc) {
+ return __builtin_s390_vistrhs(__a, __cc);
+}
+
+static inline __ATTRS_o_ai vector signed int
+vec_cp_until_zero_cc(vector signed int __a, int *__cc) {
+ return (vector signed int)
+ __builtin_s390_vistrfs((vector unsigned int)__a, __cc);
+}
+
+static inline __ATTRS_o_ai vector bool int
+vec_cp_until_zero_cc(vector bool int __a, int *__cc) {
+ return (vector bool int)__builtin_s390_vistrfs((vector unsigned int)__a,
+ __cc);
+}
+
+static inline __ATTRS_o_ai vector unsigned int
+vec_cp_until_zero_cc(vector unsigned int __a, int *__cc) {
+ return __builtin_s390_vistrfs(__a, __cc);
+}
+
+/*-- vec_cmpeq_idx ----------------------------------------------------------*/
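+// Conventions for the search functions from here on: the *_idx forms
+// return the byte index of the first (leftmost) qualifying element in
+// byte element 7 of the result, with the other bytes zero and 16 meaning
+// "not found"; the *_cc forms also store the hardware condition code
+// through their int * argument; and the *_or_0_* forms additionally stop
+// at the first zero element, which suits NUL-terminated strings.
+// vec_cmpeq_idx itself (VFEE) locates the first position at which __a
+// and __b hold equal elements.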
+
+static inline __ATTRS_o_ai vector signed char
+vec_cmpeq_idx(vector signed char __a, vector signed char __b) {
+ return (vector signed char)
+ __builtin_s390_vfeeb((vector unsigned char)__a,
+ (vector unsigned char)__b);
+}
+
+static inline __ATTRS_o_ai vector unsigned char
+vec_cmpeq_idx(vector bool char __a, vector bool char __b) {
+ return __builtin_s390_vfeeb((vector unsigned char)__a,
+ (vector unsigned char)__b);
+}
+
+static inline __ATTRS_o_ai vector unsigned char
+vec_cmpeq_idx(vector unsigned char __a, vector unsigned char __b) {
+ return __builtin_s390_vfeeb(__a, __b);
+}
+
+static inline __ATTRS_o_ai vector signed short
+vec_cmpeq_idx(vector signed short __a, vector signed short __b) {
+ return (vector signed short)
+ __builtin_s390_vfeeh((vector unsigned short)__a,
+ (vector unsigned short)__b);
+}
+
+static inline __ATTRS_o_ai vector unsigned short
+vec_cmpeq_idx(vector bool short __a, vector bool short __b) {
+ return __builtin_s390_vfeeh((vector unsigned short)__a,
+ (vector unsigned short)__b);
+}
+
+static inline __ATTRS_o_ai vector unsigned short
+vec_cmpeq_idx(vector unsigned short __a, vector unsigned short __b) {
+ return __builtin_s390_vfeeh(__a, __b);
+}
+
+static inline __ATTRS_o_ai vector signed int
+vec_cmpeq_idx(vector signed int __a, vector signed int __b) {
+ return (vector signed int)
+ __builtin_s390_vfeef((vector unsigned int)__a,
+ (vector unsigned int)__b);
+}
+
+static inline __ATTRS_o_ai vector unsigned int
+vec_cmpeq_idx(vector bool int __a, vector bool int __b) {
+ return __builtin_s390_vfeef((vector unsigned int)__a,
+ (vector unsigned int)__b);
+}
+
+static inline __ATTRS_o_ai vector unsigned int
+vec_cmpeq_idx(vector unsigned int __a, vector unsigned int __b) {
+ return __builtin_s390_vfeef(__a, __b);
+}
+
+/*-- vec_cmpeq_idx_cc -------------------------------------------------------*/
+
+static inline __ATTRS_o_ai vector signed char
+vec_cmpeq_idx_cc(vector signed char __a, vector signed char __b, int *__cc) {
+ return (vector signed char)
+ __builtin_s390_vfeebs((vector unsigned char)__a,
+ (vector unsigned char)__b, __cc);
+}
+
+static inline __ATTRS_o_ai vector unsigned char
+vec_cmpeq_idx_cc(vector bool char __a, vector bool char __b, int *__cc) {
+ return __builtin_s390_vfeebs((vector unsigned char)__a,
+ (vector unsigned char)__b, __cc);
+}
+
+static inline __ATTRS_o_ai vector unsigned char
+vec_cmpeq_idx_cc(vector unsigned char __a, vector unsigned char __b,
+ int *__cc) {
+ return __builtin_s390_vfeebs(__a, __b, __cc);
+}
+
+static inline __ATTRS_o_ai vector signed short
+vec_cmpeq_idx_cc(vector signed short __a, vector signed short __b, int *__cc) {
+ return (vector signed short)
+ __builtin_s390_vfeehs((vector unsigned short)__a,
+ (vector unsigned short)__b, __cc);
+}
+
+static inline __ATTRS_o_ai vector unsigned short
+vec_cmpeq_idx_cc(vector bool short __a, vector bool short __b, int *__cc) {
+ return __builtin_s390_vfeehs((vector unsigned short)__a,
+ (vector unsigned short)__b, __cc);
+}
+
+static inline __ATTRS_o_ai vector unsigned short
+vec_cmpeq_idx_cc(vector unsigned short __a, vector unsigned short __b,
+ int *__cc) {
+ return __builtin_s390_vfeehs(__a, __b, __cc);
+}
+
+static inline __ATTRS_o_ai vector signed int
+vec_cmpeq_idx_cc(vector signed int __a, vector signed int __b, int *__cc) {
+ return (vector signed int)
+ __builtin_s390_vfeefs((vector unsigned int)__a,
+ (vector unsigned int)__b, __cc);
+}
+
+static inline __ATTRS_o_ai vector unsigned int
+vec_cmpeq_idx_cc(vector bool int __a, vector bool int __b, int *__cc) {
+ return __builtin_s390_vfeefs((vector unsigned int)__a,
+ (vector unsigned int)__b, __cc);
+}
+
+static inline __ATTRS_o_ai vector unsigned int
+vec_cmpeq_idx_cc(vector unsigned int __a, vector unsigned int __b, int *__cc) {
+ return __builtin_s390_vfeefs(__a, __b, __cc);
+}
+
+/*-- vec_cmpeq_or_0_idx -----------------------------------------------------*/
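+// As above, but the search also ends at the first zero element, whose
+// index is returned if it precedes any match (VFEEZ).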
+
+static inline __ATTRS_o_ai vector signed char
+vec_cmpeq_or_0_idx(vector signed char __a, vector signed char __b) {
+ return (vector signed char)
+ __builtin_s390_vfeezb((vector unsigned char)__a,
+ (vector unsigned char)__b);
+}
+
+static inline __ATTRS_o_ai vector unsigned char
+vec_cmpeq_or_0_idx(vector bool char __a, vector bool char __b) {
+ return __builtin_s390_vfeezb((vector unsigned char)__a,
+ (vector unsigned char)__b);
+}
+
+static inline __ATTRS_o_ai vector unsigned char
+vec_cmpeq_or_0_idx(vector unsigned char __a, vector unsigned char __b) {
+ return __builtin_s390_vfeezb(__a, __b);
+}
+
+static inline __ATTRS_o_ai vector signed short
+vec_cmpeq_or_0_idx(vector signed short __a, vector signed short __b) {
+ return (vector signed short)
+ __builtin_s390_vfeezh((vector unsigned short)__a,
+ (vector unsigned short)__b);
+}
+
+static inline __ATTRS_o_ai vector unsigned short
+vec_cmpeq_or_0_idx(vector bool short __a, vector bool short __b) {
+ return __builtin_s390_vfeezh((vector unsigned short)__a,
+ (vector unsigned short)__b);
+}
+
+static inline __ATTRS_o_ai vector unsigned short
+vec_cmpeq_or_0_idx(vector unsigned short __a, vector unsigned short __b) {
+ return __builtin_s390_vfeezh(__a, __b);
+}
+
+static inline __ATTRS_o_ai vector signed int
+vec_cmpeq_or_0_idx(vector signed int __a, vector signed int __b) {
+ return (vector signed int)
+ __builtin_s390_vfeezf((vector unsigned int)__a,
+ (vector unsigned int)__b);
+}
+
+static inline __ATTRS_o_ai vector unsigned int
+vec_cmpeq_or_0_idx(vector bool int __a, vector bool int __b) {
+ return __builtin_s390_vfeezf((vector unsigned int)__a,
+ (vector unsigned int)__b);
+}
+
+static inline __ATTRS_o_ai vector unsigned int
+vec_cmpeq_or_0_idx(vector unsigned int __a, vector unsigned int __b) {
+ return __builtin_s390_vfeezf(__a, __b);
+}
+
+/*-- vec_cmpeq_or_0_idx_cc --------------------------------------------------*/
+
+static inline __ATTRS_o_ai vector signed char
+vec_cmpeq_or_0_idx_cc(vector signed char __a, vector signed char __b,
+ int *__cc) {
+ return (vector signed char)
+ __builtin_s390_vfeezbs((vector unsigned char)__a,
+ (vector unsigned char)__b, __cc);
+}
+
+static inline __ATTRS_o_ai vector unsigned char
+vec_cmpeq_or_0_idx_cc(vector bool char __a, vector bool char __b, int *__cc) {
+ return __builtin_s390_vfeezbs((vector unsigned char)__a,
+ (vector unsigned char)__b, __cc);
+}
+
+static inline __ATTRS_o_ai vector unsigned char
+vec_cmpeq_or_0_idx_cc(vector unsigned char __a, vector unsigned char __b,
+ int *__cc) {
+ return __builtin_s390_vfeezbs(__a, __b, __cc);
+}
+
+static inline __ATTRS_o_ai vector signed short
+vec_cmpeq_or_0_idx_cc(vector signed short __a, vector signed short __b,
+ int *__cc) {
+ return (vector signed short)
+ __builtin_s390_vfeezhs((vector unsigned short)__a,
+ (vector unsigned short)__b, __cc);
+}
+
+static inline __ATTRS_o_ai vector unsigned short
+vec_cmpeq_or_0_idx_cc(vector bool short __a, vector bool short __b, int *__cc) {
+ return __builtin_s390_vfeezhs((vector unsigned short)__a,
+ (vector unsigned short)__b, __cc);
+}
+
+static inline __ATTRS_o_ai vector unsigned short
+vec_cmpeq_or_0_idx_cc(vector unsigned short __a, vector unsigned short __b,
+ int *__cc) {
+ return __builtin_s390_vfeezhs(__a, __b, __cc);
+}
+
+static inline __ATTRS_o_ai vector signed int
+vec_cmpeq_or_0_idx_cc(vector signed int __a, vector signed int __b, int *__cc) {
+ return (vector signed int)
+ __builtin_s390_vfeezfs((vector unsigned int)__a,
+ (vector unsigned int)__b, __cc);
+}
+
+static inline __ATTRS_o_ai vector unsigned int
+vec_cmpeq_or_0_idx_cc(vector bool int __a, vector bool int __b, int *__cc) {
+ return __builtin_s390_vfeezfs((vector unsigned int)__a,
+ (vector unsigned int)__b, __cc);
+}
+
+static inline __ATTRS_o_ai vector unsigned int
+vec_cmpeq_or_0_idx_cc(vector unsigned int __a, vector unsigned int __b,
+ int *__cc) {
+ return __builtin_s390_vfeezfs(__a, __b, __cc);
+}
+
+/*-- vec_cmpne_idx ----------------------------------------------------------*/
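+// vec_cmpne_idx (VFENE) locates the first position at which __a and __b
+// differ; the same _cc and _or_0_ conventions apply below.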
+
+static inline __ATTRS_o_ai vector signed char
+vec_cmpne_idx(vector signed char __a, vector signed char __b) {
+ return (vector signed char)
+ __builtin_s390_vfeneb((vector unsigned char)__a,
+ (vector unsigned char)__b);
+}
+
+static inline __ATTRS_o_ai vector unsigned char
+vec_cmpne_idx(vector bool char __a, vector bool char __b) {
+ return __builtin_s390_vfeneb((vector unsigned char)__a,
+ (vector unsigned char)__b);
+}
+
+static inline __ATTRS_o_ai vector unsigned char
+vec_cmpne_idx(vector unsigned char __a, vector unsigned char __b) {
+ return __builtin_s390_vfeneb(__a, __b);
+}
+
+static inline __ATTRS_o_ai vector signed short
+vec_cmpne_idx(vector signed short __a, vector signed short __b) {
+ return (vector signed short)
+ __builtin_s390_vfeneh((vector unsigned short)__a,
+ (vector unsigned short)__b);
+}
+
+static inline __ATTRS_o_ai vector unsigned short
+vec_cmpne_idx(vector bool short __a, vector bool short __b) {
+ return __builtin_s390_vfeneh((vector unsigned short)__a,
+ (vector unsigned short)__b);
+}
+
+static inline __ATTRS_o_ai vector unsigned short
+vec_cmpne_idx(vector unsigned short __a, vector unsigned short __b) {
+ return __builtin_s390_vfeneh(__a, __b);
+}
+
+static inline __ATTRS_o_ai vector signed int
+vec_cmpne_idx(vector signed int __a, vector signed int __b) {
+ return (vector signed int)
+ __builtin_s390_vfenef((vector unsigned int)__a,
+ (vector unsigned int)__b);
+}
+
+static inline __ATTRS_o_ai vector unsigned int
+vec_cmpne_idx(vector bool int __a, vector bool int __b) {
+ return __builtin_s390_vfenef((vector unsigned int)__a,
+ (vector unsigned int)__b);
+}
+
+static inline __ATTRS_o_ai vector unsigned int
+vec_cmpne_idx(vector unsigned int __a, vector unsigned int __b) {
+ return __builtin_s390_vfenef(__a, __b);
+}
+
+/*-- vec_cmpne_idx_cc -------------------------------------------------------*/
+
+static inline __ATTRS_o_ai vector signed char
+vec_cmpne_idx_cc(vector signed char __a, vector signed char __b, int *__cc) {
+ return (vector signed char)
+ __builtin_s390_vfenebs((vector unsigned char)__a,
+ (vector unsigned char)__b, __cc);
+}
+
+static inline __ATTRS_o_ai vector unsigned char
+vec_cmpne_idx_cc(vector bool char __a, vector bool char __b, int *__cc) {
+ return __builtin_s390_vfenebs((vector unsigned char)__a,
+ (vector unsigned char)__b, __cc);
+}
+
+static inline __ATTRS_o_ai vector unsigned char
+vec_cmpne_idx_cc(vector unsigned char __a, vector unsigned char __b,
+ int *__cc) {
+ return __builtin_s390_vfenebs(__a, __b, __cc);
+}
+
+static inline __ATTRS_o_ai vector signed short
+vec_cmpne_idx_cc(vector signed short __a, vector signed short __b, int *__cc) {
+ return (vector signed short)
+ __builtin_s390_vfenehs((vector unsigned short)__a,
+ (vector unsigned short)__b, __cc);
+}
+
+static inline __ATTRS_o_ai vector unsigned short
+vec_cmpne_idx_cc(vector bool short __a, vector bool short __b, int *__cc) {
+ return __builtin_s390_vfenehs((vector unsigned short)__a,
+ (vector unsigned short)__b, __cc);
+}
+
+static inline __ATTRS_o_ai vector unsigned short
+vec_cmpne_idx_cc(vector unsigned short __a, vector unsigned short __b,
+ int *__cc) {
+ return __builtin_s390_vfenehs(__a, __b, __cc);
+}
+
+static inline __ATTRS_o_ai vector signed int
+vec_cmpne_idx_cc(vector signed int __a, vector signed int __b, int *__cc) {
+ return (vector signed int)
+ __builtin_s390_vfenefs((vector unsigned int)__a,
+ (vector unsigned int)__b, __cc);
+}
+
+static inline __ATTRS_o_ai vector unsigned int
+vec_cmpne_idx_cc(vector bool int __a, vector bool int __b, int *__cc) {
+ return __builtin_s390_vfenefs((vector unsigned int)__a,
+ (vector unsigned int)__b, __cc);
+}
+
+static inline __ATTRS_o_ai vector unsigned int
+vec_cmpne_idx_cc(vector unsigned int __a, vector unsigned int __b, int *__cc) {
+ return __builtin_s390_vfenefs(__a, __b, __cc);
+}
+
+/*-- vec_cmpne_or_0_idx -----------------------------------------------------*/
+
+static inline __ATTRS_o_ai vector signed char
+vec_cmpne_or_0_idx(vector signed char __a, vector signed char __b) {
+ return (vector signed char)
+ __builtin_s390_vfenezb((vector unsigned char)__a,
+ (vector unsigned char)__b);
+}
+
+static inline __ATTRS_o_ai vector unsigned char
+vec_cmpne_or_0_idx(vector bool char __a, vector bool char __b) {
+ return __builtin_s390_vfenezb((vector unsigned char)__a,
+ (vector unsigned char)__b);
+}
+
+static inline __ATTRS_o_ai vector unsigned char
+vec_cmpne_or_0_idx(vector unsigned char __a, vector unsigned char __b) {
+ return __builtin_s390_vfenezb(__a, __b);
+}
+
+static inline __ATTRS_o_ai vector signed short
+vec_cmpne_or_0_idx(vector signed short __a, vector signed short __b) {
+ return (vector signed short)
+ __builtin_s390_vfenezh((vector unsigned short)__a,
+ (vector unsigned short)__b);
+}
+
+static inline __ATTRS_o_ai vector unsigned short
+vec_cmpne_or_0_idx(vector bool short __a, vector bool short __b) {
+ return __builtin_s390_vfenezh((vector unsigned short)__a,
+ (vector unsigned short)__b);
+}
+
+static inline __ATTRS_o_ai vector unsigned short
+vec_cmpne_or_0_idx(vector unsigned short __a, vector unsigned short __b) {
+ return __builtin_s390_vfenezh(__a, __b);
+}
+
+static inline __ATTRS_o_ai vector signed int
+vec_cmpne_or_0_idx(vector signed int __a, vector signed int __b) {
+ return (vector signed int)
+ __builtin_s390_vfenezf((vector unsigned int)__a,
+ (vector unsigned int)__b);
+}
+
+static inline __ATTRS_o_ai vector unsigned int
+vec_cmpne_or_0_idx(vector bool int __a, vector bool int __b) {
+ return __builtin_s390_vfenezf((vector unsigned int)__a,
+ (vector unsigned int)__b);
+}
+
+static inline __ATTRS_o_ai vector unsigned int
+vec_cmpne_or_0_idx(vector unsigned int __a, vector unsigned int __b) {
+ return __builtin_s390_vfenezf(__a, __b);
+}
+
+/*-- vec_cmpne_or_0_idx_cc --------------------------------------------------*/
+
+static inline __ATTRS_o_ai vector signed char
+vec_cmpne_or_0_idx_cc(vector signed char __a, vector signed char __b,
+ int *__cc) {
+ return (vector signed char)
+ __builtin_s390_vfenezbs((vector unsigned char)__a,
+ (vector unsigned char)__b, __cc);
+}
+
+static inline __ATTRS_o_ai vector unsigned char
+vec_cmpne_or_0_idx_cc(vector bool char __a, vector bool char __b, int *__cc) {
+ return __builtin_s390_vfenezbs((vector unsigned char)__a,
+ (vector unsigned char)__b, __cc);
+}
+
+static inline __ATTRS_o_ai vector unsigned char
+vec_cmpne_or_0_idx_cc(vector unsigned char __a, vector unsigned char __b,
+ int *__cc) {
+ return __builtin_s390_vfenezbs(__a, __b, __cc);
+}
+
+static inline __ATTRS_o_ai vector signed short
+vec_cmpne_or_0_idx_cc(vector signed short __a, vector signed short __b,
+ int *__cc) {
+ return (vector signed short)
+ __builtin_s390_vfenezhs((vector unsigned short)__a,
+ (vector unsigned short)__b, __cc);
+}
+
+static inline __ATTRS_o_ai vector unsigned short
+vec_cmpne_or_0_idx_cc(vector bool short __a, vector bool short __b, int *__cc) {
+ return __builtin_s390_vfenezhs((vector unsigned short)__a,
+ (vector unsigned short)__b, __cc);
+}
+
+static inline __ATTRS_o_ai vector unsigned short
+vec_cmpne_or_0_idx_cc(vector unsigned short __a, vector unsigned short __b,
+ int *__cc) {
+ return __builtin_s390_vfenezhs(__a, __b, __cc);
+}
+
+static inline __ATTRS_o_ai vector signed int
+vec_cmpne_or_0_idx_cc(vector signed int __a, vector signed int __b, int *__cc) {
+ return (vector signed int)
+ __builtin_s390_vfenezfs((vector unsigned int)__a,
+ (vector unsigned int)__b, __cc);
+}
+
+static inline __ATTRS_o_ai vector unsigned int
+vec_cmpne_or_0_idx_cc(vector bool int __a, vector bool int __b, int *__cc) {
+ return __builtin_s390_vfenezfs((vector unsigned int)__a,
+ (vector unsigned int)__b, __cc);
+}
+
+static inline __ATTRS_o_ai vector unsigned int
+vec_cmpne_or_0_idx_cc(vector unsigned int __a, vector unsigned int __b,
+ int *__cc) {
+ return __builtin_s390_vfenezfs(__a, __b, __cc);
+}
+
+/*-- vec_cmprg --------------------------------------------------------------*/
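+// vec_cmprg and its relatives wrap VECTOR STRING RANGE COMPARE (VSTRC):
+// each element of __a is tested against the bound values in __b under
+// the per-element comparison controls in __c.  The trailing immediate is
+// a flag field: 4 requests a boolean-mask result, 0 an index result, and
+// adding 8 inverts the test (vec_cmpnrg below); the *s builtin variants
+// also set the condition code for the _cc functions.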
+
+static inline __ATTRS_o_ai vector bool char
+vec_cmprg(vector unsigned char __a, vector unsigned char __b,
+ vector unsigned char __c) {
+ return (vector bool char)__builtin_s390_vstrcb(__a, __b, __c, 4);
+}
+
+static inline __ATTRS_o_ai vector bool short
+vec_cmprg(vector unsigned short __a, vector unsigned short __b,
+ vector unsigned short __c) {
+ return (vector bool short)__builtin_s390_vstrch(__a, __b, __c, 4);
+}
+
+static inline __ATTRS_o_ai vector bool int
+vec_cmprg(vector unsigned int __a, vector unsigned int __b,
+ vector unsigned int __c) {
+ return (vector bool int)__builtin_s390_vstrcf(__a, __b, __c, 4);
+}
+
+/*-- vec_cmprg_cc -----------------------------------------------------------*/
+
+static inline __ATTRS_o_ai vector bool char
+vec_cmprg_cc(vector unsigned char __a, vector unsigned char __b,
+ vector unsigned char __c, int *__cc) {
+ return (vector bool char)__builtin_s390_vstrcbs(__a, __b, __c, 4, __cc);
+}
+
+static inline __ATTRS_o_ai vector bool short
+vec_cmprg_cc(vector unsigned short __a, vector unsigned short __b,
+ vector unsigned short __c, int *__cc) {
+ return (vector bool short)__builtin_s390_vstrchs(__a, __b, __c, 4, __cc);
+}
+
+static inline __ATTRS_o_ai vector bool int
+vec_cmprg_cc(vector unsigned int __a, vector unsigned int __b,
+ vector unsigned int __c, int *__cc) {
+ return (vector bool int)__builtin_s390_vstrcfs(__a, __b, __c, 4, __cc);
+}
+
+/*-- vec_cmprg_idx ----------------------------------------------------------*/
+
+static inline __ATTRS_o_ai vector unsigned char
+vec_cmprg_idx(vector unsigned char __a, vector unsigned char __b,
+ vector unsigned char __c) {
+ return __builtin_s390_vstrcb(__a, __b, __c, 0);
+}
+
+static inline __ATTRS_o_ai vector unsigned short
+vec_cmprg_idx(vector unsigned short __a, vector unsigned short __b,
+ vector unsigned short __c) {
+ return __builtin_s390_vstrch(__a, __b, __c, 0);
+}
+
+static inline __ATTRS_o_ai vector unsigned int
+vec_cmprg_idx(vector unsigned int __a, vector unsigned int __b,
+ vector unsigned int __c) {
+ return __builtin_s390_vstrcf(__a, __b, __c, 0);
+}
+
+/*-- vec_cmprg_idx_cc -------------------------------------------------------*/
+
+static inline __ATTRS_o_ai vector unsigned char
+vec_cmprg_idx_cc(vector unsigned char __a, vector unsigned char __b,
+ vector unsigned char __c, int *__cc) {
+ return __builtin_s390_vstrcbs(__a, __b, __c, 0, __cc);
+}
+
+static inline __ATTRS_o_ai vector unsigned short
+vec_cmprg_idx_cc(vector unsigned short __a, vector unsigned short __b,
+ vector unsigned short __c, int *__cc) {
+ return __builtin_s390_vstrchs(__a, __b, __c, 0, __cc);
+}
+
+static inline __ATTRS_o_ai vector unsigned int
+vec_cmprg_idx_cc(vector unsigned int __a, vector unsigned int __b,
+ vector unsigned int __c, int *__cc) {
+ return __builtin_s390_vstrcfs(__a, __b, __c, 0, __cc);
+}
+
+/*-- vec_cmprg_or_0_idx -----------------------------------------------------*/
+
+static inline __ATTRS_o_ai vector unsigned char
+vec_cmprg_or_0_idx(vector unsigned char __a, vector unsigned char __b,
+ vector unsigned char __c) {
+ return __builtin_s390_vstrczb(__a, __b, __c, 0);
+}
+
+static inline __ATTRS_o_ai vector unsigned short
+vec_cmprg_or_0_idx(vector unsigned short __a, vector unsigned short __b,
+ vector unsigned short __c) {
+ return __builtin_s390_vstrczh(__a, __b, __c, 0);
+}
+
+static inline __ATTRS_o_ai vector unsigned int
+vec_cmprg_or_0_idx(vector unsigned int __a, vector unsigned int __b,
+ vector unsigned int __c) {
+ return __builtin_s390_vstrczf(__a, __b, __c, 0);
+}
+
+/*-- vec_cmprg_or_0_idx_cc --------------------------------------------------*/
+
+static inline __ATTRS_o_ai vector unsigned char
+vec_cmprg_or_0_idx_cc(vector unsigned char __a, vector unsigned char __b,
+ vector unsigned char __c, int *__cc) {
+ return __builtin_s390_vstrczbs(__a, __b, __c, 0, __cc);
+}
+
+static inline __ATTRS_o_ai vector unsigned short
+vec_cmprg_or_0_idx_cc(vector unsigned short __a, vector unsigned short __b,
+ vector unsigned short __c, int *__cc) {
+ return __builtin_s390_vstrczhs(__a, __b, __c, 0, __cc);
+}
+
+static inline __ATTRS_o_ai vector unsigned int
+vec_cmprg_or_0_idx_cc(vector unsigned int __a, vector unsigned int __b,
+ vector unsigned int __c, int *__cc) {
+ return __builtin_s390_vstrczfs(__a, __b, __c, 0, __cc);
+}
+
+/*-- vec_cmpnrg -------------------------------------------------------------*/
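+// The inverted form: an element qualifies when it lies outside every
+// range described by __b and __c (flag value 12 = invert + mask).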
+
+static inline __ATTRS_o_ai vector bool char
+vec_cmpnrg(vector unsigned char __a, vector unsigned char __b,
+ vector unsigned char __c) {
+ return (vector bool char)__builtin_s390_vstrcb(__a, __b, __c, 12);
+}
+
+static inline __ATTRS_o_ai vector bool short
+vec_cmpnrg(vector unsigned short __a, vector unsigned short __b,
+ vector unsigned short __c) {
+ return (vector bool short)__builtin_s390_vstrch(__a, __b, __c, 12);
+}
+
+static inline __ATTRS_o_ai vector bool int
+vec_cmpnrg(vector unsigned int __a, vector unsigned int __b,
+ vector unsigned int __c) {
+ return (vector bool int)__builtin_s390_vstrcf(__a, __b, __c, 12);
+}
+
+/*-- vec_cmpnrg_cc ----------------------------------------------------------*/
+
+static inline __ATTRS_o_ai vector bool char
+vec_cmpnrg_cc(vector unsigned char __a, vector unsigned char __b,
+ vector unsigned char __c, int *__cc) {
+ return (vector bool char)__builtin_s390_vstrcbs(__a, __b, __c, 12, __cc);
+}
+
+static inline __ATTRS_o_ai vector bool short
+vec_cmpnrg_cc(vector unsigned short __a, vector unsigned short __b,
+ vector unsigned short __c, int *__cc) {
+ return (vector bool short)__builtin_s390_vstrchs(__a, __b, __c, 12, __cc);
+}
+
+static inline __ATTRS_o_ai vector bool int
+vec_cmpnrg_cc(vector unsigned int __a, vector unsigned int __b,
+ vector unsigned int __c, int *__cc) {
+ return (vector bool int)__builtin_s390_vstrcfs(__a, __b, __c, 12, __cc);
+}
+
+/*-- vec_cmpnrg_idx ---------------------------------------------------------*/
+
+static inline __ATTRS_o_ai vector unsigned char
+vec_cmpnrg_idx(vector unsigned char __a, vector unsigned char __b,
+ vector unsigned char __c) {
+ return __builtin_s390_vstrcb(__a, __b, __c, 8);
+}
+
+static inline __ATTRS_o_ai vector unsigned short
+vec_cmpnrg_idx(vector unsigned short __a, vector unsigned short __b,
+ vector unsigned short __c) {
+ return __builtin_s390_vstrch(__a, __b, __c, 8);
+}
+
+static inline __ATTRS_o_ai vector unsigned int
+vec_cmpnrg_idx(vector unsigned int __a, vector unsigned int __b,
+ vector unsigned int __c) {
+ return __builtin_s390_vstrcf(__a, __b, __c, 8);
+}
+
+/*-- vec_cmpnrg_idx_cc ------------------------------------------------------*/
+
+static inline __ATTRS_o_ai vector unsigned char
+vec_cmpnrg_idx_cc(vector unsigned char __a, vector unsigned char __b,
+ vector unsigned char __c, int *__cc) {
+ return __builtin_s390_vstrcbs(__a, __b, __c, 8, __cc);
+}
+
+static inline __ATTRS_o_ai vector unsigned short
+vec_cmpnrg_idx_cc(vector unsigned short __a, vector unsigned short __b,
+ vector unsigned short __c, int *__cc) {
+ return __builtin_s390_vstrchs(__a, __b, __c, 8, __cc);
+}
+
+static inline __ATTRS_o_ai vector unsigned int
+vec_cmpnrg_idx_cc(vector unsigned int __a, vector unsigned int __b,
+ vector unsigned int __c, int *__cc) {
+ return __builtin_s390_vstrcfs(__a, __b, __c, 8, __cc);
+}
+
+/*-- vec_cmpnrg_or_0_idx ----------------------------------------------------*/
+
+static inline __ATTRS_o_ai vector unsigned char
+vec_cmpnrg_or_0_idx(vector unsigned char __a, vector unsigned char __b,
+ vector unsigned char __c) {
+ return __builtin_s390_vstrczb(__a, __b, __c, 8);
+}
+
+static inline __ATTRS_o_ai vector unsigned short
+vec_cmpnrg_or_0_idx(vector unsigned short __a, vector unsigned short __b,
+ vector unsigned short __c) {
+ return __builtin_s390_vstrczh(__a, __b, __c, 8);
+}
+
+static inline __ATTRS_o_ai vector unsigned int
+vec_cmpnrg_or_0_idx(vector unsigned int __a, vector unsigned int __b,
+ vector unsigned int __c) {
+ return __builtin_s390_vstrczf(__a, __b, __c, 8);
+}
+
+/*-- vec_cmpnrg_or_0_idx_cc -------------------------------------------------*/
+
+static inline __ATTRS_o_ai vector unsigned char
+vec_cmpnrg_or_0_idx_cc(vector unsigned char __a, vector unsigned char __b,
+ vector unsigned char __c, int *__cc) {
+ return __builtin_s390_vstrczbs(__a, __b, __c, 8, __cc);
+}
+
+static inline __ATTRS_o_ai vector unsigned short
+vec_cmpnrg_or_0_idx_cc(vector unsigned short __a, vector unsigned short __b,
+ vector unsigned short __c, int *__cc) {
+ return __builtin_s390_vstrczhs(__a, __b, __c, 8, __cc);
+}
+
+static inline __ATTRS_o_ai vector unsigned int
+vec_cmpnrg_or_0_idx_cc(vector unsigned int __a, vector unsigned int __b,
+ vector unsigned int __c, int *__cc) {
+ return __builtin_s390_vstrczfs(__a, __b, __c, 8, __cc);
+}
+
+/*-- vec_find_any_eq --------------------------------------------------------*/
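+// vec_find_any_eq and its relatives wrap VECTOR FIND ANY ELEMENT EQUAL
+// (VFAE): the result marks each position of __a whose value also occurs
+// somewhere in __b, using the same 0/4/8/12 flag scheme as VSTRC above.
+// An illustrative use (an assumption, not from the original header):
+// with delims a vector unsigned char filled with a delimiter set (unused
+// slots padded by repeating one delimiter), vec_find_any_eq(text, delims)
+// yields a mask over the delimiter positions in text.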
+
+static inline __ATTRS_o_ai vector bool char
+vec_find_any_eq(vector signed char __a, vector signed char __b) {
+ return (vector bool char)
+ __builtin_s390_vfaeb((vector unsigned char)__a,
+ (vector unsigned char)__b, 4);
+}
+
+static inline __ATTRS_o_ai vector bool char
+vec_find_any_eq(vector bool char __a, vector bool char __b) {
+ return (vector bool char)
+ __builtin_s390_vfaeb((vector unsigned char)__a,
+ (vector unsigned char)__b, 4);
+}
+
+static inline __ATTRS_o_ai vector bool char
+vec_find_any_eq(vector unsigned char __a, vector unsigned char __b) {
+ return (vector bool char)__builtin_s390_vfaeb(__a, __b, 4);
+}
+
+static inline __ATTRS_o_ai vector bool short
+vec_find_any_eq(vector signed short __a, vector signed short __b) {
+ return (vector bool short)
+ __builtin_s390_vfaeh((vector unsigned short)__a,
+ (vector unsigned short)__b, 4);
+}
+
+static inline __ATTRS_o_ai vector bool short
+vec_find_any_eq(vector bool short __a, vector bool short __b) {
+ return (vector bool short)
+ __builtin_s390_vfaeh((vector unsigned short)__a,
+ (vector unsigned short)__b, 4);
+}
+
+static inline __ATTRS_o_ai vector bool short
+vec_find_any_eq(vector unsigned short __a, vector unsigned short __b) {
+ return (vector bool short)__builtin_s390_vfaeh(__a, __b, 4);
+}
+
+static inline __ATTRS_o_ai vector bool int
+vec_find_any_eq(vector signed int __a, vector signed int __b) {
+ return (vector bool int)
+ __builtin_s390_vfaef((vector unsigned int)__a,
+ (vector unsigned int)__b, 4);
+}
+
+static inline __ATTRS_o_ai vector bool int
+vec_find_any_eq(vector bool int __a, vector bool int __b) {
+ return (vector bool int)
+ __builtin_s390_vfaef((vector unsigned int)__a,
+ (vector unsigned int)__b, 4);
+}
+
+static inline __ATTRS_o_ai vector bool int
+vec_find_any_eq(vector unsigned int __a, vector unsigned int __b) {
+ return (vector bool int)__builtin_s390_vfaef(__a, __b, 4);
+}
+
+/*-- vec_find_any_eq_cc -----------------------------------------------------*/
+
+static inline __ATTRS_o_ai vector bool char
+vec_find_any_eq_cc(vector signed char __a, vector signed char __b, int *__cc) {
+ return (vector bool char)
+ __builtin_s390_vfaebs((vector unsigned char)__a,
+ (vector unsigned char)__b, 4, __cc);
+}
+
+static inline __ATTRS_o_ai vector bool char
+vec_find_any_eq_cc(vector bool char __a, vector bool char __b, int *__cc) {
+ return (vector bool char)
+ __builtin_s390_vfaebs((vector unsigned char)__a,
+ (vector unsigned char)__b, 4, __cc);
+}
+
+static inline __ATTRS_o_ai vector bool char
+vec_find_any_eq_cc(vector unsigned char __a, vector unsigned char __b,
+ int *__cc) {
+ return (vector bool char)__builtin_s390_vfaebs(__a, __b, 4, __cc);
+}
+
+static inline __ATTRS_o_ai vector bool short
+vec_find_any_eq_cc(vector signed short __a, vector signed short __b,
+ int *__cc) {
+ return (vector bool short)
+ __builtin_s390_vfaehs((vector unsigned short)__a,
+ (vector unsigned short)__b, 4, __cc);
+}
+
+static inline __ATTRS_o_ai vector bool short
+vec_find_any_eq_cc(vector bool short __a, vector bool short __b, int *__cc) {
+ return (vector bool short)
+ __builtin_s390_vfaehs((vector unsigned short)__a,
+ (vector unsigned short)__b, 4, __cc);
+}
+
+static inline __ATTRS_o_ai vector bool short
+vec_find_any_eq_cc(vector unsigned short __a, vector unsigned short __b,
+ int *__cc) {
+ return (vector bool short)__builtin_s390_vfaehs(__a, __b, 4, __cc);
+}
+
+static inline __ATTRS_o_ai vector bool int
+vec_find_any_eq_cc(vector signed int __a, vector signed int __b, int *__cc) {
+ return (vector bool int)
+ __builtin_s390_vfaefs((vector unsigned int)__a,
+ (vector unsigned int)__b, 4, __cc);
+}
+
+static inline __ATTRS_o_ai vector bool int
+vec_find_any_eq_cc(vector bool int __a, vector bool int __b, int *__cc) {
+ return (vector bool int)
+ __builtin_s390_vfaefs((vector unsigned int)__a,
+ (vector unsigned int)__b, 4, __cc);
+}
+
+static inline __ATTRS_o_ai vector bool int
+vec_find_any_eq_cc(vector unsigned int __a, vector unsigned int __b,
+ int *__cc) {
+ return (vector bool int)__builtin_s390_vfaefs(__a, __b, 4, __cc);
+}
+
+/*-- vec_find_any_eq_idx ----------------------------------------------------*/
+
+static inline __ATTRS_o_ai vector signed char
+vec_find_any_eq_idx(vector signed char __a, vector signed char __b) {
+ return (vector signed char)
+ __builtin_s390_vfaeb((vector unsigned char)__a,
+ (vector unsigned char)__b, 0);
+}
+
+static inline __ATTRS_o_ai vector unsigned char
+vec_find_any_eq_idx(vector bool char __a, vector bool char __b) {
+ return __builtin_s390_vfaeb((vector unsigned char)__a,
+ (vector unsigned char)__b, 0);
+}
+
+static inline __ATTRS_o_ai vector unsigned char
+vec_find_any_eq_idx(vector unsigned char __a, vector unsigned char __b) {
+ return __builtin_s390_vfaeb(__a, __b, 0);
+}
+
+static inline __ATTRS_o_ai vector signed short
+vec_find_any_eq_idx(vector signed short __a, vector signed short __b) {
+ return (vector signed short)
+ __builtin_s390_vfaeh((vector unsigned short)__a,
+ (vector unsigned short)__b, 0);
+}
+
+static inline __ATTRS_o_ai vector unsigned short
+vec_find_any_eq_idx(vector bool short __a, vector bool short __b) {
+ return __builtin_s390_vfaeh((vector unsigned short)__a,
+ (vector unsigned short)__b, 0);
+}
+
+static inline __ATTRS_o_ai vector unsigned short
+vec_find_any_eq_idx(vector unsigned short __a, vector unsigned short __b) {
+ return __builtin_s390_vfaeh(__a, __b, 0);
+}
+
+static inline __ATTRS_o_ai vector signed int
+vec_find_any_eq_idx(vector signed int __a, vector signed int __b) {
+ return (vector signed int)
+ __builtin_s390_vfaef((vector unsigned int)__a,
+ (vector unsigned int)__b, 0);
+}
+
+static inline __ATTRS_o_ai vector unsigned int
+vec_find_any_eq_idx(vector bool int __a, vector bool int __b) {
+ return __builtin_s390_vfaef((vector unsigned int)__a,
+ (vector unsigned int)__b, 0);
+}
+
+static inline __ATTRS_o_ai vector unsigned int
+vec_find_any_eq_idx(vector unsigned int __a, vector unsigned int __b) {
+ return __builtin_s390_vfaef(__a, __b, 0);
+}
+
+/*-- vec_find_any_eq_idx_cc -------------------------------------------------*/
+
+static inline __ATTRS_o_ai vector signed char
+vec_find_any_eq_idx_cc(vector signed char __a, vector signed char __b,
+ int *__cc) {
+ return (vector signed char)
+ __builtin_s390_vfaebs((vector unsigned char)__a,
+ (vector unsigned char)__b, 0, __cc);
+}
+
+static inline __ATTRS_o_ai vector unsigned char
+vec_find_any_eq_idx_cc(vector bool char __a, vector bool char __b, int *__cc) {
+ return __builtin_s390_vfaebs((vector unsigned char)__a,
+ (vector unsigned char)__b, 0, __cc);
+}
+
+static inline __ATTRS_o_ai vector unsigned char
+vec_find_any_eq_idx_cc(vector unsigned char __a, vector unsigned char __b,
+ int *__cc) {
+ return __builtin_s390_vfaebs(__a, __b, 0, __cc);
+}
+
+static inline __ATTRS_o_ai vector signed short
+vec_find_any_eq_idx_cc(vector signed short __a, vector signed short __b,
+ int *__cc) {
+ return (vector signed short)
+ __builtin_s390_vfaehs((vector unsigned short)__a,
+ (vector unsigned short)__b, 0, __cc);
+}
+
+static inline __ATTRS_o_ai vector unsigned short
+vec_find_any_eq_idx_cc(vector bool short __a, vector bool short __b,
+ int *__cc) {
+ return __builtin_s390_vfaehs((vector unsigned short)__a,
+ (vector unsigned short)__b, 0, __cc);
+}
+
+static inline __ATTRS_o_ai vector unsigned short
+vec_find_any_eq_idx_cc(vector unsigned short __a, vector unsigned short __b,
+ int *__cc) {
+ return __builtin_s390_vfaehs(__a, __b, 0, __cc);
+}
+
+static inline __ATTRS_o_ai vector signed int
+vec_find_any_eq_idx_cc(vector signed int __a, vector signed int __b,
+ int *__cc) {
+ return (vector signed int)
+ __builtin_s390_vfaefs((vector unsigned int)__a,
+ (vector unsigned int)__b, 0, __cc);
+}
+
+static inline __ATTRS_o_ai vector unsigned int
+vec_find_any_eq_idx_cc(vector bool int __a, vector bool int __b, int *__cc) {
+ return __builtin_s390_vfaefs((vector unsigned int)__a,
+ (vector unsigned int)__b, 0, __cc);
+}
+
+static inline __ATTRS_o_ai vector unsigned int
+vec_find_any_eq_idx_cc(vector unsigned int __a, vector unsigned int __b,
+ int *__cc) {
+ return __builtin_s390_vfaefs(__a, __b, 0, __cc);
+}
+
+/*-- vec_find_any_eq_or_0_idx -----------------------------------------------*/
+
+static inline __ATTRS_o_ai vector signed char
+vec_find_any_eq_or_0_idx(vector signed char __a, vector signed char __b) {
+ return (vector signed char)
+ __builtin_s390_vfaezb((vector unsigned char)__a,
+ (vector unsigned char)__b, 0);
+}
+
+static inline __ATTRS_o_ai vector unsigned char
+vec_find_any_eq_or_0_idx(vector bool char __a, vector bool char __b) {
+ return __builtin_s390_vfaezb((vector unsigned char)__a,
+ (vector unsigned char)__b, 0);
+}
+
+static inline __ATTRS_o_ai vector unsigned char
+vec_find_any_eq_or_0_idx(vector unsigned char __a, vector unsigned char __b) {
+ return __builtin_s390_vfaezb(__a, __b, 0);
+}
+
+static inline __ATTRS_o_ai vector signed short
+vec_find_any_eq_or_0_idx(vector signed short __a, vector signed short __b) {
+ return (vector signed short)
+ __builtin_s390_vfaezh((vector unsigned short)__a,
+ (vector unsigned short)__b, 0);
+}
+
+static inline __ATTRS_o_ai vector unsigned short
+vec_find_any_eq_or_0_idx(vector bool short __a, vector bool short __b) {
+ return __builtin_s390_vfaezh((vector unsigned short)__a,
+ (vector unsigned short)__b, 0);
+}
+
+static inline __ATTRS_o_ai vector unsigned short
+vec_find_any_eq_or_0_idx(vector unsigned short __a, vector unsigned short __b) {
+ return __builtin_s390_vfaezh(__a, __b, 0);
+}
+
+static inline __ATTRS_o_ai vector signed int
+vec_find_any_eq_or_0_idx(vector signed int __a, vector signed int __b) {
+ return (vector signed int)
+ __builtin_s390_vfaezf((vector unsigned int)__a,
+ (vector unsigned int)__b, 0);
+}
+
+static inline __ATTRS_o_ai vector unsigned int
+vec_find_any_eq_or_0_idx(vector bool int __a, vector bool int __b) {
+ return __builtin_s390_vfaezf((vector unsigned int)__a,
+ (vector unsigned int)__b, 0);
+}
+
+static inline __ATTRS_o_ai vector unsigned int
+vec_find_any_eq_or_0_idx(vector unsigned int __a, vector unsigned int __b) {
+ return __builtin_s390_vfaezf(__a, __b, 0);
+}
+
+/*-- vec_find_any_eq_or_0_idx_cc --------------------------------------------*/
+
+static inline __ATTRS_o_ai vector signed char
+vec_find_any_eq_or_0_idx_cc(vector signed char __a, vector signed char __b,
+ int *__cc) {
+ return (vector signed char)
+ __builtin_s390_vfaezbs((vector unsigned char)__a,
+ (vector unsigned char)__b, 0, __cc);
+}
+
+static inline __ATTRS_o_ai vector unsigned char
+vec_find_any_eq_or_0_idx_cc(vector bool char __a, vector bool char __b,
+ int *__cc) {
+ return __builtin_s390_vfaezbs((vector unsigned char)__a,
+ (vector unsigned char)__b, 0, __cc);
+}
+
+static inline __ATTRS_o_ai vector unsigned char
+vec_find_any_eq_or_0_idx_cc(vector unsigned char __a, vector unsigned char __b,
+ int *__cc) {
+ return __builtin_s390_vfaezbs(__a, __b, 0, __cc);
+}
+
+static inline __ATTRS_o_ai vector signed short
+vec_find_any_eq_or_0_idx_cc(vector signed short __a, vector signed short __b,
+ int *__cc) {
+ return (vector signed short)
+ __builtin_s390_vfaezhs((vector unsigned short)__a,
+ (vector unsigned short)__b, 0, __cc);
+}
+
+static inline __ATTRS_o_ai vector unsigned short
+vec_find_any_eq_or_0_idx_cc(vector bool short __a, vector bool short __b,
+ int *__cc) {
+ return __builtin_s390_vfaezhs((vector unsigned short)__a,
+ (vector unsigned short)__b, 0, __cc);
+}
+
+static inline __ATTRS_o_ai vector unsigned short
+vec_find_any_eq_or_0_idx_cc(vector unsigned short __a,
+ vector unsigned short __b, int *__cc) {
+ return __builtin_s390_vfaezhs(__a, __b, 0, __cc);
+}
+
+static inline __ATTRS_o_ai vector signed int
+vec_find_any_eq_or_0_idx_cc(vector signed int __a, vector signed int __b,
+ int *__cc) {
+ return (vector signed int)
+ __builtin_s390_vfaezfs((vector unsigned int)__a,
+ (vector unsigned int)__b, 0, __cc);
+}
+
+static inline __ATTRS_o_ai vector unsigned int
+vec_find_any_eq_or_0_idx_cc(vector bool int __a, vector bool int __b,
+ int *__cc) {
+ return __builtin_s390_vfaezfs((vector unsigned int)__a,
+ (vector unsigned int)__b, 0, __cc);
+}
+
+static inline __ATTRS_o_ai vector unsigned int
+vec_find_any_eq_or_0_idx_cc(vector unsigned int __a, vector unsigned int __b,
+ int *__cc) {
+ return __builtin_s390_vfaezfs(__a, __b, 0, __cc);
+}
+
+/*-- vec_find_any_ne --------------------------------------------------------*/
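+// The inverted comparison (flag 12): with __b a splat of a single value,
+// this marks the positions of __a not equal to that value.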
+
+static inline __ATTRS_o_ai vector bool char
+vec_find_any_ne(vector signed char __a, vector signed char __b) {
+ return (vector bool char)
+ __builtin_s390_vfaeb((vector unsigned char)__a,
+ (vector unsigned char)__b, 12);
+}
+
+static inline __ATTRS_o_ai vector bool char
+vec_find_any_ne(vector bool char __a, vector bool char __b) {
+ return (vector bool char)
+ __builtin_s390_vfaeb((vector unsigned char)__a,
+ (vector unsigned char)__b, 12);
+}
+
+static inline __ATTRS_o_ai vector bool char
+vec_find_any_ne(vector unsigned char __a, vector unsigned char __b) {
+ return (vector bool char)__builtin_s390_vfaeb(__a, __b, 12);
+}
+
+static inline __ATTRS_o_ai vector bool short
+vec_find_any_ne(vector signed short __a, vector signed short __b) {
+ return (vector bool short)
+ __builtin_s390_vfaeh((vector unsigned short)__a,
+ (vector unsigned short)__b, 12);
+}
+
+static inline __ATTRS_o_ai vector bool short
+vec_find_any_ne(vector bool short __a, vector bool short __b) {
+ return (vector bool short)
+ __builtin_s390_vfaeh((vector unsigned short)__a,
+ (vector unsigned short)__b, 12);
+}
+
+static inline __ATTRS_o_ai vector bool short
+vec_find_any_ne(vector unsigned short __a, vector unsigned short __b) {
+ return (vector bool short)__builtin_s390_vfaeh(__a, __b, 12);
+}
+
+static inline __ATTRS_o_ai vector bool int
+vec_find_any_ne(vector signed int __a, vector signed int __b) {
+ return (vector bool int)
+ __builtin_s390_vfaef((vector unsigned int)__a,
+ (vector unsigned int)__b, 12);
+}
+
+static inline __ATTRS_o_ai vector bool int
+vec_find_any_ne(vector bool int __a, vector bool int __b) {
+ return (vector bool int)
+ __builtin_s390_vfaef((vector unsigned int)__a,
+ (vector unsigned int)__b, 12);
+}
+
+static inline __ATTRS_o_ai vector bool int
+vec_find_any_ne(vector unsigned int __a, vector unsigned int __b) {
+ return (vector bool int)__builtin_s390_vfaef(__a, __b, 12);
+}
+
+/*-- vec_find_any_ne_cc -----------------------------------------------------*/
+
+static inline __ATTRS_o_ai vector bool char
+vec_find_any_ne_cc(vector signed char __a, vector signed char __b, int *__cc) {
+ return (vector bool char)
+ __builtin_s390_vfaebs((vector unsigned char)__a,
+ (vector unsigned char)__b, 12, __cc);
+}
+
+static inline __ATTRS_o_ai vector bool char
+vec_find_any_ne_cc(vector bool char __a, vector bool char __b, int *__cc) {
+ return (vector bool char)
+ __builtin_s390_vfaebs((vector unsigned char)__a,
+ (vector unsigned char)__b, 12, __cc);
+}
+
+static inline __ATTRS_o_ai vector bool char
+vec_find_any_ne_cc(vector unsigned char __a, vector unsigned char __b,
+ int *__cc) {
+ return (vector bool char)__builtin_s390_vfaebs(__a, __b, 12, __cc);
+}
+
+static inline __ATTRS_o_ai vector bool short
+vec_find_any_ne_cc(vector signed short __a, vector signed short __b,
+ int *__cc) {
+ return (vector bool short)
+ __builtin_s390_vfaehs((vector unsigned short)__a,
+ (vector unsigned short)__b, 12, __cc);
+}
+
+static inline __ATTRS_o_ai vector bool short
+vec_find_any_ne_cc(vector bool short __a, vector bool short __b, int *__cc) {
+ return (vector bool short)
+ __builtin_s390_vfaehs((vector unsigned short)__a,
+ (vector unsigned short)__b, 12, __cc);
+}
+
+static inline __ATTRS_o_ai vector bool short
+vec_find_any_ne_cc(vector unsigned short __a, vector unsigned short __b,
+ int *__cc) {
+ return (vector bool short)__builtin_s390_vfaehs(__a, __b, 12, __cc);
+}
+
+static inline __ATTRS_o_ai vector bool int
+vec_find_any_ne_cc(vector signed int __a, vector signed int __b, int *__cc) {
+ return (vector bool int)
+ __builtin_s390_vfaefs((vector unsigned int)__a,
+ (vector unsigned int)__b, 12, __cc);
+}
+
+static inline __ATTRS_o_ai vector bool int
+vec_find_any_ne_cc(vector bool int __a, vector bool int __b, int *__cc) {
+ return (vector bool int)
+ __builtin_s390_vfaefs((vector unsigned int)__a,
+ (vector unsigned int)__b, 12, __cc);
+}
+
+static inline __ATTRS_o_ai vector bool int
+vec_find_any_ne_cc(vector unsigned int __a, vector unsigned int __b,
+ int *__cc) {
+ return (vector bool int)__builtin_s390_vfaefs(__a, __b, 12, __cc);
+}
+
+/*-- vec_find_any_ne_idx ----------------------------------------------------*/
+
+static inline __ATTRS_o_ai vector signed char
+vec_find_any_ne_idx(vector signed char __a, vector signed char __b) {
+ return (vector signed char)
+ __builtin_s390_vfaeb((vector unsigned char)__a,
+ (vector unsigned char)__b, 8);
+}
+
+static inline __ATTRS_o_ai vector unsigned char
+vec_find_any_ne_idx(vector bool char __a, vector bool char __b) {
+ return __builtin_s390_vfaeb((vector unsigned char)__a,
+ (vector unsigned char)__b, 8);
+}
+
+static inline __ATTRS_o_ai vector unsigned char
+vec_find_any_ne_idx(vector unsigned char __a, vector unsigned char __b) {
+ return __builtin_s390_vfaeb(__a, __b, 8);
+}
+
+static inline __ATTRS_o_ai vector signed short
+vec_find_any_ne_idx(vector signed short __a, vector signed short __b) {
+ return (vector signed short)
+ __builtin_s390_vfaeh((vector unsigned short)__a,
+ (vector unsigned short)__b, 8);
+}
+
+static inline __ATTRS_o_ai vector unsigned short
+vec_find_any_ne_idx(vector bool short __a, vector bool short __b) {
+ return __builtin_s390_vfaeh((vector unsigned short)__a,
+ (vector unsigned short)__b, 8);
+}
+
+static inline __ATTRS_o_ai vector unsigned short
+vec_find_any_ne_idx(vector unsigned short __a, vector unsigned short __b) {
+ return __builtin_s390_vfaeh(__a, __b, 8);
+}
+
+static inline __ATTRS_o_ai vector signed int
+vec_find_any_ne_idx(vector signed int __a, vector signed int __b) {
+ return (vector signed int)
+ __builtin_s390_vfaef((vector unsigned int)__a,
+ (vector unsigned int)__b, 8);
+}
+
+static inline __ATTRS_o_ai vector unsigned int
+vec_find_any_ne_idx(vector bool int __a, vector bool int __b) {
+ return __builtin_s390_vfaef((vector unsigned int)__a,
+ (vector unsigned int)__b, 8);
+}
+
+static inline __ATTRS_o_ai vector unsigned int
+vec_find_any_ne_idx(vector unsigned int __a, vector unsigned int __b) {
+ return __builtin_s390_vfaef(__a, __b, 8);
+}
+
+/*-- vec_find_any_ne_idx_cc -------------------------------------------------*/
+
+static inline __ATTRS_o_ai vector signed char
+vec_find_any_ne_idx_cc(vector signed char __a, vector signed char __b,
+ int *__cc) {
+ return (vector signed char)
+ __builtin_s390_vfaebs((vector unsigned char)__a,
+ (vector unsigned char)__b, 8, __cc);
+}
+
+static inline __ATTRS_o_ai vector unsigned char
+vec_find_any_ne_idx_cc(vector bool char __a, vector bool char __b, int *__cc) {
+ return __builtin_s390_vfaebs((vector unsigned char)__a,
+ (vector unsigned char)__b, 8, __cc);
+}
+
+static inline __ATTRS_o_ai vector unsigned char
+vec_find_any_ne_idx_cc(vector unsigned char __a, vector unsigned char __b,
+ int *__cc) {
+ return __builtin_s390_vfaebs(__a, __b, 8, __cc);
+}
+
+static inline __ATTRS_o_ai vector signed short
+vec_find_any_ne_idx_cc(vector signed short __a, vector signed short __b,
+ int *__cc) {
+ return (vector signed short)
+ __builtin_s390_vfaehs((vector unsigned short)__a,
+ (vector unsigned short)__b, 8, __cc);
+}
+
+static inline __ATTRS_o_ai vector unsigned short
+vec_find_any_ne_idx_cc(vector bool short __a, vector bool short __b,
+ int *__cc) {
+ return __builtin_s390_vfaehs((vector unsigned short)__a,
+ (vector unsigned short)__b, 8, __cc);
+}
+
+static inline __ATTRS_o_ai vector unsigned short
+vec_find_any_ne_idx_cc(vector unsigned short __a, vector unsigned short __b,
+ int *__cc) {
+ return __builtin_s390_vfaehs(__a, __b, 8, __cc);
+}
+
+static inline __ATTRS_o_ai vector signed int
+vec_find_any_ne_idx_cc(vector signed int __a, vector signed int __b,
+ int *__cc) {
+ return (vector signed int)
+ __builtin_s390_vfaefs((vector unsigned int)__a,
+ (vector unsigned int)__b, 8, __cc);
+}
+
+static inline __ATTRS_o_ai vector unsigned int
+vec_find_any_ne_idx_cc(vector bool int __a, vector bool int __b, int *__cc) {
+ return __builtin_s390_vfaefs((vector unsigned int)__a,
+ (vector unsigned int)__b, 8, __cc);
+}
+
+static inline __ATTRS_o_ai vector unsigned int
+vec_find_any_ne_idx_cc(vector unsigned int __a, vector unsigned int __b,
+ int *__cc) {
+ return __builtin_s390_vfaefs(__a, __b, 8, __cc);
+}
+
+/*-- vec_find_any_ne_or_0_idx -----------------------------------------------*/
+
+static inline __ATTRS_o_ai vector signed char
+vec_find_any_ne_or_0_idx(vector signed char __a, vector signed char __b) {
+ return (vector signed char)
+ __builtin_s390_vfaezb((vector unsigned char)__a,
+ (vector unsigned char)__b, 8);
+}
+
+static inline __ATTRS_o_ai vector unsigned char
+vec_find_any_ne_or_0_idx(vector bool char __a, vector bool char __b) {
+ return __builtin_s390_vfaezb((vector unsigned char)__a,
+ (vector unsigned char)__b, 8);
+}
+
+static inline __ATTRS_o_ai vector unsigned char
+vec_find_any_ne_or_0_idx(vector unsigned char __a, vector unsigned char __b) {
+ return __builtin_s390_vfaezb(__a, __b, 8);
+}
+
+static inline __ATTRS_o_ai vector signed short
+vec_find_any_ne_or_0_idx(vector signed short __a, vector signed short __b) {
+ return (vector signed short)
+ __builtin_s390_vfaezh((vector unsigned short)__a,
+ (vector unsigned short)__b, 8);
+}
+
+static inline __ATTRS_o_ai vector unsigned short
+vec_find_any_ne_or_0_idx(vector bool short __a, vector bool short __b) {
+ return __builtin_s390_vfaezh((vector unsigned short)__a,
+ (vector unsigned short)__b, 8);
+}
+
+static inline __ATTRS_o_ai vector unsigned short
+vec_find_any_ne_or_0_idx(vector unsigned short __a, vector unsigned short __b) {
+ return __builtin_s390_vfaezh(__a, __b, 8);
+}
+
+static inline __ATTRS_o_ai vector signed int
+vec_find_any_ne_or_0_idx(vector signed int __a, vector signed int __b) {
+ return (vector signed int)
+ __builtin_s390_vfaezf((vector unsigned int)__a,
+ (vector unsigned int)__b, 8);
+}
+
+static inline __ATTRS_o_ai vector unsigned int
+vec_find_any_ne_or_0_idx(vector bool int __a, vector bool int __b) {
+ return __builtin_s390_vfaezf((vector unsigned int)__a,
+ (vector unsigned int)__b, 8);
+}
+
+static inline __ATTRS_o_ai vector unsigned int
+vec_find_any_ne_or_0_idx(vector unsigned int __a, vector unsigned int __b) {
+ return __builtin_s390_vfaezf(__a, __b, 8);
+}
+
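+/* Note (illustrative, not part of the upstream header): the _or_0_ variants
+ * map to the VFAEZ forms of the instruction, which additionally search for a
+ * zero element, so a scan can stop at the NUL terminator of a string:
+ *
+ *   vector unsigned char s, set;  // assumed initialized elsewhere
+ *   vector unsigned char idx = vec_find_any_ne_or_0_idx(s, set);
+ */
+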
+/*-- vec_find_any_ne_or_0_idx_cc --------------------------------------------*/
+
+static inline __ATTRS_o_ai vector signed char
+vec_find_any_ne_or_0_idx_cc(vector signed char __a, vector signed char __b,
+ int *__cc) {
+ return (vector signed char)
+ __builtin_s390_vfaezbs((vector unsigned char)__a,
+ (vector unsigned char)__b, 8, __cc);
+}
+
+static inline __ATTRS_o_ai vector unsigned char
+vec_find_any_ne_or_0_idx_cc(vector bool char __a, vector bool char __b,
+ int *__cc) {
+ return __builtin_s390_vfaezbs((vector unsigned char)__a,
+ (vector unsigned char)__b, 8, __cc);
+}
+
+static inline __ATTRS_o_ai vector unsigned char
+vec_find_any_ne_or_0_idx_cc(vector unsigned char __a, vector unsigned char __b,
+ int *__cc) {
+ return __builtin_s390_vfaezbs(__a, __b, 8, __cc);
+}
+
+static inline __ATTRS_o_ai vector signed short
+vec_find_any_ne_or_0_idx_cc(vector signed short __a, vector signed short __b,
+ int *__cc) {
+ return (vector signed short)
+ __builtin_s390_vfaezhs((vector unsigned short)__a,
+ (vector unsigned short)__b, 8, __cc);
+}
+
+static inline __ATTRS_o_ai vector unsigned short
+vec_find_any_ne_or_0_idx_cc(vector bool short __a, vector bool short __b,
+ int *__cc) {
+ return __builtin_s390_vfaezhs((vector unsigned short)__a,
+ (vector unsigned short)__b, 8, __cc);
+}
+
+static inline __ATTRS_o_ai vector unsigned short
+vec_find_any_ne_or_0_idx_cc(vector unsigned short __a,
+ vector unsigned short __b, int *__cc) {
+ return __builtin_s390_vfaezhs(__a, __b, 8, __cc);
+}
+
+static inline __ATTRS_o_ai vector signed int
+vec_find_any_ne_or_0_idx_cc(vector signed int __a, vector signed int __b,
+ int *__cc) {
+ return (vector signed int)
+ __builtin_s390_vfaezfs((vector unsigned int)__a,
+ (vector unsigned int)__b, 8, __cc);
+}
+
+static inline __ATTRS_o_ai vector unsigned int
+vec_find_any_ne_or_0_idx_cc(vector bool int __a, vector bool int __b,
+ int *__cc) {
+ return __builtin_s390_vfaezfs((vector unsigned int)__a,
+ (vector unsigned int)__b, 8, __cc);
+}
+
+static inline __ATTRS_o_ai vector unsigned int
+vec_find_any_ne_or_0_idx_cc(vector unsigned int __a, vector unsigned int __b,
+ int *__cc) {
+ return __builtin_s390_vfaezfs(__a, __b, 8, __cc);
+}
+
+#undef __constant_pow2_range
+#undef __constant_range
+#undef __constant
+#undef __ATTRS_o
+#undef __ATTRS_o_ai
+#undef __ATTRS_ai
+
+#else
+
+#error "Use -fzvector to enable vector extensions"
+
+#endif
diff --git a/current/clang-include/wmmintrin.h b/current/clang-include/wmmintrin.h
new file mode 100644
index 0000000..a2d9310
--- /dev/null
+++ b/current/clang-include/wmmintrin.h
@@ -0,0 +1,33 @@
+/*===---- wmmintrin.h - AES and PCLMUL intrinsics ---------------------------===
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+ * THE SOFTWARE.
+ *
+ *===-----------------------------------------------------------------------===
+ */
+
+#ifndef _WMMINTRIN_H
+#define _WMMINTRIN_H
+
+#include <emmintrin.h>
+
+#include <__wmmintrin_aes.h>
+
+#include <__wmmintrin_pclmul.h>
+
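+/* Illustrative usage sketch (not part of the upstream header), assuming a
+ * target built with -maes: one AES encryption round on a 128-bit state:
+ *
+ *   __m128i next = _mm_aesenc_si128(state, round_key);
+ */
+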
+#endif /* _WMMINTRIN_H */
diff --git a/current/clang-include/x86intrin.h b/current/clang-include/x86intrin.h
new file mode 100644
index 0000000..81a404f
--- /dev/null
+++ b/current/clang-include/x86intrin.h
@@ -0,0 +1,85 @@
+/*===---- x86intrin.h - X86 intrinsics -------------------------------------===
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+ * THE SOFTWARE.
+ *
+ *===-----------------------------------------------------------------------===
+ */
+
+#ifndef __X86INTRIN_H
+#define __X86INTRIN_H
+
+#include <ia32intrin.h>
+
+#include <immintrin.h>
+
+#if !defined(_MSC_VER) || __has_feature(modules) || defined(__3dNOW__)
+#include <mm3dnow.h>
+#endif
+
+#if !defined(_MSC_VER) || __has_feature(modules) || defined(__BMI__)
+#include <bmiintrin.h>
+#endif
+
+#if !defined(_MSC_VER) || __has_feature(modules) || defined(__BMI2__)
+#include <bmi2intrin.h>
+#endif
+
+#if !defined(_MSC_VER) || __has_feature(modules) || defined(__LZCNT__)
+#include <lzcntintrin.h>
+#endif
+
+#if !defined(_MSC_VER) || __has_feature(modules) || defined(__POPCNT__)
+#include <popcntintrin.h>
+#endif
+
+#if !defined(_MSC_VER) || __has_feature(modules) || defined(__RDSEED__)
+#include <rdseedintrin.h>
+#endif
+
+#if !defined(_MSC_VER) || __has_feature(modules) || defined(__PRFCHW__)
+#include <prfchwintrin.h>
+#endif
+
+#if !defined(_MSC_VER) || __has_feature(modules) || defined(__SSE4A__)
+#include <ammintrin.h>
+#endif
+
+#if !defined(_MSC_VER) || __has_feature(modules) || defined(__FMA4__)
+#include <fma4intrin.h>
+#endif
+
+#if !defined(_MSC_VER) || __has_feature(modules) || defined(__XOP__)
+#include <xopintrin.h>
+#endif
+
+#if !defined(_MSC_VER) || __has_feature(modules) || defined(__TBM__)
+#include <tbmintrin.h>
+#endif
+
+#if !defined(_MSC_VER) || __has_feature(modules) || defined(__F16C__)
+#include <f16cintrin.h>
+#endif
+
+#if !defined(_MSC_VER) || __has_feature(modules) || defined(__MWAITX__)
+#include <mwaitxintrin.h>
+#endif
+
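+/* Note (illustrative, not part of the upstream header): each sub-header
+ * above is gated on the macro that the matching -m flag defines, e.g.
+ *
+ *   clang -mbmi -mlzcnt file.c
+ *
+ * defines __BMI__ and __LZCNT__, making <bmiintrin.h> and <lzcntintrin.h>
+ * available through this umbrella header. Under MSVC compatibility or
+ * modules builds, every sub-header is included unconditionally. */
+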
+/* FIXME: LWP */
+
+#endif /* __X86INTRIN_H */
diff --git a/current/clang-include/xmmintrin.h b/current/clang-include/xmmintrin.h
new file mode 100644
index 0000000..3110e8b
--- /dev/null
+++ b/current/clang-include/xmmintrin.h
@@ -0,0 +1,2893 @@
+/*===---- xmmintrin.h - SSE intrinsics -------------------------------------===
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+ * THE SOFTWARE.
+ *
+ *===-----------------------------------------------------------------------===
+ */
+
+#ifndef __XMMINTRIN_H
+#define __XMMINTRIN_H
+
+#include <mmintrin.h>
+
+typedef int __v4si __attribute__((__vector_size__(16)));
+typedef float __v4sf __attribute__((__vector_size__(16)));
+typedef float __m128 __attribute__((__vector_size__(16)));
+
+/* Unsigned types */
+typedef unsigned int __v4su __attribute__((__vector_size__(16)));
+
+/* This header should only be included in a hosted environment as it depends on
+ * a standard library to provide allocation routines. */
+#if __STDC_HOSTED__
+#include <mm_malloc.h>
+#endif
+
+/* Define the default attributes for the functions in this file. */
+#define __DEFAULT_FN_ATTRS __attribute__((__always_inline__, __nodebug__, __target__("sse")))
+
+/// \brief Adds the 32-bit float values in the low-order bits of the operands.
+///
+/// \headerfile <x86intrin.h>
+///
+/// This intrinsic corresponds to the \c VADDSS / ADDSS instructions.
+///
+/// \param __a
+/// A 128-bit vector of [4 x float] containing one of the source operands.
+/// The lower 32 bits of this operand are used in the calculation.
+/// \param __b
+/// A 128-bit vector of [4 x float] containing one of the source operands.
+/// The lower 32 bits of this operand are used in the calculation.
+/// \returns A 128-bit vector of [4 x float] whose lower 32 bits contain the sum
+/// of the lower 32 bits of both operands. The upper 96 bits are copied from
+/// the upper 96 bits of the first source operand.
+static __inline__ __m128 __DEFAULT_FN_ATTRS
+_mm_add_ss(__m128 __a, __m128 __b)
+{
+ __a[0] += __b[0];
+ return __a;
+}
+
+/// \brief Adds two 128-bit vectors of [4 x float], and returns the results of
+/// the addition.
+///
+/// \headerfile <x86intrin.h>
+///
+/// This intrinsic corresponds to the \c VADDPS / ADDPS instructions.
+///
+/// \param __a
+/// A 128-bit vector of [4 x float] containing one of the source operands.
+/// \param __b
+/// A 128-bit vector of [4 x float] containing one of the source operands.
+/// \returns A 128-bit vector of [4 x float] containing the sums of both
+/// operands.
+static __inline__ __m128 __DEFAULT_FN_ATTRS
+_mm_add_ps(__m128 __a, __m128 __b)
+{
+ return (__m128)((__v4sf)__a + (__v4sf)__b);
+}
+
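+/* Illustrative usage sketch (not part of the upstream header), contrasting
+ * the scalar and packed forms; _mm_set_ps is declared later in this header
+ * and takes its arguments highest lane first:
+ *
+ *   __m128 a = _mm_set_ps(4.0f, 3.0f, 2.0f, 1.0f);  // {1, 2, 3, 4}
+ *   __m128 b = _mm_set_ps(8.0f, 7.0f, 6.0f, 5.0f);  // {5, 6, 7, 8}
+ *   __m128 s = _mm_add_ss(a, b);                    // {6, 2, 3, 4}
+ *   __m128 p = _mm_add_ps(a, b);                    // {6, 8, 10, 12}
+ */
+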
+/// \brief Subtracts the 32-bit float value in the low-order bits of the second
+/// operand from the corresponding value in the first operand.
+///
+/// \headerfile <x86intrin.h>
+///
+/// This intrinsic corresponds to the \c VSUBSS / SUBSS instructions.
+///
+/// \param __a
+/// A 128-bit vector of [4 x float] containing the minuend. The lower 32 bits
+/// of this operand are used in the calculation.
+/// \param __b
+/// A 128-bit vector of [4 x float] containing the subtrahend. The lower 32
+/// bits of this operand are used in the calculation.
+/// \returns A 128-bit vector of [4 x float] whose lower 32 bits contain the
+/// difference of the lower 32 bits of both operands. The upper 96 bits are
+/// copied from the upper 96 bits of the first source operand.
+static __inline__ __m128 __DEFAULT_FN_ATTRS
+_mm_sub_ss(__m128 __a, __m128 __b)
+{
+ __a[0] -= __b[0];
+ return __a;
+}
+
+/// \brief Subtracts each of the values of the second operand from the first
+/// operand, both of which are 128-bit vectors of [4 x float], and returns
+/// the results of the subtraction.
+///
+/// \headerfile <x86intrin.h>
+///
+/// This intrinsic corresponds to the \c VSUBPS / SUBPS instructions.
+///
+/// \param __a
+/// A 128-bit vector of [4 x float] containing the minuend.
+/// \param __b
+/// A 128-bit vector of [4 x float] containing the subtrahend.
+/// \returns A 128-bit vector of [4 x float] containing the differences between
+/// both operands.
+static __inline__ __m128 __DEFAULT_FN_ATTRS
+_mm_sub_ps(__m128 __a, __m128 __b)
+{
+ return (__m128)((__v4sf)__a - (__v4sf)__b);
+}
+
+/// \brief Multiplies two 32-bit float values in the low-order bits of the
+/// operands.
+///
+/// \headerfile <x86intrin.h>
+///
+/// This intrinsic corresponds to the \c VMULSS / MULSS instructions.
+///
+/// \param __a
+/// A 128-bit vector of [4 x float] containing one of the source operands.
+/// The lower 32 bits of this operand are used in the calculation.
+/// \param __b
+/// A 128-bit vector of [4 x float] containing one of the source operands.
+/// The lower 32 bits of this operand are used in the calculation.
+/// \returns A 128-bit vector of [4 x float] containing the product of the lower
+/// 32 bits of both operands. The upper 96 bits are copied from the upper 96
+/// bits of the first source operand.
+static __inline__ __m128 __DEFAULT_FN_ATTRS
+_mm_mul_ss(__m128 __a, __m128 __b)
+{
+ __a[0] *= __b[0];
+ return __a;
+}
+
+/// \brief Multiplies two 128-bit vectors of [4 x float] and returns the
+/// results of the multiplication.
+///
+/// \headerfile <x86intrin.h>
+///
+/// This intrinsic corresponds to the \c VMULPS / MULPS instructions.
+///
+/// \param __a
+/// A 128-bit vector of [4 x float] containing one of the source operands.
+/// \param __b
+/// A 128-bit vector of [4 x float] containing one of the source operands.
+/// \returns A 128-bit vector of [4 x float] containing the products of both
+/// operands.
+static __inline__ __m128 __DEFAULT_FN_ATTRS
+_mm_mul_ps(__m128 __a, __m128 __b)
+{
+ return (__m128)((__v4sf)__a * (__v4sf)__b);
+}
+
+/// \brief Divides the value in the low-order 32 bits of the first operand by
+/// the corresponding value in the second operand.
+///
+/// \headerfile <x86intrin.h>
+///
+/// This intrinsic corresponds to the \c VDIVSS / DIVSS instructions.
+///
+/// \param __a
+/// A 128-bit vector of [4 x float] containing the dividend. The lower 32
+/// bits of this operand are used in the calculation.
+/// \param __b
+/// A 128-bit vector of [4 x float] containing the divisor. The lower 32 bits
+/// of this operand are used in the calculation.
+/// \returns A 128-bit vector of [4 x float] containing the quotients of the
+/// lower 32 bits of both operands. The upper 96 bits are copied from the
+/// upper 96 bits of the first source operand.
+static __inline__ __m128 __DEFAULT_FN_ATTRS
+_mm_div_ss(__m128 __a, __m128 __b)
+{
+ __a[0] /= __b[0];
+ return __a;
+}
+
+/// \brief Divides two 128-bit vectors of [4 x float].
+///
+/// \headerfile <x86intrin.h>
+///
+/// This intrinsic corresponds to the \c VDIVPS / DIVPS instructions.
+///
+/// \param __a
+/// A 128-bit vector of [4 x float] containing the dividend.
+/// \param __b
+/// A 128-bit vector of [4 x float] containing the divisor.
+/// \returns A 128-bit vector of [4 x float] containing the quotients of both
+/// operands.
+static __inline__ __m128 __DEFAULT_FN_ATTRS
+_mm_div_ps(__m128 __a, __m128 __b)
+{
+ return (__m128)((__v4sf)__a / (__v4sf)__b);
+}
+
+/// \brief Calculates the square root of the value stored in the low-order bits
+/// of a 128-bit vector of [4 x float].
+///
+/// \headerfile <x86intrin.h>
+///
+/// This intrinsic corresponds to the \c VSQRTSS / SQRTSS instructions.
+///
+/// \param __a
+/// A 128-bit vector of [4 x float]. The lower 32 bits of this operand are
+/// used in the calculation.
+/// \returns A 128-bit vector of [4 x float] containing the square root of the
+/// value in the low-order bits of the operand.
+static __inline__ __m128 __DEFAULT_FN_ATTRS
+_mm_sqrt_ss(__m128 __a)
+{
+ __m128 __c = __builtin_ia32_sqrtss((__v4sf)__a);
+ return (__m128) { __c[0], __a[1], __a[2], __a[3] };
+}
+
+/// \brief Calculates the square roots of the values stored in a 128-bit vector
+/// of [4 x float].
+///
+/// \headerfile <x86intrin.h>
+///
+/// This intrinsic corresponds to the \c VSQRTPS / SQRTPS instructions.
+///
+/// \param __a
+/// A 128-bit vector of [4 x float].
+/// \returns A 128-bit vector of [4 x float] containing the square roots of the
+/// values in the operand.
+static __inline__ __m128 __DEFAULT_FN_ATTRS
+_mm_sqrt_ps(__m128 __a)
+{
+ return __builtin_ia32_sqrtps((__v4sf)__a);
+}
+
+/// \brief Calculates the approximate reciprocal of the value stored in the
+/// low-order bits of a 128-bit vector of [4 x float].
+///
+/// \headerfile <x86intrin.h>
+///
+/// This intrinsic corresponds to the \c VRCPSS / RCPSS instructions.
+///
+/// \param __a
+/// A 128-bit vector of [4 x float]. The lower 32 bits of this operand are
+/// used in the calculation.
+/// \returns A 128-bit vector of [4 x float] containing the approximate
+/// reciprocal of the value in the low-order bits of the operand.
+static __inline__ __m128 __DEFAULT_FN_ATTRS
+_mm_rcp_ss(__m128 __a)
+{
+ __m128 __c = __builtin_ia32_rcpss((__v4sf)__a);
+ return (__m128) { __c[0], __a[1], __a[2], __a[3] };
+}
+
+/// \brief Calculates the approximate reciprocals of the values stored in a
+/// 128-bit vector of [4 x float].
+///
+/// \headerfile <x86intrin.h>
+///
+/// This intrinsic corresponds to the \c VRCPPS / RCPPS instructions.
+///
+/// \param __a
+/// A 128-bit vector of [4 x float].
+/// \returns A 128-bit vector of [4 x float] containing the approximate
+/// reciprocals of the values in the operand.
+static __inline__ __m128 __DEFAULT_FN_ATTRS
+_mm_rcp_ps(__m128 __a)
+{
+ return __builtin_ia32_rcpps((__v4sf)__a);
+}
+
+/// \brief Calculates the approximate reciprocal of the square root of the value
+/// stored in the low-order bits of a 128-bit vector of [4 x float].
+///
+/// \headerfile <x86intrin.h>
+///
+/// This intrinsic corresponds to the \c VRSQRTSS / RSQRTSS instructions.
+///
+/// \param __a
+/// A 128-bit vector of [4 x float]. The lower 32 bits of this operand are
+/// used in the calculation.
+/// \returns A 128-bit vector of [4 x float] containing the approximate
+/// reciprocal of the square root of the value in the low-order bits of the
+/// operand.
+static __inline__ __m128 __DEFAULT_FN_ATTRS
+_mm_rsqrt_ss(__m128 __a)
+{
+ __m128 __c = __builtin_ia32_rsqrtss((__v4sf)__a);
+ return (__m128) { __c[0], __a[1], __a[2], __a[3] };
+}
+
+/// \brief Calculates the approximate reciprocals of the square roots of the
+/// values stored in a 128-bit vector of [4 x float].
+///
+/// \headerfile <x86intrin.h>
+///
+/// This intrinsic corresponds to the \c VRSQRTPS / RSQRTPS instructions.
+///
+/// \param __a
+/// A 128-bit vector of [4 x float].
+/// \returns A 128-bit vector of [4 x float] containing the approximate
+/// reciprocals of the square roots of the values in the operand.
+static __inline__ __m128 __DEFAULT_FN_ATTRS
+_mm_rsqrt_ps(__m128 __a)
+{
+ return __builtin_ia32_rsqrtps((__v4sf)__a);
+}
+
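+/* Illustrative usage sketch (not part of the upstream header): the rcp/rsqrt
+ * estimates carry roughly 12 bits of precision; one Newton-Raphson step
+ * refines the reciprocal square root to near full single precision:
+ *
+ *   __m128 y = _mm_rsqrt_ps(x);                       // ~12-bit estimate
+ *   __m128 xyy = _mm_mul_ps(_mm_mul_ps(x, y), y);     // x * y * y
+ *   y = _mm_mul_ps(_mm_mul_ps(_mm_set1_ps(0.5f), y),  // y/2 * (3 - x*y*y)
+ *                  _mm_sub_ps(_mm_set1_ps(3.0f), xyy));
+ */
+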
+/// \brief Compares two 32-bit float values in the low-order bits of both
+/// operands and returns the lesser value in the low-order bits of the
+/// vector of [4 x float].
+///
+/// \headerfile <x86intrin.h>
+///
+/// This intrinsic corresponds to the \c VMINSS / MINSS instructions.
+///
+/// \param __a
+/// A 128-bit vector of [4 x float] containing one of the operands. The lower
+/// 32 bits of this operand are used in the comparison.
+/// \param __b
+/// A 128-bit vector of [4 x float] containing one of the operands. The lower
+/// 32 bits of this operand are used in the comparison.
+/// \returns A 128-bit vector of [4 x float] whose lower 32 bits contain the
+/// minimum value between both operands. The upper 96 bits are copied from
+/// the upper 96 bits of the first source operand.
+static __inline__ __m128 __DEFAULT_FN_ATTRS
+_mm_min_ss(__m128 __a, __m128 __b)
+{
+ return __builtin_ia32_minss((__v4sf)__a, (__v4sf)__b);
+}
+
+/// \brief Compares two 128-bit vectors of [4 x float] and returns the
+/// lesser of each pair of values.
+///
+/// \headerfile <x86intrin.h>
+///
+/// This intrinsic corresponds to the \c VMINPS / MINPS instructions.
+///
+/// \param __a
+/// A 128-bit vector of [4 x float] containing one of the operands.
+/// \param __b
+/// A 128-bit vector of [4 x float] containing one of the operands.
+/// \returns A 128-bit vector of [4 x float] containing the minimum values
+/// between both operands.
+static __inline__ __m128 __DEFAULT_FN_ATTRS
+_mm_min_ps(__m128 __a, __m128 __b)
+{
+ return __builtin_ia32_minps((__v4sf)__a, (__v4sf)__b);
+}
+
+/// \brief Compares two 32-bit float values in the low-order bits of both
+/// operands and returns the greater value in the low-order bits of
+/// a vector of [4 x float].
+///
+/// \headerfile <x86intrin.h>
+///
+/// This intrinsic corresponds to the \c VMAXSS / MAXSS instructions.
+///
+/// \param __a
+/// A 128-bit vector of [4 x float] containing one of the operands. The lower
+/// 32 bits of this operand are used in the comparison.
+/// \param __b
+/// A 128-bit vector of [4 x float] containing one of the operands. The lower
+/// 32 bits of this operand are used in the comparison.
+/// \returns A 128-bit vector of [4 x float] whose lower 32 bits contain the
+/// maximum value between both operands. The upper 96 bits are copied from
+/// the upper 96 bits of the first source operand.
+static __inline__ __m128 __DEFAULT_FN_ATTRS
+_mm_max_ss(__m128 __a, __m128 __b)
+{
+ return __builtin_ia32_maxss((__v4sf)__a, (__v4sf)__b);
+}
+
+/// \brief Compares two 128-bit vectors of [4 x float] and returns the greater
+/// of each pair of values.
+///
+/// \headerfile <x86intrin.h>
+///
+/// This intrinsic corresponds to the \c VMAXPS / MAXPS instructions.
+///
+/// \param __a
+/// A 128-bit vector of [4 x float] containing one of the operands.
+/// \param __b
+/// A 128-bit vector of [4 x float] containing one of the operands.
+/// \returns A 128-bit vector of [4 x float] containing the maximum values
+/// between both operands.
+static __inline__ __m128 __DEFAULT_FN_ATTRS
+_mm_max_ps(__m128 __a, __m128 __b)
+{
+ return __builtin_ia32_maxps((__v4sf)__a, (__v4sf)__b);
+}
+
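+/* Illustrative usage sketch (not part of the upstream header): min and max
+ * compose into a branchless per-lane clamp to the range [lo, hi]:
+ *
+ *   __m128 clamped = _mm_min_ps(_mm_max_ps(x, lo), hi);
+ */
+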
+/// \brief Performs a bitwise AND of two 128-bit vectors of [4 x float].
+///
+/// \headerfile <x86intrin.h>
+///
+/// This intrinsic corresponds to the \c VANDPS / ANDPS instructions.
+///
+/// \param __a
+/// A 128-bit vector containing one of the source operands.
+/// \param __b
+/// A 128-bit vector containing one of the source operands.
+/// \returns A 128-bit vector of [4 x float] containing the bitwise AND of the
+/// values between both operands.
+static __inline__ __m128 __DEFAULT_FN_ATTRS
+_mm_and_ps(__m128 __a, __m128 __b)
+{
+ return (__m128)((__v4su)__a & (__v4su)__b);
+}
+
+/// \brief Performs a bitwise AND of two 128-bit vectors of [4 x float], using
+/// the one's complement of the values contained in the first source
+/// operand.
+///
+/// \headerfile <x86intrin.h>
+///
+/// This intrinsic corresponds to the \c VANDNPS / ANDNPS instructions.
+///
+/// \param __a
+/// A 128-bit vector of [4 x float] containing the first source operand. The
+/// one's complement of this value is used in the bitwise AND.
+/// \param __b
+/// A 128-bit vector of [4 x float] containing the second source operand.
+/// \returns A 128-bit vector of [4 x float] containing the bitwise AND of the
+/// one's complement of the first operand and the values in the second
+/// operand.
+static __inline__ __m128 __DEFAULT_FN_ATTRS
+_mm_andnot_ps(__m128 __a, __m128 __b)
+{
+ return (__m128)(~(__v4su)__a & (__v4su)__b);
+}
+
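+/* Illustrative usage sketch (not part of the upstream header): ANDN with a
+ * sign-bit mask (-0.0f sets only the sign bit) clears the sign of every
+ * lane, giving a vector fabsf:
+ *
+ *   __m128 abs_x = _mm_andnot_ps(_mm_set1_ps(-0.0f), x);
+ */
+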
+/// \brief Performs a bitwise OR of two 128-bit vectors of [4 x float].
+///
+/// \headerfile <x86intrin.h>
+///
+/// This intrinsic corresponds to the \c VORPS / ORPS instructions.
+///
+/// \param __a
+/// A 128-bit vector of [4 x float] containing one of the source operands.
+/// \param __b
+/// A 128-bit vector of [4 x float] containing one of the source operands.
+/// \returns A 128-bit vector of [4 x float] containing the bitwise OR of the
+/// values between both operands.
+static __inline__ __m128 __DEFAULT_FN_ATTRS
+_mm_or_ps(__m128 __a, __m128 __b)
+{
+ return (__m128)((__v4su)__a | (__v4su)__b);
+}
+
+/// \brief Performs a bitwise exclusive OR of two 128-bit vectors of
+/// [4 x float].
+///
+/// \headerfile <x86intrin.h>
+///
+/// This intrinsic corresponds to the \c VXORPS / XORPS instructions.
+///
+/// \param __a
+/// A 128-bit vector of [4 x float] containing one of the source operands.
+/// \param __b
+/// A 128-bit vector of [4 x float] containing one of the source operands.
+/// \returns A 128-bit vector of [4 x float] containing the bitwise exclusive OR
+/// of the values between both operands.
+static __inline__ __m128 __DEFAULT_FN_ATTRS
+_mm_xor_ps(__m128 __a, __m128 __b)
+{
+ return (__m128)((__v4su)__a ^ (__v4su)__b);
+}
+
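+/* Illustrative usage sketch (not part of the upstream header): XOR with the
+ * sign-bit mask flips the sign of every lane, giving a vector negation:
+ *
+ *   __m128 neg_x = _mm_xor_ps(x, _mm_set1_ps(-0.0f));
+ */
+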
+/// \brief Compares two 32-bit float values in the low-order bits of both
+/// operands for equality and returns the result of the comparison in the
+/// low-order bits of a vector of [4 x float].
+///
+/// \headerfile <x86intrin.h>
+///
+/// This intrinsic corresponds to the \c VCMPEQSS / CMPEQSS instructions.
+///
+/// \param __a
+/// A 128-bit vector of [4 x float] containing one of the operands. The lower
+/// 32 bits of this operand are used in the comparison.
+/// \param __b
+/// A 128-bit vector of [4 x float] containing one of the operands. The lower
+/// 32 bits of this operand are used in the comparison.
+/// \returns A 128-bit vector of [4 x float] containing the comparison results
+/// in the low-order bits.
+static __inline__ __m128 __DEFAULT_FN_ATTRS
+_mm_cmpeq_ss(__m128 __a, __m128 __b)
+{
+ return (__m128)__builtin_ia32_cmpeqss((__v4sf)__a, (__v4sf)__b);
+}
+
+/// \brief Compares each of the corresponding 32-bit float values of the
+/// 128-bit vectors of [4 x float] for equality.
+///
+/// \headerfile <x86intrin.h>
+///
+/// This intrinsic corresponds to the \c VCMPEQPS / CMPEQPS instructions.
+///
+/// \param __a
+/// A 128-bit vector of [4 x float].
+/// \param __b
+/// A 128-bit vector of [4 x float].
+/// \returns A 128-bit vector of [4 x float] containing the comparison results.
+static __inline__ __m128 __DEFAULT_FN_ATTRS
+_mm_cmpeq_ps(__m128 __a, __m128 __b)
+{
+ return (__m128)__builtin_ia32_cmpeqps((__v4sf)__a, (__v4sf)__b);
+}
+
+/// \brief Compares two 32-bit float values in the low-order bits of both
+/// operands to determine if the value in the first operand is less than the
+/// corresponding value in the second operand and returns the result of the
+/// comparison in the low-order bits of a vector of [4 x float].
+///
+/// \headerfile <x86intrin.h>
+///
+/// This intrinsic corresponds to the \c VCMPLTSS / CMPLTSS instructions.
+///
+/// \param __a
+/// A 128-bit vector of [4 x float] containing one of the operands. The lower
+/// 32 bits of this operand are used in the comparison.
+/// \param __b
+/// A 128-bit vector of [4 x float] containing one of the operands. The lower
+/// 32 bits of this operand are used in the comparison.
+/// \returns A 128-bit vector of [4 x float] containing the comparison results
+/// in the low-order bits.
+static __inline__ __m128 __DEFAULT_FN_ATTRS
+_mm_cmplt_ss(__m128 __a, __m128 __b)
+{
+ return (__m128)__builtin_ia32_cmpltss((__v4sf)__a, (__v4sf)__b);
+}
+
+/// \brief Compares each of the corresponding 32-bit float values of the
+/// 128-bit vectors of [4 x float] to determine if the values in the first
+/// operand are less than those in the second operand.
+///
+/// \headerfile <x86intrin.h>
+///
+/// This intrinsic corresponds to the \c VCMPLTPS / CMPLTPS instructions.
+///
+/// \param __a
+/// A 128-bit vector of [4 x float].
+/// \param __b
+/// A 128-bit vector of [4 x float].
+/// \returns A 128-bit vector of [4 x float] containing the comparison results.
+static __inline__ __m128 __DEFAULT_FN_ATTRS
+_mm_cmplt_ps(__m128 __a, __m128 __b)
+{
+ return (__m128)__builtin_ia32_cmpltps((__v4sf)__a, (__v4sf)__b);
+}
+
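+/* Illustrative usage sketch (not part of the upstream header): the packed
+ * comparisons return all-ones (true) or all-zeros (false) per lane, so they
+ * combine with the bitwise operations above into a branchless per-lane
+ * select, r[i] = a[i] < b[i] ? t[i] : f[i]:
+ *
+ *   __m128 mask = _mm_cmplt_ps(a, b);
+ *   __m128 r = _mm_or_ps(_mm_and_ps(mask, t), _mm_andnot_ps(mask, f));
+ */
+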
+/// \brief Compares two 32-bit float values in the low-order bits of both
+/// operands to determine if the value in the first operand is less than or
+/// equal to the corresponding value in the second operand and returns the
+/// result of the comparison in the low-order bits of a vector of
+/// [4 x float].
+///
+/// \headerfile <x86intrin.h>
+///
+/// This intrinsic corresponds to the \c VCMPLESS / CMPLESS instructions.
+///
+/// \param __a
+/// A 128-bit vector of [4 x float] containing one of the operands. The lower
+/// 32 bits of this operand are used in the comparison.
+/// \param __b
+/// A 128-bit vector of [4 x float] containing one of the operands. The lower
+/// 32 bits of this operand are used in the comparison.
+/// \returns A 128-bit vector of [4 x float] containing the comparison results
+/// in the low-order bits.
+static __inline__ __m128 __DEFAULT_FN_ATTRS
+_mm_cmple_ss(__m128 __a, __m128 __b)
+{
+ return (__m128)__builtin_ia32_cmpless((__v4sf)__a, (__v4sf)__b);
+}
+
+/// \brief Compares each of the corresponding 32-bit float values of the
+/// 128-bit vectors of [4 x float] to determine if the values in the first
+/// operand are less than or equal to those in the second operand.
+///
+/// \headerfile <x86intrin.h>
+///
+/// This intrinsic corresponds to the \c VCMPLEPS / CMPLEPS instructions.
+///
+/// \param __a
+/// A 128-bit vector of [4 x float].
+/// \param __b
+/// A 128-bit vector of [4 x float].
+/// \returns A 128-bit vector of [4 x float] containing the comparison results.
+static __inline__ __m128 __DEFAULT_FN_ATTRS
+_mm_cmple_ps(__m128 __a, __m128 __b)
+{
+ return (__m128)__builtin_ia32_cmpleps((__v4sf)__a, (__v4sf)__b);
+}
+
+/// \brief Compares two 32-bit float values in the low-order bits of both
+/// operands to determine if the value in the first operand is greater than
+/// the corresponding value in the second operand and returns the result of
+/// the comparison in the low-order bits of a vector of [4 x float].
+///
+/// \headerfile <x86intrin.h>
+///
+/// This intrinsic corresponds to the \c VCMPLTSS / CMPLTSS instructions,
+/// executed with the operands reversed to implement the greater-than
+/// comparison.
+///
+/// \param __a
+/// A 128-bit vector of [4 x float] containing one of the operands. The lower
+/// 32 bits of this operand are used in the comparison.
+/// \param __b
+/// A 128-bit vector of [4 x float] containing one of the operands. The lower
+/// 32 bits of this operand are used in the comparison.
+/// \returns A 128-bit vector of [4 x float] containing the comparison results
+/// in the low-order bits.
+static __inline__ __m128 __DEFAULT_FN_ATTRS
+_mm_cmpgt_ss(__m128 __a, __m128 __b)
+{
+ return (__m128)__builtin_shufflevector((__v4sf)__a,
+ (__v4sf)__builtin_ia32_cmpltss((__v4sf)__b, (__v4sf)__a),
+ 4, 1, 2, 3);
+}
+
+/// \brief Compares each of the corresponding 32-bit float values of the
+/// 128-bit vectors of [4 x float] to determine if the values in the first
+/// operand are greater than those in the second operand.
+///
+/// \headerfile <x86intrin.h>
+///
+/// This intrinsic corresponds to the \c VCMPLTPS / CMPLTPS instructions,
+/// executed with the operands reversed to implement the greater-than
+/// comparison.
+///
+/// \param __a
+/// A 128-bit vector of [4 x float].
+/// \param __b
+/// A 128-bit vector of [4 x float].
+/// \returns A 128-bit vector of [4 x float] containing the comparison results.
+static __inline__ __m128 __DEFAULT_FN_ATTRS
+_mm_cmpgt_ps(__m128 __a, __m128 __b)
+{
+ return (__m128)__builtin_ia32_cmpltps((__v4sf)__b, (__v4sf)__a);
+}
+
+/// \brief Compares two 32-bit float values in the low-order bits of both
+/// operands to determine if the value in the first operand is greater than
+/// or equal to the corresponding value in the second operand and returns
+/// the result of the comparison in the low-order bits of a vector of
+/// [4 x float].
+///
+/// \headerfile <x86intrin.h>
+///
+/// This intrinsic corresponds to the \c VCMPLESS / CMPLESS instructions,
+/// executed with the operands reversed to implement the
+/// greater-than-or-equal comparison.
+///
+/// \param __a
+/// A 128-bit vector of [4 x float] containing one of the operands. The lower
+/// 32 bits of this operand are used in the comparison.
+/// \param __b
+/// A 128-bit vector of [4 x float] containing one of the operands. The lower
+/// 32 bits of this operand are used in the comparison.
+/// \returns A 128-bit vector of [4 x float] containing the comparison results
+/// in the low-order bits.
+static __inline__ __m128 __DEFAULT_FN_ATTRS
+_mm_cmpge_ss(__m128 __a, __m128 __b)
+{
+ return (__m128)__builtin_shufflevector((__v4sf)__a,
+ (__v4sf)__builtin_ia32_cmpless((__v4sf)__b, (__v4sf)__a),
+ 4, 1, 2, 3);
+}
+
+/// \brief Compares each of the corresponding 32-bit float values of the
+/// 128-bit vectors of [4 x float] to determine if the values in the first
+/// operand are greater than or equal to those in the second operand.
+///
+/// \headerfile <x86intrin.h>
+///
+/// This intrinsic corresponds to the \c VCMPLEPS / CMPLEPS instructions,
+/// executed with the operands reversed to implement the
+/// greater-than-or-equal comparison.
+///
+/// \param __a
+/// A 128-bit vector of [4 x float].
+/// \param __b
+/// A 128-bit vector of [4 x float].
+/// \returns A 128-bit vector of [4 x float] containing the comparison results.
+static __inline__ __m128 __DEFAULT_FN_ATTRS
+_mm_cmpge_ps(__m128 __a, __m128 __b)
+{
+ return (__m128)__builtin_ia32_cmpleps((__v4sf)__b, (__v4sf)__a);
+}
+
+/// \brief Compares two 32-bit float values in the low-order bits of both
+/// operands for inequality and returns the result of the comparison in the
+/// low-order bits of a vector of [4 x float].
+///
+/// \headerfile <x86intrin.h>
+///
+/// This intrinsic corresponds to the \c VCMPNEQSS / CMPNEQSS instructions.
+///
+/// \param __a
+/// A 128-bit vector of [4 x float] containing one of the operands. The lower
+/// 32 bits of this operand are used in the comparison.
+/// \param __b
+/// A 128-bit vector of [4 x float] containing one of the operands. The lower
+/// 32 bits of this operand are used in the comparison.
+/// \returns A 128-bit vector of [4 x float] containing the comparison results
+/// in the low-order bits.
+static __inline__ __m128 __DEFAULT_FN_ATTRS
+_mm_cmpneq_ss(__m128 __a, __m128 __b)
+{
+ return (__m128)__builtin_ia32_cmpneqss((__v4sf)__a, (__v4sf)__b);
+}
+
+/// \brief Compares each of the corresponding 32-bit float values of the
+/// 128-bit vectors of [4 x float] for inequality.
+///
+/// \headerfile <x86intrin.h>
+///
+/// This intrinsic corresponds to the \c VCMPNEQPS / CMPNEQPS instructions.
+///
+/// \param __a
+/// A 128-bit vector of [4 x float].
+/// \param __b
+/// A 128-bit vector of [4 x float].
+/// \returns A 128-bit vector of [4 x float] containing the comparison results.
+static __inline__ __m128 __DEFAULT_FN_ATTRS
+_mm_cmpneq_ps(__m128 __a, __m128 __b)
+{
+ return (__m128)__builtin_ia32_cmpneqps((__v4sf)__a, (__v4sf)__b);
+}
+
+/// \brief Compares two 32-bit float values in the low-order bits of both
+/// operands to determine if the value in the first operand is not less than
+/// the corresponding value in the second operand and returns the result of
+/// the comparison in the low-order bits of a vector of [4 x float].
+///
+/// \headerfile <x86intrin.h>
+///
+/// This intrinsic corresponds to the \c VCMPNLTSS / CMPNLTSS instructions.
+///
+/// \param __a
+/// A 128-bit vector of [4 x float] containing one of the operands. The lower
+/// 32 bits of this operand are used in the comparison.
+/// \param __b
+/// A 128-bit vector of [4 x float] containing one of the operands. The lower
+/// 32 bits of this operand are used in the comparison.
+/// \returns A 128-bit vector of [4 x float] containing the comparison results
+/// in the low-order bits.
+static __inline__ __m128 __DEFAULT_FN_ATTRS
+_mm_cmpnlt_ss(__m128 __a, __m128 __b)
+{
+ return (__m128)__builtin_ia32_cmpnltss((__v4sf)__a, (__v4sf)__b);
+}
+
+/// \brief Compares each of the corresponding 32-bit float values of the
+/// 128-bit vectors of [4 x float] to determine if the values in the first
+/// operand are not less than those in the second operand.
+///
+/// \headerfile <x86intrin.h>
+///
+/// This intrinsic corresponds to the \c VCMPNLTPS / CMPNLTPS instructions.
+///
+/// \param __a
+/// A 128-bit vector of [4 x float].
+/// \param __b
+/// A 128-bit vector of [4 x float].
+/// \returns A 128-bit vector of [4 x float] containing the comparison results.
+static __inline__ __m128 __DEFAULT_FN_ATTRS
+_mm_cmpnlt_ps(__m128 __a, __m128 __b)
+{
+ return (__m128)__builtin_ia32_cmpnltps((__v4sf)__a, (__v4sf)__b);
+}
+
+/// \brief Compares two 32-bit float values in the low-order bits of both
+/// operands to determine if the value in the first operand is not less than
+/// or equal to the corresponding value in the second operand and returns
+/// the result of the comparison in the low-order bits of a vector of
+/// [4 x float].
+///
+/// \headerfile <x86intrin.h>
+///
+/// This intrinsic corresponds to the \c VCMPNLESS / CMPNLESS instructions.
+///
+/// \param __a
+/// A 128-bit vector of [4 x float] containing one of the operands. The lower
+/// 32 bits of this operand are used in the comparison.
+/// \param __b
+/// A 128-bit vector of [4 x float] containing one of the operands. The lower
+/// 32 bits of this operand are used in the comparison.
+/// \returns A 128-bit vector of [4 x float] containing the comparison results
+/// in the low-order bits.
+static __inline__ __m128 __DEFAULT_FN_ATTRS
+_mm_cmpnle_ss(__m128 __a, __m128 __b)
+{
+ return (__m128)__builtin_ia32_cmpnless((__v4sf)__a, (__v4sf)__b);
+}
+
+/// \brief Compares each of the corresponding 32-bit float values of the
+/// 128-bit vectors of [4 x float] to determine if the values in the first
+/// operand are not less than or equal to those in the second operand.
+///
+/// \headerfile <x86intrin.h>
+///
+/// This intrinsic corresponds to the \c VCMPNLEPS / CMPNLEPS instructions.
+///
+/// \param __a
+/// A 128-bit vector of [4 x float].
+/// \param __b
+/// A 128-bit vector of [4 x float].
+/// \returns A 128-bit vector of [4 x float] containing the comparison results.
+static __inline__ __m128 __DEFAULT_FN_ATTRS
+_mm_cmpnle_ps(__m128 __a, __m128 __b)
+{
+ return (__m128)__builtin_ia32_cmpnleps((__v4sf)__a, (__v4sf)__b);
+}
+
+/// \brief Compares two 32-bit float values in the low-order bits of both
+/// operands to determine if the value in the first operand is not greater
+/// than the corresponding value in the second operand and returns the
+/// result of the comparison in the low-order bits of a vector of
+/// [4 x float].
+///
+/// \headerfile <x86intrin.h>
+///
+/// This intrinsic corresponds to the \c VCMPNLTSS / CMPNLTSS instructions,
+/// executed with the operands reversed to implement the not-greater-than
+/// comparison.
+///
+/// \param __a
+/// A 128-bit vector of [4 x float] containing one of the operands. The lower
+/// 32 bits of this operand are used in the comparison.
+/// \param __b
+/// A 128-bit vector of [4 x float] containing one of the operands. The lower
+/// 32 bits of this operand are used in the comparison.
+/// \returns A 128-bit vector of [4 x float] containing the comparison results
+/// in the low-order bits.
+static __inline__ __m128 __DEFAULT_FN_ATTRS
+_mm_cmpngt_ss(__m128 __a, __m128 __b)
+{
+ return (__m128)__builtin_shufflevector((__v4sf)__a,
+ (__v4sf)__builtin_ia32_cmpnltss((__v4sf)__b, (__v4sf)__a),
+ 4, 1, 2, 3);
+}
+
+/// \brief Compares each of the corresponding 32-bit float values of the
+/// 128-bit vectors of [4 x float] to determine if the values in the first
+/// operand are not greater than those in the second operand.
+///
+/// \headerfile <x86intrin.h>
+///
+/// This intrinsic corresponds to the \c VCMPNLTPS / CMPNLTPS instructions,
+/// executed with the operands reversed to implement the not-greater-than
+/// comparison.
+///
+/// \param __a
+/// A 128-bit vector of [4 x float].
+/// \param __b
+/// A 128-bit vector of [4 x float].
+/// \returns A 128-bit vector of [4 x float] containing the comparison results.
+static __inline__ __m128 __DEFAULT_FN_ATTRS
+_mm_cmpngt_ps(__m128 __a, __m128 __b)
+{
+ return (__m128)__builtin_ia32_cmpnltps((__v4sf)__b, (__v4sf)__a);
+}
+
+/// \brief Compares two 32-bit float values in the low-order bits of both
+/// operands to determine if the value in the first operand is not greater
+/// than or equal to the corresponding value in the second operand and
+/// returns the result of the comparison in the low-order bits of a vector
+/// of [4 x float].
+///
+/// \headerfile <x86intrin.h>
+///
+/// This intrinsic corresponds to the \c VCMPNLESS / CMPNLESS instructions,
+/// executed with the operands reversed to implement the
+/// not-greater-than-or-equal comparison.
+///
+/// \param __a
+/// A 128-bit vector of [4 x float] containing one of the operands. The lower
+/// 32 bits of this operand are used in the comparison.
+/// \param __b
+/// A 128-bit vector of [4 x float] containing one of the operands. The lower
+/// 32 bits of this operand are used in the comparison.
+/// \returns A 128-bit vector of [4 x float] containing the comparison results
+/// in the low-order bits.
+static __inline__ __m128 __DEFAULT_FN_ATTRS
+_mm_cmpnge_ss(__m128 __a, __m128 __b)
+{
+ return (__m128)__builtin_shufflevector((__v4sf)__a,
+ (__v4sf)__builtin_ia32_cmpnless((__v4sf)__b, (__v4sf)__a),
+ 4, 1, 2, 3);
+}
+
+/// \brief Compares each of the corresponding 32-bit float values of the
+/// 128-bit vectors of [4 x float] to determine if the values in the first
+/// operand are not greater than or equal to those in the second operand.
+///
+/// \headerfile <x86intrin.h>
+///
+/// This intrinsic corresponds to the \c VCMPNLEPS / CMPNLEPS instructions,
+/// executed with the operands reversed to implement the
+/// not-greater-than-or-equal comparison.
+///
+/// \param __a
+/// A 128-bit vector of [4 x float].
+/// \param __b
+/// A 128-bit vector of [4 x float].
+/// \returns A 128-bit vector of [4 x float] containing the comparison results.
+static __inline__ __m128 __DEFAULT_FN_ATTRS
+_mm_cmpnge_ps(__m128 __a, __m128 __b)
+{
+ return (__m128)__builtin_ia32_cmpnleps((__v4sf)__b, (__v4sf)__a);
+}
+
+/// \brief Compares two 32-bit float values in the low-order bits of both
+/// operands to determine if the value in the first operand is ordered with
+/// respect to the corresponding value in the second operand and returns the
+/// result of the comparison in the low-order bits of a vector of
+/// [4 x float].
+///
+/// \headerfile <x86intrin.h>
+///
+/// This intrinsic corresponds to the \c VCMPORDSS / CMPORDSS instructions.
+///
+/// \param __a
+/// A 128-bit vector of [4 x float] containing one of the operands. The lower
+/// 32 bits of this operand are used in the comparison.
+/// \param __b
+/// A 128-bit vector of [4 x float] containing one of the operands. The lower
+/// 32 bits of this operand are used in the comparison.
+/// \returns A 128-bit vector of [4 x float] containing the comparison results
+/// in the low-order bits.
+static __inline__ __m128 __DEFAULT_FN_ATTRS
+_mm_cmpord_ss(__m128 __a, __m128 __b)
+{
+ return (__m128)__builtin_ia32_cmpordss((__v4sf)__a, (__v4sf)__b);
+}
+
+/// \brief Compares each of the corresponding 32-bit float values of the
+/// 128-bit vectors of [4 x float] to determine if the values in the first
+/// operand are ordered with respect to those in the second operand.
+///
+/// \headerfile <x86intrin.h>
+///
+/// This intrinsic corresponds to the \c VCMPORDPS / CMPORDPS instructions.
+///
+/// \param __a
+/// A 128-bit vector of [4 x float].
+/// \param __b
+/// A 128-bit vector of [4 x float].
+/// \returns A 128-bit vector of [4 x float] containing the comparison results.
+static __inline__ __m128 __DEFAULT_FN_ATTRS
+_mm_cmpord_ps(__m128 __a, __m128 __b)
+{
+ return (__m128)__builtin_ia32_cmpordps((__v4sf)__a, (__v4sf)__b);
+}
+
+/// \brief Compares two 32-bit float values in the low-order bits of both
+/// operands to determine if the value in the first operand is unordered
+/// with respect to the corresponding value in the second operand and
+/// returns the result of the comparison in the low-order bits of a vector
+/// of [4 x float].
+///
+/// \headerfile <x86intrin.h>
+///
+/// This intrinsic corresponds to the \c VCMPUNORDSS / CMPUNORDSS instructions.
+///
+/// \param __a
+/// A 128-bit vector of [4 x float] containing one of the operands. The lower
+/// 32 bits of this operand are used in the comparison.
+/// \param __b
+/// A 128-bit vector of [4 x float] containing one of the operands. The lower
+/// 32 bits of this operand are used in the comparison.
+/// \returns A 128-bit vector of [4 x float] containing the comparison results
+/// in the low-order bits.
+static __inline__ __m128 __DEFAULT_FN_ATTRS
+_mm_cmpunord_ss(__m128 __a, __m128 __b)
+{
+ return (__m128)__builtin_ia32_cmpunordss((__v4sf)__a, (__v4sf)__b);
+}
+
+/// \brief Compares each of the corresponding 32-bit float values of the
+/// 128-bit vectors of [4 x float] to determine if the values in the first
+/// operand are unordered with respect to those in the second operand.
+///
+/// \headerfile <x86intrin.h>
+///
+/// This intrinsic corresponds to the \c VCMPUNORDPS / CMPUNORDPS instructions.
+///
+/// \param __a
+/// A 128-bit vector of [4 x float].
+/// \param __b
+/// A 128-bit vector of [4 x float].
+/// \returns A 128-bit vector of [4 x float] containing the comparison results.
+static __inline__ __m128 __DEFAULT_FN_ATTRS
+_mm_cmpunord_ps(__m128 __a, __m128 __b)
+{
+ return (__m128)__builtin_ia32_cmpunordps((__v4sf)__a, (__v4sf)__b);
+}
+
+/// \brief Compares two 32-bit float values in the low-order bits of both
+/// operands for equality and returns the result of the comparison.
+///
+/// \headerfile <x86intrin.h>
+///
+/// This intrinsic corresponds to the \c VCOMISS / COMISS instructions.
+///
+/// \param __a
+/// A 128-bit vector of [4 x float]. The lower 32 bits of this operand are
+/// used in the comparison.
+/// \param __b
+/// A 128-bit vector of [4 x float]. The lower 32 bits of this operand are
+/// used in the comparison.
+/// \returns An integer containing the comparison results.
+static __inline__ int __DEFAULT_FN_ATTRS
+_mm_comieq_ss(__m128 __a, __m128 __b)
+{
+ return __builtin_ia32_comieq((__v4sf)__a, (__v4sf)__b);
+}
+
+/// \brief Compares two 32-bit float values in the low-order bits of both
+/// operands to determine if the first operand is less than the second
+/// operand and returns the result of the comparison.
+///
+/// \headerfile <x86intrin.h>
+///
+/// This intrinsic corresponds to the \c VCOMISS / COMISS instructions.
+///
+/// \param __a
+/// A 128-bit vector of [4 x float]. The lower 32 bits of this operand are
+/// used in the comparison.
+/// \param __b
+/// A 128-bit vector of [4 x float]. The lower 32 bits of this operand are
+/// used in the comparison.
+/// \returns An integer containing the comparison results.
+static __inline__ int __DEFAULT_FN_ATTRS
+_mm_comilt_ss(__m128 __a, __m128 __b)
+{
+ return __builtin_ia32_comilt((__v4sf)__a, (__v4sf)__b);
+}
+
+/// \brief Compares two 32-bit float values in the low-order bits of both
+/// operands to determine if the first operand is less than or equal to the
+/// second operand and returns the result of the comparison.
+///
+/// \headerfile <x86intrin.h>
+///
+/// This intrinsic corresponds to the \c VCOMISS / COMISS instructions.
+///
+/// \param __a
+/// A 128-bit vector of [4 x float]. The lower 32 bits of this operand are
+/// used in the comparison.
+/// \param __b
+/// A 128-bit vector of [4 x float]. The lower 32 bits of this operand are
+/// used in the comparison.
+/// \returns An integer containing the comparison results.
+static __inline__ int __DEFAULT_FN_ATTRS
+_mm_comile_ss(__m128 __a, __m128 __b)
+{
+ return __builtin_ia32_comile((__v4sf)__a, (__v4sf)__b);
+}
+
+/// \brief Compares two 32-bit float values in the low-order bits of both
+/// operands to determine if the first operand is greater than the second
+/// operand and returns the result of the comparison.
+///
+/// \headerfile <x86intrin.h>
+///
+/// This intrinsic corresponds to the \c VCOMISS / COMISS instructions.
+///
+/// \param __a
+/// A 128-bit vector of [4 x float]. The lower 32 bits of this operand are
+/// used in the comparison.
+/// \param __b
+/// A 128-bit vector of [4 x float]. The lower 32 bits of this operand are
+/// used in the comparison.
+/// \returns An integer containing the comparison results.
+static __inline__ int __DEFAULT_FN_ATTRS
+_mm_comigt_ss(__m128 __a, __m128 __b)
+{
+ return __builtin_ia32_comigt((__v4sf)__a, (__v4sf)__b);
+}
+
+/// \brief Compares two 32-bit float values in the low-order bits of both
+/// operands to determine if the first operand is greater than or equal to
+/// the second operand and returns the result of the comparison.
+///
+/// \headerfile <x86intrin.h>
+///
+/// This intrinsic corresponds to the \c VCOMISS / COMISS instructions.
+///
+/// \param __a
+/// A 128-bit vector of [4 x float]. The lower 32 bits of this operand are
+/// used in the comparison.
+/// \param __b
+/// A 128-bit vector of [4 x float]. The lower 32 bits of this operand are
+/// used in the comparison.
+/// \returns An integer containing the comparison results.
+static __inline__ int __DEFAULT_FN_ATTRS
+_mm_comige_ss(__m128 __a, __m128 __b)
+{
+ return __builtin_ia32_comige((__v4sf)__a, (__v4sf)__b);
+}
+
+/// \brief Compares two 32-bit float values in the low-order bits of both
+/// operands to determine if the first operand is not equal to the second
+/// operand and returns the result of the comparison.
+///
+/// \headerfile <x86intrin.h>
+///
+/// This intrinsic corresponds to the \c VCOMISS / COMISS instructions.
+///
+/// \param __a
+/// A 128-bit vector of [4 x float]. The lower 32 bits of this operand are
+/// used in the comparison.
+/// \param __b
+/// A 128-bit vector of [4 x float]. The lower 32 bits of this operand are
+/// used in the comparison.
+/// \returns An integer containing the comparison results.
+static __inline__ int __DEFAULT_FN_ATTRS
+_mm_comineq_ss(__m128 __a, __m128 __b)
+{
+ return __builtin_ia32_comineq((__v4sf)__a, (__v4sf)__b);
+}
+
+/// \brief Performs an unordered comparison of two 32-bit float values using
+/// the low-order bits of both operands to determine equality and returns
+/// the result of the comparison.
+///
+/// \headerfile <x86intrin.h>
+///
+/// This intrinsic corresponds to the \c VUCOMISS / UCOMISS instructions.
+///
+/// \param __a
+/// A 128-bit vector of [4 x float]. The lower 32 bits of this operand are
+/// used in the comparison.
+/// \param __b
+/// A 128-bit vector of [4 x float]. The lower 32 bits of this operand are
+/// used in the comparison.
+/// \returns An integer containing the comparison results.
+static __inline__ int __DEFAULT_FN_ATTRS
+_mm_ucomieq_ss(__m128 __a, __m128 __b)
+{
+ return __builtin_ia32_ucomieq((__v4sf)__a, (__v4sf)__b);
+}
+
+/// \brief Performs an unordered comparison of two 32-bit float values using
+/// the low-order bits of both operands to determine if the first operand is
+/// less than the second operand and returns the result of the comparison.
+///
+/// \headerfile <x86intrin.h>
+///
+/// This intrinsic corresponds to the \c VUCOMISS / UCOMISS instructions.
+///
+/// \param __a
+/// A 128-bit vector of [4 x float]. The lower 32 bits of this operand are
+/// used in the comparison.
+/// \param __b
+/// A 128-bit vector of [4 x float]. The lower 32 bits of this operand are
+/// used in the comparison.
+/// \returns An integer containing the comparison results.
+static __inline__ int __DEFAULT_FN_ATTRS
+_mm_ucomilt_ss(__m128 __a, __m128 __b)
+{
+ return __builtin_ia32_ucomilt((__v4sf)__a, (__v4sf)__b);
+}
+
+/// \brief Performs an unordered comparison of two 32-bit float values using
+/// the low-order bits of both operands to determine if the first operand
+/// is less than or equal to the second operand and returns the result of
+/// the comparison.
+///
+/// \headerfile <x86intrin.h>
+///
+/// This intrinsic corresponds to the \c VUCOMISS / UCOMISS instructions.
+///
+/// \param __a
+/// A 128-bit vector of [4 x float]. The lower 32 bits of this operand are
+/// used in the comparison.
+/// \param __b
+/// A 128-bit vector of [4 x float]. The lower 32 bits of this operand are
+/// used in the comparison.
+/// \returns An integer containing the comparison results.
+static __inline__ int __DEFAULT_FN_ATTRS
+_mm_ucomile_ss(__m128 __a, __m128 __b)
+{
+ return __builtin_ia32_ucomile((__v4sf)__a, (__v4sf)__b);
+}
+
+/// \brief Performs an unordered comparison of two 32-bit float values using
+/// the low-order bits of both operands to determine if the first operand
+/// is greater than the second operand and returns the result of the
+/// comparison.
+///
+/// \headerfile <x86intrin.h>
+///
+/// This intrinsic corresponds to the \c VUCOMISS / UCOMISS instructions.
+///
+/// \param __a
+/// A 128-bit vector of [4 x float]. The lower 32 bits of this operand are
+/// used in the comparison.
+/// \param __b
+/// A 128-bit vector of [4 x float]. The lower 32 bits of this operand are
+/// used in the comparison.
+/// \returns An integer containing the comparison results.
+static __inline__ int __DEFAULT_FN_ATTRS
+_mm_ucomigt_ss(__m128 __a, __m128 __b)
+{
+ return __builtin_ia32_ucomigt((__v4sf)__a, (__v4sf)__b);
+}
+
+/// \brief Performs an unordered comparison of two 32-bit float values using
+/// the low-order bits of both operands to determine if the first operand is
+/// greater than or equal to the second operand and returns the result of
+/// the comparison.
+///
+/// \headerfile <x86intrin.h>
+///
+/// This intrinsic corresponds to the \c VUCOMISS / UCOMISS instructions.
+///
+/// \param __a
+/// A 128-bit vector of [4 x float]. The lower 32 bits of this operand are
+/// used in the comparison.
+/// \param __b
+/// A 128-bit vector of [4 x float]. The lower 32 bits of this operand are
+/// used in the comparison.
+/// \returns An integer containing the comparison results.
+static __inline__ int __DEFAULT_FN_ATTRS
+_mm_ucomige_ss(__m128 __a, __m128 __b)
+{
+ return __builtin_ia32_ucomige((__v4sf)__a, (__v4sf)__b);
+}
+
+/// \brief Performs an unordered comparison of two 32-bit float values using
+/// the low-order bits of both operands to determine inequality and returns
+/// the result of the comparison.
+///
+/// \headerfile <x86intrin.h>
+///
+/// This intrinsic corresponds to the \c VUCOMISS / UCOMISS instructions.
+///
+/// \param __a
+/// A 128-bit vector of [4 x float]. The lower 32 bits of this operand are
+/// used in the comparison.
+/// \param __b
+/// A 128-bit vector of [4 x float]. The lower 32 bits of this operand are
+/// used in the comparison.
+/// \returns An integer containing the comparison results.
+static __inline__ int __DEFAULT_FN_ATTRS
+_mm_ucomineq_ss(__m128 __a, __m128 __b)
+{
+ return __builtin_ia32_ucomineq((__v4sf)__a, (__v4sf)__b);
+}
+
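+/* Illustrative sketch (not part of the original header): branching on the low
+   elements only. The ordered (COMISS) and unordered (UCOMISS) forms compute
+   the same flag results; COMISS additionally signals the invalid exception
+   for quiet NaN operands. The helper name is hypothetical. */
+static __inline__ int __DEFAULT_FN_ATTRS
+__example_low_float_greater(__m128 __a, __m128 __b)
+{
+ /* 1 only when the low element of __a is greater than that of __b; a NaN in
+    either operand compares unordered and yields 0 for this predicate. */
+ return _mm_ucomigt_ss(__a, __b);
+}
+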
+/// \brief Converts a float value contained in the lower 32 bits of a vector of
+/// [4 x float] into a 32-bit integer.
+///
+/// \headerfile <x86intrin.h>
+///
+/// This intrinsic corresponds to the \c VCVTSS2SI / CVTSS2SI instructions.
+///
+/// \param __a
+/// A 128-bit vector of [4 x float]. The lower 32 bits of this operand are
+/// used in the conversion.
+/// \returns A 32-bit integer containing the converted value.
+static __inline__ int __DEFAULT_FN_ATTRS
+_mm_cvtss_si32(__m128 __a)
+{
+ return __builtin_ia32_cvtss2si((__v4sf)__a);
+}
+
+/// \brief Converts a float value contained in the lower 32 bits of a vector of
+/// [4 x float] into a 32-bit integer.
+///
+/// \headerfile <x86intrin.h>
+///
+/// This intrinsic corresponds to the \c VCVTSS2SI / CVTSS2SI instructions.
+///
+/// \param __a
+/// A 128-bit vector of [4 x float]. The lower 32 bits of this operand are
+/// used in the conversion.
+/// \returns A 32-bit integer containing the converted value.
+static __inline__ int __DEFAULT_FN_ATTRS
+_mm_cvt_ss2si(__m128 __a)
+{
+ return _mm_cvtss_si32(__a);
+}
+
+#ifdef __x86_64__
+
+/// \brief Converts a float value contained in the lower 32 bits of a vector of
+/// [4 x float] into a 64-bit integer.
+///
+/// \headerfile <x86intrin.h>
+///
+/// This intrinsic corresponds to the \c VCVTSS2SI / CVTSS2SI instructions.
+///
+/// \param __a
+/// A 128-bit vector of [4 x float]. The lower 32 bits of this operand are
+/// used in the conversion.
+/// \returns A 64-bit integer containing the converted value.
+static __inline__ long long __DEFAULT_FN_ATTRS
+_mm_cvtss_si64(__m128 __a)
+{
+ return __builtin_ia32_cvtss2si64((__v4sf)__a);
+}
+
+#endif
+
+/// \brief Converts two low-order float values in a 128-bit vector of
+/// [4 x float] into a 64-bit vector of [2 x i32].
+///
+/// \headerfile <x86intrin.h>
+///
+/// This intrinsic corresponds to the \c CVTPS2PI instruction.
+///
+/// \param __a
+/// A 128-bit vector of [4 x float].
+/// \returns A 64-bit integer vector containing the converted values.
+static __inline__ __m64 __DEFAULT_FN_ATTRS
+_mm_cvtps_pi32(__m128 __a)
+{
+ return (__m64)__builtin_ia32_cvtps2pi((__v4sf)__a);
+}
+
+/// \brief Converts two low-order float values in a 128-bit vector of
+/// [4 x float] into a 64-bit vector of [2 x i32].
+///
+/// \headerfile <x86intrin.h>
+///
+/// This intrinsic corresponds to the \c CVTPS2PI instruction.
+///
+/// \param __a
+/// A 128-bit vector of [4 x float].
+/// \returns A 64-bit integer vector containing the converted values.
+static __inline__ __m64 __DEFAULT_FN_ATTRS
+_mm_cvt_ps2pi(__m128 __a)
+{
+ return _mm_cvtps_pi32(__a);
+}
+
+/// \brief Converts a float value contained in the lower 32 bits of a vector of
+/// [4 x float] into a 32-bit integer, truncating the result when it is
+/// inexact.
+///
+/// \headerfile <x86intrin.h>
+///
+/// This intrinsic corresponds to the \c VCVTTSS2SI / CVTTSS2SI instructions.
+///
+/// \param __a
+/// A 128-bit vector of [4 x float]. The lower 32 bits of this operand are
+/// used in the conversion.
+/// \returns A 32-bit integer containing the converted value.
+static __inline__ int __DEFAULT_FN_ATTRS
+_mm_cvttss_si32(__m128 __a)
+{
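+ /* A C float-to-integer conversion truncates toward zero, matching the
+    CVTTSS2SI semantics for in-range values. */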
+ return __a[0];
+}
+
+/// \brief Converts a float value contained in the lower 32 bits of a vector of
+/// [4 x float] into a 32-bit integer, truncating the result when it is
+/// inexact.
+///
+/// \headerfile <x86intrin.h>
+///
+/// This intrinsic corresponds to the \c VCVTTSS2SI / CVTTSS2SI instructions.
+///
+/// \param __a
+/// A 128-bit vector of [4 x float]. The lower 32 bits of this operand are
+/// used in the conversion.
+/// \returns A 32-bit integer containing the converted value.
+static __inline__ int __DEFAULT_FN_ATTRS
+_mm_cvtt_ss2si(__m128 __a)
+{
+ return _mm_cvttss_si32(__a);
+}
+
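+/* Illustrative sketch (not part of the original header): _mm_cvtss_si32
+   rounds according to the current MXCSR rounding mode (round-to-nearest by
+   default), while _mm_cvttss_si32 always truncates toward zero. */
+static __inline__ int __DEFAULT_FN_ATTRS
+__example_round_vs_truncate(void)
+{
+ __m128 __v = (__m128){ 2.75f, 0, 0, 0 };
+ /* 3 - 2 == 1 under the default rounding mode. */
+ return _mm_cvtss_si32(__v) - _mm_cvttss_si32(__v);
+}
+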
+/// \brief Converts a float value contained in the lower 32 bits of a vector of
+/// [4 x float] into a 64-bit integer, truncating the result when it is
+/// inexact.
+///
+/// \headerfile <x86intrin.h>
+///
+/// This intrinsic corresponds to the \c VCVTTSS2SI / CVTTSS2SI instructions.
+///
+/// \param __a
+/// A 128-bit vector of [4 x float]. The lower 32 bits of this operand are
+/// used in the conversion.
+/// \returns A 64-bit integer containing the converted value.
+static __inline__ long long __DEFAULT_FN_ATTRS
+_mm_cvttss_si64(__m128 __a)
+{
+ return __a[0];
+}
+
+/// \brief Converts two low-order float values in a 128-bit vector of
+/// [4 x float] into a 64-bit vector of [2 x i32], truncating the result
+/// when it is inexact.
+///
+/// \headerfile <x86intrin.h>
+///
+/// This intrinsic corresponds to the \c CVTTPS2PI instruction.
+///
+/// \param __a
+/// A 128-bit vector of [4 x float].
+/// \returns A 64-bit integer vector containing the converted values.
+static __inline__ __m64 __DEFAULT_FN_ATTRS
+_mm_cvttps_pi32(__m128 __a)
+{
+ return (__m64)__builtin_ia32_cvttps2pi((__v4sf)__a);
+}
+
+/// \brief Converts two low-order float values in a 128-bit vector of [4 x
+/// float] into a 64-bit vector of [2 x i32], truncating the result when it
+/// is inexact.
+///
+/// \headerfile <x86intrin.h>
+///
+/// This intrinsic corresponds to the \c CVTTPS2PI instruction.
+///
+/// \param __a
+/// A 128-bit vector of [4 x float].
+/// \returns A 64-bit integer vector containing the converted values.
+static __inline__ __m64 __DEFAULT_FN_ATTRS
+_mm_cvtt_ps2pi(__m128 __a)
+{
+ return _mm_cvttps_pi32(__a);
+}
+
+/// \brief Converts a 32-bit signed integer value into a floating point value
+/// and writes it to the lower 32 bits of the destination. The remaining
+/// higher order elements of the destination vector are copied from the
+/// corresponding elements in the first operand.
+///
+/// \headerfile <x86intrin.h>
+///
+/// This intrinsic corresponds to the \c VCVTSI2SS / CVTSI2SS instruction.
+///
+/// \param __a
+/// A 128-bit vector of [4 x float].
+/// \param __b
+/// A 32-bit signed integer operand containing the value to be converted.
+/// \returns A 128-bit vector of [4 x float] whose lower 32 bits contain the
+/// converted value of the second operand. The upper 96 bits are copied from
+/// the upper 96 bits of the first operand.
+static __inline__ __m128 __DEFAULT_FN_ATTRS
+_mm_cvtsi32_ss(__m128 __a, int __b)
+{
+ __a[0] = __b;
+ return __a;
+}
+
+/// \brief Converts a 32-bit signed integer value into a floating point value
+/// and writes it to the lower 32 bits of the destination. The remaining
+/// higher order elements of the destination are copied from the
+/// corresponding elements in the first operand.
+///
+/// \headerfile <x86intrin.h>
+///
+/// This intrinsic corresponds to the \c VCVTSI2SS / CVTSI2SS instruction.
+///
+/// \param __a
+/// A 128-bit vector of [4 x float].
+/// \param __b
+/// A 32-bit signed integer operand containing the value to be converted.
+/// \returns A 128-bit vector of [4 x float] whose lower 32 bits contain the
+/// converted value of the second operand. The upper 96 bits are copied from
+/// the upper 96 bits of the first operand.
+static __inline__ __m128 __DEFAULT_FN_ATTRS
+_mm_cvt_si2ss(__m128 __a, int __b)
+{
+ return _mm_cvtsi32_ss(__a, __b);
+}
+
+#ifdef __x86_64__
+
+/// \brief Converts a 64-bit signed integer value into a floating point value
+/// and writes it to the lower 32 bits of the destination. The remaining
+/// higher order elements of the destination are copied from the
+/// corresponding elements in the first operand.
+///
+/// \headerfile <x86intrin.h>
+///
+/// This intrinsic corresponds to the \c VCVTSI2SS / CVTSI2SS instruction.
+///
+/// \param __a
+/// A 128-bit vector of [4 x float].
+/// \param __b
+/// A 64-bit signed integer operand containing the value to be converted.
+/// \returns A 128-bit vector of [4 x float] whose lower 32 bits contain the
+/// converted value of the second operand. The upper 96 bits are copied from
+/// the upper 96 bits of the first operand.
+static __inline__ __m128 __DEFAULT_FN_ATTRS
+_mm_cvtsi64_ss(__m128 __a, long long __b)
+{
+ __a[0] = __b;
+ return __a;
+}
+
+#endif
+
+/// \brief Converts two elements of a 64-bit vector of [2 x i32] into two
+/// floating point values and writes them to the lower 64 bits of the
+/// destination. The remaining higher order elements of the destination are
+/// copied from the corresponding elements in the first operand.
+///
+/// \headerfile <x86intrin.h>
+///
+/// This intrinsic corresponds to the \c CVTPI2PS instruction.
+///
+/// \param __a
+/// A 128-bit vector of [4 x float].
+/// \param __b
+/// A 64-bit vector of [2 x i32]. The elements in this vector are converted
+/// and written to the corresponding low-order elements in the destination.
+/// \returns A 128-bit vector of [4 x float] whose lower 64 bits contain the
+/// converted value of the second operand. The upper 64 bits are copied from
+/// the upper 64 bits of the first operand.
+static __inline__ __m128 __DEFAULT_FN_ATTRS
+_mm_cvtpi32_ps(__m128 __a, __m64 __b)
+{
+ return __builtin_ia32_cvtpi2ps((__v4sf)__a, (__v2si)__b);
+}
+
+/// \brief Converts two elements of a 64-bit vector of [2 x i32] into two
+/// floating point values and writes them to the lower 64 bits of the
+/// destination. The remaining higher order elements of the destination are
+/// copied from the corresponding elements in the first operand.
+///
+/// \headerfile <x86intrin.h>
+///
+/// This intrinsic corresponds to the \c CVTPI2PS instruction.
+///
+/// \param __a
+/// A 128-bit vector of [4 x float].
+/// \param __b
+/// A 64-bit vector of [2 x i32]. The elements in this vector are converted
+/// and written to the corresponding low-order elements in the destination.
+/// \returns A 128-bit vector of [4 x float] whose lower 64 bits contain the
+/// converted value from the second operand. The upper 64 bits are copied
+/// from the upper 64 bits of the first operand.
+static __inline__ __m128 __DEFAULT_FN_ATTRS
+_mm_cvt_pi2ps(__m128 __a, __m64 __b)
+{
+ return _mm_cvtpi32_ps(__a, __b);
+}
+
+/// \brief Extracts a float value contained in the lower 32 bits of a vector of
+/// [4 x float].
+///
+/// \headerfile <x86intrin.h>
+///
+/// This intrinsic corresponds to the \c VMOVSS / MOVSS instruction.
+///
+/// \param __a
+/// A 128-bit vector of [4 x float]. The lower 32 bits of this operand are
+/// used in the extraction.
+/// \returns A 32-bit float containing the extracted value.
+static __inline__ float __DEFAULT_FN_ATTRS
+_mm_cvtss_f32(__m128 __a)
+{
+ return __a[0];
+}
+
+/// \brief Loads two packed float values from the address __p into the
+/// high-order bits of a 128-bit vector of [4 x float]. The low-order bits
+/// are copied from the low-order bits of the first operand.
+///
+/// \headerfile <x86intrin.h>
+///
+/// This intrinsic corresponds to the \c VMOVHPS / MOVHPS instruction.
+///
+/// \param __a
+/// A 128-bit vector of [4 x float]. Bits [63:0] are written to bits [63:0]
+/// of the destination.
+/// \param __p
+/// A pointer to two packed float values. Bits [63:0] are written to bits
+/// [127:64] of the destination.
+/// \returns A 128-bit vector of [4 x float] containing the moved values.
+static __inline__ __m128 __DEFAULT_FN_ATTRS
+_mm_loadh_pi(__m128 __a, const __m64 *__p)
+{
+ typedef float __mm_loadh_pi_v2f32 __attribute__((__vector_size__(8)));
+ struct __mm_loadh_pi_struct {
+ __mm_loadh_pi_v2f32 __u;
+ } __attribute__((__packed__, __may_alias__));
+ __mm_loadh_pi_v2f32 __b = ((struct __mm_loadh_pi_struct*)__p)->__u;
+ __m128 __bb = __builtin_shufflevector(__b, __b, 0, 1, 0, 1);
+ return __builtin_shufflevector(__a, __bb, 0, 1, 4, 5);
+}
+
+/// \brief Loads two packed float values from the address __p into the low-order
+/// bits of a 128-bit vector of [4 x float]. The high-order bits are copied
+/// from the high-order bits of the first operand.
+///
+/// \headerfile <x86intrin.h>
+///
+/// This intrinsic corresponds to the \c VMOVLPS / MOVLPS instruction.
+///
+/// \param __a
+/// A 128-bit vector of [4 x float]. Bits [127:64] are written to bits
+/// [127:64] of the destination.
+/// \param __p
+/// A pointer to two packed float values. Bits [63:0] are written to bits
+/// [63:0] of the destination.
+/// \returns A 128-bit vector of [4 x float] containing the moved values.
+static __inline__ __m128 __DEFAULT_FN_ATTRS
+_mm_loadl_pi(__m128 __a, const __m64 *__p)
+{
+ typedef float __mm_loadl_pi_v2f32 __attribute__((__vector_size__(8)));
+ struct __mm_loadl_pi_struct {
+ __mm_loadl_pi_v2f32 __u;
+ } __attribute__((__packed__, __may_alias__));
+ __mm_loadl_pi_v2f32 __b = ((struct __mm_loadl_pi_struct*)__p)->__u;
+ __m128 __bb = __builtin_shufflevector(__b, __b, 0, 1, 0, 1);
+ return __builtin_shufflevector(__a, __bb, 4, 5, 2, 3);
+}
+
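+/* Illustrative sketch (not part of the original header): assembling a vector
+   of [4 x float] from two 64-bit halves with the load pair above. The helper
+   name and parameters are hypothetical. */
+static __inline__ __m128 __DEFAULT_FN_ATTRS
+__example_load_halves(const float *__lo2, const float *__hi2)
+{
+ __m128 __v = (__m128){ 0, 0, 0, 0 };
+ __v = _mm_loadl_pi(__v, (const __m64 *)__lo2); /* fills bits [63:0] */
+ __v = _mm_loadh_pi(__v, (const __m64 *)__hi2); /* fills bits [127:64] */
+ return __v;
+}
+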
+/// \brief Constructs a 128-bit floating-point vector of [4 x float]. The lower
+/// 32 bits of the vector are initialized with the single-precision
+/// floating-point value loaded from a specified memory location. The upper
+/// 96 bits are set to zero.
+///
+/// \headerfile <x86intrin.h>
+///
+/// This intrinsic corresponds to the \c VMOVSS / MOVSS instruction.
+///
+/// \param __p
+/// A pointer to a 32-bit memory location containing a single-precision
+/// floating-point value.
+/// \returns An initialized 128-bit floating-point vector of [4 x float]. The
+/// lower 32 bits contain the value loaded from the memory location. The
+/// upper 96 bits are set to zero.
+static __inline__ __m128 __DEFAULT_FN_ATTRS
+_mm_load_ss(const float *__p)
+{
+ struct __mm_load_ss_struct {
+ float __u;
+ } __attribute__((__packed__, __may_alias__));
+ float __u = ((struct __mm_load_ss_struct*)__p)->__u;
+ return (__m128){ __u, 0, 0, 0 };
+}
+
+/// \brief Loads a 32-bit float value and duplicates it to all four vector
+/// elements of a 128-bit vector of [4 x float].
+///
+/// \headerfile <x86intrin.h>
+///
+/// This intrinsic corresponds to the \c VMOVSS / MOVSS + \c shuffling
+/// instruction.
+///
+/// \param __p
+/// A pointer to a float value to be loaded and duplicated.
+/// \returns A 128-bit vector of [4 x float] containing the loaded
+/// and duplicated values.
+static __inline__ __m128 __DEFAULT_FN_ATTRS
+_mm_load1_ps(const float *__p)
+{
+ struct __mm_load1_ps_struct {
+ float __u;
+ } __attribute__((__packed__, __may_alias__));
+ float __u = ((struct __mm_load1_ps_struct*)__p)->__u;
+ return (__m128){ __u, __u, __u, __u };
+}
+
+#define _mm_load_ps1(p) _mm_load1_ps(p)
+
+/// \brief Loads a 128-bit floating-point vector of [4 x float] from an aligned
+/// memory location.
+///
+/// \headerfile <x86intrin.h>
+///
+/// This intrinsic corresponds to the \c VMOVAPS / MOVAPS instruction.
+///
+/// \param __p
+/// A pointer to a 128-bit memory location. The address of the memory
+/// location has to be 128-bit aligned.
+/// \returns A 128-bit vector of [4 x float] containing the loaded values.
+static __inline__ __m128 __DEFAULT_FN_ATTRS
+_mm_load_ps(const float *__p)
+{
+ return *(__m128*)__p;
+}
+
+/// \brief Loads a 128-bit floating-point vector of [4 x float] from an
+/// unaligned memory location.
+///
+/// \headerfile <x86intrin.h>
+///
+/// This intrinsic corresponds to the \c VMOVUPS / MOVUPS instruction.
+///
+/// \param __p
+/// A pointer to a 128-bit memory location. The address of the memory
+/// location does not have to be aligned.
+/// \returns A 128-bit vector of [4 x float] containing the loaded values.
+static __inline__ __m128 __DEFAULT_FN_ATTRS
+_mm_loadu_ps(const float *__p)
+{
+ struct __loadu_ps {
+ __m128 __v;
+ } __attribute__((__packed__, __may_alias__));
+ return ((struct __loadu_ps*)__p)->__v;
+}
+
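+/* Illustrative sketch (not part of the original header): _mm_load_ps requires
+   a 16-byte aligned address while _mm_loadu_ps does not, so a caller that
+   cannot guarantee alignment can dispatch at run time. The helper name is
+   hypothetical. */
+static __inline__ __m128 __DEFAULT_FN_ATTRS
+__example_load_any(const float *__p)
+{
+ return (((__SIZE_TYPE__)__p & 15) == 0) ? _mm_load_ps(__p)
+                                         : _mm_loadu_ps(__p);
+}
+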
+/// \brief Loads four packed float values, in reverse order, from an aligned
+/// memory location to 32-bit elements in a 128-bit vector of [4 x float].
+///
+/// \headerfile <x86intrin.h>
+///
+/// This intrinsic corresponds to the \c VMOVAPS / MOVAPS + \c shuffling
+/// instruction.
+///
+/// \param __p
+/// A pointer to a 128-bit memory location. The address of the memory
+/// location has to be 128-bit aligned.
+/// \returns A 128-bit vector of [4 x float] containing the moved values, loaded
+/// in reverse order.
+static __inline__ __m128 __DEFAULT_FN_ATTRS
+_mm_loadr_ps(const float *__p)
+{
+ __m128 __a = _mm_load_ps(__p);
+ return __builtin_shufflevector((__v4sf)__a, (__v4sf)__a, 3, 2, 1, 0);
+}
+
+/// \brief Creates a 128-bit vector of [4 x float] with undefined values.
+///
+/// \headerfile <x86intrin.h>
+///
+/// This intrinsic has no corresponding instruction.
+///
+/// \returns A 128-bit vector of [4 x float] containing undefined values.
+static __inline__ __m128 __DEFAULT_FN_ATTRS
+_mm_undefined_ps(void)
+{
+ return (__m128)__builtin_ia32_undef128();
+}
+
+/// \brief Constructs a 128-bit floating-point vector of [4 x float]. The lower
+/// 32 bits of the vector are initialized with the specified single-precision
+/// floating-point value. The upper 96 bits are set to zero.
+///
+/// \headerfile <x86intrin.h>
+///
+/// This intrinsic corresponds to the \c VMOVSS / MOVSS instruction.
+///
+/// \param __w
+/// A single-precision floating-point value used to initialize the lower 32
+/// bits of the result.
+/// \returns An initialized 128-bit floating-point vector of [4 x float]. The
+/// lower 32 bits contain the value provided in the source operand. The
+/// upper 96 bits are set to zero.
+static __inline__ __m128 __DEFAULT_FN_ATTRS
+_mm_set_ss(float __w)
+{
+ return (__m128){ __w, 0, 0, 0 };
+}
+
+/// \brief Constructs a 128-bit floating-point vector of [4 x float], with each
+/// of the four single-precision floating-point vector elements set to the
+/// specified single-precision floating-point value.
+///
+/// \headerfile <x86intrin.h>
+///
+/// This intrinsic corresponds to the \c VPERMILPS / PERMILPS instruction.
+///
+/// \param __w
+/// A single-precision floating-point value used to initialize each vector
+/// element of the result.
+/// \returns An initialized 128-bit floating-point vector of [4 x float].
+static __inline__ __m128 __DEFAULT_FN_ATTRS
+_mm_set1_ps(float __w)
+{
+ return (__m128){ __w, __w, __w, __w };
+}
+
+/* Microsoft specific. */
+/// \brief Constructs a 128-bit floating-point vector of [4 x float], with each
+/// of the four single-precision floating-point vector elements set to the
+/// specified single-precision floating-point value.
+///
+/// \headerfile <x86intrin.h>
+///
+/// This intrinsic corresponds to the \c VPERMILPS / PERMILPS instruction.
+///
+/// \param __w
+/// A single-precision floating-point value used to initialize each vector
+/// element of the result.
+/// \returns An initialized 128-bit floating-point vector of [4 x float].
+static __inline__ __m128 __DEFAULT_FN_ATTRS
+_mm_set_ps1(float __w)
+{
+ return _mm_set1_ps(__w);
+}
+
+/// \brief Constructs a 128-bit floating-point vector of [4 x float]
+/// initialized with the specified single-precision floating-point values.
+///
+/// \headerfile <x86intrin.h>
+///
+/// This intrinsic is a utility function and does not correspond to a specific
+/// instruction.
+///
+/// \param __z
+/// A single-precision floating-point value used to initialize bits [127:96]
+/// of the result.
+/// \param __y
+/// A single-precision floating-point value used to initialize bits [95:64]
+/// of the result.
+/// \param __x
+/// A single-precision floating-point value used to initialize bits [63:32]
+/// of the result.
+/// \param __w
+/// A single-precision floating-point value used to initialize bits [31:0]
+/// of the result.
+/// \returns An initialized 128-bit floating-point vector of [4 x float].
+static __inline__ __m128 __DEFAULT_FN_ATTRS
+_mm_set_ps(float __z, float __y, float __x, float __w)
+{
+ return (__m128){ __w, __x, __y, __z };
+}
+
+/// \brief Constructs a 128-bit floating-point vector of [4 x float],
+/// initialized in reverse order with the specified 32-bit single-precision
+/// floating-point values.
+///
+/// \headerfile <x86intrin.h>
+///
+/// This intrinsic is a utility function and does not correspond to a specific
+/// instruction.
+///
+/// \param __z
+/// A single-precision floating-point value used to initialize bits [31:0]
+/// of the result.
+/// \param __y
+/// A single-precision floating-point value used to initialize bits [63:32]
+/// of the result.
+/// \param __x
+/// A single-precision floating-point value used to initialize bits [95:64]
+/// of the result.
+/// \param __w
+/// A single-precision floating-point value used to initialize bits [127:96]
+/// of the result.
+/// \returns An initialized 128-bit floating-point vector of [4 x float].
+static __inline__ __m128 __DEFAULT_FN_ATTRS
+_mm_setr_ps(float __z, float __y, float __x, float __w)
+{
+ return (__m128){ __z, __y, __x, __w };
+}
+
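+/* Illustrative sketch (not part of the original header): _mm_set_ps takes its
+   arguments from the highest element down while _mm_setr_ps takes them from
+   the lowest element up, so both vectors below hold { 1, 2, 3, 4 } in element
+   order 0..3. */
+static __inline__ __m128 __DEFAULT_FN_ATTRS
+__example_set_order(void)
+{
+ __m128 __hi_first = _mm_set_ps(4.0f, 3.0f, 2.0f, 1.0f);
+ __m128 __lo_first = _mm_setr_ps(1.0f, 2.0f, 3.0f, 4.0f);
+ return _mm_add_ps(__hi_first, __lo_first); /* { 2, 4, 6, 8 } */
+}
+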
+/// \brief Constructs a 128-bit floating-point vector of [4 x float] initialized
+/// to zero.
+///
+/// \headerfile <x86intrin.h>
+///
+/// This intrinsic corresponds to the \c VXORPS / XORPS instruction.
+///
+/// \returns An initialized 128-bit floating-point vector of [4 x float] with
+/// all elements set to zero.
+static __inline__ __m128 __DEFAULT_FN_ATTRS
+_mm_setzero_ps(void)
+{
+ return (__m128){ 0, 0, 0, 0 };
+}
+
+/// \brief Stores the upper 64 bits of a 128-bit vector of [4 x float] to a
+/// memory location.
+///
+/// \headerfile <x86intrin.h>
+///
+/// This intrinsic corresponds to the \c VMOVHPS / MOVHPS instruction.
+///
+/// \param __p
+/// A pointer to a 64-bit memory location.
+/// \param __a
+/// A 128-bit vector of [4 x float] containing the values to be stored.
+static __inline__ void __DEFAULT_FN_ATTRS
+_mm_storeh_pi(__m64 *__p, __m128 __a)
+{
+ __builtin_ia32_storehps((__v2si *)__p, (__v4sf)__a);
+}
+
+/// \brief Stores the lower 64 bits of a 128-bit vector of [4 x float] to a
+/// memory location.
+///
+/// \headerfile <x86intrin.h>
+///
+/// This intrinsic corresponds to the \c VMOVLPS / MOVLPS instruction.
+///
+/// \param __p
+/// A pointer to a memory location that will receive the float values.
+/// \param __a
+/// A 128-bit vector of [4 x float] containing the values to be stored.
+static __inline__ void __DEFAULT_FN_ATTRS
+_mm_storel_pi(__m64 *__p, __m128 __a)
+{
+ __builtin_ia32_storelps((__v2si *)__p, (__v4sf)__a);
+}
+
+/// \brief Stores the lower 32 bits of a 128-bit vector of [4 x float] to a
+/// memory location.
+///
+/// \headerfile <x86intrin.h>
+///
+/// This intrinsic corresponds to the \c VMOVSS / MOVSS instruction.
+///
+/// \param __p
+/// A pointer to a 32-bit memory location.
+/// \param __a
+/// A 128-bit vector of [4 x float] containing the value to be stored.
+static __inline__ void __DEFAULT_FN_ATTRS
+_mm_store_ss(float *__p, __m128 __a)
+{
+ struct __mm_store_ss_struct {
+ float __u;
+ } __attribute__((__packed__, __may_alias__));
+ ((struct __mm_store_ss_struct*)__p)->__u = __a[0];
+}
+
+/// \brief Stores float values from a 128-bit vector of [4 x float] to an
+/// unaligned memory location.
+///
+/// \headerfile <x86intrin.h>
+///
+/// This intrinsic corresponds to the \c VMOVUPS / MOVUPS instruction.
+///
+/// \param __p
+/// A pointer to a 128-bit memory location. The address of the memory
+/// location does not have to be aligned.
+/// \param __a
+/// A 128-bit vector of [4 x float] containing the values to be stored.
+static __inline__ void __DEFAULT_FN_ATTRS
+_mm_storeu_ps(float *__p, __m128 __a)
+{
+ struct __storeu_ps {
+ __m128 __v;
+ } __attribute__((__packed__, __may_alias__));
+ ((struct __storeu_ps*)__p)->__v = __a;
+}
+
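+/* Illustrative sketch (not part of the original header): an unaligned
+   16-byte copy built from the unaligned load/store pair above. */
+static __inline__ void __DEFAULT_FN_ATTRS
+__example_copy4(float *__dst, const float *__src)
+{
+ _mm_storeu_ps(__dst, _mm_loadu_ps(__src));
+}
+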
+/// \brief Stores a 128-bit vector of [4 x float] into an aligned memory
+/// location.
+///
+/// \headerfile <x86intrin.h>
+///
+/// This intrinsic corresponds to the \c VMOVAPS / MOVAPS instruction.
+///
+/// \param __p
+/// A pointer to a 128-bit memory location.
+/// \param __a
+/// A 128-bit vector of [4 x float] containing the values to be stored.
+static __inline__ void __DEFAULT_FN_ATTRS
+_mm_store_ps(float *__p, __m128 __a)
+{
+ *(__m128*)__p = __a;
+}
+
+/// \brief Stores the lower 32 bits of a 128-bit vector of [4 x float] into
+/// four contiguous elements in an aligned memory location.
+///
+/// \headerfile <x86intrin.h>
+///
+/// This intrinsic corresponds to \c VMOVAPS / MOVAPS + \c shuffling
+/// instruction.
+///
+/// \param __p
+/// A pointer to a 128-bit memory location.
+/// \param __a
+/// A 128-bit vector of [4 x float] whose lower 32 bits are stored to each
+/// of the four contiguous elements pointed to by __p.
+static __inline__ void __DEFAULT_FN_ATTRS
+_mm_store1_ps(float *__p, __m128 __a)
+{
+ __a = __builtin_shufflevector((__v4sf)__a, (__v4sf)__a, 0, 0, 0, 0);
+ _mm_store_ps(__p, __a);
+}
+
+/// \brief Stores the lower 32 bits of a 128-bit vector of [4 x float] into
+/// four contiguous elements in an aligned memory location.
+///
+/// \headerfile <x86intrin.h>
+///
+/// This intrinsic corresponds to \c VMOVAPS / MOVAPS + \c shuffling
+/// instruction.
+///
+/// \param __p
+/// A pointer to a 128-bit memory location. The address of the memory
+/// location has to be 128-bit aligned.
+/// \param __a
+/// A 128-bit vector of [4 x float] containing the values to be stored.
+static __inline__ void __DEFAULT_FN_ATTRS
+_mm_store_ps1(float *__p, __m128 __a)
+{
+ return _mm_store1_ps(__p, __a);
+}
+
+/// \brief Stores float values from a 128-bit vector of [4 x float] to an
+/// aligned memory location in reverse order.
+///
+/// \headerfile <x86intrin.h>
+///
+/// This intrinsic corresponds to the \c VMOVAPS / MOVAPS + \c shuffling
+/// instruction.
+///
+/// \param __p
+/// A pointer to a 128-bit memory location. The address of the memory
+/// location has to be 128-bit aligned.
+/// \param __a
+/// A 128-bit vector of [4 x float] containing the values to be stored.
+static __inline__ void __DEFAULT_FN_ATTRS
+_mm_storer_ps(float *__p, __m128 __a)
+{
+ __a = __builtin_shufflevector((__v4sf)__a, (__v4sf)__a, 3, 2, 1, 0);
+ _mm_store_ps(__p, __a);
+}
+
+#define _MM_HINT_T0 3
+#define _MM_HINT_T1 2
+#define _MM_HINT_T2 1
+#define _MM_HINT_NTA 0
+
+#ifndef _MSC_VER
+/* FIXME: We have to #define this because "sel" must be a constant integer, and
+ Sema doesn't do any form of constant propagation yet. */
+
+/// \brief Loads one cache line of data from the specified address to a location
+/// closer to the processor.
+///
+/// \headerfile <x86intrin.h>
+///
+/// \code
+/// void _mm_prefetch(const void * a, const int sel);
+/// \endcode
+///
+/// This intrinsic corresponds to one of the \c PREFETCHNTA, \c PREFETCHT0,
+/// \c PREFETCHT1, or \c PREFETCHT2 instructions, as selected by the sel
+/// operand.
+///
+/// \param a
+/// A pointer to a memory location containing a cache line of data.
+/// \param sel
+/// A predefined integer constant specifying the type of prefetch operation:
+/// _MM_HINT_NTA: Move data using the non-temporal access (NTA) hint.
+/// The PREFETCHNTA instruction will be generated.
+/// _MM_HINT_T0: Move data using the T0 hint. The PREFETCHT0 instruction will
+/// be generated.
+/// _MM_HINT_T1: Move data using the T1 hint. The PREFETCHT1 instruction will
+/// be generated.
+/// _MM_HINT_T2: Move data using the T2 hint. The PREFETCHT2 instruction will
+/// be generated.
+#define _mm_prefetch(a, sel) (__builtin_prefetch((void *)(a), 0, (sel)))
+#endif
+
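+#ifndef _MSC_VER
+/* Illustrative sketch (not part of the original header): prefetching a fixed
+   distance ahead while walking an array. The 16-float (64-byte) distance is
+   a hypothetical tuning choice, and the helper name is hypothetical. */
+static __inline__ __m128 __DEFAULT_FN_ATTRS
+__example_sum_ps(const float *__p, int __n)
+{
+ __m128 __s = (__m128){ 0, 0, 0, 0 };
+ int __i;
+ for (__i = 0; __i + 4 <= __n; __i += 4) {
+   _mm_prefetch(__p + __i + 16, _MM_HINT_T0);
+   __s = _mm_add_ps(__s, _mm_loadu_ps(__p + __i));
+ }
+ return __s;
+}
+#endif
+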
+/// \brief Stores a 64-bit integer in the specified aligned memory location. To
+/// minimize caching, the data is flagged as non-temporal (unlikely to be
+/// used again soon).
+///
+/// \headerfile <x86intrin.h>
+///
+/// This intrinsic corresponds to the \c MOVNTQ instruction.
+///
+/// \param __p
+/// A pointer to an aligned memory location used to store the register value.
+/// \param __a
+/// A 64-bit integer containing the value to be stored.
+static __inline__ void __DEFAULT_FN_ATTRS
+_mm_stream_pi(__m64 *__p, __m64 __a)
+{
+ __builtin_ia32_movntq(__p, __a);
+}
+
+/// \brief Moves packed float values from a 128-bit vector of [4 x float] to a
+/// 128-bit aligned memory location. To minimize caching, the data is flagged
+/// as non-temporal (unlikely to be used again soon).
+///
+/// \headerfile <x86intrin.h>
+///
+/// This intrinsic corresponds to the \c VMOVNTPS / MOVNTPS instruction.
+///
+/// \param __p
+/// A pointer to a 128-bit aligned memory location that will receive the
+/// float values.
+/// \param __a
+/// A 128-bit vector of [4 x float] containing the values to be moved.
+static __inline__ void __DEFAULT_FN_ATTRS
+_mm_stream_ps(float *__p, __m128 __a)
+{
+ __builtin_nontemporal_store((__v4sf)__a, (__v4sf*)__p);
+}
+
+/// \brief Forces strong memory ordering (serialization) between store
+/// instructions preceding this instruction and store instructions following
+/// this instruction, ensuring the system completes all previous stores
+/// before executing subsequent stores.
+///
+/// \headerfile <x86intrin.h>
+///
+/// This intrinsic corresponds to the \c SFENCE instruction.
+///
+static __inline__ void __DEFAULT_FN_ATTRS
+_mm_sfence(void)
+{
+ __builtin_ia32_sfence();
+}
+
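+/* Illustrative sketch (not part of the original header): non-temporal stores
+   are weakly ordered, so a streaming loop is typically closed with
+   _mm_sfence before the data is published to another thread. __dst is
+   assumed to be 16-byte aligned and __n a multiple of 4; all names here are
+   hypothetical. */
+static __inline__ void __DEFAULT_FN_ATTRS
+__example_stream_fill(float *__dst, __m128 __v, int __n)
+{
+ int __i;
+ for (__i = 0; __i < __n; __i += 4)
+   _mm_stream_ps(__dst + __i, __v);
+ _mm_sfence();
+}
+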
+/// \brief Extracts a 16-bit element from a 64-bit vector of [4 x i16] and
+/// returns it, as specified by the immediate integer operand.
+///
+/// \headerfile <x86intrin.h>
+///
+/// This intrinsic corresponds to the \c VPEXTRW / PEXTRW instruction.
+///
+/// \param __a
+/// A 64-bit vector of [4 x i16].
+/// \param __n
+/// An immediate integer operand that determines which bits are extracted:
+/// 0: Bits [15:0] are copied to the destination.
+/// 1: Bits [31:16] are copied to the destination.
+/// 2: Bits [47:32] are copied to the destination.
+/// 3: Bits [63:48] are copied to the destination.
+/// \returns A 16-bit integer containing the extracted 16 bits of packed data.
+#define _mm_extract_pi16(a, n) __extension__ ({ \
+ (int)__builtin_ia32_vec_ext_v4hi((__m64)(a), (int)(n)); })
+
+/// \brief Copies data from the 64-bit vector of [4 x i16] to the destination,
+/// and inserts the lower 16 bits of an integer operand at the 16-bit offset
+/// specified by the immediate operand __n.
+///
+/// \headerfile <x86intrin.h>
+///
+/// This intrinsic corresponds to the \c VPINSRW / PINSRW instruction.
+///
+/// \param __a
+/// A 64-bit vector of [4 x i16].
+/// \param __d
+/// An integer. The lower 16-bit value from this operand is written to the
+/// destination at the offset specified by operand __n.
+/// \param __n
+/// An immediate integer operand that determines which bits in the
+/// destination are written:
+/// 0: Bits [15:0] are copied to the destination.
+/// 1: Bits [31:16] are copied to the destination.
+/// 2: Bits [47:32] are copied to the destination.
+/// 3: Bits [63:48] are copied to the destination.
+/// The remaining bits in the destination are copied from the corresponding
+/// bits in operand __a.
+/// \returns A 64-bit integer vector containing the copied packed data from the
+/// operands.
+#define _mm_insert_pi16(a, d, n) __extension__ ({ \
+ (__m64)__builtin_ia32_vec_set_v4hi((__m64)(a), (int)(d), (int)(n)); })
+
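+/* Illustrative sketch (not part of the original header): reading lane 0 and
+   writing it to lane 2 of a [4 x i16] vector. The lane selectors must be
+   integer constants. */
+static __inline__ __m64 __DEFAULT_FN_ATTRS
+__example_dup_low_lane(__m64 __a)
+{
+ int __low = _mm_extract_pi16(__a, 0);
+ return _mm_insert_pi16(__a, __low, 2);
+}
+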
+/// \brief Compares each of the corresponding packed 16-bit integer values of
+/// the 64-bit integer vectors, and writes the greater value to the
+/// corresponding bits in the destination.
+///
+/// \headerfile <x86intrin.h>
+///
+/// This intrinsic corresponds to the \c PMAXSW instruction.
+///
+/// \param __a
+/// A 64-bit integer vector containing one of the source operands.
+/// \param __b
+/// A 64-bit integer vector containing one of the source operands.
+/// \returns A 64-bit integer vector containing the comparison results.
+static __inline__ __m64 __DEFAULT_FN_ATTRS
+_mm_max_pi16(__m64 __a, __m64 __b)
+{
+ return (__m64)__builtin_ia32_pmaxsw((__v4hi)__a, (__v4hi)__b);
+}
+
+/// \brief Compares each of the corresponding packed 8-bit unsigned integer
+/// values of the 64-bit integer vectors, and writes the greater value to the
+/// corresponding bits in the destination.
+///
+/// \headerfile <x86intrin.h>
+///
+/// This intrinsic corresponds to the \c PMAXUB instruction.
+///
+/// \param __a
+/// A 64-bit integer vector containing one of the source operands.
+/// \param __b
+/// A 64-bit integer vector containing one of the source operands.
+/// \returns A 64-bit integer vector containing the comparison results.
+static __inline__ __m64 __DEFAULT_FN_ATTRS
+_mm_max_pu8(__m64 __a, __m64 __b)
+{
+ return (__m64)__builtin_ia32_pmaxub((__v8qi)__a, (__v8qi)__b);
+}
+
+/// \brief Compares each of the corresponding packed 16-bit integer values of
+/// the 64-bit integer vectors, and writes the lesser value to the
+/// corresponding bits in the destination.
+///
+/// \headerfile <x86intrin.h>
+///
+/// This intrinsic corresponds to the \c PMINSW instruction.
+///
+/// \param __a
+/// A 64-bit integer vector containing one of the source operands.
+/// \param __b
+/// A 64-bit integer vector containing one of the source operands.
+/// \returns A 64-bit integer vector containing the comparison results.
+static __inline__ __m64 __DEFAULT_FN_ATTRS
+_mm_min_pi16(__m64 __a, __m64 __b)
+{
+ return (__m64)__builtin_ia32_pminsw((__v4hi)__a, (__v4hi)__b);
+}
+
+/// \brief Compares each of the corresponding packed 8-bit unsigned integer
+/// values of the 64-bit integer vectors, and writes the lesser value to the
+/// corresponding bits in the destination.
+///
+/// \headerfile <x86intrin.h>
+///
+/// This intrinsic corresponds to the \c PMINUB instruction.
+///
+/// \param __a
+/// A 64-bit integer vector containing one of the source operands.
+/// \param __b
+/// A 64-bit integer vector containing one of the source operands.
+/// \returns A 64-bit integer vector containing the comparison results.
+static __inline__ __m64 __DEFAULT_FN_ATTRS
+_mm_min_pu8(__m64 __a, __m64 __b)
+{
+ return (__m64)__builtin_ia32_pminub((__v8qi)__a, (__v8qi)__b);
+}
+
+/// \brief Takes the most significant bit from each 8-bit element in a 64-bit
+/// integer vector to create an 8-bit mask value. Zero-extends the value to a
+/// 32-bit integer and writes it to the destination.
+///
+/// \headerfile <x86intrin.h>
+///
+/// This intrinsic corresponds to the \c PMOVMSKB instruction.
+///
+/// \param __a
+/// A 64-bit integer vector containing the values with bits to be extracted.
+/// \returns The most significant bit from each 8-bit element in the operand,
+/// written to bits [7:0].
+static __inline__ int __DEFAULT_FN_ATTRS
+_mm_movemask_pi8(__m64 __a)
+{
+ return __builtin_ia32_pmovmskb((__v8qi)__a);
+}
+
+/// \brief Multiplies packed 16-bit unsigned integer values and writes the
+/// high-order 16 bits of each 32-bit product to the corresponding bits in
+/// the destination.
+///
+/// \headerfile <x86intrin.h>
+///
+/// This intrinsic corresponds to the \c PMULHUW instruction.
+///
+/// \param __a
+/// A 64-bit integer vector containing one of the source operands.
+/// \param __b
+/// A 64-bit integer vector containing one of the source operands.
+/// \returns A 64-bit integer vector containing the products of both operands.
+static __inline__ __m64 __DEFAULT_FN_ATTRS
+_mm_mulhi_pu16(__m64 __a, __m64 __b)
+{
+ return (__m64)__builtin_ia32_pmulhuw((__v4hi)__a, (__v4hi)__b);
+}
+
+/// \brief Shuffles the 4 16-bit integers from a 64-bit integer vector to the
+/// destination, as specified by the immediate value operand.
+///
+/// \headerfile <x86intrin.h>
+///
+/// This intrinsic corresponds to the \c PSHUFW instruction.
+///
+/// \code
+/// __m64 _mm_shuffle_pi16(__m64 a, const int n);
+/// \endcode
+///
+/// \param a
+/// A 64-bit integer vector containing the values to be shuffled.
+/// \param n
+/// An immediate value containing an 8-bit value specifying which elements to
+/// copy from a. The destinations within the 64-bit destination are assigned
+/// values as follows:
+/// Bits [1:0] are used to assign values to bits [15:0] in the destination.
+/// Bits [3:2] are used to assign values to bits [31:16] in the destination.
+/// Bits [5:4] are used to assign values to bits [47:32] in the destination.
+/// Bits [7:6] are used to assign values to bits [63:48] in the destination.
+/// Bit value assignments:
+/// 00: assigned from bits [15:0] of a.
+/// 01: assigned from bits [31:16] of a.
+/// 10: assigned from bits [47:32] of a.
+/// 11: assigned from bits [63:48] of a.
+/// \returns A 64-bit integer vector containing the shuffled values.
+#define _mm_shuffle_pi16(a, n) __extension__ ({ \
+ (__m64)__builtin_ia32_pshufw((__v4hi)(__m64)(a), (n)); })
+
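+/* Illustrative sketch (not part of the original header): reversing the four
+   16-bit lanes. The selector 0x1B (0b00011011) assigns source lanes 3, 2, 1,
+   0 to destination lanes 0, 1, 2, 3. */
+static __inline__ __m64 __DEFAULT_FN_ATTRS
+__example_reverse_pi16(__m64 __a)
+{
+ return _mm_shuffle_pi16(__a, 0x1B);
+}
+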
+/// \brief Conditionally copies the values from each 8-bit element in the first
+/// 64-bit integer vector operand to the specified memory location, as
+/// specified by the most significant bit in the corresponding element in the
+/// second 64-bit integer vector operand. To minimize caching, the data is
+/// flagged as non-temporal (unlikely to be used again soon).
+///
+/// \headerfile <x86intrin.h>
+///
+/// This intrinsic corresponds to the \c MASKMOVQ instruction.
+///
+/// \param __d
+/// A 64-bit integer vector containing the values with elements to be copied.
+/// \param __n
+/// A 64-bit integer vector operand. The most significant bit from each 8-bit
+/// element determines whether the corresponding element in operand __d is
+/// copied. If the most significant bit of a given element is 1, the
+/// corresponding element in operand __d is copied.
+/// \param __p
+/// A pointer to a 64-bit memory location that will receive the conditionally
+/// copied integer values. The address of the memory location does not have
+/// to be aligned.
+static __inline__ void __DEFAULT_FN_ATTRS
+_mm_maskmove_si64(__m64 __d, __m64 __n, char *__p)
+{
+ __builtin_ia32_maskmovq((__v8qi)__d, (__v8qi)__n, __p);
+}
+
+/// \brief Computes the rounded averages of the packed unsigned 8-bit integer
+/// values and writes the averages to the corresponding bits in the
+/// destination.
+///
+/// \headerfile <x86intrin.h>
+///
+/// This intrinsic corresponds to the \c PAVGB instruction.
+///
+/// \param __a
+/// A 64-bit integer vector containing one of the source operands.
+/// \param __b
+/// A 64-bit integer vector containing one of the source operands.
+/// \returns A 64-bit integer vector containing the averages of both operands.
+static __inline__ __m64 __DEFAULT_FN_ATTRS
+_mm_avg_pu8(__m64 __a, __m64 __b)
+{
+ return (__m64)__builtin_ia32_pavgb((__v8qi)__a, (__v8qi)__b);
+}
+
+/// \brief Computes the rounded averages of the packed unsigned 16-bit integer
+/// values and writes the averages to the corresponding bits in the
+/// destination.
+///
+/// \headerfile <x86intrin.h>
+///
+/// This intrinsic corresponds to the \c PAVGW instruction.
+///
+/// \param __a
+/// A 64-bit integer vector containing one of the source operands.
+/// \param __b
+/// A 64-bit integer vector containing one of the source operands.
+/// \returns A 64-bit integer vector containing the averages of both operands.
+static __inline__ __m64 __DEFAULT_FN_ATTRS
+_mm_avg_pu16(__m64 __a, __m64 __b)
+{
+ return (__m64)__builtin_ia32_pavgw((__v4hi)__a, (__v4hi)__b);
+}
+
+/// \brief Subtracts the corresponding 8-bit unsigned integer values of the two
+/// 64-bit vector operands and computes the absolute value of each
+/// difference. The sum of the eight absolute differences is then written to
+/// bits [15:0] of the destination; the remaining bits [63:16] are cleared.
+///
+/// \headerfile <x86intrin.h>
+///
+/// This intrinsic corresponds to the \c PSADBW instruction.
+///
+/// \param __a
+/// A 64-bit integer vector containing one of the source operands.
+/// \param __b
+/// A 64-bit integer vector containing one of the source operands.
+/// \returns A 64-bit integer vector whose lower 16 bits contain the sums of the
+/// sets of absolute differences between both operands. The upper bits are
+/// cleared.
+static __inline__ __m64 __DEFAULT_FN_ATTRS
+_mm_sad_pu8(__m64 __a, __m64 __b)
+{
+ return (__m64)__builtin_ia32_psadbw((__v8qi)__a, (__v8qi)__b);
+}
+
+/// \brief Returns the contents of the MXCSR register as a 32-bit unsigned
+/// integer value. There are several groups of macros associated with this
+/// intrinsic, including:
+/// * For checking exception states: _MM_EXCEPT_INVALID, _MM_EXCEPT_DIV_ZERO,
+/// _MM_EXCEPT_DENORM, _MM_EXCEPT_OVERFLOW, _MM_EXCEPT_UNDERFLOW,
+/// _MM_EXCEPT_INEXACT. There is a convenience wrapper
+/// _MM_GET_EXCEPTION_STATE().
+/// * For checking exception masks: _MM_MASK_UNDERFLOW, _MM_MASK_OVERFLOW,
+/// _MM_MASK_INVALID, _MM_MASK_DENORM, _MM_MASK_DIV_ZERO, _MM_MASK_INEXACT.
+/// There is a convenience wrapper _MM_GET_EXCEPTION_MASK().
+/// * For checking rounding modes: _MM_ROUND_NEAREST, _MM_ROUND_DOWN,
+/// _MM_ROUND_UP, _MM_ROUND_TOWARD_ZERO. There is a convenience wrapper
+/// _MM_GET_ROUNDING_MODE().
+/// * For checking flush-to-zero mode: _MM_FLUSH_ZERO_ON, _MM_FLUSH_ZERO_OFF.
+/// There is a convenience wrapper _MM_GET_FLUSH_ZERO_MODE().
+/// * For checking denormals-are-zero mode: _MM_DENORMALS_ZERO_ON,
+/// _MM_DENORMALS_ZERO_OFF. There is a convenience wrapper
+/// _MM_GET_DENORMALS_ZERO_MODE().
+///
+/// For example, the expression below checks if an overflow exception has
+/// occurred:
+/// ( _mm_getcsr() & _MM_EXCEPT_OVERFLOW )
+///
+/// The following example gets the current rounding mode:
+/// _MM_GET_ROUNDING_MODE()
+///
+/// \headerfile <x86intrin.h>
+///
+/// This intrinsic corresponds to the \c VSTMXCSR / STMXCSR instruction.
+///
+/// \returns A 32-bit unsigned integer containing the contents of the MXCSR
+/// register.
+static __inline__ unsigned int __DEFAULT_FN_ATTRS
+_mm_getcsr(void)
+{
+ return __builtin_ia32_stmxcsr();
+}
+
+/// \brief Sets the MXCSR register with the 32-bit unsigned integer value. There
+/// are several groups of macros associated with this intrinsic, including:
+/// * For setting exception states: _MM_EXCEPT_INVALID, _MM_EXCEPT_DIV_ZERO,
+/// _MM_EXCEPT_DENORM, _MM_EXCEPT_OVERFLOW, _MM_EXCEPT_UNDERFLOW,
+/// _MM_EXCEPT_INEXACT. There is a convenience wrapper
+/// _MM_SET_EXCEPTION_STATE(x) where x is one of these macros.
+/// * For setting exception masks: _MM_MASK_UNDERFLOW, _MM_MASK_OVERFLOW,
+/// _MM_MASK_INVALID, _MM_MASK_DENORM, _MM_MASK_DIV_ZERO, _MM_MASK_INEXACT.
+/// There is a convenience wrapper _MM_SET_EXCEPTION_MASK(x) where x is one
+/// of these macros.
+/// * For setting rounding modes: _MM_ROUND_NEAREST, _MM_ROUND_DOWN,
+/// _MM_ROUND_UP, _MM_ROUND_TOWARD_ZERO. There is a convenience wrapper
+/// _MM_SET_ROUNDING_MODE(x) where x is one of these macros.
+/// * For setting flush-to-zero mode: _MM_FLUSH_ZERO_ON, _MM_FLUSH_ZERO_OFF.
+/// There is a convenience wrapper _MM_SET_FLUSH_ZERO_MODE(x) where x is
+/// one of these macros.
+/// * For setting denormals-are-zero mode: _MM_DENORMALS_ZERO_ON,
+/// _MM_DENORMALS_ZERO_OFF. There is a convenience wrapper
+/// _MM_SET_DENORMALS_ZERO_MODE(x) where x is one of these macros.
+///
+/// For example, the following expression causes subsequent floating-point
+/// operations to round up:
+/// _mm_setcsr(_mm_getcsr() | _MM_ROUND_UP)
+///
+/// The following example sets the DAZ and FTZ flags:
+/// void setFlags() {
+/// _MM_SET_FLUSH_ZERO_MODE(_MM_FLUSH_ZERO_ON);
+/// _MM_SET_DENORMALS_ZERO_MODE(_MM_DENORMALS_ZERO_ON);
+/// }
+///
+/// \headerfile <x86intrin.h>
+///
+/// This intrinsic corresponds to the \c VLDMXCSR / LDMXCSR instruction.
+///
+/// \param __i
+/// A 32-bit unsigned integer value to be written to the MXCSR register.
+static __inline__ void __DEFAULT_FN_ATTRS
+_mm_setcsr(unsigned int __i)
+{
+ __builtin_ia32_ldmxcsr(__i);
+}
+
+/// \brief Selects 4 float values from the 128-bit operands of [4 x float], as
+/// specified by the immediate value operand.
+///
+/// \headerfile <x86intrin.h>
+///
+/// \code
+/// __m128 _mm_shuffle_ps(__m128 a, __m128 b, const int mask);
+/// \endcode
+///
+/// This intrinsic corresponds to the \c VSHUFPS / SHUFPS instruction.
+///
+/// \param a
+/// A 128-bit vector of [4 x float].
+/// \param b
+/// A 128-bit vector of [4 x float].
+/// \param mask
+/// An immediate value containing an 8-bit value specifying which elements to
+/// copy from a and b.
+/// Bits [3:0] specify the values copied from operand a.
+/// Bits [7:4] specify the values copied from operand b. The destinations
+/// within the 128-bit destination are assigned values as follows:
+/// Bits [1:0] are used to assign values to bits [31:0] in the destination.
+/// Bits [3:2] are used to assign values to bits [63:32] in the destination.
+/// Bits [5:4] are used to assign values to bits [95:64] in the destination.
+/// Bits [7:6] are used to assign values to bits [127:96] in the destination.
+/// Bit value assignments:
+/// 00: Bits [31:0] copied from the specified operand.
+/// 01: Bits [63:32] copied from the specified operand.
+/// 10: Bits [95:64] copied from the specified operand.
+/// 11: Bits [127:96] copied from the specified operand.
+/// \returns A 128-bit vector of [4 x float] containing the shuffled values.
+#define _mm_shuffle_ps(a, b, mask) __extension__ ({ \
+ (__m128)__builtin_shufflevector((__v4sf)(__m128)(a), (__v4sf)(__m128)(b), \
+ 0 + (((mask) >> 0) & 0x3), \
+ 0 + (((mask) >> 2) & 0x3), \
+ 4 + (((mask) >> 4) & 0x3), \
+ 4 + (((mask) >> 6) & 0x3)); })
+
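+/* Illustrative sketch (not part of the original header): broadcasting element
+   0 of a vector to all four lanes. The selector 0x00 picks source element 0
+   of operand a for every destination lane. */
+static __inline__ __m128 __DEFAULT_FN_ATTRS
+__example_broadcast_low(__m128 __a)
+{
+ return _mm_shuffle_ps(__a, __a, 0x00);
+}
+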
+/// \brief Unpacks the high-order (index 2,3) values from two 128-bit vectors of
+/// [4 x float] and interleaves them into a 128-bit vector of [4 x
+/// float].
+///
+/// \headerfile <x86intrin.h>
+///
+/// This intrinsic corresponds to the \c VUNPCKHPS / UNPCKHPS instruction.
+///
+/// \param __a
+/// A 128-bit vector of [4 x float].
+/// Bits [95:64] are written to bits [31:0] of the destination.
+/// Bits [127:96] are written to bits [95:64] of the destination.
+/// \param __b
+/// A 128-bit vector of [4 x float].
+/// Bits [95:64] are written to bits [63:32] of the destination.
+/// Bits [127:96] are written to bits [127:96] of the destination.
+/// \returns A 128-bit vector of [4 x float] containing the interleaved values.
+static __inline__ __m128 __DEFAULT_FN_ATTRS
+_mm_unpackhi_ps(__m128 __a, __m128 __b)
+{
+ return __builtin_shufflevector((__v4sf)__a, (__v4sf)__b, 2, 6, 3, 7);
+}
+
+/// \brief Unpacks the low-order (index 0,1) values from two 128-bit vectors of
+/// [4 x float] and interleaves them into a 128-bit vector of [4 x
+/// float].
+///
+/// \headerfile <x86intrin.h>
+///
+/// This intrinsic corresponds to the \c VUNPCKLPS / UNPCKLPS instruction.
+///
+/// \param __a
+/// A 128-bit vector of [4 x float].
+/// Bits [31:0] are written to bits [31:0] of the destination.
+/// Bits [63:32] are written to bits [95:64] of the destination.
+/// \param __b
+/// A 128-bit vector of [4 x float].
+/// Bits [31:0] are written to bits [63:32] of the destination.
+/// Bits [63:32] are written to bits [127:96] of the destination.
+/// \returns A 128-bit vector of [4 x float] containing the interleaved values.
+static __inline__ __m128 __DEFAULT_FN_ATTRS
+_mm_unpacklo_ps(__m128 __a, __m128 __b)
+{
+ return __builtin_shufflevector((__v4sf)__a, (__v4sf)__b, 0, 4, 1, 5);
+}
+
+/// \brief Constructs a 128-bit floating-point vector of [4 x float]. The lower
+/// 32 bits are set to the lower 32 bits of the second parameter. The upper
+/// 96 bits are set to the upper 96 bits of the first parameter.
+///
+/// \headerfile <x86intrin.h>
+///
+/// This intrinsic corresponds to the \c VMOVSS / MOVSS instruction.
+///
+/// \param __a
+/// A 128-bit floating-point vector of [4 x float]. The upper 96 bits are
+/// written to the upper 96 bits of the result.
+/// \param __b
+/// A 128-bit floating-point vector of [4 x float]. The lower 32 bits are
+/// written to the lower 32 bits of the result.
+/// \returns A 128-bit floating-point vector of [4 x float].
+static __inline__ __m128 __DEFAULT_FN_ATTRS
+_mm_move_ss(__m128 __a, __m128 __b)
+{
+ return __builtin_shufflevector((__v4sf)__a, (__v4sf)__b, 4, 1, 2, 3);
+}
+
+/// \brief Constructs a 128-bit floating-point vector of [4 x float]. The lower
+/// 64 bits are set to the upper 64 bits of the second parameter. The upper
+/// 64 bits are set to the upper 64 bits of the first parameter.
+///
+/// \headerfile <x86intrin.h>
+///
+/// This intrinsic corresponds to the \c VUNPCKHPD / UNPCKHPD instruction.
+///
+/// \param __a
+/// A 128-bit floating-point vector of [4 x float]. The upper 64 bits are
+/// written to the upper 64 bits of the result.
+/// \param __b
+/// A 128-bit floating-point vector of [4 x float]. The upper 64 bits are
+/// written to the lower 64 bits of the result.
+/// \returns A 128-bit floating-point vector of [4 x float].
+static __inline__ __m128 __DEFAULT_FN_ATTRS
+_mm_movehl_ps(__m128 __a, __m128 __b)
+{
+ return __builtin_shufflevector((__v4sf)__a, (__v4sf)__b, 6, 7, 2, 3);
+}
+
+/// \brief Constructs a 128-bit floating-point vector of [4 x float]. The lower
+/// 64 bits are set to the lower 64 bits of the first parameter. The upper
+/// 64 bits are set to the lower 64 bits of the second parameter.
+///
+/// \headerfile <x86intrin.h>
+///
+/// This intrinsic corresponds to the \c VUNPCKLPD / UNPCKLPD instruction.
+///
+/// \param __a
+/// A 128-bit floating-point vector of [4 x float]. The lower 64 bits are
+/// written to the lower 64 bits of the result.
+/// \param __b
+/// A 128-bit floating-point vector of [4 x float]. The lower 64 bits are
+/// written to the upper 64 bits of the result.
+/// \returns A 128-bit floating-point vector of [4 x float].
+static __inline__ __m128 __DEFAULT_FN_ATTRS
+_mm_movelh_ps(__m128 __a, __m128 __b)
+{
+ return __builtin_shufflevector((__v4sf)__a, (__v4sf)__b, 0, 1, 4, 5);
+}
+
+/// \brief Converts a 64-bit vector of [4 x i16] into a 128-bit vector of [4 x
+/// float].
+///
+/// \headerfile <x86intrin.h>
+///
+/// This intrinsic corresponds to the \c CVTPI2PS + \c COMPOSITE instruction.
+///
+/// \param __a
+/// A 64-bit vector of [4 x i16]. The elements of the destination are copied
+/// from the corresponding elements in this operand.
+/// \returns A 128-bit vector of [4 x float] containing the copied and converted
+/// values from the operand.
+static __inline__ __m128 __DEFAULT_FN_ATTRS
+_mm_cvtpi16_ps(__m64 __a)
+{
+ __m64 __b, __c;
+ __m128 __r;
+
+ __b = _mm_setzero_si64();
+ __b = _mm_cmpgt_pi16(__b, __a);
+ __c = _mm_unpackhi_pi16(__a, __b);
+ __r = _mm_setzero_ps();
+ __r = _mm_cvtpi32_ps(__r, __c);
+ __r = _mm_movelh_ps(__r, __r);
+ __c = _mm_unpacklo_pi16(__a, __b);
+ __r = _mm_cvtpi32_ps(__r, __c);
+
+ return __r;
+}
+
+/// \brief Converts a 64-bit vector of 16-bit unsigned integer values into a
+/// 128-bit vector of [4 x float].
+///
+/// \headerfile <x86intrin.h>
+///
+/// This intrinsic corresponds to the \c CVTPI2PS + \c COMPOSITE instruction.
+///
+/// \param __a
+/// A 64-bit vector of 16-bit unsigned integer values. The elements of the
+/// destination are copied from the corresponding elements in this operand.
+/// \returns A 128-bit vector of [4 x float] containing the copied and converted
+/// values from the operand.
+static __inline__ __m128 __DEFAULT_FN_ATTRS
+_mm_cvtpu16_ps(__m64 __a)
+{
+ __m64 __b, __c;
+ __m128 __r;
+
+ __b = _mm_setzero_si64();
+ __c = _mm_unpackhi_pi16(__a, __b);
+ __r = _mm_setzero_ps();
+ __r = _mm_cvtpi32_ps(__r, __c);
+ __r = _mm_movelh_ps(__r, __r);
+ __c = _mm_unpacklo_pi16(__a, __b);
+ __r = _mm_cvtpi32_ps(__r, __c);
+
+ return __r;
+}
+
+/// \brief Converts the lower four 8-bit values from a 64-bit vector of [8 x i8]
+/// into a 128-bit vector of [4 x float].
+///
+/// \headerfile <x86intrin.h>
+///
+/// This intrinsic corresponds to the \c CVTPI2PS + \c COMPOSITE instruction.
+///
+/// \param __a
+/// A 64-bit vector of [8 x i8]. The elements of the destination are copied
+/// from the corresponding lower 4 elements in this operand.
+/// \returns A 128-bit vector of [4 x float] containing the copied and converted
+/// values from the operand.
+static __inline__ __m128 __DEFAULT_FN_ATTRS
+_mm_cvtpi8_ps(__m64 __a)
+{
+ __m64 __b;
+
+ __b = _mm_setzero_si64();
+ __b = _mm_cmpgt_pi8(__b, __a);
+ __b = _mm_unpacklo_pi8(__a, __b);
+
+ return _mm_cvtpi16_ps(__b);
+}
+
+/// \brief Converts the lower four unsigned 8-bit integer values from a 64-bit
+/// vector of [8 x u8] into a 128-bit vector of [4 x float].
+///
+/// \headerfile <x86intrin.h>
+///
+/// This intrinsic corresponds to the \c CVTPI2PS + \c COMPOSITE instruction.
+///
+/// \param __a
+/// A 64-bit vector of unsigned 8-bit integer values. The elements of the
+/// destination are copied from the corresponding lower 4 elements in this
+/// operand.
+/// \returns A 128-bit vector of [4 x float] containing the copied and converted
+/// values from the source operand.
+static __inline__ __m128 __DEFAULT_FN_ATTRS
+_mm_cvtpu8_ps(__m64 __a)
+{
+ __m64 __b;
+
+ __b = _mm_setzero_si64();
+ __b = _mm_unpacklo_pi8(__a, __b);
+
+ return _mm_cvtpi16_ps(__b);
+}
+
+/// \brief Converts the two 32-bit signed integer values from each 64-bit vector
+/// operand of [2 x i32] into a 128-bit vector of [4 x float].
+///
+/// \headerfile <x86intrin.h>
+///
+/// This intrinsic corresponds to the \c CVTPI2PS + \c COMPOSITE instruction.
+///
+/// \param __a
+/// A 64-bit vector of [2 x i32]. The lower elements of the destination are
+/// copied from the elements in this operand.
+/// \param __b
+/// A 64-bit vector of [2 x i32]. The upper elements of the destination are
+/// copied from the elements in this operand.
+/// \returns A 128-bit vector of [4 x float] whose lower 64 bits contain the
+/// copied and converted values from the first operand. The upper 64 bits
+/// contain the copied and converted values from the second operand.
+static __inline__ __m128 __DEFAULT_FN_ATTRS
+_mm_cvtpi32x2_ps(__m64 __a, __m64 __b)
+{
+ __m128 __c;
+
+ __c = _mm_setzero_ps();
+ __c = _mm_cvtpi32_ps(__c, __b);
+ __c = _mm_movelh_ps(__c, __c);
+
+ return _mm_cvtpi32_ps(__c, __a);
+}
+
+/// \brief Converts each single-precision floating-point element of a 128-bit
+/// floating-point vector of [4 x float] into a 16-bit signed integer, and
+/// packs the results into a 64-bit integer vector of [4 x i16]. If the
+/// floating-point element is NaN or infinity, or if the floating-point
+/// element is greater than 0x7FFFFFFF or less than -0x8000, it is converted
+/// to 0x8000. Otherwise if the floating-point element is greater
+/// than 0x7FFF, it is converted to 0x7FFF.
+///
+/// \headerfile <x86intrin.h>
+///
+/// This intrinsic corresponds to the \c CVTPS2PI + \c COMPOSITE instruction.
+///
+/// \param __a
+/// A 128-bit floating-point vector of [4 x float].
+/// \returns A 64-bit integer vector of [4 x i16] containing the converted
+/// values.
+static __inline__ __m64 __DEFAULT_FN_ATTRS
+_mm_cvtps_pi16(__m128 __a)
+{
+ __m64 __b, __c;
+
+ __b = _mm_cvtps_pi32(__a);
+ __a = _mm_movehl_ps(__a, __a);
+ __c = _mm_cvtps_pi32(__a);
+
+ return _mm_packs_pi32(__b, __c);
+}
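+/* Editorial note (illustrative, not from the original header): under the
+ * default round-to-nearest mode, the elements 1.5f, 40000.0f, -1e9f and 1e9f
+ * convert to 2, 0x7FFF, 0x8000 and 0x7FFF respectively; 1.5 rounds to even,
+ * and the pack step saturates anything outside the signed 16-bit range. */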
+
+/// \brief Converts each single-precision floating-point element of a 128-bit
+/// floating-point vector of [4 x float] into an 8-bit signed integer, and
+/// packs the results into the lower 32 bits of a 64-bit integer vector of
+/// [8 x i8]. The upper 32 bits of the vector are set to 0. If the
+/// floating-point element is NaN or infinity, or if the floating-point
+/// element is greater than 0x7FFFFFFF or less than -0x80, it is converted
+/// to 0x80. Otherwise if the floating-point element is greater
+/// than 0x7F, it is converted to 0x7F.
+///
+/// \headerfile <x86intrin.h>
+///
+/// This intrinsic corresponds to the \c CVTPS2PI + \c COMPOSITE instruction.
+///
+/// \param __a
+/// A 128-bit floating-point vector of [4 x float].
+/// \returns A 64-bit integer vector of [8 x i8]. The lower 32 bits contain the
+/// converted values and the upper 32 bits are set to zero.
+static __inline__ __m64 __DEFAULT_FN_ATTRS
+_mm_cvtps_pi8(__m128 __a)
+{
+ __m64 __b, __c;
+
+ __b = _mm_cvtps_pi16(__a);
+ __c = _mm_setzero_si64();
+
+ return _mm_packs_pi16(__b, __c);
+}
+
+/// \brief Extracts the sign bits from each single-precision floating-point
+/// element of a 128-bit floating-point vector of [4 x float] and returns the
+/// sign bits in bits [3:0] of the result. Bits [31:4] of the result are set
+/// to zero.
+///
+/// \headerfile <x86intrin.h>
+///
+/// This intrinsic corresponds to the \c VMOVMSKPS / MOVMSKPS instruction.
+///
+/// \param __a
+/// A 128-bit floating-point vector of [4 x float].
+/// \returns A 32-bit integer value. Bits [3:0] contain the sign bits from each
+/// single-precision floating-point element of the parameter. Bits [31:4] are
+/// set to zero.
+static __inline__ int __DEFAULT_FN_ATTRS
+_mm_movemask_ps(__m128 __a)
+{
+ return __builtin_ia32_movmskps((__v4sf)__a);
+}
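+/* Editorial usage sketch (not part of the original header): the mask is
+ * convenient for branching on vector comparisons, e.g.
+ *
+ *   if (_mm_movemask_ps(_mm_cmplt_ps(x, y)) != 0) {
+ *     // at least one lane of x is less than the matching lane of y
+ *   }
+ */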
+
+#define _MM_ALIGN16 __attribute__((aligned(16)))
+
+#define _MM_SHUFFLE(z, y, x, w) (((z) << 6) | ((y) << 4) | ((x) << 2) | (w))
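+/* Editorial note: _MM_SHUFFLE packs four 2-bit lane selectors into the
+ * immediate expected by the shuffle intrinsics. For example,
+ * _MM_SHUFFLE(3, 2, 1, 0) == 0xE4 selects the identity order, while
+ * _mm_shuffle_ps(v, v, _MM_SHUFFLE(0, 1, 2, 3)) reverses the lanes of v. */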
+
+#define _MM_EXCEPT_INVALID (0x0001)
+#define _MM_EXCEPT_DENORM (0x0002)
+#define _MM_EXCEPT_DIV_ZERO (0x0004)
+#define _MM_EXCEPT_OVERFLOW (0x0008)
+#define _MM_EXCEPT_UNDERFLOW (0x0010)
+#define _MM_EXCEPT_INEXACT (0x0020)
+#define _MM_EXCEPT_MASK (0x003f)
+
+#define _MM_MASK_INVALID (0x0080)
+#define _MM_MASK_DENORM (0x0100)
+#define _MM_MASK_DIV_ZERO (0x0200)
+#define _MM_MASK_OVERFLOW (0x0400)
+#define _MM_MASK_UNDERFLOW (0x0800)
+#define _MM_MASK_INEXACT (0x1000)
+#define _MM_MASK_MASK (0x1f80)
+
+#define _MM_ROUND_NEAREST (0x0000)
+#define _MM_ROUND_DOWN (0x2000)
+#define _MM_ROUND_UP (0x4000)
+#define _MM_ROUND_TOWARD_ZERO (0x6000)
+#define _MM_ROUND_MASK (0x6000)
+
+#define _MM_FLUSH_ZERO_MASK (0x8000)
+#define _MM_FLUSH_ZERO_ON (0x8000)
+#define _MM_FLUSH_ZERO_OFF (0x0000)
+
+#define _MM_GET_EXCEPTION_MASK() (_mm_getcsr() & _MM_MASK_MASK)
+#define _MM_GET_EXCEPTION_STATE() (_mm_getcsr() & _MM_EXCEPT_MASK)
+#define _MM_GET_FLUSH_ZERO_MODE() (_mm_getcsr() & _MM_FLUSH_ZERO_MASK)
+#define _MM_GET_ROUNDING_MODE() (_mm_getcsr() & _MM_ROUND_MASK)
+
+#define _MM_SET_EXCEPTION_MASK(x) (_mm_setcsr((_mm_getcsr() & ~_MM_MASK_MASK) | (x)))
+#define _MM_SET_EXCEPTION_STATE(x) (_mm_setcsr((_mm_getcsr() & ~_MM_EXCEPT_MASK) | (x)))
+#define _MM_SET_FLUSH_ZERO_MODE(x) (_mm_setcsr((_mm_getcsr() & ~_MM_FLUSH_ZERO_MASK) | (x)))
+#define _MM_SET_ROUNDING_MODE(x) (_mm_setcsr((_mm_getcsr() & ~_MM_ROUND_MASK) | (x)))
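+/* Editorial usage sketch (not part of the original header): the setter
+ * macros above read-modify-write the MXCSR register, e.g.
+ *
+ *   _MM_SET_FLUSH_ZERO_MODE(_MM_FLUSH_ZERO_ON);
+ *   _MM_SET_ROUNDING_MODE(_MM_ROUND_TOWARD_ZERO);
+ *
+ * Both settings affect all subsequent SSE arithmetic on the calling thread.
+ */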
+
+#define _MM_TRANSPOSE4_PS(row0, row1, row2, row3) \
+do { \
+ __m128 tmp3, tmp2, tmp1, tmp0; \
+ tmp0 = _mm_unpacklo_ps((row0), (row1)); \
+ tmp2 = _mm_unpacklo_ps((row2), (row3)); \
+ tmp1 = _mm_unpackhi_ps((row0), (row1)); \
+ tmp3 = _mm_unpackhi_ps((row2), (row3)); \
+ (row0) = _mm_movelh_ps(tmp0, tmp2); \
+ (row1) = _mm_movehl_ps(tmp2, tmp0); \
+ (row2) = _mm_movelh_ps(tmp1, tmp3); \
+ (row3) = _mm_movehl_ps(tmp3, tmp1); \
+} while (0)
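+/* Editorial usage sketch (not part of the original header); `m` is an
+ * assumed row-major float[16]:
+ *
+ *   __m128 r0 = _mm_loadu_ps(m + 0), r1 = _mm_loadu_ps(m + 4),
+ *          r2 = _mm_loadu_ps(m + 8), r3 = _mm_loadu_ps(m + 12);
+ *   _MM_TRANSPOSE4_PS(r0, r1, r2, r3);  // r0..r3 now hold the columns of m
+ */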
+
+/* Aliases for compatibility. */
+#define _m_pextrw _mm_extract_pi16
+#define _m_pinsrw _mm_insert_pi16
+#define _m_pmaxsw _mm_max_pi16
+#define _m_pmaxub _mm_max_pu8
+#define _m_pminsw _mm_min_pi16
+#define _m_pminub _mm_min_pu8
+#define _m_pmovmskb _mm_movemask_pi8
+#define _m_pmulhuw _mm_mulhi_pu16
+#define _m_pshufw _mm_shuffle_pi16
+#define _m_maskmovq _mm_maskmove_si64
+#define _m_pavgb _mm_avg_pu8
+#define _m_pavgw _mm_avg_pu16
+#define _m_psadbw _mm_sad_pu8
+#define _m_ _mm_
+
+#undef __DEFAULT_FN_ATTRS
+
+/* Ugly hack for backwards-compatibility (compatible with gcc) */
+#if defined(__SSE2__) && !__building_module(_Builtin_intrinsics)
+#include <emmintrin.h>
+#endif
+
+#endif /* __XMMINTRIN_H */
diff --git a/current/clang-include/xopintrin.h b/current/clang-include/xopintrin.h
new file mode 100644
index 0000000..bdf0cec
--- /dev/null
+++ b/current/clang-include/xopintrin.h
@@ -0,0 +1,782 @@
+/*===---- xopintrin.h - XOP intrinsics -------------------------------------===
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+ * THE SOFTWARE.
+ *
+ *===-----------------------------------------------------------------------===
+ */
+
+#ifndef __X86INTRIN_H
+#error "Never use <xopintrin.h> directly; include <x86intrin.h> instead."
+#endif
+
+#ifndef __XOPINTRIN_H
+#define __XOPINTRIN_H
+
+#include <fma4intrin.h>
+
+/* Define the default attributes for the functions in this file. */
+#define __DEFAULT_FN_ATTRS __attribute__((__always_inline__, __nodebug__, __target__("xop")))
+
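+/* Editorial note: the multiply-accumulate wrappers below (_mm_macc...,
+ * _mm_madd...) follow a naming rule: an extra "s" (as in _mm_maccs_epi16)
+ * marks the signed-saturating variants, while the plain forms wrap around
+ * on overflow. */
+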
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_maccs_epi16(__m128i __A, __m128i __B, __m128i __C)
+{
+ return (__m128i)__builtin_ia32_vpmacssww((__v8hi)__A, (__v8hi)__B, (__v8hi)__C);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_macc_epi16(__m128i __A, __m128i __B, __m128i __C)
+{
+ return (__m128i)__builtin_ia32_vpmacsww((__v8hi)__A, (__v8hi)__B, (__v8hi)__C);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_maccsd_epi16(__m128i __A, __m128i __B, __m128i __C)
+{
+ return (__m128i)__builtin_ia32_vpmacsswd((__v8hi)__A, (__v8hi)__B, (__v4si)__C);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_maccd_epi16(__m128i __A, __m128i __B, __m128i __C)
+{
+ return (__m128i)__builtin_ia32_vpmacswd((__v8hi)__A, (__v8hi)__B, (__v4si)__C);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_maccs_epi32(__m128i __A, __m128i __B, __m128i __C)
+{
+ return (__m128i)__builtin_ia32_vpmacssdd((__v4si)__A, (__v4si)__B, (__v4si)__C);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_macc_epi32(__m128i __A, __m128i __B, __m128i __C)
+{
+ return (__m128i)__builtin_ia32_vpmacsdd((__v4si)__A, (__v4si)__B, (__v4si)__C);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_maccslo_epi32(__m128i __A, __m128i __B, __m128i __C)
+{
+ return (__m128i)__builtin_ia32_vpmacssdql((__v4si)__A, (__v4si)__B, (__v2di)__C);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_macclo_epi32(__m128i __A, __m128i __B, __m128i __C)
+{
+ return (__m128i)__builtin_ia32_vpmacsdql((__v4si)__A, (__v4si)__B, (__v2di)__C);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_maccshi_epi32(__m128i __A, __m128i __B, __m128i __C)
+{
+ return (__m128i)__builtin_ia32_vpmacssdqh((__v4si)__A, (__v4si)__B, (__v2di)__C);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_macchi_epi32(__m128i __A, __m128i __B, __m128i __C)
+{
+ return (__m128i)__builtin_ia32_vpmacsdqh((__v4si)__A, (__v4si)__B, (__v2di)__C);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_maddsd_epi16(__m128i __A, __m128i __B, __m128i __C)
+{
+ return (__m128i)__builtin_ia32_vpmadcsswd((__v8hi)__A, (__v8hi)__B, (__v4si)__C);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_maddd_epi16(__m128i __A, __m128i __B, __m128i __C)
+{
+ return (__m128i)__builtin_ia32_vpmadcswd((__v8hi)__A, (__v8hi)__B, (__v4si)__C);
+}
+
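+/* Editorial note: the horizontal add/subtract group below widens as it
+ * reduces; for example _mm_haddw_epi8 sums adjacent signed byte pairs into
+ * 16-bit results, and _mm_haddq_epu32 sums adjacent unsigned dword pairs
+ * into 64-bit results. */
+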
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_haddw_epi8(__m128i __A)
+{
+ return (__m128i)__builtin_ia32_vphaddbw((__v16qi)__A);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_haddd_epi8(__m128i __A)
+{
+ return (__m128i)__builtin_ia32_vphaddbd((__v16qi)__A);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_haddq_epi8(__m128i __A)
+{
+ return (__m128i)__builtin_ia32_vphaddbq((__v16qi)__A);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_haddd_epi16(__m128i __A)
+{
+ return (__m128i)__builtin_ia32_vphaddwd((__v8hi)__A);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_haddq_epi16(__m128i __A)
+{
+ return (__m128i)__builtin_ia32_vphaddwq((__v8hi)__A);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_haddq_epi32(__m128i __A)
+{
+ return (__m128i)__builtin_ia32_vphadddq((__v4si)__A);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_haddw_epu8(__m128i __A)
+{
+ return (__m128i)__builtin_ia32_vphaddubw((__v16qi)__A);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_haddd_epu8(__m128i __A)
+{
+ return (__m128i)__builtin_ia32_vphaddubd((__v16qi)__A);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_haddq_epu8(__m128i __A)
+{
+ return (__m128i)__builtin_ia32_vphaddubq((__v16qi)__A);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_haddd_epu16(__m128i __A)
+{
+ return (__m128i)__builtin_ia32_vphadduwd((__v8hi)__A);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_haddq_epu16(__m128i __A)
+{
+ return (__m128i)__builtin_ia32_vphadduwq((__v8hi)__A);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_haddq_epu32(__m128i __A)
+{
+ return (__m128i)__builtin_ia32_vphaddudq((__v4si)__A);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_hsubw_epi8(__m128i __A)
+{
+ return (__m128i)__builtin_ia32_vphsubbw((__v16qi)__A);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_hsubd_epi16(__m128i __A)
+{
+ return (__m128i)__builtin_ia32_vphsubwd((__v8hi)__A);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_hsubq_epi32(__m128i __A)
+{
+ return (__m128i)__builtin_ia32_vphsubdq((__v4si)__A);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_cmov_si128(__m128i __A, __m128i __B, __m128i __C)
+{
+ return (__m128i)__builtin_ia32_vpcmov((__v2di)__A, (__v2di)__B, (__v2di)__C);
+}
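+/* Editorial note: vpcmov is a bitwise select; each result bit is taken from
+ * __A where the corresponding bit of __C is 1 and from __B where it is 0.
+ * The 256-bit variant below behaves identically, bit for bit. */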
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS
+_mm256_cmov_si256(__m256i __A, __m256i __B, __m256i __C)
+{
+ return (__m256i)__builtin_ia32_vpcmov_256((__v4di)__A, (__v4di)__B, (__v4di)__C);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_perm_epi8(__m128i __A, __m128i __B, __m128i __C)
+{
+ return (__m128i)__builtin_ia32_vpperm((__v16qi)__A, (__v16qi)__B, (__v16qi)__C);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_rot_epi8(__m128i __A, __m128i __B)
+{
+ return (__m128i)__builtin_ia32_vprotb((__v16qi)__A, (__v16qi)__B);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_rot_epi16(__m128i __A, __m128i __B)
+{
+ return (__m128i)__builtin_ia32_vprotw((__v8hi)__A, (__v8hi)__B);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_rot_epi32(__m128i __A, __m128i __B)
+{
+ return (__m128i)__builtin_ia32_vprotd((__v4si)__A, (__v4si)__B);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_rot_epi64(__m128i __A, __m128i __B)
+{
+ return (__m128i)__builtin_ia32_vprotq((__v2di)__A, (__v2di)__B);
+}
+
+#define _mm_roti_epi8(A, N) __extension__ ({ \
+ (__m128i)__builtin_ia32_vprotbi((__v16qi)(__m128i)(A), (N)); })
+
+#define _mm_roti_epi16(A, N) __extension__ ({ \
+ (__m128i)__builtin_ia32_vprotwi((__v8hi)(__m128i)(A), (N)); })
+
+#define _mm_roti_epi32(A, N) __extension__ ({ \
+ (__m128i)__builtin_ia32_vprotdi((__v4si)(__m128i)(A), (N)); })
+
+#define _mm_roti_epi64(A, N) __extension__ ({ \
+ (__m128i)__builtin_ia32_vprotqi((__v2di)(__m128i)(A), (N)); })
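+
+/* Editorial note: both the variable _mm_rot_epi... forms above and these
+ * immediate forms take signed rotate counts; a positive count rotates each
+ * element left and a negative count rotates it right. */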
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_shl_epi8(__m128i __A, __m128i __B)
+{
+ return (__m128i)__builtin_ia32_vpshlb((__v16qi)__A, (__v16qi)__B);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_shl_epi16(__m128i __A, __m128i __B)
+{
+ return (__m128i)__builtin_ia32_vpshlw((__v8hi)__A, (__v8hi)__B);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_shl_epi32(__m128i __A, __m128i __B)
+{
+ return (__m128i)__builtin_ia32_vpshld((__v4si)__A, (__v4si)__B);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_shl_epi64(__m128i __A, __m128i __B)
+{
+ return (__m128i)__builtin_ia32_vpshlq((__v2di)__A, (__v2di)__B);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_sha_epi8(__m128i __A, __m128i __B)
+{
+ return (__m128i)__builtin_ia32_vpshab((__v16qi)__A, (__v16qi)__B);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_sha_epi16(__m128i __A, __m128i __B)
+{
+ return (__m128i)__builtin_ia32_vpshaw((__v8hi)__A, (__v8hi)__B);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_sha_epi32(__m128i __A, __m128i __B)
+{
+ return (__m128i)__builtin_ia32_vpshad((__v4si)__A, (__v4si)__B);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_sha_epi64(__m128i __A, __m128i __B)
+{
+ return (__m128i)__builtin_ia32_vpshaq((__v2di)__A, (__v2di)__B);
+}
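+/* Editorial note: for the vpshl (logical) and vpsha (arithmetic) families
+ * above, each element of the second operand is a signed per-lane shift
+ * count; positive counts shift left, negative counts shift right. */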
+
+#define _mm_com_epu8(A, B, N) __extension__ ({ \
+ (__m128i)__builtin_ia32_vpcomub((__v16qi)(__m128i)(A), \
+ (__v16qi)(__m128i)(B), (N)); })
+
+#define _mm_com_epu16(A, B, N) __extension__ ({ \
+ (__m128i)__builtin_ia32_vpcomuw((__v8hi)(__m128i)(A), \
+ (__v8hi)(__m128i)(B), (N)); })
+
+#define _mm_com_epu32(A, B, N) __extension__ ({ \
+ (__m128i)__builtin_ia32_vpcomud((__v4si)(__m128i)(A), \
+ (__v4si)(__m128i)(B), (N)); })
+
+#define _mm_com_epu64(A, B, N) __extension__ ({ \
+ (__m128i)__builtin_ia32_vpcomuq((__v2di)(__m128i)(A), \
+ (__v2di)(__m128i)(B), (N)); })
+
+#define _mm_com_epi8(A, B, N) __extension__ ({ \
+ (__m128i)__builtin_ia32_vpcomb((__v16qi)(__m128i)(A), \
+ (__v16qi)(__m128i)(B), (N)); })
+
+#define _mm_com_epi16(A, B, N) __extension__ ({ \
+ (__m128i)__builtin_ia32_vpcomw((__v8hi)(__m128i)(A), \
+ (__v8hi)(__m128i)(B), (N)); })
+
+#define _mm_com_epi32(A, B, N) __extension__ ({ \
+ (__m128i)__builtin_ia32_vpcomd((__v4si)(__m128i)(A), \
+ (__v4si)(__m128i)(B), (N)); })
+
+#define _mm_com_epi64(A, B, N) __extension__ ({ \
+ (__m128i)__builtin_ia32_vpcomq((__v2di)(__m128i)(A), \
+ (__v2di)(__m128i)(B), (N)); })
+
+#define _MM_PCOMCTRL_LT 0
+#define _MM_PCOMCTRL_LE 1
+#define _MM_PCOMCTRL_GT 2
+#define _MM_PCOMCTRL_GE 3
+#define _MM_PCOMCTRL_EQ 4
+#define _MM_PCOMCTRL_NEQ 5
+#define _MM_PCOMCTRL_FALSE 6
+#define _MM_PCOMCTRL_TRUE 7
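+
+/* Editorial note: the wrappers below pass one of these predicate codes to
+ * the matching _mm_com_ep... macro; _mm_comlt_epu8(a, b), for instance, is
+ * _mm_com_epu8(a, b, _MM_PCOMCTRL_LT) and sets a byte lane to all ones
+ * wherever a < b as unsigned values. */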
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_comlt_epu8(__m128i __A, __m128i __B)
+{
+ return _mm_com_epu8(__A, __B, _MM_PCOMCTRL_LT);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_comle_epu8(__m128i __A, __m128i __B)
+{
+ return _mm_com_epu8(__A, __B, _MM_PCOMCTRL_LE);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_comgt_epu8(__m128i __A, __m128i __B)
+{
+ return _mm_com_epu8(__A, __B, _MM_PCOMCTRL_GT);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_comge_epu8(__m128i __A, __m128i __B)
+{
+ return _mm_com_epu8(__A, __B, _MM_PCOMCTRL_GE);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_comeq_epu8(__m128i __A, __m128i __B)
+{
+ return _mm_com_epu8(__A, __B, _MM_PCOMCTRL_EQ);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_comneq_epu8(__m128i __A, __m128i __B)
+{
+ return _mm_com_epu8(__A, __B, _MM_PCOMCTRL_NEQ);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_comfalse_epu8(__m128i __A, __m128i __B)
+{
+ return _mm_com_epu8(__A, __B, _MM_PCOMCTRL_FALSE);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_comtrue_epu8(__m128i __A, __m128i __B)
+{
+ return _mm_com_epu8(__A, __B, _MM_PCOMCTRL_TRUE);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_comlt_epu16(__m128i __A, __m128i __B)
+{
+ return _mm_com_epu16(__A, __B, _MM_PCOMCTRL_LT);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_comle_epu16(__m128i __A, __m128i __B)
+{
+ return _mm_com_epu16(__A, __B, _MM_PCOMCTRL_LE);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_comgt_epu16(__m128i __A, __m128i __B)
+{
+ return _mm_com_epu16(__A, __B, _MM_PCOMCTRL_GT);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_comge_epu16(__m128i __A, __m128i __B)
+{
+ return _mm_com_epu16(__A, __B, _MM_PCOMCTRL_GE);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_comeq_epu16(__m128i __A, __m128i __B)
+{
+ return _mm_com_epu16(__A, __B, _MM_PCOMCTRL_EQ);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_comneq_epu16(__m128i __A, __m128i __B)
+{
+ return _mm_com_epu16(__A, __B, _MM_PCOMCTRL_NEQ);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_comfalse_epu16(__m128i __A, __m128i __B)
+{
+ return _mm_com_epu16(__A, __B, _MM_PCOMCTRL_FALSE);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_comtrue_epu16(__m128i __A, __m128i __B)
+{
+ return _mm_com_epu16(__A, __B, _MM_PCOMCTRL_TRUE);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_comlt_epu32(__m128i __A, __m128i __B)
+{
+ return _mm_com_epu32(__A, __B, _MM_PCOMCTRL_LT);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_comle_epu32(__m128i __A, __m128i __B)
+{
+ return _mm_com_epu32(__A, __B, _MM_PCOMCTRL_LE);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_comgt_epu32(__m128i __A, __m128i __B)
+{
+ return _mm_com_epu32(__A, __B, _MM_PCOMCTRL_GT);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_comge_epu32(__m128i __A, __m128i __B)
+{
+ return _mm_com_epu32(__A, __B, _MM_PCOMCTRL_GE);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_comeq_epu32(__m128i __A, __m128i __B)
+{
+ return _mm_com_epu32(__A, __B, _MM_PCOMCTRL_EQ);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_comneq_epu32(__m128i __A, __m128i __B)
+{
+ return _mm_com_epu32(__A, __B, _MM_PCOMCTRL_NEQ);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_comfalse_epu32(__m128i __A, __m128i __B)
+{
+ return _mm_com_epu32(__A, __B, _MM_PCOMCTRL_FALSE);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_comtrue_epu32(__m128i __A, __m128i __B)
+{
+ return _mm_com_epu32(__A, __B, _MM_PCOMCTRL_TRUE);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_comlt_epu64(__m128i __A, __m128i __B)
+{
+ return _mm_com_epu64(__A, __B, _MM_PCOMCTRL_LT);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_comle_epu64(__m128i __A, __m128i __B)
+{
+ return _mm_com_epu64(__A, __B, _MM_PCOMCTRL_LE);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_comgt_epu64(__m128i __A, __m128i __B)
+{
+ return _mm_com_epu64(__A, __B, _MM_PCOMCTRL_GT);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_comge_epu64(__m128i __A, __m128i __B)
+{
+ return _mm_com_epu64(__A, __B, _MM_PCOMCTRL_GE);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_comeq_epu64(__m128i __A, __m128i __B)
+{
+ return _mm_com_epu64(__A, __B, _MM_PCOMCTRL_EQ);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_comneq_epu64(__m128i __A, __m128i __B)
+{
+ return _mm_com_epu64(__A, __B, _MM_PCOMCTRL_NEQ);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_comfalse_epu64(__m128i __A, __m128i __B)
+{
+ return _mm_com_epu64(__A, __B, _MM_PCOMCTRL_FALSE);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_comtrue_epu64(__m128i __A, __m128i __B)
+{
+ return _mm_com_epu64(__A, __B, _MM_PCOMCTRL_TRUE);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_comlt_epi8(__m128i __A, __m128i __B)
+{
+ return _mm_com_epi8(__A, __B, _MM_PCOMCTRL_LT);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_comle_epi8(__m128i __A, __m128i __B)
+{
+ return _mm_com_epi8(__A, __B, _MM_PCOMCTRL_LE);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_comgt_epi8(__m128i __A, __m128i __B)
+{
+ return _mm_com_epi8(__A, __B, _MM_PCOMCTRL_GT);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_comge_epi8(__m128i __A, __m128i __B)
+{
+ return _mm_com_epi8(__A, __B, _MM_PCOMCTRL_GE);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_comeq_epi8(__m128i __A, __m128i __B)
+{
+ return _mm_com_epi8(__A, __B, _MM_PCOMCTRL_EQ);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_comneq_epi8(__m128i __A, __m128i __B)
+{
+ return _mm_com_epi8(__A, __B, _MM_PCOMCTRL_NEQ);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_comfalse_epi8(__m128i __A, __m128i __B)
+{
+ return _mm_com_epi8(__A, __B, _MM_PCOMCTRL_FALSE);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_comtrue_epi8(__m128i __A, __m128i __B)
+{
+ return _mm_com_epi8(__A, __B, _MM_PCOMCTRL_TRUE);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_comlt_epi16(__m128i __A, __m128i __B)
+{
+ return _mm_com_epi16(__A, __B, _MM_PCOMCTRL_LT);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_comle_epi16(__m128i __A, __m128i __B)
+{
+ return _mm_com_epi16(__A, __B, _MM_PCOMCTRL_LE);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_comgt_epi16(__m128i __A, __m128i __B)
+{
+ return _mm_com_epi16(__A, __B, _MM_PCOMCTRL_GT);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_comge_epi16(__m128i __A, __m128i __B)
+{
+ return _mm_com_epi16(__A, __B, _MM_PCOMCTRL_GE);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_comeq_epi16(__m128i __A, __m128i __B)
+{
+ return _mm_com_epi16(__A, __B, _MM_PCOMCTRL_EQ);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_comneq_epi16(__m128i __A, __m128i __B)
+{
+ return _mm_com_epi16(__A, __B, _MM_PCOMCTRL_NEQ);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_comfalse_epi16(__m128i __A, __m128i __B)
+{
+ return _mm_com_epi16(__A, __B, _MM_PCOMCTRL_FALSE);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_comtrue_epi16(__m128i __A, __m128i __B)
+{
+ return _mm_com_epi16(__A, __B, _MM_PCOMCTRL_TRUE);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_comlt_epi32(__m128i __A, __m128i __B)
+{
+ return _mm_com_epi32(__A, __B, _MM_PCOMCTRL_LT);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_comle_epi32(__m128i __A, __m128i __B)
+{
+ return _mm_com_epi32(__A, __B, _MM_PCOMCTRL_LE);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_comgt_epi32(__m128i __A, __m128i __B)
+{
+ return _mm_com_epi32(__A, __B, _MM_PCOMCTRL_GT);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_comge_epi32(__m128i __A, __m128i __B)
+{
+ return _mm_com_epi32(__A, __B, _MM_PCOMCTRL_GE);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_comeq_epi32(__m128i __A, __m128i __B)
+{
+ return _mm_com_epi32(__A, __B, _MM_PCOMCTRL_EQ);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_comneq_epi32(__m128i __A, __m128i __B)
+{
+ return _mm_com_epi32(__A, __B, _MM_PCOMCTRL_NEQ);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_comfalse_epi32(__m128i __A, __m128i __B)
+{
+ return _mm_com_epi32(__A, __B, _MM_PCOMCTRL_FALSE);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_comtrue_epi32(__m128i __A, __m128i __B)
+{
+ return _mm_com_epi32(__A, __B, _MM_PCOMCTRL_TRUE);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_comlt_epi64(__m128i __A, __m128i __B)
+{
+ return _mm_com_epi64(__A, __B, _MM_PCOMCTRL_LT);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_comle_epi64(__m128i __A, __m128i __B)
+{
+ return _mm_com_epi64(__A, __B, _MM_PCOMCTRL_LE);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_comgt_epi64(__m128i __A, __m128i __B)
+{
+ return _mm_com_epi64(__A, __B, _MM_PCOMCTRL_GT);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_comge_epi64(__m128i __A, __m128i __B)
+{
+ return _mm_com_epi64(__A, __B, _MM_PCOMCTRL_GE);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_comeq_epi64(__m128i __A, __m128i __B)
+{
+ return _mm_com_epi64(__A, __B, _MM_PCOMCTRL_EQ);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_comneq_epi64(__m128i __A, __m128i __B)
+{
+ return _mm_com_epi64(__A, __B, _MM_PCOMCTRL_NEQ);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_comfalse_epi64(__m128i __A, __m128i __B)
+{
+ return _mm_com_epi64(__A, __B, _MM_PCOMCTRL_FALSE);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_comtrue_epi64(__m128i __A, __m128i __B)
+{
+ return _mm_com_epi64(__A, __B, _MM_PCOMCTRL_TRUE);
+}
+
+#define _mm_permute2_pd(X, Y, C, I) __extension__ ({ \
+ (__m128d)__builtin_ia32_vpermil2pd((__v2df)(__m128d)(X), \
+ (__v2df)(__m128d)(Y), \
+ (__v2di)(__m128i)(C), (I)); })
+
+#define _mm256_permute2_pd(X, Y, C, I) __extension__ ({ \
+ (__m256d)__builtin_ia32_vpermil2pd256((__v4df)(__m256d)(X), \
+ (__v4df)(__m256d)(Y), \
+ (__v4di)(__m256i)(C), (I)); })
+
+#define _mm_permute2_ps(X, Y, C, I) __extension__ ({ \
+ (__m128)__builtin_ia32_vpermil2ps((__v4sf)(__m128)(X), (__v4sf)(__m128)(Y), \
+ (__v4si)(__m128i)(C), (I)); })
+
+#define _mm256_permute2_ps(X, Y, C, I) __extension__ ({ \
+ (__m256)__builtin_ia32_vpermil2ps256((__v8sf)(__m256)(X), \
+ (__v8sf)(__m256)(Y), \
+ (__v8si)(__m256i)(C), (I)); })
+
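+/* Editorial note: the vfrcz family below extracts the fractional part of
+ * each element, e.g. 2.75 maps to 0.75 and -2.75 to -0.75. */
+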
+static __inline__ __m128 __DEFAULT_FN_ATTRS
+_mm_frcz_ss(__m128 __A)
+{
+ return (__m128)__builtin_ia32_vfrczss((__v4sf)__A);
+}
+
+static __inline__ __m128d __DEFAULT_FN_ATTRS
+_mm_frcz_sd(__m128d __A)
+{
+ return (__m128d)__builtin_ia32_vfrczsd((__v2df)__A);
+}
+
+static __inline__ __m128 __DEFAULT_FN_ATTRS
+_mm_frcz_ps(__m128 __A)
+{
+ return (__m128)__builtin_ia32_vfrczps((__v4sf)__A);
+}
+
+static __inline__ __m128d __DEFAULT_FN_ATTRS
+_mm_frcz_pd(__m128d __A)
+{
+ return (__m128d)__builtin_ia32_vfrczpd((__v2df)__A);
+}
+
+static __inline__ __m256 __DEFAULT_FN_ATTRS
+_mm256_frcz_ps(__m256 __A)
+{
+ return (__m256)__builtin_ia32_vfrczps256((__v8sf)__A);
+}
+
+static __inline__ __m256d __DEFAULT_FN_ATTRS
+_mm256_frcz_pd(__m256d __A)
+{
+ return (__m256d)__builtin_ia32_vfrczpd256((__v4df)__A);
+}
+
+#undef __DEFAULT_FN_ATTRS
+
+#endif /* __XOPINTRIN_H */
diff --git a/current/clang-include/xsavecintrin.h b/current/clang-include/xsavecintrin.h
new file mode 100644
index 0000000..598470a
--- /dev/null
+++ b/current/clang-include/xsavecintrin.h
@@ -0,0 +1,48 @@
+/*===---- xsavecintrin.h - XSAVEC intrinsic ------------------------------------===
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+ * THE SOFTWARE.
+ *
+ *===-----------------------------------------------------------------------===
+ */
+
+#ifndef __IMMINTRIN_H
+#error "Never use <xsavecintrin.h> directly; include <immintrin.h> instead."
+#endif
+
+#ifndef __XSAVECINTRIN_H
+#define __XSAVECINTRIN_H
+
+/* Define the default attributes for the functions in this file. */
+#define __DEFAULT_FN_ATTRS __attribute__((__always_inline__, __nodebug__, __target__("xsavec")))
+
+static __inline__ void __DEFAULT_FN_ATTRS
+_xsavec(void *__p, unsigned long long __m) {
+ __builtin_ia32_xsavec(__p, __m);
+}
+
+#ifdef __x86_64__
+static __inline__ void __DEFAULT_FN_ATTRS
+_xsavec64(void *__p, unsigned long long __m) {
+ __builtin_ia32_xsavec64(__p, __m);
+}
+#endif
+
+#undef __DEFAULT_FN_ATTRS
+
+#endif
diff --git a/current/clang-include/xsaveintrin.h b/current/clang-include/xsaveintrin.h
new file mode 100644
index 0000000..a2e6b2e
--- /dev/null
+++ b/current/clang-include/xsaveintrin.h
@@ -0,0 +1,58 @@
+/*===---- xsaveintrin.h - XSAVE intrinsic ------------------------------------===
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+ * THE SOFTWARE.
+ *
+ *===-----------------------------------------------------------------------===
+ */
+
+#ifndef __IMMINTRIN_H
+#error "Never use <xsaveintrin.h> directly; include <immintrin.h> instead."
+#endif
+
+#ifndef __XSAVEINTRIN_H
+#define __XSAVEINTRIN_H
+
+/* Define the default attributes for the functions in this file. */
+#define __DEFAULT_FN_ATTRS __attribute__((__always_inline__, __nodebug__, __target__("xsave")))
+
+static __inline__ void __DEFAULT_FN_ATTRS
+_xsave(void *__p, unsigned long long __m) {
+ __builtin_ia32_xsave(__p, __m);
+}
+
+static __inline__ void __DEFAULT_FN_ATTRS
+_xrstor(void *__p, unsigned long long __m) {
+ __builtin_ia32_xrstor(__p, __m);
+}
+
+#ifdef __x86_64__
+static __inline__ void __DEFAULT_FN_ATTRS
+_xsave64(void *__p, unsigned long long __m) {
+ __builtin_ia32_xsave64(__p, __m);
+}
+
+static __inline__ void __DEFAULT_FN_ATTRS
+_xrstor64(void *__p, unsigned long long __m) {
+ __builtin_ia32_xrstor64(__p, __m);
+}
+#endif
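+
+/* Editorial usage sketch (not part of the original header): the pointer must
+ * reference a 64-byte-aligned XSAVE area and the mask selects the state
+ * components to process (bit 0 = x87, bit 1 = SSE, bit 2 = AVX). The 4096
+ * below is an assumed size; the real requirement comes from CPUID leaf 0Dh.
+ *
+ *   _Alignas(64) static char area[4096];
+ *   _xsave(area, 0x7);   // save x87, SSE and AVX state
+ *   _xrstor(area, 0x7);  // restore the same components
+ */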
+
+#undef __DEFAULT_FN_ATTRS
+
+#endif
diff --git a/current/clang-include/xsaveoptintrin.h b/current/clang-include/xsaveoptintrin.h
new file mode 100644
index 0000000..d3faae7
--- /dev/null
+++ b/current/clang-include/xsaveoptintrin.h
@@ -0,0 +1,48 @@
+/*===---- xsaveoptintrin.h - XSAVEOPT intrinsic ------------------------------------===
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+ * THE SOFTWARE.
+ *
+ *===-----------------------------------------------------------------------===
+ */
+
+#ifndef __IMMINTRIN_H
+#error "Never use <xsaveoptintrin.h> directly; include <immintrin.h> instead."
+#endif
+
+#ifndef __XSAVEOPTINTRIN_H
+#define __XSAVEOPTINTRIN_H
+
+/* Define the default attributes for the functions in this file. */
+#define __DEFAULT_FN_ATTRS __attribute__((__always_inline__, __nodebug__, __target__("xsaveopt")))
+
+static __inline__ void __DEFAULT_FN_ATTRS
+_xsaveopt(void *__p, unsigned long long __m) {
+ __builtin_ia32_xsaveopt(__p, __m);
+}
+
+#ifdef __x86_64__
+static __inline__ void __DEFAULT_FN_ATTRS
+_xsaveopt64(void *__p, unsigned long long __m) {
+ __builtin_ia32_xsaveopt64(__p, __m);
+}
+#endif
+
+#undef __DEFAULT_FN_ATTRS
+
+#endif
diff --git a/current/clang-include/xsavesintrin.h b/current/clang-include/xsavesintrin.h
new file mode 100644
index 0000000..c5e540a
--- /dev/null
+++ b/current/clang-include/xsavesintrin.h
@@ -0,0 +1,58 @@
+/*===---- xsavesintrin.h - XSAVES intrinsic ------------------------------------===
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+ * THE SOFTWARE.
+ *
+ *===-----------------------------------------------------------------------===
+ */
+
+#ifndef __IMMINTRIN_H
+#error "Never use <xsavesintrin.h> directly; include <immintrin.h> instead."
+#endif
+
+#ifndef __XSAVESINTRIN_H
+#define __XSAVESINTRIN_H
+
+/* Define the default attributes for the functions in this file. */
+#define __DEFAULT_FN_ATTRS __attribute__((__always_inline__, __nodebug__, __target__("xsaves")))
+
+static __inline__ void __DEFAULT_FN_ATTRS
+_xsaves(void *__p, unsigned long long __m) {
+ __builtin_ia32_xsaves(__p, __m);
+}
+
+static __inline__ void __DEFAULT_FN_ATTRS
+_xrstors(void *__p, unsigned long long __m) {
+ __builtin_ia32_xrstors(__p, __m);
+}
+
+#ifdef __x86_64__
+static __inline__ void __DEFAULT_FN_ATTRS
+_xrstors64(void *__p, unsigned long long __m) {
+ __builtin_ia32_xrstors64(__p, __m);
+}
+
+static __inline__ void __DEFAULT_FN_ATTRS
+_xsaves64(void *__p, unsigned long long __m) {
+ __builtin_ia32_xsaves64(__p, __m);
+}
+#endif
+
+#undef __DEFAULT_FN_ATTRS
+
+#endif
diff --git a/current/clang-include/xtestintrin.h b/current/clang-include/xtestintrin.h
new file mode 100644
index 0000000..9d3378f
--- /dev/null
+++ b/current/clang-include/xtestintrin.h
@@ -0,0 +1,41 @@
+/*===---- xtestintrin.h - XTEST intrinsic ---------------------------------===
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+ * THE SOFTWARE.
+ *
+ *===-----------------------------------------------------------------------===
+ */
+
+#ifndef __IMMINTRIN_H
+#error "Never use <xtestintrin.h> directly; include <immintrin.h> instead."
+#endif
+
+#ifndef __XTESTINTRIN_H
+#define __XTESTINTRIN_H
+
+/* xtest returns non-zero if the instruction is executed within an RTM or active
+ * HLE region. */
+/* FIXME: This can be an either or for RTM/HLE. Deal with this when HLE is
+ * supported. */
+static __inline__ int
+ __attribute__((__always_inline__, __nodebug__, __target__("rtm")))
+ _xtest(void) {
+ return __builtin_ia32_xtest();
+}
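+
+/* Editorial usage sketch (not part of the original header), pairing _xtest
+ * with the RTM intrinsics from <rtmintrin.h>:
+ *
+ *   if (_xtest())
+ *     _xabort(0xff);  // executing transactionally: abort instead of blocking
+ */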
+
+#endif
diff --git a/current/lib/libLLVM.dll b/current/lib/libLLVM.dll
new file mode 100755
index 0000000..8286094
--- /dev/null
+++ b/current/lib/libLLVM.dll
Binary files differ
diff --git a/current/lib/libbcc.dll b/current/lib/libbcc.dll
new file mode 100755
index 0000000..ba9258a
--- /dev/null
+++ b/current/lib/libbcc.dll
Binary files differ
diff --git a/current/lib/libbcinfo.dll b/current/lib/libbcinfo.dll
new file mode 100755
index 0000000..6b92ec4
--- /dev/null
+++ b/current/lib/libbcinfo.dll
Binary files differ
diff --git a/current/lib/libclang.dll b/current/lib/libclang.dll
new file mode 100755
index 0000000..4089f0f
--- /dev/null
+++ b/current/lib/libclang.dll
Binary files differ
diff --git a/current/manifest_3518219.xml b/current/manifest_3518219.xml
new file mode 100644
index 0000000..3f8d29e
--- /dev/null
+++ b/current/manifest_3518219.xml
@@ -0,0 +1,540 @@
+<?xml version='1.0' encoding='UTF-8'?>
+<manifest>
+ <remote fetch=".." name="aosp" review="https://android-review.googlesource.com/" />
+ <default remote="aosp" revision="master" sync-j="4" />
+ <project groups="pdk" name="platform/build" path="build/make" revision="7ed3af23a8a463a865c9c095975df81f64e2af57" upstream="master">
+ <copyfile dest="Makefile" src="core/root.mk" />
+ <linkfile dest="build/CleanSpec.mk" src="CleanSpec.mk" />
+ <linkfile dest="build/buildspec.mk.default" src="buildspec.mk.default" />
+ <linkfile dest="build/core" src="core" />
+ <linkfile dest="build/envsetup.sh" src="envsetup.sh" />
+ <linkfile dest="build/target" src="target" />
+ <linkfile dest="build/tools" src="tools" />
+ </project>
+ <project groups="pdk,tradefed" name="platform/build/blueprint" path="build/blueprint" revision="ce31b335915cd41bb46ef97b5d810ad0df107335" upstream="master" />
+ <project groups="pdk,tradefed" name="platform/build/kati" path="build/kati" revision="7e2a7c776b73857214538760ab6aa08c6c831486" upstream="master" />
+ <project groups="pdk,tradefed" name="platform/build/soong" path="build/soong" revision="d328ad708eeecc04f60ae455799e70875dcdf324" upstream="master">
+ <linkfile dest="Android.bp" src="root.bp" />
+ <linkfile dest="bootstrap.bash" src="bootstrap.bash" />
+ </project>
+ <project groups="pdk" name="platform/art" path="art" revision="b9bc47f0e24580a9dfafec830c709842b71aef89" upstream="master" />
+ <project groups="pdk" name="platform/bionic" path="bionic" revision="d39380cc4d3e9fdd2942da0aaafe939c38b6331e" upstream="master" />
+ <project groups="pdk" name="platform/bootable/recovery" path="bootable/recovery" revision="a511a3c4071d5b119605e9edcddc04a715581955" upstream="master" />
+ <project groups="pdk" name="platform/compatibility/cdd" path="compatibility/cdd" revision="827e91e434d3cf90bb4547110be78d8876a5820e" upstream="master" />
+ <project groups="cts,pdk-cw-fs,pdk-fs" name="platform/cts" path="cts" revision="254c6531f68e34bd0509b986eebd3515acf96ad0" upstream="master" />
+ <project groups="pdk-cw-fs,pdk-fs" name="platform/dalvik" path="dalvik" revision="af28c471866316d720c3bbe7c68b2d4c78fb5daf" upstream="master" />
+ <project name="platform/developers/build" path="developers/build" revision="0161c10b89cc024688e36ac2088fb190c378ec92" upstream="master" />
+ <project name="platform/developers/demos" path="developers/demos" revision="95d49d216223e3431647abd79f5e376958353c95" upstream="master" />
+ <project name="platform/developers/samples/android" path="developers/samples/android" revision="bcdbd6bbd52e6713a452f2bc63233e4b776a67c8" upstream="master" />
+ <project groups="pdk-cw-fs,pdk-fs" name="platform/development" path="development" revision="2c1ef76cc614ba1aad4e9786265aad3c98ed1455" upstream="master" />
+ <project groups="device,fugu,broadcom_pdk,pdk" name="device/asus/fugu" path="device/asus/fugu" revision="7a16fb337b8ecfe31e1b1a28616059952f49b49d" upstream="master" />
+ <project clone-depth="1" groups="device,fugu,broadcom_pdk,pdk" name="device/asus/fugu-kernel" path="device/asus/fugu-kernel" revision="6b92b38472501bcf5e425998ba60dcb8a138fc5a" upstream="master" />
+ <project groups="pdk-cw-fs,pdk" name="device/common" path="device/common" revision="eb68cdcc6cb0125e33dc964801c23c768a5c87dc" upstream="master" />
+ <project groups="pdk" name="device/generic/arm64" path="device/generic/arm64" revision="59076b7c5c52c55f7b88042788ee231524370533" upstream="master" />
+ <project groups="pdk" name="device/generic/armv7-a-neon" path="device/generic/armv7-a-neon" revision="c278a635601d682156eb5048539bcae33606bc4a" upstream="master" />
+ <project groups="pdk" name="device/generic/common" path="device/generic/common" revision="11c092a6cbfcf6207f07a9a8e3398e747e7f5461" upstream="master" />
+ <project groups="pdk" name="device/generic/goldfish" path="device/generic/goldfish" revision="a652a0995fe91ec01bfd224336e57b38b1f50983" upstream="master" />
+ <project groups="pdk" name="device/generic/mips" path="device/generic/mips" revision="bf07c7522e80eef53a58a49e5d05b8913d00d1a6" upstream="master" />
+ <project groups="pdk" name="device/generic/mips64" path="device/generic/mips64" revision="af38ee08585bcedd169309f3663aec70d0a91856" upstream="master" />
+ <project groups="pdk" name="device/generic/mini-emulator-arm64" path="device/generic/mini-emulator-arm64" revision="6f2e6974a1b1dc0ef936aec732e33d553c000b5e" upstream="master" />
+ <project groups="pdk" name="device/generic/mini-emulator-armv7-a-neon" path="device/generic/mini-emulator-armv7-a-neon" revision="9d7cbb60819a1da8408a7148b965b4cd1ebbb570" upstream="master" />
+ <project groups="pdk" name="device/generic/mini-emulator-mips" path="device/generic/mini-emulator-mips" revision="544e69736cff911c286a5d1d300dac228e23a73f" upstream="master" />
+ <project groups="pdk" name="device/generic/mini-emulator-mips64" path="device/generic/mini-emulator-mips64" revision="b1c36d0847fe6366527efd38fdb29d757ea887ee" upstream="master" />
+ <project groups="pdk" name="device/generic/mini-emulator-x86" path="device/generic/mini-emulator-x86" revision="65d59e2be2cfd713513a05d80a0f75794ab60579" upstream="master" />
+ <project groups="pdk" name="device/generic/mini-emulator-x86_64" path="device/generic/mini-emulator-x86_64" revision="673aae8bd65d9708821c4062f6a0364a859e7ae4" upstream="master" />
+ <project groups="pdk" name="device/generic/qemu" path="device/generic/qemu" revision="99feb6138f39e392495057ef7f2c4a5ed27f9e2d" upstream="master" />
+ <project groups="pdk" name="device/generic/x86" path="device/generic/x86" revision="81ce753ec0014fe404c4eec3ac6d04504356a638" upstream="master" />
+ <project groups="pdk" name="device/generic/x86_64" path="device/generic/x86_64" revision="f2d6d736a4a45e182349e932bd0708be45da7a4a" upstream="master" />
+ <project groups="device,pdk" name="device/google/accessory/arduino" path="device/google/accessory/arduino" revision="abc5159a3ca9dbb5c7e364a1eab99901a4440ac5" upstream="master" />
+ <project groups="device,pdk" name="device/google/accessory/demokit" path="device/google/accessory/demokit" revision="7dfe7f89a3b174709c773fe319531006e46440d9" upstream="master" />
+ <project groups="device,fugu,broadcom_pdk,generic_fs,pdk" name="device/google/atv" path="device/google/atv" revision="77b83d651007cb6998d4019e785133747836e4fe" upstream="master" />
+ <project groups="device,pdk" name="device/google/contexthub" path="device/google/contexthub" revision="aba2d911b9c29cb55fa22d95234f9c9b926b8c4d" upstream="master" />
+ <project groups="device,dragon,pdk" name="device/google/dragon" path="device/google/dragon" revision="d4f9a3abd2fa25b7b2ee3e03f85725fd043ea942" upstream="master" />
+ <project clone-depth="1" groups="device,dragon,pdk" name="device/google/dragon-kernel" path="device/google/dragon-kernel" revision="1f072d6ed7d6507fe04c6a410ceb93fd66a8cf8a" upstream="master" />
+ <project groups="device,flounder,pdk" name="device/htc/flounder" path="device/htc/flounder" revision="fcafaaa5db4e5de2fe51a54c716148b22966a476" upstream="master" />
+ <project clone-depth="1" groups="device,flounder,pdk" name="device/htc/flounder-kernel" path="device/htc/flounder-kernel" revision="d63338449c34b611e91b77d7f42bc7ad277074d7" upstream="master" />
+ <project groups="device,angler,broadcom_pdk,pdk" name="device/huawei/angler" path="device/huawei/angler" revision="22010bfa78d0057549baa6b6cb5777dcf3c2e031" upstream="master" />
+ <project clone-depth="1" groups="device,angler,broadcom_pdk,pdk" name="device/huawei/angler-kernel" path="device/huawei/angler-kernel" revision="c2f86ebe7a15500024908dd4f8d856d70ee8368e" upstream="master" />
+ <project groups="device,bullhead,pdk" name="device/lge/bullhead" path="device/lge/bullhead" revision="cb1e6e6fc70a41aa517a9384ff02548357c0b41d" upstream="master" />
+ <project clone-depth="1" groups="device,bullhead,pdk" name="device/lge/bullhead-kernel" path="device/lge/bullhead-kernel" revision="ce4d793d6ff4f6eb2c22a2c02c4607e23de6125e" upstream="master" />
+ <project name="device/linaro/bootloader/arm-trusted-firmware" path="device/linaro/bootloader/arm-trusted-firmware" revision="1f3ebb070b810262b7fd0538bf5eab2ccabc83a1" upstream="master" />
+ <project name="device/linaro/bootloader/edk2" path="device/linaro/bootloader/edk2" revision="0b5d535b3ac38d5a49326843d51f8e15c32345a9" upstream="master" />
+ <project name="device/linaro/bootloader/OpenPlatformPkg" path="device/linaro/bootloader/OpenPlatformPkg" revision="911a23ccd8744303788510039026b836f919262e" upstream="master" />
+ <project groups="device,hikey,pdk" name="device/linaro/hikey" path="device/linaro/hikey" revision="395b044f65745ba0c4f78a21bd8d9390c5aa9b2b" upstream="master" />
+ <project clone-depth="1" groups="device,hikey,pdk" name="device/linaro/hikey-kernel" path="device/linaro/hikey-kernel" revision="516305aee5a8b07f63f5efbce61a19fce43a1a28" upstream="master" />
+ <project groups="device,shamu,broadcom_pdk,generic_fs,pdk" name="device/moto/shamu" path="device/moto/shamu" revision="9be41289a687b862461cf565365d1c928a9cc367" upstream="master" />
+ <project clone-depth="1" groups="device,shamu,broadcom_pdk,generic_fs,pdk" name="device/moto/shamu-kernel" path="device/moto/shamu-kernel" revision="933b1d7d081e5e39fb522df22273c98d9037d0fc" upstream="master" />
+ <project groups="pdk" name="device/sample" path="device/sample" revision="cab49bc145456b9c64413897d246dcc49709512d" upstream="master" />
+ <project groups="pdk-cw-fs,pdk-fs" name="platform/docs/source.android.com" path="docs/source.android.com" revision="8e26818ed85d164db9428f647c0d86bf8d71ae66" upstream="master" />
+ <project groups="pdk" name="platform/external/aac" path="external/aac" revision="ed91226cec35c24f4e0cc13bb46ffff98fb37f52" upstream="master" />
+ <project groups="adt-infra,notdefault,pdk-fs" name="platform/external/adt-infra" path="external/adt-infra" revision="3651ab1797b8dcd873b6aee7499ca0bc2bd95147" upstream="master" />
+ <project groups="pdk" name="platform/external/android-clat" path="external/android-clat" revision="2dcca6feb91fd917e26c4989af6400de904466a2" upstream="master" />
+ <project groups="pdk" name="platform/external/androidplot" path="external/androidplot" revision="c66727ebf001607cee14521c35bc852b55fd9845" upstream="master" />
+ <project groups="pdk" name="platform/external/ant-glob" path="external/ant-glob" revision="a73228afa9540b9c5518d360c5ae630bb634f975" upstream="master" />
+ <project groups="pdk" name="platform/external/antlr" path="external/antlr" revision="dd5fa6d48b827c5d98b625adbc209f4a05567534" upstream="master" />
+ <project groups="pdk" name="platform/external/apache-commons-math" path="external/apache-commons-math" revision="18f62fca59d387e3c1ccd8f80087d9c9af40bcc8" upstream="master" />
+ <project groups="pdk" name="platform/external/apache-harmony" path="external/apache-harmony" revision="5dfcde90a5aa33894ca65854492bed4b538478ad" upstream="master" />
+ <project groups="pdk" name="platform/external/apache-http" path="external/apache-http" revision="949206634ed87a0daebf1fcb153f74b3b77e21b6" upstream="master" />
+ <project groups="pdk" name="platform/external/apache-xml" path="external/apache-xml" revision="31d7642eb8f37a9166db7f1c9e313ab651bdb8ba" upstream="master" />
+ <project groups="pdk" name="platform/external/archive-patcher" path="external/archive-patcher" revision="81918d517897eb07da8eacbd03c57190c6d5edda" upstream="master" />
+ <project groups="pdk-fs" name="platform/external/autotest" path="external/autotest" revision="0e2cfe6b1b2c9df1ddf2f21fb2299658752ee5db" upstream="master" />
+ <project groups="pdk" name="platform/external/avahi" path="external/avahi" revision="c1058119e50353b4f95375a6ec032f90452ec38b" upstream="master" />
+ <project groups="pdk" name="platform/external/avb" path="external/avb" revision="0155e6b158bdc5b3a442f16a5dc124d5dee9c71c" upstream="master" />
+ <project groups="pdk" name="platform/external/bison" path="external/bison" revision="7467d52af437dd3dfea237a6865ab84bf258dee0" upstream="master" />
+ <project groups="pdk" name="platform/external/blktrace" path="external/blktrace" revision="d345431f16b8f76f30a58193ff2b26d5853e1109" upstream="master" />
+ <project groups="pdk" name="platform/external/boringssl" path="external/boringssl" revision="909b19f027eb0af12513f4d5589efdd67e34bd91" upstream="master" />
+ <project groups="pdk" name="platform/external/bouncycastle" path="external/bouncycastle" revision="f369a5f0c5d077a5b946d7b39149cee4225d6f84" upstream="master" />
+ <project groups="pdk" name="platform/external/bsdiff" path="external/bsdiff" revision="f26b5b670aadde708d3279d1eb153d28b226304c" upstream="master" />
+ <project groups="pdk" name="platform/external/bzip2" path="external/bzip2" revision="b0e77c4d65ab8dc9607b27cf5b36c38c83e85ada" upstream="master" />
+ <project groups="pdk" name="platform/external/c-ares" path="external/c-ares" revision="f4baf84f285bfbdebb89b2fef8a955720f00c677" upstream="master" />
+ <project groups="pdk" name="platform/external/caliper" path="external/caliper" revision="a67e4782f7f5bc2d7b3f9de6836fc333ef0e1bd1" upstream="master" />
+ <project groups="pdk" name="platform/external/cblas" path="external/cblas" revision="53ffe068d9bf5f9b0507a659159dab7bd9c18a49" upstream="master" />
+ <project groups="pdk" name="platform/external/ceres-solver" path="external/ceres-solver" revision="233e18458eeb29bf5e4f2b69c5334728c47a6d13" upstream="master" />
+ <project groups="pdk" name="platform/external/chromium-libpac" path="external/chromium-libpac" revision="bb0e12b4e09130018916c6d5ffc3e0344cb6a17b" upstream="master" />
+ <project groups="pdk" name="platform/external/chromium-trace" path="external/chromium-trace" revision="cd89f5ab72f61143619000946d1b2d8e2fb6a2d1" upstream="master" />
+ <project groups="pdk" name="platform/external/chromium-webview" path="external/chromium-webview" revision="7895f9be6b7af84d455af423bf60f3210fbb6b68" upstream="master" />
+ <project groups="pdk" name="platform/external/clang" path="external/clang" revision="b167d69629a571ef313688c02dfd0d7ddace8eb4" upstream="master" />
+ <project groups="pdk" name="platform/external/cmockery" path="external/cmockery" revision="9199c7bfafefea32d1884182fa655b6e4578c1c4" upstream="master" />
+ <project groups="pdk" name="platform/external/compiler-rt" path="external/compiler-rt" revision="a1b6d6325763d05aaf42df7c615f55afc66d9e11" upstream="master" />
+ <project groups="pdk" name="platform/external/conscrypt" path="external/conscrypt" revision="08241c36202dc88d286129d6f726be05ec805e69" upstream="master" />
+ <project groups="pdk" name="platform/external/crcalc" path="external/crcalc" revision="5559610bea3b267abef3273868f2d6b8c01409a5" upstream="master" />
+ <project groups="pdk" name="platform/external/cros/system_api" path="external/cros/system_api" revision="3134975bb960b2f844722e5f844025a23914e46e" upstream="master" />
+ <project groups="pdk" name="platform/external/curl" path="external/curl" revision="044d80e37a7ae71b229ba810fcb4bfd701d88ac4" upstream="master" />
+ <project groups="pdk" name="platform/external/dagger2" path="external/dagger2" revision="d65ad205f96455938a93529874ba2ee8d7063d3f" upstream="master" />
+ <project groups="pdk" name="platform/external/dbus" path="external/dbus" revision="d655f577be207c2a1b39406fdc6d4dd1b3c9c5d5" upstream="master" />
+ <project groups="pdk" name="platform/external/dbus-binding-generator" path="external/dbus-binding-generator" revision="8ed8a72c162b82527b2124cfae0b5ecf9c9b6289" upstream="master" />
+ <project groups="pdk-fs" name="platform/external/deqp" path="external/deqp" revision="b28ca98dd64d74475f78ac4b19309fc41076d26e" upstream="master" />
+ <project groups="pdk" name="platform/external/dexmaker" path="external/dexmaker" revision="7a2d6365f327afe4e7210d6f09faa02f19353dae" upstream="master" />
+ <project groups="pdk" name="platform/external/dhcpcd-6.8.2" path="external/dhcpcd-6.8.2" revision="b74b4ad38fa162d3fe09e035fa570f419261c7dd" upstream="master" />
+ <project groups="pdk" name="platform/external/dlmalloc" path="external/dlmalloc" revision="6661f3ca66b55d8f5a57b96fec97efaf8f3897a5" upstream="master" />
+ <project groups="pdk" name="platform/external/dng_sdk" path="external/dng_sdk" revision="c44c039ac1ffcb07e523c184733c1f11af0c296d" upstream="master" />
+ <project groups="pdk" name="platform/external/dnsmasq" path="external/dnsmasq" revision="cf2fce4acdf00d9a703c04d01b73bc332bcc61a7" upstream="master" />
+ <project groups="pdk" name="platform/external/doclava" path="external/doclava" revision="589d63dec2883e5ccae2fb07fdce41b0e3c2113c" upstream="master" />
+ <project groups="pdk" name="platform/external/donuts" path="external/donuts" revision="005389fb31a67841b5de42f0d25fbfab843e208c" upstream="master" />
+ <project groups="drm_gralloc,pdk-fs" name="platform/external/drm_gralloc" path="external/drm_gralloc" revision="63dd4c897693a614f9eedff817116f34590514ad" upstream="master" />
+ <project groups="drm_hwcomposer,pdk-fs" name="platform/external/drm_hwcomposer" path="external/drm_hwcomposer" revision="ff522c77d52cc8324ba0a505706fe5f1c5d30178" upstream="master" />
+ <project groups="pdk" name="platform/external/droiddriver" path="external/droiddriver" revision="4138eed6f44e06a72c6adcad1892008eb4b701e8" upstream="master" />
+ <project name="platform/external/dtc" path="external/dtc" revision="fe315a53517fd5792735428f0da232041a8cdce3" upstream="master" />
+ <project groups="pdk" name="platform/external/e2fsprogs" path="external/e2fsprogs" revision="058a64acf530f379a1ee91e43f58d1c042946251" upstream="master" />
+ <project groups="pdk" name="platform/external/easymock" path="external/easymock" revision="8bc2748f2850c88ab6b86c6ceed1ce65bed5ee16" upstream="master" />
+ <project groups="pdk-cw-fs,pdk-fs" name="platform/external/eclipse-basebuilder" path="external/eclipse-basebuilder" revision="076f46888ed346775b8efc564d7694063ef5eea7" upstream="master" />
+ <project groups="pdk" name="platform/external/eclipse-windowbuilder" path="external/eclipse-windowbuilder" revision="c533332008088ee5f61745b724361fbde5b7e770" upstream="master" />
+ <project groups="pdk" name="platform/external/eigen" path="external/eigen" revision="110f39b2599856ffe6c7781b221b93d00b89d2b4" upstream="master" />
+ <project groups="pdk" name="platform/external/elfutils" path="external/elfutils" revision="7ece9f0c0f62d36142e25641438e0a859f951308" upstream="master" />
+ <project groups="pdk" name="platform/external/emma" path="external/emma" revision="deb5711b2635ee9d332605e2e00b63b32bed6777" upstream="master" />
+ <project groups="pdk" name="platform/external/esd" path="external/esd" revision="943c42b6f8e9afe821744aa4c039f4943ebf29f5" upstream="master" />
+ <project groups="pdk" name="platform/external/expat" path="external/expat" revision="12bddbf2b12be35a77daba2a35a4671f84a5c230" upstream="master" />
+ <project groups="pdk" name="platform/external/eyes-free" path="external/eyes-free" revision="16bd4c7a4d1bfe229068b637614dad7c48dd2ceb" upstream="master" />
+ <project groups="pdk" name="platform/external/f2fs-tools" path="external/f2fs-tools" revision="5bd943d50d103eb6f48a72f199a49dce4feeb61a" upstream="master" />
+ <project groups="pdk" name="platform/external/fdlibm" path="external/fdlibm" revision="fcd17254a9e59ceb0420d4a2faf29db6b004c443" upstream="master" />
+ <project groups="pdk" name="platform/external/fec" path="external/fec" revision="791afbe58ff9f55145c4adf632ab8cc9ca6e5686" upstream="master" />
+ <project groups="pdk" name="platform/external/fio" path="external/fio" revision="b4e247271a45f3e363c2f9afe2977038cc521f78" upstream="master" />
+ <project groups="pdk" name="platform/external/flac" path="external/flac" revision="3c74c11c30cdae95914c0b6a9228a3906b238c73" upstream="master" />
+ <project groups="pdk" name="platform/external/fonttools" path="external/fonttools" revision="fede58680958e96e6e8f61c3cc6282f798452c53" upstream="master" />
+ <project groups="pdk" name="platform/external/freetype" path="external/freetype" revision="a66c1d9aa97d87ec047c861201e7f1d6e4bc5ba7" upstream="master" />
+ <project groups="pdk" name="platform/external/fsck_msdos" path="external/fsck_msdos" revision="e5a5c8b9e97ff69a68266e79cca1614802833142" upstream="master" />
+ <project groups="pdk" name="platform/external/gemmlowp" path="external/gemmlowp" revision="6de57912630404dc2e210a392878c6d7cf51f1cd" upstream="master" />
+ <project groups="pdk,qcom_msm8x26" name="platform/external/giflib" path="external/giflib" revision="a57aff106d30e39c782e681f8e4aa3aa612f81c3" upstream="master" />
+ <project groups="pdk" name="platform/external/glide" path="external/glide" revision="31e64acd1e65045471124267a06241eff1f55d26" upstream="master" />
+ <project groups="pdk" name="platform/external/google-benchmark" path="external/google-benchmark" revision="ac64db9550713bd81fd50fc8d289ffa8a9c1270c" upstream="master" />
+ <project groups="dragon,pdk-fs" name="platform/external/google-breakpad" path="external/google-breakpad" revision="19324245f86f3532ca9f90f62f5b84fcdbb6ecbf" upstream="master" />
+ <project groups="pdk" name="platform/external/google-fonts/carrois-gothic-sc" path="external/google-fonts/carrois-gothic-sc" revision="0062a10458d4c357f3082d66bcb129d11913aaae" upstream="master" />
+ <project groups="pdk" name="platform/external/google-fonts/coming-soon" path="external/google-fonts/coming-soon" revision="2c5cb418c690815545bbb0316eae5fd33b9fc859" upstream="master" />
+ <project groups="pdk" name="platform/external/google-fonts/cutive-mono" path="external/google-fonts/cutive-mono" revision="bce2136662854076023066602526ba299e6556b2" upstream="master" />
+ <project groups="pdk" name="platform/external/google-fonts/dancing-script" path="external/google-fonts/dancing-script" revision="7b6623bd54cee3e48ae8a4f477f616366643cc78" upstream="master" />
+ <project groups="pdk" name="platform/external/google-tv-pairing-protocol" path="external/google-tv-pairing-protocol" revision="9726a938070e2f281219ef3187f54ef758e6a075" upstream="master" />
+ <project groups="pdk" name="platform/external/googletest" path="external/googletest" revision="766bcc63f841044e0ab1bd4fde35fd6226ff5b42" upstream="master" />
+ <project groups="pdk" name="platform/external/gptfdisk" path="external/gptfdisk" revision="2c054ab90ed2324602410bb505151a4677a7ef00" upstream="master" />
+ <project groups="pdk" name="platform/external/guava" path="external/guava" revision="0839ba9a609002a4b29290a55caa125e363e629e" upstream="master" />
+ <project groups="pdk" name="platform/external/guice" path="external/guice" revision="dd02cae677404f96e0359e51d0a77ac91174a057" upstream="master" />
+ <project groups="pdk" name="platform/external/hamcrest" path="external/hamcrest" revision="a80259979f535e973382fa9a41bc2fda0a4553b7" upstream="master" />
+ <project groups="pdk,qcom_msm8x26" name="platform/external/harfbuzz_ng" path="external/harfbuzz_ng" revision="e7490844b4f60a593c385dd6777047eb8b67ec01" upstream="master" />
+ <project groups="pdk" name="platform/external/hyphenation-patterns" path="external/hyphenation-patterns" revision="dedeff64279b77bafff72b6d866efc93e829b4ab" upstream="master" />
+ <project groups="pdk" name="platform/external/icu" path="external/icu" revision="bca042f28d616624108ecd9eebe97feda8504f79" upstream="master" />
+ <project groups="pdk" name="platform/external/iproute2" path="external/iproute2" revision="474d82b0625cfed5c30f670444d7e6aea780450c" upstream="master" />
+ <project groups="pdk" name="platform/external/ipsec-tools" path="external/ipsec-tools" revision="d43551bcaaba3e00f20a061a43a8a3f28329bf97" upstream="master" />
+ <project groups="pdk" name="platform/external/iptables" path="external/iptables" revision="8f090169d006a77dcf0a3eef8bda24fb20c32bb6" upstream="master" />
+ <project groups="pdk" name="platform/external/iputils" path="external/iputils" revision="15812b1548d303f2d20b3326dd0b1b4f3d0da703" upstream="master" />
+ <project groups="pdk" name="platform/external/iw" path="external/iw" revision="2750da337290a5407e3ee123cc664d4e5cc1820c" upstream="master" />
+ <project groups="pdk" name="platform/external/jacoco" path="external/jacoco" revision="36e54806edd3b7cce9c86ec8c93fdda2a96d60f4" upstream="master" />
+ <project groups="pdk" name="platform/external/jarjar" path="external/jarjar" revision="730d9b71d5d139d9fea676d9073dbfa1b71b61ab" upstream="master" />
+ <project groups="pdk" name="platform/external/javasqlite" path="external/javasqlite" revision="ee41d81872eedc632f42bd231b95bc945690d159" upstream="master" />
+ <project groups="pdk" name="platform/external/javassist" path="external/javassist" revision="f7c4b954072e563b75f6910c25bb689bbf38a3d1" upstream="master" />
+ <project groups="pdk" name="platform/external/jcommander" path="external/jcommander" revision="502ef60ae8299347b337c19ad9e831bb856d8c59" upstream="master" />
+ <project groups="pdk" name="platform/external/jdiff" path="external/jdiff" revision="98ed536fd800bf9ac02a9983da6dc08356a64fa3" upstream="master" />
+ <project groups="pdk" name="platform/external/jemalloc" path="external/jemalloc" revision="04d995d2adf440f9971aa261bb7ef8cac79eb52d" upstream="master" />
+ <project groups="pdk" name="platform/external/jetty" path="external/jetty" revision="7354b4f34e693165f4f49d154fe58844bf5d4706" upstream="master" />
+ <project groups="notdefault,tradefed,pdk-fs" name="platform/external/jline" path="external/jline" revision="74812032f8d8eddbef387f18c96de9e5c38b8fdb" upstream="master" />
+ <project groups="pdk" name="platform/external/jmdns" path="external/jmdns" revision="0c71647deb7f7835c473fd3dfb45943083e47794" upstream="master" />
+ <project groups="pdk" name="platform/external/jsilver" path="external/jsilver" revision="b9b84920ba47ddf7f15baa01c56b28e3d3f951ea" upstream="master" />
+ <project groups="pdk" name="platform/external/jsmn" path="external/jsmn" revision="cbfa1a02b28b588e16ec221519c6ec17b1c14568" upstream="master" />
+ <project groups="pdk" name="platform/external/jsoncpp" path="external/jsoncpp" revision="90c81b9c9aef09ef4ffb8de1779301734336d897" upstream="master" />
+ <project groups="pdk" name="platform/external/jsr305" path="external/jsr305" revision="8cb769f8fa9347c83c30c896c8d9084bd6bac756" upstream="master" />
+ <project groups="pdk" name="platform/external/jsr330" path="external/jsr330" revision="ded32186511387be56e353485f476262d6b14c66" upstream="master" />
+ <project groups="pdk" name="platform/external/junit" path="external/junit" revision="2e7552431cce028d5c9eba8e8cd5b2e4709215aa" upstream="master" />
+ <project groups="pdk" name="platform/external/junit-params" path="external/junit-params" revision="630c32b92bf3cc6179c211290016089d40319119" upstream="master" />
+ <project groups="pdk" name="platform/external/kernel-headers" path="external/kernel-headers" revision="64c647a785e762c65423322410c0c1b79b33e62b" upstream="master" />
+ <project groups="pdk" name="platform/external/ksoap2" path="external/ksoap2" revision="a3e5f27c89b4a64333d6dfb9b9588a1bb1537956" upstream="master" />
+ <project groups="pdk" name="platform/external/libavc" path="external/libavc" revision="4b6344b28159ddec7e9eb3de074d5e3affff178a" upstream="master" />
+ <project groups="pdk" name="platform/external/libbrillo" path="external/libbrillo" revision="d26b2a3274c08d59b47c87b0418f9df675a8a023" upstream="master" />
+ <project groups="pdk" name="platform/external/libcap" path="external/libcap" revision="02403a9504f6bb586c9894d48cef90bd2c9ff8b7" upstream="master" />
+ <project groups="pdk" name="platform/external/libcap-ng" path="external/libcap-ng" revision="835f318b9785a70415980fba96c34ee5c6b7e415" upstream="master" />
+ <project groups="pdk" name="platform/external/libchrome" path="external/libchrome" revision="7f89d64f891855e5a7fb4a52390dd3b463001a49" upstream="master" />
+ <project groups="pdk" name="platform/external/libconstrainedcrypto" path="external/libconstrainedcrypto" revision="fca75c837bebfbd51927156158de36fc517742f7" upstream="master" />
+ <project groups="pdk" name="platform/external/libcxx" path="external/libcxx" revision="10e979ace10d4cc73945acf060eeb1234e2fe834" upstream="master" />
+ <project groups="pdk" name="platform/external/libcxxabi" path="external/libcxxabi" revision="498f031515a9738abf584fcbf48773fdf23bae2e" upstream="master" />
+ <project groups="pdk" name="platform/external/libdivsufsort" path="external/libdivsufsort" revision="90d90b27c0b82cac20deabac79e97e274856eaf8" upstream="master" />
+ <project groups="pdk" name="platform/external/libdrm" path="external/libdrm" revision="ce429d08fb5d8eb20c9029746f018de9a508c065" upstream="master" />
+ <project groups="pdk" name="platform/external/libedit" path="external/libedit" revision="67e14dfc833aafa400a3aad8cb329cbaec503445" upstream="master" />
+ <project groups="pdk" name="platform/external/libdaemon" path="external/libdaemon" revision="e2f604066d97431c95856c73d7b9ee46b348d37e" upstream="master" />
+ <project groups="pdk" name="platform/external/libevent" path="external/libevent" revision="e6958d7c60a5a49b863271dee0aadae5afbbe924" upstream="master" />
+ <project groups="pdk" name="platform/external/libexif" path="external/libexif" revision="83884670140be8558eb330b6e68142b56ee46c42" upstream="master" />
+ <project groups="pdk" name="platform/external/libgsm" path="external/libgsm" revision="2f66c771f18317147e446fab5a95082d18a6db20" upstream="master" />
+ <project groups="pdk" name="platform/external/libhevc" path="external/libhevc" revision="a87cdb37caa4ccb3cd42ca6b9f0f8d17e9e99f82" upstream="master" />
+ <project groups="pdk" name="platform/external/libjpeg-turbo" path="external/libjpeg-turbo" revision="1b0f60e77f2d691fe4a12620f12efde8a22f8341" upstream="master" />
+ <project groups="pdk" name="platform/external/liblzf" path="external/liblzf" revision="a88b9629447deabe8697d2f8fd4cc70aa6e1b563" upstream="master" />
+ <project groups="pdk" name="platform/external/libmicrohttpd" path="external/libmicrohttpd" revision="1e68f5d827a859ba3b7ab6a70a60247e0b96afa5" upstream="master" />
+ <project groups="pdk" name="platform/external/libmojo" path="external/libmojo" revision="3d72bec9be57b7d199d2cbd7139308bb3c3c34bb" upstream="master" />
+ <project groups="pdk" name="platform/external/libmpeg2" path="external/libmpeg2" revision="b06addc8413286624d02e976b4ff6af5a41b9bee" upstream="master" />
+ <project groups="pdk" name="platform/external/libmtp" path="external/libmtp" revision="7ed2065db3ab9851f47426f3a35ba7045ce528b2" upstream="master" />
+ <project groups="pdk" name="platform/external/libnfc-nci" path="external/libnfc-nci" revision="c99d11b183159f3f72f3c1a1b241ce3983a292dd" upstream="master" />
+ <project groups="pdk" name="platform/external/libnl" path="external/libnl" revision="3f563dcf99d53a43799e5bd75d7761fcd9716c5a" upstream="master" />
+ <project groups="pdk" name="platform/external/libogg" path="external/libogg" revision="6dba790f3372d03eee07c693789166ca9fa07b0a" upstream="master" />
+ <project groups="pdk" name="platform/external/libopus" path="external/libopus" revision="e86ddedf1d6703aeac7c40e2f92aa7d365d62838" upstream="master" />
+ <project groups="pdk" name="platform/external/libpcap" path="external/libpcap" revision="7972587a89ed3966f8685d7f44c3f3674daf0115" upstream="master" />
+ <project groups="pdk" name="platform/external/libphonenumber" path="external/libphonenumber" revision="78b57ad8097f172cc084be1b15ee6d2fe446c79c" upstream="master" />
+ <project groups="pdk" name="platform/external/libpng" path="external/libpng" revision="b65760761bfec7570ec9a0090a708443eff2f68f" upstream="master" />
+ <project groups="pdk" name="platform/external/libunwind" path="external/libunwind" revision="e1069f1eb15a71dcb9d793951dbb5ce0ef97442f" upstream="master" />
+ <project groups="pdk" name="platform/external/libunwind_llvm" path="external/libunwind_llvm" revision="5dedb3c1c266802f328be58720adf736bfa38762" upstream="master" />
+ <project groups="pdk" name="platform/external/libusb" path="external/libusb" revision="c830898c125945feec317da1a0b359afeca766c9" upstream="master" />
+ <project groups="pdk" name="platform/external/libusb-compat" path="external/libusb-compat" revision="759481ae400d02fe99488dcdcd653b4f8139a39c" upstream="master" />
+ <project groups="pdk" name="platform/external/libutf" path="external/libutf" revision="853ef375ba2ce430f5b13b556812b2d83ccc82e1" upstream="master" />
+ <project groups="pdk" name="platform/external/libvncserver" path="external/libvncserver" revision="bec50fdbf6ec2d61edda4f69913f8a8331b8b364" upstream="master" />
+ <project groups="pdk" name="platform/external/libvorbis" path="external/libvorbis" revision="dee2e0a12822ec62c89c2d7582f1a7d32cd0875b" upstream="master" />
+ <project groups="pdk" name="platform/external/libvpx" path="external/libvpx" revision="8a88e03ea00afe1c5cb223e1036ad2cb04c3d132" upstream="master" />
+ <project groups="pdk" name="platform/external/libvterm" path="external/libvterm" revision="6d78f36633063dad0689ca42be1ad8d0313ebfab" upstream="master" />
+ <project groups="pdk" name="platform/external/libweave" path="external/libweave" revision="d4eabd105d038610c2722f8e6bdff5f5428265c0" upstream="master" />
+ <project groups="pdk,libxml2" name="platform/external/libxml2" path="external/libxml2" revision="1b93209e5c65584a80238f52c9e87f57a48a9a59" upstream="master" />
+ <project groups="pdk,libyuv" name="platform/external/libyuv" path="external/libyuv" revision="bb74e3e19b98261031216de8cadcef34cccd9e4a" upstream="master" />
+ <project groups="pdk" name="platform/external/littlemock" path="external/littlemock" revision="a3ea6bf9e6fcf63b69f5635b2ecc1fbd9a9a4dfc" upstream="master" />
+ <project groups="pdk" name="platform/external/lld" path="external/lld" revision="26c9bb3b51a7ac4bc45f73d532a03cfd9982043a" upstream="master" />
+ <project groups="pdk" name="platform/external/llvm" path="external/llvm" revision="83d37657af4dcd64af0eae6d63ee476654b8a2c2" upstream="master" />
+ <project groups="pdk" name="platform/external/lz4" path="external/lz4" revision="32b7e86f6814e15b3aaf0b870142ab6c819da27c" upstream="master" />
+ <project groups="pdk" name="platform/external/lzma" path="external/lzma" revision="17dda56f12d674e22266c2aadfcce1c945b2bb2b" upstream="master" />
+ <project groups="pdk" name="platform/external/markdown" path="external/markdown" revision="06a8f8b914d477183f68b84424bce8ff4dae7e84" upstream="master" />
+ <project groups="pdk" name="platform/external/mdnsresponder" path="external/mdnsresponder" revision="55aee11ccdc0ea5ed62ef68a352c3aca112ea01c" upstream="master" />
+ <project groups="pdk-cw-fs,pdk-fs" name="platform/external/mesa3d" path="external/mesa3d" revision="586cfa78861e71160d5a3dbeaff09554829a1027" upstream="master" />
+ <project groups="pdk" name="platform/external/messageformat" path="external/messageformat" revision="ff217fd086ecac137321c1265d0cc1a15194decf" upstream="master" />
+ <project groups="pdk" name="platform/external/Microsoft-GSL" path="external/Microsoft-GSL" revision="2df00914687875d2141dc912b788b5dc6b27abde" upstream="master" />
+ <project groups="pdk" name="platform/external/minijail" path="external/minijail" revision="7559dfe9ed16455e03f68d9aa0a5a65747e6a174" upstream="master" />
+ <project groups="pdk" name="platform/external/mksh" path="external/mksh" revision="cd79a34d28dc40ea887af65aec71e35b74696afd" upstream="master" />
+ <project groups="pdk" name="platform/external/mmc-utils" path="external/mmc-utils" revision="6de31b2b8a09108f9b91bc12bee2b792f783dc62" upstream="master" />
+ <project groups="pdk" name="platform/external/mockftpserver" path="external/mockftpserver" revision="56527f4f9cf6e19136c55e8e6f18fd56da628d86" upstream="master" />
+ <project groups="pdk" name="platform/external/mockito" path="external/mockito" revision="c76860ffac1484ff29dc8966e8a7751ba5c20700" upstream="master" />
+ <project groups="pdk" name="platform/external/mockwebserver" path="external/mockwebserver" revision="aa66709b3b05ec77c477a68d30d677b4da78d33f" upstream="master" />
+ <project groups="pdk" name="platform/external/modp_b64" path="external/modp_b64" revision="ca8a085abe944c279055910b4d97d0517c759a1d" upstream="master" />
+ <project groups="pdk" name="platform/external/mp4parser" path="external/mp4parser" revision="88bd0c6cfa7fcfbbebcc15c2c565f714cb36b065" upstream="master" />
+ <project groups="pdk" name="platform/external/mtpd" path="external/mtpd" revision="46998029d80d5e486940c3c6b641569229349641" upstream="master" />
+ <project groups="pdk" name="platform/external/nanohttpd" path="external/nanohttpd" revision="c6783c32331b3fe78ffd5077e7f180995d0e268e" upstream="master" />
+ <project groups="pdk" name="platform/external/nanopb-c" path="external/nanopb-c" revision="b225eb267957f24ca2aa2469bc78d418b2e9b795" upstream="master" />
+ <project groups="pdk" name="platform/external/naver-fonts" path="external/naver-fonts" revision="91e6e9f94d1d769a8f742649674149ba98ce7d45" upstream="master" />
+ <project groups="pdk" name="platform/external/netcat" path="external/netcat" revision="b023a43765b15f0b0fd5b52b7d8021f515c59c23" upstream="master" />
+ <project groups="pdk" name="platform/external/netperf" path="external/netperf" revision="e100a0c4105b94f7044b243287a5da22f5e8e4e7" upstream="master" />
+ <project groups="pdk" name="platform/external/neven" path="external/neven" revision="95e6c4663c640c67a69e1d2b44696753282136f8" upstream="master" />
+ <project groups="pdk" name="platform/external/nfacct" path="external/nfacct" revision="833985690db54f9ad3ee7e8f3147a67da8c04760" upstream="master" />
+ <project groups="pdk" name="platform/external/nist-pkits" path="external/nist-pkits" revision="89fb1b9fdc7f5c48d4983fad62af62c1b780e428" upstream="master" />
+ <project groups="pdk" name="platform/external/nist-sip" path="external/nist-sip" revision="8445067c3b53e5bd56c32f1c4fb688083c4f4005" upstream="master" />
+ <project groups="pdk" name="platform/external/noto-fonts" path="external/noto-fonts" revision="f57026f635e85b868c88931bf0e233756bdd7e3f" upstream="master" />
+ <project groups="pdk" name="platform/external/oauth" path="external/oauth" revision="49f3624a6d3307b640a012f15b94d04174473501" upstream="master" />
+ <project groups="pdk" name="platform/external/objenesis" path="external/objenesis" revision="027386d6375a3cb34d9934fd952039254831cbc6" upstream="master" />
+ <project groups="pdk" name="platform/external/okhttp" path="external/okhttp" revision="7cf56bd49b8d529696b080a1ca6b69ae635b8233" upstream="master" />
+ <project groups="pdk-cw-fs,pdk-fs" name="platform/external/opencv" path="external/opencv" revision="35dcc67558b63d08c47cd4bb3f989d7a9b77b884" upstream="master" />
+ <project groups="pdk" name="platform/external/opencv3" path="external/opencv3" revision="0f2a8a9b749849ebda41581b14df8fc6b55df859" upstream="master" />
+ <project groups="pdk" name="platform/external/owasp/sanitizer" path="external/owasp/sanitizer" revision="bbfb25464ff30c5a62dce351d719a8c533afb2a3" upstream="master" />
+ <project groups="pdk" name="platform/external/parameter-framework" path="external/parameter-framework" revision="5a0a6e5203f813391ecb8ae385b2e9bc7130d2ab" upstream="master" />
+ <project groups="pdk" name="platform/external/pcre" path="external/pcre" revision="0b62c4871d43f3602585d34decfc044a836283a6" upstream="master" />
+ <project groups="pdk" name="platform/external/pdfium" path="external/pdfium" revision="1d14fc6f33e7adb95c7a10003ba5d52846bad4f7" upstream="master" />
+ <project groups="pdk" name="platform/external/piex" path="external/piex" revision="8f540f64b6c170a16fb7e6e52d61819705c1522a" upstream="master" />
+ <project groups="pdk" name="platform/external/ppp" path="external/ppp" revision="d9aeba443abce801cd696b3921a559cab88e1e74" upstream="master" />
+ <project groups="pdk" name="platform/external/proguard" path="external/proguard" revision="af893cdc1242e4cfbeeb10e2323f9a689a825177" upstream="master" />
+ <project groups="pdk" name="platform/external/protobuf" path="external/protobuf" revision="7d33a02d1558092751915062611de4b29b998a67" upstream="master" />
+ <project groups="pdk" name="platform/external/regex-re2" path="external/regex-re2" revision="79cce43a82abc1bc56c65de07a7df47d54e163a9" upstream="master" />
+ <project groups="pdk" name="platform/external/replicaisland" path="external/replicaisland" revision="374426a588bfffc7d0657bffe05b65660b438007" upstream="master" />
+ <project groups="pdk" name="platform/external/rmi4utils" path="external/rmi4utils" revision="40eb2d785d3e367c01fc2a3d53820550e7f66739" upstream="master" />
+ <project groups="pdk-cw-fs,pdk-fs" name="platform/external/robolectric" path="external/robolectric" revision="f522a207e60af3e8cf895e681fb25278219dc11c" upstream="master" />
+ <project groups="pdk" name="platform/external/roboto-fonts" path="external/roboto-fonts" revision="25fa2dde3aacaeacb2797bcffddce96014f4fcc3" upstream="master" />
+ <project groups="pdk" name="platform/external/rootdev" path="external/rootdev" revision="cd960ab4e9bcd9b6be3cd531eb63e9e73edfe438" upstream="master" />
+ <project groups="pdk" name="platform/external/safe-iop" path="external/safe-iop" revision="cd76f998688d145235de78ecd5b340d0eac9239d" upstream="master" />
+ <project groups="pdk" name="platform/external/scrypt" path="external/scrypt" revision="b442b13773a8b79a99a03af14af0e8dc1de85cba" upstream="master" />
+ <project name="platform/external/seccomp-tests" path="external/seccomp-tests" revision="3efdf98e3541de21d79fa6347bb2796c5e1363b5" upstream="master" />
+ <project groups="pdk" name="platform/external/selinux" path="external/selinux" revision="d4f63e1bc6aca0525b2c2e535868226b03084803" upstream="master" />
+ <project groups="pdk" name="platform/external/shflags" path="external/shflags" revision="c4876e01829b8cf110ee33267bb1bad1f8ebb51d" upstream="master" />
+ <project groups="pdk,qcom_msm8x26" name="platform/external/sfntly" path="external/sfntly" revision="61657b2d87b88576ab1b7d7fb5768b0e315c9216" upstream="master" />
+ <project groups="pdk,qcom_msm8x26" name="platform/external/skia" path="external/skia" revision="9bbadff63736989ebedfe805caf0bf31ed38c190" upstream="master" />
+ <project groups="pdk" name="platform/external/sl4a" path="external/sl4a" revision="2dd604a55f38d0d6401d7e5c3c440b622d4eccf4" upstream="master" />
+ <project groups="pdk" name="platform/external/slf4j" path="external/slf4j" revision="037a293cd09178f946d5df43e8dff1ad8758c163" upstream="master" />
+ <project groups="pdk" name="platform/external/smali" path="external/smali" revision="cb5c5daefce2792ce8641d01eb8bfb7f0b02bd10" upstream="master" />
+ <project groups="pdk" name="platform/external/snakeyaml" path="external/snakeyaml" revision="40acbd1fe67b448e47f7e50208f306d9a3a7c4b5" upstream="master" />
+ <project groups="pdk" name="platform/external/sonic" path="external/sonic" revision="0227d834966b304525869d88fd20b2ac835df878" upstream="master" />
+ <project name="platform/external/spirv-llvm" path="external/spirv-llvm" revision="0e839d7e9d7e5a096949e39a4c6e00670c8dee64" upstream="master" />
+ <project groups="pdk" name="platform/external/sonivox" path="external/sonivox" revision="070e31de1ec6964cbd78215ef97e5dec9395a380" upstream="master" />
+ <project groups="pdk" name="platform/external/speex" path="external/speex" revision="1752ac1e55a894f2c372b544cb4843ce9605e906" upstream="master" />
+ <project groups="pdk" name="platform/external/sqlite" path="external/sqlite" revision="572c81a44448b94f68cfd28f6a64a9b597c40314" upstream="master" />
+ <project groups="pdk" name="platform/external/squashfs-tools" path="external/squashfs-tools" revision="a16d3bd985d76f2358e09689d808377c528f2b20" upstream="master" />
+ <project groups="pdk" name="platform/external/srtp" path="external/srtp" revision="ab8d27c7566de29e3a0af3f2324036e8d5646d76" upstream="master" />
+ <project groups="pdk" name="platform/external/strace" path="external/strace" revision="ecaf114d8d7a670aa52e91acbfa43837a57e3558" upstream="master" />
+ <project groups="pdk" name="platform/external/svox" path="external/svox" revision="b2d04a95a8d7f5edeb4fe4d7b2febbd7b5b265c2" upstream="master" />
+ <project groups="pdk" name="platform/external/tagsoup" path="external/tagsoup" revision="9c02d9f506855965ec513685788890dfc856a5bc" upstream="master" />
+ <project groups="pdk" name="platform/external/testng" path="external/testng" revision="aa60862b5f2758a7d2f80f1ca224b310de1d674d" upstream="master" />
+ <project groups="pdk" name="platform/external/tcpdump" path="external/tcpdump" revision="8d9e2b3066ed0100523efb4c656f456759006b51" upstream="master" />
+ <project groups="pdk" name="platform/external/timezonepicker-support" path="external/timezonepicker-support" revision="99e91a76fd74bad10266623d67cdb98d011f709e" upstream="master" />
+ <project groups="pdk" name="platform/external/tinyalsa" path="external/tinyalsa" revision="4e3bf637057e8de60e6d0243c35a9fef698f9e2c" upstream="master" />
+ <project groups="pdk" name="platform/external/tinycompress" path="external/tinycompress" revision="379571405632a407b8ad0e4cea6a3ec86e5703c1" upstream="master" />
+ <project groups="pdk" name="platform/external/tinyxml" path="external/tinyxml" revision="b162e864bd02bb79423b4ef01d0e5e5840aa416b" upstream="master" />
+ <project groups="pdk" name="platform/external/tinyxml2" path="external/tinyxml2" revision="c04f69a047fb68e4041a47bf2ae760c047c0baf0" upstream="master" />
+ <project groups="pdk" name="platform/external/tlsdate" path="external/tlsdate" revision="5a3de7f1137f650c5b4da38fcf3da3a00be905d2" upstream="master" />
+ <project groups="pdk" name="platform/external/toybox" path="external/toybox" revision="ccb4be08fad7e626d69f1a063d3032438ff9900b" upstream="master" />
+ <project groups="pdk" name="platform/external/tpm2" path="external/tpm2" revision="c5912a41a95c992137228de2466d616080a5f61d" upstream="master" />
+ <project groups="pdk" name="platform/external/tremolo" path="external/tremolo" revision="23ee6597a66a0963e2c69238138a286f8aaf0885" upstream="master" />
+ <project groups="pdk" name="platform/external/unicode" path="external/unicode" revision="92ae6009367b0affedfcb364aa21074ed909127e" upstream="master" />
+ <project groups="pdk" name="platform/external/v8" path="external/v8" revision="8f88a037d1053f646dbef380367f542e6b9eb26b" upstream="master" />
+ <project groups="pdk" name="platform/external/valgrind" path="external/valgrind" revision="3a6eb653edc92b4717d19178f8f98b194eba173f" upstream="master" />
+ <project groups="vboot,pdk-fs" name="platform/external/vboot_reference" path="external/vboot_reference" revision="495cf5a9f329f9bdf501579c9a992edffbd883be" upstream="master" />
+ <project groups="pdk" name="platform/external/vixl" path="external/vixl" revision="3f4fb67ee6479e65b4e0ad1a6a52782c4ac7f2d6" upstream="master" />
+ <project groups="pdk" name="platform/external/vogar" path="external/vogar" revision="de2f250e56abb8b08cea424235a308d59115d6cb" upstream="master" />
+ <project groups="pdk" name="platform/external/vulkan-validation-layers" path="external/vulkan-validation-layers" revision="9a4ddc16e893b24b7e48adbff2a2de57d6e73641" upstream="master" />
+ <project groups="pdk,qcom_msm8x26" name="platform/external/webp" path="external/webp" revision="9952695a9090668326a9bcbd4e44928525d6d49b" upstream="master" />
+ <project groups="pdk" name="platform/external/webrtc" path="external/webrtc" revision="b33ba455c4b07b719bf1982ad3e2121709be1c62" upstream="master" />
+ <project groups="pdk" name="platform/external/wpa_supplicant_8" path="external/wpa_supplicant_8" revision="191714908c4486bae9dd75dbc64be518626a353d" upstream="master" />
+ <project groups="pdk" name="platform/external/xmlrpcpp" path="external/xmlrpcpp" revision="1d7192fe0d2f788f8f150899c58b86c5ff10456a" upstream="master" />
+ <project groups="pdk" name="platform/external/xmlwriter" path="external/xmlwriter" revision="e95d92246ee35273dde2bee8b00485cc14c12be5" upstream="master" />
+ <project groups="pdk" name="platform/external/xmp_toolkit" path="external/xmp_toolkit" revision="42ea4dc6d1fc2206a7778029070ed9213e3b0fbf" upstream="master" />
+ <project groups="pdk" name="platform/external/zlib" path="external/zlib" revision="ece4b39a7a3a4a32927df37e0b80f68f51613a5d" upstream="master" />
+ <project groups="pdk" name="platform/external/zopfli" path="external/zopfli" revision="c01121c52279a02f503bec3275ffb61fd837b555" upstream="master" />
+ <project groups="pdk" name="platform/external/zxing" path="external/zxing" revision="fedf8f2d8099bf7bb85dc3db8699343d56617deb" upstream="master" />
+ <project groups="pdk" name="platform/frameworks/av" path="frameworks/av" revision="fa81ba8775c497c229078c25bcf8374e8420ea18" upstream="master" />
+ <project groups="pdk-cw-fs,pdk-fs" name="platform/frameworks/base" path="frameworks/base" revision="99d6227782844e42d7060b14d320cfd66a536798" upstream="master" />
+ <project groups="pdk" name="platform/frameworks/compile/libbcc" path="frameworks/compile/libbcc" revision="28840649ba797dadf4bfb5fdb2acd065197fbdb6" upstream="master" />
+ <project groups="pdk" name="platform/frameworks/compile/mclinker" path="frameworks/compile/mclinker" revision="fa083b9e91969ed6f15fac2fffb619c9e82746b1" upstream="master" />
+ <project groups="pdk" name="platform/frameworks/compile/slang" path="frameworks/compile/slang" revision="32d08d8d740b9f242a3d1d2ed6d6b963d7187b60" upstream="master" />
+ <project groups="pdk-cw-fs,pdk-fs" name="platform/frameworks/ex" path="frameworks/ex" revision="6033321c6f8305aa2f35b6f9c33169d69ec6fc91" upstream="master" />
+ <project groups="pdk-cw-fs,pdk-fs" name="platform/frameworks/minikin" path="frameworks/minikin" revision="00b9a21209f6a9117f4915eb3fc074564e21aadd" upstream="master" />
+ <project groups="pdk-cw-fs,pdk-fs" name="platform/frameworks/ml" path="frameworks/ml" revision="4745c0f6f00ddc50bd2464589384e74e22253d0c" upstream="master" />
+ <project groups="pdk-cw-fs,pdk-fs" name="platform/frameworks/multidex" path="frameworks/multidex" revision="a2e21e78613b5b5ceaadadc2989c592cec6a661c" upstream="master" />
+ <project groups="pdk" name="platform/frameworks/native" path="frameworks/native" revision="b10d705f517b4faa5884e963db71806445d79a31" upstream="master" />
+ <project groups="pdk-fs" name="platform/frameworks/opt/bitmap" path="frameworks/opt/bitmap" revision="a0d4e3108663202564a6833b76770075b8e5b767" upstream="master" />
+ <project groups="pdk-cw-fs,pdk-fs" name="platform/frameworks/opt/bluetooth" path="frameworks/opt/bluetooth" revision="b39b00b3dfdb634d96758f378498aca4caac499d" upstream="master" />
+ <project groups="pdk-cw-fs,pdk-fs" name="platform/frameworks/opt/calendar" path="frameworks/opt/calendar" revision="03b18577f8f8f799e87a62b8e03889ddacf6daa2" upstream="master" />
+ <project groups="pdk-cw-fs,pdk-fs" name="platform/frameworks/opt/chips" path="frameworks/opt/chips" revision="3bf4c863b357124e421f6d6732ff7b802d9b4260" upstream="master" />
+ <project groups="pdk-cw-fs,pdk-fs" name="platform/frameworks/opt/colorpicker" path="frameworks/opt/colorpicker" revision="c1d5eb2d31fb6a74ed129722754139c759aedbe8" upstream="master" />
+ <project groups="pdk-cw-fs,pdk-fs" name="platform/frameworks/opt/datetimepicker" path="frameworks/opt/datetimepicker" revision="196f8c05f7585b3fcce1f766750cad4329c58656" upstream="master" />
+ <project groups="pdk-cw-fs,pdk-fs" name="platform/frameworks/opt/emoji" path="frameworks/opt/emoji" revision="92eede13edbf22b501edb9aeb92366f91eab9781" upstream="master" />
+ <project groups="pdk-fs" name="platform/frameworks/opt/inputconnectioncommon" path="frameworks/opt/inputconnectioncommon" revision="3baece9b20fa480da46d860acd7320fd9eee3386" upstream="master" />
+ <project groups="pdk-cw-fs,pdk-fs" name="platform/frameworks/opt/inputmethodcommon" path="frameworks/opt/inputmethodcommon" revision="990fcb1a6dbb5d1204cc8ec86e4bc3f691f4aeeb" upstream="master" />
+ <project groups="pdk-fs" name="platform/frameworks/opt/net/ethernet" path="frameworks/opt/net/ethernet" revision="8060ae0aeeea47c8fbe2896b0af3b1dd69f923a3" upstream="master" />
+ <project groups="frameworks_ims,pdk-cw-fs,pdk-fs" name="platform/frameworks/opt/net/ims" path="frameworks/opt/net/ims" revision="0110b7d9c4e20228931fa915a80f0373b1104762" upstream="master" />
+ <project groups="pdk-cw-fs,pdk-fs" name="platform/frameworks/opt/net/voip" path="frameworks/opt/net/voip" revision="9581957a57cb625ca9124bc9d92fcf080058e083" upstream="master" />
+ <project groups="pdk" name="platform/frameworks/opt/net/wifi" path="frameworks/opt/net/wifi" revision="dc6361c58a4eb2e7dd931ffe1cc0fb5129f004c9" upstream="master" />
+ <project groups="pdk-cw-fs,pdk-fs" name="platform/frameworks/opt/photoviewer" path="frameworks/opt/photoviewer" revision="445813db01b5b2f50e80f6baea3254bf16f965bc" upstream="master" />
+ <project groups="pdk-cw-fs,pdk-fs" name="platform/frameworks/opt/setupwizard" path="frameworks/opt/setupwizard" revision="218e756c3e1e74a8c63ef7a55644af2cc69c880b" upstream="master" />
+ <project groups="pdk" name="platform/frameworks/opt/telephony" path="frameworks/opt/telephony" revision="3ae7236b50c94b24410af3bffa061a961a691984" upstream="master" />
+ <project groups="pdk-cw-fs,pdk-fs" name="platform/frameworks/opt/timezonepicker" path="frameworks/opt/timezonepicker" revision="965bd15605d8d7b3d06442263b368b025b05b0e6" upstream="master" />
+ <project groups="pdk-cw-fs,pdk-fs" name="platform/frameworks/opt/vcard" path="frameworks/opt/vcard" revision="03bfa415fab150deb666b1b5aad5913511bd7d33" upstream="master" />
+ <project groups="pdk" name="platform/frameworks/rs" path="frameworks/rs" revision="efe97b05b6bb330a7c4ddc698ac485cfaa7af833" upstream="master" />
+ <project groups="pdk-cw-fs,pdk-fs" name="platform/frameworks/support" path="frameworks/support" revision="d732fd4f367018165b3881284cf588af9a86fbcf" upstream="master" />
+ <project groups="pdk-cw-fs,pdk-fs" name="platform/frameworks/data-binding" path="frameworks/data-binding" revision="134eecccdce6a36174a3b10131249303ecab2201" upstream="master" />
+ <project groups="pdk-cw-fs,pdk-fs" name="platform/frameworks/volley" path="frameworks/volley" revision="d20f9d3f2375c0eed25dbb76288f611328dcd5ab" upstream="master" />
+ <project groups="pdk-cw-fs,pdk-fs" name="platform/frameworks/webview" path="frameworks/webview" revision="33b7fd3f1361d02e941320b606cc7957edc12f4a" upstream="master" />
+ <project groups="pdk-cw-fs,pdk-fs" name="platform/frameworks/wilhelm" path="frameworks/wilhelm" revision="bb45b9aff58c5d1bb20c70ddb0a086886ffc0ffe" upstream="master" />
+ <project groups="pdk" name="platform/hardware/akm" path="hardware/akm" revision="7201e557ef616d931adeaa25ded2bd4542ef324a" upstream="master" />
+ <project groups="pdk" name="platform/hardware/broadcom/libbt" path="hardware/broadcom/libbt" revision="7f75cbfa70784045c741a60a93581d3b5f7985f5" upstream="master" />
+ <project groups="pdk,broadcom_wlan" name="platform/hardware/broadcom/wlan" path="hardware/broadcom/wlan" revision="a8ea89887b8aa9a0dd6f913c87b194e69c758f09" upstream="master" />
+ <project groups="pdk" name="platform/hardware/google/apf" path="hardware/google/apf" revision="ae7070075ebcbc8e262bc8897616f4b671e38d46" upstream="master" />
+ <project groups="intel,pdk" name="platform/hardware/intel/audio_media" path="hardware/intel/audio_media" revision="218f0d6bc9532d0b707ea325fd96998249fe47bf" upstream="master" />
+ <project groups="intel,pdk" name="platform/hardware/intel/bootstub" path="hardware/intel/bootstub" revision="c759e5127aa582ac515ee1446da15f601b15a99a" upstream="master" />
+ <project groups="intel,pdk" name="platform/hardware/intel/common/bd_prov" path="hardware/intel/common/bd_prov" revision="8af329f2d2b54dfcfa84051d3ce1fae95f79011a" upstream="master" />
+ <project groups="intel,pdk" name="platform/hardware/intel/common/libmix" path="hardware/intel/common/libmix" revision="d2a3d6ec324541ea5ae722d71d5137c02d9a44eb" upstream="master" />
+ <project groups="intel,pdk" name="platform/hardware/intel/common/libstagefrighthw" path="hardware/intel/common/libstagefrighthw" revision="51db5bb05715b75ae137e97a98c9a1a8c9a66aa4" upstream="master" />
+ <project groups="intel,pdk" name="platform/hardware/intel/common/libva" path="hardware/intel/common/libva" revision="581669db5974cdcb477d088bf74b73c11b3bc007" upstream="master" />
+ <project groups="intel,pdk" name="platform/hardware/intel/common/libwsbm" path="hardware/intel/common/libwsbm" revision="ac747113d4f6739b1462ca7fb40f2091691e209b" upstream="master" />
+ <project groups="intel,pdk" name="platform/hardware/intel/common/omx-components" path="hardware/intel/common/omx-components" revision="7f6133ff56bd48a9afd19aac2575c6f687f1d00a" upstream="master" />
+ <project groups="intel,pdk" name="platform/hardware/intel/common/utils" path="hardware/intel/common/utils" revision="1910b7bf5c493bc0d876e5be2b43653dca730a3c" upstream="master" />
+ <project groups="intel,pdk" name="platform/hardware/intel/common/wrs_omxil_core" path="hardware/intel/common/wrs_omxil_core" revision="058936abe360c150890bb735e7f9b8e07e316620" upstream="master" />
+ <project groups="intel,pdk" name="platform/hardware/intel/img/hwcomposer" path="hardware/intel/img/hwcomposer" revision="0b9b9ee0b8436147a74734c0a7f86e8258d38170" upstream="master" />
+ <project groups="intel,pdk" name="platform/hardware/intel/img/psb_headers" path="hardware/intel/img/psb_headers" revision="2212f33af7e208a3b9e3ab2502fc902c9b464010" upstream="master" />
+ <project groups="intel,pdk" name="platform/hardware/intel/img/psb_video" path="hardware/intel/img/psb_video" revision="a805f1c63455a00883e066119fb0c5d533d3a116" upstream="master" />
+ <project groups="intel_sensors,pdk" name="platform/hardware/intel/sensors" path="hardware/intel/sensors" revision="68dc9e70b79dacddc4e0bf00af0de7f764b04eed" upstream="master" />
+ <project groups="pdk" name="platform/hardware/interfaces" path="hardware/interfaces" revision="33e87b8a76d6b5096f19da0a992d233fed370716" upstream="master" />
+ <project groups="invensense,pdk" name="platform/hardware/invensense" path="hardware/invensense" revision="11e5ff75af866f91622b6008fa13db1c3685ae69" upstream="master" />
+ <project groups="pdk" name="platform/hardware/libhardware" path="hardware/libhardware" revision="a24fcc412f8564f4ae98060724dc338fe51a8398" upstream="master" />
+ <project groups="pdk" name="platform/hardware/libhardware_legacy" path="hardware/libhardware_legacy" revision="815654a3d8257e8fc29354af089969168dc0fee5" upstream="master" />
+ <project groups="marvell_bt,pdk" name="platform/hardware/marvell/bt" path="hardware/marvell/bt" revision="3f33d194e8300816b94d1d7b68b1d48c8f903251" upstream="master" />
+ <project groups="qcom,qcom_audio,pdk" name="platform/hardware/qcom/audio" path="hardware/qcom/audio" revision="d29a5ecbbed1410b343a7e84b33f9903bfc0b012" upstream="master" />
+ <project groups="pdk" name="platform/hardware/qcom/bootctrl" path="hardware/qcom/bootctrl" revision="7c119e08ce46a5a655a8320b2e548572a827999c" upstream="master" />
+ <project groups="qcom,pdk" name="platform/hardware/qcom/bt" path="hardware/qcom/bt" revision="b9e99b576d94c1cff95960c6bf7ec8f0b272f7c6" upstream="master" />
+ <project groups="qcom,pdk" name="platform/hardware/qcom/camera" path="hardware/qcom/camera" revision="7bd85b4ae86232adc73483323b257ccc22591d40" upstream="master" />
+ <project groups="qcom,pdk" name="platform/hardware/qcom/data/ipacfg-mgr" path="hardware/qcom/data/ipacfg-mgr" revision="cc379d116231083e1b804fea9b0ef52d9e105ae1" upstream="master" />
+ <project groups="pdk,qcom,qcom_display" name="platform/hardware/qcom/display" path="hardware/qcom/display" revision="ce3c0609620f02621efb7531811e55c2295c2ade" upstream="master" />
+ <project groups="qcom,qcom_gps,pdk" name="platform/hardware/qcom/gps" path="hardware/qcom/gps" revision="05ef6c51f694c27068afe280d668b7a430cc503e" upstream="master" />
+ <project groups="qcom,qcom_keymaster,pdk" name="platform/hardware/qcom/keymaster" path="hardware/qcom/keymaster" revision="543927ba34c345d5c70a79613145f671a23b69b1" upstream="master" />
+ <project groups="qcom,pdk" name="platform/hardware/qcom/media" path="hardware/qcom/media" revision="a63ba0256cfd3e1baf3e5ffbc6bdefaab0f54a49" upstream="master" />
+ <project groups="qcom_msm8960,pdk" name="platform/hardware/qcom/msm8960" path="hardware/qcom/msm8960" revision="c25a431842a26b5756b58a9d4a42c776e0457ba2" upstream="master" />
+ <project groups="qcom_msm8994,pdk" name="platform/hardware/qcom/msm8994" path="hardware/qcom/msm8994" revision="60aaa00b2e0d537c0f51631dd88d919fc1baf439" upstream="master" />
+ <project groups="qcom_msm8996" name="platform/hardware/qcom/msm8996" path="hardware/qcom/msm8996" revision="48ac587e6b8f7438860bf68a2deffa7e7902fd42" upstream="master" />
+ <project groups="qcom_msm8x26,pdk" name="platform/hardware/qcom/msm8x26" path="hardware/qcom/msm8x26" revision="8b098e346cf0899037c10e3a2e7846a7014f0f1f" upstream="master" />
+ <project groups="qcom_msm8x27,pdk" name="platform/hardware/qcom/msm8x27" path="hardware/qcom/msm8x27" revision="8ff5c0057cbdecfa09410c1710ba043e191a2862" upstream="master" />
+ <project groups="pdk,qcom_msm8x74" name="platform/hardware/qcom/msm8x74" path="hardware/qcom/msm8x74" revision="2b96ffd283fd14d7d153b6b66680da98548679fd" upstream="master" />
+ <project groups="qcom_msm8x84,pdk" name="platform/hardware/qcom/msm8x84" path="hardware/qcom/msm8x84" revision="582b414269d8472d17eef65d8a8965aa8105042f" upstream="master" />
+ <project groups="qcom,pdk" name="platform/hardware/qcom/power" path="hardware/qcom/power" revision="3a098ee1f89c398b9d6e7b5dfae9c694994f8bc4" upstream="master" />
+ <project groups="qcom_wlan,pdk" name="platform/hardware/qcom/wlan" path="hardware/qcom/wlan" revision="1af864da9125aeb4b87435d99a312d0cbd222094" upstream="master" />
+ <project groups="pdk" name="platform/hardware/ril" path="hardware/ril" revision="51f0cc93af43c4ef845925dfd90ad95fa823d56e" upstream="master" />
+ <project name="kernel/tests" path="kernel/tests" revision="2fc2b448a0f4279bce164bd0a650f31796479e17" upstream="master" />
+ <project groups="pdk" name="platform/libcore" path="libcore" revision="8c1a9334edc754752fc88a3823dd4c21494de1b9" upstream="master" />
+ <project groups="pdk" name="platform/libnativehelper" path="libnativehelper" revision="b1d6844f1d6d3da548a2247e70cd85b706f74e03" upstream="master" />
+ <project groups="pdk-cw-fs,pdk-fs" name="platform/packages/apps/BasicSmsReceiver" path="packages/apps/BasicSmsReceiver" revision="7bbd1e4029b33ce598035a9767bdaf8433865e23" upstream="master" />
+ <project groups="pdk-cw-fs,pdk-fs" name="platform/packages/apps/Bluetooth" path="packages/apps/Bluetooth" revision="8fc01bd0eb8bb3f4c28661bbe7d8edf7c6be41c4" upstream="master" />
+ <project groups="pdk-fs" name="platform/packages/apps/Browser2" path="packages/apps/Browser2" revision="c24a10e63dba19b85b42590b968193053d43cd92" upstream="master" />
+ <project groups="pdk-fs" name="platform/packages/apps/Calculator" path="packages/apps/Calculator" revision="e1bda28f1545111ba7734577f23e194bf3db3f5f" upstream="master" />
+ <project groups="pdk-fs" name="platform/packages/apps/Calendar" path="packages/apps/Calendar" revision="2bbf7b8414ebcff2207b08224d8913241a766a5a" upstream="master" />
+ <project groups="pdk-fs" name="platform/packages/apps/Camera2" path="packages/apps/Camera2" revision="f16c21a499c4fec2339f0527408e9d0d7a33b21d" upstream="master" />
+ <project groups="pdk-cw-fs,pdk-fs" name="platform/packages/apps/CarrierConfig" path="packages/apps/CarrierConfig" revision="289dd7c3c3a7c284a88bef7779afae86251b86c5" upstream="master" />
+ <project groups="pdk-cw-fs,pdk-fs" name="platform/packages/apps/CellBroadcastReceiver" path="packages/apps/CellBroadcastReceiver" revision="489754bc92c14640408baa0a60b46e82bfc20822" upstream="master" />
+ <project groups="pdk-cw-fs,pdk-fs" name="platform/packages/apps/CertInstaller" path="packages/apps/CertInstaller" revision="d92ad87497a111256107eb7b06e0bfd1ceafc5cb" upstream="master" />
+ <project groups="pdk-fs" name="platform/packages/apps/Contacts" path="packages/apps/Contacts" revision="47c5b614dd598a95671e02723d8ba8c8cb58ca9f" upstream="master" />
+ <project groups="pdk-fs" name="platform/packages/apps/ContactsCommon" path="packages/apps/ContactsCommon" revision="ec5061cda034f7f8507152816785b4d361a3103d" upstream="master" />
+ <project groups="pdk-fs" name="platform/packages/apps/DeskClock" path="packages/apps/DeskClock" revision="a56610c5125c246eb79812764c1993dfc1835c01" upstream="master" />
+ <project groups="pdk" name="platform/packages/apps/DevCamera" path="packages/apps/DevCamera" revision="dbe726e31887f9cfda05c4fce57a007314af9f4e" upstream="master" />
+ <project groups="pdk-fs" name="platform/packages/apps/Dialer" path="packages/apps/Dialer" revision="d5e3de3253f46bdda4f714db76a83ec0adfbf586" upstream="master" />
+ <project groups="pdk-fs" name="platform/packages/apps/Email" path="packages/apps/Email" revision="05ddb0c52e9080c7e2b77bfd94c155701544af71" upstream="master" />
+ <project groups="pdk-fs" name="platform/packages/apps/EmergencyInfo" path="packages/apps/EmergencyInfo" revision="e0160fb2033544918ef1337640bbcd5775391a00" upstream="master" />
+ <project groups="pdk-fs" name="platform/packages/apps/ExactCalculator" path="packages/apps/ExactCalculator" revision="2b0283187a8f3d73ae16f2f6d398f4e103380797" upstream="master" />
+ <project groups="pdk-fs" name="platform/packages/apps/Gallery" path="packages/apps/Gallery" revision="59a1d8a611f2c9e5b8d0b56e6c9875416a5fd809" upstream="master" />
+ <project groups="pdk-fs" name="platform/packages/apps/Gallery2" path="packages/apps/Gallery2" revision="af4651e7422238ca1f6657f6391f0bf11a2ce624" upstream="master" />
+ <project groups="pdk-fs" name="platform/packages/apps/HTMLViewer" path="packages/apps/HTMLViewer" revision="f19c4a10890cafb17ceaf68cda835dc24aad455c" upstream="master" />
+ <project groups="pdk-fs" name="platform/packages/apps/KeyChain" path="packages/apps/KeyChain" revision="6bc410b83c4da356eba55679cbd8419ade64f213" upstream="master" />
+ <project groups="pdk-fs" name="platform/packages/apps/Launcher2" path="packages/apps/Launcher2" revision="bb66b0c637a0c47f17bfb1e1aac677661593bd48" upstream="master" />
+ <project groups="pdk-fs" name="platform/packages/apps/Launcher3" path="packages/apps/Launcher3" revision="f304453c99b9455198dc733c007bd6eba5922746" upstream="master" />
+ <project groups="pdk-fs" name="platform/packages/apps/LegacyCamera" path="packages/apps/LegacyCamera" revision="529b5485970f23f52513a9a13c1ec96acd34aa58" upstream="master" />
+ <project groups="pdk-fs" name="platform/packages/apps/ManagedProvisioning" path="packages/apps/ManagedProvisioning" revision="0fa07de3558b5aff8a0d960b741abc6af3b9150d" upstream="master" />
+ <project groups="pdk-fs" name="platform/packages/apps/Messaging" path="packages/apps/Messaging" revision="c3b3f3d8f11ce8c80074974110f94d07b027ef58" upstream="master" />
+ <project groups="pdk-fs" name="platform/packages/apps/Music" path="packages/apps/Music" revision="eda3322d780d9ffeac499d0845e3b1141cde0e01" upstream="master" />
+ <project groups="pdk-fs" name="platform/packages/apps/MusicFX" path="packages/apps/MusicFX" revision="a015510a46079c764097fe00285f9ebc539e5311" upstream="master" />
+ <project groups="apps_nfc,pdk-fs" name="platform/packages/apps/Nfc" path="packages/apps/Nfc" revision="2bada48c89301ee38169d3ed06ef77381f6a06c4" upstream="master" />
+ <project groups="pdk-fs" name="platform/packages/apps/OneTimeInitializer" path="packages/apps/OneTimeInitializer" revision="01e429c08e51291315890de9677151a7e0b6ad35" upstream="master" />
+ <project groups="pdk-cw-fs,pdk-fs" name="platform/packages/apps/PackageInstaller" path="packages/apps/PackageInstaller" revision="4e2a5109e509ca65e3a54b2a3c2fa5125a3083cb" upstream="master" />
+ <project groups="pdk-fs" name="platform/packages/apps/Phone" path="packages/apps/Phone" revision="79731f02b7009206a01182d2cca15dfc8491da09" upstream="master" />
+ <project groups="pdk-cw-fs,pdk-fs" name="platform/packages/apps/PhoneCommon" path="packages/apps/PhoneCommon" revision="dd5b923eaf36c1a137e14e043064ae352280afad" upstream="master" />
+ <project groups="pdk-fs" name="platform/packages/apps/Protips" path="packages/apps/Protips" revision="d686dc74168ed5324b2388133531f30255659ae6" upstream="master" />
+ <project groups="pdk-fs" name="platform/packages/apps/Provision" path="packages/apps/Provision" revision="53593db784a7425c424ccd52fcd122847ed26a4d" upstream="master" />
+ <project groups="pdk-fs" name="platform/packages/apps/QuickSearchBox" path="packages/apps/QuickSearchBox" revision="39e4bb19fac59b9412fee7fa7254233dcd928123" upstream="master" />
+ <project groups="pdk-fs" name="platform/packages/apps/Settings" path="packages/apps/Settings" revision="89a055aa9d04bf81b10e793c638a5b26291d9ee7" upstream="master" />
+ <project groups="pdk-fs" name="platform/packages/apps/SoundRecorder" path="packages/apps/SoundRecorder" revision="5f67a8eaea9379d7fce53db77e9ed7b47e49f1e1" upstream="master" />
+ <project groups="pdk-fs" name="platform/packages/apps/SpareParts" path="packages/apps/SpareParts" revision="4db997871e3f4c3f84660815096e5276b47c5c91" upstream="master" />
+ <project groups="pdk-fs" name="platform/packages/apps/SpeechRecorder" path="packages/apps/SpeechRecorder" revision="51cc8462c082a26810f5d7c5a2648232a77e795c" upstream="master" />
+ <project groups="apps_stk,pdk-fs" name="platform/packages/apps/Stk" path="packages/apps/Stk" revision="ea5113094bb4ba3283488b5fd8a5cafb3bb61471" upstream="master" />
+ <project groups="pdk-fs" name="platform/packages/apps/Tag" path="packages/apps/Tag" revision="6b0a720eadcb9c33d0a12d36298811e481de2a8a" upstream="master" />
+ <project groups="pdk-fs" name="platform/packages/apps/Terminal" path="packages/apps/Terminal" revision="3cb5403b8dc4709e36aeee24fceea2ed6d4331e9" upstream="master" />
+ <project groups="pdk" name="platform/packages/apps/Test/connectivity" path="packages/apps/Test/connectivity" revision="891717b67018483387cab44683a069f4989f19a6" upstream="master" />
+ <project groups="pdk-fs" name="platform/packages/apps/TvSettings" path="packages/apps/TvSettings" revision="10611b68644d644b6caeef30a254b551dd4584c9" upstream="master" />
+ <project name="platform/packages/apps/TV" path="packages/apps/TV" revision="a13babc9fa023a1e0c6d4d3bed99372193226fbc" upstream="master" />
+ <project groups="pdk-fs" name="platform/packages/apps/UnifiedEmail" path="packages/apps/UnifiedEmail" revision="7d3754cb8144935ca8a9a1f879cc29076c4f642e" upstream="master" />
+ <project name="platform/packages/experimental" path="packages/experimental" revision="08ec110e208e7f527cd010ee6455c0697f7a8deb" upstream="master" />
+ <project groups="pdk-fs" name="platform/packages/inputmethods/LatinIME" path="packages/inputmethods/LatinIME" revision="d2af70179eb9d4b54ac802a5f147c81106590f10" upstream="master" />
+ <project groups="pdk-fs" name="platform/packages/inputmethods/OpenWnn" path="packages/inputmethods/OpenWnn" revision="59aefa242169b7a51c2381daee58ff22fd1834ce" upstream="master" />
+ <project groups="pdk-fs" name="platform/packages/providers/ApplicationsProvider" path="packages/providers/ApplicationsProvider" revision="33d26f5eedb3d3011762ce5b2de66e931bf64b35" upstream="master" />
+ <project groups="pdk-fs" name="platform/packages/providers/BlockedNumberProvider" path="packages/providers/BlockedNumberProvider" revision="0d633224f59f09831887d9fdc8b24a437cbb8144" upstream="master" />
+ <project groups="pdk-fs" name="platform/packages/providers/BookmarkProvider" path="packages/providers/BookmarkProvider" revision="6ec4bb392332b12162c5a8a1eaba1ee34d389c5c" upstream="master" />
+ <project groups="pdk-cw-fs,pdk-fs" name="platform/packages/providers/CalendarProvider" path="packages/providers/CalendarProvider" revision="e34e1daee9f6e95cf10900b354bcc41300c9fe35" upstream="master" />
+ <project groups="pdk-fs" name="platform/packages/providers/CallLogProvider" path="packages/providers/CallLogProvider" revision="cb13993387ae268581e0fe9c9e998df9a8109738" upstream="master" />
+ <project groups="pdk-cw-fs,pdk-fs" name="platform/packages/providers/ContactsProvider" path="packages/providers/ContactsProvider" revision="71c9c8c0c1e077060877d15f2ea5317f7167d4cd" upstream="master" />
+ <project groups="pdk-cw-fs,pdk-fs" name="platform/packages/providers/DownloadProvider" path="packages/providers/DownloadProvider" revision="d10d8649c0eefd43d064c050e880defd13a5091b" upstream="master" />
+ <project groups="pdk-cw-fs,pdk-fs" name="platform/packages/providers/MediaProvider" path="packages/providers/MediaProvider" revision="9eb770968bfc9f5062367b34e5ce16bec40f6834" upstream="master" />
+ <project groups="pdk-fs" name="platform/packages/providers/PartnerBookmarksProvider" path="packages/providers/PartnerBookmarksProvider" revision="96d0a80af45923767baf449fc8c735c2f71d64ae" upstream="master" />
+ <project groups="pdk-cw-fs,pdk-fs" name="platform/packages/providers/TelephonyProvider" path="packages/providers/TelephonyProvider" revision="613d5b36f0656e34e279276301d43ea259be83f8" upstream="master" />
+ <project groups="pdk-fs" name="platform/packages/providers/TvProvider" path="packages/providers/TvProvider" revision="bf8a7426e7d535baacf123215f1c3937025373a0" upstream="master" />
+ <project groups="pdk-cw-fs,pdk-fs" name="platform/packages/providers/UserDictionaryProvider" path="packages/providers/UserDictionaryProvider" revision="f56fdc6cbe18d1b1f7c778a7890901f6a0176087" upstream="master" />
+ <project groups="pdk-fs" name="platform/packages/screensavers/Basic" path="packages/screensavers/Basic" revision="e72ef139782ca896dced7524abe7cfcc9ed35db1" upstream="master" />
+ <project groups="pdk-fs" name="platform/packages/screensavers/PhotoTable" path="packages/screensavers/PhotoTable" revision="5a048e44c69d71a9ada339797bbb22b7a8b8d3d5" upstream="master" />
+ <project groups="pdk-fs" name="platform/packages/screensavers/WebView" path="packages/screensavers/WebView" revision="6e0a80f6faed6191acc8ce1b6c79eada09e9e042" upstream="master" />
+ <project groups="adp8064,pdk-cw-fs,pdk-fs" name="platform/packages/services/Car" path="packages/services/Car" revision="c2f6f6add838f01a4cb232a34ea69171a4ccd148" upstream="master" />
+ <project groups="pdk-cw-fs,pdk-fs" name="platform/packages/services/Mms" path="packages/services/Mms" revision="6987193d680fe7245b462f98656652e127599dc0" upstream="master" />
+ <project groups="pdk-cw-fs,pdk-fs" name="platform/packages/services/Telecomm" path="packages/services/Telecomm" revision="f9e416e5155810f7b1955d01fd605b697cfd597c" upstream="master" />
+ <project groups="pdk-cw-fs,pdk-fs" name="platform/packages/services/Telephony" path="packages/services/Telephony" revision="579dc129cfe02d2788ca189d6ee6620eb7f9e88f" upstream="master" />
+ <project groups="pdk-fs" name="platform/packages/wallpapers/LivePicker" path="packages/wallpapers/LivePicker" revision="f02a37c8b18a6dff94dd721306662cdb8439e9a7" upstream="master" />
+ <project groups="pdk" name="platform/pdk" path="pdk" revision="708589163e790eb3a7fec09f4832a0f78999bdf3" upstream="master" />
+ <project groups="pdk-fs,pdk-cw-fs,cts" name="platform/platform_testing" path="platform_testing" revision="b3ddb9fdf5225aa09bde8d0c8f0c87d9ab4e0664" upstream="master" />
+ <project clone-depth="1" groups="pdk-fs" name="platform/prebuilts/android-emulator" path="prebuilts/android-emulator" revision="8a4758b098908adc6f3fad80c0f9ad04f5660985" upstream="master" />
+ <project clone-depth="1" groups="pdk" name="platform/prebuilts/build-tools" path="prebuilts/build-tools" revision="3f66f9263b16bec27cbea6b85655d3d0bce4b994" upstream="master" />
+ <project clone-depth="1" groups="pdk,darwin" name="platform/prebuilts/clang/host/darwin-x86" path="prebuilts/clang/host/darwin-x86" revision="0aadd8ffdadaa372c2dd1fd79c13af7ff71f0bde" upstream="master" />
+ <project clone-depth="1" groups="pdk" name="platform/prebuilts/clang/host/linux-x86" path="prebuilts/clang/host/linux-x86" revision="3a1e6a6c6d906c27e69e3a5dd154d1aba09cbe9b" upstream="master" />
+ <project clone-depth="1" groups="pdk-fs" name="platform/prebuilts/deqp" path="prebuilts/deqp" revision="0d2f85253d5a040f5ba44ba22d791e02f51d440e" upstream="master" />
+ <project clone-depth="1" groups="pdk-fs" name="platform/prebuilts/devtools" path="prebuilts/devtools" revision="d054448a1147fc5294089b6ac7aa3abe92202761" upstream="master" />
+ <project clone-depth="1" groups="pdk" name="platform/prebuilts/eclipse" path="prebuilts/eclipse" revision="cf9f78f8cf41b16edf9f712598a42743d5cea4af" upstream="master" />
+ <project clone-depth="1" groups="notdefault,eclipse" name="platform/prebuilts/eclipse-build-deps" path="prebuilts/eclipse-build-deps" revision="ceb739d6a7c10f5fb5a6cf6e1f702453b1361ad3" upstream="master" />
+ <project clone-depth="1" groups="pdk,darwin,arm" name="platform/prebuilts/gcc/darwin-x86/aarch64/aarch64-linux-android-4.9" path="prebuilts/gcc/darwin-x86/aarch64/aarch64-linux-android-4.9" revision="6bf153df308133f0aeda4f4738537bbeca7f253a" upstream="master" />
+ <project clone-depth="1" groups="pdk,darwin,arm" name="platform/prebuilts/gcc/darwin-x86/arm/arm-linux-androideabi-4.9" path="prebuilts/gcc/darwin-x86/arm/arm-linux-androideabi-4.9" revision="d610c08f90ee2e0d529cb2360ac03b249e037e12" upstream="master" />
+ <project clone-depth="1" groups="pdk,darwin" name="platform/prebuilts/gcc/darwin-x86/host/i686-apple-darwin-4.2.1" path="prebuilts/gcc/darwin-x86/host/i686-apple-darwin-4.2.1" revision="ec5aa66aaa4964c27564d0ec84dc1f18a2d72b7e" upstream="master" />
+ <project clone-depth="1" groups="pdk,darwin,mips" name="platform/prebuilts/gcc/darwin-x86/mips/mips64el-linux-android-4.9" path="prebuilts/gcc/darwin-x86/mips/mips64el-linux-android-4.9" revision="0c0a70932154aaea3e32d616d7971597da608ed8" upstream="master" />
+ <project clone-depth="1" groups="pdk,darwin,x86" name="platform/prebuilts/gcc/darwin-x86/x86/x86_64-linux-android-4.9" path="prebuilts/gcc/darwin-x86/x86/x86_64-linux-android-4.9" revision="8057ba1ac5539e624df10934bee31c7ece103937" upstream="master" />
+ <project clone-depth="1" groups="pdk,linux,arm" name="platform/prebuilts/gcc/linux-x86/aarch64/aarch64-linux-android-4.9" path="prebuilts/gcc/linux-x86/aarch64/aarch64-linux-android-4.9" revision="5f0aa27de21fbc608e48def3647cdcaef0422820" upstream="master" />
+ <project clone-depth="1" groups="pdk,linux,arm" name="platform/prebuilts/gcc/linux-x86/arm/arm-linux-androideabi-4.9" path="prebuilts/gcc/linux-x86/arm/arm-linux-androideabi-4.9" revision="bf02f9b2b4f955d874fdaace765e408d0b31f1e7" upstream="master" />
+ <project clone-depth="1" groups="pdk,linux" name="platform/prebuilts/gcc/linux-x86/host/x86_64-linux-glibc2.11-4.8" path="prebuilts/gcc/linux-x86/host/x86_64-linux-glibc2.11-4.8" revision="7a8824915a05bef8581fb1f614c4eee9564c4ba2" upstream="master" />
+ <project clone-depth="1" groups="pdk,linux" name="platform/prebuilts/gcc/linux-x86/host/x86_64-linux-glibc2.15-4.8" path="prebuilts/gcc/linux-x86/host/x86_64-linux-glibc2.15-4.8" revision="73ca99196723f810dad42390d154654354f57c16" upstream="master" />
+ <project clone-depth="1" groups="pdk-fs" name="platform/prebuilts/gcc/linux-x86/host/x86_64-w64-mingw32-4.8" path="prebuilts/gcc/linux-x86/host/x86_64-w64-mingw32-4.8" revision="93ce6bc0bd97ae4bb5f7e9e7e2a752262a1ec125" upstream="master" />
+ <project clone-depth="1" groups="pdk,linux,mips" name="platform/prebuilts/gcc/linux-x86/mips/mips64el-linux-android-4.9" path="prebuilts/gcc/linux-x86/mips/mips64el-linux-android-4.9" revision="5365d3f36acfb7b5ee3cc707d14df6fb444f7554" upstream="master" />
+ <project clone-depth="1" groups="pdk,linux,x86" name="platform/prebuilts/gcc/linux-x86/x86/x86_64-linux-android-4.9" path="prebuilts/gcc/linux-x86/x86/x86_64-linux-android-4.9" revision="e89c079c1b97e6f2cbb17bbac494075fda83c374" upstream="master" />
+ <project clone-depth="1" groups="darwin" name="platform/prebuilts/gdb/darwin-x86" path="prebuilts/gdb/darwin-x86" revision="b293f0429e4f7797382e1c8d9153459996ba332c" upstream="master" />
+ <project clone-depth="1" groups="linux" name="platform/prebuilts/gdb/linux-x86" path="prebuilts/gdb/linux-x86" revision="01e05a5d0421cd02111b33b359b1e7b40e3efa24" upstream="master" />
+ <project clone-depth="1" groups="darwin,tradefed" name="platform/prebuilts/go/darwin-x86" path="prebuilts/go/darwin-x86" revision="960f10dc2e9d07589d05ab32a69e3ce379a912dc" upstream="master" />
+ <project clone-depth="1" groups="linux,tradefed" name="platform/prebuilts/go/linux-x86" path="prebuilts/go/linux-x86" revision="ad811e09d6291486ada15c1f7e84982227e9988b" upstream="master" />
+ <project clone-depth="1" groups="pdk-cw-fs,pdk-fs" name="platform/prebuilts/gradle-plugin" path="prebuilts/gradle-plugin" revision="269e773f423c9d8ae762324cc7cb1d0149a983b7" upstream="master" />
+ <project clone-depth="1" groups="pdk-cw-fs,pdk-fs" name="platform/prebuilts/libs/libedit" path="prebuilts/libs/libedit" revision="d32685dba4011664b590b94ad156bc734c2c9bb5" upstream="master" />
+ <project clone-depth="1" groups="pdk-cw-fs,pdk-fs" name="platform/prebuilts/maven_repo/android" path="prebuilts/maven_repo/android" revision="51c582c7cef5041e3df0906020713c3d97568ed7" upstream="master" />
+ <project clone-depth="1" groups="pdk" name="platform/prebuilts/misc" path="prebuilts/misc" revision="e7b8dd9761338cb0d06a7ca849e2ce99b322b47b" upstream="master" />
+ <project clone-depth="1" groups="pdk" name="platform/prebuilts/ndk" path="prebuilts/ndk" revision="dd39467272dcfcce397fd1765a41f2abb16156e5" upstream="master" />
+ <project clone-depth="1" groups="darwin,pdk,pdk-cw-fs,pdk-fs" name="platform/prebuilts/python/darwin-x86/2.7.5" path="prebuilts/python/darwin-x86/2.7.5" revision="0c5958b1636c47ed7c284f859c8e805fd06a0e63" upstream="master" />
+ <project clone-depth="1" groups="linux,pdk,pdk-cw-fs,pdk-fs" name="platform/prebuilts/python/linux-x86/2.7.5" path="prebuilts/python/linux-x86/2.7.5" revision="a3f090d3e5c1edb1fc7b769fc89ae2f27ea85a02" upstream="master" />
+ <project clone-depth="1" groups="pdk" name="platform/prebuilts/qemu-kernel" path="prebuilts/qemu-kernel" revision="60495c068240d9b4bd05f478707ac3809178a1d0" upstream="master" />
+ <project clone-depth="1" groups="pdk" name="platform/prebuilts/sdk" path="prebuilts/sdk" revision="729520ecaff9189ec17d1414bfd8c532ba6b1b20" upstream="master" />
+ <project clone-depth="1" groups="pdk,tools" name="platform/prebuilts/tools" path="prebuilts/tools" revision="ae68e07e990175496bf162f2db16ccbd266a16c7" upstream="master" />
+ <project groups="pdk-cw-fs,pdk-fs" name="platform/sdk" path="sdk" revision="576f858aaab3793cbed5e61dec31e3d141d1a559" upstream="master" />
+ <project groups="pdk" name="platform/system/bt" path="system/bt" revision="2e8e9f42132d0297a94c0852a1fbf45c9bc348e5" upstream="master" />
+ <project groups="pdk" name="platform/system/ca-certificates" path="system/ca-certificates" revision="c36d8eb8071d73528993e024e73d40c6977b1d0d" upstream="master" />
+ <project name="platform/system/connectivity/wificond" path="system/connectivity/wificond" revision="903b9d029cbb2897fc3b2c2b731bf9e1bc7bb4a0" upstream="master" />
+ <project name="platform/system/connectivity/wifilogd" path="system/connectivity/wifilogd" revision="fd59221459953a821b9021e00276749ac46ea5ea" upstream="master" />
+ <project groups="pdk" name="platform/system/core" path="system/core" revision="16017b3fbbb6ada5cd22ca38e80776981800e036" upstream="master" />
+ <project groups="pdk" name="platform/system/extras" path="system/extras" revision="89bf4d4276ebe302722d2f929d4be41b0e155e00" upstream="master" />
+ <project groups="pdk" name="platform/system/gatekeeper" path="system/gatekeeper" revision="0c4a1124137981a98331ded6f676417fcffe1bb6" upstream="master" />
+ <project groups="pdk" name="platform/system/hwservicemanager" path="system/hwservicemanager" revision="275a88cfa84c8b70e8437e5cd5cfa92caa456755" upstream="master" />
+ <project groups="pdk" name="platform/system/keymaster" path="system/keymaster" revision="eaaa95d0c290d845a4d152b50a61a740bb488611" upstream="master" />
+ <project name="platform/system/libhidl" path="system/libhidl" revision="4d1e9cccc1762eaa7613981d8d7953cb47054750" upstream="master" />
+ <project groups="pdk" name="platform/system/libhwbinder" path="system/libhwbinder" revision="44c7252234df75f82381357da0c71783ca8ad3a2" upstream="master" />
+ <project name="platform/system/libvintf" path="system/libvintf" revision="745e79f81c15db39e3278e226b35f0959342437a" upstream="master" />
+ <project groups="pdk" name="platform/system/media" path="system/media" revision="76f97250ff8fc843e321b6aea5d15f9cc1367861" upstream="master" />
+ <project name="platform/system/nativepower" path="system/nativepower" revision="92e272cb1f04defd4ea4ca6869e357d72cc780f6" upstream="master" />
+ <project groups="pdk" name="platform/system/netd" path="system/netd" revision="ff1587fc4ee0434129f6c2431eabeb291fdb0936" upstream="master" />
+ <project name="platform/system/nvram" path="system/nvram" revision="1172562603a15de574acd9e48f322a77f86d9d63" upstream="master" />
+ <project groups="pdk" name="platform/system/security" path="system/security" revision="23545340601012981ee4d0f8b99acc7da259ccc1" upstream="master" />
+ <project groups="pdk" name="platform/system/sepolicy" path="system/sepolicy" revision="e6a20295c8fcfca3fd6b70ba256b915688fd4367" upstream="master" />
+ <project groups="pdk" name="platform/system/tools/aidl" path="system/tools/aidl" revision="93298ee273b14bb658500fa1c87286bb9ae7aabb" upstream="master" />
+ <project groups="pdk" name="platform/system/tools/hidl" path="system/tools/hidl" revision="2f69a5b157e9c5c41847e55bf3e7dbb01b7de99d" upstream="master" />
+ <project name="platform/system/tpm" path="system/tpm" revision="7eb694b2d07e2e2bf520850b78bb45c678bc0014" upstream="master" />
+ <project groups="pdk" name="platform/system/update_engine" path="system/update_engine" revision="9ca8231d34dc134deaf16bcfb112a04cea8ff6d5" upstream="master" />
+ <project groups="pdk" name="platform/system/vold" path="system/vold" revision="dbeebf56aa3c6c4c3977be0effef30bbd92fba27" upstream="master" />
+ <project name="toolchain/binutils" path="toolchain/binutils" revision="082ed0f10cf59b53381cefda2f90247e2a81015b" upstream="master" />
+ <project groups="notdefault,tools" name="platform/tools/adt/idea" path="tools/adt/idea" revision="d3106c42c5466362da9bfb9f7453e5d60af83579" upstream="master" />
+ <project groups="pdk,tradefed" name="platform/tools/apksig" path="tools/apksig" revision="ad6a0efa6fcdfbaea00017b20df939d8eb54d28c" upstream="master" />
+ <project groups="notdefault,tools" name="platform/tools/base" path="tools/base" revision="908b391a9c006af569dfaff08b37f8fdd6c4da89" upstream="master" />
+ <project groups="notdefault,tools" name="platform/tools/build" path="tools/build" revision="69c4b95102b4b9862bfba68b3eaf5b7537a705ee" upstream="master" />
+ <project groups="tools" name="platform/tools/external/fat32lib" path="tools/external/fat32lib" revision="3880776e41ff7def06e351720f2d162f88b58a03" upstream="master" />
+ <project groups="tools" name="platform/tools/external/gradle" path="tools/external/gradle" revision="baf3bbc7a40c290737a5ef29ce23bd65062d906f" upstream="master" />
+ <project groups="notdefault,tools" name="platform/tools/idea" path="tools/idea" revision="9b5d02ac8c92b1e71523cc15cb3d168d57fbd898" upstream="master" />
+ <project groups="notdefault,tradefed" name="platform/tools/loganalysis" path="tools/loganalysis" revision="775ec86c48d93a5c0ee3dae94ac881154aeab37f" upstream="master" />
+ <project groups="notdefault,motodev" name="platform/tools/motodev" path="tools/motodev" revision="69989786cefbde82527960a1e100ec9afba46a98" upstream="master" />
+ <project groups="adt-infra,cts,eclipse,motodev,pdk,tools,tradefed" name="platform/tools/repohooks" path="tools/repohooks" revision="ec044d3e9b608ce275f02092f86810a3ba13834e" upstream="master" />
+ <project groups="notdefault,tools" name="platform/tools/studio/cloud" path="tools/studio/cloud" revision="58f06e77e051fff3903adabca7acdaa9dd12ec2d" upstream="master" />
+ <project groups="notdefault,tools" name="platform/tools/swt" path="tools/swt" revision="8996e71047a2bd11efee46ef14e02435ab5fa07a" upstream="master" />
+ <project groups="pdk" name="platform/tools/test/connectivity" path="tools/test/connectivity" revision="06e86507be140257abc95590fcd72b7f65a9f9a7" upstream="master" />
+ <project groups="notdefault,tradefed" name="platform/tools/tradefederation" path="tools/tradefederation" revision="fb8378796bdd86acd9d1d340a4f18aec551ce0d5" upstream="master" />
+ <repo-hooks enabled-list="pre-upload" in-project="platform/tools/repohooks" />
+</manifest>
diff --git a/current/platform/arm/libRSSupport.so b/current/platform/arm/libRSSupport.so
new file mode 100755
index 0000000..c37ca32
--- /dev/null
+++ b/current/platform/arm/libRSSupport.so
Binary files differ
diff --git a/current/platform/arm/libRSSupportIO.so b/current/platform/arm/libRSSupportIO.so
new file mode 100755
index 0000000..3d61248
--- /dev/null
+++ b/current/platform/arm/libRSSupportIO.so
Binary files differ
diff --git a/current/platform/arm/libRScpp_static.a b/current/platform/arm/libRScpp_static.a
new file mode 100644
index 0000000..71dc4d6
--- /dev/null
+++ b/current/platform/arm/libRScpp_static.a
Binary files differ
diff --git a/current/platform/arm/libblasV8.so b/current/platform/arm/libblasV8.so
new file mode 100755
index 0000000..69f2cd1
--- /dev/null
+++ b/current/platform/arm/libblasV8.so
Binary files differ
diff --git a/current/platform/arm/libcompiler_rt.a b/current/platform/arm/libcompiler_rt.a
new file mode 100644
index 0000000..4e76919
--- /dev/null
+++ b/current/platform/arm/libcompiler_rt.a
Binary files differ
diff --git a/current/platform/arm/librsrt.bc b/current/platform/arm/librsrt.bc
new file mode 100644
index 0000000..ee0429a
--- /dev/null
+++ b/current/platform/arm/librsrt.bc
Binary files differ
diff --git a/current/platform/arm64/libRSSupport.so b/current/platform/arm64/libRSSupport.so
new file mode 100755
index 0000000..084934d
--- /dev/null
+++ b/current/platform/arm64/libRSSupport.so
Binary files differ
diff --git a/current/platform/arm64/libRSSupportIO.so b/current/platform/arm64/libRSSupportIO.so
new file mode 100755
index 0000000..2dfe13d
--- /dev/null
+++ b/current/platform/arm64/libRSSupportIO.so
Binary files differ
diff --git a/current/platform/arm64/libRScpp_static.a b/current/platform/arm64/libRScpp_static.a
new file mode 100644
index 0000000..7395d4a
--- /dev/null
+++ b/current/platform/arm64/libRScpp_static.a
Binary files differ
diff --git a/current/platform/arm64/libblasV8.so b/current/platform/arm64/libblasV8.so
new file mode 100755
index 0000000..4210e41
--- /dev/null
+++ b/current/platform/arm64/libblasV8.so
Binary files differ
diff --git a/current/platform/arm64/libcompiler_rt.a b/current/platform/arm64/libcompiler_rt.a
new file mode 100644
index 0000000..949321d
--- /dev/null
+++ b/current/platform/arm64/libcompiler_rt.a
Binary files differ
diff --git a/current/platform/arm64/librsrt.bc b/current/platform/arm64/librsrt.bc
new file mode 100644
index 0000000..0a1d282
--- /dev/null
+++ b/current/platform/arm64/librsrt.bc
Binary files differ
diff --git a/current/platform/mips/libRSSupport.so b/current/platform/mips/libRSSupport.so
new file mode 100755
index 0000000..0818965
--- /dev/null
+++ b/current/platform/mips/libRSSupport.so
Binary files differ
diff --git a/current/platform/mips/libRSSupportIO.so b/current/platform/mips/libRSSupportIO.so
new file mode 100755
index 0000000..eda75df
--- /dev/null
+++ b/current/platform/mips/libRSSupportIO.so
Binary files differ
diff --git a/current/platform/mips/libRScpp_static.a b/current/platform/mips/libRScpp_static.a
new file mode 100644
index 0000000..544375f
--- /dev/null
+++ b/current/platform/mips/libRScpp_static.a
Binary files differ
diff --git a/current/platform/mips/libblasV8.so b/current/platform/mips/libblasV8.so
new file mode 100755
index 0000000..058c5cf
--- /dev/null
+++ b/current/platform/mips/libblasV8.so
Binary files differ
diff --git a/current/platform/mips/libcompiler_rt.a b/current/platform/mips/libcompiler_rt.a
new file mode 100644
index 0000000..45980e5
--- /dev/null
+++ b/current/platform/mips/libcompiler_rt.a
Binary files differ
diff --git a/current/platform/mips/librsrt.bc b/current/platform/mips/librsrt.bc
new file mode 100644
index 0000000..ee0429a
--- /dev/null
+++ b/current/platform/mips/librsrt.bc
Binary files differ
diff --git a/current/platform/renderscript-v8.jar b/current/platform/renderscript-v8.jar
new file mode 100644
index 0000000..7cc19d1
--- /dev/null
+++ b/current/platform/renderscript-v8.jar
Binary files differ
diff --git a/current/platform/rs/cpp/RenderScript.h b/current/platform/rs/cpp/RenderScript.h
new file mode 100644
index 0000000..7bd99f8
--- /dev/null
+++ b/current/platform/rs/cpp/RenderScript.h
@@ -0,0 +1,37 @@
+/*
+ * Copyright (C) 2012 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+ /*! \mainpage notitle
+ *
+ * RenderScript is a high-performance runtime that provides
+ * compute operations at the native level. RenderScript code is compiled on
+ * devices at runtime, which also makes it platform-independent.
+ * This reference documentation describes the RenderScript runtime APIs, which you
+ * can use to write RenderScript code in C99. The RenderScript compute header
+ * files are automatically included for you.
+ *
+ */
+
+#ifndef ANDROID_RENDERSCRIPT_H
+#define ANDROID_RENDERSCRIPT_H
+
+#include "rsCppStructs.h"
+
+#ifdef RS_SERVER
+#define RS_VERSION 19
+#endif
+
+#endif
diff --git a/current/platform/rs/cpp/rsCppStructs.h b/current/platform/rs/cpp/rsCppStructs.h
new file mode 100644
index 0000000..9b703a4
--- /dev/null
+++ b/current/platform/rs/cpp/rsCppStructs.h
@@ -0,0 +1,4358 @@
+/*
+ * Copyright (C) 2013 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ANDROID_RSCPPSTRUCTS_H
+#define ANDROID_RSCPPSTRUCTS_H
+
+#include "rsDefines.h"
+#include "util/RefBase.h"
+
+#include <pthread.h>
+
+
+/**
+ * Every row in an RS allocation is guaranteed to be aligned by this amount, and
+ * every row in a user-backed allocation must be aligned by this amount.
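+ *
+ * For a user-backed allocation, backing memory can be aligned accordingly,
+ * e.g. (a sketch; rowBytes and rows are illustrative values):
+ * @code
+ * void *buf = NULL;
+ * posix_memalign(&buf, RS_CPU_ALLOCATION_ALIGNMENT, rowBytes * rows);
+ * @endcode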
+ */
+#define RS_CPU_ALLOCATION_ALIGNMENT 16
+
+struct dispatchTable;
+
+namespace android {
+class Surface;
+
+namespace RSC {
+
+
+typedef void (*ErrorHandlerFunc_t)(uint32_t errorNum, const char *errorText);
+typedef void (*MessageHandlerFunc_t)(uint32_t msgNum, const void *msgData, size_t msgLen);
+
+class RS;
+class BaseObj;
+class Element;
+class Type;
+class Allocation;
+class Script;
+class ScriptC;
+class Sampler;
+
+/**
+ * Possible error codes used by RenderScript. Once a status other than RS_SUCCESS
+ * is returned, the RenderScript context is considered dead and cannot perform any
+ * additional work.
+ */
+ enum RSError {
+ RS_SUCCESS = 0, ///< No error
+ RS_ERROR_INVALID_PARAMETER = 1, ///< An invalid parameter was passed to a function
+ RS_ERROR_RUNTIME_ERROR = 2, ///< The RenderScript driver returned an error; this is
+ ///< often indicative of a kernel that crashed
+ RS_ERROR_INVALID_ELEMENT = 3, ///< An invalid Element was passed to a function
+ RS_ERROR_MAX = 9999
+
+ };
+
+ /**
+ * Flags that can control RenderScript behavior on a per-context level.
+ */
+ enum RSInitFlags {
+ RS_INIT_SYNCHRONOUS = 1, ///< All RenderScript calls will be synchronous. May reduce latency.
+ RS_INIT_LOW_LATENCY = 2, ///< Prefer low latency devices over potentially higher throughput devices.
+ // Bitflag 4 is reserved for the context flag low power
+ RS_INIT_WAIT_FOR_ATTACH = 8, ///< Kernel execution will wait to give a debugger time to attach
+ RS_INIT_MAX = 16
+ };
+
+
+class Byte2 {
+ public:
+ int8_t x, y;
+
+ Byte2(int8_t initX, int8_t initY)
+ : x(initX), y(initY) {}
+ Byte2() : x(0), y(0) {}
+};
+
+class Byte3 {
+ public:
+ int8_t x, y, z;
+
+ Byte3(int8_t initX, int8_t initY, int8_t initZ)
+ : x(initX), y(initY), z(initZ) {}
+ Byte3() : x(0), y(0), z(0) {}
+};
+
+class Byte4 {
+ public:
+ int8_t x, y, z, w;
+
+ Byte4(int8_t initX, int8_t initY, int8_t initZ, int8_t initW)
+ : x(initX), y(initY), z(initZ), w(initW) {}
+ Byte4() : x(0), y(0), z(0), w(0) {}
+};
+
+class UByte2 {
+ public:
+ uint8_t x, y;
+
+ UByte2(uint8_t initX, uint8_t initY)
+ : x(initX), y(initY) {}
+ UByte2() : x(0), y(0) {}
+};
+
+class UByte3 {
+ public:
+ uint8_t x, y, z;
+
+ UByte3(uint8_t initX, uint8_t initY, uint8_t initZ)
+ : x(initX), y(initY), z(initZ) {}
+ UByte3() : x(0), y(0), z(0) {}
+};
+
+class UByte4 {
+ public:
+ uint8_t x, y, z, w;
+
+ UByte4(uint8_t initX, uint8_t initY, uint8_t initZ, uint8_t initW)
+ : x(initX), y(initY), z(initZ), w(initW) {}
+ UByte4() : x(0), y(0), z(0), w(0) {}
+};
+
+class Short2 {
+ public:
+ short x, y;
+
+ Short2(short initX, short initY)
+ : x(initX), y(initY) {}
+ Short2() : x(0), y(0) {}
+};
+
+class Short3 {
+ public:
+ short x, y, z;
+
+ Short3(short initX, short initY, short initZ)
+ : x(initX), y(initY), z(initZ) {}
+ Short3() : x(0), y(0), z(0) {}
+};
+
+class Short4 {
+ public:
+ short x, y, z, w;
+
+ Short4(short initX, short initY, short initZ, short initW)
+ : x(initX), y(initY), z(initZ), w(initW) {}
+ Short4() : x(0), y(0), z(0), w(0) {}
+};
+
+class UShort2 {
+ public:
+ uint16_t x, y;
+
+ UShort2(uint16_t initX, uint16_t initY)
+ : x(initX), y(initY) {}
+ UShort2() : x(0), y(0) {}
+};
+
+class UShort3 {
+ public:
+ uint16_t x, y, z;
+
+ UShort3(uint16_t initX, uint16_t initY, uint16_t initZ)
+ : x(initX), y(initY), z(initZ) {}
+ UShort3() : x(0), y(0), z(0) {}
+};
+
+class UShort4 {
+ public:
+ uint16_t x, y, z, w;
+
+ UShort4(uint16_t initX, uint16_t initY, uint16_t initZ, uint16_t initW)
+ : x(initX), y(initY), z(initZ), w(initW) {}
+ UShort4() : x(0), y(0), z(0), w(0) {}
+};
+
+class Int2 {
+ public:
+ int x, y;
+
+ Int2(int initX, int initY)
+ : x(initX), y(initY) {}
+ Int2() : x(0), y(0) {}
+};
+
+class Int3 {
+ public:
+ int x, y, z;
+
+ Int3(int initX, int initY, int initZ)
+ : x(initX), y(initY), z(initZ) {}
+ Int3() : x(0), y(0), z(0) {}
+};
+
+class Int4 {
+ public:
+ int x, y, z, w;
+
+ Int4(int initX, int initY, int initZ, int initW)
+ : x(initX), y(initY), z(initZ), w(initW) {}
+ Int4() : x(0), y(0), z(0), w(0) {}
+};
+
+class UInt2 {
+ public:
+ uint32_t x, y;
+
+ UInt2(uint32_t initX, uint32_t initY)
+ : x(initX), y(initY) {}
+ UInt2() : x(0), y(0) {}
+};
+
+class UInt3 {
+ public:
+ uint32_t x, y, z;
+
+ UInt3(uint32_t initX, uint32_t initY, uint32_t initZ)
+ : x(initX), y(initY), z(initZ) {}
+ UInt3() : x(0), y(0), z(0) {}
+};
+
+class UInt4 {
+ public:
+ uint32_t x, y, z, w;
+
+ UInt4(uint32_t initX, uint32_t initY, uint32_t initZ, uint32_t initW)
+ : x(initX), y(initY), z(initZ), w(initW) {}
+ UInt4() : x(0), y(0), z(0), w(0) {}
+};
+
+class Long2 {
+ public:
+ int64_t x, y;
+
+ Long2(int64_t initX, int64_t initY)
+ : x(initX), y(initY) {}
+ Long2() : x(0), y(0) {}
+};
+
+class Long3 {
+ public:
+ int64_t x, y, z;
+
+ Long3(int64_t initX, int64_t initY, int64_t initZ)
+ : x(initX), y(initY), z(initZ) {}
+ Long3() : x(0), y(0), z(0) {}
+};
+
+class Long4 {
+ public:
+ int64_t x, y, z, w;
+
+ Long4(int64_t initX, int64_t initY, int64_t initZ, int64_t initW)
+ : x(initX), y(initY), z(initZ), w(initW) {}
+ Long4() : x(0), y(0), z(0), w(0) {}
+};
+
+class ULong2 {
+ public:
+ uint64_t x, y;
+
+ ULong2(uint64_t initX, uint64_t initY)
+ : x(initX), y(initY) {}
+ ULong2() : x(0), y(0) {}
+};
+
+class ULong3 {
+ public:
+ uint64_t x, y, z;
+
+ ULong3(uint64_t initX, uint64_t initY, uint64_t initZ)
+ : x(initX), y(initY), z(initZ) {}
+ ULong3() : x(0), y(0), z(0) {}
+};
+
+class ULong4 {
+ public:
+ uint64_t x, y, z, w;
+
+ ULong4(uint64_t initX, uint64_t initY, uint64_t initZ, uint64_t initW)
+ : x(initX), y(initY), z(initZ), w(initW) {}
+ ULong4() : x(0), y(0), z(0), w(0) {}
+};
+
+class Float2 {
+ public:
+ float x, y;
+
+ Float2(float initX, float initY)
+ : x(initX), y(initY) {}
+ Float2() : x(0), y(0) {}
+};
+
+class Float3 {
+ public:
+ float x, y, z;
+
+ Float3(float initX, float initY, float initZ)
+ : x(initX), y(initY), z(initZ) {}
+ Float3() : x(0.f), y(0.f), z(0.f) {}
+};
+
+class Float4 {
+ public:
+ float x, y, z, w;
+
+ Float4(float initX, float initY, float initZ, float initW)
+ : x(initX), y(initY), z(initZ), w(initW) {}
+ Float4() : x(0.f), y(0.f), z(0.f), w(0.f) {}
+};
+
+class Double2 {
+ public:
+ double x, y;
+
+ Double2(double initX, double initY)
+ : x(initX), y(initY) {}
+ Double2() : x(0), y(0) {}
+};
+
+class Double3 {
+ public:
+ double x, y, z;
+
+ Double3(double initX, double initY, double initZ)
+ : x(initX), y(initY), z(initZ) {}
+ Double3() : x(0), y(0), z(0) {}
+};
+
+class Double4 {
+ public:
+ double x, y, z, w;
+
+ Double4(double initX, double initY, double initZ, double initW)
+ : x(initX), y(initY), z(initZ), w(initW) {}
+ Double4() : x(0), y(0), z(0), w(0) {}
+};
+
+ /**
+ * The RenderScript context. This class controls initialization, resource management, and teardown.
+ */
+ class RS : public android::RSC::LightRefBase<RS> {
+
+ public:
+ RS();
+ virtual ~RS();
+
+ /**
+ * Initializes a RenderScript context. A context must be initialized before it can be used.
+ * @param[in] name Directory name to be used by this context. This should be equivalent to
+ * Context.getCacheDir().
+ * @param[in] flags Optional flags for this context.
+ * @return true on success
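+ *
+ * A minimal sketch of typical usage (the directory name is illustrative):
+ * @code
+ * sp<RS> rs = new RS();
+ * if (!rs->init("/data/data/com.example.app/cache")) {
+ *     // initialization failed; the context cannot be used
+ * }
+ * @endcode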
+ */
+ bool init(const char * name, uint32_t flags = 0);
+
+ /**
+ * Initializes a RenderScript context. A context must be initialized before it can be used.
+ * @param[in] name Directory name to be used by this context. This should be equivalent to
+ * Context.getCacheDir().
+ * @param[in] flags Flags for this context.
+ * @param[in] targetApi Target RS API level.
+ * @return true on success
+ */
+ bool init(const char * name, uint32_t flags, int targetApi);
+
+ /**
+ * Sets the error handler function for this context. This error handler is
+ * called whenever an error is set.
+ *
+ * @param[in] func Error handler function
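+ *
+ * A sketch of a handler matching ErrorHandlerFunc_t (names are illustrative):
+ * @code
+ * static void onRsError(uint32_t errorNum, const char *errorText) {
+ *     // log errorText; once an error is set the context is considered dead
+ * }
+ * // elsewhere: rs->setErrorHandler(onRsError);
+ * @endcode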
+ */
+ void setErrorHandler(ErrorHandlerFunc_t func);
+
+ /**
+ * Returns the current error handler function for this context.
+ *
+ * @return pointer to current error handler function or NULL if not set
+ */
+ ErrorHandlerFunc_t getErrorHandler() { return mErrorFunc; }
+
+ /**
+ * Sets the message handler function for this context. This message handler
+ * is called whenever a message is sent from a RenderScript kernel.
+ *
+ * @param[in] func Message handler function
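+ *
+ * A sketch of a handler matching MessageHandlerFunc_t (names are illustrative):
+ * @code
+ * static void onRsMessage(uint32_t msgNum, const void *msgData, size_t msgLen) {
+ *     // interpret msgData/msgLen according to the script's message protocol
+ * }
+ * // elsewhere: rs->setMessageHandler(onRsMessage);
+ * @endcode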
+ */
+ void setMessageHandler(MessageHandlerFunc_t func);
+
+ /**
+ * Returns the current message handler function for this context.
+ *
+ * @return pointer to current message handler function or NULL if not set
+ */
+ MessageHandlerFunc_t getMessageHandler() { return mMessageFunc; }
+
+ /**
+ * Returns current status for the context.
+ *
+ * @return current error
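+ *
+ * A sketch:
+ * @code
+ * if (rs->getError() != RS_SUCCESS) {
+ *     // the context is dead and cannot perform additional work
+ * }
+ * @endcode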
+ */
+ RSError getError();
+
+ /**
+ * Waits for any currently running asynchronous operations to finish. This
+ * should only be used for performance testing and timing.
+ */
+ void finish();
+
+ RsContext getContext() { return mContext; }
+ void throwError(RSError error, const char *errMsg);
+
+ static dispatchTable* dispatch;
+
+ private:
+ static bool usingNative;
+ static bool initDispatch(int targetApi);
+
+ static void * threadProc(void *);
+
+ static bool gInitialized;
+ static pthread_mutex_t gInitMutex;
+
+ pthread_t mMessageThreadId;
+ pid_t mNativeMessageThreadId;
+ bool mMessageRun;
+
+ RsContext mContext;
+ RSError mCurrentError;
+
+ ErrorHandlerFunc_t mErrorFunc;
+ MessageHandlerFunc_t mMessageFunc;
+ bool mInit;
+
+ char mCacheDir[PATH_MAX+1];
+ uint32_t mCacheDirLen;
+
+ struct {
+ sp<const Element> U8;
+ sp<const Element> U8_2;
+ sp<const Element> U8_3;
+ sp<const Element> U8_4;
+ sp<const Element> I8;
+ sp<const Element> I8_2;
+ sp<const Element> I8_3;
+ sp<const Element> I8_4;
+ sp<const Element> U16;
+ sp<const Element> U16_2;
+ sp<const Element> U16_3;
+ sp<const Element> U16_4;
+ sp<const Element> I16;
+ sp<const Element> I16_2;
+ sp<const Element> I16_3;
+ sp<const Element> I16_4;
+ sp<const Element> U32;
+ sp<const Element> U32_2;
+ sp<const Element> U32_3;
+ sp<const Element> U32_4;
+ sp<const Element> I32;
+ sp<const Element> I32_2;
+ sp<const Element> I32_3;
+ sp<const Element> I32_4;
+ sp<const Element> U64;
+ sp<const Element> U64_2;
+ sp<const Element> U64_3;
+ sp<const Element> U64_4;
+ sp<const Element> I64;
+ sp<const Element> I64_2;
+ sp<const Element> I64_3;
+ sp<const Element> I64_4;
+ sp<const Element> F16;
+ sp<const Element> F16_2;
+ sp<const Element> F16_3;
+ sp<const Element> F16_4;
+ sp<const Element> F32;
+ sp<const Element> F32_2;
+ sp<const Element> F32_3;
+ sp<const Element> F32_4;
+ sp<const Element> F64;
+ sp<const Element> F64_2;
+ sp<const Element> F64_3;
+ sp<const Element> F64_4;
+ sp<const Element> BOOLEAN;
+
+ sp<const Element> ELEMENT;
+ sp<const Element> TYPE;
+ sp<const Element> ALLOCATION;
+ sp<const Element> SAMPLER;
+ sp<const Element> SCRIPT;
+ sp<const Element> MESH;
+ sp<const Element> PROGRAM_FRAGMENT;
+ sp<const Element> PROGRAM_VERTEX;
+ sp<const Element> PROGRAM_RASTER;
+ sp<const Element> PROGRAM_STORE;
+
+ sp<const Element> A_8;
+ sp<const Element> RGB_565;
+ sp<const Element> RGB_888;
+ sp<const Element> RGBA_5551;
+ sp<const Element> RGBA_4444;
+ sp<const Element> RGBA_8888;
+
+ sp<const Element> YUV;
+
+ sp<const Element> MATRIX_4X4;
+ sp<const Element> MATRIX_3X3;
+ sp<const Element> MATRIX_2X2;
+ } mElements;
+
+ struct {
+ sp<const Sampler> CLAMP_NEAREST;
+ sp<const Sampler> CLAMP_LINEAR;
+ sp<const Sampler> CLAMP_LINEAR_MIP_LINEAR;
+ sp<const Sampler> WRAP_NEAREST;
+ sp<const Sampler> WRAP_LINEAR;
+ sp<const Sampler> WRAP_LINEAR_MIP_LINEAR;
+ sp<const Sampler> MIRRORED_REPEAT_NEAREST;
+ sp<const Sampler> MIRRORED_REPEAT_LINEAR;
+ sp<const Sampler> MIRRORED_REPEAT_LINEAR_MIP_LINEAR;
+ } mSamplers;
+ friend class Sampler;
+ friend class Element;
+ friend class ScriptC;
+};
+
+ /**
+ * Base class for all RenderScript objects. Not for direct use by developers.
+ */
+class BaseObj : public android::RSC::LightRefBase<BaseObj> {
+public:
+ void * getID() const;
+ virtual ~BaseObj();
+ virtual void updateFromNative();
+ virtual bool equals(const sp<const BaseObj>& obj);
+
+protected:
+ void *mID;
+ RS* mRS;
+ const char * mName;
+
+ BaseObj(void *id, sp<RS> rs);
+ void checkValid();
+
+ static void * getObjID(const sp<const BaseObj>& o);
+
+};
+
+ /**
+ * This class provides the primary method through which data is passed to and
+ * from RenderScript kernels. An Allocation provides the backing store for a
+ * given Type.
+ *
+ * An Allocation also contains a set of usage flags that denote how the
+ * Allocation could be used. For example, an Allocation may have usage flags
+ * specifying that it can be used from a script as well as input to a
+ * Sampler. A developer must synchronize across these different usages using
+ * syncAll(int) in order to ensure that different users of the Allocation have
+ * a consistent view of memory. For example, in the case where an Allocation is
+ * used as the output of one kernel and as Sampler input in a later kernel, a
+ * developer must call syncAll(RS_ALLOCATION_USAGE_SCRIPT) prior to launching the
+ * second kernel to ensure correctness.
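+ *
+ * A sketch of that pattern ("a" is an assumed Allocation shared by both
+ * kernels):
+ * @code
+ * // ... kernel 1 writes into a ...
+ * a->syncAll(RS_ALLOCATION_USAGE_SCRIPT);
+ * // ... kernel 2 may now consume a as Sampler input ...
+ * @endcode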
+ */
+class Allocation : public BaseObj {
+protected:
+ sp<const Type> mType;
+ uint32_t mUsage;
+ sp<Allocation> mAdaptedAllocation;
+
+ bool mConstrainedLOD;
+ bool mConstrainedFace;
+ bool mConstrainedY;
+ bool mConstrainedZ;
+ bool mReadAllowed;
+ bool mWriteAllowed;
+ bool mAutoPadding;
+ uint32_t mSelectedY;
+ uint32_t mSelectedZ;
+ uint32_t mSelectedLOD;
+ RsAllocationCubemapFace mSelectedFace;
+
+ uint32_t mCurrentDimX;
+ uint32_t mCurrentDimY;
+ uint32_t mCurrentDimZ;
+ uint32_t mCurrentCount;
+
+ void * getIDSafe() const;
+ void updateCacheInfo(const sp<const Type>& t);
+
+ Allocation(void *id, sp<RS> rs, sp<const Type> t, uint32_t usage);
+
+ void validateIsInt64();
+ void validateIsInt32();
+ void validateIsInt16();
+ void validateIsInt8();
+ void validateIsFloat32();
+ void validateIsFloat64();
+ void validateIsObject();
+
+ virtual void updateFromNative();
+
+ void validate2DRange(uint32_t xoff, uint32_t yoff, uint32_t w, uint32_t h);
+ void validate3DRange(uint32_t xoff, uint32_t yoff, uint32_t zoff,
+ uint32_t w, uint32_t h, uint32_t d);
+
+public:
+
+ /**
+ * Return Type for the allocation.
+ * @return pointer to underlying Type
+ */
+ sp<const Type> getType() const {
+ return mType;
+ }
+
+ /**
+ * Enable/Disable AutoPadding for Vec3 elements.
+ *
+ * @param useAutoPadding true: enable AutoPadding; false: disable AutoPadding
+ *
+ */
+ void setAutoPadding(bool useAutoPadding) {
+ mAutoPadding = useAutoPadding;
+ }
+
+ /**
+ * Propagate changes from one usage of the Allocation to other usages of the Allocation.
+ * @param[in] srcLocation source location with changes to propagate elsewhere
+ */
+ void syncAll(RsAllocationUsageType srcLocation);
+
+ /**
+ * Send a buffer to the output stream. The contents of the Allocation will
+ * be undefined after this operation. This operation is only valid if
+ * USAGE_IO_OUTPUT is set on the Allocation.
+ */
+ void ioSendOutput();
+
+ /**
+ * Receive the latest input into the Allocation. This operation
+ * is only valid if USAGE_IO_INPUT is set on the Allocation.
+ */
+ void ioGetInput();
+
+#if !defined(RS_SERVER) && !defined(RS_COMPATIBILITY_LIB)
+ /**
+ * Returns the handle to a raw buffer that is being managed by the screen
+ * compositor. This operation is only valid for Allocations with USAGE_IO_INPUT.
+ * @return Surface associated with allocation
+ */
+ sp<Surface> getSurface();
+
+ /**
+ * Associate a Surface with this Allocation. This
+ * operation is only valid for Allocations with USAGE_IO_OUTPUT.
+ * @param[in] s Surface to associate with allocation
+ */
+ void setSurface(const sp<Surface>& s);
+#endif
+
+ /**
+ * Generate a mipmap chain. This is only valid if the Type of the Allocation
+ * includes mipmaps. This function will generate a complete set of mipmaps
+ * from the top level LOD and place them into the script memory space. If
+ * the Allocation is also using other memory spaces, a call to
+ * syncAll(RS_ALLOCATION_USAGE_SCRIPT) is required.
+ */
+ void generateMipmaps();
+
+ /**
+ * Copy an array into part of this Allocation.
+ * @param[in] off offset of first Element to be overwritten
+ * @param[in] count number of Elements to copy
+ * @param[in] data array from which to copy
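+ *
+ * A sketch (assumes "alloc" was created with at least 16 F32 Elements):
+ * @code
+ * float buf[16] = {};
+ * alloc->copy1DRangeFrom(0, 16, buf);
+ * @endcode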
+ */
+ void copy1DRangeFrom(uint32_t off, size_t count, const void *data);
+
+ /**
+ * Copy part of an Allocation into part of this Allocation.
+ * @param[in] off offset of first Element to be overwritten
+ * @param[in] count number of Elements to copy
+ * @param[in] data Allocation from which to copy
+ * @param[in] dataOff offset of first Element in data to copy
+ */
+ void copy1DRangeFrom(uint32_t off, size_t count, const sp<const Allocation>& data, uint32_t dataOff);
+
+ /**
+ * Copy part of this Allocation into an array.
+ * @param[in] off offset of first Element to be copied
+ * @param[in] count number of Elements to copy
+ * @param[in] data destination array
+ */
+ void copy1DRangeTo(uint32_t off, size_t count, void *data);
+
+ /**
+ * Copy an entire array into this Allocation.
+ * @param[in] data array from which to copy
+ */
+ void copy1DFrom(const void* data);
+
+ /**
+ * Copy this entire Allocation into an array.
+ * @param[in] data destination array
+ */
+ void copy1DTo(void* data);
+
+ /**
+ * Copy from an array into a rectangular region in this Allocation. The
+ * array is assumed to be tightly packed.
+ * @param[in] xoff X offset of region to update in this Allocation
+ * @param[in] yoff Y offset of region to update in this Allocation
+ * @param[in] w Width of region to update
+ * @param[in] h Height of region to update
+ * @param[in] data Array from which to copy
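+ *
+ * A sketch copying a tightly packed 64x64 float image ("alloc2d" is an
+ * assumed 2D F32 Allocation):
+ * @code
+ * float img[64 * 64] = {};
+ * alloc2d->copy2DRangeFrom(0, 0, 64, 64, img);
+ * @endcode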
+ */
+ void copy2DRangeFrom(uint32_t xoff, uint32_t yoff, uint32_t w, uint32_t h,
+ const void *data);
+
+ /**
+ * Copy from this Allocation into a rectangular region in an array. The
+ * array is assumed to be tightly packed.
+ * @param[in] xoff X offset of region to copy from this Allocation
+ * @param[in] yoff Y offset of region to copy from this Allocation
+ * @param[in] w Width of region to copy
+ * @param[in] h Height of region to copy
+ * @param[in] data destination array
+ */
+ void copy2DRangeTo(uint32_t xoff, uint32_t yoff, uint32_t w, uint32_t h,
+ void *data);
+
+ /**
+ * Copy from an Allocation into a rectangular region in this Allocation.
+ * @param[in] xoff X offset of region to update in this Allocation
+ * @param[in] yoff Y offset of region to update in this Allocation
+ * @param[in] w Width of region to update
+ * @param[in] h Height of region to update
+ * @param[in] data Allocation from which to copy
+ * @param[in] dataXoff X offset of region to copy from in data
+ * @param[in] dataYoff Y offset of region to copy from in data
+ */
+ void copy2DRangeFrom(uint32_t xoff, uint32_t yoff, uint32_t w, uint32_t h,
+ const sp<const Allocation>& data, uint32_t dataXoff, uint32_t dataYoff);
+
+ /**
+ * Copy from a strided array into a rectangular region in this Allocation.
+ * @param[in] xoff X offset of region to update in this Allocation
+ * @param[in] yoff Y offset of region to update in this Allocation
+ * @param[in] w Width of region to update
+ * @param[in] h Height of region to update
+ * @param[in] data array from which to copy
+ * @param[in] stride stride of data in bytes
+ */
+ void copy2DStridedFrom(uint32_t xoff, uint32_t yoff, uint32_t w, uint32_t h,
+ const void *data, size_t stride);
+
+ /**
+ * Copy from a strided array into this Allocation.
+ * @param[in] data array from which to copy
+ * @param[in] stride stride of data in bytes
+ */
+ void copy2DStridedFrom(const void *data, size_t stride);
+
+ /**
+ * Copy from a rectangular region in this Allocation into a strided array.
+ * @param[in] xoff X offset of region to copy from this Allocation
+ * @param[in] yoff Y offset of region to copy from this Allocation
+ * @param[in] w Width of region to copy
+ * @param[in] h Height of region to copy
+ * @param[in] data destination array
+ * @param[in] stride stride of data in bytes
+ */
+ void copy2DStridedTo(uint32_t xoff, uint32_t yoff, uint32_t w, uint32_t h,
+ void *data, size_t stride);
+
+ /**
+ * Copy this Allocation into a strided array.
+ * @param[in] data destination array
+ * @param[in] stride stride of data in bytes
+ */
+ void copy2DStridedTo(void *data, size_t stride);
+
+
+ /**
+ * Copy from an array into a 3D region in this Allocation. The
+ * array is assumed to be tightly packed.
+ * @param[in] xoff X offset of region to update in this Allocation
+ * @param[in] yoff Y offset of region to update in this Allocation
+ * @param[in] zoff Z offset of region to update in this Allocation
+ * @param[in] w Width of region to update
+ * @param[in] h Height of region to update
+ * @param[in] d Depth of region to update
+ * @param[in] data Array from which to copy
+ */
+ void copy3DRangeFrom(uint32_t xoff, uint32_t yoff, uint32_t zoff, uint32_t w,
+ uint32_t h, uint32_t d, const void* data);
+
+ /**
+ * Copy from an Allocation into a 3D region in this Allocation.
+ * @param[in] xoff X offset of region to update in this Allocation
+ * @param[in] yoff Y offset of region to update in this Allocation
+ * @param[in] zoff Z offset of region to update in this Allocation
+ * @param[in] w Width of region to update
+ * @param[in] h Height of region to update
+ * @param[in] d Depth of region to update
+ * @param[in] data Allocation from which to copy
+ * @param[in] dataXoff X offset of region in data to copy from
+ * @param[in] dataYoff Y offset of region in data to copy from
+ * @param[in] dataZoff Z offset of region in data to copy from
+ */
+ void copy3DRangeFrom(uint32_t xoff, uint32_t yoff, uint32_t zoff,
+ uint32_t w, uint32_t h, uint32_t d,
+ const sp<const Allocation>& data,
+ uint32_t dataXoff, uint32_t dataYoff, uint32_t dataZoff);
+
+ /**
+ * Copy a 3D region in this Allocation into an array. The
+ * array is assumed to be tightly packed.
+ * @param[in] xoff X offset of region to copy from this Allocation
+ * @param[in] yoff Y offset of region to copy from this Allocation
+ * @param[in] zoff Z offset of region to copy from this Allocation
+ * @param[in] w Width of region to copy
+ * @param[in] h Height of region to copy
+ * @param[in] d Depth of region to copy
+ * @param[in] data destination array
+ */
+ void copy3DRangeTo(uint32_t xoff, uint32_t yoff, uint32_t zoff, uint32_t w,
+ uint32_t h, uint32_t d, void* data);
+
+ /**
+ * Creates an Allocation for use by scripts with a given Type.
+ * @param[in] rs Context to which the Allocation will belong
+ * @param[in] type Type of the Allocation
+ * @param[in] mipmaps desired mipmap behavior for the Allocation
+ * @param[in] usage usage for the Allocation
+ * @return new Allocation
+ */
+ static sp<Allocation> createTyped(const sp<RS>& rs, const sp<const Type>& type,
+ RsAllocationMipmapControl mipmaps, uint32_t usage);
+
+ /**
+ * Creates an Allocation for use by scripts with a given Type and a backing pointer. For use
+ * with RS_ALLOCATION_USAGE_SHARED.
+ * @param[in] rs Context to which the Allocation will belong
+ * @param[in] type Type of the Allocation
+ * @param[in] mipmaps desired mipmap behavior for the Allocation
+ * @param[in] usage usage for the Allocation
+ * @param[in] pointer existing backing store to use for this Allocation if possible
+ * @return new Allocation
+ */
+ static sp<Allocation> createTyped(const sp<RS>& rs, const sp<const Type>& type,
+ RsAllocationMipmapControl mipmaps, uint32_t usage, void * pointer);
+
+ /**
+ * Creates an Allocation for use by scripts with a given Type with no mipmaps.
+ * @param[in] rs Context to which the Allocation will belong
+ * @param[in] type Type of the Allocation
+ * @param[in] usage usage for the Allocation
+ * @return new Allocation
+ */
+ static sp<Allocation> createTyped(const sp<RS>& rs, const sp<const Type>& type,
+ uint32_t usage = RS_ALLOCATION_USAGE_SCRIPT);
+ /**
+ * Creates an Allocation with a specified number of given elements.
+ * @param[in] rs Context to which the Allocation will belong
+ * @param[in] e Element used in the Allocation
+ * @param[in] count Number of elements of the Allocation
+ * @param[in] usage usage for the Allocation
+ * @return new Allocation
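+ *
+ * For example, a 1D Allocation of 256 floats (a sketch; "rs" is an assumed
+ * initialized context):
+ * @code
+ * sp<Allocation> a = Allocation::createSized(rs, Element::F32(rs), 256);
+ * @endcode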
+ */
+ static sp<Allocation> createSized(const sp<RS>& rs, const sp<const Element>& e, size_t count,
+ uint32_t usage = RS_ALLOCATION_USAGE_SCRIPT);
+
+ /**
+ * Creates a 2D Allocation with a specified number of given elements.
+ * @param[in] rs Context to which the Allocation will belong
+ * @param[in] e Element used in the Allocation
+ * @param[in] x Width in Elements of the Allocation
+ * @param[in] y Height in Elements of the Allocation
+ * @param[in] usage usage for the Allocation
+ * @return new Allocation
+ */
+ static sp<Allocation> createSized2D(const sp<RS>& rs, const sp<const Element>& e,
+ size_t x, size_t y,
+ uint32_t usage = RS_ALLOCATION_USAGE_SCRIPT);
+
+
+ /**
+ * Get the backing pointer for a USAGE_SHARED allocation.
+ * @param[out] stride optional; when non-NULL, receives the
+ * stride in bytes of a 2D Allocation
+ * @return pointer to data
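+ *
+ * A sketch for a 2D Allocation created with RS_ALLOCATION_USAGE_SHARED
+ * ("sharedAlloc" is illustrative):
+ * @code
+ * size_t stride = 0;
+ * char *base = (char *) sharedAlloc->getPointer(&stride);
+ * // row r of the allocation starts at base + r * stride
+ * @endcode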
+ */
+ void * getPointer(size_t *stride = NULL);
+};
+
+ /**
+ * An Element represents one item within an Allocation. An Element is roughly
+ * equivalent to a C type in a RenderScript kernel. Elements may be basic
+ * or complex. Some basic Elements are:
+ *
+ * - A single float value (equivalent to a float in a kernel)
+ * - A four-element float vector (equivalent to a float4 in a kernel)
+ * - An unsigned 32-bit integer (equivalent to an unsigned int in a kernel)
+ * - A single signed 8-bit integer (equivalent to a char in a kernel)
+ *
+ * Basic Elements consist of a DataType and a DataKind. The DataType
+ * encodes the C type information of an Element,
+ * while the DataKind encodes how that Element should be interpreted by a
+ * Sampler. Note that Allocation objects with DataKind USER cannot be used as
+ * input for a Sampler. In general, Allocation objects that are intended for
+ * use with a Sampler should use bitmap-derived Elements such as
+ * Element::RGBA_8888.
+ */
+
+
+class Element : public BaseObj {
+public:
+ bool isComplex();
+
+ /**
+ * Elements can be simple, such as an int or a float, or a structure with
+ * multiple sub-elements, such as a collection of floats, float2s, and
+ * float4s. This function returns zero for simple Elements and the number
+ * of sub-elements otherwise.
+ * @return number of sub-elements
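+ *
+ * A sketch iterating the sub-elements of a complex Element "e":
+ * @code
+ * for (uint32_t i = 0; i < e->getSubElementCount(); i++) {
+ *     const char *name = e->getSubElementName(i);
+ *     // inspect name and e->getSubElement(i) as needed
+ * }
+ * @endcode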
+ */
+ size_t getSubElementCount() {
+ return mVisibleElementMapSize;
+ }
+
+ /**
+ * For complex Elements, this returns the sub-element at a given index.
+ * @param[in] index index of sub-element
+ * @return sub-element
+ */
+ sp<const Element> getSubElement(uint32_t index);
+
+ /**
+ * For complex Elements, this returns the name of the sub-element at a given
+ * index.
+ * @param[in] index index of sub-element
+ * @return name of sub-element
+ */
+ const char * getSubElementName(uint32_t index);
+
+ /**
+ * For complex Elements, this returns the size of the sub-element at a given
+ * index.
+ * @param[in] index index of sub-element
+ * @return size of sub-element
+ */
+ size_t getSubElementArraySize(uint32_t index);
+
+ /**
+ * Returns the location of a sub-element within a complex Element.
+ * @param[in] index index of sub-element
+ * @return offset in bytes
+ */
+ uint32_t getSubElementOffsetBytes(uint32_t index);
+
+ /**
+ * Returns the data type used for the Element.
+ * @return data type
+ */
+ RsDataType getDataType() const {
+ return mType;
+ }
+
+ /**
+ * Returns the data kind used for the Element.
+ * @return data kind
+ */
+ RsDataKind getDataKind() const {
+ return mKind;
+ }
+
+ /**
+ * Returns the size in bytes of the Element.
+ * @return size in bytes
+ */
+ size_t getSizeBytes() const {
+ return mSizeBytes;
+ }
+
+ /**
+ * Returns the number of vector components for this Element.
+ * @return number of vector components
+ */
+ uint32_t getVectorSize() const {
+ return mVectorSize;
+ }
+
+ /**
+ * Utility function for returning an Element containing a single bool.
+ * @param[in] rs RenderScript context
+ * @return Element
+ */
+ static sp<const Element> BOOLEAN(const sp<RS> &rs);
+ /**
+ * Utility function for returning an Element containing a single unsigned char.
+ * @param[in] rs RenderScript context
+ * @return Element
+ */
+ static sp<const Element> U8(const sp<RS> &rs);
+ /**
+ * Utility function for returning an Element containing a single signed char.
+ * @param[in] rs RenderScript context
+ * @return Element
+ */
+ static sp<const Element> I8(const sp<RS> &rs);
+ /**
+ * Utility function for returning an Element containing a single unsigned short.
+ * @param[in] rs RenderScript context
+ * @return Element
+ */
+ static sp<const Element> U16(const sp<RS> &rs);
+ /**
+ * Utility function for returning an Element containing a single signed short.
+ * @param[in] rs RenderScript context
+ * @return Element
+ */
+ static sp<const Element> I16(const sp<RS> &rs);
+ /**
+ * Utility function for returning an Element containing a single unsigned int.
+ * @param[in] rs RenderScript context
+ * @return Element
+ */
+ static sp<const Element> U32(const sp<RS> &rs);
+ /**
+ * Utility function for returning an Element containing a single signed int.
+ * @param[in] rs RenderScript context
+ * @return Element
+ */
+ static sp<const Element> I32(const sp<RS> &rs);
+ /**
+ * Utility function for returning an Element containing a single unsigned long long.
+ * @param[in] rs RenderScript context
+ * @return Element
+ */
+ static sp<const Element> U64(const sp<RS> &rs);
+ /**
+ * Utility function for returning an Element containing a single signed long long.
+ * @param[in] rs RenderScript context
+ * @return Element
+ */
+ static sp<const Element> I64(const sp<RS> &rs);
+ /**
+ * Utility function for returning an Element containing a single half.
+ * @param[in] rs RenderScript context
+ * @return Element
+ */
+ static sp<const Element> F16(const sp<RS> &rs);
+ /**
+ * Utility function for returning an Element containing a single float.
+ * @param[in] rs RenderScript context
+ * @return Element
+ */
+ static sp<const Element> F32(const sp<RS> &rs);
+ /**
+ * Utility function for returning an Element containing a single double.
+ * @param[in] rs RenderScript context
+ * @return Element
+ */
+ static sp<const Element> F64(const sp<RS> &rs);
+ /**
+ * Utility function for returning an Element containing a single Element.
+ * @param[in] rs RenderScript context
+ * @return Element
+ */
+ static sp<const Element> ELEMENT(const sp<RS> &rs);
+ /**
+ * Utility function for returning an Element containing a single Type.
+ * @param[in] rs RenderScript context
+ * @return Element
+ */
+ static sp<const Element> TYPE(const sp<RS> &rs);
+ /**
+ * Utility function for returning an Element containing a single Allocation.
+ * @param[in] rs RenderScript context
+ * @return Element
+ */
+ static sp<const Element> ALLOCATION(const sp<RS> &rs);
+ /**
+ * Utility function for returning an Element containing a single Sampler.
+ * @param[in] rs RenderScript context
+ * @return Element
+ */
+ static sp<const Element> SAMPLER(const sp<RS> &rs);
+ /**
+ * Utility function for returning an Element containing a single Script.
+ * @param[in] rs RenderScript context
+ * @return Element
+ */
+ static sp<const Element> SCRIPT(const sp<RS> &rs);
+ /**
+ * Utility function for returning an Element containing an ALPHA_8 pixel.
+ * @param[in] rs RenderScript context
+ * @return Element
+ */
+ static sp<const Element> A_8(const sp<RS> &rs);
+ /**
+ * Utility function for returning an Element containing an RGB_565 pixel.
+ * @param[in] rs RenderScript context
+ * @return Element
+ */
+ static sp<const Element> RGB_565(const sp<RS> &rs);
+ /**
+ * Utility function for returning an Element containing an RGB_888 pixel.
+ * @param[in] rs RenderScript context
+ * @return Element
+ */
+ static sp<const Element> RGB_888(const sp<RS> &rs);
+ /**
+ * Utility function for returning an Element containing an RGBA_5551 pixel.
+ * @param[in] rs RenderScript context
+ * @return Element
+ */
+ static sp<const Element> RGBA_5551(const sp<RS> &rs);
+ /**
+ * Utility function for returning an Element containing an RGBA_4444 pixel.
+ * @param[in] rs RenderScript context
+ * @return Element
+ */
+ static sp<const Element> RGBA_4444(const sp<RS> &rs);
+ /**
+ * Utility function for returning an Element containing an RGBA_8888 pixel.
+ * @param[in] rs RenderScript context
+ * @return Element
+ */
+ static sp<const Element> RGBA_8888(const sp<RS> &rs);
+
+ /**
+ * Utility function for returning an Element containing a half2.
+ * @param[in] rs RenderScript context
+ * @return Element
+ */
+ static sp<const Element> F16_2(const sp<RS> &rs);
+ /**
+ * Utility function for returning an Element containing a half3.
+ * @param[in] rs RenderScript context
+ * @return Element
+ */
+ static sp<const Element> F16_3(const sp<RS> &rs);
+ /**
+ * Utility function for returning an Element containing a half4.
+ * @param[in] rs RenderScript context
+ * @return Element
+ */
+ static sp<const Element> F16_4(const sp<RS> &rs);
+
+ /**
+ * Utility function for returning an Element containing a float2.
+ * @param[in] rs RenderScript context
+ * @return Element
+ */
+ static sp<const Element> F32_2(const sp<RS> &rs);
+ /**
+ * Utility function for returning an Element containing a float3.
+ * @param[in] rs RenderScript context
+ * @return Element
+ */
+ static sp<const Element> F32_3(const sp<RS> &rs);
+ /**
+ * Utility function for returning an Element containing a float4.
+ * @param[in] rs RenderScript context
+ * @return Element
+ */
+ static sp<const Element> F32_4(const sp<RS> &rs);
+ /**
+ * Utility function for returning an Element containing a double2.
+ * @param[in] rs RenderScript context
+ * @return Element
+ */
+ static sp<const Element> F64_2(const sp<RS> &rs);
+ /**
+ * Utility function for returning an Element containing a double3.
+ * @param[in] rs RenderScript context
+ * @return Element
+ */
+ static sp<const Element> F64_3(const sp<RS> &rs);
+ /**
+ * Utility function for returning an Element containing a double4.
+ * @param[in] rs RenderScript context
+ * @return Element
+ */
+ static sp<const Element> F64_4(const sp<RS> &rs);
+ /**
+ * Utility function for returning an Element containing a uchar2.
+ * @param[in] rs RenderScript context
+ * @return Element
+ */
+ static sp<const Element> U8_2(const sp<RS> &rs);
+ /**
+ * Utility function for returning an Element containing a uchar3.
+ * @param[in] rs RenderScript context
+ * @return Element
+ */
+ static sp<const Element> U8_3(const sp<RS> &rs);
+ /**
+ * Utility function for returning an Element containing a uchar4.
+ * @param[in] rs RenderScript context
+ * @return Element
+ */
+ static sp<const Element> U8_4(const sp<RS> &rs);
+ /**
+ * Utility function for returning an Element containing a char2.
+ * @param[in] rs RenderScript context
+ * @return Element
+ */
+ static sp<const Element> I8_2(const sp<RS> &rs);
+ /**
+ * Utility function for returning an Element containing a char3.
+ * @param[in] rs RenderScript context
+ * @return Element
+ */
+ static sp<const Element> I8_3(const sp<RS> &rs);
+ /**
+ * Utility function for returning an Element containing a char4.
+ * @param[in] rs RenderScript context
+ * @return Element
+ */
+ static sp<const Element> I8_4(const sp<RS> &rs);
+ /**
+ * Utility function for returning an Element containing a ushort2.
+ * @param[in] rs RenderScript context
+ * @return Element
+ */
+ static sp<const Element> U16_2(const sp<RS> &rs);
+ /**
+ * Utility function for returning an Element containing a ushort3.
+ * @param[in] rs RenderScript context
+ * @return Element
+ */
+ static sp<const Element> U16_3(const sp<RS> &rs);
+ /**
+ * Utility function for returning an Element containing a ushort4.
+ * @param[in] rs RenderScript context
+ * @return Element
+ */
+ static sp<const Element> U16_4(const sp<RS> &rs);
+ /**
+ * Utility function for returning an Element containing a short2.
+ * @param[in] rs RenderScript context
+ * @return Element
+ */
+ static sp<const Element> I16_2(const sp<RS> &rs);
+ /**
+ * Utility function for returning an Element containing a short3.
+ * @param[in] rs RenderScript context
+ * @return Element
+ */
+ static sp<const Element> I16_3(const sp<RS> &rs);
+ /**
+ * Utility function for returning an Element containing a short4.
+ * @param[in] rs RenderScript context
+ * @return Element
+ */
+ static sp<const Element> I16_4(const sp<RS> &rs);
+ /**
+ * Utility function for returning an Element containing a uint2.
+ * @param[in] rs RenderScript context
+ * @return Element
+ */
+ static sp<const Element> U32_2(const sp<RS> &rs);
+ /**
+ * Utility function for returning an Element containing a uint3.
+ * @param[in] rs RenderScript context
+ * @return Element
+ */
+ static sp<const Element> U32_3(const sp<RS> &rs);
+ /**
+ * Utility function for returning an Element containing a uint4.
+ * @param[in] rs RenderScript context
+ * @return Element
+ */
+ static sp<const Element> U32_4(const sp<RS> &rs);
+ /**
+ * Utility function for returning an Element containing an int2.
+ * @param[in] rs RenderScript context
+ * @return Element
+ */
+ static sp<const Element> I32_2(const sp<RS> &rs);
+ /**
+ * Utility function for returning an Element containing an int3.
+ * @param[in] rs RenderScript context
+ * @return Element
+ */
+ static sp<const Element> I32_3(const sp<RS> &rs);
+ /**
+ * Utility function for returning an Element containing an int4.
+ * @param[in] rs RenderScript context
+ * @return Element
+ */
+ static sp<const Element> I32_4(const sp<RS> &rs);
+ /**
+ * Utility function for returning an Element containing a ulong2.
+ * @param[in] rs RenderScript context
+ * @return Element
+ */
+ static sp<const Element> U64_2(const sp<RS> &rs);
+ /**
+ * Utility function for returning an Element containing a ulong3.
+ * @param[in] rs RenderScript context
+ * @return Element
+ */
+ static sp<const Element> U64_3(const sp<RS> &rs);
+ /**
+ * Utility function for returning an Element containing a ulong4.
+ * @param[in] rs RenderScript context
+ * @return Element
+ */
+ static sp<const Element> U64_4(const sp<RS> &rs);
+ /**
+ * Utility function for returning an Element containing a long2.
+ * @param[in] rs RenderScript context
+ * @return Element
+ */
+ static sp<const Element> I64_2(const sp<RS> &rs);
+ /**
+ * Utility function for returning an Element containing a long3.
+ * @param[in] rs RenderScript context
+ * @return Element
+ */
+ static sp<const Element> I64_3(const sp<RS> &rs);
+ /**
+ * Utility function for returning an Element containing a long4.
+ * @param[in] rs RenderScript context
+ * @return Element
+ */
+ static sp<const Element> I64_4(const sp<RS> &rs);
+ /**
+ * Utility function for returning an Element containing a YUV pixel.
+ * @param[in] rs RenderScript context
+ * @return Element
+ */
+ static sp<const Element> YUV(const sp<RS> &rs);
+ /**
+ * Utility function for returning an Element containing an rs_matrix_4x4.
+ * @param[in] rs RenderScript context
+ * @return Element
+ */
+ static sp<const Element> MATRIX_4X4(const sp<RS> &rs);
+ /**
+ * Utility function for returning an Element containing an rs_matrix_3x3.
+ * @param[in] rs RenderScript context
+ * @return Element
+ */
+ static sp<const Element> MATRIX_3X3(const sp<RS> &rs);
+ /**
+ * Utility function for returning an Element containing an rs_matrix_2x2.
+ * @param[in] rs RenderScript context
+ * @return Element
+ */
+ static sp<const Element> MATRIX_2X2(const sp<RS> &rs);
+
+ void updateFromNative();
+
+ /**
+ * Create an Element with a given DataType.
+ * @param[in] rs RenderScript context
+ * @param[in] dt data type
+ * @return Element
+ */
+ static sp<const Element> createUser(const sp<RS>& rs, RsDataType dt);
+ /**
+ * Create a vector Element with the given DataType
+ * @param[in] rs RenderScript
+ * @param[in] dt DataType
+ * @param[in] size vector size
+ * @return Element
+ */
+ static sp<const Element> createVector(const sp<RS>& rs, RsDataType dt, uint32_t size);
+ /**
+ * Create an Element with a given DataType and DataKind.
+ * @param[in] rs RenderScript context
+ * @param[in] dt DataType
+ * @param[in] dk DataKind
+ * @return Element
+ */
+ static sp<const Element> createPixel(const sp<RS>& rs, RsDataType dt, RsDataKind dk);
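+
+ /*
+ * A minimal usage sketch (illustrative, not part of the API surface; the
+ * "rs" context variable is assumed to have been created and initialized
+ * elsewhere):
+ *
+ *   sp<const Element> f32x4 = Element::createVector(rs, RS_TYPE_FLOAT_32, 4);
+ *   sp<const Element> rgba  = Element::createPixel(rs, RS_TYPE_UNSIGNED_8,
+ *                                                  RS_KIND_PIXEL_RGBA);
+ */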
+
+ /**
+ * Returns true if the Element can interoperate with this Element.
+ * @param[in] e Element to compare
+ * @return true if Elements can interoperate
+ */
+ bool isCompatible(const sp<const Element>&e) const;
+
+ /**
+ * Builder class for producing complex elements with matching field and name
+ * pairs. The builder starts empty. The order in which elements are added is
+ * retained for the layout in memory.
+ */
+ class Builder {
+ private:
+ RS* mRS;
+ size_t mElementsCount;
+ size_t mElementsVecSize;
+ sp<const Element> * mElements;
+ char ** mElementNames;
+ size_t * mElementNameLengths;
+ uint32_t * mArraySizes;
+ bool mSkipPadding;
+
+ public:
+ explicit Builder(sp<RS> rs);
+ ~Builder();
+ void add(const sp<const Element>& e, const char * name, uint32_t arraySize = 1);
+ sp<const Element> create();
+ };
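+
+ /*
+ * A minimal Builder usage sketch (illustrative; "rs" is an assumed,
+ * already-initialized context, and Element::F32 is the scalar float
+ * utility assumed to be declared earlier in this class):
+ *
+ *   Element::Builder b(rs);
+ *   b.add(Element::F32(rs), "x");
+ *   b.add(Element::F32(rs), "y");
+ *   sp<const Element> point2 = b.create();
+ */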
+
+protected:
+ friend class Type;
+ Element(void *id, sp<RS> rs,
+ sp<const Element> * elements,
+ size_t elementCount,
+ const char ** elementNames,
+ size_t * elementNameLengths,
+ uint32_t * arraySizes);
+ Element(void *id, sp<RS> rs, RsDataType dt, RsDataKind dk, bool norm, uint32_t size);
+ Element(void *id, sp<RS> rs);
+ explicit Element(sp<RS> rs);
+ virtual ~Element();
+
+private:
+ void updateVisibleSubElements();
+
+ size_t mElementsCount;
+ size_t mVisibleElementMapSize;
+
+ sp<const Element> * mElements;
+ char ** mElementNames;
+ size_t * mElementNameLengths;
+ uint32_t * mArraySizes;
+ uint32_t * mVisibleElementMap;
+ uint32_t * mOffsetInBytes;
+
+ RsDataType mType;
+ RsDataKind mKind;
+ bool mNormalized;
+ size_t mSizeBytes;
+ size_t mVectorSize;
+};
+
+class FieldPacker {
+protected:
+ unsigned char* mData;
+ size_t mPos;
+ size_t mLen;
+
+public:
+ explicit FieldPacker(size_t len)
+ : mPos(0), mLen(len) {
+ mData = new unsigned char[len];
+ }
+
+ virtual ~FieldPacker() {
+ delete [] mData;
+ }
+
+ void align(size_t v) {
+ if ((v & (v - 1)) != 0) {
+ // ALOGE("Non-power-of-two alignment: %zu", v);
+ return;
+ }
+
+ while ((mPos & (v - 1)) != 0) {
+ mData[mPos++] = 0;
+ }
+ }
+
+ void reset() {
+ mPos = 0;
+ }
+
+ void reset(size_t i) {
+ if (i >= mLen) {
+ // ALOGE("Out of bounds: i (%zu) >= len (%zu)", i, mLen);
+ return;
+ }
+ mPos = i;
+ }
+
+ void skip(size_t i) {
+ size_t res = mPos + i;
+ if (res > mLen) {
+ // ALOGE("Exceeded buffer length: i (%zu) > len (%zu)", i, mLen);
+ return;
+ }
+ mPos = res;
+ }
+
+ void* getData() const {
+ return mData;
+ }
+
+ size_t getLength() const {
+ return mLen;
+ }
+
+ template <typename T>
+ void add(T t) {
+ align(sizeof(t));
+ if (mPos + sizeof(t) <= mLen) {
+ memcpy(&mData[mPos], &t, sizeof(t));
+ mPos += sizeof(t);
+ }
+ }
+
+ /*
+ void add(rs_matrix4x4 m) {
+ for (size_t i = 0; i < 16; i++) {
+ add(m.m[i]);
+ }
+ }
+
+ void add(rs_matrix3x3 m) {
+ for (size_t i = 0; i < 9; i++) {
+ add(m.m[i]);
+ }
+ }
+
+ void add(rs_matrix2x2 m) {
+ for (size_t i = 0; i < 4; i++) {
+ add(m.m[i]);
+ }
+ }
+ */
+
+ void add(const sp<BaseObj>& obj) {
+ if (obj != NULL) {
+ add((uint32_t) (uintptr_t) obj->getID());
+ } else {
+ add((uint32_t) 0);
+ }
+ }
+};
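+
+/*
+ * A minimal FieldPacker usage sketch: add() aligns to sizeof(T) before each
+ * write, so the resulting buffer matches the packed layout scripts expect.
+ *
+ *   FieldPacker fp(sizeof(float) + sizeof(int32_t));
+ *   fp.add(1.0f);                 // writes 4 bytes at offset 0
+ *   fp.add((int32_t)42);          // aligns to 4, writes 4 bytes at offset 4
+ *   void *data = fp.getData();    // fp.getLength() bytes of packed data
+ */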
+
+/**
+ * A Type describes the Element and dimensions used for an Allocation or a
+ * parallel operation.
+ *
+ * A Type always includes an Element and an X dimension. A Type may be
+ * multidimensional, up to three dimensions. A nonzero value in the Y or Z
+ * dimensions indicates that the dimension is present. Note that a Type with
+ * only a given X dimension and a Type with the same X dimension but Y = 1 are
+ * not equivalent.
+ *
+ * A Type also supports inclusion of level of detail (LOD) or cube map
+ * faces. LOD and cube map faces are boolean flags indicating whether they are
+ * present.
+ *
+ * A Type also supports YUV format information to support an Allocation in a YUV
+ * format. The YUV formats supported are RS_YUV_YV12 and RS_YUV_NV21.
+ */
+class Type : public BaseObj {
+protected:
+ friend class Allocation;
+
+ uint32_t mDimX;
+ uint32_t mDimY;
+ uint32_t mDimZ;
+ RsYuvFormat mYuvFormat;
+ bool mDimMipmaps;
+ bool mDimFaces;
+ size_t mElementCount;
+ sp<const Element> mElement;
+
+ Type(void *id, sp<RS> rs);
+
+ void calcElementCount();
+ virtual void updateFromNative();
+
+public:
+
+ /**
+ * Returns the YUV format.
+ * @return YUV format of the Allocation
+ */
+ RsYuvFormat getYuvFormat() const {
+ return mYuvFormat;
+ }
+
+ /**
+ * Returns the Element of the Allocation.
+ * @return Element of the Allocation
+ */
+ sp<const Element> getElement() const {
+ return mElement;
+ }
+
+ /**
+ * Returns the X dimension of the Allocation.
+ * @return X dimension of the allocation
+ */
+ uint32_t getX() const {
+ return mDimX;
+ }
+
+ /**
+ * Returns the Y dimension of the Allocation.
+ * @return Y dimension of the allocation
+ */
+ uint32_t getY() const {
+ return mDimY;
+ }
+
+ /**
+ * Returns the Z dimension of the Allocation.
+ * @return Z dimension of the allocation
+ */
+ uint32_t getZ() const {
+ return mDimZ;
+ }
+
+ /**
+ * Returns true if the Allocation has mipmaps.
+ * @return true if the Allocation has mipmaps
+ */
+ bool hasMipmaps() const {
+ return mDimMipmaps;
+ }
+
+ /**
+ * Returns true if the Allocation is a cube map.
+ * @return true if the Allocation is a cube map
+ */
+ bool hasFaces() const {
+ return mDimFaces;
+ }
+
+ /**
+ * Returns the number of accessible Elements in the Allocation.
+ * @return number of accessible Elements in the Allocation
+ */
+ size_t getCount() const {
+ return mElementCount;
+ }
+
+ /**
+ * Returns the size in bytes of all Elements in the Allocation.
+ * @return size in bytes of all Elements in the Allocation
+ */
+ size_t getSizeBytes() const {
+ return mElementCount * mElement->getSizeBytes();
+ }
+
+ /**
+ * Creates a new Type with the given Element and dimensions.
+ * @param[in] rs RenderScript context
+ * @param[in] e Element
+ * @param[in] dimX X dimension
+ * @param[in] dimY Y dimension
+ * @param[in] dimZ Z dimension
+ * @return new Type
+ */
+ static sp<const Type> create(const sp<RS>& rs, const sp<const Element>& e, uint32_t dimX, uint32_t dimY, uint32_t dimZ);
+
+ class Builder {
+ protected:
+ RS* mRS;
+ uint32_t mDimX;
+ uint32_t mDimY;
+ uint32_t mDimZ;
+ RsYuvFormat mYuvFormat;
+ bool mDimMipmaps;
+ bool mDimFaces;
+ sp<const Element> mElement;
+
+ public:
+ Builder(sp<RS> rs, sp<const Element> e);
+
+ void setX(uint32_t value);
+ void setY(uint32_t value);
+ void setZ(uint32_t value);
+ void setYuvFormat(RsYuvFormat format);
+ void setMipmaps(bool value);
+ void setFaces(bool value);
+ sp<const Type> create();
+ };
+
+};
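+
+/*
+ * A minimal usage sketch (illustrative; "rs" is an assumed context): two
+ * equivalent ways to describe a 640x480 grid of RGBA pixels.
+ *
+ *   sp<const Type> t = Type::create(rs, Element::U8_4(rs), 640, 480, 0);
+ *
+ *   Type::Builder tb(rs, Element::U8_4(rs));
+ *   tb.setX(640);
+ *   tb.setY(480);
+ *   sp<const Type> t2 = tb.create();
+ */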
+
+/**
+ * The parent class for all executable Scripts. This should not be used by applications.
+ */
+class Script : public BaseObj {
+private:
+
+protected:
+ Script(void *id, sp<RS> rs);
+ void forEach(uint32_t slot, const sp<const Allocation>& in, const sp<const Allocation>& out,
+ const void *v, size_t) const;
+ void bindAllocation(const sp<Allocation>& va, uint32_t slot) const;
+ void setVar(uint32_t index, const void *, size_t len) const;
+ void setVar(uint32_t index, const sp<const BaseObj>& o) const;
+ void invoke(uint32_t slot, const void *v, size_t len) const;
+
+
+ void invoke(uint32_t slot) const {
+ invoke(slot, NULL, 0);
+ }
+ void setVar(uint32_t index, float v) const {
+ setVar(index, &v, sizeof(v));
+ }
+ void setVar(uint32_t index, double v) const {
+ setVar(index, &v, sizeof(v));
+ }
+ void setVar(uint32_t index, int32_t v) const {
+ setVar(index, &v, sizeof(v));
+ }
+ void setVar(uint32_t index, uint32_t v) const {
+ setVar(index, &v, sizeof(v));
+ }
+ void setVar(uint32_t index, int64_t v) const {
+ setVar(index, &v, sizeof(v));
+ }
+ void setVar(uint32_t index, bool v) const {
+ setVar(index, &v, sizeof(v));
+ }
+
+public:
+ class FieldBase {
+ protected:
+ sp<const Element> mElement;
+ sp<Allocation> mAllocation;
+
+ void init(const sp<RS>& rs, uint32_t dimx, uint32_t usages = 0);
+
+ public:
+ sp<const Element> getElement() {
+ return mElement;
+ }
+
+ sp<const Type> getType() {
+ return mAllocation->getType();
+ }
+
+ sp<const Allocation> getAllocation() {
+ return mAllocation;
+ }
+
+ //void updateAllocation();
+ };
+};
+
+/**
+ * The parent class for all user-defined scripts. This is intended to be used by auto-generated code only.
+ */
+class ScriptC : public Script {
+protected:
+ ScriptC(sp<RS> rs,
+ const void *codeTxt, size_t codeLength,
+ const char *cachedName, size_t cachedNameLength,
+ const char *cacheDir, size_t cacheDirLength);
+
+};
+
+/**
+ * The parent class for all script intrinsics. Intrinsics provide highly optimized implementations of
+ * basic functions. This is not intended to be used directly.
+ */
+class ScriptIntrinsic : public Script {
+ protected:
+ sp<const Element> mElement;
+ ScriptIntrinsic(sp<RS> rs, int id, sp<const Element> e);
+ virtual ~ScriptIntrinsic();
+};
+
+/**
+ * Intrinsic for converting RGB to RGBA by using a 3D lookup table. The incoming
+ * r,g,b values are used as normalized x,y,z coordinates into a 3D
+ * allocation. The 8 nearest values are sampled and linearly interpolated. The
+ * result is placed in the output.
+ */
+class ScriptIntrinsic3DLUT : public ScriptIntrinsic {
+ private:
+ ScriptIntrinsic3DLUT(sp<RS> rs, sp<const Element> e);
+ public:
+ /**
+ * Supported Element types are U8_4. Default lookup table is identity.
+ * @param[in] rs RenderScript context
+ * @param[in] e Element
+ * @return new ScriptIntrinsic
+ */
+ static sp<ScriptIntrinsic3DLUT> create(const sp<RS>& rs, const sp<const Element>& e);
+
+ /**
+ * Launch the intrinsic.
+ * @param[in] ain input Allocation
+ * @param[in] aout output Allocation
+ */
+ void forEach(const sp<Allocation>& ain, const sp<Allocation>& aout);
+
+ /**
+ * Sets the lookup table. The lookup table must use the same Element as the
+ * intrinsic.
+ * @param[in] lut new lookup table
+ */
+ void setLUT(const sp<Allocation>& lut);
+};
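+
+/*
+ * A minimal usage sketch (illustrative; "rs", and the U8_4 Allocations
+ * "lut3d", "ain" and "aout" are assumed to have been created elsewhere):
+ *
+ *   sp<ScriptIntrinsic3DLUT> sc =
+ *       ScriptIntrinsic3DLUT::create(rs, Element::U8_4(rs));
+ *   sc->setLUT(lut3d);        // 3D allocation used as the lookup cube
+ *   sc->forEach(ain, aout);   // ain/aout must have matching dimensions
+ */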
+
+
+/**
+ * Intrinsic kernels that provide high-performance RenderScript APIs to BLAS.
+ *
+ * The BLAS (Basic Linear Algebra Subprograms) are routines that provide standard
+ * building blocks for performing basic vector and matrix operations.
+ *
+ * For a detailed description of BLAS, please refer to http://www.netlib.org/blas/
+ *
+ **/
+class ScriptIntrinsicBLAS : public ScriptIntrinsic {
+ private:
+ ScriptIntrinsicBLAS(sp<RS> rs, sp<const Element> e);
+ public:
+ /**
+ * Create an intrinsic to access BLAS subroutines.
+ *
+ * @param rs The RenderScript context
+ * @return ScriptIntrinsicBLAS
+ */
+ static sp<ScriptIntrinsicBLAS> create(const sp<RS>& rs);
+
+ /**
+ * SGEMV performs one of the matrix-vector operations
+ * y := alpha*A*x + beta*y or y := alpha*A**T*x + beta*y
+ *
+ * Details: http://www.netlib.org/lapack/explore-html/db/d58/sgemv_8f.html
+ *
+ * @param TransA The type of transpose applied to matrix A.
+ * @param alpha The scalar alpha.
+ * @param A The input allocation contains matrix A, supported elements type: {Element#F32}.
+ * @param X The input allocation contains vector x, supported elements type: {Element#F32}.
+ * @param incX The increment for the elements of vector x, must be larger than zero.
+ * @param beta The scalar beta.
+ * @param Y The input allocation contains vector y, supported elements type: {Element#F32}.
+ * @param incY The increment for the elements of vector y, must be larger than zero.
+ */
+ void SGEMV(RsBlasTranspose TransA,
+ float alpha, const sp<Allocation>& A, const sp<Allocation>& X, int incX,
+ float beta, const sp<Allocation>& Y, int incY);
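+
+ /*
+ * Usage sketch (illustrative; "blas" is the intrinsic returned by create()
+ * above, and A, X, Y are assumed F32 Allocations where A is M x N, X has
+ * N elements and Y has M elements for the no-transpose case):
+ *
+ *   blas->SGEMV(RsBlasNoTrans, 1.0f, A, X, 1, 0.0f, Y, 1);
+ */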
+
+ /**
+ * DGEMV performs one of the matrix-vector operations
+ * y := alpha*A*x + beta*y or y := alpha*A**T*x + beta*y
+ *
+ * Details: http://www.netlib.org/lapack/explore-html/dc/da8/dgemv_8f.html
+ *
+ * @param TransA The type of transpose applied to matrix A.
+ * @param alpha The scalar alpha.
+ * @param A The input allocation contains matrix A, supported elements type: {Element#F64}.
+ * @param X The input allocation contains vector x, supported elements type: {Element#F64}.
+ * @param incX The increment for the elements of vector x, must be larger than zero.
+ * @param beta The scalar beta.
+ * @param Y The input allocation contains vector y, supported elements type: {Element#F64}.
+ * @param incY The increment for the elements of vector y, must be larger than zero.
+ */
+ void DGEMV(RsBlasTranspose TransA,
+ double alpha, const sp<Allocation>& A, const sp<Allocation>& X, int incX,
+ double beta, const sp<Allocation>& Y, int incY);
+
+ /**
+ * CGEMV performs one of the matrix-vector operations
+ * y := alpha*A*x + beta*y or y := alpha*A**T*x + beta*y or y := alpha*A**H*x + beta*y
+ *
+ * Details: http://www.netlib.org/lapack/explore-html/d4/d8a/cgemv_8f.html
+ *
+ * @param TransA The type of transpose applied to matrix A.
+ * @param alpha The scalar alpha.
+ * @param A The input allocation contains matrix A, supported elements type: {Element#F32_2}.
+ * @param X The input allocation contains vector x, supported elements type: {Element#F32_2}.
+ * @param incX The increment for the elements of vector x, must be larger than zero.
+ * @param beta The scalar beta.
+ * @param Y The input allocation contains vector y, supported elements type: {Element#F32_2}.
+ * @param incY The increment for the elements of vector y, must be larger than zero.
+ */
+ void CGEMV(RsBlasTranspose TransA,
+ Float2 alpha, const sp<Allocation>& A, const sp<Allocation>& X, int incX,
+ Float2 beta, const sp<Allocation>& Y, int incY);
+
+ /**
+ * ZGEMV performs one of the matrix-vector operations
+ * y := alpha*A*x + beta*y or y := alpha*A**T*x + beta*y or y := alpha*A**H*x + beta*y
+ *
+ * Details: http://www.netlib.org/lapack/explore-html/db/d40/zgemv_8f.html
+ *
+ * @param TransA The type of transpose applied to matrix A.
+ * @param alpha The scalar alpha.
+ * @param A The input allocation contains matrix A, supported elements type: {Element#F64_2}.
+ * @param X The input allocation contains vector x, supported elements type: {Element#F64_2}.
+ * @param incX The increment for the elements of vector x, must be larger than zero.
+ * @param beta The scalar beta.
+ * @param Y The input allocation contains vector y, supported elements type: {Element#F64_2}.
+ * @param incY The increment for the elements of vector y, must be larger than zero.
+ */
+ void ZGEMV(RsBlasTranspose TransA,
+ Double2 alpha, const sp<Allocation>& A, const sp<Allocation>& X, int incX,
+ Double2 beta, const sp<Allocation>& Y, int incY);
+
+ /**
+ * SGBMV performs one of the matrix-vector operations
+ * y := alpha*A*x + beta*y or y := alpha*A**T*x + beta*y
+ *
+ * Details: http://www.netlib.org/lapack/explore-html/d6/d46/sgbmv_8f.html
+ *
+ * Note: For an M*N matrix, the input Allocation should also be of size M*N (dimY = M, dimX = N),
+ * but only the region M*(KL+KU+1) will be referenced. The following subroutine is an
+ * example showing how to convert the original matrix 'a' to a row-based band matrix 'b'
+ * (a C++ version of this conversion follows the declaration below).
+ * for i in range(0, m):
+ * for j in range(max(0, i-kl), min(i+ku+1, n)):
+ * b[i, j-i+kl] = a[i, j]
+ *
+ * @param TransA The type of transpose applied to matrix A.
+ * @param KL The number of sub-diagonals of the matrix A.
+ * @param KU The number of super-diagonals of the matrix A.
+ * @param alpha The scalar alpha.
+ * @param A The input allocation contains the band matrix A, supported elements type: {Element#F32}.
+ * @param X The input allocation contains vector x, supported elements type: {Element#F32}.
+ * @param incX The increment for the elements of vector x, must be larger than zero.
+ * @param beta The scalar beta.
+ * @param Y The input allocation contains vector y, supported elements type: {Element#F32}.
+ * @param incY The increment for the elements of vector y, must be larger than zero.
+ */
+ void SGBMV(RsBlasTranspose TransA,
+ int KL, int KU, float alpha, const sp<Allocation>& A, const sp<Allocation>& X, int incX,
+ float beta, const sp<Allocation>& Y, int incY);
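+
+ /*
+ * The band conversion described above, written as a plain C++ loop (a
+ * sketch; 'a' and 'b' are row-major float arrays of m*n elements, and the
+ * band occupies the first kl+ku+1 columns of each row of 'b'):
+ *
+ *   for (int i = 0; i < m; i++)
+ *       for (int j = std::max(0, i - kl); j < std::min(i + ku + 1, n); j++)
+ *           b[i * n + (j - i + kl)] = a[i * n + j];
+ */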
+
+ /**
+ * DGBMV performs one of the matrix-vector operations
+ * y := alpha*A*x + beta*y or y := alpha*A**T*x + beta*y
+ *
+ * Details: http://www.netlib.org/lapack/explore-html/d2/d3f/dgbmv_8f.html
+ *
+ * Note: For an M*N matrix, the input Allocation should also be of size M*N (dimY = M, dimX = N),
+ * but only the region M*(KL+KU+1) will be referenced. The following subroutine is an
+ * example showing how to convert the original matrix 'a' to a row-based band matrix 'b'.
+ * for i in range(0, m):
+ * for j in range(max(0, i-kl), min(i+ku+1, n)):
+ * b[i, j-i+kl] = a[i, j]
+ *
+ * @param TransA The type of transpose applied to matrix A.
+ * @param KL The number of sub-diagonals of the matrix A.
+ * @param KU The number of super-diagonals of the matrix A.
+ * @param alpha The scalar alpha.
+ * @param A The input allocation contains the band matrix A, supported elements type: {Element#F64}.
+ * @param X The input allocation contains vector x, supported elements type: {Element#F64}.
+ * @param incX The increment for the elements of vector x, must be larger than zero.
+ * @param beta The scalar beta.
+ * @param Y The input allocation contains vector y, supported elements type: {Element#F64}.
+ * @param incY The increment for the elements of vector y, must be larger than zero.
+ */
+ void DGBMV(RsBlasTranspose TransA,
+ int KL, int KU, double alpha, const sp<Allocation>& A, const sp<Allocation>& X,
+ int incX, double beta, const sp<Allocation>& Y, int incY);
+
+ /**
+ * CGBMV performs one of the matrix-vector operations
+ * y := alpha*A*x + beta*y or y := alpha*A**T*x + beta*y or y := alpha*A**H*x + beta*y
+ *
+ * Details: http://www.netlib.org/lapack/explore-html/d0/d75/cgbmv_8f.html
+ *
+ * Note: For an M*N matrix, the input Allocation should also be of size M*N (dimY = M, dimX = N),
+ * but only the region M*(KL+KU+1) will be referenced. The following subroutine is an
+ * example showing how to convert the original matrix 'a' to a row-based band matrix 'b'.
+ * for i in range(0, m):
+ * for j in range(max(0, i-kl), min(i+ku+1, n)):
+ * b[i, j-i+kl] = a[i, j]
+ *
+ * @param TransA The type of transpose applied to matrix A.
+ * @param KL The number of sub-diagonals of the matrix A.
+ * @param KU The number of super-diagonals of the matrix A.
+ * @param alpha The scalar alpha.
+ * @param A The input allocation contains the band matrix A, supported elements type: {Element#F32_2}.
+ * @param X The input allocation contains vector x, supported elements type: {Element#F32_2}.
+ * @param incX The increment for the elements of vector x, must be larger than zero.
+ * @param beta The scalar beta.
+ * @param Y The input allocation contains vector y, supported elements type: {Element#F32_2}.
+ * @param incY The increment for the elements of vector y, must be larger than zero.
+ */
+ void CGBMV(RsBlasTranspose TransA,
+ int KL, int KU, Float2 alpha, const sp<Allocation>& A, const sp<Allocation>& X,
+ int incX, Float2 beta, const sp<Allocation>& Y, int incY);
+
+ /**
+ * ZGBMV performs one of the matrix-vector operations
+ * y := alpha*A*x + beta*y or y := alpha*A**T*x + beta*y or y := alpha*A**H*x + beta*y
+ *
+ * Details: http://www.netlib.org/lapack/explore-html/d9/d46/zgbmv_8f.html
+ *
+ * Note: For an M*N matrix, the input Allocation should also be of size M*N (dimY = M, dimX = N),
+ * but only the region M*(KL+KU+1) will be referenced. The following subroutine is an
+ * example showing how to convert the original matrix 'a' to a row-based band matrix 'b'.
+ * for i in range(0, m):
+ * for j in range(max(0, i-kl), min(i+ku+1, n)):
+ * b[i, j-i+kl] = a[i, j]
+ *
+ * @param TransA The type of transpose applied to matrix A.
+ * @param KL The number of sub-diagonals of the matrix A.
+ * @param KU The number of super-diagonals of the matrix A.
+ * @param alpha The scalar alpha.
+ * @param A The input allocation contains the band matrix A, supported elements type: {Element#F64_2}.
+ * @param X The input allocation contains vector x, supported elements type: {Element#F64_2}.
+ * @param incX The increment for the elements of vector x, must be larger than zero.
+ * @param beta The scalar beta.
+ * @param Y The input allocation contains vector y, supported elements type: {Element#F64_2}.
+ * @param incY The increment for the elements of vector y, must be larger than zero.
+ */
+ void ZGBMV(RsBlasTranspose TransA,
+ int KL, int KU, Double2 alpha, const sp<Allocation>& A, const sp<Allocation>& X, int incX,
+ Double2 beta, const sp<Allocation>& Y, int incY);
+
+ /**
+ * STRMV performs one of the matrix-vector operations
+ * x := A*x or x := A**T*x
+ *
+ * Details: http://www.netlib.org/lapack/explore-html/de/d45/strmv_8f.html
+ *
+ * @param Uplo Specifies whether the matrix is an upper or lower triangular matrix.
+ * @param TransA The type of transpose applied to matrix A.
+ * @param Diag Specifies whether or not A is unit triangular.
+ * @param A The input allocation contains matrix A, supported elements type: {Element#F32}.
+ * @param X The input allocation contains vector x, supported elements type: {Element#F32}.
+ * @param incX The increment for the elements of vector x, must be larger than zero.
+ */
+ void STRMV(RsBlasUplo Uplo, RsBlasTranspose TransA, RsBlasDiag Diag,
+ const sp<Allocation>& A, const sp<Allocation>& X, int incX);
+
+ /**
+ * DTRMV performs one of the matrix-vector operations
+ * x := A*x or x := A**T*x
+ *
+ * Details: http://www.netlib.org/lapack/explore-html/dc/d7e/dtrmv_8f.html
+ *
+ * @param Uplo Specifies whether the matrix is an upper or lower triangular matrix.
+ * @param TransA The type of transpose applied to matrix A.
+ * @param Diag Specifies whether or not A is unit triangular.
+ * @param A The input allocation contains matrix A, supported elements type: {Element#F64}.
+ * @param X The input allocation contains vector x, supported elements type: {Element#F64}.
+ * @param incX The increment for the elements of vector x, must be larger than zero.
+ */
+ void DTRMV(RsBlasUplo Uplo, RsBlasTranspose TransA, RsBlasDiag Diag,
+ const sp<Allocation>& A, const sp<Allocation>& X, int incX);
+
+ /**
+ * CTRMV performs one of the matrix-vector operations
+ * x := A*x or x := A**T*x or x := A**H*x
+ *
+ * Details: http://www.netlib.org/lapack/explore-html/df/d78/ctrmv_8f.html
+ *
+ * @param Uplo Specifies whether the matrix is an upper or lower triangular matrix.
+ * @param TransA The type of transpose applied to matrix A.
+ * @param Diag Specifies whether or not A is unit triangular.
+ * @param A The input allocation contains matrix A, supported elements type: {Element#F32_2}.
+ * @param X The input allocation contains vector x, supported elements type: {Element#F32_2}.
+ * @param incX The increment for the elements of vector x, must be larger than zero.
+ */
+ void CTRMV(RsBlasUplo Uplo, RsBlasTranspose TransA, RsBlasDiag Diag,
+ const sp<Allocation>& A, const sp<Allocation>& X, int incX);
+
+ /**
+ * ZTRMV performs one of the matrix-vector operations
+ * x := A*x or x := A**T*x or x := A**H*x
+ *
+ * Details: http://www.netlib.org/lapack/explore-html/d0/dd1/ztrmv_8f.html
+ *
+ * @param Uplo Specifies whether the matrix is an upper or lower triangular matrix.
+ * @param TransA The type of transpose applied to matrix A.
+ * @param Diag Specifies whether or not A is unit triangular.
+ * @param A The input allocation contains matrix A, supported elements type: {Element#F64_2}.
+ * @param X The input allocation contains vector x, supported elements type: {Element#F64_2}.
+ * @param incX The increment for the elements of vector x, must be larger than zero.
+ */
+ void ZTRMV(RsBlasUplo Uplo, RsBlasTranspose TransA, RsBlasDiag Diag,
+ const sp<Allocation>& A, const sp<Allocation>& X, int incX);
+
+ /**
+ * STBMV performs one of the matrix-vector operations
+ * x := A*x or x := A**T*x
+ *
+ * Details: http://www.netlib.org/lapack/explore-html/d6/d7d/stbmv_8f.html
+ *
+ * Note: For an N*N matrix, the input Allocation should also be of size N*N (dimY = N, dimX = N),
+ * but only the region N*(K+1) will be referenced. The following subroutine is an
+ * example showing how to convert an UPPER triangular matrix 'a' to a row-based band matrix 'b'.
+ * for i in range(0, n):
+ * for j in range(i, min(i+k+1, n)):
+ * b[i, j-i] = a[i, j]
+ *
+ * @param Uplo Specifies whether the matrix is an upper or lower triangular matrix.
+ * @param TransA The type of transpose applied to matrix A.
+ * @param Diag Specifies whether or not A is unit triangular.
+ * @param K The number of off-diagonals of the matrix A.
+ * @param A The input allocation contains matrix A, supported elements type: {Element#F32}.
+ * @param X The input allocation contains vector x, supported elements type: {Element#F32}.
+ * @param incX The increment for the elements of vector x, must be larger than zero.
+ */
+ void STBMV(RsBlasUplo Uplo, RsBlasTranspose TransA, RsBlasDiag Diag,
+ int K, const sp<Allocation>& A, const sp<Allocation>& X, int incX);
+
+ /**
+ * DTBMV performs one of the matrix-vector operations
+ * x := A*x or x := A**T*x
+ *
+ * Details: http://www.netlib.org/lapack/explore-html/df/d29/dtbmv_8f.html
+ *
+ * Note: For an N*N matrix, the input Allocation should also be of size N*N (dimY = N, dimX = N),
+ * but only the region N*(K+1) will be referenced. The following subroutine is an
+ * example showing how to convert an UPPER triangular matrix 'a' to a row-based band matrix 'b'.
+ * for i in range(0, n):
+ * for j in range(i, min(i+k+1, n)):
+ * b[i, j-i] = a[i, j]
+ *
+ * @param Uplo Specifies whether the matrix is an upper or lower triangular matrix.
+ * @param TransA The type of transpose applied to matrix A.
+ * @param Diag Specifies whether or not A is unit triangular.
+ * @param K The number of off-diagonals of the matrix A.
+ * @param A The input allocation contains matrix A, supported elements type: {Element#F64}.
+ * @param X The input allocation contains vector x, supported elements type: {Element#F64}.
+ * @param incX The increment for the elements of vector x, must be larger than zero.
+ */
+ void DTBMV(RsBlasUplo Uplo, RsBlasTranspose TransA, RsBlasDiag Diag,
+ int K, const sp<Allocation>& A, const sp<Allocation>& X, int incX);
+
+ /**
+ * CTBMV performs one of the matrix-vector operations
+ * x := A*x or x := A**T*x or x := A**H*x
+ *
+ * Details: http://www.netlib.org/lapack/explore-html/d3/dcd/ctbmv_8f.html
+ *
+ * Note: For an N*N matrix, the input Allocation should also be of size N*N (dimY = N, dimX = N),
+ * but only the region N*(K+1) will be referenced. The following subroutine is an
+ * example showing how to convert an UPPER triangular matrix 'a' to a row-based band matrix 'b'.
+ * for i in range(0, n):
+ * for j in range(i, min(i+k+1, n)):
+ * b[i, j-i] = a[i, j]
+ *
+ * @param Uplo Specifies whether the matrix is an upper or lower triangular matrix.
+ * @param TransA The type of transpose applied to matrix A.
+ * @param Diag Specifies whether or not A is unit triangular.
+ * @param K The number of off-diagonals of the matrix A.
+ * @param A The input allocation contains matrix A, supported elements type: {Element#F32_2}.
+ * @param X The input allocation contains vector x, supported elements type: {Element#F32_2}.
+ * @param incX The increment for the elements of vector x, must be larger than zero.
+ */
+ void CTBMV(RsBlasUplo Uplo, RsBlasTranspose TransA, RsBlasDiag Diag,
+ int K, const sp<Allocation>& A, const sp<Allocation>& X, int incX);
+
+ /**
+ * ZTBMV performs one of the matrix-vector operations
+ * x := A*x or x := A**T*x or x := A**H*x
+ *
+ * Details: http://www.netlib.org/lapack/explore-html/d3/d39/ztbmv_8f.html
+ *
+ * Note: For an N*N matrix, the input Allocation should also be of size N*N (dimY = N, dimX = N),
+ * but only the region N*(K+1) will be referenced. The following subroutine is an
+ * example showing how to convert an UPPER triangular matrix 'a' to a row-based band matrix 'b'.
+ * for i in range(0, n):
+ * for j in range(i, min(i+k+1, n)):
+ * b[i, j-i] = a[i, j]
+ *
+ * @param Uplo Specifies whether the matrix is an upper or lower triangular matrix.
+ * @param TransA The type of transpose applied to matrix A.
+ * @param Diag Specifies whether or not A is unit triangular.
+ * @param K The number of off-diagonals of the matrix A.
+ * @param A The input allocation contains matrix A, supported elements type: {Element#F64_2}.
+ * @param X The input allocation contains vector x, supported elements type: {Element#F64_2}.
+ * @param incX The increment for the elements of vector x, must be larger than zero.
+ */
+ void ZTBMV(RsBlasUplo Uplo, RsBlasTranspose TransA, RsBlasDiag Diag,
+ int K, const sp<Allocation>& A, const sp<Allocation>& X, int incX);
+
+ /**
+ * STPMV performs one of the matrix-vector operations
+ * x := A*x or x := A**T*x
+ *
+ * Details: http://www.netlib.org/lapack/explore-html/db/db1/stpmv_8f.html
+ *
+ * Note: For an N*N matrix, the input Allocation should be a 1D allocation of size dimX = N*(N+1)/2.
+ * The following subroutine is an example showing how to convert an UPPER triangular matrix
+ * 'a' to a packed matrix 'b' (a C++ version of this conversion follows the declaration below).
+ * k = 0
+ * for i in range(0, n):
+ * for j in range(i, n):
+ * b[k++] = a[i, j]
+ *
+ * @param Uplo Specifies whether the matrix is an upper or lower triangular matrix.
+ * @param TransA The type of transpose applied to matrix A.
+ * @param Diag Specifies whether or not A is unit triangular.
+ * @param Ap The input allocation contains packed matrix A, supported elements type: {Element#F32}.
+ * @param X The input allocation contains vector x, supported elements type: {Element#F32}.
+ * @param incX The increment for the elements of vector x, must be larger than zero.
+ */
+ void STPMV(RsBlasUplo Uplo, RsBlasTranspose TransA, RsBlasDiag Diag,
+ const sp<Allocation>& Ap, const sp<Allocation>& X, int incX);
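+
+ /*
+ * The packed conversion described above, written as a plain C++ loop (a
+ * sketch for the UPPER triangular case; 'a' is a row-major n*n float
+ * array and 'b' holds n*(n+1)/2 elements):
+ *
+ *   int k = 0;
+ *   for (int i = 0; i < n; i++)
+ *       for (int j = i; j < n; j++)
+ *           b[k++] = a[i * n + j];
+ */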
+
+ /**
+ * DTPMV performs one of the matrix-vector operations
+ * x := A*x or x := A**T*x
+ *
+ * Details: http://www.netlib.org/lapack/explore-html/dc/dcd/dtpmv_8f.html
+ *
+ * Note: For an N*N matrix, the input Allocation should be a 1D allocation of size dimX = N*(N+1)/2.
+ * The following subroutine is an example showing how to convert an UPPER triangular matrix
+ * 'a' to a packed matrix 'b'.
+ * k = 0
+ * for i in range(0, n):
+ * for j in range(i, n):
+ * b[k++] = a[i, j]
+ *
+ * @param Uplo Specifies whether the matrix is an upper or lower triangular matrix.
+ * @param TransA The type of transpose applied to matrix A.
+ * @param Diag Specifies whether or not A is unit triangular.
+ * @param Ap The input allocation contains packed matrix A, supported elements type: {Element#F64}.
+ * @param X The input allocation contains vector x, supported elements type: {Element#F64}.
+ * @param incX The increment for the elements of vector x, must be larger than zero.
+ */
+ void DTPMV(RsBlasUplo Uplo, RsBlasTranspose TransA, RsBlasDiag Diag,
+ const sp<Allocation>& Ap, const sp<Allocation>& X, int incX);
+
+ /**
+ * CTPMV performs one of the matrix-vector operations
+ * x := A*x or x := A**T*x or x := A**H*x
+ *
+ * Details: http://www.netlib.org/lapack/explore-html/d4/dbb/ctpmv_8f.html
+ *
+ * Note: For an N*N matrix, the input Allocation should be a 1D allocation of size dimX = N*(N+1)/2.
+ * The following subroutine is an example showing how to convert an UPPER triangular matrix
+ * 'a' to a packed matrix 'b'.
+ * k = 0
+ * for i in range(0, n):
+ * for j in range(i, n):
+ * b[k++] = a[i, j]
+ *
+ * @param Uplo Specifies whether the matrix is an upper or lower triangular matrix.
+ * @param TransA The type of transpose applied to matrix A.
+ * @param Diag Specifies whether or not A is unit triangular.
+ * @param Ap The input allocation contains packed matrix A, supported elements type: {Element#F32_2}.
+ * @param X The input allocation contains vector x, supported elements type: {Element#F32_2}.
+ * @param incX The increment for the elements of vector x, must be larger than zero.
+ */
+ void CTPMV(RsBlasUplo Uplo, RsBlasTranspose TransA, RsBlasDiag Diag,
+ const sp<Allocation>& Ap, const sp<Allocation>& X, int incX);
+
+ /**
+ * ZTPMV performs one of the matrix-vector operations
+ * x := A*x or x := A**T*x or x := A**H*x
+ *
+ * Details: http://www.netlib.org/lapack/explore-html/d2/d9e/ztpmv_8f.html
+ *
+ * Note: For an N*N matrix, the input Allocation should be a 1D allocation of size dimX = N*(N+1)/2.
+ * The following subroutine is an example showing how to convert an UPPER triangular matrix
+ * 'a' to a packed matrix 'b'.
+ * k = 0
+ * for i in range(0, n):
+ * for j in range(i, n):
+ * b[k++] = a[i, j]
+ *
+ * @param Uplo Specifies whether the matrix is an upper or lower triangular matrix.
+ * @param TransA The type of transpose applied to matrix A.
+ * @param Diag Specifies whether or not A is unit triangular.
+ * @param Ap The input allocation contains packed matrix A, supported elements type: {Element#F64_2}.
+ * @param X The input allocation contains vector x, supported elements type: {Element#F64_2}.
+ * @param incX The increment for the elements of vector x, must be larger than zero.
+ */
+ void ZTPMV(RsBlasUplo Uplo, RsBlasTranspose TransA, RsBlasDiag Diag,
+ const sp<Allocation>& Ap, const sp<Allocation>& X, int incX);
+
+ /**
+ * STRSV solves one of the systems of equations
+ * A*x = b or A**T*x = b
+ *
+ * Details: http://www.netlib.org/lapack/explore-html/d0/d2a/strsv_8f.html
+ *
+ * @param Uplo Specifies whether the matrix is an upper or lower triangular matrix.
+ * @param TransA The type of transpose applied to matrix A.
+ * @param Diag Specifies whether or not A is unit triangular.
+ * @param A The input allocation contains matrix A, supported elements type: {Element#F32}.
+ * @param X The input allocation contains vector x, supported elements type: {Element#F32}.
+ * @param incX The increment for the elements of vector x, must be larger than zero.
+ */
+ void STRSV(RsBlasUplo Uplo, RsBlasTranspose TransA, RsBlasDiag Diag,
+ const sp<Allocation>& A, const sp<Allocation>& X, int incX);
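+
+ /*
+ * Usage sketch (illustrative; A is assumed to be an N*N F32 Allocation
+ * holding an upper triangular, non-unit matrix, and X holds the
+ * right-hand side on input and the solution on output):
+ *
+ *   blas->STRSV(RsBlasUpper, RsBlasNoTrans, RsBlasNonUnit, A, X, 1);
+ */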
+
+ /**
+ * DTRSV solves one of the systems of equations
+ * A*x = b or A**T*x = b
+ *
+ * Details: http://www.netlib.org/lapack/explore-html/d6/d96/dtrsv_8f.html
+ *
+ * @param Uplo Specifies whether the matrix is an upper or lower triangular matrix.
+ * @param TransA The type of transpose applied to matrix A.
+ * @param Diag Specifies whether or not A is unit triangular.
+ * @param A The input allocation contains matrix A, supported elements type: {Element#F64}.
+ * @param X The input allocation contains vector x, supported elements type: {Element#F64}.
+ * @param incX The increment for the elements of vector x, must be larger than zero.
+ */
+ void DTRSV(RsBlasUplo Uplo, RsBlasTranspose TransA, RsBlasDiag Diag,
+ const sp<Allocation>& A, const sp<Allocation>& X, int incX);
+
+ /**
+ * CTRSV solves one of the systems of equations
+ * A*x = b or A**T*x = b or A**H*x = b
+ *
+ * Details: http://www.netlib.org/lapack/explore-html/d4/dc8/ctrsv_8f.html
+ *
+ * @param Uplo Specifies whether the matrix is an upper or lower triangular matrix.
+ * @param TransA The type of transpose applied to matrix A.
+ * @param Diag Specifies whether or not A is unit triangular.
+ * @param A The input allocation contains matrix A, supported elements type: {Element#F32_2}.
+ * @param X The input allocation contains vector x, supported elements type: {Element#F32_2}.
+ * @param incX The increment for the elements of vector x, must be larger than zero.
+ */
+ void CTRSV(RsBlasUplo Uplo, RsBlasTranspose TransA, RsBlasDiag Diag,
+ const sp<Allocation>& A, const sp<Allocation>& X, int incX);
+
+ /**
+ * ZTRSV solves one of the systems of equations
+ * A*x = b or A**T*x = b or A**H*x = b
+ *
+ * Details: http://www.netlib.org/lapack/explore-html/d1/d2f/ztrsv_8f.html
+ *
+ * @param Uplo Specifies whether the matrix is an upper or lower triangular matrix.
+ * @param TransA The type of transpose applied to matrix A.
+ * @param Diag Specifies whether or not A is unit triangular.
+ * @param A The input allocation contains matrix A, supported elements type: {Element#F64_2}.
+ * @param X The input allocation contains vector x, supported elements type: {Element#F64_2}.
+ * @param incX The increment for the elements of vector x, must be larger than zero.
+ */
+ void ZTRSV(RsBlasUplo Uplo, RsBlasTranspose TransA, RsBlasDiag Diag,
+ const sp<Allocation>& A, const sp<Allocation>& X, int incX);
+
+ /**
+ * STBSV solves one of the systems of equations
+ * A*x = b or A**T*x = b
+ *
+ * Details: http://www.netlib.org/lapack/explore-html/d0/d1f/stbsv_8f.html
+ *
+ * Note: For an N*N matrix, the input Allocation should also be of size N*N (dimY = N, dimX = N),
+ * but only the region N*(K+1) will be referenced. The following subroutine is an
+ * example showing how to convert an UPPER triangular matrix 'a' to a row-based band matrix 'b'.
+ * for i in range(0, n):
+ * for j in range(i, min(i+k+1, n)):
+ * b[i, j-i] = a[i, j]
+ *
+ * @param Uplo Specifies whether the matrix is an upper or lower triangular matrix.
+ * @param TransA The type of transpose applied to matrix A.
+ * @param Diag Specifies whether or not A is unit triangular.
+ * @param K The number of off-diagonals of the matrix A.
+ * @param A The input allocation contains matrix A, supported elements type: {Element#F32}.
+ * @param X The input allocation contains vector x, supported elements type: {Element#F32}.
+ * @param incX The increment for the elements of vector x, must be larger than zero.
+ */
+ void STBSV(RsBlasUplo Uplo, RsBlasTranspose TransA, RsBlasDiag Diag,
+ int K, const sp<Allocation>& A, const sp<Allocation>& X, int incX);
+
+ /**
+ * DTBSV solves one of the systems of equations
+ * A*x = b or A**T*x = b
+ *
+ * Details: http://www.netlib.org/lapack/explore-html/d4/dcf/dtbsv_8f.html
+ *
+ * Note: For an N*N matrix, the input Allocation should also be of size N*N (dimY = N, dimX = N),
+ * but only the region N*(K+1) will be referenced. The following subroutine is an
+ * example showing how to convert an UPPER triangular matrix 'a' to a row-based band matrix 'b'.
+ * for i in range(0, n):
+ * for j in range(i, min(i+k+1, n)):
+ * b[i, j-i] = a[i, j]
+ *
+ * @param Uplo Specifies whether the matrix is an upper or lower triangular matrix.
+ * @param TransA The type of transpose applied to matrix A.
+ * @param Diag Specifies whether or not A is unit triangular.
+ * @param K The number of off-diagonals of the matrix A.
+ * @param A The input allocation contains matrix A, supported elements type: {Element#F64}.
+ * @param X The input allocation contains vector x, supported elements type: {Element#F64}.
+ * @param incX The increment for the elements of vector x, must be larger than zero.
+ */
+ void DTBSV(RsBlasUplo Uplo, RsBlasTranspose TransA, RsBlasDiag Diag,
+ int K, const sp<Allocation>& A, const sp<Allocation>& X, int incX);
+
+ /**
+ * CTBSV solves one of the systems of equations
+ * A*x = b or A**T*x = b or A**H*x = b
+ *
+ * Details: http://www.netlib.org/lapack/explore-html/d9/d5f/ctbsv_8f.html
+ *
+ * Note: For an N*N matrix, the input Allocation should also be of size N*N (dimY = N, dimX = N),
+ * but only the region N*(K+1) will be referenced. The following subroutine is an
+ * example showing how to convert an UPPER triangular matrix 'a' to a row-based band matrix 'b'.
+ * for i in range(0, n):
+ * for j in range(i, min(i+k+1, n)):
+ * b[i, j-i] = a[i, j]
+ *
+ * @param Uplo Specifies whether the matrix is an upper or lower triangular matrix.
+ * @param TransA The type of transpose applied to matrix A.
+ * @param Diag Specifies whether or not A is unit triangular.
+ * @param K The number of off-diagonals of the matrix A.
+ * @param A The input allocation contains matrix A, supported elements type: {Element#F32_2}.
+ * @param X The input allocation contains vector x, supported elements type: {Element#F32_2}.
+ * @param incX The increment for the elements of vector x, must be larger than zero.
+ */
+ void CTBSV(RsBlasUplo Uplo, RsBlasTranspose TransA, RsBlasDiag Diag,
+ int K, const sp<Allocation>& A, const sp<Allocation>& X, int incX);
+
+ /**
+ * ZTBSV solves one of the systems of equations
+ * A*x = b or A**T*x = b or A**H*x = b
+ *
+ * Details: http://www.netlib.org/lapack/explore-html/d4/d5a/ztbsv_8f.html
+ *
+ * Note: For an N*N matrix, the input Allocation should also be of size N*N (dimY = N, dimX = N),
+ * but only the region N*(K+1) will be referenced. The following subroutine is an
+ * example showing how to convert an UPPER triangular matrix 'a' to a row-based band matrix 'b'.
+ * for i in range(0, n):
+ * for j in range(i, min(i+k+1, n)):
+ * b[i, j-i] = a[i, j]
+ *
+ * @param Uplo Specifies whether the matrix is an upper or lower triangular matrix.
+ * @param TransA The type of transpose applied to matrix A.
+ * @param Diag Specifies whether or not A is unit triangular.
+ * @param K The number of off-diagonals of the matrix A.
+ * @param A The input allocation contains matrix A, supported elements type: {Element#F64_2}.
+ * @param X The input allocation contains vector x, supported elements type: {Element#F64_2}.
+ * @param incX The increment for the elements of vector x, must be larger than zero.
+ */
+ void ZTBSV(RsBlasUplo Uplo, RsBlasTranspose TransA, RsBlasDiag Diag,
+ int K, const sp<Allocation>& A, const sp<Allocation>& X, int incX);
+
+ /**
+ * STPSV solves one of the systems of equations
+ * A*x = b or A**T*x = b
+ *
+ * Details: http://www.netlib.org/lapack/explore-html/d0/d7c/stpsv_8f.html
+ *
+ * Note: For an N*N matrix, the input Allocation should be a 1D allocation of size dimX = N*(N+1)/2.
+ * The following subroutine is an example showing how to convert an UPPER triangular matrix
+ * 'a' to a packed matrix 'b'.
+ * k = 0
+ * for i in range(0, n):
+ * for j in range(i, n):
+ * b[k++] = a[i, j]
+ *
+ * @param Uplo Specifies whether the matrix is an upper or lower triangular matrix.
+ * @param TransA The type of transpose applied to matrix A.
+ * @param Diag Specifies whether or not A is unit triangular.
+ * @param Ap The input allocation contains packed matrix A, supported elements type: {Element#F32}.
+ * @param X The input allocation contains vector x, supported elements type: {Element#F32}.
+ * @param incX The increment for the elements of vector x, must be larger than zero.
+ */
+ void STPSV(RsBlasUplo Uplo, RsBlasTranspose TransA, RsBlasDiag Diag,
+ const sp<Allocation>& Ap, const sp<Allocation>& X, int incX);
+
+ /**
+ * DTPSV solves one of the systems of equations
+ * A*x = b or A**T*x = b
+ *
+ * Details: http://www.netlib.org/lapack/explore-html/d9/d84/dtpsv_8f.html
+ *
+ * Note: For an N*N matrix, the input Allocation should be a 1D allocation of size dimX = N*(N+1)/2.
+ * The following subroutine is an example showing how to convert an UPPER triangular matrix
+ * 'a' to a packed matrix 'b'.
+ * k = 0
+ * for i in range(0, n):
+ * for j in range(i, n):
+ * b[k++] = a[i, j]
+ *
+ * @param Uplo Specifies whether the matrix is an upper or lower triangular matrix.
+ * @param TransA The type of transpose applied to matrix A.
+ * @param Diag Specifies whether or not A is unit triangular.
+ * @param Ap The input allocation contains packed matrix A, supported elements type: {Element#F64}.
+ * @param X The input allocation contains vector x, supported elements type: {Element#F64}.
+ * @param incX The increment for the elements of vector x, must be larger than zero.
+ */
+ void DTPSV(RsBlasUplo Uplo, RsBlasTranspose TransA, RsBlasDiag Diag,
+ const sp<Allocation>& Ap, const sp<Allocation>& X, int incX);
+
+ /**
+ * CTPSV solves one of the systems of equations
+ * A*x = b or A**T*x = b or A**H*x = b
+ *
+ * Details: http://www.netlib.org/lapack/explore-html/d8/d56/ctpsv_8f.html
+ *
+ * Note: For an N*N matrix, the input Allocation should be a 1D allocation of size dimX = N*(N+1)/2.
+ * The following subroutine is an example showing how to convert an UPPER triangular matrix
+ * 'a' to a packed matrix 'b'.
+ * k = 0
+ * for i in range(0, n):
+ * for j in range(i, n):
+ * b[k++] = a[i, j]
+ *
+ * @param Uplo Specifies whether the matrix is an upper or lower triangular matrix.
+ * @param TransA The type of transpose applied to matrix A.
+ * @param Diag Specifies whether or not A is unit triangular.
+ * @param Ap The input allocation contains packed matrix A, supported elements type: {Element#F32_2}.
+ * @param X The input allocation contains vector x, supported elements type: {Element#F32_2}.
+ * @param incX The increment for the elements of vector x, must be larger than zero.
+ */
+ void CTPSV(RsBlasUplo Uplo, RsBlasTranspose TransA, RsBlasDiag Diag,
+ const sp<Allocation>& Ap, const sp<Allocation>& X, int incX);
+
+ /**
+ * ZTPSV solves one of the systems of equations
+ * A*x = b or A**T*x = b or A**H*x = b
+ *
+ * Details: http://www.netlib.org/lapack/explore-html/da/d57/ztpsv_8f.html
+ *
+ * Note: For an N*N matrix, the input Allocation should be a 1D allocation of size dimX = N*(N+1)/2.
+ * The following subroutine is an example showing how to convert an UPPER triangular matrix
+ * 'a' to a packed matrix 'b'.
+ * k = 0
+ * for i in range(0, n):
+ * for j in range(i, n):
+ * b[k++] = a[i, j]
+ *
+ * @param Uplo Specifies whether the matrix is an upper or lower triangular matrix.
+ * @param TransA The type of transpose applied to matrix A.
+ * @param Diag Specifies whether or not A is unit triangular.
+ * @param Ap The input allocation contains packed matrix A, supported elements type: {Element#F64_2}.
+ * @param X The input allocation contains vector x, supported elements type: {Element#F64_2}.
+ * @param incX The increment for the elements of vector x, must be larger than zero.
+ */
+ void ZTPSV(RsBlasUplo Uplo, RsBlasTranspose TransA, RsBlasDiag Diag,
+ const sp<Allocation>& Ap, const sp<Allocation>& X, int incX);
+
+ /**
+ * SSYMV performs the matrix-vector operation
+ * y := alpha*A*x + beta*y
+ *
+ * Details: http://www.netlib.org/lapack/explore-html/d2/d94/ssymv_8f.html
+ *
+ * @param Uplo Specifies whether the upper or lower triangular part is to be referenced.
+ * @param alpha The scalar alpha.
+ * @param A The input allocation contains matrix A, supported elements type: {Element#F32}.
+ * @param X The input allocation contains vector x, supported elements type: {Element#F32}.
+ * @param incX The increment for the elements of vector x, must be larger than zero.
+ * @param beta The scalar beta.
+ * @param Y The input allocation contains vector y, supported elements type: {Element#F32}.
+ * @param incY The increment for the elements of vector y, must be larger than zero.
+ */
+ void SSYMV(RsBlasUplo Uplo, float alpha, const sp<Allocation>& A, const sp<Allocation>& X,
+ int incX, float beta, const sp<Allocation>& Y, int incY);
+
+ /**
+ * SSBMV performs the matrix-vector operation
+ * y := alpha*A*x + beta*y
+ *
+ * Details: http://www.netlib.org/lapack/explore-html/d3/da1/ssbmv_8f.html
+ *
+ * Note: For an N*N matrix, the input Allocation should also be of size N*N (dimY = N, dimX = N),
+ * but only the region N*(K+1) will be referenced. The following subroutine is an
+ * example showing how to convert an UPPER triangular matrix 'a' to a row-based band matrix 'b'.
+ * for i in range(0, n):
+ * for j in range(i, min(i+k+1, n)):
+ * b[i, j-i] = a[i, j]
+ *
+ * @param Uplo Specifies whether the upper or lower triangular part of the band matrix A is being supplied.
+ * @param K The number of off-diagonals of the matrix A.
+ * @param alpha The scalar alpha.
+ * @param A The input allocation contains matrix A, supported elements type: {Element#F32}.
+ * @param X The input allocation contains vector x, supported elements type: {Element#F32}.
+ * @param incX The increment for the elements of vector x, must be larger than zero.
+ * @param beta The scalar beta.
+ * @param Y The input allocation contains vector y, supported elements type: {Element#F32}.
+ * @param incY The increment for the elements of vector y, must be larger than zero.
+ */
+ void SSBMV(RsBlasUplo Uplo, int K, float alpha, const sp<Allocation>& A, const sp<Allocation>& X,
+ int incX, float beta, const sp<Allocation>& Y, int incY);
+
+ /**
+ * SSPMV performs the matrix-vector operation
+ * y := alpha*A*x + beta*y
+ *
+ * Details: http://www.netlib.org/lapack/explore-html/d8/d68/sspmv_8f.html
+ *
+ * Note: For an N*N matrix, the input Allocation should be a 1D allocation of size dimX = N*(N+1)/2.
+ * The following subroutine is an example showing how to convert an UPPER triangular matrix
+ * 'a' to a packed matrix 'b'.
+ * k = 0
+ * for i in range(0, n):
+ * for j in range(i, n):
+ * b[k++] = a[i, j]
+ *
+ * @param Uplo Specifies whether the upper or lower triangular part of the matrix A is supplied in packed form.
+ * @param alpha The scalar alpha.
+ * @param Ap The input allocation contains matrix A, supported elements type: {Element#F32}.
+ * @param X The input allocation contains vector x, supported elements type: {Element#F32}.
+ * @param incX The increment for the elements of vector x, must be larger than zero.
+ * @param beta The scalar beta.
+ * @param Y The input allocation contains vector y, supported elements type: {Element#F32}.
+ * @param incY The increment for the elements of vector y, must be larger than zero.
+ */
+ void SSPMV(RsBlasUplo Uplo, float alpha, const sp<Allocation>& Ap, const sp<Allocation>& X,
+ int incX, float beta, const sp<Allocation>& Y, int incY);
+
+ /**
+ * SGER performs the rank 1 operation
+ * A := alpha*x*y**T + A
+ *
+ * Details: http://www.netlib.org/lapack/explore-html/db/d5c/sger_8f.html
+ *
+ * @param alpha The scalar alpha.
+ * @param X The input allocation contains vector x, supported elements type: {Element#F32}.
+ * @param incX The increment for the elements of vector x, must be larger than zero.
+ * @param Y The input allocation contains vector y, supported elements type: {Element#F32}.
+ * @param incY The increment for the elements of vector y, must be larger than zero.
+ * @param A The input allocation contains matrix A, supported elements type: {Element#F32}.
+ */
+ void SGER(float alpha, const sp<Allocation>& X, int incX, const sp<Allocation>& Y, int incY, const sp<Allocation>& A);
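+
+ /*
+ * Usage sketch (illustrative; for an M*N matrix A, X is assumed to hold M
+ * elements and Y to hold N elements, all F32):
+ *
+ *   blas->SGER(1.0f, X, 1, Y, 1, A);
+ */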
+
+ /**
+ * SSYR performs the rank 1 operation
+ * A := alpha*x*x**T + A
+ *
+ * Details: http://www.netlib.org/lapack/explore-html/d6/dac/ssyr_8f.html
+ *
+ * @param Uplo Specifies whether the upper or lower triangular part is to be referenced.
+ * @param alpha The scalar alpha.
+ * @param X The input allocation contains vector x, supported elements type: {Element#F32}.
+ * @param incX The increment for the elements of vector x, must be larger than zero.
+ * @param A The input allocation contains matrix A, supported elements type: {Element#F32}.
+ */
+ void SSYR(RsBlasUplo Uplo, float alpha, const sp<Allocation>& X, int incX, const sp<Allocation>& A);
+
+ /**
+ * SSPR performs the rank 1 operation
+ * A := alpha*x*x**T + A
+ *
+ * Details: http://www.netlib.org/lapack/explore-html/d2/d9b/sspr_8f.html
+ *
+ * Note: For an N*N matrix, the input Allocation should be a 1D allocation of size dimX = N*(N+1)/2.
+ * The following subroutine is an example showing how to convert an UPPER triangular matrix
+ * 'a' to a packed matrix 'b'.
+ * k = 0
+ * for i in range(0, n):
+ * for j in range(i, n):
+ * b[k++] = a[i, j]
+ *
+ * @param Uplo Specifies whether the upper or lower triangular part is to be supplied in the packed form.
+ * @param alpha The scalar alpha.
+ * @param X The input allocation contains vector x, supported elements type: {Element#F32}.
+ * @param incX The increment for the elements of vector x, must be larger than zero.
+ * @param Ap The input allocation contains matrix A, supported elements type: {Element#F32}.
+ */
+ void SSPR(RsBlasUplo Uplo, float alpha, const sp<Allocation>& X, int incX, const sp<Allocation>& Ap);
+
+ /**
+ * SSYR2 performs the symmetric rank 2 operation
+ * A := alpha*x*y**T + alpha*y*x**T + A
+ *
+ * Details: http://www.netlib.org/lapack/explore-html/db/d99/ssyr2_8f.html
+ *
+ * @param Uplo Specifies whether the upper or lower triangular part is to be referenced.
+ * @param alpha The scalar alpha.
+ * @param X The input allocation contains vector x, supported elements type: {Element#F32}.
+ * @param incX The increment for the elements of vector x, must be larger than zero.
+ * @param Y The input allocation contains vector y, supported elements type: {Element#F32}.
+ * @param incY The increment for the elements of vector y, must be larger than zero.
+ * @param A The input allocation contains matrix A, supported elements type: {Element#F32}.
+ */
+ void SSYR2(RsBlasUplo Uplo, float alpha, const sp<Allocation>& X, int incX,
+ const sp<Allocation>& Y, int incY, const sp<Allocation>& A);
+
+ /**
+ * SSPR2 performs the symmetric rank 2 operation
+ * A := alpha*x*y**T + alpha*y*x**T + A
+ *
+ * Details: http://www.netlib.org/lapack/explore-html/db/d3e/sspr2_8f.html
+ *
+ * Note: For an N*N matrix, the input Allocation should be a 1D allocation of size dimX = N*(N+1)/2.
+ * The following subroutine is an example showing how to convert an UPPER triangular matrix
+ * 'a' to a packed matrix 'b'.
+ * k = 0
+ * for i in range(0, n):
+ * for j in range(i, n):
+ * b[k++] = a[i, j]
+ *
+ * @param Uplo Specifies whether the upper or lower triangular part is to be supplied in the packed form.
+ * @param alpha The scalar alpha.
+ * @param X The input allocation contains vector x, supported elements type: {Element#F32}.
+ * @param incX The increment for the elements of vector x, must be larger than zero.
+ * @param Y The input allocation contains vector y, supported elements type: {Element#F32}.
+ * @param incY The increment for the elements of vector y, must be larger than zero.
+ * @param Ap The input allocation contains matrix A, supported elements type: {Element#F32}.
+ */
+ void SSPR2(RsBlasUplo Uplo, float alpha, const sp<Allocation>& X, int incX,
+ const sp<Allocation>& Y, int incY, const sp<Allocation>& Ap);
+
+ /**
+ * DSYMV performs the matrix-vector operation
+ * y := alpha*A*x + beta*y
+ *
+ * Details: http://www.netlib.org/lapack/explore-html/d8/dbe/dsymv_8f.html
+ *
+ * @param Uplo Specifies whether the upper or lower triangular part is to be referenced.
+ * @param alpha The scalar alpha.
+ * @param A The input allocation contains matrix A, supported elements type: {Element#F64}.
+ * @param X The input allocation contains vector x, supported elements type: {Element#F64}.
+ * @param incX The increment for the elements of vector x, must be larger than zero.
+ * @param beta The scalar beta.
+ * @param Y The input allocation contains vector y, supported elements type: {Element#F64}.
+ * @param incY The increment for the elements of vector y, must be larger than zero.
+ */
+ void DSYMV(RsBlasUplo Uplo, double alpha, const sp<Allocation>& A, const sp<Allocation>& X, int incX,
+ double beta, const sp<Allocation>& Y, int incY);
+
+ /**
+ * DSBMV performs the matrix-vector operation
+ * y := alpha*A*x + beta*y
+ *
+ * Details: http://www.netlib.org/lapack/explore-html/d8/d1e/dsbmv_8f.html
+ *
+ * Note: For an N*N matrix, the input Allocation should also be of size N*N (dimY = N, dimX = N),
+ * but only the region N*(K+1) will be referenced. The following subroutine is an
+ * example showing how to convert an UPPER triangular matrix 'a' to the row-based band matrix 'b'.
+ * for i in range(0, n):
+ * for j in range(i, min(i+k+1, n)):
+ * b[i, j-i] = a[i, j]
+ *
+ * @param Uplo Specifies whether the upper or lower triangular part of the band matrix A is being supplied.
+ * @param K The number of off-diagonals of the matrix A.
+ * @param alpha The scalar alpha.
+ * @param A The input allocation contains matrix A, supported elements type: {Element#F64}.
+ * @param X The input allocation contains vector x, supported elements type: {Element#F64}.
+ * @param incX The increment for the elements of vector x, must be larger than zero.
+ * @param beta The scalar beta.
+ * @param Y The input allocation contains vector y, supported elements type: {Element#F64}.
+ * @param incY The increment for the elements of vector y, must be larger than zero.
+ */
+ void DSBMV(RsBlasUplo Uplo, int K, double alpha, const sp<Allocation>& A, const sp<Allocation>& X, int incX,
+ double beta, const sp<Allocation>& Y, int incY);
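+
+ /*
+ * A C++ rendition of the band conversion in the Note above (a sketch; 'a'
+ * and 'b' are both N*N row-major buffers, K is the number of off-diagonals,
+ * and only the leftmost K+1 entries of each row of 'b' are written;
+ * std::min needs <algorithm>):
+ *
+ * for (size_t i = 0; i < N; i++) {
+ *     for (size_t j = i; j < std::min(i + K + 1, N); j++) {
+ *         b[i * N + (j - i)] = a[i * N + j];
+ *     }
+ * }
+ */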
+
+ /**
+ * DSPMV performs the matrix-vector operation
+ * y := alpha*A*x + beta*y
+ *
+ * Details: http://www.netlib.org/lapack/explore-html/d4/d85/dspmv_8f.html
+ *
+ * Note: For an N*N matrix, the input Allocation should be a 1D allocation of size dimX = N*(N+1)/2.
+ * The following subroutine is an example showing how to convert an UPPER triangular matrix
+ * 'a' to the packed matrix 'b'.
+ * k = 0
+ * for i in range(0, n):
+ * for j in range(i, n):
+ * b[k++] = a[i, j]
+ *
+ * @param Uplo Specifies whether the upper or lower triangular part of the matrix A is supplied in packed form.
+ * @param alpha The scalar alpha.
+ * @param Ap The input allocation contains matrix A, supported elements type: {Element#F64}.
+ * @param X The input allocation contains vector x, supported elements type: {Element#F64}.
+ * @param incX The increment for the elements of vector x, must be larger than zero.
+ * @param beta The scalar beta.
+ * @param Y The input allocation contains vector y, supported elements type: {Element#F64}.
+ * @param incY The increment for the elements of vector y, must be larger than zero.
+ */
+ void DSPMV(RsBlasUplo Uplo, double alpha, const sp<Allocation>& Ap, const sp<Allocation>& X, int incX,
+ double beta, const sp<Allocation>& Y, int incY);
+
+ /**
+ * DGER performs the rank 1 operation
+ * A := alpha*x*y**T + A
+ *
+ * Details: http://www.netlib.org/lapack/explore-html/dc/da8/dger_8f.html
+ *
+ * @param alpha The scalar alpha.
+ * @param X The input allocation contains vector x, supported elements type: {Element#F64}.
+ * @param incX The increment for the elements of vector x, must be larger than zero.
+ * @param Y The input allocation contains vector y, supported elements type: {Element#F64}.
+ * @param incY The increment for the elements of vector y, must be larger than zero.
+ * @param A The input allocation contains matrix A, supported elements type: {Element#F64}.
+ */
+ void DGER(double alpha, const sp<Allocation>& X, int incX, const sp<Allocation>& Y, int incY, const sp<Allocation>& A);
+
+ /**
+ * DSYR performs the rank 1 operation
+ * A := alpha*x*x**T + A
+ *
+ * Details: http://www.netlib.org/lapack/explore-html/d3/d60/dsyr_8f.html
+ *
+ * @param Uplo Specifies whether the upper or lower triangular part is to be referenced.
+ * @param alpha The scalar alpha.
+ * @param X The input allocation contains vector x, supported elements type: {Element#F64}.
+ * @param incX The increment for the elements of vector x, must be larger than zero.
+ * @param A The input allocation contains matrix A, supported elements type: {Element#F64}.
+ */
+ void DSYR(RsBlasUplo Uplo, double alpha, const sp<Allocation>& X, int incX, const sp<Allocation>& A);
+
+ /**
+ * DSPR performs the rank 1 operation
+ * A := alpha*x*x**T + A
+ *
+ * Details: http://www.netlib.org/lapack/explore-html/dd/dba/dspr_8f.html
+ *
+ * Note: For an N*N matrix, the input Allocation should be a 1D allocation of size dimX = N*(N+1)/2.
+ * The following subroutine is an example showing how to convert an UPPER triangular matrix
+ * 'a' to the packed matrix 'b'.
+ * k = 0
+ * for i in range(0, n):
+ * for j in range(i, n):
+ * b[k++] = a[i, j]
+ *
+ * @param Uplo Specifies whether the upper or lower triangular part is to be supplied in the packed form.
+ * @param alpha The scalar alpha.
+ * @param X The input allocation contains vector x, supported elements type: {Element#F64}.
+ * @param incX The increment for the elements of vector x, must be larger than zero.
+ * @param Ap The input allocation contains matrix A, supported elements type: {Element#F64}.
+ */
+ void DSPR(RsBlasUplo Uplo, double alpha, const sp<Allocation>& X, int incX, const sp<Allocation>& Ap);
+
+ /**
+ * DSYR2 performs the symmetric rank 2 operation
+ * A := alpha*x*y**T + alpha*y*x**T + A
+ *
+ * Details: http://www.netlib.org/lapack/explore-html/de/d41/dsyr2_8f.html
+ *
+ * @param Uplo Specifies whether the upper or lower triangular part is to be referenced.
+ * @param alpha The scalar alpha.
+ * @param X The input allocation contains vector x, supported elements type: {Element#F64}.
+ * @param incX The increment for the elements of vector x, must be larger than zero.
+ * @param Y The input allocation contains vector y, supported elements type: {Element#F64}.
+ * @param incY The increment for the elements of vector y, must be larger than zero.
+ * @param A The input allocation contains matrix A, supported elements type: {Element#F64}.
+ */
+ void DSYR2(RsBlasUplo Uplo, double alpha, const sp<Allocation>& X, int incX,
+ const sp<Allocation>& Y, int incY, const sp<Allocation>& A);
+
+ /**
+ * DSPR2 performs the symmetric rank 2 operation
+ * A := alpha*x*y**T + alpha*y*x**T + A
+ *
+ * Details: http://www.netlib.org/lapack/explore-html/dd/d9e/dspr2_8f.html
+ *
+ * Note: For an N*N matrix, the input Allocation should be a 1D allocation of size dimX = N*(N+1)/2.
+ * The following subroutine is an example showing how to convert an UPPER triangular matrix
+ * 'a' to the packed matrix 'b'.
+ * k = 0
+ * for i in range(0, n):
+ * for j in range(i, n):
+ * b[k++] = a[i, j]
+ *
+ * @param Uplo Specifies whether the upper or lower triangular part is to be supplied in the packed form.
+ * @param alpha The scalar alpha.
+ * @param X The input allocation contains vector x, supported elements type: {Element#F64}.
+ * @param incX The increment for the elements of vector x, must be larger than zero.
+ * @param Y The input allocation contains vector y, supported elements type: {Element#F64}.
+ * @param incY The increment for the elements of vector y, must be larger than zero.
+ * @param Ap The input allocation contains matrix A, supported elements type: {Element#F64}.
+ */
+ void DSPR2(RsBlasUplo Uplo, double alpha, const sp<Allocation>& X, int incX,
+ const sp<Allocation>& Y, int incY, const sp<Allocation>& Ap);
+
+ /**
+ * CHEMV performs the matrix-vector operation
+ * y := alpha*A*x + beta*y
+ *
+ * Details: http://www.netlib.org/lapack/explore-html/d7/d51/chemv_8f.html
+ *
+ * @param Uplo Specifies whether the upper or lower triangular part is to be referenced.
+ * @param alpha The scalar alpha.
+ * @param A The input allocation contains matrix A, supported elements type: {Element#F32_2}.
+ * @param X The input allocation contains vector x, supported elements type: {Element#F32_2}.
+ * @param incX The increment for the elements of vector x, must be larger than zero.
+ * @param beta The scalar beta.
+ * @param Y The input allocation contains vector y, supported elements type: {Element#F32_2}.
+ * @param incY The increment for the elements of vector y, must be larger than zero.
+ */
+ void CHEMV(RsBlasUplo Uplo, Float2 alpha, const sp<Allocation>& A, const sp<Allocation>& X,
+ int incX, Float2 beta, const sp<Allocation>& Y, int incY);
+
+ /**
+ * CHBMV performs the matrix-vector operation
+ * y := alpha*A*x + beta*y
+ *
+ * Details: http://www.netlib.org/lapack/explore-html/db/dc2/chbmv_8f.html
+ *
+ * Note: For an N*N matrix, the input Allocation should also be of size N*N (dimY = N, dimX = N),
+ * but only the region N*(K+1) will be referenced. The following subroutine is an
+ * example showing how to convert an UPPER triangular matrix 'a' to the row-based band matrix 'b'.
+ * for i in range(0, n):
+ * for j in range(i, min(i+k+1, n)):
+ * b[i, j-i] = a[i, j]
+ *
+ * @param Uplo Specifies whether the upper or lower triangular part of the band matrix A is being supplied.
+ * @param K The number of off-diagonals of the matrix A.
+ * @param alpha The scalar alpha.
+ * @param A The input allocation contains matrix A, supported elements type: {Element#F32_2}.
+ * @param X The input allocation contains vector x, supported elements type: {Element#F32_2}.
+ * @param incX The increment for the elements of vector x, must be larger than zero.
+ * @param beta The scalar beta.
+ * @param Y The input allocation contains vector y, supported elements type: {Element#F32_2}.
+ * @param incY The increment for the elements of vector y, must be larger than zero.
+ */
+ void CHBMV(RsBlasUplo Uplo, int K, Float2 alpha, const sp<Allocation>& A, const sp<Allocation>& X,
+ int incX, Float2 beta, const sp<Allocation>& Y, int incY);
+
+ /**
+ * CHPMV performs the matrix-vector operation
+ * y := alpha*A*x + beta*y
+ *
+ * Details: http://www.netlib.org/lapack/explore-html/d2/d06/chpmv_8f.html
+ *
+ * Note: For an N*N matrix, the input Allocation should be a 1D allocation of size dimX = N*(N+1)/2.
+ * The following subroutine is an example showing how to convert an UPPER triangular matrix
+ * 'a' to the packed matrix 'b'.
+ * k = 0
+ * for i in range(0, n):
+ * for j in range(i, n):
+ * b[k++] = a[i, j]
+ *
+ * @param Uplo Specifies whether the upper or lower triangular part of the matrix A is supplied in packed form.
+ * @param alpha The scalar alpha.
+ * @param Ap The input allocation contains matrix A, supported elements type: {Element#F32_2}.
+ * @param X The input allocation contains vector x, supported elements type: {Element#F32_2}.
+ * @param incX The increment for the elements of vector x, must be larger than zero.
+ * @param beta The scalar beta.
+ * @param Y The input allocation contains vector y, supported elements type: {Element#F32_2}.
+ * @param incY The increment for the elements of vector y, must be larger than zero.
+ */
+ void CHPMV(RsBlasUplo Uplo, Float2 alpha, const sp<Allocation>& Ap, const sp<Allocation>& X,
+ int incX, Float2 beta, const sp<Allocation>& Y, int incY);
+
+ /**
+ * CGERU performs the rank 1 operation
+ * A := alpha*x*y**T + A
+ *
+ * Details: http://www.netlib.org/lapack/explore-html/db/d5f/cgeru_8f.html
+ *
+ * @param alpha The scalar alpha.
+ * @param X The input allocation contains vector x, supported elements type: {Element#F32_2}.
+ * @param incX The increment for the elements of vector x, must be larger than zero.
+ * @param Y The input allocation contains vector y, supported elements type: {Element#F32_2}.
+ * @param incY The increment for the elements of vector y, must be larger than zero.
+ * @param A The input allocation contains matrix A, supported elements type: {Element#F32_2}.
+ */
+ void CGERU(Float2 alpha, const sp<Allocation>& X, int incX,
+ const sp<Allocation>& Y, int incY, const sp<Allocation>& A);
+
+ /**
+ * CGERC performs the rank 1 operation
+ * A := alpha*x*y**H + A
+ *
+ * Details: http://www.netlib.org/lapack/explore-html/dd/d84/cgerc_8f.html
+ *
+ * @param alpha The scalar alpha.
+ * @param X The input allocation contains vector x, supported elements type: {Element#F32_2}.
+ * @param incX The increment for the elements of vector x, must be larger than zero.
+ * @param Y The input allocation contains vector y, supported elements type: {Element#F32_2}.
+ * @param incY The increment for the elements of vector y, must be larger than zero.
+ * @param A The input allocation contains matrix A, supported elements type: {Element#F32_2}.
+ */
+ void CGERC(Float2 alpha, const sp<Allocation>& X, int incX,
+ const sp<Allocation>& Y, int incY, const sp<Allocation>& A);
+
+ /**
+ * CHER performs the rank 1 operation
+ * A := alpha*x*x**H + A
+ *
+ * Details: http://www.netlib.org/lapack/explore-html/d3/d6d/cher_8f.html
+ *
+ * @param Uplo Specifies whether the upper or lower triangular part is to be referenced.
+ * @param alpha The scalar alpha.
+ * @param X The input allocation contains vector x, supported elements type: {Element#F32_2}.
+ * @param incX The increment for the elements of vector x, must be larger than zero.
+ * @param A The input allocation contains matrix A, supported elements type: {Element#F32_2}.
+ */
+ void CHER(RsBlasUplo Uplo, float alpha, const sp<Allocation>& X, int incX, const sp<Allocation>& A);
+
+ /**
+ * CHPR performs the rank 1 operation
+ * A := alpha*x*x**H + A
+ *
+ * Details: http://www.netlib.org/lapack/explore-html/db/dcd/chpr_8f.html
+ *
+ * Note: For an N*N matrix, the input Allocation should be a 1D allocation of size dimX = N*(N+1)/2.
+ * The following subroutine is an example showing how to convert an UPPER triangular matrix
+ * 'a' to the packed matrix 'b'.
+ * k = 0
+ * for i in range(0, n):
+ * for j in range(i, n):
+ * b[k++] = a[i, j]
+ *
+ * @param Uplo Specifies whether the upper or lower triangular part is to be supplied in the packed form.
+ * @param alpha The scalar alpha.
+ * @param X The input allocation contains vector x, supported elements type: {Element#F32_2}.
+ * @param incX The increment for the elements of vector x, must be larger than zero.
+ * @param Ap The input allocation contains matrix A, supported elements type: {Element#F32_2}.
+ */
+ void CHPR(RsBlasUplo Uplo, float alpha, const sp<Allocation>& X, int incX, const sp<Allocation>& Ap);
+
+ /**
+ * CHER2 performs the hermitian rank 2 operation
+ * A := alpha*x*y**H + conjg( alpha )*y*x**H + A
+ *
+ * Details: http://www.netlib.org/lapack/explore-html/db/d87/cher2_8f.html
+ *
+ * @param Uplo Specifies whether the upper or lower triangular part is to be referenced.
+ * @param alpha The scalar alpha.
+ * @param X The input allocation contains vector x, supported elements type: {Element#F32_2}.
+ * @param incX The increment for the elements of vector x, must be larger than zero.
+ * @param Y The input allocation contains vector y, supported elements type: {Element#F32_2}.
+ * @param incY The increment for the elements of vector y, must be larger than zero.
+ * @param A The input allocation contains matrix A, supported elements type: {Element#F32_2}.
+ */
+ void CHER2(RsBlasUplo Uplo, Float2 alpha, const sp<Allocation>& X, int incX,
+ const sp<Allocation>& Y, int incY, const sp<Allocation>& A);
+
+ /**
+ * CHPR2 performs the hermitian rank 2 operation
+ * A := alpha*x*y**H + conjg( alpha )*y*x**H + A
+ *
+ * Details: http://www.netlib.org/lapack/explore-html/d6/d44/chpr2_8f.html
+ *
+ * Note: For an N*N matrix, the input Allocation should be a 1D allocation of size dimX = N*(N+1)/2.
+ * The following subroutine is an example showing how to convert an UPPER triangular matrix
+ * 'a' to the packed matrix 'b'.
+ * k = 0
+ * for i in range(0, n):
+ * for j in range(i, n):
+ * b[k++] = a[i, j]
+ *
+ * @param Uplo Specifies whether the upper or lower triangular part is to be supplied in the packed form.
+ * @param alpha The scalar alpha.
+ * @param X The input allocation contains vector x, supported elements type: {Element#F32_2}.
+ * @param incX The increment for the elements of vector x, must be larger than zero.
+ * @param Y The input allocation contains vector y, supported elements type: {Element#F32_2}.
+ * @param incY The increment for the elements of vector y, must be larger than zero.
+ * @param Ap The input allocation contains matrix A, supported elements type: {Element#F32_2}.
+ */
+ void CHPR2(RsBlasUplo Uplo, Float2 alpha, const sp<Allocation>& X, int incX,
+ const sp<Allocation>& Y, int incY, const sp<Allocation>& Ap);
+
+ /**
+ * ZHEMV performs the matrix-vector operation
+ * y := alpha*A*x + beta*y
+ *
+ * Details: http://www.netlib.org/lapack/explore-html/d0/ddd/zhemv_8f.html
+ *
+ * @param Uplo Specifies whether the upper or lower triangular part is to be referenced.
+ * @param alpha The scalar alpha.
+ * @param A The input allocation contains matrix A, supported elements type: {Element#F64_2}.
+ * @param X The input allocation contains vector x, supported elements type: {Element#F64_2}.
+ * @param incX The increment for the elements of vector x, must be larger than zero.
+ * @param beta The scalar beta.
+ * @param Y The input allocation contains vector y, supported elements type: {Element#F64_2}.
+ * @param incY The increment for the elements of vector y, must be larger than zero.
+ */
+ void ZHEMV(RsBlasUplo Uplo, Double2 alpha, const sp<Allocation>& A, const sp<Allocation>& X,
+ int incX, Double2 beta, const sp<Allocation>& Y, int incY);
+
+ /**
+ * ZHBMV performs the matrix-vector operation
+ * y := alpha*A*x + beta*y
+ *
+ * Details: http://www.netlib.org/lapack/explore-html/d3/d1a/zhbmv_8f.html
+ *
+ * Note: For an N*N matrix, the input Allocation should also be of size N*N (dimY = N, dimX = N),
+ * but only the region N*(K+1) will be referenced. The following subroutine is an
+ * example showing how to convert an UPPER triangular matrix 'a' to the row-based band matrix 'b'.
+ * for i in range(0, n):
+ * for j in range(i, min(i+k+1, n)):
+ * b[i, j-i] = a[i, j]
+ *
+ * @param Uplo Specifies whether the upper or lower triangular part of the band matrix A is being supplied.
+ * @param K The number of off-diagonals of the matrix A.
+ * @param alpha The scalar alpha.
+ * @param A The input allocation contains matrix A, supported elements type: {Element#F64_2}.
+ * @param X The input allocation contains vector x, supported elements type: {Element#F64_2}.
+ * @param incX The increment for the elements of vector x, must be larger than zero.
+ * @param beta The scalar beta.
+ * @param Y The input allocation contains vector y, supported elements type: {Element#F64_2}.
+ * @param incY The increment for the elements of vector y, must be larger than zero.
+ */
+ void ZHBMV(RsBlasUplo Uplo, int K, Double2 alpha, const sp<Allocation>& A, const sp<Allocation>& X,
+ int incX, Double2 beta, const sp<Allocation>& Y, int incY);
+
+ /**
+ * ZHPMV performs the matrix-vector operation
+ * y := alpha*A*x + beta*y
+ *
+ * Details: http://www.netlib.org/lapack/explore-html/d0/d60/zhpmv_8f.html
+ *
+ * Note: For an N*N matrix, the input Allocation should be a 1D allocation of size dimX = N*(N+1)/2.
+ * The following subroutine is an example showing how to convert an UPPER triangular matrix
+ * 'a' to the packed matrix 'b'.
+ * k = 0
+ * for i in range(0, n):
+ * for j in range(i, n):
+ * b[k++] = a[i, j]
+ *
+ * @param Uplo Specifies whether the upper or lower triangular part of the matrix A is supplied in packed form.
+ * @param alpha The scalar alpha.
+ * @param Ap The input allocation contains matrix A, supported elements type: {Element#F64_2}.
+ * @param X The input allocation contains vector x, supported elements type: {Element#F64_2}.
+ * @param incX The increment for the elements of vector x, must be larger than zero.
+ * @param beta The scalar beta.
+ * @param Y The input allocation contains vector y, supported elements type: {Element#F64_2}.
+ * @param incY The increment for the elements of vector y, must be larger than zero.
+ */
+ void ZHPMV(RsBlasUplo Uplo, Double2 alpha, const sp<Allocation>& Ap, const sp<Allocation>& X,
+ int incX, Double2 beta, const sp<Allocation>& Y, int incY);
+
+ /**
+ * ZGERU performs the rank 1 operation
+ * A := alpha*x*y**T + A
+ *
+ * Details: http://www.netlib.org/lapack/explore-html/d7/d12/zgeru_8f.html
+ *
+ * @param alpha The scalar alpha.
+ * @param X The input allocation contains vector x, supported elements type: {Element#F64_2}.
+ * @param incX The increment for the elements of vector x, must be larger than zero.
+ * @param Y The input allocation contains vector y, supported elements type: {Element#F64_2}.
+ * @param incY The increment for the elements of vector y, must be larger than zero.
+ * @param A The input allocation contains matrix A, supported elements type: {Element#F64_2}.
+ */
+ void ZGERU(Double2 alpha, const sp<Allocation>& X, int incX,
+ const sp<Allocation>& Y, int incY, const sp<Allocation>& A);
+
+ /**
+ * ZGERC performs the rank 1 operation
+ * A := alpha*x*y**H + A
+ *
+ * Details: http://www.netlib.org/lapack/explore-html/d3/dad/zgerc_8f.html
+ *
+ * @param alpha The scalar alpha.
+ * @param X The input allocation contains vector x, supported elements type: {Element#F64_2}.
+ * @param incX The increment for the elements of vector x, must be larger than zero.
+ * @param Y The input allocation contains vector y, supported elements type: {Element#F64_2}.
+ * @param incY The increment for the elements of vector y, must be larger than zero.
+ * @param A The input allocation contains matrix A, supported elements type: {Element#F64_2}.
+ */
+ void ZGERC(Double2 alpha, const sp<Allocation>& X, int incX,
+ const sp<Allocation>& Y, int incY, const sp<Allocation>& A);
+
+ /**
+ * ZHER performs the rank 1 operation
+ * A := alpha*x*x**H + A
+ *
+ * Details: http://www.netlib.org/lapack/explore-html/de/d0e/zher_8f.html
+ *
+ * @param Uplo Specifies whether the upper or lower triangular part is to be referenced.
+ * @param alpha The scalar alpha.
+ * @param X The input allocation contains vector x, supported elements type: {Element#F64_2}.
+ * @param incX The increment for the elements of vector x, must be larger than zero.
+ * @param A The input allocation contains matrix A, supported elements type: {Element#F64_2}.
+ */
+ void ZHER(RsBlasUplo Uplo, double alpha, const sp<Allocation>& X, int incX, const sp<Allocation>& A);
+
+ /**
+ * ZHPR performs the rank 1 operation
+ * A := alpha*x*x**H + A
+ *
+ * Details: http://www.netlib.org/lapack/explore-html/de/de1/zhpr_8f.html
+ *
+ * Note: For an N*N matrix, the input Allocation should be a 1D allocation of size dimX = N*(N+1)/2.
+ * The following subroutine is an example showing how to convert an UPPER triangular matrix
+ * 'a' to the packed matrix 'b'.
+ * k = 0
+ * for i in range(0, n):
+ * for j in range(i, n):
+ * b[k++] = a[i, j]
+ *
+ * @param Uplo Specifies whether the upper or lower triangular part is to be supplied in the packed form.
+ * @param alpha The scalar alpha.
+ * @param X The input allocation contains vector x, supported elements type: {Element#F64_2}.
+ * @param incX The increment for the elements of vector x, must be larger than zero.
+ * @param Ap The input allocation contains matrix A, supported elements type: {Element#F64_2}.
+ */
+ void ZHPR(RsBlasUplo Uplo, double alpha, const sp<Allocation>& X, int incX, const sp<Allocation>& Ap);
+
+ /**
+ * ZHER2 performs the hermitian rank 2 operation
+ * A := alpha*x*y**H + conjg( alpha )*y*x**H + A
+ *
+ * Details: http://www.netlib.org/lapack/explore-html/da/d8a/zher2_8f.html
+ *
+ * @param Uplo Specifies whether the upper or lower triangular part is to be referenced.
+ * @param alpha The scalar alpha.
+ * @param X The input allocation contains vector x, supported elements type: {Element#F64_2}.
+ * @param incX The increment for the elements of vector x, must be larger than zero.
+ * @param Y The input allocation contains vector y, supported elements type: {Element#F64_2}.
+ * @param incY The increment for the elements of vector y, must be larger than zero.
+ * @param A The input allocation contains matrix A, supported elements type: {Element#F64_2}.
+ */
+ void ZHER2(RsBlasUplo Uplo, Double2 alpha, const sp<Allocation>& X, int incX,
+ const sp<Allocation>& Y, int incY, const sp<Allocation>& A);
+
+ /**
+ * ZHPR2 performs the hermitian rank 2 operation
+ * A := alpha*x*y**H + conjg( alpha )*y*x**H + A
+ *
+ * Details: http://www.netlib.org/lapack/explore-html/d5/d52/zhpr2_8f.html
+ *
+ * Note: For an N*N matrix, the input Allocation should be a 1D allocation of size dimX = N*(N+1)/2.
+ * The following subroutine is an example showing how to convert an UPPER triangular matrix
+ * 'a' to the packed matrix 'b'.
+ * k = 0
+ * for i in range(0, n):
+ * for j in range(i, n):
+ * b[k++] = a[i, j]
+ *
+ * @param Uplo Specifies whether the upper or lower triangular part is to be supplied in the packed form.
+ * @param alpha The scalar alpha.
+ * @param X The input allocation contains vector x, supported elements type: {Element#F64_2}.
+ * @param incX The increment for the elements of vector x, must be larger than zero.
+ * @param Y The input allocation contains vector y, supported elements type: {Element#F64_2}.
+ * @param incY The increment for the elements of vector y, must be larger than zero.
+ * @param Ap The input allocation contains matrix A, supported elements type: {Element#F64_2}.
+ */
+ void ZHPR2(RsBlasUplo Uplo, Double2 alpha, const sp<Allocation>& X, int incX,
+ const sp<Allocation>& Y, int incY, const sp<Allocation>& Ap);
+
+ /**
+ * SGEMM performs one of the matrix-matrix operations
+ * C := alpha*op(A)*op(B) + beta*C where op(X) is one of op(X) = X or op(X) = X**T
+ *
+ * Details: http://www.netlib.org/lapack/explore-html/d4/de2/sgemm_8f.html
+ *
+ * @param TransA The type of transpose applied to matrix A.
+ * @param TransB The type of transpose applied to matrix B.
+ * @param alpha The scalar alpha.
+ * @param A The input allocation contains matrix A, supported elements type: {Element#F32}.
+ * @param B The input allocation contains matrix B, supported elements type: {Element#F32}.
+ * @param beta The scalar beta.
+ * @param C The input allocation contains matrix C, supported elements type: {Element#F32}.
+ */
+ void SGEMM(RsBlasTranspose TransA, RsBlasTranspose TransB, float alpha, const sp<Allocation>& A,
+ const sp<Allocation>& B, float beta, const sp<Allocation>& C);
+
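+ /*
+ * A minimal usage sketch for SGEMM, assuming an initialized context 'rs'
+ * and F32 Allocations A (m*k), B (k*n) and C (m*n); the enum values are
+ * the RsBlasTranspose names from rsDefines.h. The matrix dimensions are
+ * implied by the Allocations, as the API takes no explicit m/n/k arguments:
+ *
+ * sp<ScriptIntrinsicBLAS> blas = ScriptIntrinsicBLAS::create(rs);
+ * blas->SGEMM(RsBlasNoTrans, RsBlasNoTrans, 1.0f, A, B, 0.0f, C); // C := A*B
+ */
+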
+ /**
+ * DGEMM performs one of the matrix-matrix operations
+ * C := alpha*op(A)*op(B) + beta*C where op(X) is one of op(X) = X or op(X) = X**T
+ *
+ * Details: http://www.netlib.org/lapack/explore-html/d7/d2b/dgemm_8f.html
+ *
+ * @param TransA The type of transpose applied to matrix A.
+ * @param TransB The type of transpose applied to matrix B.
+ * @param alpha The scalar alpha.
+ * @param A The input allocation contains matrix A, supported elements type: {Element#F64}.
+ * @param B The input allocation contains matrix B, supported elements type: {Element#F64}.
+ * @param beta The scalar beta.
+ * @param C The input allocation contains matrix C, supported elements type: {Element#F64}.
+ */
+ void DGEMM(RsBlasTranspose TransA, RsBlasTranspose TransB, double alpha, const sp<Allocation>& A,
+ const sp<Allocation>& B, double beta, const sp<Allocation>& C);
+
+ /**
+ * CGEMM performs one of the matrix-matrix operations
+ * C := alpha*op(A)*op(B) + beta*C where op(X) is one of op(X) = X or op(X) = X**T or op(X) = X**H
+ *
+ * Details: http://www.netlib.org/lapack/explore-html/d6/d5b/cgemm_8f.html
+ *
+ * @param TransA The type of transpose applied to matrix A.
+ * @param TransB The type of transpose applied to matrix B.
+ * @param alpha The scalar alpha.
+ * @param A The input allocation contains matrix A, supported elements type: {Element#F32_2}.
+ * @param B The input allocation contains matrix B, supported elements type: {Element#F32_2}.
+ * @param beta The scalar beta.
+ * @param C The input allocation contains matrix C, supported elements type: {Element#F32_2}.
+ */
+ void CGEMM(RsBlasTranspose TransA, RsBlasTranspose TransB, Float2 alpha, const sp<Allocation>& A,
+ const sp<Allocation>& B, Float2 beta, const sp<Allocation>& C);
+
+ /**
+ * ZGEMM performs one of the matrix-matrix operations
+ * C := alpha*op(A)*op(B) + beta*C where op(X) is one of op(X) = X or op(X) = X**T or op(X) = X**H
+ *
+ * Details: http://www.netlib.org/lapack/explore-html/d7/d76/zgemm_8f.html
+ *
+ * @param TransA The type of transpose applied to matrix A.
+ * @param TransB The type of transpose applied to matrix B.
+ * @param alpha The scalar alpha.
+ * @param A The input allocation contains matrix A, supported elements type: {Element#F64_2}.
+ * @param B The input allocation contains matrix B, supported elements type: {Element#F64_2}.
+ * @param beta The scalar beta.
+ * @param C The input allocation contains matrix C, supported elements type: {Element#F64_2}.
+ */
+ void ZGEMM(RsBlasTranspose TransA, RsBlasTranspose TransB, Double2 alpha, const sp<Allocation>& A,
+ const sp<Allocation>& B, Double2 beta, const sp<Allocation>& C);
+
+ /**
+ * SSYMM performs one of the matrix-matrix operations
+ * C := alpha*A*B + beta*C or C := alpha*B*A + beta*C
+ *
+ * Details: http://www.netlib.org/lapack/explore-html/d7/d42/ssymm_8f.html
+ *
+ * @param Side Specifies whether the symmetric matrix A appears on the left or right.
+ * @param Uplo Specifies whether the upper or lower triangular part is to be referenced.
+ * @param alpha The scalar alpha.
+ * @param A The input allocation contains matrix A, supported elements type: {Element#F32}.
+ * @param B The input allocation contains matrix B, supported elements type: {Element#F32}.
+ * @param beta The scalar beta.
+ * @param C The input allocation contains matrix C, supported elements type: {Element#F32}.
+ */
+ void SSYMM(RsBlasSide Side, RsBlasUplo Uplo, float alpha, const sp<Allocation>& A,
+ const sp<Allocation>& B, float beta, const sp<Allocation>& C);
+
+ /**
+ * DSYMM performs one of the matrix-matrix operations
+ * C := alpha*A*B + beta*C or C := alpha*B*A + beta*C
+ *
+ * Details: http://www.netlib.org/lapack/explore-html/d8/db0/dsymm_8f.html
+ *
+ * @param Side Specifies whether the symmetric matrix A appears on the left or right.
+ * @param Uplo Specifies whether the upper or lower triangular part is to be referenced.
+ * @param alpha The scalar alpha.
+ * @param A The input allocation contains matrix A, supported elements type: {Element#F64}.
+ * @param B The input allocation contains matrix B, supported elements type: {Element#F64}.
+ * @param beta The scalar beta.
+ * @param C The input allocation contains matrix C, supported elements type: {Element#F64}.
+ */
+ void DSYMM(RsBlasSide Side, RsBlasUplo Uplo, double alpha, const sp<Allocation>& A,
+ const sp<Allocation>& B, double beta, const sp<Allocation>& C);
+
+ /**
+ * CSYMM performs one of the matrix-matrix operations
+ * C := alpha*A*B + beta*C or C := alpha*B*A + beta*C
+ *
+ * Details: http://www.netlib.org/lapack/explore-html/db/d59/csymm_8f.html
+ *
+ * @param Side Specifies whether the symmetric matrix A appears on the left or right.
+ * @param Uplo Specifies whether the upper or lower triangular part is to be referenced.
+ * @param alpha The scalar alpha.
+ * @param A The input allocation contains matrix A, supported elements type: {Element#F32_2}.
+ * @param B The input allocation contains matrix B, supported elements type: {Element#F32_2}.
+ * @param beta The scalar beta.
+ * @param C The input allocation contains matrix C, supported elements type: {Element#F32_2}.
+ */
+ void CSYMM(RsBlasSide Side, RsBlasUplo Uplo, Float2 alpha, const sp<Allocation>& A,
+ const sp<Allocation>& B, Float2 beta, const sp<Allocation>& C);
+
+ /**
+ * ZSYMM performs one of the matrix-matrix operations
+ * C := alpha*A*B + beta*C or C := alpha*B*A + beta*C
+ *
+ * Details: http://www.netlib.org/lapack/explore-html/df/d51/zsymm_8f.html
+ *
+ * @param Side Specifies whether the symmetric matrix A appears on the left or right.
+ * @param Uplo Specifies whether the upper or lower triangular part is to be referenced.
+ * @param alpha The scalar alpha.
+ * @param A The input allocation contains matrix A, supported elements type: {Element#F64_2}.
+ * @param B The input allocation contains matrix B, supported elements type: {Element#F64_2}.
+ * @param beta The scalar beta.
+ * @param C The input allocation contains matrix C, supported elements type: {Element#F64_2}.
+ */
+ void ZSYMM(RsBlasSide Side, RsBlasUplo Uplo, Double2 alpha, const sp<Allocation>& A,
+ const sp<Allocation>& B, Double2 beta, const sp<Allocation>& C);
+
+ /**
+ * SSYRK performs one of the symmetric rank k operations
+ * C := alpha*A*A**T + beta*C or C := alpha*A**T*A + beta*C
+ *
+ * Details: http://www.netlib.org/lapack/explore-html/d0/d40/ssyrk_8f.html
+ *
+ * @param Uplo Specifies whether the upper or lower triangular part of C is to be referenced.
+ * @param Trans The type of transpose applied to the operation.
+ * @param alpha The scalar alpha.
+ * @param A The input allocation contains matrix A, supported elements type: {Element#F32}.
+ * @param beta The scalar beta.
+ * @param C The input allocation contains matrix C, supported elements type: {Element#F32}.
+ */
+ void SSYRK(RsBlasUplo Uplo, RsBlasTranspose Trans, float alpha,
+ const sp<Allocation>& A, float beta, const sp<Allocation>& C);
+
+ /**
+ * DSYRK performs one of the symmetric rank k operations
+ * C := alpha*A*A**T + beta*C or C := alpha*A**T*A + beta*C
+ *
+ * Details: http://www.netlib.org/lapack/explore-html/dc/d05/dsyrk_8f.html
+ *
+ * @param Uplo Specifies whether the upper or lower triangular part of C is to be referenced.
+ * @param Trans The type of transpose applied to the operation.
+ * @param alpha The scalar alpha.
+ * @param A The input allocation contains matrix A, supported elements type: {Element#F64}.
+ * @param beta The scalar beta.
+ * @param C The input allocation contains matrix C, supported elements type: {Element#F64}.
+ */
+ void DSYRK(RsBlasUplo Uplo, RsBlasTranspose Trans, double alpha,
+ const sp<Allocation>& A, double beta, const sp<Allocation>& C);
+
+ /**
+ * CSYRK performs one of the symmetric rank k operations
+ * C := alpha*A*A**T + beta*C or C := alpha*A**T*A + beta*C
+ *
+ * Details: http://www.netlib.org/lapack/explore-html/d3/d6a/csyrk_8f.html
+ *
+ * @param Uplo Specifies whether the upper or lower triangular part of C is to be referenced.
+ * @param Trans The type of transpose applied to the operation.
+ * @param alpha The scalar alpha.
+ * @param A The input allocation contains matrix A, supported elements type: {Element#F32_2}.
+ * @param beta The scalar beta.
+ * @param C The input allocation contains matrix C, supported elements type: {Element#F32_2}.
+ */
+ void CSYRK(RsBlasUplo Uplo, RsBlasTranspose Trans, Float2 alpha,
+ const sp<Allocation>& A, Float2 beta, const sp<Allocation>& C);
+
+ /**
+ * ZSYRK performs one of the symmetric rank k operations
+ * C := alpha*A*A**T + beta*C or C := alpha*A**T*A + beta*C
+ *
+ * Details: http://www.netlib.org/lapack/explore-html/de/d54/zsyrk_8f.html
+ *
+ * @param Uplo Specifies whether the upper or lower triangular part of C is to be referenced.
+ * @param Trans The type of transpose applied to the operation.
+ * @param alpha The scalar alpha.
+ * @param A The input allocation contains matrix A, supported elements type: {Element#F64_2}.
+ * @param beta The scalar beta.
+ * @param C The input allocation contains matrix C, supported elements type: {Element#F64_2}.
+ */
+ void ZSYRK(RsBlasUplo Uplo, RsBlasTranspose Trans, Double2 alpha,
+ const sp<Allocation>& A, Double2 beta, const sp<Allocation>& C);
+
+ /**
+ * SSYR2K performs one of the symmetric rank 2k operations
+ * C := alpha*A*B**T + alpha*B*A**T + beta*C or C := alpha*A**T*B + alpha*B**T*A + beta*C
+ *
+ * Details: http://www.netlib.org/lapack/explore-html/df/d3d/ssyr2k_8f.html
+ *
+ * @param Uplo Specifies whether the upper or lower triangular part of C is to be referenced.
+ * @param Trans The type of transpose applied to the operation.
+ * @param alpha The scalar alpha.
+ * @param A The input allocation contains matrix A, supported elements type: {Element#F32}.
+ * @param B The input allocation contains matrix B, supported elements type: {Element#F32}.
+ * @param beta The scalar beta.
+ * @param C The input allocation contains matrix C, supported elements type: {Element#F32}.
+ */
+ void SSYR2K(RsBlasUplo Uplo, RsBlasTranspose Trans, float alpha,
+ const sp<Allocation>& A, const sp<Allocation>& B, float beta, const sp<Allocation>& C);
+
+ /**
+ * DSYR2K performs one of the symmetric rank 2k operations
+ * C := alpha*A*B**T + alpha*B*A**T + beta*C or C := alpha*A**T*B + alpha*B**T*A + beta*C
+ *
+ * Details: http://www.netlib.org/lapack/explore-html/d1/dec/dsyr2k_8f.html
+ *
+ * @param Uplo Specifies whether the upper or lower triangular part of C is to be referenced.
+ * @param Trans The type of transpose applied to the operation.
+ * @param alpha The scalar alpha.
+ * @param A The input allocation contains matrix A, supported elements type: {Element#F64}.
+ * @param B The input allocation contains matrix B, supported elements type: {Element#F64}.
+ * @param beta The scalar beta.
+ * @param C The input allocation contains matrix C, supported elements type: {Element#F64}.
+ */
+ void DSYR2K(RsBlasUplo Uplo, RsBlasTranspose Trans, double alpha,
+ const sp<Allocation>& A, const sp<Allocation>& B, double beta, const sp<Allocation>& C);
+
+ /**
+ * CSYR2K performs one of the symmetric rank 2k operations
+ * C := alpha*A*B**T + alpha*B*A**T + beta*C or C := alpha*A**T*B + alpha*B**T*A + beta*C
+ *
+ * Details: http://www.netlib.org/lapack/explore-html/de/d7e/csyr2k_8f.html
+ *
+ * @param Uplo Specifies whether the upper or lower triangular part of C is to be referenced.
+ * @param Trans The type of transpose applied to the operation.
+ * @param alpha The scalar alpha.
+ * @param A The input allocation contains matrix A, supported elements type: {Element#F32_2}.
+ * @param B The input allocation contains matrix B, supported elements type: {Element#F32_2}.
+ * @param beta The scalar beta.
+ * @param C The input allocation contains matrix C, supported elements type: {Element#F32_2}.
+ */
+ void CSYR2K(RsBlasUplo Uplo, RsBlasTranspose Trans, Float2 alpha,
+ const sp<Allocation>& A, const sp<Allocation>& B, Float2 beta, const sp<Allocation>& C);
+
+ /**
+ * ZSYR2K performs one of the symmetric rank 2k operations
+ * C := alpha*A*B**T + alpha*B*A**T + beta*C or C := alpha*A**T*B + alpha*B**T*A + beta*C
+ *
+ * Details: http://www.netlib.org/lapack/explore-html/df/d20/zsyr2k_8f.html
+ *
+ * @param Uplo Specifies whether the upper or lower triangular part of C is to be referenced.
+ * @param Trans The type of transpose applied to the operation.
+ * @param alpha The scalar alpha.
+ * @param A The input allocation contains matrix A, supported elements type: {Element#F64_2}.
+ * @param B The input allocation contains matrix B, supported elements type: {Element#F64_2}.
+ * @param beta The scalar beta.
+ * @param C The input allocation contains matrix C, supported elements type: {Element#F64_2}.
+ */
+ void ZSYR2K(RsBlasUplo Uplo, RsBlasTranspose Trans, Double2 alpha,
+ const sp<Allocation>& A, const sp<Allocation>& B, Double2 beta, const sp<Allocation>& C);
+
+ /**
+ * STRMM performs one of the matrix-matrix operations
+ * B := alpha*op(A)*B or B := alpha*B*op(A)
+ * op(A) is one of op(A) = A or op(A) = A**T
+ *
+ * Details: http://www.netlib.org/lapack/explore-html/df/d01/strmm_8f.html
+ *
+ * @param Side Specifies whether the triangular matrix A appears on the left or right.
+ * @param Uplo Specifies whether matrix A is upper or lower triangular.
+ * @param TransA The type of transpose applied to matrix A.
+ * @param Diag Specifies whether or not A is unit triangular.
+ * @param alpha The scalar alpha.
+ * @param A The input allocation contains matrix A, supported elements type: {Element#F32}.
+ * @param B The input allocation contains matrix B, supported elements type: {Element#F32}.
+ */
+ void STRMM(RsBlasSide Side, RsBlasUplo Uplo, RsBlasTranspose TransA,
+ RsBlasDiag Diag, float alpha, const sp<Allocation>& A, const sp<Allocation>& B);
+
+ /**
+ * DTRMM performs one of the matrix-matrix operations
+ * B := alpha*op(A)*B or B := alpha*B*op(A)
+ * op(A) is one of op(A) = A or op(A) = A**T
+ *
+ * Details: http://www.netlib.org/lapack/explore-html/dd/d19/dtrmm_8f.html
+ *
+ * @param Side Specifies whether the triangular matrix A appears on the left or right.
+ * @param Uplo Specifies whether matrix A is upper or lower triangular.
+ * @param TransA The type of transpose applied to matrix A.
+ * @param Diag Specifies whether or not A is unit triangular.
+ * @param alpha The scalar alpha.
+ * @param A The input allocation contains matrix A, supported elements type: {Element#F64}.
+ * @param B The input allocation contains matrix B, supported elements type: {Element#F64}.
+ */
+ void DTRMM(RsBlasSide Side, RsBlasUplo Uplo, RsBlasTranspose TransA, RsBlasDiag Diag,
+ double alpha, const sp<Allocation>& A, const sp<Allocation>& B);
+
+ /**
+ * CTRMM performs one of the matrix-matrix operations
+ * B := alpha*op(A)*B or B := alpha*B*op(A)
+ * op(A) is one of op(A) = A or op(A) = A**T or op(A) = A**H
+ *
+ * Details: http://www.netlib.org/lapack/explore-html/d4/d9b/ctrmm_8f.html
+ *
+ * @param Side Specifies whether the triangular matrix A appears on the left or right.
+ * @param Uplo Specifies whether matrix A is upper or lower triangular.
+ * @param TransA The type of transpose applied to matrix A.
+ * @param Diag Specifies whether or not A is unit triangular.
+ * @param alpha The scalar alpha.
+ * @param A The input allocation contains matrix A, supported elements type: {Element#F32_2}.
+ * @param B The input allocation contains matrix B, supported elements type: {Element#F32_2}.
+ */
+ void CTRMM(RsBlasSide Side, RsBlasUplo Uplo, RsBlasTranspose TransA, RsBlasDiag Diag,
+ Float2 alpha, const sp<Allocation>& A, const sp<Allocation>& B);
+
+ /**
+ * ZTRMM performs one of the matrix-matrix operations
+ * B := alpha*op(A)*B or B := alpha*B*op(A)
+ * op(A) is one of op(A) = A or op(A) = A**T or op(A) = A**H
+ *
+ * Details: http://www.netlib.org/lapack/explore-html/d8/de1/ztrmm_8f.html
+ *
+ * @param Side Specifies whether the triangular matrix A appears on the left or right.
+ * @param Uplo Specifies whether matrix A is upper or lower triangular.
+ * @param TransA The type of transpose applied to matrix A.
+ * @param Diag Specifies whether or not A is unit triangular.
+ * @param alpha The scalar alpha.
+ * @param A The input allocation contains matrix A, supported elements type: {Element#F64_2}.
+ * @param B The input allocation contains matrix B, supported elements type: {Element#F64_2}.
+ */
+ void ZTRMM(RsBlasSide Side, RsBlasUplo Uplo, RsBlasTranspose TransA, RsBlasDiag Diag,
+ Double2 alpha, const sp<Allocation>& A, const sp<Allocation>& B);
+
+ /**
+ * STRSM solves one of the matrix equations
+ * op(A)*X := alpha*B or X*op(A) := alpha*B
+ * op(A) is one of op(A) = A or op(A) = A**T
+ *
+ * Details: http://www.netlib.org/lapack/explore-html/d2/d8b/strsm_8f.html
+ *
+ * @param Side Specifies whether the triangular matrix A appears on the left or right.
+ * @param Uplo Specifies whether matrix A is upper or lower triangular.
+ * @param TransA The type of transpose applied to matrix A.
+ * @param Diag Specifies whether or not A is unit triangular.
+ * @param alpha The scalar alpha.
+ * @param A The input allocation contains matrix A, supported elements type: {Element#F32}.
+ * @param B The input allocation contains matrix B, supported elements type: {Element#F32}.
+ */
+ void STRSM(RsBlasSide Side, RsBlasUplo Uplo, RsBlasTranspose TransA, RsBlasDiag Diag,
+ float alpha, const sp<Allocation>& A, const sp<Allocation>& B);
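+
+ /*
+ * A minimal usage sketch for STRSM (following standard BLAS semantics, in
+ * which B holds the right-hand sides on entry and is overwritten with the
+ * solution X; the enum values are the names from rsDefines.h). Solving
+ * A*X = B for an upper triangular, non-unit-diagonal A applied from the left:
+ *
+ * blas->STRSM(RsBlasLeft, RsBlasUpper, RsBlasNoTrans, RsBlasNonUnit,
+ *             1.0f, A, B); // B now holds X
+ */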
+
+ /**
+ * DTRSM solves one of the matrix equations
+ * op(A)*X := alpha*B or X*op(A) := alpha*B
+ * op(A) is one of op(A) = A or op(A) = A**T
+ *
+ * Details: http://www.netlib.org/lapack/explore-html/de/da7/dtrsm_8f.html
+ *
+ * @param Side Specifies whether the triangular matrix A appears on the left or right.
+ * @param Uplo Specifies whether matrix A is upper or lower triangular.
+ * @param TransA The type of transpose applied to matrix A.
+ * @param Diag Specifies whether or not A is unit triangular.
+ * @param alpha The scalar alpha.
+ * @param A The input allocation contains matrix A, supported elements type: {Element#F64}.
+ * @param B The input allocation contains matrix B, supported elements type: {Element#F64}.
+ */
+ void DTRSM(RsBlasSide Side, RsBlasUplo Uplo, RsBlasTranspose TransA, RsBlasDiag Diag,
+ double alpha, const sp<Allocation>& A, const sp<Allocation>& B);
+
+ /**
+ * CTRSM solves one of the matrix equations
+ * op(A)*X := alpha*B or X*op(A) := alpha*B
+ * op(A) is one of op(A) = A or op(A) = A**T or op(A) = A**H
+ *
+ * Details: http://www.netlib.org/lapack/explore-html/de/d30/ctrsm_8f.html
+ *
+ * @param Side Specifies whether the triangular matrix A appears on the left or right.
+ * @param Uplo Specifies whether matrix A is upper or lower triangular.
+ * @param TransA The type of transpose applied to matrix A.
+ * @param Diag Specifies whether or not A is unit triangular.
+ * @param alpha The scalar alpha.
+ * @param A The input allocation contains matrix A, supported elements type: {Element#F32_2}.
+ * @param B The input allocation contains matrix B, supported elements type: {Element#F32_2}.
+ */
+ void CTRSM(RsBlasSide Side, RsBlasUplo Uplo, RsBlasTranspose TransA, RsBlasDiag Diag,
+ Float2 alpha, const sp<Allocation>& A, const sp<Allocation>& B);
+
+ /**
+ * ZTRSM solves one of the matrix equations
+ * op(A)*X := alpha*B or X*op(A) := alpha*B
+ * op(A) is one of op(A) = A or op(A) = A**T or op(A) = A**H
+ *
+ * Details: http://www.netlib.org/lapack/explore-html/d1/d39/ztrsm_8f.html
+ *
+ * @param Side Specifies whether the triangular matrix A appears on the left or right.
+ * @param Uplo Specifies whether matrix A is upper or lower triangular.
+ * @param TransA The type of transpose applied to matrix A.
+ * @param Diag Specifies whether or not A is unit triangular.
+ * @param alpha The scalar alpha.
+ * @param A The input allocation contains matrix A, supported elements type: {Element#F64_2}.
+ * @param B The input allocation contains matrix B, supported elements type: {Element#F64_2}.
+ */
+ void ZTRSM(RsBlasSide Side, RsBlasUplo Uplo, RsBlasTranspose TransA, RsBlasDiag Diag,
+ Double2 alpha, const sp<Allocation>& A, const sp<Allocation>& B);
+
+ /**
+ * CHEMM performs one of the matrix-matrix operations
+ * C := alpha*A*B + beta*C or C := alpha*B*A + beta*C
+ *
+ * Details: http://www.netlib.org/lapack/explore-html/d3/d66/chemm_8f.html
+ *
+ * @param Side Specifies whether the hermitian matrix A appears on the left or right.
+ * @param Uplo Specifies whether the upper or lower triangular part is to be referenced.
+ * @param alpha The scalar alpha.
+ * @param A The input allocation contains matrix A, supported elements type: {Element#F32_2}.
+ * @param B The input allocation contains matrix B, supported elements type: {Element#F32_2}.
+ * @param beta The scalar beta.
+ * @param C The input allocation contains matrix C, supported elements type: {Element#F32_2}.
+ */
+ void CHEMM(RsBlasSide Side, RsBlasUplo Uplo, Float2 alpha, const sp<Allocation>& A,
+ const sp<Allocation>& B, Float2 beta, const sp<Allocation>& C);
+
+ /**
+ * ZHEMM performs one of the matrix-matrix operations
+ * C := alpha*A*B + beta*C or C := alpha*B*A + beta*C
+ *
+ * Details: http://www.netlib.org/lapack/explore-html/d6/d3e/zhemm_8f.html
+ *
+ * @param Side Specifies whether the hermitian matrix A appears on the left or right.
+ * @param Uplo Specifies whether the upper or lower triangular part is to be referenced.
+ * @param alpha The scalar alpha.
+ * @param A The input allocation contains matrix A, supported elements type: {Element#F64_2}.
+ * @param B The input allocation contains matrix B, supported elements type: {Element#F64_2}.
+ * @param beta The scalar beta.
+ * @param C The input allocation contains matrix C, supported elements type: {Element#F64_2}.
+ */
+ void ZHEMM(RsBlasSide Side, RsBlasUplo Uplo, Double2 alpha, const sp<Allocation>& A,
+ const sp<Allocation>& B, Double2 beta, const sp<Allocation>& C);
+
+ /**
+ * CHERK performs one of the hermitian rank k operations
+ * C := alpha*A*A**H + beta*C or C := alpha*A**H*A + beta*C
+ *
+ * Details: http://www.netlib.org/lapack/explore-html/d8/d52/cherk_8f.html
+ *
+ * @param Uplo Specifies whether the upper or lower triangular part of C is to be referenced.
+ * @param Trans The type of transpose applied to the operation.
+ * @param alpha The scalar alpha.
+ * @param A The input allocation contains matrix A, supported elements type: {Element#F32_2}.
+ * @param beta The scalar beta.
+ * @param C The input allocation contains matrix C, supported elements type: {Element#F32_2}.
+ */
+ void CHERK(RsBlasUplo Uplo, RsBlasTranspose Trans, float alpha, const sp<Allocation>& A,
+ float beta, const sp<Allocation>& C);
+
+ /**
+ * ZHERK performs one of the hermitian rank k operations
+ * C := alpha*A*A**H + beta*C or C := alpha*A**H*A + beta*C
+ *
+ * Details: http://www.netlib.org/lapack/explore-html/d1/db1/zherk_8f.html
+ *
+ * @param Uplo Specifies whether the upper or lower triangular part of C is to be referenced.
+ * @param Trans The type of transpose applied to the operation.
+ * @param alpha The scalar alpha.
+ * @param A The input allocation contains matrix A, supported elements type: {Element#F64_2}.
+ * @param beta The scalar beta.
+ * @param C The input allocation contains matrix C, supported elements type: {Element#F64_2}.
+ */
+ void ZHERK(RsBlasUplo Uplo, RsBlasTranspose Trans, double alpha, const sp<Allocation>& A,
+ double beta, const sp<Allocation>& C);
+
+ /**
+ * CHER2K performs one of the hermitian rank 2k operations
+ * C := alpha*A*B**H + conjg( alpha )*B*A**H + beta*C or C := alpha*A**H*B + conjg( alpha )*B**H*A + beta*C
+ *
+ * Details: http://www.netlib.org/lapack/explore-html/d1/d82/cher2k_8f.html
+ *
+ * @param Uplo Specifies whether the upper or lower triangular part of C is to be referenced.
+ * @param Trans The type of transpose applied to the operation.
+ * @param alpha The scalar alpha.
+ * @param A The input allocation contains matrix A, supported elements type: {Element#F32_2}.
+ * @param B The input allocation contains matrix B, supported elements type: {Element#F32_2}.
+ * @param beta The scalar beta.
+ * @param C The input allocation contains matrix C, supported elements type: {Element#F32_2}.
+ */
+ void CHER2K(RsBlasUplo Uplo, RsBlasTranspose Trans, Float2 alpha, const sp<Allocation>& A,
+ const sp<Allocation>& B, float beta, const sp<Allocation>& C);
+
+ /**
+ * ZHER2K performs one of the hermitian rank 2k operations
+ * C := alpha*A*B**H + conjg( alpha )*B*A**H + beta*C or C := alpha*A**H*B + conjg( alpha )*B**H*A + beta*C
+ *
+ * Details: http://www.netlib.org/lapack/explore-html/d7/dfa/zher2k_8f.html
+ *
+ * @param Uplo Specifies whether the upper or lower triangular part of C is to be referenced.
+ * @param Trans The type of transpose applied to the operation.
+ * @param alpha The scalar alpha.
+ * @param A The input allocation contains matrix A, supported elements type: {Element#F64_2}.
+ * @param B The input allocation contains matrix B, supported elements type: {Element#F64_2}.
+ * @param beta The scalar beta.
+ * @param C The input allocation contains matrix C, supported elements type: {Element#F64_2}.
+ */
+ void ZHER2K(RsBlasUplo Uplo, RsBlasTranspose Trans, Double2 alpha, const sp<Allocation>& A,
+ const sp<Allocation>& B, double beta, const sp<Allocation>& C);
+
+ /**
+ * 8-bit GEMM-like operation for neural networks: C = A * Transpose(B)
+ * Calculations are done in 1.10.21 fixed-point format for the final output,
+ * just before there's a shift down to drop the fractional parts. The output
+ * values are gated to 0 to 255 to fit in a byte, but the 10-bit format
+ * gives some headroom to avoid wrapping around on small overflows.
+ *
+ * @param A The input allocation contains matrix A, supported elements type: {Element#U8}.
+ * @param a_offset The offset for all values in matrix A, e.g. A[i,j] = A[i,j] - a_offset. Value should be from 0 to 255.
+ * @param B The input allocation contains matrix B, supported elements type: {Element#U8}.
+ * @param b_offset The offset for all values in matrix B, e.g. B[i,j] = B[i,j] - b_offset. Value should be from 0 to 255.
+ * @param C The input allocation contains matrix C, supported elements type: {Element#U8}.
+ * @param c_offset The offset for all values in matrix C.
+ * @param c_mult The multiplier for all values in matrix C, e.g. C[i,j] = (C[i,j] + c_offset) * c_mult.
+ **/
+ void BNNM(const sp<Allocation>& A, int a_offset, const sp<Allocation>& B, int b_offset, const sp<Allocation>& C,
+ int c_offset, int c_mult);
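+
+ /*
+ * A minimal usage sketch for BNNM, assuming an initialized context 'rs'
+ * and U8 Allocations A (m*k), B (n*k, transposed internally) and C (m*n);
+ * the offsets and multiplier below are placeholder quantization parameters:
+ *
+ * sp<ScriptIntrinsicBLAS> blas = ScriptIntrinsicBLAS::create(rs);
+ * blas->BNNM(A, 128, B, 128, C, 0, 1); // C = A * Transpose(B)
+ */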
+};
+
+/**
+ * Intrinsic kernel for blending two Allocations.
+ */
+class ScriptIntrinsicBlend : public ScriptIntrinsic {
+ private:
+ ScriptIntrinsicBlend(sp<RS> rs, sp<const Element> e);
+ public:
+ /**
+ * The supported Element type is U8_4.
+ * @param[in] rs RenderScript context
+ * @param[in] e Element
+ * @return new ScriptIntrinsicBlend
+ */
+ static sp<ScriptIntrinsicBlend> create(const sp<RS>& rs, const sp<const Element>& e);
+ /**
+ * sets dst = {0, 0, 0, 0}
+ * @param[in] in input Allocation
+ * @param[in] out output Allocation
+ */
+ void forEachClear(const sp<Allocation>& in, const sp<Allocation>& out);
+ /**
+ * Sets dst = src
+ * @param[in] in input Allocation
+ * @param[in] out output Allocation
+ */
+ void forEachSrc(const sp<Allocation>& in, const sp<Allocation>& out);
+ /**
+ * Sets dst = dst (NOP)
+ * @param[in] in input Allocation
+ * @param[in] out output Allocation
+ */
+ void forEachDst(const sp<Allocation>& in, const sp<Allocation>& out);
+ /**
+ * Sets dst = src + dst * (1.0 - src.a)
+ * @param[in] in input Allocation
+ * @param[in] out output Allocation
+ */
+ void forEachSrcOver(const sp<Allocation>& in, const sp<Allocation>& out);
+ /**
+ * Sets dst = dst + src * (1.0 - dst.a)
+ * @param[in] in input Allocation
+ * @param[in] out output Allocation
+ */
+ void forEachDstOver(const sp<Allocation>& in, const sp<Allocation>& out);
+ /**
+ * Sets dst = src * dst.a
+ * @param[in] in input Allocation
+ * @param[in] out output Allocation
+ */
+ void forEachSrcIn(const sp<Allocation>& in, const sp<Allocation>& out);
+ /**
+ * Sets dst = dst * src.a
+ * @param[in] in input Allocation
+ * @param[in] out output Allocation
+ */
+ void forEachDstIn(const sp<Allocation>& in, const sp<Allocation>& out);
+ /**
+ * Sets dst = src * (1.0 - dst.a)
+ * @param[in] in input Allocation
+ * @param[in] out output Allocation
+ */
+ void forEachSrcOut(const sp<Allocation>& in, const sp<Allocation>& out);
+ /**
+ * Sets dst = dst * (1.0 - src.a)
+ * @param[in] in input Allocation
+ * @param[in] out output Allocation
+ */
+ void forEachDstOut(const sp<Allocation>& in, const sp<Allocation>& out);
+ /**
+ * Sets dst.rgb = src.rgb * dst.a + (1.0 - src.a) * dst.rgb
+ * @param[in] in input Allocation
+ * @param[in] out output Allocation
+ */
+ void forEachSrcAtop(const sp<Allocation>& in, const sp<Allocation>& out);
+ /**
+ * Sets dst.rgb = dst.rgb * src.a + (1.0 - dst.a) * src.rgb
+ * @param[in] in input Allocation
+ * @param[in] out output Allocation
+ */
+ void forEachDstAtop(const sp<Allocation>& in, const sp<Allocation>& out);
+ /**
+ * Sets dst = {src.r ^ dst.r, src.g ^ dst.g, src.b ^ dst.b, src.a ^ dst.a}
+ * @param[in] in input Allocation
+ * @param[in] out output Allocation
+ */
+ void forEachXor(const sp<Allocation>& in, const sp<Allocation>& out);
+ /**
+ * Sets dst = src * dst
+ * @param[in] in input Allocation
+ * @param[in] out output Allocation
+ */
+ void forEachMultiply(const sp<Allocation>& in, const sp<Allocation>& out);
+ /**
+ * Sets dst = min(src + dst, 1.0)
+ * @param[in] in input Allocation
+ * @param[in] out output Allocation
+ */
+ void forEachAdd(const sp<Allocation>& in, const sp<Allocation>& out);
+ /**
+ * Sets dst = max(dst - src, 0.0)
+ * @param[in] in input Allocation
+ * @param[in] out output Allocation
+ */
+ void forEachSubtract(const sp<Allocation>& in, const sp<Allocation>& out);
+};
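+
+/*
+ * Example (illustrative sketch, not part of the API): blending two U8_4
+ * Allocations with the "source over" rule. The context `rs` and the
+ * Allocations `src` and `dst` are placeholder names assumed to exist with
+ * matching dimensions.
+ *
+ *     sp<ScriptIntrinsicBlend> blend =
+ *         ScriptIntrinsicBlend::create(rs, Element::RGBA_8888(rs));
+ *     blend->forEachSrcOver(src, dst);  // dst = src + dst * (1.0 - src.a)
+ */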
+
+/**
+ * Intrinsic Gaussian blur filter. Applies a Gaussian blur of the specified
+ * radius to all elements of an Allocation.
+ */
+class ScriptIntrinsicBlur : public ScriptIntrinsic {
+ private:
+ ScriptIntrinsicBlur(sp<RS> rs, sp<const Element> e);
+ public:
+ /**
+ * Supported Element types are U8 and U8_4.
+ * @param[in] rs RenderScript context
+ * @param[in] e Element
+ * @return new ScriptIntrinsicBlur
+ */
+ static sp<ScriptIntrinsicBlur> create(const sp<RS>& rs, const sp<const Element>& e);
+ /**
+ * Sets the input of the blur.
+ * @param[in] in input Allocation
+ */
+ void setInput(const sp<Allocation>& in);
+ /**
+ * Runs the intrinsic.
+ * @param[in] out output Allocation
+ */
+ void forEach(const sp<Allocation>& out);
+ /**
+ * Sets the radius of the blur. The supported range is 0 < radius <= 25.
+ * @param[in] radius radius of the blur
+ */
+ void setRadius(float radius);
+};
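+
+/*
+ * Example (illustrative sketch): applying a Gaussian blur to a U8_4
+ * Allocation. `rs`, `in`, and `out` are placeholder names assumed to exist.
+ *
+ *     sp<ScriptIntrinsicBlur> blur =
+ *         ScriptIntrinsicBlur::create(rs, Element::RGBA_8888(rs));
+ *     blur->setRadius(5.0f);  // must satisfy 0 < radius <= 25
+ *     blur->setInput(in);
+ *     blur->forEach(out);
+ */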
+
+/**
+ * Intrinsic for applying a color matrix to allocations. This has the
+ * same effect as loading each element, converting it to
+ * F32_N, multiplying the result by the 4x4 color matrix
+ * as performed by rsMatrixMultiply(), and writing it to the output
+ * after conversion back to U8_N or F32_N.
+ */
+class ScriptIntrinsicColorMatrix : public ScriptIntrinsic {
+ private:
+ ScriptIntrinsicColorMatrix(sp<RS> rs, sp<const Element> e);
+ public:
+ /**
+ * Creates a new intrinsic.
+ * @param[in] rs RenderScript context
+ * @return new ScriptIntrinsicColorMatrix
+ */
+ static sp<ScriptIntrinsicColorMatrix> create(const sp<RS>& rs);
+ /**
+ * Applies the color matrix. Supported types are U8 and F32 with
+ * vector lengths between 1 and 4.
+ * @param[in] in input Allocation
+ * @param[out] out output Allocation
+ */
+ void forEach(const sp<Allocation>& in, const sp<Allocation>& out);
+ /**
+ * Set the value to be added after the color matrix has been
+ * applied. The default value is {0, 0, 0, 0}.
+ * @param[in] add float[4] of values
+ */
+ void setAdd(float* add);
+
+ /**
+ * Set the color matrix which will be applied to each cell of the
+ * image. The alpha channel will be copied.
+ *
+ * @param[in] m float[9] of values
+ */
+ void setColorMatrix3(float* m);
+ /**
+ * Set the color matrix which will be applied to each cell of the
+ * image.
+ *
+ * @param[in] m float[16] of values
+ */
+ void setColorMatrix4(float* m);
+ /**
+ * Set a color matrix to convert from RGB to luminance. The alpha
+ * channel will be a copy.
+ */
+ void setGreyscale();
+ /**
+ * Set the matrix to convert from RGB to YUV with a direct copy of
+ * the 4th channel.
+ */
+ void setRGBtoYUV();
+ /**
+ * Set the matrix to convert from YUV to RGB with a direct copy of
+ * the 4th channel.
+ */
+ void setYUVtoRGB();
+};
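+
+/*
+ * Example (illustrative sketch): converting an RGBA image to greyscale.
+ * `rs`, `in`, and `out` are placeholders for a context and two U8_4
+ * Allocations of equal dimensions.
+ *
+ *     sp<ScriptIntrinsicColorMatrix> cm = ScriptIntrinsicColorMatrix::create(rs);
+ *     cm->setGreyscale();    // loads the RGB-to-luminance matrix
+ *     cm->forEach(in, out);
+ */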
+
+/**
+ * Intrinsic for applying a 3x3 convolve to an allocation.
+ */
+class ScriptIntrinsicConvolve3x3 : public ScriptIntrinsic {
+ private:
+ ScriptIntrinsicConvolve3x3(sp<RS> rs, sp<const Element> e);
+ public:
+ /**
+ * Supported types are U8 and F32 with vector lengths between 1 and
+ * 4. The default convolution kernel is the identity.
+ * @param[in] rs RenderScript context
+ * @param[in] e Element
+ * @return new ScriptIntrinsicConvolve3x3
+ */
+ static sp<ScriptIntrinsicConvolve3x3> create(const sp<RS>& rs, const sp<const Element>& e);
+ /**
+ * Sets input for intrinsic.
+ * @param[in] in input Allocation
+ */
+ void setInput(const sp<Allocation>& in);
+ /**
+ * Launches the intrinsic.
+ * @param[in] out output Allocation
+ */
+ void forEach(const sp<Allocation>& out);
+ /**
+ * Sets convolution kernel.
+ * @param[in] v float[9] of values
+ */
+ void setCoefficients(float* v);
+};
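+
+/*
+ * Example (illustrative sketch): a 3x3 sharpening convolution. The kernel
+ * values below are a common sharpen filter, not something defined by this
+ * API; `rs`, `in`, and `out` are placeholders.
+ *
+ *     float sharpen[9] = { 0.f, -1.f,  0.f,
+ *                         -1.f,  5.f, -1.f,
+ *                          0.f, -1.f,  0.f };
+ *     sp<ScriptIntrinsicConvolve3x3> conv =
+ *         ScriptIntrinsicConvolve3x3::create(rs, Element::RGBA_8888(rs));
+ *     conv->setCoefficients(sharpen);
+ *     conv->setInput(in);
+ *     conv->forEach(out);
+ */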
+
+/**
+ * Intrinsic for applying a 5x5 convolve to an allocation.
+ */
+class ScriptIntrinsicConvolve5x5 : public ScriptIntrinsic {
+ private:
+ ScriptIntrinsicConvolve5x5(sp<RS> rs, sp<const Element> e);
+ public:
+ /**
+ * Supported types are U8 and F32 with vector lengths between 1 and
+ * 4. The default convolution kernel is the identity.
+ * @param[in] rs RenderScript context
+ * @param[in] e Element
+ * @return new ScriptIntrinsicConvolve5x5
+ */
+ static sp<ScriptIntrinsicConvolve5x5> create(const sp<RS>& rs, const sp<const Element>& e);
+ /**
+ * Sets input for intrinsic.
+ * @param[in] in input Allocation
+ */
+ void setInput(const sp<Allocation>& in);
+ /**
+ * Launches the intrinsic.
+ * @param[in] out output Allocation
+ */
+ void forEach(const sp<Allocation>& out);
+ /**
+ * Sets convolution kernel.
+ * @param[in] v float[25] of values
+ */
+ void setCoefficients(float* v);
+};
+
+/**
+ * Intrinsic for computing a histogram.
+ */
+class ScriptIntrinsicHistogram : public ScriptIntrinsic {
+ private:
+ ScriptIntrinsicHistogram(sp<RS> rs, sp<const Element> e);
+ sp<Allocation> mOut;
+ public:
+ /**
+ * Create an intrinsic for calculating the histogram of an uchar
+ * or uchar4 image.
+ *
+ * Supported element types are U8_4, U8_3, U8_2, and U8.
+ *
+ * @param[in] rs The RenderScript context
+ * @param[in] e Element type for inputs
+ *
+ * @return ScriptIntrinsicHistogram
+ */
+ static sp<ScriptIntrinsicHistogram> create(const sp<RS>& rs, const sp<const Element>& e);
+ /**
+ * Set the output of the histogram. 32-bit integer types are
+ * supported.
+ *
+ * @param[in] aout The output allocation
+ */
+ void setOutput(const sp<Allocation>& aout);
+ /**
+ * Set the coefficients used for the dot product calculation. The
+ * default is {0.299f, 0.587f, 0.114f, 0.f}.
+ *
+ * Coefficients must be >= 0 and sum to 1.0 or less.
+ *
+ * @param[in] r Red coefficient
+ * @param[in] g Green coefficient
+ * @param[in] b Blue coefficient
+ * @param[in] a Alpha coefficient
+ */
+ void setDotCoefficients(float r, float g, float b, float a);
+ /**
+ * Process an input buffer and place the histogram into the output
+ * allocation. The output allocation may be a narrower vector size
+ * than the input. In this case the vector size of the output is
+ * used to determine how many of the input channels are used in
+ * the computation. This is useful if you have an RGBA input
+ * buffer but only want the histogram for RGB.
+ *
+ * 1D and 2D input allocations are supported.
+ *
+ * @param[in] ain The input image
+ */
+ void forEach(const sp<Allocation>& ain);
+ /**
+ * Process an input buffer and place the histogram into the output
+ * allocation. The dot product of the input channels and the
+ * coefficients set via 'setDotCoefficients' is used to calculate
+ * the output values.
+ *
+ * 1D and 2D input allocations are supported.
+ *
+ * @param ain The input image
+ */
+ void forEach_dot(const sp<Allocation>& ain);
+};
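+
+/*
+ * Example (illustrative sketch): computing a per-channel histogram of a
+ * U8_4 image into a U32_4 output of 256 bins. `rs` and `image` are
+ * placeholders.
+ *
+ *     sp<ScriptIntrinsicHistogram> hist =
+ *         ScriptIntrinsicHistogram::create(rs, Element::U8_4(rs));
+ *     sp<Allocation> bins = Allocation::createSized(rs, Element::U32_4(rs), 256);
+ *     hist->setOutput(bins);
+ *     hist->forEach(image);
+ */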
+
+/**
+ * Intrinsic for applying a per-channel lookup table. Each channel of
+ * the input has an independent lookup table. The tables are 256
+ * entries in size and can cover the full value range of U8_4.
+ **/
+class ScriptIntrinsicLUT : public ScriptIntrinsic {
+ private:
+ sp<Allocation> LUT;
+ bool mDirty;
+ unsigned char mCache[1024];
+ void setTable(unsigned int offset, unsigned char base, unsigned int length, unsigned char* lutValues);
+ ScriptIntrinsicLUT(sp<RS> rs, sp<const Element> e);
+
+ public:
+ /**
+ * Supported element types are U8_4.
+ *
+ * The default tables are identity.
+ *
+ * @param[in] rs The RenderScript context
+ * @param[in] e Element type for inputs and outputs
+ *
+ * @return ScriptIntrinsicLUT
+ */
+ static sp<ScriptIntrinsicLUT> create(const sp<RS>& rs, const sp<const Element>& e);
+ /**
+ * Invoke the kernel and apply the lookup to each cell of ain and
+ * copy to aout.
+ *
+ * @param[in] ain Input allocation
+ * @param[in] aout Output allocation
+ */
+ void forEach(const sp<Allocation>& ain, const sp<Allocation>& aout);
+ /**
+ * Sets entries in LUT for the red channel.
+ * @param[in] base base of region to update
+ * @param[in] length length of region to update
+ * @param[in] lutValues LUT values to use
+ */
+ void setRed(unsigned char base, unsigned int length, unsigned char* lutValues);
+ /**
+ * Sets entries in LUT for the green channel.
+ * @param[in] base base of region to update
+ * @param[in] length length of region to update
+ * @param[in] lutValues LUT values to use
+ */
+ void setGreen(unsigned char base, unsigned int length, unsigned char* lutValues);
+ /**
+ * Sets entries in LUT for the blue channel.
+ * @param[in] base base of region to update
+ * @param[in] length length of region to update
+ * @param[in] lutValues LUT values to use
+ */
+ void setBlue(unsigned char base, unsigned int length, unsigned char* lutValues);
+ /**
+ * Sets entries in LUT for the alpha channel.
+ * @param[in] base base of region to update
+ * @param[in] length length of region to update
+ * @param[in] lutValues LUT values to use
+ */
+ void setAlpha(unsigned char base, unsigned int length, unsigned char* lutValues);
+ virtual ~ScriptIntrinsicLUT();
+};
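+
+/*
+ * Example (illustrative sketch): inverting the red channel through the
+ * lookup table while leaving green, blue, and alpha at their identity
+ * defaults. `rs`, `in`, and `out` are placeholders.
+ *
+ *     unsigned char inverted[256];
+ *     for (int i = 0; i < 256; i++) inverted[i] = 255 - i;
+ *     sp<ScriptIntrinsicLUT> lut =
+ *         ScriptIntrinsicLUT::create(rs, Element::U8_4(rs));
+ *     lut->setRed(0, 256, inverted);
+ *     lut->forEach(in, out);
+ */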
+
+/**
+ * Intrinsic for performing a resize of a 2D allocation.
+ */
+class ScriptIntrinsicResize : public ScriptIntrinsic {
+ private:
+ sp<Allocation> mInput;
+ ScriptIntrinsicResize(sp<RS> rs, sp<const Element> e);
+ public:
+ /**
+ * Supported Element types are U8_4.
+ * @param[in] rs RenderScript context
+ * @return new ScriptIntrinsicResize
+ */
+ static sp<ScriptIntrinsicResize> create(const sp<RS>& rs);
+
+ /**
+ * Resizes and copies the input allocation to the specified output.
+ * The Allocation is rescaled if necessary using bicubic
+ * interpolation.
+ * @param[in] aout output Allocation
+ */
+ void forEach_bicubic(const sp<Allocation>& aout);
+
+ /**
+ * Set the input of the resize.
+ * @param[in] ain input Allocation
+ */
+ void setInput(const sp<Allocation>& ain);
+};
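+
+/*
+ * Example (illustrative sketch): bicubic rescale of a U8_4 Allocation. The
+ * output dimensions are taken from `smaller`, a placeholder Allocation
+ * assumed to exist.
+ *
+ *     sp<ScriptIntrinsicResize> resize = ScriptIntrinsicResize::create(rs);
+ *     resize->setInput(in);
+ *     resize->forEach_bicubic(smaller);
+ */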
+
+/**
+ * Intrinsic for converting an Android YUV buffer to RGB.
+ *
+ * The input allocation should be supplied in a supported YUV format
+ * as a YUV element Allocation. The output is RGBA; the alpha channel
+ * will be set to 255.
+ */
+class ScriptIntrinsicYuvToRGB : public ScriptIntrinsic {
+ private:
+ ScriptIntrinsicYuvToRGB(sp<RS> rs, sp<const Element> e);
+ public:
+ /**
+ * Create an intrinsic for converting YUV to RGB.
+ *
+ * Supported element types are U8_4.
+ *
+ * @param[in] rs The RenderScript context
+ * @param[in] e Element type for output
+ *
+ * @return ScriptIntrinsicYuvToRGB
+ */
+ static sp<ScriptIntrinsicYuvToRGB> create(const sp<RS>& rs, const sp<const Element>& e);
+ /**
+ * Set the input YUV allocation.
+ *
+ * @param[in] in The input allocation.
+ */
+ void setInput(const sp<Allocation>& in);
+
+ /**
+ * Convert the image to RGB.
+ *
+ * @param[in] out Output allocation. Must match the Element type
+ * used at creation.
+ */
+ void forEach(const sp<Allocation>& out);
+
+};
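+
+/*
+ * Example (illustrative sketch): converting a YUV buffer to RGBA. The
+ * placeholder `yuvAlloc` is assumed to be an Allocation created with a
+ * YUV Element, and `rgbaOut` a U8_4 Allocation of matching dimensions.
+ *
+ *     sp<ScriptIntrinsicYuvToRGB> yuv =
+ *         ScriptIntrinsicYuvToRGB::create(rs, Element::RGBA_8888(rs));
+ *     yuv->setInput(yuvAlloc);
+ *     yuv->forEach(rgbaOut);
+ */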
+
+/**
+ * Sampler object that defines how Allocations can be read as textures
+ * within a kernel. Samplers are used in conjunction with the rsSample
+ * runtime function to return values from normalized coordinates.
+ *
+ * Any Allocation used with a Sampler must have been created with
+ * RS_ALLOCATION_USAGE_GRAPHICS_TEXTURE; using a Sampler on an
+ * Allocation that was not created with
+ * RS_ALLOCATION_USAGE_GRAPHICS_TEXTURE is undefined.
+ **/
+ class Sampler : public BaseObj {
+ private:
+ Sampler(sp<RS> rs, void* id);
+ Sampler(sp<RS> rs, void* id, RsSamplerValue min, RsSamplerValue mag,
+ RsSamplerValue wrapS, RsSamplerValue wrapT, float anisotropy);
+ RsSamplerValue mMin;
+ RsSamplerValue mMag;
+ RsSamplerValue mWrapS;
+ RsSamplerValue mWrapT;
+ float mAniso;
+
+ public:
+ /**
+ * Creates a non-standard Sampler.
+ * @param[in] rs RenderScript context
+ * @param[in] min minification
+ * @param[in] mag magnification
+ * @param[in] wrapS S wrapping mode
+ * @param[in] wrapT T wrapping mode
+ * @param[in] anisotropy anisotropy setting
+ */
+ static sp<Sampler> create(const sp<RS>& rs, RsSamplerValue min, RsSamplerValue mag, RsSamplerValue wrapS, RsSamplerValue wrapT, float anisotropy);
+
+ /**
+ * @return minification setting for the sampler
+ */
+ RsSamplerValue getMinification();
+ /**
+ * @return magnification setting for the sampler
+ */
+ RsSamplerValue getMagnification();
+ /**
+ * @return S wrapping mode for the sampler
+ */
+ RsSamplerValue getWrapS();
+ /**
+ * @return T wrapping mode for the sampler
+ */
+ RsSamplerValue getWrapT();
+ /**
+ * @return anisotropy setting for the sampler
+ */
+ float getAnisotropy();
+
+ /**
+ * Retrieve a sampler with min and mag set to nearest and wrap modes set to
+ * clamp.
+ *
+ * @param rs Context to which the sampler will belong.
+ *
+ * @return Sampler
+ */
+ static sp<const Sampler> CLAMP_NEAREST(const sp<RS> &rs);
+ /**
+ * Retrieve a sampler with min and mag set to linear and wrap modes set to
+ * clamp.
+ *
+ * @param rs Context to which the sampler will belong.
+ *
+ * @return Sampler
+ */
+ static sp<const Sampler> CLAMP_LINEAR(const sp<RS> &rs);
+ /**
+ * Retrieve a sampler with mag set to linear, min linear mipmap linear, and
+ * wrap modes set to clamp.
+ *
+ * @param rs Context to which the sampler will belong.
+ *
+ * @return Sampler
+ */
+ static sp<const Sampler> CLAMP_LINEAR_MIP_LINEAR(const sp<RS> &rs);
+ /**
+ * Retrieve a sampler with min and mag set to nearest and wrap modes set to
+ * wrap.
+ *
+ * @param rs Context to which the sampler will belong.
+ *
+ * @return Sampler
+ */
+ static sp<const Sampler> WRAP_NEAREST(const sp<RS> &rs);
+ /**
+ * Retrieve a sampler with min and mag set to linear and wrap modes set to
+ * wrap.
+ *
+ * @param rs Context to which the sampler will belong.
+ *
+ * @return Sampler
+ */
+ static sp<const Sampler> WRAP_LINEAR(const sp<RS> &rs);
+ /**
+ * Retrieve a sampler with mag set to linear, min linear mipmap linear, and
+ * wrap modes set to wrap.
+ *
+ * @param rs Context to which the sampler will belong.
+ *
+ * @return Sampler
+ */
+ static sp<const Sampler> WRAP_LINEAR_MIP_LINEAR(const sp<RS> &rs);
+ /**
+ * Retrieve a sampler with min and mag set to nearest and wrap modes set to
+ * mirrored repeat.
+ *
+ * @param rs Context to which the sampler will belong.
+ *
+ * @return Sampler
+ */
+ static sp<const Sampler> MIRRORED_REPEAT_NEAREST(const sp<RS> &rs);
+ /**
+ * Retrieve a sampler with min and mag set to linear and wrap modes set to
+ * mirrored repeat.
+ *
+ * @param rs Context to which the sampler will belong.
+ *
+ * @return Sampler
+ */
+ static sp<const Sampler> MIRRORED_REPEAT_LINEAR(const sp<RS> &rs);
+ /**
+ * Retrieve a sampler with mag set to linear, min linear mipmap linear, and
+ * wrap modes set to mirrored repeat.
+ *
+ * @param rs Context to which the sampler will belong.
+ *
+ * @return Sampler
+ */
+ static sp<const Sampler> MIRRORED_REPEAT_LINEAR_MIP_LINEAR(const sp<RS> &rs);
+
+};
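+
+/*
+ * Example (illustrative sketch): either request one of the predefined
+ * samplers or build a custom one. `rs` is a placeholder context.
+ *
+ *     sp<const Sampler> s1 = Sampler::CLAMP_LINEAR(rs);
+ *     sp<Sampler> s2 = Sampler::create(rs, RS_SAMPLER_NEAREST, RS_SAMPLER_LINEAR,
+ *                                      RS_SAMPLER_WRAP, RS_SAMPLER_CLAMP, 1.0f);
+ */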
+
+}
+
+}
+
+#endif
diff --git a/current/platform/rs/cpp/util/RefBase.h b/current/platform/rs/cpp/util/RefBase.h
new file mode 100644
index 0000000..40bb7bc
--- /dev/null
+++ b/current/platform/rs/cpp/util/RefBase.h
@@ -0,0 +1,529 @@
+/*
+ * Copyright (C) 2013 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef RS_REF_BASE_H
+#define RS_REF_BASE_H
+
+
+#include <stdint.h>
+#include <sys/types.h>
+#include <stdlib.h>
+#include <string.h>
+
+#include "StrongPointer.h"
+#include "TypeHelpers.h"
+
+// ---------------------------------------------------------------------------
+namespace android{
+namespace RSC {
+
+class TextOutput;
+TextOutput& printWeakPointer(TextOutput& to, const void* val);
+
+// ---------------------------------------------------------------------------
+
+#define COMPARE_WEAK(_op_) \
+inline bool operator _op_ (const sp<T>& o) const { \
+ return m_ptr _op_ o.m_ptr; \
+} \
+inline bool operator _op_ (const T* o) const { \
+ return m_ptr _op_ o; \
+} \
+template<typename U> \
+inline bool operator _op_ (const sp<U>& o) const { \
+ return m_ptr _op_ o.m_ptr; \
+} \
+template<typename U> \
+inline bool operator _op_ (const U* o) const { \
+ return m_ptr _op_ o; \
+}
+
+// ---------------------------------------------------------------------------
+class ReferenceMover;
+class ReferenceConverterBase {
+public:
+ virtual size_t getReferenceTypeSize() const = 0;
+ virtual void* getReferenceBase(void const*) const = 0;
+ inline virtual ~ReferenceConverterBase() { }
+};
+
+// ---------------------------------------------------------------------------
+
+class RefBase
+{
+public:
+ void incStrong(const void* id) const;
+ void decStrong(const void* id) const;
+
+ void forceIncStrong(const void* id) const;
+
+ //! DEBUGGING ONLY: Get current strong ref count.
+ int32_t getStrongCount() const;
+
+ class weakref_type
+ {
+ public:
+ RefBase* refBase() const;
+
+ void incWeak(const void* id);
+ void decWeak(const void* id);
+
+ // acquires a strong reference if there is already one.
+ bool attemptIncStrong(const void* id);
+
+ // acquires a weak reference if there is already one.
+ // This is not always safe. see ProcessState.cpp and BpBinder.cpp
+ // for proper use.
+ bool attemptIncWeak(const void* id);
+
+ //! DEBUGGING ONLY: Get current weak ref count.
+ int32_t getWeakCount() const;
+
+ //! DEBUGGING ONLY: Print references held on object.
+ void printRefs() const;
+
+ //! DEBUGGING ONLY: Enable tracking for this object.
+ // enable -- enable/disable tracking
+ // retain -- when tracking is enable, if true, then we save a stack trace
+ // for each reference and dereference; when retain == false, we
+ // match up references and dereferences and keep only the
+ // outstanding ones.
+
+ void trackMe(bool enable, bool retain);
+ };
+
+ weakref_type* createWeak(const void* id) const;
+
+ weakref_type* getWeakRefs() const;
+
+ //! DEBUGGING ONLY: Print references held on object.
+ inline void printRefs() const { getWeakRefs()->printRefs(); }
+
+ //! DEBUGGING ONLY: Enable tracking of object.
+ inline void trackMe(bool enable, bool retain)
+ {
+ getWeakRefs()->trackMe(enable, retain);
+ }
+
+ typedef RefBase basetype;
+
+protected:
+ RefBase();
+ virtual ~RefBase();
+
+ //! Flags for extendObjectLifetime()
+ enum {
+ OBJECT_LIFETIME_STRONG = 0x0000,
+ OBJECT_LIFETIME_WEAK = 0x0001,
+ OBJECT_LIFETIME_MASK = 0x0001
+ };
+
+ void extendObjectLifetime(int32_t mode);
+
+ //! Flags for onIncStrongAttempted()
+ enum {
+ FIRST_INC_STRONG = 0x0001
+ };
+
+ virtual void onFirstRef();
+ virtual void onLastStrongRef(const void* id);
+ virtual bool onIncStrongAttempted(uint32_t flags, const void* id);
+ virtual void onLastWeakRef(const void* id);
+
+private:
+ friend class ReferenceMover;
+ static void moveReferences(void* d, void const* s, size_t n,
+ const ReferenceConverterBase& caster);
+
+private:
+ friend class weakref_type;
+ class weakref_impl;
+
+ RefBase(const RefBase& o);
+ RefBase& operator=(const RefBase& o);
+
+ weakref_impl* const mRefs;
+};
+
+// ---------------------------------------------------------------------------
+
+template <class T>
+class LightRefBase
+{
+public:
+ inline LightRefBase() : mCount(0) { }
+ inline void incStrong(__attribute__((unused)) const void* id) const {
+ __sync_fetch_and_add(&mCount, 1);
+ }
+ inline void decStrong(__attribute__((unused)) const void* id) const {
+ if (__sync_fetch_and_sub(&mCount, 1) == 1) {
+ delete static_cast<const T*>(this);
+ }
+ }
+ //! DEBUGGING ONLY: Get current strong ref count.
+ inline int32_t getStrongCount() const {
+ return mCount;
+ }
+
+ typedef LightRefBase<T> basetype;
+
+protected:
+ inline ~LightRefBase() { }
+
+private:
+ friend class ReferenceMover;
+ inline static void moveReferences(void*, void const*, size_t,
+ const ReferenceConverterBase&) { }
+
+private:
+ mutable volatile int32_t mCount;
+};
+
+// ---------------------------------------------------------------------------
+
+template <typename T>
+class wp
+{
+public:
+ typedef typename RefBase::weakref_type weakref_type;
+
+ inline wp() : m_ptr(0) { }
+
+ explicit wp(T* other);
+ wp(const wp<T>& other);
+ explicit wp(const sp<T>& other);
+ template<typename U> explicit wp(U* other);
+ template<typename U> explicit wp(const sp<U>& other);
+ template<typename U> explicit wp(const wp<U>& other);
+
+ ~wp();
+
+ // Assignment
+
+ wp& operator = (T* other);
+ wp& operator = (const wp<T>& other);
+ wp& operator = (const sp<T>& other);
+
+ template<typename U> wp& operator = (U* other);
+ template<typename U> wp& operator = (const wp<U>& other);
+ template<typename U> wp& operator = (const sp<U>& other);
+
+ void set_object_and_refs(T* other, weakref_type* refs);
+
+ // promotion to sp
+
+ sp<T> promote() const;
+
+ // Reset
+
+ void clear();
+
+ // Accessors
+
+ inline weakref_type* get_refs() const { return m_refs; }
+
+ inline T* unsafe_get() const { return m_ptr; }
+
+ // Operators
+
+ COMPARE_WEAK(==)
+ COMPARE_WEAK(!=)
+ COMPARE_WEAK(>)
+ COMPARE_WEAK(<)
+ COMPARE_WEAK(<=)
+ COMPARE_WEAK(>=)
+
+ inline bool operator == (const wp<T>& o) const {
+ return (m_ptr == o.m_ptr) && (m_refs == o.m_refs);
+ }
+ template<typename U>
+ inline bool operator == (const wp<U>& o) const {
+ return m_ptr == o.m_ptr;
+ }
+
+ inline bool operator > (const wp<T>& o) const {
+ return (m_ptr == o.m_ptr) ? (m_refs > o.m_refs) : (m_ptr > o.m_ptr);
+ }
+ template<typename U>
+ inline bool operator > (const wp<U>& o) const {
+ return (m_ptr == o.m_ptr) ? (m_refs > o.m_refs) : (m_ptr > o.m_ptr);
+ }
+
+ inline bool operator < (const wp<T>& o) const {
+ return (m_ptr == o.m_ptr) ? (m_refs < o.m_refs) : (m_ptr < o.m_ptr);
+ }
+ template<typename U>
+ inline bool operator < (const wp<U>& o) const {
+ return (m_ptr == o.m_ptr) ? (m_refs < o.m_refs) : (m_ptr < o.m_ptr);
+ }
+ inline bool operator != (const wp<T>& o) const { return m_refs != o.m_refs; }
+ template<typename U> inline bool operator != (const wp<U>& o) const { return !operator == (o); }
+ inline bool operator <= (const wp<T>& o) const { return !operator > (o); }
+ template<typename U> inline bool operator <= (const wp<U>& o) const { return !operator > (o); }
+ inline bool operator >= (const wp<T>& o) const { return !operator < (o); }
+ template<typename U> inline bool operator >= (const wp<U>& o) const { return !operator < (o); }
+
+private:
+ template<typename Y> friend class sp;
+ template<typename Y> friend class wp;
+
+ T* m_ptr;
+ weakref_type* m_refs;
+};
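+
+/*
+ * Example (illustrative sketch): the usual wp<> pattern is to hold a weak
+ * reference and promote() it to a strong sp<> before use; promote() returns
+ * a null sp<> if the object has already been destroyed. `MyObj` is a
+ * placeholder RefBase-derived type.
+ *
+ *     sp<MyObj> obj = new MyObj();
+ *     wp<MyObj> weak(obj);
+ *     sp<MyObj> strong = weak.promote();
+ *     if (strong != NULL) { strong->doSomething(); }
+ */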
+
+template <typename T>
+TextOutput& operator<<(TextOutput& to, const wp<T>& val);
+
+#undef COMPARE_WEAK
+
+// ---------------------------------------------------------------------------
+// No user serviceable parts below here.
+
+template<typename T>
+wp<T>::wp(T* other)
+ : m_ptr(other)
+{
+ if (other) m_refs = other->createWeak(this);
+}
+
+template<typename T>
+wp<T>::wp(const wp<T>& other)
+ : m_ptr(other.m_ptr), m_refs(other.m_refs)
+{
+ if (m_ptr) m_refs->incWeak(this);
+}
+
+template<typename T>
+wp<T>::wp(const sp<T>& other)
+ : m_ptr(other.m_ptr)
+{
+ if (m_ptr) {
+ m_refs = m_ptr->createWeak(this);
+ }
+}
+
+template<typename T> template<typename U>
+wp<T>::wp(U* other)
+ : m_ptr(other)
+{
+ if (other) m_refs = other->createWeak(this);
+}
+
+template<typename T> template<typename U>
+wp<T>::wp(const wp<U>& other)
+ : m_ptr(other.m_ptr)
+{
+ if (m_ptr) {
+ m_refs = other.m_refs;
+ m_refs->incWeak(this);
+ }
+}
+
+template<typename T> template<typename U>
+wp<T>::wp(const sp<U>& other)
+ : m_ptr(other.m_ptr)
+{
+ if (m_ptr) {
+ m_refs = m_ptr->createWeak(this);
+ }
+}
+
+template<typename T>
+wp<T>::~wp()
+{
+ if (m_ptr) m_refs->decWeak(this);
+}
+
+template<typename T>
+wp<T>& wp<T>::operator = (T* other)
+{
+ weakref_type* newRefs =
+ other ? other->createWeak(this) : 0;
+ if (m_ptr) m_refs->decWeak(this);
+ m_ptr = other;
+ m_refs = newRefs;
+ return *this;
+}
+
+template<typename T>
+wp<T>& wp<T>::operator = (const wp<T>& other)
+{
+ weakref_type* otherRefs(other.m_refs);
+ T* otherPtr(other.m_ptr);
+ if (otherPtr) otherRefs->incWeak(this);
+ if (m_ptr) m_refs->decWeak(this);
+ m_ptr = otherPtr;
+ m_refs = otherRefs;
+ return *this;
+}
+
+template<typename T>
+wp<T>& wp<T>::operator = (const sp<T>& other)
+{
+ weakref_type* newRefs =
+ other != NULL ? other->createWeak(this) : NULL;
+ T* otherPtr(other.m_ptr);
+ if (m_ptr) m_refs->decWeak(this);
+ m_ptr = otherPtr;
+ m_refs = newRefs;
+ return *this;
+}
+
+template<typename T> template<typename U>
+wp<T>& wp<T>::operator = (U* other)
+{
+ weakref_type* newRefs =
+ other ? other->createWeak(this) : NULL;
+ if (m_ptr) m_refs->decWeak(this);
+ m_ptr = other;
+ m_refs = newRefs;
+ return *this;
+}
+
+template<typename T> template<typename U>
+wp<T>& wp<T>::operator = (const wp<U>& other)
+{
+ weakref_type* otherRefs(other.m_refs);
+ U* otherPtr(other.m_ptr);
+ if (otherPtr) otherRefs->incWeak(this);
+ if (m_ptr) m_refs->decWeak(this);
+ m_ptr = otherPtr;
+ m_refs = otherRefs;
+ return *this;
+}
+
+template<typename T> template<typename U>
+wp<T>& wp<T>::operator = (const sp<U>& other)
+{
+ weakref_type* newRefs =
+ other != NULL ? other->createWeak(this) : NULL;
+ U* otherPtr(other.m_ptr);
+ if (m_ptr) m_refs->decWeak(this);
+ m_ptr = otherPtr;
+ m_refs = newRefs;
+ return *this;
+}
+
+template<typename T>
+void wp<T>::set_object_and_refs(T* other, weakref_type* refs)
+{
+ if (other) refs->incWeak(this);
+ if (m_ptr) m_refs->decWeak(this);
+ m_ptr = other;
+ m_refs = refs;
+}
+
+template<typename T>
+sp<T> wp<T>::promote() const
+{
+ sp<T> result;
+ if (m_ptr && m_refs->attemptIncStrong(&result)) {
+ result.set_pointer(m_ptr);
+ }
+ return result;
+}
+
+template<typename T>
+void wp<T>::clear()
+{
+ if (m_ptr) {
+ m_refs->decWeak(this);
+ m_ptr = 0;
+ }
+}
+
+template <typename T>
+inline TextOutput& operator<<(TextOutput& to, const wp<T>& val)
+{
+ return printWeakPointer(to, val.unsafe_get());
+}
+
+// ---------------------------------------------------------------------------
+
+// This class just serves as a namespace so that TYPE::moveReferences can
+// stay private.
+
+class ReferenceMover {
+ // StrongReferenceCast and WeakReferenceCast do the impedance matching
+ // between the generic (void*) implementation in RefBase and the strongly typed
+ // template specializations below.
+
+ template <typename TYPE>
+ struct StrongReferenceCast : public ReferenceConverterBase {
+ virtual size_t getReferenceTypeSize() const { return sizeof( sp<TYPE> ); }
+ virtual void* getReferenceBase(void const* p) const {
+ sp<TYPE> const* sptr(reinterpret_cast<sp<TYPE> const*>(p));
+ return static_cast<typename TYPE::basetype *>(sptr->get());
+ }
+ };
+
+ template <typename TYPE>
+ struct WeakReferenceCast : public ReferenceConverterBase {
+ virtual size_t getReferenceTypeSize() const { return sizeof( wp<TYPE> ); }
+ virtual void* getReferenceBase(void const* p) const {
+ wp<TYPE> const* sptr(reinterpret_cast<wp<TYPE> const*>(p));
+ return static_cast<typename TYPE::basetype *>(sptr->unsafe_get());
+ }
+ };
+
+public:
+ template<typename TYPE> static inline
+ void move_references(sp<TYPE>* d, sp<TYPE> const* s, size_t n) {
+ memmove(d, s, n*sizeof(sp<TYPE>));
+ StrongReferenceCast<TYPE> caster;
+ TYPE::moveReferences(d, s, n, caster);
+ }
+ template<typename TYPE> static inline
+ void move_references(wp<TYPE>* d, wp<TYPE> const* s, size_t n) {
+ memmove(d, s, n*sizeof(wp<TYPE>));
+ WeakReferenceCast<TYPE> caster;
+ TYPE::moveReferences(d, s, n, caster);
+ }
+};
+
+// Specializations for moving sp<> and wp<> types.
+// These are used by the [Sorted|Keyed]Vector<> implementations.
+// sp<> and wp<> need to be handled specially, because they do not
+// have a trivial copy operation in the general case (see RefBase.cpp
+// when DEBUG ops are enabled), but can be implemented very
+// efficiently in most cases.
+
+template<typename TYPE> inline
+void move_forward_type(sp<TYPE>* d, sp<TYPE> const* s, size_t n) {
+ ReferenceMover::move_references(d, s, n);
+}
+
+template<typename TYPE> inline
+void move_backward_type(sp<TYPE>* d, sp<TYPE> const* s, size_t n) {
+ ReferenceMover::move_references(d, s, n);
+}
+
+template<typename TYPE> inline
+void move_forward_type(wp<TYPE>* d, wp<TYPE> const* s, size_t n) {
+ ReferenceMover::move_references(d, s, n);
+}
+
+template<typename TYPE> inline
+void move_backward_type(wp<TYPE>* d, wp<TYPE> const* s, size_t n) {
+ ReferenceMover::move_references(d, s, n);
+}
+
+
+}; // namespace RSC
+}; // namespace android
+// ---------------------------------------------------------------------------
+
+#endif // RS_REF_BASE_H
diff --git a/current/platform/rs/cpp/util/StrongPointer.h b/current/platform/rs/cpp/util/StrongPointer.h
new file mode 100644
index 0000000..a9995ba
--- /dev/null
+++ b/current/platform/rs/cpp/util/StrongPointer.h
@@ -0,0 +1,222 @@
+/*
+ * Copyright (C) 2013 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef RS_STRONG_POINTER_H
+#define RS_STRONG_POINTER_H
+
+//#include <cutils/atomic.h>
+
+#include <stdint.h>
+#include <sys/types.h>
+#include <stdlib.h>
+
+// ---------------------------------------------------------------------------
+namespace android {
+namespace RSC {
+
+class TextOutput;
+TextOutput& printStrongPointer(TextOutput& to, const void* val);
+
+template<typename T> class wp;
+
+// ---------------------------------------------------------------------------
+
+#define COMPARE(_op_) \
+inline bool operator _op_ (const sp<T>& o) const { \
+ return m_ptr _op_ o.m_ptr; \
+} \
+inline bool operator _op_ (const T* o) const { \
+ return m_ptr _op_ o; \
+} \
+template<typename U> \
+inline bool operator _op_ (const sp<U>& o) const { \
+ return m_ptr _op_ o.m_ptr; \
+} \
+template<typename U> \
+inline bool operator _op_ (const U* o) const { \
+ return m_ptr _op_ o; \
+} \
+inline bool operator _op_ (const wp<T>& o) const { \
+ return m_ptr _op_ o.m_ptr; \
+} \
+template<typename U> \
+inline bool operator _op_ (const wp<U>& o) const { \
+ return m_ptr _op_ o.m_ptr; \
+}
+
+// ---------------------------------------------------------------------------
+
+template <typename T>
+class sp
+{
+public:
+ inline sp() : m_ptr(0) { }
+
+ sp(T* other); // NOLINT, implicit
+ sp(const sp<T>& other);
+ template<typename U> sp(U* other); // NOLINT, implicit
+ template<typename U> sp(const sp<U>& other); // NOLINT, implicit
+
+ ~sp();
+
+ // Assignment
+
+ sp& operator = (T* other);
+ sp& operator = (const sp<T>& other);
+
+ template<typename U> sp& operator = (const sp<U>& other);
+ template<typename U> sp& operator = (U* other);
+
+ //! Special optimization for use by ProcessState (and nobody else).
+ void force_set(T* other);
+
+ // Reset
+
+ void clear();
+
+ // Accessors
+
+ inline T& operator* () const { return *m_ptr; }
+ inline T* operator-> () const { return m_ptr; }
+ inline T* get() const { return m_ptr; }
+
+ // Operators
+
+ COMPARE(==)
+ COMPARE(!=)
+ COMPARE(>)
+ COMPARE(<)
+ COMPARE(<=)
+ COMPARE(>=)
+
+private:
+ template<typename Y> friend class sp;
+ template<typename Y> friend class wp;
+ void set_pointer(T* ptr);
+ T* m_ptr;
+};
+
+#undef COMPARE
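+
+/*
+ * Example (illustrative sketch): sp<> manages the strong count of any type
+ * providing incStrong()/decStrong() (e.g. RefBase or LightRefBase derived);
+ * the reference is released when the last sp<> goes out of scope. `MyObj`
+ * is a placeholder type.
+ *
+ *     sp<MyObj> p = new MyObj();  // incStrong on construction
+ *     p->doSomething();
+ *     p.clear();                  // decStrong; may delete the object
+ */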
+
+template <typename T>
+TextOutput& operator<<(TextOutput& to, const sp<T>& val);
+
+// ---------------------------------------------------------------------------
+// No user serviceable parts below here.
+
+template<typename T>
+sp<T>::sp(T* other)
+: m_ptr(other)
+ {
+ if (other) other->incStrong(this);
+ }
+
+template<typename T>
+sp<T>::sp(const sp<T>& other)
+: m_ptr(other.m_ptr)
+ {
+ if (m_ptr) m_ptr->incStrong(this);
+ }
+
+template<typename T> template<typename U>
+sp<T>::sp(U* other) : m_ptr(other)
+{
+ if (other) ((T*)other)->incStrong(this);
+}
+
+template<typename T> template<typename U>
+sp<T>::sp(const sp<U>& other)
+: m_ptr(other.m_ptr)
+ {
+ if (m_ptr) m_ptr->incStrong(this);
+ }
+
+template<typename T>
+sp<T>::~sp()
+{
+ if (m_ptr) m_ptr->decStrong(this);
+}
+
+template<typename T>
+sp<T>& sp<T>::operator = (const sp<T>& other) {
+ T* otherPtr(other.m_ptr);
+ if (otherPtr) otherPtr->incStrong(this);
+ if (m_ptr) m_ptr->decStrong(this);
+ m_ptr = otherPtr;
+ return *this;
+}
+
+template<typename T>
+sp<T>& sp<T>::operator = (T* other)
+{
+ if (other) other->incStrong(this);
+ if (m_ptr) m_ptr->decStrong(this);
+ m_ptr = other;
+ return *this;
+}
+
+template<typename T> template<typename U>
+sp<T>& sp<T>::operator = (const sp<U>& other)
+{
+ T* otherPtr(other.m_ptr);
+ if (otherPtr) otherPtr->incStrong(this);
+ if (m_ptr) m_ptr->decStrong(this);
+ m_ptr = otherPtr;
+ return *this;
+}
+
+template<typename T> template<typename U>
+sp<T>& sp<T>::operator = (U* other)
+{
+ if (other) ((T*)other)->incStrong(this);
+ if (m_ptr) m_ptr->decStrong(this);
+ m_ptr = other;
+ return *this;
+}
+
+template<typename T>
+void sp<T>::force_set(T* other)
+{
+ other->forceIncStrong(this);
+ m_ptr = other;
+}
+
+template<typename T>
+void sp<T>::clear()
+{
+ if (m_ptr) {
+ m_ptr->decStrong(this);
+ m_ptr = 0;
+ }
+}
+
+template<typename T>
+void sp<T>::set_pointer(T* ptr) {
+ m_ptr = ptr;
+}
+
+template <typename T>
+inline TextOutput& operator<<(TextOutput& to, const sp<T>& val)
+{
+ return printStrongPointer(to, val.get());
+}
+
+}; // namespace RSC
+}; // namespace android
+
+// ---------------------------------------------------------------------------
+
+#endif // RS_STRONG_POINTER_H
diff --git a/current/platform/rs/cpp/util/TypeHelpers.h b/current/platform/rs/cpp/util/TypeHelpers.h
new file mode 100644
index 0000000..e738cd3
--- /dev/null
+++ b/current/platform/rs/cpp/util/TypeHelpers.h
@@ -0,0 +1,302 @@
+/*
+ * Copyright (C) 2013 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef RS_TYPE_HELPERS_H
+#define RS_TYPE_HELPERS_H
+
+#include <new>
+#include <stdint.h>
+#include <string.h>
+#include <sys/types.h>
+
+// ---------------------------------------------------------------------------
+namespace android {
+namespace RSC {
+
+/*
+ * Type traits
+ */
+
+template <typename T> struct trait_trivial_ctor { enum { value = false }; };
+template <typename T> struct trait_trivial_dtor { enum { value = false }; };
+template <typename T> struct trait_trivial_copy { enum { value = false }; };
+template <typename T> struct trait_trivial_move { enum { value = false }; };
+template <typename T> struct trait_pointer { enum { value = false }; };
+template <typename T> struct trait_pointer<T*> { enum { value = true }; };
+
+template <typename TYPE>
+struct traits {
+ enum {
+ // whether this type is a pointer
+ is_pointer = trait_pointer<TYPE>::value,
+ // whether this type's constructor is a no-op
+ has_trivial_ctor = is_pointer || trait_trivial_ctor<TYPE>::value,
+ // whether this type's destructor is a no-op
+ has_trivial_dtor = is_pointer || trait_trivial_dtor<TYPE>::value,
+ // whether this type can be copy-constructed with memcpy
+ has_trivial_copy = is_pointer || trait_trivial_copy<TYPE>::value,
+ // whether this type can be moved with memmove
+ has_trivial_move = is_pointer || trait_trivial_move<TYPE>::value
+ };
+};
+
+template <typename T, typename U>
+struct aggregate_traits {
+ enum {
+ is_pointer = false,
+ has_trivial_ctor =
+ traits<T>::has_trivial_ctor && traits<U>::has_trivial_ctor,
+ has_trivial_dtor =
+ traits<T>::has_trivial_dtor && traits<U>::has_trivial_dtor,
+ has_trivial_copy =
+ traits<T>::has_trivial_copy && traits<U>::has_trivial_copy,
+ has_trivial_move =
+ traits<T>::has_trivial_move && traits<U>::has_trivial_move
+ };
+};
+
+#define RS_TRIVIAL_CTOR_TRAIT( T ) \
+ template<> struct trait_trivial_ctor< T > { enum { value = true }; };
+
+#define RS_TRIVIAL_DTOR_TRAIT( T ) \
+ template<> struct trait_trivial_dtor< T > { enum { value = true }; };
+
+#define RS_TRIVIAL_COPY_TRAIT( T ) \
+ template<> struct trait_trivial_copy< T > { enum { value = true }; };
+
+#define RS_TRIVIAL_MOVE_TRAIT( T ) \
+ template<> struct trait_trivial_move< T > { enum { value = true }; };
+
+#define RS_BASIC_TYPES_TRAITS( T ) \
+ RS_TRIVIAL_CTOR_TRAIT( T ) \
+ RS_TRIVIAL_DTOR_TRAIT( T ) \
+ RS_TRIVIAL_COPY_TRAIT( T ) \
+ RS_TRIVIAL_MOVE_TRAIT( T )
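+
+// Example (illustrative): a user-defined POD type can opt into the
+// memcpy/memmove fast paths below by declaring all four traits at once,
+// inside this namespace:
+//
+//     struct Point { int x, y; };
+//     RS_BASIC_TYPES_TRAITS( Point )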
+
+// ---------------------------------------------------------------------------
+
+/*
+ * Basic type traits
+ */
+
+RS_BASIC_TYPES_TRAITS( void )
+RS_BASIC_TYPES_TRAITS( bool )
+RS_BASIC_TYPES_TRAITS( char )
+RS_BASIC_TYPES_TRAITS( unsigned char )
+RS_BASIC_TYPES_TRAITS( short )
+RS_BASIC_TYPES_TRAITS( unsigned short )
+RS_BASIC_TYPES_TRAITS( int )
+RS_BASIC_TYPES_TRAITS( unsigned int )
+RS_BASIC_TYPES_TRAITS( long )
+RS_BASIC_TYPES_TRAITS( unsigned long )
+RS_BASIC_TYPES_TRAITS( long long )
+RS_BASIC_TYPES_TRAITS( unsigned long long )
+RS_BASIC_TYPES_TRAITS( float )
+RS_BASIC_TYPES_TRAITS( double )
+
+// ---------------------------------------------------------------------------
+
+
+/*
+ * compare and order types
+ */
+
+template<typename TYPE> inline
+int strictly_order_type(const TYPE& lhs, const TYPE& rhs) {
+ return (lhs < rhs) ? 1 : 0;
+}
+
+template<typename TYPE> inline
+int compare_type(const TYPE& lhs, const TYPE& rhs) {
+ return strictly_order_type(rhs, lhs) - strictly_order_type(lhs, rhs);
+}
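+
+// Example (illustrative): compare_type returns a strcmp-style result
+// (<0, 0, >0) derived solely from operator< on TYPE, e.g.
+// compare_type(1, 2) == -1, compare_type(2, 2) == 0, compare_type(3, 2) == 1.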
+
+/*
+ * create, destroy, copy and move types...
+ */
+
+template<typename TYPE> inline
+void construct_type(TYPE* p, size_t n) {
+ if (!traits<TYPE>::has_trivial_ctor) {
+ while (n--) {
+ new(p++) TYPE;
+ }
+ }
+}
+
+template<typename TYPE> inline
+void destroy_type(TYPE* p, size_t n) {
+ if (!traits<TYPE>::has_trivial_dtor) {
+ while (n--) {
+ p->~TYPE();
+ p++;
+ }
+ }
+}
+
+template<typename TYPE> inline
+void copy_type(TYPE* d, const TYPE* s, size_t n) {
+ if (!traits<TYPE>::has_trivial_copy) {
+ while (n--) {
+ new(d) TYPE(*s);
+ d++, s++;
+ }
+ } else {
+ memcpy(d,s,n*sizeof(TYPE));
+ }
+}
+
+template<typename TYPE> inline
+void splat_type(TYPE* where, const TYPE* what, size_t n) {
+ if (!traits<TYPE>::has_trivial_copy) {
+ while (n--) {
+ new(where) TYPE(*what);
+ where++;
+ }
+ } else {
+ while (n--) {
+ *where++ = *what;
+ }
+ }
+}
+
+template<typename TYPE> inline
+void move_forward_type(TYPE* d, const TYPE* s, size_t n = 1) {
+ if ((traits<TYPE>::has_trivial_dtor && traits<TYPE>::has_trivial_copy)
+ || traits<TYPE>::has_trivial_move)
+ {
+ memmove(d,s,n*sizeof(TYPE));
+ } else {
+ d += n;
+ s += n;
+ while (n--) {
+ --d, --s;
+ if (!traits<TYPE>::has_trivial_copy) {
+ new(d) TYPE(*s);
+ } else {
+ *d = *s;
+ }
+ if (!traits<TYPE>::has_trivial_dtor) {
+ s->~TYPE();
+ }
+ }
+ }
+}
+
+template<typename TYPE> inline
+void move_backward_type(TYPE* d, const TYPE* s, size_t n = 1) {
+ if ((traits<TYPE>::has_trivial_dtor && traits<TYPE>::has_trivial_copy)
+ || traits<TYPE>::has_trivial_move)
+ {
+ memmove(d,s,n*sizeof(TYPE));
+ } else {
+ while (n--) {
+ if (!traits<TYPE>::has_trivial_copy) {
+ new(d) TYPE(*s);
+ } else {
+ *d = *s;
+ }
+ if (!traits<TYPE>::has_trivial_dtor) {
+ s->~TYPE();
+ }
+ d++, s++;
+ }
+ }
+}
+
+// ---------------------------------------------------------------------------
+
+/*
+ * a key/value pair
+ */
+
+template <typename KEY, typename VALUE>
+struct key_value_pair_t {
+ typedef KEY key_t;
+ typedef VALUE value_t;
+
+ KEY key;
+ VALUE value;
+ key_value_pair_t() { }
+ key_value_pair_t(const key_value_pair_t& o) : key(o.key), value(o.value) { }
+ key_value_pair_t(const KEY& k, const VALUE& v) : key(k), value(v) { }
+ explicit key_value_pair_t(const KEY& k) : key(k) { }
+ inline bool operator < (const key_value_pair_t& o) const {
+ return strictly_order_type(key, o.key);
+ }
+ inline const KEY& getKey() const {
+ return key;
+ }
+ inline const VALUE& getValue() const {
+ return value;
+ }
+};
+
+template <typename K, typename V>
+struct trait_trivial_ctor< key_value_pair_t<K, V> >
+{ enum { value = aggregate_traits<K,V>::has_trivial_ctor }; };
+template <typename K, typename V>
+struct trait_trivial_dtor< key_value_pair_t<K, V> >
+{ enum { value = aggregate_traits<K,V>::has_trivial_dtor }; };
+template <typename K, typename V>
+struct trait_trivial_copy< key_value_pair_t<K, V> >
+{ enum { value = aggregate_traits<K,V>::has_trivial_copy }; };
+template <typename K, typename V>
+struct trait_trivial_move< key_value_pair_t<K, V> >
+{ enum { value = aggregate_traits<K,V>::has_trivial_move }; };
+
+// ---------------------------------------------------------------------------
+
+/*
+ * Hash codes.
+ */
+typedef uint32_t hash_t;
+
+template <typename TKey>
+hash_t hash_type(const TKey& key);
+
+/* Built-in hash code specializations.
+ * Assumes pointers are 32-bit. */
+#define RS_INT32_HASH(T) \
+ template <> inline hash_t hash_type(const T& value) { return hash_t(value); }
+#define RS_INT64_HASH(T) \
+ template <> inline hash_t hash_type(const T& value) { \
+ return hash_t((value >> 32) ^ value); }
+#define RS_REINTERPRET_HASH(T, R) \
+ template <> inline hash_t hash_type(const T& value) { \
+ return hash_type(*reinterpret_cast<const R*>(&value)); }
+
+RS_INT32_HASH(bool)
+RS_INT32_HASH(int8_t)
+RS_INT32_HASH(uint8_t)
+RS_INT32_HASH(int16_t)
+RS_INT32_HASH(uint16_t)
+RS_INT32_HASH(int32_t)
+RS_INT32_HASH(uint32_t)
+RS_INT64_HASH(int64_t)
+RS_INT64_HASH(uint64_t)
+RS_REINTERPRET_HASH(float, uint32_t)
+RS_REINTERPRET_HASH(double, uint64_t)
+
+template <typename T> inline hash_t hash_type(T* const & value) {
+ return hash_type(uintptr_t(value));
+}
+
+}; // namespace RSC
+}; // namespace android
+// ---------------------------------------------------------------------------
+
+#endif // RS_TYPE_HELPERS_H
diff --git a/current/platform/rs/rsDefines.h b/current/platform/rs/rsDefines.h
new file mode 100644
index 0000000..4e5179b
--- /dev/null
+++ b/current/platform/rs/rsDefines.h
@@ -0,0 +1,510 @@
+/*
+ * Copyright (C) 2013 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef RENDER_SCRIPT_DEFINES_H
+#define RENDER_SCRIPT_DEFINES_H
+
+#include <stdint.h>
+#include <sys/types.h>
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+//////////////////////////////////////////////////////
+//
+
+typedef void * RsAsyncVoidPtr;
+
+typedef void * RsAllocation;
+typedef void * RsAnimation;
+typedef void * RsClosure;
+typedef void * RsContext;
+typedef void * RsDevice;
+typedef void * RsElement;
+typedef void * RsFile;
+typedef void * RsFont;
+typedef void * RsSampler;
+typedef void * RsScript;
+typedef void * RsScriptKernelID;
+typedef void * RsScriptInvokeID;
+typedef void * RsScriptFieldID;
+typedef void * RsScriptMethodID;
+typedef void * RsScriptGroup;
+typedef void * RsScriptGroup2;
+typedef void * RsMesh;
+typedef void * RsPath;
+typedef void * RsType;
+typedef void * RsObjectBase;
+
+typedef void * RsProgram;
+typedef void * RsProgramVertex;
+typedef void * RsProgramFragment;
+typedef void * RsProgramStore;
+typedef void * RsProgramRaster;
+
+typedef void * RsNativeWindow;
+
+typedef void (* RsBitmapCallback_t)(void *);
+
+typedef struct {
+ float m[16];
+} rs_matrix4x4;
+
+typedef struct {
+ float m[9];
+} rs_matrix3x3;
+
+typedef struct {
+ float m[4];
+} rs_matrix2x2;
+
+enum RsDeviceParam {
+ RS_DEVICE_PARAM_FORCE_SOFTWARE_GL,
+ RS_DEVICE_PARAM_COUNT
+};
+
+enum RsContextType {
+ RS_CONTEXT_TYPE_NORMAL,
+ RS_CONTEXT_TYPE_DEBUG,
+ RS_CONTEXT_TYPE_PROFILE
+};
+
+
+enum RsAllocationUsageType {
+ RS_ALLOCATION_USAGE_SCRIPT = 0x0001,
+ RS_ALLOCATION_USAGE_GRAPHICS_TEXTURE = 0x0002,
+ RS_ALLOCATION_USAGE_GRAPHICS_VERTEX = 0x0004,
+ RS_ALLOCATION_USAGE_GRAPHICS_CONSTANTS = 0x0008,
+ RS_ALLOCATION_USAGE_GRAPHICS_RENDER_TARGET = 0x0010,
+ RS_ALLOCATION_USAGE_IO_INPUT = 0x0020,
+ RS_ALLOCATION_USAGE_IO_OUTPUT = 0x0040,
+ RS_ALLOCATION_USAGE_SHARED = 0x0080,
+
+ RS_ALLOCATION_USAGE_INCREMENTAL_SUPPORT = 0x1000,
+ RS_ALLOCATION_USAGE_OEM = 0x8000,
+ RS_ALLOCATION_USAGE_ALL = 0x80FF
+};
+
+enum RsAllocationMipmapControl {
+ RS_ALLOCATION_MIPMAP_NONE = 0,
+ RS_ALLOCATION_MIPMAP_FULL = 1,
+ RS_ALLOCATION_MIPMAP_ON_SYNC_TO_TEXTURE = 2
+};
+
+enum RsAllocationCubemapFace {
+ RS_ALLOCATION_CUBEMAP_FACE_POSITIVE_X = 0,
+ RS_ALLOCATION_CUBEMAP_FACE_NEGATIVE_X = 1,
+ RS_ALLOCATION_CUBEMAP_FACE_POSITIVE_Y = 2,
+ RS_ALLOCATION_CUBEMAP_FACE_NEGATIVE_Y = 3,
+ RS_ALLOCATION_CUBEMAP_FACE_POSITIVE_Z = 4,
+ RS_ALLOCATION_CUBEMAP_FACE_NEGATIVE_Z = 5
+};
+
+enum RsDataType {
+ RS_TYPE_NONE = 0,
+ RS_TYPE_FLOAT_16,
+ RS_TYPE_FLOAT_32,
+ RS_TYPE_FLOAT_64,
+ RS_TYPE_SIGNED_8,
+ RS_TYPE_SIGNED_16,
+ RS_TYPE_SIGNED_32,
+ RS_TYPE_SIGNED_64,
+ RS_TYPE_UNSIGNED_8,
+ RS_TYPE_UNSIGNED_16,
+ RS_TYPE_UNSIGNED_32,
+ RS_TYPE_UNSIGNED_64,
+
+ RS_TYPE_BOOLEAN,
+
+ RS_TYPE_UNSIGNED_5_6_5,
+ RS_TYPE_UNSIGNED_5_5_5_1,
+ RS_TYPE_UNSIGNED_4_4_4_4,
+
+ RS_TYPE_MATRIX_4X4,
+ RS_TYPE_MATRIX_3X3,
+ RS_TYPE_MATRIX_2X2,
+
+ RS_TYPE_ELEMENT = 1000,
+ RS_TYPE_TYPE,
+ RS_TYPE_ALLOCATION,
+ RS_TYPE_SAMPLER,
+ RS_TYPE_SCRIPT,
+ RS_TYPE_MESH,
+ RS_TYPE_PROGRAM_FRAGMENT,
+ RS_TYPE_PROGRAM_VERTEX,
+ RS_TYPE_PROGRAM_RASTER,
+ RS_TYPE_PROGRAM_STORE,
+ RS_TYPE_FONT,
+
+ RS_TYPE_INVALID = 10000,
+};
+
+enum RsDataKind {
+ RS_KIND_USER,
+
+ RS_KIND_PIXEL_L = 7,
+ RS_KIND_PIXEL_A,
+ RS_KIND_PIXEL_LA,
+ RS_KIND_PIXEL_RGB,
+ RS_KIND_PIXEL_RGBA,
+ RS_KIND_PIXEL_DEPTH,
+ RS_KIND_PIXEL_YUV,
+
+ RS_KIND_INVALID = 100,
+};
+
+enum RsYuvFormat {
+ RS_YUV_NONE = 0,
+ RS_YUV_YV12 = 0x32315659, // HAL_PIXEL_FORMAT_YV12 in system/graphics.h
+ RS_YUV_NV21 = 0x11, // HAL_PIXEL_FORMAT_YCrCb_420_SP
+ RS_YUV_420_888 = 0x23, // HAL_PIXEL_FORMAT_YCbCr_420_888
+};
+
+enum RsSamplerParam {
+ RS_SAMPLER_MIN_FILTER,
+ RS_SAMPLER_MAG_FILTER,
+ RS_SAMPLER_WRAP_S,
+ RS_SAMPLER_WRAP_T,
+ RS_SAMPLER_WRAP_R,
+ RS_SAMPLER_ANISO
+};
+
+enum RsSamplerValue {
+ RS_SAMPLER_NEAREST,
+ RS_SAMPLER_LINEAR,
+ RS_SAMPLER_LINEAR_MIP_LINEAR,
+ RS_SAMPLER_WRAP,
+ RS_SAMPLER_CLAMP,
+ RS_SAMPLER_LINEAR_MIP_NEAREST,
+ RS_SAMPLER_MIRRORED_REPEAT,
+
+ RS_SAMPLER_INVALID = 100,
+};
+
+enum RsDimension {
+ RS_DIMENSION_X,
+ RS_DIMENSION_Y,
+ RS_DIMENSION_Z,
+ RS_DIMENSION_LOD,
+ RS_DIMENSION_FACE,
+
+ RS_DIMENSION_ARRAY_0 = 100,
+ RS_DIMENSION_ARRAY_1,
+ RS_DIMENSION_ARRAY_2,
+ RS_DIMENSION_ARRAY_3,
+ RS_DIMENSION_MAX = RS_DIMENSION_ARRAY_3
+};
+
+
+enum RsError {
+ RS_ERROR_NONE = 0,
+ RS_ERROR_BAD_SHADER = 1,
+ RS_ERROR_BAD_SCRIPT = 2,
+ RS_ERROR_BAD_VALUE = 3,
+ RS_ERROR_OUT_OF_MEMORY = 4,
+ RS_ERROR_DRIVER = 5,
+
+ // Errors that only occur in the debug context.
+ RS_ERROR_FATAL_DEBUG = 0x0800,
+
+ RS_ERROR_FATAL_UNKNOWN = 0x1000,
+ RS_ERROR_FATAL_DRIVER = 0x1001,
+ RS_ERROR_FATAL_PROGRAM_LINK = 0x1002
+};
+
+enum RsForEachStrategy {
+ RS_FOR_EACH_STRATEGY_SERIAL = 0,
+ RS_FOR_EACH_STRATEGY_DONT_CARE = 1,
+ RS_FOR_EACH_STRATEGY_DST_LINEAR = 2,
+ RS_FOR_EACH_STRATEGY_TILE_SMALL = 3,
+ RS_FOR_EACH_STRATEGY_TILE_MEDIUM = 4,
+ RS_FOR_EACH_STRATEGY_TILE_LARGE = 5
+};
+
+// Script to Script
+typedef struct {
+ enum RsForEachStrategy strategy;
+ uint32_t xStart;
+ uint32_t xEnd;
+ uint32_t yStart;
+ uint32_t yEnd;
+ uint32_t zStart;
+ uint32_t zEnd;
+ uint32_t arrayStart;
+ uint32_t arrayEnd;
+ uint32_t array2Start;
+ uint32_t array2End;
+ uint32_t array3Start;
+ uint32_t array3End;
+ uint32_t array4Start;
+ uint32_t array4End;
+
+} RsScriptCall;
+
+enum RsContextFlags {
+ RS_CONTEXT_SYNCHRONOUS = 0x0001,
+ RS_CONTEXT_LOW_LATENCY = 0x0002,
+ RS_CONTEXT_LOW_POWER = 0x0004,
+ RS_CONTEXT_WAIT_FOR_ATTACH = 0x0008
+};
+
+enum RsBlasTranspose {
+ RsBlasNoTrans=111,
+ RsBlasTrans=112,
+ RsBlasConjTrans=113
+};
+
+enum RsBlasUplo {
+ RsBlasUpper=121,
+ RsBlasLower=122
+};
+
+enum RsBlasDiag {
+ RsBlasNonUnit=131,
+ RsBlasUnit=132
+};
+
+enum RsBlasSide {
+ RsBlasLeft=141,
+ RsBlasRight=142
+};
+
+enum RsBlasFunction {
+ RsBlas_nop = 0,
+ RsBlas_sdsdot = 1,
+ RsBlas_dsdot = 2,
+ RsBlas_sdot = 3,
+ RsBlas_ddot = 4,
+ RsBlas_cdotu_sub = 5,
+ RsBlas_cdotc_sub = 6,
+ RsBlas_zdotu_sub = 7,
+ RsBlas_zdotc_sub = 8,
+ RsBlas_snrm2 = 9,
+ RsBlas_sasum = 10,
+ RsBlas_dnrm2 = 11,
+ RsBlas_dasum = 12,
+ RsBlas_scnrm2 = 13,
+ RsBlas_scasum = 14,
+ RsBlas_dznrm2 = 15,
+ RsBlas_dzasum = 16,
+ RsBlas_isamax = 17,
+ RsBlas_idamax = 18,
+ RsBlas_icamax = 19,
+ RsBlas_izamax = 20,
+ RsBlas_sswap = 21,
+ RsBlas_scopy = 22,
+ RsBlas_saxpy = 23,
+ RsBlas_dswap = 24,
+ RsBlas_dcopy = 25,
+ RsBlas_daxpy = 26,
+ RsBlas_cswap = 27,
+ RsBlas_ccopy = 28,
+ RsBlas_caxpy = 29,
+ RsBlas_zswap = 30,
+ RsBlas_zcopy = 31,
+ RsBlas_zaxpy = 32,
+ RsBlas_srotg = 33,
+ RsBlas_srotmg = 34,
+ RsBlas_srot = 35,
+ RsBlas_srotm = 36,
+ RsBlas_drotg = 37,
+ RsBlas_drotmg = 38,
+ RsBlas_drot = 39,
+ RsBlas_drotm = 40,
+ RsBlas_sscal = 41,
+ RsBlas_dscal = 42,
+ RsBlas_cscal = 43,
+ RsBlas_zscal = 44,
+ RsBlas_csscal = 45,
+ RsBlas_zdscal = 46,
+ RsBlas_sgemv = 47,
+ RsBlas_sgbmv = 48,
+ RsBlas_strmv = 49,
+ RsBlas_stbmv = 50,
+ RsBlas_stpmv = 51,
+ RsBlas_strsv = 52,
+ RsBlas_stbsv = 53,
+ RsBlas_stpsv = 54,
+ RsBlas_dgemv = 55,
+ RsBlas_dgbmv = 56,
+ RsBlas_dtrmv = 57,
+ RsBlas_dtbmv = 58,
+ RsBlas_dtpmv = 59,
+ RsBlas_dtrsv = 60,
+ RsBlas_dtbsv = 61,
+ RsBlas_dtpsv = 62,
+ RsBlas_cgemv = 63,
+ RsBlas_cgbmv = 64,
+ RsBlas_ctrmv = 65,
+ RsBlas_ctbmv = 66,
+ RsBlas_ctpmv = 67,
+ RsBlas_ctrsv = 68,
+ RsBlas_ctbsv = 69,
+ RsBlas_ctpsv = 70,
+ RsBlas_zgemv = 71,
+ RsBlas_zgbmv = 72,
+ RsBlas_ztrmv = 73,
+ RsBlas_ztbmv = 74,
+ RsBlas_ztpmv = 75,
+ RsBlas_ztrsv = 76,
+ RsBlas_ztbsv = 77,
+ RsBlas_ztpsv = 78,
+ RsBlas_ssymv = 79,
+ RsBlas_ssbmv = 80,
+ RsBlas_sspmv = 81,
+ RsBlas_sger = 82,
+ RsBlas_ssyr = 83,
+ RsBlas_sspr = 84,
+ RsBlas_ssyr2 = 85,
+ RsBlas_sspr2 = 86,
+ RsBlas_dsymv = 87,
+ RsBlas_dsbmv = 88,
+ RsBlas_dspmv = 89,
+ RsBlas_dger = 90,
+ RsBlas_dsyr = 91,
+ RsBlas_dspr = 92,
+ RsBlas_dsyr2 = 93,
+ RsBlas_dspr2 = 94,
+ RsBlas_chemv = 95,
+ RsBlas_chbmv = 96,
+ RsBlas_chpmv = 97,
+ RsBlas_cgeru = 98,
+ RsBlas_cgerc = 99,
+ RsBlas_cher = 100,
+ RsBlas_chpr = 101,
+ RsBlas_cher2 = 102,
+ RsBlas_chpr2 = 103,
+ RsBlas_zhemv = 104,
+ RsBlas_zhbmv = 105,
+ RsBlas_zhpmv = 106,
+ RsBlas_zgeru = 107,
+ RsBlas_zgerc = 108,
+ RsBlas_zher = 109,
+ RsBlas_zhpr = 110,
+ RsBlas_zher2 = 111,
+ RsBlas_zhpr2 = 112,
+ RsBlas_sgemm = 113,
+ RsBlas_ssymm = 114,
+ RsBlas_ssyrk = 115,
+ RsBlas_ssyr2k = 116,
+ RsBlas_strmm = 117,
+ RsBlas_strsm = 118,
+ RsBlas_dgemm = 119,
+ RsBlas_dsymm = 120,
+ RsBlas_dsyrk = 121,
+ RsBlas_dsyr2k = 122,
+ RsBlas_dtrmm = 123,
+ RsBlas_dtrsm = 124,
+ RsBlas_cgemm = 125,
+ RsBlas_csymm = 126,
+ RsBlas_csyrk = 127,
+ RsBlas_csyr2k = 128,
+ RsBlas_ctrmm = 129,
+ RsBlas_ctrsm = 130,
+ RsBlas_zgemm = 131,
+ RsBlas_zsymm = 132,
+ RsBlas_zsyrk = 133,
+ RsBlas_zsyr2k = 134,
+ RsBlas_ztrmm = 135,
+ RsBlas_ztrsm = 136,
+ RsBlas_chemm = 137,
+ RsBlas_cherk = 138,
+ RsBlas_cher2k = 139,
+ RsBlas_zhemm = 140,
+ RsBlas_zherk = 141,
+ RsBlas_zher2k = 142,
+
+ // BLAS extensions start here
+ RsBlas_bnnm = 1000,
+};
+
+// Custom complex types, needed because of limited NDK support for complex types.
+typedef struct {
+ float r;
+ float i;
+} RsFloatComplex;
+
+typedef struct {
+ double r;
+ double i;
+} RsDoubleComplex;
+
+typedef union {
+ float f;
+ RsFloatComplex c;
+ double d;
+ RsDoubleComplex z;
+} RsBlasScalar;
+
+typedef struct {
+ RsBlasFunction func;
+ RsBlasTranspose transA;
+ RsBlasTranspose transB;
+ RsBlasUplo uplo;
+ RsBlasDiag diag;
+ RsBlasSide side;
+ int M;
+ int N;
+ int K;
+ RsBlasScalar alpha;
+ RsBlasScalar beta;
+ int incX;
+ int incY;
+ int KL;
+ int KU;
+ uint8_t a_offset;
+ uint8_t b_offset;
+ int32_t c_offset;
+ int32_t c_mult_int;
+} RsBlasCall;
+
+enum RsGlobalProperty {
+ RS_GLOBAL_TYPE = 0x0000FFFF,
+ RS_GLOBAL_CONSTANT = 0x00010000,
+ RS_GLOBAL_STATIC = 0x00020000,
+ RS_GLOBAL_POINTER = 0x00040000
+};
+
+// Special symbols embedded into a shared object compiled by bcc.
+static const char kRoot[] = "root";
+static const char kInit[] = "init";
+static const char kRsDtor[] = ".rs.dtor";
+static const char kRsInfo[] = ".rs.info";
+static const char kRsGlobalEntries[] = ".rs.global_entries";
+static const char kRsGlobalNames[] = ".rs.global_names";
+static const char kRsGlobalAddresses[] = ".rs.global_addresses";
+static const char kRsGlobalSizes[] = ".rs.global_sizes";
+static const char kRsGlobalProperties[] = ".rs.global_properties";
+
+static inline uint32_t getGlobalRsType(uint32_t properties) {
+ return properties & RS_GLOBAL_TYPE;
+}
+static inline bool isGlobalConstant(uint32_t properties) {
+ return properties & RS_GLOBAL_CONSTANT;
+}
+static inline bool isGlobalStatic(uint32_t properties) {
+ return properties & RS_GLOBAL_STATIC;
+}
+static inline bool isGlobalPointer(uint32_t properties) {
+ return properties & RS_GLOBAL_POINTER;
+}
+
+#ifdef __cplusplus
+};
+#endif
+
+#endif // RENDER_SCRIPT_DEFINES_H
diff --git a/current/platform/rs/scriptc/rs_allocation_create.rsh b/current/platform/rs/scriptc/rs_allocation_create.rsh
new file mode 100644
index 0000000..d7f9fd6
--- /dev/null
+++ b/current/platform/rs/scriptc/rs_allocation_create.rsh
@@ -0,0 +1,1345 @@
+/*
+ * Copyright (C) 2016 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+// Don't edit this file! It is auto-generated by frameworks/rs/api/generate.sh.
+
+/*
+ * rs_allocation_create.rsh: Allocation Creation Functions
+ *
+ * The functions below can be used to create Allocations from a Script.
+ *
+ * These functions can be called directly or indirectly from an invokable
+ * function. If some control-flow path can result in a call to these functions
+ * from a RenderScript kernel function, a compiler error will be generated.
+ */
+
+#ifndef RENDERSCRIPT_RS_ALLOCATION_CREATE_RSH
+#define RENDERSCRIPT_RS_ALLOCATION_CREATE_RSH
+
+/*
+ * rsCreateElement: Creates an rs_element object of the specified data type
+ *
+ * Creates an rs_element object of the specified data type. The data kind of
+ * the Element will be set to RS_KIND_USER and vector_width will be set to 1,
+ * indicating non-vector.
+ *
+ * Parameters:
+ * data_type: Data type of the Element
+ */
+#if (defined(RS_VERSION) && (RS_VERSION >= 24))
+extern rs_element __attribute__((overloadable))
+ rsCreateElement(rs_data_type data_type);
+#endif
+
+/*
+ * rsCreateVectorElement: Creates an rs_element object of the specified data type and vector width
+ *
+ * Creates an rs_element object of the specified data type and vector width.
+ * Value of vector_width must be 2, 3 or 4. The data kind of the Element will
+ * be set to RS_KIND_USER.
+ *
+ * Parameters:
+ * data_type: Data type of the Element
+ * vector_width: Vector width (either 2, 3, or 4)
+ */
+#if (defined(RS_VERSION) && (RS_VERSION >= 24))
+extern rs_element __attribute__((overloadable))
+ rsCreateVectorElement(rs_data_type data_type, uint32_t vector_width);
+#endif
+
+/*
+ * rsCreatePixelElement: Creates an rs_element object of the specified data type and data kind
+ *
+ * Creates an rs_element object of the specified data type and data kind. The
+ * vector_width of the Element will be set to 1, indicating non-vector.
+ *
+ * Parameters:
+ * data_type: Data type of the Element
+ * data_kind: Data kind of the Element
+ */
+#if (defined(RS_VERSION) && (RS_VERSION >= 24))
+extern rs_element __attribute__((overloadable))
+ rsCreatePixelElement(rs_data_type data_type, rs_data_kind data_kind);
+#endif
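+
+/*
+ * Example (illustrative sketch, editorial): the three creators above cover
+ * scalar, vector, and pixel Elements.
+ *
+ *   rs_element scalar = rsCreateElement(RS_TYPE_FLOAT_32);
+ *   rs_element vec4   = rsCreateVectorElement(RS_TYPE_FLOAT_32, 4);
+ *   rs_element rgba   = rsCreatePixelElement(RS_TYPE_UNSIGNED_8, RS_KIND_PIXEL_RGBA);
+ */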
+
+/*
+ * rsCreateType: Creates an rs_type object with the specified Element and shape attributes
+ *
+ * Creates an rs_type object with the specified Element and shape attributes.
+ *
+ * dimX specifies the size of the X dimension.
+ *
+ * dimY, if present and non-zero, indicates that the Y dimension is present and
+ * indicates its size.
+ *
+ * dimZ, if present and non-zero, indicates that the Z dimension is present and
+ * indicates its size.
+ *
+ * mipmaps indicates the presence of level of detail (LOD).
+ *
+ * faces indicates the presence of cubemap faces.
+ *
+ * yuv_format indicates the associated YUV format (or RS_YUV_NONE).
+ *
+ * Parameters:
+ * element: Element to be associated with the Type
+ * dimX: Size along the X dimension
+ * dimY: Size along the Y dimension
+ * dimZ: Size along the Z dimension
+ * mipmaps: Flag indicating if the Type has a mipmap chain
+ * faces: Flag indicating if the Type is a cubemap
+ * yuv_format: YUV layout for the Type
+ */
+#if (defined(RS_VERSION) && (RS_VERSION >= 24))
+extern rs_type __attribute__((overloadable))
+ rsCreateType(rs_element element, uint32_t dimX, uint32_t dimY, uint32_t dimZ, bool mipmaps,
+ bool faces, rs_yuv_format yuv_format);
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 24))
+extern rs_type __attribute__((overloadable))
+ rsCreateType(rs_element element, uint32_t dimX, uint32_t dimY, uint32_t dimZ);
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 24))
+extern rs_type __attribute__((overloadable))
+ rsCreateType(rs_element element, uint32_t dimX, uint32_t dimY);
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 24))
+extern rs_type __attribute__((overloadable))
+ rsCreateType(rs_element element, uint32_t dimX);
+#endif
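+
+/*
+ * Example (illustrative sketch, editorial): the shorter overloads above omit
+ * the trailing arguments; per the description above, absent dimensions are
+ * treated as not present and no mipmaps, faces, or YUV format are requested.
+ *
+ *   rs_element e = rsCreateElement(RS_TYPE_FLOAT_32);
+ *   rs_type t1   = rsCreateType(e, 64);        // 1D, 64 cells
+ *   rs_type t2   = rsCreateType(e, 64, 64);    // 2D, 64 x 64 cells
+ *   rs_type t3   = rsCreateType(e, 64, 64, 0, false, false, RS_YUV_NONE);
+ */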
+
+/*
+ * rsCreateAllocation: Creates an rs_allocation object of the given Type
+ *
+ * Creates an rs_allocation object of the given Type and usage.
+ *
+ * RS_ALLOCATION_USAGE_SCRIPT and RS_ALLOCATION_USAGE_GRAPHICS_TEXTURE are the
+ * only supported usage flags for Allocations created from within a RenderScript
+ * Script.
+ *
+ * You can also use the rsCreateAllocation_<type><width> wrapper functions to
+ * directly create Allocations of scalar and vector numerical types without
+ * creating intermediate rs_element or rs_type objects.
+ *
+ * E.g. rsCreateAllocation_int4() returns an Allocation of int4 data type of
+ * the specified dimensions.
+ *
+ * Parameters:
+ * type: Type of the Allocation
+ * usage: Usage flag for the allocation
+ */
+#if (defined(RS_VERSION) && (RS_VERSION >= 24))
+extern rs_allocation __attribute__((overloadable))
+ rsCreateAllocation(rs_type type, uint32_t usage);
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 24))
+extern rs_allocation __attribute__((overloadable))
+ rsCreateAllocation(rs_type type);
+#endif
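+
+/*
+ * Example (illustrative sketch, editorial): chaining the creation functions
+ * to build a 128x128 Allocation of float4 from a script.
+ *
+ *   rs_element e    = rsCreateVectorElement(RS_TYPE_FLOAT_32, 4);
+ *   rs_type t       = rsCreateType(e, 128, 128);
+ *   rs_allocation a = rsCreateAllocation(t, RS_ALLOCATION_USAGE_SCRIPT);
+ */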
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 24))
+static inline rs_allocation __attribute__((overloadable))
+ rsCreateAllocation_half(uint32_t dimX, uint32_t dimY, uint32_t dimZ) {
+ rs_element e = rsCreateElement(RS_TYPE_FLOAT_16);
+ rs_type t = rsCreateType(e, dimX, dimY, dimZ);
+ return rsCreateAllocation(t);
+}
+#endif
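+
+/*
+ * Example (illustrative, editorial): the wrapper above, like all the
+ * rsCreateAllocation_ wrappers that follow, hides the intermediate objects;
+ * passing 0 for dimZ requests an Allocation without a Z dimension.
+ *
+ *   rs_allocation a = rsCreateAllocation_half(64, 64, 0);  // 64 x 64 cells of half
+ */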
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 24))
+static inline rs_allocation __attribute__((overloadable))
+ rsCreateAllocation_float(uint32_t dimX, uint32_t dimY, uint32_t dimZ) {
+ rs_element e = rsCreateElement(RS_TYPE_FLOAT_32);
+ rs_type t = rsCreateType(e, dimX, dimY, dimZ);
+ return rsCreateAllocation(t);
+}
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 24))
+static inline rs_allocation __attribute__((overloadable))
+ rsCreateAllocation_double(uint32_t dimX, uint32_t dimY, uint32_t dimZ) {
+ rs_element e = rsCreateElement(RS_TYPE_FLOAT_64);
+ rs_type t = rsCreateType(e, dimX, dimY, dimZ);
+ return rsCreateAllocation(t);
+}
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 24))
+static inline rs_allocation __attribute__((overloadable))
+ rsCreateAllocation_char(uint32_t dimX, uint32_t dimY, uint32_t dimZ) {
+ rs_element e = rsCreateElement(RS_TYPE_SIGNED_8);
+ rs_type t = rsCreateType(e, dimX, dimY, dimZ);
+ return rsCreateAllocation(t);
+}
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 24))
+static inline rs_allocation __attribute__((overloadable))
+ rsCreateAllocation_uchar(uint32_t dimX, uint32_t dimY, uint32_t dimZ) {
+ rs_element e = rsCreateElement(RS_TYPE_UNSIGNED_8);
+ rs_type t = rsCreateType(e, dimX, dimY, dimZ);
+ return rsCreateAllocation(t);
+}
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 24))
+static inline rs_allocation __attribute__((overloadable))
+ rsCreateAllocation_short(uint32_t dimX, uint32_t dimY, uint32_t dimZ) {
+ rs_element e = rsCreateElement(RS_TYPE_SIGNED_16);
+ rs_type t = rsCreateType(e, dimX, dimY, dimZ);
+ return rsCreateAllocation(t);
+}
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 24))
+static inline rs_allocation __attribute__((overloadable))
+ rsCreateAllocation_ushort(uint32_t dimX, uint32_t dimY, uint32_t dimZ) {
+ rs_element e = rsCreateElement(RS_TYPE_UNSIGNED_16);
+ rs_type t = rsCreateType(e, dimX, dimY, dimZ);
+ return rsCreateAllocation(t);
+}
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 24))
+static inline rs_allocation __attribute__((overloadable))
+ rsCreateAllocation_int(uint32_t dimX, uint32_t dimY, uint32_t dimZ) {
+ rs_element e = rsCreateElement(RS_TYPE_SIGNED_32);
+ rs_type t = rsCreateType(e, dimX, dimY, dimZ);
+ return rsCreateAllocation(t);
+}
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 24))
+static inline rs_allocation __attribute__((overloadable))
+ rsCreateAllocation_uint(uint32_t dimX, uint32_t dimY, uint32_t dimZ) {
+ rs_element e = rsCreateElement(RS_TYPE_UNSIGNED_32);
+ rs_type t = rsCreateType(e, dimX, dimY, dimZ);
+ return rsCreateAllocation(t);
+}
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 24))
+static inline rs_allocation __attribute__((overloadable))
+ rsCreateAllocation_long(uint32_t dimX, uint32_t dimY, uint32_t dimZ) {
+ rs_element e = rsCreateElement(RS_TYPE_SIGNED_64);
+ rs_type t = rsCreateType(e, dimX, dimY, dimZ);
+ return rsCreateAllocation(t);
+}
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 24))
+static inline rs_allocation __attribute__((overloadable))
+ rsCreateAllocation_ulong(uint32_t dimX, uint32_t dimY, uint32_t dimZ) {
+ rs_element e = rsCreateElement(RS_TYPE_UNSIGNED_64);
+ rs_type t = rsCreateType(e, dimX, dimY, dimZ);
+ return rsCreateAllocation(t);
+}
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 24))
+static inline rs_allocation __attribute__((overloadable))
+ rsCreateAllocation_half2(uint32_t dimX, uint32_t dimY, uint32_t dimZ) {
+ rs_element e = rsCreateVectorElement(RS_TYPE_FLOAT_16, 2);
+ rs_type t = rsCreateType(e, dimX, dimY, dimZ);
+ return rsCreateAllocation(t);
+}
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 24))
+static inline rs_allocation __attribute__((overloadable))
+ rsCreateAllocation_half3(uint32_t dimX, uint32_t dimY, uint32_t dimZ) {
+ rs_element e = rsCreateVectorElement(RS_TYPE_FLOAT_16, 3);
+ rs_type t = rsCreateType(e, dimX, dimY, dimZ);
+ return rsCreateAllocation(t);
+}
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 24))
+static inline rs_allocation __attribute__((overloadable))
+ rsCreateAllocation_half4(uint32_t dimX, uint32_t dimY, uint32_t dimZ) {
+ rs_element e = rsCreateVectorElement(RS_TYPE_FLOAT_16, 4);
+ rs_type t = rsCreateType(e, dimX, dimY, dimZ);
+ return rsCreateAllocation(t);
+}
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 24))
+static inline rs_allocation __attribute__((overloadable))
+ rsCreateAllocation_float2(uint32_t dimX, uint32_t dimY, uint32_t dimZ) {
+ rs_element e = rsCreateVectorElement(RS_TYPE_FLOAT_32, 2);
+ rs_type t = rsCreateType(e, dimX, dimY, dimZ);
+ return rsCreateAllocation(t);
+}
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 24))
+static inline rs_allocation __attribute__((overloadable))
+ rsCreateAllocation_float3(uint32_t dimX, uint32_t dimY, uint32_t dimZ) {
+ rs_element e = rsCreateVectorElement(RS_TYPE_FLOAT_32, 3);
+ rs_type t = rsCreateType(e, dimX, dimY, dimZ);
+ return rsCreateAllocation(t);
+}
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 24))
+static inline rs_allocation __attribute__((overloadable))
+ rsCreateAllocation_float4(uint32_t dimX, uint32_t dimY, uint32_t dimZ) {
+ rs_element e = rsCreateVectorElement(RS_TYPE_FLOAT_32, 4);
+ rs_type t = rsCreateType(e, dimX, dimY, dimZ);
+ return rsCreateAllocation(t);
+}
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 24))
+static inline rs_allocation __attribute__((overloadable))
+ rsCreateAllocation_double2(uint32_t dimX, uint32_t dimY, uint32_t dimZ) {
+ rs_element e = rsCreateVectorElement(RS_TYPE_FLOAT_64, 2);
+ rs_type t = rsCreateType(e, dimX, dimY, dimZ);
+ return rsCreateAllocation(t);
+}
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 24))
+static inline rs_allocation __attribute__((overloadable))
+ rsCreateAllocation_double3(uint32_t dimX, uint32_t dimY, uint32_t dimZ) {
+ rs_element e = rsCreateVectorElement(RS_TYPE_FLOAT_64, 3);
+ rs_type t = rsCreateType(e, dimX, dimY, dimZ);
+ return rsCreateAllocation(t);
+}
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 24))
+static inline rs_allocation __attribute__((overloadable))
+ rsCreateAllocation_double4(uint32_t dimX, uint32_t dimY, uint32_t dimZ) {
+ rs_element e = rsCreateVectorElement(RS_TYPE_FLOAT_64, 4);
+ rs_type t = rsCreateType(e, dimX, dimY, dimZ);
+ return rsCreateAllocation(t);
+}
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 24))
+static inline rs_allocation __attribute__((overloadable))
+ rsCreateAllocation_char2(uint32_t dimX, uint32_t dimY, uint32_t dimZ) {
+ rs_element e = rsCreateVectorElement(RS_TYPE_SIGNED_8, 2);
+ rs_type t = rsCreateType(e, dimX, dimY, dimZ);
+ return rsCreateAllocation(t);
+}
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 24))
+static inline rs_allocation __attribute__((overloadable))
+ rsCreateAllocation_char3(uint32_t dimX, uint32_t dimY, uint32_t dimZ) {
+ rs_element e = rsCreateVectorElement(RS_TYPE_SIGNED_8, 3);
+ rs_type t = rsCreateType(e, dimX, dimY, dimZ);
+ return rsCreateAllocation(t);
+}
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 24))
+static inline rs_allocation __attribute__((overloadable))
+ rsCreateAllocation_char4(uint32_t dimX, uint32_t dimY, uint32_t dimZ) {
+ rs_element e = rsCreateVectorElement(RS_TYPE_SIGNED_8, 4);
+ rs_type t = rsCreateType(e, dimX, dimY, dimZ);
+ return rsCreateAllocation(t);
+}
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 24))
+static inline rs_allocation __attribute__((overloadable))
+ rsCreateAllocation_uchar2(uint32_t dimX, uint32_t dimY, uint32_t dimZ) {
+ rs_element e = rsCreateVectorElement(RS_TYPE_UNSIGNED_8, 2);
+ rs_type t = rsCreateType(e, dimX, dimY, dimZ);
+ return rsCreateAllocation(t);
+}
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 24))
+static inline rs_allocation __attribute__((overloadable))
+ rsCreateAllocation_uchar3(uint32_t dimX, uint32_t dimY, uint32_t dimZ) {
+ rs_element e = rsCreateVectorElement(RS_TYPE_UNSIGNED_8, 3);
+ rs_type t = rsCreateType(e, dimX, dimY, dimZ);
+ return rsCreateAllocation(t);
+}
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 24))
+static inline rs_allocation __attribute__((overloadable))
+ rsCreateAllocation_uchar4(uint32_t dimX, uint32_t dimY, uint32_t dimZ) {
+ rs_element e = rsCreateVectorElement(RS_TYPE_UNSIGNED_8, 4);
+ rs_type t = rsCreateType(e, dimX, dimY, dimZ);
+ return rsCreateAllocation(t);
+}
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 24))
+static inline rs_allocation __attribute__((overloadable))
+ rsCreateAllocation_short2(uint32_t dimX, uint32_t dimY, uint32_t dimZ) {
+ rs_element e = rsCreateVectorElement(RS_TYPE_SIGNED_16, 2);
+ rs_type t = rsCreateType(e, dimX, dimY, dimZ);
+ return rsCreateAllocation(t);
+}
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 24))
+static inline rs_allocation __attribute__((overloadable))
+ rsCreateAllocation_short3(uint32_t dimX, uint32_t dimY, uint32_t dimZ) {
+ rs_element e = rsCreateVectorElement(RS_TYPE_SIGNED_16, 3);
+ rs_type t = rsCreateType(e, dimX, dimY, dimZ);
+ return rsCreateAllocation(t);
+}
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 24))
+static inline rs_allocation __attribute__((overloadable))
+ rsCreateAllocation_short4(uint32_t dimX, uint32_t dimY, uint32_t dimZ) {
+ rs_element e = rsCreateVectorElement(RS_TYPE_SIGNED_16, 4);
+ rs_type t = rsCreateType(e, dimX, dimY, dimZ);
+ return rsCreateAllocation(t);
+}
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 24))
+static inline rs_allocation __attribute__((overloadable))
+ rsCreateAllocation_ushort2(uint32_t dimX, uint32_t dimY, uint32_t dimZ) {
+ rs_element e = rsCreateVectorElement(RS_TYPE_UNSIGNED_16, 2);
+ rs_type t = rsCreateType(e, dimX, dimY, dimZ);
+ return rsCreateAllocation(t);
+}
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 24))
+static inline rs_allocation __attribute__((overloadable))
+ rsCreateAllocation_ushort3(uint32_t dimX, uint32_t dimY, uint32_t dimZ) {
+ rs_element e = rsCreateVectorElement(RS_TYPE_UNSIGNED_16, 3);
+ rs_type t = rsCreateType(e, dimX, dimY, dimZ);
+ return rsCreateAllocation(t);
+}
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 24))
+static inline rs_allocation __attribute__((overloadable))
+ rsCreateAllocation_ushort4(uint32_t dimX, uint32_t dimY, uint32_t dimZ) {
+ rs_element e = rsCreateVectorElement(RS_TYPE_UNSIGNED_16, 4);
+ rs_type t = rsCreateType(e, dimX, dimY, dimZ);
+ return rsCreateAllocation(t);
+}
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 24))
+static inline rs_allocation __attribute__((overloadable))
+ rsCreateAllocation_int2(uint32_t dimX, uint32_t dimY, uint32_t dimZ) {
+ rs_element e = rsCreateVectorElement(RS_TYPE_SIGNED_32, 2);
+ rs_type t = rsCreateType(e, dimX, dimY, dimZ);
+ return rsCreateAllocation(t);
+}
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 24))
+static inline rs_allocation __attribute__((overloadable))
+ rsCreateAllocation_int3(uint32_t dimX, uint32_t dimY, uint32_t dimZ) {
+ rs_element e = rsCreateVectorElement(RS_TYPE_SIGNED_32, 3);
+ rs_type t = rsCreateType(e, dimX, dimY, dimZ);
+ return rsCreateAllocation(t);
+}
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 24))
+static inline rs_allocation __attribute__((overloadable))
+ rsCreateAllocation_int4(uint32_t dimX, uint32_t dimY, uint32_t dimZ) {
+ rs_element e = rsCreateVectorElement(RS_TYPE_SIGNED_32, 4);
+ rs_type t = rsCreateType(e, dimX, dimY, dimZ);
+ return rsCreateAllocation(t);
+}
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 24))
+static inline rs_allocation __attribute__((overloadable))
+ rsCreateAllocation_uint2(uint32_t dimX, uint32_t dimY, uint32_t dimZ) {
+ rs_element e = rsCreateVectorElement(RS_TYPE_UNSIGNED_32, 2);
+ rs_type t = rsCreateType(e, dimX, dimY, dimZ);
+ return rsCreateAllocation(t);
+}
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 24))
+static inline rs_allocation __attribute__((overloadable))
+ rsCreateAllocation_uint3(uint32_t dimX, uint32_t dimY, uint32_t dimZ) {
+ rs_element e = rsCreateVectorElement(RS_TYPE_UNSIGNED_32, 3);
+ rs_type t = rsCreateType(e, dimX, dimY, dimZ);
+ return rsCreateAllocation(t);
+}
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 24))
+static inline rs_allocation __attribute__((overloadable))
+ rsCreateAllocation_uint4(uint32_t dimX, uint32_t dimY, uint32_t dimZ) {
+ rs_element e = rsCreateVectorElement(RS_TYPE_UNSIGNED_32, 4);
+ rs_type t = rsCreateType(e, dimX, dimY, dimZ);
+ return rsCreateAllocation(t);
+}
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 24))
+static inline rs_allocation __attribute__((overloadable))
+ rsCreateAllocation_long2(uint32_t dimX, uint32_t dimY, uint32_t dimZ) {
+ rs_element e = rsCreateVectorElement(RS_TYPE_SIGNED_64, 2);
+ rs_type t = rsCreateType(e, dimX, dimY, dimZ);
+ return rsCreateAllocation(t);
+}
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 24))
+static inline rs_allocation __attribute__((overloadable))
+ rsCreateAllocation_long3(uint32_t dimX, uint32_t dimY, uint32_t dimZ) {
+ rs_element e = rsCreateVectorElement(RS_TYPE_SIGNED_64, 3);
+ rs_type t = rsCreateType(e, dimX, dimY, dimZ);
+ return rsCreateAllocation(t);
+}
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 24))
+static inline rs_allocation __attribute__((overloadable))
+ rsCreateAllocation_long4(uint32_t dimX, uint32_t dimY, uint32_t dimZ) {
+ rs_element e = rsCreateVectorElement(RS_TYPE_SIGNED_64, 4);
+ rs_type t = rsCreateType(e, dimX, dimY, dimZ);
+ return rsCreateAllocation(t);
+}
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 24))
+static inline rs_allocation __attribute__((overloadable))
+ rsCreateAllocation_ulong2(uint32_t dimX, uint32_t dimY, uint32_t dimZ) {
+ rs_element e = rsCreateVectorElement(RS_TYPE_UNSIGNED_64, 2);
+ rs_type t = rsCreateType(e, dimX, dimY, dimZ);
+ return rsCreateAllocation(t);
+}
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 24))
+static inline rs_allocation __attribute__((overloadable))
+ rsCreateAllocation_ulong3(uint32_t dimX, uint32_t dimY, uint32_t dimZ) {
+ rs_element e = rsCreateVectorElement(RS_TYPE_UNSIGNED_64, 3);
+ rs_type t = rsCreateType(e, dimX, dimY, dimZ);
+ return rsCreateAllocation(t);
+}
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 24))
+static inline rs_allocation __attribute__((overloadable))
+ rsCreateAllocation_ulong4(uint32_t dimX, uint32_t dimY, uint32_t dimZ) {
+ rs_element e = rsCreateVectorElement(RS_TYPE_UNSIGNED_64, 4);
+ rs_type t = rsCreateType(e, dimX, dimY, dimZ);
+ return rsCreateAllocation(t);
+}
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 24))
+static inline rs_allocation __attribute__((overloadable))
+ rsCreateAllocation_half(uint32_t dimX, uint32_t dimY) {
+ rs_element e = rsCreateElement(RS_TYPE_FLOAT_16);
+ rs_type t = rsCreateType(e, dimX, dimY);
+ return rsCreateAllocation(t);
+}
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 24))
+static inline rs_allocation __attribute__((overloadable))
+ rsCreateAllocation_float(uint32_t dimX, uint32_t dimY) {
+ rs_element e = rsCreateElement(RS_TYPE_FLOAT_32);
+ rs_type t = rsCreateType(e, dimX, dimY);
+ return rsCreateAllocation(t);
+}
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 24))
+static inline rs_allocation __attribute__((overloadable))
+ rsCreateAllocation_double(uint32_t dimX, uint32_t dimY) {
+ rs_element e = rsCreateElement(RS_TYPE_FLOAT_64);
+ rs_type t = rsCreateType(e, dimX, dimY);
+ return rsCreateAllocation(t);
+}
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 24))
+static inline rs_allocation __attribute__((overloadable))
+ rsCreateAllocation_char(uint32_t dimX, uint32_t dimY) {
+ rs_element e = rsCreateElement(RS_TYPE_SIGNED_8);
+ rs_type t = rsCreateType(e, dimX, dimY);
+ return rsCreateAllocation(t);
+}
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 24))
+static inline rs_allocation __attribute__((overloadable))
+ rsCreateAllocation_uchar(uint32_t dimX, uint32_t dimY) {
+ rs_element e = rsCreateElement(RS_TYPE_UNSIGNED_8);
+ rs_type t = rsCreateType(e, dimX, dimY);
+ return rsCreateAllocation(t);
+}
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 24))
+static inline rs_allocation __attribute__((overloadable))
+ rsCreateAllocation_short(uint32_t dimX, uint32_t dimY) {
+ rs_element e = rsCreateElement(RS_TYPE_SIGNED_16);
+ rs_type t = rsCreateType(e, dimX, dimY);
+ return rsCreateAllocation(t);
+}
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 24))
+static inline rs_allocation __attribute__((overloadable))
+ rsCreateAllocation_ushort(uint32_t dimX, uint32_t dimY) {
+ rs_element e = rsCreateElement(RS_TYPE_UNSIGNED_16);
+ rs_type t = rsCreateType(e, dimX, dimY);
+ return rsCreateAllocation(t);
+}
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 24))
+static inline rs_allocation __attribute__((overloadable))
+ rsCreateAllocation_int(uint32_t dimX, uint32_t dimY) {
+ rs_element e = rsCreateElement(RS_TYPE_SIGNED_32);
+ rs_type t = rsCreateType(e, dimX, dimY);
+ return rsCreateAllocation(t);
+}
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 24))
+static inline rs_allocation __attribute__((overloadable))
+ rsCreateAllocation_uint(uint32_t dimX, uint32_t dimY) {
+ rs_element e = rsCreateElement(RS_TYPE_UNSIGNED_32);
+ rs_type t = rsCreateType(e, dimX, dimY);
+ return rsCreateAllocation(t);
+}
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 24))
+static inline rs_allocation __attribute__((overloadable))
+ rsCreateAllocation_long(uint32_t dimX, uint32_t dimY) {
+ rs_element e = rsCreateElement(RS_TYPE_SIGNED_64);
+ rs_type t = rsCreateType(e, dimX, dimY);
+ return rsCreateAllocation(t);
+}
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 24))
+static inline rs_allocation __attribute__((overloadable))
+ rsCreateAllocation_ulong(uint32_t dimX, uint32_t dimY) {
+ rs_element e = rsCreateElement(RS_TYPE_UNSIGNED_64);
+ rs_type t = rsCreateType(e, dimX, dimY);
+ return rsCreateAllocation(t);
+}
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 24))
+static inline rs_allocation __attribute__((overloadable))
+ rsCreateAllocation_half2(uint32_t dimX, uint32_t dimY) {
+ rs_element e = rsCreateVectorElement(RS_TYPE_FLOAT_16, 2);
+ rs_type t = rsCreateType(e, dimX, dimY);
+ return rsCreateAllocation(t);
+}
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 24))
+static inline rs_allocation __attribute__((overloadable))
+ rsCreateAllocation_half3(uint32_t dimX, uint32_t dimY) {
+ rs_element e = rsCreateVectorElement(RS_TYPE_FLOAT_16, 3);
+ rs_type t = rsCreateType(e, dimX, dimY);
+ return rsCreateAllocation(t);
+}
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 24))
+static inline rs_allocation __attribute__((overloadable))
+ rsCreateAllocation_half4(uint32_t dimX, uint32_t dimY) {
+ rs_element e = rsCreateVectorElement(RS_TYPE_FLOAT_16, 4);
+ rs_type t = rsCreateType(e, dimX, dimY);
+ return rsCreateAllocation(t);
+}
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 24))
+static inline rs_allocation __attribute__((overloadable))
+ rsCreateAllocation_float2(uint32_t dimX, uint32_t dimY) {
+ rs_element e = rsCreateVectorElement(RS_TYPE_FLOAT_32, 2);
+ rs_type t = rsCreateType(e, dimX, dimY);
+ return rsCreateAllocation(t);
+}
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 24))
+static inline rs_allocation __attribute__((overloadable))
+ rsCreateAllocation_float3(uint32_t dimX, uint32_t dimY) {
+ rs_element e = rsCreateVectorElement(RS_TYPE_FLOAT_32, 3);
+ rs_type t = rsCreateType(e, dimX, dimY);
+ return rsCreateAllocation(t);
+}
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 24))
+static inline rs_allocation __attribute__((overloadable))
+ rsCreateAllocation_float4(uint32_t dimX, uint32_t dimY) {
+ rs_element e = rsCreateVectorElement(RS_TYPE_FLOAT_32, 4);
+ rs_type t = rsCreateType(e, dimX, dimY);
+ return rsCreateAllocation(t);
+}
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 24))
+static inline rs_allocation __attribute__((overloadable))
+ rsCreateAllocation_double2(uint32_t dimX, uint32_t dimY) {
+ rs_element e = rsCreateVectorElement(RS_TYPE_FLOAT_64, 2);
+ rs_type t = rsCreateType(e, dimX, dimY);
+ return rsCreateAllocation(t);
+}
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 24))
+static inline rs_allocation __attribute__((overloadable))
+ rsCreateAllocation_double3(uint32_t dimX, uint32_t dimY) {
+ rs_element e = rsCreateVectorElement(RS_TYPE_FLOAT_64, 3);
+ rs_type t = rsCreateType(e, dimX, dimY);
+ return rsCreateAllocation(t);
+}
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 24))
+static inline rs_allocation __attribute__((overloadable))
+ rsCreateAllocation_double4(uint32_t dimX, uint32_t dimY) {
+ rs_element e = rsCreateVectorElement(RS_TYPE_FLOAT_64, 4);
+ rs_type t = rsCreateType(e, dimX, dimY);
+ return rsCreateAllocation(t);
+}
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 24))
+static inline rs_allocation __attribute__((overloadable))
+ rsCreateAllocation_char2(uint32_t dimX, uint32_t dimY) {
+ rs_element e = rsCreateVectorElement(RS_TYPE_SIGNED_8, 2);
+ rs_type t = rsCreateType(e, dimX, dimY);
+ return rsCreateAllocation(t);
+}
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 24))
+static inline rs_allocation __attribute__((overloadable))
+ rsCreateAllocation_char3(uint32_t dimX, uint32_t dimY) {
+ rs_element e = rsCreateVectorElement(RS_TYPE_SIGNED_8, 3);
+ rs_type t = rsCreateType(e, dimX, dimY);
+ return rsCreateAllocation(t);
+}
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 24))
+static inline rs_allocation __attribute__((overloadable))
+ rsCreateAllocation_char4(uint32_t dimX, uint32_t dimY) {
+ rs_element e = rsCreateVectorElement(RS_TYPE_SIGNED_8, 4);
+ rs_type t = rsCreateType(e, dimX, dimY);
+ return rsCreateAllocation(t);
+}
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 24))
+static inline rs_allocation __attribute__((overloadable))
+ rsCreateAllocation_uchar2(uint32_t dimX, uint32_t dimY) {
+ rs_element e = rsCreateVectorElement(RS_TYPE_UNSIGNED_8, 2);
+ rs_type t = rsCreateType(e, dimX, dimY);
+ return rsCreateAllocation(t);
+}
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 24))
+static inline rs_allocation __attribute__((overloadable))
+ rsCreateAllocation_uchar3(uint32_t dimX, uint32_t dimY) {
+ rs_element e = rsCreateVectorElement(RS_TYPE_UNSIGNED_8, 3);
+ rs_type t = rsCreateType(e, dimX, dimY);
+ return rsCreateAllocation(t);
+}
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 24))
+static inline rs_allocation __attribute__((overloadable))
+ rsCreateAllocation_uchar4(uint32_t dimX, uint32_t dimY) {
+ rs_element e = rsCreateVectorElement(RS_TYPE_UNSIGNED_8, 4);
+ rs_type t = rsCreateType(e, dimX, dimY);
+ return rsCreateAllocation(t);
+}
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 24))
+static inline rs_allocation __attribute__((overloadable))
+ rsCreateAllocation_short2(uint32_t dimX, uint32_t dimY) {
+ rs_element e = rsCreateVectorElement(RS_TYPE_SIGNED_16, 2);
+ rs_type t = rsCreateType(e, dimX, dimY);
+ return rsCreateAllocation(t);
+}
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 24))
+static inline rs_allocation __attribute__((overloadable))
+ rsCreateAllocation_short3(uint32_t dimX, uint32_t dimY) {
+ rs_element e = rsCreateVectorElement(RS_TYPE_SIGNED_16, 3);
+ rs_type t = rsCreateType(e, dimX, dimY);
+ return rsCreateAllocation(t);
+}
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 24))
+static inline rs_allocation __attribute__((overloadable))
+ rsCreateAllocation_short4(uint32_t dimX, uint32_t dimY) {
+ rs_element e = rsCreateVectorElement(RS_TYPE_SIGNED_16, 4);
+ rs_type t = rsCreateType(e, dimX, dimY);
+ return rsCreateAllocation(t);
+}
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 24))
+static inline rs_allocation __attribute__((overloadable))
+ rsCreateAllocation_ushort2(uint32_t dimX, uint32_t dimY) {
+ rs_element e = rsCreateVectorElement(RS_TYPE_UNSIGNED_16, 2);
+ rs_type t = rsCreateType(e, dimX, dimY);
+ return rsCreateAllocation(t);
+}
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 24))
+static inline rs_allocation __attribute__((overloadable))
+ rsCreateAllocation_ushort3(uint32_t dimX, uint32_t dimY) {
+ rs_element e = rsCreateVectorElement(RS_TYPE_UNSIGNED_16, 3);
+ rs_type t = rsCreateType(e, dimX, dimY);
+ return rsCreateAllocation(t);
+}
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 24))
+static inline rs_allocation __attribute__((overloadable))
+ rsCreateAllocation_ushort4(uint32_t dimX, uint32_t dimY) {
+ rs_element e = rsCreateVectorElement(RS_TYPE_UNSIGNED_16, 4);
+ rs_type t = rsCreateType(e, dimX, dimY);
+ return rsCreateAllocation(t);
+}
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 24))
+static inline rs_allocation __attribute__((overloadable))
+ rsCreateAllocation_int2(uint32_t dimX, uint32_t dimY) {
+ rs_element e = rsCreateVectorElement(RS_TYPE_SIGNED_32, 2);
+ rs_type t = rsCreateType(e, dimX, dimY);
+ return rsCreateAllocation(t);
+}
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 24))
+static inline rs_allocation __attribute__((overloadable))
+ rsCreateAllocation_int3(uint32_t dimX, uint32_t dimY) {
+ rs_element e = rsCreateVectorElement(RS_TYPE_SIGNED_32, 3);
+ rs_type t = rsCreateType(e, dimX, dimY);
+ return rsCreateAllocation(t);
+}
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 24))
+static inline rs_allocation __attribute__((overloadable))
+ rsCreateAllocation_int4(uint32_t dimX, uint32_t dimY) {
+ rs_element e = rsCreateVectorElement(RS_TYPE_SIGNED_32, 4);
+ rs_type t = rsCreateType(e, dimX, dimY);
+ return rsCreateAllocation(t);
+}
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 24))
+static inline rs_allocation __attribute__((overloadable))
+ rsCreateAllocation_uint2(uint32_t dimX, uint32_t dimY) {
+ rs_element e = rsCreateVectorElement(RS_TYPE_UNSIGNED_32, 2);
+ rs_type t = rsCreateType(e, dimX, dimY);
+ return rsCreateAllocation(t);
+}
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 24))
+static inline rs_allocation __attribute__((overloadable))
+ rsCreateAllocation_uint3(uint32_t dimX, uint32_t dimY) {
+ rs_element e = rsCreateVectorElement(RS_TYPE_UNSIGNED_32, 3);
+ rs_type t = rsCreateType(e, dimX, dimY);
+ return rsCreateAllocation(t);
+}
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 24))
+static inline rs_allocation __attribute__((overloadable))
+ rsCreateAllocation_uint4(uint32_t dimX, uint32_t dimY) {
+ rs_element e = rsCreateVectorElement(RS_TYPE_UNSIGNED_32, 4);
+ rs_type t = rsCreateType(e, dimX, dimY);
+ return rsCreateAllocation(t);
+}
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 24))
+static inline rs_allocation __attribute__((overloadable))
+ rsCreateAllocation_long2(uint32_t dimX, uint32_t dimY) {
+ rs_element e = rsCreateVectorElement(RS_TYPE_SIGNED_64, 2);
+ rs_type t = rsCreateType(e, dimX, dimY);
+ return rsCreateAllocation(t);
+}
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 24))
+static inline rs_allocation __attribute__((overloadable))
+ rsCreateAllocation_long3(uint32_t dimX, uint32_t dimY) {
+ rs_element e = rsCreateVectorElement(RS_TYPE_SIGNED_64, 3);
+ rs_type t = rsCreateType(e, dimX, dimY);
+ return rsCreateAllocation(t);
+}
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 24))
+static inline rs_allocation __attribute__((overloadable))
+ rsCreateAllocation_long4(uint32_t dimX, uint32_t dimY) {
+ rs_element e = rsCreateVectorElement(RS_TYPE_SIGNED_64, 4);
+ rs_type t = rsCreateType(e, dimX, dimY);
+ return rsCreateAllocation(t);
+}
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 24))
+static inline rs_allocation __attribute__((overloadable))
+ rsCreateAllocation_ulong2(uint32_t dimX, uint32_t dimY) {
+ rs_element e = rsCreateVectorElement(RS_TYPE_UNSIGNED_64, 2);
+ rs_type t = rsCreateType(e, dimX, dimY);
+ return rsCreateAllocation(t);
+}
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 24))
+static inline rs_allocation __attribute__((overloadable))
+ rsCreateAllocation_ulong3(uint32_t dimX, uint32_t dimY) {
+ rs_element e = rsCreateVectorElement(RS_TYPE_UNSIGNED_64, 3);
+ rs_type t = rsCreateType(e, dimX, dimY);
+ return rsCreateAllocation(t);
+}
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 24))
+static inline rs_allocation __attribute__((overloadable))
+ rsCreateAllocation_ulong4(uint32_t dimX, uint32_t dimY) {
+ rs_element e = rsCreateVectorElement(RS_TYPE_UNSIGNED_64, 4);
+ rs_type t = rsCreateType(e, dimX, dimY);
+ return rsCreateAllocation(t);
+}
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 24))
+static inline rs_allocation __attribute__((overloadable))
+ rsCreateAllocation_half(uint32_t dimX) {
+ rs_element e = rsCreateElement(RS_TYPE_FLOAT_16);
+ rs_type t = rsCreateType(e, dimX);
+ return rsCreateAllocation(t);
+}
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 24))
+static inline rs_allocation __attribute__((overloadable))
+ rsCreateAllocation_float(uint32_t dimX) {
+ rs_element e = rsCreateElement(RS_TYPE_FLOAT_32);
+ rs_type t = rsCreateType(e, dimX);
+ return rsCreateAllocation(t);
+}
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 24))
+static inline rs_allocation __attribute__((overloadable))
+ rsCreateAllocation_double(uint32_t dimX) {
+ rs_element e = rsCreateElement(RS_TYPE_FLOAT_64);
+ rs_type t = rsCreateType(e, dimX);
+ return rsCreateAllocation(t);
+}
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 24))
+static inline rs_allocation __attribute__((overloadable))
+ rsCreateAllocation_char(uint32_t dimX) {
+ rs_element e = rsCreateElement(RS_TYPE_SIGNED_8);
+ rs_type t = rsCreateType(e, dimX);
+ return rsCreateAllocation(t);
+}
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 24))
+static inline rs_allocation __attribute__((overloadable))
+ rsCreateAllocation_uchar(uint32_t dimX) {
+ rs_element e = rsCreateElement(RS_TYPE_UNSIGNED_8);
+ rs_type t = rsCreateType(e, dimX);
+ return rsCreateAllocation(t);
+}
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 24))
+static inline rs_allocation __attribute__((overloadable))
+ rsCreateAllocation_short(uint32_t dimX) {
+ rs_element e = rsCreateElement(RS_TYPE_SIGNED_16);
+ rs_type t = rsCreateType(e, dimX);
+ return rsCreateAllocation(t);
+}
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 24))
+static inline rs_allocation __attribute__((overloadable))
+ rsCreateAllocation_ushort(uint32_t dimX) {
+ rs_element e = rsCreateElement(RS_TYPE_UNSIGNED_16);
+ rs_type t = rsCreateType(e, dimX);
+ return rsCreateAllocation(t);
+}
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 24))
+static inline rs_allocation __attribute__((overloadable))
+ rsCreateAllocation_int(uint32_t dimX) {
+ rs_element e = rsCreateElement(RS_TYPE_SIGNED_32);
+ rs_type t = rsCreateType(e, dimX);
+ return rsCreateAllocation(t);
+}
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 24))
+static inline rs_allocation __attribute__((overloadable))
+ rsCreateAllocation_uint(uint32_t dimX) {
+ rs_element e = rsCreateElement(RS_TYPE_UNSIGNED_32);
+ rs_type t = rsCreateType(e, dimX);
+ return rsCreateAllocation(t);
+}
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 24))
+static inline rs_allocation __attribute__((overloadable))
+ rsCreateAllocation_long(uint32_t dimX) {
+ rs_element e = rsCreateElement(RS_TYPE_SIGNED_64);
+ rs_type t = rsCreateType(e, dimX);
+ return rsCreateAllocation(t);
+}
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 24))
+static inline rs_allocation __attribute__((overloadable))
+ rsCreateAllocation_ulong(uint32_t dimX) {
+ rs_element e = rsCreateElement(RS_TYPE_UNSIGNED_64);
+ rs_type t = rsCreateType(e, dimX);
+ return rsCreateAllocation(t);
+}
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 24))
+static inline rs_allocation __attribute__((overloadable))
+ rsCreateAllocation_half2(uint32_t dimX) {
+ rs_element e = rsCreateVectorElement(RS_TYPE_FLOAT_16, 2);
+ rs_type t = rsCreateType(e, dimX);
+ return rsCreateAllocation(t);
+}
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 24))
+static inline rs_allocation __attribute__((overloadable))
+ rsCreateAllocation_half3(uint32_t dimX) {
+ rs_element e = rsCreateVectorElement(RS_TYPE_FLOAT_16, 3);
+ rs_type t = rsCreateType(e, dimX);
+ return rsCreateAllocation(t);
+}
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 24))
+static inline rs_allocation __attribute__((overloadable))
+ rsCreateAllocation_half4(uint32_t dimX) {
+ rs_element e = rsCreateVectorElement(RS_TYPE_FLOAT_16, 4);
+ rs_type t = rsCreateType(e, dimX);
+ return rsCreateAllocation(t);
+}
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 24))
+static inline rs_allocation __attribute__((overloadable))
+ rsCreateAllocation_float2(uint32_t dimX) {
+ rs_element e = rsCreateVectorElement(RS_TYPE_FLOAT_32, 2);
+ rs_type t = rsCreateType(e, dimX);
+ return rsCreateAllocation(t);
+}
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 24))
+static inline rs_allocation __attribute__((overloadable))
+ rsCreateAllocation_float3(uint32_t dimX) {
+ rs_element e = rsCreateVectorElement(RS_TYPE_FLOAT_32, 3);
+ rs_type t = rsCreateType(e, dimX);
+ return rsCreateAllocation(t);
+}
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 24))
+static inline rs_allocation __attribute__((overloadable))
+ rsCreateAllocation_float4(uint32_t dimX) {
+ rs_element e = rsCreateVectorElement(RS_TYPE_FLOAT_32, 4);
+ rs_type t = rsCreateType(e, dimX);
+ return rsCreateAllocation(t);
+}
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 24))
+static inline rs_allocation __attribute__((overloadable))
+ rsCreateAllocation_double2(uint32_t dimX) {
+ rs_element e = rsCreateVectorElement(RS_TYPE_FLOAT_64, 2);
+ rs_type t = rsCreateType(e, dimX);
+ return rsCreateAllocation(t);
+}
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 24))
+static inline rs_allocation __attribute__((overloadable))
+ rsCreateAllocation_double3(uint32_t dimX) {
+ rs_element e = rsCreateVectorElement(RS_TYPE_FLOAT_64, 3);
+ rs_type t = rsCreateType(e, dimX);
+ return rsCreateAllocation(t);
+}
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 24))
+static inline rs_allocation __attribute__((overloadable))
+ rsCreateAllocation_double4(uint32_t dimX) {
+ rs_element e = rsCreateVectorElement(RS_TYPE_FLOAT_64, 4);
+ rs_type t = rsCreateType(e, dimX);
+ return rsCreateAllocation(t);
+}
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 24))
+static inline rs_allocation __attribute__((overloadable))
+ rsCreateAllocation_char2(uint32_t dimX) {
+ rs_element e = rsCreateVectorElement(RS_TYPE_SIGNED_8, 2);
+ rs_type t = rsCreateType(e, dimX);
+ return rsCreateAllocation(t);
+}
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 24))
+static inline rs_allocation __attribute__((overloadable))
+ rsCreateAllocation_char3(uint32_t dimX) {
+ rs_element e = rsCreateVectorElement(RS_TYPE_SIGNED_8, 3);
+ rs_type t = rsCreateType(e, dimX);
+ return rsCreateAllocation(t);
+}
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 24))
+static inline rs_allocation __attribute__((overloadable))
+ rsCreateAllocation_char4(uint32_t dimX) {
+ rs_element e = rsCreateVectorElement(RS_TYPE_SIGNED_8, 4);
+ rs_type t = rsCreateType(e, dimX);
+ return rsCreateAllocation(t);
+}
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 24))
+static inline rs_allocation __attribute__((overloadable))
+ rsCreateAllocation_uchar2(uint32_t dimX) {
+ rs_element e = rsCreateVectorElement(RS_TYPE_UNSIGNED_8, 2);
+ rs_type t = rsCreateType(e, dimX);
+ return rsCreateAllocation(t);
+}
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 24))
+static inline rs_allocation __attribute__((overloadable))
+ rsCreateAllocation_uchar3(uint32_t dimX) {
+ rs_element e = rsCreateVectorElement(RS_TYPE_UNSIGNED_8, 3);
+ rs_type t = rsCreateType(e, dimX);
+ return rsCreateAllocation(t);
+}
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 24))
+static inline rs_allocation __attribute__((overloadable))
+ rsCreateAllocation_uchar4(uint32_t dimX) {
+ rs_element e = rsCreateVectorElement(RS_TYPE_UNSIGNED_8, 4);
+ rs_type t = rsCreateType(e, dimX);
+ return rsCreateAllocation(t);
+}
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 24))
+static inline rs_allocation __attribute__((overloadable))
+ rsCreateAllocation_short2(uint32_t dimX) {
+ rs_element e = rsCreateVectorElement(RS_TYPE_SIGNED_16, 2);
+ rs_type t = rsCreateType(e, dimX);
+ return rsCreateAllocation(t);
+}
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 24))
+static inline rs_allocation __attribute__((overloadable))
+ rsCreateAllocation_short3(uint32_t dimX) {
+ rs_element e = rsCreateVectorElement(RS_TYPE_SIGNED_16, 3);
+ rs_type t = rsCreateType(e, dimX);
+ return rsCreateAllocation(t);
+}
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 24))
+static inline rs_allocation __attribute__((overloadable))
+ rsCreateAllocation_short4(uint32_t dimX) {
+ rs_element e = rsCreateVectorElement(RS_TYPE_SIGNED_16, 4);
+ rs_type t = rsCreateType(e, dimX);
+ return rsCreateAllocation(t);
+}
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 24))
+static inline rs_allocation __attribute__((overloadable))
+ rsCreateAllocation_ushort2(uint32_t dimX) {
+ rs_element e = rsCreateVectorElement(RS_TYPE_UNSIGNED_16, 2);
+ rs_type t = rsCreateType(e, dimX);
+ return rsCreateAllocation(t);
+}
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 24))
+static inline rs_allocation __attribute__((overloadable))
+ rsCreateAllocation_ushort3(uint32_t dimX) {
+ rs_element e = rsCreateVectorElement(RS_TYPE_UNSIGNED_16, 3);
+ rs_type t = rsCreateType(e, dimX);
+ return rsCreateAllocation(t);
+}
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 24))
+static inline rs_allocation __attribute__((overloadable))
+ rsCreateAllocation_ushort4(uint32_t dimX) {
+ rs_element e = rsCreateVectorElement(RS_TYPE_UNSIGNED_16, 4);
+ rs_type t = rsCreateType(e, dimX);
+ return rsCreateAllocation(t);
+}
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 24))
+static inline rs_allocation __attribute__((overloadable))
+ rsCreateAllocation_int2(uint32_t dimX) {
+ rs_element e = rsCreateVectorElement(RS_TYPE_SIGNED_32, 2);
+ rs_type t = rsCreateType(e, dimX);
+ return rsCreateAllocation(t);
+}
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 24))
+static inline rs_allocation __attribute__((overloadable))
+ rsCreateAllocation_int3(uint32_t dimX) {
+ rs_element e = rsCreateVectorElement(RS_TYPE_SIGNED_32, 3);
+ rs_type t = rsCreateType(e, dimX);
+ return rsCreateAllocation(t);
+}
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 24))
+static inline rs_allocation __attribute__((overloadable))
+ rsCreateAllocation_int4(uint32_t dimX) {
+ rs_element e = rsCreateVectorElement(RS_TYPE_SIGNED_32, 4);
+ rs_type t = rsCreateType(e, dimX);
+ return rsCreateAllocation(t);
+}
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 24))
+static inline rs_allocation __attribute__((overloadable))
+ rsCreateAllocation_uint2(uint32_t dimX) {
+ rs_element e = rsCreateVectorElement(RS_TYPE_UNSIGNED_32, 2);
+ rs_type t = rsCreateType(e, dimX);
+ return rsCreateAllocation(t);
+}
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 24))
+static inline rs_allocation __attribute__((overloadable))
+ rsCreateAllocation_uint3(uint32_t dimX) {
+ rs_element e = rsCreateVectorElement(RS_TYPE_UNSIGNED_32, 3);
+ rs_type t = rsCreateType(e, dimX);
+ return rsCreateAllocation(t);
+}
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 24))
+static inline rs_allocation __attribute__((overloadable))
+ rsCreateAllocation_uint4(uint32_t dimX) {
+ rs_element e = rsCreateVectorElement(RS_TYPE_UNSIGNED_32, 4);
+ rs_type t = rsCreateType(e, dimX);
+ return rsCreateAllocation(t);
+}
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 24))
+static inline rs_allocation __attribute__((overloadable))
+ rsCreateAllocation_long2(uint32_t dimX) {
+ rs_element e = rsCreateVectorElement(RS_TYPE_SIGNED_64, 2);
+ rs_type t = rsCreateType(e, dimX);
+ return rsCreateAllocation(t);
+}
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 24))
+static inline rs_allocation __attribute__((overloadable))
+ rsCreateAllocation_long3(uint32_t dimX) {
+ rs_element e = rsCreateVectorElement(RS_TYPE_SIGNED_64, 3);
+ rs_type t = rsCreateType(e, dimX);
+ return rsCreateAllocation(t);
+}
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 24))
+static inline rs_allocation __attribute__((overloadable))
+ rsCreateAllocation_long4(uint32_t dimX) {
+ rs_element e = rsCreateVectorElement(RS_TYPE_SIGNED_64, 4);
+ rs_type t = rsCreateType(e, dimX);
+ return rsCreateAllocation(t);
+}
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 24))
+static inline rs_allocation __attribute__((overloadable))
+ rsCreateAllocation_ulong2(uint32_t dimX) {
+ rs_element e = rsCreateVectorElement(RS_TYPE_UNSIGNED_64, 2);
+ rs_type t = rsCreateType(e, dimX);
+ return rsCreateAllocation(t);
+}
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 24))
+static inline rs_allocation __attribute__((overloadable))
+ rsCreateAllocation_ulong3(uint32_t dimX) {
+ rs_element e = rsCreateVectorElement(RS_TYPE_UNSIGNED_64, 3);
+ rs_type t = rsCreateType(e, dimX);
+ return rsCreateAllocation(t);
+}
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 24))
+static inline rs_allocation __attribute__((overloadable))
+ rsCreateAllocation_ulong4(uint32_t dimX) {
+ rs_element e = rsCreateVectorElement(RS_TYPE_UNSIGNED_64, 4);
+ rs_type t = rsCreateType(e, dimX);
+ return rsCreateAllocation(t);
+}
+#endif
+
+#endif // RENDERSCRIPT_RS_ALLOCATION_CREATE_RSH
diff --git a/current/platform/rs/scriptc/rs_allocation_data.rsh b/current/platform/rs/scriptc/rs_allocation_data.rsh
new file mode 100644
index 0000000..ea16767
--- /dev/null
+++ b/current/platform/rs/scriptc/rs_allocation_data.rsh
@@ -0,0 +1,3365 @@
+/*
+ * Copyright (C) 2016 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+// Don't edit this file! It is auto-generated by frameworks/rs/api/generate.sh.
+
+/*
+ * rs_allocation_data.rsh: Allocation Data Access Functions
+ *
+ * The functions below can be used to get and set the cells that comprise
+ * an allocation.
+ *
+ * - Individual cells are accessed using the rsGetElementAt* and
+ *   rsSetElementAt* functions.
+ * - Multiple cells can be copied using the rsAllocationCopy* and
+ * rsAllocationV* functions.
+ * - For getting values through a sampler, use rsSample.
+ *
+ * The rsGetElementAt and rsSetElement* functions are somewhat misnamed.
+ * They don't get or set elements, which are akin to data types; they get
+ * or set cells. Think of them as rsGetCellAt and rsSetCellAt.
+ */
+
+#ifndef RENDERSCRIPT_RS_ALLOCATION_DATA_RSH
+#define RENDERSCRIPT_RS_ALLOCATION_DATA_RSH
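+
+/*
+ * Example (illustrative sketch, editorial): typical single-cell access with
+ * the get/set functions this file declares, assuming a holds scalar floats.
+ *
+ *   float v = rsGetElementAt_float(a, x);  // read cell x
+ *   rsSetElementAt_float(a, 2.f * v, x);   // write it back, doubled
+ */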
+
+/*
+ * rsAllocationCopy1DRange: Copy consecutive cells between allocations
+ *
+ * Copies the specified number of cells from one allocation to another.
+ *
+ * The two allocations must be different. Using this function to copy within
+ * the same allocation yields undefined results.
+ *
+ * The function does not validate whether the offset plus count exceeds the size
+ * of either allocation. Be careful!
+ *
+ * This function should only be called between 1D allocations. Calling it
+ * on other allocations is undefined.
+ *
+ * This function should not be called from inside a kernel, or from any function
+ * that may be called directly or indirectly from a kernel. Doing so would cause a
+ * runtime error.
+ *
+ * Parameters:
+ * dstAlloc: Allocation to copy cells into.
+ * dstOff: Offset in the destination of the first cell to be copied into.
+ * dstMip: Mip level in the destination allocation. 0 if mip mapping is not used.
+ * count: Number of cells to be copied.
+ * srcAlloc: Source allocation.
+ * srcOff: Offset in the source of the first cell to be copied.
+ * srcMip: Mip level in the source allocation. 0 if mip mapping is not used.
+ */
+#if (defined(RS_VERSION) && (RS_VERSION >= 14))
+extern void __attribute__((overloadable))
+ rsAllocationCopy1DRange(rs_allocation dstAlloc, uint32_t dstOff, uint32_t dstMip, uint32_t count,
+ rs_allocation srcAlloc, uint32_t srcOff, uint32_t srcMip);
+#endif
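+
+/*
+ * Example (illustrative, editorial): copying 256 cells from the start of src
+ * into dst at offset 16, with no mipmapping on either side. dst and src are
+ * assumed to be distinct 1D allocations large enough for the range; as noted
+ * above, neither condition is validated.
+ *
+ *   rsAllocationCopy1DRange(dst, 16, 0, 256, src, 0, 0);
+ */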
+
+/*
+ * rsAllocationCopy2DRange: Copy a rectangular region of cells between allocations
+ *
+ * Copies a rectangular region of cells from one allocation to another.
+ * (width * height) cells are copied.
+ *
+ * The two allocations must be different. Using this function to copy within
+ * the same allocation yields undefined results.
+ *
+ * The function does not validate whether the source or destination region
+ * exceeds the size of its respective allocation. Be careful!
+ *
+ * This function should only be called between 2D allocations. Calling it
+ * on other allocations is undefined.
+ *
+ * This function should not be called from inside a kernel, or from any function
+ * that may be called directly or indirectly from a kernel. Doing so would cause a
+ * runtime error.
+ *
+ * Parameters:
+ * dstAlloc: Allocation to copy cells into.
+ * dstXoff: X offset in the destination of the region to be set.
+ * dstYoff: Y offset in the destination of the region to be set.
+ * dstMip: Mip level in the destination allocation. 0 if mip mapping is not used.
+ * dstFace: Cubemap face of the destination allocation. Ignored for allocations that aren't cubemaps.
+ * width: Width of the incoming region to update.
+ * height: Height of the incoming region to update.
+ * srcAlloc: Source allocation.
+ * srcXoff: X offset in the source.
+ * srcYoff: Y offset in the source.
+ * srcMip: Mip level in the source allocation. 0 if mip mapping is not used.
+ * srcFace: Cubemap face of the source allocation. Ignored for allocations that aren't cubemaps.
+ */
+#if (defined(RS_VERSION) && (RS_VERSION >= 14))
+extern void __attribute__((overloadable))
+ rsAllocationCopy2DRange(rs_allocation dstAlloc, uint32_t dstXoff, uint32_t dstYoff,
+ uint32_t dstMip, rs_allocation_cubemap_face dstFace, uint32_t width,
+ uint32_t height, rs_allocation srcAlloc, uint32_t srcXoff,
+ uint32_t srcYoff, uint32_t srcMip, rs_allocation_cubemap_face srcFace);
+#endif
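+
+/*
+ * Example (illustrative, editorial): copying a 32x32 region from the origin
+ * of src to position (8, 8) of dst; neither allocation is a cubemap, so the
+ * face arguments are ignored.
+ *
+ *   rsAllocationCopy2DRange(dst, 8, 8, 0, RS_ALLOCATION_CUBEMAP_FACE_POSITIVE_X,
+ *                           32, 32,
+ *                           src, 0, 0, 0, RS_ALLOCATION_CUBEMAP_FACE_POSITIVE_X);
+ */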
+
+/*
+ * rsAllocationVLoadX: Get a vector from an allocation of scalars
+ *
+ * This function returns a vector composed of successive cells of the allocation.
+ * It assumes that the allocation contains scalars.
+ *
+ * The "X" in the name indicates that successive values are extracted by
+ * increasing the X index. There are currently no functions to get successive
+ * values by incrementing other dimensions. Use multiple calls to rsGetElementAt()
+ * instead.
+ *
+ * For example, when calling rsAllocationVLoadX_int4(a, 20, 30), an int4 composed
+ * of a[20, 30], a[21, 30], a[22, 30], and a[23, 30] is returned.
+ *
+ * When retrieving from a three-dimensional allocation, use the x, y, z variant.
+ * Similarly, use the x, y variant for two-dimensional allocations and x for
+ * one-dimensional allocations.
+ *
+ * For efficiency, this function does not validate the inputs. Trying to wrap
+ * the X index, exceeding the size of the allocation, or using indices incompatible
+ * with the dimensionality of the allocation yields undefined results.
+ *
+ * See also rsAllocationVStoreX().
+ *
+ * Parameters:
+ * a: Allocation to get the data from.
+ * x: X offset in the allocation of the first cell to be copied from.
+ * y: Y offset in the allocation of the first cell to be copied from.
+ * z: Z offset in the allocation of the first cell to be copied from.
+ */
+#if (defined(RS_VERSION) && (RS_VERSION >= 22))
+extern float2 __attribute__((overloadable))
+ rsAllocationVLoadX_float2(rs_allocation a, uint32_t x);
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 22))
+extern float3 __attribute__((overloadable))
+ rsAllocationVLoadX_float3(rs_allocation a, uint32_t x);
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 22))
+extern float4 __attribute__((overloadable))
+ rsAllocationVLoadX_float4(rs_allocation a, uint32_t x);
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 22))
+extern double2 __attribute__((overloadable))
+ rsAllocationVLoadX_double2(rs_allocation a, uint32_t x);
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 22))
+extern double3 __attribute__((overloadable))
+ rsAllocationVLoadX_double3(rs_allocation a, uint32_t x);
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 22))
+extern double4 __attribute__((overloadable))
+ rsAllocationVLoadX_double4(rs_allocation a, uint32_t x);
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 22))
+extern char2 __attribute__((overloadable))
+ rsAllocationVLoadX_char2(rs_allocation a, uint32_t x);
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 22))
+extern char3 __attribute__((overloadable))
+ rsAllocationVLoadX_char3(rs_allocation a, uint32_t x);
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 22))
+extern char4 __attribute__((overloadable))
+ rsAllocationVLoadX_char4(rs_allocation a, uint32_t x);
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 22))
+extern uchar2 __attribute__((overloadable))
+ rsAllocationVLoadX_uchar2(rs_allocation a, uint32_t x);
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 22))
+extern uchar3 __attribute__((overloadable))
+ rsAllocationVLoadX_uchar3(rs_allocation a, uint32_t x);
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 22))
+extern uchar4 __attribute__((overloadable))
+ rsAllocationVLoadX_uchar4(rs_allocation a, uint32_t x);
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 22))
+extern short2 __attribute__((overloadable))
+ rsAllocationVLoadX_short2(rs_allocation a, uint32_t x);
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 22))
+extern short3 __attribute__((overloadable))
+ rsAllocationVLoadX_short3(rs_allocation a, uint32_t x);
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 22))
+extern short4 __attribute__((overloadable))
+ rsAllocationVLoadX_short4(rs_allocation a, uint32_t x);
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 22))
+extern ushort2 __attribute__((overloadable))
+ rsAllocationVLoadX_ushort2(rs_allocation a, uint32_t x);
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 22))
+extern ushort3 __attribute__((overloadable))
+ rsAllocationVLoadX_ushort3(rs_allocation a, uint32_t x);
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 22))
+extern ushort4 __attribute__((overloadable))
+ rsAllocationVLoadX_ushort4(rs_allocation a, uint32_t x);
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 22))
+extern int2 __attribute__((overloadable))
+ rsAllocationVLoadX_int2(rs_allocation a, uint32_t x);
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 22))
+extern int3 __attribute__((overloadable))
+ rsAllocationVLoadX_int3(rs_allocation a, uint32_t x);
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 22))
+extern int4 __attribute__((overloadable))
+ rsAllocationVLoadX_int4(rs_allocation a, uint32_t x);
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 22))
+extern uint2 __attribute__((overloadable))
+ rsAllocationVLoadX_uint2(rs_allocation a, uint32_t x);
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 22))
+extern uint3 __attribute__((overloadable))
+ rsAllocationVLoadX_uint3(rs_allocation a, uint32_t x);
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 22))
+extern uint4 __attribute__((overloadable))
+ rsAllocationVLoadX_uint4(rs_allocation a, uint32_t x);
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 22))
+extern long2 __attribute__((overloadable))
+ rsAllocationVLoadX_long2(rs_allocation a, uint32_t x);
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 22))
+extern long3 __attribute__((overloadable))
+ rsAllocationVLoadX_long3(rs_allocation a, uint32_t x);
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 22))
+extern long4 __attribute__((overloadable))
+ rsAllocationVLoadX_long4(rs_allocation a, uint32_t x);
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 22))
+extern ulong2 __attribute__((overloadable))
+ rsAllocationVLoadX_ulong2(rs_allocation a, uint32_t x);
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 22))
+extern ulong3 __attribute__((overloadable))
+ rsAllocationVLoadX_ulong3(rs_allocation a, uint32_t x);
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 22))
+extern ulong4 __attribute__((overloadable))
+ rsAllocationVLoadX_ulong4(rs_allocation a, uint32_t x);
+#endif
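+
+/*
+ * Example (illustrative, editorial): loading four consecutive floats from a
+ * 1D allocation of scalar floats, starting at index 20.
+ *
+ *   float4 v = rsAllocationVLoadX_float4(a, 20);  // a[20] .. a[23]
+ */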
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 22))
+extern float2 __attribute__((overloadable))
+ rsAllocationVLoadX_float2(rs_allocation a, uint32_t x, uint32_t y);
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 22))
+extern float3 __attribute__((overloadable))
+ rsAllocationVLoadX_float3(rs_allocation a, uint32_t x, uint32_t y);
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 22))
+extern float4 __attribute__((overloadable))
+ rsAllocationVLoadX_float4(rs_allocation a, uint32_t x, uint32_t y);
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 22))
+extern double2 __attribute__((overloadable))
+ rsAllocationVLoadX_double2(rs_allocation a, uint32_t x, uint32_t y);
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 22))
+extern double3 __attribute__((overloadable))
+ rsAllocationVLoadX_double3(rs_allocation a, uint32_t x, uint32_t y);
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 22))
+extern double4 __attribute__((overloadable))
+ rsAllocationVLoadX_double4(rs_allocation a, uint32_t x, uint32_t y);
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 22))
+extern char2 __attribute__((overloadable))
+ rsAllocationVLoadX_char2(rs_allocation a, uint32_t x, uint32_t y);
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 22))
+extern char3 __attribute__((overloadable))
+ rsAllocationVLoadX_char3(rs_allocation a, uint32_t x, uint32_t y);
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 22))
+extern char4 __attribute__((overloadable))
+ rsAllocationVLoadX_char4(rs_allocation a, uint32_t x, uint32_t y);
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 22))
+extern uchar2 __attribute__((overloadable))
+ rsAllocationVLoadX_uchar2(rs_allocation a, uint32_t x, uint32_t y);
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 22))
+extern uchar3 __attribute__((overloadable))
+ rsAllocationVLoadX_uchar3(rs_allocation a, uint32_t x, uint32_t y);
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 22))
+extern uchar4 __attribute__((overloadable))
+ rsAllocationVLoadX_uchar4(rs_allocation a, uint32_t x, uint32_t y);
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 22))
+extern short2 __attribute__((overloadable))
+ rsAllocationVLoadX_short2(rs_allocation a, uint32_t x, uint32_t y);
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 22))
+extern short3 __attribute__((overloadable))
+ rsAllocationVLoadX_short3(rs_allocation a, uint32_t x, uint32_t y);
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 22))
+extern short4 __attribute__((overloadable))
+ rsAllocationVLoadX_short4(rs_allocation a, uint32_t x, uint32_t y);
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 22))
+extern ushort2 __attribute__((overloadable))
+ rsAllocationVLoadX_ushort2(rs_allocation a, uint32_t x, uint32_t y);
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 22))
+extern ushort3 __attribute__((overloadable))
+ rsAllocationVLoadX_ushort3(rs_allocation a, uint32_t x, uint32_t y);
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 22))
+extern ushort4 __attribute__((overloadable))
+ rsAllocationVLoadX_ushort4(rs_allocation a, uint32_t x, uint32_t y);
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 22))
+extern int2 __attribute__((overloadable))
+ rsAllocationVLoadX_int2(rs_allocation a, uint32_t x, uint32_t y);
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 22))
+extern int3 __attribute__((overloadable))
+ rsAllocationVLoadX_int3(rs_allocation a, uint32_t x, uint32_t y);
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 22))
+extern int4 __attribute__((overloadable))
+ rsAllocationVLoadX_int4(rs_allocation a, uint32_t x, uint32_t y);
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 22))
+extern uint2 __attribute__((overloadable))
+ rsAllocationVLoadX_uint2(rs_allocation a, uint32_t x, uint32_t y);
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 22))
+extern uint3 __attribute__((overloadable))
+ rsAllocationVLoadX_uint3(rs_allocation a, uint32_t x, uint32_t y);
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 22))
+extern uint4 __attribute__((overloadable))
+ rsAllocationVLoadX_uint4(rs_allocation a, uint32_t x, uint32_t y);
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 22))
+extern long2 __attribute__((overloadable))
+ rsAllocationVLoadX_long2(rs_allocation a, uint32_t x, uint32_t y);
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 22))
+extern long3 __attribute__((overloadable))
+ rsAllocationVLoadX_long3(rs_allocation a, uint32_t x, uint32_t y);
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 22))
+extern long4 __attribute__((overloadable))
+ rsAllocationVLoadX_long4(rs_allocation a, uint32_t x, uint32_t y);
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 22))
+extern ulong2 __attribute__((overloadable))
+ rsAllocationVLoadX_ulong2(rs_allocation a, uint32_t x, uint32_t y);
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 22))
+extern ulong3 __attribute__((overloadable))
+ rsAllocationVLoadX_ulong3(rs_allocation a, uint32_t x, uint32_t y);
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 22))
+extern ulong4 __attribute__((overloadable))
+ rsAllocationVLoadX_ulong4(rs_allocation a, uint32_t x, uint32_t y);
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 22))
+extern float2 __attribute__((overloadable))
+ rsAllocationVLoadX_float2(rs_allocation a, uint32_t x, uint32_t y, uint32_t z);
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 22))
+extern float3 __attribute__((overloadable))
+ rsAllocationVLoadX_float3(rs_allocation a, uint32_t x, uint32_t y, uint32_t z);
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 22))
+extern float4 __attribute__((overloadable))
+ rsAllocationVLoadX_float4(rs_allocation a, uint32_t x, uint32_t y, uint32_t z);
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 22))
+extern double2 __attribute__((overloadable))
+ rsAllocationVLoadX_double2(rs_allocation a, uint32_t x, uint32_t y, uint32_t z);
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 22))
+extern double3 __attribute__((overloadable))
+ rsAllocationVLoadX_double3(rs_allocation a, uint32_t x, uint32_t y, uint32_t z);
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 22))
+extern double4 __attribute__((overloadable))
+ rsAllocationVLoadX_double4(rs_allocation a, uint32_t x, uint32_t y, uint32_t z);
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 22))
+extern char2 __attribute__((overloadable))
+ rsAllocationVLoadX_char2(rs_allocation a, uint32_t x, uint32_t y, uint32_t z);
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 22))
+extern char3 __attribute__((overloadable))
+ rsAllocationVLoadX_char3(rs_allocation a, uint32_t x, uint32_t y, uint32_t z);
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 22))
+extern char4 __attribute__((overloadable))
+ rsAllocationVLoadX_char4(rs_allocation a, uint32_t x, uint32_t y, uint32_t z);
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 22))
+extern uchar2 __attribute__((overloadable))
+ rsAllocationVLoadX_uchar2(rs_allocation a, uint32_t x, uint32_t y, uint32_t z);
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 22))
+extern uchar3 __attribute__((overloadable))
+ rsAllocationVLoadX_uchar3(rs_allocation a, uint32_t x, uint32_t y, uint32_t z);
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 22))
+extern uchar4 __attribute__((overloadable))
+ rsAllocationVLoadX_uchar4(rs_allocation a, uint32_t x, uint32_t y, uint32_t z);
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 22))
+extern short2 __attribute__((overloadable))
+ rsAllocationVLoadX_short2(rs_allocation a, uint32_t x, uint32_t y, uint32_t z);
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 22))
+extern short3 __attribute__((overloadable))
+ rsAllocationVLoadX_short3(rs_allocation a, uint32_t x, uint32_t y, uint32_t z);
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 22))
+extern short4 __attribute__((overloadable))
+ rsAllocationVLoadX_short4(rs_allocation a, uint32_t x, uint32_t y, uint32_t z);
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 22))
+extern ushort2 __attribute__((overloadable))
+ rsAllocationVLoadX_ushort2(rs_allocation a, uint32_t x, uint32_t y, uint32_t z);
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 22))
+extern ushort3 __attribute__((overloadable))
+ rsAllocationVLoadX_ushort3(rs_allocation a, uint32_t x, uint32_t y, uint32_t z);
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 22))
+extern ushort4 __attribute__((overloadable))
+ rsAllocationVLoadX_ushort4(rs_allocation a, uint32_t x, uint32_t y, uint32_t z);
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 22))
+extern int2 __attribute__((overloadable))
+ rsAllocationVLoadX_int2(rs_allocation a, uint32_t x, uint32_t y, uint32_t z);
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 22))
+extern int3 __attribute__((overloadable))
+ rsAllocationVLoadX_int3(rs_allocation a, uint32_t x, uint32_t y, uint32_t z);
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 22))
+extern int4 __attribute__((overloadable))
+ rsAllocationVLoadX_int4(rs_allocation a, uint32_t x, uint32_t y, uint32_t z);
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 22))
+extern uint2 __attribute__((overloadable))
+ rsAllocationVLoadX_uint2(rs_allocation a, uint32_t x, uint32_t y, uint32_t z);
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 22))
+extern uint3 __attribute__((overloadable))
+ rsAllocationVLoadX_uint3(rs_allocation a, uint32_t x, uint32_t y, uint32_t z);
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 22))
+extern uint4 __attribute__((overloadable))
+ rsAllocationVLoadX_uint4(rs_allocation a, uint32_t x, uint32_t y, uint32_t z);
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 22))
+extern long2 __attribute__((overloadable))
+ rsAllocationVLoadX_long2(rs_allocation a, uint32_t x, uint32_t y, uint32_t z);
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 22))
+extern long3 __attribute__((overloadable))
+ rsAllocationVLoadX_long3(rs_allocation a, uint32_t x, uint32_t y, uint32_t z);
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 22))
+extern long4 __attribute__((overloadable))
+ rsAllocationVLoadX_long4(rs_allocation a, uint32_t x, uint32_t y, uint32_t z);
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 22))
+extern ulong2 __attribute__((overloadable))
+ rsAllocationVLoadX_ulong2(rs_allocation a, uint32_t x, uint32_t y, uint32_t z);
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 22))
+extern ulong3 __attribute__((overloadable))
+ rsAllocationVLoadX_ulong3(rs_allocation a, uint32_t x, uint32_t y, uint32_t z);
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 22))
+extern ulong4 __attribute__((overloadable))
+ rsAllocationVLoadX_ulong4(rs_allocation a, uint32_t x, uint32_t y, uint32_t z);
+#endif
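+
+/*
+ * Example (editor's sketch, not from the generated docs): loading four
+ * consecutive floats from a two dimensional allocation of scalar floats.
+ * The names "a", "x", and "y" are placeholders; the allocation is assumed
+ * to be at least x + 4 cells wide at row y.
+ *
+ *   float4 v = rsAllocationVLoadX_float4(a, x, y);
+ *   // v.x == a[x, y], v.y == a[x+1, y], v.z == a[x+2, y], v.w == a[x+3, y]
+ */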
+
+/*
+ * rsAllocationVStoreX: Store a vector into an allocation of scalars
+ *
+ * This function stores the entries of a vector into successive cells of an allocation.
+ * It assumes that the allocation contains scalars.
+ *
+ * The "X" in the name indicates that successive values are stored by increasing
+ * the X index. There are currently no functions to store successive values by
+ * incrementing other dimensions. Use multiple calls to rsSetElementAt() instead.
+ *
+ * For example, when calling rsAllocationVStoreX_int3(a, v, 20, 30), v.x is stored
+ * at a[20, 30], v.y at a[21, 30], and v.z at a[22, 30].
+ *
+ * When storing into a three dimensional allocation, use the x, y, z variant.
+ * Similarly, use the x, y variant for two dimensional allocations and the x
+ * variant for mono dimensional allocations.
+ *
+ * For efficiency, this function does not validate the inputs. Trying to wrap the
+ * X index, exceeding the size of the allocation, or using indices incompatible
+ * with the dimensionality of the allocation yields undefined results.
+ *
+ * See also rsAllocationVLoadX().
+ *
+ * Parameters:
+ * a: Allocation to store the data into.
+ * val: Value to be stored.
+ * x: X offset in the allocation of the first cell to be copied into.
+ * y: Y offset in the allocation of the first cell to be copied into.
+ * z: Z offset in the allocation of the first cell to be copied into.
+ */
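+
+/*
+ * Example (editor's sketch, using placeholder names): storing a float4 into
+ * four successive cells of a one dimensional allocation of scalar floats,
+ * starting at index 20. The allocation "a" is assumed to be at least 24
+ * cells wide.
+ *
+ *   float4 v = {0.f, 1.f, 2.f, 3.f};
+ *   rsAllocationVStoreX_float4(a, v, 20);
+ *   // v.x lands in a[20], v.y in a[21], v.z in a[22], v.w in a[23]
+ */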
+#if (defined(RS_VERSION) && (RS_VERSION >= 22))
+extern void __attribute__((overloadable))
+ rsAllocationVStoreX_float2(rs_allocation a, float2 val, uint32_t x);
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 22))
+extern void __attribute__((overloadable))
+ rsAllocationVStoreX_float3(rs_allocation a, float3 val, uint32_t x);
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 22))
+extern void __attribute__((overloadable))
+ rsAllocationVStoreX_float4(rs_allocation a, float4 val, uint32_t x);
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 22))
+extern void __attribute__((overloadable))
+ rsAllocationVStoreX_double2(rs_allocation a, double2 val, uint32_t x);
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 22))
+extern void __attribute__((overloadable))
+ rsAllocationVStoreX_double3(rs_allocation a, double3 val, uint32_t x);
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 22))
+extern void __attribute__((overloadable))
+ rsAllocationVStoreX_double4(rs_allocation a, double4 val, uint32_t x);
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 22))
+extern void __attribute__((overloadable))
+ rsAllocationVStoreX_char2(rs_allocation a, char2 val, uint32_t x);
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 22))
+extern void __attribute__((overloadable))
+ rsAllocationVStoreX_char3(rs_allocation a, char3 val, uint32_t x);
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 22))
+extern void __attribute__((overloadable))
+ rsAllocationVStoreX_char4(rs_allocation a, char4 val, uint32_t x);
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 22))
+extern void __attribute__((overloadable))
+ rsAllocationVStoreX_uchar2(rs_allocation a, uchar2 val, uint32_t x);
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 22))
+extern void __attribute__((overloadable))
+ rsAllocationVStoreX_uchar3(rs_allocation a, uchar3 val, uint32_t x);
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 22))
+extern void __attribute__((overloadable))
+ rsAllocationVStoreX_uchar4(rs_allocation a, uchar4 val, uint32_t x);
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 22))
+extern void __attribute__((overloadable))
+ rsAllocationVStoreX_short2(rs_allocation a, short2 val, uint32_t x);
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 22))
+extern void __attribute__((overloadable))
+ rsAllocationVStoreX_short3(rs_allocation a, short3 val, uint32_t x);
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 22))
+extern void __attribute__((overloadable))
+ rsAllocationVStoreX_short4(rs_allocation a, short4 val, uint32_t x);
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 22))
+extern void __attribute__((overloadable))
+ rsAllocationVStoreX_ushort2(rs_allocation a, ushort2 val, uint32_t x);
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 22))
+extern void __attribute__((overloadable))
+ rsAllocationVStoreX_ushort3(rs_allocation a, ushort3 val, uint32_t x);
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 22))
+extern void __attribute__((overloadable))
+ rsAllocationVStoreX_ushort4(rs_allocation a, ushort4 val, uint32_t x);
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 22))
+extern void __attribute__((overloadable))
+ rsAllocationVStoreX_int2(rs_allocation a, int2 val, uint32_t x);
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 22))
+extern void __attribute__((overloadable))
+ rsAllocationVStoreX_int3(rs_allocation a, int3 val, uint32_t x);
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 22))
+extern void __attribute__((overloadable))
+ rsAllocationVStoreX_int4(rs_allocation a, int4 val, uint32_t x);
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 22))
+extern void __attribute__((overloadable))
+ rsAllocationVStoreX_uint2(rs_allocation a, uint2 val, uint32_t x);
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 22))
+extern void __attribute__((overloadable))
+ rsAllocationVStoreX_uint3(rs_allocation a, uint3 val, uint32_t x);
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 22))
+extern void __attribute__((overloadable))
+ rsAllocationVStoreX_uint4(rs_allocation a, uint4 val, uint32_t x);
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 22))
+extern void __attribute__((overloadable))
+ rsAllocationVStoreX_long2(rs_allocation a, long2 val, uint32_t x);
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 22))
+extern void __attribute__((overloadable))
+ rsAllocationVStoreX_long3(rs_allocation a, long3 val, uint32_t x);
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 22))
+extern void __attribute__((overloadable))
+ rsAllocationVStoreX_long4(rs_allocation a, long4 val, uint32_t x);
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 22))
+extern void __attribute__((overloadable))
+ rsAllocationVStoreX_ulong2(rs_allocation a, ulong2 val, uint32_t x);
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 22))
+extern void __attribute__((overloadable))
+ rsAllocationVStoreX_ulong3(rs_allocation a, ulong3 val, uint32_t x);
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 22))
+extern void __attribute__((overloadable))
+ rsAllocationVStoreX_ulong4(rs_allocation a, ulong4 val, uint32_t x);
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 22))
+extern void __attribute__((overloadable))
+ rsAllocationVStoreX_float2(rs_allocation a, float2 val, uint32_t x, uint32_t y);
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 22))
+extern void __attribute__((overloadable))
+ rsAllocationVStoreX_float3(rs_allocation a, float3 val, uint32_t x, uint32_t y);
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 22))
+extern void __attribute__((overloadable))
+ rsAllocationVStoreX_float4(rs_allocation a, float4 val, uint32_t x, uint32_t y);
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 22))
+extern void __attribute__((overloadable))
+ rsAllocationVStoreX_double2(rs_allocation a, double2 val, uint32_t x, uint32_t y);
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 22))
+extern void __attribute__((overloadable))
+ rsAllocationVStoreX_double3(rs_allocation a, double3 val, uint32_t x, uint32_t y);
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 22))
+extern void __attribute__((overloadable))
+ rsAllocationVStoreX_double4(rs_allocation a, double4 val, uint32_t x, uint32_t y);
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 22))
+extern void __attribute__((overloadable))
+ rsAllocationVStoreX_char2(rs_allocation a, char2 val, uint32_t x, uint32_t y);
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 22))
+extern void __attribute__((overloadable))
+ rsAllocationVStoreX_char3(rs_allocation a, char3 val, uint32_t x, uint32_t y);
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 22))
+extern void __attribute__((overloadable))
+ rsAllocationVStoreX_char4(rs_allocation a, char4 val, uint32_t x, uint32_t y);
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 22))
+extern void __attribute__((overloadable))
+ rsAllocationVStoreX_uchar2(rs_allocation a, uchar2 val, uint32_t x, uint32_t y);
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 22))
+extern void __attribute__((overloadable))
+ rsAllocationVStoreX_uchar3(rs_allocation a, uchar3 val, uint32_t x, uint32_t y);
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 22))
+extern void __attribute__((overloadable))
+ rsAllocationVStoreX_uchar4(rs_allocation a, uchar4 val, uint32_t x, uint32_t y);
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 22))
+extern void __attribute__((overloadable))
+ rsAllocationVStoreX_short2(rs_allocation a, short2 val, uint32_t x, uint32_t y);
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 22))
+extern void __attribute__((overloadable))
+ rsAllocationVStoreX_short3(rs_allocation a, short3 val, uint32_t x, uint32_t y);
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 22))
+extern void __attribute__((overloadable))
+ rsAllocationVStoreX_short4(rs_allocation a, short4 val, uint32_t x, uint32_t y);
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 22))
+extern void __attribute__((overloadable))
+ rsAllocationVStoreX_ushort2(rs_allocation a, ushort2 val, uint32_t x, uint32_t y);
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 22))
+extern void __attribute__((overloadable))
+ rsAllocationVStoreX_ushort3(rs_allocation a, ushort3 val, uint32_t x, uint32_t y);
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 22))
+extern void __attribute__((overloadable))
+ rsAllocationVStoreX_ushort4(rs_allocation a, ushort4 val, uint32_t x, uint32_t y);
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 22))
+extern void __attribute__((overloadable))
+ rsAllocationVStoreX_int2(rs_allocation a, int2 val, uint32_t x, uint32_t y);
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 22))
+extern void __attribute__((overloadable))
+ rsAllocationVStoreX_int3(rs_allocation a, int3 val, uint32_t x, uint32_t y);
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 22))
+extern void __attribute__((overloadable))
+ rsAllocationVStoreX_int4(rs_allocation a, int4 val, uint32_t x, uint32_t y);
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 22))
+extern void __attribute__((overloadable))
+ rsAllocationVStoreX_uint2(rs_allocation a, uint2 val, uint32_t x, uint32_t y);
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 22))
+extern void __attribute__((overloadable))
+ rsAllocationVStoreX_uint3(rs_allocation a, uint3 val, uint32_t x, uint32_t y);
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 22))
+extern void __attribute__((overloadable))
+ rsAllocationVStoreX_uint4(rs_allocation a, uint4 val, uint32_t x, uint32_t y);
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 22))
+extern void __attribute__((overloadable))
+ rsAllocationVStoreX_long2(rs_allocation a, long2 val, uint32_t x, uint32_t y);
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 22))
+extern void __attribute__((overloadable))
+ rsAllocationVStoreX_long3(rs_allocation a, long3 val, uint32_t x, uint32_t y);
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 22))
+extern void __attribute__((overloadable))
+ rsAllocationVStoreX_long4(rs_allocation a, long4 val, uint32_t x, uint32_t y);
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 22))
+extern void __attribute__((overloadable))
+ rsAllocationVStoreX_ulong2(rs_allocation a, ulong2 val, uint32_t x, uint32_t y);
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 22))
+extern void __attribute__((overloadable))
+ rsAllocationVStoreX_ulong3(rs_allocation a, ulong3 val, uint32_t x, uint32_t y);
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 22))
+extern void __attribute__((overloadable))
+ rsAllocationVStoreX_ulong4(rs_allocation a, ulong4 val, uint32_t x, uint32_t y);
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 22))
+extern void __attribute__((overloadable))
+ rsAllocationVStoreX_float2(rs_allocation a, float2 val, uint32_t x, uint32_t y, uint32_t z);
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 22))
+extern void __attribute__((overloadable))
+ rsAllocationVStoreX_float3(rs_allocation a, float3 val, uint32_t x, uint32_t y, uint32_t z);
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 22))
+extern void __attribute__((overloadable))
+ rsAllocationVStoreX_float4(rs_allocation a, float4 val, uint32_t x, uint32_t y, uint32_t z);
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 22))
+extern void __attribute__((overloadable))
+ rsAllocationVStoreX_double2(rs_allocation a, double2 val, uint32_t x, uint32_t y, uint32_t z);
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 22))
+extern void __attribute__((overloadable))
+ rsAllocationVStoreX_double3(rs_allocation a, double3 val, uint32_t x, uint32_t y, uint32_t z);
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 22))
+extern void __attribute__((overloadable))
+ rsAllocationVStoreX_double4(rs_allocation a, double4 val, uint32_t x, uint32_t y, uint32_t z);
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 22))
+extern void __attribute__((overloadable))
+ rsAllocationVStoreX_char2(rs_allocation a, char2 val, uint32_t x, uint32_t y, uint32_t z);
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 22))
+extern void __attribute__((overloadable))
+ rsAllocationVStoreX_char3(rs_allocation a, char3 val, uint32_t x, uint32_t y, uint32_t z);
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 22))
+extern void __attribute__((overloadable))
+ rsAllocationVStoreX_char4(rs_allocation a, char4 val, uint32_t x, uint32_t y, uint32_t z);
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 22))
+extern void __attribute__((overloadable))
+ rsAllocationVStoreX_uchar2(rs_allocation a, uchar2 val, uint32_t x, uint32_t y, uint32_t z);
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 22))
+extern void __attribute__((overloadable))
+ rsAllocationVStoreX_uchar3(rs_allocation a, uchar3 val, uint32_t x, uint32_t y, uint32_t z);
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 22))
+extern void __attribute__((overloadable))
+ rsAllocationVStoreX_uchar4(rs_allocation a, uchar4 val, uint32_t x, uint32_t y, uint32_t z);
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 22))
+extern void __attribute__((overloadable))
+ rsAllocationVStoreX_short2(rs_allocation a, short2 val, uint32_t x, uint32_t y, uint32_t z);
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 22))
+extern void __attribute__((overloadable))
+ rsAllocationVStoreX_short3(rs_allocation a, short3 val, uint32_t x, uint32_t y, uint32_t z);
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 22))
+extern void __attribute__((overloadable))
+ rsAllocationVStoreX_short4(rs_allocation a, short4 val, uint32_t x, uint32_t y, uint32_t z);
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 22))
+extern void __attribute__((overloadable))
+ rsAllocationVStoreX_ushort2(rs_allocation a, ushort2 val, uint32_t x, uint32_t y, uint32_t z);
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 22))
+extern void __attribute__((overloadable))
+ rsAllocationVStoreX_ushort3(rs_allocation a, ushort3 val, uint32_t x, uint32_t y, uint32_t z);
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 22))
+extern void __attribute__((overloadable))
+ rsAllocationVStoreX_ushort4(rs_allocation a, ushort4 val, uint32_t x, uint32_t y, uint32_t z);
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 22))
+extern void __attribute__((overloadable))
+ rsAllocationVStoreX_int2(rs_allocation a, int2 val, uint32_t x, uint32_t y, uint32_t z);
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 22))
+extern void __attribute__((overloadable))
+ rsAllocationVStoreX_int3(rs_allocation a, int3 val, uint32_t x, uint32_t y, uint32_t z);
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 22))
+extern void __attribute__((overloadable))
+ rsAllocationVStoreX_int4(rs_allocation a, int4 val, uint32_t x, uint32_t y, uint32_t z);
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 22))
+extern void __attribute__((overloadable))
+ rsAllocationVStoreX_uint2(rs_allocation a, uint2 val, uint32_t x, uint32_t y, uint32_t z);
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 22))
+extern void __attribute__((overloadable))
+ rsAllocationVStoreX_uint3(rs_allocation a, uint3 val, uint32_t x, uint32_t y, uint32_t z);
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 22))
+extern void __attribute__((overloadable))
+ rsAllocationVStoreX_uint4(rs_allocation a, uint4 val, uint32_t x, uint32_t y, uint32_t z);
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 22))
+extern void __attribute__((overloadable))
+ rsAllocationVStoreX_long2(rs_allocation a, long2 val, uint32_t x, uint32_t y, uint32_t z);
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 22))
+extern void __attribute__((overloadable))
+ rsAllocationVStoreX_long3(rs_allocation a, long3 val, uint32_t x, uint32_t y, uint32_t z);
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 22))
+extern void __attribute__((overloadable))
+ rsAllocationVStoreX_long4(rs_allocation a, long4 val, uint32_t x, uint32_t y, uint32_t z);
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 22))
+extern void __attribute__((overloadable))
+ rsAllocationVStoreX_ulong2(rs_allocation a, ulong2 val, uint32_t x, uint32_t y, uint32_t z);
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 22))
+extern void __attribute__((overloadable))
+ rsAllocationVStoreX_ulong3(rs_allocation a, ulong3 val, uint32_t x, uint32_t y, uint32_t z);
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 22))
+extern void __attribute__((overloadable))
+ rsAllocationVStoreX_ulong4(rs_allocation a, ulong4 val, uint32_t x, uint32_t y, uint32_t z);
+#endif
+
+/*
+ * rsGetElementAt: Return a cell from an allocation
+ *
+ * This function extracts a single cell from an allocation.
+ *
+ * When retrieving from a three dimensional allocation, use the x, y, z variant.
+ * Similarly, use the x, y variant for two dimensional allocations and the x
+ * variant for mono dimensional allocations.
+ *
+ * This function comes in two styles. One returns the address of the value as a
+ * const void*, the other returns the value itself, e.g. rsGetElementAt() vs.
+ * rsGetElementAt_int4(). For primitive types, always use the latter, as it is
+ * more efficient.
+ */
+extern const void* __attribute__((overloadable))
+ rsGetElementAt(rs_allocation a, uint32_t x);
+
+extern const void* __attribute__((overloadable))
+ rsGetElementAt(rs_allocation a, uint32_t x, uint32_t y);
+
+extern const void* __attribute__((overloadable))
+ rsGetElementAt(rs_allocation a, uint32_t x, uint32_t y, uint32_t z);
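+
+/*
+ * Example (editor's sketch, placeholder names): the two styles side by side
+ * for an allocation "a" whose cells are int4.
+ *
+ *   // Typed style: returns the value directly; preferred for primitives.
+ *   int4 v = rsGetElementAt_int4(a, x);
+ *
+ *   // Generic style: returns a pointer into the allocation's storage.
+ *   const int4* p = (const int4*)rsGetElementAt(a, x);
+ *   int4 w = *p;
+ */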
+
+#if !defined(RS_VERSION) || (RS_VERSION <= 17)
+static inline float __attribute__((overloadable))
+ rsGetElementAt_float(rs_allocation a, uint32_t x) {
+ return ((float *)rsGetElementAt(a, x))[0];
+}
+#endif
+
+#if !defined(RS_VERSION) || (RS_VERSION <= 17)
+static inline float2 __attribute__((overloadable))
+ rsGetElementAt_float2(rs_allocation a, uint32_t x) {
+ return ((float2 *)rsGetElementAt(a, x))[0];
+}
+#endif
+
+#if !defined(RS_VERSION) || (RS_VERSION <= 17)
+static inline float3 __attribute__((overloadable))
+ rsGetElementAt_float3(rs_allocation a, uint32_t x) {
+ return ((float3 *)rsGetElementAt(a, x))[0];
+}
+#endif
+
+#if !defined(RS_VERSION) || (RS_VERSION <= 17)
+static inline float4 __attribute__((overloadable))
+ rsGetElementAt_float4(rs_allocation a, uint32_t x) {
+ return ((float4 *)rsGetElementAt(a, x))[0];
+}
+#endif
+
+#if !defined(RS_VERSION) || (RS_VERSION <= 17)
+static inline double __attribute__((overloadable))
+ rsGetElementAt_double(rs_allocation a, uint32_t x) {
+ return ((double *)rsGetElementAt(a, x))[0];
+}
+#endif
+
+#if !defined(RS_VERSION) || (RS_VERSION <= 17)
+static inline double2 __attribute__((overloadable))
+ rsGetElementAt_double2(rs_allocation a, uint32_t x) {
+ return ((double2 *)rsGetElementAt(a, x))[0];
+}
+#endif
+
+#if !defined(RS_VERSION) || (RS_VERSION <= 17)
+static inline double3 __attribute__((overloadable))
+ rsGetElementAt_double3(rs_allocation a, uint32_t x) {
+ return ((double3 *)rsGetElementAt(a, x))[0];
+}
+#endif
+
+#if !defined(RS_VERSION) || (RS_VERSION <= 17)
+static inline double4 __attribute__((overloadable))
+ rsGetElementAt_double4(rs_allocation a, uint32_t x) {
+ return ((double4 *)rsGetElementAt(a, x))[0];
+}
+#endif
+
+#if !defined(RS_VERSION) || (RS_VERSION <= 17)
+static inline char __attribute__((overloadable))
+ rsGetElementAt_char(rs_allocation a, uint32_t x) {
+ return ((char *)rsGetElementAt(a, x))[0];
+}
+#endif
+
+#if !defined(RS_VERSION) || (RS_VERSION <= 17)
+static inline char2 __attribute__((overloadable))
+ rsGetElementAt_char2(rs_allocation a, uint32_t x) {
+ return ((char2 *)rsGetElementAt(a, x))[0];
+}
+#endif
+
+#if !defined(RS_VERSION) || (RS_VERSION <= 17)
+static inline char3 __attribute__((overloadable))
+ rsGetElementAt_char3(rs_allocation a, uint32_t x) {
+ return ((char3 *)rsGetElementAt(a, x))[0];
+}
+#endif
+
+#if !defined(RS_VERSION) || (RS_VERSION <= 17)
+static inline char4 __attribute__((overloadable))
+ rsGetElementAt_char4(rs_allocation a, uint32_t x) {
+ return ((char4 *)rsGetElementAt(a, x))[0];
+}
+#endif
+
+#if !defined(RS_VERSION) || (RS_VERSION <= 17)
+static inline uchar __attribute__((overloadable))
+ rsGetElementAt_uchar(rs_allocation a, uint32_t x) {
+ return ((uchar *)rsGetElementAt(a, x))[0];
+}
+#endif
+
+#if !defined(RS_VERSION) || (RS_VERSION <= 17)
+static inline uchar2 __attribute__((overloadable))
+ rsGetElementAt_uchar2(rs_allocation a, uint32_t x) {
+ return ((uchar2 *)rsGetElementAt(a, x))[0];
+}
+#endif
+
+#if !defined(RS_VERSION) || (RS_VERSION <= 17)
+static inline uchar3 __attribute__((overloadable))
+ rsGetElementAt_uchar3(rs_allocation a, uint32_t x) {
+ return ((uchar3 *)rsGetElementAt(a, x))[0];
+}
+#endif
+
+#if !defined(RS_VERSION) || (RS_VERSION <= 17)
+static inline uchar4 __attribute__((overloadable))
+ rsGetElementAt_uchar4(rs_allocation a, uint32_t x) {
+ return ((uchar4 *)rsGetElementAt(a, x))[0];
+}
+#endif
+
+#if !defined(RS_VERSION) || (RS_VERSION <= 17)
+static inline short __attribute__((overloadable))
+ rsGetElementAt_short(rs_allocation a, uint32_t x) {
+ return ((short *)rsGetElementAt(a, x))[0];
+}
+#endif
+
+#if !defined(RS_VERSION) || (RS_VERSION <= 17)
+static inline short2 __attribute__((overloadable))
+ rsGetElementAt_short2(rs_allocation a, uint32_t x) {
+ return ((short2 *)rsGetElementAt(a, x))[0];
+}
+#endif
+
+#if !defined(RS_VERSION) || (RS_VERSION <= 17)
+static inline short3 __attribute__((overloadable))
+ rsGetElementAt_short3(rs_allocation a, uint32_t x) {
+ return ((short3 *)rsGetElementAt(a, x))[0];
+}
+#endif
+
+#if !defined(RS_VERSION) || (RS_VERSION <= 17)
+static inline short4 __attribute__((overloadable))
+ rsGetElementAt_short4(rs_allocation a, uint32_t x) {
+ return ((short4 *)rsGetElementAt(a, x))[0];
+}
+#endif
+
+#if !defined(RS_VERSION) || (RS_VERSION <= 17)
+static inline ushort __attribute__((overloadable))
+ rsGetElementAt_ushort(rs_allocation a, uint32_t x) {
+ return ((ushort *)rsGetElementAt(a, x))[0];
+}
+#endif
+
+#if !defined(RS_VERSION) || (RS_VERSION <= 17)
+static inline ushort2 __attribute__((overloadable))
+ rsGetElementAt_ushort2(rs_allocation a, uint32_t x) {
+ return ((ushort2 *)rsGetElementAt(a, x))[0];
+}
+#endif
+
+#if !defined(RS_VERSION) || (RS_VERSION <= 17)
+static inline ushort3 __attribute__((overloadable))
+ rsGetElementAt_ushort3(rs_allocation a, uint32_t x) {
+ return ((ushort3 *)rsGetElementAt(a, x))[0];
+}
+#endif
+
+#if !defined(RS_VERSION) || (RS_VERSION <= 17)
+static inline ushort4 __attribute__((overloadable))
+ rsGetElementAt_ushort4(rs_allocation a, uint32_t x) {
+ return ((ushort4 *)rsGetElementAt(a, x))[0];
+}
+#endif
+
+#if !defined(RS_VERSION) || (RS_VERSION <= 17)
+static inline int __attribute__((overloadable))
+ rsGetElementAt_int(rs_allocation a, uint32_t x) {
+ return ((int *)rsGetElementAt(a, x))[0];
+}
+#endif
+
+#if !defined(RS_VERSION) || (RS_VERSION <= 17)
+static inline int2 __attribute__((overloadable))
+ rsGetElementAt_int2(rs_allocation a, uint32_t x) {
+ return ((int2 *)rsGetElementAt(a, x))[0];
+}
+#endif
+
+#if !defined(RS_VERSION) || (RS_VERSION <= 17)
+static inline int3 __attribute__((overloadable))
+ rsGetElementAt_int3(rs_allocation a, uint32_t x) {
+ return ((int3 *)rsGetElementAt(a, x))[0];
+}
+#endif
+
+#if !defined(RS_VERSION) || (RS_VERSION <= 17)
+static inline int4 __attribute__((overloadable))
+ rsGetElementAt_int4(rs_allocation a, uint32_t x) {
+ return ((int4 *)rsGetElementAt(a, x))[0];
+}
+#endif
+
+#if !defined(RS_VERSION) || (RS_VERSION <= 17)
+static inline uint __attribute__((overloadable))
+ rsGetElementAt_uint(rs_allocation a, uint32_t x) {
+ return ((uint *)rsGetElementAt(a, x))[0];
+}
+#endif
+
+#if !defined(RS_VERSION) || (RS_VERSION <= 17)
+static inline uint2 __attribute__((overloadable))
+ rsGetElementAt_uint2(rs_allocation a, uint32_t x) {
+ return ((uint2 *)rsGetElementAt(a, x))[0];
+}
+#endif
+
+#if !defined(RS_VERSION) || (RS_VERSION <= 17)
+static inline uint3 __attribute__((overloadable))
+ rsGetElementAt_uint3(rs_allocation a, uint32_t x) {
+ return ((uint3 *)rsGetElementAt(a, x))[0];
+}
+#endif
+
+#if !defined(RS_VERSION) || (RS_VERSION <= 17)
+static inline uint4 __attribute__((overloadable))
+ rsGetElementAt_uint4(rs_allocation a, uint32_t x) {
+ return ((uint4 *)rsGetElementAt(a, x))[0];
+}
+#endif
+
+#if !defined(RS_VERSION) || (RS_VERSION <= 17)
+static inline long __attribute__((overloadable))
+ rsGetElementAt_long(rs_allocation a, uint32_t x) {
+ return ((long *)rsGetElementAt(a, x))[0];
+}
+#endif
+
+#if !defined(RS_VERSION) || (RS_VERSION <= 17)
+static inline long2 __attribute__((overloadable))
+ rsGetElementAt_long2(rs_allocation a, uint32_t x) {
+ return ((long2 *)rsGetElementAt(a, x))[0];
+}
+#endif
+
+#if !defined(RS_VERSION) || (RS_VERSION <= 17)
+static inline long3 __attribute__((overloadable))
+ rsGetElementAt_long3(rs_allocation a, uint32_t x) {
+ return ((long3 *)rsGetElementAt(a, x))[0];
+}
+#endif
+
+#if !defined(RS_VERSION) || (RS_VERSION <= 17)
+static inline long4 __attribute__((overloadable))
+ rsGetElementAt_long4(rs_allocation a, uint32_t x) {
+ return ((long4 *)rsGetElementAt(a, x))[0];
+}
+#endif
+
+#if !defined(RS_VERSION) || (RS_VERSION <= 17)
+static inline ulong __attribute__((overloadable))
+ rsGetElementAt_ulong(rs_allocation a, uint32_t x) {
+ return ((ulong *)rsGetElementAt(a, x))[0];
+}
+#endif
+
+#if !defined(RS_VERSION) || (RS_VERSION <= 17)
+static inline ulong2 __attribute__((overloadable))
+ rsGetElementAt_ulong2(rs_allocation a, uint32_t x) {
+ return ((ulong2 *)rsGetElementAt(a, x))[0];
+}
+#endif
+
+#if !defined(RS_VERSION) || (RS_VERSION <= 17)
+static inline ulong3 __attribute__((overloadable))
+ rsGetElementAt_ulong3(rs_allocation a, uint32_t x) {
+ return ((ulong3 *)rsGetElementAt(a, x))[0];
+}
+#endif
+
+#if !defined(RS_VERSION) || (RS_VERSION <= 17)
+static inline ulong4 __attribute__((overloadable))
+ rsGetElementAt_ulong4(rs_allocation a, uint32_t x) {
+ return ((ulong4 *)rsGetElementAt(a, x))[0];
+}
+#endif
+
+#if !defined(RS_VERSION) || (RS_VERSION <= 17)
+static inline float __attribute__((overloadable))
+ rsGetElementAt_float(rs_allocation a, uint32_t x, uint32_t y) {
+ return ((float *)rsGetElementAt(a, x, y))[0];
+}
+#endif
+
+#if !defined(RS_VERSION) || (RS_VERSION <= 17)
+static inline float2 __attribute__((overloadable))
+ rsGetElementAt_float2(rs_allocation a, uint32_t x, uint32_t y) {
+ return ((float2 *)rsGetElementAt(a, x, y))[0];
+}
+#endif
+
+#if !defined(RS_VERSION) || (RS_VERSION <= 17)
+static inline float3 __attribute__((overloadable))
+ rsGetElementAt_float3(rs_allocation a, uint32_t x, uint32_t y) {
+ return ((float3 *)rsGetElementAt(a, x, y))[0];
+}
+#endif
+
+#if !defined(RS_VERSION) || (RS_VERSION <= 17)
+static inline float4 __attribute__((overloadable))
+ rsGetElementAt_float4(rs_allocation a, uint32_t x, uint32_t y) {
+ return ((float4 *)rsGetElementAt(a, x, y))[0];
+}
+#endif
+
+#if !defined(RS_VERSION) || (RS_VERSION <= 17)
+static inline double __attribute__((overloadable))
+ rsGetElementAt_double(rs_allocation a, uint32_t x, uint32_t y) {
+ return ((double *)rsGetElementAt(a, x, y))[0];
+}
+#endif
+
+#if !defined(RS_VERSION) || (RS_VERSION <= 17)
+static inline double2 __attribute__((overloadable))
+ rsGetElementAt_double2(rs_allocation a, uint32_t x, uint32_t y) {
+ return ((double2 *)rsGetElementAt(a, x, y))[0];
+}
+#endif
+
+#if !defined(RS_VERSION) || (RS_VERSION <= 17)
+static inline double3 __attribute__((overloadable))
+ rsGetElementAt_double3(rs_allocation a, uint32_t x, uint32_t y) {
+ return ((double3 *)rsGetElementAt(a, x, y))[0];
+}
+#endif
+
+#if !defined(RS_VERSION) || (RS_VERSION <= 17)
+static inline double4 __attribute__((overloadable))
+ rsGetElementAt_double4(rs_allocation a, uint32_t x, uint32_t y) {
+ return ((double4 *)rsGetElementAt(a, x, y))[0];
+}
+#endif
+
+#if !defined(RS_VERSION) || (RS_VERSION <= 17)
+static inline char __attribute__((overloadable))
+ rsGetElementAt_char(rs_allocation a, uint32_t x, uint32_t y) {
+ return ((char *)rsGetElementAt(a, x, y))[0];
+}
+#endif
+
+#if !defined(RS_VERSION) || (RS_VERSION <= 17)
+static inline char2 __attribute__((overloadable))
+ rsGetElementAt_char2(rs_allocation a, uint32_t x, uint32_t y) {
+ return ((char2 *)rsGetElementAt(a, x, y))[0];
+}
+#endif
+
+#if !defined(RS_VERSION) || (RS_VERSION <= 17)
+static inline char3 __attribute__((overloadable))
+ rsGetElementAt_char3(rs_allocation a, uint32_t x, uint32_t y) {
+ return ((char3 *)rsGetElementAt(a, x, y))[0];
+}
+#endif
+
+#if !defined(RS_VERSION) || (RS_VERSION <= 17)
+static inline char4 __attribute__((overloadable))
+ rsGetElementAt_char4(rs_allocation a, uint32_t x, uint32_t y) {
+ return ((char4 *)rsGetElementAt(a, x, y))[0];
+}
+#endif
+
+#if !defined(RS_VERSION) || (RS_VERSION <= 17)
+static inline uchar __attribute__((overloadable))
+ rsGetElementAt_uchar(rs_allocation a, uint32_t x, uint32_t y) {
+ return ((uchar *)rsGetElementAt(a, x, y))[0];
+}
+#endif
+
+#if !defined(RS_VERSION) || (RS_VERSION <= 17)
+static inline uchar2 __attribute__((overloadable))
+ rsGetElementAt_uchar2(rs_allocation a, uint32_t x, uint32_t y) {
+ return ((uchar2 *)rsGetElementAt(a, x, y))[0];
+}
+#endif
+
+#if !defined(RS_VERSION) || (RS_VERSION <= 17)
+static inline uchar3 __attribute__((overloadable))
+ rsGetElementAt_uchar3(rs_allocation a, uint32_t x, uint32_t y) {
+ return ((uchar3 *)rsGetElementAt(a, x, y))[0];
+}
+#endif
+
+#if !defined(RS_VERSION) || (RS_VERSION <= 17)
+static inline uchar4 __attribute__((overloadable))
+ rsGetElementAt_uchar4(rs_allocation a, uint32_t x, uint32_t y) {
+ return ((uchar4 *)rsGetElementAt(a, x, y))[0];
+}
+#endif
+
+#if !defined(RS_VERSION) || (RS_VERSION <= 17)
+static inline short __attribute__((overloadable))
+ rsGetElementAt_short(rs_allocation a, uint32_t x, uint32_t y) {
+ return ((short *)rsGetElementAt(a, x, y))[0];
+}
+#endif
+
+#if !defined(RS_VERSION) || (RS_VERSION <= 17)
+static inline short2 __attribute__((overloadable))
+ rsGetElementAt_short2(rs_allocation a, uint32_t x, uint32_t y) {
+ return ((short2 *)rsGetElementAt(a, x, y))[0];
+}
+#endif
+
+#if !defined(RS_VERSION) || (RS_VERSION <= 17)
+static inline short3 __attribute__((overloadable))
+ rsGetElementAt_short3(rs_allocation a, uint32_t x, uint32_t y) {
+ return ((short3 *)rsGetElementAt(a, x, y))[0];
+}
+#endif
+
+#if !defined(RS_VERSION) || (RS_VERSION <= 17)
+static inline short4 __attribute__((overloadable))
+ rsGetElementAt_short4(rs_allocation a, uint32_t x, uint32_t y) {
+ return ((short4 *)rsGetElementAt(a, x, y))[0];
+}
+#endif
+
+#if !defined(RS_VERSION) || (RS_VERSION <= 17)
+static inline ushort __attribute__((overloadable))
+ rsGetElementAt_ushort(rs_allocation a, uint32_t x, uint32_t y) {
+ return ((ushort *)rsGetElementAt(a, x, y))[0];
+}
+#endif
+
+#if !defined(RS_VERSION) || (RS_VERSION <= 17)
+static inline ushort2 __attribute__((overloadable))
+ rsGetElementAt_ushort2(rs_allocation a, uint32_t x, uint32_t y) {
+ return ((ushort2 *)rsGetElementAt(a, x, y))[0];
+}
+#endif
+
+#if !defined(RS_VERSION) || (RS_VERSION <= 17)
+static inline ushort3 __attribute__((overloadable))
+ rsGetElementAt_ushort3(rs_allocation a, uint32_t x, uint32_t y) {
+ return ((ushort3 *)rsGetElementAt(a, x, y))[0];
+}
+#endif
+
+#if !defined(RS_VERSION) || (RS_VERSION <= 17)
+static inline ushort4 __attribute__((overloadable))
+ rsGetElementAt_ushort4(rs_allocation a, uint32_t x, uint32_t y) {
+ return ((ushort4 *)rsGetElementAt(a, x, y))[0];
+}
+#endif
+
+#if !defined(RS_VERSION) || (RS_VERSION <= 17)
+static inline int __attribute__((overloadable))
+ rsGetElementAt_int(rs_allocation a, uint32_t x, uint32_t y) {
+ return ((int *)rsGetElementAt(a, x, y))[0];
+}
+#endif
+
+#if !defined(RS_VERSION) || (RS_VERSION <= 17)
+static inline int2 __attribute__((overloadable))
+ rsGetElementAt_int2(rs_allocation a, uint32_t x, uint32_t y) {
+ return ((int2 *)rsGetElementAt(a, x, y))[0];
+}
+#endif
+
+#if !defined(RS_VERSION) || (RS_VERSION <= 17)
+static inline int3 __attribute__((overloadable))
+ rsGetElementAt_int3(rs_allocation a, uint32_t x, uint32_t y) {
+ return ((int3 *)rsGetElementAt(a, x, y))[0];
+}
+#endif
+
+#if !defined(RS_VERSION) || (RS_VERSION <= 17)
+static inline int4 __attribute__((overloadable))
+ rsGetElementAt_int4(rs_allocation a, uint32_t x, uint32_t y) {
+ return ((int4 *)rsGetElementAt(a, x, y))[0];
+}
+#endif
+
+#if !defined(RS_VERSION) || (RS_VERSION <= 17)
+static inline uint __attribute__((overloadable))
+ rsGetElementAt_uint(rs_allocation a, uint32_t x, uint32_t y) {
+ return ((uint *)rsGetElementAt(a, x, y))[0];
+}
+#endif
+
+#if !defined(RS_VERSION) || (RS_VERSION <= 17)
+static inline uint2 __attribute__((overloadable))
+ rsGetElementAt_uint2(rs_allocation a, uint32_t x, uint32_t y) {
+ return ((uint2 *)rsGetElementAt(a, x, y))[0];
+}
+#endif
+
+#if !defined(RS_VERSION) || (RS_VERSION <= 17)
+static inline uint3 __attribute__((overloadable))
+ rsGetElementAt_uint3(rs_allocation a, uint32_t x, uint32_t y) {
+ return ((uint3 *)rsGetElementAt(a, x, y))[0];
+}
+#endif
+
+#if !defined(RS_VERSION) || (RS_VERSION <= 17)
+static inline uint4 __attribute__((overloadable))
+ rsGetElementAt_uint4(rs_allocation a, uint32_t x, uint32_t y) {
+ return ((uint4 *)rsGetElementAt(a, x, y))[0];
+}
+#endif
+
+#if !defined(RS_VERSION) || (RS_VERSION <= 17)
+static inline long __attribute__((overloadable))
+ rsGetElementAt_long(rs_allocation a, uint32_t x, uint32_t y) {
+ return ((long *)rsGetElementAt(a, x, y))[0];
+}
+#endif
+
+#if !defined(RS_VERSION) || (RS_VERSION <= 17)
+static inline long2 __attribute__((overloadable))
+ rsGetElementAt_long2(rs_allocation a, uint32_t x, uint32_t y) {
+ return ((long2 *)rsGetElementAt(a, x, y))[0];
+}
+#endif
+
+#if !defined(RS_VERSION) || (RS_VERSION <= 17)
+static inline long3 __attribute__((overloadable))
+ rsGetElementAt_long3(rs_allocation a, uint32_t x, uint32_t y) {
+ return ((long3 *)rsGetElementAt(a, x, y))[0];
+}
+#endif
+
+#if !defined(RS_VERSION) || (RS_VERSION <= 17)
+static inline long4 __attribute__((overloadable))
+ rsGetElementAt_long4(rs_allocation a, uint32_t x, uint32_t y) {
+ return ((long4 *)rsGetElementAt(a, x, y))[0];
+}
+#endif
+
+#if !defined(RS_VERSION) || (RS_VERSION <= 17)
+static inline ulong __attribute__((overloadable))
+ rsGetElementAt_ulong(rs_allocation a, uint32_t x, uint32_t y) {
+ return ((ulong *)rsGetElementAt(a, x, y))[0];
+}
+#endif
+
+#if !defined(RS_VERSION) || (RS_VERSION <= 17)
+static inline ulong2 __attribute__((overloadable))
+ rsGetElementAt_ulong2(rs_allocation a, uint32_t x, uint32_t y) {
+ return ((ulong2 *)rsGetElementAt(a, x, y))[0];
+}
+#endif
+
+#if !defined(RS_VERSION) || (RS_VERSION <= 17)
+static inline ulong3 __attribute__((overloadable))
+ rsGetElementAt_ulong3(rs_allocation a, uint32_t x, uint32_t y) {
+ return ((ulong3 *)rsGetElementAt(a, x, y))[0];
+}
+#endif
+
+#if !defined(RS_VERSION) || (RS_VERSION <= 17)
+static inline ulong4 __attribute__((overloadable))
+ rsGetElementAt_ulong4(rs_allocation a, uint32_t x, uint32_t y) {
+ return ((ulong4 *)rsGetElementAt(a, x, y))[0];
+}
+#endif
+
+#if !defined(RS_VERSION) || (RS_VERSION <= 17)
+static inline float __attribute__((overloadable))
+ rsGetElementAt_float(rs_allocation a, uint32_t x, uint32_t y, uint32_t z) {
+ return ((float *)rsGetElementAt(a, x, y, z))[0];
+}
+#endif
+
+#if !defined(RS_VERSION) || (RS_VERSION <= 17)
+static inline float2 __attribute__((overloadable))
+ rsGetElementAt_float2(rs_allocation a, uint32_t x, uint32_t y, uint32_t z) {
+ return ((float2 *)rsGetElementAt(a, x, y, z))[0];
+}
+#endif
+
+#if !defined(RS_VERSION) || (RS_VERSION <= 17)
+static inline float3 __attribute__((overloadable))
+ rsGetElementAt_float3(rs_allocation a, uint32_t x, uint32_t y, uint32_t z) {
+ return ((float3 *)rsGetElementAt(a, x, y, z))[0];
+}
+#endif
+
+#if !defined(RS_VERSION) || (RS_VERSION <= 17)
+static inline float4 __attribute__((overloadable))
+ rsGetElementAt_float4(rs_allocation a, uint32_t x, uint32_t y, uint32_t z) {
+ return ((float4 *)rsGetElementAt(a, x, y, z))[0];
+}
+#endif
+
+#if !defined(RS_VERSION) || (RS_VERSION <= 17)
+static inline double __attribute__((overloadable))
+ rsGetElementAt_double(rs_allocation a, uint32_t x, uint32_t y, uint32_t z) {
+ return ((double *)rsGetElementAt(a, x, y, z))[0];
+}
+#endif
+
+#if !defined(RS_VERSION) || (RS_VERSION <= 17)
+static inline double2 __attribute__((overloadable))
+ rsGetElementAt_double2(rs_allocation a, uint32_t x, uint32_t y, uint32_t z) {
+ return ((double2 *)rsGetElementAt(a, x, y, z))[0];
+}
+#endif
+
+#if !defined(RS_VERSION) || (RS_VERSION <= 17)
+static inline double3 __attribute__((overloadable))
+ rsGetElementAt_double3(rs_allocation a, uint32_t x, uint32_t y, uint32_t z) {
+ return ((double3 *)rsGetElementAt(a, x, y, z))[0];
+}
+#endif
+
+#if !defined(RS_VERSION) || (RS_VERSION <= 17)
+static inline double4 __attribute__((overloadable))
+ rsGetElementAt_double4(rs_allocation a, uint32_t x, uint32_t y, uint32_t z) {
+ return ((double4 *)rsGetElementAt(a, x, y, z))[0];
+}
+#endif
+
+#if !defined(RS_VERSION) || (RS_VERSION <= 17)
+static inline char __attribute__((overloadable))
+ rsGetElementAt_char(rs_allocation a, uint32_t x, uint32_t y, uint32_t z) {
+ return ((char *)rsGetElementAt(a, x, y, z))[0];
+}
+#endif
+
+#if !defined(RS_VERSION) || (RS_VERSION <= 17)
+static inline char2 __attribute__((overloadable))
+ rsGetElementAt_char2(rs_allocation a, uint32_t x, uint32_t y, uint32_t z) {
+ return ((char2 *)rsGetElementAt(a, x, y, z))[0];
+}
+#endif
+
+#if !defined(RS_VERSION) || (RS_VERSION <= 17)
+static inline char3 __attribute__((overloadable))
+ rsGetElementAt_char3(rs_allocation a, uint32_t x, uint32_t y, uint32_t z) {
+ return ((char3 *)rsGetElementAt(a, x, y, z))[0];
+}
+#endif
+
+#if !defined(RS_VERSION) || (RS_VERSION <= 17)
+static inline char4 __attribute__((overloadable))
+ rsGetElementAt_char4(rs_allocation a, uint32_t x, uint32_t y, uint32_t z) {
+ return ((char4 *)rsGetElementAt(a, x, y, z))[0];
+}
+#endif
+
+#if !defined(RS_VERSION) || (RS_VERSION <= 17)
+static inline uchar __attribute__((overloadable))
+ rsGetElementAt_uchar(rs_allocation a, uint32_t x, uint32_t y, uint32_t z) {
+ return ((uchar *)rsGetElementAt(a, x, y, z))[0];
+}
+#endif
+
+#if !defined(RS_VERSION) || (RS_VERSION <= 17)
+static inline uchar2 __attribute__((overloadable))
+ rsGetElementAt_uchar2(rs_allocation a, uint32_t x, uint32_t y, uint32_t z) {
+ return ((uchar2 *)rsGetElementAt(a, x, y, z))[0];
+}
+#endif
+
+#if !defined(RS_VERSION) || (RS_VERSION <= 17)
+static inline uchar3 __attribute__((overloadable))
+ rsGetElementAt_uchar3(rs_allocation a, uint32_t x, uint32_t y, uint32_t z) {
+ return ((uchar3 *)rsGetElementAt(a, x, y, z))[0];
+}
+#endif
+
+#if !defined(RS_VERSION) || (RS_VERSION <= 17)
+static inline uchar4 __attribute__((overloadable))
+ rsGetElementAt_uchar4(rs_allocation a, uint32_t x, uint32_t y, uint32_t z) {
+ return ((uchar4 *)rsGetElementAt(a, x, y, z))[0];
+}
+#endif
+
+#if !defined(RS_VERSION) || (RS_VERSION <= 17)
+static inline short __attribute__((overloadable))
+ rsGetElementAt_short(rs_allocation a, uint32_t x, uint32_t y, uint32_t z) {
+ return ((short *)rsGetElementAt(a, x, y, z))[0];
+}
+#endif
+
+#if !defined(RS_VERSION) || (RS_VERSION <= 17)
+static inline short2 __attribute__((overloadable))
+ rsGetElementAt_short2(rs_allocation a, uint32_t x, uint32_t y, uint32_t z) {
+ return ((short2 *)rsGetElementAt(a, x, y, z))[0];
+}
+#endif
+
+#if !defined(RS_VERSION) || (RS_VERSION <= 17)
+static inline short3 __attribute__((overloadable))
+ rsGetElementAt_short3(rs_allocation a, uint32_t x, uint32_t y, uint32_t z) {
+ return ((short3 *)rsGetElementAt(a, x, y, z))[0];
+}
+#endif
+
+#if !defined(RS_VERSION) || (RS_VERSION <= 17)
+static inline short4 __attribute__((overloadable))
+ rsGetElementAt_short4(rs_allocation a, uint32_t x, uint32_t y, uint32_t z) {
+ return ((short4 *)rsGetElementAt(a, x, y, z))[0];
+}
+#endif
+
+#if !defined(RS_VERSION) || (RS_VERSION <= 17)
+static inline ushort __attribute__((overloadable))
+ rsGetElementAt_ushort(rs_allocation a, uint32_t x, uint32_t y, uint32_t z) {
+ return ((ushort *)rsGetElementAt(a, x, y, z))[0];
+}
+#endif
+
+#if !defined(RS_VERSION) || (RS_VERSION <= 17)
+static inline ushort2 __attribute__((overloadable))
+ rsGetElementAt_ushort2(rs_allocation a, uint32_t x, uint32_t y, uint32_t z) {
+ return ((ushort2 *)rsGetElementAt(a, x, y, z))[0];
+}
+#endif
+
+#if !defined(RS_VERSION) || (RS_VERSION <= 17)
+static inline ushort3 __attribute__((overloadable))
+ rsGetElementAt_ushort3(rs_allocation a, uint32_t x, uint32_t y, uint32_t z) {
+ return ((ushort3 *)rsGetElementAt(a, x, y, z))[0];
+}
+#endif
+
+#if !defined(RS_VERSION) || (RS_VERSION <= 17)
+static inline ushort4 __attribute__((overloadable))
+ rsGetElementAt_ushort4(rs_allocation a, uint32_t x, uint32_t y, uint32_t z) {
+ return ((ushort4 *)rsGetElementAt(a, x, y, z))[0];
+}
+#endif
+
+#if !defined(RS_VERSION) || (RS_VERSION <= 17)
+static inline int __attribute__((overloadable))
+ rsGetElementAt_int(rs_allocation a, uint32_t x, uint32_t y, uint32_t z) {
+ return ((int *)rsGetElementAt(a, x, y, z))[0];
+}
+#endif
+
+#if !defined(RS_VERSION) || (RS_VERSION <= 17)
+static inline int2 __attribute__((overloadable))
+ rsGetElementAt_int2(rs_allocation a, uint32_t x, uint32_t y, uint32_t z) {
+ return ((int2 *)rsGetElementAt(a, x, y, z))[0];
+}
+#endif
+
+#if !defined(RS_VERSION) || (RS_VERSION <= 17)
+static inline int3 __attribute__((overloadable))
+ rsGetElementAt_int3(rs_allocation a, uint32_t x, uint32_t y, uint32_t z) {
+ return ((int3 *)rsGetElementAt(a, x, y, z))[0];
+}
+#endif
+
+#if !defined(RS_VERSION) || (RS_VERSION <= 17)
+static inline int4 __attribute__((overloadable))
+ rsGetElementAt_int4(rs_allocation a, uint32_t x, uint32_t y, uint32_t z) {
+ return ((int4 *)rsGetElementAt(a, x, y, z))[0];
+}
+#endif
+
+#if !defined(RS_VERSION) || (RS_VERSION <= 17)
+static inline uint __attribute__((overloadable))
+ rsGetElementAt_uint(rs_allocation a, uint32_t x, uint32_t y, uint32_t z) {
+ return ((uint *)rsGetElementAt(a, x, y, z))[0];
+}
+#endif
+
+#if !defined(RS_VERSION) || (RS_VERSION <= 17)
+static inline uint2 __attribute__((overloadable))
+ rsGetElementAt_uint2(rs_allocation a, uint32_t x, uint32_t y, uint32_t z) {
+ return ((uint2 *)rsGetElementAt(a, x, y, z))[0];
+}
+#endif
+
+#if !defined(RS_VERSION) || (RS_VERSION <= 17)
+static inline uint3 __attribute__((overloadable))
+ rsGetElementAt_uint3(rs_allocation a, uint32_t x, uint32_t y, uint32_t z) {
+ return ((uint3 *)rsGetElementAt(a, x, y, z))[0];
+}
+#endif
+
+#if !defined(RS_VERSION) || (RS_VERSION <= 17)
+static inline uint4 __attribute__((overloadable))
+ rsGetElementAt_uint4(rs_allocation a, uint32_t x, uint32_t y, uint32_t z) {
+ return ((uint4 *)rsGetElementAt(a, x, y, z))[0];
+}
+#endif
+
+#if !defined(RS_VERSION) || (RS_VERSION <= 17)
+static inline long __attribute__((overloadable))
+ rsGetElementAt_long(rs_allocation a, uint32_t x, uint32_t y, uint32_t z) {
+ return ((long *)rsGetElementAt(a, x, y, z))[0];
+}
+#endif
+
+#if !defined(RS_VERSION) || (RS_VERSION <= 17)
+static inline long2 __attribute__((overloadable))
+ rsGetElementAt_long2(rs_allocation a, uint32_t x, uint32_t y, uint32_t z) {
+ return ((long2 *)rsGetElementAt(a, x, y, z))[0];
+}
+#endif
+
+#if !defined(RS_VERSION) || (RS_VERSION <= 17)
+static inline long3 __attribute__((overloadable))
+ rsGetElementAt_long3(rs_allocation a, uint32_t x, uint32_t y, uint32_t z) {
+ return ((long3 *)rsGetElementAt(a, x, y, z))[0];
+}
+#endif
+
+#if !defined(RS_VERSION) || (RS_VERSION <= 17)
+static inline long4 __attribute__((overloadable))
+ rsGetElementAt_long4(rs_allocation a, uint32_t x, uint32_t y, uint32_t z) {
+ return ((long4 *)rsGetElementAt(a, x, y, z))[0];
+}
+#endif
+
+#if !defined(RS_VERSION) || (RS_VERSION <= 17)
+static inline ulong __attribute__((overloadable))
+ rsGetElementAt_ulong(rs_allocation a, uint32_t x, uint32_t y, uint32_t z) {
+ return ((ulong *)rsGetElementAt(a, x, y, z))[0];
+}
+#endif
+
+#if !defined(RS_VERSION) || (RS_VERSION <= 17)
+static inline ulong2 __attribute__((overloadable))
+ rsGetElementAt_ulong2(rs_allocation a, uint32_t x, uint32_t y, uint32_t z) {
+ return ((ulong2 *)rsGetElementAt(a, x, y, z))[0];
+}
+#endif
+
+#if !defined(RS_VERSION) || (RS_VERSION <= 17)
+static inline ulong3 __attribute__((overloadable))
+ rsGetElementAt_ulong3(rs_allocation a, uint32_t x, uint32_t y, uint32_t z) {
+ return ((ulong3 *)rsGetElementAt(a, x, y, z))[0];
+}
+#endif
+
+#if !defined(RS_VERSION) || (RS_VERSION <= 17)
+static inline ulong4 __attribute__((overloadable))
+ rsGetElementAt_ulong4(rs_allocation a, uint32_t x, uint32_t y, uint32_t z) {
+ return ((ulong4 *)rsGetElementAt(a, x, y, z))[0];
+}
+#endif
+
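+/*
+ * Note (editor's): from RS_VERSION 18 onward the typed getters are extern
+ * functions supplied by the RenderScript runtime; the static inline
+ * definitions above are compatibility fallbacks for older targets, built
+ * on the generic rsGetElementAt().
+ */
+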
+#if (defined(RS_VERSION) && (RS_VERSION >= 18))
+extern float __attribute__((overloadable))
+ rsGetElementAt_float(rs_allocation a, uint32_t x);
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 18))
+extern float2 __attribute__((overloadable))
+ rsGetElementAt_float2(rs_allocation a, uint32_t x);
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 18))
+extern float3 __attribute__((overloadable))
+ rsGetElementAt_float3(rs_allocation a, uint32_t x);
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 18))
+extern float4 __attribute__((overloadable))
+ rsGetElementAt_float4(rs_allocation a, uint32_t x);
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 18))
+extern double __attribute__((overloadable))
+ rsGetElementAt_double(rs_allocation a, uint32_t x);
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 18))
+extern double2 __attribute__((overloadable))
+ rsGetElementAt_double2(rs_allocation a, uint32_t x);
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 18))
+extern double3 __attribute__((overloadable))
+ rsGetElementAt_double3(rs_allocation a, uint32_t x);
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 18))
+extern double4 __attribute__((overloadable))
+ rsGetElementAt_double4(rs_allocation a, uint32_t x);
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 18))
+extern char __attribute__((overloadable))
+ rsGetElementAt_char(rs_allocation a, uint32_t x);
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 18))
+extern char2 __attribute__((overloadable))
+ rsGetElementAt_char2(rs_allocation a, uint32_t x);
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 18))
+extern char3 __attribute__((overloadable))
+ rsGetElementAt_char3(rs_allocation a, uint32_t x);
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 18))
+extern char4 __attribute__((overloadable))
+ rsGetElementAt_char4(rs_allocation a, uint32_t x);
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 18))
+extern uchar __attribute__((overloadable))
+ rsGetElementAt_uchar(rs_allocation a, uint32_t x);
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 18))
+extern uchar2 __attribute__((overloadable))
+ rsGetElementAt_uchar2(rs_allocation a, uint32_t x);
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 18))
+extern uchar3 __attribute__((overloadable))
+ rsGetElementAt_uchar3(rs_allocation a, uint32_t x);
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 18))
+extern uchar4 __attribute__((overloadable))
+ rsGetElementAt_uchar4(rs_allocation a, uint32_t x);
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 18))
+extern short __attribute__((overloadable))
+ rsGetElementAt_short(rs_allocation a, uint32_t x);
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 18))
+extern short2 __attribute__((overloadable))
+ rsGetElementAt_short2(rs_allocation a, uint32_t x);
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 18))
+extern short3 __attribute__((overloadable))
+ rsGetElementAt_short3(rs_allocation a, uint32_t x);
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 18))
+extern short4 __attribute__((overloadable))
+ rsGetElementAt_short4(rs_allocation a, uint32_t x);
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 18))
+extern ushort __attribute__((overloadable))
+ rsGetElementAt_ushort(rs_allocation a, uint32_t x);
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 18))
+extern ushort2 __attribute__((overloadable))
+ rsGetElementAt_ushort2(rs_allocation a, uint32_t x);
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 18))
+extern ushort3 __attribute__((overloadable))
+ rsGetElementAt_ushort3(rs_allocation a, uint32_t x);
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 18))
+extern ushort4 __attribute__((overloadable))
+ rsGetElementAt_ushort4(rs_allocation a, uint32_t x);
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 18))
+extern int __attribute__((overloadable))
+ rsGetElementAt_int(rs_allocation a, uint32_t x);
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 18))
+extern int2 __attribute__((overloadable))
+ rsGetElementAt_int2(rs_allocation a, uint32_t x);
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 18))
+extern int3 __attribute__((overloadable))
+ rsGetElementAt_int3(rs_allocation a, uint32_t x);
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 18))
+extern int4 __attribute__((overloadable))
+ rsGetElementAt_int4(rs_allocation a, uint32_t x);
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 18))
+extern uint __attribute__((overloadable))
+ rsGetElementAt_uint(rs_allocation a, uint32_t x);
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 18))
+extern uint2 __attribute__((overloadable))
+ rsGetElementAt_uint2(rs_allocation a, uint32_t x);
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 18))
+extern uint3 __attribute__((overloadable))
+ rsGetElementAt_uint3(rs_allocation a, uint32_t x);
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 18))
+extern uint4 __attribute__((overloadable))
+ rsGetElementAt_uint4(rs_allocation a, uint32_t x);
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 18))
+extern long __attribute__((overloadable))
+ rsGetElementAt_long(rs_allocation a, uint32_t x);
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 18))
+extern long2 __attribute__((overloadable))
+ rsGetElementAt_long2(rs_allocation a, uint32_t x);
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 18))
+extern long3 __attribute__((overloadable))
+ rsGetElementAt_long3(rs_allocation a, uint32_t x);
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 18))
+extern long4 __attribute__((overloadable))
+ rsGetElementAt_long4(rs_allocation a, uint32_t x);
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 18))
+extern ulong __attribute__((overloadable))
+ rsGetElementAt_ulong(rs_allocation a, uint32_t x);
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 18))
+extern ulong2 __attribute__((overloadable))
+ rsGetElementAt_ulong2(rs_allocation a, uint32_t x);
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 18))
+extern ulong3 __attribute__((overloadable))
+ rsGetElementAt_ulong3(rs_allocation a, uint32_t x);
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 18))
+extern ulong4 __attribute__((overloadable))
+ rsGetElementAt_ulong4(rs_allocation a, uint32_t x);
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 18))
+extern float __attribute__((overloadable))
+ rsGetElementAt_float(rs_allocation a, uint32_t x, uint32_t y);
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 18))
+extern float2 __attribute__((overloadable))
+ rsGetElementAt_float2(rs_allocation a, uint32_t x, uint32_t y);
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 18))
+extern float3 __attribute__((overloadable))
+ rsGetElementAt_float3(rs_allocation a, uint32_t x, uint32_t y);
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 18))
+extern float4 __attribute__((overloadable))
+ rsGetElementAt_float4(rs_allocation a, uint32_t x, uint32_t y);
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 18))
+extern double __attribute__((overloadable))
+ rsGetElementAt_double(rs_allocation a, uint32_t x, uint32_t y);
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 18))
+extern double2 __attribute__((overloadable))
+ rsGetElementAt_double2(rs_allocation a, uint32_t x, uint32_t y);
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 18))
+extern double3 __attribute__((overloadable))
+ rsGetElementAt_double3(rs_allocation a, uint32_t x, uint32_t y);
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 18))
+extern double4 __attribute__((overloadable))
+ rsGetElementAt_double4(rs_allocation a, uint32_t x, uint32_t y);
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 18))
+extern char __attribute__((overloadable))
+ rsGetElementAt_char(rs_allocation a, uint32_t x, uint32_t y);
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 18))
+extern char2 __attribute__((overloadable))
+ rsGetElementAt_char2(rs_allocation a, uint32_t x, uint32_t y);
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 18))
+extern char3 __attribute__((overloadable))
+ rsGetElementAt_char3(rs_allocation a, uint32_t x, uint32_t y);
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 18))
+extern char4 __attribute__((overloadable))
+ rsGetElementAt_char4(rs_allocation a, uint32_t x, uint32_t y);
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 18))
+extern uchar __attribute__((overloadable))
+ rsGetElementAt_uchar(rs_allocation a, uint32_t x, uint32_t y);
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 18))
+extern uchar2 __attribute__((overloadable))
+ rsGetElementAt_uchar2(rs_allocation a, uint32_t x, uint32_t y);
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 18))
+extern uchar3 __attribute__((overloadable))
+ rsGetElementAt_uchar3(rs_allocation a, uint32_t x, uint32_t y);
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 18))
+extern uchar4 __attribute__((overloadable))
+ rsGetElementAt_uchar4(rs_allocation a, uint32_t x, uint32_t y);
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 18))
+extern short __attribute__((overloadable))
+ rsGetElementAt_short(rs_allocation a, uint32_t x, uint32_t y);
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 18))
+extern short2 __attribute__((overloadable))
+ rsGetElementAt_short2(rs_allocation a, uint32_t x, uint32_t y);
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 18))
+extern short3 __attribute__((overloadable))
+ rsGetElementAt_short3(rs_allocation a, uint32_t x, uint32_t y);
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 18))
+extern short4 __attribute__((overloadable))
+ rsGetElementAt_short4(rs_allocation a, uint32_t x, uint32_t y);
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 18))
+extern ushort __attribute__((overloadable))
+ rsGetElementAt_ushort(rs_allocation a, uint32_t x, uint32_t y);
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 18))
+extern ushort2 __attribute__((overloadable))
+ rsGetElementAt_ushort2(rs_allocation a, uint32_t x, uint32_t y);
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 18))
+extern ushort3 __attribute__((overloadable))
+ rsGetElementAt_ushort3(rs_allocation a, uint32_t x, uint32_t y);
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 18))
+extern ushort4 __attribute__((overloadable))
+ rsGetElementAt_ushort4(rs_allocation a, uint32_t x, uint32_t y);
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 18))
+extern int __attribute__((overloadable))
+ rsGetElementAt_int(rs_allocation a, uint32_t x, uint32_t y);
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 18))
+extern int2 __attribute__((overloadable))
+ rsGetElementAt_int2(rs_allocation a, uint32_t x, uint32_t y);
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 18))
+extern int3 __attribute__((overloadable))
+ rsGetElementAt_int3(rs_allocation a, uint32_t x, uint32_t y);
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 18))
+extern int4 __attribute__((overloadable))
+ rsGetElementAt_int4(rs_allocation a, uint32_t x, uint32_t y);
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 18))
+extern uint __attribute__((overloadable))
+ rsGetElementAt_uint(rs_allocation a, uint32_t x, uint32_t y);
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 18))
+extern uint2 __attribute__((overloadable))
+ rsGetElementAt_uint2(rs_allocation a, uint32_t x, uint32_t y);
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 18))
+extern uint3 __attribute__((overloadable))
+ rsGetElementAt_uint3(rs_allocation a, uint32_t x, uint32_t y);
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 18))
+extern uint4 __attribute__((overloadable))
+ rsGetElementAt_uint4(rs_allocation a, uint32_t x, uint32_t y);
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 18))
+extern long __attribute__((overloadable))
+ rsGetElementAt_long(rs_allocation a, uint32_t x, uint32_t y);
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 18))
+extern long2 __attribute__((overloadable))
+ rsGetElementAt_long2(rs_allocation a, uint32_t x, uint32_t y);
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 18))
+extern long3 __attribute__((overloadable))
+ rsGetElementAt_long3(rs_allocation a, uint32_t x, uint32_t y);
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 18))
+extern long4 __attribute__((overloadable))
+ rsGetElementAt_long4(rs_allocation a, uint32_t x, uint32_t y);
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 18))
+extern ulong __attribute__((overloadable))
+ rsGetElementAt_ulong(rs_allocation a, uint32_t x, uint32_t y);
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 18))
+extern ulong2 __attribute__((overloadable))
+ rsGetElementAt_ulong2(rs_allocation a, uint32_t x, uint32_t y);
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 18))
+extern ulong3 __attribute__((overloadable))
+ rsGetElementAt_ulong3(rs_allocation a, uint32_t x, uint32_t y);
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 18))
+extern ulong4 __attribute__((overloadable))
+ rsGetElementAt_ulong4(rs_allocation a, uint32_t x, uint32_t y);
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 18))
+extern float __attribute__((overloadable))
+ rsGetElementAt_float(rs_allocation a, uint32_t x, uint32_t y, uint32_t z);
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 18))
+extern float2 __attribute__((overloadable))
+ rsGetElementAt_float2(rs_allocation a, uint32_t x, uint32_t y, uint32_t z);
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 18))
+extern float3 __attribute__((overloadable))
+ rsGetElementAt_float3(rs_allocation a, uint32_t x, uint32_t y, uint32_t z);
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 18))
+extern float4 __attribute__((overloadable))
+ rsGetElementAt_float4(rs_allocation a, uint32_t x, uint32_t y, uint32_t z);
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 18))
+extern double __attribute__((overloadable))
+ rsGetElementAt_double(rs_allocation a, uint32_t x, uint32_t y, uint32_t z);
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 18))
+extern double2 __attribute__((overloadable))
+ rsGetElementAt_double2(rs_allocation a, uint32_t x, uint32_t y, uint32_t z);
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 18))
+extern double3 __attribute__((overloadable))
+ rsGetElementAt_double3(rs_allocation a, uint32_t x, uint32_t y, uint32_t z);
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 18))
+extern double4 __attribute__((overloadable))
+ rsGetElementAt_double4(rs_allocation a, uint32_t x, uint32_t y, uint32_t z);
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 18))
+extern char __attribute__((overloadable))
+ rsGetElementAt_char(rs_allocation a, uint32_t x, uint32_t y, uint32_t z);
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 18))
+extern char2 __attribute__((overloadable))
+ rsGetElementAt_char2(rs_allocation a, uint32_t x, uint32_t y, uint32_t z);
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 18))
+extern char3 __attribute__((overloadable))
+ rsGetElementAt_char3(rs_allocation a, uint32_t x, uint32_t y, uint32_t z);
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 18))
+extern char4 __attribute__((overloadable))
+ rsGetElementAt_char4(rs_allocation a, uint32_t x, uint32_t y, uint32_t z);
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 18))
+extern uchar __attribute__((overloadable))
+ rsGetElementAt_uchar(rs_allocation a, uint32_t x, uint32_t y, uint32_t z);
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 18))
+extern uchar2 __attribute__((overloadable))
+ rsGetElementAt_uchar2(rs_allocation a, uint32_t x, uint32_t y, uint32_t z);
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 18))
+extern uchar3 __attribute__((overloadable))
+ rsGetElementAt_uchar3(rs_allocation a, uint32_t x, uint32_t y, uint32_t z);
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 18))
+extern uchar4 __attribute__((overloadable))
+ rsGetElementAt_uchar4(rs_allocation a, uint32_t x, uint32_t y, uint32_t z);
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 18))
+extern short __attribute__((overloadable))
+ rsGetElementAt_short(rs_allocation a, uint32_t x, uint32_t y, uint32_t z);
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 18))
+extern short2 __attribute__((overloadable))
+ rsGetElementAt_short2(rs_allocation a, uint32_t x, uint32_t y, uint32_t z);
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 18))
+extern short3 __attribute__((overloadable))
+ rsGetElementAt_short3(rs_allocation a, uint32_t x, uint32_t y, uint32_t z);
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 18))
+extern short4 __attribute__((overloadable))
+ rsGetElementAt_short4(rs_allocation a, uint32_t x, uint32_t y, uint32_t z);
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 18))
+extern ushort __attribute__((overloadable))
+ rsGetElementAt_ushort(rs_allocation a, uint32_t x, uint32_t y, uint32_t z);
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 18))
+extern ushort2 __attribute__((overloadable))
+ rsGetElementAt_ushort2(rs_allocation a, uint32_t x, uint32_t y, uint32_t z);
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 18))
+extern ushort3 __attribute__((overloadable))
+ rsGetElementAt_ushort3(rs_allocation a, uint32_t x, uint32_t y, uint32_t z);
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 18))
+extern ushort4 __attribute__((overloadable))
+ rsGetElementAt_ushort4(rs_allocation a, uint32_t x, uint32_t y, uint32_t z);
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 18))
+extern int __attribute__((overloadable))
+ rsGetElementAt_int(rs_allocation a, uint32_t x, uint32_t y, uint32_t z);
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 18))
+extern int2 __attribute__((overloadable))
+ rsGetElementAt_int2(rs_allocation a, uint32_t x, uint32_t y, uint32_t z);
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 18))
+extern int3 __attribute__((overloadable))
+ rsGetElementAt_int3(rs_allocation a, uint32_t x, uint32_t y, uint32_t z);
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 18))
+extern int4 __attribute__((overloadable))
+ rsGetElementAt_int4(rs_allocation a, uint32_t x, uint32_t y, uint32_t z);
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 18))
+extern uint __attribute__((overloadable))
+ rsGetElementAt_uint(rs_allocation a, uint32_t x, uint32_t y, uint32_t z);
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 18))
+extern uint2 __attribute__((overloadable))
+ rsGetElementAt_uint2(rs_allocation a, uint32_t x, uint32_t y, uint32_t z);
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 18))
+extern uint3 __attribute__((overloadable))
+ rsGetElementAt_uint3(rs_allocation a, uint32_t x, uint32_t y, uint32_t z);
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 18))
+extern uint4 __attribute__((overloadable))
+ rsGetElementAt_uint4(rs_allocation a, uint32_t x, uint32_t y, uint32_t z);
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 18))
+extern long __attribute__((overloadable))
+ rsGetElementAt_long(rs_allocation a, uint32_t x, uint32_t y, uint32_t z);
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 18))
+extern long2 __attribute__((overloadable))
+ rsGetElementAt_long2(rs_allocation a, uint32_t x, uint32_t y, uint32_t z);
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 18))
+extern long3 __attribute__((overloadable))
+ rsGetElementAt_long3(rs_allocation a, uint32_t x, uint32_t y, uint32_t z);
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 18))
+extern long4 __attribute__((overloadable))
+ rsGetElementAt_long4(rs_allocation a, uint32_t x, uint32_t y, uint32_t z);
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 18))
+extern ulong __attribute__((overloadable))
+ rsGetElementAt_ulong(rs_allocation a, uint32_t x, uint32_t y, uint32_t z);
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 18))
+extern ulong2 __attribute__((overloadable))
+ rsGetElementAt_ulong2(rs_allocation a, uint32_t x, uint32_t y, uint32_t z);
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 18))
+extern ulong3 __attribute__((overloadable))
+ rsGetElementAt_ulong3(rs_allocation a, uint32_t x, uint32_t y, uint32_t z);
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 18))
+extern ulong4 __attribute__((overloadable))
+ rsGetElementAt_ulong4(rs_allocation a, uint32_t x, uint32_t y, uint32_t z);
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 23))
+extern half __attribute__((overloadable))
+ rsGetElementAt_half(rs_allocation a, uint32_t x);
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 23))
+extern half2 __attribute__((overloadable))
+ rsGetElementAt_half2(rs_allocation a, uint32_t x);
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 23))
+extern half3 __attribute__((overloadable))
+ rsGetElementAt_half3(rs_allocation a, uint32_t x);
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 23))
+extern half4 __attribute__((overloadable))
+ rsGetElementAt_half4(rs_allocation a, uint32_t x);
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 23))
+extern half __attribute__((overloadable))
+ rsGetElementAt_half(rs_allocation a, uint32_t x, uint32_t y);
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 23))
+extern half2 __attribute__((overloadable))
+ rsGetElementAt_half2(rs_allocation a, uint32_t x, uint32_t y);
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 23))
+extern half3 __attribute__((overloadable))
+ rsGetElementAt_half3(rs_allocation a, uint32_t x, uint32_t y);
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 23))
+extern half4 __attribute__((overloadable))
+ rsGetElementAt_half4(rs_allocation a, uint32_t x, uint32_t y);
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 23))
+extern half __attribute__((overloadable))
+ rsGetElementAt_half(rs_allocation a, uint32_t x, uint32_t y, uint32_t z);
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 23))
+extern half2 __attribute__((overloadable))
+ rsGetElementAt_half2(rs_allocation a, uint32_t x, uint32_t y, uint32_t z);
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 23))
+extern half3 __attribute__((overloadable))
+ rsGetElementAt_half3(rs_allocation a, uint32_t x, uint32_t y, uint32_t z);
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 23))
+extern half4 __attribute__((overloadable))
+ rsGetElementAt_half4(rs_allocation a, uint32_t x, uint32_t y, uint32_t z);
+#endif
+
+/*
+ * rsGetElementAtYuv_uchar_U: Get the U component of an allocation of YUVs
+ *
+ * Extracts the U component of a single YUV value from a 2D allocation of YUVs.
+ *
+ * Inside an allocation, the Y, U, and V components may be stored in different
+ * planes and at different resolutions. The x, y coordinates provided here are
+ * in the dimensions of the Y plane.
+ *
+ * See rsGetElementAtYuv_uchar_Y().
+ */
+#if (defined(RS_VERSION) && (RS_VERSION >= 18))
+extern uchar __attribute__((overloadable))
+ rsGetElementAtYuv_uchar_U(rs_allocation a, uint32_t x, uint32_t y);
+#endif
+
+/*
+ * rsGetElementAtYuv_uchar_V: Get the V component of an allocation of YUVs
+ *
+ * Extracts the V component of a single YUV value from a 2D allocation of YUVs.
+ *
+ * Inside an allocation, the Y, U, and V components may be stored in different
+ * planes and at different resolutions. The x, y coordinates provided here are
+ * in the dimensions of the Y plane.
+ *
+ * See rsGetElementAtYuv_uchar_Y().
+ */
+#if (defined(RS_VERSION) && (RS_VERSION >= 18))
+extern uchar __attribute__((overloadable))
+ rsGetElementAtYuv_uchar_V(rs_allocation a, uint32_t x, uint32_t y);
+#endif
+
+/*
+ * rsGetElementAtYuv_uchar_Y: Get the Y component of an allocation of YUVs
+ *
+ * Extracts the Y component of a single YUV value from a 2D allocation of YUVs.
+ *
+ * Inside an allocation, the Y, U, and V components may be stored in different
+ * planes and at different resolutions. The x, y coordinates provided here are
+ * in the dimensions of the Y plane.
+ *
+ * See rsGetElementAtYuv_uchar_U() and rsGetElementAtYuv_uchar_V().
+ */
+#if (defined(RS_VERSION) && (RS_VERSION >= 18))
+extern uchar __attribute__((overloadable))
+ rsGetElementAtYuv_uchar_Y(rs_allocation a, uint32_t x, uint32_t y);
+#endif
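+
+/*
+ * Illustrative sketch (editorial addition, not part of the generated API):
+ * gathering the three components of one YUV value. Note that the same
+ * Y-plane coordinates are passed to all three getters; the helper name is
+ * hypothetical.
+ */
+#if (defined(RS_VERSION) && (RS_VERSION >= 18))
+static inline uchar3 exampleGetYuv(rs_allocation a, uint32_t x, uint32_t y) {
+    uchar3 yuv;
+    yuv.x = rsGetElementAtYuv_uchar_Y(a, x, y);
+    yuv.y = rsGetElementAtYuv_uchar_U(a, x, y);  // U and V may live in subsampled planes
+    yuv.z = rsGetElementAtYuv_uchar_V(a, x, y);
+    return yuv;
+}
+#endif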
+
+/*
+ * rsSample: Sample a value from a texture allocation
+ *
+ * Fetches a value from a texture allocation in a way described by the sampler.
+ *
+ * If your allocation is 1D, use the variant with float for location. For 2D,
+ * use the float2 variant.
+ *
+ * See android.renderscript.Sampler for more details.
+ *
+ * Parameters:
+ * a: Allocation to sample from.
+ * s: Sampler state.
+ * location: Location to sample from.
+ * lod: Mip level to sample from; for fractional values, mip levels will be interpolated if RS_SAMPLER_LINEAR_MIP_LINEAR is used.
+ */
+#if (defined(RS_VERSION) && (RS_VERSION >= 16))
+extern float4 __attribute__((overloadable))
+ rsSample(rs_allocation a, rs_sampler s, float location);
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 16))
+extern float4 __attribute__((overloadable))
+ rsSample(rs_allocation a, rs_sampler s, float location, float lod);
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 16))
+extern float4 __attribute__((overloadable))
+ rsSample(rs_allocation a, rs_sampler s, float2 location);
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 16))
+extern float4 __attribute__((overloadable))
+ rsSample(rs_allocation a, rs_sampler s, float2 location, float lod);
+#endif
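+
+/*
+ * Illustrative sketch (editorial addition): a filtered fetch from a 2D
+ * texture allocation. It assumes the sampler was created with linear
+ * filtering (e.g. Sampler.CLAMP_LINEAR on the Java side) and that rsSample
+ * takes normalized [0, 1] coordinates for 2D allocations; the helper name
+ * is hypothetical.
+ */
+#if (defined(RS_VERSION) && (RS_VERSION >= 16))
+static inline float4 exampleSample2D(rs_allocation tex, rs_sampler s, float u, float v) {
+    float2 location = {u, v};
+    return rsSample(tex, s, location);
+}
+#endif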
+
+/*
+ * rsSetElementAt: Set a cell of an allocation
+ *
+ * This function stores a value into a single cell of an allocation.
+ *
+ * When storing into a three-dimensional allocation, use the x, y, z variant.
+ * Similarly, use the x, y variant for two-dimensional allocations and x for
+ * one-dimensional allocations.
+ *
+ * This function has two styles. One passes the value to be stored using a void*;
+ * the other takes the actual value as an argument, e.g. rsSetElementAt() vs.
+ * rsSetElementAt_int4(). For primitive types, always use the latter as it is
+ * more efficient (see the usage sketch after the untyped declarations below).
+ *
+ * See also rsGetElementAt().
+ */
+#if (defined(RS_VERSION) && (RS_VERSION >= 18))
+extern void __attribute__((overloadable))
+ rsSetElementAt(rs_allocation a, void* ptr, uint32_t x);
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 18))
+extern void __attribute__((overloadable))
+ rsSetElementAt(rs_allocation a, void* ptr, uint32_t x, uint32_t y);
+#endif
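+
+/*
+ * Usage sketch (editorial addition): the two storing styles side by side,
+ * for a hypothetical int4 value v:
+ *
+ *   rsSetElementAt_int4(a, v, x);  // typed variant: preferred for primitives
+ *   rsSetElementAt(a, &v, x);      // untyped void* variant
+ */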
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 18))
+extern void __attribute__((overloadable))
+ rsSetElementAt_float(rs_allocation a, float val, uint32_t x);
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 18))
+extern void __attribute__((overloadable))
+ rsSetElementAt_float2(rs_allocation a, float2 val, uint32_t x);
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 18))
+extern void __attribute__((overloadable))
+ rsSetElementAt_float3(rs_allocation a, float3 val, uint32_t x);
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 18))
+extern void __attribute__((overloadable))
+ rsSetElementAt_float4(rs_allocation a, float4 val, uint32_t x);
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 18))
+extern void __attribute__((overloadable))
+ rsSetElementAt_double(rs_allocation a, double val, uint32_t x);
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 18))
+extern void __attribute__((overloadable))
+ rsSetElementAt_double2(rs_allocation a, double2 val, uint32_t x);
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 18))
+extern void __attribute__((overloadable))
+ rsSetElementAt_double3(rs_allocation a, double3 val, uint32_t x);
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 18))
+extern void __attribute__((overloadable))
+ rsSetElementAt_double4(rs_allocation a, double4 val, uint32_t x);
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 18))
+extern void __attribute__((overloadable))
+ rsSetElementAt_char(rs_allocation a, char val, uint32_t x);
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 18))
+extern void __attribute__((overloadable))
+ rsSetElementAt_char2(rs_allocation a, char2 val, uint32_t x);
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 18))
+extern void __attribute__((overloadable))
+ rsSetElementAt_char3(rs_allocation a, char3 val, uint32_t x);
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 18))
+extern void __attribute__((overloadable))
+ rsSetElementAt_char4(rs_allocation a, char4 val, uint32_t x);
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 18))
+extern void __attribute__((overloadable))
+ rsSetElementAt_uchar(rs_allocation a, uchar val, uint32_t x);
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 18))
+extern void __attribute__((overloadable))
+ rsSetElementAt_uchar2(rs_allocation a, uchar2 val, uint32_t x);
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 18))
+extern void __attribute__((overloadable))
+ rsSetElementAt_uchar3(rs_allocation a, uchar3 val, uint32_t x);
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 18))
+extern void __attribute__((overloadable))
+ rsSetElementAt_uchar4(rs_allocation a, uchar4 val, uint32_t x);
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 18))
+extern void __attribute__((overloadable))
+ rsSetElementAt_short(rs_allocation a, short val, uint32_t x);
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 18))
+extern void __attribute__((overloadable))
+ rsSetElementAt_short2(rs_allocation a, short2 val, uint32_t x);
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 18))
+extern void __attribute__((overloadable))
+ rsSetElementAt_short3(rs_allocation a, short3 val, uint32_t x);
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 18))
+extern void __attribute__((overloadable))
+ rsSetElementAt_short4(rs_allocation a, short4 val, uint32_t x);
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 18))
+extern void __attribute__((overloadable))
+ rsSetElementAt_ushort(rs_allocation a, ushort val, uint32_t x);
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 18))
+extern void __attribute__((overloadable))
+ rsSetElementAt_ushort2(rs_allocation a, ushort2 val, uint32_t x);
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 18))
+extern void __attribute__((overloadable))
+ rsSetElementAt_ushort3(rs_allocation a, ushort3 val, uint32_t x);
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 18))
+extern void __attribute__((overloadable))
+ rsSetElementAt_ushort4(rs_allocation a, ushort4 val, uint32_t x);
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 18))
+extern void __attribute__((overloadable))
+ rsSetElementAt_int(rs_allocation a, int val, uint32_t x);
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 18))
+extern void __attribute__((overloadable))
+ rsSetElementAt_int2(rs_allocation a, int2 val, uint32_t x);
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 18))
+extern void __attribute__((overloadable))
+ rsSetElementAt_int3(rs_allocation a, int3 val, uint32_t x);
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 18))
+extern void __attribute__((overloadable))
+ rsSetElementAt_int4(rs_allocation a, int4 val, uint32_t x);
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 18))
+extern void __attribute__((overloadable))
+ rsSetElementAt_uint(rs_allocation a, uint val, uint32_t x);
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 18))
+extern void __attribute__((overloadable))
+ rsSetElementAt_uint2(rs_allocation a, uint2 val, uint32_t x);
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 18))
+extern void __attribute__((overloadable))
+ rsSetElementAt_uint3(rs_allocation a, uint3 val, uint32_t x);
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 18))
+extern void __attribute__((overloadable))
+ rsSetElementAt_uint4(rs_allocation a, uint4 val, uint32_t x);
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 18))
+extern void __attribute__((overloadable))
+ rsSetElementAt_long(rs_allocation a, long val, uint32_t x);
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 18))
+extern void __attribute__((overloadable))
+ rsSetElementAt_long2(rs_allocation a, long2 val, uint32_t x);
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 18))
+extern void __attribute__((overloadable))
+ rsSetElementAt_long3(rs_allocation a, long3 val, uint32_t x);
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 18))
+extern void __attribute__((overloadable))
+ rsSetElementAt_long4(rs_allocation a, long4 val, uint32_t x);
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 18))
+extern void __attribute__((overloadable))
+ rsSetElementAt_ulong(rs_allocation a, ulong val, uint32_t x);
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 18))
+extern void __attribute__((overloadable))
+ rsSetElementAt_ulong2(rs_allocation a, ulong2 val, uint32_t x);
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 18))
+extern void __attribute__((overloadable))
+ rsSetElementAt_ulong3(rs_allocation a, ulong3 val, uint32_t x);
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 18))
+extern void __attribute__((overloadable))
+ rsSetElementAt_ulong4(rs_allocation a, ulong4 val, uint32_t x);
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 18))
+extern void __attribute__((overloadable))
+ rsSetElementAt_float(rs_allocation a, float val, uint32_t x, uint32_t y);
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 18))
+extern void __attribute__((overloadable))
+ rsSetElementAt_float2(rs_allocation a, float2 val, uint32_t x, uint32_t y);
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 18))
+extern void __attribute__((overloadable))
+ rsSetElementAt_float3(rs_allocation a, float3 val, uint32_t x, uint32_t y);
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 18))
+extern void __attribute__((overloadable))
+ rsSetElementAt_float4(rs_allocation a, float4 val, uint32_t x, uint32_t y);
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 18))
+extern void __attribute__((overloadable))
+ rsSetElementAt_double(rs_allocation a, double val, uint32_t x, uint32_t y);
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 18))
+extern void __attribute__((overloadable))
+ rsSetElementAt_double2(rs_allocation a, double2 val, uint32_t x, uint32_t y);
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 18))
+extern void __attribute__((overloadable))
+ rsSetElementAt_double3(rs_allocation a, double3 val, uint32_t x, uint32_t y);
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 18))
+extern void __attribute__((overloadable))
+ rsSetElementAt_double4(rs_allocation a, double4 val, uint32_t x, uint32_t y);
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 18))
+extern void __attribute__((overloadable))
+ rsSetElementAt_char(rs_allocation a, char val, uint32_t x, uint32_t y);
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 18))
+extern void __attribute__((overloadable))
+ rsSetElementAt_char2(rs_allocation a, char2 val, uint32_t x, uint32_t y);
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 18))
+extern void __attribute__((overloadable))
+ rsSetElementAt_char3(rs_allocation a, char3 val, uint32_t x, uint32_t y);
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 18))
+extern void __attribute__((overloadable))
+ rsSetElementAt_char4(rs_allocation a, char4 val, uint32_t x, uint32_t y);
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 18))
+extern void __attribute__((overloadable))
+ rsSetElementAt_uchar(rs_allocation a, uchar val, uint32_t x, uint32_t y);
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 18))
+extern void __attribute__((overloadable))
+ rsSetElementAt_uchar2(rs_allocation a, uchar2 val, uint32_t x, uint32_t y);
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 18))
+extern void __attribute__((overloadable))
+ rsSetElementAt_uchar3(rs_allocation a, uchar3 val, uint32_t x, uint32_t y);
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 18))
+extern void __attribute__((overloadable))
+ rsSetElementAt_uchar4(rs_allocation a, uchar4 val, uint32_t x, uint32_t y);
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 18))
+extern void __attribute__((overloadable))
+ rsSetElementAt_short(rs_allocation a, short val, uint32_t x, uint32_t y);
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 18))
+extern void __attribute__((overloadable))
+ rsSetElementAt_short2(rs_allocation a, short2 val, uint32_t x, uint32_t y);
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 18))
+extern void __attribute__((overloadable))
+ rsSetElementAt_short3(rs_allocation a, short3 val, uint32_t x, uint32_t y);
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 18))
+extern void __attribute__((overloadable))
+ rsSetElementAt_short4(rs_allocation a, short4 val, uint32_t x, uint32_t y);
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 18))
+extern void __attribute__((overloadable))
+ rsSetElementAt_ushort(rs_allocation a, ushort val, uint32_t x, uint32_t y);
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 18))
+extern void __attribute__((overloadable))
+ rsSetElementAt_ushort2(rs_allocation a, ushort2 val, uint32_t x, uint32_t y);
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 18))
+extern void __attribute__((overloadable))
+ rsSetElementAt_ushort3(rs_allocation a, ushort3 val, uint32_t x, uint32_t y);
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 18))
+extern void __attribute__((overloadable))
+ rsSetElementAt_ushort4(rs_allocation a, ushort4 val, uint32_t x, uint32_t y);
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 18))
+extern void __attribute__((overloadable))
+ rsSetElementAt_int(rs_allocation a, int val, uint32_t x, uint32_t y);
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 18))
+extern void __attribute__((overloadable))
+ rsSetElementAt_int2(rs_allocation a, int2 val, uint32_t x, uint32_t y);
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 18))
+extern void __attribute__((overloadable))
+ rsSetElementAt_int3(rs_allocation a, int3 val, uint32_t x, uint32_t y);
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 18))
+extern void __attribute__((overloadable))
+ rsSetElementAt_int4(rs_allocation a, int4 val, uint32_t x, uint32_t y);
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 18))
+extern void __attribute__((overloadable))
+ rsSetElementAt_uint(rs_allocation a, uint val, uint32_t x, uint32_t y);
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 18))
+extern void __attribute__((overloadable))
+ rsSetElementAt_uint2(rs_allocation a, uint2 val, uint32_t x, uint32_t y);
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 18))
+extern void __attribute__((overloadable))
+ rsSetElementAt_uint3(rs_allocation a, uint3 val, uint32_t x, uint32_t y);
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 18))
+extern void __attribute__((overloadable))
+ rsSetElementAt_uint4(rs_allocation a, uint4 val, uint32_t x, uint32_t y);
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 18))
+extern void __attribute__((overloadable))
+ rsSetElementAt_long(rs_allocation a, long val, uint32_t x, uint32_t y);
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 18))
+extern void __attribute__((overloadable))
+ rsSetElementAt_long2(rs_allocation a, long2 val, uint32_t x, uint32_t y);
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 18))
+extern void __attribute__((overloadable))
+ rsSetElementAt_long3(rs_allocation a, long3 val, uint32_t x, uint32_t y);
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 18))
+extern void __attribute__((overloadable))
+ rsSetElementAt_long4(rs_allocation a, long4 val, uint32_t x, uint32_t y);
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 18))
+extern void __attribute__((overloadable))
+ rsSetElementAt_ulong(rs_allocation a, ulong val, uint32_t x, uint32_t y);
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 18))
+extern void __attribute__((overloadable))
+ rsSetElementAt_ulong2(rs_allocation a, ulong2 val, uint32_t x, uint32_t y);
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 18))
+extern void __attribute__((overloadable))
+ rsSetElementAt_ulong3(rs_allocation a, ulong3 val, uint32_t x, uint32_t y);
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 18))
+extern void __attribute__((overloadable))
+ rsSetElementAt_ulong4(rs_allocation a, ulong4 val, uint32_t x, uint32_t y);
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 18))
+extern void __attribute__((overloadable))
+ rsSetElementAt_float(rs_allocation a, float val, uint32_t x, uint32_t y, uint32_t z);
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 18))
+extern void __attribute__((overloadable))
+ rsSetElementAt_float2(rs_allocation a, float2 val, uint32_t x, uint32_t y, uint32_t z);
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 18))
+extern void __attribute__((overloadable))
+ rsSetElementAt_float3(rs_allocation a, float3 val, uint32_t x, uint32_t y, uint32_t z);
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 18))
+extern void __attribute__((overloadable))
+ rsSetElementAt_float4(rs_allocation a, float4 val, uint32_t x, uint32_t y, uint32_t z);
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 18))
+extern void __attribute__((overloadable))
+ rsSetElementAt_double(rs_allocation a, double val, uint32_t x, uint32_t y, uint32_t z);
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 18))
+extern void __attribute__((overloadable))
+ rsSetElementAt_double2(rs_allocation a, double2 val, uint32_t x, uint32_t y, uint32_t z);
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 18))
+extern void __attribute__((overloadable))
+ rsSetElementAt_double3(rs_allocation a, double3 val, uint32_t x, uint32_t y, uint32_t z);
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 18))
+extern void __attribute__((overloadable))
+ rsSetElementAt_double4(rs_allocation a, double4 val, uint32_t x, uint32_t y, uint32_t z);
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 18))
+extern void __attribute__((overloadable))
+ rsSetElementAt_char(rs_allocation a, char val, uint32_t x, uint32_t y, uint32_t z);
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 18))
+extern void __attribute__((overloadable))
+ rsSetElementAt_char2(rs_allocation a, char2 val, uint32_t x, uint32_t y, uint32_t z);
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 18))
+extern void __attribute__((overloadable))
+ rsSetElementAt_char3(rs_allocation a, char3 val, uint32_t x, uint32_t y, uint32_t z);
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 18))
+extern void __attribute__((overloadable))
+ rsSetElementAt_char4(rs_allocation a, char4 val, uint32_t x, uint32_t y, uint32_t z);
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 18))
+extern void __attribute__((overloadable))
+ rsSetElementAt_uchar(rs_allocation a, uchar val, uint32_t x, uint32_t y, uint32_t z);
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 18))
+extern void __attribute__((overloadable))
+ rsSetElementAt_uchar2(rs_allocation a, uchar2 val, uint32_t x, uint32_t y, uint32_t z);
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 18))
+extern void __attribute__((overloadable))
+ rsSetElementAt_uchar3(rs_allocation a, uchar3 val, uint32_t x, uint32_t y, uint32_t z);
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 18))
+extern void __attribute__((overloadable))
+ rsSetElementAt_uchar4(rs_allocation a, uchar4 val, uint32_t x, uint32_t y, uint32_t z);
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 18))
+extern void __attribute__((overloadable))
+ rsSetElementAt_short(rs_allocation a, short val, uint32_t x, uint32_t y, uint32_t z);
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 18))
+extern void __attribute__((overloadable))
+ rsSetElementAt_short2(rs_allocation a, short2 val, uint32_t x, uint32_t y, uint32_t z);
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 18))
+extern void __attribute__((overloadable))
+ rsSetElementAt_short3(rs_allocation a, short3 val, uint32_t x, uint32_t y, uint32_t z);
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 18))
+extern void __attribute__((overloadable))
+ rsSetElementAt_short4(rs_allocation a, short4 val, uint32_t x, uint32_t y, uint32_t z);
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 18))
+extern void __attribute__((overloadable))
+ rsSetElementAt_ushort(rs_allocation a, ushort val, uint32_t x, uint32_t y, uint32_t z);
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 18))
+extern void __attribute__((overloadable))
+ rsSetElementAt_ushort2(rs_allocation a, ushort2 val, uint32_t x, uint32_t y, uint32_t z);
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 18))
+extern void __attribute__((overloadable))
+ rsSetElementAt_ushort3(rs_allocation a, ushort3 val, uint32_t x, uint32_t y, uint32_t z);
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 18))
+extern void __attribute__((overloadable))
+ rsSetElementAt_ushort4(rs_allocation a, ushort4 val, uint32_t x, uint32_t y, uint32_t z);
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 18))
+extern void __attribute__((overloadable))
+ rsSetElementAt_int(rs_allocation a, int val, uint32_t x, uint32_t y, uint32_t z);
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 18))
+extern void __attribute__((overloadable))
+ rsSetElementAt_int2(rs_allocation a, int2 val, uint32_t x, uint32_t y, uint32_t z);
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 18))
+extern void __attribute__((overloadable))
+ rsSetElementAt_int3(rs_allocation a, int3 val, uint32_t x, uint32_t y, uint32_t z);
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 18))
+extern void __attribute__((overloadable))
+ rsSetElementAt_int4(rs_allocation a, int4 val, uint32_t x, uint32_t y, uint32_t z);
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 18))
+extern void __attribute__((overloadable))
+ rsSetElementAt_uint(rs_allocation a, uint val, uint32_t x, uint32_t y, uint32_t z);
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 18))
+extern void __attribute__((overloadable))
+ rsSetElementAt_uint2(rs_allocation a, uint2 val, uint32_t x, uint32_t y, uint32_t z);
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 18))
+extern void __attribute__((overloadable))
+ rsSetElementAt_uint3(rs_allocation a, uint3 val, uint32_t x, uint32_t y, uint32_t z);
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 18))
+extern void __attribute__((overloadable))
+ rsSetElementAt_uint4(rs_allocation a, uint4 val, uint32_t x, uint32_t y, uint32_t z);
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 18))
+extern void __attribute__((overloadable))
+ rsSetElementAt_long(rs_allocation a, long val, uint32_t x, uint32_t y, uint32_t z);
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 18))
+extern void __attribute__((overloadable))
+ rsSetElementAt_long2(rs_allocation a, long2 val, uint32_t x, uint32_t y, uint32_t z);
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 18))
+extern void __attribute__((overloadable))
+ rsSetElementAt_long3(rs_allocation a, long3 val, uint32_t x, uint32_t y, uint32_t z);
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 18))
+extern void __attribute__((overloadable))
+ rsSetElementAt_long4(rs_allocation a, long4 val, uint32_t x, uint32_t y, uint32_t z);
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 18))
+extern void __attribute__((overloadable))
+ rsSetElementAt_ulong(rs_allocation a, ulong val, uint32_t x, uint32_t y, uint32_t z);
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 18))
+extern void __attribute__((overloadable))
+ rsSetElementAt_ulong2(rs_allocation a, ulong2 val, uint32_t x, uint32_t y, uint32_t z);
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 18))
+extern void __attribute__((overloadable))
+ rsSetElementAt_ulong3(rs_allocation a, ulong3 val, uint32_t x, uint32_t y, uint32_t z);
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 18))
+extern void __attribute__((overloadable))
+ rsSetElementAt_ulong4(rs_allocation a, ulong4 val, uint32_t x, uint32_t y, uint32_t z);
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 23))
+extern void __attribute__((overloadable))
+ rsSetElementAt_half(rs_allocation a, half val, uint32_t x);
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 23))
+extern void __attribute__((overloadable))
+ rsSetElementAt_half2(rs_allocation a, half2 val, uint32_t x);
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 23))
+extern void __attribute__((overloadable))
+ rsSetElementAt_half3(rs_allocation a, half3 val, uint32_t x);
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 23))
+extern void __attribute__((overloadable))
+ rsSetElementAt_half4(rs_allocation a, half4 val, uint32_t x);
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 23))
+extern void __attribute__((overloadable))
+ rsSetElementAt_half(rs_allocation a, half val, uint32_t x, uint32_t y);
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 23))
+extern void __attribute__((overloadable))
+ rsSetElementAt_half2(rs_allocation a, half2 val, uint32_t x, uint32_t y);
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 23))
+extern void __attribute__((overloadable))
+ rsSetElementAt_half3(rs_allocation a, half3 val, uint32_t x, uint32_t y);
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 23))
+extern void __attribute__((overloadable))
+ rsSetElementAt_half4(rs_allocation a, half4 val, uint32_t x, uint32_t y);
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 23))
+extern void __attribute__((overloadable))
+ rsSetElementAt_half(rs_allocation a, half val, uint32_t x, uint32_t y, uint32_t z);
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 23))
+extern void __attribute__((overloadable))
+ rsSetElementAt_half2(rs_allocation a, half2 val, uint32_t x, uint32_t y, uint32_t z);
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 23))
+extern void __attribute__((overloadable))
+ rsSetElementAt_half3(rs_allocation a, half3 val, uint32_t x, uint32_t y, uint32_t z);
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 23))
+extern void __attribute__((overloadable))
+ rsSetElementAt_half4(rs_allocation a, half4 val, uint32_t x, uint32_t y, uint32_t z);
+#endif
+
+#endif // RENDERSCRIPT_RS_ALLOCATION_DATA_RSH
diff --git a/current/platform/rs/scriptc/rs_atomic.rsh b/current/platform/rs/scriptc/rs_atomic.rsh
new file mode 100644
index 0000000..98a8784
--- /dev/null
+++ b/current/platform/rs/scriptc/rs_atomic.rsh
@@ -0,0 +1,257 @@
+/*
+ * Copyright (C) 2016 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+// Don't edit this file! It is auto-generated by frameworks/rs/api/generate.sh.
+
+/*
+ * rs_atomic.rsh: Atomic Update Functions
+ *
+ * To update values shared between multiple threads, use the functions below.
+ * They ensure that the values are atomically updated, i.e. that the memory
+ * reads, the updates, and the memory writes are done in the right order.
+ *
+ * These functions are slower than their non-atomic equivalents, so use
+ * them only when synchronization is needed.
+ *
+ * Note that in RenderScript, your code is likely to be running in separate
+ * threads even though you did not explicitly create them. The RenderScript
+ * runtime will very often split the execution of one kernel across multiple
+ * threads. Updating globals should be done with atomic functions. If possible,
+ * modify your algorithm to avoid them altogether.
+ */
+
+#ifndef RENDERSCRIPT_RS_ATOMIC_RSH
+#define RENDERSCRIPT_RS_ATOMIC_RSH
+
+/*
+ * rsAtomicAdd: Thread-safe addition
+ *
+ * Atomically adds a value to the value at addr, i.e. *addr += value.
+ *
+ * Parameters:
+ * addr: Address of the value to modify.
+ * value: Amount to add.
+ *
+ * Returns: Value of *addr prior to the operation.
+ */
+#if (defined(RS_VERSION) && (RS_VERSION >= 14))
+extern int32_t __attribute__((overloadable))
+ rsAtomicAdd(volatile int32_t* addr, int32_t value);
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 20))
+extern int32_t __attribute__((overloadable))
+ rsAtomicAdd(volatile uint32_t* addr, uint32_t value);
+#endif
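+
+/*
+ * Usage sketch (editorial addition): counting matches across the many threads
+ * the runtime may use for one kernel launch. The global and kernel names are
+ * hypothetical, and the counter is reset from Java before each launch:
+ *
+ *   volatile int32_t gCount;
+ *
+ *   void __attribute__((kernel)) countDark(uchar4 in) {
+ *       if (in.r < 16 && in.g < 16 && in.b < 16) {
+ *           rsAtomicAdd(&gCount, 1);  // safe even when threads race
+ *       }
+ *   }
+ */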
+
+/*
+ * rsAtomicAnd: Thread-safe bitwise and
+ *
+ * Atomically performs a bitwise and of two values, storing the result back at addr,
+ * i.e. *addr &= value.
+ *
+ * Parameters:
+ * addr: Address of the value to modify.
+ * value: Value to and with.
+ *
+ * Returns: Value of *addr prior to the operation.
+ */
+#if (defined(RS_VERSION) && (RS_VERSION >= 14))
+extern int32_t __attribute__((overloadable))
+ rsAtomicAnd(volatile int32_t* addr, int32_t value);
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 20))
+extern int32_t __attribute__((overloadable))
+ rsAtomicAnd(volatile uint32_t* addr, uint32_t value);
+#endif
+
+/*
+ * rsAtomicCas: Thread-safe compare and set
+ *
+ * If the value at addr matches compareValue, then newValue is written at addr,
+ * i.e. if (*addr == compareValue) { *addr = newValue; }.
+ *
+ * You can check that the value was written by checking that the value returned
+ * by rsAtomicCas() is compareValue.
+ *
+ * Parameters:
+ * addr: Address of the value to compare and replace if the test passes.
+ * compareValue: Value to test *addr against.
+ * newValue: Value to write if the test passes.
+ *
+ * Returns: Value of *addr prior to the operation.
+ */
+#if (defined(RS_VERSION) && (RS_VERSION >= 14))
+extern int32_t __attribute__((overloadable))
+ rsAtomicCas(volatile int32_t* addr, int32_t compareValue, int32_t newValue);
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 14))
+extern uint32_t __attribute__((overloadable))
+ rsAtomicCas(volatile uint32_t* addr, uint32_t compareValue, uint32_t newValue);
+#endif
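+
+/*
+ * Illustrative sketch (editorial addition): building an update the API does
+ * not provide, an atomic doubling, out of a compare-and-set retry loop. The
+ * write succeeds only when rsAtomicCas() returns the value we last read;
+ * the helper name is hypothetical.
+ */
+#if (defined(RS_VERSION) && (RS_VERSION >= 14))
+static inline int32_t exampleAtomicDouble(volatile int32_t* addr) {
+    int32_t old;
+    do {
+        old = *addr;  // snapshot the current value
+        // retry if another thread changed *addr between the read and the CAS
+    } while (rsAtomicCas(addr, old, old * 2) != old);
+    return old;       // value prior to the operation
+}
+#endif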
+
+/*
+ * rsAtomicDec: Thread-safe decrement
+ *
+ * Atomically subtracts one from the value at addr. This is equivalent to rsAtomicSub(addr, 1).
+ *
+ * Parameters:
+ * addr: Address of the value to decrement.
+ *
+ * Returns: Value of *addr prior to the operation.
+ */
+#if (defined(RS_VERSION) && (RS_VERSION >= 14))
+extern int32_t __attribute__((overloadable))
+ rsAtomicDec(volatile int32_t* addr);
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 20))
+extern int32_t __attribute__((overloadable))
+ rsAtomicDec(volatile uint32_t* addr);
+#endif
+
+/*
+ * rsAtomicInc: Thread-safe increment
+ *
+ * Atomically adds one to the value at addr. This is equivalent to rsAtomicAdd(addr, 1).
+ *
+ * Parameters:
+ * addr: Address of the value to increment.
+ *
+ * Returns: Value of *addr prior to the operation.
+ */
+#if (defined(RS_VERSION) && (RS_VERSION >= 14))
+extern int32_t __attribute__((overloadable))
+ rsAtomicInc(volatile int32_t* addr);
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 20))
+extern int32_t __attribute__((overloadable))
+ rsAtomicInc(volatile uint32_t* addr);
+#endif
+
+/*
+ * rsAtomicMax: Thread-safe maximum
+ *
+ * Atomically sets the value at addr to the maximum of *addr and value, i.e.
+ * *addr = max(*addr, value).
+ *
+ * Parameters:
+ * addr: Address of the value to modify.
+ * value: Comparison value.
+ *
+ * Returns: Value of *addr prior to the operation.
+ */
+#if (defined(RS_VERSION) && (RS_VERSION >= 14))
+extern uint32_t __attribute__((overloadable))
+ rsAtomicMax(volatile uint32_t* addr, uint32_t value);
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 14))
+extern int32_t __attribute__((overloadable))
+ rsAtomicMax(volatile int32_t* addr, int32_t value);
+#endif
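+
+/*
+ * Usage sketch (editorial addition): a global maximum reduction over a kernel
+ * launch. Names are hypothetical; gMax is initialized from Java to the
+ * smallest possible value before the launch:
+ *
+ *   volatile int32_t gMax;
+ *
+ *   void __attribute__((kernel)) findMax(int in) {
+ *       rsAtomicMax(&gMax, in);
+ *   }
+ */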
+
+/*
+ * rsAtomicMin: Thread-safe minimum
+ *
+ * Atomically sets the value at addr to the minimum of *addr and value, i.e.
+ * *addr = min(*addr, value).
+ *
+ * Parameters:
+ * addr: Address of the value to modify.
+ * value: Comparison value.
+ *
+ * Returns: Value of *addr prior to the operation.
+ */
+#if (defined(RS_VERSION) && (RS_VERSION >= 14))
+extern uint32_t __attribute__((overloadable))
+ rsAtomicMin(volatile uint32_t* addr, uint32_t value);
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 14))
+extern int32_t __attribute__((overloadable))
+ rsAtomicMin(volatile int32_t* addr, int32_t value);
+#endif
+
+/*
+ * rsAtomicOr: Thread-safe bitwise or
+ *
+ * Atomically performs a bitwise or of two values, storing the result at addr,
+ * i.e. *addr |= value.
+ *
+ * Parameters:
+ * addr: Address of the value to modify.
+ * value: Value to or with.
+ *
+ * Returns: Value of *addr prior to the operation.
+ */
+#if (defined(RS_VERSION) && (RS_VERSION >= 14))
+extern int32_t __attribute__((overloadable))
+ rsAtomicOr(volatile int32_t* addr, int32_t value);
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 20))
+extern int32_t __attribute__((overloadable))
+ rsAtomicOr(volatile uint32_t* addr, uint32_t value);
+#endif
+
+/*
+ * rsAtomicSub: Thread-safe subtraction
+ *
+ * Atomically subtracts a value from the value at addr, i.e. *addr -= value.
+ *
+ * Parameters:
+ * addr: Address of the value to modify.
+ * value: Amount to subtract.
+ *
+ * Returns: Value of *addr prior to the operation.
+ */
+#if (defined(RS_VERSION) && (RS_VERSION >= 14))
+extern int32_t __attribute__((overloadable))
+ rsAtomicSub(volatile int32_t* addr, int32_t value);
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 20))
+extern int32_t __attribute__((overloadable))
+ rsAtomicSub(volatile uint32_t* addr, uint32_t value);
+#endif
+
+/*
+ * rsAtomicXor: Thread-safe bitwise exclusive or
+ *
+ * Atomically performs a bitwise xor of two values, storing the result at addr,
+ * i.e. *addr ^= value.
+ *
+ * Parameters:
+ * addr: Address of the value to modify.
+ * value: Value to xor with.
+ *
+ * Returns: Value of *addr prior to the operation.
+ */
+#if (defined(RS_VERSION) && (RS_VERSION >= 14))
+extern int32_t __attribute__((overloadable))
+ rsAtomicXor(volatile int32_t* addr, int32_t value);
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 20))
+extern int32_t __attribute__((overloadable))
+ rsAtomicXor(volatile uint32_t* addr, uint32_t value);
+#endif
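+
+/* A minimal sketch (user script code; names are hypothetical): the bitwise
+ * atomics collect flags from many threads at once. Here each thread may set
+ * bits with rsAtomicOr(); rsAtomicXor() could later toggle them. */
+volatile int32_t gSeen;  // bit 0: black pixel seen, bit 1: white pixel seen
+
+void RS_KERNEL classify(uchar in) {
+    if (in == 0) {
+        rsAtomicOr(&gSeen, 0x1);
+    } else if (in == 255) {
+        rsAtomicOr(&gSeen, 0x2);
+    }
+}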
+
+#endif // RENDERSCRIPT_RS_ATOMIC_RSH
diff --git a/current/platform/rs/scriptc/rs_convert.rsh b/current/platform/rs/scriptc/rs_convert.rsh
new file mode 100644
index 0000000..4c318d4
--- /dev/null
+++ b/current/platform/rs/scriptc/rs_convert.rsh
@@ -0,0 +1,1623 @@
+/*
+ * Copyright (C) 2016 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+// Don't edit this file! It is auto-generated by frameworks/rs/api/generate.sh.
+
+/*
+ * rs_convert.rsh: Conversion Functions
+ *
+ * The functions below convert from one numerical vector type to another, or from one color
+ * representation to another.
+ */
+
+#ifndef RENDERSCRIPT_RS_CONVERT_RSH
+#define RENDERSCRIPT_RS_CONVERT_RSH
+
+/*
+ * convert: Convert numerical vectors
+ *
+ * Converts a vector from one numerical type to another. The conversion is done entry by entry.
+ *
+ * E.g. calling a = convert_short3(b); is equivalent to doing
+ * a.x = (short)b.x; a.y = (short)b.y; a.z = (short)b.z;.
+ *
+ * Converting floating point values to integer types truncates.
+ *
+ * Converting numbers too large to fit the destination type yields undefined results.
+ * For example, converting a float that contains 1.0e18 to a short is undefined.
+ * Use clamp() to avoid this.
+ */
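+
+/* A minimal sketch (user script code; the kernel name is hypothetical):
+ * clamp() bounds the input to the destination range before converting,
+ * avoiding the undefined results described above. */
+short3 RS_KERNEL toShort3(float3 in) {
+    return convert_short3(clamp(in, -32768.0f, 32767.0f));
+}
+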
+extern float2 __attribute__((const, overloadable))
+ convert_float2(float2 v);
+
+extern float3 __attribute__((const, overloadable))
+ convert_float3(float3 v);
+
+extern float4 __attribute__((const, overloadable))
+ convert_float4(float4 v);
+
+extern float2 __attribute__((const, overloadable))
+ convert_float2(char2 v);
+
+extern float3 __attribute__((const, overloadable))
+ convert_float3(char3 v);
+
+extern float4 __attribute__((const, overloadable))
+ convert_float4(char4 v);
+
+extern float2 __attribute__((const, overloadable))
+ convert_float2(uchar2 v);
+
+extern float3 __attribute__((const, overloadable))
+ convert_float3(uchar3 v);
+
+extern float4 __attribute__((const, overloadable))
+ convert_float4(uchar4 v);
+
+extern float2 __attribute__((const, overloadable))
+ convert_float2(short2 v);
+
+extern float3 __attribute__((const, overloadable))
+ convert_float3(short3 v);
+
+extern float4 __attribute__((const, overloadable))
+ convert_float4(short4 v);
+
+extern float2 __attribute__((const, overloadable))
+ convert_float2(ushort2 v);
+
+extern float3 __attribute__((const, overloadable))
+ convert_float3(ushort3 v);
+
+extern float4 __attribute__((const, overloadable))
+ convert_float4(ushort4 v);
+
+extern float2 __attribute__((const, overloadable))
+ convert_float2(int2 v);
+
+extern float3 __attribute__((const, overloadable))
+ convert_float3(int3 v);
+
+extern float4 __attribute__((const, overloadable))
+ convert_float4(int4 v);
+
+extern float2 __attribute__((const, overloadable))
+ convert_float2(uint2 v);
+
+extern float3 __attribute__((const, overloadable))
+ convert_float3(uint3 v);
+
+extern float4 __attribute__((const, overloadable))
+ convert_float4(uint4 v);
+
+extern char2 __attribute__((const, overloadable))
+ convert_char2(float2 v);
+
+extern char3 __attribute__((const, overloadable))
+ convert_char3(float3 v);
+
+extern char4 __attribute__((const, overloadable))
+ convert_char4(float4 v);
+
+extern char2 __attribute__((const, overloadable))
+ convert_char2(char2 v);
+
+extern char3 __attribute__((const, overloadable))
+ convert_char3(char3 v);
+
+extern char4 __attribute__((const, overloadable))
+ convert_char4(char4 v);
+
+extern char2 __attribute__((const, overloadable))
+ convert_char2(uchar2 v);
+
+extern char3 __attribute__((const, overloadable))
+ convert_char3(uchar3 v);
+
+extern char4 __attribute__((const, overloadable))
+ convert_char4(uchar4 v);
+
+extern char2 __attribute__((const, overloadable))
+ convert_char2(short2 v);
+
+extern char3 __attribute__((const, overloadable))
+ convert_char3(short3 v);
+
+extern char4 __attribute__((const, overloadable))
+ convert_char4(short4 v);
+
+extern char2 __attribute__((const, overloadable))
+ convert_char2(ushort2 v);
+
+extern char3 __attribute__((const, overloadable))
+ convert_char3(ushort3 v);
+
+extern char4 __attribute__((const, overloadable))
+ convert_char4(ushort4 v);
+
+extern char2 __attribute__((const, overloadable))
+ convert_char2(int2 v);
+
+extern char3 __attribute__((const, overloadable))
+ convert_char3(int3 v);
+
+extern char4 __attribute__((const, overloadable))
+ convert_char4(int4 v);
+
+extern char2 __attribute__((const, overloadable))
+ convert_char2(uint2 v);
+
+extern char3 __attribute__((const, overloadable))
+ convert_char3(uint3 v);
+
+extern char4 __attribute__((const, overloadable))
+ convert_char4(uint4 v);
+
+extern uchar2 __attribute__((const, overloadable))
+ convert_uchar2(float2 v);
+
+extern uchar3 __attribute__((const, overloadable))
+ convert_uchar3(float3 v);
+
+extern uchar4 __attribute__((const, overloadable))
+ convert_uchar4(float4 v);
+
+extern uchar2 __attribute__((const, overloadable))
+ convert_uchar2(char2 v);
+
+extern uchar3 __attribute__((const, overloadable))
+ convert_uchar3(char3 v);
+
+extern uchar4 __attribute__((const, overloadable))
+ convert_uchar4(char4 v);
+
+extern uchar2 __attribute__((const, overloadable))
+ convert_uchar2(uchar2 v);
+
+extern uchar3 __attribute__((const, overloadable))
+ convert_uchar3(uchar3 v);
+
+extern uchar4 __attribute__((const, overloadable))
+ convert_uchar4(uchar4 v);
+
+extern uchar2 __attribute__((const, overloadable))
+ convert_uchar2(short2 v);
+
+extern uchar3 __attribute__((const, overloadable))
+ convert_uchar3(short3 v);
+
+extern uchar4 __attribute__((const, overloadable))
+ convert_uchar4(short4 v);
+
+extern uchar2 __attribute__((const, overloadable))
+ convert_uchar2(ushort2 v);
+
+extern uchar3 __attribute__((const, overloadable))
+ convert_uchar3(ushort3 v);
+
+extern uchar4 __attribute__((const, overloadable))
+ convert_uchar4(ushort4 v);
+
+extern uchar2 __attribute__((const, overloadable))
+ convert_uchar2(int2 v);
+
+extern uchar3 __attribute__((const, overloadable))
+ convert_uchar3(int3 v);
+
+extern uchar4 __attribute__((const, overloadable))
+ convert_uchar4(int4 v);
+
+extern uchar2 __attribute__((const, overloadable))
+ convert_uchar2(uint2 v);
+
+extern uchar3 __attribute__((const, overloadable))
+ convert_uchar3(uint3 v);
+
+extern uchar4 __attribute__((const, overloadable))
+ convert_uchar4(uint4 v);
+
+extern short2 __attribute__((const, overloadable))
+ convert_short2(float2 v);
+
+extern short3 __attribute__((const, overloadable))
+ convert_short3(float3 v);
+
+extern short4 __attribute__((const, overloadable))
+ convert_short4(float4 v);
+
+extern short2 __attribute__((const, overloadable))
+ convert_short2(char2 v);
+
+extern short3 __attribute__((const, overloadable))
+ convert_short3(char3 v);
+
+extern short4 __attribute__((const, overloadable))
+ convert_short4(char4 v);
+
+extern short2 __attribute__((const, overloadable))
+ convert_short2(uchar2 v);
+
+extern short3 __attribute__((const, overloadable))
+ convert_short3(uchar3 v);
+
+extern short4 __attribute__((const, overloadable))
+ convert_short4(uchar4 v);
+
+extern short2 __attribute__((const, overloadable))
+ convert_short2(short2 v);
+
+extern short3 __attribute__((const, overloadable))
+ convert_short3(short3 v);
+
+extern short4 __attribute__((const, overloadable))
+ convert_short4(short4 v);
+
+extern short2 __attribute__((const, overloadable))
+ convert_short2(ushort2 v);
+
+extern short3 __attribute__((const, overloadable))
+ convert_short3(ushort3 v);
+
+extern short4 __attribute__((const, overloadable))
+ convert_short4(ushort4 v);
+
+extern short2 __attribute__((const, overloadable))
+ convert_short2(int2 v);
+
+extern short3 __attribute__((const, overloadable))
+ convert_short3(int3 v);
+
+extern short4 __attribute__((const, overloadable))
+ convert_short4(int4 v);
+
+extern short2 __attribute__((const, overloadable))
+ convert_short2(uint2 v);
+
+extern short3 __attribute__((const, overloadable))
+ convert_short3(uint3 v);
+
+extern short4 __attribute__((const, overloadable))
+ convert_short4(uint4 v);
+
+extern ushort2 __attribute__((const, overloadable))
+ convert_ushort2(float2 v);
+
+extern ushort3 __attribute__((const, overloadable))
+ convert_ushort3(float3 v);
+
+extern ushort4 __attribute__((const, overloadable))
+ convert_ushort4(float4 v);
+
+extern ushort2 __attribute__((const, overloadable))
+ convert_ushort2(char2 v);
+
+extern ushort3 __attribute__((const, overloadable))
+ convert_ushort3(char3 v);
+
+extern ushort4 __attribute__((const, overloadable))
+ convert_ushort4(char4 v);
+
+extern ushort2 __attribute__((const, overloadable))
+ convert_ushort2(uchar2 v);
+
+extern ushort3 __attribute__((const, overloadable))
+ convert_ushort3(uchar3 v);
+
+extern ushort4 __attribute__((const, overloadable))
+ convert_ushort4(uchar4 v);
+
+extern ushort2 __attribute__((const, overloadable))
+ convert_ushort2(short2 v);
+
+extern ushort3 __attribute__((const, overloadable))
+ convert_ushort3(short3 v);
+
+extern ushort4 __attribute__((const, overloadable))
+ convert_ushort4(short4 v);
+
+extern ushort2 __attribute__((const, overloadable))
+ convert_ushort2(ushort2 v);
+
+extern ushort3 __attribute__((const, overloadable))
+ convert_ushort3(ushort3 v);
+
+extern ushort4 __attribute__((const, overloadable))
+ convert_ushort4(ushort4 v);
+
+extern ushort2 __attribute__((const, overloadable))
+ convert_ushort2(int2 v);
+
+extern ushort3 __attribute__((const, overloadable))
+ convert_ushort3(int3 v);
+
+extern ushort4 __attribute__((const, overloadable))
+ convert_ushort4(int4 v);
+
+extern ushort2 __attribute__((const, overloadable))
+ convert_ushort2(uint2 v);
+
+extern ushort3 __attribute__((const, overloadable))
+ convert_ushort3(uint3 v);
+
+extern ushort4 __attribute__((const, overloadable))
+ convert_ushort4(uint4 v);
+
+extern int2 __attribute__((const, overloadable))
+ convert_int2(float2 v);
+
+extern int3 __attribute__((const, overloadable))
+ convert_int3(float3 v);
+
+extern int4 __attribute__((const, overloadable))
+ convert_int4(float4 v);
+
+extern int2 __attribute__((const, overloadable))
+ convert_int2(char2 v);
+
+extern int3 __attribute__((const, overloadable))
+ convert_int3(char3 v);
+
+extern int4 __attribute__((const, overloadable))
+ convert_int4(char4 v);
+
+extern int2 __attribute__((const, overloadable))
+ convert_int2(uchar2 v);
+
+extern int3 __attribute__((const, overloadable))
+ convert_int3(uchar3 v);
+
+extern int4 __attribute__((const, overloadable))
+ convert_int4(uchar4 v);
+
+extern int2 __attribute__((const, overloadable))
+ convert_int2(short2 v);
+
+extern int3 __attribute__((const, overloadable))
+ convert_int3(short3 v);
+
+extern int4 __attribute__((const, overloadable))
+ convert_int4(short4 v);
+
+extern int2 __attribute__((const, overloadable))
+ convert_int2(ushort2 v);
+
+extern int3 __attribute__((const, overloadable))
+ convert_int3(ushort3 v);
+
+extern int4 __attribute__((const, overloadable))
+ convert_int4(ushort4 v);
+
+extern int2 __attribute__((const, overloadable))
+ convert_int2(int2 v);
+
+extern int3 __attribute__((const, overloadable))
+ convert_int3(int3 v);
+
+extern int4 __attribute__((const, overloadable))
+ convert_int4(int4 v);
+
+extern int2 __attribute__((const, overloadable))
+ convert_int2(uint2 v);
+
+extern int3 __attribute__((const, overloadable))
+ convert_int3(uint3 v);
+
+extern int4 __attribute__((const, overloadable))
+ convert_int4(uint4 v);
+
+extern uint2 __attribute__((const, overloadable))
+ convert_uint2(float2 v);
+
+extern uint3 __attribute__((const, overloadable))
+ convert_uint3(float3 v);
+
+extern uint4 __attribute__((const, overloadable))
+ convert_uint4(float4 v);
+
+extern uint2 __attribute__((const, overloadable))
+ convert_uint2(char2 v);
+
+extern uint3 __attribute__((const, overloadable))
+ convert_uint3(char3 v);
+
+extern uint4 __attribute__((const, overloadable))
+ convert_uint4(char4 v);
+
+extern uint2 __attribute__((const, overloadable))
+ convert_uint2(uchar2 v);
+
+extern uint3 __attribute__((const, overloadable))
+ convert_uint3(uchar3 v);
+
+extern uint4 __attribute__((const, overloadable))
+ convert_uint4(uchar4 v);
+
+extern uint2 __attribute__((const, overloadable))
+ convert_uint2(short2 v);
+
+extern uint3 __attribute__((const, overloadable))
+ convert_uint3(short3 v);
+
+extern uint4 __attribute__((const, overloadable))
+ convert_uint4(short4 v);
+
+extern uint2 __attribute__((const, overloadable))
+ convert_uint2(ushort2 v);
+
+extern uint3 __attribute__((const, overloadable))
+ convert_uint3(ushort3 v);
+
+extern uint4 __attribute__((const, overloadable))
+ convert_uint4(ushort4 v);
+
+extern uint2 __attribute__((const, overloadable))
+ convert_uint2(int2 v);
+
+extern uint3 __attribute__((const, overloadable))
+ convert_uint3(int3 v);
+
+extern uint4 __attribute__((const, overloadable))
+ convert_uint4(int4 v);
+
+extern uint2 __attribute__((const, overloadable))
+ convert_uint2(uint2 v);
+
+extern uint3 __attribute__((const, overloadable))
+ convert_uint3(uint3 v);
+
+extern uint4 __attribute__((const, overloadable))
+ convert_uint4(uint4 v);
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 21))
+extern double2 __attribute__((const, overloadable))
+ convert_double2(double2 v);
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 21))
+extern double3 __attribute__((const, overloadable))
+ convert_double3(double3 v);
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 21))
+extern double4 __attribute__((const, overloadable))
+ convert_double4(double4 v);
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 21))
+extern double2 __attribute__((const, overloadable))
+ convert_double2(long2 v);
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 21))
+extern double3 __attribute__((const, overloadable))
+ convert_double3(long3 v);
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 21))
+extern double4 __attribute__((const, overloadable))
+ convert_double4(long4 v);
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 21))
+extern double2 __attribute__((const, overloadable))
+ convert_double2(ulong2 v);
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 21))
+extern double3 __attribute__((const, overloadable))
+ convert_double3(ulong3 v);
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 21))
+extern double4 __attribute__((const, overloadable))
+ convert_double4(ulong4 v);
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 21))
+extern long2 __attribute__((const, overloadable))
+ convert_long2(double2 v);
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 21))
+extern long3 __attribute__((const, overloadable))
+ convert_long3(double3 v);
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 21))
+extern long4 __attribute__((const, overloadable))
+ convert_long4(double4 v);
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 21))
+extern long2 __attribute__((const, overloadable))
+ convert_long2(long2 v);
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 21))
+extern long3 __attribute__((const, overloadable))
+ convert_long3(long3 v);
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 21))
+extern long4 __attribute__((const, overloadable))
+ convert_long4(long4 v);
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 21))
+extern long2 __attribute__((const, overloadable))
+ convert_long2(ulong2 v);
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 21))
+extern long3 __attribute__((const, overloadable))
+ convert_long3(ulong3 v);
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 21))
+extern long4 __attribute__((const, overloadable))
+ convert_long4(ulong4 v);
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 21))
+extern ulong2 __attribute__((const, overloadable))
+ convert_ulong2(double2 v);
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 21))
+extern ulong3 __attribute__((const, overloadable))
+ convert_ulong3(double3 v);
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 21))
+extern ulong4 __attribute__((const, overloadable))
+ convert_ulong4(double4 v);
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 21))
+extern ulong2 __attribute__((const, overloadable))
+ convert_ulong2(long2 v);
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 21))
+extern ulong3 __attribute__((const, overloadable))
+ convert_ulong3(long3 v);
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 21))
+extern ulong4 __attribute__((const, overloadable))
+ convert_ulong4(long4 v);
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 21))
+extern ulong2 __attribute__((const, overloadable))
+ convert_ulong2(ulong2 v);
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 21))
+extern ulong3 __attribute__((const, overloadable))
+ convert_ulong3(ulong3 v);
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 21))
+extern ulong4 __attribute__((const, overloadable))
+ convert_ulong4(ulong4 v);
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 21))
+extern float2 __attribute__((const, overloadable))
+ convert_float2(double2 v);
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 21))
+extern float3 __attribute__((const, overloadable))
+ convert_float3(double3 v);
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 21))
+extern float4 __attribute__((const, overloadable))
+ convert_float4(double4 v);
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 21))
+extern float2 __attribute__((const, overloadable))
+ convert_float2(long2 v);
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 21))
+extern float3 __attribute__((const, overloadable))
+ convert_float3(long3 v);
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 21))
+extern float4 __attribute__((const, overloadable))
+ convert_float4(long4 v);
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 21))
+extern float2 __attribute__((const, overloadable))
+ convert_float2(ulong2 v);
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 21))
+extern float3 __attribute__((const, overloadable))
+ convert_float3(ulong3 v);
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 21))
+extern float4 __attribute__((const, overloadable))
+ convert_float4(ulong4 v);
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 21))
+extern char2 __attribute__((const, overloadable))
+ convert_char2(double2 v);
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 21))
+extern char3 __attribute__((const, overloadable))
+ convert_char3(double3 v);
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 21))
+extern char4 __attribute__((const, overloadable))
+ convert_char4(double4 v);
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 21))
+extern char2 __attribute__((const, overloadable))
+ convert_char2(long2 v);
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 21))
+extern char3 __attribute__((const, overloadable))
+ convert_char3(long3 v);
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 21))
+extern char4 __attribute__((const, overloadable))
+ convert_char4(long4 v);
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 21))
+extern char2 __attribute__((const, overloadable))
+ convert_char2(ulong2 v);
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 21))
+extern char3 __attribute__((const, overloadable))
+ convert_char3(ulong3 v);
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 21))
+extern char4 __attribute__((const, overloadable))
+ convert_char4(ulong4 v);
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 21))
+extern uchar2 __attribute__((const, overloadable))
+ convert_uchar2(double2 v);
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 21))
+extern uchar3 __attribute__((const, overloadable))
+ convert_uchar3(double3 v);
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 21))
+extern uchar4 __attribute__((const, overloadable))
+ convert_uchar4(double4 v);
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 21))
+extern uchar2 __attribute__((const, overloadable))
+ convert_uchar2(long2 v);
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 21))
+extern uchar3 __attribute__((const, overloadable))
+ convert_uchar3(long3 v);
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 21))
+extern uchar4 __attribute__((const, overloadable))
+ convert_uchar4(long4 v);
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 21))
+extern uchar2 __attribute__((const, overloadable))
+ convert_uchar2(ulong2 v);
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 21))
+extern uchar3 __attribute__((const, overloadable))
+ convert_uchar3(ulong3 v);
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 21))
+extern uchar4 __attribute__((const, overloadable))
+ convert_uchar4(ulong4 v);
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 21))
+extern short2 __attribute__((const, overloadable))
+ convert_short2(double2 v);
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 21))
+extern short3 __attribute__((const, overloadable))
+ convert_short3(double3 v);
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 21))
+extern short4 __attribute__((const, overloadable))
+ convert_short4(double4 v);
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 21))
+extern short2 __attribute__((const, overloadable))
+ convert_short2(long2 v);
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 21))
+extern short3 __attribute__((const, overloadable))
+ convert_short3(long3 v);
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 21))
+extern short4 __attribute__((const, overloadable))
+ convert_short4(long4 v);
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 21))
+extern short2 __attribute__((const, overloadable))
+ convert_short2(ulong2 v);
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 21))
+extern short3 __attribute__((const, overloadable))
+ convert_short3(ulong3 v);
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 21))
+extern short4 __attribute__((const, overloadable))
+ convert_short4(ulong4 v);
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 21))
+extern ushort2 __attribute__((const, overloadable))
+ convert_ushort2(double2 v);
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 21))
+extern ushort3 __attribute__((const, overloadable))
+ convert_ushort3(double3 v);
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 21))
+extern ushort4 __attribute__((const, overloadable))
+ convert_ushort4(double4 v);
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 21))
+extern ushort2 __attribute__((const, overloadable))
+ convert_ushort2(long2 v);
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 21))
+extern ushort3 __attribute__((const, overloadable))
+ convert_ushort3(long3 v);
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 21))
+extern ushort4 __attribute__((const, overloadable))
+ convert_ushort4(long4 v);
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 21))
+extern ushort2 __attribute__((const, overloadable))
+ convert_ushort2(ulong2 v);
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 21))
+extern ushort3 __attribute__((const, overloadable))
+ convert_ushort3(ulong3 v);
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 21))
+extern ushort4 __attribute__((const, overloadable))
+ convert_ushort4(ulong4 v);
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 21))
+extern int2 __attribute__((const, overloadable))
+ convert_int2(double2 v);
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 21))
+extern int3 __attribute__((const, overloadable))
+ convert_int3(double3 v);
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 21))
+extern int4 __attribute__((const, overloadable))
+ convert_int4(double4 v);
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 21))
+extern int2 __attribute__((const, overloadable))
+ convert_int2(long2 v);
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 21))
+extern int3 __attribute__((const, overloadable))
+ convert_int3(long3 v);
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 21))
+extern int4 __attribute__((const, overloadable))
+ convert_int4(long4 v);
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 21))
+extern int2 __attribute__((const, overloadable))
+ convert_int2(ulong2 v);
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 21))
+extern int3 __attribute__((const, overloadable))
+ convert_int3(ulong3 v);
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 21))
+extern int4 __attribute__((const, overloadable))
+ convert_int4(ulong4 v);
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 21))
+extern uint2 __attribute__((const, overloadable))
+ convert_uint2(double2 v);
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 21))
+extern uint3 __attribute__((const, overloadable))
+ convert_uint3(double3 v);
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 21))
+extern uint4 __attribute__((const, overloadable))
+ convert_uint4(double4 v);
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 21))
+extern uint2 __attribute__((const, overloadable))
+ convert_uint2(long2 v);
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 21))
+extern uint3 __attribute__((const, overloadable))
+ convert_uint3(long3 v);
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 21))
+extern uint4 __attribute__((const, overloadable))
+ convert_uint4(long4 v);
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 21))
+extern uint2 __attribute__((const, overloadable))
+ convert_uint2(ulong2 v);
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 21))
+extern uint3 __attribute__((const, overloadable))
+ convert_uint3(ulong3 v);
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 21))
+extern uint4 __attribute__((const, overloadable))
+ convert_uint4(ulong4 v);
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 21))
+extern double2 __attribute__((const, overloadable))
+ convert_double2(float2 v);
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 21))
+extern double3 __attribute__((const, overloadable))
+ convert_double3(float3 v);
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 21))
+extern double4 __attribute__((const, overloadable))
+ convert_double4(float4 v);
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 21))
+extern double2 __attribute__((const, overloadable))
+ convert_double2(char2 v);
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 21))
+extern double3 __attribute__((const, overloadable))
+ convert_double3(char3 v);
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 21))
+extern double4 __attribute__((const, overloadable))
+ convert_double4(char4 v);
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 21))
+extern double2 __attribute__((const, overloadable))
+ convert_double2(uchar2 v);
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 21))
+extern double3 __attribute__((const, overloadable))
+ convert_double3(uchar3 v);
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 21))
+extern double4 __attribute__((const, overloadable))
+ convert_double4(uchar4 v);
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 21))
+extern double2 __attribute__((const, overloadable))
+ convert_double2(short2 v);
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 21))
+extern double3 __attribute__((const, overloadable))
+ convert_double3(short3 v);
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 21))
+extern double4 __attribute__((const, overloadable))
+ convert_double4(short4 v);
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 21))
+extern double2 __attribute__((const, overloadable))
+ convert_double2(ushort2 v);
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 21))
+extern double3 __attribute__((const, overloadable))
+ convert_double3(ushort3 v);
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 21))
+extern double4 __attribute__((const, overloadable))
+ convert_double4(ushort4 v);
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 21))
+extern double2 __attribute__((const, overloadable))
+ convert_double2(int2 v);
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 21))
+extern double3 __attribute__((const, overloadable))
+ convert_double3(int3 v);
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 21))
+extern double4 __attribute__((const, overloadable))
+ convert_double4(int4 v);
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 21))
+extern double2 __attribute__((const, overloadable))
+ convert_double2(uint2 v);
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 21))
+extern double3 __attribute__((const, overloadable))
+ convert_double3(uint3 v);
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 21))
+extern double4 __attribute__((const, overloadable))
+ convert_double4(uint4 v);
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 21))
+extern long2 __attribute__((const, overloadable))
+ convert_long2(float2 v);
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 21))
+extern long3 __attribute__((const, overloadable))
+ convert_long3(float3 v);
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 21))
+extern long4 __attribute__((const, overloadable))
+ convert_long4(float4 v);
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 21))
+extern long2 __attribute__((const, overloadable))
+ convert_long2(char2 v);
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 21))
+extern long3 __attribute__((const, overloadable))
+ convert_long3(char3 v);
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 21))
+extern long4 __attribute__((const, overloadable))
+ convert_long4(char4 v);
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 21))
+extern long2 __attribute__((const, overloadable))
+ convert_long2(uchar2 v);
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 21))
+extern long3 __attribute__((const, overloadable))
+ convert_long3(uchar3 v);
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 21))
+extern long4 __attribute__((const, overloadable))
+ convert_long4(uchar4 v);
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 21))
+extern long2 __attribute__((const, overloadable))
+ convert_long2(short2 v);
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 21))
+extern long3 __attribute__((const, overloadable))
+ convert_long3(short3 v);
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 21))
+extern long4 __attribute__((const, overloadable))
+ convert_long4(short4 v);
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 21))
+extern long2 __attribute__((const, overloadable))
+ convert_long2(ushort2 v);
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 21))
+extern long3 __attribute__((const, overloadable))
+ convert_long3(ushort3 v);
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 21))
+extern long4 __attribute__((const, overloadable))
+ convert_long4(ushort4 v);
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 21))
+extern long2 __attribute__((const, overloadable))
+ convert_long2(int2 v);
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 21))
+extern long3 __attribute__((const, overloadable))
+ convert_long3(int3 v);
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 21))
+extern long4 __attribute__((const, overloadable))
+ convert_long4(int4 v);
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 21))
+extern long2 __attribute__((const, overloadable))
+ convert_long2(uint2 v);
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 21))
+extern long3 __attribute__((const, overloadable))
+ convert_long3(uint3 v);
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 21))
+extern long4 __attribute__((const, overloadable))
+ convert_long4(uint4 v);
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 21))
+extern ulong2 __attribute__((const, overloadable))
+ convert_ulong2(float2 v);
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 21))
+extern ulong3 __attribute__((const, overloadable))
+ convert_ulong3(float3 v);
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 21))
+extern ulong4 __attribute__((const, overloadable))
+ convert_ulong4(float4 v);
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 21))
+extern ulong2 __attribute__((const, overloadable))
+ convert_ulong2(char2 v);
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 21))
+extern ulong3 __attribute__((const, overloadable))
+ convert_ulong3(char3 v);
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 21))
+extern ulong4 __attribute__((const, overloadable))
+ convert_ulong4(char4 v);
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 21))
+extern ulong2 __attribute__((const, overloadable))
+ convert_ulong2(uchar2 v);
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 21))
+extern ulong3 __attribute__((const, overloadable))
+ convert_ulong3(uchar3 v);
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 21))
+extern ulong4 __attribute__((const, overloadable))
+ convert_ulong4(uchar4 v);
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 21))
+extern ulong2 __attribute__((const, overloadable))
+ convert_ulong2(short2 v);
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 21))
+extern ulong3 __attribute__((const, overloadable))
+ convert_ulong3(short3 v);
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 21))
+extern ulong4 __attribute__((const, overloadable))
+ convert_ulong4(short4 v);
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 21))
+extern ulong2 __attribute__((const, overloadable))
+ convert_ulong2(ushort2 v);
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 21))
+extern ulong3 __attribute__((const, overloadable))
+ convert_ulong3(ushort3 v);
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 21))
+extern ulong4 __attribute__((const, overloadable))
+ convert_ulong4(ushort4 v);
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 21))
+extern ulong2 __attribute__((const, overloadable))
+ convert_ulong2(int2 v);
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 21))
+extern ulong3 __attribute__((const, overloadable))
+ convert_ulong3(int3 v);
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 21))
+extern ulong4 __attribute__((const, overloadable))
+ convert_ulong4(int4 v);
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 21))
+extern ulong2 __attribute__((const, overloadable))
+ convert_ulong2(uint2 v);
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 21))
+extern ulong3 __attribute__((const, overloadable))
+ convert_ulong3(uint3 v);
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 21))
+extern ulong4 __attribute__((const, overloadable))
+ convert_ulong4(uint4 v);
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 24))
+extern half2 __attribute__((const, overloadable))
+ convert_half2(half2 v);
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 24))
+extern half3 __attribute__((const, overloadable))
+ convert_half3(half3 v);
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 24))
+extern half4 __attribute__((const, overloadable))
+ convert_half4(half4 v);
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 24))
+extern float2 __attribute__((const, overloadable))
+ convert_float2(half2 v);
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 24))
+extern float3 __attribute__((const, overloadable))
+ convert_float3(half3 v);
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 24))
+extern float4 __attribute__((const, overloadable))
+ convert_float4(half4 v);
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 24))
+extern double2 __attribute__((const, overloadable))
+ convert_double2(half2 v);
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 24))
+extern double3 __attribute__((const, overloadable))
+ convert_double3(half3 v);
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 24))
+extern double4 __attribute__((const, overloadable))
+ convert_double4(half4 v);
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 24))
+extern char2 __attribute__((const, overloadable))
+ convert_char2(half2 v);
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 24))
+extern char3 __attribute__((const, overloadable))
+ convert_char3(half3 v);
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 24))
+extern char4 __attribute__((const, overloadable))
+ convert_char4(half4 v);
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 24))
+extern uchar2 __attribute__((const, overloadable))
+ convert_uchar2(half2 v);
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 24))
+extern uchar3 __attribute__((const, overloadable))
+ convert_uchar3(half3 v);
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 24))
+extern uchar4 __attribute__((const, overloadable))
+ convert_uchar4(half4 v);
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 24))
+extern short2 __attribute__((const, overloadable))
+ convert_short2(half2 v);
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 24))
+extern short3 __attribute__((const, overloadable))
+ convert_short3(half3 v);
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 24))
+extern short4 __attribute__((const, overloadable))
+ convert_short4(half4 v);
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 24))
+extern ushort2 __attribute__((const, overloadable))
+ convert_ushort2(half2 v);
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 24))
+extern ushort3 __attribute__((const, overloadable))
+ convert_ushort3(half3 v);
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 24))
+extern ushort4 __attribute__((const, overloadable))
+ convert_ushort4(half4 v);
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 24))
+extern int2 __attribute__((const, overloadable))
+ convert_int2(half2 v);
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 24))
+extern int3 __attribute__((const, overloadable))
+ convert_int3(half3 v);
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 24))
+extern int4 __attribute__((const, overloadable))
+ convert_int4(half4 v);
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 24))
+extern uint2 __attribute__((const, overloadable))
+ convert_uint2(half2 v);
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 24))
+extern uint3 __attribute__((const, overloadable))
+ convert_uint3(half3 v);
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 24))
+extern uint4 __attribute__((const, overloadable))
+ convert_uint4(half4 v);
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 24))
+extern long2 __attribute__((const, overloadable))
+ convert_long2(half2 v);
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 24))
+extern long3 __attribute__((const, overloadable))
+ convert_long3(half3 v);
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 24))
+extern long4 __attribute__((const, overloadable))
+ convert_long4(half4 v);
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 24))
+extern ulong2 __attribute__((const, overloadable))
+ convert_ulong2(half2 v);
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 24))
+extern ulong3 __attribute__((const, overloadable))
+ convert_ulong3(half3 v);
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 24))
+extern ulong4 __attribute__((const, overloadable))
+ convert_ulong4(half4 v);
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 24))
+extern half2 __attribute__((const, overloadable))
+ convert_half2(float2 v);
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 24))
+extern half3 __attribute__((const, overloadable))
+ convert_half3(float3 v);
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 24))
+extern half4 __attribute__((const, overloadable))
+ convert_half4(float4 v);
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 24))
+extern half2 __attribute__((const, overloadable))
+ convert_half2(double2 v);
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 24))
+extern half3 __attribute__((const, overloadable))
+ convert_half3(double3 v);
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 24))
+extern half4 __attribute__((const, overloadable))
+ convert_half4(double4 v);
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 24))
+extern half2 __attribute__((const, overloadable))
+ convert_half2(char2 v);
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 24))
+extern half3 __attribute__((const, overloadable))
+ convert_half3(char3 v);
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 24))
+extern half4 __attribute__((const, overloadable))
+ convert_half4(char4 v);
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 24))
+extern half2 __attribute__((const, overloadable))
+ convert_half2(uchar2 v);
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 24))
+extern half3 __attribute__((const, overloadable))
+ convert_half3(uchar3 v);
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 24))
+extern half4 __attribute__((const, overloadable))
+ convert_half4(uchar4 v);
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 24))
+extern half2 __attribute__((const, overloadable))
+ convert_half2(short2 v);
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 24))
+extern half3 __attribute__((const, overloadable))
+ convert_half3(short3 v);
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 24))
+extern half4 __attribute__((const, overloadable))
+ convert_half4(short4 v);
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 24))
+extern half2 __attribute__((const, overloadable))
+ convert_half2(ushort2 v);
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 24))
+extern half3 __attribute__((const, overloadable))
+ convert_half3(ushort3 v);
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 24))
+extern half4 __attribute__((const, overloadable))
+ convert_half4(ushort4 v);
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 24))
+extern half2 __attribute__((const, overloadable))
+ convert_half2(int2 v);
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 24))
+extern half3 __attribute__((const, overloadable))
+ convert_half3(int3 v);
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 24))
+extern half4 __attribute__((const, overloadable))
+ convert_half4(int4 v);
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 24))
+extern half2 __attribute__((const, overloadable))
+ convert_half2(uint2 v);
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 24))
+extern half3 __attribute__((const, overloadable))
+ convert_half3(uint3 v);
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 24))
+extern half4 __attribute__((const, overloadable))
+ convert_half4(uint4 v);
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 24))
+extern half2 __attribute__((const, overloadable))
+ convert_half2(long2 v);
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 24))
+extern half3 __attribute__((const, overloadable))
+ convert_half3(long3 v);
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 24))
+extern half4 __attribute__((const, overloadable))
+ convert_half4(long4 v);
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 24))
+extern half2 __attribute__((const, overloadable))
+ convert_half2(ulong2 v);
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 24))
+extern half3 __attribute__((const, overloadable))
+ convert_half3(ulong3 v);
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 24))
+extern half4 __attribute__((const, overloadable))
+ convert_half4(ulong4 v);
+#endif
+
+/*
+ * rsPackColorTo8888: Create a uchar4 RGBA from floats
+ *
+ * Packs three or four floating point RGBA values into a uchar4.
+ *
+ * The input values are typically between 0.0f and 1.0f inclusive. For input values outside
+ * of this range, the resulting outputs will be clamped to be between 0 and 255. As this
+ * clamping may be done after the input is multiplied by 255.f and converted to an integer,
+ * input numbers greater than INT_MAX/255.f or less than INT_MIN/255.f result in
+ * undefined behavior.
+ *
+ * If the alpha component is not specified, it is assumed to be 1.0, i.e. the result will
+ * have an alpha set to 255.
+ *
+ * Parameters:
+ * r: Red component.
+ * g: Green component.
+ * b: Blue component.
+ * a: Alpha component.
+ * color: Vector of 3 or 4 floats containing the R, G, B, and A values.
+ */
+extern uchar4 __attribute__((const, overloadable))
+ rsPackColorTo8888(float r, float g, float b);
+
+extern uchar4 __attribute__((const, overloadable))
+ rsPackColorTo8888(float r, float g, float b, float a);
+
+extern uchar4 __attribute__((const, overloadable))
+ rsPackColorTo8888(float3 color);
+
+extern uchar4 __attribute__((const, overloadable))
+ rsPackColorTo8888(float4 color);
+
+/*
+ * rsUnpackColor8888: Create a float4 RGBA from uchar4
+ *
+ * Unpacks a uchar4 color to float4. The resulting floats will be between 0.0 and 1.0 inclusive.
+ */
+extern float4 __attribute__((const))
+ rsUnpackColor8888(uchar4 c);
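+
+/* A minimal sketch (user script code; the kernel name is hypothetical): a
+ * round trip through float4 for a brightness tweak. rsPackColorTo8888()
+ * clamps, so the scaled values need no manual clamp. */
+uchar4 RS_KERNEL brighten(uchar4 in) {
+    float4 c = rsUnpackColor8888(in);  // components now in [0.0, 1.0]
+    c.r *= 1.2f;
+    c.g *= 1.2f;
+    c.b *= 1.2f;                       // may exceed 1.0; packing clamps to 255
+    return rsPackColorTo8888(c);
+}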
+
+/*
+ * rsYuvToRGBA: Convert a YUV value to RGBA
+ *
+ * Converts a color from a YUV representation to RGBA.
+ *
+ * We currently don't provide a function to do the reverse conversion.
+ *
+ * Parameters:
+ * y: Luminance component.
+ * u: U chrominance component.
+ * v: V chrominance component.
+ */
+extern float4 __attribute__((const, overloadable))
+ rsYuvToRGBA_float4(uchar y, uchar u, uchar v);
+
+extern uchar4 __attribute__((const, overloadable))
+ rsYuvToRGBA_uchar4(uchar y, uchar u, uchar v);
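+
+/* A minimal sketch (user script code): converting one pixel of a YUV
+ * allocation to packed RGBA. The global gYuvIn and the use of the
+ * rsGetElementAtYuv_* accessors from rs_allocation_data.rsh are assumptions
+ * here, not part of this header. */
+rs_allocation gYuvIn;
+
+uchar4 RS_KERNEL yuvToRgba(uint32_t x, uint32_t y) {
+    uchar yv = rsGetElementAtYuv_uchar_Y(gYuvIn, x, y);
+    uchar uv = rsGetElementAtYuv_uchar_U(gYuvIn, x, y);
+    uchar vv = rsGetElementAtYuv_uchar_V(gYuvIn, x, y);
+    return rsYuvToRGBA_uchar4(yv, uv, vv);
+}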
+
+#endif // RENDERSCRIPT_RS_CONVERT_RSH
diff --git a/current/platform/rs/scriptc/rs_core.rsh b/current/platform/rs/scriptc/rs_core.rsh
new file mode 100644
index 0000000..ae93d60
--- /dev/null
+++ b/current/platform/rs/scriptc/rs_core.rsh
@@ -0,0 +1,61 @@
+/*
+ * Copyright (C) 2016 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+// Don't edit this file! It is auto-generated by frameworks/rs/api/generate.sh.
+
+/*
+ * rs_core.rsh: Overview
+ *
+ * RenderScript is a high-performance runtime that provides compute operations at the native level.
+ * RenderScript code is compiled on each device at runtime, which also keeps it platform-independent.
+ *
+ * This reference documentation describes the RenderScript runtime APIs, which you can use
+ * to write RenderScript code in C99. The RenderScript compute header files are automatically
+ * included for you.
+ *
+ * To use RenderScript, you need to utilize the RenderScript runtime APIs documented here as well
+ * as the Android framework APIs for RenderScript. For documentation on the Android framework
+ * APIs, see the android.renderscript package reference.
+ *
+ * For more information on how to develop with RenderScript and how the runtime and Android
+ * framework APIs interact, see the RenderScript developer guide and the RenderScript samples.
+ */
+
+#ifndef RENDERSCRIPT_RS_CORE_RSH
+#define RENDERSCRIPT_RS_CORE_RSH
+
+#define RS_KERNEL __attribute__((kernel))
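+
+/* A minimal sketch (user script code; the kernel name is hypothetical):
+ * RS_KERNEL marks a function as a kernel that the runtime maps over every
+ * cell of its input and output allocations. */
+uchar4 RS_KERNEL invert(uchar4 in) {
+    uchar4 out = in;
+    out.r = 255 - in.r;
+    out.g = 255 - in.g;
+    out.b = 255 - in.b;
+    return out;
+}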
+
+#include "stdbool.h"
+
+#include "rs_value_types.rsh"
+#include "rs_object_types.rsh"
+
+#include "rs_allocation_create.rsh"
+#include "rs_allocation_data.rsh"
+#include "rs_atomic.rsh"
+#include "rs_convert.rsh"
+#include "rs_debug.rsh"
+#include "rs_for_each.rsh"
+#include "rs_io.rsh"
+#include "rs_math.rsh"
+#include "rs_matrix.rsh"
+#include "rs_object_info.rsh"
+#include "rs_quaternion.rsh"
+#include "rs_time.rsh"
+#include "rs_vector_math.rsh"
+
+#endif // RENDERSCRIPT_RS_CORE_RSH
diff --git a/current/platform/rs/scriptc/rs_debug.rsh b/current/platform/rs/scriptc/rs_debug.rsh
new file mode 100644
index 0000000..13c5faa
--- /dev/null
+++ b/current/platform/rs/scriptc/rs_debug.rsh
@@ -0,0 +1,269 @@
+/*
+ * Copyright (C) 2016 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+// Don't edit this file! It is auto-generated by frameworks/rs/api/generate.sh.
+
+/*
+ * rs_debug.rsh: Debugging Functions
+ *
+ * The functions below are intended to be used during application development.
+ * They should not be used in shipping applications.
+ */
+
+#ifndef RENDERSCRIPT_RS_DEBUG_RSH
+#define RENDERSCRIPT_RS_DEBUG_RSH
+
+#define RS_DEBUG(a) rsDebug(#a, a)
+#define RS_DEBUG_MARKER rsDebug(__FILE__, __LINE__)
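+
+/* A minimal sketch (user script code; the function name is hypothetical):
+ * RS_DEBUG logs an expression using its own source text as the label, and
+ * RS_DEBUG_MARKER logs the current file and line. */
+static void debugColor(float4 c) {
+    RS_DEBUG(c);      // expands to rsDebug("c", c)
+    RS_DEBUG_MARKER;  // expands to rsDebug(__FILE__, __LINE__)
+}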
+
+/*
+ * rsDebug: Log a message and values
+ *
+ * This function prints a message to the standard log, followed by the provided values.
+ *
+ * This function is intended for debugging only and should not be used in shipping
+ * applications.
+ */
+extern void __attribute__((overloadable))
+ rsDebug(const char* message, double a);
+
+extern void __attribute__((overloadable))
+ rsDebug(const char* message, int a);
+
+extern void __attribute__((overloadable))
+ rsDebug(const char* message, uint a);
+
+extern void __attribute__((overloadable))
+ rsDebug(const char* message, long a);
+
+extern void __attribute__((overloadable))
+ rsDebug(const char* message, ulong a);
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 17))
+extern void __attribute__((overloadable))
+ rsDebug(const char* message, int2 a);
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 17))
+extern void __attribute__((overloadable))
+ rsDebug(const char* message, int3 a);
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 17))
+extern void __attribute__((overloadable))
+ rsDebug(const char* message, int4 a);
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 17))
+extern void __attribute__((overloadable))
+ rsDebug(const char* message, uint2 a);
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 17))
+extern void __attribute__((overloadable))
+ rsDebug(const char* message, uint3 a);
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 17))
+extern void __attribute__((overloadable))
+ rsDebug(const char* message, uint4 a);
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 17))
+extern void __attribute__((overloadable))
+ rsDebug(const char* message, long2 a);
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 17))
+extern void __attribute__((overloadable))
+ rsDebug(const char* message, long3 a);
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 17))
+extern void __attribute__((overloadable))
+ rsDebug(const char* message, long4 a);
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 17))
+extern void __attribute__((overloadable))
+ rsDebug(const char* message, ulong2 a);
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 17))
+extern void __attribute__((overloadable))
+ rsDebug(const char* message, ulong3 a);
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 17))
+extern void __attribute__((overloadable))
+ rsDebug(const char* message, ulong4 a);
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 23))
+extern void __attribute__((overloadable))
+ rsDebug(const char* message, double2 a);
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 23))
+extern void __attribute__((overloadable))
+ rsDebug(const char* message, double3 a);
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 23))
+extern void __attribute__((overloadable))
+ rsDebug(const char* message, double4 a);
+#endif
+
+extern void __attribute__((overloadable))
+ rsDebug(const char* message, float a);
+
+extern void __attribute__((overloadable))
+ rsDebug(const char* message, float2 a);
+
+extern void __attribute__((overloadable))
+ rsDebug(const char* message, float3 a);
+
+extern void __attribute__((overloadable))
+ rsDebug(const char* message, float4 a);
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 24))
+extern void __attribute__((overloadable))
+ rsDebug(const char* message, half a);
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 24))
+extern void __attribute__((overloadable))
+ rsDebug(const char* message, half2 a);
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 24))
+extern void __attribute__((overloadable))
+ rsDebug(const char* message, half3 a);
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 24))
+extern void __attribute__((overloadable))
+ rsDebug(const char* message, half4 a);
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 17))
+extern void __attribute__((overloadable))
+ rsDebug(const char* message, char a);
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 17))
+extern void __attribute__((overloadable))
+ rsDebug(const char* message, char2 a);
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 17))
+extern void __attribute__((overloadable))
+ rsDebug(const char* message, char3 a);
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 17))
+extern void __attribute__((overloadable))
+ rsDebug(const char* message, char4 a);
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 17))
+extern void __attribute__((overloadable))
+ rsDebug(const char* message, uchar a);
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 17))
+extern void __attribute__((overloadable))
+ rsDebug(const char* message, uchar2 a);
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 17))
+extern void __attribute__((overloadable))
+ rsDebug(const char* message, uchar3 a);
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 17))
+extern void __attribute__((overloadable))
+ rsDebug(const char* message, uchar4 a);
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 17))
+extern void __attribute__((overloadable))
+ rsDebug(const char* message, short a);
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 17))
+extern void __attribute__((overloadable))
+ rsDebug(const char* message, short2 a);
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 17))
+extern void __attribute__((overloadable))
+ rsDebug(const char* message, short3 a);
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 17))
+extern void __attribute__((overloadable))
+ rsDebug(const char* message, short4 a);
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 17))
+extern void __attribute__((overloadable))
+ rsDebug(const char* message, ushort a);
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 17))
+extern void __attribute__((overloadable))
+ rsDebug(const char* message, ushort2 a);
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 17))
+extern void __attribute__((overloadable))
+ rsDebug(const char* message, ushort3 a);
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 17))
+extern void __attribute__((overloadable))
+ rsDebug(const char* message, ushort4 a);
+#endif
+
+extern void __attribute__((overloadable))
+ rsDebug(const char* message, float a, float b);
+
+extern void __attribute__((overloadable))
+ rsDebug(const char* message, float a, float b, float c);
+
+extern void __attribute__((overloadable))
+ rsDebug(const char* message, float a, float b, float c, float d);
+
+extern void __attribute__((overloadable))
+ rsDebug(const char* message, long long a);
+
+extern void __attribute__((overloadable))
+ rsDebug(const char* message, unsigned long long a);
+
+extern void __attribute__((overloadable))
+ rsDebug(const char* message, const void* a);
+
+extern void __attribute__((overloadable))
+ rsDebug(const char* message, const rs_matrix4x4* a);
+
+extern void __attribute__((overloadable))
+ rsDebug(const char* message, const rs_matrix3x3* a);
+
+extern void __attribute__((overloadable))
+ rsDebug(const char* message, const rs_matrix2x2* a);
+
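+/*
+ * Usage sketch (illustrative only, not part of the generated API): rsDebug
+ * pairs a message string with the value(s) to write to the system log.
+ *   float RS_KERNEL scale(float in, uint32_t x) { // hypothetical kernel
+ *       rsDebug("scale input at x =", in);
+ *       return in * 2.0f;
+ *   }
+ */
+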
+#endif // RENDERSCRIPT_RS_DEBUG_RSH
diff --git a/current/platform/rs/scriptc/rs_for_each.rsh b/current/platform/rs/scriptc/rs_for_each.rsh
new file mode 100644
index 0000000..bcc5db9
--- /dev/null
+++ b/current/platform/rs/scriptc/rs_for_each.rsh
@@ -0,0 +1,434 @@
+/*
+ * Copyright (C) 2016 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+// Don't edit this file! It is auto-generated by frameworks/rs/api/generate.sh.
+
+/*
+ * rs_for_each.rsh: Kernel Invocation Functions and Types
+ *
+ * The rsForEach() function can be used to invoke the root kernel of a script.
+ *
+ * The other functions are used to get the characteristics of the invocation of
+ * an executing kernel, like dimensions and current indices. These functions take
+ * a rs_kernel_context as argument.
+ */
+
+#ifndef RENDERSCRIPT_RS_FOR_EACH_RSH
+#define RENDERSCRIPT_RS_FOR_EACH_RSH
+
+/*
+ * rs_for_each_strategy_t: Suggested cell processing order
+ *
+ * This type is used to suggest how the invoked kernel should iterate over the cells of the
+ * allocations. This is a hint only. Implementations may not follow the suggestion.
+ *
+ * This hint can improve the caching behavior of the running kernel, e.g. the cache
+ * locality when the processing is distributed over multiple cores.
+ */
+typedef enum rs_for_each_strategy {
+ RS_FOR_EACH_STRATEGY_SERIAL = 0, // Prefer contiguous memory regions.
+ RS_FOR_EACH_STRATEGY_DONT_CARE = 1, // No preference.
+ RS_FOR_EACH_STRATEGY_DST_LINEAR = 2, // Prefer iterating linearly over the destination (DST) allocation.
+ RS_FOR_EACH_STRATEGY_TILE_SMALL = 3, // Prefer processing small rectangular regions.
+ RS_FOR_EACH_STRATEGY_TILE_MEDIUM = 4, // Prefer processing medium rectangular regions.
+ RS_FOR_EACH_STRATEGY_TILE_LARGE = 5 // Prefer processing large rectangular regions.
+} rs_for_each_strategy_t;
+
+/*
+ * rs_kernel_context: Handle to a kernel invocation context
+ *
+ * The kernel context contains common characteristics of the allocations being iterated
+ * over, like dimensions. It also contains rarely used indices of the currently processed
+ * cell, like the Array0 index or the current level of detail.
+ *
+ * You can access the kernel context by adding a special parameter named "context" of type
+ * rs_kernel_context to your kernel function. See rsGetDimX() and rsGetArray0() for examples.
+ */
+#if (defined(RS_VERSION) && (RS_VERSION >= 23))
+typedef const struct rs_kernel_context_t * rs_kernel_context;
+#endif
+
+/*
+ * rs_script_call_t: Cell iteration information
+ *
+ * This structure is used to provide iteration information to a rsForEach call.
+ * It is currently used to restrict processing to a subset of cells. In future
+ * versions, it will also be used to provide hints on how best to iterate over
+ * the cells.
+ *
+ * The Start fields are inclusive and the End fields are exclusive. E.g. to iterate
+ * over cells 4, 5, 6, and 7 in the X dimension, set xStart to 4 and xEnd to 8.
+ */
+typedef struct rs_script_call {
+ rs_for_each_strategy_t strategy; // Currently ignored. In the future, will be the suggested cell iteration strategy.
+ uint32_t xStart; // Starting index in the X dimension.
+ uint32_t xEnd; // Ending index (exclusive) in the X dimension.
+ uint32_t yStart; // Starting index in the Y dimension.
+ uint32_t yEnd; // Ending index (exclusive) in the Y dimension.
+ uint32_t zStart; // Starting index in the Z dimension.
+ uint32_t zEnd; // Ending index (exclusive) in the Z dimension.
+ uint32_t arrayStart; // Starting index in the Array0 dimension.
+ uint32_t arrayEnd; // Ending index (exclusive) in the Array0 dimension.
+ uint32_t array1Start; // Starting index in the Array1 dimension.
+ uint32_t array1End; // Ending index (exclusive) in the Array1 dimension.
+ uint32_t array2Start; // Starting index in the Array2 dimension.
+ uint32_t array2End; // Ending index (exclusive) in the Array2 dimension.
+ uint32_t array3Start; // Starting index in the Array3 dimension.
+ uint32_t array3End; // Ending index (exclusive) in the Array3 dimension.
+} rs_script_call_t;
+
+/*
+ * rs_kernel: Handle to a kernel function
+ *
+ * An opaque type for a function that is defined with the kernel attribute. A value
+ * of this type can be used in a rsForEach call to launch a kernel.
+ */
+#if (defined(RS_VERSION) && (RS_VERSION >= 24))
+typedef void* rs_kernel;
+#endif
+
+/*
+ * rsForEach: Launches a kernel
+ *
+ * Runs the kernel over zero or more input allocations. They are passed after the
+ * rs_kernel argument. If the specified kernel returns a value, an output allocation
+ * must be specified as the last argument. All input allocations,
+ * and the output allocation if it exists, must have the same dimensions.
+ *
+ * This is a synchronous function. A call to this function only returns after all
+ * the work has completed for all cells of the input allocations. If the kernel
+ * function returns any value, the call waits until all results have been written
+ * to the output allocation.
+ *
+ * Up to API level 23, the kernel is implicitly specified as the kernel named
+ * "root" in the specified script, and only a single input allocation can be used.
+ * Starting in API level 24, an arbitrary kernel function can be used,
+ * as specified by the kernel argument. The script argument is removed.
+ * The kernel must be defined in the current script. In addition, more than one
+ * input can be used.
+ *
+ * E.g.
+ * float __attribute__((kernel)) square(float a) {
+ * return a * a;
+ * }
+ *
+ * void compute(rs_allocation ain, rs_allocation aout) {
+ * rsForEach(square, ain, aout);
+ * }
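+ *
+ * Before API level 24, the same launch targets the script's "root" kernel
+ * instead; a sketch, assuming a script global "gScript" of type rs_script:
+ *   rsForEach(gScript, ain, aout);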
+ *
+ *
+ * Parameters:
+ * script: Script to call.
+ * input: Allocation to source data from.
+ * output: Allocation to write data into.
+ * usrData: User defined data to pass to the script. May be NULL.
+ * sc: Extra control information used to select a sub-region of the allocation to be processed or suggest a walking strategy. May be NULL.
+ * usrDataLen: Size of the usrData structure. This will be used to perform a shallow copy of the data if necessary.
+ * kernel: Function designator to a function that is defined with the kernel attribute.
+ * ...: Input and output allocations
+ */
+#if !defined(RS_VERSION) || (RS_VERSION <= 13)
+extern void __attribute__((overloadable))
+ rsForEach(rs_script script, rs_allocation input, rs_allocation output, const void* usrData,
+ const rs_script_call_t* sc);
+#endif
+
+#if !defined(RS_VERSION) || (RS_VERSION <= 13)
+extern void __attribute__((overloadable))
+ rsForEach(rs_script script, rs_allocation input, rs_allocation output, const void* usrData);
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 14) && (RS_VERSION <= 20))
+extern void __attribute__((overloadable))
+ rsForEach(rs_script script, rs_allocation input, rs_allocation output, const void* usrData,
+ size_t usrDataLen, const rs_script_call_t* sc);
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 14) && (RS_VERSION <= 20))
+extern void __attribute__((overloadable))
+ rsForEach(rs_script script, rs_allocation input, rs_allocation output, const void* usrData,
+ size_t usrDataLen);
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 14) && (RS_VERSION <= 23))
+extern void __attribute__((overloadable))
+ rsForEach(rs_script script, rs_allocation input, rs_allocation output);
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 24))
+extern void
+ rsForEach(rs_kernel kernel, ...);
+#endif
+
+/*
+ * rsForEachWithOptions: Launches a kernel with options
+ *
+ * Launches a kernel in a way similar to rsForEach. However, instead of processing
+ * all cells in the input, this function only processes cells in the subspace of
+ * the index space specified in options. With the index space explicitly specified
+ * by options, no input or output allocation is required for a kernel launch using
+ * this API. If allocations are passed in, they must match the number of arguments
+ * and return value expected by the kernel function. The output allocation is
+ * present if and only if the kernel has a non-void return value.
+ *
+ * E.g.,
+ * rs_script_call_t opts = {0};
+ * opts.xStart = 0;
+ * opts.xEnd = dimX;
+ * opts.yStart = 0;
+ * opts.yEnd = dimY / 2;
+ * rsForEachWithOptions(foo, &opts, out, out);
+ *
+ *
+ * Parameters:
+ * kernel: Function designator to a function that is defined with the kernel attribute.
+ * options: Launch options
+ * ...: Input and output allocations
+ */
+#if (defined(RS_VERSION) && (RS_VERSION >= 24))
+extern void
+ rsForEachWithOptions(rs_kernel kernel, rs_script_call_t* options, ...);
+#endif
+
+/*
+ * rsGetArray0: Index in the Array0 dimension for the specified kernel context
+ *
+ * Returns the index in the Array0 dimension of the cell being processed, as specified
+ * by the supplied kernel context.
+ *
+ * The kernel context contains common characteristics of the allocations being iterated
+ * over and rarely used indices, like the Array0 index.
+ *
+ * You can access the kernel context by adding a special parameter named "context" of
+ * type rs_kernel_context to your kernel function. E.g.
+ * short RS_KERNEL myKernel(short value, uint32_t x, rs_kernel_context context) {
+ *       // The current indices in the common x, y, z dimensions are accessed by
+ *       // adding these variables as arguments. For the more rarely used indices
+ *       // of the other dimensions, extract them from the kernel context:
+ * uint32_t index_a0 = rsGetArray0(context);
+ * //...
+ * }
+ *
+ * This function returns 0 if the Array0 dimension is not present.
+ */
+#if (defined(RS_VERSION) && (RS_VERSION >= 23))
+extern uint32_t __attribute__((overloadable))
+ rsGetArray0(rs_kernel_context context);
+#endif
+
+/*
+ * rsGetArray1: Index in the Array1 dimension for the specified kernel context
+ *
+ * Returns the index in the Array1 dimension of the cell being processed, as specified
+ * by the supplied kernel context. See rsGetArray0() for an explanation of the context.
+ *
+ * Returns 0 if the Array1 dimension is not present.
+ */
+#if (defined(RS_VERSION) && (RS_VERSION >= 23))
+extern uint32_t __attribute__((overloadable))
+ rsGetArray1(rs_kernel_context context);
+#endif
+
+/*
+ * rsGetArray2: Index in the Array2 dimension for the specified kernel context
+ *
+ * Returns the index in the Array2 dimension of the cell being processed,
+ * as specified by the supplied kernel context. See rsGetArray0() for an explanation
+ * of the context.
+ *
+ * Returns 0 if the Array2 dimension is not present.
+ */
+#if (defined(RS_VERSION) && (RS_VERSION >= 23))
+extern uint32_t __attribute__((overloadable))
+ rsGetArray2(rs_kernel_context context);
+#endif
+
+/*
+ * rsGetArray3: Index in the Array3 dimension for the specified kernel context
+ *
+ * Returns the index in the Array3 dimension of the cell being processed, as specified
+ * by the supplied kernel context. See rsGetArray0() for an explanation of the context.
+ *
+ * Returns 0 if the Array3 dimension is not present.
+ */
+#if (defined(RS_VERSION) && (RS_VERSION >= 23))
+extern uint32_t __attribute__((overloadable))
+ rsGetArray3(rs_kernel_context context);
+#endif
+
+/*
+ * rsGetDimArray0: Size of the Array0 dimension for the specified kernel context
+ *
+ * Returns the size of the Array0 dimension for the specified kernel context.
+ * See rsGetDimX() for an explanation of the context.
+ *
+ * Returns 0 if the Array0 dimension is not present.
+ */
+#if (defined(RS_VERSION) && (RS_VERSION >= 23))
+extern uint32_t __attribute__((overloadable))
+ rsGetDimArray0(rs_kernel_context context);
+#endif
+
+/*
+ * rsGetDimArray1: Size of the Array1 dimension for the specified kernel context
+ *
+ * Returns the size of the Array1 dimension for the specified kernel context.
+ * See rsGetDimX() for an explanation of the context.
+ *
+ * Returns 0 if the Array1 dimension is not present.
+ */
+#if (defined(RS_VERSION) && (RS_VERSION >= 23))
+extern uint32_t __attribute__((overloadable))
+ rsGetDimArray1(rs_kernel_context context);
+#endif
+
+/*
+ * rsGetDimArray2: Size of the Array2 dimension for the specified kernel context
+ *
+ * Returns the size of the Array2 dimension for the specified kernel context.
+ * See rsGetDimX() for an explanation of the context.
+ *
+ * Returns 0 if the Array2 dimension is not present.
+ */
+#if (defined(RS_VERSION) && (RS_VERSION >= 23))
+extern uint32_t __attribute__((overloadable))
+ rsGetDimArray2(rs_kernel_context context);
+#endif
+
+/*
+ * rsGetDimArray3: Size of the Array3 dimension for the specified kernel context
+ *
+ * Returns the size of the Array3 dimension for the specified kernel context.
+ * See rsGetDimX() for an explanation of the context.
+ *
+ * Returns 0 if the Array3 dimension is not present.
+ */
+#if (defined(RS_VERSION) && (RS_VERSION >= 23))
+extern uint32_t __attribute__((overloadable))
+ rsGetDimArray3(rs_kernel_context context);
+#endif
+
+/*
+ * rsGetDimHasFaces: Presence of more than one face for the specified kernel context
+ *
+ * If the kernel is iterating over a cubemap, this function returns true if there's more
+ * than one face present. In all other cases, it returns false. See rsGetDimX() for an
+ * explanation of the context.
+ *
+ * rsAllocationGetDimFaces() is similar but returns 0 or 1 instead of a bool.
+ *
+ * Returns: True if more than one face is present, false otherwise.
+ */
+#if (defined(RS_VERSION) && (RS_VERSION >= 23))
+extern bool __attribute__((overloadable))
+ rsGetDimHasFaces(rs_kernel_context context);
+#endif
+
+/*
+ * rsGetDimLod: Number of levels of detail for the specified kernel context
+ *
+ * Returns the number of levels of detail for the specified kernel context. This is useful
+ * for mipmaps. See rsGetDimX() for an explanation of the context.
+ *
+ * Returns 0 if Level of Detail is not used.
+ *
+ * rsAllocationGetDimLOD() is similar but returns 0 or 1 instead of the actual
+ * number of levels.
+ */
+#if (defined(RS_VERSION) && (RS_VERSION >= 23))
+extern uint32_t __attribute__((overloadable))
+ rsGetDimLod(rs_kernel_context context);
+#endif
+
+/*
+ * rsGetDimX: Size of the X dimension for the specified kernel context
+ *
+ * Returns the size of the X dimension for the specified kernel context.
+ *
+ * The kernel context contains common characteristics of the allocations being iterated
+ * over and rarely used indices, like the Array0 index.
+ *
+ * You can access it by adding a special parameter named "context" of
+ * type rs_kernel_context to your kernel function. E.g.
+ * int4 RS_KERNEL myKernel(int4 value, rs_kernel_context context) {
+ *       uint32_t size = rsGetDimX(context); //...
+ *   }
+ *
+ * To get the dimension of a specific allocation, use rsAllocationGetDimX().
+ */
+#if (defined(RS_VERSION) && (RS_VERSION >= 23))
+extern uint32_t __attribute__((overloadable))
+ rsGetDimX(rs_kernel_context context);
+#endif
+
+/*
+ * rsGetDimY: Size of the Y dimension for the specified kernel context
+ *
+ * Returns the size of the Y dimension for the specified kernel context.
+ * See rsGetDimX() for an explanation of the context.
+ *
+ * Returns 0 if the Y dimension is not present.
+ *
+ * To get the dimension of a specific allocation, use rsAllocationGetDimY().
+ */
+#if (defined(RS_VERSION) && (RS_VERSION >= 23))
+extern uint32_t __attribute__((overloadable))
+ rsGetDimY(rs_kernel_context context);
+#endif
+
+/*
+ * rsGetDimZ: Size of the Z dimension for the specified kernel context
+ *
+ * Returns the size of the Z dimension for the specified kernel context.
+ * See rsGetDimX() for an explanation of the context.
+ *
+ * Returns 0 if the Z dimension is not present.
+ *
+ * To get the dimension of a specific allocation, use rsAllocationGetDimZ().
+ */
+#if (defined(RS_VERSION) && (RS_VERSION >= 23))
+extern uint32_t __attribute__((overloadable))
+ rsGetDimZ(rs_kernel_context context);
+#endif
+
+/*
+ * rsGetFace: Coordinate of the Face for the specified kernel context
+ *
+ * Returns the face on which the cell being processed is found, as specified by the
+ * supplied kernel context. See rsGetArray0() for an explanation of the context.
+ *
+ * Returns RS_ALLOCATION_CUBEMAP_FACE_POSITIVE_X if the face dimension is not
+ * present.
+ */
+#if (defined(RS_VERSION) && (RS_VERSION >= 23))
+extern rs_allocation_cubemap_face __attribute__((overloadable))
+ rsGetFace(rs_kernel_context context);
+#endif
+
+/*
+ * rsGetLod: Index in the Levels of Detail dimension for the specified kernel context
+ *
+ * Returns the index in the Levels of Detail dimension of the cell being processed,
+ * as specified by the supplied kernel context. See rsGetArray0() for an explanation of
+ * the context.
+ *
+ * Returns 0 if the Levels of Detail dimension is not present.
+ */
+#if (defined(RS_VERSION) && (RS_VERSION >= 23))
+extern uint32_t __attribute__((overloadable))
+ rsGetLod(rs_kernel_context context);
+#endif
+
+#endif // RENDERSCRIPT_RS_FOR_EACH_RSH
diff --git a/current/platform/rs/scriptc/rs_graphics.rsh b/current/platform/rs/scriptc/rs_graphics.rsh
new file mode 100644
index 0000000..10ec640
--- /dev/null
+++ b/current/platform/rs/scriptc/rs_graphics.rsh
@@ -0,0 +1,1522 @@
+/*
+ * Copyright (C) 2016 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+// Don't edit this file! It is auto-generated by frameworks/rs/api/generate.sh.
+
+/*
+ * rs_graphics.rsh: Graphics Functions and Types
+ *
+ * The graphics subsystem of RenderScript was removed at API level 23.
+ */
+
+#ifndef RENDERSCRIPT_RS_GRAPHICS_RSH
+#define RENDERSCRIPT_RS_GRAPHICS_RSH
+
+#ifdef __LP64__
+// TODO We need to fix some of the builds before enabling this error:
+// #error "RenderScript graphics is deprecated and not supported in 64bit mode."
+#endif
+
+// TODO we seem to assume order for the other headers too.
+#include "rs_object_types.rsh"
+
+/*
+ * rs_blend_src_func: Blend source function
+ *
+ * DEPRECATED. Do not use.
+ *
+ */
+#ifndef __LP64__
+#if (defined(RS_VERSION) && (RS_VERSION >= 16) && (defined(RS_DECLARE_EXPIRED_APIS) || RS_VERSION <= 22))
+typedef enum __attribute__((
+#if (defined(RS_VERSION) && (RS_VERSION >= 22))
+deprecated
+#endif
+)) {
+ RS_BLEND_SRC_ZERO = 0,
+ RS_BLEND_SRC_ONE = 1,
+ RS_BLEND_SRC_DST_COLOR = 2,
+ RS_BLEND_SRC_ONE_MINUS_DST_COLOR = 3,
+ RS_BLEND_SRC_SRC_ALPHA = 4,
+ RS_BLEND_SRC_ONE_MINUS_SRC_ALPHA = 5,
+ RS_BLEND_SRC_DST_ALPHA = 6,
+ RS_BLEND_SRC_ONE_MINUS_DST_ALPHA = 7,
+ RS_BLEND_SRC_SRC_ALPHA_SATURATE = 8,
+ RS_BLEND_SRC_INVALID = 100
+} rs_blend_src_func;
+#endif
+#endif
+
+/*
+ * rs_blend_dst_func: Blend destination function
+ *
+ * DEPRECATED. Do not use.
+ *
+ */
+#ifndef __LP64__
+#if (defined(RS_VERSION) && (RS_VERSION >= 16) && (defined(RS_DECLARE_EXPIRED_APIS) || RS_VERSION <= 22))
+typedef enum __attribute__((
+#if (defined(RS_VERSION) && (RS_VERSION >= 22))
+deprecated
+#endif
+)) {
+ RS_BLEND_DST_ZERO = 0,
+ RS_BLEND_DST_ONE = 1,
+ RS_BLEND_DST_SRC_COLOR = 2,
+ RS_BLEND_DST_ONE_MINUS_SRC_COLOR = 3,
+ RS_BLEND_DST_SRC_ALPHA = 4,
+ RS_BLEND_DST_ONE_MINUS_SRC_ALPHA = 5,
+ RS_BLEND_DST_DST_ALPHA = 6,
+ RS_BLEND_DST_ONE_MINUS_DST_ALPHA = 7,
+ RS_BLEND_DST_INVALID = 100
+} rs_blend_dst_func;
+#endif
+#endif
+
+/*
+ * rs_cull_mode: Culling mode
+ *
+ * DEPRECATED. Do not use.
+ *
+ */
+#ifndef __LP64__
+#if (defined(RS_VERSION) && (RS_VERSION >= 16) && (defined(RS_DECLARE_EXPIRED_APIS) || RS_VERSION <= 22))
+typedef enum __attribute__((
+#if (defined(RS_VERSION) && (RS_VERSION >= 22))
+deprecated
+#endif
+)) {
+ RS_CULL_BACK = 0,
+ RS_CULL_FRONT = 1,
+ RS_CULL_NONE = 2,
+ RS_CULL_INVALID = 100
+} rs_cull_mode;
+#endif
+#endif
+
+/*
+ * rs_depth_func: Depth function
+ *
+ * DEPRECATED. Do not use.
+ *
+ * Specifies conditional drawing depending on the comparison of the incoming
+ * depth to that found in the depth buffer.
+ */
+#ifndef __LP64__
+#if (defined(RS_VERSION) && (RS_VERSION >= 16) && (defined(RS_DECLARE_EXPIRED_APIS) || RS_VERSION <= 22))
+typedef enum __attribute__((
+#if (defined(RS_VERSION) && (RS_VERSION >= 22))
+deprecated
+#endif
+)) {
+ RS_DEPTH_FUNC_ALWAYS = 0, // Always drawn
+ RS_DEPTH_FUNC_LESS = 1, // Drawn if the incoming depth value is less than that in the depth buffer
+ RS_DEPTH_FUNC_LEQUAL = 2, // Drawn if the incoming depth value is less than or equal to that in the depth buffer
+ RS_DEPTH_FUNC_GREATER = 3, // Drawn if the incoming depth value is greater than that in the depth buffer
+ RS_DEPTH_FUNC_GEQUAL = 4, // Drawn if the incoming depth value is greater than or equal to that in the depth buffer
+ RS_DEPTH_FUNC_EQUAL = 5, // Drawn if the incoming depth value is equal to that in the depth buffer
+ RS_DEPTH_FUNC_NOTEQUAL = 6, // Drawn if the incoming depth value is not equal to that in the depth buffer
+ RS_DEPTH_FUNC_INVALID = 100 // Invalid depth function
+} rs_depth_func;
+#endif
+#endif
+
+/*
+ * rs_primitive: How to interpret mesh vertex data
+ *
+ * DEPRECATED. Do not use.
+ *
+ * Describes the way mesh vertex data is interpreted when rendering
+ */
+#ifndef __LP64__
+#if (defined(RS_VERSION) && (RS_VERSION >= 16) && (defined(RS_DECLARE_EXPIRED_APIS) || RS_VERSION <= 22))
+typedef enum __attribute__((
+#if (defined(RS_VERSION) && (RS_VERSION >= 22))
+deprecated
+#endif
+)) {
+ RS_PRIMITIVE_POINT = 0, // Vertex data will be rendered as a series of points
+ RS_PRIMITIVE_LINE = 1, // Vertex pairs will be rendered as lines
+ RS_PRIMITIVE_LINE_STRIP = 2, // Vertex data will be rendered as a connected line strip
+ RS_PRIMITIVE_TRIANGLE = 3, // Vertices will be rendered as individual triangles
+ RS_PRIMITIVE_TRIANGLE_STRIP = 4, // Vertices will be rendered as a connected triangle strip defined by the first three vertices with each additional triangle defined by a new vertex
+ RS_PRIMITIVE_TRIANGLE_FAN = 5, // Vertices will be rendered as a sequence of triangles that all share the first vertex as the origin
+ RS_PRIMITIVE_INVALID = 100 // Invalid primitive
+} rs_primitive;
+#endif
+#endif
+
+/*
+ * rs_font: Handle to a Font
+ *
+ * DEPRECATED. Do not use.
+ *
+ * Opaque handle to a RenderScript font object.
+ * See: android.renderscript.Font
+ */
+#ifndef __LP64__
+#if !defined(RS_VERSION) || (defined(RS_DECLARE_EXPIRED_APIS) || RS_VERSION <= 22)
+typedef struct rs_font _RS_OBJECT_DECL __attribute__((
+#if (defined(RS_VERSION) && (RS_VERSION >= 22))
+deprecated
+#endif
+)) rs_font;
+#endif
+#endif
+
+/*
+ * rs_mesh: Handle to a Mesh
+ *
+ * DEPRECATED. Do not use.
+ *
+ * Opaque handle to a RenderScript mesh object.
+ * See: android.renderscript.Mesh
+ */
+#ifndef __LP64__
+#if !defined(RS_VERSION) || (defined(RS_DECLARE_EXPIRED_APIS) || RS_VERSION <= 22)
+typedef struct rs_mesh _RS_OBJECT_DECL __attribute__((
+#if (defined(RS_VERSION) && (RS_VERSION >= 22))
+deprecated
+#endif
+)) rs_mesh;
+#endif
+#endif
+
+/*
+ * rs_program_fragment: Handle to a ProgramFragment
+ *
+ * DEPRECATED. Do not use.
+ *
+ * Opaque handle to a RenderScript ProgramFragment object.
+ * See: android.renderscript.ProgramFragment
+ */
+#ifndef __LP64__
+#if !defined(RS_VERSION) || (defined(RS_DECLARE_EXPIRED_APIS) || RS_VERSION <= 22)
+typedef struct rs_program_fragment _RS_OBJECT_DECL __attribute__((
+#if (defined(RS_VERSION) && (RS_VERSION >= 22))
+deprecated
+#endif
+)) rs_program_fragment;
+#endif
+#endif
+
+/*
+ * rs_program_vertex: Handle to a ProgramVertex
+ *
+ * DEPRECATED. Do not use.
+ *
+ * Opaque handle to a RenderScript ProgramVertex object.
+ * See: android.renderscript.ProgramVertex
+ */
+#ifndef __LP64__
+#if !defined(RS_VERSION) || (defined(RS_DECLARE_EXPIRED_APIS) || RS_VERSION <= 22)
+typedef struct rs_program_vertex _RS_OBJECT_DECL __attribute__((
+#if (defined(RS_VERSION) && (RS_VERSION >= 22))
+deprecated
+#endif
+)) rs_program_vertex;
+#endif
+#endif
+
+/*
+ * rs_program_raster: Handle to a ProgramRaster
+ *
+ * DEPRECATED. Do not use.
+ *
+ * Opaque handle to a RenderScript ProgramRaster object.
+ * See: android.renderscript.ProgramRaster
+ */
+#ifndef __LP64__
+#if !defined(RS_VERSION) || (defined(RS_DECLARE_EXPIRED_APIS) || RS_VERSION <= 22)
+typedef struct rs_program_raster _RS_OBJECT_DECL __attribute__((
+#if (defined(RS_VERSION) && (RS_VERSION >= 22))
+deprecated
+#endif
+)) rs_program_raster;
+#endif
+#endif
+
+/*
+ * rs_program_store: Handle to a ProgramStore
+ *
+ * DEPRECATED. Do not use.
+ *
+ * Opaque handle to a RenderScript ProgramStore object.
+ * See: android.renderscript.ProgramStore
+ */
+#ifndef __LP64__
+#if !defined(RS_VERSION) || (defined(RS_DECLARE_EXPIRED_APIS) || RS_VERSION <= 22)
+typedef struct rs_program_store _RS_OBJECT_DECL __attribute__((
+#if (defined(RS_VERSION) && (RS_VERSION >= 22))
+deprecated
+#endif
+)) rs_program_store;
+#endif
+#endif
+
+/*
+ * rsClearObject: Release an object
+ *
+ * Tells the runtime that this handle will no longer be used to access the related
+ * object. If this was the last handle to that object, resource recovery may happen.
+ *
+ * After calling this function, *dst will be set to an empty handle. See rsIsObject().
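+ *
+ * A minimal sketch, assuming a hypothetical script global "gFont" of type rs_font:
+ *   if (rsIsObject(gFont)) {
+ *       rsClearObject(&gFont); // gFont is now an empty handle
+ *   }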
+ */
+#ifndef __LP64__
+#if !defined(RS_VERSION) || (RS_VERSION <= 22)
+extern void __attribute__((overloadable))
+ rsClearObject(rs_mesh* dst);
+#endif
+#endif
+
+#ifndef __LP64__
+#if !defined(RS_VERSION) || (RS_VERSION <= 22)
+extern void __attribute__((overloadable))
+ rsClearObject(rs_program_fragment* dst);
+#endif
+#endif
+
+#ifndef __LP64__
+#if !defined(RS_VERSION) || (RS_VERSION <= 22)
+extern void __attribute__((overloadable))
+ rsClearObject(rs_program_vertex* dst);
+#endif
+#endif
+
+#ifndef __LP64__
+#if !defined(RS_VERSION) || (RS_VERSION <= 22)
+extern void __attribute__((overloadable))
+ rsClearObject(rs_program_raster* dst);
+#endif
+#endif
+
+#ifndef __LP64__
+#if !defined(RS_VERSION) || (RS_VERSION <= 22)
+extern void __attribute__((overloadable))
+ rsClearObject(rs_program_store* dst);
+#endif
+#endif
+
+#ifndef __LP64__
+#if !defined(RS_VERSION) || (RS_VERSION <= 22)
+extern void __attribute__((overloadable))
+ rsClearObject(rs_font* dst);
+#endif
+#endif
+
+/*
+ * rsIsObject: Check for an empty handle
+ *
+ * Returns true if the handle contains a non-null reference.
+ *
+ * This function does not validate that the internal pointer used in the handle
+ * points to an actual valid object; it only checks for null.
+ *
+ * This function can be used to check the Element returned by rsElementGetSubElement()
+ * or to see if rsClearObject() has been called on a handle.
+ */
+#ifndef __LP64__
+#if !defined(RS_VERSION) || (RS_VERSION <= 22)
+extern bool __attribute__((overloadable))
+ rsIsObject(rs_mesh v);
+#endif
+#endif
+
+#ifndef __LP64__
+#if !defined(RS_VERSION) || (RS_VERSION <= 22)
+extern bool __attribute__((overloadable))
+ rsIsObject(rs_program_fragment v);
+#endif
+#endif
+
+#ifndef __LP64__
+#if !defined(RS_VERSION) || (RS_VERSION <= 22)
+extern bool __attribute__((overloadable))
+ rsIsObject(rs_program_vertex v);
+#endif
+#endif
+
+#ifndef __LP64__
+#if !defined(RS_VERSION) || (RS_VERSION <= 22)
+extern bool __attribute__((overloadable))
+ rsIsObject(rs_program_raster v);
+#endif
+#endif
+
+#ifndef __LP64__
+#if !defined(RS_VERSION) || (RS_VERSION <= 22)
+extern bool __attribute__((overloadable))
+ rsIsObject(rs_program_store v);
+#endif
+#endif
+
+#ifndef __LP64__
+#if !defined(RS_VERSION) || (RS_VERSION <= 22)
+extern bool __attribute__((overloadable))
+ rsIsObject(rs_font v);
+#endif
+#endif
+
+/*
+ * rsSetObject: For internal use.
+ *
+ */
+#ifndef __LP64__
+#if !defined(RS_VERSION) || (RS_VERSION <= 22)
+extern void __attribute__((overloadable))
+ rsSetObject(rs_mesh* dst, rs_mesh src);
+#endif
+#endif
+
+#ifndef __LP64__
+#if !defined(RS_VERSION) || (RS_VERSION <= 22)
+extern void __attribute__((overloadable))
+ rsSetObject(rs_program_fragment* dst, rs_program_fragment src);
+#endif
+#endif
+
+#ifndef __LP64__
+#if !defined(RS_VERSION) || (RS_VERSION <= 22)
+extern void __attribute__((overloadable))
+ rsSetObject(rs_program_vertex* dst, rs_program_vertex src);
+#endif
+#endif
+
+#ifndef __LP64__
+#if !defined(RS_VERSION) || (RS_VERSION <= 22)
+extern void __attribute__((overloadable))
+ rsSetObject(rs_program_raster* dst, rs_program_raster src);
+#endif
+#endif
+
+#ifndef __LP64__
+#if !defined(RS_VERSION) || (RS_VERSION <= 22)
+extern void __attribute__((overloadable))
+ rsSetObject(rs_program_store* dst, rs_program_store src);
+#endif
+#endif
+
+#ifndef __LP64__
+#if !defined(RS_VERSION) || (RS_VERSION <= 22)
+extern void __attribute__((overloadable))
+ rsSetObject(rs_font* dst, rs_font src);
+#endif
+#endif
+
+/*
+ * rsgAllocationSyncAll: Sync the contents of an allocation
+ *
+ * DEPRECATED. Do not use.
+ *
+ * Sync the contents of an allocation.
+ *
+ * If the source is specified, sync from the memory space specified by source.
+ *
+ * If the source is not specified, sync from its SCRIPT memory space to its HW
+ * memory spaces.
+ */
+#ifndef __LP64__
+#if !defined(RS_VERSION) || (defined(RS_DECLARE_EXPIRED_APIS) || RS_VERSION <= 22)
+extern void __attribute__((overloadable
+#if (defined(RS_VERSION) && (RS_VERSION >= 22))
+, deprecated
+#endif
+))
+ rsgAllocationSyncAll(rs_allocation alloc);
+#endif
+#endif
+
+#ifndef __LP64__
+#if (defined(RS_VERSION) && (RS_VERSION >= 14) && (defined(RS_DECLARE_EXPIRED_APIS) || RS_VERSION <= 22))
+extern void __attribute__((overloadable
+#if (defined(RS_VERSION) && (RS_VERSION >= 22))
+, deprecated
+#endif
+))
+ rsgAllocationSyncAll(rs_allocation alloc, rs_allocation_usage_type source);
+#endif
+#endif
+
+/*
+ * rsgBindColorTarget: Set the color target
+ *
+ * DEPRECATED. Do not use.
+ *
+ * Set the color target used for all subsequent rendering calls
+ */
+#ifndef __LP64__
+#if (defined(RS_VERSION) && (RS_VERSION >= 14) && (defined(RS_DECLARE_EXPIRED_APIS) || RS_VERSION <= 22))
+extern void __attribute__((overloadable
+#if (defined(RS_VERSION) && (RS_VERSION >= 22))
+, deprecated
+#endif
+))
+ rsgBindColorTarget(rs_allocation colorTarget, uint slot);
+#endif
+#endif
+
+/*
+ * rsgBindConstant: Bind a constant allocation
+ *
+ * DEPRECATED. Do not use.
+ *
+ * Bind a new Allocation object to a ProgramFragment or ProgramVertex.
+ * The Allocation must be a valid constant input for the Program.
+ *
+ * Parameters:
+ * ps: program fragment object
+ * slot: index of the constant buffer on the program
+ * c: constants to bind
+ * pv: program vertex object
+ */
+#ifndef __LP64__
+#if !defined(RS_VERSION) || (defined(RS_DECLARE_EXPIRED_APIS) || RS_VERSION <= 22)
+extern void __attribute__((overloadable
+#if (defined(RS_VERSION) && (RS_VERSION >= 22))
+, deprecated
+#endif
+))
+ rsgBindConstant(rs_program_fragment ps, uint slot, rs_allocation c);
+#endif
+#endif
+
+#ifndef __LP64__
+#if !defined(RS_VERSION) || (defined(RS_DECLARE_EXPIRED_APIS) || RS_VERSION <= 22)
+extern void __attribute__((overloadable
+#if (defined(RS_VERSION) && (RS_VERSION >= 22))
+, deprecated
+#endif
+))
+ rsgBindConstant(rs_program_vertex pv, uint slot, rs_allocation c);
+#endif
+#endif
+
+/*
+ * rsgBindDepthTarget: Set the depth target
+ *
+ * DEPRECATED. Do not use.
+ *
+ * Set the depth target used for all subsequent rendering calls
+ */
+#ifndef __LP64__
+#if (defined(RS_VERSION) && (RS_VERSION >= 14) && (defined(RS_DECLARE_EXPIRED_APIS) || RS_VERSION <= 22))
+extern void __attribute__((overloadable
+#if (defined(RS_VERSION) && (RS_VERSION >= 22))
+, deprecated
+#endif
+))
+ rsgBindDepthTarget(rs_allocation depthTarget);
+#endif
+#endif
+
+/*
+ * rsgBindFont: Bind a font object
+ *
+ * DEPRECATED. Do not use.
+ *
+ * Binds the font object to be used for all subsequent font rendering calls
+ *
+ * Parameters:
+ * font: object to bind
+ */
+#ifndef __LP64__
+#if !defined(RS_VERSION) || (defined(RS_DECLARE_EXPIRED_APIS) || RS_VERSION <= 22)
+extern void __attribute__((overloadable
+#if (defined(RS_VERSION) && (RS_VERSION >= 22))
+, deprecated
+#endif
+))
+ rsgBindFont(rs_font font);
+#endif
+#endif
+
+/*
+ * rsgBindProgramFragment: Bind a ProgramFragment
+ *
+ * DEPRECATED. Do not use.
+ *
+ * Bind a new ProgramFragment to the rendering context.
+ */
+#ifndef __LP64__
+#if !defined(RS_VERSION) || (defined(RS_DECLARE_EXPIRED_APIS) || RS_VERSION <= 22)
+extern void __attribute__((overloadable
+#if (defined(RS_VERSION) && (RS_VERSION >= 22))
+, deprecated
+#endif
+))
+ rsgBindProgramFragment(rs_program_fragment pf);
+#endif
+#endif
+
+/*
+ * rsgBindProgramRaster: Bind a ProgramRaster
+ *
+ * DEPRECATED. Do not use.
+ *
+ * Bind a new ProgramRaster to the rendering context.
+ */
+#ifndef __LP64__
+#if !defined(RS_VERSION) || (defined(RS_DECLARE_EXPIRED_APIS) || RS_VERSION <= 22)
+extern void __attribute__((overloadable
+#if (defined(RS_VERSION) && (RS_VERSION >= 22))
+, deprecated
+#endif
+))
+ rsgBindProgramRaster(rs_program_raster pr);
+#endif
+#endif
+
+/*
+ * rsgBindProgramStore: Bind a ProgramStore
+ *
+ * DEPRECATED. Do not use.
+ *
+ * Bind a new ProgramStore to the rendering context.
+ */
+#ifndef __LP64__
+#if !defined(RS_VERSION) || (defined(RS_DECLARE_EXPIRED_APIS) || RS_VERSION <= 22)
+extern void __attribute__((overloadable
+#if (defined(RS_VERSION) && (RS_VERSION >= 22))
+, deprecated
+#endif
+))
+ rsgBindProgramStore(rs_program_store ps);
+#endif
+#endif
+
+/*
+ * rsgBindProgramVertex: Bind a ProgramVertex
+ *
+ * DEPRECATED. Do not use.
+ *
+ * Bind a new ProgramVertex to the rendering context.
+ */
+#ifndef __LP64__
+#if !defined(RS_VERSION) || (defined(RS_DECLARE_EXPIRED_APIS) || RS_VERSION <= 22)
+extern void __attribute__((overloadable
+#if (defined(RS_VERSION) && (RS_VERSION >= 22))
+, deprecated
+#endif
+))
+ rsgBindProgramVertex(rs_program_vertex pv);
+#endif
+#endif
+
+/*
+ * rsgBindSampler: Bind a sampler
+ *
+ * DEPRECATED. Do not use.
+ *
+ * Bind a new Sampler object to a ProgramFragment. The sampler will
+ * operate on the texture bound at the matching slot.
+ */
+#ifndef __LP64__
+#if !defined(RS_VERSION) || (defined(RS_DECLARE_EXPIRED_APIS) || RS_VERSION <= 22)
+extern void __attribute__((overloadable
+#if (defined(RS_VERSION) && (RS_VERSION >= 22))
+, deprecated
+#endif
+))
+ rsgBindSampler(rs_program_fragment fragment, uint slot, rs_sampler sampler);
+#endif
+#endif
+
+/*
+ * rsgBindTexture: Bind a texture allocation
+ *
+ * DEPRECATED. Do not use.
+ *
+ * Bind a new Allocation object to a ProgramFragment. The
+ * Allocation must be a valid texture for the Program. The sampling
+ * of the texture will be controlled by the Sampler bound at the
+ * matching slot.
+ */
+#ifndef __LP64__
+#if !defined(RS_VERSION) || (defined(RS_DECLARE_EXPIRED_APIS) || RS_VERSION <= 22)
+extern void __attribute__((overloadable
+#if (defined(RS_VERSION) && (RS_VERSION >= 22))
+, deprecated
+#endif
+))
+ rsgBindTexture(rs_program_fragment v, uint slot, rs_allocation alloc);
+#endif
+#endif
+
+/*
+ * rsgClearAllRenderTargets: Clear all color and depth targets
+ *
+ * DEPRECATED. Do not use.
+ *
+ * Clear all color and depth targets and resume rendering into
+ * the framebuffer
+ */
+#ifndef __LP64__
+#if (defined(RS_VERSION) && (RS_VERSION >= 14) && (defined(RS_DECLARE_EXPIRED_APIS) || RS_VERSION <= 22))
+extern void __attribute__((overloadable
+#if (defined(RS_VERSION) && (RS_VERSION >= 22))
+, deprecated
+#endif
+))
+ rsgClearAllRenderTargets(void);
+#endif
+#endif
+
+/*
+ * rsgClearColor: Clear the specified color from the surface
+ *
+ * DEPRECATED. Do not use.
+ *
+ * Clears the rendering surface to the specified color.
+ */
+#ifndef __LP64__
+#if !defined(RS_VERSION) || (defined(RS_DECLARE_EXPIRED_APIS) || RS_VERSION <= 22)
+extern void __attribute__((overloadable
+#if (defined(RS_VERSION) && (RS_VERSION >= 22))
+, deprecated
+#endif
+))
+ rsgClearColor(float r, float g, float b, float a);
+#endif
+#endif
+
+/*
+ * rsgClearColorTarget: Clear the color target
+ *
+ * DEPRECATED. Do not use.
+ *
+ * Clear the previously set color target
+ */
+#ifndef __LP64__
+#if (defined(RS_VERSION) && (RS_VERSION >= 14) && (defined(RS_DECLARE_EXPIRED_APIS) || RS_VERSION <= 22))
+extern void __attribute__((overloadable
+#if (defined(RS_VERSION) && (RS_VERSION >= 22))
+, deprecated
+#endif
+))
+ rsgClearColorTarget(uint slot);
+#endif
+#endif
+
+/*
+ * rsgClearDepth: Clear the depth surface
+ *
+ * DEPRECATED. Do not use.
+ *
+ * Clears the depth surface to the specified value.
+ */
+#ifndef __LP64__
+#if !defined(RS_VERSION) || (defined(RS_DECLARE_EXPIRED_APIS) || RS_VERSION <= 22)
+extern void __attribute__((overloadable
+#if (defined(RS_VERSION) && (RS_VERSION >= 22))
+, deprecated
+#endif
+))
+ rsgClearDepth(float value);
+#endif
+#endif
+
+/*
+ * rsgClearDepthTarget: Clear the depth target
+ *
+ * DEPRECATED. Do not use.
+ *
+ * Clear the previously set depth target
+ */
+#ifndef __LP64__
+#if (defined(RS_VERSION) && (RS_VERSION >= 14) && (defined(RS_DECLARE_EXPIRED_APIS) || RS_VERSION <= 22))
+extern void __attribute__((overloadable
+#if (defined(RS_VERSION) && (RS_VERSION >= 22))
+, deprecated
+#endif
+))
+ rsgClearDepthTarget(void);
+#endif
+#endif
+
+/*
+ * rsgDrawMesh: Draw a mesh
+ *
+ * DEPRECATED. Do not use.
+ *
+ * Draw a mesh using the current context state.
+ *
+ * If primitiveIndex is specified, draw part of a mesh using the current context state.
+ *
+ * If start and len are also specified, draw the specified index range of part of a mesh using the current context state.
+ *
+ * Otherwise the whole mesh is rendered.
+ *
+ * Parameters:
+ * ism: mesh object to render
+ * primitiveIndex: for meshes that contain multiple primitive groups, this parameter specifies the index of the group to draw.
+ * start: starting index in the range
+ * len: number of indices to draw
+ */
+#ifndef __LP64__
+#if !defined(RS_VERSION) || (defined(RS_DECLARE_EXPIRED_APIS) || RS_VERSION <= 22)
+extern void __attribute__((overloadable
+#if (defined(RS_VERSION) && (RS_VERSION >= 22))
+, deprecated
+#endif
+))
+ rsgDrawMesh(rs_mesh ism);
+#endif
+#endif
+
+#ifndef __LP64__
+#if !defined(RS_VERSION) || (defined(RS_DECLARE_EXPIRED_APIS) || RS_VERSION <= 22)
+extern void __attribute__((overloadable
+#if (defined(RS_VERSION) && (RS_VERSION >= 22))
+, deprecated
+#endif
+))
+ rsgDrawMesh(rs_mesh ism, uint primitiveIndex);
+#endif
+#endif
+
+#ifndef __LP64__
+#if !defined(RS_VERSION) || (defined(RS_DECLARE_EXPIRED_APIS) || RS_VERSION <= 22)
+extern void __attribute__((overloadable
+#if (defined(RS_VERSION) && (RS_VERSION >= 22))
+, deprecated
+#endif
+))
+ rsgDrawMesh(rs_mesh ism, uint primitiveIndex, uint start, uint len);
+#endif
+#endif
+
+/*
+ * rsgDrawQuad: Draw a quad
+ *
+ * DEPRECATED. Do not use.
+ *
+ * Low performance utility function for drawing a simple quad. Not intended for
+ * drawing large quantities of geometry.
+ */
+#ifndef __LP64__
+#if !defined(RS_VERSION) || (defined(RS_DECLARE_EXPIRED_APIS) || RS_VERSION <= 22)
+extern void __attribute__((overloadable
+#if (defined(RS_VERSION) && (RS_VERSION >= 22))
+, deprecated
+#endif
+))
+ rsgDrawQuad(float x1, float y1, float z1, float x2, float y2, float z2, float x3, float y3,
+ float z3, float x4, float y4, float z4);
+#endif
+#endif
+
+/*
+ * rsgDrawQuadTexCoords: Draw a textured quad
+ *
+ * DEPRECATED. Do not use.
+ *
+ * Low performance utility function for drawing a textured quad. Not intended
+ * for drawing large quantities of geometry.
+ */
+#ifndef __LP64__
+#if !defined(RS_VERSION) || (defined(RS_DECLARE_EXPIRED_APIS) || RS_VERSION <= 22)
+extern void __attribute__((overloadable
+#if (defined(RS_VERSION) && (RS_VERSION >= 22))
+, deprecated
+#endif
+))
+ rsgDrawQuadTexCoords(float x1, float y1, float z1, float u1, float v1, float x2, float y2,
+ float z2, float u2, float v2, float x3, float y3, float z3, float u3,
+ float v3, float x4, float y4, float z4, float u4, float v4);
+#endif
+#endif
+
+/*
+ * rsgDrawRect: Draw a rectangle
+ *
+ * DEPRECATED. Do not use.
+ *
+ * Low performance utility function for drawing a simple rectangle. Not
+ * intended for drawing large quantities of geometry.
+ */
+#ifndef __LP64__
+#if !defined(RS_VERSION) || (defined(RS_DECLARE_EXPIRED_APIS) || RS_VERSION <= 22)
+extern void __attribute__((overloadable
+#if (defined(RS_VERSION) && (RS_VERSION >= 22))
+, deprecated
+#endif
+))
+ rsgDrawRect(float x1, float y1, float x2, float y2, float z);
+#endif
+#endif
+
+/*
+ * rsgDrawSpriteScreenspace: Draw rectangles in screenspace
+ *
+ * DEPRECATED. Do not use.
+ *
+ * Low performance function for drawing rectangles in screenspace. This
+ * function uses the default passthrough ProgramVertex. Any bound ProgramVertex
+ * is ignored. This function has considerable overhead and should not be used
+ * for drawing in shipping applications.
+ */
+#ifndef __LP64__
+#if !defined(RS_VERSION) || (defined(RS_DECLARE_EXPIRED_APIS) || RS_VERSION <= 22)
+extern void __attribute__((overloadable
+#if (defined(RS_VERSION) && (RS_VERSION >= 22))
+, deprecated
+#endif
+))
+ rsgDrawSpriteScreenspace(float x, float y, float z, float w, float h);
+#endif
+#endif
+
+/*
+ * rsgDrawText: Draw a text string
+ *
+ * DEPRECATED. Do not use.
+ *
+ * Draws text given a string and location
+ */
+#ifndef __LP64__
+#if !defined(RS_VERSION) || (defined(RS_DECLARE_EXPIRED_APIS) || RS_VERSION <= 22)
+extern void __attribute__((overloadable
+#if (defined(RS_VERSION) && (RS_VERSION >= 22))
+, deprecated
+#endif
+))
+ rsgDrawText(const char* text, int x, int y);
+#endif
+#endif
+
+#ifndef __LP64__
+#if !defined(RS_VERSION) || (defined(RS_DECLARE_EXPIRED_APIS) || RS_VERSION <= 22)
+extern void __attribute__((overloadable
+#if (defined(RS_VERSION) && (RS_VERSION >= 22))
+, deprecated
+#endif
+))
+ rsgDrawText(rs_allocation alloc, int x, int y);
+#endif
+#endif
+
+/*
+ * rsgFinish: End rendering commands
+ *
+ * DEPRECATED. Do not use.
+ *
+ * Force RenderScript to finish all rendering commands
+ */
+#ifndef __LP64__
+#if (defined(RS_VERSION) && (RS_VERSION >= 14) && (defined(RS_DECLARE_EXPIRED_APIS) || RS_VERSION <= 22))
+extern uint __attribute__((overloadable
+#if (defined(RS_VERSION) && (RS_VERSION >= 22))
+, deprecated
+#endif
+))
+ rsgFinish(void);
+#endif
+#endif
+
+/*
+ * rsgFontColor: Set the font color
+ *
+ * DEPRECATED. Do not use.
+ *
+ * Sets the font color for all subsequent rendering calls
+ *
+ * Parameters:
+ * r: red component
+ * g: green component
+ * b: blue component
+ * a: alpha component
+ */
+#ifndef __LP64__
+#if !defined(RS_VERSION) || (defined(RS_DECLARE_EXPIRED_APIS) || RS_VERSION <= 22)
+extern void __attribute__((overloadable
+#if (defined(RS_VERSION) && (RS_VERSION >= 22))
+, deprecated
+#endif
+))
+ rsgFontColor(float r, float g, float b, float a);
+#endif
+#endif
+
+/*
+ * rsgGetHeight: Get the surface height
+ *
+ * DEPRECATED. Do not use.
+ *
+ * Get the height of the current rendering surface.
+ */
+#ifndef __LP64__
+#if !defined(RS_VERSION) || (defined(RS_DECLARE_EXPIRED_APIS) || RS_VERSION <= 22)
+extern uint __attribute__((overloadable
+#if (defined(RS_VERSION) && (RS_VERSION >= 22))
+, deprecated
+#endif
+))
+ rsgGetHeight(void);
+#endif
+#endif
+
+/*
+ * rsgGetWidth: Get the surface width
+ *
+ * DEPRECATED. Do not use.
+ *
+ * Get the width of the current rendering surface.
+ */
+#ifndef __LP64__
+#if !defined(RS_VERSION) || (defined(RS_DECLARE_EXPIRED_APIS) || RS_VERSION <= 22)
+extern uint __attribute__((overloadable
+#if (defined(RS_VERSION) && (RS_VERSION >= 22))
+, deprecated
+#endif
+))
+ rsgGetWidth(void);
+#endif
+#endif
+
+/*
+ * rsgMeasureText: Get the bounding box for a text string
+ *
+ * DEPRECATED. Do not use.
+ *
+ * Returns the bounding box of the text relative to (0, 0).
+ * Any of left, right, top, and bottom may be NULL.
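+ *
+ * E.g., a sketch of measuring a literal string:
+ *   int left = 0, right = 0, top = 0, bottom = 0;
+ *   rsgMeasureText("Hello", &left, &right, &top, &bottom);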
+ */
+#ifndef __LP64__
+#if !defined(RS_VERSION) || (defined(RS_DECLARE_EXPIRED_APIS) || RS_VERSION <= 22)
+extern void __attribute__((overloadable
+#if (defined(RS_VERSION) && (RS_VERSION >= 22))
+, deprecated
+#endif
+))
+ rsgMeasureText(const char* text, int* left, int* right, int* top, int* bottom);
+#endif
+#endif
+
+#ifndef __LP64__
+#if !defined(RS_VERSION) || (defined(RS_DECLARE_EXPIRED_APIS) || RS_VERSION <= 22)
+extern void __attribute__((overloadable
+#if (defined(RS_VERSION) && (RS_VERSION >= 22))
+, deprecated
+#endif
+))
+ rsgMeasureText(rs_allocation alloc, int* left, int* right, int* top, int* bottom);
+#endif
+#endif
+
+/*
+ * rsgMeshComputeBoundingBox: Compute a bounding box
+ *
+ * DEPRECATED. Do not use.
+ *
+ * Computes an axis aligned bounding box of a mesh object
+ */
+#ifndef __LP64__
+#if !defined(RS_VERSION) || (defined(RS_DECLARE_EXPIRED_APIS) || RS_VERSION <= 22)
+extern void __attribute__((overloadable
+#if (defined(RS_VERSION) && (RS_VERSION >= 22))
+, deprecated
+#endif
+))
+ rsgMeshComputeBoundingBox(rs_mesh mesh, float* minX, float* minY, float* minZ, float* maxX,
+ float* maxY, float* maxZ);
+#endif
+#endif
+
+#ifndef __LP64__
+#if !defined(RS_VERSION) || (defined(RS_DECLARE_EXPIRED_APIS) || RS_VERSION <= 22)
+static inline void __attribute__((overloadable
+#if (defined(RS_VERSION) && (RS_VERSION >= 22))
+, deprecated
+#endif
+))
+ rsgMeshComputeBoundingBox(rs_mesh mesh, float3* bBoxMin, float3* bBoxMax) {
+ float x1, y1, z1, x2, y2, z2;
+ rsgMeshComputeBoundingBox(mesh, &x1, &y1, &z1, &x2, &y2, &z2);
+ bBoxMin->x = x1;
+ bBoxMin->y = y1;
+ bBoxMin->z = z1;
+ bBoxMax->x = x2;
+ bBoxMax->y = y2;
+ bBoxMax->z = z2;
+}
+#endif
+#endif
+
+/*
+ * rsgMeshGetIndexAllocation: Return an allocation containing index data
+ *
+ * DEPRECATED. Do not use.
+ *
+ * Returns an allocation containing index data or a null
+ * allocation if only the primitive is specified
+ *
+ * Parameters:
+ * m: mesh to get data from
+ * index: index of the index allocation
+ *
+ * Returns: allocation containing index data
+ */
+#ifndef __LP64__
+#if (defined(RS_VERSION) && (RS_VERSION >= 16) && (defined(RS_DECLARE_EXPIRED_APIS) || RS_VERSION <= 22))
+extern rs_allocation __attribute__((overloadable
+#if (defined(RS_VERSION) && (RS_VERSION >= 22))
+, deprecated
+#endif
+))
+ rsgMeshGetIndexAllocation(rs_mesh m, uint32_t index);
+#endif
+#endif
+
+/*
+ * rsgMeshGetPrimitive: Return the primitive
+ *
+ * DEPRECATED. Do not use.
+ *
+ * Returns the primitive describing how a part of the mesh is
+ * rendered
+ *
+ * Parameters:
+ * m: mesh to get data from
+ * index: index of the primitive
+ *
+ * Returns: primitive describing how the mesh is rendered
+ */
+#ifndef __LP64__
+#if (defined(RS_VERSION) && (RS_VERSION >= 16) && (defined(RS_DECLARE_EXPIRED_APIS) || RS_VERSION <= 22))
+extern rs_primitive __attribute__((overloadable
+#if (defined(RS_VERSION) && (RS_VERSION >= 22))
+, deprecated
+#endif
+))
+ rsgMeshGetPrimitive(rs_mesh m, uint32_t index);
+#endif
+#endif
+
+/*
+ * rsgMeshGetPrimitiveCount: Return the number of index sets
+ *
+ * DEPRECATED. Do not use.
+ *
+ * Meshes can have multiple index sets; this function returns
+ * their count.
+ *
+ * Parameters:
+ * m: mesh to get data from
+ *
+ * Returns: number of primitive groups in the mesh. This would include simple primitives as well as allocations containing index data
+ */
+#ifndef __LP64__
+#if (defined(RS_VERSION) && (RS_VERSION >= 16) && (defined(RS_DECLARE_EXPIRED_APIS) || RS_VERSION <= 22))
+extern uint32_t __attribute__((overloadable
+#if (defined(RS_VERSION) && (RS_VERSION >= 22))
+, deprecated
+#endif
+))
+ rsgMeshGetPrimitiveCount(rs_mesh m);
+#endif
+#endif
+
+/*
+ * rsgMeshGetVertexAllocation: Return a vertex allocation
+ *
+ * DEPRECATED. Do not use.
+ *
+ * Returns an allocation that is part of the mesh and contains
+ * vertex data, e.g. positions, normals, texcoords
+ *
+ * Parameters:
+ * m: mesh to get data from
+ * index: index of the vertex allocation
+ *
+ * Returns: allocation containing vertex data
+ */
+#ifndef __LP64__
+#if (defined(RS_VERSION) && (RS_VERSION >= 16) && (defined(RS_DECLARE_EXPIRED_APIS) || RS_VERSION <= 22))
+extern rs_allocation __attribute__((overloadable
+#if (defined(RS_VERSION) && (RS_VERSION >= 22))
+, deprecated
+#endif
+))
+ rsgMeshGetVertexAllocation(rs_mesh m, uint32_t index);
+#endif
+#endif
+
+/*
+ * rsgMeshGetVertexAllocationCount: Return the number of vertex allocations
+ *
+ * DEPRECATED. Do not use.
+ *
+ * Returns the number of allocations in the mesh that contain
+ * vertex data
+ *
+ * Parameters:
+ * m: mesh to get data from
+ *
+ * Returns: number of allocations in the mesh that contain vertex data
+ */
+#ifndef __LP64__
+#if (defined(RS_VERSION) && (RS_VERSION >= 16) && (defined(RS_DECLARE_EXPIRED_APIS) || RS_VERSION <= 22))
+extern uint32_t __attribute__((overloadable
+#if (defined(RS_VERSION) && (RS_VERSION >= 22))
+, deprecated
+#endif
+))
+ rsgMeshGetVertexAllocationCount(rs_mesh m);
+#endif
+#endif
+
+/*
+ * rsgProgramFragmentConstantColor: Set the constant color for a fixed function emulation program
+ *
+ * DEPRECATED. Do not use.
+ *
+ * Set the constant color for a fixed function emulation program.
+ */
+#ifndef __LP64__
+#if !defined(RS_VERSION) || (defined(RS_DECLARE_EXPIRED_APIS) || RS_VERSION <= 22)
+extern void __attribute__((overloadable
+#if (defined(RS_VERSION) && (RS_VERSION >= 22))
+, deprecated
+#endif
+))
+ rsgProgramFragmentConstantColor(rs_program_fragment pf, float r, float g, float b, float a);
+#endif
+#endif
+
+/*
+ * rsgProgramVertexGetProjectionMatrix: Get the projection matrix for a fixed function vertex program
+ *
+ * DEPRECATED. Do not use.
+ *
+ * Get the projection matrix for a currently bound fixed function
+ * vertex program. Calling this function with a custom vertex shader
+ * would result in an error.
+ *
+ * Parameters:
+ * proj: matrix to store the current projection matrix into
+ */
+#ifndef __LP64__
+#if !defined(RS_VERSION) || (defined(RS_DECLARE_EXPIRED_APIS) || RS_VERSION <= 22)
+extern void __attribute__((overloadable
+#if (defined(RS_VERSION) && (RS_VERSION >= 22))
+, deprecated
+#endif
+))
+ rsgProgramVertexGetProjectionMatrix(rs_matrix4x4* proj);
+#endif
+#endif
+
+/*
+ * rsgProgramVertexLoadModelMatrix: Load the model matrix for a bound fixed function vertex program
+ *
+ * DEPRECATED. Do not use.
+ *
+ * Load the model matrix for a currently bound fixed function
+ * vertex program. Calling this function with a custom vertex shader
+ * would result in an error.
+ *
+ * Parameters:
+ * model: model matrix
+ */
+#ifndef __LP64__
+#if !defined(RS_VERSION) || (defined(RS_DECLARE_EXPIRED_APIS) || RS_VERSION <= 22)
+extern void __attribute__((overloadable
+#if (defined(RS_VERSION) && (RS_VERSION >= 22))
+, deprecated
+#endif
+))
+ rsgProgramVertexLoadModelMatrix(const rs_matrix4x4* model);
+#endif
+#endif
+
+/*
+ * rsgProgramVertexLoadProjectionMatrix: Load the projection matrix for a bound fixed function vertex program
+ *
+ * DEPRECATED. Do not use.
+ *
+ * Load the projection matrix for a currently bound fixed function
+ * vertex program. Calling this function with a custom vertex shader
+ * would result in an error.
+ *
+ * Parameters:
+ * proj: projection matrix
+ */
+#ifndef __LP64__
+#if !defined(RS_VERSION) || (defined(RS_DECLARE_EXPIRED_APIS) || RS_VERSION <= 22)
+extern void __attribute__((overloadable
+#if (defined(RS_VERSION) && (RS_VERSION >= 22))
+, deprecated
+#endif
+))
+ rsgProgramVertexLoadProjectionMatrix(const rs_matrix4x4* proj);
+#endif
+#endif
+
+/*
+ * rsgProgramVertexLoadTextureMatrix: Load the texture matrix for a bound fixed function vertex program
+ *
+ * DEPRECATED. Do not use.
+ *
+ * Load the texture matrix for a currently bound fixed function
+ * vertex program. Calling this function with a custom vertex shader
+ * would result in an error.
+ *
+ * Parameters:
+ * tex: texture matrix
+ */
+#ifndef __LP64__
+#if !defined(RS_VERSION) || (defined(RS_DECLARE_EXPIRED_APIS) || RS_VERSION <= 22)
+extern void __attribute__((overloadable
+#if (defined(RS_VERSION) && (RS_VERSION >= 22))
+, deprecated
+#endif
+))
+ rsgProgramVertexLoadTextureMatrix(const rs_matrix4x4* tex);
+#endif
+#endif
+
+/*
+ * rsgProgramRasterGetCullMode: Get program raster cull mode
+ *
+ * DEPRECATED. Do not use.
+ *
+ * Get program raster cull mode
+ *
+ * Parameters:
+ * pr: program raster to query
+ */
+#ifndef __LP64__
+#if (defined(RS_VERSION) && (RS_VERSION >= 16) && (defined(RS_DECLARE_EXPIRED_APIS) || RS_VERSION <= 22))
+extern rs_cull_mode __attribute__((overloadable
+#if (defined(RS_VERSION) && (RS_VERSION >= 22))
+, deprecated
+#endif
+))
+ rsgProgramRasterGetCullMode(rs_program_raster pr);
+#endif
+#endif
+
+/*
+ * rsgProgramRasterIsPointSpriteEnabled: Get program raster point sprite state
+ *
+ * DEPRECATED. Do not use.
+ *
+ * Get program raster point sprite state
+ *
+ * Parameters:
+ * pr: program raster to query
+ */
+#ifndef __LP64__
+#if (defined(RS_VERSION) && (RS_VERSION >= 16) && (defined(RS_DECLARE_EXPIRED_APIS) || RS_VERSION <= 22))
+extern bool __attribute__((overloadable
+#if (defined(RS_VERSION) && (RS_VERSION >= 22))
+, deprecated
+#endif
+))
+ rsgProgramRasterIsPointSpriteEnabled(rs_program_raster pr);
+#endif
+#endif
+
+/*
+ * rsgProgramStoreGetBlendDstFunc: Get program store blend destination function
+ *
+ * DEPRECATED. Do not use.
+ *
+ * Get program store blend destination function
+ *
+ * Parameters:
+ * ps: program store to query
+ */
+#ifndef __LP64__
+#if (defined(RS_VERSION) && (RS_VERSION >= 16) && (defined(RS_DECLARE_EXPIRED_APIS) || RS_VERSION <= 22))
+extern rs_blend_dst_func __attribute__((overloadable
+#if (defined(RS_VERSION) && (RS_VERSION >= 22))
+, deprecated
+#endif
+))
+ rsgProgramStoreGetBlendDstFunc(rs_program_store ps);
+#endif
+#endif
+
+/*
+ * rsgProgramStoreGetBlendSrcFunc: Get program store blend source function
+ *
+ * DEPRECATED. Do not use.
+ *
+ * Get program store blend source function
+ *
+ * Parameters:
+ * ps: program store to query
+ */
+#ifndef __LP64__
+#if (defined(RS_VERSION) && (RS_VERSION >= 16) && (defined(RS_DECLARE_EXPIRED_APIS) || RS_VERSION <= 22))
+extern rs_blend_src_func __attribute__((overloadable
+#if (defined(RS_VERSION) && (RS_VERSION >= 22))
+, deprecated
+#endif
+))
+ rsgProgramStoreGetBlendSrcFunc(rs_program_store ps);
+#endif
+#endif
+
+/*
+ * rsgProgramStoreGetDepthFunc: Get program store depth function
+ *
+ * DEPRECATED. Do not use.
+ *
+ * Get program store depth function
+ *
+ * Parameters:
+ * ps: program store to query
+ */
+#ifndef __LP64__
+#if (defined(RS_VERSION) && (RS_VERSION >= 16) && (defined(RS_DECLARE_EXPIRED_APIS) || RS_VERSION <= 22))
+extern rs_depth_func __attribute__((overloadable
+#if (defined(RS_VERSION) && (RS_VERSION >= 22))
+, deprecated
+#endif
+))
+ rsgProgramStoreGetDepthFunc(rs_program_store ps);
+#endif
+#endif
+
+/*
+ * rsgProgramStoreIsColorMaskAlphaEnabled: Get program store alpha component color mask
+ *
+ * DEPRECATED. Do not use.
+ *
+ * Get program store alpha component color mask
+ *
+ * Parameters:
+ * ps: program store to query
+ */
+#ifndef __LP64__
+#if (defined(RS_VERSION) && (RS_VERSION >= 16) && (defined(RS_DECLARE_EXPIRED_APIS) || RS_VERSION <= 22))
+extern bool __attribute__((overloadable
+#if (defined(RS_VERSION) && (RS_VERSION >= 22))
+, deprecated
+#endif
+))
+ rsgProgramStoreIsColorMaskAlphaEnabled(rs_program_store ps);
+#endif
+#endif
+
+/*
+ * rsgProgramStoreIsColorMaskBlueEnabled: Get program store blue component color mask
+ *
+ * DEPRECATED. Do not use.
+ *
+ * Get program store blue component color mask
+ *
+ * Parameters:
+ * ps: program store to query
+ */
+#ifndef __LP64__
+#if (defined(RS_VERSION) && (RS_VERSION >= 16) && (defined(RS_DECLARE_EXPIRED_APIS) || RS_VERSION <= 22))
+extern bool __attribute__((overloadable
+#if (defined(RS_VERSION) && (RS_VERSION >= 22))
+, deprecated
+#endif
+))
+ rsgProgramStoreIsColorMaskBlueEnabled(rs_program_store ps);
+#endif
+#endif
+
+/*
+ * rsgProgramStoreIsColorMaskGreenEnabled: Get program store green component color mask
+ *
+ * DEPRECATED. Do not use.
+ *
+ * Get program store green component color mask
+ *
+ * Parameters:
+ * ps: program store to query
+ */
+#ifndef __LP64__
+#if (defined(RS_VERSION) && (RS_VERSION >= 16) && (defined(RS_DECLARE_EXPIRED_APIS) || RS_VERSION <= 22))
+extern bool __attribute__((overloadable
+#if (defined(RS_VERSION) && (RS_VERSION >= 22))
+, deprecated
+#endif
+))
+ rsgProgramStoreIsColorMaskGreenEnabled(rs_program_store ps);
+#endif
+#endif
+
+/*
+ * rsgProgramStoreIsColorMaskRedEnabled: Get program store red component color mask
+ *
+ * DEPRECATED. Do not use.
+ *
+ * Get program store red component color mask
+ *
+ * Parameters:
+ * ps: program store to query
+ */
+#ifndef __LP64__
+#if (defined(RS_VERSION) && (RS_VERSION >= 16) && (defined(RS_DECLARE_EXPIRED_APIS) || RS_VERSION <= 22))
+extern bool __attribute__((overloadable
+#if (defined(RS_VERSION) && (RS_VERSION >= 22))
+, deprecated
+#endif
+))
+ rsgProgramStoreIsColorMaskRedEnabled(rs_program_store ps);
+#endif
+#endif
+
+/*
+ * rsgProgramStoreIsDepthMaskEnabled: Get program store depth mask
+ *
+ * DEPRECATED. Do not use.
+ *
+ * Get program store depth mask
+ *
+ * Parameters:
+ * ps: program store to query
+ */
+#ifndef __LP64__
+#if (defined(RS_VERSION) && (RS_VERSION >= 16) && (defined(RS_DECLARE_EXPIRED_APIS) || RS_VERSION <= 22))
+extern bool __attribute__((overloadable
+#if (defined(RS_VERSION) && (RS_VERSION >= 22))
+, deprecated
+#endif
+))
+ rsgProgramStoreIsDepthMaskEnabled(rs_program_store ps);
+#endif
+#endif
+
+/*
+ * rsgProgramStoreIsDitherEnabled: Get program store dither state
+ *
+ * DEPRECATED. Do not use.
+ *
+ * Get program store dither state
+ *
+ * Parameters:
+ * ps: program store to query
+ */
+#ifndef __LP64__
+#if (defined(RS_VERSION) && (RS_VERSION >= 16) && (defined(RS_DECLARE_EXPIRED_APIS) || RS_VERSION <= 22))
+extern bool __attribute__((overloadable
+#if (defined(RS_VERSION) && (RS_VERSION >= 22))
+, deprecated
+#endif
+))
+ rsgProgramStoreIsDitherEnabled(rs_program_store ps);
+#endif
+#endif
+
+#endif // RENDERSCRIPT_RS_GRAPHICS_RSH
diff --git a/current/platform/rs/scriptc/rs_io.rsh b/current/platform/rs/scriptc/rs_io.rsh
new file mode 100644
index 0000000..2ffbe4b
--- /dev/null
+++ b/current/platform/rs/scriptc/rs_io.rsh
@@ -0,0 +1,107 @@
+/*
+ * Copyright (C) 2016 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+// Don't edit this file! It is auto-generated by frameworks/rs/api/generate.sh.
+
+/*
+ * rs_io.rsh: Input/Output Functions
+ *
+ * These functions are used to:
+ * - Send information to the Java client, and
+ * - Send the processed allocation or receive the next allocation to process.
+ */
+
+#ifndef RENDERSCRIPT_RS_IO_RSH
+#define RENDERSCRIPT_RS_IO_RSH
+
+/*
+ * rsAllocationIoReceive: Receive new content from the queue
+ *
+ * Receive a new set of contents from the queue.
+ *
+ * This function should not be called from inside a kernel, or from any function
+ * that may be called directly or indirectly from a kernel. Doing so would cause a
+ * runtime error.
+ *
+ * Parameters:
+ * a: Allocation to work on.
+ */
+#if (defined(RS_VERSION) && (RS_VERSION >= 16))
+extern void __attribute__((overloadable))
+ rsAllocationIoReceive(rs_allocation a);
+#endif
+
+/*
+ * rsAllocationIoSend: Send new content to the queue
+ *
+ * Send the contents of the Allocation to the queue.
+ *
+ * This function should not be called from inside a kernel, or from any function
+ * that may be called directly or indirectly from a kernel. Doing so would cause a
+ * runtime error.
+ *
+ * Parameters:
+ * a: Allocation to work on.
+ */
+#if (defined(RS_VERSION) && (RS_VERSION >= 16))
+extern void __attribute__((overloadable))
+ rsAllocationIoSend(rs_allocation a);
+#endif
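+
+/*
+ * A typical pattern pairs the two calls inside an invokable function (not a
+ * kernel). This is a minimal sketch; the Allocations aIn and aOut are
+ * hypothetical globals bound by the host application:
+ *
+ *   rsAllocationIoReceive(aIn);  // latch the newest frame from the queue
+ *   // ... run a kernel over aIn, writing results into aOut ...
+ *   rsAllocationIoSend(aOut);    // publish the processed frame
+ */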
+
+/*
+ * rsSendToClient: Send a message to the client, non-blocking
+ *
+ * Sends a message back to the client. This call does not block.
+ * It returns true if the message was sent and false if the
+ * message queue is full.
+ *
+ * A message ID is required. The data payload is optional.
+ *
+ * See RenderScript.RSMessageHandler.
+ *
+ * Parameters:
+ * data: Application specific data.
+ * len: Length of the data, in bytes.
+ */
+extern bool __attribute__((overloadable))
+ rsSendToClient(int cmdID);
+
+extern bool __attribute__((overloadable))
+ rsSendToClient(int cmdID, const void* data, uint len);
+
+/*
+ * rsSendToClientBlocking: Send a message to the client, blocking
+ *
+ * Sends a message back to the client. This function will block
+ * until there is room on the message queue for this message.
+ * This function may return before the message has been delivered and
+ * processed by the client.
+ *
+ * A message ID is required. The data payload is optional.
+ *
+ * See RenderScript.RSMessageHandler.
+ *
+ * Parameters:
+ * data: Application specific data.
+ * len: Length of the data, in bytes.
+ */
+extern void __attribute__((overloadable))
+ rsSendToClientBlocking(int cmdID);
+
+extern void __attribute__((overloadable))
+ rsSendToClientBlocking(int cmdID, const void* data, uint len);
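+
+/*
+ * As an illustrative sketch of the two variants (the message ID 42 and the
+ * payload are hypothetical, application-defined values):
+ *
+ *   int results[2] = {640, 480};
+ *   if (!rsSendToClient(42, results, sizeof(results))) {
+ *     // Queue full: fall back to the blocking variant.
+ *     rsSendToClientBlocking(42, results, sizeof(results));
+ *   }
+ */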
+
+#endif // RENDERSCRIPT_RS_IO_RSH
diff --git a/current/platform/rs/scriptc/rs_math.rsh b/current/platform/rs/scriptc/rs_math.rsh
new file mode 100644
index 0000000..3d034d0
--- /dev/null
+++ b/current/platform/rs/scriptc/rs_math.rsh
@@ -0,0 +1,6550 @@
+/*
+ * Copyright (C) 2016 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+// Don't edit this file! It is auto-generated by frameworks/rs/api/generate.sh.
+
+/*
+ * rs_math.rsh: Mathematical Constants and Functions
+ *
+ * The mathematical functions below can be applied to scalars and vectors. When applied
+ * to vectors, the returned value is a vector of the function applied to each entry of the input.
+ *
+ * For example:
+ * float3 a, b;
+ * // The following call sets
+ * // a.x to sin(b.x),
+ * // a.y to sin(b.y), and
+ * // a.z to sin(b.z).
+ * a = sin(b);
+ *
+ *
+ * See Vector Math Functions for functions like distance() and length() that instead
+ * interpret the input as a single vector in n-dimensional space.
+ *
+ * The precision of the mathematical operations on 32 bit floats is affected by the pragmas
+ * rs_fp_relaxed and rs_fp_full. Under rs_fp_relaxed, subnormal values may be flushed to zero and
+ * rounding may be done towards zero. In comparison, rs_fp_full requires correct handling of
+ * subnormal values, i.e. values smaller than 1.17549435e-38f. rs_fp_full also requires round to nearest
+ * with ties to even.
+ *
+ * Different precision/speed tradeoffs can be achieved by using variants of the common math
+ * functions. Functions with a name starting with
+ * - native_: May have custom hardware implementations with weaker precision. Additionally,
+ * subnormal values may be flushed to zero, rounding towards zero may be used, and NaN and
+ * infinity input may not be handled correctly.
+ * - half_: May perform internal computations using 16 bit floats. Additionally, subnormal
+ * values may be flushed to zero, and rounding towards zero may be used.
+ *
+ */
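+
+/*
+ * For example, a script that opts into the relaxed tradeoff places the pragma
+ * near the top of its .rs file (a minimal sketch; the package name is a
+ * hypothetical placeholder):
+ *
+ *   #pragma version(1)
+ *   #pragma rs java_package_name(com.example.filters)
+ *   #pragma rs_fp_relaxed
+ */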
+
+#ifndef RENDERSCRIPT_RS_MATH_RSH
+#define RENDERSCRIPT_RS_MATH_RSH
+
+/*
+ * M_1_PI: 1 / pi, as a 32 bit float
+ *
+ * The inverse of pi, as a 32 bit float.
+ */
+#define M_1_PI 0.318309886183790671537767526745028724f
+
+/*
+ * M_2_PI: 2 / pi, as a 32 bit float
+ *
+ * 2 divided by pi, as a 32 bit float.
+ */
+#define M_2_PI 0.636619772367581343075535053490057448f
+
+/*
+ * M_2_PIl: 2 / pi, as a 32 bit float
+ *
+ * DEPRECATED. Do not use.
+ *
+ * 2 divided by pi, as a 32 bit float.
+ */
+#define M_2_PIl 0.636619772367581343075535053490057448f
+
+/*
+ * M_2_SQRTPI: 2 / sqrt(pi), as a 32 bit float
+ *
+ * 2 divided by the square root of pi, as a 32 bit float.
+ */
+#define M_2_SQRTPI 1.128379167095512573896158903121545172f
+
+/*
+ * M_E: e, as a 32 bit float
+ *
+ * The number e, the base of the natural logarithm, as a 32 bit float.
+ */
+#define M_E 2.718281828459045235360287471352662498f
+
+/*
+ * M_LN10: log_e(10), as a 32 bit float
+ *
+ * The natural logarithm of 10, as a 32 bit float.
+ */
+#define M_LN10 2.302585092994045684017991454684364208f
+
+/*
+ * M_LN2: log_e(2), as a 32 bit float
+ *
+ * The natural logarithm of 2, as a 32 bit float.
+ */
+#define M_LN2 0.693147180559945309417232121458176568f
+
+/*
+ * M_LOG10E: log_10(e), as a 32 bit float
+ *
+ * The logarithm base 10 of e, as a 32 bit float.
+ */
+#define M_LOG10E 0.434294481903251827651128918916605082f
+
+/*
+ * M_LOG2E: log_2(e), as a 32 bit float
+ *
+ * The logarithm base 2 of e, as a 32 bit float.
+ */
+#define M_LOG2E 1.442695040888963407359924681001892137f
+
+/*
+ * M_PI: pi, as a 32 bit float
+ *
+ * The constant pi, as a 32 bit float.
+ */
+#define M_PI 3.141592653589793238462643383279502884f
+
+/*
+ * M_PI_2: pi / 2, as a 32 bit float
+ *
+ * Pi divided by 2, as a 32 bit float.
+ */
+#define M_PI_2 1.570796326794896619231321691639751442f
+
+/*
+ * M_PI_4: pi / 4, as a 32 bit float
+ *
+ * Pi divided by 4, as a 32 bit float.
+ */
+#define M_PI_4 0.785398163397448309615660845819875721f
+
+/*
+ * M_SQRT1_2: 1 / sqrt(2), as a 32 bit float
+ *
+ * The inverse of the square root of 2, as a 32 bit float.
+ */
+#define M_SQRT1_2 0.707106781186547524400844362104849039f
+
+/*
+ * M_SQRT2: sqrt(2), as a 32 bit float
+ *
+ * The square root of 2, as a 32 bit float.
+ */
+#define M_SQRT2 1.414213562373095048801688724209698079f
+
+/*
+ * abs: Absolute value of an integer
+ *
+ * Returns the absolute value of an integer.
+ *
+ * For floats, use fabs().
+ */
+extern uchar __attribute__((const, overloadable))
+ abs(char v);
+
+extern uchar2 __attribute__((const, overloadable))
+ abs(char2 v);
+
+extern uchar3 __attribute__((const, overloadable))
+ abs(char3 v);
+
+extern uchar4 __attribute__((const, overloadable))
+ abs(char4 v);
+
+extern ushort __attribute__((const, overloadable))
+ abs(short v);
+
+extern ushort2 __attribute__((const, overloadable))
+ abs(short2 v);
+
+extern ushort3 __attribute__((const, overloadable))
+ abs(short3 v);
+
+extern ushort4 __attribute__((const, overloadable))
+ abs(short4 v);
+
+extern uint __attribute__((const, overloadable))
+ abs(int v);
+
+extern uint2 __attribute__((const, overloadable))
+ abs(int2 v);
+
+extern uint3 __attribute__((const, overloadable))
+ abs(int3 v);
+
+extern uint4 __attribute__((const, overloadable))
+ abs(int4 v);
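+
+/*
+ * Note that the result type is the matching unsigned type, so the most
+ * negative input has a representable absolute value. For example,
+ * abs((char)-128) can return (uchar)128.
+ */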
+
+/*
+ * acos: Inverse cosine
+ *
+ * Returns the inverse cosine, in radians.
+ *
+ * See also native_acos().
+ */
+extern float __attribute__((const, overloadable))
+ acos(float v);
+
+extern float2 __attribute__((const, overloadable))
+ acos(float2 v);
+
+extern float3 __attribute__((const, overloadable))
+ acos(float3 v);
+
+extern float4 __attribute__((const, overloadable))
+ acos(float4 v);
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 24))
+extern half __attribute__((const, overloadable))
+ acos(half v);
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 24))
+extern half2 __attribute__((const, overloadable))
+ acos(half2 v);
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 24))
+extern half3 __attribute__((const, overloadable))
+ acos(half3 v);
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 24))
+extern half4 __attribute__((const, overloadable))
+ acos(half4 v);
+#endif
+
+/*
+ * acosh: Inverse hyperbolic cosine
+ *
+ * Returns the inverse hyperbolic cosine, in radians.
+ *
+ * See also native_acosh().
+ */
+extern float __attribute__((const, overloadable))
+ acosh(float v);
+
+extern float2 __attribute__((const, overloadable))
+ acosh(float2 v);
+
+extern float3 __attribute__((const, overloadable))
+ acosh(float3 v);
+
+extern float4 __attribute__((const, overloadable))
+ acosh(float4 v);
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 24))
+extern half __attribute__((const, overloadable))
+ acosh(half v);
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 24))
+extern half2 __attribute__((const, overloadable))
+ acosh(half2 v);
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 24))
+extern half3 __attribute__((const, overloadable))
+ acosh(half3 v);
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 24))
+extern half4 __attribute__((const, overloadable))
+ acosh(half4 v);
+#endif
+
+/*
+ * acospi: Inverse cosine divided by pi
+ *
+ * Returns the inverse cosine in radians, divided by pi.
+ *
+ * To get an inverse cosine measured in degrees, use acospi(a) * 180.f.
+ *
+ * See also native_acospi().
+ */
+extern float __attribute__((const, overloadable))
+ acospi(float v);
+
+extern float2 __attribute__((const, overloadable))
+ acospi(float2 v);
+
+extern float3 __attribute__((const, overloadable))
+ acospi(float3 v);
+
+extern float4 __attribute__((const, overloadable))
+ acospi(float4 v);
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 24))
+extern half __attribute__((const, overloadable))
+ acospi(half v);
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 24))
+extern half2 __attribute__((const, overloadable))
+ acospi(half2 v);
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 24))
+extern half3 __attribute__((const, overloadable))
+ acospi(half3 v);
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 24))
+extern half4 __attribute__((const, overloadable))
+ acospi(half4 v);
+#endif
+
+/*
+ * asin: Inverse sine
+ *
+ * Returns the inverse sine, in radians.
+ *
+ * See also native_asin().
+ */
+extern float __attribute__((const, overloadable))
+ asin(float v);
+
+extern float2 __attribute__((const, overloadable))
+ asin(float2 v);
+
+extern float3 __attribute__((const, overloadable))
+ asin(float3 v);
+
+extern float4 __attribute__((const, overloadable))
+ asin(float4 v);
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 24))
+extern half __attribute__((const, overloadable))
+ asin(half v);
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 24))
+extern half2 __attribute__((const, overloadable))
+ asin(half2 v);
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 24))
+extern half3 __attribute__((const, overloadable))
+ asin(half3 v);
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 24))
+extern half4 __attribute__((const, overloadable))
+ asin(half4 v);
+#endif
+
+/*
+ * asinh: Inverse hyperbolic sine
+ *
+ * Returns the inverse hyperbolic sine, in radians.
+ *
+ * See also native_asinh().
+ */
+extern float __attribute__((const, overloadable))
+ asinh(float v);
+
+extern float2 __attribute__((const, overloadable))
+ asinh(float2 v);
+
+extern float3 __attribute__((const, overloadable))
+ asinh(float3 v);
+
+extern float4 __attribute__((const, overloadable))
+ asinh(float4 v);
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 24))
+extern half __attribute__((const, overloadable))
+ asinh(half v);
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 24))
+extern half2 __attribute__((const, overloadable))
+ asinh(half2 v);
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 24))
+extern half3 __attribute__((const, overloadable))
+ asinh(half3 v);
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 24))
+extern half4 __attribute__((const, overloadable))
+ asinh(half4 v);
+#endif
+
+/*
+ * asinpi: Inverse sine divided by pi
+ *
+ * Returns the inverse sine in radians, divided by pi.
+ *
+ * To get an inverse sine measured in degrees, use asinpi(a) * 180.f.
+ *
+ * See also native_asinpi().
+ */
+extern float __attribute__((const, overloadable))
+ asinpi(float v);
+
+extern float2 __attribute__((const, overloadable))
+ asinpi(float2 v);
+
+extern float3 __attribute__((const, overloadable))
+ asinpi(float3 v);
+
+extern float4 __attribute__((const, overloadable))
+ asinpi(float4 v);
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 24))
+extern half __attribute__((const, overloadable))
+ asinpi(half v);
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 24))
+extern half2 __attribute__((const, overloadable))
+ asinpi(half2 v);
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 24))
+extern half3 __attribute__((const, overloadable))
+ asinpi(half3 v);
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 24))
+extern half4 __attribute__((const, overloadable))
+ asinpi(half4 v);
+#endif
+
+/*
+ * atan: Inverse tangent
+ *
+ * Returns the inverse tangent, in radians.
+ *
+ * See also native_atan().
+ */
+extern float __attribute__((const, overloadable))
+ atan(float v);
+
+extern float2 __attribute__((const, overloadable))
+ atan(float2 v);
+
+extern float3 __attribute__((const, overloadable))
+ atan(float3 v);
+
+extern float4 __attribute__((const, overloadable))
+ atan(float4 v);
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 24))
+extern half __attribute__((const, overloadable))
+ atan(half v);
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 24))
+extern half2 __attribute__((const, overloadable))
+ atan(half2 v);
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 24))
+extern half3 __attribute__((const, overloadable))
+ atan(half3 v);
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 24))
+extern half4 __attribute__((const, overloadable))
+ atan(half4 v);
+#endif
+
+/*
+ * atan2: Inverse tangent of a ratio
+ *
+ * Returns the inverse tangent of (numerator / denominator), in radians.
+ *
+ * See also native_atan2().
+ *
+ * Parameters:
+ * numerator: Numerator.
+ * denominator: Denominator. Can be 0.
+ */
+extern float __attribute__((const, overloadable))
+ atan2(float numerator, float denominator);
+
+extern float2 __attribute__((const, overloadable))
+ atan2(float2 numerator, float2 denominator);
+
+extern float3 __attribute__((const, overloadable))
+ atan2(float3 numerator, float3 denominator);
+
+extern float4 __attribute__((const, overloadable))
+ atan2(float4 numerator, float4 denominator);
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 24))
+extern half __attribute__((const, overloadable))
+ atan2(half numerator, half denominator);
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 24))
+extern half2 __attribute__((const, overloadable))
+ atan2(half2 numerator, half2 denominator);
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 24))
+extern half3 __attribute__((const, overloadable))
+ atan2(half3 numerator, half3 denominator);
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 24))
+extern half4 __attribute__((const, overloadable))
+ atan2(half4 numerator, half4 denominator);
+#endif
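+
+/*
+ * A typical use of atan2() is recovering the angle of a 2D point. A minimal
+ * sketch (the variables are placeholders):
+ *
+ *   float2 p = {1.f, 1.f};
+ *   float angle = atan2(p.y, p.x);  // M_PI_4 radians, i.e. 45 degrees
+ */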
+
+/*
+ * atan2pi: Inverse tangent of a ratio, divided by pi
+ *
+ * Returns the inverse tangent of (numerator / denominator), in radians, divided by pi.
+ *
+ * To get an inverse tangent measured in degrees, use atan2pi(n, d) * 180.f.
+ *
+ * See also native_atan2pi().
+ *
+ * Parameters:
+ * numerator: Numerator.
+ * denominator: Denominator. Can be 0.
+ */
+extern float __attribute__((const, overloadable))
+ atan2pi(float numerator, float denominator);
+
+extern float2 __attribute__((const, overloadable))
+ atan2pi(float2 numerator, float2 denominator);
+
+extern float3 __attribute__((const, overloadable))
+ atan2pi(float3 numerator, float3 denominator);
+
+extern float4 __attribute__((const, overloadable))
+ atan2pi(float4 numerator, float4 denominator);
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 24))
+extern half __attribute__((const, overloadable))
+ atan2pi(half numerator, half denominator);
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 24))
+extern half2 __attribute__((const, overloadable))
+ atan2pi(half2 numerator, half2 denominator);
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 24))
+extern half3 __attribute__((const, overloadable))
+ atan2pi(half3 numerator, half3 denominator);
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 24))
+extern half4 __attribute__((const, overloadable))
+ atan2pi(half4 numerator, half4 denominator);
+#endif
+
+/*
+ * atanh: Inverse hyperbolic tangent
+ *
+ * Returns the inverse hyperbolic tangent, in radians.
+ *
+ * See also native_atanh().
+ */
+extern float __attribute__((const, overloadable))
+ atanh(float v);
+
+extern float2 __attribute__((const, overloadable))
+ atanh(float2 v);
+
+extern float3 __attribute__((const, overloadable))
+ atanh(float3 v);
+
+extern float4 __attribute__((const, overloadable))
+ atanh(float4 v);
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 24))
+extern half __attribute__((const, overloadable))
+ atanh(half v);
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 24))
+extern half2 __attribute__((const, overloadable))
+ atanh(half2 v);
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 24))
+extern half3 __attribute__((const, overloadable))
+ atanh(half3 v);
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 24))
+extern half4 __attribute__((const, overloadable))
+ atanh(half4 v);
+#endif
+
+/*
+ * atanpi: Inverse tangent divided by pi
+ *
+ * Returns the inverse tangent in radians, divided by pi.
+ *
+ * To get an inverse tangent measured in degrees, use atanpi(a) * 180.f.
+ *
+ * See also native_atanpi().
+ */
+extern float __attribute__((const, overloadable))
+ atanpi(float v);
+
+extern float2 __attribute__((const, overloadable))
+ atanpi(float2 v);
+
+extern float3 __attribute__((const, overloadable))
+ atanpi(float3 v);
+
+extern float4 __attribute__((const, overloadable))
+ atanpi(float4 v);
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 24))
+extern half __attribute__((const, overloadable))
+ atanpi(half v);
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 24))
+extern half2 __attribute__((const, overloadable))
+ atanpi(half2 v);
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 24))
+extern half3 __attribute__((const, overloadable))
+ atanpi(half3 v);
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 24))
+extern half4 __attribute__((const, overloadable))
+ atanpi(half4 v);
+#endif
+
+/*
+ * cbrt: Cube root
+ *
+ * Returns the cube root.
+ *
+ * See also native_cbrt().
+ */
+extern float __attribute__((const, overloadable))
+ cbrt(float v);
+
+extern float2 __attribute__((const, overloadable))
+ cbrt(float2 v);
+
+extern float3 __attribute__((const, overloadable))
+ cbrt(float3 v);
+
+extern float4 __attribute__((const, overloadable))
+ cbrt(float4 v);
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 24))
+extern half __attribute__((const, overloadable))
+ cbrt(half v);
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 24))
+extern half2 __attribute__((const, overloadable))
+ cbrt(half2 v);
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 24))
+extern half3 __attribute__((const, overloadable))
+ cbrt(half3 v);
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 24))
+extern half4 __attribute__((const, overloadable))
+ cbrt(half4 v);
+#endif
+
+/*
+ * ceil: Smallest integer not less than a value
+ *
+ * Returns the smallest integer not less than a value.
+ *
+ * For example, ceil(1.2f) returns 2.f, and ceil(-1.2f) returns -1.f.
+ *
+ * See also floor().
+ */
+extern float __attribute__((const, overloadable))
+ ceil(float v);
+
+extern float2 __attribute__((const, overloadable))
+ ceil(float2 v);
+
+extern float3 __attribute__((const, overloadable))
+ ceil(float3 v);
+
+extern float4 __attribute__((const, overloadable))
+ ceil(float4 v);
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 24))
+extern half __attribute__((const, overloadable))
+ ceil(half v);
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 24))
+extern half2 __attribute__((const, overloadable))
+ ceil(half2 v);
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 24))
+extern half3 __attribute__((const, overloadable))
+ ceil(half3 v);
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 24))
+extern half4 __attribute__((const, overloadable))
+ ceil(half4 v);
+#endif
+
+/*
+ * clamp: Restrain a value to a range
+ *
+ * Clamps a value to a specified high and low bound. clamp() returns min_value
+ * if value < min_value, max_value if value > max_value, otherwise value.
+ *
+ * There are two variants of clamp: one where the min and max are scalars applied
+ * to all entries of the value, the other where the min and max are also vectors.
+ *
+ * If min_value is greater than max_value, the results are undefined.
+ *
+ * Parameters:
+ * value: Value to be clamped.
+ * min_value: Lower bound, a scalar or matching vector.
+ *   max_value: Upper bound, must match the type of min_value.
+ */
+extern float __attribute__((const, overloadable))
+ clamp(float value, float min_value, float max_value);
+
+extern float2 __attribute__((const, overloadable))
+ clamp(float2 value, float2 min_value, float2 max_value);
+
+extern float3 __attribute__((const, overloadable))
+ clamp(float3 value, float3 min_value, float3 max_value);
+
+extern float4 __attribute__((const, overloadable))
+ clamp(float4 value, float4 min_value, float4 max_value);
+
+extern float2 __attribute__((const, overloadable))
+ clamp(float2 value, float min_value, float max_value);
+
+extern float3 __attribute__((const, overloadable))
+ clamp(float3 value, float min_value, float max_value);
+
+extern float4 __attribute__((const, overloadable))
+ clamp(float4 value, float min_value, float max_value);
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 19))
+extern char __attribute__((const, overloadable))
+ clamp(char value, char min_value, char max_value);
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 19))
+extern char2 __attribute__((const, overloadable))
+ clamp(char2 value, char2 min_value, char2 max_value);
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 19))
+extern char3 __attribute__((const, overloadable))
+ clamp(char3 value, char3 min_value, char3 max_value);
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 19))
+extern char4 __attribute__((const, overloadable))
+ clamp(char4 value, char4 min_value, char4 max_value);
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 19))
+extern uchar __attribute__((const, overloadable))
+ clamp(uchar value, uchar min_value, uchar max_value);
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 19))
+extern uchar2 __attribute__((const, overloadable))
+ clamp(uchar2 value, uchar2 min_value, uchar2 max_value);
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 19))
+extern uchar3 __attribute__((const, overloadable))
+ clamp(uchar3 value, uchar3 min_value, uchar3 max_value);
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 19))
+extern uchar4 __attribute__((const, overloadable))
+ clamp(uchar4 value, uchar4 min_value, uchar4 max_value);
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 19))
+extern short __attribute__((const, overloadable))
+ clamp(short value, short min_value, short max_value);
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 19))
+extern short2 __attribute__((const, overloadable))
+ clamp(short2 value, short2 min_value, short2 max_value);
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 19))
+extern short3 __attribute__((const, overloadable))
+ clamp(short3 value, short3 min_value, short3 max_value);
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 19))
+extern short4 __attribute__((const, overloadable))
+ clamp(short4 value, short4 min_value, short4 max_value);
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 19))
+extern ushort __attribute__((const, overloadable))
+ clamp(ushort value, ushort min_value, ushort max_value);
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 19))
+extern ushort2 __attribute__((const, overloadable))
+ clamp(ushort2 value, ushort2 min_value, ushort2 max_value);
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 19))
+extern ushort3 __attribute__((const, overloadable))
+ clamp(ushort3 value, ushort3 min_value, ushort3 max_value);
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 19))
+extern ushort4 __attribute__((const, overloadable))
+ clamp(ushort4 value, ushort4 min_value, ushort4 max_value);
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 19))
+extern int __attribute__((const, overloadable))
+ clamp(int value, int min_value, int max_value);
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 19))
+extern int2 __attribute__((const, overloadable))
+ clamp(int2 value, int2 min_value, int2 max_value);
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 19))
+extern int3 __attribute__((const, overloadable))
+ clamp(int3 value, int3 min_value, int3 max_value);
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 19))
+extern int4 __attribute__((const, overloadable))
+ clamp(int4 value, int4 min_value, int4 max_value);
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 19))
+extern uint __attribute__((const, overloadable))
+ clamp(uint value, uint min_value, uint max_value);
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 19))
+extern uint2 __attribute__((const, overloadable))
+ clamp(uint2 value, uint2 min_value, uint2 max_value);
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 19))
+extern uint3 __attribute__((const, overloadable))
+ clamp(uint3 value, uint3 min_value, uint3 max_value);
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 19))
+extern uint4 __attribute__((const, overloadable))
+ clamp(uint4 value, uint4 min_value, uint4 max_value);
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 19))
+extern long __attribute__((const, overloadable))
+ clamp(long value, long min_value, long max_value);
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 19))
+extern long2 __attribute__((const, overloadable))
+ clamp(long2 value, long2 min_value, long2 max_value);
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 19))
+extern long3 __attribute__((const, overloadable))
+ clamp(long3 value, long3 min_value, long3 max_value);
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 19))
+extern long4 __attribute__((const, overloadable))
+ clamp(long4 value, long4 min_value, long4 max_value);
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 19))
+extern ulong __attribute__((const, overloadable))
+ clamp(ulong value, ulong min_value, ulong max_value);
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 19))
+extern ulong2 __attribute__((const, overloadable))
+ clamp(ulong2 value, ulong2 min_value, ulong2 max_value);
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 19))
+extern ulong3 __attribute__((const, overloadable))
+ clamp(ulong3 value, ulong3 min_value, ulong3 max_value);
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 19))
+extern ulong4 __attribute__((const, overloadable))
+ clamp(ulong4 value, ulong4 min_value, ulong4 max_value);
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 19))
+extern char2 __attribute__((const, overloadable))
+ clamp(char2 value, char min_value, char max_value);
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 19))
+extern char3 __attribute__((const, overloadable))
+ clamp(char3 value, char min_value, char max_value);
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 19))
+extern char4 __attribute__((const, overloadable))
+ clamp(char4 value, char min_value, char max_value);
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 19))
+extern uchar2 __attribute__((const, overloadable))
+ clamp(uchar2 value, uchar min_value, uchar max_value);
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 19))
+extern uchar3 __attribute__((const, overloadable))
+ clamp(uchar3 value, uchar min_value, uchar max_value);
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 19))
+extern uchar4 __attribute__((const, overloadable))
+ clamp(uchar4 value, uchar min_value, uchar max_value);
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 19))
+extern short2 __attribute__((const, overloadable))
+ clamp(short2 value, short min_value, short max_value);
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 19))
+extern short3 __attribute__((const, overloadable))
+ clamp(short3 value, short min_value, short max_value);
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 19))
+extern short4 __attribute__((const, overloadable))
+ clamp(short4 value, short min_value, short max_value);
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 19))
+extern ushort2 __attribute__((const, overloadable))
+ clamp(ushort2 value, ushort min_value, ushort max_value);
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 19))
+extern ushort3 __attribute__((const, overloadable))
+ clamp(ushort3 value, ushort min_value, ushort max_value);
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 19))
+extern ushort4 __attribute__((const, overloadable))
+ clamp(ushort4 value, ushort min_value, ushort max_value);
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 19))
+extern int2 __attribute__((const, overloadable))
+ clamp(int2 value, int min_value, int max_value);
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 19))
+extern int3 __attribute__((const, overloadable))
+ clamp(int3 value, int min_value, int max_value);
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 19))
+extern int4 __attribute__((const, overloadable))
+ clamp(int4 value, int min_value, int max_value);
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 19))
+extern uint2 __attribute__((const, overloadable))
+ clamp(uint2 value, uint min_value, uint max_value);
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 19))
+extern uint3 __attribute__((const, overloadable))
+ clamp(uint3 value, uint min_value, uint max_value);
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 19))
+extern uint4 __attribute__((const, overloadable))
+ clamp(uint4 value, uint min_value, uint max_value);
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 19))
+extern long2 __attribute__((const, overloadable))
+ clamp(long2 value, long min_value, long max_value);
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 19))
+extern long3 __attribute__((const, overloadable))
+ clamp(long3 value, long min_value, long max_value);
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 19))
+extern long4 __attribute__((const, overloadable))
+ clamp(long4 value, long min_value, long max_value);
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 19))
+extern ulong2 __attribute__((const, overloadable))
+ clamp(ulong2 value, ulong min_value, ulong max_value);
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 19))
+extern ulong3 __attribute__((const, overloadable))
+ clamp(ulong3 value, ulong min_value, ulong max_value);
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 19))
+extern ulong4 __attribute__((const, overloadable))
+ clamp(ulong4 value, ulong min_value, ulong max_value);
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 24))
+extern half __attribute__((const, overloadable))
+ clamp(half value, half min_value, half max_value);
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 24))
+extern half2 __attribute__((const, overloadable))
+ clamp(half2 value, half2 min_value, half2 max_value);
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 24))
+extern half3 __attribute__((const, overloadable))
+ clamp(half3 value, half3 min_value, half3 max_value);
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 24))
+extern half4 __attribute__((const, overloadable))
+ clamp(half4 value, half4 min_value, half4 max_value);
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 24))
+extern half2 __attribute__((const, overloadable))
+ clamp(half2 value, half min_value, half max_value);
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 24))
+extern half3 __attribute__((const, overloadable))
+ clamp(half3 value, half min_value, half max_value);
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 24))
+extern half4 __attribute__((const, overloadable))
+ clamp(half4 value, half min_value, half max_value);
+#endif
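+
+/*
+ * For example, to saturate a color to the [0.f, 1.f] range with the
+ * scalar-bound variant:
+ *
+ *   float3 color = {1.2f, -0.1f, 0.5f};
+ *   color = clamp(color, 0.f, 1.f);  // yields {1.f, 0.f, 0.5f}
+ */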
+
+/*
+ * clz: Number of leading 0 bits
+ *
+ * Returns the number of leading 0-bits in a value.
+ *
+ * For example, clz((char)0x03) returns 6.
+ */
+extern char __attribute__((const, overloadable))
+ clz(char value);
+
+extern char2 __attribute__((const, overloadable))
+ clz(char2 value);
+
+extern char3 __attribute__((const, overloadable))
+ clz(char3 value);
+
+extern char4 __attribute__((const, overloadable))
+ clz(char4 value);
+
+extern uchar __attribute__((const, overloadable))
+ clz(uchar value);
+
+extern uchar2 __attribute__((const, overloadable))
+ clz(uchar2 value);
+
+extern uchar3 __attribute__((const, overloadable))
+ clz(uchar3 value);
+
+extern uchar4 __attribute__((const, overloadable))
+ clz(uchar4 value);
+
+extern short __attribute__((const, overloadable))
+ clz(short value);
+
+extern short2 __attribute__((const, overloadable))
+ clz(short2 value);
+
+extern short3 __attribute__((const, overloadable))
+ clz(short3 value);
+
+extern short4 __attribute__((const, overloadable))
+ clz(short4 value);
+
+extern ushort __attribute__((const, overloadable))
+ clz(ushort value);
+
+extern ushort2 __attribute__((const, overloadable))
+ clz(ushort2 value);
+
+extern ushort3 __attribute__((const, overloadable))
+ clz(ushort3 value);
+
+extern ushort4 __attribute__((const, overloadable))
+ clz(ushort4 value);
+
+extern int __attribute__((const, overloadable))
+ clz(int value);
+
+extern int2 __attribute__((const, overloadable))
+ clz(int2 value);
+
+extern int3 __attribute__((const, overloadable))
+ clz(int3 value);
+
+extern int4 __attribute__((const, overloadable))
+ clz(int4 value);
+
+extern uint __attribute__((const, overloadable))
+ clz(uint value);
+
+extern uint2 __attribute__((const, overloadable))
+ clz(uint2 value);
+
+extern uint3 __attribute__((const, overloadable))
+ clz(uint3 value);
+
+extern uint4 __attribute__((const, overloadable))
+ clz(uint4 value);
+
+/*
+ * copysign: Copies the sign of a number to another
+ *
+ * Copies the sign from sign_value to magnitude_value.
+ *
+ * The value returned is either magnitude_value or -magnitude_value.
+ *
+ * For example, copysign(4.0f, -2.7f) returns -4.0f and copysign(-4.0f, 2.7f) returns 4.0f.
+ */
+extern float __attribute__((const, overloadable))
+ copysign(float magnitude_value, float sign_value);
+
+extern float2 __attribute__((const, overloadable))
+ copysign(float2 magnitude_value, float2 sign_value);
+
+extern float3 __attribute__((const, overloadable))
+ copysign(float3 magnitude_value, float3 sign_value);
+
+extern float4 __attribute__((const, overloadable))
+ copysign(float4 magnitude_value, float4 sign_value);
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 24))
+extern half __attribute__((const, overloadable))
+ copysign(half magnitude_value, half sign_value);
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 24))
+extern half2 __attribute__((const, overloadable))
+ copysign(half2 magnitude_value, half2 sign_value);
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 24))
+extern half3 __attribute__((const, overloadable))
+ copysign(half3 magnitude_value, half3 sign_value);
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 24))
+extern half4 __attribute__((const, overloadable))
+ copysign(half4 magnitude_value, half4 sign_value);
+#endif
+
+/*
+ * cos: Cosine
+ *
+ * Returns the cosine of an angle measured in radians.
+ *
+ * See also native_cos().
+ */
+extern float __attribute__((const, overloadable))
+ cos(float v);
+
+extern float2 __attribute__((const, overloadable))
+ cos(float2 v);
+
+extern float3 __attribute__((const, overloadable))
+ cos(float3 v);
+
+extern float4 __attribute__((const, overloadable))
+ cos(float4 v);
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 24))
+extern half __attribute__((const, overloadable))
+ cos(half v);
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 24))
+extern half2 __attribute__((const, overloadable))
+ cos(half2 v);
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 24))
+extern half3 __attribute__((const, overloadable))
+ cos(half3 v);
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 24))
+extern half4 __attribute__((const, overloadable))
+ cos(half4 v);
+#endif
+
+/*
+ * cosh: Hyperbolic cosine
+ *
+ * Returns the hyperbolic cosine of v, where v is measured in radians.
+ *
+ * See also native_cosh().
+ */
+extern float __attribute__((const, overloadable))
+ cosh(float v);
+
+extern float2 __attribute__((const, overloadable))
+ cosh(float2 v);
+
+extern float3 __attribute__((const, overloadable))
+ cosh(float3 v);
+
+extern float4 __attribute__((const, overloadable))
+ cosh(float4 v);
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 24))
+extern half __attribute__((const, overloadable))
+ cosh(half v);
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 24))
+extern half2 __attribute__((const, overloadable))
+ cosh(half2 v);
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 24))
+extern half3 __attribute__((const, overloadable))
+ cosh(half3 v);
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 24))
+extern half4 __attribute__((const, overloadable))
+ cosh(half4 v);
+#endif
+
+/*
+ * cospi: Cosine of a number multiplied by pi
+ *
+ * Returns the cosine of (v * pi), where (v * pi) is measured in radians.
+ *
+ * To get the cosine of a value measured in degrees, call cospi(v / 180.f).
+ *
+ * See also native_cospi().
+ */
+extern float __attribute__((const, overloadable))
+ cospi(float v);
+
+extern float2 __attribute__((const, overloadable))
+ cospi(float2 v);
+
+extern float3 __attribute__((const, overloadable))
+ cospi(float3 v);
+
+extern float4 __attribute__((const, overloadable))
+ cospi(float4 v);
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 24))
+extern half __attribute__((const, overloadable))
+ cospi(half v);
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 24))
+extern half2 __attribute__((const, overloadable))
+ cospi(half2 v);
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 24))
+extern half3 __attribute__((const, overloadable))
+ cospi(half3 v);
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 24))
+extern half4 __attribute__((const, overloadable))
+ cospi(half4 v);
+#endif
+
+/*
+ * degrees: Converts radians into degrees
+ *
+ * Converts from radians to degrees.
+ */
+extern float __attribute__((const, overloadable))
+ degrees(float v);
+
+extern float2 __attribute__((const, overloadable))
+ degrees(float2 v);
+
+extern float3 __attribute__((const, overloadable))
+ degrees(float3 v);
+
+extern float4 __attribute__((const, overloadable))
+ degrees(float4 v);
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 24))
+extern half __attribute__((const, overloadable))
+ degrees(half v);
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 24))
+extern half2 __attribute__((const, overloadable))
+ degrees(half2 v);
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 24))
+extern half3 __attribute__((const, overloadable))
+ degrees(half3 v);
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 24))
+extern half4 __attribute__((const, overloadable))
+ degrees(half4 v);
+#endif
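+
+/*
+ * For example, degrees(M_PI) converts pi radians into 180 degrees.
+ */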
+
+/*
+ * erf: Mathematical error function
+ *
+ * Returns the error function.
+ */
+extern float __attribute__((const, overloadable))
+ erf(float v);
+
+extern float2 __attribute__((const, overloadable))
+ erf(float2 v);
+
+extern float3 __attribute__((const, overloadable))
+ erf(float3 v);
+
+extern float4 __attribute__((const, overloadable))
+ erf(float4 v);
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 24))
+extern half __attribute__((const, overloadable))
+ erf(half v);
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 24))
+extern half2 __attribute__((const, overloadable))
+ erf(half2 v);
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 24))
+extern half3 __attribute__((const, overloadable))
+ erf(half3 v);
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 24))
+extern half4 __attribute__((const, overloadable))
+ erf(half4 v);
+#endif
+
+/*
+ * erfc: Mathematical complementary error function
+ *
+ * Returns the complementary error function.
+ */
+extern float __attribute__((const, overloadable))
+ erfc(float v);
+
+extern float2 __attribute__((const, overloadable))
+ erfc(float2 v);
+
+extern float3 __attribute__((const, overloadable))
+ erfc(float3 v);
+
+extern float4 __attribute__((const, overloadable))
+ erfc(float4 v);
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 24))
+extern half __attribute__((const, overloadable))
+ erfc(half v);
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 24))
+extern half2 __attribute__((const, overloadable))
+ erfc(half2 v);
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 24))
+extern half3 __attribute__((const, overloadable))
+ erfc(half3 v);
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 24))
+extern half4 __attribute__((const, overloadable))
+ erfc(half4 v);
+#endif
+
+/*
+ * exp: e raised to a number
+ *
+ * Returns e raised to v, i.e. e ^ v.
+ *
+ * See also native_exp().
+ */
+extern float __attribute__((const, overloadable))
+ exp(float v);
+
+extern float2 __attribute__((const, overloadable))
+ exp(float2 v);
+
+extern float3 __attribute__((const, overloadable))
+ exp(float3 v);
+
+extern float4 __attribute__((const, overloadable))
+ exp(float4 v);
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 24))
+extern half __attribute__((const, overloadable))
+ exp(half v);
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 24))
+extern half2 __attribute__((const, overloadable))
+ exp(half2 v);
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 24))
+extern half3 __attribute__((const, overloadable))
+ exp(half3 v);
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 24))
+extern half4 __attribute__((const, overloadable))
+ exp(half4 v);
+#endif
+
+/*
+ * exp10: 10 raised to a number
+ *
+ * Returns 10 raised to v, i.e. 10.f ^ v.
+ *
+ * See also native_exp10().
+ */
+extern float __attribute__((const, overloadable))
+ exp10(float v);
+
+extern float2 __attribute__((const, overloadable))
+ exp10(float2 v);
+
+extern float3 __attribute__((const, overloadable))
+ exp10(float3 v);
+
+extern float4 __attribute__((const, overloadable))
+ exp10(float4 v);
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 24))
+extern half __attribute__((const, overloadable))
+ exp10(half v);
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 24))
+extern half2 __attribute__((const, overloadable))
+ exp10(half2 v);
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 24))
+extern half3 __attribute__((const, overloadable))
+ exp10(half3 v);
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 24))
+extern half4 __attribute__((const, overloadable))
+ exp10(half4 v);
+#endif
+
+/*
+ * exp2: 2 raised to a number
+ *
+ * Returns 2 raised to v, i.e. 2.f ^ v.
+ *
+ * See also native_exp2().
+ */
+extern float __attribute__((const, overloadable))
+ exp2(float v);
+
+extern float2 __attribute__((const, overloadable))
+ exp2(float2 v);
+
+extern float3 __attribute__((const, overloadable))
+ exp2(float3 v);
+
+extern float4 __attribute__((const, overloadable))
+ exp2(float4 v);
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 24))
+extern half __attribute__((const, overloadable))
+ exp2(half v);
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 24))
+extern half2 __attribute__((const, overloadable))
+ exp2(half2 v);
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 24))
+extern half3 __attribute__((const, overloadable))
+ exp2(half3 v);
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 24))
+extern half4 __attribute__((const, overloadable))
+ exp2(half4 v);
+#endif
+
+/*
+ * expm1: e raised to a number minus one
+ *
+ * Returns e raised to v minus 1, i.e. (e ^ v) - 1.
+ *
+ * See also native_expm1().
+ */
+extern float __attribute__((const, overloadable))
+ expm1(float v);
+
+extern float2 __attribute__((const, overloadable))
+ expm1(float2 v);
+
+extern float3 __attribute__((const, overloadable))
+ expm1(float3 v);
+
+extern float4 __attribute__((const, overloadable))
+ expm1(float4 v);
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 24))
+extern half __attribute__((const, overloadable))
+ expm1(half v);
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 24))
+extern half2 __attribute__((const, overloadable))
+ expm1(half2 v);
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 24))
+extern half3 __attribute__((const, overloadable))
+ expm1(half3 v);
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 24))
+extern half4 __attribute__((const, overloadable))
+ expm1(half4 v);
+#endif
+
+/*
+ * fabs: Absolute value of a float
+ *
+ * Returns the absolute value of the float v.
+ *
+ * For integers, use abs().
+ */
+extern float __attribute__((const, overloadable))
+ fabs(float v);
+
+extern float2 __attribute__((const, overloadable))
+ fabs(float2 v);
+
+extern float3 __attribute__((const, overloadable))
+ fabs(float3 v);
+
+extern float4 __attribute__((const, overloadable))
+ fabs(float4 v);
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 24))
+extern half __attribute__((const, overloadable))
+ fabs(half v);
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 24))
+extern half2 __attribute__((const, overloadable))
+ fabs(half2 v);
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 24))
+extern half3 __attribute__((const, overloadable))
+ fabs(half3 v);
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 24))
+extern half4 __attribute__((const, overloadable))
+ fabs(half4 v);
+#endif
+
+/*
+ * fdim: Positive difference between two values
+ *
+ * Returns the positive difference between two values.
+ *
+ * If a > b, returns (a - b); otherwise returns 0.f.
+ */
+extern float __attribute__((const, overloadable))
+ fdim(float a, float b);
+
+extern float2 __attribute__((const, overloadable))
+ fdim(float2 a, float2 b);
+
+extern float3 __attribute__((const, overloadable))
+ fdim(float3 a, float3 b);
+
+extern float4 __attribute__((const, overloadable))
+ fdim(float4 a, float4 b);
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 24))
+extern half __attribute__((const, overloadable))
+ fdim(half a, half b);
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 24))
+extern half2 __attribute__((const, overloadable))
+ fdim(half2 a, half2 b);
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 24))
+extern half3 __attribute__((const, overloadable))
+ fdim(half3 a, half3 b);
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 24))
+extern half4 __attribute__((const, overloadable))
+ fdim(half4 a, half4 b);
+#endif
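+
+/*
+ * For example, fdim(5.f, 3.f) returns 2.f, while fdim(3.f, 5.f) returns 0.f.
+ */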
+
+/*
+ * floor: Smallest integer not greater than a value
+ *
+ * Returns the smallest integer not greater than a value.
+ *
+ * For example, floor(1.2f) returns 1.f, and floor(-1.2f) returns -2.f.
+ *
+ * See also ceil().
+ */
+extern float __attribute__((const, overloadable))
+ floor(float v);
+
+extern float2 __attribute__((const, overloadable))
+ floor(float2 v);
+
+extern float3 __attribute__((const, overloadable))
+ floor(float3 v);
+
+extern float4 __attribute__((const, overloadable))
+ floor(float4 v);
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 24))
+extern half __attribute__((const, overloadable))
+ floor(half v);
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 24))
+extern half2 __attribute__((const, overloadable))
+ floor(half2 v);
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 24))
+extern half3 __attribute__((const, overloadable))
+ floor(half3 v);
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 24))
+extern half4 __attribute__((const, overloadable))
+ floor(half4 v);
+#endif
+
+/*
+ * fma: Multiply and add
+ *
+ * Multiply and add. Returns (multiplicand1 * multiplicand2) + offset.
+ *
+ * This function is similar to mad(). fma() retains full precision of the multiplied result
+ * and rounds only after the addition. mad() rounds after the multiplication and the addition.
+ * This extra precision is not guaranteed in rs_fp_relaxed mode.
+ */
+extern float __attribute__((const, overloadable))
+ fma(float multiplicand1, float multiplicand2, float offset);
+
+extern float2 __attribute__((const, overloadable))
+ fma(float2 multiplicand1, float2 multiplicand2, float2 offset);
+
+extern float3 __attribute__((const, overloadable))
+ fma(float3 multiplicand1, float3 multiplicand2, float3 offset);
+
+extern float4 __attribute__((const, overloadable))
+ fma(float4 multiplicand1, float4 multiplicand2, float4 offset);
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 24))
+extern half __attribute__((const, overloadable))
+ fma(half multiplicand1, half multiplicand2, half offset);
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 24))
+extern half2 __attribute__((const, overloadable))
+ fma(half2 multiplicand1, half2 multiplicand2, half2 offset);
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 24))
+extern half3 __attribute__((const, overloadable))
+ fma(half3 multiplicand1, half3 multiplicand2, half3 offset);
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 24))
+extern half4 __attribute__((const, overloadable))
+ fma(half4 multiplicand1, half4 multiplicand2, half4 offset);
+#endif
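+
+/*
+ * As a sketch of the tradeoff (a, b and c are placeholder floats; mad() is
+ * declared elsewhere in this header):
+ *
+ *   float precise = fma(a, b, c);  // rounds once, after the addition
+ *   float faster  = mad(a, b, c);  // may also round after the multiplication
+ */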
+
+/*
+ * fmax: Maximum of two floats
+ *
+ * Returns the maximum of a and b, i.e. (a < b ? b : a).
+ *
+ * The max() function returns identical results but can be applied to more data types.
+ */
+extern float __attribute__((const, overloadable))
+ fmax(float a, float b);
+
+extern float2 __attribute__((const, overloadable))
+ fmax(float2 a, float2 b);
+
+extern float3 __attribute__((const, overloadable))
+ fmax(float3 a, float3 b);
+
+extern float4 __attribute__((const, overloadable))
+ fmax(float4 a, float4 b);
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 24))
+extern half __attribute__((const, overloadable))
+ fmax(half a, half b);
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 24))
+extern half2 __attribute__((const, overloadable))
+ fmax(half2 a, half2 b);
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 24))
+extern half3 __attribute__((const, overloadable))
+ fmax(half3 a, half3 b);
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 24))
+extern half4 __attribute__((const, overloadable))
+ fmax(half4 a, half4 b);
+#endif
+
+extern float2 __attribute__((const, overloadable))
+ fmax(float2 a, float b);
+
+extern float3 __attribute__((const, overloadable))
+ fmax(float3 a, float b);
+
+extern float4 __attribute__((const, overloadable))
+ fmax(float4 a, float b);
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 24))
+extern half2 __attribute__((const, overloadable))
+ fmax(half2 a, half b);
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 24))
+extern half3 __attribute__((const, overloadable))
+ fmax(half3 a, half b);
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 24))
+extern half4 __attribute__((const, overloadable))
+ fmax(half4 a, half b);
+#endif
+
+/*
+ * fmin: Minimum of two floats
+ *
+ * Returns the minimum of a and b, i.e. (a > b ? b : a).
+ *
+ * The min() function returns identical results but can be applied to more data types.
+ */
+extern float __attribute__((const, overloadable))
+ fmin(float a, float b);
+
+extern float2 __attribute__((const, overloadable))
+ fmin(float2 a, float2 b);
+
+extern float3 __attribute__((const, overloadable))
+ fmin(float3 a, float3 b);
+
+extern float4 __attribute__((const, overloadable))
+ fmin(float4 a, float4 b);
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 24))
+extern half __attribute__((const, overloadable))
+ fmin(half a, half b);
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 24))
+extern half2 __attribute__((const, overloadable))
+ fmin(half2 a, half2 b);
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 24))
+extern half3 __attribute__((const, overloadable))
+ fmin(half3 a, half3 b);
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 24))
+extern half4 __attribute__((const, overloadable))
+ fmin(half4 a, half4 b);
+#endif
+
+extern float2 __attribute__((const, overloadable))
+ fmin(float2 a, float b);
+
+extern float3 __attribute__((const, overloadable))
+ fmin(float3 a, float b);
+
+extern float4 __attribute__((const, overloadable))
+ fmin(float4 a, float b);
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 24))
+extern half2 __attribute__((const, overloadable))
+ fmin(half2 a, half b);
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 24))
+extern half3 __attribute__((const, overloadable))
+ fmin(half3 a, half b);
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 24))
+extern half4 __attribute__((const, overloadable))
+ fmin(half4 a, half b);
+#endif
+
+/*
+ * fmod: Modulo
+ *
+ * Returns the remainder of (numerator / denominator), where the quotient is rounded towards zero.
+ *
+ * The function remainder() is similar but rounds the quotient toward the closest integer.
+ * For example, fmod(-3.8f, 2.f) returns -1.8f (-3.8f - -1.f * 2.f)
+ * while remainder(-3.8f, 2.f) returns 0.2f (-3.8f - -2.f * 2.f).
+ */
+extern float __attribute__((const, overloadable))
+ fmod(float numerator, float denominator);
+
+extern float2 __attribute__((const, overloadable))
+ fmod(float2 numerator, float2 denominator);
+
+extern float3 __attribute__((const, overloadable))
+ fmod(float3 numerator, float3 denominator);
+
+extern float4 __attribute__((const, overloadable))
+ fmod(float4 numerator, float4 denominator);
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 24))
+extern half __attribute__((const, overloadable))
+ fmod(half numerator, half denominator);
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 24))
+extern half2 __attribute__((const, overloadable))
+ fmod(half2 numerator, half2 denominator);
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 24))
+extern half3 __attribute__((const, overloadable))
+ fmod(half3 numerator, half3 denominator);
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 24))
+extern half4 __attribute__((const, overloadable))
+ fmod(half4 numerator, half4 denominator);
+#endif
+
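+/*
+ * Usage sketch (illustrative only, not part of the original header):
+ * wrapping an angle into [0, 2*pi) with fmod(). The helper name wrap_angle
+ * is hypothetical, and the M_PI constant is assumed to be defined elsewhere
+ * in this header.
+ *
+ *   static float wrap_angle(float radians) {
+ *     float r = fmod(radians, 2.f * M_PI);    // remainder keeps the sign of radians
+ *     return (r < 0.f) ? r + 2.f * M_PI : r;  // shift negative remainders into range
+ *   }
+ */
+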
+/*
+ * fract: Positive fractional part
+ *
+ * Returns the positive fractional part of v, i.e. v - floor(v).
+ *
+ * For example, fract(1.3f, &val) returns 0.3f and sets val to 1.f.
+ * fract(-1.3f, &val) returns 0.7f and sets val to -2.f.
+ *
+ * Parameters:
+ * v: Input value.
+ * floor: If floor is not null, *floor will be set to the floor of v.
+ */
+extern float __attribute__((overloadable))
+ fract(float v, float* floor);
+
+extern float2 __attribute__((overloadable))
+ fract(float2 v, float2* floor);
+
+extern float3 __attribute__((overloadable))
+ fract(float3 v, float3* floor);
+
+extern float4 __attribute__((overloadable))
+ fract(float4 v, float4* floor);
+
+#if !defined(RS_VERSION) || (RS_VERSION <= 23)
+static inline float __attribute__((const, overloadable))
+ fract(float v) {
+ float unused;
+ return fract(v, &unused);
+}
+#endif
+
+#if !defined(RS_VERSION) || (RS_VERSION <= 23)
+static inline float2 __attribute__((const, overloadable))
+ fract(float2 v) {
+ float2 unused;
+ return fract(v, &unused);
+}
+#endif
+
+#if !defined(RS_VERSION) || (RS_VERSION <= 23)
+static inline float3 __attribute__((const, overloadable))
+ fract(float3 v) {
+ float3 unused;
+ return fract(v, &unused);
+}
+#endif
+
+#if !defined(RS_VERSION) || (RS_VERSION <= 23)
+static inline float4 __attribute__((const, overloadable))
+ fract(float4 v) {
+ float4 unused;
+ return fract(v, &unused);
+}
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 24))
+extern float __attribute__((overloadable))
+ fract(float v);
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 24))
+extern float2 __attribute__((overloadable))
+ fract(float2 v);
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 24))
+extern float3 __attribute__((overloadable))
+ fract(float3 v);
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 24))
+extern float4 __attribute__((overloadable))
+ fract(float4 v);
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 24))
+extern half __attribute__((overloadable))
+ fract(half v, half* floor);
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 24))
+extern half2 __attribute__((overloadable))
+ fract(half2 v, half2* floor);
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 24))
+extern half3 __attribute__((overloadable))
+ fract(half3 v, half3* floor);
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 24))
+extern half4 __attribute__((overloadable))
+ fract(half4 v, half4* floor);
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 24))
+extern half __attribute__((overloadable))
+ fract(half v);
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 24))
+extern half2 __attribute__((overloadable))
+ fract(half2 v);
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 24))
+extern half3 __attribute__((overloadable))
+ fract(half3 v);
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 24))
+extern half4 __attribute__((overloadable))
+ fract(half4 v);
+#endif
+
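+/*
+ * Usage sketch (illustrative only, not part of the original header):
+ * splitting a coordinate into a cell index and an offset within the cell.
+ * The variable names are hypothetical.
+ *
+ *   float cell;                          // receives floor(coord)
+ *   float offset = fract(coord, &cell);  // always in [0.f, 1.f)
+ *   // For coord == 2.75f:  cell == 2.f,  offset == 0.75f.
+ *   // For coord == -1.25f: cell == -2.f, offset == 0.75f.
+ */
+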
+/*
+ * frexp: Binary mantissa and exponent
+ *
+ * Returns the binary mantissa and exponent of v, i.e. v == mantissa * 2 ^ exponent.
+ *
+ * The mantissa is always between 0.5 (inclusive) and 1.0 (exclusive).
+ *
+ * See ldexp() for the reverse operation. See also logb() and ilogb().
+ *
+ * Parameters:
+ * v: Input value.
+ * exponent: If exponent is not null, *exponent will be set to the exponent of v.
+ */
+extern float __attribute__((overloadable))
+ frexp(float v, int* exponent);
+
+extern float2 __attribute__((overloadable))
+ frexp(float2 v, int2* exponent);
+
+extern float3 __attribute__((overloadable))
+ frexp(float3 v, int3* exponent);
+
+extern float4 __attribute__((overloadable))
+ frexp(float4 v, int4* exponent);
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 24))
+extern half __attribute__((overloadable))
+ frexp(half v, int* exponent);
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 24))
+extern half2 __attribute__((overloadable))
+ frexp(half2 v, int2* exponent);
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 24))
+extern half3 __attribute__((overloadable))
+ frexp(half3 v, int3* exponent);
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 24))
+extern half4 __attribute__((overloadable))
+ frexp(half4 v, int4* exponent);
+#endif
+
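+/*
+ * Worked example (illustrative only, not part of the original header):
+ * frexp() and ldexp() round-trip a value through its mantissa/exponent form.
+ *
+ *   int exponent;
+ *   float mantissa = frexp(8.5f, &exponent);     // mantissa == 0.53125f, exponent == 4
+ *   float restored = ldexp(mantissa, exponent);  // 0.53125f * 2^4 == 8.5f
+ */
+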
+/*
+ * half_recip: Reciprocal computed to 16 bit precision
+ *
+ * Returns the approximate reciprocal of a value.
+ *
+ * The precision is that of a 16 bit floating point value.
+ *
+ * See also native_recip().
+ */
+#if (defined(RS_VERSION) && (RS_VERSION >= 17))
+extern float __attribute__((const, overloadable))
+ half_recip(float v);
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 17))
+extern float2 __attribute__((const, overloadable))
+ half_recip(float2 v);
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 17))
+extern float3 __attribute__((const, overloadable))
+ half_recip(float3 v);
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 17))
+extern float4 __attribute__((const, overloadable))
+ half_recip(float4 v);
+#endif
+
+/*
+ * half_rsqrt: Reciprocal of a square root computed to 16 bit precision
+ *
+ * Returns the approximate value of (1.f / sqrt(v)).
+ *
+ * The precision is that of a 16 bit floating point value.
+ *
+ * See also rsqrt(), native_rsqrt().
+ */
+#if (defined(RS_VERSION) && (RS_VERSION >= 17))
+extern float __attribute__((const, overloadable))
+ half_rsqrt(float v);
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 17))
+extern float2 __attribute__((const, overloadable))
+ half_rsqrt(float2 v);
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 17))
+extern float3 __attribute__((const, overloadable))
+ half_rsqrt(float3 v);
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 17))
+extern float4 __attribute__((const, overloadable))
+ half_rsqrt(float4 v);
+#endif
+
+/*
+ * half_sqrt: Square root computed to 16 bit precision
+ *
+ * Returns the approximate square root of a value.
+ *
+ * The precision is that of a 16 bit floating point value.
+ *
+ * See also sqrt(), native_sqrt().
+ */
+#if (defined(RS_VERSION) && (RS_VERSION >= 17))
+extern float __attribute__((const, overloadable))
+ half_sqrt(float v);
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 17))
+extern float2 __attribute__((const, overloadable))
+ half_sqrt(float2 v);
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 17))
+extern float3 __attribute__((const, overloadable))
+ half_sqrt(float3 v);
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 17))
+extern float4 __attribute__((const, overloadable))
+ half_sqrt(float4 v);
+#endif
+
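+/*
+ * Usage sketch (illustrative only, not part of the original header):
+ * the half_* routines trade accuracy for speed. Assuming RS_VERSION >= 17
+ * and the dot() intrinsic from the vector math routines, a fast approximate
+ * normalization could look like the following; the helper name is
+ * hypothetical.
+ *
+ *   static float3 normalize_fast(float3 v) {
+ *     return v * half_rsqrt(dot(v, v));  // ~16 bit precision, cheaper than rsqrt()
+ *   }
+ */
+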
+/*
+ * hypot: Hypotenuse
+ *
+ * Returns the hypotenuse, i.e. sqrt(a * a + b * b).
+ *
+ * See also native_hypot().
+ */
+extern float __attribute__((const, overloadable))
+ hypot(float a, float b);
+
+extern float2 __attribute__((const, overloadable))
+ hypot(float2 a, float2 b);
+
+extern float3 __attribute__((const, overloadable))
+ hypot(float3 a, float3 b);
+
+extern float4 __attribute__((const, overloadable))
+ hypot(float4 a, float4 b);
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 24))
+extern half __attribute__((const, overloadable))
+ hypot(half a, half b);
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 24))
+extern half2 __attribute__((const, overloadable))
+ hypot(half2 a, half2 b);
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 24))
+extern half3 __attribute__((const, overloadable))
+ hypot(half3 a, half3 b);
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 24))
+extern half4 __attribute__((const, overloadable))
+ hypot(half4 a, half4 b);
+#endif
+
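+/*
+ * Usage sketch (illustrative only, not part of the original header):
+ * the Euclidean distance between two 2-D points. The variable names are
+ * hypothetical.
+ *
+ *   float2 p = {1.f, 2.f};
+ *   float2 q = {4.f, 6.f};
+ *   float d = hypot(q.x - p.x, q.y - p.y);  // sqrt(3*3 + 4*4) == 5.f
+ */
+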
+/*
+ * ilogb: Base two exponent
+ *
+ * Returns the base two exponent of a value, where the mantissa is between
+ * 1.f (inclusive) and 2.f (exclusive).
+ *
+ * For example, ilogb(8.5f) returns 3.
+ *
+ * Because of the difference in mantissa, this number is one less than the exponent frexp() returns.
+ *
+ * logb() is similar but returns a float.
+ */
+extern int __attribute__((const, overloadable))
+ ilogb(float v);
+
+extern int2 __attribute__((const, overloadable))
+ ilogb(float2 v);
+
+extern int3 __attribute__((const, overloadable))
+ ilogb(float3 v);
+
+extern int4 __attribute__((const, overloadable))
+ ilogb(float4 v);
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 24))
+extern int __attribute__((const, overloadable))
+ ilogb(half v);
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 24))
+extern int2 __attribute__((const, overloadable))
+ ilogb(half2 v);
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 24))
+extern int3 __attribute__((const, overloadable))
+ ilogb(half3 v);
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 24))
+extern int4 __attribute__((const, overloadable))
+ ilogb(half4 v);
+#endif
+
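+/*
+ * Worked example (illustrative only, not part of the original header):
+ * comparing ilogb() with frexp() on the same input.
+ *
+ *   int e = ilogb(8.5f);  // 3, since 8.5f == 1.0625f * 2^3
+ *   int e2;
+ *   frexp(8.5f, &e2);     // e2 == 4, since the frexp() mantissa is in [0.5, 1.0)
+ */
+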
+/*
+ * ldexp: Creates a floating point from mantissa and exponent
+ *
+ * Returns the floating point value created from the mantissa and exponent,
+ * i.e. (mantissa * 2 ^ exponent).
+ *
+ * See frexp() for the reverse operation.
+ *
+ * Parameters:
+ * mantissa: Mantissa.
+ * exponent: Exponent, a single component or matching vector.
+ */
+extern float __attribute__((const, overloadable))
+ ldexp(float mantissa, int exponent);
+
+extern float2 __attribute__((const, overloadable))
+ ldexp(float2 mantissa, int2 exponent);
+
+extern float3 __attribute__((const, overloadable))
+ ldexp(float3 mantissa, int3 exponent);
+
+extern float4 __attribute__((const, overloadable))
+ ldexp(float4 mantissa, int4 exponent);
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 24))
+extern half __attribute__((const, overloadable))
+ ldexp(half mantissa, int exponent);
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 24))
+extern half2 __attribute__((const, overloadable))
+ ldexp(half2 mantissa, int2 exponent);
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 24))
+extern half3 __attribute__((const, overloadable))
+ ldexp(half3 mantissa, int3 exponent);
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 24))
+extern half4 __attribute__((const, overloadable))
+ ldexp(half4 mantissa, int4 exponent);
+#endif
+
+extern float2 __attribute__((const, overloadable))
+ ldexp(float2 mantissa, int exponent);
+
+extern float3 __attribute__((const, overloadable))
+ ldexp(float3 mantissa, int exponent);
+
+extern float4 __attribute__((const, overloadable))
+ ldexp(float4 mantissa, int exponent);
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 24))
+extern half2 __attribute__((const, overloadable))
+ ldexp(half2 mantissa, int exponent);
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 24))
+extern half3 __attribute__((const, overloadable))
+ ldexp(half3 mantissa, int exponent);
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 24))
+extern half4 __attribute__((const, overloadable))
+ ldexp(half4 mantissa, int exponent);
+#endif
+
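+/*
+ * Usage sketch (illustrative only, not part of the original header):
+ * scaling by a power of two without calling pow(). The variable names are
+ * hypothetical.
+ *
+ *   float x = ldexp(1.5f, 10);      // 1.5f * 2^10 == 1536.f
+ *   float4 v = {1.f, 2.f, 3.f, 4.f};
+ *   float4 scaled = ldexp(v, 3);    // each component multiplied by 8.f
+ */
+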
+/*
+ * lgamma: Natural logarithm of the gamma function
+ *
+ * Returns the natural logarithm of the absolute value of the gamma function,
+ * i.e. log(fabs(tgamma(v))).
+ *
+ * See also tgamma().
+ *
+ * Parameters:
+ * sign_of_gamma: If sign_of_gamma is not null, *sign_of_gamma will be set to -1 if the gamma of v is negative, and to 1 otherwise.
+ */
+extern float __attribute__((const, overloadable))
+ lgamma(float v);
+
+extern float2 __attribute__((const, overloadable))
+ lgamma(float2 v);
+
+extern float3 __attribute__((const, overloadable))
+ lgamma(float3 v);
+
+extern float4 __attribute__((const, overloadable))
+ lgamma(float4 v);
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 24))
+extern half __attribute__((const, overloadable))
+ lgamma(half v);
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 24))
+extern half2 __attribute__((const, overloadable))
+ lgamma(half2 v);
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 24))
+extern half3 __attribute__((const, overloadable))
+ lgamma(half3 v);
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 24))
+extern half4 __attribute__((const, overloadable))
+ lgamma(half4 v);
+#endif
+
+extern float __attribute__((overloadable))
+ lgamma(float v, int* sign_of_gamma);
+
+extern float2 __attribute__((overloadable))
+ lgamma(float2 v, int2* sign_of_gamma);
+
+extern float3 __attribute__((overloadable))
+ lgamma(float3 v, int3* sign_of_gamma);
+
+extern float4 __attribute__((overloadable))
+ lgamma(float4 v, int4* sign_of_gamma);
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 24))
+extern half __attribute__((overloadable))
+ lgamma(half v, int* sign_of_gamma);
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 24))
+extern half2 __attribute__((overloadable))
+ lgamma(half2 v, int2* sign_of_gamma);
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 24))
+extern half3 __attribute__((overloadable))
+ lgamma(half3 v, int3* sign_of_gamma);
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 24))
+extern half4 __attribute__((overloadable))
+ lgamma(half4 v, int4* sign_of_gamma);
+#endif
+
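+/*
+ * Usage sketch (illustrative only, not part of the original header):
+ * recovering the gamma function from lgamma() while keeping the intermediate
+ * magnitude in logarithmic form. The variable names are hypothetical.
+ *
+ *   float v = 4.5f;
+ *   int sign;
+ *   float lg = lgamma(v, &sign);
+ *   float gamma = sign * exp(lg);  // approximately tgamma(v), up to rounding
+ */
+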
+/*
+ * log: Natural logarithm
+ *
+ * Returns the natural logarithm.
+ *
+ * See also native_log().
+ */
+extern float __attribute__((const, overloadable))
+ log(float v);
+
+extern float2 __attribute__((const, overloadable))
+ log(float2 v);
+
+extern float3 __attribute__((const, overloadable))
+ log(float3 v);
+
+extern float4 __attribute__((const, overloadable))
+ log(float4 v);
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 24))
+extern half __attribute__((const, overloadable))
+ log(half v);
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 24))
+extern half2 __attribute__((const, overloadable))
+ log(half2 v);
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 24))
+extern half3 __attribute__((const, overloadable))
+ log(half3 v);
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 24))
+extern half4 __attribute__((const, overloadable))
+ log(half4 v);
+#endif
+
+/*
+ * log10: Base 10 logarithm
+ *
+ * Returns the base 10 logarithm.
+ *
+ * See also native_log10().
+ */
+extern float __attribute__((const, overloadable))
+ log10(float v);
+
+extern float2 __attribute__((const, overloadable))
+ log10(float2 v);
+
+extern float3 __attribute__((const, overloadable))
+ log10(float3 v);
+
+extern float4 __attribute__((const, overloadable))
+ log10(float4 v);
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 24))
+extern half __attribute__((const, overloadable))
+ log10(half v);
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 24))
+extern half2 __attribute__((const, overloadable))
+ log10(half2 v);
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 24))
+extern half3 __attribute__((const, overloadable))
+ log10(half3 v);
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 24))
+extern half4 __attribute__((const, overloadable))
+ log10(half4 v);
+#endif
+
+/*
+ * log1p: Natural logarithm of a value plus 1
+ *
+ * Returns the natural logarithm of (v + 1.f).
+ *
+ * See also native_log1p().
+ */
+extern float __attribute__((const, overloadable))
+ log1p(float v);
+
+extern float2 __attribute__((const, overloadable))
+ log1p(float2 v);
+
+extern float3 __attribute__((const, overloadable))
+ log1p(float3 v);
+
+extern float4 __attribute__((const, overloadable))
+ log1p(float4 v);
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 24))
+extern half __attribute__((const, overloadable))
+ log1p(half v);
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 24))
+extern half2 __attribute__((const, overloadable))
+ log1p(half2 v);
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 24))
+extern half3 __attribute__((const, overloadable))
+ log1p(half3 v);
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 24))
+extern half4 __attribute__((const, overloadable))
+ log1p(half4 v);
+#endif
+
+/*
+ * log2: Base 2 logarithm
+ *
+ * Returns the base 2 logarithm.
+ *
+ * See also native_log2().
+ */
+extern float __attribute__((const, overloadable))
+ log2(float v);
+
+extern float2 __attribute__((const, overloadable))
+ log2(float2 v);
+
+extern float3 __attribute__((const, overloadable))
+ log2(float3 v);
+
+extern float4 __attribute__((const, overloadable))
+ log2(float4 v);
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 24))
+extern half __attribute__((const, overloadable))
+ log2(half v);
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 24))
+extern half2 __attribute__((const, overloadable))
+ log2(half2 v);
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 24))
+extern half3 __attribute__((const, overloadable))
+ log2(half3 v);
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 24))
+extern half4 __attribute__((const, overloadable))
+ log2(half4 v);
+#endif
+
+/*
+ * logb: Base two exponent
+ *
+ * Returns the base two exponent of a value, where the mantissa is between
+ * 1.f (inclusive) and 2.f (exclusive).
+ *
+ * For example, logb(8.5f) returns 3.f.
+ *
+ * Because of the difference in mantissa, this number is one less than the exponent frexp() returns.
+ *
+ * ilogb() is similar but returns an integer.
+ */
+extern float __attribute__((const, overloadable))
+ logb(float v);
+
+extern float2 __attribute__((const, overloadable))
+ logb(float2 v);
+
+extern float3 __attribute__((const, overloadable))
+ logb(float3 v);
+
+extern float4 __attribute__((const, overloadable))
+ logb(float4 v);
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 24))
+extern half __attribute__((const, overloadable))
+ logb(half v);
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 24))
+extern half2 __attribute__((const, overloadable))
+ logb(half2 v);
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 24))
+extern half3 __attribute__((const, overloadable))
+ logb(half3 v);
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 24))
+extern half4 __attribute__((const, overloadable))
+ logb(half4 v);
+#endif
+
+/*
+ * mad: Multiply and add
+ *
+ * Returns (multiplicand1 * multiplicand2) + offset.
+ *
+ * This function is similar to fma(). fma() retains full precision of the multiplied result
+ * and rounds only after the addition. mad() rounds after the multiplication and the addition.
+ * In rs_fp_relaxed mode, mad() may not do the rounding after multiplication.
+ */
+extern float __attribute__((const, overloadable))
+ mad(float multiplicand1, float multiplicand2, float offset);
+
+extern float2 __attribute__((const, overloadable))
+ mad(float2 multiplicand1, float2 multiplicand2, float2 offset);
+
+extern float3 __attribute__((const, overloadable))
+ mad(float3 multiplicand1, float3 multiplicand2, float3 offset);
+
+extern float4 __attribute__((const, overloadable))
+ mad(float4 multiplicand1, float4 multiplicand2, float4 offset);
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 24))
+extern half __attribute__((const, overloadable))
+ mad(half multiplicand1, half multiplicand2, half offset);
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 24))
+extern half2 __attribute__((const, overloadable))
+ mad(half2 multiplicand1, half2 multiplicand2, half2 offset);
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 24))
+extern half3 __attribute__((const, overloadable))
+ mad(half3 multiplicand1, half3 multiplicand2, half3 offset);
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 24))
+extern half4 __attribute__((const, overloadable))
+ mad(half4 multiplicand1, half4 multiplicand2, half4 offset);
+#endif
+
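+/*
+ * Usage sketch (illustrative only, not part of the original header):
+ * evaluating a cubic polynomial with Horner's method, one mad() per term.
+ * The coefficient names c0..c3 and the input x are hypothetical floats.
+ *
+ *   // y = c3*x^3 + c2*x^2 + c1*x + c0
+ *   float y = mad(mad(mad(c3, x, c2), x, c1), x, c0);
+ */
+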
+/*
+ * max: Maximum
+ *
+ * Returns the maximum value of two arguments.
+ */
+extern float __attribute__((const, overloadable))
+ max(float a, float b);
+
+extern float2 __attribute__((const, overloadable))
+ max(float2 a, float2 b);
+
+extern float3 __attribute__((const, overloadable))
+ max(float3 a, float3 b);
+
+extern float4 __attribute__((const, overloadable))
+ max(float4 a, float4 b);
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 24))
+extern half __attribute__((const, overloadable))
+ max(half a, half b);
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 24))
+extern half2 __attribute__((const, overloadable))
+ max(half2 a, half2 b);
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 24))
+extern half3 __attribute__((const, overloadable))
+ max(half3 a, half3 b);
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 24))
+extern half4 __attribute__((const, overloadable))
+ max(half4 a, half4 b);
+#endif
+
+extern float2 __attribute__((const, overloadable))
+ max(float2 a, float b);
+
+extern float3 __attribute__((const, overloadable))
+ max(float3 a, float b);
+
+extern float4 __attribute__((const, overloadable))
+ max(float4 a, float b);
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 24))
+extern half2 __attribute__((const, overloadable))
+ max(half2 a, half b);
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 24))
+extern half3 __attribute__((const, overloadable))
+ max(half3 a, half b);
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 24))
+extern half4 __attribute__((const, overloadable))
+ max(half4 a, half b);
+#endif
+
+#if !defined(RS_VERSION) || (RS_VERSION <= 20)
+static inline char __attribute__((const, overloadable))
+ max(char a, char b) {
+ return (a > b ? a : b);
+}
+#endif
+
+#if !defined(RS_VERSION) || (RS_VERSION <= 20)
+static inline uchar __attribute__((const, overloadable))
+ max(uchar a, uchar b) {
+ return (a > b ? a : b);
+}
+#endif
+
+#if !defined(RS_VERSION) || (RS_VERSION <= 20)
+static inline short __attribute__((const, overloadable))
+ max(short a, short b) {
+ return (a > b ? a : b);
+}
+#endif
+
+#if !defined(RS_VERSION) || (RS_VERSION <= 20)
+static inline ushort __attribute__((const, overloadable))
+ max(ushort a, ushort b) {
+ return (a > b ? a : b);
+}
+#endif
+
+#if !defined(RS_VERSION) || (RS_VERSION <= 20)
+static inline int __attribute__((const, overloadable))
+ max(int a, int b) {
+ return (a > b ? a : b);
+}
+#endif
+
+#if !defined(RS_VERSION) || (RS_VERSION <= 20)
+static inline uint __attribute__((const, overloadable))
+ max(uint a, uint b) {
+ return (a > b ? a : b);
+}
+#endif
+
+#if !defined(RS_VERSION) || (RS_VERSION <= 20)
+static inline char2 __attribute__((const, overloadable))
+ max(char2 a, char2 b) {
+ char2 tmp;
+ tmp.x = (a.x > b.x ? a.x : b.x);
+ tmp.y = (a.y > b.y ? a.y : b.y);
+ return tmp;
+}
+#endif
+
+#if !defined(RS_VERSION) || (RS_VERSION <= 20)
+static inline uchar2 __attribute__((const, overloadable))
+ max(uchar2 a, uchar2 b) {
+ uchar2 tmp;
+ tmp.x = (a.x > b.x ? a.x : b.x);
+ tmp.y = (a.y > b.y ? a.y : b.y);
+ return tmp;
+}
+#endif
+
+#if !defined(RS_VERSION) || (RS_VERSION <= 20)
+static inline short2 __attribute__((const, overloadable))
+ max(short2 a, short2 b) {
+ short2 tmp;
+ tmp.x = (a.x > b.x ? a.x : b.x);
+ tmp.y = (a.y > b.y ? a.y : b.y);
+ return tmp;
+}
+#endif
+
+#if !defined(RS_VERSION) || (RS_VERSION <= 20)
+static inline ushort2 __attribute__((const, overloadable))
+ max(ushort2 a, ushort2 b) {
+ ushort2 tmp;
+ tmp.x = (a.x > b.x ? a.x : b.x);
+ tmp.y = (a.y > b.y ? a.y : b.y);
+ return tmp;
+}
+#endif
+
+#if !defined(RS_VERSION) || (RS_VERSION <= 20)
+static inline int2 __attribute__((const, overloadable))
+ max(int2 a, int2 b) {
+ int2 tmp;
+ tmp.x = (a.x > b.x ? a.x : b.x);
+ tmp.y = (a.y > b.y ? a.y : b.y);
+ return tmp;
+}
+#endif
+
+#if !defined(RS_VERSION) || (RS_VERSION <= 20)
+static inline uint2 __attribute__((const, overloadable))
+ max(uint2 a, uint2 b) {
+ uint2 tmp;
+ tmp.x = (a.x > b.x ? a.x : b.x);
+ tmp.y = (a.y > b.y ? a.y : b.y);
+ return tmp;
+}
+#endif
+
+#if !defined(RS_VERSION) || (RS_VERSION <= 20)
+static inline char3 __attribute__((const, overloadable))
+ max(char3 a, char3 b) {
+ char3 tmp;
+ tmp.x = (a.x > b.x ? a.x : b.x);
+ tmp.y = (a.y > b.y ? a.y : b.y);
+ tmp.z = (a.z > b.z ? a.z : b.z);
+ return tmp;
+}
+#endif
+
+#if !defined(RS_VERSION) || (RS_VERSION <= 20)
+static inline uchar3 __attribute__((const, overloadable))
+ max(uchar3 a, uchar3 b) {
+ uchar3 tmp;
+ tmp.x = (a.x > b.x ? a.x : b.x);
+ tmp.y = (a.y > b.y ? a.y : b.y);
+ tmp.z = (a.z > b.z ? a.z : b.z);
+ return tmp;
+}
+#endif
+
+#if !defined(RS_VERSION) || (RS_VERSION <= 20)
+static inline short3 __attribute__((const, overloadable))
+ max(short3 a, short3 b) {
+ short3 tmp;
+ tmp.x = (a.x > b.x ? a.x : b.x);
+ tmp.y = (a.y > b.y ? a.y : b.y);
+ tmp.z = (a.z > b.z ? a.z : b.z);
+ return tmp;
+}
+#endif
+
+#if !defined(RS_VERSION) || (RS_VERSION <= 20)
+static inline ushort3 __attribute__((const, overloadable))
+ max(ushort3 a, ushort3 b) {
+ ushort3 tmp;
+ tmp.x = (a.x > b.x ? a.x : b.x);
+ tmp.y = (a.y > b.y ? a.y : b.y);
+ tmp.z = (a.z > b.z ? a.z : b.z);
+ return tmp;
+}
+#endif
+
+#if !defined(RS_VERSION) || (RS_VERSION <= 20)
+static inline int3 __attribute__((const, overloadable))
+ max(int3 a, int3 b) {
+ int3 tmp;
+ tmp.x = (a.x > b.x ? a.x : b.x);
+ tmp.y = (a.y > b.y ? a.y : b.y);
+ tmp.z = (a.z > b.z ? a.z : b.z);
+ return tmp;
+}
+#endif
+
+#if !defined(RS_VERSION) || (RS_VERSION <= 20)
+static inline uint3 __attribute__((const, overloadable))
+ max(uint3 a, uint3 b) {
+ uint3 tmp;
+ tmp.x = (a.x > b.x ? a.x : b.x);
+ tmp.y = (a.y > b.y ? a.y : b.y);
+ tmp.z = (a.z > b.z ? a.z : b.z);
+ return tmp;
+}
+#endif
+
+#if !defined(RS_VERSION) || (RS_VERSION <= 20)
+static inline char4 __attribute__((const, overloadable))
+ max(char4 a, char4 b) {
+ char4 tmp;
+ tmp.x = (a.x > b.x ? a.x : b.x);
+ tmp.y = (a.y > b.y ? a.y : b.y);
+ tmp.z = (a.z > b.z ? a.z : b.z);
+ tmp.w = (a.w > b.w ? a.w : b.w);
+ return tmp;
+}
+#endif
+
+#if !defined(RS_VERSION) || (RS_VERSION <= 20)
+static inline uchar4 __attribute__((const, overloadable))
+ max(uchar4 a, uchar4 b) {
+ uchar4 tmp;
+ tmp.x = (a.x > b.x ? a.x : b.x);
+ tmp.y = (a.y > b.y ? a.y : b.y);
+ tmp.z = (a.z > b.z ? a.z : b.z);
+ tmp.w = (a.w > b.w ? a.w : b.w);
+ return tmp;
+}
+#endif
+
+#if !defined(RS_VERSION) || (RS_VERSION <= 20)
+static inline short4 __attribute__((const, overloadable))
+ max(short4 a, short4 b) {
+ short4 tmp;
+ tmp.x = (a.x > b.x ? a.x : b.x);
+ tmp.y = (a.y > b.y ? a.y : b.y);
+ tmp.z = (a.z > b.z ? a.z : b.z);
+ tmp.w = (a.w > b.w ? a.w : b.w);
+ return tmp;
+}
+#endif
+
+#if !defined(RS_VERSION) || (RS_VERSION <= 20)
+static inline ushort4 __attribute__((const, overloadable))
+ max(ushort4 a, ushort4 b) {
+ ushort4 tmp;
+ tmp.x = (a.x > b.x ? a.x : b.x);
+ tmp.y = (a.y > b.y ? a.y : b.y);
+ tmp.z = (a.z > b.z ? a.z : b.z);
+ tmp.w = (a.w > b.w ? a.w : b.w);
+ return tmp;
+}
+#endif
+
+#if !defined(RS_VERSION) || (RS_VERSION <= 20)
+static inline int4 __attribute__((const, overloadable))
+ max(int4 a, int4 b) {
+ int4 tmp;
+ tmp.x = (a.x > b.x ? a.x : b.x);
+ tmp.y = (a.y > b.y ? a.y : b.y);
+ tmp.z = (a.z > b.z ? a.z : b.z);
+ tmp.w = (a.w > b.w ? a.w : b.w);
+ return tmp;
+}
+#endif
+
+#if !defined(RS_VERSION) || (RS_VERSION <= 20)
+static inline uint4 __attribute__((const, overloadable))
+ max(uint4 a, uint4 b) {
+ uint4 tmp;
+ tmp.x = (a.x > b.x ? a.x : b.x);
+ tmp.y = (a.y > b.y ? a.y : b.y);
+ tmp.z = (a.z > b.z ? a.z : b.z);
+ tmp.w = (a.w > b.w ? a.w : b.w);
+ return tmp;
+}
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 21))
+extern char __attribute__((const, overloadable))
+ max(char a, char b);
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 21))
+extern char2 __attribute__((const, overloadable))
+ max(char2 a, char2 b);
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 21))
+extern char3 __attribute__((const, overloadable))
+ max(char3 a, char3 b);
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 21))
+extern char4 __attribute__((const, overloadable))
+ max(char4 a, char4 b);
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 21))
+extern uchar __attribute__((const, overloadable))
+ max(uchar a, uchar b);
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 21))
+extern uchar2 __attribute__((const, overloadable))
+ max(uchar2 a, uchar2 b);
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 21))
+extern uchar3 __attribute__((const, overloadable))
+ max(uchar3 a, uchar3 b);
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 21))
+extern uchar4 __attribute__((const, overloadable))
+ max(uchar4 a, uchar4 b);
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 21))
+extern short __attribute__((const, overloadable))
+ max(short a, short b);
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 21))
+extern short2 __attribute__((const, overloadable))
+ max(short2 a, short2 b);
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 21))
+extern short3 __attribute__((const, overloadable))
+ max(short3 a, short3 b);
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 21))
+extern short4 __attribute__((const, overloadable))
+ max(short4 a, short4 b);
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 21))
+extern ushort __attribute__((const, overloadable))
+ max(ushort a, ushort b);
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 21))
+extern ushort2 __attribute__((const, overloadable))
+ max(ushort2 a, ushort2 b);
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 21))
+extern ushort3 __attribute__((const, overloadable))
+ max(ushort3 a, ushort3 b);
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 21))
+extern ushort4 __attribute__((const, overloadable))
+ max(ushort4 a, ushort4 b);
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 21))
+extern int __attribute__((const, overloadable))
+ max(int a, int b);
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 21))
+extern int2 __attribute__((const, overloadable))
+ max(int2 a, int2 b);
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 21))
+extern int3 __attribute__((const, overloadable))
+ max(int3 a, int3 b);
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 21))
+extern int4 __attribute__((const, overloadable))
+ max(int4 a, int4 b);
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 21))
+extern uint __attribute__((const, overloadable))
+ max(uint a, uint b);
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 21))
+extern uint2 __attribute__((const, overloadable))
+ max(uint2 a, uint2 b);
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 21))
+extern uint3 __attribute__((const, overloadable))
+ max(uint3 a, uint3 b);
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 21))
+extern uint4 __attribute__((const, overloadable))
+ max(uint4 a, uint4 b);
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 21))
+extern long __attribute__((const, overloadable))
+ max(long a, long b);
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 21))
+extern long2 __attribute__((const, overloadable))
+ max(long2 a, long2 b);
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 21))
+extern long3 __attribute__((const, overloadable))
+ max(long3 a, long3 b);
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 21))
+extern long4 __attribute__((const, overloadable))
+ max(long4 a, long4 b);
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 21))
+extern ulong __attribute__((const, overloadable))
+ max(ulong a, ulong b);
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 21))
+extern ulong2 __attribute__((const, overloadable))
+ max(ulong2 a, ulong2 b);
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 21))
+extern ulong3 __attribute__((const, overloadable))
+ max(ulong3 a, ulong3 b);
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 21))
+extern ulong4 __attribute__((const, overloadable))
+ max(ulong4 a, ulong4 b);
+#endif
+
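+/*
+ * Usage sketch (illustrative only, not part of the original header):
+ * clamping an integer to a range by combining max() with min() (declared
+ * below). Below API level 21 the static inline definitions above are used;
+ * from 21 on, the extern declarations resolve to native implementations.
+ * The variable names are hypothetical.
+ *
+ *   int clamped = min(max(value, lo), hi);  // lo <= clamped <= hi, assuming lo <= hi
+ */
+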
+/*
+ * min: Minimum
+ *
+ * Returns the minimum value of two arguments.
+ */
+extern float __attribute__((const, overloadable))
+ min(float a, float b);
+
+extern float2 __attribute__((const, overloadable))
+ min(float2 a, float2 b);
+
+extern float3 __attribute__((const, overloadable))
+ min(float3 a, float3 b);
+
+extern float4 __attribute__((const, overloadable))
+ min(float4 a, float4 b);
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 24))
+extern half __attribute__((const, overloadable))
+ min(half a, half b);
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 24))
+extern half2 __attribute__((const, overloadable))
+ min(half2 a, half2 b);
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 24))
+extern half3 __attribute__((const, overloadable))
+ min(half3 a, half3 b);
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 24))
+extern half4 __attribute__((const, overloadable))
+ min(half4 a, half4 b);
+#endif
+
+extern float2 __attribute__((const, overloadable))
+ min(float2 a, float b);
+
+extern float3 __attribute__((const, overloadable))
+ min(float3 a, float b);
+
+extern float4 __attribute__((const, overloadable))
+ min(float4 a, float b);
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 24))
+extern half2 __attribute__((const, overloadable))
+ min(half2 a, half b);
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 24))
+extern half3 __attribute__((const, overloadable))
+ min(half3 a, half b);
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 24))
+extern half4 __attribute__((const, overloadable))
+ min(half4 a, half b);
+#endif
+
+#if !defined(RS_VERSION) || (RS_VERSION <= 20)
+static inline char __attribute__((const, overloadable))
+ min(char a, char b) {
+ return (a < b ? a : b);
+}
+#endif
+
+#if !defined(RS_VERSION) || (RS_VERSION <= 20)
+static inline uchar __attribute__((const, overloadable))
+ min(uchar a, uchar b) {
+ return (a < b ? a : b);
+}
+#endif
+
+#if !defined(RS_VERSION) || (RS_VERSION <= 20)
+static inline short __attribute__((const, overloadable))
+ min(short a, short b) {
+ return (a < b ? a : b);
+}
+#endif
+
+#if !defined(RS_VERSION) || (RS_VERSION <= 20)
+static inline ushort __attribute__((const, overloadable))
+ min(ushort a, ushort b) {
+ return (a < b ? a : b);
+}
+#endif
+
+#if !defined(RS_VERSION) || (RS_VERSION <= 20)
+static inline int __attribute__((const, overloadable))
+ min(int a, int b) {
+ return (a < b ? a : b);
+}
+#endif
+
+#if !defined(RS_VERSION) || (RS_VERSION <= 20)
+static inline uint __attribute__((const, overloadable))
+ min(uint a, uint b) {
+ return (a < b ? a : b);
+}
+#endif
+
+#if !defined(RS_VERSION) || (RS_VERSION <= 20)
+static inline char2 __attribute__((const, overloadable))
+ min(char2 a, char2 b) {
+ char2 tmp;
+ tmp.x = (a.x < b.x ? a.x : b.x);
+ tmp.y = (a.y < b.y ? a.y : b.y);
+ return tmp;
+}
+#endif
+
+#if !defined(RS_VERSION) || (RS_VERSION <= 20)
+static inline uchar2 __attribute__((const, overloadable))
+ min(uchar2 a, uchar2 b) {
+ uchar2 tmp;
+ tmp.x = (a.x < b.x ? a.x : b.x);
+ tmp.y = (a.y < b.y ? a.y : b.y);
+ return tmp;
+}
+#endif
+
+#if !defined(RS_VERSION) || (RS_VERSION <= 20)
+static inline short2 __attribute__((const, overloadable))
+ min(short2 a, short2 b) {
+ short2 tmp;
+ tmp.x = (a.x < b.x ? a.x : b.x);
+ tmp.y = (a.y < b.y ? a.y : b.y);
+ return tmp;
+}
+#endif
+
+#if !defined(RS_VERSION) || (RS_VERSION <= 20)
+static inline ushort2 __attribute__((const, overloadable))
+ min(ushort2 a, ushort2 b) {
+ ushort2 tmp;
+ tmp.x = (a.x < b.x ? a.x : b.x);
+ tmp.y = (a.y < b.y ? a.y : b.y);
+ return tmp;
+}
+#endif
+
+#if !defined(RS_VERSION) || (RS_VERSION <= 20)
+static inline int2 __attribute__((const, overloadable))
+ min(int2 a, int2 b) {
+ int2 tmp;
+ tmp.x = (a.x < b.x ? a.x : b.x);
+ tmp.y = (a.y < b.y ? a.y : b.y);
+ return tmp;
+}
+#endif
+
+#if !defined(RS_VERSION) || (RS_VERSION <= 20)
+static inline uint2 __attribute__((const, overloadable))
+ min(uint2 a, uint2 b) {
+ uint2 tmp;
+ tmp.x = (a.x < b.x ? a.x : b.x);
+ tmp.y = (a.y < b.y ? a.y : b.y);
+ return tmp;
+}
+#endif
+
+#if !defined(RS_VERSION) || (RS_VERSION <= 20)
+static inline char3 __attribute__((const, overloadable))
+ min(char3 a, char3 b) {
+ char3 tmp;
+ tmp.x = (a.x < b.x ? a.x : b.x);
+ tmp.y = (a.y < b.y ? a.y : b.y);
+ tmp.z = (a.z < b.z ? a.z : b.z);
+ return tmp;
+}
+#endif
+
+#if !defined(RS_VERSION) || (RS_VERSION <= 20)
+static inline uchar3 __attribute__((const, overloadable))
+ min(uchar3 a, uchar3 b) {
+ uchar3 tmp;
+ tmp.x = (a.x < b.x ? a.x : b.x);
+ tmp.y = (a.y < b.y ? a.y : b.y);
+ tmp.z = (a.z < b.z ? a.z : b.z);
+ return tmp;
+}
+#endif
+
+#if !defined(RS_VERSION) || (RS_VERSION <= 20)
+static inline short3 __attribute__((const, overloadable))
+ min(short3 a, short3 b) {
+ short3 tmp;
+ tmp.x = (a.x < b.x ? a.x : b.x);
+ tmp.y = (a.y < b.y ? a.y : b.y);
+ tmp.z = (a.z < b.z ? a.z : b.z);
+ return tmp;
+}
+#endif
+
+#if !defined(RS_VERSION) || (RS_VERSION <= 20)
+static inline ushort3 __attribute__((const, overloadable))
+ min(ushort3 a, ushort3 b) {
+ ushort3 tmp;
+ tmp.x = (a.x < b.x ? a.x : b.x);
+ tmp.y = (a.y < b.y ? a.y : b.y);
+ tmp.z = (a.z < b.z ? a.z : b.z);
+ return tmp;
+}
+#endif
+
+#if !defined(RS_VERSION) || (RS_VERSION <= 20)
+static inline int3 __attribute__((const, overloadable))
+ min(int3 a, int3 b) {
+ int3 tmp;
+ tmp.x = (a.x < b.x ? a.x : b.x);
+ tmp.y = (a.y < b.y ? a.y : b.y);
+ tmp.z = (a.z < b.z ? a.z : b.z);
+ return tmp;
+}
+#endif
+
+#if !defined(RS_VERSION) || (RS_VERSION <= 20)
+static inline uint3 __attribute__((const, overloadable))
+ min(uint3 a, uint3 b) {
+ uint3 tmp;
+ tmp.x = (a.x < b.x ? a.x : b.x);
+ tmp.y = (a.y < b.y ? a.y : b.y);
+ tmp.z = (a.z < b.z ? a.z : b.z);
+ return tmp;
+}
+#endif
+
+#if !defined(RS_VERSION) || (RS_VERSION <= 20)
+static inline char4 __attribute__((const, overloadable))
+ min(char4 a, char4 b) {
+ char4 tmp;
+ tmp.x = (a.x < b.x ? a.x : b.x);
+ tmp.y = (a.y < b.y ? a.y : b.y);
+ tmp.z = (a.z < b.z ? a.z : b.z);
+ tmp.w = (a.w < b.w ? a.w : b.w);
+ return tmp;
+}
+#endif
+
+#if !defined(RS_VERSION) || (RS_VERSION <= 20)
+static inline uchar4 __attribute__((const, overloadable))
+ min(uchar4 a, uchar4 b) {
+ uchar4 tmp;
+ tmp.x = (a.x < b.x ? a.x : b.x);
+ tmp.y = (a.y < b.y ? a.y : b.y);
+ tmp.z = (a.z < b.z ? a.z : b.z);
+ tmp.w = (a.w < b.w ? a.w : b.w);
+ return tmp;
+}
+#endif
+
+#if !defined(RS_VERSION) || (RS_VERSION <= 20)
+static inline short4 __attribute__((const, overloadable))
+ min(short4 a, short4 b) {
+ short4 tmp;
+ tmp.x = (a.x < b.x ? a.x : b.x);
+ tmp.y = (a.y < b.y ? a.y : b.y);
+ tmp.z = (a.z < b.z ? a.z : b.z);
+ tmp.w = (a.w < b.w ? a.w : b.w);
+ return tmp;
+}
+#endif
+
+#if !defined(RS_VERSION) || (RS_VERSION <= 20)
+static inline ushort4 __attribute__((const, overloadable))
+ min(ushort4 a, ushort4 b) {
+ ushort4 tmp;
+ tmp.x = (a.x < b.x ? a.x : b.x);
+ tmp.y = (a.y < b.y ? a.y : b.y);
+ tmp.z = (a.z < b.z ? a.z : b.z);
+ tmp.w = (a.w < b.w ? a.w : b.w);
+ return tmp;
+}
+#endif
+
+#if !defined(RS_VERSION) || (RS_VERSION <= 20)
+static inline int4 __attribute__((const, overloadable))
+ min(int4 a, int4 b) {
+ int4 tmp;
+ tmp.x = (a.x < b.x ? a.x : b.x);
+ tmp.y = (a.y < b.y ? a.y : b.y);
+ tmp.z = (a.z < b.z ? a.z : b.z);
+ tmp.w = (a.w < b.w ? a.w : b.w);
+ return tmp;
+}
+#endif
+
+#if !defined(RS_VERSION) || (RS_VERSION <= 20)
+static inline uint4 __attribute__((const, overloadable))
+ min(uint4 a, uint4 b) {
+ uint4 tmp;
+ tmp.x = (a.x < b.x ? a.x : b.x);
+ tmp.y = (a.y < b.y ? a.y : b.y);
+ tmp.z = (a.z < b.z ? a.z : b.z);
+ tmp.w = (a.w < b.w ? a.w : b.w);
+ return tmp;
+}
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 21))
+extern char __attribute__((const, overloadable))
+ min(char a, char b);
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 21))
+extern char2 __attribute__((const, overloadable))
+ min(char2 a, char2 b);
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 21))
+extern char3 __attribute__((const, overloadable))
+ min(char3 a, char3 b);
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 21))
+extern char4 __attribute__((const, overloadable))
+ min(char4 a, char4 b);
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 21))
+extern uchar __attribute__((const, overloadable))
+ min(uchar a, uchar b);
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 21))
+extern uchar2 __attribute__((const, overloadable))
+ min(uchar2 a, uchar2 b);
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 21))
+extern uchar3 __attribute__((const, overloadable))
+ min(uchar3 a, uchar3 b);
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 21))
+extern uchar4 __attribute__((const, overloadable))
+ min(uchar4 a, uchar4 b);
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 21))
+extern short __attribute__((const, overloadable))
+ min(short a, short b);
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 21))
+extern short2 __attribute__((const, overloadable))
+ min(short2 a, short2 b);
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 21))
+extern short3 __attribute__((const, overloadable))
+ min(short3 a, short3 b);
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 21))
+extern short4 __attribute__((const, overloadable))
+ min(short4 a, short4 b);
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 21))
+extern ushort __attribute__((const, overloadable))
+ min(ushort a, ushort b);
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 21))
+extern ushort2 __attribute__((const, overloadable))
+ min(ushort2 a, ushort2 b);
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 21))
+extern ushort3 __attribute__((const, overloadable))
+ min(ushort3 a, ushort3 b);
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 21))
+extern ushort4 __attribute__((const, overloadable))
+ min(ushort4 a, ushort4 b);
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 21))
+extern int __attribute__((const, overloadable))
+ min(int a, int b);
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 21))
+extern int2 __attribute__((const, overloadable))
+ min(int2 a, int2 b);
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 21))
+extern int3 __attribute__((const, overloadable))
+ min(int3 a, int3 b);
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 21))
+extern int4 __attribute__((const, overloadable))
+ min(int4 a, int4 b);
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 21))
+extern uint __attribute__((const, overloadable))
+ min(uint a, uint b);
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 21))
+extern uint2 __attribute__((const, overloadable))
+ min(uint2 a, uint2 b);
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 21))
+extern uint3 __attribute__((const, overloadable))
+ min(uint3 a, uint3 b);
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 21))
+extern uint4 __attribute__((const, overloadable))
+ min(uint4 a, uint4 b);
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 21))
+extern long __attribute__((const, overloadable))
+ min(long a, long b);
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 21))
+extern long2 __attribute__((const, overloadable))
+ min(long2 a, long2 b);
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 21))
+extern long3 __attribute__((const, overloadable))
+ min(long3 a, long3 b);
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 21))
+extern long4 __attribute__((const, overloadable))
+ min(long4 a, long4 b);
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 21))
+extern ulong __attribute__((const, overloadable))
+ min(ulong a, ulong b);
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 21))
+extern ulong2 __attribute__((const, overloadable))
+ min(ulong2 a, ulong2 b);
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 21))
+extern ulong3 __attribute__((const, overloadable))
+ min(ulong3 a, ulong3 b);
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 21))
+extern ulong4 __attribute__((const, overloadable))
+ min(ulong4 a, ulong4 b);
+#endif
+
+/*
+ * mix: Mixes two values
+ *
+ * Returns start + ((stop - start) * fraction).
+ *
+ * This can be useful for mixing two values. For example, to create a new color that is
+ * 40% color1 and 60% color2, use mix(color1, color2, 0.6f).
+ */
+extern float __attribute__((const, overloadable))
+ mix(float start, float stop, float fraction);
+
+extern float2 __attribute__((const, overloadable))
+ mix(float2 start, float2 stop, float2 fraction);
+
+extern float3 __attribute__((const, overloadable))
+ mix(float3 start, float3 stop, float3 fraction);
+
+extern float4 __attribute__((const, overloadable))
+ mix(float4 start, float4 stop, float4 fraction);
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 24))
+extern half __attribute__((const, overloadable))
+ mix(half start, half stop, half fraction);
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 24))
+extern half2 __attribute__((const, overloadable))
+ mix(half2 start, half2 stop, half2 fraction);
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 24))
+extern half3 __attribute__((const, overloadable))
+ mix(half3 start, half3 stop, half3 fraction);
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 24))
+extern half4 __attribute__((const, overloadable))
+ mix(half4 start, half4 stop, half4 fraction);
+#endif
+
+extern float2 __attribute__((const, overloadable))
+ mix(float2 start, float2 stop, float fraction);
+
+extern float3 __attribute__((const, overloadable))
+ mix(float3 start, float3 stop, float fraction);
+
+extern float4 __attribute__((const, overloadable))
+ mix(float4 start, float4 stop, float fraction);
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 24))
+extern half2 __attribute__((const, overloadable))
+ mix(half2 start, half2 stop, half fraction);
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 24))
+extern half3 __attribute__((const, overloadable))
+ mix(half3 start, half3 stop, half fraction);
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 24))
+extern half4 __attribute__((const, overloadable))
+ mix(half4 start, half4 stop, half fraction);
+#endif
+
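+/*
+ * Usage sketch (illustrative only, not part of the original header):
+ * blending two colors, 40% color1 and 60% color2, as in the description
+ * above. color1 and color2 are hypothetical float4 values.
+ *
+ *   float4 blended = mix(color1, color2, 0.6f);
+ *   // Per-channel fractions are also possible with the vector overloads:
+ *   float4 fractions = {0.f, 0.25f, 0.5f, 1.f};
+ *   float4 graded = mix(color1, color2, fractions);
+ */
+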
+/*
+ * modf: Integral and fractional components
+ *
+ * Returns the integral and fractional components of a number.
+ *
+ * Both components will have the same sign as v. For example, for an input of -3.72f,
+ * *integral_part will be set to -3.f and -0.72f will be returned.
+ *
+ * Parameters:
+ * v: Source value.
+ * integral_part: *integral_part will be set to the integral portion of the number.
+ *
+ * Returns: Floating point portion of the value.
+ */
+extern float __attribute__((overloadable))
+ modf(float v, float* integral_part);
+
+extern float2 __attribute__((overloadable))
+ modf(float2 v, float2* integral_part);
+
+extern float3 __attribute__((overloadable))
+ modf(float3 v, float3* integral_part);
+
+extern float4 __attribute__((overloadable))
+ modf(float4 v, float4* integral_part);
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 24))
+extern half __attribute__((overloadable))
+ modf(half v, half* integral_part);
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 24))
+extern half2 __attribute__((overloadable))
+ modf(half2 v, half2* integral_part);
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 24))
+extern half3 __attribute__((overloadable))
+ modf(half3 v, half3* integral_part);
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 24))
+extern half4 __attribute__((overloadable))
+ modf(half4 v, half4* integral_part);
+#endif
+
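+/*
+ * Worked example (illustrative only, not part of the original header):
+ *
+ *   float integral;
+ *   float fractional = modf(-3.72f, &integral);
+ *   // integral == -3.f, fractional == -0.72f; both share the sign of the input
+ */
+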
+/*
+ * nan: Not a Number
+ *
+ * Returns a NaN value (Not a Number).
+ *
+ * Parameters:
+ * v: Not used.
+ */
+extern float __attribute__((const, overloadable))
+ nan(uint v);
+
+/*
+ * nan_half: Not a Number
+ *
+ * Returns a half-precision floating point NaN value (Not a Number).
+ */
+#if (defined(RS_VERSION) && (RS_VERSION >= 24))
+extern half __attribute__((const, overloadable))
+ nan_half(void);
+#endif
+
+/*
+ * native_acos: Approximate inverse cosine
+ *
+ * Returns the approximate inverse cosine, in radians.
+ *
+ * This function yields undefined results for input values less than -1 or greater than 1.
+ *
+ * See also acos().
+ */
+#if (defined(RS_VERSION) && (RS_VERSION >= 21))
+extern float __attribute__((const, overloadable))
+ native_acos(float v);
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 21))
+extern float2 __attribute__((const, overloadable))
+ native_acos(float2 v);
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 21))
+extern float3 __attribute__((const, overloadable))
+ native_acos(float3 v);
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 21))
+extern float4 __attribute__((const, overloadable))
+ native_acos(float4 v);
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 24))
+extern half __attribute__((const, overloadable))
+ native_acos(half v);
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 24))
+extern half2 __attribute__((const, overloadable))
+ native_acos(half2 v);
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 24))
+extern half3 __attribute__((const, overloadable))
+ native_acos(half3 v);
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 24))
+extern half4 __attribute__((const, overloadable))
+ native_acos(half4 v);
+#endif
+
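+/*
+ * Usage sketch (illustrative only, not part of the original header):
+ * the native_* variants trade accuracy for speed. Assuming the clamp()
+ * overloads from this header, clamping guards the domain, since inputs
+ * outside [-1, 1] are undefined. The variable names are hypothetical.
+ *
+ *   float angle = native_acos(clamp(cosine, -1.f, 1.f));
+ */
+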
+/*
+ * native_acosh: Approximate inverse hyperbolic cosine
+ *
+ * Returns the approximate inverse hyperbolic cosine, in radians.
+ *
+ * See also acosh().
+ */
+#if (defined(RS_VERSION) && (RS_VERSION >= 21))
+extern float __attribute__((const, overloadable))
+ native_acosh(float v);
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 21))
+extern float2 __attribute__((const, overloadable))
+ native_acosh(float2 v);
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 21))
+extern float3 __attribute__((const, overloadable))
+ native_acosh(float3 v);
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 21))
+extern float4 __attribute__((const, overloadable))
+ native_acosh(float4 v);
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 24))
+extern half __attribute__((const, overloadable))
+ native_acosh(half v);
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 24))
+extern half2 __attribute__((const, overloadable))
+ native_acosh(half2 v);
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 24))
+extern half3 __attribute__((const, overloadable))
+ native_acosh(half3 v);
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 24))
+extern half4 __attribute__((const, overloadable))
+ native_acosh(half4 v);
+#endif
+
+/*
+ * native_acospi: Approximate inverse cosine divided by pi
+ *
+ * Returns the approximate inverse cosine in radians, divided by pi.
+ *
+ * To get an inverse cosine measured in degrees, use acospi(a) * 180.f.
+ *
+ * This function yields undefined results for input values less than -1 or greater than 1.
+ *
+ * See also acospi().
+ */
+#if (defined(RS_VERSION) && (RS_VERSION >= 21))
+extern float __attribute__((const, overloadable))
+ native_acospi(float v);
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 21))
+extern float2 __attribute__((const, overloadable))
+ native_acospi(float2 v);
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 21))
+extern float3 __attribute__((const, overloadable))
+ native_acospi(float3 v);
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 21))
+extern float4 __attribute__((const, overloadable))
+ native_acospi(float4 v);
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 24))
+extern half __attribute__((const, overloadable))
+ native_acospi(half v);
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 24))
+extern half2 __attribute__((const, overloadable))
+ native_acospi(half2 v);
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 24))
+extern half3 __attribute__((const, overloadable))
+ native_acospi(half3 v);
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 24))
+extern half4 __attribute__((const, overloadable))
+ native_acospi(half4 v);
+#endif
+
+/*
+ * native_asin: Approximate inverse sine
+ *
+ * Returns the approximate inverse sine, in radians.
+ *
+ * This function yields undefined results for input values less than -1 or greater than 1.
+ *
+ * See also asin().
+ */
+#if (defined(RS_VERSION) && (RS_VERSION >= 21))
+extern float __attribute__((const, overloadable))
+ native_asin(float v);
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 21))
+extern float2 __attribute__((const, overloadable))
+ native_asin(float2 v);
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 21))
+extern float3 __attribute__((const, overloadable))
+ native_asin(float3 v);
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 21))
+extern float4 __attribute__((const, overloadable))
+ native_asin(float4 v);
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 24))
+extern half __attribute__((const, overloadable))
+ native_asin(half v);
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 24))
+extern half2 __attribute__((const, overloadable))
+ native_asin(half2 v);
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 24))
+extern half3 __attribute__((const, overloadable))
+ native_asin(half3 v);
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 24))
+extern half4 __attribute__((const, overloadable))
+ native_asin(half4 v);
+#endif
+
+/*
+ * native_asinh: Approximate inverse hyperbolic sine
+ *
+ * Returns the approximate inverse hyperbolic sine, in radians.
+ *
+ * See also asinh().
+ */
+#if (defined(RS_VERSION) && (RS_VERSION >= 21))
+extern float __attribute__((const, overloadable))
+ native_asinh(float v);
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 21))
+extern float2 __attribute__((const, overloadable))
+ native_asinh(float2 v);
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 21))
+extern float3 __attribute__((const, overloadable))
+ native_asinh(float3 v);
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 21))
+extern float4 __attribute__((const, overloadable))
+ native_asinh(float4 v);
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 24))
+extern half __attribute__((const, overloadable))
+ native_asinh(half v);
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 24))
+extern half2 __attribute__((const, overloadable))
+ native_asinh(half2 v);
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 24))
+extern half3 __attribute__((const, overloadable))
+ native_asinh(half3 v);
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 24))
+extern half4 __attribute__((const, overloadable))
+ native_asinh(half4 v);
+#endif
+
+/*
+ * native_asinpi: Approximate inverse sine divided by pi
+ *
+ * Returns the approximate inverse sine in radians, divided by pi.
+ *
+ * To get an inverse sine measured in degrees, use asinpi(a) * 180.f.
+ *
+ * This function yields undefined results for input values less than -1 or greater than 1.
+ *
+ * See also asinpi().
+ */
+#if (defined(RS_VERSION) && (RS_VERSION >= 21))
+extern float __attribute__((const, overloadable))
+ native_asinpi(float v);
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 21))
+extern float2 __attribute__((const, overloadable))
+ native_asinpi(float2 v);
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 21))
+extern float3 __attribute__((const, overloadable))
+ native_asinpi(float3 v);
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 21))
+extern float4 __attribute__((const, overloadable))
+ native_asinpi(float4 v);
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 24))
+extern half __attribute__((const, overloadable))
+ native_asinpi(half v);
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 24))
+extern half2 __attribute__((const, overloadable))
+ native_asinpi(half2 v);
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 24))
+extern half3 __attribute__((const, overloadable))
+ native_asinpi(half3 v);
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 24))
+extern half4 __attribute__((const, overloadable))
+ native_asinpi(half4 v);
+#endif
+
+/*
+ * native_atan: Approximate inverse tangent
+ *
+ * Returns the approximate inverse tangent, in radians.
+ *
+ * See also atan().
+ */
+#if (defined(RS_VERSION) && (RS_VERSION >= 21))
+extern float __attribute__((const, overloadable))
+ native_atan(float v);
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 21))
+extern float2 __attribute__((const, overloadable))
+ native_atan(float2 v);
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 21))
+extern float3 __attribute__((const, overloadable))
+ native_atan(float3 v);
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 21))
+extern float4 __attribute__((const, overloadable))
+ native_atan(float4 v);
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 24))
+extern half __attribute__((const, overloadable))
+ native_atan(half v);
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 24))
+extern half2 __attribute__((const, overloadable))
+ native_atan(half2 v);
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 24))
+extern half3 __attribute__((const, overloadable))
+ native_atan(half3 v);
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 24))
+extern half4 __attribute__((const, overloadable))
+ native_atan(half4 v);
+#endif
+
+/*
+ * native_atan2: Approximate inverse tangent of a ratio
+ *
+ * Returns the approximate inverse tangent of (numerator / denominator), in radians.
+ *
+ * See also atan2().
+ *
+ * Parameters:
+ * numerator: Numerator.
+ * denominator: Denominator. Can be 0.
+ */
+#if (defined(RS_VERSION) && (RS_VERSION >= 21))
+extern float __attribute__((const, overloadable))
+ native_atan2(float numerator, float denominator);
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 21))
+extern float2 __attribute__((const, overloadable))
+ native_atan2(float2 numerator, float2 denominator);
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 21))
+extern float3 __attribute__((const, overloadable))
+ native_atan2(float3 numerator, float3 denominator);
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 21))
+extern float4 __attribute__((const, overloadable))
+ native_atan2(float4 numerator, float4 denominator);
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 24))
+extern half __attribute__((const, overloadable))
+ native_atan2(half numerator, half denominator);
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 24))
+extern half2 __attribute__((const, overloadable))
+ native_atan2(half2 numerator, half2 denominator);
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 24))
+extern half3 __attribute__((const, overloadable))
+ native_atan2(half3 numerator, half3 denominator);
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 24))
+extern half4 __attribute__((const, overloadable))
+ native_atan2(half4 numerator, half4 denominator);
+#endif
+
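+/*
+ * A minimal usage sketch, assuming an RS_VERSION >= 21 target; the helper
+ * name polar_angle is illustrative only:
+ *
+ *   // Approximate angle of a 2-D vector, in radians, from the +x axis.
+ *   static float polar_angle(float2 p) {
+ *     return native_atan2(p.y, p.x);  // numerator = y, denominator = x
+ *   }
+ */
+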
+/*
+ * native_atan2pi: Approximate inverse tangent of a ratio, divided by pi
+ *
+ * Returns the approximate inverse tangent of (numerator / denominator),
+ * in radians, divided by pi.
+ *
+ * To get an inverse tangent measured in degrees, use atan2pi(n, d) * 180.f.
+ *
+ * See also atan2pi().
+ *
+ * Parameters:
+ * numerator: Numerator.
+ * denominator: Denominator. Can be 0.
+ */
+#if (defined(RS_VERSION) && (RS_VERSION >= 21))
+extern float __attribute__((const, overloadable))
+ native_atan2pi(float numerator, float denominator);
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 21))
+extern float2 __attribute__((const, overloadable))
+ native_atan2pi(float2 numerator, float2 denominator);
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 21))
+extern float3 __attribute__((const, overloadable))
+ native_atan2pi(float3 numerator, float3 denominator);
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 21))
+extern float4 __attribute__((const, overloadable))
+ native_atan2pi(float4 numerator, float4 denominator);
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 24))
+extern half __attribute__((const, overloadable))
+ native_atan2pi(half numerator, half denominator);
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 24))
+extern half2 __attribute__((const, overloadable))
+ native_atan2pi(half2 numerator, half2 denominator);
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 24))
+extern half3 __attribute__((const, overloadable))
+ native_atan2pi(half3 numerator, half3 denominator);
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 24))
+extern half4 __attribute__((const, overloadable))
+ native_atan2pi(half4 numerator, half4 denominator);
+#endif
+
+/*
+ * native_atanh: Approximate inverse hyperbolic tangent
+ *
+ * Returns the approximate inverse hyperbolic tangent, in radians.
+ *
+ * See also atanh().
+ */
+#if (defined(RS_VERSION) && (RS_VERSION >= 21))
+extern float __attribute__((const, overloadable))
+ native_atanh(float v);
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 21))
+extern float2 __attribute__((const, overloadable))
+ native_atanh(float2 v);
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 21))
+extern float3 __attribute__((const, overloadable))
+ native_atanh(float3 v);
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 21))
+extern float4 __attribute__((const, overloadable))
+ native_atanh(float4 v);
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 24))
+extern half __attribute__((const, overloadable))
+ native_atanh(half v);
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 24))
+extern half2 __attribute__((const, overloadable))
+ native_atanh(half2 v);
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 24))
+extern half3 __attribute__((const, overloadable))
+ native_atanh(half3 v);
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 24))
+extern half4 __attribute__((const, overloadable))
+ native_atanh(half4 v);
+#endif
+
+/*
+ * native_atanpi: Approximate inverse tangent divided by pi
+ *
+ * Returns the approximate inverse tangent in radians, divided by pi.
+ *
+ * To get an inverse tangent measured in degrees, use atanpi(a) * 180.f.
+ *
+ * See also atanpi().
+ */
+#if (defined(RS_VERSION) && (RS_VERSION >= 21))
+extern float __attribute__((const, overloadable))
+ native_atanpi(float v);
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 21))
+extern float2 __attribute__((const, overloadable))
+ native_atanpi(float2 v);
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 21))
+extern float3 __attribute__((const, overloadable))
+ native_atanpi(float3 v);
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 21))
+extern float4 __attribute__((const, overloadable))
+ native_atanpi(float4 v);
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 24))
+extern half __attribute__((const, overloadable))
+ native_atanpi(half v);
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 24))
+extern half2 __attribute__((const, overloadable))
+ native_atanpi(half2 v);
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 24))
+extern half3 __attribute__((const, overloadable))
+ native_atanpi(half3 v);
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 24))
+extern half4 __attribute__((const, overloadable))
+ native_atanpi(half4 v);
+#endif
+
+/*
+ * native_cbrt: Approximate cube root
+ *
+ * Returns the approximate cube root.
+ *
+ * See also cbrt().
+ */
+#if (defined(RS_VERSION) && (RS_VERSION >= 21))
+extern float __attribute__((const, overloadable))
+ native_cbrt(float v);
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 21))
+extern float2 __attribute__((const, overloadable))
+ native_cbrt(float2 v);
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 21))
+extern float3 __attribute__((const, overloadable))
+ native_cbrt(float3 v);
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 21))
+extern float4 __attribute__((const, overloadable))
+ native_cbrt(float4 v);
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 24))
+extern half __attribute__((const, overloadable))
+ native_cbrt(half v);
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 24))
+extern half2 __attribute__((const, overloadable))
+ native_cbrt(half2 v);
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 24))
+extern half3 __attribute__((const, overloadable))
+ native_cbrt(half3 v);
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 24))
+extern half4 __attribute__((const, overloadable))
+ native_cbrt(half4 v);
+#endif
+
+/*
+ * native_cos: Approximate cosine
+ *
+ * Returns the approximate cosine of an angle measured in radians.
+ *
+ * See also cos().
+ */
+#if (defined(RS_VERSION) && (RS_VERSION >= 21))
+extern float __attribute__((const, overloadable))
+ native_cos(float v);
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 21))
+extern float2 __attribute__((const, overloadable))
+ native_cos(float2 v);
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 21))
+extern float3 __attribute__((const, overloadable))
+ native_cos(float3 v);
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 21))
+extern float4 __attribute__((const, overloadable))
+ native_cos(float4 v);
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 24))
+extern half __attribute__((const, overloadable))
+ native_cos(half v);
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 24))
+extern half2 __attribute__((const, overloadable))
+ native_cos(half2 v);
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 24))
+extern half3 __attribute__((const, overloadable))
+ native_cos(half3 v);
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 24))
+extern half4 __attribute__((const, overloadable))
+ native_cos(half4 v);
+#endif
+
+/*
+ * native_cosh: Approximate hyperbolic cosine
+ *
+ * Returns the approximate hyperbolic cosine.
+ *
+ * See also cosh().
+ */
+#if (defined(RS_VERSION) && (RS_VERSION >= 21))
+extern float __attribute__((const, overloadable))
+ native_cosh(float v);
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 21))
+extern float2 __attribute__((const, overloadable))
+ native_cosh(float2 v);
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 21))
+extern float3 __attribute__((const, overloadable))
+ native_cosh(float3 v);
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 21))
+extern float4 __attribute__((const, overloadable))
+ native_cosh(float4 v);
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 24))
+extern half __attribute__((const, overloadable))
+ native_cosh(half v);
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 24))
+extern half2 __attribute__((const, overloadable))
+ native_cosh(half2 v);
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 24))
+extern half3 __attribute__((const, overloadable))
+ native_cosh(half3 v);
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 24))
+extern half4 __attribute__((const, overloadable))
+ native_cosh(half4 v);
+#endif
+
+/*
+ * native_cospi: Approximate cosine of a number multiplied by pi
+ *
+ * Returns the approximate cosine of (v * pi), where (v * pi) is measured in radians.
+ *
+ * To get the cosine of a value measured in degrees, call cospi(v / 180.f).
+ *
+ * See also cospi().
+ */
+#if (defined(RS_VERSION) && (RS_VERSION >= 21))
+extern float __attribute__((const, overloadable))
+ native_cospi(float v);
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 21))
+extern float2 __attribute__((const, overloadable))
+ native_cospi(float2 v);
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 21))
+extern float3 __attribute__((const, overloadable))
+ native_cospi(float3 v);
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 21))
+extern float4 __attribute__((const, overloadable))
+ native_cospi(float4 v);
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 24))
+extern half __attribute__((const, overloadable))
+ native_cospi(half v);
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 24))
+extern half2 __attribute__((const, overloadable))
+ native_cospi(half2 v);
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 24))
+extern half3 __attribute__((const, overloadable))
+ native_cospi(half3 v);
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 24))
+extern half4 __attribute__((const, overloadable))
+ native_cospi(half4 v);
+#endif
+
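+/*
+ * A minimal sketch of the degrees conversion noted above, assuming an
+ * RS_VERSION >= 21 target; cos_degrees is an illustrative name:
+ *
+ *   static float cos_degrees(float deg) {
+ *     return native_cospi(deg / 180.f);  // (deg / 180) * pi radians
+ *   }
+ */
+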
+/*
+ * native_divide: Approximate division
+ *
+ * Computes the approximate division of two values.
+ */
+#if (defined(RS_VERSION) && (RS_VERSION >= 21))
+extern float __attribute__((const, overloadable))
+ native_divide(float left_vector, float right_vector);
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 21))
+extern float2 __attribute__((const, overloadable))
+ native_divide(float2 left_vector, float2 right_vector);
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 21))
+extern float3 __attribute__((const, overloadable))
+ native_divide(float3 left_vector, float3 right_vector);
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 21))
+extern float4 __attribute__((const, overloadable))
+ native_divide(float4 left_vector, float4 right_vector);
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 24))
+extern half __attribute__((const, overloadable))
+ native_divide(half left_vector, half right_vector);
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 24))
+extern half2 __attribute__((const, overloadable))
+ native_divide(half2 left_vector, half2 right_vector);
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 24))
+extern half3 __attribute__((const, overloadable))
+ native_divide(half3 left_vector, half3 right_vector);
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 24))
+extern half4 __attribute__((const, overloadable))
+ native_divide(half4 left_vector, half4 right_vector);
+#endif
+
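+/*
+ * A minimal sketch, assuming an RS_VERSION >= 21 target; scale_rgba is an
+ * illustrative name:
+ *
+ *   // Fast, approximate per-component division of a color by a weight.
+ *   static float4 scale_rgba(float4 color, float4 weight) {
+ *     return native_divide(color, weight);
+ *   }
+ */
+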
+/*
+ * native_exp: Approximate e raised to a number
+ *
+ * Fast approximate exp.
+ *
+ * It is valid for inputs from -86.f to 86.f. The precision is no worse than what would be
+ * expected from using 16-bit floating point values.
+ *
+ * See also exp().
+ */
+#if (defined(RS_VERSION) && (RS_VERSION >= 18))
+extern float __attribute__((const, overloadable))
+ native_exp(float v);
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 18))
+extern float2 __attribute__((const, overloadable))
+ native_exp(float2 v);
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 18))
+extern float3 __attribute__((const, overloadable))
+ native_exp(float3 v);
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 18))
+extern float4 __attribute__((const, overloadable))
+ native_exp(float4 v);
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 24))
+extern half __attribute__((const, overloadable))
+ native_exp(half v);
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 24))
+extern half2 __attribute__((const, overloadable))
+ native_exp(half2 v);
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 24))
+extern half3 __attribute__((const, overloadable))
+ native_exp(half3 v);
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 24))
+extern half4 __attribute__((const, overloadable))
+ native_exp(half4 v);
+#endif
+
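+/*
+ * A minimal sketch that stays inside the documented input range, assuming an
+ * RS_VERSION >= 18 target; safe_native_exp is an illustrative name:
+ *
+ *   static float safe_native_exp(float v) {
+ *     // native_exp() is only specified for inputs in [-86.f, 86.f].
+ *     return native_exp(clamp(v, -86.f, 86.f));
+ *   }
+ */
+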
+/*
+ * native_exp10: Approximate 10 raised to a number
+ *
+ * Fast approximate exp10.
+ *
+ * It is valid for inputs from -37.f to 37.f. The precision is no worse than what would be
+ * expected from using 16-bit floating point values.
+ *
+ * See also exp10().
+ */
+#if (defined(RS_VERSION) && (RS_VERSION >= 18))
+extern float __attribute__((const, overloadable))
+ native_exp10(float v);
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 18))
+extern float2 __attribute__((const, overloadable))
+ native_exp10(float2 v);
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 18))
+extern float3 __attribute__((const, overloadable))
+ native_exp10(float3 v);
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 18))
+extern float4 __attribute__((const, overloadable))
+ native_exp10(float4 v);
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 24))
+extern half __attribute__((const, overloadable))
+ native_exp10(half v);
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 24))
+extern half2 __attribute__((const, overloadable))
+ native_exp10(half2 v);
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 24))
+extern half3 __attribute__((const, overloadable))
+ native_exp10(half3 v);
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 24))
+extern half4 __attribute__((const, overloadable))
+ native_exp10(half4 v);
+#endif
+
+/*
+ * native_exp2: Approximate 2 raised to a number
+ *
+ * Fast approximate exp2.
+ *
+ * It is valid for inputs from -125.f to 125.f. The precision is no worse than what would be
+ * expected from using 16-bit floating point values.
+ *
+ * See also exp2().
+ */
+#if (defined(RS_VERSION) && (RS_VERSION >= 18))
+extern float __attribute__((const, overloadable))
+ native_exp2(float v);
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 18))
+extern float2 __attribute__((const, overloadable))
+ native_exp2(float2 v);
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 18))
+extern float3 __attribute__((const, overloadable))
+ native_exp2(float3 v);
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 18))
+extern float4 __attribute__((const, overloadable))
+ native_exp2(float4 v);
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 24))
+extern half __attribute__((const, overloadable))
+ native_exp2(half v);
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 24))
+extern half2 __attribute__((const, overloadable))
+ native_exp2(half2 v);
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 24))
+extern half3 __attribute__((const, overloadable))
+ native_exp2(half3 v);
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 24))
+extern half4 __attribute__((const, overloadable))
+ native_exp2(half4 v);
+#endif
+
+/*
+ * native_expm1: Approximate e raised to a number minus one
+ *
+ * Returns the approximate (e ^ v) - 1.
+ *
+ * See also expm1().
+ */
+#if (defined(RS_VERSION) && (RS_VERSION >= 21))
+extern float __attribute__((const, overloadable))
+ native_expm1(float v);
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 21))
+extern float2 __attribute__((const, overloadable))
+ native_expm1(float2 v);
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 21))
+extern float3 __attribute__((const, overloadable))
+ native_expm1(float3 v);
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 21))
+extern float4 __attribute__((const, overloadable))
+ native_expm1(float4 v);
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 24))
+extern half __attribute__((const, overloadable))
+ native_expm1(half v);
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 24))
+extern half2 __attribute__((const, overloadable))
+ native_expm1(half2 v);
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 24))
+extern half3 __attribute__((const, overloadable))
+ native_expm1(half3 v);
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 24))
+extern half4 __attribute__((const, overloadable))
+ native_expm1(half4 v);
+#endif
+
+/*
+ * native_hypot: Approximate hypotenuse
+ *
+ * Returns the approximate native_sqrt(a * a + b * b).
+ *
+ * See also hypot().
+ */
+#if (defined(RS_VERSION) && (RS_VERSION >= 21))
+extern float __attribute__((const, overloadable))
+ native_hypot(float a, float b);
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 21))
+extern float2 __attribute__((const, overloadable))
+ native_hypot(float2 a, float2 b);
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 21))
+extern float3 __attribute__((const, overloadable))
+ native_hypot(float3 a, float3 b);
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 21))
+extern float4 __attribute__((const, overloadable))
+ native_hypot(float4 a, float4 b);
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 24))
+extern half __attribute__((const, overloadable))
+ native_hypot(half a, half b);
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 24))
+extern half2 __attribute__((const, overloadable))
+ native_hypot(half2 a, half2 b);
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 24))
+extern half3 __attribute__((const, overloadable))
+ native_hypot(half3 a, half3 b);
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 24))
+extern half4 __attribute__((const, overloadable))
+ native_hypot(half4 a, half4 b);
+#endif
+
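+/*
+ * A minimal sketch, assuming an RS_VERSION >= 21 target; approx_distance is
+ * an illustrative name:
+ *
+ *   // Approximate Euclidean distance between two points.
+ *   static float approx_distance(float2 p, float2 q) {
+ *     return native_hypot(p.x - q.x, p.y - q.y);
+ *   }
+ */
+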
+/*
+ * native_log: Approximate natural logarithm
+ *
+ * Fast approximate log.
+ *
+ * It is not accurate for values very close to zero.
+ *
+ * See also log().
+ */
+#if (defined(RS_VERSION) && (RS_VERSION >= 18))
+extern float __attribute__((const, overloadable))
+ native_log(float v);
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 18))
+extern float2 __attribute__((const, overloadable))
+ native_log(float2 v);
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 18))
+extern float3 __attribute__((const, overloadable))
+ native_log(float3 v);
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 18))
+extern float4 __attribute__((const, overloadable))
+ native_log(float4 v);
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 24))
+extern half __attribute__((const, overloadable))
+ native_log(half v);
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 24))
+extern half2 __attribute__((const, overloadable))
+ native_log(half2 v);
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 24))
+extern half3 __attribute__((const, overloadable))
+ native_log(half3 v);
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 24))
+extern half4 __attribute__((const, overloadable))
+ native_log(half4 v);
+#endif
+
+/*
+ * native_log10: Approximate base 10 logarithm
+ *
+ * Fast approximate log10.
+ *
+ * It is not accurate for values very close to zero.
+ *
+ * See also log10().
+ */
+#if (defined(RS_VERSION) && (RS_VERSION >= 18))
+extern float __attribute__((const, overloadable))
+ native_log10(float v);
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 18))
+extern float2 __attribute__((const, overloadable))
+ native_log10(float2 v);
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 18))
+extern float3 __attribute__((const, overloadable))
+ native_log10(float3 v);
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 18))
+extern float4 __attribute__((const, overloadable))
+ native_log10(float4 v);
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 24))
+extern half __attribute__((const, overloadable))
+ native_log10(half v);
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 24))
+extern half2 __attribute__((const, overloadable))
+ native_log10(half2 v);
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 24))
+extern half3 __attribute__((const, overloadable))
+ native_log10(half3 v);
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 24))
+extern half4 __attribute__((const, overloadable))
+ native_log10(half4 v);
+#endif
+
+/*
+ * native_log1p: Approximate natural logarithm of a value plus 1
+ *
+ * Returns the approximate natural logarithm of (v + 1.0f).
+ *
+ * See also log1p().
+ */
+#if (defined(RS_VERSION) && (RS_VERSION >= 21))
+extern float __attribute__((const, overloadable))
+ native_log1p(float v);
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 21))
+extern float2 __attribute__((const, overloadable))
+ native_log1p(float2 v);
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 21))
+extern float3 __attribute__((const, overloadable))
+ native_log1p(float3 v);
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 21))
+extern float4 __attribute__((const, overloadable))
+ native_log1p(float4 v);
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 24))
+extern half __attribute__((const, overloadable))
+ native_log1p(half v);
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 24))
+extern half2 __attribute__((const, overloadable))
+ native_log1p(half2 v);
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 24))
+extern half3 __attribute__((const, overloadable))
+ native_log1p(half3 v);
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 24))
+extern half4 __attribute__((const, overloadable))
+ native_log1p(half4 v);
+#endif
+
+/*
+ * native_log2: Approximate base 2 logarithm
+ *
+ * Fast approximate log2.
+ *
+ * It is not accurate for values very close to zero.
+ *
+ * See also log2().
+ */
+#if (defined(RS_VERSION) && (RS_VERSION >= 18))
+extern float __attribute__((const, overloadable))
+ native_log2(float v);
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 18))
+extern float2 __attribute__((const, overloadable))
+ native_log2(float2 v);
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 18))
+extern float3 __attribute__((const, overloadable))
+ native_log2(float3 v);
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 18))
+extern float4 __attribute__((const, overloadable))
+ native_log2(float4 v);
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 24))
+extern half __attribute__((const, overloadable))
+ native_log2(half v);
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 24))
+extern half2 __attribute__((const, overloadable))
+ native_log2(half2 v);
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 24))
+extern half3 __attribute__((const, overloadable))
+ native_log2(half3 v);
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 24))
+extern half4 __attribute__((const, overloadable))
+ native_log2(half4 v);
+#endif
+
+/*
+ * native_powr: Approximate positive base raised to an exponent
+ *
+ * Fast approximate (base ^ exponent).
+ *
+ * See also powr().
+ *
+ * Parameters:
+ * base: Must be between 0.f and 256.f. The function is not accurate for values very close to zero.
+ * exponent: Must be between -15.f and 15.f.
+ */
+#if (defined(RS_VERSION) && (RS_VERSION >= 18))
+extern float __attribute__((const, overloadable))
+ native_powr(float base, float exponent);
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 18))
+extern float2 __attribute__((const, overloadable))
+ native_powr(float2 base, float2 exponent);
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 18))
+extern float3 __attribute__((const, overloadable))
+ native_powr(float3 base, float3 exponent);
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 18))
+extern float4 __attribute__((const, overloadable))
+ native_powr(float4 base, float4 exponent);
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 24))
+extern half __attribute__((const, overloadable))
+ native_powr(half base, half exponent);
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 24))
+extern half2 __attribute__((const, overloadable))
+ native_powr(half2 base, half2 exponent);
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 24))
+extern half3 __attribute__((const, overloadable))
+ native_powr(half3 base, half3 exponent);
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 24))
+extern half4 __attribute__((const, overloadable))
+ native_powr(half4 base, half4 exponent);
+#endif
+
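+/*
+ * A minimal sketch, assuming an RS_VERSION >= 18 target; apply_gamma is an
+ * illustrative name. The arguments respect the documented limits: a
+ * normalized channel lies in [0.f, 1.f] and typical gamma values are well
+ * within [-15.f, 15.f]:
+ *
+ *   static float apply_gamma(float channel, float gamma) {
+ *     return native_powr(channel, gamma);
+ *   }
+ */
+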
+/*
+ * native_recip: Approximate reciprocal
+ *
+ * Returns the approximate reciprocal of a value.
+ *
+ * See also half_recip().
+ */
+#if (defined(RS_VERSION) && (RS_VERSION >= 21))
+extern float __attribute__((const, overloadable))
+ native_recip(float v);
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 21))
+extern float2 __attribute__((const, overloadable))
+ native_recip(float2 v);
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 21))
+extern float3 __attribute__((const, overloadable))
+ native_recip(float3 v);
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 21))
+extern float4 __attribute__((const, overloadable))
+ native_recip(float4 v);
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 24))
+extern half __attribute__((const, overloadable))
+ native_recip(half v);
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 24))
+extern half2 __attribute__((const, overloadable))
+ native_recip(half2 v);
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 24))
+extern half3 __attribute__((const, overloadable))
+ native_recip(half3 v);
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 24))
+extern half4 __attribute__((const, overloadable))
+ native_recip(half4 v);
+#endif
+
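+/*
+ * A minimal sketch, assuming an RS_VERSION >= 21 target; rescale is an
+ * illustrative name. Multiplying by one approximate reciprocal can replace
+ * repeated division by the same value:
+ *
+ *   static float4 rescale(float4 v, float scale) {
+ *     float inv = native_recip(scale);  // computed once
+ *     return v * inv;                   // instead of v / scale
+ *   }
+ */
+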
+/*
+ * native_rootn: Approximate nth root
+ *
+ * Computes the approximate Nth root of a value.
+ *
+ * See also rootn().
+ */
+#if (defined(RS_VERSION) && (RS_VERSION >= 21))
+extern float __attribute__((const, overloadable))
+ native_rootn(float v, int n);
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 21))
+extern float2 __attribute__((const, overloadable))
+ native_rootn(float2 v, int2 n);
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 21))
+extern float3 __attribute__((const, overloadable))
+ native_rootn(float3 v, int3 n);
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 21))
+extern float4 __attribute__((const, overloadable))
+ native_rootn(float4 v, int4 n);
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 24))
+extern half __attribute__((const, overloadable))
+ native_rootn(half v, int n);
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 24))
+extern half2 __attribute__((const, overloadable))
+ native_rootn(half2 v, int2 n);
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 24))
+extern half3 __attribute__((const, overloadable))
+ native_rootn(half3 v, int3 n);
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 24))
+extern half4 __attribute__((const, overloadable))
+ native_rootn(half4 v, int4 n);
+#endif
+
+/*
+ * native_rsqrt: Approximate reciprocal of a square root
+ *
+ * Returns approximate (1 / sqrt(v)).
+ *
+ * See also rsqrt(), half_rsqrt().
+ */
+#if (defined(RS_VERSION) && (RS_VERSION >= 21))
+extern float __attribute__((const, overloadable))
+ native_rsqrt(float v);
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 21))
+extern float2 __attribute__((const, overloadable))
+ native_rsqrt(float2 v);
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 21))
+extern float3 __attribute__((const, overloadable))
+ native_rsqrt(float3 v);
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 21))
+extern float4 __attribute__((const, overloadable))
+ native_rsqrt(float4 v);
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 24))
+extern half __attribute__((const, overloadable))
+ native_rsqrt(half v);
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 24))
+extern half2 __attribute__((const, overloadable))
+ native_rsqrt(half2 v);
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 24))
+extern half3 __attribute__((const, overloadable))
+ native_rsqrt(half3 v);
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 24))
+extern half4 __attribute__((const, overloadable))
+ native_rsqrt(half4 v);
+#endif
+
+/*
+ * native_sin: Approximate sine
+ *
+ * Returns the approximate sine of an angle measured in radians.
+ *
+ * See also sin().
+ */
+#if (defined(RS_VERSION) && (RS_VERSION >= 21))
+extern float __attribute__((const, overloadable))
+ native_sin(float v);
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 21))
+extern float2 __attribute__((const, overloadable))
+ native_sin(float2 v);
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 21))
+extern float3 __attribute__((const, overloadable))
+ native_sin(float3 v);
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 21))
+extern float4 __attribute__((const, overloadable))
+ native_sin(float4 v);
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 24))
+extern half __attribute__((const, overloadable))
+ native_sin(half v);
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 24))
+extern half2 __attribute__((const, overloadable))
+ native_sin(half2 v);
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 24))
+extern half3 __attribute__((const, overloadable))
+ native_sin(half3 v);
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 24))
+extern half4 __attribute__((const, overloadable))
+ native_sin(half4 v);
+#endif
+
+/*
+ * native_sincos: Approximate sine and cosine
+ *
+ * Returns the approximate sine and cosine of a value.
+ *
+ * See also sincos().
+ *
+ * Parameters:
+ * v: Incoming value in radians.
+ * cos: *cos will be set to the cosine value.
+ *
+ * Returns: Sine.
+ */
+#if (defined(RS_VERSION) && (RS_VERSION >= 21))
+extern float __attribute__((overloadable))
+ native_sincos(float v, float* cos);
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 21))
+extern float2 __attribute__((overloadable))
+ native_sincos(float2 v, float2* cos);
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 21))
+extern float3 __attribute__((overloadable))
+ native_sincos(float3 v, float3* cos);
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 21))
+extern float4 __attribute__((overloadable))
+ native_sincos(float4 v, float4* cos);
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 24))
+extern half __attribute__((overloadable))
+ native_sincos(half v, half* cos);
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 24))
+extern half2 __attribute__((overloadable))
+ native_sincos(half2 v, half2* cos);
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 24))
+extern half3 __attribute__((overloadable))
+ native_sincos(half3 v, half3* cos);
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 24))
+extern half4 __attribute__((overloadable))
+ native_sincos(half4 v, half4* cos);
+#endif
+
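+/*
+ * A minimal sketch of the out-parameter convention, assuming an
+ * RS_VERSION >= 21 target; rotate2d is an illustrative name:
+ *
+ *   static float2 rotate2d(float2 p, float angle) {
+ *     float c;
+ *     float s = native_sincos(angle, &c);  // returns sine; *cos gets cosine
+ *     float2 r;
+ *     r.x = p.x * c - p.y * s;
+ *     r.y = p.x * s + p.y * c;
+ *     return r;
+ *   }
+ */
+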
+/*
+ * native_sinh: Approximate hyperbolic sine
+ *
+ * Returns the approximate hyperbolic sine of a value specified in radians.
+ *
+ * See also sinh().
+ */
+#if (defined(RS_VERSION) && (RS_VERSION >= 21))
+extern float __attribute__((const, overloadable))
+ native_sinh(float v);
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 21))
+extern float2 __attribute__((const, overloadable))
+ native_sinh(float2 v);
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 21))
+extern float3 __attribute__((const, overloadable))
+ native_sinh(float3 v);
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 21))
+extern float4 __attribute__((const, overloadable))
+ native_sinh(float4 v);
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 24))
+extern half __attribute__((const, overloadable))
+ native_sinh(half v);
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 24))
+extern half2 __attribute__((const, overloadable))
+ native_sinh(half2 v);
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 24))
+extern half3 __attribute__((const, overloadable))
+ native_sinh(half3 v);
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 24))
+extern half4 __attribute__((const, overloadable))
+ native_sinh(half4 v);
+#endif
+
+/*
+ * native_sinpi: Approximate sine of a number multiplied by pi
+ *
+ * Returns the approximate sine of (v * pi), where (v * pi) is measured in radians.
+ *
+ * To get the sine of a value measured in degrees, call sinpi(v / 180.f).
+ *
+ * See also sinpi().
+ */
+#if (defined(RS_VERSION) && (RS_VERSION >= 21))
+extern float __attribute__((const, overloadable))
+ native_sinpi(float v);
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 21))
+extern float2 __attribute__((const, overloadable))
+ native_sinpi(float2 v);
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 21))
+extern float3 __attribute__((const, overloadable))
+ native_sinpi(float3 v);
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 21))
+extern float4 __attribute__((const, overloadable))
+ native_sinpi(float4 v);
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 24))
+extern half __attribute__((const, overloadable))
+ native_sinpi(half v);
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 24))
+extern half2 __attribute__((const, overloadable))
+ native_sinpi(half2 v);
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 24))
+extern half3 __attribute__((const, overloadable))
+ native_sinpi(half3 v);
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 24))
+extern half4 __attribute__((const, overloadable))
+ native_sinpi(half4 v);
+#endif
+
+/*
+ * native_sqrt: Approximate square root
+ *
+ * Returns the approximate sqrt(v).
+ *
+ * See also sqrt(), half_sqrt().
+ */
+#if (defined(RS_VERSION) && (RS_VERSION >= 21))
+extern float __attribute__((const, overloadable))
+ native_sqrt(float v);
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 21))
+extern float2 __attribute__((const, overloadable))
+ native_sqrt(float2 v);
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 21))
+extern float3 __attribute__((const, overloadable))
+ native_sqrt(float3 v);
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 21))
+extern float4 __attribute__((const, overloadable))
+ native_sqrt(float4 v);
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 24))
+extern half __attribute__((const, overloadable))
+ native_sqrt(half v);
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 24))
+extern half2 __attribute__((const, overloadable))
+ native_sqrt(half2 v);
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 24))
+extern half3 __attribute__((const, overloadable))
+ native_sqrt(half3 v);
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 24))
+extern half4 __attribute__((const, overloadable))
+ native_sqrt(half4 v);
+#endif
+
+/*
+ * native_tan: Approximate tangent
+ *
+ * Returns the approximate tangent of an angle measured in radians.
+ */
+#if (defined(RS_VERSION) && (RS_VERSION >= 21))
+extern float __attribute__((const, overloadable))
+ native_tan(float v);
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 21))
+extern float2 __attribute__((const, overloadable))
+ native_tan(float2 v);
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 21))
+extern float3 __attribute__((const, overloadable))
+ native_tan(float3 v);
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 21))
+extern float4 __attribute__((const, overloadable))
+ native_tan(float4 v);
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 24))
+extern half __attribute__((const, overloadable))
+ native_tan(half v);
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 24))
+extern half2 __attribute__((const, overloadable))
+ native_tan(half2 v);
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 24))
+extern half3 __attribute__((const, overloadable))
+ native_tan(half3 v);
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 24))
+extern half4 __attribute__((const, overloadable))
+ native_tan(half4 v);
+#endif
+
+/*
+ * native_tanh: Approximate hyperbolic tangent
+ *
+ * Returns the approximate hyperbolic tangent of a value.
+ *
+ * See also tanh().
+ */
+#if (defined(RS_VERSION) && (RS_VERSION >= 21))
+extern float __attribute__((const, overloadable))
+ native_tanh(float v);
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 21))
+extern float2 __attribute__((const, overloadable))
+ native_tanh(float2 v);
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 21))
+extern float3 __attribute__((const, overloadable))
+ native_tanh(float3 v);
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 21))
+extern float4 __attribute__((const, overloadable))
+ native_tanh(float4 v);
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 24))
+extern half __attribute__((const, overloadable))
+ native_tanh(half v);
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 24))
+extern half2 __attribute__((const, overloadable))
+ native_tanh(half2 v);
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 24))
+extern half3 __attribute__((const, overloadable))
+ native_tanh(half3 v);
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 24))
+extern half4 __attribute__((const, overloadable))
+ native_tanh(half4 v);
+#endif
+
+/*
+ * native_tanpi: Approximate tangent of a number multiplied by pi
+ *
+ * Returns the approximate tangent of (v * pi), where (v * pi) is measured in radians.
+ *
+ * To get the tangent of a value measured in degrees, call tanpi(v / 180.f).
+ *
+ * See also tanpi().
+ */
+#if (defined(RS_VERSION) && (RS_VERSION >= 21))
+extern float __attribute__((const, overloadable))
+ native_tanpi(float v);
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 21))
+extern float2 __attribute__((const, overloadable))
+ native_tanpi(float2 v);
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 21))
+extern float3 __attribute__((const, overloadable))
+ native_tanpi(float3 v);
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 21))
+extern float4 __attribute__((const, overloadable))
+ native_tanpi(float4 v);
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 24))
+extern half __attribute__((const, overloadable))
+ native_tanpi(half v);
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 24))
+extern half2 __attribute__((const, overloadable))
+ native_tanpi(half2 v);
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 24))
+extern half3 __attribute__((const, overloadable))
+ native_tanpi(half3 v);
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 24))
+extern half4 __attribute__((const, overloadable))
+ native_tanpi(half4 v);
+#endif
+
+/*
+ * nextafter: Next floating point number
+ *
+ * Returns the next representable floating point number from v towards target.
+ *
+ * In rs_fp_relaxed mode, a denormalized input value may not yield the next denormalized
+ * value, as support of denormalized values is optional in relaxed mode.
+ */
+extern float __attribute__((const, overloadable))
+ nextafter(float v, float target);
+
+extern float2 __attribute__((const, overloadable))
+ nextafter(float2 v, float2 target);
+
+extern float3 __attribute__((const, overloadable))
+ nextafter(float3 v, float3 target);
+
+extern float4 __attribute__((const, overloadable))
+ nextafter(float4 v, float4 target);
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 24))
+extern half __attribute__((const, overloadable))
+ nextafter(half v, half target);
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 24))
+extern half2 __attribute__((const, overloadable))
+ nextafter(half2 v, half2 target);
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 24))
+extern half3 __attribute__((const, overloadable))
+ nextafter(half3 v, half3 target);
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 24))
+extern half4 __attribute__((const, overloadable))
+ nextafter(half4 v, half4 target);
+#endif
+
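+/*
+ * A minimal sketch; the float variants above carry no version guard, so they
+ * are available at every API level:
+ *
+ *   float just_above = nextafter(1.0f, 2.0f);  // smallest float > 1.0f
+ *   float just_below = nextafter(1.0f, 0.0f);  // largest float < 1.0f
+ */
+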
+/*
+ * pow: Base raised to an exponent
+ *
+ * Returns base raised to the power exponent, i.e. base ^ exponent.
+ *
+ * pown() and powr() are similar. pown() takes an integer exponent. powr() assumes the
+ * base to be non-negative.
+ */
+extern float __attribute__((const, overloadable))
+ pow(float base, float exponent);
+
+extern float2 __attribute__((const, overloadable))
+ pow(float2 base, float2 exponent);
+
+extern float3 __attribute__((const, overloadable))
+ pow(float3 base, float3 exponent);
+
+extern float4 __attribute__((const, overloadable))
+ pow(float4 base, float4 exponent);
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 24))
+extern half __attribute__((const, overloadable))
+ pow(half base, half exponent);
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 24))
+extern half2 __attribute__((const, overloadable))
+ pow(half2 base, half2 exponent);
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 24))
+extern half3 __attribute__((const, overloadable))
+ pow(half3 base, half3 exponent);
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 24))
+extern half4 __attribute__((const, overloadable))
+ pow(half4 base, half4 exponent);
+#endif
+
+/*
+ * pown: Base raised to an integer exponent
+ *
+ * Returns base raised to the power exponent, i.e. base ^ exponent.
+ *
+ * pow() and powr() are similar. They both take a float exponent. powr() also assumes the
+ * base to be non-negative.
+ */
+extern float __attribute__((const, overloadable))
+ pown(float base, int exponent);
+
+extern float2 __attribute__((const, overloadable))
+ pown(float2 base, int2 exponent);
+
+extern float3 __attribute__((const, overloadable))
+ pown(float3 base, int3 exponent);
+
+extern float4 __attribute__((const, overloadable))
+ pown(float4 base, int4 exponent);
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 24))
+extern half __attribute__((const, overloadable))
+ pown(half base, int exponent);
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 24))
+extern half2 __attribute__((const, overloadable))
+ pown(half2 base, int2 exponent);
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 24))
+extern half3 __attribute__((const, overloadable))
+ pown(half3 base, int3 exponent);
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 24))
+extern half4 __attribute__((const, overloadable))
+ pown(half4 base, int4 exponent);
+#endif
+
+/*
+ * powr: Positive base raised to an exponent
+ *
+ * Returns base raised to the power exponent, i.e. base ^ exponent. base must be >= 0.
+ *
+ * pow() and pown() are similar. Neither makes assumptions about the base.
+ * pow() takes a float exponent while pown() takes an integer.
+ *
+ * See also native_powr().
+ */
+extern float __attribute__((const, overloadable))
+ powr(float base, float exponent);
+
+extern float2 __attribute__((const, overloadable))
+ powr(float2 base, float2 exponent);
+
+extern float3 __attribute__((const, overloadable))
+ powr(float3 base, float3 exponent);
+
+extern float4 __attribute__((const, overloadable))
+ powr(float4 base, float4 exponent);
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 24))
+extern half __attribute__((const, overloadable))
+ powr(half base, half exponent);
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 24))
+extern half2 __attribute__((const, overloadable))
+ powr(half2 base, half2 exponent);
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 24))
+extern half3 __attribute__((const, overloadable))
+ powr(half3 base, half3 exponent);
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 24))
+extern half4 __attribute__((const, overloadable))
+ powr(half4 base, half4 exponent);
+#endif
+
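+/*
+ * A minimal sketch contrasting the three variants; each call below computes
+ * 2 ^ 8 = 256.f:
+ *
+ *   float a = pow(2.f, 8.f);   // float exponent, no restriction on base
+ *   float b = pown(2.f, 8);    // integer exponent, no restriction on base
+ *   float c = powr(2.f, 8.f);  // float exponent, base must be >= 0
+ */
+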
+/*
+ * radians: Converts degrees into radians
+ *
+ * Converts from degrees to radians.
+ */
+extern float __attribute__((const, overloadable))
+ radians(float v);
+
+extern float2 __attribute__((const, overloadable))
+ radians(float2 v);
+
+extern float3 __attribute__((const, overloadable))
+ radians(float3 v);
+
+extern float4 __attribute__((const, overloadable))
+ radians(float4 v);
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 24))
+extern half __attribute__((const, overloadable))
+ radians(half v);
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 24))
+extern half2 __attribute__((const, overloadable))
+ radians(half2 v);
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 24))
+extern half3 __attribute__((const, overloadable))
+ radians(half3 v);
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 24))
+extern half4 __attribute__((const, overloadable))
+ radians(half4 v);
+#endif
+
+/*
+ * remainder: Remainder of a division
+ *
+ * Returns the remainder of (numerator / denominator), where the quotient is rounded towards
+ * the nearest integer.
+ *
+ * The function fmod() is similar but rounds the quotient toward zero.
+ * For example, fmod(-3.8f, 2.f) returns -1.8f (-3.8f - -1.f * 2.f)
+ * while remainder(-3.8f, 2.f) returns 0.2f (-3.8f - -2.f * 2.f).
+ */
+extern float __attribute__((const, overloadable))
+ remainder(float numerator, float denominator);
+
+extern float2 __attribute__((const, overloadable))
+ remainder(float2 numerator, float2 denominator);
+
+extern float3 __attribute__((const, overloadable))
+ remainder(float3 numerator, float3 denominator);
+
+extern float4 __attribute__((const, overloadable))
+ remainder(float4 numerator, float4 denominator);
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 24))
+extern half __attribute__((const, overloadable))
+ remainder(half numerator, half denominator);
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 24))
+extern half2 __attribute__((const, overloadable))
+ remainder(half2 numerator, half2 denominator);
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 24))
+extern half3 __attribute__((const, overloadable))
+ remainder(half3 numerator, half3 denominator);
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 24))
+extern half4 __attribute__((const, overloadable))
+ remainder(half4 numerator, half4 denominator);
+#endif
+
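+/*
+ * The example above, as a minimal sketch:
+ *
+ *   float f = fmod(-3.8f, 2.f);       // -1.8f: quotient -1.9 truncated to -1.f
+ *   float r = remainder(-3.8f, 2.f);  //  0.2f: quotient -1.9 rounded to -2.f
+ */
+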
+/*
+ * remquo: Remainder and quotient of a division
+ *
+ * Returns the quotient and the remainder of (numerator / denominator).
+ *
+ * Only the sign and lowest three bits of the quotient are guaranteed to be accurate.
+ *
+ * This function is useful for implementing periodic functions. The low three bits of the
+ * quotient give the quadrant and the remainder gives the distance within the quadrant.
+ * For example, an implementation of sin(x) could call remquo(x, PI / 2.f, &quadrant)
+ * to reduce very large values of x to something within a limited range.
+ *
+ * Example: remquo(-23.5f, 8.f, &quot) sets the lowest three bits of quot to 3
+ * and the sign negative. It returns 0.5f.
+ *
+ * Parameters:
+ * numerator: Numerator.
+ * denominator: Denominator.
+ * quotient: *quotient will be set to the integer quotient.
+ *
+ * Returns: Remainder, precise only for the low three bits.
+ */
+extern float __attribute__((overloadable))
+ remquo(float numerator, float denominator, int* quotient);
+
+extern float2 __attribute__((overloadable))
+ remquo(float2 numerator, float2 denominator, int2* quotient);
+
+extern float3 __attribute__((overloadable))
+ remquo(float3 numerator, float3 denominator, int3* quotient);
+
+extern float4 __attribute__((overloadable))
+ remquo(float4 numerator, float4 denominator, int4* quotient);
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 24))
+extern half __attribute__((overloadable))
+ remquo(half numerator, half denominator, int* quotient);
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 24))
+extern half2 __attribute__((overloadable))
+ remquo(half2 numerator, half2 denominator, int2* quotient);
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 24))
+extern half3 __attribute__((overloadable))
+ remquo(half3 numerator, half3 denominator, int3* quotient);
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 24))
+extern half4 __attribute__((overloadable))
+ remquo(half4 numerator, half4 denominator, int4* quotient);
+#endif
+
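+/*
+ * A minimal sketch of the periodic reduction described above; reduce_for_sin
+ * is an illustrative name, and M_PI_2 is assumed to be the pi / 2 constant
+ * defined elsewhere in these headers:
+ *
+ *   static float reduce_for_sin(float x, int* quadrant) {
+ *     int q;
+ *     float r = remquo(x, M_PI_2, &q);  // distance within the quadrant
+ *     *quadrant = q & 3;                // low two bits select the quadrant
+ *     return r;
+ *   }
+ */
+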
+/*
+ * rint: Round to even
+ *
+ * Rounds to the nearest integral value.
+ *
+ * rint() rounds half values to even. For example, rint(0.5f) returns 0.f and
+ * rint(1.5f) returns 2.f. Similarly, rint(-0.5f) returns -0.f and
+ * rint(-1.5f) returns -2.f.
+ *
+ * round() is similar but rounds away from zero. trunc() truncates the decimal fraction.
+ */
+extern float __attribute__((const, overloadable))
+ rint(float v);
+
+extern float2 __attribute__((const, overloadable))
+ rint(float2 v);
+
+extern float3 __attribute__((const, overloadable))
+ rint(float3 v);
+
+extern float4 __attribute__((const, overloadable))
+ rint(float4 v);
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 24))
+extern half __attribute__((const, overloadable))
+ rint(half v);
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 24))
+extern half2 __attribute__((const, overloadable))
+ rint(half2 v);
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 24))
+extern half3 __attribute__((const, overloadable))
+ rint(half3 v);
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 24))
+extern half4 __attribute__((const, overloadable))
+ rint(half4 v);
+#endif
+
+/*
+ * rootn: Nth root
+ *
+ * Computes the Nth root of a value.
+ *
+ * See also native_rootn().
+ */
+extern float __attribute__((const, overloadable))
+ rootn(float v, int n);
+
+extern float2 __attribute__((const, overloadable))
+ rootn(float2 v, int2 n);
+
+extern float3 __attribute__((const, overloadable))
+ rootn(float3 v, int3 n);
+
+extern float4 __attribute__((const, overloadable))
+ rootn(float4 v, int4 n);
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 24))
+extern half __attribute__((const, overloadable))
+ rootn(half v, int n);
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 24))
+extern half2 __attribute__((const, overloadable))
+ rootn(half2 v, int2 n);
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 24))
+extern half3 __attribute__((const, overloadable))
+ rootn(half3 v, int3 n);
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 24))
+extern half4 __attribute__((const, overloadable))
+ rootn(half4 v, int4 n);
+#endif
+
+/*
+ * round: Round away from zero
+ *
+ * Rounds to the nearest integral value.
+ *
+ * round() rounds half values away from zero. For example, round(0.5f) returns 1.f
+ * and round(1.5f) returns 2.f. Similarly, round(-0.5f) returns -1.f
+ * and round(-1.5f) returns -2.f.
+ *
+ * rint() is similar but rounds half values toward even. trunc() truncates the decimal fraction.
+ */
+extern float __attribute__((const, overloadable))
+ round(float v);
+
+extern float2 __attribute__((const, overloadable))
+ round(float2 v);
+
+extern float3 __attribute__((const, overloadable))
+ round(float3 v);
+
+extern float4 __attribute__((const, overloadable))
+ round(float4 v);
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 24))
+extern half __attribute__((const, overloadable))
+ round(half v);
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 24))
+extern half2 __attribute__((const, overloadable))
+ round(half2 v);
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 24))
+extern half3 __attribute__((const, overloadable))
+ round(half3 v);
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 24))
+extern half4 __attribute__((const, overloadable))
+ round(half4 v);
+#endif
+
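+/*
+ * A minimal sketch contrasting the three rounding styles on half-way values:
+ *
+ *   float a = rint(0.5f);    //  0.f: ties round toward even
+ *   float b = round(0.5f);   //  1.f: ties round away from zero
+ *   float c = trunc(0.5f);   //  0.f: decimal fraction discarded
+ *   float d = rint(-1.5f);   // -2.f
+ *   float e = round(-1.5f);  // -2.f
+ *   float f = trunc(-1.5f);  // -1.f
+ */
+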
+/*
+ * rsqrt: Reciprocal of a square root
+ *
+ * Returns (1 / sqrt(v)).
+ *
+ * See also half_rsqrt(), native_rsqrt().
+ */
+extern float __attribute__((const, overloadable))
+ rsqrt(float v);
+
+extern float2 __attribute__((const, overloadable))
+ rsqrt(float2 v);
+
+extern float3 __attribute__((const, overloadable))
+ rsqrt(float3 v);
+
+extern float4 __attribute__((const, overloadable))
+ rsqrt(float4 v);
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 24))
+extern half __attribute__((const, overloadable))
+ rsqrt(half v);
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 24))
+extern half2 __attribute__((const, overloadable))
+ rsqrt(half2 v);
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 24))
+extern half3 __attribute__((const, overloadable))
+ rsqrt(half3 v);
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 24))
+extern half4 __attribute__((const, overloadable))
+ rsqrt(half4 v);
+#endif
+
+/*
+ * sign: Sign of a value
+ *
+ * Returns the sign of a value.
+ *
+ * if (v < 0) return -1.f;
+ * else if (v > 0) return 1.f;
+ * else return 0.f;
+ */
+extern float __attribute__((const, overloadable))
+ sign(float v);
+
+extern float2 __attribute__((const, overloadable))
+ sign(float2 v);
+
+extern float3 __attribute__((const, overloadable))
+ sign(float3 v);
+
+extern float4 __attribute__((const, overloadable))
+ sign(float4 v);
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 24))
+extern half __attribute__((const, overloadable))
+ sign(half v);
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 24))
+extern half2 __attribute__((const, overloadable))
+ sign(half2 v);
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 24))
+extern half3 __attribute__((const, overloadable))
+ sign(half3 v);
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 24))
+extern half4 __attribute__((const, overloadable))
+ sign(half4 v);
+#endif
+
+/*
+ * sin: Sine
+ *
+ * Returns the sine of an angle measured in radians.
+ *
+ * See also native_sin().
+ */
+extern float __attribute__((const, overloadable))
+ sin(float v);
+
+extern float2 __attribute__((const, overloadable))
+ sin(float2 v);
+
+extern float3 __attribute__((const, overloadable))
+ sin(float3 v);
+
+extern float4 __attribute__((const, overloadable))
+ sin(float4 v);
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 24))
+extern half __attribute__((const, overloadable))
+ sin(half v);
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 24))
+extern half2 __attribute__((const, overloadable))
+ sin(half2 v);
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 24))
+extern half3 __attribute__((const, overloadable))
+ sin(half3 v);
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 24))
+extern half4 __attribute__((const, overloadable))
+ sin(half4 v);
+#endif
+
+/*
+ * sincos: Sine and cosine
+ *
+ * Returns the sine and cosine of a value.
+ *
+ * See also native_sincos().
+ *
+ * Parameters:
+ * v: Incoming value in radians.
+ * cos: *cos will be set to the cosine value.
+ *
+ * Returns: Sine of v.
+ */
+extern float __attribute__((overloadable))
+ sincos(float v, float* cos);
+
+extern float2 __attribute__((overloadable))
+ sincos(float2 v, float2* cos);
+
+extern float3 __attribute__((overloadable))
+ sincos(float3 v, float3* cos);
+
+extern float4 __attribute__((overloadable))
+ sincos(float4 v, float4* cos);
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 24))
+extern half __attribute__((overloadable))
+ sincos(half v, half* cos);
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 24))
+extern half2 __attribute__((overloadable))
+ sincos(half2 v, half2* cos);
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 24))
+extern half3 __attribute__((overloadable))
+ sincos(half3 v, half3* cos);
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 24))
+extern half4 __attribute__((overloadable))
+ sincos(half4 v, half4* cos);
+#endif
+
+/*
+ * sinh: Hyperbolic sine
+ *
+ * Returns the hyperbolic sine of v, where v is measured in radians.
+ *
+ * See also native_sinh().
+ */
+extern float __attribute__((const, overloadable))
+ sinh(float v);
+
+extern float2 __attribute__((const, overloadable))
+ sinh(float2 v);
+
+extern float3 __attribute__((const, overloadable))
+ sinh(float3 v);
+
+extern float4 __attribute__((const, overloadable))
+ sinh(float4 v);
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 24))
+extern half __attribute__((const, overloadable))
+ sinh(half v);
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 24))
+extern half2 __attribute__((const, overloadable))
+ sinh(half2 v);
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 24))
+extern half3 __attribute__((const, overloadable))
+ sinh(half3 v);
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 24))
+extern half4 __attribute__((const, overloadable))
+ sinh(half4 v);
+#endif
+
+/*
+ * sinpi: Sine of a number multiplied by pi
+ *
+ * Returns the sine of (v * pi), where (v * pi) is measured in radians.
+ *
+ * To get the sine of a value measured in degrees, call sinpi(v / 180.f).
+ *
+ * See also native_sinpi().
+ */
+extern float __attribute__((const, overloadable))
+ sinpi(float v);
+
+extern float2 __attribute__((const, overloadable))
+ sinpi(float2 v);
+
+extern float3 __attribute__((const, overloadable))
+ sinpi(float3 v);
+
+extern float4 __attribute__((const, overloadable))
+ sinpi(float4 v);
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 24))
+extern half __attribute__((const, overloadable))
+ sinpi(half v);
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 24))
+extern half2 __attribute__((const, overloadable))
+ sinpi(half2 v);
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 24))
+extern half3 __attribute__((const, overloadable))
+ sinpi(half3 v);
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 24))
+extern half4 __attribute__((const, overloadable))
+ sinpi(half4 v);
+#endif
+
+/*
+ * sqrt: Square root
+ *
+ * Returns the square root of a value.
+ *
+ * See also half_sqrt(), native_sqrt().
+ */
+extern float __attribute__((const, overloadable))
+ sqrt(float v);
+
+extern float2 __attribute__((const, overloadable))
+ sqrt(float2 v);
+
+extern float3 __attribute__((const, overloadable))
+ sqrt(float3 v);
+
+extern float4 __attribute__((const, overloadable))
+ sqrt(float4 v);
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 24))
+extern half __attribute__((const, overloadable))
+ sqrt(half v);
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 24))
+extern half2 __attribute__((const, overloadable))
+ sqrt(half2 v);
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 24))
+extern half3 __attribute__((const, overloadable))
+ sqrt(half3 v);
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 24))
+extern half4 __attribute__((const, overloadable))
+ sqrt(half4 v);
+#endif
+
+/*
+ * step: 0 if less than a value, 1 otherwise
+ *
+ * Returns 0.f if v < edge, 1.f otherwise.
+ *
+ * This can be useful to create conditional computations without using loops and branching
+ * instructions. For example, instead of computing (a[i] < b[i]) ? 0.f : atan2(a[i], b[i])
+ * for the corresponding elements of a vector, you could instead use step(b, a) * atan2(a, b).
+ */
+extern float __attribute__((const, overloadable))
+ step(float edge, float v);
+
+extern float2 __attribute__((const, overloadable))
+ step(float2 edge, float2 v);
+
+extern float3 __attribute__((const, overloadable))
+ step(float3 edge, float3 v);
+
+extern float4 __attribute__((const, overloadable))
+ step(float4 edge, float4 v);
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 24))
+extern half __attribute__((const, overloadable))
+ step(half edge, half v);
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 24))
+extern half2 __attribute__((const, overloadable))
+ step(half2 edge, half2 v);
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 24))
+extern half3 __attribute__((const, overloadable))
+ step(half3 edge, half3 v);
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 24))
+extern half4 __attribute__((const, overloadable))
+ step(half4 edge, half4 v);
+#endif
+
+extern float2 __attribute__((const, overloadable))
+ step(float2 edge, float v);
+
+extern float3 __attribute__((const, overloadable))
+ step(float3 edge, float v);
+
+extern float4 __attribute__((const, overloadable))
+ step(float4 edge, float v);
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 24))
+extern half2 __attribute__((const, overloadable))
+ step(half2 edge, half v);
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 24))
+extern half3 __attribute__((const, overloadable))
+ step(half3 edge, half v);
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 24))
+extern half4 __attribute__((const, overloadable))
+ step(half4 edge, half v);
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 21))
+extern float2 __attribute__((const, overloadable))
+ step(float edge, float2 v);
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 21))
+extern float3 __attribute__((const, overloadable))
+ step(float edge, float3 v);
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 21))
+extern float4 __attribute__((const, overloadable))
+ step(float edge, float4 v);
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 24))
+extern half2 __attribute__((const, overloadable))
+ step(half edge, half2 v);
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 24))
+extern half3 __attribute__((const, overloadable))
+ step(half edge, half3 v);
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 24))
+extern half4 __attribute__((const, overloadable))
+ step(half edge, half4 v);
+#endif
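+
+/*
+ * Example (a minimal sketch; edge, v, lowVal, and highVal are illustrative):
+ *
+ *   // Branch-free select: r = (v < edge) ? lowVal : highVal
+ *   float4 r = mix(lowVal, highVal, step(edge, v));
+ */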
+
+/*
+ * tan: Tangent
+ *
+ * Returns the tangent of an angle measured in radians.
+ *
+ * See also native_tan().
+ */
+extern float __attribute__((const, overloadable))
+ tan(float v);
+
+extern float2 __attribute__((const, overloadable))
+ tan(float2 v);
+
+extern float3 __attribute__((const, overloadable))
+ tan(float3 v);
+
+extern float4 __attribute__((const, overloadable))
+ tan(float4 v);
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 24))
+extern half __attribute__((const, overloadable))
+ tan(half v);
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 24))
+extern half2 __attribute__((const, overloadable))
+ tan(half2 v);
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 24))
+extern half3 __attribute__((const, overloadable))
+ tan(half3 v);
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 24))
+extern half4 __attribute__((const, overloadable))
+ tan(half4 v);
+#endif
+
+/*
+ * tanh: Hyperbolic tangent
+ *
+ * Returns the hyperbolic tangent of a value.
+ *
+ * See also native_tanh().
+ */
+extern float __attribute__((const, overloadable))
+ tanh(float v);
+
+extern float2 __attribute__((const, overloadable))
+ tanh(float2 v);
+
+extern float3 __attribute__((const, overloadable))
+ tanh(float3 v);
+
+extern float4 __attribute__((const, overloadable))
+ tanh(float4 v);
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 24))
+extern half __attribute__((const, overloadable))
+ tanh(half v);
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 24))
+extern half2 __attribute__((const, overloadable))
+ tanh(half2 v);
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 24))
+extern half3 __attribute__((const, overloadable))
+ tanh(half3 v);
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 24))
+extern half4 __attribute__((const, overloadable))
+ tanh(half4 v);
+#endif
+
+/*
+ * tanpi: Tangent of a number multiplied by pi
+ *
+ * Returns the tangent of (v * pi), where (v * pi) is measured in radians.
+ *
+ * To get the tangent of a value measured in degrees, call tanpi(v / 180.f).
+ *
+ * See also native_tanpi().
+ */
+extern float __attribute__((const, overloadable))
+ tanpi(float v);
+
+extern float2 __attribute__((const, overloadable))
+ tanpi(float2 v);
+
+extern float3 __attribute__((const, overloadable))
+ tanpi(float3 v);
+
+extern float4 __attribute__((const, overloadable))
+ tanpi(float4 v);
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 24))
+extern half __attribute__((const, overloadable))
+ tanpi(half v);
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 24))
+extern half2 __attribute__((const, overloadable))
+ tanpi(half2 v);
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 24))
+extern half3 __attribute__((const, overloadable))
+ tanpi(half3 v);
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 24))
+extern half4 __attribute__((const, overloadable))
+ tanpi(half4 v);
+#endif
+
+/*
+ * tgamma: Gamma function
+ *
+ * Returns the gamma function of a value.
+ *
+ * See also lgamma().
+ */
+extern float __attribute__((const, overloadable))
+ tgamma(float v);
+
+extern float2 __attribute__((const, overloadable))
+ tgamma(float2 v);
+
+extern float3 __attribute__((const, overloadable))
+ tgamma(float3 v);
+
+extern float4 __attribute__((const, overloadable))
+ tgamma(float4 v);
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 24))
+extern half __attribute__((const, overloadable))
+ tgamma(half v);
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 24))
+extern half2 __attribute__((const, overloadable))
+ tgamma(half2 v);
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 24))
+extern half3 __attribute__((const, overloadable))
+ tgamma(half3 v);
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 24))
+extern half4 __attribute__((const, overloadable))
+ tgamma(half4 v);
+#endif
+
+/*
+ * trunc: Truncates a floating point value
+ *
+ * Rounds to integral using truncation.
+ *
+ * For example, trunc(1.7f) returns 1.f and trunc(-1.7f) returns -1.f.
+ *
+ * See rint() and round() for other rounding options.
+ */
+extern float __attribute__((const, overloadable))
+ trunc(float v);
+
+extern float2 __attribute__((const, overloadable))
+ trunc(float2 v);
+
+extern float3 __attribute__((const, overloadable))
+ trunc(float3 v);
+
+extern float4 __attribute__((const, overloadable))
+ trunc(float4 v);
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 24))
+extern half __attribute__((const, overloadable))
+ trunc(half v);
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 24))
+extern half2 __attribute__((const, overloadable))
+ trunc(half2 v);
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 24))
+extern half3 __attribute__((const, overloadable))
+ trunc(half3 v);
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 24))
+extern half4 __attribute__((const, overloadable))
+ trunc(half4 v);
+#endif
+
+/*
+ * rsClamp: Restrain a value to a range
+ *
+ * DEPRECATED. Do not use.
+ *
+ * Clamp a value between low and high.
+ *
+ * Parameters:
+ * amount: Value to clamp.
+ * low: Lower bound.
+ * high: Upper bound.
+ */
+extern char __attribute__((const, overloadable
+#if (defined(RS_VERSION) && (RS_VERSION >= 22))
+, deprecated("Use clamp() instead.")
+#endif
+))
+ rsClamp(char amount, char low, char high);
+
+extern uchar __attribute__((const, overloadable
+#if (defined(RS_VERSION) && (RS_VERSION >= 22))
+, deprecated("Use clamp() instead.")
+#endif
+))
+ rsClamp(uchar amount, uchar low, uchar high);
+
+extern short __attribute__((const, overloadable
+#if (defined(RS_VERSION) && (RS_VERSION >= 22))
+, deprecated("Use clamp() instead.")
+#endif
+))
+ rsClamp(short amount, short low, short high);
+
+extern ushort __attribute__((const, overloadable
+#if (defined(RS_VERSION) && (RS_VERSION >= 22))
+, deprecated("Use clamp() instead.")
+#endif
+))
+ rsClamp(ushort amount, ushort low, ushort high);
+
+extern int __attribute__((const, overloadable
+#if (defined(RS_VERSION) && (RS_VERSION >= 22))
+, deprecated("Use clamp() instead.")
+#endif
+))
+ rsClamp(int amount, int low, int high);
+
+extern uint __attribute__((const, overloadable
+#if (defined(RS_VERSION) && (RS_VERSION >= 22))
+, deprecated("Use clamp() instead.")
+#endif
+))
+ rsClamp(uint amount, uint low, uint high);
+
+/*
+ * rsFrac: Returns the fractional part of a float
+ *
+ * DEPRECATED. Do not use.
+ *
+ * Returns the fractional part of a float
+ */
+extern float __attribute__((const, overloadable
+#if (defined(RS_VERSION) && (RS_VERSION >= 22))
+, deprecated("Use fract() instead.")
+#endif
+))
+ rsFrac(float v);
+
+/*
+ * rsRand: Pseudo-random number
+ *
+ * Return a random value between 0 (or min_value) and max_value.
+ */
+extern int __attribute__((overloadable))
+ rsRand(int max_value);
+
+extern int __attribute__((overloadable))
+ rsRand(int min_value, int max_value);
+
+extern float __attribute__((overloadable))
+ rsRand(float max_value);
+
+extern float __attribute__((overloadable))
+ rsRand(float min_value, float max_value);
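+
+/*
+ * Example (a minimal sketch; jitter and count are illustrative):
+ *
+ *   float jitter = rsRand(-0.5f, 0.5f);  // random float between -0.5 and 0.5
+ *   int pick = rsRand(count);            // random int between 0 and count
+ */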
+
+#endif // RENDERSCRIPT_RS_MATH_RSH
diff --git a/current/platform/rs/scriptc/rs_matrix.rsh b/current/platform/rs/scriptc/rs_matrix.rsh
new file mode 100644
index 0000000..9cdc27f
--- /dev/null
+++ b/current/platform/rs/scriptc/rs_matrix.rsh
@@ -0,0 +1,612 @@
+/*
+ * Copyright (C) 2016 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+// Don't edit this file! It is auto-generated by frameworks/rs/api/generate.sh.
+
+/*
+ * rs_matrix.rsh: Matrix Functions
+ *
+ * These functions let you manipulate square matrices of rank 2x2, 3x3, and 4x4.
+ * They are particularly useful for graphical transformations and are compatible
+ * with OpenGL.
+ *
+ * We use a zero-based index for rows and columns. E.g. the last element of a
+ * rs_matrix4x4 is found at (3, 3).
+ *
+ * RenderScript uses column-major matrices and column-based vectors. Transforming
+ * a vector is done by postmultiplying the vector, e.g. (matrix * vector),
+ * as provided by rsMatrixMultiply().
+ *
+ * To create a transformation matrix that performs two transformations at once,
+ * multiply the two source matrices, with the first transformation as the right
+ * argument. E.g. to create a transformation matrix that applies the
+ * transformation s1 followed by s2, call rsMatrixLoadMultiply(&combined, &s2, &s1).
+ * This derives from s2 * (s1 * v), which is (s2 * s1) * v.
+ *
+ * We have two styles of functions to create transformation matrices:
+ * rsMatrixLoadTransformation and rsMatrixTransformation. The former
+ * style simply stores the transformation matrix in the first argument. The latter
+ * modifies a pre-existing transformation matrix so that the new transformation
+ * happens first. E.g. if you call rsMatrixTranslate() on a matrix that already
+ * does a scaling, the resulting matrix when applied to a vector will first do the
+ * translation then the scaling.
+ */
+
+#ifndef RENDERSCRIPT_RS_MATRIX_RSH
+#define RENDERSCRIPT_RS_MATRIX_RSH
+
+#include "rs_vector_math.rsh"
+
+/*
+ * rsExtractFrustumPlanes: Compute frustum planes
+ *
+ * Computes the 6 frustum planes from the view projection matrix.
+ *
+ * Parameters:
+ * viewProj: Matrix to extract planes from.
+ * left: Left plane.
+ * right: Right plane.
+ * top: Top plane.
+ * bottom: Bottom plane.
+ * near: Near plane.
+ * far: Far plane.
+ */
+#if !defined(RS_VERSION) || (RS_VERSION <= 23)
+static inline void __attribute__((overloadable))
+ rsExtractFrustumPlanes(const rs_matrix4x4* viewProj, float4* left, float4* right, float4* top,
+ float4* bottom, float4* near, float4* far) {
+ // x y z w = a b c d in the plane equation
+ left->x = viewProj->m[3] + viewProj->m[0];
+ left->y = viewProj->m[7] + viewProj->m[4];
+ left->z = viewProj->m[11] + viewProj->m[8];
+ left->w = viewProj->m[15] + viewProj->m[12];
+
+ right->x = viewProj->m[3] - viewProj->m[0];
+ right->y = viewProj->m[7] - viewProj->m[4];
+ right->z = viewProj->m[11] - viewProj->m[8];
+ right->w = viewProj->m[15] - viewProj->m[12];
+
+ top->x = viewProj->m[3] - viewProj->m[1];
+ top->y = viewProj->m[7] - viewProj->m[5];
+ top->z = viewProj->m[11] - viewProj->m[9];
+ top->w = viewProj->m[15] - viewProj->m[13];
+
+ bottom->x = viewProj->m[3] + viewProj->m[1];
+ bottom->y = viewProj->m[7] + viewProj->m[5];
+ bottom->z = viewProj->m[11] + viewProj->m[9];
+ bottom->w = viewProj->m[15] + viewProj->m[13];
+
+ near->x = viewProj->m[3] + viewProj->m[2];
+ near->y = viewProj->m[7] + viewProj->m[6];
+ near->z = viewProj->m[11] + viewProj->m[10];
+ near->w = viewProj->m[15] + viewProj->m[14];
+
+ far->x = viewProj->m[3] - viewProj->m[2];
+ far->y = viewProj->m[7] - viewProj->m[6];
+ far->z = viewProj->m[11] - viewProj->m[10];
+ far->w = viewProj->m[15] - viewProj->m[14];
+
+ float len = length(left->xyz);
+ *left /= len;
+ len = length(right->xyz);
+ *right /= len;
+ len = length(top->xyz);
+ *top /= len;
+ len = length(bottom->xyz);
+ *bottom /= len;
+ len = length(near->xyz);
+ *near /= len;
+ len = length(far->xyz);
+ *far /= len;
+}
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 24))
+extern void __attribute__((overloadable))
+    rsExtractFrustumPlanes(const rs_matrix4x4* viewProj, float4* left, float4* right, float4* top,
+ float4* bottom, float4* near, float4* far);
+#endif
+
+/*
+ * rsIsSphereInFrustum: Checks if a sphere is within the frustum planes
+ *
+ * Returns true if the sphere is within the 6 frustum planes.
+ *
+ * Parameters:
+ * sphere: float4 representing the sphere.
+ * left: Left plane.
+ * right: Right plane.
+ * top: Top plane.
+ * bottom: Bottom plane.
+ * near: Near plane.
+ * far: Far plane.
+ */
+#if !defined(RS_VERSION) || (RS_VERSION <= 23)
+static inline bool __attribute__((always_inline, overloadable))
+ rsIsSphereInFrustum(float4* sphere, float4* left, float4* right, float4* top, float4* bottom,
+ float4* near, float4* far) {
+ float distToCenter = dot(left->xyz, sphere->xyz) + left->w;
+ if (distToCenter < -sphere->w) {
+ return false;
+ }
+ distToCenter = dot(right->xyz, sphere->xyz) + right->w;
+ if (distToCenter < -sphere->w) {
+ return false;
+ }
+ distToCenter = dot(top->xyz, sphere->xyz) + top->w;
+ if (distToCenter < -sphere->w) {
+ return false;
+ }
+ distToCenter = dot(bottom->xyz, sphere->xyz) + bottom->w;
+ if (distToCenter < -sphere->w) {
+ return false;
+ }
+ distToCenter = dot(near->xyz, sphere->xyz) + near->w;
+ if (distToCenter < -sphere->w) {
+ return false;
+ }
+ distToCenter = dot(far->xyz, sphere->xyz) + far->w;
+ if (distToCenter < -sphere->w) {
+ return false;
+ }
+ return true;
+}
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 24))
+extern bool __attribute__((overloadable))
+ rsIsSphereInFrustum(float4* sphere, float4* left, float4* right, float4* top, float4* bottom,
+ float4* near, float4* far);
+#endif
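+
+/*
+ * Example (a minimal sketch; the six plane variables are assumed to have been
+ * filled in by rsExtractFrustumPlanes() above):
+ *
+ *   float4 sphere = {cx, cy, cz, radius};  // center (xyz) and radius (w)
+ *   if (rsIsSphereInFrustum(&sphere, &left, &right, &top, &bottom, &near, &far)) {
+ *       // The sphere is at least partially inside the view volume.
+ *   }
+ */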
+
+/*
+ * rsMatrixGet: Get one element
+ *
+ * Returns one element of a matrix.
+ *
+ * Warning: The order of the column and row parameters may be unexpected.
+ *
+ * Parameters:
+ * m: Matrix to extract the element from.
+ * col: Zero-based column of the element to be extracted.
+ *   row: Zero-based row of the element to be extracted.
+ */
+extern float __attribute__((overloadable))
+ rsMatrixGet(const rs_matrix4x4* m, uint32_t col, uint32_t row);
+
+extern float __attribute__((overloadable))
+ rsMatrixGet(const rs_matrix3x3* m, uint32_t col, uint32_t row);
+
+extern float __attribute__((overloadable))
+ rsMatrixGet(const rs_matrix2x2* m, uint32_t col, uint32_t row);
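+
+/*
+ * Example (a minimal sketch): note that the column comes before the row.
+ *
+ *   // In a column-major transformation matrix, the x translation sits at
+ *   // column 3, row 0:
+ *   float tx = rsMatrixGet(&m, 3, 0);  // col == 3, row == 0
+ */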
+
+/*
+ * rsMatrixInverse: Inverts a matrix in place
+ *
+ * Returns true if the matrix was successfully inverted.
+ *
+ * Parameters:
+ * m: Matrix to invert.
+ */
+extern bool __attribute__((overloadable))
+ rsMatrixInverse(rs_matrix4x4* m);
+
+/*
+ * rsMatrixInverseTranspose: Inverts and transposes a matrix in place
+ *
+ * The matrix is first inverted then transposed. Returns true if the matrix was
+ * successfully inverted.
+ *
+ * Parameters:
+ * m: Matrix to modify.
+ */
+extern bool __attribute__((overloadable))
+ rsMatrixInverseTranspose(rs_matrix4x4* m);
+
+/*
+ * rsMatrixLoad: Load or copy a matrix
+ *
+ * Set the elements of a matrix from an array of floats or from another matrix.
+ *
+ * If loading from an array, the floats should be in row-major order, i.e. the element at
+ * row 0, column 0 should be first, followed by the element at row 0, column 1, etc.
+ *
+ * If loading from a matrix and the source is smaller than the destination, the rest
+ * of the destination is filled with elements of the identity matrix. E.g.
+ * loading a rs_matrix2x2 into a rs_matrix4x4 will give:
+ *
+ * m00 m01 0.0 0.0
+ * m10 m11 0.0 0.0
+ * 0.0 0.0 1.0 0.0
+ * 0.0 0.0 0.0 1.0
+ *
+ *
+ * Parameters:
+ * destination: Matrix to set.
+ * array: Array of values to set the matrix to. These arrays should be 4, 9, or 16 floats long, depending on the matrix size.
+ * source: Source matrix.
+ */
+extern void __attribute__((overloadable))
+ rsMatrixLoad(rs_matrix4x4* destination, const float* array);
+
+extern void __attribute__((overloadable))
+ rsMatrixLoad(rs_matrix3x3* destination, const float* array);
+
+extern void __attribute__((overloadable))
+ rsMatrixLoad(rs_matrix2x2* destination, const float* array);
+
+extern void __attribute__((overloadable))
+ rsMatrixLoad(rs_matrix4x4* destination, const rs_matrix4x4* source);
+
+extern void __attribute__((overloadable))
+ rsMatrixLoad(rs_matrix3x3* destination, const rs_matrix3x3* source);
+
+extern void __attribute__((overloadable))
+ rsMatrixLoad(rs_matrix2x2* destination, const rs_matrix2x2* source);
+
+extern void __attribute__((overloadable))
+ rsMatrixLoad(rs_matrix4x4* destination, const rs_matrix3x3* source);
+
+extern void __attribute__((overloadable))
+ rsMatrixLoad(rs_matrix4x4* destination, const rs_matrix2x2* source);
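+
+/*
+ * Example (a minimal sketch): loading a 3x3 matrix from a row-major array.
+ *
+ *   const float values[9] = {1.f, 2.f, 3.f,   // row 0
+ *                            4.f, 5.f, 6.f,   // row 1
+ *                            7.f, 8.f, 9.f};  // row 2
+ *   rs_matrix3x3 m;
+ *   rsMatrixLoad(&m, values);
+ */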
+
+/*
+ * rsMatrixLoadFrustum: Load a frustum projection matrix
+ *
+ * Constructs a frustum projection matrix, transforming the box identified by
+ * the six clipping planes left, right, bottom, top, near, far.
+ *
+ * To apply this projection to a vector, multiply the vector by the created
+ * matrix using rsMatrixMultiply().
+ *
+ * Parameters:
+ * m: Matrix to set.
+ */
+extern void __attribute__((overloadable))
+ rsMatrixLoadFrustum(rs_matrix4x4* m, float left, float right, float bottom, float top,
+ float near, float far);
+
+/*
+ * rsMatrixLoadIdentity: Load identity matrix
+ *
+ * Set the elements of a matrix to the identity matrix.
+ *
+ * Parameters:
+ * m: Matrix to set.
+ */
+extern void __attribute__((overloadable))
+ rsMatrixLoadIdentity(rs_matrix4x4* m);
+
+extern void __attribute__((overloadable))
+ rsMatrixLoadIdentity(rs_matrix3x3* m);
+
+extern void __attribute__((overloadable))
+ rsMatrixLoadIdentity(rs_matrix2x2* m);
+
+/*
+ * rsMatrixLoadMultiply: Multiply two matrices
+ *
+ * Sets m to the matrix product of lhs * rhs.
+ *
+ * To combine two 4x4 transformation matrices, multiply the second transformation matrix
+ * by the first transformation matrix. E.g. to create a transformation matrix that applies
+ * the transformation s1 followed by s2, call rsMatrixLoadMultiply(&combined, &s2, &s1).
+ *
+ * Warning: Prior to version 21, storing the result back into the right matrix is not supported
+ * and will result in undefined behavior. Use rsMatrixMultiply() instead. E.g. instead of doing
+ * rsMatrixLoadMultiply(&m2r, &m2r, &m2l), use rsMatrixMultiply(&m2r, &m2l).
+ * rsMatrixLoadMultiply (&m2l, &m2r, &m2l) works as expected.
+ *
+ * Parameters:
+ * m: Matrix to set.
+ * lhs: Left matrix of the product.
+ * rhs: Right matrix of the product.
+ */
+extern void __attribute__((overloadable))
+ rsMatrixLoadMultiply(rs_matrix4x4* m, const rs_matrix4x4* lhs, const rs_matrix4x4* rhs);
+
+extern void __attribute__((overloadable))
+ rsMatrixLoadMultiply(rs_matrix3x3* m, const rs_matrix3x3* lhs, const rs_matrix3x3* rhs);
+
+extern void __attribute__((overloadable))
+ rsMatrixLoadMultiply(rs_matrix2x2* m, const rs_matrix2x2* lhs, const rs_matrix2x2* rhs);
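+
+/*
+ * Example (a minimal sketch): a combined matrix that scales first, then
+ * translates, using rsMatrixLoadScale() and rsMatrixLoadTranslate() below.
+ *
+ *   rs_matrix4x4 scale, translate, combined;
+ *   rsMatrixLoadScale(&scale, 2.f, 2.f, 2.f);
+ *   rsMatrixLoadTranslate(&translate, 10.f, 0.f, 0.f);
+ *   // combined = translate * scale: applied to a vector, scale happens first.
+ *   rsMatrixLoadMultiply(&combined, &translate, &scale);
+ */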
+
+/*
+ * rsMatrixLoadOrtho: Load an orthographic projection matrix
+ *
+ * Constructs an orthographic projection matrix, transforming the box identified by the
+ * six clipping planes left, right, bottom, top, near, far into a unit cube
+ * with a corner at (-1, -1, -1) and the opposite at (1, 1, 1).
+ *
+ * To apply this projection to a vector, multiply the vector by the created matrix
+ * using rsMatrixMultiply().
+ *
+ * See https://en.wikipedia.org/wiki/Orthographic_projection .
+ *
+ * Parameters:
+ * m: Matrix to set.
+ */
+extern void __attribute__((overloadable))
+ rsMatrixLoadOrtho(rs_matrix4x4* m, float left, float right, float bottom, float top, float near,
+ float far);
+
+/*
+ * rsMatrixLoadPerspective: Load a perspective projection matrix
+ *
+ * Constructs a perspective projection matrix, assuming a symmetrical field of view.
+ *
+ * To apply this projection to a vector, multiply the vector by the created matrix
+ * using rsMatrixMultiply().
+ *
+ * Parameters:
+ * m: Matrix to set.
+ * fovy: Field of view, in degrees along the Y axis.
+ * aspect: Ratio of x / y.
+ * near: Near clipping plane.
+ * far: Far clipping plane.
+ */
+extern void __attribute__((overloadable))
+ rsMatrixLoadPerspective(rs_matrix4x4* m, float fovy, float aspect, float near, float far);
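+
+/*
+ * Example (a minimal sketch; width and height are illustrative):
+ *
+ *   rs_matrix4x4 proj;
+ *   rsMatrixLoadPerspective(&proj, 60.f, (float)width / height, 0.1f, 100.f);
+ */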
+
+/*
+ * rsMatrixLoadRotate: Load a rotation matrix
+ *
+ * This function creates a rotation matrix. The axis of rotation is the (x, y, z) vector.
+ *
+ * To rotate a vector, multiply the vector by the created matrix using rsMatrixMultiply().
+ *
+ * See http://en.wikipedia.org/wiki/Rotation_matrix .
+ *
+ * Parameters:
+ * m: Matrix to set.
+ * rot: How much rotation to do, in degrees.
+ * x: X component of the vector that is the axis of rotation.
+ * y: Y component of the vector that is the axis of rotation.
+ * z: Z component of the vector that is the axis of rotation.
+ */
+extern void __attribute__((overloadable))
+ rsMatrixLoadRotate(rs_matrix4x4* m, float rot, float x, float y, float z);
+
+/*
+ * rsMatrixLoadScale: Load a scaling matrix
+ *
+ * This function creates a scaling matrix, where each component of a vector is multiplied
+ * by a number. This number can be negative.
+ *
+ * To scale a vector, multiply the vector by the created matrix using rsMatrixMultiply().
+ *
+ * Parameters:
+ * m: Matrix to set.
+ * x: Multiple to scale the x components by.
+ * y: Multiple to scale the y components by.
+ * z: Multiple to scale the z components by.
+ */
+extern void __attribute__((overloadable))
+ rsMatrixLoadScale(rs_matrix4x4* m, float x, float y, float z);
+
+/*
+ * rsMatrixLoadTranslate: Load a translation matrix
+ *
+ * This function creates a translation matrix, where a number is added to each element of
+ * a vector.
+ *
+ * To translate a vector, multiply the vector by the created matrix using
+ * rsMatrixMultiply().
+ *
+ * Parameters:
+ * m: Matrix to set.
+ * x: Number to add to each x component.
+ * y: Number to add to each y component.
+ * z: Number to add to each z component.
+ */
+extern void __attribute__((overloadable))
+ rsMatrixLoadTranslate(rs_matrix4x4* m, float x, float y, float z);
+
+/*
+ * rsMatrixMultiply: Multiply a matrix by a vector or another matrix
+ *
+ * For the matrix by matrix variant, sets m to the matrix product m * rhs.
+ *
+ * When combining two 4x4 transformation matrices using this function, the resulting
+ * matrix will correspond to performing the rhs transformation first followed by
+ * the original m transformation.
+ *
+ * For the matrix by vector variant, returns the post-multiplication of the vector
+ * by the matrix, i.e. m * in.
+ *
+ * When multiplying a float3 by a rs_matrix4x4, the vector is expanded with (1).
+ *
+ * When multiplying a float2 by a rs_matrix4x4, the vector is expanded with (0, 1).
+ *
+ * When multiplying a float2 by a rs_matrix3x3, the vector is expanded with (0).
+ *
+ * Starting with API 14, this function takes a const matrix as the first argument.
+ *
+ * Parameters:
+ * m: Left matrix of the product and the matrix to be set.
+ * rhs: Right matrix of the product.
+ */
+extern void __attribute__((overloadable))
+ rsMatrixMultiply(rs_matrix4x4* m, const rs_matrix4x4* rhs);
+
+extern void __attribute__((overloadable))
+ rsMatrixMultiply(rs_matrix3x3* m, const rs_matrix3x3* rhs);
+
+extern void __attribute__((overloadable))
+ rsMatrixMultiply(rs_matrix2x2* m, const rs_matrix2x2* rhs);
+
+#if !defined(RS_VERSION) || (RS_VERSION <= 13)
+extern float4 __attribute__((overloadable))
+ rsMatrixMultiply(rs_matrix4x4* m, float4 in);
+#endif
+
+#if !defined(RS_VERSION) || (RS_VERSION <= 13)
+extern float4 __attribute__((overloadable))
+ rsMatrixMultiply(rs_matrix4x4* m, float3 in);
+#endif
+
+#if !defined(RS_VERSION) || (RS_VERSION <= 13)
+extern float4 __attribute__((overloadable))
+ rsMatrixMultiply(rs_matrix4x4* m, float2 in);
+#endif
+
+#if !defined(RS_VERSION) || (RS_VERSION <= 13)
+extern float3 __attribute__((overloadable))
+ rsMatrixMultiply(rs_matrix3x3* m, float3 in);
+#endif
+
+#if !defined(RS_VERSION) || (RS_VERSION <= 13)
+extern float3 __attribute__((overloadable))
+ rsMatrixMultiply(rs_matrix3x3* m, float2 in);
+#endif
+
+#if !defined(RS_VERSION) || (RS_VERSION <= 13)
+extern float2 __attribute__((overloadable))
+ rsMatrixMultiply(rs_matrix2x2* m, float2 in);
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 14))
+extern float4 __attribute__((overloadable))
+ rsMatrixMultiply(const rs_matrix4x4* m, float4 in);
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 14))
+extern float4 __attribute__((overloadable))
+ rsMatrixMultiply(const rs_matrix4x4* m, float3 in);
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 14))
+extern float4 __attribute__((overloadable))
+ rsMatrixMultiply(const rs_matrix4x4* m, float2 in);
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 14))
+extern float3 __attribute__((overloadable))
+ rsMatrixMultiply(const rs_matrix3x3* m, float3 in);
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 14))
+extern float3 __attribute__((overloadable))
+ rsMatrixMultiply(const rs_matrix3x3* m, float2 in);
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 14))
+extern float2 __attribute__((overloadable))
+ rsMatrixMultiply(const rs_matrix2x2* m, float2 in);
+#endif
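+
+/*
+ * Example (a minimal sketch; m is assumed to be a const rs_matrix4x4*):
+ *
+ *   float3 p = {1.f, 2.f, 3.f};
+ *   // p is expanded to (1.f, 2.f, 3.f, 1.f) before the multiplication:
+ *   float4 transformed = rsMatrixMultiply(m, p);
+ */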
+
+/*
+ * rsMatrixRotate: Apply a rotation to a transformation matrix
+ *
+ * Multiply the matrix m with a rotation matrix.
+ *
+ * This function modifies a transformation matrix to first do a rotation. The axis of
+ * rotation is the (x, y, z) vector.
+ *
+ * To apply this combined transformation to a vector, multiply the vector by the created
+ * matrix using rsMatrixMultiply().
+ *
+ * Parameters:
+ * m: Matrix to modify.
+ * rot: How much rotation to do, in degrees.
+ * x: X component of the vector that is the axis of rotation.
+ * y: Y component of the vector that is the axis of rotation.
+ * z: Z component of the vector that is the axis of rotation.
+ */
+extern void __attribute__((overloadable))
+ rsMatrixRotate(rs_matrix4x4* m, float rot, float x, float y, float z);
+
+/*
+ * rsMatrixScale: Apply a scaling to a transformation matrix
+ *
+ * Multiply the matrix m with a scaling matrix.
+ *
+ * This function modifies a transformation matrix to first do a scaling. When scaling,
+ * each component of a vector is multiplied by a number. This number can be negative.
+ *
+ * To apply this combined transformation to a vector, multiply the vector by the created
+ * matrix using rsMatrixMultiply().
+ *
+ * Parameters:
+ * m: Matrix to modify.
+ * x: Multiple to scale the x components by.
+ * y: Multiple to scale the y components by.
+ * z: Multiple to scale the z components by.
+ */
+extern void __attribute__((overloadable))
+ rsMatrixScale(rs_matrix4x4* m, float x, float y, float z);
+
+/*
+ * rsMatrixSet: Set one element
+ *
+ * Set an element of a matrix.
+ *
+ * Warning: The order of the column and row parameters may be unexpected.
+ *
+ * Parameters:
+ * m: Matrix that will be modified.
+ * col: Zero-based column of the element to be set.
+ * row: Zero-based row of the element to be set.
+ * v: Value to set.
+ */
+extern void __attribute__((overloadable))
+ rsMatrixSet(rs_matrix4x4* m, uint32_t col, uint32_t row, float v);
+
+extern void __attribute__((overloadable))
+ rsMatrixSet(rs_matrix3x3* m, uint32_t col, uint32_t row, float v);
+
+extern void __attribute__((overloadable))
+ rsMatrixSet(rs_matrix2x2* m, uint32_t col, uint32_t row, float v);
+
+/*
+ * rsMatrixTranslate: Apply a translation to a transformation matrix
+ *
+ * Multiply the matrix m with a translation matrix.
+ *
+ * This function modifies a transformation matrix to first do a translation. When
+ * translating, a number is added to each component of a vector.
+ *
+ * To apply this combined transformation to a vector, multiply the vector by the
+ * created matrix using rsMatrixMultiply().
+ *
+ * Parameters:
+ * m: Matrix to modify.
+ * x: Number to add to each x component.
+ * y: Number to add to each y component.
+ * z: Number to add to each z component.
+ */
+extern void __attribute__((overloadable))
+ rsMatrixTranslate(rs_matrix4x4* m, float x, float y, float z);
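+
+/*
+ * Example (a minimal sketch): adding a translation that is applied before an
+ * existing scaling.
+ *
+ *   rs_matrix4x4 m;
+ *   rsMatrixLoadScale(&m, 2.f, 2.f, 2.f);
+ *   rsMatrixTranslate(&m, 5.f, 0.f, 0.f);
+ *   // Applied to a vector, m now translates first, then scales.
+ */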
+
+/*
+ * rsMatrixTranspose: Transpose a matrix in place
+ *
+ * Transpose the matrix m in place.
+ *
+ * Parameters:
+ * m: Matrix to transpose.
+ */
+extern void __attribute__((overloadable))
+ rsMatrixTranspose(rs_matrix4x4* m);
+
+extern void __attribute__((overloadable))
+ rsMatrixTranspose(rs_matrix3x3* m);
+
+extern void __attribute__((overloadable))
+ rsMatrixTranspose(rs_matrix2x2* m);
+
+#endif // RENDERSCRIPT_RS_MATRIX_RSH
diff --git a/current/platform/rs/scriptc/rs_object_info.rsh b/current/platform/rs/scriptc/rs_object_info.rsh
new file mode 100644
index 0000000..0b18de3
--- /dev/null
+++ b/current/platform/rs/scriptc/rs_object_info.rsh
@@ -0,0 +1,462 @@
+/*
+ * Copyright (C) 2016 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+// Don't edit this file! It is auto-generated by frameworks/rs/api/generate.sh.
+
+/*
+ * rs_object_info.rsh: Object Characteristics Functions
+ *
+ * The functions below can be used to query the characteristics of an Allocation, Element,
+ * or Sampler object. These objects are created from Java. You can't create them from a
+ * script.
+ *
+ * Allocations:
+ *
+ * Allocations are the primary method used to pass data to and from RenderScript kernels.
+ *
+ * They are a structured collection of cells that can be used to store bitmaps, textures,
+ * arbitrary data points, etc.
+ *
+ * This collection of cells may have many dimensions (X, Y, Z, Array0, Array1, Array2, Array3),
+ * faces (for cubemaps), and levels of detail (for mipmapping).
+ *
+ * See android.renderscript.Allocation for details on how to create Allocations.
+ *
+ * Elements:
+ *
+ * The term "element" is used a bit ambiguously in RenderScript, as both type information
+ * for the cells of an Allocation and the instantiation of that type. For example:
+ * - rs_element is a handle to a type specification, and
+ * - In functions like rsGetElementAt(), "element" means the instantiation of the type,
+ * i.e. a cell of an Allocation.
+ *
+ * The functions below let you query the characteristics of the type specification.
+ *
+ * An Element can specify a simple data type as found in C, e.g. an integer, float, or
+ * boolean. It can also specify a handle to a RenderScript object. See rs_data_type for
+ * a list of basic types.
+ *
+ * Elements can specify fixed size vector (of size 2, 3, or 4) versions of the basic types.
+ * Elements can be grouped together into complex Elements, creating the equivalent of
+ * C structure definitions.
+ *
+ * Elements can also have a kind, which is semantic information used to interpret pixel
+ * data. See rs_data_kind.
+ *
+ * When creating Allocations of common elements, you can simply use one of the many predefined
+ * Elements like F32_2.
+ *
+ * To create complex Elements, use the Element.Builder Java class.
+ *
+ * Samplers:
+ *
+ * Sampler objects define how Allocations can be read as textures within a kernel.
+ * See android.renderscript.S.
+ */
+
+#ifndef RENDERSCRIPT_RS_OBJECT_INFO_RSH
+#define RENDERSCRIPT_RS_OBJECT_INFO_RSH
+
+/*
+ * rsAllocationGetDimFaces: Presence of more than one face
+ *
+ * If the Allocation is a cubemap, this function returns 1 if there's more than one face
+ * present. In all other cases, it returns 0.
+ *
+ * Use rsGetDimHasFaces() to get the dimension of a currently running kernel.
+ *
+ * Returns: Returns 1 if more than one face is present, 0 otherwise.
+ */
+extern uint32_t __attribute__((overloadable))
+ rsAllocationGetDimFaces(rs_allocation a);
+
+/*
+ * rsAllocationGetDimLOD: Presence of levels of detail
+ *
+ * Query an Allocation for the presence of more than one Level Of Detail. This is useful
+ * for mipmaps.
+ *
+ * Use rsGetDimLod() to get the dimension of a currently running kernel.
+ *
+ * Returns: Returns 1 if more than one LOD is present, 0 otherwise.
+ */
+extern uint32_t __attribute__((overloadable))
+ rsAllocationGetDimLOD(rs_allocation a);
+
+/*
+ * rsAllocationGetDimX: Size of the X dimension
+ *
+ * Returns the size of the X dimension of the Allocation.
+ *
+ * Use rsGetDimX() to get the dimension of a currently running kernel.
+ *
+ * Returns: X dimension of the Allocation.
+ */
+extern uint32_t __attribute__((overloadable))
+ rsAllocationGetDimX(rs_allocation a);
+
+/*
+ * rsAllocationGetDimY: Size of the Y dimension
+ *
+ * Returns the size of the Y dimension of the Allocation. If the Allocation has less
+ * than two dimensions, returns 0.
+ *
+ * Use rsGetDimY() to get the dimension of a currently running kernel.
+ *
+ * Returns: Y dimension of the Allocation.
+ */
+extern uint32_t __attribute__((overloadable))
+ rsAllocationGetDimY(rs_allocation a);
+
+/*
+ * rsAllocationGetDimZ: Size of the Z dimension
+ *
+ * Returns the size of the Z dimension of the Allocation. If the Allocation has less
+ * than three dimensions, returns 0.
+ *
+ * Use rsGetDimZ() to get the dimension of a currently running kernel.
+ *
+ * Returns: Z dimension of the Allocation.
+ */
+extern uint32_t __attribute__((overloadable))
+ rsAllocationGetDimZ(rs_allocation a);
+
+/*
+ * rsAllocationGetElement: Get the object that describes the cell of an Allocation
+ *
+ * Get the Element object describing the type, kind, and other characteristics of a cell
+ * of an Allocation. See the rsElement* functions below.
+ *
+ * Parameters:
+ * a: Allocation to get data from.
+ *
+ * Returns: Element describing Allocation layout.
+ */
+extern rs_element __attribute__((overloadable))
+ rsAllocationGetElement(rs_allocation a);
+
+/*
+ * rsClearObject: Release an object
+ *
+ * Tells the runtime that this handle will no longer be used to access the related
+ * object. If this was the last handle to that object, resource recovery may happen.
+ *
+ * After calling this function, *dst will be set to an empty handle. See rsIsObject().
+ */
+extern void __attribute__((overloadable))
+ rsClearObject(rs_element* dst);
+
+extern void __attribute__((overloadable))
+ rsClearObject(rs_type* dst);
+
+extern void __attribute__((overloadable))
+ rsClearObject(rs_allocation* dst);
+
+extern void __attribute__((overloadable))
+ rsClearObject(rs_sampler* dst);
+
+extern void __attribute__((overloadable))
+ rsClearObject(rs_script* dst);
+
+/*
+ * rsIsObject: Check for an empty handle
+ *
+ * Returns true if the handle contains a non-null reference.
+ *
+ * This function does not validate that the internal pointer used in the handle
+ * points to an actual valid object; it only checks for null.
+ *
+ * This function can be used to check the Element returned by rsElementGetSubElement()
+ * or see if rsClearObject() has been called on a handle.
+ */
+extern bool __attribute__((overloadable))
+ rsIsObject(rs_element v);
+
+extern bool __attribute__((overloadable))
+ rsIsObject(rs_type v);
+
+extern bool __attribute__((overloadable))
+ rsIsObject(rs_allocation v);
+
+extern bool __attribute__((overloadable))
+ rsIsObject(rs_sampler v);
+
+extern bool __attribute__((overloadable))
+ rsIsObject(rs_script v);
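+
+/*
+ * Example (a minimal sketch; gAlloc is an illustrative script global):
+ *
+ *   rs_allocation gAlloc;
+ *
+ *   if (rsIsObject(gAlloc)) {
+ *       rsClearObject(&gAlloc);  // release the handle
+ *       // rsIsObject(gAlloc) now returns false.
+ *   }
+ */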
+
+/*
+ * rsElementGetBytesSize: Size of an Element
+ *
+ * Returns the size in bytes that an instantiation of this Element will occupy.
+ */
+#if (defined(RS_VERSION) && (RS_VERSION >= 16))
+extern uint32_t __attribute__((overloadable))
+ rsElementGetBytesSize(rs_element e);
+#endif
+
+/*
+ * rsElementGetDataKind: Kind of an Element
+ *
+ * Returns the Element's data kind. This is used to interpret pixel data.
+ *
+ * See rs_data_kind.
+ */
+#if (defined(RS_VERSION) && (RS_VERSION >= 16))
+extern rs_data_kind __attribute__((overloadable))
+ rsElementGetDataKind(rs_element e);
+#endif
+
+/*
+ * rsElementGetDataType: Data type of an Element
+ *
+ * Returns the Element's base data type. This can be a type similar to C/C++ (e.g.
+ * RS_TYPE_UNSIGNED_8), a handle (e.g. RS_TYPE_ALLOCATION and RS_TYPE_ELEMENT), or a
+ * more complex numerical type (e.g. RS_TYPE_UNSIGNED_5_6_5 and RS_TYPE_MATRIX_4X4).
+ * See rs_data_type.
+ *
+ * If the Element describes a vector, this function returns the data type of one of its items.
+ * Use rsElementGetVectorSize to get the size of the vector.
+ *
+ * If the Element describes a structure, RS_TYPE_NONE is returned. Use the rsElementGetSub*
+ * functions to explore this complex Element.
+ */
+#if (defined(RS_VERSION) && (RS_VERSION >= 16))
+extern rs_data_type __attribute__((overloadable))
+ rsElementGetDataType(rs_element e);
+#endif
+
+/*
+ * rsElementGetSubElement: Sub-element of a complex Element
+ *
+ * For Elements that represent a structure, this function returns the sub-element at the
+ * specified index.
+ *
+ * If the Element is not a structure or the index is greater or equal to the number of
+ * sub-elements, an invalid handle is returned.
+ *
+ * Parameters:
+ * e: Element to query.
+ * index: Index of the sub-element to return.
+ *
+ * Returns: Sub-element at the given index.
+ */
+#if (defined(RS_VERSION) && (RS_VERSION >= 16))
+extern rs_element __attribute__((overloadable))
+ rsElementGetSubElement(rs_element e, uint32_t index);
+#endif
+
+/*
+ * rsElementGetSubElementArraySize: Array size of a sub-element of a complex Element
+ *
+ * For complex Elements, sub-elements can be statically sized arrays. This function
+ * returns the array size of the sub-element at the given index. This sub-element repetition
+ * is different from fixed-size vectors.
+ *
+ * Parameters:
+ * e: Element to query.
+ * index: Index of the sub-element.
+ *
+ * Returns: Array size of the sub-element.
+ */
+#if (defined(RS_VERSION) && (RS_VERSION >= 16))
+extern uint32_t __attribute__((overloadable))
+ rsElementGetSubElementArraySize(rs_element e, uint32_t index);
+#endif
+
+/*
+ * rsElementGetSubElementCount: Number of sub-elements
+ *
+ * Elements can be simple, such as an int or a float, or a structure with multiple
+ * sub-elements. This function returns zero for simple Elements and the number of
+ * sub-elements for complex Elements.
+ *
+ * Parameters:
+ * e: Element to get data from.
+ *
+ * Returns: Number of sub-elements.
+ */
+#if (defined(RS_VERSION) && (RS_VERSION >= 16))
+extern uint32_t __attribute__((overloadable))
+ rsElementGetSubElementCount(rs_element e);
+#endif
+
+/*
+ * rsElementGetSubElementName: Name of a sub-element
+ *
+ * For complex Elements, this function returns the name of the sub-element at the
+ * specified index.
+ *
+ * Parameters:
+ * e: Element to get data from.
+ * index: Index of the sub-element.
+ * name: Address of the array to store the name into.
+ * nameLength: Length of the provided name array.
+ *
+ * Returns: Number of characters copied, excluding the null terminator.
+ */
+#if (defined(RS_VERSION) && (RS_VERSION >= 16))
+extern uint32_t __attribute__((overloadable))
+ rsElementGetSubElementName(rs_element e, uint32_t index, char* name, uint32_t nameLength);
+#endif
+
+/*
+ * rsElementGetSubElementNameLength: Length of the name of a sub-element
+ *
+ * For complex Elements, this function returns the length of the name of the sub-element
+ * at the specified index.
+ *
+ * Parameters:
+ * e: Element to get data from.
+ * index: Index of the sub-element.
+ *
+ * Returns: Length of the sub-element name including the null terminator.
+ */
+#if (defined(RS_VERSION) && (RS_VERSION >= 16))
+extern uint32_t __attribute__((overloadable))
+ rsElementGetSubElementNameLength(rs_element e, uint32_t index);
+#endif
+
+/*
+ * rsElementGetSubElementOffsetBytes: Offset of the instantiated sub-element
+ *
+ * This function returns the relative position of the instantiation of the specified
+ * sub-element within the instantiation of the Element.
+ *
+ * For example, if the Element describes a 32 bit float followed by a 32 bit integer,
+ * the offset returned for the first will be 0 and for the second, 4.
+ *
+ * Parameters:
+ * e: Element to get data from.
+ * index: Index of the sub-element.
+ *
+ * Returns: Offset in bytes.
+ */
+#if (defined(RS_VERSION) && (RS_VERSION >= 16))
+extern uint32_t __attribute__((overloadable))
+ rsElementGetSubElementOffsetBytes(rs_element e, uint32_t index);
+#endif
+
+/*
+ * rsElementGetVectorSize: Vector size of the Element
+ *
+ * Returns the Element's vector size. If the Element does not represent a vector,
+ * 1 is returned.
+ *
+ * Parameters:
+ * e: Element to get data from.
+ *
+ * Returns: Length of the element vector.
+ */
+#if (defined(RS_VERSION) && (RS_VERSION >= 16))
+extern uint32_t __attribute__((overloadable))
+ rsElementGetVectorSize(rs_element e);
+#endif
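+
+/*
+ * Example (a minimal sketch): walking the sub-elements of a struct Element,
+ * using the rsElement* functions above (a is an illustrative rs_allocation).
+ *
+ *   rs_element e = rsAllocationGetElement(a);
+ *   uint32_t count = rsElementGetSubElementCount(e);
+ *   for (uint32_t i = 0; i < count; i++) {
+ *       rs_element sub = rsElementGetSubElement(e, i);
+ *       uint32_t offset = rsElementGetSubElementOffsetBytes(e, i);
+ *   }
+ */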
+
+/*
+ * rsGetAllocation: Return the Allocation for a given pointer
+ *
+ * DEPRECATED. Do not use.
+ *
+ * Returns the Allocation for a given pointer. The pointer should point within a valid
+ * allocation. The results are undefined if the pointer is not from a valid Allocation.
+ */
+extern rs_allocation __attribute__((overloadable
+#if (defined(RS_VERSION) && (RS_VERSION >= 22))
+, deprecated("This function is deprecated and will be removed from the SDK in a future release.")
+#endif
+))
+ rsGetAllocation(const void* p);
+
+/*
+ * rsSamplerGetAnisotropy: Anisotropy of the Sampler
+ *
+ * Get the Sampler's anisotropy.
+ *
+ * See android.renderscript.S.
+ */
+#if (defined(RS_VERSION) && (RS_VERSION >= 16))
+extern float __attribute__((overloadable))
+ rsSamplerGetAnisotropy(rs_sampler s);
+#endif
+
+/*
+ * rsSamplerGetMagnification: Sampler magnification value
+ *
+ * Get the Sampler's magnification value.
+ *
+ * See android.renderscript.S.
+ */
+#if (defined(RS_VERSION) && (RS_VERSION >= 16))
+extern rs_sampler_value __attribute__((overloadable))
+ rsSamplerGetMagnification(rs_sampler s);
+#endif
+
+/*
+ * rsSamplerGetMinification: Sampler minification value
+ *
+ * Get the Sampler's minification value.
+ *
+ * See android.renderscript.S.
+ */
+#if (defined(RS_VERSION) && (RS_VERSION >= 16))
+extern rs_sampler_value __attribute__((overloadable))
+ rsSamplerGetMinification(rs_sampler s);
+#endif
+
+/*
+ * rsSamplerGetWrapS: Sampler wrap S value
+ *
+ * Get the Sampler's wrap S value.
+ *
+ * See android.renderscript.S.
+ */
+#if (defined(RS_VERSION) && (RS_VERSION >= 16))
+extern rs_sampler_value __attribute__((overloadable))
+ rsSamplerGetWrapS(rs_sampler s);
+#endif
+
+/*
+ * rsSamplerGetWrapT: Sampler wrap T value
+ *
+ * Get the sampler's wrap T value.
+ *
+ * See android.renderscript.S.
+ */
+#if (defined(RS_VERSION) && (RS_VERSION >= 16))
+extern rs_sampler_value __attribute__((overloadable))
+ rsSamplerGetWrapT(rs_sampler s);
+#endif
+
+/*
+ * rsSetObject: For internal use.
+ *
+ */
+extern void __attribute__((overloadable))
+ rsSetObject(rs_element* dst, rs_element src);
+
+extern void __attribute__((overloadable))
+ rsSetObject(rs_type* dst, rs_type src);
+
+extern void __attribute__((overloadable))
+ rsSetObject(rs_allocation* dst, rs_allocation src);
+
+extern void __attribute__((overloadable))
+ rsSetObject(rs_sampler* dst, rs_sampler src);
+
+extern void __attribute__((overloadable))
+ rsSetObject(rs_script* dst, rs_script src);
+
+#endif // RENDERSCRIPT_RS_OBJECT_INFO_RSH
diff --git a/current/platform/rs/scriptc/rs_object_types.rsh b/current/platform/rs/scriptc/rs_object_types.rsh
new file mode 100644
index 0000000..e6511a5
--- /dev/null
+++ b/current/platform/rs/scriptc/rs_object_types.rsh
@@ -0,0 +1,232 @@
+/*
+ * Copyright (C) 2016 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+// Don't edit this file! It is auto-generated by frameworks/rs/api/generate.sh.
+
+/*
+ * rs_object_types.rsh: Object Types
+ *
+ * The types below are used to manipulate RenderScript objects like allocations, samplers,
+ * elements, and scripts. Most of these objects are created using the Java RenderScript APIs.
+ */
+
+#ifndef RENDERSCRIPT_RS_OBJECT_TYPES_RSH
+#define RENDERSCRIPT_RS_OBJECT_TYPES_RSH
+
+#define NULL ((void *)0)
+
+// Opaque handle to a RenderScript object. Do not use this directly.
+#ifndef __LP64__
+#define _RS_OBJECT_DECL \
+{\
+ const int* const p;\
+} __attribute__((packed, aligned(4)))
+#else
+#define _RS_OBJECT_DECL \
+{\
+ const long* const p;\
+ const long* const r;\
+ const long* const v1;\
+ const long* const v2;\
+}
+#endif
+
+/*
+ * rs_element: Handle to an element
+ *
+ * An opaque handle to a RenderScript element.
+ *
+ * See android.renderscript.Element.
+ */
+typedef struct rs_element _RS_OBJECT_DECL rs_element;
+
+/*
+ * rs_type: Handle to a Type
+ *
+ * An opaque handle to a RenderScript type.
+ *
+ * See android.renderscript.Type.
+ */
+typedef struct rs_type _RS_OBJECT_DECL rs_type;
+
+/*
+ * rs_allocation: Handle to an allocation
+ *
+ * An opaque handle to a RenderScript allocation.
+ *
+ * See android.renderscript.Allocation.
+ */
+typedef struct rs_allocation _RS_OBJECT_DECL rs_allocation;
+
+/*
+ * rs_sampler: Handle to a Sampler
+ *
+ * An opaque handle to a RenderScript sampler object.
+ *
+ * See android.renderscript.Sampler.
+ */
+typedef struct rs_sampler _RS_OBJECT_DECL rs_sampler;
+
+/*
+ * rs_script: Handle to a Script
+ *
+ * An opaque handle to a RenderScript script object.
+ *
+ * See android.renderscript.ScriptC.
+ */
+typedef struct rs_script _RS_OBJECT_DECL rs_script;
+
+/*
+ * rs_allocation_cubemap_face: Enum for selecting cube map faces
+ *
+ * An enum used to specify one of the six faces of a cubemap.
+ */
+#if (defined(RS_VERSION) && (RS_VERSION >= 14))
+typedef enum {
+ RS_ALLOCATION_CUBEMAP_FACE_POSITIVE_X = 0,
+ RS_ALLOCATION_CUBEMAP_FACE_NEGATIVE_X = 1,
+ RS_ALLOCATION_CUBEMAP_FACE_POSITIVE_Y = 2,
+ RS_ALLOCATION_CUBEMAP_FACE_NEGATIVE_Y = 3,
+ RS_ALLOCATION_CUBEMAP_FACE_POSITIVE_Z = 4,
+ RS_ALLOCATION_CUBEMAP_FACE_NEGATIVE_Z = 5
+} rs_allocation_cubemap_face;
+#endif
+
+/*
+ * rs_allocation_usage_type: Bitfield to specify how an allocation is used
+ *
+ * These values are ORed together to specify which usages or memory spaces are
+ * relevant to an allocation or an operation on an allocation.
+ */
+#if (defined(RS_VERSION) && (RS_VERSION >= 14))
+typedef enum {
+ RS_ALLOCATION_USAGE_SCRIPT = 0x0001, // Allocation is bound to and accessed by scripts.
+ RS_ALLOCATION_USAGE_GRAPHICS_TEXTURE = 0x0002, // Allocation is used as a texture source.
+ RS_ALLOCATION_USAGE_GRAPHICS_VERTEX = 0x0004, // Deprecated.
+ RS_ALLOCATION_USAGE_GRAPHICS_CONSTANTS = 0x0008, // Deprecated.
+ RS_ALLOCATION_USAGE_GRAPHICS_RENDER_TARGET = 0x0010, // Deprecated.
+ RS_ALLOCATION_USAGE_IO_INPUT = 0x0020, // Allocation is used as a Surface consumer.
+ RS_ALLOCATION_USAGE_IO_OUTPUT = 0x0040, // Allocation is used as a Surface producer.
+ RS_ALLOCATION_USAGE_SHARED = 0x0080 // Allocation's backing store is shared with another object (usually a Bitmap). Copying to or from the original source Bitmap will cause a synchronization rather than a full copy.
+} rs_allocation_usage_type;
+#endif
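+
+/*
+ * Example (a minimal sketch): usages are combined with bitwise OR.
+ *
+ *   int usage = RS_ALLOCATION_USAGE_SCRIPT | RS_ALLOCATION_USAGE_IO_OUTPUT;
+ */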
+
+/*
+ * rs_data_type: Element basic data type
+ *
+ * rs_data_type is used to encode the type information of a basic element.
+ *
+ * RS_TYPE_UNSIGNED_5_6_5, RS_TYPE_UNSIGNED_5_5_5_1, RS_TYPE_UNSIGNED_4_4_4_4 are for packed
+ * graphical data formats and represent vectors with per-member sizes which are treated
+ * as a single unit for packing and alignment purposes.
+ */
+#if (defined(RS_VERSION) && (RS_VERSION >= 16))
+typedef enum {
+ RS_TYPE_NONE = 0, // Element is a complex type, i.e. a struct.
+ RS_TYPE_FLOAT_16 = 1, // A 16 bit floating point value.
+ RS_TYPE_FLOAT_32 = 2, // A 32 bit floating point value.
+ RS_TYPE_FLOAT_64 = 3, // A 64 bit floating point value.
+ RS_TYPE_SIGNED_8 = 4, // An 8 bit signed integer.
+ RS_TYPE_SIGNED_16 = 5, // A 16 bit signed integer.
+ RS_TYPE_SIGNED_32 = 6, // A 32 bit signed integer.
+ RS_TYPE_SIGNED_64 = 7, // A 64 bit signed integer.
+ RS_TYPE_UNSIGNED_8 = 8, // An 8 bit unsigned integer.
+ RS_TYPE_UNSIGNED_16 = 9, // A 16 bit unsigned integer.
+ RS_TYPE_UNSIGNED_32 = 10, // A 32 bit unsigned integer.
+ RS_TYPE_UNSIGNED_64 = 11, // A 64 bit unsigned integer.
+ RS_TYPE_BOOLEAN = 12, // 0 or 1 (false or true) stored in an 8 bit container.
+ RS_TYPE_UNSIGNED_5_6_5 = 13, // A 16 bit unsigned integer packing graphical data in 5, 6, and 5 bit sections.
+ RS_TYPE_UNSIGNED_5_5_5_1 = 14, // A 16 bit unsigned integer packing graphical data in 5, 5, 5, and 1 bit sections.
+ RS_TYPE_UNSIGNED_4_4_4_4 = 15, // A 16 bit unsigned integer packing graphical data in 4, 4, 4, and 4 bit sections.
+ RS_TYPE_MATRIX_4X4 = 16, // A 4x4 matrix of 32 bit floats, aligned on a 32 bit boundary.
+ RS_TYPE_MATRIX_3X3 = 17, // A 3x3 matrix of 32 bit floats, aligned on a 32 bit boundary.
+ RS_TYPE_MATRIX_2X2 = 18, // A 2x2 matrix of 32 bit floats, aligned on a 32 bit boundary.
+ RS_TYPE_ELEMENT = 1000, // A handle to an Element.
+ RS_TYPE_TYPE = 1001, // A handle to a Type.
+ RS_TYPE_ALLOCATION = 1002, // A handle to an Allocation.
+ RS_TYPE_SAMPLER = 1003, // A handle to a Sampler.
+ RS_TYPE_SCRIPT = 1004, // A handle to a Script.
+ RS_TYPE_MESH = 1005, // Deprecated.
+ RS_TYPE_PROGRAM_FRAGMENT = 1006, // Deprecated.
+ RS_TYPE_PROGRAM_VERTEX = 1007, // Deprecated.
+ RS_TYPE_PROGRAM_RASTER = 1008, // Deprecated.
+ RS_TYPE_PROGRAM_STORE = 1009, // Deprecated.
+ RS_TYPE_FONT = 1010, // Deprecated.
+ RS_TYPE_INVALID = 10000
+} rs_data_type;
+#endif
+
+/*
+ * rs_data_kind: Element data kind
+ *
+ * This enumeration is primarily useful for graphical data. It provides additional information to
+ * help interpret the rs_data_type.
+ *
+ * RS_KIND_USER indicates no special interpretation is expected.
+ *
+ * The RS_KIND_PIXEL_* values are used in conjunction with the standard data types for representing
+ * texture formats.
+ *
+ * See the Element.createPixel() method.
+ */
+#if (defined(RS_VERSION) && (RS_VERSION >= 16))
+typedef enum {
+ RS_KIND_USER = 0, // No special interpretation.
+ RS_KIND_PIXEL_L = 7, // Luminance.
+ RS_KIND_PIXEL_A = 8, // Alpha.
+ RS_KIND_PIXEL_LA = 9, // Luminance and Alpha.
+ RS_KIND_PIXEL_RGB = 10, // Red, Green, Blue.
+ RS_KIND_PIXEL_RGBA = 11, // Red, Green, Blue, and Alpha.
+ RS_KIND_PIXEL_DEPTH = 12, // Depth for a depth texture.
+ RS_KIND_PIXEL_YUV = 13, // Luminance and chrominance.
+ RS_KIND_INVALID = 100
+} rs_data_kind;
+#endif
+
+/*
+ * rs_yuv_format: YUV format
+ *
+ * Android YUV formats that can be associated with a RenderScript Type.
+ *
+ * See android.graphics.ImageFormat for a description of each format.
+ */
+#if (defined(RS_VERSION) && (RS_VERSION >= 24))
+typedef enum {
+ RS_YUV_NONE = 0,
+ RS_YUV_YV12 = 0x32315659,
+ RS_YUV_NV21 = 0x11,
+ RS_YUV_420_888 = 0x23
+} rs_yuv_format;
+#endif
+
+/*
+ * rs_sampler_value: Sampler values
+ *
+ */
+#if (defined(RS_VERSION) && (RS_VERSION >= 16))
+typedef enum {
+ RS_SAMPLER_NEAREST = 0,
+ RS_SAMPLER_LINEAR = 1,
+ RS_SAMPLER_LINEAR_MIP_LINEAR = 2,
+ RS_SAMPLER_WRAP = 3,
+ RS_SAMPLER_CLAMP = 4,
+ RS_SAMPLER_LINEAR_MIP_NEAREST = 5,
+ RS_SAMPLER_MIRRORED_REPEAT = 6,
+ RS_SAMPLER_INVALID = 100
+} rs_sampler_value;
+#endif
+
+#endif // RENDERSCRIPT_RS_OBJECT_TYPES_RSH
diff --git a/current/platform/rs/scriptc/rs_quaternion.rsh b/current/platform/rs/scriptc/rs_quaternion.rsh
new file mode 100644
index 0000000..55d33cf
--- /dev/null
+++ b/current/platform/rs/scriptc/rs_quaternion.rsh
@@ -0,0 +1,374 @@
+/*
+ * Copyright (C) 2016 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+// Don't edit this file! It is auto-generated by frameworks/rs/api/generate.sh.
+
+/*
+ * rs_quaternion.rsh: Quaternion Functions
+ *
+ * The following functions manipulate quaternions.
+ */
+
+#ifndef RENDERSCRIPT_RS_QUATERNION_RSH
+#define RENDERSCRIPT_RS_QUATERNION_RSH
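+
+/*
+ * Usage sketch (illustrative only, not part of the generated API): building a
+ * rotation and converting it to a matrix with the functions in this file.
+ *   rs_quaternion q;
+ *   rsQuaternionLoadRotate(&q, 45.0f, 0.0f, 1.0f, 0.0f); // 45 degrees about the Y axis
+ *   rs_matrix4x4 m;
+ *   rsQuaternionGetMatrixUnit(&m, &q); // q is already normalized here
+ */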
+
+/*
+ * rsQuaternionAdd: Add two quaternions
+ *
+ * Adds two quaternions, i.e. *q += *rhs;
+ *
+ * Parameters:
+ * q: Destination quaternion to add to.
+ * rhs: Quaternion to add.
+ */
+#if !defined(RS_VERSION) || (RS_VERSION <= 23)
+static inline void __attribute__((overloadable))
+ rsQuaternionAdd(rs_quaternion* q, const rs_quaternion* rhs) {
+ q->w += rhs->w;
+ q->x += rhs->x;
+ q->y += rhs->y;
+ q->z += rhs->z;
+}
+#endif
+
+/*
+ * rsQuaternionConjugate: Conjugate a quaternion
+ *
+ * Conjugates the quaternion.
+ *
+ * Parameters:
+ * q: Quaternion to modify.
+ */
+#if !defined(RS_VERSION) || (RS_VERSION <= 23)
+static inline void __attribute__((overloadable))
+ rsQuaternionConjugate(rs_quaternion* q) {
+ q->x = -q->x;
+ q->y = -q->y;
+ q->z = -q->z;
+}
+#endif
+
+/*
+ * rsQuaternionDot: Dot product of two quaternions
+ *
+ * Returns the dot product of two quaternions.
+ *
+ * Parameters:
+ * q0: First quaternion.
+ * q1: Second quaternion.
+ */
+#if !defined(RS_VERSION) || (RS_VERSION <= 23)
+static inline float __attribute__((overloadable))
+ rsQuaternionDot(const rs_quaternion* q0, const rs_quaternion* q1) {
+ return q0->w*q1->w + q0->x*q1->x + q0->y*q1->y + q0->z*q1->z;
+}
+#endif
+
+/*
+ * rsQuaternionGetMatrixUnit: Get a rotation matrix from a quaternion
+ *
+ * Computes a rotation matrix from the normalized quaternion.
+ *
+ * Parameters:
+ * m: Resulting matrix.
+ * q: Normalized quaternion.
+ */
+#if !defined(RS_VERSION) || (RS_VERSION <= 23)
+static inline void __attribute__((overloadable))
+ rsQuaternionGetMatrixUnit(rs_matrix4x4* m, const rs_quaternion* q) {
+ float xx = q->x * q->x;
+ float xy = q->x * q->y;
+ float xz = q->x * q->z;
+ float xw = q->x * q->w;
+ float yy = q->y * q->y;
+ float yz = q->y * q->z;
+ float yw = q->y * q->w;
+ float zz = q->z * q->z;
+ float zw = q->z * q->w;
+
+ m->m[0] = 1.0f - 2.0f * ( yy + zz );
+ m->m[4] = 2.0f * ( xy - zw );
+ m->m[8] = 2.0f * ( xz + yw );
+ m->m[1] = 2.0f * ( xy + zw );
+ m->m[5] = 1.0f - 2.0f * ( xx + zz );
+ m->m[9] = 2.0f * ( yz - xw );
+ m->m[2] = 2.0f * ( xz - yw );
+ m->m[6] = 2.0f * ( yz + xw );
+ m->m[10] = 1.0f - 2.0f * ( xx + yy );
+ m->m[3] = m->m[7] = m->m[11] = m->m[12] = m->m[13] = m->m[14] = 0.0f;
+ m->m[15] = 1.0f;
+}
+#endif
+
+/*
+ * rsQuaternionLoadRotateUnit: Quaternion that represents a rotation about an arbitrary unit vector
+ *
+ * Loads a quaternion that represents a rotation about an arbitrary unit vector.
+ *
+ * Parameters:
+ * q: Destination quaternion.
+ *   rot: Angle to rotate by, in degrees (the implementation converts to radians).
+ * x: X component of the vector.
+ * y: Y component of the vector.
+ * z: Z component of the vector.
+ */
+#if !defined(RS_VERSION) || (RS_VERSION <= 23)
+static inline void __attribute__((overloadable))
+ rsQuaternionLoadRotateUnit(rs_quaternion* q, float rot, float x, float y, float z) {
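+    // Convert degrees to radians, then halve for the quaternion half-angle.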
+ rot *= (float)(M_PI / 180.0f) * 0.5f;
+ float c = cos(rot);
+ float s = sin(rot);
+
+ q->w = c;
+ q->x = x * s;
+ q->y = y * s;
+ q->z = z * s;
+}
+#endif
+
+/*
+ * rsQuaternionSet: Create a quaternion
+ *
+ * Creates a quaternion from its four components or from another quaternion.
+ *
+ * Parameters:
+ * q: Destination quaternion.
+ * w: W component.
+ * x: X component.
+ * y: Y component.
+ * z: Z component.
+ * rhs: Source quaternion.
+ */
+#if !defined(RS_VERSION) || (RS_VERSION <= 23)
+static inline void __attribute__((overloadable))
+ rsQuaternionSet(rs_quaternion* q, float w, float x, float y, float z) {
+ q->w = w;
+ q->x = x;
+ q->y = y;
+ q->z = z;
+}
+#endif
+
+#if !defined(RS_VERSION) || (RS_VERSION <= 23)
+static inline void __attribute__((overloadable))
+ rsQuaternionSet(rs_quaternion* q, const rs_quaternion* rhs) {
+ q->w = rhs->w;
+ q->x = rhs->x;
+ q->y = rhs->y;
+ q->z = rhs->z;
+}
+#endif
+
+/*
+ * rsQuaternionLoadRotate: Create a rotation quaternion
+ *
+ * Loads a quaternion that represents a rotation about an arbitrary vector,
+ * which does not need to be a unit vector.
+ *
+ * Parameters:
+ * q: Destination quaternion.
+ *   rot: Angle to rotate by, in degrees.
+ * x: X component of a vector.
+ * y: Y component of a vector.
+ * z: Z component of a vector.
+ */
+#if !defined(RS_VERSION) || (RS_VERSION <= 23)
+static inline void __attribute__((overloadable))
+ rsQuaternionLoadRotate(rs_quaternion* q, float rot, float x, float y, float z) {
+ const float len = x*x + y*y + z*z;
+ if (len != 1) {
+ const float recipLen = 1.f / sqrt(len);
+ x *= recipLen;
+ y *= recipLen;
+ z *= recipLen;
+ }
+ rsQuaternionLoadRotateUnit(q, rot, x, y, z);
+}
+#endif
+
+/*
+ * rsQuaternionNormalize: Normalize a quaternion
+ *
+ * Normalizes the quaternion.
+ *
+ * Parameters:
+ * q: Quaternion to normalize.
+ */
+#if !defined(RS_VERSION) || (RS_VERSION <= 23)
+static inline void __attribute__((overloadable))
+ rsQuaternionNormalize(rs_quaternion* q) {
+ const float len = rsQuaternionDot(q, q);
+ if (len != 1) {
+ const float recipLen = 1.f / sqrt(len);
+ q->w *= recipLen;
+ q->x *= recipLen;
+ q->y *= recipLen;
+ q->z *= recipLen;
+ }
+}
+#endif
+
+/*
+ * rsQuaternionMultiply: Multiply a quaternion by a scalar or another quaternion
+ *
+ * Multiplies a quaternion by a scalar or by another quaternion, e.g.
+ * *q = *q * scalar; or *q = *q * *rhs;. The quaternion-by-quaternion overload
+ * also normalizes the result.
+ *
+ * Parameters:
+ * q: Destination quaternion.
+ * scalar: Scalar to multiply the quaternion by.
+ * rhs: Quaternion to multiply the destination quaternion by.
+ */
+#if !defined(RS_VERSION) || (RS_VERSION <= 23)
+static inline void __attribute__((overloadable))
+ rsQuaternionMultiply(rs_quaternion* q, float scalar) {
+ q->w *= scalar;
+ q->x *= scalar;
+ q->y *= scalar;
+ q->z *= scalar;
+}
+#endif
+
+#if !defined(RS_VERSION) || (RS_VERSION <= 23)
+static inline void __attribute__((overloadable))
+ rsQuaternionMultiply(rs_quaternion* q, const rs_quaternion* rhs) {
+ rs_quaternion qtmp;
+ rsQuaternionSet(&qtmp, q);
+
+ q->w = qtmp.w*rhs->w - qtmp.x*rhs->x - qtmp.y*rhs->y - qtmp.z*rhs->z;
+ q->x = qtmp.w*rhs->x + qtmp.x*rhs->w + qtmp.y*rhs->z - qtmp.z*rhs->y;
+ q->y = qtmp.w*rhs->y + qtmp.y*rhs->w + qtmp.z*rhs->x - qtmp.x*rhs->z;
+ q->z = qtmp.w*rhs->z + qtmp.z*rhs->w + qtmp.x*rhs->y - qtmp.y*rhs->x;
+ rsQuaternionNormalize(q);
+}
+#endif
+
+/*
+ * rsQuaternionSlerp: Spherical linear interpolation between two quaternions
+ *
+ * Performs spherical linear interpolation between two quaternions.
+ *
+ * Parameters:
+ * q: Result quaternion from the interpolation.
+ * q0: First input quaternion.
+ * q1: Second input quaternion.
+ *   t: Interpolation factor, typically between 0.0 and 1.0. Values outside that range yield the nearer endpoint.
+ */
+#if !defined(RS_VERSION) || (RS_VERSION <= 23)
+static inline void __attribute__((overloadable))
+ rsQuaternionSlerp(rs_quaternion* q, const rs_quaternion* q0, const rs_quaternion* q1, float t) {
+ if (t <= 0.0f) {
+ rsQuaternionSet(q, q0);
+ return;
+ }
+ if (t >= 1.0f) {
+ rsQuaternionSet(q, q1);
+ return;
+ }
+
+ rs_quaternion tempq0, tempq1;
+ rsQuaternionSet(&tempq0, q0);
+ rsQuaternionSet(&tempq1, q1);
+
+ float angle = rsQuaternionDot(q0, q1);
+ if (angle < 0) {
+ rsQuaternionMultiply(&tempq0, -1.0f);
+ angle *= -1.0f;
+ }
+
+ float scale, invScale;
+ if (angle + 1.0f > 0.05f) {
+ if (1.0f - angle >= 0.05f) {
+ float theta = acos(angle);
+ float invSinTheta = 1.0f / sin(theta);
+ scale = sin(theta * (1.0f - t)) * invSinTheta;
+ invScale = sin(theta * t) * invSinTheta;
+ } else {
+ scale = 1.0f - t;
+ invScale = t;
+ }
+ } else {
+ rsQuaternionSet(&tempq1, tempq0.z, -tempq0.y, tempq0.x, -tempq0.w);
+ scale = sin(M_PI * (0.5f - t));
+ invScale = sin(M_PI * t);
+ }
+
+ rsQuaternionSet(q, tempq0.w*scale + tempq1.w*invScale, tempq0.x*scale + tempq1.x*invScale,
+ tempq0.y*scale + tempq1.y*invScale, tempq0.z*scale + tempq1.z*invScale);
+}
+#endif
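+
+/*
+ * Usage sketch (illustrative only): blending two orientations halfway.
+ *   rs_quaternion a, b, mid;
+ *   rsQuaternionLoadRotateUnit(&a, 0.0f, 0.0f, 0.0f, 1.0f);
+ *   rsQuaternionLoadRotateUnit(&b, 90.0f, 0.0f, 0.0f, 1.0f);
+ *   rsQuaternionSlerp(&mid, &a, &b, 0.5f); // roughly a 45 degree rotation about Z
+ */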
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 24))
+extern void __attribute__((overloadable))
+ rsQuaternionAdd(rs_quaternion* q, const rs_quaternion* rhs);
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 24))
+extern void __attribute__((overloadable))
+ rsQuaternionConjugate(rs_quaternion* q);
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 24))
+extern float __attribute__((overloadable))
+ rsQuaternionDot(const rs_quaternion* q0, const rs_quaternion* q1);
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 24))
+extern void __attribute__((overloadable))
+ rsQuaternionGetMatrixUnit(rs_matrix4x4* m, const rs_quaternion* q);
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 24))
+extern void __attribute__((overloadable))
+ rsQuaternionLoadRotateUnit(rs_quaternion* q, float rot, float x, float y, float z);
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 24))
+extern void __attribute__((overloadable))
+ rsQuaternionSet(rs_quaternion* q, float w, float x, float y, float z);
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 24))
+extern void __attribute__((overloadable))
+ rsQuaternionSet(rs_quaternion* q, const rs_quaternion* rhs);
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 24))
+extern void __attribute__((overloadable))
+ rsQuaternionLoadRotate(rs_quaternion* q, float rot, float x, float y, float z);
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 24))
+extern void __attribute__((overloadable))
+ rsQuaternionNormalize(rs_quaternion* q);
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 24))
+extern void __attribute__((overloadable))
+ rsQuaternionMultiply(rs_quaternion* q, float scalar);
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 24))
+extern void __attribute__((overloadable))
+ rsQuaternionMultiply(rs_quaternion* q, const rs_quaternion* rhs);
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 24))
+extern void __attribute__((overloadable))
+ rsQuaternionSlerp(rs_quaternion* q, const rs_quaternion* q0, const rs_quaternion* q1, float t);
+#endif
+
+#endif // RENDERSCRIPT_RS_QUATERNION_RSH
diff --git a/current/platform/rs/scriptc/rs_time.rsh b/current/platform/rs/scriptc/rs_time.rsh
new file mode 100644
index 0000000..6c0eeb0
--- /dev/null
+++ b/current/platform/rs/scriptc/rs_time.rsh
@@ -0,0 +1,126 @@
+/*
+ * Copyright (C) 2016 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+// Don't edit this file! It is auto-generated by frameworks/rs/api/generate.sh.
+
+/*
+ * rs_time.rsh: Time Functions and Types
+ *
+ * The functions below can be used to tell the current clock time and the current
+ * system uptime. It is not recommended to call these functions inside a kernel.
+ */
+
+#ifndef RENDERSCRIPT_RS_TIME_RSH
+#define RENDERSCRIPT_RS_TIME_RSH
+
+/*
+ * rs_time_t: Seconds since January 1, 1970
+ *
+ * Calendar time interpreted as seconds elapsed since the Epoch (00:00:00 on
+ * January 1, 1970, Coordinated Universal Time (UTC)).
+ */
+#ifndef __LP64__
+typedef int rs_time_t;
+#endif
+
+#ifdef __LP64__
+typedef long rs_time_t;
+#endif
+
+/*
+ * rs_tm: Date and time structure
+ *
+ * Data structure for broken-down time components.
+ */
+typedef struct {
+ int tm_sec; // Seconds after the minute. This ranges from 0 to 59, but possibly up to 60 for leap seconds.
+ int tm_min; // Minutes after the hour. This ranges from 0 to 59.
+ int tm_hour; // Hours past midnight. This ranges from 0 to 23.
+ int tm_mday; // Day of the month. This ranges from 1 to 31.
+ int tm_mon; // Months since January. This ranges from 0 to 11.
+ int tm_year; // Years since 1900.
+ int tm_wday; // Days since Sunday. This ranges from 0 to 6.
+ int tm_yday; // Days since January 1. This ranges from 0 to 365.
+ int tm_isdst; // Flag to indicate whether daylight saving time is in effect. The value is positive if it is in effect, zero if it is not, and negative if the information is not available.
+} rs_tm;
+
+/*
+ * rsGetDt: Elapsed time since last call
+ *
+ * Returns the time in seconds since this function was last called in this script.
+ *
+ * Returns: Time in seconds.
+ */
+extern float __attribute__((overloadable))
+ rsGetDt(void);
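+
+/*
+ * Usage sketch (illustrative only; angle is an assumed script global):
+ *   float dt = rsGetDt();  // seconds since the previous call in this script
+ *   angle += 90.0f * dt;   // advance an animation at 90 degrees per second
+ */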
+
+/*
+ * rsLocaltime: Convert to local time
+ *
+ * Converts the time specified by timer into a rs_tm structure that provides year, month,
+ * hour, etc. This value is stored at *local.
+ *
+ * This function returns the same pointer that is passed as its first argument. If the
+ * local parameter is NULL, this function does nothing and returns NULL.
+ *
+ * Parameters:
+ * local: Pointer to time structure where the local time will be stored.
+ * timer: Input time as a number of seconds since January 1, 1970.
+ *
+ * Returns: Pointer to the output local time, i.e. the same value as the parameter local.
+ */
+extern rs_tm* __attribute__((overloadable))
+ rsLocaltime(rs_tm* local, const rs_time_t* timer);
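+
+/*
+ * Usage sketch (illustrative only): breaking the current time into components.
+ *   rs_time_t now = rsTime(NULL);
+ *   rs_tm tm;
+ *   if (rsLocaltime(&tm, &now) != NULL) {
+ *       int year = tm.tm_year + 1900; // tm_year counts years since 1900
+ *   }
+ */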
+
+/*
+ * rsTime: Seconds since January 1, 1970
+ *
+ * Returns the number of seconds since the Epoch (00:00:00 UTC, January 1, 1970).
+ *
+ * If timer is non-NULL, the result is also stored in the memory pointed to by
+ * this variable.
+ *
+ * Parameters:
+ * timer: Location to also store the returned calendar time.
+ *
+ * Returns: Seconds since the Epoch, or -1 if there's an error.
+ */
+extern rs_time_t __attribute__((overloadable))
+ rsTime(rs_time_t* timer);
+
+/*
+ * rsUptimeMillis: System uptime in milliseconds
+ *
+ * Returns the current system clock (uptime) in milliseconds.
+ *
+ * Returns: Uptime in milliseconds.
+ */
+extern int64_t __attribute__((overloadable))
+ rsUptimeMillis(void);
+
+/*
+ * rsUptimeNanos: System uptime in nanoseconds
+ *
+ * Returns the current system clock (uptime) in nanoseconds.
+ *
+ * The granularity of the values returned by this call may be much larger than a nanosecond.
+ *
+ * Returns: Uptime in nanoseconds.
+ */
+extern int64_t __attribute__((overloadable))
+ rsUptimeNanos(void);
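+
+/*
+ * Usage sketch (illustrative only): coarse timing of a block of work.
+ *   int64_t start = rsUptimeMillis();
+ *   // ... work ...
+ *   int64_t elapsedMs = rsUptimeMillis() - start;
+ */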
+
+#endif // RENDERSCRIPT_RS_TIME_RSH
diff --git a/current/platform/rs/scriptc/rs_value_types.rsh b/current/platform/rs/scriptc/rs_value_types.rsh
new file mode 100644
index 0000000..180b297
--- /dev/null
+++ b/current/platform/rs/scriptc/rs_value_types.rsh
@@ -0,0 +1,543 @@
+/*
+ * Copyright (C) 2016 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+// Don't edit this file! It is auto-generated by frameworks/rs/api/generate.sh.
+
+/*
+ * rs_value_types.rsh: Numerical Types
+ *
+ * Scalars:
+ *
+ * RenderScript supports the following scalar numerical types:
+ *
+ *                         8 bits          16 bits           32 bits         64 bits
+ * Integer:                char, int8_t    short, int16_t    int32_t         long, long long, int64_t
+ * Unsigned integer:       uchar, uint8_t  ushort, uint16_t  uint, uint32_t  ulong, uint64_t
+ * Floating point:                         half              float           double
+ *
+ *
+ * Vectors:
+ *
+ * RenderScript supports fixed size vectors of length 2, 3, and 4.
+ * Vectors are declared using the common type name followed by a 2, 3, or 4.
+ * E.g. float4, int3, double2, ulong4.
+ *
+ * To create vector literals, use the vector type followed by the values enclosed
+ * between curly braces, e.g. (float3){1.0f, 2.0f, 3.0f}.
+ *
+ * Entries of a vector can be accessed using different naming styles.
+ *
+ * Single entries can be accessed by following the variable name with a dot and:
+ * - The letters x, y, z, and w,
+ * - The letters r, g, b, and a,
+ * - The letter s or S, followed by a zero-based index.
+ *
+ * For example, with int4 myVar; the following are equivalent:
+ * myVar.x == myVar.r == myVar.s0 == myVar.S0
+ * myVar.y == myVar.g == myVar.s1 == myVar.S1
+ * myVar.z == myVar.b == myVar.s2 == myVar.S2
+ * myVar.w == myVar.a == myVar.s3 == myVar.S3
+ *
+ * Multiple entries of a vector can be accessed at once by using an identifier that is
+ * the concatenation of multiple letters or indices. The resulting vector has a size
+ * equal to the number of entries named.
+ *
+ * With the example above, the middle two entries can be accessed using
+ * myVar.yz, myVar.gb, myVar.s12, and myVar.S12.
+ *
+ * The entries don't have to be contiguous or in increasing order. Entries can even
+ * be repeated, as long as you're not trying to assign to them. You also can't mix
+ * the naming styles.
+ *
+ * Here are examples of what can or can't be done:
+ * float4 v4;
+ * float3 v3;
+ * float2 v2;
+ * v2 = v4.xx; // Valid
+ * v3 = v4.zxw; // Valid
+ * v3 = v4.bba; // Valid
+ * v3 = v4.s032; // Valid
+ * v3.s120 = v4.S233; // Valid
+ * v4.yz = v3.rg; // Valid
+ * v4.yzx = v3.rg; // Invalid: mismatched sizes
+ * v4.yzz = v3; // Invalid: z appears twice in an assignment
+ * v3 = v3.xas0; // Invalid: can't mix xyzw with rgba nor s0...
+ * v3 = v4.s034; // Invalid: the digit can only be 0, 1, 2, or 3
+ *
+ *
+ * Matrices and Quaternions:
+ *
+ * RenderScript supports fixed size square matrices of floats of size 2x2, 3x3, and 4x4.
+ * The types are named rs_matrix2x2, rs_matrix3x3, and rs_matrix4x4. See
+ * Matrix Functions for the list of operations.
+ *
+ * Quaternions are also supported via rs_quaternion. See Quaternion Functions for the list
+ * of operations.
+ */
+
+#ifndef RENDERSCRIPT_RS_VALUE_TYPES_RSH
+#define RENDERSCRIPT_RS_VALUE_TYPES_RSH
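+
+/*
+ * Usage sketch (illustrative only): declaring and accessing the types below.
+ *   float3 color = {0.5f, 0.25f, 1.0f};
+ *   color.rg = color.gr;        // swap the first two entries
+ *   rs_matrix4x4 transform;     // entries live at transform.m[row*4 + col]
+ *   rs_quaternion orientation;  // stored as a float4
+ */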
+
+/*
+ * half: 16 bit floating point value
+ *
+ * A 16 bit floating point value.
+ */
+#if (defined(RS_VERSION) && (RS_VERSION >= 23))
+typedef __fp16 half;
+#endif
+
+/*
+ * half2: Two 16 bit floats
+ *
+ * Vector version of the half float type. Provides two half fields packed
+ * into a single 32 bit field with 32 bit alignment.
+ */
+#if (defined(RS_VERSION) && (RS_VERSION >= 23))
+typedef half __attribute__((ext_vector_type(2))) half2;
+#endif
+
+/*
+ * half3: Three 16 bit floats
+ *
+ * Vector version of the half float type. Provides three half fields packed
+ * into a single 64 bit field with 64 bit alignment.
+ */
+#if (defined(RS_VERSION) && (RS_VERSION >= 23))
+typedef half __attribute__((ext_vector_type(3))) half3;
+#endif
+
+/*
+ * half4: Four 16 bit floats
+ *
+ * Vector version of the half float type. Provides four half fields packed
+ * into a single 64 bit field with 64 bit alignment.
+ */
+#if (defined(RS_VERSION) && (RS_VERSION >= 23))
+typedef half __attribute__((ext_vector_type(4))) half4;
+#endif
+
+/*
+ * int8_t: 8 bit signed integer
+ *
+ * An 8 bit signed integer type.
+ */
+typedef char int8_t;
+
+/*
+ * int16_t: 16 bit signed integer
+ *
+ * A 16 bit signed integer type.
+ */
+typedef short int16_t;
+
+/*
+ * int32_t: 32 bit signed integer
+ *
+ * A 32 bit signed integer type.
+ */
+typedef int int32_t;
+
+/*
+ * int64_t: 64 bit signed integer
+ *
+ * A 64 bit signed integer type.
+ */
+#if !defined(RS_VERSION) || (RS_VERSION <= 20)
+typedef long long int64_t;
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 21))
+typedef long int64_t;
+#endif
+
+/*
+ * uint8_t: 8 bit unsigned integer
+ *
+ * An 8 bit unsigned integer type.
+ */
+typedef unsigned char uint8_t;
+
+/*
+ * uint16_t: 16 bit unsigned integer
+ *
+ * A 16 bit unsigned integer type.
+ */
+typedef unsigned short uint16_t;
+
+/*
+ * uint32_t: 32 bit unsigned integer
+ *
+ * A 32 bit unsigned integer type.
+ */
+typedef unsigned int uint32_t;
+
+/*
+ * uint64_t: 64 bit unsigned integer
+ *
+ * A 64 bit unsigned integer type.
+ */
+#if !defined(RS_VERSION) || (RS_VERSION <= 20)
+typedef unsigned long long uint64_t;
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 21))
+typedef unsigned long uint64_t;
+#endif
+
+/*
+ * uchar: 8 bit unsigned integer
+ *
+ * An 8 bit unsigned integer type.
+ */
+typedef uint8_t uchar;
+
+/*
+ * ushort: 16 bit unsigned integer
+ *
+ * A 16 bit unsigned integer type.
+ */
+typedef uint16_t ushort;
+
+/*
+ * uint: 32 bit unsigned integer
+ *
+ * A 32 bit unsigned integer type.
+ */
+typedef uint32_t uint;
+
+/*
+ * ulong: 64 bit unsigned integer
+ *
+ * A 64 bit unsigned integer type.
+ */
+typedef uint64_t ulong;
+
+/*
+ * size_t: Unsigned size type
+ *
+ * Unsigned size type. The number of bits depends on the compilation flags.
+ */
+#ifdef __LP64__
+typedef uint64_t size_t;
+#endif
+
+#ifndef __LP64__
+typedef uint32_t size_t;
+#endif
+
+/*
+ * ssize_t: Signed size type
+ *
+ * Signed size type. The number of bits depends on the compilation flags.
+ */
+#ifdef __LP64__
+typedef int64_t ssize_t;
+#endif
+
+#ifndef __LP64__
+typedef int32_t ssize_t;
+#endif
+
+/*
+ * float2: Two 32 bit floats
+ *
+ * A vector of two floats. These two floats are packed into a single 64 bit field
+ * with a 64 bit alignment.
+ */
+typedef float __attribute__((ext_vector_type(2))) float2;
+
+/*
+ * float3: Three 32 bit floats
+ *
+ * A vector of three floats. These three floats are packed into a single 128 bit field
+ * with a 128 bit alignment.
+ */
+typedef float __attribute__((ext_vector_type(3))) float3;
+
+/*
+ * float4: Four 32 bit floats
+ *
+ * A vector of four floats. These four floats are packed into a single 128 bit field
+ * with a 128 bit alignment.
+ */
+typedef float __attribute__((ext_vector_type(4))) float4;
+
+/*
+ * double2: Two 64 bit floats
+ *
+ * A vector of two doubles. These two doubles are packed into a single 128 bit field
+ * with a 128 bit alignment.
+ */
+typedef double __attribute__((ext_vector_type(2))) double2;
+
+/*
+ * double3: Three 64 bit floats
+ *
+ * A vector of three doubles. These three doubles are packed into a single 256 bit field
+ * with a 256 bit alignment.
+ */
+typedef double __attribute__((ext_vector_type(3))) double3;
+
+/*
+ * double4: Four 64 bit floats
+ *
+ * A vector of four doubles. These four doubles are packed into a single 256 bit field
+ * with a 256 bit alignment.
+ */
+typedef double __attribute__((ext_vector_type(4))) double4;
+
+/*
+ * uchar2: Two 8 bit unsigned integers
+ *
+ * A vector of two uchars. These two uchars are packed into a single 16 bit field
+ * with a 16 bit alignment.
+ */
+typedef uchar __attribute__((ext_vector_type(2))) uchar2;
+
+/*
+ * uchar3: Three 8 bit unsigned integers
+ *
+ * A vector of three uchars. These three uchars are packed into a single 32 bit field
+ * with a 32 bit alignment.
+ */
+typedef uchar __attribute__((ext_vector_type(3))) uchar3;
+
+/*
+ * uchar4: Four 8 bit unsigned integers
+ *
+ * A vector of four uchars. These four uchars are packed into a single 32 bit field
+ * with a 32 bit alignment.
+ */
+typedef uchar __attribute__((ext_vector_type(4))) uchar4;
+
+/*
+ * ushort2: Two 16 bit unsigned integers
+ *
+ * A vector of two ushorts. These two ushorts are packed into a single 32 bit field
+ * with a 32 bit alignment.
+ */
+typedef ushort __attribute__((ext_vector_type(2))) ushort2;
+
+/*
+ * ushort3: Three 16 bit unsigned integers
+ *
+ * A vector of three ushorts. These three ushorts are packed into a single 64 bit field
+ * with a 64 bit alignment.
+ */
+typedef ushort __attribute__((ext_vector_type(3))) ushort3;
+
+/*
+ * ushort4: Four 16 bit unsigned integers
+ *
+ * A vector of four ushorts. These four ushorts are packed into a single 64 bit field
+ * with a 64 bit alignment.
+ */
+typedef ushort __attribute__((ext_vector_type(4))) ushort4;
+
+/*
+ * uint2: Two 32 bit unsigned integers
+ *
+ * A vector of two uints. These two uints are packed into a single 64 bit field
+ * with a 64 bit alignment.
+ */
+typedef uint __attribute__((ext_vector_type(2))) uint2;
+
+/*
+ * uint3: Three 32 bit unsigned integers
+ *
+ * A vector of three uints. These three uints are packed into a single 128 bit field
+ * with a 128 bit alignment.
+ */
+typedef uint __attribute__((ext_vector_type(3))) uint3;
+
+/*
+ * uint4: Four 32 bit unsigned integers
+ *
+ * A vector of four uints. These four uints are packed into a single 128 bit field
+ * with a 128 bit alignment.
+ */
+typedef uint __attribute__((ext_vector_type(4))) uint4;
+
+/*
+ * ulong2: Two 64 bit unsigned integers
+ *
+ * A vector of two ulongs. These two ulongs are packed into a single 128 bit field
+ * with a 128 bit alignment.
+ */
+typedef ulong __attribute__((ext_vector_type(2))) ulong2;
+
+/*
+ * ulong3: Three 64 bit unsigned integers
+ *
+ * A vector of three ulongs. These three ulongs are packed into a single 256 bit field
+ * with a 256 bit alignment.
+ */
+typedef ulong __attribute__((ext_vector_type(3))) ulong3;
+
+/*
+ * ulong4: Four 64 bit unsigned integers
+ *
+ * A vector of four ulongs. These four ulongs are packed into a single 256 bit field
+ * with a 256 bit alignment.
+ */
+typedef ulong __attribute__((ext_vector_type(4))) ulong4;
+
+/*
+ * char2: Two 8 bit signed integers
+ *
+ * A vector of two chars. These two chars are packed into a single 16 bit field
+ * with a 16 bit alignment.
+ */
+typedef char __attribute__((ext_vector_type(2))) char2;
+
+/*
+ * char3: Three 8 bit signed integers
+ *
+ * A vector of three chars. These three chars are packed into a single 32 bit field
+ * with a 32 bit alignment.
+ */
+typedef char __attribute__((ext_vector_type(3))) char3;
+
+/*
+ * char4: Four 8 bit signed integers
+ *
+ * A vector of four chars. These four chars are packed into a single 32 bit field
+ * with a 32 bit alignment.
+ */
+typedef char __attribute__((ext_vector_type(4))) char4;
+
+/*
+ * short2: Two 16 bit signed integers
+ *
+ * A vector of two shorts. These two shorts are packed into a single 32 bit field
+ * with a 32 bit alignment.
+ */
+typedef short __attribute__((ext_vector_type(2))) short2;
+
+/*
+ * short3: Three 16 bit signed integers
+ *
+ * A vector of three shorts. These three shorts are packed into a single 64 bit field
+ * with a 64 bit alignment.
+ */
+typedef short __attribute__((ext_vector_type(3))) short3;
+
+/*
+ * short4: Four 16 bit signed integers
+ *
+ * A vector of four shorts. These four shorts are packed into a single 64 bit field
+ * with a 64 bit alignment.
+ */
+typedef short __attribute__((ext_vector_type(4))) short4;
+
+/*
+ * int2: Two 32 bit signed integers
+ *
+ * A vector of two ints. These two ints are packed into a single 64 bit field
+ * with a 64 bit alignment.
+ */
+typedef int __attribute__((ext_vector_type(2))) int2;
+
+/*
+ * int3: Three 32 bit signed integers
+ *
+ * A vector of three ints. These three ints are packed into a single 128 bit field
+ * with a 128 bit alignment.
+ */
+typedef int __attribute__((ext_vector_type(3))) int3;
+
+/*
+ * int4: Four 32 bit signed integers
+ *
+ * A vector of four ints. These four ints are packed into a single 128 bit field
+ * with a 128 bit alignment.
+ */
+typedef int __attribute__((ext_vector_type(4))) int4;
+
+/*
+ * long2: Two 64 bit signed integers
+ *
+ * A vector of two longs. These two longs are packed into a single 128 bit field
+ * with a 128 bit alignment.
+ */
+typedef long __attribute__((ext_vector_type(2))) long2;
+
+/*
+ * long3: Three 64 bit signed integers
+ *
+ * A vector of three longs. These three longs are packed into a single 256 bit field
+ * with a 256 bit alignment.
+ */
+typedef long __attribute__((ext_vector_type(3))) long3;
+
+/*
+ * long4: Four 64 bit signed integers
+ *
+ * A vector of four longs. These four longs are packed into a single 256 bit field
+ * with a 256 bit alignment.
+ */
+typedef long __attribute__((ext_vector_type(4))) long4;
+
+/*
+ * rs_matrix2x2: 2x2 matrix of 32 bit floats
+ *
+ * A square 2x2 matrix of floats. The entries are stored in the array at the
+ * location [row*2 + col].
+ *
+ * See Matrix Functions.
+ */
+typedef struct {
+ float m[4];
+} rs_matrix2x2;
+
+/*
+ * rs_matrix3x3: 3x3 matrix of 32 bit floats
+ *
+ * A square 3x3 matrix of floats. The entries are stored in the array at the
+ * location [row*3 + col].
+ *
+ * See Matrix Functions.
+ */
+typedef struct {
+ float m[9];
+} rs_matrix3x3;
+
+/*
+ * rs_matrix4x4: 4x4 matrix of 32 bit floats
+ *
+ * A square 4x4 matrix of floats. The entries are stored in the array at the
+ * location [row*4 + col].
+ *
+ * See Matrix Functions.
+ */
+typedef struct {
+ float m[16];
+} rs_matrix4x4;
+
+/*
+ * rs_quaternion: Quaternion
+ *
+ * A four-component vector of floats (a float4) that represents a quaternion.
+ *
+ * See Quaternion Functions.
+ */
+typedef float4 rs_quaternion;
+
+#endif // RENDERSCRIPT_RS_VALUE_TYPES_RSH
diff --git a/current/platform/rs/scriptc/rs_vector_math.rsh b/current/platform/rs/scriptc/rs_vector_math.rsh
new file mode 100644
index 0000000..2f5e8e7
--- /dev/null
+++ b/current/platform/rs/scriptc/rs_vector_math.rsh
@@ -0,0 +1,453 @@
+/*
+ * Copyright (C) 2016 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+// Don't edit this file! It is auto-generated by frameworks/rs/api/generate.sh.
+
+/*
+ * rs_vector_math.rsh: Vector Math Functions
+ *
+ * These functions interpret the input arguments as representations of vectors in
+ * n-dimensional space.
+ *
+ * The precision of the mathematical operations on 32 bit floats is affected by the pragmas
+ * rs_fp_relaxed and rs_fp_full. See Mathematical Constants and Functions for details.
+ *
+ * Different precision/speed tradeoffs can be achieved by using variants of the common math
+ * functions. Functions with a name starting with
+ * - native_: May have custom hardware implementations with weaker precision. Additionally,
+ * subnormal values may be flushed to zero, rounding towards zero may be used, and NaN and
+ * infinity input may not be handled correctly.
+ * - fast_: May perform internal computations using 16 bit floats. Additionally, subnormal
+ * values may be flushed to zero, and rounding towards zero may be used.
+ *
+ */
+
+#ifndef RENDERSCRIPT_RS_VECTOR_MATH_RSH
+#define RENDERSCRIPT_RS_VECTOR_MATH_RSH
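+
+/*
+ * Usage sketch (illustrative only): the same computation at the three
+ * precision/speed tradeoffs described above.
+ *   float3 v = {1.0f, 2.0f, 2.0f};
+ *   float l1 = length(v);        // full precision: sqrt(9) == 3.0f
+ *   float l2 = fast_length(v);   // roughly 16 bit float precision (API 17+)
+ *   float l3 = native_length(v); // device-specific, weakest guarantees (API 21+)
+ */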
+
+/*
+ * cross: Cross product of two vectors
+ *
+ * Computes the cross product of two vectors.
+ */
+extern float3 __attribute__((const, overloadable))
+ cross(float3 left_vector, float3 right_vector);
+
+extern float4 __attribute__((const, overloadable))
+ cross(float4 left_vector, float4 right_vector);
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 24))
+extern half3 __attribute__((const, overloadable))
+ cross(half3 left_vector, half3 right_vector);
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 24))
+extern half4 __attribute__((const, overloadable))
+ cross(half4 left_vector, half4 right_vector);
+#endif
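+
+/*
+ * Usage sketch (illustrative only; a, b, and c are assumed float3 vertices):
+ *   float3 n = normalize(cross(b - a, c - a)); // triangle surface normal
+ */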
+
+/*
+ * distance: Distance between two points
+ *
+ * Computes the distance between two points.
+ *
+ * See also fast_distance(), native_distance().
+ */
+extern float __attribute__((const, overloadable))
+ distance(float left_vector, float right_vector);
+
+extern float __attribute__((const, overloadable))
+ distance(float2 left_vector, float2 right_vector);
+
+extern float __attribute__((const, overloadable))
+ distance(float3 left_vector, float3 right_vector);
+
+extern float __attribute__((const, overloadable))
+ distance(float4 left_vector, float4 right_vector);
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 24))
+extern half __attribute__((const, overloadable))
+ distance(half left_vector, half right_vector);
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 24))
+extern half __attribute__((const, overloadable))
+ distance(half2 left_vector, half2 right_vector);
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 24))
+extern half __attribute__((const, overloadable))
+ distance(half3 left_vector, half3 right_vector);
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 24))
+extern half __attribute__((const, overloadable))
+ distance(half4 left_vector, half4 right_vector);
+#endif
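+
+/*
+ * Usage sketch (illustrative only):
+ *   float2 p0 = {0.0f, 0.0f};
+ *   float2 p1 = {3.0f, 4.0f};
+ *   float d = distance(p0, p1); // 5.0f, the classic 3-4-5 triangle
+ */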
+
+/*
+ * dot: Dot product of two vectors
+ *
+ * Computes the dot product of two vectors.
+ */
+extern float __attribute__((const, overloadable))
+ dot(float left_vector, float right_vector);
+
+extern float __attribute__((const, overloadable))
+ dot(float2 left_vector, float2 right_vector);
+
+extern float __attribute__((const, overloadable))
+ dot(float3 left_vector, float3 right_vector);
+
+extern float __attribute__((const, overloadable))
+ dot(float4 left_vector, float4 right_vector);
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 24))
+extern half __attribute__((const, overloadable))
+ dot(half left_vector, half right_vector);
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 24))
+extern half __attribute__((const, overloadable))
+ dot(half2 left_vector, half2 right_vector);
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 24))
+extern half __attribute__((const, overloadable))
+ dot(half3 left_vector, half3 right_vector);
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 24))
+extern half __attribute__((const, overloadable))
+ dot(half4 left_vector, half4 right_vector);
+#endif
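+
+/*
+ * Usage sketch (illustrative only; n and lightDir are assumed unit float3s):
+ *   float ndotl = fmax(dot(n, lightDir), 0.0f); // Lambertian shading factor
+ */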
+
+/*
+ * fast_distance: Approximate distance between two points
+ *
+ * Computes the approximate distance between two points.
+ *
+ * The precision is what would be expected from doing the computation using 16 bit floating
+ * point values.
+ *
+ * See also distance(), native_distance().
+ */
+#if (defined(RS_VERSION) && (RS_VERSION >= 17))
+extern float __attribute__((const, overloadable))
+ fast_distance(float left_vector, float right_vector);
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 17))
+extern float __attribute__((const, overloadable))
+ fast_distance(float2 left_vector, float2 right_vector);
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 17))
+extern float __attribute__((const, overloadable))
+ fast_distance(float3 left_vector, float3 right_vector);
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 17))
+extern float __attribute__((const, overloadable))
+ fast_distance(float4 left_vector, float4 right_vector);
+#endif
+
+/*
+ * fast_length: Approximate length of a vector
+ *
+ * Computes the approximate length of a vector.
+ *
+ * The precision is what would be expected from doing the computation using 16 bit floating
+ * point values.
+ *
+ * See also length(), native_length().
+ */
+#if (defined(RS_VERSION) && (RS_VERSION >= 17))
+extern float __attribute__((const, overloadable))
+ fast_length(float v);
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 17))
+extern float __attribute__((const, overloadable))
+ fast_length(float2 v);
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 17))
+extern float __attribute__((const, overloadable))
+ fast_length(float3 v);
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 17))
+extern float __attribute__((const, overloadable))
+ fast_length(float4 v);
+#endif
+
+/*
+ * fast_normalize: Approximate normalized vector
+ *
+ * Approximately normalizes a vector.
+ *
+ * For vectors of size 1, returns -1.f for negative values, 0.f for null values, and 1.f for
+ * positive values.
+ *
+ * The precision is what would be expected from doing the computation using 16 bit floating
+ * point values.
+ *
+ * See also normalize(), native_normalize().
+ */
+#if (defined(RS_VERSION) && (RS_VERSION >= 17))
+extern float __attribute__((const, overloadable))
+ fast_normalize(float v);
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 17))
+extern float2 __attribute__((const, overloadable))
+ fast_normalize(float2 v);
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 17))
+extern float3 __attribute__((const, overloadable))
+ fast_normalize(float3 v);
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 17))
+extern float4 __attribute__((const, overloadable))
+ fast_normalize(float4 v);
+#endif
+
+/*
+ * length: Length of a vector
+ *
+ * Computes the length of a vector.
+ *
+ * See also fast_length(), native_length().
+ */
+extern float __attribute__((const, overloadable))
+ length(float v);
+
+extern float __attribute__((const, overloadable))
+ length(float2 v);
+
+extern float __attribute__((const, overloadable))
+ length(float3 v);
+
+extern float __attribute__((const, overloadable))
+ length(float4 v);
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 24))
+extern half __attribute__((const, overloadable))
+ length(half v);
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 24))
+extern half __attribute__((const, overloadable))
+ length(half2 v);
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 24))
+extern half __attribute__((const, overloadable))
+ length(half3 v);
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 24))
+extern half __attribute__((const, overloadable))
+ length(half4 v);
+#endif
+
+/*
+ * native_distance: Approximate distance between two points
+ *
+ * Computes the approximate distance between two points.
+ *
+ * See also distance(), fast_distance().
+ */
+#if (defined(RS_VERSION) && (RS_VERSION >= 21))
+extern float __attribute__((const, overloadable))
+ native_distance(float left_vector, float right_vector);
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 21))
+extern float __attribute__((const, overloadable))
+ native_distance(float2 left_vector, float2 right_vector);
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 21))
+extern float __attribute__((const, overloadable))
+ native_distance(float3 left_vector, float3 right_vector);
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 21))
+extern float __attribute__((const, overloadable))
+ native_distance(float4 left_vector, float4 right_vector);
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 24))
+extern half __attribute__((const, overloadable))
+ native_distance(half left_vector, half right_vector);
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 24))
+extern half __attribute__((const, overloadable))
+ native_distance(half2 left_vector, half2 right_vector);
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 24))
+extern half __attribute__((const, overloadable))
+ native_distance(half3 left_vector, half3 right_vector);
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 24))
+extern half __attribute__((const, overloadable))
+ native_distance(half4 left_vector, half4 right_vector);
+#endif
+
+/*
+ * native_length: Approximate length of a vector
+ *
+ * Computes the approximate length of a vector.
+ *
+ * See also length(), fast_length().
+ */
+#if (defined(RS_VERSION) && (RS_VERSION >= 21))
+extern float __attribute__((const, overloadable))
+ native_length(float v);
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 21))
+extern float __attribute__((const, overloadable))
+ native_length(float2 v);
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 21))
+extern float __attribute__((const, overloadable))
+ native_length(float3 v);
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 21))
+extern float __attribute__((const, overloadable))
+ native_length(float4 v);
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 24))
+extern half __attribute__((const, overloadable))
+ native_length(half v);
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 24))
+extern half __attribute__((const, overloadable))
+ native_length(half2 v);
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 24))
+extern half __attribute__((const, overloadable))
+ native_length(half3 v);
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 24))
+extern half __attribute__((const, overloadable))
+ native_length(half4 v);
+#endif
+
+/*
+ * native_normalize: Approximately normalize a vector
+ *
+ * Approximately normalizes a vector.
+ *
+ * See also normalize(), fast_normalize().
+ */
+#if (defined(RS_VERSION) && (RS_VERSION >= 21))
+extern float __attribute__((const, overloadable))
+ native_normalize(float v);
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 21))
+extern float2 __attribute__((const, overloadable))
+ native_normalize(float2 v);
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 21))
+extern float3 __attribute__((const, overloadable))
+ native_normalize(float3 v);
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 21))
+extern float4 __attribute__((const, overloadable))
+ native_normalize(float4 v);
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 24))
+extern half __attribute__((const, overloadable))
+ native_normalize(half v);
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 24))
+extern half2 __attribute__((const, overloadable))
+ native_normalize(half2 v);
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 24))
+extern half3 __attribute__((const, overloadable))
+ native_normalize(half3 v);
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 24))
+extern half4 __attribute__((const, overloadable))
+ native_normalize(half4 v);
+#endif
+
+/*
+ * normalize: Normalize a vector
+ *
+ * Normalizes a vector.
+ *
+ * For vectors of size 1, returns -1.f for negative values, 0.f for null values, and 1.f for
+ * positive values.
+ *
+ * See also fast_normalize(), native_normalize().
+ */
+extern float __attribute__((const, overloadable))
+ normalize(float v);
+
+extern float2 __attribute__((const, overloadable))
+ normalize(float2 v);
+
+extern float3 __attribute__((const, overloadable))
+ normalize(float3 v);
+
+extern float4 __attribute__((const, overloadable))
+ normalize(float4 v);
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 24))
+extern half __attribute__((const, overloadable))
+ normalize(half v);
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 24))
+extern half2 __attribute__((const, overloadable))
+ normalize(half2 v);
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 24))
+extern half3 __attribute__((const, overloadable))
+ normalize(half3 v);
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 24))
+extern half4 __attribute__((const, overloadable))
+ normalize(half4 v);
+#endif
+
+#endif // RENDERSCRIPT_RS_VECTOR_MATH_RSH
diff --git a/current/platform/x86/libRSSupport.so b/current/platform/x86/libRSSupport.so
new file mode 100755
index 0000000..782a140
--- /dev/null
+++ b/current/platform/x86/libRSSupport.so
Binary files differ
diff --git a/current/platform/x86/libRSSupportIO.so b/current/platform/x86/libRSSupportIO.so
new file mode 100755
index 0000000..46af321
--- /dev/null
+++ b/current/platform/x86/libRSSupportIO.so
Binary files differ
diff --git a/current/platform/x86/libRScpp_static.a b/current/platform/x86/libRScpp_static.a
new file mode 100644
index 0000000..2906101
--- /dev/null
+++ b/current/platform/x86/libRScpp_static.a
Binary files differ
diff --git a/current/platform/x86/libblasV8.so b/current/platform/x86/libblasV8.so
new file mode 100755
index 0000000..bb3cfc8
--- /dev/null
+++ b/current/platform/x86/libblasV8.so
Binary files differ
diff --git a/current/platform/x86/libcompiler_rt.a b/current/platform/x86/libcompiler_rt.a
new file mode 100644
index 0000000..8f53eb3
--- /dev/null
+++ b/current/platform/x86/libcompiler_rt.a
Binary files differ
diff --git a/current/platform/x86/librsrt.bc b/current/platform/x86/librsrt.bc
new file mode 100644
index 0000000..dc3bda4
--- /dev/null
+++ b/current/platform/x86/librsrt.bc
Binary files differ
diff --git a/current/platform/x86_64/libRSSupport.so b/current/platform/x86_64/libRSSupport.so
new file mode 100755
index 0000000..bf00b0d
--- /dev/null
+++ b/current/platform/x86_64/libRSSupport.so
Binary files differ
diff --git a/current/platform/x86_64/libRSSupportIO.so b/current/platform/x86_64/libRSSupportIO.so
new file mode 100755
index 0000000..239f029
--- /dev/null
+++ b/current/platform/x86_64/libRSSupportIO.so
Binary files differ
diff --git a/current/platform/x86_64/libRScpp_static.a b/current/platform/x86_64/libRScpp_static.a
new file mode 100644
index 0000000..0e3a62d
--- /dev/null
+++ b/current/platform/x86_64/libRScpp_static.a
Binary files differ
diff --git a/current/platform/x86_64/libblasV8.so b/current/platform/x86_64/libblasV8.so
new file mode 100755
index 0000000..d01e07a
--- /dev/null
+++ b/current/platform/x86_64/libblasV8.so
Binary files differ
diff --git a/current/platform/x86_64/libcompiler_rt.a b/current/platform/x86_64/libcompiler_rt.a
new file mode 100644
index 0000000..dd7315b
--- /dev/null
+++ b/current/platform/x86_64/libcompiler_rt.a
Binary files differ
diff --git a/current/platform/x86_64/librsrt.bc b/current/platform/x86_64/librsrt.bc
new file mode 100644
index 0000000..239bb97
--- /dev/null
+++ b/current/platform/x86_64/librsrt.bc
Binary files differ